From 4ad3f01225745294474f1ae0de33e5a86824a744 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Fri, 4 Oct 2024 19:57:00 +0100 Subject: [PATCH 0001/1708] safeweb: allow passing http.Server in safeweb.Config (#13688) Extend safeweb.Config with the ability to pass a http.Server that safeweb will use to server traffic. Updates corp#8207 Signed-off-by: Patrick O'Doherty --- safeweb/http.go | 12 +++++++++++- safeweb/http_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/safeweb/http.go b/safeweb/http.go index 9130b42d3..14c61336a 100644 --- a/safeweb/http.go +++ b/safeweb/http.go @@ -144,6 +144,12 @@ type Config struct { // BrowserMux when SecureContext is true. // If empty, it defaults to max-age of 1 year. StrictTransportSecurityOptions string + + // HTTPServer, if specified, is the underlying http.Server that safeweb will + // use to serve requests. If nil, a new http.Server will be created. + // Do not use the Handler field of http.Server, as it will be ignored. + // Instead, set your handlers using APIMux and BrowserMux. + HTTPServer *http.Server } func (c *Config) setDefaults() error { @@ -203,7 +209,11 @@ func NewServer(config Config) (*Server, error) { if config.CSPAllowInlineStyles { s.csp = defaultCSP + `; style-src 'self' 'unsafe-inline'` } - s.h = &http.Server{Handler: s} + s.h = cmp.Or(config.HTTPServer, &http.Server{}) + if s.h.Handler != nil { + return nil, fmt.Errorf("use safeweb.Config.APIMux and safeweb.Config.BrowserMux instead of http.Server.Handler") + } + s.h.Handler = s return s, nil } diff --git a/safeweb/http_test.go b/safeweb/http_test.go index 843da08aa..cec14b2b9 100644 --- a/safeweb/http_test.go +++ b/safeweb/http_test.go @@ -10,6 +10,7 @@ import ( "strconv" "strings" "testing" + "time" "github.com/google/go-cmp/cmp" "github.com/gorilla/csrf" @@ -609,3 +610,26 @@ func TestStrictTransportSecurityOptions(t *testing.T) { }) } } + +func TestOverrideHTTPServer(t *testing.T) { + s, err := NewServer(Config{}) + if err != nil { + t.Fatalf("NewServer: %v", err) + } + if s.h.IdleTimeout != 0 { + t.Fatalf("got %v; want 0", s.h.IdleTimeout) + } + + c := http.Server{ + IdleTimeout: 10 * time.Second, + } + + s, err = NewServer(Config{HTTPServer: &c}) + if err != nil { + t.Fatalf("NewServer: %v", err) + } + + if s.h.IdleTimeout != c.IdleTimeout { + t.Fatalf("got %v; want %v", s.h.IdleTimeout, c.IdleTimeout) + } +} From 12f1bc7c7737848d8ff75a47e19f99a134503a65 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Fri, 4 Oct 2024 16:52:11 -0400 Subject: [PATCH 0002/1708] envknob: support disk-based envknobs on the macsys build Per my investigation just now, the $HOME environment variable is unset on the macsys (standalone macOS GUI) variant, but the current working directory is valid. Look for the environment variable file in that location in addition to inside the home directory. 
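As a rough sketch of the darwin lookup order this change introduces (illustrative only; the helper name candidateEnvFiles is made up here, and the real code is getPlatformEnvFiles/ApplyDiskConfig in the diff below), the idea is to probe $HOME first and fall back to the working directory:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// candidateEnvFiles is an illustrative helper (not part of the patch): it
// builds the ordered list of tailscaled-env.txt locations to probe on darwin.
func candidateEnvFiles() []string {
	var candidates []string
	if home := os.Getenv("HOME"); home != "" {
		// App Store builds: HOME is the sandbox container's Data directory.
		candidates = append(candidates, filepath.Join(home, "tailscaled-env.txt"))
	}
	if wd, err := os.Getwd(); err == nil {
		// macsys builds: HOME may be unset, but the working directory is valid.
		candidates = append(candidates, filepath.Join(wd, "tailscaled-env.txt"))
	}
	return candidates
}

func main() {
	fmt.Println(candidateEnvFiles())
}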
Updates #3707 Signed-off-by: Andrew Dunham Change-Id: I481ae2e0d19b316244373e06865e3b5c3a9f3b88 --- envknob/envknob.go | 76 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 53 insertions(+), 23 deletions(-) diff --git a/envknob/envknob.go b/envknob/envknob.go index f1925ccf4..59a6d90af 100644 --- a/envknob/envknob.go +++ b/envknob/envknob.go @@ -17,6 +17,7 @@ package envknob import ( "bufio" + "errors" "fmt" "io" "log" @@ -503,7 +504,7 @@ func ApplyDiskConfigError() error { return applyDiskConfigErr } // // On macOS, use one of: // -// - ~/Library/Containers/io.tailscale.ipn.macsys/Data/tailscaled-env.txt +// - /private/var/root/Library/Containers/io.tailscale.ipn.macsys.network-extension/Data/tailscaled-env.txt // for standalone macOS GUI builds // - ~/Library/Containers/io.tailscale.ipn.macos.network-extension/Data/tailscaled-env.txt // for App Store builds @@ -533,44 +534,73 @@ func ApplyDiskConfig() (err error) { return applyKeyValueEnv(f) } - name := getPlatformEnvFile() - if name == "" { + names := getPlatformEnvFiles() + if len(names) == 0 { return nil } - f, err = os.Open(name) - if os.IsNotExist(err) { - return nil - } - if err != nil { - return err + + var errs []error + for _, name := range names { + f, err = os.Open(name) + if os.IsNotExist(err) { + continue + } + if err != nil { + errs = append(errs, err) + continue + } + defer f.Close() + + return applyKeyValueEnv(f) } - defer f.Close() - return applyKeyValueEnv(f) + + // If we have any errors, return them; if all errors are such that + // os.IsNotExist(err) returns true, then errs is empty and we will + // return nil. + return errors.Join(errs...) } -// getPlatformEnvFile returns the current platform's path to an optional -// tailscaled-env.txt file. It returns an empty string if none is defined -// for the platform. -func getPlatformEnvFile() string { +// getPlatformEnvFiles returns a list of paths to the current platform's +// optional tailscaled-env.txt file. It returns an empty list if none is +// defined for the platform. +func getPlatformEnvFiles() []string { switch runtime.GOOS { case "windows": - return filepath.Join(os.Getenv("ProgramData"), "Tailscale", "tailscaled-env.txt") + return []string{ + filepath.Join(os.Getenv("ProgramData"), "Tailscale", "tailscaled-env.txt"), + } case "linux": if distro.Get() == distro.Synology { - return "/etc/tailscale/tailscaled-env.txt" + return []string{"/etc/tailscale/tailscaled-env.txt"} } case "darwin": if version.IsSandboxedMacOS() { // the two GUI variants (App Store or separate download) - // This will be user-visible as ~/Library/Containers/$VARIANT/Data/tailscaled-env.txt - // where $VARIANT is "io.tailscale.ipn.macsys" for macsys (downloadable mac GUI builds) - // or "io.tailscale.ipn.macos.network-extension" for App Store builds. - return filepath.Join(os.Getenv("HOME"), "tailscaled-env.txt") + // On the App Store variant, the home directory is set + // to something like: + // ~/Library/Containers/io.tailscale.ipn.macos.network-extension/Data + // + // On the macsys (downloadable Mac GUI) variant, the + // home directory can be unset, but we have a working + // directory that looks like: + // /private/var/root/Library/Containers/io.tailscale.ipn.macsys.network-extension/Data + // + // Try both and see if we can find the file in either + // location. 
+ var candidates []string + if home := os.Getenv("HOME"); home != "" { + candidates = append(candidates, filepath.Join(home, "tailscaled-env.txt")) + } + if wd, err := os.Getwd(); err == nil { + candidates = append(candidates, filepath.Join(wd, "tailscaled-env.txt")) + } + + return candidates } else { // Open source / homebrew variable, running tailscaled-on-macOS. - return "/etc/tailscale/tailscaled-env.txt" + return []string{"/etc/tailscale/tailscaled-env.txt"} } } - return "" + return nil } // applyKeyValueEnv reads key=value lines r and calls Setenv for each. From c48cc08de2517a8faa273a9e0674c32f2e540ace Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 4 Oct 2024 18:20:06 -0700 Subject: [PATCH 0003/1708] wgengine: stop conntrack log spam about Canonical net probes Like we do for the ones on iOS. As a bonus, this removes a caller of tsaddr.IsTailscaleIP which we want to revamp/remove soonish. Updates #13687 Change-Id: Iab576a0c48e9005c7844ab52a0aba5ba343b750e Signed-off-by: Brad Fitzpatrick --- wgengine/pendopen.go | 69 +++++++++++++++++++++++++++++++++++++------- 1 file changed, 58 insertions(+), 11 deletions(-) diff --git a/wgengine/pendopen.go b/wgengine/pendopen.go index 59b1fccda..340c7e0f3 100644 --- a/wgengine/pendopen.go +++ b/wgengine/pendopen.go @@ -5,14 +5,17 @@ package wgengine import ( "fmt" + "net/netip" "runtime" + "strings" "time" + "github.com/gaissmai/bart" "tailscale.com/net/flowtrack" "tailscale.com/net/packet" - "tailscale.com/net/tsaddr" "tailscale.com/net/tstun" "tailscale.com/types/ipproto" + "tailscale.com/types/lazy" "tailscale.com/util/mak" "tailscale.com/wgengine/filter" ) @@ -86,6 +89,57 @@ func (e *userspaceEngine) trackOpenPreFilterIn(pp *packet.Parsed, t *tstun.Wrapp return } +var ( + appleIPRange = netip.MustParsePrefix("17.0.0.0/8") + canonicalIPs = lazy.SyncFunc(func() (checkIPFunc func(netip.Addr) bool) { + // https://bgp.he.net/AS41231#_prefixes + t := &bart.Table[bool]{} + for _, s := range strings.Fields(` + 91.189.89.0/24 + 91.189.91.0/24 + 91.189.92.0/24 + 91.189.93.0/24 + 91.189.94.0/24 + 91.189.95.0/24 + 162.213.32.0/24 + 162.213.34.0/24 + 162.213.35.0/24 + 185.125.188.0/23 + 185.125.190.0/24 + 194.169.254.0/24`) { + t.Insert(netip.MustParsePrefix(s), true) + } + return func(ip netip.Addr) bool { + v, _ := t.Lookup(ip) + return v + } + }) +) + +// isOSNetworkProbe reports whether the target is likely a network +// connectivity probe target from e.g. iOS or Ubuntu network-manager. +// +// iOS likes to probe Apple IPs on all interfaces to check for connectivity. +// Don't start timers tracking those. They won't succeed anyway. 
Avoids log +// spam like: +func (e *userspaceEngine) isOSNetworkProbe(dst netip.AddrPort) bool { + // iOS had log spam like: + // open-conn-track: timeout opening (100.115.73.60:52501 => 17.125.252.5:443); no associated peer node + if runtime.GOOS == "ios" && dst.Port() == 443 && appleIPRange.Contains(dst.Addr()) { + if _, ok := e.PeerForIP(dst.Addr()); !ok { + return true + } + } + // NetworkManager; https://github.com/tailscale/tailscale/issues/13687 + // open-conn-track: timeout opening (TCP 100.96.229.119:42798 => 185.125.190.49:80); no associated peer node + if runtime.GOOS == "linux" && dst.Port() == 80 && canonicalIPs()(dst.Addr()) { + if _, ok := e.PeerForIP(dst.Addr()); !ok { + return true + } + } + return false +} + func (e *userspaceEngine) trackOpenPostFilterOut(pp *packet.Parsed, t *tstun.Wrapper) (res filter.Response) { res = filter.Accept // always @@ -95,19 +149,12 @@ func (e *userspaceEngine) trackOpenPostFilterOut(pp *packet.Parsed, t *tstun.Wra pp.TCPFlags&packet.TCPSyn == 0 { return } + if e.isOSNetworkProbe(pp.Dst) { + return + } flow := flowtrack.MakeTuple(pp.IPProto, pp.Src, pp.Dst) - // iOS likes to probe Apple IPs on all interfaces to check for connectivity. - // Don't start timers tracking those. They won't succeed anyway. Avoids log spam - // like: - // open-conn-track: timeout opening (100.115.73.60:52501 => 17.125.252.5:443); no associated peer node - if runtime.GOOS == "ios" && flow.DstPort() == 443 && !tsaddr.IsTailscaleIP(flow.DstAddr()) { - if _, ok := e.PeerForIP(flow.DstAddr()); !ok { - return - } - } - e.mu.Lock() defer e.mu.Unlock() if _, dup := e.pendOpen[flow]; dup { From 1005cbc1e4b1b77f4d9c8e6b6ab54d4d14ebe15e Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 6 Oct 2024 12:12:44 -0700 Subject: [PATCH 0004/1708] tailscaleroot: panic if tailscale_go build tag but Go toolchain mismatch Fixes #13527 Change-Id: I05921969a84a303b60d1b3b9227aff9865662831 Signed-off-by: Brad Fitzpatrick --- assert_ts_toolchain_match.go | 27 +++++++++++++++++++++++++++ version-embed.go | 18 +++++++++++++++++- version_tailscale_test.go | 10 +--------- 3 files changed, 45 insertions(+), 10 deletions(-) create mode 100644 assert_ts_toolchain_match.go diff --git a/assert_ts_toolchain_match.go b/assert_ts_toolchain_match.go new file mode 100644 index 000000000..40b24b334 --- /dev/null +++ b/assert_ts_toolchain_match.go @@ -0,0 +1,27 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build tailscale_go + +package tailscaleroot + +import ( + "fmt" + "os" + "strings" +) + +func init() { + tsRev, ok := tailscaleToolchainRev() + if !ok { + panic("binary built with tailscale_go build tag but failed to read build info or find tailscale.toolchain.rev in build info") + } + want := strings.TrimSpace(GoToolchainRev) + if tsRev != want { + if os.Getenv("TS_PERMIT_TOOLCHAIN_MISMATCH") == "1" { + fmt.Fprintf(os.Stderr, "tailscale.toolchain.rev = %q, want %q; but ignoring due to TS_PERMIT_TOOLCHAIN_MISMATCH=1\n", tsRev, want) + return + } + panic(fmt.Sprintf("binary built with tailscale_go build tag but Go toolchain %q doesn't match github.com/tailscale/tailscale expected value %q; override this failure with TS_PERMIT_TOOLCHAIN_MISMATCH=1", tsRev, want)) + } +} diff --git a/version-embed.go b/version-embed.go index 40c2e7cef..2d517339d 100644 --- a/version-embed.go +++ b/version-embed.go @@ -4,7 +4,10 @@ // Package tailscaleroot embeds VERSION.txt into the binary. 
package tailscaleroot -import _ "embed" +import ( + _ "embed" + "runtime/debug" +) // VersionDotTxt is the contents of VERSION.txt. Despite the tempting filename, // this does not necessarily contain the accurate version number of the build, which @@ -22,3 +25,16 @@ var AlpineDockerTag string // //go:embed go.toolchain.rev var GoToolchainRev string + +func tailscaleToolchainRev() (gitHash string, ok bool) { + bi, ok := debug.ReadBuildInfo() + if !ok { + return "", false + } + for _, s := range bi.Settings { + if s.Key == "tailscale.toolchain.rev" { + return s.Value, true + } + } + return "", false +} diff --git a/version_tailscale_test.go b/version_tailscale_test.go index c15e0cbee..0a690e312 100644 --- a/version_tailscale_test.go +++ b/version_tailscale_test.go @@ -7,23 +7,15 @@ package tailscaleroot import ( "os" - "runtime/debug" "strings" "testing" ) func TestToolchainMatches(t *testing.T) { - bi, ok := debug.ReadBuildInfo() + tsRev, ok := tailscaleToolchainRev() if !ok { t.Fatal("failed to read build info") } - var tsRev string - for _, s := range bi.Settings { - if s.Key == "tailscale.toolchain.rev" { - tsRev = s.Value - break - } - } want := strings.TrimSpace(GoToolchainRev) if tsRev != want { if os.Getenv("TS_PERMIT_TOOLCHAIN_MISMATCH") == "1" { From e48cddfbb3409c83d34672de836d544b76e84637 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Mon, 7 Oct 2024 14:58:45 +0100 Subject: [PATCH 0005/1708] cmd/{containerboot,k8s-operator},k8s-operator,kube: add ProxyGroup controller (#13684) Implements the controller for the new ProxyGroup CRD, designed for running proxies in a high availability configuration. Each proxy gets its own config and state Secret, and its own tailscale node ID. We are currently mounting all of the config secrets into the container, but will stop mounting them and instead read them directly from the kube API once #13578 is implemented. 
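To make the per-replica layout concrete, here is an illustrative sketch (the helper name perReplicaSecretNames is made up; the real logic lives in pgStateSecrets and ensureConfigSecretsCreated in the diff below): replica i of a ProxyGroup named <name> keeps its tailscaled state in Secret "<name>-<i>" and reads its tailscaled config from Secret "<name>-<i>-config", so every pod authenticates as its own tailnet node.

package main

import "fmt"

// perReplicaSecretNames is an illustrative helper (not part of the patch):
// it lists the state and config Secret names the controller creates for each
// replica of a ProxyGroup.
func perReplicaSecretNames(pgName string, replicas int32) (state, config []string) {
	for i := int32(0); i < replicas; i++ {
		state = append(state, fmt.Sprintf("%s-%d", pgName, i))          // per-pod tailscaled state
		config = append(config, fmt.Sprintf("%s-%d-config", pgName, i)) // per-pod tailscaled config
	}
	return state, config
}

func main() {
	state, config := perReplicaSecretNames("egress-proxies", 3)
	fmt.Println(state)  // [egress-proxies-0 egress-proxies-1 egress-proxies-2]
	fmt.Println(config) // [egress-proxies-0-config egress-proxies-1-config egress-proxies-2-config]
}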
Updates #13406 Signed-off-by: Tom Proctor --- cmd/containerboot/main.go | 2 +- cmd/k8s-operator/deploy/chart/values.yaml | 3 +- .../crds/tailscale.com_proxygroups.yaml | 5 +- .../deploy/examples/proxygroup.yaml | 7 + .../deploy/manifests/operator.yaml | 5 +- cmd/k8s-operator/egress-services_test.go | 2 +- cmd/k8s-operator/operator.go | 52 +- cmd/k8s-operator/proxygroup.go | 507 ++++++++++++++++++ cmd/k8s-operator/proxygroup_specs.go | 262 +++++++++ cmd/k8s-operator/proxygroup_test.go | 226 ++++++++ cmd/k8s-operator/sts.go | 13 +- cmd/k8s-operator/testutils_test.go | 2 +- cmd/k8s-operator/tsrecorder.go | 38 +- cmd/k8s-operator/tsrecorder_test.go | 2 +- k8s-operator/api.md | 2 +- .../apis/v1alpha1/types_proxygroup.go | 6 +- .../apis/v1alpha1/zz_generated.deepcopy.go | 2 +- k8s-operator/conditions.go | 8 + k8s-operator/utils.go | 4 +- kube/kubetypes/metrics.go | 1 + 20 files changed, 1117 insertions(+), 32 deletions(-) create mode 100644 cmd/k8s-operator/deploy/examples/proxygroup.yaml create mode 100644 cmd/k8s-operator/proxygroup.go create mode 100644 cmd/k8s-operator/proxygroup_specs.go create mode 100644 cmd/k8s-operator/proxygroup_test.go diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 86612d1a6..5ebe22e5f 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -769,5 +769,5 @@ func tailscaledConfigFilePath() string { log.Fatalf("no tailscaled config file found in %q for current capability version %q", dir, tailcfg.CurrentCapabilityVersion) } log.Printf("Using tailscaled config file %q for capability version %q", maxCompatVer, tailcfg.CurrentCapabilityVersion) - return path.Join(dir, kubeutils.TailscaledConfigFileNameForCap(maxCompatVer)) + return path.Join(dir, kubeutils.TailscaledConfigFileName(maxCompatVer)) } diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index 43ed382c6..de003f149 100644 --- a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -79,7 +79,8 @@ proxyConfig: defaultTags: "tag:k8s" firewallMode: auto # If defined, this proxy class will be used as the default proxy class for - # service and ingress resources that do not have a proxy class defined. + # service and ingress resources that do not have a proxy class defined. It + # does not apply to Connector and ProxyGroup resources. defaultProxyClass: "" # apiServerProxyConfig allows to configure whether the operator should expose diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index 5f3520d26..32e2ab450 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -63,14 +63,15 @@ spec: description: |- ProxyClass is the name of the ProxyClass custom resource that contains configuration options that should be applied to the resources created - for this ProxyGroup. If unset, and no default ProxyClass is set, the - operator will create resources with the default configuration. + for this ProxyGroup. If unset, the operator will create resources with + the default configuration. type: string replicas: description: |- Replicas specifies how many replicas to create the StatefulSet with. Defaults to 2. type: integer + format: int32 tags: description: |- Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s]. 
diff --git a/cmd/k8s-operator/deploy/examples/proxygroup.yaml b/cmd/k8s-operator/deploy/examples/proxygroup.yaml new file mode 100644 index 000000000..337d87f0b --- /dev/null +++ b/cmd/k8s-operator/deploy/examples/proxygroup.yaml @@ -0,0 +1,7 @@ +apiVersion: tailscale.com/v1alpha1 +kind: ProxyGroup +metadata: + name: egress-proxies +spec: + type: egress + replicas: 3 diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 25f3b4d1c..e6358708b 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2475,13 +2475,14 @@ spec: description: |- ProxyClass is the name of the ProxyClass custom resource that contains configuration options that should be applied to the resources created - for this ProxyGroup. If unset, and no default ProxyClass is set, the - operator will create resources with the default configuration. + for this ProxyGroup. If unset, the operator will create resources with + the default configuration. type: string replicas: description: |- Replicas specifies how many replicas to create the StatefulSet with. Defaults to 2. + format: int32 type: integer tags: description: |- diff --git a/cmd/k8s-operator/egress-services_test.go b/cmd/k8s-operator/egress-services_test.go index 13fa31784..1adde4e90 100644 --- a/cmd/k8s-operator/egress-services_test.go +++ b/cmd/k8s-operator/egress-services_test.go @@ -34,7 +34,7 @@ func TestTailscaleEgressServices(t *testing.T) { UID: types.UID("1234-UID"), }, Spec: tsapi.ProxyGroupSpec{ - Replicas: pointer.To(3), + Replicas: pointer.To[int32](3), Type: tsapi.ProxyGroupTypeEgress, }, } diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 5255d4f29..f744c9f5e 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -356,12 +356,12 @@ func runReconcilers(opts reconcilerOpts) { } egressSvcFilter := handler.EnqueueRequestsFromMapFunc(egressSvcsHandler) - proxyGroupFilter := handler.EnqueueRequestsFromMapFunc(egressSvcsFromEgressProxyGroup(mgr.GetClient(), opts.log)) + egressProxyGroupFilter := handler.EnqueueRequestsFromMapFunc(egressSvcsFromEgressProxyGroup(mgr.GetClient(), opts.log)) err = builder. ControllerManagedBy(mgr). Named("egress-svcs-reconciler"). Watches(&corev1.Service{}, egressSvcFilter). - Watches(&tsapi.ProxyGroup{}, proxyGroupFilter). + Watches(&tsapi.ProxyGroup{}, egressProxyGroupFilter). Complete(&egressSvcsReconciler{ Client: mgr.GetClient(), tsNamespace: opts.tailscaleNamespace, @@ -457,6 +457,33 @@ func runReconcilers(opts reconcilerOpts) { startlog.Fatalf("could not create Recorder reconciler: %v", err) } + // Recorder reconciler. + ownedByProxyGroupFilter := handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &tsapi.ProxyGroup{}) + proxyClassFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForProxyGroup(mgr.GetClient(), startlog)) + err = builder.ControllerManagedBy(mgr). + For(&tsapi.ProxyGroup{}). + Watches(&appsv1.StatefulSet{}, ownedByProxyGroupFilter). + Watches(&corev1.ServiceAccount{}, ownedByProxyGroupFilter). + Watches(&corev1.Secret{}, ownedByProxyGroupFilter). + Watches(&rbacv1.Role{}, ownedByProxyGroupFilter). + Watches(&rbacv1.RoleBinding{}, ownedByProxyGroupFilter). + Watches(&tsapi.ProxyClass{}, proxyClassFilterForProxyGroup). 
+ Complete(&ProxyGroupReconciler{ + recorder: eventRecorder, + Client: mgr.GetClient(), + l: opts.log.Named("proxygroup-reconciler"), + clock: tstime.DefaultClock{}, + tsClient: opts.tsClient, + + tsNamespace: opts.tailscaleNamespace, + proxyImage: opts.proxyImage, + defaultTags: strings.Split(opts.proxyTags, ","), + tsFirewallMode: opts.proxyFirewallMode, + }) + if err != nil { + startlog.Fatalf("could not create ProxyGroup reconciler: %v", err) + } + startlog.Infof("Startup complete, operator running, version: %s", version.Long()) if err := mgr.Start(signals.SetupSignalHandler()); err != nil { startlog.Fatalf("could not start manager: %v", err) @@ -689,6 +716,27 @@ func proxyClassHandlerForConnector(cl client.Client, logger *zap.SugaredLogger) } } +// proxyClassHandlerForConnector returns a handler that, for a given ProxyClass, +// returns a list of reconcile requests for all Connectors that have +// .spec.proxyClass set. +func proxyClassHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + pgList := new(tsapi.ProxyGroupList) + if err := cl.List(ctx, pgList); err != nil { + logger.Debugf("error listing ProxyGroups for ProxyClass: %v", err) + return nil + } + reqs := make([]reconcile.Request, 0) + proxyClassName := o.GetName() + for _, pg := range pgList.Items { + if pg.Spec.ProxyClass == proxyClassName { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&pg)}) + } + } + return reqs + } +} + // serviceHandlerForIngress returns a handler for Service events for ingress // reconciler that ensures that if the Service associated with an event is of // interest to the reconciler, the associated Ingress(es) gets be reconciled. diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go new file mode 100644 index 000000000..f19339059 --- /dev/null +++ b/cmd/k8s-operator/proxygroup.go @@ -0,0 +1,507 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "net/http" + "slices" + "sync" + + "github.com/pkg/errors" + "go.uber.org/zap" + xslices "golang.org/x/exp/slices" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale" + "tailscale.com/ipn" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" + "tailscale.com/tstime" + "tailscale.com/types/ptr" + "tailscale.com/util/clientmetric" + "tailscale.com/util/mak" + "tailscale.com/util/set" +) + +const ( + reasonProxyGroupCreationFailed = "ProxyGroupCreationFailed" + reasonProxyGroupReady = "ProxyGroupReady" + reasonProxyGroupCreating = "ProxyGroupCreating" + reasonProxyGroupInvalid = "ProxyGroupInvalid" +) + +var gaugeProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupCount) + +// ProxyGroupReconciler ensures cluster resources for a ProxyGroup definition. 
+type ProxyGroupReconciler struct { + client.Client + l *zap.SugaredLogger + recorder record.EventRecorder + clock tstime.Clock + tsClient tsClient + + // User-specified defaults from the helm installation. + tsNamespace string + proxyImage string + defaultTags []string + tsFirewallMode string + + mu sync.Mutex // protects following + proxyGroups set.Slice[types.UID] // for proxygroups gauge +} + +func (r *ProxyGroupReconciler) logger(name string) *zap.SugaredLogger { + return r.l.With("ProxyGroup", name) +} + +func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { + logger := r.logger(req.Name) + logger.Debugf("starting reconcile") + defer logger.Debugf("reconcile finished") + + pg := new(tsapi.ProxyGroup) + err = r.Get(ctx, req.NamespacedName, pg) + if apierrors.IsNotFound(err) { + logger.Debugf("ProxyGroup not found, assuming it was deleted") + return reconcile.Result{}, nil + } else if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to get tailscale.com ProxyGroup: %w", err) + } + if markedForDeletion(pg) { + logger.Debugf("ProxyGroup is being deleted, cleaning up resources") + ix := xslices.Index(pg.Finalizers, FinalizerName) + if ix < 0 { + logger.Debugf("no finalizer, nothing to do") + return reconcile.Result{}, nil + } + + if done, err := r.maybeCleanup(ctx, pg); err != nil { + return reconcile.Result{}, err + } else if !done { + logger.Debugf("ProxyGroup resource cleanup not yet finished, will retry...") + return reconcile.Result{RequeueAfter: shortRequeue}, nil + } + + pg.Finalizers = slices.Delete(pg.Finalizers, ix, ix+1) + if err := r.Update(ctx, pg); err != nil { + return reconcile.Result{}, err + } + return reconcile.Result{}, nil + } + + oldPGStatus := pg.Status.DeepCopy() + setStatusReady := func(pg *tsapi.ProxyGroup, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, status, reason, message, pg.Generation, r.clock, logger) + if !apiequality.Semantic.DeepEqual(oldPGStatus, pg.Status) { + // An error encountered here should get returned by the Reconcile function. + if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil { + err = errors.Wrap(err, updateErr.Error()) + } + } + return reconcile.Result{}, err + } + + if !slices.Contains(pg.Finalizers, FinalizerName) { + // This log line is printed exactly once during initial provisioning, + // because once the finalizer is in place this block gets skipped. So, + // this is a nice place to log that the high level, multi-reconcile + // operation is underway. 
+ logger.Infof("ensuring ProxyGroup is set up") + pg.Finalizers = append(pg.Finalizers, FinalizerName) + if err := r.Update(ctx, pg); err != nil { + logger.Errorf("error adding finalizer: %w", err) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, reasonProxyGroupCreationFailed) + } + } + + if err := r.validate(pg); err != nil { + logger.Errorf("error validating ProxyGroup spec: %w", err) + message := fmt.Sprintf("ProxyGroup is invalid: %s", err) + r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupInvalid, message) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupInvalid, message) + } + + if err = r.maybeProvision(ctx, pg); err != nil { + logger.Errorf("error provisioning ProxyGroup resources: %w", err) + message := fmt.Sprintf("failed provisioning ProxyGroup: %s", err) + r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, message) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, message) + } + + desiredReplicas := int(pgReplicas(pg)) + if len(pg.Status.Devices) < desiredReplicas { + message := fmt.Sprintf("%d/%d ProxyGroup pods running", len(pg.Status.Devices), desiredReplicas) + logger.Debug(message) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) + } + + if len(pg.Status.Devices) > desiredReplicas { + message := fmt.Sprintf("waiting for %d ProxyGroup pods to shut down", len(pg.Status.Devices)-desiredReplicas) + logger.Debug(message) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) + } + + logger.Info("ProxyGroup resources synced") + return setStatusReady(pg, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady) +} + +func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup) error { + logger := r.logger(pg.Name) + r.mu.Lock() + r.proxyGroups.Add(pg.UID) + gaugeProxyGroupResources.Set(int64(r.proxyGroups.Len())) + r.mu.Unlock() + + var proxyClass *tsapi.ProxyClass + if pg.Spec.ProxyClass != "" { + proxyClass = new(tsapi.ProxyClass) + if err := r.Get(ctx, types.NamespacedName{Name: pg.Spec.ProxyClass}, proxyClass); err != nil { + return fmt.Errorf("failed to get ProxyClass: %w", err) + } + if !tsoperator.ProxyClassIsReady(proxyClass) { + logger.Infof("ProxyClass %s specified for the ProxyGroup, but it is not (yet) in a ready state, waiting...", pg.Spec.ProxyClass) + return nil + } + } + + cfgHash, err := r.ensureConfigSecretsCreated(ctx, pg, proxyClass) + if err != nil { + return fmt.Errorf("error provisioning config Secrets: %w", err) + } + // State secrets are precreated so we can use the ProxyGroup CR as their owner ref. 
+ stateSecrets := pgStateSecrets(pg, r.tsNamespace) + for _, sec := range stateSecrets { + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sec, func(s *corev1.Secret) { + s.ObjectMeta.Labels = sec.ObjectMeta.Labels + s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations + s.ObjectMeta.OwnerReferences = sec.ObjectMeta.OwnerReferences + }); err != nil { + return fmt.Errorf("error provisioning state Secrets: %w", err) + } + } + sa := pgServiceAccount(pg, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) { + s.ObjectMeta.Labels = sa.ObjectMeta.Labels + s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations + s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences + }); err != nil { + return fmt.Errorf("error provisioning ServiceAccount: %w", err) + } + role := pgRole(pg, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { + r.ObjectMeta.Labels = role.ObjectMeta.Labels + r.ObjectMeta.Annotations = role.ObjectMeta.Annotations + r.ObjectMeta.OwnerReferences = role.ObjectMeta.OwnerReferences + r.Rules = role.Rules + }); err != nil { + return fmt.Errorf("error provisioning Role: %w", err) + } + roleBinding := pgRoleBinding(pg, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) { + r.ObjectMeta.Labels = roleBinding.ObjectMeta.Labels + r.ObjectMeta.Annotations = roleBinding.ObjectMeta.Annotations + r.ObjectMeta.OwnerReferences = roleBinding.ObjectMeta.OwnerReferences + r.RoleRef = roleBinding.RoleRef + r.Subjects = roleBinding.Subjects + }); err != nil { + return fmt.Errorf("error provisioning RoleBinding: %w", err) + } + ss := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, cfgHash) + ss = applyProxyClassToStatefulSet(proxyClass, ss, nil, logger) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { + s.ObjectMeta.Labels = ss.ObjectMeta.Labels + s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations + s.ObjectMeta.OwnerReferences = ss.ObjectMeta.OwnerReferences + s.Spec = ss.Spec + }); err != nil { + return fmt.Errorf("error provisioning StatefulSet: %w", err) + } + + if err := r.cleanupDanglingResources(ctx, pg); err != nil { + return fmt.Errorf("error cleaning up dangling resources: %w", err) + } + + devices, err := r.getDeviceInfo(ctx, pg) + if err != nil { + return fmt.Errorf("failed to get device info: %w", err) + } + + pg.Status.Devices = devices + + return nil +} + +// cleanupDanglingResources ensures we don't leak config secrets, state secrets, and +// tailnet devices when the number of replicas specified is reduced. +func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, pg *tsapi.ProxyGroup) error { + logger := r.logger(pg.Name) + metadata, err := r.getNodeMetadata(ctx, pg) + if err != nil { + return err + } + + for _, m := range metadata { + if m.ordinal+1 <= int(pgReplicas(pg)) { + continue + } + + // Dangling resource, delete the config + state Secrets, as well as + // deleting the device from the tailnet. 
+ if err := r.deleteTailnetDevice(ctx, m.tsID, logger); err != nil { + return err + } + if err := r.Delete(ctx, m.stateSecret); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("error deleting state Secret %s: %w", m.stateSecret.Name, err) + } + } + configSecret := m.stateSecret.DeepCopy() + configSecret.Name += "-config" + if err := r.Delete(ctx, configSecret); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("error deleting config Secret %s: %w", configSecret.Name, err) + } + } + } + + return nil +} + +// maybeCleanup just deletes the device from the tailnet. All the kubernetes +// resources linked to a ProxyGroup will get cleaned up via owner references +// (which we can use because they are all in the same namespace). +func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, pg *tsapi.ProxyGroup) (bool, error) { + logger := r.logger(pg.Name) + + metadata, err := r.getNodeMetadata(ctx, pg) + if err != nil { + return false, err + } + + for _, m := range metadata { + if err := r.deleteTailnetDevice(ctx, m.tsID, logger); err != nil { + return false, err + } + } + + logger.Infof("cleaned up ProxyGroup resources") + r.mu.Lock() + r.proxyGroups.Remove(pg.UID) + gaugeProxyGroupResources.Set(int64(r.proxyGroups.Len())) + r.mu.Unlock() + return true, nil +} + +func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, id tailcfg.StableNodeID, logger *zap.SugaredLogger) error { + logger.Debugf("deleting device %s from control", string(id)) + if err := r.tsClient.DeleteDevice(ctx, string(id)); err != nil { + errResp := &tailscale.ErrResponse{} + if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { + logger.Debugf("device %s not found, likely because it has already been deleted from control", string(id)) + } else { + return fmt.Errorf("error deleting device: %w", err) + } + } else { + logger.Debugf("device %s deleted from control", string(id)) + } + + return nil +} + +func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (hash string, err error) { + logger := r.logger(pg.Name) + var allConfigs []tailscaledConfigs + for i := range pgReplicas(pg) { + cfgSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d-config", pg.Name, i), + Namespace: r.tsNamespace, + Labels: pgSecretLabels(pg.Name, "config"), + OwnerReferences: pgOwnerReference(pg), + }, + } + + var existingCfgSecret *corev1.Secret // unmodified copy of secret + if err := r.Get(ctx, client.ObjectKeyFromObject(cfgSecret), cfgSecret); err == nil { + logger.Debugf("secret %s/%s already exists", cfgSecret.GetNamespace(), cfgSecret.GetName()) + existingCfgSecret = cfgSecret.DeepCopy() + } else if !apierrors.IsNotFound(err) { + return "", err + } + + var authKey string + if existingCfgSecret == nil { + logger.Debugf("creating authkey for new ProxyGroup proxy") + tags := pg.Spec.Tags.Stringify() + if len(tags) == 0 { + tags = r.defaultTags + } + authKey, err = newAuthKey(ctx, r.tsClient, tags) + if err != nil { + return "", err + } + } + + configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, existingCfgSecret) + if err != nil { + return "", fmt.Errorf("error creating tailscaled config: %w", err) + } + allConfigs = append(allConfigs, configs) + + for cap, cfg := range configs { + cfgJSON, err := json.Marshal(cfg) + if err != nil { + return "", fmt.Errorf("error marshalling tailscaled config: %w", err) + } + mak.Set(&cfgSecret.StringData, 
tsoperator.TailscaledConfigFileName(cap), string(cfgJSON)) + } + + if existingCfgSecret != nil { + logger.Debugf("patching the existing ProxyGroup config Secret %s", cfgSecret.Name) + if err := r.Patch(ctx, cfgSecret, client.MergeFrom(existingCfgSecret)); err != nil { + return "", err + } + } else { + logger.Debugf("creating a new config Secret %s for the ProxyGroup", cfgSecret.Name) + if err := r.Create(ctx, cfgSecret); err != nil { + return "", err + } + } + } + + sum := sha256.New() + b, err := json.Marshal(allConfigs) + if err != nil { + return "", err + } + if _, err := sum.Write(b); err != nil { + return "", err + } + + return fmt.Sprintf("%x", sum.Sum(nil)), nil +} + +func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32, authKey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { + conf := &ipn.ConfigVAlpha{ + Version: "alpha0", + AcceptDNS: "false", + AcceptRoutes: "false", // AcceptRoutes defaults to true + Locked: "false", + Hostname: ptr.To(fmt.Sprintf("%s-%d", pg.Name, idx)), + } + + if pg.Spec.HostnamePrefix != "" { + conf.Hostname = ptr.To(fmt.Sprintf("%s%d", pg.Spec.HostnamePrefix, idx)) + } + + if shouldAcceptRoutes(class) { + conf.AcceptRoutes = "true" + } + + deviceAuthed := false + for _, d := range pg.Status.Devices { + if d.Hostname == *conf.Hostname { + deviceAuthed = true + break + } + } + + if authKey != "" { + conf.AuthKey = &authKey + } else if !deviceAuthed { + key, err := authKeyFromSecret(oldSecret) + if err != nil { + return nil, fmt.Errorf("error retrieving auth key from Secret: %w", err) + } + conf.AuthKey = key + } + capVerConfigs := make(map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) + capVerConfigs[106] = *conf + return capVerConfigs, nil +} + +func (r *ProxyGroupReconciler) validate(_ *tsapi.ProxyGroup) error { + return nil +} + +// getNodeMetadata gets metadata for all the pods owned by this ProxyGroup by +// querying their state Secrets. It may not return the same number of items as +// specified in the ProxyGroup spec if e.g. it is getting scaled up or down, or +// some pods have failed to write state. +func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.ProxyGroup) (metadata []nodeMetadata, _ error) { + // List all state secrets owned by this ProxyGroup. 
+ secrets := &corev1.SecretList{} + if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, "state"))); err != nil { + return nil, fmt.Errorf("failed to list state Secrets: %w", err) + } + for _, secret := range secrets.Items { + var ordinal int + if _, err := fmt.Sscanf(secret.Name, pg.Name+"-%d", &ordinal); err != nil { + return nil, fmt.Errorf("unexpected secret %s was labelled as owned by the ProxyGroup %s: %w", secret.Name, pg.Name, err) + } + + id, dnsName, ok, err := getNodeMetadata(ctx, &secret) + if err != nil { + return nil, err + } + if !ok { + continue + } + + metadata = append(metadata, nodeMetadata{ + ordinal: ordinal, + stateSecret: &secret, + tsID: id, + dnsName: dnsName, + }) + } + + return metadata, nil +} + +func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, pg *tsapi.ProxyGroup) (devices []tsapi.TailnetDevice, _ error) { + metadata, err := r.getNodeMetadata(ctx, pg) + if err != nil { + return nil, err + } + + for _, m := range metadata { + device, ok, err := getDeviceInfo(ctx, r.tsClient, m.stateSecret) + if err != nil { + return nil, err + } + if !ok { + continue + } + devices = append(devices, tsapi.TailnetDevice{ + Hostname: device.Hostname, + TailnetIPs: device.TailnetIPs, + }) + } + + return devices, nil +} + +type nodeMetadata struct { + ordinal int + stateSecret *corev1.Secret + tsID tailcfg.StableNodeID + dnsName string +} diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go new file mode 100644 index 000000000..bf2adcbf5 --- /dev/null +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -0,0 +1,262 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/types/ptr" +) + +// Returns the base StatefulSet definition for a ProxyGroup. A ProxyClass may be +// applied over the top after. 
+func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHash string) *appsv1.StatefulSet { + return &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: ptr.To(pgReplicas(pg)), + Selector: &metav1.LabelSelector{ + MatchLabels: pgLabels(pg.Name, nil), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + DeletionGracePeriodSeconds: ptr.To[int64](10), + Annotations: map[string]string{ + podAnnotationLastSetConfigFileHash: cfgHash, + }, + }, + Spec: corev1.PodSpec{ + ServiceAccountName: pg.Name, + InitContainers: []corev1.Container{ + { + Name: "sysctler", + Image: image, + SecurityContext: &corev1.SecurityContext{ + Privileged: ptr.To(true), + }, + Command: []string{ + "/bin/sh", + "-c", + }, + Args: []string{ + "sysctl -w net.ipv4.ip_forward=1 && if sysctl net.ipv6.conf.all.forwarding; then sysctl -w net.ipv6.conf.all.forwarding=1; fi", + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "tailscale", + Image: image, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{ + "NET_ADMIN", + }, + }, + }, + VolumeMounts: func() []corev1.VolumeMount { + var mounts []corev1.VolumeMount + for i := range pgReplicas(pg) { + mounts = append(mounts, corev1.VolumeMount{ + Name: fmt.Sprintf("tailscaledconfig-%d", i), + ReadOnly: true, + MountPath: fmt.Sprintf("/etc/tsconfig/%s-%d", pg.Name, i), + }) + } + + return mounts + }(), + Env: func() []corev1.EnvVar { + envs := []corev1.EnvVar{ + { + Name: "POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIP", + }, + }, + }, + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + // Secret is named after the pod. 
+ FieldPath: "metadata.name", + }, + }, + }, + { + Name: "TS_KUBE_SECRET", + Value: "$(POD_NAME)", + }, + { + Name: "TS_STATE", + Value: "kube:$(POD_NAME)", + }, + { + Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", + Value: "/etc/tsconfig/$(POD_NAME)", + }, + { + Name: "TS_USERSPACE", + Value: "false", + }, + } + + if tsFirewallMode != "" { + envs = append(envs, corev1.EnvVar{ + Name: "TS_DEBUG_FIREWALL_MODE", + Value: tsFirewallMode, + }) + } + + return envs + }(), + }, + }, + Volumes: func() []corev1.Volume { + var volumes []corev1.Volume + for i := range pgReplicas(pg) { + volumes = append(volumes, corev1.Volume{ + Name: fmt.Sprintf("tailscaledconfig-%d", i), + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-%d-config", pg.Name, i), + }, + }, + }) + } + + return volumes + }(), + }, + }, + }, + } +} + +func pgServiceAccount(pg *tsapi.ProxyGroup, namespace string) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + }, + } +} + +func pgRole(pg *tsapi.ProxyGroup, namespace string) *rbacv1.Role { + return &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"secrets"}, + Verbs: []string{ + "get", + "patch", + "update", + }, + ResourceNames: func() (secrets []string) { + for i := range pgReplicas(pg) { + secrets = append(secrets, + fmt.Sprintf("%s-%d-config", pg.Name, i), // Config with auth key. + fmt.Sprintf("%s-%d", pg.Name, i), // State. + ) + } + return secrets + }(), + }, + }, + } +} + +func pgRoleBinding(pg *tsapi.ProxyGroup, namespace string) *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: pg.Name, + Namespace: namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: pg.Name, + }, + } +} + +func pgStateSecrets(pg *tsapi.ProxyGroup, namespace string) (secrets []*corev1.Secret) { + for i := range pgReplicas(pg) { + secrets = append(secrets, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", pg.Name, i), + Namespace: namespace, + Labels: pgSecretLabels(pg.Name, "state"), + OwnerReferences: pgOwnerReference(pg), + }, + }) + } + + return secrets +} + +func pgSecretLabels(pgName, typ string) map[string]string { + return pgLabels(pgName, map[string]string{ + labelSecretType: typ, // "config" or "state". 
+ }) +} + +func pgLabels(pgName string, customLabels map[string]string) map[string]string { + l := make(map[string]string, len(customLabels)+3) + for k, v := range customLabels { + l[k] = v + } + + l[LabelManaged] = "true" + l[LabelParentType] = "proxygroup" + l[LabelParentName] = pgName + + return l +} + +func pgOwnerReference(owner *tsapi.ProxyGroup) []metav1.OwnerReference { + return []metav1.OwnerReference{*metav1.NewControllerRef(owner, tsapi.SchemeGroupVersion.WithKind("ProxyGroup"))} +} + +func pgReplicas(pg *tsapi.ProxyGroup) int32 { + if pg.Spec.Replicas != nil { + return *pg.Spec.Replicas + } + + return 2 +} diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go new file mode 100644 index 000000000..402d67949 --- /dev/null +++ b/cmd/k8s-operator/proxygroup_test.go @@ -0,0 +1,226 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "encoding/json" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "tailscale.com/client/tailscale" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/tstest" + "tailscale.com/types/ptr" +) + +const testProxyImage = "tailscale/tailscale:test" + +func TestProxyGroup(t *testing.T) { + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Finalizers: []string{"tailscale.com/finalizer"}, + }, + } + + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(pg). + WithStatusSubresource(pg). 
+ Build() + tsClient := &fakeTSClient{} + zl, _ := zap.NewDevelopment() + fr := record.NewFakeRecorder(1) + cl := tstest.NewClock(tstest.ClockOpts{}) + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + proxyImage: testProxyImage, + defaultTags: []string{"tag:test-tag"}, + tsFirewallMode: "auto", + Client: fc, + tsClient: tsClient, + recorder: fr, + l: zl.Sugar(), + clock: cl, + } + + t.Run("observe_ProxyGroupCreating_status_reason", func(t *testing.T) { + expectReconciled(t, reconciler, "", pg.Name) + + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) + expectEqual(t, fc, pg, nil) + if expected := 1; reconciler.proxyGroups.Len() != expected { + t.Fatalf("expected %d recorders, got %d", expected, reconciler.proxyGroups.Len()) + } + expectProxyGroupResources(t, fc, pg, true) + keyReq := tailscale.KeyCapabilities{ + Devices: tailscale.KeyDeviceCapabilities{ + Create: tailscale.KeyDeviceCreateCapabilities{ + Reusable: false, + Ephemeral: false, + Preauthorized: true, + Tags: []string{"tag:test-tag"}, + }, + }, + } + if diff := cmp.Diff(tsClient.KeyRequests(), []tailscale.KeyCapabilities{keyReq, keyReq}); diff != "" { + t.Fatalf("unexpected secrets (-got +want):\n%s", diff) + } + }) + + t.Run("simulate_successful_device_auth", func(t *testing.T) { + addNodeIDToStateSecrets(t, fc, pg) + expectReconciled(t, reconciler, "", pg.Name) + + pg.Status.Devices = []tsapi.TailnetDevice{ + { + Hostname: "hostname-nodeid-0", + TailnetIPs: []string{"1.2.3.4", "::1"}, + }, + { + Hostname: "hostname-nodeid-1", + TailnetIPs: []string{"1.2.3.4", "::1"}, + }, + } + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) + expectEqual(t, fc, pg, nil) + expectProxyGroupResources(t, fc, pg, true) + }) + + t.Run("scale_up_to_3", func(t *testing.T) { + pg.Spec.Replicas = ptr.To[int32](3) + mustUpdate(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { + p.Spec = pg.Spec + }) + expectReconciled(t, reconciler, "", pg.Name) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) + expectEqual(t, fc, pg, nil) + + addNodeIDToStateSecrets(t, fc, pg) + expectReconciled(t, reconciler, "", pg.Name) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) + pg.Status.Devices = append(pg.Status.Devices, tsapi.TailnetDevice{ + Hostname: "hostname-nodeid-2", + TailnetIPs: []string{"1.2.3.4", "::1"}, + }) + expectEqual(t, fc, pg, nil) + expectProxyGroupResources(t, fc, pg, true) + }) + + t.Run("scale_down_to_1", func(t *testing.T) { + pg.Spec.Replicas = ptr.To[int32](1) + mustUpdate(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { + p.Spec = pg.Spec + }) + expectReconciled(t, reconciler, "", pg.Name) + pg.Status.Devices = pg.Status.Devices[:1] // truncate to only the first device. 
+ expectEqual(t, fc, pg, nil) + + expectProxyGroupResources(t, fc, pg, true) + }) + + t.Run("delete_and_cleanup", func(t *testing.T) { + if err := fc.Delete(context.Background(), pg); err != nil { + t.Fatal(err) + } + + expectReconciled(t, reconciler, "", pg.Name) + + expectMissing[tsapi.Recorder](t, fc, "", pg.Name) + if expected := 0; reconciler.proxyGroups.Len() != expected { + t.Fatalf("expected %d ProxyGroups, got %d", expected, reconciler.proxyGroups.Len()) + } + // 2 nodes should get deleted as part of the scale down, and then finally + // the first node gets deleted with the ProxyGroup cleanup. + if diff := cmp.Diff(tsClient.deleted, []string{"nodeid-1", "nodeid-2", "nodeid-0"}); diff != "" { + t.Fatalf("unexpected deleted devices (-got +want):\n%s", diff) + } + // The fake client does not clean up objects whose owner has been + // deleted, so we can't test for the owned resources getting deleted. + }) +} + +func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool) { + t.Helper() + + role := pgRole(pg, tsNamespace) + roleBinding := pgRoleBinding(pg, tsNamespace) + serviceAccount := pgServiceAccount(pg, tsNamespace) + statefulSet := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", "") + + if shouldExist { + expectEqual(t, fc, role, nil) + expectEqual(t, fc, roleBinding, nil) + expectEqual(t, fc, serviceAccount, nil) + expectEqual(t, fc, statefulSet, func(ss *appsv1.StatefulSet) { + ss.Spec.Template.Annotations[podAnnotationLastSetConfigFileHash] = "" + }) + } else { + expectMissing[rbacv1.Role](t, fc, role.Namespace, role.Name) + expectMissing[rbacv1.RoleBinding](t, fc, roleBinding.Namespace, roleBinding.Name) + expectMissing[corev1.ServiceAccount](t, fc, serviceAccount.Namespace, serviceAccount.Name) + expectMissing[appsv1.StatefulSet](t, fc, statefulSet.Namespace, statefulSet.Name) + } + + var expectedSecrets []string + for i := range pgReplicas(pg) { + expectedSecrets = append(expectedSecrets, + fmt.Sprintf("%s-%d", pg.Name, i), + fmt.Sprintf("%s-%d-config", pg.Name, i), + ) + } + expectSecrets(t, fc, expectedSecrets) +} + +func expectSecrets(t *testing.T, fc client.WithWatch, expected []string) { + t.Helper() + + secrets := &corev1.SecretList{} + if err := fc.List(context.Background(), secrets); err != nil { + t.Fatal(err) + } + + var actual []string + for _, secret := range secrets.Items { + actual = append(actual, secret.Name) + } + + if diff := cmp.Diff(actual, expected); diff != "" { + t.Fatalf("unexpected secrets (-got +want):\n%s", diff) + } +} + +func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup) { + const key = "profile-abc" + for i := range pgReplicas(pg) { + bytes, err := json.Marshal(map[string]any{ + "Config": map[string]any{ + "NodeID": fmt.Sprintf("nodeid-%d", i), + }, + }) + if err != nil { + t.Fatal(err) + } + + mustUpdate(t, fc, tsNamespace, fmt.Sprintf("test-%d", i), func(s *corev1.Secret) { + s.Data = map[string][]byte{ + currentProfileKey: []byte(key), + key: bytes, + } + }) + } +} diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index cc6bdb8fe..19c98100f 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -47,6 +47,7 @@ const ( LabelParentType = "tailscale.com/parent-resource-type" LabelParentName = "tailscale.com/parent-resource" LabelParentNamespace = "tailscale.com/parent-resource-ns" + labelSecretType = "tailscale.com/secret-type" // "config" or "state". 
// LabelProxyClass can be set by users on Connectors, tailscale // Ingresses and Services that define cluster ingress or cluster egress, @@ -304,7 +305,7 @@ func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, l return createOrUpdate(ctx, a.Client, a.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = hsvc.Spec }) } -func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (secretName, hash string, configs tailscaleConfigs, _ error) { +func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (secretName, hash string, configs tailscaledConfigs, _ error) { secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ // Hardcode a -0 suffix so that in future, if we support @@ -362,7 +363,7 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger * latest := tailcfg.CapabilityVersion(-1) var latestConfig ipn.ConfigVAlpha for key, val := range configs { - fn := tsoperator.TailscaledConfigFileNameForCap(key) + fn := tsoperator.TailscaledConfigFileName(key) b, err := json.Marshal(val) if err != nil { return "", "", nil, fmt.Errorf("error marshalling tailscaled config: %w", err) @@ -672,7 +673,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, if pc == nil || ss == nil { return ss } - if pc.Spec.Metrics != nil && pc.Spec.Metrics.Enable { + if stsCfg != nil && pc.Spec.Metrics != nil && pc.Spec.Metrics.Enable { if stsCfg.TailnetTargetFQDN == "" && stsCfg.TailnetTargetIP == "" && !stsCfg.ForwardClusterTrafficViaL7IngressProxy { enableMetrics(ss, pc) } else if stsCfg.ForwardClusterTrafficViaL7IngressProxy { @@ -794,7 +795,7 @@ func readAuthKey(secret *corev1.Secret, key string) (*string, error) { // TODO (irbekrm): remove the legacy config once we no longer need to support // versions older than cap94, // https://tailscale.com/kb/1236/kubernetes-operator#operator-and-proxies -func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaleConfigs, error) { +func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ Version: "alpha0", AcceptDNS: "false", @@ -884,7 +885,7 @@ type ptrObject[T any] interface { *T } -type tailscaleConfigs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha +type tailscaledConfigs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha // hashBytes produces a hash for the provided tailscaled config that is the same across // different invocations of this code. We do not use the @@ -895,7 +896,7 @@ type tailscaleConfigs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha // thing that changed is operator version (the hash is also exposed to users via // an annotation and might be confusing if it changes without the config having // changed). 
-func tailscaledConfigHash(c tailscaleConfigs) (string, error) { +func tailscaledConfigHash(c tailscaledConfigs) (string, error) { b, err := json.Marshal(c) if err != nil { return "", fmt.Errorf("error marshalling tailscaled configs: %w", err) diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 457248d57..6b6297cbd 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -604,7 +604,7 @@ func (c *fakeTSClient) CreateKey(ctx context.Context, caps tailscale.KeyCapabili func (c *fakeTSClient) Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error) { return &tailscale.Device{ DeviceID: deviceID, - Hostname: "test-device", + Hostname: "hostname-" + deviceID, Addresses: []string{ "1.2.3.4", "::1", diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index dfbf96b0b..cfe38c50a 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -302,9 +302,7 @@ func (r *RecorderReconciler) validate(tsr *tsapi.Recorder) error { return nil } -// getNodeMetadata returns 'ok == true' iff the node ID is found. The dnsName -// is expected to always be non-empty if the node ID is, but not required. -func (r *RecorderReconciler) getNodeMetadata(ctx context.Context, tsrName string) (id tailcfg.StableNodeID, dnsName string, ok bool, err error) { +func (r *RecorderReconciler) getStateSecret(ctx context.Context, tsrName string) (*corev1.Secret, error) { secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: r.tsNamespace, @@ -313,12 +311,27 @@ func (r *RecorderReconciler) getNodeMetadata(ctx context.Context, tsrName string } if err := r.Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil { if apierrors.IsNotFound(err) { - return "", "", false, nil + return nil, nil } + return nil, fmt.Errorf("error getting state Secret: %w", err) + } + + return secret, nil +} + +func (r *RecorderReconciler) getNodeMetadata(ctx context.Context, tsrName string) (id tailcfg.StableNodeID, dnsName string, ok bool, err error) { + secret, err := r.getStateSecret(ctx, tsrName) + if err != nil || secret == nil { return "", "", false, err } + return getNodeMetadata(ctx, secret) +} + +// getNodeMetadata returns 'ok == true' iff the node ID is found. The dnsName +// is expected to always be non-empty if the node ID is, but not required. +func getNodeMetadata(ctx context.Context, secret *corev1.Secret) (id tailcfg.StableNodeID, dnsName string, ok bool, err error) { // TODO(tomhjp): Should maybe use ipn to parse the following info instead. currentProfile, ok := secret.Data[currentProfileKey] if !ok { @@ -338,14 +351,23 @@ func (r *RecorderReconciler) getNodeMetadata(ctx context.Context, tsrName string } func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string) (d tsapi.RecorderTailnetDevice, ok bool, err error) { - nodeID, dnsName, ok, err := r.getNodeMetadata(ctx, tsrName) + secret, err := r.getStateSecret(ctx, tsrName) + if err != nil || secret == nil { + return tsapi.RecorderTailnetDevice{}, false, err + } + + return getDeviceInfo(ctx, r.tsClient, secret) +} + +func getDeviceInfo(ctx context.Context, tsClient tsClient, secret *corev1.Secret) (d tsapi.RecorderTailnetDevice, ok bool, err error) { + nodeID, dnsName, ok, err := getNodeMetadata(ctx, secret) if !ok || err != nil { return tsapi.RecorderTailnetDevice{}, false, err } // TODO(tomhjp): The profile info doesn't include addresses, which is why we // need the API. 
Should we instead update the profile to include addresses? - device, err := r.tsClient.Device(ctx, string(nodeID), nil) + device, err := tsClient.Device(ctx, string(nodeID), nil) if err != nil { return tsapi.RecorderTailnetDevice{}, false, fmt.Errorf("failed to get device info from API: %w", err) } @@ -370,6 +392,6 @@ type profile struct { } `json:"Config"` } -func markedForDeletion(tsr *tsapi.Recorder) bool { - return !tsr.DeletionTimestamp.IsZero() +func markedForDeletion(obj metav1.Object) bool { + return !obj.GetDeletionTimestamp().IsZero() } diff --git a/cmd/k8s-operator/tsrecorder_test.go b/cmd/k8s-operator/tsrecorder_test.go index a3500f191..bd73e8fb9 100644 --- a/cmd/k8s-operator/tsrecorder_test.go +++ b/cmd/k8s-operator/tsrecorder_test.go @@ -107,7 +107,7 @@ func TestRecorder(t *testing.T) { expectReconciled(t, reconciler, "", tsr.Name) tsr.Status.Devices = []tsapi.RecorderTailnetDevice{ { - Hostname: "test-device", + Hostname: "hostname-nodeid-123", TailnetIPs: []string{"1.2.3.4", "::1"}, URL: "https://test-0.example.ts.net", }, diff --git a/k8s-operator/api.md b/k8s-operator/api.md index d343e6395..82a3476ae 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -526,7 +526,7 @@ _Appears in:_ | `tags` _[Tags](#tags)_ | Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s].
If you specify custom tags here, make sure you also make the operator
an owner of these tags.
See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
Tags cannot be changed once a ProxyGroup device has been created.
Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
Type: string
| | `replicas` _integer_ | Replicas specifies how many replicas to create the StatefulSet with.
Defaults to 2. | | | | `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix is the hostname prefix to use for tailnet devices created
by the ProxyGroup. Each device will have the integer number from its
StatefulSet pod appended to this prefix to form the full hostname.
HostnamePrefix can contain lower case letters, numbers and dashes, it
must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$`
Type: string
| -| `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that contains
configuration options that should be applied to the resources created
for this ProxyGroup. If unset, and no default ProxyClass is set, the
operator will create resources with the default configuration. | | | +| `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that contains
configuration options that should be applied to the resources created
for this ProxyGroup. If unset, the operator will create resources with
the default configuration. | | | #### ProxyGroupStatus diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index 92912a779..9b0e4215e 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -54,7 +54,7 @@ type ProxyGroupSpec struct { // Replicas specifies how many replicas to create the StatefulSet with. // Defaults to 2. // +optional - Replicas *int `json:"replicas,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` // HostnamePrefix is the hostname prefix to use for tailnet devices created // by the ProxyGroup. Each device will have the integer number from its @@ -66,8 +66,8 @@ type ProxyGroupSpec struct { // ProxyClass is the name of the ProxyClass custom resource that contains // configuration options that should be applied to the resources created - // for this ProxyGroup. If unset, and no default ProxyClass is set, the - // operator will create resources with the default configuration. + // for this ProxyGroup. If unset, the operator will create resources with + // the default configuration. // +optional ProxyClass string `json:"proxyClass,omitempty"` } diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index b6b94ce3f..ba4ff40e4 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -584,7 +584,7 @@ func (in *ProxyGroupSpec) DeepCopyInto(out *ProxyGroupSpec) { } if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas - *out = new(int) + *out = new(int32) **out = **in } } diff --git a/k8s-operator/conditions.go b/k8s-operator/conditions.go index 2b4022c40..702ed2bd3 100644 --- a/k8s-operator/conditions.go +++ b/k8s-operator/conditions.go @@ -93,6 +93,14 @@ func SetRecorderCondition(tsr *tsapi.Recorder, conditionType tsapi.ConditionType tsr.Status.Conditions = conds } +// SetProxyGroupCondition ensures that ProxyGroup status has a condition with the +// given attributes. LastTransitionTime gets set every time condition's status +// changes. +func SetProxyGroupCondition(pg *tsapi.ProxyGroup, conditionType tsapi.ConditionType, status metav1.ConditionStatus, reason, message string, gen int64, clock tstime.Clock, logger *zap.SugaredLogger) { + conds := updateCondition(pg.Status.Conditions, conditionType, status, reason, message, gen, clock, logger) + pg.Status.Conditions = conds +} + func updateCondition(conds []metav1.Condition, conditionType tsapi.ConditionType, status metav1.ConditionStatus, reason, message string, gen int64, clock tstime.Clock, logger *zap.SugaredLogger) []metav1.Condition { newCondition := metav1.Condition{ Type: string(conditionType), diff --git a/k8s-operator/utils.go b/k8s-operator/utils.go index 497f31b60..a1f225fe6 100644 --- a/k8s-operator/utils.go +++ b/k8s-operator/utils.go @@ -29,9 +29,9 @@ type Records struct { IP4 map[string][]string `json:"ip4"` } -// TailscaledConfigFileNameForCap returns a tailscaled config file name in +// TailscaledConfigFileName returns a tailscaled config file name in // format expected by containerboot for the given CapVer. 
-func TailscaledConfigFileNameForCap(cap tailcfg.CapabilityVersion) string { +func TailscaledConfigFileName(cap tailcfg.CapabilityVersion) string { if cap < 95 { return "tailscaled" } diff --git a/kube/kubetypes/metrics.go b/kube/kubetypes/metrics.go index 021c1e26b..b183f1f6f 100644 --- a/kube/kubetypes/metrics.go +++ b/kube/kubetypes/metrics.go @@ -22,4 +22,5 @@ const ( MetricNameserverCount = "k8s_nameserver_resources" MetricRecorderCount = "k8s_recorder_resources" MetricEgressServiceCount = "k8s_egress_service_resources" + MetricProxyGroupCount = "k8s_proxygroup_resources" ) From cb10eddc269b03a816f39b0bb4564d54d3843248 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 7 Oct 2024 08:01:42 -0700 Subject: [PATCH 0006/1708] tool/gocross: fix argument order to find To avoid warning: find: warning: you have specified the global option -maxdepth after the argument -type, but global options are not positional, i.e., -maxdepth affects tests specified before it as well as those specified after it. Please specify global options before other arguments. Fixes tailscale/corp#23689 Change-Id: I91ee260b295c552c0a029883d5e406733e081478 Signed-off-by: Brad Fitzpatrick --- tool/gocross/gocross-wrapper.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tool/gocross/gocross-wrapper.sh b/tool/gocross/gocross-wrapper.sh index 6817b6e4e..366011fef 100755 --- a/tool/gocross/gocross-wrapper.sh +++ b/tool/gocross/gocross-wrapper.sh @@ -67,7 +67,7 @@ case "$REV" in rm -f "$toolchain.tar.gz" # Do some cleanup of old toolchains while we're here. - for hash in $(find "$HOME/.cache/tsgo" -type f -maxdepth 1 -name '*.extracted' -mtime 90 -exec basename {} \; | sed 's/.extracted$//'); do + for hash in $(find "$HOME/.cache/tsgo" -maxdepth 1 -type f -name '*.extracted' -mtime 90 -exec basename {} \; | sed 's/.extracted$//'); do echo "# Cleaning up old Go toolchain $hash" >&2 rm -rf "$HOME/.cache/tsgo/$hash" rm -rf "$HOME/.cache/tsgo/$hash.extracted" From c588c3623315960858d2744a9fd9505781b06e3a Mon Sep 17 00:00:00 2001 From: Erisa A Date: Mon, 7 Oct 2024 17:28:45 +0100 Subject: [PATCH 0007/1708] types/key: use tlpub: in error message (#13707) Fixes tailscale/corp#19442 Signed-off-by: Erisa A --- cmd/tailscale/cli/cli_test.go | 2 +- types/key/nl.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index b0658fd95..d103c8f7e 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -1448,7 +1448,7 @@ func TestParseNLArgs(t *testing.T) { name: "disablements not allowed", input: []string{"disablement:" + strings.Repeat("02", 32)}, parseKeys: true, - wantErr: fmt.Errorf("parsing key 1: key hex string doesn't have expected type prefix nlpub:"), + wantErr: fmt.Errorf("parsing key 1: key hex string doesn't have expected type prefix tlpub:"), }, { name: "keys not allowed", diff --git a/types/key/nl.go b/types/key/nl.go index e0b4e5ca6..50caed98c 100644 --- a/types/key/nl.go +++ b/types/key/nl.go @@ -131,10 +131,10 @@ func NLPublicFromEd25519Unsafe(public ed25519.PublicKey) NLPublic { // is able to decode both the CLI form (tlpub:) & the // regular form (nlpub:). 
func (k *NLPublic) UnmarshalText(b []byte) error { - if mem.HasPrefix(mem.B(b), mem.S(nlPublicHexPrefixCLI)) { - return parseHex(k.k[:], mem.B(b), mem.S(nlPublicHexPrefixCLI)) + if mem.HasPrefix(mem.B(b), mem.S(nlPublicHexPrefix)) { + return parseHex(k.k[:], mem.B(b), mem.S(nlPublicHexPrefix)) } - return parseHex(k.k[:], mem.B(b), mem.S(nlPublicHexPrefix)) + return parseHex(k.k[:], mem.B(b), mem.S(nlPublicHexPrefixCLI)) } // AppendText implements encoding.TextAppender. From 38f236c7259110f14c3a9a94c4879ed772ce4bcd Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 4 Oct 2024 18:05:23 -0700 Subject: [PATCH 0008/1708] derp: add server metric for batch write sizes Updates tailscale/corp#23668 Change-Id: Ie6268c4035a3b29fd53c072c5793e4cbba93d031 Signed-off-by: Brad Fitzpatrick --- derp/derp_server.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/derp/derp_server.go b/derp/derp_server.go index 2e17cbfe5..cabd62653 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -145,6 +145,7 @@ type Server struct { tcpRtt metrics.LabelMap // histogram meshUpdateBatchSize *metrics.Histogram meshUpdateLoopCount *metrics.Histogram + bufferedWriteFrames *metrics.Histogram // how many sendLoop frames (or groups of related frames) get written per flush // verifyClientsLocalTailscaled only accepts client connections to the DERP // server if the clientKey is a known peer in the network, as specified by a @@ -349,6 +350,7 @@ func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { tcpRtt: metrics.LabelMap{Label: "le"}, meshUpdateBatchSize: metrics.NewHistogram([]float64{0, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000}), meshUpdateLoopCount: metrics.NewHistogram([]float64{0, 1, 2, 5, 10, 20, 50, 100}), + bufferedWriteFrames: metrics.NewHistogram([]float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 50, 100}), keyOfAddr: map[netip.AddrPort]key.NodePublic{}, clock: tstime.StdClock{}, } @@ -1653,10 +1655,12 @@ func (c *sclient) sendLoop(ctx context.Context) error { defer keepAliveTick.Stop() var werr error // last write error + inBatch := -1 // for bufferedWriteFrames for { if werr != nil { return werr } + inBatch++ // First, a non-blocking select (with a default) that // does as many non-flushing writes as possible. 
select { @@ -1688,6 +1692,10 @@ func (c *sclient) sendLoop(ctx context.Context) error { if werr = c.bw.Flush(); werr != nil { return werr } + if inBatch != 0 { // the first loop will almost hit default & be size zero + c.s.bufferedWriteFrames.Observe(float64(inBatch)) + inBatch = 0 + } } // Then a blocking select with same: @@ -1698,7 +1706,6 @@ func (c *sclient) sendLoop(ctx context.Context) error { werr = c.sendPeerGone(msg.peer, msg.reason) case <-c.meshUpdate: werr = c.sendMeshUpdates() - continue case msg := <-c.sendQueue: werr = c.sendPacket(msg.src, msg.bs) c.recordQueueTime(msg.enqueuedAt) @@ -1707,7 +1714,6 @@ func (c *sclient) sendLoop(ctx context.Context) error { c.recordQueueTime(msg.enqueuedAt) case msg := <-c.sendPongCh: werr = c.sendPong(msg) - continue case <-keepAliveTickChannel: werr = c.sendKeepAlive() } @@ -2060,6 +2066,7 @@ func (s *Server) ExpVar() expvar.Var { m.Set("counter_tcp_rtt", &s.tcpRtt) m.Set("counter_mesh_update_batch_size", s.meshUpdateBatchSize) m.Set("counter_mesh_update_loop_count", s.meshUpdateLoopCount) + m.Set("counter_buffered_write_frames", s.bufferedWriteFrames) var expvarVersion expvar.String expvarVersion.Set(version.Long()) m.Set("version", &expvarVersion) From 7f016baa8743a6d66f61e5fc81d55e5147dd898c Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Mon, 7 Oct 2024 20:12:56 +0100 Subject: [PATCH 0009/1708] cmd/k8s-operator,k8s-operator: create ConfigMap for egress services + small fixes for egress services (#13715) cmd/k8s-operator, k8s-operator: create ConfigMap for egress services + small reconciler fixes Updates tailscale/tailscale#13406 Signed-off-by: Irbe Krumina --- .../crds/tailscale.com_proxygroups.yaml | 5 +- .../deploy/manifests/operator.yaml | 5 +- cmd/k8s-operator/egress-eps.go | 6 +- cmd/k8s-operator/egress-eps_test.go | 12 ++- cmd/k8s-operator/egress-services.go | 32 +++---- cmd/k8s-operator/egress-services_test.go | 2 +- cmd/k8s-operator/operator.go | 95 +++++++++++++------ cmd/k8s-operator/proxygroup.go | 9 ++ cmd/k8s-operator/proxygroup_specs.go | 41 ++++++++ cmd/k8s-operator/sts.go | 7 +- cmd/k8s-operator/svc.go | 4 + k8s-operator/api.md | 2 +- .../apis/v1alpha1/types_proxygroup.go | 4 +- 13 files changed, 153 insertions(+), 71 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index 32e2ab450..035d04786 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -85,10 +85,7 @@ spec: type: string pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ type: - description: |- - Type of the ProxyGroup, either ingress or egress. Each set of proxies - managed by a single ProxyGroup definition operate as only ingress or - only egress proxies. + description: Type of the ProxyGroup proxies. Currently the only supported type is egress. type: string enum: - egress diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index e6358708b..14166fed9 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2497,10 +2497,7 @@ spec: type: string type: array type: - description: |- - Type of the ProxyGroup, either ingress or egress. Each set of proxies - managed by a single ProxyGroup definition operate as only ingress or - only egress proxies. + description: Type of the ProxyGroup proxies. Currently the only supported type is egress. 
enum: - egress type: string diff --git a/cmd/k8s-operator/egress-eps.go b/cmd/k8s-operator/egress-eps.go index 510d58783..fa13c525f 100644 --- a/cmd/k8s-operator/egress-eps.go +++ b/cmd/k8s-operator/egress-eps.go @@ -58,8 +58,8 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ // resources are set up for this tailnet service. svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: eps.Labels[labelExternalSvcName], - Namespace: eps.Labels[labelExternalSvcNamespace], + Name: eps.Labels[LabelParentName], + Namespace: eps.Labels[LabelParentNamespace], }, } err = er.Get(ctx, client.ObjectKeyFromObject(svc), svc) @@ -98,7 +98,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ // Check which Pods in ProxyGroup are ready to route traffic to this // egress service. podList := &corev1.PodList{} - if err := er.List(ctx, podList, client.MatchingLabels(map[string]string{labelProxyGroup: proxyGroupName})); err != nil { + if err := er.List(ctx, podList, client.MatchingLabels(pgLabels(proxyGroupName, nil))); err != nil { return res, fmt.Errorf("error listing Pods for ProxyGroup %s: %w", proxyGroupName, err) } newEndpoints := make([]discoveryv1.Endpoint, 0) diff --git a/cmd/k8s-operator/egress-eps_test.go b/cmd/k8s-operator/egress-eps_test.go index a2e95e5d3..00d13b2a7 100644 --- a/cmd/k8s-operator/egress-eps_test.go +++ b/cmd/k8s-operator/egress-eps_test.go @@ -75,7 +75,11 @@ func TestTailscaleEgressEndpointSlices(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "operator-ns", - Labels: map[string]string{labelExternalSvcName: "test", labelExternalSvcNamespace: "default", labelProxyGroup: "foo"}, + Labels: map[string]string{ + LabelParentName: "test", + LabelParentNamespace: "default", + labelSvcType: typeEgress, + labelProxyGroup: "foo"}, }, AddressType: discoveryv1.AddressTypeIPv4, } @@ -135,7 +139,7 @@ func configMapForSvc(t *testing.T, svc *corev1.Service, p uint16) *corev1.Config } cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(egressSvcsCMNameTemplate, svc.Annotations[AnnotationProxyGroup]), + Name: pgEgressCMName(svc.Annotations[AnnotationProxyGroup]), Namespace: "operator-ns", }, BinaryData: map[string][]byte{egressservices.KeyEgressServices: bs}, @@ -173,7 +177,7 @@ func podAndSecretForProxyGroup(pg string) (*corev1.Pod, *corev1.Secret) { ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-0", pg), Namespace: "operator-ns", - Labels: map[string]string{labelProxyGroup: pg}, + Labels: pgLabels(pg, nil), UID: "foo", }, Status: corev1.PodStatus{ @@ -184,7 +188,7 @@ func podAndSecretForProxyGroup(pg string) (*corev1.Pod, *corev1.Secret) { ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-0", pg), Namespace: "operator-ns", - Labels: map[string]string{labelProxyGroup: pg}, + Labels: pgSecretLabels(pg, "state"), }, } return p, s diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index 1c4f70a96..20bafe8ec 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -46,10 +46,7 @@ const ( reasonEgressSvcCreationFailed = "EgressSvcCreationFailed" reasonProxyGroupNotReady = "ProxyGroupNotReady" - labelProxyGroup = "tailscale.com/proxy-group" - labelProxyGroupType = "tailscale.com/proxy-group-type" - labelExternalSvcName = "tailscale.com/external-service-name" - labelExternalSvcNamespace = "tailscale.com/external-service-namespace" + labelProxyGroup = "tailscale.com/proxy-group" labelSvcType = 
"tailscale.com/svc-type" // ingress or egress typeEgress = "egress" @@ -62,8 +59,6 @@ const ( maxPorts = 10000 indexEgressProxyGroup = ".metadata.annotations.egress-proxy-group" - - egressSvcsCMNameTemplate = "proxy-cfg-%s" ) var gaugeEgressServices = clientmetric.NewGauge(kubetypes.MetricEgressServiceCount) @@ -416,7 +411,7 @@ func (esr *egressSvcsReconciler) usedPortsForPG(ctx context.Context, pg string) func (esr *egressSvcsReconciler) clusterIPSvcForEgress(crl map[string]string) *corev1.Service { return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - GenerateName: svcNameBase(crl[labelExternalSvcName]), + GenerateName: svcNameBase(crl[LabelParentName]), Namespace: esr.tsNamespace, Labels: crl, }, @@ -428,7 +423,7 @@ func (esr *egressSvcsReconciler) clusterIPSvcForEgress(crl map[string]string) *c func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context, svc *corev1.Service, logger *zap.SugaredLogger) error { crl := egressSvcChildResourceLabels(svc) - cmName := fmt.Sprintf(egressSvcsCMNameTemplate, crl[labelProxyGroup]) + cmName := pgEgressCMName(crl[labelProxyGroup]) cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: cmName, @@ -479,15 +474,18 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s if err := esr.Get(ctx, client.ObjectKeyFromObject(pg), pg); apierrors.IsNotFound(err) { l.Infof("ProxyGroup %q not found, waiting...", proxyGroupName) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l) + tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, nil } else if err != nil { err := fmt.Errorf("unable to retrieve ProxyGroup %s: %w", proxyGroupName, err) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, err.Error(), esr.clock, l) + tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, err } if !tsoperator.ProxyGroupIsReady(pg) { l.Infof("ProxyGroup %s is not ready, waiting...", proxyGroupName) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l) + tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, nil } @@ -496,6 +494,7 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s esr.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVICE", msg) l.Info(msg) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionFalse, reasonEgressSvcInvalid, msg, esr.clock, l) + tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, nil } l.Debugf("egress service is valid") @@ -599,15 +598,15 @@ func isEgressSvcForProxyGroup(obj client.Object) bool { // egressSvcConfig returns a ConfigMap that contains egress services configuration for the provided ProxyGroup as well // as unmarshalled configuration from the ConfigMap. 
func egressSvcsConfigs(ctx context.Context, cl client.Client, proxyGroupName, tsNamespace string) (cm *corev1.ConfigMap, cfgs *egressservices.Configs, err error) { - cmName := fmt.Sprintf(egressSvcsCMNameTemplate, proxyGroupName) + name := pgEgressCMName(proxyGroupName) cm = &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: cmName, + Name: name, Namespace: tsNamespace, }, } if err := cl.Get(ctx, client.ObjectKeyFromObject(cm), cm); err != nil { - return nil, nil, fmt.Errorf("error retrieving egress services ConfigMap %s: %v", cmName, err) + return nil, nil, fmt.Errorf("error retrieving egress services ConfigMap %s: %v", name, err) } cfgs = &egressservices.Configs{} if len(cm.BinaryData[egressservices.KeyEgressServices]) != 0 { @@ -626,11 +625,12 @@ func egressSvcsConfigs(ctx context.Context, cl client.Client, proxyGroupName, ts // should probably validate and truncate (?) the names is they are too long. func egressSvcChildResourceLabels(svc *corev1.Service) map[string]string { return map[string]string{ - LabelManaged: "true", - labelProxyGroup: svc.Annotations[AnnotationProxyGroup], - labelExternalSvcName: svc.Name, - labelExternalSvcNamespace: svc.Namespace, - labelSvcType: typeEgress, + LabelManaged: "true", + LabelParentType: "svc", + LabelParentName: svc.Name, + LabelParentNamespace: svc.Namespace, + labelProxyGroup: svc.Annotations[AnnotationProxyGroup], + labelSvcType: typeEgress, } } diff --git a/cmd/k8s-operator/egress-services_test.go b/cmd/k8s-operator/egress-services_test.go index 1adde4e90..ac7733985 100644 --- a/cmd/k8s-operator/egress-services_test.go +++ b/cmd/k8s-operator/egress-services_test.go @@ -40,7 +40,7 @@ func TestTailscaleEgressServices(t *testing.T) { } cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(egressSvcsCMNameTemplate, "foo"), + Name: pgEgressCMName("foo"), Namespace: "operator-ns", }, } diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index f744c9f5e..28895269d 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -377,15 +377,16 @@ func runReconcilers(opts reconcilerOpts) { } epsFilter := handler.EnqueueRequestsFromMapFunc(egressEpsHandler) - podsSecretsFilter := handler.EnqueueRequestsFromMapFunc(egressEpsFromEgressPGChildResources(mgr.GetClient(), opts.log, opts.tailscaleNamespace)) - epsFromExtNSvcFilter := handler.EnqueueRequestsFromMapFunc(epsFromExternalNameService(mgr.GetClient(), opts.log)) + podsFilter := handler.EnqueueRequestsFromMapFunc(egressEpsFromPGPods(mgr.GetClient(), opts.tailscaleNamespace)) + secretsFilter := handler.EnqueueRequestsFromMapFunc(egressEpsFromPGStateSecrets(mgr.GetClient(), opts.tailscaleNamespace)) + epsFromExtNSvcFilter := handler.EnqueueRequestsFromMapFunc(epsFromExternalNameService(mgr.GetClient(), opts.log, opts.tailscaleNamespace)) err = builder. ControllerManagedBy(mgr). Named("egress-eps-reconciler"). Watches(&discoveryv1.EndpointSlice{}, epsFilter). - Watches(&corev1.Pod{}, podsSecretsFilter). - Watches(&corev1.Secret{}, podsSecretsFilter). + Watches(&corev1.Pod{}, podsFilter). + Watches(&corev1.Secret{}, secretsFilter). Watches(&corev1.Service{}, epsFromExtNSvcFilter). 
Complete(&egressEpsReconciler{ Client: mgr.GetClient(), @@ -841,40 +842,70 @@ func egressEpsHandler(_ context.Context, o client.Object) []reconcile.Request { } } -// egressEpsFromEgressPGChildResources returns a handler that checks if an -// object is a child resource for an egress ProxyGroup (a Pod or a state Secret) -// and if it is, returns reconciler requests for all egress EndpointSlices for -// that ProxyGroup. -func egressEpsFromEgressPGChildResources(cl client.Client, logger *zap.SugaredLogger, ns string) handler.MapFunc { +// egressEpsFromEgressPods returns a Pod event handler that checks if Pod is a replica for a ProxyGroup and if it is, +// returns reconciler requests for all egress EndpointSlices for that ProxyGroup. +func egressEpsFromPGPods(cl client.Client, ns string) handler.MapFunc { return func(_ context.Context, o client.Object) []reconcile.Request { - pg, ok := o.GetLabels()[labelProxyGroup] + if _, ok := o.GetLabels()[LabelManaged]; !ok { + return nil + } + // TODO(irbekrm): for now this is good enough as all ProxyGroups are egress. Add a type check once we + // have ingress ProxyGroups. + if typ := o.GetLabels()[LabelParentType]; typ != "proxygroup" { + return nil + } + pg, ok := o.GetLabels()[LabelParentName] if !ok { return nil } - // TODO(irbekrm): depending on what labels we add to ProxyGroup - // resources and which resources, this might need some extra - // checks. - if typ, ok := o.GetLabels()[labelProxyGroupType]; !ok || typ != typeEgress { + return reconcileRequestsForPG(pg, cl, ns) + } +} + +// egressEpsFromPGStateSecrets returns a Secret event handler that checks if Secret is a state Secret for a ProxyGroup and if it is, +// returns reconciler requests for all egress EndpointSlices for that ProxyGroup. +func egressEpsFromPGStateSecrets(cl client.Client, ns string) handler.MapFunc { + return func(_ context.Context, o client.Object) []reconcile.Request { + if _, ok := o.GetLabels()[LabelManaged]; !ok { return nil } - epsList := discoveryv1.EndpointSliceList{} - if err := cl.List(context.Background(), &epsList, client.InNamespace(ns), client.MatchingLabels(map[string]string{labelProxyGroup: pg})); err != nil { - logger.Infof("error listing EndpointSlices: %v, skipping a reconcile for event on %s %s", err, o.GetName(), o.GetObjectKind().GroupVersionKind().Kind) + // TODO(irbekrm): for now this is good enough as all ProxyGroups are egress. Add a type check once we + // have ingress ProxyGroups. 
+ if parentType := o.GetLabels()[LabelParentType]; parentType != "proxygroup" { return nil } - reqs := make([]reconcile.Request, 0) - for _, ep := range epsList.Items { - reqs = append(reqs, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Namespace: ep.Namespace, - Name: ep.Name, - }, - }) + if secretType := o.GetLabels()[labelSecretType]; secretType != "state" { + return nil } - return reqs + pg, ok := o.GetLabels()[LabelParentName] + if !ok { + return nil + } + return reconcileRequestsForPG(pg, cl, ns) + } +} + +func reconcileRequestsForPG(pg string, cl client.Client, ns string) []reconcile.Request { + epsList := discoveryv1.EndpointSliceList{} + if err := cl.List(context.Background(), &epsList, + client.InNamespace(ns), + client.MatchingLabels(map[string]string{labelProxyGroup: pg})); err != nil { + return nil + } + reqs := make([]reconcile.Request, 0) + for _, ep := range epsList.Items { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: ep.Namespace, + Name: ep.Name, + }, + }) } + return reqs } +// egressSvcsFromEgressProxyGroup is an event handler for egress ProxyGroups. It returns reconcile requests for all +// user-created ExternalName Services that should be exposed on this ProxyGroup. func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { return func(_ context.Context, o client.Object) []reconcile.Request { pg, ok := o.(*tsapi.ProxyGroup) @@ -903,7 +934,9 @@ func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) } } -func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { +// epsFromExternalNameService is an event handler for ExternalName Services that define a Tailscale egress service that +// should be exposed on a ProxyGroup. It returns reconcile requests for EndpointSlices created for this Service. +func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger, ns string) handler.MapFunc { return func(_ context.Context, o client.Object) []reconcile.Request { svc, ok := o.(*corev1.Service) if !ok { @@ -914,10 +947,8 @@ func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger) han return nil } epsList := &discoveryv1.EndpointSliceList{} - if err := cl.List(context.Background(), epsList, client.MatchingLabels(map[string]string{ - labelExternalSvcName: svc.Name, - labelExternalSvcNamespace: svc.Namespace, - })); err != nil { + if err := cl.List(context.Background(), epsList, client.InNamespace(ns), + client.MatchingLabels(egressSvcChildResourceLabels(svc))); err != nil { logger.Infof("error listing EndpointSlices: %v, skipping a reconcile for event on Service %s", err, svc.Name) return nil } @@ -934,6 +965,8 @@ func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger) han } } +// indexEgressServices adds a local index to a cached Tailscale egress Services meant to be exposed on a ProxyGroup. The +// index is used a list filter. 
func indexEgressServices(o client.Object) []string { if !isEgressSvcForProxyGroup(o) { return nil diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index f19339059..99f48f323 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -223,6 +223,15 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro }); err != nil { return fmt.Errorf("error provisioning RoleBinding: %w", err) } + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { + cm := pgEgressCM(pg, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, cm, func(existing *corev1.ConfigMap) { + existing.ObjectMeta.Labels = cm.ObjectMeta.Labels + existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences + }); err != nil { + return fmt.Errorf("error provisioning ConfigMap: %w", err) + } + } ss := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, cfgHash) ss = applyProxyClassToStatefulSet(proxyClass, ss, nil, logger) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index bf2adcbf5..a1ec9ccde 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -13,6 +13,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/egressservices" "tailscale.com/types/ptr" ) @@ -80,6 +81,13 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHa }) } + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { + mounts = append(mounts, corev1.VolumeMount{ + Name: pgEgressCMName(pg.Name), + MountPath: "/etc/proxies", + ReadOnly: true, + }) + } return mounts }(), Env: func() []corev1.EnvVar { @@ -118,6 +126,12 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHa Value: "false", }, } + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { + envs = append(envs, corev1.EnvVar{ + Name: "TS_EGRESS_SERVICES_CONFIG_PATH", + Value: fmt.Sprintf("/etc/proxies/%s", egressservices.KeyEgressServices), + }) + } if tsFirewallMode != "" { envs = append(envs, corev1.EnvVar{ @@ -142,6 +156,18 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHa }, }) } + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { + volumes = append(volumes, corev1.Volume{ + Name: pgEgressCMName(pg.Name), + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: pgEgressCMName(pg.Name), + }, + }, + }, + }) + } return volumes }(), @@ -230,6 +256,17 @@ func pgStateSecrets(pg *tsapi.ProxyGroup, namespace string) (secrets []*corev1.S return secrets } +func pgEgressCM(pg *tsapi.ProxyGroup, namespace string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgEgressCMName(pg.Name), + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + }, + } +} + func pgSecretLabels(pgName, typ string) map[string]string { return pgLabels(pgName, map[string]string{ labelSecretType: typ, // "config" or "state". 
@@ -260,3 +297,7 @@ func pgReplicas(pg *tsapi.ProxyGroup) int32 { return 2 } + +func pgEgressCMName(pg string) string { + return fmt.Sprintf("%s-egress-config", pg) +} diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 19c98100f..6378a8263 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -49,10 +49,9 @@ const ( LabelParentNamespace = "tailscale.com/parent-resource-ns" labelSecretType = "tailscale.com/secret-type" // "config" or "state". - // LabelProxyClass can be set by users on Connectors, tailscale - // Ingresses and Services that define cluster ingress or cluster egress, - // to specify that configuration in this ProxyClass should be applied to - // resources created for the Connector, Ingress or Service. + // LabelProxyClass can be set by users on tailscale Ingresses and Services that define cluster ingress or + // cluster egress, to specify that configuration in this ProxyClass should be applied to resources created for + // the Ingress or Service. LabelProxyClass = "tailscale.com/proxy-class" FinalizerName = "tailscale.com/finalizer" diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index e47fcae7f..22487ee26 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -112,6 +112,10 @@ func (a *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request return reconcile.Result{}, fmt.Errorf("failed to get svc: %w", err) } + if _, ok := svc.Annotations[AnnotationProxyGroup]; ok { + return reconcile.Result{}, nil // this reconciler should not look at Services for ProxyGroup + } + if !svc.DeletionTimestamp.IsZero() || !a.isTailscaleService(svc) { logger.Debugf("service is being deleted or is (no longer) referring to Tailscale ingress/egress, ensuring any created resources are cleaned up") return reconcile.Result{}, a.maybeCleanup(ctx, logger, svc) diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 82a3476ae..fd0a4e6ce 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -522,7 +522,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup, either ingress or egress. Each set of proxies
managed by a single ProxyGroup definition operate as only ingress or
only egress proxies. | | Enum: [egress]
Type: string
| +| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Currently the only supported type is egress. | | Enum: [egress]
Type: string
| | `tags` _[Tags](#tags)_ | Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s].
If you specify custom tags here, make sure you also make the operator
an owner of these tags.
See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
Tags cannot be changed once a ProxyGroup device has been created.
Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
Type: string
| | `replicas` _integer_ | Replicas specifies how many replicas to create the StatefulSet with.
Defaults to 2. | | | | `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix is the hostname prefix to use for tailnet devices created
by the ProxyGroup. Each device will have the integer number from its
StatefulSet pod appended to this prefix to form the full hostname.
HostnamePrefix can contain lower case letters, numbers and dashes, it
must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$`
Type: string
| diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index 9b0e4215e..ef1e8c8c1 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -37,9 +37,7 @@ type ProxyGroupList struct { } type ProxyGroupSpec struct { - // Type of the ProxyGroup, either ingress or egress. Each set of proxies - // managed by a single ProxyGroup definition operate as only ingress or - // only egress proxies. + // Type of the ProxyGroup proxies. Currently the only supported type is egress. Type ProxyGroupType `json:"type"` // Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s]. From f3de4e96a8e2ba191816d89ce8fbe02ebb572bb1 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 7 Oct 2024 12:12:49 -0700 Subject: [PATCH 0010/1708] derp: fix omitted word in comment Fix comment just added in 38f236c7259. Updates tailscale/corp#23668 Updates #cleanup Change-Id: Icbe112e24fcccf8c61c759c631ad09f3e5480547 Signed-off-by: Brad Fitzpatrick --- derp/derp_server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/derp/derp_server.go b/derp/derp_server.go index cabd62653..8c5d6e890 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -1692,7 +1692,7 @@ func (c *sclient) sendLoop(ctx context.Context) error { if werr = c.bw.Flush(); werr != nil { return werr } - if inBatch != 0 { // the first loop will almost hit default & be size zero + if inBatch != 0 { // the first loop will almost always hit default & be size zero c.s.bufferedWriteFrames.Observe(float64(inBatch)) inBatch = 0 } From 9a73462ea4c8c96df50490ec6f65f4f354de9118 Mon Sep 17 00:00:00 2001 From: Nick Hill Date: Sun, 6 Oct 2024 10:41:11 -0500 Subject: [PATCH 0011/1708] types/lazy: add DeferredInit type It is sometimes necessary to defer initialization steps until the first actual usage or until certain prerequisites have been met. For example, policy setting and policy source registration should not occur during package initialization. Instead, they should be deferred until the syspolicy package is actually used. Additionally, any errors should be properly handled and reported, rather than causing a panic within the package's init function. In this PR, we add DeferredInit, to facilitate the registration and invocation of deferred initialization functions. Updates #12687 Signed-off-by: Nick Hill --- types/lazy/deferred.go | 98 +++++++++++++ types/lazy/deferred_test.go | 277 ++++++++++++++++++++++++++++++++++++ 2 files changed, 375 insertions(+) create mode 100644 types/lazy/deferred.go create mode 100644 types/lazy/deferred_test.go diff --git a/types/lazy/deferred.go b/types/lazy/deferred.go new file mode 100644 index 000000000..964553cef --- /dev/null +++ b/types/lazy/deferred.go @@ -0,0 +1,98 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package lazy + +import ( + "sync" + "sync/atomic" + + "tailscale.com/types/ptr" +) + +// DeferredInit allows one or more funcs to be deferred +// until [DeferredInit.Do] is called for the first time. +// +// DeferredInit is safe for concurrent use. +type DeferredInit struct { + DeferredFuncs +} + +// DeferredFuncs allows one or more funcs to be deferred +// until the owner's [DeferredInit.Do] method is called +// for the first time. +// +// DeferredFuncs is safe for concurrent use. 
+type DeferredFuncs struct { + m sync.Mutex + funcs []func() error + + // err is either: + // * nil, if deferred init has not yet been completed + // * nilErrPtr, if initialization completed successfully + // * non-nil and not nilErrPtr, if there was an error + // + // It is an atomic.Pointer so it can be read without m held. + err atomic.Pointer[error] +} + +// Defer adds a function to be called when [DeferredInit.Do] +// is called for the first time. It returns true on success, +// or false if [DeferredInit.Do] has already been called. +func (d *DeferredFuncs) Defer(f func() error) bool { + d.m.Lock() + defer d.m.Unlock() + if d.err.Load() != nil { + return false + } + d.funcs = append(d.funcs, f) + return true +} + +// MustDefer is like [DeferredFuncs.Defer], but panics +// if [DeferredInit.Do] has already been called. +func (d *DeferredFuncs) MustDefer(f func() error) { + if !d.Defer(f) { + panic("deferred init already completed") + } +} + +// Do calls previously deferred init functions if it is being called +// for the first time on this instance of [DeferredInit]. +// It stops and returns an error if any init function returns an error. +// +// It is safe for concurrent use, and the deferred init is guaranteed +// to have been completed, either successfully or with an error, +// when Do() returns. +func (d *DeferredInit) Do() error { + err := d.err.Load() + if err == nil { + err = d.doSlow() + } + return *err +} + +func (d *DeferredInit) doSlow() (err *error) { + d.m.Lock() + defer d.m.Unlock() + if err := d.err.Load(); err != nil { + return err + } + defer func() { + d.err.Store(err) + d.funcs = nil // do not keep funcs alive after invoking + }() + for _, f := range d.funcs { + if err := f(); err != nil { + return ptr.To(err) + } + } + return nilErrPtr +} + +// Funcs is a shorthand for &d.DeferredFuncs. +// The returned value can safely be passed to external code, +// allowing to defer init funcs without also exposing [DeferredInit.Do]. +func (d *DeferredInit) Funcs() *DeferredFuncs { + return &d.DeferredFuncs +} diff --git a/types/lazy/deferred_test.go b/types/lazy/deferred_test.go new file mode 100644 index 000000000..9de16c67a --- /dev/null +++ b/types/lazy/deferred_test.go @@ -0,0 +1,277 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package lazy + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" +) + +func ExampleDeferredInit() { + // DeferredInit allows both registration and invocation of the + // deferred funcs. It should remain internal to the code that "owns" it. + var di DeferredInit + // Deferred funcs will not be executed until [DeferredInit.Do] is called. + deferred := di.Defer(func() error { + fmt.Println("Internal init") + return nil + }) + // [DeferredInit.Defer] reports whether the function was successfully deferred. + // A func can only fail to defer if [DeferredInit.Do] has already been called. + if deferred { + fmt.Printf("Internal init has been deferred\n\n") + } + + // If necessary, the value returned by [DeferredInit.Funcs] + // can be shared with external code to facilitate deferring + // funcs without allowing it to call [DeferredInit.Do]. + df := di.Funcs() + // If a certain init step must be completed for the program + // to function correctly, and failure to defer it indicates + // a coding error, use [DeferredFuncs.MustDefer] instead of + // [DeferredFuncs.Defer]. It panics if Do() has already been called. 
+ df.MustDefer(func() error { + fmt.Println("External init - 1") + return nil + }) + // A deferred func may return an error to indicate a failed init. + // If a deferred func returns an error, execution stops + // and the error is propagated to the caller. + df.Defer(func() error { + fmt.Println("External init - 2") + return errors.New("bang!") + }) + // The deferred function below won't be executed. + df.Defer(func() error { + fmt.Println("Unreachable") + return nil + }) + + // When [DeferredInit]'s owner needs initialization to be completed, + // it can call [DeferredInit.Do]. When called for the first time, + // it invokes the deferred funcs. + err := di.Do() + if err != nil { + fmt.Printf("Deferred init failed: %v\n", err) + } + // [DeferredInit.Do] is safe for concurrent use and can be called + // multiple times by the same or different goroutines. + // However, the deferred functions are never invoked more than once. + // If the deferred init fails on the first attempt, all subsequent + // [DeferredInit.Do] calls will return the same error. + if err = di.Do(); err != nil { + fmt.Printf("Deferred init failed: %v\n\n", err) + } + + // Additionally, all subsequent attempts to defer a function will fail + // after [DeferredInit.Do] has been called. + deferred = di.Defer(func() error { + fmt.Println("Unreachable") + return nil + }) + if !deferred { + fmt.Println("Cannot defer a func once init has been completed") + } + + // Output: + // Internal init has been deferred + // + // Internal init + // External init - 1 + // External init - 2 + // Deferred init failed: bang! + // Deferred init failed: bang! + // + // Cannot defer a func once init has been completed +} + +func TestDeferredInit(t *testing.T) { + tests := []struct { + name string + numFuncs int + }{ + { + name: "no-funcs", + numFuncs: 0, + }, + { + name: "one-func", + numFuncs: 1, + }, + { + name: "many-funcs", + numFuncs: 1000, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var di DeferredInit + + calls := make([]atomic.Bool, tt.numFuncs) // whether N-th func has been called + checkCalls := func() { + t.Helper() + for i := range calls { + if !calls[i].Load() { + t.Errorf("Func #%d has never been called", i) + } + } + } + + // Defer funcs concurrently across multiple goroutines. + var wg sync.WaitGroup + wg.Add(tt.numFuncs) + for i := range tt.numFuncs { + go func() { + f := func() error { + if calls[i].Swap(true) { + t.Errorf("Func #%d has already been called", i) + } + return nil + } + if !di.Defer(f) { + t.Errorf("Func #%d cannot be deferred", i) + return + } + wg.Done() + }() + } + // Wait for all funcs to be deferred. + wg.Wait() + + // Call [DeferredInit.Do] concurrently. 
+ const N = 10000 + for range N { + wg.Add(1) + go func() { + gotErr := di.Do() + checkError(t, gotErr, nil, false) + checkCalls() + wg.Done() + }() + } + wg.Wait() + }) + } +} + +func TestDeferredErr(t *testing.T) { + tests := []struct { + name string + funcs []func() error + wantErr error + }{ + { + name: "no-funcs", + wantErr: nil, + }, + { + name: "no-error", + funcs: []func() error{func() error { return nil }}, + wantErr: nil, + }, + { + name: "error", + funcs: []func() error{ + func() error { return nil }, + func() error { return errors.New("bang!") }, + func() error { return errors.New("unreachable") }, + }, + wantErr: errors.New("bang!"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var di DeferredInit + for _, f := range tt.funcs { + di.MustDefer(f) + } + + var wg sync.WaitGroup + N := 10000 + for range N { + wg.Add(1) + go func() { + gotErr := di.Do() + checkError(t, gotErr, tt.wantErr, false) + wg.Done() + }() + } + wg.Wait() + }) + } +} + +func TestDeferAfterDo(t *testing.T) { + var di DeferredInit + var deferred, called atomic.Int32 + + deferOnce := func() bool { + ok := di.Defer(func() error { + called.Add(1) + return nil + }) + if ok { + deferred.Add(1) + } + return ok + } + + // Deferring a func before calling [DeferredInit.Do] should always succeed. + if !deferOnce() { + t.Fatal("Failed to defer a func") + } + + // Defer up to N funcs concurrently while [DeferredInit.Do] is being called by the main goroutine. + // Since we'll likely attempt to defer some funcs after [DeferredInit.Do] has been called, + // we expect these late defers to fail, and the funcs will not be deferred or executed. + // However, the number of the deferred and called funcs should always be equal when [DeferredInit.Do] exits. + const N = 10000 + var wg sync.WaitGroup + for range N { + wg.Add(1) + go func() { + deferOnce() + wg.Done() + }() + } + + if err := di.Do(); err != nil { + t.Fatalf("DeferredInit.Do() failed: %v", err) + } + wantDeferred, wantCalled := deferred.Load(), called.Load() + + if deferOnce() { + t.Error("An init func was deferred after DeferredInit.Do() returned") + } + + // Wait for the goroutines deferring init funcs to exit. + // No funcs should be deferred after DeferredInit.Do() has returned, + // so the deferred and called counters should remain unchanged. + wg.Wait() + if gotDeferred := deferred.Load(); gotDeferred != wantDeferred { + t.Errorf("An init func was deferred after DeferredInit.Do() returned. Got %d, want %d", gotDeferred, wantDeferred) + } + if gotCalled := called.Load(); gotCalled != wantCalled { + t.Errorf("An init func was called after DeferredInit.Do() returned. Got %d, want %d", gotCalled, wantCalled) + } + if deferred, called := deferred.Load(), called.Load(); deferred != called { + t.Errorf("Deferred: %d; Called: %d", deferred, called) + } +} + +func checkError(tb testing.TB, got, want error, fatal bool) { + tb.Helper() + f := tb.Errorf + if fatal { + f = tb.Fatalf + } + if (want == nil && got != nil) || + (want != nil && got == nil) || + (want != nil && got != nil && want.Error() != got.Error()) { + f("gotErr: %v; wantErr: %v", got, want) + } +} From 266c14d6ca37ee929acdf42c796c01d7f031229a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 20:48:06 -0600 Subject: [PATCH 0012/1708] .github: Bump actions/cache from 4.0.2 to 4.1.0 (#13711) Bumps [actions/cache](https://github.com/actions/cache) from 4.0.2 to 4.1.0. 
- [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/0c45773b623bea8c8e75f6c82b208c3cf94ea4f9...2cdf405574d6ef1f33a1d12acccd3ae82f47b3f2) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5cfd86c40..bc70040b0 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -80,7 +80,7 @@ jobs: - name: checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Restore Cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + uses: actions/cache@2cdf405574d6ef1f33a1d12acccd3ae82f47b3f2 # v4.1.0 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -159,7 +159,7 @@ jobs: cache: false - name: Restore Cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + uses: actions/cache@2cdf405574d6ef1f33a1d12acccd3ae82f47b3f2 # v4.1.0 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -260,7 +260,7 @@ jobs: - name: checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Restore Cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + uses: actions/cache@2cdf405574d6ef1f33a1d12acccd3ae82f47b3f2 # v4.1.0 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -319,7 +319,7 @@ jobs: - name: checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Restore Cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + uses: actions/cache@2cdf405574d6ef1f33a1d12acccd3ae82f47b3f2 # v4.1.0 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -367,7 +367,7 @@ jobs: - name: checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Restore Cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + uses: actions/cache@2cdf405574d6ef1f33a1d12acccd3ae82f47b3f2 # v4.1.0 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache From 866714a8941072ce576628d62b8cd0eed3f67e3e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 22:15:40 -0600 Subject: [PATCH 0013/1708] .github: Bump github/codeql-action from 3.26.9 to 3.26.11 (#13710) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.26.9 to 3.26.11. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/461ef6c76dfe95d5c364de2f431ddbd31a417628...6db8d6351fd0be61f9ed8ebd12ccd35dcec51fea) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 9dad75d91..4e266c6ea 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@461ef6c76dfe95d5c364de2f431ddbd31a417628 # v3.26.9 + uses: github/codeql-action/init@6db8d6351fd0be61f9ed8ebd12ccd35dcec51fea # v3.26.11 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@461ef6c76dfe95d5c364de2f431ddbd31a417628 # v3.26.9 + uses: github/codeql-action/autobuild@6db8d6351fd0be61f9ed8ebd12ccd35dcec51fea # v3.26.11 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@461ef6c76dfe95d5c364de2f431ddbd31a417628 # v3.26.9 + uses: github/codeql-action/analyze@6db8d6351fd0be61f9ed8ebd12ccd35dcec51fea # v3.26.11 From cba2e765687f87f64e686a551d791887447de9c6 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 8 Oct 2024 17:13:00 +0100 Subject: [PATCH 0014/1708] cmd/containerboot: simplify k8s setup logic (#13627) Rearrange conditionals to reduce indentation and make it a bit easier to read the logic. Also makes some error message updates for better consistency with the recent decision around capitalising resource names and the upcoming addition of config secrets. Updates #cleanup Signed-off-by: Tom Proctor --- cmd/containerboot/settings.go | 65 +++++++++++++++++++---------------- 1 file changed, 36 insertions(+), 29 deletions(-) diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index d72aefbdf..fab4bd2fd 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -130,44 +130,51 @@ func (cfg *settings) setupKube(ctx context.Context) error { } canPatch, canCreate, err := kc.CheckSecretPermissions(ctx, cfg.KubeSecret) if err != nil { - return fmt.Errorf("Some Kubernetes permissions are missing, please check your RBAC configuration: %v", err) + return fmt.Errorf("some Kubernetes permissions are missing, please check your RBAC configuration: %v", err) } cfg.KubernetesCanPatch = canPatch s, err := kc.GetSecret(ctx, cfg.KubeSecret) - if err != nil && kubeclient.IsNotFoundErr(err) && !canCreate { - return fmt.Errorf("Tailscale state Secret %s does not exist and we don't have permissions to create it. 
"+ - "If you intend to store tailscale state elsewhere than a Kubernetes Secret, "+ - "you can explicitly set TS_KUBE_SECRET env var to an empty string. "+ - "Else ensure that RBAC is set up that allows the service account associated with this installation to create Secrets.", cfg.KubeSecret) - } else if err != nil && !kubeclient.IsNotFoundErr(err) { - return fmt.Errorf("Getting Tailscale state Secret %s: %v", cfg.KubeSecret, err) - } + if err != nil { + if !kubeclient.IsNotFoundErr(err) { + return fmt.Errorf("getting Tailscale state Secret %s: %v", cfg.KubeSecret, err) + } - if cfg.AuthKey == "" && !isOneStepConfig(cfg) { - if s == nil { - log.Print("TS_AUTHKEY not provided and kube secret does not exist, login will be interactive if needed.") - return nil + if !canCreate { + return fmt.Errorf("tailscale state Secret %s does not exist and we don't have permissions to create it. "+ + "If you intend to store tailscale state elsewhere than a Kubernetes Secret, "+ + "you can explicitly set TS_KUBE_SECRET env var to an empty string. "+ + "Else ensure that RBAC is set up that allows the service account associated with this installation to create Secrets.", cfg.KubeSecret) } - keyBytes, _ := s.Data["authkey"] - key := string(keyBytes) + } + + // Return early if we already have an auth key. + if cfg.AuthKey != "" || isOneStepConfig(cfg) { + return nil + } - if key != "" { - // This behavior of pulling authkeys from kube secrets was added - // at the same time as the patch permission, so we can enforce - // that we must be able to patch out the authkey after - // authenticating if you want to use this feature. This avoids - // us having to deal with the case where we might leave behind - // an unnecessary reusable authkey in a secret, like a rake in - // the grass. - if !cfg.KubernetesCanPatch { - return errors.New("authkey found in TS_KUBE_SECRET, but the pod doesn't have patch permissions on the secret to manage the authkey.") - } - cfg.AuthKey = key - } else { - log.Print("No authkey found in kube secret and TS_AUTHKEY not provided, login will be interactive if needed.") + if s == nil { + log.Print("TS_AUTHKEY not provided and state Secret does not exist, login will be interactive if needed.") + return nil + } + + keyBytes, _ := s.Data["authkey"] + key := string(keyBytes) + + if key != "" { + // Enforce that we must be able to patch out the authkey after + // authenticating if you want to use this feature. This avoids + // us having to deal with the case where we might leave behind + // an unnecessary reusable authkey in a secret, like a rake in + // the grass. + if !cfg.KubernetesCanPatch { + return errors.New("authkey found in TS_KUBE_SECRET, but the pod doesn't have patch permissions on the Secret to manage the authkey.") } + cfg.AuthKey = key } + + log.Print("No authkey found in state Secret and TS_AUTHKEY not provided, login will be interactive if needed.") + return nil } From 36cb2e4e5f3f48f9d651b22a9dc089efedc99d5d Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 8 Oct 2024 17:34:34 +0100 Subject: [PATCH 0015/1708] cmd/k8s-operator,k8s-operator: use default ProxyClass if set for ProxyGroup (#13720) The default ProxyClass can be set via helm chart or env var, and applies to all proxies that do not otherwise have an explicit ProxyClass set. 
This ensures proxies created by the new ProxyGroup CRD are consistent with the behaviour of existing proxies Nearby but unrelated changes: * Fix up double error logs (controller runtime logs returned errors) * Fix a couple of variable names Updates #13406 Signed-off-by: Tom Proctor --- cmd/k8s-operator/connector_test.go | 2 +- cmd/k8s-operator/deploy/chart/values.yaml | 2 +- .../crds/tailscale.com_proxygroups.yaml | 5 +- .../deploy/manifests/operator.yaml | 5 +- cmd/k8s-operator/ingress.go | 4 +- cmd/k8s-operator/ingress_test.go | 2 +- cmd/k8s-operator/operator.go | 19 +++--- cmd/k8s-operator/operator_test.go | 2 +- cmd/k8s-operator/proxyclass.go | 4 +- cmd/k8s-operator/proxyclass_test.go | 8 +-- cmd/k8s-operator/proxygroup.go | 59 ++++++++++-------- cmd/k8s-operator/proxygroup_test.go | 60 +++++++++++++++---- cmd/k8s-operator/svc.go | 4 +- k8s-operator/api.md | 2 +- k8s-operator/apis/v1alpha1/types_connector.go | 2 +- .../apis/v1alpha1/types_proxygroup.go | 5 +- k8s-operator/conditions.go | 2 +- 17 files changed, 118 insertions(+), 69 deletions(-) diff --git a/cmd/k8s-operator/connector_test.go b/cmd/k8s-operator/connector_test.go index 01c60bc9e..a4ba90d3d 100644 --- a/cmd/k8s-operator/connector_test.go +++ b/cmd/k8s-operator/connector_test.go @@ -278,7 +278,7 @@ func TestConnectorWithProxyClass(t *testing.T) { pc.Status = tsapi.ProxyClassStatus{ Conditions: []metav1.Condition{{ Status: metav1.ConditionTrue, - Type: string(tsapi.ProxyClassready), + Type: string(tsapi.ProxyClassReady), ObservedGeneration: pc.Generation, }}} }) diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index de003f149..e6f4cada4 100644 --- a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -80,7 +80,7 @@ proxyConfig: firewallMode: auto # If defined, this proxy class will be used as the default proxy class for # service and ingress resources that do not have a proxy class defined. It - # does not apply to Connector and ProxyGroup resources. + # does not apply to Connector resources. defaultProxyClass: "" # apiServerProxyConfig allows to configure whether the operator should expose diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index 035d04786..66701bdf4 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -63,8 +63,9 @@ spec: description: |- ProxyClass is the name of the ProxyClass custom resource that contains configuration options that should be applied to the resources created - for this ProxyGroup. If unset, the operator will create resources with - the default configuration. + for this ProxyGroup. If unset, and there is no default ProxyClass + configured, the operator will create resources with the default + configuration. type: string replicas: description: |- diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 14166fed9..1a812b736 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2475,8 +2475,9 @@ spec: description: |- ProxyClass is the name of the ProxyClass custom resource that contains configuration options that should be applied to the resources created - for this ProxyGroup. If unset, the operator will create resources with - the default configuration. + for this ProxyGroup. 
If unset, and there is no default ProxyClass + configured, the operator will create resources with the default + configuration. type: string replicas: description: |- diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index 700cf4be8..acc90d465 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -48,7 +48,7 @@ type IngressReconciler struct { // managing. This is only used for metrics. managedIngresses set.Slice[types.UID] - proxyDefaultClass string + defaultProxyClass string } var ( @@ -136,7 +136,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga } } - proxyClass := proxyClassForObject(ing, a.proxyDefaultClass) + proxyClass := proxyClassForObject(ing, a.defaultProxyClass) if proxyClass != "" { if ready, err := proxyClassIsReady(ctx, proxyClass, a.Client); err != nil { return fmt.Errorf("error verifying ProxyClass for Ingress: %w", err) diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index 8b18776b4..38a041dde 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -253,7 +253,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { pc.Status = tsapi.ProxyClassStatus{ Conditions: []metav1.Condition{{ Status: metav1.ConditionTrue, - Type: string(tsapi.ProxyClassready), + Type: string(tsapi.ProxyClassReady), ObservedGeneration: pc.Generation, }}} }) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 28895269d..ff29618df 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -109,7 +109,7 @@ func main() { proxyActAsDefaultLoadBalancer: isDefaultLoadBalancer, proxyTags: tags, proxyFirewallMode: tsFirewallMode, - proxyDefaultClass: defaultProxyClass, + defaultProxyClass: defaultProxyClass, } runReconcilers(rOpts) } @@ -286,7 +286,7 @@ func runReconcilers(opts reconcilerOpts) { recorder: eventRecorder, tsNamespace: opts.tailscaleNamespace, clock: tstime.DefaultClock{}, - proxyDefaultClass: opts.proxyDefaultClass, + defaultProxyClass: opts.defaultProxyClass, }) if err != nil { startlog.Fatalf("could not create service reconciler: %v", err) @@ -309,7 +309,7 @@ func runReconcilers(opts reconcilerOpts) { recorder: eventRecorder, Client: mgr.GetClient(), logger: opts.log.Named("ingress-reconciler"), - proxyDefaultClass: opts.proxyDefaultClass, + defaultProxyClass: opts.defaultProxyClass, }) if err != nil { startlog.Fatalf("could not create ingress reconciler: %v", err) @@ -476,10 +476,11 @@ func runReconcilers(opts reconcilerOpts) { clock: tstime.DefaultClock{}, tsClient: opts.tsClient, - tsNamespace: opts.tailscaleNamespace, - proxyImage: opts.proxyImage, - defaultTags: strings.Split(opts.proxyTags, ","), - tsFirewallMode: opts.proxyFirewallMode, + tsNamespace: opts.tailscaleNamespace, + proxyImage: opts.proxyImage, + defaultTags: strings.Split(opts.proxyTags, ","), + tsFirewallMode: opts.proxyFirewallMode, + defaultProxyClass: opts.defaultProxyClass, }) if err != nil { startlog.Fatalf("could not create ProxyGroup reconciler: %v", err) @@ -525,10 +526,10 @@ type reconcilerOpts struct { // Auto is usually the best choice, unless you want to explicitly set // specific mode for debugging purposes. proxyFirewallMode string - // proxyDefaultClass is the name of the ProxyClass to use as the default + // defaultProxyClass is the name of the ProxyClass to use as the default // class for proxies that do not have a ProxyClass set. // this is defined by an operator env variable. 
- proxyDefaultClass string + defaultProxyClass string } // enqueueAllIngressEgressProxySvcsinNS returns a reconcile request for each diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 7ea8c09e1..21e1d4313 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1064,7 +1064,7 @@ func TestProxyClassForService(t *testing.T) { pc.Status = tsapi.ProxyClassStatus{ Conditions: []metav1.Condition{{ Status: metav1.ConditionTrue, - Type: string(tsapi.ProxyClassready), + Type: string(tsapi.ProxyClassReady), ObservedGeneration: pc.Generation, }}} }) diff --git a/cmd/k8s-operator/proxyclass.go b/cmd/k8s-operator/proxyclass.go index b5d213746..882a9030f 100644 --- a/cmd/k8s-operator/proxyclass.go +++ b/cmd/k8s-operator/proxyclass.go @@ -98,9 +98,9 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re if errs := pcr.validate(pc); errs != nil { msg := fmt.Sprintf(messageProxyClassInvalid, errs.ToAggregate().Error()) pcr.recorder.Event(pc, corev1.EventTypeWarning, reasonProxyClassInvalid, msg) - tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassready, metav1.ConditionFalse, reasonProxyClassInvalid, msg, pc.Generation, pcr.clock, logger) + tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, pc.Generation, pcr.clock, logger) } else { - tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassready, metav1.ConditionTrue, reasonProxyClassValid, reasonProxyClassValid, pc.Generation, pcr.clock, logger) + tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionTrue, reasonProxyClassValid, reasonProxyClassValid, pc.Generation, pcr.clock, logger) } if !apiequality.Semantic.DeepEqual(oldPCStatus, pc.Status) { if err := pcr.Client.Status().Update(ctx, pc); err != nil { diff --git a/cmd/k8s-operator/proxyclass_test.go b/cmd/k8s-operator/proxyclass_test.go index c52fbb187..eb68811fc 100644 --- a/cmd/k8s-operator/proxyclass_test.go +++ b/cmd/k8s-operator/proxyclass_test.go @@ -69,7 +69,7 @@ func TestProxyClass(t *testing.T) { // 1. A valid ProxyClass resource gets its status updated to Ready. expectReconciled(t, pcr, "", "test") pc.Status.Conditions = append(pc.Status.Conditions, metav1.Condition{ - Type: string(tsapi.ProxyClassready), + Type: string(tsapi.ProxyClassReady), Status: metav1.ConditionTrue, Reason: reasonProxyClassValid, Message: reasonProxyClassValid, @@ -85,7 +85,7 @@ func TestProxyClass(t *testing.T) { }) expectReconciled(t, pcr, "", "test") msg := `ProxyClass is not valid: .spec.statefulSet.labels: Invalid value: "?!someVal": a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')` - tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassready, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) + tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) expectEqual(t, fc, pc, nil) expectedEvent := "Warning ProxyClassInvalid ProxyClass is not valid: .spec.statefulSet.labels: Invalid value: \"?!someVal\": a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 
'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')" expectEvents(t, fr, []string{expectedEvent}) @@ -99,7 +99,7 @@ func TestProxyClass(t *testing.T) { }) expectReconciled(t, pcr, "", "test") msg = `ProxyClass is not valid: spec.statefulSet.pod.tailscaleContainer.image: Invalid value: "FOO bar": invalid reference format: repository name (library/FOO bar) must be lowercase` - tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassready, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) + tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) expectEqual(t, fc, pc, nil) expectedEvent = `Warning ProxyClassInvalid ProxyClass is not valid: spec.statefulSet.pod.tailscaleContainer.image: Invalid value: "FOO bar": invalid reference format: repository name (library/FOO bar) must be lowercase` expectEvents(t, fr, []string{expectedEvent}) @@ -118,7 +118,7 @@ func TestProxyClass(t *testing.T) { }) expectReconciled(t, pcr, "", "test") msg = `ProxyClass is not valid: spec.statefulSet.pod.tailscaleInitContainer.image: Invalid value: "FOO bar": invalid reference format: repository name (library/FOO bar) must be lowercase` - tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassready, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) + tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) expectEqual(t, fc, pc, nil) expectedEvent = `Warning ProxyClassInvalid ProxyClass is not valid: spec.statefulSet.pod.tailscaleInitContainer.image: Invalid value: "FOO bar": invalid reference format: repository name (library/FOO bar) must be lowercase` expectEvents(t, fr, []string{expectedEvent}) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 99f48f323..7ba59586b 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -58,10 +58,11 @@ type ProxyGroupReconciler struct { tsClient tsClient // User-specified defaults from the helm installation. - tsNamespace string - proxyImage string - defaultTags []string - tsFirewallMode string + tsNamespace string + proxyImage string + defaultTags []string + tsFirewallMode string + defaultProxyClass string mu sync.Mutex // protects following proxyGroups set.Slice[types.UID] // for proxygroups gauge @@ -125,24 +126,42 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ // operation is underway. 
logger.Infof("ensuring ProxyGroup is set up") pg.Finalizers = append(pg.Finalizers, FinalizerName) - if err := r.Update(ctx, pg); err != nil { - logger.Errorf("error adding finalizer: %w", err) + if err = r.Update(ctx, pg); err != nil { + err = fmt.Errorf("error adding finalizer: %w", err) return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, reasonProxyGroupCreationFailed) } } - if err := r.validate(pg); err != nil { - logger.Errorf("error validating ProxyGroup spec: %w", err) + if err = r.validate(pg); err != nil { message := fmt.Sprintf("ProxyGroup is invalid: %s", err) r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupInvalid, message) return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupInvalid, message) } - if err = r.maybeProvision(ctx, pg); err != nil { - logger.Errorf("error provisioning ProxyGroup resources: %w", err) - message := fmt.Sprintf("failed provisioning ProxyGroup: %s", err) - r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, message) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, message) + proxyClassName := r.defaultProxyClass + if pg.Spec.ProxyClass != "" { + proxyClassName = pg.Spec.ProxyClass + } + + var proxyClass *tsapi.ProxyClass + if proxyClassName != "" { + proxyClass = new(tsapi.ProxyClass) + if err = r.Get(ctx, types.NamespacedName{Name: proxyClassName}, proxyClass); err != nil { + err = fmt.Errorf("error getting ProxyGroup's ProxyClass %s: %s", proxyClassName, err) + r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error()) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, err.Error()) + } + if !tsoperator.ProxyClassIsReady(proxyClass) { + message := fmt.Sprintf("the ProxyGroup's ProxyClass %s is not yet in a ready state, waiting...", proxyClassName) + logger.Info(message) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) + } + } + + if err = r.maybeProvision(ctx, pg, proxyClass); err != nil { + err = fmt.Errorf("error provisioning ProxyGroup resources: %w", err) + r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error()) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, err.Error()) } desiredReplicas := int(pgReplicas(pg)) @@ -162,25 +181,13 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ return setStatusReady(pg, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady) } -func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup) error { +func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) error { logger := r.logger(pg.Name) r.mu.Lock() r.proxyGroups.Add(pg.UID) gaugeProxyGroupResources.Set(int64(r.proxyGroups.Len())) r.mu.Unlock() - var proxyClass *tsapi.ProxyClass - if pg.Spec.ProxyClass != "" { - proxyClass = new(tsapi.ProxyClass) - if err := r.Get(ctx, types.NamespacedName{Name: pg.Spec.ProxyClass}, proxyClass); err != nil { - return fmt.Errorf("failed to get ProxyClass: %w", err) - } - if !tsoperator.ProxyClassIsReady(proxyClass) { - logger.Infof("ProxyClass %s specified for the ProxyGroup, but it is not (yet) in a ready state, waiting...", pg.Spec.ProxyClass) - return nil - } - } - cfgHash, err := r.ensureConfigSecretsCreated(ctx, pg, proxyClass) if err != nil { return fmt.Errorf("error provisioning config Secrets: %w", err) 
diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 402d67949..b5a6a4d8d 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -10,6 +10,7 @@ import ( "encoding/json" "fmt" "testing" + "time" "github.com/google/go-cmp/cmp" "go.uber.org/zap" @@ -29,7 +30,21 @@ import ( const testProxyImage = "tailscale/tailscale:test" +var defaultProxyClassAnnotations = map[string]string{ + "some-annotation": "from-the-proxy-class", +} + func TestProxyGroup(t *testing.T) { + pc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default-pc", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Annotations: defaultProxyClassAnnotations, + }, + }, + } pg := &tsapi.ProxyGroup{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -39,26 +54,48 @@ func TestProxyGroup(t *testing.T) { fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). - WithObjects(pg). - WithStatusSubresource(pg). + WithObjects(pg, pc). + WithStatusSubresource(pg, pc). Build() tsClient := &fakeTSClient{} zl, _ := zap.NewDevelopment() fr := record.NewFakeRecorder(1) cl := tstest.NewClock(tstest.ClockOpts{}) reconciler := &ProxyGroupReconciler{ - tsNamespace: tsNamespace, - proxyImage: testProxyImage, - defaultTags: []string{"tag:test-tag"}, - tsFirewallMode: "auto", - Client: fc, - tsClient: tsClient, - recorder: fr, - l: zl.Sugar(), - clock: cl, + tsNamespace: tsNamespace, + proxyImage: testProxyImage, + defaultTags: []string{"tag:test-tag"}, + tsFirewallMode: "auto", + defaultProxyClass: "default-pc", + + Client: fc, + tsClient: tsClient, + recorder: fr, + l: zl.Sugar(), + clock: cl, } + t.Run("proxyclass_not_ready", func(t *testing.T) { + expectReconciled(t, reconciler, "", pg.Name) + + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass default-pc is not yet in a ready state, waiting...", 0, cl, zl.Sugar()) + expectEqual(t, fc, pg, nil) + }) + t.Run("observe_ProxyGroupCreating_status_reason", func(t *testing.T) { + pc.Status = tsapi.ProxyClassStatus{ + Conditions: []metav1.Condition{{ + Type: string(tsapi.ProxyClassReady), + Status: metav1.ConditionTrue, + Reason: reasonProxyClassValid, + Message: reasonProxyClassValid, + LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + }}, + } + if err := fc.Status().Update(context.Background(), pc); err != nil { + t.Fatal(err) + } + expectReconciled(t, reconciler, "", pg.Name) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) @@ -161,6 +198,7 @@ func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.Prox roleBinding := pgRoleBinding(pg, tsNamespace) serviceAccount := pgServiceAccount(pg, tsNamespace) statefulSet := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", "") + statefulSet.Annotations = defaultProxyClassAnnotations if shouldExist { expectEqual(t, fc, role, nil) diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index 22487ee26..f45f92246 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -64,7 +64,7 @@ type ServiceReconciler struct { clock tstime.Clock - proxyDefaultClass string + defaultProxyClass string } var ( @@ -215,7 +215,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga return nil } - proxyClass := proxyClassForObject(svc, a.proxyDefaultClass) + proxyClass := 
proxyClassForObject(svc, a.defaultProxyClass) if proxyClass != "" { if ready, err := proxyClassIsReady(ctx, proxyClass, a.Client); err != nil { errMsg := fmt.Errorf("error verifying ProxyClass for Service: %w", err) diff --git a/k8s-operator/api.md b/k8s-operator/api.md index fd0a4e6ce..e8a6e248a 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -526,7 +526,7 @@ _Appears in:_ | `tags` _[Tags](#tags)_ | Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s].
If you specify custom tags here, make sure you also make the operator<br />
an owner of these tags.<br />
See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.<br />
Tags cannot be changed once a ProxyGroup device has been created.<br />
Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`<br />
Type: string<br />
| | `replicas` _integer_ | Replicas specifies how many replicas to create the StatefulSet with.<br />
Defaults to 2. | | | | `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix is the hostname prefix to use for tailnet devices created<br />
by the ProxyGroup. Each device will have the integer number from its<br />
StatefulSet pod appended to this prefix to form the full hostname.<br />
HostnamePrefix can contain lower case letters, numbers and dashes, it<br />
must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$`<br />
Type: string<br />
| -| `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that contains<br />
configuration options that should be applied to the resources created<br />
for this ProxyGroup. If unset, the operator will create resources with<br />
the default configuration. | | | +| `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that contains<br />
configuration options that should be applied to the resources created<br />
for this ProxyGroup. If unset, and there is no default ProxyClass<br />
configured, the operator will create resources with the default<br />
configuration. | | | #### ProxyGroupStatus diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index 175d62eea..07d05e1a5 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -171,7 +171,7 @@ type ConditionType string const ( ConnectorReady ConditionType = `ConnectorReady` - ProxyClassready ConditionType = `ProxyClassReady` + ProxyClassReady ConditionType = `ProxyClassReady` ProxyGroupReady ConditionType = `ProxyGroupReady` ProxyReady ConditionType = `TailscaleProxyReady` // a Tailscale-specific condition type for corev1.Service RecorderReady ConditionType = `RecorderReady` diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index ef1e8c8c1..7e5515ba9 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -64,8 +64,9 @@ type ProxyGroupSpec struct { // ProxyClass is the name of the ProxyClass custom resource that contains // configuration options that should be applied to the resources created - // for this ProxyGroup. If unset, the operator will create resources with - // the default configuration. + // for this ProxyGroup. If unset, and there is no default ProxyClass + // configured, the operator will create resources with the default + // configuration. // +optional ProxyClass string `json:"proxyClass,omitempty"` } diff --git a/k8s-operator/conditions.go b/k8s-operator/conditions.go index 702ed2bd3..ace0fb7e3 100644 --- a/k8s-operator/conditions.go +++ b/k8s-operator/conditions.go @@ -137,7 +137,7 @@ func updateCondition(conds []metav1.Condition, conditionType tsapi.ConditionType func ProxyClassIsReady(pc *tsapi.ProxyClass) bool { idx := xslices.IndexFunc(pc.Status.Conditions, func(cond metav1.Condition) bool { - return cond.Type == string(tsapi.ProxyClassready) + return cond.Type == string(tsapi.ProxyClassReady) }) if idx == -1 { return false From 8ee7f82bf414264ca75b7d6f6a58597d7aef0091 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Tue, 8 Oct 2024 12:27:00 -0400 Subject: [PATCH 0016/1708] net/netcheck: don't panic if a region has no Nodes Updates #13728 Signed-off-by: Andrew Dunham Change-Id: I1e8319d6b2da013ae48f15113b30c9333e69cc0b --- net/netcheck/netcheck.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 003b5fbf8..dbb85cf9c 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -503,6 +503,10 @@ func makeProbePlanInitial(dm *tailcfg.DERPMap, ifState *netmon.State) (plan prob plan = make(probePlan) for _, reg := range dm.Regions { + if len(reg.Nodes) == 0 { + continue + } + var p4 []probe var p6 []probe for try := 0; try < 3; try++ { From 861dc3631c4337c13d19308d9e0958d030bfcbf3 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 8 Oct 2024 18:35:23 +0100 Subject: [PATCH 0017/1708] cmd/{k8s-operator,containerboot},kube/egressservices: fix Pod IP check for dual stack clusters (#13721) Currently egress Services for ProxyGroup only work for Pods and Services with IPv4 addresses. Ensure that it works on dual stack clusters by reading proxy Pod's IP from the .status.podIPs list that always contains both IPv4 and IPv6 address (if the Pod has them) rather than .status.podIP that could contain IPv6 only for a dual stack cluster. 
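For reference, here is a minimal sketch of the downward API wiring that this change relies on (it matches the POD_IPS environment variable added to the proxy template in the hunk below); status.podIPs reaches the container as a comma-separated list that can carry both address families:

    env:
      - name: POD_IPS  # e.g. "10.136.0.6,2600:1900:4011:161:0:e:0:6"
        valueFrom:
          fieldRef:
            fieldPath: status.podIPs
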
Updates tailscale/tailscale#13406 Signed-off-by: Irbe Krumina --- cmd/containerboot/main.go | 33 ++-------------- cmd/containerboot/services.go | 10 ++--- cmd/containerboot/settings.go | 57 +++++++++++++++++++++++++++ cmd/k8s-operator/egress-eps.go | 29 ++++++++++++-- cmd/k8s-operator/egress-eps_test.go | 18 +++++++-- cmd/k8s-operator/proxygroup_specs.go | 5 ++- kube/egressservices/egressservices.go | 2 +- 7 files changed, 109 insertions(+), 45 deletions(-) diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 5ebe22e5f..4c8ba5807 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -132,36 +132,9 @@ func newNetfilterRunner(logf logger.Logf) (linuxfw.NetfilterRunner, error) { func main() { log.SetPrefix("boot: ") tailscale.I_Acknowledge_This_API_Is_Unstable = true - cfg := &settings{ - AuthKey: defaultEnvs([]string{"TS_AUTHKEY", "TS_AUTH_KEY"}, ""), - Hostname: defaultEnv("TS_HOSTNAME", ""), - Routes: defaultEnvStringPointer("TS_ROUTES"), - ServeConfigPath: defaultEnv("TS_SERVE_CONFIG", ""), - ProxyTargetIP: defaultEnv("TS_DEST_IP", ""), - ProxyTargetDNSName: defaultEnv("TS_EXPERIMENTAL_DEST_DNS_NAME", ""), - TailnetTargetIP: defaultEnv("TS_TAILNET_TARGET_IP", ""), - TailnetTargetFQDN: defaultEnv("TS_TAILNET_TARGET_FQDN", ""), - DaemonExtraArgs: defaultEnv("TS_TAILSCALED_EXTRA_ARGS", ""), - ExtraArgs: defaultEnv("TS_EXTRA_ARGS", ""), - InKubernetes: os.Getenv("KUBERNETES_SERVICE_HOST") != "", - UserspaceMode: defaultBool("TS_USERSPACE", true), - StateDir: defaultEnv("TS_STATE_DIR", ""), - AcceptDNS: defaultEnvBoolPointer("TS_ACCEPT_DNS"), - KubeSecret: defaultEnv("TS_KUBE_SECRET", "tailscale"), - SOCKSProxyAddr: defaultEnv("TS_SOCKS5_SERVER", ""), - HTTPProxyAddr: defaultEnv("TS_OUTBOUND_HTTP_PROXY_LISTEN", ""), - Socket: defaultEnv("TS_SOCKET", "/tmp/tailscaled.sock"), - AuthOnce: defaultBool("TS_AUTH_ONCE", false), - Root: defaultEnv("TS_TEST_ONLY_ROOT", "/"), - TailscaledConfigFilePath: tailscaledConfigFilePath(), - AllowProxyingClusterTrafficViaIngress: defaultBool("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS", false), - PodIP: defaultEnv("POD_IP", ""), - EnableForwardingOptimizations: defaultBool("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS", false), - HealthCheckAddrPort: defaultEnv("TS_HEALTHCHECK_ADDR_PORT", ""), - EgressSvcsCfgPath: defaultEnv("TS_EGRESS_SERVICES_CONFIG_PATH", ""), - } - if err := cfg.validate(); err != nil { + cfg, err := configFromEnv() + if err != nil { log.Fatalf("invalid configuration: %v", err) } @@ -612,7 +585,7 @@ runLoop: kc: kc, stateSecret: cfg.KubeSecret, netmapChan: egressSvcsNotify, - podIP: cfg.PodIP, + podIPv4: cfg.PodIPv4, tailnetAddrs: addrs, } go func() { diff --git a/cmd/containerboot/services.go b/cmd/containerboot/services.go index e46c7c015..4da7286b7 100644 --- a/cmd/containerboot/services.go +++ b/cmd/containerboot/services.go @@ -46,7 +46,7 @@ type egressProxy struct { netmapChan chan ipn.Notify // chan to receive netmap updates on - podIP string // never empty string + podIPv4 string // never empty string, currently only IPv4 is supported // tailnetFQDNs is the egress service FQDN to tailnet IP mappings that // were last used to configure firewall rules for this proxy. 
@@ -361,7 +361,7 @@ func (ep *egressProxy) getStatus(ctx context.Context) (*egressservices.Status, e if err := json.Unmarshal([]byte(raw), status); err != nil { return nil, fmt.Errorf("error unmarshalling previous config: %w", err) } - if reflect.DeepEqual(status.PodIP, ep.podIP) { + if reflect.DeepEqual(status.PodIPv4, ep.podIPv4) { return status, nil } return nil, nil @@ -374,7 +374,7 @@ func (ep *egressProxy) setStatus(ctx context.Context, status *egressservices.Sta if status == nil { status = &egressservices.Status{} } - status.PodIP = ep.podIP + status.PodIPv4 = ep.podIPv4 secret, err := ep.kc.GetSecret(ctx, ep.stateSecret) if err != nil { return fmt.Errorf("error retrieving state Secret: %w", err) @@ -565,7 +565,7 @@ func servicesStatusIsEqual(st, st1 *egressservices.Status) bool { if st == nil || st1 == nil { return false } - st.PodIP = "" - st1.PodIP = "" + st.PodIPv4 = "" + st1.PodIPv4 = "" return reflect.DeepEqual(*st, *st1) } diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index fab4bd2fd..742713e77 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -14,6 +14,7 @@ import ( "os" "path" "strconv" + "strings" "tailscale.com/ipn/conffile" "tailscale.com/kube/kubeclient" @@ -62,11 +63,67 @@ type settings struct { // PodIP is the IP of the Pod if running in Kubernetes. This is used // when setting up rules to proxy cluster traffic to cluster ingress // target. + // Deprecated: use PodIPv4, PodIPv6 instead to support dual stack clusters PodIP string + PodIPv4 string + PodIPv6 string HealthCheckAddrPort string EgressSvcsCfgPath string } +func configFromEnv() (*settings, error) { + cfg := &settings{ + AuthKey: defaultEnvs([]string{"TS_AUTHKEY", "TS_AUTH_KEY"}, ""), + Hostname: defaultEnv("TS_HOSTNAME", ""), + Routes: defaultEnvStringPointer("TS_ROUTES"), + ServeConfigPath: defaultEnv("TS_SERVE_CONFIG", ""), + ProxyTargetIP: defaultEnv("TS_DEST_IP", ""), + ProxyTargetDNSName: defaultEnv("TS_EXPERIMENTAL_DEST_DNS_NAME", ""), + TailnetTargetIP: defaultEnv("TS_TAILNET_TARGET_IP", ""), + TailnetTargetFQDN: defaultEnv("TS_TAILNET_TARGET_FQDN", ""), + DaemonExtraArgs: defaultEnv("TS_TAILSCALED_EXTRA_ARGS", ""), + ExtraArgs: defaultEnv("TS_EXTRA_ARGS", ""), + InKubernetes: os.Getenv("KUBERNETES_SERVICE_HOST") != "", + UserspaceMode: defaultBool("TS_USERSPACE", true), + StateDir: defaultEnv("TS_STATE_DIR", ""), + AcceptDNS: defaultEnvBoolPointer("TS_ACCEPT_DNS"), + KubeSecret: defaultEnv("TS_KUBE_SECRET", "tailscale"), + SOCKSProxyAddr: defaultEnv("TS_SOCKS5_SERVER", ""), + HTTPProxyAddr: defaultEnv("TS_OUTBOUND_HTTP_PROXY_LISTEN", ""), + Socket: defaultEnv("TS_SOCKET", "/tmp/tailscaled.sock"), + AuthOnce: defaultBool("TS_AUTH_ONCE", false), + Root: defaultEnv("TS_TEST_ONLY_ROOT", "/"), + TailscaledConfigFilePath: tailscaledConfigFilePath(), + AllowProxyingClusterTrafficViaIngress: defaultBool("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS", false), + PodIP: defaultEnv("POD_IP", ""), + EnableForwardingOptimizations: defaultBool("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS", false), + HealthCheckAddrPort: defaultEnv("TS_HEALTHCHECK_ADDR_PORT", ""), + EgressSvcsCfgPath: defaultEnv("TS_EGRESS_SERVICES_CONFIG_PATH", ""), + } + podIPs, ok := os.LookupEnv("POD_IPS") + if ok { + ips := strings.Split(podIPs, ",") + if len(ips) > 2 { + return nil, fmt.Errorf("POD_IPs can contain at most 2 IPs, got %d (%v)", len(ips), ips) + } + for _, ip := range ips { + parsed, err := netip.ParseAddr(ip) + if err != nil { + return nil, 
fmt.Errorf("error parsing IP address %s: %w", ip, err) + } + if parsed.Is4() { + cfg.PodIPv4 = parsed.String() + continue + } + cfg.PodIPv6 = parsed.String() + } + } + if err := cfg.validate(); err != nil { + return nil, fmt.Errorf("invalid configuration: %v", err) + } + return cfg, nil +} + func (s *settings) validate() error { if s.TailscaledConfigFilePath != "" { dir, file := path.Split(s.TailscaledConfigFilePath) diff --git a/cmd/k8s-operator/egress-eps.go b/cmd/k8s-operator/egress-eps.go index fa13c525f..e8b327263 100644 --- a/cmd/k8s-operator/egress-eps.go +++ b/cmd/k8s-operator/egress-eps.go @@ -9,6 +9,7 @@ import ( "context" "encoding/json" "fmt" + "net/netip" "reflect" "strings" @@ -132,6 +133,19 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ return res, nil } +func podIPv4(pod *corev1.Pod) (string, error) { + for _, ip := range pod.Status.PodIPs { + parsed, err := netip.ParseAddr(ip.IP) + if err != nil { + return "", fmt.Errorf("error parsing IP address %s: %w", ip, err) + } + if parsed.Is4() { + return parsed.String(), nil + } + } + return "", nil +} + // podIsReadyToRouteTraffic returns true if it appears that the proxy Pod has configured firewall rules to be able to // route traffic to the given tailnet service. It retrieves the proxy's state Secret and compares the tailnet service // status written there to the desired service configuration. @@ -142,14 +156,21 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod l.Debugf("proxy Pod is being deleted, ignore") return false, nil } - podIP := pod.Status.PodIP + podIP, err := podIPv4(&pod) + if err != nil { + return false, fmt.Errorf("error determining Pod IP address: %v", err) + } + if podIP == "" { + l.Infof("[unexpected] Pod does not have an IPv4 address, and IPv6 is not currently supported") + return false, nil + } stateS := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: pod.Name, Namespace: pod.Namespace, }, } - err := er.Get(ctx, client.ObjectKeyFromObject(stateS), stateS) + err = er.Get(ctx, client.ObjectKeyFromObject(stateS), stateS) if apierrors.IsNotFound(err) { l.Debugf("proxy does not have a state Secret, waiting...") return false, nil @@ -166,8 +187,8 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod if err := json.Unmarshal(svcStatusBS, svcStatus); err != nil { return false, fmt.Errorf("error unmarshalling egress service status: %w", err) } - if !strings.EqualFold(podIP, svcStatus.PodIP) { - l.Infof("proxy's egress service status is for Pod IP %s, current proxy's Pod IP %s, waiting for the proxy to reconfigure...", svcStatus.PodIP, podIP) + if !strings.EqualFold(podIP, svcStatus.PodIPv4) { + l.Infof("proxy's egress service status is for Pod IP %s, current proxy's Pod IP %s, waiting for the proxy to reconfigure...", svcStatus.PodIPv4, podIP) return false, nil } st, ok := (*svcStatus).Services[tailnetSvcName] diff --git a/cmd/k8s-operator/egress-eps_test.go b/cmd/k8s-operator/egress-eps_test.go index 00d13b2a7..806f739fd 100644 --- a/cmd/k8s-operator/egress-eps_test.go +++ b/cmd/k8s-operator/egress-eps_test.go @@ -98,7 +98,7 @@ func TestTailscaleEgressEndpointSlices(t *testing.T) { t.Run("pods_are_ready_to_route_traffic", func(t *testing.T) { pod, stateS := podAndSecretForProxyGroup("foo") - stBs := serviceStatusForPodIP(t, svc, pod.Status.PodIP, port) + stBs := serviceStatusForPodIP(t, svc, pod.Status.PodIPs[0].IP, port) mustUpdate(t, fc, "operator-ns", stateS.Name, func(s *corev1.Secret) { mak.Set(&s.Data, 
egressservices.KeyEgressServices, stBs) }) @@ -114,6 +114,16 @@ func TestTailscaleEgressEndpointSlices(t *testing.T) { }) expectEqual(t, fc, eps, nil) }) + t.Run("status_does_not_match_pod_ip", func(t *testing.T) { + _, stateS := podAndSecretForProxyGroup("foo") // replica Pod has IP 10.0.0.1 + stBs := serviceStatusForPodIP(t, svc, "10.0.0.2", port) // status is for a Pod with IP 10.0.0.2 + mustUpdate(t, fc, "operator-ns", stateS.Name, func(s *corev1.Secret) { + mak.Set(&s.Data, egressservices.KeyEgressServices, stBs) + }) + expectReconciled(t, er, "operator-ns", "foo") + eps.Endpoints = []discoveryv1.Endpoint{} + expectEqual(t, fc, eps, nil) + }) } func configMapForSvc(t *testing.T, svc *corev1.Service, p uint16) *corev1.ConfigMap { @@ -162,7 +172,7 @@ func serviceStatusForPodIP(t *testing.T, svc *corev1.Service, ip string, p uint1 } svcName := tailnetSvcName(svc) st := egressservices.Status{ - PodIP: ip, + PodIPv4: ip, Services: map[string]*egressservices.ServiceStatus{svcName: &svcSt}, } bs, err := json.Marshal(st) @@ -181,7 +191,9 @@ func podAndSecretForProxyGroup(pg string) (*corev1.Pod, *corev1.Secret) { UID: "foo", }, Status: corev1.PodStatus{ - PodIP: "10.0.0.1", + PodIPs: []corev1.PodIP{ + {IP: "10.0.0.1"}, + }, }, } s := &corev1.Secret{ diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index a1ec9ccde..100c0707d 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -93,10 +93,11 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHa Env: func() []corev1.EnvVar { envs := []corev1.EnvVar{ { - Name: "POD_IP", + // TODO(irbekrm): verify that .status.podIPs are always set, else read in .status.podIP as well. + Name: "POD_IPS", // this will be a comma separate list i.e 10.136.0.6,2600:1900:4011:161:0:e:0:6 ValueFrom: &corev1.EnvVarSource{ FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "status.podIP", + FieldPath: "status.podIPs", }, }, }, diff --git a/kube/egressservices/egressservices.go b/kube/egressservices/egressservices.go index f634458d9..428b476b9 100644 --- a/kube/egressservices/egressservices.go +++ b/kube/egressservices/egressservices.go @@ -86,7 +86,7 @@ func (pm PortMap) MarshalText() ([]byte, error) { // Status represents the currently configured firewall rules for all egress // services for a proxy identified by the PodIP. type Status struct { - PodIP string `json:"podIP"` + PodIPv4 string `json:"podIPv4"` // All egress service status keyed by service name. 
Services map[string]*ServiceStatus `json:"services"` } From 841eaacb07b797fbca93d63b4cbe42132e1e1cff Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 8 Oct 2024 10:34:08 -0700 Subject: [PATCH 0018/1708] net/sockstats: quiet some log spam in release builds Updates #13731 Change-Id: Ibee85426827ebb9e43a1c42a9c07c847daa50117 Signed-off-by: Brad Fitzpatrick --- net/sockstats/sockstats_tsgo.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/net/sockstats/sockstats_tsgo.go b/net/sockstats/sockstats_tsgo.go index 2d1ccd5a3..af691302f 100644 --- a/net/sockstats/sockstats_tsgo.go +++ b/net/sockstats/sockstats_tsgo.go @@ -18,6 +18,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" + "tailscale.com/version" ) const IsAvailable = true @@ -156,7 +157,11 @@ func withSockStats(ctx context.Context, label Label, logf logger.Logf) context.C } } willOverwrite := func(trace *net.SockTrace) { - logf("sockstats: trace %q was overwritten by another", label) + if version.IsUnstableBuild() { + // Only spam about this in dev builds. + // See https://github.com/tailscale/tailscale/issues/13731 for known problems. + logf("sockstats: trace %q was overwritten by another", label) + } } return net.WithSockTrace(ctx, &net.SockTrace{ From 83efadee9fc01ac8bcabd397a37c6c0eb3a1bea5 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 8 Oct 2024 19:48:18 +0100 Subject: [PATCH 0019/1708] kube/egressservices: improve egress ports config readability (#13722) Instead of converting our PortMap struct to a string during marshalling for use as a key, convert the whole collection of PortMaps to a list of PortMap objects, which improves the readability of the JSON config while still keeping the data structure we need in the code. Updates #13406 Signed-off-by: Tom Proctor --- kube/egressservices/egressservices.go | 60 +++++++++++----------- kube/egressservices/egressservices_test.go | 17 +++--- 2 files changed, 39 insertions(+), 38 deletions(-) diff --git a/kube/egressservices/egressservices.go b/kube/egressservices/egressservices.go index 428b476b9..04a1c362b 100644 --- a/kube/egressservices/egressservices.go +++ b/kube/egressservices/egressservices.go @@ -9,11 +9,8 @@ package egressservices import ( - "encoding" - "fmt" + "encoding/json" "net/netip" - "strconv" - "strings" ) // KeyEgressServices is name of the proxy state Secret field that contains the @@ -31,7 +28,7 @@ type Config struct { // should be proxied. TailnetTarget TailnetTarget `json:"tailnetTarget"` // Ports contains mappings for ports that can be accessed on the tailnet target. - Ports map[PortMap]struct{} `json:"ports"` + Ports PortMaps `json:"ports"` } // TailnetTarget is the tailnet target to which traffic for the egress service @@ -52,35 +49,38 @@ type PortMap struct { TargetPort uint16 `json:"targetPort"` } -// PortMap is used as a Config.Ports map key. Config needs to be serialized/deserialized to/from JSON. JSON only -// supports string map keys, so we need to implement TextMarshaler/TextUnmarshaler to convert PortMap to string and -// back. 
-var _ encoding.TextMarshaler = PortMap{} -var _ encoding.TextUnmarshaler = &PortMap{} - -func (pm *PortMap) UnmarshalText(t []byte) error { - tt := string(t) - ss := strings.Split(tt, ":") - if len(ss) != 3 { - return fmt.Errorf("error unmarshalling portmap from JSON, wants a portmap in form ::, got %q", tt) - } - pm.Protocol = ss[0] - matchPort, err := strconv.ParseUint(ss[1], 10, 16) - if err != nil { - return fmt.Errorf("error converting match port %q to uint16: %w", ss[1], err) +type PortMaps map[PortMap]struct{} + +// PortMaps is a list of PortMap structs, however, we want to use it as a set +// with efficient lookups in code. It implements custom JSON marshalling +// methods to convert between being a list in JSON and a set (map with empty +// values) in code. +var _ json.Marshaler = &PortMaps{} +var _ json.Marshaler = PortMaps{} +var _ json.Unmarshaler = &PortMaps{} + +func (p *PortMaps) UnmarshalJSON(data []byte) error { + *p = make(map[PortMap]struct{}) + + var l []PortMap + if err := json.Unmarshal(data, &l); err != nil { + return err } - pm.MatchPort = uint16(matchPort) - targetPort, err := strconv.ParseUint(ss[2], 10, 16) - if err != nil { - return fmt.Errorf("error converting target port %q to uint16: %w", ss[2], err) + + for _, pm := range l { + (*p)[pm] = struct{}{} } - pm.TargetPort = uint16(targetPort) + return nil } -func (pm PortMap) MarshalText() ([]byte, error) { - s := fmt.Sprintf("%s:%d:%d", pm.Protocol, pm.MatchPort, pm.TargetPort) - return []byte(s), nil +func (p PortMaps) MarshalJSON() ([]byte, error) { + l := make([]PortMap, 0, len(p)) + for pm := range p { + l = append(l, pm) + } + + return json.Marshal(l) } // Status represents the currently configured firewall rules for all egress @@ -94,7 +94,7 @@ type Status struct { // ServiceStatus is the currently configured firewall rules for an egress // service. type ServiceStatus struct { - Ports map[PortMap]struct{} `json:"ports"` + Ports PortMaps `json:"ports"` // TailnetTargetIPs are the tailnet target IPs that were used to // configure these firewall rules. For a TailnetTarget with IP set, this // is the same as IP. 
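To make the readability gain concrete, the serialized ports configuration changes shape as follows (both literals are taken verbatim from the test expectations in the next hunk):

    # before: each PortMap flattened into a string map key
    {"ports":{"tcp:4003:80":{}}}
    # after: PortMaps marshalled as a list of objects
    {"ports":[{"protocol":"tcp","matchPort":4003,"targetPort":80}]}
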
diff --git a/kube/egressservices/egressservices_test.go b/kube/egressservices/egressservices_test.go index 5e5651e77..d6f952ea0 100644 --- a/kube/egressservices/egressservices_test.go +++ b/kube/egressservices/egressservices_test.go @@ -5,8 +5,9 @@ package egressservices import ( "encoding/json" - "reflect" "testing" + + "github.com/google/go-cmp/cmp" ) func Test_jsonUnmarshalConfig(t *testing.T) { @@ -18,7 +19,7 @@ func Test_jsonUnmarshalConfig(t *testing.T) { }{ { name: "success", - bs: []byte(`{"ports":{"tcp:4003:80":{}}}`), + bs: []byte(`{"ports":[{"protocol":"tcp","matchPort":4003,"targetPort":80}]}`), wantsCfg: Config{Ports: map[PortMap]struct{}{{Protocol: "tcp", MatchPort: 4003, TargetPort: 80}: {}}}, }, { @@ -34,8 +35,8 @@ func Test_jsonUnmarshalConfig(t *testing.T) { if gotErr := json.Unmarshal(tt.bs, &cfg); (gotErr != nil) != tt.wantsErr { t.Errorf("json.Unmarshal returned error %v, wants error %v", gotErr, tt.wantsErr) } - if !reflect.DeepEqual(cfg, tt.wantsCfg) { - t.Errorf("json.Unmarshal produced Config %v, wants Config %v", cfg, tt.wantsCfg) + if diff := cmp.Diff(cfg, tt.wantsCfg); diff != "" { + t.Errorf("unexpected secrets (-got +want):\n%s", diff) } }) } @@ -54,12 +55,12 @@ func Test_jsonMarshalConfig(t *testing.T) { protocol: "tcp", matchPort: 4003, targetPort: 80, - wantsBs: []byte(`{"tailnetTarget":{"ip":"","fqdn":""},"ports":{"tcp:4003:80":{}}}`), + wantsBs: []byte(`{"tailnetTarget":{"ip":"","fqdn":""},"ports":[{"protocol":"tcp","matchPort":4003,"targetPort":80}]}`), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - cfg := Config{Ports: map[PortMap]struct{}{{ + cfg := Config{Ports: PortMaps{{ Protocol: tt.protocol, MatchPort: tt.matchPort, TargetPort: tt.targetPort}: {}}} @@ -68,8 +69,8 @@ func Test_jsonMarshalConfig(t *testing.T) { if gotErr != nil { t.Errorf("json.Marshal(%+#v) returned unexpected error %v", cfg, gotErr) } - if !reflect.DeepEqual(gotBs, tt.wantsBs) { - t.Errorf("json.Marshal(%+#v) returned '%v', wants '%v'", cfg, string(gotBs), string(tt.wantsBs)) + if diff := cmp.Diff(gotBs, tt.wantsBs); diff != "" { + t.Errorf("unexpected secrets (-got +want):\n%s", diff) } }) } From 07c157ee9f1b427437e0deab4f8344f249597991 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 8 Oct 2024 20:05:08 +0100 Subject: [PATCH 0020/1708] cmd/k8s-operator: base ProxyGroup StatefulSet on common proxy.yaml definition (#13714) As discussed in #13684, base the ProxyGroup's proxy definitions on the same scaffolding as the existing proxies, as defined in proxy.yaml Updates #13406 Signed-off-by: Tom Proctor --- cmd/k8s-operator/proxygroup.go | 5 +- cmd/k8s-operator/proxygroup_specs.go | 282 +++++++++++++-------------- cmd/k8s-operator/proxygroup_test.go | 5 +- 3 files changed, 144 insertions(+), 148 deletions(-) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 7ba59586b..b96641d68 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -239,7 +239,10 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro return fmt.Errorf("error provisioning ConfigMap: %w", err) } } - ss := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, cfgHash) + ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, cfgHash) + if err != nil { + return fmt.Errorf("error generating StatefulSet spec: %w", err) + } ss = applyProxyClassToStatefulSet(proxyClass, ss, nil, logger) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { 
s.ObjectMeta.Labels = ss.ObjectMeta.Labels diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 100c0707d..9aa7ac3b0 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -12,6 +12,7 @@ import ( corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/egressservices" "tailscale.com/types/ptr" @@ -19,163 +20,152 @@ import ( // Returns the base StatefulSet definition for a ProxyGroup. A ProxyClass may be // applied over the top after. -func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHash string) *appsv1.StatefulSet { - return &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: pg.Name, - Namespace: namespace, - Labels: pgLabels(pg.Name, nil), - OwnerReferences: pgOwnerReference(pg), +func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHash string) (*appsv1.StatefulSet, error) { + ss := new(appsv1.StatefulSet) + if err := yaml.Unmarshal(proxyYaml, &ss); err != nil { + return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err) + } + // Validate some base assumptions. + if len(ss.Spec.Template.Spec.InitContainers) != 1 { + return nil, fmt.Errorf("[unexpected] base proxy config had %d init containers instead of 1", len(ss.Spec.Template.Spec.InitContainers)) + } + if len(ss.Spec.Template.Spec.Containers) != 1 { + return nil, fmt.Errorf("[unexpected] base proxy config had %d containers instead of 1", len(ss.Spec.Template.Spec.Containers)) + } + + // StatefulSet config. + ss.ObjectMeta = metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + } + ss.Spec.Replicas = ptr.To(pgReplicas(pg)) + ss.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: pgLabels(pg.Name, nil), + } + + // Template config. 
+ tmpl := &ss.Spec.Template + tmpl.ObjectMeta = metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + DeletionGracePeriodSeconds: ptr.To[int64](10), + Annotations: map[string]string{ + podAnnotationLastSetConfigFileHash: cfgHash, }, - Spec: appsv1.StatefulSetSpec{ - Replicas: ptr.To(pgReplicas(pg)), - Selector: &metav1.LabelSelector{ - MatchLabels: pgLabels(pg.Name, nil), - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: pg.Name, - Namespace: namespace, - Labels: pgLabels(pg.Name, nil), - DeletionGracePeriodSeconds: ptr.To[int64](10), - Annotations: map[string]string{ - podAnnotationLastSetConfigFileHash: cfgHash, + } + tmpl.Spec.ServiceAccountName = pg.Name + tmpl.Spec.InitContainers[0].Image = image + tmpl.Spec.Volumes = func() []corev1.Volume { + var volumes []corev1.Volume + for i := range pgReplicas(pg) { + volumes = append(volumes, corev1.Volume{ + Name: fmt.Sprintf("tailscaledconfig-%d", i), + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-%d-config", pg.Name, i), }, }, - Spec: corev1.PodSpec{ - ServiceAccountName: pg.Name, - InitContainers: []corev1.Container{ - { - Name: "sysctler", - Image: image, - SecurityContext: &corev1.SecurityContext{ - Privileged: ptr.To(true), - }, - Command: []string{ - "/bin/sh", - "-c", - }, - Args: []string{ - "sysctl -w net.ipv4.ip_forward=1 && if sysctl net.ipv6.conf.all.forwarding; then sysctl -w net.ipv6.conf.all.forwarding=1; fi", - }, + }) + } + + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { + volumes = append(volumes, corev1.Volume{ + Name: pgEgressCMName(pg.Name), + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: pgEgressCMName(pg.Name), }, }, - Containers: []corev1.Container{ - { - Name: "tailscale", - Image: image, - SecurityContext: &corev1.SecurityContext{ - Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{ - "NET_ADMIN", - }, - }, - }, - VolumeMounts: func() []corev1.VolumeMount { - var mounts []corev1.VolumeMount - for i := range pgReplicas(pg) { - mounts = append(mounts, corev1.VolumeMount{ - Name: fmt.Sprintf("tailscaledconfig-%d", i), - ReadOnly: true, - MountPath: fmt.Sprintf("/etc/tsconfig/%s-%d", pg.Name, i), - }) - } + }, + }) + } - if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { - mounts = append(mounts, corev1.VolumeMount{ - Name: pgEgressCMName(pg.Name), - MountPath: "/etc/proxies", - ReadOnly: true, - }) - } - return mounts - }(), - Env: func() []corev1.EnvVar { - envs := []corev1.EnvVar{ - { - // TODO(irbekrm): verify that .status.podIPs are always set, else read in .status.podIP as well. - Name: "POD_IPS", // this will be a comma separate list i.e 10.136.0.6,2600:1900:4011:161:0:e:0:6 - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "status.podIPs", - }, - }, - }, - { - Name: "POD_NAME", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - // Secret is named after the pod. 
- FieldPath: "metadata.name", - }, - }, - }, - { - Name: "TS_KUBE_SECRET", - Value: "$(POD_NAME)", - }, - { - Name: "TS_STATE", - Value: "kube:$(POD_NAME)", - }, - { - Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", - Value: "/etc/tsconfig/$(POD_NAME)", - }, - { - Name: "TS_USERSPACE", - Value: "false", - }, - } - if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { - envs = append(envs, corev1.EnvVar{ - Name: "TS_EGRESS_SERVICES_CONFIG_PATH", - Value: fmt.Sprintf("/etc/proxies/%s", egressservices.KeyEgressServices), - }) - } + return volumes + }() - if tsFirewallMode != "" { - envs = append(envs, corev1.EnvVar{ - Name: "TS_DEBUG_FIREWALL_MODE", - Value: tsFirewallMode, - }) - } + // Main container config. + c := &ss.Spec.Template.Spec.Containers[0] + c.Image = image + c.VolumeMounts = func() []corev1.VolumeMount { + var mounts []corev1.VolumeMount + for i := range pgReplicas(pg) { + mounts = append(mounts, corev1.VolumeMount{ + Name: fmt.Sprintf("tailscaledconfig-%d", i), + ReadOnly: true, + MountPath: fmt.Sprintf("/etc/tsconfig/%s-%d", pg.Name, i), + }) + } - return envs - }(), - }, - }, - Volumes: func() []corev1.Volume { - var volumes []corev1.Volume - for i := range pgReplicas(pg) { - volumes = append(volumes, corev1.Volume{ - Name: fmt.Sprintf("tailscaledconfig-%d", i), - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: fmt.Sprintf("%s-%d-config", pg.Name, i), - }, - }, - }) - } - if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { - volumes = append(volumes, corev1.Volume{ - Name: pgEgressCMName(pg.Name), - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: pgEgressCMName(pg.Name), - }, - }, - }, - }) - } + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { + mounts = append(mounts, corev1.VolumeMount{ + Name: pgEgressCMName(pg.Name), + MountPath: "/etc/proxies", + ReadOnly: true, + }) + } - return volumes - }(), + return mounts + }() + c.Env = func() []corev1.EnvVar { + envs := []corev1.EnvVar{ + { + // TODO(irbekrm): verify that .status.podIPs are always set, else read in .status.podIP as well. + Name: "POD_IPS", // this will be a comma separate list i.e 10.136.0.6,2600:1900:4011:161:0:e:0:6 + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIPs", + }, }, }, - }, - } + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + // Secret is named after the pod. 
+ FieldPath: "metadata.name", + }, + }, + }, + { + Name: "TS_KUBE_SECRET", + Value: "$(POD_NAME)", + }, + { + Name: "TS_STATE", + Value: "kube:$(POD_NAME)", + }, + { + Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", + Value: "/etc/tsconfig/$(POD_NAME)", + }, + { + Name: "TS_USERSPACE", + Value: "false", + }, + } + + if tsFirewallMode != "" { + envs = append(envs, corev1.EnvVar{ + Name: "TS_DEBUG_FIREWALL_MODE", + Value: tsFirewallMode, + }) + } + + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { + envs = append(envs, corev1.EnvVar{ + Name: "TS_EGRESS_SERVICES_CONFIG_PATH", + Value: fmt.Sprintf("/etc/proxies/%s", egressservices.KeyEgressServices), + }) + } + + return envs + }() + + return ss, nil } func pgServiceAccount(pg *tsapi.ProxyGroup, namespace string) *corev1.ServiceAccount { diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index b5a6a4d8d..445db7537 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -197,7 +197,10 @@ func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.Prox role := pgRole(pg, tsNamespace) roleBinding := pgRoleBinding(pg, tsNamespace) serviceAccount := pgServiceAccount(pg, tsNamespace) - statefulSet := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", "") + statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", "") + if err != nil { + t.Fatal(err) + } statefulSet.Annotations = defaultProxyClassAnnotations if shouldExist { From 29cf59a9b442ecd6797f71d0ced5ca21e0f2dc11 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 8 Oct 2024 12:32:28 -0500 Subject: [PATCH 0021/1708] util/syspolicy/setting: update Snapshot to use Go 1.23 iterators Updates #12912 Updates #12687 Signed-off-by: Nick Khyl --- util/syspolicy/setting/snapshot.go | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/util/syspolicy/setting/snapshot.go b/util/syspolicy/setting/snapshot.go index 306bf759e..512bc487c 100644 --- a/util/syspolicy/setting/snapshot.go +++ b/util/syspolicy/setting/snapshot.go @@ -4,6 +4,8 @@ package setting import ( + "iter" + "maps" "slices" "strings" @@ -25,15 +27,13 @@ func NewSnapshot(items map[Key]RawItem, opts ...SummaryOption) *Snapshot { return &Snapshot{m: xmaps.Clone(items), sig: deephash.Hash(&items), summary: SummaryWith(opts...)} } -// All returns a map of all policy settings in s. -// The returned map must not be modified. -func (s *Snapshot) All() map[Key]RawItem { +// All returns an iterator over policy settings in s. The iteration order is not +// specified and is not guaranteed to be the same from one call to the next. +func (s *Snapshot) All() iter.Seq2[Key, RawItem] { if s == nil { - return nil + return func(yield func(Key, RawItem) bool) {} } - // TODO(nickkhyl): return iter.Seq2[[Key], [RawItem]] in Go 1.23, - // and remove [keyItemPair]. - return s.m + return maps.All(s.m) } // Get returns the value of the policy setting with the specified key @@ -87,12 +87,11 @@ func (s *Snapshot) EqualItems(s2 *Snapshot) bool { // Keys return an iterator over keys in s. The iteration order is not specified // and is not guaranteed to be the same from one call to the next. -func (s *Snapshot) Keys() []Key { +func (s *Snapshot) Keys() iter.Seq[Key] { if s.m == nil { - return nil + return func(yield func(Key) bool) {} } - // TODO(nickkhyl): return iter.Seq[Key] in Go 1.23. - return xmaps.Keys(s.m) + return maps.Keys(s.m) } // Len reports the number of [RawItem]s in s. 
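For callers of the setting package, the change means ranging directly over the Snapshot instead of copying its contents out. A minimal sketch of the new call style, assuming a *Snapshot value named snap and the usual fmt and slices imports (the variable name is illustrative only):

	for key, item := range snap.All() {
		fmt.Printf("%v = %v\n", key, item)
	}

	// Keys pairs naturally with slices.Sorted when a stable order is needed,
	// which is how the String method below consumes it.
	for _, key := range slices.Sorted(snap.Keys()) {
		fmt.Println(key)
	}
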
@@ -116,8 +115,6 @@ func (s *Snapshot) String() string { if s.Len() == 0 && s.Summary().IsEmpty() { return "{Empty}" } - keys := s.Keys() - slices.Sort(keys) var sb strings.Builder if !s.summary.IsEmpty() { sb.WriteRune('{') @@ -127,7 +124,7 @@ func (s *Snapshot) String() string { sb.WriteString(s.summary.String()) sb.WriteRune('}') } - for _, k := range keys { + for _, k := range slices.Sorted(s.Keys()) { if sb.Len() != 0 { sb.WriteRune('\n') } From da40609abd128b522684adcf06923d2faaad6fac Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 8 Oct 2024 13:59:50 -0500 Subject: [PATCH 0022/1708] util/syspolicy, ipn: add "tailscale debug component-logs" support Fixes #13313 Fixes #12687 Signed-off-by: Nick Khyl --- cmd/derper/depaware.txt | 3 +- cmd/k8s-operator/depaware.txt | 3 +- cmd/tailscale/depaware.txt | 3 +- cmd/tailscaled/depaware.txt | 3 +- ipn/backend.go | 1 + ipn/ipnlocal/local.go | 2 + util/syspolicy/internal/loggerx/logger.go | 42 ++++++++++----- .../syspolicy/internal/loggerx/logger_test.go | 53 +++++++++++++++++++ util/syspolicy/syspolicy.go | 6 +++ 9 files changed, 100 insertions(+), 16 deletions(-) create mode 100644 util/syspolicy/internal/loggerx/logger_test.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index eb9ba1619..417dbcfb0 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -162,7 +162,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/singleflight from tailscale.com/net/dnscache tailscale.com/util/slicesx from tailscale.com/cmd/derper+ tailscale.com/util/syspolicy from tailscale.com/ipn - tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting + tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy tailscale.com/util/usermetric from tailscale.com/health tailscale.com/util/vizerror from tailscale.com/tailcfg+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 649296b59..c9f035372 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -809,7 +809,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ tailscale.com/util/syspolicy from tailscale.com/control/controlclient+ - tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting + tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock tailscale.com/util/systemd from tailscale.com/control/controlclient+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index be6f42946..7b9d80af8 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -171,7 +171,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/singleflight from tailscale.com/net/dnscache+ tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ tailscale.com/util/syspolicy from tailscale.com/ipn - tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting + 
tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy tailscale.com/util/testenv from tailscale.com/cmd/tailscale/cli tailscale.com/util/truncate from tailscale.com/cmd/tailscale/cli diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 018e74fac..0d8e51eda 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -398,7 +398,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ tailscale.com/util/syspolicy from tailscale.com/cmd/tailscaled+ - tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting + tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock tailscale.com/util/systemd from tailscale.com/control/controlclient+ diff --git a/ipn/backend.go b/ipn/backend.go index d6ba95408..76ad1910b 100644 --- a/ipn/backend.go +++ b/ipn/backend.go @@ -238,6 +238,7 @@ type StateKey string var DebuggableComponents = []string{ "magicsock", "sockstats", + "syspolicy", } type Options struct { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8fc78a36b..06dd84831 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -583,6 +583,8 @@ func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Tim } } } + case "syspolicy": + setEnabled = syspolicy.SetDebugLoggingEnabled } if setEnabled == nil || !slices.Contains(ipn.DebuggableComponents, component) { return fmt.Errorf("unknown component %q", component) diff --git a/util/syspolicy/internal/loggerx/logger.go b/util/syspolicy/internal/loggerx/logger.go index b28610826..c29a5f084 100644 --- a/util/syspolicy/internal/loggerx/logger.go +++ b/util/syspolicy/internal/loggerx/logger.go @@ -6,6 +6,7 @@ package loggerx import ( "log" + "sync/atomic" "tailscale.com/types/lazy" "tailscale.com/types/logger" @@ -13,34 +14,51 @@ import ( ) const ( - errorPrefix = "syspolicy: " + normalPrefix = "syspolicy: " verbosePrefix = "syspolicy: [v2] " ) var ( - lazyErrorf lazy.SyncValue[logger.Logf] + debugLogging atomic.Bool // whether debugging logging is enabled + + lazyPrintf lazy.SyncValue[logger.Logf] lazyVerbosef lazy.SyncValue[logger.Logf] ) +// SetDebugLoggingEnabled controls whether spammy debug logging is enabled. +func SetDebugLoggingEnabled(v bool) { + debugLogging.Store(v) +} + // Errorf formats and writes an error message to the log. func Errorf(format string, args ...any) { - errorf := lazyErrorf.Get(func() logger.Logf { - return logger.WithPrefix(log.Printf, errorPrefix) - }) - errorf(format, args...) + printf(format, args...) } // Verbosef formats and writes an optional, verbose message to the log. func Verbosef(format string, args ...any) { - verbosef := lazyVerbosef.Get(func() logger.Logf { + if debugLogging.Load() { + printf(format, args...) + } else { + verbosef(format, args...) + } +} + +func printf(format string, args ...any) { + lazyPrintf.Get(func() logger.Logf { + return logger.WithPrefix(log.Printf, normalPrefix) + })(format, args...) 
+} + +func verbosef(format string, args ...any) { + lazyVerbosef.Get(func() logger.Logf { return logger.WithPrefix(log.Printf, verbosePrefix) - }) - verbosef(format, args...) + })(format, args...) } -// SetForTest sets the specified errorf and verbosef functions for the duration +// SetForTest sets the specified printf and verbosef functions for the duration // of tb and its subtests. -func SetForTest(tb internal.TB, errorf, verbosef logger.Logf) { - lazyErrorf.SetForTest(tb, errorf, nil) +func SetForTest(tb internal.TB, printf, verbosef logger.Logf) { + lazyPrintf.SetForTest(tb, printf, nil) lazyVerbosef.SetForTest(tb, verbosef, nil) } diff --git a/util/syspolicy/internal/loggerx/logger_test.go b/util/syspolicy/internal/loggerx/logger_test.go new file mode 100644 index 000000000..9735b5d30 --- /dev/null +++ b/util/syspolicy/internal/loggerx/logger_test.go @@ -0,0 +1,53 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package loggerx + +import ( + "fmt" + "io" + "strings" + "testing" + + "tailscale.com/types/logger" +) + +func TestDebugLogging(t *testing.T) { + var normal, verbose strings.Builder + SetForTest(t, logfTo(&normal), logfTo(&verbose)) + + checkOutput := func(wantNormal, wantVerbose string) { + t.Helper() + if gotNormal := normal.String(); gotNormal != wantNormal { + t.Errorf("Unexpected normal output: got %q; want %q", gotNormal, wantNormal) + } + if gotVerbose := verbose.String(); gotVerbose != wantVerbose { + t.Errorf("Unexpected verbose output: got %q; want %q", gotVerbose, wantVerbose) + } + normal.Reset() + verbose.Reset() + } + + Errorf("This is an error message: %v", 42) + checkOutput("This is an error message: 42", "") + Verbosef("This is a verbose message: %v", 17) + checkOutput("", "This is a verbose message: 17") + + SetDebugLoggingEnabled(true) + Errorf("This is an error message: %v", 42) + checkOutput("This is an error message: 42", "") + Verbosef("This is a verbose message: %v", 17) + checkOutput("This is a verbose message: 17", "") + + SetDebugLoggingEnabled(false) + Errorf("This is an error message: %v", 42) + checkOutput("This is an error message: 42", "") + Verbosef("This is a verbose message: %v", 17) + checkOutput("", "This is a verbose message: 17") +} + +func logfTo(w io.Writer) logger.Logf { + return func(format string, args ...any) { + fmt.Fprintf(w, format, args...) + } +} diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index ccfd83347..abe42ed90 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -8,6 +8,7 @@ import ( "errors" "time" + "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/setting" ) @@ -135,3 +136,8 @@ func SelectControlURL(reg, disk string) string { } return def } + +// SetDebugLoggingEnabled controls whether spammy debug logging is enabled. +func SetDebugLoggingEnabled(v bool) { + loggerx.SetDebugLoggingEnabled(v) +} From 60011e73b88fc2829d540ea6c314a67750528a3b Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 9 Oct 2024 13:22:50 +0100 Subject: [PATCH 0023/1708] cmd/k8s-operator: fix Pod IP selection (#13743) Ensure that .status.podIPs is used to select Pod's IP in all reconcilers. 
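The reconcilers now select the address through a podIPv4 helper instead of reading pod.Status.PodIP directly. A minimal sketch of what such a helper can look like, assuming the standard corev1 Pod type plus net/netip and fmt; the function name is hypothetical and the operator's real podIPv4 may differ in its details:

	// podIPv4Sketch returns the first IPv4 address listed in .status.podIPs.
	// The legacy .status.podIP field holds only one address, which on a
	// dual-stack cluster may be the IPv6 one.
	func podIPv4Sketch(pod *corev1.Pod) (string, error) {
		for _, addr := range pod.Status.PodIPs {
			ip, err := netip.ParseAddr(addr.IP)
			if err != nil {
				return "", fmt.Errorf("error parsing Pod IP %q: %w", addr.IP, err)
			}
			if ip.Is4() {
				return ip.String(), nil
			}
		}
		return "", nil // no IPv4 address reported yet
	}
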
Updates tailscale/tailscale#13406 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/egress-eps.go | 6 +++++- cmd/k8s-operator/egress-eps_test.go | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/cmd/k8s-operator/egress-eps.go b/cmd/k8s-operator/egress-eps.go index e8b327263..85992abed 100644 --- a/cmd/k8s-operator/egress-eps.go +++ b/cmd/k8s-operator/egress-eps.go @@ -111,9 +111,13 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ if !ready { continue // maybe next time } + podIP, err := podIPv4(&pod) // we currently only support IPv4 + if err != nil { + return res, fmt.Errorf("error determining IPv4 address for Pod: %w", err) + } newEndpoints = append(newEndpoints, discoveryv1.Endpoint{ Hostname: (*string)(&pod.UID), - Addresses: []string{pod.Status.PodIP}, + Addresses: []string{podIP}, Conditions: discoveryv1.EndpointConditions{ Ready: ptr.To(true), Serving: ptr.To(true), diff --git a/cmd/k8s-operator/egress-eps_test.go b/cmd/k8s-operator/egress-eps_test.go index 806f739fd..a64f3e4e1 100644 --- a/cmd/k8s-operator/egress-eps_test.go +++ b/cmd/k8s-operator/egress-eps_test.go @@ -104,7 +104,7 @@ func TestTailscaleEgressEndpointSlices(t *testing.T) { }) expectReconciled(t, er, "operator-ns", "foo") eps.Endpoints = append(eps.Endpoints, discoveryv1.Endpoint{ - Addresses: []string{pod.Status.PodIP}, + Addresses: []string{"10.0.0.1"}, Hostname: pointer.To("foo"), Conditions: discoveryv1.EndpointConditions{ Serving: pointer.ToBool(true), From f6d4d03355ebc5d0fb2269fc2330d36053fbd7fd Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 9 Oct 2024 13:23:00 +0100 Subject: [PATCH 0024/1708] cmd/k8s-operator: don't error out if ProxyClass for ProxyGroup not found. (#13736) We don't need to error out and continuously reconcile if ProxyClass has not (yet) been created, once it gets created the ProxyGroup reconciler will get triggered. Updates tailscale/tailscale#13406 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/proxygroup.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index b96641d68..1f9983aa9 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -146,7 +146,14 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ var proxyClass *tsapi.ProxyClass if proxyClassName != "" { proxyClass = new(tsapi.ProxyClass) - if err = r.Get(ctx, types.NamespacedName{Name: proxyClassName}, proxyClass); err != nil { + err := r.Get(ctx, types.NamespacedName{Name: proxyClassName}, proxyClass) + if apierrors.IsNotFound(err) { + err = nil + message := fmt.Sprintf("the ProxyGroup's ProxyClass %s does not (yet) exist", proxyClassName) + logger.Info(message) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) + } + if err != nil { err = fmt.Errorf("error getting ProxyGroup's ProxyClass %s: %s", proxyClassName, err) r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error()) return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, err.Error()) From 94c79659fac7985e1ae6fce87cb2d708c9d64d1f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 9 Oct 2024 08:02:45 -0700 Subject: [PATCH 0025/1708] types/views: add iterators to the three Map view types Their callers using Range are all kinda clunky feeling. Iterators should make them more readable. 
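The readability gain is easiest to see side by side. A minimal sketch, assuming a views.Map built with views.MapOf as in the new tests and fmt imported; both loops print the same entries:

	m := views.MapOf(map[string]int{"foo": 1, "bar": 2})

	// Callback style: the function returns false to stop iteration early.
	m.Range(func(k string, v int) bool {
		fmt.Printf("%s=%d\n", k, v)
		return true
	})

	// Iterator style added here: a plain range over m.All().
	for k, v := range m.All() {
		fmt.Printf("%s=%d\n", k, v)
	}
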
Updates #12912 Change-Id: I93461eba8e735276fda4a8558a4ae4bfd6c04922 Signed-off-by: Brad Fitzpatrick --- types/views/views.go | 34 +++++++++++++++++++++++++++++++ types/views/views_test.go | 43 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+) diff --git a/types/views/views.go b/types/views/views.go index 5fe88fa6c..19aa69d4a 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -440,6 +440,17 @@ func (m MapSlice[K, V]) AsMap() map[K][]V { return out } +// All returns an iterator iterating over the keys and values of m. +func (m MapSlice[K, V]) All() iter.Seq2[K, Slice[V]] { + return func(yield func(K, Slice[V]) bool) { + for k, v := range m.ж { + if !yield(k, SliceOf(v)) { + return + } + } + } +} + // Map provides a read-only view of a map. It is the caller's responsibility to // make sure V is immutable. type Map[K comparable, V any] struct { @@ -526,6 +537,18 @@ func (m Map[K, V]) Range(f MapRangeFn[K, V]) { } } +// All returns an iterator iterating over the keys +// and values of m. +func (m Map[K, V]) All() iter.Seq2[K, V] { + return func(yield func(K, V) bool) { + for k, v := range m.ж { + if !yield(k, v) { + return + } + } + } +} + // MapFnOf returns a MapFn for m. func MapFnOf[K comparable, T any, V any](m map[K]T, f func(T) V) MapFn[K, T, V] { return MapFn[K, T, V]{ @@ -587,6 +610,17 @@ func (m MapFn[K, T, V]) Range(f MapRangeFn[K, V]) { } } +// All returns an iterator iterating over the keys and value views of m. +func (m MapFn[K, T, V]) All() iter.Seq2[K, V] { + return func(yield func(K, V) bool) { + for k, v := range m.ж { + if !yield(k, m.wrapv(v)) { + return + } + } + } +} + // ContainsPointers reports whether T contains any pointers, // either explicitly or implicitly. // It has special handling for some types that contain pointers diff --git a/types/views/views_test.go b/types/views/views_test.go index ec7dcec4c..8a1ff3fdd 100644 --- a/types/views/views_test.go +++ b/types/views/views_test.go @@ -446,6 +446,7 @@ func (v testStructView) AsStruct() *testStruct { } return v.p.Clone() } +func (v testStructView) ValueForTest() string { return v.p.value } func TestSliceViewRange(t *testing.T) { vs := SliceOfViews([]*testStruct{{value: "foo"}, {value: "bar"}}) @@ -458,3 +459,45 @@ func TestSliceViewRange(t *testing.T) { t.Errorf("got %q; want %q", got, want) } } + +func TestMapIter(t *testing.T) { + m := MapOf(map[string]int{"foo": 1, "bar": 2}) + var got []string + for k, v := range m.All() { + got = append(got, fmt.Sprintf("%s-%d", k, v)) + } + slices.Sort(got) + want := []string{"bar-2", "foo-1"} + if !slices.Equal(got, want) { + t.Errorf("got %q; want %q", got, want) + } +} + +func TestMapSliceIter(t *testing.T) { + m := MapSliceOf(map[string][]int{"foo": {3, 4}, "bar": {1, 2}}) + var got []string + for k, v := range m.All() { + got = append(got, fmt.Sprintf("%s-%d", k, v)) + } + slices.Sort(got) + want := []string{"bar-{[1 2]}", "foo-{[3 4]}"} + if !slices.Equal(got, want) { + t.Errorf("got %q; want %q", got, want) + } +} + +func TestMapFnIter(t *testing.T) { + m := MapFnOf[string, *testStruct, testStructView](map[string]*testStruct{ + "foo": {value: "fooVal"}, + "bar": {value: "barVal"}, + }, func(p *testStruct) testStructView { return testStructView{p} }) + var got []string + for k, v := range m.All() { + got = append(got, fmt.Sprintf("%v-%v", k, v.ValueForTest())) + } + slices.Sort(got) + want := []string{"bar-barVal", "foo-fooVal"} + if !slices.Equal(got, want) { + t.Errorf("got %q; want %q", got, want) + } +} From 
89ee6bbdaef4216fd1664f3c91bd5244e24bd252 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 9 Oct 2024 18:23:40 +0100 Subject: [PATCH 0026/1708] cmd/k8s-operator,k8s-operator/apis: set a readiness condition on egress Services for ProxyGroup (#13746) cmd/k8s-operator,k8s-operator/apis: set a readiness condition on egress Services Set a readiness condition on ExternalName Services that define a tailnet target to route cluster traffic to via a ProxyGroup's proxies. The condition is set to true if at least one proxy is currently set up to route. Updates tailscale/tailscale#13406 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/egress-services-readiness.go | 179 ++++++++++++++++++ .../egress-services-readiness_test.go | 169 +++++++++++++++++ cmd/k8s-operator/egress-services.go | 18 +- cmd/k8s-operator/operator.go | 47 ++++- k8s-operator/apis/v1alpha1/types_connector.go | 21 +- 5 files changed, 420 insertions(+), 14 deletions(-) create mode 100644 cmd/k8s-operator/egress-services-readiness.go create mode 100644 cmd/k8s-operator/egress-services-readiness_test.go diff --git a/cmd/k8s-operator/egress-services-readiness.go b/cmd/k8s-operator/egress-services-readiness.go new file mode 100644 index 000000000..f6991145f --- /dev/null +++ b/cmd/k8s-operator/egress-services-readiness.go @@ -0,0 +1,179 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "errors" + "fmt" + "strings" + + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/tstime" +) + +const ( + reasonReadinessCheckFailed = "ReadinessCheckFailed" + reasonClusterResourcesNotReady = "ClusterResourcesNotReady" + reasonNoProxies = "NoProxiesConfigured" + reasonNotReady = "NotReadyToRouteTraffic" + reasonReady = "ReadyToRouteTraffic" + reasonPartiallyReady = "PartiallyReadyToRouteTraffic" + msgReadyToRouteTemplate = "%d out of %d replicas are ready to route traffic" +) + +type egressSvcsReadinessReconciler struct { + client.Client + logger *zap.SugaredLogger + clock tstime.Clock + tsNamespace string +} + +// Reconcile reconciles an ExternalName Service that defines a tailnet target to be exposed on a ProxyGroup and sets the +// EgressSvcReady condition on it. The condition gets set to true if at least one of the proxies is currently ready to +// route traffic to the target. It compares proxy Pod IPs with the endpoints set on the EndpointSlice for the egress +// service to determine how many replicas are currently able to route traffic. 
+func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { + l := esrr.logger.With("Service", req.NamespacedName) + defer l.Info("reconcile finished") + + svc := new(corev1.Service) + if err = esrr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) { + l.Info("Service not found") + return res, nil + } else if err != nil { + return res, fmt.Errorf("failed to get Service: %w", err) + } + var ( + reason, msg string + st metav1.ConditionStatus = metav1.ConditionUnknown + ) + oldStatus := svc.Status.DeepCopy() + defer func() { + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, l) + if !apiequality.Semantic.DeepEqual(oldStatus, svc.Status) { + err = errors.Join(err, esrr.Status().Update(ctx, svc)) + } + }() + + crl := egressSvcChildResourceLabels(svc) + eps, err := getSingleObject[discoveryv1.EndpointSlice](ctx, esrr.Client, esrr.tsNamespace, crl) + if err != nil { + err = fmt.Errorf("error getting EndpointSlice: %w", err) + reason = reasonReadinessCheckFailed + msg = err.Error() + return res, err + } + if eps == nil { + l.Infof("EndpointSlice for Service does not yet exist, waiting...") + reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady + st = metav1.ConditionFalse + return res, nil + } + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: svc.Annotations[AnnotationProxyGroup], + }, + } + err = esrr.Get(ctx, client.ObjectKeyFromObject(pg), pg) + if apierrors.IsNotFound(err) { + l.Infof("ProxyGroup for Service does not exist, waiting...") + reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady + st = metav1.ConditionFalse + return res, nil + } + if err != nil { + err = fmt.Errorf("error retrieving ProxyGroup: %w", err) + reason = reasonReadinessCheckFailed + msg = err.Error() + return res, err + } + if !tsoperator.ProxyGroupIsReady(pg) { + l.Infof("ProxyGroup for Service is not ready, waiting...") + reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady + st = metav1.ConditionFalse + return res, nil + } + + replicas := pgReplicas(pg) + if replicas == 0 { + l.Infof("ProxyGroup replicas set to 0") + reason, msg = reasonNoProxies, reasonNoProxies + st = metav1.ConditionFalse + return res, nil + } + podLabels := pgLabels(pg.Name, nil) + var readyReplicas int32 + for i := range replicas { + podLabels[appsv1.PodIndexLabel] = fmt.Sprintf("%d", i) + pod, err := getSingleObject[corev1.Pod](ctx, esrr.Client, esrr.tsNamespace, podLabels) + if err != nil { + err = fmt.Errorf("error retrieving ProxyGroup Pod: %w", err) + reason = reasonReadinessCheckFailed + msg = err.Error() + return res, err + } + if pod == nil { + l.Infof("[unexpected] ProxyGroup is ready, but replica %d was not found", i) + reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady + return res, nil + } + l.Infof("looking at Pod with IPs %v", pod.Status.PodIPs) + ready := false + for _, ep := range eps.Endpoints { + l.Infof("looking at endpoint with addresses %v", ep.Addresses) + if endpointReadyForPod(&ep, pod, l) { + l.Infof("endpoint is ready for Pod") + ready = true + break + } + } + if ready { + readyReplicas++ + } + } + msg = fmt.Sprintf(msgReadyToRouteTemplate, readyReplicas, replicas) + if readyReplicas == 0 { + reason = reasonNotReady + st = metav1.ConditionFalse + return res, nil + } + st = metav1.ConditionTrue + if readyReplicas < replicas { + reason = reasonPartiallyReady + } else { + reason = 
reasonReady + } + return res, nil +} + +// endpointReadyForPod returns true if the endpoint is for the Pod's IPv4 address and is ready to serve traffic. +// Endpoint must not be nil. +func endpointReadyForPod(ep *discoveryv1.Endpoint, pod *corev1.Pod, l *zap.SugaredLogger) bool { + podIP, err := podIPv4(pod) + if err != nil { + l.Infof("[unexpected] error retrieving Pod's IPv4 address: %v", err) + return false + } + // Currently we only ever set a single address on and Endpoint and nothing else is meant to modify this. + if len(ep.Addresses) != 1 { + return false + } + return strings.EqualFold(ep.Addresses[0], podIP) && + *ep.Conditions.Ready && + *ep.Conditions.Serving && + !*ep.Conditions.Terminating +} diff --git a/cmd/k8s-operator/egress-services-readiness_test.go b/cmd/k8s-operator/egress-services-readiness_test.go new file mode 100644 index 000000000..052eb1a49 --- /dev/null +++ b/cmd/k8s-operator/egress-services-readiness_test.go @@ -0,0 +1,169 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "fmt" + "testing" + + "github.com/AlekSi/pointer" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/tstest" + "tailscale.com/tstime" +) + +func TestEgressServiceReadiness(t *testing.T) { + // We need to pass a ProxyGroup object to WithStatusSubresource because of some quirks in how the fake client + // works. Without this code further down would not be able to update ProxyGroup status. + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithStatusSubresource(&tsapi.ProxyGroup{}). 
+ Build() + zl, _ := zap.NewDevelopment() + cl := tstest.NewClock(tstest.ClockOpts{}) + rec := &egressSvcsReadinessReconciler{ + tsNamespace: "operator-ns", + Client: fc, + logger: zl.Sugar(), + clock: cl, + } + tailnetFQDN := "my-app.tailnetxyz.ts.net" + egressSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-app", + Namespace: "dev", + Annotations: map[string]string{ + AnnotationProxyGroup: "dev", + AnnotationTailnetTargetFQDN: tailnetFQDN, + }, + }, + } + fakeClusterIPSvc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "my-app", Namespace: "operator-ns"}} + l := egressSvcEpsLabels(egressSvc, fakeClusterIPSvc) + eps := &discoveryv1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-app", + Namespace: "operator-ns", + Labels: l, + }, + AddressType: discoveryv1.AddressTypeIPv4, + } + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dev", + }, + } + mustCreate(t, fc, egressSvc) + setClusterNotReady(egressSvc, cl, zl.Sugar()) + t.Run("endpointslice_does_not_exist", func(t *testing.T) { + expectReconciled(t, rec, "dev", "my-app") + expectEqual(t, fc, egressSvc, nil) // not ready + }) + t.Run("proxy_group_does_not_exist", func(t *testing.T) { + mustCreate(t, fc, eps) + expectReconciled(t, rec, "dev", "my-app") + expectEqual(t, fc, egressSvc, nil) // still not ready + }) + t.Run("proxy_group_not_ready", func(t *testing.T) { + mustCreate(t, fc, pg) + expectReconciled(t, rec, "dev", "my-app") + expectEqual(t, fc, egressSvc, nil) // still not ready + }) + t.Run("no_ready_replicas", func(t *testing.T) { + setPGReady(pg, cl, zl.Sugar()) + mustUpdateStatus(t, fc, pg.Namespace, pg.Name, func(p *tsapi.ProxyGroup) { + p.Status = pg.Status + }) + expectEqual(t, fc, pg, nil) + for i := range pgReplicas(pg) { + p := pod(pg, i) + mustCreate(t, fc, p) + mustUpdateStatus(t, fc, p.Namespace, p.Name, func(existing *corev1.Pod) { + existing.Status.PodIPs = p.Status.PodIPs + }) + } + expectReconciled(t, rec, "dev", "my-app") + setNotReady(egressSvc, cl, zl.Sugar(), pgReplicas(pg)) + expectEqual(t, fc, egressSvc, nil) // still not ready + }) + t.Run("one_ready_replica", func(t *testing.T) { + setEndpointForReplica(pg, 0, eps) + mustUpdate(t, fc, eps.Namespace, eps.Name, func(e *discoveryv1.EndpointSlice) { + e.Endpoints = eps.Endpoints + }) + setReady(egressSvc, cl, zl.Sugar(), pgReplicas(pg), 1) + expectReconciled(t, rec, "dev", "my-app") + expectEqual(t, fc, egressSvc, nil) // partially ready + }) + t.Run("all_replicas_ready", func(t *testing.T) { + for i := range pgReplicas(pg) { + setEndpointForReplica(pg, i, eps) + } + mustUpdate(t, fc, eps.Namespace, eps.Name, func(e *discoveryv1.EndpointSlice) { + e.Endpoints = eps.Endpoints + }) + setReady(egressSvc, cl, zl.Sugar(), pgReplicas(pg), pgReplicas(pg)) + expectReconciled(t, rec, "dev", "my-app") + expectEqual(t, fc, egressSvc, nil) // ready + }) +} + +func setClusterNotReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger) { + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonClusterResourcesNotReady, reasonClusterResourcesNotReady, cl, l) +} + +func setNotReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replicas int32) { + msg := fmt.Sprintf(msgReadyToRouteTemplate, 0, replicas) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonNotReady, msg, cl, l) +} + +func setReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replicas, readyReplicas int32) { + reason := reasonPartiallyReady + if 
readyReplicas == replicas { + reason = reasonReady + } + msg := fmt.Sprintf(msgReadyToRouteTemplate, readyReplicas, replicas) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionTrue, reason, msg, cl, l) +} + +func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, l *zap.SugaredLogger) { + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, l) +} + +func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1.EndpointSlice) { + p := pod(pg, ordinal) + eps.Endpoints = append(eps.Endpoints, discoveryv1.Endpoint{ + Addresses: []string{p.Status.PodIPs[0].IP}, + Conditions: discoveryv1.EndpointConditions{ + Ready: pointer.ToBool(true), + Serving: pointer.ToBool(true), + Terminating: pointer.ToBool(false), + }, + }) +} + +func pod(pg *tsapi.ProxyGroup, ordinal int32) *corev1.Pod { + l := pgLabels(pg.Name, nil) + l[appsv1.PodIndexLabel] = fmt.Sprintf("%d", ordinal) + ip := fmt.Sprintf("10.0.0.%d", ordinal) + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", pg.Name, ordinal), + Namespace: "operator-ns", + Labels: l, + }, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{{IP: ip}}, + }, + } +} diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index 20bafe8ec..98ed94366 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -161,7 +161,6 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re } func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (err error) { - l.Debug("maybe provision") r := svcConfiguredReason(svc, false, l) st := metav1.ConditionFalse defer func() { @@ -272,11 +271,9 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s } } - crl := egressSvcChildResourceLabels(svc) + crl := egressSvcEpsLabels(svc, clusterIPSvc) // TODO(irbekrm): support IPv6, but need to investigate how kube proxy // sets up Service -> Pod routing when IPv6 is involved. - crl[discoveryv1.LabelServiceName] = clusterIPSvc.Name - crl[discoveryv1.LabelManagedBy] = "tailscale.com" eps := &discoveryv1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-ipv4", clusterIPSvc.Name), @@ -634,6 +631,19 @@ func egressSvcChildResourceLabels(svc *corev1.Service) map[string]string { } } +// egressEpsLabels returns labels to be added to an EndpointSlice created for an egress service. +func egressSvcEpsLabels(extNSvc, clusterIPSvc *corev1.Service) map[string]string { + l := egressSvcChildResourceLabels(extNSvc) + // Adding this label is what makes kube proxy set up rules to route traffic sent to the clusterIP Service to the + // endpoints defined on this EndpointSlice. + // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership + l[discoveryv1.LabelServiceName] = clusterIPSvc.Name + // Kubernetes recommends setting this label. 
+ // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#management + l[discoveryv1.LabelManagedBy] = "tailscale.com" + return l +} + func svcConfigurationUpToDate(svc *corev1.Service, l *zap.SugaredLogger) bool { cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured) if cond == nil { diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index ff29618df..bd9c0f7bc 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -376,6 +376,22 @@ func runReconcilers(opts reconcilerOpts) { startlog.Fatalf("failed setting up indexer for egress Services: %v", err) } + egressSvcFromEpsFilter := handler.EnqueueRequestsFromMapFunc(egressSvcFromEps) + err = builder. + ControllerManagedBy(mgr). + Named("egress-svcs-readiness-reconciler"). + Watches(&corev1.Service{}, egressSvcFilter). + Watches(&discoveryv1.EndpointSlice{}, egressSvcFromEpsFilter). + Complete(&egressSvcsReadinessReconciler{ + Client: mgr.GetClient(), + tsNamespace: opts.tailscaleNamespace, + clock: tstime.DefaultClock{}, + logger: opts.log.Named("egress-svcs-readiness-reconciler"), + }) + if err != nil { + startlog.Fatalf("could not create egress Services readiness reconciler: %v", err) + } + epsFilter := handler.EnqueueRequestsFromMapFunc(egressEpsHandler) podsFilter := handler.EnqueueRequestsFromMapFunc(egressEpsFromPGPods(mgr.GetClient(), opts.tailscaleNamespace)) secretsFilter := handler.EnqueueRequestsFromMapFunc(egressEpsFromPGStateSecrets(mgr.GetClient(), opts.tailscaleNamespace)) @@ -847,7 +863,7 @@ func egressEpsHandler(_ context.Context, o client.Object) []reconcile.Request { // returns reconciler requests for all egress EndpointSlices for that ProxyGroup. func egressEpsFromPGPods(cl client.Client, ns string) handler.MapFunc { return func(_ context.Context, o client.Object) []reconcile.Request { - if _, ok := o.GetLabels()[LabelManaged]; !ok { + if v, ok := o.GetLabels()[LabelManaged]; !ok || v != "true" { return nil } // TODO(irbekrm): for now this is good enough as all ProxyGroups are egress. Add a type check once we @@ -867,7 +883,7 @@ func egressEpsFromPGPods(cl client.Client, ns string) handler.MapFunc { // returns reconciler requests for all egress EndpointSlices for that ProxyGroup. func egressEpsFromPGStateSecrets(cl client.Client, ns string) handler.MapFunc { return func(_ context.Context, o client.Object) []reconcile.Request { - if _, ok := o.GetLabels()[LabelManaged]; !ok { + if v, ok := o.GetLabels()[LabelManaged]; !ok || v != "true" { return nil } // TODO(irbekrm): for now this is good enough as all ProxyGroups are egress. Add a type check once we @@ -886,6 +902,33 @@ func egressEpsFromPGStateSecrets(cl client.Client, ns string) handler.MapFunc { } } +// egressSvcFromEps is an event handler for EndpointSlices. If an EndpointSlice is for an egress ExternalName Service +// meant to be exposed on a ProxyGroup, returns a reconcile request for the Service. 
+func egressSvcFromEps(_ context.Context, o client.Object) []reconcile.Request { + if typ := o.GetLabels()[labelSvcType]; typ != typeEgress { + return nil + } + if v, ok := o.GetLabels()[LabelManaged]; !ok || v != "true" { + return nil + } + svcName, ok := o.GetLabels()[LabelParentName] + if !ok { + return nil + } + svcNs, ok := o.GetLabels()[LabelParentNamespace] + if !ok { + return nil + } + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: svcNs, + Name: svcName, + }, + }, + } +} + func reconcileRequestsForPG(pg string, cl client.Client, ns string) []reconcile.Request { epsList := discoveryv1.EndpointSliceList{} if err := cl.List(context.Background(), &epsList, diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index 07d05e1a5..358c2dd7a 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -172,14 +172,19 @@ type ConditionType string const ( ConnectorReady ConditionType = `ConnectorReady` ProxyClassReady ConditionType = `ProxyClassReady` - ProxyGroupReady ConditionType = `ProxyGroupReady` + ProxyGroupReady ConditionType = `TailscaleProxyGroupReady` ProxyReady ConditionType = `TailscaleProxyReady` // a Tailscale-specific condition type for corev1.Service RecorderReady ConditionType = `RecorderReady` - // EgressSvcValid is set to true if the user configured ExternalName Service for exposing a tailnet target on - // ProxyGroup nodes is valid. - EgressSvcValid ConditionType = `EgressSvcValid` - // EgressSvcConfigured is set to true if the configuration for the egress Service (proxy ConfigMap update, - // EndpointSlice for the Service) has been successfully applied. The Reason for this condition - // contains the name of the ProxyGroup and the hash of the Service ports and the tailnet target. - EgressSvcConfigured ConditionType = `EgressSvcConfigured` + // EgressSvcValid gets set on a user configured ExternalName Service that defines a tailnet target to be exposed + // on a ProxyGroup. + // Set to true if the user provided configuration is valid. + EgressSvcValid ConditionType = `TailscaleEgressSvcValid` + // EgressSvcConfigured gets set on a user configured ExternalName Service that defines a tailnet target to be exposed + // on a ProxyGroup. + // Set to true if the cluster resources for the service have been successfully configured. + EgressSvcConfigured ConditionType = `TailscaleEgressSvcConfigured` + // EgressSvcReady gets set on a user configured ExternalName Service that defines a tailnet target to be exposed + // on a ProxyGroup. + // Set to true if the service is ready to route cluster traffic. + EgressSvcReady ConditionType = `TailscaleEgressSvcReady` ) From 910b4e8e6a72f147cf957a1b51a066802caf5a9b Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 9 Oct 2024 10:28:12 -0700 Subject: [PATCH 0027/1708] syncs: add iterators to Map (#13739) Add Keys, Values, and All to iterate over all keys, values, and entries, respectively. Updates #11038 Signed-off-by: Joe Tsai --- syncs/syncs.go | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/syncs/syncs.go b/syncs/syncs.go index 0d40204d2..bfb7c1e04 100644 --- a/syncs/syncs.go +++ b/syncs/syncs.go @@ -6,6 +6,7 @@ package syncs import ( "context" + "iter" "sync" "sync/atomic" @@ -256,6 +257,8 @@ func (m *Map[K, V]) Delete(key K) { // Iteration stops if f returns false. Map changes are blocked during iteration. 
// A read lock is held for the entire duration of the iteration. // Use the [WithLock] method instead to mutate the map during iteration. +// +// Deprecated: Use [All], [Keys], or [Values] instead. func (m *Map[K, V]) Range(f func(key K, value V) bool) { m.mu.RLock() defer m.mu.RUnlock() @@ -266,6 +269,51 @@ func (m *Map[K, V]) Range(f func(key K, value V) bool) { } } +// Keys iterates over all keys in the map in an undefined order. +// A read lock is held for the entire duration of the iteration. +// Use the [WithLock] method instead to mutate the map during iteration. +func (m *Map[K, V]) Keys() iter.Seq[K] { + return func(yield func(K) bool) { + m.mu.RLock() + defer m.mu.RUnlock() + for k := range m.m { + if !yield(k) { + return + } + } + } +} + +// Values iterates over all values in the map in an undefined order. +// A read lock is held for the entire duration of the iteration. +// Use the [WithLock] method instead to mutate the map during iteration. +func (m *Map[K, V]) Values() iter.Seq[V] { + return func(yield func(V) bool) { + m.mu.RLock() + defer m.mu.RUnlock() + for _, v := range m.m { + if !yield(v) { + return + } + } + } +} + +// All iterates over all entries in the map in an undefined order. +// A read lock is held for the entire duration of the iteration. +// Use the [WithLock] method instead to mutate the map during iteration. +func (m *Map[K, V]) All() iter.Seq2[K, V] { + return func(yield func(K, V) bool) { + m.mu.RLock() + defer m.mu.RUnlock() + for k, v := range m.m { + if !yield(k, v) { + return + } + } + } +} + // WithLock calls f with the underlying map. // Use of m2 must not escape the duration of this call. // The write-lock is held for the entire duration of this call. From 2cadb80fb2b6b0d6ef9c9259b5b77c62c7e9d2d0 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Wed, 9 Oct 2024 12:05:33 -0500 Subject: [PATCH 0028/1708] util/vizerror: add WrapWithMessage Thus new function allows constructing vizerrors that combine a message appropriate for display to users with a wrapped underlying error. Updates tailscale/corp#23781 Signed-off-by: Percy Wegmann --- util/vizerror/vizerror.go | 58 ++++++++++++++++++++++++++-------- util/vizerror/vizerror_test.go | 22 +++++++++++++ 2 files changed, 67 insertions(+), 13 deletions(-) diff --git a/util/vizerror/vizerror.go b/util/vizerror/vizerror.go index 158786494..919d765d0 100644 --- a/util/vizerror/vizerror.go +++ b/util/vizerror/vizerror.go @@ -12,35 +12,67 @@ import ( // Error is an error that is safe to display to end users. type Error struct { - err error + publicErr error // visible to end users + wrapped error // internal } -// Error implements the error interface. +// Error implements the error interface. The returned string is safe to display +// to end users. func (e Error) Error() string { - return e.err.Error() + return e.publicErr.Error() } // New returns an error that formats as the given text. It always returns a vizerror.Error. -func New(text string) error { - return Error{errors.New(text)} +func New(publicMsg string) error { + err := errors.New(publicMsg) + return Error{ + publicErr: err, + wrapped: err, + } } -// Errorf returns an Error with the specified format and values. It always returns a vizerror.Error. -func Errorf(format string, a ...any) error { - return Error{fmt.Errorf(format, a...)} +// Errorf returns an Error with the specified publicMsgFormat and values. It always returns a vizerror.Error. 
+// +// Warning: avoid using an error as one of the format arguments, as this will cause the text +// of that error to be displayed to the end user (which is probably not what you want). +func Errorf(publicMsgFormat string, a ...any) error { + err := fmt.Errorf(publicMsgFormat, a...) + return Error{ + publicErr: err, + wrapped: err, + } } // Unwrap returns the underlying error. +// +// If the Error was constructed using [WrapWithMessage], this is the wrapped (internal) error +// and not the user-visible error message. func (e Error) Unwrap() error { - return e.err + return e.wrapped } -// Wrap wraps err with a vizerror.Error. -func Wrap(err error) error { - if err == nil { +// Wrap wraps publicErr with a vizerror.Error. +// +// Deprecated: this is almost always the wrong thing to do. Are you really sure +// you know exactly what err.Error() will stringify to and be safe to show to +// users? [WrapWithMessage] is probably what you want. +func Wrap(publicErr error) error { + if publicErr == nil { return nil } - return Error{err} + return Error{publicErr: publicErr, wrapped: publicErr} +} + +// WrapWithMessage wraps the given error with a message that's safe to display +// to end users. The text of the wrapped error will not be displayed to end +// users. +// +// WrapWithMessage should almost always be preferred to [Wrap]. +func WrapWithMessage(wrapped error, publicMsg string) error { + return Error{ + publicErr: errors.New(publicMsg), + wrapped: wrapped, + } } // As returns the first vizerror.Error in err's chain. diff --git a/util/vizerror/vizerror_test.go b/util/vizerror/vizerror_test.go index bbd2c07e5..242ca6462 100644 --- a/util/vizerror/vizerror_test.go +++ b/util/vizerror/vizerror_test.go @@ -42,3 +42,25 @@ func TestAs(t *testing.T) { t.Errorf("As() returned error %v, want %v", got, verr) } } + +func TestWrap(t *testing.T) { + wrapped := errors.New("wrapped") + err := Wrap(wrapped) + if err.Error() != "wrapped" { + t.Errorf(`Wrap(wrapped).Error() = %q, want %q`, err.Error(), "wrapped") + } + if errors.Unwrap(err) != wrapped { + t.Errorf("Unwrap = %q, want %q", errors.Unwrap(err), wrapped) + } +} + +func TestWrapWithMessage(t *testing.T) { + wrapped := errors.New("wrapped") + err := WrapWithMessage(wrapped, "safe") + if err.Error() != "safe" { + t.Errorf(`WrapWithMessage(wrapped, "safe").Error() = %q, want %q`, err.Error(), "safe") + } + if errors.Unwrap(err) != wrapped { + t.Errorf("Unwrap = %q, want %q", errors.Unwrap(err), wrapped) + } +} From c763b7a7db6c3e3f35ed973881108c5adef7bae8 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 9 Oct 2024 13:48:18 -0700 Subject: [PATCH 0029/1708] syncs: delete Map.Range, update callers to iterators Updates #11038 Change-Id: I2819fed896cc4035aba5e4e141b52c12637373b1 Signed-off-by: Brad Fitzpatrick --- syncs/syncs.go | 16 ---------------- syncs/syncs_test.go | 10 ++++------ taildrop/taildrop.go | 5 ++--- tstest/natlab/vnet/vnet.go | 5 ++--- wgengine/netstack/netstack.go | 7 +++---- 5 files changed, 11 insertions(+), 32 deletions(-) diff --git a/syncs/syncs.go b/syncs/syncs.go index bfb7c1e04..4496581a6 100644 --- a/syncs/syncs.go +++ b/syncs/syncs.go @@ -253,22 +253,6 @@ func (m *Map[K, V]) Delete(key K) { delete(m.m, key) } -// Range iterates over the map in an undefined order calling f for each entry. -// Iteration stops if f returns false. Map changes are blocked during iteration. -// A read lock is held for the entire duration of the iteration. -// Use the [WithLock] method instead to mutate the map during iteration. 
-// -// Deprecated: Use [All], [Keys], or [Values] instead. -func (m *Map[K, V]) Range(f func(key K, value V) bool) { - m.mu.RLock() - defer m.mu.RUnlock() - for k, v := range m.m { - if !f(k, v) { - return - } - } -} - // Keys iterates over all keys in the map in an undefined order. // A read lock is held for the entire duration of the iteration. // Use the [WithLock] method instead to mutate the map during iteration. diff --git a/syncs/syncs_test.go b/syncs/syncs_test.go index 0748dcb72..ee3711e76 100644 --- a/syncs/syncs_test.go +++ b/syncs/syncs_test.go @@ -160,10 +160,9 @@ func TestMap(t *testing.T) { } got := map[string]int{} want := map[string]int{"one": 1, "two": 2, "three": 3} - m.Range(func(k string, v int) bool { + for k, v := range m.All() { got[k] = v - return true - }) + } if d := cmp.Diff(got, want); d != "" { t.Errorf("Range mismatch (-got +want):\n%s", d) } @@ -178,10 +177,9 @@ func TestMap(t *testing.T) { m.Delete("noexist") got = map[string]int{} want = map[string]int{} - m.Range(func(k string, v int) bool { + for k, v := range m.All() { got[k] = v - return true - }) + } if d := cmp.Diff(got, want); d != "" { t.Errorf("Range mismatch (-got +want):\n%s", d) } diff --git a/taildrop/taildrop.go b/taildrop/taildrop.go index 9ad0e1a7e..e425027c5 100644 --- a/taildrop/taildrop.go +++ b/taildrop/taildrop.go @@ -226,7 +226,7 @@ func (m *Manager) IncomingFiles() []ipn.PartialFile { // in JSON to clients. They distinguish between empty and non-nil // to know whether a Notify should be able about files. files := make([]ipn.PartialFile, 0) - m.incomingFiles.Range(func(k incomingFileKey, f *incomingFile) bool { + for k, f := range m.incomingFiles.All() { f.mu.Lock() defer f.mu.Unlock() files = append(files, ipn.PartialFile{ @@ -238,8 +238,7 @@ func (m *Manager) IncomingFiles() []ipn.PartialFile { FinalPath: f.finalPath, Done: f.done, }) - return true - }) + } return files } diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index 919ae1fa1..e7991b3e6 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -974,13 +974,12 @@ func (n *network) writeEth(res []byte) bool { if dstMAC.IsBroadcast() || (n.v6 && etherType == layers.EthernetTypeIPv6 && dstMAC == macAllNodes) { num := 0 - n.writers.Range(func(mac MAC, nw networkWriter) bool { + for mac, nw := range n.writers.All() { if mac != srcMAC { num++ nw.write(res) } - return true - }) + } return num > 0 } if srcMAC == dstMAC { diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index efb328102..3185c5d55 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -414,15 +414,14 @@ func init() { // endpoint, and name collisions will result in Prometheus scraping errors. clientmetric.NewCounterFunc("netstack_tcp_forward_dropped_attempts", func() int64 { var total uint64 - stacksForMetrics.Range(func(ns *Impl, _ struct{}) bool { + for ns := range stacksForMetrics.Keys() { delta := ns.ipstack.Stats().TCP.ForwardMaxInFlightDrop.Value() if total+delta > math.MaxInt64 { total = math.MaxInt64 - return false + break } total += delta - return true - }) + } return int64(total) }) } From 5b7303817eb5fe9d63b05a129be33d36d50301c7 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 9 Oct 2024 14:03:37 -0700 Subject: [PATCH 0030/1708] syncs: allocate map with Map.WithLock (#13755) One primary purpose of WithLock is to mutate the underlying map. However, this can lead to a panic if it happens to be nil. Thus, always allocate a map before passing it to f. 
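A minimal sketch of the failure mode being fixed, assuming a zero-value syncs.Map whose inner map has not been allocated yet:

	var m syncs.Map[string, int]

	// Before this change, f received a nil map here, so the write below
	// panicked with "assignment to entry in nil map". WithLock now
	// allocates the map before calling f.
	m.WithLock(func(m2 map[string]int) {
		m2["foo"] = 1
	})
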
Updates tailscale/corp#11038 Signed-off-by: Joe Tsai --- syncs/syncs.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/syncs/syncs.go b/syncs/syncs.go index 4496581a6..acc0c88f2 100644 --- a/syncs/syncs.go +++ b/syncs/syncs.go @@ -304,6 +304,9 @@ func (m *Map[K, V]) All() iter.Seq2[K, V] { func (m *Map[K, V]) WithLock(f func(m2 map[K]V)) { m.mu.Lock() defer m.mu.Unlock() + if m.m == nil { + m.m = make(map[K]V) + } f(m.m) } From 52ef27ab7c3078513ea7e265ce4a1b3e0adea833 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 9 Oct 2024 14:09:58 -0700 Subject: [PATCH 0031/1708] taildrop: fix defer in loop (#13757) However, this affects the scope of a defer. Updates #11038 Signed-off-by: Joe Tsai --- taildrop/taildrop.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/taildrop/taildrop.go b/taildrop/taildrop.go index e425027c5..4d14787af 100644 --- a/taildrop/taildrop.go +++ b/taildrop/taildrop.go @@ -228,7 +228,6 @@ func (m *Manager) IncomingFiles() []ipn.PartialFile { files := make([]ipn.PartialFile, 0) for k, f := range m.incomingFiles.All() { f.mu.Lock() - defer f.mu.Unlock() files = append(files, ipn.PartialFile{ Name: k.name, Started: f.started, @@ -238,6 +237,7 @@ func (m *Manager) IncomingFiles() []ipn.PartialFile { FinalPath: f.finalPath, Done: f.done, }) + f.mu.Unlock() } return files } From 367fba8520cf7bc4ce9004c1861fce54db03d5d2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 9 Oct 2024 18:16:42 -0700 Subject: [PATCH 0032/1708] control/controlhttp: don't link ts2021 server + websocket code on iOS We probably shouldn't link it in anywhere, but let's fix iOS for now. Updates #13762 Updates tailscale/corp#20099 Change-Id: Idac116e9340434334c256acba3866f02bd19827c Signed-off-by: Brad Fitzpatrick --- control/controlhttp/server.go | 2 ++ tstest/iosdeps/iosdeps_test.go | 8 +++++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/control/controlhttp/server.go b/control/controlhttp/server.go index 6a0d2bc56..7c3dd5618 100644 --- a/control/controlhttp/server.go +++ b/control/controlhttp/server.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ios + package controlhttp import ( diff --git a/tstest/iosdeps/iosdeps_test.go b/tstest/iosdeps/iosdeps_test.go index 08df9a930..273872a64 100644 --- a/tstest/iosdeps/iosdeps_test.go +++ b/tstest/iosdeps/iosdeps_test.go @@ -14,9 +14,11 @@ func TestDeps(t *testing.T) { GOOS: "ios", GOARCH: "arm64", BadDeps: map[string]string{ - "testing": "do not use testing package in production code", - "text/template": "linker bloat (MethodByName)", - "html/template": "linker bloat (MethodByName)", + "testing": "do not use testing package in production code", + "text/template": "linker bloat (MethodByName)", + "html/template": "linker bloat (MethodByName)", + "tailscale.com/net/wsconn": "https://github.com/tailscale/tailscale/issues/13762", + "github.com/coder/websocket": "https://github.com/tailscale/tailscale/issues/13762", }, }.Check(t) } From fb420be1769426c96af8a5e58adcf6562e3d51a7 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 9 Oct 2024 17:55:42 -0700 Subject: [PATCH 0033/1708] safesocket: don't depend on go-ps on iOS There's never a tailscaled on iOS. And we can't run child processes to look for it anyway. 
Updates tailscale/corp#20099 Change-Id: Ieb3776f4bb440c4f1c442fdd169bacbe17f23ddb Signed-off-by: Brad Fitzpatrick --- safesocket/safesocket_ps.go | 2 +- tstest/iosdeps/iosdeps_test.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/safesocket/safesocket_ps.go b/safesocket/safesocket_ps.go index f7d97f7fd..18197846d 100644 --- a/safesocket/safesocket_ps.go +++ b/safesocket/safesocket_ps.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || windows || darwin || freebsd +//go:build linux || windows || (darwin && !ios) || freebsd package safesocket diff --git a/tstest/iosdeps/iosdeps_test.go b/tstest/iosdeps/iosdeps_test.go index 273872a64..40e084c21 100644 --- a/tstest/iosdeps/iosdeps_test.go +++ b/tstest/iosdeps/iosdeps_test.go @@ -19,6 +19,7 @@ func TestDeps(t *testing.T) { "html/template": "linker bloat (MethodByName)", "tailscale.com/net/wsconn": "https://github.com/tailscale/tailscale/issues/13762", "github.com/coder/websocket": "https://github.com/tailscale/tailscale/issues/13762", + "github.com/mitchellh/go-ps": "https://github.com/tailscale/tailscale/pull/13759", }, }.Check(t) } From 2531065d1010424866ef764bbea3aa3708cb49b2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 9 Oct 2024 18:06:56 -0700 Subject: [PATCH 0034/1708] clientupdate, ipn/localapi: don't use google/uuid, thin iOS deps We were using google/uuid in two places and that brought in database/sql/driver. We didn't need it in either place. Updates #13760 Updates tailscale/corp#20099 Change-Id: Ieed32f1bebe35d35f47ec5a2a429268f24f11f1f Signed-off-by: Brad Fitzpatrick --- clientupdate/clientupdate.go | 199 ----------------------- clientupdate/clientupdate_notwindows.go | 10 ++ clientupdate/clientupdate_windows.go | 205 +++++++++++++++++++++++- cmd/tailscale/depaware.txt | 6 +- cmd/tailscaled/depaware.txt | 4 +- ipn/localapi/localapi.go | 3 +- tstest/iosdeps/iosdeps_test.go | 2 + 7 files changed, 219 insertions(+), 210 deletions(-) create mode 100644 clientupdate/clientupdate_notwindows.go diff --git a/clientupdate/clientupdate.go b/clientupdate/clientupdate.go index 67edce05b..dbca58722 100644 --- a/clientupdate/clientupdate.go +++ b/clientupdate/clientupdate.go @@ -27,11 +27,9 @@ import ( "strconv" "strings" - "github.com/google/uuid" "tailscale.com/clientupdate/distsign" "tailscale.com/types/logger" "tailscale.com/util/cmpver" - "tailscale.com/util/winutil" "tailscale.com/version" "tailscale.com/version/distro" ) @@ -756,164 +754,6 @@ func (up *Updater) updateMacAppStore() error { return nil } -const ( - // winMSIEnv is the environment variable that, if set, is the MSI file for - // the update command to install. It's passed like this so we can stop the - // tailscale.exe process from running before the msiexec process runs and - // tries to overwrite ourselves. - winMSIEnv = "TS_UPDATE_WIN_MSI" - // winExePathEnv is the environment variable that is set along with - // winMSIEnv and carries the full path of the calling tailscale.exe binary. - // It is used to re-launch the GUI process (tailscale-ipn.exe) after - // install is complete. - winExePathEnv = "TS_UPDATE_WIN_EXE_PATH" -) - -var ( - verifyAuthenticode func(string) error // set non-nil only on Windows - markTempFileFunc func(string) error // set non-nil only on Windows -) - -func (up *Updater) updateWindows() error { - if msi := os.Getenv(winMSIEnv); msi != "" { - // stdout/stderr from this part of the install could be lost since the - // parent tailscaled is replaced. 
Create a temp log file to have some - // output to debug with in case update fails. - close, err := up.switchOutputToFile() - if err != nil { - up.Logf("failed to create log file for installation: %v; proceeding with existing outputs", err) - } else { - defer close.Close() - } - - up.Logf("installing %v ...", msi) - if err := up.installMSI(msi); err != nil { - up.Logf("MSI install failed: %v", err) - return err - } - - up.Logf("success.") - return nil - } - - if !winutil.IsCurrentProcessElevated() { - return errors.New(`update must be run as Administrator - -you can run the command prompt as Administrator one of these ways: -* right-click cmd.exe, select 'Run as administrator' -* press Windows+x, then press a -* press Windows+r, type in "cmd", then press Ctrl+Shift+Enter`) - } - ver, err := requestedTailscaleVersion(up.Version, up.Track) - if err != nil { - return err - } - arch := runtime.GOARCH - if arch == "386" { - arch = "x86" - } - if !up.confirm(ver) { - return nil - } - - tsDir := filepath.Join(os.Getenv("ProgramData"), "Tailscale") - msiDir := filepath.Join(tsDir, "MSICache") - if fi, err := os.Stat(tsDir); err != nil { - return fmt.Errorf("expected %s to exist, got stat error: %w", tsDir, err) - } else if !fi.IsDir() { - return fmt.Errorf("expected %s to be a directory; got %v", tsDir, fi.Mode()) - } - if err := os.MkdirAll(msiDir, 0700); err != nil { - return err - } - up.cleanupOldDownloads(filepath.Join(msiDir, "*.msi")) - pkgsPath := fmt.Sprintf("%s/tailscale-setup-%s-%s.msi", up.Track, ver, arch) - msiTarget := filepath.Join(msiDir, path.Base(pkgsPath)) - if err := up.downloadURLToFile(pkgsPath, msiTarget); err != nil { - return err - } - - up.Logf("verifying MSI authenticode...") - if err := verifyAuthenticode(msiTarget); err != nil { - return fmt.Errorf("authenticode verification of %s failed: %w", msiTarget, err) - } - up.Logf("authenticode verification succeeded") - - up.Logf("making tailscale.exe copy to switch to...") - up.cleanupOldDownloads(filepath.Join(os.TempDir(), "tailscale-updater-*.exe")) - selfOrig, selfCopy, err := makeSelfCopy() - if err != nil { - return err - } - defer os.Remove(selfCopy) - up.Logf("running tailscale.exe copy for final install...") - - cmd := exec.Command(selfCopy, "update") - cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winExePathEnv+"="+selfOrig) - cmd.Stdout = up.Stderr - cmd.Stderr = up.Stderr - cmd.Stdin = os.Stdin - if err := cmd.Start(); err != nil { - return err - } - // Once it's started, exit ourselves, so the binary is free - // to be replaced. - os.Exit(0) - panic("unreachable") -} - -func (up *Updater) switchOutputToFile() (io.Closer, error) { - var logFilePath string - exePath, err := os.Executable() - if err != nil { - logFilePath = filepath.Join(os.TempDir(), "tailscale-updater.log") - } else { - logFilePath = strings.TrimSuffix(exePath, ".exe") + ".log" - } - - up.Logf("writing update output to %q", logFilePath) - logFile, err := os.Create(logFilePath) - if err != nil { - return nil, err - } - - up.Logf = func(m string, args ...any) { - fmt.Fprintf(logFile, m+"\n", args...) 
- } - up.Stdout = logFile - up.Stderr = logFile - return logFile, nil -} - -func (up *Updater) installMSI(msi string) error { - var err error - for tries := 0; tries < 2; tries++ { - cmd := exec.Command("msiexec.exe", "/i", filepath.Base(msi), "/quiet", "/norestart", "/qn") - cmd.Dir = filepath.Dir(msi) - cmd.Stdout = up.Stdout - cmd.Stderr = up.Stderr - cmd.Stdin = os.Stdin - err = cmd.Run() - if err == nil { - break - } - up.Logf("Install attempt failed: %v", err) - uninstallVersion := up.currentVersion - if v := os.Getenv("TS_DEBUG_UNINSTALL_VERSION"); v != "" { - uninstallVersion = v - } - // Assume it's a downgrade, which msiexec won't permit. Uninstall our current version first. - up.Logf("Uninstalling current version %q for downgrade...", uninstallVersion) - cmd = exec.Command("msiexec.exe", "/x", msiUUIDForVersion(uninstallVersion), "/norestart", "/qn") - cmd.Stdout = up.Stdout - cmd.Stderr = up.Stderr - cmd.Stdin = os.Stdin - err = cmd.Run() - up.Logf("msiexec uninstall: %v", err) - } - return err -} - // cleanupOldDownloads removes all files matching glob (see filepath.Glob). // Only regular files are removed, so the glob must match specific files and // not directories. @@ -938,45 +778,6 @@ func (up *Updater) cleanupOldDownloads(glob string) { } } -func msiUUIDForVersion(ver string) string { - arch := runtime.GOARCH - if arch == "386" { - arch = "x86" - } - track, err := versionToTrack(ver) - if err != nil { - track = UnstableTrack - } - msiURL := fmt.Sprintf("https://pkgs.tailscale.com/%s/tailscale-setup-%s-%s.msi", track, ver, arch) - return "{" + strings.ToUpper(uuid.NewSHA1(uuid.NameSpaceURL, []byte(msiURL)).String()) + "}" -} - -func makeSelfCopy() (origPathExe, tmpPathExe string, err error) { - selfExe, err := os.Executable() - if err != nil { - return "", "", err - } - f, err := os.Open(selfExe) - if err != nil { - return "", "", err - } - defer f.Close() - f2, err := os.CreateTemp("", "tailscale-updater-*.exe") - if err != nil { - return "", "", err - } - if f := markTempFileFunc; f != nil { - if err := f(f2.Name()); err != nil { - return "", "", err - } - } - if _, err := io.Copy(f2, f); err != nil { - f2.Close() - return "", "", err - } - return selfExe, f2.Name(), f2.Close() -} - func (up *Updater) downloadURLToFile(pathSrc, fileDst string) (ret error) { c, err := distsign.NewClient(up.Logf, up.PkgsAddr) if err != nil { diff --git a/clientupdate/clientupdate_notwindows.go b/clientupdate/clientupdate_notwindows.go new file mode 100644 index 000000000..edadc210c --- /dev/null +++ b/clientupdate/clientupdate_notwindows.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !windows + +package clientupdate + +func (up *Updater) updateWindows() error { + panic("unreachable") +} diff --git a/clientupdate/clientupdate_windows.go b/clientupdate/clientupdate_windows.go index 2f6899a60..973722974 100644 --- a/clientupdate/clientupdate_windows.go +++ b/clientupdate/clientupdate_windows.go @@ -7,13 +7,57 @@ package clientupdate import ( + "errors" + "fmt" + "io" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strings" + + "github.com/google/uuid" "golang.org/x/sys/windows" + "tailscale.com/util/winutil" "tailscale.com/util/winutil/authenticode" ) -func init() { - markTempFileFunc = markTempFileWindows - verifyAuthenticode = verifyTailscale +const ( + // winMSIEnv is the environment variable that, if set, is the MSI file for + // the update command to install. 
It's passed like this so we can stop the + // tailscale.exe process from running before the msiexec process runs and + // tries to overwrite ourselves. + winMSIEnv = "TS_UPDATE_WIN_MSI" + // winExePathEnv is the environment variable that is set along with + // winMSIEnv and carries the full path of the calling tailscale.exe binary. + // It is used to re-launch the GUI process (tailscale-ipn.exe) after + // install is complete. + winExePathEnv = "TS_UPDATE_WIN_EXE_PATH" +) + +func makeSelfCopy() (origPathExe, tmpPathExe string, err error) { + selfExe, err := os.Executable() + if err != nil { + return "", "", err + } + f, err := os.Open(selfExe) + if err != nil { + return "", "", err + } + defer f.Close() + f2, err := os.CreateTemp("", "tailscale-updater-*.exe") + if err != nil { + return "", "", err + } + if err := markTempFileWindows(f2.Name()); err != nil { + return "", "", err + } + if _, err := io.Copy(f2, f); err != nil { + f2.Close() + return "", "", err + } + return selfExe, f2.Name(), f2.Close() } func markTempFileWindows(name string) error { @@ -23,6 +67,159 @@ func markTempFileWindows(name string) error { const certSubjectTailscale = "Tailscale Inc." -func verifyTailscale(path string) error { +func verifyAuthenticode(path string) error { return authenticode.Verify(path, certSubjectTailscale) } + +func (up *Updater) updateWindows() error { + if msi := os.Getenv(winMSIEnv); msi != "" { + // stdout/stderr from this part of the install could be lost since the + // parent tailscaled is replaced. Create a temp log file to have some + // output to debug with in case update fails. + close, err := up.switchOutputToFile() + if err != nil { + up.Logf("failed to create log file for installation: %v; proceeding with existing outputs", err) + } else { + defer close.Close() + } + + up.Logf("installing %v ...", msi) + if err := up.installMSI(msi); err != nil { + up.Logf("MSI install failed: %v", err) + return err + } + + up.Logf("success.") + return nil + } + + if !winutil.IsCurrentProcessElevated() { + return errors.New(`update must be run as Administrator + +you can run the command prompt as Administrator one of these ways: +* right-click cmd.exe, select 'Run as administrator' +* press Windows+x, then press a +* press Windows+r, type in "cmd", then press Ctrl+Shift+Enter`) + } + ver, err := requestedTailscaleVersion(up.Version, up.Track) + if err != nil { + return err + } + arch := runtime.GOARCH + if arch == "386" { + arch = "x86" + } + if !up.confirm(ver) { + return nil + } + + tsDir := filepath.Join(os.Getenv("ProgramData"), "Tailscale") + msiDir := filepath.Join(tsDir, "MSICache") + if fi, err := os.Stat(tsDir); err != nil { + return fmt.Errorf("expected %s to exist, got stat error: %w", tsDir, err) + } else if !fi.IsDir() { + return fmt.Errorf("expected %s to be a directory; got %v", tsDir, fi.Mode()) + } + if err := os.MkdirAll(msiDir, 0700); err != nil { + return err + } + up.cleanupOldDownloads(filepath.Join(msiDir, "*.msi")) + pkgsPath := fmt.Sprintf("%s/tailscale-setup-%s-%s.msi", up.Track, ver, arch) + msiTarget := filepath.Join(msiDir, path.Base(pkgsPath)) + if err := up.downloadURLToFile(pkgsPath, msiTarget); err != nil { + return err + } + + up.Logf("verifying MSI authenticode...") + if err := verifyAuthenticode(msiTarget); err != nil { + return fmt.Errorf("authenticode verification of %s failed: %w", msiTarget, err) + } + up.Logf("authenticode verification succeeded") + + up.Logf("making tailscale.exe copy to switch to...") + up.cleanupOldDownloads(filepath.Join(os.TempDir(), 
"tailscale-updater-*.exe")) + selfOrig, selfCopy, err := makeSelfCopy() + if err != nil { + return err + } + defer os.Remove(selfCopy) + up.Logf("running tailscale.exe copy for final install...") + + cmd := exec.Command(selfCopy, "update") + cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winExePathEnv+"="+selfOrig) + cmd.Stdout = up.Stderr + cmd.Stderr = up.Stderr + cmd.Stdin = os.Stdin + if err := cmd.Start(); err != nil { + return err + } + // Once it's started, exit ourselves, so the binary is free + // to be replaced. + os.Exit(0) + panic("unreachable") +} + +func (up *Updater) installMSI(msi string) error { + var err error + for tries := 0; tries < 2; tries++ { + cmd := exec.Command("msiexec.exe", "/i", filepath.Base(msi), "/quiet", "/norestart", "/qn") + cmd.Dir = filepath.Dir(msi) + cmd.Stdout = up.Stdout + cmd.Stderr = up.Stderr + cmd.Stdin = os.Stdin + err = cmd.Run() + if err == nil { + break + } + up.Logf("Install attempt failed: %v", err) + uninstallVersion := up.currentVersion + if v := os.Getenv("TS_DEBUG_UNINSTALL_VERSION"); v != "" { + uninstallVersion = v + } + // Assume it's a downgrade, which msiexec won't permit. Uninstall our current version first. + up.Logf("Uninstalling current version %q for downgrade...", uninstallVersion) + cmd = exec.Command("msiexec.exe", "/x", msiUUIDForVersion(uninstallVersion), "/norestart", "/qn") + cmd.Stdout = up.Stdout + cmd.Stderr = up.Stderr + cmd.Stdin = os.Stdin + err = cmd.Run() + up.Logf("msiexec uninstall: %v", err) + } + return err +} + +func msiUUIDForVersion(ver string) string { + arch := runtime.GOARCH + if arch == "386" { + arch = "x86" + } + track, err := versionToTrack(ver) + if err != nil { + track = UnstableTrack + } + msiURL := fmt.Sprintf("https://pkgs.tailscale.com/%s/tailscale-setup-%s-%s.msi", track, ver, arch) + return "{" + strings.ToUpper(uuid.NewSHA1(uuid.NameSpaceURL, []byte(msiURL)).String()) + "}" +} + +func (up *Updater) switchOutputToFile() (io.Closer, error) { + var logFilePath string + exePath, err := os.Executable() + if err != nil { + logFilePath = filepath.Join(os.TempDir(), "tailscale-updater.log") + } else { + logFilePath = strings.TrimSuffix(exePath, ".exe") + ".log" + } + + up.Logf("writing update output to %q", logFilePath) + logFile, err := os.Create(logFilePath) + if err != nil { + return nil, err + } + + up.Logf = func(m string, args ...any) { + fmt.Fprintf(logFile, m+"\n", args...) 
+ } + up.Stdout = logFile + up.Stderr = logFile + return logFile, nil +} diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 7b9d80af8..8c9a9b285 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -26,7 +26,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L github.com/google/nftables/expr from github.com/google/nftables+ L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ L github.com/google/nftables/xt from github.com/google/nftables/expr+ - github.com/google/uuid from tailscale.com/clientupdate+ + DW github.com/google/uuid from tailscale.com/clientupdate+ github.com/gorilla/csrf from tailscale.com/client/web github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ @@ -178,7 +178,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/truncate from tailscale.com/cmd/tailscale/cli tailscale.com/util/usermetric from tailscale.com/health tailscale.com/util/vizerror from tailscale.com/tailcfg+ - 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ + W 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ tailscale.com/version from tailscale.com/client/web+ @@ -258,7 +258,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/tls from github.com/miekg/dns+ crypto/x509 from crypto/tls+ crypto/x509/pkix from crypto/x509+ - database/sql/driver from github.com/google/uuid + DW database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from crypto/internal/nistec+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 0d8e51eda..3661f9a50 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -111,7 +111,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/google/nftables/expr from github.com/google/nftables+ L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ L github.com/google/nftables/xt from github.com/google/nftables/expr+ - github.com/google/uuid from tailscale.com/clientupdate+ + DW github.com/google/uuid from tailscale.com/clientupdate+ github.com/gorilla/csrf from tailscale.com/client/web github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ @@ -508,7 +508,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ crypto/x509 from crypto/tls+ crypto/x509/pkix from crypto/x509+ - database/sql/driver from github.com/google/uuid + DW database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from crypto/internal/nistec+ diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 7c076e8ab..528304bab 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -31,7 +31,6 @@ import ( "sync" "time" - "github.com/google/uuid" "golang.org/x/net/dns/dnsmessage" "tailscale.com/client/tailscale/apitype" "tailscale.com/clientupdate" @@ -1563,7 +1562,7 @@ func (h *Handler) serveFilePut(w 
http.ResponseWriter, r *http.Request) { switch r.Method { case "PUT": file := ipn.OutgoingFile{ - ID: uuid.Must(uuid.NewRandom()).String(), + ID: rands.HexString(30), PeerID: peerID, Name: filenameEscaped, DeclaredSize: r.ContentLength, diff --git a/tstest/iosdeps/iosdeps_test.go b/tstest/iosdeps/iosdeps_test.go index 40e084c21..9679e2cfc 100644 --- a/tstest/iosdeps/iosdeps_test.go +++ b/tstest/iosdeps/iosdeps_test.go @@ -20,6 +20,8 @@ func TestDeps(t *testing.T) { "tailscale.com/net/wsconn": "https://github.com/tailscale/tailscale/issues/13762", "github.com/coder/websocket": "https://github.com/tailscale/tailscale/issues/13762", "github.com/mitchellh/go-ps": "https://github.com/tailscale/tailscale/pull/13759", + "database/sql/driver": "iOS doesn't use an SQL database", + "github.com/google/uuid": "see tailscale/tailscale#13760", }, }.Check(t) } From db1519cc9f2acc17f4345e7c1f037c87a4baa9d2 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 10 Oct 2024 13:00:32 +0100 Subject: [PATCH 0035/1708] k8s-operator/apis: revert ProxyGroup readiness cond name change (#13770) No need to prefix this with 'Tailscale' for tailscale.com custom resource types. Updates tailscale/tailscale#13406 Signed-off-by: Irbe Krumina --- k8s-operator/apis/v1alpha1/types_connector.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index 358c2dd7a..27afd0838 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -172,7 +172,7 @@ type ConditionType string const ( ConnectorReady ConditionType = `ConnectorReady` ProxyClassReady ConditionType = `ProxyClassReady` - ProxyGroupReady ConditionType = `TailscaleProxyGroupReady` + ProxyGroupReady ConditionType = `ProxyGroupReady` ProxyReady ConditionType = `TailscaleProxyReady` // a Tailscale-specific condition type for corev1.Service RecorderReady ConditionType = `RecorderReady` // EgressSvcValid gets set on a user configured ExternalName Service that defines a tailnet target to be exposed From 1938685d397a0cfdcb0c1af5365730ae55a1f74f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 9 Oct 2024 20:34:11 -0700 Subject: [PATCH 0036/1708] clientupdate: don't link distsign on platforms that don't download Updates tailscale/corp#20099 Change-Id: Ie3b782379b19d5f7890a8d3a378096b4f3e8a612 Signed-off-by: Brad Fitzpatrick --- clientupdate/clientupdate.go | 9 --------- clientupdate/clientupdate_downloads.go | 20 ++++++++++++++++++++ clientupdate/clientupdate_not_downloads.go | 10 ++++++++++ cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- tstest/iosdeps/iosdeps_test.go | 17 +++++++++-------- 7 files changed, 42 insertions(+), 20 deletions(-) create mode 100644 clientupdate/clientupdate_downloads.go create mode 100644 clientupdate/clientupdate_not_downloads.go diff --git a/clientupdate/clientupdate.go b/clientupdate/clientupdate.go index dbca58722..7fa84d67f 100644 --- a/clientupdate/clientupdate.go +++ b/clientupdate/clientupdate.go @@ -27,7 +27,6 @@ import ( "strconv" "strings" - "tailscale.com/clientupdate/distsign" "tailscale.com/types/logger" "tailscale.com/util/cmpver" "tailscale.com/version" @@ -778,14 +777,6 @@ func (up *Updater) cleanupOldDownloads(glob string) { } } -func (up *Updater) downloadURLToFile(pathSrc, fileDst string) (ret error) { - c, err := distsign.NewClient(up.Logf, up.PkgsAddr) - if err != nil { - return err - } - return 
c.Download(context.Background(), pathSrc, fileDst) -} - func (up *Updater) updateFreeBSD() (err error) { if up.Version != "" { return errors.New("installing a specific version on FreeBSD is not supported") diff --git a/clientupdate/clientupdate_downloads.go b/clientupdate/clientupdate_downloads.go new file mode 100644 index 000000000..18d3176b4 --- /dev/null +++ b/clientupdate/clientupdate_downloads.go @@ -0,0 +1,20 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build (linux && !android) || windows + +package clientupdate + +import ( + "context" + + "tailscale.com/clientupdate/distsign" +) + +func (up *Updater) downloadURLToFile(pathSrc, fileDst string) (ret error) { + c, err := distsign.NewClient(up.Logf, up.PkgsAddr) + if err != nil { + return err + } + return c.Download(context.Background(), pathSrc, fileDst) +} diff --git a/clientupdate/clientupdate_not_downloads.go b/clientupdate/clientupdate_not_downloads.go new file mode 100644 index 000000000..057b4f2cd --- /dev/null +++ b/clientupdate/clientupdate_not_downloads.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !((linux && !android) || windows) + +package clientupdate + +func (up *Updater) downloadURLToFile(pathSrc, fileDst string) (ret error) { + panic("unreachable") +} diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index c9f035372..b77ea22ef 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -654,7 +654,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ - tailscale.com/clientupdate/distsign from tailscale.com/clientupdate + LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnlocal+ tailscale.com/control/controlhttp from tailscale.com/control/controlclient diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 8c9a9b285..2c644d1be 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -80,7 +80,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/cmd/tailscale/cli tailscale.com/clientupdate from tailscale.com/client/web+ - tailscale.com/clientupdate/distsign from tailscale.com/clientupdate + LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/cmd/tailscale/cli from tailscale.com/cmd/tailscale tailscale.com/cmd/tailscale/cli/ffcomplete from tailscale.com/cmd/tailscale/cli tailscale.com/cmd/tailscale/cli/ffcomplete/internal from tailscale.com/cmd/tailscale/cli/ffcomplete diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 3661f9a50..6f71a88a9 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -244,7 +244,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ - tailscale.com/clientupdate/distsign 
from tailscale.com/clientupdate + LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled+ tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ diff --git a/tstest/iosdeps/iosdeps_test.go b/tstest/iosdeps/iosdeps_test.go index 9679e2cfc..6daa70c3b 100644 --- a/tstest/iosdeps/iosdeps_test.go +++ b/tstest/iosdeps/iosdeps_test.go @@ -14,14 +14,15 @@ func TestDeps(t *testing.T) { GOOS: "ios", GOARCH: "arm64", BadDeps: map[string]string{ - "testing": "do not use testing package in production code", - "text/template": "linker bloat (MethodByName)", - "html/template": "linker bloat (MethodByName)", - "tailscale.com/net/wsconn": "https://github.com/tailscale/tailscale/issues/13762", - "github.com/coder/websocket": "https://github.com/tailscale/tailscale/issues/13762", - "github.com/mitchellh/go-ps": "https://github.com/tailscale/tailscale/pull/13759", - "database/sql/driver": "iOS doesn't use an SQL database", - "github.com/google/uuid": "see tailscale/tailscale#13760", + "testing": "do not use testing package in production code", + "text/template": "linker bloat (MethodByName)", + "html/template": "linker bloat (MethodByName)", + "tailscale.com/net/wsconn": "https://github.com/tailscale/tailscale/issues/13762", + "github.com/coder/websocket": "https://github.com/tailscale/tailscale/issues/13762", + "github.com/mitchellh/go-ps": "https://github.com/tailscale/tailscale/pull/13759", + "database/sql/driver": "iOS doesn't use an SQL database", + "github.com/google/uuid": "see tailscale/tailscale#13760", + "tailscale.com/clientupdate/distsign": "downloads via AppStore, not distsign", }, }.Check(t) } From 91f58c5e6330d97b35130a3b626cb01762879273 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Wed, 9 Oct 2024 18:14:50 -0400 Subject: [PATCH 0037/1708] tsnet: fix panic caused by logging after test finishes Updates #13773 Signed-off-by: Andrew Dunham Change-Id: I95e03eb6aef1639bd4a2efd3a415e2c10cdebc5a --- tsnet/tsnet_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index b95061d38..255baf618 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -510,7 +510,7 @@ func TestStartStopStartGetsSameIP(t *testing.T) { Dir: tmps1, ControlURL: controlURL, Hostname: "s1", - Logf: logger.TestLogger(t), + Logf: tstest.WhileTestRunningLogger(t), } } s1 := newServer() From 508980603b39af59f0f9423958b0180112aa7503 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 10 Oct 2024 07:57:09 -0700 Subject: [PATCH 0038/1708] ipn/conffile: don't depend on hujson on iOS/Android Fixes #13772 Change-Id: I3ae03a5ee48c801f2e5ea12d1e54681df25d4604 Signed-off-by: Brad Fitzpatrick --- ipn/conffile/conffile.go | 18 ++++++++++++++++-- ipn/conffile/conffile_hujson.go | 20 ++++++++++++++++++++ tstest/iosdeps/iosdeps_test.go | 1 + 3 files changed, 37 insertions(+), 2 deletions(-) create mode 100644 ipn/conffile/conffile_hujson.go diff --git a/ipn/conffile/conffile.go b/ipn/conffile/conffile.go index 0b4670c42..a2bafb8b7 100644 --- a/ipn/conffile/conffile.go +++ b/ipn/conffile/conffile.go @@ -8,10 +8,11 @@ package conffile import ( "bytes" "encoding/json" + "errors" "fmt" "os" + "runtime" - "github.com/tailscale/hujson" "tailscale.com/ipn" ) @@ -39,8 +40,21 @@ func (c *Config) WantRunning() bool { // from the VM's metadata service's user-data field. 
const VMUserDataPath = "vm:user-data" +// hujsonStandardize is set to hujson.Standardize by conffile_hujson.go on +// platforms that support config files. +var hujsonStandardize func([]byte) ([]byte, error) + // Load reads and parses the config file at the provided path on disk. func Load(path string) (*Config, error) { + switch runtime.GOOS { + case "ios", "android": + // compile-time for deadcode elimination + return nil, fmt.Errorf("config file loading not supported on %q", runtime.GOOS) + } + if hujsonStandardize == nil { + // Build tags are wrong in conffile_hujson.go + return nil, errors.New("[unexpected] config file loading not wired up") + } var c Config c.Path = path var err error @@ -54,7 +68,7 @@ func Load(path string) (*Config, error) { if err != nil { return nil, err } - c.Std, err = hujson.Standardize(c.Raw) + c.Std, err = hujsonStandardize(c.Raw) if err != nil { return nil, fmt.Errorf("error parsing config file %s HuJSON/JSON: %w", path, err) } diff --git a/ipn/conffile/conffile_hujson.go b/ipn/conffile/conffile_hujson.go new file mode 100644 index 000000000..6825a0638 --- /dev/null +++ b/ipn/conffile/conffile_hujson.go @@ -0,0 +1,20 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios && !android + +package conffile + +import "github.com/tailscale/hujson" + +// Only link the hujson package on platforms that use it, to reduce binary size +// & memory a bit. +// +// (iOS and Android don't have config files) + +// While the linker's deadcode mostly handles the hujson package today, this +// keeps us honest for the future. + +func init() { + hujsonStandardize = hujson.Standardize +} diff --git a/tstest/iosdeps/iosdeps_test.go b/tstest/iosdeps/iosdeps_test.go index 6daa70c3b..ab69f1c2b 100644 --- a/tstest/iosdeps/iosdeps_test.go +++ b/tstest/iosdeps/iosdeps_test.go @@ -23,6 +23,7 @@ func TestDeps(t *testing.T) { "database/sql/driver": "iOS doesn't use an SQL database", "github.com/google/uuid": "see tailscale/tailscale#13760", "tailscale.com/clientupdate/distsign": "downloads via AppStore, not distsign", + "github.com/tailscale/hujson": "no config file support on iOS", }, }.Check(t) } From acb4a22dcc1d535ee85c76bb25767eb76d83edd5 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Thu, 10 Oct 2024 14:34:14 -0400 Subject: [PATCH 0039/1708] VERSION.txt: this is v1.77.0 (#13779) --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index 7c7053aa2..79e15fd49 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.75.0 +1.77.0 From 33029d4486d71714bfed29c84c5f6f0da1626ec2 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 10 Oct 2024 15:52:47 -0700 Subject: [PATCH 0040/1708] net/netcheck: fix netcheck cli-triggered nil pointer deref (#13782) Updates #13780 Signed-off-by: Jordan Whited --- net/netcheck/netcheck.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index dbb85cf9c..bebf4c9b0 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -940,7 +940,7 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe } } if len(need) > 0 { - if !opts.OnlyTCP443 { + if opts == nil || !opts.OnlyTCP443 { // Kick off ICMP in parallel to HTTPS checks; we don't // reuse the same WaitGroup for those probes because we // need to close the underlying Pinger after a timeout From f9949cde8bba1156aaccc189e1632bf9a1478444 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Fri, 
11 Oct 2024 08:06:53 -0500 Subject: [PATCH 0041/1708] client/tailscale,cmd/{cli,get-authkey,k8s-operator}: set distinct User-Agents This helps better distinguish what is generating activity to the Tailscale public API. Updates tailscale/corp#23838 Signed-off-by: Percy Wegmann --- client/tailscale/tailscale.go | 17 ++++++++++------- cmd/get-authkey/main.go | 1 + cmd/k8s-operator/operator.go | 1 + cmd/tailscale/cli/up.go | 1 + tsnet/tsnet.go | 1 + 5 files changed, 14 insertions(+), 7 deletions(-) diff --git a/client/tailscale/tailscale.go b/client/tailscale/tailscale.go index 894561965..8533b4712 100644 --- a/client/tailscale/tailscale.go +++ b/client/tailscale/tailscale.go @@ -51,6 +51,9 @@ type Client struct { // HTTPClient optionally specifies an alternate HTTP client to use. // If nil, http.DefaultClient is used. HTTPClient *http.Client + + // UserAgent optionally specifies an alternate User-Agent header + UserAgent string } func (c *Client) httpClient() *http.Client { @@ -97,8 +100,9 @@ func (c *Client) setAuth(r *http.Request) { // and can be changed manually by the user. func NewClient(tailnet string, auth AuthMethod) *Client { return &Client{ - tailnet: tailnet, - auth: auth, + tailnet: tailnet, + auth: auth, + UserAgent: "tailscale-client-oss", } } @@ -110,17 +114,16 @@ func (c *Client) Do(req *http.Request) (*http.Response, error) { return nil, errors.New("use of Client without setting I_Acknowledge_This_API_Is_Unstable") } c.setAuth(req) + if c.UserAgent != "" { + req.Header.Set("User-Agent", c.UserAgent) + } return c.httpClient().Do(req) } // sendRequest add the authentication key to the request and sends it. It // receives the response and reads up to 10MB of it. func (c *Client) sendRequest(req *http.Request) ([]byte, *http.Response, error) { - if !I_Acknowledge_This_API_Is_Unstable { - return nil, nil, errors.New("use of Client without setting I_Acknowledge_This_API_Is_Unstable") - } - c.setAuth(req) - resp, err := c.httpClient().Do(req) + resp, err := c.Do(req) if err != nil { return nil, resp, err } diff --git a/cmd/get-authkey/main.go b/cmd/get-authkey/main.go index d8030252c..777258d64 100644 --- a/cmd/get-authkey/main.go +++ b/cmd/get-authkey/main.go @@ -51,6 +51,7 @@ func main() { ctx := context.Background() tsClient := tailscale.NewClient("-", nil) + tsClient.UserAgent = "tailscale-get-authkey" tsClient.HTTPClient = credentials.Client(ctx) tsClient.BaseURL = baseURL diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index bd9c0f7bc..d8dd403cc 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -143,6 +143,7 @@ func initTSNet(zlog *zap.SugaredLogger) (*tsnet.Server, *tailscale.Client) { TokenURL: "https://login.tailscale.com/api/v2/oauth/token", } tsClient := tailscale.NewClient("-", nil) + tsClient.UserAgent = "tailscale-k8s-operator" tsClient.HTTPClient = credentials.Client(context.Background()) s := &tsnet.Server{ diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index e1b828105..bf6a9af77 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -1152,6 +1152,7 @@ func resolveAuthKey(ctx context.Context, v, tags string) (string, error) { } tsClient := tailscale.NewClient("-", nil) + tsClient.UserAgent = "tailscale-cli" tsClient.HTTPClient = credentials.Client(ctx) tsClient.BaseURL = baseURL diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 0be33ba8a..6751e0bb0 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -903,6 +903,7 @@ func (s *Server) APIClient() (*tailscale.Client, error) { } c := 
tailscale.NewClient("-", nil) + c.UserAgent = "tailscale-tsnet" c.HTTPClient = &http.Client{Transport: s.lb.KeyProvingNoiseRoundTripper()} return c, nil } From 17335d21049c724e365d4e9879286cd2fdb9aba5 Mon Sep 17 00:00:00 2001 From: Nick Hill Date: Fri, 4 Oct 2024 12:23:34 -0500 Subject: [PATCH 0042/1708] net/dns/resolver: forward SERVFAIL responses over PeerDNS As per the docstring, (*forwarder).forwardWithDestChan should either send to responseChan and returns nil, or returns a non-nil error (without sending to the channel). However, this does not hold when all upstream DNS servers replied with an error. We've been handling this special error path in (*Resolver).Query but not in (*Resolver).HandlePeerDNSQuery. As a result, SERVFAIL responses from upstream servers were being converted into HTTP 503 responses, instead of being properly forwarded as SERVFAIL within a successful HTTP response, as per RFC 8484, section 4.2.1: A successful HTTP response with a 2xx status code (see Section 6.3 of [RFC7231]) is used for any valid DNS response, regardless of the DNS response code. For example, a successful 2xx HTTP status code is used even with a DNS message whose DNS response code indicates failure, such as SERVFAIL or NXDOMAIN. In this PR we fix (*forwarder).forwardWithDestChan to no longer return an error when it sends a response to responseChan, and remove the special handling in (*Resolver).Query, as it is no longer necessary. Updates #13571 Signed-off-by: Nick Hill --- net/dns/resolver/forwarder.go | 1 + net/dns/resolver/tsdns.go | 10 +--------- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index 846ca3d5e..5920b7f29 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -1053,6 +1053,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo if verboseDNSForward() { f.logf("forwarder response(%d, %v, %d) = %d, %v", fq.txid, typ, len(domain), len(res.bs), firstErr) } + return nil } } return firstErr diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go index d196ad4d6..43ba0acf1 100644 --- a/net/dns/resolver/tsdns.go +++ b/net/dns/resolver/tsdns.go @@ -321,15 +321,7 @@ func (r *Resolver) Query(ctx context.Context, bs []byte, family string, from net defer cancel() err = r.forwarder.forwardWithDestChan(ctx, packet{bs, family, from}, responses) if err != nil { - select { - // Best effort: use any error response sent by forwardWithDestChan. - // This is present in some errors paths, such as when all upstream - // DNS servers replied with an error. - case resp := <-responses: - return resp.bs, err - default: - return nil, err - } + return nil, err } return (<-responses).bs, nil } From e7545f2eac48ae9f35ba4a080d6e0b6ecfd054a4 Mon Sep 17 00:00:00 2001 From: Nick Hill Date: Fri, 4 Oct 2024 12:34:41 -0500 Subject: [PATCH 0043/1708] net/dns/resolver: translate 5xx DoH server errors into SERVFAIL DNS responses If a DoH server returns an HTTP server error, rather than a SERVFAIL within a successful HTTP response, we should handle it in the same way as SERVFAIL. 
Updates #13571 Signed-off-by: Nick Hill --- net/dns/resolver/forwarder.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index 5920b7f29..0bf904070 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -487,6 +487,10 @@ func (f *forwarder) sendDoH(ctx context.Context, urlBase string, c *http.Client, defer hres.Body.Close() if hres.StatusCode != 200 { metricDNSFwdDoHErrorStatus.Add(1) + if hres.StatusCode/100 == 5 { + // Translate 5xx HTTP server errors into SERVFAIL DNS responses. + return nil, fmt.Errorf("%w: %s", errServerFailure, hres.Status) + } return nil, errors.New(hres.Status) } if ct := hres.Header.Get("Content-Type"); ct != dohType { From c2144c44a33a373174624ede9f4f6ffe8334cf05 Mon Sep 17 00:00:00 2001 From: Nick Hill Date: Fri, 4 Oct 2024 15:11:46 -0500 Subject: [PATCH 0044/1708] net/dns/resolver: update (*forwarder).forwardWithDestChan to always return an error unless it sends a response to responseChan We currently have two executions paths where (*forwarder).forwardWithDestChan returns nil, rather than an error, without sending a DNS response to responseChan. These paths are accompanied by a comment that reads: // Returning an error will cause an internal retry, there is // nothing we can do if parsing failed. Just drop the packet. But it is not (or no longer longer) accurate: returning an error from forwardWithDestChan does not currently cause a retry. Moreover, although these paths are currently unreachable due to implementation details, if (*forwarder).forwardWithDestChan were to return nil without sending a response to responseChan, it would cause a deadlock at one call site and a panic at another. Therefore, we update (*forwarder).forwardWithDestChan to return errors in those two paths and remove comments that were no longer accurate and misleading. Updates #cleanup Updates #13571 Signed-off-by: Nick Hill --- net/dns/resolver/forwarder.go | 10 ++-------- net/dns/resolver/forwarder_test.go | 17 +++++++++++------ net/dns/resolver/tsdns_test.go | 4 ++-- 3 files changed, 15 insertions(+), 16 deletions(-) diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index 0bf904070..c00dea1ae 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -920,10 +920,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo metricDNSFwdDropBonjour.Add(1) res, err := nxDomainResponse(query) if err != nil { - f.logf("error parsing bonjour query: %v", err) - // Returning an error will cause an internal retry, there is - // nothing we can do if parsing failed. Just drop the packet. - return nil + return err } select { case <-ctx.Done(): @@ -955,10 +952,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo res, err := servfailResponse(query) if err != nil { - f.logf("building servfail response: %v", err) - // Returning an error will cause an internal retry, there is - // nothing we can do if parsing failed. Just drop the packet. 
- return nil + return err } select { case <-ctx.Done(): diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index 09d810901..9c0964e93 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -7,7 +7,6 @@ import ( "bytes" "context" "encoding/binary" - "errors" "flag" "fmt" "io" @@ -657,14 +656,20 @@ func TestForwarderTCPFallbackError(t *testing.T) { } }) - _, err := runTestQuery(t, port, request, nil) + resp, err := runTestQuery(t, port, request, nil) if !sawRequest.Load() { t.Error("did not see DNS request") } - if err == nil { - t.Error("wanted error, got nil") - } else if !errors.Is(err, errServerFailure) { - t.Errorf("wanted errServerFailure, got: %v", err) + if err != nil { + t.Fatalf("wanted nil, got %v", err) + } + var parser dns.Parser + respHeader, err := parser.Start(resp) + if err != nil { + t.Fatalf("parser.Start() failed: %v", err) + } + if got, want := respHeader.RCode, dns.RCodeServerFailure; got != want { + t.Errorf("wanted %v, got %v", want, got) } } diff --git a/net/dns/resolver/tsdns_test.go b/net/dns/resolver/tsdns_test.go index e2c4750b5..d7b9fb360 100644 --- a/net/dns/resolver/tsdns_test.go +++ b/net/dns/resolver/tsdns_test.go @@ -1503,8 +1503,8 @@ func TestServfail(t *testing.T) { r.SetConfig(cfg) pkt, err := syncRespond(r, dnspacket("test.site.", dns.TypeA, noEdns)) - if !errors.Is(err, errServerFailure) { - t.Errorf("err = %v, want %v", err, errServerFailure) + if err != nil { + t.Fatalf("err = %v, want nil", err) } wantPkt := []byte{ From f07ff47922c11377374ffe91a8dbe0fa12fb1b56 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 7 Oct 2024 17:08:22 -0500 Subject: [PATCH 0045/1708] net/dns/resolver: add tests for using a forwarder with multiple upstream resolvers If multiple upstream DNS servers are available, quad-100 sends requests to all of them and forwards the first successful response, if any. If no successful responses are received, it propagates the first failure from any of them. This PR adds some test coverage for these scenarios. 
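A rough, self-contained sketch of the behavior under test — not the forwarder's actual implementation, which also applies per-resolver delays and TCP retries — is to fan the query out to every upstream concurrently, return the first successful answer, and fall back to the first error only when all upstreams fail:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// queryFn stands in for a single-upstream DNS exchange.
type queryFn func(ctx context.Context, upstream string) (string, error)

// firstSuccess queries every upstream concurrently and returns the first
// successful response; if all upstreams fail, it returns the first error.
func firstSuccess(ctx context.Context, upstreams []string, q queryFn) (string, error) {
	type result struct {
		resp string
		err  error
	}
	ch := make(chan result, len(upstreams)) // buffered so late goroutines never block
	for _, u := range upstreams {
		go func(u string) {
			resp, err := q(ctx, u)
			ch <- result{resp, err}
		}(u)
	}
	var firstErr error
	for range upstreams {
		r := <-ch
		if r.err == nil {
			return r.resp, nil
		}
		if firstErr == nil {
			firstErr = r.err
		}
	}
	return "", firstErr
}

func main() {
	resp, err := firstSuccess(context.Background(), []string{"a", "b", "c"},
		func(_ context.Context, u string) (string, error) {
			if u != "b" {
				return "", errors.New("SERVFAIL from " + u)
			}
			return "answer from " + u, nil
		})
	fmt.Println(resp, err)
}
```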
Updates #13571 Signed-off-by: Nick Khyl --- net/dns/resolver/forwarder_test.go | 235 +++++++++++++++++++++++------ 1 file changed, 190 insertions(+), 45 deletions(-) diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index 9c0964e93..e341186ec 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -449,7 +449,7 @@ func makeLargeResponse(tb testing.TB, domain string) (request, response []byte) return } -func runTestQuery(tb testing.TB, port uint16, request []byte, modify func(*forwarder)) ([]byte, error) { +func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports ...uint16) ([]byte, error) { netMon, err := netmon.New(tb.Logf) if err != nil { tb.Fatal(err) @@ -463,8 +463,9 @@ func runTestQuery(tb testing.TB, port uint16, request []byte, modify func(*forwa modify(fwd) } - rr := resolverAndDelay{ - name: &dnstype.Resolver{Addr: fmt.Sprintf("127.0.0.1:%d", port)}, + resolvers := make([]resolverAndDelay, len(ports)) + for i, port := range ports { + resolvers[i].name = &dnstype.Resolver{Addr: fmt.Sprintf("127.0.0.1:%d", port)} } rpkt := packet{ @@ -476,7 +477,7 @@ func runTestQuery(tb testing.TB, port uint16, request []byte, modify func(*forwa rchan := make(chan packet, 1) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) tb.Cleanup(cancel) - err = fwd.forwardWithDestChan(ctx, rpkt, rchan, rr) + err = fwd.forwardWithDestChan(ctx, rpkt, rchan, resolvers...) select { case res := <-rchan: return res.bs, err @@ -485,8 +486,62 @@ func runTestQuery(tb testing.TB, port uint16, request []byte, modify func(*forwa } } -func mustRunTestQuery(tb testing.TB, port uint16, request []byte, modify func(*forwarder)) []byte { - resp, err := runTestQuery(tb, port, request, modify) +// makeTestRequest returns a new TypeA request for the given domain. +func makeTestRequest(tb testing.TB, domain string) []byte { + tb.Helper() + name := dns.MustNewName(domain) + builder := dns.NewBuilder(nil, dns.Header{}) + builder.StartQuestions() + builder.Question(dns.Question{ + Name: name, + Type: dns.TypeA, + Class: dns.ClassINET, + }) + request, err := builder.Finish() + if err != nil { + tb.Fatal(err) + } + return request +} + +// makeTestResponse returns a new Type A response for the given domain, +// with the specified status code and zero or more addresses. +func makeTestResponse(tb testing.TB, domain string, code dns.RCode, addrs ...netip.Addr) []byte { + tb.Helper() + name := dns.MustNewName(domain) + builder := dns.NewBuilder(nil, dns.Header{ + Response: true, + Authoritative: true, + RCode: code, + }) + builder.StartQuestions() + q := dns.Question{ + Name: name, + Type: dns.TypeA, + Class: dns.ClassINET, + } + builder.Question(q) + if len(addrs) > 0 { + builder.StartAnswers() + for _, addr := range addrs { + builder.AResource(dns.ResourceHeader{ + Name: q.Name, + Class: q.Class, + TTL: 120, + }, dns.AResource{ + A: addr.As4(), + }) + } + } + response, err := builder.Finish() + if err != nil { + tb.Fatal(err) + } + return response +} + +func mustRunTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports ...uint16) []byte { + resp, err := runTestQuery(tb, request, modify, ports...) 
if err != nil { tb.Fatalf("error making request: %v", err) } @@ -515,7 +570,7 @@ func TestForwarderTCPFallback(t *testing.T) { } }) - resp := mustRunTestQuery(t, port, request, nil) + resp := mustRunTestQuery(t, request, nil, port) if !bytes.Equal(resp, largeResponse) { t.Errorf("invalid response\ngot: %+v\nwant: %+v", resp, largeResponse) } @@ -553,7 +608,7 @@ func TestForwarderTCPFallbackTimeout(t *testing.T) { } }) - resp := mustRunTestQuery(t, port, request, nil) + resp := mustRunTestQuery(t, request, nil, port) if !bytes.Equal(resp, largeResponse) { t.Errorf("invalid response\ngot: %+v\nwant: %+v", resp, largeResponse) } @@ -584,11 +639,11 @@ func TestForwarderTCPFallbackDisabled(t *testing.T) { } }) - resp := mustRunTestQuery(t, port, request, func(fwd *forwarder) { + resp := mustRunTestQuery(t, request, func(fwd *forwarder) { // Disable retries for this test. fwd.controlKnobs = &controlknobs.Knobs{} fwd.controlKnobs.DisableDNSForwarderTCPRetries.Store(true) - }) + }, port) wantResp := append([]byte(nil), largeResponse[:maxResponseBytes]...) @@ -612,41 +667,10 @@ func TestForwarderTCPFallbackError(t *testing.T) { const domain = "error-response.tailscale.com." // Our response is a SERVFAIL - response := func() []byte { - name := dns.MustNewName(domain) - - builder := dns.NewBuilder(nil, dns.Header{ - Response: true, - RCode: dns.RCodeServerFailure, - }) - builder.StartQuestions() - builder.Question(dns.Question{ - Name: name, - Type: dns.TypeA, - Class: dns.ClassINET, - }) - response, err := builder.Finish() - if err != nil { - t.Fatal(err) - } - return response - }() + response := makeTestResponse(t, domain, dns.RCodeServerFailure) // Our request is a single A query for the domain in the answer, above. - request := func() []byte { - builder := dns.NewBuilder(nil, dns.Header{}) - builder.StartQuestions() - builder.Question(dns.Question{ - Name: dns.MustNewName(domain), - Type: dns.TypeA, - Class: dns.ClassINET, - }) - request, err := builder.Finish() - if err != nil { - t.Fatal(err) - } - return request - }() + request := makeTestRequest(t, domain) var sawRequest atomic.Bool port := runDNSServer(t, nil, response, func(isTCP bool, gotRequest []byte) { @@ -656,7 +680,7 @@ func TestForwarderTCPFallbackError(t *testing.T) { } }) - resp, err := runTestQuery(t, port, request, nil) + resp, err := runTestQuery(t, request, nil, port) if !sawRequest.Load() { t.Error("did not see DNS request") } @@ -673,6 +697,127 @@ func TestForwarderTCPFallbackError(t *testing.T) { } } +// Test to ensure that if we have more than one resolver, and at least one of them +// returns a successful response, we propagate it. +func TestForwarderWithManyResolvers(t *testing.T) { + enableDebug(t) + + const domain = "example.com." + request := makeTestRequest(t, domain) + + tests := []struct { + name string + responses [][]byte // upstream responses + wantResponses [][]byte // we should receive one of these from the forwarder + }{ + { + name: "Success", + responses: [][]byte{ // All upstream servers returned successful, but different, response. + makeTestResponse(t, domain, dns.RCodeSuccess, netip.MustParseAddr("127.0.0.1")), + makeTestResponse(t, domain, dns.RCodeSuccess, netip.MustParseAddr("127.0.0.2")), + makeTestResponse(t, domain, dns.RCodeSuccess, netip.MustParseAddr("127.0.0.3")), + }, + wantResponses: [][]byte{ // We may forward whichever response is received first. 
+ makeTestResponse(t, domain, dns.RCodeSuccess, netip.MustParseAddr("127.0.0.1")), + makeTestResponse(t, domain, dns.RCodeSuccess, netip.MustParseAddr("127.0.0.2")), + makeTestResponse(t, domain, dns.RCodeSuccess, netip.MustParseAddr("127.0.0.3")), + }, + }, + { + name: "ServFail", + responses: [][]byte{ // All upstream servers returned a SERVFAIL. + makeTestResponse(t, domain, dns.RCodeServerFailure), + makeTestResponse(t, domain, dns.RCodeServerFailure), + makeTestResponse(t, domain, dns.RCodeServerFailure), + }, + wantResponses: [][]byte{ + makeTestResponse(t, domain, dns.RCodeServerFailure), + }, + }, + { + name: "ServFail+Success", + responses: [][]byte{ // All upstream servers fail except for one. + makeTestResponse(t, domain, dns.RCodeServerFailure), + makeTestResponse(t, domain, dns.RCodeServerFailure), + makeTestResponse(t, domain, dns.RCodeSuccess, netip.MustParseAddr("127.0.0.1")), + makeTestResponse(t, domain, dns.RCodeServerFailure), + }, + wantResponses: [][]byte{ // We should forward the successful response. + makeTestResponse(t, domain, dns.RCodeSuccess, netip.MustParseAddr("127.0.0.1")), + }, + }, + { + name: "NXDomain", + responses: [][]byte{ // All upstream servers returned NXDOMAIN. + makeTestResponse(t, domain, dns.RCodeNameError), + makeTestResponse(t, domain, dns.RCodeNameError), + makeTestResponse(t, domain, dns.RCodeNameError), + }, + wantResponses: [][]byte{ + makeTestResponse(t, domain, dns.RCodeNameError), + }, + }, + { + name: "NXDomain+Success", + responses: [][]byte{ // All upstream servers returned NXDOMAIN except for one. + makeTestResponse(t, domain, dns.RCodeNameError), + makeTestResponse(t, domain, dns.RCodeNameError), + makeTestResponse(t, domain, dns.RCodeSuccess, netip.MustParseAddr("127.0.0.1")), + }, + wantResponses: [][]byte{ // However, only SERVFAIL are considered to be errors. Therefore, we may forward any response. + makeTestResponse(t, domain, dns.RCodeNameError), + makeTestResponse(t, domain, dns.RCodeSuccess, netip.MustParseAddr("127.0.0.1")), + }, + }, + { + name: "Refused", + responses: [][]byte{ // All upstream servers return different failures. + makeTestResponse(t, domain, dns.RCodeRefused), + makeTestResponse(t, domain, dns.RCodeRefused), + makeTestResponse(t, domain, dns.RCodeRefused), + makeTestResponse(t, domain, dns.RCodeRefused), + makeTestResponse(t, domain, dns.RCodeRefused), + makeTestResponse(t, domain, dns.RCodeSuccess, netip.MustParseAddr("127.0.0.1")), + }, + wantResponses: [][]byte{ // Refused is not considered to be an error and can be forwarded. + makeTestResponse(t, domain, dns.RCodeRefused), + makeTestResponse(t, domain, dns.RCodeSuccess, netip.MustParseAddr("127.0.0.1")), + }, + }, + { + name: "MixFail", + responses: [][]byte{ // All upstream servers return different failures. + makeTestResponse(t, domain, dns.RCodeServerFailure), + makeTestResponse(t, domain, dns.RCodeNameError), + makeTestResponse(t, domain, dns.RCodeRefused), + }, + wantResponses: [][]byte{ // Both NXDomain and Refused can be forwarded. + makeTestResponse(t, domain, dns.RCodeNameError), + makeTestResponse(t, domain, dns.RCodeRefused), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ports := make([]uint16, len(tt.responses)) + for i := range tt.responses { + ports[i] = runDNSServer(t, nil, tt.responses[i], func(isTCP bool, gotRequest []byte) {}) + } + gotResponse, err := runTestQuery(t, request, nil, ports...) 
+ if err != nil { + t.Fatalf("wanted nil, got %v", err) + } + responseOk := slices.ContainsFunc(tt.wantResponses, func(wantResponse []byte) bool { + return slices.Equal(gotResponse, wantResponse) + }) + if !responseOk { + t.Errorf("invalid response\ngot: %+v\nwant: %+v", gotResponse, tt.wantResponses[0]) + } + }) + } +} + // mdnsResponder at minimum has an expectation that NXDOMAIN must include the // question, otherwise it will penalize our server (#13511). func TestNXDOMAINIncludesQuestion(t *testing.T) { @@ -718,7 +863,7 @@ func TestNXDOMAINIncludesQuestion(t *testing.T) { port := runDNSServer(t, nil, response, func(isTCP bool, gotRequest []byte) { }) - res, err := runTestQuery(t, port, request, nil) + res, err := runTestQuery(t, request, nil, port) if err != nil { t.Fatal(err) } From ecc8035f73f62424298d2a36dc2d747601fb04c8 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Fri, 11 Oct 2024 13:12:18 -0700 Subject: [PATCH 0046/1708] types/bools: add Compare to compare boolean values (#13792) The bools.Compare function compares boolean values by reporting -1, 0, +1 for ordering so that it can be easily used with slices.SortFunc. Updates #cleanup Updates tailscale/corp#11038 Signed-off-by: Joe Tsai --- types/bools/compare.go | 17 +++++++++++++++++ types/bools/compare_test.go | 21 +++++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 types/bools/compare.go create mode 100644 types/bools/compare_test.go diff --git a/types/bools/compare.go b/types/bools/compare.go new file mode 100644 index 000000000..ac433b240 --- /dev/null +++ b/types/bools/compare.go @@ -0,0 +1,17 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package bools contains the bools.Compare function. +package bools + +// Compare compares two boolean values as if false is ordered before true. +func Compare[T ~bool](x, y T) int { + switch { + case x == false && y == true: + return -1 + case x == true && y == false: + return +1 + default: + return 0 + } +} diff --git a/types/bools/compare_test.go b/types/bools/compare_test.go new file mode 100644 index 000000000..280294621 --- /dev/null +++ b/types/bools/compare_test.go @@ -0,0 +1,21 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package bools + +import "testing" + +func TestCompare(t *testing.T) { + if got := Compare(false, false); got != 0 { + t.Errorf("Compare(false, false) = %v, want 0", got) + } + if got := Compare(false, true); got != -1 { + t.Errorf("Compare(false, true) = %v, want -1", got) + } + if got := Compare(true, false); got != +1 { + t.Errorf("Compare(true, false) = %v, want +1", got) + } + if got := Compare(true, true); got != 0 { + t.Errorf("Compare(true, true) = %v, want 0", got) + } +} From 12e6094d9c7e8f856d5117235d18ad86d0812d32 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Fri, 11 Oct 2024 14:59:47 -0500 Subject: [PATCH 0047/1708] ssh/tailssh: calculate passthrough environment at latest possible stage This allows passing through any environment variables that we set ourselves, for example DBUS_SESSION_BUS_ADDRESS. 
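For reference, the allow-list eventually handed to `su -w` is simply a comma-separated list of the variable names being forwarded, seeded with SSH_AUTH_SOCK. A simplified sketch of that step (using strings.Cut in place of the Split call in the patch; function name is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// allowListKeys builds the comma-separated key list passed to `su -w`,
// starting from SSH_AUTH_SOCK and adding the key of each forwarded
// KEY=VALUE pair.
func allowListKeys(extraEnviron []string) string {
	keys := []string{"SSH_AUTH_SOCK"}
	for _, kv := range extraEnviron {
		if key, _, ok := strings.Cut(kv, "="); ok {
			keys = append(keys, key)
		}
	}
	return strings.Join(keys, ",")
}

func main() {
	fmt.Println(allowListKeys([]string{"DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1000/bus"}))
	// Output: SSH_AUTH_SOCK,DBUS_SESSION_BUS_ADDRESS
}
```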
Updates #11175 Co-authored-by: Mario Minardi Signed-off-by: Percy Wegmann --- ssh/tailssh/incubator.go | 52 ++++++++++++++++++++++++++++------------ 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go index 7748376b2..3ff676d51 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -210,8 +210,6 @@ type incubatorArgs struct { debugTest bool isSELinuxEnforcing bool encodedEnv string - allowListEnvKeys string - forwardedEnviron []string } func parseIncubatorArgs(args []string) (incubatorArgs, error) { @@ -246,31 +244,35 @@ func parseIncubatorArgs(args []string) (incubatorArgs, error) { ia.gids = append(ia.gids, gid) } - ia.forwardedEnviron = os.Environ() + return ia, nil +} + +func (ia incubatorArgs) forwadedEnviron() ([]string, string, error) { + environ := os.Environ() // pass through SSH_AUTH_SOCK environment variable to support ssh agent forwarding - ia.allowListEnvKeys = "SSH_AUTH_SOCK" + allowListKeys := "SSH_AUTH_SOCK" if ia.encodedEnv != "" { unquoted, err := strconv.Unquote(ia.encodedEnv) if err != nil { - return ia, fmt.Errorf("unable to parse encodedEnv %q: %w", ia.encodedEnv, err) + return nil, "", fmt.Errorf("unable to parse encodedEnv %q: %w", ia.encodedEnv, err) } var extraEnviron []string err = json.Unmarshal([]byte(unquoted), &extraEnviron) if err != nil { - return ia, fmt.Errorf("unable to parse encodedEnv %q: %w", ia.encodedEnv, err) + return nil, "", fmt.Errorf("unable to parse encodedEnv %q: %w", ia.encodedEnv, err) } - ia.forwardedEnviron = append(ia.forwardedEnviron, extraEnviron...) + environ = append(environ, extraEnviron...) for _, v := range extraEnviron { - ia.allowListEnvKeys = fmt.Sprintf("%s,%s", ia.allowListEnvKeys, strings.Split(v, "=")[0]) + allowListKeys = fmt.Sprintf("%s,%s", allowListKeys, strings.Split(v, "=")[0]) } } - return ia, nil + return environ, allowListKeys, nil } // beIncubator is the entrypoint to the `tailscaled be-child ssh` subcommand. @@ -450,8 +452,13 @@ func tryExecLogin(dlogf logger.Logf, ia incubatorArgs) error { loginArgs := ia.loginArgs(loginCmdPath) dlogf("logging in with %+v", loginArgs) + environ, _, err := ia.forwadedEnviron() + if err != nil { + return err + } + // If Exec works, the Go code will not proceed past this: - err = unix.Exec(loginCmdPath, loginArgs, ia.forwardedEnviron) + err = unix.Exec(loginCmdPath, loginArgs, environ) // If we made it here, Exec failed. return err @@ -484,9 +491,14 @@ func trySU(dlogf logger.Logf, ia incubatorArgs) (handled bool, err error) { defer sessionCloser() } + environ, allowListEnvKeys, err := ia.forwadedEnviron() + if err != nil { + return false, err + } + loginArgs := []string{ su, - "-w", ia.allowListEnvKeys, + "-w", allowListEnvKeys, "-l", ia.localUser, } @@ -498,7 +510,7 @@ func trySU(dlogf logger.Logf, ia incubatorArgs) (handled bool, err error) { dlogf("logging in with %+v", loginArgs) // If Exec works, the Go code will not proceed past this: - err = unix.Exec(su, loginArgs, ia.forwardedEnviron) + err = unix.Exec(su, loginArgs, environ) // If we made it here, Exec failed. return true, err @@ -527,11 +539,16 @@ func findSU(dlogf logger.Logf, ia incubatorArgs) string { return "" } + _, allowListEnvKeys, err := ia.forwadedEnviron() + if err != nil { + return "" + } + // First try to execute su -w -l -c true // to make sure su supports the necessary arguments. 
err = exec.Command( su, - "-w", ia.allowListEnvKeys, + "-w", allowListEnvKeys, "-l", ia.localUser, "-c", "true", @@ -558,10 +575,15 @@ func handleSSHInProcess(dlogf logger.Logf, ia incubatorArgs) error { return err } + environ, _, err := ia.forwadedEnviron() + if err != nil { + return err + } + args := shellArgs(ia.isShell, ia.cmd) dlogf("running %s %q", ia.loginShell, args) - cmd := newCommand(ia.hasTTY, ia.loginShell, ia.forwardedEnviron, args) - err := cmd.Run() + cmd := newCommand(ia.hasTTY, ia.loginShell, environ, args) + err = cmd.Run() if ee, ok := err.(*exec.ExitError); ok { ps := ee.ProcessState code := ps.ExitCode() From adc83689649f4f7e5c576ed6a697bf8c0d4bef8c Mon Sep 17 00:00:00 2001 From: Paul Scott <408401+icio@users.noreply.github.com> Date: Mon, 14 Oct 2024 10:02:04 +0100 Subject: [PATCH 0048/1708] tstest: avoid Fatal in ResourceCheck to show panic (#13790) Fixes #13789 Signed-off-by: Paul Scott --- tstest/resource.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tstest/resource.go b/tstest/resource.go index a3c292094..b094c7911 100644 --- a/tstest/resource.go +++ b/tstest/resource.go @@ -29,7 +29,8 @@ func ResourceCheck(tb testing.TB) { startN, startStacks := goroutines() tb.Cleanup(func() { if tb.Failed() { - // Something else went wrong. + // Test has failed - but this doesn't catch panics due to + // https://github.com/golang/go/issues/49929. return } // Goroutines might be still exiting. @@ -44,7 +45,10 @@ func ResourceCheck(tb testing.TB) { return } tb.Logf("goroutine diff:\n%v\n", cmp.Diff(startStacks, endStacks)) - tb.Fatalf("goroutine count: expected %d, got %d\n", startN, endN) + + // tb.Failed() above won't report on panics, so we shouldn't call Fatal + // here or we risk suppressing reporting of the panic. + tb.Errorf("goroutine count: expected %d, got %d\n", startN, endN) }) } From 40c991f6b85b6a5ff1a4b440650750e95c755f61 Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Wed, 25 Sep 2024 17:20:56 +0200 Subject: [PATCH 0049/1708] wgengine: instrument with usermetrics Updates tailscale/corp#22075 Signed-off-by: Kristoffer Dalby --- tsnet/tsnet_test.go | 146 ++++++++++++++++++++- util/clientmetric/clientmetric.go | 51 ++++++++ util/clientmetric/clientmetric_test.go | 49 ++++++++ wgengine/magicsock/derp.go | 6 +- wgengine/magicsock/endpoint.go | 27 ++-- wgengine/magicsock/magicsock.go | 167 +++++++++++++++++++++++-- wgengine/magicsock/magicsock_test.go | 86 +++++++++++++ 7 files changed, 509 insertions(+), 23 deletions(-) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 255baf618..98c1fd4ab 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -36,6 +36,7 @@ import ( dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "golang.org/x/net/proxy" + "tailscale.com/client/tailscale" "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/health" "tailscale.com/ipn" @@ -874,6 +875,78 @@ func promMetricLabelsStr(labels []*dto.LabelPair) string { return b.String() } +// sendData sends a given amount of bytes from s1 to s2. 
+func sendData(logf func(format string, args ...any), ctx context.Context, bytesCount int, s1, s2 *Server, s1ip, s2ip netip.Addr) error { + l := must.Get(s1.Listen("tcp", fmt.Sprintf("%s:8081", s1ip))) + defer l.Close() + + // Dial to s1 from s2 + w, err := s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", s1ip)) + if err != nil { + return err + } + defer w.Close() + + stopReceive := make(chan struct{}) + defer close(stopReceive) + allReceived := make(chan error) + defer close(allReceived) + + go func() { + conn, err := l.Accept() + if err != nil { + allReceived <- err + return + } + conn.SetWriteDeadline(time.Now().Add(30 * time.Second)) + + total := 0 + recvStart := time.Now() + for { + got := make([]byte, bytesCount) + n, err := conn.Read(got) + if n != bytesCount { + logf("read %d bytes, want %d", n, bytesCount) + } + + select { + case <-stopReceive: + return + default: + } + + if err != nil { + allReceived <- fmt.Errorf("failed reading packet, %s", err) + return + } + + total += n + logf("received %d/%d bytes, %.2f %%", total, bytesCount, (float64(total) / (float64(bytesCount)) * 100)) + if total == bytesCount { + break + } + } + + logf("all received, took: %s", time.Since(recvStart).String()) + allReceived <- nil + }() + + sendStart := time.Now() + w.SetWriteDeadline(time.Now().Add(30 * time.Second)) + if _, err := w.Write(bytes.Repeat([]byte("A"), bytesCount)); err != nil { + stopReceive <- struct{}{} + return err + } + + logf("all sent (%s), waiting for all packets (%d) to be received", time.Since(sendStart).String(), bytesCount) + err, _ = <-allReceived + if err != nil { + return err + } + + return nil +} + func TestUserMetrics(t *testing.T) { flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/13420") tstest.ResourceCheck(t) @@ -882,7 +955,7 @@ func TestUserMetrics(t *testing.T) { controlURL, c := startControl(t) s1, s1ip, s1PubKey := startServer(t, ctx, controlURL, "s1") - s2, _, _ := startServer(t, ctx, controlURL, "s2") + s2, s2ip, _ := startServer(t, ctx, controlURL, "s2") s1.lb.EditPrefs(&ipn.MaskedPrefs{ Prefs: ipn.Prefs{ @@ -951,6 +1024,20 @@ func TestUserMetrics(t *testing.T) { return status1.Self.PrimaryRoutes != nil && status1.Self.PrimaryRoutes.Len() == int(wantRoutes)+1 }) + mustDirect(t, t.Logf, lc1, lc2) + + // 10 megabytes + bytesToSend := 10 * 1024 * 1024 + + // This asserts generates some traffic, it is factored out + // of TestUDPConn. + start := time.Now() + err = sendData(t.Logf, ctx, bytesToSend, s1, s2, s1ip, s2ip) + if err != nil { + t.Fatalf("Failed to send packets: %v", err) + } + t.Logf("Sent %d bytes from s1 to s2 in %s", bytesToSend, time.Since(start).String()) + ctxLc, cancelLc := context.WithTimeout(context.Background(), 5*time.Second) defer cancelLc() metrics1, err := lc1.UserMetrics(ctxLc) @@ -968,6 +1055,9 @@ func TestUserMetrics(t *testing.T) { t.Fatal(err) } + // Allow the metrics for the bytes sent to be off by 15%. + bytesSentTolerance := 1.15 + t.Logf("Metrics1:\n%s\n", metrics1) // The node is advertising 4 routes: @@ -997,6 +1087,18 @@ func TestUserMetrics(t *testing.T) { t.Errorf("metrics1, tailscaled_primary_routes: got %v, want %v", got, want) } + // Verify that the amount of data recorded in bytes is higher or equal to the + // 10 megabytes sent. 
+ inboundBytes1 := parsedMetrics1[`tailscaled_inbound_bytes_total{path="direct_ipv4"}`] + if inboundBytes1 < float64(bytesToSend) { + t.Errorf(`metrics1, tailscaled_inbound_bytes_total{path="direct_ipv4"}: expected higher (or equal) than %d, got: %f`, bytesToSend, inboundBytes1) + } + + // But ensure that it is not too much higher than the 10 megabytes sent. + if inboundBytes1 > float64(bytesToSend)*bytesSentTolerance { + t.Errorf(`metrics1, tailscaled_inbound_bytes_total{path="direct_ipv4"}: expected lower than %f, got: %f`, float64(bytesToSend)*bytesSentTolerance, inboundBytes1) + } + metrics2, err := lc2.UserMetrics(ctx) if err != nil { t.Fatal(err) @@ -1033,6 +1135,18 @@ func TestUserMetrics(t *testing.T) { if got, want := parsedMetrics2["tailscaled_primary_routes"], 0.0; got != want { t.Errorf("metrics2, tailscaled_primary_routes: got %v, want %v", got, want) } + + // Verify that the amount of data recorded in bytes is higher or equal than the + // 10 megabytes sent. + outboundBytes2 := parsedMetrics2[`tailscaled_outbound_bytes_total{path="direct_ipv4"}`] + if outboundBytes2 < float64(bytesToSend) { + t.Errorf(`metrics2, tailscaled_outbound_bytes_total{path="direct_ipv4"}: expected higher (or equal) than %d, got: %f`, bytesToSend, outboundBytes2) + } + + // But ensure that it is not too much higher than the 10 megabytes sent. + if outboundBytes2 > float64(bytesToSend)*bytesSentTolerance { + t.Errorf(`metrics2, tailscaled_outbound_bytes_total{path="direct_ipv4"}: expected lower than %f, got: %f`, float64(bytesToSend)*bytesSentTolerance, outboundBytes2) + } } func waitForCondition(t *testing.T, msg string, waitTime time.Duration, f func() bool) { @@ -1044,3 +1158,33 @@ func waitForCondition(t *testing.T, msg string, waitTime time.Duration, f func() } t.Fatalf("waiting for condition: %s", msg) } + +// mustDirect ensures there is a direct connection between LocalClient 1 and 2 +func mustDirect(t *testing.T, logf logger.Logf, lc1, lc2 *tailscale.LocalClient) { + t.Helper() + lastLog := time.Now().Add(-time.Minute) + // See https://github.com/tailscale/tailscale/issues/654 + // and https://github.com/tailscale/tailscale/issues/3247 for discussions of this deadline. + for deadline := time.Now().Add(30 * time.Second); time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + status1, err := lc1.Status(ctx) + if err != nil { + continue + } + status2, err := lc2.Status(ctx) + if err != nil { + continue + } + pst := status1.Peer[status2.Self.PublicKey] + if pst.CurAddr != "" { + logf("direct link %s->%s found with addr %s", status1.Self.HostName, status2.Self.HostName, pst.CurAddr) + return + } + if now := time.Now(); now.Sub(lastLog) > time.Second { + logf("no direct path %s->%s yet, addrs %v", status1.Self.HostName, status2.Self.HostName, pst.Addrs) + lastLog = now + } + } + t.Error("magicsock did not find a direct path from lc1 to lc2") +} diff --git a/util/clientmetric/clientmetric.go b/util/clientmetric/clientmetric.go index b2d356b60..584a24f73 100644 --- a/util/clientmetric/clientmetric.go +++ b/util/clientmetric/clientmetric.go @@ -9,6 +9,7 @@ import ( "bytes" "encoding/binary" "encoding/hex" + "expvar" "fmt" "io" "sort" @@ -16,6 +17,8 @@ import ( "sync" "sync/atomic" "time" + + "tailscale.com/util/set" ) var ( @@ -223,6 +226,54 @@ func NewGaugeFunc(name string, f func() int64) *Metric { return m } +// AggregateCounter returns a sum of expvar counters registered with it. 
+type AggregateCounter struct { + mu sync.RWMutex + counters set.Set[*expvar.Int] +} + +func (c *AggregateCounter) Value() int64 { + c.mu.RLock() + defer c.mu.RUnlock() + var sum int64 + for cnt := range c.counters { + sum += cnt.Value() + } + return sum +} + +// Register registers provided expvar counter. +// When a counter is added to the counter, it will be reset +// to start counting from 0. This is to avoid incrementing the +// counter with an unexpectedly large value. +func (c *AggregateCounter) Register(counter *expvar.Int) { + c.mu.Lock() + defer c.mu.Unlock() + // No need to do anything if it's already registered. + if c.counters.Contains(counter) { + return + } + counter.Set(0) + c.counters.Add(counter) +} + +// UnregisterAll unregisters all counters resulting in it +// starting back down at zero. This is to ensure monotonicity +// and respect the semantics of the counter. +func (c *AggregateCounter) UnregisterAll() { + c.mu.Lock() + defer c.mu.Unlock() + c.counters = set.Set[*expvar.Int]{} +} + +// NewAggregateCounter returns a new aggregate counter that returns +// a sum of expvar variables registered with it. +func NewAggregateCounter(name string) *AggregateCounter { + c := &AggregateCounter{counters: set.Set[*expvar.Int]{}} + NewGaugeFunc(name, c.Value) + return c +} + // WritePrometheusExpositionFormat writes all client metrics to w in // the Prometheus text-based exposition format. // diff --git a/util/clientmetric/clientmetric_test.go b/util/clientmetric/clientmetric_test.go index ab6c4335a..555d7a711 100644 --- a/util/clientmetric/clientmetric_test.go +++ b/util/clientmetric/clientmetric_test.go @@ -4,8 +4,11 @@ package clientmetric import ( + "expvar" "testing" "time" + + qt "github.com/frankban/quicktest" ) func TestDeltaEncBuf(t *testing.T) { @@ -107,3 +110,49 @@ func TestWithFunc(t *testing.T) { t.Errorf("second = %q; want %q", got, want) } } + +func TestAggregateCounter(t *testing.T) { + clearMetrics() + + c := qt.New(t) + + expv1 := &expvar.Int{} + expv2 := &expvar.Int{} + expv3 := &expvar.Int{} + + aggCounter := NewAggregateCounter("agg_counter") + + aggCounter.Register(expv1) + c.Assert(aggCounter.Value(), qt.Equals, int64(0)) + + expv1.Add(1) + c.Assert(aggCounter.Value(), qt.Equals, int64(1)) + + aggCounter.Register(expv2) + c.Assert(aggCounter.Value(), qt.Equals, int64(1)) + + expv1.Add(1) + expv2.Add(1) + c.Assert(aggCounter.Value(), qt.Equals, int64(3)) + + // Adding a new expvar should not change the value + // and any value the counter already had is reset + expv3.Set(5) + aggCounter.Register(expv3) + c.Assert(aggCounter.Value(), qt.Equals, int64(3)) + + // Registering the same expvar multiple times should not change the value + aggCounter.Register(expv3) + c.Assert(aggCounter.Value(), qt.Equals, int64(3)) + + aggCounter.UnregisterAll() + c.Assert(aggCounter.Value(), qt.Equals, int64(0)) + + // Start over + expv3.Set(5) + aggCounter.Register(expv3) + c.Assert(aggCounter.Value(), qt.Equals, int64(0)) + + expv3.Set(5) + c.Assert(aggCounter.Value(), qt.Equals, int64(5)) +} diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index 69c5cbc90..281447ac2 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -669,7 +669,8 @@ func (c *Conn) runDerpWriter(ctx context.Context, dc *derphttp.Client, ch <-chan c.logf("magicsock: derp.Send(%v): %v", wr.addr, err) metricSendDERPError.Add(1) } else { - metricSendDERP.Add(1) + c.metrics.outboundPacketsDERPTotal.Add(1) + c.metrics.outboundBytesDERPTotal.Add(int64(len(wr.b))) } } } @@ 
-690,7 +691,8 @@ func (c *connBind) receiveDERP(buffs [][]byte, sizes []int, eps []conn.Endpoint) // No data read occurred. Wait for another packet. continue } - metricRecvDataDERP.Add(1) + c.metrics.inboundPacketsDERPTotal.Add(1) + c.metrics.inboundBytesDERPTotal.Add(int64(n)) sizes[0] = n eps[0] = ep return 1, nil diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 53ecb84de..78b9ee92a 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -960,26 +960,39 @@ func (de *endpoint) send(buffs [][]byte) error { de.noteBadEndpoint(udpAddr) } + var txBytes int + for _, b := range buffs { + txBytes += len(b) + } + + switch { + case udpAddr.Addr().Is4(): + de.c.metrics.outboundPacketsIPv4Total.Add(int64(len(buffs))) + de.c.metrics.outboundBytesIPv4Total.Add(int64(txBytes)) + case udpAddr.Addr().Is6(): + de.c.metrics.outboundPacketsIPv6Total.Add(int64(len(buffs))) + de.c.metrics.outboundBytesIPv6Total.Add(int64(txBytes)) + } + // TODO(raggi): needs updating for accuracy, as in error conditions we may have partial sends. if stats := de.c.stats.Load(); err == nil && stats != nil { - var txBytes int - for _, b := range buffs { - txBytes += len(b) - } stats.UpdateTxPhysical(de.nodeAddr, udpAddr, txBytes) } } if derpAddr.IsValid() { allOk := true + var txBytes int for _, buff := range buffs { ok, _ := de.c.sendAddr(derpAddr, de.publicKey, buff) - if stats := de.c.stats.Load(); stats != nil { - stats.UpdateTxPhysical(de.nodeAddr, derpAddr, len(buff)) - } + txBytes += len(buff) if !ok { allOk = false } } + + if stats := de.c.stats.Load(); stats != nil { + stats.UpdateTxPhysical(de.nodeAddr, derpAddr, txBytes) + } if allOk { return nil } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 08aff842d..2d4944baf 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -10,6 +10,7 @@ import ( "bytes" "context" "errors" + "expvar" "fmt" "io" "net" @@ -80,6 +81,54 @@ const ( socketBufferSize = 7 << 20 ) +// Path is a label indicating the type of path a packet took. +type Path string + +const ( + PathDirectIPv4 Path = "direct_ipv4" + PathDirectIPv6 Path = "direct_ipv6" + PathDERP Path = "derp" +) + +type pathLabel struct { + // Path indicates the path that the packet took: + // - direct_ipv4 + // - direct_ipv6 + // - derp + Path Path +} + +// metrics in wgengine contains the usermetrics counters for magicsock, it +// is however a bit special. All them metrics are labeled, but looking up +// the metric everytime we need to record it has an overhead, and includes +// a lock in MultiLabelMap. The metrics are therefore instead created with +// wgengine and the underlying expvar.Int is stored to be used directly. +type metrics struct { + // inboundPacketsTotal is the total number of inbound packets received, + // labeled by the path the packet took. + inboundPacketsIPv4Total expvar.Int + inboundPacketsIPv6Total expvar.Int + inboundPacketsDERPTotal expvar.Int + + // inboundBytesTotal is the total number of inbound bytes received, + // labeled by the path the packet took. + inboundBytesIPv4Total expvar.Int + inboundBytesIPv6Total expvar.Int + inboundBytesDERPTotal expvar.Int + + // outboundPacketsTotal is the total number of outbound packets sent, + // labeled by the path the packet took. 
+ outboundPacketsIPv4Total expvar.Int + outboundPacketsIPv6Total expvar.Int + outboundPacketsDERPTotal expvar.Int + + // outboundBytesTotal is the total number of outbound bytes sent, + // labeled by the path the packet took. + outboundBytesIPv4Total expvar.Int + outboundBytesIPv6Total expvar.Int + outboundBytesDERPTotal expvar.Int +} + // A Conn routes UDP packets and actively manages a list of its endpoints. type Conn struct { // This block mirrors the contents and field order of the Options @@ -321,6 +370,9 @@ type Conn struct { // responsibility to ensure that traffic from these endpoints is routed // to the node. staticEndpoints views.Slice[netip.AddrPort] + + // metrics contains the metrics for the magicsock instance. + metrics *metrics } // SetDebugLoggingEnabled controls whether spammy debug logging is enabled. @@ -503,6 +555,8 @@ func NewConn(opts Options) (*Conn, error) { UseDNSCache: true, } + c.metrics = registerMetrics(opts.Metrics) + if d4, err := c.listenRawDisco("ip4"); err == nil { c.logf("[v1] using BPF disco receiver for IPv4") c.closeDisco4 = d4 @@ -520,6 +574,76 @@ func NewConn(opts Options) (*Conn, error) { return c, nil } +// registerMetrics wires up the metrics for wgengine, instead of +// registering the label metric directly, the underlying expvar is exposed. +// See metrics for more info. +func registerMetrics(reg *usermetric.Registry) *metrics { + pathDirectV4 := pathLabel{Path: PathDirectIPv4} + pathDirectV6 := pathLabel{Path: PathDirectIPv6} + pathDERP := pathLabel{Path: PathDERP} + inboundPacketsTotal := usermetric.NewMultiLabelMapWithRegistry[pathLabel]( + reg, + "tailscaled_inbound_packets_total", + "counter", + "Counts the number of packets received from other peers", + ) + inboundBytesTotal := usermetric.NewMultiLabelMapWithRegistry[pathLabel]( + reg, + "tailscaled_inbound_bytes_total", + "counter", + "Counts the number of bytes received from other peers", + ) + outboundPacketsTotal := usermetric.NewMultiLabelMapWithRegistry[pathLabel]( + reg, + "tailscaled_outbound_packets_total", + "counter", + "Counts the number of packets sent to other peers", + ) + outboundBytesTotal := usermetric.NewMultiLabelMapWithRegistry[pathLabel]( + reg, + "tailscaled_outbound_bytes_total", + "counter", + "Counts the number of bytes sent to other peers", + ) + m := new(metrics) + + // Map clientmetrics to the usermetric counters. 
+ metricRecvDataPacketsIPv4.Register(&m.inboundPacketsIPv4Total) + metricRecvDataPacketsIPv6.Register(&m.inboundPacketsIPv6Total) + metricRecvDataPacketsDERP.Register(&m.inboundPacketsDERPTotal) + metricSendUDP.Register(&m.outboundPacketsIPv4Total) + metricSendUDP.Register(&m.outboundPacketsIPv6Total) + metricSendDERP.Register(&m.outboundPacketsDERPTotal) + + inboundPacketsTotal.Set(pathDirectV4, &m.inboundPacketsIPv4Total) + inboundPacketsTotal.Set(pathDirectV6, &m.inboundPacketsIPv6Total) + inboundPacketsTotal.Set(pathDERP, &m.inboundPacketsDERPTotal) + + inboundBytesTotal.Set(pathDirectV4, &m.inboundBytesIPv4Total) + inboundBytesTotal.Set(pathDirectV6, &m.inboundBytesIPv6Total) + inboundBytesTotal.Set(pathDERP, &m.inboundBytesDERPTotal) + + outboundPacketsTotal.Set(pathDirectV4, &m.outboundPacketsIPv4Total) + outboundPacketsTotal.Set(pathDirectV6, &m.outboundPacketsIPv6Total) + outboundPacketsTotal.Set(pathDERP, &m.outboundPacketsDERPTotal) + + outboundBytesTotal.Set(pathDirectV4, &m.outboundBytesIPv4Total) + outboundBytesTotal.Set(pathDirectV6, &m.outboundBytesIPv6Total) + outboundBytesTotal.Set(pathDERP, &m.outboundBytesDERPTotal) + + return m +} + +// deregisterMetrics unregisters the underlying usermetrics expvar counters +// from clientmetrics. +func deregisterMetrics(m *metrics) { + metricRecvDataPacketsIPv4.UnregisterAll() + metricRecvDataPacketsIPv6.UnregisterAll() + metricRecvDataPacketsDERP.UnregisterAll() + metricSendUDP.UnregisterAll() + metricSendDERP.UnregisterAll() +} + // InstallCaptureHook installs a callback which is called to // log debug information into the pcap stream. This function // can be called with a nil argument to uninstall the capture @@ -1140,7 +1264,14 @@ func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte) (sent bool, err error) { _ = c.maybeRebindOnError(runtime.GOOS, err) } else { if sent { - metricSendUDP.Add(1) + switch { + case ipp.Addr().Is4(): + c.metrics.outboundPacketsIPv4Total.Add(1) + c.metrics.outboundBytesIPv4Total.Add(int64(len(b))) + case ipp.Addr().Is6(): + c.metrics.outboundPacketsIPv6Total.Add(1) + c.metrics.outboundBytesIPv6Total.Add(int64(len(b))) + } } } return @@ -1278,19 +1409,24 @@ func (c *Conn) putReceiveBatch(batch *receiveBatch) { c.receiveBatchPool.Put(batch) } -// receiveIPv4 creates an IPv4 ReceiveFunc reading from c.pconn4. func (c *Conn) receiveIPv4() conn.ReceiveFunc { - return c.mkReceiveFunc(&c.pconn4, c.health.ReceiveFuncStats(health.ReceiveIPv4), metricRecvDataIPv4) + return c.mkReceiveFunc(&c.pconn4, c.health.ReceiveFuncStats(health.ReceiveIPv4), + &c.metrics.inboundPacketsIPv4Total, + &c.metrics.inboundBytesIPv4Total, + ) } // receiveIPv6 creates an IPv6 ReceiveFunc reading from c.pconn6. func (c *Conn) receiveIPv6() conn.ReceiveFunc { - return c.mkReceiveFunc(&c.pconn6, c.health.ReceiveFuncStats(health.ReceiveIPv6), metricRecvDataIPv6) + return c.mkReceiveFunc(&c.pconn6, c.health.ReceiveFuncStats(health.ReceiveIPv6), + &c.metrics.inboundPacketsIPv6Total, + &c.metrics.inboundBytesIPv6Total, + ) } // mkReceiveFunc creates a ReceiveFunc reading from ruc. -// The provided healthItem and metric are updated if non-nil. -func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFuncStats, metric *clientmetric.Metric) conn.ReceiveFunc { +// The provided healthItem and metrics are updated if non-nil. +func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFuncStats, packetMetric, bytesMetric *expvar.Int) conn.ReceiveFunc { // epCache caches an IPPort->endpoint for hot flows. 
var epCache ippEndpointCache @@ -1327,8 +1463,11 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu } ipp := msg.Addr.(*net.UDPAddr).AddrPort() if ep, ok := c.receiveIP(msg.Buffers[0][:msg.N], ipp, &epCache); ok { - if metric != nil { - metric.Add(1) + if packetMetric != nil { + packetMetric.Add(1) + } + if bytesMetric != nil { + bytesMetric.Add(int64(msg.N)) } eps[i] = ep sizes[i] = msg.N @@ -2377,6 +2516,8 @@ func (c *Conn) Close() error { pinger.Close() } + deregisterMetrics(c.metrics) + return nil } @@ -2930,17 +3071,17 @@ var ( metricSendDERPErrorChan = clientmetric.NewCounter("magicsock_send_derp_error_chan") metricSendDERPErrorClosed = clientmetric.NewCounter("magicsock_send_derp_error_closed") metricSendDERPErrorQueue = clientmetric.NewCounter("magicsock_send_derp_error_queue") - metricSendUDP = clientmetric.NewCounter("magicsock_send_udp") + metricSendUDP = clientmetric.NewAggregateCounter("magicsock_send_udp") metricSendUDPError = clientmetric.NewCounter("magicsock_send_udp_error") - metricSendDERP = clientmetric.NewCounter("magicsock_send_derp") + metricSendDERP = clientmetric.NewAggregateCounter("magicsock_send_derp") metricSendDERPError = clientmetric.NewCounter("magicsock_send_derp_error") // Data packets (non-disco) metricSendData = clientmetric.NewCounter("magicsock_send_data") metricSendDataNetworkDown = clientmetric.NewCounter("magicsock_send_data_network_down") - metricRecvDataDERP = clientmetric.NewCounter("magicsock_recv_data_derp") - metricRecvDataIPv4 = clientmetric.NewCounter("magicsock_recv_data_ipv4") - metricRecvDataIPv6 = clientmetric.NewCounter("magicsock_recv_data_ipv6") + metricRecvDataPacketsDERP = clientmetric.NewAggregateCounter("magicsock_recv_data_derp") + metricRecvDataPacketsIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv4") + metricRecvDataPacketsIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv6") // Disco packets metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp") diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 6b2d961b9..c1b8eef22 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -28,6 +28,7 @@ import ( "time" "unsafe" + qt "github.com/frankban/quicktest" wgconn "github.com/tailscale/wireguard-go/conn" "github.com/tailscale/wireguard-go/device" "github.com/tailscale/wireguard-go/tun/tuntest" @@ -1188,6 +1189,91 @@ func testTwoDevicePing(t *testing.T, d *devices) { checkStats(t, m1, m1Conns) checkStats(t, m2, m2Conns) }) + t.Run("compare-metrics-stats", func(t *testing.T) { + setT(t) + defer setT(outerT) + m1.conn.resetMetricsForTest() + m1.stats.TestExtract() + m2.conn.resetMetricsForTest() + m2.stats.TestExtract() + t.Logf("Metrics before: %s\n", m1.metrics.String()) + ping1(t) + ping2(t) + assertConnStatsAndUserMetricsEqual(t, m1) + assertConnStatsAndUserMetricsEqual(t, m2) + t.Logf("Metrics after: %s\n", m1.metrics.String()) + }) +} + +func (c *Conn) resetMetricsForTest() { + c.metrics.inboundBytesIPv4Total.Set(0) + c.metrics.inboundPacketsIPv4Total.Set(0) + c.metrics.outboundBytesIPv4Total.Set(0) + c.metrics.outboundPacketsIPv4Total.Set(0) + c.metrics.inboundBytesIPv6Total.Set(0) + c.metrics.inboundPacketsIPv6Total.Set(0) + c.metrics.outboundBytesIPv6Total.Set(0) + c.metrics.outboundPacketsIPv6Total.Set(0) + c.metrics.inboundBytesDERPTotal.Set(0) + c.metrics.inboundPacketsDERPTotal.Set(0) + c.metrics.outboundBytesDERPTotal.Set(0) + 
c.metrics.outboundPacketsDERPTotal.Set(0) +} + +func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { + _, phys := ms.stats.TestExtract() + + physIPv4RxBytes := int64(0) + physIPv4TxBytes := int64(0) + physDERPRxBytes := int64(0) + physDERPTxBytes := int64(0) + physIPv4RxPackets := int64(0) + physIPv4TxPackets := int64(0) + physDERPRxPackets := int64(0) + physDERPTxPackets := int64(0) + for conn, count := range phys { + t.Logf("physconn src: %s, dst: %s", conn.Src.String(), conn.Dst.String()) + if conn.Dst.String() == "127.3.3.40:1" { + physDERPRxBytes += int64(count.RxBytes) + physDERPTxBytes += int64(count.TxBytes) + physDERPRxPackets += int64(count.RxPackets) + physDERPTxPackets += int64(count.TxPackets) + } else { + physIPv4RxBytes += int64(count.RxBytes) + physIPv4TxBytes += int64(count.TxBytes) + physIPv4RxPackets += int64(count.RxPackets) + physIPv4TxPackets += int64(count.TxPackets) + } + } + + metricIPv4RxBytes := ms.conn.metrics.inboundBytesIPv4Total.Value() + metricIPv4RxPackets := ms.conn.metrics.inboundPacketsIPv4Total.Value() + metricIPv4TxBytes := ms.conn.metrics.outboundBytesIPv4Total.Value() + metricIPv4TxPackets := ms.conn.metrics.outboundPacketsIPv4Total.Value() + + metricDERPRxBytes := ms.conn.metrics.inboundBytesDERPTotal.Value() + metricDERPRxPackets := ms.conn.metrics.inboundPacketsDERPTotal.Value() + metricDERPTxBytes := ms.conn.metrics.outboundBytesDERPTotal.Value() + metricDERPTxPackets := ms.conn.metrics.outboundPacketsDERPTotal.Value() + + c := qt.New(t) + c.Assert(physDERPRxBytes, qt.Equals, metricDERPRxBytes) + c.Assert(physDERPTxBytes, qt.Equals, metricDERPTxBytes) + c.Assert(physIPv4RxBytes, qt.Equals, metricIPv4RxBytes) + c.Assert(physIPv4TxBytes, qt.Equals, metricIPv4TxBytes) + c.Assert(physDERPRxPackets, qt.Equals, metricDERPRxPackets) + c.Assert(physDERPTxPackets, qt.Equals, metricDERPTxPackets) + c.Assert(physIPv4RxPackets, qt.Equals, metricIPv4RxPackets) + c.Assert(physIPv4TxPackets, qt.Equals, metricIPv4TxPackets) + + // Validate that the usermetrics and clientmetrics are in sync + // Note: the clientmetrics are global, this means that when they are registering with the + // wgengine, multiple in-process nodes used by this test will be updating the same metrics. This is why we need to multiply + // the metrics by 2 to get the expected value. + // TODO(kradalby): https://github.com/tailscale/tailscale/issues/13420 + c.Assert(metricSendUDP.Value(), qt.Equals, metricIPv4TxPackets*2) + c.Assert(metricRecvDataPacketsIPv4.Value(), qt.Equals, metricIPv4RxPackets*2) + c.Assert(metricRecvDataPacketsDERP.Value(), qt.Equals, metricDERPRxPackets*2) } func TestDiscoMessage(t *testing.T) { From e0d711c478e335e99302a5320b43538337fa298b Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Mon, 23 Sep 2024 17:07:38 +0200 Subject: [PATCH 0050/1708] {net/connstats,wgengine/magicsock}: fix packet counting in connstats connstats currently increments the packet counter whenever it is called to store a length of data, however when udp batch sending was introduced we pass the length for a series of packages, and it is only incremented ones, making it count wrongly if we are on a platform supporting udp batches. 
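A minimal sketch of the counting fix (the helper name is made up for illustration; the real call sites pass these totals to connstats' UpdateTxPhysical/UpdateRxPhysical, as the diff below shows): when a batch of payloads is sent or received in one call, both the packet count and the byte count of the whole batch are reported, instead of counting the batch as a single packet.

    package main

    import "fmt"

    // batchTotals returns how many packets and how many bytes a batched UDP
    // send represents. Sketch only.
    func batchTotals(buffs [][]byte) (packets, bytes int) {
        for _, b := range buffs {
            bytes += len(b)
        }
        return len(buffs), bytes
    }

    func main() {
        p, b := batchTotals([][]byte{make([]byte, 1200), make([]byte, 800)})
        fmt.Println(p, b) // 2 2000
    }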
Updates tailscale/corp#22075 Signed-off-by: Kristoffer Dalby --- net/connstats/stats.go | 22 +++++++++++----------- wgengine/magicsock/derp.go | 2 +- wgengine/magicsock/endpoint.go | 4 ++-- wgengine/magicsock/magicsock.go | 2 +- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/net/connstats/stats.go b/net/connstats/stats.go index dbcd946b8..4e6d8e109 100644 --- a/net/connstats/stats.go +++ b/net/connstats/stats.go @@ -131,23 +131,23 @@ func (s *Statistics) updateVirtual(b []byte, receive bool) { s.virtual[conn] = cnts } -// UpdateTxPhysical updates the counters for a transmitted wireguard packet +// UpdateTxPhysical updates the counters for zero or more transmitted wireguard packets. // The src is always a Tailscale IP address, representing some remote peer. // The dst is a remote IP address and port that corresponds // with some physical peer backing the Tailscale IP address. -func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, n int) { - s.updatePhysical(src, dst, n, false) +func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { + s.updatePhysical(src, dst, packets, bytes, false) } -// UpdateRxPhysical updates the counters for a received wireguard packet. +// UpdateRxPhysical updates the counters for zero or more received wireguard packets. // The src is always a Tailscale IP address, representing some remote peer. // The dst is a remote IP address and port that corresponds // with some physical peer backing the Tailscale IP address. -func (s *Statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, n int) { - s.updatePhysical(src, dst, n, true) +func (s *Statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { + s.updatePhysical(src, dst, packets, bytes, true) } -func (s *Statistics) updatePhysical(src netip.Addr, dst netip.AddrPort, n int, receive bool) { +func (s *Statistics) updatePhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int, receive bool) { conn := netlogtype.Connection{Src: netip.AddrPortFrom(src, 0), Dst: dst} s.mu.Lock() @@ -157,11 +157,11 @@ func (s *Statistics) updatePhysical(src netip.Addr, dst netip.AddrPort, n int, r return } if receive { - cnts.RxPackets++ - cnts.RxBytes += uint64(n) + cnts.RxPackets += uint64(packets) + cnts.RxBytes += uint64(bytes) } else { - cnts.TxPackets++ - cnts.TxBytes += uint64(n) + cnts.TxPackets += uint64(packets) + cnts.TxBytes += uint64(bytes) } s.physical[conn] = cnts } diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index 281447ac2..bfee02f6e 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -730,7 +730,7 @@ func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *en ep.noteRecvActivity(ipp, mono.Now()) if stats := c.stats.Load(); stats != nil { - stats.UpdateRxPhysical(ep.nodeAddr, ipp, dm.n) + stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, dm.n) } return n, ep } diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 78b9ee92a..ab9f3d47d 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -976,7 +976,7 @@ func (de *endpoint) send(buffs [][]byte) error { // TODO(raggi): needs updating for accuracy, as in error conditions we may have partial sends. 
if stats := de.c.stats.Load(); err == nil && stats != nil { - stats.UpdateTxPhysical(de.nodeAddr, udpAddr, txBytes) + stats.UpdateTxPhysical(de.nodeAddr, udpAddr, len(buffs), txBytes) } } if derpAddr.IsValid() { @@ -991,7 +991,7 @@ func (de *endpoint) send(buffs [][]byte) error { } if stats := de.c.stats.Load(); stats != nil { - stats.UpdateTxPhysical(de.nodeAddr, derpAddr, txBytes) + stats.UpdateTxPhysical(de.nodeAddr, derpAddr, 1, txBytes) } if allOk { return nil diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 2d4944baf..72e59a2e7 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1523,7 +1523,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *ippEndpointCache) ep.lastRecvUDPAny.StoreAtomic(now) ep.noteRecvActivity(ipp, now) if stats := c.stats.Load(); stats != nil { - stats.UpdateRxPhysical(ep.nodeAddr, ipp, len(b)) + stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, len(b)) } return ep, true } From a8f9c0d6e40a99e091e06572cd5e9b20db7baa21 Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 14 Oct 2024 15:03:08 +0000 Subject: [PATCH 0051/1708] licenses: update license notices Signed-off-by: License Updater --- licenses/android.md | 2 -- licenses/apple.md | 3 +-- licenses/tailscale.md | 1 - licenses/windows.md | 2 +- 4 files changed, 2 insertions(+), 6 deletions(-) diff --git a/licenses/android.md b/licenses/android.md index ef53117e8..94aeb3fc0 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -36,7 +36,6 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - [github.com/illarion/gonotify/v2](https://pkg.go.dev/github.com/illarion/gonotify/v2) ([MIT](https://github.com/illarion/gonotify/blob/v2.0.3/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) @@ -57,7 +56,6 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) - [github.com/tailscale/golang-x-crypto](https://pkg.go.dev/github.com/tailscale/golang-x-crypto) ([BSD-3-Clause](https://github.com/tailscale/golang-x-crypto/blob/3fde5e568aa4/LICENSE)) - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE)) - [github.com/tailscale/tailscale-android/libtailscale](https://pkg.go.dev/github.com/tailscale/tailscale-android/libtailscale) ([BSD-3-Clause](https://github.com/tailscale/tailscale-android/blob/HEAD/LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index 4cb100c62..751082d5b 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -63,7 +63,6 @@ See also the dependencies in the [Tailscale CLI][]. - [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) - [github.com/tailscale/golang-x-crypto](https://pkg.go.dev/github.com/tailscale/golang-x-crypto) ([BSD-3-Clause](https://github.com/tailscale/golang-x-crypto/blob/3fde5e568aa4/LICENSE)) - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE)) - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/799c1978fafc/LICENSE)) @@ -77,7 +76,7 @@ See also the dependencies in the [Tailscale CLI][]. 
- [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.25.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fe59bbe5:LICENSE)) - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.7.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.8.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE)) - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE)) - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 544aa91ce..b1303d2a6 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -80,7 +80,6 @@ Some packages may only be included on certain architectures or operating systems - [github.com/tailscale/certstore](https://pkg.go.dev/github.com/tailscale/certstore) ([MIT](https://github.com/tailscale/certstore/blob/d3fa0460f47e/LICENSE.md)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/golang-x-crypto](https://pkg.go.dev/github.com/tailscale/golang-x-crypto) ([BSD-3-Clause](https://github.com/tailscale/golang-x-crypto/blob/3fde5e568aa4/LICENSE)) - - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE)) - [github.com/tailscale/web-client-prebuilt](https://pkg.go.dev/github.com/tailscale/web-client-prebuilt) ([BSD-3-Clause](https://github.com/tailscale/web-client-prebuilt/blob/5db17b287bf1/LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index e7f7f6f13..2a8e4e621 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -70,7 +70,7 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.18.0:LICENSE)) - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.19.0:LICENSE)) - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.7.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.8.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE)) - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE)) - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE)) From 5f22f726365851acfb189bfedf436ac34ef42782 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 15 Oct 2024 19:38:11 +0100 Subject: [PATCH 0052/1708] hostinfo,build_docker.sh,tailcfg: more reliably detect being in a container (#13826) Our existing container-detection tricks did not work on Kubernetes, where Docker is no longer used as a container runtime. Extends the existing go build tags for containers to the other container packages and uses that to reliably detect builds that were created by Tailscale for use in a container. Unfortunately this doesn't necessarily improve detection for users' custom builds, but that's a separate issue. Updates #13825 Signed-off-by: Tom Proctor --- build_docker.sh | 2 ++ hostinfo/hostinfo.go | 13 +++++++++++-- hostinfo/hostinfo_container_linux_test.go | 16 ++++++++++++++++ hostinfo/hostinfo_linux_test.go | 8 +++++++- tailcfg/tailcfg.go | 2 +- 5 files changed, 37 insertions(+), 4 deletions(-) create mode 100644 hostinfo/hostinfo_container_linux_test.go diff --git a/build_docker.sh b/build_docker.sh index 1cbdc4b9e..e8b1c8f28 100755 --- a/build_docker.sh +++ b/build_docker.sh @@ -56,6 +56,7 @@ case "$TARGET" in -X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \ --base="${BASE}" \ --tags="${TAGS}" \ + --gotags="ts_kube,ts_package_container" \ --repos="${REPOS}" \ --push="${PUSH}" \ --target="${PLATFORM}" \ @@ -72,6 +73,7 @@ case "$TARGET" in -X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \ --base="${BASE}" \ --tags="${TAGS}" \ + --gotags="ts_kube,ts_package_container" \ --repos="${REPOS}" \ --push="${PUSH}" \ --target="${PLATFORM}" \ diff --git a/hostinfo/hostinfo.go b/hostinfo/hostinfo.go index 1f9037829..3233a422d 100644 --- a/hostinfo/hostinfo.go +++ b/hostinfo/hostinfo.go @@ -280,13 +280,22 @@ func getEnvType() EnvType { return "" } -// inContainer reports whether we're running in a container. +// inContainer reports whether we're running in a container. Best-effort only, +// there's no foolproof way to detect this, but the build tag should catch all +// official builds from 1.78.0. func inContainer() opt.Bool { if runtime.GOOS != "linux" { return "" } var ret opt.Bool ret.Set(false) + if packageType != nil && packageType() == "container" { + // Go build tag ts_package_container was set during build. + ret.Set(true) + return ret + } + // Only set if using docker's container runtime. Not guaranteed by + // documentation, but it's been in place for a long time. 
if _, err := os.Stat("/.dockerenv"); err == nil { ret.Set(true) return ret @@ -362,7 +371,7 @@ func inFlyDotIo() bool { } func inReplit() bool { - // https://docs.replit.com/programming-ide/getting-repl-metadata + // https://docs.replit.com/replit-workspace/configuring-repl#environment-variables if os.Getenv("REPL_OWNER") != "" && os.Getenv("REPL_SLUG") != "" { return true } diff --git a/hostinfo/hostinfo_container_linux_test.go b/hostinfo/hostinfo_container_linux_test.go new file mode 100644 index 000000000..594a5f512 --- /dev/null +++ b/hostinfo/hostinfo_container_linux_test.go @@ -0,0 +1,16 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android && ts_package_container + +package hostinfo + +import ( + "testing" +) + +func TestInContainer(t *testing.T) { + if got := inContainer(); !got.EqualBool(true) { + t.Errorf("inContainer = %v; want true due to ts_package_container build tag", got) + } +} diff --git a/hostinfo/hostinfo_linux_test.go b/hostinfo/hostinfo_linux_test.go index 4859167a2..c8bd2abbe 100644 --- a/hostinfo/hostinfo_linux_test.go +++ b/hostinfo/hostinfo_linux_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !android +//go:build linux && !android && !ts_package_container package hostinfo @@ -34,3 +34,9 @@ remotes/origin/QTSFW_5.0.0` t.Errorf("got %q; want %q", got, want) } } + +func TestInContainer(t *testing.T) { + if got := inContainer(); !got.EqualBool(false) { + t.Errorf("inContainer = %v; want false due to absence of ts_package_container build tag", got) + } +} diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index df50a8603..92bf2cd95 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -771,7 +771,7 @@ type Hostinfo struct { // "5.10.0-17-amd64". OSVersion string `json:",omitempty"` - Container opt.Bool `json:",omitempty"` // whether the client is running in a container + Container opt.Bool `json:",omitempty"` // best-effort whether the client is running in a container Env string `json:",omitempty"` // a hostinfo.EnvType in string form Distro string `json:",omitempty"` // "debian", "ubuntu", "nixos", ... DistroVersion string `json:",omitempty"` // "20.04", ... From 2aa9125ac438ffa902158b5bedf9791c93117b9b Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Tue, 15 Oct 2024 16:18:04 -0400 Subject: [PATCH 0053/1708] cmd/derpprobe: add /healthz endpoint For a customer that wants to run their own DERP prober, let's add a /healthz endpoint that can be used to monitor derpprobe itself. 
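A monitoring system can poll the new endpoint directly. A minimal external check might look like the sketch below; the 127.0.0.1:8030 address is only an assumption for the example, substitute whatever address derpprobe's listen flag is actually set to:

    package main

    import (
        "fmt"
        "net/http"
        "os"
    )

    // Poll derpprobe's /healthz endpoint and exit non-zero unless it returns 200.
    func main() {
        resp, err := http.Get("http://127.0.0.1:8030/healthz") // address is an example
        if err != nil {
            fmt.Fprintln(os.Stderr, "derpprobe unhealthy:", err)
            os.Exit(1)
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            fmt.Fprintln(os.Stderr, "derpprobe unhealthy: status", resp.Status)
            os.Exit(1)
        }
        fmt.Println("ok")
    }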
Updates #6526 Signed-off-by: Andrew Dunham Change-Id: Iba315c999fc0b1a93d8c503c07cc733b4c8d5b6b --- cmd/derpprobe/derpprobe.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmd/derpprobe/derpprobe.go b/cmd/derpprobe/derpprobe.go index 1d0ec32c3..5b7b77091 100644 --- a/cmd/derpprobe/derpprobe.go +++ b/cmd/derpprobe/derpprobe.go @@ -75,6 +75,11 @@ func main() { prober.WithPageLink("Prober metrics", "/debug/varz"), prober.WithProbeLink("Run Probe", "/debug/probe-run?name={{.Name}}"), ), tsweb.HandlerOptions{Logf: log.Printf})) + mux.Handle("/healthz", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusOK) + w.Write([]byte("ok\n")) + })) log.Printf("Listening on %s", *listen) log.Fatal(http.ListenAndServe(*listen, mux)) } From ff5f233c3a43fa20a61ba4a76a2f3f5a75f8d437 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 7 Oct 2024 21:18:45 -0500 Subject: [PATCH 0054/1708] util/syspolicy: add rsop package that provides access to the resultant policy In this PR we add syspolicy/rsop package that facilitates policy source registration and provides access to the resultant policy merged from all registered sources for a given scope. Updates #12687 Signed-off-by: Nick Khyl --- util/syspolicy/internal/internal.go | 3 + util/syspolicy/rsop/change_callbacks.go | 107 ++ util/syspolicy/rsop/resultant_policy.go | 449 +++++++++ util/syspolicy/rsop/resultant_policy_test.go | 986 +++++++++++++++++++ util/syspolicy/rsop/rsop.go | 174 ++++ util/syspolicy/rsop/store_registration.go | 94 ++ util/syspolicy/setting/policy_scope.go | 3 + util/syspolicy/setting/setting.go | 3 + util/syspolicy/source/test_store.go | 33 +- 9 files changed, 1834 insertions(+), 18 deletions(-) create mode 100644 util/syspolicy/rsop/change_callbacks.go create mode 100644 util/syspolicy/rsop/resultant_policy.go create mode 100644 util/syspolicy/rsop/resultant_policy_test.go create mode 100644 util/syspolicy/rsop/rsop.go create mode 100644 util/syspolicy/rsop/store_registration.go diff --git a/util/syspolicy/internal/internal.go b/util/syspolicy/internal/internal.go index 4c3e28d39..8f2889625 100644 --- a/util/syspolicy/internal/internal.go +++ b/util/syspolicy/internal/internal.go @@ -13,6 +13,9 @@ import ( "tailscale.com/version" ) +// Init facilitates deferred invocation of initializers. +var Init lazy.DeferredInit + // OSForTesting is the operating system override used for testing. // It follows the same naming convention as [version.OS]. var OSForTesting lazy.SyncValue[string] diff --git a/util/syspolicy/rsop/change_callbacks.go b/util/syspolicy/rsop/change_callbacks.go new file mode 100644 index 000000000..b962f30c0 --- /dev/null +++ b/util/syspolicy/rsop/change_callbacks.go @@ -0,0 +1,107 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package rsop + +import ( + "reflect" + "slices" + "sync" + "time" + + "tailscale.com/util/set" + "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/setting" +) + +// Change represents a change from the Old to the New value of type T. +type Change[T any] struct { + New, Old T +} + +// PolicyChangeCallback is a function called whenever a policy changes. +type PolicyChangeCallback func(*PolicyChange) + +// PolicyChange describes a policy change. +type PolicyChange struct { + snapshots Change[*setting.Snapshot] +} + +// New returns the [setting.Snapshot] after the change. 
+func (c PolicyChange) New() *setting.Snapshot { + return c.snapshots.New +} + +// Old returns the [setting.Snapshot] before the change. +func (c PolicyChange) Old() *setting.Snapshot { + return c.snapshots.Old +} + +// HasChanged reports whether a policy setting with the specified [setting.Key], has changed. +func (c PolicyChange) HasChanged(key setting.Key) bool { + new, newErr := c.snapshots.New.GetErr(key) + old, oldErr := c.snapshots.Old.GetErr(key) + if newErr != nil && oldErr != nil { + return false + } + if newErr != nil || oldErr != nil { + return true + } + switch newVal := new.(type) { + case bool, uint64, string, setting.Visibility, setting.PreferenceOption, time.Duration: + return newVal != old + case []string: + oldVal, ok := old.([]string) + return !ok || !slices.Equal(newVal, oldVal) + default: + loggerx.Errorf("[unexpected] %q has an unsupported value type: %T", key, newVal) + return !reflect.DeepEqual(new, old) + } +} + +// policyChangeCallbacks are the callbacks to invoke when the effective policy changes. +// It is safe for concurrent use. +type policyChangeCallbacks struct { + mu sync.Mutex + cbs set.HandleSet[PolicyChangeCallback] +} + +// Register adds the specified callback to be invoked whenever the policy changes. +func (c *policyChangeCallbacks) Register(callback PolicyChangeCallback) (unregister func()) { + c.mu.Lock() + handle := c.cbs.Add(callback) + c.mu.Unlock() + return func() { + c.mu.Lock() + delete(c.cbs, handle) + c.mu.Unlock() + } +} + +// Invoke calls the registered callback functions with the specified policy change info. +func (c *policyChangeCallbacks) Invoke(snapshots Change[*setting.Snapshot]) { + var wg sync.WaitGroup + defer wg.Wait() + + c.mu.Lock() + defer c.mu.Unlock() + + wg.Add(len(c.cbs)) + change := &PolicyChange{snapshots: snapshots} + for _, cb := range c.cbs { + go func() { + defer wg.Done() + cb(change) + }() + } +} + +// Close awaits the completion of active callbacks and prevents any further invocations. +func (c *policyChangeCallbacks) Close() { + c.mu.Lock() + defer c.mu.Unlock() + if c.cbs != nil { + clear(c.cbs) + c.cbs = nil + } +} diff --git a/util/syspolicy/rsop/resultant_policy.go b/util/syspolicy/rsop/resultant_policy.go new file mode 100644 index 000000000..019b8f602 --- /dev/null +++ b/util/syspolicy/rsop/resultant_policy.go @@ -0,0 +1,449 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package rsop + +import ( + "errors" + "fmt" + "slices" + "sync" + "sync/atomic" + "time" + + "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/setting" + + "tailscale.com/util/syspolicy/source" +) + +// ErrPolicyClosed is returned by [Policy.Reload], [Policy.addSource], +// [Policy.removeSource] and [Policy.replaceSource] if the policy has been closed. +var ErrPolicyClosed = errors.New("effective policy closed") + +// The minimum and maximum wait times after detecting a policy change +// before reloading the policy. This only affects policy reloads triggered +// by a change in the underlying [source.Store] and does not impact +// synchronous, caller-initiated reloads, such as when [Policy.Reload] is called. +// +// Policy changes occurring within [policyReloadMinDelay] of each other +// will be batched together, resulting in a single policy reload +// no later than [policyReloadMaxDelay] after the first detected change. 
+// In other words, the effective policy will be reloaded no more often than once +// every 5 seconds, but at most 15 seconds after an underlying [source.Store] +// has issued a policy change callback. +// +// See [Policy.watchReload]. +var ( + policyReloadMinDelay = 5 * time.Second + policyReloadMaxDelay = 15 * time.Second +) + +// Policy provides access to the current effective [setting.Snapshot] for a given +// scope and allows to reload it from the underlying [source.Store] list. It also allows to +// subscribe and receive a callback whenever the effective [setting.Snapshot] is changed. +// +// It is safe for concurrent use. +type Policy struct { + scope setting.PolicyScope + + reloadCh chan reloadRequest // 1-buffered; written to when a policy reload is required + closeCh chan struct{} // closed to signal that the Policy is being closed + doneCh chan struct{} // closed by [Policy.closeInternal] + + // effective is the most recent version of the [setting.Snapshot] + // containing policy settings merged from all applicable sources. + effective atomic.Pointer[setting.Snapshot] + + changeCallbacks policyChangeCallbacks + + mu sync.Mutex + watcherStarted bool // whether [Policy.watchReload] was started + sources source.ReadableSources + closing bool // whether [Policy.Close] was called (even if we're still closing) +} + +// newPolicy returns a new [Policy] for the specified [setting.PolicyScope] +// that tracks changes and merges policy settings read from the specified sources. +func newPolicy(scope setting.PolicyScope, sources ...*source.Source) (_ *Policy, err error) { + readableSources := make(source.ReadableSources, 0, len(sources)) + defer func() { + if err != nil { + readableSources.Close() + } + }() + for _, s := range sources { + reader, err := s.Reader() + if err != nil { + return nil, fmt.Errorf("failed to get a store reader: %w", err) + } + session, err := reader.OpenSession() + if err != nil { + return nil, fmt.Errorf("failed to open a reading session: %w", err) + } + readableSources = append(readableSources, source.ReadableSource{Source: s, ReadingSession: session}) + } + + // Sort policy sources by their precedence from lower to higher. + // For example, {UserPolicy},{ProfilePolicy},{DevicePolicy}. + readableSources.StableSort() + + p := &Policy{ + scope: scope, + sources: readableSources, + reloadCh: make(chan reloadRequest, 1), + closeCh: make(chan struct{}), + doneCh: make(chan struct{}), + } + if _, err := p.reloadNow(false); err != nil { + p.Close() + return nil, err + } + p.startWatchReloadIfNeeded() + return p, nil +} + +// IsValid reports whether p is in a valid state and has not been closed. +// +// Since p's state can be changed by other goroutines at any time, this should +// only be used as an optimization. +func (p *Policy) IsValid() bool { + select { + case <-p.closeCh: + return false + default: + return true + } +} + +// Scope returns the [setting.PolicyScope] that this policy applies to. +func (p *Policy) Scope() setting.PolicyScope { + return p.scope +} + +// Get returns the effective [setting.Snapshot]. +func (p *Policy) Get() *setting.Snapshot { + return p.effective.Load() +} + +// RegisterChangeCallback adds a function to be called whenever the effective +// policy changes. The returned function can be used to unregister the callback. 
+func (p *Policy) RegisterChangeCallback(callback PolicyChangeCallback) (unregister func()) { + return p.changeCallbacks.Register(callback) +} + +// Reload synchronously re-reads policy settings from the underlying list of policy sources, +// constructing a new merged [setting.Snapshot] even if the policy remains unchanged. +// In most scenarios, there's no need to re-read the policy manually. +// Instead, it is recommended to register a policy change callback, or to use +// the most recent [setting.Snapshot] returned by the [Policy.Get] method. +// +// It must not be called with p.mu held. +func (p *Policy) Reload() (*setting.Snapshot, error) { + return p.reload(true) +} + +// reload is like Reload, but allows to specify whether to re-read policy settings +// from unchanged policy sources. +// +// It must not be called with p.mu held. +func (p *Policy) reload(force bool) (*setting.Snapshot, error) { + if !p.startWatchReloadIfNeeded() { + return p.Get(), nil + } + + respCh := make(chan reloadResponse, 1) + select { + case p.reloadCh <- reloadRequest{force: force, respCh: respCh}: + // continue + case <-p.closeCh: + return nil, ErrPolicyClosed + } + select { + case resp := <-respCh: + return resp.policy, resp.err + case <-p.closeCh: + return nil, ErrPolicyClosed + } +} + +// reloadAsync requests an asynchronous background policy reload. +// The policy will be reloaded no later than in [policyReloadMaxDelay]. +// +// It must not be called with p.mu held. +func (p *Policy) reloadAsync() { + if !p.startWatchReloadIfNeeded() { + return + } + select { + case p.reloadCh <- reloadRequest{}: + // Sent. + default: + // A reload request is already en route. + } +} + +// reloadNow loads and merges policies from all sources, updating the effective policy. +// If the force parameter is true, it forcibly reloads policies +// from the underlying policy store, even if no policy changes were detected. +// +// Except for the initial policy reload during the [Policy] creation, +// this method should only be called from the [Policy.watchReload] goroutine. +func (p *Policy) reloadNow(force bool) (*setting.Snapshot, error) { + new, err := p.readAndMerge(force) + if err != nil { + return nil, err + } + old := p.effective.Swap(new) + // A nil old value indicates the initial policy load rather than a policy change. + // Additionally, we should not invoke the policy change callbacks unless the + // policy items have actually changed. + if old != nil && !old.EqualItems(new) { + snapshots := Change[*setting.Snapshot]{New: new, Old: old} + p.changeCallbacks.Invoke(snapshots) + } + return new, nil +} + +// Done returns a channel that is closed when the [Policy] is closed. +func (p *Policy) Done() <-chan struct{} { + return p.doneCh +} + +// readAndMerge reads and merges policy settings from all applicable sources, +// returning a [setting.Snapshot] with the merged result. +// If the force parameter is true, it re-reads policy settings from each source +// even if no policy change was observed, and returns an error if the read +// operation fails. +func (p *Policy) readAndMerge(force bool) (*setting.Snapshot, error) { + p.mu.Lock() + defer p.mu.Unlock() + // Start with an empty policy in the target scope. + effective := setting.NewSnapshot(nil, setting.SummaryWith(p.scope)) + // Then merge policy settings from all sources. + // Policy sources with the highest precedence (e.g., the device policy) are merged last, + // overriding any conflicting policy settings with lower precedence. 
+ for _, s := range p.sources { + var policy *setting.Snapshot + if force { + var err error + if policy, err = s.ReadSettings(); err != nil { + return nil, err + } + } else { + policy = s.GetSettings() + } + effective = setting.MergeSnapshots(effective, policy) + } + return effective, nil +} + +// addSource adds the specified source to the list of sources used by p, +// and triggers a synchronous policy refresh. It returns an error +// if the source is not a valid source for this effective policy, +// or if the effective policy is being closed, +// or if policy refresh fails with an error. +func (p *Policy) addSource(source *source.Source) error { + return p.applySourcesChange(source, nil) +} + +// removeSource removes the specified source from the list of sources used by p, +// and triggers a synchronous policy refresh. It returns an error if the +// effective policy is being closed, or if policy refresh fails with an error. +func (p *Policy) removeSource(source *source.Source) error { + return p.applySourcesChange(nil, source) +} + +// replaceSource replaces the old source with the new source atomically, +// and triggers a synchronous policy refresh. It returns an error +// if the source is not a valid source for this effective policy, +// or if the effective policy is being closed, +// or if policy refresh fails with an error. +func (p *Policy) replaceSource(old, new *source.Source) error { + return p.applySourcesChange(new, old) +} + +func (p *Policy) applySourcesChange(toAdd, toRemove *source.Source) error { + if toAdd == toRemove { + return nil + } + if toAdd != nil && !toAdd.Scope().Contains(p.scope) { + return errors.New("scope mismatch") + } + + changed, err := func() (changed bool, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if toAdd != nil && !p.sources.Contains(toAdd) { + reader, err := toAdd.Reader() + if err != nil { + return false, fmt.Errorf("failed to get a store reader: %w", err) + } + session, err := reader.OpenSession() + if err != nil { + return false, fmt.Errorf("failed to open a reading session: %w", err) + } + + addAt := p.sources.InsertionIndexOf(toAdd) + toAdd := source.ReadableSource{ + Source: toAdd, + ReadingSession: session, + } + p.sources = slices.Insert(p.sources, addAt, toAdd) + go p.watchPolicyChanges(toAdd) + changed = true + } + if toRemove != nil { + if deleteAt := p.sources.IndexOf(toRemove); deleteAt != -1 { + p.sources.DeleteAt(deleteAt) + changed = true + } + } + return changed, nil + }() + if changed { + _, err = p.reload(false) + } + return err // may be nil or non-nil +} + +func (p *Policy) watchPolicyChanges(s source.ReadableSource) { + for { + select { + case _, ok := <-s.ReadingSession.PolicyChanged(): + if !ok { + p.mu.Lock() + abruptlyClosed := slices.Contains(p.sources, s) + p.mu.Unlock() + if abruptlyClosed { + // The underlying [source.Source] was closed abruptly without + // being properly removed or replaced by another policy source. + // We can't keep this [Policy] up to date, so we should close it. + p.Close() + } + return + } + // The PolicyChanged channel was signaled. + // Request an asynchronous policy reload. + p.reloadAsync() + case <-p.closeCh: + // The [Policy] is being closed. + return + } + } +} + +// startWatchReloadIfNeeded starts [Policy.watchReload] in a new goroutine +// if the list of policy sources is not empty, it hasn't been started yet, +// and the [Policy] is not being closed. +// It reports whether [Policy.watchReload] has ever been started. +// +// It must not be called with p.mu held. 
+func (p *Policy) startWatchReloadIfNeeded() bool { + p.mu.Lock() + defer p.mu.Unlock() + if len(p.sources) != 0 && !p.watcherStarted && !p.closing { + go p.watchReload() + for i := range p.sources { + go p.watchPolicyChanges(p.sources[i]) + } + p.watcherStarted = true + } + return p.watcherStarted +} + +// reloadRequest describes a policy reload request. +type reloadRequest struct { + // force policy reload regardless of whether a policy change was detected. + force bool + // respCh is an optional channel. If non-nil, it makes the reload request + // synchronous and receives the result. + respCh chan<- reloadResponse +} + +// reloadResponse is a result of a synchronous policy reload. +type reloadResponse struct { + policy *setting.Snapshot + err error +} + +// watchReload processes incoming synchronous and asynchronous policy reload requests. +// +// Synchronous requests (with a non-nil respCh) are served immediately. +// +// Asynchronous requests are debounced and throttled: they are executed at least +// [policyReloadMinDelay] after the last request, but no later than [policyReloadMaxDelay] +// after the first request in a batch. +func (p *Policy) watchReload() { + defer p.closeInternal() + + force := false // whether a forced refresh was requested + var delayCh, timeoutCh <-chan time.Time + reload := func(respCh chan<- reloadResponse) { + delayCh, timeoutCh = nil, nil + policy, err := p.reloadNow(force) + if err != nil { + loggerx.Errorf("%v policy reload failed: %v\n", p.scope, err) + } + if respCh != nil { + respCh <- reloadResponse{policy: policy, err: err} + } + force = false + } + +loop: + for { + select { + case req := <-p.reloadCh: + if req.force { + force = true + } + if req.respCh != nil { + reload(req.respCh) + continue + } + if delayCh == nil { + timeoutCh = time.After(policyReloadMinDelay) + } + delayCh = time.After(policyReloadMaxDelay) + case <-delayCh: + reload(nil) + case <-timeoutCh: + reload(nil) + case <-p.closeCh: + break loop + } + } +} + +func (p *Policy) closeInternal() { + p.mu.Lock() + defer p.mu.Unlock() + p.sources.Close() + p.changeCallbacks.Close() + close(p.doneCh) + deletePolicy(p) +} + +// Close initiates the closing of the policy. +// The [Policy.Done] channel is closed to signal that the operation has been completed. +func (p *Policy) Close() { + p.mu.Lock() + alreadyClosing := p.closing + watcherStarted := p.watcherStarted + p.closing = true + p.mu.Unlock() + + if alreadyClosing { + return + } + + close(p.closeCh) + if !watcherStarted { + // Normally, closing p.closeCh signals [Policy.watchReload] to exit, + // and [Policy.closeInternal] performs the actual closing when + // [Policy.watchReload] returns. However, if the watcher was never + // started, we need to call [Policy.closeInternal] manually. 
+ go p.closeInternal() + } +} diff --git a/util/syspolicy/rsop/resultant_policy_test.go b/util/syspolicy/rsop/resultant_policy_test.go new file mode 100644 index 000000000..b2408c7f7 --- /dev/null +++ b/util/syspolicy/rsop/resultant_policy_test.go @@ -0,0 +1,986 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package rsop + +import ( + "errors" + "slices" + "sort" + "strconv" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "tailscale.com/tstest" + "tailscale.com/util/syspolicy/setting" + + "tailscale.com/util/syspolicy/source" +) + +func TestGetEffectivePolicyNoSource(t *testing.T) { + tests := []struct { + name string + scope setting.PolicyScope + }{ + { + name: "DevicePolicy", + scope: setting.DeviceScope, + }, + { + name: "CurrentProfilePolicy", + scope: setting.CurrentProfileScope, + }, + { + name: "CurrentUserPolicy", + scope: setting.CurrentUserScope, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var policy *Policy + t.Cleanup(func() { + if policy != nil { + policy.Close() + <-policy.Done() + } + }) + + // Make sure we don't create any goroutines. + // We intentionally call ResourceCheck after t.Cleanup, so that when the test exits, + // the resource check runs before the test cleanup closes the policy. + // This helps to report any unexpectedly created goroutines. + // The goal is to ensure that using the syspolicy package, and particularly + // the rsop sub-package, is not wasteful and does not create unnecessary goroutines + // on platforms without registered policy sources. + tstest.ResourceCheck(t) + + policy, err := PolicyFor(tt.scope) + if err != nil { + t.Fatalf("Failed to get effective policy for %v: %v", tt.scope, err) + } + + if got := policy.Get(); got.Len() != 0 { + t.Errorf("Snapshot: got %v; want empty", got) + } + + if got, err := policy.Reload(); err != nil { + t.Errorf("Reload failed: %v", err) + } else if got.Len() != 0 { + t.Errorf("Snapshot: got %v; want empty", got) + } + }) + } +} + +func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { + type sourceConfig struct { + name string + scope setting.PolicyScope + settingKey setting.Key + settingValue string + wantEffective bool + } + tests := []struct { + name string + scope setting.PolicyScope + initialSources []sourceConfig + additionalSources []sourceConfig + wantSnapshot *setting.Snapshot + }{ + { + name: "DevicePolicy/NoSources", + scope: setting.DeviceScope, + wantSnapshot: setting.NewSnapshot(nil, setting.DeviceScope), + }, + { + name: "UserScope/NoSources", + scope: setting.CurrentUserScope, + wantSnapshot: setting.NewSnapshot(nil, setting.CurrentUserScope), + }, + { + name: "DevicePolicy/OneInitialSource", + scope: setting.DeviceScope, + initialSources: []sourceConfig{ + { + name: "TestSourceA", + scope: setting.DeviceScope, + settingKey: "TestKeyA", + settingValue: "TestValueA", + wantEffective: true, + }, + }, + wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + "TestKeyA": setting.RawItemWith("TestValueA", nil, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), + }, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), + }, + { + name: "DevicePolicy/OneAdditionalSource", + scope: setting.DeviceScope, + additionalSources: []sourceConfig{ + { + name: "TestSourceA", + scope: setting.DeviceScope, + settingKey: "TestKeyA", + settingValue: "TestValueA", + wantEffective: true, + }, + }, + wantSnapshot: 
setting.NewSnapshot(map[setting.Key]setting.RawItem{ + "TestKeyA": setting.RawItemWith("TestValueA", nil, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), + }, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), + }, + { + name: "DevicePolicy/ManyInitialSources/NoConflicts", + scope: setting.DeviceScope, + initialSources: []sourceConfig{ + { + name: "TestSourceA", + scope: setting.DeviceScope, + settingKey: "TestKeyA", + settingValue: "TestValueA", + wantEffective: true, + }, + { + name: "TestSourceB", + scope: setting.DeviceScope, + settingKey: "TestKeyB", + settingValue: "TestValueB", + wantEffective: true, + }, + { + name: "TestSourceC", + scope: setting.DeviceScope, + settingKey: "TestKeyC", + settingValue: "TestValueC", + wantEffective: true, + }, + }, + wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + "TestKeyA": setting.RawItemWith("TestValueA", nil, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), + "TestKeyB": setting.RawItemWith("TestValueB", nil, setting.NewNamedOrigin("TestSourceB", setting.DeviceScope)), + "TestKeyC": setting.RawItemWith("TestValueC", nil, setting.NewNamedOrigin("TestSourceC", setting.DeviceScope)), + }, setting.DeviceScope), + }, + { + name: "DevicePolicy/ManyInitialSources/Conflicts", + scope: setting.DeviceScope, + initialSources: []sourceConfig{ + { + name: "TestSourceA", + scope: setting.DeviceScope, + settingKey: "TestKeyA", + settingValue: "TestValueA", + wantEffective: true, + }, + { + name: "TestSourceB", + scope: setting.DeviceScope, + settingKey: "TestKeyB", + settingValue: "TestValueB", + wantEffective: true, + }, + { + name: "TestSourceC", + scope: setting.DeviceScope, + settingKey: "TestKeyA", + settingValue: "TestValueC", + wantEffective: true, + }, + }, + wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + "TestKeyA": setting.RawItemWith("TestValueC", nil, setting.NewNamedOrigin("TestSourceC", setting.DeviceScope)), + "TestKeyB": setting.RawItemWith("TestValueB", nil, setting.NewNamedOrigin("TestSourceB", setting.DeviceScope)), + }, setting.DeviceScope), + }, + { + name: "DevicePolicy/MixedSources/Conflicts", + scope: setting.DeviceScope, + initialSources: []sourceConfig{ + { + name: "TestSourceA", + scope: setting.DeviceScope, + settingKey: "TestKeyA", + settingValue: "TestValueA", + wantEffective: true, + }, + { + name: "TestSourceB", + scope: setting.DeviceScope, + settingKey: "TestKeyB", + settingValue: "TestValueB", + wantEffective: true, + }, + { + name: "TestSourceC", + scope: setting.DeviceScope, + settingKey: "TestKeyA", + settingValue: "TestValueC", + wantEffective: true, + }, + }, + additionalSources: []sourceConfig{ + { + name: "TestSourceD", + scope: setting.DeviceScope, + settingKey: "TestKeyA", + settingValue: "TestValueD", + wantEffective: true, + }, + { + name: "TestSourceE", + scope: setting.DeviceScope, + settingKey: "TestKeyC", + settingValue: "TestValueE", + wantEffective: true, + }, + { + name: "TestSourceF", + scope: setting.DeviceScope, + settingKey: "TestKeyA", + settingValue: "TestValueF", + wantEffective: true, + }, + }, + wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + "TestKeyA": setting.RawItemWith("TestValueF", nil, setting.NewNamedOrigin("TestSourceF", setting.DeviceScope)), + "TestKeyB": setting.RawItemWith("TestValueB", nil, setting.NewNamedOrigin("TestSourceB", setting.DeviceScope)), + "TestKeyC": setting.RawItemWith("TestValueE", nil, setting.NewNamedOrigin("TestSourceE", setting.DeviceScope)), + }, 
setting.DeviceScope), + }, + { + name: "UserScope/Init-DeviceSource", + scope: setting.CurrentUserScope, + initialSources: []sourceConfig{ + { + name: "TestSourceDevice", + scope: setting.DeviceScope, + settingKey: "TestKeyA", + settingValue: "DeviceValue", + wantEffective: true, + }, + }, + wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + "TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), + }, setting.CurrentUserScope, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), + }, + { + name: "UserScope/Init-DeviceSource/Add-UserSource", + scope: setting.CurrentUserScope, + initialSources: []sourceConfig{ + { + name: "TestSourceDevice", + scope: setting.DeviceScope, + settingKey: "TestKeyA", + settingValue: "DeviceValue", + wantEffective: true, + }, + }, + additionalSources: []sourceConfig{ + { + name: "TestSourceUser", + scope: setting.CurrentUserScope, + settingKey: "TestKeyB", + settingValue: "UserValue", + wantEffective: true, + }, + }, + wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + "TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), + "TestKeyB": setting.RawItemWith("UserValue", nil, setting.NewNamedOrigin("TestSourceUser", setting.CurrentUserScope)), + }, setting.CurrentUserScope), + }, + { + name: "UserScope/Init-DeviceSource/Add-UserSource-and-ProfileSource", + scope: setting.CurrentUserScope, + initialSources: []sourceConfig{ + { + name: "TestSourceDevice", + scope: setting.DeviceScope, + settingKey: "TestKeyA", + settingValue: "DeviceValue", + wantEffective: true, + }, + }, + additionalSources: []sourceConfig{ + { + name: "TestSourceProfile", + scope: setting.CurrentProfileScope, + settingKey: "TestKeyB", + settingValue: "ProfileValue", + wantEffective: true, + }, + { + name: "TestSourceUser", + scope: setting.CurrentUserScope, + settingKey: "TestKeyB", + settingValue: "UserValue", + wantEffective: true, + }, + }, + wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + "TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), + "TestKeyB": setting.RawItemWith("ProfileValue", nil, setting.NewNamedOrigin("TestSourceProfile", setting.CurrentProfileScope)), + }, setting.CurrentUserScope), + }, + { + name: "DevicePolicy/User-Source-does-not-apply", + scope: setting.DeviceScope, + initialSources: []sourceConfig{ + { + name: "TestSourceDevice", + scope: setting.DeviceScope, + settingKey: "TestKeyA", + settingValue: "DeviceValue", + wantEffective: true, + }, + }, + additionalSources: []sourceConfig{ + { + name: "TestSourceUser", + scope: setting.CurrentUserScope, + settingKey: "TestKeyA", + settingValue: "UserValue", + wantEffective: false, // Registering a user source should have no impact on the device policy. + }, + }, + wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + "TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), + }, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Register all settings that we use in this test. 
+ var definitions []*setting.Definition + for _, source := range slices.Concat(tt.initialSources, tt.additionalSources) { + definitions = append(definitions, setting.NewDefinition(source.settingKey, tt.scope.Kind(), setting.StringValue)) + } + if err := setting.SetDefinitionsForTest(t, definitions...); err != nil { + t.Fatalf("SetDefinitionsForTest failed: %v", err) + } + + // Add the initial policy sources. + var wantSources []*source.Source + for _, s := range tt.initialSources { + store := source.NewTestStoreOf(t, source.TestSettingOf(s.settingKey, s.settingValue)) + source := source.NewSource(s.name, s.scope, store) + if err := registerSource(source); err != nil { + t.Fatalf("Failed to register policy source: %v", source) + } + if s.wantEffective { + wantSources = append(wantSources, source) + } + t.Cleanup(func() { unregisterSource(source) }) + } + + // Retrieve the effective policy. + policy, err := policyForTest(t, tt.scope) + if err != nil { + t.Fatalf("Failed to get effective policy for %v: %v", tt.scope, err) + } + + checkPolicySources(t, policy, wantSources) + + // Add additional setting sources. + for _, s := range tt.additionalSources { + store := source.NewTestStoreOf(t, source.TestSettingOf(s.settingKey, s.settingValue)) + source := source.NewSource(s.name, s.scope, store) + if err := registerSource(source); err != nil { + t.Fatalf("Failed to register additional policy source: %v", source) + } + if s.wantEffective { + wantSources = append(wantSources, source) + } + t.Cleanup(func() { unregisterSource(source) }) + } + + checkPolicySources(t, policy, wantSources) + + // Verify the final effective settings snapshots. + if got := policy.Get(); !got.Equal(tt.wantSnapshot) { + t.Errorf("Snapshot: got %v; want %v", got, tt.wantSnapshot) + } + }) + } +} + +func TestPolicyFor(t *testing.T) { + tests := []struct { + name string + scopeA, scopeB setting.PolicyScope + closePolicy bool // indicates whether to close policyA before retrieving policyB + wantSame bool // specifies whether policyA and policyB should reference the same [Policy] instance + }{ + { + name: "Device/Device", + scopeA: setting.DeviceScope, + scopeB: setting.DeviceScope, + wantSame: true, + }, + { + name: "Device/CurrentProfile", + scopeA: setting.DeviceScope, + scopeB: setting.CurrentProfileScope, + wantSame: false, + }, + { + name: "Device/CurrentUser", + scopeA: setting.DeviceScope, + scopeB: setting.CurrentUserScope, + wantSame: false, + }, + { + name: "CurrentProfile/CurrentProfile", + scopeA: setting.CurrentProfileScope, + scopeB: setting.CurrentProfileScope, + wantSame: true, + }, + { + name: "CurrentProfile/CurrentUser", + scopeA: setting.CurrentProfileScope, + scopeB: setting.CurrentUserScope, + wantSame: false, + }, + { + name: "CurrentUser/CurrentUser", + scopeA: setting.CurrentUserScope, + scopeB: setting.CurrentUserScope, + wantSame: true, + }, + { + name: "UserA/UserA", + scopeA: setting.UserScopeOf("UserA"), + scopeB: setting.UserScopeOf("UserA"), + wantSame: true, + }, + { + name: "UserA/UserB", + scopeA: setting.UserScopeOf("UserA"), + scopeB: setting.UserScopeOf("UserB"), + wantSame: false, + }, + { + name: "New-after-close", + scopeA: setting.DeviceScope, + scopeB: setting.DeviceScope, + closePolicy: true, + wantSame: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + policyA, err := policyForTest(t, tt.scopeA) + if err != nil { + t.Fatalf("Failed to get effective policy for %v: %v", tt.scopeA, err) + } + + if tt.closePolicy { + policyA.Close() + } + + policyB, err 
:= policyForTest(t, tt.scopeB) + if err != nil { + t.Fatalf("Failed to get effective policy for %v: %v", tt.scopeB, err) + } + + if gotSame := policyA == policyB; gotSame != tt.wantSame { + t.Fatalf("Got same: %v; want same %v", gotSame, tt.wantSame) + } + }) + } +} + +func TestPolicyChangeHasChanged(t *testing.T) { + tests := []struct { + name string + old, new map[setting.Key]setting.RawItem + wantChanged []setting.Key + wantUnchanged []setting.Key + }{ + { + name: "String-Settings", + old: map[setting.Key]setting.RawItem{ + "ChangedSetting": setting.RawItemOf("Old"), + "UnchangedSetting": setting.RawItemOf("Value"), + }, + new: map[setting.Key]setting.RawItem{ + "ChangedSetting": setting.RawItemOf("New"), + "UnchangedSetting": setting.RawItemOf("Value"), + }, + wantChanged: []setting.Key{"ChangedSetting"}, + wantUnchanged: []setting.Key{"UnchangedSetting"}, + }, + { + name: "UInt64-Settings", + old: map[setting.Key]setting.RawItem{ + "ChangedSetting": setting.RawItemOf(uint64(0)), + "UnchangedSetting": setting.RawItemOf(uint64(42)), + }, + new: map[setting.Key]setting.RawItem{ + "ChangedSetting": setting.RawItemOf(uint64(1)), + "UnchangedSetting": setting.RawItemOf(uint64(42)), + }, + wantChanged: []setting.Key{"ChangedSetting"}, + wantUnchanged: []setting.Key{"UnchangedSetting"}, + }, + { + name: "StringSlice-Settings", + old: map[setting.Key]setting.RawItem{ + "ChangedSetting": setting.RawItemOf([]string{"Chicago"}), + "UnchangedSetting": setting.RawItemOf([]string{"String1", "String2"}), + }, + new: map[setting.Key]setting.RawItem{ + "ChangedSetting": setting.RawItemOf([]string{"New York"}), + "UnchangedSetting": setting.RawItemOf([]string{"String1", "String2"}), + }, + wantChanged: []setting.Key{"ChangedSetting"}, + wantUnchanged: []setting.Key{"UnchangedSetting"}, + }, + { + name: "Int8-Settings", // We don't have actual int8 settings, but this should still work. + old: map[setting.Key]setting.RawItem{ + "ChangedSetting": setting.RawItemOf(int8(0)), + "UnchangedSetting": setting.RawItemOf(int8(42)), + }, + new: map[setting.Key]setting.RawItem{ + "ChangedSetting": setting.RawItemOf(int8(1)), + "UnchangedSetting": setting.RawItemOf(int8(42)), + }, + wantChanged: []setting.Key{"ChangedSetting"}, + wantUnchanged: []setting.Key{"UnchangedSetting"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + old := setting.NewSnapshot(tt.old) + new := setting.NewSnapshot(tt.new) + change := PolicyChange{Change[*setting.Snapshot]{old, new}} + for _, wantChanged := range tt.wantChanged { + if !change.HasChanged(wantChanged) { + t.Errorf("%q changed: got false; want true", wantChanged) + } + } + for _, wantUnchanged := range tt.wantUnchanged { + if change.HasChanged(wantUnchanged) { + t.Errorf("%q unchanged: got true; want false", wantUnchanged) + } + } + }) + } +} + +func TestChangePolicySetting(t *testing.T) { + setForTest(t, &policyReloadMinDelay, 100*time.Millisecond) + setForTest(t, &policyReloadMaxDelay, 500*time.Millisecond) + + // Register policy settings used in this test. + settingA := setting.NewDefinition("TestSettingA", setting.DeviceSetting, setting.StringValue) + settingB := setting.NewDefinition("TestSettingB", setting.DeviceSetting, setting.StringValue) + if err := setting.SetDefinitionsForTest(t, settingA, settingB); err != nil { + t.Fatalf("SetDefinitionsForTest failed: %v", err) + } + + // Register a test policy store and create a effective policy that reads the policy settings from it. 
+ store := source.NewTestStoreOf[string](t) + if _, err := RegisterStoreForTest(t, "TestSource", setting.DeviceScope, store); err != nil { + t.Fatalf("Failed to register policy store: %v", err) + } + policy, err := policyForTest(t, setting.DeviceScope) + if err != nil { + t.Fatalf("Failed to get effective policy: %v", err) + } + + // The policy setting is not configured yet. + if _, ok := policy.Get().GetSetting(settingA.Key()); ok { + t.Fatalf("Policy setting %q unexpectedly exists", settingA.Key()) + } + + // Subscribe to the policy change callback... + policyChanged := make(chan *PolicyChange) + unregister := policy.RegisterChangeCallback(func(pc *PolicyChange) { policyChanged <- pc }) + t.Cleanup(unregister) + + // ...make the change, and measure the time between initiating the change + // and receiving the callback. + start := time.Now() + const wantValueA = "TestValueA" + store.SetStrings(source.TestSettingOf(settingA.Key(), wantValueA)) + change := <-policyChanged + gotDelay := time.Since(start) + + // Ensure there is at least a [policyReloadMinDelay] delay between + // a change and the policy reload along with the callback invocation. + // This prevents reloading policy settings too frequently + // when multiple settings change within a short period of time. + if gotDelay < policyReloadMinDelay { + t.Errorf("Delay: got %v; want >= %v", gotDelay, policyReloadMinDelay) + } + + // Verify that the [PolicyChange] passed to the policy change callback + // contains the correct information regarding the policy setting changes. + if !change.HasChanged(settingA.Key()) { + t.Errorf("Policy setting %q has not changed", settingA.Key()) + } + if change.HasChanged(settingB.Key()) { + t.Errorf("Policy setting %q was unexpectedly changed", settingB.Key()) + } + if _, ok := change.Old().GetSetting(settingA.Key()); ok { + t.Fatalf("Policy setting %q unexpectedly exists", settingA.Key()) + } + if gotValue := change.New().Get(settingA.Key()); gotValue != wantValueA { + t.Errorf("Policy setting %q: got %q; want %q", settingA.Key(), gotValue, wantValueA) + } + + // And also verify that the current (most recent) [setting.Snapshot] + // includes the change we just made. + if gotValue := policy.Get().Get(settingA.Key()); gotValue != wantValueA { + t.Errorf("Policy setting %q: got %q; want %q", settingA.Key(), gotValue, wantValueA) + } + + // Now, let's change another policy setting value N times. + const N = 10 + wantValueB := strconv.Itoa(N) + start = time.Now() + for i := range N { + store.SetStrings(source.TestSettingOf(settingB.Key(), strconv.Itoa(i+1))) + } + + // The callback should be invoked only once, even though the policy setting + // has changed N times. + change = <-policyChanged + gotDelay = time.Since(start) + gotCallbacks := 1 +drain: + for { + select { + case <-policyChanged: + gotCallbacks++ + case <-time.After(policyReloadMaxDelay): + break drain + } + } + if wantCallbacks := 1; gotCallbacks > wantCallbacks { + t.Errorf("Callbacks: got %d; want %d", gotCallbacks, wantCallbacks) + } + + // Additionally, the policy change callback should be received no sooner + // than [policyReloadMinDelay] and no later than [policyReloadMaxDelay]. + if gotDelay < policyReloadMinDelay || gotDelay > policyReloadMaxDelay { + t.Errorf("Delay: got %v; want >= %v && <= %v", gotDelay, policyReloadMinDelay, policyReloadMaxDelay) + } + + // Verify that the [PolicyChange] received via the callback + // contains the final policy setting value. 
+ if !change.HasChanged(settingB.Key()) { + t.Errorf("Policy setting %q has not changed", settingB.Key()) + } + if change.HasChanged(settingA.Key()) { + t.Errorf("Policy setting %q was unexpectedly changed", settingA.Key()) + } + if _, ok := change.Old().GetSetting(settingB.Key()); ok { + t.Fatalf("Policy setting %q unexpectedly exists", settingB.Key()) + } + if gotValue := change.New().Get(settingB.Key()); gotValue != wantValueB { + t.Errorf("Policy setting %q: got %q; want %q", settingB.Key(), gotValue, wantValueB) + } + + // Lastly, if a policy store issues a change notification, but the effective policy + // remains unchanged, the [Policy] should ignore it without invoking the change callbacks. + store.NotifyPolicyChanged() + select { + case <-policyChanged: + t.Fatal("Unexpected policy changed notification") + case <-time.After(policyReloadMaxDelay): + } +} + +func TestClosePolicySource(t *testing.T) { + testSetting := setting.NewDefinition("TestSetting", setting.DeviceSetting, setting.StringValue) + if err := setting.SetDefinitionsForTest(t, testSetting); err != nil { + t.Fatalf("SetDefinitionsForTest failed: %v", err) + } + + wantSettingValue := "TestValue" + store := source.NewTestStoreOf(t, source.TestSettingOf(testSetting.Key(), wantSettingValue)) + if _, err := RegisterStoreForTest(t, "TestSource", setting.DeviceScope, store); err != nil { + t.Fatalf("Failed to register policy store: %v", err) + } + policy, err := policyForTest(t, setting.DeviceScope) + if err != nil { + t.Fatalf("Failed to get effective policy: %v", err) + } + + initialSnapshot, err := policy.Reload() + if err != nil { + t.Fatalf("Failed to reload policy: %v", err) + } + if gotSettingValue, err := initialSnapshot.GetErr(testSetting.Key()); err != nil { + t.Fatalf("Failed to get %q setting value: %v", testSetting.Key(), err) + } else if gotSettingValue != wantSettingValue { + t.Fatalf("Setting %q: got %q; want %q", testSetting.Key(), gotSettingValue, wantSettingValue) + } + + store.Close() + + // Closing a policy source abruptly without removing it first should invalidate and close the policy. + <-policy.Done() + if policy.IsValid() { + t.Fatal("The policy was not properly closed") + } + + // The resulting policy snapshot should remain valid and unchanged. + finalSnapshot := policy.Get() + if !finalSnapshot.Equal(initialSnapshot) { + t.Fatal("Policy snapshot has changed") + } + if gotSettingValue, err := finalSnapshot.GetErr(testSetting.Key()); err != nil { + t.Fatalf("Failed to get final %q setting value: %v", testSetting.Key(), err) + } else if gotSettingValue != wantSettingValue { + t.Fatalf("Setting %q: got %q; want %q", testSetting.Key(), gotSettingValue, wantSettingValue) + } + + // However, any further requests to reload the policy should fail. + if _, err := policy.Reload(); err == nil || !errors.Is(err, ErrPolicyClosed) { + t.Fatalf("Reload: gotErr: %v; wantErr: %v", err, ErrPolicyClosed) + } +} + +func TestRemovePolicySource(t *testing.T) { + // Register policy settings used in this test. + settingA := setting.NewDefinition("TestSettingA", setting.DeviceSetting, setting.StringValue) + settingB := setting.NewDefinition("TestSettingB", setting.DeviceSetting, setting.StringValue) + if err := setting.SetDefinitionsForTest(t, settingA, settingB); err != nil { + t.Fatalf("SetDefinitionsForTest failed: %v", err) + } + + // Register two policy stores. 
+ storeA := source.NewTestStoreOf(t, source.TestSettingOf(settingA.Key(), "A")) + storeRegA, err := RegisterStoreForTest(t, "TestSourceA", setting.DeviceScope, storeA) + if err != nil { + t.Fatalf("Failed to register policy store A: %v", err) + } + storeB := source.NewTestStoreOf(t, source.TestSettingOf(settingB.Key(), "B")) + storeRegB, err := RegisterStoreForTest(t, "TestSourceB", setting.DeviceScope, storeB) + if err != nil { + t.Fatalf("Failed to register policy store A: %v", err) + } + + // Create a effective [Policy] that reads policy settings from the two stores. + policy, err := policyForTest(t, setting.DeviceScope) + if err != nil { + t.Fatalf("Failed to get effective policy: %v", err) + } + + // Verify that the [Policy] uses both stores and includes policy settings from each. + if gotSources, wantSources := len(policy.sources), 2; gotSources != wantSources { + t.Fatalf("Policy Sources: got %v; want %v", gotSources, wantSources) + } + if got, want := policy.Get().Get(settingA.Key()), "A"; got != want { + t.Fatalf("Setting %q: got %q; want %q", settingA.Key(), got, want) + } + if got, want := policy.Get().Get(settingB.Key()), "B"; got != want { + t.Fatalf("Setting %q: got %q; want %q", settingB.Key(), got, want) + } + + // Unregister Store A and verify that the effective policy remains valid. + // It should no longer use the removed store or include any policy settings from it. + if err := storeRegA.Unregister(); err != nil { + t.Fatalf("Failed to unregister Store A: %v", err) + } + if !policy.IsValid() { + t.Fatalf("Policy was unexpectedly closed") + } + if gotSources, wantSources := len(policy.sources), 1; gotSources != wantSources { + t.Fatalf("Policy Sources: got %v; want %v", gotSources, wantSources) + } + if got, want := policy.Get().Get(settingA.Key()), any(nil); got != want { + t.Fatalf("Setting %q: got %q; want %q", settingA.Key(), got, want) + } + if got, want := policy.Get().Get(settingB.Key()), "B"; got != want { + t.Fatalf("Setting %q: got %q; want %q", settingB.Key(), got, want) + } + + // Unregister Store B and verify that the effective policy is still valid. + // However, it should be empty since there are no associated policy sources. + if err := storeRegB.Unregister(); err != nil { + t.Fatalf("Failed to unregister Store B: %v", err) + } + if !policy.IsValid() { + t.Fatalf("Policy was unexpectedly closed") + } + if gotSources, wantSources := len(policy.sources), 0; gotSources != wantSources { + t.Fatalf("Policy Sources: got %v; want %v", gotSources, wantSources) + } + if got := policy.Get(); got.Len() != 0 { + t.Fatalf("Settings: got %v; want {Empty}", got) + } +} + +func TestReplacePolicySource(t *testing.T) { + setForTest(t, &policyReloadMinDelay, 100*time.Millisecond) + setForTest(t, &policyReloadMaxDelay, 500*time.Millisecond) + + // Register policy settings used in this test. + testSetting := setting.NewDefinition("TestSettingA", setting.DeviceSetting, setting.StringValue) + if err := setting.SetDefinitionsForTest(t, testSetting); err != nil { + t.Fatalf("SetDefinitionsForTest failed: %v", err) + } + + // Create two policy stores. + initialStore := source.NewTestStoreOf(t, source.TestSettingOf(testSetting.Key(), "InitialValue")) + newStore := source.NewTestStoreOf(t, source.TestSettingOf(testSetting.Key(), "NewValue")) + unchangedStore := source.NewTestStoreOf(t, source.TestSettingOf(testSetting.Key(), "NewValue")) + + // Register the initial store and create a effective [Policy] that reads policy settings from it. 
+ reg, err := RegisterStoreForTest(t, "TestStore", setting.DeviceScope, initialStore) + if err != nil { + t.Fatalf("Failed to register the initial store: %v", err) + } + policy, err := policyForTest(t, setting.DeviceScope) + if err != nil { + t.Fatalf("Failed to get effective policy: %v", err) + } + + // Verify that the test setting has its initial value. + if got, want := policy.Get().Get(testSetting.Key()), "InitialValue"; got != want { + t.Fatalf("Setting %q: got %q; want %q", testSetting.Key(), got, want) + } + + // Subscribe to the policy change callback. + policyChanged := make(chan *PolicyChange, 1) + unregister := policy.RegisterChangeCallback(func(pc *PolicyChange) { policyChanged <- pc }) + t.Cleanup(unregister) + + // Now, let's replace the initial store with the new store. + reg, err = reg.ReplaceStore(newStore) + if err != nil { + t.Fatalf("Failed to replace the policy store: %v", err) + } + t.Cleanup(func() { reg.Unregister() }) + + // We should receive a policy change notification as the setting value has changed. + <-policyChanged + + // Verify that the test setting has the new value. + if got, want := policy.Get().Get(testSetting.Key()), "NewValue"; got != want { + t.Fatalf("Setting %q: got %q; want %q", testSetting.Key(), got, want) + } + + // Replacing a policy store with an identical one containing the same + // values for the same settings should not be considered a policy change. + reg, err = reg.ReplaceStore(unchangedStore) + if err != nil { + t.Fatalf("Failed to replace the policy store: %v", err) + } + t.Cleanup(func() { reg.Unregister() }) + + select { + case <-policyChanged: + t.Fatal("Unexpected policy changed notification") + default: + <-time.After(policyReloadMaxDelay) + } +} + +func TestAddClosedPolicySource(t *testing.T) { + store := source.NewTestStoreOf[string](t) + if _, err := RegisterStoreForTest(t, "TestSource", setting.DeviceScope, store); err != nil { + t.Fatalf("Failed to register policy store: %v", err) + } + store.Close() + + _, err := policyForTest(t, setting.DeviceScope) + if err == nil || !errors.Is(err, source.ErrStoreClosed) { + t.Fatalf("got: %v; want: %v", err, source.ErrStoreClosed) + } +} + +func TestClosePolicyMoreThanOnce(t *testing.T) { + tests := []struct { + name string + numSources int + }{ + { + name: "NoSources", + numSources: 0, + }, + { + name: "OneSource", + numSources: 1, + }, + { + name: "ManySources", + numSources: 10, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for i := range tt.numSources { + store := source.NewTestStoreOf[string](t) + if _, err := RegisterStoreForTest(t, "TestSource #"+strconv.Itoa(i), setting.DeviceScope, store); err != nil { + t.Fatalf("Failed to register policy store: %v", err) + } + } + + policy, err := policyForTest(t, setting.DeviceScope) + if err != nil { + t.Fatalf("failed to get effective policy: %v", err) + } + + const N = 10000 + var wg sync.WaitGroup + for range N { + wg.Add(1) + go func() { + wg.Done() + policy.Close() + <-policy.Done() + }() + } + wg.Wait() + }) + } +} + +func checkPolicySources(tb testing.TB, gotPolicy *Policy, wantSources []*source.Source) { + tb.Helper() + sort.SliceStable(wantSources, func(i, j int) bool { + return wantSources[i].Compare(wantSources[j]) < 0 + }) + gotSources := make([]*source.Source, len(gotPolicy.sources)) + for i := range gotPolicy.sources { + gotSources[i] = gotPolicy.sources[i].Source + } + type sourceSummary struct{ Name, Scope string } + toSourceSummary := cmp.Transformer("source", func(s *source.Source) 
sourceSummary { return sourceSummary{s.Name(), s.Scope().String()} }) + if diff := cmp.Diff(wantSources, gotSources, toSourceSummary, cmpopts.EquateEmpty()); diff != "" { + tb.Errorf("Policy Sources mismatch: %v", diff) + } +} + +// policyForTest is like [PolicyFor], but it deletes the policy +// when tb and all its subtests complete. +func policyForTest(tb testing.TB, target setting.PolicyScope) (*Policy, error) { + tb.Helper() + + policy, err := PolicyFor(target) + if err != nil { + return nil, err + } + tb.Cleanup(func() { + policy.Close() + <-policy.Done() + deletePolicy(policy) + }) + return policy, nil +} + +func setForTest[T any](tb testing.TB, target *T, newValue T) { + oldValue := *target + tb.Cleanup(func() { *target = oldValue }) + *target = newValue +} diff --git a/util/syspolicy/rsop/rsop.go b/util/syspolicy/rsop/rsop.go new file mode 100644 index 000000000..429b9b101 --- /dev/null +++ b/util/syspolicy/rsop/rsop.go @@ -0,0 +1,174 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package rsop facilitates [source.Store] registration via [RegisterStore] +// and provides access to the effective policy merged from all registered sources +// via [PolicyFor]. +package rsop + +import ( + "errors" + "fmt" + "slices" + "sync" + + "tailscale.com/syncs" + "tailscale.com/util/slicesx" + "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/syspolicy/source" +) + +var ( + policyMu sync.Mutex // protects [policySources] and [effectivePolicies] + policySources []*source.Source // all registered policy sources + effectivePolicies []*Policy // all active (non-closed) effective policies returned by [PolicyFor] + + // effectivePolicyLRU is an LRU cache of [Policy] by [setting.Scope]. + // Although there could be multiple [setting.PolicyScope] instances with the same [setting.Scope], + // such as two user scopes for different users, there is only one [setting.DeviceScope], only one + // [setting.CurrentProfileScope], and in most cases, only one active user scope. + // Therefore, cache misses that require falling back to [effectivePolicies] are extremely rare. + // It's a fixed-size array of atomic values and can be accessed without [policyMu] held. + effectivePolicyLRU [setting.NumScopes]syncs.AtomicValue[*Policy] +) + +// PolicyFor returns the [Policy] for the specified scope, +// creating it from the registered [source.Store]s if it doesn't already exist. +func PolicyFor(scope setting.PolicyScope) (*Policy, error) { + if err := internal.Init.Do(); err != nil { + return nil, err + } + policy := effectivePolicyLRU[scope.Kind()].Load() + if policy != nil && policy.Scope() == scope && policy.IsValid() { + return policy, nil + } + return policyForSlow(scope) +} + +func policyForSlow(scope setting.PolicyScope) (policy *Policy, err error) { + defer func() { + // Always update the LRU cache on exit if we found (or created) + // a policy for the specified scope. + if policy != nil { + effectivePolicyLRU[scope.Kind()].Store(policy) + } + }() + + policyMu.Lock() + defer policyMu.Unlock() + if policy, ok := findPolicyByScopeLocked(scope); ok { + return policy, nil + } + + // If there is no existing effective policy for the specified scope, + // we need to create one using the policy sources registered for that scope. + sources := slicesx.Filter(nil, policySources, func(source *source.Source) bool { + return source.Scope().Contains(scope) + }) + policy, err = newPolicy(scope, sources...) 
+ if err != nil { + return nil, err + } + effectivePolicies = append(effectivePolicies, policy) + return policy, nil +} + +// findPolicyByScopeLocked returns a policy with the specified scope and true if +// one exists in the [effectivePolicies] list, otherwise it returns nil, false. +// [policyMu] must be held. +func findPolicyByScopeLocked(target setting.PolicyScope) (policy *Policy, ok bool) { + for _, policy := range effectivePolicies { + if policy.Scope() == target && policy.IsValid() { + return policy, true + } + } + return nil, false +} + +// deletePolicy deletes the specified effective policy from [effectivePolicies] +// and [effectivePolicyLRU]. +func deletePolicy(policy *Policy) { + policyMu.Lock() + defer policyMu.Unlock() + if i := slices.Index(effectivePolicies, policy); i != -1 { + effectivePolicies = slices.Delete(effectivePolicies, i, i+1) + } + effectivePolicyLRU[policy.Scope().Kind()].CompareAndSwap(policy, nil) +} + +// registerSource registers the specified [source.Source] to be used by the package. +// It updates existing [Policy]s returned by [PolicyFor] to use this source if +// they are within the source's [setting.PolicyScope]. +func registerSource(source *source.Source) error { + policyMu.Lock() + defer policyMu.Unlock() + if slices.Contains(policySources, source) { + // already registered + return nil + } + policySources = append(policySources, source) + return forEachEffectivePolicyLocked(func(policy *Policy) error { + if !source.Scope().Contains(policy.Scope()) { + // Policy settings in the specified source do not apply + // to the scope of this effective policy. + // For example, a user policy source is being registered + // while the effective policy is for the device (or another user). + return nil + } + return policy.addSource(source) + }) +} + +// replaceSource is like [unregisterSource](old) followed by [registerSource](new), +// but performed atomically: the effective policy will contain settings +// either from the old source or the new source, never both and never neither. +func replaceSource(old, new *source.Source) error { + policyMu.Lock() + defer policyMu.Unlock() + oldIndex := slices.Index(policySources, old) + if oldIndex == -1 { + return fmt.Errorf("the source is not registered: %v", old) + } + policySources[oldIndex] = new + return forEachEffectivePolicyLocked(func(policy *Policy) error { + if !old.Scope().Contains(policy.Scope()) || !new.Scope().Contains(policy.Scope()) { + return nil + } + return policy.replaceSource(old, new) + }) +} + +// unregisterSource unregisters the specified [source.Source], +// so that it won't be used by any new or existing [Policy]. +func unregisterSource(source *source.Source) error { + policyMu.Lock() + defer policyMu.Unlock() + index := slices.Index(policySources, source) + if index == -1 { + return nil + } + policySources = slices.Delete(policySources, index, index+1) + return forEachEffectivePolicyLocked(func(policy *Policy) error { + if !source.Scope().Contains(policy.Scope()) { + return nil + } + return policy.removeSource(source) + }) +} + +// forEachEffectivePolicyLocked calls fn for every non-closed [Policy] in [effectivePolicies]. +// It accumulates the returned errors and returns an error that wraps all errors returned by fn. +// The [policyMu] mutex must be held while this function is executed. 
+func forEachEffectivePolicyLocked(fn func(p *Policy) error) error { + var errs []error + for _, policy := range effectivePolicies { + if policy.IsValid() { + err := fn(policy) + if err != nil && !errors.Is(err, ErrPolicyClosed) { + errs = append(errs, err) + } + } + } + return errors.Join(errs...) +} diff --git a/util/syspolicy/rsop/store_registration.go b/util/syspolicy/rsop/store_registration.go new file mode 100644 index 000000000..09c83e988 --- /dev/null +++ b/util/syspolicy/rsop/store_registration.go @@ -0,0 +1,94 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package rsop + +import ( + "errors" + "sync" + "sync/atomic" + + "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/syspolicy/source" +) + +// ErrAlreadyConsumed is the error returned when [StoreRegistration.ReplaceStore] +// or [StoreRegistration.Unregister] is called more than once. +var ErrAlreadyConsumed = errors.New("the store registration is no longer valid") + +// StoreRegistration is a [source.Store] registered for use in the specified scope. +// It can be used to unregister the store, or replace it with another one. +type StoreRegistration struct { + source *source.Source + m sync.Mutex // protects the [StoreRegistration.consumeSlow] path + consumed atomic.Bool // can be read without holding m, but must be written with m held +} + +// RegisterStore registers a new policy [source.Store] with the specified name and [setting.PolicyScope]. +func RegisterStore(name string, scope setting.PolicyScope, store source.Store) (*StoreRegistration, error) { + return newStoreRegistration(name, scope, store) +} + +// RegisterStoreForTest is like [RegisterStore], but unregisters the store when +// tb and all its subtests complete. +func RegisterStoreForTest(tb internal.TB, name string, scope setting.PolicyScope, store source.Store) (*StoreRegistration, error) { + reg, err := RegisterStore(name, scope, store) + if err == nil { + tb.Cleanup(func() { + if err := reg.Unregister(); err != nil && !errors.Is(err, ErrAlreadyConsumed) { + tb.Fatalf("Unregister failed: %v", err) + } + }) + } + return reg, err // may be nil or non-nil +} + +func newStoreRegistration(name string, scope setting.PolicyScope, store source.Store) (*StoreRegistration, error) { + source := source.NewSource(name, scope, store) + if err := registerSource(source); err != nil { + return nil, err + } + return &StoreRegistration{source: source}, nil +} + +// ReplaceStore replaces the registered store with the new one, +// returning a new [StoreRegistration] or an error. +func (r *StoreRegistration) ReplaceStore(new source.Store) (*StoreRegistration, error) { + var res *StoreRegistration + err := r.consume(func() error { + newSource := source.NewSource(r.source.Name(), r.source.Scope(), new) + if err := replaceSource(r.source, newSource); err != nil { + return err + } + res = &StoreRegistration{source: newSource} + return nil + }) + return res, err +} + +// Unregister reverts the registration. +func (r *StoreRegistration) Unregister() error { + return r.consume(func() error { return unregisterSource(r.source) }) +} + +// consume invokes fn, consuming r if no error is returned. +// It returns [ErrAlreadyConsumed] on subsequent calls after the first successful call. 
+func (r *StoreRegistration) consume(fn func() error) (err error) { + if r.consumed.Load() { + return ErrAlreadyConsumed + } + return r.consumeSlow(fn) +} + +func (r *StoreRegistration) consumeSlow(fn func() error) (err error) { + r.m.Lock() + defer r.m.Unlock() + if r.consumed.Load() { + return ErrAlreadyConsumed + } + if err = fn(); err == nil { + r.consumed.Store(true) + } + return err // may be nil or non-nil +} diff --git a/util/syspolicy/setting/policy_scope.go b/util/syspolicy/setting/policy_scope.go index 55fa339e7..c2039fdda 100644 --- a/util/syspolicy/setting/policy_scope.go +++ b/util/syspolicy/setting/policy_scope.go @@ -8,6 +8,7 @@ import ( "strings" "tailscale.com/types/lazy" + "tailscale.com/util/syspolicy/internal" ) var ( @@ -35,6 +36,8 @@ type PolicyScope struct { // when querying policy settings. // It returns [DeviceScope], unless explicitly changed with [SetDefaultScope]. func DefaultScope() PolicyScope { + // Allow deferred package init functions to override the default scope. + internal.Init.Do() return lazyDefaultScope.Get(func() PolicyScope { return DeviceScope }) } diff --git a/util/syspolicy/setting/setting.go b/util/syspolicy/setting/setting.go index 93be287b1..70fb0a931 100644 --- a/util/syspolicy/setting/setting.go +++ b/util/syspolicy/setting/setting.go @@ -243,6 +243,9 @@ func registerLocked(d *Definition) { func settingDefinitions() (DefinitionMap, error) { return definitions.GetErr(func() (DefinitionMap, error) { + if err := internal.Init.Do(); err != nil { + return nil, err + } definitionsMu.Lock() defer definitionsMu.Unlock() definitionsUsed = true diff --git a/util/syspolicy/source/test_store.go b/util/syspolicy/source/test_store.go index bb8e164fb..1f19bbb43 100644 --- a/util/syspolicy/source/test_store.go +++ b/util/syspolicy/source/test_store.go @@ -89,6 +89,7 @@ type TestStore struct { suspendCount int // change callback are suspended if > 0 mr, mw map[setting.Key]any // maps for reading and writing; they're the same unless the store is suspended. cbs set.HandleSet[func()] + closed bool readsMu sync.Mutex reads map[testReadOperation]int // how many times a policy setting was read @@ -98,24 +99,20 @@ type TestStore struct { // The tb will be used to report coding errors detected by the [TestStore]. func NewTestStore(tb internal.TB) *TestStore { m := make(map[setting.Key]any) - return &TestStore{ + store := &TestStore{ tb: tb, done: make(chan struct{}), mr: m, mw: m, } + tb.Cleanup(store.Close) + return store } // NewTestStoreOf is a shorthand for [NewTestStore] followed by [TestStore.SetBooleans], // [TestStore.SetUInt64s], [TestStore.SetStrings] or [TestStore.SetStringLists]. func NewTestStoreOf[T TestValueType](tb internal.TB, settings ...TestSetting[T]) *TestStore { - m := make(map[setting.Key]any) - store := &TestStore{ - tb: tb, - done: make(chan struct{}), - mr: m, - mw: m, - } + store := NewTestStore(tb) switch settings := any(settings).(type) { case []TestSetting[bool]: store.SetBooleans(settings...) @@ -308,7 +305,7 @@ func (s *TestStore) Resume() { s.mr = s.mw s.mu.Unlock() s.storeLock.Unlock() - s.notifyPolicyChanged() + s.NotifyPolicyChanged() case s.suspendCount < 0: s.tb.Fatal("negative suspendCount") default: @@ -333,7 +330,7 @@ func (s *TestStore) SetBooleans(settings ...TestSetting[bool]) { s.mu.Unlock() } s.storeLock.Unlock() - s.notifyPolicyChanged() + s.NotifyPolicyChanged() } // SetUInt64s sets the specified integer settings in s. 
@@ -352,7 +349,7 @@ func (s *TestStore) SetUInt64s(settings ...TestSetting[uint64]) { s.mu.Unlock() } s.storeLock.Unlock() - s.notifyPolicyChanged() + s.NotifyPolicyChanged() } // SetStrings sets the specified string settings in s. @@ -371,7 +368,7 @@ func (s *TestStore) SetStrings(settings ...TestSetting[string]) { s.mu.Unlock() } s.storeLock.Unlock() - s.notifyPolicyChanged() + s.NotifyPolicyChanged() } // SetStrings sets the specified string list settings in s. @@ -390,7 +387,7 @@ func (s *TestStore) SetStringLists(settings ...TestSetting[[]string]) { s.mu.Unlock() } s.storeLock.Unlock() - s.notifyPolicyChanged() + s.NotifyPolicyChanged() } // Delete deletes the specified settings from s. @@ -402,7 +399,7 @@ func (s *TestStore) Delete(keys ...setting.Key) { s.mu.Unlock() } s.storeLock.Unlock() - s.notifyPolicyChanged() + s.NotifyPolicyChanged() } // Clear deletes all settings from s. @@ -412,10 +409,10 @@ func (s *TestStore) Clear() { clear(s.mw) s.mu.Unlock() s.storeLock.Unlock() - s.notifyPolicyChanged() + s.NotifyPolicyChanged() } -func (s *TestStore) notifyPolicyChanged() { +func (s *TestStore) NotifyPolicyChanged() { s.mu.RLock() if s.suspendCount != 0 { s.mu.RUnlock() @@ -439,9 +436,9 @@ func (s *TestStore) notifyPolicyChanged() { func (s *TestStore) Close() { s.mu.Lock() defer s.mu.Unlock() - if s.done != nil { + if !s.closed { close(s.done) - s.done = nil + s.closed = true } } From 74dd24ce7173fc593f67692538a78d175b3b37c1 Mon Sep 17 00:00:00 2001 From: Christian Date: Mon, 14 Oct 2024 15:52:03 -0700 Subject: [PATCH 0055/1708] cmd/tsconnect, logpolicy: fixes for wasm_js.go * updates to LocalBackend require metrics to be passed in which are now initialized * os.MkdirTemp isn't supported in wasm/js so we simply return empty string for logger * adds a UDP dialer which was missing and led to the dialer being incompletely initialized Fixes #10454 and #8272 Signed-off-by: Christian --- cmd/tsconnect/wasm/wasm_js.go | 4 ++++ logpolicy/logpolicy.go | 3 +++ 2 files changed, 7 insertions(+) diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index 8291ac9b4..c35d543aa 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -108,6 +108,7 @@ func newIPN(jsConfig js.Value) map[string]any { SetSubsystem: sys.Set, ControlKnobs: sys.ControlKnobs(), HealthTracker: sys.HealthTracker(), + Metrics: sys.UserMetricsRegistry(), }) if err != nil { log.Fatal(err) @@ -128,6 +129,9 @@ func newIPN(jsConfig js.Value) map[string]any { dialer.NetstackDialTCP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { return ns.DialContextTCP(ctx, dst) } + dialer.NetstackDialUDP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { + return ns.DialContextUDP(ctx, dst) + } sys.NetstackRouter.Set(true) sys.Tun.Get().Start() diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 0d2af77f2..d657c4e93 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -230,6 +230,9 @@ func LogsDir(logf logger.Logf) string { logf("logpolicy: using $STATE_DIRECTORY, %q", systemdStateDir) return systemdStateDir } + case "js": + logf("logpolicy: no logs directory in the browser") + return "" } // Default to e.g. /var/lib/tailscale or /var/db/tailscale on Unix. 
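The StoreRegistration.consume/consumeSlow pair in util/syspolicy/rsop/store_registration.go
above is the usual "check an atomic flag on the fast path, re-check under a mutex on the
slow path" idiom. A minimal, self-contained sketch of the same pattern, with hypothetical
names and not taken from this series, looks roughly like this:

    package main

    import (
        "errors"
        "fmt"
        "sync"
        "sync/atomic"
    )

    var errConsumed = errors.New("already consumed")

    // consumable runs an action at most once successfully.
    type consumable struct {
        mu       sync.Mutex
        consumed atomic.Bool // read lock-free on the fast path; written only with mu held
    }

    func (c *consumable) do(fn func() error) error {
        if c.consumed.Load() { // fast path: no lock once consumed
            return errConsumed
        }
        c.mu.Lock()
        defer c.mu.Unlock()
        if c.consumed.Load() { // re-check under the lock to avoid running fn twice
            return errConsumed
        }
        if err := fn(); err != nil {
            return err // fn failed, so nothing was consumed and the caller may retry
        }
        c.consumed.Store(true)
        return nil
    }

    func main() {
        var c consumable
        fmt.Println(c.do(func() error { return nil })) // <nil>
        fmt.Println(c.do(func() error { return nil })) // already consumed
    }

The split mirrors consume/consumeSlow above: once the registration has been consumed,
callers pay only an atomic load, while the first successful call still runs exactly once
under the mutex.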
From 6a885dbc36edb4b2395c4df3d901f42b722d7ced Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 16 Oct 2024 09:33:21 -0700 Subject: [PATCH 0056/1708] wgengine/magicsock: fix CI-only test warning of missing health tracker While looking at deflaking TestTwoDevicePing/ping_1.0.0.2_via_SendPacket, there were a bunch of distracting: WARNING: (non-fatal) nil health.Tracker (being strict in CI): ... This pacifies those so it's easier to work on actually deflaking the test. Updates #11762 Updates #11874 Change-Id: I08dcb44511d4996b68d5f1ce5a2619b555a2a773 Signed-off-by: Brad Fitzpatrick --- wgengine/magicsock/magicsock_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index c1b8eef22..7e48e1daa 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -176,6 +176,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen if err != nil { t.Fatalf("netmon.New: %v", err) } + ht := new(health.Tracker) var reg usermetric.Registry epCh := make(chan []tailcfg.Endpoint, 100) // arbitrary @@ -183,6 +184,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen NetMon: netMon, Metrics: ®, Logf: logf, + HealthTracker: ht, DisablePortMapper: true, TestOnlyPacketListener: l, EndpointsFunc: func(eps []tailcfg.Endpoint) { From d32d742af0632445b71befecd75b7fcbf5c68865 Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Wed, 16 Oct 2024 14:09:53 -0600 Subject: [PATCH 0057/1708] ipn/ipnlocal: error when trying to use exit node on unsupported platform (#13726) Adds logic to `checkExitNodePrefsLocked` to return an error when attempting to use exit nodes on a platform where this is not supported. This mirrors logic that was added to error out when trying to use `ssh` on an unsupported platform, and has very similar semantics. Fixes https://github.com/tailscale/tailscale/issues/13724 Signed-off-by: Mario Minardi --- client/web/web.go | 26 ++---------- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + envknob/featureknob/featureknob.go | 68 ++++++++++++++++++++++++++++++ envknob/features.go | 39 ----------------- ipn/ipnlocal/local.go | 7 ++- 7 files changed, 80 insertions(+), 63 deletions(-) create mode 100644 envknob/featureknob/featureknob.go delete mode 100644 envknob/features.go diff --git a/client/web/web.go b/client/web/web.go index 04ba2d086..56c5c92e8 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -26,6 +26,7 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/clientupdate" "tailscale.com/envknob" + "tailscale.com/envknob/featureknob" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" @@ -960,37 +961,16 @@ func (s *Server) serveGetNodeData(w http.ResponseWriter, r *http.Request) { } func availableFeatures() map[string]bool { - env := hostinfo.GetEnvType() features := map[string]bool{ "advertise-exit-node": true, // available on all platforms "advertise-routes": true, // available on all platforms - "use-exit-node": canUseExitNode(env) == nil, - "ssh": envknob.CanRunTailscaleSSH() == nil, + "use-exit-node": featureknob.CanUseExitNode() == nil, + "ssh": featureknob.CanRunTailscaleSSH() == nil, "auto-update": version.IsUnstableBuild() && clientupdate.CanAutoUpdate(), } - if env == hostinfo.HomeAssistantAddOn { - // Setting SSH on Home Assistant causes trouble on startup - // (since the flag is not being passed to `tailscale up`). 
- // Although Tailscale SSH does work here, - // it's not terribly useful since it's running in a separate container. - features["ssh"] = false - } return features } -func canUseExitNode(env hostinfo.EnvType) error { - switch dist := distro.Get(); dist { - case distro.Synology, // see https://github.com/tailscale/tailscale/issues/1995 - distro.QNAP, - distro.Unraid: - return fmt.Errorf("Tailscale exit nodes cannot be used on %s.", dist) - } - if env == hostinfo.HomeAssistantAddOn { - return errors.New("Tailscale exit nodes cannot be used on Home Assistant.") - } - return nil -} - // aclsAllowAccess returns whether tailnet ACLs (as expressed in the provided filter rules) // permit any devices to access the local web client. // This does not currently check whether a specific device can connect, just any device. diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index b77ea22ef..66c2c8bae 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -668,6 +668,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal tailscale.com/drive from tailscale.com/client/tailscale+ tailscale.com/envknob from tailscale.com/client/tailscale+ + tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 2c644d1be..73aedc9e5 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -92,6 +92,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/disco from tailscale.com/derp tailscale.com/drive from tailscale.com/client/tailscale+ tailscale.com/envknob from tailscale.com/client/tailscale+ + tailscale.com/envknob/featureknob from tailscale.com/client/web tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli tailscale.com/hostinfo from tailscale.com/client/web+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 6f71a88a9..10df37d79 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -263,6 +263,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/drive/driveimpl/dirfs from tailscale.com/drive/driveimpl+ tailscale.com/drive/driveimpl/shared from tailscale.com/drive/driveimpl+ tailscale.com/envknob from tailscale.com/client/tailscale+ + tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ diff --git a/envknob/featureknob/featureknob.go b/envknob/featureknob/featureknob.go new file mode 100644 index 000000000..d7af80d23 --- /dev/null +++ b/envknob/featureknob/featureknob.go @@ -0,0 +1,68 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package featureknob provides a facility to control whether features +// can run based on either an envknob or running OS / distro. 
+package featureknob + +import ( + "errors" + "runtime" + + "tailscale.com/envknob" + "tailscale.com/hostinfo" + "tailscale.com/version" + "tailscale.com/version/distro" +) + +// CanRunTailscaleSSH reports whether serving a Tailscale SSH server is +// supported for the current os/distro. +func CanRunTailscaleSSH() error { + switch runtime.GOOS { + case "linux": + if distro.Get() == distro.Synology && !envknob.UseWIPCode() { + return errors.New("The Tailscale SSH server does not run on Synology.") + } + if distro.Get() == distro.QNAP && !envknob.UseWIPCode() { + return errors.New("The Tailscale SSH server does not run on QNAP.") + } + + // Setting SSH on Home Assistant causes trouble on startup + // (since the flag is not being passed to `tailscale up`). + // Although Tailscale SSH does work here, + // it's not terribly useful since it's running in a separate container. + if hostinfo.GetEnvType() == hostinfo.HomeAssistantAddOn { + return errors.New("The Tailscale SSH server does not run on HomeAssistant.") + } + // otherwise okay + case "darwin": + // okay only in tailscaled mode for now. + if version.IsSandboxedMacOS() { + return errors.New("The Tailscale SSH server does not run in sandboxed Tailscale GUI builds.") + } + case "freebsd", "openbsd": + default: + return errors.New("The Tailscale SSH server is not supported on " + runtime.GOOS) + } + if !envknob.CanSSHD() { + return errors.New("The Tailscale SSH server has been administratively disabled.") + } + return nil +} + +// CanUseExitNode reports whether using an exit node is supported for the +// current os/distro. +func CanUseExitNode() error { + switch dist := distro.Get(); dist { + case distro.Synology, // see https://github.com/tailscale/tailscale/issues/1995 + distro.QNAP, + distro.Unraid: + return errors.New("Tailscale exit nodes cannot be used on " + string(dist)) + } + + if hostinfo.GetEnvType() == hostinfo.HomeAssistantAddOn { + return errors.New("Tailscale exit nodes cannot be used on HomeAssistant.") + } + + return nil +} diff --git a/envknob/features.go b/envknob/features.go deleted file mode 100644 index 9e5909de3..000000000 --- a/envknob/features.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package envknob - -import ( - "errors" - "runtime" - - "tailscale.com/version" - "tailscale.com/version/distro" -) - -// CanRunTailscaleSSH reports whether serving a Tailscale SSH server is -// supported for the current os/distro. -func CanRunTailscaleSSH() error { - switch runtime.GOOS { - case "linux": - if distro.Get() == distro.Synology && !UseWIPCode() { - return errors.New("The Tailscale SSH server does not run on Synology.") - } - if distro.Get() == distro.QNAP && !UseWIPCode() { - return errors.New("The Tailscale SSH server does not run on QNAP.") - } - // otherwise okay - case "darwin": - // okay only in tailscaled mode for now. 
- if version.IsSandboxedMacOS() { - return errors.New("The Tailscale SSH server does not run in sandboxed Tailscale GUI builds.") - } - case "freebsd", "openbsd": - default: - return errors.New("The Tailscale SSH server is not supported on " + runtime.GOOS) - } - if !CanSSHD() { - return errors.New("The Tailscale SSH server has been administratively disabled.") - } - return nil -} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 06dd84831..c7df4333b 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -51,6 +51,7 @@ import ( "tailscale.com/doctor/routetable" "tailscale.com/drive" "tailscale.com/envknob" + "tailscale.com/envknob/featureknob" "tailscale.com/health" "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" @@ -3484,7 +3485,7 @@ func (b *LocalBackend) checkSSHPrefsLocked(p *ipn.Prefs) error { if !p.RunSSH { return nil } - if err := envknob.CanRunTailscaleSSH(); err != nil { + if err := featureknob.CanRunTailscaleSSH(); err != nil { return err } if runtime.GOOS == "linux" { @@ -3565,6 +3566,10 @@ func updateExitNodeUsageWarning(p ipn.PrefsView, state *netmon.State, healthTrac } func (b *LocalBackend) checkExitNodePrefsLocked(p *ipn.Prefs) error { + if err := featureknob.CanUseExitNode(); err != nil { + return err + } + if (p.ExitNodeIP.IsValid() || p.ExitNodeID != "") && p.AdvertisesExitNode() { return errors.New("Cannot advertise an exit node and use an exit node at the same time.") } From 22c89fcb19ea36159e232c45b4f5e91c73b9e486 Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Wed, 16 Oct 2024 19:08:06 -0400 Subject: [PATCH 0058/1708] cmd/tailscale,ipn,tailcfg: add `tailscale advertise` subcommand behind envknob (#13734) Signed-off-by: Naman Sood --- cmd/tailscale/cli/advertise.go | 78 ++++++++++++++++++++++++++++++++++ cmd/tailscale/cli/cli.go | 4 +- cmd/tailscale/cli/cli_test.go | 4 ++ cmd/tailscale/cli/up.go | 3 ++ ipn/ipn_clone.go | 2 + ipn/ipn_view.go | 4 ++ ipn/prefs.go | 11 +++++ ipn/prefs_test.go | 11 +++++ tailcfg/tailcfg.go | 15 +++++++ 9 files changed, 130 insertions(+), 2 deletions(-) create mode 100644 cmd/tailscale/cli/advertise.go diff --git a/cmd/tailscale/cli/advertise.go b/cmd/tailscale/cli/advertise.go new file mode 100644 index 000000000..c9474c427 --- /dev/null +++ b/cmd/tailscale/cli/advertise.go @@ -0,0 +1,78 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "context" + "flag" + "fmt" + "strings" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/envknob" + "tailscale.com/ipn" + "tailscale.com/tailcfg" +) + +var advertiseArgs struct { + services string // comma-separated list of services to advertise +} + +// TODO(naman): This flag may move to set.go or serve_v2.go after the WIPCode +// envknob is not needed. +var advertiseCmd = &ffcli.Command{ + Name: "advertise", + ShortUsage: "tailscale advertise --services=", + ShortHelp: "Advertise this node as a destination for a service", + Exec: runAdvertise, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("advertise") + fs.StringVar(&advertiseArgs.services, "services", "", "comma-separated services to advertise; each must start with \"svc:\" (e.g. 
\"svc:idp,svc:nas,svc:database\")") + return fs + })(), +} + +func maybeAdvertiseCmd() []*ffcli.Command { + if !envknob.UseWIPCode() { + return nil + } + return []*ffcli.Command{advertiseCmd} +} + +func runAdvertise(ctx context.Context, args []string) error { + if len(args) > 0 { + return flag.ErrHelp + } + + services, err := parseServiceNames(advertiseArgs.services) + if err != nil { + return err + } + + _, err = localClient.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: services, + }, + }) + return err +} + +// parseServiceNames takes a comma-separated list of service names +// (eg. "svc:hello,svc:webserver,svc:catphotos"), splits them into +// a list and validates each service name. If valid, it returns +// the service names in a slice of strings. +func parseServiceNames(servicesArg string) ([]string, error) { + var services []string + if servicesArg != "" { + services = strings.Split(servicesArg, ",") + for _, svc := range services { + err := tailcfg.CheckServiceName(svc) + if err != nil { + return nil, fmt.Errorf("service %q: %s", svc, err) + } + } + } + return services, nil +} diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 864cf6903..de6bc2a4e 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -177,7 +177,7 @@ For help on subcommands, add --help after: "tailscale status --help". This CLI is still under active development. Commands and flags will change in the future. `), - Subcommands: []*ffcli.Command{ + Subcommands: append([]*ffcli.Command{ upCmd, downCmd, setCmd, @@ -207,7 +207,7 @@ change in the future. debugCmd, driveCmd, idTokenCmd, - }, + }, maybeAdvertiseCmd()...), FlagSet: rootfs, Exec: func(ctx context.Context, args []string) error { if len(args) > 0 { diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index d103c8f7e..4b7548671 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -946,6 +946,10 @@ func TestPrefFlagMapping(t *testing.T) { // Handled by the tailscale share subcommand, we don't want a CLI // flag for this. continue + case "AdvertiseServices": + // Handled by the tailscale advertise subcommand, we don't want a + // CLI flag for this. + continue case "InternalExitNodePrior": // Used internally by LocalBackend as part of exit node usage toggling. // No CLI flag for this. diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index bf6a9af77..782df407d 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -164,6 +164,9 @@ func defaultNetfilterMode() string { return "on" } +// upArgsT is the type of upArgs, the argument struct for `tailscale up`. +// As of 2024-10-08, upArgsT is frozen and no new arguments should be +// added to it. Add new arguments to setArgsT instead. type upArgsT struct { qr bool reset bool diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index de35b60a7..0e9698faf 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -27,6 +27,7 @@ func (src *Prefs) Clone() *Prefs { *dst = *src dst.AdvertiseTags = append(src.AdvertiseTags[:0:0], src.AdvertiseTags...) dst.AdvertiseRoutes = append(src.AdvertiseRoutes[:0:0], src.AdvertiseRoutes...) + dst.AdvertiseServices = append(src.AdvertiseServices[:0:0], src.AdvertiseServices...) 
if src.DriveShares != nil { dst.DriveShares = make([]*drive.Share, len(src.DriveShares)) for i := range dst.DriveShares { @@ -61,6 +62,7 @@ var _PrefsCloneNeedsRegeneration = Prefs(struct { ForceDaemon bool Egg bool AdvertiseRoutes []netip.Prefix + AdvertiseServices []string NoSNAT bool NoStatefulFiltering opt.Bool NetfilterMode preftype.NetfilterMode diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index ff48b9c89..83a7aebb1 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -85,6 +85,9 @@ func (v PrefsView) Egg() bool { return v.ж.Eg func (v PrefsView) AdvertiseRoutes() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.AdvertiseRoutes) } +func (v PrefsView) AdvertiseServices() views.Slice[string] { + return views.SliceOf(v.ж.AdvertiseServices) +} func (v PrefsView) NoSNAT() bool { return v.ж.NoSNAT } func (v PrefsView) NoStatefulFiltering() opt.Bool { return v.ж.NoStatefulFiltering } func (v PrefsView) NetfilterMode() preftype.NetfilterMode { return v.ж.NetfilterMode } @@ -120,6 +123,7 @@ var _PrefsViewNeedsRegeneration = Prefs(struct { ForceDaemon bool Egg bool AdvertiseRoutes []netip.Prefix + AdvertiseServices []string NoSNAT bool NoStatefulFiltering opt.Bool NetfilterMode preftype.NetfilterMode diff --git a/ipn/prefs.go b/ipn/prefs.go index 5d61f0119..f5406f3b7 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -179,6 +179,12 @@ type Prefs struct { // node. AdvertiseRoutes []netip.Prefix + // AdvertiseServices specifies the list of services that this + // node can serve as a destination for. Note that an advertised + // service must still go through the approval process from the + // control server. + AdvertiseServices []string + // NoSNAT specifies whether to source NAT traffic going to // destinations in AdvertiseRoutes. The default is to apply source // NAT, which makes the traffic appear to come from the router @@ -319,6 +325,7 @@ type MaskedPrefs struct { ForceDaemonSet bool `json:",omitempty"` EggSet bool `json:",omitempty"` AdvertiseRoutesSet bool `json:",omitempty"` + AdvertiseServicesSet bool `json:",omitempty"` NoSNATSet bool `json:",omitempty"` NoStatefulFilteringSet bool `json:",omitempty"` NetfilterModeSet bool `json:",omitempty"` @@ -527,6 +534,9 @@ func (p *Prefs) pretty(goos string) string { if len(p.AdvertiseTags) > 0 { fmt.Fprintf(&sb, "tags=%s ", strings.Join(p.AdvertiseTags, ",")) } + if len(p.AdvertiseServices) > 0 { + fmt.Fprintf(&sb, "services=%s ", strings.Join(p.AdvertiseServices, ",")) + } if goos == "linux" { fmt.Fprintf(&sb, "nf=%v ", p.NetfilterMode) } @@ -598,6 +608,7 @@ func (p *Prefs) Equals(p2 *Prefs) bool { p.ForceDaemon == p2.ForceDaemon && compareIPNets(p.AdvertiseRoutes, p2.AdvertiseRoutes) && compareStrings(p.AdvertiseTags, p2.AdvertiseTags) && + compareStrings(p.AdvertiseServices, p2.AdvertiseServices) && p.Persist.Equals(p2.Persist) && p.ProfileName == p2.ProfileName && p.AutoUpdate.Equals(p2.AutoUpdate) && diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index dcb999ef5..31671c0f8 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -54,6 +54,7 @@ func TestPrefsEqual(t *testing.T) { "ForceDaemon", "Egg", "AdvertiseRoutes", + "AdvertiseServices", "NoSNAT", "NoStatefulFiltering", "NetfilterMode", @@ -330,6 +331,16 @@ func TestPrefsEqual(t *testing.T) { &Prefs{NetfilterKind: ""}, false, }, + { + &Prefs{AdvertiseServices: []string{"svc:tux", "svc:xenia"}}, + &Prefs{AdvertiseServices: []string{"svc:tux", "svc:xenia"}}, + true, + }, + { + &Prefs{AdvertiseServices: []string{"svc:tux", "svc:xenia"}}, + &Prefs{AdvertiseServices: 
[]string{"svc:tux", "svc:amelie"}}, + false, + }, } for i, tt := range tests { got := tt.a.Equals(tt.b) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 92bf2cd95..0e1b1d4ae 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -651,6 +651,21 @@ func CheckTag(tag string) error { return nil } +// CheckServiceName validates svc for use as a service name. +// We only allow valid DNS labels, since the expectation is that these will be +// used as parts of domain names. +func CheckServiceName(svc string) error { + var ok bool + svc, ok = strings.CutPrefix(svc, "svc:") + if !ok { + return errors.New("services must start with 'svc:'") + } + if svc == "" { + return errors.New("service names must not be empty") + } + return dnsname.ValidLabel(svc) +} + // CheckRequestTags checks that all of h.RequestTags are valid. func (h *Hostinfo) CheckRequestTags() error { if h == nil { From fa95318a47a96acd9dafd9829bd0c8c5332ad4c4 Mon Sep 17 00:00:00 2001 From: Andrea Gottardo Date: Thu, 17 Oct 2024 15:37:10 -0700 Subject: [PATCH 0059/1708] tool/gocross: add support for tvOS Simulator (#13847) Updates ENG-5321 Allow gocross to build a static library for the Apple TV Simulator. Signed-off-by: Andrea Gottardo --- tool/gocross/autoflags.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tool/gocross/autoflags.go b/tool/gocross/autoflags.go index c66cab55a..020b19fa5 100644 --- a/tool/gocross/autoflags.go +++ b/tool/gocross/autoflags.go @@ -146,7 +146,11 @@ func autoflagsForTest(argv []string, env *Environment, goroot, nativeGOOS, nativ case env.IsSet("MACOSX_DEPLOYMENT_TARGET"): xcodeFlags = append(xcodeFlags, "-mmacosx-version-min="+env.Get("MACOSX_DEPLOYMENT_TARGET", "")) case env.IsSet("TVOS_DEPLOYMENT_TARGET"): - xcodeFlags = append(xcodeFlags, "-mtvos-version-min="+env.Get("TVOS_DEPLOYMENT_TARGET", "")) + if env.Get("TARGET_DEVICE_PLATFORM_NAME", "") == "appletvsimulator" { + xcodeFlags = append(xcodeFlags, "-mtvos-simulator-version-min="+env.Get("TVOS_DEPLOYMENT_TARGET", "")) + } else { + xcodeFlags = append(xcodeFlags, "-mtvos-version-min="+env.Get("TVOS_DEPLOYMENT_TARGET", "")) + } default: return nil, nil, fmt.Errorf("invoked by Xcode but couldn't figure out deployment target. Did Xcode change its envvars again?") } From c0a9895748a7d7f39577ca56b2dd25b9c0d4678e Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Thu, 17 Oct 2024 14:12:31 -0400 Subject: [PATCH 0060/1708] scripts/installer.sh: support DNF5 This fixes the installation on newer Fedora versions that use dnf5 as the 'dnf' binary. Updates #13828 Signed-off-by: Andrew Dunham Change-Id: I39513243c81640fab244a32b7dbb3f32071e9fce --- scripts/installer.sh | 36 ++++++++++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/scripts/installer.sh b/scripts/installer.sh index 19911ee23..55315c0ce 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -488,9 +488,41 @@ main() { set +x ;; dnf) + # DNF 5 has a different argument format; determine which one we have. + DNF_VERSION="3" + if dnf --version | grep -q '^dnf5 version'; then + DNF_VERSION="5" + fi + + # The 'config-manager' plugin wasn't implemented when + # DNF5 was released; detect that and use the old + # version if necessary. 
+ if [ "$DNF_VERSION" = "5" ]; then + set -x + $SUDO dnf install -y 'dnf-command(config-manager)' && DNF_HAVE_CONFIG_MANAGER=1 || DNF_HAVE_CONFIG_MANAGER=0 + set +x + + if [ "$DNF_HAVE_CONFIG_MANAGER" != "1" ]; then + if type dnf-3 >/dev/null; then + DNF_VERSION="3" + else + echo "dnf 5 detected, but 'dnf-command(config-manager)' not available and dnf-3 not found" + exit 1 + fi + fi + fi + set -x - $SUDO dnf install -y 'dnf-command(config-manager)' - $SUDO dnf config-manager --add-repo "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo" + if [ "$DNF_VERSION" = "3" ]; then + $SUDO dnf install -y 'dnf-command(config-manager)' + $SUDO dnf config-manager --add-repo "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo" + elif [ "$DNF_VERSION" = "5" ]; then + # Already installed config-manager, above. + $SUDO dnf config-manager addrepo --from-repofile="https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo" + else + echo "unexpected: unknown dnf version $DNF_VERSION" + exit 1 + fi $SUDO dnf install -y tailscale $SUDO systemctl enable --now tailscaled set +x From 18fc093c0df7a04b9d0a396ad3b635e9f859ffa5 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 18 Oct 2024 07:47:05 -0700 Subject: [PATCH 0061/1708] derp: give trusted mesh peers longer write timeouts Updates tailscale/corp#24014 Change-Id: I700872be48ab337dce8e11cabef7f82b97f0422a Signed-off-by: Brad Fitzpatrick --- derp/derp_server.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/derp/derp_server.go b/derp/derp_server.go index 8c5d6e890..94d2263f4 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -74,6 +74,7 @@ func init() { const ( perClientSendQueueDepth = 32 // packets buffered for sending writeTimeout = 2 * time.Second + privilegedWriteTimeout = 30 * time.Second // for clients with the mesh key ) // dupPolicy is a temporary (2021-08-30) mechanism to change the policy @@ -1721,7 +1722,19 @@ func (c *sclient) sendLoop(ctx context.Context) error { } func (c *sclient) setWriteDeadline() { - c.nc.SetWriteDeadline(time.Now().Add(writeTimeout)) + d := writeTimeout + if c.canMesh { + // Trusted peers get more tolerance. + // + // The "canMesh" is a bit of a misnomer; mesh peers typically run over a + // different interface for a per-region private VPC and are not + // throttled. But monitoring software elsewhere over the internet also + // use the private mesh key to subscribe to connect/disconnect events + // and might hit throttling and need more time to get the initial dump + // of connected peers. + d = privilegedWriteTimeout + } + c.nc.SetWriteDeadline(time.Now().Add(d)) } // sendKeepAlive sends a keep-alive frame, without flushing. From bb60da276468a18b5159598f09649289ad5471c3 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 18 Oct 2024 10:53:49 -0700 Subject: [PATCH 0062/1708] derp: add sclient write deadline timeout metric (#13831) Write timeouts can be indicative of stalled TCP streams. Understanding changes in the rate of such events can be helpful in an ops context. 
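For illustration, the new counter is registered below alongside the server's other expvar
metrics inside (*derp.Server).ExpVar(), so any wiring that already publishes that value
picks it up without further changes. A minimal sketch, with hypothetical wiring that is
not part of this change:

    // s is a *derp.Server. ExpVar returns an expvar.Var containing all of the
    // server's counters, now including "sclient_write_timeouts".
    expvar.Publish("derp", s.ExpVar())

Because the counter only ever increases, dashboards and alerts should watch its rate of
change rather than its absolute value.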
Updates tailscale/corp#23668 Signed-off-by: Jordan Whited --- derp/derp_server.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/derp/derp_server.go b/derp/derp_server.go index 94d2263f4..2a0f1aa2a 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -26,6 +26,7 @@ import ( "net" "net/http" "net/netip" + "os" "os/exec" "runtime" "strconv" @@ -142,6 +143,7 @@ type Server struct { multiForwarderCreated expvar.Int multiForwarderDeleted expvar.Int removePktForwardOther expvar.Int + sclientWriteTimeouts expvar.Int avgQueueDuration *uint64 // In milliseconds; accessed atomically tcpRtt metrics.LabelMap // histogram meshUpdateBatchSize *metrics.Histogram @@ -882,6 +884,9 @@ func (c *sclient) run(ctx context.Context) error { if errors.Is(err, context.Canceled) { c.debugLogf("sender canceled by reader exiting") } else { + if errors.Is(err, os.ErrDeadlineExceeded) { + c.s.sclientWriteTimeouts.Add(1) + } c.logf("sender failed: %v", err) } } @@ -2073,6 +2078,7 @@ func (s *Server) ExpVar() expvar.Var { m.Set("multiforwarder_created", &s.multiForwarderCreated) m.Set("multiforwarder_deleted", &s.multiForwarderDeleted) m.Set("packet_forwarder_delete_other_value", &s.removePktForwardOther) + m.Set("sclient_write_timeouts", &s.sclientWriteTimeouts) m.Set("average_queue_duration_ms", expvar.Func(func() any { return math.Float64frombits(atomic.LoadUint64(s.avgQueueDuration)) })) From 874db2173b26894b6b48de95fcb462a8c006f7e4 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Sun, 13 Oct 2024 11:36:46 -0500 Subject: [PATCH 0063/1708] ipn/{ipnauth,ipnlocal,ipnserver}: send the auth URL to the user who started interactive login We add the ClientID() method to the ipnauth.Actor interface and updated ipnserver.actor to implement it. This method returns a unique ID of the connected client if the actor represents one. It helps link a series of interactions initiated by the client, such as when a notification needs to be sent back to a specific session, rather than all active sessions, in response to a certain request. We also add LocalBackend.WatchNotificationsAs and LocalBackend.StartLoginInteractiveAs methods, which are like WatchNotifications and StartLoginInteractive but accept an additional parameter specifying an ipnauth.Actor who initiates the operation. We store these actor identities in watchSession.owner and LocalBackend.authActor, respectively,and implement LocalBackend.sendTo and related helper methods to enable sending notifications to watchSessions associated with actors (or, more broadly, identifiable recipients). We then use the above to change who receives the BrowseToURL notifications: - For user-initiated, interactive logins, the notification is delivered only to the user who initiated the process. If the initiating actor represents a specific connected client, the URL notification is sent back to the same LocalAPI client that called StartLoginInteractive. Otherwise, the notification is sent to all clients connected as that user. Currently, we only differentiate between users on Windows, as it is inherently a multi-user OS. - In all other cases (e.g., node key expiration), we send the notification to all connected users. 
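For illustration, a rough sketch of the intended call pattern, using hypothetical caller
code that is not part of this change (b is a *LocalBackend, ctx a context.Context, and
actor the ipnauth.Actor derived for the connected client):

    // Watch as the actor so that targeted notifications, including the login
    // URL, are delivered only to sessions matching this user (and ClientID,
    // if the actor has one).
    go b.WatchNotificationsAs(ctx, actor, 0, func() {}, func(n *ipn.Notify) (keepGoing bool) {
        if n.BrowseToURL != nil {
            fmt.Println("open:", *n.BrowseToURL) // hand the URL back to this client only
        }
        return true
    })

    // Start the login as the same actor; when the control plane returns an
    // auth URL, it is sent to that actor's watch sessions rather than being
    // broadcast to every watcher.
    if err := b.StartLoginInteractiveAs(ctx, actor); err != nil {
        // handle the error
    }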
Updates tailscale/corp#18342 Signed-off-by: Nick Khyl --- ipn/ipnauth/actor.go | 31 ++ ipn/ipnauth/ipnauth_notwindows.go | 4 +- ipn/ipnauth/test_actor.go | 36 ++ ipn/ipnlocal/local.go | 158 +++++++-- ipn/ipnlocal/local_test.go | 540 ++++++++++++++++++++++++++++++ ipn/ipnserver/actor.go | 23 +- ipn/localapi/localapi.go | 4 +- ipn/localapi/localapi_test.go | 19 +- 8 files changed, 762 insertions(+), 53 deletions(-) create mode 100644 ipn/ipnauth/test_actor.go diff --git a/ipn/ipnauth/actor.go b/ipn/ipnauth/actor.go index db3192c91..107017268 100644 --- a/ipn/ipnauth/actor.go +++ b/ipn/ipnauth/actor.go @@ -4,6 +4,8 @@ package ipnauth import ( + "fmt" + "tailscale.com/ipn" ) @@ -20,6 +22,9 @@ type Actor interface { // Username returns the user name associated with the receiver, // or "" if the actor does not represent a specific user. Username() (string, error) + // ClientID returns a non-zero ClientID and true if the actor represents + // a connected LocalAPI client. Otherwise, it returns a zero value and false. + ClientID() (_ ClientID, ok bool) // IsLocalSystem reports whether the actor is the Windows' Local System account. // @@ -45,3 +50,29 @@ type ActorCloser interface { // Close releases resources associated with the receiver. Close() error } + +// ClientID is an opaque, comparable value used to identify a connected LocalAPI +// client, such as a connected Tailscale GUI or CLI. It does not necessarily +// correspond to the same [net.Conn] or any physical session. +// +// Its zero value is valid, but does not represent a specific connected client. +type ClientID struct { + v any +} + +// NoClientID is the zero value of [ClientID]. +var NoClientID ClientID + +// ClientIDFrom returns a new [ClientID] derived from the specified value. +// ClientIDs derived from equal values are equal. +func ClientIDFrom[T comparable](v T) ClientID { + return ClientID{v} +} + +// String implements [fmt.Stringer]. +func (id ClientID) String() string { + if id.v == nil { + return "(none)" + } + return fmt.Sprint(id.v) +} diff --git a/ipn/ipnauth/ipnauth_notwindows.go b/ipn/ipnauth/ipnauth_notwindows.go index 3dad8233a..d9d11bd0a 100644 --- a/ipn/ipnauth/ipnauth_notwindows.go +++ b/ipn/ipnauth/ipnauth_notwindows.go @@ -18,7 +18,9 @@ import ( func GetConnIdentity(_ logger.Logf, c net.Conn) (ci *ConnIdentity, err error) { ci = &ConnIdentity{conn: c, notWindows: true} _, ci.isUnixSock = c.(*net.UnixConn) - ci.creds, _ = peercred.Get(c) + if ci.creds, _ = peercred.Get(c); ci.creds != nil { + ci.pid, _ = ci.creds.PID() + } return ci, nil } diff --git a/ipn/ipnauth/test_actor.go b/ipn/ipnauth/test_actor.go new file mode 100644 index 000000000..d38aa2196 --- /dev/null +++ b/ipn/ipnauth/test_actor.go @@ -0,0 +1,36 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnauth + +import ( + "tailscale.com/ipn" +) + +var _ Actor = (*TestActor)(nil) + +// TestActor is an [Actor] used exclusively for testing purposes. +type TestActor struct { + UID ipn.WindowsUserID // OS-specific UID of the user, if the actor represents a local Windows user + Name string // username associated with the actor, or "" + NameErr error // error to be returned by [TestActor.Username] + CID ClientID // non-zero if the actor represents a connected LocalAPI client + LocalSystem bool // whether the actor represents the special Local System account on Windows + LocalAdmin bool // whether the actor has local admin access + +} + +// UserID implements [Actor]. 
+func (a *TestActor) UserID() ipn.WindowsUserID { return a.UID } + +// Username implements [Actor]. +func (a *TestActor) Username() (string, error) { return a.Name, a.NameErr } + +// ClientID implements [Actor]. +func (a *TestActor) ClientID() (_ ClientID, ok bool) { return a.CID, a.CID != NoClientID } + +// IsLocalSystem implements [Actor]. +func (a *TestActor) IsLocalSystem() bool { return a.LocalSystem } + +// IsLocalAdmin implements [Actor]. +func (a *TestActor) IsLocalAdmin(operatorUID string) bool { return a.LocalAdmin } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c7df4333b..b01f3a0c0 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -155,10 +155,12 @@ func RegisterNewSSHServer(fn newSSHServerFunc) { newSSHServer = fn } -// watchSession represents a WatchNotifications channel +// watchSession represents a WatchNotifications channel, +// an [ipnauth.Actor] that owns it (e.g., a connected GUI/CLI), // and sessionID as required to close targeted buses. type watchSession struct { ch chan *ipn.Notify + owner ipnauth.Actor // or nil sessionID string cancel func() // call to signal that the session must be terminated } @@ -265,9 +267,9 @@ type LocalBackend struct { endpoints []tailcfg.Endpoint blocked bool keyExpired bool - authURL string // non-empty if not Running - authURLTime time.Time // when the authURL was received from the control server - interact bool // indicates whether a user requested interactive login + authURL string // non-empty if not Running + authURLTime time.Time // when the authURL was received from the control server + authActor ipnauth.Actor // an actor who called [LocalBackend.StartLoginInteractive] last, or nil egg bool prevIfState *netmon.State peerAPIServer *peerAPIServer // or nil @@ -2129,10 +2131,10 @@ func (b *LocalBackend) Start(opts ipn.Options) error { blid := b.backendLogID.String() b.logf("Backend: logs: be:%v fe:%v", blid, opts.FrontendLogID) - b.sendLocked(ipn.Notify{ + b.sendToLocked(ipn.Notify{ BackendLogID: &blid, Prefs: &prefs, - }) + }, allClients) if !loggedOut && (b.hasNodeKeyLocked() || confWantRunning) { // If we know that we're either logged in or meant to be @@ -2657,10 +2659,15 @@ func applyConfigToHostinfo(hi *tailcfg.Hostinfo, c *conffile.Config) { // notifications. There is currently (2022-11-22) no mechanism provided to // detect when a message has been dropped. func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWatchOpt, onWatchAdded func(), fn func(roNotify *ipn.Notify) (keepGoing bool)) { - ch := make(chan *ipn.Notify, 128) + b.WatchNotificationsAs(ctx, nil, mask, onWatchAdded, fn) +} +// WatchNotificationsAs is like WatchNotifications but takes an [ipnauth.Actor] +// as an additional parameter. If non-nil, the specified callback is invoked +// only for notifications relevant to this actor. +func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.Actor, mask ipn.NotifyWatchOpt, onWatchAdded func(), fn func(roNotify *ipn.Notify) (keepGoing bool)) { + ch := make(chan *ipn.Notify, 128) sessionID := rands.HexString(16) - origFn := fn if mask&ipn.NotifyNoPrivateKeys != 0 { fn = func(n *ipn.Notify) bool { @@ -2712,6 +2719,7 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa session := &watchSession{ ch: ch, + owner: actor, sessionID: sessionID, cancel: cancel, } @@ -2834,13 +2842,71 @@ func (b *LocalBackend) DebugPickNewDERP() error { // // b.mu must not be held. 
func (b *LocalBackend) send(n ipn.Notify) { + b.sendTo(n, allClients) +} + +// notificationTarget describes a notification recipient. +// A zero value is valid and indicate that the notification +// should be broadcast to all active [watchSession]s. +type notificationTarget struct { + // userID is the OS-specific UID of the target user. + // If empty, the notification is not user-specific and + // will be broadcast to all connected users. + // TODO(nickkhyl): make this field cross-platform rather + // than Windows-specific. + userID ipn.WindowsUserID + // clientID identifies a client that should be the exclusive recipient + // of the notification. A zero value indicates that notification should + // be sent to all sessions of the specified user. + clientID ipnauth.ClientID +} + +var allClients = notificationTarget{} // broadcast to all connected clients + +// toNotificationTarget returns a [notificationTarget] that matches only actors +// representing the same user as the specified actor. If the actor represents +// a specific connected client, the [ipnauth.ClientID] must also match. +// If the actor is nil, the [notificationTarget] matches all actors. +func toNotificationTarget(actor ipnauth.Actor) notificationTarget { + t := notificationTarget{} + if actor != nil { + t.userID = actor.UserID() + t.clientID, _ = actor.ClientID() + } + return t +} + +// match reports whether the specified actor should receive notifications +// targeting t. If the actor is nil, it should only receive notifications +// intended for all users. +func (t notificationTarget) match(actor ipnauth.Actor) bool { + if t == allClients { + return true + } + if actor == nil { + return false + } + if t.userID != "" && t.userID != actor.UserID() { + return false + } + if t.clientID != ipnauth.NoClientID { + clientID, ok := actor.ClientID() + if !ok || clientID != t.clientID { + return false + } + } + return true +} + +// sendTo is like [LocalBackend.send] but allows specifying a recipient. +func (b *LocalBackend) sendTo(n ipn.Notify, recipient notificationTarget) { b.mu.Lock() defer b.mu.Unlock() - b.sendLocked(n) + b.sendToLocked(n, recipient) } -// sendLocked is like send, but assumes b.mu is already held. -func (b *LocalBackend) sendLocked(n ipn.Notify) { +// sendToLocked is like [LocalBackend.sendTo], but assumes b.mu is already held. +func (b *LocalBackend) sendToLocked(n ipn.Notify, recipient notificationTarget) { if n.Prefs != nil { n.Prefs = ptr.To(stripKeysFromPrefs(*n.Prefs)) } @@ -2854,10 +2920,12 @@ func (b *LocalBackend) sendLocked(n ipn.Notify) { } for _, sess := range b.notifyWatchers { - select { - case sess.ch <- &n: - default: - // Drop the notification if the channel is full. + if recipient.match(sess.owner) { + select { + case sess.ch <- &n: + default: + // Drop the notification if the channel is full. + } } } } @@ -2892,15 +2960,18 @@ func (b *LocalBackend) sendFileNotify() { // This method is called when a new authURL is received from the control plane, meaning that either a user // has started a new interactive login (e.g., by running `tailscale login` or clicking Login in the GUI), // or the control plane was unable to authenticate this node non-interactively (e.g., due to key expiration). -// b.interact indicates whether an interactive login is in progress. +// A non-nil b.authActor indicates that an interactive login is in progress and was initiated by the specified actor. // If url is "", it is equivalent to calling [LocalBackend.resetAuthURLLocked] with b.mu held. 
func (b *LocalBackend) setAuthURL(url string) { var popBrowser, keyExpired bool + var recipient ipnauth.Actor b.mu.Lock() switch { case url == "": b.resetAuthURLLocked() + b.mu.Unlock() + return case b.authURL != url: b.authURL = url b.authURLTime = b.clock.Now() @@ -2909,26 +2980,27 @@ func (b *LocalBackend) setAuthURL(url string) { popBrowser = true default: // Otherwise, only open it if the user explicitly requests interactive login. - popBrowser = b.interact + popBrowser = b.authActor != nil } keyExpired = b.keyExpired + recipient = b.authActor // or nil // Consume the StartLoginInteractive call, if any, that caused the control // plane to send us this URL. - b.interact = false + b.authActor = nil b.mu.Unlock() if popBrowser { - b.popBrowserAuthNow(url, keyExpired) + b.popBrowserAuthNow(url, keyExpired, recipient) } } -// popBrowserAuthNow shuts down the data plane and sends an auth URL -// to the connected frontend, if any. +// popBrowserAuthNow shuts down the data plane and sends the URL to the recipient's +// [watchSession]s if the recipient is non-nil; otherwise, it sends the URL to all watchSessions. // keyExpired is the value of b.keyExpired upon entry and indicates // whether the node's key has expired. // It must not be called with b.mu held. -func (b *LocalBackend) popBrowserAuthNow(url string, keyExpired bool) { - b.logf("popBrowserAuthNow: url=%v, key-expired=%v, seamless-key-renewal=%v", url != "", keyExpired, b.seamlessRenewalEnabled()) +func (b *LocalBackend) popBrowserAuthNow(url string, keyExpired bool, recipient ipnauth.Actor) { + b.logf("popBrowserAuthNow(%q): url=%v, key-expired=%v, seamless-key-renewal=%v", maybeUsernameOf(recipient), url != "", keyExpired, b.seamlessRenewalEnabled()) // Deconfigure the local network data plane if: // - seamless key renewal is not enabled; @@ -2937,7 +3009,7 @@ func (b *LocalBackend) popBrowserAuthNow(url string, keyExpired bool) { b.blockEngineUpdates(true) b.stopEngineAndWait() } - b.tellClientToBrowseToURL(url) + b.tellRecipientToBrowseToURL(url, toNotificationTarget(recipient)) if b.State() == ipn.Running { b.enterState(ipn.Starting) } @@ -2978,8 +3050,13 @@ func (b *LocalBackend) validPopBrowserURL(urlStr string) bool { } func (b *LocalBackend) tellClientToBrowseToURL(url string) { + b.tellRecipientToBrowseToURL(url, allClients) +} + +// tellRecipientToBrowseToURL is like tellClientToBrowseToURL but allows specifying a recipient. +func (b *LocalBackend) tellRecipientToBrowseToURL(url string, recipient notificationTarget) { if b.validPopBrowserURL(url) { - b.send(ipn.Notify{BrowseToURL: &url}) + b.sendTo(ipn.Notify{BrowseToURL: &url}, recipient) } } @@ -3251,6 +3328,15 @@ func (b *LocalBackend) tryLookupUserName(uid string) string { // StartLoginInteractive attempts to pick up the in-progress flow where it left // off. func (b *LocalBackend) StartLoginInteractive(ctx context.Context) error { + return b.StartLoginInteractiveAs(ctx, nil) +} + +// StartLoginInteractiveAs is like StartLoginInteractive but takes an [ipnauth.Actor] +// as an additional parameter. If non-nil, the specified user is expected to complete +// the interactive login, and therefore will receive the BrowseToURL notification once +// the control plane sends us one. Otherwise, the notification will be delivered to all +// active [watchSession]s. 
+func (b *LocalBackend) StartLoginInteractiveAs(ctx context.Context, user ipnauth.Actor) error { b.mu.Lock() if b.cc == nil { panic("LocalBackend.assertClient: b.cc == nil") @@ -3264,17 +3350,17 @@ func (b *LocalBackend) StartLoginInteractive(ctx context.Context) error { hasValidURL := url != "" && timeSinceAuthURLCreated < ((7*24*time.Hour)-(1*time.Hour)) if !hasValidURL { // A user wants to log in interactively, but we don't have a valid authURL. - // Set a flag to indicate that interactive login is in progress, forcing - // a BrowseToURL notification once the authURL becomes available. - b.interact = true + // Remember the user who initiated the login, so that we can notify them + // once the authURL is available. + b.authActor = user } cc := b.cc b.mu.Unlock() - b.logf("StartLoginInteractive: url=%v", hasValidURL) + b.logf("StartLoginInteractiveAs(%q): url=%v", maybeUsernameOf(user), hasValidURL) if hasValidURL { - b.popBrowserAuthNow(url, keyExpired) + b.popBrowserAuthNow(url, keyExpired, user) } else { cc.Login(b.loginFlags | controlclient.LoginInteractive) } @@ -5124,7 +5210,7 @@ func (b *LocalBackend) resetControlClientLocked() controlclient.Client { func (b *LocalBackend) resetAuthURLLocked() { b.authURL = "" b.authURLTime = time.Time{} - b.interact = false + b.authActor = nil } // ResetForClientDisconnect resets the backend for GUI clients running @@ -7369,3 +7455,13 @@ func (b *LocalBackend) srcIPHasCapForFilter(srcIP netip.Addr, cap tailcfg.NodeCa } return n.HasCap(cap) } + +// maybeUsernameOf returns the actor's username if the actor +// is non-nil and its username can be resolved. +func maybeUsernameOf(actor ipnauth.Actor) string { + var username string + if actor != nil { + username, _ = actor.Username() + } + return username +} diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index b0e12d500..9a8fa5e02 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -15,6 +15,7 @@ import ( "os" "reflect" "slices" + "strings" "sync" "testing" "time" @@ -31,6 +32,7 @@ import ( "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/store/mem" "tailscale.com/net/netcheck" "tailscale.com/net/netmon" @@ -3998,3 +4000,541 @@ func TestFillAllowedSuggestions(t *testing.T) { }) } } + +func TestNotificationTargetMatch(t *testing.T) { + tests := []struct { + name string + target notificationTarget + actor ipnauth.Actor + wantMatch bool + }{ + { + name: "AllClients/Nil", + target: allClients, + actor: nil, + wantMatch: true, + }, + { + name: "AllClients/NoUID/NoCID", + target: allClients, + actor: &ipnauth.TestActor{}, + wantMatch: true, + }, + { + name: "AllClients/WithUID/NoCID", + target: allClients, + actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.NoClientID}, + wantMatch: true, + }, + { + name: "AllClients/NoUID/WithCID", + target: allClients, + actor: &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("A")}, + wantMatch: true, + }, + { + name: "AllClients/WithUID/WithCID", + target: allClients, + actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("A")}, + wantMatch: true, + }, + { + name: "FilterByUID/Nil", + target: notificationTarget{userID: "S-1-5-21-1-2-3-4"}, + actor: nil, + wantMatch: false, + }, + { + name: "FilterByUID/NoUID/NoCID", + target: notificationTarget{userID: "S-1-5-21-1-2-3-4"}, + actor: &ipnauth.TestActor{}, + wantMatch: false, + }, + { + name: "FilterByUID/NoUID/WithCID", + target: notificationTarget{userID: "S-1-5-21-1-2-3-4"}, + 
actor: &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("A")}, + wantMatch: false, + }, + { + name: "FilterByUID/SameUID/NoCID", + target: notificationTarget{userID: "S-1-5-21-1-2-3-4"}, + actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4"}, + wantMatch: true, + }, + { + name: "FilterByUID/DifferentUID/NoCID", + target: notificationTarget{userID: "S-1-5-21-1-2-3-4"}, + actor: &ipnauth.TestActor{UID: "S-1-5-21-5-6-7-8"}, + wantMatch: false, + }, + { + name: "FilterByUID/SameUID/WithCID", + target: notificationTarget{userID: "S-1-5-21-1-2-3-4"}, + actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("A")}, + wantMatch: true, + }, + { + name: "FilterByUID/DifferentUID/WithCID", + target: notificationTarget{userID: "S-1-5-21-1-2-3-4"}, + actor: &ipnauth.TestActor{UID: "S-1-5-21-5-6-7-8", CID: ipnauth.ClientIDFrom("A")}, + wantMatch: false, + }, + { + name: "FilterByCID/Nil", + target: notificationTarget{clientID: ipnauth.ClientIDFrom("A")}, + actor: nil, + wantMatch: false, + }, + { + name: "FilterByCID/NoUID/NoCID", + target: notificationTarget{clientID: ipnauth.ClientIDFrom("A")}, + actor: &ipnauth.TestActor{}, + wantMatch: false, + }, + { + name: "FilterByCID/NoUID/SameCID", + target: notificationTarget{clientID: ipnauth.ClientIDFrom("A")}, + actor: &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("A")}, + wantMatch: true, + }, + { + name: "FilterByCID/NoUID/DifferentCID", + target: notificationTarget{clientID: ipnauth.ClientIDFrom("A")}, + actor: &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("B")}, + wantMatch: false, + }, + { + name: "FilterByCID/WithUID/NoCID", + target: notificationTarget{clientID: ipnauth.ClientIDFrom("A")}, + actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4"}, + wantMatch: false, + }, + { + name: "FilterByCID/WithUID/SameCID", + target: notificationTarget{clientID: ipnauth.ClientIDFrom("A")}, + actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("A")}, + wantMatch: true, + }, + { + name: "FilterByCID/WithUID/DifferentCID", + target: notificationTarget{clientID: ipnauth.ClientIDFrom("A")}, + actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("B")}, + wantMatch: false, + }, + { + name: "FilterByUID+CID/Nil", + target: notificationTarget{userID: "S-1-5-21-1-2-3-4"}, + actor: nil, + wantMatch: false, + }, + { + name: "FilterByUID+CID/NoUID/NoCID", + target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")}, + actor: &ipnauth.TestActor{}, + wantMatch: false, + }, + { + name: "FilterByUID+CID/NoUID/SameCID", + target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")}, + actor: &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("A")}, + wantMatch: false, + }, + { + name: "FilterByUID+CID/NoUID/DifferentCID", + target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")}, + actor: &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("B")}, + wantMatch: false, + }, + { + name: "FilterByUID+CID/SameUID/NoCID", + target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")}, + actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4"}, + wantMatch: false, + }, + { + name: "FilterByUID+CID/SameUID/SameCID", + target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")}, + actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("A")}, + wantMatch: true, + }, + { + name: "FilterByUID+CID/SameUID/DifferentCID", + target: notificationTarget{userID: 
"S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")}, + actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("B")}, + wantMatch: false, + }, + { + name: "FilterByUID+CID/DifferentUID/NoCID", + target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")}, + actor: &ipnauth.TestActor{UID: "S-1-5-21-5-6-7-8"}, + wantMatch: false, + }, + { + name: "FilterByUID+CID/DifferentUID/SameCID", + target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")}, + actor: &ipnauth.TestActor{UID: "S-1-5-21-5-6-7-8", CID: ipnauth.ClientIDFrom("A")}, + wantMatch: false, + }, + { + name: "FilterByUID+CID/DifferentUID/DifferentCID", + target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")}, + actor: &ipnauth.TestActor{UID: "S-1-5-21-5-6-7-8", CID: ipnauth.ClientIDFrom("B")}, + wantMatch: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotMatch := tt.target.match(tt.actor) + if gotMatch != tt.wantMatch { + t.Errorf("match: got %v; want %v", gotMatch, tt.wantMatch) + } + }) + } +} + +type newTestControlFn func(tb testing.TB, opts controlclient.Options) controlclient.Client + +func newLocalBackendWithTestControl(t *testing.T, enableLogging bool, newControl newTestControlFn) *LocalBackend { + logf := logger.Discard + if enableLogging { + logf = tstest.WhileTestRunningLogger(t) + } + sys := new(tsd.System) + store := new(mem.Store) + sys.Set(store) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) + if err != nil { + t.Fatalf("NewFakeUserspaceEngine: %v", err) + } + t.Cleanup(e.Close) + sys.Set(e) + + b, err := NewLocalBackend(logf, logid.PublicID{}, sys, 0) + if err != nil { + t.Fatalf("NewLocalBackend: %v", err) + } + b.DisablePortMapperForTest() + + b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { + return newControl(t, opts), nil + }) + return b +} + +// notificationHandler is any function that can process (e.g., check) a notification. +// It returns whether the notification has been handled or should be passed to the next handler. +// The handler may be called from any goroutine, so it must avoid calling functions +// that are restricted to the goroutine running the test or benchmark function, +// such as [testing.common.FailNow] and [testing.common.Fatalf]. +type notificationHandler func(testing.TB, ipnauth.Actor, *ipn.Notify) bool + +// wantedNotification names a [notificationHandler] that processes a notification +// the test expects and wants to receive. The name is used to report notifications +// that haven't been received within the expected timeout. +type wantedNotification struct { + name string + cond notificationHandler +} + +// notificationWatcher observes [LocalBackend] notifications as the specified actor, +// reporting missing but expected notifications using [testing.common.Error], +// and delegating the handling of unexpected notifications to the [notificationHandler]s. 
+type notificationWatcher struct { + tb testing.TB + lb *LocalBackend + actor ipnauth.Actor + + mu sync.Mutex + mask ipn.NotifyWatchOpt + want []wantedNotification // notifications we want to receive + unexpected []notificationHandler // funcs that are called to check any other notifications + ctxCancel context.CancelFunc // cancels the outstanding [LocalBackend.WatchNotificationsAs] call + got []*ipn.Notify // all notifications, both wanted and unexpected, we've received so far + gotWanted []*ipn.Notify // only the expected notifications; holds nil for any notification that hasn't been received + gotWantedCh chan struct{} // closed when we have received the last wanted notification + doneCh chan struct{} // closed when [LocalBackend.WatchNotificationsAs] returns +} + +func newNotificationWatcher(tb testing.TB, lb *LocalBackend, actor ipnauth.Actor) *notificationWatcher { + return ¬ificationWatcher{tb: tb, lb: lb, actor: actor} +} + +func (w *notificationWatcher) watch(mask ipn.NotifyWatchOpt, wanted []wantedNotification, unexpected ...notificationHandler) { + w.tb.Helper() + + // Cancel any outstanding [LocalBackend.WatchNotificationsAs] calls. + w.mu.Lock() + ctxCancel := w.ctxCancel + doneCh := w.doneCh + w.mu.Unlock() + if doneCh != nil { + ctxCancel() + <-doneCh + } + + doneCh = make(chan struct{}) + gotWantedCh := make(chan struct{}) + ctx, ctxCancel := context.WithCancel(context.Background()) + w.tb.Cleanup(func() { + ctxCancel() + <-doneCh + }) + + w.mu.Lock() + w.mask = mask + w.want = wanted + w.unexpected = unexpected + w.ctxCancel = ctxCancel + w.got = nil + w.gotWanted = make([]*ipn.Notify, len(wanted)) + w.gotWantedCh = gotWantedCh + w.doneCh = doneCh + w.mu.Unlock() + + watchAddedCh := make(chan struct{}) + go func() { + defer close(doneCh) + if len(wanted) == 0 { + close(gotWantedCh) + if len(unexpected) == 0 { + close(watchAddedCh) + return + } + } + + var nextWantIdx int + w.lb.WatchNotificationsAs(ctx, w.actor, w.mask, func() { close(watchAddedCh) }, func(notify *ipn.Notify) (keepGoing bool) { + w.tb.Helper() + + w.mu.Lock() + defer w.mu.Unlock() + w.got = append(w.got, notify) + + wanted := false + for i := nextWantIdx; i < len(w.want); i++ { + if wanted = w.want[i].cond(w.tb, w.actor, notify); wanted { + w.gotWanted[i] = notify + nextWantIdx = i + 1 + break + } + } + + if wanted && nextWantIdx == len(w.want) { + close(w.gotWantedCh) + if len(w.unexpected) == 0 { + // If we have received the last wanted notification, + // and we don't have any handlers for the unexpected notifications, + // we can stop the watcher right away. + return false + } + + } + + if !wanted { + // If we've received a notification we didn't expect, + // it could either be an unwanted notification caused by a bug + // or just a miscellaneous one that's irrelevant for the current test. + // Call unexpected notification handlers, if any, to + // check and fail the test if necessary. + for _, h := range w.unexpected { + if h(w.tb, w.actor, notify) { + break + } + } + } + + return true + }) + + }() + <-watchAddedCh +} + +func (w *notificationWatcher) check() []*ipn.Notify { + w.tb.Helper() + + w.mu.Lock() + cancel := w.ctxCancel + gotWantedCh := w.gotWantedCh + checkUnexpected := len(w.unexpected) != 0 + doneCh := w.doneCh + w.mu.Unlock() + + // Wait for up to 10 seconds to receive expected notifications. 
+ timeout := 10 * time.Second + for { + select { + case <-gotWantedCh: + if checkUnexpected { + gotWantedCh = nil + // But do not wait longer than 500ms for unexpected notifications after + // the expected notifications have been received. + timeout = 500 * time.Millisecond + continue + } + case <-doneCh: + // [LocalBackend.WatchNotificationsAs] has already returned, so no further + // notifications will be received. There's no reason to wait any longer. + case <-time.After(timeout): + } + cancel() + <-doneCh + break + } + + // Report missing notifications, if any, and log all received notifications, + // including both expected and unexpected ones. + w.mu.Lock() + defer w.mu.Unlock() + if hasMissing := slices.Contains(w.gotWanted, nil); hasMissing { + want := make([]string, len(w.want)) + got := make([]string, 0, len(w.want)) + for i, wn := range w.want { + want[i] = wn.name + if w.gotWanted[i] != nil { + got = append(got, wn.name) + } + } + w.tb.Errorf("Notifications(%s): got %q; want %q", actorDescriptionForTest(w.actor), strings.Join(got, ", "), strings.Join(want, ", ")) + for i, n := range w.got { + w.tb.Logf("%d. %v", i, n) + } + return nil + } + + return w.gotWanted +} + +func actorDescriptionForTest(actor ipnauth.Actor) string { + var parts []string + if actor != nil { + if name, _ := actor.Username(); name != "" { + parts = append(parts, name) + } + if uid := actor.UserID(); uid != "" { + parts = append(parts, string(uid)) + } + if clientID, _ := actor.ClientID(); clientID != ipnauth.NoClientID { + parts = append(parts, clientID.String()) + } + } + return fmt.Sprintf("Actor{%s}", strings.Join(parts, ", ")) +} + +func TestLoginNotifications(t *testing.T) { + const ( + enableLogging = true + controlURL = "https://localhost:1/" + loginURL = "https://localhost:1/1" + ) + + wantBrowseToURL := wantedNotification{ + name: "BrowseToURL", + cond: func(t testing.TB, actor ipnauth.Actor, n *ipn.Notify) bool { + if n.BrowseToURL != nil && *n.BrowseToURL != loginURL { + t.Errorf("BrowseToURL (%s): got %q; want %q", actorDescriptionForTest(actor), *n.BrowseToURL, loginURL) + return false + } + return n.BrowseToURL != nil + }, + } + unexpectedBrowseToURL := func(t testing.TB, actor ipnauth.Actor, n *ipn.Notify) bool { + if n.BrowseToURL != nil { + t.Errorf("Unexpected BrowseToURL(%s): %v", actorDescriptionForTest(actor), n) + return true + } + return false + } + + tests := []struct { + name string + logInAs ipnauth.Actor + urlExpectedBy []ipnauth.Actor + urlUnexpectedBy []ipnauth.Actor + }{ + { + name: "NoObservers", + logInAs: &ipnauth.TestActor{UID: "A"}, + urlExpectedBy: []ipnauth.Actor{}, // ensure that it does not panic if no one is watching + }, + { + name: "SingleUser", + logInAs: &ipnauth.TestActor{UID: "A"}, + urlExpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A"}}, + }, + { + name: "SameUser/TwoSessions/NoCID", + logInAs: &ipnauth.TestActor{UID: "A"}, + urlExpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A"}, &ipnauth.TestActor{UID: "A"}}, + }, + { + name: "SameUser/TwoSessions/OneWithCID", + logInAs: &ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("123")}, + urlExpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("123")}}, + urlUnexpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A"}}, + }, + { + name: "SameUser/TwoSessions/BothWithCID", + logInAs: &ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("123")}, + urlExpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("123")}}, + urlUnexpectedBy: 
[]ipnauth.Actor{&ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("456")}}, + }, + { + name: "DifferentUsers/NoCID", + logInAs: &ipnauth.TestActor{UID: "A"}, + urlExpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A"}}, + urlUnexpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "B"}}, + }, + { + name: "DifferentUsers/SameCID", + logInAs: &ipnauth.TestActor{UID: "A"}, + urlExpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("123")}}, + urlUnexpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "B", CID: ipnauth.ClientIDFrom("123")}}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + lb := newLocalBackendWithTestControl(t, enableLogging, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + return newClient(tb, opts) + }) + if _, err := lb.EditPrefs(&ipn.MaskedPrefs{ControlURLSet: true, Prefs: ipn.Prefs{ControlURL: controlURL}}); err != nil { + t.Fatalf("(*EditPrefs).Start(): %v", err) + } + if err := lb.Start(ipn.Options{}); err != nil { + t.Fatalf("(*LocalBackend).Start(): %v", err) + } + + sessions := make([]*notificationWatcher, 0, len(tt.urlExpectedBy)+len(tt.urlUnexpectedBy)) + for _, actor := range tt.urlExpectedBy { + session := newNotificationWatcher(t, lb, actor) + session.watch(0, []wantedNotification{wantBrowseToURL}) + sessions = append(sessions, session) + } + for _, actor := range tt.urlUnexpectedBy { + session := newNotificationWatcher(t, lb, actor) + session.watch(0, nil, unexpectedBrowseToURL) + sessions = append(sessions, session) + } + + if err := lb.StartLoginInteractiveAs(context.Background(), tt.logInAs); err != nil { + t.Fatal(err) + } + + lb.cc.(*mockControl).send(nil, loginURL, false, nil) + + var wg sync.WaitGroup + wg.Add(len(sessions)) + for _, sess := range sessions { + go func() { // check all sessions in parallel + sess.check() + wg.Done() + }() + } + wg.Wait() + }) + } +} diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 761c9816c..63d4b183c 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -31,6 +31,7 @@ type actor struct { logf logger.Logf ci *ipnauth.ConnIdentity + clientID ipnauth.ClientID isLocalSystem bool // whether the actor is the Windows' Local System identity. } @@ -39,7 +40,22 @@ func newActor(logf logger.Logf, c net.Conn) (*actor, error) { if err != nil { return nil, err } - return &actor{logf: logf, ci: ci, isLocalSystem: connIsLocalSystem(ci)}, nil + var clientID ipnauth.ClientID + if pid := ci.Pid(); pid != 0 { + // Derive [ipnauth.ClientID] from the PID of the connected client process. + // TODO(nickkhyl): This is transient and will be re-worked as we + // progress on tailscale/corp#18342. At minimum, we should use a 2-tuple + // (PID + StartTime) or a 3-tuple (PID + StartTime + UID) to identify + // the client process. This helps prevent security issues where a + // terminated client process's PID could be reused by a different + // process. This is not currently an issue as we allow only one user to + // connect anyway. + // Additionally, we should consider caching authentication results since + // operations like retrieving a username by SID might require network + // connectivity on domain-joined devices and/or be slow. + clientID = ipnauth.ClientIDFrom(pid) + } + return &actor{logf: logf, ci: ci, clientID: clientID, isLocalSystem: connIsLocalSystem(ci)}, nil } // IsLocalSystem implements [ipnauth.Actor]. 
@@ -61,6 +77,11 @@ func (a *actor) pid() int { return a.ci.Pid() } +// ClientID implements [ipnauth.Actor]. +func (a *actor) ClientID() (_ ipnauth.ClientID, ok bool) { + return a.clientID, a.clientID != ipnauth.NoClientID +} + // Username implements [ipnauth.Actor]. func (a *actor) Username() (string, error) { if a.ci == nil { diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 528304bab..25ec19121 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -1231,7 +1231,7 @@ func (h *Handler) serveWatchIPNBus(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") ctx := r.Context() enc := json.NewEncoder(w) - h.b.WatchNotifications(ctx, mask, f.Flush, func(roNotify *ipn.Notify) (keepGoing bool) { + h.b.WatchNotificationsAs(ctx, h.Actor, mask, f.Flush, func(roNotify *ipn.Notify) (keepGoing bool) { err := enc.Encode(roNotify) if err != nil { h.logf("json.Encode: %v", err) @@ -1251,7 +1251,7 @@ func (h *Handler) serveLoginInteractive(w http.ResponseWriter, r *http.Request) http.Error(w, "want POST", http.StatusBadRequest) return } - h.b.StartLoginInteractive(r.Context()) + h.b.StartLoginInteractiveAs(r.Context(), h.Actor) w.WriteHeader(http.StatusNoContent) return } diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index fa54a1e75..d89c46261 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -39,23 +39,6 @@ import ( "tailscale.com/wgengine" ) -var _ ipnauth.Actor = (*testActor)(nil) - -type testActor struct { - uid ipn.WindowsUserID - name string - isLocalSystem bool - isLocalAdmin bool -} - -func (u *testActor) UserID() ipn.WindowsUserID { return u.uid } - -func (u *testActor) Username() (string, error) { return u.name, nil } - -func (u *testActor) IsLocalSystem() bool { return u.isLocalSystem } - -func (u *testActor) IsLocalAdmin(operatorUID string) bool { return u.isLocalAdmin } - func TestValidHost(t *testing.T) { tests := []struct { host string @@ -207,7 +190,7 @@ func TestWhoIsArgTypes(t *testing.T) { func TestShouldDenyServeConfigForGOOSAndUserContext(t *testing.T) { newHandler := func(connIsLocalAdmin bool) *Handler { - return &Handler{Actor: &testActor{isLocalAdmin: connIsLocalAdmin}, b: newTestLocalBackend(t)} + return &Handler{Actor: &ipnauth.TestActor{LocalAdmin: connIsLocalAdmin}, b: newTestLocalBackend(t)} } tests := []struct { name string From 877fa504b429f662d714408397c0ed403a0eda01 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 18 Oct 2024 13:12:07 -0700 Subject: [PATCH 0064/1708] net/netcheck: remove arbitrary deadlines from GetReport() tests (#13832) GetReport() may have side effects when the caller enforces a deadline that is shorter than ReportTimeout. 
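A minimal sketch of the calling pattern the updated tests rely on (c, dm, and t mirror the test
client, DERP map, and testing.T already constructed in these tests; nothing here is new API):

    // Let GetReport enforce its internal ReportTimeout instead of racing it
    // against a shorter caller-side deadline, which can leave side effects
    // behind; cancel is kept only for cleanup.
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    r, err := c.GetReport(ctx, dm, nil)
    if err != nil {
        t.Fatal(err)
    }
    _ = r
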
Updates #13783 Updates #13394 Signed-off-by: Jordan Whited --- net/netcheck/netcheck_test.go | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go index 02076f8d4..964014203 100644 --- a/net/netcheck/netcheck_test.go +++ b/net/netcheck/netcheck_test.go @@ -38,7 +38,7 @@ func TestBasic(t *testing.T) { c := newTestClient(t) - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() if err := c.Standalone(ctx, "127.0.0.1:0"); err != nil { @@ -117,7 +117,7 @@ func TestWorksWhenUDPBlocked(t *testing.T) { c := newTestClient(t) - ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() r, err := c.GetReport(ctx, dm, nil) @@ -872,3 +872,30 @@ func TestReportTimeouts(t *testing.T) { t.Errorf("ReportTimeout (%v) cannot be less than httpsProbeTimeout (%v)", ReportTimeout, httpsProbeTimeout) } } + +func TestNoUDPNilGetReportOpts(t *testing.T) { + blackhole, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + t.Fatalf("failed to open blackhole STUN listener: %v", err) + } + defer blackhole.Close() + + dm := stuntest.DERPMapOf(blackhole.LocalAddr().String()) + for _, region := range dm.Regions { + for _, n := range region.Nodes { + n.STUNOnly = false // exercise ICMP & HTTPS probing + } + } + + c := newTestClient(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + r, err := c.GetReport(ctx, dm, nil) + if err != nil { + t.Fatal(err) + } + if r.UDP { + t.Fatal("unexpected working UDP") + } +} From e711ee5d226c3cc89790a54ffe8fbac7a20c67ed Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Fri, 18 Oct 2024 14:20:40 -0600 Subject: [PATCH 0065/1708] release/dist: clamp min / max version for synology package centre (#13857) Clamp the min and max version for DSM 7.0 and DSM 7.2 packages when we are building packages for the synology package centre. This change leaves packages destined for pkgs.tailscale.com with just the min version set to not break packages in the wild / our update flow. Updates https://github.com/tailscale/corp/issues/22908 Signed-off-by: Mario Minardi --- release/dist/synology/pkgs.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/release/dist/synology/pkgs.go b/release/dist/synology/pkgs.go index 7802470e1..ab89dbee3 100644 --- a/release/dist/synology/pkgs.go +++ b/release/dist/synology/pkgs.go @@ -155,8 +155,22 @@ func (t *target) mkInfo(b *dist.Build, uncompressedSz int64) []byte { f("os_min_ver", "6.0.1-7445") f("os_max_ver", "7.0-40000") case 7: - f("os_min_ver", "7.0-40000") - f("os_max_ver", "") + if t.packageCenter { + switch t.dsmMinorVersion { + case 0: + f("os_min_ver", "7.0-40000") + f("os_max_ver", "7.2-60000") + case 2: + f("os_min_ver", "7.2-60000") + default: + panic(fmt.Sprintf("unsupported DSM major.minor version %s", t.dsmVersionString())) + } + } else { + // We do not clamp the os_max_ver currently for non-package center builds as + // the binaries for 7.0 and 7.2 are identical. 
+ f("os_min_ver", "7.0-40000") + f("os_max_ver", "") + } default: panic(fmt.Sprintf("unsupported DSM major version %d", t.dsmMajorVersion)) } From fd77965f23a317cb6f7bc53d585ace2c771d5b48 Mon Sep 17 00:00:00 2001 From: Andrea Gottardo Date: Fri, 18 Oct 2024 17:35:46 -0700 Subject: [PATCH 0066/1708] net/tlsdial: call out firewalls blocking Tailscale in health warnings (#13840) Updates tailscale/tailscale#13839 Adds a new blockblame package which can detect common MITM SSL certificates used by network appliances. We use this in `tlsdial` to display a dedicated health warning when we cannot connect to control, and a network appliance MITM attack is detected. Signed-off-by: Andrea Gottardo --- cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + net/tlsdial/blockblame/blockblame.go | 104 ++++++++++++++++++++++ net/tlsdial/blockblame/blockblame_test.go | 54 +++++++++++ net/tlsdial/tlsdial.go | 32 ++++++- 7 files changed, 192 insertions(+), 2 deletions(-) create mode 100644 net/tlsdial/blockblame/blockblame.go create mode 100644 net/tlsdial/blockblame/blockblame_test.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 417dbcfb0..362b07882 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -113,6 +113,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/net/stunserver from tailscale.com/cmd/derper L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/derp/derphttp + tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/derp/derphttp+ tailscale.com/net/wsconn from tailscale.com/cmd/derper+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 66c2c8bae..58a9aa472 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -735,6 +735,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/stun from tailscale.com/ipn/localapi+ L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ + tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 73aedc9e5..de534df8d 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -121,6 +121,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/stun from tailscale.com/net/netcheck L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/cmd/tailscale/cli+ + tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/wsconn from tailscale.com/control/controlhttp+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 10df37d79..67d8489df 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -322,6 +322,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 
tailscale.com/net/stun from tailscale.com/ipn/localapi+ L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ + tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ diff --git a/net/tlsdial/blockblame/blockblame.go b/net/tlsdial/blockblame/blockblame.go new file mode 100644 index 000000000..57dc7a6e6 --- /dev/null +++ b/net/tlsdial/blockblame/blockblame.go @@ -0,0 +1,104 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package blockblame blames specific firewall manufacturers for blocking Tailscale, +// by analyzing the SSL certificate presented when attempting to connect to a remote +// server. +package blockblame + +import ( + "crypto/x509" + "strings" +) + +// VerifyCertificate checks if the given certificate c is issued by a firewall manufacturer +// that is known to block Tailscale connections. It returns true and the Manufacturer of +// the equipment if it is, or false and nil if it is not. +func VerifyCertificate(c *x509.Certificate) (m *Manufacturer, ok bool) { + for _, m := range Manufacturers { + if m.match != nil && m.match(c) { + return m, true + } + } + return nil, false +} + +// Manufacturer represents a firewall manufacturer that may be blocking Tailscale. +type Manufacturer struct { + // Name is the name of the firewall manufacturer to be + // mentioned in health warning messages, e.g. "Fortinet". + Name string + // match is a function that returns true if the given certificate looks like it might + // be issued by this manufacturer. 
+ match matchFunc +} + +var Manufacturers = []*Manufacturer{ + { + Name: "Aruba Networks", + match: issuerContains("Aruba"), + }, + { + Name: "Cisco", + match: issuerContains("Cisco"), + }, + { + Name: "Fortinet", + match: matchAny( + issuerContains("Fortinet"), + certEmail("support@fortinet.com"), + ), + }, + { + Name: "Huawei", + match: certEmail("mobile@huawei.com"), + }, + { + Name: "Palo Alto Networks", + match: matchAny( + issuerContains("Palo Alto Networks"), + issuerContains("PAN-FW"), + ), + }, + { + Name: "Sophos", + match: issuerContains("Sophos"), + }, + { + Name: "Ubiquiti", + match: matchAny( + issuerContains("UniFi"), + issuerContains("Ubiquiti"), + ), + }, +} + +type matchFunc func(*x509.Certificate) bool + +func issuerContains(s string) matchFunc { + return func(c *x509.Certificate) bool { + return strings.Contains(strings.ToLower(c.Issuer.String()), strings.ToLower(s)) + } +} + +func certEmail(v string) matchFunc { + return func(c *x509.Certificate) bool { + for _, email := range c.EmailAddresses { + if strings.Contains(strings.ToLower(email), strings.ToLower(v)) { + return true + } + } + return false + } +} + +func matchAny(fs ...matchFunc) matchFunc { + return func(c *x509.Certificate) bool { + for _, f := range fs { + if f(c) { + return true + } + } + return false + } +} diff --git a/net/tlsdial/blockblame/blockblame_test.go b/net/tlsdial/blockblame/blockblame_test.go new file mode 100644 index 000000000..6d3592c60 --- /dev/null +++ b/net/tlsdial/blockblame/blockblame_test.go @@ -0,0 +1,54 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package blockblame + +import ( + "crypto/x509" + "encoding/pem" + "testing" +) + +const controlplaneDotTailscaleDotComPEM = ` +-----BEGIN CERTIFICATE----- +MIIDkzCCAxqgAwIBAgISA2GOahsftpp59yuHClbDuoduMAoGCCqGSM49BAMDMDIx +CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQDEwJF +NjAeFw0yNDEwMTIxNjE2NDVaFw0yNTAxMTAxNjE2NDRaMCUxIzAhBgNVBAMTGmNv +bnRyb2xwbGFuZS50YWlsc2NhbGUuY29tMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcD +QgAExfraDUc1t185zuGtZlnPDtEJJSDBqvHN4vQcXSzSTPSAdDYHcA8fL5woU2Kg +jK/2C0wm/rYy2Rre/ulhkS4wB6OCAhswggIXMA4GA1UdDwEB/wQEAwIHgDAdBgNV +HSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4E +FgQUpArnpDj8Yh6NTgMOZjDPx0TuLmcwHwYDVR0jBBgwFoAUkydGmAOpUWiOmNbE +QkjbI79YlNIwVQYIKwYBBQUHAQEESTBHMCEGCCsGAQUFBzABhhVodHRwOi8vZTYu +by5sZW5jci5vcmcwIgYIKwYBBQUHMAKGFmh0dHA6Ly9lNi5pLmxlbmNyLm9yZy8w +JQYDVR0RBB4wHIIaY29udHJvbHBsYW5lLnRhaWxzY2FsZS5jb20wEwYDVR0gBAww +CjAIBgZngQwBAgEwggEDBgorBgEEAdZ5AgQCBIH0BIHxAO8AdgDgkrP8DB3I52g2 +H95huZZNClJ4GYpy1nLEsE2lbW9UBAAAAZKBujCyAAAEAwBHMEUCIQDHMgUaL4H9 +ZJa090ZOpBeEVu3+t+EF4HlHI1NqAai6uQIgeY/lLfjAXfcVgxBHHR4zjd0SzhaP +TREHXzwxzN/8blkAdQDPEVbu1S58r/OHW9lpLpvpGnFnSrAX7KwB0lt3zsw7CAAA +AZKBujh8AAAEAwBGMEQCICQwhMk45t9aiFjfwOC/y6+hDbszqSCpIv63kFElweUy +AiAqTdkqmbqUVpnav5JdWkNERVAIlY4jqrThLsCLZYbNszAKBggqhkjOPQQDAwNn +ADBkAjALyfgAt1XQp1uSfxy4GapR5OsmjEMBRVq6IgsPBlCRBfmf0Q3/a6mF0pjb +Sj4oa+cCMEhZk4DmBTIdZY9zjuh8s7bXNfKxUQS0pEhALtXqyFr+D5dF7JcQo9+s +Z98JY7/PCA== +-----END CERTIFICATE-----` + +func TestVerifyCertificateOurControlPlane(t *testing.T) { + p, _ := pem.Decode([]byte(controlplaneDotTailscaleDotComPEM)) + if p == nil { + t.Fatalf("failed to extract certificate bytes for controlplane.tailscale.com") + return + } + cert, err := x509.ParseCertificate(p.Bytes) + if err != nil { + t.Fatalf("failed to parse certificate: %v", err) + return + } + m, found := VerifyCertificate(cert) + if found { + t.Fatalf("expected to not get a result for the 
controlplane.tailscale.com certificate") + } + if m != nil { + t.Fatalf("expected nil manufacturer for controlplane.tailscale.com certificate") + } +} diff --git a/net/tlsdial/tlsdial.go b/net/tlsdial/tlsdial.go index a49e7f0f7..7e847a8b6 100644 --- a/net/tlsdial/tlsdial.go +++ b/net/tlsdial/tlsdial.go @@ -27,6 +27,7 @@ import ( "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/hostinfo" + "tailscale.com/net/tlsdial/blockblame" ) var counterFallbackOK int32 // atomic @@ -44,6 +45,16 @@ var debug = envknob.RegisterBool("TS_DEBUG_TLS_DIAL") // Headscale, etc. var tlsdialWarningPrinted sync.Map // map[string]bool +var mitmBlockWarnable = health.Register(&health.Warnable{ + Code: "blockblame-mitm-detected", + Title: "Network may be blocking Tailscale", + Text: func(args health.Args) string { + return fmt.Sprintf("Network equipment from %q may be blocking Tailscale traffic on this network. Connect to another network, or contact your network administrator for assistance.", args["manufacturer"]) + }, + Severity: health.SeverityMedium, + ImpactsConnectivity: true, +}) + // Config returns a tls.Config for connecting to a server. // If base is non-nil, it's cloned as the base config before // being configured and returned. @@ -86,12 +97,29 @@ func Config(host string, ht *health.Tracker, base *tls.Config) *tls.Config { // Perform some health checks on this certificate before we do // any verification. + var cert *x509.Certificate var selfSignedIssuer string - if certs := cs.PeerCertificates; len(certs) > 0 && certIsSelfSigned(certs[0]) { - selfSignedIssuer = certs[0].Issuer.String() + if certs := cs.PeerCertificates; len(certs) > 0 { + cert = certs[0] + if certIsSelfSigned(cert) { + selfSignedIssuer = cert.Issuer.String() + } } if ht != nil { defer func() { + if retErr != nil && cert != nil { + // Is it a MITM SSL certificate from a well-known network appliance manufacturer? + // Show a dedicated warning. + m, ok := blockblame.VerifyCertificate(cert) + if ok { + log.Printf("tlsdial: server cert for %q looks like %q equipment (could be blocking Tailscale)", host, m.Name) + ht.SetUnhealthy(mitmBlockWarnable, health.Args{"manufacturer": m.Name}) + } else { + ht.SetHealthy(mitmBlockWarnable) + } + } else { + ht.SetHealthy(mitmBlockWarnable) + } if retErr != nil && selfSignedIssuer != "" { // Self-signed certs are never valid. // From c76a6e5167d4f669a91818d502a642c1634251e7 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 20 Oct 2024 13:22:31 -0700 Subject: [PATCH 0067/1708] derp: track client-advertised non-ideal DERP connections in more places In f77821fd63 (released in v1.72.0), we made the client tell a DERP server when the connection was not its ideal choice (the first node in its region). But we didn't do anything with that information until now. This adds a metric about how many such connections are on a given derper, and also adds a bit to the PeerPresentFlags bitmask so watchers can identify (and rebalance) them. 
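For mesh watchers, a rough sketch of how the new bit could be consumed (PeerPresentFlags and
PeerPresentNotIdeal come from this patch; the helper name and surrounding watcher plumbing are
only illustrative, not an API added here):

    // isRebalanceCandidate reports whether a present peer told the server it
    // is connected to a node other than its region's ideal (Region.Nodes[0])
    // node, making it a candidate for rebalancing.
    func isRebalanceCandidate(flags derp.PeerPresentFlags) bool {
        return flags&derp.PeerPresentNotIdeal != 0
    }
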
Updates tailscale/corp#372 Change-Id: Ief8af448750aa6d598e5939a57c062f4e55962be Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/depaware.txt | 2 +- derp/derp.go | 1 + derp/derp_server.go | 30 ++++++++++++++++++++++++++---- derp/derphttp/derphttp_client.go | 2 +- derp/derphttp/derphttp_server.go | 8 +++++++- 5 files changed, 36 insertions(+), 7 deletions(-) diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index de534df8d..765bbc483 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -155,7 +155,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/clientmetric from tailscale.com/net/netcheck+ tailscale.com/util/cloudenv from tailscale.com/net/dnscache+ tailscale.com/util/cmpver from tailscale.com/net/tshttpproxy+ - tailscale.com/util/ctxkey from tailscale.com/types/logger + tailscale.com/util/ctxkey from tailscale.com/types/logger+ 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/cmd/tailscale/cli+ diff --git a/derp/derp.go b/derp/derp.go index f9b070647..878188cd2 100644 --- a/derp/derp.go +++ b/derp/derp.go @@ -147,6 +147,7 @@ const ( PeerPresentIsRegular = 1 << 0 PeerPresentIsMeshPeer = 1 << 1 PeerPresentIsProber = 1 << 2 + PeerPresentNotIdeal = 1 << 3 // client said derp server is not its Region.Nodes[0] ideal node ) var bin = binary.BigEndian diff --git a/derp/derp_server.go b/derp/derp_server.go index 2a0f1aa2a..ab0ab0a90 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -47,6 +47,7 @@ import ( "tailscale.com/tstime/rate" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/ctxkey" "tailscale.com/util/mak" "tailscale.com/util/set" "tailscale.com/util/slicesx" @@ -57,6 +58,16 @@ import ( // verbosely log whenever DERP drops a packet. var verboseDropKeys = map[key.NodePublic]bool{} +// IdealNodeHeader is the HTTP request header sent on DERP HTTP client requests +// to indicate that they're connecting to their ideal (Region.Nodes[0]) node. +// The HTTP header value is the name of the node they wish they were connected +// to. This is an optional header. +const IdealNodeHeader = "Ideal-Node" + +// IdealNodeContextKey is the context key used to pass the IdealNodeHeader value +// from the HTTP handler to the DERP server's Accept method. 
+var IdealNodeContextKey = ctxkey.New[string]("ideal-node", "") + func init() { keys := envknob.String("TS_DEBUG_VERBOSE_DROPS") if keys == "" { @@ -133,6 +144,7 @@ type Server struct { sentPong expvar.Int // number of pong frames enqueued to client accepts expvar.Int curClients expvar.Int + curClientsNotIdeal expvar.Int curHomeClients expvar.Int // ones with preferred dupClientKeys expvar.Int // current number of public keys we have 2+ connections for dupClientConns expvar.Int // current number of connections sharing a public key @@ -603,6 +615,9 @@ func (s *Server) registerClient(c *sclient) { } s.keyOfAddr[c.remoteIPPort] = c.key s.curClients.Add(1) + if c.isNotIdealConn { + s.curClientsNotIdeal.Add(1) + } s.broadcastPeerStateChangeLocked(c.key, c.remoteIPPort, c.presentFlags(), true) } @@ -693,6 +708,9 @@ func (s *Server) unregisterClient(c *sclient) { if c.preferred { s.curHomeClients.Add(-1) } + if c.isNotIdealConn { + s.curClientsNotIdeal.Add(-1) + } } // addPeerGoneFromRegionWatcher adds a function to be called when peer is gone @@ -809,8 +827,8 @@ func (s *Server) accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, rem return fmt.Errorf("receive client key: %v", err) } - clientAP, _ := netip.ParseAddrPort(remoteAddr) - if err := s.verifyClient(ctx, clientKey, clientInfo, clientAP.Addr()); err != nil { + remoteIPPort, _ := netip.ParseAddrPort(remoteAddr) + if err := s.verifyClient(ctx, clientKey, clientInfo, remoteIPPort.Addr()); err != nil { return fmt.Errorf("client %v rejected: %v", clientKey, err) } @@ -820,8 +838,6 @@ func (s *Server) accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, rem ctx, cancel := context.WithCancel(ctx) defer cancel() - remoteIPPort, _ := netip.ParseAddrPort(remoteAddr) - c := &sclient{ connNum: connNum, s: s, @@ -838,6 +854,7 @@ func (s *Server) accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, rem sendPongCh: make(chan [8]byte, 1), peerGone: make(chan peerGoneMsg), canMesh: s.isMeshPeer(clientInfo), + isNotIdealConn: IdealNodeContextKey.Value(ctx) != "", peerGoneLim: rate.NewLimiter(rate.Every(time.Second), 3), } @@ -1511,6 +1528,7 @@ type sclient struct { peerGone chan peerGoneMsg // write request that a peer is not at this server (not used by mesh peers) meshUpdate chan struct{} // write request to write peerStateChange canMesh bool // clientInfo had correct mesh token for inter-region routing + isNotIdealConn bool // client indicated it is not its ideal node in the region isDup atomic.Bool // whether more than 1 sclient for key is connected isDisabled atomic.Bool // whether sends to this peer are disabled due to active/active dups debug bool // turn on for verbose logging @@ -1546,6 +1564,9 @@ func (c *sclient) presentFlags() PeerPresentFlags { if c.canMesh { f |= PeerPresentIsMeshPeer } + if c.isNotIdealConn { + f |= PeerPresentNotIdeal + } if f == 0 { return PeerPresentIsRegular } @@ -2051,6 +2072,7 @@ func (s *Server) ExpVar() expvar.Var { m.Set("gauge_current_file_descriptors", expvar.Func(func() any { return metrics.CurrentFDs() })) m.Set("gauge_current_connections", &s.curClients) m.Set("gauge_current_home_connections", &s.curHomeClients) + m.Set("gauge_current_notideal_connections", &s.curClientsNotIdeal) m.Set("gauge_clients_total", expvar.Func(func() any { return len(s.clientsMesh) })) m.Set("gauge_clients_local", expvar.Func(func() any { return len(s.clients) })) m.Set("gauge_clients_remote", expvar.Func(func() any { return len(s.clientsMesh) - len(s.clients) })) diff --git a/derp/derphttp/derphttp_client.go 
b/derp/derphttp/derphttp_client.go index b8cce8cdc..b695a52a8 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -498,7 +498,7 @@ func (c *Client) connect(ctx context.Context, caller string) (client *derp.Clien req.Header.Set("Connection", "Upgrade") if !idealNodeInRegion && reg != nil { // This is purely informative for now (2024-07-06) for stats: - req.Header.Set("Ideal-Node", reg.Nodes[0].Name) + req.Header.Set(derp.IdealNodeHeader, reg.Nodes[0].Name) // TODO(bradfitz,raggi): start a time.AfterFunc for 30m-1h or so to // dialNode(reg.Nodes[0]) and see if we can even TCP connect to it. If // so, TLS handshake it as well (which is mixed up in this massive diff --git a/derp/derphttp/derphttp_server.go b/derp/derphttp/derphttp_server.go index 41ce86764..ed7d3d707 100644 --- a/derp/derphttp/derphttp_server.go +++ b/derp/derphttp/derphttp_server.go @@ -21,6 +21,8 @@ const fastStartHeader = "Derp-Fast-Start" // Handler returns an http.Handler to be mounted at /derp, serving s. func Handler(s *derp.Server) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + // These are installed both here and in cmd/derper. The check here // catches both cmd/derper run with DERP disabled (STUN only mode) as // well as DERP being run in tests with derphttp.Handler directly, @@ -66,7 +68,11 @@ func Handler(s *derp.Server) http.Handler { pubKey.UntypedHexString()) } - s.Accept(r.Context(), netConn, conn, netConn.RemoteAddr().String()) + if v := r.Header.Get(derp.IdealNodeHeader); v != "" { + ctx = derp.IdealNodeContextKey.WithValue(ctx, v) + } + + s.Accept(ctx, netConn, conn, netConn.RemoteAddr().String()) }) } From 72587ab03cd5b4dc751d007c7c5c060b96b39ec3 Mon Sep 17 00:00:00 2001 From: Erisa A Date: Mon, 21 Oct 2024 18:13:06 +0100 Subject: [PATCH 0068/1708] scripts/installer.sh: allow Archcraft for Arch packages (#13870) Fixes #13869 Signed-off-by: Erisa A --- scripts/installer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/installer.sh b/scripts/installer.sh index 55315c0ce..d2971978e 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -224,7 +224,7 @@ main() { VERSION="leap/15.4" PACKAGETYPE="zypper" ;; - arch|archarm|endeavouros|blendos|garuda) + arch|archarm|endeavouros|blendos|garuda|archcraft) OS="arch" VERSION="" # rolling release PACKAGETYPE="pacman" From f8f53bb6d47526cd5819039d6fa52a050eabc22c Mon Sep 17 00:00:00 2001 From: Andrea Gottardo Date: Mon, 21 Oct 2024 13:40:43 -0700 Subject: [PATCH 0069/1708] health: remove SysDNSOS, add two Warnables for read+set system DNS config (#13874) --- health/health.go | 15 +-------------- net/dns/manager.go | 28 +++++++++++++++++++++++++--- net/dns/resolved.go | 9 ++++++--- 3 files changed, 32 insertions(+), 20 deletions(-) diff --git a/health/health.go b/health/health.go index 216535d17..16b41f075 100644 --- a/health/health.go +++ b/health/health.go @@ -128,9 +128,6 @@ const ( // SysDNS is the name of the net/dns subsystem. SysDNS = Subsystem("dns") - // SysDNSOS is the name of the net/dns OSConfigurator subsystem. - SysDNSOS = Subsystem("dns-os") - // SysDNSManager is the name of the net/dns manager subsystem. 
SysDNSManager = Subsystem("dns-manager") @@ -141,7 +138,7 @@ const ( var subsystemsWarnables = map[Subsystem]*Warnable{} func init() { - for _, s := range []Subsystem{SysRouter, SysDNS, SysDNSOS, SysDNSManager, SysTKA} { + for _, s := range []Subsystem{SysRouter, SysDNS, SysDNSManager, SysTKA} { w := Register(&Warnable{ Code: WarnableCode(s), Severity: SeverityMedium, @@ -510,22 +507,12 @@ func (t *Tracker) SetDNSHealth(err error) { t.setErr(SysDNS, err) } // Deprecated: Warnables should be preferred over Subsystem errors. func (t *Tracker) DNSHealth() error { return t.get(SysDNS) } -// SetDNSOSHealth sets the state of the net/dns.OSConfigurator -// -// Deprecated: Warnables should be preferred over Subsystem errors. -func (t *Tracker) SetDNSOSHealth(err error) { t.setErr(SysDNSOS, err) } - // SetDNSManagerHealth sets the state of the Linux net/dns manager's // discovery of the /etc/resolv.conf situation. // // Deprecated: Warnables should be preferred over Subsystem errors. func (t *Tracker) SetDNSManagerHealth(err error) { t.setErr(SysDNSManager, err) } -// DNSOSHealth returns the net/dns.OSConfigurator error state. -// -// Deprecated: Warnables should be preferred over Subsystem errors. -func (t *Tracker) DNSOSHealth() error { return t.get(SysDNSOS) } - // SetTKAHealth sets the health of the tailnet key authority. // // Deprecated: Warnables should be preferred over Subsystem errors. diff --git a/net/dns/manager.go b/net/dns/manager.go index 51a0fa12c..13cb2d84e 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -8,6 +8,7 @@ import ( "context" "encoding/binary" "errors" + "fmt" "io" "net" "net/netip" @@ -156,11 +157,11 @@ func (m *Manager) setLocked(cfg Config) error { return err } if err := m.os.SetDNS(ocfg); err != nil { - m.health.SetDNSOSHealth(err) + m.health.SetUnhealthy(osConfigurationSetWarnable, health.Args{health.ArgError: err.Error()}) return err } - m.health.SetDNSOSHealth(nil) + m.health.SetHealthy(osConfigurationSetWarnable) m.config = &cfg return nil @@ -217,6 +218,26 @@ func compileHostEntries(cfg Config) (hosts []*HostEntry) { return hosts } +var osConfigurationReadWarnable = health.Register(&health.Warnable{ + Code: "dns-read-os-config-failed", + Title: "Failed to read system DNS configuration", + Text: func(args health.Args) string { + return fmt.Sprintf("Tailscale failed to fetch the DNS configuration of your device: %v", args[health.ArgError]) + }, + Severity: health.SeverityLow, + DependsOn: []*health.Warnable{health.NetworkStatusWarnable}, +}) + +var osConfigurationSetWarnable = health.Register(&health.Warnable{ + Code: "dns-set-os-config-failed", + Title: "Failed to set system DNS configuration", + Text: func(args health.Args) string { + return fmt.Sprintf("Tailscale failed to set the DNS configuration of your device: %v", args[health.ArgError]) + }, + Severity: health.SeverityMedium, + DependsOn: []*health.Warnable{health.NetworkStatusWarnable}, +}) + // compileConfig converts cfg into a quad-100 resolver configuration // and an OS-level configuration. func (m *Manager) compileConfig(cfg Config) (rcfg resolver.Config, ocfg OSConfig, err error) { @@ -320,9 +341,10 @@ func (m *Manager) compileConfig(cfg Config) (rcfg resolver.Config, ocfg OSConfig // This is currently (2022-10-13) expected on certain iOS and macOS // builds. 
} else { - m.health.SetDNSOSHealth(err) + m.health.SetUnhealthy(osConfigurationReadWarnable, health.Args{health.ArgError: err.Error()}) return resolver.Config{}, OSConfig{}, err } + m.health.SetHealthy(osConfigurationReadWarnable) } if baseCfg == nil { diff --git a/net/dns/resolved.go b/net/dns/resolved.go index d82d3fc31..1a7c86041 100644 --- a/net/dns/resolved.go +++ b/net/dns/resolved.go @@ -163,9 +163,9 @@ func (m *resolvedManager) run(ctx context.Context) { } conn.Signal(signals) - // Reset backoff and SetNSOSHealth after successful on reconnect. + // Reset backoff and set osConfigurationSetWarnable to healthy after a successful reconnect. bo.BackOff(ctx, nil) - m.health.SetDNSOSHealth(nil) + m.health.SetHealthy(osConfigurationSetWarnable) return nil } @@ -243,9 +243,12 @@ func (m *resolvedManager) run(ctx context.Context) { // Set health while holding the lock, because this will // graciously serialize the resync's health outcome with a // concurrent SetDNS call. - m.health.SetDNSOSHealth(err) + if err != nil { m.logf("failed to configure systemd-resolved: %v", err) + m.health.SetUnhealthy(osConfigurationSetWarnable, health.Args{health.ArgError: err.Error()}) + } else { + m.health.SetHealthy(osConfigurationSetWarnable) } } } From 0f4c9c0ecb133f2e7e3df2626e2a6a114d6dc251 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 21 Oct 2024 12:28:41 -0500 Subject: [PATCH 0070/1708] cmd/viewer: import types/views when generating a getter for a map field Fixes #13873 Signed-off-by: Nick Khyl --- cmd/viewer/viewer.go | 1 + cmd/viewer/viewer_test.go | 78 +++++++++++++++++++++++++++++++++++++++ util/codegen/codegen.go | 5 +++ 3 files changed, 84 insertions(+) create mode 100644 cmd/viewer/viewer_test.go diff --git a/cmd/viewer/viewer.go b/cmd/viewer/viewer.go index 96223297b..0c5868f3a 100644 --- a/cmd/viewer/viewer.go +++ b/cmd/viewer/viewer.go @@ -258,6 +258,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, thi writeTemplate("unsupportedField") continue } + it.Import("tailscale.com/types/views") args.MapKeyType = it.QualifiedName(key) mElem := m.Elem() var template string diff --git a/cmd/viewer/viewer_test.go b/cmd/viewer/viewer_test.go new file mode 100644 index 000000000..cd5f3d95f --- /dev/null +++ b/cmd/viewer/viewer_test.go @@ -0,0 +1,78 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "go/types" + "testing" + + "tailscale.com/util/codegen" +) + +func TestViewerImports(t *testing.T) { + tests := []struct { + name string + content string + typeNames []string + wantImports []string + }{ + { + name: "Map", + content: `type Test struct { Map map[string]int }`, + typeNames: []string{"Test"}, + wantImports: []string{"tailscale.com/types/views"}, + }, + { + name: "Slice", + content: `type Test struct { Slice []int }`, + typeNames: []string{"Test"}, + wantImports: []string{"tailscale.com/types/views"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, "test.go", "package test\n\n"+tt.content, 0) + if err != nil { + fmt.Println("Error parsing:", err) + return + } + + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + } + + conf := types.Config{} + pkg, err := conf.Check("", fset, []*ast.File{f}, info) + if err != nil { + t.Fatal(err) + } + + var output bytes.Buffer + tracker := codegen.NewImportTracker(pkg) + for i := range tt.typeNames 
{ + typeName, ok := pkg.Scope().Lookup(tt.typeNames[i]).(*types.TypeName) + if !ok { + t.Fatalf("type %q does not exist", tt.typeNames[i]) + } + namedType, ok := typeName.Type().(*types.Named) + if !ok { + t.Fatalf("%q is not a named type", tt.typeNames[i]) + } + genView(&output, tracker, namedType, pkg) + } + + for _, pkgName := range tt.wantImports { + if !tracker.Has(pkgName) { + t.Errorf("missing import %q", pkgName) + } + } + }) + } +} diff --git a/util/codegen/codegen.go b/util/codegen/codegen.go index d998d925d..2f7781b68 100644 --- a/util/codegen/codegen.go +++ b/util/codegen/codegen.go @@ -97,6 +97,11 @@ func (it *ImportTracker) Import(pkg string) { } } +// Has reports whether the specified package has been imported. +func (it *ImportTracker) Has(pkg string) bool { + return it.packages[pkg] +} + func (it *ImportTracker) qualifier(pkg *types.Package) string { if it.thisPkg == pkg { return "" From d4d21a0bbf2c1bd6f0de1bc654d7bd475ef1661e Mon Sep 17 00:00:00 2001 From: Maisem Ali Date: Mon, 21 Oct 2024 16:17:28 -0700 Subject: [PATCH 0071/1708] net/tstun: restore tap mode functionality It had bit-rotted likely during the transition to vector io in 76389d8baf942b10a8f0f4201b7c4b0737a0172c. Tested on Ubuntu 24.04 by creating a netns and doing the DHCP dance to get an IP. Updates #2589 Signed-off-by: Maisem Ali --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- net/tstun/tap_linux.go | 121 +++++++++++++++++++++------------- net/tstun/tap_unsupported.go | 8 --- net/tstun/tun.go | 4 +- net/tstun/wrap.go | 41 +++--------- 6 files changed, 88 insertions(+), 90 deletions(-) delete mode 100644 net/tstun/tap_unsupported.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 58a9aa472..19d6808d7 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -310,7 +310,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/net/tstun+ - gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack+ gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 67d8489df..26165d659 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -221,7 +221,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/net/tstun+ - gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack+ gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ 
diff --git a/net/tstun/tap_linux.go b/net/tstun/tap_linux.go index c721e6e27..c366b0560 100644 --- a/net/tstun/tap_linux.go +++ b/net/tstun/tap_linux.go @@ -6,6 +6,7 @@ package tstun import ( + "bytes" "fmt" "net" "net/netip" @@ -20,10 +21,13 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/checksum" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/network/ipv4" + "gvisor.dev/gvisor/pkg/tcpip/network/ipv6" "gvisor.dev/gvisor/pkg/tcpip/transport/udp" "tailscale.com/net/netaddr" "tailscale.com/net/packet" + "tailscale.com/syncs" "tailscale.com/types/ipproto" + "tailscale.com/types/logger" "tailscale.com/util/multierr" ) @@ -35,13 +39,13 @@ var ourMAC = net.HardwareAddr{0x30, 0x2D, 0x66, 0xEC, 0x7A, 0x93} func init() { createTAP = createTAPLinux } -func createTAPLinux(tapName, bridgeName string) (tun.Device, error) { +func createTAPLinux(logf logger.Logf, tapName, bridgeName string) (tun.Device, error) { fd, err := unix.Open("/dev/net/tun", unix.O_RDWR, 0) if err != nil { return nil, err } - dev, err := openDevice(fd, tapName, bridgeName) + dev, err := openDevice(logf, fd, tapName, bridgeName) if err != nil { unix.Close(fd) return nil, err @@ -50,7 +54,7 @@ func createTAPLinux(tapName, bridgeName string) (tun.Device, error) { return dev, nil } -func openDevice(fd int, tapName, bridgeName string) (tun.Device, error) { +func openDevice(logf logger.Logf, fd int, tapName, bridgeName string) (tun.Device, error) { ifr, err := unix.NewIfreq(tapName) if err != nil { return nil, err @@ -71,7 +75,7 @@ func openDevice(fd int, tapName, bridgeName string) (tun.Device, error) { } } - return newTAPDevice(fd, tapName) + return newTAPDevice(logf, fd, tapName) } type etherType [2]byte @@ -91,7 +95,7 @@ const ( // handleTAPFrame handles receiving a raw TAP ethernet frame and reports whether // it's been handled (that is, whether it should NOT be passed to wireguard). -func (t *Wrapper) handleTAPFrame(ethBuf []byte) bool { +func (t *tapDevice) handleTAPFrame(ethBuf []byte) bool { if len(ethBuf) < ethernetFrameSize { // Corrupt. Ignore. @@ -164,8 +168,7 @@ func (t *Wrapper) handleTAPFrame(ethBuf []byte) bool { copy(res.HardwareAddressTarget(), req.HardwareAddressSender()) copy(res.ProtocolAddressTarget(), req.ProtocolAddressSender()) - // TODO(raggi): reduce allocs! - n, err := t.tdev.Write([][]byte{buf}, 0) + n, err := t.WriteEthernet(buf) if tapDebug { t.logf("tap: wrote ARP reply %v, %v", n, err) } @@ -182,7 +185,7 @@ const routerIP = "100.70.145.1" // must be in same netmask (currently hack at // handleDHCPRequest handles receiving a raw TAP ethernet frame and reports whether // it's been handled as a DHCP request. That is, it reports whether the frame should // be ignored by the caller and not passed on. -func (t *Wrapper) handleDHCPRequest(ethBuf []byte) bool { +func (t *tapDevice) handleDHCPRequest(ethBuf []byte) bool { const udpHeader = 8 if len(ethBuf) < ethernetFrameSize+ipv4HeaderLen+udpHeader { if tapDebug { @@ -207,7 +210,7 @@ func (t *Wrapper) handleDHCPRequest(ethBuf []byte) bool { if p.IPProto != ipproto.UDP || p.Src.Port() != 68 || p.Dst.Port() != 67 { // Not a DHCP request. if tapDebug { - t.logf("tap: DHCP wrong meta") + t.logf("tap: DHCP wrong meta: %+v", p) } return passOnPacket } @@ -250,8 +253,7 @@ func (t *Wrapper) handleDHCPRequest(ethBuf []byte) bool { netip.AddrPortFrom(netaddr.IPv4(255, 255, 255, 255), 68), // dst ) - // TODO(raggi): reduce allocs! 
- n, err := t.tdev.Write([][]byte{pkt}, 0) + n, err := t.WriteEthernet(pkt) if tapDebug { t.logf("tap: wrote DHCP OFFER %v, %v", n, err) } @@ -278,8 +280,7 @@ func (t *Wrapper) handleDHCPRequest(ethBuf []byte) bool { netip.AddrPortFrom(netaddr.IPv4(100, 100, 100, 100), 67), // src netip.AddrPortFrom(netaddr.IPv4(255, 255, 255, 255), 68), // dst ) - // TODO(raggi): reduce allocs! - n, err := t.tdev.Write([][]byte{pkt}, 0) + n, err := t.WriteEthernet(pkt) if tapDebug { t.logf("tap: wrote DHCP ACK %v, %v", n, err) } @@ -291,6 +292,16 @@ func (t *Wrapper) handleDHCPRequest(ethBuf []byte) bool { return consumePacket } +func writeEthernetFrame(buf []byte, srcMAC, dstMAC net.HardwareAddr, proto tcpip.NetworkProtocolNumber) { + // Ethernet header + eth := header.Ethernet(buf) + eth.Encode(&header.EthernetFields{ + SrcAddr: tcpip.LinkAddress(srcMAC), + DstAddr: tcpip.LinkAddress(dstMAC), + Type: proto, + }) +} + func packLayer2UDP(payload []byte, srcMAC, dstMAC net.HardwareAddr, src, dst netip.AddrPort) []byte { buf := make([]byte, header.EthernetMinimumSize+header.UDPMinimumSize+header.IPv4MinimumSize+len(payload)) payloadStart := len(buf) - len(payload) @@ -300,12 +311,7 @@ func packLayer2UDP(payload []byte, srcMAC, dstMAC net.HardwareAddr, src, dst net dstB := dst.Addr().As4() dstIP := tcpip.AddrFromSlice(dstB[:]) // Ethernet header - eth := header.Ethernet(buf) - eth.Encode(&header.EthernetFields{ - SrcAddr: tcpip.LinkAddress(srcMAC), - DstAddr: tcpip.LinkAddress(dstMAC), - Type: ipv4.ProtocolNumber, - }) + writeEthernetFrame(buf, srcMAC, dstMAC, ipv4.ProtocolNumber) // IP header ipbuf := buf[header.EthernetMinimumSize:] ip := header.IPv4(ipbuf) @@ -342,17 +348,18 @@ func run(prog string, args ...string) error { return nil } -func (t *Wrapper) destMAC() [6]byte { +func (t *tapDevice) destMAC() [6]byte { return t.destMACAtomic.Load() } -func newTAPDevice(fd int, tapName string) (tun.Device, error) { +func newTAPDevice(logf logger.Logf, fd int, tapName string) (tun.Device, error) { err := unix.SetNonblock(fd, true) if err != nil { return nil, err } file := os.NewFile(uintptr(fd), "/dev/tap") d := &tapDevice{ + logf: logf, file: file, events: make(chan tun.Event), name: tapName, @@ -360,20 +367,14 @@ func newTAPDevice(fd int, tapName string) (tun.Device, error) { return d, nil } -var ( - _ setWrapperer = &tapDevice{} -) - type tapDevice struct { file *os.File + logf func(format string, args ...any) events chan tun.Event name string - wrapper *Wrapper closeOnce sync.Once -} -func (t *tapDevice) setWrapper(wrapper *Wrapper) { - t.wrapper = wrapper + destMACAtomic syncs.AtomicValue[[6]byte] } func (t *tapDevice) File() *os.File { @@ -384,36 +385,63 @@ func (t *tapDevice) Name() (string, error) { return t.name, nil } +// Read reads an IP packet from the TAP device. It strips the ethernet frame header. func (t *tapDevice) Read(buffs [][]byte, sizes []int, offset int) (int, error) { + n, err := t.ReadEthernet(buffs, sizes, offset) + if err != nil || n == 0 { + return n, err + } + // Strip the ethernet frame header. + copy(buffs[0][offset:], buffs[0][offset+ethernetFrameSize:offset+sizes[0]]) + sizes[0] -= ethernetFrameSize + return 1, nil +} + +// ReadEthernet reads a raw ethernet frame from the TAP device. 
+func (t *tapDevice) ReadEthernet(buffs [][]byte, sizes []int, offset int) (int, error) { n, err := t.file.Read(buffs[0][offset:]) if err != nil { return 0, err } + if t.handleTAPFrame(buffs[0][offset : offset+n]) { + return 0, nil + } sizes[0] = n return 1, nil } +// WriteEthernet writes a raw ethernet frame to the TAP device. +func (t *tapDevice) WriteEthernet(buf []byte) (int, error) { + return t.file.Write(buf) +} + +// ethBufPool holds a pool of bytes.Buffers for use in [tapDevice.Write]. +var ethBufPool = syncs.Pool[*bytes.Buffer]{New: func() *bytes.Buffer { return new(bytes.Buffer) }} + +// Write writes a raw IP packet to the TAP device. It adds the ethernet frame header. func (t *tapDevice) Write(buffs [][]byte, offset int) (int, error) { errs := make([]error, 0) wrote := 0 + m := t.destMAC() + dstMac := net.HardwareAddr(m[:]) + buf := ethBufPool.Get() + defer ethBufPool.Put(buf) for _, buff := range buffs { - if offset < ethernetFrameSize { - errs = append(errs, fmt.Errorf("[unexpected] weird offset %d for TAP write", offset)) - return 0, multierr.New(errs...) - } - eth := buff[offset-ethernetFrameSize:] - dst := t.wrapper.destMAC() - copy(eth[:6], dst[:]) - copy(eth[6:12], ourMAC[:]) - et := etherTypeIPv4 - if buff[offset]>>4 == 6 { - et = etherTypeIPv6 + buf.Reset() + buf.Grow(header.EthernetMinimumSize + len(buff) - offset) + + var ebuf [14]byte + switch buff[offset] >> 4 { + case 4: + writeEthernetFrame(ebuf[:], ourMAC, dstMac, ipv4.ProtocolNumber) + case 6: + writeEthernetFrame(ebuf[:], ourMAC, dstMac, ipv6.ProtocolNumber) + default: + continue } - eth[12], eth[13] = et[0], et[1] - if tapDebug { - t.wrapper.logf("tap: tapWrite off=%v % x", offset, buff) - } - _, err := t.file.Write(buff[offset-ethernetFrameSize:]) + buf.Write(ebuf[:]) + buf.Write(buff[offset:]) + _, err := t.WriteEthernet(buf.Bytes()) if err != nil { errs = append(errs, err) } else { @@ -428,8 +456,7 @@ func (t *tapDevice) MTU() (int, error) { if err != nil { return 0, err } - err = unix.IoctlIfreq(int(t.file.Fd()), unix.SIOCGIFMTU, ifr) - if err != nil { + if err := unix.IoctlIfreq(int(t.file.Fd()), unix.SIOCGIFMTU, ifr); err != nil { return 0, err } return int(ifr.Uint32()), nil diff --git a/net/tstun/tap_unsupported.go b/net/tstun/tap_unsupported.go deleted file mode 100644 index 6792b229f..000000000 --- a/net/tstun/tap_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux || ts_omit_tap - -package tstun - -func (*Wrapper) handleTAPFrame([]byte) bool { panic("unreachable") } diff --git a/net/tstun/tun.go b/net/tstun/tun.go index 66e209d1a..9f5d42ecc 100644 --- a/net/tstun/tun.go +++ b/net/tstun/tun.go @@ -18,7 +18,7 @@ import ( ) // createTAP is non-nil on Linux. -var createTAP func(tapName, bridgeName string) (tun.Device, error) +var createTAP func(logf logger.Logf, tapName, bridgeName string) (tun.Device, error) // New returns a tun.Device for the requested device name, along with // the OS-dependent name that was allocated to the device. 
@@ -42,7 +42,7 @@ func New(logf logger.Logf, tunName string) (tun.Device, string, error) { default: return nil, "", errors.New("bogus tap argument") } - dev, err = createTAP(tapName, bridgeName) + dev, err = createTAP(logf, tapName, bridgeName) } else { dev, err = tun.CreateTUN(tunName, int(DefaultTUNMTU())) } diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index dcd43d571..b0765b13d 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -109,9 +109,7 @@ type Wrapper struct { lastActivityAtomic mono.Time // time of last send or receive destIPActivity syncs.AtomicValue[map[netip.Addr]func()] - //lint:ignore U1000 used in tap_linux.go - destMACAtomic syncs.AtomicValue[[6]byte] - discoKey syncs.AtomicValue[key.DiscoPublic] + discoKey syncs.AtomicValue[key.DiscoPublic] // timeNow, if non-nil, will be used to obtain the current time. timeNow func() time.Time @@ -257,12 +255,6 @@ type tunVectorReadResult struct { dataOffset int } -type setWrapperer interface { - // setWrapper enables the underlying TUN/TAP to have access to the Wrapper. - // It MUST be called only once during initialization, other usage is unsafe. - setWrapper(*Wrapper) -} - // Start unblocks any Wrapper.Read calls that have already started // and makes the Wrapper functional. // @@ -313,10 +305,6 @@ func wrap(logf logger.Logf, tdev tun.Device, isTAP bool, m *usermetric.Registry) w.bufferConsumed <- struct{}{} w.noteActivity() - if sw, ok := w.tdev.(setWrapperer); ok { - sw.setWrapper(w) - } - return w } @@ -459,12 +447,18 @@ const ethernetFrameSize = 14 // 2 six byte MACs, 2 bytes ethertype func (t *Wrapper) pollVector() { sizes := make([]int, len(t.vectorBuffer)) readOffset := PacketStartOffset + reader := t.tdev.Read if t.isTAP { - readOffset = PacketStartOffset - ethernetFrameSize + type tapReader interface { + ReadEthernet(buffs [][]byte, sizes []int, offset int) (int, error) + } + if r, ok := t.tdev.(tapReader); ok { + readOffset = PacketStartOffset - ethernetFrameSize + reader = r.ReadEthernet + } } for range t.bufferConsumed { - DoRead: for i := range t.vectorBuffer { t.vectorBuffer[i] = t.vectorBuffer[i][:cap(t.vectorBuffer[i])] } @@ -474,7 +468,7 @@ func (t *Wrapper) pollVector() { if t.isClosed() { return } - n, err = t.tdev.Read(t.vectorBuffer[:], sizes, readOffset) + n, err = reader(t.vectorBuffer[:], sizes, readOffset) if t.isTAP && tapDebug { s := fmt.Sprintf("% x", t.vectorBuffer[0][:]) for strings.HasSuffix(s, " 00") { @@ -486,21 +480,6 @@ func (t *Wrapper) pollVector() { for i := range sizes[:n] { t.vectorBuffer[i] = t.vectorBuffer[i][:readOffset+sizes[i]] } - if t.isTAP { - if err == nil { - ethernetFrame := t.vectorBuffer[0][readOffset:] - if t.handleTAPFrame(ethernetFrame) { - goto DoRead - } - } - // Fall through. We got an IP packet. - if sizes[0] >= ethernetFrameSize { - t.vectorBuffer[0] = t.vectorBuffer[0][:readOffset+sizes[0]-ethernetFrameSize] - } - if tapDebug { - t.logf("tap regular frame: %x", t.vectorBuffer[0][PacketStartOffset:PacketStartOffset+sizes[0]]) - } - } t.sendVectorOutbound(tunVectorReadResult{ data: t.vectorBuffer[:n], dataOffset: PacketStartOffset, From 85241f8408fd73f47b776c87366d54d240440d24 Mon Sep 17 00:00:00 2001 From: Maisem Ali Date: Mon, 21 Oct 2024 17:00:41 -0700 Subject: [PATCH 0072/1708] net/tstun: use /10 as subnet for TAP mode; read IP from netmap Few changes to resolve TODOs in the code: - Instead of using a hardcoded IP, get it from the netmap. 
- Use 100.100.100.100 as the gateway IP - Use the /10 CGNAT range instead of a random /24 Updates #2589 Signed-off-by: Maisem Ali --- net/tstun/tap_linux.go | 66 ++++++++++++++++++++++++++++-------------- net/tstun/wrap.go | 11 ++++++- 2 files changed, 54 insertions(+), 23 deletions(-) diff --git a/net/tstun/tap_linux.go b/net/tstun/tap_linux.go index c366b0560..8a00a9692 100644 --- a/net/tstun/tap_linux.go +++ b/net/tstun/tap_linux.go @@ -25,6 +25,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/transport/udp" "tailscale.com/net/netaddr" "tailscale.com/net/packet" + "tailscale.com/net/tsaddr" "tailscale.com/syncs" "tailscale.com/types/ipproto" "tailscale.com/types/logger" @@ -158,7 +159,7 @@ func (t *tapDevice) handleTAPFrame(ethBuf []byte) bool { // If the client's asking about their own IP, tell them it's // their own MAC. TODO(bradfitz): remove String allocs. - if net.IP(req.ProtocolAddressTarget()).String() == theClientIP { + if net.IP(req.ProtocolAddressTarget()).String() == t.clientIPv4.Load() { copy(res.HardwareAddressSender(), ethSrcMAC) } else { copy(res.HardwareAddressSender(), ourMAC[:]) @@ -178,9 +179,12 @@ func (t *tapDevice) handleTAPFrame(ethBuf []byte) bool { } } -// TODO(bradfitz): remove these hard-coded values and move from a /24 to a /10 CGNAT as the range. -const theClientIP = "100.70.145.3" // TODO: make dynamic from netmap -const routerIP = "100.70.145.1" // must be in same netmask (currently hack at /24) as theClientIP +var ( + // routerIP is the IP address of the DHCP server. + routerIP = net.ParseIP(tsaddr.TailscaleServiceIPString) + // cgnatNetMask is the netmask of the 100.64.0.0/10 CGNAT range. + cgnatNetMask = net.IPMask(net.ParseIP("255.192.0.0").To4()) +) // handleDHCPRequest handles receiving a raw TAP ethernet frame and reports whether // it's been handled as a DHCP request. That is, it reports whether the frame should @@ -228,17 +232,22 @@ func (t *tapDevice) handleDHCPRequest(ethBuf []byte) bool { } switch dp.MessageType() { case dhcpv4.MessageTypeDiscover: + ips := t.clientIPv4.Load() + if ips == "" { + t.logf("tap: DHCP no client IP") + return consumePacket + } offer, err := dhcpv4.New( dhcpv4.WithReply(dp), dhcpv4.WithMessageType(dhcpv4.MessageTypeOffer), - dhcpv4.WithRouter(net.ParseIP(routerIP)), // the default route - dhcpv4.WithDNS(net.ParseIP("100.100.100.100")), - dhcpv4.WithServerIP(net.ParseIP("100.100.100.100")), // TODO: what is this? - dhcpv4.WithOption(dhcpv4.OptServerIdentifier(net.ParseIP("100.100.100.100"))), - dhcpv4.WithYourIP(net.ParseIP(theClientIP)), + dhcpv4.WithRouter(routerIP), // the default route + dhcpv4.WithDNS(routerIP), + dhcpv4.WithServerIP(routerIP), // TODO: what is this? 
+ dhcpv4.WithOption(dhcpv4.OptServerIdentifier(routerIP)), + dhcpv4.WithYourIP(net.ParseIP(ips)), dhcpv4.WithLeaseTime(3600), // hour works //dhcpv4.WithHwAddr(ethSrcMAC), - dhcpv4.WithNetmask(net.IPMask(net.ParseIP("255.255.255.0").To4())), // TODO: wrong + dhcpv4.WithNetmask(cgnatNetMask), //dhcpv4.WithTransactionID(dp.TransactionID), ) if err != nil { @@ -258,16 +267,21 @@ func (t *tapDevice) handleDHCPRequest(ethBuf []byte) bool { t.logf("tap: wrote DHCP OFFER %v, %v", n, err) } case dhcpv4.MessageTypeRequest: + ips := t.clientIPv4.Load() + if ips == "" { + t.logf("tap: DHCP no client IP") + return consumePacket + } ack, err := dhcpv4.New( dhcpv4.WithReply(dp), dhcpv4.WithMessageType(dhcpv4.MessageTypeAck), - dhcpv4.WithDNS(net.ParseIP("100.100.100.100")), - dhcpv4.WithRouter(net.ParseIP(routerIP)), // the default route - dhcpv4.WithServerIP(net.ParseIP("100.100.100.100")), // TODO: what is this? - dhcpv4.WithOption(dhcpv4.OptServerIdentifier(net.ParseIP("100.100.100.100"))), - dhcpv4.WithYourIP(net.ParseIP(theClientIP)), // Hello world - dhcpv4.WithLeaseTime(3600), // hour works - dhcpv4.WithNetmask(net.IPMask(net.ParseIP("255.255.255.0").To4())), + dhcpv4.WithDNS(routerIP), + dhcpv4.WithRouter(routerIP), // the default route + dhcpv4.WithServerIP(routerIP), // TODO: what is this? + dhcpv4.WithOption(dhcpv4.OptServerIdentifier(routerIP)), + dhcpv4.WithYourIP(net.ParseIP(ips)), // Hello world + dhcpv4.WithLeaseTime(3600), // hour works + dhcpv4.WithNetmask(cgnatNetMask), ) if err != nil { t.logf("error building DHCP ack: %v", err) @@ -368,15 +382,23 @@ func newTAPDevice(logf logger.Logf, fd int, tapName string) (tun.Device, error) } type tapDevice struct { - file *os.File - logf func(format string, args ...any) - events chan tun.Event - name string - closeOnce sync.Once + file *os.File + logf func(format string, args ...any) + events chan tun.Event + name string + closeOnce sync.Once + clientIPv4 syncs.AtomicValue[string] destMACAtomic syncs.AtomicValue[[6]byte] } +var _ setIPer = (*tapDevice)(nil) + +func (t *tapDevice) SetIP(ipV4, ipV6TODO netip.Addr) error { + t.clientIPv4.Store(ipV4.String()) + return nil +} + func (t *tapDevice) File() *os.File { return t.file } diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index b0765b13d..0b858fc1c 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -802,10 +802,19 @@ func (pc *peerConfigTable) outboundPacketIsJailed(p *packet.Parsed) bool { return c.jailed } +type setIPer interface { + // SetIP sets the IP addresses of the TAP device. + SetIP(ipV4, ipV6 netip.Addr) error +} + // SetWGConfig is called when a new NetworkMap is received. 
func (t *Wrapper) SetWGConfig(wcfg *wgcfg.Config) { + if t.isTAP { + if sip, ok := t.tdev.(setIPer); ok { + sip.SetIP(findV4(wcfg.Addresses), findV6(wcfg.Addresses)) + } + } cfg := peerConfigTableFromWGConfig(wcfg) - old := t.peerConfig.Swap(cfg) if !reflect.DeepEqual(old, cfg) { t.logf("peer config: %v", cfg) From ae5bc88ebea2f96f67e54ba6886c63ee0af14b54 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 22 Oct 2024 09:40:17 -0500 Subject: [PATCH 0073/1708] health: fix spurious warning about DERP home region '0' Updates #13650 Change-Id: I6b0f165f66da3f881a4caa25d2d9936dc2a7f22c Signed-off-by: Brad Fitzpatrick --- health/health.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/health/health.go b/health/health.go index 16b41f075..3bebcb983 100644 --- a/health/health.go +++ b/health/health.go @@ -1038,11 +1038,15 @@ func (t *Tracker) updateBuiltinWarnablesLocked() { ArgDuration: d.Round(time.Second).String(), }) } - } else { + } else if homeDERP != 0 { t.setUnhealthyLocked(noDERPConnectionWarnable, Args{ ArgDERPRegionID: fmt.Sprint(homeDERP), ArgDERPRegionName: t.derpRegionNameLocked(homeDERP), }) + } else { + // No DERP home yet determined yet. There's probably some + // other problem or things are just starting up. + t.setHealthyLocked(noDERPConnectionWarnable) } if !t.ipnWantRunning { From b2665d9b89ee8c7be10a8e0a2fa36d35d21d8440 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Tue, 22 Oct 2024 14:17:48 -0500 Subject: [PATCH 0074/1708] net/netcheck: add a Now field to the netcheck Report This allows us to print the time that a netcheck was run, which is useful in debugging. Updates #10972 Signed-off-by: Andrew Dunham Change-Id: Id48d30d4eb6d5208efb2b1526a71d83fe7f9320b --- cmd/tailscale/cli/netcheck.go | 1 + net/netcheck/netcheck.go | 16 +++++++++------- net/netcheck/netcheck_test.go | 14 ++++++++++++++ 3 files changed, 24 insertions(+), 7 deletions(-) diff --git a/cmd/tailscale/cli/netcheck.go b/cmd/tailscale/cli/netcheck.go index 682cd99a3..312475ece 100644 --- a/cmd/tailscale/cli/netcheck.go +++ b/cmd/tailscale/cli/netcheck.go @@ -136,6 +136,7 @@ func printReport(dm *tailcfg.DERPMap, report *netcheck.Report) error { } printf("\nReport:\n") + printf("\t* Time: %v\n", report.Now.Format(time.RFC3339Nano)) printf("\t* UDP: %v\n", report.UDP) if report.GlobalV4.IsValid() { printf("\t* IPv4: yes, %s\n", report.GlobalV4) diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index bebf4c9b0..171483730 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -85,13 +85,14 @@ const ( // Report contains the result of a single netcheck. type Report struct { - UDP bool // a UDP STUN round trip completed - IPv6 bool // an IPv6 STUN round trip completed - IPv4 bool // an IPv4 STUN round trip completed - IPv6CanSend bool // an IPv6 packet was able to be sent - IPv4CanSend bool // an IPv4 packet was able to be sent - OSHasIPv6 bool // could bind a socket to ::1 - ICMPv4 bool // an ICMPv4 round trip completed + Now time.Time // the time the report was run + UDP bool // a UDP STUN round trip completed + IPv6 bool // an IPv6 STUN round trip completed + IPv4 bool // an IPv4 STUN round trip completed + IPv6CanSend bool // an IPv6 packet was able to be sent + IPv4CanSend bool // an IPv4 packet was able to be sent + OSHasIPv6 bool // could bind a socket to ::1 + ICMPv4 bool // an ICMPv4 round trip completed // MappingVariesByDestIP is whether STUN results depend which // STUN server you're talking to (on IPv4). 
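One subtlety with the new Now field: the hunk below stamps it with now.UTC(), and converting to UTC drops the monotonic clock reading and changes the Location, so the stored value generally will not match an injected test clock under == or reflect.DeepEqual even though it denotes the same instant. The updated test accounts for this by comparing with time.Time.Equal. A small standalone illustration of the standard-library behavior (not code from this patch):

package main

import (
	"fmt"
	"reflect"
	"time"
)

func main() {
	t1 := time.Now()
	t2 := t1.UTC() // same instant; UTC() strips the monotonic reading and swaps the Location

	fmt.Println(t1 == t2)                  // false: the struct representations differ
	fmt.Println(reflect.DeepEqual(t1, t2)) // false, for the same reason
	fmt.Println(t1.Equal(t2))              // true: both denote the same instant
}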
@@ -1297,6 +1298,7 @@ func (c *Client) addReportHistoryAndSetPreferredDERP(rs *reportState, r *Report, c.prev = map[time.Time]*Report{} } now := c.timeNow() + r.Now = now.UTC() c.prev[now] = r c.last = r diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go index 964014203..2780c9c44 100644 --- a/net/netcheck/netcheck_test.go +++ b/net/netcheck/netcheck_test.go @@ -28,6 +28,9 @@ func newTestClient(t testing.TB) *Client { c := &Client{ NetMon: netmon.NewStatic(), Logf: t.Logf, + TimeNow: func() time.Time { + return time.Unix(1729624521, 0) + }, } return c } @@ -52,6 +55,9 @@ func TestBasic(t *testing.T) { if !r.UDP { t.Error("want UDP") } + if r.Now.IsZero() { + t.Error("Now is zero") + } if len(r.RegionLatency) != 1 { t.Errorf("expected 1 key in DERPLatency; got %+v", r.RegionLatency) } @@ -130,6 +136,14 @@ func TestWorksWhenUDPBlocked(t *testing.T) { want := newReport() + // The Now field can't be compared with reflect.DeepEqual; check using + // the Equal method and then overwrite it so that the comparison below + // succeeds. + if !r.Now.Equal(c.TimeNow()) { + t.Errorf("Now = %v; want %v", r.Now, c.TimeNow()) + } + want.Now = r.Now + // The IPv4CanSend flag gets set differently across platforms. // On Windows this test detects false, while on Linux detects true. // That's not relevant to this test, so just accept what we're From 212270463b2916938a06db251621b7d2f15b08fb Mon Sep 17 00:00:00 2001 From: Paul Scott <408401+icio@users.noreply.github.com> Date: Thu, 24 Oct 2024 09:41:54 -0500 Subject: [PATCH 0075/1708] cmd/testwrapper: add pkg runtime to output (#13894) Fixes #13893 Signed-off-by: Paul Scott --- cmd/testwrapper/testwrapper.go | 25 ++++++++++++++++--------- cmd/testwrapper/testwrapper_test.go | 6 +++++- 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/cmd/testwrapper/testwrapper.go b/cmd/testwrapper/testwrapper.go index 9b8d7a7c1..f6ff8f00a 100644 --- a/cmd/testwrapper/testwrapper.go +++ b/cmd/testwrapper/testwrapper.go @@ -42,6 +42,7 @@ type testAttempt struct { testName string // "TestFoo" outcome string // "pass", "fail", "skip" logs bytes.Buffer + start, end time.Time isMarkedFlaky bool // set if the test is marked as flaky issueURL string // set if the test is marked as flaky @@ -132,11 +133,17 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te } pkg := goOutput.Package pkgTests := resultMap[pkg] + if pkgTests == nil { + pkgTests = make(map[string]*testAttempt) + resultMap[pkg] = pkgTests + } if goOutput.Test == "" { switch goOutput.Action { + case "start": + pkgTests[""] = &testAttempt{start: goOutput.Time} case "fail", "pass", "skip": for _, test := range pkgTests { - if test.outcome == "" { + if test.testName != "" && test.outcome == "" { test.outcome = "fail" ch <- test } @@ -144,15 +151,13 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te ch <- &testAttempt{ pkg: goOutput.Package, outcome: goOutput.Action, + start: pkgTests[""].start, + end: goOutput.Time, pkgFinished: true, } } continue } - if pkgTests == nil { - pkgTests = make(map[string]*testAttempt) - resultMap[pkg] = pkgTests - } testName := goOutput.Test if test, _, isSubtest := strings.Cut(goOutput.Test, "/"); isSubtest { testName = test @@ -168,8 +173,10 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te pkgTests[testName] = &testAttempt{ pkg: pkg, testName: testName, + start: goOutput.Time, } case "skip", "pass", "fail": + pkgTests[testName].end = goOutput.Time 
pkgTests[testName].outcome = goOutput.Action ch <- pkgTests[testName] case "output": @@ -213,7 +220,7 @@ func main() { firstRun.tests = append(firstRun.tests, &packageTests{Pattern: pkg}) } toRun := []*nextRun{firstRun} - printPkgOutcome := func(pkg, outcome string, attempt int) { + printPkgOutcome := func(pkg, outcome string, attempt int, runtime time.Duration) { if outcome == "skip" { fmt.Printf("?\t%s [skipped/no tests] \n", pkg) return @@ -225,10 +232,10 @@ func main() { outcome = "FAIL" } if attempt > 1 { - fmt.Printf("%s\t%s [attempt=%d]\n", outcome, pkg, attempt) + fmt.Printf("%s\t%s\t%.3fs\t[attempt=%d]\n", outcome, pkg, runtime.Seconds(), attempt) return } - fmt.Printf("%s\t%s\n", outcome, pkg) + fmt.Printf("%s\t%s\t%.3fs\n", outcome, pkg, runtime.Seconds()) } // Check for -coverprofile argument and filter it out @@ -307,7 +314,7 @@ func main() { // when a package times out. failed = true } - printPkgOutcome(tr.pkg, tr.outcome, thisRun.attempt) + printPkgOutcome(tr.pkg, tr.outcome, thisRun.attempt, tr.end.Sub(tr.start)) continue } if testingVerbose || tr.outcome == "fail" { diff --git a/cmd/testwrapper/testwrapper_test.go b/cmd/testwrapper/testwrapper_test.go index d7dbccd09..fb2ed2c52 100644 --- a/cmd/testwrapper/testwrapper_test.go +++ b/cmd/testwrapper/testwrapper_test.go @@ -10,6 +10,7 @@ import ( "os" "os/exec" "path/filepath" + "regexp" "sync" "testing" ) @@ -76,7 +77,10 @@ func TestFlakeRun(t *testing.T) { t.Fatalf("go run . %s: %s with output:\n%s", testfile, err, out) } - want := []byte("ok\t" + testfile + " [attempt=2]") + // Replace the unpredictable timestamp with "0.00s". + out = regexp.MustCompile(`\t\d+\.\d\d\ds\t`).ReplaceAll(out, []byte("\t0.00s\t")) + + want := []byte("ok\t" + testfile + "\t0.00s\t[attempt=2]") if !bytes.Contains(out, want) { t.Fatalf("wanted output containing %q but got:\n%s", want, out) } From 7fe6e508588c6359fc51b0221aa1c20ac39e3eaa Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Thu, 24 Oct 2024 11:43:22 -0500 Subject: [PATCH 0076/1708] net/dns/resolver: fix test flake Updates #13902 Signed-off-by: Andrew Dunham Change-Id: Ib2def19caad17367e9a31786ac969278e65f51c6 --- net/dns/resolver/forwarder_test.go | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index e341186ec..f3e592d4f 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -27,6 +27,7 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" + "tailscale.com/tstest" "tailscale.com/types/dnstype" ) @@ -276,6 +277,8 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on tb.Fatal("cannot skip both UDP and TCP servers") } + logf := tstest.WhileTestRunningLogger(tb) + tcpResponse := make([]byte, len(response)+2) binary.BigEndian.PutUint16(tcpResponse, uint16(len(response))) copy(tcpResponse[2:], response) @@ -329,13 +332,13 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on // Read the length header, then the buffer var length uint16 if err := binary.Read(conn, binary.BigEndian, &length); err != nil { - tb.Logf("error reading length header: %v", err) + logf("error reading length header: %v", err) return } req := make([]byte, length) n, err := io.ReadFull(conn, req) if err != nil { - tb.Logf("error reading query: %v", err) + logf("error reading query: %v", err) return } req = req[:n] @@ -343,7 +346,7 @@ func runDNSServer(tb testing.TB, opts 
*testDNSServerOptions, response []byte, on // Write response if _, err := conn.Write(tcpResponse); err != nil { - tb.Logf("error writing response: %v", err) + logf("error writing response: %v", err) return } } @@ -367,7 +370,7 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on handleUDP := func(addr netip.AddrPort, req []byte) { onRequest(false, req) if _, err := udpLn.WriteToUDPAddrPort(response, addr); err != nil { - tb.Logf("error writing response: %v", err) + logf("error writing response: %v", err) } } @@ -390,7 +393,7 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on tb.Cleanup(func() { tcpLn.Close() udpLn.Close() - tb.Logf("waiting for listeners to finish...") + logf("waiting for listeners to finish...") wg.Wait() }) return @@ -450,7 +453,8 @@ func makeLargeResponse(tb testing.TB, domain string) (request, response []byte) } func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports ...uint16) ([]byte, error) { - netMon, err := netmon.New(tb.Logf) + logf := tstest.WhileTestRunningLogger(tb) + netMon, err := netmon.New(logf) if err != nil { tb.Fatal(err) } @@ -458,7 +462,7 @@ func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports var dialer tsdial.Dialer dialer.SetNetMon(netMon) - fwd := newForwarder(tb.Logf, netMon, nil, &dialer, new(health.Tracker), nil) + fwd := newForwarder(logf, netMon, nil, &dialer, new(health.Tracker), nil) if modify != nil { modify(fwd) } From e815ae0ec4b718486af9be3a30d3058b65b28c4e Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 8 Oct 2024 10:50:14 -0500 Subject: [PATCH 0077/1708] util/syspolicy, ipn/ipnlocal: update syspolicy package to utilize syspolicy/rsop In this PR, we update the syspolicy package to utilize syspolicy/rsop under the hood, and remove syspolicy.CachingHandler, syspolicy.windowsHandler and related code which is no longer used. We mark the syspolicy.Handler interface and RegisterHandler/SetHandlerForTest functions as deprecated, but keep them temporarily until they are no longer used in other repos. We also update the package to register setting definitions for all existing policy settings and to register the Registry-based, Windows-specific policy stores when running on Windows. Finally, we update existing internal and external tests to use the new API and add a few more tests and benchmarks. 
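For callers, the practical change is that tests no longer install a syspolicy.Handler; they register an in-memory policy store instead. A minimal sketch of the new pattern, assembled from the helper calls that appear in the updated ipn/ipnlocal tests later in this patch; the package name, store name, scope, and policy value here are illustrative, and the *ForTest helpers appear to scope the registration to the individual test.

package example

import (
	"testing"

	"tailscale.com/util/syspolicy"
	"tailscale.com/util/syspolicy/setting"
	"tailscale.com/util/syspolicy/source"
)

func TestWithPolicyStore(t *testing.T) {
	// Register the implicit (well-known) policy setting definitions for the
	// duration of this test.
	syspolicy.RegisterWellKnownSettingsForTest(t)

	// Inject a device-scoped policy value through an in-memory test store.
	// "TestStore" and the ExitNodeID value are illustrative.
	store := source.NewTestStoreOf(t,
		source.TestSettingOf(syspolicy.ExitNodeID, "auto:any"),
	)
	syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, store)

	// Code under test now observes ExitNodeID = "auto:any" via the syspolicy
	// package for the remainder of this test.
}

This mirrors what the local_test.go changes below do in place of the removed mockSyspolicyHandler and errorSyspolicyHandler helpers.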
Updates #12687 Signed-off-by: Nick Khyl --- cmd/derper/depaware.txt | 15 +- cmd/k8s-operator/depaware.txt | 9 +- cmd/tailscale/depaware.txt | 10 +- cmd/tailscaled/depaware.txt | 9 +- ipn/ipnlocal/local_test.go | 225 ++++----------- util/syspolicy/caching_handler.go | 122 -------- util/syspolicy/caching_handler_test.go | 262 ----------------- util/syspolicy/handler.go | 114 +++++--- util/syspolicy/handler_test.go | 19 -- util/syspolicy/handler_windows.go | 105 ------- util/syspolicy/policy_keys.go | 103 ++++++- util/syspolicy/policy_keys_test.go | 95 +++++++ util/syspolicy/policy_keys_windows.go | 38 --- util/syspolicy/syspolicy.go | 152 +++++++--- util/syspolicy/syspolicy_test.go | 377 ++++++++++++++++++------- util/syspolicy/syspolicy_windows.go | 92 ++++++ 16 files changed, 822 insertions(+), 925 deletions(-) delete mode 100644 util/syspolicy/caching_handler.go delete mode 100644 util/syspolicy/caching_handler_test.go delete mode 100644 util/syspolicy/handler_test.go delete mode 100644 util/syspolicy/handler_windows.go create mode 100644 util/syspolicy/policy_keys_test.go delete mode 100644 util/syspolicy/policy_keys_windows.go create mode 100644 util/syspolicy/syspolicy_windows.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 362b07882..e20c4e556 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -164,11 +164,16 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/slicesx from tailscale.com/cmd/derper+ tailscale.com/util/syspolicy from tailscale.com/ipn tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ - tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy - tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ + tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy + tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ + tailscale.com/util/testenv from tailscale.com/util/syspolicy+ tailscale.com/util/usermetric from tailscale.com/health tailscale.com/util/vizerror from tailscale.com/tailcfg+ W 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ + W 💣 tailscale.com/util/winutil/gp from tailscale.com/util/syspolicy/source W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ tailscale.com/version from tailscale.com/derp+ tailscale.com/version/distro from tailscale.com/envknob+ @@ -189,7 +194,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ W golang.org/x/exp/constraints from tailscale.com/util/winutil - golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting + golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting+ L golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from net/http @@ -250,7 +255,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa encoding/pem from crypto/tls+ errors from bufio+ expvar from github.com/prometheus/client_golang/prometheus+ - flag from tailscale.com/cmd/derper + flag from tailscale.com/cmd/derper+ fmt 
from compress/flate+ go/token from google.golang.org/protobuf/internal/strs hash from crypto+ @@ -284,7 +289,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa os from crypto/rand+ os/exec from github.com/coreos/go-iptables/iptables+ os/signal from tailscale.com/cmd/derper - W os/user from tailscale.com/util/winutil + W os/user from tailscale.com/util/winutil+ path from github.com/prometheus/client_golang/prometheus/internal+ path/filepath from crypto/x509+ reflect from crypto/x509+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 19d6808d7..2ad3978c9 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -812,8 +812,11 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/slicesx from tailscale.com/appc+ tailscale.com/util/syspolicy from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ - tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy - tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ + tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy + tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ @@ -823,7 +826,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/vizerror from tailscale.com/tailcfg+ 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ - W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns + W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns+ W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ tailscale.com/util/zstdframe from tailscale.com/control/controlclient+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 765bbc483..cce76a81e 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -174,14 +174,18 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ tailscale.com/util/syspolicy from tailscale.com/ipn tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ - tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy - tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy - tailscale.com/util/testenv from tailscale.com/cmd/tailscale/cli + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ + tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy + tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ + tailscale.com/util/testenv from 
tailscale.com/cmd/tailscale/cli+ tailscale.com/util/truncate from tailscale.com/cmd/tailscale/cli tailscale.com/util/usermetric from tailscale.com/health tailscale.com/util/vizerror from tailscale.com/tailcfg+ W 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate + W 💣 tailscale.com/util/winutil/gp from tailscale.com/util/syspolicy/source W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ tailscale.com/version from tailscale.com/client/web+ tailscale.com/version/distro from tailscale.com/client/web+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 26165d659..b3a4aa86f 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -401,8 +401,11 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ tailscale.com/util/syspolicy from tailscale.com/cmd/tailscaled+ tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ - tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy - tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ + tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy + tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/ipn/ipnlocal+ @@ -412,7 +415,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/vizerror from tailscale.com/tailcfg+ 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ - W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns + W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns+ W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ tailscale.com/util/zstdframe from tailscale.com/control/controlclient+ diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 9a8fa5e02..5fee5d00e 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -54,6 +54,8 @@ import ( "tailscale.com/util/must" "tailscale.com/util/set" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/syspolicy/source" "tailscale.com/wgengine" "tailscale.com/wgengine/filter" "tailscale.com/wgengine/wgcfg" @@ -1559,94 +1561,6 @@ func dnsResponse(domain, address string) []byte { return must.Get(b.Finish()) } -type errorSyspolicyHandler struct { - t *testing.T - err error - key syspolicy.Key - allowKeys map[syspolicy.Key]*string -} - -func (h *errorSyspolicyHandler) ReadString(key string) (string, error) { - sk := syspolicy.Key(key) - if _, ok := h.allowKeys[sk]; !ok { - h.t.Errorf("ReadString: %q is not in list of permitted keys", h.key) - } - if sk == h.key { - return "", h.err - } - return "", syspolicy.ErrNoSuchKey -} - -func (h *errorSyspolicyHandler) ReadUInt64(key string) (uint64, error) { - 
h.t.Errorf("ReadUInt64(%q) unexpectedly called", key) - return 0, syspolicy.ErrNoSuchKey -} - -func (h *errorSyspolicyHandler) ReadBoolean(key string) (bool, error) { - h.t.Errorf("ReadBoolean(%q) unexpectedly called", key) - return false, syspolicy.ErrNoSuchKey -} - -func (h *errorSyspolicyHandler) ReadStringArray(key string) ([]string, error) { - h.t.Errorf("ReadStringArray(%q) unexpectedly called", key) - return nil, syspolicy.ErrNoSuchKey -} - -type mockSyspolicyHandler struct { - t *testing.T - // stringPolicies is the collection of policies that we expect to see - // queried by the current test. If the policy is expected but unset, then - // use nil, otherwise use a string equal to the policy's desired value. - stringPolicies map[syspolicy.Key]*string - // stringArrayPolicies is the collection of policies that we expected to see - // queries by the current test, that return policy string arrays. - stringArrayPolicies map[syspolicy.Key][]string - // failUnknownPolicies is set if policies other than those in stringPolicies - // (uint64 or bool policies are not supported by mockSyspolicyHandler yet) - // should be considered a test failure if they are queried. - failUnknownPolicies bool -} - -func (h *mockSyspolicyHandler) ReadString(key string) (string, error) { - if s, ok := h.stringPolicies[syspolicy.Key(key)]; ok { - if s == nil { - return "", syspolicy.ErrNoSuchKey - } - return *s, nil - } - if h.failUnknownPolicies { - h.t.Errorf("ReadString(%q) unexpectedly called", key) - } - return "", syspolicy.ErrNoSuchKey -} - -func (h *mockSyspolicyHandler) ReadUInt64(key string) (uint64, error) { - if h.failUnknownPolicies { - h.t.Errorf("ReadUInt64(%q) unexpectedly called", key) - } - return 0, syspolicy.ErrNoSuchKey -} - -func (h *mockSyspolicyHandler) ReadBoolean(key string) (bool, error) { - if h.failUnknownPolicies { - h.t.Errorf("ReadBoolean(%q) unexpectedly called", key) - } - return false, syspolicy.ErrNoSuchKey -} - -func (h *mockSyspolicyHandler) ReadStringArray(key string) ([]string, error) { - if h.failUnknownPolicies { - h.t.Errorf("ReadStringArray(%q) unexpectedly called", key) - } - if s, ok := h.stringArrayPolicies[syspolicy.Key(key)]; ok { - if s == nil { - return []string{}, syspolicy.ErrNoSuchKey - } - return s, nil - } - return nil, syspolicy.ErrNoSuchKey -} - func TestSetExitNodeIDPolicy(t *testing.T) { pfx := netip.MustParsePrefix tests := []struct { @@ -1856,23 +1770,18 @@ func TestSetExitNodeIDPolicy(t *testing.T) { }, } + syspolicy.RegisterWellKnownSettingsForTest(t) + for _, test := range tests { t.Run(test.name, func(t *testing.T) { b := newTestBackend(t) - msh := &mockSyspolicyHandler{ - t: t, - stringPolicies: map[syspolicy.Key]*string{ - syspolicy.ExitNodeID: nil, - syspolicy.ExitNodeIP: nil, - }, - } - if test.exitNodeIDKey { - msh.stringPolicies[syspolicy.ExitNodeID] = &test.exitNodeID - } - if test.exitNodeIPKey { - msh.stringPolicies[syspolicy.ExitNodeIP] = &test.exitNodeIP - } - syspolicy.SetHandlerForTest(t, msh) + + policyStore := source.NewTestStoreOf(t, + source.TestSettingOf(syspolicy.ExitNodeID, test.exitNodeID), + source.TestSettingOf(syspolicy.ExitNodeIP, test.exitNodeIP), + ) + syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) + if test.nm == nil { test.nm = new(netmap.NetworkMap) } @@ -1994,13 +1903,13 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { report: report, }, } - msh := &mockSyspolicyHandler{ - t: t, - stringPolicies: map[syspolicy.Key]*string{ - syspolicy.ExitNodeID: ptr.To("auto:any"), - }, 
- } - syspolicy.SetHandlerForTest(t, msh) + + syspolicy.RegisterWellKnownSettingsForTest(t) + policyStore := source.NewTestStoreOf(t, source.TestSettingOf( + syspolicy.ExitNodeID, "auto:any", + )) + syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { b := newTestLocalBackend(t) @@ -2049,13 +1958,11 @@ func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { } cc = newClient(t, opts) b.cc = cc - msh := &mockSyspolicyHandler{ - t: t, - stringPolicies: map[syspolicy.Key]*string{ - syspolicy.ExitNodeID: ptr.To("auto:any"), - }, - } - syspolicy.SetHandlerForTest(t, msh) + syspolicy.RegisterWellKnownSettingsForTest(t) + policyStore := source.NewTestStoreOf(t, source.TestSettingOf( + syspolicy.ExitNodeID, "auto:any", + )) + syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) peer1 := makePeer(1, withCap(26), withDERP(3), withSuggest(), withExitRoutes()) peer2 := makePeer(2, withCap(26), withDERP(2), withSuggest(), withExitRoutes()) selfNode := tailcfg.Node{ @@ -2160,13 +2067,11 @@ func TestSetControlClientStatusAutoExitNode(t *testing.T) { DERPMap: derpMap, } b := newTestLocalBackend(t) - msh := &mockSyspolicyHandler{ - t: t, - stringPolicies: map[syspolicy.Key]*string{ - syspolicy.ExitNodeID: ptr.To("auto:any"), - }, - } - syspolicy.SetHandlerForTest(t, msh) + syspolicy.RegisterWellKnownSettingsForTest(t) + policyStore := source.NewTestStoreOf(t, source.TestSettingOf( + syspolicy.ExitNodeID, "auto:any", + )) + syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) b.netMap = nm b.lastSuggestedExitNode = peer1.StableID() b.sys.MagicSock.Get().SetLastNetcheckReportForTest(b.ctx, report) @@ -2400,17 +2305,16 @@ func TestApplySysPolicy(t *testing.T) { }, } + syspolicy.RegisterWellKnownSettingsForTest(t) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - msh := &mockSyspolicyHandler{ - t: t, - stringPolicies: make(map[syspolicy.Key]*string, len(tt.stringPolicies)), - } + settings := make([]source.TestSetting[string], 0, len(tt.stringPolicies)) for p, v := range tt.stringPolicies { - v := v // construct a unique pointer for each policy value - msh.stringPolicies[p] = &v + settings = append(settings, source.TestSettingOf(p, v)) } - syspolicy.SetHandlerForTest(t, msh) + policyStore := source.NewTestStoreOf(t, settings...) 
+ syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) t.Run("unit", func(t *testing.T) { prefs := tt.prefs.Clone() @@ -2546,35 +2450,19 @@ func TestPreferencePolicyInfo(t *testing.T) { }, } + syspolicy.RegisterWellKnownSettingsForTest(t) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { for _, pp := range preferencePolicies { t.Run(string(pp.key), func(t *testing.T) { - var h syspolicy.Handler - - allPolicies := make(map[syspolicy.Key]*string, len(preferencePolicies)+1) - allPolicies[syspolicy.ControlURL] = nil - for _, pp := range preferencePolicies { - allPolicies[pp.key] = nil + s := source.TestSetting[string]{ + Key: pp.key, + Error: tt.policyError, + Value: tt.policyValue, } - - if tt.policyError != nil { - h = &errorSyspolicyHandler{ - t: t, - err: tt.policyError, - key: pp.key, - allowKeys: allPolicies, - } - } else { - msh := &mockSyspolicyHandler{ - t: t, - stringPolicies: allPolicies, - failUnknownPolicies: true, - } - msh.stringPolicies[pp.key] = &tt.policyValue - h = msh - } - syspolicy.SetHandlerForTest(t, h) + policyStore := source.NewTestStoreOf(t, s) + syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) prefs := defaultPrefs.AsStruct() pp.set(prefs, tt.initialValue) @@ -3825,15 +3713,16 @@ func TestShouldAutoExitNode(t *testing.T) { expectedBool: false, }, } + + syspolicy.RegisterWellKnownSettingsForTest(t) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - msh := &mockSyspolicyHandler{ - t: t, - stringPolicies: map[syspolicy.Key]*string{ - syspolicy.ExitNodeID: ptr.To(tt.exitNodeIDPolicyValue), - }, - } - syspolicy.SetHandlerForTest(t, msh) + policyStore := source.NewTestStoreOf(t, source.TestSettingOf( + syspolicy.ExitNodeID, tt.exitNodeIDPolicyValue, + )) + syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) + got := shouldAutoExitNode() if got != tt.expectedBool { t.Fatalf("expected %v got %v for %v policy value", tt.expectedBool, got, tt.exitNodeIDPolicyValue) @@ -3971,17 +3860,13 @@ func TestFillAllowedSuggestions(t *testing.T) { want: []tailcfg.StableNodeID{"ABC", "def", "gHiJ"}, }, } + syspolicy.RegisterWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mh := mockSyspolicyHandler{ - t: t, - } - if tt.allowPolicy != nil { - mh.stringArrayPolicies = map[syspolicy.Key][]string{ - syspolicy.AllowedSuggestedExitNodes: tt.allowPolicy, - } - } - syspolicy.SetHandlerForTest(t, &mh) + policyStore := source.NewTestStoreOf(t, source.TestSettingOf( + syspolicy.AllowedSuggestedExitNodes, tt.allowPolicy, + )) + syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) got := fillAllowedSuggestions() if got == nil { diff --git a/util/syspolicy/caching_handler.go b/util/syspolicy/caching_handler.go deleted file mode 100644 index 5192958bc..000000000 --- a/util/syspolicy/caching_handler.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package syspolicy - -import ( - "errors" - "sync" -) - -// CachingHandler is a handler that reads policies from an underlying handler the first time each key is requested -// and permanently caches the result unless there is an error. If there is an ErrNoSuchKey error, that result is cached, -// otherwise the actual error is returned and the next read for that key will retry using the handler. 
-type CachingHandler struct { - mu sync.Mutex - strings map[string]string - uint64s map[string]uint64 - bools map[string]bool - strArrs map[string][]string - notFound map[string]bool - handler Handler -} - -// NewCachingHandler creates a CachingHandler given a handler. -func NewCachingHandler(handler Handler) *CachingHandler { - return &CachingHandler{ - handler: handler, - strings: make(map[string]string), - uint64s: make(map[string]uint64), - bools: make(map[string]bool), - strArrs: make(map[string][]string), - notFound: make(map[string]bool), - } -} - -// ReadString reads the policy settings value string given the key. -// ReadString first reads from the handler's cache before resorting to using the handler. -func (ch *CachingHandler) ReadString(key string) (string, error) { - ch.mu.Lock() - defer ch.mu.Unlock() - if val, ok := ch.strings[key]; ok { - return val, nil - } - if notFound := ch.notFound[key]; notFound { - return "", ErrNoSuchKey - } - val, err := ch.handler.ReadString(key) - if errors.Is(err, ErrNoSuchKey) { - ch.notFound[key] = true - return "", err - } else if err != nil { - return "", err - } - ch.strings[key] = val - return val, nil -} - -// ReadUInt64 reads the policy settings uint64 value given the key. -// ReadUInt64 first reads from the handler's cache before resorting to using the handler. -func (ch *CachingHandler) ReadUInt64(key string) (uint64, error) { - ch.mu.Lock() - defer ch.mu.Unlock() - if val, ok := ch.uint64s[key]; ok { - return val, nil - } - if notFound := ch.notFound[key]; notFound { - return 0, ErrNoSuchKey - } - val, err := ch.handler.ReadUInt64(key) - if errors.Is(err, ErrNoSuchKey) { - ch.notFound[key] = true - return 0, err - } else if err != nil { - return 0, err - } - ch.uint64s[key] = val - return val, nil -} - -// ReadBoolean reads the policy settings boolean value given the key. -// ReadBoolean first reads from the handler's cache before resorting to using the handler. -func (ch *CachingHandler) ReadBoolean(key string) (bool, error) { - ch.mu.Lock() - defer ch.mu.Unlock() - if val, ok := ch.bools[key]; ok { - return val, nil - } - if notFound := ch.notFound[key]; notFound { - return false, ErrNoSuchKey - } - val, err := ch.handler.ReadBoolean(key) - if errors.Is(err, ErrNoSuchKey) { - ch.notFound[key] = true - return false, err - } else if err != nil { - return false, err - } - ch.bools[key] = val - return val, nil -} - -// ReadBoolean reads the policy settings boolean value given the key. -// ReadBoolean first reads from the handler's cache before resorting to using the handler. 
-func (ch *CachingHandler) ReadStringArray(key string) ([]string, error) { - ch.mu.Lock() - defer ch.mu.Unlock() - if val, ok := ch.strArrs[key]; ok { - return val, nil - } - if notFound := ch.notFound[key]; notFound { - return nil, ErrNoSuchKey - } - val, err := ch.handler.ReadStringArray(key) - if errors.Is(err, ErrNoSuchKey) { - ch.notFound[key] = true - return nil, err - } else if err != nil { - return nil, err - } - ch.strArrs[key] = val - return val, nil -} diff --git a/util/syspolicy/caching_handler_test.go b/util/syspolicy/caching_handler_test.go deleted file mode 100644 index 881f6ff83..000000000 --- a/util/syspolicy/caching_handler_test.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package syspolicy - -import ( - "testing" -) - -func TestHandlerReadString(t *testing.T) { - tests := []struct { - name string - key string - handlerKey Key - handlerValue string - handlerError error - preserveHandler bool - wantValue string - wantErr error - strings map[string]string - expectedCalls int - }{ - { - name: "read existing cached values", - key: "test", - handlerKey: "do not read", - strings: map[string]string{"test": "foo"}, - wantValue: "foo", - expectedCalls: 0, - }, - { - name: "read existing values not cached", - key: "test", - handlerKey: "test", - handlerValue: "foo", - wantValue: "foo", - expectedCalls: 1, - }, - { - name: "error no such key", - key: "test", - handlerKey: "test", - handlerError: ErrNoSuchKey, - wantErr: ErrNoSuchKey, - expectedCalls: 1, - }, - { - name: "other error", - key: "test", - handlerKey: "test", - handlerError: someOtherError, - wantErr: someOtherError, - preserveHandler: true, - expectedCalls: 2, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - testHandler := &testHandler{ - t: t, - key: tt.handlerKey, - s: tt.handlerValue, - err: tt.handlerError, - } - cache := NewCachingHandler(testHandler) - if tt.strings != nil { - cache.strings = tt.strings - } - got, err := cache.ReadString(tt.key) - if err != tt.wantErr { - t.Errorf("err=%v want %v", err, tt.wantErr) - } - if got != tt.wantValue { - t.Errorf("got %v want %v", got, cache.strings[tt.key]) - } - if !tt.preserveHandler { - testHandler.key, testHandler.s, testHandler.err = "do not read", "", nil - } - got, err = cache.ReadString(tt.key) - if err != tt.wantErr { - t.Errorf("repeat err=%v want %v", err, tt.wantErr) - } - if got != tt.wantValue { - t.Errorf("repeat got %v want %v", got, cache.strings[tt.key]) - } - if testHandler.calls != tt.expectedCalls { - t.Errorf("calls=%v want %v", testHandler.calls, tt.expectedCalls) - } - }) - } -} - -func TestHandlerReadUint64(t *testing.T) { - tests := []struct { - name string - key string - handlerKey Key - handlerValue uint64 - handlerError error - preserveHandler bool - wantValue uint64 - wantErr error - uint64s map[string]uint64 - expectedCalls int - }{ - { - name: "read existing cached values", - key: "test", - handlerKey: "do not read", - uint64s: map[string]uint64{"test": 1}, - wantValue: 1, - expectedCalls: 0, - }, - { - name: "read existing values not cached", - key: "test", - handlerKey: "test", - handlerValue: 1, - wantValue: 1, - expectedCalls: 1, - }, - { - name: "error no such key", - key: "test", - handlerKey: "test", - handlerError: ErrNoSuchKey, - wantErr: ErrNoSuchKey, - expectedCalls: 1, - }, - { - name: "other error", - key: "test", - handlerKey: "test", - handlerError: someOtherError, - wantErr: someOtherError, - preserveHandler: true, - 
expectedCalls: 2, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - testHandler := &testHandler{ - t: t, - key: tt.handlerKey, - u64: tt.handlerValue, - err: tt.handlerError, - } - cache := NewCachingHandler(testHandler) - if tt.uint64s != nil { - cache.uint64s = tt.uint64s - } - got, err := cache.ReadUInt64(tt.key) - if err != tt.wantErr { - t.Errorf("err=%v want %v", err, tt.wantErr) - } - if got != tt.wantValue { - t.Errorf("got %v want %v", got, cache.strings[tt.key]) - } - if !tt.preserveHandler { - testHandler.key, testHandler.s, testHandler.err = "do not read", "", nil - } - got, err = cache.ReadUInt64(tt.key) - if err != tt.wantErr { - t.Errorf("repeat err=%v want %v", err, tt.wantErr) - } - if got != tt.wantValue { - t.Errorf("repeat got %v want %v", got, cache.strings[tt.key]) - } - if testHandler.calls != tt.expectedCalls { - t.Errorf("calls=%v want %v", testHandler.calls, tt.expectedCalls) - } - }) - } - -} - -func TestHandlerReadBool(t *testing.T) { - tests := []struct { - name string - key string - handlerKey Key - handlerValue bool - handlerError error - preserveHandler bool - wantValue bool - wantErr error - bools map[string]bool - expectedCalls int - }{ - { - name: "read existing cached values", - key: "test", - handlerKey: "do not read", - bools: map[string]bool{"test": true}, - wantValue: true, - expectedCalls: 0, - }, - { - name: "read existing values not cached", - key: "test", - handlerKey: "test", - handlerValue: true, - wantValue: true, - expectedCalls: 1, - }, - { - name: "error no such key", - key: "test", - handlerKey: "test", - handlerError: ErrNoSuchKey, - wantErr: ErrNoSuchKey, - expectedCalls: 1, - }, - { - name: "other error", - key: "test", - handlerKey: "test", - handlerError: someOtherError, - wantErr: someOtherError, - preserveHandler: true, - expectedCalls: 2, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - testHandler := &testHandler{ - t: t, - key: tt.handlerKey, - b: tt.handlerValue, - err: tt.handlerError, - } - cache := NewCachingHandler(testHandler) - if tt.bools != nil { - cache.bools = tt.bools - } - got, err := cache.ReadBoolean(tt.key) - if err != tt.wantErr { - t.Errorf("err=%v want %v", err, tt.wantErr) - } - if got != tt.wantValue { - t.Errorf("got %v want %v", got, cache.strings[tt.key]) - } - if !tt.preserveHandler { - testHandler.key, testHandler.s, testHandler.err = "do not read", "", nil - } - got, err = cache.ReadBoolean(tt.key) - if err != tt.wantErr { - t.Errorf("repeat err=%v want %v", err, tt.wantErr) - } - if got != tt.wantValue { - t.Errorf("repeat got %v want %v", got, cache.strings[tt.key]) - } - if testHandler.calls != tt.expectedCalls { - t.Errorf("calls=%v want %v", testHandler.calls, tt.expectedCalls) - } - }) - } - -} diff --git a/util/syspolicy/handler.go b/util/syspolicy/handler.go index f1fad9770..f511f0a56 100644 --- a/util/syspolicy/handler.go +++ b/util/syspolicy/handler.go @@ -4,16 +4,17 @@ package syspolicy import ( - "errors" - "sync/atomic" + "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/rsop" + "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/syspolicy/source" ) -var ( - handlerUsed atomic.Bool - handler Handler = defaultHandler{} -) +// TODO(nickkhyl): delete this file once other repos are updated. // Handler reads system policies from OS-specific storage. +// +// Deprecated: implementing a [source.Store] should be preferred. 
type Handler interface { // ReadString reads the policy setting's string value for the given key. // It should return ErrNoSuchKey if the key does not have a value set. @@ -29,55 +30,88 @@ type Handler interface { ReadStringArray(key string) ([]string, error) } -// ErrNoSuchKey is returned by a Handler when the specified key does not have a -// value set. -var ErrNoSuchKey = errors.New("no such key") +// RegisterHandler wraps and registers the specified handler as the device's +// policy [source.Store] for the program's lifetime. +// +// Deprecated: using [RegisterStore] should be preferred. +func RegisterHandler(h Handler) { + rsop.RegisterStore("DeviceHandler", setting.DeviceScope, WrapHandler(h)) +} -// defaultHandler is the catch all syspolicy type for anything that isn't windows or apple. -type defaultHandler struct{} +// TB is a subset of testing.TB that we use to set up test helpers. +// It's defined here to avoid pulling in the testing package. +type TB = internal.TB -func (defaultHandler) ReadString(_ string) (string, error) { - return "", ErrNoSuchKey +// SetHandlerForTest wraps and sets the specified handler as the device's policy +// [source.Store] for the duration of tb. +// +// Deprecated: using [MustRegisterStoreForTest] should be preferred. +func SetHandlerForTest(tb TB, h Handler) { + RegisterWellKnownSettingsForTest(tb) + MustRegisterStoreForTest(tb, "DeviceHandler-TestOnly", setting.DefaultScope(), WrapHandler(h)) } -func (defaultHandler) ReadUInt64(_ string) (uint64, error) { - return 0, ErrNoSuchKey +var _ source.Store = (*handlerStore)(nil) + +// handlerStore is a [source.Store] that calls the underlying [Handler]. +// +// TODO(nickkhyl): remove it when the corp and android repos are updated. +type handlerStore struct { + h Handler } -func (defaultHandler) ReadBoolean(_ string) (bool, error) { - return false, ErrNoSuchKey +// WrapHandler returns a [source.Store] that wraps the specified [Handler]. +func WrapHandler(h Handler) source.Store { + return handlerStore{h} } -func (defaultHandler) ReadStringArray(_ string) ([]string, error) { - return nil, ErrNoSuchKey +// Lock implements [source.Lockable]. +func (s handlerStore) Lock() error { + if lockable, ok := s.h.(source.Lockable); ok { + return lockable.Lock() + } + return nil } -// markHandlerInUse is called before handler methods are called. -func markHandlerInUse() { - handlerUsed.Store(true) +// Unlock implements [source.Lockable]. +func (s handlerStore) Unlock() { + if lockable, ok := s.h.(source.Lockable); ok { + lockable.Unlock() + } } -// RegisterHandler initializes the policy handler and ensures registration will happen once. -func RegisterHandler(h Handler) { - // Technically this assignment is not concurrency safe, but in the - // event that there was any risk of a data race, we will panic due to - // the CompareAndSwap failing. - handler = h - if !handlerUsed.CompareAndSwap(false, true) { - panic("handler was already used before registration") +// RegisterChangeCallback implements [source.Changeable]. +func (s handlerStore) RegisterChangeCallback(callback func()) (unregister func(), err error) { + if changeable, ok := s.h.(source.Changeable); ok { + return changeable.RegisterChangeCallback(callback) } + return func() {}, nil } -// TB is a subset of testing.TB that we use to set up test helpers. -// It's defined here to avoid pulling in the testing package. -type TB interface { - Helper() - Cleanup(func()) +// ReadString implements [source.Store]. 
+func (s handlerStore) ReadString(key setting.Key) (string, error) { + return s.h.ReadString(string(key)) } -func SetHandlerForTest(tb TB, h Handler) { - tb.Helper() - oldHandler := handler - handler = h - tb.Cleanup(func() { handler = oldHandler }) +// ReadUInt64 implements [source.Store]. +func (s handlerStore) ReadUInt64(key setting.Key) (uint64, error) { + return s.h.ReadUInt64(string(key)) +} + +// ReadBoolean implements [source.Store]. +func (s handlerStore) ReadBoolean(key setting.Key) (bool, error) { + return s.h.ReadBoolean(string(key)) +} + +// ReadStringArray implements [source.Store]. +func (s handlerStore) ReadStringArray(key setting.Key) ([]string, error) { + return s.h.ReadStringArray(string(key)) +} + +// Done implements [source.Expirable]. +func (s handlerStore) Done() <-chan struct{} { + if expirable, ok := s.h.(source.Expirable); ok { + return expirable.Done() + } + return nil } diff --git a/util/syspolicy/handler_test.go b/util/syspolicy/handler_test.go deleted file mode 100644 index 39b18936f..000000000 --- a/util/syspolicy/handler_test.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package syspolicy - -import "testing" - -func TestDefaultHandlerReadValues(t *testing.T) { - var h defaultHandler - - got, err := h.ReadString(string(AdminConsoleVisibility)) - if got != "" || err != ErrNoSuchKey { - t.Fatalf("got %v err %v", got, err) - } - result, err := h.ReadUInt64(string(LogSCMInteractions)) - if result != 0 || err != ErrNoSuchKey { - t.Fatalf("got %v err %v", result, err) - } -} diff --git a/util/syspolicy/handler_windows.go b/util/syspolicy/handler_windows.go deleted file mode 100644 index 661853ead..000000000 --- a/util/syspolicy/handler_windows.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package syspolicy - -import ( - "errors" - "fmt" - - "tailscale.com/util/clientmetric" - "tailscale.com/util/winutil" -) - -var ( - windowsErrors = clientmetric.NewCounter("windows_syspolicy_errors") - windowsAny = clientmetric.NewGauge("windows_syspolicy_any") -) - -type windowsHandler struct{} - -func init() { - RegisterHandler(NewCachingHandler(windowsHandler{})) - - keyList := []struct { - isSet func(Key) bool - keys []Key - }{ - { - isSet: func(k Key) bool { - _, err := handler.ReadString(string(k)) - return err == nil - }, - keys: stringKeys, - }, - { - isSet: func(k Key) bool { - _, err := handler.ReadBoolean(string(k)) - return err == nil - }, - keys: boolKeys, - }, - { - isSet: func(k Key) bool { - _, err := handler.ReadUInt64(string(k)) - return err == nil - }, - keys: uint64Keys, - }, - } - - var anySet bool - for _, l := range keyList { - for _, k := range l.keys { - if !l.isSet(k) { - continue - } - clientmetric.NewGauge(fmt.Sprintf("windows_syspolicy_%s", k)).Set(1) - anySet = true - } - } - if anySet { - windowsAny.Set(1) - } -} - -func (windowsHandler) ReadString(key string) (string, error) { - s, err := winutil.GetPolicyString(key) - if errors.Is(err, winutil.ErrNoValue) { - err = ErrNoSuchKey - } else if err != nil { - windowsErrors.Add(1) - } - - return s, err -} - -func (windowsHandler) ReadUInt64(key string) (uint64, error) { - value, err := winutil.GetPolicyInteger(key) - if errors.Is(err, winutil.ErrNoValue) { - err = ErrNoSuchKey - } else if err != nil { - windowsErrors.Add(1) - } - return value, err -} - -func (windowsHandler) ReadBoolean(key string) (bool, error) { - value, err := 
winutil.GetPolicyInteger(key) - if errors.Is(err, winutil.ErrNoValue) { - err = ErrNoSuchKey - } else if err != nil { - windowsErrors.Add(1) - } - return value != 0, err -} - -func (windowsHandler) ReadStringArray(key string) ([]string, error) { - value, err := winutil.GetPolicyStringArray(key) - if errors.Is(err, winutil.ErrNoValue) { - err = ErrNoSuchKey - } else if err != nil { - windowsErrors.Add(1) - } - return value, err -} diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index ec0556a94..162885b27 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -3,10 +3,24 @@ package syspolicy -import "tailscale.com/util/syspolicy/setting" +import ( + "tailscale.com/types/lazy" + "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/testenv" +) +// Key is a string that uniquely identifies a policy and must remain unchanged +// once established and documented for a given policy setting. It may contain +// alphanumeric characters and zero or more [KeyPathSeparator]s to group +// individual policy settings into categories. type Key = setting.Key +// The const block below lists known policy keys. +// When adding a key to this list, remember to add a corresponding +// [setting.Definition] to [implicitDefinitions] below. +// Otherwise, the [TestKnownKeysRegistered] test will fail as a reminder. + const ( // Keys with a string value ControlURL Key = "LoginURL" // default ""; if blank, ipn uses ipn.DefaultControlURL. @@ -110,3 +124,90 @@ const ( // AllowedSuggestedExitNodes's string array value is a list of exit node IDs that restricts which exit nodes are considered when generating suggestions for exit nodes. AllowedSuggestedExitNodes Key = "AllowedSuggestedExitNodes" ) + +// implicitDefinitions is a list of [setting.Definition] that will be registered +// automatically when the policy setting definitions are first used by the syspolicy package hierarchy. +// This includes the first time a policy needs to be read from any source. 
+var implicitDefinitions = []*setting.Definition{ + // Device policy settings (can only be configured on a per-device basis): + setting.NewDefinition(AllowedSuggestedExitNodes, setting.DeviceSetting, setting.StringListValue), + setting.NewDefinition(ApplyUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(AuthKey, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(CheckUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(ControlURL, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(DeviceSerialNumber, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(EnableIncomingConnections, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(EnableRunExitNode, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(EnableServerMode, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(EnableTailscaleDNS, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(EnableTailscaleSubnets, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(ExitNodeAllowLANAccess, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(ExitNodeID, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(ExitNodeIP, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(FlushDNSOnSessionUnlock, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(LogSCMInteractions, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(LogTarget, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(MachineCertificateSubject, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(PostureChecking, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(Tailnet, setting.DeviceSetting, setting.StringValue), + + // User policy settings (can be configured on a user- or device-basis): + setting.NewDefinition(AdminConsoleVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(AutoUpdateVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(ExitNodeMenuVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(KeyExpirationNoticeTime, setting.UserSetting, setting.DurationValue), + setting.NewDefinition(ManagedByCaption, setting.UserSetting, setting.StringValue), + setting.NewDefinition(ManagedByOrganizationName, setting.UserSetting, setting.StringValue), + setting.NewDefinition(ManagedByURL, setting.UserSetting, setting.StringValue), + setting.NewDefinition(NetworkDevicesVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(PreferencesMenuVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(ResetToDefaultsVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(RunExitNodeVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(SuggestedExitNodeVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(TestMenuVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(UpdateMenuVisibility, setting.UserSetting, setting.VisibilityValue), +} + +func init() { + internal.Init.MustDefer(func() error { + // Avoid implicit [setting.Definition] registration during tests. + // Each test should control which policy settings to register. 
+ // Use [setting.SetDefinitionsForTest] to specify necessary definitions, + // or [setWellKnownSettingsForTest] to set implicit definitions for the test duration. + if testenv.InTest() { + return nil + } + for _, d := range implicitDefinitions { + setting.RegisterDefinition(d) + } + return nil + }) +} + +var implicitDefinitionMap lazy.SyncValue[setting.DefinitionMap] + +// WellKnownSettingDefinition returns a well-known, implicit setting definition by its key, +// or an [ErrNoSuchKey] if a policy setting with the specified key does not exist +// among implicit policy definitions. +func WellKnownSettingDefinition(k Key) (*setting.Definition, error) { + m, err := implicitDefinitionMap.GetErr(func() (setting.DefinitionMap, error) { + return setting.DefinitionMapOf(implicitDefinitions) + }) + if err != nil { + return nil, err + } + if d, ok := m[k]; ok { + return d, nil + } + return nil, ErrNoSuchKey +} + +// RegisterWellKnownSettingsForTest registers all implicit setting definitions +// for the duration of the test. +func RegisterWellKnownSettingsForTest(tb TB) { + tb.Helper() + err := setting.SetDefinitionsForTest(tb, implicitDefinitions...) + if err != nil { + tb.Fatalf("Failed to register well-known settings: %v", err) + } +} diff --git a/util/syspolicy/policy_keys_test.go b/util/syspolicy/policy_keys_test.go new file mode 100644 index 000000000..4d3260f3e --- /dev/null +++ b/util/syspolicy/policy_keys_test.go @@ -0,0 +1,95 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package syspolicy + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "go/types" + "os" + "reflect" + "strconv" + "testing" + + "tailscale.com/util/syspolicy/setting" +) + +func TestKnownKeysRegistered(t *testing.T) { + keyConsts, err := listStringConsts[Key]("policy_keys.go") + if err != nil { + t.Fatalf("listStringConsts failed: %v", err) + } + + m, err := setting.DefinitionMapOf(implicitDefinitions) + if err != nil { + t.Fatalf("definitionMapOf failed: %v", err) + } + + for _, key := range keyConsts { + t.Run(string(key), func(t *testing.T) { + d := m[key] + if d == nil { + t.Fatalf("%q was not registered", key) + } + if d.Key() != key { + t.Fatalf("d.Key got: %s, want %s", d.Key(), key) + } + }) + } +} + +func TestNotAWellKnownSetting(t *testing.T) { + d, err := WellKnownSettingDefinition("TestSettingDoesNotExist") + if d != nil || err == nil { + t.Fatalf("got %v, %v; want nil, %v", d, err, ErrNoSuchKey) + } +} + +func listStringConsts[T ~string](filename string) (map[string]T, error) { + fset := token.NewFileSet() + src, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + + f, err := parser.ParseFile(fset, filename, src, 0) + if err != nil { + return nil, err + } + + consts := make(map[string]T) + typeName := reflect.TypeFor[T]().Name() + for _, d := range f.Decls { + g, ok := d.(*ast.GenDecl) + if !ok || g.Tok != token.CONST { + continue + } + + for _, s := range g.Specs { + vs, ok := s.(*ast.ValueSpec) + if !ok || len(vs.Names) != len(vs.Values) { + continue + } + if typ, ok := vs.Type.(*ast.Ident); !ok || typ.Name != typeName { + continue + } + + for i, n := range vs.Names { + lit, ok := vs.Values[i].(*ast.BasicLit) + if !ok { + return nil, fmt.Errorf("unexpected string literal: %v = %v", n.Name, types.ExprString(vs.Values[i])) + } + val, err := strconv.Unquote(lit.Value) + if err != nil { + return nil, fmt.Errorf("unexpected string literal: %v = %v", n.Name, lit.Value) + } + consts[n.Name] = T(val) + } + } + } + + return consts, nil +} diff 
--git a/util/syspolicy/policy_keys_windows.go b/util/syspolicy/policy_keys_windows.go deleted file mode 100644 index 5e9a71695..000000000 --- a/util/syspolicy/policy_keys_windows.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package syspolicy - -var stringKeys = []Key{ - ControlURL, - LogTarget, - Tailnet, - ExitNodeID, - ExitNodeIP, - EnableIncomingConnections, - EnableServerMode, - ExitNodeAllowLANAccess, - EnableTailscaleDNS, - EnableTailscaleSubnets, - AdminConsoleVisibility, - NetworkDevicesVisibility, - TestMenuVisibility, - UpdateMenuVisibility, - RunExitNodeVisibility, - PreferencesMenuVisibility, - ExitNodeMenuVisibility, - AutoUpdateVisibility, - ResetToDefaultsVisibility, - KeyExpirationNoticeTime, - PostureChecking, - ManagedByOrganizationName, - ManagedByCaption, - ManagedByURL, -} - -var boolKeys = []Key{ - LogSCMInteractions, - FlushDNSOnSessionUnlock, -} - -var uint64Keys = []Key{} diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index abe42ed90..d925731c3 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -1,51 +1,82 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Package syspolicy provides functions to retrieve system settings of a device. +// Package syspolicy facilitates retrieval of the current policy settings +// applied to the device or user and receiving notifications when the policy +// changes. +// +// It provides functions that return specific policy settings by their unique +// [setting.Key]s, such as [GetBoolean], [GetUint64], [GetString], +// [GetStringArray], [GetPreferenceOption], [GetVisibility] and [GetDuration]. package syspolicy import ( "errors" + "fmt" + "reflect" "time" "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/syspolicy/source" ) -func GetString(key Key, defaultValue string) (string, error) { - markHandlerInUse() - v, err := handler.ReadString(string(key)) - if errors.Is(err, ErrNoSuchKey) { - return defaultValue, nil +var ( + // ErrNotConfigured is returned when the requested policy setting is not configured. + ErrNotConfigured = setting.ErrNotConfigured + // ErrTypeMismatch is returned when there's a type mismatch between the actual type + // of the setting value and the expected type. + ErrTypeMismatch = setting.ErrTypeMismatch + // ErrNoSuchKey is returned by [setting.DefinitionOf] when no policy setting + // has been registered with the specified key. + // + // This error is also returned by a (now deprecated) [Handler] when the specified + // key does not have a value set. While the package maintains compatibility with this + // usage of ErrNoSuchKey, it is recommended to return [ErrNotConfigured] from newer + // [source.Store] implementations. + ErrNoSuchKey = setting.ErrNoSuchKey +) + +// RegisterStore registers a new policy [source.Store] with the specified name and [setting.PolicyScope]. +// +// It is a shorthand for [rsop.RegisterStore]. +func RegisterStore(name string, scope setting.PolicyScope, store source.Store) (*rsop.StoreRegistration, error) { + return rsop.RegisterStore(name, scope, store) +} + +// MustRegisterStoreForTest is like [rsop.RegisterStoreForTest], but it fails the test if the store could not be registered. 
+func MustRegisterStoreForTest(tb TB, name string, scope setting.PolicyScope, store source.Store) *rsop.StoreRegistration { + tb.Helper() + reg, err := rsop.RegisterStoreForTest(tb, name, scope, store) + if err != nil { + tb.Fatalf("Failed to register policy store %q as a %v policy source: %v", name, scope, err) } - return v, err + return reg +} + +// GetString returns a string policy setting with the specified key, +// or defaultValue if it does not exist. +func GetString(key Key, defaultValue string) (string, error) { + return getCurrentPolicySettingValue(key, defaultValue) } +// GetUint64 returns a numeric policy setting with the specified key, +// or defaultValue if it does not exist. func GetUint64(key Key, defaultValue uint64) (uint64, error) { - markHandlerInUse() - v, err := handler.ReadUInt64(string(key)) - if errors.Is(err, ErrNoSuchKey) { - return defaultValue, nil - } - return v, err + return getCurrentPolicySettingValue(key, defaultValue) } +// GetBoolean returns a boolean policy setting with the specified key, +// or defaultValue if it does not exist. func GetBoolean(key Key, defaultValue bool) (bool, error) { - markHandlerInUse() - v, err := handler.ReadBoolean(string(key)) - if errors.Is(err, ErrNoSuchKey) { - return defaultValue, nil - } - return v, err + return getCurrentPolicySettingValue(key, defaultValue) } +// GetStringArray returns a multi-string policy setting with the specified key, +// or defaultValue if it does not exist. func GetStringArray(key Key, defaultValue []string) ([]string, error) { - markHandlerInUse() - v, err := handler.ReadStringArray(string(key)) - if errors.Is(err, ErrNoSuchKey) { - return defaultValue, nil - } - return v, err + return getCurrentPolicySettingValue(key, defaultValue) } // GetPreferenceOption loads a policy from the registry that can be @@ -55,13 +86,7 @@ func GetStringArray(key Key, defaultValue []string) ([]string, error) { // "always" and "never" remove the user's ability to make a selection. If not // present or set to a different value, "user-decides" is the default. func GetPreferenceOption(name Key) (setting.PreferenceOption, error) { - s, err := GetString(name, "user-decides") - if err != nil { - return setting.ShowChoiceByPolicy, err - } - var opt setting.PreferenceOption - err = opt.UnmarshalText([]byte(s)) - return opt, err + return getCurrentPolicySettingValue(name, setting.ShowChoiceByPolicy) } // GetVisibility loads a policy from the registry that can be managed @@ -70,13 +95,7 @@ func GetPreferenceOption(name Key) (setting.PreferenceOption, error) { // true) or "hide" (return true). If not present or set to a different value, // "show" (return false) is the default. func GetVisibility(name Key) (setting.Visibility, error) { - s, err := GetString(name, "show") - if err != nil { - return setting.VisibleByPolicy, err - } - var visibility setting.Visibility - visibility.UnmarshalText([]byte(s)) - return visibility, nil + return getCurrentPolicySettingValue(name, setting.VisibleByPolicy) } // GetDuration loads a policy from the registry that can be managed @@ -85,15 +104,58 @@ func GetVisibility(name Key) (setting.Visibility, error) { // understands. If the registry value is "" or can not be processed, // defaultValue is returned instead. 
func GetDuration(name Key, defaultValue time.Duration) (time.Duration, error) { - opt, err := GetString(name, "") - if opt == "" || err != nil { - return defaultValue, err + d, err := getCurrentPolicySettingValue(name, defaultValue) + if err != nil { + return d, err } - v, err := time.ParseDuration(opt) - if err != nil || v < 0 { + if d < 0 { return defaultValue, nil } - return v, nil + return d, nil +} + +// RegisterChangeCallback adds a function that will be called whenever the effective policy +// for the default scope changes. The returned function can be used to unregister the callback. +func RegisterChangeCallback(cb rsop.PolicyChangeCallback) (unregister func(), err error) { + effective, err := rsop.PolicyFor(setting.DefaultScope()) + if err != nil { + return nil, err + } + return effective.RegisterChangeCallback(cb), nil +} + +// getCurrentPolicySettingValue returns the value of the policy setting +// specified by its key from the [rsop.Policy] of the [setting.DefaultScope]. It +// returns def if the policy setting is not configured, or an error if it has +// an error or could not be converted to the specified type T. +func getCurrentPolicySettingValue[T setting.ValueType](key Key, def T) (T, error) { + effective, err := rsop.PolicyFor(setting.DefaultScope()) + if err != nil { + return def, err + } + value, err := effective.Get().GetErr(key) + if err != nil { + if errors.Is(err, setting.ErrNotConfigured) || errors.Is(err, setting.ErrNoSuchKey) { + return def, nil + } + return def, err + } + if res, ok := value.(T); ok { + return res, nil + } + return convertPolicySettingValueTo(value, def) +} + +func convertPolicySettingValueTo[T setting.ValueType](value any, def T) (T, error) { + // Convert [PreferenceOption], [Visibility], or [time.Duration] back to a string + // if someone requests a string instead of the actual setting's value. + // TODO(nickkhyl): check if this behavior is relied upon anywhere besides the old tests. + if reflect.TypeFor[T]().Kind() == reflect.String { + if str, ok := value.(fmt.Stringer); ok { + return any(str.String()).(T), nil + } + } + return def, fmt.Errorf("%w: got %T, want %T", setting.ErrTypeMismatch, value, def) } // SelectControlURL returns the ControlURL to use based on a value in diff --git a/util/syspolicy/syspolicy_test.go b/util/syspolicy/syspolicy_test.go index 8280aa1df..a70a49d39 100644 --- a/util/syspolicy/syspolicy_test.go +++ b/util/syspolicy/syspolicy_test.go @@ -9,57 +9,15 @@ import ( "testing" "time" + "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/internal/metrics" "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/syspolicy/source" ) -// testHandler encompasses all data types returned when testing any of the syspolicy -// methods that involve getting a policy value. -// For keys and the corresponding values, check policy_keys.go. -type testHandler struct { - t *testing.T - key Key - s string - u64 uint64 - b bool - sArr []string - err error - calls int // used for testing reads from cache vs. 
handler -} - var someOtherError = errors.New("error other than not found") -func (th *testHandler) ReadString(key string) (string, error) { - if key != string(th.key) { - th.t.Errorf("ReadString(%q) want %q", key, th.key) - } - th.calls++ - return th.s, th.err -} - -func (th *testHandler) ReadUInt64(key string) (uint64, error) { - if key != string(th.key) { - th.t.Errorf("ReadUint64(%q) want %q", key, th.key) - } - th.calls++ - return th.u64, th.err -} - -func (th *testHandler) ReadBoolean(key string) (bool, error) { - if key != string(th.key) { - th.t.Errorf("ReadBool(%q) want %q", key, th.key) - } - th.calls++ - return th.b, th.err -} - -func (th *testHandler) ReadStringArray(key string) ([]string, error) { - if key != string(th.key) { - th.t.Errorf("ReadStringArray(%q) want %q", key, th.key) - } - th.calls++ - return th.sArr, th.err -} - func TestGetString(t *testing.T) { tests := []struct { name string @@ -69,23 +27,28 @@ func TestGetString(t *testing.T) { defaultValue string wantValue string wantError error + wantMetrics []metrics.TestState }{ { name: "read existing value", key: AdminConsoleVisibility, handlerValue: "hide", wantValue: "hide", + wantMetrics: []metrics.TestState{ + {Name: "$os_syspolicy_any", Value: 1}, + {Name: "$os_syspolicy_AdminConsole", Value: 1}, + }, }, { name: "read non-existing value", key: EnableServerMode, - handlerError: ErrNoSuchKey, + handlerError: ErrNotConfigured, wantError: nil, }, { name: "read non-existing value, non-blank default", key: EnableServerMode, - handlerError: ErrNoSuchKey, + handlerError: ErrNotConfigured, defaultValue: "test", wantValue: "test", wantError: nil, @@ -95,24 +58,43 @@ func TestGetString(t *testing.T) { key: NetworkDevicesVisibility, handlerError: someOtherError, wantError: someOtherError, + wantMetrics: []metrics.TestState{ + {Name: "$os_syspolicy_errors", Value: 1}, + {Name: "$os_syspolicy_NetworkDevices_error", Value: 1}, + }, }, } + RegisterWellKnownSettingsForTest(t) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - SetHandlerForTest(t, &testHandler{ - t: t, - key: tt.key, - s: tt.handlerValue, - err: tt.handlerError, - }) + h := metrics.NewTestHandler(t) + metrics.SetHooksForTest(t, h.AddMetric, h.SetMetric) + + s := source.TestSetting[string]{ + Key: tt.key, + Value: tt.handlerValue, + Error: tt.handlerError, + } + registerSingleSettingStoreForTest(t, s) + value, err := GetString(tt.key, tt.defaultValue) - if err != tt.wantError { + if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } if value != tt.wantValue { t.Errorf("value=%v, want %v", value, tt.wantValue) } + wantMetrics := tt.wantMetrics + if !metrics.ShouldReport() { + // Check that metrics are not reported on platforms + // where they shouldn't be reported. + // As of 2024-09-04, syspolicy only reports metrics + // on Windows and Android. + wantMetrics = nil + } + h.MustEqual(wantMetrics...) 
}) } } @@ -129,7 +111,7 @@ func TestGetUint64(t *testing.T) { }{ { name: "read existing value", - key: KeyExpirationNoticeTime, + key: LogSCMInteractions, handlerValue: 1, wantValue: 1, }, @@ -137,14 +119,14 @@ func TestGetUint64(t *testing.T) { name: "read non-existing value", key: LogSCMInteractions, handlerValue: 0, - handlerError: ErrNoSuchKey, + handlerError: ErrNotConfigured, wantValue: 0, }, { name: "read non-existing value, non-zero default", key: LogSCMInteractions, defaultValue: 2, - handlerError: ErrNoSuchKey, + handlerError: ErrNotConfigured, wantValue: 2, }, { @@ -157,14 +139,23 @@ func TestGetUint64(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - SetHandlerForTest(t, &testHandler{ - t: t, - key: tt.key, - u64: tt.handlerValue, - err: tt.handlerError, - }) + // None of the policy settings tested here are integers. + // In fact, we don't have any integer policies as of 2024-10-08. + // However, we can register each of them as an integer policy setting + // for the duration of the test, providing us with something to test against. + if err := setting.SetDefinitionsForTest(t, setting.NewDefinition(tt.key, setting.DeviceSetting, setting.IntegerValue)); err != nil { + t.Fatalf("SetDefinitionsForTest failed: %v", err) + } + + s := source.TestSetting[uint64]{ + Key: tt.key, + Value: tt.handlerValue, + Error: tt.handlerError, + } + registerSingleSettingStoreForTest(t, s) + value, err := GetUint64(tt.key, tt.defaultValue) - if err != tt.wantError { + if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } if value != tt.wantValue { @@ -183,45 +174,69 @@ func TestGetBoolean(t *testing.T) { defaultValue bool wantValue bool wantError error + wantMetrics []metrics.TestState }{ { name: "read existing value", key: FlushDNSOnSessionUnlock, handlerValue: true, wantValue: true, + wantMetrics: []metrics.TestState{ + {Name: "$os_syspolicy_any", Value: 1}, + {Name: "$os_syspolicy_FlushDNSOnSessionUnlock", Value: 1}, + }, }, { name: "read non-existing value", key: LogSCMInteractions, handlerValue: false, - handlerError: ErrNoSuchKey, + handlerError: ErrNotConfigured, wantValue: false, }, { name: "reading value returns other error", key: FlushDNSOnSessionUnlock, handlerError: someOtherError, - wantError: someOtherError, + wantError: someOtherError, // expect error... defaultValue: true, - wantValue: false, + wantValue: true, // ...AND default value if the handler fails. + wantMetrics: []metrics.TestState{ + {Name: "$os_syspolicy_errors", Value: 1}, + {Name: "$os_syspolicy_FlushDNSOnSessionUnlock_error", Value: 1}, + }, }, } + RegisterWellKnownSettingsForTest(t) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - SetHandlerForTest(t, &testHandler{ - t: t, - key: tt.key, - b: tt.handlerValue, - err: tt.handlerError, - }) + h := metrics.NewTestHandler(t) + metrics.SetHooksForTest(t, h.AddMetric, h.SetMetric) + + s := source.TestSetting[bool]{ + Key: tt.key, + Value: tt.handlerValue, + Error: tt.handlerError, + } + registerSingleSettingStoreForTest(t, s) + value, err := GetBoolean(tt.key, tt.defaultValue) - if err != tt.wantError { + if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } if value != tt.wantValue { t.Errorf("value=%v, want %v", value, tt.wantValue) } + wantMetrics := tt.wantMetrics + if !metrics.ShouldReport() { + // Check that metrics are not reported on platforms + // where they shouldn't be reported. 
+ // As of 2024-09-04, syspolicy only reports metrics + // on Windows and Android. + wantMetrics = nil + } + h.MustEqual(wantMetrics...) }) } } @@ -234,29 +249,42 @@ func TestGetPreferenceOption(t *testing.T) { handlerError error wantValue setting.PreferenceOption wantError error + wantMetrics []metrics.TestState }{ { name: "always by policy", key: EnableIncomingConnections, handlerValue: "always", wantValue: setting.AlwaysByPolicy, + wantMetrics: []metrics.TestState{ + {Name: "$os_syspolicy_any", Value: 1}, + {Name: "$os_syspolicy_AllowIncomingConnections", Value: 1}, + }, }, { name: "never by policy", key: EnableIncomingConnections, handlerValue: "never", wantValue: setting.NeverByPolicy, + wantMetrics: []metrics.TestState{ + {Name: "$os_syspolicy_any", Value: 1}, + {Name: "$os_syspolicy_AllowIncomingConnections", Value: 1}, + }, }, { name: "use default", key: EnableIncomingConnections, handlerValue: "", wantValue: setting.ShowChoiceByPolicy, + wantMetrics: []metrics.TestState{ + {Name: "$os_syspolicy_any", Value: 1}, + {Name: "$os_syspolicy_AllowIncomingConnections", Value: 1}, + }, }, { name: "read non-existing value", key: EnableIncomingConnections, - handlerError: ErrNoSuchKey, + handlerError: ErrNotConfigured, wantValue: setting.ShowChoiceByPolicy, }, { @@ -265,24 +293,43 @@ func TestGetPreferenceOption(t *testing.T) { handlerError: someOtherError, wantValue: setting.ShowChoiceByPolicy, wantError: someOtherError, + wantMetrics: []metrics.TestState{ + {Name: "$os_syspolicy_errors", Value: 1}, + {Name: "$os_syspolicy_AllowIncomingConnections_error", Value: 1}, + }, }, } + RegisterWellKnownSettingsForTest(t) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - SetHandlerForTest(t, &testHandler{ - t: t, - key: tt.key, - s: tt.handlerValue, - err: tt.handlerError, - }) + h := metrics.NewTestHandler(t) + metrics.SetHooksForTest(t, h.AddMetric, h.SetMetric) + + s := source.TestSetting[string]{ + Key: tt.key, + Value: tt.handlerValue, + Error: tt.handlerError, + } + registerSingleSettingStoreForTest(t, s) + option, err := GetPreferenceOption(tt.key) - if err != tt.wantError { + if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } if option != tt.wantValue { t.Errorf("option=%v, want %v", option, tt.wantValue) } + wantMetrics := tt.wantMetrics + if !metrics.ShouldReport() { + // Check that metrics are not reported on platforms + // where they shouldn't be reported. + // As of 2024-09-04, syspolicy only reports metrics + // on Windows and Android. + wantMetrics = nil + } + h.MustEqual(wantMetrics...) 
}) } } @@ -295,24 +342,33 @@ func TestGetVisibility(t *testing.T) { handlerError error wantValue setting.Visibility wantError error + wantMetrics []metrics.TestState }{ { name: "hidden by policy", key: AdminConsoleVisibility, handlerValue: "hide", wantValue: setting.HiddenByPolicy, + wantMetrics: []metrics.TestState{ + {Name: "$os_syspolicy_any", Value: 1}, + {Name: "$os_syspolicy_AdminConsole", Value: 1}, + }, }, { name: "visibility default", key: AdminConsoleVisibility, handlerValue: "show", wantValue: setting.VisibleByPolicy, + wantMetrics: []metrics.TestState{ + {Name: "$os_syspolicy_any", Value: 1}, + {Name: "$os_syspolicy_AdminConsole", Value: 1}, + }, }, { name: "read non-existing value", key: AdminConsoleVisibility, handlerValue: "show", - handlerError: ErrNoSuchKey, + handlerError: ErrNotConfigured, wantValue: setting.VisibleByPolicy, }, { @@ -322,24 +378,43 @@ func TestGetVisibility(t *testing.T) { handlerError: someOtherError, wantValue: setting.VisibleByPolicy, wantError: someOtherError, + wantMetrics: []metrics.TestState{ + {Name: "$os_syspolicy_errors", Value: 1}, + {Name: "$os_syspolicy_AdminConsole_error", Value: 1}, + }, }, } + RegisterWellKnownSettingsForTest(t) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - SetHandlerForTest(t, &testHandler{ - t: t, - key: tt.key, - s: tt.handlerValue, - err: tt.handlerError, - }) + h := metrics.NewTestHandler(t) + metrics.SetHooksForTest(t, h.AddMetric, h.SetMetric) + + s := source.TestSetting[string]{ + Key: tt.key, + Value: tt.handlerValue, + Error: tt.handlerError, + } + registerSingleSettingStoreForTest(t, s) + visibility, err := GetVisibility(tt.key) - if err != tt.wantError { + if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } if visibility != tt.wantValue { t.Errorf("visibility=%v, want %v", visibility, tt.wantValue) } + wantMetrics := tt.wantMetrics + if !metrics.ShouldReport() { + // Check that metrics are not reported on platforms + // where they shouldn't be reported. + // As of 2024-09-04, syspolicy only reports metrics + // on Windows and Android. + wantMetrics = nil + } + h.MustEqual(wantMetrics...) 
}) } } @@ -353,6 +428,7 @@ func TestGetDuration(t *testing.T) { defaultValue time.Duration wantValue time.Duration wantError error + wantMetrics []metrics.TestState }{ { name: "read existing value", @@ -360,25 +436,34 @@ func TestGetDuration(t *testing.T) { handlerValue: "2h", wantValue: 2 * time.Hour, defaultValue: 24 * time.Hour, + wantMetrics: []metrics.TestState{ + {Name: "$os_syspolicy_any", Value: 1}, + {Name: "$os_syspolicy_KeyExpirationNotice", Value: 1}, + }, }, { name: "invalid duration value", key: KeyExpirationNoticeTime, handlerValue: "-20", wantValue: 24 * time.Hour, + wantError: errors.New(`time: missing unit in duration "-20"`), defaultValue: 24 * time.Hour, + wantMetrics: []metrics.TestState{ + {Name: "$os_syspolicy_errors", Value: 1}, + {Name: "$os_syspolicy_KeyExpirationNotice_error", Value: 1}, + }, }, { name: "read non-existing value", key: KeyExpirationNoticeTime, - handlerError: ErrNoSuchKey, + handlerError: ErrNotConfigured, wantValue: 24 * time.Hour, defaultValue: 24 * time.Hour, }, { name: "read non-existing value different default", key: KeyExpirationNoticeTime, - handlerError: ErrNoSuchKey, + handlerError: ErrNotConfigured, wantValue: 0 * time.Second, defaultValue: 0 * time.Second, }, @@ -389,24 +474,43 @@ func TestGetDuration(t *testing.T) { wantValue: 24 * time.Hour, wantError: someOtherError, defaultValue: 24 * time.Hour, + wantMetrics: []metrics.TestState{ + {Name: "$os_syspolicy_errors", Value: 1}, + {Name: "$os_syspolicy_KeyExpirationNotice_error", Value: 1}, + }, }, } + RegisterWellKnownSettingsForTest(t) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - SetHandlerForTest(t, &testHandler{ - t: t, - key: tt.key, - s: tt.handlerValue, - err: tt.handlerError, - }) + h := metrics.NewTestHandler(t) + metrics.SetHooksForTest(t, h.AddMetric, h.SetMetric) + + s := source.TestSetting[string]{ + Key: tt.key, + Value: tt.handlerValue, + Error: tt.handlerError, + } + registerSingleSettingStoreForTest(t, s) + duration, err := GetDuration(tt.key, tt.defaultValue) - if err != tt.wantError { + if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } if duration != tt.wantValue { t.Errorf("duration=%v, want %v", duration, tt.wantValue) } + wantMetrics := tt.wantMetrics + if !metrics.ShouldReport() { + // Check that metrics are not reported on platforms + // where they shouldn't be reported. + // As of 2024-09-04, syspolicy only reports metrics + // on Windows and Android. + wantMetrics = nil + } + h.MustEqual(wantMetrics...) 
}) } } @@ -420,23 +524,28 @@ func TestGetStringArray(t *testing.T) { defaultValue []string wantValue []string wantError error + wantMetrics []metrics.TestState }{ { name: "read existing value", key: AllowedSuggestedExitNodes, handlerValue: []string{"foo", "bar"}, wantValue: []string{"foo", "bar"}, + wantMetrics: []metrics.TestState{ + {Name: "$os_syspolicy_any", Value: 1}, + {Name: "$os_syspolicy_AllowedSuggestedExitNodes", Value: 1}, + }, }, { name: "read non-existing value", key: AllowedSuggestedExitNodes, - handlerError: ErrNoSuchKey, + handlerError: ErrNotConfigured, wantError: nil, }, { name: "read non-existing value, non nil default", key: AllowedSuggestedExitNodes, - handlerError: ErrNoSuchKey, + handlerError: ErrNotConfigured, defaultValue: []string{"foo", "bar"}, wantValue: []string{"foo", "bar"}, wantError: nil, @@ -446,28 +555,68 @@ func TestGetStringArray(t *testing.T) { key: AllowedSuggestedExitNodes, handlerError: someOtherError, wantError: someOtherError, + wantMetrics: []metrics.TestState{ + {Name: "$os_syspolicy_errors", Value: 1}, + {Name: "$os_syspolicy_AllowedSuggestedExitNodes_error", Value: 1}, + }, }, } + RegisterWellKnownSettingsForTest(t) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - SetHandlerForTest(t, &testHandler{ - t: t, - key: tt.key, - sArr: tt.handlerValue, - err: tt.handlerError, - }) + h := metrics.NewTestHandler(t) + metrics.SetHooksForTest(t, h.AddMetric, h.SetMetric) + + s := source.TestSetting[[]string]{ + Key: tt.key, + Value: tt.handlerValue, + Error: tt.handlerError, + } + registerSingleSettingStoreForTest(t, s) + value, err := GetStringArray(tt.key, tt.defaultValue) - if err != tt.wantError { + if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } if !slices.Equal(tt.wantValue, value) { t.Errorf("value=%v, want %v", value, tt.wantValue) } + wantMetrics := tt.wantMetrics + if !metrics.ShouldReport() { + // Check that metrics are not reported on platforms + // where they shouldn't be reported. + // As of 2024-09-04, syspolicy only reports metrics + // on Windows and Android. + wantMetrics = nil + } + h.MustEqual(wantMetrics...) 
}) } } +func registerSingleSettingStoreForTest[T source.TestValueType](tb TB, s source.TestSetting[T]) { + policyStore := source.NewTestStoreOf(tb, s) + MustRegisterStoreForTest(tb, "TestStore", setting.DeviceScope, policyStore) +} + +func BenchmarkGetString(b *testing.B) { + loggerx.SetForTest(b, logger.Discard, logger.Discard) + RegisterWellKnownSettingsForTest(b) + + wantControlURL := "https://login.tailscale.com" + registerSingleSettingStoreForTest(b, source.TestSettingOf(ControlURL, wantControlURL)) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + gotControlURL, _ := GetString(ControlURL, "https://controlplane.tailscale.com") + if gotControlURL != wantControlURL { + b.Fatalf("got %v; want %v", gotControlURL, wantControlURL) + } + } +} + func TestSelectControlURL(t *testing.T) { tests := []struct { reg, disk, want string @@ -499,3 +648,13 @@ func TestSelectControlURL(t *testing.T) { } } } + +func errorsMatchForTest(got, want error) bool { + if got == nil && want == nil { + return true + } + if got == nil || want == nil { + return false + } + return errors.Is(got, want) || got.Error() == want.Error() +} diff --git a/util/syspolicy/syspolicy_windows.go b/util/syspolicy/syspolicy_windows.go new file mode 100644 index 000000000..9d57e249e --- /dev/null +++ b/util/syspolicy/syspolicy_windows.go @@ -0,0 +1,92 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package syspolicy + +import ( + "errors" + "fmt" + "os/user" + + "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/rsop" + "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/syspolicy/source" + "tailscale.com/util/testenv" +) + +func init() { + // On Windows, we should automatically register the Registry-based policy + // store for the device. If we are running in a user's security context + // (e.g., we're the GUI), we should also register the Registry policy store for + // the user. In the future, we should register (and unregister) user policy + // stores whenever a user connects to (or disconnects from) the local backend. + // This ensures the backend is aware of the user's policy settings and can send + // them to the GUI/CLI/Web clients on demand or whenever they change. + // + // Other platforms, such as macOS, iOS and Android, should register their + // platform-specific policy stores via [RegisterStore] + // (or [RegisterHandler] until they implement the [source.Store] interface). + // + // External code, such as the ipnlocal package, may choose to register + // additional policy stores, such as config files and policies received from + // the control plane. + internal.Init.MustDefer(func() error { + // Do not register or use default policy stores during tests. + // Each test should set up its own necessary configurations. + if testenv.InTest() { + return nil + } + return configureSyspolicy(nil) + }) +} + +// configureSyspolicy configures syspolicy for use on Windows, +// either in test or regular builds depending on whether tb has a non-nil value. +func configureSyspolicy(tb internal.TB) error { + const localSystemSID = "S-1-5-18" + // Always create and register a machine policy store that reads + // policy settings from the HKEY_LOCAL_MACHINE registry hive. 
+ machineStore, err := source.NewMachinePlatformPolicyStore() + if err != nil { + return fmt.Errorf("failed to create the machine policy store: %v", err) + } + if tb == nil { + _, err = rsop.RegisterStore("Platform", setting.DeviceScope, machineStore) + } else { + _, err = rsop.RegisterStoreForTest(tb, "Platform", setting.DeviceScope, machineStore) + } + if err != nil { + return err + } + // Check whether the current process is running as Local System or not. + u, err := user.Current() + if err != nil { + return err + } + if u.Uid == localSystemSID { + return nil + } + // If it's not a Local System's process (e.g., it's the GUI rather than the tailscaled service), + // we should create and use a policy store for the current user that reads + // policy settings from that user's registry hive (HKEY_CURRENT_USER). + userStore, err := source.NewUserPlatformPolicyStore(0) + if err != nil { + return fmt.Errorf("failed to create the current user's policy store: %v", err) + } + if tb == nil { + _, err = rsop.RegisterStore("Platform", setting.CurrentUserScope, userStore) + } else { + _, err = rsop.RegisterStoreForTest(tb, "Platform", setting.CurrentUserScope, userStore) + } + if err != nil { + return err + } + // And also set [setting.CurrentUserScope] as the [setting.DefaultScope], so [GetString], + // [GetVisibility] and similar functions would be returning a merged result + // of the machine's and user's policies. + if !setting.SetDefaultScope(setting.CurrentUserScope) { + return errors.New("current scope already set") + } + return nil +} From 6ab39b7bcd259a7cf4adb9331586a64698c85dcc Mon Sep 17 00:00:00 2001 From: Nick Kirby Date: Sat, 26 Oct 2024 13:03:36 +0100 Subject: [PATCH 0078/1708] cmd/k8s-operator: validate that tailscale.com/tailnet-ip annotation value is a valid IP Fixes #13836 Signed-off-by: Nick Kirby --- cmd/k8s-operator/operator_test.go | 142 ++++++++++++++++++++++++++++++ cmd/k8s-operator/svc.go | 11 ++- 2 files changed, 150 insertions(+), 3 deletions(-) diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 21e1d4313..a440fafb5 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -432,6 +432,148 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { expectMissing[corev1.Secret](t, fc, "operator-ns", fullName) } +func TestTailnetTargetIPAnnotation_IPCouldNotBeParsed(t *testing.T) { + fc := fake.NewFakeClient() + ft := &fakeTSClient{} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + clock := tstest.NewClock(tstest.ClockOpts{}) + sr := &ServiceReconciler{ + Client: fc, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + logger: zl.Sugar(), + clock: clock, + recorder: record.NewFakeRecorder(100), + } + tailnetTargetIP := "invalid-ip" + mustCreate(t, fc, &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + AnnotationTailnetTargetIP: tailnetTargetIP, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.20.30.40", + Type: corev1.ServiceTypeLoadBalancer, + LoadBalancerClass: ptr.To("tailscale"), + }, + }) + + expectReconciled(t, sr, "default", "test") + + t0 := conditionTime(clock) + + want := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + 
AnnotationTailnetTargetIP: tailnetTargetIP, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.20.30.40", + Type: corev1.ServiceTypeLoadBalancer, + LoadBalancerClass: ptr.To("tailscale"), + }, + Status: corev1.ServiceStatus{ + Conditions: []metav1.Condition{{ + Type: string(tsapi.ProxyReady), + Status: metav1.ConditionFalse, + LastTransitionTime: t0, + Reason: reasonProxyInvalid, + Message: `unable to provision proxy resources: invalid Service: invalid value of annotation tailscale.com/tailnet-ip: "invalid-ip" could not be parsed as a valid IP Address, error: ParseAddr("invalid-ip"): unable to parse IP`, + }}, + }, + } + + expectEqual(t, fc, want, nil) +} + +func TestTailnetTargetIPAnnotation_InvalidIP(t *testing.T) { + fc := fake.NewFakeClient() + ft := &fakeTSClient{} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + clock := tstest.NewClock(tstest.ClockOpts{}) + sr := &ServiceReconciler{ + Client: fc, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + logger: zl.Sugar(), + clock: clock, + recorder: record.NewFakeRecorder(100), + } + tailnetTargetIP := "999.999.999.999" + mustCreate(t, fc, &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + AnnotationTailnetTargetIP: tailnetTargetIP, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.20.30.40", + Type: corev1.ServiceTypeLoadBalancer, + LoadBalancerClass: ptr.To("tailscale"), + }, + }) + + expectReconciled(t, sr, "default", "test") + + t0 := conditionTime(clock) + + want := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + AnnotationTailnetTargetIP: tailnetTargetIP, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.20.30.40", + Type: corev1.ServiceTypeLoadBalancer, + LoadBalancerClass: ptr.To("tailscale"), + }, + Status: corev1.ServiceStatus{ + Conditions: []metav1.Condition{{ + Type: string(tsapi.ProxyReady), + Status: metav1.ConditionFalse, + LastTransitionTime: t0, + Reason: reasonProxyInvalid, + Message: `unable to provision proxy resources: invalid Service: invalid value of annotation tailscale.com/tailnet-ip: "999.999.999.999" could not be parsed as a valid IP Address, error: ParseAddr("999.999.999.999"): IPv4 field has value >255`, + }}, + }, + } + + expectEqual(t, fc, want, nil) +} + func TestAnnotations(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index f45f92246..3c6bc27a9 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -358,9 +358,14 @@ func validateService(svc *corev1.Service) []string { violations = append(violations, fmt.Sprintf("invalid value of annotation %s: %q does not appear to be a valid MagicDNS name", AnnotationTailnetTargetFQDN, fqdn)) } } - - // TODO(irbekrm): validate that tailscale.com/tailnet-ip annotation is a - // valid IP address (tailscale/tailscale#13671). 
+ if ipStr := svc.Annotations[AnnotationTailnetTargetIP]; ipStr != "" { + ip, err := netip.ParseAddr(ipStr) + if err != nil { + violations = append(violations, fmt.Sprintf("invalid value of annotation %s: %q could not be parsed as a valid IP Address, error: %s", AnnotationTailnetTargetIP, ipStr, err)) + } else if !ip.IsValid() { + violations = append(violations, fmt.Sprintf("parsed IP address in annotation %s: %q is not valid", AnnotationTailnetTargetIP, ipStr)) + } + } svcName := nameForService(svc) if err := dnsname.ValidLabel(svcName); err != nil { From 853fe3b7132959fb99648df3d5d7aec47a6734c1 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Sat, 26 Oct 2024 09:33:47 -0500 Subject: [PATCH 0079/1708] ipn/store/kubestore: cache state in memory (#13918) Cache state in memory on writes, read from memory in reads. kubestore was previously always reading state from a Secret. This change should fix bugs caused by temporary loss of access to the kube API server and improve overall performance. Fixes #7671 Updates tailscale/tailscale#12079,tailscale/tailscale#13900 Signed-off-by: Maisem Ali Signed-off-by: Irbe Krumina Co-authored-by: Maisem Ali --- ipn/store/kubestore/store_kube.go | 81 +++++++++++++++++++------------ ipn/store/mem/store_mem.go | 17 +++++++ 2 files changed, 67 insertions(+), 31 deletions(-) diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 00950bd3b..1e0e01c7b 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -13,19 +13,27 @@ import ( "time" "tailscale.com/ipn" + "tailscale.com/ipn/store/mem" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" "tailscale.com/types/logger" ) +// TODO(irbekrm): should we bump this? should we have retries? See tailscale/tailscale#13024 +const timeout = 5 * time.Second + // Store is an ipn.StateStore that uses a Kubernetes Secret for persistence. type Store struct { client kubeclient.Client canPatch bool secretName string + + // memory holds the latest tailscale state. Writes write state to a kube Secret and memory, Reads read from + // memory. + memory mem.Store } -// New returns a new Store that persists to the named secret. +// New returns a new Store that persists to the named Secret. func New(_ logger.Logf, secretName string) (*Store, error) { c, err := kubeclient.New() if err != nil { @@ -39,11 +47,16 @@ func New(_ logger.Logf, secretName string) (*Store, error) { if err != nil { return nil, err } - return &Store{ + s := &Store{ client: c, canPatch: canPatch, secretName: secretName, - }, nil + } + // Load latest state from kube Secret if it already exists. + if err := s.loadState(); err != nil { + return nil, fmt.Errorf("error loading state from kube Secret: %w", err) + } + return s, nil } func (s *Store) SetDialer(d func(ctx context.Context, network, address string) (net.Conn, error)) { @@ -54,37 +67,17 @@ func (s *Store) String() string { return "kube.Store" } // ReadState implements the StateStore interface.
func (s *Store) ReadState(id ipn.StateKey) ([]byte, error) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - secret, err := s.client.GetSecret(ctx, s.secretName) - if err != nil { - if st, ok := err.(*kubeapi.Status); ok && st.Code == 404 { - return nil, ipn.ErrStateNotExist - } - return nil, err - } - b, ok := secret.Data[sanitizeKey(id)] - if !ok { - return nil, ipn.ErrStateNotExist - } - return b, nil -} - -func sanitizeKey(k ipn.StateKey) string { - // The only valid characters in a Kubernetes secret key are alphanumeric, -, - // _, and . - return strings.Map(func(r rune) rune { - if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '-' || r == '_' || r == '.' { - return r - } - return '_' - }, string(k)) + return s.memory.ReadState(ipn.StateKey(sanitizeKey(id))) } // WriteState implements the StateStore interface. -func (s *Store) WriteState(id ipn.StateKey, bs []byte) error { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) +func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) { + defer func() { + if err == nil { + s.memory.WriteState(ipn.StateKey(sanitizeKey(id)), bs) + } + }() + ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() secret, err := s.client.GetSecret(ctx, s.secretName) @@ -137,3 +130,29 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) error { } return err } + +func (s *Store) loadState() error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + secret, err := s.client.GetSecret(ctx, s.secretName) + if err != nil { + if st, ok := err.(*kubeapi.Status); ok && st.Code == 404 { + return ipn.ErrStateNotExist + } + return err + } + s.memory.LoadFromMap(secret.Data) + return nil +} + +func sanitizeKey(k ipn.StateKey) string { + // The only valid characters in a Kubernetes secret key are alphanumeric, -, + // _, and . + return strings.Map(func(r rune) rune { + if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '-' || r == '_' || r == '.' { + return r + } + return '_' + }, string(k)) +} diff --git a/ipn/store/mem/store_mem.go b/ipn/store/mem/store_mem.go index f3a308ae5..6f474ce99 100644 --- a/ipn/store/mem/store_mem.go +++ b/ipn/store/mem/store_mem.go @@ -9,8 +9,10 @@ import ( "encoding/json" "sync" + xmaps "golang.org/x/exp/maps" "tailscale.com/ipn" "tailscale.com/types/logger" + "tailscale.com/util/mak" ) // New returns a new Store. @@ -28,6 +30,7 @@ type Store struct { func (s *Store) String() string { return "mem.Store" } // ReadState implements the StateStore interface. +// It returns ipn.ErrStateNotExist if the state does not exist. func (s *Store) ReadState(id ipn.StateKey) ([]byte, error) { s.mu.Lock() defer s.mu.Unlock() @@ -39,6 +42,7 @@ func (s *Store) ReadState(id ipn.StateKey) ([]byte, error) { } // WriteState implements the StateStore interface. +// It never returns an error. func (s *Store) WriteState(id ipn.StateKey, bs []byte) error { s.mu.Lock() defer s.mu.Unlock() @@ -49,6 +53,19 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) error { return nil } +// LoadFromMap loads the in-memory cache from the provided map. +// Any existing content is cleared, and the provided map is +// copied into the cache. 
+func (s *Store) LoadFromMap(m map[string][]byte) { + s.mu.Lock() + defer s.mu.Unlock() + xmaps.Clear(s.cache) + for k, v := range m { + mak.Set(&s.cache, ipn.StateKey(k), v) + } + return +} + // LoadFromJSON attempts to unmarshal json content into the // in-memory cache. func (s *Store) LoadFromJSON(data []byte) error { From 9d1348fe212fccf52de11f4009e24a7436167fe7 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Sun, 27 Oct 2024 10:54:38 -0500 Subject: [PATCH 0080/1708] ipn/store/kubestore: don't error if state cannot be preloaded (#13926) Preloading of state from kube Secret should not error if the Secret does not exist. Updates tailscale/tailscale#7671 Signed-off-by: Irbe Krumina --- ipn/store/kubestore/store_kube.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 1e0e01c7b..2dcc08b6e 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -53,7 +53,7 @@ func New(_ logger.Logf, secretName string) (*Store, error) { secretName: secretName, } // Load latest state from kube Secret if it already exists. - if err := s.loadState(); err != nil { + if err := s.loadState(); err != nil && err != ipn.ErrStateNotExist { return nil, fmt.Errorf("error loading state from kube Secret: %w", err) } return s, nil From 5d07c17b9395c513dc2d4674d63e33397ce794d5 Mon Sep 17 00:00:00 2001 From: Renato Aguiar Date: Mon, 28 Oct 2024 08:00:48 -0700 Subject: [PATCH 0081/1708] net/dns: fix blank lines being added to resolv.conf on OpenBSD (#13928) During resolv.conf update, old 'search' lines are cleared but '\n' is not deleted, leaving behind a new blank line on every update. This adds 's' flag to regexp, so '\n' is included in the match and deleted when old lines are cleared. Also, insert missing `\n` when updated 'search' line is appended to resolv.conf. Signed-off-by: Renato Aguiar --- net/dns/resolvd.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/dns/resolvd.go b/net/dns/resolvd.go index 9b067eb07..ad1a99c11 100644 --- a/net/dns/resolvd.go +++ b/net/dns/resolvd.go @@ -57,6 +57,7 @@ func (m *resolvdManager) SetDNS(config OSConfig) error { if len(newSearch) > 1 { newResolvConf = append(newResolvConf, []byte(strings.Join(newSearch, " "))...) + newResolvConf = append(newResolvConf, '\n') } err = m.fs.WriteFile(resolvConf, newResolvConf, 0644) @@ -123,6 +124,6 @@ func (m resolvdManager) readResolvConf() (config OSConfig, err error) { } func removeSearchLines(orig []byte) []byte { - re := regexp.MustCompile(`(?m)^search\s+.+$`) + re := regexp.MustCompile(`(?ms)^search\s+.+$`) return re.ReplaceAll(orig, []byte("")) } From 41aac261064602c5eb14ccbacd0a684ffe3ae533 Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 28 Oct 2024 15:02:34 +0000 Subject: [PATCH 0082/1708] licenses: update license notices Signed-off-by: License Updater --- licenses/apple.md | 8 ++++---- licenses/windows.md | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/licenses/apple.md b/licenses/apple.md index 751082d5b..36c654c59 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -73,13 +73,13 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.25.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.28.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fe59bbe5:LICENSE)) - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.8.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.26.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.25.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.19.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.5.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/64c016c92987/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index 2a8e4e621..3f6650b9e 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -65,15 +65,15 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.25.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.28.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fe59bbe5:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.18.0:LICENSE)) - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.19.0:LICENSE)) - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.8.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.26.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.25.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.19.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - [gopkg.in/Knetic/govaluate.v3](https://pkg.go.dev/gopkg.in/Knetic/govaluate.v3) ([MIT](https://github.com/Knetic/govaluate/blob/v3.0.0/LICENSE)) From c0a1ed86cbe5e8a8511b04fe1406b3903cd9f8b8 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 13 Sep 2024 11:35:47 -0700 Subject: [PATCH 0083/1708] tstest/natlab: add latency & loss simulation A simple implementation of latency and loss simulation, applied to writes to the ethernet interface of the NIC. The latency implementation could be optimized substantially later if necessary. 
Updates #13355 Signed-off-by: James Tucker --- tstest/natlab/vnet/conf.go | 21 +++++++++++++++++++++ tstest/natlab/vnet/conf_test.go | 15 ++++++++++++++- tstest/natlab/vnet/vnet.go | 23 +++++++++++++++++++++-- 3 files changed, 56 insertions(+), 3 deletions(-) diff --git a/tstest/natlab/vnet/conf.go b/tstest/natlab/vnet/conf.go index cf71a6674..a37c22a6c 100644 --- a/tstest/natlab/vnet/conf.go +++ b/tstest/natlab/vnet/conf.go @@ -10,6 +10,7 @@ import ( "net/netip" "os" "slices" + "time" "github.com/google/gopacket/layers" "github.com/google/gopacket/pcapgo" @@ -279,10 +280,28 @@ type Network struct { svcs set.Set[NetworkService] + latency time.Duration // latency applied to interface writes + lossRate float64 // chance of packet loss (0.0 to 1.0) + // ... err error // carried error } +// SetLatency sets the simulated network latency for this network. +func (n *Network) SetLatency(d time.Duration) { + n.latency = d +} + +// SetPacketLoss sets the packet loss rate for this network 0.0 (no loss) to 1.0 (total loss). +func (n *Network) SetPacketLoss(rate float64) { + if rate < 0 { + rate = 0 + } else if rate > 1 { + rate = 1 + } + n.lossRate = rate +} + // SetBlackholedIPv4 sets whether the network should blackhole all IPv4 traffic // out to the Internet. (DHCP etc continues to work on the LAN.) func (n *Network) SetBlackholedIPv4(v bool) { @@ -361,6 +380,8 @@ func (s *Server) initFromConfig(c *Config) error { wanIP4: conf.wanIP4, lanIP4: conf.lanIP4, breakWAN4: conf.breakWAN4, + latency: conf.latency, + lossRate: conf.lossRate, nodesByIP4: map[netip.Addr]*node{}, nodesByMAC: map[MAC]*node{}, logf: logger.WithPrefix(s.logf, fmt.Sprintf("[net-%v] ", conf.mac)), diff --git a/tstest/natlab/vnet/conf_test.go b/tstest/natlab/vnet/conf_test.go index 15d3c69ef..6566ac8cf 100644 --- a/tstest/natlab/vnet/conf_test.go +++ b/tstest/natlab/vnet/conf_test.go @@ -3,7 +3,10 @@ package vnet -import "testing" +import ( + "testing" + "time" +) func TestConfig(t *testing.T) { tests := []struct { @@ -18,6 +21,16 @@ func TestConfig(t *testing.T) { c.AddNode(c.AddNetwork("2.2.2.2", "10.2.0.1/16", HardNAT)) }, }, + { + name: "latency-and-loss", + setup: func(c *Config) { + n1 := c.AddNetwork("2.1.1.1", "192.168.1.1/24", EasyNAT, NATPMP) + n1.SetLatency(time.Second) + n1.SetPacketLoss(0.1) + c.AddNode(n1) + c.AddNode(c.AddNetwork("2.2.2.2", "10.2.0.1/16", HardNAT)) + }, + }, { name: "indirect", setup: func(c *Config) { diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index e7991b3e6..92312c039 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -515,6 +515,8 @@ type network struct { wanIP4 netip.Addr // router's LAN IPv4, if any lanIP4 netip.Prefix // router's LAN IP + CIDR (e.g. 
192.168.2.1/24) breakWAN4 bool // break WAN IPv4 connectivity + latency time.Duration // latency applied to interface writes + lossRate float64 // probability of dropping a packet (0.0 to 1.0) nodesByIP4 map[netip.Addr]*node // by LAN IPv4 nodesByMAC map[MAC]*node logf func(format string, args ...any) @@ -977,7 +979,7 @@ func (n *network) writeEth(res []byte) bool { for mac, nw := range n.writers.All() { if mac != srcMAC { num++ - nw.write(res) + n.conditionedWrite(nw, res) } } return num > 0 @@ -987,7 +989,7 @@ func (n *network) writeEth(res []byte) bool { return false } if nw, ok := n.writers.Load(dstMAC); ok { - nw.write(res) + n.conditionedWrite(nw, res) return true } @@ -1000,6 +1002,23 @@ func (n *network) writeEth(res []byte) bool { return false } +func (n *network) conditionedWrite(nw networkWriter, packet []byte) { + if n.lossRate > 0 && rand.Float64() < n.lossRate { + // packet lost + return + } + if n.latency > 0 { + // copy the packet as there's no guarantee packet is owned long enough. + // TODO(raggi): this could be optimized substantially if necessary, + // a pool of buffers and a cheaper delay mechanism are both obvious improvements. + var pkt = make([]byte, len(packet)) + copy(pkt, packet) + time.AfterFunc(n.latency, func() { nw.write(pkt) }) + } else { + nw.write(packet) + } +} + var ( macAllNodes = MAC{0: 0x33, 1: 0x33, 5: 0x01} macAllRouters = MAC{0: 0x33, 1: 0x33, 5: 0x02} From 0d76d7d21c951872433de708839025c8dfb304b3 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Wed, 11 Sep 2024 11:28:33 -0700 Subject: [PATCH 0084/1708] tool/gocross: remove trimpath from test builds trimpath can be inconvenient for IDEs and LSPs that do not always correctly handle module relative paths, and can also contribute to caching bugs taking effect. We rarely have a real need for trimpath of test produced binaries, so avoiding it should be a net win. 
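Reduced to a sketch, the resulting flag logic looks like this (names follow the autoflagsForTest diff below):

```go
buildFlags := []string{}
if subcommand != "test" {
	// Keep -trimpath for build/install/run, but omit it for `gocross test`
	// so IDEs, LSPs, and debuggers see real source paths in test binaries.
	buildFlags = append(buildFlags, "-trimpath")
}
```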
Updates #2988 Signed-off-by: James Tucker --- tool/gocross/autoflags.go | 6 +++++- tool/gocross/autoflags_test.go | 1 - 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tool/gocross/autoflags.go b/tool/gocross/autoflags.go index 020b19fa5..b28d3bc5d 100644 --- a/tool/gocross/autoflags.go +++ b/tool/gocross/autoflags.go @@ -35,7 +35,7 @@ func autoflagsForTest(argv []string, env *Environment, goroot, nativeGOOS, nativ cc = "cc" targetOS = cmp.Or(env.Get("GOOS", ""), nativeGOOS) targetArch = cmp.Or(env.Get("GOARCH", ""), nativeGOARCH) - buildFlags = []string{"-trimpath"} + buildFlags = []string{} cgoCflags = []string{"-O3", "-std=gnu11", "-g"} cgoLdflags []string ldflags []string @@ -47,6 +47,10 @@ func autoflagsForTest(argv []string, env *Environment, goroot, nativeGOOS, nativ subcommand = argv[1] } + if subcommand != "test" { + buildFlags = append(buildFlags, "-trimpath") + } + switch subcommand { case "build", "env", "install", "run", "test", "list": default: diff --git a/tool/gocross/autoflags_test.go b/tool/gocross/autoflags_test.go index 8f24dd8a3..a0f3edfd2 100644 --- a/tool/gocross/autoflags_test.go +++ b/tool/gocross/autoflags_test.go @@ -163,7 +163,6 @@ GOTOOLCHAIN=local (was ) TS_LINK_FAIL_REFLECT=0 (was )`, wantArgv: []string{ "gocross", "test", - "-trimpath", "-tags=tailscale_go,osusergo,netgo", "-ldflags", "-X tailscale.com/version.longStamp=1.2.3-long -X tailscale.com/version.shortStamp=1.2.3 -X tailscale.com/version.gitCommitStamp=abcd -X tailscale.com/version.extraGitCommitStamp=defg '-extldflags=-static'", "-race", From 94fa6d97c5a25269e9c68595d5fabfd847f9f7b4 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Mon, 28 Oct 2024 16:41:44 +0000 Subject: [PATCH 0085/1708] ipn/ipnlocal: log errors while fetching serial numbers If the client cannot fetch a serial number, write a log message helping the user understand what happened. Also, don't just return the error immediately, since we still have a chance to collect network interface addresses. 
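Roughly, the handler moves from fail-fast to log-and-continue, as sketched here (mirroring the handleC2NPostureIdentityGet change in the diff below):

```go
res.SerialNumbers, err = posture.GetSerialNumbers(b.logf)
if err != nil {
	// Log the failure and keep going: the network interface addresses
	// gathered later in the handler can still be returned.
	b.logf("c2n: GetSerialNumbers returned error: %v", err)
}
```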
Updates #5902 Signed-off-by: Anton Tolchanov --- ipn/ipnlocal/c2n.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index de6ca2321..c3ed32fd8 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -332,12 +332,10 @@ func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http } if choice.ShouldEnable(b.Prefs().PostureChecking()) { - sns, err := posture.GetSerialNumbers(b.logf) + res.SerialNumbers, err = posture.GetSerialNumbers(b.logf) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + b.logf("c2n: GetSerialNumbers returned error: %v", err) } - res.SerialNumbers = sns // TODO(tailscale/corp#21371, 2024-07-10): once this has landed in a stable release // and looks good in client metrics, remove this parameter and always report MAC From 11e96760ff119dcfa60139371570f761e0c26050 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Tue, 29 Oct 2024 13:40:33 +0000 Subject: [PATCH 0086/1708] wgengine/magicsock: fix stats packet counter on derp egress Updates tailscale/corp#22075 Signed-off-by: Anton Tolchanov --- wgengine/magicsock/endpoint.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index ab9f3d47d..1ddde9752 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -991,7 +991,7 @@ func (de *endpoint) send(buffs [][]byte) error { } if stats := de.c.stats.Load(); stats != nil { - stats.UpdateTxPhysical(de.nodeAddr, derpAddr, 1, txBytes) + stats.UpdateTxPhysical(de.nodeAddr, derpAddr, len(buffs), txBytes) } if allOk { return nil From 38af62c7b303d707ba5cc46148809921557e36aa Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Tue, 29 Oct 2024 13:35:12 +0000 Subject: [PATCH 0087/1708] ipn/ipnlocal: remove the primary routes gauge for now Not confident this is the right way to expose this, so let's remote it for now. Updates tailscale/corp#22075 Signed-off-by: Anton Tolchanov --- ipn/ipnlocal/local.go | 9 --------- tsnet/tsnet_test.go | 12 ------------ 2 files changed, 21 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index b01f3a0c0..b91f1337a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -399,11 +399,6 @@ type metrics struct { // approvedRoutes is a metric that reports the number of network routes served by the local node and approved // by the control server. approvedRoutes *usermetric.Gauge - - // primaryRoutes is a metric that reports the number of primary network routes served by the local node. - // A route being a primary route implies that the route is currently served by this node, and not by another - // subnet router in a high availability configuration. - primaryRoutes *usermetric.Gauge } // clientGen is a func that creates a control plane client. @@ -454,8 +449,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo "tailscaled_advertised_routes", "Number of advertised network routes (e.g. by a subnet router)"), approvedRoutes: sys.UserMetricsRegistry().NewGauge( "tailscaled_approved_routes", "Number of approved network routes (e.g. 
by a subnet router)"), - primaryRoutes: sys.UserMetricsRegistry().NewGauge( - "tailscaled_primary_routes", "Number of network routes for which this node is a primary router (in high availability configuration)"), } b := &LocalBackend{ @@ -5477,7 +5470,6 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { // If there is no netmap, the client is going into a "turned off" // state so reset the metrics. b.metrics.approvedRoutes.Set(0) - b.metrics.primaryRoutes.Set(0) return } @@ -5506,7 +5498,6 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { } } b.metrics.approvedRoutes.Set(approved) - b.metrics.primaryRoutes.Set(float64(tsaddr.WithoutExitRoute(nm.SelfNode.PrimaryRoutes()).Len())) } for _, p := range nm.Peers { addNode(p) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 98c1fd4ab..7aebbdd4c 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -1080,13 +1080,6 @@ func TestUserMetrics(t *testing.T) { t.Errorf("metrics1, tailscaled_health_messages: got %v, want %v", got, want) } - // The node is the primary subnet router for 2 routes: - // - 192.0.2.0/24 - // - 192.0.5.1/32 - if got, want := parsedMetrics1["tailscaled_primary_routes"], wantRoutes; got != want { - t.Errorf("metrics1, tailscaled_primary_routes: got %v, want %v", got, want) - } - // Verify that the amount of data recorded in bytes is higher or equal to the // 10 megabytes sent. inboundBytes1 := parsedMetrics1[`tailscaled_inbound_bytes_total{path="direct_ipv4"}`] @@ -1131,11 +1124,6 @@ func TestUserMetrics(t *testing.T) { t.Errorf("metrics2, tailscaled_health_messages: got %v, want %v", got, want) } - // The node is the primary subnet router for 0 routes - if got, want := parsedMetrics2["tailscaled_primary_routes"], 0.0; got != want { - t.Errorf("metrics2, tailscaled_primary_routes: got %v, want %v", got, want) - } - // Verify that the amount of data recorded in bytes is higher or equal than the // 10 megabytes sent. outboundBytes2 := parsedMetrics2[`tailscaled_outbound_bytes_total{path="direct_ipv4"}`] From 9545e36007e5859b0a9aec4052bcb7f7837b0948 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Sat, 26 Oct 2024 18:28:22 +0100 Subject: [PATCH 0088/1708] cmd/tailscale/cli: add 'tailscale metrics' command - `tailscale metrics print`: to show metric values in console - `tailscale metrics write`: to write metrics to a file (with a tempfile & rename dance, which is atomic on Unix). Also, remove the `TS_DEBUG_USER_METRICS` envknob as we are getting more confident in these metrics. Updates tailscale/corp#22075 Signed-off-by: Anton Tolchanov --- cmd/tailscale/cli/cli.go | 1 + cmd/tailscale/cli/metrics.go | 88 ++++++++++++++++++++++++++++++++++++ ipn/localapi/localapi.go | 11 +---- 3 files changed, 91 insertions(+), 9 deletions(-) create mode 100644 cmd/tailscale/cli/metrics.go diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index de6bc2a4e..f786bcea5 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -189,6 +189,7 @@ change in the future. 
ipCmd, dnsCmd, statusCmd, + metricsCmd, pingCmd, ncCmd, sshCmd, diff --git a/cmd/tailscale/cli/metrics.go b/cmd/tailscale/cli/metrics.go new file mode 100644 index 000000000..d5fe9ad81 --- /dev/null +++ b/cmd/tailscale/cli/metrics.go @@ -0,0 +1,88 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/atomicfile" +) + +var metricsCmd = &ffcli.Command{ + Name: "metrics", + ShortHelp: "Show Tailscale metrics", + LongHelp: strings.TrimSpace(` + +The 'tailscale metrics' command shows Tailscale user-facing metrics (as opposed +to internal metrics printed by 'tailscale debug metrics'). + +For more information about Tailscale metrics, refer to +https://tailscale.com/s/client-metrics + +`), + ShortUsage: "tailscale metrics [flags]", + UsageFunc: usageFuncNoDefaultValues, + Exec: runMetricsNoSubcommand, + Subcommands: []*ffcli.Command{ + { + Name: "print", + ShortUsage: "tailscale metrics print", + Exec: runMetricsPrint, + ShortHelp: "Prints current metric values in the Prometheus text exposition format", + }, + { + Name: "write", + ShortUsage: "tailscale metrics write ", + Exec: runMetricsWrite, + ShortHelp: "Writes metric values to a file", + LongHelp: strings.TrimSpace(` + +The 'tailscale metrics write' command writes metric values to a text file provided as its +only argument. It's meant to be used alongside Prometheus node exporter, allowing Tailscale +metrics to be consumed and exported by the textfile collector. + +As an example, to export Tailscale metrics on an Ubuntu system running node exporter, you +can regularly run 'tailscale metrics write /var/lib/prometheus/node-exporter/tailscaled.prom' +using cron or a systemd timer. + + `), + }, + }, +} + +// runMetricsNoSubcommand prints metric values if no subcommand is specified. +func runMetricsNoSubcommand(ctx context.Context, args []string) error { + if len(args) > 0 { + return fmt.Errorf("tailscale metrics: unknown subcommand: %s", args[0]) + } + + return runMetricsPrint(ctx, args) +} + +// runMetricsPrint prints metric values to stdout. +func runMetricsPrint(ctx context.Context, args []string) error { + out, err := localClient.UserMetrics(ctx) + if err != nil { + return err + } + Stdout.Write(out) + return nil +} + +// runMetricsWrite writes metric values to a file. +func runMetricsWrite(ctx context.Context, args []string) error { + if len(args) != 1 { + return errors.New("usage: tailscale metrics write ") + } + path := args[0] + out, err := localClient.UserMetrics(ctx) + if err != nil { + return err + } + return atomicfile.WriteFile(path, out, 0644) +} diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 25ec19121..1d580eca9 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -62,7 +62,6 @@ import ( "tailscale.com/util/osdiag" "tailscale.com/util/progresstracking" "tailscale.com/util/rands" - "tailscale.com/util/testenv" "tailscale.com/version" "tailscale.com/wgengine/magicsock" ) @@ -570,15 +569,9 @@ func (h *Handler) serveMetrics(w http.ResponseWriter, r *http.Request) { clientmetric.WritePrometheusExpositionFormat(w) } -// TODO(kradalby): Remove this once we have landed on a final set of -// metrics to export to clients and consider the metrics stable. -var debugUsermetricsEndpoint = envknob.RegisterBool("TS_DEBUG_USER_METRICS") - +// serveUserMetrics returns user-facing metrics in Prometheus text +// exposition format. 
func (h *Handler) serveUserMetrics(w http.ResponseWriter, r *http.Request) { - if !testenv.InTest() && !debugUsermetricsEndpoint() { - http.Error(w, "usermetrics debug flag not enabled", http.StatusForbidden) - return - } h.b.UserMetricsRegistry().Handler(w, r) } From 0f9a054cba58aa7f1c45d82f18be43ec0ffd592e Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Tue, 29 Oct 2024 13:49:29 -0400 Subject: [PATCH 0089/1708] tstest/tailmac: fix Host.app path generation (#13953) updates tailscale/corp#24197 Generation of the Host.app path was erroneous and tailmac run would not work unless the pwd was tailmac/bin. Now you can be able to invoke tailmac from anywhere. Signed-off-by: Jonathan Nobels --- tstest/tailmac/Swift/TailMac/TailMac.swift | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tstest/tailmac/Swift/TailMac/TailMac.swift b/tstest/tailmac/Swift/TailMac/TailMac.swift index 56f651696..6554d5deb 100644 --- a/tstest/tailmac/Swift/TailMac/TailMac.swift +++ b/tstest/tailmac/Swift/TailMac/TailMac.swift @@ -100,7 +100,10 @@ extension Tailmac { mutating func run() { let process = Process() let stdOutPipe = Pipe() - let appPath = "./Host.app/Contents/MacOS/Host" + + let executablePath = CommandLine.arguments[0] + let executableDirectory = (executablePath as NSString).deletingLastPathComponent + let appPath = executableDirectory + "/Host.app/Contents/MacOS/Host" process.executableURL = URL( fileURLWithPath: appPath, @@ -109,7 +112,7 @@ extension Tailmac { ) if !FileManager.default.fileExists(atPath: appPath) { - fatalError("Could not find Host.app. This must be co-located with the tailmac utility") + fatalError("Could not find Host.app at \(appPath). This must be co-located with the tailmac utility") } process.arguments = ["run", "--id", id] From aecb0ab76bb38c13d29b184380a53a5190c77302 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Tue, 29 Oct 2024 13:49:51 -0400 Subject: [PATCH 0090/1708] tstest/tailmac: add support for mounting host directories in the guest (#13957) updates tailscale/corp#24197 tailmac run now supports the --share option which will allow you to specify a directory on the host which can be mounted in the guest using mount_virtiofs vmshare . Signed-off-by: Jonathan Nobels --- tstest/tailmac/Swift/Common/Config.swift | 1 + .../Swift/Common/TailMacConfigHelper.swift | 13 ++++++++++ tstest/tailmac/Swift/Host/HostCli.swift | 4 +++- tstest/tailmac/Swift/Host/VMController.swift | 7 ++++++ tstest/tailmac/Swift/TailMac/TailMac.swift | 24 +++++++++---------- 5 files changed, 35 insertions(+), 14 deletions(-) diff --git a/tstest/tailmac/Swift/Common/Config.swift b/tstest/tailmac/Swift/Common/Config.swift index 01d5069b0..18b68ae9b 100644 --- a/tstest/tailmac/Swift/Common/Config.swift +++ b/tstest/tailmac/Swift/Common/Config.swift @@ -14,6 +14,7 @@ class Config: Codable { var mac = "52:cc:cc:cc:cc:01" var ethermac = "52:cc:cc:cc:ce:01" var port: UInt32 = 51009 + var sharedDir: String? // The virtual machines ID. Also double as the directory name under which // we will store configuration, block device, etc. 
diff --git a/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift b/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift index 00f999a15..c0961c883 100644 --- a/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift +++ b/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift @@ -141,5 +141,18 @@ struct TailMacConfigHelper { func createKeyboardConfiguration() -> VZKeyboardConfiguration { return VZMacKeyboardConfiguration() } + + func createDirectoryShareConfiguration(tag: String) -> VZDirectorySharingDeviceConfiguration? { + guard let dir = config.sharedDir else { return nil } + + let sharedDir = VZSharedDirectory(url: URL(fileURLWithPath: dir), readOnly: false) + let share = VZSingleDirectoryShare(directory: sharedDir) + + // Create the VZVirtioFileSystemDeviceConfiguration and assign it a unique tag. + let sharingConfiguration = VZVirtioFileSystemDeviceConfiguration(tag: tag) + sharingConfiguration.share = share + + return sharingConfiguration + } } diff --git a/tstest/tailmac/Swift/Host/HostCli.swift b/tstest/tailmac/Swift/Host/HostCli.swift index 1318a09fa..c31478cc3 100644 --- a/tstest/tailmac/Swift/Host/HostCli.swift +++ b/tstest/tailmac/Swift/Host/HostCli.swift @@ -19,10 +19,12 @@ var config: Config = Config() extension HostCli { struct Run: ParsableCommand { @Option var id: String + @Option var share: String? mutating func run() { - print("Running vm with identifier \(id)") config = Config(id) + config.sharedDir = share + print("Running vm with identifier \(id) and sharedDir \(share ?? "")") _ = NSApplicationMain(CommandLine.argc, CommandLine.unsafeArgv) } } diff --git a/tstest/tailmac/Swift/Host/VMController.swift b/tstest/tailmac/Swift/Host/VMController.swift index 8774894c1..fe4a3828b 100644 --- a/tstest/tailmac/Swift/Host/VMController.swift +++ b/tstest/tailmac/Swift/Host/VMController.swift @@ -95,6 +95,13 @@ class VMController: NSObject, VZVirtualMachineDelegate { virtualMachineConfiguration.keyboards = [helper.createKeyboardConfiguration()] virtualMachineConfiguration.socketDevices = [helper.createSocketDeviceConfiguration()] + if let dir = config.sharedDir, let shareConfig = helper.createDirectoryShareConfiguration(tag: "vmshare") { + print("Sharing \(dir) as vmshare. Use: mount_virtiofs vmshare in the guest to mount.") + virtualMachineConfiguration.directorySharingDevices = [shareConfig] + } else { + print("No shared directory created. \(config.sharedDir ?? "none") was requested.") + } + try! virtualMachineConfiguration.validate() try! virtualMachineConfiguration.validateSaveRestoreSupport() diff --git a/tstest/tailmac/Swift/TailMac/TailMac.swift b/tstest/tailmac/Swift/TailMac/TailMac.swift index 6554d5deb..84aa5e498 100644 --- a/tstest/tailmac/Swift/TailMac/TailMac.swift +++ b/tstest/tailmac/Swift/TailMac/TailMac.swift @@ -95,6 +95,7 @@ extension Tailmac { extension Tailmac { struct Run: ParsableCommand { @Option(help: "The vm identifier") var id: String + @Option(help: "Optional share directory") var share: String? @Flag(help: "Tail the TailMac log output instead of returning immediatly") var tail mutating func run() { @@ -115,7 +116,12 @@ extension Tailmac { fatalError("Could not find Host.app at \(appPath). 
This must be co-located with the tailmac utility") } - process.arguments = ["run", "--id", id] + var args = ["run", "--id", id] + if let share { + args.append("--share") + args.append(share) + } + process.arguments = args do { process.standardOutput = stdOutPipe @@ -124,26 +130,18 @@ extension Tailmac { fatalError("Unable to launch the vm process") } - // This doesn't print until we exit which is not ideal, but at least we - // get the output if tail != 0 { + // (jonathan)TODO: How do we get the process output in real time? + // The child process only seems to flush to stdout on completion let outHandle = stdOutPipe.fileHandleForReading - - let queue = OperationQueue() - NotificationCenter.default.addObserver( - forName: NSNotification.Name.NSFileHandleDataAvailable, - object: outHandle, queue: queue) - { - notification -> Void in - let data = outHandle.availableData + outHandle.readabilityHandler = { handle in + let data = handle.availableData if data.count > 0 { if let str = String(data: data, encoding: String.Encoding.utf8) { print(str) } } - outHandle.waitForDataInBackgroundAndNotify() } - outHandle.waitForDataInBackgroundAndNotify() process.waitUntilExit() } } From 856ea2376b59df8f84f96119559d4273588a04ac Mon Sep 17 00:00:00 2001 From: Tim Walters Date: Wed, 23 Oct 2024 14:27:00 -0500 Subject: [PATCH 0091/1708] wgengine/magicsock: log home DERP changes with latency This adds additional logging on DERP home changes to allow better troubleshooting. Updates tailscale/corp#18095 Signed-off-by: Tim Walters --- wgengine/magicsock/derp.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index bfee02f6e..704ce3c4f 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -158,10 +158,10 @@ func (c *Conn) maybeSetNearestDERP(report *netcheck.Report) (preferredDERP int) } else { connectedToControl = c.health.GetInPollNetMap() } + c.mu.Lock() + myDerp := c.myDerp + c.mu.Unlock() if !connectedToControl { - c.mu.Lock() - myDerp := c.myDerp - c.mu.Unlock() if myDerp != 0 { metricDERPHomeNoChangeNoControl.Add(1) return myDerp @@ -178,6 +178,11 @@ func (c *Conn) maybeSetNearestDERP(report *netcheck.Report) (preferredDERP int) // one. preferredDERP = c.pickDERPFallback() } + if preferredDERP != myDerp { + c.logf( + "magicsock: home DERP changing from derp-%d [%dms] to derp-%d [%dms]", + c.myDerp, report.RegionLatency[myDerp].Milliseconds(), preferredDERP, report.RegionLatency[preferredDERP].Milliseconds()) + } if !c.setNearestDERP(preferredDERP) { preferredDERP = 0 } From 1103044598ac2897a3f2f6687dc9d2b3d23f7da5 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 30 Oct 2024 05:45:31 -0500 Subject: [PATCH 0092/1708] cmd/k8s-operator,k8s-operator: add topology spread constraints to ProxyClass (#13959) Now when we have HA for egress proxies, it makes sense to support topology spread constraints that would allow users to define more complex topologies of how proxy Pods need to be deployed in relation with other Pods/across regions etc. 
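As a rough Go-level example of the new knob (field names match types_proxyclass.go and sts_test.go below; the import aliases and label values are illustrative only), a ProxyClass Pod spec spreading proxy Pods across zones could carry:

```go
// Assumed import aliases, as used elsewhere in the operator code:
//   corev1 "k8s.io/api/core/v1"
//   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//   tsapi  "tailscale.com/k8s-operator/apis/v1alpha1"
pod := tsapi.Pod{
	TopologySpreadConstraints: []corev1.TopologySpreadConstraint{{
		MaxSkew:           1,
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: "DoNotSchedule",
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "ts-proxies"}, // example selector only
		},
	}},
}
```

The operator then copies these constraints verbatim onto the proxy StatefulSet's Pod template (see the sts.go change below).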
Updates tailscale/tailscale#13406 Signed-off-by: Irbe Krumina --- .../crds/tailscale.com_proxyclasses.yaml | 176 ++++++++++++++++++ .../deploy/manifests/operator.yaml | 176 ++++++++++++++++++ cmd/k8s-operator/sts.go | 1 + cmd/k8s-operator/sts_test.go | 13 ++ k8s-operator/api.md | 1 + .../apis/v1alpha1/types_proxyclass.go | 4 + .../apis/v1alpha1/zz_generated.deepcopy.go | 7 + 7 files changed, 378 insertions(+) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index 0fff30516..7086138c0 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -1896,6 +1896,182 @@ spec: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. type: string + topologySpreadConstraints: + description: |- + Proxy Pod's topology spread constraints. + By default Tailscale Kubernetes operator does not apply any topology spread constraints. + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + type: array + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + type: object + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + type: array + items: + type: string + x-kubernetes-list-type: atomic + x-kubernetes-list-type: atomic + matchLabels: + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. 
A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + type: array + items: + type: string + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + type: integer + format: int32 + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + type: integer + format: int32 + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. 
+ + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string tailscale: description: |- TailscaleConfig contains options to configure the tailscale-specific diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 1a812b736..203a67066 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2323,6 +2323,182 @@ spec: type: string type: object type: array + topologySpreadConstraints: + description: |- + Proxy Pod's topology spread constraints. + By default Tailscale Kubernetes operator does not apply any topology spread constraints. + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. 
+ + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. 
+ type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array type: object type: object tailscale: diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 6378a8263..e89b9c930 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -718,6 +718,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, ss.Spec.Template.Spec.NodeSelector = wantsPod.NodeSelector ss.Spec.Template.Spec.Affinity = wantsPod.Affinity ss.Spec.Template.Spec.Tolerations = wantsPod.Tolerations + ss.Spec.Template.Spec.TopologySpreadConstraints = wantsPod.TopologySpreadConstraints // Update containers. updateContainer := func(overlay *tsapi.Container, base corev1.Container) corev1.Container { diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index b2b2c8b93..7263c56c3 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -18,6 +18,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/yaml" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/types/ptr" @@ -73,6 +74,16 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { NodeSelector: map[string]string{"beta.kubernetes.io/os": "linux"}, Affinity: &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{}}}, Tolerations: []corev1.Toleration{{Key: "", Operator: "Exists"}}, + TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ + { + WhenUnsatisfiable: "DoNotSchedule", + TopologyKey: "kubernetes.io/hostname", + MaxSkew: 3, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + }, + }, TailscaleContainer: &tsapi.Container{ SecurityContext: &corev1.SecurityContext{ Privileged: ptr.To(true), @@ -159,6 +170,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Spec.NodeSelector = proxyClassAllOpts.Spec.StatefulSet.Pod.NodeSelector wantSS.Spec.Template.Spec.Affinity = proxyClassAllOpts.Spec.StatefulSet.Pod.Affinity wantSS.Spec.Template.Spec.Tolerations = proxyClassAllOpts.Spec.StatefulSet.Pod.Tolerations + wantSS.Spec.Template.Spec.TopologySpreadConstraints = proxyClassAllOpts.Spec.StatefulSet.Pod.TopologySpreadConstraints wantSS.Spec.Template.Spec.Containers[0].SecurityContext = proxyClassAllOpts.Spec.StatefulSet.Pod.TailscaleContainer.SecurityContext wantSS.Spec.Template.Spec.InitContainers[0].SecurityContext = proxyClassAllOpts.Spec.StatefulSet.Pod.TailscaleInitContainer.SecurityContext wantSS.Spec.Template.Spec.Containers[0].Resources = proxyClassAllOpts.Spec.StatefulSet.Pod.TailscaleContainer.Resources @@ -201,6 +213,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Spec.NodeSelector = proxyClassAllOpts.Spec.StatefulSet.Pod.NodeSelector wantSS.Spec.Template.Spec.Affinity = proxyClassAllOpts.Spec.StatefulSet.Pod.Affinity wantSS.Spec.Template.Spec.Tolerations = proxyClassAllOpts.Spec.StatefulSet.Pod.Tolerations + wantSS.Spec.Template.Spec.TopologySpreadConstraints = proxyClassAllOpts.Spec.StatefulSet.Pod.TopologySpreadConstraints wantSS.Spec.Template.Spec.Containers[0].SecurityContext = proxyClassAllOpts.Spec.StatefulSet.Pod.TailscaleContainer.SecurityContext wantSS.Spec.Template.Spec.Containers[0].Resources = proxyClassAllOpts.Spec.StatefulSet.Pod.TailscaleContainer.Resources wantSS.Spec.Template.Spec.Containers[0].Env = 
append(wantSS.Spec.Template.Spec.Containers[0].Env, []corev1.EnvVar{{Name: "foo", Value: "bar"}, {Name: "TS_USERSPACE", Value: "true"}, {Name: "bar"}}...) diff --git a/k8s-operator/api.md b/k8s-operator/api.md index e8a6e248a..dae969516 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -381,6 +381,7 @@ _Appears in:_ | `nodeName` _string_ | Proxy Pod's node name.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | | `nodeSelector` _object (keys:string, values:string)_ | Proxy Pod's node selector.
By default Tailscale Kubernetes operator does not apply any node
selector.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | | `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | Proxy Pod's tolerations.
By default Tailscale Kubernetes operator does not apply any
tolerations.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | +| `topologySpreadConstraints` _[TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#topologyspreadconstraint-v1-core) array_ | Proxy Pod's topology spread constraints.
By default Tailscale Kubernetes operator does not apply any topology spread constraints.
https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ | | | #### ProxyClass diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index 7f415bc34..0a224b796 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -154,7 +154,11 @@ type Pod struct { // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling // +optional Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + // Proxy Pod's topology spread constraints. + // By default Tailscale Kubernetes operator does not apply any topology spread constraints. + // https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ // +optional + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` } type Metrics struct { diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index ba4ff40e4..f53165b88 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -392,6 +392,13 @@ func (in *Pod) DeepCopyInto(out *Pod) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]corev1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod. From 2336c340c4fc72758a8e7bae15062fb78f98d895 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 18 Oct 2024 10:18:06 -0500 Subject: [PATCH 0093/1708] util/syspolicy: implement a syspolicy store that reads settings from environment variables In this PR, we implement (but do not use yet, pending #13727 review) a syspolicy/source.Store that reads policy settings from environment variables. It converts a CamelCase setting.Key, such as AuthKey or ExitNodeID, to a SCREAMING_SNAKE_CASE, TS_-prefixed environment variable name, such as TS_AUTH_KEY and TS_EXIT_NODE_ID. It then looks up the variable and attempts to parse it according to the expected value type. If the environment variable is not set, the policy setting is considered not configured in this store (the syspolicy package will still read it from other sources). Similarly, if the environment variable has an invalid value for the setting type, it won't be used (though the reported/logged error will differ). 
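A short usage sketch, assuming nothing beyond the EnvPolicyStore added below and the key-to-variable mapping described above (the example value is made up):

```go
// Assumed imports: "fmt", "log", "os", "tailscale.com/util/syspolicy/source".
// The key "ExitNodeID" is looked up as the TS_EXIT_NODE_ID environment variable.
os.Setenv("TS_EXIT_NODE_ID", "node-abc123")

var store source.EnvPolicyStore
id, err := store.ReadString("ExitNodeID")
if err != nil {
	// setting.ErrNotConfigured when the variable is unset; the numeric and
	// boolean readers also return setting.ErrTypeMismatch on unparsable values.
	log.Fatal(err)
}
fmt.Println(id) // node-abc123
```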
Updates #13193 Updates #12687 Signed-off-by: Nick Khyl --- util/syspolicy/internal/metrics/metrics.go | 2 +- util/syspolicy/setting/key.go | 2 +- util/syspolicy/source/env_policy_store.go | 159 ++++++++ .../syspolicy/source/env_policy_store_test.go | 354 ++++++++++++++++++ util/syspolicy/source/policy_store_windows.go | 6 +- 5 files changed, 518 insertions(+), 5 deletions(-) create mode 100644 util/syspolicy/source/env_policy_store.go create mode 100644 util/syspolicy/source/env_policy_store_test.go diff --git a/util/syspolicy/internal/metrics/metrics.go b/util/syspolicy/internal/metrics/metrics.go index 2ea02278a..0a2aa1192 100644 --- a/util/syspolicy/internal/metrics/metrics.go +++ b/util/syspolicy/internal/metrics/metrics.go @@ -284,7 +284,7 @@ func SetHooksForTest(tb internal.TB, addMetric, setMetric metricFn) { } func newSettingMetric(key setting.Key, scope setting.Scope, suffix string, typ clientmetric.Type) metric { - name := strings.ReplaceAll(string(key), setting.KeyPathSeparator, "_") + name := strings.ReplaceAll(string(key), string(setting.KeyPathSeparator), "_") return newMetric([]string{name, metricScopeName(scope), suffix}, typ) } diff --git a/util/syspolicy/setting/key.go b/util/syspolicy/setting/key.go index 406fde132..aa7606d36 100644 --- a/util/syspolicy/setting/key.go +++ b/util/syspolicy/setting/key.go @@ -10,4 +10,4 @@ package setting type Key string // KeyPathSeparator allows logical grouping of policy settings into categories. -const KeyPathSeparator = "/" +const KeyPathSeparator = '/' diff --git a/util/syspolicy/source/env_policy_store.go b/util/syspolicy/source/env_policy_store.go new file mode 100644 index 000000000..2f07fffca --- /dev/null +++ b/util/syspolicy/source/env_policy_store.go @@ -0,0 +1,159 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package source + +import ( + "fmt" + "os" + "strconv" + "strings" + "unicode/utf8" + + "github.com/pkg/errors" + "tailscale.com/util/syspolicy/setting" +) + +var lookupEnv = os.LookupEnv // test hook + +var _ Store = (*EnvPolicyStore)(nil) + +// EnvPolicyStore is a [Store] that reads policy settings from environment variables. +type EnvPolicyStore struct{} + +// ReadString implements [Store]. +func (s *EnvPolicyStore) ReadString(key setting.Key) (string, error) { + _, str, err := s.lookupSettingVariable(key) + if err != nil { + return "", err + } + return str, nil +} + +// ReadUInt64 implements [Store]. +func (s *EnvPolicyStore) ReadUInt64(key setting.Key) (uint64, error) { + name, str, err := s.lookupSettingVariable(key) + if err != nil { + return 0, err + } + if str == "" { + return 0, setting.ErrNotConfigured + } + value, err := strconv.ParseUint(str, 0, 64) + if err != nil { + return 0, fmt.Errorf("%s: %w: %q is not a valid uint64", name, setting.ErrTypeMismatch, str) + } + return value, nil +} + +// ReadBoolean implements [Store]. +func (s *EnvPolicyStore) ReadBoolean(key setting.Key) (bool, error) { + name, str, err := s.lookupSettingVariable(key) + if err != nil { + return false, err + } + if str == "" { + return false, setting.ErrNotConfigured + } + value, err := strconv.ParseBool(str) + if err != nil { + return false, fmt.Errorf("%s: %w: %q is not a valid bool", name, setting.ErrTypeMismatch, str) + } + return value, nil +} + +// ReadStringArray implements [Store]. 
+func (s *EnvPolicyStore) ReadStringArray(key setting.Key) ([]string, error) { + _, str, err := s.lookupSettingVariable(key) + if err != nil || str == "" { + return nil, err + } + var dst int + res := strings.Split(str, ",") + for src := range res { + res[dst] = strings.TrimSpace(res[src]) + if res[dst] != "" { + dst++ + } + } + return res[0:dst], nil +} + +func (s *EnvPolicyStore) lookupSettingVariable(key setting.Key) (name, value string, err error) { + name, err = keyToEnvVarName(key) + if err != nil { + return "", "", err + } + value, ok := lookupEnv(name) + if !ok { + return name, "", setting.ErrNotConfigured + } + return name, value, nil +} + +var ( + errEmptyKey = errors.New("key must not be empty") + errInvalidKey = errors.New("key must consist of alphanumeric characters and slashes") +) + +// keyToEnvVarName returns the environment variable name for a given policy +// setting key, or an error if the key is invalid. It converts CamelCase keys into +// underscore-separated words and prepends the variable name with the TS prefix. +// For example: AuthKey => TS_AUTH_KEY, ExitNodeAllowLANAccess => TS_EXIT_NODE_ALLOW_LAN_ACCESS, etc. +// +// It's fine to use this in [EnvPolicyStore] without caching variable names since it's not a hot path. +// [EnvPolicyStore] is not a [Changeable] policy store, so the conversion will only happen once. +func keyToEnvVarName(key setting.Key) (string, error) { + if len(key) == 0 { + return "", errEmptyKey + } + + isLower := func(c byte) bool { return 'a' <= c && c <= 'z' } + isUpper := func(c byte) bool { return 'A' <= c && c <= 'Z' } + isLetter := func(c byte) bool { return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') } + isDigit := func(c byte) bool { return '0' <= c && c <= '9' } + + words := make([]string, 0, 8) + words = append(words, "TS") + var currentWord strings.Builder + for i := 0; i < len(key); i++ { + c := key[i] + if c >= utf8.RuneSelf { + return "", errInvalidKey + } + + var split bool + switch { + case isLower(c): + c -= 'a' - 'A' // make upper + split = currentWord.Len() > 0 && !isLetter(key[i-1]) + case isUpper(c): + if currentWord.Len() > 0 { + prevUpper := isUpper(key[i-1]) + nextLower := i < len(key)-1 && isLower(key[i+1]) + split = !prevUpper || nextLower // split on case transition + } + case isDigit(c): + split = currentWord.Len() > 0 && !isDigit(key[i-1]) + case c == setting.KeyPathSeparator: + words = append(words, currentWord.String()) + currentWord.Reset() + continue + default: + return "", errInvalidKey + } + + if split { + words = append(words, currentWord.String()) + currentWord.Reset() + } + + currentWord.WriteByte(c) + } + + if currentWord.Len() > 0 { + words = append(words, currentWord.String()) + } + + return strings.Join(words, "_"), nil +} diff --git a/util/syspolicy/source/env_policy_store_test.go b/util/syspolicy/source/env_policy_store_test.go new file mode 100644 index 000000000..364a6104d --- /dev/null +++ b/util/syspolicy/source/env_policy_store_test.go @@ -0,0 +1,354 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package source + +import ( + "cmp" + "errors" + "math" + "reflect" + "strconv" + "testing" + + "tailscale.com/util/syspolicy/setting" +) + +func TestKeyToVariableName(t *testing.T) { + tests := []struct { + name string + key setting.Key + want string + wantErr error + }{ + { + name: "empty", + key: "", + wantErr: errEmptyKey, + }, + { + name: "lowercase", + key: "tailnet", + want: "TS_TAILNET", + }, + { + name: "CamelCase", + key: "AuthKey", + want: 
"TS_AUTH_KEY", + }, + { + name: "LongerCamelCase", + key: "ManagedByOrganizationName", + want: "TS_MANAGED_BY_ORGANIZATION_NAME", + }, + { + name: "UPPERCASE", + key: "UPPERCASE", + want: "TS_UPPERCASE", + }, + { + name: "WithAbbrev/Front", + key: "DNSServer", + want: "TS_DNS_SERVER", + }, + { + name: "WithAbbrev/Middle", + key: "ExitNodeAllowLANAccess", + want: "TS_EXIT_NODE_ALLOW_LAN_ACCESS", + }, + { + name: "WithAbbrev/Back", + key: "ExitNodeID", + want: "TS_EXIT_NODE_ID", + }, + { + name: "WithDigits/Single/Front", + key: "0TestKey", + want: "TS_0_TEST_KEY", + }, + { + name: "WithDigits/Multi/Front", + key: "64TestKey", + want: "TS_64_TEST_KEY", + }, + { + name: "WithDigits/Single/Middle", + key: "Test0Key", + want: "TS_TEST_0_KEY", + }, + { + name: "WithDigits/Multi/Middle", + key: "Test64Key", + want: "TS_TEST_64_KEY", + }, + { + name: "WithDigits/Single/Back", + key: "TestKey0", + want: "TS_TEST_KEY_0", + }, + { + name: "WithDigits/Multi/Back", + key: "TestKey64", + want: "TS_TEST_KEY_64", + }, + { + name: "WithDigits/Multi/Back", + key: "TestKey64", + want: "TS_TEST_KEY_64", + }, + { + name: "WithPathSeparators/Single", + key: "Key/Subkey", + want: "TS_KEY_SUBKEY", + }, + { + name: "WithPathSeparators/Multi", + key: "Root/Level1/Level2", + want: "TS_ROOT_LEVEL_1_LEVEL_2", + }, + { + name: "Mixed", + key: "Network/DNSServer/IPAddress", + want: "TS_NETWORK_DNS_SERVER_IP_ADDRESS", + }, + { + name: "Non-Alphanumeric/NonASCII/1", + key: "ж", + wantErr: errInvalidKey, + }, + { + name: "Non-Alphanumeric/NonASCII/2", + key: "KeyжName", + wantErr: errInvalidKey, + }, + { + name: "Non-Alphanumeric/Space", + key: "Key Name", + wantErr: errInvalidKey, + }, + { + name: "Non-Alphanumeric/Punct", + key: "Key!Name", + wantErr: errInvalidKey, + }, + { + name: "Non-Alphanumeric/Backslash", + key: `Key\Name`, + wantErr: errInvalidKey, + }, + } + for _, tt := range tests { + t.Run(cmp.Or(tt.name, string(tt.key)), func(t *testing.T) { + got, err := keyToEnvVarName(tt.key) + checkError(t, err, tt.wantErr, true) + + if got != tt.want { + t.Fatalf("got %q; want %q", got, tt.want) + } + }) + } +} + +func TestEnvPolicyStore(t *testing.T) { + blankEnv := func(string) (string, bool) { return "", false } + makeEnv := func(wantName, value string) func(string) (string, bool) { + return func(gotName string) (string, bool) { + if gotName != wantName { + return "", false + } + return value, true + } + } + tests := []struct { + name string + key setting.Key + lookup func(string) (string, bool) + want any + wantErr error + }{ + { + name: "NotConfigured/String", + key: "AuthKey", + lookup: blankEnv, + wantErr: setting.ErrNotConfigured, + want: "", + }, + { + name: "Configured/String/Empty", + key: "AuthKey", + lookup: makeEnv("TS_AUTH_KEY", ""), + want: "", + }, + { + name: "Configured/String/NonEmpty", + key: "AuthKey", + lookup: makeEnv("TS_AUTH_KEY", "ABC123"), + want: "ABC123", + }, + { + name: "NotConfigured/UInt64", + key: "IntegerSetting", + lookup: blankEnv, + wantErr: setting.ErrNotConfigured, + want: uint64(0), + }, + { + name: "Configured/UInt64/Empty", + key: "IntegerSetting", + lookup: makeEnv("TS_INTEGER_SETTING", ""), + wantErr: setting.ErrNotConfigured, + want: uint64(0), + }, + { + name: "Configured/UInt64/Zero", + key: "IntegerSetting", + lookup: makeEnv("TS_INTEGER_SETTING", "0"), + want: uint64(0), + }, + { + name: "Configured/UInt64/NonZero", + key: "IntegerSetting", + lookup: makeEnv("TS_INTEGER_SETTING", "12345"), + want: uint64(12345), + }, + { + name: "Configured/UInt64/MaxUInt64", + key: 
"IntegerSetting", + lookup: makeEnv("TS_INTEGER_SETTING", strconv.FormatUint(math.MaxUint64, 10)), + want: uint64(math.MaxUint64), + }, + { + name: "Configured/UInt64/Negative", + key: "IntegerSetting", + lookup: makeEnv("TS_INTEGER_SETTING", "-1"), + wantErr: setting.ErrTypeMismatch, + want: uint64(0), + }, + { + name: "Configured/UInt64/Hex", + key: "IntegerSetting", + lookup: makeEnv("TS_INTEGER_SETTING", "0xDEADBEEF"), + want: uint64(0xDEADBEEF), + }, + { + name: "NotConfigured/Bool", + key: "LogSCMInteractions", + lookup: blankEnv, + wantErr: setting.ErrNotConfigured, + want: false, + }, + { + name: "Configured/Bool/Empty", + key: "LogSCMInteractions", + lookup: makeEnv("TS_LOG_SCM_INTERACTIONS", ""), + wantErr: setting.ErrNotConfigured, + want: false, + }, + { + name: "Configured/Bool/True", + key: "LogSCMInteractions", + lookup: makeEnv("TS_LOG_SCM_INTERACTIONS", "true"), + want: true, + }, + { + name: "Configured/Bool/False", + key: "LogSCMInteractions", + lookup: makeEnv("TS_LOG_SCM_INTERACTIONS", "False"), + want: false, + }, + { + name: "Configured/Bool/1", + key: "LogSCMInteractions", + lookup: makeEnv("TS_LOG_SCM_INTERACTIONS", "1"), + want: true, + }, + { + name: "Configured/Bool/0", + key: "LogSCMInteractions", + lookup: makeEnv("TS_LOG_SCM_INTERACTIONS", "0"), + want: false, + }, + { + name: "Configured/Bool/Invalid", + key: "IntegerSetting", + lookup: makeEnv("TS_INTEGER_SETTING", "NotABool"), + wantErr: setting.ErrTypeMismatch, + want: false, + }, + { + name: "NotConfigured/StringArray", + key: "AllowedSuggestedExitNodes", + lookup: blankEnv, + wantErr: setting.ErrNotConfigured, + want: []string(nil), + }, + { + name: "Configured/StringArray/Empty", + key: "AllowedSuggestedExitNodes", + lookup: makeEnv("TS_ALLOWED_SUGGESTED_EXIT_NODES", ""), + want: []string(nil), + }, + { + name: "Configured/StringArray/Spaces", + key: "AllowedSuggestedExitNodes", + lookup: makeEnv("TS_ALLOWED_SUGGESTED_EXIT_NODES", " \t "), + want: []string{}, + }, + { + name: "Configured/StringArray/Single", + key: "AllowedSuggestedExitNodes", + lookup: makeEnv("TS_ALLOWED_SUGGESTED_EXIT_NODES", "NodeA"), + want: []string{"NodeA"}, + }, + { + name: "Configured/StringArray/Multi", + key: "AllowedSuggestedExitNodes", + lookup: makeEnv("TS_ALLOWED_SUGGESTED_EXIT_NODES", "NodeA,NodeB,NodeC"), + want: []string{"NodeA", "NodeB", "NodeC"}, + }, + { + name: "Configured/StringArray/WithBlank", + key: "AllowedSuggestedExitNodes", + lookup: makeEnv("TS_ALLOWED_SUGGESTED_EXIT_NODES", "NodeA,\t,, ,NodeB"), + want: []string{"NodeA", "NodeB"}, + }, + } + for _, tt := range tests { + t.Run(cmp.Or(tt.name, string(tt.key)), func(t *testing.T) { + oldLookupEnv := lookupEnv + t.Cleanup(func() { lookupEnv = oldLookupEnv }) + lookupEnv = tt.lookup + + var got any + var err error + var store EnvPolicyStore + switch tt.want.(type) { + case string: + got, err = store.ReadString(tt.key) + case uint64: + got, err = store.ReadUInt64(tt.key) + case bool: + got, err = store.ReadBoolean(tt.key) + case []string: + got, err = store.ReadStringArray(tt.key) + } + checkError(t, err, tt.wantErr, false) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("got %v; want %v", got, tt.want) + } + }) + } +} + +func checkError(tb testing.TB, got, want error, fatal bool) { + tb.Helper() + f := tb.Errorf + if fatal { + f = tb.Fatalf + } + if (want == nil && got != nil) || + (want != nil && got == nil) || + (want != nil && got != nil && !errors.Is(got, want) && want.Error() != got.Error()) { + f("gotErr: %v; wantErr: %v", got, want) + } +} diff 
--git a/util/syspolicy/source/policy_store_windows.go b/util/syspolicy/source/policy_store_windows.go index f526b4ce1..86e2254e0 100644 --- a/util/syspolicy/source/policy_store_windows.go +++ b/util/syspolicy/source/policy_store_windows.go @@ -319,9 +319,9 @@ func (ps *PlatformPolicyStore) ReadStringArray(key setting.Key) ([]string, error // If there are no [setting.KeyPathSeparator]s in the key, the policy setting value // is meant to be stored directly under {HKLM,HKCU}\Software\Policies\Tailscale. func splitSettingKey(key setting.Key) (path, valueName string) { - if idx := strings.LastIndex(string(key), setting.KeyPathSeparator); idx != -1 { - path = strings.ReplaceAll(string(key[:idx]), setting.KeyPathSeparator, `\`) - valueName = string(key[idx+len(setting.KeyPathSeparator):]) + if idx := strings.LastIndexByte(string(key), setting.KeyPathSeparator); idx != -1 { + path = strings.ReplaceAll(string(key[:idx]), string(setting.KeyPathSeparator), `\`) + valueName = string(key[idx+1:]) return path, valueName } return "", string(key) From 2cc1100d242df512612781187eaa898d0de133dc Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 30 Oct 2024 12:01:20 -0500 Subject: [PATCH 0094/1708] util/syspolicy/source: use errors instead of github.com/pkg/errors Updates #12687 Signed-off-by: Nick Khyl --- util/syspolicy/source/env_policy_store.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/syspolicy/source/env_policy_store.go b/util/syspolicy/source/env_policy_store.go index 2f07fffca..61065ceff 100644 --- a/util/syspolicy/source/env_policy_store.go +++ b/util/syspolicy/source/env_policy_store.go @@ -4,13 +4,13 @@ package source import ( + "errors" "fmt" "os" "strconv" "strings" "unicode/utf8" - "github.com/pkg/errors" "tailscale.com/util/syspolicy/setting" ) From 2a2228f97b625b20f5d62092b9f17730078a7fb4 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 29 Oct 2024 11:24:46 -0500 Subject: [PATCH 0095/1708] util/syspolicy/setting: make setting.RawItem JSON-marshallable We add setting.RawValue, a new type that facilitates unmarshalling JSON numbers and arrays as uint64 and []string (instead of float64 and []any) for policy setting values. We then use it to make setting.RawItem JSON-marshallable and update the tests. Updates #12687 Signed-off-by: Nick Khyl --- types/opt/value.go | 2 +- util/syspolicy/setting/raw_item.go | 123 ++++++++++-- util/syspolicy/setting/raw_item_test.go | 101 ++++++++++ util/syspolicy/setting/snapshot_test.go | 251 ++++++++++++------------ 4 files changed, 336 insertions(+), 141 deletions(-) create mode 100644 util/syspolicy/setting/raw_item_test.go diff --git a/types/opt/value.go b/types/opt/value.go index 54fab7a53..b47b03c81 100644 --- a/types/opt/value.go +++ b/types/opt/value.go @@ -36,7 +36,7 @@ func ValueOf[T any](v T) Value[T] { } // String implements [fmt.Stringer]. 
-func (o *Value[T]) String() string { +func (o Value[T]) String() string { if !o.set { return fmt.Sprintf("(empty[%T])", o.value) } diff --git a/util/syspolicy/setting/raw_item.go b/util/syspolicy/setting/raw_item.go index 30480d892..cf46e54b7 100644 --- a/util/syspolicy/setting/raw_item.go +++ b/util/syspolicy/setting/raw_item.go @@ -5,7 +5,11 @@ package setting import ( "fmt" + "reflect" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "tailscale.com/types/opt" "tailscale.com/types/structs" ) @@ -17,10 +21,15 @@ import ( // or converted from strings, these setting types predate the typed policy // hierarchies, and must be supported at this layer. type RawItem struct { - _ structs.Incomparable - value any - err *ErrorText - origin *Origin // or nil + _ structs.Incomparable + data rawItemJSON +} + +// rawItemJSON holds JSON-marshallable data for [RawItem]. +type rawItemJSON struct { + Value RawValue `json:",omitzero"` + Error *ErrorText `json:",omitzero"` // or nil + Origin *Origin `json:",omitzero"` // or nil } // RawItemOf returns a [RawItem] with the specified value. @@ -30,20 +39,20 @@ func RawItemOf(value any) RawItem { // RawItemWith returns a [RawItem] with the specified value, error and origin. func RawItemWith(value any, err *ErrorText, origin *Origin) RawItem { - return RawItem{value: value, err: err, origin: origin} + return RawItem{data: rawItemJSON{Value: RawValue{opt.ValueOf(value)}, Error: err, Origin: origin}} } // Value returns the value of the policy setting, or nil if the policy setting // is not configured, or an error occurred while reading it. func (i RawItem) Value() any { - return i.value + return i.data.Value.Get() } // Error returns the error that occurred when reading the policy setting, // or nil if no error occurred. func (i RawItem) Error() error { - if i.err != nil { - return i.err + if i.data.Error != nil { + return i.data.Error } return nil } @@ -51,17 +60,103 @@ func (i RawItem) Error() error { // Origin returns an optional [Origin] indicating where the policy setting is // configured. func (i RawItem) Origin() *Origin { - return i.origin + return i.data.Origin } // String implements [fmt.Stringer]. func (i RawItem) String() string { var suffix string - if i.origin != nil { - suffix = fmt.Sprintf(" - {%v}", i.origin) + if i.data.Origin != nil { + suffix = fmt.Sprintf(" - {%v}", i.data.Origin) + } + if i.data.Error != nil { + return fmt.Sprintf("Error{%q}%s", i.data.Error.Error(), suffix) + } + return fmt.Sprintf("%v%s", i.data.Value.Value, suffix) +} + +// MarshalJSONV2 implements [jsonv2.MarshalerV2]. +func (i RawItem) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { + return jsonv2.MarshalEncode(out, &i.data, opts) +} + +// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. +func (i *RawItem) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { + return jsonv2.UnmarshalDecode(in, &i.data, opts) +} + +// MarshalJSON implements [json.Marshaler]. +func (i RawItem) MarshalJSON() ([]byte, error) { + return jsonv2.Marshal(i) // uses MarshalJSONV2 +} + +// UnmarshalJSON implements [json.Unmarshaler]. +func (i *RawItem) UnmarshalJSON(b []byte) error { + return jsonv2.Unmarshal(b, i) // uses UnmarshalJSONV2 +} + +// RawValue represents a raw policy setting value read from a policy store. 
+// It is JSON-marshallable and facilitates unmarshalling of JSON values +// into corresponding policy setting types, with special handling for JSON numbers +// (unmarshalled as float64) and JSON string arrays (unmarshalled as []string). +// See also [RawValue.UnmarshalJSONV2]. +type RawValue struct { + opt.Value[any] +} + +// RawValueType is a constraint that permits raw setting value types. +type RawValueType interface { + bool | uint64 | string | []string +} + +// RawValueOf returns a new [RawValue] holding the specified value. +func RawValueOf[T RawValueType](v T) RawValue { + return RawValue{opt.ValueOf[any](v)} +} + +// MarshalJSONV2 implements [jsonv2.MarshalerV2]. +func (v RawValue) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { + return jsonv2.MarshalEncode(out, v.Value, opts) +} + +// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2] by attempting to unmarshal +// a JSON value as one of the supported policy setting value types (bool, string, uint64, or []string), +// based on the JSON value type. It fails if the JSON value is an object, if it's a JSON number that +// cannot be represented as a uint64, or if a JSON array contains anything other than strings. +func (v *RawValue) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { + var valPtr any + switch k := in.PeekKind(); k { + case 't', 'f': + valPtr = new(bool) + case '"': + valPtr = new(string) + case '0': + valPtr = new(uint64) // unmarshal JSON numbers as uint64 + case '[', 'n': + valPtr = new([]string) // unmarshal arrays as string slices + case '{': + return fmt.Errorf("unexpected token: %v", k) + default: + panic("unreachable") } - if i.err != nil { - return fmt.Sprintf("Error{%q}%s", i.err.Error(), suffix) + if err := jsonv2.UnmarshalDecode(in, valPtr, opts); err != nil { + v.Value.Clear() + return err } - return fmt.Sprintf("%v%s", i.value, suffix) + value := reflect.ValueOf(valPtr).Elem().Interface() + v.Value = opt.ValueOf(value) + return nil +} + +// MarshalJSON implements [json.Marshaler]. +func (v RawValue) MarshalJSON() ([]byte, error) { + return jsonv2.Marshal(v) // uses MarshalJSONV2 } + +// UnmarshalJSON implements [json.Unmarshaler]. +func (v *RawValue) UnmarshalJSON(b []byte) error { + return jsonv2.Unmarshal(b, v) // uses UnmarshalJSONV2 +} + +// RawValues is a map of keyed setting values that can be read from a JSON. 
+type RawValues map[Key]RawValue diff --git a/util/syspolicy/setting/raw_item_test.go b/util/syspolicy/setting/raw_item_test.go new file mode 100644 index 000000000..05562d78c --- /dev/null +++ b/util/syspolicy/setting/raw_item_test.go @@ -0,0 +1,101 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package setting + +import ( + "math" + "reflect" + "strconv" + "testing" + + jsonv2 "github.com/go-json-experiment/json" +) + +func TestMarshalUnmarshalRawValue(t *testing.T) { + tests := []struct { + name string + json string + want RawValue + wantErr bool + }{ + { + name: "Bool/True", + json: `true`, + want: RawValueOf(true), + }, + { + name: "Bool/False", + json: `false`, + want: RawValueOf(false), + }, + { + name: "String/Empty", + json: `""`, + want: RawValueOf(""), + }, + { + name: "String/NonEmpty", + json: `"Test"`, + want: RawValueOf("Test"), + }, + { + name: "StringSlice/Null", + json: `null`, + want: RawValueOf([]string(nil)), + }, + { + name: "StringSlice/Empty", + json: `[]`, + want: RawValueOf([]string{}), + }, + { + name: "StringSlice/NonEmpty", + json: `["A", "B", "C"]`, + want: RawValueOf([]string{"A", "B", "C"}), + }, + { + name: "StringSlice/NonStrings", + json: `[1, 2, 3]`, + wantErr: true, + }, + { + name: "Number/Integer/0", + json: `0`, + want: RawValueOf(uint64(0)), + }, + { + name: "Number/Integer/1", + json: `1`, + want: RawValueOf(uint64(1)), + }, + { + name: "Number/Integer/MaxUInt64", + json: strconv.FormatUint(math.MaxUint64, 10), + want: RawValueOf(uint64(math.MaxUint64)), + }, + { + name: "Number/Integer/Negative", + json: `-1`, + wantErr: true, + }, + { + name: "Object", + json: `{}`, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var got RawValue + gotErr := jsonv2.Unmarshal([]byte(tt.json), &got) + if (gotErr != nil) != tt.wantErr { + t.Fatalf("Error: got %v; want %v", gotErr, tt.wantErr) + } + + if !tt.wantErr && !reflect.DeepEqual(got, tt.want) { + t.Fatalf("Value: got %v; want %v", got, tt.want) + } + }) + } +} diff --git a/util/syspolicy/setting/snapshot_test.go b/util/syspolicy/setting/snapshot_test.go index e198d4a58..297685e29 100644 --- a/util/syspolicy/setting/snapshot_test.go +++ b/util/syspolicy/setting/snapshot_test.go @@ -30,134 +30,134 @@ func TestMergeSnapshots(t *testing.T) { name: "first-nil", s1: nil, s2: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: true}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(true), }), want: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: true}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(true), }), }, { name: "first-empty", s1: NewSnapshot(map[Key]RawItem{}), s2: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), }), want: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), }), }, { name: "second-nil", s1: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: true}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + 
"Setting3": RawItemOf(true), }), s2: nil, want: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: true}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(true), }), }, { name: "second-empty", s1: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), }), s2: NewSnapshot(map[Key]RawItem{}), want: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), }), }, { name: "no-conflicts", s1: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), }), s2: NewSnapshot(map[Key]RawItem{ - "Setting4": {value: 2 * time.Hour}, - "Setting5": {value: VisibleByPolicy}, - "Setting6": {value: ShowChoiceByPolicy}, + "Setting4": RawItemOf(2 * time.Hour), + "Setting5": RawItemOf(VisibleByPolicy), + "Setting6": RawItemOf(ShowChoiceByPolicy), }), want: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, - "Setting4": {value: 2 * time.Hour}, - "Setting5": {value: VisibleByPolicy}, - "Setting6": {value: ShowChoiceByPolicy}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), + "Setting4": RawItemOf(2 * time.Hour), + "Setting5": RawItemOf(VisibleByPolicy), + "Setting6": RawItemOf(ShowChoiceByPolicy), }), }, { name: "with-conflicts", s1: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: true}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(true), }), s2: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 456}, - "Setting3": {value: false}, - "Setting4": {value: 2 * time.Hour}, + "Setting1": RawItemOf(456), + "Setting3": RawItemOf(false), + "Setting4": RawItemOf(2 * time.Hour), }), want: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 456}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, - "Setting4": {value: 2 * time.Hour}, + "Setting1": RawItemOf(456), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), + "Setting4": RawItemOf(2 * time.Hour), }), }, { name: "with-scope-first-wins", s1: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: true}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(true), }, DeviceScope), s2: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 456}, - "Setting3": {value: false}, - "Setting4": {value: 2 * time.Hour}, + "Setting1": RawItemOf(456), + "Setting3": RawItemOf(false), + "Setting4": RawItemOf(2 * time.Hour), }, CurrentUserScope), want: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: true}, - "Setting4": {value: 2 * time.Hour}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(true), + "Setting4": RawItemOf(2 * time.Hour), }, CurrentUserScope), }, { name: "with-scope-second-wins", s1: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: 
"String"}, - "Setting3": {value: true}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(true), }, CurrentUserScope), s2: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 456}, - "Setting3": {value: false}, - "Setting4": {value: 2 * time.Hour}, + "Setting1": RawItemOf(456), + "Setting3": RawItemOf(false), + "Setting4": RawItemOf(2 * time.Hour), }, DeviceScope), want: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 456}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, - "Setting4": {value: 2 * time.Hour}, + "Setting1": RawItemOf(456), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), + "Setting4": RawItemOf(2 * time.Hour), }, CurrentUserScope), }, { @@ -170,28 +170,27 @@ func TestMergeSnapshots(t *testing.T) { name: "with-scope-first-empty", s1: NewSnapshot(map[Key]RawItem{}, CurrentUserScope), s2: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: true}}, - DeviceScope, NewNamedOrigin("TestPolicy", DeviceScope)), + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(true)}, DeviceScope, NewNamedOrigin("TestPolicy", DeviceScope)), want: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: true}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(true), }, CurrentUserScope, NewNamedOrigin("TestPolicy", DeviceScope)), }, { name: "with-scope-second-empty", s1: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: true}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(true), }, CurrentUserScope), s2: NewSnapshot(map[Key]RawItem{}), want: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: true}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(true), }, CurrentUserScope), }, } @@ -244,9 +243,9 @@ func TestSnapshotEqual(t *testing.T) { name: "first-nil", s1: nil, s2: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), }), wantEqual: false, wantEqualItems: false, @@ -255,9 +254,9 @@ func TestSnapshotEqual(t *testing.T) { name: "first-empty", s1: NewSnapshot(map[Key]RawItem{}), s2: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), }), wantEqual: false, wantEqualItems: false, @@ -265,9 +264,9 @@ func TestSnapshotEqual(t *testing.T) { { name: "second-nil", s1: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: true}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(true), }), s2: nil, wantEqual: false, @@ -276,9 +275,9 @@ func TestSnapshotEqual(t *testing.T) { { name: "second-empty", s1: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), }), s2: NewSnapshot(map[Key]RawItem{}), wantEqual: false, @@ -287,14 +286,14 @@ func TestSnapshotEqual(t *testing.T) 
{ { name: "same-items-same-order-no-scope", s1: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), }), s2: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), }), wantEqual: true, wantEqualItems: true, @@ -302,14 +301,14 @@ func TestSnapshotEqual(t *testing.T) { { name: "same-items-same-order-same-scope", s1: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), }, DeviceScope), s2: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), }, DeviceScope), wantEqual: true, wantEqualItems: true, @@ -317,14 +316,14 @@ func TestSnapshotEqual(t *testing.T) { { name: "same-items-different-order-same-scope", s1: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), }, DeviceScope), s2: NewSnapshot(map[Key]RawItem{ - "Setting3": {value: false}, - "Setting1": {value: 123}, - "Setting2": {value: "String"}, + "Setting3": RawItemOf(false), + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), }, DeviceScope), wantEqual: true, wantEqualItems: true, @@ -332,14 +331,14 @@ func TestSnapshotEqual(t *testing.T) { { name: "same-items-same-order-different-scope", s1: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), }, DeviceScope), s2: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), }, CurrentUserScope), wantEqual: false, wantEqualItems: true, @@ -347,14 +346,14 @@ func TestSnapshotEqual(t *testing.T) { { name: "different-items-same-scope", s1: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 123}, - "Setting2": {value: "String"}, - "Setting3": {value: false}, + "Setting1": RawItemOf(123), + "Setting2": RawItemOf("String"), + "Setting3": RawItemOf(false), }, DeviceScope), s2: NewSnapshot(map[Key]RawItem{ - "Setting4": {value: 2 * time.Hour}, - "Setting5": {value: VisibleByPolicy}, - "Setting6": {value: ShowChoiceByPolicy}, + "Setting4": RawItemOf(2 * time.Hour), + "Setting5": RawItemOf(VisibleByPolicy), + "Setting6": RawItemOf(ShowChoiceByPolicy), }, DeviceScope), wantEqual: false, wantEqualItems: false, @@ -401,9 +400,9 @@ func TestSnapshotString(t *testing.T) { { name: "non-empty", snapshot: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 2 * time.Hour}, - "Setting2": {value: VisibleByPolicy}, - "Setting3": {value: ShowChoiceByPolicy}, + "Setting1": RawItemOf(2 * time.Hour), + "Setting2": RawItemOf(VisibleByPolicy), + "Setting3": RawItemOf(ShowChoiceByPolicy), }, NewNamedOrigin("Test Policy", DeviceScope)), wantString: `{Test Policy (Device)} Setting1 = 2h0m0s 
@@ -413,14 +412,14 @@ Setting3 = user-decides`, { name: "non-empty-with-item-origin", snapshot: NewSnapshot(map[Key]RawItem{ - "Setting1": {value: 42, origin: NewNamedOrigin("Test Policy", DeviceScope)}, + "Setting1": RawItemWith(42, nil, NewNamedOrigin("Test Policy", DeviceScope)), }), wantString: `Setting1 = 42 - {Test Policy (Device)}`, }, { name: "non-empty-with-item-error", snapshot: NewSnapshot(map[Key]RawItem{ - "Setting1": {err: NewErrorText("bang!")}, + "Setting1": RawItemWith(nil, NewErrorText("bang!"), nil), }), wantString: `Setting1 = Error{"bang!"}`, }, From 540e4c83d08ddfc506db35b27595fd818c14199c Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 29 Oct 2024 11:29:02 -0500 Subject: [PATCH 0096/1708] util/syspolicy/setting: make setting.Snapshot JSON-marshallable We make setting.Snapshot JSON-marshallable in preparation for returning it from the LocalAPI. Updates #12687 Signed-off-by: Nick Khyl --- util/syspolicy/setting/snapshot.go | 45 ++++++++ util/syspolicy/setting/snapshot_test.go | 135 ++++++++++++++++++++++++ 2 files changed, 180 insertions(+) diff --git a/util/syspolicy/setting/snapshot.go b/util/syspolicy/setting/snapshot.go index 512bc487c..0af2bae0f 100644 --- a/util/syspolicy/setting/snapshot.go +++ b/util/syspolicy/setting/snapshot.go @@ -4,11 +4,14 @@ package setting import ( + "errors" "iter" "maps" "slices" "strings" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" xmaps "golang.org/x/exp/maps" "tailscale.com/util/deephash" ) @@ -65,6 +68,9 @@ func (s *Snapshot) GetSetting(k Key) (setting RawItem, ok bool) { // Equal reports whether s and s2 are equal. func (s *Snapshot) Equal(s2 *Snapshot) bool { + if s == s2 { + return true + } if !s.EqualItems(s2) { return false } @@ -135,6 +141,45 @@ func (s *Snapshot) String() string { return sb.String() } +// snapshotJSON holds JSON-marshallable data for [Snapshot]. +type snapshotJSON struct { + Summary Summary `json:",omitzero"` + Settings map[Key]RawItem `json:",omitempty"` +} + +// MarshalJSONV2 implements [jsonv2.MarshalerV2]. +func (s *Snapshot) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { + data := &snapshotJSON{} + if s != nil { + data.Summary = s.summary + data.Settings = s.m + } + return jsonv2.MarshalEncode(out, data, opts) +} + +// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. +func (s *Snapshot) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { + if s == nil { + return errors.New("s must not be nil") + } + data := &snapshotJSON{} + if err := jsonv2.UnmarshalDecode(in, data, opts); err != nil { + return err + } + *s = Snapshot{m: data.Settings, sig: deephash.Hash(&data.Settings), summary: data.Summary} + return nil +} + +// MarshalJSON implements [json.Marshaler]. +func (s *Snapshot) MarshalJSON() ([]byte, error) { + return jsonv2.Marshal(s) // uses MarshalJSONV2 +} + +// UnmarshalJSON implements [json.Unmarshaler]. +func (s *Snapshot) UnmarshalJSON(b []byte) error { + return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONV2 +} + // MergeSnapshots returns a [Snapshot] that contains all [RawItem]s // from snapshot1 and snapshot2 and the [Summary] with the narrower [PolicyScope]. 
// If there's a conflict between policy settings in the two snapshots, diff --git a/util/syspolicy/setting/snapshot_test.go b/util/syspolicy/setting/snapshot_test.go index 297685e29..d41b362f0 100644 --- a/util/syspolicy/setting/snapshot_test.go +++ b/util/syspolicy/setting/snapshot_test.go @@ -4,8 +4,13 @@ package setting import ( + "cmp" + "encoding/json" "testing" "time" + + jsonv2 "github.com/go-json-experiment/json" + "tailscale.com/util/syspolicy/internal" ) func TestMergeSnapshots(t *testing.T) { @@ -432,3 +437,133 @@ Setting3 = user-decides`, }) } } + +func TestMarshalUnmarshalSnapshot(t *testing.T) { + tests := []struct { + name string + snapshot *Snapshot + wantJSON string + wantBack *Snapshot + }{ + { + name: "Nil", + snapshot: (*Snapshot)(nil), + wantJSON: "null", + wantBack: NewSnapshot(nil), + }, + { + name: "Zero", + snapshot: &Snapshot{}, + wantJSON: "{}", + }, + { + name: "Bool/True", + snapshot: NewSnapshot(map[Key]RawItem{"BoolPolicy": RawItemOf(true)}), + wantJSON: `{"Settings": {"BoolPolicy": {"Value": true}}}`, + }, + { + name: "Bool/False", + snapshot: NewSnapshot(map[Key]RawItem{"BoolPolicy": RawItemOf(false)}), + wantJSON: `{"Settings": {"BoolPolicy": {"Value": false}}}`, + }, + { + name: "String/Non-Empty", + snapshot: NewSnapshot(map[Key]RawItem{"StringPolicy": RawItemOf("StringValue")}), + wantJSON: `{"Settings": {"StringPolicy": {"Value": "StringValue"}}}`, + }, + { + name: "String/Empty", + snapshot: NewSnapshot(map[Key]RawItem{"StringPolicy": RawItemOf("")}), + wantJSON: `{"Settings": {"StringPolicy": {"Value": ""}}}`, + }, + { + name: "Integer/NonZero", + snapshot: NewSnapshot(map[Key]RawItem{"IntPolicy": RawItemOf(uint64(42))}), + wantJSON: `{"Settings": {"IntPolicy": {"Value": 42}}}`, + }, + { + name: "Integer/Zero", + snapshot: NewSnapshot(map[Key]RawItem{"IntPolicy": RawItemOf(uint64(0))}), + wantJSON: `{"Settings": {"IntPolicy": {"Value": 0}}}`, + }, + { + name: "String-List", + snapshot: NewSnapshot(map[Key]RawItem{"ListPolicy": RawItemOf([]string{"Value1", "Value2"})}), + wantJSON: `{"Settings": {"ListPolicy": {"Value": ["Value1", "Value2"]}}}`, + }, + { + name: "Empty/With-Summary", + snapshot: NewSnapshot( + map[Key]RawItem{}, + SummaryWith(CurrentUserScope, NewNamedOrigin("TestSource", DeviceScope)), + ), + wantJSON: `{"Summary": {"Origin": {"Name": "TestSource", "Scope": "Device"}, "Scope": "User"}}`, + }, + { + name: "Setting/With-Summary", + snapshot: NewSnapshot( + map[Key]RawItem{"PolicySetting": RawItemOf(uint64(42))}, + SummaryWith(CurrentUserScope, NewNamedOrigin("TestSource", DeviceScope)), + ), + wantJSON: `{ + "Summary": {"Origin": {"Name": "TestSource", "Scope": "Device"}, "Scope": "User"}, + "Settings": {"PolicySetting": {"Value": 42}} + }`, + }, + { + name: "Settings/With-Origins", + snapshot: NewSnapshot( + map[Key]RawItem{ + "SettingA": RawItemWith(uint64(42), nil, NewNamedOrigin("SourceA", DeviceScope)), + "SettingB": RawItemWith("B", nil, NewNamedOrigin("SourceB", CurrentProfileScope)), + "SettingC": RawItemWith(true, nil, NewNamedOrigin("SourceC", CurrentUserScope)), + }, + ), + wantJSON: `{ + "Settings": { + "SettingA": {"Value": 42, "Origin": {"Name": "SourceA", "Scope": "Device"}}, + "SettingB": {"Value": "B", "Origin": {"Name": "SourceB", "Scope": "Profile"}}, + "SettingC": {"Value": true, "Origin": {"Name": "SourceC", "Scope": "User"}} + } + }`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + doTest := func(t *testing.T, useJSONv2 bool) { + var gotJSON []byte + var err error + if useJSONv2 { + 
gotJSON, err = jsonv2.Marshal(tt.snapshot) + } else { + gotJSON, err = json.Marshal(tt.snapshot) + } + if err != nil { + t.Fatal(err) + } + + if got, want, equal := internal.EqualJSONForTest(t, gotJSON, []byte(tt.wantJSON)); !equal { + t.Errorf("JSON: got %s; want %s", got, want) + } + + gotBack := &Snapshot{} + if useJSONv2 { + err = jsonv2.Unmarshal(gotJSON, &gotBack) + } else { + err = json.Unmarshal(gotJSON, &gotBack) + } + if err != nil { + t.Fatal(err) + } + + if wantBack := cmp.Or(tt.wantBack, tt.snapshot); !gotBack.Equal(wantBack) { + t.Errorf("Snapshot: got %+v; want %+v", gotBack, wantBack) + } + } + + t.Run("json", func(t *testing.T) { doTest(t, false) }) + t.Run("jsonv2", func(t *testing.T) { doTest(t, true) }) + }) + } +} From f81348a16b6dd8705cd75379daf3b7490185e841 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 30 Oct 2024 09:48:12 -0700 Subject: [PATCH 0097/1708] util/syspolicy/source: put EnvPolicyStore env keys in their own namespace ... all prefixed with TS_DEBUGSYSPOLICY_*. Updates #13193 Updates #12687 Updates #13855 Change-Id: Ia8024946f53e2b3afda4456a7bb85bbcf6d12bfc Signed-off-by: Brad Fitzpatrick --- util/syspolicy/source/env_policy_store.go | 2 +- .../syspolicy/source/env_policy_store_test.go | 85 ++++++++++--------- 2 files changed, 46 insertions(+), 41 deletions(-) diff --git a/util/syspolicy/source/env_policy_store.go b/util/syspolicy/source/env_policy_store.go index 61065ceff..299132b4e 100644 --- a/util/syspolicy/source/env_policy_store.go +++ b/util/syspolicy/source/env_policy_store.go @@ -114,7 +114,7 @@ func keyToEnvVarName(key setting.Key) (string, error) { isDigit := func(c byte) bool { return '0' <= c && c <= '9' } words := make([]string, 0, 8) - words = append(words, "TS") + words = append(words, "TS_DEBUGSYSPOLICY") var currentWord strings.Builder for i := 0; i < len(key); i++ { c := key[i] diff --git a/util/syspolicy/source/env_policy_store_test.go b/util/syspolicy/source/env_policy_store_test.go index 364a6104d..9eacf6378 100644 --- a/util/syspolicy/source/env_policy_store_test.go +++ b/util/syspolicy/source/env_policy_store_test.go @@ -14,11 +14,11 @@ import ( "tailscale.com/util/syspolicy/setting" ) -func TestKeyToVariableName(t *testing.T) { +func TestKeyToEnvVarName(t *testing.T) { tests := []struct { name string key setting.Key - want string + want string // suffix after "TS_DEBUGSYSPOLICY_" wantErr error }{ { @@ -29,87 +29,87 @@ func TestKeyToVariableName(t *testing.T) { { name: "lowercase", key: "tailnet", - want: "TS_TAILNET", + want: "TAILNET", }, { name: "CamelCase", key: "AuthKey", - want: "TS_AUTH_KEY", + want: "AUTH_KEY", }, { name: "LongerCamelCase", key: "ManagedByOrganizationName", - want: "TS_MANAGED_BY_ORGANIZATION_NAME", + want: "MANAGED_BY_ORGANIZATION_NAME", }, { name: "UPPERCASE", key: "UPPERCASE", - want: "TS_UPPERCASE", + want: "UPPERCASE", }, { name: "WithAbbrev/Front", key: "DNSServer", - want: "TS_DNS_SERVER", + want: "DNS_SERVER", }, { name: "WithAbbrev/Middle", key: "ExitNodeAllowLANAccess", - want: "TS_EXIT_NODE_ALLOW_LAN_ACCESS", + want: "EXIT_NODE_ALLOW_LAN_ACCESS", }, { name: "WithAbbrev/Back", key: "ExitNodeID", - want: "TS_EXIT_NODE_ID", + want: "EXIT_NODE_ID", }, { name: "WithDigits/Single/Front", key: "0TestKey", - want: "TS_0_TEST_KEY", + want: "0_TEST_KEY", }, { name: "WithDigits/Multi/Front", key: "64TestKey", - want: "TS_64_TEST_KEY", + want: "64_TEST_KEY", }, { name: "WithDigits/Single/Middle", key: "Test0Key", - want: "TS_TEST_0_KEY", + want: "TEST_0_KEY", }, { name: "WithDigits/Multi/Middle", 
key: "Test64Key", - want: "TS_TEST_64_KEY", + want: "TEST_64_KEY", }, { name: "WithDigits/Single/Back", key: "TestKey0", - want: "TS_TEST_KEY_0", + want: "TEST_KEY_0", }, { name: "WithDigits/Multi/Back", key: "TestKey64", - want: "TS_TEST_KEY_64", + want: "TEST_KEY_64", }, { name: "WithDigits/Multi/Back", key: "TestKey64", - want: "TS_TEST_KEY_64", + want: "TEST_KEY_64", }, { name: "WithPathSeparators/Single", key: "Key/Subkey", - want: "TS_KEY_SUBKEY", + want: "KEY_SUBKEY", }, { name: "WithPathSeparators/Multi", key: "Root/Level1/Level2", - want: "TS_ROOT_LEVEL_1_LEVEL_2", + want: "ROOT_LEVEL_1_LEVEL_2", }, { name: "Mixed", key: "Network/DNSServer/IPAddress", - want: "TS_NETWORK_DNS_SERVER_IP_ADDRESS", + want: "NETWORK_DNS_SERVER_IP_ADDRESS", }, { name: "Non-Alphanumeric/NonASCII/1", @@ -142,8 +142,12 @@ func TestKeyToVariableName(t *testing.T) { got, err := keyToEnvVarName(tt.key) checkError(t, err, tt.wantErr, true) - if got != tt.want { - t.Fatalf("got %q; want %q", got, tt.want) + want := tt.want + if want != "" { + want = "TS_DEBUGSYSPOLICY_" + want + } + if got != want { + t.Fatalf("got %q; want %q", got, want) } }) } @@ -152,6 +156,7 @@ func TestKeyToVariableName(t *testing.T) { func TestEnvPolicyStore(t *testing.T) { blankEnv := func(string) (string, bool) { return "", false } makeEnv := func(wantName, value string) func(string) (string, bool) { + wantName = "TS_DEBUGSYSPOLICY_" + wantName return func(gotName string) (string, bool) { if gotName != wantName { return "", false @@ -176,13 +181,13 @@ func TestEnvPolicyStore(t *testing.T) { { name: "Configured/String/Empty", key: "AuthKey", - lookup: makeEnv("TS_AUTH_KEY", ""), + lookup: makeEnv("AUTH_KEY", ""), want: "", }, { name: "Configured/String/NonEmpty", key: "AuthKey", - lookup: makeEnv("TS_AUTH_KEY", "ABC123"), + lookup: makeEnv("AUTH_KEY", "ABC123"), want: "ABC123", }, { @@ -195,39 +200,39 @@ func TestEnvPolicyStore(t *testing.T) { { name: "Configured/UInt64/Empty", key: "IntegerSetting", - lookup: makeEnv("TS_INTEGER_SETTING", ""), + lookup: makeEnv("INTEGER_SETTING", ""), wantErr: setting.ErrNotConfigured, want: uint64(0), }, { name: "Configured/UInt64/Zero", key: "IntegerSetting", - lookup: makeEnv("TS_INTEGER_SETTING", "0"), + lookup: makeEnv("INTEGER_SETTING", "0"), want: uint64(0), }, { name: "Configured/UInt64/NonZero", key: "IntegerSetting", - lookup: makeEnv("TS_INTEGER_SETTING", "12345"), + lookup: makeEnv("INTEGER_SETTING", "12345"), want: uint64(12345), }, { name: "Configured/UInt64/MaxUInt64", key: "IntegerSetting", - lookup: makeEnv("TS_INTEGER_SETTING", strconv.FormatUint(math.MaxUint64, 10)), + lookup: makeEnv("INTEGER_SETTING", strconv.FormatUint(math.MaxUint64, 10)), want: uint64(math.MaxUint64), }, { name: "Configured/UInt64/Negative", key: "IntegerSetting", - lookup: makeEnv("TS_INTEGER_SETTING", "-1"), + lookup: makeEnv("INTEGER_SETTING", "-1"), wantErr: setting.ErrTypeMismatch, want: uint64(0), }, { name: "Configured/UInt64/Hex", key: "IntegerSetting", - lookup: makeEnv("TS_INTEGER_SETTING", "0xDEADBEEF"), + lookup: makeEnv("INTEGER_SETTING", "0xDEADBEEF"), want: uint64(0xDEADBEEF), }, { @@ -240,38 +245,38 @@ func TestEnvPolicyStore(t *testing.T) { { name: "Configured/Bool/Empty", key: "LogSCMInteractions", - lookup: makeEnv("TS_LOG_SCM_INTERACTIONS", ""), + lookup: makeEnv("LOG_SCM_INTERACTIONS", ""), wantErr: setting.ErrNotConfigured, want: false, }, { name: "Configured/Bool/True", key: "LogSCMInteractions", - lookup: makeEnv("TS_LOG_SCM_INTERACTIONS", "true"), + lookup: 
makeEnv("LOG_SCM_INTERACTIONS", "true"), want: true, }, { name: "Configured/Bool/False", key: "LogSCMInteractions", - lookup: makeEnv("TS_LOG_SCM_INTERACTIONS", "False"), + lookup: makeEnv("LOG_SCM_INTERACTIONS", "False"), want: false, }, { name: "Configured/Bool/1", key: "LogSCMInteractions", - lookup: makeEnv("TS_LOG_SCM_INTERACTIONS", "1"), + lookup: makeEnv("LOG_SCM_INTERACTIONS", "1"), want: true, }, { name: "Configured/Bool/0", key: "LogSCMInteractions", - lookup: makeEnv("TS_LOG_SCM_INTERACTIONS", "0"), + lookup: makeEnv("LOG_SCM_INTERACTIONS", "0"), want: false, }, { name: "Configured/Bool/Invalid", key: "IntegerSetting", - lookup: makeEnv("TS_INTEGER_SETTING", "NotABool"), + lookup: makeEnv("INTEGER_SETTING", "NotABool"), wantErr: setting.ErrTypeMismatch, want: false, }, @@ -285,31 +290,31 @@ func TestEnvPolicyStore(t *testing.T) { { name: "Configured/StringArray/Empty", key: "AllowedSuggestedExitNodes", - lookup: makeEnv("TS_ALLOWED_SUGGESTED_EXIT_NODES", ""), + lookup: makeEnv("ALLOWED_SUGGESTED_EXIT_NODES", ""), want: []string(nil), }, { name: "Configured/StringArray/Spaces", key: "AllowedSuggestedExitNodes", - lookup: makeEnv("TS_ALLOWED_SUGGESTED_EXIT_NODES", " \t "), + lookup: makeEnv("ALLOWED_SUGGESTED_EXIT_NODES", " \t "), want: []string{}, }, { name: "Configured/StringArray/Single", key: "AllowedSuggestedExitNodes", - lookup: makeEnv("TS_ALLOWED_SUGGESTED_EXIT_NODES", "NodeA"), + lookup: makeEnv("ALLOWED_SUGGESTED_EXIT_NODES", "NodeA"), want: []string{"NodeA"}, }, { name: "Configured/StringArray/Multi", key: "AllowedSuggestedExitNodes", - lookup: makeEnv("TS_ALLOWED_SUGGESTED_EXIT_NODES", "NodeA,NodeB,NodeC"), + lookup: makeEnv("ALLOWED_SUGGESTED_EXIT_NODES", "NodeA,NodeB,NodeC"), want: []string{"NodeA", "NodeB", "NodeC"}, }, { name: "Configured/StringArray/WithBlank", key: "AllowedSuggestedExitNodes", - lookup: makeEnv("TS_ALLOWED_SUGGESTED_EXIT_NODES", "NodeA,\t,, ,NodeB"), + lookup: makeEnv("ALLOWED_SUGGESTED_EXIT_NODES", "NodeA,\t,, ,NodeB"), want: []string{"NodeA", "NodeB"}, }, } From e1e22785b4cdef300ca89206f307f867ba262c6e Mon Sep 17 00:00:00 2001 From: James Tucker Date: Wed, 30 Oct 2024 11:31:36 -0700 Subject: [PATCH 0098/1708] net/netcheck: ensure prior preferred DERP is always in netchecks In an environment with unstable latency, such as upstream bufferbloat, there are cases where a full netcheck could drop the prior preferred DERP (likely home DERP) from future netcheck probe plans. This will then likely result in a home DERP having a missing sample on the next incremental netcheck, ultimately resulting in a home DERP move. This change does not fix our overall response to highly unstable latency, but it is an incremental improvement to prevent single spurious samples during a full netcheck from alone triggering a flapping condition, as now the prior changes to include historical latency will still provide the desired resistance, and the home DERP should not move unless latency is consistently worse over a 5 minute period. Note that there is a nomenclature and semantics issue remaining in the difference between a report preferred DERP and a home DERP. A report preferred DERP is aspirational, it is what will be picked as a home DERP if a home DERP connection needs to be established. A nodes home DERP may be different than a recent preferred DERP, in which case a lot of netcheck logic is fallible. In future enhancements much of the DERP move logic should move to consider the home DERP, rather than recent report preferred DERP. 
Updates #8603 Updates #13969 Signed-off-by: James Tucker --- net/netcheck/netcheck.go | 68 +++++++++++++++++++++++++++-------- net/netcheck/netcheck_test.go | 42 ++++++++++++++++++++-- 2 files changed, 93 insertions(+), 17 deletions(-) diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 171483730..2c429862e 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -392,10 +392,11 @@ type probePlan map[string][]probe // sortRegions returns the regions of dm first sorted // from fastest to slowest (based on the 'last' report), // end in regions that have no data. -func sortRegions(dm *tailcfg.DERPMap, last *Report) (prev []*tailcfg.DERPRegion) { +func sortRegions(dm *tailcfg.DERPMap, last *Report, preferredDERP int) (prev []*tailcfg.DERPRegion) { prev = make([]*tailcfg.DERPRegion, 0, len(dm.Regions)) for _, reg := range dm.Regions { - if reg.Avoid { + // include an otherwise avoid region if it is the current preferred region + if reg.Avoid && reg.RegionID != preferredDERP { continue } prev = append(prev, reg) @@ -420,9 +421,19 @@ func sortRegions(dm *tailcfg.DERPMap, last *Report) (prev []*tailcfg.DERPRegion) // a full report, all regions are scanned.) const numIncrementalRegions = 3 -// makeProbePlan generates the probe plan for a DERPMap, given the most -// recent report and whether IPv6 is configured on an interface. -func makeProbePlan(dm *tailcfg.DERPMap, ifState *netmon.State, last *Report) (plan probePlan) { +// makeProbePlan generates the probe plan for a DERPMap, given the most recent +// report and the current home DERP. preferredDERP is passed independently of +// last (report) because last is currently nil'd to indicate a desire for a full +// netcheck. +// +// TODO(raggi,jwhited): refactor the callers and this function to be more clear +// about full vs. incremental netchecks, and remove the need for the history +// hiding. This was avoided in an incremental change due to exactly this kind of +// distant coupling. +// TODO(raggi): change from "preferred DERP" from a historical report to "home +// DERP" as in what DERP is the current home connection, this would further +// reduce flap events. +func makeProbePlan(dm *tailcfg.DERPMap, ifState *netmon.State, last *Report, preferredDERP int) (plan probePlan) { if last == nil || len(last.RegionLatency) == 0 { return makeProbePlanInitial(dm, ifState) } @@ -433,9 +444,34 @@ func makeProbePlan(dm *tailcfg.DERPMap, ifState *netmon.State, last *Report) (pl had4 := len(last.RegionV4Latency) > 0 had6 := len(last.RegionV6Latency) > 0 hadBoth := have6if && had4 && had6 - for ri, reg := range sortRegions(dm, last) { - if ri == numIncrementalRegions { - break + // #13969 ensure that the home region is always probed. + // If a netcheck has unstable latency, such as a user with large amounts of + // bufferbloat or a highly congested connection, there are cases where a full + // netcheck may observe a one-off high latency to the current home DERP. Prior + // to the forced inclusion of the home DERP, this would result in an + // incremental netcheck following such an event to cause a home DERP move, with + // restoration back to the home DERP on the next full netcheck ~5 minutes later + // - which is highly disruptive when it causes shifts in geo routed subnet + // routers. By always including the home DERP in the incremental netcheck, we + // ensure that the home DERP is always probed, even if it observed a recenet + // poor latency sample. 
This inclusion enables the latency history checks in + // home DERP selection to still take effect. + // planContainsHome indicates whether the home DERP has been added to the probePlan, + // if there is no prior home, then there's no home to additionally include. + planContainsHome := preferredDERP == 0 + for ri, reg := range sortRegions(dm, last, preferredDERP) { + regIsHome := reg.RegionID == preferredDERP + if ri >= numIncrementalRegions { + // planned at least numIncrementalRegions regions and that includes the + // last home region (or there was none), plan complete. + if planContainsHome { + break + } + // planned at least numIncrementalRegions regions, but not the home region, + // check if this is the home region, if not, skip it. + if !regIsHome { + continue + } } var p4, p6 []probe do4 := have4if @@ -446,7 +482,7 @@ func makeProbePlan(dm *tailcfg.DERPMap, ifState *netmon.State, last *Report) (pl tries := 1 isFastestTwo := ri < 2 - if isFastestTwo { + if isFastestTwo || regIsHome { tries = 2 } else if hadBoth { // For dual stack machines, make the 3rd & slower nodes alternate @@ -457,14 +493,15 @@ func makeProbePlan(dm *tailcfg.DERPMap, ifState *netmon.State, last *Report) (pl do4, do6 = false, true } } - if !isFastestTwo && !had6 { + if !regIsHome && !isFastestTwo && !had6 { do6 = false } - if reg.RegionID == last.PreferredDERP { + if regIsHome { // But if we already had a DERP home, try extra hard to // make sure it's there so we don't flip flop around. tries = 4 + planContainsHome = true } for try := 0; try < tries; try++ { @@ -789,9 +826,10 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe c.curState = rs last := c.last - // Even if we're doing a non-incremental update, we may want to try our - // preferred DERP region for captive portal detection. Save that, if we - // have it. + // Extract preferredDERP from the last report, if available. This will be used + // in captive portal detection and DERP flapping suppression. Ideally this would + // be the current active home DERP rather than the last report preferred DERP, + // but only the latter is presently available. var preferredDERP int if last != nil { preferredDERP = last.PreferredDERP @@ -848,7 +886,7 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe var plan probePlan if opts == nil || !opts.OnlyTCP443 { - plan = makeProbePlan(dm, ifState, last) + plan = makeProbePlan(dm, ifState, last, preferredDERP) } // If we're doing a full probe, also check for a captive portal. We diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go index 2780c9c44..f287978d2 100644 --- a/net/netcheck/netcheck_test.go +++ b/net/netcheck/netcheck_test.go @@ -590,6 +590,40 @@ func TestMakeProbePlan(t *testing.T) { "region-3-v4": []probe{p("3a", 4)}, }, }, + { + // #13969: ensure that the prior/current home region is always included in + // probe plans, so that we don't flap between regions due to a single major + // netcheck having excluded the home region due to a spuriously high sample. 
+ name: "ensure_home_region_inclusion", + dm: basicMap, + have6if: true, + last: &Report{ + RegionLatency: map[int]time.Duration{ + 1: 50 * time.Millisecond, + 2: 20 * time.Millisecond, + 3: 30 * time.Millisecond, + 4: 40 * time.Millisecond, + }, + RegionV4Latency: map[int]time.Duration{ + 1: 50 * time.Millisecond, + 2: 20 * time.Millisecond, + }, + RegionV6Latency: map[int]time.Duration{ + 3: 30 * time.Millisecond, + 4: 40 * time.Millisecond, + }, + PreferredDERP: 1, + }, + want: probePlan{ + "region-1-v4": []probe{p("1a", 4), p("1a", 4, 60*ms), p("1a", 4, 220*ms), p("1a", 4, 330*ms)}, + "region-1-v6": []probe{p("1a", 6), p("1a", 6, 60*ms), p("1a", 6, 220*ms), p("1a", 6, 330*ms)}, + "region-2-v4": []probe{p("2a", 4), p("2b", 4, 24*ms)}, + "region-2-v6": []probe{p("2a", 6), p("2b", 6, 24*ms)}, + "region-3-v4": []probe{p("3a", 4), p("3b", 4, 36*ms)}, + "region-3-v6": []probe{p("3a", 6), p("3b", 6, 36*ms)}, + "region-4-v4": []probe{p("4a", 4)}, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -597,7 +631,11 @@ func TestMakeProbePlan(t *testing.T) { HaveV6: tt.have6if, HaveV4: !tt.no4, } - got := makeProbePlan(tt.dm, ifState, tt.last) + preferredDERP := 0 + if tt.last != nil { + preferredDERP = tt.last.PreferredDERP + } + got := makeProbePlan(tt.dm, ifState, tt.last, preferredDERP) if !reflect.DeepEqual(got, tt.want) { t.Errorf("unexpected plan; got:\n%v\nwant:\n%v\n", got, tt.want) } @@ -770,7 +808,7 @@ func TestSortRegions(t *testing.T) { report.RegionLatency[3] = time.Second * time.Duration(6) report.RegionLatency[4] = time.Second * time.Duration(0) report.RegionLatency[5] = time.Second * time.Duration(2) - sortedMap := sortRegions(unsortedMap, report) + sortedMap := sortRegions(unsortedMap, report, 0) // Sorting by latency this should result in rid: 5, 2, 1, 3 // rid 4 with latency 0 should be at the end From 532b26145a088c3946c37040dc4731dc4edcb7cf Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Tue, 29 Oct 2024 13:46:34 +0000 Subject: [PATCH 0099/1708] wgengine/magicsock: exclude disco from throughput metrics The user-facing metrics are intended to track data transmitted at the overlay network level. Updates tailscale/corp#22075 Signed-off-by: Anton Tolchanov --- wgengine/magicsock/derp.go | 14 ++++++++------ wgengine/magicsock/endpoint.go | 3 ++- wgengine/magicsock/magicsock.go | 7 ++++--- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index 704ce3c4f..0204fa0f5 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -649,9 +649,10 @@ func (c *Conn) runDerpReader(ctx context.Context, regionID int, dc *derphttp.Cli } type derpWriteRequest struct { - addr netip.AddrPort - pubKey key.NodePublic - b []byte // copied; ownership passed to receiver + addr netip.AddrPort + pubKey key.NodePublic + b []byte // copied; ownership passed to receiver + isDisco bool } // runDerpWriter runs in a goroutine for the life of a DERP @@ -673,7 +674,7 @@ func (c *Conn) runDerpWriter(ctx context.Context, dc *derphttp.Client, ch <-chan if err != nil { c.logf("magicsock: derp.Send(%v): %v", wr.addr, err) metricSendDERPError.Add(1) - } else { + } else if !wr.isDisco { c.metrics.outboundPacketsDERPTotal.Add(1) c.metrics.outboundBytesDERPTotal.Add(int64(len(wr.b))) } @@ -696,8 +697,6 @@ func (c *connBind) receiveDERP(buffs [][]byte, sizes []int, eps []conn.Endpoint) // No data read occurred. Wait for another packet. 
continue } - c.metrics.inboundPacketsDERPTotal.Add(1) - c.metrics.inboundBytesDERPTotal.Add(int64(n)) sizes[0] = n eps[0] = ep return 1, nil @@ -737,6 +736,9 @@ func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *en if stats := c.stats.Load(); stats != nil { stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, dm.n) } + + c.metrics.inboundPacketsDERPTotal.Add(1) + c.metrics.inboundBytesDERPTotal.Add(int64(n)) return n, ep } diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 1ddde9752..5e0ada617 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -983,7 +983,8 @@ func (de *endpoint) send(buffs [][]byte) error { allOk := true var txBytes int for _, buff := range buffs { - ok, _ := de.c.sendAddr(derpAddr, de.publicKey, buff) + const isDisco = false + ok, _ := de.c.sendAddr(derpAddr, de.publicKey, buff, isDisco) txBytes += len(buff) if !ok { allOk = false diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 72e59a2e7..705e42d9e 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1356,7 +1356,7 @@ func (c *Conn) sendUDPStd(addr netip.AddrPort, b []byte) (sent bool, err error) // An example of when they might be different: sending to an // IPv6 address when the local machine doesn't have IPv6 support // returns (false, nil); it's not an error, but nothing was sent. -func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte) (sent bool, err error) { +func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte, isDisco bool) (sent bool, err error) { if addr.Addr() != tailcfg.DerpMagicIPAddr { return c.sendUDP(addr, b) } @@ -1379,7 +1379,7 @@ func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte) (s case <-c.donec: metricSendDERPErrorClosed.Add(1) return false, errConnClosed - case ch <- derpWriteRequest{addr, pubKey, pkt}: + case ch <- derpWriteRequest{addr, pubKey, pkt, isDisco}: metricSendDERPQueued.Add(1) return true, nil default: @@ -1577,7 +1577,8 @@ func (c *Conn) sendDiscoMessage(dst netip.AddrPort, dstKey key.NodePublic, dstDi box := di.sharedKey.Seal(m.AppendMarshal(nil)) pkt = append(pkt, box...) - sent, err = c.sendAddr(dst, dstKey, pkt) + const isDisco = true + sent, err = c.sendAddr(dst, dstKey, pkt, isDisco) if sent { if logLevel == discoLog || (logLevel == discoVerboseLog && debugDisco()) { node := "?" From b4f46c31bbf8f079a0e617997e8b86f3c94247bd Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Tue, 29 Oct 2024 09:19:40 +0000 Subject: [PATCH 0100/1708] wgengine/magicsock: export packet drop metric for outbound errors This required sharing the dropped packet metric between two packages (tstun and magicsock), so I've moved its definition to util/usermetric. 
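For illustration only (not part of this change): once the definitions live in
util/usermetric, any package holding the shared Registry can report into the
same user-facing counter. The package and function below are hypothetical.

package example // hypothetical consumer of the shared metric

import "tailscale.com/util/usermetric"

// recordOutboundDrop increments the counter that tstun and magicsock
// now both report into, labelled with the drop reason.
func recordOutboundDrop(reg *usermetric.Registry) {
	reg.DroppedPacketsOutbound().Add(usermetric.DropLabels{
		Reason: usermetric.ReasonError,
	}, 1)
}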
Updates tailscale/corp#22075 Signed-off-by: Anton Tolchanov --- net/tstun/wrap.go | 44 ++++-------------- net/tstun/wrap_test.go | 6 +-- util/usermetric/metrics.go | 69 ++++++++++++++++++++++++++++ util/usermetric/usermetric.go | 3 ++ wgengine/magicsock/derp.go | 3 ++ wgengine/magicsock/magicsock.go | 15 +++++- wgengine/magicsock/magicsock_test.go | 25 ++++++++++ 7 files changed, 127 insertions(+), 38 deletions(-) create mode 100644 util/usermetric/metrics.go diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 0b858fc1c..c384abf9d 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -213,24 +213,14 @@ type Wrapper struct { } type metrics struct { - inboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[dropPacketLabel] - outboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[dropPacketLabel] + inboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[usermetric.DropLabels] + outboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[usermetric.DropLabels] } func registerMetrics(reg *usermetric.Registry) *metrics { return &metrics{ - inboundDroppedPacketsTotal: usermetric.NewMultiLabelMapWithRegistry[dropPacketLabel]( - reg, - "tailscaled_inbound_dropped_packets_total", - "counter", - "Counts the number of dropped packets received by the node from other peers", - ), - outboundDroppedPacketsTotal: usermetric.NewMultiLabelMapWithRegistry[dropPacketLabel]( - reg, - "tailscaled_outbound_dropped_packets_total", - "counter", - "Counts the number of packets dropped while being sent to other peers", - ), + inboundDroppedPacketsTotal: reg.DroppedPacketsInbound(), + outboundDroppedPacketsTotal: reg.DroppedPacketsOutbound(), } } @@ -886,8 +876,8 @@ func (t *Wrapper) filterPacketOutboundToWireGuard(p *packet.Parsed, pc *peerConf if filt.RunOut(p, t.filterFlags) != filter.Accept { metricPacketOutDropFilter.Add(1) - t.metrics.outboundDroppedPacketsTotal.Add(dropPacketLabel{ - Reason: DropReasonACL, + t.metrics.outboundDroppedPacketsTotal.Add(usermetric.DropLabels{ + Reason: usermetric.ReasonACL, }, 1) return filter.Drop, gro } @@ -1158,8 +1148,8 @@ func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook ca if outcome != filter.Accept { metricPacketInDropFilter.Add(1) - t.metrics.inboundDroppedPacketsTotal.Add(dropPacketLabel{ - Reason: DropReasonACL, + t.metrics.inboundDroppedPacketsTotal.Add(usermetric.DropLabels{ + Reason: usermetric.ReasonACL, }, 1) // Tell them, via TSMP, we're dropping them due to the ACL. 
@@ -1239,8 +1229,8 @@ func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) { t.noteActivity() _, err := t.tdevWrite(buffs, offset) if err != nil { - t.metrics.inboundDroppedPacketsTotal.Add(dropPacketLabel{ - Reason: DropReasonError, + t.metrics.inboundDroppedPacketsTotal.Add(usermetric.DropLabels{ + Reason: usermetric.ReasonError, }, int64(len(buffs))) } return len(buffs), err @@ -1482,20 +1472,6 @@ var ( metricPacketOutDropSelfDisco = clientmetric.NewCounter("tstun_out_to_wg_drop_self_disco") ) -type DropReason string - -const ( - DropReasonACL DropReason = "acl" - DropReasonError DropReason = "error" -) - -type dropPacketLabel struct { - // Reason indicates what we have done with the packet, and has the following values: - // - acl (rejected packets because of ACL) - // - error (rejected packets because of an error) - Reason DropReason -} - func (t *Wrapper) InstallCaptureHook(cb capture.Callback) { t.captureHook.Store(cb) } diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index 0ed0075b6..9ebedda83 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -441,13 +441,13 @@ func TestFilter(t *testing.T) { } var metricInboundDroppedPacketsACL, metricInboundDroppedPacketsErr, metricOutboundDroppedPacketsACL int64 - if m, ok := tun.metrics.inboundDroppedPacketsTotal.Get(dropPacketLabel{Reason: DropReasonACL}).(*expvar.Int); ok { + if m, ok := tun.metrics.inboundDroppedPacketsTotal.Get(usermetric.DropLabels{Reason: usermetric.ReasonACL}).(*expvar.Int); ok { metricInboundDroppedPacketsACL = m.Value() } - if m, ok := tun.metrics.inboundDroppedPacketsTotal.Get(dropPacketLabel{Reason: DropReasonError}).(*expvar.Int); ok { + if m, ok := tun.metrics.inboundDroppedPacketsTotal.Get(usermetric.DropLabels{Reason: usermetric.ReasonError}).(*expvar.Int); ok { metricInboundDroppedPacketsErr = m.Value() } - if m, ok := tun.metrics.outboundDroppedPacketsTotal.Get(dropPacketLabel{Reason: DropReasonACL}).(*expvar.Int); ok { + if m, ok := tun.metrics.outboundDroppedPacketsTotal.Get(usermetric.DropLabels{Reason: usermetric.ReasonACL}).(*expvar.Int); ok { metricOutboundDroppedPacketsACL = m.Value() } diff --git a/util/usermetric/metrics.go b/util/usermetric/metrics.go new file mode 100644 index 000000000..7f85989ff --- /dev/null +++ b/util/usermetric/metrics.go @@ -0,0 +1,69 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// This file contains user-facing metrics that are used by multiple packages. +// Use it to define more common metrics. Any changes to the registry and +// metric types should be in usermetric.go. + +package usermetric + +import ( + "sync" + + "tailscale.com/metrics" +) + +// Metrics contains user-facing metrics that are used by multiple packages. +type Metrics struct { + initOnce sync.Once + + droppedPacketsInbound *metrics.MultiLabelMap[DropLabels] + droppedPacketsOutbound *metrics.MultiLabelMap[DropLabels] +} + +// DropReason is the reason why a packet was dropped. +type DropReason string + +const ( + // ReasonACL means that the packet was not permitted by ACL. + ReasonACL DropReason = "acl" + + // ReasonError means that the packet was dropped because of an error. + ReasonError DropReason = "error" +) + +// DropLabels contains common label(s) for dropped packet counters. +type DropLabels struct { + Reason DropReason +} + +// initOnce initializes the common metrics. 
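+// It is safe to call from multiple goroutines: the sync.Once in Metrics
+// ensures the shared counters are created and registered exactly once,
+// regardless of which accessor is reached first.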
+func (r *Registry) initOnce() { + r.m.initOnce.Do(func() { + r.m.droppedPacketsInbound = NewMultiLabelMapWithRegistry[DropLabels]( + r, + "tailscaled_inbound_dropped_packets_total", + "counter", + "Counts the number of dropped packets received by the node from other peers", + ) + r.m.droppedPacketsOutbound = NewMultiLabelMapWithRegistry[DropLabels]( + r, + "tailscaled_outbound_dropped_packets_total", + "counter", + "Counts the number of packets dropped while being sent to other peers", + ) + }) +} + +// DroppedPacketsOutbound returns the outbound dropped packet metric, creating it +// if necessary. +func (r *Registry) DroppedPacketsOutbound() *metrics.MultiLabelMap[DropLabels] { + r.initOnce() + return r.m.droppedPacketsOutbound +} + +// DroppedPacketsInbound returns the inbound dropped packet metric. +func (r *Registry) DroppedPacketsInbound() *metrics.MultiLabelMap[DropLabels] { + r.initOnce() + return r.m.droppedPacketsInbound +} diff --git a/util/usermetric/usermetric.go b/util/usermetric/usermetric.go index c964e08a7..7913a4ef0 100644 --- a/util/usermetric/usermetric.go +++ b/util/usermetric/usermetric.go @@ -19,6 +19,9 @@ import ( // Registry tracks user-facing metrics of various Tailscale subsystems. type Registry struct { vars expvar.Map + + // m contains common metrics owned by the registry. + m Metrics } // NewMultiLabelMapWithRegistry creates and register a new diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index 0204fa0f5..e9f070862 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -674,6 +674,9 @@ func (c *Conn) runDerpWriter(ctx context.Context, dc *derphttp.Client, ch <-chan if err != nil { c.logf("magicsock: derp.Send(%v): %v", wr.addr, err) metricSendDERPError.Add(1) + if !wr.isDisco { + c.metrics.outboundPacketsDroppedErrors.Add(1) + } } else if !wr.isDisco { c.metrics.outboundPacketsDERPTotal.Add(1) c.metrics.outboundBytesDERPTotal.Add(int64(len(wr.b))) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 705e42d9e..a9c6fa070 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -127,6 +127,10 @@ type metrics struct { outboundBytesIPv4Total expvar.Int outboundBytesIPv6Total expvar.Int outboundBytesDERPTotal expvar.Int + + // outboundPacketsDroppedErrors is the total number of outbound packets + // dropped due to errors. + outboundPacketsDroppedErrors expvar.Int } // A Conn routes UDP packets and actively manages a list of its endpoints. @@ -605,6 +609,8 @@ func registerMetrics(reg *usermetric.Registry) *metrics { "counter", "Counts the number of bytes sent to other peers", ) + outboundPacketsDroppedErrors := reg.DroppedPacketsOutbound() + m := new(metrics) // Map clientmetrics to the usermetric counters. @@ -631,6 +637,8 @@ func registerMetrics(reg *usermetric.Registry) *metrics { outboundBytesTotal.Set(pathDirectV6, &m.outboundBytesIPv6Total) outboundBytesTotal.Set(pathDERP, &m.outboundBytesDERPTotal) + outboundPacketsDroppedErrors.Set(usermetric.DropLabels{Reason: usermetric.ReasonError}, &m.outboundPacketsDroppedErrors) + return m } @@ -1202,8 +1210,13 @@ func (c *Conn) networkDown() bool { return !c.networkUp.Load() } // Send implements conn.Bind. 
// // See https://pkg.go.dev/golang.zx2c4.com/wireguard/conn#Bind.Send -func (c *Conn) Send(buffs [][]byte, ep conn.Endpoint) error { +func (c *Conn) Send(buffs [][]byte, ep conn.Endpoint) (err error) { n := int64(len(buffs)) + defer func() { + if err != nil { + c.metrics.outboundPacketsDroppedErrors.Add(n) + } + }() metricSendData.Add(n) if c.networkDown() { metricSendDataNetworkDown.Add(n) diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 7e48e1daa..1b3f8ec73 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -63,6 +63,7 @@ import ( "tailscale.com/types/nettype" "tailscale.com/types/ptr" "tailscale.com/util/cibuild" + "tailscale.com/util/must" "tailscale.com/util/racebuild" "tailscale.com/util/set" "tailscale.com/util/usermetric" @@ -3083,3 +3084,27 @@ func TestMaybeRebindOnError(t *testing.T) { } }) } + +func TestNetworkDownSendErrors(t *testing.T) { + netMon := must.Get(netmon.New(t.Logf)) + defer netMon.Close() + + reg := new(usermetric.Registry) + conn := must.Get(NewConn(Options{ + DisablePortMapper: true, + Logf: t.Logf, + NetMon: netMon, + Metrics: reg, + })) + defer conn.Close() + + conn.SetNetworkUp(false) + if err := conn.Send([][]byte{{00}}, &lazyEndpoint{}); err == nil { + t.Error("expected error, got nil") + } + resp := httptest.NewRecorder() + reg.Handler(resp, new(http.Request)) + if !strings.Contains(resp.Body.String(), `tailscaled_outbound_dropped_packets_total{reason="error"} 1`) { + t.Errorf("expected NetworkDown to increment packet dropped metric; got %q", resp.Body.String()) + } +} From 45354dab9bddc97acaa84b03b99448ac49b4c0cf Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 31 Oct 2024 14:45:57 +0000 Subject: [PATCH 0101/1708] ipn,tailcfg: add app connector config knob to conffile (#13942) Make it possible to advertise app connector via a new conffile field. Also bumps capver - conffile deserialization errors out if unknonw fields are set, so we need to know which clients understand the new field. 
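For illustration only, a conffile advertising the app connector could look
roughly like this (the nested Advertise field is assumed from
ipn.AppConnectorPrefs and is not part of this diff):

{
	"Version": "alpha0",
	"AppConnector": {
		"Advertise": true
	}
}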
Updates tailscale/tailscale#11113 Signed-off-by: Irbe Krumina --- ipn/conf.go | 6 ++++++ tailcfg/tailcfg.go | 3 ++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/ipn/conf.go b/ipn/conf.go index 6a67f4004..1b2831b03 100644 --- a/ipn/conf.go +++ b/ipn/conf.go @@ -32,6 +32,8 @@ type ConfigVAlpha struct { AdvertiseRoutes []netip.Prefix `json:",omitempty"` DisableSNAT opt.Bool `json:",omitempty"` + AppConnector *AppConnectorPrefs `json:",omitempty"` // advertise app connector; defaults to false (if nil or explicitly set to false) + NetfilterMode *string `json:",omitempty"` // "on", "off", "nodivert" NoStatefulFiltering opt.Bool `json:",omitempty"` @@ -137,5 +139,9 @@ func (c *ConfigVAlpha) ToPrefs() (MaskedPrefs, error) { mp.AutoUpdate = *c.AutoUpdate mp.AutoUpdateSet = AutoUpdatePrefsMask{ApplySet: true, CheckSet: true} } + if c.AppConnector != nil { + mp.AppConnector = *c.AppConnector + mp.AppConnectorSet = true + } return mp, nil } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 0e1b1d4ae..9e39a4336 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -149,7 +149,8 @@ type CapabilityVersion int // - 104: 2024-08-03: SelfNodeV6MasqAddrForThisPeer now works // - 105: 2024-08-05: Fixed SSH behavior on systems that use busybox (issue #12849) // - 106: 2024-09-03: fix panic regression from cryptokey routing change (65fe0ba7b5) -const CurrentCapabilityVersion CapabilityVersion = 106 +// - 107: 2024-10-30: add App Connector to conffile (PR #13942) +const CurrentCapabilityVersion CapabilityVersion = 107 type StableID string From 3f626c0d774bc1b8a93be26a4aa8f2dadeb27ece Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 29 Oct 2024 15:22:49 -0500 Subject: [PATCH 0102/1708] cmd/tailscale/cli, client/tailscale, ipn/localapi: add tailscale syspolicy {list,reload} commands In this PR, we add the tailscale syspolicy command with two subcommands: list, which displays policy settings, and reload, which forces a reload of those settings. We also update the LocalAPI and LocalClient to facilitate these additions. Updates #12687 Signed-off-by: Nick Khyl --- client/tailscale/localclient.go | 28 ++++++++ cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/cli/cli.go | 1 + cmd/tailscale/cli/syspolicy.go | 110 ++++++++++++++++++++++++++++++++ cmd/tailscaled/depaware.txt | 2 +- ipn/localapi/localapi.go | 50 +++++++++++++++ 6 files changed, 191 insertions(+), 2 deletions(-) create mode 100644 cmd/tailscale/cli/syspolicy.go diff --git a/client/tailscale/localclient.go b/client/tailscale/localclient.go index df51dc1ca..9c2bcc467 100644 --- a/client/tailscale/localclient.go +++ b/client/tailscale/localclient.go @@ -40,6 +40,7 @@ import ( "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/tkatype" + "tailscale.com/util/syspolicy/setting" ) // defaultLocalClient is the default LocalClient when using the legacy @@ -814,6 +815,33 @@ func (lc *LocalClient) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn return decodeJSON[*ipn.Prefs](body) } +// GetEffectivePolicy returns the effective policy for the specified scope. 
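+// It issues a GET request to the LocalAPI "policy/<scope>" endpoint and
+// decodes the response into a setting.Snapshot.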
+func (lc *LocalClient) GetEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { + scopeID, err := scope.MarshalText() + if err != nil { + return nil, err + } + body, err := lc.get200(ctx, "/localapi/v0/policy/"+string(scopeID)) + if err != nil { + return nil, err + } + return decodeJSON[*setting.Snapshot](body) +} + +// ReloadEffectivePolicy reloads the effective policy for the specified scope +// by reading and merging policy settings from all applicable policy sources. +func (lc *LocalClient) ReloadEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { + scopeID, err := scope.MarshalText() + if err != nil { + return nil, err + } + body, err := lc.send(ctx, "POST", "/localapi/v0/policy/"+string(scopeID), 200, http.NoBody) + if err != nil { + return nil, err + } + return decodeJSON[*setting.Snapshot](body) +} + // GetDNSOSConfig returns the system DNS configuration for the current device. // That is, it returns the DNS configuration that the system would use if Tailscale weren't being used. func (lc *LocalClient) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig, error) { diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 2ad3978c9..d62f2e225 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -814,7 +814,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source - tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy + tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index f786bcea5..130a11623 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -185,6 +185,7 @@ change in the future. 
logoutCmd, switchCmd, configureCmd, + syspolicyCmd, netcheckCmd, ipCmd, dnsCmd, diff --git a/cmd/tailscale/cli/syspolicy.go b/cmd/tailscale/cli/syspolicy.go new file mode 100644 index 000000000..06a19defb --- /dev/null +++ b/cmd/tailscale/cli/syspolicy.go @@ -0,0 +1,110 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "os" + "slices" + "text/tabwriter" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/util/syspolicy/setting" +) + +var syspolicyArgs struct { + json bool // JSON output mode +} + +var syspolicyCmd = &ffcli.Command{ + Name: "syspolicy", + ShortHelp: "Diagnose the MDM and system policy configuration", + LongHelp: "The 'tailscale syspolicy' command provides tools for diagnosing the MDM and system policy configuration.", + ShortUsage: "tailscale syspolicy ", + UsageFunc: usageFuncNoDefaultValues, + Subcommands: []*ffcli.Command{ + { + Name: "list", + ShortUsage: "tailscale syspolicy list", + Exec: runSysPolicyList, + ShortHelp: "Prints effective policy settings", + LongHelp: "The 'tailscale syspolicy list' subcommand displays the effective policy settings and their sources (e.g., MDM or environment variables).", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("syspolicy list") + fs.BoolVar(&syspolicyArgs.json, "json", false, "output in JSON format") + return fs + })(), + }, + { + Name: "reload", + ShortUsage: "tailscale syspolicy reload", + Exec: runSysPolicyReload, + ShortHelp: "Forces a reload of policy settings, even if no changes are detected, and prints the result", + LongHelp: "The 'tailscale syspolicy reload' subcommand forces a reload of policy settings, even if no changes are detected, and prints the result.", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("syspolicy reload") + fs.BoolVar(&syspolicyArgs.json, "json", false, "output in JSON format") + return fs + })(), + }, + }, +} + +func runSysPolicyList(ctx context.Context, args []string) error { + policy, err := localClient.GetEffectivePolicy(ctx, setting.DefaultScope()) + if err != nil { + return err + } + printPolicySettings(policy) + return nil + +} + +func runSysPolicyReload(ctx context.Context, args []string) error { + policy, err := localClient.ReloadEffectivePolicy(ctx, setting.DefaultScope()) + if err != nil { + return err + } + printPolicySettings(policy) + return nil +} + +func printPolicySettings(policy *setting.Snapshot) { + if syspolicyArgs.json { + json, err := json.MarshalIndent(policy, "", "\t") + if err != nil { + errf("syspolicy marshalling error: %v", err) + } else { + outln(string(json)) + } + return + } + if policy.Len() == 0 { + outln("No policy settings") + return + } + + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + fmt.Fprintln(w, "Name\tOrigin\tValue\tError") + fmt.Fprintln(w, "----\t------\t-----\t-----") + for _, k := range slices.Sorted(policy.Keys()) { + setting, _ := policy.GetSetting(k) + var origin string + if o := setting.Origin(); o != nil { + origin = o.String() + } + if err := setting.Error(); err != nil { + fmt.Fprintf(w, "%s\t%s\t\t{%s}\n", k, origin, err) + } else { + fmt.Fprintf(w, "%s\t%s\t%s\t\n", k, origin, setting.Value()) + } + } + w.Flush() + + fmt.Println() + return +} diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index b3a4aa86f..53e4790d3 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -403,7 +403,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by 
github.com/tailscale/de tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source - tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy + tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 1d580eca9..0d41725d8 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -62,6 +62,8 @@ import ( "tailscale.com/util/osdiag" "tailscale.com/util/progresstracking" "tailscale.com/util/rands" + "tailscale.com/util/syspolicy/rsop" + "tailscale.com/util/syspolicy/setting" "tailscale.com/version" "tailscale.com/wgengine/magicsock" ) @@ -76,6 +78,7 @@ var handler = map[string]localAPIHandler{ "cert/": (*Handler).serveCert, "file-put/": (*Handler).serveFilePut, "files/": (*Handler).serveFiles, + "policy/": (*Handler).servePolicy, "profiles/": (*Handler).serveProfiles, // The other /localapi/v0/NAME handlers are exact matches and contain only NAME @@ -1332,6 +1335,53 @@ func (h *Handler) servePrefs(w http.ResponseWriter, r *http.Request) { e.Encode(prefs) } +func (h *Handler) servePolicy(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "policy access denied", http.StatusForbidden) + return + } + + suffix, ok := strings.CutPrefix(r.URL.EscapedPath(), "/localapi/v0/policy/") + if !ok { + http.Error(w, "misconfigured", http.StatusInternalServerError) + return + } + + var scope setting.PolicyScope + if suffix == "" { + scope = setting.DefaultScope() + } else if err := scope.UnmarshalText([]byte(suffix)); err != nil { + http.Error(w, fmt.Sprintf("%q is not a valid scope", suffix), http.StatusBadRequest) + return + } + + policy, err := rsop.PolicyFor(scope) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + var effectivePolicy *setting.Snapshot + switch r.Method { + case "GET": + effectivePolicy = policy.Get() + case "POST": + effectivePolicy, err = policy.Reload() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + default: + http.Error(w, "unsupported method", http.StatusMethodNotAllowed) + return + } + + w.Header().Set("Content-Type", "application/json") + e := json.NewEncoder(w) + e.SetIndent("", "\t") + e.Encode(effectivePolicy) +} + type resJSON struct { Error string `json:",omitempty"` } From 3477bfd234523601e2788a51bb365422448278ed Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 31 Oct 2024 13:12:38 -0500 Subject: [PATCH 0103/1708] safeweb: add support for "/" and "/foo" handler distinction (#13980) By counting "/" elements in the pattern we catch many scenarios, but not the root-level handler. If either of the patterns is "/", compare the pattern length to pick the right one. 
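A rough sketch of the wiring this enables (the package, routes, and handlers
below are illustrative, not part of this patch): a root-level browser handler
no longer shadows a more specific API route such as "/latest".

package example // illustrative wiring, not from this patch

import (
	"net/http"

	"tailscale.com/safeweb"
)

func newSafewebServer() (*safeweb.Server, error) {
	browser := http.NewServeMux()
	browser.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// root-level browser UI
	})

	api := http.NewServeMux()
	api.HandleFunc("/latest", func(w http.ResponseWriter, r *http.Request) {
		// more specific API route; with this change it wins over "/"
	})

	return safeweb.NewServer(safeweb.Config{BrowserMux: browser, APIMux: api})
}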
Updates https://github.com/tailscale/corp/issues/8027 Signed-off-by: Andrew Lytvynov --- safeweb/http.go | 17 ++++++++++++++++- safeweb/http_test.go | 10 ++++++++-- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/safeweb/http.go b/safeweb/http.go index 14c61336a..d8818d386 100644 --- a/safeweb/http.go +++ b/safeweb/http.go @@ -225,12 +225,27 @@ const ( browserHandler ) +func (h handlerType) String() string { + switch h { + case browserHandler: + return "browser" + case apiHandler: + return "api" + default: + return "unknown" + } +} + // checkHandlerType returns either apiHandler or browserHandler, depending on // whether apiPattern or browserPattern is more specific (i.e. which pattern // contains more pathname components). If they are equally specific, it returns // unknownHandler. func checkHandlerType(apiPattern, browserPattern string) handlerType { - c := cmp.Compare(strings.Count(path.Clean(apiPattern), "/"), strings.Count(path.Clean(browserPattern), "/")) + apiPattern, browserPattern = path.Clean(apiPattern), path.Clean(browserPattern) + c := cmp.Compare(strings.Count(apiPattern, "/"), strings.Count(browserPattern, "/")) + if apiPattern == "/" || browserPattern == "/" { + c = cmp.Compare(len(apiPattern), len(browserPattern)) + } switch { case c > 0: return apiHandler diff --git a/safeweb/http_test.go b/safeweb/http_test.go index cec14b2b9..a2e2d7644 100644 --- a/safeweb/http_test.go +++ b/safeweb/http_test.go @@ -527,13 +527,13 @@ func TestGetMoreSpecificPattern(t *testing.T) { { desc: "same prefix", a: "/foo/bar/quux", - b: "/foo/bar/", + b: "/foo/bar/", // path.Clean will strip the trailing slash. want: apiHandler, }, { desc: "almost same prefix, but not a path component", a: "/goat/sheep/cheese", - b: "/goat/sheepcheese/", + b: "/goat/sheepcheese/", // path.Clean will strip the trailing slash. want: apiHandler, }, { @@ -554,6 +554,12 @@ func TestGetMoreSpecificPattern(t *testing.T) { b: "///////", want: unknownHandler, }, + { + desc: "root-level", + a: "/latest", + b: "/", // path.Clean will NOT strip the trailing slash. + want: apiHandler, + }, } { t.Run(tt.desc, func(t *testing.T) { got := checkHandlerType(tt.a, tt.b) From 6985369479db2c9d5bacccbde6d66630a81eb1ab Mon Sep 17 00:00:00 2001 From: Andrea Gottardo Date: Thu, 31 Oct 2024 12:00:34 -0700 Subject: [PATCH 0104/1708] net/sockstats: prevent crash in setNetMon (#13985) --- net/sockstats/sockstats_tsgo.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/net/sockstats/sockstats_tsgo.go b/net/sockstats/sockstats_tsgo.go index af691302f..fec9ec3b0 100644 --- a/net/sockstats/sockstats_tsgo.go +++ b/net/sockstats/sockstats_tsgo.go @@ -279,7 +279,13 @@ func setNetMon(netMon *netmon.Monitor) { if ifName == "" { return } - ifIndex := state.Interface[ifName].Index + // DefaultRouteInterface and Interface are gathered at different points in time. + // Check for existence first, to avoid a nil pointer dereference. + iface, ok := state.Interface[ifName] + if !ok { + return + } + ifIndex := iface.Index sockStats.mu.Lock() defer sockStats.mu.Unlock() // Ignore changes to unknown interfaces -- it would require From ddbc950f466ff7fa4c0b2dfb11489311b0d384f2 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 31 Oct 2024 14:13:29 -0500 Subject: [PATCH 0105/1708] safeweb: add support for custom CSP (#13975) To allow more flexibility with CSPs, add a fully customizable `CSP` type that can be provided in `Config` and encodes itself into the correct format. 
Preserve the `CSPAllowInlineStyles` option as is today, but maybe that'll get deprecated later in favor of the new CSP field. In particular, this allows for pages loading external JS, or inline JS with nonces or hashes (see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src#unsafe_inline_script) Updates https://github.com/tailscale/corp/issues/8027 Signed-off-by: Andrew Lytvynov --- safeweb/http.go | 88 +++++++++++++++++++++++++++++++++++++------- safeweb/http_test.go | 28 +++++++++----- 2 files changed, 92 insertions(+), 24 deletions(-) diff --git a/safeweb/http.go b/safeweb/http.go index d8818d386..bd53eca5b 100644 --- a/safeweb/http.go +++ b/safeweb/http.go @@ -74,25 +74,74 @@ import ( crand "crypto/rand" "fmt" "log" + "maps" "net" "net/http" "net/url" "path" + "slices" "strings" "github.com/gorilla/csrf" ) -// The default Content-Security-Policy header. -var defaultCSP = strings.Join([]string{ - `default-src 'self'`, // origin is the only valid source for all content types - `script-src 'self'`, // disallow inline javascript - `frame-ancestors 'none'`, // disallow framing of the page - `form-action 'self'`, // disallow form submissions to other origins - `base-uri 'self'`, // disallow base URIs from other origins - `block-all-mixed-content`, // disallow mixed content when serving over HTTPS - `object-src 'self'`, // disallow embedding of resources from other origins -}, "; ") +// CSP is the value of a Content-Security-Policy header. Keys are CSP +// directives (like "default-src") and values are source expressions (like +// "'self'" or "https://tailscale.com"). A nil slice value is allowed for some +// directives like "upgrade-insecure-requests" that don't expect a list of +// source definitions. +type CSP map[string][]string + +// DefaultCSP is the recommended CSP to use when not loading resources from +// other domains and not embedding the current website. If you need to tweak +// the CSP, it is recommended to extend DefaultCSP instead of writing your own +// from scratch. +func DefaultCSP() CSP { + return CSP{ + "default-src": {"self"}, // origin is the only valid source for all content types + "frame-ancestors": {"none"}, // disallow framing of the page + "form-action": {"self"}, // disallow form submissions to other origins + "base-uri": {"self"}, // disallow base URIs from other origins + // TODO(awly): consider upgrade-insecure-requests in SecureContext + // instead, as this is deprecated. + "block-all-mixed-content": nil, // disallow mixed content when serving over HTTPS + } +} + +// Set sets the values for a given directive. Empty values are allowed, if the +// directive doesn't expect any (like "upgrade-insecure-requests"). +func (csp CSP) Set(directive string, values ...string) { + csp[directive] = values +} + +// Add adds a source expression to an existing directive. +func (csp CSP) Add(directive, value string) { + csp[directive] = append(csp[directive], value) +} + +// Del deletes a directive and all its values. +func (csp CSP) Del(directive string) { + delete(csp, directive) +} + +func (csp CSP) String() string { + keys := slices.Collect(maps.Keys(csp)) + slices.Sort(keys) + var s strings.Builder + for _, k := range keys { + s.WriteString(k) + for _, v := range csp[k] { + // Special values like 'self', 'none', 'unsafe-inline', etc., must + // be quoted. Do it implicitly as a convenience here. 
+ if !strings.Contains(v, ".") && len(v) > 1 && v[0] != '\'' && v[len(v)-1] != '\'' { + v = "'" + v + "'" + } + s.WriteString(" " + v) + } + s.WriteString("; ") + } + return strings.TrimSpace(s.String()) +} // The default Strict-Transport-Security header. This header tells the browser // to exclusively use HTTPS for all requests to the origin for the next year. @@ -130,6 +179,9 @@ type Config struct { // startup. CSRFSecret []byte + // CSP is the Content-Security-Policy header to return with BrowserMux + // responses. + CSP CSP // CSPAllowInlineStyles specifies whether to include `style-src: // unsafe-inline` in the Content-Security-Policy header to permit the use of // inline CSS. @@ -168,6 +220,10 @@ func (c *Config) setDefaults() error { } } + if c.CSP == nil { + c.CSP = DefaultCSP() + } + return nil } @@ -199,16 +255,20 @@ func NewServer(config Config) (*Server, error) { if config.CookiesSameSiteLax { sameSite = csrf.SameSiteLaxMode } + if config.CSPAllowInlineStyles { + if _, ok := config.CSP["style-src"]; ok { + config.CSP.Add("style-src", "unsafe-inline") + } else { + config.CSP.Set("style-src", "self", "unsafe-inline") + } + } s := &Server{ Config: config, - csp: defaultCSP, + csp: config.CSP.String(), // only set Secure flag on CSRF cookies if we are in a secure context // as otherwise the browser will reject the cookie csrfProtect: csrf.Protect(config.CSRFSecret, csrf.Secure(config.SecureContext), csrf.SameSite(sameSite)), } - if config.CSPAllowInlineStyles { - s.csp = defaultCSP + `; style-src 'self' 'unsafe-inline'` - } s.h = cmp.Or(config.HTTPServer, &http.Server{}) if s.h.Handler != nil { return nil, fmt.Errorf("use safeweb.Config.APIMux and safeweb.Config.BrowserMux instead of http.Server.Handler") diff --git a/safeweb/http_test.go b/safeweb/http_test.go index a2e2d7644..852ce326b 100644 --- a/safeweb/http_test.go +++ b/safeweb/http_test.go @@ -241,18 +241,26 @@ func TestCSRFProtection(t *testing.T) { func TestContentSecurityPolicyHeader(t *testing.T) { tests := []struct { name string + csp CSP apiRoute bool - wantCSP bool + wantCSP string }{ { - name: "default routes get CSP headers", - apiRoute: false, - wantCSP: true, + name: "default CSP", + wantCSP: `base-uri 'self'; block-all-mixed-content; default-src 'self'; form-action 'self'; frame-ancestors 'none';`, + }, + { + name: "custom CSP", + csp: CSP{ + "default-src": {"'self'", "https://tailscale.com"}, + "upgrade-insecure-requests": nil, + }, + wantCSP: `default-src 'self' https://tailscale.com; upgrade-insecure-requests;`, }, { name: "`/api/*` routes do not get CSP headers", apiRoute: true, - wantCSP: false, + wantCSP: "", }, } @@ -265,9 +273,9 @@ func TestContentSecurityPolicyHeader(t *testing.T) { var s *Server var err error if tt.apiRoute { - s, err = NewServer(Config{APIMux: h}) + s, err = NewServer(Config{APIMux: h, CSP: tt.csp}) } else { - s, err = NewServer(Config{BrowserMux: h}) + s, err = NewServer(Config{BrowserMux: h, CSP: tt.csp}) } if err != nil { t.Fatal(err) @@ -279,8 +287,8 @@ func TestContentSecurityPolicyHeader(t *testing.T) { s.h.Handler.ServeHTTP(w, req) resp := w.Result() - if (resp.Header.Get("Content-Security-Policy") == "") == tt.wantCSP { - t.Fatalf("content security policy want: %v; got: %v", tt.wantCSP, resp.Header.Get("Content-Security-Policy")) + if got := resp.Header.Get("Content-Security-Policy"); got != tt.wantCSP { + t.Fatalf("content security policy want: %q; got: %q", tt.wantCSP, got) } }) } @@ -397,7 +405,7 @@ func TestCSPAllowInlineStyles(t *testing.T) { csp := 
resp.Header.Get("Content-Security-Policy") allowsStyles := strings.Contains(csp, "style-src 'self' 'unsafe-inline'") if allowsStyles != allow { - t.Fatalf("CSP inline styles want: %v; got: %v", allow, allowsStyles) + t.Fatalf("CSP inline styles want: %v, got: %v in %q", allow, allowsStyles, csp) } }) } From 84c88604728938a888ff3ca1bfb10c256a77e0f8 Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Thu, 31 Oct 2024 15:13:08 -0600 Subject: [PATCH 0106/1708] util/syspolicy: add policy key for onboarding flow visibility Updates https://github.com/tailscale/corp/issues/23789 Signed-off-by: Aaron Klotz --- util/syspolicy/policy_keys.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index 162885b27..bb9a5d6cc 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -77,6 +77,9 @@ const ( // SuggestedExitNodeVisibility controls the visibility of suggested exit nodes in the client GUI. // When this system policy is set to 'hide', an exit node suggestion won't be presented to the user as part of the exit nodes picker. SuggestedExitNodeVisibility Key = "SuggestedExitNode" + // OnboardingFlowVisibility controls the visibility of the onboarding flow in the client GUI. + // When this system policy is set to 'hide', the onboarding flow is never shown to the user. + OnboardingFlowVisibility Key = "OnboardingFlow" // Keys with a string value formatted for use with time.ParseDuration(). KeyExpirationNoticeTime Key = "KeyExpirationNotice" // default 24 hours @@ -166,6 +169,7 @@ var implicitDefinitions = []*setting.Definition{ setting.NewDefinition(SuggestedExitNodeVisibility, setting.UserSetting, setting.VisibilityValue), setting.NewDefinition(TestMenuVisibility, setting.UserSetting, setting.VisibilityValue), setting.NewDefinition(UpdateMenuVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(OnboardingFlowVisibility, setting.UserSetting, setting.VisibilityValue), } func init() { From 49de23cf1bae372996de797d86ced771ed314756 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 31 Oct 2024 19:25:00 -0700 Subject: [PATCH 0107/1708] net/netcheck: add addReportHistoryAndSetPreferredDERP() test case (#13989) Add an explicit case for exercising preferred DERP hysteresis around the branch that compares latencies on a percentage basis. Updates #cleanup Signed-off-by: Jordan Whited --- net/netcheck/netcheck_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go index f287978d2..b4fbb4023 100644 --- a/net/netcheck/netcheck_test.go +++ b/net/netcheck/netcheck_test.go @@ -357,6 +357,15 @@ func TestAddReportHistoryAndSetPreferredDERP(t *testing.T) { wantPrevLen: 3, wantDERP: 2, // moved to d2 since d1 is gone }, + { + name: "preferred_derp_hysteresis_no_switch_pct", + steps: []step{ + {0 * time.Second, report("d1", 34*time.Millisecond, "d2", 35*time.Millisecond)}, + {1 * time.Second, report("d1", 34*time.Millisecond, "d2", 23*time.Millisecond)}, + }, + wantPrevLen: 2, + wantDERP: 1, // diff is 11ms, but d2 is greater than 2/3s of d1 + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From 0ffc7bf38b92f4b064d00fe89239f2da756de642 Mon Sep 17 00:00:00 2001 From: Renato Aguiar Date: Fri, 25 Oct 2024 18:25:39 -0700 Subject: [PATCH 0108/1708] Fix MagicDNS on OpenBSD Add OpenBSD to the list of platforms that need DNS reconfigured on link changes. 
Signed-off-by: Renato Aguiar --- wgengine/userspace.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index fc204736a..2dd0c4cd5 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -1236,7 +1236,7 @@ func (e *userspaceEngine) linkChange(delta *netmon.ChangeDelta) { // and Apple platforms. if changed { switch runtime.GOOS { - case "linux", "android", "ios", "darwin": + case "linux", "android", "ios", "darwin", "openbsd": e.wgLock.Lock() dnsCfg := e.lastDNSConfig e.wgLock.Unlock() From d09e9d967f1fd6349a2bddefffe2e9e9f4b33044 Mon Sep 17 00:00:00 2001 From: Maisem Ali Date: Thu, 31 Oct 2024 08:30:11 -0700 Subject: [PATCH 0109/1708] ipn/ipnlocal: reload prefs correctly on ReloadConfig We were only updating the ProfileManager and not going down the EditPrefs path which meant the prefs weren't applied till either the process restarted or some other pref changed. This makes it so that we reconfigure everything correctly when ReloadConfig is called. Updates #13032 Signed-off-by: Maisem Ali --- ipn/ipnlocal/local.go | 48 ++++++++++++++++++++++--------- ipn/ipnlocal/local_test.go | 59 ++++++++++++++++++++++++++++++++------ 2 files changed, 85 insertions(+), 22 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index b91f1337a..edd56f7c4 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -479,7 +479,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo mConn.SetNetInfoCallback(b.setNetInfo) if sys.InitialConfig != nil { - if err := b.setConfigLocked(sys.InitialConfig); err != nil { + if err := b.initPrefsFromConfig(sys.InitialConfig); err != nil { return nil, err } } @@ -712,8 +712,8 @@ func (b *LocalBackend) SetDirectFileRoot(dir string) { // It returns (false, nil) if not running in declarative mode, (true, nil) on // success, or (false, error) on failure. func (b *LocalBackend) ReloadConfig() (ok bool, err error) { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() if b.conf == nil { return false, nil } @@ -721,18 +721,21 @@ func (b *LocalBackend) ReloadConfig() (ok bool, err error) { if err != nil { return false, err } - if err := b.setConfigLocked(conf); err != nil { + if err := b.setConfigLockedOnEntry(conf, unlock); err != nil { return false, fmt.Errorf("error setting config: %w", err) } return true, nil } -func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error { - - // TODO(irbekrm): notify the relevant components to consume any prefs - // updates. Currently only initial configfile settings are applied - // immediately. +// initPrefsFromConfig initializes the backend's prefs from the provided config. +// This should only be called once, at startup. For updates at runtime, use +// [LocalBackend.setConfigLocked]. +func (b *LocalBackend) initPrefsFromConfig(conf *conffile.Config) error { + // TODO(maisem,bradfitz): combine this with setConfigLocked. This is called + // before anything is running, so there's no need to lock and we don't + // update any subsystems. At runtime, we both need to lock and update + // subsystems with the new prefs. 
p := b.pm.CurrentPrefs().AsStruct() mp, err := conf.Parsed.ToPrefs() if err != nil { @@ -742,13 +745,14 @@ func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error { if err := b.pm.SetPrefs(p.View(), ipn.NetworkProfile{}); err != nil { return err } + b.setStaticEndpointsFromConfigLocked(conf) + b.conf = conf + return nil +} - defer func() { - b.conf = conf - }() - +func (b *LocalBackend) setStaticEndpointsFromConfigLocked(conf *conffile.Config) { if conf.Parsed.StaticEndpoints == nil && (b.conf == nil || b.conf.Parsed.StaticEndpoints == nil) { - return nil + return } // Ensure that magicsock conn has the up to date static wireguard @@ -762,6 +766,22 @@ func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error { ms.SetStaticEndpoints(views.SliceOf(conf.Parsed.StaticEndpoints)) } } +} + +// setConfigLockedOnEntry uses the provided config to update the backend's prefs +// and other state. +func (b *LocalBackend) setConfigLockedOnEntry(conf *conffile.Config, unlock unlockOnce) error { + defer unlock() + p := b.pm.CurrentPrefs().AsStruct() + mp, err := conf.Parsed.ToPrefs() + if err != nil { + return fmt.Errorf("error parsing config to prefs: %w", err) + } + p.ApplyEdits(&mp) + b.setStaticEndpointsFromConfigLocked(conf) + b.setPrefsLockedOnEntry(p, unlock) + + b.conf = conf return nil } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 5fee5d00e..433679dda 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -13,6 +13,7 @@ import ( "net/http" "net/netip" "os" + "path/filepath" "reflect" "slices" "strings" @@ -32,6 +33,7 @@ import ( "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" + "tailscale.com/ipn/conffile" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/store/mem" "tailscale.com/net/netcheck" @@ -432,16 +434,25 @@ func (panicOnUseTransport) RoundTrip(*http.Request) (*http.Response, error) { } func newTestLocalBackend(t testing.TB) *LocalBackend { + return newTestLocalBackendWithSys(t, new(tsd.System)) +} + +// newTestLocalBackendWithSys creates a new LocalBackend with the given tsd.System. +// If the state store or engine are not set in sys, they will be set to a new +// in-memory store and fake userspace engine, respectively. +func newTestLocalBackendWithSys(t testing.TB, sys *tsd.System) *LocalBackend { var logf logger.Logf = logger.Discard - sys := new(tsd.System) - store := new(mem.Store) - sys.Set(store) - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) - if err != nil { - t.Fatalf("NewFakeUserspaceEngine: %v", err) + if _, ok := sys.StateStore.GetOK(); !ok { + sys.Set(new(mem.Store)) + } + if _, ok := sys.Engine.GetOK(); !ok { + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) + if err != nil { + t.Fatalf("NewFakeUserspaceEngine: %v", err) + } + t.Cleanup(eng.Close) + sys.Set(eng) } - t.Cleanup(eng.Close) - sys.Set(eng) lb, err := NewLocalBackend(logf, logid.PublicID{}, sys, 0) if err != nil { t.Fatalf("NewLocalBackend: %v", err) @@ -4423,3 +4434,35 @@ func TestLoginNotifications(t *testing.T) { }) } } + +// TestConfigFileReload tests that the LocalBackend reloads its configuration +// when the configuration file changes. 
+func TestConfigFileReload(t *testing.T) { + cfg1 := `{"Hostname": "foo", "Version": "alpha0"}` + f := filepath.Join(t.TempDir(), "cfg") + must.Do(os.WriteFile(f, []byte(cfg1), 0600)) + sys := new(tsd.System) + sys.InitialConfig = must.Get(conffile.Load(f)) + lb := newTestLocalBackendWithSys(t, sys) + must.Do(lb.Start(ipn.Options{})) + + lb.mu.Lock() + hn := lb.hostinfo.Hostname + lb.mu.Unlock() + if hn != "foo" { + t.Fatalf("got %q; want %q", hn, "foo") + } + + cfg2 := `{"Hostname": "bar", "Version": "alpha0"}` + must.Do(os.WriteFile(f, []byte(cfg2), 0600)) + if !must.Get(lb.ReloadConfig()) { + t.Fatal("reload failed") + } + + lb.mu.Lock() + hn = lb.hostinfo.Hostname + lb.mu.Unlock() + if hn != "bar" { + t.Fatalf("got %q; want %q", hn, "bar") + } +} From 634cc2ba4a03714173f23915e933f9eed918c137 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 1 Nov 2024 10:50:40 -0700 Subject: [PATCH 0110/1708] wgengine/netstack: remove unused taildrive deps A filesystem was plumbed into netstack in 993acf4475b22d693 but hasn't been used since 2d5d6f5403f3. Remove it. Noticed while rebasing a Tailscale fork elsewhere. Updates tailscale/corp#16827 Change-Id: Ib76deeda205ffe912b77a59b9d22853ebff42813 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/tailscaled.go | 2 -- cmd/tsconnect/wasm/wasm_js.go | 2 +- tsnet/tsnet.go | 2 +- wgengine/netstack/netstack.go | 29 +++++++++++++---------------- wgengine/netstack/netstack_test.go | 4 ++-- 5 files changed, 17 insertions(+), 22 deletions(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 2831b4061..7a5ee0398 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -788,7 +788,6 @@ func runDebugServer(mux *http.ServeMux, addr string) { } func newNetstack(logf logger.Logf, sys *tsd.System) (*netstack.Impl, error) { - tfs, _ := sys.DriveForLocal.GetOK() ret, err := netstack.Create(logf, sys.Tun.Get(), sys.Engine.Get(), @@ -796,7 +795,6 @@ func newNetstack(logf logger.Logf, sys *tsd.System) (*netstack.Impl, error) { sys.Dialer.Get(), sys.DNSManager.Get(), sys.ProxyMapper(), - tfs, ) if err != nil { return nil, err diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index c35d543aa..d0bc991f2 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -115,7 +115,7 @@ func newIPN(jsConfig js.Value) map[string]any { } sys.Set(eng) - ns, err := netstack.Create(logf, sys.Tun.Get(), eng, sys.MagicSock.Get(), dialer, sys.DNSManager.Get(), sys.ProxyMapper(), nil) + ns, err := netstack.Create(logf, sys.Tun.Get(), eng, sys.MagicSock.Get(), dialer, sys.DNSManager.Get(), sys.ProxyMapper()) if err != nil { log.Fatalf("netstack.Create: %v", err) } diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 6751e0bb0..7252d89fe 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -546,7 +546,7 @@ func (s *Server) start() (reterr error) { sys.HealthTracker().SetMetricsRegistry(sys.UserMetricsRegistry()) // TODO(oxtoacart): do we need to support Taildrive on tsnet, and if so, how? 
- ns, err := netstack.Create(tsLogf, sys.Tun.Get(), eng, sys.MagicSock.Get(), s.dialer, sys.DNSManager.Get(), sys.ProxyMapper(), nil) + ns, err := netstack.Create(tsLogf, sys.Tun.Get(), eng, sys.MagicSock.Get(), s.dialer, sys.DNSManager.Get(), sys.ProxyMapper()) if err != nil { return fmt.Errorf("netstack.Create: %w", err) } diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 3185c5d55..280f4b7bb 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -32,7 +32,6 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" "gvisor.dev/gvisor/pkg/tcpip/transport/udp" "gvisor.dev/gvisor/pkg/waiter" - "tailscale.com/drive" "tailscale.com/envknob" "tailscale.com/ipn/ipnlocal" "tailscale.com/metrics" @@ -174,19 +173,18 @@ type Impl struct { // It can only be set before calling Start. ProcessSubnets bool - ipstack *stack.Stack - linkEP *linkEndpoint - tundev *tstun.Wrapper - e wgengine.Engine - pm *proxymap.Mapper - mc *magicsock.Conn - logf logger.Logf - dialer *tsdial.Dialer - ctx context.Context // alive until Close - ctxCancel context.CancelFunc // called on Close - lb *ipnlocal.LocalBackend // or nil - dns *dns.Manager - driveForLocal drive.FileSystemForLocal // or nil + ipstack *stack.Stack + linkEP *linkEndpoint + tundev *tstun.Wrapper + e wgengine.Engine + pm *proxymap.Mapper + mc *magicsock.Conn + logf logger.Logf + dialer *tsdial.Dialer + ctx context.Context // alive until Close + ctxCancel context.CancelFunc // called on Close + lb *ipnlocal.LocalBackend // or nil + dns *dns.Manager // loopbackPort, if non-nil, will enable Impl to loop back (dnat to // :loopbackPort) TCP & UDP flows originally @@ -288,7 +286,7 @@ func setTCPBufSizes(ipstack *stack.Stack) error { } // Create creates and populates a new Impl. 
-func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magicsock.Conn, dialer *tsdial.Dialer, dns *dns.Manager, pm *proxymap.Mapper, driveForLocal drive.FileSystemForLocal) (*Impl, error) { +func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magicsock.Conn, dialer *tsdial.Dialer, dns *dns.Manager, pm *proxymap.Mapper) (*Impl, error) { if mc == nil { return nil, errors.New("nil magicsock.Conn") } @@ -382,7 +380,6 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi connsInFlightByClient: make(map[netip.Addr]int), packetsInFlight: make(map[stack.TransportEndpointID]struct{}), dns: dns, - driveForLocal: driveForLocal, } loopbackPort, ok := envknob.LookupInt("TS_DEBUG_NETSTACK_LOOPBACK_PORT") if ok && loopbackPort >= 0 && loopbackPort <= math.MaxUint16 { diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go index 1bfc76fef..a46dcf9dd 100644 --- a/wgengine/netstack/netstack_test.go +++ b/wgengine/netstack/netstack_test.go @@ -65,7 +65,7 @@ func TestInjectInboundLeak(t *testing.T) { t.Fatal(err) } - ns, err := Create(logf, tunWrap, eng, sys.MagicSock.Get(), dialer, sys.DNSManager.Get(), sys.ProxyMapper(), nil) + ns, err := Create(logf, tunWrap, eng, sys.MagicSock.Get(), dialer, sys.DNSManager.Get(), sys.ProxyMapper()) if err != nil { t.Fatal(err) } @@ -116,7 +116,7 @@ func makeNetstack(tb testing.TB, config func(*Impl)) *Impl { tb.Cleanup(func() { eng.Close() }) sys.Set(eng) - ns, err := Create(logf, sys.Tun.Get(), eng, sys.MagicSock.Get(), dialer, sys.DNSManager.Get(), sys.ProxyMapper(), nil) + ns, err := Create(logf, sys.Tun.Get(), eng, sys.MagicSock.Get(), dialer, sys.DNSManager.Get(), sys.ProxyMapper()) if err != nil { tb.Fatal(err) } From b0626ff84c11f8ad5c680fdec214eb5981307f1c Mon Sep 17 00:00:00 2001 From: VimT Date: Fri, 20 Sep 2024 23:52:45 +0800 Subject: [PATCH 0111/1708] net/socks5: fix UDP relay in userspace-networking mode This commit addresses an issue with the SOCKS5 UDP relay functionality when using the --tun=userspace-networking option. Previously, UDP packets were not being correctly routed into the Tailscale network in this mode. Key changes: - Replace single UDP connection with a map of connections per target - Use c.srv.dial for creating connections to ensure proper routing Updates #7581 Change-Id: Iaaa66f9de6a3713218014cf3f498003a7cac9832 Signed-off-by: VimT --- net/socks5/socks5.go | 101 +++++++++++++++++++++++++++---------------- 1 file changed, 63 insertions(+), 38 deletions(-) diff --git a/net/socks5/socks5.go b/net/socks5/socks5.go index 0d651537f..db315d949 100644 --- a/net/socks5/socks5.go +++ b/net/socks5/socks5.go @@ -22,6 +22,7 @@ import ( "log" "net" "strconv" + "tailscale.com/syncs" "time" "tailscale.com/types/logger" @@ -81,6 +82,12 @@ const ( addrTypeNotSupported replyCode = 8 ) +// UDP conn default buffer size and read timeout. +const ( + bufferSize = 8 * 1024 + readTimeout = 5 * time.Second +) + // Server is a SOCKS5 proxy server. type Server struct { // Logf optionally specifies the logger to use. @@ -143,7 +150,8 @@ type Conn struct { clientConn net.Conn request *request - udpClientAddr net.Addr + udpClientAddr net.Addr + udpTargetConns syncs.Map[string, net.Conn] } // Run starts the new connection. 
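For context, a minimal sketch of how a caller routes the relay through a
userspace network stack instead of the host's (this assumes the package's
exported Dialer field and Serve method, which are not shown in this diff;
the UserDial hook is how tailscaled plugs in its dialer, simplified here):

package example // illustrative wiring, not from this patch

import (
	"log"
	"net"

	"tailscale.com/net/socks5"
	"tailscale.com/net/tsdial"
)

func serveSOCKS5(dialer *tsdial.Dialer, ln net.Listener) error {
	srv := &socks5.Server{
		Logf: log.Printf,
		// With this change, UDP relay targets are also reached through
		// this dialer, so datagrams enter the tailnet even under
		// --tun=userspace-networking.
		Dialer: dialer.UserDial,
	}
	return srv.Serve(ln)
}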
@@ -276,15 +284,6 @@ func (c *Conn) handleUDP() error { } defer clientUDPConn.Close() - serverUDPConn, err := net.ListenPacket("udp", "[::]:0") - if err != nil { - res := errorResponse(generalFailure) - buf, _ := res.marshal() - c.clientConn.Write(buf) - return err - } - defer serverUDPConn.Close() - bindAddr, bindPort, err := splitHostPort(clientUDPConn.LocalAddr().String()) if err != nil { return err @@ -305,14 +304,20 @@ func (c *Conn) handleUDP() error { } c.clientConn.Write(buf) - return c.transferUDP(c.clientConn, clientUDPConn, serverUDPConn) + return c.transferUDP(c.clientConn, clientUDPConn) } -func (c *Conn) transferUDP(associatedTCP net.Conn, clientConn net.PacketConn, targetConn net.PacketConn) error { +func (c *Conn) transferUDP(associatedTCP net.Conn, clientConn net.PacketConn) error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - const bufferSize = 8 * 1024 - const readTimeout = 5 * time.Second + + // close all target udp connections when the client connection is closed + defer func() { + c.udpTargetConns.Range(func(_ string, conn net.Conn) bool { + _ = conn.Close() + return true + }) + }() // client -> target go func() { @@ -323,7 +328,7 @@ func (c *Conn) transferUDP(associatedTCP net.Conn, clientConn net.PacketConn, ta case <-ctx.Done(): return default: - err := c.handleUDPRequest(clientConn, targetConn, buf, readTimeout) + err := c.handleUDPRequest(ctx, clientConn, buf) if err != nil { if isTimeout(err) { continue @@ -337,21 +342,50 @@ func (c *Conn) transferUDP(associatedTCP net.Conn, clientConn net.PacketConn, ta } }() + // A UDP association terminates when the TCP connection that the UDP + // ASSOCIATE request arrived on terminates. RFC1928 + _, err := io.Copy(io.Discard, associatedTCP) + if err != nil { + err = fmt.Errorf("udp associated tcp conn: %w", err) + } + return err +} + +func (c *Conn) getOrDialTargetConn( + ctx context.Context, + clientConn net.PacketConn, + targetAddr string, +) (net.Conn, error) { + host, port, err := splitHostPort(targetAddr) + if err != nil { + return nil, err + } + + conn, loaded := c.udpTargetConns.Load(targetAddr) + if loaded { + return conn, nil + } + conn, err = c.srv.dial(ctx, "udp", targetAddr) + if err != nil { + return nil, err + } + c.udpTargetConns.Store(targetAddr, conn) + // target -> client go func() { - defer cancel() buf := make([]byte, bufferSize) + addr := socksAddr{addrType: getAddrType(host), addr: host, port: port} for { select { case <-ctx.Done(): return default: - err := c.handleUDPResponse(targetConn, clientConn, buf, readTimeout) + err := c.handleUDPResponse(clientConn, addr, conn, buf) if err != nil { if isTimeout(err) { continue } - if errors.Is(err, net.ErrClosed) { + if errors.Is(err, net.ErrClosed) || errors.Is(err, io.EOF) { return } c.logf("udp transfer: handle udp response fail: %v", err) @@ -360,20 +394,13 @@ func (c *Conn) transferUDP(associatedTCP net.Conn, clientConn net.PacketConn, ta } }() - // A UDP association terminates when the TCP connection that the UDP - // ASSOCIATE request arrived on terminates. 
RFC1928 - _, err := io.Copy(io.Discard, associatedTCP) - if err != nil { - err = fmt.Errorf("udp associated tcp conn: %w", err) - } - return err + return conn, nil } func (c *Conn) handleUDPRequest( + ctx context.Context, clientConn net.PacketConn, - targetConn net.PacketConn, buf []byte, - readTimeout time.Duration, ) error { // add a deadline for the read to avoid blocking forever _ = clientConn.SetReadDeadline(time.Now().Add(readTimeout)) @@ -386,12 +413,14 @@ func (c *Conn) handleUDPRequest( if err != nil { return fmt.Errorf("parse udp request: %w", err) } - targetAddr, err := net.ResolveUDPAddr("udp", req.addr.hostPort()) + + targetAddr := req.addr.hostPort() + targetConn, err := c.getOrDialTargetConn(ctx, clientConn, targetAddr) if err != nil { - c.logf("resolve target addr fail: %v", err) + return fmt.Errorf("dial target %s fail: %w", targetAddr, err) } - nn, err := targetConn.WriteTo(data, targetAddr) + nn, err := targetConn.Write(data) if err != nil { return fmt.Errorf("write to target %s fail: %w", targetAddr, err) } @@ -402,22 +431,18 @@ func (c *Conn) handleUDPRequest( } func (c *Conn) handleUDPResponse( - targetConn net.PacketConn, clientConn net.PacketConn, + targetAddr socksAddr, + targetConn net.Conn, buf []byte, - readTimeout time.Duration, ) error { // add a deadline for the read to avoid blocking forever _ = targetConn.SetReadDeadline(time.Now().Add(readTimeout)) - n, addr, err := targetConn.ReadFrom(buf) + n, err := targetConn.Read(buf) if err != nil { return fmt.Errorf("read from target: %w", err) } - host, port, err := splitHostPort(addr.String()) - if err != nil { - return fmt.Errorf("split host port: %w", err) - } - hdr := udpRequest{addr: socksAddr{addrType: getAddrType(host), addr: host, port: port}} + hdr := udpRequest{addr: targetAddr} pkt, err := hdr.marshal() if err != nil { return fmt.Errorf("marshal udp request: %w", err) From 43138c7a5c8815ea104499866440e34bb1220e93 Mon Sep 17 00:00:00 2001 From: VimT Date: Sat, 21 Sep 2024 14:37:51 +0800 Subject: [PATCH 0112/1708] net/socks5: optimize UDP relay Key changes: - No mutex for every udp package: replace syncs.Map with regular map for udpTargetConns - Use socksAddr as map key for better type safety - Add test for multi udp target Updates #7581 Change-Id: Ic3d384a9eab62dcbf267d7d6d268bf242cc8ed3c Signed-off-by: VimT --- net/socks5/socks5.go | 52 ++++++------ net/socks5/socks5_test.go | 166 +++++++++++++++++++++----------------- 2 files changed, 119 insertions(+), 99 deletions(-) diff --git a/net/socks5/socks5.go b/net/socks5/socks5.go index db315d949..4a5befa1d 100644 --- a/net/socks5/socks5.go +++ b/net/socks5/socks5.go @@ -22,7 +22,6 @@ import ( "log" "net" "strconv" - "tailscale.com/syncs" "time" "tailscale.com/types/logger" @@ -151,7 +150,7 @@ type Conn struct { request *request udpClientAddr net.Addr - udpTargetConns syncs.Map[string, net.Conn] + udpTargetConns map[socksAddr]net.Conn } // Run starts the new connection. 
@@ -311,17 +310,18 @@ func (c *Conn) transferUDP(associatedTCP net.Conn, clientConn net.PacketConn) er ctx, cancel := context.WithCancel(context.Background()) defer cancel() - // close all target udp connections when the client connection is closed - defer func() { - c.udpTargetConns.Range(func(_ string, conn net.Conn) bool { - _ = conn.Close() - return true - }) - }() - // client -> target go func() { defer cancel() + + c.udpTargetConns = make(map[socksAddr]net.Conn) + // close all target udp connections when the client connection is closed + defer func() { + for _, conn := range c.udpTargetConns { + _ = conn.Close() + } + }() + buf := make([]byte, bufferSize) for { select { @@ -354,33 +354,27 @@ func (c *Conn) transferUDP(associatedTCP net.Conn, clientConn net.PacketConn) er func (c *Conn) getOrDialTargetConn( ctx context.Context, clientConn net.PacketConn, - targetAddr string, + targetAddr socksAddr, ) (net.Conn, error) { - host, port, err := splitHostPort(targetAddr) - if err != nil { - return nil, err - } - - conn, loaded := c.udpTargetConns.Load(targetAddr) - if loaded { + conn, exist := c.udpTargetConns[targetAddr] + if exist { return conn, nil } - conn, err = c.srv.dial(ctx, "udp", targetAddr) + conn, err := c.srv.dial(ctx, "udp", targetAddr.hostPort()) if err != nil { return nil, err } - c.udpTargetConns.Store(targetAddr, conn) + c.udpTargetConns[targetAddr] = conn // target -> client go func() { buf := make([]byte, bufferSize) - addr := socksAddr{addrType: getAddrType(host), addr: host, port: port} for { select { case <-ctx.Done(): return default: - err := c.handleUDPResponse(clientConn, addr, conn, buf) + err := c.handleUDPResponse(clientConn, targetAddr, conn, buf) if err != nil { if isTimeout(err) { continue @@ -414,18 +408,17 @@ func (c *Conn) handleUDPRequest( return fmt.Errorf("parse udp request: %w", err) } - targetAddr := req.addr.hostPort() - targetConn, err := c.getOrDialTargetConn(ctx, clientConn, targetAddr) + targetConn, err := c.getOrDialTargetConn(ctx, clientConn, req.addr) if err != nil { - return fmt.Errorf("dial target %s fail: %w", targetAddr, err) + return fmt.Errorf("dial target %s fail: %w", req.addr, err) } nn, err := targetConn.Write(data) if err != nil { - return fmt.Errorf("write to target %s fail: %w", targetAddr, err) + return fmt.Errorf("write to target %s fail: %w", req.addr, err) } if nn != len(data) { - return fmt.Errorf("write to target %s fail: %w", targetAddr, io.ErrShortWrite) + return fmt.Errorf("write to target %s fail: %w", req.addr, io.ErrShortWrite) } return nil } @@ -652,10 +645,15 @@ func (s socksAddr) marshal() ([]byte, error) { pkt = binary.BigEndian.AppendUint16(pkt, s.port) return pkt, nil } + func (s socksAddr) hostPort() string { return net.JoinHostPort(s.addr, strconv.Itoa(int(s.port))) } +func (s socksAddr) String() string { + return s.hostPort() +} + // response contains the contents of // a response packet sent from the proxy // to the client. 
diff --git a/net/socks5/socks5_test.go b/net/socks5/socks5_test.go index 11ea59d4b..bc6fac79f 100644 --- a/net/socks5/socks5_test.go +++ b/net/socks5/socks5_test.go @@ -169,12 +169,25 @@ func TestReadPassword(t *testing.T) { func TestUDP(t *testing.T) { // backend UDP server which we'll use SOCKS5 to connect to - listener, err := net.ListenPacket("udp", ":0") - if err != nil { - t.Fatal(err) + newUDPEchoServer := func() net.PacketConn { + listener, err := net.ListenPacket("udp", ":0") + if err != nil { + t.Fatal(err) + } + go udpEchoServer(listener) + return listener } - backendServerPort := listener.LocalAddr().(*net.UDPAddr).Port - go udpEchoServer(listener) + + const echoServerNumber = 3 + echoServerListener := make([]net.PacketConn, echoServerNumber) + for i := 0; i < echoServerNumber; i++ { + echoServerListener[i] = newUDPEchoServer() + } + defer func() { + for i := 0; i < echoServerNumber; i++ { + _ = echoServerListener[i].Close() + } + }() // SOCKS5 server socks5, err := net.Listen("tcp", ":0") @@ -184,84 +197,93 @@ func TestUDP(t *testing.T) { socks5Port := socks5.Addr().(*net.TCPAddr).Port go socks5Server(socks5) - // net/proxy don't support UDP, so we need to manually send the SOCKS5 UDP request - conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", socks5Port)) - if err != nil { - t.Fatal(err) - } - _, err = conn.Write([]byte{0x05, 0x01, 0x00}) // client hello with no auth - if err != nil { - t.Fatal(err) - } - buf := make([]byte, 1024) - n, err := conn.Read(buf) // server hello - if err != nil { - t.Fatal(err) - } - if n != 2 || buf[0] != 0x05 || buf[1] != 0x00 { - t.Fatalf("got: %q want: 0x05 0x00", buf[:n]) - } + // make a socks5 udpAssociate conn + newUdpAssociateConn := func() (socks5Conn net.Conn, socks5UDPAddr socksAddr) { + // net/proxy don't support UDP, so we need to manually send the SOCKS5 UDP request + conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", socks5Port)) + if err != nil { + t.Fatal(err) + } + _, err = conn.Write([]byte{socks5Version, 0x01, noAuthRequired}) // client hello with no auth + if err != nil { + t.Fatal(err) + } + buf := make([]byte, 1024) + n, err := conn.Read(buf) // server hello + if err != nil { + t.Fatal(err) + } + if n != 2 || buf[0] != socks5Version || buf[1] != noAuthRequired { + t.Fatalf("got: %q want: 0x05 0x00", buf[:n]) + } - targetAddr := socksAddr{ - addrType: domainName, - addr: "localhost", - port: uint16(backendServerPort), - } - targetAddrPkt, err := targetAddr.marshal() - if err != nil { - t.Fatal(err) - } - _, err = conn.Write(append([]byte{0x05, 0x03, 0x00}, targetAddrPkt...)) // client reqeust - if err != nil { - t.Fatal(err) - } + targetAddr := socksAddr{addrType: ipv4, addr: "0.0.0.0", port: 0} + targetAddrPkt, err := targetAddr.marshal() + if err != nil { + t.Fatal(err) + } + _, err = conn.Write(append([]byte{socks5Version, byte(udpAssociate), 0x00}, targetAddrPkt...)) // client reqeust + if err != nil { + t.Fatal(err) + } - n, err = conn.Read(buf) // server response - if err != nil { - t.Fatal(err) - } - if n < 3 || !bytes.Equal(buf[:3], []byte{0x05, 0x00, 0x00}) { - t.Fatalf("got: %q want: 0x05 0x00 0x00", buf[:n]) + n, err = conn.Read(buf) // server response + if err != nil { + t.Fatal(err) + } + if n < 3 || !bytes.Equal(buf[:3], []byte{socks5Version, 0x00, 0x00}) { + t.Fatalf("got: %q want: 0x05 0x00 0x00", buf[:n]) + } + udpProxySocksAddr, err := parseSocksAddr(bytes.NewReader(buf[3:n])) + if err != nil { + t.Fatal(err) + } + + return conn, udpProxySocksAddr } - udpProxySocksAddr, err := 
parseSocksAddr(bytes.NewReader(buf[3:n])) - if err != nil { - t.Fatal(err) + + conn, udpProxySocksAddr := newUdpAssociateConn() + defer conn.Close() + + sendUDPAndWaitResponse := func(socks5UDPConn net.Conn, addr socksAddr, body []byte) (responseBody []byte) { + udpPayload, err := (&udpRequest{addr: addr}).marshal() + if err != nil { + t.Fatal(err) + } + udpPayload = append(udpPayload, body...) + _, err = socks5UDPConn.Write(udpPayload) + if err != nil { + t.Fatal(err) + } + buf := make([]byte, 1024) + n, err := socks5UDPConn.Read(buf) + if err != nil { + t.Fatal(err) + } + _, responseBody, err = parseUDPRequest(buf[:n]) + if err != nil { + t.Fatal(err) + } + return responseBody } udpProxyAddr, err := net.ResolveUDPAddr("udp", udpProxySocksAddr.hostPort()) if err != nil { t.Fatal(err) } - udpConn, err := net.DialUDP("udp", nil, udpProxyAddr) - if err != nil { - t.Fatal(err) - } - udpPayload, err := (&udpRequest{addr: targetAddr}).marshal() - if err != nil { - t.Fatal(err) - } - udpPayload = append(udpPayload, []byte("Test")...) - _, err = udpConn.Write(udpPayload) // send udp package - if err != nil { - t.Fatal(err) - } - n, _, err = udpConn.ReadFrom(buf) - if err != nil { - t.Fatal(err) - } - _, responseBody, err := parseUDPRequest(buf[:n]) // read udp response - if err != nil { - t.Fatal(err) - } - if string(responseBody) != "Test" { - t.Fatalf("got: %q want: Test", responseBody) - } - err = udpConn.Close() + socks5UDPConn, err := net.DialUDP("udp", nil, udpProxyAddr) if err != nil { t.Fatal(err) } - err = conn.Close() - if err != nil { - t.Fatal(err) + defer socks5UDPConn.Close() + + for i := 0; i < echoServerNumber; i++ { + port := echoServerListener[i].LocalAddr().(*net.UDPAddr).Port + addr := socksAddr{addrType: ipv4, addr: "127.0.0.1", port: uint16(port)} + requestBody := []byte(fmt.Sprintf("Test %d", i)) + responseBody := sendUDPAndWaitResponse(socks5UDPConn, addr, requestBody) + if !bytes.Equal(requestBody, responseBody) { + t.Fatalf("got: %q want: %q", responseBody, requestBody) + } } } From 45da3a4b28715fc123af9d60b0284971e2be3096 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 3 Nov 2024 07:12:34 -0800 Subject: [PATCH 0113/1708] cmd/tsconnect: block after starting esbuild dev server Thanks to @davidbuzz for raising the issue in #13973. Fixes #8272 Fixes #13973 Change-Id: Ic413e14d34c82df3c70a97e591b90316b0b4946b Signed-off-by: Brad Fitzpatrick --- cmd/tsconnect/common.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/tsconnect/common.go b/cmd/tsconnect/common.go index a387c00c9..0b0813226 100644 --- a/cmd/tsconnect/common.go +++ b/cmd/tsconnect/common.go @@ -150,6 +150,7 @@ func runEsbuildServe(buildOptions esbuild.BuildOptions) { log.Fatalf("Cannot start esbuild server: %v", err) } log.Printf("Listening on http://%s:%d\n", result.Host, result.Port) + select {} } func runEsbuild(buildOptions esbuild.BuildOptions) esbuild.BuildResult { From d4222fae95c04102e75dbf97a8c3517a136881a4 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 22 Oct 2024 13:53:34 -0500 Subject: [PATCH 0114/1708] tsnet: add accessor to get tsd.System Pulled of otherwise unrelated PR #13884. 
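For illustration only (not part of this patch): a minimal sketch of how an embedding program might use the new accessor, assuming a typical tsnet server lifecycle. Only the Sys() method and its doc comment come from the diff below; the rest is a hedged example.

    package main

    import (
    	"log"

    	"tailscale.com/tsnet"
    )

    func main() {
    	srv := new(tsnet.Server)
    	if err := srv.Start(); err != nil {
    		log.Fatal(err)
    	}
    	defer srv.Close()

    	// New accessor from this patch: a handle to the node's internal
    	// subsystems. Per its doc comment, this is not a stable API.
    	sys := srv.Sys()
    	_ = sys
    }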
Updates tailscale/corp#22075 Change-Id: I5b539fcb4aca1b93406cf139c719a5e3c64ff7f7 Signed-off-by: Brad Fitzpatrick --- tsnet/tsnet.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 7252d89fe..70084c103 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -126,6 +126,7 @@ type Server struct { initOnce sync.Once initErr error lb *ipnlocal.LocalBackend + sys *tsd.System netstack *netstack.Impl netMon *netmon.Monitor rootPath string // the state directory @@ -518,6 +519,7 @@ func (s *Server) start() (reterr error) { } sys := new(tsd.System) + s.sys = sys if err := s.startLogger(&closePool, sys.HealthTracker(), tsLogf); err != nil { return err } @@ -1227,6 +1229,13 @@ func (s *Server) CapturePcap(ctx context.Context, pcapFile string) error { return nil } +// Sys returns a handle to the Tailscale subsystems of this node. +// +// This is not a stable API, nor are the APIs of the returned subsystems. +func (s *Server) Sys() *tsd.System { + return s.sys +} + type listenKey struct { network string host netip.Addr // or zero value for unspecified From 809a6eba80c94e7593b5f7d1604f1f4ac8a6b61c Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Mon, 4 Nov 2024 18:42:51 +0000 Subject: [PATCH 0115/1708] cmd/k8s-operator: allow to optionally configure tailscaled port (#14005) Updates tailscale/tailscale#13981 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/operator.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index d8dd403cc..116ba02e0 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -11,6 +11,7 @@ import ( "context" "os" "regexp" + "strconv" "strings" "time" @@ -150,6 +151,13 @@ func initTSNet(zlog *zap.SugaredLogger) (*tsnet.Server, *tailscale.Client) { Hostname: hostname, Logf: zlog.Named("tailscaled").Debugf, } + if p := os.Getenv("TS_PORT"); p != "" { + port, err := strconv.ParseUint(p, 10, 16) + if err != nil { + startlog.Fatalf("TS_PORT %q cannot be parsed as uint16: %v", p, err) + } + s.Port = uint16(port) + } if kubeSecret != "" { st, err := kubestore.New(logger.Discard, kubeSecret) if err != nil { From 01185e436fd39c2aa499b3c56bcb08d6c4dc7b84 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 4 Nov 2024 20:49:40 -0800 Subject: [PATCH 0116/1708] types/result, util/lineiter: add package for a result type, use it This adds a new generic result type (motivated by golang/go#70084) to try it out, and uses it in the new lineutil package (replacing the old lineread package), changing that package to return iterators: sometimes over []byte (when the input is all in memory), but sometimes iterators over results of []byte, if errors might happen at runtime. 
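For illustration only (not part of this patch): a minimal sketch of how call sites read with the new iterator-based API, using the lineiter and result packages added below. The file path is an arbitrary placeholder.

    package main

    import (
    	"fmt"

    	"tailscale.com/util/lineiter"
    )

    func main() {
    	// Bytes yields plain []byte lines; no error can occur mid-iteration.
    	for line := range lineiter.Bytes([]byte("foo\n\nbar")) {
    		fmt.Printf("%q\n", line)
    	}

    	// File yields result.Of[[]byte] values, since reading can fail at
    	// runtime; each element carries either a line or an error.
    	for lr := range lineiter.File("/etc/hosts") {
    		line, err := lr.Value()
    		if err != nil {
    			break // or handle the error
    		}
    		fmt.Printf("%s\n", line)
    	}
    }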
Updates #12912 Updates golang/go#70084 Change-Id: Iacdc1070e661b5fb163907b1e8b07ac7d51d3f83 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 3 +- cmd/k8s-operator/depaware.txt | 3 +- cmd/stund/depaware.txt | 3 +- cmd/tailscale/depaware.txt | 3 +- cmd/tailscaled/depaware.txt | 3 +- hostinfo/hostinfo.go | 24 ++++----- hostinfo/hostinfo_linux.go | 13 +++-- ipn/ipnlocal/ssh.go | 22 ++++---- net/netmon/interfaces_android.go | 51 ++++++++---------- net/netmon/interfaces_darwin_test.go | 24 ++++----- net/netmon/interfaces_linux.go | 37 ++++++------- net/netmon/netmon_linux_test.go | 2 + net/tshttpproxy/tshttpproxy_synology.go | 15 +++--- ssh/tailssh/tailssh_test.go | 13 ++--- ssh/tailssh/user.go | 18 +++---- types/result/result.go | 49 +++++++++++++++++ util/lineiter/lineiter.go | 72 +++++++++++++++++++++++++ util/lineiter/lineiter_test.go | 32 +++++++++++ util/pidowner/pidowner_linux.go | 20 +++---- version/distro/distro.go | 20 +++---- 20 files changed, 289 insertions(+), 138 deletions(-) create mode 100644 types/result/result.go create mode 100644 util/lineiter/lineiter.go create mode 100644 util/lineiter/lineiter_test.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index e20c4e556..a3eec2046 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -140,6 +140,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/types/persist from tailscale.com/ipn tailscale.com/types/preftype from tailscale.com/ipn tailscale.com/types/ptr from tailscale.com/hostinfo+ + tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/ipn+ tailscale.com/types/tkatype from tailscale.com/client/tailscale+ tailscale.com/types/views from tailscale.com/ipn+ @@ -154,7 +155,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/fastuuid from tailscale.com/tsweb 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale - tailscale.com/util/lineread from tailscale.com/hostinfo+ + tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns tailscale.com/util/mak from tailscale.com/health+ tailscale.com/util/multierr from tailscale.com/health+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index d62f2e225..74536c6c9 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -775,6 +775,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ tailscale.com/types/ptr from tailscale.com/cmd/k8s-operator+ + tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/client/tailscale+ tailscale.com/types/views from tailscale.com/appc+ @@ -792,7 +793,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httphdr from tailscale.com/ipn/ipnlocal+ tailscale.com/util/httpm from tailscale.com/client/tailscale+ - tailscale.com/util/lineread from tailscale.com/hostinfo+ + tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns+ tailscale.com/util/mak from 
tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index a35f59516..7031b18e2 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -67,6 +67,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/logger from tailscale.com/tsweb tailscale.com/types/opt from tailscale.com/envknob+ tailscale.com/types/ptr from tailscale.com/tailcfg+ + tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/tailcfg+ tailscale.com/types/tkatype from tailscale.com/tailcfg+ tailscale.com/types/views from tailscale.com/net/tsaddr+ @@ -74,7 +75,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/tailcfg tailscale.com/util/fastuuid from tailscale.com/tsweb - tailscale.com/util/lineread from tailscale.com/version/distro + tailscale.com/util/lineiter from tailscale.com/version/distro tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/slicesx from tailscale.com/tailcfg tailscale.com/util/vizerror from tailscale.com/tailcfg+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index cce76a81e..ac5440d2c 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -148,6 +148,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/types/persist from tailscale.com/ipn tailscale.com/types/preftype from tailscale.com/cmd/tailscale/cli+ tailscale.com/types/ptr from tailscale.com/hostinfo+ + tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/ipn+ tailscale.com/types/tkatype from tailscale.com/types/key+ tailscale.com/types/views from tailscale.com/tailcfg+ @@ -162,7 +163,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/groupmember from tailscale.com/client/web 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ - tailscale.com/util/lineread from tailscale.com/hostinfo+ + tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns tailscale.com/util/mak from tailscale.com/cmd/tailscale/cli+ tailscale.com/util/multierr from tailscale.com/control/controlhttp+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 53e4790d3..31a0cb67c 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -364,6 +364,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ tailscale.com/types/ptr from tailscale.com/control/controlclient+ + tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/tka+ tailscale.com/types/views from tailscale.com/ipn/ipnlocal+ @@ -381,7 +382,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httphdr from tailscale.com/ipn/ipnlocal+ tailscale.com/util/httpm from tailscale.com/client/tailscale+ - 
tailscale.com/util/lineread from tailscale.com/hostinfo+ + tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns+ tailscale.com/util/mak from tailscale.com/control/controlclient+ tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ diff --git a/hostinfo/hostinfo.go b/hostinfo/hostinfo.go index 3233a422d..3d4216922 100644 --- a/hostinfo/hostinfo.go +++ b/hostinfo/hostinfo.go @@ -25,7 +25,7 @@ import ( "tailscale.com/types/ptr" "tailscale.com/util/cloudenv" "tailscale.com/util/dnsname" - "tailscale.com/util/lineread" + "tailscale.com/util/lineiter" "tailscale.com/version" "tailscale.com/version/distro" ) @@ -231,12 +231,12 @@ func desktop() (ret opt.Bool) { } seenDesktop := false - lineread.File("/proc/net/unix", func(line []byte) error { + for lr := range lineiter.File("/proc/net/unix") { + line, _ := lr.Value() seenDesktop = seenDesktop || mem.Contains(mem.B(line), mem.S(" @/tmp/dbus-")) seenDesktop = seenDesktop || mem.Contains(mem.B(line), mem.S(".X11-unix")) seenDesktop = seenDesktop || mem.Contains(mem.B(line), mem.S("/wayland-1")) - return nil - }) + } ret.Set(seenDesktop) // Only cache after a minute - compositors might not have started yet. @@ -305,21 +305,21 @@ func inContainer() opt.Bool { ret.Set(true) return ret } - lineread.File("/proc/1/cgroup", func(line []byte) error { + for lr := range lineiter.File("/proc/1/cgroup") { + line, _ := lr.Value() if mem.Contains(mem.B(line), mem.S("/docker/")) || mem.Contains(mem.B(line), mem.S("/lxc/")) { ret.Set(true) - return io.EOF // arbitrary non-nil error to stop loop + break } - return nil - }) - lineread.File("/proc/mounts", func(line []byte) error { + } + for lr := range lineiter.File("/proc/mounts") { + line, _ := lr.Value() if mem.Contains(mem.B(line), mem.S("lxcfs /proc/cpuinfo fuse.lxcfs")) { ret.Set(true) - return io.EOF + break } - return nil - }) + } return ret } diff --git a/hostinfo/hostinfo_linux.go b/hostinfo/hostinfo_linux.go index 53d4187bc..66484a358 100644 --- a/hostinfo/hostinfo_linux.go +++ b/hostinfo/hostinfo_linux.go @@ -12,7 +12,7 @@ import ( "golang.org/x/sys/unix" "tailscale.com/types/ptr" - "tailscale.com/util/lineread" + "tailscale.com/util/lineiter" "tailscale.com/version/distro" ) @@ -106,15 +106,18 @@ func linuxVersionMeta() (meta versionMeta) { } m := map[string]string{} - lineread.File(propFile, func(line []byte) error { + for lr := range lineiter.File(propFile) { + line, err := lr.Value() + if err != nil { + break + } eq := bytes.IndexByte(line, '=') if eq == -1 { - return nil + continue } k, v := string(line[:eq]), strings.Trim(string(line[eq+1:]), `"'`) m[k] = v - return nil - }) + } if v := m["VERSION_CODENAME"]; v != "" { meta.DistroCodeName = v diff --git a/ipn/ipnlocal/ssh.go b/ipn/ipnlocal/ssh.go index fbeb19bd1..383d03f5a 100644 --- a/ipn/ipnlocal/ssh.go +++ b/ipn/ipnlocal/ssh.go @@ -27,7 +27,7 @@ import ( "github.com/tailscale/golang-x-crypto/ssh" "go4.org/mem" "tailscale.com/tailcfg" - "tailscale.com/util/lineread" + "tailscale.com/util/lineiter" "tailscale.com/util/mak" ) @@ -80,30 +80,32 @@ func (b *LocalBackend) getSSHUsernames(req *tailcfg.C2NSSHUsernamesRequest) (*ta if err != nil { return nil, err } - lineread.Reader(bytes.NewReader(out), func(line []byte) error { + for line := range lineiter.Bytes(out) { line = bytes.TrimSpace(line) if len(line) == 0 || line[0] == '_' { - return nil + continue } add(string(line)) - return nil - }) + } default: - lineread.File("/etc/passwd", func(line []byte) error { + for lr := range 
lineiter.File("/etc/passwd") { + line, err := lr.Value() + if err != nil { + break + } line = bytes.TrimSpace(line) if len(line) == 0 || line[0] == '#' || line[0] == '_' { - return nil + continue } if mem.HasSuffix(mem.B(line), mem.S("/nologin")) || mem.HasSuffix(mem.B(line), mem.S("/false")) { - return nil + continue } colon := bytes.IndexByte(line, ':') if colon != -1 { add(string(line[:colon])) } - return nil - }) + } } return res, nil } diff --git a/net/netmon/interfaces_android.go b/net/netmon/interfaces_android.go index a96423eb6..26104e879 100644 --- a/net/netmon/interfaces_android.go +++ b/net/netmon/interfaces_android.go @@ -5,7 +5,6 @@ package netmon import ( "bytes" - "errors" "log" "net/netip" "os/exec" @@ -15,7 +14,7 @@ import ( "golang.org/x/sys/unix" "tailscale.com/net/netaddr" "tailscale.com/syncs" - "tailscale.com/util/lineread" + "tailscale.com/util/lineiter" ) var ( @@ -34,11 +33,6 @@ func init() { var procNetRouteErr atomic.Bool -// errStopReading is a sentinel error value used internally by -// lineread.File callers to stop reading. It doesn't escape to -// callers/users. -var errStopReading = errors.New("stop reading") - /* Parse 10.0.0.1 out of: @@ -54,44 +48,42 @@ func likelyHomeRouterIPAndroid() (ret netip.Addr, myIP netip.Addr, ok bool) { } lineNum := 0 var f []mem.RO - err := lineread.File(procNetRoutePath, func(line []byte) error { + for lr := range lineiter.File(procNetRoutePath) { + line, err := lr.Value() + if err != nil { + procNetRouteErr.Store(true) + return likelyHomeRouterIP() + } + lineNum++ if lineNum == 1 { // Skip header line. - return nil + continue } if lineNum > maxProcNetRouteRead { - return errStopReading + break } f = mem.AppendFields(f[:0], mem.B(line)) if len(f) < 4 { - return nil + continue } gwHex, flagsHex := f[2], f[3] flags, err := mem.ParseUint(flagsHex, 16, 16) if err != nil { - return nil // ignore error, skip line and keep going + continue // ignore error, skip line and keep going } if flags&(unix.RTF_UP|unix.RTF_GATEWAY) != unix.RTF_UP|unix.RTF_GATEWAY { - return nil + continue } ipu32, err := mem.ParseUint(gwHex, 16, 32) if err != nil { - return nil // ignore error, skip line and keep going + continue // ignore error, skip line and keep going } ip := netaddr.IPv4(byte(ipu32), byte(ipu32>>8), byte(ipu32>>16), byte(ipu32>>24)) if ip.IsPrivate() { ret = ip - return errStopReading + break } - return nil - }) - if errors.Is(err, errStopReading) { - err = nil - } - if err != nil { - procNetRouteErr.Store(true) - return likelyHomeRouterIP() } if ret.IsValid() { // Try to get the local IP of the interface associated with @@ -144,23 +136,26 @@ func likelyHomeRouterIPHelper() (ret netip.Addr, _ netip.Addr, ok bool) { return } // Search for line like "default via 10.0.2.2 dev radio0 table 1016 proto static mtu 1500 " - lineread.Reader(out, func(line []byte) error { + for lr := range lineiter.Reader(out) { + line, err := lr.Value() + if err != nil { + break + } const pfx = "default via " if !mem.HasPrefix(mem.B(line), mem.S(pfx)) { - return nil + continue } line = line[len(pfx):] sp := bytes.IndexByte(line, ' ') if sp == -1 { - return nil + continue } ipb := line[:sp] if ip, err := netip.ParseAddr(string(ipb)); err == nil && ip.Is4() { ret = ip log.Printf("interfaces: found Android default route %v", ip) } - return nil - }) + } cmd.Process.Kill() cmd.Wait() return ret, netip.Addr{}, ret.IsValid() diff --git a/net/netmon/interfaces_darwin_test.go b/net/netmon/interfaces_darwin_test.go index d34040d60..d756d1334 100644 --- 
a/net/netmon/interfaces_darwin_test.go +++ b/net/netmon/interfaces_darwin_test.go @@ -4,14 +4,13 @@ package netmon import ( - "errors" "io" "net/netip" "os/exec" "testing" "go4.org/mem" - "tailscale.com/util/lineread" + "tailscale.com/util/lineiter" "tailscale.com/version" ) @@ -73,31 +72,34 @@ func likelyHomeRouterIPDarwinExec() (ret netip.Addr, netif string, ok bool) { defer io.Copy(io.Discard, stdout) // clear the pipe to prevent hangs var f []mem.RO - lineread.Reader(stdout, func(lineb []byte) error { + for lr := range lineiter.Reader(stdout) { + lineb, err := lr.Value() + if err != nil { + break + } line := mem.B(lineb) if !mem.Contains(line, mem.S("default")) { - return nil + continue } f = mem.AppendFields(f[:0], line) if len(f) < 4 || !f[0].EqualString("default") { - return nil + continue } ipm, flagsm, netifm := f[1], f[2], f[3] if !mem.Contains(flagsm, mem.S("G")) { - return nil + continue } if mem.Contains(flagsm, mem.S("I")) { - return nil + continue } ip, err := netip.ParseAddr(string(mem.Append(nil, ipm))) if err == nil && ip.IsPrivate() { ret = ip netif = netifm.StringCopy() // We've found what we're looking for. - return errStopReadingNetstatTable + break } - return nil - }) + } return ret, netif, ret.IsValid() } @@ -110,5 +112,3 @@ func TestFetchRoutingTable(t *testing.T) { } } } - -var errStopReadingNetstatTable = errors.New("found private gateway") diff --git a/net/netmon/interfaces_linux.go b/net/netmon/interfaces_linux.go index 299f3101e..d0fb15aba 100644 --- a/net/netmon/interfaces_linux.go +++ b/net/netmon/interfaces_linux.go @@ -23,7 +23,7 @@ import ( "go4.org/mem" "golang.org/x/sys/unix" "tailscale.com/net/netaddr" - "tailscale.com/util/lineread" + "tailscale.com/util/lineiter" ) func init() { @@ -32,11 +32,6 @@ func init() { var procNetRouteErr atomic.Bool -// errStopReading is a sentinel error value used internally by -// lineread.File callers to stop reading. It doesn't escape to -// callers/users. -var errStopReading = errors.New("stop reading") - /* Parse 10.0.0.1 out of: @@ -52,44 +47,42 @@ func likelyHomeRouterIPLinux() (ret netip.Addr, myIP netip.Addr, ok bool) { } lineNum := 0 var f []mem.RO - err := lineread.File(procNetRoutePath, func(line []byte) error { + for lr := range lineiter.File(procNetRoutePath) { + line, err := lr.Value() + if err != nil { + procNetRouteErr.Store(true) + log.Printf("interfaces: failed to read /proc/net/route: %v", err) + return ret, myIP, false + } lineNum++ if lineNum == 1 { // Skip header line. 
- return nil + continue } if lineNum > maxProcNetRouteRead { - return errStopReading + break } f = mem.AppendFields(f[:0], mem.B(line)) if len(f) < 4 { - return nil + continue } gwHex, flagsHex := f[2], f[3] flags, err := mem.ParseUint(flagsHex, 16, 16) if err != nil { - return nil // ignore error, skip line and keep going + continue // ignore error, skip line and keep going } if flags&(unix.RTF_UP|unix.RTF_GATEWAY) != unix.RTF_UP|unix.RTF_GATEWAY { - return nil + continue } ipu32, err := mem.ParseUint(gwHex, 16, 32) if err != nil { - return nil // ignore error, skip line and keep going + continue // ignore error, skip line and keep going } ip := netaddr.IPv4(byte(ipu32), byte(ipu32>>8), byte(ipu32>>16), byte(ipu32>>24)) if ip.IsPrivate() { ret = ip - return errStopReading + break } - return nil - }) - if errors.Is(err, errStopReading) { - err = nil - } - if err != nil { - procNetRouteErr.Store(true) - log.Printf("interfaces: failed to read /proc/net/route: %v", err) } if ret.IsValid() { // Try to get the local IP of the interface associated with diff --git a/net/netmon/netmon_linux_test.go b/net/netmon/netmon_linux_test.go index d09fac26a..75d7c6465 100644 --- a/net/netmon/netmon_linux_test.go +++ b/net/netmon/netmon_linux_test.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !android + package netmon import ( diff --git a/net/tshttpproxy/tshttpproxy_synology.go b/net/tshttpproxy/tshttpproxy_synology.go index cda957648..2e50d26d3 100644 --- a/net/tshttpproxy/tshttpproxy_synology.go +++ b/net/tshttpproxy/tshttpproxy_synology.go @@ -17,7 +17,7 @@ import ( "sync" "time" - "tailscale.com/util/lineread" + "tailscale.com/util/lineiter" ) // These vars are overridden for tests. @@ -76,21 +76,22 @@ func synologyProxiesFromConfig() (*url.URL, *url.URL, error) { func parseSynologyConfig(r io.Reader) (*url.URL, *url.URL, error) { cfg := map[string]string{} - if err := lineread.Reader(r, func(line []byte) error { + for lr := range lineiter.Reader(r) { + line, err := lr.Value() + if err != nil { + return nil, nil, err + } // accept and skip over empty lines line = bytes.TrimSpace(line) if len(line) == 0 { - return nil + continue } key, value, ok := strings.Cut(string(line), "=") if !ok { - return fmt.Errorf("missing \"=\" in proxy.conf line: %q", line) + return nil, nil, fmt.Errorf("missing \"=\" in proxy.conf line: %q", line) } cfg[string(key)] = string(value) - return nil - }); err != nil { - return nil, nil, err } if cfg["proxy_enabled"] != "yes" { diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 9e4f5ffd3..7ce0aeea3 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -48,7 +48,7 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/ptr" "tailscale.com/util/cibuild" - "tailscale.com/util/lineread" + "tailscale.com/util/lineiter" "tailscale.com/util/must" "tailscale.com/version/distro" "tailscale.com/wgengine" @@ -1123,14 +1123,11 @@ func TestSSH(t *testing.T) { func parseEnv(out []byte) map[string]string { e := map[string]string{} - lineread.Reader(bytes.NewReader(out), func(line []byte) error { - i := bytes.IndexByte(line, '=') - if i == -1 { - return nil + for line := range lineiter.Bytes(out) { + if i := bytes.IndexByte(line, '='); i != -1 { + e[string(line[:i])] = string(line[i+1:]) } - e[string(line[:i])] = string(line[i+1:]) - return nil - }) + } return e } diff --git a/ssh/tailssh/user.go b/ssh/tailssh/user.go index 33ebb4db7..15191813b 100644 --- 
a/ssh/tailssh/user.go +++ b/ssh/tailssh/user.go @@ -6,7 +6,6 @@ package tailssh import ( - "io" "os" "os/exec" "os/user" @@ -18,7 +17,7 @@ import ( "go4.org/mem" "tailscale.com/envknob" "tailscale.com/hostinfo" - "tailscale.com/util/lineread" + "tailscale.com/util/lineiter" "tailscale.com/util/osuser" "tailscale.com/version/distro" ) @@ -110,15 +109,16 @@ func defaultPathForUser(u *user.User) string { } func defaultPathForUserOnNixOS(u *user.User) string { - var path string - lineread.File("/etc/pam/environment", func(lineb []byte) error { + for lr := range lineiter.File("/etc/pam/environment") { + lineb, err := lr.Value() + if err != nil { + return "" + } if v := pathFromPAMEnvLine(lineb, u); v != "" { - path = v - return io.EOF // stop iteration + return v } - return nil - }) - return path + } + return "" } func pathFromPAMEnvLine(line []byte, u *user.User) (path string) { diff --git a/types/result/result.go b/types/result/result.go new file mode 100644 index 000000000..6bd1c2ea6 --- /dev/null +++ b/types/result/result.go @@ -0,0 +1,49 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package result contains the Of result type, which is +// either a value or an error. +package result + +// Of is either a T value or an error. +// +// Think of it like Rust or Swift's result types. +// It's named "Of" because the fully qualified name +// for callers reads result.Of[T]. +type Of[T any] struct { + v T // valid if Err is nil; invalid if Err is non-nil + err error +} + +// Value returns a new result with value v, +// without an error. +func Value[T any](v T) Of[T] { + return Of[T]{v: v} +} + +// Error returns a new result with error err. +// If err is nil, the returned result is equivalent +// to calling Value with T's zero value. +func Error[T any](err error) Of[T] { + return Of[T]{err: err} +} + +// MustValue returns r's result value. +// It panics if r.Err returns non-nil. +func (r Of[T]) MustValue() T { + if r.err != nil { + panic(r.err) + } + return r.v +} + +// Value returns r's result value and error. +func (r Of[T]) Value() (T, error) { + return r.v, r.err +} + +// Err returns r's error, if any. +// When r.Err returns nil, it's safe to call r.MustValue without it panicking. +func (r Of[T]) Err() error { + return r.err +} diff --git a/util/lineiter/lineiter.go b/util/lineiter/lineiter.go new file mode 100644 index 000000000..5cb1eeef3 --- /dev/null +++ b/util/lineiter/lineiter.go @@ -0,0 +1,72 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package lineiter iterates over lines in things. +package lineiter + +import ( + "bufio" + "bytes" + "io" + "iter" + "os" + + "tailscale.com/types/result" +) + +// File returns an iterator that reads lines from the named file. +// +// The returned substrings don't include the trailing newline. +// Lines may be empty. +func File(name string) iter.Seq[result.Of[[]byte]] { + f, err := os.Open(name) + return reader(f, f, err) +} + +// Bytes returns an iterator over the lines in bs. +// The returned substrings don't include the trailing newline. +// Lines may be empty. +func Bytes(bs []byte) iter.Seq[[]byte] { + return func(yield func([]byte) bool) { + for len(bs) > 0 { + i := bytes.IndexByte(bs, '\n') + if i < 0 { + yield(bs) + return + } + if !yield(bs[:i]) { + return + } + bs = bs[i+1:] + } + } +} + +// Reader returns an iterator over the lines in r. +// +// The returned substrings don't include the trailing newline. +// Lines may be empty. 
+func Reader(r io.Reader) iter.Seq[result.Of[[]byte]] { + return reader(r, nil, nil) +} + +func reader(r io.Reader, c io.Closer, err error) iter.Seq[result.Of[[]byte]] { + return func(yield func(result.Of[[]byte]) bool) { + if err != nil { + yield(result.Error[[]byte](err)) + return + } + if c != nil { + defer c.Close() + } + bs := bufio.NewScanner(r) + for bs.Scan() { + if !yield(result.Value(bs.Bytes())) { + return + } + } + if err := bs.Err(); err != nil { + yield(result.Error[[]byte](err)) + } + } +} diff --git a/util/lineiter/lineiter_test.go b/util/lineiter/lineiter_test.go new file mode 100644 index 000000000..3373d5fe7 --- /dev/null +++ b/util/lineiter/lineiter_test.go @@ -0,0 +1,32 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package lineiter + +import ( + "slices" + "strings" + "testing" +) + +func TestBytesLines(t *testing.T) { + var got []string + for line := range Bytes([]byte("foo\n\nbar\nbaz")) { + got = append(got, string(line)) + } + want := []string{"foo", "", "bar", "baz"} + if !slices.Equal(got, want) { + t.Errorf("got %q; want %q", got, want) + } +} + +func TestReader(t *testing.T) { + var got []string + for line := range Reader(strings.NewReader("foo\n\nbar\nbaz")) { + got = append(got, string(line.MustValue())) + } + want := []string{"foo", "", "bar", "baz"} + if !slices.Equal(got, want) { + t.Errorf("got %q; want %q", got, want) + } +} diff --git a/util/pidowner/pidowner_linux.go b/util/pidowner/pidowner_linux.go index 2a5181f14..a07f51242 100644 --- a/util/pidowner/pidowner_linux.go +++ b/util/pidowner/pidowner_linux.go @@ -8,26 +8,26 @@ import ( "os" "strings" - "tailscale.com/util/lineread" + "tailscale.com/util/lineiter" ) func ownerOfPID(pid int) (userID string, err error) { file := fmt.Sprintf("/proc/%d/status", pid) - err = lineread.File(file, func(line []byte) error { + for lr := range lineiter.File(file) { + line, err := lr.Value() + if err != nil { + if os.IsNotExist(err) { + return "", ErrProcessNotFound + } + return "", err + } if len(line) < 4 || string(line[:4]) != "Uid:" { - return nil + continue } f := strings.Fields(string(line)) if len(f) >= 2 { userID = f[1] // real userid } - return nil - }) - if os.IsNotExist(err) { - return "", ErrProcessNotFound - } - if err != nil { - return } if userID == "" { return "", fmt.Errorf("missing Uid line in %s", file) diff --git a/version/distro/distro.go b/version/distro/distro.go index 8865a834b..ce61137cf 100644 --- a/version/distro/distro.go +++ b/version/distro/distro.go @@ -6,13 +6,12 @@ package distro import ( "bytes" - "io" "os" "runtime" "strconv" "tailscale.com/types/lazy" - "tailscale.com/util/lineread" + "tailscale.com/util/lineiter" ) type Distro string @@ -132,18 +131,19 @@ func DSMVersion() int { return v } // But when run from the command line, we have to read it from the file: - lineread.File("/etc/VERSION", func(line []byte) error { + for lr := range lineiter.File("/etc/VERSION") { + line, err := lr.Value() + if err != nil { + break // but otherwise ignore + } line = bytes.TrimSpace(line) if string(line) == `majorversion="7"` { - v = 7 - return io.EOF + return 7 } if string(line) == `majorversion="6"` { - v = 6 - return io.EOF + return 6 } - return nil - }) - return v + } + return 0 }) } From 065825e94c143bf50f997528332fd63cf47b6cda Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 4 Nov 2024 15:02:25 +0000 Subject: [PATCH 0117/1708] licenses: update license notices Signed-off-by: License Updater --- licenses/windows.md | 4 ++-- 1 file 
changed, 2 insertions(+), 2 deletions(-) diff --git a/licenses/windows.md b/licenses/windows.md index 3f6650b9e..8cef25685 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -57,8 +57,8 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/52804fd3056a/LICENSE)) - - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/6580b55d49ca/LICENSE)) + - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/8865133fd3ef/LICENSE)) + - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/28f7e73c7afb/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) From 8dcbd988f7653aa17b33094d3f917125414aeab6 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Wed, 23 Oct 2024 20:56:09 -0500 Subject: [PATCH 0118/1708] cmd/derper: show more information on home page - Basic description of DERP If configured to do so, also show - Mailto link to security@tailscale.com - Link to Tailscale Security Policies - Link to Tailscale Acceptable Use Policy Updates tailscale/corp#24092 Signed-off-by: Percy Wegmann --- cmd/derper/depaware.txt | 3 ++ cmd/derper/derper.go | 79 +++++++++++++++++++++++++++++---------- cmd/derper/derper_test.go | 29 ++++++++++++++ 3 files changed, 92 insertions(+), 19 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index a3eec2046..8fa5334aa 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -264,6 +264,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa hash/fnv from google.golang.org/protobuf/internal/detrand hash/maphash from go4.org/mem html from net/http/pprof+ + html/template from tailscale.com/cmd/derper io from bufio+ io/fs from crypto/x509+ io/ioutil from github.com/mitchellh/go-ps+ @@ -308,6 +309,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa sync/atomic from context+ syscall from crypto/rand+ text/tabwriter from runtime/pprof + text/template from html/template + text/template/parse from html/template+ time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 80c9dc44f..51be3abbe 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -19,6 +19,7 @@ import ( "expvar" "flag" "fmt" + "html/template" "io" "log" "math" @@ -212,25 +213,16 @@ 
func main() {
 		tsweb.AddBrowserHeaders(w)
 		w.Header().Set("Content-Type", "text/html; charset=utf-8")
 		w.WriteHeader(200)
-		io.WriteString(w, `<html><body>
-<h1>DERP</h1>
-<p>
-  This is a <a href="https://tailscale.com/">Tailscale</a> DERP server.
-</p>
-<p>
-  Documentation:
-</p>
-<ul>
-<li><a href="https://tailscale.com/kb/1232/derp-servers">About DERP</a></li>
-<li><a href="https://pkg.go.dev/tailscale.com/derp">Protocol & Go docs</a></li>
-<li><a href="https://github.com/tailscale/tailscale/tree/main/cmd/derper#derp">How to run a DERP server</a></li>
-</ul>
-`)
-		if !*runDERP {
-			io.WriteString(w, `<p>Status: <b>disabled</b></p>`)
-		}
-		if tsweb.AllowDebugAccess(r) {
-			io.WriteString(w, "<p>Debug info at <a href='/debug/'>/debug/</a>.</p>\n")
+		err := homePageTemplate.Execute(w, templateData{
+			ShowAbuseInfo: validProdHostname.MatchString(*hostname),
+			Disabled:      !*runDERP,
+			AllowDebug:    tsweb.AllowDebugAccess(r),
+		})
+		if err != nil {
+			if r.Context().Err() == nil {
+				log.Printf("homePageTemplate.Execute: %v", err)
+			}
+			return
 		}
 	}))
 	mux.Handle("/robots.txt", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -468,3 +460,52 @@ func init() {
 		return 0
 	}))
 }
+
+type templateData struct {
+	ShowAbuseInfo bool
+	Disabled      bool
+	AllowDebug    bool
+}
+
+// homePageTemplate renders the home page using [templateData].
+var homePageTemplate = template.Must(template.New("home").Parse(`<html><body>
+<h1>DERP</h1>
+<p>
+  This is a <a href="https://tailscale.com/">Tailscale</a> DERP server.
+</p>
+
+<p>
+  It provides STUN, interactive connectivity establishment, and relaying of end-to-end encrypted traffic
+  for Tailscale clients.
+</p>
+
+{{if .ShowAbuseInfo }}
+<p>
+  If you suspect abuse, please contact <a href="mailto:security@tailscale.com">security@tailscale.com</a>.
+</p>
+{{end}}
+
+<p>
+  Documentation:
+</p>

+<ul>
+{{if .ShowAbuseInfo }}
+<li><a href="https://tailscale.com/security-policies">Tailscale Security Policies</a></li>
+<li><a href="https://tailscale.com/tailscale-aup">Tailscale Acceptable Use Policy</a></li>
+{{end}}
+<li><a href="https://tailscale.com/kb/1232/derp-servers">About DERP</a></li>
+<li><a href="https://pkg.go.dev/tailscale.com/derp">Protocol & Go docs</a></li>
+<li><a href="https://github.com/tailscale/tailscale/tree/main/cmd/derper#derp">How to run a DERP server</a></li>
+</ul>
+
+{{if .Disabled}}
+<p>Status: <b>disabled</b></p>
+{{end}}
+
+{{if .AllowDebug}}
+<p>Debug info at <a href='/debug/'>/debug/</a>.</p>
+{{end}} + + +`)) diff --git a/cmd/derper/derper_test.go b/cmd/derper/derper_test.go index 553a78f9f..6ddf4455b 100644 --- a/cmd/derper/derper_test.go +++ b/cmd/derper/derper_test.go @@ -4,7 +4,9 @@ package main import ( + "bytes" "context" + "fmt" "net/http" "net/http/httptest" "strings" @@ -110,3 +112,30 @@ func TestDeps(t *testing.T) { }, }.Check(t) } + +func TestTemplate(t *testing.T) { + buf := &bytes.Buffer{} + err := homePageTemplate.Execute(buf, templateData{ + ShowAbuseInfo: true, + Disabled: true, + AllowDebug: true, + }) + if err != nil { + t.Fatal(err) + } + + str := buf.String() + if !strings.Contains(str, "If you suspect abuse") { + t.Error("Output is missing abuse mailto") + } + if !strings.Contains(str, "Tailscale Security Policies") { + t.Error("Output is missing Tailscale Security Policies link") + } + if !strings.Contains(str, "Status:") { + t.Error("Output is missing disabled status") + } + if !strings.Contains(str, "Debug info") { + t.Error("Output is missing debug info") + } + fmt.Println(buf.String()) +} From 8ba9b558d2a8efe172f7a005ec1e6572b60f05e2 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 7 Nov 2024 12:42:29 +0000 Subject: [PATCH 0119/1708] envknob,kube/kubetypes,cmd/k8s-operator: add app type for ProxyGroup (#14029) Sets a custom hostinfo app type for ProxyGroup replicas, similarly to how we do it for all other Kubernetes Operator managed components. Updates tailscale/tailscale#13406,tailscale/corp#22920 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/proxygroup.go | 2 +- cmd/k8s-operator/proxygroup_specs.go | 5 +++++ envknob/envknob.go | 2 +- kube/kubetypes/metrics.go | 17 ++++++++++------- 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 1f9983aa9..7dad9e573 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -47,7 +47,7 @@ const ( reasonProxyGroupInvalid = "ProxyGroupInvalid" ) -var gaugeProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupCount) +var gaugeProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupEgressCount) // ProxyGroupReconciler ensures cluster resources for a ProxyGroup definition. type ProxyGroupReconciler struct { diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 9aa7ac3b0..f9d1ea52b 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -15,6 +15,7 @@ import ( "sigs.k8s.io/yaml" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/egressservices" + "tailscale.com/kube/kubetypes" "tailscale.com/types/ptr" ) @@ -146,6 +147,10 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHa Name: "TS_USERSPACE", Value: "false", }, + { + Name: "TS_INTERNAL_APP", + Value: kubetypes.AppProxyGroupEgress, + }, } if tsFirewallMode != "" { diff --git a/envknob/envknob.go b/envknob/envknob.go index 59a6d90af..e74bfea71 100644 --- a/envknob/envknob.go +++ b/envknob/envknob.go @@ -411,7 +411,7 @@ func TKASkipSignatureCheck() bool { return Bool("TS_UNSAFE_SKIP_NKS_VERIFICATION // Kubernetes Operator components. 
func App() string { a := os.Getenv("TS_INTERNAL_APP") - if a == kubetypes.AppConnector || a == kubetypes.AppEgressProxy || a == kubetypes.AppIngressProxy || a == kubetypes.AppIngressResource { + if a == kubetypes.AppConnector || a == kubetypes.AppEgressProxy || a == kubetypes.AppIngressProxy || a == kubetypes.AppIngressResource || a == kubetypes.AppProxyGroupEgress || a == kubetypes.AppProxyGroupIngress { return a } return "" diff --git a/kube/kubetypes/metrics.go b/kube/kubetypes/metrics.go index b183f1f6f..63078385a 100644 --- a/kube/kubetypes/metrics.go +++ b/kube/kubetypes/metrics.go @@ -5,12 +5,14 @@ package kubetypes const ( // Hostinfo App values for the Tailscale Kubernetes Operator components. - AppOperator = "k8s-operator" - AppAPIServerProxy = "k8s-operator-proxy" - AppIngressProxy = "k8s-operator-ingress-proxy" - AppIngressResource = "k8s-operator-ingress-resource" - AppEgressProxy = "k8s-operator-egress-proxy" - AppConnector = "k8s-operator-connector-resource" + AppOperator = "k8s-operator" + AppAPIServerProxy = "k8s-operator-proxy" + AppIngressProxy = "k8s-operator-ingress-proxy" + AppIngressResource = "k8s-operator-ingress-resource" + AppEgressProxy = "k8s-operator-egress-proxy" + AppConnector = "k8s-operator-connector-resource" + AppProxyGroupEgress = "k8s-operator-proxygroup-egress" + AppProxyGroupIngress = "k8s-operator-proxygroup-ingress" // Clientmetrics for Tailscale Kubernetes Operator components MetricIngressProxyCount = "k8s_ingress_proxies" // L3 @@ -22,5 +24,6 @@ const ( MetricNameserverCount = "k8s_nameserver_resources" MetricRecorderCount = "k8s_recorder_resources" MetricEgressServiceCount = "k8s_egress_service_resources" - MetricProxyGroupCount = "k8s_proxygroup_resources" + MetricProxyGroupEgressCount = "k8s_proxygroup_egress_resources" + MetricProxyGroupIngressCount = "k8s_proxygroup_ingress_resources" ) From 3090461961e30fffb5a28b1432c47a627177a5a1 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 7 Nov 2024 08:02:14 -0800 Subject: [PATCH 0120/1708] tsweb/varz: optimize some allocs, add helper func for others Updates #cleanup Updates tailscale/corp#23546 (noticed when doing this) Change-Id: Ia9f627fe32bb4955739b2787210ba18f5de27f4d Signed-off-by: Brad Fitzpatrick --- tsweb/varz/varz.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tsweb/varz/varz.go b/tsweb/varz/varz.go index 561b24877..952ebc231 100644 --- a/tsweb/varz/varz.go +++ b/tsweb/varz/varz.go @@ -23,10 +23,16 @@ import ( "tailscale.com/version" ) +// StaticStringVar returns a new expvar.Var that always returns s. 
+func StaticStringVar(s string) expvar.Var { + var v any = s // box s into an interface just once + return expvar.Func(func() any { return v }) +} + func init() { expvar.Publish("process_start_unix_time", expvar.Func(func() any { return timeStart.Unix() })) - expvar.Publish("version", expvar.Func(func() any { return version.Long() })) - expvar.Publish("go_version", expvar.Func(func() any { return runtime.Version() })) + expvar.Publish("version", StaticStringVar(version.Long())) + expvar.Publish("go_version", StaticStringVar(runtime.Version())) expvar.Publish("counter_uptime_sec", expvar.Func(func() any { return int64(Uptime().Seconds()) })) expvar.Publish("gauge_goroutines", expvar.Func(func() any { return runtime.NumGoroutine() })) } From 2c8859c2e725af2de59203c0b2d39b96f135cb60 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 7 Nov 2024 19:27:53 +0000 Subject: [PATCH 0121/1708] client/tailscale,ipn/{ipnlocal,localapi}: add a pre-shutdown localAPI endpoint that terminates control connections. (#14028) Adds a /disconnect-control local API endpoint that just shuts down control client. This can be run before shutting down an HA subnet router/app connector replica - it will ensure that all connection to control are dropped and control thus considers this node inactive and tells peers to switch over to another replica. Meanwhile the existing connections keep working (assuming that the replica is given some graceful shutdown period). Updates tailscale/tailscale#14020 Signed-off-by: Irbe Krumina --- client/tailscale/localclient.go | 11 +++++++++++ ipn/ipnlocal/local.go | 13 +++++++++++++ ipn/localapi/localapi.go | 17 +++++++++++++++++ 3 files changed, 41 insertions(+) diff --git a/client/tailscale/localclient.go b/client/tailscale/localclient.go index 9c2bcc467..5eb668176 100644 --- a/client/tailscale/localclient.go +++ b/client/tailscale/localclient.go @@ -1327,6 +1327,17 @@ func (lc *LocalClient) SetServeConfig(ctx context.Context, config *ipn.ServeConf return nil } +// DisconnectControl shuts down all connections to control, thus making control consider this node inactive. This can be +// run on HA subnet router or app connector replicas before shutting them down to ensure peers get told to switch over +// to another replica whilst there is still some grace period for the existing connections to terminate. +func (lc *LocalClient) DisconnectControl(ctx context.Context) error { + _, _, err := lc.sendWithHeaders(ctx, "POST", "/localapi/v0/disconnect-control", 200, nil, nil) + if err != nil { + return fmt.Errorf("error disconnecting control: %w", err) + } + return nil +} + // NetworkLockDisable shuts down network-lock across the tailnet. func (lc *LocalClient) NetworkLockDisable(ctx context.Context, secret []byte) error { if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/disable", 200, bytes.NewReader(secret)); err != nil { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index edd56f7c4..337fa3d2b 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -800,6 +800,19 @@ func (b *LocalBackend) pauseOrResumeControlClientLocked() { b.cc.SetPaused((b.state == ipn.Stopped && b.netMap != nil) || (!networkUp && !testenv.InTest() && !assumeNetworkUpdateForTest())) } +// DisconnectControl shuts down control client. This can be run before node shutdown to force control to consider this ndoe +// inactive. 
This can be used to ensure that nodes that are HA subnet router or app connector replicas are shutting +// down, clients switch over to other replicas whilst the existing connections are kept alive for some period of time. +func (b *LocalBackend) DisconnectControl() { + b.mu.Lock() + defer b.mu.Unlock() + cc := b.resetControlClientLocked() + if cc == nil { + return + } + cc.Shutdown() +} + // captivePortalDetectionInterval is the duration to wait in an unhealthy state with connectivity broken // before running captive portal detection. const captivePortalDetectionInterval = 2 * time.Second diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 0d41725d8..dc8c08975 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -100,6 +100,7 @@ var handler = map[string]localAPIHandler{ "derpmap": (*Handler).serveDERPMap, "dev-set-state-store": (*Handler).serveDevSetStateStore, "dial": (*Handler).serveDial, + "disconnect-control": (*Handler).disconnectControl, "dns-osconfig": (*Handler).serveDNSOSConfig, "dns-query": (*Handler).serveDNSQuery, "drive/fileserver-address": (*Handler).serveDriveServerAddr, @@ -952,6 +953,22 @@ func (h *Handler) servePprof(w http.ResponseWriter, r *http.Request) { servePprofFunc(w, r) } +// disconnectControl is the handler for local API /disconnect-control endpoint that shuts down control client, so that +// node no longer communicates with control. Doing this makes control consider this node inactive. This can be used +// before shutting down a replica of HA subnet router or app connector deployments to ensure that control tells the +// peers to switch over to another replica whilst still maintaining th existing peer connections. +func (h *Handler) disconnectControl(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + h.b.DisconnectControl() +} + func (h *Handler) reloadConfig(w http.ResponseWriter, r *http.Request) { if !h.PermitWrite { http.Error(w, "access denied", http.StatusForbidden) From 23880eb5b05368d30023f91c314c9cc2e19f4a90 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 7 Nov 2024 15:21:44 -0800 Subject: [PATCH 0122/1708] cmd/tailscaled: support "ts_omit_ssh" build tag to remove SSH Some environments would like to remove Tailscale SSH support for the binary for various reasons when not needed (either for peace of mind, or the ~1MB of binary space savings). 
Updates tailscale/corp#24454 Updates #1278 Updates #12614 Change-Id: Iadd6c5a393992c254b5dc9aa9a526916f96fd07a Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/deps_test.go | 30 ++++++++++++++++++++++++++++++ cmd/tailscaled/ssh.go | 2 +- tstest/deptest/deptest.go | 3 ++- 3 files changed, 33 insertions(+), 2 deletions(-) create mode 100644 cmd/tailscaled/deps_test.go diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go new file mode 100644 index 000000000..2b4bc280d --- /dev/null +++ b/cmd/tailscaled/deps_test.go @@ -0,0 +1,30 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "testing" + + "tailscale.com/tstest/deptest" +) + +func TestOmitSSH(t *testing.T) { + const msg = "unexpected with ts_omit_ssh" + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_ssh", + BadDeps: map[string]string{ + "tailscale.com/ssh/tailssh": msg, + "golang.org/x/crypto/ssh": msg, + "tailscale.com/sessionrecording": msg, + "github.com/anmitsu/go-shlex": msg, + "github.com/creack/pty": msg, + "github.com/kr/fs": msg, + "github.com/pkg/sftp": msg, + "github.com/u-root/u-root/pkg/termios": msg, + "tempfork/gliderlabs/ssh": msg, + }, + }.Check(t) +} diff --git a/cmd/tailscaled/ssh.go b/cmd/tailscaled/ssh.go index f7b0b367e..b10a3b774 100644 --- a/cmd/tailscaled/ssh.go +++ b/cmd/tailscaled/ssh.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || darwin || freebsd || openbsd +//go:build (linux || darwin || freebsd || openbsd) && !ts_omit_ssh package main diff --git a/tstest/deptest/deptest.go b/tstest/deptest/deptest.go index 57db2b79a..ba214de32 100644 --- a/tstest/deptest/deptest.go +++ b/tstest/deptest/deptest.go @@ -21,6 +21,7 @@ type DepChecker struct { GOOS string // optional GOARCH string // optional BadDeps map[string]string // package => why + Tags string // comma-separated } func (c DepChecker) Check(t *testing.T) { @@ -29,7 +30,7 @@ func (c DepChecker) Check(t *testing.T) { t.Skip("skipping dep tests on windows hosts") } t.Helper() - cmd := exec.Command("go", "list", "-json", ".") + cmd := exec.Command("go", "list", "-json", "-tags="+c.Tags, ".") var extraEnv []string if c.GOOS != "" { extraEnv = append(extraEnv, "GOOS="+c.GOOS) From c3306bfd15e761e0ad38e3e3970becd0d301e4c7 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 7 Nov 2024 15:59:19 -0800 Subject: [PATCH 0123/1708] control/controlhttp/controlhttpserver: split out Accept to its own package Otherwise all the clients only using control/controlhttp for the ts2021 HTTP client were also pulling in WebSocket libraries, as the server side always needs to speak websockets, but only GOOS=js clients speak it. This doesn't yet totally remove the websocket dependency on Linux because Linux has a envknob opt-in to act like GOOS=js for manual testing and force the use of WebSockets for DERP only (not control). We can put that behind a build tag in a future change to eliminate the dep on all GOOSes. 
Updates #1278 Change-Id: I4f60508f4cad52bf8c8943c8851ecee506b7ebc9 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 11 +++++----- cmd/tailscale/depaware.txt | 11 +++++----- cmd/tailscaled/depaware.txt | 11 +++++----- control/controlclient/noise_test.go | 4 ++-- control/controlhttp/client.go | 9 +++++---- control/controlhttp/constants.go | 9 --------- .../controlhttpcommon/controlhttpcommon.go | 15 ++++++++++++++ .../controlhttpserver.go} | 16 ++++++++------- control/controlhttp/http_test.go | 20 ++++++++++++++++--- tstest/integration/testcontrol/testcontrol.go | 4 ++-- 10 files changed, 68 insertions(+), 42 deletions(-) create mode 100644 control/controlhttp/controlhttpcommon/controlhttpcommon.go rename control/controlhttp/{server.go => controlhttpserver/controlhttpserver.go} (92%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 74536c6c9..cdd2ee722 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -80,10 +80,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus github.com/bits-and-blooms/bitset from github.com/gaissmai/bart 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus - github.com/coder/websocket from tailscale.com/control/controlhttp+ - github.com/coder/websocket/internal/errd from github.com/coder/websocket - github.com/coder/websocket/internal/util from github.com/coder/websocket - github.com/coder/websocket/internal/xsync from github.com/coder/websocket + L github.com/coder/websocket from tailscale.com/derp/derphttp+ + L github.com/coder/websocket/internal/errd from github.com/coder/websocket + L github.com/coder/websocket/internal/util from github.com/coder/websocket + L github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw 💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ @@ -658,6 +658,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnlocal+ tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ @@ -740,7 +741,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/tsd+ - tailscale.com/net/wsconn from tailscale.com/control/controlhttp+ + L tailscale.com/net/wsconn from tailscale.com/derp/derphttp tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/tailscale+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index ac5440d2c..60af1de01 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -5,10 +5,10 @@ tailscale.com/cmd/tailscale dependencies: 
(generated by github.com/tailscale/dep W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - github.com/coder/websocket from tailscale.com/control/controlhttp+ - github.com/coder/websocket/internal/errd from github.com/coder/websocket - github.com/coder/websocket/internal/util from github.com/coder/websocket - github.com/coder/websocket/internal/xsync from github.com/coder/websocket + L github.com/coder/websocket from tailscale.com/derp/derphttp+ + L github.com/coder/websocket/internal/errd from github.com/coder/websocket + L github.com/coder/websocket/internal/util from github.com/coder/websocket + L github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/pe+ W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/winutil/authenticode @@ -86,6 +86,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/cmd/tailscale/cli/ffcomplete/internal from tailscale.com/cmd/tailscale/cli/ffcomplete tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlhttp from tailscale.com/cmd/tailscale/cli + tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/net/portmapper tailscale.com/derp from tailscale.com/derp/derphttp tailscale.com/derp/derphttp from tailscale.com/net/netcheck @@ -124,7 +125,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ - tailscale.com/net/wsconn from tailscale.com/control/controlhttp+ + L tailscale.com/net/wsconn from tailscale.com/derp/derphttp tailscale.com/paths from tailscale.com/client/tailscale+ 💣 tailscale.com/safesocket from tailscale.com/client/tailscale+ tailscale.com/syncs from tailscale.com/cmd/tailscale/cli+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 31a0cb67c..707c0c065 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -79,10 +79,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm github.com/bits-and-blooms/bitset from github.com/gaissmai/bart - github.com/coder/websocket from tailscale.com/control/controlhttp+ - github.com/coder/websocket/internal/errd from github.com/coder/websocket - github.com/coder/websocket/internal/util from github.com/coder/websocket - github.com/coder/websocket/internal/xsync from github.com/coder/websocket + L github.com/coder/websocket from tailscale.com/derp/derphttp+ + L github.com/coder/websocket/internal/errd from github.com/coder/websocket + L github.com/coder/websocket/internal/util from github.com/coder/websocket + L github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw LD 💣 github.com/creack/pty from 
tailscale.com/ssh/tailssh W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ @@ -249,6 +249,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ @@ -327,7 +328,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ - tailscale.com/net/wsconn from tailscale.com/control/controlhttp+ + L tailscale.com/net/wsconn from tailscale.com/derp/derphttp tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/tailscale+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/control/controlclient/noise_test.go b/control/controlclient/noise_test.go index f2627bd0a..69a3a6a36 100644 --- a/control/controlclient/noise_test.go +++ b/control/controlclient/noise_test.go @@ -15,7 +15,7 @@ import ( "time" "golang.org/x/net/http2" - "tailscale.com/control/controlhttp" + "tailscale.com/control/controlhttp/controlhttpserver" "tailscale.com/internal/noiseconn" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" @@ -201,7 +201,7 @@ func (up *Upgrader) ServeHTTP(w http.ResponseWriter, r *http.Request) { return nil } - cbConn, err := controlhttp.AcceptHTTP(r.Context(), w, r, up.noiseKeyPriv, earlyWriteFn) + cbConn, err := controlhttpserver.AcceptHTTP(r.Context(), w, r, up.noiseKeyPriv, earlyWriteFn) if err != nil { up.logf("controlhttp: Accept: %v", err) return diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index 7e5263e33..9b1d5a1a5 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -38,6 +38,7 @@ import ( "time" "tailscale.com/control/controlbase" + "tailscale.com/control/controlhttp/controlhttpcommon" "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/net/dnscache" @@ -571,9 +572,9 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad Method: "POST", URL: u, Header: http.Header{ - "Upgrade": []string{upgradeHeaderValue}, - "Connection": []string{"upgrade"}, - handshakeHeaderName: []string{base64.StdEncoding.EncodeToString(init)}, + "Upgrade": []string{controlhttpcommon.UpgradeHeaderValue}, + "Connection": []string{"upgrade"}, + controlhttpcommon.HandshakeHeaderName: []string{base64.StdEncoding.EncodeToString(init)}, }, } req = req.WithContext(ctx) @@ -597,7 +598,7 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad return nil, fmt.Errorf("httptrace didn't provide a connection") } - if next := resp.Header.Get("Upgrade"); next != upgradeHeaderValue { + if next := resp.Header.Get("Upgrade"); next != controlhttpcommon.UpgradeHeaderValue { resp.Body.Close() return nil, fmt.Errorf("server switched to unexpected protocol %q", next) } diff --git a/control/controlhttp/constants.go b/control/controlhttp/constants.go index ea1725e76..0b550accc 100644 --- 
a/control/controlhttp/constants.go +++ b/control/controlhttp/constants.go @@ -18,15 +18,6 @@ import ( ) const ( - // upgradeHeader is the value of the Upgrade HTTP header used to - // indicate the Tailscale control protocol. - upgradeHeaderValue = "tailscale-control-protocol" - - // handshakeHeaderName is the HTTP request header that can - // optionally contain base64-encoded initial handshake - // payload, to save an RTT. - handshakeHeaderName = "X-Tailscale-Handshake" - // serverUpgradePath is where the server-side HTTP handler to // to do the protocol switch is located. serverUpgradePath = "/ts2021" diff --git a/control/controlhttp/controlhttpcommon/controlhttpcommon.go b/control/controlhttp/controlhttpcommon/controlhttpcommon.go new file mode 100644 index 000000000..a86b7ca04 --- /dev/null +++ b/control/controlhttp/controlhttpcommon/controlhttpcommon.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package controlhttpcommon contains common constants for used +// by the controlhttp client and controlhttpserver packages. +package controlhttpcommon + +// UpgradeHeader is the value of the Upgrade HTTP header used to +// indicate the Tailscale control protocol. +const UpgradeHeaderValue = "tailscale-control-protocol" + +// handshakeHeaderName is the HTTP request header that can +// optionally contain base64-encoded initial handshake +// payload, to save an RTT. +const HandshakeHeaderName = "X-Tailscale-Handshake" diff --git a/control/controlhttp/server.go b/control/controlhttp/controlhttpserver/controlhttpserver.go similarity index 92% rename from control/controlhttp/server.go rename to control/controlhttp/controlhttpserver/controlhttpserver.go index 7c3dd5618..47f049c18 100644 --- a/control/controlhttp/server.go +++ b/control/controlhttp/controlhttpserver/controlhttpserver.go @@ -3,7 +3,8 @@ //go:build !ios -package controlhttp +// Packet controlhttpserver contains the HTTP server side of the ts2021 control protocol. +package controlhttpserver import ( "context" @@ -18,6 +19,7 @@ import ( "github.com/coder/websocket" "tailscale.com/control/controlbase" + "tailscale.com/control/controlhttp/controlhttpcommon" "tailscale.com/net/netutil" "tailscale.com/net/wsconn" "tailscale.com/types/key" @@ -45,12 +47,12 @@ func acceptHTTP(ctx context.Context, w http.ResponseWriter, r *http.Request, pri if next == "websocket" { return acceptWebsocket(ctx, w, r, private) } - if next != upgradeHeaderValue { + if next != controlhttpcommon.UpgradeHeaderValue { http.Error(w, "unknown next protocol", http.StatusBadRequest) return nil, fmt.Errorf("client requested unhandled next protocol %q", next) } - initB64 := r.Header.Get(handshakeHeaderName) + initB64 := r.Header.Get(controlhttpcommon.HandshakeHeaderName) if initB64 == "" { http.Error(w, "missing Tailscale handshake header", http.StatusBadRequest) return nil, errors.New("no tailscale handshake header in HTTP request") @@ -67,7 +69,7 @@ func acceptHTTP(ctx context.Context, w http.ResponseWriter, r *http.Request, pri return nil, errors.New("can't hijack client connection") } - w.Header().Set("Upgrade", upgradeHeaderValue) + w.Header().Set("Upgrade", controlhttpcommon.UpgradeHeaderValue) w.Header().Set("Connection", "upgrade") w.WriteHeader(http.StatusSwitchingProtocols) @@ -117,7 +119,7 @@ func acceptHTTP(ctx context.Context, w http.ResponseWriter, r *http.Request, pri // speak HTTP) to a Tailscale control protocol base transport connection. 
func acceptWebsocket(ctx context.Context, w http.ResponseWriter, r *http.Request, private key.MachinePrivate) (*controlbase.Conn, error) { c, err := websocket.Accept(w, r, &websocket.AcceptOptions{ - Subprotocols: []string{upgradeHeaderValue}, + Subprotocols: []string{controlhttpcommon.UpgradeHeaderValue}, OriginPatterns: []string{"*"}, // Disable compression because we transmit Noise messages that are not // compressible. @@ -129,7 +131,7 @@ func acceptWebsocket(ctx context.Context, w http.ResponseWriter, r *http.Request if err != nil { return nil, fmt.Errorf("Could not accept WebSocket connection %v", err) } - if c.Subprotocol() != upgradeHeaderValue { + if c.Subprotocol() != controlhttpcommon.UpgradeHeaderValue { c.Close(websocket.StatusPolicyViolation, "client must speak the control subprotocol") return nil, fmt.Errorf("Unexpected subprotocol %q", c.Subprotocol()) } @@ -137,7 +139,7 @@ func acceptWebsocket(ctx context.Context, w http.ResponseWriter, r *http.Request c.Close(websocket.StatusPolicyViolation, "Could not parse parameters") return nil, fmt.Errorf("parse query parameters: %v", err) } - initB64 := r.Form.Get(handshakeHeaderName) + initB64 := r.Form.Get(controlhttpcommon.HandshakeHeaderName) if initB64 == "" { c.Close(websocket.StatusPolicyViolation, "missing Tailscale handshake parameter") return nil, errors.New("no tailscale handshake parameter in HTTP request") diff --git a/control/controlhttp/http_test.go b/control/controlhttp/http_test.go index 8c8ed7f57..00cc1e6cf 100644 --- a/control/controlhttp/http_test.go +++ b/control/controlhttp/http_test.go @@ -23,12 +23,15 @@ import ( "time" "tailscale.com/control/controlbase" + "tailscale.com/control/controlhttp/controlhttpcommon" + "tailscale.com/control/controlhttp/controlhttpserver" "tailscale.com/net/dnscache" "tailscale.com/net/netmon" "tailscale.com/net/socks5" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/tstest/deptest" "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" @@ -158,7 +161,7 @@ func testControlHTTP(t *testing.T, param httpTestParam) { return err } } - conn, err := AcceptHTTP(context.Background(), w, r, server, earlyWriteFn) + conn, err := controlhttpserver.AcceptHTTP(context.Background(), w, r, server, earlyWriteFn) if err != nil { log.Print(err) } @@ -529,7 +532,7 @@ EKTcWGekdmdDPsHloRNtsiCa697B2O9IFA== func brokenMITMHandler(clock tstime.Clock) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Upgrade", upgradeHeaderValue) + w.Header().Set("Upgrade", controlhttpcommon.UpgradeHeaderValue) w.Header().Set("Connection", "upgrade") w.WriteHeader(http.StatusSwitchingProtocols) w.(http.Flusher).Flush() @@ -574,7 +577,7 @@ func TestDialPlan(t *testing.T) { close(done) }) var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - conn, err := AcceptHTTP(context.Background(), w, r, server, nil) + conn, err := controlhttpserver.AcceptHTTP(context.Background(), w, r, server, nil) if err != nil { log.Print(err) } else { @@ -816,3 +819,14 @@ func (c *closeTrackConn) Close() error { c.d.noteClose(c) return c.Conn.Close() } + +func TestDeps(t *testing.T) { + deptest.DepChecker{ + GOOS: "darwin", + GOARCH: "arm64", + BadDeps: map[string]string{ + // Only the controlhttpserver needs WebSockets... 
+ "github.com/coder/websocket": "controlhttp client shouldn't need websockets", + }, + }.Check(t) +} diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index bbcf277d1..2d6a84361 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -26,7 +26,7 @@ import ( "time" "golang.org/x/net/http2" - "tailscale.com/control/controlhttp" + "tailscale.com/control/controlhttp/controlhttpserver" "tailscale.com/net/netaddr" "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" @@ -288,7 +288,7 @@ func (s *Server) serveNoiseUpgrade(w http.ResponseWriter, r *http.Request) { s.mu.Lock() noisePrivate := s.noisePrivKey s.mu.Unlock() - cc, err := controlhttp.AcceptHTTP(ctx, w, r, noisePrivate, nil) + cc, err := controlhttpserver.AcceptHTTP(ctx, w, r, noisePrivate, nil) if err != nil { log.Printf("AcceptHTTP: %v", err) return From 020cacbe702463f14a5d2d5427819c491c7e6578 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 7 Nov 2024 16:49:47 -0800 Subject: [PATCH 0124/1708] derp/derphttp: don't link websockets other than on GOOS=js Or unless the new "ts_debug_websockets" build tag is set. Updates #1278 Change-Id: Ic4c4f81c1924250efd025b055585faec37a5491d Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 2 +- cmd/k8s-operator/depaware.txt | 5 ----- cmd/tailscale/depaware.txt | 7 +----- cmd/tailscaled/depaware.txt | 5 ----- control/controlhttp/client_js.go | 5 +++-- .../controlhttpserver/controlhttpserver.go | 2 +- derp/derphttp/derphttp_client.go | 5 ++++- derp/derphttp/derphttp_test.go | 22 +++++++++++++++++++ derp/derphttp/websocket.go | 4 +++- derp/derphttp/websocket_stub.go | 8 +++++++ tstest/deptest/deptest.go | 17 ++++++++++---- 11 files changed, 56 insertions(+), 26 deletions(-) create mode 100644 derp/derphttp/websocket_stub.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 8fa5334aa..81a7f14f4 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -116,7 +116,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/derp/derphttp+ - tailscale.com/net/wsconn from tailscale.com/cmd/derper+ + tailscale.com/net/wsconn from tailscale.com/cmd/derper tailscale.com/paths from tailscale.com/client/tailscale 💣 tailscale.com/safesocket from tailscale.com/client/tailscale tailscale.com/syncs from tailscale.com/cmd/derper+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index cdd2ee722..900d10efe 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -80,10 +80,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus github.com/bits-and-blooms/bitset from github.com/gaissmai/bart 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus - L github.com/coder/websocket from tailscale.com/derp/derphttp+ - L github.com/coder/websocket/internal/errd from github.com/coder/websocket - L github.com/coder/websocket/internal/util from github.com/coder/websocket - L github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw 💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump 
W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ @@ -741,7 +737,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/tsd+ - L tailscale.com/net/wsconn from tailscale.com/derp/derphttp tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/tailscale+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 60af1de01..d18d88873 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -5,10 +5,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - L github.com/coder/websocket from tailscale.com/derp/derphttp+ - L github.com/coder/websocket/internal/errd from github.com/coder/websocket - L github.com/coder/websocket/internal/util from github.com/coder/websocket - L github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/pe+ W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/winutil/authenticode @@ -125,7 +121,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ - L tailscale.com/net/wsconn from tailscale.com/derp/derphttp tailscale.com/paths from tailscale.com/client/tailscale+ 💣 tailscale.com/safesocket from tailscale.com/client/tailscale+ tailscale.com/syncs from tailscale.com/cmd/tailscale/cli+ @@ -326,7 +321,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep reflect from archive/tar+ regexp from github.com/coreos/go-iptables/iptables+ regexp/syntax from regexp - runtime/debug from github.com/coder/websocket/internal/xsync+ + runtime/debug from tailscale.com+ slices from tailscale.com/client/web+ sort from compress/flate+ strconv from archive/tar+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 707c0c065..81cd53271 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -79,10 +79,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm github.com/bits-and-blooms/bitset from github.com/gaissmai/bart - L github.com/coder/websocket from tailscale.com/derp/derphttp+ - L github.com/coder/websocket/internal/errd from github.com/coder/websocket - L github.com/coder/websocket/internal/util from github.com/coder/websocket - L github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw LD 💣 github.com/creack/pty from tailscale.com/ssh/tailssh W 💣 github.com/dblohm7/wingoes from 
github.com/dblohm7/wingoes/com+ @@ -328,7 +324,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ - L tailscale.com/net/wsconn from tailscale.com/derp/derphttp tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/tailscale+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/control/controlhttp/client_js.go b/control/controlhttp/client_js.go index 4b7126b52..cc05b5b19 100644 --- a/control/controlhttp/client_js.go +++ b/control/controlhttp/client_js.go @@ -12,6 +12,7 @@ import ( "github.com/coder/websocket" "tailscale.com/control/controlbase" + "tailscale.com/control/controlhttp/controlhttpcommon" "tailscale.com/net/wsconn" ) @@ -42,11 +43,11 @@ func (d *Dialer) Dial(ctx context.Context) (*ClientConn, error) { // Can't set HTTP headers on the websocket request, so we have to to send // the handshake via an HTTP header. RawQuery: url.Values{ - handshakeHeaderName: []string{base64.StdEncoding.EncodeToString(init)}, + controlhttpcommon.HandshakeHeaderName: []string{base64.StdEncoding.EncodeToString(init)}, }.Encode(), } wsConn, _, err := websocket.Dial(ctx, wsURL.String(), &websocket.DialOptions{ - Subprotocols: []string{upgradeHeaderValue}, + Subprotocols: []string{controlhttpcommon.UpgradeHeaderValue}, }) if err != nil { return nil, err diff --git a/control/controlhttp/controlhttpserver/controlhttpserver.go b/control/controlhttp/controlhttpserver/controlhttpserver.go index 47f049c18..af3207810 100644 --- a/control/controlhttp/controlhttpserver/controlhttpserver.go +++ b/control/controlhttp/controlhttpserver/controlhttpserver.go @@ -3,7 +3,7 @@ //go:build !ios -// Packet controlhttpserver contains the HTTP server side of the ts2021 control protocol. +// Package controlhttpserver contains the HTTP server side of the ts2021 control protocol. 
package controlhttpserver import ( diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index b695a52a8..c95d072b1 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -313,6 +313,9 @@ func (c *Client) preferIPv6() bool { var dialWebsocketFunc func(ctx context.Context, urlStr string) (net.Conn, error) func useWebsockets() bool { + if !canWebsockets { + return false + } if runtime.GOOS == "js" { return true } @@ -383,7 +386,7 @@ func (c *Client) connect(ctx context.Context, caller string) (client *derp.Clien var node *tailcfg.DERPNode // nil when using c.url to dial var idealNodeInRegion bool switch { - case useWebsockets(): + case canWebsockets && useWebsockets(): var urlStr string if c.url != nil { urlStr = c.url.String() diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index cfb3676cd..cf6032a5e 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -17,7 +17,9 @@ import ( "tailscale.com/derp" "tailscale.com/net/netmon" + "tailscale.com/tstest/deptest" "tailscale.com/types/key" + "tailscale.com/util/set" ) func TestSendRecv(t *testing.T) { @@ -485,3 +487,23 @@ func TestProbe(t *testing.T) { } } } + +func TestDeps(t *testing.T) { + deptest.DepChecker{ + GOOS: "darwin", + GOARCH: "arm64", + BadDeps: map[string]string{ + "github.com/coder/websocket": "shouldn't link websockets except on js/wasm", + }, + }.Check(t) + + deptest.DepChecker{ + GOOS: "darwin", + GOARCH: "arm64", + Tags: "ts_debug_websockets", + WantDeps: set.Of( + "github.com/coder/websocket", + ), + }.Check(t) + +} diff --git a/derp/derphttp/websocket.go b/derp/derphttp/websocket.go index 6ef47473a..9dd640ee3 100644 --- a/derp/derphttp/websocket.go +++ b/derp/derphttp/websocket.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || js +//go:build js || ((linux || darwin) && ts_debug_websockets) package derphttp @@ -14,6 +14,8 @@ import ( "tailscale.com/net/wsconn" ) +const canWebsockets = true + func init() { dialWebsocketFunc = dialWebsocket } diff --git a/derp/derphttp/websocket_stub.go b/derp/derphttp/websocket_stub.go new file mode 100644 index 000000000..d84bfba57 --- /dev/null +++ b/derp/derphttp/websocket_stub.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !(js || ((linux || darwin) && ts_debug_websockets)) + +package derphttp + +const canWebsockets = false diff --git a/tstest/deptest/deptest.go b/tstest/deptest/deptest.go index ba214de32..00faa8a38 100644 --- a/tstest/deptest/deptest.go +++ b/tstest/deptest/deptest.go @@ -13,15 +13,19 @@ import ( "path/filepath" "regexp" "runtime" + "slices" "strings" "testing" + + "tailscale.com/util/set" ) type DepChecker struct { - GOOS string // optional - GOARCH string // optional - BadDeps map[string]string // package => why - Tags string // comma-separated + GOOS string // optional + GOARCH string // optional + BadDeps map[string]string // package => why + WantDeps set.Set[string] // packages expected + Tags string // comma-separated } func (c DepChecker) Check(t *testing.T) { @@ -55,6 +59,11 @@ func (c DepChecker) Check(t *testing.T) { t.Errorf("package %q is not allowed as a dependency (env: %q); reason: %s", dep, extraEnv, why) } } + for dep := range c.WantDeps { + if !slices.Contains(res.Deps, dep) { + t.Errorf("expected package %q to be a dependency (env: %q)", dep, extraEnv) + } + } t.Logf("got %d dependencies", 
len(res.Deps)) } From 64d70fb718557f73a3cebdc41558405697b913ec Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Fri, 8 Nov 2024 13:21:38 +0000 Subject: [PATCH 0125/1708] ipn/ipnlocal: log a summary of posture identity response Perhaps I was too opimistic in #13323 thinking we won't need logs for this. Let's log a summary of the response without logging specific identifiers. Updates tailscale/corp#24437 Signed-off-by: Anton Tolchanov --- ipn/ipnlocal/c2n.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index c3ed32fd8..8380689d1 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -350,6 +350,8 @@ func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http res.PostureDisabled = true } + b.logf("c2n: posture identity disabled=%v reported %d serials %d hwaddrs", res.PostureDisabled, len(res.SerialNumbers), len(res.IfaceHardwareAddrs)) + w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(res) } From 6ff85846bcb5c8aeb35e2fa36808366ec4f148fb Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 8 Nov 2024 10:02:16 -0800 Subject: [PATCH 0126/1708] safeweb: add a Shutdown method to the Server type (#14048) Updates #14047 Change-Id: I2d20454c715b11ad9c6aad1d81445e05a170c3a2 Signed-off-by: M. J. Fromberger --- safeweb/http.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/safeweb/http.go b/safeweb/http.go index bd53eca5b..983ff2fad 100644 --- a/safeweb/http.go +++ b/safeweb/http.go @@ -71,6 +71,7 @@ package safeweb import ( "cmp" + "context" crand "crypto/rand" "fmt" "log" @@ -416,3 +417,7 @@ func (s *Server) ListenAndServe(addr string) error { func (s *Server) Close() error { return s.h.Close() } + +// Shutdown gracefully shuts down the server without interrupting any active +// connections. It has the same semantics as[http.Server.Shutdown]. 
+func (s *Server) Shutdown(ctx context.Context) error { return s.h.Shutdown(ctx) } From b9ecc50ce38d23cfacd5fae6360fa9742c0564a6 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Mon, 11 Nov 2024 11:43:54 +0000 Subject: [PATCH 0127/1708] cmd/k8s-operator,k8s-operator,kube/kubetypes: add an option to configure app connector via Connector spec (#13950) * cmd/k8s-operator,k8s-operator,kube/kubetypes: add an option to configure app connector via Connector spec Updates tailscale/tailscale#11113 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/connector.go | 67 ++++++++++--- cmd/k8s-operator/connector_test.go | 99 +++++++++++++++++++ .../deploy/crds/tailscale.com_connectors.yaml | 53 ++++++++-- .../deploy/manifests/operator.yaml | 53 ++++++++-- cmd/k8s-operator/operator_test.go | 4 +- cmd/k8s-operator/sts.go | 22 ++++- cmd/k8s-operator/testutils_test.go | 34 ++++++- k8s-operator/api.md | 23 ++++- k8s-operator/apis/v1alpha1/types_connector.go | 46 +++++++-- .../apis/v1alpha1/zz_generated.deepcopy.go | 25 +++++ kube/kubetypes/metrics.go | 1 + 11 files changed, 381 insertions(+), 46 deletions(-) diff --git a/cmd/k8s-operator/connector.go b/cmd/k8s-operator/connector.go index 016166b4c..1c1df7c96 100644 --- a/cmd/k8s-operator/connector.go +++ b/cmd/k8s-operator/connector.go @@ -13,7 +13,8 @@ import ( "sync" "time" - "github.com/pkg/errors" + "errors" + "go.uber.org/zap" xslices "golang.org/x/exp/slices" corev1 "k8s.io/api/core/v1" @@ -58,6 +59,7 @@ type ConnectorReconciler struct { subnetRouters set.Slice[types.UID] // for subnet routers gauge exitNodes set.Slice[types.UID] // for exit nodes gauge + appConnectors set.Slice[types.UID] // for app connectors gauge } var ( @@ -67,6 +69,8 @@ var ( gaugeConnectorSubnetRouterResources = clientmetric.NewGauge(kubetypes.MetricConnectorWithSubnetRouterCount) // gaugeConnectorExitNodeResources tracks the number of Connectors currently managed by this operator instance that are exit nodes. gaugeConnectorExitNodeResources = clientmetric.NewGauge(kubetypes.MetricConnectorWithExitNodeCount) + // gaugeConnectorAppConnectorResources tracks the number of Connectors currently managed by this operator instance that are app connectors. + gaugeConnectorAppConnectorResources = clientmetric.NewGauge(kubetypes.MetricConnectorWithAppConnectorCount) ) func (a *ConnectorReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { @@ -108,13 +112,12 @@ func (a *ConnectorReconciler) Reconcile(ctx context.Context, req reconcile.Reque oldCnStatus := cn.Status.DeepCopy() setStatus := func(cn *tsapi.Connector, _ tsapi.ConditionType, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { tsoperator.SetConnectorCondition(cn, tsapi.ConnectorReady, status, reason, message, cn.Generation, a.clock, logger) + var updateErr error if !apiequality.Semantic.DeepEqual(oldCnStatus, cn.Status) { // An error encountered here should get returned by the Reconcile function. 
- if updateErr := a.Client.Status().Update(ctx, cn); updateErr != nil { - err = errors.Wrap(err, updateErr.Error()) - } + updateErr = a.Client.Status().Update(ctx, cn) } - return res, err + return res, errors.Join(err, updateErr) } if !slices.Contains(cn.Finalizers, FinalizerName) { @@ -150,6 +153,9 @@ func (a *ConnectorReconciler) Reconcile(ctx context.Context, req reconcile.Reque cn.Status.SubnetRoutes = cn.Spec.SubnetRouter.AdvertiseRoutes.Stringify() return setStatus(cn, tsapi.ConnectorReady, metav1.ConditionTrue, reasonConnectorCreated, reasonConnectorCreated) } + if cn.Spec.AppConnector != nil { + cn.Status.IsAppConnector = true + } cn.Status.SubnetRoutes = "" return setStatus(cn, tsapi.ConnectorReady, metav1.ConditionTrue, reasonConnectorCreated, reasonConnectorCreated) } @@ -189,23 +195,37 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge sts.Connector.routes = cn.Spec.SubnetRouter.AdvertiseRoutes.Stringify() } + if cn.Spec.AppConnector != nil { + sts.Connector.isAppConnector = true + if len(cn.Spec.AppConnector.Routes) != 0 { + sts.Connector.routes = cn.Spec.AppConnector.Routes.Stringify() + } + } + a.mu.Lock() - if sts.Connector.isExitNode { + if cn.Spec.ExitNode { a.exitNodes.Add(cn.UID) } else { a.exitNodes.Remove(cn.UID) } - if sts.Connector.routes != "" { + if cn.Spec.SubnetRouter != nil { a.subnetRouters.Add(cn.GetUID()) } else { a.subnetRouters.Remove(cn.GetUID()) } + if cn.Spec.AppConnector != nil { + a.appConnectors.Add(cn.GetUID()) + } else { + a.appConnectors.Remove(cn.GetUID()) + } a.mu.Unlock() gaugeConnectorSubnetRouterResources.Set(int64(a.subnetRouters.Len())) gaugeConnectorExitNodeResources.Set(int64(a.exitNodes.Len())) + gaugeConnectorAppConnectorResources.Set(int64(a.appConnectors.Len())) var connectors set.Slice[types.UID] connectors.AddSlice(a.exitNodes.Slice()) connectors.AddSlice(a.subnetRouters.Slice()) + connectors.AddSlice(a.appConnectors.Slice()) gaugeConnectorResources.Set(int64(connectors.Len())) _, err := a.ssr.Provision(ctx, logger, sts) @@ -248,12 +268,15 @@ func (a *ConnectorReconciler) maybeCleanupConnector(ctx context.Context, logger a.mu.Lock() a.subnetRouters.Remove(cn.UID) a.exitNodes.Remove(cn.UID) + a.appConnectors.Remove(cn.UID) a.mu.Unlock() gaugeConnectorExitNodeResources.Set(int64(a.exitNodes.Len())) gaugeConnectorSubnetRouterResources.Set(int64(a.subnetRouters.Len())) + gaugeConnectorAppConnectorResources.Set(int64(a.appConnectors.Len())) var connectors set.Slice[types.UID] connectors.AddSlice(a.exitNodes.Slice()) connectors.AddSlice(a.subnetRouters.Slice()) + connectors.AddSlice(a.appConnectors.Slice()) gaugeConnectorResources.Set(int64(connectors.Len())) return true, nil } @@ -262,8 +285,14 @@ func (a *ConnectorReconciler) validate(cn *tsapi.Connector) error { // Connector fields are already validated at apply time with CEL validation // on custom resource fields. The checks here are a backup in case the // CEL validation breaks without us noticing. 
- if !(cn.Spec.SubnetRouter != nil || cn.Spec.ExitNode) { - return errors.New("invalid spec: a Connector must expose subnet routes or act as an exit node (or both)") + if cn.Spec.SubnetRouter == nil && !cn.Spec.ExitNode && cn.Spec.AppConnector == nil { + return errors.New("invalid spec: a Connector must be configured as at least one of subnet router, exit node or app connector") + } + if (cn.Spec.SubnetRouter != nil || cn.Spec.ExitNode) && cn.Spec.AppConnector != nil { + return errors.New("invalid spec: a Connector that is configured as an app connector must not be also configured as a subnet router or exit node") + } + if cn.Spec.AppConnector != nil { + return validateAppConnector(cn.Spec.AppConnector) } if cn.Spec.SubnetRouter == nil { return nil @@ -272,19 +301,27 @@ func (a *ConnectorReconciler) validate(cn *tsapi.Connector) error { } func validateSubnetRouter(sb *tsapi.SubnetRouter) error { - if len(sb.AdvertiseRoutes) < 1 { + if len(sb.AdvertiseRoutes) == 0 { return errors.New("invalid subnet router spec: no routes defined") } - var err error - for _, route := range sb.AdvertiseRoutes { + return validateRoutes(sb.AdvertiseRoutes) +} + +func validateAppConnector(ac *tsapi.AppConnector) error { + return validateRoutes(ac.Routes) +} + +func validateRoutes(routes tsapi.Routes) error { + var errs []error + for _, route := range routes { pfx, e := netip.ParsePrefix(string(route)) if e != nil { - err = errors.Wrap(err, fmt.Sprintf("route %s is invalid: %v", route, err)) + errs = append(errs, fmt.Errorf("route %v is invalid: %v", route, e)) continue } if pfx.Masked() != pfx { - err = errors.Wrap(err, fmt.Sprintf("route %s has non-address bits set; expected %s", pfx, pfx.Masked())) + errs = append(errs, fmt.Errorf("route %s has non-address bits set; expected %s", pfx, pfx.Masked())) } } - return err + return errors.Join(errs...) } diff --git a/cmd/k8s-operator/connector_test.go b/cmd/k8s-operator/connector_test.go index a4ba90d3d..7cdd83115 100644 --- a/cmd/k8s-operator/connector_test.go +++ b/cmd/k8s-operator/connector_test.go @@ -8,12 +8,14 @@ package main import ( "context" "testing" + "time" "go.uber.org/zap" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client/fake" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -296,3 +298,100 @@ func TestConnectorWithProxyClass(t *testing.T) { expectReconciled(t, cr, "", "test") expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) } + +func TestConnectorWithAppConnector(t *testing.T) { + // Setup + cn := &tsapi.Connector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: types.UID("1234-UID"), + }, + TypeMeta: metav1.TypeMeta{ + Kind: tsapi.ConnectorKind, + APIVersion: "tailscale.io/v1alpha1", + }, + Spec: tsapi.ConnectorSpec{ + AppConnector: &tsapi.AppConnector{}, + }, + } + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(cn). + WithStatusSubresource(cn). 
+ Build() + ft := &fakeTSClient{} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + cl := tstest.NewClock(tstest.ClockOpts{}) + fr := record.NewFakeRecorder(1) + cr := &ConnectorReconciler{ + Client: fc, + clock: cl, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + logger: zl.Sugar(), + recorder: fr, + } + + // 1. Connector with app connnector is created and becomes ready + expectReconciled(t, cr, "", "test") + fullName, shortName := findGenName(t, fc, "", "test", "connector") + opts := configOpts{ + stsName: shortName, + secretName: fullName, + parentType: "connector", + hostname: "test-connector", + app: kubetypes.AppConnector, + isAppConnector: true, + } + expectEqual(t, fc, expectedSecret(t, fc, opts), nil) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + // Connector's ready condition should be set to true + + cn.ObjectMeta.Finalizers = append(cn.ObjectMeta.Finalizers, "tailscale.com/finalizer") + cn.Status.IsAppConnector = true + cn.Status.Conditions = []metav1.Condition{{ + Type: string(tsapi.ConnectorReady), + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + Reason: reasonConnectorCreated, + Message: reasonConnectorCreated, + }} + expectEqual(t, fc, cn, nil) + + // 2. Connector with invalid app connector routes has status set to invalid + mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { + conn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("1.2.3.4/5")} + }) + cn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("1.2.3.4/5")} + expectReconciled(t, cr, "", "test") + cn.Status.Conditions = []metav1.Condition{{ + Type: string(tsapi.ConnectorReady), + Status: metav1.ConditionFalse, + LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + Reason: reasonConnectorInvalid, + Message: "Connector is invalid: route 1.2.3.4/5 has non-address bits set; expected 0.0.0.0/5", + }} + expectEqual(t, fc, cn, nil) + + // 3. Connector with valid app connnector routes becomes ready + mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { + conn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("10.88.2.21/32")} + }) + cn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("10.88.2.21/32")} + cn.Status.Conditions = []metav1.Condition{{ + Type: string(tsapi.ConnectorReady), + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + Reason: reasonConnectorCreated, + Message: reasonConnectorCreated, + }} + expectReconciled(t, cr, "", "test") +} diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml index 9614f74e6..4434c1283 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml @@ -24,6 +24,10 @@ spec: jsonPath: .status.isExitNode name: IsExitNode type: string + - description: Whether this Connector instance is an app connector. + jsonPath: .status.isAppConnector + name: IsAppConnector + type: string - description: Status of the deployed Connector resources. 
jsonPath: .status.conditions[?(@.type == "ConnectorReady")].reason name: Status @@ -66,10 +70,40 @@ spec: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status type: object properties: + appConnector: + description: |- + AppConnector defines whether the Connector device should act as a Tailscale app connector. A Connector that is + configured as an app connector cannot be a subnet router or an exit node. If this field is unset, the + Connector does not act as an app connector. + Note that you will need to manually configure the permissions and the domains for the app connector via the + Admin panel. + Note also that the main tested and supported use case of this config option is to deploy an app connector on + Kubernetes to access SaaS applications available on the public internet. Using the app connector to expose + cluster workloads or other internal workloads to tailnet might work, but this is not a use case that we have + tested or optimised for. + If you are using the app connector to access SaaS applications because you need a predictable egress IP that + can be whitelisted, it is also your responsibility to ensure that cluster traffic from the connector flows + via that predictable IP, for example by enforcing that cluster egress traffic is routed via an egress NAT + device with a static IP address. + https://tailscale.com/kb/1281/app-connectors + type: object + properties: + routes: + description: |- + Routes are optional preconfigured routes for the domains routed via the app connector. + If not set, routes for the domains will be discovered dynamically. + If set, the app connector will immediately be able to route traffic using the preconfigured routes, but may + also dynamically discover other routes. + https://tailscale.com/kb/1332/apps-best-practices#preconfiguration + type: array + minItems: 1 + items: + type: string + format: cidr exitNode: description: |- - ExitNode defines whether the Connector node should act as a - Tailscale exit node. Defaults to false. + ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false. + This field is mutually exclusive with the appConnector field. https://tailscale.com/kb/1103/exit-nodes type: boolean hostname: @@ -90,9 +124,11 @@ spec: type: string subnetRouter: description: |- - SubnetRouter defines subnet routes that the Connector node should - expose to tailnet. If unset, none are exposed. + SubnetRouter defines subnet routes that the Connector device should + expose to tailnet as a Tailscale subnet router. https://tailscale.com/kb/1019/subnets/ + If this field is unset, the device does not get configured as a Tailscale subnet router. + This field is mutually exclusive with the appConnector field. type: object required: - advertiseRoutes @@ -125,8 +161,10 @@ spec: type: string pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ x-kubernetes-validations: - - rule: has(self.subnetRouter) || self.exitNode == true - message: A Connector needs to be either an exit node or a subnet router, or both. + - rule: has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true) || has(self.appConnector) + message: A Connector needs to have at least one of exit node, subnet router or app connector configured. + - rule: '!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))' + message: The appConnector field is mutually exclusive with exitNode and subnetRouter fields. 
status: description: |- ConnectorStatus describes the status of the Connector. This is set @@ -200,6 +238,9 @@ spec: If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the node. type: string + isAppConnector: + description: IsAppConnector is set to true if the Connector acts as an app connector. + type: boolean isExitNode: description: IsExitNode is set to true if the Connector acts as an exit node. type: boolean diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 203a67066..9d8e9faf6 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -53,6 +53,10 @@ spec: jsonPath: .status.isExitNode name: IsExitNode type: string + - description: Whether this Connector instance is an app connector. + jsonPath: .status.isAppConnector + name: IsAppConnector + type: string - description: Status of the deployed Connector resources. jsonPath: .status.conditions[?(@.type == "ConnectorReady")].reason name: Status @@ -91,10 +95,40 @@ spec: More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status properties: + appConnector: + description: |- + AppConnector defines whether the Connector device should act as a Tailscale app connector. A Connector that is + configured as an app connector cannot be a subnet router or an exit node. If this field is unset, the + Connector does not act as an app connector. + Note that you will need to manually configure the permissions and the domains for the app connector via the + Admin panel. + Note also that the main tested and supported use case of this config option is to deploy an app connector on + Kubernetes to access SaaS applications available on the public internet. Using the app connector to expose + cluster workloads or other internal workloads to tailnet might work, but this is not a use case that we have + tested or optimised for. + If you are using the app connector to access SaaS applications because you need a predictable egress IP that + can be whitelisted, it is also your responsibility to ensure that cluster traffic from the connector flows + via that predictable IP, for example by enforcing that cluster egress traffic is routed via an egress NAT + device with a static IP address. + https://tailscale.com/kb/1281/app-connectors + properties: + routes: + description: |- + Routes are optional preconfigured routes for the domains routed via the app connector. + If not set, routes for the domains will be discovered dynamically. + If set, the app connector will immediately be able to route traffic using the preconfigured routes, but may + also dynamically discover other routes. + https://tailscale.com/kb/1332/apps-best-practices#preconfiguration + items: + format: cidr + type: string + minItems: 1 + type: array + type: object exitNode: description: |- - ExitNode defines whether the Connector node should act as a - Tailscale exit node. Defaults to false. + ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false. + This field is mutually exclusive with the appConnector field. https://tailscale.com/kb/1103/exit-nodes type: boolean hostname: @@ -115,9 +149,11 @@ spec: type: string subnetRouter: description: |- - SubnetRouter defines subnet routes that the Connector node should - expose to tailnet. If unset, none are exposed. 
+ SubnetRouter defines subnet routes that the Connector device should + expose to tailnet as a Tailscale subnet router. https://tailscale.com/kb/1019/subnets/ + If this field is unset, the device does not get configured as a Tailscale subnet router. + This field is mutually exclusive with the appConnector field. properties: advertiseRoutes: description: |- @@ -151,8 +187,10 @@ spec: type: array type: object x-kubernetes-validations: - - message: A Connector needs to be either an exit node or a subnet router, or both. - rule: has(self.subnetRouter) || self.exitNode == true + - message: A Connector needs to have at least one of exit node, subnet router or app connector configured. + rule: has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true) || has(self.appConnector) + - message: The appConnector field is mutually exclusive with exitNode and subnetRouter fields. + rule: '!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))' status: description: |- ConnectorStatus describes the status of the Connector. This is set @@ -225,6 +263,9 @@ spec: If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the node. type: string + isAppConnector: + description: IsAppConnector is set to true if the Connector acts as an app connector. + type: boolean isExitNode: description: IsExitNode is set to true if the Connector acts as an exit node. type: boolean diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index a440fafb5..cc9927645 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1388,7 +1388,7 @@ func TestTailscaledConfigfileHash(t *testing.T) { parentType: "svc", hostname: "default-test", clusterTargetIP: "10.20.30.40", - confFileHash: "e09bededa0379920141cbd0b0dbdf9b8b66545877f9e8397423f5ce3e1ba439e", + confFileHash: "362360188dac62bca8013c8134929fed8efd84b1f410c00873d14a05709b5647", app: kubetypes.AppIngressProxy, } expectEqual(t, fc, expectedSTS(t, fc, o), nil) @@ -1399,7 +1399,7 @@ func TestTailscaledConfigfileHash(t *testing.T) { mak.Set(&svc.Annotations, AnnotationHostname, "another-test") }) o.hostname = "another-test" - o.confFileHash = "5d754cf55463135ee34aa9821f2fd8483b53eb0570c3740c84a086304f427684" + o.confFileHash = "20db57cfabc3fc6490f6bb1dc85994e61d255cdfa2a56abb0141736e59f263ef" expectReconciled(t, sr, "default", "test") expectEqual(t, fc, expectedSTS(t, fc, o), nil) } diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index e89b9c930..b6467b798 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -132,10 +132,13 @@ type tailscaleSTSConfig struct { } type connector struct { - // routes is a list of subnet routes that this Connector should expose. + // routes is a list of routes that this Connector should advertise either as a subnet router or as an app + // connector. routes string // isExitNode defines whether this Connector should act as an exit node. isExitNode bool + // isAppConnector defines whether this Connector should act as an app connector. 
+ isAppConnector bool } type tsnetServer interface { CertDomains() []string @@ -674,7 +677,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, } if stsCfg != nil && pc.Spec.Metrics != nil && pc.Spec.Metrics.Enable { if stsCfg.TailnetTargetFQDN == "" && stsCfg.TailnetTargetIP == "" && !stsCfg.ForwardClusterTrafficViaL7IngressProxy { - enableMetrics(ss, pc) + enableMetrics(ss) } else if stsCfg.ForwardClusterTrafficViaL7IngressProxy { // TODO (irbekrm): fix this // For Ingress proxies that have been configured with @@ -763,7 +766,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, return ss } -func enableMetrics(ss *appsv1.StatefulSet, pc *tsapi.ProxyClass) { +func enableMetrics(ss *appsv1.StatefulSet) { for i, c := range ss.Spec.Template.Spec.Containers { if c.Name == "tailscale" { // Serve metrics on on :9001/debug/metrics. If @@ -803,11 +806,13 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co Locked: "false", Hostname: &stsC.Hostname, NoStatefulFiltering: "false", + AppConnector: &ipn.AppConnectorPrefs{Advertise: false}, } // For egress proxies only, we need to ensure that stateful filtering is // not in place so that traffic from cluster can be forwarded via // Tailscale IPs. + // TODO (irbekrm): set it to true always as this is now the default in core. if stsC.TailnetTargetFQDN != "" || stsC.TailnetTargetIP != "" { conf.NoStatefulFiltering = "true" } @@ -817,6 +822,9 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co return nil, fmt.Errorf("error calculating routes: %w", err) } conf.AdvertiseRoutes = routes + if stsC.Connector.isAppConnector { + conf.AppConnector.Advertise = true + } } if shouldAcceptRoutes(stsC.ProxyClass) { conf.AcceptRoutes = "true" @@ -831,9 +839,15 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co } conf.AuthKey = key } + capVerConfigs := make(map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) + capVerConfigs[107] = *conf + + // AppConnector config option is only understood by clients of capver 107 and newer. + conf.AppConnector = nil capVerConfigs[95] = *conf - // legacy config should not contain NoStatefulFiltering field. + + // StatefulFiltering is only understood by clients of capver 95 and newer. 
conf.NoStatefulFiltering.Clear() capVerConfigs[94] = *conf return capVerConfigs, nil diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 6b6297cbd..4b25d103c 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -48,6 +48,7 @@ type configOpts struct { clusterTargetDNS string subnetRoutes string isExitNode bool + isAppConnector bool confFileHash string serveConfig *ipn.ServeConfig shouldEnableForwardingClusterTrafficViaIngress bool @@ -356,6 +357,7 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec Locked: "false", AuthKey: ptr.To("secret-authkey"), AcceptRoutes: "false", + AppConnector: &ipn.AppConnectorPrefs{Advertise: false}, } if opts.proxyClass != "" { t.Logf("applying configuration from ProxyClass %s", opts.proxyClass) @@ -370,6 +372,9 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec if opts.shouldRemoveAuthKey { conf.AuthKey = nil } + if opts.isAppConnector { + conf.AppConnector = &ipn.AppConnectorPrefs{Advertise: true} + } var routes []netip.Prefix if opts.subnetRoutes != "" || opts.isExitNode { r := opts.subnetRoutes @@ -384,22 +389,29 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec routes = append(routes, prefix) } } - conf.AdvertiseRoutes = routes - b, err := json.Marshal(conf) - if err != nil { - t.Fatalf("error marshalling tailscaled config") - } if opts.tailnetTargetFQDN != "" || opts.tailnetTargetIP != "" { conf.NoStatefulFiltering = "true" } else { conf.NoStatefulFiltering = "false" } + conf.AdvertiseRoutes = routes + bnn, err := json.Marshal(conf) + if err != nil { + t.Fatalf("error marshalling tailscaled config") + } + conf.AppConnector = nil bn, err := json.Marshal(conf) if err != nil { t.Fatalf("error marshalling tailscaled config") } + conf.NoStatefulFiltering.Clear() + b, err := json.Marshal(conf) + if err != nil { + t.Fatalf("error marshalling tailscaled config") + } mak.Set(&s.StringData, "tailscaled", string(b)) mak.Set(&s.StringData, "cap-95.hujson", string(bn)) + mak.Set(&s.StringData, "cap-107.hujson", string(bnn)) labels := map[string]string{ "tailscale.com/managed": "true", "tailscale.com/parent-resource": "test", @@ -674,5 +686,17 @@ func removeAuthKeyIfExistsModifier(t *testing.T) func(s *corev1.Secret) { } mak.Set(&secret.StringData, "cap-95.hujson", string(b)) } + if len(secret.StringData["cap-107.hujson"]) != 0 { + conf := &ipn.ConfigVAlpha{} + if err := json.Unmarshal([]byte(secret.StringData["cap-107.hujson"]), conf); err != nil { + t.Fatalf("error umarshalling 'cap-107.hujson' contents: %v", err) + } + conf.AuthKey = nil + b, err := json.Marshal(conf) + if err != nil { + t.Fatalf("error marshalling 'cap-107.huson' contents: %v", err) + } + mak.Set(&secret.StringData, "cap-107.hujson", string(b)) + } } } diff --git a/k8s-operator/api.md b/k8s-operator/api.md index dae969516..7b1aca314 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -21,6 +21,22 @@ +#### AppConnector + + + +AppConnector defines a Tailscale app connector node configured via Connector. + + + +_Appears in:_ +- [ConnectorSpec](#connectorspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `routes` _[Routes](#routes)_ | Routes are optional preconfigured routes for the domains routed via the app connector.
If not set, routes for the domains will be discovered dynamically.
If set, the app connector will immediately be able to route traffic using the preconfigured routes, but may
also dynamically discover other routes.
https://tailscale.com/kb/1332/apps-best-practices#preconfiguration | | Format: cidr
MinItems: 1
Type: string
| + + #### Connector @@ -86,8 +102,9 @@ _Appears in:_ | `tags` _[Tags](#tags)_ | Tags that the Tailscale node will be tagged with.
Defaults to [tag:k8s].
To autoapprove the subnet routes or exit node defined by a Connector,
you can configure Tailscale ACLs to give these tags the necessary
permissions.
See https://tailscale.com/kb/1337/acl-syntax#autoapprovers.
If you specify custom tags here, you must also make the operator an owner of these tags.
See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
Tags cannot be changed once a Connector node has been created.
Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
Type: string
| | `hostname` _[Hostname](#hostname)_ | Hostname is the tailnet hostname that should be assigned to the
Connector node. If unset, hostname defaults to `<connector name>-connector`. Hostname can contain lower case letters, numbers and<br />
dashes, it must not start or end with a dash and must be between 2
and 63 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$`
Type: string
| | `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that
contains configuration options that should be applied to the
resources created for this Connector. If unset, the operator will
create resources with the default configuration. | | | -| `subnetRouter` _[SubnetRouter](#subnetrouter)_ | SubnetRouter defines subnet routes that the Connector node should
expose to tailnet. If unset, none are exposed.
https://tailscale.com/kb/1019/subnets/ | | | -| `exitNode` _boolean_ | ExitNode defines whether the Connector node should act as a
Tailscale exit node. Defaults to false.
https://tailscale.com/kb/1103/exit-nodes | | | +| `subnetRouter` _[SubnetRouter](#subnetrouter)_ | SubnetRouter defines subnet routes that the Connector device should
expose to tailnet as a Tailscale subnet router.
https://tailscale.com/kb/1019/subnets/
If this field is unset, the device does not get configured as a Tailscale subnet router.
This field is mutually exclusive with the appConnector field. | | | +| `appConnector` _[AppConnector](#appconnector)_ | AppConnector defines whether the Connector device should act as a Tailscale app connector. A Connector that is
configured as an app connector cannot be a subnet router or an exit node. If this field is unset, the
Connector does not act as an app connector.
Note that you will need to manually configure the permissions and the domains for the app connector via the
Admin panel.
Note also that the main tested and supported use case of this config option is to deploy an app connector on
Kubernetes to access SaaS applications available on the public internet. Using the app connector to expose
cluster workloads or other internal workloads to tailnet might work, but this is not a use case that we have
tested or optimised for.
If you are using the app connector to access SaaS applications because you need a predictable egress IP that
can be whitelisted, it is also your responsibility to ensure that cluster traffic from the connector flows
via that predictable IP, for example by enforcing that cluster egress traffic is routed via an egress NAT
device with a static IP address.
https://tailscale.com/kb/1281/app-connectors | | | +| `exitNode` _boolean_ | ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false.
This field is mutually exclusive with the appConnector field.
https://tailscale.com/kb/1103/exit-nodes | | | #### ConnectorStatus @@ -106,6 +123,7 @@ _Appears in:_ | `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the Connector.
Known condition types are `ConnectorReady`. | | | | `subnetRoutes` _string_ | SubnetRoutes are the routes currently exposed to tailnet via this
Connector instance. | | | | `isExitNode` _boolean_ | IsExitNode is set to true if the Connector acts as an exit node. | | | +| `isAppConnector` _boolean_ | IsAppConnector is set to true if the Connector acts as an app connector. | | | | `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
assigned to the Connector node. | | | | `hostname` _string_ | Hostname is the fully qualified domain name of the Connector node.
If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
node. | | | @@ -746,6 +764,7 @@ _Validation:_ - Type: string _Appears in:_ +- [AppConnector](#appconnector) - [SubnetRouter](#subnetrouter) diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index 27afd0838..022258485 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -22,6 +22,7 @@ var ConnectorKind = "Connector" // +kubebuilder:resource:scope=Cluster,shortName=cn // +kubebuilder:printcolumn:name="SubnetRoutes",type="string",JSONPath=`.status.subnetRoutes`,description="CIDR ranges exposed to tailnet by a subnet router defined via this Connector instance." // +kubebuilder:printcolumn:name="IsExitNode",type="string",JSONPath=`.status.isExitNode`,description="Whether this Connector instance defines an exit node." +// +kubebuilder:printcolumn:name="IsAppConnector",type="string",JSONPath=`.status.isAppConnector`,description="Whether this Connector instance is an app connector." // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "ConnectorReady")].reason`,description="Status of the deployed Connector resources." // Connector defines a Tailscale node that will be deployed in the cluster. The @@ -55,7 +56,8 @@ type ConnectorList struct { } // ConnectorSpec describes a Tailscale node to be deployed in the cluster. -// +kubebuilder:validation:XValidation:rule="has(self.subnetRouter) || self.exitNode == true",message="A Connector needs to be either an exit node or a subnet router, or both." +// +kubebuilder:validation:XValidation:rule="has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true) || has(self.appConnector)",message="A Connector needs to have at least one of exit node, subnet router or app connector configured." +// +kubebuilder:validation:XValidation:rule="!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))",message="The appConnector field is mutually exclusive with exitNode and subnetRouter fields." type ConnectorSpec struct { // Tags that the Tailscale node will be tagged with. // Defaults to [tag:k8s]. @@ -82,13 +84,31 @@ type ConnectorSpec struct { // create resources with the default configuration. // +optional ProxyClass string `json:"proxyClass,omitempty"` - // SubnetRouter defines subnet routes that the Connector node should - // expose to tailnet. If unset, none are exposed. + // SubnetRouter defines subnet routes that the Connector device should + // expose to tailnet as a Tailscale subnet router. // https://tailscale.com/kb/1019/subnets/ + // If this field is unset, the device does not get configured as a Tailscale subnet router. + // This field is mutually exclusive with the appConnector field. // +optional - SubnetRouter *SubnetRouter `json:"subnetRouter"` - // ExitNode defines whether the Connector node should act as a - // Tailscale exit node. Defaults to false. + SubnetRouter *SubnetRouter `json:"subnetRouter,omitempty"` + // AppConnector defines whether the Connector device should act as a Tailscale app connector. A Connector that is + // configured as an app connector cannot be a subnet router or an exit node. If this field is unset, the + // Connector does not act as an app connector. + // Note that you will need to manually configure the permissions and the domains for the app connector via the + // Admin panel. 
+ // Note also that the main tested and supported use case of this config option is to deploy an app connector on + // Kubernetes to access SaaS applications available on the public internet. Using the app connector to expose + // cluster workloads or other internal workloads to tailnet might work, but this is not a use case that we have + // tested or optimised for. + // If you are using the app connector to access SaaS applications because you need a predictable egress IP that + // can be whitelisted, it is also your responsibility to ensure that cluster traffic from the connector flows + // via that predictable IP, for example by enforcing that cluster egress traffic is routed via an egress NAT + // device with a static IP address. + // https://tailscale.com/kb/1281/app-connectors + // +optional + AppConnector *AppConnector `json:"appConnector,omitempty"` + // ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false. + // This field is mutually exclusive with the appConnector field. // https://tailscale.com/kb/1103/exit-nodes // +optional ExitNode bool `json:"exitNode"` @@ -104,6 +124,17 @@ type SubnetRouter struct { AdvertiseRoutes Routes `json:"advertiseRoutes"` } +// AppConnector defines a Tailscale app connector node configured via Connector. +type AppConnector struct { + // Routes are optional preconfigured routes for the domains routed via the app connector. + // If not set, routes for the domains will be discovered dynamically. + // If set, the app connector will immediately be able to route traffic using the preconfigured routes, but may + // also dynamically discover other routes. + // https://tailscale.com/kb/1332/apps-best-practices#preconfiguration + // +optional + Routes Routes `json:"routes"` +} + type Tags []Tag func (tags Tags) Stringify() []string { @@ -156,6 +187,9 @@ type ConnectorStatus struct { // IsExitNode is set to true if the Connector acts as an exit node. // +optional IsExitNode bool `json:"isExitNode"` + // IsAppConnector is set to true if the Connector acts as an app connector. + // +optional + IsAppConnector bool `json:"isAppConnector"` // TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) // assigned to the Connector node. // +optional diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index f53165b88..c2f69dc04 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -13,6 +13,26 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppConnector) DeepCopyInto(out *AppConnector) { + *out = *in + if in.Routes != nil { + in, out := &in.Routes, &out.Routes + *out = make(Routes, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppConnector. +func (in *AppConnector) DeepCopy() *AppConnector { + if in == nil { + return nil + } + out := new(AppConnector) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Connector) DeepCopyInto(out *Connector) { *out = *in @@ -85,6 +105,11 @@ func (in *ConnectorSpec) DeepCopyInto(out *ConnectorSpec) { *out = new(SubnetRouter) (*in).DeepCopyInto(*out) } + if in.AppConnector != nil { + in, out := &in.AppConnector, &out.AppConnector + *out = new(AppConnector) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorSpec. diff --git a/kube/kubetypes/metrics.go b/kube/kubetypes/metrics.go index 63078385a..63325182d 100644 --- a/kube/kubetypes/metrics.go +++ b/kube/kubetypes/metrics.go @@ -21,6 +21,7 @@ const ( MetricConnectorResourceCount = "k8s_connector_resources" MetricConnectorWithSubnetRouterCount = "k8s_connector_subnetrouter_resources" MetricConnectorWithExitNodeCount = "k8s_connector_exitnode_resources" + MetricConnectorWithAppConnectorCount = "k8s_connector_appconnector_resources" MetricNameserverCount = "k8s_nameserver_resources" MetricRecorderCount = "k8s_recorder_resources" MetricEgressServiceCount = "k8s_egress_service_resources" From 00be1761b76671635b478a20187d83b166991924 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 11 Nov 2024 08:48:09 -0800 Subject: [PATCH 0128/1708] util/codegen: treat unique.Handle as an opaque value type It doesn't need a Clone method, like a time.Time, etc. And then, because Go 1.23+ uses unique.Handle internally for the netip package types, we can remove those special cases. Updates #14058 (pulled out from that PR) Updates tailscale/corp#24485 Change-Id: Iac3548a9417ccda5987f98e0305745a6e178b375 Signed-off-by: Brad Fitzpatrick --- util/codegen/codegen.go | 11 ++++++++--- util/codegen/codegen_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/util/codegen/codegen.go b/util/codegen/codegen.go index 2f7781b68..1b3af10e0 100644 --- a/util/codegen/codegen.go +++ b/util/codegen/codegen.go @@ -277,11 +277,16 @@ func IsInvalid(t types.Type) bool { // It has special handling for some types that contain pointers // that we know are free from memory aliasing/mutation concerns. func ContainsPointers(typ types.Type) bool { - switch typ.String() { + s := typ.String() + switch s { case "time.Time": - // time.Time contains a pointer that does not need copying + // time.Time contains a pointer that does not need cloning. return false - case "inet.af/netip.Addr", "net/netip.Addr", "net/netip.Prefix", "net/netip.AddrPort": + case "inet.af/netip.Addr": + return false + } + if strings.HasPrefix(s, "unique.Handle[") { + // unique.Handle contains a pointer that does not need cloning. 
return false } switch ft := typ.Underlying().(type) { diff --git a/util/codegen/codegen_test.go b/util/codegen/codegen_test.go index 28ddaed2b..74715eeca 100644 --- a/util/codegen/codegen_test.go +++ b/util/codegen/codegen_test.go @@ -10,6 +10,8 @@ import ( "strings" "sync" "testing" + "time" + "unique" "unsafe" "golang.org/x/exp/constraints" @@ -84,6 +86,16 @@ type PointerUnionParam[T netip.Prefix | BasicType | IntPtr] struct { V T } +type StructWithUniqueHandle struct{ _ unique.Handle[[32]byte] } + +type StructWithTime struct{ _ time.Time } + +type StructWithNetipTypes struct { + _ netip.Addr + _ netip.AddrPort + _ netip.Prefix +} + type Interface interface { Method() } @@ -161,6 +173,18 @@ func TestGenericContainsPointers(t *testing.T) { typ: "PointerUnionParam", wantPointer: true, }, + { + typ: "StructWithUniqueHandle", + wantPointer: false, + }, + { + typ: "StructWithTime", + wantPointer: false, + }, + { + typ: "StructWithNetipTypes", + wantPointer: false, + }, } for _, tt := range tests { From 4e0fc037e67a86a0734f025e041ba7f04f4cc3d4 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 11 Nov 2024 13:08:47 -0800 Subject: [PATCH 0129/1708] all: use iterators over slice views more This gets close to all of the remaining ones. Updates #12912 Change-Id: I9c672bbed2654a6c5cab31e0cbece6c107d8c6fa Signed-off-by: Brad Fitzpatrick --- cmd/tsconnect/wasm/wasm_js.go | 8 ++++---- ipn/ipnlocal/drive.go | 5 ++--- ipn/ipnlocal/local.go | 24 ++++++++++-------------- ipn/ipnlocal/local_test.go | 4 +--- ipn/ipnlocal/network-lock.go | 6 ++---- ipn/ipnlocal/serve.go | 3 +-- ipn/ipnlocal/web_client.go | 4 ++-- net/ipset/ipset.go | 8 ++++---- net/tsaddr/tsaddr.go | 11 +++++------ net/tsdial/dnsmap.go | 9 ++++----- tsnet/tsnet.go | 3 +-- types/netmap/netmap.go | 15 +++++++-------- util/set/slice.go | 4 ++-- wgengine/magicsock/debughttp.go | 3 +-- wgengine/magicsock/endpoint.go | 7 +++---- wgengine/magicsock/magicsock.go | 10 ++++------ wgengine/netstack/netstack.go | 6 ++---- wgengine/pendopen.go | 3 +-- wgengine/userspace.go | 9 ++++----- wgengine/wgcfg/nmcfg/nmcfg.go | 6 ++---- 20 files changed, 62 insertions(+), 86 deletions(-) diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index d0bc991f2..4ea1cd897 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -272,8 +272,8 @@ func (i *jsIPN) run(jsCallbacks js.Value) { name = p.Hostinfo().Hostname() } addrs := make([]string, p.Addresses().Len()) - for i := range p.Addresses().Len() { - addrs[i] = p.Addresses().At(i).Addr().String() + for i, ap := range p.Addresses().All() { + addrs[i] = ap.Addr().String() } return jsNetMapPeerNode{ jsNetMapNode: jsNetMapNode{ @@ -589,8 +589,8 @@ func mapSlice[T any, M any](a []T, f func(T) M) []M { func mapSliceView[T any, M any](a views.Slice[T], f func(T) M) []M { n := make([]M, a.Len()) - for i := range a.Len() { - n[i] = f(a.At(i)) + for i, v := range a.All() { + n[i] = f(v) } return n } diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index 98d563d87..fe3622ba4 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -354,9 +354,8 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem // Check that the peer is allowed to share with us. 
addresses := peer.Addresses() - for i := range addresses.Len() { - addr := addresses.At(i) - capsMap := b.PeerCaps(addr.Addr()) + for _, p := range addresses.All() { + capsMap := b.PeerCaps(p.Addr()) if capsMap.HasCapability(tailcfg.PeerCapabilityTaildriveSharer) { return true } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 337fa3d2b..493762fcc 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1811,8 +1811,7 @@ func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap, lastSuggestedExitNod } for _, peer := range nm.Peers { - for i := range peer.Addresses().Len() { - addr := peer.Addresses().At(i) + for _, addr := range peer.Addresses().All() { if !addr.IsSingleIP() || addr.Addr() != prefs.ExitNodeIP { continue } @@ -4997,8 +4996,8 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock case ipn.Running: var addrStrs []string addrs := netMap.GetAddresses() - for i := range addrs.Len() { - addrStrs = append(addrStrs, addrs.At(i).Addr().String()) + for _, p := range addrs.All() { + addrStrs = append(addrStrs, p.Addr().String()) } systemd.Status("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) case ipn.NoState: @@ -6089,8 +6088,7 @@ func (b *LocalBackend) SetDNS(ctx context.Context, name, value string) error { func peerAPIPorts(peer tailcfg.NodeView) (p4, p6 uint16) { svcs := peer.Hostinfo().Services() - for i := range svcs.Len() { - s := svcs.At(i) + for _, s := range svcs.All() { switch s.Proto { case tailcfg.PeerAPI4: p4 = s.Port @@ -6122,8 +6120,7 @@ func peerAPIBase(nm *netmap.NetworkMap, peer tailcfg.NodeView) string { var have4, have6 bool addrs := nm.GetAddresses() - for i := range addrs.Len() { - a := addrs.At(i) + for _, a := range addrs.All() { if !a.IsSingleIP() { continue } @@ -6145,10 +6142,9 @@ func peerAPIBase(nm *netmap.NetworkMap, peer tailcfg.NodeView) string { } func nodeIP(n tailcfg.NodeView, pred func(netip.Addr) bool) netip.Addr { - for i := range n.Addresses().Len() { - a := n.Addresses().At(i) - if a.IsSingleIP() && pred(a.Addr()) { - return a.Addr() + for _, pfx := range n.Addresses().All() { + if pfx.IsSingleIP() && pred(pfx.Addr()) { + return pfx.Addr() } } return netip.Addr{} @@ -6378,8 +6374,8 @@ func peerCanProxyDNS(p tailcfg.NodeView) bool { // If p.Cap is not populated (e.g. older control server), then do the old // thing of searching through services. 
services := p.Hostinfo().Services() - for i := range services.Len() { - if s := services.At(i); s.Proto == tailcfg.PeerAPIDNS && s.Port >= 1 { + for _, s := range services.All() { + if s.Proto == tailcfg.PeerAPIDNS && s.Port >= 1 { return true } } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 433679dda..6dad2dba4 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -3041,12 +3041,10 @@ func deterministicNodeForTest(t testing.TB, want views.Slice[tailcfg.StableNodeI var ret tailcfg.NodeView gotIDs := make([]tailcfg.StableNodeID, got.Len()) - for i := range got.Len() { - nv := got.At(i) + for i, nv := range got.All() { if !nv.Valid() { t.Fatalf("invalid node at index %v", i) } - gotIDs[i] = nv.StableID() if nv.StableID() == use { ret = nv diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index d20bf94eb..bf14d339e 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -430,8 +430,7 @@ func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, per } bootstrapStateID := fmt.Sprintf("%d:%d", genesis.State.StateID1, genesis.State.StateID2) - for i := range persist.DisallowedTKAStateIDs().Len() { - stateID := persist.DisallowedTKAStateIDs().At(i) + for _, stateID := range persist.DisallowedTKAStateIDs().All() { if stateID == bootstrapStateID { return fmt.Errorf("TKA with stateID of %q is disallowed on this node", stateID) } @@ -572,8 +571,7 @@ func tkaStateFromPeer(p tailcfg.NodeView) ipnstate.TKAPeer { TailscaleIPs: make([]netip.Addr, 0, p.Addresses().Len()), NodeKey: p.Key(), } - for i := range p.Addresses().Len() { - addr := p.Addresses().At(i) + for _, addr := range p.Addresses().All() { if addr.IsSingleIP() && tsaddr.IsTailscaleIP(addr.Addr()) { fp.TailscaleIPs = append(fp.TailscaleIPs, addr.Addr()) } diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 67d521f09..61bed0552 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -242,8 +242,7 @@ func (b *LocalBackend) updateServeTCPPortNetMapAddrListenersLocked(ports []uint1 } addrs := nm.GetAddresses() - for i := range addrs.Len() { - a := addrs.At(i) + for _, a := range addrs.All() { for _, p := range ports { addrPort := netip.AddrPortFrom(a.Addr(), p) if _, ok := b.serveListeners[addrPort]; ok { diff --git a/ipn/ipnlocal/web_client.go b/ipn/ipnlocal/web_client.go index ccde9f01d..37fc31819 100644 --- a/ipn/ipnlocal/web_client.go +++ b/ipn/ipnlocal/web_client.go @@ -121,8 +121,8 @@ func (b *LocalBackend) updateWebClientListenersLocked() { } addrs := b.netMap.GetAddresses() - for i := range addrs.Len() { - addrPort := netip.AddrPortFrom(addrs.At(i).Addr(), webClientPort) + for _, pfx := range addrs.All() { + addrPort := netip.AddrPortFrom(pfx.Addr(), webClientPort) if _, ok := b.webClientListeners[addrPort]; ok { continue // already listening } diff --git a/net/ipset/ipset.go b/net/ipset/ipset.go index 622fd61d0..27c1e27ed 100644 --- a/net/ipset/ipset.go +++ b/net/ipset/ipset.go @@ -82,8 +82,8 @@ func NewContainsIPFunc(addrs views.Slice[netip.Prefix]) func(ip netip.Addr) bool pathForTest("bart") // Built a bart table. 
t := &bart.Table[struct{}]{} - for i := range addrs.Len() { - t.Insert(addrs.At(i), struct{}{}) + for _, p := range addrs.All() { + t.Insert(p, struct{}{}) } return bartLookup(t) } @@ -99,8 +99,8 @@ func NewContainsIPFunc(addrs views.Slice[netip.Prefix]) func(ip netip.Addr) bool // General case: pathForTest("ip-map") m := set.Set[netip.Addr]{} - for i := range addrs.Len() { - m.Add(addrs.At(i).Addr()) + for _, p := range addrs.All() { + m.Add(p.Addr()) } return ipInMap(m) } diff --git a/net/tsaddr/tsaddr.go b/net/tsaddr/tsaddr.go index 880695387..e7e0ba088 100644 --- a/net/tsaddr/tsaddr.go +++ b/net/tsaddr/tsaddr.go @@ -180,8 +180,7 @@ func PrefixIs6(p netip.Prefix) bool { return p.Addr().Is6() } // IPv6 /0 route. func ContainsExitRoutes(rr views.Slice[netip.Prefix]) bool { var v4, v6 bool - for i := range rr.Len() { - r := rr.At(i) + for _, r := range rr.All() { if r == allIPv4 { v4 = true } else if r == allIPv6 { @@ -194,8 +193,8 @@ func ContainsExitRoutes(rr views.Slice[netip.Prefix]) bool { // ContainsExitRoute reports whether rr contains at least one of IPv4 or // IPv6 /0 (exit) routes. func ContainsExitRoute(rr views.Slice[netip.Prefix]) bool { - for i := range rr.Len() { - if rr.At(i).Bits() == 0 { + for _, r := range rr.All() { + if r.Bits() == 0 { return true } } @@ -205,8 +204,8 @@ func ContainsExitRoute(rr views.Slice[netip.Prefix]) bool { // ContainsNonExitSubnetRoutes reports whether v contains Subnet // Routes other than ExitNode Routes. func ContainsNonExitSubnetRoutes(rr views.Slice[netip.Prefix]) bool { - for i := range rr.Len() { - if rr.At(i).Bits() != 0 { + for _, r := range rr.All() { + if r.Bits() != 0 { return true } } diff --git a/net/tsdial/dnsmap.go b/net/tsdial/dnsmap.go index f5d13861b..2ef1cb1f1 100644 --- a/net/tsdial/dnsmap.go +++ b/net/tsdial/dnsmap.go @@ -42,8 +42,8 @@ func dnsMapFromNetworkMap(nm *netmap.NetworkMap) dnsMap { if dnsname.HasSuffix(nm.Name, suffix) { ret[canonMapKey(dnsname.TrimSuffix(nm.Name, suffix))] = ip } - for i := range addrs.Len() { - if addrs.At(i).Addr().Is4() { + for _, p := range addrs.All() { + if p.Addr().Is4() { have4 = true } } @@ -52,9 +52,8 @@ func dnsMapFromNetworkMap(nm *netmap.NetworkMap) dnsMap { if p.Name() == "" { continue } - for i := range p.Addresses().Len() { - a := p.Addresses().At(i) - ip := a.Addr() + for _, pfx := range p.Addresses().All() { + ip := pfx.Addr() if ip.Is4() && !have4 { continue } diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 70084c103..34cab7385 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -433,8 +433,7 @@ func (s *Server) TailscaleIPs() (ip4, ip6 netip.Addr) { return } addrs := nm.GetAddresses() - for i := range addrs.Len() { - addr := addrs.At(i) + for _, addr := range addrs.All() { ip := addr.Addr() if ip.Is6() { ip6 = ip diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index 5e0622922..94e872a55 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -279,15 +279,14 @@ func (a *NetworkMap) equalConciseHeader(b *NetworkMap) bool { // in nodeConciseEqual in sync. 
func printPeerConcise(buf *strings.Builder, p tailcfg.NodeView) { aip := make([]string, p.AllowedIPs().Len()) - for i := range aip { - a := p.AllowedIPs().At(i) - s := strings.TrimSuffix(fmt.Sprint(a), "/32") + for i, a := range p.AllowedIPs().All() { + s := strings.TrimSuffix(a.String(), "/32") aip[i] = s } - ep := make([]string, p.Endpoints().Len()) - for i := range ep { - e := p.Endpoints().At(i).String() + epStrs := make([]string, p.Endpoints().Len()) + for i, ep := range p.Endpoints().All() { + e := ep.String() // Align vertically on the ':' between IP and port colon := strings.IndexByte(e, ':') spaces := 0 @@ -295,7 +294,7 @@ func printPeerConcise(buf *strings.Builder, p tailcfg.NodeView) { spaces++ colon-- } - ep[i] = fmt.Sprintf("%21v", e+strings.Repeat(" ", spaces)) + epStrs[i] = fmt.Sprintf("%21v", e+strings.Repeat(" ", spaces)) } derp := p.DERP() @@ -316,7 +315,7 @@ func printPeerConcise(buf *strings.Builder, p tailcfg.NodeView) { discoShort, derp, strings.Join(aip, " "), - strings.Join(ep, " ")) + strings.Join(epStrs, " ")) } // nodeConciseEqual reports whether a and b are equal for the fields accessed by printPeerConcise. diff --git a/util/set/slice.go b/util/set/slice.go index 38551aee1..2fc65b82d 100644 --- a/util/set/slice.go +++ b/util/set/slice.go @@ -67,7 +67,7 @@ func (ss *Slice[T]) Add(vs ...T) { // AddSlice adds all elements in vs to the set. func (ss *Slice[T]) AddSlice(vs views.Slice[T]) { - for i := range vs.Len() { - ss.Add(vs.At(i)) + for _, v := range vs.All() { + ss.Add(v) } } diff --git a/wgengine/magicsock/debughttp.go b/wgengine/magicsock/debughttp.go index 6c07b0d5e..aa109c242 100644 --- a/wgengine/magicsock/debughttp.go +++ b/wgengine/magicsock/debughttp.go @@ -102,8 +102,7 @@ func (c *Conn) ServeHTTPDebug(w http.ResponseWriter, r *http.Request) { sort.Slice(ent, func(i, j int) bool { return ent[i].pub.Less(ent[j].pub) }) peers := map[key.NodePublic]tailcfg.NodeView{} - for i := range c.peers.Len() { - p := c.peers.At(i) + for _, p := range c.peers.All() { peers[p.Key()] = p } diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 5e0ada617..bbba3181c 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -9,6 +9,7 @@ import ( "encoding/binary" "errors" "fmt" + "iter" "math" "math/rand/v2" "net" @@ -1384,20 +1385,18 @@ func (de *endpoint) updateFromNode(n tailcfg.NodeView, heartbeatDisabled bool, p } func (de *endpoint) setEndpointsLocked(eps interface { - Len() int - At(i int) netip.AddrPort + All() iter.Seq2[int, netip.AddrPort] }) { for _, st := range de.endpointState { st.index = indexSentinelDeleted // assume deleted until updated in next loop } var newIpps []netip.AddrPort - for i := range eps.Len() { + for i, ipp := range eps.All() { if i > math.MaxInt16 { // Seems unlikely. break } - ipp := eps.At(i) if !ipp.IsValid() { de.c.logf("magicsock: bogus netmap endpoint from %v", eps) continue diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a9c6fa070..c361608ad 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1120,8 +1120,8 @@ func (c *Conn) determineEndpoints(ctx context.Context) ([]tailcfg.Endpoint, erro // re-run. 
eps = c.endpointTracker.update(time.Now(), eps) - for i := range c.staticEndpoints.Len() { - addAddr(c.staticEndpoints.At(i), tailcfg.EndpointExplicitConf) + for _, ep := range c.staticEndpoints.All() { + addAddr(ep, tailcfg.EndpointExplicitConf) } if localAddr := c.pconn4.LocalAddr(); localAddr.IP.IsUnspecified() { @@ -2360,16 +2360,14 @@ func (c *Conn) logEndpointCreated(n tailcfg.NodeView) { fmt.Fprintf(w, "derp=%v%s ", regionID, code) } - for i := range n.AllowedIPs().Len() { - a := n.AllowedIPs().At(i) + for _, a := range n.AllowedIPs().All() { if a.IsSingleIP() { fmt.Fprintf(w, "aip=%v ", a.Addr()) } else { fmt.Fprintf(w, "aip=%v ", a) } } - for i := range n.Endpoints().Len() { - ep := n.Endpoints().At(i) + for _, ep := range n.Endpoints().All() { fmt.Fprintf(w, "ep=%v ", ep) } })) diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 280f4b7bb..20eac06e6 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -643,13 +643,11 @@ func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) { newPfx := make(map[netip.Prefix]bool) if selfNode.Valid() { - for i := range selfNode.Addresses().Len() { - p := selfNode.Addresses().At(i) + for _, p := range selfNode.Addresses().All() { newPfx[p] = true } if ns.ProcessSubnets { - for i := range selfNode.AllowedIPs().Len() { - p := selfNode.AllowedIPs().At(i) + for _, p := range selfNode.AllowedIPs().All() { newPfx[p] = true } } diff --git a/wgengine/pendopen.go b/wgengine/pendopen.go index 340c7e0f3..7db07c685 100644 --- a/wgengine/pendopen.go +++ b/wgengine/pendopen.go @@ -207,8 +207,7 @@ func (e *userspaceEngine) onOpenTimeout(flow flowtrack.Tuple) { ps, found := e.getPeerStatusLite(n.Key()) if !found { onlyZeroRoute := true // whether peerForIP returned n only because its /0 route matched - for i := range n.AllowedIPs().Len() { - r := n.AllowedIPs().At(i) + for _, r := range n.AllowedIPs().All() { if r.Bits() != 0 && r.Contains(flow.DstAddr()) { onlyZeroRoute = false break diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 2dd0c4cd5..81f8000e0 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -852,8 +852,7 @@ func (e *userspaceEngine) updateActivityMapsLocked(trackNodes []key.NodePublic, // hasOverlap checks if there is a IPPrefix which is common amongst the two // provided slices. 
func hasOverlap(aips, rips views.Slice[netip.Prefix]) bool { - for i := range aips.Len() { - aip := aips.At(i) + for _, aip := range aips.All() { if views.SliceContains(rips, aip) { return true } @@ -1329,9 +1328,9 @@ func (e *userspaceEngine) mySelfIPMatchingFamily(dst netip.Addr) (src netip.Addr if addrs.Len() == 0 { return zero, errors.New("no self address in netmap") } - for i := range addrs.Len() { - if a := addrs.At(i); a.IsSingleIP() && a.Addr().BitLen() == dst.BitLen() { - return a.Addr(), nil + for _, p := range addrs.All() { + if p.IsSingleIP() && p.Addr().BitLen() == dst.BitLen() { + return p.Addr(), nil } } return zero, errors.New("no self address in netmap matching address family") diff --git a/wgengine/wgcfg/nmcfg/nmcfg.go b/wgengine/wgcfg/nmcfg/nmcfg.go index d156f7fcb..e7d5edf15 100644 --- a/wgengine/wgcfg/nmcfg/nmcfg.go +++ b/wgengine/wgcfg/nmcfg/nmcfg.go @@ -40,8 +40,7 @@ func cidrIsSubnet(node tailcfg.NodeView, cidr netip.Prefix) bool { if !cidr.IsSingleIP() { return true } - for i := range node.Addresses().Len() { - selfCIDR := node.Addresses().At(i) + for _, selfCIDR := range node.Addresses().All() { if cidr == selfCIDR { return false } @@ -110,8 +109,7 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, cpeer.V4MasqAddr = peer.SelfNodeV4MasqAddrForThisPeer() cpeer.V6MasqAddr = peer.SelfNodeV6MasqAddrForThisPeer() cpeer.IsJailed = peer.IsJailed() - for i := range peer.AllowedIPs().Len() { - allowedIP := peer.AllowedIPs().At(i) + for _, allowedIP := range peer.AllowedIPs().All() { if allowedIP.Bits() == 0 && peer.StableID() != exitNode { if didExitNodeWarn { // Don't log about both the IPv4 /0 and IPv6 /0. From d8a3683fdfc21e0dfe41f47b72c56230296d383b Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 12 Nov 2024 14:18:19 +0000 Subject: [PATCH 0130/1708] cmd/k8s-operator: restart ProxyGroup pods less (#14045) We currently annotate pods with a hash of the tailscaled config so that we can trigger pod restarts whenever it changes. However, the hash updates more frequently than is necessary causing more restarts than is necessary. This commit removes two causes; scaling up/down and removing the auth key after pods have initially authed to control. However, note that pods will still restart on scale-up/down because of the updated set of volumes mounted into each pod. Hopefully we can fix that in a planned follow-up PR. 
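For background, a minimal, self-contained sketch of the restart mechanism this relies on; the annotation key and helper names below are illustrative only, not the operator's actual identifiers. Kubernetes rolls the pods of a StatefulSet whenever its pod template changes, so writing a hash of the tailscaled config into a pod-template annotation forces a restart exactly when that hash value changes:

    package main

    import (
    	"crypto/sha256"
    	"fmt"
    )

    // Illustrative annotation key; the operator's real constant may differ.
    const cfgHashAnnotation = "tailscale.com/operator-last-set-config-file-hash"

    // setConfigHash records a hash of the serialized tailscaled config (with
    // the auth key already stripped) in the pod template annotations. A new
    // value changes the pod template and therefore triggers a rolling
    // restart; an unchanged value leaves the pods alone.
    func setConfigHash(podTemplateAnnotations map[string]string, cfgJSON []byte) {
    	sum := sha256.Sum256(cfgJSON)
    	podTemplateAnnotations[cfgHashAnnotation] = fmt.Sprintf("%x", sum)
    }

    func main() {
    	ann := map[string]string{}
    	setConfigHash(ann, []byte(`{"Hostname":"pg-0","AcceptRoutes":false}`))
    	fmt.Println(ann[cfgHashAnnotation])
    }

With that in mind, the change below hashes only the first replica's config and zeroes the auth key before hashing, so that neither scaling the ProxyGroup nor later removing the auth key changes the annotation value.
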
Updates #13406 Signed-off-by: Tom Proctor --- cmd/k8s-operator/proxygroup.go | 40 ++++++++++++++++------- cmd/k8s-operator/proxygroup_specs.go | 4 +++ cmd/k8s-operator/proxygroup_test.go | 48 ++++++++++++++++++++-------- 3 files changed, 66 insertions(+), 26 deletions(-) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 7dad9e573..6b7672466 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -353,7 +353,7 @@ func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, id tailc func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (hash string, err error) { logger := r.logger(pg.Name) - var allConfigs []tailscaledConfigs + var configSHA256Sum string for i := range pgReplicas(pg) { cfgSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -389,7 +389,6 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p if err != nil { return "", fmt.Errorf("error creating tailscaled config: %w", err) } - allConfigs = append(allConfigs, configs) for cap, cfg := range configs { cfgJSON, err := json.Marshal(cfg) @@ -399,6 +398,32 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p mak.Set(&cfgSecret.StringData, tsoperator.TailscaledConfigFileName(cap), string(cfgJSON)) } + // The config sha256 sum is a value for a hash annotation used to trigger + // pod restarts when tailscaled config changes. Any config changes apply + // to all replicas, so it is sufficient to only hash the config for the + // first replica. + // + // In future, we're aiming to eliminate restarts altogether and have + // pods dynamically reload their config when it changes. + if i == 0 { + sum := sha256.New() + for _, cfg := range configs { + // Zero out the auth key so it doesn't affect the sha256 hash when we + // remove it from the config after the pods have all authed. Otherwise + // all the pods will need to restart immediately after authing. + cfg.AuthKey = nil + b, err := json.Marshal(cfg) + if err != nil { + return "", err + } + if _, err := sum.Write(b); err != nil { + return "", err + } + } + + configSHA256Sum = fmt.Sprintf("%x", sum.Sum(nil)) + } + if existingCfgSecret != nil { logger.Debugf("patching the existing ProxyGroup config Secret %s", cfgSecret.Name) if err := r.Patch(ctx, cfgSecret, client.MergeFrom(existingCfgSecret)); err != nil { @@ -412,16 +437,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } } - sum := sha256.New() - b, err := json.Marshal(allConfigs) - if err != nil { - return "", err - } - if _, err := sum.Write(b); err != nil { - return "", err - } - - return fmt.Sprintf("%x", sum.Sum(nil)), nil + return configSHA256Sum, nil } func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32, authKey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index f9d1ea52b..27fd9ef71 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -93,6 +93,10 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHa c.Image = image c.VolumeMounts = func() []corev1.VolumeMount { var mounts []corev1.VolumeMount + + // TODO(tomhjp): Read config directly from the secret instead. 
The + // mounts change on scaling up/down which causes unnecessary restarts + // for pods that haven't meaningfully changed. for i := range pgReplicas(pg) { mounts = append(mounts, corev1.VolumeMount{ Name: fmt.Sprintf("tailscaledconfig-%d", i), diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 445db7537..23f50cc7a 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -35,6 +35,8 @@ var defaultProxyClassAnnotations = map[string]string{ } func TestProxyGroup(t *testing.T) { + const initialCfgHash = "6632726be70cf224049580deb4d317bba065915b5fd415461d60ed621c91b196" + pc := &tsapi.ProxyClass{ ObjectMeta: metav1.ObjectMeta{ Name: "default-pc", @@ -80,6 +82,7 @@ func TestProxyGroup(t *testing.T) { tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass default-pc is not yet in a ready state, waiting...", 0, cl, zl.Sugar()) expectEqual(t, fc, pg, nil) + expectProxyGroupResources(t, fc, pg, false, "") }) t.Run("observe_ProxyGroupCreating_status_reason", func(t *testing.T) { @@ -100,10 +103,11 @@ func TestProxyGroup(t *testing.T) { tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg, nil) + expectProxyGroupResources(t, fc, pg, true, initialCfgHash) if expected := 1; reconciler.proxyGroups.Len() != expected { t.Fatalf("expected %d recorders, got %d", expected, reconciler.proxyGroups.Len()) } - expectProxyGroupResources(t, fc, pg, true) + expectProxyGroupResources(t, fc, pg, true, initialCfgHash) keyReq := tailscale.KeyCapabilities{ Devices: tailscale.KeyDeviceCapabilities{ Create: tailscale.KeyDeviceCreateCapabilities{ @@ -135,7 +139,7 @@ func TestProxyGroup(t *testing.T) { } tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) expectEqual(t, fc, pg, nil) - expectProxyGroupResources(t, fc, pg, true) + expectProxyGroupResources(t, fc, pg, true, initialCfgHash) }) t.Run("scale_up_to_3", func(t *testing.T) { @@ -146,6 +150,7 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg, nil) + expectProxyGroupResources(t, fc, pg, true, initialCfgHash) addNodeIDToStateSecrets(t, fc, pg) expectReconciled(t, reconciler, "", pg.Name) @@ -155,7 +160,7 @@ func TestProxyGroup(t *testing.T) { TailnetIPs: []string{"1.2.3.4", "::1"}, }) expectEqual(t, fc, pg, nil) - expectProxyGroupResources(t, fc, pg, true) + expectProxyGroupResources(t, fc, pg, true, initialCfgHash) }) t.Run("scale_down_to_1", func(t *testing.T) { @@ -163,11 +168,26 @@ func TestProxyGroup(t *testing.T) { mustUpdate(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { p.Spec = pg.Spec }) + expectReconciled(t, reconciler, "", pg.Name) + pg.Status.Devices = pg.Status.Devices[:1] // truncate to only the first device. 
expectEqual(t, fc, pg, nil) + expectProxyGroupResources(t, fc, pg, true, initialCfgHash) + }) + + t.Run("trigger_config_change_and_observe_new_config_hash", func(t *testing.T) { + pc.Spec.TailscaleConfig = &tsapi.TailscaleConfig{ + AcceptRoutes: true, + } + mustUpdate(t, fc, "", pc.Name, func(p *tsapi.ProxyClass) { + p.Spec = pc.Spec + }) - expectProxyGroupResources(t, fc, pg, true) + expectReconciled(t, reconciler, "", pg.Name) + + expectEqual(t, fc, pg, nil) + expectProxyGroupResources(t, fc, pg, true, "518a86e9fae64f270f8e0ec2a2ea6ca06c10f725035d3d6caca132cd61e42a74") }) t.Run("delete_and_cleanup", func(t *testing.T) { @@ -191,13 +211,13 @@ func TestProxyGroup(t *testing.T) { }) } -func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool) { +func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool, cfgHash string) { t.Helper() role := pgRole(pg, tsNamespace) roleBinding := pgRoleBinding(pg, tsNamespace) serviceAccount := pgServiceAccount(pg, tsNamespace) - statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", "") + statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", cfgHash) if err != nil { t.Fatal(err) } @@ -207,9 +227,7 @@ func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.Prox expectEqual(t, fc, role, nil) expectEqual(t, fc, roleBinding, nil) expectEqual(t, fc, serviceAccount, nil) - expectEqual(t, fc, statefulSet, func(ss *appsv1.StatefulSet) { - ss.Spec.Template.Annotations[podAnnotationLastSetConfigFileHash] = "" - }) + expectEqual(t, fc, statefulSet, nil) } else { expectMissing[rbacv1.Role](t, fc, role.Namespace, role.Name) expectMissing[rbacv1.RoleBinding](t, fc, roleBinding.Namespace, roleBinding.Name) @@ -218,11 +236,13 @@ func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.Prox } var expectedSecrets []string - for i := range pgReplicas(pg) { - expectedSecrets = append(expectedSecrets, - fmt.Sprintf("%s-%d", pg.Name, i), - fmt.Sprintf("%s-%d-config", pg.Name, i), - ) + if shouldExist { + for i := range pgReplicas(pg) { + expectedSecrets = append(expectedSecrets, + fmt.Sprintf("%s-%d", pg.Name, i), + fmt.Sprintf("%s-%d-config", pg.Name, i), + ) + } } expectSecrets(t, fc, expectedSecrets) } From e38522c081ff48add7db73077e7be18f38ea709d Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 12 Nov 2024 14:23:38 +0000 Subject: [PATCH 0131/1708] go.{mod,sum},build_docker.sh: bump mkctr, add ability to set OCI annotations for images (#14065) Updates tailscale/tailscale#12914 Signed-off-by: Irbe Krumina --- build_docker.sh | 11 ++++++++ go.mod | 32 +++++++++++----------- go.sum | 72 ++++++++++++++++++++++++------------------------- 3 files changed, 63 insertions(+), 52 deletions(-) diff --git a/build_docker.sh b/build_docker.sh index e8b1c8f28..9f39eb08d 100755 --- a/build_docker.sh +++ b/build_docker.sh @@ -17,12 +17,20 @@ eval "$(./build_dist.sh shellvars)" DEFAULT_TARGET="client" DEFAULT_TAGS="v${VERSION_SHORT},v${VERSION_MINOR}" DEFAULT_BASE="tailscale/alpine-base:3.18" +# Set a few pre-defined OCI annotations. The source annotation is used by tools such as Renovate that scan the linked +# Github repo to find release notes for any new image tags. Note that for official Tailscale images the default +# annotations defined here will be overriden by release scripts that call this script. 
+# https://github.com/opencontainers/image-spec/blob/main/annotations.md#pre-defined-annotation-keys +DEFAULT_ANNOTATIONS="org.opencontainers.image.source=https://github.com/tailscale/tailscale/blob/main/build_docker.sh,org.opencontainers.image.vendor=Tailscale" PUSH="${PUSH:-false}" TARGET="${TARGET:-${DEFAULT_TARGET}}" TAGS="${TAGS:-${DEFAULT_TAGS}}" BASE="${BASE:-${DEFAULT_BASE}}" PLATFORM="${PLATFORM:-}" # default to all platforms +# OCI annotations that will be added to the image. +# https://github.com/opencontainers/image-spec/blob/main/annotations.md +ANNOTATIONS="${ANNOTATIONS:-${DEFAULT_ANNOTATIONS}}" case "$TARGET" in client) @@ -43,6 +51,7 @@ case "$TARGET" in --repos="${REPOS}" \ --push="${PUSH}" \ --target="${PLATFORM}" \ + --annotations="${ANNOTATIONS}" \ /usr/local/bin/containerboot ;; operator) @@ -60,6 +69,7 @@ case "$TARGET" in --repos="${REPOS}" \ --push="${PUSH}" \ --target="${PLATFORM}" \ + --annotations="${ANNOTATIONS}" \ /usr/local/bin/operator ;; k8s-nameserver) @@ -77,6 +87,7 @@ case "$TARGET" in --repos="${REPOS}" \ --push="${PUSH}" \ --target="${PLATFORM}" \ + --annotations="${ANNOTATIONS}" \ /usr/local/bin/k8s-nameserver ;; *) diff --git a/go.mod b/go.mod index 464db8313..b5451ab61 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( github.com/golang/snappy v0.0.4 github.com/golangci/golangci-lint v1.57.1 github.com/google/go-cmp v0.6.0 - github.com/google/go-containerregistry v0.18.0 + github.com/google/go-containerregistry v0.20.2 github.com/google/gopacket v1.1.19 github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 github.com/google/uuid v1.6.0 @@ -55,7 +55,7 @@ require ( github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 github.com/jsimonetti/rtnetlink v1.4.0 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 - github.com/klauspost/compress v1.17.4 + github.com/klauspost/compress v1.17.11 github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-isatty v0.0.20 @@ -80,7 +80,7 @@ require ( github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a - github.com/tailscale/mkctr v0.0.0-20240628074852-17ca944da6ba + github.com/tailscale/mkctr v0.0.0-20241111153353-1a38f6676f10 github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 @@ -100,8 +100,8 @@ require ( golang.org/x/mod v0.19.0 golang.org/x/net v0.27.0 golang.org/x/oauth2 v0.16.0 - golang.org/x/sync v0.7.0 - golang.org/x/sys v0.22.0 + golang.org/x/sync v0.9.0 + golang.org/x/sys v0.27.0 golang.org/x/term v0.22.0 golang.org/x/time v0.5.0 golang.org/x/tools v0.23.0 @@ -125,7 +125,7 @@ require ( github.com/Antonboom/testifylint v1.2.0 // indirect github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 // indirect github.com/Masterminds/sprig v2.22.0+incompatible // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect github.com/alecthomas/go-check-sumtype v0.1.4 // indirect github.com/alexkohler/nakedret/v2 v2.0.4 // indirect @@ -138,7 +138,7 @@ require ( github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/dave/astrid v0.0.0-20170323122508-8c2895878b14 // indirect github.com/dave/brenda 
v1.1.0 // indirect - github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/ghostiam/protogetter v0.3.5 // indirect @@ -160,10 +160,10 @@ require ( github.com/ykadowak/zerologlint v0.1.5 // indirect go-simpler.org/musttag v0.9.0 // indirect go-simpler.org/sloglint v0.5.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect - go.opentelemetry.io/otel v1.22.0 // indirect - go.opentelemetry.io/otel/metric v1.22.0 // indirect - go.opentelemetry.io/otel/trace v1.22.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect + go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect + go.opentelemetry.io/otel/trace v1.32.0 // indirect go.uber.org/automaxprocs v1.5.3 // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect ) @@ -220,10 +220,10 @@ require ( github.com/daixiang0/gci v0.12.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect - github.com/docker/cli v25.0.0+incompatible // indirect + github.com/docker/cli v27.3.1+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v26.1.4+incompatible // indirect - github.com/docker/docker-credential-helpers v0.8.1 // indirect + github.com/docker/docker v27.3.1+incompatible // indirect + github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/emicklei/go-restful/v3 v3.11.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/ettle/strcase v0.2.0 // indirect @@ -322,7 +322,7 @@ require ( github.com/nunnatsa/ginkgolinter v0.16.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc6 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect github.com/pelletier/go-toml/v2 v2.2.0 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect @@ -376,7 +376,7 @@ require ( github.com/ultraware/funlen v0.1.0 // indirect github.com/ultraware/whitespace v0.1.0 // indirect github.com/uudashr/gocognit v1.1.2 // indirect - github.com/vbatts/tar-split v0.11.5 // indirect + github.com/vbatts/tar-split v0.11.6 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/yagipy/maintidx v1.0.0 // indirect diff --git a/go.sum b/go.sum index 549f559d0..55aa3b535 100644 --- a/go.sum +++ b/go.sum @@ -79,8 +79,8 @@ github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuN github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/OpenPeeDeeP/depguard/v2 v2.2.0 
h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA= github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= @@ -277,16 +277,16 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= -github.com/docker/cli v25.0.0+incompatible h1:zaimaQdnX7fYWFqzN88exE9LDEvRslexpFowZBX6GoQ= -github.com/docker/cli v25.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ= +github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v26.1.4+incompatible h1:vuTpXDuoga+Z38m1OZHzl7NKisKWaWlhjQk7IDPSLsU= -github.com/docker/docker v26.1.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= -github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= +github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI= @@ -490,8 +490,8 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.18.0 h1:ShE7erKNPqRh5ue6Z9DUOlk04WsnFWPO6YGr3OxnfoQ= -github.com/google/go-containerregistry v0.18.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= +github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo= +github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= 
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -627,8 +627,8 @@ github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8= github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -749,8 +749,8 @@ github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc6 h1:XDqvyKsJEbRtATzkgItUqBA7QHk58yxX1Ov9HERHNqU= -github.com/opencontainers/image-spec v1.1.0-rc6/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= @@ -931,8 +931,8 @@ github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPx github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= -github.com/tailscale/mkctr v0.0.0-20240628074852-17ca944da6ba h1:uNo1VCm/xg4alMkIKo8RWTKNx5y1otfVOcKbp+irkL4= -github.com/tailscale/mkctr v0.0.0-20240628074852-17ca944da6ba/go.mod h1:DxnqIXBplij66U2ZkL688xy07q97qQ83P+TVueLiHq4= +github.com/tailscale/mkctr v0.0.0-20241111153353-1a38f6676f10 h1:ZB47BgnHcEHQJODkDubs5ZiNeJxMhcgzefV3lykRwVQ= +github.com/tailscale/mkctr v0.0.0-20241111153353-1a38f6676f10/go.mod h1:iDx/0Rr9VV/KanSUDpJ6I/ROf0sQ7OqljXc/esl0UIA= github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= @@ -981,8 +981,8 @@ github.com/ultraware/whitespace v0.1.0 h1:O1HKYoh0kIeqE8sFqZf1o0qbORXUCOQFrlaQyZ github.com/ultraware/whitespace v0.1.0/go.mod 
h1:/se4r3beMFNmewJ4Xmz0nMQ941GJt+qmSHGP9emHYe0= github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI= github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k= -github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= -github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= +github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs= +github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= @@ -1022,20 +1022,20 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= -go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= -go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY= -go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= -go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= -go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= -go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= -go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= -go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.28.0 
h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= @@ -1176,8 +1176,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1239,8 +1239,8 @@ golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= From cf41cec5a8da13809fab472e221aecd099009b6f Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 12 Nov 2024 17:13:26 +0000 Subject: [PATCH 0132/1708] cmd/{k8s-operator,containerboot},k8s-operator: remove support for proxies below capver 95. 
(#13986) Updates tailscale/tailscale#13984 Signed-off-by: Irbe Krumina --- cmd/containerboot/main.go | 9 ++++----- cmd/k8s-operator/operator_test.go | 4 ++-- cmd/k8s-operator/sts.go | 21 +++------------------ cmd/k8s-operator/testutils_test.go | 20 -------------------- k8s-operator/utils.go | 3 --- 5 files changed, 9 insertions(+), 48 deletions(-) diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 4c8ba5807..17131faae 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -102,7 +102,6 @@ import ( "net/netip" "os" "os/signal" - "path" "path/filepath" "slices" "strings" @@ -731,7 +730,6 @@ func tailscaledConfigFilePath() string { } cv, err := kubeutils.CapVerFromFileName(e.Name()) if err != nil { - log.Printf("skipping file %q in tailscaled config directory %q: %v", e.Name(), dir, err) continue } if cv > maxCompatVer && cv <= tailcfg.CurrentCapabilityVersion { @@ -739,8 +737,9 @@ func tailscaledConfigFilePath() string { } } if maxCompatVer == -1 { - log.Fatalf("no tailscaled config file found in %q for current capability version %q", dir, tailcfg.CurrentCapabilityVersion) + log.Fatalf("no tailscaled config file found in %q for current capability version %d", dir, tailcfg.CurrentCapabilityVersion) } - log.Printf("Using tailscaled config file %q for capability version %q", maxCompatVer, tailcfg.CurrentCapabilityVersion) - return path.Join(dir, kubeutils.TailscaledConfigFileName(maxCompatVer)) + filePath := filepath.Join(dir, kubeutils.TailscaledConfigFileName(maxCompatVer)) + log.Printf("Using tailscaled config file %q to match current capability version %d", filePath, tailcfg.CurrentCapabilityVersion) + return filePath } diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index cc9927645..21ef08e52 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1388,7 +1388,7 @@ func TestTailscaledConfigfileHash(t *testing.T) { parentType: "svc", hostname: "default-test", clusterTargetIP: "10.20.30.40", - confFileHash: "362360188dac62bca8013c8134929fed8efd84b1f410c00873d14a05709b5647", + confFileHash: "a67b5ad3ff605531c822327e8f1a23dd0846e1075b722c13402f7d5d0ba32ba2", app: kubetypes.AppIngressProxy, } expectEqual(t, fc, expectedSTS(t, fc, o), nil) @@ -1399,7 +1399,7 @@ func TestTailscaledConfigfileHash(t *testing.T) { mak.Set(&svc.Annotations, AnnotationHostname, "another-test") }) o.hostname = "another-test" - o.confFileHash = "20db57cfabc3fc6490f6bb1dc85994e61d255cdfa2a56abb0141736e59f263ef" + o.confFileHash = "888a993ebee20ad6be99623b45015339de117946850cf1252bede0b570e04293" expectReconciled(t, sr, "default", "test") expectEqual(t, fc, expectedSTS(t, fc, o), nil) } diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index b6467b798..bdacec39b 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -521,11 +521,6 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S Name: "TS_KUBE_SECRET", Value: proxySecret, }, - corev1.EnvVar{ - // Old tailscaled config key is still used for backwards compatibility. - Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH", - Value: "/etc/tsconfig/tailscaled", - }, corev1.EnvVar{ // New style is in the form of cap-.hujson. 
Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", @@ -789,15 +784,9 @@ func readAuthKey(secret *corev1.Secret, key string) (*string, error) { return origConf.AuthKey, nil } -// tailscaledConfig takes a proxy config, a newly generated auth key if -// generated and a Secret with the previous proxy state and auth key and -// returns tailscaled configuration and a hash of that configuration. -// -// As of 2024-05-09 it also returns legacy tailscaled config without the -// later added NoStatefulFilter field to support proxies older than cap95. -// TODO (irbekrm): remove the legacy config once we no longer need to support -// versions older than cap94, -// https://tailscale.com/kb/1236/kubernetes-operator#operator-and-proxies +// tailscaledConfig takes a proxy config, a newly generated auth key if generated and a Secret with the previous proxy +// state and auth key and returns tailscaled config files for currently supported proxy versions and a hash of that +// configuration. func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ Version: "alpha0", @@ -846,10 +835,6 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co // AppConnector config option is only understood by clients of capver 107 and newer. conf.AppConnector = nil capVerConfigs[95] = *conf - - // StatefulFiltering is only understood by clients of capver 95 and newer. - conf.NoStatefulFiltering.Clear() - capVerConfigs[94] = *conf return capVerConfigs, nil } diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 4b25d103c..d42f1b7af 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -71,7 +71,6 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef {Name: "TS_USERSPACE", Value: "false"}, {Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "TS_KUBE_SECRET", Value: opts.secretName}, - {Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH", Value: "/etc/tsconfig/tailscaled"}, {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"}, }, SecurityContext: &corev1.SecurityContext{ @@ -230,7 +229,6 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps {Name: "TS_USERSPACE", Value: "true"}, {Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "TS_KUBE_SECRET", Value: opts.secretName}, - {Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH", Value: "/etc/tsconfig/tailscaled"}, {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"}, {Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/serve-config"}, {Name: "TS_INTERNAL_APP", Value: opts.app}, @@ -404,12 +402,6 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec if err != nil { t.Fatalf("error marshalling tailscaled config") } - conf.NoStatefulFiltering.Clear() - b, err := json.Marshal(conf) - if err != nil { - t.Fatalf("error marshalling tailscaled config") - } - mak.Set(&s.StringData, "tailscaled", string(b)) mak.Set(&s.StringData, "cap-95.hujson", string(bn)) mak.Set(&s.StringData, "cap-107.hujson", string(bnn)) labels := map[string]string{ @@ -662,18 +654,6 @@ func removeTargetPortsFromSvc(svc *corev1.Service) { 
func removeAuthKeyIfExistsModifier(t *testing.T) func(s *corev1.Secret) { return func(secret *corev1.Secret) { t.Helper() - if len(secret.StringData["tailscaled"]) != 0 { - conf := &ipn.ConfigVAlpha{} - if err := json.Unmarshal([]byte(secret.StringData["tailscaled"]), conf); err != nil { - t.Fatalf("error unmarshalling 'tailscaled' contents: %v", err) - } - conf.AuthKey = nil - b, err := json.Marshal(conf) - if err != nil { - t.Fatalf("error marshalling updated 'tailscaled' config: %v", err) - } - mak.Set(&secret.StringData, "tailscaled", string(b)) - } if len(secret.StringData["cap-95.hujson"]) != 0 { conf := &ipn.ConfigVAlpha{} if err := json.Unmarshal([]byte(secret.StringData["cap-95.hujson"]), conf); err != nil { diff --git a/k8s-operator/utils.go b/k8s-operator/utils.go index a1f225fe6..420d7e49c 100644 --- a/k8s-operator/utils.go +++ b/k8s-operator/utils.go @@ -32,9 +32,6 @@ type Records struct { // TailscaledConfigFileName returns a tailscaled config file name in // format expected by containerboot for the given CapVer. func TailscaledConfigFileName(cap tailcfg.CapabilityVersion) string { - if cap < 95 { - return "tailscaled" - } return fmt.Sprintf("cap-%v.hujson", cap) } From 0c6bd9a33b184eadaaba426b1249e5fa2cd2f4b1 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 13 Nov 2024 05:49:51 -0800 Subject: [PATCH 0133/1708] words: add a scale https://portsmouthbrewery.com/shilling-scale/ Any scale that includes "wee heavy" is a scale worth including. Updates #words Change-Id: I85fd7a64cf22e14f686f1093a220cb59c43e46ba Signed-off-by: Brad Fitzpatrick --- words/scales.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/words/scales.txt b/words/scales.txt index f27dfc5c4..fdec078ee 100644 --- a/words/scales.txt +++ b/words/scales.txt @@ -391,3 +391,4 @@ godzilla sirius vector cherimoya +shilling From 7c6562c861541bf1652f83425b18f618b84d8cde Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Wed, 13 Nov 2024 09:56:02 -0500 Subject: [PATCH 0134/1708] words: scale up our word count (#14082) Updates tailscale/corp#14698 Signed-off-by: Naman Sood --- words/scales.txt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/words/scales.txt b/words/scales.txt index fdec078ee..c8041c0fc 100644 --- a/words/scales.txt +++ b/words/scales.txt @@ -392,3 +392,9 @@ sirius vector cherimoya shilling +kettle +kitchen +fahrenheit +rankine +piano +ruler From 1847f260428012701c61f8f86b72b530d15c1db3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Nov 2024 09:30:14 -0700 Subject: [PATCH 0135/1708] .github: Bump github/codeql-action from 3.26.11 to 3.27.1 (#14062) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.26.11 to 3.27.1. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/6db8d6351fd0be61f9ed8ebd12ccd35dcec51fea...4f3212b61783c3c68e8309a0f18a699764811cda) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 4e266c6ea..0ea73a93c 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@6db8d6351fd0be61f9ed8ebd12ccd35dcec51fea # v3.26.11 + uses: github/codeql-action/init@4f3212b61783c3c68e8309a0f18a699764811cda # v3.27.1 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@6db8d6351fd0be61f9ed8ebd12ccd35dcec51fea # v3.26.11 + uses: github/codeql-action/autobuild@4f3212b61783c3c68e8309a0f18a699764811cda # v3.27.1 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@6db8d6351fd0be61f9ed8ebd12ccd35dcec51fea # v3.26.11 + uses: github/codeql-action/analyze@4f3212b61783c3c68e8309a0f18a699764811cda # v3.27.1 From 0cfa217f3e6e2078b82d73bd177bc1d96c291fb2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Nov 2024 09:34:10 -0700 Subject: [PATCH 0136/1708] .github: Bump actions/upload-artifact from 4.4.0 to 4.4.3 (#13811) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.4.0 to 4.4.3. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/50769540e7f4bd5e21e526ee35c689e35e0d6874...b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index bc70040b0..2fac634b4 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -461,7 +461,7 @@ jobs: run: | echo "artifacts_path=$(realpath .)" >> $GITHUB_ENV - name: upload crash - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 if: steps.run.outcome != 'success' && steps.build.outcome == 'success' with: name: artifacts From 4474dcea686ee4ef4263456e3ec497667d4ccf97 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Nov 2024 09:46:30 -0700 Subject: [PATCH 0137/1708] .github: Bump actions/cache from 4.1.0 to 4.1.2 (#13933) Bumps [actions/cache](https://github.com/actions/cache) from 4.1.0 to 4.1.2. 
- [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/2cdf405574d6ef1f33a1d12acccd3ae82f47b3f2...6849a6489940f00c2f30c0fb92c6274307ccb58a) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2fac634b4..a97e44917 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -80,7 +80,7 @@ jobs: - name: checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Restore Cache - uses: actions/cache@2cdf405574d6ef1f33a1d12acccd3ae82f47b3f2 # v4.1.0 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -159,7 +159,7 @@ jobs: cache: false - name: Restore Cache - uses: actions/cache@2cdf405574d6ef1f33a1d12acccd3ae82f47b3f2 # v4.1.0 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -260,7 +260,7 @@ jobs: - name: checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Restore Cache - uses: actions/cache@2cdf405574d6ef1f33a1d12acccd3ae82f47b3f2 # v4.1.0 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -319,7 +319,7 @@ jobs: - name: checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Restore Cache - uses: actions/cache@2cdf405574d6ef1f33a1d12acccd3ae82f47b3f2 # v4.1.0 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -367,7 +367,7 @@ jobs: - name: checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Restore Cache - uses: actions/cache@2cdf405574d6ef1f33a1d12acccd3ae82f47b3f2 # v4.1.0 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache From 0c9ade46a4321a732887b5ad55bcac69a096390e Mon Sep 17 00:00:00 2001 From: Walter Poupore Date: Wed, 13 Nov 2024 09:25:12 -0800 Subject: [PATCH 0138/1708] words: Add scoville to scales.txt (#14084) https://en.wikipedia.org/wiki/Scoville_scale Updates #words Signed-off-by: Walter Poupore --- words/scales.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/words/scales.txt b/words/scales.txt index c8041c0fc..2fe849bb9 100644 --- a/words/scales.txt +++ b/words/scales.txt @@ -398,3 +398,4 @@ fahrenheit rankine piano ruler +scoville From bfe5cd87606454e2d00631d2c29e0fa72443758c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Nov 
2024 10:56:44 -0700 Subject: [PATCH 0139/1708] .github: Bump actions/setup-go from 5.0.2 to 5.1.0 (#13934) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.0.2 to 5.1.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32...41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/golangci-lint.yml | 2 +- .github/workflows/test.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 0ea73a93c..d9a287be3 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -49,7 +49,7 @@ jobs: # Install a more recent Go that understands modern go.mod content. - name: Install Go - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 with: go-version-file: go.mod diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 9c34debc5..6630e8de8 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -25,7 +25,7 @@ jobs: steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 with: go-version-file: go.mod cache: false diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a97e44917..f9bb5cae2 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -153,7 +153,7 @@ jobs: uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install Go - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 with: go-version-file: go.mod cache: false From f593d3c5c0eee55fdf988085d27aa991dbfd5fd6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 13 Nov 2024 07:36:43 -0800 Subject: [PATCH 0140/1708] cmd/tailscale/cli: add "help" alias for --help Fixes #14053 Change-Id: I0a13e11af089f02b0656fea0d316543c67591fb5 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/cli.go | 9 +++++++-- cmd/tailscale/cli/cli_test.go | 31 +++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 130a11623..66961b2e0 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -93,8 +93,13 @@ func Run(args []string) (err error) { args = CleanUpArgs(args) - if len(args) == 1 && (args[0] == "-V" || args[0] == "--version") { - args = []string{"version"} + if len(args) == 1 { + switch args[0] { + case "-V", "--version": + args = []string{"version"} + case "help": + args = []string{"--help"} + } } var warnOnce sync.Once diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 4b7548671..0444e914c 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -9,6 +9,7 @@ import ( "encoding/json" "flag" "fmt" + "io" "net/netip" "reflect" 
"strings" @@ -1480,3 +1481,33 @@ func TestParseNLArgs(t *testing.T) { }) } } + +func TestHelpAlias(t *testing.T) { + var stdout, stderr bytes.Buffer + tstest.Replace[io.Writer](t, &Stdout, &stdout) + tstest.Replace[io.Writer](t, &Stderr, &stderr) + + gotExit0 := false + defer func() { + if !gotExit0 { + t.Error("expected os.Exit(0) to be called") + return + } + if !strings.Contains(stderr.String(), "SUBCOMMANDS") { + t.Errorf("expected help output to contain SUBCOMMANDS; got stderr=%q; stdout=%q", stderr.String(), stdout.String()) + } + }() + defer func() { + if e := recover(); e != nil { + if strings.Contains(fmt.Sprint(e), "unexpected call to os.Exit(0)") { + gotExit0 = true + } else { + t.Errorf("unexpected panic: %v", e) + } + } + }() + err := Run([]string{"help"}) + if err != nil { + t.Fatalf("Run: %v", err) + } +} From e73cfd9700095406d7263c855bc47801f7e0a2da Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 14 Nov 2024 09:50:42 -0800 Subject: [PATCH 0141/1708] go.toolchain.rev: bump from Go 1.23.1 to Go 1.23.3 Updates #14100 Change-Id: I57f9d4260be15ce1daebe4a9782910aba3fb9dc9 Signed-off-by: Brad Fitzpatrick --- go.toolchain.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index 5d87594c2..500d853e5 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -bf15628b759344c6fc7763795a405ba65b8be5d7 +96578f73d04e1a231fa2a495ad3fa97747785bc6 From 8fd471ce5748d2129dba584b4fa14b0d29229299 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 14 Nov 2024 09:44:16 -0800 Subject: [PATCH 0142/1708] control/controlclient: disable https on for http://localhost:$port URLs Previously we required the program to be running in a test or have TS_CONTROL_IS_PLAINTEXT_HTTP before we disabled its https fallback on "http" schema control URLs to localhost with ports. But nobody accidentally does all three of "http", explicit port number, localhost and doesn't mean it. And when they mean it, they're testing a localhost dev control server (like I was) and don't want 443 getting involved. As of the changes for #13597, this became more annoying in that we were trying to use a port which wasn't even available. Updates #13597 Change-Id: Icd00bca56043d2da58ab31de7aa05a3b269c490f Signed-off-by: Brad Fitzpatrick --- control/controlclient/noise.go | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/control/controlclient/noise.go b/control/controlclient/noise.go index 3994af056..2e7c70fd1 100644 --- a/control/controlclient/noise.go +++ b/control/controlclient/noise.go @@ -17,7 +17,6 @@ import ( "golang.org/x/net/http2" "tailscale.com/control/controlhttp" - "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/internal/noiseconn" "tailscale.com/net/dnscache" @@ -30,7 +29,6 @@ import ( "tailscale.com/util/mak" "tailscale.com/util/multierr" "tailscale.com/util/singleflight" - "tailscale.com/util/testenv" ) // NoiseClient provides a http.Client to connect to tailcontrol over @@ -107,11 +105,6 @@ type NoiseOpts struct { DialPlan func() *tailcfg.ControlDialPlan } -// controlIsPlaintext is whether we should assume that the controlplane is only accessible -// over plaintext HTTP (as the first hop, before the ts2021 encryption begins). -// This is used by some tests which don't have a real TLS certificate. -var controlIsPlaintext = envknob.RegisterBool("TS_CONTROL_IS_PLAINTEXT_HTTP") - // NewNoiseClient returns a new noiseClient for the provided server and machine key. 
// serverURL is of the form https://: (no trailing slash). // @@ -129,7 +122,7 @@ func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) { if u.Scheme == "http" { httpPort = port httpsPort = "443" - if (testenv.InTest() || controlIsPlaintext()) && (u.Hostname() == "127.0.0.1" || u.Hostname() == "localhost") { + if u.Hostname() == "127.0.0.1" || u.Hostname() == "localhost" { httpsPort = "" } } else { From c3c4c05331ca13a7a159e5b6307fd72a6d2d3a00 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 15 Nov 2024 07:12:56 -0800 Subject: [PATCH 0143/1708] tstest/integration/testcontrol: remove a vestigial unused parameter Back in the day this testcontrol package only spoke the nacl-boxed-based control protocol, which used this. Then we added ts2021, which didn't, but still sometimes used it. Then we removed the old mode and didn't remove this parameter in 2409661a0da956. Updates #11585 Change-Id: Ifd290bd7dbbb52b681b3599786437a15bc98b6a5 Signed-off-by: Brad Fitzpatrick --- tstest/integration/testcontrol/testcontrol.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 2d6a84361..a6b2e1828 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -832,7 +832,7 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi w.WriteHeader(200) for { if resBytes, ok := s.takeRawMapMessage(req.NodeKey); ok { - if err := s.sendMapMsg(w, mkey, compress, resBytes); err != nil { + if err := s.sendMapMsg(w, compress, resBytes); err != nil { s.logf("sendMapMsg of raw message: %v", err) return } @@ -864,7 +864,7 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi s.logf("json.Marshal: %v", err) return } - if err := s.sendMapMsg(w, mkey, compress, resBytes); err != nil { + if err := s.sendMapMsg(w, compress, resBytes); err != nil { return } } @@ -895,7 +895,7 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi } break keepAliveLoop case <-keepAliveTimerCh: - if err := s.sendMapMsg(w, mkey, compress, keepAliveMsg); err != nil { + if err := s.sendMapMsg(w, compress, keepAliveMsg); err != nil { return } } @@ -1060,7 +1060,7 @@ func (s *Server) takeRawMapMessage(nk key.NodePublic) (mapResJSON []byte, ok boo return mapResJSON, true } -func (s *Server) sendMapMsg(w http.ResponseWriter, mkey key.MachinePublic, compress bool, msg any) error { +func (s *Server) sendMapMsg(w http.ResponseWriter, compress bool, msg any) error { resBytes, err := s.encode(compress, msg) if err != nil { return err From 1355f622beca0db5794201ab8802804ab1299e2f Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Thu, 14 Nov 2024 14:21:30 -0600 Subject: [PATCH 0144/1708] cmd/derpprobe,prober: add ability to restrict derpprobe to a single region Updates #24522 Co-authored-by: Mario Minardi Signed-off-by: Percy Wegmann --- cmd/derpprobe/derpprobe.go | 4 ++++ prober/derp.go | 23 +++++++++++++++++++++++ prober/derp_test.go | 31 +++++++++++++++++++++++++++++-- 3 files changed, 56 insertions(+), 2 deletions(-) diff --git a/cmd/derpprobe/derpprobe.go b/cmd/derpprobe/derpprobe.go index 5b7b77091..8f04326b0 100644 --- a/cmd/derpprobe/derpprobe.go +++ b/cmd/derpprobe/derpprobe.go @@ -29,6 +29,7 @@ var ( tlsInterval = flag.Duration("tls-interval", 15*time.Second, "TLS probe interval") bwInterval = flag.Duration("bw-interval", 0, "bandwidth probe interval (0 = no bandwidth probing)") 
bwSize = flag.Int64("bw-probe-size-bytes", 1_000_000, "bandwidth probe size") + regionCode = flag.String("region-code", "", "probe only this region (e.g. 'lax'); if left blank, all regions will be probed") ) func main() { @@ -47,6 +48,9 @@ func main() { if *bwInterval > 0 { opts = append(opts, prober.WithBandwidthProbing(*bwInterval, *bwSize)) } + if *regionCode != "" { + opts = append(opts, prober.WithRegion(*regionCode)) + } dp, err := prober.DERP(p, *derpMapURL, opts...) if err != nil { log.Fatal(err) diff --git a/prober/derp.go b/prober/derp.go index 0dadbe8c2..b1ebc590d 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -45,6 +45,9 @@ type derpProber struct { bwInterval time.Duration bwProbeSize int64 + // Optionally restrict probes to a single regionCode. + regionCode string + // Probe class for fetching & updating the DERP map. ProbeMap ProbeClass @@ -97,6 +100,14 @@ func WithTLSProbing(interval time.Duration) DERPOpt { } } +// WithRegion restricts probing to the specified region identified by its code +// (e.g. "lax"). This is case sensitive. +func WithRegion(regionCode string) DERPOpt { + return func(d *derpProber) { + d.regionCode = regionCode + } +} + // DERP creates a new derpProber. // // If derpMapURL is "local", the DERPMap is fetched via @@ -135,6 +146,10 @@ func (d *derpProber) probeMapFn(ctx context.Context) error { defer d.Unlock() for _, region := range d.lastDERPMap.Regions { + if d.skipRegion(region) { + continue + } + for _, server := range region.Nodes { labels := Labels{ "region": region.RegionCode, @@ -316,6 +331,10 @@ func (d *derpProber) updateMap(ctx context.Context) error { d.lastDERPMapAt = time.Now() d.nodes = make(map[string]*tailcfg.DERPNode) for _, reg := range d.lastDERPMap.Regions { + if d.skipRegion(reg) { + continue + } + for _, n := range reg.Nodes { if existing, ok := d.nodes[n.Name]; ok { return fmt.Errorf("derpmap has duplicate nodes: %+v and %+v", existing, n) @@ -338,6 +357,10 @@ func (d *derpProber) ProbeUDP(ipaddr string, port int) ProbeClass { } } +func (d *derpProber) skipRegion(region *tailcfg.DERPRegion) bool { + return d.regionCode != "" && region.RegionCode != d.regionCode +} + func derpProbeUDP(ctx context.Context, ipStr string, port int) error { pc, err := net.ListenPacket("udp", ":0") if err != nil { diff --git a/prober/derp_test.go b/prober/derp_test.go index a34292a23..c084803e9 100644 --- a/prober/derp_test.go +++ b/prober/derp_test.go @@ -44,6 +44,19 @@ func TestDerpProber(t *testing.T) { }, }, }, + 1: { + RegionID: 1, + RegionCode: "one", + Nodes: []*tailcfg.DERPNode{ + { + Name: "n3", + RegionID: 0, + HostName: "derpn3.tailscale.test", + IPv4: "1.1.1.1", + IPv6: "::1", + }, + }, + }, }, } srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -68,6 +81,7 @@ func TestDerpProber(t *testing.T) { meshProbeFn: func(_, _ string) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, nodes: make(map[string]*tailcfg.DERPNode), probes: make(map[string]*Probe), + regionCode: "zero", } if err := dp.probeMapFn(context.Background()); err != nil { t.Errorf("unexpected probeMapFn() error: %s", err) @@ -84,9 +98,9 @@ func TestDerpProber(t *testing.T) { // Add one more node and check that probes got created. 
dm.Regions[0].Nodes = append(dm.Regions[0].Nodes, &tailcfg.DERPNode{ - Name: "n3", + Name: "n4", RegionID: 0, - HostName: "derpn3.tailscale.test", + HostName: "derpn4.tailscale.test", IPv4: "1.1.1.1", IPv6: "::1", }) @@ -113,6 +127,19 @@ func TestDerpProber(t *testing.T) { if len(dp.probes) != 4 { t.Errorf("unexpected probes: %+v", dp.probes) } + + // Stop filtering regions. + dp.regionCode = "" + if err := dp.probeMapFn(context.Background()); err != nil { + t.Errorf("unexpected probeMapFn() error: %s", err) + } + if len(dp.nodes) != 2 { + t.Errorf("unexpected nodes: %+v", dp.nodes) + } + // 6 regular probes + 2 mesh probe + if len(dp.probes) != 8 { + t.Errorf("unexpected probes: %+v", dp.probes) + } } func TestRunDerpProbeNodePair(t *testing.T) { From aefbed323f33e7e02ea87147e2264efcce39d3f6 Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Fri, 15 Nov 2024 16:14:06 -0500 Subject: [PATCH 0145/1708] ipn,tailcfg: add VIPService struct and c2n to fetch them from client (#14046) * ipn,tailcfg: add VIPService struct and c2n to fetch them from client Updates tailscale/corp#22743, tailscale/corp#22955 Signed-off-by: Naman Sood * more review fixes Signed-off-by: Naman Sood * don't mention PeerCapabilityServicesDestination since it's currently unused Signed-off-by: Naman Sood --------- Signed-off-by: Naman Sood --- ipn/ipnlocal/c2n.go | 9 ++++ ipn/ipnlocal/local.go | 48 +++++++++++++++++++++ ipn/ipnlocal/local_test.go | 88 ++++++++++++++++++++++++++++++++++++++ tailcfg/tailcfg.go | 29 ++++++++++++- tailcfg/tailcfg_clone.go | 1 + tailcfg/tailcfg_test.go | 11 +++++ tailcfg/tailcfg_view.go | 2 + 7 files changed, 187 insertions(+), 1 deletion(-) diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 8380689d1..f3a4a3a3d 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -77,6 +77,9 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ // Linux netfilter. req("POST /netfilter-kind"): handleC2NSetNetfilterKind, + + // VIP services. + req("GET /vip-services"): handleC2NVIPServicesGet, } type c2nHandler func(*LocalBackend, http.ResponseWriter, *http.Request) @@ -269,6 +272,12 @@ func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.R w.WriteHeader(http.StatusNoContent) } +func handleC2NVIPServicesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + b.logf("c2n: GET /vip-services received") + + json.NewEncoder(w).Encode(b.VIPServices()) +} + func handleC2NUpdateGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { b.logf("c2n: GET /update received") diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 493762fcc..3c7296038 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -9,6 +9,7 @@ import ( "bytes" "cmp" "context" + "crypto/sha256" "encoding/base64" "encoding/json" "errors" @@ -4888,6 +4889,14 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip } hi.SSH_HostKeys = sshHostKeys + services := vipServicesFromPrefs(prefs) + if len(services) > 0 { + buf, _ := json.Marshal(services) + hi.ServicesHash = fmt.Sprintf("%02x", sha256.Sum256(buf)) + } else { + hi.ServicesHash = "" + } + // The Hostinfo.WantIngress field tells control whether this node wants to // be wired up for ingress connections. If harmless if it's accidentally // true; the actual policy is controlled in tailscaled by ServeConfig. 
But @@ -7485,3 +7494,42 @@ func maybeUsernameOf(actor ipnauth.Actor) string { } return username } + +// VIPServices returns the list of tailnet services that this node +// is serving as a destination for. +// The returned memory is owned by the caller. +func (b *LocalBackend) VIPServices() []*tailcfg.VIPService { + b.mu.Lock() + defer b.mu.Unlock() + return vipServicesFromPrefs(b.pm.CurrentPrefs()) +} + +func vipServicesFromPrefs(prefs ipn.PrefsView) []*tailcfg.VIPService { + // keyed by service name + var services map[string]*tailcfg.VIPService + + // TODO(naman): this envknob will be replaced with service-specific port + // information once we start storing that. + var allPortsServices []string + if env := envknob.String("TS_DEBUG_ALLPORTS_SERVICES"); env != "" { + allPortsServices = strings.Split(env, ",") + } + + for _, s := range allPortsServices { + mak.Set(&services, s, &tailcfg.VIPService{ + Name: s, + Ports: []tailcfg.ProtoPortRange{{Ports: tailcfg.PortRangeAny}}, + }) + } + + for _, s := range prefs.AdvertiseServices().AsSlice() { + if services == nil || services[s] == nil { + mak.Set(&services, s, &tailcfg.VIPService{ + Name: s, + }) + } + services[s].Active = true + } + + return slices.Collect(maps.Values(services)) +} diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 6dad2dba4..6d25a418f 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -30,6 +30,7 @@ import ( "tailscale.com/control/controlclient" "tailscale.com/drive" "tailscale.com/drive/driveimpl" + "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -4464,3 +4465,90 @@ func TestConfigFileReload(t *testing.T) { t.Fatalf("got %q; want %q", hn, "bar") } } + +func TestGetVIPServices(t *testing.T) { + tests := []struct { + name string + advertised []string + mapped []string + want []*tailcfg.VIPService + }{ + { + "advertised-only", + []string{"svc:abc", "svc:def"}, + []string{}, + []*tailcfg.VIPService{ + { + Name: "svc:abc", + Active: true, + }, + { + Name: "svc:def", + Active: true, + }, + }, + }, + { + "mapped-only", + []string{}, + []string{"svc:abc"}, + []*tailcfg.VIPService{ + { + Name: "svc:abc", + Ports: []tailcfg.ProtoPortRange{{Ports: tailcfg.PortRangeAny}}, + }, + }, + }, + { + "mapped-and-advertised", + []string{"svc:abc"}, + []string{"svc:abc"}, + []*tailcfg.VIPService{ + { + Name: "svc:abc", + Active: true, + Ports: []tailcfg.ProtoPortRange{{Ports: tailcfg.PortRangeAny}}, + }, + }, + }, + { + "mapped-and-advertised-separately", + []string{"svc:def"}, + []string{"svc:abc"}, + []*tailcfg.VIPService{ + { + Name: "svc:abc", + Ports: []tailcfg.ProtoPortRange{{Ports: tailcfg.PortRangeAny}}, + }, + { + Name: "svc:def", + Active: true, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + envknob.Setenv("TS_DEBUG_ALLPORTS_SERVICES", strings.Join(tt.mapped, ",")) + prefs := &ipn.Prefs{ + AdvertiseServices: tt.advertised, + } + got := vipServicesFromPrefs(prefs.View()) + slices.SortFunc(got, func(a, b *tailcfg.VIPService) int { + return strings.Compare(a.Name, b.Name) + }) + if !reflect.DeepEqual(tt.want, got) { + t.Logf("want:") + for _, s := range tt.want { + t.Logf("%+v", s) + } + t.Logf("got:") + for _, s := range got { + t.Logf("%+v", s) + } + t.Fail() + return + } + }) + } +} diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 9e39a4336..1b283a2fc 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -150,7 +150,8 @@ type CapabilityVersion int // - 105: 2024-08-05: Fixed SSH 
behavior on systems that use busybox (issue #12849) // - 106: 2024-09-03: fix panic regression from cryptokey routing change (65fe0ba7b5) // - 107: 2024-10-30: add App Connector to conffile (PR #13942) -const CurrentCapabilityVersion CapabilityVersion = 107 +// - 108: 2024-11-08: Client sends ServicesHash in Hostinfo, understands c2n GET /vip-services. +const CurrentCapabilityVersion CapabilityVersion = 108 type StableID string @@ -820,6 +821,7 @@ type Hostinfo struct { Userspace opt.Bool `json:",omitempty"` // if the client is running in userspace (netstack) mode UserspaceRouter opt.Bool `json:",omitempty"` // if the client's subnet router is running in userspace (netstack) mode AppConnector opt.Bool `json:",omitempty"` // if the client is running the app-connector service + ServicesHash string `json:",omitempty"` // opaque hash of the most recent list of tailnet services, change in hash indicates config should be fetched via c2n // Location represents geographical location data about a // Tailscale host. Location is optional and only set if @@ -830,6 +832,26 @@ type Hostinfo struct { // require changes to Hostinfo.Equal. } +// VIPService represents a service created on a tailnet from the +// perspective of a node providing that service. These services +// have a virtual IP (VIP) address pair distinct from the node's IPs. +type VIPService struct { + // Name is the name of the service, of the form `svc:dns-label`. + // See CheckServiceName for a validation func. + // Name uniquely identifies a service on a particular tailnet, + // and so also corresponds uniquely to the pair of IP addresses + // belonging to the VIP service. + Name string + + // Ports specify which ProtoPorts are made available by this node + // on the service's IPs. + Ports []ProtoPortRange + + // Active specifies whether new requests for the service should be + // sent to this node by control. + Active bool +} + // TailscaleSSHEnabled reports whether or not this node is acting as a // Tailscale SSH server. func (hi *Hostinfo) TailscaleSSHEnabled() bool { @@ -1429,6 +1451,11 @@ const ( // user groups as Kubernetes user groups. This capability is read by // peers that are Tailscale Kubernetes operator instances. PeerCapabilityKubernetes PeerCapability = "tailscale.com/cap/kubernetes" + + // PeerCapabilityServicesDestination grants a peer the ability to serve as + // a destination for a set of given VIP services, which is provided as the + // value of this key in NodeCapMap. + PeerCapabilityServicesDestination PeerCapability = "tailscale.com/cap/services-destination" ) // NodeCapMap is a map of capabilities to their optional values.
It is valid for diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 61564f3f8..f4f02c017 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -183,6 +183,7 @@ var _HostinfoCloneNeedsRegeneration = Hostinfo(struct { Userspace opt.Bool UserspaceRouter opt.Bool AppConnector opt.Bool + ServicesHash string Location *Location }{}) diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index 0d0636677..9f8c418a1 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -66,6 +66,7 @@ func TestHostinfoEqual(t *testing.T) { "Userspace", "UserspaceRouter", "AppConnector", + "ServicesHash", "Location", } if have := fieldsOf(reflect.TypeFor[Hostinfo]()); !reflect.DeepEqual(have, hiHandles) { @@ -240,6 +241,16 @@ func TestHostinfoEqual(t *testing.T) { &Hostinfo{AppConnector: opt.Bool("false")}, false, }, + { + &Hostinfo{ServicesHash: "73475cb40a568e8da8a045ced110137e159f890ac4da883b6b17dc651b3a8049"}, + &Hostinfo{ServicesHash: "73475cb40a568e8da8a045ced110137e159f890ac4da883b6b17dc651b3a8049"}, + true, + }, + { + &Hostinfo{ServicesHash: "084c799cd551dd1d8d5c5f9a5d593b2e931f5e36122ee5c793c1d08a19839cc0"}, + &Hostinfo{}, + false, + }, } for i, tt := range tests { got := tt.a.Equal(tt.b) diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index a3e19b0dc..f275a6a9d 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -318,6 +318,7 @@ func (v HostinfoView) Cloud() string { return v.ж.Clou func (v HostinfoView) Userspace() opt.Bool { return v.ж.Userspace } func (v HostinfoView) UserspaceRouter() opt.Bool { return v.ж.UserspaceRouter } func (v HostinfoView) AppConnector() opt.Bool { return v.ж.AppConnector } +func (v HostinfoView) ServicesHash() string { return v.ж.ServicesHash } func (v HostinfoView) Location() *Location { if v.ж.Location == nil { return nil @@ -365,6 +366,7 @@ var _HostinfoViewNeedsRegeneration = Hostinfo(struct { Userspace opt.Bool UserspaceRouter opt.Bool AppConnector opt.Bool + ServicesHash string Location *Location }{}) From 3b93fd9c4430332787e6d9ed6164efb63d3a9e8b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 15 Nov 2024 14:16:03 -0800 Subject: [PATCH 0146/1708] net/captivedetection: replace 10k log lines with ... 
less We see tons of logs of the form: 2024/11/15 19:57:29 netcheck: [v2] 76 available captive portal detection endpoints: [Endpoint{URL="http://192.73.240.161/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://192.73.240.121/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://192.73.240.132/generate_204", StatusCode=204, ExpectedContent="", 11:58SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://209.177.158.246/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://209.177.158.15/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://199.38.182.118/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://192.73.243.135/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://192.73.243.229/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://192.73.243.141/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://45.159.97.144/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://45.159.97.61/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://45.159.97.233/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://45.159.98.196/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://45.159.98.253/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://45.159.98.145/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://68.183.90.120/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://209.177.156.94/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://192.73.248.83/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://209.177.156.197/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://199.38.181.104/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://209.177.145.120/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://199.38.181.93/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://199.38.181.103/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://102.67.165.90/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} 
Endpoint{URL="http://102.67.165.185/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://102.67.165.36/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://176.58.90.147/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://176.58.90.207/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://176.58.90.104/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://162.248.221.199/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://162.248.221.215/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://162.248.221.248/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://185.34.3.232/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://185.34.3.207/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://185.34.3.75/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://208.83.234.151/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://208.83.233.233/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://208.72.155.133/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://185.40.234.219/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://185.40.234.113/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://185.40.234.77/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://43.245.48.220/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://43.245.48.50/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://43.245.48.250/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://192.73.252.65/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://192.73.252.134/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://208.111.34.178/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://43.245.49.105/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://43.245.49.83/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, 
Provider=DERPMapOther} Endpoint{URL="http://43.245.49.144/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://176.58.92.144/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://176.58.88.183/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://176.58.92.254/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://148.163.220.129/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://148.163.220.134/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://148.163.220.210/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://192.73.242.187/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://192.73.242.28/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://192.73.242.204/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://176.58.93.248/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://176.58.93.147/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://176.58.93.154/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://192.73.244.245/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://208.111.40.12/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://208.111.40.216/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://103.6.84.152/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://205.147.105.30/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://205.147.105.78/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://102.67.167.245/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://102.67.167.37/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://102.67.167.188/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://103.84.155.178/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://103.84.155.188/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://103.84.155.46/generate_204", StatusCode=204, ExpectedContent="", 
SupportsTailscaleChallenge=true, Provider=DERPMapOther} Endpoint{URL="http://controlplane.tailscale.com/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=false, Provider=Tailscale} Endpoint{URL="http://login.tailscale.com/generate_204", StatusCode=204, ExpectedContent="", SupportsTailscaleChallenge=false, Provider=Tailscale}] That can be much shorter. Also add a fast exit path to the concurrency on match. Doing 5 all at once is still pretty gratuitous, though. Updates #1634 Fixes #13019 Change-Id: Icdbb16572fca4477b0ee9882683a3ac6eb08e2f2 Signed-off-by: Brad Fitzpatrick --- net/captivedetection/captivedetection.go | 19 ++++++---- net/captivedetection/captivedetection_test.go | 37 +++++++++++++++---- 2 files changed, 42 insertions(+), 14 deletions(-) diff --git a/net/captivedetection/captivedetection.go b/net/captivedetection/captivedetection.go index c6e8bca3a..7d598d853 100644 --- a/net/captivedetection/captivedetection.go +++ b/net/captivedetection/captivedetection.go @@ -136,26 +136,31 @@ func interfaceNameDoesNotNeedCaptiveDetection(ifName string, goos string) bool { func (d *Detector) detectOnInterface(ctx context.Context, ifIndex int, endpoints []Endpoint) bool { defer d.httpClient.CloseIdleConnections() - d.logf("[v2] %d available captive portal detection endpoints: %v", len(endpoints), endpoints) + use := min(len(endpoints), 5) + endpoints = endpoints[:use] + d.logf("[v2] %d available captive portal detection endpoints; trying %v", len(endpoints), use) // We try to detect the captive portal more quickly by making requests to multiple endpoints concurrently. var wg sync.WaitGroup resultCh := make(chan bool, len(endpoints)) - for i, e := range endpoints { - if i >= 5 { - // Try a maximum of 5 endpoints, break out (returning false) if we run of attempts. - break - } + // Once any goroutine detects a captive portal, we shut down the others. 
+ ctx, cancel := context.WithCancel(ctx) + defer cancel() + + for _, e := range endpoints { wg.Add(1) go func(endpoint Endpoint) { defer wg.Done() found, err := d.verifyCaptivePortalEndpoint(ctx, endpoint, ifIndex) if err != nil { - d.logf("[v1] checkCaptivePortalEndpoint failed with endpoint %v: %v", endpoint, err) + if ctx.Err() == nil { + d.logf("[v1] checkCaptivePortalEndpoint failed with endpoint %v: %v", endpoint, err) + } return } if found { + cancel() // one match is good enough resultCh <- true } }(e) diff --git a/net/captivedetection/captivedetection_test.go b/net/captivedetection/captivedetection_test.go index e74273afd..29a197d31 100644 --- a/net/captivedetection/captivedetection_test.go +++ b/net/captivedetection/captivedetection_test.go @@ -7,10 +7,12 @@ import ( "context" "runtime" "sync" + "sync/atomic" "testing" - "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/net/netmon" + "tailscale.com/syncs" + "tailscale.com/tstest/nettest" ) func TestAvailableEndpointsAlwaysAtLeastTwo(t *testing.T) { @@ -36,25 +38,46 @@ func TestDetectCaptivePortalReturnsFalse(t *testing.T) { } } -func TestAllEndpointsAreUpAndReturnExpectedResponse(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/13019") +func TestEndpointsAreUpAndReturnExpectedResponse(t *testing.T) { + nettest.SkipIfNoNetwork(t) + d := NewDetector(t.Logf) endpoints := availableEndpoints(nil, 0, t.Logf, runtime.GOOS) + t.Logf("testing %d endpoints", len(endpoints)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var good atomic.Bool var wg sync.WaitGroup + sem := syncs.NewSemaphore(5) for _, e := range endpoints { wg.Add(1) go func(endpoint Endpoint) { defer wg.Done() - found, err := d.verifyCaptivePortalEndpoint(context.Background(), endpoint, 0) - if err != nil { - t.Errorf("verifyCaptivePortalEndpoint failed with endpoint %v: %v", endpoint, err) + + if !sem.AcquireContext(ctx) { + return + } + defer sem.Release() + + found, err := d.verifyCaptivePortalEndpoint(ctx, endpoint, 0) + if err != nil && ctx.Err() == nil { + t.Logf("verifyCaptivePortalEndpoint failed with endpoint %v: %v", endpoint, err) } if found { - t.Errorf("verifyCaptivePortalEndpoint with endpoint %v says we're behind a captive portal, but we aren't", endpoint) + t.Logf("verifyCaptivePortalEndpoint with endpoint %v says we're behind a captive portal, but we aren't", endpoint) + return } + good.Store(true) + t.Logf("endpoint good: %v", endpoint) + cancel() }(e) } wg.Wait() + + if !good.Load() { + t.Errorf("no good endpoints found") + } } From f1e1048977b848c8ad8882d77b73e4dd25b1c3f9 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 12 Nov 2024 17:52:31 -0800 Subject: [PATCH 0147/1708] go.mod: bump tailscale/wireguard-go Updates #11899 Change-Id: Ibd75134a20798c84c7174ba3af639cf22836c7d7 Signed-off-by: Brad Fitzpatrick --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b5451ab61..92ba6b9c7 100644 --- a/go.mod +++ b/go.mod @@ -85,7 +85,7 @@ require ( github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 - github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc + github.com/tailscale/wireguard-go v0.0.0-20241113014420-4e883d38c8d3 github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e github.com/tc-hib/winres v0.2.1 github.com/tcnksm/go-httpstat v0.2.0 diff 
--git a/go.sum b/go.sum index 55aa3b535..fadfb22b1 100644 --- a/go.sum +++ b/go.sum @@ -941,8 +941,8 @@ github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:t github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= -github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc h1:cezaQN9pvKVaw56Ma5qr/G646uKIYP0yQf+OyWN/okc= -github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20241113014420-4e883d38c8d3 h1:dmoPb3dG27tZgMtrvqfD/LW4w7gA6BSWl8prCPNmkCQ= +github.com/tailscale/wireguard-go v0.0.0-20241113014420-4e883d38c8d3/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= From 5cae7c51bfaaf1adbc645580e48fc55caac9e1c0 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 16 Nov 2024 15:25:51 -0800 Subject: [PATCH 0148/1708] ipn: remove unused Notify.BackendLogID Updates #14129 Change-Id: I13b5df8765e786a4a919d6b2e72afe987000b2d1 Signed-off-by: Brad Fitzpatrick --- ipn/backend.go | 4 ---- ipn/ipnlocal/local.go | 5 +---- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/ipn/backend.go b/ipn/backend.go index 76ad1910b..5779727fe 100644 --- a/ipn/backend.go +++ b/ipn/backend.go @@ -100,7 +100,6 @@ type Notify struct { NetMap *netmap.NetworkMap // if non-nil, the new or current netmap Engine *EngineStatus // if non-nil, the new or current wireguard stats BrowseToURL *string // if non-nil, UI should open a browser right now - BackendLogID *string // if non-nil, the public logtail ID used by backend // FilesWaiting if non-nil means that files are buffered in // the Tailscale daemon and ready for local transfer to the @@ -173,9 +172,6 @@ func (n Notify) String() string { if n.BrowseToURL != nil { sb.WriteString("URL=<...> ") } - if n.BackendLogID != nil { - sb.WriteString("BackendLogID ") - } if n.FilesWaiting != nil { sb.WriteString("FilesWaiting ") } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 3c7296038..33025ed40 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2157,10 +2157,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { blid := b.backendLogID.String() b.logf("Backend: logs: be:%v fe:%v", blid, opts.FrontendLogID) - b.sendToLocked(ipn.Notify{ - BackendLogID: &blid, - Prefs: &prefs, - }, allClients) + b.sendToLocked(ipn.Notify{Prefs: &prefs}, allClients) if !loggedOut && (b.hasNodeKeyLocked() || confWantRunning) { // If we know that we're either logged in or meant to be From c2a7f17f2b378897f4545ad6f43891f150423487 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Mon, 18 Nov 2024 09:55:54 -0800 Subject: [PATCH 0149/1708] sessionrecording: implement v2 recording endpoint support (#14105) The v2 endpoint supports HTTP/2 bidirectional streaming and acks for received bytes. This is used to detect when a recorder disappears to more quickly terminate the session. 
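
For illustration only (not part of this patch): the /v2/record response body is a stream of JSON frames, each acknowledging how many uploaded bytes the recorder has received so far, optionally ending with an error frame. Below is a minimal sketch of a consumer of that stream, assuming the frame layout of the v2ResponseFrame struct added in connect.go further down; decodeAcks and its onAck callback are hypothetical stand-ins for the real ack-timer handling in connectV2.

package sketch

import (
	"encoding/json"
	"errors"
	"io"
)

// decodeAcks reads JSON ack frames from the recorder's response body until
// EOF, an I/O error, or a frame carrying a recorder-side error.
func decodeAcks(body io.Reader, onAck func(bytesAcked int64)) error {
	dec := json.NewDecoder(body)
	for {
		var frame struct {
			Ack   int64  `json:"ack,omitempty"`
			Error string `json:"error,omitempty"`
		}
		if err := dec.Decode(&frame); err != nil {
			if errors.Is(err, io.EOF) {
				return nil // recorder closed the stream cleanly
			}
			return err
		}
		if frame.Error != "" {
			// The recorder failed to store the recording.
			return errors.New(frame.Error)
		}
		// e.g. reset the uploadAckWindow idle timer here.
		onAck(frame.Ack)
	}
}
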
Updates https://github.com/tailscale/corp/issues/24023 Signed-off-by: Andrew Lytvynov --- k8s-operator/sessionrecording/hijacker.go | 2 +- .../sessionrecording/hijacker_test.go | 4 +- sessionrecording/connect.go | 320 ++++++++++++++---- sessionrecording/connect_test.go | 189 +++++++++++ ssh/tailssh/tailssh.go | 13 +- ssh/tailssh/tailssh_test.go | 61 ++-- 6 files changed, 500 insertions(+), 89 deletions(-) create mode 100644 sessionrecording/connect_test.go diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index f8ef951d4..43aa14e61 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -102,7 +102,7 @@ type Hijacker struct { // connection succeeds. In case of success, returns a list with a single // successful recording attempt and an error channel. If the connection errors // after having been established, an error is sent down the channel. -type RecorderDialFn func(context.Context, []netip.AddrPort, func(context.Context, string, string) (net.Conn, error)) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error) +type RecorderDialFn func(context.Context, []netip.AddrPort, sessionrecording.DialFunc) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error) // Hijack hijacks a 'kubectl exec' session and configures for the session // contents to be sent to a recorder. diff --git a/k8s-operator/sessionrecording/hijacker_test.go b/k8s-operator/sessionrecording/hijacker_test.go index 440d9c942..e166ce63b 100644 --- a/k8s-operator/sessionrecording/hijacker_test.go +++ b/k8s-operator/sessionrecording/hijacker_test.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "net" "net/http" "net/netip" "net/url" @@ -20,6 +19,7 @@ import ( "go.uber.org/zap" "tailscale.com/client/tailscale/apitype" "tailscale.com/k8s-operator/sessionrecording/fakes" + "tailscale.com/sessionrecording" "tailscale.com/tailcfg" "tailscale.com/tsnet" "tailscale.com/tstest" @@ -80,7 +80,7 @@ func Test_Hijacker(t *testing.T) { h := &Hijacker{ connectToRecorder: func(context.Context, []netip.AddrPort, - func(context.Context, string, string) (net.Conn, error), + sessionrecording.DialFunc, ) (wc io.WriteCloser, rec []*tailcfg.SSHRecordingAttempt, _ <-chan error, err error) { if tt.failRecorderConnect { err = errors.New("test") diff --git a/sessionrecording/connect.go b/sessionrecording/connect.go index db966ba2c..94761393f 100644 --- a/sessionrecording/connect.go +++ b/sessionrecording/connect.go @@ -7,6 +7,8 @@ package sessionrecording import ( "context" + "crypto/tls" + "encoding/json" "errors" "fmt" "io" @@ -14,12 +16,33 @@ import ( "net/http" "net/http/httptrace" "net/netip" + "sync/atomic" "time" + "golang.org/x/net/http2" "tailscale.com/tailcfg" + "tailscale.com/util/httpm" "tailscale.com/util/multierr" ) +const ( + // Timeout for an individual DialFunc call for a single recorder address. + perDialAttemptTimeout = 5 * time.Second + // Timeout for the V2 API HEAD probe request (supportsV2). + http2ProbeTimeout = 10 * time.Second + // Maximum timeout for trying all available recorders, including V2 API + // probes and dial attempts. + allDialAttemptsTimeout = 30 * time.Second +) + +// uploadAckWindow is the period of time to wait for an ackFrame from recorder +// before terminating the connection. This is a variable to allow overriding it +// in tests. +var uploadAckWindow = 30 * time.Second + +// DialFunc is a function for dialing the recorder. 
+type DialFunc func(ctx context.Context, network, host string) (net.Conn, error) + // ConnectToRecorder connects to the recorder at any of the provided addresses. // It returns the first successful response, or a multierr if all attempts fail. // @@ -32,19 +55,15 @@ import ( // attempts are in order the recorder(s) was attempted. If successful a // successful connection is made, the last attempt in the slice is the // attempt for connected recorder. -func ConnectToRecorder(ctx context.Context, recs []netip.AddrPort, dial func(context.Context, string, string) (net.Conn, error)) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error) { +func ConnectToRecorder(ctx context.Context, recs []netip.AddrPort, dial DialFunc) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error) { if len(recs) == 0 { return nil, nil, nil, errors.New("no recorders configured") } // We use a special context for dialing the recorder, so that we can // limit the time we spend dialing to 30 seconds and still have an // unbounded context for the upload. - dialCtx, dialCancel := context.WithTimeout(ctx, 30*time.Second) + dialCtx, dialCancel := context.WithTimeout(ctx, allDialAttemptsTimeout) defer dialCancel() - hc, err := SessionRecordingClientForDialer(dialCtx, dial) - if err != nil { - return nil, nil, nil, err - } var errs []error var attempts []*tailcfg.SSHRecordingAttempt @@ -54,74 +73,230 @@ func ConnectToRecorder(ctx context.Context, recs []netip.AddrPort, dial func(con } attempts = append(attempts, attempt) - // We dial the recorder and wait for it to send a 100-continue - // response before returning from this function. This ensures that - // the recorder is ready to accept the recording. - - // got100 is closed when we receive the 100-continue response. - got100 := make(chan struct{}) - ctx = httptrace.WithClientTrace(ctx, &httptrace.ClientTrace{ - Got100Continue: func() { - close(got100) - }, - }) - - pr, pw := io.Pipe() - req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("http://%s:%d/record", ap.Addr(), ap.Port()), pr) + var pw io.WriteCloser + var errChan <-chan error + var err error + hc := clientHTTP2(dialCtx, dial) + // We need to probe V2 support using a separate HEAD request. Sending + // an HTTP/2 POST request to a HTTP/1 server will just "hang" until the + // request body is closed (instead of returning a 404 as one would + // expect). Sending a HEAD request without a body does not have that + // problem. + if supportsV2(ctx, hc, ap) { + pw, errChan, err = connectV2(ctx, hc, ap) + } else { + pw, errChan, err = connectV1(ctx, clientHTTP1(dialCtx, dial), ap) + } if err != nil { - err = fmt.Errorf("recording: error starting recording: %w", err) + err = fmt.Errorf("recording: error starting recording on %q: %w", ap, err) attempt.FailureMessage = err.Error() errs = append(errs, err) continue } - // We set the Expect header to 100-continue, so that the recorder - // will send a 100-continue response before it starts reading the - // request body. - req.Header.Set("Expect", "100-continue") + return pw, attempts, errChan, nil + } + return nil, attempts, nil, multierr.New(errs...) +} - // errChan is used to indicate the result of the request. - errChan := make(chan error, 1) - go func() { - resp, err := hc.Do(req) - if err != nil { - errChan <- fmt.Errorf("recording: error starting recording: %w", err) +// supportsV2 checks whether a recorder instance supports the /v2/record +// endpoint. 
+func supportsV2(ctx context.Context, hc *http.Client, ap netip.AddrPort) bool { + ctx, cancel := context.WithTimeout(ctx, http2ProbeTimeout) + defer cancel() + req, err := http.NewRequestWithContext(ctx, httpm.HEAD, fmt.Sprintf("http://%s/v2/record", ap), nil) + if err != nil { + return false + } + resp, err := hc.Do(req) + if err != nil { + return false + } + defer resp.Body.Close() + return resp.StatusCode == http.StatusOK && resp.ProtoMajor > 1 +} + +// connectV1 connects to the legacy /record endpoint on the recorder. It is +// used for backwards-compatibility with older tsrecorder instances. +// +// On success, it returns a WriteCloser that can be used to upload the +// recording, and a channel that will be sent an error (or nil) when the upload +// fails or completes. +func connectV1(ctx context.Context, hc *http.Client, ap netip.AddrPort) (io.WriteCloser, <-chan error, error) { + // We dial the recorder and wait for it to send a 100-continue + // response before returning from this function. This ensures that + // the recorder is ready to accept the recording. + + // got100 is closed when we receive the 100-continue response. + got100 := make(chan struct{}) + ctx = httptrace.WithClientTrace(ctx, &httptrace.ClientTrace{ + Got100Continue: func() { + close(got100) + }, + }) + + pr, pw := io.Pipe() + req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("http://%s/record", ap), pr) + if err != nil { + return nil, nil, err + } + // We set the Expect header to 100-continue, so that the recorder + // will send a 100-continue response before it starts reading the + // request body. + req.Header.Set("Expect", "100-continue") + + // errChan is used to indicate the result of the request. + errChan := make(chan error, 1) + go func() { + defer close(errChan) + resp, err := hc.Do(req) + if err != nil { + errChan <- err + return + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + errChan <- fmt.Errorf("recording: unexpected status: %v", resp.Status) + return + } + }() + select { + case <-got100: + return pw, errChan, nil + case err := <-errChan: + // If we get an error before we get the 100-continue response, + // we need to try another recorder. + if err == nil { + // If the error is nil, we got a 200 response, which + // is unexpected as we haven't sent any data yet. + err = errors.New("recording: unexpected EOF") + } + return nil, nil, err + } +} + +// connectV2 connects to the /v2/record endpoint on the recorder over HTTP/2. +// It explicitly tracks ack frames sent in the response and terminates the +// connection if sent recording data is un-acked for uploadAckWindow. +// +// On success, it returns a WriteCloser that can be used to upload the +// recording, and a channel that will be sent an error (or nil) when the upload +// fails or completes. +func connectV2(ctx context.Context, hc *http.Client, ap netip.AddrPort) (io.WriteCloser, <-chan error, error) { + pr, pw := io.Pipe() + upload := &readCounter{r: pr} + req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("http://%s/v2/record", ap), upload) + if err != nil { + return nil, nil, err + } + + // With HTTP/2, hc.Do will not block while the request body is being sent. + // It will return immediately and allow us to consume the response body at + // the same time. 
+ resp, err := hc.Do(req) + if err != nil { + return nil, nil, err + } + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + return nil, nil, fmt.Errorf("recording: unexpected status: %v", resp.Status) + } + + errChan := make(chan error, 1) + acks := make(chan int64) + // Read acks from the response and send them to the acks channel. + go func() { + defer close(errChan) + defer close(acks) + defer resp.Body.Close() + defer pw.Close() + dec := json.NewDecoder(resp.Body) + for { + var frame v2ResponseFrame + if err := dec.Decode(&frame); err != nil { + if !errors.Is(err, io.EOF) { + errChan <- fmt.Errorf("recording: unexpected error receiving acks: %w", err) + } return } - if resp.StatusCode != 200 { - errChan <- fmt.Errorf("recording: unexpected status: %v", resp.Status) + if frame.Error != "" { + errChan <- fmt.Errorf("recording: received error from the recorder: %q", frame.Error) return } - errChan <- nil - }() - select { - case <-got100: - case err := <-errChan: - // If we get an error before we get the 100-continue response, - // we need to try another recorder. - if err == nil { - // If the error is nil, we got a 200 response, which - // is unexpected as we haven't sent any data yet. - err = errors.New("recording: unexpected EOF") + select { + case acks <- frame.Ack: + case <-ctx.Done(): + return } - attempt.FailureMessage = err.Error() - errs = append(errs, err) - continue // try the next recorder } - return pw, attempts, errChan, nil - } - return nil, attempts, nil, multierr.New(errs...) + }() + // Track acks from the acks channel. + go func() { + // Hack for tests: some tests modify uploadAckWindow and reset it when + // the test ends. This can race with t.Reset call below. Making a copy + // here is a lazy workaround to not wait for this goroutine to exit in + // the test cases. + uploadAckWindow := uploadAckWindow + // This timer fires if we didn't receive an ack for too long. + t := time.NewTimer(uploadAckWindow) + defer t.Stop() + for { + select { + case <-t.C: + // Close the pipe which terminates the connection and cleans up + // other goroutines. Note that tsrecorder will send us ack + // frames even if there is no new data to ack. This helps + // detect broken recorder connection if the session is idle. + pr.CloseWithError(errNoAcks) + resp.Body.Close() + return + case _, ok := <-acks: + if !ok { + // acks channel closed means that the goroutine reading them + // finished, which means that the request has ended. + return + } + // TODO(awly): limit how far behind the received acks can be. This + // should handle scenarios where a session suddenly dumps a lot of + // output. + t.Reset(uploadAckWindow) + case <-ctx.Done(): + return + } + } + }() + + return pw, errChan, nil } -// SessionRecordingClientForDialer returns an http.Client that uses a clone of -// the provided Dialer's PeerTransport to dial connections. This is used to make -// requests to the session recording server to upload session recordings. It -// uses the provided dialCtx to dial connections, and limits a single dial to 5 -// seconds. -func SessionRecordingClientForDialer(dialCtx context.Context, dial func(context.Context, string, string) (net.Conn, error)) (*http.Client, error) { - tr := http.DefaultTransport.(*http.Transport).Clone() +var errNoAcks = errors.New("did not receive ack frames from the recorder in 30s") + +type v2ResponseFrame struct { + // Ack is the number of bytes received from the client so far. The bytes + // are not guaranteed to be durably stored yet. 
+ Ack int64 `json:"ack,omitempty"` + // Error is an error encountered while storing the recording. Error is only + // ever set as the last frame in the response. + Error string `json:"error,omitempty"` +} +// readCounter is an io.Reader that counts how many bytes were read. +type readCounter struct { + r io.Reader + sent atomic.Int64 +} + +func (u *readCounter) Read(buf []byte) (int, error) { + n, err := u.r.Read(buf) + u.sent.Add(int64(n)) + return n, err +} + +// clientHTTP1 returns a claassic http.Client with a per-dial context. It uses +// dialCtx and adds a 5s timeout to it. +func clientHTTP1(dialCtx context.Context, dial DialFunc) *http.Client { + tr := http.DefaultTransport.(*http.Transport).Clone() tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { - perAttemptCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + perAttemptCtx, cancel := context.WithTimeout(ctx, perDialAttemptTimeout) defer cancel() go func() { select { @@ -132,7 +307,32 @@ func SessionRecordingClientForDialer(dialCtx context.Context, dial func(context. }() return dial(perAttemptCtx, network, addr) } + return &http.Client{Transport: tr} +} + +// clientHTTP2 is like clientHTTP1 but returns an http.Client suitable for h2c +// requests (HTTP/2 over plaintext). Unfortunately the same client does not +// work for HTTP/1 so we need to split these up. +func clientHTTP2(dialCtx context.Context, dial DialFunc) *http.Client { return &http.Client{ - Transport: tr, - }, nil + Transport: &http2.Transport{ + // Allow "http://" scheme in URLs. + AllowHTTP: true, + // Pretend like we're using TLS, but actually use the provided + // DialFunc underneath. This is necessary to convince the transport + // to actually dial. + DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) { + perAttemptCtx, cancel := context.WithTimeout(ctx, perDialAttemptTimeout) + defer cancel() + go func() { + select { + case <-perAttemptCtx.Done(): + case <-dialCtx.Done(): + cancel() + } + }() + return dial(perAttemptCtx, network, addr) + }, + }, + } } diff --git a/sessionrecording/connect_test.go b/sessionrecording/connect_test.go new file mode 100644 index 000000000..c0fcf6d40 --- /dev/null +++ b/sessionrecording/connect_test.go @@ -0,0 +1,189 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package sessionrecording + +import ( + "bytes" + "context" + "crypto/rand" + "crypto/sha256" + "encoding/json" + "io" + "net" + "net/http" + "net/http/httptest" + "net/netip" + "testing" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" +) + +func TestConnectToRecorder(t *testing.T) { + tests := []struct { + desc string + http2 bool + // setup returns a recorder server mux, and a channel which sends the + // hash of the recording uploaded to it. The channel is expected to + // fire only once. 
+ setup func(t *testing.T) (*http.ServeMux, <-chan []byte) + wantErr bool + }{ + { + desc: "v1 recorder", + setup: func(t *testing.T) (*http.ServeMux, <-chan []byte) { + uploadHash := make(chan []byte, 1) + mux := http.NewServeMux() + mux.HandleFunc("POST /record", func(w http.ResponseWriter, r *http.Request) { + hash := sha256.New() + if _, err := io.Copy(hash, r.Body); err != nil { + t.Error(err) + } + uploadHash <- hash.Sum(nil) + }) + return mux, uploadHash + }, + }, + { + desc: "v2 recorder", + http2: true, + setup: func(t *testing.T) (*http.ServeMux, <-chan []byte) { + uploadHash := make(chan []byte, 1) + mux := http.NewServeMux() + mux.HandleFunc("POST /record", func(w http.ResponseWriter, r *http.Request) { + t.Error("received request to v1 endpoint") + http.Error(w, "not found", http.StatusNotFound) + }) + mux.HandleFunc("POST /v2/record", func(w http.ResponseWriter, r *http.Request) { + // Force the status to send to unblock the client waiting + // for it. + w.WriteHeader(http.StatusOK) + w.(http.Flusher).Flush() + + body := &readCounter{r: r.Body} + hash := sha256.New() + ctx, cancel := context.WithCancel(r.Context()) + go func() { + defer cancel() + if _, err := io.Copy(hash, body); err != nil { + t.Error(err) + } + }() + + // Send acks for received bytes. + tick := time.NewTicker(time.Millisecond) + defer tick.Stop() + enc := json.NewEncoder(w) + outer: + for { + select { + case <-ctx.Done(): + break outer + case <-tick.C: + if err := enc.Encode(v2ResponseFrame{Ack: body.sent.Load()}); err != nil { + t.Errorf("writing ack frame: %v", err) + break outer + } + } + } + + uploadHash <- hash.Sum(nil) + }) + // Probing HEAD endpoint which always returns 200 OK. + mux.HandleFunc("HEAD /v2/record", func(http.ResponseWriter, *http.Request) {}) + return mux, uploadHash + }, + }, + { + desc: "v2 recorder no acks", + http2: true, + wantErr: true, + setup: func(t *testing.T) (*http.ServeMux, <-chan []byte) { + // Make the client no-ack timeout quick for the test. + oldAckWindow := uploadAckWindow + uploadAckWindow = 100 * time.Millisecond + t.Cleanup(func() { uploadAckWindow = oldAckWindow }) + + uploadHash := make(chan []byte, 1) + mux := http.NewServeMux() + mux.HandleFunc("POST /record", func(w http.ResponseWriter, r *http.Request) { + t.Error("received request to v1 endpoint") + http.Error(w, "not found", http.StatusNotFound) + }) + mux.HandleFunc("POST /v2/record", func(w http.ResponseWriter, r *http.Request) { + // Force the status to send to unblock the client waiting + // for it. + w.WriteHeader(http.StatusOK) + w.(http.Flusher).Flush() + + // Consume the whole request body but don't send any acks + // back. + hash := sha256.New() + if _, err := io.Copy(hash, r.Body); err != nil { + t.Error(err) + } + // Goes in the channel buffer, non-blocking. + uploadHash <- hash.Sum(nil) + + // Block until the parent test case ends to prevent the + // request termination. We want to exercise the ack + // tracking logic specifically. + ctx, cancel := context.WithCancel(r.Context()) + t.Cleanup(cancel) + <-ctx.Done() + }) + mux.HandleFunc("HEAD /v2/record", func(http.ResponseWriter, *http.Request) {}) + return mux, uploadHash + }, + }, + } + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + mux, uploadHash := tt.setup(t) + + srv := httptest.NewUnstartedServer(mux) + if tt.http2 { + // Wire up h2c-compatible HTTP/2 server. This is optional + // because the v1 recorder didn't support HTTP/2 and we try to + // mimic that. 
+ h2s := &http2.Server{} + srv.Config.Handler = h2c.NewHandler(mux, h2s) + if err := http2.ConfigureServer(srv.Config, h2s); err != nil { + t.Errorf("configuring HTTP/2 support in server: %v", err) + } + } + srv.Start() + t.Cleanup(srv.Close) + + d := new(net.Dialer) + + ctx := context.Background() + w, _, errc, err := ConnectToRecorder(ctx, []netip.AddrPort{netip.MustParseAddrPort(srv.Listener.Addr().String())}, d.DialContext) + if err != nil { + t.Fatalf("ConnectToRecorder: %v", err) + } + + // Send some random data and hash it to compare with the recorded + // data hash. + hash := sha256.New() + const numBytes = 1 << 20 // 1MB + if _, err := io.CopyN(io.MultiWriter(w, hash), rand.Reader, numBytes); err != nil { + t.Fatalf("writing recording data: %v", err) + } + if err := w.Close(); err != nil { + t.Fatalf("closing recording stream: %v", err) + } + if err := <-errc; err != nil && !tt.wantErr { + t.Fatalf("error from the channel: %v", err) + } else if err == nil && tt.wantErr { + t.Fatalf("did not receive expected error from the channel") + } + + if recv, sent := <-uploadHash, hash.Sum(nil); !bytes.Equal(recv, sent) { + t.Errorf("mismatch in recording data hash, sent %x, received %x", sent, recv) + } + }) + } +} diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index 9ade1847e..7cb99c381 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -1170,7 +1170,7 @@ func (ss *sshSession) run() { if err != nil && !errors.Is(err, io.EOF) { isErrBecauseProcessExited := processDone.Load() && errors.Is(err, syscall.EIO) if !isErrBecauseProcessExited { - logf("stdout copy: %v, %T", err) + logf("stdout copy: %v", err) ss.cancelCtx(err) } } @@ -1520,9 +1520,14 @@ func (ss *sshSession) startNewRecording() (_ *recording, err error) { go func() { err := <-errChan if err == nil { - // Success. - ss.logf("recording: finished uploading recording") - return + select { + case <-ss.ctx.Done(): + // Success. 
+ ss.logf("recording: finished uploading recording") + return + default: + err = errors.New("recording upload ended before the SSH session") + } } if onFailure != nil && onFailure.NotifyURL != "" && len(attempts) > 0 { lastAttempt := attempts[len(attempts)-1] diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 7ce0aeea3..ad9cb1e57 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -33,6 +33,8 @@ import ( "time" gossh "github.com/tailscale/golang-x-crypto/ssh" + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/store/mem" "tailscale.com/net/memnet" @@ -481,10 +483,9 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { } var handler http.HandlerFunc - recordingServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + recordingServer := mockRecordingServer(t, func(w http.ResponseWriter, r *http.Request) { handler(w, r) - })) - defer recordingServer.Close() + }) s := &server{ logf: t.Logf, @@ -533,9 +534,10 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { { name: "upload-fails-after-starting", handler: func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.(http.Flusher).Flush() r.Body.Read(make([]byte, 1)) time.Sleep(100 * time.Millisecond) - w.WriteHeader(http.StatusInternalServerError) }, sshCommand: "echo hello && sleep 1 && echo world", wantClientOutput: "\r\n\r\nsession terminated\r\n\r\n", @@ -548,6 +550,7 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + s.logf = t.Logf tstest.Replace(t, &handler, tt.handler) sc, dc := memnet.NewTCPConn(src, dst, 1024) var wg sync.WaitGroup @@ -597,12 +600,12 @@ func TestMultipleRecorders(t *testing.T) { t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS) } done := make(chan struct{}) - recordingServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + recordingServer := mockRecordingServer(t, func(w http.ResponseWriter, r *http.Request) { defer close(done) - io.ReadAll(r.Body) w.WriteHeader(http.StatusOK) - })) - defer recordingServer.Close() + w.(http.Flusher).Flush() + io.ReadAll(r.Body) + }) badRecorder, err := net.Listen("tcp", ":0") if err != nil { t.Fatal(err) @@ -610,15 +613,9 @@ func TestMultipleRecorders(t *testing.T) { badRecorderAddr := badRecorder.Addr().String() badRecorder.Close() - badRecordingServer500 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(500) - })) - defer badRecordingServer500.Close() - - badRecordingServer200 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(200) - })) - defer badRecordingServer200.Close() + badRecordingServer500 := mockRecordingServer(t, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + }) s := &server{ logf: t.Logf, @@ -630,7 +627,6 @@ func TestMultipleRecorders(t *testing.T) { Recorders: []netip.AddrPort{ netip.MustParseAddrPort(badRecorderAddr), netip.MustParseAddrPort(badRecordingServer500.Listener.Addr().String()), - netip.MustParseAddrPort(badRecordingServer200.Listener.Addr().String()), netip.MustParseAddrPort(recordingServer.Listener.Addr().String()), }, OnRecordingFailure: &tailcfg.SSHRecorderFailureAction{ @@ -701,19 +697,21 @@ func TestSSHRecordingNonInteractive(t *testing.T) { } var recording 
[]byte ctx, cancel := context.WithTimeout(context.Background(), time.Second) - recordingServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + recordingServer := mockRecordingServer(t, func(w http.ResponseWriter, r *http.Request) { defer cancel() + w.WriteHeader(http.StatusOK) + w.(http.Flusher).Flush() + var err error recording, err = io.ReadAll(r.Body) if err != nil { t.Error(err) return } - })) - defer recordingServer.Close() + }) s := &server{ - logf: logger.Discard, + logf: t.Logf, lb: &localState{ sshEnabled: true, matchingRule: newSSHRule( @@ -1299,3 +1297,22 @@ func TestStdOsUserUserAssumptions(t *testing.T) { t.Errorf("os/user.User has %v fields; this package assumes %v", got, want) } } + +func mockRecordingServer(t *testing.T, handleRecord http.HandlerFunc) *httptest.Server { + t.Helper() + mux := http.NewServeMux() + mux.HandleFunc("POST /record", func(http.ResponseWriter, *http.Request) { + t.Errorf("v1 recording endpoint called") + }) + mux.HandleFunc("HEAD /v2/record", func(http.ResponseWriter, *http.Request) {}) + mux.HandleFunc("POST /v2/record", handleRecord) + + h2s := &http2.Server{} + srv := httptest.NewUnstartedServer(h2c.NewHandler(mux, h2s)) + if err := http2.ConfigureServer(srv.Config, h2s); err != nil { + t.Errorf("configuring HTTP/2 support in recording server: %v", err) + } + srv.Start() + t.Cleanup(srv.Close) + return srv +} From 93db50356536e89b70e5ca7650ab2abd36444fd2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 15 Nov 2024 13:31:35 -0800 Subject: [PATCH 0150/1708] ipn/ipnlocal: add IPN Bus NotifyRateLimit watch bit NotifyRateLimit Limit spamming GUIs with boring updates to once in 3 seconds, unless the notification is relatively interesting and the GUI should update immediately. This is basically @barnstar's #14119 but with the logic moved to be per-watch-session (since the bit is per session), rather than globally. And this distinguishes notable Notify messages (such as state changes) and makes them send immediately. 
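
For illustration only (not part of this patch): a watcher opts in to the coalescing by setting the new NotifyRateLimit bit in its watch mask, as the cmd/tailscale debug change below does. A minimal sketch, assuming the existing client/tailscale LocalClient API (WatchIPNBus, Next) that the CLI already uses; watchCoalesced is a hypothetical helper.

package sketch

import (
	"context"

	"tailscale.com/client/tailscale"
	"tailscale.com/ipn"
)

// watchCoalesced watches the IPN bus with rate limiting enabled: boring
// NetMap/Engine-only updates are coalesced and delivered every few seconds,
// while notable updates (state changes, browse-to-URL, etc.) arrive at once.
func watchCoalesced(ctx context.Context, lc *tailscale.LocalClient) error {
	mask := ipn.NotifyInitialState | ipn.NotifyRateLimit
	watcher, err := lc.WatchIPNBus(ctx, mask)
	if err != nil {
		return err
	}
	defer watcher.Close()
	for {
		n, err := watcher.Next()
		if err != nil {
			return err
		}
		_ = n // hand the notification to the GUI / render it
	}
}
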
Updates tailscale/corp#24553 Change-Id: I79cac52cce85280ce351e65e76ea11e107b00b49 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/debug.go | 5 + ipn/backend.go | 2 + ipn/ipnlocal/bus.go | 161 +++++++++++++++++++++++++++ ipn/ipnlocal/bus_test.go | 220 +++++++++++++++++++++++++++++++++++++ ipn/ipnlocal/local.go | 17 ++- 5 files changed, 395 insertions(+), 10 deletions(-) create mode 100644 ipn/ipnlocal/bus.go create mode 100644 ipn/ipnlocal/bus_test.go diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index fdde9ef09..7f235e85c 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -213,6 +213,7 @@ var debugCmd = &ffcli.Command{ fs := newFlagSet("watch-ipn") fs.BoolVar(&watchIPNArgs.netmap, "netmap", true, "include netmap in messages") fs.BoolVar(&watchIPNArgs.initial, "initial", false, "include initial status") + fs.BoolVar(&watchIPNArgs.rateLimit, "rate-limit", true, "rate limit messags") fs.BoolVar(&watchIPNArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap") fs.IntVar(&watchIPNArgs.count, "count", 0, "exit after printing this many statuses, or 0 to keep going forever") return fs @@ -500,6 +501,7 @@ var watchIPNArgs struct { netmap bool initial bool showPrivateKey bool + rateLimit bool count int } @@ -511,6 +513,9 @@ func runWatchIPN(ctx context.Context, args []string) error { if !watchIPNArgs.showPrivateKey { mask |= ipn.NotifyNoPrivateKeys } + if watchIPNArgs.rateLimit { + mask |= ipn.NotifyRateLimit + } watcher, err := localClient.WatchIPNBus(ctx, mask) if err != nil { return err diff --git a/ipn/backend.go b/ipn/backend.go index 5779727fe..91a35df0d 100644 --- a/ipn/backend.go +++ b/ipn/backend.go @@ -73,6 +73,8 @@ const ( NotifyInitialOutgoingFiles // if set, the first Notify message (sent immediately) will contain the current Taildrop OutgoingFiles NotifyInitialHealthState // if set, the first Notify message (sent immediately) will contain the current health.State of the client + + NotifyRateLimit // if set, rate limit spammy netmap updates to every few seconds ) // Notify is a communication from a backend (e.g. tailscaled) to a frontend diff --git a/ipn/ipnlocal/bus.go b/ipn/ipnlocal/bus.go new file mode 100644 index 000000000..65cc2573a --- /dev/null +++ b/ipn/ipnlocal/bus.go @@ -0,0 +1,161 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnlocal + +import ( + "context" + "time" + + "tailscale.com/ipn" + "tailscale.com/tstime" +) + +type rateLimitingBusSender struct { + fn func(*ipn.Notify) (keepGoing bool) + lastFlush time.Time // last call to fn, or zero value if none + interval time.Duration // 0 to flush immediately; non-zero to rate limit sends + clock tstime.DefaultClock // non-nil for testing + didSendTestHook func() // non-nil for testing + + // pending, if non-nil, is the pending notification that we + // haven't sent yet. We own this memory to mutate. + pending *ipn.Notify + + // flushTimer is non-nil if the timer is armed. + flushTimer tstime.TimerController // effectively a *time.Timer + flushTimerC <-chan time.Time // ... 
said ~Timer's C chan +} + +func (s *rateLimitingBusSender) close() { + if s.flushTimer != nil { + s.flushTimer.Stop() + } +} + +func (s *rateLimitingBusSender) flushChan() <-chan time.Time { + return s.flushTimerC +} + +func (s *rateLimitingBusSender) flush() (keepGoing bool) { + if n := s.pending; n != nil { + s.pending = nil + return s.flushNotify(n) + } + return true +} + +func (s *rateLimitingBusSender) flushNotify(n *ipn.Notify) (keepGoing bool) { + s.lastFlush = s.clock.Now() + return s.fn(n) +} + +// send conditionally sends n to the underlying fn, possibly rate +// limiting it, depending on whether s.interval is set, and whether +// n is a notable notification that the client (typically a GUI) would +// want to act on (render) immediately. +// +// It returns whether the caller should keep looping. +// +// The passed-in memory 'n' is owned by the caller and should +// not be mutated. +func (s *rateLimitingBusSender) send(n *ipn.Notify) (keepGoing bool) { + if s.interval <= 0 { + // No rate limiting case. + return s.fn(n) + } + if isNotableNotify(n) { + // Notable notifications are always sent immediately. + // But first send any boring one that was pending. + // TODO(bradfitz): there might be a boring one pending + // with a NetMap or Engine field that is redundant + // with the new one (n) with NetMap or Engine populated. + // We should clear the pending one's NetMap/Engine in + // that case. Or really, merge the two, but mergeBoringNotifies + // only handles the case of both sides being boring. + // So for now, flush both. + if !s.flush() { + return false + } + return s.flushNotify(n) + } + s.pending = mergeBoringNotifies(s.pending, n) + d := s.clock.Now().Sub(s.lastFlush) + if d > s.interval { + return s.flush() + } + nextFlushIn := s.interval - d + if s.flushTimer == nil { + s.flushTimer, s.flushTimerC = s.clock.NewTimer(nextFlushIn) + } else { + s.flushTimer.Reset(nextFlushIn) + } + return true +} + +func (s *rateLimitingBusSender) Run(ctx context.Context, ch <-chan *ipn.Notify) { + for { + select { + case <-ctx.Done(): + return + case n, ok := <-ch: + if !ok { + return + } + if !s.send(n) { + return + } + if f := s.didSendTestHook; f != nil { + f() + } + case <-s.flushChan(): + if !s.flush() { + return + } + } + } +} + +// mergeBoringNotify merges new notify 'src' into possibly-nil 'dst', +// either mutating 'dst' or allocating a new one if 'dst' is nil, +// returning the merged result. +// +// dst and src must both be "boring" (i.e. not notable per isNotifiableNotify). +func mergeBoringNotifies(dst, src *ipn.Notify) *ipn.Notify { + if dst == nil { + dst = &ipn.Notify{Version: src.Version} + } + if src.NetMap != nil { + dst.NetMap = src.NetMap + } + if src.Engine != nil { + dst.Engine = src.Engine + } + return dst +} + +// isNotableNotify reports whether n is a "notable" notification that +// should be sent on the IPN bus immediately (e.g. to GUIs) without +// rate limiting it for a few seconds. +// +// It effectively reports whether n contains any field set that's +// not NetMap or Engine. 
+func isNotableNotify(n *ipn.Notify) bool { + if n == nil { + return false + } + return n.State != nil || + n.SessionID != "" || + n.BackendLogID != nil || + n.BrowseToURL != nil || + n.LocalTCPPort != nil || + n.ClientVersion != nil || + n.Prefs != nil || + n.ErrMessage != nil || + n.LoginFinished != nil || + !n.DriveShares.IsNil() || + n.Health != nil || + len(n.IncomingFiles) > 0 || + len(n.OutgoingFiles) > 0 || + n.FilesWaiting != nil +} diff --git a/ipn/ipnlocal/bus_test.go b/ipn/ipnlocal/bus_test.go new file mode 100644 index 000000000..5c75ac54d --- /dev/null +++ b/ipn/ipnlocal/bus_test.go @@ -0,0 +1,220 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnlocal + +import ( + "context" + "reflect" + "slices" + "testing" + "time" + + "tailscale.com/drive" + "tailscale.com/ipn" + "tailscale.com/tstest" + "tailscale.com/tstime" + "tailscale.com/types/logger" + "tailscale.com/types/netmap" + "tailscale.com/types/views" +) + +func TestIsNotableNotify(t *testing.T) { + tests := []struct { + name string + notify *ipn.Notify + want bool + }{ + {"nil", nil, false}, + {"empty", &ipn.Notify{}, false}, + {"version", &ipn.Notify{Version: "foo"}, false}, + {"netmap", &ipn.Notify{NetMap: new(netmap.NetworkMap)}, false}, + {"engine", &ipn.Notify{Engine: new(ipn.EngineStatus)}, false}, + } + + // Then for all other fields, assume they're notable. + // We use reflect to catch fields that might be added in the future without + // remembering to update the [isNotableNotify] function. + rt := reflect.TypeFor[ipn.Notify]() + for i := range rt.NumField() { + n := &ipn.Notify{} + sf := rt.Field(i) + switch sf.Name { + case "_", "NetMap", "Engine", "Version": + // Already covered above or not applicable. + continue + case "DriveShares": + n.DriveShares = views.SliceOfViews[*drive.Share, drive.ShareView](make([]*drive.Share, 1)) + default: + rf := reflect.ValueOf(n).Elem().Field(i) + switch rf.Kind() { + case reflect.Pointer: + rf.Set(reflect.New(rf.Type().Elem())) + case reflect.String: + rf.SetString("foo") + case reflect.Slice: + rf.Set(reflect.MakeSlice(rf.Type(), 1, 1)) + default: + t.Errorf("unhandled field kind %v for %q", rf.Kind(), sf.Name) + } + } + + tests = append(tests, struct { + name string + notify *ipn.Notify + want bool + }{ + name: "field-" + rt.Field(i).Name, + notify: n, + want: true, + }) + } + + for _, tt := range tests { + if got := isNotableNotify(tt.notify); got != tt.want { + t.Errorf("%v: got %v; want %v", tt.name, got, tt.want) + } + } +} + +type rateLimitingBusSenderTester struct { + tb testing.TB + got []*ipn.Notify + clock *tstest.Clock + s *rateLimitingBusSender +} + +func (st *rateLimitingBusSenderTester) init() { + if st.s != nil { + return + } + st.clock = tstest.NewClock(tstest.ClockOpts{ + Start: time.Unix(1731777537, 0), // time I wrote this test :) + }) + st.s = &rateLimitingBusSender{ + clock: tstime.DefaultClock{Clock: st.clock}, + fn: func(n *ipn.Notify) bool { + st.got = append(st.got, n) + return true + }, + } +} + +func (st *rateLimitingBusSenderTester) send(n *ipn.Notify) { + st.tb.Helper() + st.init() + if !st.s.send(n) { + st.tb.Fatal("unexpected send failed") + } +} + +func (st *rateLimitingBusSenderTester) advance(d time.Duration) { + st.tb.Helper() + st.clock.Advance(d) + select { + case <-st.s.flushChan(): + if !st.s.flush() { + st.tb.Fatal("unexpected flush failed") + } + default: + } +} + +func TestRateLimitingBusSender(t *testing.T) { + nm1 := &ipn.Notify{NetMap: new(netmap.NetworkMap)} + nm2 := 
&ipn.Notify{NetMap: new(netmap.NetworkMap)} + eng1 := &ipn.Notify{Engine: new(ipn.EngineStatus)} + eng2 := &ipn.Notify{Engine: new(ipn.EngineStatus)} + + t.Run("unbuffered", func(t *testing.T) { + st := &rateLimitingBusSenderTester{tb: t} + st.send(nm1) + st.send(nm2) + st.send(eng1) + st.send(eng2) + if !slices.Equal(st.got, []*ipn.Notify{nm1, nm2, eng1, eng2}) { + t.Errorf("got %d items; want 4 specific ones, unmodified", len(st.got)) + } + }) + + t.Run("buffered", func(t *testing.T) { + st := &rateLimitingBusSenderTester{tb: t} + st.init() + st.s.interval = 1 * time.Second + st.send(&ipn.Notify{Version: "initial"}) + if len(st.got) != 1 { + t.Fatalf("got %d items; expected 1 (first to flush immediately)", len(st.got)) + } + st.send(nm1) + st.send(nm2) + st.send(eng1) + st.send(eng2) + if len(st.got) != 1 { + if len(st.got) != 1 { + t.Fatalf("got %d items; expected still just that first 1", len(st.got)) + } + } + + // But moving the clock should flush the rest, collasced into one new one. + st.advance(5 * time.Second) + if len(st.got) != 2 { + t.Fatalf("got %d items; want 2", len(st.got)) + } + gotn := st.got[1] + if gotn.NetMap != nm2.NetMap { + t.Errorf("got wrong NetMap; got %p", gotn.NetMap) + } + if gotn.Engine != eng2.Engine { + t.Errorf("got wrong Engine; got %p", gotn.Engine) + } + if t.Failed() { + t.Logf("failed Notify was: %v", logger.AsJSON(gotn)) + } + }) + + // Test the Run method + t.Run("run", func(t *testing.T) { + st := &rateLimitingBusSenderTester{tb: t} + st.init() + st.s.interval = 1 * time.Second + st.s.lastFlush = st.clock.Now() // pretend we just flushed + + flushc := make(chan *ipn.Notify, 1) + st.s.fn = func(n *ipn.Notify) bool { + flushc <- n + return true + } + didSend := make(chan bool, 2) + st.s.didSendTestHook = func() { didSend <- true } + waitSend := func() { + select { + case <-didSend: + case <-time.After(5 * time.Second): + t.Error("timeout waiting for call to send") + } + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + incoming := make(chan *ipn.Notify, 2) + go func() { + incoming <- nm1 + waitSend() + incoming <- nm2 + waitSend() + st.advance(5 * time.Second) + select { + case n := <-flushc: + if n.NetMap != nm2.NetMap { + t.Errorf("got wrong NetMap; got %p", n.NetMap) + } + case <-time.After(10 * time.Second): + t.Error("timeout") + } + cancel() + }() + + st.s.Run(ctx, incoming) + }) +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 33025ed40..cbbea32aa 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2780,20 +2780,17 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A go b.pollRequestEngineStatus(ctx) } - // TODO(marwan-at-work): check err // TODO(marwan-at-work): streaming background logs? 
defer b.DeleteForegroundSession(sessionID) - for { - select { - case <-ctx.Done(): - return - case n := <-ch: - if !fn(n) { - return - } - } + sender := &rateLimitingBusSender{fn: fn} + defer sender.close() + + if mask&ipn.NotifyRateLimit != 0 { + sender.interval = 3 * time.Second } + + sender.Run(ctx, ch) } // pollRequestEngineStatus calls b.e.RequestStatus every 2 seconds until ctx From da70a84a4babe00c2f07cb063e18098b795d6249 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 18 Nov 2024 12:04:12 -0800 Subject: [PATCH 0151/1708] ipn/ipnlocal: fix build, remove another Notify.BackendLogID reference that crept in I merged 5cae7c51bfa (removing Notify.BackendLogID) and 93db50356536e (adding another reference to Notify.BackendLogID) that didn't have merge conflicts, but didn't compile together. This removes the new reference, fixing the build. Updates #14129 Change-Id: I9bb68efd977342ea8822e525d656817235039a66 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/bus.go | 1 - 1 file changed, 1 deletion(-) diff --git a/ipn/ipnlocal/bus.go b/ipn/ipnlocal/bus.go index 65cc2573a..111a877d8 100644 --- a/ipn/ipnlocal/bus.go +++ b/ipn/ipnlocal/bus.go @@ -146,7 +146,6 @@ func isNotableNotify(n *ipn.Notify) bool { } return n.State != nil || n.SessionID != "" || - n.BackendLogID != nil || n.BrowseToURL != nil || n.LocalTCPPort != nil || n.ClientVersion != nil || From 00517c8189569171560c073cd983164ff7735e69 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 19 Nov 2024 13:07:19 +0000 Subject: [PATCH 0152/1708] kube/{kubeapi,kubeclient},ipn/store/kubestore,cmd/{containerboot,k8s-operator}: emit kube store Events (#14112) Adds functionality to kube client to emit Events. Updates kube store to emit Events when tailscaled state has been loaded, updated or if any errors where encountered during those operations. This should help in cases where an error related to state loading/updating caused the Pod to crash in a loop- unlike logs of the originally failed container instance, Events associated with the Pod will still be accessible even after N restarts. Updates tailscale/tailscale#14080 Signed-off-by: Irbe Krumina --- cmd/containerboot/kube.go | 4 +- cmd/containerboot/services.go | 2 +- .../deploy/chart/templates/proxy-rbac.yaml | 3 + .../deploy/manifests/operator.yaml | 8 + cmd/k8s-operator/deploy/manifests/proxy.yaml | 8 + .../deploy/manifests/userspace-proxy.yaml | 8 + cmd/k8s-operator/proxygroup_specs.go | 24 +- cmd/k8s-operator/testutils_test.go | 4 + ipn/store/kubestore/store_kube.go | 44 ++- kube/kubeapi/api.go | 57 +++- kube/kubeclient/client.go | 289 +++++++++++++----- kube/kubeclient/client_test.go | 151 +++++++++ kube/kubeclient/fake_client.go | 6 +- 13 files changed, 506 insertions(+), 102 deletions(-) create mode 100644 kube/kubeclient/client_test.go diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go index 908cc01ef..5a726c20b 100644 --- a/cmd/containerboot/kube.go +++ b/cmd/containerboot/kube.go @@ -61,7 +61,7 @@ func deleteAuthKey(ctx context.Context, secretName string) error { Path: "/data/authkey", }, } - if err := kc.JSONPatchSecret(ctx, secretName, m); err != nil { + if err := kc.JSONPatchResource(ctx, secretName, kubeclient.TypeSecrets, m); err != nil { if s, ok := err.(*kubeapi.Status); ok && s.Code == http.StatusUnprocessableEntity { // This is kubernetes-ese for "the field you asked to // delete already doesn't exist", aka no-op. 
@@ -81,7 +81,7 @@ func initKubeClient(root string) { kubeclient.SetRootPathForTesting(root) } var err error - kc, err = kubeclient.New() + kc, err = kubeclient.New("tailscale-container") if err != nil { log.Fatalf("Error creating kube client: %v", err) } diff --git a/cmd/containerboot/services.go b/cmd/containerboot/services.go index 4da7286b7..aed00250d 100644 --- a/cmd/containerboot/services.go +++ b/cmd/containerboot/services.go @@ -389,7 +389,7 @@ func (ep *egressProxy) setStatus(ctx context.Context, status *egressservices.Sta Path: fmt.Sprintf("/data/%s", egressservices.KeyEgressServices), Value: bs, } - if err := ep.kc.JSONPatchSecret(ctx, ep.stateSecret, []kubeclient.JSONPatch{patch}); err != nil { + if err := ep.kc.JSONPatchResource(ctx, ep.stateSecret, kubeclient.TypeSecrets, []kubeclient.JSONPatch{patch}); err != nil { return fmt.Errorf("error patching state Secret: %w", err) } ep.tailnetAddrs = n.NetMap.SelfNode.Addresses().AsSlice() diff --git a/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml index 1c15c9119..fa552a7c7 100644 --- a/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml @@ -16,6 +16,9 @@ rules: - apiGroups: [""] resources: ["secrets"] verbs: ["create","delete","deletecollection","get","list","patch","update","watch"] +- apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch", "get"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 9d8e9faf6..c6d7deef5 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -4703,6 +4703,14 @@ rules: - patch - update - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding diff --git a/cmd/k8s-operator/deploy/manifests/proxy.yaml b/cmd/k8s-operator/deploy/manifests/proxy.yaml index a79d48d73..1ad63c265 100644 --- a/cmd/k8s-operator/deploy/manifests/proxy.yaml +++ b/cmd/k8s-operator/deploy/manifests/proxy.yaml @@ -30,6 +30,14 @@ spec: valueFrom: fieldRef: fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid securityContext: capabilities: add: diff --git a/cmd/k8s-operator/deploy/manifests/userspace-proxy.yaml b/cmd/k8s-operator/deploy/manifests/userspace-proxy.yaml index 46b49a57b..6617f6d4b 100644 --- a/cmd/k8s-operator/deploy/manifests/userspace-proxy.yaml +++ b/cmd/k8s-operator/deploy/manifests/userspace-proxy.yaml @@ -24,3 +24,11 @@ spec: valueFrom: fieldRef: fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 27fd9ef71..b47cb39b1 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -126,15 +126,6 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHa }, }, }, - { - Name: "POD_NAME", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - // Secret is named after the pod. 
- FieldPath: "metadata.name", - }, - }, - }, { Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)", @@ -147,10 +138,6 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHa Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)", }, - { - Name: "TS_USERSPACE", - Value: "false", - }, { Name: "TS_INTERNAL_APP", Value: kubetypes.AppProxyGroupEgress, @@ -171,7 +158,7 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHa }) } - return envs + return append(c.Env, envs...) }() return ss, nil @@ -215,6 +202,15 @@ func pgRole(pg *tsapi.ProxyGroup, namespace string) *rbacv1.Role { return secrets }(), }, + { + APIGroups: []string{""}, + Resources: []string{"events"}, + Verbs: []string{ + "create", + "patch", + "get", + }, + }, }, } } diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index d42f1b7af..084f573e5 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -70,6 +70,8 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef Env: []corev1.EnvVar{ {Name: "TS_USERSPACE", Value: "false"}, {Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, + {Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.name"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, + {Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "TS_KUBE_SECRET", Value: opts.secretName}, {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"}, }, @@ -228,6 +230,8 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps Env: []corev1.EnvVar{ {Name: "TS_USERSPACE", Value: "true"}, {Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, + {Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.name"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, + {Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "TS_KUBE_SECRET", Value: opts.secretName}, {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"}, {Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/serve-config"}, diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 2dcc08b6e..462e6d434 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -7,6 +7,7 @@ package kubestore import ( "context" "fmt" + "log" "net" "os" "strings" @@ -19,8 +20,18 @@ import ( "tailscale.com/types/logger" ) -// TODO(irbekrm): should we bump this? should we have retries? See tailscale/tailscale#13024 -const timeout = 5 * time.Second +const ( + // timeout is the timeout for a single state update that includes calls to the API server to write or read a + // state Secret and emit an Event. 
+ timeout = 30 * time.Second + + reasonTailscaleStateUpdated = "TailscaledStateUpdated" + reasonTailscaleStateLoaded = "TailscaleStateLoaded" + reasonTailscaleStateUpdateFailed = "TailscaleStateUpdateFailed" + reasonTailscaleStateLoadFailed = "TailscaleStateLoadFailed" + eventTypeWarning = "Warning" + eventTypeNormal = "Normal" +) // Store is an ipn.StateStore that uses a Kubernetes Secret for persistence. type Store struct { @@ -35,7 +46,7 @@ type Store struct { // New returns a new Store that persists to the named Secret. func New(_ logger.Logf, secretName string) (*Store, error) { - c, err := kubeclient.New() + c, err := kubeclient.New("tailscale-state-store") if err != nil { return nil, err } @@ -72,13 +83,22 @@ func (s *Store) ReadState(id ipn.StateKey) ([]byte, error) { // WriteState implements the StateStore interface. func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) defer func() { if err == nil { s.memory.WriteState(ipn.StateKey(sanitizeKey(id)), bs) } + if err != nil { + if err := s.client.Event(ctx, eventTypeWarning, reasonTailscaleStateUpdateFailed, err.Error()); err != nil { + log.Printf("kubestore: error creating tailscaled state update Event: %v", err) + } + } else { + if err := s.client.Event(ctx, eventTypeNormal, reasonTailscaleStateUpdated, "Successfully updated tailscaled state Secret"); err != nil { + log.Printf("kubestore: error creating tailscaled state Event: %v", err) + } + } + cancel() }() - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() secret, err := s.client.GetSecret(ctx, s.secretName) if err != nil { @@ -107,7 +127,7 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) { Value: map[string][]byte{sanitizeKey(id): bs}, }, } - if err := s.client.JSONPatchSecret(ctx, s.secretName, m); err != nil { + if err := s.client.JSONPatchResource(ctx, s.secretName, kubeclient.TypeSecrets, m); err != nil { return fmt.Errorf("error patching Secret %s with a /data field: %v", s.secretName, err) } return nil @@ -119,8 +139,8 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) { Value: bs, }, } - if err := s.client.JSONPatchSecret(ctx, s.secretName, m); err != nil { - return fmt.Errorf("error patching Secret %s with /data/%s field", s.secretName, sanitizeKey(id)) + if err := s.client.JSONPatchResource(ctx, s.secretName, kubeclient.TypeSecrets, m); err != nil { + return fmt.Errorf("error patching Secret %s with /data/%s field: %v", s.secretName, sanitizeKey(id), err) } return nil } @@ -131,7 +151,7 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) { return err } -func (s *Store) loadState() error { +func (s *Store) loadState() (err error) { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() @@ -140,8 +160,14 @@ func (s *Store) loadState() error { if st, ok := err.(*kubeapi.Status); ok && st.Code == 404 { return ipn.ErrStateNotExist } + if err := s.client.Event(ctx, eventTypeWarning, reasonTailscaleStateLoadFailed, err.Error()); err != nil { + log.Printf("kubestore: error creating Event: %v", err) + } return err } + if err := s.client.Event(ctx, eventTypeNormal, reasonTailscaleStateLoaded, "Successfully loaded tailscaled state from Secret"); err != nil { + log.Printf("kubestore: error creating Event: %v", err) + } s.memory.LoadFromMap(secret.Data) return nil } diff --git a/kube/kubeapi/api.go b/kube/kubeapi/api.go index 0e42437a6..a2ae8cc79 100644 --- a/kube/kubeapi/api.go +++ 
b/kube/kubeapi/api.go @@ -7,7 +7,9 @@ // dependency size for those consumers when adding anything new here. package kubeapi -import "time" +import ( + "time" +) // Note: The API types are copied from k8s.io/api{,machinery} to not introduce a // module dependency on the Kubernetes API as it pulls in many more dependencies. @@ -151,6 +153,57 @@ type Secret struct { Data map[string][]byte `json:"data,omitempty"` } +// Event contains a subset of fields from corev1.Event. +// https://github.com/kubernetes/api/blob/6cc44b8953ae704d6d9ec2adf32e7ae19199ea9f/core/v1/types.go#L7034 +// It is copied here to avoid having to import kube libraries. +type Event struct { + TypeMeta `json:",inline"` + ObjectMeta `json:"metadata"` + Message string `json:"message,omitempty"` + Reason string `json:"reason,omitempty"` + Source EventSource `json:"source,omitempty"` // who is emitting this Event + Type string `json:"type,omitempty"` // Normal or Warning + // InvolvedObject is the subject of the Event. `kubectl describe` will, for most object types, display any + // currently present cluster Events matching the object (but you probably want to set UID for this to work). + InvolvedObject ObjectReference `json:"involvedObject"` + Count int32 `json:"count,omitempty"` // how many times Event was observed + FirstTimestamp time.Time `json:"firstTimestamp,omitempty"` + LastTimestamp time.Time `json:"lastTimestamp,omitempty"` +} + +// EventSource includes a subset of fields from corev1.EventSource. +// https://github.com/kubernetes/api/blob/6cc44b8953ae704d6d9ec2adf32e7ae19199ea9f/core/v1/types.go#L7007 +// It is copied here to avoid having to import kube libraries. +type EventSource struct { + // Component is the name of the component that is emitting the Event. + Component string `json:"component,omitempty"` +} + +// ObjectReference contains a subset of fields from corev1.ObjectReference. +// https://github.com/kubernetes/api/blob/6cc44b8953ae704d6d9ec2adf32e7ae19199ea9f/core/v1/types.go#L6902 +// It is copied here to avoid having to import kube libraries. +type ObjectReference struct { + // Kind of the referent. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty"` + // Namespace of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + // +optional + Namespace string `json:"namespace,omitempty"` + // Name of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +optional + Name string `json:"name,omitempty"` + // UID of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + // +optional + UID string `json:"uid,omitempty"` + // API version of the referent. + // +optional + APIVersion string `json:"apiVersion,omitempty"` +} + // Status is a return value for calls that don't return other objects. 
type Status struct { TypeMeta `json:",inline"` @@ -186,6 +239,6 @@ type Status struct { Code int `json:"code,omitempty"` } -func (s *Status) Error() string { +func (s Status) Error() string { return s.Message } diff --git a/kube/kubeclient/client.go b/kube/kubeclient/client.go index e8ddec75d..d4309448d 100644 --- a/kube/kubeclient/client.go +++ b/kube/kubeclient/client.go @@ -23,16 +23,21 @@ import ( "net/url" "os" "path/filepath" + "strings" "sync" "time" "tailscale.com/kube/kubeapi" + "tailscale.com/tstime" "tailscale.com/util/multierr" ) const ( saPath = "/var/run/secrets/kubernetes.io/serviceaccount" defaultURL = "https://kubernetes.default.svc" + + TypeSecrets = "secrets" + typeEvents = "events" ) // rootPathForTests is set by tests to override the root path to the @@ -57,8 +62,13 @@ type Client interface { GetSecret(context.Context, string) (*kubeapi.Secret, error) UpdateSecret(context.Context, *kubeapi.Secret) error CreateSecret(context.Context, *kubeapi.Secret) error + // Event attempts to ensure an event with the specified options associated with the Pod in which we are + // currently running. This is best effort - if the client is not able to create events, this operation will be a + // no-op. If there is already an Event with the given reason for the current Pod, it will get updated (only + // count and timestamp are expected to change), else a new event will be created. + Event(_ context.Context, typ, reason, msg string) error StrategicMergePatchSecret(context.Context, string, *kubeapi.Secret, string) error - JSONPatchSecret(context.Context, string, []JSONPatch) error + JSONPatchResource(_ context.Context, resourceName string, resourceType string, patches []JSONPatch) error CheckSecretPermissions(context.Context, string) (bool, bool, error) SetDialer(dialer func(context.Context, string, string) (net.Conn, error)) SetURL(string) @@ -66,15 +76,24 @@ type Client interface { type client struct { mu sync.Mutex + name string url string - ns string + podName string + podUID string + ns string // Pod namespace client *http.Client token string tokenExpiry time.Time + cl tstime.Clock + // hasEventsPerms is true if client can emit Events for the Pod in which it runs. If it is set to false any + // calls to Events() will be a no-op. + hasEventsPerms bool + // kubeAPIRequest sends a request to the kube API server. It can set to a fake in tests. + kubeAPIRequest kubeAPIRequestFunc } // New returns a new client -func New() (Client, error) { +func New(name string) (Client, error) { ns, err := readFile("namespace") if err != nil { return nil, err @@ -87,9 +106,11 @@ func New() (Client, error) { if ok := cp.AppendCertsFromPEM(caCert); !ok { return nil, fmt.Errorf("kube: error in creating root cert pool") } - return &client{ - url: defaultURL, - ns: string(ns), + c := &client{ + url: defaultURL, + ns: string(ns), + name: name, + cl: tstime.DefaultClock{}, client: &http.Client{ Transport: &http.Transport{ TLSClientConfig: &tls.Config{ @@ -97,7 +118,10 @@ func New() (Client, error) { }, }, }, - }, nil + } + c.kubeAPIRequest = newKubeAPIRequest(c) + c.setEventPerms() + return c, nil } // SetURL sets the URL to use for the Kubernetes API. 
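As an illustrative aside (not part of the patch), the following sketch shows how a caller might use the constructor and the new best-effort Event method declared on the Client interface above. "example-component" and the reason/message strings are placeholders; the sketch assumes the Pod exposes the POD_NAME/POD_UID env vars and has the Events RBAC discussed later in this diff, otherwise the call is a silent no-op.

```go
package main

import (
	"context"
	"log"

	"tailscale.com/kube/kubeclient"
)

func main() {
	// The name passed to New becomes the Event source component.
	kc, err := kubeclient.New("example-component")
	if err != nil {
		log.Fatalf("creating kube client: %v", err)
	}
	// "Normal" and "Warning" are the two Kubernetes event types; the reason is
	// used to key repeated events so only count and lastTimestamp change when
	// the same event is emitted again.
	if err := kc.Event(context.Background(), "Normal", "ExampleStateLoaded", "successfully loaded example state"); err != nil {
		log.Printf("emitting Event: %v", err)
	}
}
```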
@@ -115,14 +139,14 @@ func (c *client) SetDialer(dialer func(ctx context.Context, network, addr string func (c *client) expireToken() { c.mu.Lock() defer c.mu.Unlock() - c.tokenExpiry = time.Now() + c.tokenExpiry = c.cl.Now() } func (c *client) getOrRenewToken() (string, error) { c.mu.Lock() defer c.mu.Unlock() tk, te := c.token, c.tokenExpiry - if time.Now().Before(te) { + if c.cl.Now().Before(te) { return tk, nil } @@ -131,17 +155,10 @@ func (c *client) getOrRenewToken() (string, error) { return "", err } c.token = string(tkb) - c.tokenExpiry = time.Now().Add(30 * time.Minute) + c.tokenExpiry = c.cl.Now().Add(30 * time.Minute) return c.token, nil } -func (c *client) secretURL(name string) string { - if name == "" { - return fmt.Sprintf("%s/api/v1/namespaces/%s/secrets", c.url, c.ns) - } - return fmt.Sprintf("%s/api/v1/namespaces/%s/secrets/%s", c.url, c.ns, name) -} - func getError(resp *http.Response) error { if resp.StatusCode == 200 || resp.StatusCode == 201 { // These are the only success codes returned by the Kubernetes API. @@ -161,36 +178,41 @@ func setHeader(key, value string) func(*http.Request) { } } -// doRequest performs an HTTP request to the Kubernetes API. -// If in is not nil, it is expected to be a JSON-encodable object and will be -// sent as the request body. -// If out is not nil, it is expected to be a pointer to an object that can be -// decoded from JSON. -// If the request fails with a 401, the token is expired and a new one is -// requested. -func (c *client) doRequest(ctx context.Context, method, url string, in, out any, opts ...func(*http.Request)) error { - req, err := c.newRequest(ctx, method, url, in) - if err != nil { - return err - } - for _, opt := range opts { - opt(req) - } - resp, err := c.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - if err := getError(resp); err != nil { - if st, ok := err.(*kubeapi.Status); ok && st.Code == 401 { - c.expireToken() +type kubeAPIRequestFunc func(ctx context.Context, method, url string, in, out any, opts ...func(*http.Request)) error + +// newKubeAPIRequest returns a function that can perform an HTTP request to the Kubernetes API. +func newKubeAPIRequest(c *client) kubeAPIRequestFunc { + // If in is not nil, it is expected to be a JSON-encodable object and will be + // sent as the request body. + // If out is not nil, it is expected to be a pointer to an object that can be + // decoded from JSON. + // If the request fails with a 401, the token is expired and a new one is + // requested. + f := func(ctx context.Context, method, url string, in, out any, opts ...func(*http.Request)) error { + req, err := c.newRequest(ctx, method, url, in) + if err != nil { + return err } - return err - } - if out != nil { - return json.NewDecoder(resp.Body).Decode(out) + for _, opt := range opts { + opt(req) + } + resp, err := c.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if err := getError(resp); err != nil { + if st, ok := err.(*kubeapi.Status); ok && st.Code == 401 { + c.expireToken() + } + return err + } + if out != nil { + return json.NewDecoder(resp.Body).Decode(out) + } + return nil } - return nil + return f } func (c *client) newRequest(ctx context.Context, method, url string, in any) (*http.Request, error) { @@ -226,7 +248,7 @@ func (c *client) newRequest(ctx context.Context, method, url string, in any) (*h // GetSecret fetches the secret from the Kubernetes API. 
func (c *client) GetSecret(ctx context.Context, name string) (*kubeapi.Secret, error) { s := &kubeapi.Secret{Data: make(map[string][]byte)} - if err := c.doRequest(ctx, "GET", c.secretURL(name), nil, s); err != nil { + if err := c.kubeAPIRequest(ctx, "GET", c.resourceURL(name, TypeSecrets), nil, s); err != nil { return nil, err } return s, nil @@ -235,16 +257,16 @@ func (c *client) GetSecret(ctx context.Context, name string) (*kubeapi.Secret, e // CreateSecret creates a secret in the Kubernetes API. func (c *client) CreateSecret(ctx context.Context, s *kubeapi.Secret) error { s.Namespace = c.ns - return c.doRequest(ctx, "POST", c.secretURL(""), s, nil) + return c.kubeAPIRequest(ctx, "POST", c.resourceURL("", TypeSecrets), s, nil) } // UpdateSecret updates a secret in the Kubernetes API. func (c *client) UpdateSecret(ctx context.Context, s *kubeapi.Secret) error { - return c.doRequest(ctx, "PUT", c.secretURL(s.Name), s, nil) + return c.kubeAPIRequest(ctx, "PUT", c.resourceURL(s.Name, TypeSecrets), s, nil) } // JSONPatch is a JSON patch operation. -// It currently (2023-03-02) only supports "add" and "remove" operations. +// It currently (2024-11-15) only supports "add", "remove" and "replace" operations. // // https://tools.ietf.org/html/rfc6902 type JSONPatch struct { @@ -253,22 +275,22 @@ type JSONPatch struct { Value any `json:"value,omitempty"` } -// JSONPatchSecret updates a secret in the Kubernetes API using a JSON patch. -// It currently (2023-03-02) only supports "add" and "remove" operations. -func (c *client) JSONPatchSecret(ctx context.Context, name string, patch []JSONPatch) error { - for _, p := range patch { +// JSONPatchResource updates a resource in the Kubernetes API using a JSON patch. +// It currently (2024-11-15) only supports "add", "remove" and "replace" operations. +func (c *client) JSONPatchResource(ctx context.Context, name, typ string, patches []JSONPatch) error { + for _, p := range patches { if p.Op != "remove" && p.Op != "add" && p.Op != "replace" { return fmt.Errorf("unsupported JSON patch operation: %q", p.Op) } } - return c.doRequest(ctx, "PATCH", c.secretURL(name), patch, nil, setHeader("Content-Type", "application/json-patch+json")) + return c.kubeAPIRequest(ctx, "PATCH", c.resourceURL(name, typ), patches, nil, setHeader("Content-Type", "application/json-patch+json")) } // StrategicMergePatchSecret updates a secret in the Kubernetes API using a // strategic merge patch. // If a fieldManager is provided, it will be used to track the patch. func (c *client) StrategicMergePatchSecret(ctx context.Context, name string, s *kubeapi.Secret, fieldManager string) error { - surl := c.secretURL(name) + surl := c.resourceURL(name, TypeSecrets) if fieldManager != "" { uv := url.Values{ "fieldManager": {fieldManager}, @@ -277,7 +299,66 @@ func (c *client) StrategicMergePatchSecret(ctx context.Context, name string, s * } s.Namespace = c.ns s.Name = name - return c.doRequest(ctx, "PATCH", surl, s, nil, setHeader("Content-Type", "application/strategic-merge-patch+json")) + return c.kubeAPIRequest(ctx, "PATCH", surl, s, nil, setHeader("Content-Type", "application/strategic-merge-patch+json")) +} + +// Event tries to ensure an Event associated with the Pod in which we are running. It is best effort - the event will be +// created if the kube client on startup was able to determine the name and UID of this Pod from POD_NAME,POD_UID env +// vars and if permissions check for event creation succeeded. 
Events are keyed on opts.Reason- if an Event for the +// current Pod with that reason already exists, its count and first timestamp will be updated, else a new Event will be +// created. +func (c *client) Event(ctx context.Context, typ, reason, msg string) error { + if !c.hasEventsPerms { + return nil + } + name := c.nameForEvent(reason) + ev, err := c.getEvent(ctx, name) + now := c.cl.Now() + if err != nil { + if !IsNotFoundErr(err) { + return err + } + // Event not found - create it + ev := kubeapi.Event{ + ObjectMeta: kubeapi.ObjectMeta{ + Name: name, + Namespace: c.ns, + }, + Type: typ, + Reason: reason, + Message: msg, + Source: kubeapi.EventSource{ + Component: c.name, + }, + InvolvedObject: kubeapi.ObjectReference{ + Name: c.podName, + Namespace: c.ns, + UID: c.podUID, + Kind: "Pod", + APIVersion: "v1", + }, + + FirstTimestamp: now, + LastTimestamp: now, + Count: 1, + } + return c.kubeAPIRequest(ctx, "POST", c.resourceURL("", typeEvents), &ev, nil) + } + // If the Event already exists, we patch its count and last timestamp. This ensures that when users run 'kubectl + // describe pod...', they see the event just once (but with a message of how many times it has appeared over + // last timestamp - first timestamp period of time). + count := ev.Count + 1 + countPatch := JSONPatch{ + Op: "replace", + Value: count, + Path: "/count", + } + tsPatch := JSONPatch{ + Op: "replace", + Value: now, + Path: "/lastTimestamp", + } + return c.JSONPatchResource(ctx, name, typeEvents, []JSONPatch{countPatch, tsPatch}) } // CheckSecretPermissions checks the secret access permissions of the current @@ -293,7 +374,7 @@ func (c *client) StrategicMergePatchSecret(ctx context.Context, name string, s * func (c *client) CheckSecretPermissions(ctx context.Context, secretName string) (canPatch, canCreate bool, err error) { var errs []error for _, verb := range []string{"get", "update"} { - ok, err := c.checkPermission(ctx, verb, secretName) + ok, err := c.checkPermission(ctx, verb, TypeSecrets, secretName) if err != nil { log.Printf("error checking %s permission on secret %s: %v", verb, secretName, err) } else if !ok { @@ -303,12 +384,12 @@ func (c *client) CheckSecretPermissions(ctx context.Context, secretName string) if len(errs) > 0 { return false, false, multierr.New(errs...) } - canPatch, err = c.checkPermission(ctx, "patch", secretName) + canPatch, err = c.checkPermission(ctx, "patch", TypeSecrets, secretName) if err != nil { log.Printf("error checking patch permission on secret %s: %v", secretName, err) return false, false, nil } - canCreate, err = c.checkPermission(ctx, "create", secretName) + canCreate, err = c.checkPermission(ctx, "create", TypeSecrets, secretName) if err != nil { log.Printf("error checking create permission on secret %s: %v", secretName, err) return false, false, nil @@ -316,19 +397,64 @@ func (c *client) CheckSecretPermissions(ctx context.Context, secretName string) return canPatch, canCreate, nil } -// checkPermission reports whether the current pod has permission to use the -// given verb (e.g. get, update, patch, create) on secretName. -func (c *client) checkPermission(ctx context.Context, verb, secretName string) (bool, error) { +func IsNotFoundErr(err error) bool { + if st, ok := err.(*kubeapi.Status); ok && st.Code == 404 { + return true + } + return false +} + +// setEventPerms checks whether this client will be able to write tailscaled Events to its Pod and updates the state +// accordingly. 
If it determines that the client can not write Events, any subsequent calls to client.Event will be a +// no-op. +func (c *client) setEventPerms() { + name := os.Getenv("POD_NAME") + uid := os.Getenv("POD_UID") + hasPerms := false + defer func() { + c.podName = name + c.podUID = uid + c.hasEventsPerms = hasPerms + if !hasPerms { + log.Printf(`kubeclient: this client is not able to write tailscaled Events to the Pod in which it is running. + To help with future debugging you can make it able write Events by giving it get,create,patch permissions for Events in the Pod namespace + and setting POD_NAME, POD_UID env vars for the Pod.`) + } + }() + if name == "" || uid == "" { + return + } + for _, verb := range []string{"get", "create", "patch"} { + can, err := c.checkPermission(context.Background(), verb, typeEvents, "") + if err != nil { + log.Printf("kubeclient: error checking Events permissions: %v", err) + return + } + if !can { + return + } + } + hasPerms = true + return +} + +// checkPermission reports whether the current pod has permission to use the given verb (e.g. get, update, patch, +// create) on the given resource type. If name is not an empty string, will check the check will be for resource with +// the given name only. +func (c *client) checkPermission(ctx context.Context, verb, typ, name string) (bool, error) { + ra := map[string]any{ + "namespace": c.ns, + "verb": verb, + "resource": typ, + } + if name != "" { + ra["name"] = name + } sar := map[string]any{ "apiVersion": "authorization.k8s.io/v1", "kind": "SelfSubjectAccessReview", "spec": map[string]any{ - "resourceAttributes": map[string]any{ - "namespace": c.ns, - "verb": verb, - "resource": "secrets", - "name": secretName, - }, + "resourceAttributes": ra, }, } var res struct { @@ -337,15 +463,32 @@ func (c *client) checkPermission(ctx context.Context, verb, secretName string) ( } `json:"status"` } url := c.url + "/apis/authorization.k8s.io/v1/selfsubjectaccessreviews" - if err := c.doRequest(ctx, "POST", url, sar, &res); err != nil { + if err := c.kubeAPIRequest(ctx, "POST", url, sar, &res); err != nil { return false, err } return res.Status.Allowed, nil } -func IsNotFoundErr(err error) bool { - if st, ok := err.(*kubeapi.Status); ok && st.Code == 404 { - return true +// resourceURL returns a URL that can be used to interact with the given resource type and, if name is not empty string, +// the named resource of that type. +// Note that this only works for core/v1 resource types. +func (c *client) resourceURL(name, typ string) string { + if name == "" { + return fmt.Sprintf("%s/api/v1/namespaces/%s/%s", c.url, c.ns, typ) } - return false + return fmt.Sprintf("%s/api/v1/namespaces/%s/%s/%s", c.url, c.ns, typ, name) +} + +// nameForEvent returns a name for the Event that uniquely identifies Event with that reason for the current Pod. +func (c *client) nameForEvent(reason string) string { + return fmt.Sprintf("%s.%s.%s", c.podName, c.podUID, strings.ToLower(reason)) +} + +// getEvent fetches the event from the Kubernetes API. 
+func (c *client) getEvent(ctx context.Context, name string) (*kubeapi.Event, error) { + e := &kubeapi.Event{} + if err := c.kubeAPIRequest(ctx, "GET", c.resourceURL(name, typeEvents), nil, e); err != nil { + return nil, err + } + return e, nil } diff --git a/kube/kubeclient/client_test.go b/kube/kubeclient/client_test.go new file mode 100644 index 000000000..6b5e8171c --- /dev/null +++ b/kube/kubeclient/client_test.go @@ -0,0 +1,151 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package kubeclient + +import ( + "context" + "encoding/json" + "net/http" + "testing" + + "github.com/google/go-cmp/cmp" + "tailscale.com/kube/kubeapi" + "tailscale.com/tstest" +) + +func Test_client_Event(t *testing.T) { + cl := &tstest.Clock{} + tests := []struct { + name string + typ string + reason string + msg string + argSets []args + wantErr bool + }{ + { + name: "new_event_gets_created", + typ: "Normal", + reason: "TestReason", + msg: "TestMessage", + argSets: []args{ + { // request to GET event returns not found + wantsMethod: "GET", + wantsURL: "test-apiserver/api/v1/namespaces/test-ns/events/test-pod.test-uid.testreason", + setErr: &kubeapi.Status{Code: 404}, + }, + { // sends POST request to create event + wantsMethod: "POST", + wantsURL: "test-apiserver/api/v1/namespaces/test-ns/events", + wantsIn: &kubeapi.Event{ + ObjectMeta: kubeapi.ObjectMeta{ + Name: "test-pod.test-uid.testreason", + Namespace: "test-ns", + }, + Type: "Normal", + Reason: "TestReason", + Message: "TestMessage", + Source: kubeapi.EventSource{ + Component: "test-client", + }, + InvolvedObject: kubeapi.ObjectReference{ + Name: "test-pod", + UID: "test-uid", + Namespace: "test-ns", + APIVersion: "v1", + Kind: "Pod", + }, + FirstTimestamp: cl.Now(), + LastTimestamp: cl.Now(), + Count: 1, + }, + }, + }, + }, + { + name: "existing_event_gets_patched", + typ: "Warning", + reason: "TestReason", + msg: "TestMsg", + argSets: []args{ + { // request to GET event does not error - this is enough to assume that event exists + wantsMethod: "GET", + wantsURL: "test-apiserver/api/v1/namespaces/test-ns/events/test-pod.test-uid.testreason", + setOut: []byte(`{"count":2}`), + }, + { // sends PATCH request to update the event + wantsMethod: "PATCH", + wantsURL: "test-apiserver/api/v1/namespaces/test-ns/events/test-pod.test-uid.testreason", + wantsIn: []JSONPatch{ + {Op: "replace", Path: "/count", Value: int32(3)}, + {Op: "replace", Path: "/lastTimestamp", Value: cl.Now()}, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &client{ + cl: cl, + name: "test-client", + podName: "test-pod", + podUID: "test-uid", + url: "test-apiserver", + ns: "test-ns", + kubeAPIRequest: fakeKubeAPIRequest(t, tt.argSets), + hasEventsPerms: true, + } + if err := c.Event(context.Background(), tt.typ, tt.reason, tt.msg); (err != nil) != tt.wantErr { + t.Errorf("client.Event() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +// args is a set of values for testing a single call to client.kubeAPIRequest. +type args struct { + // wantsMethod is the expected value of 'method' arg. + wantsMethod string + // wantsURL is the expected value of 'url' arg. + wantsURL string + // wantsIn is the expected value of 'in' arg. + wantsIn any + // setOut can be set to a byte slice representing valid JSON. If set 'out' arg will get set to the unmarshalled + // JSON object. + setOut []byte + // setErr is the error that kubeAPIRequest will return. 
+ setErr error +} + +// fakeKubeAPIRequest can be used to test that a series of calls to client.kubeAPIRequest gets called with expected +// values and to set these calls to return preconfigured values. 'argSets' should be set to a slice of expected +// arguments and should-be return values of a series of kubeAPIRequest calls. +func fakeKubeAPIRequest(t *testing.T, argSets []args) kubeAPIRequestFunc { + count := 0 + f := func(ctx context.Context, gotMethod, gotUrl string, gotIn, gotOut any, opts ...func(*http.Request)) error { + t.Helper() + if count >= len(argSets) { + t.Fatalf("unexpected call to client.kubeAPIRequest, expected %d calls, but got a %dth call", len(argSets), count+1) + } + a := argSets[count] + if gotMethod != a.wantsMethod { + t.Errorf("[%d] got method %q, wants method %q", count, gotMethod, a.wantsMethod) + } + if gotUrl != a.wantsURL { + t.Errorf("[%d] got URL %q, wants URL %q", count, gotMethod, a.wantsMethod) + } + if d := cmp.Diff(gotIn, a.wantsIn); d != "" { + t.Errorf("[%d] unexpected payload (-want + got):\n%s", count, d) + } + if len(a.setOut) != 0 { + if err := json.Unmarshal(a.setOut, gotOut); err != nil { + t.Fatalf("[%d] error unmarshalling output: %v", count, err) + } + } + count++ + return a.setErr + } + return f +} diff --git a/kube/kubeclient/fake_client.go b/kube/kubeclient/fake_client.go index 3cef3d27e..5716ca31b 100644 --- a/kube/kubeclient/fake_client.go +++ b/kube/kubeclient/fake_client.go @@ -29,7 +29,11 @@ func (fc *FakeClient) SetDialer(dialer func(ctx context.Context, network, addr s func (fc *FakeClient) StrategicMergePatchSecret(context.Context, string, *kubeapi.Secret, string) error { return nil } -func (fc *FakeClient) JSONPatchSecret(context.Context, string, []JSONPatch) error { +func (fc *FakeClient) Event(context.Context, string, string, string) error { + return nil +} + +func (fc *FakeClient) JSONPatchResource(context.Context, string, string, []JSONPatch) error { return nil } func (fc *FakeClient) UpdateSecret(context.Context, *kubeapi.Secret) error { return nil } From bb3d0cae5f7669a4d665c2c282be770b9297650d Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 18 Nov 2024 15:02:33 +0000 Subject: [PATCH 0153/1708] licenses: update license notices Signed-off-by: License Updater --- licenses/apple.md | 31 +++++++++++++++---------------- licenses/tailscale.md | 12 ++++++------ licenses/windows.md | 30 +++++++++++++++--------------- 3 files changed, 36 insertions(+), 37 deletions(-) diff --git a/licenses/apple.md b/licenses/apple.md index 36c654c59..aae006c95 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -12,24 +12,23 @@ See also the dependencies in the [Tailscale CLI][]. 
- [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.30.4/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.32.4/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.27.28/config/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.28/credentials/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.12/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.16/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.16/internal/endpoints/v2/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.23/internal/configsources/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.23/internal/endpoints/v2/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.1/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.30.4/internal/sync/singleflight/LICENSE)) - - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.11.4/service/internal/accept-encoding/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.32.4/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.0/service/internal/accept-encoding/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.11.18/service/internal/presigned-url/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.45.0/service/ssm/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.22.5/service/sso/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.26.5/service/ssooidc/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.30.4/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.20.4/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.20.4/internal/sync/singleflight/LICENSE)) + - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.0/LICENSE)) + - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.0/internal/sync/singleflight/LICENSE)) - [github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) ([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE)) - - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) @@ -48,9 +47,9 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - [github.com/josharian/native](https://pkg.go.dev/github.com/josharian/native) ([MIT](https://github.com/josharian/native/blob/5c7d0dd6ab86/license)) - [github.com/jsimonetti/rtnetlink](https://pkg.go.dev/github.com/jsimonetti/rtnetlink) ([MIT](https://github.com/jsimonetti/rtnetlink/blob/v1.4.1/LICENSE.md)) - - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.8/LICENSE)) - - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.8/internal/snapref/LICENSE)) - - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.8/zstd/internal/xxhash/LICENSE.txt)) + - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE)) + - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE)) + - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - [github.com/mdlayher/genetlink](https://pkg.go.dev/github.com/mdlayher/genetlink) ([MIT](https://github.com/mdlayher/genetlink/blob/v1.3.2/LICENSE.md)) - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/v1.7.2/LICENSE.md)) @@ -74,12 +73,12 @@ See also the dependencies in the [Tailscale CLI][]. 
- [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.28.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fe59bbe5:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.8.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.26.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fc45aab8:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.30.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.9.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.27.0:LICENSE)) - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.25.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.19.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.20.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.5.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/64c016c92987/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index b1303d2a6..8f05acedc 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -58,9 +58,9 @@ Some packages may only be included on certain architectures or operating systems - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - [github.com/josharian/native](https://pkg.go.dev/github.com/josharian/native) ([MIT](https://github.com/josharian/native/blob/5c7d0dd6ab86/license)) - [github.com/kballard/go-shellquote](https://pkg.go.dev/github.com/kballard/go-shellquote) ([MIT](https://github.com/kballard/go-shellquote/blob/95032a82bc51/LICENSE)) - - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.4/LICENSE)) - - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.4/internal/snapref/LICENSE)) - - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) 
([MIT](https://github.com/klauspost/compress/blob/v1.17.4/zstd/internal/xxhash/LICENSE.txt)) + - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE)) + - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE)) + - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - [github.com/kr/fs](https://pkg.go.dev/github.com/kr/fs) ([BSD-3-Clause](https://github.com/kr/fs/blob/v0.1.0/LICENSE)) - [github.com/mattn/go-colorable](https://pkg.go.dev/github.com/mattn/go-colorable) ([MIT](https://github.com/mattn/go-colorable/blob/v0.1.13/LICENSE)) @@ -84,7 +84,7 @@ Some packages may only be included on certain architectures or operating systems - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE)) - [github.com/tailscale/web-client-prebuilt](https://pkg.go.dev/github.com/tailscale/web-client-prebuilt) ([BSD-3-Clause](https://github.com/tailscale/web-client-prebuilt/blob/5db17b287bf1/LICENSE)) - [github.com/tailscale/wf](https://pkg.go.dev/github.com/tailscale/wf) ([BSD-3-Clause](https://github.com/tailscale/wf/blob/6fbb0a674ee6/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/799c1978fafc/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/4e883d38c8d3/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tcnksm/go-httpstat](https://pkg.go.dev/github.com/tcnksm/go-httpstat) ([MIT](https://github.com/tcnksm/go-httpstat/blob/v0.2.0/LICENSE)) - [github.com/toqueteos/webbrowser](https://pkg.go.dev/github.com/toqueteos/webbrowser) ([MIT](https://github.com/toqueteos/webbrowser/blob/v1.2.0/LICENSE.md)) @@ -98,8 +98,8 @@ Some packages may only be included on certain architectures or operating systems - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/1b970713:LICENSE)) - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE)) - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.16.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.7.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.9.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) 
([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.27.0:LICENSE)) - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE)) - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.5.0:LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index 8cef25685..4cb35e8de 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -13,22 +13,22 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/alexbrainman/sspi](https://pkg.go.dev/github.com/alexbrainman/sspi) ([BSD-3-Clause](https://github.com/alexbrainman/sspi/blob/1a75b4708caa/LICENSE)) - [github.com/apenwarr/fixconsole](https://pkg.go.dev/github.com/apenwarr/fixconsole) ([Apache-2.0](https://github.com/apenwarr/fixconsole/blob/5a9f6489cc29/LICENSE)) - [github.com/apenwarr/w32](https://pkg.go.dev/github.com/apenwarr/w32) ([BSD-3-Clause](https://github.com/apenwarr/w32/blob/aa00fece76ab/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.30.4/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.32.4/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.27.28/config/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.28/credentials/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.12/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.16/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.16/internal/endpoints/v2/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.23/internal/configsources/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.23/internal/endpoints/v2/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.1/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) 
([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.30.4/internal/sync/singleflight/LICENSE)) - - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.11.4/service/internal/accept-encoding/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.32.4/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.0/service/internal/accept-encoding/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.11.18/service/internal/presigned-url/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.45.0/service/ssm/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.22.5/service/sso/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.26.5/service/ssooidc/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.30.4/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.20.4/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.20.4/internal/sync/singleflight/LICENSE)) + - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.0/LICENSE)) + - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.0/internal/sync/singleflight/LICENSE)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) @@ -44,9 +44,9 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - [github.com/josharian/native](https://pkg.go.dev/github.com/josharian/native) ([MIT](https://github.com/josharian/native/blob/5c7d0dd6ab86/license)) - [github.com/jsimonetti/rtnetlink](https://pkg.go.dev/github.com/jsimonetti/rtnetlink) ([MIT](https://github.com/jsimonetti/rtnetlink/blob/v1.4.1/LICENSE.md)) - - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.8/LICENSE)) - - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.8/internal/snapref/LICENSE)) - - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.8/zstd/internal/xxhash/LICENSE.txt)) + - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE)) + - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE)) + - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/v1.7.2/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) @@ -66,14 +66,14 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.28.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fe59bbe5:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fc45aab8:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.18.0:LICENSE)) - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.19.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.8.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.26.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.30.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.9.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.27.0:LICENSE)) - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.25.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.19.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.20.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - [gopkg.in/Knetic/govaluate.v3](https://pkg.go.dev/gopkg.in/Knetic/govaluate.v3) ([MIT](https://github.com/Knetic/govaluate/blob/v3.0.0/LICENSE)) From d62baa45e646c243b0a38e71e7cf76508a1b6c76 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 19 Nov 2024 09:07:32 -0800 Subject: [PATCH 0154/1708] version: validate Long format on Android builds Updates #14069 Change-Id: I134a90db561dacc4b1c1c66ccadac135b5d64cf3 Signed-off-by: Brad Fitzpatrick --- version/version.go | 40 ++++++++++++++++++++++++++++++++ version/version_checkformat.go | 17 ++++++++++++++ version/version_internal_test.go | 28 ++++++++++++++++++++++ 3 files changed, 85 insertions(+) create mode 100644 version/version_checkformat.go create mode 100644 version/version_internal_test.go diff --git a/version/version.go b/version/version.go index 4b96d15ea..5edea22ca 100644 --- a/version/version.go +++ b/version/version.go @@ -7,6 +7,7 @@ package version import ( "fmt" "runtime/debug" + "strconv" "strings" tailscaleroot "tailscale.com" @@ -169,3 +170,42 @@ func majorMinorPatch() 
string { ret, _, _ := strings.Cut(Short(), "-") return ret } + +func isValidLongWithTwoRepos(v string) bool { + s := strings.Split(v, "-") + if len(s) != 3 { + return false + } + hexChunk := func(s string) bool { + if len(s) < 6 { + return false + } + for i := range len(s) { + b := s[i] + if (b < '0' || b > '9') && (b < 'a' || b > 'f') { + return false + } + } + return true + } + + v, t, g := s[0], s[1], s[2] + if !strings.HasPrefix(t, "t") || !strings.HasPrefix(g, "g") || + !hexChunk(t[1:]) || !hexChunk(g[1:]) { + return false + } + nums := strings.Split(v, ".") + if len(nums) != 3 { + return false + } + for i, n := range nums { + bits := 8 + if i == 2 { + bits = 16 + } + if _, err := strconv.ParseUint(n, 10, bits); err != nil { + return false + } + } + return true +} diff --git a/version/version_checkformat.go b/version/version_checkformat.go new file mode 100644 index 000000000..8a24eda13 --- /dev/null +++ b/version/version_checkformat.go @@ -0,0 +1,17 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build tailscale_go && android + +package version + +import "fmt" + +func init() { + // For official Android builds using the tailscale_go toolchain, + // panic if the builder is screwed up we fail to stamp a valid + // version string. + if !isValidLongWithTwoRepos(Long()) { + panic(fmt.Sprintf("malformed version.Long value %q", Long())) + } +} diff --git a/version/version_internal_test.go b/version/version_internal_test.go new file mode 100644 index 000000000..ce6bd6270 --- /dev/null +++ b/version/version_internal_test.go @@ -0,0 +1,28 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package version + +import "testing" + +func TestIsValidLongWithTwoRepos(t *testing.T) { + tests := []struct { + long string + want bool + }{ + {"1.2.3-t01234abcde-g01234abcde", true}, + {"1.2.259-t01234abcde-g01234abcde", true}, // big patch version + {"1.2.3-t01234abcde", false}, // missing repo + {"1.2.3-g01234abcde", false}, // missing repo + {"1.2.3-g01234abcde", false}, // missing repo + {"-t01234abcde-g01234abcde", false}, + {"1.2.3", false}, + {"1.2.3-t01234abcde-g", false}, + {"1.2.3-t01234abcde-gERRBUILDINFO", false}, + } + for _, tt := range tests { + if got := isValidLongWithTwoRepos(tt.long); got != tt.want { + t.Errorf("IsValidLongWithTwoRepos(%q) = %v; want %v", tt.long, got, tt.want) + } + } +} From 810da91a9e3e4b2a9fe0e8aba21b10ed5cf9db34 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 19 Nov 2024 10:28:26 -0800 Subject: [PATCH 0155/1708] version: fix earlier test/wording mistakes Updates #14069 Change-Id: I1d2fd8a8ab6591af11bfb83748b94342a8ac718f Signed-off-by: Brad Fitzpatrick --- version/version_checkformat.go | 2 +- version/version_internal_test.go | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/version/version_checkformat.go b/version/version_checkformat.go index 8a24eda13..05a97d191 100644 --- a/version/version_checkformat.go +++ b/version/version_checkformat.go @@ -9,7 +9,7 @@ import "fmt" func init() { // For official Android builds using the tailscale_go toolchain, - // panic if the builder is screwed up we fail to stamp a valid + // panic if the builder is screwed up and we fail to stamp a valid // version string. 
if !isValidLongWithTwoRepos(Long()) { panic(fmt.Sprintf("malformed version.Long value %q", Long())) diff --git a/version/version_internal_test.go b/version/version_internal_test.go index ce6bd6270..19aeab442 100644 --- a/version/version_internal_test.go +++ b/version/version_internal_test.go @@ -14,7 +14,6 @@ func TestIsValidLongWithTwoRepos(t *testing.T) { {"1.2.259-t01234abcde-g01234abcde", true}, // big patch version {"1.2.3-t01234abcde", false}, // missing repo {"1.2.3-g01234abcde", false}, // missing repo - {"1.2.3-g01234abcde", false}, // missing repo {"-t01234abcde-g01234abcde", false}, {"1.2.3", false}, {"1.2.3-t01234abcde-g", false}, From 48343ee6738548dd85e908ea14d5f69338123ec1 Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Tue, 19 Nov 2024 10:55:58 -0700 Subject: [PATCH 0156/1708] util/winutil/s4u: fix token handle leak Fixes #14156 Signed-off-by: Aaron Klotz --- util/winutil/s4u/s4u_windows.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/util/winutil/s4u/s4u_windows.go b/util/winutil/s4u/s4u_windows.go index a12b4786a..8926aaedc 100644 --- a/util/winutil/s4u/s4u_windows.go +++ b/util/winutil/s4u/s4u_windows.go @@ -17,6 +17,7 @@ import ( "slices" "strconv" "strings" + "sync" "sync/atomic" "unsafe" @@ -128,9 +129,10 @@ func Login(logf logger.Logf, srcName string, u *user.User, capLevel CapabilityLe if err != nil { return nil, err } + tokenCloseOnce := sync.OnceFunc(func() { token.Close() }) defer func() { if err != nil { - token.Close() + tokenCloseOnce() } }() @@ -162,6 +164,7 @@ func Login(logf logger.Logf, srcName string, u *user.User, capLevel CapabilityLe sessToken.Close() } }() + tokenCloseOnce() } userProfile, err := winutil.LoadUserProfile(sessToken, u) From 9f33aeb649f279412f6b7b24a61506ef37fadb47 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Mon, 11 Nov 2024 16:51:58 +0000 Subject: [PATCH 0157/1708] wgengine/filter: actually use the passed CapTestFunc [capver 109] Initial support for SrcCaps was added in 5ec01bf but it was not actually working without this. Updates #12542 Signed-off-by: Anton Tolchanov --- tailcfg/tailcfg.go | 5 +++-- wgengine/filter/filter.go | 21 +++++++++++---------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 1b283a2fc..897e8d27f 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -142,7 +142,7 @@ type CapabilityVersion int // - 97: 2024-06-06: Client understands NodeAttrDisableSplitDNSWhenNoCustomResolvers // - 98: 2024-06-13: iOS/tvOS clients may provide serial number as part of posture information // - 99: 2024-06-14: Client understands NodeAttrDisableLocalDNSOverrideViaNRPT -// - 100: 2024-06-18: Client supports filtertype.Match.SrcCaps (issue #12542) +// - 100: 2024-06-18: Initial support for filtertype.Match.SrcCaps - actually usable in capver 109 (issue #12542) // - 101: 2024-07-01: Client supports SSH agent forwarding when handling connections with /bin/su // - 102: 2024-07-12: NodeAttrDisableMagicSockCryptoRouting support // - 103: 2024-07-24: Client supports NodeAttrDisableCaptivePortalDetection @@ -151,7 +151,8 @@ type CapabilityVersion int // - 106: 2024-09-03: fix panic regression from cryptokey routing change (65fe0ba7b5) // - 107: 2024-10-30: add App Connector to conffile (PR #13942) // - 108: 2024-11-08: Client sends ServicesHash in Hostinfo, understands c2n GET /vip-services. 
-const CurrentCapabilityVersion CapabilityVersion = 108 +// - 109: 2024-11-18: Client supports filtertype.Match.SrcCaps (issue #12542) +const CurrentCapabilityVersion CapabilityVersion = 109 type StableID string diff --git a/wgengine/filter/filter.go b/wgengine/filter/filter.go index 56224ac5d..9e5d8a37f 100644 --- a/wgengine/filter/filter.go +++ b/wgengine/filter/filter.go @@ -202,16 +202,17 @@ func New(matches []Match, capTest CapTestFunc, localNets, logIPs *netipx.IPSet, } f := &Filter{ - logf: logf, - matches4: matchesFamily(matches, netip.Addr.Is4), - matches6: matchesFamily(matches, netip.Addr.Is6), - cap4: capMatchesFunc(matches, netip.Addr.Is4), - cap6: capMatchesFunc(matches, netip.Addr.Is6), - local4: ipset.FalseContainsIPFunc(), - local6: ipset.FalseContainsIPFunc(), - logIPs4: ipset.FalseContainsIPFunc(), - logIPs6: ipset.FalseContainsIPFunc(), - state: state, + logf: logf, + matches4: matchesFamily(matches, netip.Addr.Is4), + matches6: matchesFamily(matches, netip.Addr.Is6), + cap4: capMatchesFunc(matches, netip.Addr.Is4), + cap6: capMatchesFunc(matches, netip.Addr.Is6), + local4: ipset.FalseContainsIPFunc(), + local6: ipset.FalseContainsIPFunc(), + logIPs4: ipset.FalseContainsIPFunc(), + logIPs6: ipset.FalseContainsIPFunc(), + state: state, + srcIPHasCap: capTest, } if localNets != nil { p := localNets.Prefixes() From 303a4a1dfb2408e4dbe07bf4ddc66457bac85d03 Mon Sep 17 00:00:00 2001 From: James Stocker Date: Wed, 20 Nov 2024 07:43:59 +0100 Subject: [PATCH 0158/1708] Make the deployment of an IngressClass optional, default to true (#14153) Fixes tailscale/tailscale#14152 Signed-off-by: James Stocker jamesrstocker@gmail.com Co-authored-by: James Stocker --- cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml | 2 ++ cmd/k8s-operator/deploy/chart/values.yaml | 3 +++ 2 files changed, 5 insertions(+) diff --git a/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml b/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml index 2a1fa81b4..208d58ee1 100644 --- a/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml @@ -1,3 +1,4 @@ +{{- if .Values.ingressClass.enabled }} apiVersion: networking.k8s.io/v1 kind: IngressClass metadata: @@ -6,3 +7,4 @@ metadata: spec: controller: tailscale.com/ts-ingress # controller name currently can not be changed # parameters: {} # currently no parameters are supported +{{- end }} diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index e6f4cada4..b24ba37b0 100644 --- a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -54,6 +54,9 @@ operatorConfig: # - name: EXTRA_VAR2 # value: "value2" +# In the case that you already have a tailscale ingressclass in your cluster (or vcluster), you can disable the creation here +ingressClass: + enabled: true # proxyConfig contains configuraton that will be applied to any ingress/egress # proxies created by the operator. From ebeb5da202c00c41a3c87ebf687f89a2fc70bb90 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 20 Nov 2024 14:22:34 +0000 Subject: [PATCH 0159/1708] cmd/k8s-operator,kube/kubeclient,docs/k8s: update rbac to emit events + small fixes (#14164) This is a follow-up to #14112 where our internal kube client was updated to allow it to emit Events - this updates our sample kube manifests and tsrecorder manifest templates so they can benefit from this functionality. 
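Concretely, two pieces need to line up for a proxy to emit these Events, both visible in the manifest diffs below: the proxy's Role must allow it to create and patch Events, and the Pod must learn its own name and UID via the downward API so the Events can be attached to the correct object. An illustrative, consolidated sketch of the two pieces (the diffs below are the authoritative versions):

# Role rule: permission to emit Events
- apiGroups: [""]
  resources: ["events"]
  verbs: ["get", "create", "patch"]

# Container env: identity of the Pod the Events describe
env:
  - name: POD_NAME
    valueFrom:
      fieldRef:
        fieldPath: metadata.name
  - name: POD_UID
    valueFrom:
      fieldRef:
        fieldPath: metadata.uid
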
Updates tailscale/tailscale#14080 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/tsrecorder_specs.go | 17 +++++++++++++++++ docs/k8s/proxy.yaml | 8 ++++++++ docs/k8s/role.yaml | 3 +++ docs/k8s/sidecar.yaml | 8 ++++++++ docs/k8s/subnet.yaml | 8 ++++++++ docs/k8s/userspace-sidecar.yaml | 8 ++++++++ kube/kubeclient/client_test.go | 2 +- 7 files changed, 53 insertions(+), 1 deletion(-) diff --git a/cmd/k8s-operator/tsrecorder_specs.go b/cmd/k8s-operator/tsrecorder_specs.go index 4a74fb7e0..4a7bf9887 100644 --- a/cmd/k8s-operator/tsrecorder_specs.go +++ b/cmd/k8s-operator/tsrecorder_specs.go @@ -130,6 +130,15 @@ func tsrRole(tsr *tsapi.Recorder, namespace string) *rbacv1.Role { fmt.Sprintf("%s-0", tsr.Name), // Contains the node state. }, }, + { + APIGroups: []string{""}, + Resources: []string{"events"}, + Verbs: []string{ + "get", + "create", + "patch", + }, + }, }, } } @@ -203,6 +212,14 @@ func env(tsr *tsapi.Recorder) []corev1.EnvVar { }, }, }, + { + Name: "POD_UID", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.uid", + }, + }, + }, { Name: "TS_STATE", Value: "kube:$(POD_NAME)", diff --git a/docs/k8s/proxy.yaml b/docs/k8s/proxy.yaml index 2ab7ed334..78e97c83b 100644 --- a/docs/k8s/proxy.yaml +++ b/docs/k8s/proxy.yaml @@ -44,6 +44,14 @@ spec: value: "{{TS_DEST_IP}}" - name: TS_AUTH_ONCE value: "true" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid securityContext: capabilities: add: diff --git a/docs/k8s/role.yaml b/docs/k8s/role.yaml index 6d6a8117d..d7d0846ab 100644 --- a/docs/k8s/role.yaml +++ b/docs/k8s/role.yaml @@ -13,3 +13,6 @@ rules: resourceNames: ["{{TS_KUBE_SECRET}}"] resources: ["secrets"] verbs: ["get", "update", "patch"] +- apiGroups: [""] # "" indicates the core API group + resources: ["events"] + verbs: ["get", "create", "patch"] diff --git a/docs/k8s/sidecar.yaml b/docs/k8s/sidecar.yaml index 7efd32a38..6baa6d545 100644 --- a/docs/k8s/sidecar.yaml +++ b/docs/k8s/sidecar.yaml @@ -26,6 +26,14 @@ spec: name: tailscale-auth key: TS_AUTHKEY optional: true + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid securityContext: capabilities: add: diff --git a/docs/k8s/subnet.yaml b/docs/k8s/subnet.yaml index 4b7066fb3..1af146be6 100644 --- a/docs/k8s/subnet.yaml +++ b/docs/k8s/subnet.yaml @@ -28,6 +28,14 @@ spec: optional: true - name: TS_ROUTES value: "{{TS_ROUTES}}" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid securityContext: capabilities: add: diff --git a/docs/k8s/userspace-sidecar.yaml b/docs/k8s/userspace-sidecar.yaml index fc4ed6350..ee19b10a5 100644 --- a/docs/k8s/userspace-sidecar.yaml +++ b/docs/k8s/userspace-sidecar.yaml @@ -27,3 +27,11 @@ spec: name: tailscale-auth key: TS_AUTHKEY optional: true + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid diff --git a/kube/kubeclient/client_test.go b/kube/kubeclient/client_test.go index 6b5e8171c..31878befe 100644 --- a/kube/kubeclient/client_test.go +++ b/kube/kubeclient/client_test.go @@ -134,7 +134,7 @@ func fakeKubeAPIRequest(t *testing.T, argSets []args) kubeAPIRequestFunc { t.Errorf("[%d] got method %q, wants method %q", count, gotMethod, a.wantsMethod) } if gotUrl != a.wantsURL { - t.Errorf("[%d] got URL %q, wants 
URL %q", count, gotMethod, a.wantsMethod) + t.Errorf("[%d] got URL %q, wants URL %q", count, gotUrl, a.wantsURL) } if d := cmp.Diff(gotIn, a.wantsIn); d != "" { t.Errorf("[%d] unexpected payload (-want + got):\n%s", count, d) From ebaf33a80c5872a2d1156aa3bb55f82f3ce1b97b Mon Sep 17 00:00:00 2001 From: James Scott Date: Wed, 20 Nov 2024 12:28:25 -0800 Subject: [PATCH 0160/1708] net/tsaddr: extract IsTailscaleIPv4 from IsTailscaleIP (#14169) Extracts tsaddr.IsTailscaleIPv4 out of tsaddr.IsTailscaleIP. This will allow for checking valid Tailscale assigned IPv4 addresses without checking IPv6 addresses. Updates #14168 Updates tailscale/corp#24620 Signed-off-by: James Scott --- net/tsaddr/tsaddr.go | 10 ++++-- net/tsaddr/tsaddr_test.go | 68 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+), 2 deletions(-) diff --git a/net/tsaddr/tsaddr.go b/net/tsaddr/tsaddr.go index e7e0ba088..06e6a26dd 100644 --- a/net/tsaddr/tsaddr.go +++ b/net/tsaddr/tsaddr.go @@ -66,15 +66,21 @@ const ( TailscaleServiceIPv6String = "fd7a:115c:a1e0::53" ) -// IsTailscaleIP reports whether ip is an IP address in a range that +// IsTailscaleIP reports whether IP is an IP address in a range that // Tailscale assigns from. func IsTailscaleIP(ip netip.Addr) bool { if ip.Is4() { - return CGNATRange().Contains(ip) && !ChromeOSVMRange().Contains(ip) + return IsTailscaleIPv4(ip) } return TailscaleULARange().Contains(ip) } +// IsTailscaleIPv4 reports whether an IPv4 IP is an IP address that +// Tailscale assigns from. +func IsTailscaleIPv4(ip netip.Addr) bool { + return CGNATRange().Contains(ip) && !ChromeOSVMRange().Contains(ip) +} + // TailscaleULARange returns the IPv6 Unique Local Address range that // is the superset range that Tailscale assigns out of. func TailscaleULARange() netip.Prefix { diff --git a/net/tsaddr/tsaddr_test.go b/net/tsaddr/tsaddr_test.go index 4aa2f8c60..43977352b 100644 --- a/net/tsaddr/tsaddr_test.go +++ b/net/tsaddr/tsaddr_test.go @@ -222,3 +222,71 @@ func TestContainsExitRoute(t *testing.T) { } } } + +func TestIsTailscaleIPv4(t *testing.T) { + tests := []struct { + in netip.Addr + want bool + }{ + { + in: netip.MustParseAddr("100.67.19.57"), + want: true, + }, + { + in: netip.MustParseAddr("10.10.10.10"), + want: false, + }, + { + + in: netip.MustParseAddr("fd7a:115c:a1e0:3f2b:7a1d:4e88:9c2b:7f01"), + want: false, + }, + { + in: netip.MustParseAddr("bc9d:0aa0:1f0a:69ab:eb5c:28e0:5456:a518"), + want: false, + }, + { + in: netip.MustParseAddr("100.115.92.157"), + want: false, + }, + } + for _, tt := range tests { + if got := IsTailscaleIPv4(tt.in); got != tt.want { + t.Errorf("IsTailscaleIPv4() = %v, want %v", got, tt.want) + } + } +} + +func TestIsTailscaleIP(t *testing.T) { + tests := []struct { + in netip.Addr + want bool + }{ + { + in: netip.MustParseAddr("100.67.19.57"), + want: true, + }, + { + in: netip.MustParseAddr("10.10.10.10"), + want: false, + }, + { + + in: netip.MustParseAddr("fd7a:115c:a1e0:3f2b:7a1d:4e88:9c2b:7f01"), + want: true, + }, + { + in: netip.MustParseAddr("bc9d:0aa0:1f0a:69ab:eb5c:28e0:5456:a518"), + want: false, + }, + { + in: netip.MustParseAddr("100.115.92.157"), + want: false, + }, + } + for _, tt := range tests { + if got := IsTailscaleIP(tt.in); got != tt.want { + t.Errorf("IsTailscaleIP() = %v, want %v", got, tt.want) + } + } +} From 02cafbe1cadfcd82d22beb9138d4673169fcdc82 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 20 Nov 2024 14:19:59 -0800 Subject: [PATCH 0161/1708] tsweb: change RequestID format to have a date in it So we can 
locate them in logs more easily. Updates tailscale/corp#24721 Change-Id: Ia766c75608050dde7edc99835979a6e9bb328df2 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 6 ++---- cmd/derper/derper_test.go | 1 + cmd/stund/depaware.txt | 6 ++---- tsweb/request_id.go | 13 ++++++++----- tsweb/tsweb_test.go | 22 ++++++++++++++++++++++ 5 files changed, 35 insertions(+), 13 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 81a7f14f4..076074f25 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -27,7 +27,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa L github.com/google/nftables/expr from github.com/google/nftables+ L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ L github.com/google/nftables/xt from github.com/google/nftables/expr+ - github.com/google/uuid from tailscale.com/util/fastuuid github.com/hdevalence/ed25519consensus from tailscale.com/tka L github.com/josharian/native from github.com/mdlayher/netlink+ L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon @@ -152,7 +151,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/hostinfo+ - tailscale.com/util/fastuuid from tailscale.com/tsweb 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale tailscale.com/util/lineiter from tailscale.com/hostinfo+ @@ -160,6 +158,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/mak from tailscale.com/health+ tailscale.com/util/multierr from tailscale.com/health+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto + tailscale.com/util/rands from tailscale.com/tsweb tailscale.com/util/set from tailscale.com/derp+ tailscale.com/util/singleflight from tailscale.com/net/dnscache tailscale.com/util/slicesx from tailscale.com/cmd/derper+ @@ -244,7 +243,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa crypto/tls from golang.org/x/crypto/acme+ crypto/x509 from crypto/tls+ crypto/x509/pkix from crypto/x509+ - database/sql/driver from github.com/google/uuid embed from crypto/internal/nistec+ encoding from encoding/json+ encoding/asn1 from crypto/x509+ @@ -276,7 +274,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa math/big from crypto/dsa+ math/bits from compress/flate+ math/rand from github.com/mdlayher/netlink+ - math/rand/v2 from tailscale.com/util/fastuuid+ + math/rand/v2 from internal/concurrent+ mime from github.com/prometheus/common/expfmt+ mime/multipart from net/http mime/quotedprintable from mime/multipart diff --git a/cmd/derper/derper_test.go b/cmd/derper/derper_test.go index 6ddf4455b..08d2e9cbf 100644 --- a/cmd/derper/derper_test.go +++ b/cmd/derper/derper_test.go @@ -109,6 +109,7 @@ func TestDeps(t *testing.T) { "gvisor.dev/gvisor/pkg/tcpip/header": "https://github.com/tailscale/tailscale/issues/9756", "tailscale.com/net/packet": "not needed in derper", "github.com/gaissmai/bart": "not needed in derper", + "database/sql/driver": "not needed in derper", // previously came in via github.com/google/uuid }, }.Check(t) } diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 7031b18e2..34a71c43e 100644 --- a/cmd/stund/depaware.txt +++ 
b/cmd/stund/depaware.txt @@ -8,7 +8,6 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ - github.com/google/uuid from tailscale.com/util/fastuuid 💣 github.com/prometheus/client_golang/prometheus from tailscale.com/tsweb/promvarz github.com/prometheus/client_golang/prometheus/internal from github.com/prometheus/client_golang/prometheus github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+ @@ -74,9 +73,9 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/ctxkey from tailscale.com/tsweb+ L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/tailcfg - tailscale.com/util/fastuuid from tailscale.com/tsweb tailscale.com/util/lineiter from tailscale.com/version/distro tailscale.com/util/nocasemaps from tailscale.com/types/ipproto + tailscale.com/util/rands from tailscale.com/tsweb tailscale.com/util/slicesx from tailscale.com/tailcfg tailscale.com/util/vizerror from tailscale.com/tailcfg+ tailscale.com/version from tailscale.com/envknob+ @@ -133,7 +132,6 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar crypto/tls from net/http+ crypto/x509 from crypto/tls crypto/x509/pkix from crypto/x509 - database/sql/driver from github.com/google/uuid embed from crypto/internal/nistec+ encoding from encoding/json+ encoding/asn1 from crypto/x509+ @@ -164,7 +162,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar math/big from crypto/dsa+ math/bits from compress/flate+ math/rand from math/big+ - math/rand/v2 from tailscale.com/util/fastuuid+ + math/rand/v2 from internal/concurrent+ mime from github.com/prometheus/common/expfmt+ mime/multipart from net/http mime/quotedprintable from mime/multipart diff --git a/tsweb/request_id.go b/tsweb/request_id.go index 8516b8f72..46e523852 100644 --- a/tsweb/request_id.go +++ b/tsweb/request_id.go @@ -6,9 +6,10 @@ package tsweb import ( "context" "net/http" + "time" "tailscale.com/util/ctxkey" - "tailscale.com/util/fastuuid" + "tailscale.com/util/rands" ) // RequestID is an opaque identifier for a HTTP request, used to correlate @@ -41,10 +42,12 @@ const RequestIDHeader = "X-Tailscale-Request-Id" // GenerateRequestID generates a new request ID with the current format. func GenerateRequestID() RequestID { - // REQ-1 indicates the version of the RequestID pattern. It is - // currently arbitrary but allows for forward compatible - // transitions if needed. - return RequestID("REQ-1" + fastuuid.NewUUID().String()) + // Return a string of the form "REQ-<...>" + // Previously we returned "REQ-1". + // Now we return "REQ-2" version, where the "2" doubles as the year 2YYY + // in a leading date. 
+ now := time.Now().UTC() + return RequestID("REQ-" + now.Format("20060102150405") + rands.HexString(16)) } // SetRequestID is an HTTP middleware that injects a RequestID in the diff --git a/tsweb/tsweb_test.go b/tsweb/tsweb_test.go index 13840c012..d4c9721e9 100644 --- a/tsweb/tsweb_test.go +++ b/tsweb/tsweb_test.go @@ -1307,6 +1307,28 @@ func TestBucket(t *testing.T) { } } +func TestGenerateRequestID(t *testing.T) { + t0 := time.Now() + got := GenerateRequestID() + t.Logf("Got: %q", got) + if !strings.HasPrefix(string(got), "REQ-2") { + t.Errorf("expect REQ-2 prefix; got %q", got) + } + const wantLen = len("REQ-2024112022140896f8ead3d3f3be27") + if len(got) != wantLen { + t.Fatalf("len = %d; want %d", len(got), wantLen) + } + d := got[len("REQ-"):][:14] + timeBack, err := time.Parse("20060102150405", string(d)) + if err != nil { + t.Fatalf("parsing time back: %v", err) + } + elapsed := timeBack.Sub(t0) + if elapsed > 3*time.Second { // allow for slow github actions runners :) + t.Fatalf("time back was %v; want within 3s", elapsed) + } +} + func ExampleMiddlewareStack() { // setHeader returns a middleware that sets header k = vs. setHeader := func(k string, vs ...string) Middleware { From 70d1241ca697a677145df84cf844f9c9cadd1bbc Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 20 Nov 2024 16:43:32 -0800 Subject: [PATCH 0162/1708] util/fastuuid: delete unused package Its sole user was deleted in 02cafbe1cadfc. And it has no public users: https://pkg.go.dev/tailscale.com/util/fastuuid?tab=importedby And nothing in other Tailsale repos that I can find. Updates tailscale/corp#24721 Change-Id: I8755770a255a91c6c99f596e6d10c303b3ddf213 Signed-off-by: Brad Fitzpatrick --- util/fastuuid/fastuuid.go | 56 -------------------------- util/fastuuid/fastuuid_test.go | 72 ---------------------------------- 2 files changed, 128 deletions(-) delete mode 100644 util/fastuuid/fastuuid.go delete mode 100644 util/fastuuid/fastuuid_test.go diff --git a/util/fastuuid/fastuuid.go b/util/fastuuid/fastuuid.go deleted file mode 100644 index 4b115ea4e..000000000 --- a/util/fastuuid/fastuuid.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package fastuuid implements a UUID construction using an in process CSPRNG. -package fastuuid - -import ( - crand "crypto/rand" - "encoding/binary" - "io" - "math/rand/v2" - "sync" - - "github.com/google/uuid" -) - -// NewUUID returns a new UUID using a pool of generators, good for highly -// concurrent use. 
-func NewUUID() uuid.UUID { - g := pool.Get().(*generator) - defer pool.Put(g) - return g.newUUID() -} - -var pool = sync.Pool{ - New: func() any { - return newGenerator() - }, -} - -type generator struct { - rng rand.ChaCha8 -} - -func seed() [32]byte { - var r [32]byte - if _, err := io.ReadFull(crand.Reader, r[:]); err != nil { - panic(err) - } - return r -} - -func newGenerator() *generator { - return &generator{ - rng: *rand.NewChaCha8(seed()), - } -} - -func (g *generator) newUUID() uuid.UUID { - var u uuid.UUID - binary.NativeEndian.PutUint64(u[:8], g.rng.Uint64()) - binary.NativeEndian.PutUint64(u[8:], g.rng.Uint64()) - u[6] = (u[6] & 0x0f) | 0x40 // Version 4 - u[8] = (u[8] & 0x3f) | 0x80 // Variant 10 - return u -} diff --git a/util/fastuuid/fastuuid_test.go b/util/fastuuid/fastuuid_test.go deleted file mode 100644 index f0d993904..000000000 --- a/util/fastuuid/fastuuid_test.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package fastuuid - -import ( - "testing" - - "github.com/google/uuid" -) - -func TestNewUUID(t *testing.T) { - g := pool.Get().(*generator) - defer pool.Put(g) - u := g.newUUID() - if u[6] != (u[6]&0x0f)|0x40 { - t.Errorf("version bits are incorrect") - } - if u[8] != (u[8]&0x3f)|0x80 { - t.Errorf("variant bits are incorrect") - } -} - -func BenchmarkBasic(b *testing.B) { - b.Run("NewUUID", func(b *testing.B) { - for range b.N { - NewUUID() - } - }) - - b.Run("uuid.New-unpooled", func(b *testing.B) { - uuid.DisableRandPool() - for range b.N { - uuid.New() - } - }) - - b.Run("uuid.New-pooled", func(b *testing.B) { - uuid.EnableRandPool() - for range b.N { - uuid.New() - } - }) -} - -func BenchmarkParallel(b *testing.B) { - b.Run("NewUUID", func(b *testing.B) { - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - NewUUID() - } - }) - }) - - b.Run("uuid.New-unpooled", func(b *testing.B) { - uuid.DisableRandPool() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - uuid.New() - } - }) - }) - - b.Run("uuid.New-pooled", func(b *testing.B) { - uuid.EnableRandPool() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - uuid.New() - } - }) - }) -} From af4c3a4a1baba868996bc9ed022d67ebe0320873 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Wed, 20 Nov 2024 17:48:06 -0500 Subject: [PATCH 0163/1708] cmd/tailscale/cli: create netmon in debug ts2021 Otherwise we'll see a panic if we hit the dnsfallback code and try to call NewDialer with a nil NetMon. 
Updates #14161 Signed-off-by: Andrew Dunham Change-Id: I81c6e72376599b341cb58c37134c2a948b97cf5f --- cmd/tailscale/cli/debug.go | 7 +++++++ control/controlhttp/constants.go | 2 ++ 2 files changed, 9 insertions(+) diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 7f235e85c..78bd708e5 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -36,6 +36,7 @@ import ( "tailscale.com/hostinfo" "tailscale.com/internal/noiseconn" "tailscale.com/ipn" + "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" "tailscale.com/net/tshttpproxy" "tailscale.com/paths" @@ -850,6 +851,11 @@ func runTS2021(ctx context.Context, args []string) error { logf = log.Printf } + netMon, err := netmon.New(logger.WithPrefix(logf, "netmon: ")) + if err != nil { + return fmt.Errorf("creating netmon: %w", err) + } + noiseDialer := &controlhttp.Dialer{ Hostname: ts2021Args.host, HTTPPort: "80", @@ -859,6 +865,7 @@ func runTS2021(ctx context.Context, args []string) error { ProtocolVersion: uint16(ts2021Args.version), Dialer: dialFunc, Logf: logf, + NetMon: netMon, } const tries = 2 for i := range tries { diff --git a/control/controlhttp/constants.go b/control/controlhttp/constants.go index 0b550accc..971212d63 100644 --- a/control/controlhttp/constants.go +++ b/control/controlhttp/constants.go @@ -76,6 +76,8 @@ type Dialer struct { // dropped. Logf logger.Logf + // NetMon is the [netmon.Monitor] to use for this Dialer. It must be + // non-nil. NetMon *netmon.Monitor // HealthTracker, if non-nil, is the health tracker to use. From 0c8c7c0f901f8a5e6cefe1334f3d2e0ad4db7b69 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 20 Nov 2024 16:14:13 -0800 Subject: [PATCH 0164/1708] net/tsaddr: include test input in test failure output https://go.dev/wiki/CodeReviewComments#useful-test-failures (Previously it was using subtests with names including the input, but once those went away, there was no context left) Updates #14169 Change-Id: Ib217028183a3d001fe4aee58f2edb746b7b3aa88 Signed-off-by: Brad Fitzpatrick --- net/tsaddr/tsaddr_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/tsaddr/tsaddr_test.go b/net/tsaddr/tsaddr_test.go index 43977352b..9ac1ce303 100644 --- a/net/tsaddr/tsaddr_test.go +++ b/net/tsaddr/tsaddr_test.go @@ -252,7 +252,7 @@ func TestIsTailscaleIPv4(t *testing.T) { } for _, tt := range tests { if got := IsTailscaleIPv4(tt.in); got != tt.want { - t.Errorf("IsTailscaleIPv4() = %v, want %v", got, tt.want) + t.Errorf("IsTailscaleIPv4(%v) = %v, want %v", tt.in, got, tt.want) } } } @@ -286,7 +286,7 @@ func TestIsTailscaleIP(t *testing.T) { } for _, tt := range tests { if got := IsTailscaleIP(tt.in); got != tt.want { - t.Errorf("IsTailscaleIP() = %v, want %v", got, tt.want) + t.Errorf("IsTailscaleIP(%v) = %v, want %v", tt.in, got, tt.want) } } } From e3c6ca43d3e3cad27714d07b3a9ec20141c9c65c Mon Sep 17 00:00:00 2001 From: Andrea Gottardo Date: Thu, 21 Nov 2024 12:56:41 -0800 Subject: [PATCH 0165/1708] cli: present risk warning when setting up app connector on macOS (#14181) --- cmd/tailscale/cli/risks.go | 13 ++++++++++--- cmd/tailscale/cli/set.go | 7 +++++++ cmd/tailscale/cli/up.go | 6 ++++++ 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/cmd/tailscale/cli/risks.go b/cmd/tailscale/cli/risks.go index 4cfa50d58..acb50e723 100644 --- a/cmd/tailscale/cli/risks.go +++ b/cmd/tailscale/cli/risks.go @@ -17,11 +17,18 @@ import ( ) var ( - riskTypes []string - riskLoseSSH = registerRiskType("lose-ssh") - riskAll = 
registerRiskType("all") + riskTypes []string + riskLoseSSH = registerRiskType("lose-ssh") + riskMacAppConnector = registerRiskType("mac-app-connector") + riskAll = registerRiskType("all") ) +const riskMacAppConnectorMessage = ` +You are trying to configure an app connector on macOS, which is not officially supported due to system limitations. This may result in performance and reliability issues. + +Do not use a macOS app connector for any mission-critical purposes. For the best experience, Linux is the only recommended platform for app connectors. +` + func registerRiskType(riskType string) string { riskTypes = append(riskTypes, riskType) return riskType diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 2e1251f04..e8e5f0c51 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -10,6 +10,7 @@ import ( "fmt" "net/netip" "os/exec" + "runtime" "strings" "github.com/peterbourgon/ff/v3/ffcli" @@ -203,6 +204,12 @@ func runSet(ctx context.Context, args []string) (retErr error) { } } + if runtime.GOOS == "darwin" && maskedPrefs.AppConnector.Advertise { + if err := presentRiskToUser(riskMacAppConnector, riskMacAppConnectorMessage, setArgs.acceptedRisks); err != nil { + return err + } + } + if maskedPrefs.RunSSHSet { wantSSH, haveSSH := maskedPrefs.RunSSH, curPrefs.RunSSH if err := presentSSHToggleRisk(wantSSH, haveSSH, setArgs.acceptedRisks); err != nil { diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 782df407d..6c5c6f337 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -379,6 +379,12 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus return false, nil, err } + if runtime.GOOS == "darwin" && env.upArgs.advertiseConnector { + if err := presentRiskToUser(riskMacAppConnector, riskMacAppConnectorMessage, env.upArgs.acceptedRisks); err != nil { + return false, nil, err + } + } + if env.upArgs.forceReauth && isSSHOverTailscale() { if err := presentRiskToUser(riskLoseSSH, `You are connected over Tailscale; this action will result in your SSH session disconnecting.`, env.upArgs.acceptedRisks); err != nil { return false, nil, err From c59ab6baacf3ddc96982b0f6cacd683157e8bc41 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 22 Nov 2024 06:53:46 +0000 Subject: [PATCH 0166/1708] cmd/k8s-operator/deploy: ensure that operator can write kube state Events (#14177) A small follow-up to #14112- ensures that the operator itself can emit Events for its kube state store changes. Updates tailscale/tailscale#14080 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/deploy/chart/templates/deployment.yaml | 8 ++++++++ cmd/k8s-operator/deploy/manifests/operator.yaml | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml index c428d5d1e..2653f2159 100644 --- a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml @@ -81,6 +81,14 @@ spec: - name: PROXY_DEFAULT_CLASS value: {{ .Values.proxyConfig.defaultProxyClass }} {{- end }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid {{- with .Values.operatorConfig.extraEnv }} {{- toYaml . 
| nindent 12 }} {{- end }} diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index c6d7deef5..4035afaba 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -4783,6 +4783,14 @@ spec: value: "false" - name: PROXY_FIREWALL_MODE value: auto + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid image: tailscale/k8s-operator:unstable imagePullPolicy: Always name: operator From 74d4652144f11ace04612496095d658414ab09db Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Fri, 22 Nov 2024 15:41:07 +0000 Subject: [PATCH 0167/1708] cmd/{containerboot,k8s-operator},k8s-operator: new options to expose user metrics (#14035) containerboot: Adds 3 new environment variables for containerboot, `TS_LOCAL_ADDR_PORT` (default `"${POD_IP}:9002"`), `TS_METRICS_ENABLED` (default `false`), and `TS_DEBUG_ADDR_PORT` (default `""`), to configure metrics and debug endpoints. In a follow-up PR, the health check endpoint will be updated to use the `TS_LOCAL_ADDR_PORT` if `TS_HEALTHCHECK_ADDR_PORT` hasn't been set. Users previously only had access to internal debug metrics (which are unstable and not recommended) via passing the `--debug` flag to tailscaled, but can now set `TS_METRICS_ENABLED=true` to expose the stable metrics documented at https://tailscale.com/kb/1482/client-metrics at `/metrics` on the addr/port specified by `TS_LOCAL_ADDR_PORT`. Users can also now configure a debug endpoint more directly via the `TS_DEBUG_ADDR_PORT` environment variable. This is not recommended for production use, but exposes an internal set of debug metrics and pprof endpoints. operator: The `ProxyClass` CRD's `.spec.metrics.enable` field now enables serving the stable user metrics documented at https://tailscale.com/kb/1482/client-metrics at `/metrics` on the same "metrics" container port that debug metrics were previously served on. To smooth the transition for anyone relying on the way the operator previously consumed this field, we also _temporarily_ serve tailscaled's internal debug metrics on the same `/debug/metrics` path as before, until 1.82.0 when debug metrics will be turned off by default even if `.spec.metrics.enable` is set. At that point, anyone who wishes to continue using the internal debug metrics (not recommended) will need to set the new `ProxyClass` field `.spec.statefulSet.pod.tailscaleContainer.debug.enable`. Users who wish to opt out of the transitional behaviour, where enabling `.spec.metrics.enable` also enables debug metrics, can set `.spec.statefulSet.pod.tailscaleContainer.debug.enable` to false (recommended). Separately but related, the operator will no longer specify a host port for the "metrics" container port definition. This caused scheduling conflicts when k8s needs to schedule more than one proxy per node, and was not necessary for allowing the pod's port to be exposed to prometheus scrapers. 
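As an illustration of how these settings fit together (the Pod and ProxyClass names below are arbitrary, and POD_IP is assumed to already be populated from the downward API as in the existing manifests), a containerboot-based proxy can opt in with environment variables alone:

env:
  - name: TS_METRICS_ENABLED
    value: "true"
  - name: TS_LOCAL_ADDR_PORT
    value: "$(POD_IP):9002"   # also the default whenever POD_IP is set
  # Optional and not recommended for production: unstable pprof/debug endpoints.
  - name: TS_DEBUG_ADDR_PORT
    value: "$(POD_IP):9001"

Under the operator, the equivalent is a ProxyClass that enables user metrics while explicitly opting out of the transitional debug behaviour:

apiVersion: tailscale.com/v1alpha1
kind: ProxyClass
metadata:
  name: metrics
spec:
  metrics:
    enable: true
  statefulSet:
    pod:
      tailscaleContainer:
        debug:
          enable: false
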
Updates #11292 --------- Co-authored-by: Kristoffer Dalby Signed-off-by: Tom Proctor --- cmd/containerboot/healthz.go | 2 +- cmd/containerboot/main.go | 8 ++ cmd/containerboot/metrics.go | 91 +++++++++++++++++++ cmd/containerboot/settings.go | 22 ++++- cmd/containerboot/tailscaled.go | 6 ++ .../crds/tailscale.com_proxyclasses.yaml | 45 ++++++++- .../deploy/manifests/operator.yaml | 45 ++++++++- cmd/k8s-operator/proxyclass.go | 4 + cmd/k8s-operator/proxyclass_test.go | 53 +++++++++++ cmd/k8s-operator/sts.go | 90 +++++++++++++++--- cmd/k8s-operator/sts_test.go | 74 ++++++++++++--- k8s-operator/api.md | 19 +++- .../apis/v1alpha1/types_proxyclass.go | 27 +++++- .../apis/v1alpha1/zz_generated.deepcopy.go | 20 ++++ 14 files changed, 472 insertions(+), 34 deletions(-) create mode 100644 cmd/containerboot/metrics.go diff --git a/cmd/containerboot/healthz.go b/cmd/containerboot/healthz.go index fb7fccd96..12e7ee9f8 100644 --- a/cmd/containerboot/healthz.go +++ b/cmd/containerboot/healthz.go @@ -39,7 +39,7 @@ func runHealthz(addr string, h *healthz) { log.Fatalf("error listening on the provided health endpoint address %q: %v", addr, err) } mux := http.NewServeMux() - mux.Handle("/healthz", h) + mux.Handle("GET /healthz", h) log.Printf("Running healthcheck endpoint at %s/healthz", addr) hs := &http.Server{Handler: mux} diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 17131faae..313e8deb0 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -178,6 +178,14 @@ func main() { } defer killTailscaled() + if cfg.LocalAddrPort != "" && cfg.MetricsEnabled { + m := &metrics{ + lc: client, + debugEndpoint: cfg.DebugAddrPort, + } + runMetrics(cfg.LocalAddrPort, m) + } + if cfg.EnableForwardingOptimizations { if err := client.SetUDPGROForwarding(bootCtx); err != nil { log.Printf("[unexpected] error enabling UDP GRO forwarding: %v", err) diff --git a/cmd/containerboot/metrics.go b/cmd/containerboot/metrics.go new file mode 100644 index 000000000..e88406f97 --- /dev/null +++ b/cmd/containerboot/metrics.go @@ -0,0 +1,91 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import ( + "fmt" + "io" + "log" + "net" + "net/http" + + "tailscale.com/client/tailscale" + "tailscale.com/client/tailscale/apitype" +) + +// metrics is a simple metrics HTTP server, if enabled it forwards requests to +// the tailscaled's LocalAPI usermetrics endpoint at /localapi/v0/usermetrics. 
+type metrics struct { + debugEndpoint string + lc *tailscale.LocalClient +} + +func proxy(w http.ResponseWriter, r *http.Request, url string, do func(*http.Request) (*http.Response, error)) { + req, err := http.NewRequestWithContext(r.Context(), r.Method, url, r.Body) + if err != nil { + http.Error(w, fmt.Sprintf("failed to construct request: %s", err), http.StatusInternalServerError) + return + } + req.Header = r.Header.Clone() + + resp, err := do(req) + if err != nil { + http.Error(w, fmt.Sprintf("failed to proxy request: %s", err), http.StatusInternalServerError) + return + } + defer resp.Body.Close() + + w.WriteHeader(resp.StatusCode) + for key, val := range resp.Header { + for _, v := range val { + w.Header().Add(key, v) + } + } + if _, err := io.Copy(w, resp.Body); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +func (m *metrics) handleMetrics(w http.ResponseWriter, r *http.Request) { + localAPIURL := "http://" + apitype.LocalAPIHost + "/localapi/v0/usermetrics" + proxy(w, r, localAPIURL, m.lc.DoLocalRequest) +} + +func (m *metrics) handleDebug(w http.ResponseWriter, r *http.Request) { + if m.debugEndpoint == "" { + http.Error(w, "debug endpoint not configured", http.StatusNotFound) + return + } + + debugURL := "http://" + m.debugEndpoint + r.URL.Path + proxy(w, r, debugURL, http.DefaultClient.Do) +} + +// runMetrics runs a simple HTTP metrics endpoint at /metrics, forwarding +// requests to tailscaled's /localapi/v0/usermetrics API. +// +// In 1.78.x and 1.80.x, it also proxies debug paths to tailscaled's debug +// endpoint if configured to ease migration for a breaking change serving user +// metrics instead of debug metrics on the "metrics" port. +func runMetrics(addr string, m *metrics) { + ln, err := net.Listen("tcp", addr) + if err != nil { + log.Fatalf("error listening on the provided metrics endpoint address %q: %v", addr, err) + } + + mux := http.NewServeMux() + mux.HandleFunc("GET /metrics", m.handleMetrics) + mux.HandleFunc("/debug/", m.handleDebug) // TODO(tomhjp): Remove for 1.82.0 release. + + log.Printf("Running metrics endpoint at %s/metrics", addr) + ms := &http.Server{Handler: mux} + + go func() { + if err := ms.Serve(ln); err != nil { + log.Fatalf("failed running metrics endpoint: %v", err) + } + }() +} diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index 742713e77..c877682b9 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -67,11 +67,18 @@ type settings struct { PodIP string PodIPv4 string PodIPv6 string - HealthCheckAddrPort string + HealthCheckAddrPort string // TODO(tomhjp): use the local addr/port instead. 
+ LocalAddrPort string + MetricsEnabled bool + DebugAddrPort string EgressSvcsCfgPath string } func configFromEnv() (*settings, error) { + defaultLocalAddrPort := "" + if v, ok := os.LookupEnv("POD_IP"); ok && v != "" { + defaultLocalAddrPort = fmt.Sprintf("%s:9002", v) + } cfg := &settings{ AuthKey: defaultEnvs([]string{"TS_AUTHKEY", "TS_AUTH_KEY"}, ""), Hostname: defaultEnv("TS_HOSTNAME", ""), @@ -98,6 +105,9 @@ func configFromEnv() (*settings, error) { PodIP: defaultEnv("POD_IP", ""), EnableForwardingOptimizations: defaultBool("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS", false), HealthCheckAddrPort: defaultEnv("TS_HEALTHCHECK_ADDR_PORT", ""), + LocalAddrPort: defaultEnv("TS_LOCAL_ADDR_PORT", defaultLocalAddrPort), + MetricsEnabled: defaultBool("TS_METRICS_ENABLED", false), + DebugAddrPort: defaultEnv("TS_DEBUG_ADDR_PORT", ""), EgressSvcsCfgPath: defaultEnv("TS_EGRESS_SERVICES_CONFIG_PATH", ""), } podIPs, ok := os.LookupEnv("POD_IPS") @@ -175,6 +185,16 @@ func (s *settings) validate() error { return fmt.Errorf("error parsing TS_HEALTH_CHECK_ADDR_PORT value %q: %w", s.HealthCheckAddrPort, err) } } + if s.LocalAddrPort != "" { + if _, err := netip.ParseAddrPort(s.LocalAddrPort); err != nil { + return fmt.Errorf("error parsing TS_LOCAL_ADDR_PORT value %q: %w", s.LocalAddrPort, err) + } + } + if s.DebugAddrPort != "" { + if _, err := netip.ParseAddrPort(s.DebugAddrPort); err != nil { + return fmt.Errorf("error parsing TS_DEBUG_ADDR_PORT value %q: %w", s.DebugAddrPort, err) + } + } return nil } diff --git a/cmd/containerboot/tailscaled.go b/cmd/containerboot/tailscaled.go index 53fb7e703..d8da49b03 100644 --- a/cmd/containerboot/tailscaled.go +++ b/cmd/containerboot/tailscaled.go @@ -90,6 +90,12 @@ func tailscaledArgs(cfg *settings) []string { if cfg.TailscaledConfigFilePath != "" { args = append(args, "--config="+cfg.TailscaledConfigFilePath) } + // Once enough proxy versions have been released for all the supported + // versions to understand this cfg setting, the operator can stop + // setting TS_TAILSCALED_EXTRA_ARGS for the debug flag. + if cfg.DebugAddrPort != "" && !strings.Contains(cfg.DaemonExtraArgs, cfg.DebugAddrPort) { + args = append(args, "--debug="+cfg.DebugAddrPort) + } if cfg.DaemonExtraArgs != "" { args = append(args, strings.Fields(cfg.DaemonExtraArgs)...) } diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index 7086138c0..4c24a1633 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -73,7 +73,12 @@ spec: enable: description: |- Setting enable to true will make the proxy serve Tailscale metrics - at :9001/debug/metrics. + at :9002/metrics. + + In 1.78.x and 1.80.x, this field also serves as the default value for + .spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both + fields will independently default to false. + Defaults to false. type: boolean statefulSet: @@ -1249,6 +1254,25 @@ spec: description: Configuration for the proxy container running tailscale. type: object properties: + debug: + description: |- + Configuration for enabling extra debug information in the container. + Not recommended for production use. + type: object + properties: + enable: + description: |- + Enable tailscaled's HTTP pprof endpoints at :9001/debug/pprof/ + and internal debug metrics endpoint at :9001/debug/metrics, where + 9001 is a container port named "debug". 
The endpoints and their responses + may change in backwards incompatible ways in the future, and should not + be considered stable. + + In 1.78.x and 1.80.x, this setting will default to the value of + .spec.metrics.enable, and requests to the "metrics" port matching the + mux pattern /debug/ will be forwarded to the "debug" port. In 1.82.x, + this setting will default to false, and no requests will be proxied. + type: boolean env: description: |- List of environment variables to set in the container. @@ -1553,6 +1577,25 @@ spec: description: Configuration for the proxy init container that enables forwarding. type: object properties: + debug: + description: |- + Configuration for enabling extra debug information in the container. + Not recommended for production use. + type: object + properties: + enable: + description: |- + Enable tailscaled's HTTP pprof endpoints at :9001/debug/pprof/ + and internal debug metrics endpoint at :9001/debug/metrics, where + 9001 is a container port named "debug". The endpoints and their responses + may change in backwards incompatible ways in the future, and should not + be considered stable. + + In 1.78.x and 1.80.x, this setting will default to the value of + .spec.metrics.enable, and requests to the "metrics" port matching the + mux pattern /debug/ will be forwarded to the "debug" port. In 1.82.x, + this setting will default to false, and no requests will be proxied. + type: boolean env: description: |- List of environment variables to set in the container. diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 4035afaba..f764fc09a 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -540,7 +540,12 @@ spec: enable: description: |- Setting enable to true will make the proxy serve Tailscale metrics - at :9001/debug/metrics. + at :9002/metrics. + + In 1.78.x and 1.80.x, this field also serves as the default value for + .spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both + fields will independently default to false. + Defaults to false. type: boolean required: @@ -1716,6 +1721,25 @@ spec: tailscaleContainer: description: Configuration for the proxy container running tailscale. properties: + debug: + description: |- + Configuration for enabling extra debug information in the container. + Not recommended for production use. + properties: + enable: + description: |- + Enable tailscaled's HTTP pprof endpoints at :9001/debug/pprof/ + and internal debug metrics endpoint at :9001/debug/metrics, where + 9001 is a container port named "debug". The endpoints and their responses + may change in backwards incompatible ways in the future, and should not + be considered stable. + + In 1.78.x and 1.80.x, this setting will default to the value of + .spec.metrics.enable, and requests to the "metrics" port matching the + mux pattern /debug/ will be forwarded to the "debug" port. In 1.82.x, + this setting will default to false, and no requests will be proxied. + type: boolean + type: object env: description: |- List of environment variables to set in the container. @@ -2020,6 +2044,25 @@ spec: tailscaleInitContainer: description: Configuration for the proxy init container that enables forwarding. properties: + debug: + description: |- + Configuration for enabling extra debug information in the container. + Not recommended for production use. 
+ properties: + enable: + description: |- + Enable tailscaled's HTTP pprof endpoints at :9001/debug/pprof/ + and internal debug metrics endpoint at :9001/debug/metrics, where + 9001 is a container port named "debug". The endpoints and their responses + may change in backwards incompatible ways in the future, and should not + be considered stable. + + In 1.78.x and 1.80.x, this setting will default to the value of + .spec.metrics.enable, and requests to the "metrics" port matching the + mux pattern /debug/ will be forwarded to the "debug" port. In 1.82.x, + this setting will default to false, and no requests will be proxied. + type: boolean + type: object env: description: |- List of environment variables to set in the container. diff --git a/cmd/k8s-operator/proxyclass.go b/cmd/k8s-operator/proxyclass.go index 882a9030f..13f217f3c 100644 --- a/cmd/k8s-operator/proxyclass.go +++ b/cmd/k8s-operator/proxyclass.go @@ -160,6 +160,10 @@ func (pcr *ProxyClassReconciler) validate(pc *tsapi.ProxyClass) (violations fiel violations = append(violations, field.TypeInvalid(field.NewPath("spec", "statefulSet", "pod", "tailscaleInitContainer", "image"), tc.Image, err.Error())) } } + + if tc.Debug != nil { + violations = append(violations, field.TypeInvalid(field.NewPath("spec", "statefulSet", "pod", "tailscaleInitContainer", "debug"), tc.Debug, "debug settings cannot be configured on the init container")) + } } } } diff --git a/cmd/k8s-operator/proxyclass_test.go b/cmd/k8s-operator/proxyclass_test.go index eb68811fc..fb17f5fe5 100644 --- a/cmd/k8s-operator/proxyclass_test.go +++ b/cmd/k8s-operator/proxyclass_test.go @@ -135,3 +135,56 @@ func TestProxyClass(t *testing.T) { expectReconciled(t, pcr, "", "test") expectEvents(t, fr, expectedEvents) } + +func TestValidateProxyClass(t *testing.T) { + for name, tc := range map[string]struct { + pc *tsapi.ProxyClass + valid bool + }{ + "empty": { + valid: true, + pc: &tsapi.ProxyClass{}, + }, + "debug_enabled_for_main_container": { + valid: true, + pc: &tsapi.ProxyClass{ + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Pod: &tsapi.Pod{ + TailscaleContainer: &tsapi.Container{ + Debug: &tsapi.Debug{ + Enable: true, + }, + }, + }, + }, + }, + }, + }, + "debug_enabled_for_init_container": { + valid: false, + pc: &tsapi.ProxyClass{ + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Pod: &tsapi.Pod{ + TailscaleInitContainer: &tsapi.Container{ + Debug: &tsapi.Debug{ + Enable: true, + }, + }, + }, + }, + }, + }, + }, + } { + t.Run(name, func(t *testing.T) { + pcr := &ProxyClassReconciler{} + err := pcr.validate(tc.pc) + valid := err == nil + if valid != tc.valid { + t.Errorf("expected valid=%v, got valid=%v, err=%v", tc.valid, valid, err) + } + }) + } +} diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index bdacec39b..5df476478 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -476,7 +476,7 @@ var proxyYaml []byte //go:embed deploy/manifests/userspace-proxy.yaml var userspaceProxyYaml []byte -func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret, tsConfigHash string, configs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) (*appsv1.StatefulSet, error) { +func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret, tsConfigHash string, _ map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) 
(*appsv1.StatefulSet, error) { ss := new(appsv1.StatefulSet) if sts.ServeConfig != nil && sts.ForwardClusterTrafficViaL7IngressProxy != true { // If forwarding cluster traffic via is required we need non-userspace + NET_ADMIN + forwarding if err := yaml.Unmarshal(userspaceProxyYaml, &ss); err != nil { @@ -666,24 +666,42 @@ func mergeStatefulSetLabelsOrAnnots(current, custom map[string]string, managed [ return custom } +func debugSetting(pc *tsapi.ProxyClass) bool { + if pc == nil || + pc.Spec.StatefulSet == nil || + pc.Spec.StatefulSet.Pod == nil || + pc.Spec.StatefulSet.Pod.TailscaleContainer == nil || + pc.Spec.StatefulSet.Pod.TailscaleContainer.Debug == nil { + // This default will change to false in 1.82.0. + return pc.Spec.Metrics != nil && pc.Spec.Metrics.Enable + } + + return pc.Spec.StatefulSet.Pod.TailscaleContainer.Debug.Enable +} + func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, stsCfg *tailscaleSTSConfig, logger *zap.SugaredLogger) *appsv1.StatefulSet { if pc == nil || ss == nil { return ss } - if stsCfg != nil && pc.Spec.Metrics != nil && pc.Spec.Metrics.Enable { - if stsCfg.TailnetTargetFQDN == "" && stsCfg.TailnetTargetIP == "" && !stsCfg.ForwardClusterTrafficViaL7IngressProxy { - enableMetrics(ss) - } else if stsCfg.ForwardClusterTrafficViaL7IngressProxy { + + metricsEnabled := pc.Spec.Metrics != nil && pc.Spec.Metrics.Enable + debugEnabled := debugSetting(pc) + if metricsEnabled || debugEnabled { + isEgress := stsCfg != nil && (stsCfg.TailnetTargetFQDN != "" || stsCfg.TailnetTargetIP != "") + isForwardingL7Ingress := stsCfg != nil && stsCfg.ForwardClusterTrafficViaL7IngressProxy + if isEgress { // TODO (irbekrm): fix this // For Ingress proxies that have been configured with // tailscale.com/experimental-forward-cluster-traffic-via-ingress // annotation, all cluster traffic is forwarded to the // Ingress backend(s). - logger.Info("ProxyClass specifies that metrics should be enabled, but this is currently not supported for Ingress proxies that accept cluster traffic.") - } else { + logger.Info("ProxyClass specifies that metrics should be enabled, but this is currently not supported for egress proxies.") + } else if isForwardingL7Ingress { // TODO (irbekrm): fix this // For egress proxies, currently all cluster traffic is forwarded to the tailnet target. logger.Info("ProxyClass specifies that metrics should be enabled, but this is currently not supported for Ingress proxies that accept cluster traffic.") + } else { + enableEndpoints(ss, metricsEnabled, debugEnabled) } } @@ -761,16 +779,58 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, return ss } -func enableMetrics(ss *appsv1.StatefulSet) { +func enableEndpoints(ss *appsv1.StatefulSet, metrics, debug bool) { for i, c := range ss.Spec.Template.Spec.Containers { if c.Name == "tailscale" { - // Serve metrics on on :9001/debug/metrics. If - // we didn't specify Pod IP here, the proxy would, in - // some cases, also listen to its Tailscale IP- we don't - // want folks to start relying on this side-effect as a - // feature. 
- ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{Name: "TS_TAILSCALED_EXTRA_ARGS", Value: "--debug=$(POD_IP):9001"}) - ss.Spec.Template.Spec.Containers[i].Ports = append(ss.Spec.Template.Spec.Containers[i].Ports, corev1.ContainerPort{Name: "metrics", Protocol: "TCP", HostPort: 9001, ContainerPort: 9001}) + if debug { + ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, + // Serve tailscaled's debug metrics on on + // :9001/debug/metrics. If we didn't specify Pod IP + // here, the proxy would, in some cases, also listen to its + // Tailscale IP- we don't want folks to start relying on this + // side-effect as a feature. + corev1.EnvVar{ + Name: "TS_DEBUG_ADDR_PORT", + Value: "$(POD_IP):9001", + }, + // TODO(tomhjp): Can remove this env var once 1.76.x is no + // longer supported. + corev1.EnvVar{ + Name: "TS_TAILSCALED_EXTRA_ARGS", + Value: "--debug=$(TS_DEBUG_ADDR_PORT)", + }, + ) + + ss.Spec.Template.Spec.Containers[i].Ports = append(ss.Spec.Template.Spec.Containers[i].Ports, + corev1.ContainerPort{ + Name: "debug", + Protocol: "TCP", + ContainerPort: 9001, + }, + ) + } + + if metrics { + ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, + // Serve client metrics on :9002/metrics. + corev1.EnvVar{ + Name: "TS_LOCAL_ADDR_PORT", + Value: "$(POD_IP):9002", + }, + corev1.EnvVar{ + Name: "TS_METRICS_ENABLED", + Value: "true", + }, + ) + ss.Spec.Template.Spec.Containers[i].Ports = append(ss.Spec.Template.Spec.Containers[i].Ports, + corev1.ContainerPort{ + Name: "metrics", + Protocol: "TCP", + ContainerPort: 9002, + }, + ) + } + break } } diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index 7263c56c3..7986d1b91 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -125,10 +125,26 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { }, }, } - proxyClassMetrics := &tsapi.ProxyClass{ - Spec: tsapi.ProxyClassSpec{ - Metrics: &tsapi.Metrics{Enable: true}, - }, + + proxyClassWithMetricsDebug := func(metrics bool, debug *bool) *tsapi.ProxyClass { + return &tsapi.ProxyClass{ + Spec: tsapi.ProxyClassSpec{ + Metrics: &tsapi.Metrics{Enable: metrics}, + StatefulSet: func() *tsapi.StatefulSet { + if debug == nil { + return nil + } + + return &tsapi.StatefulSet{ + Pod: &tsapi.Pod{ + TailscaleContainer: &tsapi.Container{ + Debug: &tsapi.Debug{Enable: *debug}, + }, + }, + } + }(), + }, + } } var userspaceProxySS, nonUserspaceProxySS appsv1.StatefulSet @@ -184,7 +200,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { gotSS := applyProxyClassToStatefulSet(proxyClassAllOpts, nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { - t.Fatalf("Unexpected result applying ProxyClass with all fields set to a StatefulSet for non-userspace proxy (-got +want):\n%s", diff) + t.Errorf("Unexpected result applying ProxyClass with all fields set to a StatefulSet for non-userspace proxy (-got +want):\n%s", diff) } // 2. 
Test that a ProxyClass with custom labels and annotations for @@ -197,7 +213,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Annotations = proxyClassJustLabels.Spec.StatefulSet.Pod.Annotations gotSS = applyProxyClassToStatefulSet(proxyClassJustLabels, nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { - t.Fatalf("Unexpected result applying ProxyClass with custom labels and annotations to a StatefulSet for non-userspace proxy (-got +want):\n%s", diff) + t.Errorf("Unexpected result applying ProxyClass with custom labels and annotations to a StatefulSet for non-userspace proxy (-got +want):\n%s", diff) } // 3. Test that a ProxyClass with all fields set gets correctly applied @@ -221,7 +237,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Spec.Containers[0].Image = "ghcr.io/my-repo/tailscale:v0.01testsomething" gotSS = applyProxyClassToStatefulSet(proxyClassAllOpts, userspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { - t.Fatalf("Unexpected result applying ProxyClass with all options to a StatefulSet for a userspace proxy (-got +want):\n%s", diff) + t.Errorf("Unexpected result applying ProxyClass with all options to a StatefulSet for a userspace proxy (-got +want):\n%s", diff) } // 4. Test that a ProxyClass with custom labels and annotations gets correctly applied @@ -233,16 +249,48 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Annotations = proxyClassJustLabels.Spec.StatefulSet.Pod.Annotations gotSS = applyProxyClassToStatefulSet(proxyClassJustLabels, userspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { - t.Fatalf("Unexpected result applying ProxyClass with custom labels and annotations to a StatefulSet for a userspace proxy (-got +want):\n%s", diff) + t.Errorf("Unexpected result applying ProxyClass with custom labels and annotations to a StatefulSet for a userspace proxy (-got +want):\n%s", diff) + } + + // 5. Metrics enabled defaults to enabling both metrics and debug. + wantSS = nonUserspaceProxySS.DeepCopy() + wantSS.Spec.Template.Spec.Containers[0].Env = append(wantSS.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{Name: "TS_DEBUG_ADDR_PORT", Value: "$(POD_IP):9001"}, + corev1.EnvVar{Name: "TS_TAILSCALED_EXTRA_ARGS", Value: "--debug=$(TS_DEBUG_ADDR_PORT)"}, + corev1.EnvVar{Name: "TS_LOCAL_ADDR_PORT", Value: "$(POD_IP):9002"}, + corev1.EnvVar{Name: "TS_METRICS_ENABLED", Value: "true"}, + ) + wantSS.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{ + {Name: "debug", Protocol: "TCP", ContainerPort: 9001}, + {Name: "metrics", Protocol: "TCP", ContainerPort: 9002}, + } + gotSS = applyProxyClassToStatefulSet(proxyClassWithMetricsDebug(true, nil), nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) + if diff := cmp.Diff(gotSS, wantSS); diff != "" { + t.Errorf("Unexpected result applying ProxyClass with metrics enabled to a StatefulSet (-got +want):\n%s", diff) + } + + // 6. Enable _just_ metrics by explicitly disabling debug. 
+ wantSS = nonUserspaceProxySS.DeepCopy() + wantSS.Spec.Template.Spec.Containers[0].Env = append(wantSS.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{Name: "TS_LOCAL_ADDR_PORT", Value: "$(POD_IP):9002"}, + corev1.EnvVar{Name: "TS_METRICS_ENABLED", Value: "true"}, + ) + wantSS.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{{Name: "metrics", Protocol: "TCP", ContainerPort: 9002}} + gotSS = applyProxyClassToStatefulSet(proxyClassWithMetricsDebug(true, ptr.To(false)), nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) + if diff := cmp.Diff(gotSS, wantSS); diff != "" { + t.Errorf("Unexpected result applying ProxyClass with metrics enabled to a StatefulSet (-got +want):\n%s", diff) } - // 5. Test that a ProxyClass with metrics enabled gets correctly applied to a StatefulSet. + // 7. Enable _just_ debug without metrics. wantSS = nonUserspaceProxySS.DeepCopy() - wantSS.Spec.Template.Spec.Containers[0].Env = append(wantSS.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{Name: "TS_TAILSCALED_EXTRA_ARGS", Value: "--debug=$(POD_IP):9001"}) - wantSS.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{{Name: "metrics", Protocol: "TCP", ContainerPort: 9001, HostPort: 9001}} - gotSS = applyProxyClassToStatefulSet(proxyClassMetrics, nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) + wantSS.Spec.Template.Spec.Containers[0].Env = append(wantSS.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{Name: "TS_DEBUG_ADDR_PORT", Value: "$(POD_IP):9001"}, + corev1.EnvVar{Name: "TS_TAILSCALED_EXTRA_ARGS", Value: "--debug=$(TS_DEBUG_ADDR_PORT)"}, + ) + wantSS.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{{Name: "debug", Protocol: "TCP", ContainerPort: 9001}} + gotSS = applyProxyClassToStatefulSet(proxyClassWithMetricsDebug(false, ptr.To(true)), nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { - t.Fatalf("Unexpected result applying ProxyClass with metrics enabled to a StatefulSet (-got +want):\n%s", diff) + t.Errorf("Unexpected result applying ProxyClass with metrics enabled to a StatefulSet (-got +want):\n%s", diff) } } diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 7b1aca314..640d8fb07 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -146,6 +146,7 @@ _Appears in:_ | `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#pullpolicy-v1-core)_ | Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | Enum: [Always Never IfNotPresent]
| | `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#resourcerequirements-v1-core)_ | Container resource requirements.
By default the Tailscale Kubernetes operator does not apply any resource
requirements. The amount of resources required will depend on the
amount of resources the operator needs to parse, usage patterns and
cluster size.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources | | | | `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#securitycontext-v1-core)_ | Container security context.
Security context specified here will override the security context set by the operator.
By default the operator:
- sets 'privileged: true' for the init container
- sets NET_ADMIN capability for the tailscale container for proxies that
are created for Services or Connector.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context | | | +| `debug` _[Debug](#debug)_ | Configuration for enabling extra debug information in the container.
Not recommended for production use. | | | #### DNSConfig @@ -248,6 +249,22 @@ _Appears in:_ | `nameserver` _[NameserverStatus](#nameserverstatus)_ | Nameserver describes the status of nameserver cluster resources. | | | +#### Debug + + + + + + + +_Appears in:_ +- [Container](#container) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `enable` _boolean_ | Enable tailscaled's HTTP pprof endpoints at :9001/debug/pprof/
and internal debug metrics endpoint at :9001/debug/metrics, where
9001 is a container port named "debug". The endpoints and their responses
may change in backwards incompatible ways in the future, and should not
be considered stable.
In 1.78.x and 1.80.x, this setting will default to the value of
.spec.metrics.enable, and requests to the "metrics" port matching the
mux pattern /debug/ will be forwarded to the "debug" port. In 1.82.x,
this setting will default to false, and no requests will be proxied. | | | + + #### Env @@ -309,7 +326,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `enable` _boolean_ | Setting enable to true will make the proxy serve Tailscale metrics
at :9001/debug/metrics.
Defaults to false. | | | +| `enable` _boolean_ | Setting enable to true will make the proxy serve Tailscale metrics
at :9002/metrics.
In 1.78.x and 1.80.x, this field also serves as the default value for
.spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both
fields will independently default to false.
Defaults to false. | | | #### Name diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index 0a224b796..7e408cd0a 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -163,7 +163,12 @@ type Pod struct { type Metrics struct { // Setting enable to true will make the proxy serve Tailscale metrics - // at :9001/debug/metrics. + // at :9002/metrics. + // + // In 1.78.x and 1.80.x, this field also serves as the default value for + // .spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both + // fields will independently default to false. + // // Defaults to false. Enable bool `json:"enable"` } @@ -209,6 +214,26 @@ type Container struct { // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context // +optional SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` + // Configuration for enabling extra debug information in the container. + // Not recommended for production use. + // +optional + Debug *Debug `json:"debug,omitempty"` +} + +type Debug struct { + // Enable tailscaled's HTTP pprof endpoints at :9001/debug/pprof/ + // and internal debug metrics endpoint at :9001/debug/metrics, where + // 9001 is a container port named "debug". The endpoints and their responses + // may change in backwards incompatible ways in the future, and should not + // be considered stable. + // + // In 1.78.x and 1.80.x, this setting will default to the value of + // .spec.metrics.enable, and requests to the "metrics" port matching the + // mux pattern /debug/ will be forwarded to the "debug" port. In 1.82.x, + // this setting will default to false, and no requests will be proxied. + // + // +optional + Enable bool `json:"enable"` } type Env struct { diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index c2f69dc04..07e46f3f5 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -163,6 +163,11 @@ func (in *Container) DeepCopyInto(out *Container) { *out = new(corev1.SecurityContext) (*in).DeepCopyInto(*out) } + if in.Debug != nil { + in, out := &in.Debug, &out.Debug + *out = new(Debug) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container. @@ -281,6 +286,21 @@ func (in *DNSConfigStatus) DeepCopy() *DNSConfigStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Debug) DeepCopyInto(out *Debug) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Debug. +func (in *Debug) DeepCopy() *Debug { + if in == nil { + return nil + } + out := new(Debug) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Env) DeepCopyInto(out *Env) { *out = *in From 462e1fc503fa2c26d8ff1a70a641ebb835ac9f8f Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 22 Nov 2024 08:25:54 -0600 Subject: [PATCH 0168/1708] ipn/{ipnlocal,localapi}, wgengine/netstack: call (*LocalBackend).Shutdown when tests that create them complete We have several places where LocalBackend instances are created for testing, but they are rarely shut down when the tests that created them exit. 
In this PR, we update newTestLocalBackend and similar functions to use testing.TB.Cleanup(lb.Shutdown) to ensure LocalBackend instances are properly shut down during test cleanup. Updates #12687 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local_test.go | 2 ++ ipn/ipnlocal/state_test.go | 3 +++ ipn/localapi/localapi_test.go | 1 + wgengine/netstack/netstack_test.go | 2 ++ 4 files changed, 8 insertions(+) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 6d25a418f..f30ff6adb 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -458,6 +458,7 @@ func newTestLocalBackendWithSys(t testing.TB, sys *tsd.System) *LocalBackend { if err != nil { t.Fatalf("NewLocalBackend: %v", err) } + t.Cleanup(lb.Shutdown) return lb } @@ -4109,6 +4110,7 @@ func newLocalBackendWithTestControl(t *testing.T, enableLogging bool, newControl if err != nil { t.Fatalf("NewLocalBackend: %v", err) } + t.Cleanup(b.Shutdown) b.DisablePortMapperForTest() b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index bebd0152b..ef4b0ed62 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -309,6 +309,7 @@ func TestStateMachine(t *testing.T) { if err != nil { t.Fatalf("NewLocalBackend: %v", err) } + t.Cleanup(b.Shutdown) b.DisablePortMapperForTest() var cc, previousCC *mockControl @@ -942,6 +943,7 @@ func TestEditPrefsHasNoKeys(t *testing.T) { if err != nil { t.Fatalf("NewLocalBackend: %v", err) } + t.Cleanup(b.Shutdown) b.hostinfo = &tailcfg.Hostinfo{OS: "testos"} b.pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ @@ -1023,6 +1025,7 @@ func TestWGEngineStatusRace(t *testing.T) { sys.Set(eng) b, err := NewLocalBackend(logf, logid.PublicID{}, sys, 0) c.Assert(err, qt.IsNil) + t.Cleanup(b.Shutdown) var cc *mockControl b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index d89c46261..145910830 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -349,6 +349,7 @@ func newTestLocalBackend(t testing.TB) *ipnlocal.LocalBackend { if err != nil { t.Fatalf("NewLocalBackend: %v", err) } + t.Cleanup(lb.Shutdown) return lb } diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go index a46dcf9dd..823acee91 100644 --- a/wgengine/netstack/netstack_test.go +++ b/wgengine/netstack/netstack_test.go @@ -64,6 +64,7 @@ func TestInjectInboundLeak(t *testing.T) { if err != nil { t.Fatal(err) } + t.Cleanup(lb.Shutdown) ns, err := Create(logf, tunWrap, eng, sys.MagicSock.Get(), dialer, sys.DNSManager.Get(), sys.ProxyMapper()) if err != nil { @@ -126,6 +127,7 @@ func makeNetstack(tb testing.TB, config func(*Impl)) *Impl { if err != nil { tb.Fatalf("NewLocalBackend: %v", err) } + tb.Cleanup(lb.Shutdown) ns.atomicIsLocalIPFunc.Store(func(netip.Addr) bool { return true }) if config != nil { From 8e5cfbe4ab11713e383b3ff0d978f116320de2a3 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 22 Nov 2024 09:05:01 -0600 Subject: [PATCH 0169/1708] util/syspolicy/rsop: reduce policyReloadMinDelay and policyReloadMaxDelay when in tests These delays determine how soon syspolicy change callbacks are invoked after a policy setting is updated in a policy source. For tests, we shorten these delays to minimize unnecessary wait times. 
This adjustment only affects tests that subscribe to policy change notifications and modify policy settings after they have already been set. Initial policy settings are always available immediately without delay. Updates #12687 Signed-off-by: Nick Khyl --- util/syspolicy/rsop/resultant_policy.go | 7 +++++++ util/syspolicy/rsop/resultant_policy_test.go | 13 ++++--------- util/syspolicy/rsop/store_registration.go | 4 ++++ 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/util/syspolicy/rsop/resultant_policy.go b/util/syspolicy/rsop/resultant_policy.go index 019b8f602..b811a00ee 100644 --- a/util/syspolicy/rsop/resultant_policy.go +++ b/util/syspolicy/rsop/resultant_policy.go @@ -11,6 +11,7 @@ import ( "sync/atomic" "time" + "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/setting" @@ -447,3 +448,9 @@ func (p *Policy) Close() { go p.closeInternal() } } + +func setForTest[T any](tb internal.TB, target *T, newValue T) { + oldValue := *target + tb.Cleanup(func() { *target = oldValue }) + *target = newValue +} diff --git a/util/syspolicy/rsop/resultant_policy_test.go b/util/syspolicy/rsop/resultant_policy_test.go index b2408c7f7..e4bfb1a88 100644 --- a/util/syspolicy/rsop/resultant_policy_test.go +++ b/util/syspolicy/rsop/resultant_policy_test.go @@ -574,9 +574,6 @@ func TestPolicyChangeHasChanged(t *testing.T) { } func TestChangePolicySetting(t *testing.T) { - setForTest(t, &policyReloadMinDelay, 100*time.Millisecond) - setForTest(t, &policyReloadMaxDelay, 500*time.Millisecond) - // Register policy settings used in this test. settingA := setting.NewDefinition("TestSettingA", setting.DeviceSetting, setting.StringValue) settingB := setting.NewDefinition("TestSettingB", setting.DeviceSetting, setting.StringValue) @@ -589,6 +586,10 @@ func TestChangePolicySetting(t *testing.T) { if _, err := RegisterStoreForTest(t, "TestSource", setting.DeviceScope, store); err != nil { t.Fatalf("Failed to register policy store: %v", err) } + + setForTest(t, &policyReloadMinDelay, 100*time.Millisecond) + setForTest(t, &policyReloadMaxDelay, 500*time.Millisecond) + policy, err := policyForTest(t, setting.DeviceScope) if err != nil { t.Fatalf("Failed to get effective policy: %v", err) @@ -978,9 +979,3 @@ func policyForTest(tb testing.TB, target setting.PolicyScope) (*Policy, error) { }) return policy, nil } - -func setForTest[T any](tb testing.TB, target *T, newValue T) { - oldValue := *target - tb.Cleanup(func() { *target = oldValue }) - *target = newValue -} diff --git a/util/syspolicy/rsop/store_registration.go b/util/syspolicy/rsop/store_registration.go index 09c83e988..f9836846e 100644 --- a/util/syspolicy/rsop/store_registration.go +++ b/util/syspolicy/rsop/store_registration.go @@ -7,6 +7,7 @@ import ( "errors" "sync" "sync/atomic" + "time" "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/setting" @@ -33,6 +34,9 @@ func RegisterStore(name string, scope setting.PolicyScope, store source.Store) ( // RegisterStoreForTest is like [RegisterStore], but unregisters the store when // tb and all its subtests complete. 
func RegisterStoreForTest(tb internal.TB, name string, scope setting.PolicyScope, store source.Store) (*StoreRegistration, error) { + setForTest(tb, &policyReloadMinDelay, 10*time.Millisecond) + setForTest(tb, &policyReloadMaxDelay, 500*time.Millisecond) + reg, err := RegisterStore(name, scope, store) if err == nil { tb.Cleanup(func() { From 50bf32a0ba13935273e200d52b9327821f25efc5 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 22 Nov 2024 09:35:55 -0600 Subject: [PATCH 0170/1708] cmd/tailscaled: flush DNS if FlushDNSOnSessionUnlock is true upon receiving a session change notification In this PR, we move the syspolicy.FlushDNSOnSessionUnlock check from service startup to when a session change notification is received. This ensures that the most recent policy setting value is used if it has changed since the service started. We also plan to handle session change notifications for unrelated reasons and need to decouple notification subscriptions from DNS anyway. Updates #12687 Updates tailscale/corp#18342 Signed-off-by: Nick Khyl --- cmd/tailscaled/tailscaled_windows.go | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 35c878f38..67f974465 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -160,10 +160,7 @@ func (service *ipnService) Execute(args []string, r <-chan svc.ChangeRequest, ch changes <- svc.Status{State: svc.StartPending} syslogf("Service start pending") - svcAccepts := svc.AcceptStop - if flushDNSOnSessionUnlock, _ := syspolicy.GetBoolean(syspolicy.FlushDNSOnSessionUnlock, false); flushDNSOnSessionUnlock { - svcAccepts |= svc.AcceptSessionChange - } + svcAccepts := svc.AcceptStop | svc.AcceptSessionChange ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -371,13 +368,15 @@ func handleSessionChange(chgRequest svc.ChangeRequest) { return } - log.Printf("Received WTS_SESSION_UNLOCK event, initiating DNS flush.") - go func() { - err := dns.Flush() - if err != nil { - log.Printf("Error flushing DNS on session unlock: %v", err) - } - }() + if flushDNSOnSessionUnlock, _ := syspolicy.GetBoolean(syspolicy.FlushDNSOnSessionUnlock, false); flushDNSOnSessionUnlock { + log.Printf("Received WTS_SESSION_UNLOCK event, initiating DNS flush.") + go func() { + err := dns.Flush() + if err != nil { + log.Printf("Error flushing DNS on session unlock: %v", err) + } + }() + } } var ( From 7c8f663d7059467353d9cd0fdae7b83bb1d4b998 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 22 Nov 2024 09:52:08 -0600 Subject: [PATCH 0171/1708] cmd/tailscaled: log SCM interactions if the policy setting is enabled at the time of interaction This updates the syspolicy.LogSCMInteractions check to run at the time of an interaction, just before logging a message, instead of during service startup. This ensures the most recent policy setting is used if it has changed since the service started. 
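In sketch form, the pattern is to consult the policy inside the logging
callback rather than once at startup (a standalone illustration only: the
real code writes to the Windows event log, which is replaced here by the
standard logger, and the syslogf helper below is simplified):

    package main

    import (
        "log"

        "tailscale.com/util/syspolicy"
    )

    // syslogf re-reads the LogSCMInteractions policy on every call, so a
    // policy change made after service startup takes effect immediately.
    func syslogf(format string, args ...any) {
        if logSCM, _ := syspolicy.GetBoolean(syspolicy.LogSCMInteractions, false); logSCM {
            log.Printf(format, args...)
        }
    }

    func main() {
        syslogf("Service entering svc.Run")
    }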
Updates #12687 Signed-off-by: Nick Khyl --- cmd/tailscaled/tailscaled_windows.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 67f974465..786c5d833 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -134,14 +134,13 @@ func runWindowsService(pol *logpolicy.Policy) error { logger.Logf(log.Printf).JSON(1, "SupportInfo", osdiag.SupportInfo(osdiag.LogSupportInfoReasonStartup)) }() - if logSCMInteractions, _ := syspolicy.GetBoolean(syspolicy.LogSCMInteractions, false); logSCMInteractions { - syslog, err := eventlog.Open(serviceName) - if err == nil { - syslogf = func(format string, args ...any) { + if syslog, err := eventlog.Open(serviceName); err == nil { + syslogf = func(format string, args ...any) { + if logSCMInteractions, _ := syspolicy.GetBoolean(syspolicy.LogSCMInteractions, false); logSCMInteractions { syslog.Info(0, fmt.Sprintf(format, args...)) } - defer syslog.Close() } + defer syslog.Close() } syslogf("Service entering svc.Run") From 2ab66d9698cc77f27598f3642be4159c36231c65 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 21 Nov 2024 19:29:20 -0600 Subject: [PATCH 0172/1708] ipn/ipnlocal: move syspolicy handling from setExitNodeID to applySysPolicy This moves code that handles ExitNodeID/ExitNodeIP syspolicy settings from (*LocalBackend).setExitNodeID to applySysPolicy. Updates #12687 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 71 ++++++++++++++++++++------------------ ipn/ipnlocal/local_test.go | 30 ++++++++++------ 2 files changed, 56 insertions(+), 45 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index cbbea32aa..7c0ddc90c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1489,10 +1489,10 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.logf("SetControlClientStatus failed to select auto exit node: %v", err) } } - if setExitNodeID(prefs, curNetMap, b.lastSuggestedExitNode) { + if applySysPolicy(prefs, b.lastSuggestedExitNode) { prefsChanged = true } - if applySysPolicy(prefs) { + if setExitNodeID(prefs, curNetMap) { prefsChanged = true } @@ -1658,12 +1658,37 @@ var preferencePolicies = []preferencePolicyInfo{ // applySysPolicy overwrites configured preferences with policies that may be // configured by the system administrator in an OS-specific way. -func applySysPolicy(prefs *ipn.Prefs) (anyChange bool) { +func applySysPolicy(prefs *ipn.Prefs, lastSuggestedExitNode tailcfg.StableNodeID) (anyChange bool) { if controlURL, err := syspolicy.GetString(syspolicy.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { prefs.ControlURL = controlURL anyChange = true } + if exitNodeIDStr, _ := syspolicy.GetString(syspolicy.ExitNodeID, ""); exitNodeIDStr != "" { + exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) + if shouldAutoExitNode() && lastSuggestedExitNode != "" { + exitNodeID = lastSuggestedExitNode + } + // Note: when exitNodeIDStr == "auto" && lastSuggestedExitNode == "", + // then exitNodeID is now "auto" which will never match a peer's node ID. + // When there is no a peer matching the node ID, traffic will blackhole, + // preventing accidental non-exit-node usage when a policy is in effect that requires an exit node. 
+ if prefs.ExitNodeID != exitNodeID || prefs.ExitNodeIP.IsValid() { + anyChange = true + } + prefs.ExitNodeID = exitNodeID + prefs.ExitNodeIP = netip.Addr{} + } else if exitNodeIPStr, _ := syspolicy.GetString(syspolicy.ExitNodeIP, ""); exitNodeIPStr != "" { + exitNodeIP, err := netip.ParseAddr(exitNodeIPStr) + if exitNodeIP.IsValid() && err == nil { + if prefs.ExitNodeID != "" || prefs.ExitNodeIP != exitNodeIP { + anyChange = true + } + prefs.ExitNodeID = "" + prefs.ExitNodeIP = exitNodeIP + } + } + for _, opt := range preferencePolicies { if po, err := syspolicy.GetPreferenceOption(opt.key); err == nil { curVal := opt.get(prefs.View()) @@ -1770,30 +1795,7 @@ func (b *LocalBackend) updateNetmapDeltaLocked(muts []netmap.NodeMutation) (hand // setExitNodeID updates prefs to reference an exit node by ID, rather // than by IP. It returns whether prefs was mutated. -func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap, lastSuggestedExitNode tailcfg.StableNodeID) (prefsChanged bool) { - if exitNodeIDStr, _ := syspolicy.GetString(syspolicy.ExitNodeID, ""); exitNodeIDStr != "" { - exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) - if shouldAutoExitNode() && lastSuggestedExitNode != "" { - exitNodeID = lastSuggestedExitNode - } - // Note: when exitNodeIDStr == "auto" && lastSuggestedExitNode == "", then exitNodeID is now "auto" which will never match a peer's node ID. - // When there is no a peer matching the node ID, traffic will blackhole, preventing accidental non-exit-node usage when a policy is in effect that requires an exit node. - changed := prefs.ExitNodeID != exitNodeID || prefs.ExitNodeIP.IsValid() - prefs.ExitNodeID = exitNodeID - prefs.ExitNodeIP = netip.Addr{} - return changed - } - - oldExitNodeID := prefs.ExitNodeID - if exitNodeIPStr, _ := syspolicy.GetString(syspolicy.ExitNodeIP, ""); exitNodeIPStr != "" { - exitNodeIP, err := netip.ParseAddr(exitNodeIPStr) - if exitNodeIP.IsValid() && err == nil { - prefsChanged = prefs.ExitNodeID != "" || prefs.ExitNodeIP != exitNodeIP - prefs.ExitNodeID = "" - prefs.ExitNodeIP = exitNodeIP - } - } - +func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bool) { if nm == nil { // No netmap, can't resolve anything. return false @@ -1811,6 +1813,7 @@ func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap, lastSuggestedExitNod prefsChanged = true } + oldExitNodeID := prefs.ExitNodeID for _, peer := range nm.Peers { for _, addr := range peer.Addresses().All() { if !addr.IsSingleIP() || addr.Addr() != prefs.ExitNodeIP { @@ -1820,7 +1823,7 @@ func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap, lastSuggestedExitNod // reference it directly for next time. prefs.ExitNodeID = peer.StableID() prefs.ExitNodeIP = netip.Addr{} - return oldExitNodeID != prefs.ExitNodeID + return prefsChanged || oldExitNodeID != prefs.ExitNodeID } } @@ -3844,12 +3847,12 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) if oldp.Valid() { newp.Persist = oldp.Persist().AsStruct() // caller isn't allowed to override this } - // setExitNodeID returns whether it updated b.prefs, but - // everything in this function treats b.prefs as completely new - // anyway. No-op if no exit node resolution is needed. - setExitNodeID(newp, netMap, b.lastSuggestedExitNode) - // applySysPolicy does likewise so we can also ignore its return value. 
- applySysPolicy(newp) + // applySysPolicyToPrefsLocked returns whether it updated newp, + // but everything in this function treats b.prefs as completely new + // anyway, so its return value can be ignored here. + applySysPolicy(newp, b.lastSuggestedExitNode) + // setExitNodeID does likewise. No-op if no exit node resolution is needed. + setExitNodeID(newp, netMap) // We do this to avoid holding the lock while doing everything else. oldHi := b.hostinfo diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index f30ff6adb..c5bd51265 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1789,10 +1789,13 @@ func TestSetExitNodeIDPolicy(t *testing.T) { t.Run(test.name, func(t *testing.T) { b := newTestBackend(t) - policyStore := source.NewTestStoreOf(t, - source.TestSettingOf(syspolicy.ExitNodeID, test.exitNodeID), - source.TestSettingOf(syspolicy.ExitNodeIP, test.exitNodeIP), - ) + policyStore := source.NewTestStore(t) + if test.exitNodeIDKey { + policyStore.SetStrings(source.TestSettingOf(syspolicy.ExitNodeID, test.exitNodeID)) + } + if test.exitNodeIPKey { + policyStore.SetStrings(source.TestSettingOf(syspolicy.ExitNodeIP, test.exitNodeIP)) + } syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) if test.nm == nil { @@ -1806,7 +1809,16 @@ func TestSetExitNodeIDPolicy(t *testing.T) { b.netMap = test.nm b.pm = pm b.lastSuggestedExitNode = test.lastSuggestedExitNode - changed := setExitNodeID(b.pm.prefs.AsStruct(), test.nm, tailcfg.StableNodeID(test.lastSuggestedExitNode)) + + prefs := b.pm.prefs.AsStruct() + if changed := applySysPolicy(prefs, test.lastSuggestedExitNode) || setExitNodeID(prefs, test.nm); changed != test.prefsChanged { + t.Errorf("wanted prefs changed %v, got prefs changed %v", test.prefsChanged, changed) + } + + // Both [LocalBackend.SetPrefsForTest] and [LocalBackend.EditPrefs] + // apply syspolicy settings to the current profile's preferences. Therefore, + // we pass the current, unmodified preferences and expect the effective + // preferences to change. 
b.SetPrefsForTest(pm.CurrentPrefs().AsStruct()) if got := b.pm.prefs.ExitNodeID(); got != tailcfg.StableNodeID(test.exitNodeIDWant) { @@ -1819,10 +1831,6 @@ func TestSetExitNodeIDPolicy(t *testing.T) { } else if got.String() != test.exitNodeIPWant { t.Errorf("got %v want %v", got, test.exitNodeIPWant) } - - if changed != test.prefsChanged { - t.Errorf("wanted prefs changed %v, got prefs changed %v", test.prefsChanged, changed) - } }) } } @@ -2332,7 +2340,7 @@ func TestApplySysPolicy(t *testing.T) { t.Run("unit", func(t *testing.T) { prefs := tt.prefs.Clone() - gotAnyChange := applySysPolicy(prefs) + gotAnyChange := applySysPolicy(prefs, "") if gotAnyChange && prefs.Equals(&tt.prefs) { t.Errorf("anyChange but prefs is unchanged: %v", prefs.Pretty()) @@ -2480,7 +2488,7 @@ func TestPreferencePolicyInfo(t *testing.T) { prefs := defaultPrefs.AsStruct() pp.set(prefs, tt.initialValue) - gotAnyChange := applySysPolicy(prefs) + gotAnyChange := applySysPolicy(prefs, "") if gotAnyChange != tt.wantChange { t.Errorf("anyChange=%v, want %v", gotAnyChange, tt.wantChange) From eb3cd3291106dc603316e4df65ad85cc0d3b3e6b Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 22 Nov 2024 08:45:53 -0600 Subject: [PATCH 0173/1708] ipn/ipnlocal: update ipn.Prefs when there's a change in syspolicy settings In this PR, we update ipnlocal.NewLocalBackend to subscribe to policy change notifications and reapply syspolicy settings to the current profile's ipn.Prefs whenever a change occurs. Updates #12687 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 102 ++++++++++++++++++++++-------- ipn/ipnlocal/local_test.go | 123 +++++++++++++++++++++++++++++++++++++ 2 files changed, 199 insertions(+), 26 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 7c0ddc90c..8763581f1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -106,6 +106,7 @@ import ( "tailscale.com/util/rands" "tailscale.com/util/set" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/systemd" "tailscale.com/util/testenv" "tailscale.com/util/uniq" @@ -178,27 +179,28 @@ type watchSession struct { // state machine generates events back out to zero or more components. type LocalBackend struct { // Elements that are thread-safe or constant after construction. 
- ctx context.Context // canceled by Close - ctxCancel context.CancelFunc // cancels ctx - logf logger.Logf // general logging - keyLogf logger.Logf // for printing list of peers on change - statsLogf logger.Logf // for printing peers stats on change - sys *tsd.System - health *health.Tracker // always non-nil - metrics metrics - e wgengine.Engine // non-nil; TODO(bradfitz): remove; use sys - store ipn.StateStore // non-nil; TODO(bradfitz): remove; use sys - dialer *tsdial.Dialer // non-nil; TODO(bradfitz): remove; use sys - pushDeviceToken syncs.AtomicValue[string] - backendLogID logid.PublicID - unregisterNetMon func() - unregisterHealthWatch func() - portpoll *portlist.Poller // may be nil - portpollOnce sync.Once // guards starting readPoller - varRoot string // or empty if SetVarRoot never called - logFlushFunc func() // or nil if SetLogFlusher wasn't called - em *expiryManager // non-nil - sshAtomicBool atomic.Bool + ctx context.Context // canceled by Close + ctxCancel context.CancelFunc // cancels ctx + logf logger.Logf // general logging + keyLogf logger.Logf // for printing list of peers on change + statsLogf logger.Logf // for printing peers stats on change + sys *tsd.System + health *health.Tracker // always non-nil + metrics metrics + e wgengine.Engine // non-nil; TODO(bradfitz): remove; use sys + store ipn.StateStore // non-nil; TODO(bradfitz): remove; use sys + dialer *tsdial.Dialer // non-nil; TODO(bradfitz): remove; use sys + pushDeviceToken syncs.AtomicValue[string] + backendLogID logid.PublicID + unregisterNetMon func() + unregisterHealthWatch func() + unregisterSysPolicyWatch func() + portpoll *portlist.Poller // may be nil + portpollOnce sync.Once // guards starting readPoller + varRoot string // or empty if SetVarRoot never called + logFlushFunc func() // or nil if SetLogFlusher wasn't called + em *expiryManager // non-nil + sshAtomicBool atomic.Bool // webClientAtomicBool controls whether the web client is running. This should // be true unless the disable-web-client node attribute has been set. webClientAtomicBool atomic.Bool @@ -410,7 +412,7 @@ type clientGen func(controlclient.Options) (controlclient.Client, error) // but is not actually running. // // If dialer is nil, a new one is made. -func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, loginFlags controlclient.LoginFlags) (*LocalBackend, error) { +func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, loginFlags controlclient.LoginFlags) (_ *LocalBackend, err error) { e := sys.Engine.Get() store := sys.StateStore.Get() dialer := sys.Dialer.Get() @@ -485,6 +487,15 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } } + if b.unregisterSysPolicyWatch, err = b.registerSysPolicyWatch(); err != nil { + return nil, err + } + defer func() { + if err != nil { + b.unregisterSysPolicyWatch() + } + }() + netMon := sys.NetMon.Get() b.sockstatLogger, err = sockstatlog.NewLogger(logpolicy.LogsDir(logf), logf, logID, netMon, sys.HealthTracker()) if err != nil { @@ -981,6 +992,7 @@ func (b *LocalBackend) Shutdown() { b.unregisterNetMon() b.unregisterHealthWatch() + b.unregisterSysPolicyWatch() if cc != nil { cc.Shutdown() } @@ -1703,6 +1715,40 @@ func applySysPolicy(prefs *ipn.Prefs, lastSuggestedExitNode tailcfg.StableNodeID return anyChange } +// registerSysPolicyWatch subscribes to syspolicy change notifications +// and immediately applies the effective syspolicy settings to the current profile. 
+func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { + if unregister, err = syspolicy.RegisterChangeCallback(b.sysPolicyChanged); err != nil { + return nil, fmt.Errorf("syspolicy: LocalBacked failed to register policy change callback: %v", err) + } + if prefs, anyChange := b.applySysPolicy(); anyChange { + b.logf("syspolicy: changed initial profile prefs: %v", prefs.Pretty()) + } + return unregister, nil +} + +// applySysPolicy overwrites the current profile's preferences with policies +// that may be configured by the system administrator in an OS-specific way. +// +// b.mu must not be held. +func (b *LocalBackend) applySysPolicy() (_ ipn.PrefsView, anyChange bool) { + unlock := b.lockAndGetUnlock() + prefs := b.pm.CurrentPrefs().AsStruct() + if !applySysPolicy(prefs, b.lastSuggestedExitNode) { + unlock.UnlockEarly() + return prefs.View(), false + } + return b.setPrefsLockedOnEntry(prefs, unlock), true +} + +// sysPolicyChanged is a callback triggered by syspolicy when it detects +// a change in one or more syspolicy settings. +func (b *LocalBackend) sysPolicyChanged(*rsop.PolicyChange) { + if prefs, anyChange := b.applySysPolicy(); anyChange { + b.logf("syspolicy: changed profile prefs: %v", prefs.Pretty()) + } +} + var _ controlclient.NetmapDeltaUpdater = (*LocalBackend)(nil) // UpdateNetmapDelta implements controlclient.NetmapDeltaUpdater. @@ -3889,10 +3935,14 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) } prefs := newp.View() - if err := b.pm.SetPrefs(prefs, ipn.NetworkProfile{ - MagicDNSName: b.netMap.MagicDNSSuffix(), - DomainName: b.netMap.DomainName(), - }); err != nil { + np := b.pm.CurrentProfile().NetworkProfile + if netMap != nil { + np = ipn.NetworkProfile{ + MagicDNSName: b.netMap.MagicDNSSuffix(), + DomainName: b.netMap.DomainName(), + } + } + if err := b.pm.SetPrefs(prefs, np); err != nil { b.logf("failed to save new controlclient state: %v", err) } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index c5bd51265..b1be86392 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4562,3 +4562,126 @@ func TestGetVIPServices(t *testing.T) { }) } } + +func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { + const enableLogging = false + + type fieldChange struct { + name string + want any + } + + wantPrefsChanges := func(want ...fieldChange) *wantedNotification { + return &wantedNotification{ + name: "Prefs", + cond: func(t testing.TB, actor ipnauth.Actor, n *ipn.Notify) bool { + if n.Prefs != nil { + prefs := reflect.Indirect(reflect.ValueOf(n.Prefs.AsStruct())) + for _, f := range want { + got := prefs.FieldByName(f.name).Interface() + if !reflect.DeepEqual(got, f.want) { + t.Errorf("%v: got %v; want %v", f.name, got, f.want) + } + } + } + return n.Prefs != nil + }, + } + } + + unexpectedPrefsChange := func(t testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool { + if n.Prefs != nil { + t.Errorf("Unexpected Prefs: %v", n.Prefs.Pretty()) + return true + } + return false + } + + tests := []struct { + name string + initialPrefs *ipn.Prefs + stringSettings []source.TestSetting[string] + want *wantedNotification + }{ + { + name: "ShieldsUp/True", + stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableIncomingConnections, "never")}, + want: wantPrefsChanges(fieldChange{"ShieldsUp", true}), + }, + { + name: "ShieldsUp/False", + initialPrefs: &ipn.Prefs{ShieldsUp: true}, + stringSettings: 
[]source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableIncomingConnections, "always")}, + want: wantPrefsChanges(fieldChange{"ShieldsUp", false}), + }, + { + name: "ExitNodeID", + stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.ExitNodeID, "foo")}, + want: wantPrefsChanges(fieldChange{"ExitNodeID", tailcfg.StableNodeID("foo")}), + }, + { + name: "EnableRunExitNode", + stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableRunExitNode, "always")}, + want: wantPrefsChanges(fieldChange{"AdvertiseRoutes", []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}}), + }, + { + name: "Multiple", + initialPrefs: &ipn.Prefs{ + ExitNodeAllowLANAccess: true, + }, + stringSettings: []source.TestSetting[string]{ + source.TestSettingOf(syspolicy.EnableServerMode, "always"), + source.TestSettingOf(syspolicy.ExitNodeAllowLANAccess, "never"), + source.TestSettingOf(syspolicy.ExitNodeIP, "127.0.0.1"), + }, + want: wantPrefsChanges( + fieldChange{"ForceDaemon", true}, + fieldChange{"ExitNodeAllowLANAccess", false}, + fieldChange{"ExitNodeIP", netip.MustParseAddr("127.0.0.1")}, + ), + }, + { + name: "NoChange", + initialPrefs: &ipn.Prefs{ + CorpDNS: true, + ExitNodeID: "foo", + AdvertiseRoutes: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}, + }, + stringSettings: []source.TestSetting[string]{ + source.TestSettingOf(syspolicy.EnableTailscaleDNS, "always"), + source.TestSettingOf(syspolicy.ExitNodeID, "foo"), + source.TestSettingOf(syspolicy.EnableRunExitNode, "always"), + }, + want: nil, // syspolicy settings match the preferences; no change notification is expected. + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + syspolicy.RegisterWellKnownSettingsForTest(t) + store := source.NewTestStoreOf[string](t) + syspolicy.MustRegisterStoreForTest(t, "TestSource", setting.DeviceScope, store) + + lb := newLocalBackendWithTestControl(t, enableLogging, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + return newClient(tb, opts) + }) + if tt.initialPrefs != nil { + lb.SetPrefsForTest(tt.initialPrefs) + } + if err := lb.Start(ipn.Options{}); err != nil { + t.Fatalf("(*LocalBackend).Start(): %v", err) + } + + nw := newNotificationWatcher(t, lb, &ipnauth.TestActor{}) + if tt.want != nil { + nw.watch(0, []wantedNotification{*tt.want}) + } else { + nw.watch(0, nil, unexpectedPrefsChange) + } + + store.SetStrings(tt.stringSettings...) + + nw.check() + }) + } +} From 3353f154bb341c9ed9e05ef21e5475f922986def Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 22 Nov 2024 09:28:56 -0600 Subject: [PATCH 0174/1708] control/controlclient: use the most recent syspolicy.MachineCertificateSubject value This PR removes the sync.Once wrapper around retrieving the MachineCertificateSubject policy setting value, ensuring the most recent version is always used if it changes after the service starts. Although this policy setting is used by a very limited number of customers, recent support escalations have highlighted issues caused by outdated or incorrect policy values being applied. 
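In simplified standalone form, the change swaps a cache-once accessor for a
read-through one (the cachedSubject/currentSubject names are illustrative
only; error handling is elided):

    package main

    import (
        "fmt"
        "sync"

        "tailscale.com/util/syspolicy"
    )

    // Before: the policy value was read once and cached for the lifetime of
    // the process, so later changes to the policy were never picked up.
    var subjectOnce struct {
        sync.Once
        v string
    }

    func cachedSubject() string {
        subjectOnce.Do(func() {
            subjectOnce.v, _ = syspolicy.GetString(syspolicy.MachineCertificateSubject, "")
        })
        return subjectOnce.v
    }

    // After: the policy source is consulted on each call, so the most recent
    // value is always used.
    func currentSubject() string {
        v, _ := syspolicy.GetString(syspolicy.MachineCertificateSubject, "")
        return v
    }

    func main() {
        fmt.Println(cachedSubject(), currentSubject())
    }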
Updates #12687 Signed-off-by: Nick Khyl --- control/controlclient/sign_supported.go | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/control/controlclient/sign_supported.go b/control/controlclient/sign_supported.go index 0e3dd038e..a5d42ad7d 100644 --- a/control/controlclient/sign_supported.go +++ b/control/controlclient/sign_supported.go @@ -13,7 +13,6 @@ import ( "crypto/x509" "errors" "fmt" - "sync" "time" "github.com/tailscale/certstore" @@ -22,11 +21,6 @@ import ( "tailscale.com/util/syspolicy" ) -var getMachineCertificateSubjectOnce struct { - sync.Once - v string // Subject of machine certificate to search for -} - // getMachineCertificateSubject returns the exact name of a Subject that needs // to be present in an identity's certificate chain to sign a RegisterRequest, // formatted as per pkix.Name.String(). The Subject may be that of the identity @@ -37,11 +31,8 @@ var getMachineCertificateSubjectOnce struct { // // Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA" func getMachineCertificateSubject() string { - getMachineCertificateSubjectOnce.Do(func() { - getMachineCertificateSubjectOnce.v, _ = syspolicy.GetString(syspolicy.MachineCertificateSubject, "") - }) - - return getMachineCertificateSubjectOnce.v + machineCertSubject, _ := syspolicy.GetString(syspolicy.MachineCertificateSubject, "") + return machineCertSubject } var ( From 36b7449feafcf5450261193c2507a07f0fedcfa0 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 22 Nov 2024 09:57:26 -0600 Subject: [PATCH 0175/1708] ipn/ipnlocal: rebuild allowed suggested exit nodes when syspolicy changes In this PR, we update LocalBackend to rebuild the set of allowed suggested exit nodes whenever the AllowedSuggestedExitNodes syspolicy setting changes. Additionally, we request a new suggested exit node when this occurs, enabling its use if the ExitNodeID syspolicy setting is set to auto:any. Updates #12687 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 43 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 5 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8763581f1..fdbd5cf52 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -87,7 +87,6 @@ import ( "tailscale.com/types/dnstype" "tailscale.com/types/empty" "tailscale.com/types/key" - "tailscale.com/types/lazy" "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/netmap" @@ -356,6 +355,12 @@ type LocalBackend struct { // avoid unnecessary churn between multiple equally-good options. lastSuggestedExitNode tailcfg.StableNodeID + // allowedSuggestedExitNodes is a set of exit nodes permitted by the most recent + // [syspolicy.AllowedSuggestedExitNodes] value. The allowedSuggestedExitNodesMu + // mutex guards access to this set. + allowedSuggestedExitNodesMu sync.Mutex + allowedSuggestedExitNodes set.Set[tailcfg.StableNodeID] + // refreshAutoExitNode indicates if the exit node should be recomputed when the next netcheck report is available. 
refreshAutoExitNode bool @@ -1724,6 +1729,7 @@ func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { if prefs, anyChange := b.applySysPolicy(); anyChange { b.logf("syspolicy: changed initial profile prefs: %v", prefs.Pretty()) } + b.refreshAllowedSuggestions() return unregister, nil } @@ -1743,7 +1749,20 @@ func (b *LocalBackend) applySysPolicy() (_ ipn.PrefsView, anyChange bool) { // sysPolicyChanged is a callback triggered by syspolicy when it detects // a change in one or more syspolicy settings. -func (b *LocalBackend) sysPolicyChanged(*rsop.PolicyChange) { +func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { + if policy.HasChanged(syspolicy.AllowedSuggestedExitNodes) { + b.refreshAllowedSuggestions() + // Re-evaluate exit node suggestion now that the policy setting has changed. + b.mu.Lock() + _, err := b.suggestExitNodeLocked(nil) + b.mu.Unlock() + if err != nil && !errors.Is(err, ErrNoPreferredDERP) { + b.logf("failed to select auto exit node: %v", err) + } + // If [syspolicy.ExitNodeID] is set to `auto:any`, the suggested exit node ID + // will be used when [applySysPolicy] updates the current profile's prefs. + } + if prefs, anyChange := b.applySysPolicy(); anyChange { b.logf("syspolicy: changed profile prefs: %v", prefs.Pretty()) } @@ -7197,7 +7216,7 @@ func (b *LocalBackend) suggestExitNodeLocked(netMap *netmap.NetworkMap) (respons lastReport := b.MagicConn().GetLastNetcheckReport(b.ctx) prevSuggestion := b.lastSuggestedExitNode - res, err := suggestExitNode(lastReport, netMap, prevSuggestion, randomRegion, randomNode, getAllowedSuggestions()) + res, err := suggestExitNode(lastReport, netMap, prevSuggestion, randomRegion, randomNode, b.getAllowedSuggestions()) if err != nil { return res, err } @@ -7211,6 +7230,22 @@ func (b *LocalBackend) SuggestExitNode() (response apitype.ExitNodeSuggestionRes return b.suggestExitNodeLocked(nil) } +// getAllowedSuggestions returns a set of exit nodes permitted by the most recent +// [syspolicy.AllowedSuggestedExitNodes] value. Callers must not mutate the returned set. +func (b *LocalBackend) getAllowedSuggestions() set.Set[tailcfg.StableNodeID] { + b.allowedSuggestedExitNodesMu.Lock() + defer b.allowedSuggestedExitNodesMu.Unlock() + return b.allowedSuggestedExitNodes +} + +// refreshAllowedSuggestions rebuilds the set of permitted exit nodes +// from the current [syspolicy.AllowedSuggestedExitNodes] value. +func (b *LocalBackend) refreshAllowedSuggestions() { + b.allowedSuggestedExitNodesMu.Lock() + defer b.allowedSuggestedExitNodesMu.Unlock() + b.allowedSuggestedExitNodes = fillAllowedSuggestions() +} + // selectRegionFunc returns a DERP region from the slice of candidate regions. // The value is returned, not the slice index. type selectRegionFunc func(views.Slice[int]) int @@ -7220,8 +7255,6 @@ type selectRegionFunc func(views.Slice[int]) int // choice. 
type selectNodeFunc func(nodes views.Slice[tailcfg.NodeView], last tailcfg.StableNodeID) tailcfg.NodeView -var getAllowedSuggestions = lazy.SyncFunc(fillAllowedSuggestions) - func fillAllowedSuggestions() set.Set[tailcfg.StableNodeID] { nodes, err := syspolicy.GetStringArray(syspolicy.AllowedSuggestedExitNodes, nil) if err != nil { From f6431185b0cd196acbefdda9fec523ed4d408aed Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 22 Nov 2024 14:26:42 -0800 Subject: [PATCH 0176/1708] net/netmon: catch ParseRIB panic to gather buffer data Updates #14201 Updates golang/go#70528 Signed-off-by: James Tucker --- net/netmon/netmon_darwin.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/net/netmon/netmon_darwin.go b/net/netmon/netmon_darwin.go index cc6301125..a5096889b 100644 --- a/net/netmon/netmon_darwin.go +++ b/net/netmon/netmon_darwin.go @@ -56,7 +56,15 @@ func (m *darwinRouteMon) Receive() (message, error) { if err != nil { return nil, err } - msgs, err := route.ParseRIB(route.RIBTypeRoute, m.buf[:n]) + msgs, err := func() (msgs []route.Message, err error) { + defer func() { + if recover() != nil { + msgs = nil + err = fmt.Errorf("panic parsing route message") + } + }() + return route.ParseRIB(route.RIBTypeRoute, m.buf[:n]) + }() if err != nil { if debugRouteMessages { m.logf("read %d bytes (% 02x), failed to parse RIB: %v", n, m.buf[:n], err) From ba3523fc3f62835bcddba683e37257ed7d53493c Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Sat, 23 Nov 2024 08:51:40 +0000 Subject: [PATCH 0177/1708] cmd/containerboot: preserve headers of metrics endpoints responses (#14204) Updates tailscale/tailscale#11292 Signed-off-by: Irbe Krumina --- cmd/containerboot/metrics.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/containerboot/metrics.go b/cmd/containerboot/metrics.go index e88406f97..874774d7a 100644 --- a/cmd/containerboot/metrics.go +++ b/cmd/containerboot/metrics.go @@ -38,12 +38,12 @@ func proxy(w http.ResponseWriter, r *http.Request, url string, do func(*http.Req } defer resp.Body.Close() - w.WriteHeader(resp.StatusCode) for key, val := range resp.Header { for _, v := range val { w.Header().Add(key, v) } } + w.WriteHeader(resp.StatusCode) if _, err := io.Copy(w, resp.Body); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } From 788121f47536f2947e514370b45eaa1029a54488 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 25 Nov 2024 10:10:32 -0600 Subject: [PATCH 0178/1708] docs/windows/policy: update ADMX policy definitions to reflect the syspolicy settings We add a policy definition for the AllowedSuggestedExitNodes syspolicy setting, allowing admins to configure a list of exit node IDs to be used as a pool for automatic suggested exit node selection. We update definitions for policy settings configurable on both a per-user and per-machine basis, such as UI customizations, to specify class="Both". Lastly, we update the help text for existing policy definitions to include a link to the KB article as the last line instead of in the first paragraph. 
Updates #12687 Updates tailscale/corp#19681 Signed-off-by: Nick Khyl --- docs/windows/policy/en-US/tailscale.adml | 111 ++++++++++++++--------- docs/windows/policy/tailscale.admx | 31 +++++-- 2 files changed, 91 insertions(+), 51 deletions(-) diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index 7a658422c..ebf1a5905 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -15,16 +15,18 @@ Tailscale version 1.58.0 and later Tailscale version 1.62.0 and later Tailscale version 1.74.0 and later + Tailscale version 1.78.0 and later Tailscale UI customization Settings Require using a specific Tailscale coordination server +If you disable or do not configure this policy, the Tailscale SaaS coordination server will be used by default, but a non-standard Tailscale coordination server can be configured using the CLI. + +See https://tailscale.com/kb/1315/mdm-keys#set-a-custom-control-server-url for more details.]]> Require using a specific Tailscale log server Specify which Tailnet should be used for Login +See https://tailscale.com/kb/1315/mdm-keys#set-a-suggested-or-required-tailnet for more details.]]> Specify the auth key to authenticate devices without user interaction Require using a specific Exit Node +If you do not configure this policy, no exit node will be used by default but an exit node (if one is available and permitted by ACLs) can be chosen by the user if desired. + +See https://tailscale.com/kb/1315/mdm-keys#force-an-exit-node-to-always-be-used and https://tailscale.com/kb/1103/exit-nodes for more details.]]> + Limit automated Exit Node suggestions to specific nodes + Allow incoming connections +If you do not configure this policy, then Allow Incoming Connections depends on what is selected in the Preferences submenu. + +See https://tailscale.com/kb/1315/mdm-keys#set-whether-to-allow-incoming-connections and https://tailscale.com/kb/1072/client-preferences#allow-incoming-connections for more details.]]> Run Tailscale in Unattended Mode +If you do not configure this policy, then Run Unattended depends on what is selected in the Preferences submenu. + +See https://tailscale.com/kb/1315/mdm-keys#set-unattended-mode and https://tailscale.com/kb/1088/run-unattended for more details.]]> Allow Local Network Access when an Exit Node is in use +If you do not configure this policy, then Allow Local Network Access depends on what is selected in the Exit Node submenu. + +See https://tailscale.com/kb/1315/mdm-keys#toggle-local-network-access-when-an-exit-node-is-in-use and https://tailscale.com/kb/1103/exit-nodes#step-4-use-the-exit-node for more details.]]> Use Tailscale DNS Settings +If you do not configure this policy, then Use Tailscale DNS depends on what is selected in the Preferences submenu. + +See https://tailscale.com/kb/1315/mdm-keys#set-whether-the-device-uses-tailscale-dns-settings for more details.]]> Use Tailscale Subnets +If you do not configure this policy, then Use Tailscale Subnets depends on what is selected in the Preferences submenu. + +See https://tailscale.com/kb/1315/mdm-keys#set-whether-the-device-accepts-tailscale-subnets or https://tailscale.com/kb/1019/subnets for more details.]]> Automatically install updates +If you do not configure this policy, then Automatically Install Updates depends on what is selected in the Preferences submenu. 
+ +See https://tailscale.com/kb/1067/update#auto-updates for more details.]]> Run Tailscale as an Exit Node - Show the "Admin Panel" menu item - + Show the "Admin Console" menu item + Show the "Debug" submenu +If you disable this policy, the Debug submenu will be hidden from the Tailscale menu. + +See https://tailscale.com/kb/1315/mdm-keys#hide-the-debug-menu for more details.]]> Show the "Update Available" menu item +If you disable this policy, the Update Available item will be hidden from the Tailscale menu. + +See https://tailscale.com/kb/1315/mdm-keys#hide-the-update-menu for more details.]]> Show the "Run Exit Node" menu item +If you disable this policy, the Run Exit Node item will be hidden from the Exit Node submenu. + +See https://tailscale.com/kb/1315/mdm-keys#hide-the-run-as-exit-node-menu-item for more details.]]> Show the "Preferences" submenu +If you disable this policy, the Preferences submenu will be hidden from the Tailscale menu. + +See https://tailscale.com/kb/1315/mdm-keys#hide-the-preferences-menu for more details.]]> Show the "Exit Node" submenu +If you disable this policy, the Exit Node submenu will be hidden from the Tailscale menu. + +See https://tailscale.com/kb/1315/mdm-keys#hide-the-exit-node-picker for more details.]]> Specify a custom key expiration notification time +If you disable or don't configure this policy, the default time period will be used (as of Tailscale 1.56, this is 24 hours). + +See https://tailscale.com/kb/1315/mdm-keys#set-the-key-expiration-notice-period for more details.]]> Log extra details about service events Collect data for posture checking +If you do not configure this policy, then data collection depends on if it has been enabled from the CLI (as of Tailscale 1.56), it may be present in the GUI in later versions. + +See https://tailscale.com/kb/1315/mdm-keys#enable-gathering-device-posture-data and https://tailscale.com/kb/1326/device-identity for more details.]]> Show the "Managed By {Organization}" menu item Exit Node: + + Target IDs: + diff --git a/docs/windows/policy/tailscale.admx b/docs/windows/policy/tailscale.admx index e70f124ed..f941525c4 100644 --- a/docs/windows/policy/tailscale.admx +++ b/docs/windows/policy/tailscale.admx @@ -50,6 +50,10 @@ displayName="$(string.SINCE_V1_74)"> + + + @@ -94,7 +98,14 @@ - + > + + + + + + + @@ -197,7 +208,7 @@ - + @@ -207,7 +218,7 @@ hide - + @@ -217,7 +228,7 @@ hide - + @@ -227,7 +238,7 @@ hide - + @@ -237,7 +248,7 @@ hide - + @@ -247,7 +258,7 @@ hide - + @@ -257,7 +268,7 @@ hide - + @@ -267,7 +278,7 @@ hide - + @@ -276,7 +287,7 @@ - + From 4d33f30f91eb7debdf90c8770990801f3857e30c Mon Sep 17 00:00:00 2001 From: James Tucker Date: Mon, 25 Nov 2024 12:00:16 -0800 Subject: [PATCH 0179/1708] net/netmon: improve panic reporting from #14202 I was hoping we'd catch an example input quickly, but the reporter had rebooted their machine and it is no longer exhibiting the behavior. As such this code may be sticking around quite a bit longer and we might encounter other errors, so include the panic in the log entry. 
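For reference, the recover-to-error pattern these netmon changes rely on is plain Go; a minimal, self-contained sketch, with riskyParse standing in for a parser such as route.ParseRIB that can panic on malformed input (all names here are illustrative):

package main

import (
	"fmt"
	"log"
)

// parseSafely wraps a call that may panic and converts the panic into an
// error, so the caller can log it together with the offending input instead
// of crashing the process.
func parseSafely(buf []byte) (n int, err error) {
	defer func() {
		if r := recover(); r != nil {
			n = 0
			err = fmt.Errorf("panic while parsing %d bytes: %v", len(buf), r)
		}
	}()
	return riskyParse(buf), nil
}

// riskyParse panics on short input to simulate the failure mode being
// guarded against.
func riskyParse(buf []byte) int {
	if len(buf) < 4 {
		panic("short buffer")
	}
	return int(buf[0])
}

func main() {
	if _, err := parseSafely([]byte{0x01}); err != nil {
		log.Printf("recovered: %v", err)
	}
}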
Updates #14201 Updates #14202 Updates golang/go#70528 Signed-off-by: James Tucker --- net/netmon/netmon_darwin.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/net/netmon/netmon_darwin.go b/net/netmon/netmon_darwin.go index a5096889b..e89e2d047 100644 --- a/net/netmon/netmon_darwin.go +++ b/net/netmon/netmon_darwin.go @@ -58,9 +58,12 @@ func (m *darwinRouteMon) Receive() (message, error) { } msgs, err := func() (msgs []route.Message, err error) { defer func() { - if recover() != nil { + // TODO(raggi,#14201): remove once we've got a fix from + // golang/go#70528. + msg := recover() + if msg != nil { msgs = nil - err = fmt.Errorf("panic parsing route message") + err = fmt.Errorf("panic in route.ParseRIB: %s", msg) } }() return route.ParseRIB(route.RIBTypeRoute, m.buf[:n]) From 26de518413277e0869b815c373f694f6b5d18562 Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Tue, 26 Nov 2024 10:45:03 -0700 Subject: [PATCH 0180/1708] ipn/ipnlocal: only check CanUseExitNode if we are attempting to use one (#14230) In https://github.com/tailscale/tailscale/pull/13726 we added logic to `checkExitNodePrefsLocked` to error out on platforms where using an exit node is unsupported in order to give users more obvious feedback than having this silently fail downstream. The above change neglected to properly check whether the device in question was actually trying to use an exit node when doing the check and was incorrectly returning an error on any calls to `checkExitNodePrefsLocked` on platforms where using an exit node is not supported as a result. This change remedies this by adding a check to see whether the device is attempting to use an exit node before doing the `CanUseExitNode` check. Updates https://github.com/tailscale/corp/issues/24835 Signed-off-by: Mario Minardi --- ipn/ipnlocal/local.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index fdbd5cf52..278614c0b 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3740,11 +3740,16 @@ func updateExitNodeUsageWarning(p ipn.PrefsView, state *netmon.State, healthTrac } func (b *LocalBackend) checkExitNodePrefsLocked(p *ipn.Prefs) error { + tryingToUseExitNode := p.ExitNodeIP.IsValid() || p.ExitNodeID != "" + if !tryingToUseExitNode { + return nil + } + if err := featureknob.CanUseExitNode(); err != nil { return err } - if (p.ExitNodeIP.IsValid() || p.ExitNodeID != "") && p.AdvertisesExitNode() { + if p.AdvertisesExitNode() { return errors.New("Cannot advertise an exit node and use an exit node at the same time.") } return nil From a62f7183e4f121a66a7ab32b474d7c5b3f349286 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 26 Nov 2024 13:11:55 -0600 Subject: [PATCH 0181/1708] cmd/tailscale/cli: fix format string Updates #12687 Signed-off-by: Nick Khyl --- cmd/tailscale/cli/syspolicy.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/tailscale/cli/syspolicy.go b/cmd/tailscale/cli/syspolicy.go index 06a19defb..0e903db39 100644 --- a/cmd/tailscale/cli/syspolicy.go +++ b/cmd/tailscale/cli/syspolicy.go @@ -98,9 +98,9 @@ func printPolicySettings(policy *setting.Snapshot) { origin = o.String() } if err := setting.Error(); err != nil { - fmt.Fprintf(w, "%s\t%s\t\t{%s}\n", k, origin, err) + fmt.Fprintf(w, "%s\t%s\t\t{%v}\n", k, origin, err) } else { - fmt.Fprintf(w, "%s\t%s\t%s\t\n", k, origin, setting.Value()) + fmt.Fprintf(w, "%s\t%s\t%v\t\n", k, origin, setting.Value()) } } w.Flush() From 
e87b71ec3c7bded3fadf44cb9374df5de5e213d6 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Tue, 26 Nov 2024 17:50:29 -0500 Subject: [PATCH 0182/1708] control/controlhttp: set *health.Tracker in tests Observed during another PR: https://github.com/tailscale/tailscale/actions/runs/12040045880/job/33569141807 Updates #cleanup Signed-off-by: Andrew Dunham Change-Id: I9e0f49a35485fa2e097892737e5e3c95bf775a90 --- control/controlhttp/http_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/control/controlhttp/http_test.go b/control/controlhttp/http_test.go index 00cc1e6cf..aef916ef6 100644 --- a/control/controlhttp/http_test.go +++ b/control/controlhttp/http_test.go @@ -25,6 +25,7 @@ import ( "tailscale.com/control/controlbase" "tailscale.com/control/controlhttp/controlhttpcommon" "tailscale.com/control/controlhttp/controlhttpserver" + "tailscale.com/health" "tailscale.com/net/dnscache" "tailscale.com/net/netmon" "tailscale.com/net/socks5" @@ -228,6 +229,7 @@ func testControlHTTP(t *testing.T, param httpTestParam) { omitCertErrorLogging: true, testFallbackDelay: fallbackDelay, Clock: clock, + HealthTracker: new(health.Tracker), } if param.httpInDial { @@ -729,6 +731,7 @@ func TestDialPlan(t *testing.T) { omitCertErrorLogging: true, testFallbackDelay: 50 * time.Millisecond, Clock: clock, + HealthTracker: new(health.Tracker), } conn, err := a.dial(ctx) From bb80f14ff42c0e167eb34d65428a63a81d1090a2 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Tue, 26 Nov 2024 18:13:17 +0000 Subject: [PATCH 0183/1708] ipn/localapi: count localapi requests to metric endpoints Updates tailscale/corp#22075 Signed-off-by: Anton Tolchanov --- ipn/localapi/localapi.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index dc8c08975..ea931b028 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -563,6 +563,7 @@ func (h *Handler) serveLogTap(w http.ResponseWriter, r *http.Request) { } func (h *Handler) serveMetrics(w http.ResponseWriter, r *http.Request) { + metricDebugMetricsCalls.Add(1) // Require write access out of paranoia that the metrics // might contain something sensitive. if !h.PermitWrite { @@ -576,6 +577,7 @@ func (h *Handler) serveMetrics(w http.ResponseWriter, r *http.Request) { // serveUserMetrics returns user-facing metrics in Prometheus text // exposition format. func (h *Handler) serveUserMetrics(w http.ResponseWriter, r *http.Request) { + metricUserMetricsCalls.Add(1) h.b.UserMetricsRegistry().Handler(w, r) } @@ -2972,7 +2974,9 @@ var ( metricInvalidRequests = clientmetric.NewCounter("localapi_invalid_requests") // User-visible LocalAPI endpoints. - metricFilePutCalls = clientmetric.NewCounter("localapi_file_put") + metricFilePutCalls = clientmetric.NewCounter("localapi_file_put") + metricDebugMetricsCalls = clientmetric.NewCounter("localapi_debugmetric_requests") + metricUserMetricsCalls = clientmetric.NewCounter("localapi_usermetric_requests") ) // serveSuggestExitNode serves a POST endpoint for returning a suggested exit node. From bac3af06f5a2e7dcf2976e4d8e846eab0a52b514 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 27 Nov 2024 11:18:04 -0800 Subject: [PATCH 0184/1708] logtail: avoid bytes.Buffer allocation (#11858) Re-use a pre-allocated bytes.Buffer struct and shallow the copy the result of bytes.NewBuffer into it to avoid allocating the struct. Note that we're only reusing the bytes.Buffer struct itself and not the underling []byte temporarily stored within it. 
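The allocation trick is subtle enough to merit a standalone illustration; a rough sketch of the same idea outside logtail, using only the standard library (the scanner type is made up for the example):

package main

import (
	"bytes"
	"fmt"
	"io"
)

// scanner keeps a bytes.Buffer field around so that wrapping a []byte in an
// io.Reader need not heap-allocate a new Buffer struct on every call
// (escape analysis permitting).
type scanner struct {
	buf bytes.Buffer // reused across calls to firstByte
}

// firstByte reads the first byte of src through an io.Reader, reusing s.buf.
func (s *scanner) firstByte(src []byte) (byte, error) {
	// Shallow-copy the result of bytes.NewBuffer into the pre-allocated
	// field; only the struct is reused, never the underlying []byte.
	s.buf = *bytes.NewBuffer(src)
	defer func() { s.buf = bytes.Buffer{} }() // avoid pinning src

	var one [1]byte
	if _, err := io.ReadFull(&s.buf, one[:]); err != nil {
		return 0, err
	}
	return one[0], nil
}

func main() {
	var s scanner
	b, err := s.firstByte([]byte("hello"))
	fmt.Println(b, err) // 104 <nil>
}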
Updates #cleanup Updates tailscale/corp#18514 Updates golang/go#67004 Signed-off-by: Joe Tsai --- logtail/logtail.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/logtail/logtail.go b/logtail/logtail.go index 9df164273..13e8e85fd 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -213,6 +213,7 @@ type Logger struct { procSequence uint64 flushTimer tstime.TimerController // used when flushDelay is >0 writeBuf [bufferSize]byte // owned by Write for reuse + bytesBuf bytes.Buffer // owned by appendTextOrJSONLocked for reuse jsonDec jsontext.Decoder // owned by appendTextOrJSONLocked for reuse shutdownStartMu sync.Mutex // guards the closing of shutdownStart @@ -725,9 +726,16 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { // whether it contains the reserved "logtail" name at the top-level. var logtailKeyOffset, logtailValOffset, logtailValLength int validJSON := func() bool { - // TODO(dsnet): Avoid allocation of bytes.Buffer struct. + // The jsontext.NewDecoder API operates on an io.Reader, for which + // bytes.Buffer provides a means to convert a []byte into an io.Reader. + // However, bytes.NewBuffer normally allocates unless + // we immediately shallow copy it into a pre-allocated Buffer struct. + // See https://go.dev/issue/67004. + l.bytesBuf = *bytes.NewBuffer(src) + defer func() { l.bytesBuf = bytes.Buffer{} }() // avoid pinning src + dec := &l.jsonDec - dec.Reset(bytes.NewBuffer(src)) + dec.Reset(&l.bytesBuf) if tok, err := dec.ReadToken(); tok.Kind() != '{' || err != nil { return false } From 41e56cedf8eea406e48ec1def6e7ea13a0c303fd Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Wed, 20 Nov 2024 11:46:14 +0100 Subject: [PATCH 0185/1708] health: move health metrics test to health_test Updates #13420 Signed-off-by: Kristoffer Dalby --- health/health.go | 4 +++- health/health_test.go | 48 ++++++++++++++++++++++++++++++++++++++++++- tsnet/tsnet_test.go | 31 ---------------------------- 3 files changed, 50 insertions(+), 33 deletions(-) diff --git a/health/health.go b/health/health.go index 3bebcb983..079b3195c 100644 --- a/health/health.go +++ b/health/health.go @@ -331,7 +331,7 @@ func (t *Tracker) SetMetricsRegistry(reg *usermetric.Registry) { ) t.metricHealthMessage.Set(metricHealthMessageLabel{ - Type: "warning", + Type: MetricLabelWarning, }, expvar.Func(func() any { if t.nil() { return 0 @@ -1283,6 +1283,8 @@ func (t *Tracker) LastNoiseDialWasRecent() bool { return dur < 2*time.Minute } +const MetricLabelWarning = "warning" + type metricHealthMessageLabel struct { // TODO: break down by warnable.severity as well? 
Type string diff --git a/health/health_test.go b/health/health_test.go index 8107c1cf0..69e586066 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -7,11 +7,13 @@ import ( "fmt" "reflect" "slices" + "strconv" "testing" "time" "tailscale.com/tailcfg" "tailscale.com/types/opt" + "tailscale.com/util/usermetric" ) func TestAppendWarnableDebugFlags(t *testing.T) { @@ -273,7 +275,7 @@ func TestShowUpdateWarnable(t *testing.T) { wantShow bool }{ { - desc: "nil CientVersion", + desc: "nil ClientVersion", check: true, cv: nil, wantWarnable: nil, @@ -348,3 +350,47 @@ func TestShowUpdateWarnable(t *testing.T) { }) } } + +func TestHealthMetric(t *testing.T) { + tests := []struct { + desc string + check bool + apply opt.Bool + cv *tailcfg.ClientVersion + wantMetricCount int + }{ + // When running in dev, and not initialising the client, there will be two warnings + // by default: + // - is-using-unstable-version + // - wantrunning-false + { + desc: "base-warnings", + check: true, + cv: nil, + wantMetricCount: 2, + }, + // with: update-available + { + desc: "update-warning", + check: true, + cv: &tailcfg.ClientVersion{RunningLatest: false, LatestVersion: "1.2.3"}, + wantMetricCount: 3, + }, + } + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + tr := &Tracker{ + checkForUpdates: tt.check, + applyUpdates: tt.apply, + latestVersion: tt.cv, + } + tr.SetMetricsRegistry(&usermetric.Registry{}) + if val := tr.metricHealthMessage.Get(metricHealthMessageLabel{Type: MetricLabelWarning}).String(); val != strconv.Itoa(tt.wantMetricCount) { + t.Fatalf("metric value: %q, want: %q", val, strconv.Itoa(tt.wantMetricCount)) + } + for _, w := range tr.CurrentState().Warnings { + t.Logf("warning: %v", w) + } + }) + } +} diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 7aebbdd4c..0f904ad2d 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -38,7 +38,6 @@ import ( "golang.org/x/net/proxy" "tailscale.com/client/tailscale" "tailscale.com/cmd/testwrapper/flakytest" - "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" "tailscale.com/net/netns" @@ -822,16 +821,6 @@ func TestUDPConn(t *testing.T) { } } -// testWarnable is a Warnable that is used within this package for testing purposes only. -var testWarnable = health.Register(&health.Warnable{ - Code: "test-warnable-tsnet", - Title: "Test warnable", - Severity: health.SeverityLow, - Text: func(args health.Args) string { - return args[health.ArgError] - }, -}) - func parseMetrics(m []byte) (map[string]float64, error) { metrics := make(map[string]float64) @@ -1045,11 +1034,6 @@ func TestUserMetrics(t *testing.T) { t.Fatal(err) } - status1, err := lc1.Status(ctxLc) - if err != nil { - t.Fatal(err) - } - parsedMetrics1, err := parseMetrics(metrics1) if err != nil { t.Fatal(err) @@ -1075,11 +1059,6 @@ func TestUserMetrics(t *testing.T) { t.Errorf("metrics1, tailscaled_approved_routes: got %v, want %v", got, want) } - // Validate the health counter metric against the status of the node - if got, want := parsedMetrics1[`tailscaled_health_messages{type="warning"}`], float64(len(status1.Health)); got != want { - t.Errorf("metrics1, tailscaled_health_messages: got %v, want %v", got, want) - } - // Verify that the amount of data recorded in bytes is higher or equal to the // 10 megabytes sent. 
inboundBytes1 := parsedMetrics1[`tailscaled_inbound_bytes_total{path="direct_ipv4"}`] @@ -1097,11 +1076,6 @@ func TestUserMetrics(t *testing.T) { t.Fatal(err) } - status2, err := lc2.Status(ctx) - if err != nil { - t.Fatal(err) - } - parsedMetrics2, err := parseMetrics(metrics2) if err != nil { t.Fatal(err) @@ -1119,11 +1093,6 @@ func TestUserMetrics(t *testing.T) { t.Errorf("metrics2, tailscaled_approved_routes: got %v, want %v", got, want) } - // Validate the health counter metric against the status of the node - if got, want := parsedMetrics2[`tailscaled_health_messages{type="warning"}`], float64(len(status2.Health)); got != want { - t.Errorf("metrics2, tailscaled_health_messages: got %v, want %v", got, want) - } - // Verify that the amount of data recorded in bytes is higher or equal than the // 10 megabytes sent. outboundBytes2 := parsedMetrics2[`tailscaled_outbound_bytes_total{path="direct_ipv4"}`] From 06d929f9ac87b0683a55ebd004d15899a0122f71 Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Mon, 25 Nov 2024 10:15:04 +0100 Subject: [PATCH 0186/1708] tsnet: send less data in metrics integration test this commit reduced the amount of data sent in the metrics data integration test from 10MB to 1MB. On various machines 10MB was quite flaky, while 1MB has not failed once on 10000 runs. Updates #13420 Signed-off-by: Kristoffer Dalby --- tsnet/tsnet_test.go | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 0f904ad2d..aae034b61 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -1015,8 +1015,8 @@ func TestUserMetrics(t *testing.T) { mustDirect(t, t.Logf, lc1, lc2) - // 10 megabytes - bytesToSend := 10 * 1024 * 1024 + // 1 megabytes + bytesToSend := 1 * 1024 * 1024 // This asserts generates some traffic, it is factored out // of TestUDPConn. @@ -1059,14 +1059,13 @@ func TestUserMetrics(t *testing.T) { t.Errorf("metrics1, tailscaled_approved_routes: got %v, want %v", got, want) } - // Verify that the amount of data recorded in bytes is higher or equal to the - // 10 megabytes sent. + // Verify that the amount of data recorded in bytes is higher or equal to the data sent inboundBytes1 := parsedMetrics1[`tailscaled_inbound_bytes_total{path="direct_ipv4"}`] if inboundBytes1 < float64(bytesToSend) { t.Errorf(`metrics1, tailscaled_inbound_bytes_total{path="direct_ipv4"}: expected higher (or equal) than %d, got: %f`, bytesToSend, inboundBytes1) } - // But ensure that it is not too much higher than the 10 megabytes sent. + // But ensure that it is not too much higher than the data sent. if inboundBytes1 > float64(bytesToSend)*bytesSentTolerance { t.Errorf(`metrics1, tailscaled_inbound_bytes_total{path="direct_ipv4"}: expected lower than %f, got: %f`, float64(bytesToSend)*bytesSentTolerance, inboundBytes1) } @@ -1093,14 +1092,13 @@ func TestUserMetrics(t *testing.T) { t.Errorf("metrics2, tailscaled_approved_routes: got %v, want %v", got, want) } - // Verify that the amount of data recorded in bytes is higher or equal than the - // 10 megabytes sent. + // Verify that the amount of data recorded in bytes is higher or equal than the data sent. outboundBytes2 := parsedMetrics2[`tailscaled_outbound_bytes_total{path="direct_ipv4"}`] if outboundBytes2 < float64(bytesToSend) { t.Errorf(`metrics2, tailscaled_outbound_bytes_total{path="direct_ipv4"}: expected higher (or equal) than %d, got: %f`, bytesToSend, outboundBytes2) } - // But ensure that it is not too much higher than the 10 megabytes sent. 
+ // But ensure that it is not too much higher than the data sent. if outboundBytes2 > float64(bytesToSend)*bytesSentTolerance { t.Errorf(`metrics2, tailscaled_outbound_bytes_total{path="direct_ipv4"}: expected lower than %f, got: %f`, float64(bytesToSend)*bytesSentTolerance, outboundBytes2) } From e55899386b1f6d9f4b02d7a3349efdf83e162504 Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Mon, 25 Nov 2024 13:36:37 +0100 Subject: [PATCH 0187/1708] tsnet: split bytes and routes metrics tests Updates #13420 Signed-off-by: Kristoffer Dalby --- tsnet/tsnet_test.go | 184 +++++++++++++++++++++++++++++--------------- 1 file changed, 123 insertions(+), 61 deletions(-) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index aae034b61..dbd010ce6 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -936,16 +936,136 @@ func sendData(logf func(format string, args ...any), ctx context.Context, bytesC return nil } -func TestUserMetrics(t *testing.T) { +func TestUserMetricsByteCounters(t *testing.T) { flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/13420") tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) defer cancel() - controlURL, c := startControl(t) - s1, s1ip, s1PubKey := startServer(t, ctx, controlURL, "s1") + controlURL, _ := startControl(t) + s1, s1ip, _ := startServer(t, ctx, controlURL, "s1") s2, s2ip, _ := startServer(t, ctx, controlURL, "s2") + lc1, err := s1.LocalClient() + if err != nil { + t.Fatal(err) + } + + lc2, err := s2.LocalClient() + if err != nil { + t.Fatal(err) + } + + // Force an update to the netmap to ensure that the metrics are up-to-date. + s1.lb.DebugForceNetmapUpdate() + s2.lb.DebugForceNetmapUpdate() + + // Wait for both nodes to have a peer in their netmap. + waitForCondition(t, "waiting for netmaps to contain peer", 90*time.Second, func() bool { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + status1, err := lc1.Status(ctx) + if err != nil { + t.Logf("getting status: %s", err) + return false + } + status2, err := lc2.Status(ctx) + if err != nil { + t.Logf("getting status: %s", err) + return false + } + return len(status1.Peers()) > 0 && len(status2.Peers()) > 0 + }) + + // ping to make sure the connection is up. + res, err := lc2.Ping(ctx, s1ip, tailcfg.PingICMP) + if err != nil { + t.Fatalf("pinging: %s", err) + } + t.Logf("ping success: %#+v", res) + + mustDirect(t, t.Logf, lc1, lc2) + + // 1 megabytes + bytesToSend := 1 * 1024 * 1024 + + // This asserts generates some traffic, it is factored out + // of TestUDPConn. + start := time.Now() + err = sendData(t.Logf, ctx, bytesToSend, s1, s2, s1ip, s2ip) + if err != nil { + t.Fatalf("Failed to send packets: %v", err) + } + t.Logf("Sent %d bytes from s1 to s2 in %s", bytesToSend, time.Since(start).String()) + + ctxLc, cancelLc := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelLc() + metrics1, err := lc1.UserMetrics(ctxLc) + if err != nil { + t.Fatal(err) + } + + parsedMetrics1, err := parseMetrics(metrics1) + if err != nil { + t.Fatal(err) + } + + // Allow the metrics for the bytes sent to be off by 15%. 
+ bytesSentTolerance := 1.15 + + t.Logf("Metrics1:\n%s\n", metrics1) + + // Verify that the amount of data recorded in bytes is higher or equal to the data sent + inboundBytes1 := parsedMetrics1[`tailscaled_inbound_bytes_total{path="direct_ipv4"}`] + if inboundBytes1 < float64(bytesToSend) { + t.Errorf(`metrics1, tailscaled_inbound_bytes_total{path="direct_ipv4"}: expected higher (or equal) than %d, got: %f`, bytesToSend, inboundBytes1) + } + + // But ensure that it is not too much higher than the data sent. + if inboundBytes1 > float64(bytesToSend)*bytesSentTolerance { + t.Errorf(`metrics1, tailscaled_inbound_bytes_total{path="direct_ipv4"}: expected lower than %f, got: %f`, float64(bytesToSend)*bytesSentTolerance, inboundBytes1) + } + + metrics2, err := lc2.UserMetrics(ctx) + if err != nil { + t.Fatal(err) + } + + parsedMetrics2, err := parseMetrics(metrics2) + if err != nil { + t.Fatal(err) + } + + t.Logf("Metrics2:\n%s\n", metrics2) + + // Verify that the amount of data recorded in bytes is higher or equal than the data sent. + outboundBytes2 := parsedMetrics2[`tailscaled_outbound_bytes_total{path="direct_ipv4"}`] + if outboundBytes2 < float64(bytesToSend) { + t.Errorf(`metrics2, tailscaled_outbound_bytes_total{path="direct_ipv4"}: expected higher (or equal) than %d, got: %f`, bytesToSend, outboundBytes2) + } + + // But ensure that it is not too much higher than the data sent. + if outboundBytes2 > float64(bytesToSend)*bytesSentTolerance { + t.Errorf(`metrics2, tailscaled_outbound_bytes_total{path="direct_ipv4"}: expected lower than %f, got: %f`, float64(bytesToSend)*bytesSentTolerance, outboundBytes2) + } +} + +func TestUserMetricsRouteGauges(t *testing.T) { + // Windows does not seem to support or report back routes when running in + // userspace via tsnet. So, we skip this check on Windows. + // TODO(kradalby): Figure out if this is correct. + if runtime.GOOS == "windows" { + t.Skipf("skipping on windows") + } + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/13420") + tstest.ResourceCheck(t) + ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel() + + controlURL, c := startControl(t) + s1, _, s1PubKey := startServer(t, ctx, controlURL, "s1") + s2, _, _ := startServer(t, ctx, controlURL, "s2") + s1.lb.EditPrefs(&ipn.MaskedPrefs{ Prefs: ipn.Prefs{ AdvertiseRoutes: []netip.Prefix{ @@ -973,24 +1093,11 @@ func TestUserMetrics(t *testing.T) { t.Fatal(err) } - // ping to make sure the connection is up. - res, err := lc2.Ping(ctx, s1ip, tailcfg.PingICMP) - if err != nil { - t.Fatalf("pinging: %s", err) - } - t.Logf("ping success: %#+v", res) - - ht := s1.lb.HealthTracker() - ht.SetUnhealthy(testWarnable, health.Args{"Text": "Hello world 1"}) - // Force an update to the netmap to ensure that the metrics are up-to-date. s1.lb.DebugForceNetmapUpdate() s2.lb.DebugForceNetmapUpdate() wantRoutes := float64(2) - if runtime.GOOS == "windows" { - wantRoutes = 0 - } // Wait for the routes to be propagated to node 1 to ensure // that the metrics are up-to-date. @@ -1002,31 +1109,11 @@ func TestUserMetrics(t *testing.T) { t.Logf("getting status: %s", err) return false } - if runtime.GOOS == "windows" { - // Windows does not seem to support or report back routes when running in - // userspace via tsnet. So, we skip this check on Windows. - // TODO(kradalby): Figure out if this is correct. 
- return true - } // Wait for the primary routes to reach our desired routes, which is wantRoutes + 1, because // the PrimaryRoutes list will contain a exit node route, which the metric does not count. return status1.Self.PrimaryRoutes != nil && status1.Self.PrimaryRoutes.Len() == int(wantRoutes)+1 }) - mustDirect(t, t.Logf, lc1, lc2) - - // 1 megabytes - bytesToSend := 1 * 1024 * 1024 - - // This asserts generates some traffic, it is factored out - // of TestUDPConn. - start := time.Now() - err = sendData(t.Logf, ctx, bytesToSend, s1, s2, s1ip, s2ip) - if err != nil { - t.Fatalf("Failed to send packets: %v", err) - } - t.Logf("Sent %d bytes from s1 to s2 in %s", bytesToSend, time.Since(start).String()) - ctxLc, cancelLc := context.WithTimeout(context.Background(), 5*time.Second) defer cancelLc() metrics1, err := lc1.UserMetrics(ctxLc) @@ -1039,9 +1126,6 @@ func TestUserMetrics(t *testing.T) { t.Fatal(err) } - // Allow the metrics for the bytes sent to be off by 15%. - bytesSentTolerance := 1.15 - t.Logf("Metrics1:\n%s\n", metrics1) // The node is advertising 4 routes: @@ -1059,17 +1143,6 @@ func TestUserMetrics(t *testing.T) { t.Errorf("metrics1, tailscaled_approved_routes: got %v, want %v", got, want) } - // Verify that the amount of data recorded in bytes is higher or equal to the data sent - inboundBytes1 := parsedMetrics1[`tailscaled_inbound_bytes_total{path="direct_ipv4"}`] - if inboundBytes1 < float64(bytesToSend) { - t.Errorf(`metrics1, tailscaled_inbound_bytes_total{path="direct_ipv4"}: expected higher (or equal) than %d, got: %f`, bytesToSend, inboundBytes1) - } - - // But ensure that it is not too much higher than the data sent. - if inboundBytes1 > float64(bytesToSend)*bytesSentTolerance { - t.Errorf(`metrics1, tailscaled_inbound_bytes_total{path="direct_ipv4"}: expected lower than %f, got: %f`, float64(bytesToSend)*bytesSentTolerance, inboundBytes1) - } - metrics2, err := lc2.UserMetrics(ctx) if err != nil { t.Fatal(err) @@ -1091,17 +1164,6 @@ func TestUserMetrics(t *testing.T) { if got, want := parsedMetrics2["tailscaled_approved_routes"], 0.0; got != want { t.Errorf("metrics2, tailscaled_approved_routes: got %v, want %v", got, want) } - - // Verify that the amount of data recorded in bytes is higher or equal than the data sent. - outboundBytes2 := parsedMetrics2[`tailscaled_outbound_bytes_total{path="direct_ipv4"}`] - if outboundBytes2 < float64(bytesToSend) { - t.Errorf(`metrics2, tailscaled_outbound_bytes_total{path="direct_ipv4"}: expected higher (or equal) than %d, got: %f`, bytesToSend, outboundBytes2) - } - - // But ensure that it is not too much higher than the data sent. 
- if outboundBytes2 > float64(bytesToSend)*bytesSentTolerance { - t.Errorf(`metrics2, tailscaled_outbound_bytes_total{path="direct_ipv4"}: expected lower than %f, got: %f`, float64(bytesToSend)*bytesSentTolerance, outboundBytes2) - } } func waitForCondition(t *testing.T, msg string, waitTime time.Duration, f func() bool) { From 225d8f5a881f01d3cb3ec05a56d6134188061d71 Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Mon, 25 Nov 2024 14:14:08 +0100 Subject: [PATCH 0188/1708] tsnet: validate sent data in metrics test Updates #13420 Signed-off-by: Kristoffer Dalby --- tsnet/tsnet_test.go | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index dbd010ce6..fea68f6d4 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -894,9 +894,11 @@ func sendData(logf func(format string, args ...any), ctx context.Context, bytesC for { got := make([]byte, bytesCount) n, err := conn.Read(got) - if n != bytesCount { - logf("read %d bytes, want %d", n, bytesCount) + if err != nil { + allReceived <- fmt.Errorf("failed reading packet, %s", err) + return } + got = got[:n] select { case <-stopReceive: @@ -904,13 +906,17 @@ func sendData(logf func(format string, args ...any), ctx context.Context, bytesC default: } - if err != nil { - allReceived <- fmt.Errorf("failed reading packet, %s", err) - return - } - total += n logf("received %d/%d bytes, %.2f %%", total, bytesCount, (float64(total) / (float64(bytesCount)) * 100)) + + // Validate the received bytes to be the same as the sent bytes. + for _, b := range string(got) { + if b != 'A' { + allReceived <- fmt.Errorf("received unexpected byte: %c", b) + return + } + } + if total == bytesCount { break } From caba123008359e0987232a554277a64504be3f6c Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Mon, 25 Nov 2024 16:00:21 +0100 Subject: [PATCH 0189/1708] wgengine/magicsock: packet/bytes metrics should not count disco Updates #13420 Signed-off-by: Kristoffer Dalby --- wgengine/magicsock/magicsock.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index c361608ad..805716e61 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1267,7 +1267,7 @@ func (c *Conn) sendUDPBatch(addr netip.AddrPort, buffs [][]byte) (sent bool, err // sendUDP sends UDP packet b to ipp. // See sendAddr's docs on the return value meanings. -func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte) (sent bool, err error) { +func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte, isDisco bool) (sent bool, err error) { if runtime.GOOS == "js" { return false, errNoUDP } @@ -1276,7 +1276,7 @@ func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte) (sent bool, err error) { metricSendUDPError.Add(1) _ = c.maybeRebindOnError(runtime.GOOS, err) } else { - if sent { + if sent && !isDisco { switch { case ipp.Addr().Is4(): c.metrics.outboundPacketsIPv4Total.Add(1) @@ -1371,7 +1371,7 @@ func (c *Conn) sendUDPStd(addr netip.AddrPort, b []byte) (sent bool, err error) // returns (false, nil); it's not an error, but nothing was sent. 
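	// A single conn.Read may legitimately return fewer than len(got) bytes on
	// a stream connection, so the removed n != bytesCount log above was mostly
	// noise; the added lines below instead truncate got to the bytes actually
	// read and validate them as they arrive. A more compact variant (a sketch,
	// not part of this patch) that skips progress logging could block for the
	// whole payload with io.ReadFull:
	//
	//	got := make([]byte, bytesCount)
	//	if _, err := io.ReadFull(conn, got); err != nil {
	//		allReceived <- fmt.Errorf("failed reading packet, %s", err)
	//		return
	//	}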
func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte, isDisco bool) (sent bool, err error) { if addr.Addr() != tailcfg.DerpMagicIPAddr { - return c.sendUDP(addr, b) + return c.sendUDP(addr, b, isDisco) } regionID := int(addr.Port()) From 61dd2662eca775c8a3f6700a0194db5816de5049 Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Thu, 28 Nov 2024 12:45:40 +0100 Subject: [PATCH 0190/1708] tsnet: remove flaky test marker from metrics Updates #13420 Signed-off-by: Kristoffer Dalby --- tsnet/tsnet_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index fea68f6d4..14d600817 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -943,14 +943,14 @@ func sendData(logf func(format string, args ...any), ctx context.Context, bytesC } func TestUserMetricsByteCounters(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/13420") - tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) defer cancel() controlURL, _ := startControl(t) s1, s1ip, _ := startServer(t, ctx, controlURL, "s1") + defer s1.Close() s2, s2ip, _ := startServer(t, ctx, controlURL, "s2") + defer s2.Close() lc1, err := s1.LocalClient() if err != nil { @@ -1063,14 +1063,14 @@ func TestUserMetricsRouteGauges(t *testing.T) { if runtime.GOOS == "windows" { t.Skipf("skipping on windows") } - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/13420") - tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) defer cancel() controlURL, c := startControl(t) s1, _, s1PubKey := startServer(t, ctx, controlURL, "s1") + defer s1.Close() s2, _, _ := startServer(t, ctx, controlURL, "s2") + defer s2.Close() s1.lb.EditPrefs(&ipn.MaskedPrefs{ Prefs: ipn.Prefs{ From f8587e321ead0889b05a46ca03beef6f75f9d65d Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 29 Nov 2024 10:37:25 +0000 Subject: [PATCH 0191/1708] cmd/k8s-operator: fix port name change bug for egress ProxyGroup proxies (#14247) Ensure that the ExternalName Service port names are always synced to the ClusterIP Service, to fix a bug where if users created a Service with a single unnamed port and later changed to 1+ named ports, the operator attempted to apply an invalid multi-port Service with an unnamed port. Also, fixes a small internal issue where not-yet Service status conditons were lost on a spec update. 
Updates tailscale/tailscale#10102 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/egress-services.go | 24 +++++++- cmd/k8s-operator/egress-services_test.go | 75 +++++++++++++++++------- cmd/k8s-operator/testutils_test.go | 2 +- 3 files changed, 77 insertions(+), 24 deletions(-) diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index 98ed94366..a562f0170 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -136,9 +136,8 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re } if !slices.Contains(svc.Finalizers, FinalizerName) { - l.Infof("configuring tailnet service") // logged exactly once svc.Finalizers = append(svc.Finalizers, FinalizerName) - if err := esr.Update(ctx, svc); err != nil { + if err := esr.updateSvcSpec(ctx, svc); err != nil { err := fmt.Errorf("failed to add finalizer: %w", err) r := svcConfiguredReason(svc, false, l) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, l) @@ -198,7 +197,7 @@ func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1 if svc.Spec.ExternalName != clusterIPSvcFQDN { l.Infof("Configuring ExternalName Service to point to ClusterIP Service %s", clusterIPSvcFQDN) svc.Spec.ExternalName = clusterIPSvcFQDN - if err = esr.Update(ctx, svc); err != nil { + if err = esr.updateSvcSpec(ctx, svc); err != nil { err = fmt.Errorf("error updating ExternalName Service: %w", err) return err } @@ -222,6 +221,15 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s found := false for _, wantsPM := range svc.Spec.Ports { if wantsPM.Port == pm.Port && strings.EqualFold(string(wantsPM.Protocol), string(pm.Protocol)) { + // We don't use the port name to distinguish this port internally, but Kubernetes + // require that, for Service ports with more than one name each port is uniquely named. + // So we can always pick the port name from the ExternalName Service as at this point we + // know that those are valid names because Kuberentes already validated it once. Note + // that users could have changed an unnamed port to a named port and might have changed + // port names- this should still work. + // https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services + // See also https://github.com/tailscale/tailscale/issues/13406#issuecomment-2507230388 + clusterIPSvc.Spec.Ports[i].Name = wantsPM.Name found = true break } @@ -714,3 +722,13 @@ func epsPortsFromSvc(svc *corev1.Service) (ep []discoveryv1.EndpointPort) { } return ep } + +// updateSvcSpec ensures that the given Service's spec is updated in cluster, but the local Service object still retains +// the not-yet-applied status. +// TODO(irbekrm): once we do SSA for these patch updates, this will no longer be needed. +func (esr *egressSvcsReconciler) updateSvcSpec(ctx context.Context, svc *corev1.Service) error { + st := svc.Status.DeepCopy() + err := esr.Update(ctx, svc) + svc.Status = *st + return err +} diff --git a/cmd/k8s-operator/egress-services_test.go b/cmd/k8s-operator/egress-services_test.go index ac7733985..06fe977ec 100644 --- a/cmd/k8s-operator/egress-services_test.go +++ b/cmd/k8s-operator/egress-services_test.go @@ -105,28 +105,40 @@ func TestTailscaleEgressServices(t *testing.T) { condition(tsapi.ProxyGroupReady, metav1.ConditionTrue, "", "", clock), } }) - // Quirks of the fake client. 
- mustUpdateStatus(t, fc, "default", "test", func(svc *corev1.Service) { - svc.Status.Conditions = []metav1.Condition{} + expectReconciled(t, esr, "default", "test") + validateReadyService(t, fc, esr, svc, clock, zl, cm) + }) + t.Run("service_retain_one_unnamed_port", func(t *testing.T) { + svc.Spec.Ports = []corev1.ServicePort{{Protocol: "TCP", Port: 80}} + mustUpdate(t, fc, "default", "test", func(s *corev1.Service) { + s.Spec.Ports = svc.Spec.Ports }) expectReconciled(t, esr, "default", "test") - // Verify that a ClusterIP Service has been created. - name := findGenNameForEgressSvcResources(t, fc, svc) - expectEqual(t, fc, clusterIPSvc(name, svc), removeTargetPortsFromSvc) - clusterSvc := mustGetClusterIPSvc(t, fc, name) - // Verify that an EndpointSlice has been created. - expectEqual(t, fc, endpointSlice(name, svc, clusterSvc), nil) - // Verify that ConfigMap contains configuration for the new egress service. - mustHaveConfigForSvc(t, fc, svc, clusterSvc, cm) - r := svcConfiguredReason(svc, true, zl.Sugar()) - // Verify that the user-created ExternalName Service has Configured set to true and ExternalName pointing to the - // CluterIP Service. - svc.Status.Conditions = []metav1.Condition{ - condition(tsapi.EgressSvcConfigured, metav1.ConditionTrue, r, r, clock), - } - svc.ObjectMeta.Finalizers = []string{"tailscale.com/finalizer"} - svc.Spec.ExternalName = fmt.Sprintf("%s.operator-ns.svc.cluster.local", name) - expectEqual(t, fc, svc, nil) + validateReadyService(t, fc, esr, svc, clock, zl, cm) + }) + t.Run("service_add_two_named_ports", func(t *testing.T) { + svc.Spec.Ports = []corev1.ServicePort{{Protocol: "TCP", Port: 80, Name: "http"}, {Protocol: "TCP", Port: 443, Name: "https"}} + mustUpdate(t, fc, "default", "test", func(s *corev1.Service) { + s.Spec.Ports = svc.Spec.Ports + }) + expectReconciled(t, esr, "default", "test") + validateReadyService(t, fc, esr, svc, clock, zl, cm) + }) + t.Run("service_add_udp_port", func(t *testing.T) { + svc.Spec.Ports = append(svc.Spec.Ports, corev1.ServicePort{Port: 53, Protocol: "UDP", Name: "dns"}) + mustUpdate(t, fc, "default", "test", func(s *corev1.Service) { + s.Spec.Ports = svc.Spec.Ports + }) + expectReconciled(t, esr, "default", "test") + validateReadyService(t, fc, esr, svc, clock, zl, cm) + }) + t.Run("service_change_protocol", func(t *testing.T) { + svc.Spec.Ports = []corev1.ServicePort{{Protocol: "TCP", Port: 80, Name: "http"}, {Protocol: "TCP", Port: 443, Name: "https"}, {Port: 53, Protocol: "TCP", Name: "tcp_dns"}} + mustUpdate(t, fc, "default", "test", func(s *corev1.Service) { + s.Spec.Ports = svc.Spec.Ports + }) + expectReconciled(t, esr, "default", "test") + validateReadyService(t, fc, esr, svc, clock, zl, cm) }) t.Run("delete_external_name_service", func(t *testing.T) { @@ -143,6 +155,29 @@ func TestTailscaleEgressServices(t *testing.T) { }) } +func validateReadyService(t *testing.T, fc client.WithWatch, esr *egressSvcsReconciler, svc *corev1.Service, clock *tstest.Clock, zl *zap.Logger, cm *corev1.ConfigMap) { + expectReconciled(t, esr, "default", "test") + // Verify that a ClusterIP Service has been created. + name := findGenNameForEgressSvcResources(t, fc, svc) + expectEqual(t, fc, clusterIPSvc(name, svc), removeTargetPortsFromSvc) + clusterSvc := mustGetClusterIPSvc(t, fc, name) + // Verify that an EndpointSlice has been created. + expectEqual(t, fc, endpointSlice(name, svc, clusterSvc), nil) + // Verify that ConfigMap contains configuration for the new egress service. 
+ mustHaveConfigForSvc(t, fc, svc, clusterSvc, cm) + r := svcConfiguredReason(svc, true, zl.Sugar()) + // Verify that the user-created ExternalName Service has Configured set to true and ExternalName pointing to the + // CluterIP Service. + svc.Status.Conditions = []metav1.Condition{ + condition(tsapi.EgressSvcValid, metav1.ConditionTrue, "EgressSvcValid", "EgressSvcValid", clock), + condition(tsapi.EgressSvcConfigured, metav1.ConditionTrue, r, r, clock), + } + svc.ObjectMeta.Finalizers = []string{"tailscale.com/finalizer"} + svc.Spec.ExternalName = fmt.Sprintf("%s.operator-ns.svc.cluster.local", name) + expectEqual(t, fc, svc, nil) + +} + func condition(typ tsapi.ConditionType, st metav1.ConditionStatus, r, msg string, clock tstime.Clock) metav1.Condition { return metav1.Condition{ Type: string(typ), diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 084f573e5..5795a0aae 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -650,7 +650,7 @@ func removeHashAnnotation(sts *appsv1.StatefulSet) { func removeTargetPortsFromSvc(svc *corev1.Service) { newPorts := make([]corev1.ServicePort, 0) for _, p := range svc.Spec.Ports { - newPorts = append(newPorts, corev1.ServicePort{Protocol: p.Protocol, Port: p.Port}) + newPorts = append(newPorts, corev1.ServicePort{Protocol: p.Protocol, Port: p.Port, Name: p.Name}) } svc.Spec.Ports = newPorts } From 44c8892c1818d777423e58464686659d67756451 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 29 Nov 2024 15:32:18 +0000 Subject: [PATCH 0192/1708] Makefile,./build_docker.sh: update kube operator image build target name (#14251) Updates tailscale/corp#24540 Updates tailscale/tailscale#12914 Signed-off-by: Irbe Krumina --- Makefile | 2 +- build_docker.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 98c3d36cc..960f13885 100644 --- a/Makefile +++ b/Makefile @@ -100,7 +100,7 @@ publishdevoperator: ## Build and publish k8s-operator image to location specifie @test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) @test "${REPO}" != "tailscale/k8s-operator" || (echo "REPO=... must not be tailscale/k8s-operator" && exit 1) @test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-operator" && exit 1) - TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=operator ./build_docker.sh + TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-operator ./build_docker.sh publishdevnameserver: ## Build and publish k8s-nameserver image to location specified by ${REPO} @test -n "${REPO}" || (echo "REPO=... required; e.g. 
REPO=ghcr.io/${USER}/tailscale" && exit 1) diff --git a/build_docker.sh b/build_docker.sh index 9f39eb08d..f9632ea0a 100755 --- a/build_docker.sh +++ b/build_docker.sh @@ -54,7 +54,7 @@ case "$TARGET" in --annotations="${ANNOTATIONS}" \ /usr/local/bin/containerboot ;; - operator) + k8s-operator) DEFAULT_REPOS="tailscale/k8s-operator" REPOS="${REPOS:-${DEFAULT_REPOS}}" go run github.com/tailscale/mkctr \ From 13faa64c142148b1f8c8afd22d61e4a0de651b98 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 29 Nov 2024 15:44:58 +0000 Subject: [PATCH 0193/1708] cmd/k8s-operator: always set stateful filtering to false (#14216) Updates tailscale/tailscale#12108 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/operator_test.go | 4 ++-- cmd/k8s-operator/sts.go | 9 +-------- cmd/k8s-operator/testutils_test.go | 20 ++++++++------------ 3 files changed, 11 insertions(+), 22 deletions(-) diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 21ef08e52..e46cdd7fe 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1388,7 +1388,7 @@ func TestTailscaledConfigfileHash(t *testing.T) { parentType: "svc", hostname: "default-test", clusterTargetIP: "10.20.30.40", - confFileHash: "a67b5ad3ff605531c822327e8f1a23dd0846e1075b722c13402f7d5d0ba32ba2", + confFileHash: "acf3467364b0a3ba9b8ee0dd772cb7c2f0bf585e288fa99b7fe4566009ed6041", app: kubetypes.AppIngressProxy, } expectEqual(t, fc, expectedSTS(t, fc, o), nil) @@ -1399,7 +1399,7 @@ func TestTailscaledConfigfileHash(t *testing.T) { mak.Set(&svc.Annotations, AnnotationHostname, "another-test") }) o.hostname = "another-test" - o.confFileHash = "888a993ebee20ad6be99623b45015339de117946850cf1252bede0b570e04293" + o.confFileHash = "d4cc13f09f55f4f6775689004f9a466723325b84d2b590692796bfe22aeaa389" expectReconciled(t, sr, "default", "test") expectEqual(t, fc, expectedSTS(t, fc, o), nil) } diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 5df476478..b12b1cdd0 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -854,17 +854,10 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co AcceptRoutes: "false", // AcceptRoutes defaults to true Locked: "false", Hostname: &stsC.Hostname, - NoStatefulFiltering: "false", + NoStatefulFiltering: "true", // Explicitly enforce default value, see #14216 AppConnector: &ipn.AppConnectorPrefs{Advertise: false}, } - // For egress proxies only, we need to ensure that stateful filtering is - // not in place so that traffic from cluster can be forwarded via - // Tailscale IPs. - // TODO (irbekrm): set it to true always as this is now the default in core. 
- if stsC.TailnetTargetFQDN != "" || stsC.TailnetTargetIP != "" { - conf.NoStatefulFiltering = "true" - } if stsC.Connector != nil { routes, err := netutil.CalcAdvertiseRoutes(stsC.Connector.routes, stsC.Connector.isExitNode) if err != nil { diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 5795a0aae..8f06f5979 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -353,13 +353,14 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec mak.Set(&s.StringData, "serve-config", string(serveConfigBs)) } conf := &ipn.ConfigVAlpha{ - Version: "alpha0", - AcceptDNS: "false", - Hostname: &opts.hostname, - Locked: "false", - AuthKey: ptr.To("secret-authkey"), - AcceptRoutes: "false", - AppConnector: &ipn.AppConnectorPrefs{Advertise: false}, + Version: "alpha0", + AcceptDNS: "false", + Hostname: &opts.hostname, + Locked: "false", + AuthKey: ptr.To("secret-authkey"), + AcceptRoutes: "false", + AppConnector: &ipn.AppConnectorPrefs{Advertise: false}, + NoStatefulFiltering: "true", } if opts.proxyClass != "" { t.Logf("applying configuration from ProxyClass %s", opts.proxyClass) @@ -391,11 +392,6 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec routes = append(routes, prefix) } } - if opts.tailnetTargetFQDN != "" || opts.tailnetTargetIP != "" { - conf.NoStatefulFiltering = "true" - } else { - conf.NoStatefulFiltering = "false" - } conf.AdvertiseRoutes = routes bnn, err := json.Marshal(conf) if err != nil { From a68efe2088c07c1abad537965be724bdc8273044 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 22 Oct 2024 13:53:34 -0500 Subject: [PATCH 0194/1708] cmd/checkmetrics: add command for checking metrics against kb This commit adds a command to validate that all the metrics that are registring in the client are also present in a path or url. It is intended to be ran from the KB against the latest version of tailscale. Updates tailscale/corp#24066 Updates tailscale/corp#22075 Co-Authored-By: Brad Fitzpatrick Signed-off-by: Kristoffer Dalby --- cmd/checkmetrics/checkmetrics.go | 131 +++++++++++++++++++++++++++++++ util/usermetric/usermetric.go | 11 +++ 2 files changed, 142 insertions(+) create mode 100644 cmd/checkmetrics/checkmetrics.go diff --git a/cmd/checkmetrics/checkmetrics.go b/cmd/checkmetrics/checkmetrics.go new file mode 100644 index 000000000..fb9e8ab4c --- /dev/null +++ b/cmd/checkmetrics/checkmetrics.go @@ -0,0 +1,131 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// checkmetrics validates that all metrics in the tailscale client-metrics +// are documented in a given path or URL. 
+package main + +import ( + "context" + "flag" + "fmt" + "io" + "log" + "net/http" + "net/http/httptest" + "os" + "strings" + "time" + + "tailscale.com/ipn/store/mem" + "tailscale.com/tsnet" + "tailscale.com/tstest/integration/testcontrol" + "tailscale.com/util/httpm" +) + +var ( + kbPath = flag.String("kb-path", "", "filepath to the client-metrics knowledge base") + kbUrl = flag.String("kb-url", "", "URL to the client-metrics knowledge base page") +) + +func main() { + flag.Parse() + if *kbPath == "" && *kbUrl == "" { + log.Fatalf("either -kb-path or -kb-url must be set") + } + + var control testcontrol.Server + ts := httptest.NewServer(&control) + defer ts.Close() + + td, err := os.MkdirTemp("", "testcontrol") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(td) + + // tsnet is used not used as a Tailscale client, but as a way to + // boot up Tailscale, have all the metrics registered, and then + // verifiy that all the metrics are documented. + tsn := &tsnet.Server{ + Dir: td, + Store: new(mem.Store), + UserLogf: log.Printf, + Ephemeral: true, + ControlURL: ts.URL, + } + if err := tsn.Start(); err != nil { + log.Fatal(err) + } + defer tsn.Close() + + log.Printf("checking that all metrics are documented, looking for: %s", tsn.Sys().UserMetricsRegistry().MetricNames()) + + if *kbPath != "" { + kb, err := readKB(*kbPath) + if err != nil { + log.Fatalf("reading kb: %v", err) + } + missing := undocumentedMetrics(kb, tsn.Sys().UserMetricsRegistry().MetricNames()) + + if len(missing) > 0 { + log.Fatalf("found undocumented metrics in %q: %v", *kbPath, missing) + } + } + + if *kbUrl != "" { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + kb, err := getKB(ctx, *kbUrl) + if err != nil { + log.Fatalf("getting kb: %v", err) + } + missing := undocumentedMetrics(kb, tsn.Sys().UserMetricsRegistry().MetricNames()) + + if len(missing) > 0 { + log.Fatalf("found undocumented metrics in %q: %v", *kbUrl, missing) + } + } +} + +func readKB(path string) (string, error) { + b, err := os.ReadFile(path) + if err != nil { + return "", fmt.Errorf("reading file: %w", err) + } + + return string(b), nil +} + +func getKB(ctx context.Context, url string) (string, error) { + req, err := http.NewRequestWithContext(ctx, httpm.GET, url, nil) + if err != nil { + return "", fmt.Errorf("creating request: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return "", fmt.Errorf("getting kb page: %w", err) + } + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + b, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("reading body: %w", err) + } + return string(b), nil +} + +func undocumentedMetrics(b string, metrics []string) []string { + var missing []string + for _, metric := range metrics { + if !strings.Contains(b, metric) { + missing = append(missing, metric) + } + } + return missing +} diff --git a/util/usermetric/usermetric.go b/util/usermetric/usermetric.go index 7913a4ef0..74e9447a6 100644 --- a/util/usermetric/usermetric.go +++ b/util/usermetric/usermetric.go @@ -14,6 +14,7 @@ import ( "tailscale.com/metrics" "tailscale.com/tsweb/varz" + "tailscale.com/util/set" ) // Registry tracks user-facing metrics of various Tailscale subsystems. @@ -106,3 +107,13 @@ func (r *Registry) String() string { return sb.String() } + +// Metrics returns the name of all the metrics in the registry. 
+func (r *Registry) MetricNames() []string { + ret := make(set.Set[string]) + r.vars.Do(func(kv expvar.KeyValue) { + ret.Add(kv.Key) + }) + + return ret.Slice() +} From 24095e489716b7ec4a6bbe1978dd15ae442af73e Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Mon, 2 Dec 2024 12:18:09 +0000 Subject: [PATCH 0195/1708] cmd/containerboot: serve health on local endpoint (#14246) * cmd/containerboot: serve health on local endpoint We introduced stable (user) metrics in #14035, and `TS_LOCAL_ADDR_PORT` with it. Rather than requiring users to specify a new addr/port combination for each new local endpoint they want the container to serve, this combines the health check endpoint onto the local addr/port used by metrics if `TS_ENABLE_HEALTH_CHECK` is used instead of `TS_HEALTHCHECK_ADDR_PORT`. `TS_LOCAL_ADDR_PORT` now defaults to binding to all interfaces on 9002 so that it works more seamlessly and with less configuration in environments other than Kubernetes, where the operator always overrides the default anyway. In particular, listening on localhost would not be accessible from outside the container, and many scripted container environments do not know the IP address of the container before it's started. Listening on all interfaces allows users to just set one env var (`TS_ENABLE_METRICS` or `TS_ENABLE_HEALTH_CHECK`) to get a fully functioning local endpoint they can query from outside the container. Updates #14035, #12898 Signed-off-by: Tom Proctor --- cmd/containerboot/healthz.go | 35 ++++---- cmd/containerboot/main.go | 76 +++++++++++++---- cmd/containerboot/main_test.go | 150 ++++++++++++++++++++++++++++++++- cmd/containerboot/metrics.go | 22 ++--- cmd/containerboot/settings.go | 28 ++++-- cmd/k8s-operator/sts.go | 2 +- cmd/k8s-operator/sts_test.go | 4 +- 7 files changed, 251 insertions(+), 66 deletions(-) diff --git a/cmd/containerboot/healthz.go b/cmd/containerboot/healthz.go index 12e7ee9f8..895290733 100644 --- a/cmd/containerboot/healthz.go +++ b/cmd/containerboot/healthz.go @@ -7,7 +7,6 @@ package main import ( "log" - "net" "net/http" "sync" ) @@ -23,29 +22,29 @@ type healthz struct { func (h *healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.Lock() defer h.Unlock() + if h.hasAddrs { w.Write([]byte("ok")) } else { - http.Error(w, "node currently has no tailscale IPs", http.StatusInternalServerError) + http.Error(w, "node currently has no tailscale IPs", http.StatusServiceUnavailable) } } -// runHealthz runs a simple HTTP health endpoint on /healthz, listening on the -// provided address. A containerized tailscale instance is considered healthy if -// it has at least one tailnet IP address. -func runHealthz(addr string, h *healthz) { - lis, err := net.Listen("tcp", addr) - if err != nil { - log.Fatalf("error listening on the provided health endpoint address %q: %v", addr, err) +func (h *healthz) update(healthy bool) { + h.Lock() + defer h.Unlock() + + if h.hasAddrs != healthy { + log.Println("Setting healthy", healthy) } - mux := http.NewServeMux() + h.hasAddrs = healthy +} + +// healthHandlers registers a simple health handler at /healthz. +// A containerized tailscale instance is considered healthy if +// it has at least one tailnet IP address. 
+func healthHandlers(mux *http.ServeMux) *healthz { + h := &healthz{} mux.Handle("GET /healthz", h) - log.Printf("Running healthcheck endpoint at %s/healthz", addr) - hs := &http.Server{Handler: mux} - - go func() { - if err := hs.Serve(lis); err != nil { - log.Fatalf("failed running health endpoint: %v", err) - } - }() + return h } diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 313e8deb0..0af9062a5 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -52,11 +52,17 @@ // ${TS_CERT_DOMAIN}, it will be replaced with the value of the available FQDN. // It cannot be used in conjunction with TS_DEST_IP. The file is watched for changes, // and will be re-applied when it changes. -// - TS_HEALTHCHECK_ADDR_PORT: if specified, an HTTP health endpoint will be -// served at /healthz at the provided address, which should be in form [
]:. -// If not set, no health check will be run. If set to :, addr will default to 0.0.0.0 -// The health endpoint will return 200 OK if this node has at least one tailnet IP address, -// otherwise returns 503. +// - TS_HEALTHCHECK_ADDR_PORT: deprecated, use TS_ENABLE_HEALTH_CHECK instead and optionally +// set TS_LOCAL_ADDR_PORT. Will be removed in 1.82.0. +// - TS_LOCAL_ADDR_PORT: the address and port to serve local metrics and health +// check endpoints if enabled via TS_ENABLE_METRICS and/or TS_ENABLE_HEALTH_CHECK. +// Defaults to [::]:9002, serving on all available interfaces. +// - TS_ENABLE_METRICS: if true, a metrics endpoint will be served at /metrics on +// the address specified by TS_LOCAL_ADDR_PORT. See https://tailscale.com/kb/1482/client-metrics +// for more information on the metrics exposed. +// - TS_ENABLE_HEALTH_CHECK: if true, a health check endpoint will be served at /healthz on +// the address specified by TS_LOCAL_ADDR_PORT. The health endpoint will return 200 +// OK if this node has at least one tailnet IP address, otherwise returns 503. // NB: the health criteria might change in the future. // - TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR: if specified, a path to a // directory that containers tailscaled config in file. The config file needs to be @@ -99,6 +105,7 @@ import ( "log" "math" "net" + "net/http" "net/netip" "os" "os/signal" @@ -178,12 +185,32 @@ func main() { } defer killTailscaled() - if cfg.LocalAddrPort != "" && cfg.MetricsEnabled { - m := &metrics{ - lc: client, - debugEndpoint: cfg.DebugAddrPort, + var healthCheck *healthz + if cfg.HealthCheckAddrPort != "" { + mux := http.NewServeMux() + + log.Printf("Running healthcheck endpoint at %s/healthz", cfg.HealthCheckAddrPort) + healthCheck = healthHandlers(mux) + + close := runHTTPServer(mux, cfg.HealthCheckAddrPort) + defer close() + } + + if cfg.localMetricsEnabled() || cfg.localHealthEnabled() { + mux := http.NewServeMux() + + if cfg.localMetricsEnabled() { + log.Printf("Running metrics endpoint at %s/metrics", cfg.LocalAddrPort) + metricsHandlers(mux, client, cfg.DebugAddrPort) } - runMetrics(cfg.LocalAddrPort, m) + + if cfg.localHealthEnabled() { + log.Printf("Running healthcheck endpoint at %s/healthz", cfg.LocalAddrPort) + healthCheck = healthHandlers(mux) + } + + close := runHTTPServer(mux, cfg.LocalAddrPort) + defer close() } if cfg.EnableForwardingOptimizations { @@ -328,9 +355,6 @@ authLoop: certDomain = new(atomic.Pointer[string]) certDomainChanged = make(chan bool, 1) - - h = &healthz{} // http server for the healthz endpoint - healthzRunner = sync.OnceFunc(func() { runHealthz(cfg.HealthCheckAddrPort, h) }) ) if cfg.ServeConfigPath != "" { go watchServeConfigChanges(ctx, cfg.ServeConfigPath, certDomainChanged, certDomain, client) @@ -556,11 +580,8 @@ runLoop: } } - if cfg.HealthCheckAddrPort != "" { - h.Lock() - h.hasAddrs = len(addrs) != 0 - h.Unlock() - healthzRunner() + if healthCheck != nil { + healthCheck.update(len(addrs) != 0) } if egressSvcsNotify != nil { egressSvcsNotify <- n @@ -751,3 +772,22 @@ func tailscaledConfigFilePath() string { log.Printf("Using tailscaled config file %q to match current capability version %d", filePath, tailcfg.CurrentCapabilityVersion) return filePath } + +func runHTTPServer(mux *http.ServeMux, addr string) (close func() error) { + ln, err := net.Listen("tcp", addr) + if err != nil { + log.Fatalf("failed to listen on addr %q: %v", addr, err) + } + srv := &http.Server{Handler: mux} + + go func() { + if err := srv.Serve(ln); err != nil { + log.Fatalf("failed 
running server: %v", err) + } + }() + + return func() error { + err := srv.Shutdown(context.Background()) + return errors.Join(err, ln.Close()) + } +} diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index 5c92787ce..47d7c19cf 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -101,6 +101,24 @@ func TestContainerBoot(t *testing.T) { argFile := filepath.Join(d, "args") runningSockPath := filepath.Join(d, "tmp/tailscaled.sock") + var localAddrPort, healthAddrPort int + for _, p := range []*int{&localAddrPort, &healthAddrPort} { + ln, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatalf("Failed to open listener: %v", err) + } + if err := ln.Close(); err != nil { + t.Fatalf("Failed to close listener: %v", err) + } + port := ln.Addr().(*net.TCPAddr).Port + *p = port + } + metricsURL := func(port int) string { + return fmt.Sprintf("http://127.0.0.1:%d/metrics", port) + } + healthURL := func(port int) string { + return fmt.Sprintf("http://127.0.0.1:%d/healthz", port) + } type phase struct { // If non-nil, send this IPN bus notification (and remember it as the @@ -119,6 +137,8 @@ func TestContainerBoot(t *testing.T) { // WantFatalLog is the fatal log message we expect from containerboot. // If set for a phase, the test will finish on that phase. WantFatalLog string + + EndpointStatuses map[string]int } runningNotify := &ipn.Notify{ State: ptr.To(ipn.Running), @@ -147,6 +167,11 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", }, + // No metrics or health by default. + EndpointStatuses: map[string]int{ + metricsURL(9002): -1, + healthURL(9002): -1, + }, }, { Notify: runningNotify, @@ -700,6 +725,104 @@ func TestContainerBoot(t *testing.T) { }, }, }, + { + Name: "metrics_enabled", + Env: map[string]string{ + "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort), + "TS_ENABLE_METRICS": "true", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", + }, + EndpointStatuses: map[string]int{ + metricsURL(localAddrPort): 200, + healthURL(localAddrPort): -1, + }, + }, { + Notify: runningNotify, + }, + }, + }, + { + Name: "health_enabled", + Env: map[string]string{ + "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort), + "TS_ENABLE_HEALTH_CHECK": "true", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", + }, + EndpointStatuses: map[string]int{ + metricsURL(localAddrPort): -1, + healthURL(localAddrPort): 503, // Doesn't start passing until the next phase. 
+ }, + }, { + Notify: runningNotify, + EndpointStatuses: map[string]int{ + metricsURL(localAddrPort): -1, + healthURL(localAddrPort): 200, + }, + }, + }, + }, + { + Name: "metrics_and_health_on_same_port", + Env: map[string]string{ + "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort), + "TS_ENABLE_METRICS": "true", + "TS_ENABLE_HEALTH_CHECK": "true", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", + }, + EndpointStatuses: map[string]int{ + metricsURL(localAddrPort): 200, + healthURL(localAddrPort): 503, // Doesn't start passing until the next phase. + }, + }, { + Notify: runningNotify, + EndpointStatuses: map[string]int{ + metricsURL(localAddrPort): 200, + healthURL(localAddrPort): 200, + }, + }, + }, + }, + { + Name: "local_metrics_and_deprecated_health", + Env: map[string]string{ + "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort), + "TS_ENABLE_METRICS": "true", + "TS_HEALTHCHECK_ADDR_PORT": fmt.Sprintf("[::]:%d", healthAddrPort), + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", + }, + EndpointStatuses: map[string]int{ + metricsURL(localAddrPort): 200, + healthURL(healthAddrPort): 503, // Doesn't start passing until the next phase. + }, + }, { + Notify: runningNotify, + EndpointStatuses: map[string]int{ + metricsURL(localAddrPort): 200, + healthURL(healthAddrPort): 200, + }, + }, + }, + }, } for _, test := range tests { @@ -796,7 +919,26 @@ func TestContainerBoot(t *testing.T) { return nil }) if err != nil { - t.Fatal(err) + t.Fatalf("phase %d: %v", i, err) + } + + for url, want := range p.EndpointStatuses { + err := tstest.WaitFor(2*time.Second, func() error { + resp, err := http.Get(url) + if err != nil && want != -1 { + return fmt.Errorf("GET %s: %v", url, err) + } + if want > 0 && resp.StatusCode != want { + defer resp.Body.Close() + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("GET %s, want %d, got %d\n%s", url, want, resp.StatusCode, string(body)) + } + + return nil + }) + if err != nil { + t.Fatalf("phase %d: %v", i, err) + } } } waitLogLine(t, 2*time.Second, cbOut, "Startup complete, waiting for shutdown signal") @@ -955,6 +1097,12 @@ func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { if r.Method != "GET" { panic(fmt.Sprintf("unsupported method %q", r.Method)) } + case "/localapi/v0/usermetrics": + if r.Method != "GET" { + panic(fmt.Sprintf("unsupported method %q", r.Method)) + } + w.Write([]byte("fake metrics")) + return default: panic(fmt.Sprintf("unsupported path %q", r.URL.Path)) } diff --git a/cmd/containerboot/metrics.go b/cmd/containerboot/metrics.go index 874774d7a..a8b9222a5 100644 --- a/cmd/containerboot/metrics.go +++ b/cmd/containerboot/metrics.go @@ -8,8 +8,6 @@ package main import ( "fmt" "io" - "log" - "net" "net/http" "tailscale.com/client/tailscale" @@ -64,28 +62,18 @@ func (m *metrics) handleDebug(w http.ResponseWriter, r *http.Request) { proxy(w, r, debugURL, http.DefaultClient.Do) } -// runMetrics runs a simple HTTP metrics endpoint at /metrics, forwarding +// metricsHandlers registers a simple HTTP metrics handler at /metrics, forwarding // requests to tailscaled's /localapi/v0/usermetrics API. 
// // In 1.78.x and 1.80.x, it also proxies debug paths to tailscaled's debug // endpoint if configured to ease migration for a breaking change serving user // metrics instead of debug metrics on the "metrics" port. -func runMetrics(addr string, m *metrics) { - ln, err := net.Listen("tcp", addr) - if err != nil { - log.Fatalf("error listening on the provided metrics endpoint address %q: %v", addr, err) +func metricsHandlers(mux *http.ServeMux, lc *tailscale.LocalClient, debugAddrPort string) { + m := &metrics{ + lc: lc, + debugEndpoint: debugAddrPort, } - mux := http.NewServeMux() mux.HandleFunc("GET /metrics", m.handleMetrics) mux.HandleFunc("/debug/", m.handleDebug) // TODO(tomhjp): Remove for 1.82.0 release. - - log.Printf("Running metrics endpoint at %s/metrics", addr) - ms := &http.Server{Handler: mux} - - go func() { - if err := ms.Serve(ln); err != nil { - log.Fatalf("failed running metrics endpoint: %v", err) - } - }() } diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index c877682b9..1262a0e18 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -67,18 +67,15 @@ type settings struct { PodIP string PodIPv4 string PodIPv6 string - HealthCheckAddrPort string // TODO(tomhjp): use the local addr/port instead. + HealthCheckAddrPort string LocalAddrPort string MetricsEnabled bool + HealthCheckEnabled bool DebugAddrPort string EgressSvcsCfgPath string } func configFromEnv() (*settings, error) { - defaultLocalAddrPort := "" - if v, ok := os.LookupEnv("POD_IP"); ok && v != "" { - defaultLocalAddrPort = fmt.Sprintf("%s:9002", v) - } cfg := &settings{ AuthKey: defaultEnvs([]string{"TS_AUTHKEY", "TS_AUTH_KEY"}, ""), Hostname: defaultEnv("TS_HOSTNAME", ""), @@ -105,8 +102,9 @@ func configFromEnv() (*settings, error) { PodIP: defaultEnv("POD_IP", ""), EnableForwardingOptimizations: defaultBool("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS", false), HealthCheckAddrPort: defaultEnv("TS_HEALTHCHECK_ADDR_PORT", ""), - LocalAddrPort: defaultEnv("TS_LOCAL_ADDR_PORT", defaultLocalAddrPort), - MetricsEnabled: defaultBool("TS_METRICS_ENABLED", false), + LocalAddrPort: defaultEnv("TS_LOCAL_ADDR_PORT", "[::]:9002"), + MetricsEnabled: defaultBool("TS_ENABLE_METRICS", false), + HealthCheckEnabled: defaultBool("TS_ENABLE_HEALTH_CHECK", false), DebugAddrPort: defaultEnv("TS_DEBUG_ADDR_PORT", ""), EgressSvcsCfgPath: defaultEnv("TS_EGRESS_SERVICES_CONFIG_PATH", ""), } @@ -181,11 +179,12 @@ func (s *settings) validate() error { return errors.New("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS is not supported in userspace mode") } if s.HealthCheckAddrPort != "" { + log.Printf("[warning] TS_HEALTHCHECK_ADDR_PORT is deprecated and will be removed in 1.82.0. 
Please use TS_ENABLE_HEALTH_CHECK and optionally TS_LOCAL_ADDR_PORT instead.") if _, err := netip.ParseAddrPort(s.HealthCheckAddrPort); err != nil { - return fmt.Errorf("error parsing TS_HEALTH_CHECK_ADDR_PORT value %q: %w", s.HealthCheckAddrPort, err) + return fmt.Errorf("error parsing TS_HEALTHCHECK_ADDR_PORT value %q: %w", s.HealthCheckAddrPort, err) } } - if s.LocalAddrPort != "" { + if s.localMetricsEnabled() || s.localHealthEnabled() { if _, err := netip.ParseAddrPort(s.LocalAddrPort); err != nil { return fmt.Errorf("error parsing TS_LOCAL_ADDR_PORT value %q: %w", s.LocalAddrPort, err) } @@ -195,6 +194,9 @@ func (s *settings) validate() error { return fmt.Errorf("error parsing TS_DEBUG_ADDR_PORT value %q: %w", s.DebugAddrPort, err) } } + if s.HealthCheckEnabled && s.HealthCheckAddrPort != "" { + return errors.New("TS_HEALTHCHECK_ADDR_PORT is deprecated and will be removed in 1.82.0, use TS_ENABLE_HEALTH_CHECK and optionally TS_LOCAL_ADDR_PORT") + } return nil } @@ -292,6 +294,14 @@ func hasKubeStateStore(cfg *settings) bool { return cfg.InKubernetes && cfg.KubernetesCanPatch && cfg.KubeSecret != "" } +func (cfg *settings) localMetricsEnabled() bool { + return cfg.LocalAddrPort != "" && cfg.MetricsEnabled +} + +func (cfg *settings) localHealthEnabled() bool { + return cfg.LocalAddrPort != "" && cfg.HealthCheckEnabled +} + // defaultEnv returns the value of the given envvar name, or defVal if // unset. func defaultEnv(name, defVal string) string { diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index b12b1cdd0..73c54a93d 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -818,7 +818,7 @@ func enableEndpoints(ss *appsv1.StatefulSet, metrics, debug bool) { Value: "$(POD_IP):9002", }, corev1.EnvVar{ - Name: "TS_METRICS_ENABLED", + Name: "TS_ENABLE_METRICS", Value: "true", }, ) diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index 7986d1b91..05aafaee6 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -258,7 +258,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { corev1.EnvVar{Name: "TS_DEBUG_ADDR_PORT", Value: "$(POD_IP):9001"}, corev1.EnvVar{Name: "TS_TAILSCALED_EXTRA_ARGS", Value: "--debug=$(TS_DEBUG_ADDR_PORT)"}, corev1.EnvVar{Name: "TS_LOCAL_ADDR_PORT", Value: "$(POD_IP):9002"}, - corev1.EnvVar{Name: "TS_METRICS_ENABLED", Value: "true"}, + corev1.EnvVar{Name: "TS_ENABLE_METRICS", Value: "true"}, ) wantSS.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{ {Name: "debug", Protocol: "TCP", ContainerPort: 9001}, @@ -273,7 +273,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS = nonUserspaceProxySS.DeepCopy() wantSS.Spec.Template.Spec.Containers[0].Env = append(wantSS.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{Name: "TS_LOCAL_ADDR_PORT", Value: "$(POD_IP):9002"}, - corev1.EnvVar{Name: "TS_METRICS_ENABLED", Value: "true"}, + corev1.EnvVar{Name: "TS_ENABLE_METRICS", Value: "true"}, ) wantSS.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{{Name: "metrics", Protocol: "TCP", ContainerPort: 9002}} gotSS = applyProxyClassToStatefulSet(proxyClassWithMetricsDebug(true, ptr.To(false)), nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) From 8d0c690f89971fa3ac30e3cba235cef8b2a81006 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 2 Dec 2024 08:58:21 -0800 Subject: [PATCH 0196/1708] net/netcheck: clean up ICMP probe AddrPort lookup Fixes #14200 Change-Id: Ib086814cf63dda5de021403fe1db4fb2a798eaae Signed-off-by: Brad 
Fitzpatrick --- net/netcheck/netcheck.go | 53 ++++++++++++++++++++--------------- net/netcheck/netcheck_test.go | 12 ++++---- 2 files changed, 36 insertions(+), 29 deletions(-) diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 2c429862e..0bb930568 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -1221,17 +1221,19 @@ func (c *Client) measureICMPLatency(ctx context.Context, reg *tailcfg.DERPRegion // Try pinging the first node in the region node := reg.Nodes[0] - // Get the IPAddr by asking for the UDP address that we would use for - // STUN and then using that IP. - // - // TODO(andrew-d): this is a bit ugly - nodeAddr := c.nodeAddr(ctx, node, probeIPv4) - if !nodeAddr.IsValid() { + if node.STUNPort < 0 { + // If STUN is disabled on a node, interpret that as meaning don't measure latency. + return 0, false, nil + } + const unusedPort = 0 + stunAddrPort, ok := c.nodeAddrPort(ctx, node, unusedPort, probeIPv4) + if !ok { return 0, false, fmt.Errorf("no address for node %v (v4-for-icmp)", node.Name) } + ip := stunAddrPort.Addr() addr := &net.IPAddr{ - IP: net.IP(nodeAddr.Addr().AsSlice()), - Zone: nodeAddr.Addr().Zone(), + IP: net.IP(ip.AsSlice()), + Zone: ip.Zone(), } // Use the unique node.Name field as the packet data to reduce the @@ -1478,8 +1480,8 @@ func (rs *reportState) runProbe(ctx context.Context, dm *tailcfg.DERPMap, probe return } - addr := c.nodeAddr(ctx, node, probe.proto) - if !addr.IsValid() { + addr, ok := c.nodeAddrPort(ctx, node, node.STUNPort, probe.proto) + if !ok { c.logf("netcheck.runProbe: named node %q has no %v address", probe.node, probe.proto) return } @@ -1528,12 +1530,17 @@ func (rs *reportState) runProbe(ctx context.Context, dm *tailcfg.DERPMap, probe c.vlogf("sent to %v", addr) } -// proto is 4 or 6 -// If it returns nil, the node is skipped. -func (c *Client) nodeAddr(ctx context.Context, n *tailcfg.DERPNode, proto probeProto) (ap netip.AddrPort) { - port := cmp.Or(n.STUNPort, 3478) +// nodeAddrPort returns the IP:port to send a STUN queries to for a given node. +// +// The provided port should be n.STUNPort, which may be negative to disable STUN. +// If STUN is disabled for this node, it returns ok=false. +// The port parameter is separate for the ICMP caller to provide a fake value. +// +// proto is [probeIPv4] or [probeIPv6]. +func (c *Client) nodeAddrPort(ctx context.Context, n *tailcfg.DERPNode, port int, proto probeProto) (_ netip.AddrPort, ok bool) { + var zero netip.AddrPort if port < 0 || port > 1<<16-1 { - return + return zero, false } if n.STUNTestIP != "" { ip, err := netip.ParseAddr(n.STUNTestIP) @@ -1546,7 +1553,7 @@ func (c *Client) nodeAddr(ctx context.Context, n *tailcfg.DERPNode, proto probeP if proto == probeIPv6 && ip.Is4() { return } - return netip.AddrPortFrom(ip, uint16(port)) + return netip.AddrPortFrom(ip, uint16(port)), true } switch proto { @@ -1554,20 +1561,20 @@ func (c *Client) nodeAddr(ctx context.Context, n *tailcfg.DERPNode, proto probeP if n.IPv4 != "" { ip, _ := netip.ParseAddr(n.IPv4) if !ip.Is4() { - return + return zero, false } - return netip.AddrPortFrom(ip, uint16(port)) + return netip.AddrPortFrom(ip, uint16(port)), true } case probeIPv6: if n.IPv6 != "" { ip, _ := netip.ParseAddr(n.IPv6) if !ip.Is6() { - return + return zero, false } - return netip.AddrPortFrom(ip, uint16(port)) + return netip.AddrPortFrom(ip, uint16(port)), true } default: - return + return zero, false } // The default lookup function if we don't set UseDNSCache is to use net.DefaultResolver. 
@@ -1609,13 +1616,13 @@ func (c *Client) nodeAddr(ctx context.Context, n *tailcfg.DERPNode, proto probeP addrs, err := lookupIPAddr(ctx, n.HostName) for _, a := range addrs { if (a.Is4() && probeIsV4) || (a.Is6() && !probeIsV4) { - return netip.AddrPortFrom(a, uint16(port)) + return netip.AddrPortFrom(a, uint16(port)), true } } if err != nil { c.logf("netcheck: DNS lookup error for %q (node %q region %v): %v", n.HostName, n.Name, n.RegionID, err) } - return + return zero, false } func regionHasDERPNode(r *tailcfg.DERPRegion) bool { diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go index b4fbb4023..23891efcc 100644 --- a/net/netcheck/netcheck_test.go +++ b/net/netcheck/netcheck_test.go @@ -887,8 +887,8 @@ func TestNodeAddrResolve(t *testing.T) { c.UseDNSCache = tt t.Run("IPv4", func(t *testing.T) { - ap := c.nodeAddr(ctx, dn, probeIPv4) - if !ap.IsValid() { + ap, ok := c.nodeAddrPort(ctx, dn, dn.STUNPort, probeIPv4) + if !ok { t.Fatal("expected valid AddrPort") } if !ap.Addr().Is4() { @@ -902,8 +902,8 @@ func TestNodeAddrResolve(t *testing.T) { t.Skipf("IPv6 may not work on this machine") } - ap := c.nodeAddr(ctx, dn, probeIPv6) - if !ap.IsValid() { + ap, ok := c.nodeAddrPort(ctx, dn, dn.STUNPort, probeIPv6) + if !ok { t.Fatal("expected valid AddrPort") } if !ap.Addr().Is6() { @@ -912,8 +912,8 @@ func TestNodeAddrResolve(t *testing.T) { t.Logf("got IPv6 addr: %v", ap) }) t.Run("IPv6 Failure", func(t *testing.T) { - ap := c.nodeAddr(ctx, dnV4Only, probeIPv6) - if ap.IsValid() { + ap, ok := c.nodeAddrPort(ctx, dnV4Only, dn.STUNPort, probeIPv6) + if ok { t.Fatalf("expected no addr but got: %v", ap) } t.Logf("correctly got invalid addr") From 3f545725392a0cd3185a12961f71fd87b6b956e2 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Thu, 28 Nov 2024 12:49:37 -0500 Subject: [PATCH 0197/1708] IPN: Update ServeConfig to accept configuration for Services. This commit updates ServeConfig to allow configuration to Services (VIPServices for now) via Serve. The scope of this commit is only adding the Services field to ServeConfig. The field doesn't actually allow packet flowing yet. The purpose of this commit is to unblock other work on k8s end. Updates #22953 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- ipn/doc.go | 2 +- ipn/ipn_clone.go | 49 ++++++++++++++++++++++++++++++++ ipn/ipn_view.go | 74 +++++++++++++++++++++++++++++++++++++++++++++++- ipn/serve.go | 21 ++++++++++++++ 4 files changed, 144 insertions(+), 2 deletions(-) diff --git a/ipn/doc.go b/ipn/doc.go index 4b3810be1..9a0bbb800 100644 --- a/ipn/doc.go +++ b/ipn/doc.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:generate go run tailscale.com/cmd/viewer -type=Prefs,ServeConfig,TCPPortHandler,HTTPHandler,WebServerConfig +//go:generate go run tailscale.com/cmd/viewer -type=Prefs,ServeConfig,ServiceConfig,TCPPortHandler,HTTPHandler,WebServerConfig // Package ipn implements the interactions between the Tailscale cloud // control plane and the local network stack. 
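For illustration, a minimal sketch (in Go, not part of the patch) of how a ServeConfig might declare one service via the new Services field and the ServiceConfig type added in ipn/serve.go below; per the commit message above, this only describes configuration shape and does not yet make packets flow. The service name "svc:example", the host:port key, and the backend URL are placeholders:

package example

import "tailscale.com/ipn"

// exampleServeConfig declares a single service with an L4 handler on port 443
// (HTTPS terminated by tailscaled) and an L7 handler proxying "/" to a backend.
func exampleServeConfig() *ipn.ServeConfig {
	return &ipn.ServeConfig{
		Services: map[string]*ipn.ServiceConfig{
			"svc:example": { // placeholder service name
				// L4: handle TCP port 443 as HTTPS.
				TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
				// L7: map "/" under a "$SNI_NAME:$PORT" key to a backend URL.
				Web: map[ipn.HostPort]*ipn.WebServerConfig{
					"example.ts.net:443": { // placeholder key
						Handlers: map[string]*ipn.HTTPHandler{
							"/": {Proxy: "http://127.0.0.1:8080"},
						},
					},
				},
				Tun: false, // false: L4/L7 forwarding; true: L3 (tun) forwarding
			},
		},
	}
}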
diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 0e9698faf..34d7ba9a6 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -105,6 +105,16 @@ func (src *ServeConfig) Clone() *ServeConfig { } } } + if dst.Services != nil { + dst.Services = map[string]*ServiceConfig{} + for k, v := range src.Services { + if v == nil { + dst.Services[k] = nil + } else { + dst.Services[k] = v.Clone() + } + } + } dst.AllowFunnel = maps.Clone(src.AllowFunnel) if dst.Foreground != nil { dst.Foreground = map[string]*ServeConfig{} @@ -123,11 +133,50 @@ func (src *ServeConfig) Clone() *ServeConfig { var _ServeConfigCloneNeedsRegeneration = ServeConfig(struct { TCP map[uint16]*TCPPortHandler Web map[HostPort]*WebServerConfig + Services map[string]*ServiceConfig AllowFunnel map[HostPort]bool Foreground map[string]*ServeConfig ETag string }{}) +// Clone makes a deep copy of ServiceConfig. +// The result aliases no memory with the original. +func (src *ServiceConfig) Clone() *ServiceConfig { + if src == nil { + return nil + } + dst := new(ServiceConfig) + *dst = *src + if dst.TCP != nil { + dst.TCP = map[uint16]*TCPPortHandler{} + for k, v := range src.TCP { + if v == nil { + dst.TCP[k] = nil + } else { + dst.TCP[k] = ptr.To(*v) + } + } + } + if dst.Web != nil { + dst.Web = map[HostPort]*WebServerConfig{} + for k, v := range src.Web { + if v == nil { + dst.Web[k] = nil + } else { + dst.Web[k] = v.Clone() + } + } + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _ServiceConfigCloneNeedsRegeneration = ServiceConfig(struct { + TCP map[uint16]*TCPPortHandler + Web map[HostPort]*WebServerConfig + Tun bool +}{}) + // Clone makes a deep copy of TCPPortHandler. // The result aliases no memory with the original. func (src *TCPPortHandler) Clone() *TCPPortHandler { diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 83a7aebb1..bc67531e4 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -18,7 +18,7 @@ import ( "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=Prefs,ServeConfig,TCPPortHandler,HTTPHandler,WebServerConfig +//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=Prefs,ServeConfig,ServiceConfig,TCPPortHandler,HTTPHandler,WebServerConfig // View returns a readonly view of Prefs. func (p *Prefs) View() PrefsView { @@ -195,6 +195,12 @@ func (v ServeConfigView) Web() views.MapFn[HostPort, *WebServerConfig, WebServer }) } +func (v ServeConfigView) Services() views.MapFn[string, *ServiceConfig, ServiceConfigView] { + return views.MapFnOf(v.ж.Services, func(t *ServiceConfig) ServiceConfigView { + return t.View() + }) +} + func (v ServeConfigView) AllowFunnel() views.Map[HostPort, bool] { return views.MapOf(v.ж.AllowFunnel) } @@ -210,11 +216,77 @@ func (v ServeConfigView) ETag() string { return v.ж.ETag } var _ServeConfigViewNeedsRegeneration = ServeConfig(struct { TCP map[uint16]*TCPPortHandler Web map[HostPort]*WebServerConfig + Services map[string]*ServiceConfig AllowFunnel map[HostPort]bool Foreground map[string]*ServeConfig ETag string }{}) +// View returns a readonly view of ServiceConfig. +func (p *ServiceConfig) View() ServiceConfigView { + return ServiceConfigView{ж: p} +} + +// ServiceConfigView provides a read-only view over ServiceConfig. +// +// Its methods should only be called if `Valid()` returns true. +type ServiceConfigView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. 
+ // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *ServiceConfig +} + +// Valid reports whether underlying value is non-nil. +func (v ServiceConfigView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v ServiceConfigView) AsStruct() *ServiceConfig { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +func (v ServiceConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } + +func (v *ServiceConfigView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x ServiceConfig + if err := json.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +func (v ServiceConfigView) TCP() views.MapFn[uint16, *TCPPortHandler, TCPPortHandlerView] { + return views.MapFnOf(v.ж.TCP, func(t *TCPPortHandler) TCPPortHandlerView { + return t.View() + }) +} + +func (v ServiceConfigView) Web() views.MapFn[HostPort, *WebServerConfig, WebServerConfigView] { + return views.MapFnOf(v.ж.Web, func(t *WebServerConfig) WebServerConfigView { + return t.View() + }) +} +func (v ServiceConfigView) Tun() bool { return v.ж.Tun } + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _ServiceConfigViewNeedsRegeneration = ServiceConfig(struct { + TCP map[uint16]*TCPPortHandler + Web map[HostPort]*WebServerConfig + Tun bool +}{}) + // View returns a readonly view of TCPPortHandler. func (p *TCPPortHandler) View() TCPPortHandlerView { return TCPPortHandlerView{ж: p} diff --git a/ipn/serve.go b/ipn/serve.go index 5c0a97ed3..49e0d9fa3 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -24,6 +24,23 @@ func ServeConfigKey(profileID ProfileID) StateKey { return StateKey("_serve/" + profileID) } +// ServiceConfig contains the config information for a single service. +// it contains a bool to indicate if the service is in Tun mode (L3 forwarding). +// If the service is not in Tun mode, the service is configured by the L4 forwarding +// (TCP ports) and/or the L7 forwarding (http handlers) information. +type ServiceConfig struct { + // TCP are the list of TCP port numbers that tailscaled should handle for + // the Tailscale IP addresses. (not subnet routers, etc) + TCP map[uint16]*TCPPortHandler `json:",omitempty"` + + // Web maps from "$SNI_NAME:$PORT" to a set of HTTP handlers + // keyed by mount point ("/", "/foo", etc) + Web map[HostPort]*WebServerConfig `json:",omitempty"` + + // Tun determines if the service should be using L3 forwarding (Tun mode). + Tun bool `json:",omitempty"` +} + // ServeConfig is the JSON type stored in the StateStore for // StateKey "_serve/$PROFILE_ID" as returned by ServeConfigKey. type ServeConfig struct { @@ -35,6 +52,10 @@ type ServeConfig struct { // keyed by mount point ("/", "/foo", etc) Web map[HostPort]*WebServerConfig `json:",omitempty"` + // Services maps from service name to a ServiceConfig. Which describes the + // L3, L4, and L7 forwarding information for the service. + Services map[string]*ServiceConfig `json:",omitempty"` + // AllowFunnel is the set of SNI:port values for which funnel // traffic is allowed, from trusted ingress peers. 
AllowFunnel map[HostPort]bool `json:",omitempty"` From eabb424275c8c90dab8d3e0130edea2de432695e Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 3 Dec 2024 07:01:14 +0000 Subject: [PATCH 0198/1708] cmd/k8s-operator,docs/k8s: run tun mode proxies in privileged containers (#14262) We were previously relying on unintended behaviour by runc where all containers where by default given read/write/mknod permissions for tun devices. This behaviour was removed in https://github.com/opencontainers/runc/pull/3468 and released in runc 1.2. Containerd container runtime, used by Docker and majority of Kubernetes distributions bumped runc to 1.2 in 1.7.24 https://github.com/containerd/containerd/releases/tag/v1.7.24 thus breaking our reference tun mode Tailscale Kubernetes manifests and Kubernetes operator proxies. This PR changes the all Kubernetes container configs that run Tailscale in tun mode to privileged. This should not be a breaking change because all these containers would run in a Pod that already has a privileged init container. Updates tailscale/tailscale#14256 Updates tailscale/tailscale#10814 Signed-off-by: Irbe Krumina --- .../crds/tailscale.com_proxyclasses.yaml | 22 ++++++++++--------- .../deploy/manifests/operator.yaml | 22 ++++++++++--------- cmd/k8s-operator/deploy/manifests/proxy.yaml | 4 +--- cmd/k8s-operator/testutils_test.go | 4 +--- docs/k8s/proxy.yaml | 4 +--- docs/k8s/sidecar.yaml | 4 +--- docs/k8s/subnet.yaml | 4 +--- k8s-operator/api.md | 2 +- .../apis/v1alpha1/types_proxyclass.go | 11 +++++----- 9 files changed, 36 insertions(+), 41 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index 4c24a1633..ad2e8f243 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -1384,11 +1384,12 @@ spec: securityContext: description: |- Container security context. - Security context specified here will override the security context by the operator. - By default the operator: - - sets 'privileged: true' for the init container - - set NET_ADMIN capability for tailscale container for proxies that - are created for Services or Connector. + Security context specified here will override the security context set by the operator. + By default the operator sets the Tailscale container and the Tailscale init container to privileged + for proxies created for Tailscale ingress and egress Service, Connector and ProxyGroup. + You can reduce the permissions of the Tailscale container to cap NET_ADMIN by + installing device plugin in your cluster and configuring the proxies tun device to be created + by the device plugin, see https://github.com/tailscale/tailscale/issues/10814#issuecomment-2479977752 https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context type: object properties: @@ -1707,11 +1708,12 @@ spec: securityContext: description: |- Container security context. - Security context specified here will override the security context by the operator. - By default the operator: - - sets 'privileged: true' for the init container - - set NET_ADMIN capability for tailscale container for proxies that - are created for Services or Connector. + Security context specified here will override the security context set by the operator. 
+ By default the operator sets the Tailscale container and the Tailscale init container to privileged + for proxies created for Tailscale ingress and egress Service, Connector and ProxyGroup. + You can reduce the permissions of the Tailscale container to cap NET_ADMIN by + installing device plugin in your cluster and configuring the proxies tun device to be created + by the device plugin, see https://github.com/tailscale/tailscale/issues/10814#issuecomment-2479977752 https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context type: object properties: diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index f764fc09a..9b90919fb 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -1851,11 +1851,12 @@ spec: securityContext: description: |- Container security context. - Security context specified here will override the security context by the operator. - By default the operator: - - sets 'privileged: true' for the init container - - set NET_ADMIN capability for tailscale container for proxies that - are created for Services or Connector. + Security context specified here will override the security context set by the operator. + By default the operator sets the Tailscale container and the Tailscale init container to privileged + for proxies created for Tailscale ingress and egress Service, Connector and ProxyGroup. + You can reduce the permissions of the Tailscale container to cap NET_ADMIN by + installing device plugin in your cluster and configuring the proxies tun device to be created + by the device plugin, see https://github.com/tailscale/tailscale/issues/10814#issuecomment-2479977752 https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context properties: allowPrivilegeEscalation: @@ -2174,11 +2175,12 @@ spec: securityContext: description: |- Container security context. - Security context specified here will override the security context by the operator. - By default the operator: - - sets 'privileged: true' for the init container - - set NET_ADMIN capability for tailscale container for proxies that - are created for Services or Connector. + Security context specified here will override the security context set by the operator. + By default the operator sets the Tailscale container and the Tailscale init container to privileged + for proxies created for Tailscale ingress and egress Service, Connector and ProxyGroup. 
+ You can reduce the permissions of the Tailscale container to cap NET_ADMIN by + installing device plugin in your cluster and configuring the proxies tun device to be created + by the device plugin, see https://github.com/tailscale/tailscale/issues/10814#issuecomment-2479977752 https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context properties: allowPrivilegeEscalation: diff --git a/cmd/k8s-operator/deploy/manifests/proxy.yaml b/cmd/k8s-operator/deploy/manifests/proxy.yaml index 1ad63c265..3c9a3eaa3 100644 --- a/cmd/k8s-operator/deploy/manifests/proxy.yaml +++ b/cmd/k8s-operator/deploy/manifests/proxy.yaml @@ -39,6 +39,4 @@ spec: fieldRef: fieldPath: metadata.uid securityContext: - capabilities: - add: - - NET_ADMIN + privileged: true diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 8f06f5979..5f016e91d 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -76,9 +76,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"}, }, SecurityContext: &corev1.SecurityContext{ - Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_ADMIN"}, - }, + Privileged: ptr.To(true), }, ImagePullPolicy: "Always", } diff --git a/docs/k8s/proxy.yaml b/docs/k8s/proxy.yaml index 78e97c83b..048fd7a5b 100644 --- a/docs/k8s/proxy.yaml +++ b/docs/k8s/proxy.yaml @@ -53,6 +53,4 @@ spec: fieldRef: fieldPath: metadata.uid securityContext: - capabilities: - add: - - NET_ADMIN + privileged: true diff --git a/docs/k8s/sidecar.yaml b/docs/k8s/sidecar.yaml index 6baa6d545..520e4379a 100644 --- a/docs/k8s/sidecar.yaml +++ b/docs/k8s/sidecar.yaml @@ -35,6 +35,4 @@ spec: fieldRef: fieldPath: metadata.uid securityContext: - capabilities: - add: - - NET_ADMIN + privileged: true diff --git a/docs/k8s/subnet.yaml b/docs/k8s/subnet.yaml index 1af146be6..ef4e4748c 100644 --- a/docs/k8s/subnet.yaml +++ b/docs/k8s/subnet.yaml @@ -37,6 +37,4 @@ spec: fieldRef: fieldPath: metadata.uid securityContext: - capabilities: - add: - - NET_ADMIN + privileged: true diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 640d8fb07..730bed210 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -145,7 +145,7 @@ _Appears in:_ | `image` _string_ | Container image name. By default images are pulled from
docker.io/tailscale/tailscale, but the official images are also
available at ghcr.io/tailscale/tailscale. Specifying image name here
will override any proxy image values specified via the Kubernetes
operator's Helm chart values or PROXY_IMAGE env var in the operator
Deployment.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | | | `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#pullpolicy-v1-core)_ | Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | Enum: [Always Never IfNotPresent]
| | `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#resourcerequirements-v1-core)_ | Container resource requirements.
By default Tailscale Kubernetes operator does not apply any resource
requirements. The amount of resources required will depend on the
amount of resources the operator needs to parse, usage patterns and
cluster size.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources | | | -| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#securitycontext-v1-core)_ | Container security context.
Security context specified here will override the security context by the operator.
By default the operator:
- sets 'privileged: true' for the init container
- set NET_ADMIN capability for tailscale container for proxies that
are created for Services or Connector.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context | | | +| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#securitycontext-v1-core)_ | Container security context.
Security context specified here will override the security context set by the operator.
By default the operator sets the Tailscale container and the Tailscale init container to privileged
for proxies created for Tailscale ingress and egress Service, Connector and ProxyGroup.
You can reduce the permissions of the Tailscale container to cap NET_ADMIN by
installing device plugin in your cluster and configuring the proxies tun device to be created
by the device plugin, see https://github.com/tailscale/tailscale/issues/10814#issuecomment-2479977752
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context | | | | `debug` _[Debug](#debug)_ | Configuration for enabling extra debug information in the container.
Not recommended for production use. | | | diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index 7e408cd0a..71fbf2439 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -206,11 +206,12 @@ type Container struct { // +optional Resources corev1.ResourceRequirements `json:"resources,omitempty"` // Container security context. - // Security context specified here will override the security context by the operator. - // By default the operator: - // - sets 'privileged: true' for the init container - // - set NET_ADMIN capability for tailscale container for proxies that - // are created for Services or Connector. + // Security context specified here will override the security context set by the operator. + // By default the operator sets the Tailscale container and the Tailscale init container to privileged + // for proxies created for Tailscale ingress and egress Service, Connector and ProxyGroup. + // You can reduce the permissions of the Tailscale container to cap NET_ADMIN by + // installing device plugin in your cluster and configuring the proxies tun device to be created + // by the device plugin, see https://github.com/tailscale/tailscale/issues/10814#issuecomment-2479977752 // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context // +optional SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` From 9f9063e624c66d295d286d2f7bc85c02dfd46d4f Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 3 Dec 2024 12:35:25 +0000 Subject: [PATCH 0199/1708] cmd/k8s-operator,k8s-operator,go.mod: optionally create ServiceMonitor (#14248) * cmd/k8s-operator,k8s-operator,go.mod: optionally create ServiceMonitor Adds a new spec.metrics.serviceMonitor field to ProxyClass. If that's set to true (and metrics are enabled), the operator will create a Prometheus ServiceMonitor for each proxy to which the ProxyClass applies. Additionally, create a metrics Service for each proxy that has metrics enabled. 
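For illustration, a minimal sketch (in Go, mirroring the test fixture added in ingress_test.go below) of a ProxyClass that enables metrics together with the new ServiceMonitor option; the name "metrics" is arbitrary:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
)

// metricsProxyClass enables the proxy's user-metrics endpoint and asks the
// operator to create a Prometheus ServiceMonitor for the metrics Service.
// ServiceMonitor.Enable may only be set when Metrics.Enable is also true.
func metricsProxyClass() *tsapi.ProxyClass {
	return &tsapi.ProxyClass{
		ObjectMeta: metav1.ObjectMeta{Name: "metrics"},
		Spec: tsapi.ProxyClassSpec{
			Metrics: &tsapi.Metrics{
				Enable:         true,
				ServiceMonitor: &tsapi.ServiceMonitor{Enable: true},
			},
		},
	}
}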
Updates tailscale/tailscale#11292 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/connector.go | 3 +- cmd/k8s-operator/depaware.txt | 2 +- .../deploy/chart/templates/operator-rbac.yaml | 7 + .../crds/tailscale.com_proxyclasses.yaml | 21 ++ .../deploy/manifests/operator.yaml | 41 +++ cmd/k8s-operator/ingress.go | 3 +- cmd/k8s-operator/ingress_test.go | 122 ++++++++ cmd/k8s-operator/metrics_resources.go | 272 ++++++++++++++++++ cmd/k8s-operator/operator.go | 77 ++++- cmd/k8s-operator/proxyclass.go | 25 +- cmd/k8s-operator/proxyclass_test.go | 23 +- cmd/k8s-operator/proxygroup.go | 17 ++ cmd/k8s-operator/proxygroup_test.go | 32 ++- cmd/k8s-operator/sts.go | 28 +- cmd/k8s-operator/svc.go | 11 +- cmd/k8s-operator/testutils_test.go | 148 ++++++++++ go.mod | 2 +- k8s-operator/api.md | 19 +- k8s-operator/apis/v1alpha1/register.go | 7 + .../apis/v1alpha1/types_proxyclass.go | 17 ++ .../apis/v1alpha1/zz_generated.deepcopy.go | 22 +- 21 files changed, 877 insertions(+), 22 deletions(-) create mode 100644 cmd/k8s-operator/metrics_resources.go diff --git a/cmd/k8s-operator/connector.go b/cmd/k8s-operator/connector.go index 1c1df7c96..1ed6fd155 100644 --- a/cmd/k8s-operator/connector.go +++ b/cmd/k8s-operator/connector.go @@ -189,6 +189,7 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge isExitNode: cn.Spec.ExitNode, }, ProxyClassName: proxyClass, + proxyType: proxyTypeConnector, } if cn.Spec.SubnetRouter != nil && len(cn.Spec.SubnetRouter.AdvertiseRoutes) > 0 { @@ -253,7 +254,7 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge } func (a *ConnectorReconciler) maybeCleanupConnector(ctx context.Context, logger *zap.SugaredLogger, cn *tsapi.Connector) (bool, error) { - if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(cn.Name, a.tsnamespace, "connector")); err != nil { + if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(cn.Name, a.tsnamespace, "connector"), proxyTypeConnector); err != nil { return false, fmt.Errorf("failed to cleanup Connector resources: %w", err) } else if !done { logger.Debugf("Connector cleanup not done yet, waiting for next reconcile") diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 900d10efe..d1d687432 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -378,7 +378,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/api/storage/v1beta1 from k8s.io/client-go/applyconfigurations/storage/v1beta1+ k8s.io/api/storagemigration/v1alpha1 from k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1+ k8s.io/apiextensions-apiserver/pkg/apis/apiextensions from k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 - 💣 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 from sigs.k8s.io/controller-runtime/pkg/webhook/conversion + 💣 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 from sigs.k8s.io/controller-runtime/pkg/webhook/conversion+ k8s.io/apimachinery/pkg/api/equality from k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1+ k8s.io/apimachinery/pkg/api/errors from k8s.io/apimachinery/pkg/util/managedfields/internal+ k8s.io/apimachinery/pkg/api/meta from k8s.io/apimachinery/pkg/api/validation+ diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml index ede61070b..a56edfe0d 100644 --- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml +++ 
b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml @@ -30,6 +30,10 @@ rules: - apiGroups: ["tailscale.com"] resources: ["recorders", "recorders/status"] verbs: ["get", "list", "watch", "update"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch"] + resourceNames: ["servicemonitors.monitoring.coreos.com"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -65,6 +69,9 @@ rules: - apiGroups: ["rbac.authorization.k8s.io"] resources: ["roles", "rolebindings"] verbs: ["get", "create", "patch", "update", "list", "watch"] +- apiGroups: ["monitoring.coreos.com"] + resources: ["servicemonitors"] + verbs: ["get", "list", "update", "create", "delete"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index ad2e8f243..9b45deedb 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -74,6 +74,8 @@ spec: description: |- Setting enable to true will make the proxy serve Tailscale metrics at :9002/metrics. + A metrics Service named -metrics will also be created in the operator's namespace and will + serve the metrics at :9002/metrics. In 1.78.x and 1.80.x, this field also serves as the default value for .spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both @@ -81,6 +83,25 @@ spec: Defaults to false. type: boolean + serviceMonitor: + description: |- + Enable to create a Prometheus ServiceMonitor for scraping the proxy's Tailscale metrics. + The ServiceMonitor will select the metrics Service that gets created when metrics are enabled. + The ingested metrics for each Service monitor will have labels to identify the proxy: + ts_proxy_type: ingress_service|ingress_resource|connector|proxygroup + ts_proxy_parent_name: name of the parent resource (i.e name of the Connector, Tailscale Ingress, Tailscale Service or ProxyGroup) + ts_proxy_parent_namespace: namespace of the parent resource (if the parent resource is not cluster scoped) + job: ts__[]_ + type: object + required: + - enable + properties: + enable: + description: If Enable is set to true, a Prometheus ServiceMonitor will be created. Enable can only be set to true if metrics are enabled. + type: boolean + x-kubernetes-validations: + - rule: '!(has(self.serviceMonitor) && self.serviceMonitor.enable && !self.enable)' + message: ServiceMonitor can only be enabled if metrics are enabled statefulSet: description: |- Configuration parameters for the proxy's StatefulSet. Tailscale diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 9b90919fb..210a7b434 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -541,6 +541,8 @@ spec: description: |- Setting enable to true will make the proxy serve Tailscale metrics at :9002/metrics. + A metrics Service named -metrics will also be created in the operator's namespace and will + serve the metrics at :9002/metrics. In 1.78.x and 1.80.x, this field also serves as the default value for .spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both @@ -548,9 +550,28 @@ spec: Defaults to false. type: boolean + serviceMonitor: + description: |- + Enable to create a Prometheus ServiceMonitor for scraping the proxy's Tailscale metrics. 
+ The ServiceMonitor will select the metrics Service that gets created when metrics are enabled. + The ingested metrics for each Service monitor will have labels to identify the proxy: + ts_proxy_type: ingress_service|ingress_resource|connector|proxygroup + ts_proxy_parent_name: name of the parent resource (i.e name of the Connector, Tailscale Ingress, Tailscale Service or ProxyGroup) + ts_proxy_parent_namespace: namespace of the parent resource (if the parent resource is not cluster scoped) + job: ts__[]_ + properties: + enable: + description: If Enable is set to true, a Prometheus ServiceMonitor will be created. Enable can only be set to true if metrics are enabled. + type: boolean + required: + - enable + type: object required: - enable type: object + x-kubernetes-validations: + - message: ServiceMonitor can only be enabled if metrics are enabled + rule: '!(has(self.serviceMonitor) && self.serviceMonitor.enable && !self.enable)' statefulSet: description: |- Configuration parameters for the proxy's StatefulSet. Tailscale @@ -4648,6 +4669,16 @@ rules: - list - watch - update + - apiGroups: + - apiextensions.k8s.io + resourceNames: + - servicemonitors.monitoring.coreos.com + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -4728,6 +4759,16 @@ rules: - update - list - watch + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - list + - update + - create + - delete --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index acc90d465..40a5d0928 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -90,7 +90,7 @@ func (a *IngressReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare return nil } - if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(ing.Name, ing.Namespace, "ingress")); err != nil { + if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(ing.Name, ing.Namespace, "ingress"), proxyTypeIngressResource); err != nil { return fmt.Errorf("failed to cleanup: %w", err) } else if !done { logger.Debugf("cleanup not done yet, waiting for next reconcile") @@ -268,6 +268,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga Tags: tags, ChildResourceLabels: crl, ProxyClassName: proxyClass, + proxyType: proxyTypeIngressResource, } if val := ing.GetAnnotations()[AnnotationExperimentalForwardClusterTrafficViaL7IngresProxy]; val == "true" { diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index 38a041dde..e695cc649 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -12,6 +12,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -271,3 +272,124 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { opts.proxyClass = "" expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation) } + +func TestTailscaleIngressWithServiceMonitor(t *testing.T) { + pc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{Name: "metrics", Generation: 1}, + Spec: tsapi.ProxyClassSpec{ + Metrics: &tsapi.Metrics{ + Enable: true, + ServiceMonitor: &tsapi.ServiceMonitor{Enable: 
true}, + }, + }, + Status: tsapi.ProxyClassStatus{ + Conditions: []metav1.Condition{{ + Status: metav1.ConditionTrue, + Type: string(tsapi.ProxyClassReady), + ObservedGeneration: 1, + }}}, + } + crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} + tsIngressClass := &networkingv1.IngressClass{ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}} + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(pc, tsIngressClass). + WithStatusSubresource(pc). + Build() + ft := &fakeTSClient{} + fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + ingR := &IngressReconciler{ + Client: fc, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + tsnetServer: fakeTsnetServer, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + logger: zl.Sugar(), + } + // 1. Enable metrics- expect metrics Service to be created + ing := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + // The apiserver is supposed to set the UID, but the fake client + // doesn't. So, set it explicitly because other code later depends + // on it being set. + UID: types.UID("1234-UID"), + Labels: map[string]string{ + "tailscale.com/proxy-class": "metrics", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 8080, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"default-test"}}, + }, + }, + } + mustCreate(t, fc, ing) + mustCreate(t, fc, &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "1.2.3.4", + Ports: []corev1.ServicePort{{ + Port: 8080, + Name: "http"}, + }, + }, + }) + + expectReconciled(t, ingR, "default", "test") + + fullName, shortName := findGenName(t, fc, "default", "test", "ingress") + opts := configOpts{ + stsName: shortName, + secretName: fullName, + namespace: "default", + tailscaleNamespace: "operator-ns", + parentType: "ingress", + hostname: "default-test", + app: kubetypes.AppIngressResource, + enableMetrics: true, + namespaced: true, + proxyType: proxyTypeIngressResource, + } + serveConfig := &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}}, + } + opts.serveConfig = serveConfig + + expectEqual(t, fc, expectedSecret(t, fc, opts), nil) + expectEqual(t, fc, expectedHeadlessService(shortName, "ingress"), nil) + expectEqual(t, fc, expectedMetricsService(opts), nil) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation) + // 2. Enable ServiceMonitor - should not error when there is no ServiceMonitor CRD in cluster + mustUpdate(t, fc, "", "metrics", func(pc *tsapi.ProxyClass) { + pc.Spec.Metrics.ServiceMonitor = &tsapi.ServiceMonitor{Enable: true} + }) + expectReconciled(t, ingR, "default", "test") + // 3. 
Create ServiceMonitor CRD and reconcile- ServiceMonitor should get created + mustCreate(t, fc, crd) + expectReconciled(t, ingR, "default", "test") + expectEqualUnstructured(t, fc, expectedServiceMonitor(t, opts)) +} diff --git a/cmd/k8s-operator/metrics_resources.go b/cmd/k8s-operator/metrics_resources.go new file mode 100644 index 000000000..4881436e8 --- /dev/null +++ b/cmd/k8s-operator/metrics_resources.go @@ -0,0 +1,272 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "fmt" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" +) + +const ( + labelMetricsTarget = "tailscale.com/metrics-target" + + // These labels get transferred from the metrics Service to the ingested Prometheus metrics. + labelPromProxyType = "ts_proxy_type" + labelPromProxyParentName = "ts_proxy_parent_name" + labelPromProxyParentNamespace = "ts_proxy_parent_namespace" + labelPromJob = "ts_prom_job" + + serviceMonitorCRD = "servicemonitors.monitoring.coreos.com" +) + +// ServiceMonitor contains a subset of fields of servicemonitors.monitoring.coreos.com Custom Resource Definition. +// Duplicating it here allows us to avoid importing prometheus-operator library. +// https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L40 +type ServiceMonitor struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec ServiceMonitorSpec `json:"spec"` +} + +// https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L55 +type ServiceMonitorSpec struct { + // Endpoints defines the endpoints to be scraped on the selected Service(s). + // https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L82 + Endpoints []ServiceMonitorEndpoint `json:"endpoints"` + // JobLabel is the label on the Service whose value will become the value of the Prometheus job label for the metrics ingested via this ServiceMonitor. + // https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L66 + JobLabel string `json:"jobLabel"` + // NamespaceSelector selects the namespace of Service(s) that this ServiceMonitor allows to scrape. + // https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L88 + NamespaceSelector ServiceMonitorNamespaceSelector `json:"namespaceSelector,omitempty"` + // Selector is the label selector for Service(s) that this ServiceMonitor allows to scrape. + // https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L85 + Selector metav1.LabelSelector `json:"selector"` + // TargetLabels are labels on the selected Service that should be applied as Prometheus labels to the ingested metrics. 
+ // https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L72 + TargetLabels []string `json:"targetLabels"` +} + +// ServiceMonitorNamespaceSelector selects namespaces in which Prometheus operator will attempt to find Services for +// this ServiceMonitor. +// https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L88 +type ServiceMonitorNamespaceSelector struct { + MatchNames []string `json:"matchNames,omitempty"` +} + +// ServiceMonitorEndpoint defines an endpoint of Service to scrape. We only define port here. Prometheus by default +// scrapes /metrics path, which is what we want. +type ServiceMonitorEndpoint struct { + // Port is the name of the Service port that Prometheus will scrape. + Port string `json:"port,omitempty"` +} + +func reconcileMetricsResources(ctx context.Context, logger *zap.SugaredLogger, opts *metricsOpts, pc *tsapi.ProxyClass, cl client.Client) error { + if opts.proxyType == proxyTypeEgress { + // Metrics are currently not being enabled for standalone egress proxies. + return nil + } + if pc == nil || pc.Spec.Metrics == nil || !pc.Spec.Metrics.Enable { + return maybeCleanupMetricsResources(ctx, opts, cl) + } + metricsSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: metricsResourceName(opts.proxyStsName), + Namespace: opts.tsNamespace, + Labels: metricsResourceLabels(opts), + }, + Spec: corev1.ServiceSpec{ + Selector: opts.proxyLabels, + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{{Protocol: "TCP", Port: 9002, Name: "metrics"}}, + }, + } + var err error + metricsSvc, err = createOrUpdate(ctx, cl, opts.tsNamespace, metricsSvc, func(svc *corev1.Service) { + svc.Spec.Ports = metricsSvc.Spec.Ports + svc.Spec.Selector = metricsSvc.Spec.Selector + }) + if err != nil { + return fmt.Errorf("error ensuring metrics Service: %w", err) + } + + crdExists, err := hasServiceMonitorCRD(ctx, cl) + if err != nil { + return fmt.Errorf("error verifying that %q CRD exists: %w", serviceMonitorCRD, err) + } + if !crdExists { + return nil + } + + if pc.Spec.Metrics.ServiceMonitor == nil || !pc.Spec.Metrics.ServiceMonitor.Enable { + return maybeCleanupServiceMonitor(ctx, cl, opts.proxyStsName, opts.tsNamespace) + } + + logger.Info("ensuring ServiceMonitor for metrics Service %s/%s", metricsSvc.Namespace, metricsSvc.Name) + svcMonitor, err := newServiceMonitor(metricsSvc) + if err != nil { + return fmt.Errorf("error creating ServiceMonitor: %w", err) + } + // We don't use createOrUpdate here because that does not work with unstructured types. We also do not update + // the ServiceMonitor because it is not expected that any of its fields would change. Currently this is good + // enough, but in future we might want to add logic to create-or-update unstructured types. + err = cl.Get(ctx, client.ObjectKeyFromObject(metricsSvc), svcMonitor.DeepCopy()) + if apierrors.IsNotFound(err) { + if err := cl.Create(ctx, svcMonitor); err != nil { + return fmt.Errorf("error creating ServiceMonitor: %w", err) + } + return nil + } + if err != nil { + return fmt.Errorf("error getting ServiceMonitor: %w", err) + } + return nil +} + +// maybeCleanupMetricsResources ensures that any metrics resources created for a proxy are deleted. Only metrics Service +// gets deleted explicitly because the ServiceMonitor has Service's owner reference, so gets garbage collected +// automatically. 
+func maybeCleanupMetricsResources(ctx context.Context, opts *metricsOpts, cl client.Client) error { + sel := metricsSvcSelector(opts.proxyLabels, opts.proxyType) + return cl.DeleteAllOf(ctx, &corev1.Service{}, client.InNamespace(opts.tsNamespace), client.MatchingLabels(sel)) +} + +// maybeCleanupServiceMonitor cleans up any ServiceMonitor created for the named proxy StatefulSet. +func maybeCleanupServiceMonitor(ctx context.Context, cl client.Client, stsName, ns string) error { + smName := metricsResourceName(stsName) + sm := serviceMonitorTemplate(smName, ns) + u, err := serviceMonitorToUnstructured(sm) + if err != nil { + return fmt.Errorf("error building ServiceMonitor: %w", err) + } + err = cl.Get(ctx, types.NamespacedName{Name: smName, Namespace: ns}, u) + if apierrors.IsNotFound(err) { + return nil // nothing to do + } + if err != nil { + return fmt.Errorf("error verifying if ServiceMonitor %s/%s exists: %w", ns, stsName, err) + } + return cl.Delete(ctx, u) +} + +// newServiceMonitor takes a metrics Service created for a proxy and constructs and returns a ServiceMonitor for that +// proxy that can be applied to the kube API server. +// The ServiceMonitor is returned as Unstructured type - this allows us to avoid importing prometheus-operator API server client/schema. +func newServiceMonitor(metricsSvc *corev1.Service) (*unstructured.Unstructured, error) { + sm := serviceMonitorTemplate(metricsSvc.Name, metricsSvc.Namespace) + sm.ObjectMeta.Labels = metricsSvc.Labels + sm.ObjectMeta.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(metricsSvc, corev1.SchemeGroupVersion.WithKind("Service"))} + sm.Spec = ServiceMonitorSpec{ + Selector: metav1.LabelSelector{MatchLabels: metricsSvc.Labels}, + Endpoints: []ServiceMonitorEndpoint{{ + Port: "metrics", + }}, + NamespaceSelector: ServiceMonitorNamespaceSelector{ + MatchNames: []string{metricsSvc.Namespace}, + }, + JobLabel: labelPromJob, + TargetLabels: []string{ + labelPromProxyParentName, + labelPromProxyParentNamespace, + labelPromProxyType, + }, + } + return serviceMonitorToUnstructured(sm) +} + +// serviceMonitorToUnstructured takes a ServiceMonitor and converts it to Unstructured type that can be used by the c/r +// client in Kubernetes API server calls. +func serviceMonitorToUnstructured(sm *ServiceMonitor) (*unstructured.Unstructured, error) { + contents, err := runtime.DefaultUnstructuredConverter.ToUnstructured(sm) + if err != nil { + return nil, fmt.Errorf("error converting ServiceMonitor to Unstructured: %w", err) + } + u := &unstructured.Unstructured{} + u.SetUnstructuredContent(contents) + u.SetGroupVersionKind(sm.GroupVersionKind()) + return u, nil +} + +// metricsResourceName returns name for metrics Service and ServiceMonitor for a proxy StatefulSet. +func metricsResourceName(stsName string) string { + // Maximum length of StatefulSet name if 52 chars, so this is fine. + return fmt.Sprintf("%s-metrics", stsName) +} + +// metricsResourceLabels constructs labels that will be applied to metrics Service and metrics ServiceMonitor for a +// proxy. +func metricsResourceLabels(opts *metricsOpts) map[string]string { + lbls := map[string]string{ + LabelManaged: "true", + labelMetricsTarget: opts.proxyStsName, + labelPromProxyType: opts.proxyType, + labelPromProxyParentName: opts.proxyLabels[LabelParentName], + } + // Include namespace label for proxies created for a namespaced type. 
+ if isNamespacedProxyType(opts.proxyType) { + lbls[labelPromProxyParentNamespace] = opts.proxyLabels[LabelParentNamespace] + } + lbls[labelPromJob] = promJobName(opts) + return lbls +} + +// promJobName constructs the value of the Prometheus job label that will apply to all metrics for a ServiceMonitor. +func promJobName(opts *metricsOpts) string { + // Include parent resource namespace for proxies created for namespaced types. + if opts.proxyType == proxyTypeIngressResource || opts.proxyType == proxyTypeIngressService { + return fmt.Sprintf("ts_%s_%s_%s", opts.proxyType, opts.proxyLabels[LabelParentNamespace], opts.proxyLabels[LabelParentName]) + } + return fmt.Sprintf("ts_%s_%s", opts.proxyType, opts.proxyLabels[LabelParentName]) +} + +// metricsSvcSelector returns the minimum label set to uniquely identify a metrics Service for a proxy. +func metricsSvcSelector(proxyLabels map[string]string, proxyType string) map[string]string { + sel := map[string]string{ + labelPromProxyType: proxyType, + labelPromProxyParentName: proxyLabels[LabelParentName], + } + // Include namespace label for proxies created for a namespaced type. + if isNamespacedProxyType(proxyType) { + sel[labelPromProxyParentNamespace] = proxyLabels[LabelParentNamespace] + } + return sel +} + +// serviceMonitorTemplate returns a base ServiceMonitor type that, when converted to Unstructured, is a valid type that +// can be used in kube API server calls via the c/r client. +func serviceMonitorTemplate(name, ns string) *ServiceMonitor { + return &ServiceMonitor{ + TypeMeta: metav1.TypeMeta{ + Kind: "ServiceMonitor", + APIVersion: "monitoring.coreos.com/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + } +} + +type metricsOpts struct { + proxyStsName string // name of StatefulSet for proxy + tsNamespace string // namespace in which Tailscale is installed + proxyLabels map[string]string // labels of the proxy StatefulSet + proxyType string +} + +func isNamespacedProxyType(typ string) bool { + return typ == proxyTypeIngressResource || typ == proxyTypeIngressService +} diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 116ba02e0..ebb2c4578 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -24,8 +24,11 @@ import ( discoveryv1 "k8s.io/api/discovery/v1" networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" + toolscache "k8s.io/client-go/tools/cache" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" @@ -239,21 +242,29 @@ func runReconcilers(opts reconcilerOpts) { nsFilter := cache.ByObject{ Field: client.InNamespace(opts.tailscaleNamespace).AsSelector(), } + // We watch the ServiceMonitor CRD to ensure that reconcilers are re-triggered if user's workflows result in the + // ServiceMonitor CRD applied after some of our resources that define ServiceMonitor creation. This selector + // ensures that we only watch the ServiceMonitor CRD and that we don't cache full contents of it. + serviceMonitorSelector := cache.ByObject{ + Field: fields.SelectorFromSet(fields.Set{"metadata.name": serviceMonitorCRD}), + Transform: crdTransformer(startlog), + } mgrOpts := manager.Options{ // TODO (irbekrm): stricter filtering what we watch/cache/call // reconcilers on. 
c/r by default starts a watch on any // resources that we GET via the controller manager's client. Cache: cache.Options{ ByObject: map[client.Object]cache.ByObject{ - &corev1.Secret{}: nsFilter, - &corev1.ServiceAccount{}: nsFilter, - &corev1.Pod{}: nsFilter, - &corev1.ConfigMap{}: nsFilter, - &appsv1.StatefulSet{}: nsFilter, - &appsv1.Deployment{}: nsFilter, - &discoveryv1.EndpointSlice{}: nsFilter, - &rbacv1.Role{}: nsFilter, - &rbacv1.RoleBinding{}: nsFilter, + &corev1.Secret{}: nsFilter, + &corev1.ServiceAccount{}: nsFilter, + &corev1.Pod{}: nsFilter, + &corev1.ConfigMap{}: nsFilter, + &appsv1.StatefulSet{}: nsFilter, + &appsv1.Deployment{}: nsFilter, + &discoveryv1.EndpointSlice{}: nsFilter, + &rbacv1.Role{}: nsFilter, + &rbacv1.RoleBinding{}: nsFilter, + &apiextensionsv1.CustomResourceDefinition{}: serviceMonitorSelector, }, }, Scheme: tsapi.GlobalScheme, @@ -422,8 +433,13 @@ func runReconcilers(opts reconcilerOpts) { startlog.Fatalf("could not create egress EndpointSlices reconciler: %v", err) } + // ProxyClass reconciler gets triggered on ServiceMonitor CRD changes to ensure that any ProxyClasses, that + // define that a ServiceMonitor should be created, were set to invalid because the CRD did not exist get + // reconciled if the CRD is applied at a later point. + serviceMonitorFilter := handler.EnqueueRequestsFromMapFunc(proxyClassesWithServiceMonitor(mgr.GetClient(), opts.log)) err = builder.ControllerManagedBy(mgr). For(&tsapi.ProxyClass{}). + Watches(&apiextensionsv1.CustomResourceDefinition{}, serviceMonitorFilter). Complete(&ProxyClassReconciler{ Client: mgr.GetClient(), recorder: eventRecorder, @@ -1018,6 +1034,49 @@ func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger, ns } } +// proxyClassesWithServiceMonitor returns an event handler that, given that the event is for the Prometheus +// ServiceMonitor CRD, returns all ProxyClasses that define that a ServiceMonitor should be created. +func proxyClassesWithServiceMonitor(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + crd, ok := o.(*apiextensionsv1.CustomResourceDefinition) + if !ok { + logger.Debugf("[unexpected] ServiceMonitor CRD handler received an object that is not a CustomResourceDefinition") + return nil + } + if crd.Name != serviceMonitorCRD { + logger.Debugf("[unexpected] ServiceMonitor CRD handler received an unexpected CRD %q", crd.Name) + return nil + } + pcl := &tsapi.ProxyClassList{} + if err := cl.List(ctx, pcl); err != nil { + logger.Debugf("[unexpected] error listing ProxyClasses: %v", err) + return nil + } + reqs := make([]reconcile.Request, 0) + for _, pc := range pcl.Items { + if pc.Spec.Metrics != nil && pc.Spec.Metrics.ServiceMonitor != nil && pc.Spec.Metrics.ServiceMonitor.Enable { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: pc.Namespace, Name: pc.Name}, + }) + } + } + return reqs + } +} + +// crdTransformer gets called before a CRD is stored to c/r cache, it removes the CRD spec to reduce memory consumption. 
+func crdTransformer(log *zap.SugaredLogger) toolscache.TransformFunc { + return func(o any) (any, error) { + crd, ok := o.(*apiextensionsv1.CustomResourceDefinition) + if !ok { + log.Infof("[unexpected] CRD transformer called for a non-CRD type") + return crd, nil + } + crd.Spec = apiextensionsv1.CustomResourceDefinitionSpec{} + return crd, nil + } +} + // indexEgressServices adds a local index to a cached Tailscale egress Services meant to be exposed on a ProxyGroup. The // index is used a list filter. func indexEgressServices(o client.Object) []string { diff --git a/cmd/k8s-operator/proxyclass.go b/cmd/k8s-operator/proxyclass.go index 13f217f3c..ad3cfc9fd 100644 --- a/cmd/k8s-operator/proxyclass.go +++ b/cmd/k8s-operator/proxyclass.go @@ -15,6 +15,7 @@ import ( dockerref "github.com/distribution/reference" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" apivalidation "k8s.io/apimachinery/pkg/api/validation" @@ -95,7 +96,7 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re pcr.mu.Unlock() oldPCStatus := pc.Status.DeepCopy() - if errs := pcr.validate(pc); errs != nil { + if errs := pcr.validate(ctx, pc); errs != nil { msg := fmt.Sprintf(messageProxyClassInvalid, errs.ToAggregate().Error()) pcr.recorder.Event(pc, corev1.EventTypeWarning, reasonProxyClassInvalid, msg) tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, pc.Generation, pcr.clock, logger) @@ -111,7 +112,7 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re return reconcile.Result{}, nil } -func (pcr *ProxyClassReconciler) validate(pc *tsapi.ProxyClass) (violations field.ErrorList) { +func (pcr *ProxyClassReconciler) validate(ctx context.Context, pc *tsapi.ProxyClass) (violations field.ErrorList) { if sts := pc.Spec.StatefulSet; sts != nil { if len(sts.Labels) > 0 { if errs := metavalidation.ValidateLabels(sts.Labels, field.NewPath(".spec.statefulSet.labels")); errs != nil { @@ -167,6 +168,16 @@ func (pcr *ProxyClassReconciler) validate(pc *tsapi.ProxyClass) (violations fiel } } } + if pc.Spec.Metrics != nil && pc.Spec.Metrics.ServiceMonitor != nil && pc.Spec.Metrics.ServiceMonitor.Enable { + found, err := hasServiceMonitorCRD(ctx, pcr.Client) + if err != nil { + pcr.logger.Infof("[unexpected]: error retrieving %q CRD: %v", serviceMonitorCRD, err) + // best effort validation - don't error out here + } else if !found { + msg := fmt.Sprintf("ProxyClass defines that a ServiceMonitor custom resource should be created, but %q CRD was not found", serviceMonitorCRD) + violations = append(violations, field.TypeInvalid(field.NewPath("spec", "metrics", "serviceMonitor"), "enable", msg)) + } + } // We do not validate embedded fields (security context, resource // requirements etc) as we inherit upstream validation for those fields. 
// Invalid values would get rejected by upstream validations at apply @@ -174,6 +185,16 @@ func (pcr *ProxyClassReconciler) validate(pc *tsapi.ProxyClass) (violations fiel return violations } +func hasServiceMonitorCRD(ctx context.Context, cl client.Client) (bool, error) { + sm := &apiextensionsv1.CustomResourceDefinition{} + if err := cl.Get(ctx, types.NamespacedName{Name: serviceMonitorCRD}, sm); apierrors.IsNotFound(err) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + // maybeCleanup removes tailscale.com finalizer and ensures that the ProxyClass // is no longer counted towards k8s_proxyclass_resources. func (pcr *ProxyClassReconciler) maybeCleanup(ctx context.Context, logger *zap.SugaredLogger, pc *tsapi.ProxyClass) error { diff --git a/cmd/k8s-operator/proxyclass_test.go b/cmd/k8s-operator/proxyclass_test.go index fb17f5fe5..e6e16e9f9 100644 --- a/cmd/k8s-operator/proxyclass_test.go +++ b/cmd/k8s-operator/proxyclass_test.go @@ -8,10 +8,12 @@ package main import ( + "context" "testing" "time" "go.uber.org/zap" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -134,6 +136,25 @@ func TestProxyClass(t *testing.T) { "Warning CustomTSEnvVar ProxyClass overrides the default value for EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS env var for tailscale container. Running with custom values for Tailscale env vars is not recommended and might break in the future."} expectReconciled(t, pcr, "", "test") expectEvents(t, fr, expectedEvents) + + // 6. A ProxyClass with ServiceMonitor enabled and in a cluster that has not ServiceMonitor CRD is invalid + pc.Spec.Metrics = &tsapi.Metrics{Enable: true, ServiceMonitor: &tsapi.ServiceMonitor{Enable: true}} + mustUpdate(t, fc, "", "test", func(proxyClass *tsapi.ProxyClass) { + proxyClass.Spec = pc.Spec + }) + expectReconciled(t, pcr, "", "test") + msg = `ProxyClass is not valid: spec.metrics.serviceMonitor: Invalid value: "enable": ProxyClass defines that a ServiceMonitor custom resource should be created, but "servicemonitors.monitoring.coreos.com" CRD was not found` + tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) + expectEqual(t, fc, pc, nil) + expectedEvent = "Warning ProxyClassInvalid " + msg + expectEvents(t, fr, []string{expectedEvent}) + + // 7. 
A ProxyClass with ServiceMonitor enabled and in a cluster that does have the ServiceMonitor CRD is valid + crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} + mustCreate(t, fc, crd) + expectReconciled(t, pcr, "", "test") + tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionTrue, reasonProxyClassValid, reasonProxyClassValid, 0, cl, zl.Sugar()) + expectEqual(t, fc, pc, nil) } func TestValidateProxyClass(t *testing.T) { @@ -180,7 +201,7 @@ func TestValidateProxyClass(t *testing.T) { } { t.Run(name, func(t *testing.T) { pcr := &ProxyClassReconciler{} - err := pcr.validate(tc.pc) + err := pcr.validate(context.Background(), tc.pc) valid := err == nil if valid != tc.valid { t.Errorf("expected valid=%v, got valid=%v, err=%v", tc.valid, valid, err) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 6b7672466..1aefbd2f6 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -259,6 +259,15 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro }); err != nil { return fmt.Errorf("error provisioning StatefulSet: %w", err) } + mo := &metricsOpts{ + tsNamespace: r.tsNamespace, + proxyStsName: pg.Name, + proxyLabels: pgLabels(pg.Name, nil), + proxyType: "proxygroup", + } + if err := reconcileMetricsResources(ctx, logger, mo, proxyClass, r.Client); err != nil { + return fmt.Errorf("error reconciling metrics resources: %w", err) + } if err := r.cleanupDanglingResources(ctx, pg); err != nil { return fmt.Errorf("error cleaning up dangling resources: %w", err) @@ -327,6 +336,14 @@ func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, pg *tsapi.Proxy } } + mo := &metricsOpts{ + proxyLabels: pgLabels(pg.Name, nil), + tsNamespace: r.tsNamespace, + proxyType: "proxygroup"} + if err := maybeCleanupMetricsResources(ctx, mo, r.Client); err != nil { + return false, fmt.Errorf("error cleaning up metrics resources: %w", err) + } + logger.Infof("cleaned up ProxyGroup resources") r.mu.Lock() r.proxyGroups.Remove(pg.UID) diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 23f50cc7a..9c4df9e4f 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -17,6 +17,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" @@ -76,6 +77,13 @@ func TestProxyGroup(t *testing.T) { l: zl.Sugar(), clock: cl, } + crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} + opts := configOpts{ + proxyType: "proxygroup", + stsName: pg.Name, + parentType: "proxygroup", + tailscaleNamespace: "tailscale", + } t.Run("proxyclass_not_ready", func(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) @@ -190,6 +198,27 @@ func TestProxyGroup(t *testing.T) { expectProxyGroupResources(t, fc, pg, true, "518a86e9fae64f270f8e0ec2a2ea6ca06c10f725035d3d6caca132cd61e42a74") }) + t.Run("enable_metrics", func(t *testing.T) { + pc.Spec.Metrics = &tsapi.Metrics{Enable: true} + mustUpdate(t, fc, "", pc.Name, func(p *tsapi.ProxyClass) { + p.Spec = pc.Spec + }) + expectReconciled(t, reconciler, "", pg.Name) + expectEqual(t, fc, expectedMetricsService(opts), nil) + }) + t.Run("enable_service_monitor_no_crd", func(t 
*testing.T) { + pc.Spec.Metrics.ServiceMonitor = &tsapi.ServiceMonitor{Enable: true} + mustUpdate(t, fc, "", pc.Name, func(p *tsapi.ProxyClass) { + p.Spec.Metrics = pc.Spec.Metrics + }) + expectReconciled(t, reconciler, "", pg.Name) + }) + t.Run("create_crd_expect_service_monitor", func(t *testing.T) { + mustCreate(t, fc, crd) + expectReconciled(t, reconciler, "", pg.Name) + expectEqualUnstructured(t, fc, expectedServiceMonitor(t, opts)) + }) + t.Run("delete_and_cleanup", func(t *testing.T) { if err := fc.Delete(context.Background(), pg); err != nil { t.Fatal(err) @@ -197,7 +226,7 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) - expectMissing[tsapi.Recorder](t, fc, "", pg.Name) + expectMissing[tsapi.ProxyGroup](t, fc, "", pg.Name) if expected := 0; reconciler.proxyGroups.Len() != expected { t.Fatalf("expected %d ProxyGroups, got %d", expected, reconciler.proxyGroups.Len()) } @@ -206,6 +235,7 @@ func TestProxyGroup(t *testing.T) { if diff := cmp.Diff(tsClient.deleted, []string{"nodeid-1", "nodeid-2", "nodeid-0"}); diff != "" { t.Fatalf("unexpected deleted devices (-got +want):\n%s", diff) } + expectMissing[corev1.Service](t, reconciler, "tailscale", metricsResourceName(pg.Name)) // The fake client does not clean up objects whose owner has been // deleted, so we can't test for the owned resources getting deleted. }) diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 73c54a93d..5de30154c 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -94,6 +94,12 @@ const ( podAnnotationLastSetTailnetTargetFQDN = "tailscale.com/operator-last-set-ts-tailnet-target-fqdn" // podAnnotationLastSetConfigFileHash is sha256 hash of the current tailscaled configuration contents. podAnnotationLastSetConfigFileHash = "tailscale.com/operator-last-set-config-file-hash" + + proxyTypeEgress = "egress_service" + proxyTypeIngressService = "ingress_service" + proxyTypeIngressResource = "ingress_resource" + proxyTypeConnector = "connector" + proxyTypeProxyGroup = "proxygroup" ) var ( @@ -122,6 +128,8 @@ type tailscaleSTSConfig struct { Hostname string Tags []string // if empty, use defaultTags + proxyType string + // Connector specifies a configuration of a Connector instance if that's // what this StatefulSet should be created for. Connector *connector @@ -197,14 +205,22 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga if err != nil { return nil, fmt.Errorf("failed to reconcile statefulset: %w", err) } - + mo := &metricsOpts{ + proxyStsName: hsvc.Name, + tsNamespace: hsvc.Namespace, + proxyLabels: hsvc.Labels, + proxyType: sts.proxyType, + } + if err = reconcileMetricsResources(ctx, logger, mo, sts.ProxyClass, a.Client); err != nil { + return nil, fmt.Errorf("failed to ensure metrics resources: %w", err) + } return hsvc, nil } // Cleanup removes all resources associated that were created by Provision with // the given labels. It returns true when all resources have been removed, // otherwise it returns false and the caller should retry later. -func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.SugaredLogger, labels map[string]string) (done bool, _ error) { +func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.SugaredLogger, labels map[string]string, typ string) (done bool, _ error) { // Need to delete the StatefulSet first, and delete it with foreground // cascading deletion. 
That way, the pod that's writing to the Secret will // stop running before we start looking at the Secret's contents, and @@ -257,6 +273,14 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare return false, err } } + mo := &metricsOpts{ + proxyLabels: labels, + tsNamespace: a.operatorNamespace, + proxyType: typ, + } + if err := maybeCleanupMetricsResources(ctx, mo, a.Client); err != nil { + return false, fmt.Errorf("error cleaning up metrics resources: %w", err) + } return true, nil } diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index 3c6bc27a9..6afc56f97 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -152,7 +152,12 @@ func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare return nil } - if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(svc.Name, svc.Namespace, "svc")); err != nil { + proxyTyp := proxyTypeEgress + if a.shouldExpose(svc) { + proxyTyp = proxyTypeIngressService + } + + if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(svc.Name, svc.Namespace, "svc"), proxyTyp); err != nil { return fmt.Errorf("failed to cleanup: %w", err) } else if !done { logger.Debugf("cleanup not done yet, waiting for next reconcile") @@ -256,6 +261,10 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga ChildResourceLabels: crl, ProxyClassName: proxyClass, } + sts.proxyType = proxyTypeEgress + if a.shouldExpose(svc) { + sts.proxyType = proxyTypeIngressService + } a.mu.Lock() if a.shouldExposeClusterIP(svc) { diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 5f016e91d..f6ae29b62 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -8,6 +8,7 @@ package main import ( "context" "encoding/json" + "fmt" "net/netip" "reflect" "strings" @@ -21,6 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" @@ -39,7 +41,10 @@ type configOpts struct { secretName string hostname string namespace string + tailscaleNamespace string + namespaced bool parentType string + proxyType string priorityClassName string firewallMode string tailnetTargetIP string @@ -56,6 +61,7 @@ type configOpts struct { app string shouldRemoveAuthKey bool secretExtraData map[string][]byte + enableMetrics bool } func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.StatefulSet { @@ -150,6 +156,29 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef Name: "TS_INTERNAL_APP", Value: opts.app, }) + if opts.enableMetrics { + tsContainer.Env = append(tsContainer.Env, + corev1.EnvVar{ + Name: "TS_DEBUG_ADDR_PORT", + Value: "$(POD_IP):9001"}, + corev1.EnvVar{ + Name: "TS_TAILSCALED_EXTRA_ARGS", + Value: "--debug=$(TS_DEBUG_ADDR_PORT)", + }, + corev1.EnvVar{ + Name: "TS_LOCAL_ADDR_PORT", + Value: "$(POD_IP):9002", + }, + corev1.EnvVar{ + Name: "TS_ENABLE_METRICS", + Value: "true", + }, + ) + tsContainer.Ports = append(tsContainer.Ports, + corev1.ContainerPort{Name: "debug", ContainerPort: 9001, Protocol: "TCP"}, + corev1.ContainerPort{Name: "metrics", ContainerPort: 9002, Protocol: "TCP"}, + ) + } ss := &appsv1.StatefulSet{ TypeMeta: metav1.TypeMeta{ Kind: "StatefulSet", @@ -241,6 +270,29 @@ func expectedSTSUserspace(t *testing.T, cl 
client.Client, opts configOpts) *apps {Name: "serve-config", ReadOnly: true, MountPath: "/etc/tailscaled"}, }, } + if opts.enableMetrics { + tsContainer.Env = append(tsContainer.Env, + corev1.EnvVar{ + Name: "TS_DEBUG_ADDR_PORT", + Value: "$(POD_IP):9001"}, + corev1.EnvVar{ + Name: "TS_TAILSCALED_EXTRA_ARGS", + Value: "--debug=$(TS_DEBUG_ADDR_PORT)", + }, + corev1.EnvVar{ + Name: "TS_LOCAL_ADDR_PORT", + Value: "$(POD_IP):9002", + }, + corev1.EnvVar{ + Name: "TS_ENABLE_METRICS", + Value: "true", + }, + ) + tsContainer.Ports = append(tsContainer.Ports, corev1.ContainerPort{ + Name: "debug", ContainerPort: 9001, Protocol: "TCP"}, + corev1.ContainerPort{Name: "metrics", ContainerPort: 9002, Protocol: "TCP"}, + ) + } volumes := []corev1.Volume{ { Name: "tailscaledconfig", @@ -335,6 +387,87 @@ func expectedHeadlessService(name string, parentType string) *corev1.Service { } } +func expectedMetricsService(opts configOpts) *corev1.Service { + labels := metricsLabels(opts) + selector := map[string]string{ + "tailscale.com/managed": "true", + "tailscale.com/parent-resource": "test", + "tailscale.com/parent-resource-type": opts.parentType, + } + if opts.namespaced { + selector["tailscale.com/parent-resource-ns"] = opts.namespace + } + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: metricsResourceName(opts.stsName), + Namespace: opts.tailscaleNamespace, + Labels: labels, + }, + Spec: corev1.ServiceSpec{ + Selector: selector, + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{{Protocol: "TCP", Port: 9002, Name: "metrics"}}, + }, + } +} + +func metricsLabels(opts configOpts) map[string]string { + promJob := fmt.Sprintf("ts_%s_default_test", opts.proxyType) + if !opts.namespaced { + promJob = fmt.Sprintf("ts_%s_test", opts.proxyType) + } + labels := map[string]string{ + "tailscale.com/managed": "true", + "tailscale.com/metrics-target": opts.stsName, + "ts_prom_job": promJob, + "ts_proxy_type": opts.proxyType, + "ts_proxy_parent_name": "test", + } + if opts.namespaced { + labels["ts_proxy_parent_namespace"] = "default" + } + return labels +} + +func expectedServiceMonitor(t *testing.T, opts configOpts) *unstructured.Unstructured { + t.Helper() + labels := metricsLabels(opts) + name := metricsResourceName(opts.stsName) + sm := &ServiceMonitor{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: opts.tailscaleNamespace, + Labels: labels, + ResourceVersion: "1", + OwnerReferences: []metav1.OwnerReference{{APIVersion: "v1", Kind: "Service", Name: name, BlockOwnerDeletion: ptr.To(true), Controller: ptr.To(true)}}, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "ServiceMonitor", + APIVersion: "monitoring.coreos.com/v1", + }, + Spec: ServiceMonitorSpec{ + Selector: metav1.LabelSelector{MatchLabels: labels}, + Endpoints: []ServiceMonitorEndpoint{{ + Port: "metrics", + }}, + NamespaceSelector: ServiceMonitorNamespaceSelector{ + MatchNames: []string{opts.tailscaleNamespace}, + }, + JobLabel: "ts_prom_job", + TargetLabels: []string{ + "ts_proxy_parent_name", + "ts_proxy_parent_namespace", + "ts_proxy_type", + }, + }, + } + u, err := serviceMonitorToUnstructured(sm) + if err != nil { + t.Fatalf("error converting ServiceMonitor to unstructured: %v", err) + } + return u +} + func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Secret { t.Helper() s := &corev1.Secret{ @@ -502,6 +635,21 @@ func expectEqual[T any, O ptrObject[T]](t *testing.T, client client.Client, want } } +func expectEqualUnstructured(t *testing.T, client client.Client, want 
*unstructured.Unstructured) { + t.Helper() + got := &unstructured.Unstructured{} + got.SetGroupVersionKind(want.GroupVersionKind()) + if err := client.Get(context.Background(), types.NamespacedName{ + Name: want.GetName(), + Namespace: want.GetNamespace(), + }, got); err != nil { + t.Fatalf("getting %q: %v", want.GetName(), err) + } + if diff := cmp.Diff(got, want); diff != "" { + t.Fatalf("unexpected contents of Unstructured (-got +want):\n%s", diff) + } +} + func expectMissing[T any, O ptrObject[T]](t *testing.T, client client.Client, ns, name string) { t.Helper() obj := O(new(T)) diff --git a/go.mod b/go.mod index 92ba6b9c7..1924e93ed 100644 --- a/go.mod +++ b/go.mod @@ -396,7 +396,7 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 howett.net/plist v1.0.0 // indirect - k8s.io/apiextensions-apiserver v0.30.3 // indirect + k8s.io/apiextensions-apiserver v0.30.3 k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 730bed210..08e1284fe 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -326,7 +326,8 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `enable` _boolean_ | Setting enable to true will make the proxy serve Tailscale metrics
at <pod_ip>:9002/metrics.
In 1.78.x and 1.80.x, this field also serves as the default value for
.spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both
fields will independently default to false.
Defaults to false. | | | +| `enable` _boolean_ | Setting enable to true will make the proxy serve Tailscale metrics
at <pod_ip>:9002/metrics.
A metrics Service named -metrics will also be created in the operator's namespace and will
serve the metrics at :9002/metrics.
In 1.78.x and 1.80.x, this field also serves as the default value for
.spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both
fields will independently default to false.
Defaults to false. | | | +| `serviceMonitor` _[ServiceMonitor](#servicemonitor)_ | Enable to create a Prometheus ServiceMonitor for scraping the proxy's Tailscale metrics.
The ServiceMonitor will select the metrics Service that gets created when metrics are enabled.
The ingested metrics for each Service monitor will have labels to identify the proxy:
ts_proxy_type: ingress_service\|ingress_resource\|connector\|proxygroup
ts_proxy_parent_name: name of the parent resource (i.e. name of the Connector, Tailscale Ingress, Tailscale Service or ProxyGroup)
ts_proxy_parent_namespace: namespace of the parent resource (if the parent resource is not cluster scoped)
job: ts__[]_ | | | #### Name @@ -836,6 +837,22 @@ _Appears in:_ | `name` _string_ | The name of a Kubernetes Secret in the operator's namespace that contains
credentials for writing to the configured bucket. Each key-value pair
from the secret's data will be mounted as an environment variable. It
should include keys for AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY if
using a static access key. | | | +#### ServiceMonitor + + + + + + + +_Appears in:_ +- [Metrics](#metrics) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `enable` _boolean_ | If Enable is set to true, a Prometheus ServiceMonitor will be created. Enable can only be set to true if metrics are enabled. | | | + + #### StatefulSet diff --git a/k8s-operator/apis/v1alpha1/register.go b/k8s-operator/apis/v1alpha1/register.go index 70b411d12..0880ac975 100644 --- a/k8s-operator/apis/v1alpha1/register.go +++ b/k8s-operator/apis/v1alpha1/register.go @@ -10,6 +10,7 @@ import ( "tailscale.com/k8s-operator/apis" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -39,12 +40,18 @@ func init() { localSchemeBuilder.Register(addKnownTypes) GlobalScheme = runtime.NewScheme() + // Add core types if err := scheme.AddToScheme(GlobalScheme); err != nil { panic(fmt.Sprintf("failed to add k8s.io scheme: %s", err)) } + // Add tailscale.com types if err := AddToScheme(GlobalScheme); err != nil { panic(fmt.Sprintf("failed to add tailscale.com scheme: %s", err)) } + // Add apiextensions types (CustomResourceDefinitions/CustomResourceDefinitionLists) + if err := apiextensionsv1.AddToScheme(GlobalScheme); err != nil { + panic(fmt.Sprintf("failed to add apiextensions.k8s.io scheme: %s", err)) + } } // Adds the list of known types to api.Scheme. diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index 71fbf2439..ef9a071d0 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -161,9 +161,12 @@ type Pod struct { TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` } +// +kubebuilder:validation:XValidation:rule="!(has(self.serviceMonitor) && self.serviceMonitor.enable && !self.enable)",message="ServiceMonitor can only be enabled if metrics are enabled" type Metrics struct { // Setting enable to true will make the proxy serve Tailscale metrics // at :9002/metrics. + // A metrics Service named -metrics will also be created in the operator's namespace and will + // serve the metrics at :9002/metrics. // // In 1.78.x and 1.80.x, this field also serves as the default value for // .spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both @@ -171,6 +174,20 @@ type Metrics struct { // // Defaults to false. Enable bool `json:"enable"` + // Enable to create a Prometheus ServiceMonitor for scraping the proxy's Tailscale metrics. + // The ServiceMonitor will select the metrics Service that gets created when metrics are enabled. + // The ingested metrics for each Service monitor will have labels to identify the proxy: + // ts_proxy_type: ingress_service|ingress_resource|connector|proxygroup + // ts_proxy_parent_name: name of the parent resource (i.e name of the Connector, Tailscale Ingress, Tailscale Service or ProxyGroup) + // ts_proxy_parent_namespace: namespace of the parent resource (if the parent resource is not cluster scoped) + // job: ts__[]_ + // +optional + ServiceMonitor *ServiceMonitor `json:"serviceMonitor"` +} + +type ServiceMonitor struct { + // If Enable is set to true, a Prometheus ServiceMonitor will be created. Enable can only be set to true if metrics are enabled. 
+ Enable bool `json:"enable"` } type Container struct { diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index 07e46f3f5..29c71cb90 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -319,6 +319,11 @@ func (in *Env) DeepCopy() *Env { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Metrics) DeepCopyInto(out *Metrics) { *out = *in + if in.ServiceMonitor != nil { + in, out := &in.ServiceMonitor, &out.ServiceMonitor + *out = new(ServiceMonitor) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metrics. @@ -526,7 +531,7 @@ func (in *ProxyClassSpec) DeepCopyInto(out *ProxyClassSpec) { if in.Metrics != nil { in, out := &in.Metrics, &out.Metrics *out = new(Metrics) - **out = **in + (*in).DeepCopyInto(*out) } if in.TailscaleConfig != nil { in, out := &in.TailscaleConfig, &out.TailscaleConfig @@ -991,6 +996,21 @@ func (in *S3Secret) DeepCopy() *S3Secret { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceMonitor) DeepCopyInto(out *ServiceMonitor) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitor. +func (in *ServiceMonitor) DeepCopy() *ServiceMonitor { + if in == nil { + return nil + } + out := new(ServiceMonitor) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { *out = *in From efdfd547979fc09ea30d96bf31fcc06cadc538f3 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 3 Dec 2024 15:02:42 +0000 Subject: [PATCH 0200/1708] cmd/k8s-operator: avoid port collision with metrics endpoint (#14185) When the operator enables metrics on a proxy, it uses the port 9001, and in the near future it will start using 9002 for the debug endpoint as well. Make sure we don't choose ports from a range that includes 9001 so that we never clash. Setting TS_SOCKS5_SERVER, TS_HEALTHCHECK_ADDR_PORT, TS_OUTBOUND_HTTP_PROXY_LISTEN, and PORT could also open arbitrary ports, so we will need to document that users should not choose ports from the 10000-11000 range for those settings. Updates #13406 Signed-off-by: Tom Proctor --- cmd/k8s-operator/egress-services.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index a562f0170..17746b470 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -51,12 +51,12 @@ const ( labelSvcType = "tailscale.com/svc-type" // ingress or egress typeEgress = "egress" // maxPorts is the maximum number of ports that can be exposed on a - // container. In practice this will be ports in range [3000 - 4000). The + // container. In practice this will be ports in range [10000 - 11000). The // high range should make it easier to distinguish container ports from // the tailnet target ports for debugging purposes (i.e when reading - // netfilter rules). The limit of 10000 is somewhat arbitrary, the + // netfilter rules). The limit of 1000 is somewhat arbitrary, the // assumption is that this would not be hit in practice. 
- maxPorts = 10000 + maxPorts = 1000 indexEgressProxyGroup = ".metadata.annotations.egress-proxy-group" ) @@ -254,7 +254,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s if !found { // Calculate a free port to expose on container and add // a new PortMap to the ClusterIP Service. - if usedPorts.Len() == maxPorts { + if usedPorts.Len() >= maxPorts { // TODO(irbekrm): refactor to avoid extra reconciles here. Low priority as in practice, // the limit should not be hit. return nil, false, fmt.Errorf("unable to allocate additional ports on ProxyGroup %s, %d ports already used. Create another ProxyGroup or open an issue if you believe this is unexpected.", proxyGroupName, maxPorts) @@ -548,13 +548,13 @@ func svcNameBase(s string) string { } } -// unusedPort returns a port in range [3000 - 4000). The caller must ensure that -// usedPorts does not contain all ports in range [3000 - 4000). +// unusedPort returns a port in range [10000 - 11000). The caller must ensure that +// usedPorts does not contain all ports in range [10000 - 11000). func unusedPort(usedPorts sets.Set[int32]) int32 { foundFreePort := false var suggestPort int32 for !foundFreePort { - suggestPort = rand.Int32N(maxPorts) + 3000 + suggestPort = rand.Int32N(maxPorts) + 10000 if !usedPorts.Has(suggestPort) { foundFreePort = true } From cbf1a4efe97a5424010a967285d71cf6ee4458ab Mon Sep 17 00:00:00 2001 From: Oliver Rahner Date: Tue, 3 Dec 2024 18:00:40 +0100 Subject: [PATCH 0201/1708] cmd/k8s-operator/deploy/chart: allow reading OAuth creds from a CSI driver's volume and annotating operator's Service account (#14264) cmd/k8s-operator/deploy/chart: allow reading OAuth creds from a CSI driver's volume and annotating operator's Service account Updates #14264 Signed-off-by: Oliver Rahner --- .../deploy/chart/templates/deployment.yaml | 10 +++++++--- .../deploy/chart/templates/operator-rbac.yaml | 4 ++++ cmd/k8s-operator/deploy/chart/values.yaml | 20 ++++++++++++++++++- 3 files changed, 30 insertions(+), 4 deletions(-) diff --git a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml index 2653f2159..1b9b97186 100644 --- a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml @@ -35,9 +35,13 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} volumes: - - name: oauth - secret: - secretName: operator-oauth + - name: oauth + {{- with .Values.oauthSecretVolume }} + {{- toYaml . | nindent 10 }} + {{- else }} + secret: + secretName: operator-oauth + {{- end }} containers: - name: operator {{- with .Values.operatorConfig.securityContext }} diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml index a56edfe0d..637bdf793 100644 --- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml @@ -6,6 +6,10 @@ kind: ServiceAccount metadata: name: operator namespace: {{ .Release.Namespace }} + {{- with .Values.operatorConfig.serviceAccountAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index b24ba37b0..2d1effc25 100644 --- a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -3,11 +3,26 @@ # Operator oauth credentials. 
If set a Kubernetes Secret with the provided # values will be created in the operator namespace. If unset a Secret named -# operator-oauth must be precreated. +# operator-oauth must be precreated or oauthSecretVolume needs to be adjusted. +# This block will be overridden by oauthSecretVolume, if set. oauth: {} # clientId: "" # clientSecret: "" +# Secret volume. +# If set it defines the volume the oauth secrets will be mounted from. +# The volume needs to contain two files named `client_id` and `client_secret`. +# If unset the volume will reference the Secret named operator-oauth. +# This block will override the oauth block. +oauthSecretVolume: {} + # csi: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: tailscale-oauth + # + ## NAME is pre-defined! + # installCRDs determines whether tailscale.com CRDs should be installed as part # of chart installation. We do not use Helm's CRD installation mechanism as that # does not allow for upgrading CRDs. @@ -40,6 +55,9 @@ operatorConfig: podAnnotations: {} podLabels: {} + serviceAccountAnnotations: {} + # eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/tailscale-operator-role + tolerations: [] affinity: {} From aa43388363bbb34835bc721cddc246e3f357d187 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 4 Dec 2024 06:46:51 +0000 Subject: [PATCH 0202/1708] cmd/k8s-operator: fix a bunch of status equality checks (#14270) Updates tailscale/tailscale#14269 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/connector.go | 2 +- cmd/k8s-operator/egress-services-readiness.go | 2 +- cmd/k8s-operator/egress-services.go | 2 +- cmd/k8s-operator/nameserver.go | 14 +++++++------- cmd/k8s-operator/proxyclass.go | 2 +- cmd/k8s-operator/proxygroup.go | 2 +- cmd/k8s-operator/svc.go | 4 ++-- cmd/k8s-operator/tsrecorder.go | 2 +- 8 files changed, 15 insertions(+), 15 deletions(-) diff --git a/cmd/k8s-operator/connector.go b/cmd/k8s-operator/connector.go index 1ed6fd155..dfeee6be1 100644 --- a/cmd/k8s-operator/connector.go +++ b/cmd/k8s-operator/connector.go @@ -113,7 +113,7 @@ func (a *ConnectorReconciler) Reconcile(ctx context.Context, req reconcile.Reque setStatus := func(cn *tsapi.Connector, _ tsapi.ConditionType, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { tsoperator.SetConnectorCondition(cn, tsapi.ConnectorReady, status, reason, message, cn.Generation, a.clock, logger) var updateErr error - if !apiequality.Semantic.DeepEqual(oldCnStatus, cn.Status) { + if !apiequality.Semantic.DeepEqual(oldCnStatus, &cn.Status) { // An error encountered here should get returned by the Reconcile function. 
updateErr = a.Client.Status().Update(ctx, cn) } diff --git a/cmd/k8s-operator/egress-services-readiness.go b/cmd/k8s-operator/egress-services-readiness.go index f6991145f..f1964d452 100644 --- a/cmd/k8s-operator/egress-services-readiness.go +++ b/cmd/k8s-operator/egress-services-readiness.go @@ -64,7 +64,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re oldStatus := svc.Status.DeepCopy() defer func() { tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, l) - if !apiequality.Semantic.DeepEqual(oldStatus, svc.Status) { + if !apiequality.Semantic.DeepEqual(oldStatus, &svc.Status) { err = errors.Join(err, esrr.Status().Update(ctx, svc)) } }() diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index 17746b470..a08c0b715 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -123,7 +123,7 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re oldStatus := svc.Status.DeepCopy() defer func() { - if !apiequality.Semantic.DeepEqual(oldStatus, svc.Status) { + if !apiequality.Semantic.DeepEqual(oldStatus, &svc.Status) { err = errors.Join(err, esr.Status().Update(ctx, svc)) } }() diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index 52577c929..6a9a6be93 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -86,7 +86,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ return reconcile.Result{}, nil } logger.Info("Cleaning up DNSConfig resources") - if err := a.maybeCleanup(ctx, &dnsCfg, logger); err != nil { + if err := a.maybeCleanup(&dnsCfg); err != nil { logger.Errorf("error cleaning up reconciler resource: %v", err) return res, err } @@ -100,9 +100,9 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ } oldCnStatus := dnsCfg.Status.DeepCopy() - setStatus := func(dnsCfg *tsapi.DNSConfig, conditionType tsapi.ConditionType, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { + setStatus := func(dnsCfg *tsapi.DNSConfig, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { tsoperator.SetDNSConfigCondition(dnsCfg, tsapi.NameserverReady, status, reason, message, dnsCfg.Generation, a.clock, logger) - if !apiequality.Semantic.DeepEqual(oldCnStatus, dnsCfg.Status) { + if !apiequality.Semantic.DeepEqual(oldCnStatus, &dnsCfg.Status) { // An error encountered here should get returned by the Reconcile function. if updateErr := a.Client.Status().Update(ctx, dnsCfg); updateErr != nil { err = errors.Wrap(err, updateErr.Error()) @@ -118,7 +118,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ msg := "invalid cluster configuration: more than one tailscale.com/dnsconfigs found. Please ensure that no more than one is created." 
logger.Error(msg) a.recorder.Event(&dnsCfg, corev1.EventTypeWarning, reasonMultipleDNSConfigsPresent, messageMultipleDNSConfigsPresent) - setStatus(&dnsCfg, tsapi.NameserverReady, metav1.ConditionFalse, reasonMultipleDNSConfigsPresent, messageMultipleDNSConfigsPresent) + setStatus(&dnsCfg, metav1.ConditionFalse, reasonMultipleDNSConfigsPresent, messageMultipleDNSConfigsPresent) } if !slices.Contains(dnsCfg.Finalizers, FinalizerName) { @@ -127,7 +127,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ if err := a.Update(ctx, &dnsCfg); err != nil { msg := fmt.Sprintf(messageNameserverCreationFailed, err) logger.Error(msg) - return setStatus(&dnsCfg, tsapi.NameserverReady, metav1.ConditionFalse, reasonNameserverCreationFailed, msg) + return setStatus(&dnsCfg, metav1.ConditionFalse, reasonNameserverCreationFailed, msg) } } if err := a.maybeProvision(ctx, &dnsCfg, logger); err != nil { @@ -149,7 +149,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ dnsCfg.Status.Nameserver = &tsapi.NameserverStatus{ IP: ip, } - return setStatus(&dnsCfg, tsapi.NameserverReady, metav1.ConditionTrue, reasonNameserverCreated, reasonNameserverCreated) + return setStatus(&dnsCfg, metav1.ConditionTrue, reasonNameserverCreated, reasonNameserverCreated) } logger.Info("nameserver Service does not have an IP address allocated, waiting...") return reconcile.Result{}, nil @@ -188,7 +188,7 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa // maybeCleanup removes DNSConfig from being tracked. The cluster resources // created, will be automatically garbage collected as they are owned by the // DNSConfig. -func (a *NameserverReconciler) maybeCleanup(ctx context.Context, dnsCfg *tsapi.DNSConfig, logger *zap.SugaredLogger) error { +func (a *NameserverReconciler) maybeCleanup(dnsCfg *tsapi.DNSConfig) error { a.mu.Lock() a.managedNameservers.Remove(dnsCfg.UID) a.mu.Unlock() diff --git a/cmd/k8s-operator/proxyclass.go b/cmd/k8s-operator/proxyclass.go index ad3cfc9fd..b781af05a 100644 --- a/cmd/k8s-operator/proxyclass.go +++ b/cmd/k8s-operator/proxyclass.go @@ -103,7 +103,7 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re } else { tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionTrue, reasonProxyClassValid, reasonProxyClassValid, pc.Generation, pcr.clock, logger) } - if !apiequality.Semantic.DeepEqual(oldPCStatus, pc.Status) { + if !apiequality.Semantic.DeepEqual(oldPCStatus, &pc.Status) { if err := pcr.Client.Status().Update(ctx, pc); err != nil { logger.Errorf("error updating ProxyClass status: %v", err) return reconcile.Result{}, err diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 1aefbd2f6..344cd9ae0 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -110,7 +110,7 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ oldPGStatus := pg.Status.DeepCopy() setStatusReady := func(pg *tsapi.ProxyGroup, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, status, reason, message, pg.Generation, r.clock, logger) - if !apiequality.Semantic.DeepEqual(oldPGStatus, pg.Status) { + if !apiequality.Semantic.DeepEqual(oldPGStatus, &pg.Status) { // An error encountered here should get returned by the Reconcile function. 
if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil { err = errors.Wrap(err, updateErr.Error()) diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index 6afc56f97..cbf50c81f 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -131,7 +131,7 @@ func (a *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.SugaredLogger, svc *corev1.Service) (err error) { oldSvcStatus := svc.Status.DeepCopy() defer func() { - if !apiequality.Semantic.DeepEqual(oldSvcStatus, svc.Status) { + if !apiequality.Semantic.DeepEqual(oldSvcStatus, &svc.Status) { // An error encountered here should get returned by the Reconcile function. err = errors.Join(err, a.Client.Status().Update(ctx, svc)) } @@ -196,7 +196,7 @@ func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.SugaredLogger, svc *corev1.Service) (err error) { oldSvcStatus := svc.Status.DeepCopy() defer func() { - if !apiequality.Semantic.DeepEqual(oldSvcStatus, svc.Status) { + if !apiequality.Semantic.DeepEqual(oldSvcStatus, &svc.Status) { // An error encountered here should get returned by the Reconcile function. err = errors.Join(err, a.Client.Status().Update(ctx, svc)) } diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index cfe38c50a..4445578a6 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -102,7 +102,7 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques oldTSRStatus := tsr.Status.DeepCopy() setStatusReady := func(tsr *tsapi.Recorder, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, status, reason, message, tsr.Generation, r.clock, logger) - if !apiequality.Semantic.DeepEqual(oldTSRStatus, tsr.Status) { + if !apiequality.Semantic.DeepEqual(oldTSRStatus, &tsr.Status) { // An error encountered here should get returned by the Reconcile function. 
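
The status-comparison hunks in this patch all fix the same subtle bug: DeepCopy returns a pointer, while the second argument passed to apiequality.Semantic.DeepEqual was a plain struct value, so the comparison always saw two different types, always reported a change, and every reconcile issued a redundant Status().Update. A minimal, self-contained sketch of the type mismatch; it uses reflect.DeepEqual, which treats mismatched types the same way, and the struct name is illustrative only:

package main

import (
	"fmt"
	"reflect"
)

type condStatus struct{ Ready bool }

func main() {
	oldStatus := &condStatus{Ready: true} // what a DeepCopy-style helper returns: a pointer
	curStatus := condStatus{Ready: true}  // the live value on the object, unchanged

	// Pointer vs. value are different types, so this is always false,
	// which made the reconcilers issue a no-op status Update on every pass.
	fmt.Println(reflect.DeepEqual(oldStatus, curStatus)) // false

	// Comparing pointer to pointer, as the patched code does with &svc.Status,
	// compares the pointed-to values and reports true when nothing changed.
	fmt.Println(reflect.DeepEqual(oldStatus, &curStatus)) // true
}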
if updateErr := r.Client.Status().Update(ctx, tsr); updateErr != nil { err = errors.Wrap(err, updateErr.Error()) From 2aac91688883090d892f01a2953cc0318aee9c90 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 4 Dec 2024 12:00:04 +0000 Subject: [PATCH 0203/1708] cmd/{containerboot,k8s-operator},kube/kubetypes: kube Ingress L7 proxies only advertise HTTPS endpoint when ready (#14171) cmd/containerboot,kube/kubetypes,cmd/k8s-operator: detect if Ingress is created in a tailnet that has no HTTPS This attempts to make Kubernetes Operator L7 Ingress setup failures more explicit: - the Ingress resource now only advertises HTTPS endpoint via status.ingress.loadBalancer.hostname when/if the proxy has succesfully loaded serve config - the proxy attempts to catch cases where HTTPS is disabled for the tailnet and logs a warning Updates tailscale/tailscale#12079 Updates tailscale/tailscale#10407 Signed-off-by: Irbe Krumina --- cmd/containerboot/kube.go | 92 ++++++++++----- cmd/containerboot/kube_test.go | 42 +++---- cmd/containerboot/main.go | 49 ++++++-- cmd/containerboot/main_test.go | 36 +++--- cmd/containerboot/serve.go | 60 ++++++++-- cmd/containerboot/settings.go | 4 +- cmd/k8s-operator/connector.go | 10 +- cmd/k8s-operator/ingress.go | 12 +- cmd/k8s-operator/ingress_test.go | 148 ++++++++++++++++++++++++ cmd/k8s-operator/sts.go | 93 +++++++++++---- cmd/k8s-operator/svc.go | 10 +- kube/kubetypes/{metrics.go => types.go} | 15 +++ 12 files changed, 443 insertions(+), 128 deletions(-) rename kube/kubetypes/{metrics.go => types.go} (59%) diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go index 5a726c20b..643eef385 100644 --- a/cmd/containerboot/kube.go +++ b/cmd/containerboot/kube.go @@ -9,30 +9,55 @@ import ( "context" "encoding/json" "fmt" - "log" "net/http" "net/netip" "os" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" + "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" ) -// storeDeviceID writes deviceID to 'device_id' data field of the named -// Kubernetes Secret. -func storeDeviceID(ctx context.Context, secretName string, deviceID tailcfg.StableNodeID) error { +// kubeClient is a wrapper around Tailscale's internal kube client that knows how to talk to the kube API server. We use +// this rather than any of the upstream Kubernetes client libaries to avoid extra imports. +type kubeClient struct { + kubeclient.Client + stateSecret string +} + +func newKubeClient(root string, stateSecret string) (*kubeClient, error) { + if root != "/" { + // If we are running in a test, we need to set the root path to the fake + // service account directory. + kubeclient.SetRootPathForTesting(root) + } + var err error + kc, err := kubeclient.New("tailscale-container") + if err != nil { + return nil, fmt.Errorf("Error creating kube client: %w", err) + } + if (root != "/") || os.Getenv("TS_KUBERNETES_READ_API_SERVER_ADDRESS_FROM_ENV") == "true" { + // Derive the API server address from the environment variables + // Used to set http server in tests, or optionally enabled by flag + kc.SetURL(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS"))) + } + return &kubeClient{Client: kc, stateSecret: stateSecret}, nil +} + +// storeDeviceID writes deviceID to 'device_id' data field of the client's state Secret. 
+func (kc *kubeClient) storeDeviceID(ctx context.Context, deviceID tailcfg.StableNodeID) error { s := &kubeapi.Secret{ Data: map[string][]byte{ - "device_id": []byte(deviceID), + kubetypes.KeyDeviceID: []byte(deviceID), }, } - return kc.StrategicMergePatchSecret(ctx, secretName, s, "tailscale-container") + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container") } -// storeDeviceEndpoints writes device's tailnet IPs and MagicDNS name to fields -// 'device_ips', 'device_fqdn' of the named Kubernetes Secret. -func storeDeviceEndpoints(ctx context.Context, secretName string, fqdn string, addresses []netip.Prefix) error { +// storeDeviceEndpoints writes device's tailnet IPs and MagicDNS name to fields 'device_ips', 'device_fqdn' of client's +// state Secret. +func (kc *kubeClient) storeDeviceEndpoints(ctx context.Context, fqdn string, addresses []netip.Prefix) error { var ips []string for _, addr := range addresses { ips = append(ips, addr.Addr().String()) @@ -44,16 +69,28 @@ func storeDeviceEndpoints(ctx context.Context, secretName string, fqdn string, a s := &kubeapi.Secret{ Data: map[string][]byte{ - "device_fqdn": []byte(fqdn), - "device_ips": deviceIPs, + kubetypes.KeyDeviceFQDN: []byte(fqdn), + kubetypes.KeyDeviceIPs: deviceIPs, }, } - return kc.StrategicMergePatchSecret(ctx, secretName, s, "tailscale-container") + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container") +} + +// storeHTTPSEndpoint writes an HTTPS endpoint exposed by this device via 'tailscale serve' to the client's state +// Secret. In practice this will be the same value that gets written to 'device_fqdn', but this should only be called +// when the serve config has been successfully set up. +func (kc *kubeClient) storeHTTPSEndpoint(ctx context.Context, ep string) error { + s := &kubeapi.Secret{ + Data: map[string][]byte{ + kubetypes.KeyHTTPSEndpoint: []byte(ep), + }, + } + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container") } // deleteAuthKey deletes the 'authkey' field of the given kube // secret. No-op if there is no authkey in the secret. -func deleteAuthKey(ctx context.Context, secretName string) error { +func (kc *kubeClient) deleteAuthKey(ctx context.Context) error { // m is a JSON Patch data structure, see https://jsonpatch.com/ or RFC 6902. m := []kubeclient.JSONPatch{ { @@ -61,7 +98,7 @@ func deleteAuthKey(ctx context.Context, secretName string) error { Path: "/data/authkey", }, } - if err := kc.JSONPatchResource(ctx, secretName, kubeclient.TypeSecrets, m); err != nil { + if err := kc.JSONPatchResource(ctx, kc.stateSecret, kubeclient.TypeSecrets, m); err != nil { if s, ok := err.(*kubeapi.Status); ok && s.Code == http.StatusUnprocessableEntity { // This is kubernetes-ese for "the field you asked to // delete already doesn't exist", aka no-op. @@ -72,22 +109,19 @@ func deleteAuthKey(ctx context.Context, secretName string) error { return nil } -var kc kubeclient.Client - -func initKubeClient(root string) { - if root != "/" { - // If we are running in a test, we need to set the root path to the fake - // service account directory. - kubeclient.SetRootPathForTesting(root) +// storeCapVerUID stores the current capability version of tailscale and, if provided, UID of the Pod in the tailscale +// state Secret. +// These two fields are used by the Kubernetes Operator to observe the current capability version of tailscaled running in this container. 
+func (kc *kubeClient) storeCapVerUID(ctx context.Context, podUID string) error { + capVerS := fmt.Sprintf("%d", tailcfg.CurrentCapabilityVersion) + d := map[string][]byte{ + kubetypes.KeyCapVer: []byte(capVerS), } - var err error - kc, err = kubeclient.New("tailscale-container") - if err != nil { - log.Fatalf("Error creating kube client: %v", err) + if podUID != "" { + d[kubetypes.KeyPodUID] = []byte(podUID) } - if (root != "/") || os.Getenv("TS_KUBERNETES_READ_API_SERVER_ADDRESS_FROM_ENV") == "true" { - // Derive the API server address from the environment variables - // Used to set http server in tests, or optionally enabled by flag - kc.SetURL(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS"))) + s := &kubeapi.Secret{ + Data: d, } + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container") } diff --git a/cmd/containerboot/kube_test.go b/cmd/containerboot/kube_test.go index 1a5730548..2ba69af7c 100644 --- a/cmd/containerboot/kube_test.go +++ b/cmd/containerboot/kube_test.go @@ -21,7 +21,7 @@ func TestSetupKube(t *testing.T) { cfg *settings wantErr bool wantCfg *settings - kc kubeclient.Client + kc *kubeClient }{ { name: "TS_AUTHKEY set, state Secret exists", @@ -29,14 +29,14 @@ func TestSetupKube(t *testing.T) { AuthKey: "foo", KubeSecret: "foo", }, - kc: &kubeclient.FakeClient{ + kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return false, false, nil }, GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { return nil, nil }, - }, + }}, wantCfg: &settings{ AuthKey: "foo", KubeSecret: "foo", @@ -48,14 +48,14 @@ func TestSetupKube(t *testing.T) { AuthKey: "foo", KubeSecret: "foo", }, - kc: &kubeclient.FakeClient{ + kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return false, true, nil }, GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { return nil, &kubeapi.Status{Code: 404} }, - }, + }}, wantCfg: &settings{ AuthKey: "foo", KubeSecret: "foo", @@ -67,14 +67,14 @@ func TestSetupKube(t *testing.T) { AuthKey: "foo", KubeSecret: "foo", }, - kc: &kubeclient.FakeClient{ + kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return false, false, nil }, GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { return nil, &kubeapi.Status{Code: 404} }, - }, + }}, wantCfg: &settings{ AuthKey: "foo", KubeSecret: "foo", @@ -87,14 +87,14 @@ func TestSetupKube(t *testing.T) { AuthKey: "foo", KubeSecret: "foo", }, - kc: &kubeclient.FakeClient{ + kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return false, false, nil }, GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { return nil, &kubeapi.Status{Code: 403} }, - }, + }}, wantCfg: &settings{ AuthKey: "foo", KubeSecret: "foo", @@ -111,11 +111,11 @@ func TestSetupKube(t *testing.T) { AuthKey: "foo", KubeSecret: "foo", }, - kc: &kubeclient.FakeClient{ + kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return false, false, errors.New("broken") }, - }, + }}, wantErr: true, }, { @@ -127,14 +127,14 @@ func 
TestSetupKube(t *testing.T) { wantCfg: &settings{ KubeSecret: "foo", }, - kc: &kubeclient.FakeClient{ + kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return false, true, nil }, GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { return nil, &kubeapi.Status{Code: 404} }, - }, + }}, }, { // Interactive login using URL in Pod logs @@ -145,28 +145,28 @@ func TestSetupKube(t *testing.T) { wantCfg: &settings{ KubeSecret: "foo", }, - kc: &kubeclient.FakeClient{ + kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return false, false, nil }, GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { return &kubeapi.Secret{}, nil }, - }, + }}, }, { name: "TS_AUTHKEY not set, state Secret contains auth key, we do not have RBAC to patch it", cfg: &settings{ KubeSecret: "foo", }, - kc: &kubeclient.FakeClient{ + kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return false, false, nil }, GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { return &kubeapi.Secret{Data: map[string][]byte{"authkey": []byte("foo")}}, nil }, - }, + }}, wantCfg: &settings{ KubeSecret: "foo", }, @@ -177,14 +177,14 @@ func TestSetupKube(t *testing.T) { cfg: &settings{ KubeSecret: "foo", }, - kc: &kubeclient.FakeClient{ + kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return true, false, nil }, GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { return &kubeapi.Secret{Data: map[string][]byte{"authkey": []byte("foo")}}, nil }, - }, + }}, wantCfg: &settings{ KubeSecret: "foo", AuthKey: "foo", @@ -194,9 +194,9 @@ func TestSetupKube(t *testing.T) { } for _, tt := range tests { - kc = tt.kc + kc := tt.kc t.Run(tt.name, func(t *testing.T) { - if err := tt.cfg.setupKube(context.Background()); (err != nil) != tt.wantErr { + if err := tt.cfg.setupKube(context.Background(), kc); (err != nil) != tt.wantErr { t.Errorf("settings.setupKube() error = %v, wantErr %v", err, tt.wantErr) } if diff := cmp.Diff(*tt.cfg, *tt.wantCfg); diff != "" { diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 0af9062a5..ad1c0db20 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -121,6 +121,7 @@ import ( "tailscale.com/client/tailscale" "tailscale.com/ipn" kubeutils "tailscale.com/k8s-operator" + "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/types/ptr" @@ -167,9 +168,13 @@ func main() { bootCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() + var kc *kubeClient if cfg.InKubernetes { - initKubeClient(cfg.Root) - if err := cfg.setupKube(bootCtx); err != nil { + kc, err = newKubeClient(cfg.Root, cfg.KubeSecret) + if err != nil { + log.Fatalf("error initializing kube client: %v", err) + } + if err := cfg.setupKube(bootCtx, kc); err != nil { log.Fatalf("error setting up for running on Kubernetes: %v", err) } } @@ -319,12 +324,16 @@ authLoop: } } + // Remove any serve config and advertised HTTPS endpoint that may have been set by a previous run of + // containerboot, but only if we're providing a new one. 
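
Taken together, the containerboot changes in this commit mean the proxy's Kubernetes state Secret carries enough metadata for the operator to judge what the proxy supports and whether its serve config is live. A rough sketch of the data fields after a successful start, using the key names defined by the kube/kubetypes constants added later in this patch; the concrete values below are invented for illustration:

package main

import "fmt"

func main() {
	// Illustrative contents of the proxy's state Secret data after auth.
	// Key names match the kube/kubetypes constants; values are made up.
	stateData := map[string]string{
		"device_id":        "nodeid-123",                       // stable node ID
		"device_fqdn":      "ingress-proxy.tailnet-xyz.ts.net", // MagicDNS name
		"device_ips":       `["100.64.0.1"]`,                   // tailnet IPs, JSON-encoded
		"tailscale_capver": "110",                              // capability version of this containerboot/tailscaled
		"pod_uid":          "6b9f0c1d-example-uid",             // UID of the Pod that wrote this state
		"https_endpoint":   "ingress-proxy.tailnet-xyz.ts.net", // or "no-https" when the tailnet has HTTPS disabled
	}
	for k, v := range stateData {
		fmt.Printf("%s=%s\n", k, v)
	}
}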
if cfg.ServeConfigPath != "" { - // Remove any serve config that may have been set by a previous run of - // containerboot, but only if we're providing a new one. + log.Printf("serve proxy: unsetting previous config") if err := client.SetServeConfig(ctx, new(ipn.ServeConfig)); err != nil { log.Fatalf("failed to unset serve config: %v", err) } + if err := kc.storeHTTPSEndpoint(ctx, ""); err != nil { + log.Fatalf("failed to update HTTPS endpoint in tailscale state: %v", err) + } } if hasKubeStateStore(cfg) && isTwoStepConfigAuthOnce(cfg) { @@ -332,11 +341,17 @@ authLoop: // authkey is no longer needed. We don't strictly need to // wipe it, but it's good hygiene. log.Printf("Deleting authkey from kube secret") - if err := deleteAuthKey(ctx, cfg.KubeSecret); err != nil { + if err := kc.deleteAuthKey(ctx); err != nil { log.Fatalf("deleting authkey from kube secret: %v", err) } } + if hasKubeStateStore(cfg) { + if err := kc.storeCapVerUID(ctx, cfg.PodUID); err != nil { + log.Fatalf("storing capability version and UID: %v", err) + } + } + w, err = client.WatchIPNBus(ctx, ipn.NotifyInitialNetMap|ipn.NotifyInitialState) if err != nil { log.Fatalf("rewatching tailscaled for updates after auth: %v", err) @@ -355,10 +370,10 @@ authLoop: certDomain = new(atomic.Pointer[string]) certDomainChanged = make(chan bool, 1) + + triggerWatchServeConfigChanges sync.Once ) - if cfg.ServeConfigPath != "" { - go watchServeConfigChanges(ctx, cfg.ServeConfigPath, certDomainChanged, certDomain, client) - } + var nfr linuxfw.NetfilterRunner if isL3Proxy(cfg) { nfr, err = newNetfilterRunner(log.Printf) @@ -459,7 +474,7 @@ runLoop: // fails. deviceID := n.NetMap.SelfNode.StableID() if hasKubeStateStore(cfg) && deephash.Update(¤tDeviceID, &deviceID) { - if err := storeDeviceID(ctx, cfg.KubeSecret, n.NetMap.SelfNode.StableID()); err != nil { + if err := kc.storeDeviceID(ctx, n.NetMap.SelfNode.StableID()); err != nil { log.Fatalf("storing device ID in Kubernetes Secret: %v", err) } } @@ -532,8 +547,11 @@ runLoop: resetTimer(false) backendAddrs = newBackendAddrs } - if cfg.ServeConfigPath != "" && len(n.NetMap.DNS.CertDomains) != 0 { - cd := n.NetMap.DNS.CertDomains[0] + if cfg.ServeConfigPath != "" { + cd := certDomainFromNetmap(n.NetMap) + if cd == "" { + cd = kubetypes.ValueNoHTTPS + } prev := certDomain.Swap(ptr.To(cd)) if prev == nil || *prev != cd { select { @@ -575,7 +593,7 @@ runLoop: // TODO (irbekrm): instead of using the IP and FQDN, have some other mechanism for the proxy signal that it is 'Ready'. 
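
Two small concurrency patterns carry the new behaviour in main.go: the current cert domain (or the "no-https" sentinel) is published through an atomic pointer and the watcher is poked with a non-blocking send, and the serve-config watcher goroutine is now started at most once, only after the first netmap has been seen. A stripped-down sketch of that shape outside of containerboot; the names and values here are invented for the example:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

func main() {
	var (
		certDomain    atomic.Pointer[string]
		domainChanged = make(chan bool, 1)
		startWatcher  sync.Once
	)

	// Consumer: started at most once, re-reads the published domain when poked.
	watch := func() {
		for range domainChanged {
			fmt.Println("re-applying serve config for", *certDomain.Load())
		}
	}

	// Producer, as in the netmap loop: publish the latest value, notify without
	// ever blocking, and make sure the watcher exists.
	publish := func(cd string) {
		prev := certDomain.Swap(&cd)
		if prev == nil || *prev != cd {
			select {
			case domainChanged <- true:
			default:
			}
		}
		startWatcher.Do(func() { go watch() })
	}

	publish("example.tailnet-xyz.ts.net")
	publish("example.tailnet-xyz.ts.net") // unchanged value, no second notification
	time.Sleep(100 * time.Millisecond)    // give the watcher a moment to print
}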
deviceEndpoints := []any{n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses()} if hasKubeStateStore(cfg) && deephash.Update(¤tDeviceEndpoints, &deviceEndpoints) { - if err := storeDeviceEndpoints(ctx, cfg.KubeSecret, n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses().AsSlice()); err != nil { + if err := kc.storeDeviceEndpoints(ctx, n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses().AsSlice()); err != nil { log.Fatalf("storing device IPs and FQDN in Kubernetes Secret: %v", err) } } @@ -583,6 +601,13 @@ runLoop: if healthCheck != nil { healthCheck.update(len(addrs) != 0) } + + if cfg.ServeConfigPath != "" { + triggerWatchServeConfigChanges.Do(func() { + go watchServeConfigChanges(ctx, cfg.ServeConfigPath, certDomainChanged, certDomain, client, kc) + }) + } + if egressSvcsNotify != nil { egressSvcsNotify <- n } diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index 47d7c19cf..83e001b62 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -120,6 +120,8 @@ func TestContainerBoot(t *testing.T) { return fmt.Sprintf("http://127.0.0.1:%d/healthz", port) } + capver := fmt.Sprintf("%d", tailcfg.CurrentCapabilityVersion) + type phase struct { // If non-nil, send this IPN bus notification (and remember it as the // initial update for any future new watchers, then wait for all the @@ -478,10 +480,11 @@ func TestContainerBoot(t *testing.T) { { Notify: runningNotify, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + "tailscale_capver": capver, }, }, }, @@ -571,9 +574,10 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false", }, WantKubeSecret: map[string]string{ - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + "tailscale_capver": capver, }, }, }, @@ -600,10 +604,11 @@ func TestContainerBoot(t *testing.T) { { Notify: runningNotify, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + "tailscale_capver": capver, }, }, { @@ -618,10 +623,11 @@ func TestContainerBoot(t *testing.T) { }, }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "new-name.test.ts.net", - "device_id": "newID", - "device_ips": `["100.64.0.1"]`, + "authkey": "tskey-key", + "device_fqdn": "new-name.test.ts.net", + "device_id": "newID", + "device_ips": `["100.64.0.1"]`, + "tailscale_capver": capver, }, }, }, diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index 6c22b3eeb..29ee7347f 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -19,6 +19,8 @@ import ( "github.com/fsnotify/fsnotify" "tailscale.com/client/tailscale" "tailscale.com/ipn" + "tailscale.com/kube/kubetypes" + "tailscale.com/types/netmap" ) // watchServeConfigChanges watches path for changes, and when it sees one, reads @@ -26,21 +28,21 @@ import ( // applies it to lc. It exits when ctx is canceled. 
cdChanged is a channel that // is written to when the certDomain changes, causing the serve config to be // re-read and applied. -func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *tailscale.LocalClient) { +func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *tailscale.LocalClient, kc *kubeClient) { if certDomainAtomic == nil { - panic("cd must not be nil") + panic("certDomainAtomic must not be nil") } var tickChan <-chan time.Time var eventChan <-chan fsnotify.Event if w, err := fsnotify.NewWatcher(); err != nil { - log.Printf("failed to create fsnotify watcher, timer-only mode: %v", err) + log.Printf("serve proxy: failed to create fsnotify watcher, timer-only mode: %v", err) ticker := time.NewTicker(5 * time.Second) defer ticker.Stop() tickChan = ticker.C } else { defer w.Close() if err := w.Add(filepath.Dir(path)); err != nil { - log.Fatalf("failed to add fsnotify watch: %v", err) + log.Fatalf("serve proxy: failed to add fsnotify watch: %v", err) } eventChan = w.Events } @@ -59,24 +61,60 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan // k8s handles these mounts. So just re-read the file and apply it // if it's changed. } - if certDomain == "" { - continue - } sc, err := readServeConfig(path, certDomain) if err != nil { - log.Fatalf("failed to read serve config: %v", err) + log.Fatalf("serve proxy: failed to read serve config: %v", err) } if prevServeConfig != nil && reflect.DeepEqual(sc, prevServeConfig) { continue } - log.Printf("Applying serve config") - if err := lc.SetServeConfig(ctx, sc); err != nil { - log.Fatalf("failed to set serve config: %v", err) + validateHTTPSServe(certDomain, sc) + if err := updateServeConfig(ctx, sc, certDomain, lc); err != nil { + log.Fatalf("serve proxy: error updating serve config: %v", err) + } + if err := kc.storeHTTPSEndpoint(ctx, certDomain); err != nil { + log.Fatalf("serve proxy: error storing HTTPS endpoint: %v", err) } prevServeConfig = sc } } +func certDomainFromNetmap(nm *netmap.NetworkMap) string { + if len(nm.DNS.CertDomains) == 0 { + return "" + } + return nm.DNS.CertDomains[0] +} + +func updateServeConfig(ctx context.Context, sc *ipn.ServeConfig, certDomain string, lc *tailscale.LocalClient) error { + // TODO(irbekrm): This means that serve config that does not expose HTTPS endpoint will not be set for a tailnet + // that does not have HTTPS enabled. We probably want to fix this. + if certDomain == kubetypes.ValueNoHTTPS { + return nil + } + log.Printf("serve proxy: applying serve config") + return lc.SetServeConfig(ctx, sc) +} + +func validateHTTPSServe(certDomain string, sc *ipn.ServeConfig) { + if certDomain != kubetypes.ValueNoHTTPS || !hasHTTPSEndpoint(sc) { + return + } + log.Printf( + `serve proxy: this node is configured as a proxy that exposes an HTTPS endpoint to tailnet, + (perhaps a Kubernetes operator Ingress proxy) but it is not able to issue TLS certs, so this will likely not work. + To make it work, ensure that HTTPS is enabled for your tailnet, see https://tailscale.com/kb/1153/enabling-https for more details.`) +} + +func hasHTTPSEndpoint(cfg *ipn.ServeConfig) bool { + for _, tcpCfg := range cfg.TCP { + if tcpCfg.HTTPS { + return true + } + } + return false +} + // readServeConfig reads the ipn.ServeConfig from path, replacing // ${TS_CERT_DOMAIN} with certDomain. 
func readServeConfig(path, certDomain string) (*ipn.ServeConfig, error) { diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index 1262a0e18..4fae58584 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -67,6 +67,7 @@ type settings struct { PodIP string PodIPv4 string PodIPv6 string + PodUID string HealthCheckAddrPort string LocalAddrPort string MetricsEnabled bool @@ -107,6 +108,7 @@ func configFromEnv() (*settings, error) { HealthCheckEnabled: defaultBool("TS_ENABLE_HEALTH_CHECK", false), DebugAddrPort: defaultEnv("TS_DEBUG_ADDR_PORT", ""), EgressSvcsCfgPath: defaultEnv("TS_EGRESS_SERVICES_CONFIG_PATH", ""), + PodUID: defaultEnv("POD_UID", ""), } podIPs, ok := os.LookupEnv("POD_IPS") if ok { @@ -203,7 +205,7 @@ func (s *settings) validate() error { // setupKube is responsible for doing any necessary configuration and checks to // ensure that tailscale state storage and authentication mechanism will work on // Kubernetes. -func (cfg *settings) setupKube(ctx context.Context) error { +func (cfg *settings) setupKube(ctx context.Context, kc *kubeClient) error { if cfg.KubeSecret == "" { return nil } diff --git a/cmd/k8s-operator/connector.go b/cmd/k8s-operator/connector.go index dfeee6be1..1cce02fbb 100644 --- a/cmd/k8s-operator/connector.go +++ b/cmd/k8s-operator/connector.go @@ -234,21 +234,21 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge return err } - _, tsHost, ips, err := a.ssr.DeviceInfo(ctx, crl) + dev, err := a.ssr.DeviceInfo(ctx, crl, logger) if err != nil { return err } - if tsHost == "" { - logger.Debugf("no Tailscale hostname known yet, waiting for connector pod to finish auth") + if dev == nil || dev.hostname == "" { + logger.Debugf("no Tailscale hostname known yet, waiting for Connector Pod to finish auth") // No hostname yet. Wait for the connector pod to auth. cn.Status.TailnetIPs = nil cn.Status.Hostname = "" return nil } - cn.Status.TailnetIPs = ips - cn.Status.Hostname = tsHost + cn.Status.TailnetIPs = dev.ips + cn.Status.Hostname = dev.hostname return nil } diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index 40a5d0928..749869b22 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -279,12 +279,12 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga return fmt.Errorf("failed to provision: %w", err) } - _, tsHost, _, err := a.ssr.DeviceInfo(ctx, crl) + dev, err := a.ssr.DeviceInfo(ctx, crl, logger) if err != nil { - return fmt.Errorf("failed to get device ID: %w", err) + return fmt.Errorf("failed to retrieve Ingress HTTPS endpoint status: %w", err) } - if tsHost == "" { - logger.Debugf("no Tailscale hostname known yet, waiting for proxy pod to finish auth") + if dev == nil || dev.ingressDNSName == "" { + logger.Debugf("no Ingress DNS name known yet, waiting for proxy Pod initialize and start serving Ingress") // No hostname yet. Wait for the proxy pod to auth. 
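
containerboot now reads the new PodUID setting from a POD_UID environment variable. None of the hunks here show how that variable is populated; on Kubernetes it would typically be injected via the downward API, so the operator's proxy template presumably gains something like the following (hypothetical) env entry:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Hypothetical downward-API wiring for POD_UID; the operator-side change
	// that injects this is not part of the diff shown here.
	env := corev1.EnvVar{
		Name: "POD_UID",
		ValueFrom: &corev1.EnvVarSource{
			FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.uid"},
		},
	}
	fmt.Printf("%+v\n", env)
}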
ing.Status.LoadBalancer.Ingress = nil if err := a.Status().Update(ctx, ing); err != nil { @@ -293,10 +293,10 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga return nil } - logger.Debugf("setting ingress hostname to %q", tsHost) + logger.Debugf("setting Ingress hostname to %q", dev.ingressDNSName) ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{ { - Hostname: tsHost, + Hostname: dev.ingressDNSName, Ports: []networkingv1.IngressPortStatus{ { Protocol: "TCP", diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index e695cc649..c4332908a 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -142,6 +142,154 @@ func TestTailscaleIngress(t *testing.T) { expectMissing[corev1.Secret](t, fc, "operator-ns", fullName) } +func TestTailscaleIngressHostname(t *testing.T) { + tsIngressClass := &networkingv1.IngressClass{ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}} + fc := fake.NewFakeClient(tsIngressClass) + ft := &fakeTSClient{} + fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + ingR := &IngressReconciler{ + Client: fc, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + tsnetServer: fakeTsnetServer, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + logger: zl.Sugar(), + } + + // 1. Resources get created for regular Ingress + ing := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + // The apiserver is supposed to set the UID, but the fake client + // doesn't. So, set it explicitly because other code later depends + // on it being set. + UID: types.UID("1234-UID"), + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 8080, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"default-test"}}, + }, + }, + } + mustCreate(t, fc, ing) + mustCreate(t, fc, &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "1.2.3.4", + Ports: []corev1.ServicePort{{ + Port: 8080, + Name: "http"}, + }, + }, + }) + + expectReconciled(t, ingR, "default", "test") + + fullName, shortName := findGenName(t, fc, "default", "test", "ingress") + mustCreate(t, fc, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fullName, + Namespace: "operator-ns", + UID: "test-uid", + }, + }) + opts := configOpts{ + stsName: shortName, + secretName: fullName, + namespace: "default", + parentType: "ingress", + hostname: "default-test", + app: kubetypes.AppIngressResource, + } + serveConfig := &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}}, + } + opts.serveConfig = serveConfig + + expectEqual(t, fc, expectedSecret(t, fc, opts), nil) + expectEqual(t, fc, expectedHeadlessService(shortName, "ingress"), nil) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation) + + // 2. 
Ingress proxy with capability version >= 110 does not have an HTTPS endpoint set + mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) { + mak.Set(&secret.Data, "device_id", []byte("1234")) + mak.Set(&secret.Data, "tailscale_capver", []byte("110")) + mak.Set(&secret.Data, "pod_uid", []byte("test-uid")) + mak.Set(&secret.Data, "device_fqdn", []byte("foo.tailnetxyz.ts.net")) + }) + expectReconciled(t, ingR, "default", "test") + ing.Finalizers = append(ing.Finalizers, "tailscale.com/finalizer") + + expectEqual(t, fc, ing, nil) + + // 3. Ingress proxy with capability version >= 110 advertises HTTPS endpoint + mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) { + mak.Set(&secret.Data, "device_id", []byte("1234")) + mak.Set(&secret.Data, "tailscale_capver", []byte("110")) + mak.Set(&secret.Data, "pod_uid", []byte("test-uid")) + mak.Set(&secret.Data, "device_fqdn", []byte("foo.tailnetxyz.ts.net")) + mak.Set(&secret.Data, "https_endpoint", []byte("foo.tailnetxyz.ts.net")) + }) + expectReconciled(t, ingR, "default", "test") + ing.Status.LoadBalancer = networkingv1.IngressLoadBalancerStatus{ + Ingress: []networkingv1.IngressLoadBalancerIngress{ + {Hostname: "foo.tailnetxyz.ts.net", Ports: []networkingv1.IngressPortStatus{{Port: 443, Protocol: "TCP"}}}, + }, + } + expectEqual(t, fc, ing, nil) + + // 4. Ingress proxy with capability version >= 110 does not have an HTTPS endpoint ready + mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) { + mak.Set(&secret.Data, "device_id", []byte("1234")) + mak.Set(&secret.Data, "tailscale_capver", []byte("110")) + mak.Set(&secret.Data, "pod_uid", []byte("test-uid")) + mak.Set(&secret.Data, "device_fqdn", []byte("foo.tailnetxyz.ts.net")) + mak.Set(&secret.Data, "https_endpoint", []byte("no-https")) + }) + expectReconciled(t, ingR, "default", "test") + ing.Status.LoadBalancer.Ingress = nil + expectEqual(t, fc, ing, nil) + + // 5. 
Ingress proxy's state has https_endpoints set, but its capver is not matching Pod UID (downgrade) + mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) { + mak.Set(&secret.Data, "device_id", []byte("1234")) + mak.Set(&secret.Data, "tailscale_capver", []byte("110")) + mak.Set(&secret.Data, "pod_uid", []byte("not-the-right-uid")) + mak.Set(&secret.Data, "device_fqdn", []byte("foo.tailnetxyz.ts.net")) + mak.Set(&secret.Data, "https_endpoint", []byte("bar.tailnetxyz.ts.net")) + }) + ing.Status.LoadBalancer = networkingv1.IngressLoadBalancerStatus{ + Ingress: []networkingv1.IngressLoadBalancerIngress{ + {Hostname: "foo.tailnetxyz.ts.net", Ports: []networkingv1.IngressPortStatus{{Port: 443, Protocol: "TCP"}}}, + }, + } + expectReconciled(t, ingR, "default", "test") + expectEqual(t, fc, ing, nil) +} + func TestTailscaleIngressWithProxyClass(t *testing.T) { // Setup pc := &tsapi.ProxyClass{ diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 5de30154c..ff7c074a8 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -15,6 +15,7 @@ import ( "net/http" "os" "slices" + "strconv" "strings" "go.uber.org/zap" @@ -197,11 +198,11 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga } sts.ProxyClass = proxyClass - secretName, tsConfigHash, configs, err := a.createOrGetSecret(ctx, logger, sts, hsvc) + secretName, tsConfigHash, _, err := a.createOrGetSecret(ctx, logger, sts, hsvc) if err != nil { return nil, fmt.Errorf("failed to create or get API key secret: %w", err) } - _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName, tsConfigHash, configs) + _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName, tsConfigHash) if err != nil { return nil, fmt.Errorf("failed to reconcile statefulset: %w", err) } @@ -246,21 +247,21 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare return false, nil } - id, _, _, err := a.DeviceInfo(ctx, labels) + dev, err := a.DeviceInfo(ctx, labels, logger) if err != nil { return false, fmt.Errorf("getting device info: %w", err) } - if id != "" { - logger.Debugf("deleting device %s from control", string(id)) - if err := a.tsClient.DeleteDevice(ctx, string(id)); err != nil { + if dev != nil && dev.id != "" { + logger.Debugf("deleting device %s from control", string(dev.id)) + if err := a.tsClient.DeleteDevice(ctx, string(dev.id)); err != nil { errResp := &tailscale.ErrResponse{} if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { - logger.Debugf("device %s not found, likely because it has already been deleted from control", string(id)) + logger.Debugf("device %s not found, likely because it has already been deleted from control", string(dev.id)) } else { return false, fmt.Errorf("deleting device: %w", err) } } else { - logger.Debugf("device %s deleted from control", string(id)) + logger.Debugf("device %s deleted from control", string(dev.id)) } } @@ -440,40 +441,66 @@ func sanitizeConfigBytes(c ipn.ConfigVAlpha) string { // that acts as an operator proxy. It retrieves info from a Kubernetes Secret // labeled with the provided labels. // Either of device ID, hostname and IPs can be empty string if not found in the Secret. 
-func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map[string]string) (id tailcfg.StableNodeID, hostname string, ips []string, err error) { +func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map[string]string, logger *zap.SugaredLogger) (dev *device, err error) { sec, err := getSingleObject[corev1.Secret](ctx, a.Client, a.operatorNamespace, childLabels) if err != nil { - return "", "", nil, err + return dev, err } if sec == nil { - return "", "", nil, nil + return dev, nil + } + pod := new(corev1.Pod) + if err := a.Get(ctx, types.NamespacedName{Namespace: sec.Namespace, Name: sec.Name}, pod); err != nil && !apierrors.IsNotFound(err) { + return dev, nil } - return deviceInfo(sec) + return deviceInfo(sec, pod, logger) +} + +// device contains tailscale state of a proxy device as gathered from its tailscale state Secret. +type device struct { + id tailcfg.StableNodeID // device's stable ID + hostname string // MagicDNS name of the device + ips []string // Tailscale IPs of the device + // ingressDNSName is the L7 Ingress DNS name. In practice this will be the same value as hostname, but only set + // when the device has been configured to serve traffic on it via 'tailscale serve'. + ingressDNSName string } -func deviceInfo(sec *corev1.Secret) (id tailcfg.StableNodeID, hostname string, ips []string, err error) { - id = tailcfg.StableNodeID(sec.Data["device_id"]) +func deviceInfo(sec *corev1.Secret, pod *corev1.Pod, log *zap.SugaredLogger) (dev *device, err error) { + id := tailcfg.StableNodeID(sec.Data[kubetypes.KeyDeviceID]) if id == "" { - return "", "", nil, nil + return dev, nil } + dev = &device{id: id} // Kubernetes chokes on well-formed FQDNs with the trailing dot, so we have // to remove it. - hostname = strings.TrimSuffix(string(sec.Data["device_fqdn"]), ".") - if hostname == "" { + dev.hostname = strings.TrimSuffix(string(sec.Data[kubetypes.KeyDeviceFQDN]), ".") + if dev.hostname == "" { // Device ID gets stored and retrieved in a different flow than // FQDN and IPs. A device that acts as Kubernetes operator - // proxy, but whose route setup has failed might have an device + // proxy, but whose route setup has failed might have a device // ID, but no FQDN/IPs. If so, return the ID, to allow the // operator to clean up such devices. - return id, "", nil, nil + return dev, nil + } + // TODO(irbekrm): we fall back to using the hostname field to determine Ingress's hostname to ensure backwards + // compatibility. In 1.82 we can remove this fallback mechanism. 
+ dev.ingressDNSName = dev.hostname + if proxyCapVer(sec, pod, log) >= 109 { + dev.ingressDNSName = strings.TrimSuffix(string(sec.Data[kubetypes.KeyHTTPSEndpoint]), ".") + if strings.EqualFold(dev.ingressDNSName, kubetypes.ValueNoHTTPS) { + dev.ingressDNSName = "" + } } - if rawDeviceIPs, ok := sec.Data["device_ips"]; ok { + if rawDeviceIPs, ok := sec.Data[kubetypes.KeyDeviceIPs]; ok { + ips := make([]string, 0) if err := json.Unmarshal(rawDeviceIPs, &ips); err != nil { - return "", "", nil, err + return nil, err } + dev.ips = ips } - return id, hostname, ips, nil + return dev, nil } func newAuthKey(ctx context.Context, tsClient tsClient, tags []string) (string, error) { @@ -500,7 +527,7 @@ var proxyYaml []byte //go:embed deploy/manifests/userspace-proxy.yaml var userspaceProxyYaml []byte -func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret, tsConfigHash string, _ map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) (*appsv1.StatefulSet, error) { +func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret, tsConfigHash string) (*appsv1.StatefulSet, error) { ss := new(appsv1.StatefulSet) if sts.ServeConfig != nil && sts.ForwardClusterTrafficViaL7IngressProxy != true { // If forwarding cluster traffic via is required we need non-userspace + NET_ADMIN + forwarding if err := yaml.Unmarshal(userspaceProxyYaml, &ss); err != nil { @@ -1084,3 +1111,23 @@ func nameForService(svc *corev1.Service) string { func isValidFirewallMode(m string) bool { return m == "auto" || m == "nftables" || m == "iptables" } + +// proxyCapVer accepts a proxy state Secret and a proxy Pod returns the capability version of a proxy Pod. +// This is best effort - if the capability version can not (currently) be determined, it returns -1. +func proxyCapVer(sec *corev1.Secret, pod *corev1.Pod, log *zap.SugaredLogger) tailcfg.CapabilityVersion { + if sec == nil || pod == nil { + return tailcfg.CapabilityVersion(-1) + } + if len(sec.Data[kubetypes.KeyCapVer]) == 0 || len(sec.Data[kubetypes.KeyPodUID]) == 0 { + return tailcfg.CapabilityVersion(-1) + } + capVer, err := strconv.Atoi(string(sec.Data[kubetypes.KeyCapVer])) + if err != nil { + log.Infof("[unexpected]: unexpected capability version in proxy's state Secret, expected an integer, got %q", string(sec.Data[kubetypes.KeyCapVer])) + return tailcfg.CapabilityVersion(-1) + } + if !strings.EqualFold(string(pod.ObjectMeta.UID), string(sec.Data[kubetypes.KeyPodUID])) { + return tailcfg.CapabilityVersion(-1) + } + return tailcfg.CapabilityVersion(capVer) +} diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index cbf50c81f..314ac2398 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -320,11 +320,11 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga return nil } - _, tsHost, tsIPs, err := a.ssr.DeviceInfo(ctx, crl) + dev, err := a.ssr.DeviceInfo(ctx, crl, logger) if err != nil { return fmt.Errorf("failed to get device ID: %w", err) } - if tsHost == "" { + if dev == nil || dev.hostname == "" { msg := "no Tailscale hostname known yet, waiting for proxy pod to finish auth" logger.Debug(msg) // No hostname yet. Wait for the proxy pod to auth. 
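
The net effect of deviceInfo and proxyCapVer is a small decision rule for which DNS name, if any, the operator advertises on the Ingress: trust the https_endpoint field only when it was written by a proxy with capability version 109 or newer whose state Secret demonstrably belongs to the currently running Pod, treat "no-https" as nothing to advertise yet, and otherwise fall back to the device hostname (the pre-1.82 behaviour). The sketch below compresses that rule into one illustrative function; it is a simplification for reading, not the operator's actual code, and the inputs mirror the test steps above:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// ingressHostname is a simplified restatement of the deviceInfo/proxyCapVer logic.
func ingressHostname(data map[string]string, podUID string) string {
	hostname := strings.TrimSuffix(data["device_fqdn"], ".")
	capver, err := strconv.Atoi(data["tailscale_capver"])
	if err != nil || capver < 109 || data["pod_uid"] != podUID {
		return hostname // state too old, or written by a different Pod: fall back
	}
	ep := strings.TrimSuffix(data["https_endpoint"], ".")
	if strings.EqualFold(ep, "no-https") {
		return "" // proxy is up but cannot serve HTTPS yet; advertise nothing
	}
	return ep
}

func main() {
	secret := map[string]string{
		"device_fqdn":      "foo.tailnetxyz.ts.net",
		"tailscale_capver": "110",
		"pod_uid":          "test-uid",
		"https_endpoint":   "foo.tailnetxyz.ts.net",
	}
	fmt.Println(ingressHostname(secret, "test-uid")) // foo.tailnetxyz.ts.net

	secret["https_endpoint"] = "no-https"
	fmt.Println(ingressHostname(secret, "test-uid") == "") // true: HTTPS endpoint not ready

	secret["https_endpoint"] = "bar.tailnetxyz.ts.net"
	fmt.Println(ingressHostname(secret, "other-uid")) // foo.tailnetxyz.ts.net: UID mismatch, fall back
}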
@@ -333,9 +333,9 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga return nil } - logger.Debugf("setting Service LoadBalancer status to %q, %s", tsHost, strings.Join(tsIPs, ", ")) + logger.Debugf("setting Service LoadBalancer status to %q, %s", dev.hostname, strings.Join(dev.ips, ", ")) ingress := []corev1.LoadBalancerIngress{ - {Hostname: tsHost}, + {Hostname: dev.hostname}, } clusterIPAddr, err := netip.ParseAddr(svc.Spec.ClusterIP) if err != nil { @@ -343,7 +343,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga tsoperator.SetServiceCondition(svc, tsapi.ProxyReady, metav1.ConditionFalse, reasonProxyFailed, msg, a.clock, logger) return errors.New(msg) } - for _, ip := range tsIPs { + for _, ip := range dev.ips { addr, err := netip.ParseAddr(ip) if err != nil { continue diff --git a/kube/kubetypes/metrics.go b/kube/kubetypes/types.go similarity index 59% rename from kube/kubetypes/metrics.go rename to kube/kubetypes/types.go index 63325182d..3c97d8c7d 100644 --- a/kube/kubetypes/metrics.go +++ b/kube/kubetypes/types.go @@ -27,4 +27,19 @@ const ( MetricEgressServiceCount = "k8s_egress_service_resources" MetricProxyGroupEgressCount = "k8s_proxygroup_egress_resources" MetricProxyGroupIngressCount = "k8s_proxygroup_ingress_resources" + + // Keys that containerboot writes to state file that can be used to determine its state. + // fields set in Tailscale state Secret. These are mostly used by the Tailscale Kubernetes operator to determine + // the state of this tailscale device. + KeyDeviceID string = "device_id" // node stable ID of the device + KeyDeviceFQDN string = "device_fqdn" // device's tailnet hostname + KeyDeviceIPs string = "device_ips" // device's tailnet IPs + KeyPodUID string = "pod_uid" // Pod UID + // KeyCapVer contains Tailscale capability version of this proxy instance. + KeyCapVer string = "tailscale_capver" + // KeyHTTPSEndpoint is a name of a field that can be set to the value of any HTTPS endpoint currently exposed by + // this device to the tailnet. This is used by the Kubernetes operator Ingress proxy to communicate to the operator + // that cluster workloads behind the Ingress can now be accessed via the given DNS name over HTTPS. + KeyHTTPSEndpoint string = "https_endpoint" + ValueNoHTTPS string = "no-https" ) From 74069774bee3aeb52637a58587ddfb0369f69676 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 4 Dec 2024 08:41:37 -0800 Subject: [PATCH 0204/1708] net/tstun: remove tailscaled_outbound_dropped_packets_total reason=acl metric for now Updates #14280 Change-Id: Idff102b3d7650fc9dfbe0c340168806bdf542d76 Signed-off-by: Brad Fitzpatrick --- net/tstun/wrap.go | 7 ++++--- net/tstun/wrap_test.go | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index c384abf9d..deb8bc094 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -876,9 +876,10 @@ func (t *Wrapper) filterPacketOutboundToWireGuard(p *packet.Parsed, pc *peerConf if filt.RunOut(p, t.filterFlags) != filter.Accept { metricPacketOutDropFilter.Add(1) - t.metrics.outboundDroppedPacketsTotal.Add(usermetric.DropLabels{ - Reason: usermetric.ReasonACL, - }, 1) + // TODO(#14280): increment a t.metrics.outboundDroppedPacketsTotal here + // once we figure out & document what labels to use for multicast, + // link-local-unicast, IP fragments, etc. But they're not + // usermetric.ReasonACL. 
return filter.Drop, gro } diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index 9ebedda83..a3dfe7d86 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -453,7 +453,7 @@ func TestFilter(t *testing.T) { assertMetricPackets(t, "inACL", 3, metricInboundDroppedPacketsACL) assertMetricPackets(t, "inError", 0, metricInboundDroppedPacketsErr) - assertMetricPackets(t, "outACL", 1, metricOutboundDroppedPacketsACL) + assertMetricPackets(t, "outACL", 0, metricOutboundDroppedPacketsACL) } func assertMetricPackets(t *testing.T, metricName string, want, got int64) { From 7f9ebc0a83f82922787f4e8336b9f626d895a08c Mon Sep 17 00:00:00 2001 From: James Tucker Date: Wed, 4 Dec 2024 12:02:59 -0800 Subject: [PATCH 0205/1708] cmd/tailscale,net/netcheck: add debug feature to force preferred DERP This provides an interface for a user to force a preferred DERP outcome for all future netchecks that will take precedence unless the forced region is unreachable. The option does not persist and will be lost when the daemon restarts. Updates tailscale/corp#18997 Updates tailscale/corp#24755 Signed-off-by: James Tucker --- client/tailscale/localclient.go | 11 +++++++ cmd/tailscale/cli/debug.go | 25 +++++++++++++++ ipn/ipnlocal/local.go | 6 ++++ ipn/localapi/localapi.go | 7 +++++ net/netcheck/netcheck.go | 28 +++++++++++++++++ net/netcheck/netcheck_test.go | 56 ++++++++++++++++++++++++++++++++- wgengine/magicsock/magicsock.go | 8 +++++ 7 files changed, 140 insertions(+), 1 deletion(-) diff --git a/client/tailscale/localclient.go b/client/tailscale/localclient.go index 5eb668176..34c094a63 100644 --- a/client/tailscale/localclient.go +++ b/client/tailscale/localclient.go @@ -493,6 +493,17 @@ func (lc *LocalClient) DebugAction(ctx context.Context, action string) error { return nil } +// DebugActionBody invokes a debug action with a body parameter, such as +// "debug-force-prefer-derp". +// These are development tools and subject to change or removal over time. +func (lc *LocalClient) DebugActionBody(ctx context.Context, action string, rbody io.Reader) error { + body, err := lc.send(ctx, "POST", "/localapi/v0/debug?action="+url.QueryEscape(action), 200, rbody) + if err != nil { + return fmt.Errorf("error %w: %s", err, body) + } + return nil +} + // DebugResultJSON invokes a debug action and returns its result as something JSON-able. // These are development tools and subject to change or removal over time. 
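
DebugActionBody is the transport for the new force-prefer-derp action added in this commit. A minimal sketch of invoking it directly from Go, equivalent to running "tailscale debug force-prefer-derp 2"; the forced region does not persist across tailscaled restarts, and sending 0 clears it:

package main

import (
	"bytes"
	"context"
	"encoding/json"
	"log"

	"tailscale.com/client/tailscale"
)

func main() {
	var lc tailscale.LocalClient // zero value talks to the local tailscaled

	// Prefer DERP region 2 for future netchecks (unless it is unreachable).
	body, err := json.Marshal(2)
	if err != nil {
		log.Fatal(err)
	}
	if err := lc.DebugActionBody(context.Background(), "force-prefer-derp", bytes.NewReader(body)); err != nil {
		log.Fatal(err)
	}
}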
func (lc *LocalClient) DebugResultJSON(ctx context.Context, action string) (any, error) { diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 78bd708e5..04b343e76 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -175,6 +175,12 @@ var debugCmd = &ffcli.Command{ Exec: localAPIAction("pick-new-derp"), ShortHelp: "Switch to some other random DERP home region for a short time", }, + { + Name: "force-prefer-derp", + ShortUsage: "tailscale debug force-prefer-derp", + Exec: forcePreferDERP, + ShortHelp: "Prefer the given region ID if reachable (until restart, or 0 to clear)", + }, { Name: "force-netmap-update", ShortUsage: "tailscale debug force-netmap-update", @@ -577,6 +583,25 @@ func runDERPMap(ctx context.Context, args []string) error { return nil } +func forcePreferDERP(ctx context.Context, args []string) error { + var n int + if len(args) != 1 { + return errors.New("expected exactly one integer argument") + } + n, err := strconv.Atoi(args[0]) + if err != nil { + return fmt.Errorf("expected exactly one integer argument: %w", err) + } + b, err := json.Marshal(n) + if err != nil { + return fmt.Errorf("failed to marshal DERP region: %w", err) + } + if err := localClient.DebugActionBody(ctx, "force-prefer-derp", bytes.NewReader(b)); err != nil { + return fmt.Errorf("failed to force preferred DERP: %w", err) + } + return nil +} + func localAPIAction(action string) func(context.Context, []string) error { return func(ctx context.Context, args []string) error { if len(args) > 0 { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 278614c0b..f456d4984 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2920,6 +2920,12 @@ func (b *LocalBackend) DebugPickNewDERP() error { return b.sys.MagicSock.Get().DebugPickNewDERP() } +// DebugForcePreferDERP forwards to netcheck.DebugForcePreferDERP. +// See its docs. +func (b *LocalBackend) DebugForcePreferDERP(n int) { + b.sys.MagicSock.Get().DebugForcePreferDERP(n) +} + // send delivers n to the connected frontend and any API watchers from // LocalBackend.WatchNotifications (via the LocalAPI). // diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index ea931b028..c14a4bdf2 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -634,6 +634,13 @@ func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { } case "pick-new-derp": err = h.b.DebugPickNewDERP() + case "force-prefer-derp": + var n int + err = json.NewDecoder(r.Body).Decode(&n) + if err != nil { + break + } + h.b.DebugForcePreferDERP(n) case "": err = fmt.Errorf("missing parameter 'action'") default: diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 0bb930568..d8f5e1d49 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -236,6 +236,10 @@ type Client struct { // If false, the default net.Resolver will be used, with no caching. UseDNSCache bool + // if non-zero, force this DERP region to be preferred in all reports where + // the DERP is found to be reachable. + ForcePreferredDERP int + // For tests testEnoughRegions int testCaptivePortalDelay time.Duration @@ -780,6 +784,12 @@ func (o *GetReportOpts) getLastDERPActivity(region int) time.Time { return o.GetLastDERPActivity(region) } +func (c *Client) SetForcePreferredDERP(region int) { + c.mu.Lock() + defer c.mu.Unlock() + c.ForcePreferredDERP = region +} + // GetReport gets a report. The 'opts' argument is optional and can be nil. 
// Callers are discouraged from passing a ctx with an arbitrary deadline as this // may cause GetReport to return prematurely before all reporting methods have @@ -1277,6 +1287,9 @@ func (c *Client) logConciseReport(r *Report, dm *tailcfg.DERPMap) { if r.CaptivePortal != "" { fmt.Fprintf(w, " captiveportal=%v", r.CaptivePortal) } + if c.ForcePreferredDERP != 0 { + fmt.Fprintf(w, " force=%v", c.ForcePreferredDERP) + } fmt.Fprintf(w, " derp=%v", r.PreferredDERP) if r.PreferredDERP != 0 { fmt.Fprintf(w, " derpdist=") @@ -1435,6 +1448,21 @@ func (c *Client) addReportHistoryAndSetPreferredDERP(rs *reportState, r *Report, // which undoes any region change we made above. r.PreferredDERP = prevDERP } + if c.ForcePreferredDERP != 0 { + // If the forced DERP region probed successfully, or has recent traffic, + // use it. + _, haveLatencySample := r.RegionLatency[c.ForcePreferredDERP] + var recentActivity bool + if lastHeard := rs.opts.getLastDERPActivity(c.ForcePreferredDERP); !lastHeard.IsZero() { + now := c.timeNow() + recentActivity = lastHeard.After(rs.start) + recentActivity = recentActivity || lastHeard.After(now.Add(-PreferredDERPFrameTime)) + } + + if haveLatencySample || recentActivity { + r.PreferredDERP = c.ForcePreferredDERP + } + } } func updateLatency(m map[int]time.Duration, regionID int, d time.Duration) { diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go index 23891efcc..88c19623d 100644 --- a/net/netcheck/netcheck_test.go +++ b/net/netcheck/netcheck_test.go @@ -201,6 +201,7 @@ func TestAddReportHistoryAndSetPreferredDERP(t *testing.T) { steps []step homeParams *tailcfg.DERPHomeParams opts *GetReportOpts + forcedDERP int // if non-zero, force this DERP to be the preferred one wantDERP int // want PreferredDERP on final step wantPrevLen int // wanted len(c.prev) }{ @@ -366,12 +367,65 @@ func TestAddReportHistoryAndSetPreferredDERP(t *testing.T) { wantPrevLen: 2, wantDERP: 1, // diff is 11ms, but d2 is greater than 2/3s of d1 }, + { + name: "forced_two", + steps: []step{ + {time.Second, report("d1", 2, "d2", 3)}, + {2 * time.Second, report("d1", 4, "d2", 3)}, + }, + forcedDERP: 2, + wantPrevLen: 2, + wantDERP: 2, + }, + { + name: "forced_two_unavailable", + steps: []step{ + {time.Second, report("d1", 2, "d2", 1)}, + {2 * time.Second, report("d1", 4)}, + }, + forcedDERP: 2, + wantPrevLen: 2, + wantDERP: 1, + }, + { + name: "forced_two_no_probe_recent_activity", + steps: []step{ + {time.Second, report("d1", 2)}, + {2 * time.Second, report("d1", 4)}, + }, + opts: &GetReportOpts{ + GetLastDERPActivity: mkLDAFunc(map[int]time.Time{ + 1: startTime, + 2: startTime.Add(time.Second), + }), + }, + forcedDERP: 2, + wantPrevLen: 2, + wantDERP: 2, + }, + { + name: "forced_two_no_probe_no_recent_activity", + steps: []step{ + {time.Second, report("d1", 2)}, + {PreferredDERPFrameTime + time.Second, report("d1", 4)}, + }, + opts: &GetReportOpts{ + GetLastDERPActivity: mkLDAFunc(map[int]time.Time{ + 1: startTime, + 2: startTime, + }), + }, + forcedDERP: 2, + wantPrevLen: 2, + wantDERP: 1, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { fakeTime := startTime c := &Client{ - TimeNow: func() time.Time { return fakeTime }, + TimeNow: func() time.Time { return fakeTime }, + ForcePreferredDERP: tt.forcedDERP, } dm := &tailcfg.DERPMap{HomeParams: tt.homeParams} rs := &reportState{ diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 805716e61..bff905caa 100644 --- a/wgengine/magicsock/magicsock.go +++ 
b/wgengine/magicsock/magicsock.go @@ -3013,6 +3013,14 @@ func (c *Conn) DebugPickNewDERP() error { return errors.New("too few regions") } +func (c *Conn) DebugForcePreferDERP(n int) { + c.mu.Lock() + defer c.mu.Unlock() + + c.logf("magicsock: [debug] force preferred DERP set to: %d", n) + c.netChecker.SetForcePreferredDERP(n) +} + // portableTrySetSocketBuffer sets SO_SNDBUF and SO_RECVBUF on pconn to socketBufferSize, // logging an error if it occurs. func portableTrySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { From df94a1487076f744742d5b5c3a234d628bfd2bb5 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 5 Dec 2024 12:11:22 +0000 Subject: [PATCH 0206/1708] cmd/k8s-operator: don't error for transient failures (#14073) Every so often, the ProxyGroup and other controllers lose an optimistic locking race with other controllers that update the objects they create. Stop treating this as an error event, and instead just log an info level log line for it. Fixes #14072 Signed-off-by: Tom Proctor --- cmd/k8s-operator/connector.go | 17 +++++++++++++---- cmd/k8s-operator/dnsrecords.go | 11 ++++++++++- cmd/k8s-operator/egress-services.go | 10 +++++++++- cmd/k8s-operator/ingress.go | 10 +++++++++- cmd/k8s-operator/nameserver.go | 8 +++++++- cmd/k8s-operator/proxygroup.go | 18 +++++++++++++++--- cmd/k8s-operator/svc.go | 10 +++++++++- cmd/k8s-operator/tsrecorder.go | 17 ++++++++++++----- 8 files changed, 84 insertions(+), 17 deletions(-) diff --git a/cmd/k8s-operator/connector.go b/cmd/k8s-operator/connector.go index 1cce02fbb..c243036cb 100644 --- a/cmd/k8s-operator/connector.go +++ b/cmd/k8s-operator/connector.go @@ -10,6 +10,7 @@ import ( "fmt" "net/netip" "slices" + "strings" "sync" "time" @@ -35,6 +36,7 @@ import ( const ( reasonConnectorCreationFailed = "ConnectorCreationFailed" + reasonConnectorCreating = "ConnectorCreating" reasonConnectorCreated = "ConnectorCreated" reasonConnectorInvalid = "ConnectorInvalid" @@ -134,17 +136,24 @@ func (a *ConnectorReconciler) Reconcile(ctx context.Context, req reconcile.Reque } if err := a.validate(cn); err != nil { - logger.Errorf("error validating Connector spec: %w", err) message := fmt.Sprintf(messageConnectorInvalid, err) a.recorder.Eventf(cn, corev1.EventTypeWarning, reasonConnectorInvalid, message) return setStatus(cn, tsapi.ConnectorReady, metav1.ConditionFalse, reasonConnectorInvalid, message) } if err = a.maybeProvisionConnector(ctx, logger, cn); err != nil { - logger.Errorf("error creating Connector resources: %w", err) + reason := reasonConnectorCreationFailed message := fmt.Sprintf(messageConnectorCreationFailed, err) - a.recorder.Eventf(cn, corev1.EventTypeWarning, reasonConnectorCreationFailed, message) - return setStatus(cn, tsapi.ConnectorReady, metav1.ConditionFalse, reasonConnectorCreationFailed, message) + if strings.Contains(err.Error(), optimisticLockErrorMsg) { + reason = reasonConnectorCreating + message = fmt.Sprintf("optimistic lock error, retrying: %s", err) + err = nil + logger.Info(message) + } else { + a.recorder.Eventf(cn, corev1.EventTypeWarning, reason, message) + } + + return setStatus(cn, tsapi.ConnectorReady, metav1.ConditionFalse, reason, message) } logger.Info("Connector resources synced") diff --git a/cmd/k8s-operator/dnsrecords.go b/cmd/k8s-operator/dnsrecords.go index bba87bf25..f91dd49ec 100644 --- a/cmd/k8s-operator/dnsrecords.go +++ b/cmd/k8s-operator/dnsrecords.go @@ -10,6 +10,7 @@ import ( "encoding/json" "fmt" "slices" + "strings" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" @@ -98,7 +99,15 
@@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile. return reconcile.Result{}, nil } - return reconcile.Result{}, dnsRR.maybeProvision(ctx, headlessSvc, logger) + if err := dnsRR.maybeProvision(ctx, headlessSvc, logger); err != nil { + if strings.Contains(err.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error, retrying: %s", err) + } else { + return reconcile.Result{}, err + } + } + + return reconcile.Result{}, nil } // maybeProvision ensures that dnsrecords ConfigMap contains a record for the diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index a08c0b715..7544376fb 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -156,7 +156,15 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re return res, err } - return res, esr.maybeProvision(ctx, svc, l) + if err := esr.maybeProvision(ctx, svc, l); err != nil { + if strings.Contains(err.Error(), optimisticLockErrorMsg) { + l.Infof("optimistic lock error, retrying: %s", err) + } else { + return reconcile.Result{}, err + } + } + + return res, nil } func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (err error) { diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index 749869b22..3eb47dfb0 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -76,7 +76,15 @@ func (a *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request return reconcile.Result{}, a.maybeCleanup(ctx, logger, ing) } - return reconcile.Result{}, a.maybeProvision(ctx, logger, ing) + if err := a.maybeProvision(ctx, logger, ing); err != nil { + if strings.Contains(err.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error, retrying: %s", err) + } else { + return reconcile.Result{}, err + } + } + + return reconcile.Result{}, nil } func (a *IngressReconciler) maybeCleanup(ctx context.Context, logger *zap.SugaredLogger, ing *networkingv1.Ingress) error { diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index 6a9a6be93..ef0762a12 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -9,6 +9,7 @@ import ( "context" "fmt" "slices" + "strings" "sync" _ "embed" @@ -131,7 +132,12 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ } } if err := a.maybeProvision(ctx, &dnsCfg, logger); err != nil { - return reconcile.Result{}, fmt.Errorf("error provisioning nameserver resources: %w", err) + if strings.Contains(err.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error, retrying: %s", err) + return reconcile.Result{}, nil + } else { + return reconcile.Result{}, fmt.Errorf("error provisioning nameserver resources: %w", err) + } } a.mu.Lock() diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 344cd9ae0..39b7ccc01 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -12,6 +12,7 @@ import ( "fmt" "net/http" "slices" + "strings" "sync" "github.com/pkg/errors" @@ -45,6 +46,9 @@ const ( reasonProxyGroupReady = "ProxyGroupReady" reasonProxyGroupCreating = "ProxyGroupCreating" reasonProxyGroupInvalid = "ProxyGroupInvalid" + + // Copied from k8s.io/apiserver/pkg/registry/generic/registry/store.go@cccad306d649184bf2a0e319ba830c53f65c445c + optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version 
and try again" ) var gaugeProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupEgressCount) @@ -166,9 +170,17 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } if err = r.maybeProvision(ctx, pg, proxyClass); err != nil { - err = fmt.Errorf("error provisioning ProxyGroup resources: %w", err) - r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error()) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, err.Error()) + reason := reasonProxyGroupCreationFailed + msg := fmt.Sprintf("error provisioning ProxyGroup resources: %s", err) + if strings.Contains(err.Error(), optimisticLockErrorMsg) { + reason = reasonProxyGroupCreating + msg = fmt.Sprintf("optimistic lock error, retrying: %s", err) + err = nil + logger.Info(msg) + } else { + r.recorder.Eventf(pg, corev1.EventTypeWarning, reason, msg) + } + return setStatusReady(pg, metav1.ConditionFalse, reason, msg) } desiredReplicas := int(pgReplicas(pg)) diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index 314ac2398..70c810b25 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -121,7 +121,15 @@ func (a *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request return reconcile.Result{}, a.maybeCleanup(ctx, logger, svc) } - return reconcile.Result{}, a.maybeProvision(ctx, logger, svc) + if err := a.maybeProvision(ctx, logger, svc); err != nil { + if strings.Contains(err.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error, retrying: %s", err) + } else { + return reconcile.Result{}, err + } + } + + return reconcile.Result{}, nil } // maybeCleanup removes any existing resources related to serving svc over tailscale. diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index 4445578a6..44ce731fe 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -11,6 +11,7 @@ import ( "fmt" "net/http" "slices" + "strings" "sync" "github.com/pkg/errors" @@ -38,6 +39,7 @@ import ( const ( reasonRecorderCreationFailed = "RecorderCreationFailed" + reasonRecorderCreating = "RecorderCreating" reasonRecorderCreated = "RecorderCreated" reasonRecorderInvalid = "RecorderInvalid" @@ -119,23 +121,28 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques logger.Infof("ensuring Recorder is set up") tsr.Finalizers = append(tsr.Finalizers, FinalizerName) if err := r.Update(ctx, tsr); err != nil { - logger.Errorf("error adding finalizer: %w", err) return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderCreationFailed, reasonRecorderCreationFailed) } } if err := r.validate(tsr); err != nil { - logger.Errorf("error validating Recorder spec: %w", err) message := fmt.Sprintf("Recorder is invalid: %s", err) r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderInvalid, message) return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderInvalid, message) } if err = r.maybeProvision(ctx, tsr); err != nil { - logger.Errorf("error creating Recorder resources: %w", err) + reason := reasonRecorderCreationFailed message := fmt.Sprintf("failed creating Recorder: %s", err) - r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderCreationFailed, message) - return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderCreationFailed, message) + if strings.Contains(err.Error(), optimisticLockErrorMsg) { + reason = reasonRecorderCreating + message = fmt.Sprintf("optimistic lock error, retrying: %s", err) + 
err = nil + logger.Info(message) + } else { + r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderCreationFailed, message) + } + return setStatusReady(tsr, metav1.ConditionFalse, reason, message) } logger.Info("Recorder resources synced") From 614c6126435f2f63090586a1a5835379f5d77874 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 5 Dec 2024 13:21:03 +0000 Subject: [PATCH 0207/1708] net/netcheck: preserve STUN port defaulting to 3478 (#14289) Updates tailscale/tailscale#14287 Signed-off-by: Irbe Krumina --- net/netcheck/netcheck.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index d8f5e1d49..7930f88f6 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -1570,6 +1570,9 @@ func (c *Client) nodeAddrPort(ctx context.Context, n *tailcfg.DERPNode, port int if port < 0 || port > 1<<16-1 { return zero, false } + if port == 0 { + port = 3478 + } if n.STUNTestIP != "" { ip, err := netip.ParseAddr(n.STUNTestIP) if err != nil { From 87546a5edf6b6503a87eeb2d666baba57398a066 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 5 Dec 2024 09:40:40 -0800 Subject: [PATCH 0208/1708] cmd/derper: allow absent SNI when using manual certs and IP literal for hostname Updates #11776 Change-Id: I81756415feb630da093833accc3074903ebd84a7 Signed-off-by: Brad Fitzpatrick --- cmd/derper/cert.go | 14 ++++-- cmd/derper/cert_test.go | 97 +++++++++++++++++++++++++++++++++++++++ cmd/derper/derper.go | 2 +- cmd/derper/derper_test.go | 2 - 4 files changed, 108 insertions(+), 7 deletions(-) create mode 100644 cmd/derper/cert_test.go diff --git a/cmd/derper/cert.go b/cmd/derper/cert.go index db84aa515..623fa376f 100644 --- a/cmd/derper/cert.go +++ b/cmd/derper/cert.go @@ -8,6 +8,7 @@ import ( "crypto/x509" "errors" "fmt" + "net" "net/http" "path/filepath" "regexp" @@ -53,8 +54,9 @@ func certProviderByCertMode(mode, dir, hostname string) (certProvider, error) { } type manualCertManager struct { - cert *tls.Certificate - hostname string + cert *tls.Certificate + hostname string // hostname or IP address of server + noHostname bool // whether hostname is an IP address } // NewManualCertManager returns a cert provider which read certificate by given hostname on create. 
@@ -74,7 +76,11 @@ func NewManualCertManager(certdir, hostname string) (certProvider, error) { if err := x509Cert.VerifyHostname(hostname); err != nil { return nil, fmt.Errorf("cert invalid for hostname %q: %w", hostname, err) } - return &manualCertManager{cert: &cert, hostname: hostname}, nil + return &manualCertManager{ + cert: &cert, + hostname: hostname, + noHostname: net.ParseIP(hostname) != nil, + }, nil } func (m *manualCertManager) TLSConfig() *tls.Config { @@ -88,7 +94,7 @@ func (m *manualCertManager) TLSConfig() *tls.Config { } func (m *manualCertManager) getCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { - if hi.ServerName != m.hostname { + if hi.ServerName != m.hostname && !m.noHostname { return nil, fmt.Errorf("cert mismatch with hostname: %q", hi.ServerName) } diff --git a/cmd/derper/cert_test.go b/cmd/derper/cert_test.go new file mode 100644 index 000000000..a379e5c04 --- /dev/null +++ b/cmd/derper/cert_test.go @@ -0,0 +1,97 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "net" + "os" + "path/filepath" + "testing" + "time" +) + +// Verify that in --certmode=manual mode, we can use a bare IP address +// as the --hostname and that GetCertificate will return it. +func TestCertIP(t *testing.T) { + dir := t.TempDir() + const hostname = "1.2.3.4" + + priv, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader) + if err != nil { + t.Fatal(err) + } + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + t.Fatal(err) + } + ip := net.ParseIP(hostname) + if ip == nil { + t.Fatalf("invalid IP address %q", hostname) + } + template := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Tailscale Test Corp"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(30 * 24 * time.Hour), + + KeyUsage: x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + IPAddresses: []net.IP{ip}, + } + derBytes, err := x509.CreateCertificate(rand.Reader, template, template, &priv.PublicKey, priv) + if err != nil { + t.Fatal(err) + } + certOut, err := os.Create(filepath.Join(dir, hostname+".crt")) + if err != nil { + t.Fatal(err) + } + if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { + t.Fatalf("Failed to write data to cert.pem: %v", err) + } + if err := certOut.Close(); err != nil { + t.Fatalf("Error closing cert.pem: %v", err) + } + + keyOut, err := os.OpenFile(filepath.Join(dir, hostname+".key"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + t.Fatal(err) + } + privBytes, err := x509.MarshalPKCS8PrivateKey(priv) + if err != nil { + t.Fatalf("Unable to marshal private key: %v", err) + } + if err := pem.Encode(keyOut, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}); err != nil { + t.Fatalf("Failed to write data to key.pem: %v", err) + } + if err := keyOut.Close(); err != nil { + t.Fatalf("Error closing key.pem: %v", err) + } + + cp, err := certProviderByCertMode("manual", dir, hostname) + if err != nil { + t.Fatal(err) + } + back, err := cp.TLSConfig().GetCertificate(&tls.ClientHelloInfo{ + ServerName: "", // no SNI + }) + if err != nil { + t.Fatalf("GetCertificate: %v", err) + } + if back == nil { + t.Fatalf("GetCertificate 
returned nil") + } +} diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 51be3abbe..6e24e0ab1 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -58,7 +58,7 @@ var ( configPath = flag.String("c", "", "config file path") certMode = flag.String("certmode", "letsencrypt", "mode for getting a cert. possible options: manual, letsencrypt") certDir = flag.String("certdir", tsweb.DefaultCertDir("derper-certs"), "directory to store LetsEncrypt certs, if addr's port is :443") - hostname = flag.String("hostname", "derp.tailscale.com", "LetsEncrypt host name, if addr's port is :443") + hostname = flag.String("hostname", "derp.tailscale.com", "LetsEncrypt host name, if addr's port is :443. When --certmode=manual, this can be an IP address to avoid SNI checks") runSTUN = flag.Bool("stun", true, "whether to run a STUN server. It will bind to the same IP (if any) as the --addr flag value.") runDERP = flag.Bool("derp", true, "whether to run a DERP server. The only reason to set this false is if you're decommissioning a server but want to keep its bootstrap DNS functionality still running.") diff --git a/cmd/derper/derper_test.go b/cmd/derper/derper_test.go index 08d2e9cbf..6dce1fcdf 100644 --- a/cmd/derper/derper_test.go +++ b/cmd/derper/derper_test.go @@ -6,7 +6,6 @@ package main import ( "bytes" "context" - "fmt" "net/http" "net/http/httptest" "strings" @@ -138,5 +137,4 @@ func TestTemplate(t *testing.T) { if !strings.Contains(str, "Debug info") { t.Error("Output is missing debug info") } - fmt.Println(buf.String()) } From b37a478cacf0a3d92ec5a63a38b10dc24c22cc33 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 5 Dec 2024 14:00:12 -0800 Subject: [PATCH 0209/1708] go.mod: bump x/net and dependencies Pulling in upstream fix for #14201. 
Updates #14201 Signed-off-by: James Tucker --- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index 1924e93ed..e57573f18 100644 --- a/go.mod +++ b/go.mod @@ -95,14 +95,14 @@ require ( go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20220726221520-4f986261bf13 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.25.0 + golang.org/x/crypto v0.30.0 golang.org/x/exp v0.0.0-20240119083558-1b970713d09a golang.org/x/mod v0.19.0 - golang.org/x/net v0.27.0 + golang.org/x/net v0.32.0 golang.org/x/oauth2 v0.16.0 - golang.org/x/sync v0.9.0 - golang.org/x/sys v0.27.0 - golang.org/x/term v0.22.0 + golang.org/x/sync v0.10.0 + golang.org/x/sys v0.28.0 + golang.org/x/term v0.27.0 golang.org/x/time v0.5.0 golang.org/x/tools v0.23.0 golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 @@ -386,7 +386,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect golang.org/x/image v0.18.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/text v0.21.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/protobuf v1.33.0 // indirect diff --git a/go.sum b/go.sum index fadfb22b1..1cbb440fa 100644 --- a/go.sum +++ b/go.sum @@ -1062,8 +1062,8 @@ golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= +golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1153,8 +1153,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1176,8 +1176,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1239,8 +1239,8 @@ golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1248,8 +1248,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1262,8 +1262,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 66aa77416744037baec93206ae212012a2314f83 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 5 Dec 2024 17:00:54 -0600 Subject: [PATCH 0210/1708] cmd/gitops-pusher: default previousEtag to controlEtag (#14296) If previousEtag is empty, then we assume control ACLs were not modified manually and push the local ACLs. Instead, we defaulted to localEtag which would be different if local ACLs were different from control. AFAIK this was always buggy, but never reported? Fixes #14295 Signed-off-by: Andrew Lytvynov --- cmd/gitops-pusher/gitops-pusher.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/gitops-pusher/gitops-pusher.go b/cmd/gitops-pusher/gitops-pusher.go index c33937ef2..e7a0aeee1 100644 --- a/cmd/gitops-pusher/gitops-pusher.go +++ b/cmd/gitops-pusher/gitops-pusher.go @@ -58,8 +58,8 @@ func apply(cache *Cache, client *http.Client, tailnet, apiKey string) func(conte } if cache.PrevETag == "" { - log.Println("no previous etag found, assuming local file is correct and recording that") - cache.PrevETag = localEtag + log.Println("no previous etag found, assuming the latest control etag") + cache.PrevETag = controlEtag } log.Printf("control: %s", controlEtag) @@ -105,8 +105,8 @@ func test(cache *Cache, client *http.Client, tailnet, apiKey string) func(contex } if cache.PrevETag == "" { - log.Println("no previous etag found, assuming local file is correct and recording that") - cache.PrevETag = localEtag + log.Println("no previous etag found, assuming the latest control etag") + cache.PrevETag = controlEtag } log.Printf("control: %s", controlEtag) @@ -148,8 +148,8 @@ func getChecksums(cache *Cache, client *http.Client, tailnet, apiKey string) fun } if cache.PrevETag == "" { - log.Println("no previous etag found, assuming local file is correct and recording that") - cache.PrevETag = Shuck(localEtag) + log.Println("no previous etag found, assuming control etag") + cache.PrevETag = Shuck(controlEtag) } log.Printf("control: %s", controlEtag) From a482dc037bf6d22624e8750ef889f8e025da8a6e Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Thu, 5 Dec 2024 15:50:24 -0800 Subject: [PATCH 0211/1708] logpolicy: cleanup options API and allow setting http.Client (#11503) This package grew organically over time and is an awful mix of explicitly declared options and globally set parameters via environment variables and other subtle effects. Add a new Options and TransportOptions type to allow for the creation of a Policy or http.RoundTripper with some set of options. The options struct avoids the need to add yet more NewXXX functions for every possible combination of ordered arguments. The goal of this refactor is to allow specifying the http.Client to use with the Policy. 
Updates tailscale/corp#18177 Signed-off-by: Joe Tsai --- logpolicy/logpolicy.go | 187 ++++++++++++++++++++++++++++------------- 1 file changed, 130 insertions(+), 57 deletions(-) diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index d657c4e93..fa882ad3a 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -9,6 +9,7 @@ package logpolicy import ( "bufio" "bytes" + "cmp" "context" "crypto/tls" "encoding/json" @@ -449,25 +450,63 @@ func tryFixLogStateLocation(dir, cmdname string, logf logger.Logf) { } } -// New returns a new log policy (a logger and its instance ID) for a given -// collection name. -// -// The netMon parameter is optional. It should be specified in environments where -// Tailscaled is manipulating the routing table. -// -// The logf parameter is optional; if non-nil, information logs (e.g. when -// migrating state) are sent to that logger, and global changes to the log -// package are avoided. If nil, logs will be printed using log.Printf. +// Deprecated: Use [Options.New] instead. func New(collection string, netMon *netmon.Monitor, health *health.Tracker, logf logger.Logf) *Policy { - return NewWithConfigPath(collection, "", "", netMon, health, logf) + return Options{ + Collection: collection, + NetMon: netMon, + Health: health, + Logf: logf, + }.New() } -// NewWithConfigPath is identical to New, but uses the specified directory and -// command name. If either is empty, it derives them automatically. -// -// The netMon parameter is optional. It should be specified in environments where -// Tailscaled is manipulating the routing table. +// Deprecated: Use [Options.New] instead. func NewWithConfigPath(collection, dir, cmdName string, netMon *netmon.Monitor, health *health.Tracker, logf logger.Logf) *Policy { + return Options{ + Collection: collection, + Dir: dir, + CmdName: cmdName, + NetMon: netMon, + Health: health, + Logf: logf, + }.New() +} + +// Options is used to construct a [Policy]. +type Options struct { + // Collection is a required collection to upload logs under. + // Collection is a namespace for the type logs. + // For example, logs for a node use "tailnode.log.tailscale.io". + Collection string + + // Dir is an optional directory to store the log configuration. + // If empty, [LogsDir] is used. + Dir string + + // CmdName is an optional name of the current binary. + // If empty, [version.CmdName] is used. + CmdName string + + // NetMon is an optional parameter for monitoring. + // If non-nil, it's used to do faster interface lookups. + NetMon *netmon.Monitor + + // Health is an optional parameter for health status. + // If non-nil, it's used to construct the default HTTP client. + Health *health.Tracker + + // Logf is an optional logger to use. + // If nil, [log.Printf] will be used instead. + Logf logger.Logf + + // HTTPC is an optional client to use upload logs. + // If nil, [TransportOptions.New] is used to construct a new client + // with that particular transport sending logs to the default logs server. + HTTPC *http.Client +} + +// New returns a new log policy (a logger and its instance ID). +func (opts Options) New() *Policy { if hostinfo.IsNATLabGuestVM() { // In NATLab Gokrazy instances, tailscaled comes up concurently with // DHCP and the doesn't have DNS for a while. Wait for DHCP first. 
@@ -495,23 +534,23 @@ func NewWithConfigPath(collection, dir, cmdName string, netMon *netmon.Monitor, earlyErrBuf.WriteByte('\n') } - if dir == "" { - dir = LogsDir(earlyLogf) + if opts.Dir == "" { + opts.Dir = LogsDir(earlyLogf) } - if cmdName == "" { - cmdName = version.CmdName() + if opts.CmdName == "" { + opts.CmdName = version.CmdName() } - useStdLogger := logf == nil + useStdLogger := opts.Logf == nil if useStdLogger { - logf = log.Printf + opts.Logf = log.Printf } - tryFixLogStateLocation(dir, cmdName, logf) + tryFixLogStateLocation(opts.Dir, opts.CmdName, opts.Logf) - cfgPath := filepath.Join(dir, fmt.Sprintf("%s.log.conf", cmdName)) + cfgPath := filepath.Join(opts.Dir, fmt.Sprintf("%s.log.conf", opts.CmdName)) if runtime.GOOS == "windows" { - switch cmdName { + switch opts.CmdName { case "tailscaled": // Tailscale 1.14 and before stored state under %LocalAppData% // (usually "C:\WINDOWS\system32\config\systemprofile\AppData\Local" @@ -542,7 +581,7 @@ func NewWithConfigPath(collection, dir, cmdName string, netMon *netmon.Monitor, cfgPath = paths.TryConfigFileMigration(earlyLogf, oldPath, cfgPath) case "tailscale-ipn": for _, oldBase := range []string{"wg64.log.conf", "wg32.log.conf"} { - oldConf := filepath.Join(dir, oldBase) + oldConf := filepath.Join(opts.Dir, oldBase) if fi, err := os.Stat(oldConf); err == nil && fi.Mode().IsRegular() { cfgPath = paths.TryConfigFileMigration(earlyLogf, oldConf, cfgPath) break @@ -555,9 +594,9 @@ func NewWithConfigPath(collection, dir, cmdName string, netMon *netmon.Monitor, if err != nil { earlyLogf("logpolicy.ConfigFromFile %v: %v", cfgPath, err) } - if err := newc.Validate(collection); err != nil { + if err := newc.Validate(opts.Collection); err != nil { earlyLogf("logpolicy.Config.Validate for %v: %v", cfgPath, err) - newc = NewConfig(collection) + newc = NewConfig(opts.Collection) if err := newc.Save(cfgPath); err != nil { earlyLogf("logpolicy.Config.Save for %v: %v", cfgPath, err) } @@ -568,31 +607,39 @@ func NewWithConfigPath(collection, dir, cmdName string, netMon *netmon.Monitor, PrivateID: newc.PrivateID, Stderr: logWriter{console}, CompressLogs: true, - HTTPC: &http.Client{Transport: NewLogtailTransport(logtail.DefaultHost, netMon, health, logf)}, } - if collection == logtail.CollectionNode { + if opts.Collection == logtail.CollectionNode { conf.MetricsDelta = clientmetric.EncodeLogTailMetricsDelta conf.IncludeProcID = true conf.IncludeProcSequence = true } if envknob.NoLogsNoSupport() || testenv.InTest() { - logf("You have disabled logging. Tailscale will not be able to provide support.") + opts.Logf("You have disabled logging. Tailscale will not be able to provide support.") conf.HTTPC = &http.Client{Transport: noopPretendSuccessTransport{}} } else { // Only attach an on-disk filch buffer if we are going to be sending logs. // No reason to persist them locally just to drop them later. - attachFilchBuffer(&conf, dir, cmdName, logf) - - if val := getLogTarget(); val != "" { - logf("You have enabled a non-default log target. Doing without being told to by Tailscale staff or your network administrator will make getting support difficult.") - conf.BaseURL = val - u, _ := url.Parse(val) - conf.HTTPC = &http.Client{Transport: NewLogtailTransport(u.Host, netMon, health, logf)} + attachFilchBuffer(&conf, opts.Dir, opts.CmdName, opts.Logf) + conf.HTTPC = opts.HTTPC + + if conf.HTTPC == nil { + logHost := logtail.DefaultHost + if val := getLogTarget(); val != "" { + opts.Logf("You have enabled a non-default log target. 
Doing without being told to by Tailscale staff or your network administrator will make getting support difficult.") + conf.BaseURL = val + u, _ := url.Parse(val) + logHost = u.Host + } + conf.HTTPC = &http.Client{Transport: TransportOptions{ + Host: logHost, + NetMon: opts.NetMon, + Health: opts.Health, + Logf: opts.Logf, + }.New()} } - } - lw := logtail.NewLogger(conf, logf) + lw := logtail.NewLogger(conf, opts.Logf) var logOutput io.Writer = lw @@ -610,19 +657,19 @@ func NewWithConfigPath(collection, dir, cmdName string, netMon *netmon.Monitor, log.SetOutput(logOutput) } - logf("Program starting: v%v, Go %v: %#v", + opts.Logf("Program starting: v%v, Go %v: %#v", version.Long(), goVersion(), os.Args) - logf("LogID: %v", newc.PublicID) + opts.Logf("LogID: %v", newc.PublicID) if earlyErrBuf.Len() != 0 { - logf("%s", earlyErrBuf.Bytes()) + opts.Logf("%s", earlyErrBuf.Bytes()) } return &Policy{ Logtail: lw, PublicID: newc.PublicID, - Logf: logf, + Logf: opts.Logf, } } @@ -763,23 +810,48 @@ func dialContext(ctx context.Context, netw, addr string, netMon *netmon.Monitor, return c, err } -// NewLogtailTransport returns an HTTP Transport particularly suited to uploading -// logs to the given host name. See DialContext for details on how it works. -// -// The netMon parameter is optional. It should be specified in environments where -// Tailscaled is manipulating the routing table. -// -// The logf parameter is optional; if non-nil, logs are printed using the -// provided function; if nil, log.Printf will be used instead. +// Deprecated: Use [TransportOptions.New] instead. func NewLogtailTransport(host string, netMon *netmon.Monitor, health *health.Tracker, logf logger.Logf) http.RoundTripper { + return TransportOptions{Host: host, NetMon: netMon, Health: health, Logf: logf}.New() +} + +// TransportOptions is used to construct an [http.RoundTripper]. +type TransportOptions struct { + // Host is the optional hostname of the logs server. + // If empty, then [logtail.DefaultHost] is used. + Host string + + // NetMon is an optional parameter for monitoring. + // If non-nil, it's used to do faster interface lookups. + NetMon *netmon.Monitor + + // Health is an optional parameter for health status. + // If non-nil, it's used to construct the default HTTP client. + Health *health.Tracker + + // Logf is an optional logger to use. + // If nil, [log.Printf] will be used instead. + Logf logger.Logf + + // TLSClientConfig is an optional TLS configuration to use. + // If non-nil, the configuration will be cloned. + TLSClientConfig *tls.Config +} + +// New returns an HTTP Transport particularly suited to uploading logs +// to the given host name. See [DialContext] for details on how it works. +func (opts TransportOptions) New() http.RoundTripper { if testenv.InTest() { return noopPretendSuccessTransport{} } - if netMon == nil { - netMon = netmon.NewStatic() + if opts.NetMon == nil { + opts.NetMon = netmon.NewStatic() } // Start with a copy of http.DefaultTransport and tweak it a bit. 
tr := http.DefaultTransport.(*http.Transport).Clone() + if opts.TLSClientConfig != nil { + tr.TLSClientConfig = opts.TLSClientConfig.Clone() + } tr.Proxy = tshttpproxy.ProxyFromEnvironment tshttpproxy.SetTransportGetProxyConnectHeader(tr) @@ -790,10 +862,10 @@ func NewLogtailTransport(host string, netMon *netmon.Monitor, health *health.Tra tr.DisableCompression = true // Log whenever we dial: - if logf == nil { - logf = log.Printf + if opts.Logf == nil { + opts.Logf = log.Printf } - tr.DialContext = MakeDialFunc(netMon, logf) + tr.DialContext = MakeDialFunc(opts.NetMon, opts.Logf) // We're uploading logs ideally infrequently, with specific timing that will // change over time. Try to keep the connection open, to avoid repeatedly @@ -815,7 +887,8 @@ func NewLogtailTransport(host string, netMon *netmon.Monitor, health *health.Tra tr.TLSNextProto = map[string]func(authority string, c *tls.Conn) http.RoundTripper{} } - tr.TLSClientConfig = tlsdial.Config(host, health, tr.TLSClientConfig) + host := cmp.Or(opts.Host, logtail.DefaultHost) + tr.TLSClientConfig = tlsdial.Config(host, opts.Health, tr.TLSClientConfig) // Force TLS 1.3 since we know log.tailscale.io supports it. tr.TLSClientConfig.MinVersion = tls.VersionTLS13 From dc6728729e903e83d7bc91de51dc38e115d79624 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 5 Dec 2024 15:45:48 -0800 Subject: [PATCH 0212/1708] health: fix TestHealthMetric to pass on release branch Fixes #14302 Change-Id: I9fd893a97711c72b713fe5535f2ccb93fadf7452 Signed-off-by: Brad Fitzpatrick --- health/health_test.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/health/health_test.go b/health/health_test.go index 69e586066..ebdddc988 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -14,6 +14,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/opt" "tailscale.com/util/usermetric" + "tailscale.com/version" ) func TestAppendWarnableDebugFlags(t *testing.T) { @@ -352,6 +353,11 @@ func TestShowUpdateWarnable(t *testing.T) { } func TestHealthMetric(t *testing.T) { + unstableBuildWarning := 0 + if version.IsUnstableBuild() { + unstableBuildWarning = 1 + } + tests := []struct { desc string check bool @@ -361,20 +367,20 @@ func TestHealthMetric(t *testing.T) { }{ // When running in dev, and not initialising the client, there will be two warnings // by default: - // - is-using-unstable-version + // - is-using-unstable-version (except on the release branch) // - wantrunning-false { desc: "base-warnings", check: true, cv: nil, - wantMetricCount: 2, + wantMetricCount: unstableBuildWarning + 1, }, // with: update-available { desc: "update-warning", check: true, cv: &tailcfg.ClientVersion{RunningLatest: false, LatestVersion: "1.2.3"}, - wantMetricCount: 3, + wantMetricCount: unstableBuildWarning + 2, }, } for _, tt := range tests { From 06a82f416f2339e3309eec32ab98b4858d045697 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Wed, 4 Dec 2024 14:43:43 -0600 Subject: [PATCH 0213/1708] cmd,{get-authkey,tailscale}: remove unnecessary scope qualifier from OAuth clients OAuth clients that were used to generate an auth_key previously specified the scope 'device'. 'device' is not an actual scope, the real scope is 'devices'. The resulting OAuth token ended up including all scopes from the specified OAuth client, so the code was able to successfully create auth_keys. 
It's better not to hardcode a scope here anyway, so that we have the flexibility of changing which scope(s) are used in the future without having to update old clients. Since the qualifier never actually did anything, this commit simply removes it. Updates tailscale/corp#24934 Signed-off-by: Percy Wegmann --- cmd/get-authkey/main.go | 1 - cmd/tailscale/cli/up.go | 1 - 2 files changed, 2 deletions(-) diff --git a/cmd/get-authkey/main.go b/cmd/get-authkey/main.go index 777258d64..95c930756 100644 --- a/cmd/get-authkey/main.go +++ b/cmd/get-authkey/main.go @@ -46,7 +46,6 @@ func main() { ClientID: clientID, ClientSecret: clientSecret, TokenURL: baseURL + "/api/v2/oauth/token", - Scopes: []string{"device"}, } ctx := context.Background() diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 6c5c6f337..e86687527 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -1157,7 +1157,6 @@ func resolveAuthKey(ctx context.Context, v, tags string) (string, error) { ClientID: "some-client-id", // ignored ClientSecret: clientSecret, TokenURL: baseURL + "/api/v2/oauth/token", - Scopes: []string{"device"}, } tsClient := tailscale.NewClient("-", nil) From f81786007989c5d3e37253f269a561f9937dccfc Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 6 Dec 2024 11:17:11 -0600 Subject: [PATCH 0214/1708] VERSION.txt: this is v1.79.0 Signed-off-by: Nick Khyl --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index 79e15fd49..b3a8c61e6 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.77.0 +1.79.0 From c2761162a002e65e2305f7570b2c54c561ac151f Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Fri, 6 Dec 2024 14:27:52 -0500 Subject: [PATCH 0215/1708] cmd/stunc: enforce read timeout deadline (#14309) Make argparsing use flag for adding a new parameter that requires parsing. Enforce a read timeout deadline waiting for response from the stun server provided in the args. Otherwise the program will never exit. 
Fixes #14267 Signed-off-by: Mike O'Driscoll --- cmd/stunc/stunc.go | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/cmd/stunc/stunc.go b/cmd/stunc/stunc.go index 9743a3300..c4b2eedd3 100644 --- a/cmd/stunc/stunc.go +++ b/cmd/stunc/stunc.go @@ -5,24 +5,40 @@ package main import ( + "flag" "log" "net" "os" "strconv" + "time" "tailscale.com/net/stun" ) func main() { log.SetFlags(0) - - if len(os.Args) < 2 || len(os.Args) > 3 { - log.Fatalf("usage: %s [port]", os.Args[0]) - } - host := os.Args[1] + var host string port := "3478" - if len(os.Args) == 3 { - port = os.Args[2] + + var readTimeout time.Duration + flag.DurationVar(&readTimeout, "timeout", 3*time.Second, "response wait timeout") + + flag.Parse() + + values := flag.Args() + if len(values) < 1 || len(values) > 2 { + log.Printf("usage: %s [port]", os.Args[0]) + flag.PrintDefaults() + os.Exit(1) + } else { + for i, value := range values { + switch i { + case 0: + host = value + case 1: + port = value + } + } } _, err := strconv.ParseUint(port, 10, 16) if err != nil { @@ -46,6 +62,10 @@ func main() { log.Fatal(err) } + err = c.SetReadDeadline(time.Now().Add(readTimeout)) + if err != nil { + log.Fatal(err) + } var buf [1024]byte n, raddr, err := c.ReadFromUDPAddrPort(buf[:]) if err != nil { From 06c5e83c204b29496e67a8184d9ed7791c05b23c Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Mon, 9 Dec 2024 20:42:10 +0000 Subject: [PATCH 0216/1708] hostinfo: fix testing in container (#14330) Previously this unit test failed if it was run in a container. Update the assert to focus on exactly the condition we are trying to assert: the package type should only be 'container' if we use the build tag. Updates #14317 Signed-off-by: Tom Proctor --- hostinfo/hostinfo_linux_test.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/hostinfo/hostinfo_linux_test.go b/hostinfo/hostinfo_linux_test.go index c8bd2abbe..0286fadf3 100644 --- a/hostinfo/hostinfo_linux_test.go +++ b/hostinfo/hostinfo_linux_test.go @@ -35,8 +35,12 @@ remotes/origin/QTSFW_5.0.0` } } -func TestInContainer(t *testing.T) { - if got := inContainer(); !got.EqualBool(false) { - t.Errorf("inContainer = %v; want false due to absence of ts_package_container build tag", got) +func TestPackageTypeNotContainer(t *testing.T) { + var got string + if packageType != nil { + got = packageType() + } + if got == "container" { + t.Fatal("packageType = container; should only happen if build tag ts_package_container is set") } } From 24b243c19490be0d8d133659901be07281f4b745 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Tue, 10 Dec 2024 08:58:27 -0500 Subject: [PATCH 0217/1708] derp: add env var setting server send queue depth (#14334) Use envknob to configure the per client send queue depth for the derp server. 
Fixes tailscale/corp#24978 Signed-off-by: Mike O'Driscoll --- derp/derp_server.go | 23 ++++++++++++++++++----- derp/derp_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 5 deletions(-) diff --git a/derp/derp_server.go b/derp/derp_server.go index ab0ab0a90..8066b7f19 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -84,11 +84,19 @@ func init() { } const ( - perClientSendQueueDepth = 32 // packets buffered for sending - writeTimeout = 2 * time.Second - privilegedWriteTimeout = 30 * time.Second // for clients with the mesh key + defaultPerClientSendQueueDepth = 32 // default packets buffered for sending + writeTimeout = 2 * time.Second + privilegedWriteTimeout = 30 * time.Second // for clients with the mesh key ) +func getPerClientSendQueueDepth() int { + if v, ok := envknob.LookupInt("TS_DEBUG_DERP_PER_CLIENT_SEND_QUEUE_DEPTH"); ok { + return v + } + + return defaultPerClientSendQueueDepth +} + // dupPolicy is a temporary (2021-08-30) mechanism to change the policy // of how duplicate connection for the same key are handled. type dupPolicy int8 @@ -190,6 +198,9 @@ type Server struct { // maps from netip.AddrPort to a client's public key keyOfAddr map[netip.AddrPort]key.NodePublic + // Sets the client send queue depth for the server. + perClientSendQueueDepth int + clock tstime.Clock } @@ -377,6 +388,8 @@ func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { s.packetsDroppedTypeDisco = s.packetsDroppedType.Get("disco") s.packetsDroppedTypeOther = s.packetsDroppedType.Get("other") + + s.perClientSendQueueDepth = getPerClientSendQueueDepth() return s } @@ -849,8 +862,8 @@ func (s *Server) accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, rem done: ctx.Done(), remoteIPPort: remoteIPPort, connectedAt: s.clock.Now(), - sendQueue: make(chan pkt, perClientSendQueueDepth), - discoSendQueue: make(chan pkt, perClientSendQueueDepth), + sendQueue: make(chan pkt, s.perClientSendQueueDepth), + discoSendQueue: make(chan pkt, s.perClientSendQueueDepth), sendPongCh: make(chan [8]byte, 1), peerGone: make(chan peerGoneMsg), canMesh: s.isMeshPeer(clientInfo), diff --git a/derp/derp_test.go b/derp/derp_test.go index 9185194dd..f0fc52fe7 100644 --- a/derp/derp_test.go +++ b/derp/derp_test.go @@ -6,6 +6,7 @@ package derp import ( "bufio" "bytes" + "cmp" "context" "crypto/x509" "encoding/asn1" @@ -23,6 +24,7 @@ import ( "testing" "time" + qt "github.com/frankban/quicktest" "go4.org/mem" "golang.org/x/time/rate" "tailscale.com/disco" @@ -1598,3 +1600,29 @@ func TestServerRepliesToPing(t *testing.T) { } } } + +func TestGetPerClientSendQueueDepth(t *testing.T) { + c := qt.New(t) + envKey := "TS_DEBUG_DERP_PER_CLIENT_SEND_QUEUE_DEPTH" + + testCases := []struct { + envVal string + want int + }{ + // Empty case, envknob treats empty as missing also. + { + "", defaultPerClientSendQueueDepth, + }, + { + "64", 64, + }, + } + + for _, tc := range testCases { + t.Run(cmp.Or(tc.envVal, "empty"), func(t *testing.T) { + t.Setenv(envKey, tc.envVal) + val := getPerClientSendQueueDepth() + c.Assert(val, qt.Equals, tc.want) + }) + } +} From ea3d0bcfd4452697b966a5f5842fd812855a8828 Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Tue, 10 Dec 2024 10:51:03 -0700 Subject: [PATCH 0218/1708] prober,derp/derphttp: make dev-mode DERP probes work without TLS (#14347) Make dev-mode DERP probes work without TLS. Properly dial port `3340` when not using HTTPS when dialing nodes in `derphttp_client`. Skip verifying TLS state in `newConn` if we are not running a prober. 
Updates tailscale/corp#24635 Signed-off-by: Percy Wegmann Co-authored-by: Percy Wegmann --- derp/derphttp/derphttp_client.go | 3 +++ prober/derp.go | 28 ++++++++++++++++------------ 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index c95d072b1..7387b60b4 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -757,6 +757,9 @@ func (c *Client) dialNode(ctx context.Context, n *tailcfg.DERPNode) (net.Conn, e } dst := cmp.Or(dstPrimary, n.HostName) port := "443" + if !c.useHTTPS() { + port = "3340" + } if n.DERPPort != 0 { port = fmt.Sprint(n.DERPPort) } diff --git a/prober/derp.go b/prober/derp.go index b1ebc590d..bce40e34c 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -597,18 +597,22 @@ func newConn(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode, isPr if err != nil { return nil, err } - cs, ok := dc.TLSConnectionState() - if !ok { - dc.Close() - return nil, errors.New("no TLS state") - } - if len(cs.PeerCertificates) == 0 { - dc.Close() - return nil, errors.New("no peer certificates") - } - if cs.ServerName != n.HostName { - dc.Close() - return nil, fmt.Errorf("TLS server name %q != derp hostname %q", cs.ServerName, n.HostName) + + // Only verify TLS state if this is a prober. + if isProber { + cs, ok := dc.TLSConnectionState() + if !ok { + dc.Close() + return nil, errors.New("no TLS state") + } + if len(cs.PeerCertificates) == 0 { + dc.Close() + return nil, errors.New("no peer certificates") + } + if cs.ServerName != n.HostName { + dc.Close() + return nil, fmt.Errorf("TLS server name %q != derp hostname %q", cs.ServerName, n.HostName) + } } errc := make(chan error, 1) From fa28b024d6f9b9174a9e00ae2d798a7ed8d43a99 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 14:32:04 -0700 Subject: [PATCH 0219/1708] .github: Bump actions/cache from 4.1.2 to 4.2.0 (#14331) Bumps [actions/cache](https://github.com/actions/cache) from 4.1.2 to 4.2.0. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/6849a6489940f00c2f30c0fb92c6274307ccb58a...1bd1e32a3bdc45362d1e726936510720a7c30a57) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f9bb5cae2..a4dccd103 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -80,7 +80,7 @@ jobs: - name: checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Restore Cache - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -159,7 +159,7 @@ jobs: cache: false - name: Restore Cache - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -260,7 +260,7 @@ jobs: - name: checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Restore Cache - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -319,7 +319,7 @@ jobs: - name: checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Restore Cache - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -367,7 +367,7 @@ jobs: - name: checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Restore Cache - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache From d54cd593905fc0bc7cc13009e1db2741bf6960e4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 15:15:11 -0700 Subject: [PATCH 0220/1708] .github: Bump github/codeql-action from 3.27.1 to 3.27.6 (#14332) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.27.1 to 3.27.6. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/4f3212b61783c3c68e8309a0f18a699764811cda...aa578102511db1f4524ed59b8cc2bae4f6e88195) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index d9a287be3..ba21e8fe9 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@4f3212b61783c3c68e8309a0f18a699764811cda # v3.27.1 + uses: github/codeql-action/init@aa578102511db1f4524ed59b8cc2bae4f6e88195 # v3.27.6 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@4f3212b61783c3c68e8309a0f18a699764811cda # v3.27.1 + uses: github/codeql-action/autobuild@aa578102511db1f4524ed59b8cc2bae4f6e88195 # v3.27.6 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@4f3212b61783c3c68e8309a0f18a699764811cda # v3.27.1 + uses: github/codeql-action/analyze@aa578102511db1f4524ed59b8cc2bae4f6e88195 # v3.27.6 From 8b1d01161bbca8a26c2a50208444087c9fa2b3f1 Mon Sep 17 00:00:00 2001 From: Bjorn Neergaard Date: Wed, 11 Dec 2024 02:52:56 -0700 Subject: [PATCH 0221/1708] cmd/containerboot: guard kubeClient against nil dereference (#14357) A method on kc was called unconditionally, even if was not initialized, leading to a nil pointer dereference when TS_SERVE_CONFIG was set outside Kubernetes. Add a guard symmetric with other uses of the kubeClient. Fixes #14354. 
Signed-off-by: Bjorn Neergaard --- cmd/containerboot/main.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index ad1c0db20..7411ea949 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -331,8 +331,10 @@ authLoop: if err := client.SetServeConfig(ctx, new(ipn.ServeConfig)); err != nil { log.Fatalf("failed to unset serve config: %v", err) } - if err := kc.storeHTTPSEndpoint(ctx, ""); err != nil { - log.Fatalf("failed to update HTTPS endpoint in tailscale state: %v", err) + if hasKubeStateStore(cfg) { + if err := kc.storeHTTPSEndpoint(ctx, ""); err != nil { + log.Fatalf("failed to update HTTPS endpoint in tailscale state: %v", err) + } } } From 0cc071f15409071f2649c3e142eceaf7cabff560 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 11 Dec 2024 10:56:12 +0000 Subject: [PATCH 0222/1708] cmd/containerboot: don't attempt to write kube Secret in non-kube environments (#14358) Updates tailscale/tailscale#14354 Signed-off-by: Irbe Krumina --- cmd/containerboot/serve.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index 29ee7347f..c8b9e098d 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -72,8 +72,10 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan if err := updateServeConfig(ctx, sc, certDomain, lc); err != nil { log.Fatalf("serve proxy: error updating serve config: %v", err) } - if err := kc.storeHTTPSEndpoint(ctx, certDomain); err != nil { - log.Fatalf("serve proxy: error storing HTTPS endpoint: %v", err) + if kc != nil { + if err := kc.storeHTTPSEndpoint(ctx, certDomain); err != nil { + log.Fatalf("serve proxy: error storing HTTPS endpoint: %v", err) + } } prevServeConfig = sc } From fa655e6ed366af5bdf2284449e1eb29dd784303a Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 11 Dec 2024 12:59:42 +0000 Subject: [PATCH 0223/1708] cmd/containerboot: add more tests, check that egress service config only set on kube (#14360) Updates tailscale/tailscale#14357 Signed-off-by: Irbe Krumina --- cmd/containerboot/main_test.go | 124 ++++++++++++++++++++++++++++++--- cmd/containerboot/settings.go | 3 + 2 files changed, 119 insertions(+), 8 deletions(-) diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index 83e001b62..dacfb5bc6 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -31,6 +31,7 @@ import ( "github.com/google/go-cmp/cmp" "golang.org/x/sys/unix" "tailscale.com/ipn" + "tailscale.com/kube/egressservices" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/types/netmap" @@ -57,6 +58,16 @@ func TestContainerBoot(t *testing.T) { if err != nil { t.Fatalf("error unmarshaling tailscaled config: %v", err) } + serveConf := ipn.ServeConfig{TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}} + serveConfBytes, err := json.Marshal(serveConf) + if err != nil { + t.Fatalf("error unmarshaling serve config: %v", err) + } + egressSvcsCfg := egressservices.Configs{"foo": {TailnetTarget: egressservices.TailnetTarget{FQDN: "foo.tailnetxyx.ts.net"}}} + egressSvcsCfgBytes, err := json.Marshal(egressSvcsCfg) + if err != nil { + t.Fatalf("error unmarshaling egress services config: %v", err) + } dirs := []string{ "var/lib", @@ -73,14 +84,16 @@ func TestContainerBoot(t *testing.T) { } } files := map[string][]byte{ - "usr/bin/tailscaled": fakeTailscaled, - "usr/bin/tailscale": fakeTailscale, - 
"usr/bin/iptables": fakeTailscale, - "usr/bin/ip6tables": fakeTailscale, - "dev/net/tun": []byte(""), - "proc/sys/net/ipv4/ip_forward": []byte("0"), - "proc/sys/net/ipv6/conf/all/forwarding": []byte("0"), - "etc/tailscaled/cap-95.hujson": tailscaledConfBytes, + "usr/bin/tailscaled": fakeTailscaled, + "usr/bin/tailscale": fakeTailscale, + "usr/bin/iptables": fakeTailscale, + "usr/bin/ip6tables": fakeTailscale, + "dev/net/tun": []byte(""), + "proc/sys/net/ipv4/ip_forward": []byte("0"), + "proc/sys/net/ipv6/conf/all/forwarding": []byte("0"), + "etc/tailscaled/cap-95.hujson": tailscaledConfBytes, + "etc/tailscaled/serve-config.json": serveConfBytes, + "etc/tailscaled/egress-services-config.json": egressSvcsCfgBytes, } resetFiles := func() { for path, content := range files { @@ -829,6 +842,101 @@ func TestContainerBoot(t *testing.T) { }, }, }, + { + Name: "serve_config_no_kube", + Env: map[string]string{ + "TS_SERVE_CONFIG": filepath.Join(d, "etc/tailscaled/serve-config.json"), + "TS_AUTHKEY": "tskey-key", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + }, + { + Notify: runningNotify, + }, + }, + }, + { + Name: "serve_config_kube", + Env: map[string]string{ + "KUBERNETES_SERVICE_HOST": kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": kube.Port, + "TS_SERVE_CONFIG": filepath.Join(d, "etc/tailscaled/serve-config.json"), + }, + KubeSecret: map[string]string{ + "authkey": "tskey-key", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + }, + }, + { + Notify: runningNotify, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + "https_endpoint": "no-https", + "tailscale_capver": capver, + }, + }, + }, + }, + { + Name: "egress_svcs_config_kube", + Env: map[string]string{ + "KUBERNETES_SERVICE_HOST": kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": kube.Port, + "TS_EGRESS_SERVICES_CONFIG_PATH": filepath.Join(d, "etc/tailscaled/egress-services-config.json"), + }, + KubeSecret: map[string]string{ + "authkey": "tskey-key", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + }, + }, + { + Notify: runningNotify, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + "tailscale_capver": capver, + }, + }, + }, + }, + { + Name: "egress_svcs_config_no_kube", + Env: map[string]string{ + "TS_EGRESS_SERVICES_CONFIG_PATH": filepath.Join(d, "etc/tailscaled/egress-services-config.json"), + "TS_AUTHKEY": "tskey-key", + }, + Phases: []phase{ + { + WantFatalLog: "TS_EGRESS_SERVICES_CONFIG_PATH is only supported for Tailscale running on Kubernetes", + }, + }, + }, } for _, test := range tests { diff --git a/cmd/containerboot/settings.go 
b/cmd/containerboot/settings.go index 4fae58584..e80dbee57 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -199,6 +199,9 @@ func (s *settings) validate() error { if s.HealthCheckEnabled && s.HealthCheckAddrPort != "" { return errors.New("TS_HEALTHCHECK_ADDR_PORT is deprecated and will be removed in 1.82.0, use TS_ENABLE_HEALTH_CHECK and optionally TS_LOCAL_ADDR_PORT") } + if s.EgressSvcsCfgPath != "" && !(s.InKubernetes && s.KubeSecret != "") { + return errors.New("TS_EGRESS_SERVICES_CONFIG_PATH is only supported for Tailscale running on Kubernetes") + } return nil } From f1ccdcc713bfebc1500ea666d523f36301a9f782 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 11 Dec 2024 14:48:57 +0000 Subject: [PATCH 0224/1708] cmd/k8s-operator,k8s-operator: operator integration tests (#12792) This is the start of an integration/e2e test suite for the tailscale operator. It currently only tests two major features, ingress proxy and API server proxy, but we intend to expand it to cover more features over time. It also only supports manual runs for now. We intend to integrate it into CI checks in a separate update when we have planned how to securely provide CI with the secrets required for connecting to a test tailnet. Updates #12622 Change-Id: I31e464bb49719348b62a563790f2bc2ba165a11b Co-authored-by: Irbe Krumina Signed-off-by: Tom Proctor --- cmd/k8s-operator/e2e/ingress_test.go | 108 +++++++++++++++ cmd/k8s-operator/e2e/main_test.go | 194 +++++++++++++++++++++++++++ cmd/k8s-operator/e2e/proxy_test.go | 156 +++++++++++++++++++++ k8s-operator/conditions.go | 11 ++ 4 files changed, 469 insertions(+) create mode 100644 cmd/k8s-operator/e2e/ingress_test.go create mode 100644 cmd/k8s-operator/e2e/main_test.go create mode 100644 cmd/k8s-operator/e2e/proxy_test.go diff --git a/cmd/k8s-operator/e2e/ingress_test.go b/cmd/k8s-operator/e2e/ingress_test.go new file mode 100644 index 000000000..373dd2c7d --- /dev/null +++ b/cmd/k8s-operator/e2e/ingress_test.go @@ -0,0 +1,108 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package e2e + +import ( + "context" + "fmt" + "net/http" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" + kube "tailscale.com/k8s-operator" + "tailscale.com/tstest" +) + +// See [TestMain] for test requirements. 
+func TestIngress(t *testing.T) { + if tsClient == nil { + t.Skip("TestIngress requires credentials for a tailscale client") + } + + ctx := context.Background() + cfg := config.GetConfigOrDie() + cl, err := client.New(cfg, client.Options{}) + if err != nil { + t.Fatal(err) + } + // Apply nginx + createAndCleanup(t, ctx, cl, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "nginx", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }) + // Apply service to expose it as ingress + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + Annotations: map[string]string{ + "tailscale.com/expose": "true", + }, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "app.kubernetes.io/name": "nginx", + }, + Ports: []corev1.ServicePort{ + { + Name: "http", + Protocol: "TCP", + Port: 80, + }, + }, + }, + } + createAndCleanup(t, ctx, cl, svc) + + // TODO: instead of timing out only when test times out, cancel context after 60s or so. + if err := wait.PollUntilContextCancel(ctx, time.Millisecond*100, true, func(ctx context.Context) (done bool, err error) { + maybeReadySvc := &corev1.Service{ObjectMeta: objectMeta("default", "test-ingress")} + if err := get(ctx, cl, maybeReadySvc); err != nil { + return false, err + } + isReady := kube.SvcIsReady(maybeReadySvc) + if isReady { + t.Log("Service is ready") + } + return isReady, nil + }); err != nil { + t.Fatalf("error waiting for the Service to become Ready: %v", err) + } + + var resp *http.Response + if err := tstest.WaitFor(time.Second*60, func() error { + // TODO(tomhjp): Get the tailnet DNS name from the associated secret instead. + // If we are not the first tailnet node with the requested name, we'll get + // a -N suffix. + resp, err = tsClient.HTTPClient.Get(fmt.Sprintf("http://%s-%s:80", svc.Namespace, svc.Name)) + if err != nil { + return err + } + return nil + }); err != nil { + t.Fatalf("error trying to reach service: %v", err) + } + + if resp.StatusCode != http.StatusOK { + t.Fatalf("unexpected status: %v; response body s", resp.StatusCode) + } +} diff --git a/cmd/k8s-operator/e2e/main_test.go b/cmd/k8s-operator/e2e/main_test.go new file mode 100644 index 000000000..ae23c939c --- /dev/null +++ b/cmd/k8s-operator/e2e/main_test.go @@ -0,0 +1,194 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package e2e + +import ( + "context" + "errors" + "fmt" + "log" + "os" + "slices" + "strings" + "testing" + + "github.com/go-logr/zapr" + "github.com/tailscale/hujson" + "go.uber.org/zap/zapcore" + "golang.org/x/oauth2/clientcredentials" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + kzap "sigs.k8s.io/controller-runtime/pkg/log/zap" + "tailscale.com/client/tailscale" +) + +const ( + e2eManagedComment = "// This is managed by the k8s-operator e2e tests" +) + +var ( + tsClient *tailscale.Client + testGrants = map[string]string{ + "test-proxy": `{ + "src": ["tag:e2e-test-proxy"], + "dst": ["tag:k8s-operator"], + "app": { + "tailscale.com/cap/kubernetes": [{ + "impersonate": { + "groups": ["ts:e2e-test-proxy"], + }, + }], + }, + }`, + } +) + +// This test suite is currently not run in CI. 
+// It requires some setup not handled by this code: +// - Kubernetes cluster with tailscale operator installed +// - Current kubeconfig context set to connect to that cluster (directly, no operator proxy) +// - Operator installed with --set apiServerProxyConfig.mode="true" +// - ACLs that define tag:e2e-test-proxy tag. TODO(tomhjp): Can maybe replace this prereq onwards with an API key +// - OAuth client ID and secret in TS_API_CLIENT_ID and TS_API_CLIENT_SECRET env +// - OAuth client must have auth_keys and policy_file write for tag:e2e-test-proxy tag +func TestMain(m *testing.M) { + code, err := runTests(m) + if err != nil { + log.Fatal(err) + } + os.Exit(code) +} + +func runTests(m *testing.M) (int, error) { + zlog := kzap.NewRaw([]kzap.Opts{kzap.UseDevMode(true), kzap.Level(zapcore.DebugLevel)}...).Sugar() + logf.SetLogger(zapr.NewLogger(zlog.Desugar())) + tailscale.I_Acknowledge_This_API_Is_Unstable = true + + if clientID := os.Getenv("TS_API_CLIENT_ID"); clientID != "" { + cleanup, err := setupClientAndACLs() + if err != nil { + return 0, err + } + defer func() { + err = errors.Join(err, cleanup()) + }() + } + + return m.Run(), nil +} + +func setupClientAndACLs() (cleanup func() error, _ error) { + ctx := context.Background() + credentials := clientcredentials.Config{ + ClientID: os.Getenv("TS_API_CLIENT_ID"), + ClientSecret: os.Getenv("TS_API_CLIENT_SECRET"), + TokenURL: "https://login.tailscale.com/api/v2/oauth/token", + Scopes: []string{"auth_keys", "policy_file"}, + } + tsClient = tailscale.NewClient("-", nil) + tsClient.HTTPClient = credentials.Client(ctx) + + if err := patchACLs(ctx, tsClient, func(acls *hujson.Value) { + for test, grant := range testGrants { + deleteTestGrants(test, acls) + addTestGrant(test, grant, acls) + } + }); err != nil { + return nil, err + } + + return func() error { + return patchACLs(ctx, tsClient, func(acls *hujson.Value) { + for test := range testGrants { + deleteTestGrants(test, acls) + } + }) + }, nil +} + +func patchACLs(ctx context.Context, tsClient *tailscale.Client, patchFn func(*hujson.Value)) error { + acls, err := tsClient.ACLHuJSON(ctx) + if err != nil { + return err + } + hj, err := hujson.Parse([]byte(acls.ACL)) + if err != nil { + return err + } + + patchFn(&hj) + + hj.Format() + acls.ACL = hj.String() + if _, err := tsClient.SetACLHuJSON(ctx, *acls, true); err != nil { + return err + } + + return nil +} + +func addTestGrant(test, grant string, acls *hujson.Value) error { + v, err := hujson.Parse([]byte(grant)) + if err != nil { + return err + } + + // Add the managed comment to the first line of the grant object contents. + v.Value.(*hujson.Object).Members[0].Name.BeforeExtra = hujson.Extra(fmt.Sprintf("%s: %s\n", e2eManagedComment, test)) + + if err := acls.Patch([]byte(fmt.Sprintf(`[{"op": "add", "path": "/grants/-", "value": %s}]`, v.String()))); err != nil { + return err + } + + return nil +} + +func deleteTestGrants(test string, acls *hujson.Value) error { + grants := acls.Find("/grants") + + var patches []string + for i, g := range grants.Value.(*hujson.Array).Elements { + members := g.Value.(*hujson.Object).Members + if len(members) == 0 { + continue + } + comment := strings.TrimSpace(string(members[0].Name.BeforeExtra)) + if name, found := strings.CutPrefix(comment, e2eManagedComment+": "); found && name == test { + patches = append(patches, fmt.Sprintf(`{"op": "remove", "path": "/grants/%d"}`, i)) + } + } + + // Remove in reverse order so we don't affect the found indices as we mutate. 
+ slices.Reverse(patches) + + if err := acls.Patch([]byte(fmt.Sprintf("[%s]", strings.Join(patches, ",")))); err != nil { + return err + } + + return nil +} + +func objectMeta(namespace, name string) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + } +} + +func createAndCleanup(t *testing.T, ctx context.Context, cl client.Client, obj client.Object) { + t.Helper() + if err := cl.Create(ctx, obj); err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := cl.Delete(ctx, obj); err != nil { + t.Errorf("error cleaning up %s %s/%s: %s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName(), err) + } + }) +} + +func get(ctx context.Context, cl client.Client, obj client.Object) error { + return cl.Get(ctx, client.ObjectKeyFromObject(obj), obj) +} diff --git a/cmd/k8s-operator/e2e/proxy_test.go b/cmd/k8s-operator/e2e/proxy_test.go new file mode 100644 index 000000000..eac983e88 --- /dev/null +++ b/cmd/k8s-operator/e2e/proxy_test.go @@ -0,0 +1,156 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package e2e + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "tailscale.com/client/tailscale" + "tailscale.com/tsnet" + "tailscale.com/tstest" +) + +// See [TestMain] for test requirements. +func TestProxy(t *testing.T) { + if tsClient == nil { + t.Skip("TestProxy requires credentials for a tailscale client") + } + + ctx := context.Background() + cfg := config.GetConfigOrDie() + cl, err := client.New(cfg, client.Options{}) + if err != nil { + t.Fatal(err) + } + + // Create role and role binding to allow a group we'll impersonate to do stuff. + createAndCleanup(t, ctx, cl, &rbacv1.Role{ + ObjectMeta: objectMeta("tailscale", "read-secrets"), + Rules: []rbacv1.PolicyRule{{ + APIGroups: []string{""}, + Verbs: []string{"get"}, + Resources: []string{"secrets"}, + }}, + }) + createAndCleanup(t, ctx, cl, &rbacv1.RoleBinding{ + ObjectMeta: objectMeta("tailscale", "read-secrets"), + Subjects: []rbacv1.Subject{{ + Kind: "Group", + Name: "ts:e2e-test-proxy", + }}, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: "read-secrets", + }, + }) + + // Get operator host name from kube secret. + operatorSecret := corev1.Secret{ + ObjectMeta: objectMeta("tailscale", "operator"), + } + if err := get(ctx, cl, &operatorSecret); err != nil { + t.Fatal(err) + } + + // Connect to tailnet with test-specific tag so we can use the + // [testGrants] ACLs when connecting to the API server proxy + ts := tsnetServerWithTag(t, ctx, "tag:e2e-test-proxy") + proxyCfg := &rest.Config{ + Host: fmt.Sprintf("https://%s:443", hostNameFromOperatorSecret(t, operatorSecret)), + Dial: ts.Dial, + } + proxyCl, err := client.New(proxyCfg, client.Options{}) + if err != nil { + t.Fatal(err) + } + + // Expect success. + allowedSecret := corev1.Secret{ + ObjectMeta: objectMeta("tailscale", "operator"), + } + // Wait for up to a minute the first time we use the proxy, to give it time + // to provision the TLS certs. + if err := tstest.WaitFor(time.Second*60, func() error { + return get(ctx, proxyCl, &allowedSecret) + }); err != nil { + t.Fatal(err) + } + + // Expect forbidden. 
+ forbiddenSecret := corev1.Secret{ + ObjectMeta: objectMeta("default", "operator"), + } + if err := get(ctx, proxyCl, &forbiddenSecret); err == nil || !apierrors.IsForbidden(err) { + t.Fatalf("expected forbidden error fetching secret from default namespace: %s", err) + } +} + +func tsnetServerWithTag(t *testing.T, ctx context.Context, tag string) *tsnet.Server { + caps := tailscale.KeyCapabilities{ + Devices: tailscale.KeyDeviceCapabilities{ + Create: tailscale.KeyDeviceCreateCapabilities{ + Reusable: false, + Preauthorized: true, + Ephemeral: true, + Tags: []string{tag}, + }, + }, + } + + authKey, authKeyMeta, err := tsClient.CreateKey(ctx, caps) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := tsClient.DeleteKey(ctx, authKeyMeta.ID); err != nil { + t.Errorf("error deleting auth key: %s", err) + } + }) + + ts := &tsnet.Server{ + Hostname: "test-proxy", + Ephemeral: true, + Dir: t.TempDir(), + AuthKey: authKey, + } + _, err = ts.Up(ctx) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ts.Close(); err != nil { + t.Errorf("error shutting down tsnet.Server: %s", err) + } + }) + + return ts +} + +func hostNameFromOperatorSecret(t *testing.T, s corev1.Secret) string { + profiles := map[string]any{} + if err := json.Unmarshal(s.Data["_profiles"], &profiles); err != nil { + t.Fatal(err) + } + key, ok := strings.CutPrefix(string(s.Data["_current-profile"]), "profile-") + if !ok { + t.Fatal(string(s.Data["_current-profile"])) + } + profile, ok := profiles[key] + if !ok { + t.Fatal(profiles) + } + + return ((profile.(map[string]any))["Name"]).(string) +} diff --git a/k8s-operator/conditions.go b/k8s-operator/conditions.go index ace0fb7e3..1ecedfc07 100644 --- a/k8s-operator/conditions.go +++ b/k8s-operator/conditions.go @@ -167,3 +167,14 @@ func DNSCfgIsReady(cfg *tsapi.DNSConfig) bool { cond := cfg.Status.Conditions[idx] return cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == cfg.Generation } + +func SvcIsReady(svc *corev1.Service) bool { + idx := xslices.IndexFunc(svc.Status.Conditions, func(cond metav1.Condition) bool { + return cond.Type == string(tsapi.ProxyReady) + }) + if idx == -1 { + return false + } + cond := svc.Status.Conditions[idx] + return cond.Status == metav1.ConditionTrue +} From 6e552f66a0289f6309477fb024019b62a251da16 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 11 Dec 2024 14:58:44 +0000 Subject: [PATCH 0225/1708] cmd/containerboot: don't attempt to patch a Secret field without permissions (#14365) Signed-off-by: Irbe Krumina --- cmd/containerboot/kube.go | 1 + cmd/containerboot/serve.go | 2 +- cmd/containerboot/settings.go | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go index 643eef385..4d00687ee 100644 --- a/cmd/containerboot/kube.go +++ b/cmd/containerboot/kube.go @@ -24,6 +24,7 @@ import ( type kubeClient struct { kubeclient.Client stateSecret string + canPatch bool // whether the client has permissions to patch Kubernetes Secrets } func newKubeClient(root string, stateSecret string) (*kubeClient, error) { diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index c8b9e098d..14c7f00d7 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -72,7 +72,7 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan if err := updateServeConfig(ctx, sc, certDomain, lc); err != nil { log.Fatalf("serve proxy: error updating serve config: %v", err) } - if kc != nil { + if kc != nil && 
kc.canPatch { if err := kc.storeHTTPSEndpoint(ctx, certDomain); err != nil { log.Fatalf("serve proxy: error storing HTTPS endpoint: %v", err) } diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index e80dbee57..5fc6cc3f0 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -217,6 +217,7 @@ func (cfg *settings) setupKube(ctx context.Context, kc *kubeClient) error { return fmt.Errorf("some Kubernetes permissions are missing, please check your RBAC configuration: %v", err) } cfg.KubernetesCanPatch = canPatch + kc.canPatch = canPatch s, err := kc.GetSecret(ctx, cfg.KubeSecret) if err != nil { From 00458600605e8db25655f9abb95cac5bb78b3c55 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 11 Dec 2024 10:55:21 -0800 Subject: [PATCH 0226/1708] types/iox: add function types for Reader and Writer (#14366) Throughout our codebase we have types that only exist only to implement an io.Reader or io.Writer, when it would have been simpler, cleaner, and more readable to use an inlined function literal that closes over the relevant types. This is arguably more readable since it keeps the semantic logic in place rather than have it be isolated elsewhere. Note that a function literal that closes over some variables is semantic equivalent to declaring a struct with fields and having the Read or Write method mutate those fields. Updates #cleanup Signed-off-by: Joe Tsai --- types/iox/io.go | 23 +++++++++++++++++++++++ types/iox/io_test.go | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100644 types/iox/io.go create mode 100644 types/iox/io_test.go diff --git a/types/iox/io.go b/types/iox/io.go new file mode 100644 index 000000000..a5ca1be43 --- /dev/null +++ b/types/iox/io.go @@ -0,0 +1,23 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package iox provides types to implement [io] functionality. +package iox + +// TODO(https://go.dev/issue/21670): Deprecate or remove this functionality +// once the Go language supports implementing an 1-method interface directly +// using a function value of a matching signature. + +// ReaderFunc implements [io.Reader] using the underlying function value. +type ReaderFunc func([]byte) (int, error) + +func (f ReaderFunc) Read(b []byte) (int, error) { + return f(b) +} + +// WriterFunc implements [io.Writer] using the underlying function value. +type WriterFunc func([]byte) (int, error) + +func (f WriterFunc) Write(b []byte) (int, error) { + return f(b) +} diff --git a/types/iox/io_test.go b/types/iox/io_test.go new file mode 100644 index 000000000..9fba39605 --- /dev/null +++ b/types/iox/io_test.go @@ -0,0 +1,39 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package iox + +import ( + "bytes" + "io" + "testing" + "testing/iotest" + + "tailscale.com/util/must" +) + +func TestCopy(t *testing.T) { + const testdata = "the quick brown fox jumped over the lazy dog" + src := testdata + bb := new(bytes.Buffer) + if got := must.Get(io.Copy(bb, ReaderFunc(func(b []byte) (n int, err error) { + n = copy(b[:min(len(b), 7)], src) + src = src[n:] + if len(src) == 0 { + err = io.EOF + } + return n, err + }))); int(got) != len(testdata) { + t.Errorf("copy = %d, want %d", got, len(testdata)) + } + var dst []byte + if got := must.Get(io.Copy(WriterFunc(func(b []byte) (n int, err error) { + dst = append(dst, b...) 
+ return len(b), nil + }), iotest.OneByteReader(bb))); int(got) != len(testdata) { + t.Errorf("copy = %d, want %d", got, len(testdata)) + } + if string(dst) != testdata { + t.Errorf("copy = %q, want %q", dst, testdata) + } +} From c9188d7760fb68a60e6791f0adf42f8dc1728251 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 11 Dec 2024 10:55:33 -0800 Subject: [PATCH 0227/1708] types/bools: add IfElse (#14272) The IfElse function is equivalent to the ternary (c ? a : b) operator in many other languages like C. Unfortunately, this function cannot perform short-circuit evaluation like in many other languages, but this is a restriction that's not much different than the pre-existing cmp.Or function. The argument against ternary operators in Go is that nested ternary operators become unreadable (e.g., (c1 ? (c2 ? a : b) : (c2 ? x : y))). But a single layer of ternary expressions can sometimes make code much more readable. Having the bools.IfElse function gives code authors the ability to decide whether use of this is more readable or not. Obviously, code authors will need to be judicious about their use of this helper function. Readability is more of an art than a science. Updates #cleanup Signed-off-by: Joe Tsai --- types/bools/bools.go | 28 +++++++++++++++++++ .../bools/{compare_test.go => bools_test.go} | 9 ++++++ types/bools/compare.go | 17 ----------- 3 files changed, 37 insertions(+), 17 deletions(-) create mode 100644 types/bools/bools.go rename types/bools/{compare_test.go => bools_test.go} (70%) delete mode 100644 types/bools/compare.go diff --git a/types/bools/bools.go b/types/bools/bools.go new file mode 100644 index 000000000..962e39919 --- /dev/null +++ b/types/bools/bools.go @@ -0,0 +1,28 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package bools contains the [Compare] and [Select] functions. +package bools + +// Compare compares two boolean values as if false is ordered before true. +func Compare[T ~bool](x, y T) int { + switch { + case x == false && y == true: + return -1 + case x == true && y == false: + return +1 + default: + return 0 + } +} + +// IfElse is a ternary operator that returns trueVal if condExpr is true +// otherwise it returns falseVal. +// IfElse(c, a, b) is roughly equivalent to (c ? a : b) in languages like C. +func IfElse[T any](condExpr bool, trueVal T, falseVal T) T { + if condExpr { + return trueVal + } else { + return falseVal + } +} diff --git a/types/bools/compare_test.go b/types/bools/bools_test.go similarity index 70% rename from types/bools/compare_test.go rename to types/bools/bools_test.go index 280294621..1b466db17 100644 --- a/types/bools/compare_test.go +++ b/types/bools/bools_test.go @@ -19,3 +19,12 @@ func TestCompare(t *testing.T) { t.Errorf("Compare(true, true) = %v, want 0", got) } } + +func TestIfElse(t *testing.T) { + if got := IfElse(true, 0, 1); got != 0 { + t.Errorf("IfElse(true, 0, 1) = %v, want 0", got) + } + if got := IfElse(false, 0, 1); got != 1 { + t.Errorf("IfElse(false, 0, 1) = %v, want 1", got) + } +} diff --git a/types/bools/compare.go b/types/bools/compare.go deleted file mode 100644 index ac433b240..000000000 --- a/types/bools/compare.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package bools contains the bools.Compare function. -package bools - -// Compare compares two boolean values as if false is ordered before true. 
-func Compare[T ~bool](x, y T) int { - switch { - case x == false && y == true: - return -1 - case x == true && y == false: - return +1 - default: - return 0 - } -} From 716cb372563640d8a06deec218a03ea1982c1a15 Mon Sep 17 00:00:00 2001 From: Adrian Dewhurst Date: Wed, 11 Dec 2024 23:49:59 -0500 Subject: [PATCH 0228/1708] util/dnsname: use vizerror for all errors The errors emitted by util/dnsname are all written at least moderately friendly and none of them emit sensitive information. They should be safe to display to end users. Updates tailscale/corp#9025 Change-Id: Ic58705075bacf42f56378127532c5f28ff6bfc89 Signed-off-by: Adrian Dewhurst --- util/dnsname/dnsname.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/util/dnsname/dnsname.go b/util/dnsname/dnsname.go index dde0baaed..131bdd14b 100644 --- a/util/dnsname/dnsname.go +++ b/util/dnsname/dnsname.go @@ -5,9 +5,9 @@ package dnsname import ( - "errors" - "fmt" "strings" + + "tailscale.com/util/vizerror" ) const ( @@ -36,7 +36,7 @@ func ToFQDN(s string) (FQDN, error) { totalLen += 1 // account for missing dot } if totalLen > maxNameLength { - return "", fmt.Errorf("%q is too long to be a DNS name", s) + return "", vizerror.Errorf("%q is too long to be a DNS name", s) } st := 0 @@ -54,7 +54,7 @@ func ToFQDN(s string) (FQDN, error) { // // See https://github.com/tailscale/tailscale/issues/2024 for more. if len(label) == 0 || len(label) > maxLabelLength { - return "", fmt.Errorf("%q is not a valid DNS label", label) + return "", vizerror.Errorf("%q is not a valid DNS label", label) } st = i + 1 } @@ -97,23 +97,23 @@ func (f FQDN) Contains(other FQDN) bool { // ValidLabel reports whether label is a valid DNS label. func ValidLabel(label string) error { if len(label) == 0 { - return errors.New("empty DNS label") + return vizerror.New("empty DNS label") } if len(label) > maxLabelLength { - return fmt.Errorf("%q is too long, max length is %d bytes", label, maxLabelLength) + return vizerror.Errorf("%q is too long, max length is %d bytes", label, maxLabelLength) } if !isalphanum(label[0]) { - return fmt.Errorf("%q is not a valid DNS label: must start with a letter or number", label) + return vizerror.Errorf("%q is not a valid DNS label: must start with a letter or number", label) } if !isalphanum(label[len(label)-1]) { - return fmt.Errorf("%q is not a valid DNS label: must end with a letter or number", label) + return vizerror.Errorf("%q is not a valid DNS label: must end with a letter or number", label) } if len(label) < 2 { return nil } for i := 1; i < len(label)-1; i++ { if !isdnschar(label[i]) { - return fmt.Errorf("%q is not a valid DNS label: contains invalid character %q", label, label[i]) + return vizerror.Errorf("%q is not a valid DNS label: contains invalid character %q", label, label[i]) } } return nil From 73128e25230fda8c82696ed0ffef991bce68cecc Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 12 Dec 2024 09:38:07 -0800 Subject: [PATCH 0229/1708] ssh/tailssh: remove unused public key support When we first made Tailscale SSH, we assumed people would want public key support soon after. Turns out that hasn't been the case; people love the Tailscale identity authentication and check mode. In light of CVE-2024-45337, just remove all our public key code to not distract people, and to make the code smaller. We can always get it back from git if needed. 
Updates tailscale/corp#25131 Updates golang/go#70779 Co-authored-by: Percy Wegmann Change-Id: I87a6e79c2215158766a81942227a18b247333c22 Signed-off-by: Brad Fitzpatrick --- Makefile | 1 - ssh/tailssh/tailssh.go | 277 ++++-------------------------------- ssh/tailssh/tailssh_test.go | 88 +----------- tailcfg/tailcfg.go | 18 ++- tailcfg/tailcfg_clone.go | 12 +- tailcfg/tailcfg_view.go | 22 +-- 6 files changed, 54 insertions(+), 364 deletions(-) diff --git a/Makefile b/Makefile index 960f13885..d3e50af05 100644 --- a/Makefile +++ b/Makefile @@ -116,7 +116,6 @@ sshintegrationtest: ## Run the SSH integration tests in various Docker container GOOS=linux GOARCH=amd64 ./tool/go build -o ssh/tailssh/testcontainers/tailscaled ./cmd/tailscaled && \ echo "Testing on ubuntu:focal" && docker build --build-arg="BASE=ubuntu:focal" -t ssh-ubuntu-focal ssh/tailssh/testcontainers && \ echo "Testing on ubuntu:jammy" && docker build --build-arg="BASE=ubuntu:jammy" -t ssh-ubuntu-jammy ssh/tailssh/testcontainers && \ - echo "Testing on ubuntu:mantic" && docker build --build-arg="BASE=ubuntu:mantic" -t ssh-ubuntu-mantic ssh/tailssh/testcontainers && \ echo "Testing on ubuntu:noble" && docker build --build-arg="BASE=ubuntu:noble" -t ssh-ubuntu-noble ssh/tailssh/testcontainers && \ echo "Testing on alpine:latest" && docker build --build-arg="BASE=alpine:latest" -t ssh-alpine-latest ssh/tailssh/testcontainers diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index 7cb99c381..7f21ccd11 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -10,7 +10,6 @@ import ( "bytes" "context" "crypto/rand" - "encoding/base64" "encoding/json" "errors" "fmt" @@ -45,7 +44,6 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/httpm" "tailscale.com/util/mak" - "tailscale.com/util/slicesx" ) var ( @@ -80,16 +78,14 @@ type server struct { logf logger.Logf tailscaledPath string - pubKeyHTTPClient *http.Client // or nil for http.DefaultClient - timeNow func() time.Time // or nil for time.Now + timeNow func() time.Time // or nil for time.Now sessionWaitGroup sync.WaitGroup // mu protects the following - mu sync.Mutex - activeConns map[*conn]bool // set; value is always true - fetchPublicKeysCache map[string]pubKeyCacheEntry // by https URL - shutdownCalled bool + mu sync.Mutex + activeConns map[*conn]bool // set; value is always true + shutdownCalled bool } func (srv *server) now() time.Time { @@ -204,7 +200,6 @@ func (srv *server) OnPolicyChange() { // // Do the user auth // - NoClientAuthHandler -// - PublicKeyHandler (only if NoClientAuthHandler returns errPubKeyRequired) // // Once auth is done, the conn can be multiplexed with multiple sessions and // channels concurrently. At which point any of the following can be called @@ -234,10 +229,9 @@ type conn struct { finalAction *tailcfg.SSHAction // set by doPolicyAuth or resolveNextAction finalActionErr error // set by doPolicyAuth or resolveNextAction - info *sshConnInfo // set by setInfo - localUser *userMeta // set by doPolicyAuth - userGroupIDs []string // set by doPolicyAuth - pubKey gossh.PublicKey // set by doPolicyAuth + info *sshConnInfo // set by setInfo + localUser *userMeta // set by doPolicyAuth + userGroupIDs []string // set by doPolicyAuth acceptEnv []string // mu protects the following fields. 
@@ -268,9 +262,6 @@ func (c *conn) isAuthorized(ctx ssh.Context) error { action := c.currentAction for { if action.Accept { - if c.pubKey != nil { - metricPublicKeyAccepts.Add(1) - } return nil } if action.Reject || action.HoldAndDelegate == "" { @@ -293,10 +284,6 @@ func (c *conn) isAuthorized(ctx ssh.Context) error { // policy. var errDenied = errors.New("ssh: access denied") -// errPubKeyRequired is returned by NoClientAuthCallback to make the client -// resort to public-key auth; not user visible. -var errPubKeyRequired = errors.New("ssh publickey required") - // NoClientAuthCallback implements gossh.NoClientAuthCallback and is called by // the ssh.Server when the client first connects with the "none" // authentication method. @@ -305,13 +292,12 @@ var errPubKeyRequired = errors.New("ssh publickey required") // starting it afresh). It returns an error if the policy evaluation fails, or // if the decision is "reject" // -// It either returns nil (accept) or errPubKeyRequired or errDenied -// (reject). The errors may be wrapped. +// It either returns nil (accept) or errDenied (reject). The errors may be wrapped. func (c *conn) NoClientAuthCallback(ctx ssh.Context) error { if c.insecureSkipTailscaleAuth { return nil } - if err := c.doPolicyAuth(ctx, nil /* no pub key */); err != nil { + if err := c.doPolicyAuth(ctx); err != nil { return err } if err := c.isAuthorized(ctx); err != nil { @@ -332,8 +318,6 @@ func (c *conn) nextAuthMethodCallback(cm gossh.ConnMetadata, prevErrors []error) switch { case c.anyPasswordIsOkay: nextMethod = append(nextMethod, "password") - case slicesx.LastEqual(prevErrors, errPubKeyRequired): - nextMethod = append(nextMethod, "publickey") } // The fake "tailscale" method is always appended to next so OpenSSH renders @@ -353,41 +337,20 @@ func (c *conn) fakePasswordHandler(ctx ssh.Context, password string) bool { return c.anyPasswordIsOkay } -// PublicKeyHandler implements ssh.PublicKeyHandler is called by the -// ssh.Server when the client presents a public key. -func (c *conn) PublicKeyHandler(ctx ssh.Context, pubKey ssh.PublicKey) error { - if err := c.doPolicyAuth(ctx, pubKey); err != nil { - // TODO(maisem/bradfitz): surface the error here. - c.logf("rejecting SSH public key %s: %v", bytes.TrimSpace(gossh.MarshalAuthorizedKey(pubKey)), err) - return err - } - if err := c.isAuthorized(ctx); err != nil { - return err - } - c.logf("accepting SSH public key %s", bytes.TrimSpace(gossh.MarshalAuthorizedKey(pubKey))) - return nil -} - -// doPolicyAuth verifies that conn can proceed with the specified (optional) -// pubKey. It returns nil if the matching policy action is Accept or -// HoldAndDelegate. If pubKey is nil, there was no policy match but there is a -// policy that might match a public key it returns errPubKeyRequired. Otherwise, -// it returns errDenied. -func (c *conn) doPolicyAuth(ctx ssh.Context, pubKey ssh.PublicKey) error { +// doPolicyAuth verifies that conn can proceed. +// It returns nil if the matching policy action is Accept or +// HoldAndDelegate. Otherwise, it returns errDenied. 
+func (c *conn) doPolicyAuth(ctx ssh.Context) error { if err := c.setInfo(ctx); err != nil { c.logf("failed to get conninfo: %v", err) return errDenied } - a, localUser, acceptEnv, err := c.evaluatePolicy(pubKey) + a, localUser, acceptEnv, err := c.evaluatePolicy() if err != nil { - if pubKey == nil && c.havePubKeyPolicy() { - return errPubKeyRequired - } return fmt.Errorf("%w: %v", errDenied, err) } c.action0 = a c.currentAction = a - c.pubKey = pubKey c.acceptEnv = acceptEnv if a.Message != "" { if err := ctx.SendAuthBanner(a.Message); err != nil { @@ -448,7 +411,6 @@ func (srv *server) newConn() (*conn, error) { ServerConfigCallback: c.ServerConfig, NoClientAuthHandler: c.NoClientAuthCallback, - PublicKeyHandler: c.PublicKeyHandler, PasswordHandler: c.fakePasswordHandler, Handler: c.handleSessionPostSSHAuth, @@ -516,34 +478,6 @@ func (c *conn) mayForwardLocalPortTo(ctx ssh.Context, destinationHost string, de return false } -// havePubKeyPolicy reports whether any policy rule may provide access by means -// of a ssh.PublicKey. -func (c *conn) havePubKeyPolicy() bool { - if c.info == nil { - panic("havePubKeyPolicy called before setInfo") - } - // Is there any rule that looks like it'd require a public key for this - // sshUser? - pol, ok := c.sshPolicy() - if !ok { - return false - } - for _, r := range pol.Rules { - if c.ruleExpired(r) { - continue - } - if mapLocalUser(r.SSHUsers, c.info.sshUser) == "" { - continue - } - for _, p := range r.Principals { - if len(p.PubKeys) > 0 && c.principalMatchesTailscaleIdentity(p) { - return true - } - } - } - return false -} - // sshPolicy returns the SSHPolicy for current node. // If there is no SSHPolicy in the netmap, it returns a debugPolicy // if one is defined. @@ -620,117 +554,19 @@ func (c *conn) setInfo(ctx ssh.Context) error { } // evaluatePolicy returns the SSHAction and localUser after evaluating -// the SSHPolicy for this conn. The pubKey may be nil for "none" auth. -func (c *conn) evaluatePolicy(pubKey gossh.PublicKey) (_ *tailcfg.SSHAction, localUser string, acceptEnv []string, _ error) { +// the SSHPolicy for this conn. +func (c *conn) evaluatePolicy() (_ *tailcfg.SSHAction, localUser string, acceptEnv []string, _ error) { pol, ok := c.sshPolicy() if !ok { return nil, "", nil, fmt.Errorf("tailssh: rejecting connection; no SSH policy") } - a, localUser, acceptEnv, ok := c.evalSSHPolicy(pol, pubKey) + a, localUser, acceptEnv, ok := c.evalSSHPolicy(pol) if !ok { return nil, "", nil, fmt.Errorf("tailssh: rejecting connection; no matching policy") } return a, localUser, acceptEnv, nil } -// pubKeyCacheEntry is the cache value for an HTTPS URL of public keys (like -// "https://github.com/foo.keys") -type pubKeyCacheEntry struct { - lines []string - etag string // if sent by server - at time.Time -} - -const ( - pubKeyCacheDuration = time.Minute // how long to cache non-empty public keys - pubKeyCacheEmptyDuration = 15 * time.Second // how long to cache empty responses -) - -func (srv *server) fetchPublicKeysURLCached(url string) (ce pubKeyCacheEntry, ok bool) { - srv.mu.Lock() - defer srv.mu.Unlock() - // Mostly don't care about the size of this cache. Clean rarely. 
- if m := srv.fetchPublicKeysCache; len(m) > 50 { - tooOld := srv.now().Add(pubKeyCacheDuration * 10) - for k, ce := range m { - if ce.at.Before(tooOld) { - delete(m, k) - } - } - } - ce, ok = srv.fetchPublicKeysCache[url] - if !ok { - return ce, false - } - maxAge := pubKeyCacheDuration - if len(ce.lines) == 0 { - maxAge = pubKeyCacheEmptyDuration - } - return ce, srv.now().Sub(ce.at) < maxAge -} - -func (srv *server) pubKeyClient() *http.Client { - if srv.pubKeyHTTPClient != nil { - return srv.pubKeyHTTPClient - } - return http.DefaultClient -} - -// fetchPublicKeysURL fetches the public keys from a URL. The strings are in the -// the typical public key "type base64-string [comment]" format seen at e.g. -// https://github.com/USER.keys -func (srv *server) fetchPublicKeysURL(url string) ([]string, error) { - if !strings.HasPrefix(url, "https://") { - return nil, errors.New("invalid URL scheme") - } - - ce, ok := srv.fetchPublicKeysURLCached(url) - if ok { - return ce.lines, nil - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - req, err := http.NewRequestWithContext(ctx, "GET", url, nil) - if err != nil { - return nil, err - } - if ce.etag != "" { - req.Header.Add("If-None-Match", ce.etag) - } - res, err := srv.pubKeyClient().Do(req) - if err != nil { - return nil, err - } - defer res.Body.Close() - var lines []string - var etag string - switch res.StatusCode { - default: - err = fmt.Errorf("unexpected status %v", res.Status) - srv.logf("fetching public keys from %s: %v", url, err) - case http.StatusNotModified: - lines = ce.lines - etag = ce.etag - case http.StatusOK: - var all []byte - all, err = io.ReadAll(io.LimitReader(res.Body, 4<<10)) - if s := strings.TrimSpace(string(all)); s != "" { - lines = strings.Split(s, "\n") - } - etag = res.Header.Get("Etag") - } - - srv.mu.Lock() - defer srv.mu.Unlock() - mak.Set(&srv.fetchPublicKeysCache, url, pubKeyCacheEntry{ - at: srv.now(), - lines: lines, - etag: etag, - }) - return lines, err -} - // handleSessionPostSSHAuth runs an SSH session after the SSH-level authentication, // but not necessarily before all the Tailscale-level extra verification has // completed. It also handles SFTP requests. @@ -832,18 +668,6 @@ func (c *conn) expandDelegateURLLocked(actionURL string) string { ).Replace(actionURL) } -func (c *conn) expandPublicKeyURL(pubKeyURL string) string { - if !strings.Contains(pubKeyURL, "$") { - return pubKeyURL - } - loginName := c.info.uprof.LoginName - localPart, _, _ := strings.Cut(loginName, "@") - return strings.NewReplacer( - "$LOGINNAME_EMAIL", loginName, - "$LOGINNAME_LOCALPART", localPart, - ).Replace(pubKeyURL) -} - // sshSession is an accepted Tailscale SSH session. type sshSession struct { ssh.Session @@ -894,7 +718,7 @@ func (c *conn) newSSHSession(s ssh.Session) *sshSession { // isStillValid reports whether the conn is still valid. 
func (c *conn) isStillValid() bool { - a, localUser, _, err := c.evaluatePolicy(c.pubKey) + a, localUser, _, err := c.evaluatePolicy() c.vlogf("stillValid: %+v %v %v", a, localUser, err) if err != nil { return false @@ -1277,9 +1101,9 @@ func (c *conn) ruleExpired(r *tailcfg.SSHRule) bool { return r.RuleExpires.Before(c.srv.now()) } -func (c *conn) evalSSHPolicy(pol *tailcfg.SSHPolicy, pubKey gossh.PublicKey) (a *tailcfg.SSHAction, localUser string, acceptEnv []string, ok bool) { +func (c *conn) evalSSHPolicy(pol *tailcfg.SSHPolicy) (a *tailcfg.SSHAction, localUser string, acceptEnv []string, ok bool) { for _, r := range pol.Rules { - if a, localUser, acceptEnv, err := c.matchRule(r, pubKey); err == nil { + if a, localUser, acceptEnv, err := c.matchRule(r); err == nil { return a, localUser, acceptEnv, true } } @@ -1296,7 +1120,7 @@ var ( errInvalidConn = errors.New("invalid connection state") ) -func (c *conn) matchRule(r *tailcfg.SSHRule, pubKey gossh.PublicKey) (a *tailcfg.SSHAction, localUser string, acceptEnv []string, err error) { +func (c *conn) matchRule(r *tailcfg.SSHRule) (a *tailcfg.SSHAction, localUser string, acceptEnv []string, err error) { defer func() { c.vlogf("matchRule(%+v): %v", r, err) }() @@ -1326,9 +1150,7 @@ func (c *conn) matchRule(r *tailcfg.SSHRule, pubKey gossh.PublicKey) (a *tailcfg return nil, "", nil, errUserMatch } } - if ok, err := c.anyPrincipalMatches(r.Principals, pubKey); err != nil { - return nil, "", nil, err - } else if !ok { + if !c.anyPrincipalMatches(r.Principals) { return nil, "", nil, errPrincipalMatch } return r.Action, localUser, r.AcceptEnv, nil @@ -1345,30 +1167,20 @@ func mapLocalUser(ruleSSHUsers map[string]string, reqSSHUser string) (localUser return v } -func (c *conn) anyPrincipalMatches(ps []*tailcfg.SSHPrincipal, pubKey gossh.PublicKey) (bool, error) { +func (c *conn) anyPrincipalMatches(ps []*tailcfg.SSHPrincipal) bool { for _, p := range ps { if p == nil { continue } - if ok, err := c.principalMatches(p, pubKey); err != nil { - return false, err - } else if ok { - return true, nil + if c.principalMatchesTailscaleIdentity(p) { + return true } } - return false, nil -} - -func (c *conn) principalMatches(p *tailcfg.SSHPrincipal, pubKey gossh.PublicKey) (bool, error) { - if !c.principalMatchesTailscaleIdentity(p) { - return false, nil - } - return c.principalMatchesPubKey(p, pubKey) + return false } // principalMatchesTailscaleIdentity reports whether one of p's four fields // that match the Tailscale identity match (Node, NodeIP, UserLogin, Any). -// This function does not consider PubKeys. 
func (c *conn) principalMatchesTailscaleIdentity(p *tailcfg.SSHPrincipal) bool { ci := c.info if p.Any { @@ -1388,42 +1200,6 @@ func (c *conn) principalMatchesTailscaleIdentity(p *tailcfg.SSHPrincipal) bool { return false } -func (c *conn) principalMatchesPubKey(p *tailcfg.SSHPrincipal, clientPubKey gossh.PublicKey) (bool, error) { - if len(p.PubKeys) == 0 { - return true, nil - } - if clientPubKey == nil { - return false, nil - } - knownKeys := p.PubKeys - if len(knownKeys) == 1 && strings.HasPrefix(knownKeys[0], "https://") { - var err error - knownKeys, err = c.srv.fetchPublicKeysURL(c.expandPublicKeyURL(knownKeys[0])) - if err != nil { - return false, err - } - } - for _, knownKey := range knownKeys { - if pubKeyMatchesAuthorizedKey(clientPubKey, knownKey) { - return true, nil - } - } - return false, nil -} - -func pubKeyMatchesAuthorizedKey(pubKey ssh.PublicKey, wantKey string) bool { - wantKeyType, rest, ok := strings.Cut(wantKey, " ") - if !ok { - return false - } - if pubKey.Type() != wantKeyType { - return false - } - wantKeyB64, _, _ := strings.Cut(rest, " ") - wantKeyData, _ := base64.StdEncoding.DecodeString(wantKeyB64) - return len(wantKeyData) > 0 && bytes.Equal(pubKey.Marshal(), wantKeyData) -} - func randBytes(n int) []byte { b := make([]byte, n) if _, err := rand.Read(b); err != nil { @@ -1749,7 +1525,6 @@ func envEq(a, b string) bool { var ( metricActiveSessions = clientmetric.NewGauge("ssh_active_sessions") metricIncomingConnections = clientmetric.NewCounter("ssh_incoming_connections") - metricPublicKeyAccepts = clientmetric.NewCounter("ssh_publickey_accepts") // accepted subset of ssh_publickey_connections metricTerminalAccept = clientmetric.NewCounter("ssh_terminalaction_accept") metricTerminalReject = clientmetric.NewCounter("ssh_terminalaction_reject") metricTerminalMalformed = clientmetric.NewCounter("ssh_terminalaction_malformed") diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index ad9cb1e57..9f3616d8c 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -10,7 +10,6 @@ import ( "context" "crypto/ed25519" "crypto/rand" - "crypto/sha256" "encoding/json" "errors" "fmt" @@ -229,7 +228,7 @@ func TestMatchRule(t *testing.T) { info: tt.ci, srv: &server{logf: t.Logf}, } - got, gotUser, gotAcceptEnv, err := c.matchRule(tt.rule, nil) + got, gotUser, gotAcceptEnv, err := c.matchRule(tt.rule) if err != tt.wantErr { t.Errorf("err = %v; want %v", err, tt.wantErr) } @@ -348,7 +347,7 @@ func TestEvalSSHPolicy(t *testing.T) { info: tt.ci, srv: &server{logf: t.Logf}, } - got, gotUser, gotAcceptEnv, match := c.evalSSHPolicy(tt.policy, nil) + got, gotUser, gotAcceptEnv, match := c.evalSSHPolicy(tt.policy) if match != tt.wantMatch { t.Errorf("match = %v; want %v", match, tt.wantMatch) } @@ -1129,89 +1128,6 @@ func parseEnv(out []byte) map[string]string { return e } -func TestPublicKeyFetching(t *testing.T) { - var reqsTotal, reqsIfNoneMatchHit, reqsIfNoneMatchMiss int32 - ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - atomic.AddInt32((&reqsTotal), 1) - etag := fmt.Sprintf("W/%q", sha256.Sum256([]byte(r.URL.Path))) - w.Header().Set("Etag", etag) - if v := r.Header.Get("If-None-Match"); v != "" { - if v == etag { - atomic.AddInt32(&reqsIfNoneMatchHit, 1) - w.WriteHeader(304) - return - } - atomic.AddInt32(&reqsIfNoneMatchMiss, 1) - } - io.WriteString(w, "foo\nbar\n"+string(r.URL.Path)+"\n") - })) - ts.StartTLS() - defer ts.Close() - keys := ts.URL - - clock := &tstest.Clock{} - srv := 
&server{ - pubKeyHTTPClient: ts.Client(), - timeNow: clock.Now, - } - for range 2 { - got, err := srv.fetchPublicKeysURL(keys + "/alice.keys") - if err != nil { - t.Fatal(err) - } - if want := []string{"foo", "bar", "/alice.keys"}; !reflect.DeepEqual(got, want) { - t.Errorf("got %q; want %q", got, want) - } - } - if got, want := atomic.LoadInt32(&reqsTotal), int32(1); got != want { - t.Errorf("got %d requests; want %d", got, want) - } - if got, want := atomic.LoadInt32(&reqsIfNoneMatchHit), int32(0); got != want { - t.Errorf("got %d etag hits; want %d", got, want) - } - clock.Advance(5 * time.Minute) - got, err := srv.fetchPublicKeysURL(keys + "/alice.keys") - if err != nil { - t.Fatal(err) - } - if want := []string{"foo", "bar", "/alice.keys"}; !reflect.DeepEqual(got, want) { - t.Errorf("got %q; want %q", got, want) - } - if got, want := atomic.LoadInt32(&reqsTotal), int32(2); got != want { - t.Errorf("got %d requests; want %d", got, want) - } - if got, want := atomic.LoadInt32(&reqsIfNoneMatchHit), int32(1); got != want { - t.Errorf("got %d etag hits; want %d", got, want) - } - if got, want := atomic.LoadInt32(&reqsIfNoneMatchMiss), int32(0); got != want { - t.Errorf("got %d etag misses; want %d", got, want) - } - -} - -func TestExpandPublicKeyURL(t *testing.T) { - c := &conn{ - info: &sshConnInfo{ - uprof: tailcfg.UserProfile{ - LoginName: "bar@baz.tld", - }, - }, - } - if got, want := c.expandPublicKeyURL("foo"), "foo"; got != want { - t.Errorf("basic: got %q; want %q", got, want) - } - if got, want := c.expandPublicKeyURL("https://example.com/$LOGINNAME_LOCALPART.keys"), "https://example.com/bar.keys"; got != want { - t.Errorf("localpart: got %q; want %q", got, want) - } - if got, want := c.expandPublicKeyURL("https://example.com/keys?email=$LOGINNAME_EMAIL"), "https://example.com/keys?email=bar@baz.tld"; got != want { - t.Errorf("email: got %q; want %q", got, want) - } - c.info = new(sshConnInfo) - if got, want := c.expandPublicKeyURL("https://example.com/keys?email=$LOGINNAME_EMAIL"), "https://example.com/keys?email="; got != want { - t.Errorf("on empty: got %q; want %q", got, want) - } -} - func TestAcceptEnvPair(t *testing.T) { tests := []struct { in string diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 897e8d27f..be6c4f0be 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -152,7 +152,8 @@ type CapabilityVersion int // - 107: 2024-10-30: add App Connector to conffile (PR #13942) // - 108: 2024-11-08: Client sends ServicesHash in Hostinfo, understands c2n GET /vip-services. // - 109: 2024-11-18: Client supports filtertype.Match.SrcCaps (issue #12542) -const CurrentCapabilityVersion CapabilityVersion = 109 +// - 110: 2024-12-12: removed never-before-used Tailscale SSH public key support (#14373) +const CurrentCapabilityVersion CapabilityVersion = 110 type StableID string @@ -2525,16 +2526,13 @@ type SSHPrincipal struct { Any bool `json:"any,omitempty"` // if true, match any connection // TODO(bradfitz): add StableUserID, once that exists - // PubKeys, if non-empty, means that this SSHPrincipal only - // matches if one of these public keys is presented by the user. + // UnusedPubKeys was public key support. It never became an official product + // feature and so as of 2024-12-12 is being removed. + // This stub exists to remind us not to re-use the JSON field name "pubKeys" + // in the future if we bring it back with different semantics. 
// - // As a special case, if len(PubKeys) == 1 and PubKeys[0] starts - // with "https://", then it's fetched (like https://github.com/username.keys). - // In that case, the following variable expansions are also supported - // in the URL: - // * $LOGINNAME_EMAIL ("foo@bar.com" or "foo@github") - // * $LOGINNAME_LOCALPART (the "foo" from either of the above) - PubKeys []string `json:"pubKeys,omitempty"` + // Deprecated: do not use. It does nothing. + UnusedPubKeys []string `json:"pubKeys,omitempty"` } // SSHAction is how to handle an incoming connection. diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index f4f02c017..bf9bac298 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -556,17 +556,17 @@ func (src *SSHPrincipal) Clone() *SSHPrincipal { } dst := new(SSHPrincipal) *dst = *src - dst.PubKeys = append(src.PubKeys[:0:0], src.PubKeys...) + dst.UnusedPubKeys = append(src.UnusedPubKeys[:0:0], src.UnusedPubKeys...) return dst } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _SSHPrincipalCloneNeedsRegeneration = SSHPrincipal(struct { - Node StableNodeID - NodeIP string - UserLogin string - Any bool - PubKeys []string + Node StableNodeID + NodeIP string + UserLogin string + Any bool + UnusedPubKeys []string }{}) // Clone makes a deep copy of ControlDialPlan. diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index f275a6a9d..6c21e5f45 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -1260,19 +1260,21 @@ func (v *SSHPrincipalView) UnmarshalJSON(b []byte) error { return nil } -func (v SSHPrincipalView) Node() StableNodeID { return v.ж.Node } -func (v SSHPrincipalView) NodeIP() string { return v.ж.NodeIP } -func (v SSHPrincipalView) UserLogin() string { return v.ж.UserLogin } -func (v SSHPrincipalView) Any() bool { return v.ж.Any } -func (v SSHPrincipalView) PubKeys() views.Slice[string] { return views.SliceOf(v.ж.PubKeys) } +func (v SSHPrincipalView) Node() StableNodeID { return v.ж.Node } +func (v SSHPrincipalView) NodeIP() string { return v.ж.NodeIP } +func (v SSHPrincipalView) UserLogin() string { return v.ж.UserLogin } +func (v SSHPrincipalView) Any() bool { return v.ж.Any } +func (v SSHPrincipalView) UnusedPubKeys() views.Slice[string] { + return views.SliceOf(v.ж.UnusedPubKeys) +} // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _SSHPrincipalViewNeedsRegeneration = SSHPrincipal(struct { - Node StableNodeID - NodeIP string - UserLogin string - Any bool - PubKeys []string + Node StableNodeID + NodeIP string + UserLogin string + Any bool + UnusedPubKeys []string }{}) // View returns a readonly view of ControlDialPlan. From aa04f61d5ef74fcb11373490876740a0bd9b2bac Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 5 Dec 2024 12:42:45 -0800 Subject: [PATCH 0230/1708] net/netcheck: adjust HTTPS latency check to connection time and avoid data race The go-httpstat package has a data race when used with connections that are performing happy-eyeballs connection setups as we are in the DERP client. There is a long-stale PR upstream to address this, however revisiting the purpose of this code suggests we don't really need httpstat here. The code populates a latency table that may be used to compare to STUN latency, which is a lightweight RTT check. Switching out the reported timing here to simply the request HTTP request RTT avoids the problematic package. 
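As a rough sketch of the simpler measurement (illustrative only: it uses a throwaway httptest server and a plain http.Get, whereas the real measureHTTPSLatency below reuses the already-dialed DERP TLS connection and the client's timeNow clock):

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"
	"time"
)

func main() {
	// Stand-in for a DERP node's /derp/latency-check endpoint.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
	defer srv.Close()

	// Time the whole request/response round trip with plain wall-clock
	// timing instead of go-httpstat's per-phase stats, whose callbacks can
	// race when happy-eyeballs dials multiple connections.
	start := time.Now()
	resp, err := http.Get(srv.URL + "/derp/latency-check")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	fmt.Printf("request RTT: %v (status %v)\n", time.Since(start), resp.Status)
}

The resulting duration approximates the request round trip to the node, which is all the report needs when comparing against STUN latency.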
Fixes tailscale/corp#25095 Signed-off-by: James Tucker --- cmd/k8s-operator/depaware.txt | 1 - cmd/tailscale/depaware.txt | 3 +-- cmd/tailscaled/depaware.txt | 3 +-- net/netcheck/netcheck.go | 24 +++++++++++++++++------- 4 files changed, 19 insertions(+), 12 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index d1d687432..0e42fe2b6 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -225,7 +225,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - github.com/tcnksm/go-httpstat from tailscale.com/net/netcheck L github.com/u-root/uio/rand from github.com/insomniacslk/dhcp/dhcpv4 L github.com/u-root/uio/uio from github.com/insomniacslk/dhcp/dhcpv4+ L github.com/vishvananda/netns from github.com/tailscale/netlink+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index d18d88873..a8496c411 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -58,7 +58,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/web-client-prebuilt from tailscale.com/client/web - github.com/tcnksm/go-httpstat from tailscale.com/net/netcheck github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 @@ -306,7 +305,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep net from crypto/tls+ net/http from expvar+ net/http/cgi from tailscale.com/cmd/tailscale/cli - net/http/httptrace from github.com/tcnksm/go-httpstat+ + net/http/httptrace from golang.org/x/net/http2+ net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/netip from go4.org/netipx+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 81cd53271..264f8296f 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -181,7 +181,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ github.com/tailscale/xnet/webdav from tailscale.com/drive/driveimpl+ github.com/tailscale/xnet/webdav/internal/xml from github.com/tailscale/xnet/webdav - github.com/tcnksm/go-httpstat from tailscale.com/net/netcheck LD github.com/u-root/u-root/pkg/termios from tailscale.com/ssh/tailssh L github.com/u-root/uio/rand from github.com/insomniacslk/dhcp/dhcpv4 L github.com/u-root/uio/uio from github.com/insomniacslk/dhcp/dhcpv4+ @@ -553,7 +552,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net from crypto/tls+ net/http from expvar+ net/http/httptest from tailscale.com/control/controlclient - net/http/httptrace from github.com/tcnksm/go-httpstat+ + net/http/httptrace from github.com/prometheus-community/pro-bing+ net/http/httputil from github.com/aws/smithy-go/transport/http+ net/http/internal from net/http+ net/http/pprof from tailscale.com/cmd/tailscaled+ diff --git a/net/netcheck/netcheck.go 
b/net/netcheck/netcheck.go index 7930f88f6..c32eeee8b 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -23,7 +23,6 @@ import ( "syscall" "time" - "github.com/tcnksm/go-httpstat" "tailscale.com/derp/derphttp" "tailscale.com/envknob" "tailscale.com/net/captivedetection" @@ -1110,10 +1109,11 @@ func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *report return nil } +// measureHTTPSLatency measures HTTP request latency to the DERP region, but +// only returns success if an HTTPS request to the region succeeds. func (c *Client) measureHTTPSLatency(ctx context.Context, reg *tailcfg.DERPRegion) (time.Duration, netip.Addr, error) { metricHTTPSend.Add(1) - var result httpstat.Result - ctx, cancel := context.WithTimeout(httpstat.WithHTTPStat(ctx, &result), httpsProbeTimeout) + ctx, cancel := context.WithTimeout(ctx, httpsProbeTimeout) defer cancel() var ip netip.Addr @@ -1121,6 +1121,8 @@ func (c *Client) measureHTTPSLatency(ctx context.Context, reg *tailcfg.DERPRegio dc := derphttp.NewNetcheckClient(c.logf, c.NetMon) defer dc.Close() + // DialRegionTLS may dial multiple times if a node is not available, as such + // it does not have stable timing to measure. tlsConn, tcpConn, node, err := dc.DialRegionTLS(ctx, reg) if err != nil { return 0, ip, err @@ -1138,6 +1140,8 @@ func (c *Client) measureHTTPSLatency(ctx context.Context, reg *tailcfg.DERPRegio connc := make(chan *tls.Conn, 1) connc <- tlsConn + // make an HTTP request to measure, as this enables us to account for MITM + // overhead in e.g. corp environments that have HTTP MITM in front of DERP. tr := &http.Transport{ DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { return nil, errors.New("unexpected DialContext dial") @@ -1153,12 +1157,17 @@ func (c *Client) measureHTTPSLatency(ctx context.Context, reg *tailcfg.DERPRegio } hc := &http.Client{Transport: tr} + // This is the request that will be measured, the request and response + // should be small enough to fit into a single packet each way unless the + // connection has already become unstable. req, err := http.NewRequestWithContext(ctx, "GET", "https://"+node.HostName+"/derp/latency-check", nil) if err != nil { return 0, ip, err } + startTime := c.timeNow() resp, err := hc.Do(req) + reqDur := c.timeNow().Sub(startTime) if err != nil { return 0, ip, err } @@ -1175,11 +1184,12 @@ func (c *Client) measureHTTPSLatency(ctx context.Context, reg *tailcfg.DERPRegio if err != nil { return 0, ip, err } - result.End(c.timeNow()) - // TODO: decide best timing heuristic here. - // Maybe the server should return the tcpinfo_rtt? - return result.ServerProcessing, ip, nil + // return the connection duration, not the request duration, as this is the + // best approximation of the RTT latency to the node. Note that the + // connection setup performs happy-eyeballs and TLS so there are additional + // overheads. 
+ return reqDur, ip, nil } func (c *Client) measureAllICMPLatency(ctx context.Context, rs *reportState, need []*tailcfg.DERPRegion) error { From 1ed9bd76d682299376f404521cf1958a7f9bea7a Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Tue, 10 Dec 2024 11:52:51 -0600 Subject: [PATCH 0231/1708] prober: perform DERP bandwidth probes over TUN device to mimic real client Updates tailscale/corp#24635 Co-authored-by: Mario Minardi Signed-off-by: Percy Wegmann --- cmd/derpprobe/derpprobe.go | 27 ++-- prober/derp.go | 323 +++++++++++++++++++++++++++++++++++-- prober/tun_darwin.go | 35 ++++ prober/tun_default.go | 18 +++ prober/tun_linux.go | 36 +++++ 5 files changed, 411 insertions(+), 28 deletions(-) create mode 100644 prober/tun_darwin.go create mode 100644 prober/tun_default.go create mode 100644 prober/tun_linux.go diff --git a/cmd/derpprobe/derpprobe.go b/cmd/derpprobe/derpprobe.go index 8f04326b0..620b96609 100644 --- a/cmd/derpprobe/derpprobe.go +++ b/cmd/derpprobe/derpprobe.go @@ -18,18 +18,19 @@ import ( ) var ( - derpMapURL = flag.String("derp-map", "https://login.tailscale.com/derpmap/default", "URL to DERP map (https:// or file://) or 'local' to use the local tailscaled's DERP map") - versionFlag = flag.Bool("version", false, "print version and exit") - listen = flag.String("listen", ":8030", "HTTP listen address") - probeOnce = flag.Bool("once", false, "probe once and print results, then exit; ignores the listen flag") - spread = flag.Bool("spread", true, "whether to spread probing over time") - interval = flag.Duration("interval", 15*time.Second, "probe interval") - meshInterval = flag.Duration("mesh-interval", 15*time.Second, "mesh probe interval") - stunInterval = flag.Duration("stun-interval", 15*time.Second, "STUN probe interval") - tlsInterval = flag.Duration("tls-interval", 15*time.Second, "TLS probe interval") - bwInterval = flag.Duration("bw-interval", 0, "bandwidth probe interval (0 = no bandwidth probing)") - bwSize = flag.Int64("bw-probe-size-bytes", 1_000_000, "bandwidth probe size") - regionCode = flag.String("region-code", "", "probe only this region (e.g. 'lax'); if left blank, all regions will be probed") + derpMapURL = flag.String("derp-map", "https://login.tailscale.com/derpmap/default", "URL to DERP map (https:// or file://) or 'local' to use the local tailscaled's DERP map") + versionFlag = flag.Bool("version", false, "print version and exit") + listen = flag.String("listen", ":8030", "HTTP listen address") + probeOnce = flag.Bool("once", false, "probe once and print results, then exit; ignores the listen flag") + spread = flag.Bool("spread", true, "whether to spread probing over time") + interval = flag.Duration("interval", 15*time.Second, "probe interval") + meshInterval = flag.Duration("mesh-interval", 15*time.Second, "mesh probe interval") + stunInterval = flag.Duration("stun-interval", 15*time.Second, "STUN probe interval") + tlsInterval = flag.Duration("tls-interval", 15*time.Second, "TLS probe interval") + bwInterval = flag.Duration("bw-interval", 0, "bandwidth probe interval (0 = no bandwidth probing)") + bwSize = flag.Int64("bw-probe-size-bytes", 1_000_000, "bandwidth probe size") + bwTUNIPv4Address = flag.String("bw-tun-ipv4-addr", "", "if specified, bandwidth probes will be performed over a TUN device at this address in order to exercise TCP-in-TCP in similar fashion to TCP over Tailscale via DERP. We will use a /30 subnet including this IP address.") + regionCode = flag.String("region-code", "", "probe only this region (e.g. 
'lax'); if left blank, all regions will be probed") ) func main() { @@ -46,7 +47,7 @@ func main() { prober.WithTLSProbing(*tlsInterval), } if *bwInterval > 0 { - opts = append(opts, prober.WithBandwidthProbing(*bwInterval, *bwSize)) + opts = append(opts, prober.WithBandwidthProbing(*bwInterval, *bwSize, *bwTUNIPv4Address)) } if *regionCode != "" { opts = append(opts, prober.WithRegion(*regionCode)) diff --git a/prober/derp.go b/prober/derp.go index bce40e34c..8e8e6ac3d 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -12,20 +12,27 @@ import ( "errors" "expvar" "fmt" + "io" "log" "net" "net/http" + "net/netip" "strconv" "strings" "sync" "time" "github.com/prometheus/client_golang/prometheus" + wgconn "github.com/tailscale/wireguard-go/conn" + "github.com/tailscale/wireguard-go/device" + "github.com/tailscale/wireguard-go/tun" + "go4.org/netipx" "tailscale.com/client/tailscale" "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/net/netmon" "tailscale.com/net/stun" + "tailscale.com/net/tstun" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/key" @@ -42,8 +49,9 @@ type derpProber struct { tlsInterval time.Duration // Optional bandwidth probing. - bwInterval time.Duration - bwProbeSize int64 + bwInterval time.Duration + bwProbeSize int64 + bwTUNIPv4Prefix *netip.Prefix // Optionally restrict probes to a single regionCode. regionCode string @@ -68,11 +76,18 @@ type DERPOpt func(*derpProber) // WithBandwidthProbing enables bandwidth probing. When enabled, a payload of // `size` bytes will be regularly transferred through each DERP server, and each -// pair of DERP servers in every region. -func WithBandwidthProbing(interval time.Duration, size int64) DERPOpt { +// pair of DERP servers in every region. If tunAddress is specified, probes will +// use a TCP connection over a TUN device at this address in order to exercise +// TCP-in-TCP in similar fashion to TCP over Tailscale via DERP +func WithBandwidthProbing(interval time.Duration, size int64, tunAddress string) DERPOpt { return func(d *derpProber) { d.bwInterval = interval d.bwProbeSize = size + prefix, err := netip.ParsePrefix(fmt.Sprintf("%s/30", tunAddress)) + if err != nil { + log.Fatalf("failed to parse IP prefix from bw-tun-ipv4-addr: %v", err) + } + d.bwTUNIPv4Prefix = &prefix } } @@ -200,7 +215,11 @@ func (d *derpProber) probeMapFn(ctx context.Context) error { n := fmt.Sprintf("derp/%s/%s/%s/bw", region.RegionCode, server.Name, to.Name) wantProbes[n] = true if d.probes[n] == nil { - log.Printf("adding DERP bandwidth probe for %s->%s (%s) %v bytes every %v", server.Name, to.Name, region.RegionName, d.bwProbeSize, d.bwInterval) + tunString := "" + if d.bwTUNIPv4Prefix != nil { + tunString = " (TUN)" + } + log.Printf("adding%s DERP bandwidth probe for %s->%s (%s) %v bytes every %v", tunString, server.Name, to.Name, region.RegionName, d.bwProbeSize, d.bwInterval) d.probes[n] = d.p.Run(n, d.bwInterval, labels, d.bwProbeFn(server.Name, to.Name, d.bwProbeSize)) } } @@ -251,21 +270,24 @@ func (d *derpProber) probeBandwidth(from, to string, size int64) ProbeClass { if from == to { derpPath = "single" } - var transferTime expvar.Float + var transferTimeSeconds expvar.Float return ProbeClass{ Probe: func(ctx context.Context) error { fromN, toN, err := d.getNodePair(from, to) if err != nil { return err } - return derpProbeBandwidth(ctx, d.lastDERPMap, fromN, toN, size, &transferTime) + return derpProbeBandwidth(ctx, d.lastDERPMap, fromN, toN, size, &transferTimeSeconds, d.bwTUNIPv4Prefix) + }, + Class: "derp_bw", 
+ Labels: Labels{ + "derp_path": derpPath, + "tcp_in_tcp": strconv.FormatBool(d.bwTUNIPv4Prefix != nil), }, - Class: "derp_bw", - Labels: Labels{"derp_path": derpPath}, Metrics: func(l prometheus.Labels) []prometheus.Metric { return []prometheus.Metric{ prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_probe_size_bytes", "Payload size of the bandwidth prober", nil, l), prometheus.GaugeValue, float64(size)), - prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_transfer_time_seconds_total", "Time it took to transfer data", nil, l), prometheus.CounterValue, transferTime.Value()), + prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_transfer_time_seconds_total", "Time it took to transfer data", nil, l), prometheus.CounterValue, transferTimeSeconds.Value()), } }, } @@ -412,8 +434,10 @@ func derpProbeUDP(ctx context.Context, ipStr string, port int) error { } // derpProbeBandwidth sends a payload of a given size between two local -// DERP clients connected to two DERP servers. -func derpProbeBandwidth(ctx context.Context, dm *tailcfg.DERPMap, from, to *tailcfg.DERPNode, size int64, transferTime *expvar.Float) (err error) { +// DERP clients connected to two DERP servers.If tunIPv4Address is specified, +// probes will use a TCP connection over a TUN device at this address in order +// to exercise TCP-in-TCP in similar fashion to TCP over Tailscale via DERP. +func derpProbeBandwidth(ctx context.Context, dm *tailcfg.DERPMap, from, to *tailcfg.DERPNode, size int64, transferTimeSeconds *expvar.Float, tunIPv4Prefix *netip.Prefix) (err error) { // This probe uses clients with isProber=false to avoid spamming the derper logs with every packet // sent by the bandwidth probe. fromc, err := newConn(ctx, dm, from, false) @@ -434,10 +458,13 @@ func derpProbeBandwidth(ctx context.Context, dm *tailcfg.DERPMap, from, to *tail time.Sleep(100 * time.Millisecond) // pretty arbitrary } - start := time.Now() - defer func() { transferTime.Add(time.Since(start).Seconds()) }() + if tunIPv4Prefix != nil { + err = derpProbeBandwidthTUN(ctx, transferTimeSeconds, from, to, fromc, toc, size, tunIPv4Prefix) + } else { + err = derpProbeBandwidthDirect(ctx, transferTimeSeconds, from, to, fromc, toc, size) + } - if err := runDerpProbeNodePair(ctx, from, to, fromc, toc, size); err != nil { + if err != nil { // Record pubkeys on failed probes to aid investigation. return fmt.Errorf("%s -> %s: %w", fromc.SelfPublicKey().ShortString(), @@ -577,6 +604,272 @@ func runDerpProbeNodePair(ctx context.Context, from, to *tailcfg.DERPNode, fromc return nil } +// derpProbeBandwidthDirect takes two DERP clients (fromc and toc) connected to two +// DERP servers (from and to) and sends a test payload of a given size from one +// to another using runDerpProbeNodePair. The time taken to finish the transfer is +// recorded in `transferTimeSeconds`. +func derpProbeBandwidthDirect(ctx context.Context, transferTimeSeconds *expvar.Float, from, to *tailcfg.DERPNode, fromc, toc *derphttp.Client, size int64) error { + start := time.Now() + defer func() { transferTimeSeconds.Add(time.Since(start).Seconds()) }() + + return runDerpProbeNodePair(ctx, from, to, fromc, toc, size) +} + +// derpProbeBandwidthTUNMu ensures that TUN bandwidth probes don't run concurrently. +// This is necessary to avoid conflicts trying to create the TUN device, and +// it also has the nice benefit of preventing concurrent bandwidth probes from +// influencing each other's results. +// +// This guards derpProbeBandwidthTUN. 
+var derpProbeBandwidthTUNMu sync.Mutex + +// derpProbeBandwidthTUN takes two DERP clients (fromc and toc) connected to two +// DERP servers (from and to) and sends a test payload of a given size from one +// to another over a TUN device at an address at the start of the usable host IP +// range that the given tunAddress lives in. The time taken to finish the transfer +// is recorded in `transferTimeSeconds`. +func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds *expvar.Float, from, to *tailcfg.DERPNode, fromc, toc *derphttp.Client, size int64, prefix *netip.Prefix) error { + // Make sure all goroutines have finished. + var wg sync.WaitGroup + defer wg.Wait() + + // Close the clients to make sure goroutines that are reading/writing from them terminate. + defer fromc.Close() + defer toc.Close() + + ipRange := netipx.RangeOfPrefix(*prefix) + // Start of the usable host IP range from the address we have been passed in. + ifAddr := ipRange.From().Next() + // Destination address to dial. This is the next address in the range from + // our ifAddr to ensure that the underlying networking stack is actually being + // utilized instead of being optimized away and treated as a loopback. Packets + // sent to this address will be routed over the TUN. + destinationAddr := ifAddr.Next() + + derpProbeBandwidthTUNMu.Lock() + defer derpProbeBandwidthTUNMu.Unlock() + + // Temporarily set up a TUN device with which to simulate a real client TCP connection + // tunneling over DERP. Use `tstun.DefaultTUNMTU()` (e.g., 1280) as our MTU as this is + // the minimum safe MTU used by Tailscale. + dev, err := tun.CreateTUN(tunName, int(tstun.DefaultTUNMTU())) + if err != nil { + return fmt.Errorf("failed to create TUN device: %w", err) + } + defer func() { + if err := dev.Close(); err != nil { + log.Printf("failed to close TUN device: %s", err) + } + }() + mtu, err := dev.MTU() + if err != nil { + return fmt.Errorf("failed to get TUN MTU: %w", err) + } + + name, err := dev.Name() + if err != nil { + return fmt.Errorf("failed to get device name: %w", err) + } + + // Perform platform specific configuration of the TUN device. + err = configureTUN(*prefix, name) + if err != nil { + return fmt.Errorf("failed to configure tun: %w", err) + } + + // Depending on platform, we need some space for headers at the front + // of TUN I/O op buffers. The below constant is more than enough space + // for any platform that this might run on. + tunStartOffset := device.MessageTransportHeaderSize + + // This goroutine reads packets from the TUN device and evaluates if they + // are IPv4 packets destined for loopback via DERP. If so, it performs L3 NAT + // (swap src/dst) and writes them towards DERP in order to loopback via the + // `toc` DERP client. It only reports errors to `tunReadErrC`. 
+ wg.Add(1) + tunReadErrC := make(chan error, 1) + go func() { + defer wg.Done() + + numBufs := wgconn.IdealBatchSize + bufs := make([][]byte, 0, numBufs) + sizes := make([]int, numBufs) + for range numBufs { + bufs = append(bufs, make([]byte, mtu+tunStartOffset)) + } + + destinationAddrBytes := destinationAddr.AsSlice() + scratch := make([]byte, 4) + for { + n, err := dev.Read(bufs, sizes, tunStartOffset) + if err != nil { + tunReadErrC <- err + return + } + + for i := range n { + pkt := bufs[i][tunStartOffset : sizes[i]+tunStartOffset] + // Skip everything except valid IPv4 packets + if len(pkt) < 20 { + // Doesn't even have a full IPv4 header + continue + } + if pkt[0]>>4 != 4 { + // Not IPv4 + continue + } + + if !bytes.Equal(pkt[16:20], destinationAddrBytes) { + // Unexpected dst address + continue + } + + copy(scratch, pkt[12:16]) + copy(pkt[12:16], pkt[16:20]) + copy(pkt[16:20], scratch) + + if err := fromc.Send(toc.SelfPublicKey(), pkt); err != nil { + tunReadErrC <- err + return + } + } + } + }() + + // This goroutine reads packets from the `toc` DERP client and writes them towards the TUN. + // It only reports errors to `recvErrC` channel. + wg.Add(1) + recvErrC := make(chan error, 1) + go func() { + defer wg.Done() + + buf := make([]byte, mtu+tunStartOffset) + bufs := make([][]byte, 1) + + for { + m, err := toc.Recv() + if err != nil { + recvErrC <- fmt.Errorf("failed to receive: %w", err) + return + } + switch v := m.(type) { + case derp.ReceivedPacket: + if v.Source != fromc.SelfPublicKey() { + recvErrC <- fmt.Errorf("got data packet from unexpected source, %v", v.Source) + return + } + pkt := v.Data + copy(buf[tunStartOffset:], pkt) + bufs[0] = buf[:len(pkt)+tunStartOffset] + if _, err := dev.Write(bufs, tunStartOffset); err != nil { + recvErrC <- fmt.Errorf("failed to write to TUN device: %w", err) + return + } + case derp.KeepAliveMessage: + // Silently ignore. + default: + log.Printf("%v: ignoring Recv frame type %T", to.Name, v) + // Loop. + } + } + }() + + // Start a listener to receive the data + l, err := net.Listen("tcp", net.JoinHostPort(ifAddr.String(), "0")) + if err != nil { + return fmt.Errorf("failed to listen: %s", err) + } + defer l.Close() + + // 128KB by default + const writeChunkSize = 128 << 10 + + randData := make([]byte, writeChunkSize) + _, err = crand.Read(randData) + if err != nil { + return fmt.Errorf("failed to initialize random data: %w", err) + } + + // Dial ourselves + _, port, err := net.SplitHostPort(l.Addr().String()) + if err != nil { + return fmt.Errorf("failed to split address %q: %w", l.Addr().String(), err) + } + + connAddr := net.JoinHostPort(destinationAddr.String(), port) + conn, err := net.Dial("tcp", connAddr) + if err != nil { + return fmt.Errorf("failed to dial address %q: %w", connAddr, err) + } + defer conn.Close() + + // Timing only includes the actual sending and receiving of data. + start := time.Now() + + // This goroutine reads data from the TCP stream being looped back via DERP. + // It reports to `readFinishedC` when `size` bytes have been read, or if an + // error occurs. + wg.Add(1) + readFinishedC := make(chan error, 1) + go func() { + defer wg.Done() + + readConn, err := l.Accept() + if err != nil { + readFinishedC <- err + } + defer readConn.Close() + deadline, ok := ctx.Deadline() + if ok { + // Don't try reading past our context's deadline. 
+ if err := readConn.SetReadDeadline(deadline); err != nil { + readFinishedC <- fmt.Errorf("unable to set read deadline: %w", err) + } + } + _, err = io.CopyN(io.Discard, readConn, size) + // Measure transfer time irrespective of whether it succeeded or failed. + transferTimeSeconds.Add(time.Since(start).Seconds()) + readFinishedC <- err + }() + + // This goroutine sends data to the TCP stream being looped back via DERP. + // It only reports errors to `sendErrC`. + wg.Add(1) + sendErrC := make(chan error, 1) + go func() { + defer wg.Done() + + for wrote := 0; wrote < int(size); wrote += len(randData) { + b := randData + if wrote+len(randData) > int(size) { + // This is the last chunk and we don't need the whole thing + b = b[0 : int(size)-wrote] + } + if _, err := conn.Write(b); err != nil { + sendErrC <- fmt.Errorf("failed to write to conn: %w", err) + return + } + } + }() + + select { + case <-ctx.Done(): + return fmt.Errorf("timeout: %w", ctx.Err()) + case err := <-tunReadErrC: + return fmt.Errorf("error reading from TUN via %q: %w", from.Name, err) + case err := <-sendErrC: + return fmt.Errorf("error sending via %q: %w", from.Name, err) + case err := <-recvErrC: + return fmt.Errorf("error receiving from %q: %w", to.Name, err) + case err := <-readFinishedC: + if err != nil { + return fmt.Errorf("error reading from %q to TUN: %w", to.Name, err) + } + } + + return nil +} + func newConn(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode, isProber bool) (*derphttp.Client, error) { // To avoid spamming the log with regular connection messages. l := logger.Filtered(log.Printf, func(s string) bool { diff --git a/prober/tun_darwin.go b/prober/tun_darwin.go new file mode 100644 index 000000000..0ef22e41e --- /dev/null +++ b/prober/tun_darwin.go @@ -0,0 +1,35 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build darwin + +package prober + +import ( + "fmt" + "net/netip" + "os/exec" + + "go4.org/netipx" +) + +const tunName = "utun" + +func configureTUN(addr netip.Prefix, tunname string) error { + cmd := exec.Command("ifconfig", tunname, "inet", addr.String(), addr.Addr().String()) + res, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to add address: %w (%s)", err, string(res)) + } + + net := netipx.PrefixIPNet(addr) + nip := net.IP.Mask(net.Mask) + nstr := fmt.Sprintf("%v/%d", nip, addr.Bits()) + cmd = exec.Command("route", "-q", "-n", "add", "-inet", nstr, "-iface", addr.Addr().String()) + res, err = cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to add route: %w (%s)", err, string(res)) + } + + return nil +} diff --git a/prober/tun_default.go b/prober/tun_default.go new file mode 100644 index 000000000..93a5b07fd --- /dev/null +++ b/prober/tun_default.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux && !darwin + +package prober + +import ( + "fmt" + "net/netip" + "runtime" +) + +const tunName = "unused" + +func configureTUN(addr netip.Prefix, tunname string) error { + return fmt.Errorf("not implemented on " + runtime.GOOS) +} diff --git a/prober/tun_linux.go b/prober/tun_linux.go new file mode 100644 index 000000000..52a31efbb --- /dev/null +++ b/prober/tun_linux.go @@ -0,0 +1,36 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package prober + +import ( + "fmt" + "net/netip" + + "github.com/tailscale/netlink" + "go4.org/netipx" +) + +const tunName = 
"derpprobe" + +func configureTUN(addr netip.Prefix, tunname string) error { + link, err := netlink.LinkByName(tunname) + if err != nil { + return fmt.Errorf("failed to look up link %q: %w", tunname, err) + } + + // We need to bring the TUN device up before assigning an address. This + // allows the OS to automatically create a route for it. Otherwise, we'd + // have to manually create the route. + if err := netlink.LinkSetUp(link); err != nil { + return fmt.Errorf("failed to bring tun %q up: %w", tunname, err) + } + + if err := netlink.AddrReplace(link, &netlink.Addr{IPNet: netipx.PrefixIPNet(addr)}); err != nil { + return fmt.Errorf("failed to add address: %w", err) + } + + return nil +} From cc168d9f6bcd958872fd2d3b8999222a459f9d9a Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Mon, 16 Dec 2024 06:11:18 +0000 Subject: [PATCH 0232/1708] cmd/k8s-operator: fix ProxyGroup hostname (#14336) Updates tailscale/tailscale#14325 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/proxygroup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 39b7ccc01..60f470fc2 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -479,7 +479,7 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32 } if pg.Spec.HostnamePrefix != "" { - conf.Hostname = ptr.To(fmt.Sprintf("%s%d", pg.Spec.HostnamePrefix, idx)) + conf.Hostname = ptr.To(fmt.Sprintf("%s-%d", pg.Spec.HostnamePrefix, idx)) } if shouldAcceptRoutes(class) { From 5883ca72a7397df487a26ab64ed02b0086405b35 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Mon, 16 Dec 2024 10:56:55 -0800 Subject: [PATCH 0233/1708] types/opt: fix test to be agnostic to omitzero support (#14401) The omitzero tag option has been backported to v1 "encoding/json" from the "encoding/json/v2" prototype and will land in Go1.24. Until we fully upgrade to Go1.24, adjust the test to be agnostic to which version of Go someone is using. Updates tailscale/corp#25406 Signed-off-by: Joe Tsai --- types/opt/value_test.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/types/opt/value_test.go b/types/opt/value_test.go index 93d935e27..dbd8b255f 100644 --- a/types/opt/value_test.go +++ b/types/opt/value_test.go @@ -9,6 +9,8 @@ import ( "testing" jsonv2 "github.com/go-json-experiment/json" + "tailscale.com/types/bools" + "tailscale.com/util/must" ) type testStruct struct { @@ -87,7 +89,14 @@ func TestValue(t *testing.T) { False: ValueOf(false), ExplicitUnset: Value[bool]{}, }, - want: `{"True":true,"False":false,"Unset":null,"ExplicitUnset":null}`, + want: bools.IfElse( + // Detect whether v1 "encoding/json" supports `omitzero` or not. + // TODO(Go1.24): Remove this after `omitzero` is supported. + string(must.Get(json.Marshal(struct { + X int `json:",omitzero"` + }{}))) == `{}`, + `{"True":true,"False":false}`, // omitzero supported + `{"True":true,"False":false,"Unset":null,"ExplicitUnset":null}`), // omitzero not supported wantBack: struct { True Value[bool] `json:",omitzero"` False Value[bool] `json:",omitzero"` From 0cc2a8dc0d086f114f1031ef3cb621b8413ac946 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 16 Dec 2024 10:19:10 -0800 Subject: [PATCH 0234/1708] go.toolchain.rev: bump Go toolchain For https://github.com/tailscale/go/pull/108 so we can depend on it in other repos. (This repo can't yet use it; we permit building tailscale/tailscale with the latest stock Go release) But that will be in Go 1.24. 
We're just impatient elsewhere and would like it in the control plane code earlier. Updates tailscale/corp#25406 Change-Id: I53ff367318365c465cbd02cea387c8ff1eb49fab Signed-off-by: Brad Fitzpatrick --- go.toolchain.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index 500d853e5..7be85deb6 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -96578f73d04e1a231fa2a495ad3fa97747785bc6 +e005697288a8d2fadc87bb7c3e2c74778d08554a From 2506b81471914ad10fe40476e2f9aae25777cee6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 16 Dec 2024 12:11:38 -0800 Subject: [PATCH 0235/1708] prober: fix WithBandwidthProbing behavior with optional tunAddress 1ed9bd76d682299376f404521cf1958a7f9bea7a meant to make tunAddress be optional. Updates tailscale/corp#24635 Change-Id: Idc4a8540b294e480df5bd291967024c04df751c0 Signed-off-by: Brad Fitzpatrick --- prober/derp.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/prober/derp.go b/prober/derp.go index 8e8e6ac3d..742e8a5f4 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -51,7 +51,7 @@ type derpProber struct { // Optional bandwidth probing. bwInterval time.Duration bwProbeSize int64 - bwTUNIPv4Prefix *netip.Prefix + bwTUNIPv4Prefix *netip.Prefix // or nil to not use TUN // Optionally restrict probes to a single regionCode. regionCode string @@ -78,16 +78,18 @@ type DERPOpt func(*derpProber) // `size` bytes will be regularly transferred through each DERP server, and each // pair of DERP servers in every region. If tunAddress is specified, probes will // use a TCP connection over a TUN device at this address in order to exercise -// TCP-in-TCP in similar fashion to TCP over Tailscale via DERP +// TCP-in-TCP in similar fashion to TCP over Tailscale via DERP. 
func WithBandwidthProbing(interval time.Duration, size int64, tunAddress string) DERPOpt { return func(d *derpProber) { d.bwInterval = interval d.bwProbeSize = size - prefix, err := netip.ParsePrefix(fmt.Sprintf("%s/30", tunAddress)) - if err != nil { - log.Fatalf("failed to parse IP prefix from bw-tun-ipv4-addr: %v", err) + if tunAddress != "" { + prefix, err := netip.ParsePrefix(fmt.Sprintf("%s/30", tunAddress)) + if err != nil { + log.Fatalf("failed to parse IP prefix from bw-tun-ipv4-addr: %v", err) + } + d.bwTUNIPv4Prefix = &prefix } - d.bwTUNIPv4Prefix = &prefix } } From b62a013ecbaff241fda500e95365bc28b77595b0 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Mon, 16 Dec 2024 14:53:34 -0800 Subject: [PATCH 0236/1708] Switch logging service from log.tailscale.io to log.tailscale.com (#14398) Updates tailscale/corp#23617 Signed-off-by: Joe Tsai --- cmd/derper/bootstrap_dns_test.go | 12 ++++++------ docs/windows/policy/en-US/tailscale.adml | 2 +- ipn/ipnserver/proxyconnect.go | 2 +- logpolicy/logpolicy.go | 2 +- logpolicy/logpolicy_test.go | 4 +++- logtail/api.md | 4 ++-- logtail/example/logadopt/logadopt.go | 2 +- logtail/example/logreprocess/demo.sh | 2 +- logtail/example/logreprocess/logreprocess.go | 2 +- logtail/logtail.go | 8 ++++---- net/tlsdial/tlsdial.go | 4 ++-- tsnet/tsnet.go | 2 +- tstest/natlab/vnet/vip.go | 2 +- tstest/natlab/vnet/vnet.go | 2 +- 14 files changed, 26 insertions(+), 24 deletions(-) diff --git a/cmd/derper/bootstrap_dns_test.go b/cmd/derper/bootstrap_dns_test.go index d151bc2b0..9b99103ab 100644 --- a/cmd/derper/bootstrap_dns_test.go +++ b/cmd/derper/bootstrap_dns_test.go @@ -20,10 +20,10 @@ import ( ) func BenchmarkHandleBootstrapDNS(b *testing.B) { - tstest.Replace(b, bootstrapDNS, "log.tailscale.io,login.tailscale.com,controlplane.tailscale.com,login.us.tailscale.com") + tstest.Replace(b, bootstrapDNS, "log.tailscale.com,login.tailscale.com,controlplane.tailscale.com,login.us.tailscale.com") refreshBootstrapDNS() w := new(bitbucketResponseWriter) - req, _ := http.NewRequest("GET", "https://localhost/bootstrap-dns?q="+url.QueryEscape("log.tailscale.io"), nil) + req, _ := http.NewRequest("GET", "https://localhost/bootstrap-dns?q="+url.QueryEscape("log.tailscale.com"), nil) b.ReportAllocs() b.ResetTimer() b.RunParallel(func(b *testing.PB) { @@ -63,7 +63,7 @@ func TestUnpublishedDNS(t *testing.T) { nettest.SkipIfNoNetwork(t) const published = "login.tailscale.com" - const unpublished = "log.tailscale.io" + const unpublished = "log.tailscale.com" prev1, prev2 := *bootstrapDNS, *unpublishedDNS *bootstrapDNS = published @@ -119,18 +119,18 @@ func TestUnpublishedDNSEmptyList(t *testing.T) { unpublishedDNSCache.Store(&dnsEntryMap{ IPs: map[string][]net.IP{ - "log.tailscale.io": {}, + "log.tailscale.com": {}, "controlplane.tailscale.com": {net.IPv4(1, 2, 3, 4)}, }, Percent: map[string]float64{ - "log.tailscale.io": 1.0, + "log.tailscale.com": 1.0, "controlplane.tailscale.com": 1.0, }, }) t.Run("CacheMiss", func(t *testing.T) { // One domain in map but empty, one not in map at all - for _, q := range []string{"log.tailscale.io", "login.tailscale.com"} { + for _, q := range []string{"log.tailscale.com", "login.tailscale.com"} { resetMetrics() ips := getBootstrapDNS(t, q) diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index ebf1a5905..4d5893a32 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -31,7 +31,7 @@ See 
https://tailscale.com/kb/1315/mdm-keys#set-a-custom-control-server-url for m Specify which Tailnet should be used for Login diff --git a/ipn/ipnserver/proxyconnect.go b/ipn/ipnserver/proxyconnect.go index 1094a79f9..030c4efe4 100644 --- a/ipn/ipnserver/proxyconnect.go +++ b/ipn/ipnserver/proxyconnect.go @@ -14,7 +14,7 @@ import ( ) // handleProxyConnectConn handles a CONNECT request to -// log.tailscale.io (or whatever the configured log server is). This +// log.tailscale.com (or whatever the configured log server is). This // is intended for use by the Windows GUI client to log via when an // exit node is in use, so the logs don't go out via the exit node and // instead go directly, like tailscaled's. The dialer tried to do that diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index fa882ad3a..b9b813718 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -889,7 +889,7 @@ func (opts TransportOptions) New() http.RoundTripper { host := cmp.Or(opts.Host, logtail.DefaultHost) tr.TLSClientConfig = tlsdial.Config(host, opts.Health, tr.TLSClientConfig) - // Force TLS 1.3 since we know log.tailscale.io supports it. + // Force TLS 1.3 since we know log.tailscale.com supports it. tr.TLSClientConfig.MinVersion = tls.VersionTLS13 return tr diff --git a/logpolicy/logpolicy_test.go b/logpolicy/logpolicy_test.go index fdbfe4506..fb5666f86 100644 --- a/logpolicy/logpolicy_test.go +++ b/logpolicy/logpolicy_test.go @@ -7,6 +7,8 @@ import ( "os" "reflect" "testing" + + "tailscale.com/logtail" ) func TestLogHost(t *testing.T) { @@ -20,7 +22,7 @@ func TestLogHost(t *testing.T) { env string want string }{ - {"", "log.tailscale.io"}, + {"", logtail.DefaultHost}, {"http://foo.com", "foo.com"}, {"https://foo.com", "foo.com"}, {"https://foo.com/", "foo.com"}, diff --git a/logtail/api.md b/logtail/api.md index 8ec0b69c0..20726e209 100644 --- a/logtail/api.md +++ b/logtail/api.md @@ -6,14 +6,14 @@ retrieving, and processing log entries. # Overview HTTP requests are received at the service **base URL** -[https://log.tailscale.io](https://log.tailscale.io), and return JSON-encoded +[https://log.tailscale.com](https://log.tailscale.com), and return JSON-encoded responses using standard HTTP response codes. Authorization for the configuration and retrieval APIs is done with a secret API key passed as the HTTP basic auth username. Secret keys are generated via the web UI at base URL. An example of using basic auth with curl: - curl -u : https://log.tailscale.io/collections + curl -u : https://log.tailscale.com/collections In the future, an HTTP header will allow using MessagePack instead of JSON. 
diff --git a/logtail/example/logadopt/logadopt.go b/logtail/example/logadopt/logadopt.go index 984a8a35a..eba3f9311 100644 --- a/logtail/example/logadopt/logadopt.go +++ b/logtail/example/logadopt/logadopt.go @@ -25,7 +25,7 @@ func main() { } log.SetFlags(0) - req, err := http.NewRequest("POST", "https://log.tailscale.io/instances", strings.NewReader(url.Values{ + req, err := http.NewRequest("POST", "https://log.tailscale.com/instances", strings.NewReader(url.Values{ "collection": []string{*collection}, "instances": []string{*publicID}, "adopt": []string{"true"}, diff --git a/logtail/example/logreprocess/demo.sh b/logtail/example/logreprocess/demo.sh index 4ec819a67..583929c12 100755 --- a/logtail/example/logreprocess/demo.sh +++ b/logtail/example/logreprocess/demo.sh @@ -13,7 +13,7 @@ # # Then generate a LOGTAIL_API_KEY and two test collections by visiting: # -# https://log.tailscale.io +# https://log.tailscale.com # # Then set the three variables below. trap 'rv=$?; [ "$rv" = 0 ] || echo "-- exiting with code $rv"; exit $rv' EXIT diff --git a/logtail/example/logreprocess/logreprocess.go b/logtail/example/logreprocess/logreprocess.go index 5dbf76578..aae65df9f 100644 --- a/logtail/example/logreprocess/logreprocess.go +++ b/logtail/example/logreprocess/logreprocess.go @@ -37,7 +37,7 @@ func main() { }() } - req, err := http.NewRequest("GET", "https://log.tailscale.io/c/"+*collection+"?stream=true", nil) + req, err := http.NewRequest("GET", "https://log.tailscale.com/c/"+*collection+"?stream=true", nil) if err != nil { log.Fatal(err) } diff --git a/logtail/logtail.go b/logtail/logtail.go index 13e8e85fd..0e9c4f288 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Package logtail sends logs to log.tailscale.io. +// Package logtail sends logs to log.tailscale.com. package logtail import ( @@ -55,7 +55,7 @@ const bufferSize = 4 << 10 // DefaultHost is the default host name to upload logs to when // Config.BaseURL isn't provided. -const DefaultHost = "log.tailscale.io" +const DefaultHost = "log.tailscale.com" const defaultFlushDelay = 2 * time.Second @@ -69,7 +69,7 @@ type Config struct { Collection string // collection name, a domain name PrivateID logid.PrivateID // private ID for the primary log stream CopyPrivateID logid.PrivateID // private ID for a log stream that is a superset of this log stream - BaseURL string // if empty defaults to "https://log.tailscale.io" + BaseURL string // if empty defaults to "https://log.tailscale.com" HTTPC *http.Client // if empty defaults to http.DefaultClient SkipClientTime bool // if true, client_time is not written to logs LowMemory bool // if true, logtail minimizes memory use @@ -507,7 +507,7 @@ func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAft } if runtime.GOOS == "js" { // We once advertised we'd accept optional client certs (for internal use) - // on log.tailscale.io but then Tailscale SSH js/wasm clients prompted + // on log.tailscale.com but then Tailscale SSH js/wasm clients prompted // users (on some browsers?) to pick a client cert. We'll fix the server's // TLS ServerHello, but we can also fix it client side for good measure. 
// diff --git a/net/tlsdial/tlsdial.go b/net/tlsdial/tlsdial.go index 7e847a8b6..2a109c790 100644 --- a/net/tlsdial/tlsdial.go +++ b/net/tlsdial/tlsdial.go @@ -89,8 +89,8 @@ func Config(host string, ht *health.Tracker, base *tls.Config) *tls.Config { // (with the baked-in fallback root) in the VerifyConnection hook. conf.InsecureSkipVerify = true conf.VerifyConnection = func(cs tls.ConnectionState) (retErr error) { - if host == "log.tailscale.io" && hostinfo.IsNATLabGuestVM() { - // Allow log.tailscale.io TLS MITM for integration tests when + if host == "log.tailscale.com" && hostinfo.IsNATLabGuestVM() { + // Allow log.tailscale.com TLS MITM for integration tests when // the client's running within a NATLab VM. return nil } diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 34cab7385..5f1d8073a 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -78,7 +78,7 @@ type Server struct { // If nil, a new FileStore is initialized at `Dir/tailscaled.state`. // See tailscale.com/ipn/store for supported stores. // - // Logs will automatically be uploaded to log.tailscale.io, + // Logs will automatically be uploaded to log.tailscale.com, // where the configuration file for logging will be saved at // `Dir/tailscaled.log.conf`. Store ipn.StateStore diff --git a/tstest/natlab/vnet/vip.go b/tstest/natlab/vnet/vip.go index c75f17cee..190c9e75f 100644 --- a/tstest/natlab/vnet/vip.go +++ b/tstest/natlab/vnet/vip.go @@ -17,7 +17,7 @@ var ( fakeControl = newVIP("control.tailscale", 3) fakeDERP1 = newVIP("derp1.tailscale", "33.4.0.1") // 3340=DERP; 1=derp 1 fakeDERP2 = newVIP("derp2.tailscale", "33.4.0.2") // 3340=DERP; 2=derp 2 - fakeLogCatcher = newVIP("log.tailscale.io", 4) + fakeLogCatcher = newVIP("log.tailscale.com", 4) fakeSyslog = newVIP("syslog.tailscale", 9) ) diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index 92312c039..586fd28e0 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -394,7 +394,7 @@ func (n *network) acceptTCP(r *tcp.ForwarderRequest) { } } -// serveLogCatchConn serves a TCP connection to "log.tailscale.io", speaking the +// serveLogCatchConn serves a TCP connection to "log.tailscale.com", speaking the // logtail/logcatcher protocol. // // We terminate TLS with an arbitrary cert; the client is configured to not From b3d4ffe1688b4218feee06a5873fce75a839a022 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 17 Dec 2024 15:36:57 +0000 Subject: [PATCH 0237/1708] docs/k8s: add some high-level operator architecture diagrams (#13915) This is an experiment to see how useful we will find it to have some text-based diagrams to document how various components of the operator work. There are no plans to link to this from elsewhere yet, but hopefully it will be a useful reference internally. Updates #cleanup Change-Id: If5911ed39b09378fec0492e87738ec0cc3d8731e Signed-off-by: Tom Proctor --- docs/k8s/operator-architecture.md | 517 ++++++++++++++++++++++++++++++ 1 file changed, 517 insertions(+) create mode 100644 docs/k8s/operator-architecture.md diff --git a/docs/k8s/operator-architecture.md b/docs/k8s/operator-architecture.md new file mode 100644 index 000000000..26bfa8542 --- /dev/null +++ b/docs/k8s/operator-architecture.md @@ -0,0 +1,517 @@ +# Operator architecture diagrams + +The Tailscale [Kubernetes operator][kb-operator] has a collection of use-cases +that can be mixed and matched as required. The following diagrams illustrate +how the operator implements each use-case. 
+ +In each diagram, the "tailscale" namespace is entirely managed by the operator +once the operator itself has been deployed. + +Tailscale devices are highlighted as black nodes. The salient devices for each +use-case are marked as "src" or "dst" to denote which node is a source or a +destination in the context of ACL rules that will apply to network traffic. + +Note, in some cases, the config and the state Secret may be the same Kubernetes +Secret. + +## API server proxy + +[Documentation][kb-operator-proxy] + +The operator runs the API server proxy in-process. If the proxy is running in +"noauth" mode, it forwards HTTP requests unmodified. If the proxy is running in +"auth" mode, it deletes any existing auth headers and adds +[impersonation headers][k8s-impersonation] to the request before forwarding to +the API server. A request with impersonation headers will look something like: + +``` +GET /api/v1/namespaces/default/pods HTTP/1.1 +Host: k8s-api.example.com +Authorization: Bearer +Impersonate-Group: tailnet-readers +Accept: application/json +``` + +```mermaid +%%{ init: { 'theme':'neutral' } }%% +flowchart LR + classDef tsnode color:#fff,fill:#000; + classDef pod fill:#fff; + + subgraph Key + ts[Tailscale device]:::tsnode + pod((Pod)):::pod + blank[" "]-->|WireGuard traffic| blank2[" "] + blank3[" "]-->|Other network traffic| blank4[" "] + end + + subgraph k8s[Kubernetes cluster] + subgraph tailscale-ns[namespace=tailscale] + operator(("operator (dst)")):::tsnode + end + + subgraph controlplane["Control plane"] + api[kube-apiserver] + end + end + + client["client (src)"]:::tsnode --> operator + operator -->|"proxy (maybe with impersonation headers)"| api + + linkStyle 0 stroke:red; + linkStyle 2 stroke:red; + + linkStyle 1 stroke:blue; + linkStyle 3 stroke:blue; + +``` + +## L3 ingress + +[Documentation][kb-operator-l3-ingress] + +The user deploys an app to the default namespace, and creates a normal Service +that selects the app's Pods. Either add the annotation +`tailscale.com/expose: "true"` or specify `.spec.type` as `Loadbalancer` and +`.spec.loadBalancerClass` as `tailscale`. The operator will create an ingress +proxy that allows devices anywhere on the tailnet to access the Service. + +The proxy Pod uses `iptables` or `nftables` rules to DNAT traffic bound for the +proxy's tailnet IP to the Service's internal Cluster IP instead. 
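For illustration, a Service exposed this way might look like the following sketch, expressed with client-go types rather than the operator's own code. The Service name, namespace, selector, and ports are placeholders; only the `tailscale.com/expose` annotation and the `tailscale` load balancer class come from the description above.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// ingressService builds the kind of annotated Service described above.
func ingressService() *corev1.Service {
	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-app", // placeholder
			Namespace: "default",
			Annotations: map[string]string{
				"tailscale.com/expose": "true",
			},
		},
		Spec: corev1.ServiceSpec{
			Selector: map[string]string{"app": "my-app"}, // placeholder
			Ports: []corev1.ServicePort{{
				Port:       80,
				TargetPort: intstr.FromInt(8080), // placeholder
			}},
			// Alternatively, drop the annotation and use:
			//   Type:              corev1.ServiceTypeLoadBalancer,
			//   LoadBalancerClass: ptr.To("tailscale"),
		},
	}
}
```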
+ +```mermaid +%%{ init: { 'theme':'neutral' } }%% +flowchart TD + classDef tsnode color:#fff,fill:#000; + classDef pod fill:#fff; + + subgraph Key + ts[Tailscale device]:::tsnode + pod((Pod)):::pod + blank[" "]-->|WireGuard traffic| blank2[" "] + blank3[" "]-->|Other network traffic| blank4[" "] + end + + subgraph k8s[Kubernetes cluster] + subgraph tailscale-ns[namespace=tailscale] + operator((operator)):::tsnode + ingress-sts["StatefulSet"] + ingress(("ingress proxy (dst)")):::tsnode + config-secret["config Secret"] + state-secret["state Secret"] + end + + subgraph defaultns[namespace=default] + svc[annotated Service] + svc --> pod1((pod1)) + svc --> pod2((pod2)) + end + end + + client["client (src)"]:::tsnode --> ingress + ingress -->|forwards traffic| svc + operator -.->|creates| ingress-sts + ingress-sts -.->|manages| ingress + operator -.->|reads| svc + operator -.->|creates| config-secret + config-secret -.->|mounted| ingress + ingress -.->|stores state| state-secret + + linkStyle 0 stroke:red; + linkStyle 4 stroke:red; + + linkStyle 1 stroke:blue; + linkStyle 2 stroke:blue; + linkStyle 3 stroke:blue; + linkStyle 5 stroke:blue; + +``` + +## L7 ingress + +[Documentation][kb-operator-l7-ingress] + +L7 ingress is relatively similar to L3 ingress. It is configured via an +`Ingress` object instead of a `Service`, and uses `tailscale serve` to accept +traffic instead of configuring `iptables` or `nftables` rules. Note that we use +tailscaled's local API (`SetServeConfig`) to set serve config, not the +`tailscale serve` command. + +```mermaid +%%{ init: { 'theme':'neutral' } }%% +flowchart TD + classDef tsnode color:#fff,fill:#000; + classDef pod fill:#fff; + + subgraph Key + ts[Tailscale device]:::tsnode + pod((Pod)):::pod + blank[" "]-->|WireGuard traffic| blank2[" "] + blank3[" "]-->|Other network traffic| blank4[" "] + end + + subgraph k8s[Kubernetes cluster] + subgraph tailscale-ns[namespace=tailscale] + operator((operator)):::tsnode + ingress-sts["StatefulSet"] + ingress-pod(("ingress proxy (dst)")):::tsnode + config-secret["config Secret"] + state-secret["state Secret"] + end + + subgraph defaultns[namespace=default] + ingress[tailscale Ingress] + svc["Service"] + svc --> pod1((pod1)) + svc --> pod2((pod2)) + end + end + + client["client (src)"]:::tsnode --> ingress-pod + ingress-pod -->|forwards /api prefix traffic| svc + operator -.->|creates| ingress-sts + ingress-sts -.->|manages| ingress-pod + operator -.->|reads| ingress + operator -.->|creates| config-secret + config-secret -.->|mounted| ingress-pod + ingress-pod -.->|stores state| state-secret + ingress -.->|/api prefix| svc + + linkStyle 0 stroke:red; + linkStyle 4 stroke:red; + + linkStyle 1 stroke:blue; + linkStyle 2 stroke:blue; + linkStyle 3 stroke:blue; + linkStyle 5 stroke:blue; + +``` + +## L3 egress + +[Documentation][kb-operator-l3-egress] + +1. The user deploys a Service with `type: ExternalName` and an annotation + `tailscale.com/tailnet-fqdn: db.tails-scales.ts.net`. +1. The operator creates a proxy Pod managed by a single replica StatefulSet, and a headless Service pointing at the proxy Pod. +1. The operator updates the `ExternalName` Service's `spec.externalName` field to point + at the headless Service it created in the previous step. + +(Optional) If the user also adds the `tailscale.com/proxy-group: egress-proxies` +annotation to their `ExternalName` Service, the operator will skip creating a +proxy Pod and instead point the headless Service at the existing ProxyGroup's +pods. 
In this case, ports are also required in the `ExternalName` Service spec. +See below for a more representative diagram. + +```mermaid +%%{ init: { 'theme':'neutral' } }%% + +flowchart TD + classDef tsnode color:#fff,fill:#000; + classDef pod fill:#fff; + + subgraph Key + ts[Tailscale device]:::tsnode + pod((Pod)):::pod + blank[" "]-->|WireGuard traffic| blank2[" "] + blank3[" "]-->|Other network traffic| blank4[" "] + end + + subgraph k8s[Kubernetes cluster] + subgraph tailscale-ns[namespace=tailscale] + operator((operator)):::tsnode + egress(("egress proxy (src)")):::tsnode + egress-sts["StatefulSet"] + headless-svc[headless Service] + cfg-secret["config Secret"] + state-secret["state Secret"] + end + + subgraph defaultns[namespace=default] + svc[ExternalName Service] + pod1((pod1)) --> svc + pod2((pod2)) --> svc + end + end + + node["db.tails-scales.ts.net (dst)"]:::tsnode + + svc -->|DNS points to| headless-svc + headless-svc -->|selects egress Pod| egress + egress -->|forwards traffic| node + operator -.->|creates| egress-sts + egress-sts -.->|manages| egress + operator -.->|creates| headless-svc + operator -.->|creates| cfg-secret + operator -.->|watches & updates| svc + cfg-secret -.->|mounted| egress + egress -.->|stores state| state-secret + + linkStyle 0 stroke:red; + linkStyle 6 stroke:red; + + linkStyle 1 stroke:blue; + linkStyle 2 stroke:blue; + linkStyle 3 stroke:blue; + linkStyle 4 stroke:blue; + linkStyle 5 stroke:blue; + +``` + +## `ProxyGroup` + +[Documentation][kb-operator-l3-egress-proxygroup] + +The `ProxyGroup` custom resource manages a collection of proxy Pods that +can be configured to egress traffic out of the cluster via ExternalName +Services. A `ProxyGroup` is both a high availability (HA) version of L3 +egress, and a mechanism to serve multiple ExternalName Services on a single +set of Tailscale devices (coalescing). + +In this diagram, the `ProxyGroup` is named `pg`. The Secrets associated with +the `ProxyGroup` Pods are omitted for simplicity. They are similar to the L3 +egress case above, but there is a pair of config + state Secrets _per Pod_. + +Each ExternalName Service defines which ports should be mapped to their defined +egress target. The operator maps from these ports to randomly chosen ephemeral +ports via the ClusterIP Service and its EndpointSlice. The operator then +generates the egress ConfigMap that tells the `ProxyGroup` Pods which incoming +ports map to which egress targets. + +`ProxyGroups` currently only support egress. 
+ +```mermaid +%%{ init: { 'theme':'neutral' } }%% + +flowchart LR + classDef tsnode color:#fff,fill:#000; + classDef pod fill:#fff; + + subgraph Key + ts[Tailscale device]:::tsnode + pod((Pod)):::pod + blank[" "]-->|WireGuard traffic| blank2[" "] + blank3[" "]-->|Other network traffic| blank4[" "] + end + + subgraph k8s[Kubernetes cluster] + subgraph tailscale-ns[namespace=tailscale] + operator((operator)):::tsnode + pg-sts[StatefulSet] + pg-0(("pg-0 (src)")):::tsnode + pg-1(("pg-1 (src)")):::tsnode + db-cluster-ip[db ClusterIP Service] + api-cluster-ip[api ClusterIP Service] + egress-cm["egress ConfigMap"] + end + + subgraph cluster-scope["Cluster scoped resources"] + pg["ProxyGroup 'pg'"] + end + + subgraph defaultns[namespace=default] + db-svc[db ExternalName Service] + api-svc[api ExternalName Service] + pod1((pod1)) --> db-svc + pod2((pod2)) --> db-svc + pod1((pod1)) --> api-svc + pod2((pod2)) --> api-svc + end + end + + db["db.tails-scales.ts.net (dst)"]:::tsnode + api["api.tails-scales.ts.net (dst)"]:::tsnode + + db-svc -->|DNS points to| db-cluster-ip + api-svc -->|DNS points to| api-cluster-ip + db-cluster-ip -->|maps to ephemeral db ports| pg-0 + db-cluster-ip -->|maps to ephemeral db ports| pg-1 + api-cluster-ip -->|maps to ephemeral api ports| pg-0 + api-cluster-ip -->|maps to ephemeral api ports| pg-1 + pg-0 -->|forwards db port traffic| db + pg-0 -->|forwards api port traffic| api + pg-1 -->|forwards db port traffic| db + pg-1 -->|forwards api port traffic| api + operator -.->|creates & populates endpointslice| db-cluster-ip + operator -.->|creates & populates endpointslice| api-cluster-ip + operator -.->|stores port mapping| egress-cm + egress-cm -.->|mounted| pg-0 + egress-cm -.->|mounted| pg-1 + operator -.->|watches| pg + operator -.->|creates| pg-sts + pg-sts -.->|manages| pg-0 + pg-sts -.->|manages| pg-1 + operator -.->|watches| db-svc + operator -.->|watches| api-svc + + linkStyle 0 stroke:red; + linkStyle 12 stroke:red; + linkStyle 13 stroke:red; + linkStyle 14 stroke:red; + linkStyle 15 stroke:red; + + linkStyle 1 stroke:blue; + linkStyle 2 stroke:blue; + linkStyle 3 stroke:blue; + linkStyle 4 stroke:blue; + linkStyle 5 stroke:blue; + linkStyle 6 stroke:blue; + linkStyle 7 stroke:blue; + linkStyle 8 stroke:blue; + linkStyle 9 stroke:blue; + linkStyle 10 stroke:blue; + linkStyle 11 stroke:blue; + +``` + +## Connector + +[Subnet router and exit node documentation][kb-operator-connector] + +[App connector documentation][kb-operator-app-connector] + +The Connector Custom Resource can deploy either a subnet router, an exit node, +or an app connector. The following diagram shows all 3, but only one workflow +can be configured per Connector resource. 
+ +```mermaid +%%{ init: { 'theme':'neutral' } }%% + +flowchart TD + classDef tsnode color:#fff,fill:#000; + classDef pod fill:#fff; + classDef hidden display:none; + + subgraph Key + ts[Tailscale device]:::tsnode + pod((Pod)):::pod + blank[" "]-->|WireGuard traffic| blank2[" "] + blank3[" "]-->|Other network traffic| blank4[" "] + end + + subgraph grouping[" "] + subgraph k8s[Kubernetes cluster] + subgraph tailscale-ns[namespace=tailscale] + operator((operator)):::tsnode + cn-sts[StatefulSet] + cn-pod(("tailscale (dst)")):::tsnode + cfg-secret["config Secret"] + state-secret["state Secret"] + end + + subgraph cluster-scope["Cluster scoped resources"] + cn["Connector"] + end + + subgraph defaultns["namespace=default"] + pod1 + end + end + + client["client (src)"]:::tsnode + Internet + end + + client --> cn-pod + cn-pod -->|app connector or exit node routes| Internet + cn-pod -->|subnet route| pod1 + operator -.->|watches| cn + operator -.->|creates| cn-sts + cn-sts -.->|manages| cn-pod + operator -.->|creates| cfg-secret + cfg-secret -.->|mounted| cn-pod + cn-pod -.->|stores state| state-secret + + class grouping hidden + + linkStyle 0 stroke:red; + linkStyle 2 stroke:red; + + linkStyle 1 stroke:blue; + linkStyle 3 stroke:blue; + linkStyle 4 stroke:blue; + +``` + +## Recorder nodes + +[Documentation][kb-operator-recorder] + +The `Recorder` custom resource makes it easier to deploy `tsrecorder` to a cluster. +It currently only supports a single replica. + +```mermaid +%%{ init: { 'theme':'neutral' } }%% + +flowchart TD + classDef tsnode color:#fff,fill:#000; + classDef pod fill:#fff; + classDef hidden display:none; + + subgraph Key + ts[Tailscale device]:::tsnode + pod((Pod)):::pod + blank[" "]-->|WireGuard traffic| blank2[" "] + blank3[" "]-->|Other network traffic| blank4[" "] + end + + subgraph grouping[" "] + subgraph k8s[Kubernetes cluster] + api["kube-apiserver"] + + subgraph tailscale-ns[namespace=tailscale] + operator(("operator (dst)")):::tsnode + rec-sts[StatefulSet] + rec-0(("tsrecorder")):::tsnode + cfg-secret-0["config Secret"] + state-secret-0["state Secret"] + end + + subgraph cluster-scope["Cluster scoped resources"] + rec["Recorder"] + end + end + + client["client (src)"]:::tsnode + kubectl-exec["kubectl exec (src)"]:::tsnode + server["server (dst)"]:::tsnode + s3["S3-compatible storage"] + end + + kubectl-exec -->|exec session| operator + operator -->|exec session recording| rec-0 + operator -->|exec session| api + client -->|ssh session| server + server -->|ssh session recording| rec-0 + rec-0 -->|session recordings| s3 + operator -.->|watches| rec + operator -.->|creates| rec-sts + rec-sts -.->|manages| rec-0 + operator -.->|creates| cfg-secret-0 + cfg-secret-0 -.->|mounted| rec-0 + rec-0 -.->|stores state| state-secret-0 + + class grouping hidden + + linkStyle 0 stroke:red; + linkStyle 2 stroke:red; + linkStyle 3 stroke:red; + linkStyle 5 stroke:red; + linkStyle 6 stroke:red; + + linkStyle 1 stroke:blue; + linkStyle 4 stroke:blue; + linkStyle 7 stroke:blue; + +``` + +[kb-operator]: https://tailscale.com/kb/1236/kubernetes-operator +[kb-operator-proxy]: https://tailscale.com/kb/1437/kubernetes-operator-api-server-proxy +[kb-operator-l3-ingress]: https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress#exposing-a-cluster-workload-using-a-kubernetes-service +[kb-operator-l7-ingress]: https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress#exposing-cluster-workloads-using-a-kubernetes-ingress +[kb-operator-l3-egress]: 
https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress +[kb-operator-l3-egress-proxygroup]: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress#configure-an-egress-service-using-proxygroup +[kb-operator-connector]: https://tailscale.com/kb/1441/kubernetes-operator-connector +[kb-operator-app-connector]: https://tailscale.com/kb/1517/kubernetes-operator-app-connector +[kb-operator-recorder]: https://tailscale.com/kb/1484/kubernetes-operator-deploying-tsrecorder +[k8s-impersonation]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation From ff5b4bae99c7dc8bb57660bb579d5df4ab31b1ce Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 18 Dec 2024 17:11:22 -0800 Subject: [PATCH 0238/1708] syncs: add MutexValue (#14422) MutexValue is simply a value guarded by a mutex. For any type that is not pointer-sized, MutexValue will perform much better than AtomicValue since it will not incur an allocation boxing the value into an interface value (which is how Go's atomic.Value is implemented under-the-hood). Updates #cleanup Signed-off-by: Joe Tsai --- syncs/syncs.go | 62 +++++++++++++++++++++++++++++++++++++++++++++ syncs/syncs_test.go | 34 +++++++++++++++++++++++++ 2 files changed, 96 insertions(+) diff --git a/syncs/syncs.go b/syncs/syncs.go index acc0c88f2..337fca755 100644 --- a/syncs/syncs.go +++ b/syncs/syncs.go @@ -25,6 +25,7 @@ func initClosedChan() <-chan struct{} { } // AtomicValue is the generic version of [atomic.Value]. +// See [MutexValue] for guidance on whether to use this type. type AtomicValue[T any] struct { v atomic.Value } @@ -74,6 +75,67 @@ func (v *AtomicValue[T]) CompareAndSwap(oldV, newV T) (swapped bool) { return v.v.CompareAndSwap(wrappedValue[T]{oldV}, wrappedValue[T]{newV}) } +// MutexValue is a value protected by a mutex. +// +// AtomicValue, [MutexValue], [atomic.Pointer] are similar and +// overlap in their use cases. +// +// - Use [atomic.Pointer] if the value being stored is a pointer and +// you only ever need load and store operations. +// An atomic pointer only occupies 1 word of memory. +// +// - Use [MutexValue] if the value being stored is not a pointer or +// you need the ability for a mutex to protect a set of operations +// performed on the value. +// A mutex-guarded value occupies 1 word of memory plus +// the memory representation of T. +// +// - AtomicValue is useful for non-pointer types that happen to +// have the memory layout of a single pointer. +// Examples include a map, channel, func, or a single field struct +// that contains any prior types. +// An atomic value occupies 2 words of memory. +// Consequently, Storing of non-pointer types always allocates. +// +// Note that [AtomicValue] has the ability to report whether it was set +// while [MutexValue] lacks the ability to detect if the value was set +// and it happens to be the zero value of T. If such a use case is +// necessary, then you could consider wrapping T in [opt.Value]. +type MutexValue[T any] struct { + mu sync.Mutex + v T +} + +// WithLock calls f with a pointer to the value while holding the lock. +// The provided pointer must not leak beyond the scope of the call. +func (m *MutexValue[T]) WithLock(f func(p *T)) { + m.mu.Lock() + defer m.mu.Unlock() + f(&m.v) +} + +// Load returns a shallow copy of the underlying value. +func (m *MutexValue[T]) Load() T { + m.mu.Lock() + defer m.mu.Unlock() + return m.v +} + +// Store stores a shallow copy of the provided value. 
+func (m *MutexValue[T]) Store(v T) { + m.mu.Lock() + defer m.mu.Unlock() + m.v = v +} + +// Swap stores new into m and returns the previous value. +func (m *MutexValue[T]) Swap(new T) (old T) { + m.mu.Lock() + defer m.mu.Unlock() + old, m.v = m.v, new + return old +} + // WaitGroupChan is like a sync.WaitGroup, but has a chan that closes // on completion that you can wait on. (This, you can only use the // value once) diff --git a/syncs/syncs_test.go b/syncs/syncs_test.go index ee3711e76..901d42948 100644 --- a/syncs/syncs_test.go +++ b/syncs/syncs_test.go @@ -8,6 +8,7 @@ import ( "io" "os" "testing" + "time" "github.com/google/go-cmp/cmp" ) @@ -65,6 +66,39 @@ func TestAtomicValue(t *testing.T) { } } +func TestMutexValue(t *testing.T) { + var v MutexValue[time.Time] + if n := int(testing.AllocsPerRun(1000, func() { + v.Store(v.Load()) + v.WithLock(func(*time.Time) {}) + })); n != 0 { + t.Errorf("AllocsPerRun = %d, want 0", n) + } + + now := time.Now() + v.Store(now) + if !v.Load().Equal(now) { + t.Errorf("Load = %v, want %v", v.Load(), now) + } + + var group WaitGroup + var v2 MutexValue[int] + var sum int + for i := range 10 { + group.Go(func() { + old1 := v2.Load() + old2 := v2.Swap(old1 + i) + delta := old2 - old1 + v2.WithLock(func(p *int) { *p += delta }) + }) + sum += i + } + group.Wait() + if v2.Load() != sum { + t.Errorf("Load = %v, want %v", v2.Load(), sum) + } +} + func TestWaitGroupChan(t *testing.T) { wg := NewWaitGroupChan() From 6ae0287a5799f60eacdb1e7b1191fadb697fb75a Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Tue, 10 Dec 2024 13:54:31 -0800 Subject: [PATCH 0239/1708] cmd/systray: add account switcher Updates #1708 Signed-off-by: Andrew Lytvynov --- cmd/systray/systray.go | 91 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 75 insertions(+), 16 deletions(-) diff --git a/cmd/systray/systray.go b/cmd/systray/systray.go index aca38f627..d175b55f3 100644 --- a/cmd/systray/systray.go +++ b/cmd/systray/systray.go @@ -49,6 +49,8 @@ type Menu struct { more *systray.MenuItem quit *systray.MenuItem + accountsCh chan ipn.ProfileID + eventCancel func() // cancel eventLoop } @@ -64,15 +66,32 @@ func onReady() { chState = make(chan ipn.State, 1) + menu := new(Menu) + menu.rebuild(fetchState(ctx)) + + go watchIPNBus(ctx) +} + +type state struct { + status *ipnstate.Status + curProfile ipn.LoginProfile + allProfiles []ipn.LoginProfile +} + +func fetchState(ctx context.Context) state { status, err := localClient.Status(ctx) if err != nil { log.Print(err) } - - menu := new(Menu) - menu.rebuild(status) - - go watchIPNBus(ctx) + curProfile, allProfiles, err := localClient.ProfileStatus(ctx) + if err != nil { + log.Print(err) + } + return state{ + status: status, + curProfile: curProfile, + allProfiles: allProfiles, + } } // rebuild the systray menu based on the current Tailscale state. @@ -80,14 +99,17 @@ func onReady() { // We currently rebuild the entire menu because it is not easy to update the existing menu. // You cannot iterate over the items in a menu, nor can you remove some items like separators. // So for now we rebuild the whole thing, and can optimize this later if needed. 
-func (menu *Menu) rebuild(status *ipnstate.Status) { +func (menu *Menu) rebuild(state state) { menu.mu.Lock() defer menu.mu.Unlock() if menu.eventCancel != nil { menu.eventCancel() } - menu.status = status + ctx := context.Background() + ctx, menu.eventCancel = context.WithCancel(ctx) + + menu.status = state.status systray.ResetMenu() menu.connect = systray.AddMenuItem("Connect", "") @@ -95,8 +117,46 @@ func (menu *Menu) rebuild(status *ipnstate.Status) { menu.disconnect.Hide() systray.AddSeparator() - if status != nil && status.Self != nil { - title := fmt.Sprintf("This Device: %s (%s)", status.Self.HostName, status.Self.TailscaleIPs[0]) + account := "Account" + if state.curProfile.Name != "" { + account += fmt.Sprintf(" (%s)", state.curProfile.Name) + } + accounts := systray.AddMenuItem(account, "") + // The dbus message about this menu item must propagate to the receiving + // end before we attach any submenu items. Otherwise the receiver may not + // yet record the parent menu item and error out. + // + // On waybar with libdbusmenu-gtk, this manifests as the following warning: + // (waybar:153009): LIBDBUSMENU-GTK-WARNING **: 18:07:11.551: Children but no menu, someone's been naughty with their 'children-display' property: 'submenu' + time.Sleep(100 * time.Millisecond) + // Aggregate all clicks into a shared channel. + menu.accountsCh = make(chan ipn.ProfileID) + for _, profile := range state.allProfiles { + title := fmt.Sprintf("%s (%s)", profile.Name, profile.NetworkProfile.DomainName) + // Note: we could use AddSubMenuItemCheckbox instead of this formatting + // hack, but checkboxes don't work across all desktops unfortunately. + if profile.ID == state.curProfile.ID { + title = "* " + title + } + item := accounts.AddSubMenuItem(title, "") + go func(profile ipn.LoginProfile) { + for { + select { + case <-ctx.Done(): + return + case <-item.ClickedCh: + select { + case <-ctx.Done(): + return + case menu.accountsCh <- profile.ID: + } + } + } + }(profile) + } + + if state.status != nil && state.status.Self != nil { + title := fmt.Sprintf("This Device: %s (%s)", state.status.Self.HostName, state.status.Self.TailscaleIPs[0]) menu.self = systray.AddMenuItem(title, "") } systray.AddSeparator() @@ -107,8 +167,6 @@ func (menu *Menu) rebuild(status *ipnstate.Status) { menu.quit = systray.AddMenuItem("Quit", "Quit the app") menu.quit.Enable() - ctx := context.Background() - ctx, menu.eventCancel = context.WithCancel(ctx) go menu.eventLoop(ctx) } @@ -124,11 +182,7 @@ func (menu *Menu) eventLoop(ctx context.Context) { switch state { case ipn.Running: setAppIcon(loading) - status, err := localClient.Status(ctx) - if err != nil { - log.Printf("error getting tailscale status: %v", err) - } - menu.rebuild(status) + menu.rebuild(fetchState(ctx)) setAppIcon(connected) menu.connect.SetTitle("Connected") menu.connect.Disable() @@ -172,6 +226,11 @@ func (menu *Menu) eventLoop(ctx context.Context) { case <-menu.more.ClickedCh: webbrowser.Open("http://100.100.100.100/") + case id := <-menu.accountsCh: + if err := localClient.SwitchProfile(ctx, id); err != nil { + log.Printf("failed switching to profile ID %v: %v", id, err) + } + case <-menu.quit.ClickedCh: systray.Quit() } From 00a4504cf1a1b150a8896283ccf5e01011112626 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Mon, 16 Dec 2024 23:05:46 -0600 Subject: [PATCH 0240/1708] cmd/derpprobe,prober: add ability to perform continuous queuing delay measurements against DERP servers This new type of probe sends DERP packets sized similarly to CallMeMaybe 
packets at a rate of 10 packets per second. It records the round-trip times in a Prometheus histogram. It also keeps track of how many packets are dropped. Packets that fail to arrive within 5 seconds are considered dropped. Updates tailscale/corp#24522 Signed-off-by: Percy Wegmann --- cmd/derpprobe/derpprobe.go | 31 +++--- prober/derp.go | 222 ++++++++++++++++++++++++++++++++++++- prober/histogram.go | 50 +++++++++ prober/histogram_test.go | 29 +++++ prober/prober.go | 64 +++++++++-- prober/prober_test.go | 17 +-- prober/status.go | 12 +- prober/status.html | 57 +++++++--- 8 files changed, 428 insertions(+), 54 deletions(-) create mode 100644 prober/histogram.go create mode 100644 prober/histogram_test.go diff --git a/cmd/derpprobe/derpprobe.go b/cmd/derpprobe/derpprobe.go index 620b96609..62b7d47a4 100644 --- a/cmd/derpprobe/derpprobe.go +++ b/cmd/derpprobe/derpprobe.go @@ -18,19 +18,21 @@ import ( ) var ( - derpMapURL = flag.String("derp-map", "https://login.tailscale.com/derpmap/default", "URL to DERP map (https:// or file://) or 'local' to use the local tailscaled's DERP map") - versionFlag = flag.Bool("version", false, "print version and exit") - listen = flag.String("listen", ":8030", "HTTP listen address") - probeOnce = flag.Bool("once", false, "probe once and print results, then exit; ignores the listen flag") - spread = flag.Bool("spread", true, "whether to spread probing over time") - interval = flag.Duration("interval", 15*time.Second, "probe interval") - meshInterval = flag.Duration("mesh-interval", 15*time.Second, "mesh probe interval") - stunInterval = flag.Duration("stun-interval", 15*time.Second, "STUN probe interval") - tlsInterval = flag.Duration("tls-interval", 15*time.Second, "TLS probe interval") - bwInterval = flag.Duration("bw-interval", 0, "bandwidth probe interval (0 = no bandwidth probing)") - bwSize = flag.Int64("bw-probe-size-bytes", 1_000_000, "bandwidth probe size") - bwTUNIPv4Address = flag.String("bw-tun-ipv4-addr", "", "if specified, bandwidth probes will be performed over a TUN device at this address in order to exercise TCP-in-TCP in similar fashion to TCP over Tailscale via DERP. We will use a /30 subnet including this IP address.") - regionCode = flag.String("region-code", "", "probe only this region (e.g. 
'lax'); if left blank, all regions will be probed") + derpMapURL = flag.String("derp-map", "https://login.tailscale.com/derpmap/default", "URL to DERP map (https:// or file://) or 'local' to use the local tailscaled's DERP map") + versionFlag = flag.Bool("version", false, "print version and exit") + listen = flag.String("listen", ":8030", "HTTP listen address") + probeOnce = flag.Bool("once", false, "probe once and print results, then exit; ignores the listen flag") + spread = flag.Bool("spread", true, "whether to spread probing over time") + interval = flag.Duration("interval", 15*time.Second, "probe interval") + meshInterval = flag.Duration("mesh-interval", 15*time.Second, "mesh probe interval") + stunInterval = flag.Duration("stun-interval", 15*time.Second, "STUN probe interval") + tlsInterval = flag.Duration("tls-interval", 15*time.Second, "TLS probe interval") + bwInterval = flag.Duration("bw-interval", 0, "bandwidth probe interval (0 = no bandwidth probing)") + bwSize = flag.Int64("bw-probe-size-bytes", 1_000_000, "bandwidth probe size") + bwTUNIPv4Address = flag.String("bw-tun-ipv4-addr", "", "if specified, bandwidth probes will be performed over a TUN device at this address in order to exercise TCP-in-TCP in similar fashion to TCP over Tailscale via DERP; we will use a /30 subnet including this IP address") + qdPacketsPerSecond = flag.Int("qd-packets-per-second", 0, "if greater than 0, queuing delay will be measured continuously using 260 byte packets (approximate size of a CallMeMaybe packet) sent at this rate per second") + qdPacketTimeout = flag.Duration("qd-packet-timeout", 5*time.Second, "queuing delay packets arriving after this period of time from being sent are treated like dropped packets and don't count toward queuing delay timings") + regionCode = flag.String("region-code", "", "probe only this region (e.g. 'lax'); if left blank, all regions will be probed") ) func main() { @@ -45,6 +47,7 @@ func main() { prober.WithMeshProbing(*meshInterval), prober.WithSTUNProbing(*stunInterval), prober.WithTLSProbing(*tlsInterval), + prober.WithQueuingDelayProbing(*qdPacketsPerSecond, *qdPacketTimeout), } if *bwInterval > 0 { opts = append(opts, prober.WithBandwidthProbing(*bwInterval, *bwSize, *bwTUNIPv4Address)) @@ -107,7 +110,7 @@ func getOverallStatus(p *prober.Prober) (o overallStatus) { // Do not show probes that have not finished yet. continue } - if i.Result { + if i.Status == prober.ProbeStatusSucceeded { o.addGoodf("%s: %s", p, i.Latency) } else { o.addBadf("%s: %s", p, i.Error) diff --git a/prober/derp.go b/prober/derp.go index 742e8a5f4..5adc0c0b4 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -8,6 +8,7 @@ import ( "cmp" "context" crand "crypto/rand" + "encoding/binary" "encoding/json" "errors" "expvar" @@ -17,6 +18,7 @@ import ( "net" "net/http" "net/netip" + "slices" "strconv" "strings" "sync" @@ -53,6 +55,10 @@ type derpProber struct { bwProbeSize int64 bwTUNIPv4Prefix *netip.Prefix // or nil to not use TUN + // Optional queuing delay probing. + qdPacketsPerSecond int // in packets per second + qdPacketTimeout time.Duration + // Optionally restrict probes to a single regionCode. 
regionCode string @@ -64,6 +70,7 @@ type derpProber struct { udpProbeFn func(string, int) ProbeClass meshProbeFn func(string, string) ProbeClass bwProbeFn func(string, string, int64) ProbeClass + qdProbeFn func(string, string, int, time.Duration) ProbeClass sync.Mutex lastDERPMap *tailcfg.DERPMap @@ -93,6 +100,16 @@ func WithBandwidthProbing(interval time.Duration, size int64, tunAddress string) } } +// WithQueuingDelayProbing enables/disables queuing delay probing. qdSendRate +// is the number of packets sent per second. qdTimeout is the amount of time +// after which a sent packet is considered to have timed out. +func WithQueuingDelayProbing(qdPacketsPerSecond int, qdPacketTimeout time.Duration) DERPOpt { + return func(d *derpProber) { + d.qdPacketsPerSecond = qdPacketsPerSecond + d.qdPacketTimeout = qdPacketTimeout + } +} + // WithMeshProbing enables mesh probing. When enabled, a small message will be // transferred through each DERP server and each pair of DERP servers. func WithMeshProbing(interval time.Duration) DERPOpt { @@ -147,6 +164,7 @@ func DERP(p *Prober, derpMapURL string, opts ...DERPOpt) (*derpProber, error) { d.udpProbeFn = d.ProbeUDP d.meshProbeFn = d.probeMesh d.bwProbeFn = d.probeBandwidth + d.qdProbeFn = d.probeQueuingDelay return d, nil } @@ -213,7 +231,7 @@ func (d *derpProber) probeMapFn(ctx context.Context) error { } } - if d.bwInterval > 0 && d.bwProbeSize > 0 { + if d.bwInterval != 0 && d.bwProbeSize > 0 { n := fmt.Sprintf("derp/%s/%s/%s/bw", region.RegionCode, server.Name, to.Name) wantProbes[n] = true if d.probes[n] == nil { @@ -225,6 +243,15 @@ func (d *derpProber) probeMapFn(ctx context.Context) error { d.probes[n] = d.p.Run(n, d.bwInterval, labels, d.bwProbeFn(server.Name, to.Name, d.bwProbeSize)) } } + + if d.qdPacketsPerSecond > 0 { + n := fmt.Sprintf("derp/%s/%s/%s/qd", region.RegionCode, server.Name, to.Name) + wantProbes[n] = true + if d.probes[n] == nil { + log.Printf("adding DERP queuing delay probe for %s->%s (%s)", server.Name, to.Name, region.RegionName) + d.probes[n] = d.p.Run(n, -10*time.Second, labels, d.qdProbeFn(server.Name, to.Name, d.qdPacketsPerSecond, d.qdPacketTimeout)) + } + } } } } @@ -240,7 +267,7 @@ func (d *derpProber) probeMapFn(ctx context.Context) error { return nil } -// probeMesh returs a probe class that sends a test packet through a pair of DERP +// probeMesh returns a probe class that sends a test packet through a pair of DERP // servers (or just one server, if 'from' and 'to' are the same). 'from' and 'to' // are expected to be names (DERPNode.Name) of two DERP servers in the same region. func (d *derpProber) probeMesh(from, to string) ProbeClass { @@ -263,7 +290,7 @@ func (d *derpProber) probeMesh(from, to string) ProbeClass { } } -// probeBandwidth returs a probe class that sends a payload of a given size +// probeBandwidth returns a probe class that sends a payload of a given size // through a pair of DERP servers (or just one server, if 'from' and 'to' are // the same). 'from' and 'to' are expected to be names (DERPNode.Name) of two // DERP servers in the same region. @@ -295,6 +322,193 @@ func (d *derpProber) probeBandwidth(from, to string, size int64) ProbeClass { } } +// probeQueuingDelay returns a probe class that continuously sends packets +// through a pair of DERP servers (or just one server, if 'from' and 'to' are +// the same) at a rate of `packetsPerSecond` packets per second in order to +// measure queuing delays. 
Packets arriving after `packetTimeout` don't contribute +// to the queuing delay measurement and are recorded as dropped. 'from' and 'to' are +// expected to be names (DERPNode.Name) of two DERP servers in the same region, +// and may refer to the same server. +func (d *derpProber) probeQueuingDelay(from, to string, packetsPerSecond int, packetTimeout time.Duration) ProbeClass { + derpPath := "mesh" + if from == to { + derpPath = "single" + } + var packetsDropped expvar.Float + qdh := newHistogram([]float64{.005, .01, .025, .05, .1, .25, .5, 1}) + return ProbeClass{ + Probe: func(ctx context.Context) error { + fromN, toN, err := d.getNodePair(from, to) + if err != nil { + return err + } + return derpProbeQueuingDelay(ctx, d.lastDERPMap, fromN, toN, packetsPerSecond, packetTimeout, &packetsDropped, qdh) + }, + Class: "derp_qd", + Labels: Labels{"derp_path": derpPath}, + Metrics: func(l prometheus.Labels) []prometheus.Metric { + qdh.mx.Lock() + result := []prometheus.Metric{ + prometheus.MustNewConstMetric(prometheus.NewDesc("derp_qd_probe_dropped_packets", "Total packets dropped", nil, l), prometheus.CounterValue, float64(packetsDropped.Value())), + prometheus.MustNewConstHistogram(prometheus.NewDesc("derp_qd_probe_delays_seconds", "Distribution of queuing delays", nil, l), qdh.count, qdh.sum, qdh.bucketedCounts), + } + qdh.mx.Unlock() + return result + }, + } +} + +// derpProbeQueuingDelay continuously sends data between two local DERP clients +// connected to two DERP servers in order to measure queuing delays. From and to +// can be the same server. +func derpProbeQueuingDelay(ctx context.Context, dm *tailcfg.DERPMap, from, to *tailcfg.DERPNode, packetsPerSecond int, packetTimeout time.Duration, packetsDropped *expvar.Float, qdh *histogram) (err error) { + // This probe uses clients with isProber=false to avoid spamming the derper + // logs with every packet sent by the queuing delay probe. + fromc, err := newConn(ctx, dm, from, false) + if err != nil { + return err + } + defer fromc.Close() + toc, err := newConn(ctx, dm, to, false) + if err != nil { + return err + } + defer toc.Close() + + // Wait a bit for from's node to hear about to existing on the + // other node in the region, in the case where the two nodes + // are different. + if from.Name != to.Name { + time.Sleep(100 * time.Millisecond) // pretty arbitrary + } + + if err := runDerpProbeQueuingDelayContinously(ctx, from, to, fromc, toc, packetsPerSecond, packetTimeout, packetsDropped, qdh); err != nil { + // Record pubkeys on failed probes to aid investigation. + return fmt.Errorf("%s -> %s: %w", + fromc.SelfPublicKey().ShortString(), + toc.SelfPublicKey().ShortString(), err) + } + return nil +} + +func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg.DERPNode, fromc, toc *derphttp.Client, packetsPerSecond int, packetTimeout time.Duration, packetsDropped *expvar.Float, qdh *histogram) error { + // Make sure all goroutines have finished. + var wg sync.WaitGroup + defer wg.Wait() + + // Close the clients to make sure goroutines that are reading/writing from them terminate. + defer fromc.Close() + defer toc.Close() + + type txRecord struct { + at time.Time + seq uint64 + } + // txRecords is sized to hold enough transmission records to keep timings + // for packets up to their timeout. As records age out of the front of this + // list, if the associated packet arrives, we won't have a txRecord for it + // and will consider it to have timed out. 
+ txRecords := make([]txRecord, 0, packetsPerSecond*int(packetTimeout.Seconds())) + var txRecordsMu sync.Mutex + + // Send the packets. + sendErrC := make(chan error, 1) + // TODO: construct a disco CallMeMaybe in the same fashion as magicsock, e.g. magic bytes, src pub, seal payload. + // DERP server handling of disco may vary from non-disco, and we may want to measure queue delay of both. + pkt := make([]byte, 260) // the same size as a CallMeMaybe packet observed on a Tailscale client. + crand.Read(pkt) + + wg.Add(1) + go func() { + defer wg.Done() + t := time.NewTicker(time.Second / time.Duration(packetsPerSecond)) + defer t.Stop() + + seq := uint64(0) + for { + select { + case <-ctx.Done(): + return + case <-t.C: + txRecordsMu.Lock() + if len(txRecords) == cap(txRecords) { + txRecords = slices.Delete(txRecords, 0, 1) + packetsDropped.Add(1) + } + txRecords = append(txRecords, txRecord{time.Now(), seq}) + txRecordsMu.Unlock() + binary.BigEndian.PutUint64(pkt, seq) + seq++ + if err := fromc.Send(toc.SelfPublicKey(), pkt); err != nil { + sendErrC <- fmt.Errorf("sending packet %w", err) + return + } + } + } + }() + + // Receive the packets. + recvFinishedC := make(chan error, 1) + wg.Add(1) + go func() { + defer wg.Done() + defer close(recvFinishedC) // to break out of 'select' below. + for { + m, err := toc.Recv() + if err != nil { + recvFinishedC <- err + return + } + switch v := m.(type) { + case derp.ReceivedPacket: + now := time.Now() + if v.Source != fromc.SelfPublicKey() { + recvFinishedC <- fmt.Errorf("got data packet from unexpected source, %v", v.Source) + return + } + seq := binary.BigEndian.Uint64(v.Data) + txRecordsMu.Lock() + findTxRecord: + for i, record := range txRecords { + switch { + case record.seq == seq: + rtt := now.Sub(record.at) + qdh.add(rtt.Seconds()) + txRecords = slices.Delete(txRecords, i, i+1) + break findTxRecord + case record.seq > seq: + // No sent time found, probably a late arrival already + // recorded as drop by sender when deleted. + break findTxRecord + case record.seq < seq: + continue + } + } + txRecordsMu.Unlock() + + case derp.KeepAliveMessage: + // Silently ignore. + + default: + log.Printf("%v: ignoring Recv frame type %T", to.Name, v) + // Loop. + } + } + }() + + select { + case <-ctx.Done(): + return fmt.Errorf("timeout: %w", ctx.Err()) + case err := <-sendErrC: + return fmt.Errorf("error sending via %q: %w", from.Name, err) + case err := <-recvFinishedC: + if err != nil { + return fmt.Errorf("error receiving from %q: %w", to.Name, err) + } + } + return nil +} + // getNodePair returns DERPNode objects for two DERP servers based on their // short names. func (d *derpProber) getNodePair(n1, n2 string) (ret1, ret2 *tailcfg.DERPNode, _ error) { @@ -573,6 +787,8 @@ func runDerpProbeNodePair(ctx context.Context, from, to *tailcfg.DERPNode, fromc recvc <- fmt.Errorf("got data packet %d from unexpected source, %v", idx, v.Source) return } + // This assumes that the packets are received reliably and in order. + // The DERP protocol does not guarantee this, but this probe assumes it. 
if got, want := v.Data, pkts[idx]; !bytes.Equal(got, want) { recvc <- fmt.Errorf("unexpected data packet %d (out of %d)", idx, len(pkts)) return diff --git a/prober/histogram.go b/prober/histogram.go new file mode 100644 index 000000000..e9005b452 --- /dev/null +++ b/prober/histogram.go @@ -0,0 +1,50 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package prober + +import ( + "slices" + "sync" +) + +// histogram serves as an adapter to the Prometheus histogram datatype. +// The prober framework passes labels at custom metric collection time that +// it expects to be coupled with the returned metrics. See ProbeClass.Metrics +// and its call sites. Native prometheus histograms cannot be collected while +// injecting more labels. Instead we use this type and pass observations + +// collection labels to prometheus.MustNewConstHistogram() at prometheus +// metric collection time. +type histogram struct { + count uint64 + sum float64 + buckets []float64 + bucketedCounts map[float64]uint64 + mx sync.Mutex +} + +// newHistogram constructs a histogram that buckets data based on the given +// slice of upper bounds. +func newHistogram(buckets []float64) *histogram { + slices.Sort(buckets) + return &histogram{ + buckets: buckets, + bucketedCounts: make(map[float64]uint64, len(buckets)), + } +} + +func (h *histogram) add(v float64) { + h.mx.Lock() + defer h.mx.Unlock() + + h.count++ + h.sum += v + + for _, b := range h.buckets { + if v > b { + continue + } + h.bucketedCounts[b] += 1 + break + } +} diff --git a/prober/histogram_test.go b/prober/histogram_test.go new file mode 100644 index 000000000..a569167e6 --- /dev/null +++ b/prober/histogram_test.go @@ -0,0 +1,29 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package prober + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestHistogram(t *testing.T) { + h := newHistogram([]float64{1, 2}) + h.add(0.5) + h.add(1) + h.add(1.5) + h.add(2) + h.add(2.5) + + if diff := cmp.Diff(h.count, uint64(5)); diff != "" { + t.Errorf("wrong count; (-got+want):%v", diff) + } + if diff := cmp.Diff(h.sum, 7.5); diff != "" { + t.Errorf("wrong sum; (-got+want):%v", diff) + } + if diff := cmp.Diff(h.bucketedCounts, map[float64]uint64{1: 2, 2: 2}); diff != "" { + t.Errorf("wrong bucketedCounts; (-got+want):%v", diff) + } +} diff --git a/prober/prober.go b/prober/prober.go index 2a43628bd..e3860e7b9 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -94,6 +94,9 @@ func newForTest(now func() time.Time, newTicker func(time.Duration) ticker) *Pro // Run executes probe class function every interval, and exports probe results under probeName. // +// If interval is negative, the probe will run continuously. If it encounters a failure while +// running continuously, it will pause for -1*interval and then retry. +// // Registering a probe under an already-registered name panics. func (p *Prober) Run(name string, interval time.Duration, labels Labels, pc ProbeClass) *Probe { p.mu.Lock() @@ -256,6 +259,11 @@ type Probe struct { latencyHist *ring.Ring } +// IsContinuous indicates that this is a continuous probe. +func (p *Probe) IsContinuous() bool { + return p.interval < 0 +} + // Close shuts down the Probe and unregisters it from its Prober. // It is safe to Run a new probe of the same name after Close returns. func (p *Probe) Close() error { @@ -288,6 +296,22 @@ func (p *Probe) loop() { return } + if p.IsContinuous() { + // Probe function is going to run continuously. 
+ for { + p.run() + // Wait and then retry if probe fails. We use the inverse of the + // configured negative interval as our sleep period. + // TODO(percy):implement exponential backoff, possibly using logtail/backoff. + select { + case <-time.After(-1 * p.interval): + p.run() + case <-p.ctx.Done(): + return + } + } + } + p.tick = p.prober.newTicker(p.interval) defer p.tick.Stop() for { @@ -323,9 +347,13 @@ func (p *Probe) run() (pi ProbeInfo, err error) { p.recordEnd(err) } }() - timeout := time.Duration(float64(p.interval) * 0.8) - ctx, cancel := context.WithTimeout(p.ctx, timeout) - defer cancel() + ctx := p.ctx + if !p.IsContinuous() { + timeout := time.Duration(float64(p.interval) * 0.8) + var cancel func() + ctx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } err = p.probeClass.Probe(ctx) p.recordEnd(err) @@ -365,6 +393,16 @@ func (p *Probe) recordEnd(err error) { p.successHist = p.successHist.Next() } +// ProbeStatus indicates the status of a probe. +type ProbeStatus string + +const ( + ProbeStatusUnknown = "unknown" + ProbeStatusRunning = "running" + ProbeStatusFailed = "failed" + ProbeStatusSucceeded = "succeeded" +) + // ProbeInfo is a snapshot of the configuration and state of a Probe. type ProbeInfo struct { Name string @@ -374,7 +412,7 @@ type ProbeInfo struct { Start time.Time End time.Time Latency time.Duration - Result bool + Status ProbeStatus Error string RecentResults []bool RecentLatencies []time.Duration @@ -402,6 +440,10 @@ func (pb ProbeInfo) RecentMedianLatency() time.Duration { return pb.RecentLatencies[len(pb.RecentLatencies)/2] } +func (pb ProbeInfo) Continuous() bool { + return pb.Interval < 0 +} + // ProbeInfo returns the state of all probes. func (p *Prober) ProbeInfo() map[string]ProbeInfo { out := map[string]ProbeInfo{} @@ -429,9 +471,14 @@ func (probe *Probe) probeInfoLocked() ProbeInfo { Labels: probe.metricLabels, Start: probe.start, End: probe.end, - Result: probe.succeeded, } - if probe.lastErr != nil { + inf.Status = ProbeStatusUnknown + if probe.end.Before(probe.start) { + inf.Status = ProbeStatusRunning + } else if probe.succeeded { + inf.Status = ProbeStatusSucceeded + } else if probe.lastErr != nil { + inf.Status = ProbeStatusFailed inf.Error = probe.lastErr.Error() } if probe.latency > 0 { @@ -467,7 +514,7 @@ func (p *Prober) RunHandler(w http.ResponseWriter, r *http.Request) error { p.mu.Lock() probe, ok := p.probes[name] p.mu.Unlock() - if !ok { + if !ok || probe.IsContinuous() { return tsweb.Error(http.StatusNotFound, fmt.Sprintf("unknown probe %q", name), nil) } @@ -531,7 +578,8 @@ func (p *Probe) Collect(ch chan<- prometheus.Metric) { if !p.start.IsZero() { ch <- prometheus.MustNewConstMetric(p.mStartTime, prometheus.GaugeValue, float64(p.start.Unix())) } - if p.end.IsZero() { + // For periodic probes that haven't ended, don't collect probe metrics yet. 
+ if p.end.IsZero() && !p.IsContinuous() { return } ch <- prometheus.MustNewConstMetric(p.mEndTime, prometheus.GaugeValue, float64(p.end.Unix())) diff --git a/prober/prober_test.go b/prober/prober_test.go index 742a914b2..3905bfbc9 100644 --- a/prober/prober_test.go +++ b/prober/prober_test.go @@ -316,7 +316,7 @@ func TestProberProbeInfo(t *testing.T) { Interval: probeInterval, Labels: map[string]string{"class": "", "name": "probe1"}, Latency: 500 * time.Millisecond, - Result: true, + Status: ProbeStatusSucceeded, RecentResults: []bool{true}, RecentLatencies: []time.Duration{500 * time.Millisecond}, }, @@ -324,6 +324,7 @@ func TestProberProbeInfo(t *testing.T) { Name: "probe2", Interval: probeInterval, Labels: map[string]string{"class": "", "name": "probe2"}, + Status: ProbeStatusFailed, Error: "error2", RecentResults: []bool{false}, RecentLatencies: nil, // no latency for failed probes @@ -349,7 +350,7 @@ func TestProbeInfoRecent(t *testing.T) { }{ { name: "no_runs", - wantProbeInfo: ProbeInfo{}, + wantProbeInfo: ProbeInfo{Status: ProbeStatusUnknown}, wantRecentSuccessRatio: 0, wantRecentMedianLatency: 0, }, @@ -358,7 +359,7 @@ func TestProbeInfoRecent(t *testing.T) { results: []probeResult{{latency: 100 * time.Millisecond, err: nil}}, wantProbeInfo: ProbeInfo{ Latency: 100 * time.Millisecond, - Result: true, + Status: ProbeStatusSucceeded, RecentResults: []bool{true}, RecentLatencies: []time.Duration{100 * time.Millisecond}, }, @@ -369,7 +370,7 @@ func TestProbeInfoRecent(t *testing.T) { name: "single_failure", results: []probeResult{{latency: 100 * time.Millisecond, err: errors.New("error123")}}, wantProbeInfo: ProbeInfo{ - Result: false, + Status: ProbeStatusFailed, RecentResults: []bool{false}, RecentLatencies: nil, Error: "error123", @@ -390,7 +391,7 @@ func TestProbeInfoRecent(t *testing.T) { {latency: 80 * time.Millisecond, err: nil}, }, wantProbeInfo: ProbeInfo{ - Result: true, + Status: ProbeStatusSucceeded, Latency: 80 * time.Millisecond, RecentResults: []bool{false, true, true, false, true, true, false, true}, RecentLatencies: []time.Duration{ @@ -420,7 +421,7 @@ func TestProbeInfoRecent(t *testing.T) { {latency: 110 * time.Millisecond, err: nil}, }, wantProbeInfo: ProbeInfo{ - Result: true, + Status: ProbeStatusSucceeded, Latency: 110 * time.Millisecond, RecentResults: []bool{true, true, true, true, true, true, true, true, true, true}, RecentLatencies: []time.Duration{ @@ -483,7 +484,7 @@ func TestProberRunHandler(t *testing.T) { ProbeInfo: ProbeInfo{ Name: "success", Interval: probeInterval, - Result: true, + Status: ProbeStatusSucceeded, RecentResults: []bool{true, true}, }, PreviousSuccessRatio: 1, @@ -498,7 +499,7 @@ func TestProberRunHandler(t *testing.T) { ProbeInfo: ProbeInfo{ Name: "failure", Interval: probeInterval, - Result: false, + Status: ProbeStatusFailed, Error: "error123", RecentResults: []bool{false, false}, }, diff --git a/prober/status.go b/prober/status.go index aa9ef99d0..20fbeec58 100644 --- a/prober/status.go +++ b/prober/status.go @@ -62,8 +62,9 @@ func (p *Prober) StatusHandler(opts ...statusHandlerOpt) tsweb.ReturnHandlerFunc return func(w http.ResponseWriter, r *http.Request) error { type probeStatus struct { ProbeInfo - TimeSinceLast time.Duration - Links map[string]template.URL + TimeSinceLastStart time.Duration + TimeSinceLastEnd time.Duration + Links map[string]template.URL } vars := struct { Title string @@ -81,12 +82,15 @@ func (p *Prober) StatusHandler(opts ...statusHandlerOpt) tsweb.ReturnHandlerFunc for name, info := range p.ProbeInfo() { 
vars.TotalProbes++ - if !info.Result { + if info.Error != "" { vars.UnhealthyProbes++ } s := probeStatus{ProbeInfo: info} + if !info.Start.IsZero() { + s.TimeSinceLastStart = time.Since(info.Start).Truncate(time.Second) + } if !info.End.IsZero() { - s.TimeSinceLast = time.Since(info.End).Truncate(time.Second) + s.TimeSinceLastEnd = time.Since(info.End).Truncate(time.Second) } for textTpl, urlTpl := range params.probeLinks { text, err := renderTemplate(textTpl, info) diff --git a/prober/status.html b/prober/status.html index ff0f06c13..d26588da1 100644 --- a/prober/status.html +++ b/prober/status.html @@ -73,8 +73,9 @@ Name Probe Class & Labels Interval - Last Attempt - Success + Last Finished + Last Started + Status Latency Last Error @@ -85,9 +86,11 @@ {{$name}} {{range $text, $url := $probeInfo.Links}}
- + {{if not $probeInfo.Continuous}} + + {{end}} {{end}} {{$probeInfo.Class}}
@@ -97,28 +100,48 @@ {{end}} - {{$probeInfo.Interval}} - - {{if $probeInfo.TimeSinceLast}} - {{$probeInfo.TimeSinceLast.String}} ago
+ + {{if $probeInfo.Continuous}} + Continuous + {{else}} + {{$probeInfo.Interval}} + {{end}} + + + {{if $probeInfo.TimeSinceLastEnd}} + {{$probeInfo.TimeSinceLastEnd.String}} ago
{{$probeInfo.End.Format "2006-01-02T15:04:05Z07:00"}} {{else}} Never {{end}} + + {{if $probeInfo.TimeSinceLastStart}} + {{$probeInfo.TimeSinceLastStart.String}} ago
+ {{$probeInfo.Start.Format "2006-01-02T15:04:05Z07:00"}} + {{else}} + Never + {{end}} + - {{if $probeInfo.Result}} - {{$probeInfo.Result}} + {{if $probeInfo.Error}} + {{$probeInfo.Status}} {{else}} - {{$probeInfo.Result}} + {{$probeInfo.Status}} {{end}}
-
Recent: {{$probeInfo.RecentResults}}
-
Mean: {{$probeInfo.RecentSuccessRatio}}
+ {{if not $probeInfo.Continuous}} +
Recent: {{$probeInfo.RecentResults}}
+
Mean: {{$probeInfo.RecentSuccessRatio}}
+ {{end}} - {{$probeInfo.Latency.String}} -
Recent: {{$probeInfo.RecentLatencies}}
-
Median: {{$probeInfo.RecentMedianLatency}}
+ {{if $probeInfo.Continuous}} + n/a + {{else}} + {{$probeInfo.Latency.String}} +
Recent: {{$probeInfo.RecentLatencies}}
+
Median: {{$probeInfo.RecentMedianLatency}}
+ {{end}} {{$probeInfo.Error}} From 2d4edd80f11b6263a4ef23c4e39032af00279b5a Mon Sep 17 00:00:00 2001 From: Will Norris Date: Thu, 19 Dec 2024 13:11:25 -0800 Subject: [PATCH 0241/1708] cmd/systray: add extra padding around notification icon Some notification managers crop the application icon to a circle, so ensure we have enough padding to account for that. Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- cmd/systray/logo.go | 8 +++++++- cmd/systray/systray.go | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/cmd/systray/logo.go b/cmd/systray/logo.go index cd79c94a0..ef8caca66 100644 --- a/cmd/systray/logo.go +++ b/cmd/systray/logo.go @@ -128,8 +128,14 @@ var ( // render returns a PNG image of the logo. func (logo tsLogo) render() *bytes.Buffer { - const radius = 25 const borderUnits = 1 + return logo.renderWithBorder(borderUnits) +} + +// renderWithBorder returns a PNG image of the logo with the specified border width. +// One border unit is equal to the radius of a tailscale logo dot. +func (logo tsLogo) renderWithBorder(borderUnits int) *bytes.Buffer { + const radius = 25 dim := radius * (8 + borderUnits*2) dc := gg.NewContext(dim, dim) diff --git a/cmd/systray/systray.go b/cmd/systray/systray.go index d175b55f3..a3cd19c64 100644 --- a/cmd/systray/systray.go +++ b/cmd/systray/systray.go @@ -62,7 +62,7 @@ func onReady() { // dbus wants a file path for notification icons, so copy to a temp file. appIcon, _ = os.CreateTemp("", "tailscale-systray.png") - io.Copy(appIcon, connected.render()) + io.Copy(appIcon, connected.renderWithBorder(3)) chState = make(chan ipn.State, 1) From e8f1721147eb8d709232363e67cf96878adf8c9c Mon Sep 17 00:00:00 2001 From: James Tucker Date: Tue, 17 Dec 2024 12:22:44 -0800 Subject: [PATCH 0242/1708] syncs: add ShardedInt expvar.Var type ShardedInt provides an int type expvar.Var that supports more efficient writes at high frequencies (one order of magnitude on an M1 Max, much more on NUMA systems). There are two implementations of ShardValue, one that abuses sync.Pool that will work on current public Go versions, and one that takes a dependency on a runtime.TailscaleP function exposed in Tailscale's Go fork. The sync.Pool variant has about 10x the throughput of a single atomic integer on an M1 Max, and the runtime.TailscaleP variant is about 10x faster than the sync.Pool variant. Neither variant has perfect distribution or always avoids cross-CPU sharing, as there is no locking or affinity to ensure that the time of yield is on the same core as the time of core biasing, but in the average case the distributions are enough to provide substantially better performance. See golang/go#18802 for a related upstream proposal.
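
For a sense of how the new type is meant to be consumed, here is a minimal usage sketch; it is not part of this patch, and the counter name and workload are invented, but the API (NewShardedInt, Add, Value, GetDistribution, and the expvar.Var String method) is the one added below:

```go
package main

import (
	"expvar"
	"fmt"
	"sync"

	"tailscale.com/syncs"
)

func main() {
	// Hypothetical hot-path counter: ShardedInt implements expvar.Var
	// (via String), so it can be published like any other expvar.
	hits := syncs.NewShardedInt()
	expvar.Publish("hits", hits)

	var wg sync.WaitGroup
	for range 8 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for range 100_000 {
				hits.Add(1) // each Add lands on a (mostly) per-CPU shard
			}
		}()
	}
	wg.Wait()

	// Value sums all shards; GetDistribution shows how writes spread out.
	fmt.Println(hits.Value(), hits.GetDistribution())
}
```

Internally (per shardedint.go below), each Add goes through ShardValue.One to a cache-line-padded per-shard atomic.Int64, and Value sums the shards, which is what avoids funneling every writer through a single contended expvar.Int.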
Updates tailscale/go#109 Updates tailscale/corp#25450 Signed-off-by: James Tucker --- go.toolchain.rev | 2 +- syncs/shardedint.go | 69 ++++++++++++++++++++ syncs/shardedint_test.go | 119 ++++++++++++++++++++++++++++++++++ syncs/shardvalue.go | 36 ++++++++++ syncs/shardvalue_go.go | 36 ++++++++++ syncs/shardvalue_tailscale.go | 24 +++++++ syncs/shardvalue_test.go | 119 ++++++++++++++++++++++++++++++++++ 7 files changed, 404 insertions(+), 1 deletion(-) create mode 100644 syncs/shardedint.go create mode 100644 syncs/shardedint_test.go create mode 100644 syncs/shardvalue.go create mode 100644 syncs/shardvalue_go.go create mode 100644 syncs/shardvalue_tailscale.go create mode 100644 syncs/shardvalue_test.go diff --git a/go.toolchain.rev b/go.toolchain.rev index 7be85deb6..e90440d41 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -e005697288a8d2fadc87bb7c3e2c74778d08554a +161c3b79ed91039e65eb148f2547dea6b91e2247 diff --git a/syncs/shardedint.go b/syncs/shardedint.go new file mode 100644 index 000000000..28c4168d5 --- /dev/null +++ b/syncs/shardedint.go @@ -0,0 +1,69 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package syncs + +import ( + "encoding/json" + "strconv" + "sync/atomic" + + "golang.org/x/sys/cpu" +) + +// ShardedInt provides a sharded atomic int64 value that optimizes high +// frequency (Mhz range and above) writes in highly parallel workloads. +// The zero value is not safe for use; use [NewShardedInt]. +// ShardedInt implements the expvar.Var interface. +type ShardedInt struct { + sv *ShardValue[intShard] +} + +// NewShardedInt returns a new [ShardedInt]. +func NewShardedInt() *ShardedInt { + return &ShardedInt{ + sv: NewShardValue[intShard](), + } +} + +// Add adds delta to the value. +func (m *ShardedInt) Add(delta int64) { + m.sv.One(func(v *intShard) { + v.Add(delta) + }) +} + +type intShard struct { + atomic.Int64 + _ cpu.CacheLinePad // avoid false sharing of neighboring shards +} + +// Value returns the current value. +func (m *ShardedInt) Value() int64 { + var v int64 + for s := range m.sv.All { + v += s.Load() + } + return v +} + +// GetDistribution returns the current value in each shard. +// This is intended for observability/debugging only. 
+func (m *ShardedInt) GetDistribution() []int64 { + v := make([]int64, 0, m.sv.Len()) + for s := range m.sv.All { + v = append(v, s.Load()) + } + return v +} + +// String implements the expvar.Var interface +func (m *ShardedInt) String() string { + v, _ := json.Marshal(m.Value()) + return string(v) +} + +// AppendText implements the encoding.TextAppender interface +func (m *ShardedInt) AppendText(b []byte) ([]byte, error) { + return strconv.AppendInt(b, m.Value(), 10), nil +} diff --git a/syncs/shardedint_test.go b/syncs/shardedint_test.go new file mode 100644 index 000000000..d355a1540 --- /dev/null +++ b/syncs/shardedint_test.go @@ -0,0 +1,119 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package syncs + +import ( + "expvar" + "sync" + "testing" + + "tailscale.com/tstest" +) + +var ( + _ expvar.Var = (*ShardedInt)(nil) + // TODO(raggi): future go version: + // _ encoding.TextAppender = (*ShardedInt)(nil) +) + +func BenchmarkShardedInt(b *testing.B) { + b.ReportAllocs() + + b.Run("expvar", func(b *testing.B) { + var m expvar.Int + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + m.Add(1) + } + }) + }) + + b.Run("sharded int", func(b *testing.B) { + m := NewShardedInt() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + m.Add(1) + } + }) + }) +} + +func TestShardedInt(t *testing.T) { + t.Run("basics", func(t *testing.T) { + m := NewShardedInt() + if got, want := m.Value(), int64(0); got != want { + t.Errorf("got %v, want %v", got, want) + } + m.Add(1) + if got, want := m.Value(), int64(1); got != want { + t.Errorf("got %v, want %v", got, want) + } + m.Add(2) + if got, want := m.Value(), int64(3); got != want { + t.Errorf("got %v, want %v", got, want) + } + m.Add(-1) + if got, want := m.Value(), int64(2); got != want { + t.Errorf("got %v, want %v", got, want) + } + }) + + t.Run("high concurrency", func(t *testing.T) { + m := NewShardedInt() + wg := sync.WaitGroup{} + numWorkers := 1000 + numIncrements := 1000 + wg.Add(numWorkers) + for i := 0; i < numWorkers; i++ { + go func() { + defer wg.Done() + for i := 0; i < numIncrements; i++ { + m.Add(1) + } + }() + } + wg.Wait() + if got, want := m.Value(), int64(numWorkers*numIncrements); got != want { + t.Errorf("got %v, want %v", got, want) + } + for i, shard := range m.GetDistribution() { + t.Logf("shard %d: %d", i, shard) + } + }) + + t.Run("encoding.TextAppender", func(t *testing.T) { + m := NewShardedInt() + m.Add(1) + b := make([]byte, 0, 10) + b, err := m.AppendText(b) + if err != nil { + t.Fatal(err) + } + if got, want := string(b), "1"; got != want { + t.Errorf("got %v, want %v", got, want) + } + }) + + t.Run("allocs", func(t *testing.T) { + m := NewShardedInt() + tstest.MinAllocsPerRun(t, 0, func() { + m.Add(1) + _ = m.Value() + }) + + // TODO(raggi): fix access to expvar's internal append based + // interface, unfortunately it's not currently closed for external + // use, this will alloc when it escapes. 
+ tstest.MinAllocsPerRun(t, 0, func() { + m.Add(1) + _ = m.String() + }) + + b := make([]byte, 0, 10) + tstest.MinAllocsPerRun(t, 0, func() { + m.Add(1) + m.AppendText(b) + }) + }) +} diff --git a/syncs/shardvalue.go b/syncs/shardvalue.go new file mode 100644 index 000000000..b1474477c --- /dev/null +++ b/syncs/shardvalue.go @@ -0,0 +1,36 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package syncs + +// TODO(raggi): this implementation is still imperfect as it will still result +// in cross CPU sharing periodically, we instead really want a per-CPU shard +// key, but the limitations of calling platform code make reaching for even the +// getcpu vdso very painful. See https://github.com/golang/go/issues/18802, and +// hopefully one day we can replace with a primitive that falls out of that +// work. + +// ShardValue contains a value sharded over a set of shards. +// In order to be useful, T should be aligned to cache lines. +// Users must organize that usage in One and All is concurrency safe. +// The zero value is not safe for use; use [NewShardValue]. +type ShardValue[T any] struct { + shards []T + + //lint:ignore U1000 unused under tailscale_go builds. + pool shardValuePool +} + +// Len returns the number of shards. +func (sp *ShardValue[T]) Len() int { + return len(sp.shards) +} + +// All yields a pointer to the value in each shard. +func (sp *ShardValue[T]) All(yield func(*T) bool) { + for i := range sp.shards { + if !yield(&sp.shards[i]) { + return + } + } +} diff --git a/syncs/shardvalue_go.go b/syncs/shardvalue_go.go new file mode 100644 index 000000000..9b9d252a7 --- /dev/null +++ b/syncs/shardvalue_go.go @@ -0,0 +1,36 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !tailscale_go + +package syncs + +import ( + "runtime" + "sync" + "sync/atomic" +) + +type shardValuePool struct { + atomic.Int64 + sync.Pool +} + +// NewShardValue constructs a new ShardValue[T] with a shard per CPU. +func NewShardValue[T any]() *ShardValue[T] { + sp := &ShardValue[T]{ + shards: make([]T, runtime.NumCPU()), + } + sp.pool.New = func() any { + i := sp.pool.Add(1) - 1 + return &sp.shards[i%int64(len(sp.shards))] + } + return sp +} + +// One yields a pointer to a single shard value with best-effort P-locality. +func (sp *ShardValue[T]) One(yield func(*T)) { + v := sp.pool.Get().(*T) + yield(v) + sp.pool.Put(v) +} diff --git a/syncs/shardvalue_tailscale.go b/syncs/shardvalue_tailscale.go new file mode 100644 index 000000000..8ef778ff3 --- /dev/null +++ b/syncs/shardvalue_tailscale.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// TODO(raggi): update build tag after toolchain update +//go:build tailscale_go + +package syncs + +import ( + "runtime" +) + +//lint:ignore U1000 unused under tailscale_go builds. +type shardValuePool struct{} + +// NewShardValue constructs a new ShardValue[T] with a shard per CPU. +func NewShardValue[T any]() *ShardValue[T] { + return &ShardValue[T]{shards: make([]T, runtime.NumCPU())} +} + +// One yields a pointer to a single shard value with best-effort P-locality. 
+func (sp *ShardValue[T]) One(f func(*T)) { + f(&sp.shards[runtime.TailscaleCurrentP()%len(sp.shards)]) +} diff --git a/syncs/shardvalue_test.go b/syncs/shardvalue_test.go new file mode 100644 index 000000000..8f6ac6414 --- /dev/null +++ b/syncs/shardvalue_test.go @@ -0,0 +1,119 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package syncs + +import ( + "math" + "runtime" + "sync" + "sync/atomic" + "testing" + + "golang.org/x/sys/cpu" +) + +func TestShardValue(t *testing.T) { + type intVal struct { + atomic.Int64 + _ cpu.CacheLinePad + } + + t.Run("One", func(t *testing.T) { + sv := NewShardValue[intVal]() + sv.One(func(v *intVal) { + v.Store(10) + }) + + var v int64 + for i := range sv.shards { + v += sv.shards[i].Load() + } + if v != 10 { + t.Errorf("got %v, want 10", v) + } + }) + + t.Run("All", func(t *testing.T) { + sv := NewShardValue[intVal]() + for i := range sv.shards { + sv.shards[i].Store(int64(i)) + } + + var total int64 + sv.All(func(v *intVal) bool { + total += v.Load() + return true + }) + // triangle coefficient lower one order due to 0 index + want := int64(len(sv.shards) * (len(sv.shards) - 1) / 2) + if total != want { + t.Errorf("got %v, want %v", total, want) + } + }) + + t.Run("Len", func(t *testing.T) { + sv := NewShardValue[intVal]() + if got, want := sv.Len(), runtime.NumCPU(); got != want { + t.Errorf("got %v, want %v", got, want) + } + }) + + t.Run("distribution", func(t *testing.T) { + sv := NewShardValue[intVal]() + + goroutines := 1000 + iterations := 10000 + var wg sync.WaitGroup + wg.Add(goroutines) + for i := 0; i < goroutines; i++ { + go func() { + defer wg.Done() + for i := 0; i < iterations; i++ { + sv.One(func(v *intVal) { + v.Add(1) + }) + } + }() + } + wg.Wait() + + var ( + total int64 + distribution []int64 + ) + t.Logf("distribution:") + sv.All(func(v *intVal) bool { + total += v.Load() + distribution = append(distribution, v.Load()) + t.Logf("%d", v.Load()) + return true + }) + + if got, want := total, int64(goroutines*iterations); got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := len(distribution), runtime.NumCPU(); got != want { + t.Errorf("got %v, want %v", got, want) + } + + mean := total / int64(len(distribution)) + for _, v := range distribution { + if v < mean/10 || v > mean*10 { + t.Logf("distribution is very unbalanced: %v", distribution) + } + } + t.Logf("mean: %d", mean) + + var standardDev int64 + for _, v := range distribution { + standardDev += ((v - mean) * (v - mean)) + } + standardDev = int64(math.Sqrt(float64(standardDev / int64(len(distribution))))) + t.Logf("stdev: %d", standardDev) + + if standardDev > mean/3 { + t.Logf("standard deviation is too high: %v", standardDev) + } + }) +} From 89adcd853dab6463d437e3a023b90704b66f3a3f Mon Sep 17 00:00:00 2001 From: Will Norris Date: Thu, 19 Dec 2024 11:31:31 -0800 Subject: [PATCH 0243/1708] cmd/systray: improve profile menu Bring UI closer to macOS and windows: - split login and tailnet name over separate lines - render profile picture (with very simple caching) - use checkbox to indicate active profile. I've not found any desktops that can't render checkboxes, so I'd like to explore other options if needed. 
Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- cmd/systray/systray.go | 55 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 48 insertions(+), 7 deletions(-) diff --git a/cmd/systray/systray.go b/cmd/systray/systray.go index a3cd19c64..504ca5b8c 100644 --- a/cmd/systray/systray.go +++ b/cmd/systray/systray.go @@ -12,6 +12,7 @@ import ( "fmt" "io" "log" + "net/http" "os" "strings" "sync" @@ -118,10 +119,11 @@ func (menu *Menu) rebuild(state state) { systray.AddSeparator() account := "Account" - if state.curProfile.Name != "" { - account += fmt.Sprintf(" (%s)", state.curProfile.Name) + if pt := profileTitle(state.curProfile); pt != "" { + account = pt } accounts := systray.AddMenuItem(account, "") + setRemoteIcon(accounts, state.curProfile.UserProfile.ProfilePicURL) // The dbus message about this menu item must propagate to the receiving // end before we attach any submenu items. Otherwise the receiver may not // yet record the parent menu item and error out. @@ -132,13 +134,14 @@ func (menu *Menu) rebuild(state state) { // Aggregate all clicks into a shared channel. menu.accountsCh = make(chan ipn.ProfileID) for _, profile := range state.allProfiles { - title := fmt.Sprintf("%s (%s)", profile.Name, profile.NetworkProfile.DomainName) - // Note: we could use AddSubMenuItemCheckbox instead of this formatting - // hack, but checkboxes don't work across all desktops unfortunately. + title := profileTitle(profile) + var item *systray.MenuItem if profile.ID == state.curProfile.ID { - title = "* " + title + item = accounts.AddSubMenuItemCheckbox(title, "", true) + } else { + item = accounts.AddSubMenuItem(title, "") } - item := accounts.AddSubMenuItem(title, "") + setRemoteIcon(item, profile.UserProfile.ProfilePicURL) go func(profile ipn.LoginProfile) { for { select { @@ -170,6 +173,44 @@ func (menu *Menu) rebuild(state state) { go menu.eventLoop(ctx) } +// profileTitle returns the title string for a profile menu item. +func profileTitle(profile ipn.LoginProfile) string { + title := profile.Name + if profile.NetworkProfile.DomainName != "" { + title += "\n" + profile.NetworkProfile.DomainName + } + return title +} + +var ( + cacheMu sync.Mutex + httpCache = map[string][]byte{} // URL => response body +) + +// setRemoteIcon sets the icon for menu to the specified remote image. +// Remote images are fetched as needed and cached. +func setRemoteIcon(menu *systray.MenuItem, urlStr string) { + if menu == nil || urlStr == "" { + return + } + + cacheMu.Lock() + b, ok := httpCache[urlStr] + if !ok { + resp, err := http.Get(urlStr) + if err == nil && resp.StatusCode == http.StatusOK { + b, _ = io.ReadAll(resp.Body) + httpCache[urlStr] = b + resp.Body.Close() + } + } + cacheMu.Unlock() + + if len(b) > 0 { + menu.SetIcon(b) + } +} + // eventLoop is the main event loop for handling click events on menu items // and responding to Tailscale state changes. // This method does not return until ctx.Done is closed. From 3adad364f137b072ef0342bf51aa23c4647908ba Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Fri, 20 Dec 2024 16:12:56 +0000 Subject: [PATCH 0244/1708] cmd/k8s-operator,k8s-operator: include top-level CRD descriptions (#14435) When reading https://doc.crds.dev/github.com/tailscale/tailscale/tailscale.com/ProxyGroup/v1alpha1@v1.78.3 I noticed there is no top-level description for ProxyGroup and Recorder. Add one to give some high-level direction. 
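
For context on how these descriptions reach the YAML (a sketch, not taken from the patch): the CRD and api.md changes below are generated output, and the only hand-written piece is the Go doc comment attached to each API type, which the code generators copy into the CRD's top-level openAPIV3Schema description and the generated API reference. A minimal illustration, using an invented Widget type:

```go
package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +kubebuilder:object:root=true
// +kubebuilder:resource:scope=Cluster,shortName=wd
//
// Widget is a hypothetical example resource. These non-marker comment
// lines attached to the type are what the generators copy into the CRD's
// top-level description, which is the mechanism this commit relies on
// for ProxyGroup and Recorder.
//
// More info: https://example.com/docs/widget
type Widget struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
}
```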
Updates #cleanup Change-Id: I3666c5445be272ea5a1d4d02b6d5ad4c23afb09f Signed-off-by: Tom Proctor --- .../deploy/crds/tailscale.com_proxygroups.yaml | 11 +++++++++++ .../deploy/crds/tailscale.com_recorders.yaml | 6 ++++++ cmd/k8s-operator/deploy/manifests/operator.yaml | 17 +++++++++++++++++ k8s-operator/api.md | 13 +++++++++++++ k8s-operator/apis/v1alpha1/types_proxygroup.go | 10 ++++++++++ k8s-operator/apis/v1alpha1/types_recorder.go | 5 +++++ 6 files changed, 62 insertions(+) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index 66701bdf4..5e6b53785 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -23,6 +23,17 @@ spec: name: v1alpha1 schema: openAPIV3Schema: + description: |- + ProxyGroup defines a set of Tailscale devices that will act as proxies. + Currently only egress ProxyGroups are supported. + + Use the tailscale.com/proxy-group annotation on a Service to specify that + the egress proxy should be implemented by a ProxyGroup instead of a single + dedicated proxy. In addition to running a highly available set of proxies, + ProxyGroup also allows for serving many annotated Services from a single + set of proxies to minimise resource consumption. + + More info: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress type: object required: - spec diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml index fda8bcebd..5b22297d8 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml @@ -27,6 +27,12 @@ spec: name: v1alpha1 schema: openAPIV3Schema: + description: |- + Recorder defines a tsrecorder device for recording SSH sessions. By default, + it will store recordings in a local ephemeral volume. If you want to persist + recordings, you can configure an S3-compatible API for storage. + + More info: https://tailscale.com/kb/1484/kubernetes-operator-deploying-tsrecorder type: object required: - spec diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 210a7b434..dd34c2a1e 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2724,6 +2724,17 @@ spec: name: v1alpha1 schema: openAPIV3Schema: + description: |- + ProxyGroup defines a set of Tailscale devices that will act as proxies. + Currently only egress ProxyGroups are supported. + + Use the tailscale.com/proxy-group annotation on a Service to specify that + the egress proxy should be implemented by a ProxyGroup instead of a single + dedicated proxy. In addition to running a highly available set of proxies, + ProxyGroup also allows for serving many annotated Services from a single + set of proxies to minimise resource consumption. + + More info: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress properties: apiVersion: description: |- @@ -2916,6 +2927,12 @@ spec: name: v1alpha1 schema: openAPIV3Schema: + description: |- + Recorder defines a tsrecorder device for recording SSH sessions. By default, + it will store recordings in a local ephemeral volume. If you want to persist + recordings, you can configure an S3-compatible API for storage. 
+ + More info: https://tailscale.com/kb/1484/kubernetes-operator-deploying-tsrecorder properties: apiVersion: description: |- diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 08e1284fe..327f95ea9 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -508,7 +508,16 @@ _Appears in:_ +ProxyGroup defines a set of Tailscale devices that will act as proxies. +Currently only egress ProxyGroups are supported. +Use the tailscale.com/proxy-group annotation on a Service to specify that +the egress proxy should be implemented by a ProxyGroup instead of a single +dedicated proxy. In addition to running a highly available set of proxies, +ProxyGroup also allows for serving many annotated Services from a single +set of proxies to minimise resource consumption. + +More info: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress @@ -602,7 +611,11 @@ _Appears in:_ +Recorder defines a tsrecorder device for recording SSH sessions. By default, +it will store recordings in a local ephemeral volume. If you want to persist +recordings, you can configure an S3-compatible API for storage. +More info: https://tailscale.com/kb/1484/kubernetes-operator-deploying-tsrecorder diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index 7e5515ba9..e7397f33e 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -14,6 +14,16 @@ import ( // +kubebuilder:resource:scope=Cluster,shortName=pg // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "ProxyGroupReady")].reason`,description="Status of the deployed ProxyGroup resources." +// ProxyGroup defines a set of Tailscale devices that will act as proxies. +// Currently only egress ProxyGroups are supported. +// +// Use the tailscale.com/proxy-group annotation on a Service to specify that +// the egress proxy should be implemented by a ProxyGroup instead of a single +// dedicated proxy. In addition to running a highly available set of proxies, +// ProxyGroup also allows for serving many annotated Services from a single +// set of proxies to minimise resource consumption. +// +// More info: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress type ProxyGroup struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/k8s-operator/apis/v1alpha1/types_recorder.go b/k8s-operator/apis/v1alpha1/types_recorder.go index 3728154b4..a32b8eb93 100644 --- a/k8s-operator/apis/v1alpha1/types_recorder.go +++ b/k8s-operator/apis/v1alpha1/types_recorder.go @@ -16,6 +16,11 @@ import ( // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "RecorderReady")].reason`,description="Status of the deployed Recorder resources." // +kubebuilder:printcolumn:name="URL",type="string",JSONPath=`.status.devices[?(@.url != "")].url`,description="URL on which the UI is exposed if enabled." +// Recorder defines a tsrecorder device for recording SSH sessions. By default, +// it will store recordings in a local ephemeral volume. If you want to persist +// recordings, you can configure an S3-compatible API for storage. 
+// +// More info: https://tailscale.com/kb/1484/kubernetes-operator-deploying-tsrecorder type Recorder struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` From 5095efd62831110e65cad79740bf302492756f1e Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Fri, 20 Dec 2024 08:07:54 -0600 Subject: [PATCH 0245/1708] prober: make histogram buckets cumulative Histogram buckets should include counts for all values under the bucket ceiling, not just those between the ceiling and the next lower ceiling. See https://prometheus.io/docs/tutorials/understanding_metric_types/\#histogram Updates tailscale/corp#24522 Signed-off-by: Percy Wegmann --- prober/histogram.go | 1 - prober/histogram_test.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/prober/histogram.go b/prober/histogram.go index e9005b452..c544a5f79 100644 --- a/prober/histogram.go +++ b/prober/histogram.go @@ -45,6 +45,5 @@ func (h *histogram) add(v float64) { continue } h.bucketedCounts[b] += 1 - break } } diff --git a/prober/histogram_test.go b/prober/histogram_test.go index a569167e6..dbb5eda67 100644 --- a/prober/histogram_test.go +++ b/prober/histogram_test.go @@ -23,7 +23,7 @@ func TestHistogram(t *testing.T) { if diff := cmp.Diff(h.sum, 7.5); diff != "" { t.Errorf("wrong sum; (-got+want):%v", diff) } - if diff := cmp.Diff(h.bucketedCounts, map[float64]uint64{1: 2, 2: 2}); diff != "" { + if diff := cmp.Diff(h.bucketedCounts, map[float64]uint64{1: 2, 2: 4}); diff != "" { t.Errorf("wrong bucketedCounts; (-got+want):%v", diff) } } From 256da8dfb5fc30ff8ac6405ef66bbc1880e01e30 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Fri, 20 Dec 2024 09:11:04 -0800 Subject: [PATCH 0246/1708] cmd/systray: remove new menu delay on KDE The new menu delay added to fix libdbusmenu systrays causes problems with KDE. Given the state of wildly varying systray implementations, I suspect we may need more desktop-specific hacks, so I'm setting this up to accommodate that. Updates #1708 Updates #14431 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- cmd/systray/systray.go | 37 ++++++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/cmd/systray/systray.go b/cmd/systray/systray.go index 504ca5b8c..5b20ddde4 100644 --- a/cmd/systray/systray.go +++ b/cmd/systray/systray.go @@ -14,6 +14,7 @@ import ( "log" "net/http" "os" + "runtime" "strings" "sync" "time" @@ -32,6 +33,10 @@ var ( chState chan ipn.State // tailscale state changes appIcon *os.File + + // newMenuDelay is the amount of time to sleep after creating a new menu, + // but before adding items to it. This works around a bug in some dbus implementations. + newMenuDelay time.Duration ) func main() { @@ -55,6 +60,30 @@ type Menu struct { eventCancel func() // cancel eventLoop } +func init() { + if runtime.GOOS != "linux" { + // so far, these tweaks are only needed on Linux + return + } + + desktop := strings.ToLower(os.Getenv("XDG_CURRENT_DESKTOP")) + switch desktop { + case "kde": + // KDE doesn't need a delay, and actually won't render submenus + // if we delay for more than about 400µs. + newMenuDelay = 0 + default: + // Add a slight delay to ensure the menu is created before adding items. 
+ // + // Systray implementations that use libdbusmenu sometimes process messages out of order, + // resulting in errors such as: + // (waybar:153009): LIBDBUSMENU-GTK-WARNING **: 18:07:11.551: Children but no menu, someone's been naughty with their 'children-display' property: 'submenu' + // + // See also: https://github.com/fyne-io/systray/issues/12 + newMenuDelay = 100 * time.Millisecond + } +} + func onReady() { log.Printf("starting") ctx := context.Background() @@ -124,13 +153,7 @@ func (menu *Menu) rebuild(state state) { } accounts := systray.AddMenuItem(account, "") setRemoteIcon(accounts, state.curProfile.UserProfile.ProfilePicURL) - // The dbus message about this menu item must propagate to the receiving - // end before we attach any submenu items. Otherwise the receiver may not - // yet record the parent menu item and error out. - // - // On waybar with libdbusmenu-gtk, this manifests as the following warning: - // (waybar:153009): LIBDBUSMENU-GTK-WARNING **: 18:07:11.551: Children but no menu, someone's been naughty with their 'children-display' property: 'submenu' - time.Sleep(100 * time.Millisecond) + time.Sleep(newMenuDelay) // Aggregate all clicks into a shared channel. menu.accountsCh = make(chan ipn.ProfileID) for _, profile := range state.allProfiles { From 887472312d7b896e8d59d235b68257e2bb7ea317 Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Fri, 20 Dec 2024 15:57:46 -0500 Subject: [PATCH 0247/1708] tailcfg: rename and retype ServiceHost capability (#14380) * tailcfg: rename and retype ServiceHost capability, add value type Updates tailscale/corp#22743. In #14046, this was accidentally made a PeerCapability when it should have been NodeCapability. Also, renaming it to use the nomenclature that we decided on after #14046 went up, and adding the type of the value that will be passed down in the RawMessage for this capability. This shouldn't break anything, since no one was using this string or variable yet. Signed-off-by: Naman Sood --- tailcfg/tailcfg.go | 32 +++++++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index be6c4f0be..ad07cff28 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -1453,11 +1453,6 @@ const ( // user groups as Kubernetes user groups. This capability is read by // peers that are Tailscale Kubernetes operator instances. PeerCapabilityKubernetes PeerCapability = "tailscale.com/cap/kubernetes" - - // PeerCapabilityServicesDestination grants a peer the ability to serve as - // a destination for a set of given VIP services, which is provided as the - // value of this key in NodeCapMap. - PeerCapabilityServicesDestination PeerCapability = "tailscale.com/cap/services-destination" ) // NodeCapMap is a map of capabilities to their optional values. It is valid for @@ -2401,6 +2396,15 @@ const ( // NodeAttrSSHEnvironmentVariables enables logic for handling environment variables sent // via SendEnv in the SSH server and applying them to the SSH session. NodeAttrSSHEnvironmentVariables NodeCapability = "ssh-env-vars" + + // NodeAttrServiceHost indicates the VIP Services for which the client is + // approved to act as a service host, and which IP addresses are assigned + // to those VIP Services. Any VIP Services that the client is not + // advertising can be ignored. + // Each value of this key in [NodeCapMap] is of type [ServiceIPMappings]. + // If multiple values of this key exist, they should be merged in sequence + // (replace conflicting keys). 
+ NodeAttrServiceHost NodeCapability = "service-host" ) // SetDNSRequest is a request to add a DNS record. @@ -2883,3 +2887,21 @@ type EarlyNoise struct { // For some request types, the header may have multiple values. (e.g. OldNodeKey // vs NodeKey) const LBHeader = "Ts-Lb" + +// ServiceIPMappings maps service names (strings that conform to +// [CheckServiceName]) to lists of IP addresses. This is used as the value of +// the [NodeAttrServiceHost] capability, to inform service hosts what IP +// addresses they need to listen on for each service that they are advertising. +// +// This is of the form: +// +// { +// "svc:samba": ["100.65.32.1", "fd7a:115c:a1e0::1234"], +// "svc:web": ["100.102.42.3", "fd7a:115c:a1e0::abcd"], +// } +// +// where the IP addresses are the IPs of the VIP services. These IPs are also +// provided in AllowedIPs, but this lets the client know which services +// correspond to those IPs. Any services that don't correspond to a service +// this client is hosting can be ignored. +type ServiceIPMappings map[string][]netip.Addr From cb59943501b068911eecd490119d3ca8f98c7129 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Fri, 20 Dec 2024 15:37:00 -0800 Subject: [PATCH 0248/1708] cmd/systray: add exit nodes menu This commit builds the exit node menu including the recommended exit node, if available, as well as tailnet and mullvad exit nodes. This does not yet update the menu based on changes in exit node outside of the systray app, which will come later. This also does not include the ability to run as an exit node. Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- cmd/systray/systray.go | 255 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 251 insertions(+), 4 deletions(-) diff --git a/cmd/systray/systray.go b/cmd/systray/systray.go index 5b20ddde4..1334a0351 100644 --- a/cmd/systray/systray.go +++ b/cmd/systray/systray.go @@ -7,14 +7,17 @@ package main import ( + "cmp" "context" "errors" "fmt" "io" "log" + "maps" "net/http" "os" "runtime" + "slices" "strings" "sync" "time" @@ -26,6 +29,7 @@ import ( "tailscale.com/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" + "tailscale.com/tailcfg" ) var ( @@ -51,11 +55,13 @@ type Menu struct { connect *systray.MenuItem disconnect *systray.MenuItem - self *systray.MenuItem - more *systray.MenuItem - quit *systray.MenuItem + self *systray.MenuItem + more *systray.MenuItem + exitNodes *systray.MenuItem + quit *systray.MenuItem accountsCh chan ipn.ProfileID + exitNodeCh chan tailcfg.StableNodeID // ID of selected exit node eventCancel func() // cancel eventLoop } @@ -80,7 +86,7 @@ func init() { // (waybar:153009): LIBDBUSMENU-GTK-WARNING **: 18:07:11.551: Children but no menu, someone's been naughty with their 'children-display' property: 'submenu' // // See also: https://github.com/fyne-io/systray/issues/12 - newMenuDelay = 100 * time.Millisecond + newMenuDelay = 10 * time.Millisecond } } @@ -187,6 +193,9 @@ func (menu *Menu) rebuild(state state) { } systray.AddSeparator() + menu.exitNodeCh = make(chan tailcfg.StableNodeID) + menu.rebuildExitNodeMenu(ctx) + menu.more = systray.AddMenuItem("More settings", "") menu.more.Enable() @@ -295,6 +304,26 @@ func (menu *Menu) eventLoop(ctx context.Context) { log.Printf("failed switching to profile ID %v: %v", id, err) } + case exitNode := <-menu.exitNodeCh: + if exitNode.IsZero() { + log.Print("disable exit node") + if err := localClient.SetUseExitNode(ctx, false); err != nil { + log.Printf("failed disabling exit node: 
%v", err) + } + } else { + log.Printf("enable exit node: %v", exitNode) + mp := &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + ExitNodeID: exitNode, + }, + ExitNodeIDSet: true, + } + if _, err := localClient.EditPrefs(ctx, mp); err != nil { + log.Printf("failed setting exit node: %v", err) + } + } + menu.rebuild(fetchState(ctx)) + case <-menu.quit.ClickedCh: systray.Quit() } @@ -375,6 +404,224 @@ func sendNotification(title, content string) { } } +func (menu *Menu) rebuildExitNodeMenu(ctx context.Context) { + status := menu.status + menu.exitNodes = systray.AddMenuItem("Exit Nodes", "") + time.Sleep(newMenuDelay) + + // register a click handler for a menu item to set nodeID as the exit node. + onClick := func(item *systray.MenuItem, nodeID tailcfg.StableNodeID) { + go func() { + for { + select { + case <-ctx.Done(): + return + case <-item.ClickedCh: + select { + case <-ctx.Done(): + return + case menu.exitNodeCh <- nodeID: + } + } + } + }() + } + + noExitNodeMenu := menu.exitNodes.AddSubMenuItemCheckbox("None", "", status.ExitNodeStatus == nil) + onClick(noExitNodeMenu, "") + + // Show recommended exit node if available. + if status.Self.CapMap.Contains(tailcfg.NodeAttrSuggestExitNodeUI) { + sugg, err := localClient.SuggestExitNode(ctx) + if err == nil { + title := "Recommended: " + if loc := sugg.Location; loc.Valid() && loc.Country() != "" { + flag := countryFlag(loc.CountryCode()) + title += fmt.Sprintf("%s %s: %s", flag, loc.Country(), loc.City()) + } else { + title += strings.Split(sugg.Name, ".")[0] + } + menu.exitNodes.AddSeparator() + rm := menu.exitNodes.AddSubMenuItemCheckbox(title, "", false) + onClick(rm, sugg.ID) + if status.ExitNodeStatus != nil && sugg.ID == status.ExitNodeStatus.ID { + rm.Check() + } + } + } + + // Add tailnet exit nodes if present. + var tailnetExitNodes []*ipnstate.PeerStatus + for _, ps := range status.Peer { + if ps.ExitNodeOption && ps.Location == nil { + tailnetExitNodes = append(tailnetExitNodes, ps) + } + } + if len(tailnetExitNodes) > 0 { + menu.exitNodes.AddSeparator() + menu.exitNodes.AddSubMenuItem("Tailnet Exit Nodes", "").Disable() + for _, ps := range status.Peer { + if !ps.ExitNodeOption || ps.Location != nil { + continue + } + name := strings.Split(ps.DNSName, ".")[0] + if !ps.Online { + name += " (offline)" + } + sm := menu.exitNodes.AddSubMenuItemCheckbox(name, "", false) + if !ps.Online { + sm.Disable() + } + if status.ExitNodeStatus != nil && ps.ID == status.ExitNodeStatus.ID { + sm.Check() + } + onClick(sm, ps.ID) + } + } + + // Add mullvad exit nodes if present. + var mullvadExitNodes mullvadPeers + if status.Self.CapMap.Contains("mullvad") { + mullvadExitNodes = newMullvadPeers(status) + } + if len(mullvadExitNodes.countries) > 0 { + menu.exitNodes.AddSeparator() + menu.exitNodes.AddSubMenuItem("Location-based Exit Nodes", "").Disable() + mullvadMenu := menu.exitNodes.AddSubMenuItemCheckbox("Mullvad VPN", "", false) + + for _, country := range mullvadExitNodes.sortedCountries() { + flag := countryFlag(country.code) + countryMenu := mullvadMenu.AddSubMenuItemCheckbox(flag+" "+country.name, "", false) + + // single-city country, no submenu + if len(country.cities) == 1 { + onClick(countryMenu, country.best.ID) + if status.ExitNodeStatus != nil { + for _, city := range country.cities { + for _, ps := range city.peers { + if status.ExitNodeStatus.ID == ps.ID { + mullvadMenu.Check() + countryMenu.Check() + } + } + } + } + continue + } + + // multi-city country, build submenu with "best available" option and cities. 
+ time.Sleep(newMenuDelay) + bm := countryMenu.AddSubMenuItemCheckbox("Best Available", "", false) + onClick(bm, country.best.ID) + countryMenu.AddSeparator() + + for _, city := range country.sortedCities() { + cityMenu := countryMenu.AddSubMenuItemCheckbox(city.name, "", false) + onClick(cityMenu, city.best.ID) + if status.ExitNodeStatus != nil { + for _, ps := range city.peers { + if status.ExitNodeStatus.ID == ps.ID { + mullvadMenu.Check() + countryMenu.Check() + cityMenu.Check() + } + } + } + } + } + } + + // TODO: "Allow Local Network Access" and "Run Exit Node" menu items +} + +// mullvadPeers contains all mullvad peer nodes, sorted by country and city. +type mullvadPeers struct { + countries map[string]*mvCountry // country code (uppercase) => country +} + +// sortedCountries returns countries containing mullvad nodes, sorted by name. +func (mp mullvadPeers) sortedCountries() []*mvCountry { + countries := slices.Collect(maps.Values(mp.countries)) + slices.SortFunc(countries, func(a, b *mvCountry) int { + return cmp.Compare(a.name, b.name) + }) + return countries +} + +type mvCountry struct { + code string + name string + best *ipnstate.PeerStatus // highest priority peer in the country + cities map[string]*mvCity // city code => city +} + +// sortedCities returns cities containing mullvad nodes, sorted by name. +func (mc *mvCountry) sortedCities() []*mvCity { + cities := slices.Collect(maps.Values(mc.cities)) + slices.SortFunc(cities, func(a, b *mvCity) int { + return cmp.Compare(a.name, b.name) + }) + return cities +} + +// countryFlag takes a 2-character ASCII string and returns the corresponding emoji flag. +// It returns the empty string on error. +func countryFlag(code string) string { + if len(code) != 2 { + return "" + } + runes := make([]rune, 0, 2) + for i := range 2 { + b := code[i] | 32 // lowercase + if b < 'a' || b > 'z' { + return "" + } + // https://en.wikipedia.org/wiki/Regional_indicator_symbol + runes = append(runes, 0x1F1E6+rune(b-'a')) + } + return string(runes) +} + +type mvCity struct { + name string + best *ipnstate.PeerStatus // highest priority peer in the city + peers []*ipnstate.PeerStatus +} + +func newMullvadPeers(status *ipnstate.Status) mullvadPeers { + countries := make(map[string]*mvCountry) + for _, ps := range status.Peer { + if !ps.ExitNodeOption || ps.Location == nil { + continue + } + loc := ps.Location + country, ok := countries[loc.CountryCode] + if !ok { + country = &mvCountry{ + code: loc.CountryCode, + name: loc.Country, + cities: make(map[string]*mvCity), + } + countries[loc.CountryCode] = country + } + city, ok := countries[loc.CountryCode].cities[loc.CityCode] + if !ok { + city = &mvCity{ + name: loc.City, + } + countries[loc.CountryCode].cities[loc.CityCode] = city + } + city.peers = append(city.peers, ps) + if city.best == nil || ps.Location.Priority > city.best.Location.Priority { + city.best = ps + } + if country.best == nil || ps.Location.Priority > country.best.Location.Priority { + country.best = ps + } + } + return mullvadPeers{countries} +} + func onExit() { log.Printf("exiting") os.Remove(appIcon.Name()) From 10d4057a64e5ad8c5e7f1bc786f61eeb76cc158f Mon Sep 17 00:00:00 2001 From: Will Norris Date: Fri, 20 Dec 2024 17:32:10 -0800 Subject: [PATCH 0249/1708] cmd/systray: add visual workarounds for gnome, mac, and windows Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- cmd/systray/systray.go | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git 
a/cmd/systray/systray.go b/cmd/systray/systray.go index 1334a0351..26316feeb 100644 --- a/cmd/systray/systray.go +++ b/cmd/systray/systray.go @@ -41,6 +41,10 @@ var ( // newMenuDelay is the amount of time to sleep after creating a new menu, // but before adding items to it. This works around a bug in some dbus implementations. newMenuDelay time.Duration + + // if true, treat all mullvad exit node countries as single-city. + // Instead of rendering a submenu with cities, just select the highest-priority peer. + hideMullvadCities bool ) func main() { @@ -74,6 +78,12 @@ func init() { desktop := strings.ToLower(os.Getenv("XDG_CURRENT_DESKTOP")) switch desktop { + case "gnome": + // GNOME expands submenus downward in the main menu, rather than flyouts to the side. + // Either as a result of that or another limitation, there seems to be a maximum depth of submenus. + // Mullvad countries that have a city submenu are not being rendered, and so can't be selected. + // Handle this by simply treating all mullvad countries as single-city and select the best peer. + hideMullvadCities = true case "kde": // KDE doesn't need a delay, and actually won't render submenus // if we delay for more than about 400µs. @@ -209,7 +219,12 @@ func (menu *Menu) rebuild(state state) { func profileTitle(profile ipn.LoginProfile) string { title := profile.Name if profile.NetworkProfile.DomainName != "" { - title += "\n" + profile.NetworkProfile.DomainName + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // windows and mac don't support multi-line menu + title += " (" + profile.NetworkProfile.DomainName + ")" + } else { + title += "\n" + profile.NetworkProfile.DomainName + } } return title } @@ -494,7 +509,7 @@ func (menu *Menu) rebuildExitNodeMenu(ctx context.Context) { countryMenu := mullvadMenu.AddSubMenuItemCheckbox(flag+" "+country.name, "", false) // single-city country, no submenu - if len(country.cities) == 1 { + if len(country.cities) == 1 || hideMullvadCities { onClick(countryMenu, country.best.ID) if status.ExitNodeStatus != nil { for _, city := range country.cities { From 8d4ea4d90c76502bb6c15a6e6140f7d51de4c787 Mon Sep 17 00:00:00 2001 From: Jason Barnett Date: Thu, 11 Jan 2024 17:36:12 -0700 Subject: [PATCH 0250/1708] wgengine/router: add ip rules for unifi udm-pro Fixes: #4038 Signed-off-by: Jason Barnett --- version/distro/distro.go | 46 ++++++++++++++++++++++++ wgengine/router/router_linux.go | 52 ++++++++++++++++++++++------ wgengine/router/router_linux_test.go | 22 ++++++++++++ 3 files changed, 110 insertions(+), 10 deletions(-) diff --git a/version/distro/distro.go b/version/distro/distro.go index ce61137cf..8128ce395 100644 --- a/version/distro/distro.go +++ b/version/distro/distro.go @@ -9,6 +9,7 @@ import ( "os" "runtime" "strconv" + "strings" "tailscale.com/types/lazy" "tailscale.com/util/lineiter" @@ -30,6 +31,7 @@ const ( WDMyCloud = Distro("wdmycloud") Unraid = Distro("unraid") Alpine = Distro("alpine") + UDMPro = Distro("udmpro") ) var distro lazy.SyncValue[Distro] @@ -75,6 +77,9 @@ func linuxDistro() Distro { case have("/usr/local/bin/freenas-debug"): // TrueNAS Scale runs on debian return TrueNAS + case isUDMPro(): + // UDM-Pro runs on debian + return UDMPro case have("/etc/debian_version"): return Debian case have("/etc/arch-release"): @@ -147,3 +152,44 @@ func DSMVersion() int { return 0 }) } + +// isUDMPro checks a couple of files known to exist on a UDM-Pro and returns +// true if the expected content exists in the files. 
+func isUDMPro() bool { + // This is a performance guardrail against trying to load both + // /etc/board.info and /sys/firmware/devicetree/base/soc/board-cfg/id when + // not running on Debian so we don't make unnecessary calls in situations + // where we definitely are NOT on a UDM Pro. In other words, the have() call + // is much cheaper than the two os.ReadFile() in fileContainsAnyString(). + // That said, on Debian systems we will still be making the two + // os.ReadFile() in fileContainsAnyString(). + if !have("/etc/debian_version") { + return false + } + if exists, err := fileContainsAnyString("/etc/board.info", "UDMPRO", "Dream Machine PRO"); err == nil && exists { + return true + } + if exists, err := fileContainsAnyString("/sys/firmware/devicetree/base/soc/board-cfg/id", "udm pro"); err == nil && exists { + return true + } + return false +} + +// fileContainsAnyString is used to determine if one or more of the provided +// strings exists in a file. This is not efficient for larger files. If you want +// to use this function to parse large files, please refactor to use +// `io.LimitedReader`. +func fileContainsAnyString(filePath string, searchStrings ...string) (bool, error) { + data, err := os.ReadFile(filePath) + if err != nil { + return false, err + } + + content := string(data) + for _, searchString := range searchStrings { + if strings.Contains(content, searchString) { + return true, nil + } + } + return false, nil +} diff --git a/wgengine/router/router_linux.go b/wgengine/router/router_linux.go index 2af73e26d..e154a30fa 100644 --- a/wgengine/router/router_linux.go +++ b/wgengine/router/router_linux.go @@ -32,6 +32,8 @@ import ( "tailscale.com/version/distro" ) +var getDistroFunc = distro.Get + const ( netfilterOff = preftype.NetfilterOff netfilterNoDivert = preftype.NetfilterNoDivert @@ -222,7 +224,7 @@ func busyboxParseVersion(output string) (major, minor, patch int, err error) { } func useAmbientCaps() bool { - if distro.Get() != distro.Synology { + if getDistroFunc() != distro.Synology { return false } return distro.DSMVersion() >= 7 @@ -438,7 +440,7 @@ func (r *linuxRouter) Set(cfg *Config) error { // Issue 11405: enable IP forwarding on gokrazy. advertisingRoutes := len(cfg.SubnetRoutes) > 0 - if distro.Get() == distro.Gokrazy && advertisingRoutes { + if getDistroFunc() == distro.Gokrazy && advertisingRoutes { r.enableIPForwarding() } @@ -1181,7 +1183,9 @@ var ( tailscaleRouteTable = newRouteTable("tailscale", 52) ) -// ipRules are the policy routing rules that Tailscale uses. +// baseIPRules are the policy routing rules that Tailscale uses, when not +// running on a UDM-Pro. +// // The priority is the value represented here added to r.ipPolicyPrefBase, // which is usually 5200. // @@ -1196,7 +1200,7 @@ var ( // and 'ip rule' implementations (including busybox), don't support // checking for the lack of a fwmark, only the presence. The technique // below works even on very old kernels. -var ipRules = []netlink.Rule{ +var baseIPRules = []netlink.Rule{ // Packets from us, tagged with our fwmark, first try the kernel's // main routing table. { @@ -1232,6 +1236,34 @@ var ipRules = []netlink.Rule{ // usual rules (pref 32766 and 32767, ie. main and default). } +// udmProIPRules are the policy routing rules that Tailscale uses, when running +// on a UDM-Pro. +// +// The priority is the value represented here added to +// r.ipPolicyPrefBase, which is usually 5200. +// +// This represents an experiment that will be used to gather more information. 
+// If this goes well, Tailscale may opt to use this for all of Linux. +var udmProIPRules = []netlink.Rule{ + // non-fwmark packets fall through to the usual rules (pref 32766 and 32767, + // ie. main and default). + { + Priority: 70, + Invert: true, + Mark: linuxfw.TailscaleBypassMarkNum, + Table: tailscaleRouteTable.Num, + }, +} + +// ipRules returns the appropriate list of ip rules to be used by Tailscale. See +// comments on baseIPRules and udmProIPRules for more details. +func ipRules() []netlink.Rule { + if getDistroFunc() == distro.UDMPro { + return udmProIPRules + } + return baseIPRules +} + // justAddIPRules adds policy routing rule without deleting any first. func (r *linuxRouter) justAddIPRules() error { if !r.ipRuleAvailable { @@ -1243,7 +1275,7 @@ func (r *linuxRouter) justAddIPRules() error { var errAcc error for _, family := range r.addrFamilies() { - for _, ru := range ipRules { + for _, ru := range ipRules() { // Note: r is a value type here; safe to mutate it. ru.Family = family.netlinkInt() if ru.Mark != 0 { @@ -1272,7 +1304,7 @@ func (r *linuxRouter) addIPRulesWithIPCommand() error { rg := newRunGroup(nil, r.cmd) for _, family := range r.addrFamilies() { - for _, rule := range ipRules { + for _, rule := range ipRules() { args := []string{ "ip", family.dashArg(), "rule", "add", @@ -1320,7 +1352,7 @@ func (r *linuxRouter) delIPRules() error { } var errAcc error for _, family := range r.addrFamilies() { - for _, ru := range ipRules { + for _, ru := range ipRules() { // Note: r is a value type here; safe to mutate it. // When deleting rules, we want to be a bit specific (mention which // table we were routing to) but not *too* specific (fwmarks, etc). @@ -1363,7 +1395,7 @@ func (r *linuxRouter) delIPRulesWithIPCommand() error { // That leaves us some flexibility to change these values in later // versions without having ongoing hacks for every possible // combination. - for _, rule := range ipRules { + for _, rule := range ipRules() { args := []string{ "ip", family.dashArg(), "rule", "del", @@ -1500,7 +1532,7 @@ func normalizeCIDR(cidr netip.Prefix) string { // platformCanNetfilter reports whether the current distro/environment supports // running iptables/nftables commands. func platformCanNetfilter() bool { - switch distro.Get() { + switch getDistroFunc() { case distro.Synology: // Synology doesn't support iptables or nftables. Attempting to run it // just blocks for a long time while it logs about failures. @@ -1526,7 +1558,7 @@ func cleanUp(logf logger.Logf, interfaceName string) { // of the config file being present as well as a policy rule with a specific // priority (2000 + 1 - first interface mwan3 manages) and non-zero mark. 
func checkOpenWRTUsingMWAN3() (bool, error) { - if distro.Get() != distro.OpenWrt { + if getDistroFunc() != distro.OpenWrt { return false, nil } diff --git a/wgengine/router/router_linux_test.go b/wgengine/router/router_linux_test.go index dce69550d..7718f17c4 100644 --- a/wgengine/router/router_linux_test.go +++ b/wgengine/router/router_linux_test.go @@ -28,6 +28,7 @@ import ( "tailscale.com/tstest" "tailscale.com/types/logger" "tailscale.com/util/linuxfw" + "tailscale.com/version/distro" ) func TestRouterStates(t *testing.T) { @@ -1231,3 +1232,24 @@ func adjustFwmask(t *testing.T, s string) string { return fwmaskAdjustRe.ReplaceAllString(s, "$1") } + +func TestIPRulesForUDMPro(t *testing.T) { + // Override the global getDistroFunc + getDistroFunc = func() distro.Distro { + return distro.UDMPro + } + defer func() { getDistroFunc = distro.Get }() // Restore original after the test + + expected := udmProIPRules + actual := ipRules() + + if len(expected) != len(actual) { + t.Fatalf("Expected %d rules, got %d", len(expected), len(actual)) + } + + for i, rule := range expected { + if rule != actual[i] { + t.Errorf("Rule mismatch at index %d: expected %+v, got %+v", i, rule, actual[i]) + } + } +} From c4f9f955ab281db35178a81b40c32c2828044f8f Mon Sep 17 00:00:00 2001 From: Erisa A Date: Mon, 23 Dec 2024 12:53:54 +0000 Subject: [PATCH 0251/1708] scripts/installer.sh: add support for PikaOS (#14461) Fixes #14460 Signed-off-by: Erisa A --- scripts/installer.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/scripts/installer.sh b/scripts/installer.sh index d2971978e..6e530fefe 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -165,6 +165,19 @@ main() { VERSION="bullseye" fi ;; + pika) + PACKAGETYPE="apt" + # All versions of PikaOS are new enough to prefer keyring + APT_KEY_TYPE="keyring" + # Older versions of PikaOS are based on Ubuntu rather than Debian + if [ "$VERSION_ID" -lt 4 ]; then + OS="ubuntu" + VERSION="$UBUNTU_CODENAME" + else + OS="debian" + VERSION="$DEBIAN_CODENAME" + fi + ;; centos) OS="$ID" VERSION="$VERSION_ID" From 4267d0fc5b3ee065132711ab705f2fae76256906 Mon Sep 17 00:00:00 2001 From: Erisa A Date: Mon, 23 Dec 2024 14:48:35 +0000 Subject: [PATCH 0252/1708] .github: update matrix of installer.sh tests (#14462) Remove EOL Ubuntu versions. Add new Ubuntu LTS. Update Alpine to test latest version. Also, make the test run when its workflow is updated and installer.sh isn't. Updates #cleanup Signed-off-by: Erisa A --- .github/workflows/installer.yml | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/.github/workflows/installer.yml b/.github/workflows/installer.yml index 48b29c6ec..1c39e4d74 100644 --- a/.github/workflows/installer.yml +++ b/.github/workflows/installer.yml @@ -6,11 +6,13 @@ on: - "main" paths: - scripts/installer.sh + - .github/workflows/installer.yml pull_request: branches: - "*" paths: - scripts/installer.sh + - .github/workflows/installer.yml jobs: test: @@ -29,10 +31,9 @@ jobs: - "debian:stable-slim" - "debian:testing-slim" - "debian:sid-slim" - - "ubuntu:18.04" - "ubuntu:20.04" - "ubuntu:22.04" - - "ubuntu:23.04" + - "ubuntu:24.04" - "elementary/docker:stable" - "elementary/docker:unstable" - "parrotsec/core:lts-amd64" @@ -48,7 +49,7 @@ jobs: - "opensuse/leap:latest" - "opensuse/tumbleweed:latest" - "archlinux:latest" - - "alpine:3.14" + - "alpine:3.21" - "alpine:latest" - "alpine:edge" deps: @@ -58,10 +59,6 @@ jobs: # Check a few images with wget rather than curl. 
- { image: "debian:oldstable-slim", deps: "wget" } - { image: "debian:sid-slim", deps: "wget" } - - { image: "ubuntu:23.04", deps: "wget" } - # Ubuntu 16.04 also needs apt-transport-https installed. - - { image: "ubuntu:16.04", deps: "curl apt-transport-https" } - - { image: "ubuntu:16.04", deps: "wget apt-transport-https" } runs-on: ubuntu-latest container: image: ${{ matrix.image }} From 9e2819b5d4c00e3b10802b8197b112dcb02b327a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 22 Dec 2024 20:38:20 -0800 Subject: [PATCH 0253/1708] util/stringsx: add package for extra string functions, like CompareFold Noted as useful during review of #14448. Updates #14457 Change-Id: I0f16f08d5b05a8e9044b19ef6c02d3dab497f131 Signed-off-by: Brad Fitzpatrick --- util/stringsx/stringsx.go | 52 +++++++++++++++++++++++ util/stringsx/stringsx_test.go | 78 ++++++++++++++++++++++++++++++++++ 2 files changed, 130 insertions(+) create mode 100644 util/stringsx/stringsx.go create mode 100644 util/stringsx/stringsx_test.go diff --git a/util/stringsx/stringsx.go b/util/stringsx/stringsx.go new file mode 100644 index 000000000..6c7a8d20d --- /dev/null +++ b/util/stringsx/stringsx.go @@ -0,0 +1,52 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package stringsx provides additional string manipulation functions +// that aren't in the standard library's strings package or go4.org/mem. +package stringsx + +import ( + "unicode" + "unicode/utf8" +) + +// CompareFold returns -1, 0, or 1 depending on whether a < b, a == b, or a > b, +// like cmp.Compare, but case insensitively. +func CompareFold(a, b string) int { + // Track our position in both strings + ia, ib := 0, 0 + for ia < len(a) && ib < len(b) { + ra, wa := nextRuneLower(a[ia:]) + rb, wb := nextRuneLower(b[ib:]) + if ra < rb { + return -1 + } + if ra > rb { + return 1 + } + ia += wa + ib += wb + if wa == 0 || wb == 0 { + break + } + } + + // If we've reached here, one or both strings are exhausted + // The shorter string is "less than" if they match up to this point + switch { + case ia == len(a) && ib == len(b): + return 0 + case ia == len(a): + return -1 + default: + return 1 + } +} + +// nextRuneLower returns the next rune in the string, lowercased, along with its +// original (consumed) width in bytes. 
If the string is empty, it returns +// (utf8.RuneError, 0) +func nextRuneLower(s string) (r rune, width int) { + r, width = utf8.DecodeRuneInString(s) + return unicode.ToLower(r), width +} diff --git a/util/stringsx/stringsx_test.go b/util/stringsx/stringsx_test.go new file mode 100644 index 000000000..8575c0b27 --- /dev/null +++ b/util/stringsx/stringsx_test.go @@ -0,0 +1,78 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package stringsx + +import ( + "cmp" + "strings" + "testing" +) + +func TestCompareFold(t *testing.T) { + tests := []struct { + a, b string + }{ + // Basic ASCII cases + {"", ""}, + {"a", "a"}, + {"a", "A"}, + {"A", "a"}, + {"a", "b"}, + {"b", "a"}, + {"abc", "ABC"}, + {"ABC", "abc"}, + {"abc", "abd"}, + {"abd", "abc"}, + + // Length differences + {"abc", "ab"}, + {"ab", "abc"}, + + // Unicode cases + {"世界", "世界"}, + {"Hello世界", "hello世界"}, + {"世界Hello", "世界hello"}, + {"世界", "世界x"}, + {"世界x", "世界"}, + + // Special case folding examples + {"ß", "ss"}, // German sharp s + {"fi", "fi"}, // fi ligature + {"Σ", "σ"}, // Greek sigma + {"İ", "i\u0307"}, // Turkish dotted I + + // Mixed cases + {"HelloWorld", "helloworld"}, + {"HELLOWORLD", "helloworld"}, + {"helloworld", "HELLOWORLD"}, + {"HelloWorld", "helloworld"}, + {"helloworld", "HelloWorld"}, + + // Edge cases + {" ", " "}, + {"1", "1"}, + {"123", "123"}, + {"!@#", "!@#"}, + } + + wants := []int{} + for _, tt := range tests { + got := CompareFold(tt.a, tt.b) + want := cmp.Compare(strings.ToLower(tt.a), strings.ToLower(tt.b)) + if got != want { + t.Errorf("CompareFold(%q, %q) = %v, want %v", tt.a, tt.b, got, want) + } + wants = append(wants, want) + } + + if n := testing.AllocsPerRun(1000, func() { + for i, tt := range tests { + if CompareFold(tt.a, tt.b) != wants[i] { + panic("unexpected") + } + } + }); n > 0 { + t.Errorf("allocs = %v; want 0", int(n)) + } +} From 76ca1adc64ed97699968e09e0c3cfcfe80a66b9c Mon Sep 17 00:00:00 2001 From: Erisa A Date: Mon, 23 Dec 2024 16:47:55 +0000 Subject: [PATCH 0254/1708] scripts/installer.sh: accept different capitalisation of deepin (#14463) Newer Deepin Linux versions use `deepin` as their ID, older ones used `Deepin`. 
Fixes #13570 Signed-off-by: Erisa A --- scripts/installer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/installer.sh b/scripts/installer.sh index 6e530fefe..c42ff03ea 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -154,7 +154,7 @@ main() { APT_KEY_TYPE="keyring" fi ;; - Deepin) # https://github.com/tailscale/tailscale/issues/7862 + Deepin|deepin) # https://github.com/tailscale/tailscale/issues/7862 OS="debian" PACKAGETYPE="apt" if [ "$VERSION_ID" -lt 20 ]; then From 3837b6cebc913a201e29b9e49abf3ea0e86cb0c0 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Sat, 21 Dec 2024 15:58:26 -0800 Subject: [PATCH 0255/1708] cmd/systray: rebuild menu on pref change, assorted other fixes - rebuild menu when prefs change outside of systray, such as setting an exit node - refactor onClick handler code - compare lowercase country name, the same as macOS and Windows (now sorts Ukraine before USA) - fix "connected / disconnected" menu items on stopped status - prevent nil pointer on "This Device" menu item Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- cmd/systray/systray.go | 91 ++++++++++++++++++++++++------------------ 1 file changed, 52 insertions(+), 39 deletions(-) diff --git a/cmd/systray/systray.go b/cmd/systray/systray.go index 26316feeb..8a4ee08fd 100644 --- a/cmd/systray/systray.go +++ b/cmd/systray/systray.go @@ -7,7 +7,6 @@ package main import ( - "cmp" "context" "errors" "fmt" @@ -30,12 +29,15 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" + "tailscale.com/util/stringsx" ) var ( localClient tailscale.LocalClient chState chan ipn.State // tailscale state changes + chRebuild chan struct{} // triggers a menu rebuild + appIcon *os.File // newMenuDelay is the amount of time to sleep after creating a new menu, @@ -111,6 +113,7 @@ func onReady() { io.Copy(appIcon, connected.renderWithBorder(3)) chState = make(chan ipn.State, 1) + chRebuild = make(chan struct{}, 1) menu := new(Menu) menu.rebuild(fetchState(ctx)) @@ -146,6 +149,10 @@ func fetchState(ctx context.Context) state { // You cannot iterate over the items in a menu, nor can you remove some items like separators. // So for now we rebuild the whole thing, and can optimize this later if needed. 
func (menu *Menu) rebuild(state state) { + if state.status == nil { + return + } + menu.mu.Lock() defer menu.mu.Unlock() @@ -181,25 +188,20 @@ func (menu *Menu) rebuild(state state) { item = accounts.AddSubMenuItem(title, "") } setRemoteIcon(item, profile.UserProfile.ProfilePicURL) - go func(profile ipn.LoginProfile) { - for { - select { - case <-ctx.Done(): - return - case <-item.ClickedCh: - select { - case <-ctx.Done(): - return - case menu.accountsCh <- profile.ID: - } - } + onClick(ctx, item, func(ctx context.Context) { + select { + case <-ctx.Done(): + case menu.accountsCh <- profile.ID: } - }(profile) + }) } - if state.status != nil && state.status.Self != nil { + if state.status != nil && state.status.Self != nil && len(state.status.Self.TailscaleIPs) > 0 { title := fmt.Sprintf("This Device: %s (%s)", state.status.Self.HostName, state.status.Self.TailscaleIPs[0]) menu.self = systray.AddMenuItem(title, "") + } else { + menu.self = systray.AddMenuItem("This Device: not connected", "") + menu.self.Disable() } systray.AddSeparator() @@ -266,6 +268,8 @@ func (menu *Menu) eventLoop(ctx context.Context) { select { case <-ctx.Done(): return + case <-chRebuild: + menu.rebuild(fetchState(ctx)) case state := <-chState: switch state { case ipn.Running: @@ -277,10 +281,11 @@ func (menu *Menu) eventLoop(ctx context.Context) { menu.disconnect.Show() menu.disconnect.Enable() case ipn.NoState, ipn.Stopped: + setAppIcon(disconnected) + menu.rebuild(fetchState(ctx)) menu.connect.SetTitle("Connect") menu.connect.Enable() menu.disconnect.Hide() - setAppIcon(disconnected) case ipn.Starting: setAppIcon(loading) } @@ -337,7 +342,6 @@ func (menu *Menu) eventLoop(ctx context.Context) { log.Printf("failed setting exit node: %v", err) } } - menu.rebuild(fetchState(ctx)) case <-menu.quit.ClickedCh: systray.Quit() @@ -345,6 +349,20 @@ func (menu *Menu) eventLoop(ctx context.Context) { } } +// onClick registers a click handler for a menu item. +func onClick(ctx context.Context, item *systray.MenuItem, fn func(ctx context.Context)) { + go func() { + for { + select { + case <-ctx.Done(): + return + case <-item.ClickedCh: + fn(ctx) + } + } + }() +} + // watchIPNBus subscribes to the tailscale event bus and sends state updates to chState. // This method does not return. func watchIPNBus(ctx context.Context) { @@ -383,6 +401,9 @@ func watchIPNBusInner(ctx context.Context) error { chState <- *n.State log.Printf("new state: %v", n.State) } + if n.Prefs != nil { + chRebuild <- struct{}{} + } } } } @@ -425,25 +446,17 @@ func (menu *Menu) rebuildExitNodeMenu(ctx context.Context) { time.Sleep(newMenuDelay) // register a click handler for a menu item to set nodeID as the exit node. - onClick := func(item *systray.MenuItem, nodeID tailcfg.StableNodeID) { - go func() { - for { - select { - case <-ctx.Done(): - return - case <-item.ClickedCh: - select { - case <-ctx.Done(): - return - case menu.exitNodeCh <- nodeID: - } - } + setExitNodeOnClick := func(item *systray.MenuItem, nodeID tailcfg.StableNodeID) { + onClick(ctx, item, func(ctx context.Context) { + select { + case <-ctx.Done(): + case menu.exitNodeCh <- nodeID: } - }() + }) } noExitNodeMenu := menu.exitNodes.AddSubMenuItemCheckbox("None", "", status.ExitNodeStatus == nil) - onClick(noExitNodeMenu, "") + setExitNodeOnClick(noExitNodeMenu, "") // Show recommended exit node if available. 
if status.Self.CapMap.Contains(tailcfg.NodeAttrSuggestExitNodeUI) { @@ -458,7 +471,7 @@ func (menu *Menu) rebuildExitNodeMenu(ctx context.Context) { } menu.exitNodes.AddSeparator() rm := menu.exitNodes.AddSubMenuItemCheckbox(title, "", false) - onClick(rm, sugg.ID) + setExitNodeOnClick(rm, sugg.ID) if status.ExitNodeStatus != nil && sugg.ID == status.ExitNodeStatus.ID { rm.Check() } @@ -490,7 +503,7 @@ func (menu *Menu) rebuildExitNodeMenu(ctx context.Context) { if status.ExitNodeStatus != nil && ps.ID == status.ExitNodeStatus.ID { sm.Check() } - onClick(sm, ps.ID) + setExitNodeOnClick(sm, ps.ID) } } @@ -510,7 +523,7 @@ func (menu *Menu) rebuildExitNodeMenu(ctx context.Context) { // single-city country, no submenu if len(country.cities) == 1 || hideMullvadCities { - onClick(countryMenu, country.best.ID) + setExitNodeOnClick(countryMenu, country.best.ID) if status.ExitNodeStatus != nil { for _, city := range country.cities { for _, ps := range city.peers { @@ -527,12 +540,12 @@ func (menu *Menu) rebuildExitNodeMenu(ctx context.Context) { // multi-city country, build submenu with "best available" option and cities. time.Sleep(newMenuDelay) bm := countryMenu.AddSubMenuItemCheckbox("Best Available", "", false) - onClick(bm, country.best.ID) + setExitNodeOnClick(bm, country.best.ID) countryMenu.AddSeparator() for _, city := range country.sortedCities() { cityMenu := countryMenu.AddSubMenuItemCheckbox(city.name, "", false) - onClick(cityMenu, city.best.ID) + setExitNodeOnClick(cityMenu, city.best.ID) if status.ExitNodeStatus != nil { for _, ps := range city.peers { if status.ExitNodeStatus.ID == ps.ID { @@ -558,7 +571,7 @@ type mullvadPeers struct { func (mp mullvadPeers) sortedCountries() []*mvCountry { countries := slices.Collect(maps.Values(mp.countries)) slices.SortFunc(countries, func(a, b *mvCountry) int { - return cmp.Compare(a.name, b.name) + return stringsx.CompareFold(a.name, b.name) }) return countries } @@ -574,7 +587,7 @@ type mvCountry struct { func (mc *mvCountry) sortedCities() []*mvCity { cities := slices.Collect(maps.Values(mc.cities)) slices.SortFunc(cities, func(a, b *mvCity) int { - return cmp.Compare(a.name, b.name) + return stringsx.CompareFold(a.name, b.name) }) return cities } From 72b278937bfe429f4e1427a40d85eace4973feea Mon Sep 17 00:00:00 2001 From: Erisa A Date: Mon, 23 Dec 2024 17:53:06 +0000 Subject: [PATCH 0256/1708] scripts/installer.sh: allow CachyOS for Arch packages (#14464) Fixes #13955 Signed-off-by: Erisa A --- scripts/installer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/installer.sh b/scripts/installer.sh index c42ff03ea..8d1fc0212 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -237,7 +237,7 @@ main() { VERSION="leap/15.4" PACKAGETYPE="zypper" ;; - arch|archarm|endeavouros|blendos|garuda|archcraft) + arch|archarm|endeavouros|blendos|garuda|archcraft|cachyos) OS="arch" VERSION="" # rolling release PACKAGETYPE="pacman" From 68b12a74ed2379f85fa30ffe10b974cf6062c8fc Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 19 Dec 2024 16:32:40 -0800 Subject: [PATCH 0257/1708] metrics,syncs: add ShardedInt support to metrics.LabelMap metrics.LabelMap grows slightly more heavy, needing a lock to ensure proper ordering for newly initialized ShardedInt values. An Add method enables callers to use .Add for both expvar.Int and syncs.ShardedInt values, but retains the original behavior of defaulting to initializing expvar.Int values. 
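
As a rough usage sketch (not part of this change; the "proto" label and the key names are made up for illustration, mirroring the new test below), callers can keep using Add for ordinary keys while opting expected-hot keys into a sharded counter:

package main

import (
	"fmt"

	"tailscale.com/metrics"
)

func main() {
	m := metrics.LabelMap{Label: "proto"}

	// Keys only ever touched via Add keep the old behavior and are
	// backed by plain expvar.Int values.
	m.Add("udp", 1)

	// A key expected to see heavy write contention can be initialized
	// as a syncs.ShardedInt; later Add calls on the same key still
	// work and update the sharded counter.
	m.GetShardedInt("tcp").Add(5)
	m.Add("tcp", 1)

	fmt.Println(m.Get("udp").Value())           // 1
	fmt.Println(m.GetShardedInt("tcp").Value()) // 6
}
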
Updates tailscale/corp#25450 Co-Authored-By: Andrew Dunham Signed-off-by: James Tucker --- cmd/stund/depaware.txt | 2 ++ metrics/metrics.go | 35 +++++++++++++++++++++++++++++++++++ metrics/metrics_test.go | 9 +++++++++ 3 files changed, 46 insertions(+) diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 34a71c43e..9599f6a01 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -55,6 +55,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/stun from tailscale.com/net/stunserver tailscale.com/net/stunserver from tailscale.com/cmd/stund tailscale.com/net/tsaddr from tailscale.com/tsweb + tailscale.com/syncs from tailscale.com/metrics tailscale.com/tailcfg from tailscale.com/version tailscale.com/tsweb from tailscale.com/cmd/stund tailscale.com/tsweb/promvarz from tailscale.com/tsweb @@ -74,6 +75,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/tailcfg tailscale.com/util/lineiter from tailscale.com/version/distro + tailscale.com/util/mak from tailscale.com/syncs tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/rands from tailscale.com/tsweb tailscale.com/util/slicesx from tailscale.com/tailcfg diff --git a/metrics/metrics.go b/metrics/metrics.go index a07ddccae..d1b1c06c9 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -11,6 +11,9 @@ import ( "io" "slices" "strings" + "sync" + + "tailscale.com/syncs" ) // Set is a string-to-Var map variable that satisfies the expvar.Var @@ -37,6 +40,8 @@ type Set struct { type LabelMap struct { Label string expvar.Map + // shardedIntMu orders the initialization of new shardedint keys + shardedIntMu sync.Mutex } // SetInt64 sets the *Int value stored under the given map key. @@ -44,6 +49,19 @@ func (m *LabelMap) SetInt64(key string, v int64) { m.Get(key).Set(v) } +// Add adds delta to the any int-like value stored under the given map key. +func (m *LabelMap) Add(key string, delta int64) { + type intAdder interface { + Add(delta int64) + } + o := m.Map.Get(key) + if o == nil { + m.Map.Add(key, delta) + return + } + o.(intAdder).Add(delta) +} + // Get returns a direct pointer to the expvar.Int for key, creating it // if necessary. func (m *LabelMap) Get(key string) *expvar.Int { @@ -51,6 +69,23 @@ func (m *LabelMap) Get(key string) *expvar.Int { return m.Map.Get(key).(*expvar.Int) } +// GetShardedInt returns a direct pointer to the syncs.ShardedInt for key, +// creating it if necessary. +func (m *LabelMap) GetShardedInt(key string) *syncs.ShardedInt { + i := m.Map.Get(key) + if i == nil { + m.shardedIntMu.Lock() + defer m.shardedIntMu.Unlock() + i = m.Map.Get(key) + if i != nil { + return i.(*syncs.ShardedInt) + } + i = syncs.NewShardedInt() + m.Set(key, i) + } + return i.(*syncs.ShardedInt) +} + // GetIncrFunc returns a function that increments the expvar.Int named by key. 
// // Most callers should not need this; it exists to satisfy an diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go index 45bf39e56..a808d5a73 100644 --- a/metrics/metrics_test.go +++ b/metrics/metrics_test.go @@ -21,6 +21,15 @@ func TestLabelMap(t *testing.T) { if g, w := m.Get("bar").Value(), int64(2); g != w { t.Errorf("bar = %v; want %v", g, w) } + m.GetShardedInt("sharded").Add(5) + if g, w := m.GetShardedInt("sharded").Value(), int64(5); g != w { + t.Errorf("sharded = %v; want %v", g, w) + } + m.Add("sharded", 1) + if g, w := m.GetShardedInt("sharded").Value(), int64(6); g != w { + t.Errorf("sharded = %v; want %v", g, w) + } + m.Add("neverbefore", 1) } func TestCurrentFileDescriptors(t *testing.T) { From 2bdbe5b2ab7825666378a275b4c4429c0c39f8b6 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 23 Dec 2024 13:35:27 -0800 Subject: [PATCH 0258/1708] cmd/systray: add icons for exit node online and offline restructure tsLogo to allow setting a mask to be used when drawing the logo dots, as well as add an overlay icon, such as the arrow when connected to an exit node. The icon is still renders as white on black, but this change also prepare for doing a black on white version, as well a fully transparent icon. I don't know if we can consistently determine which to use, so this just keeps the single icon for now. Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- cmd/systray/logo.go | 177 +++++++++++++++++++++++++++++++++----------- 1 file changed, 133 insertions(+), 44 deletions(-) diff --git a/cmd/systray/logo.go b/cmd/systray/logo.go index ef8caca66..13fd4c564 100644 --- a/cmd/systray/logo.go +++ b/cmd/systray/logo.go @@ -8,6 +8,7 @@ package main import ( "bytes" "context" + "image" "image/color" "image/png" "sync" @@ -17,113 +18,190 @@ import ( "github.com/fogleman/gg" ) -// tsLogo represents the state of the 3x3 dot grid in the Tailscale logo. -// A 0 represents a gray dot, any other value is a white dot. -type tsLogo [9]byte +// tsLogo represents the Tailscale logo displayed as the systray icon. +type tsLogo struct { + // dots represents the state of the 3x3 dot grid in the logo. + // A 0 represents a gray dot, any other value is a white dot. + dots [9]byte + + // dotMask returns an image mask to be used when rendering the logo dots. + dotMask func(dc *gg.Context, borderUnits int, radius int) *image.Alpha + + // overlay is called after the dots are rendered to draw an additional overlay. + overlay func(dc *gg.Context, borderUnits int, radius int) +} var ( // disconnected is all gray dots - disconnected = tsLogo{ + disconnected = tsLogo{dots: [9]byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, - } + }} // connected is the normal Tailscale logo - connected = tsLogo{ + connected = tsLogo{dots: [9]byte{ 0, 0, 0, 1, 1, 1, 0, 1, 0, - } + }} // loading is a special tsLogo value that is not meant to be rendered directly, // but indicates that the loading animation should be shown. - loading = tsLogo{'l', 'o', 'a', 'd', 'i', 'n', 'g'} + loading = tsLogo{dots: [9]byte{'l', 'o', 'a', 'd', 'i', 'n', 'g'}} // loadingIcons are shown in sequence as an animated loading icon. 
loadingLogos = []tsLogo{ - { + {dots: [9]byte{ 0, 1, 1, 1, 0, 1, 0, 0, 1, - }, - { + }}, + {dots: [9]byte{ 0, 1, 1, 0, 0, 1, 0, 1, 0, - }, - { + }}, + {dots: [9]byte{ 0, 1, 1, 0, 0, 0, 0, 0, 1, - }, - { + }}, + {dots: [9]byte{ 0, 0, 1, 0, 1, 0, 0, 0, 0, - }, - { + }}, + {dots: [9]byte{ 0, 1, 0, 0, 0, 0, 0, 0, 0, - }, - { + }}, + {dots: [9]byte{ 0, 0, 0, 0, 0, 1, 0, 0, 0, - }, - { + }}, + {dots: [9]byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, - }, - { + }}, + {dots: [9]byte{ 0, 0, 1, 0, 0, 0, 0, 0, 0, - }, - { + }}, + {dots: [9]byte{ 0, 0, 0, 0, 0, 0, 1, 0, 0, - }, - { + }}, + {dots: [9]byte{ 0, 0, 0, 0, 0, 0, 1, 1, 0, - }, - { + }}, + {dots: [9]byte{ 0, 0, 0, 1, 0, 0, 1, 1, 0, - }, - { + }}, + {dots: [9]byte{ 0, 0, 0, 1, 1, 0, 0, 1, 0, - }, - { + }}, + {dots: [9]byte{ 0, 0, 0, 1, 1, 0, 0, 1, 1, - }, - { + }}, + {dots: [9]byte{ 0, 0, 0, 1, 1, 1, 0, 0, 1, - }, - { + }}, + {dots: [9]byte{ 0, 1, 0, 0, 1, 1, 1, 0, 1, + }}, + } + + // exitNodeOnline is the Tailscale logo with an additional arrow overlay in the corner. + exitNodeOnline = tsLogo{ + dots: [9]byte{ + 0, 0, 0, + 1, 1, 1, + 0, 1, 0, + }, + dotMask: func(dc *gg.Context, borderUnits int, radius int) *image.Alpha { + bu, r := float64(borderUnits), float64(radius) + + x1 := r * (bu + 3.5) + y := r * (bu + 7) + x2 := x1 + (r * 5) + + mc := gg.NewContext(dc.Width(), dc.Height()) + mc.DrawLine(x1, y, x2, y) + mc.DrawLine(x2-(1.5*r), y-(1.5*r), x2, y) + mc.DrawLine(x2-(1.5*r), y+(1.5*r), x2, y) + mc.SetLineWidth(r * 3) + mc.Stroke() + return mc.AsMask() + }, + overlay: func(dc *gg.Context, borderUnits int, radius int) { + bu, r := float64(borderUnits), float64(radius) + + x1 := r * (bu + 3.5) + y := r * (bu + 7) + x2 := x1 + (r * 5) + + dc.DrawLine(x1, y, x2, y) + dc.DrawLine(x2-(1.5*r), y-(1.5*r), x2, y) + dc.DrawLine(x2-(1.5*r), y+(1.5*r), x2, y) + dc.SetColor(fg) + dc.SetLineWidth(r) + dc.Stroke() + }, + } + + // exitNodeOffline is the Tailscale logo with a red "x" in the corner. + exitNodeOffline = tsLogo{ + dots: [9]byte{ + 0, 0, 0, + 1, 1, 1, + 0, 1, 0, + }, + dotMask: func(dc *gg.Context, borderUnits int, radius int) *image.Alpha { + bu, r := float64(borderUnits), float64(radius) + x := r * (bu + 3) + + mc := gg.NewContext(dc.Width(), dc.Height()) + mc.DrawRectangle(x, x, r*6, r*6) + mc.Fill() + return mc.AsMask() + }, + overlay: func(dc *gg.Context, borderUnits int, radius int) { + bu, r := float64(borderUnits), float64(radius) + + x1 := r * (bu + 4) + x2 := x1 + (r * 3.5) + dc.DrawLine(x1, x1, x2, x2) + dc.DrawLine(x1, x2, x2, x1) + dc.SetColor(red) + dc.SetLineWidth(r) + dc.Stroke() }, } ) var ( - black = color.NRGBA{0, 0, 0, 255} - white = color.NRGBA{255, 255, 255, 255} - gray = color.NRGBA{255, 255, 255, 102} + bg = color.NRGBA{0, 0, 0, 255} + fg = color.NRGBA{255, 255, 255, 255} + gray = color.NRGBA{255, 255, 255, 102} + red = color.NRGBA{229, 111, 74, 255} ) // render returns a PNG image of the logo. 
@@ -140,15 +218,21 @@ func (logo tsLogo) renderWithBorder(borderUnits int) *bytes.Buffer { dc := gg.NewContext(dim, dim) dc.DrawRectangle(0, 0, float64(dim), float64(dim)) - dc.SetColor(black) + dc.SetColor(bg) dc.Fill() + if logo.dotMask != nil { + mask := logo.dotMask(dc, borderUnits, radius) + dc.SetMask(mask) + dc.InvertMask() + } + for y := 0; y < 3; y++ { for x := 0; x < 3; x++ { px := (borderUnits + 1 + 3*x) * radius py := (borderUnits + 1 + 3*y) * radius - col := white - if logo[y*3+x] == 0 { + col := fg + if logo.dots[y*3+x] == 0 { col = gray } dc.DrawCircle(float64(px), float64(py), radius) @@ -157,6 +241,11 @@ func (logo tsLogo) renderWithBorder(borderUnits int) *bytes.Buffer { } } + if logo.overlay != nil { + dc.ResetClip() + logo.overlay(dc, borderUnits, radius) + } + b := bytes.NewBuffer(nil) png.Encode(b, dc.Image()) return b @@ -164,7 +253,7 @@ func (logo tsLogo) renderWithBorder(borderUnits int) *bytes.Buffer { // setAppIcon renders logo and sets it as the systray icon. func setAppIcon(icon tsLogo) { - if icon == loading { + if icon.dots == loading.dots { startLoadingAnimation() } else { stopLoadingAnimation() From 86f273d930df52440641ef2397f0f7ebca648d7c Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 23 Dec 2024 13:38:09 -0800 Subject: [PATCH 0259/1708] cmd/systray: set app icon and title consistently Refactor code to set app icon and title as part of rebuild, rather than separately in eventLoop. This fixes several cases where they weren't getting updated properly. This change also makes use of the new exit node icons. Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- cmd/systray/logo.go | 20 ++++++++----- cmd/systray/systray.go | 67 ++++++++++++++++++++++++------------------ 2 files changed, 50 insertions(+), 37 deletions(-) diff --git a/cmd/systray/logo.go b/cmd/systray/logo.go index 13fd4c564..de60bcdbd 100644 --- a/cmd/systray/logo.go +++ b/cmd/systray/logo.go @@ -136,6 +136,7 @@ var ( 1, 1, 1, 0, 1, 0, }, + // draw an arrow mask in the bottom right corner with a reasonably thick line width. dotMask: func(dc *gg.Context, borderUnits int, radius int) *image.Alpha { bu, r := float64(borderUnits), float64(radius) @@ -144,13 +145,14 @@ var ( x2 := x1 + (r * 5) mc := gg.NewContext(dc.Width(), dc.Height()) - mc.DrawLine(x1, y, x2, y) - mc.DrawLine(x2-(1.5*r), y-(1.5*r), x2, y) - mc.DrawLine(x2-(1.5*r), y+(1.5*r), x2, y) + mc.DrawLine(x1, y, x2, y) // arrow center line + mc.DrawLine(x2-(1.5*r), y-(1.5*r), x2, y) // top of arrow tip + mc.DrawLine(x2-(1.5*r), y+(1.5*r), x2, y) // bottom of arrow tip mc.SetLineWidth(r * 3) mc.Stroke() return mc.AsMask() }, + // draw an arrow in the bottom right corner over the masked area. 
overlay: func(dc *gg.Context, borderUnits int, radius int) { bu, r := float64(borderUnits), float64(radius) @@ -158,9 +160,9 @@ var ( y := r * (bu + 7) x2 := x1 + (r * 5) - dc.DrawLine(x1, y, x2, y) - dc.DrawLine(x2-(1.5*r), y-(1.5*r), x2, y) - dc.DrawLine(x2-(1.5*r), y+(1.5*r), x2, y) + dc.DrawLine(x1, y, x2, y) // arrow center line + dc.DrawLine(x2-(1.5*r), y-(1.5*r), x2, y) // top of arrow tip + dc.DrawLine(x2-(1.5*r), y+(1.5*r), x2, y) // bottom of arrow tip dc.SetColor(fg) dc.SetLineWidth(r) dc.Stroke() @@ -174,6 +176,7 @@ var ( 1, 1, 1, 0, 1, 0, }, + // Draw a square that hides the four dots in the bottom right corner, dotMask: func(dc *gg.Context, borderUnits int, radius int) *image.Alpha { bu, r := float64(borderUnits), float64(radius) x := r * (bu + 3) @@ -183,13 +186,14 @@ var ( mc.Fill() return mc.AsMask() }, + // draw a red "x" over the bottom right corner. overlay: func(dc *gg.Context, borderUnits int, radius int) { bu, r := float64(borderUnits), float64(radius) x1 := r * (bu + 4) x2 := x1 + (r * 3.5) - dc.DrawLine(x1, x1, x2, x2) - dc.DrawLine(x1, x2, x2, x1) + dc.DrawLine(x1, x1, x2, x2) // top-left to bottom-right stroke + dc.DrawLine(x1, x2, x2, x1) // bottom-left to top-right stroke dc.SetColor(red) dc.SetLineWidth(r) dc.Stroke() diff --git a/cmd/systray/systray.go b/cmd/systray/systray.go index 8a4ee08fd..0102b28a6 100644 --- a/cmd/systray/systray.go +++ b/cmd/systray/systray.go @@ -34,11 +34,8 @@ import ( var ( localClient tailscale.LocalClient - chState chan ipn.State // tailscale state changes - - chRebuild chan struct{} // triggers a menu rebuild - - appIcon *os.File + rebuildCh chan struct{} // triggers a menu rebuild + appIcon *os.File // newMenuDelay is the amount of time to sleep after creating a new menu, // but before adding items to it. This works around a bug in some dbus implementations. @@ -112,8 +109,7 @@ func onReady() { appIcon, _ = os.CreateTemp("", "tailscale-systray.png") io.Copy(appIcon, connected.renderWithBorder(3)) - chState = make(chan ipn.State, 1) - chRebuild = make(chan struct{}, 1) + rebuildCh = make(chan struct{}, 1) menu := new(Menu) menu.rebuild(fetchState(ctx)) @@ -170,6 +166,34 @@ func (menu *Menu) rebuild(state state) { menu.disconnect.Hide() systray.AddSeparator() + // Set systray menu icon and title. + // Also adjust connect/disconnect menu items if needed. 
+ switch menu.status.BackendState { + case ipn.Running.String(): + if state.status.ExitNodeStatus != nil && !state.status.ExitNodeStatus.ID.IsZero() { + if state.status.ExitNodeStatus.Online { + systray.SetTitle("Using exit node") + setAppIcon(exitNodeOnline) + } else { + systray.SetTitle("Exit node offline") + setAppIcon(exitNodeOffline) + } + } else { + systray.SetTitle(fmt.Sprintf("Connected to %s", state.status.CurrentTailnet.Name)) + setAppIcon(connected) + } + menu.connect.SetTitle("Connected") + menu.connect.Disable() + menu.disconnect.Show() + menu.disconnect.Enable() + case ipn.Starting.String(): + systray.SetTitle("Connecting") + setAppIcon(loading) + default: + systray.SetTitle("Disconnected") + setAppIcon(disconnected) + } + account := "Account" if pt := profileTitle(state.curProfile); pt != "" { account = pt @@ -268,27 +292,8 @@ func (menu *Menu) eventLoop(ctx context.Context) { select { case <-ctx.Done(): return - case <-chRebuild: + case <-rebuildCh: menu.rebuild(fetchState(ctx)) - case state := <-chState: - switch state { - case ipn.Running: - setAppIcon(loading) - menu.rebuild(fetchState(ctx)) - setAppIcon(connected) - menu.connect.SetTitle("Connected") - menu.connect.Disable() - menu.disconnect.Show() - menu.disconnect.Enable() - case ipn.NoState, ipn.Stopped: - setAppIcon(disconnected) - menu.rebuild(fetchState(ctx)) - menu.connect.SetTitle("Connect") - menu.connect.Enable() - menu.disconnect.Hide() - case ipn.Starting: - setAppIcon(loading) - } case <-menu.connect.ClickedCh: _, err := localClient.EditPrefs(ctx, &ipn.MaskedPrefs{ Prefs: ipn.Prefs{ @@ -397,12 +402,16 @@ func watchIPNBusInner(ctx context.Context) error { if err != nil { return fmt.Errorf("ipnbus error: %w", err) } + var rebuild bool if n.State != nil { - chState <- *n.State log.Printf("new state: %v", n.State) + rebuild = true } if n.Prefs != nil { - chRebuild <- struct{}{} + rebuild = true + } + if rebuild { + rebuildCh <- struct{}{} } } } From 5a4148e7e81287f4914fe01cd5b270c342d29d2f Mon Sep 17 00:00:00 2001 From: Will Norris Date: Wed, 25 Dec 2024 17:30:59 -0800 Subject: [PATCH 0260/1708] cmd/systray: update state management and initialization Move a number of global state vars into the Menu struct, keeping things better encapsulated. The systray package still relies on its own global state, so only a single Menu instance can run at a time. Move a lot of the initialization logic out of onReady, in particular fetching the latest tailscale state. Instead, populate the state before calling systray.Run, which fixes a timing issue in GNOME (#14477). This change also creates a separate bgContext for actions not tied menu item clicks. Because we have to rebuild the entire menu regularly, we cancel that context as needed, which can cancel subsequent updateState calls. Also exit cleanly on SIGINT and SIGTERM. 
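To make that last caveat concrete, here is a minimal, self-contained
sketch (illustration only, not part of this patch; updateState here is a
stand-in for the real method): cancelling a context that is shared with
background tasks also cancels any call that is still using it.

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    // updateState stands in for a call to the Tailscale local client.
    func updateState(ctx context.Context) error {
        select {
        case <-time.After(time.Second): // pretend RPC latency
            return nil
        case <-ctx.Done():
            return ctx.Err() // the shared context was canceled first
        }
    }

    func main() {
        bgCtx, bgCancel := context.WithCancel(context.Background())
        go func() {
            time.Sleep(100 * time.Millisecond)
            bgCancel() // e.g. a menu rebuild or shutdown cancels bgCtx
        }()
        err := updateState(bgCtx)
        fmt.Println(errors.Is(err, context.Canceled)) // prints: true
    }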
Updates #1708 Fixes #14477 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- cmd/systray/systray.go | 220 ++++++++++++++++++++++++----------------- 1 file changed, 128 insertions(+), 92 deletions(-) diff --git a/cmd/systray/systray.go b/cmd/systray/systray.go index 0102b28a6..5f498f35f 100644 --- a/cmd/systray/systray.go +++ b/cmd/systray/systray.go @@ -15,10 +15,12 @@ import ( "maps" "net/http" "os" + "os/signal" "runtime" "slices" "strings" "sync" + "syscall" "time" "fyne.io/systray" @@ -33,10 +35,6 @@ import ( ) var ( - localClient tailscale.LocalClient - rebuildCh chan struct{} // triggers a menu rebuild - appIcon *os.File - // newMenuDelay is the amount of time to sleep after creating a new menu, // but before adding items to it. This works around a bug in some dbus implementations. newMenuDelay time.Duration @@ -47,26 +45,68 @@ var ( ) func main() { - systray.Run(onReady, onExit) + menu := new(Menu) + menu.updateState() + + // exit cleanly on SIGINT and SIGTERM + go func() { + interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM) + select { + case <-interrupt: + menu.onExit() + case <-menu.bgCtx.Done(): + } + }() + + systray.Run(menu.onReady, menu.onExit) } // Menu represents the systray menu, its items, and the current Tailscale state. type Menu struct { - mu sync.Mutex // protects the entire Menu - status *ipnstate.Status + mu sync.Mutex // protects the entire Menu + lc tailscale.LocalClient + status *ipnstate.Status + curProfile ipn.LoginProfile + allProfiles []ipn.LoginProfile + + bgCtx context.Context // ctx for background tasks not involving menu item clicks + bgCancel context.CancelFunc + + // Top-level menu items connect *systray.MenuItem disconnect *systray.MenuItem + self *systray.MenuItem + exitNodes *systray.MenuItem + more *systray.MenuItem + quit *systray.MenuItem - self *systray.MenuItem - more *systray.MenuItem - exitNodes *systray.MenuItem - quit *systray.MenuItem - + rebuildCh chan struct{} // triggers a menu rebuild accountsCh chan ipn.ProfileID exitNodeCh chan tailcfg.StableNodeID // ID of selected exit node - eventCancel func() // cancel eventLoop + eventCancel context.CancelFunc // cancel eventLoop + + notificationIcon *os.File // icon used for desktop notifications +} + +func (menu *Menu) init() { + if menu.bgCtx != nil { + // already initialized + return + } + + menu.rebuildCh = make(chan struct{}, 1) + menu.accountsCh = make(chan ipn.ProfileID) + menu.exitNodeCh = make(chan tailcfg.StableNodeID) + + // dbus wants a file path for notification icons, so copy to a temp file. + menu.notificationIcon, _ = os.CreateTemp("", "tailscale-systray.png") + io.Copy(menu.notificationIcon, connected.renderWithBorder(3)) + + menu.bgCtx, menu.bgCancel = context.WithCancel(context.Background()) + go menu.watchIPNBus() } func init() { @@ -99,44 +139,28 @@ func init() { } } -func onReady() { +// onReady is called by the systray package when the menu is ready to be built. +func (menu *Menu) onReady() { log.Printf("starting") - ctx := context.Background() - setAppIcon(disconnected) - - // dbus wants a file path for notification icons, so copy to a temp file. 
- appIcon, _ = os.CreateTemp("", "tailscale-systray.png") - io.Copy(appIcon, connected.renderWithBorder(3)) - - rebuildCh = make(chan struct{}, 1) - - menu := new(Menu) - menu.rebuild(fetchState(ctx)) - - go watchIPNBus(ctx) + menu.rebuild() } -type state struct { - status *ipnstate.Status - curProfile ipn.LoginProfile - allProfiles []ipn.LoginProfile -} +// updateState updates the Menu state from the Tailscale local client. +func (menu *Menu) updateState() { + menu.mu.Lock() + defer menu.mu.Unlock() + menu.init() -func fetchState(ctx context.Context) state { - status, err := localClient.Status(ctx) + var err error + menu.status, err = menu.lc.Status(menu.bgCtx) if err != nil { log.Print(err) } - curProfile, allProfiles, err := localClient.ProfileStatus(ctx) + menu.curProfile, menu.allProfiles, err = menu.lc.ProfileStatus(menu.bgCtx) if err != nil { log.Print(err) } - return state{ - status: status, - curProfile: curProfile, - allProfiles: allProfiles, - } } // rebuild the systray menu based on the current Tailscale state. @@ -144,13 +168,10 @@ func fetchState(ctx context.Context) state { // We currently rebuild the entire menu because it is not easy to update the existing menu. // You cannot iterate over the items in a menu, nor can you remove some items like separators. // So for now we rebuild the whole thing, and can optimize this later if needed. -func (menu *Menu) rebuild(state state) { - if state.status == nil { - return - } - +func (menu *Menu) rebuild() { menu.mu.Lock() defer menu.mu.Unlock() + menu.init() if menu.eventCancel != nil { menu.eventCancel() @@ -158,7 +179,6 @@ func (menu *Menu) rebuild(state state) { ctx := context.Background() ctx, menu.eventCancel = context.WithCancel(ctx) - menu.status = state.status systray.ResetMenu() menu.connect = systray.AddMenuItem("Connect", "") @@ -166,12 +186,19 @@ func (menu *Menu) rebuild(state state) { menu.disconnect.Hide() systray.AddSeparator() + // delay to prevent race setting icon on first start + time.Sleep(newMenuDelay) + // Set systray menu icon and title. // Also adjust connect/disconnect menu items if needed. - switch menu.status.BackendState { + var backendState string + if menu.status != nil { + backendState = menu.status.BackendState + } + switch backendState { case ipn.Running.String(): - if state.status.ExitNodeStatus != nil && !state.status.ExitNodeStatus.ID.IsZero() { - if state.status.ExitNodeStatus.Online { + if menu.status.ExitNodeStatus != nil && !menu.status.ExitNodeStatus.ID.IsZero() { + if menu.status.ExitNodeStatus.Online { systray.SetTitle("Using exit node") setAppIcon(exitNodeOnline) } else { @@ -179,7 +206,7 @@ func (menu *Menu) rebuild(state state) { setAppIcon(exitNodeOffline) } } else { - systray.SetTitle(fmt.Sprintf("Connected to %s", state.status.CurrentTailnet.Name)) + systray.SetTitle(fmt.Sprintf("Connected to %s", menu.status.CurrentTailnet.Name)) setAppIcon(connected) } menu.connect.SetTitle("Connected") @@ -195,18 +222,16 @@ func (menu *Menu) rebuild(state state) { } account := "Account" - if pt := profileTitle(state.curProfile); pt != "" { + if pt := profileTitle(menu.curProfile); pt != "" { account = pt } accounts := systray.AddMenuItem(account, "") - setRemoteIcon(accounts, state.curProfile.UserProfile.ProfilePicURL) + setRemoteIcon(accounts, menu.curProfile.UserProfile.ProfilePicURL) time.Sleep(newMenuDelay) - // Aggregate all clicks into a shared channel. 
- menu.accountsCh = make(chan ipn.ProfileID) - for _, profile := range state.allProfiles { + for _, profile := range menu.allProfiles { title := profileTitle(profile) var item *systray.MenuItem - if profile.ID == state.curProfile.ID { + if profile.ID == menu.curProfile.ID { item = accounts.AddSubMenuItemCheckbox(title, "", true) } else { item = accounts.AddSubMenuItem(title, "") @@ -220,8 +245,8 @@ func (menu *Menu) rebuild(state state) { }) } - if state.status != nil && state.status.Self != nil && len(state.status.Self.TailscaleIPs) > 0 { - title := fmt.Sprintf("This Device: %s (%s)", state.status.Self.HostName, state.status.Self.TailscaleIPs[0]) + if menu.status != nil && menu.status.Self != nil && len(menu.status.Self.TailscaleIPs) > 0 { + title := fmt.Sprintf("This Device: %s (%s)", menu.status.Self.HostName, menu.status.Self.TailscaleIPs[0]) menu.self = systray.AddMenuItem(title, "") } else { menu.self = systray.AddMenuItem("This Device: not connected", "") @@ -229,11 +254,14 @@ func (menu *Menu) rebuild(state state) { } systray.AddSeparator() - menu.exitNodeCh = make(chan tailcfg.StableNodeID) menu.rebuildExitNodeMenu(ctx) - menu.more = systray.AddMenuItem("More settings", "") - menu.more.Enable() + if menu.status != nil { + menu.more = systray.AddMenuItem("More settings", "") + onClick(ctx, menu.more, func(_ context.Context) { + webbrowser.Open("http://100.100.100.100/") + }) + } menu.quit = systray.AddMenuItem("Quit", "Quit the app") menu.quit.Enable() @@ -292,48 +320,44 @@ func (menu *Menu) eventLoop(ctx context.Context) { select { case <-ctx.Done(): return - case <-rebuildCh: - menu.rebuild(fetchState(ctx)) + case <-menu.rebuildCh: + menu.updateState() + menu.rebuild() case <-menu.connect.ClickedCh: - _, err := localClient.EditPrefs(ctx, &ipn.MaskedPrefs{ + _, err := menu.lc.EditPrefs(ctx, &ipn.MaskedPrefs{ Prefs: ipn.Prefs{ WantRunning: true, }, WantRunningSet: true, }) if err != nil { - log.Print(err) - continue + log.Printf("error connecting: %v", err) } case <-menu.disconnect.ClickedCh: - _, err := localClient.EditPrefs(ctx, &ipn.MaskedPrefs{ + _, err := menu.lc.EditPrefs(ctx, &ipn.MaskedPrefs{ Prefs: ipn.Prefs{ WantRunning: false, }, WantRunningSet: true, }) if err != nil { - log.Printf("disconnecting: %v", err) - continue + log.Printf("error disconnecting: %v", err) } case <-menu.self.ClickedCh: - copyTailscaleIP(menu.status.Self) - - case <-menu.more.ClickedCh: - webbrowser.Open("http://100.100.100.100/") + menu.copyTailscaleIP(menu.status.Self) case id := <-menu.accountsCh: - if err := localClient.SwitchProfile(ctx, id); err != nil { - log.Printf("failed switching to profile ID %v: %v", id, err) + if err := menu.lc.SwitchProfile(ctx, id); err != nil { + log.Printf("error switching to profile ID %v: %v", id, err) } case exitNode := <-menu.exitNodeCh: if exitNode.IsZero() { log.Print("disable exit node") - if err := localClient.SetUseExitNode(ctx, false); err != nil { - log.Printf("failed disabling exit node: %v", err) + if err := menu.lc.SetUseExitNode(ctx, false); err != nil { + log.Printf("error disabling exit node: %v", err) } } else { log.Printf("enable exit node: %v", exitNode) @@ -343,8 +367,8 @@ func (menu *Menu) eventLoop(ctx context.Context) { }, ExitNodeIDSet: true, } - if _, err := localClient.EditPrefs(ctx, mp); err != nil { - log.Printf("failed setting exit node: %v", err) + if _, err := menu.lc.EditPrefs(ctx, mp); err != nil { + log.Printf("error setting exit node: %v", err) } } @@ -370,9 +394,9 @@ func onClick(ctx context.Context, item *systray.MenuItem, fn 
func(ctx context.Co // watchIPNBus subscribes to the tailscale event bus and sends state updates to chState. // This method does not return. -func watchIPNBus(ctx context.Context) { +func (menu *Menu) watchIPNBus() { for { - if err := watchIPNBusInner(ctx); err != nil { + if err := menu.watchIPNBusInner(); err != nil { log.Println(err) if errors.Is(err, context.Canceled) { // If the context got canceled, we will never be able to @@ -387,15 +411,15 @@ func watchIPNBus(ctx context.Context) { } } -func watchIPNBusInner(ctx context.Context) error { - watcher, err := localClient.WatchIPNBus(ctx, ipn.NotifyInitialState|ipn.NotifyNoPrivateKeys) +func (menu *Menu) watchIPNBusInner() error { + watcher, err := menu.lc.WatchIPNBus(menu.bgCtx, ipn.NotifyNoPrivateKeys) if err != nil { return fmt.Errorf("watching ipn bus: %w", err) } defer watcher.Close() for { select { - case <-ctx.Done(): + case <-menu.bgCtx.Done(): return nil default: n, err := watcher.Next() @@ -411,7 +435,7 @@ func watchIPNBusInner(ctx context.Context) error { rebuild = true } if rebuild { - rebuildCh <- struct{}{} + menu.rebuildCh <- struct{}{} } } } @@ -419,7 +443,7 @@ func watchIPNBusInner(ctx context.Context) error { // copyTailscaleIP copies the first Tailscale IP of the given device to the clipboard // and sends a notification with the copied value. -func copyTailscaleIP(device *ipnstate.PeerStatus) { +func (menu *Menu) copyTailscaleIP(device *ipnstate.PeerStatus) { if device == nil || len(device.TailscaleIPs) == 0 { return } @@ -430,11 +454,11 @@ func copyTailscaleIP(device *ipnstate.PeerStatus) { log.Printf("clipboard error: %v", err) } - sendNotification(fmt.Sprintf("Copied Address for %v", name), ip) + menu.sendNotification(fmt.Sprintf("Copied Address for %v", name), ip) } // sendNotification sends a desktop notification with the given title and content. -func sendNotification(title, content string) { +func (menu *Menu) sendNotification(title, content string) { conn, err := dbus.SessionBus() if err != nil { log.Printf("dbus: %v", err) @@ -443,13 +467,17 @@ func sendNotification(title, content string) { timeout := 3 * time.Second obj := conn.Object("org.freedesktop.Notifications", "/org/freedesktop/Notifications") call := obj.Call("org.freedesktop.Notifications.Notify", 0, "Tailscale", uint32(0), - appIcon.Name(), title, content, []string{}, map[string]dbus.Variant{}, int32(timeout.Milliseconds())) + menu.notificationIcon.Name(), title, content, []string{}, map[string]dbus.Variant{}, int32(timeout.Milliseconds())) if call.Err != nil { log.Printf("dbus: %v", call.Err) } } func (menu *Menu) rebuildExitNodeMenu(ctx context.Context) { + if menu.status == nil { + return + } + status := menu.status menu.exitNodes = systray.AddMenuItem("Exit Nodes", "") time.Sleep(newMenuDelay) @@ -469,7 +497,7 @@ func (menu *Menu) rebuildExitNodeMenu(ctx context.Context) { // Show recommended exit node if available. if status.Self.CapMap.Contains(tailcfg.NodeAttrSuggestExitNodeUI) { - sugg, err := localClient.SuggestExitNode(ctx) + sugg, err := menu.lc.SuggestExitNode(ctx) if err == nil { title := "Recommended: " if loc := sugg.Location; loc.Valid() && loc.Country() != "" { @@ -659,7 +687,15 @@ func newMullvadPeers(status *ipnstate.Status) mullvadPeers { return mullvadPeers{countries} } -func onExit() { +// onExit is called by the systray package when the menu is exiting. 
+func (menu *Menu) onExit() { log.Printf("exiting") - os.Remove(appIcon.Name()) + if menu.bgCancel != nil { + menu.bgCancel() + } + if menu.eventCancel != nil { + menu.eventCancel() + } + + os.Remove(menu.notificationIcon.Name()) } From c43c5ca003d64a2250aafc4530cfb074be43d535 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Fri, 27 Dec 2024 12:34:16 -0800 Subject: [PATCH 0261/1708] cmd/systray: properly set tooltip on different platforms On Linux, systray.SetTitle actually seems to set the tooltip on all desktops I've tested on. But on macOS, it actually does set a title that is always displayed in the systray area next to the icon. This change should properly set the tooltip across platforms. Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- cmd/systray/systray.go | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/cmd/systray/systray.go b/cmd/systray/systray.go index 5f498f35f..0d6f87916 100644 --- a/cmd/systray/systray.go +++ b/cmd/systray/systray.go @@ -199,14 +199,14 @@ func (menu *Menu) rebuild() { case ipn.Running.String(): if menu.status.ExitNodeStatus != nil && !menu.status.ExitNodeStatus.ID.IsZero() { if menu.status.ExitNodeStatus.Online { - systray.SetTitle("Using exit node") + setTooltip("Using exit node") setAppIcon(exitNodeOnline) } else { - systray.SetTitle("Exit node offline") + setTooltip("Exit node offline") setAppIcon(exitNodeOffline) } } else { - systray.SetTitle(fmt.Sprintf("Connected to %s", menu.status.CurrentTailnet.Name)) + setTooltip(fmt.Sprintf("Connected to %s", menu.status.CurrentTailnet.Name)) setAppIcon(connected) } menu.connect.SetTitle("Connected") @@ -214,10 +214,10 @@ func (menu *Menu) rebuild() { menu.disconnect.Show() menu.disconnect.Enable() case ipn.Starting.String(): - systray.SetTitle("Connecting") + setTooltip("Connecting") setAppIcon(loading) default: - systray.SetTitle("Disconnected") + setTooltip("Disconnected") setAppIcon(disconnected) } @@ -312,6 +312,16 @@ func setRemoteIcon(menu *systray.MenuItem, urlStr string) { } } +// setTooltip sets the tooltip text for the systray icon. +func setTooltip(text string) { + if runtime.GOOS == "darwin" || runtime.GOOS == "windows" { + systray.SetTooltip(text) + } else { + // on Linux, SetTitle actually sets the tooltip + systray.SetTitle(text) + } +} + // eventLoop is the main event loop for handling click events on menu items // and responding to Tailscale state changes. // This method does not return until ctx.Done is closed. From 30d3e7b2429bc6e2226c365dd4e92f211e09147f Mon Sep 17 00:00:00 2001 From: Erisa A Date: Mon, 30 Dec 2024 17:22:48 +0000 Subject: [PATCH 0262/1708] scripts/install.sh: add special case for Parrot Security (#14487) Their `os-release` doesn't follow convention. Fixes #10778 Signed-off-by: Erisa A --- scripts/installer.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/scripts/installer.sh b/scripts/installer.sh index 8d1fc0212..bdd425539 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -68,6 +68,14 @@ main() { if [ -z "${VERSION_ID:-}" ]; then # rolling release. If you haven't kept current, that's on you. APT_KEY_TYPE="keyring" + # Parrot Security is a special case that uses ID=debian + elif [ "$NAME" = "Parrot Security" ]; then + # All versions new enough to have this behaviour prefer keyring + # and their VERSION_ID is not consistent with Debian. 
+ APT_KEY_TYPE="keyring" + # They don't specify the Debian version they're based off in os-release + # but Parrot 6 is based on Debian 12 Bookworm. + VERSION=bookworm elif [ "$VERSION_ID" -lt 11 ]; then APT_KEY_TYPE="legacy" else From ff095606ccff083160eb01a8a4cc062cacfe1a33 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 19 Nov 2024 20:17:58 -0800 Subject: [PATCH 0263/1708] all: add means to set device posture attributes from node Updates tailscale/corp#24690 Updates #4077 Change-Id: I05fe799beb1d2a71d1ec3ae08744cc68bcadae2a Signed-off-by: Brad Fitzpatrick --- control/controlclient/direct.go | 50 +++++++++++++++++++++++++++++++++ control/controlclient/noise.go | 7 +++-- ipn/ipnlocal/local.go | 14 +++++++++ ipn/localapi/localapi.go | 28 ++++++++++++++++++ tailcfg/tailcfg.go | 28 ++++++++++++++++++ 5 files changed, 125 insertions(+), 2 deletions(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 9cbd0e14e..dd361c4a2 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1643,6 +1643,56 @@ func (c *Direct) ReportHealthChange(w *health.Warnable, us *health.UnhealthyStat res.Body.Close() } +// SetDeviceAttrs does a synchronous call to the control plane to update +// the node's attributes. +// +// See docs on [tailcfg.SetDeviceAttributesRequest] for background. +func (c *Auto) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpdate) error { + return c.direct.SetDeviceAttrs(ctx, attrs) +} + +// SetDeviceAttrs does a synchronous call to the control plane to update +// the node's attributes. +// +// See docs on [tailcfg.SetDeviceAttributesRequest] for background. +func (c *Direct) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpdate) error { + nc, err := c.getNoiseClient() + if err != nil { + return err + } + nodeKey, ok := c.GetPersist().PublicNodeKeyOK() + if !ok { + return errors.New("no node key") + } + if c.panicOnUse { + panic("tainted client") + } + req := &tailcfg.SetDeviceAttributesRequest{ + NodeKey: nodeKey, + Version: tailcfg.CurrentCapabilityVersion, + Update: attrs, + } + + // TODO(bradfitz): unify the callers using doWithBody vs those using + // DoNoiseRequest. There seems to be a ~50/50 split and they're very close, + // but doWithBody sets the load balancing header and auto-JSON-encodes the + // body, but DoNoiseRequest is exported. Clean it up so they're consistent + // one way or another. + + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + res, err := nc.doWithBody(ctx, "PATCH", "/machine/set-device-attr", nodeKey, req) + if err != nil { + return err + } + defer res.Body.Close() + all, _ := io.ReadAll(res.Body) + if res.StatusCode != 200 { + return fmt.Errorf("HTTP error from control plane: %v: %s", res.Status, all) + } + return nil +} + func addLBHeader(req *http.Request, nodeKey key.NodePublic) { if !nodeKey.IsZero() { req.Header.Add(tailcfg.LBHeader, nodeKey.String()) diff --git a/control/controlclient/noise.go b/control/controlclient/noise.go index 2e7c70fd1..db77014a6 100644 --- a/control/controlclient/noise.go +++ b/control/controlclient/noise.go @@ -380,17 +380,20 @@ func (nc *NoiseClient) dial(ctx context.Context) (*noiseconn.Conn, error) { // post does a POST to the control server at the given path, JSON-encoding body. // The provided nodeKey is an optional load balancing hint. 
func (nc *NoiseClient) post(ctx context.Context, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { + return nc.doWithBody(ctx, "POST", path, nodeKey, body) +} + +func (nc *NoiseClient) doWithBody(ctx context.Context, method, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { jbody, err := json.Marshal(body) if err != nil { return nil, err } - req, err := http.NewRequestWithContext(ctx, "POST", "https://"+nc.host+path, bytes.NewReader(jbody)) + req, err := http.NewRequestWithContext(ctx, method, "https://"+nc.host+path, bytes.NewReader(jbody)) if err != nil { return nil, err } addLBHeader(req, nodeKey) req.Header.Set("Content-Type", "application/json") - conn, err := nc.getConn(ctx) if err != nil { return nil, err diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index f456d4984..d6daf3535 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6408,6 +6408,20 @@ func (b *LocalBackend) SetExpirySooner(ctx context.Context, expiry time.Time) er return cc.SetExpirySooner(ctx, expiry) } +// SetDeviceAttrs does a synchronous call to the control plane to update +// the node's attributes. +// +// See docs on [tailcfg.SetDeviceAttributesRequest] for background. +func (b *LocalBackend) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpdate) error { + b.mu.Lock() + cc := b.ccAuto + b.mu.Unlock() + if cc == nil { + return errors.New("not running") + } + return cc.SetDeviceAttrs(ctx, attrs) +} + // exitNodeCanProxyDNS reports the DoH base URL ("http://foo/dns-query") without query parameters // to exitNodeID's DoH service, if available. // diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index c14a4bdf2..831f6a9b6 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -83,6 +83,7 @@ var handler = map[string]localAPIHandler{ // The other /localapi/v0/NAME handlers are exact matches and contain only NAME // without a trailing slash: + "alpha-set-device-attrs": (*Handler).serveSetDeviceAttrs, // see tailscale/corp#24690 "bugreport": (*Handler).serveBugReport, "check-ip-forwarding": (*Handler).serveCheckIPForwarding, "check-prefs": (*Handler).serveCheckPrefs, @@ -446,6 +447,33 @@ func (h *Handler) serveWhoIs(w http.ResponseWriter, r *http.Request) { h.serveWhoIsWithBackend(w, r, h.b) } +// serveSetDeviceAttrs is (as of 2024-12-30) an experimental LocalAPI handler to +// set device attributes via the control plane. +// +// See tailscale/corp#24690. +func (h *Handler) serveSetDeviceAttrs(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + if !h.PermitWrite { + http.Error(w, "set-device-attrs access denied", http.StatusForbidden) + return + } + if r.Method != "PATCH" { + http.Error(w, "only PATCH allowed", http.StatusMethodNotAllowed) + return + } + var req map[string]any + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if err := h.b.SetDeviceAttrs(ctx, req); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, "{}\n") +} + // localBackendWhoIsMethods is the subset of ipn.LocalBackend as needed // by the localapi WhoIs method. 
type localBackendWhoIsMethods interface { diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index ad07cff28..4c9cd59d9 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2455,6 +2455,34 @@ type HealthChangeRequest struct { NodeKey key.NodePublic } +// SetDeviceAttributesRequest is a request to update the +// current node's device posture attributes. +// +// As of 2024-12-30, this is an experimental dev feature +// for internal testing. See tailscale/corp#24690. +type SetDeviceAttributesRequest struct { + // Version is the current binary's [CurrentCapabilityVersion]. + Version CapabilityVersion + + // NodeKey identifies the node to modify. It should be the currently active + // node and is an error if not. + NodeKey key.NodePublic + + // Update is a map of device posture attributes to update. + // Attributes not in the map are left unchanged. + Update AttrUpdate +} + +// AttrUpdate is a map of attributes to update. +// Attributes not in the map are left unchanged. +// The value can be a string, float64, bool, or nil to delete. +// +// See https://tailscale.com/s/api-device-posture-attrs. +// +// TODO(bradfitz): add struct type for specifying optional associated data +// for each attribute value, like an expiry time? +type AttrUpdate map[string]any + // SSHPolicy is the policy for how to handle incoming SSH connections // over Tailscale. type SSHPolicy struct { From 03b9361f479a86b39b272c6a898b84983948aec8 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 3 Jan 2025 08:35:25 -0800 Subject: [PATCH 0264/1708] ipn: update reference to Notify's Swift definition Updates #cleanup Signed-off-by: Brad Fitzpatrick --- ipn/backend.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipn/backend.go b/ipn/backend.go index 91a35df0d..ef0700a70 100644 --- a/ipn/backend.go +++ b/ipn/backend.go @@ -147,7 +147,7 @@ type Notify struct { // any changes to the user in the UI. Health *health.State `json:",omitempty"` - // type is mirrored in xcode/Shared/IPN.swift + // type is mirrored in xcode/IPN/Core/LocalAPI/Model/LocalAPIModel.swift } func (n Notify) String() string { From e3bcb2ec83a45405f26391b8b10e47a66284d100 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 3 Jan 2025 08:49:29 -0800 Subject: [PATCH 0265/1708] ipn/ipnlocal: use context.CancelFunc type for doc clarity Using context.CancelFunc as the type (instead of func()) answers questions like whether it's okay to call it multiple times, whether it blocks, etc. And that's the type it actually is in this case. 
Updates #cleanup Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index d6daf3535..9e8886404 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -163,7 +163,7 @@ type watchSession struct { ch chan *ipn.Notify owner ipnauth.Actor // or nil sessionID string - cancel func() // call to signal that the session must be terminated + cancel context.CancelFunc // to shut down the session } // LocalBackend is the glue between the major pieces of the Tailscale From 17b881538ad4ded64b655c2b901a70628b00e921 Mon Sep 17 00:00:00 2001 From: Jason Barnett Date: Sun, 22 Dec 2024 13:18:40 -0700 Subject: [PATCH 0266/1708] wgengine/router: refactor udm-pro into broader ubnt support Fixes #14453 Signed-off-by: Jason Barnett --- version/distro/distro.go | 53 ++++------------------------ wgengine/router/router_linux.go | 14 ++++---- wgengine/router/router_linux_test.go | 6 ++-- 3 files changed, 17 insertions(+), 56 deletions(-) diff --git a/version/distro/distro.go b/version/distro/distro.go index 8128ce395..f7997e1d9 100644 --- a/version/distro/distro.go +++ b/version/distro/distro.go @@ -9,7 +9,6 @@ import ( "os" "runtime" "strconv" - "strings" "tailscale.com/types/lazy" "tailscale.com/util/lineiter" @@ -31,7 +30,7 @@ const ( WDMyCloud = Distro("wdmycloud") Unraid = Distro("unraid") Alpine = Distro("alpine") - UDMPro = Distro("udmpro") + UBNT = Distro("ubnt") // Ubiquiti Networks ) var distro lazy.SyncValue[Distro] @@ -77,9 +76,12 @@ func linuxDistro() Distro { case have("/usr/local/bin/freenas-debug"): // TrueNAS Scale runs on debian return TrueNAS - case isUDMPro(): - // UDM-Pro runs on debian - return UDMPro + case have("/usr/bin/ubnt-device-info"): + // UBNT runs on Debian-based systems. This MUST be checked before Debian. + // + // Currently supported product families: + // - UDM (UniFi Dream Machine, UDM-Pro) + return UBNT case have("/etc/debian_version"): return Debian case have("/etc/arch-release"): @@ -152,44 +154,3 @@ func DSMVersion() int { return 0 }) } - -// isUDMPro checks a couple of files known to exist on a UDM-Pro and returns -// true if the expected content exists in the files. -func isUDMPro() bool { - // This is a performance guardrail against trying to load both - // /etc/board.info and /sys/firmware/devicetree/base/soc/board-cfg/id when - // not running on Debian so we don't make unnecessary calls in situations - // where we definitely are NOT on a UDM Pro. In other words, the have() call - // is much cheaper than the two os.ReadFile() in fileContainsAnyString(). - // That said, on Debian systems we will still be making the two - // os.ReadFile() in fileContainsAnyString(). - if !have("/etc/debian_version") { - return false - } - if exists, err := fileContainsAnyString("/etc/board.info", "UDMPRO", "Dream Machine PRO"); err == nil && exists { - return true - } - if exists, err := fileContainsAnyString("/sys/firmware/devicetree/base/soc/board-cfg/id", "udm pro"); err == nil && exists { - return true - } - return false -} - -// fileContainsAnyString is used to determine if one or more of the provided -// strings exists in a file. This is not efficient for larger files. If you want -// to use this function to parse large files, please refactor to use -// `io.LimitedReader`. 
-func fileContainsAnyString(filePath string, searchStrings ...string) (bool, error) { - data, err := os.ReadFile(filePath) - if err != nil { - return false, err - } - - content := string(data) - for _, searchString := range searchStrings { - if strings.Contains(content, searchString) { - return true, nil - } - } - return false, nil -} diff --git a/wgengine/router/router_linux.go b/wgengine/router/router_linux.go index e154a30fa..80191b248 100644 --- a/wgengine/router/router_linux.go +++ b/wgengine/router/router_linux.go @@ -1184,7 +1184,7 @@ var ( ) // baseIPRules are the policy routing rules that Tailscale uses, when not -// running on a UDM-Pro. +// running on a UBNT device. // // The priority is the value represented here added to r.ipPolicyPrefBase, // which is usually 5200. @@ -1236,15 +1236,15 @@ var baseIPRules = []netlink.Rule{ // usual rules (pref 32766 and 32767, ie. main and default). } -// udmProIPRules are the policy routing rules that Tailscale uses, when running -// on a UDM-Pro. +// ubntIPRules are the policy routing rules that Tailscale uses, when running +// on a UBNT device. // // The priority is the value represented here added to // r.ipPolicyPrefBase, which is usually 5200. // // This represents an experiment that will be used to gather more information. // If this goes well, Tailscale may opt to use this for all of Linux. -var udmProIPRules = []netlink.Rule{ +var ubntIPRules = []netlink.Rule{ // non-fwmark packets fall through to the usual rules (pref 32766 and 32767, // ie. main and default). { @@ -1256,10 +1256,10 @@ var udmProIPRules = []netlink.Rule{ } // ipRules returns the appropriate list of ip rules to be used by Tailscale. See -// comments on baseIPRules and udmProIPRules for more details. +// comments on baseIPRules and ubntIPRules for more details. func ipRules() []netlink.Rule { - if getDistroFunc() == distro.UDMPro { - return udmProIPRules + if getDistroFunc() == distro.UBNT { + return ubntIPRules } return baseIPRules } diff --git a/wgengine/router/router_linux_test.go b/wgengine/router/router_linux_test.go index 7718f17c4..9a159aea8 100644 --- a/wgengine/router/router_linux_test.go +++ b/wgengine/router/router_linux_test.go @@ -1233,14 +1233,14 @@ func adjustFwmask(t *testing.T, s string) string { return fwmaskAdjustRe.ReplaceAllString(s, "$1") } -func TestIPRulesForUDMPro(t *testing.T) { +func TestIPRulesForUBNT(t *testing.T) { // Override the global getDistroFunc getDistroFunc = func() distro.Distro { - return distro.UDMPro + return distro.UBNT } defer func() { getDistroFunc = distro.Get }() // Restore original after the test - expected := udmProIPRules + expected := ubntIPRules actual := ipRules() if len(expected) != len(actual) { From 1e2e319e7d261a8592744689c362fae16a3969d0 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 3 Jan 2025 10:41:02 -0800 Subject: [PATCH 0267/1708] util/slicesx: add MapKeys and MapValues from golang.org/x/exp/maps Importing the ~deprecated golang.org/x/exp/maps as "xmaps" to not shadow the std "maps" was getting ugly. And using slices.Collect on an iterator is verbose & allocates more. So copy (x)maps.Keys+Values into our slicesx package instead. 
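As a rough illustration of what this looks like at a call site (not part
of the diff below; m is just example data), using the new helper instead
of collecting an iterator:

    package main

    import (
        "fmt"
        "maps"
        "slices"

        "tailscale.com/util/slicesx"
    )

    func main() {
        m := map[string]int{"a": 1, "b": 2, "c": 3}

        // Iterator form: appends as it iterates, growing the backing
        // array as needed, and needs the std "maps" import to not be
        // shadowed by golang.org/x/exp/maps.
        k1 := slices.Collect(maps.Keys(m))

        // Helper form: allocates the result once, sized exactly len(m).
        k2 := slicesx.MapKeys(m)

        slices.Sort(k1)
        slices.Sort(k2)
        fmt.Println(k1, k2) // [a b c] [a b c]
    }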
Updates #cleanup Updates #12912 Updates #14514 (pulled out of that change) Change-Id: I5e68d12729934de93cf4a9cd87c367645f86123a Signed-off-by: Brad Fitzpatrick --- appc/appconnector.go | 7 +++-- appc/appconnector_test.go | 4 +-- cmd/systray/systray.go | 6 ++--- cmd/tailscale/cli/exitnode.go | 4 +-- cmd/tailscale/cli/serve_legacy.go | 6 ++--- cmd/tailscale/cli/serve_v2.go | 7 ++--- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/testwrapper/testwrapper.go | 4 +-- ipn/ipnlocal/local.go | 10 +++---- net/dns/manager.go | 4 +-- util/lru/lru_test.go | 6 ++--- util/slicesx/slicesx.go | 40 ++++++++++++++++++++++++++++ util/syspolicy/source/test_store.go | 3 ++- wgengine/filter/filter_test.go | 4 +-- wgengine/magicsock/endpoint.go | 4 +-- wgengine/magicsock/magicsock_test.go | 4 +-- 17 files changed, 76 insertions(+), 41 deletions(-) diff --git a/appc/appconnector.go b/appc/appconnector.go index 671ced953..063381cd7 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -18,7 +18,6 @@ import ( "sync" "time" - xmaps "golang.org/x/exp/maps" "golang.org/x/net/dns/dnsmessage" "tailscale.com/types/logger" "tailscale.com/types/views" @@ -291,11 +290,11 @@ func (e *AppConnector) updateDomains(domains []string) { } } if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { - e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", xmaps.Keys(oldDomains), toRemove, err) + e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slicesx.MapKeys(oldDomains), toRemove, err) } } - e.logf("handling domains: %v and wildcards: %v", xmaps.Keys(e.domains), e.wildcards) + e.logf("handling domains: %v and wildcards: %v", slicesx.MapKeys(e.domains), e.wildcards) } // updateRoutes merges the supplied routes into the currently configured routes. The routes supplied @@ -354,7 +353,7 @@ func (e *AppConnector) Domains() views.Slice[string] { e.mu.Lock() defer e.mu.Unlock() - return views.SliceOf(xmaps.Keys(e.domains)) + return views.SliceOf(slicesx.MapKeys(e.domains)) } // DomainRoutes returns a map of domains to resolved IP diff --git a/appc/appconnector_test.go b/appc/appconnector_test.go index 7dba8cebd..36ec7a119 100644 --- a/appc/appconnector_test.go +++ b/appc/appconnector_test.go @@ -11,13 +11,13 @@ import ( "testing" "time" - xmaps "golang.org/x/exp/maps" "golang.org/x/net/dns/dnsmessage" "tailscale.com/appc/appctest" "tailscale.com/tstest" "tailscale.com/util/clientmetric" "tailscale.com/util/mak" "tailscale.com/util/must" + "tailscale.com/util/slicesx" ) func fakeStoreRoutes(*RouteInfo) error { return nil } @@ -50,7 +50,7 @@ func TestUpdateDomains(t *testing.T) { // domains are explicitly downcased on set. a.UpdateDomains([]string{"UP.EXAMPLE.COM"}) a.Wait(ctx) - if got, want := xmaps.Keys(a.domains), []string{"up.example.com"}; !slices.Equal(got, want) { + if got, want := slicesx.MapKeys(a.domains), []string{"up.example.com"}; !slices.Equal(got, want) { t.Errorf("got %v; want %v", got, want) } } diff --git a/cmd/systray/systray.go b/cmd/systray/systray.go index 0d6f87916..7da83a7ea 100644 --- a/cmd/systray/systray.go +++ b/cmd/systray/systray.go @@ -12,7 +12,6 @@ import ( "fmt" "io" "log" - "maps" "net/http" "os" "os/signal" @@ -31,6 +30,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" + "tailscale.com/util/slicesx" "tailscale.com/util/stringsx" ) @@ -616,7 +616,7 @@ type mullvadPeers struct { // sortedCountries returns countries containing mullvad nodes, sorted by name. 
func (mp mullvadPeers) sortedCountries() []*mvCountry { - countries := slices.Collect(maps.Values(mp.countries)) + countries := slicesx.MapValues(mp.countries) slices.SortFunc(countries, func(a, b *mvCountry) int { return stringsx.CompareFold(a.name, b.name) }) @@ -632,7 +632,7 @@ type mvCountry struct { // sortedCities returns cities containing mullvad nodes, sorted by name. func (mc *mvCountry) sortedCities() []*mvCity { - cities := slices.Collect(maps.Values(mc.cities)) + cities := slicesx.MapValues(mc.cities) slices.SortFunc(cities, func(a, b *mvCity) int { return stringsx.CompareFold(a.name, b.name) }) diff --git a/cmd/tailscale/cli/exitnode.go b/cmd/tailscale/cli/exitnode.go index 6b9247a7b..941c6be8d 100644 --- a/cmd/tailscale/cli/exitnode.go +++ b/cmd/tailscale/cli/exitnode.go @@ -15,10 +15,10 @@ import ( "github.com/kballard/go-shellquote" "github.com/peterbourgon/ff/v3/ffcli" - xmaps "golang.org/x/exp/maps" "tailscale.com/envknob" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" + "tailscale.com/util/slicesx" ) func exitNodeCmd() *ffcli.Command { @@ -255,7 +255,7 @@ func filterFormatAndSortExitNodes(peers []*ipnstate.PeerStatus, filterBy string) } filteredExitNodes := filteredExitNodes{ - Countries: xmaps.Values(countries), + Countries: slicesx.MapValues(countries), } for _, country := range filteredExitNodes.Countries { diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 443a404ab..5f55b1da6 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -27,6 +27,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" + "tailscale.com/util/slicesx" "tailscale.com/version" ) @@ -707,10 +708,7 @@ func (e *serveEnv) printWebStatusTree(sc *ipn.ServeConfig, hp ipn.HostPort) erro return "", "" } - var mounts []string - for k := range sc.Web[hp].Handlers { - mounts = append(mounts, k) - } + mounts := slicesx.MapKeys(sc.Web[hp].Handlers) sort.Slice(mounts, func(i, j int) bool { return len(mounts[i]) < len(mounts[j]) }) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 009a61198..3e173ce28 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -28,6 +28,7 @@ import ( "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" "tailscale.com/util/mak" + "tailscale.com/util/slicesx" "tailscale.com/version" ) @@ -439,11 +440,7 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN } if sc.Web[hp] != nil { - var mounts []string - - for k := range sc.Web[hp].Handlers { - mounts = append(mounts, k) - } + mounts := slicesx.MapKeys(sc.Web[hp].Handlers) sort.Slice(mounts, func(i, j int) bool { return len(mounts[i]) < len(mounts[j]) }) diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index a8496c411..ff2de13c0 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -202,7 +202,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ W golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ - golang.org/x/exp/maps from tailscale.com/cmd/tailscale/cli+ + golang.org/x/exp/maps from tailscale.com/util/syspolicy/internal/metrics+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from net/http+ diff --git a/cmd/tailscaled/depaware.txt 
b/cmd/tailscaled/depaware.txt index 264f8296f..749c3f310 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -449,7 +449,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ LD golang.org/x/crypto/ssh from github.com/pkg/sftp+ golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ - golang.org/x/exp/maps from tailscale.com/appc+ + golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ diff --git a/cmd/testwrapper/testwrapper.go b/cmd/testwrapper/testwrapper.go index f6ff8f00a..91aea904e 100644 --- a/cmd/testwrapper/testwrapper.go +++ b/cmd/testwrapper/testwrapper.go @@ -29,8 +29,8 @@ import ( "github.com/dave/courtney/tester" "github.com/dave/patsy" "github.com/dave/patsy/vos" - xmaps "golang.org/x/exp/maps" "tailscale.com/cmd/testwrapper/flakytest" + "tailscale.com/util/slicesx" ) const ( @@ -350,7 +350,7 @@ func main() { if len(toRetry) == 0 { continue } - pkgs := xmaps.Keys(toRetry) + pkgs := slicesx.MapKeys(toRetry) sort.Strings(pkgs) nextRun := &nextRun{ attempt: thisRun.attempt + 1, diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9e8886404..c4f68e929 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -38,7 +38,6 @@ import ( "go4.org/mem" "go4.org/netipx" - xmaps "golang.org/x/exp/maps" "golang.org/x/net/dns/dnsmessage" "gvisor.dev/gvisor/pkg/tcpip" "tailscale.com/appc" @@ -104,6 +103,7 @@ import ( "tailscale.com/util/osuser" "tailscale.com/util/rands" "tailscale.com/util/set" + "tailscale.com/util/slicesx" "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/systemd" @@ -2022,7 +2022,7 @@ func (b *LocalBackend) DisablePortMapperForTest() { func (b *LocalBackend) PeersForTest() []tailcfg.NodeView { b.mu.Lock() defer b.mu.Unlock() - ret := xmaps.Values(b.peers) + ret := slicesx.MapValues(b.peers) slices.SortFunc(ret, func(a, b tailcfg.NodeView) int { return cmp.Compare(a.ID(), b.ID()) }) @@ -7375,9 +7375,9 @@ func suggestExitNode(report *netcheck.Report, netMap *netmap.NetworkMap, prevSug // First, try to select an exit node that has the closest DERP home, based on lastReport's DERP latency. 
// If there are no latency values, it returns an arbitrary region if len(candidatesByRegion) > 0 { - minRegion := minLatencyDERPRegion(xmaps.Keys(candidatesByRegion), report) + minRegion := minLatencyDERPRegion(slicesx.MapKeys(candidatesByRegion), report) if minRegion == 0 { - minRegion = selectRegion(views.SliceOf(xmaps.Keys(candidatesByRegion))) + minRegion = selectRegion(views.SliceOf(slicesx.MapKeys(candidatesByRegion))) } regionCandidates, ok := candidatesByRegion[minRegion] if !ok { @@ -7636,5 +7636,5 @@ func vipServicesFromPrefs(prefs ipn.PrefsView) []*tailcfg.VIPService { services[s].Active = true } - return slices.Collect(maps.Values(services)) + return slicesx.MapValues(services) } diff --git a/net/dns/manager.go b/net/dns/manager.go index 13cb2d84e..5ac2f69fc 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -19,7 +19,6 @@ import ( "sync/atomic" "time" - xmaps "golang.org/x/exp/maps" "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/net/dns/resolver" @@ -31,6 +30,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/util/dnsname" + "tailscale.com/util/slicesx" ) var ( @@ -204,7 +204,7 @@ func compileHostEntries(cfg Config) (hosts []*HostEntry) { if len(hostsMap) == 0 { return nil } - hosts = xmaps.Values(hostsMap) + hosts = slicesx.MapValues(hostsMap) slices.SortFunc(hosts, func(a, b *HostEntry) int { if len(a.Hosts) == 0 && len(b.Hosts) == 0 { return 0 diff --git a/util/lru/lru_test.go b/util/lru/lru_test.go index fb538efbe..5500e5e0f 100644 --- a/util/lru/lru_test.go +++ b/util/lru/lru_test.go @@ -10,7 +10,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - xmaps "golang.org/x/exp/maps" + "tailscale.com/util/slicesx" ) func TestLRU(t *testing.T) { @@ -75,7 +75,7 @@ func TestStressEvictions(t *testing.T) { for len(vm) < numKeys { vm[rand.Uint64()] = true } - vals := xmaps.Keys(vm) + vals := slicesx.MapKeys(vm) c := Cache[uint64, bool]{ MaxEntries: cacheSize, @@ -106,7 +106,7 @@ func TestStressBatchedEvictions(t *testing.T) { for len(vm) < numKeys { vm[rand.Uint64()] = true } - vals := xmaps.Keys(vm) + vals := slicesx.MapKeys(vm) c := Cache[uint64, bool]{} diff --git a/util/slicesx/slicesx.go b/util/slicesx/slicesx.go index e0b820eb7..1a7e18d91 100644 --- a/util/slicesx/slicesx.go +++ b/util/slicesx/slicesx.go @@ -148,3 +148,43 @@ func FirstEqual[T comparable](s []T, v T) bool { func LastEqual[T comparable](s []T, v T) bool { return len(s) > 0 && s[len(s)-1] == v } + +// MapKeys returns the values of the map m. +// +// The keys will be in an indeterminate order. +// +// It's equivalent to golang.org/x/exp/maps.Keys, which +// unfortunately has the package name "maps", shadowing +// the std "maps" package. This version exists for clarity +// when reading call sites. +// +// As opposed to slices.Collect(maps.Keys(m)), this allocates +// the returned slice once to exactly the right size, rather than +// appending larger backing arrays as it goes. +func MapKeys[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} + +// MapValues returns the values of the map m. +// +// The values will be in an indeterminate order. +// +// It's equivalent to golang.org/x/exp/maps.Values, which +// unfortunately has the package name "maps", shadowing +// the std "maps" package. This version exists for clarity +// when reading call sites. 
+// +// As opposed to slices.Collect(maps.Values(m)), this allocates +// the returned slice once to exactly the right size, rather than +// appending larger backing arrays as it goes. +func MapValues[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) + for _, v := range m { + r = append(r, v) + } + return r +} diff --git a/util/syspolicy/source/test_store.go b/util/syspolicy/source/test_store.go index 1f19bbb43..e6c09d6b0 100644 --- a/util/syspolicy/source/test_store.go +++ b/util/syspolicy/source/test_store.go @@ -11,6 +11,7 @@ import ( xmaps "golang.org/x/exp/maps" "tailscale.com/util/mak" "tailscale.com/util/set" + "tailscale.com/util/slicesx" "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/setting" ) @@ -418,7 +419,7 @@ func (s *TestStore) NotifyPolicyChanged() { s.mu.RUnlock() return } - cbs := xmaps.Values(s.cbs) + cbs := slicesx.MapValues(s.cbs) s.mu.RUnlock() var wg sync.WaitGroup diff --git a/wgengine/filter/filter_test.go b/wgengine/filter/filter_test.go index f2796d71f..7ffdd5c7b 100644 --- a/wgengine/filter/filter_test.go +++ b/wgengine/filter/filter_test.go @@ -18,7 +18,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "go4.org/netipx" - xmaps "golang.org/x/exp/maps" "tailscale.com/net/flowtrack" "tailscale.com/net/ipset" "tailscale.com/net/packet" @@ -30,6 +29,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/views" "tailscale.com/util/must" + "tailscale.com/util/slicesx" "tailscale.com/wgengine/filter/filtertype" ) @@ -997,7 +997,7 @@ func TestPeerCaps(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := xmaps.Keys(filt.CapsWithValues(netip.MustParseAddr(tt.src), netip.MustParseAddr(tt.dst))) + got := slicesx.MapKeys(filt.CapsWithValues(netip.MustParseAddr(tt.src), netip.MustParseAddr(tt.dst))) slices.Sort(got) slices.Sort(tt.want) if !slices.Equal(got, tt.want) { diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index bbba3181c..df4299b72 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -21,7 +21,6 @@ import ( "sync/atomic" "time" - xmaps "golang.org/x/exp/maps" "golang.org/x/net/ipv4" "golang.org/x/net/ipv6" "tailscale.com/disco" @@ -34,6 +33,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/mak" "tailscale.com/util/ringbuffer" + "tailscale.com/util/slicesx" ) var mtuProbePingSizesV4 []int @@ -587,7 +587,7 @@ func (de *endpoint) addrForWireGuardSendLocked(now mono.Time) (udpAddr netip.Add needPing := len(de.endpointState) > 1 && now.Sub(oldestPing) > wireguardPingInterval if !udpAddr.IsValid() { - candidates := xmaps.Keys(de.endpointState) + candidates := slicesx.MapKeys(de.endpointState) // Randomly select an address to use until we retrieve latency information // and give it a short trustBestAddrUntil time so we avoid flapping between diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 1b3f8ec73..816600451 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -33,7 +33,6 @@ import ( "github.com/tailscale/wireguard-go/device" "github.com/tailscale/wireguard-go/tun/tuntest" "go4.org/mem" - xmaps "golang.org/x/exp/maps" "golang.org/x/net/icmp" "golang.org/x/net/ipv4" "tailscale.com/cmd/testwrapper/flakytest" @@ -66,6 +65,7 @@ import ( "tailscale.com/util/must" "tailscale.com/util/racebuild" "tailscale.com/util/set" + "tailscale.com/util/slicesx" "tailscale.com/util/usermetric" 
"tailscale.com/wgengine/filter" "tailscale.com/wgengine/wgcfg" @@ -1133,7 +1133,7 @@ func testTwoDevicePing(t *testing.T, d *devices) { } } t.Helper() - t.Errorf("missing any connection to %s from %s", wantConns, xmaps.Keys(stats)) + t.Errorf("missing any connection to %s from %s", wantConns, slicesx.MapKeys(stats)) } addrPort := netip.MustParseAddrPort From 402fc9d65f72136bb3e70b1a7e3d0f20443524ad Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 3 Jan 2025 10:10:16 -0800 Subject: [PATCH 0268/1708] control/controlclient: remove optimization that was more convoluted than useful While working on #13390, I ran across this non-idiomatic pointer-to-view and parallel-sorted-map accounting code that was all just to avoid a sort later. But the sort later when building a new netmap.NetworkMap is already a drop in the bucket of CPU compared to how much work & allocs mapSession.netmap and LocalBackend's spamming of the full netmap (potentially tens of thousands of peers, MBs of JSON) out to IPNBus clients for any tiny little change (node changing online status, etc). Removing the parallel sorted slice let everything be simpler to reason about, so this does that. The sort might take a bit more CPU time now in theory, but in practice for any netmap size for which it'd matter, the quadratic netmap IPN bus spam (which we need to fix soon) will overshadow that little sort. Updates #13390 Updates #1909 Change-Id: I3092d7c67dc10b2a0f141496fe0e7e98ccc07712 Signed-off-by: Brad Fitzpatrick --- control/controlclient/map.go | 68 +++++++++++-------------------- control/controlclient/map_test.go | 13 +++--- 2 files changed, 30 insertions(+), 51 deletions(-) diff --git a/control/controlclient/map.go b/control/controlclient/map.go index 787912222..b20a8e170 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -14,7 +14,6 @@ import ( "runtime" "runtime/debug" "slices" - "sort" "strconv" "sync" "time" @@ -31,6 +30,7 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/mak" "tailscale.com/util/set" + "tailscale.com/util/slicesx" "tailscale.com/wgengine/filter" ) @@ -75,8 +75,7 @@ type mapSession struct { lastPrintMap time.Time lastNode tailcfg.NodeView lastCapSet set.Set[tailcfg.NodeCapability] - peers map[tailcfg.NodeID]*tailcfg.NodeView // pointer to view (oddly). same pointers as sortedPeers. - sortedPeers []*tailcfg.NodeView // same pointers as peers, but sorted by Node.ID + peers map[tailcfg.NodeID]tailcfg.NodeView lastDNSConfig *tailcfg.DNSConfig lastDERPMap *tailcfg.DERPMap lastUserProfile map[tailcfg.UserID]tailcfg.UserProfile @@ -366,16 +365,11 @@ var ( patchifiedPeerEqual = clientmetric.NewCounter("controlclient_patchified_peer_equal") ) -// updatePeersStateFromResponseres updates ms.peers and ms.sortedPeers from res. It takes ownership of res. +// updatePeersStateFromResponseres updates ms.peers from resp. +// It takes ownership of resp. 
func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (stats updateStats) { - defer func() { - if stats.removed > 0 || stats.added > 0 { - ms.rebuildSorted() - } - }() - if ms.peers == nil { - ms.peers = make(map[tailcfg.NodeID]*tailcfg.NodeView) + ms.peers = make(map[tailcfg.NodeID]tailcfg.NodeView) } if len(resp.Peers) > 0 { @@ -384,12 +378,12 @@ func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (s keep := make(map[tailcfg.NodeID]bool, len(resp.Peers)) for _, n := range resp.Peers { keep[n.ID] = true - if vp, ok := ms.peers[n.ID]; ok { + lenBefore := len(ms.peers) + ms.peers[n.ID] = n.View() + if len(ms.peers) == lenBefore { stats.changed++ - *vp = n.View() } else { stats.added++ - ms.peers[n.ID] = ptr.To(n.View()) } } for id := range ms.peers { @@ -410,12 +404,12 @@ func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (s } for _, n := range resp.PeersChanged { - if vp, ok := ms.peers[n.ID]; ok { + lenBefore := len(ms.peers) + ms.peers[n.ID] = n.View() + if len(ms.peers) == lenBefore { stats.changed++ - *vp = n.View() } else { stats.added++ - ms.peers[n.ID] = ptr.To(n.View()) } } @@ -427,7 +421,7 @@ func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (s } else { mut.LastSeen = nil } - *vp = mut.View() + ms.peers[nodeID] = mut.View() stats.changed++ } } @@ -436,7 +430,7 @@ func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (s if vp, ok := ms.peers[nodeID]; ok { mut := vp.AsStruct() mut.Online = ptr.To(online) - *vp = mut.View() + ms.peers[nodeID] = mut.View() stats.changed++ } } @@ -488,31 +482,12 @@ func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (s mut.CapMap = v patchCapMap.Add(1) } - *vp = mut.View() + ms.peers[pc.NodeID] = mut.View() } return } -// rebuildSorted rebuilds ms.sortedPeers from ms.peers. It should be called -// after any additions or removals from peers. -func (ms *mapSession) rebuildSorted() { - if ms.sortedPeers == nil { - ms.sortedPeers = make([]*tailcfg.NodeView, 0, len(ms.peers)) - } else { - if len(ms.sortedPeers) > len(ms.peers) { - clear(ms.sortedPeers[len(ms.peers):]) - } - ms.sortedPeers = ms.sortedPeers[:0] - } - for _, p := range ms.peers { - ms.sortedPeers = append(ms.sortedPeers, p) - } - sort.Slice(ms.sortedPeers, func(i, j int) bool { - return ms.sortedPeers[i].ID() < ms.sortedPeers[j].ID() - }) -} - func (ms *mapSession) addUserProfile(nm *netmap.NetworkMap, userID tailcfg.UserID) { if userID == 0 { return @@ -576,7 +551,7 @@ func (ms *mapSession) patchifyPeer(n *tailcfg.Node) (_ *tailcfg.PeerChange, ok b if !ok { return nil, false } - return peerChangeDiff(*was, n) + return peerChangeDiff(was, n) } // peerChangeDiff returns the difference from 'was' to 'n', if possible. @@ -778,14 +753,19 @@ func peerChangeDiff(was tailcfg.NodeView, n *tailcfg.Node) (_ *tailcfg.PeerChang return ret, true } +func (ms *mapSession) sortedPeers() []tailcfg.NodeView { + ret := slicesx.MapValues(ms.peers) + slices.SortFunc(ret, func(a, b tailcfg.NodeView) int { + return cmp.Compare(a.ID(), b.ID()) + }) + return ret +} + // netmap returns a fully populated NetworkMap from the last state seen from // a call to updateStateFromResponse, filling in omitted // information from prior MapResponse values. 
func (ms *mapSession) netmap() *netmap.NetworkMap { - peerViews := make([]tailcfg.NodeView, len(ms.sortedPeers)) - for i, vp := range ms.sortedPeers { - peerViews[i] = *vp - } + peerViews := ms.sortedPeers() nm := &netmap.NetworkMap{ NodeKey: ms.publicNodeKey, diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index 897036a94..ad8f7dd6e 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -340,19 +340,18 @@ func TestUpdatePeersStateFromResponse(t *testing.T) { } ms := newTestMapSession(t, nil) for _, n := range tt.prev { - mak.Set(&ms.peers, n.ID, ptr.To(n.View())) + mak.Set(&ms.peers, n.ID, n.View()) } - ms.rebuildSorted() gotStats := ms.updatePeersStateFromResponse(tt.mapRes) - - got := make([]*tailcfg.Node, len(ms.sortedPeers)) - for i, vp := range ms.sortedPeers { - got[i] = vp.AsStruct() - } if gotStats != tt.wantStats { t.Errorf("got stats = %+v; want %+v", gotStats, tt.wantStats) } + + var got []*tailcfg.Node + for _, vp := range ms.sortedPeers() { + got = append(got, vp.AsStruct()) + } if !reflect.DeepEqual(got, tt.want) { t.Errorf("wrong results\n got: %s\nwant: %s", formatNodes(got), formatNodes(tt.want)) } From ad8d8e37dee33ba99f1e8bdc31faa7567ca179a9 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Fri, 3 Jan 2025 16:01:20 -0800 Subject: [PATCH 0269/1708] go.mod: update github.com/go-json-experiment/json (#14522) Updates tailscale/corp#11038 Signed-off-by: Joe Tsai --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e57573f18..3c389b4de 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/frankban/quicktest v1.14.6 github.com/fxamacker/cbor/v2 v2.6.0 github.com/gaissmai/bart v0.11.1 - github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 + github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 github.com/go-logr/zapr v1.3.0 github.com/go-ole/go-ole v1.3.0 github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 diff --git a/go.sum b/go.sum index 1cbb440fa..2ae4ce09d 100644 --- a/go.sum +++ b/go.sum @@ -350,8 +350,8 @@ github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lK github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= +github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84= +github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= From 47bd0723a08cfeb5ac04aaa23f74dd032909abbd Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 4 Jan 2025 10:14:23 -0800 Subject: [PATCH 0270/1708] all: use iterators in more places instead of Range funcs And misc 
cleanup along the way. Updates #12912 Change-Id: I0cab148b49efc668c6f5cdf09c740b84a713e388 Signed-off-by: Brad Fitzpatrick --- control/controlclient/map.go | 20 +++++++++++--------- envknob/logknob/logknob.go | 6 ++---- envknob/logknob/logknob_test.go | 7 ++----- ipn/ipnlocal/local.go | 21 +++++++++------------ ipn/ipnlocal/serve.go | 5 ++--- tailcfg/tailcfg.go | 4 ++-- types/netmap/netmap.go | 20 +++++--------------- 7 files changed, 33 insertions(+), 50 deletions(-) diff --git a/control/controlclient/map.go b/control/controlclient/map.go index b20a8e170..97d49f90d 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -663,21 +663,23 @@ func peerChangeDiff(was tailcfg.NodeView, n *tailcfg.Node) (_ *tailcfg.PeerChang } case "CapMap": if len(n.CapMap) != was.CapMap().Len() { + // If they have different lengths, they're different. if n.CapMap == nil { pc().CapMap = make(tailcfg.NodeCapMap) } else { pc().CapMap = maps.Clone(n.CapMap) } - break - } - was.CapMap().Range(func(k tailcfg.NodeCapability, v views.Slice[tailcfg.RawMessage]) bool { - nv, ok := n.CapMap[k] - if !ok || !views.SliceEqual(v, views.SliceOf(nv)) { - pc().CapMap = maps.Clone(n.CapMap) - return false + } else { + // If they have the same length, check that all their keys + // have the same values. + for k, v := range was.CapMap().All() { + nv, ok := n.CapMap[k] + if !ok || !views.SliceEqual(v, views.SliceOf(nv)) { + pc().CapMap = maps.Clone(n.CapMap) + break + } } - return true - }) + } case "Tags": if !views.SliceEqual(was.Tags(), views.SliceOf(n.Tags)) { return nil, false diff --git a/envknob/logknob/logknob.go b/envknob/logknob/logknob.go index 350384b86..93302d0d2 100644 --- a/envknob/logknob/logknob.go +++ b/envknob/logknob/logknob.go @@ -11,7 +11,6 @@ import ( "tailscale.com/envknob" "tailscale.com/tailcfg" "tailscale.com/types/logger" - "tailscale.com/types/views" ) // TODO(andrew-d): should we have a package-global registry of logknobs? It @@ -59,7 +58,7 @@ func (lk *LogKnob) Set(v bool) { // about; we use this rather than a concrete type to avoid a circular // dependency. 
type NetMap interface { - SelfCapabilities() views.Slice[tailcfg.NodeCapability] + HasSelfCapability(tailcfg.NodeCapability) bool } // UpdateFromNetMap will enable logging if the SelfNode in the provided NetMap @@ -68,8 +67,7 @@ func (lk *LogKnob) UpdateFromNetMap(nm NetMap) { if lk.capName == "" { return } - - lk.cap.Store(views.SliceContains(nm.SelfCapabilities(), lk.capName)) + lk.cap.Store(nm.HasSelfCapability(lk.capName)) } // Do will call log with the provided format and arguments if any of the diff --git a/envknob/logknob/logknob_test.go b/envknob/logknob/logknob_test.go index b2a376a25..aa4fb4421 100644 --- a/envknob/logknob/logknob_test.go +++ b/envknob/logknob/logknob_test.go @@ -11,6 +11,7 @@ import ( "tailscale.com/envknob" "tailscale.com/tailcfg" "tailscale.com/types/netmap" + "tailscale.com/util/set" ) var testKnob = NewLogKnob( @@ -63,11 +64,7 @@ func TestLogKnob(t *testing.T) { } testKnob.UpdateFromNetMap(&netmap.NetworkMap{ - SelfNode: (&tailcfg.Node{ - Capabilities: []tailcfg.NodeCapability{ - "https://tailscale.com/cap/testing", - }, - }).View(), + AllCaps: set.Of(tailcfg.NodeCapability("https://tailscale.com/cap/testing")), }) if !testKnob.shouldLog() { t.Errorf("expected shouldLog()=true") diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c4f68e929..bf88221ab 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1126,11 +1126,10 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { ss.Capabilities = make([]tailcfg.NodeCapability, 1, cm.Len()+1) ss.Capabilities[0] = "HTTPS://TAILSCALE.COM/s/DEPRECATED-NODE-CAPS#see-https://github.com/tailscale/tailscale/issues/11508" ss.CapMap = make(tailcfg.NodeCapMap, sn.CapMap().Len()) - cm.Range(func(k tailcfg.NodeCapability, v views.Slice[tailcfg.RawMessage]) bool { + for k, v := range cm.All() { ss.CapMap[k] = v.AsSlice() ss.Capabilities = append(ss.Capabilities, k) - return true - }) + } slices.Sort(ss.Capabilities[1:]) } } @@ -1192,10 +1191,9 @@ func (b *LocalBackend) populatePeerStatusLocked(sb *ipnstate.StatusBuilder) { } if cm := p.CapMap(); cm.Len() > 0 { ps.CapMap = make(tailcfg.NodeCapMap, cm.Len()) - cm.Range(func(k tailcfg.NodeCapability, v views.Slice[tailcfg.RawMessage]) bool { + for k, v := range cm.All() { ps.CapMap[k] = v.AsSlice() - return true - }) + } } peerStatusFromNode(ps, p) @@ -5918,15 +5916,15 @@ func (b *LocalBackend) setServeProxyHandlersLocked() { } var backends map[string]bool b.serveConfig.RangeOverWebs(func(_ ipn.HostPort, conf ipn.WebServerConfigView) (cont bool) { - conf.Handlers().Range(func(_ string, h ipn.HTTPHandlerView) (cont bool) { + for _, h := range conf.Handlers().All() { backend := h.Proxy() if backend == "" { // Only create proxy handlers for servers with a proxy backend. - return true + continue } mak.Set(&backends, backend, true) if _, ok := b.serveProxyHandlers.Load(backend); ok { - return true + continue } b.logf("serve: creating a new proxy handler for %s", backend) @@ -5935,11 +5933,10 @@ func (b *LocalBackend) setServeProxyHandlersLocked() { // The backend endpoint (h.Proxy) should have been validated by expandProxyTarget // in the CLI, so just log the error here. 
b.logf("[unexpected] could not create proxy for %v: %s", backend, err) - return true + continue } b.serveProxyHandlers.Store(backend, p) - return true - }) + } return true }) diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 61bed0552..c144fa529 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -326,7 +326,7 @@ func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string if b.serveConfig.Valid() { has = b.serveConfig.Foreground().Contains } - prevConfig.Foreground().Range(func(k string, v ipn.ServeConfigView) (cont bool) { + for k := range prevConfig.Foreground().All() { if !has(k) { for _, sess := range b.notifyWatchers { if sess.sessionID == k { @@ -334,8 +334,7 @@ func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string } } } - return true - }) + } } return nil diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 4c9cd59d9..f762d992d 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -1457,7 +1457,7 @@ const ( // NodeCapMap is a map of capabilities to their optional values. It is valid for // a capability to have no values (nil slice); such capabilities can be tested -// for by using the Contains method. +// for by using the [NodeCapMap.Contains] method. // // See [NodeCapability] for more information on keys. type NodeCapMap map[NodeCapability][]RawMessage @@ -1873,7 +1873,7 @@ type MapResponse struct { // PeersChangedPatch, if non-nil, means that node(s) have changed. // This is a lighter version of the older PeersChanged support that - // only supports certain types of updates + // only supports certain types of updates. // // These are applied after Peers* above, but in practice the // control server should only send these on their own, without diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index 94e872a55..b1ac612de 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -197,21 +197,11 @@ func (nm *NetworkMap) DomainName() string { return nm.Domain } -// SelfCapabilities returns SelfNode.Capabilities if nm and nm.SelfNode are -// non-nil. This is a method so we can use it in envknob/logknob without a -// circular dependency. -func (nm *NetworkMap) SelfCapabilities() views.Slice[tailcfg.NodeCapability] { - var zero views.Slice[tailcfg.NodeCapability] - if nm == nil || !nm.SelfNode.Valid() { - return zero - } - out := nm.SelfNode.Capabilities().AsSlice() - nm.SelfNode.CapMap().Range(func(k tailcfg.NodeCapability, _ views.Slice[tailcfg.RawMessage]) (cont bool) { - out = append(out, k) - return true - }) - - return views.SliceOf(out) +// HasSelfCapability reports whether nm.SelfNode contains capability c. +// +// It exists to satisify an unused (as of 2025-01-04) interface in the logknob package. +func (nm *NetworkMap) HasSelfCapability(c tailcfg.NodeCapability) bool { + return nm.AllCaps.Contains(c) } func (nm *NetworkMap) String() string { From 4b56bf9039eb6bce4da75aa3154a65506e38661f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 4 Jan 2025 11:50:48 -0800 Subject: [PATCH 0271/1708] types/views: remove various Map Range funcs; use iterators everywhere The remaining range funcs in the tree are RangeOverTCPs and RangeOverWebs in ServeConfig; those will be cleaned up separately. 
Updates #12912 Change-Id: Ieeae4864ab088877263c36b805f77aa8e6be938d Signed-off-by: Brad Fitzpatrick --- ipn/serve.go | 101 +++++++++++++++++++------------------------ types/views/views.go | 30 ------------- 2 files changed, 45 insertions(+), 86 deletions(-) diff --git a/ipn/serve.go b/ipn/serve.go index 49e0d9fa3..32e74e688 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -568,54 +568,46 @@ func ExpandProxyTargetValue(target string, supportedSchemes []string, defaultSch // If the returned bool from the given f is false, then this function stops // iterating immediately and does not check other foreground configs. func (v ServeConfigView) RangeOverTCPs(f func(port uint16, _ TCPPortHandlerView) bool) { - parentCont := true - v.TCP().Range(func(k uint16, v TCPPortHandlerView) (cont bool) { - parentCont = f(k, v) - return parentCont - }) - v.Foreground().Range(func(k string, v ServeConfigView) (cont bool) { - if !parentCont { - return false + for k, v := range v.TCP().All() { + if !f(k, v) { + return } - v.TCP().Range(func(k uint16, v TCPPortHandlerView) (cont bool) { - parentCont = f(k, v) - return parentCont - }) - return parentCont - }) + } + for _, conf := range v.Foreground().All() { + for k, v := range conf.TCP().All() { + if !f(k, v) { + return + } + } + } } // RangeOverWebs ranges over both background and foreground Webs. // If the returned bool from the given f is false, then this function stops // iterating immediately and does not check other foreground configs. -func (v ServeConfigView) RangeOverWebs(f func(_ HostPort, conf WebServerConfigView) bool) { - parentCont := true - v.Web().Range(func(k HostPort, v WebServerConfigView) (cont bool) { - parentCont = f(k, v) - return parentCont - }) - v.Foreground().Range(func(k string, v ServeConfigView) (cont bool) { - if !parentCont { - return false +func (v ServeConfigView) RangeOverWebs(f func(HostPort, WebServerConfigView) bool) { + for k, v := range v.Web().All() { + if !f(k, v) { + return + } + } + for _, conf := range v.Foreground().All() { + for k, v := range conf.Web().All() { + if !f(k, v) { + return + } } - v.Web().Range(func(k HostPort, v WebServerConfigView) (cont bool) { - parentCont = f(k, v) - return parentCont - }) - return parentCont - }) + } } // FindTCP returns the first TCP that matches with the given port. It // prefers a foreground match first followed by a background search if none // existed. func (v ServeConfigView) FindTCP(port uint16) (res TCPPortHandlerView, ok bool) { - v.Foreground().Range(func(_ string, v ServeConfigView) (cont bool) { - res, ok = v.TCP().GetOk(port) - return !ok - }) - if ok { - return res, ok + for _, conf := range v.Foreground().All() { + if res, ok := conf.TCP().GetOk(port); ok { + return res, ok + } } return v.TCP().GetOk(port) } @@ -624,12 +616,10 @@ func (v ServeConfigView) FindTCP(port uint16) (res TCPPortHandlerView, ok bool) // prefers a foreground match first followed by a background search if none // existed. 
func (v ServeConfigView) FindWeb(hp HostPort) (res WebServerConfigView, ok bool) { - v.Foreground().Range(func(_ string, v ServeConfigView) (cont bool) { - res, ok = v.Web().GetOk(hp) - return !ok - }) - if ok { - return res, ok + for _, conf := range v.Foreground().All() { + if res, ok := conf.Web().GetOk(hp); ok { + return res, ok + } } return v.Web().GetOk(hp) } @@ -637,14 +627,15 @@ func (v ServeConfigView) FindWeb(hp HostPort) (res WebServerConfigView, ok bool) // HasAllowFunnel returns whether this config has at least one AllowFunnel // set in the background or foreground configs. func (v ServeConfigView) HasAllowFunnel() bool { - return v.AllowFunnel().Len() > 0 || func() bool { - var exists bool - v.Foreground().Range(func(k string, v ServeConfigView) (cont bool) { - exists = v.AllowFunnel().Len() > 0 - return !exists - }) - return exists - }() + if v.AllowFunnel().Len() > 0 { + return true + } + for _, conf := range v.Foreground().All() { + if conf.AllowFunnel().Len() > 0 { + return true + } + } + return false } // FindFunnel reports whether target exists in either the background AllowFunnel @@ -653,12 +644,10 @@ func (v ServeConfigView) HasFunnelForTarget(target HostPort) bool { if v.AllowFunnel().Get(target) { return true } - var exists bool - v.Foreground().Range(func(_ string, v ServeConfigView) (cont bool) { - if exists = v.AllowFunnel().Get(target); exists { - return false + for _, conf := range v.Foreground().All() { + if conf.AllowFunnel().Get(target) { + return true } - return true - }) - return exists + } + return false } diff --git a/types/views/views.go b/types/views/views.go index 19aa69d4a..eae8c0b16 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -415,16 +415,6 @@ func (m *MapSlice[K, V]) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &m.ж) } -// Range calls f for every k,v pair in the underlying map. -// It stops iteration immediately if f returns false. -func (m MapSlice[K, V]) Range(f MapRangeFn[K, Slice[V]]) { - for k, v := range m.ж { - if !f(k, SliceOf(v)) { - return - } - } -} - // AsMap returns a shallow-clone of the underlying map. // // If V is a pointer type, it is the caller's responsibility to make sure the @@ -527,16 +517,6 @@ func (m Map[K, V]) AsMap() map[K]V { // Implementations should return false to stop range. type MapRangeFn[K comparable, V any] func(k K, v V) (cont bool) -// Range calls f for every k,v pair in the underlying map. -// It stops iteration immediately if f returns false. -func (m Map[K, V]) Range(f MapRangeFn[K, V]) { - for k, v := range m.ж { - if !f(k, v) { - return - } - } -} - // All returns an iterator iterating over the keys // and values of m. func (m Map[K, V]) All() iter.Seq2[K, V] { @@ -600,16 +580,6 @@ func (m MapFn[K, T, V]) GetOk(k K) (V, bool) { return m.wrapv(v), ok } -// Range calls f for every k,v pair in the underlying map. -// It stops iteration immediately if f returns false. -func (m MapFn[K, T, V]) Range(f MapRangeFn[K, V]) { - for k, v := range m.ж { - if !f(k, m.wrapv(v)) { - return - } - } -} - // All returns an iterator iterating over the keys and value views of m. func (m MapFn[K, T, V]) All() iter.Seq2[K, V] { return func(yield func(K, V) bool) { From 2b8f02b407934602e120f3e3b096bbb6d32e61ad Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 4 Jan 2025 13:46:09 -0800 Subject: [PATCH 0272/1708] ipn: convert ServeConfig Range methods to iterators These were the last two Range funcs in this repo. 
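Both conversions below follow the same shape: yield everything from the background map, then from each foreground config, and stop as soon as the consumer's yield returns false. A rough, self-contained sketch of that composition (the concat2 and mapSeq helpers are illustrative and are not part of this patch):

package main

import (
	"fmt"
	"iter"
)

// concat2 chains several iter.Seq2 sequences into one, stopping every
// underlying sequence as soon as the consumer's yield returns false.
// Illustrative helper only; it is not part of this patch.
func concat2[K, V any](seqs ...iter.Seq2[K, V]) iter.Seq2[K, V] {
	return func(yield func(K, V) bool) {
		for _, seq := range seqs {
			for k, v := range seq {
				if !yield(k, v) {
					return
				}
			}
		}
	}
}

// mapSeq adapts an ordinary map to an iter.Seq2.
func mapSeq[K comparable, V any](m map[K]V) iter.Seq2[K, V] {
	return func(yield func(K, V) bool) {
		for k, v := range m {
			if !yield(k, v) {
				return
			}
		}
	}
}

func main() {
	background := map[uint16]string{80: "http"}
	foreground := map[uint16]string{443: "https"}

	// Consumers see one flat loop with normal break semantics, roughly how
	// callers of the new TCPs/Webs iterators consume background plus
	// foreground entries.
	for port, name := range concat2(mapSeq(background), mapSeq(foreground)) {
		fmt.Println(port, name)
	}
}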
Updates #12912 Change-Id: I6ba0a911933cb5fc4e43697a9aac58a8035f9622 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 10 ++++----- ipn/serve.go | 52 +++++++++++++++++++++++-------------------- 2 files changed, 32 insertions(+), 30 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index bf88221ab..fc7b997bc 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5882,12 +5882,11 @@ func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn. b.reloadServeConfigLocked(prefs) if b.serveConfig.Valid() { servePorts := make([]uint16, 0, 3) - b.serveConfig.RangeOverTCPs(func(port uint16, _ ipn.TCPPortHandlerView) bool { + for port := range b.serveConfig.TCPs() { if port > 0 { servePorts = append(servePorts, uint16(port)) } - return true - }) + } handlePorts = append(handlePorts, servePorts...) b.setServeProxyHandlersLocked() @@ -5915,7 +5914,7 @@ func (b *LocalBackend) setServeProxyHandlersLocked() { return } var backends map[string]bool - b.serveConfig.RangeOverWebs(func(_ ipn.HostPort, conf ipn.WebServerConfigView) (cont bool) { + for _, conf := range b.serveConfig.Webs() { for _, h := range conf.Handlers().All() { backend := h.Proxy() if backend == "" { @@ -5937,8 +5936,7 @@ func (b *LocalBackend) setServeProxyHandlersLocked() { } b.serveProxyHandlers.Store(backend, p) } - return true - }) + } // Clean up handlers for proxy backends that are no longer present // in configuration. diff --git a/ipn/serve.go b/ipn/serve.go index 32e74e688..e82279db8 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -6,6 +6,7 @@ package ipn import ( "errors" "fmt" + "iter" "net" "net/netip" "net/url" @@ -564,39 +565,42 @@ func ExpandProxyTargetValue(target string, supportedSchemes []string, defaultSch return u.String(), nil } -// RangeOverTCPs ranges over both background and foreground TCPs. -// If the returned bool from the given f is false, then this function stops -// iterating immediately and does not check other foreground configs. -func (v ServeConfigView) RangeOverTCPs(f func(port uint16, _ TCPPortHandlerView) bool) { - for k, v := range v.TCP().All() { - if !f(k, v) { - return - } - } - for _, conf := range v.Foreground().All() { - for k, v := range conf.TCP().All() { - if !f(k, v) { +// TCPs returns an iterator over both background and foreground TCP +// listeners. +// +// The key is the port number. +func (v ServeConfigView) TCPs() iter.Seq2[uint16, TCPPortHandlerView] { + return func(yield func(uint16, TCPPortHandlerView) bool) { + for k, v := range v.TCP().All() { + if !yield(k, v) { return } } + for _, conf := range v.Foreground().All() { + for k, v := range conf.TCP().All() { + if !yield(k, v) { + return + } + } + } } } -// RangeOverWebs ranges over both background and foreground Webs. -// If the returned bool from the given f is false, then this function stops -// iterating immediately and does not check other foreground configs. -func (v ServeConfigView) RangeOverWebs(f func(HostPort, WebServerConfigView) bool) { - for k, v := range v.Web().All() { - if !f(k, v) { - return - } - } - for _, conf := range v.Foreground().All() { - for k, v := range conf.Web().All() { - if !f(k, v) { +// Webs returns an iterator over both background and foreground Web configurations. 
+func (v ServeConfigView) Webs() iter.Seq2[HostPort, WebServerConfigView] { + return func(yield func(HostPort, WebServerConfigView) bool) { + for k, v := range v.Web().All() { + if !yield(k, v) { return } } + for _, conf := range v.Foreground().All() { + for k, v := range conf.Web().All() { + if !yield(k, v) { + return + } + } + } } } From 60930d19c06de9469f4b65f4fd79eacdba3e3ee1 Mon Sep 17 00:00:00 2001 From: Marc Paquette Date: Sat, 28 Dec 2024 19:46:23 -0500 Subject: [PATCH 0273/1708] Update README to reference correct Commit Style URL Change-Id: I2981c685a8905ad58536a8d9b01511d04c3017d1 Signed-off-by: Marc Paquette --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4627d9780..a20132a6a 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,7 @@ Origin](https://en.wikipedia.org/wiki/Developer_Certificate_of_Origin) `Signed-off-by` lines in commits. See `git log` for our commit message style. It's basically the same as -[Go's style](https://github.com/golang/go/wiki/CommitMessage). +[Go's style](https://go.dev/wiki/CommitMessage). ## About Us From 36ea792f06f7871ebd9f0f092e9950835b280f7a Mon Sep 17 00:00:00 2001 From: Marc Paquette Date: Sat, 28 Dec 2024 01:29:34 -0500 Subject: [PATCH 0274/1708] Fix various linting, vet & static check issues Fixes #14492 ----- Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. 
Change-Id: I6dc1068d34bbfa7477e7b7a56a4325b3868c92e1 Signed-off-by: Marc Paquette --- client/tailscale/localclient.go | 2 +- version-embed.go | 1 + wgengine/filter/filter_test.go | 8 ++++---- wgengine/netstack/gro/gro.go | 1 + 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/client/tailscale/localclient.go b/client/tailscale/localclient.go index 34c094a63..4e452f894 100644 --- a/client/tailscale/localclient.go +++ b/client/tailscale/localclient.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build go1.19 +//go:build go1.22 package tailscale diff --git a/version-embed.go b/version-embed.go index 2d517339d..17bf578dd 100644 --- a/version-embed.go +++ b/version-embed.go @@ -26,6 +26,7 @@ var AlpineDockerTag string //go:embed go.toolchain.rev var GoToolchainRev string +//lint:ignore U1000 used by tests + assert_ts_toolchain_match.go w/ right build tags func tailscaleToolchainRev() (gitHash string, ok bool) { bi, ok := debug.ReadBuildInfo() if !ok { diff --git a/wgengine/filter/filter_test.go b/wgengine/filter/filter_test.go index 7ffdd5c7b..e7f71e6a4 100644 --- a/wgengine/filter/filter_test.go +++ b/wgengine/filter/filter_test.go @@ -768,7 +768,7 @@ func ports(s string) PortRange { if err != nil { panic(fmt.Sprintf("invalid NetPortRange %q", s)) } - return PortRange{uint16(first), uint16(last)} + return PortRange{First: uint16(first), Last: uint16(last)} } func netports(netPorts ...string) (ret []NetPortRange) { @@ -814,11 +814,11 @@ func TestMatchesFromFilterRules(t *testing.T) { Dsts: []NetPortRange{ { Net: netip.MustParsePrefix("0.0.0.0/0"), - Ports: PortRange{22, 22}, + Ports: PortRange{First: 22, Last: 22}, }, { Net: netip.MustParsePrefix("::0/0"), - Ports: PortRange{22, 22}, + Ports: PortRange{First: 22, Last: 22}, }, }, Srcs: []netip.Prefix{ @@ -848,7 +848,7 @@ func TestMatchesFromFilterRules(t *testing.T) { Dsts: []NetPortRange{ { Net: netip.MustParsePrefix("1.2.0.0/16"), - Ports: PortRange{22, 22}, + Ports: PortRange{First: 22, Last: 22}, }, }, Srcs: []netip.Prefix{ diff --git a/wgengine/netstack/gro/gro.go b/wgengine/netstack/gro/gro.go index b268534eb..654d17056 100644 --- a/wgengine/netstack/gro/gro.go +++ b/wgengine/netstack/gro/gro.go @@ -6,6 +6,7 @@ package gro import ( "bytes" + "github.com/tailscale/wireguard-go/tun" "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/tcpip" From 2fb361a3cf1146112b80b56ebcaf33be9474c21b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 4 Jan 2025 15:33:29 -0800 Subject: [PATCH 0275/1708] ipn: declare NotifyWatchOpt consts without using iota Updates #cleanup Updates #1909 (noticed while working on that) Change-Id: I505001e5294287ad2a937b4db61d9e67de70fa14 Signed-off-by: Brad Fitzpatrick --- ipn/backend.go | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/ipn/backend.go b/ipn/backend.go index ef0700a70..3e956f473 100644 --- a/ipn/backend.go +++ b/ipn/backend.go @@ -58,23 +58,29 @@ type EngineStatus struct { // to subscribe to. type NotifyWatchOpt uint64 +// NotifyWatchOpt values. +// +// These aren't declared using Go's iota because they're not purely internal to +// the process and iota should not be used for values that are serialized to +// disk or network. In this case, these values come over the network via the +// LocalAPI, a mostly stable API. 
const ( // NotifyWatchEngineUpdates, if set, causes Engine updates to be sent to the // client either regularly or when they change, without having to ask for // each one via Engine.RequestStatus. - NotifyWatchEngineUpdates NotifyWatchOpt = 1 << iota + NotifyWatchEngineUpdates NotifyWatchOpt = 1 << 0 - NotifyInitialState // if set, the first Notify message (sent immediately) will contain the current State + BrowseToURL + SessionID - NotifyInitialPrefs // if set, the first Notify message (sent immediately) will contain the current Prefs - NotifyInitialNetMap // if set, the first Notify message (sent immediately) will contain the current NetMap + NotifyInitialState NotifyWatchOpt = 1 << 1 // if set, the first Notify message (sent immediately) will contain the current State + BrowseToURL + SessionID + NotifyInitialPrefs NotifyWatchOpt = 1 << 2 // if set, the first Notify message (sent immediately) will contain the current Prefs + NotifyInitialNetMap NotifyWatchOpt = 1 << 3 // if set, the first Notify message (sent immediately) will contain the current NetMap - NotifyNoPrivateKeys // if set, private keys that would normally be sent in updates are zeroed out - NotifyInitialDriveShares // if set, the first Notify message (sent immediately) will contain the current Taildrive Shares - NotifyInitialOutgoingFiles // if set, the first Notify message (sent immediately) will contain the current Taildrop OutgoingFiles + NotifyNoPrivateKeys NotifyWatchOpt = 1 << 4 // if set, private keys that would normally be sent in updates are zeroed out + NotifyInitialDriveShares NotifyWatchOpt = 1 << 5 // if set, the first Notify message (sent immediately) will contain the current Taildrive Shares + NotifyInitialOutgoingFiles NotifyWatchOpt = 1 << 6 // if set, the first Notify message (sent immediately) will contain the current Taildrop OutgoingFiles - NotifyInitialHealthState // if set, the first Notify message (sent immediately) will contain the current health.State of the client + NotifyInitialHealthState NotifyWatchOpt = 1 << 7 // if set, the first Notify message (sent immediately) will contain the current health.State of the client - NotifyRateLimit // if set, rate limit spammy netmap updates to every few seconds + NotifyRateLimit NotifyWatchOpt = 1 << 8 // if set, rate limit spammy netmap updates to every few seconds ) // Notify is a communication from a backend (e.g. tailscaled) to a frontend From f13b2bce93b9bd5631df7a86e322c4c7ef1299de Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Jan 2025 08:27:40 -0800 Subject: [PATCH 0276/1708] tailcfg: flesh out docs Updates #cleanup Updates #14542 Change-Id: I41f7ce69d43032e0ba3c866d9c89d2a7eccbf090 Signed-off-by: Brad Fitzpatrick --- tailcfg/tailcfg.go | 71 +++++++++++++++++++++++++++++++++------- tailcfg/tailcfg_clone.go | 4 +-- tailcfg/tailcfg_view.go | 8 ++--- 3 files changed, 65 insertions(+), 18 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index f762d992d..fb643a6df 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -155,35 +155,70 @@ type CapabilityVersion int // - 110: 2024-12-12: removed never-before-used Tailscale SSH public key support (#14373) const CurrentCapabilityVersion CapabilityVersion = 110 -type StableID string - +// ID is an integer ID for a user, node, or login allocated by the +// control plane. +// +// To be nice, control plane servers should not use int64s that are too large to +// fit in a JavaScript number (see JavaScript's Number.MAX_SAFE_INTEGER). 
+// The Tailscale-hosted control plane stopped allocating large integers in +// March 2023 but nodes prior to that may have IDs larger than +// MAX_SAFE_INTEGER (2^53 – 1). +// +// IDs must not be zero or negative. type ID int64 +// UserID is an [ID] for a [User]. type UserID ID func (u UserID) IsZero() bool { return u == 0 } +// LoginID is an [ID] for a [Login]. +// +// It is not used in the Tailscale client, but is used in the control plane. type LoginID ID func (u LoginID) IsZero() bool { return u == 0 } +// NodeID is a unique integer ID for a node. +// +// It's global within a control plane URL ("tailscale up --login-server") and is +// (as of 2025-01-06) never re-used even after a node is deleted. +// +// To be nice, control plane servers should not use int64s that are too large to +// fit in a JavaScript number (see JavaScript's Number.MAX_SAFE_INTEGER). +// The Tailscale-hosted control plane stopped allocating large integers in +// March 2023 but nodes prior to that may have node IDs larger than +// MAX_SAFE_INTEGER (2^53 – 1). +// +// NodeIDs are not stable across control plane URLs. For more stable URLs, +// see [StableNodeID]. type NodeID ID func (u NodeID) IsZero() bool { return u == 0 } -type StableNodeID StableID +// StableNodeID is a string form of [NodeID]. +// +// Different control plane servers should ideally have different StableNodeID +// suffixes for different sites or regions. +// +// Being a string, it's safer to use in JavaScript without worrying about the +// size of the integer, as documented on [NodeID]. +// +// But in general, Tailscale APIs can accept either a [NodeID] integer or a +// [StableNodeID] string when referring to a node. +type StableNodeID string func (u StableNodeID) IsZero() bool { return u == "" } -// User is an IPN user. +// User is a Tailscale user. // // A user can have multiple logins associated with it (e.g. gmail and github oauth). // (Note: none of our UIs support this yet.) @@ -196,23 +231,29 @@ func (u StableNodeID) IsZero() bool { // have a general gmail address login associated with the user. type User struct { ID UserID - LoginName string `json:"-"` // not stored, filled from Login // TODO REMOVE DisplayName string // if non-empty overrides Login field ProfilePicURL string // if non-empty overrides Login field - Logins []LoginID Created time.Time + + // Old, unused fields... + // TODO(bradfitz): remove, once verifying old clients don't need them. + + LoginName string `json:"-"` // not stored, filled from Login // TODO REMOVE + Logins []LoginID } +// Login is a user from a specific identity provider, not associated with any +// particular tailnet. type Login struct { _ structs.Incomparable - ID LoginID - Provider string - LoginName string - DisplayName string - ProfilePicURL string + ID LoginID // unused in the Tailscale client + Provider string // "google", "github", "okta_foo", etc. + LoginName string // an email address or "email-ish" string (like alice@github) + DisplayName string // from the IdP + ProfilePicURL string // from the IdP } -// A UserProfile is display-friendly data for a user. +// A UserProfile is display-friendly data for a [User]. // It includes the LoginName for display purposes but *not* the Provider. // It also includes derived data from one of the user's logins. type UserProfile struct { @@ -283,6 +324,7 @@ func MarshalCapJSON[T any](capRule T) (RawMessage, error) { return RawMessage(string(bs)), nil } +// Node is a Tailscale device in a tailnet. 
type Node struct { ID NodeID StableID StableNodeID @@ -563,6 +605,11 @@ func (n *Node) InitDisplayNames(networkMagicDNSSuffix string) { n.ComputedNameWithHost = nameWithHost } +// MachineStatus is the state of a [Node]'s approval into a tailnet. +// +// A "node" and a "machine" are often 1:1, but technically a Tailscale +// daemon has one machine key and can have multiple nodes (e.g. different +// users on Windows) for that one machine key. type MachineStatus int const ( diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index bf9bac298..78da0aea6 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -33,11 +33,11 @@ func (src *User) Clone() *User { // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _UserCloneNeedsRegeneration = User(struct { ID UserID - LoginName string DisplayName string ProfilePicURL string - Logins []LoginID Created time.Time + LoginName string + Logins []LoginID }{}) // Clone makes a deep copy of Node. diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 6c21e5f45..1c5fda627 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -67,20 +67,20 @@ func (v *UserView) UnmarshalJSON(b []byte) error { } func (v UserView) ID() UserID { return v.ж.ID } -func (v UserView) LoginName() string { return v.ж.LoginName } func (v UserView) DisplayName() string { return v.ж.DisplayName } func (v UserView) ProfilePicURL() string { return v.ж.ProfilePicURL } -func (v UserView) Logins() views.Slice[LoginID] { return views.SliceOf(v.ж.Logins) } func (v UserView) Created() time.Time { return v.ж.Created } +func (v UserView) LoginName() string { return v.ж.LoginName } +func (v UserView) Logins() views.Slice[LoginID] { return views.SliceOf(v.ж.Logins) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _UserViewNeedsRegeneration = User(struct { ID UserID - LoginName string DisplayName string ProfilePicURL string - Logins []LoginID Created time.Time + LoginName string + Logins []LoginID }{}) // View returns a readonly view of Node. From 5da772c6700c81431924d52c493590aecf9c4163 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Jan 2025 10:11:15 -0800 Subject: [PATCH 0277/1708] cmd/tailscale/cli: fix TestUpdatePrefs on macOS It was failing about an unaccepted risk ("mac-app-connector") because it was checking runtime.GOOS ("darwin") instead of the test's env.goos string value ("linux", which doesn't have the warning). 
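The underlying pattern here, threading the effective GOOS through the check's inputs rather than consulting runtime.GOOS directly, is what lets a test pin the OS regardless of the host it runs on. A minimal sketch of that idea (checkEnv and needsConnectorRiskAck are illustrative names, not the actual upCheckEnv plumbing):

package main

import (
	"fmt"
	"runtime"
)

// checkEnv is an illustrative stand-in for a bundle of check inputs; it is
// not the actual upCheckEnv type. Production code fills goos from
// runtime.GOOS, while tests set it explicitly.
type checkEnv struct {
	goos               string
	advertiseConnector bool
}

// needsConnectorRiskAck reports whether the macOS app-connector risk must be
// acknowledged. It consults env.goos, never runtime.GOOS, so a test can
// exercise the darwin branch from linux and vice versa.
func needsConnectorRiskAck(env checkEnv) bool {
	return env.goos == "darwin" && env.advertiseConnector
}

func main() {
	prod := checkEnv{goos: runtime.GOOS, advertiseConnector: true}
	fmt.Println("on this host:", needsConnectorRiskAck(prod))

	// In tests, pin the OS regardless of the machine running the test.
	fmt.Println("as linux:  ", needsConnectorRiskAck(checkEnv{goos: "linux", advertiseConnector: true}))
	fmt.Println("as darwin: ", needsConnectorRiskAck(checkEnv{goos: "darwin", advertiseConnector: true}))
}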
Fixes #14544 Change-Id: I470d86a6ad4bb18e1dd99d334538e56556147835 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/up.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index e86687527..b907257cf 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -379,7 +379,7 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus return false, nil, err } - if runtime.GOOS == "darwin" && env.upArgs.advertiseConnector { + if env.goos == "darwin" && env.upArgs.advertiseConnector { if err := presentRiskToUser(riskMacAppConnector, riskMacAppConnectorMessage, env.upArgs.acceptedRisks); err != nil { return false, nil, err } From b90707665ee7626ee9834ecb0cbd4960a9a52754 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Jan 2025 09:54:11 -0800 Subject: [PATCH 0278/1708] tailcfg: remove unused User fields Fixes #14542 Change-Id: Ifeb0f90c570c1b555af761161f79df75f18ae3f9 Signed-off-by: Brad Fitzpatrick --- tailcfg/tailcfg.go | 6 ---- tailcfg/tailcfg_clone.go | 4 --- tailcfg/tailcfg_test.go | 1 - tailcfg/tailcfg_view.go | 14 ++++------ tstest/integration/testcontrol/testcontrol.go | 28 ++++++++++--------- 5 files changed, 20 insertions(+), 33 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index fb643a6df..1ede0bd9b 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -234,12 +234,6 @@ type User struct { DisplayName string // if non-empty overrides Login field ProfilePicURL string // if non-empty overrides Login field Created time.Time - - // Old, unused fields... - // TODO(bradfitz): remove, once verifying old clients don't need them. - - LoginName string `json:"-"` // not stored, filled from Login // TODO REMOVE - Logins []LoginID } // Login is a user from a specific identity provider, not associated with any diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 78da0aea6..d282719b7 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -26,7 +26,6 @@ func (src *User) Clone() *User { } dst := new(User) *dst = *src - dst.Logins = append(src.Logins[:0:0], src.Logins...) return dst } @@ -36,8 +35,6 @@ var _UserCloneNeedsRegeneration = User(struct { DisplayName string ProfilePicURL string Created time.Time - LoginName string - Logins []LoginID }{}) // Clone makes a deep copy of Node. @@ -302,7 +299,6 @@ func (src *RegisterResponse) Clone() *RegisterResponse { } dst := new(RegisterResponse) *dst = *src - dst.User = *src.User.Clone() dst.NodeKeySignature = append(src.NodeKeySignature[:0:0], src.NodeKeySignature...) 
return dst } diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index 9f8c418a1..b9a204ead 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -666,7 +666,6 @@ func TestCloneUser(t *testing.T) { u *User }{ {"nil_logins", &User{}}, - {"zero_logins", &User{Logins: make([]LoginID, 0)}}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 1c5fda627..774a18258 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -66,12 +66,10 @@ func (v *UserView) UnmarshalJSON(b []byte) error { return nil } -func (v UserView) ID() UserID { return v.ж.ID } -func (v UserView) DisplayName() string { return v.ж.DisplayName } -func (v UserView) ProfilePicURL() string { return v.ж.ProfilePicURL } -func (v UserView) Created() time.Time { return v.ж.Created } -func (v UserView) LoginName() string { return v.ж.LoginName } -func (v UserView) Logins() views.Slice[LoginID] { return views.SliceOf(v.ж.Logins) } +func (v UserView) ID() UserID { return v.ж.ID } +func (v UserView) DisplayName() string { return v.ж.DisplayName } +func (v UserView) ProfilePicURL() string { return v.ж.ProfilePicURL } +func (v UserView) Created() time.Time { return v.ж.Created } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _UserViewNeedsRegeneration = User(struct { @@ -79,8 +77,6 @@ var _UserViewNeedsRegeneration = User(struct { DisplayName string ProfilePicURL string Created time.Time - LoginName string - Logins []LoginID }{}) // View returns a readonly view of Node. @@ -637,7 +633,7 @@ func (v *RegisterResponseView) UnmarshalJSON(b []byte) error { return nil } -func (v RegisterResponseView) User() UserView { return v.ж.User.View() } +func (v RegisterResponseView) User() User { return v.ж.User } func (v RegisterResponseView) Login() Login { return v.ж.Login } func (v RegisterResponseView) NodeKeyExpired() bool { return v.ж.NodeKeyExpired } func (v RegisterResponseView) MachineAuthorized() bool { return v.ж.MachineAuthorized } diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index a6b2e1828..92f74e244 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -6,6 +6,7 @@ package testcontrol import ( "bytes" + "cmp" "context" "encoding/binary" "encoding/json" @@ -476,13 +477,22 @@ func (s *Server) AddFakeNode() { // TODO: send updates to other (non-fake?) 
nodes } -func (s *Server) AllUsers() (users []*tailcfg.User) { +func (s *Server) allUserProfiles() (res []tailcfg.UserProfile) { s.mu.Lock() defer s.mu.Unlock() - for _, u := range s.users { - users = append(users, u.Clone()) + for k, u := range s.users { + up := tailcfg.UserProfile{ + ID: u.ID, + DisplayName: u.DisplayName, + } + if login, ok := s.logins[k]; ok { + up.LoginName = login.LoginName + up.ProfilePicURL = cmp.Or(up.ProfilePicURL, login.ProfilePicURL) + up.DisplayName = cmp.Or(up.DisplayName, login.DisplayName) + } + res = append(res, up) } - return users + return res } func (s *Server) AllNodes() (nodes []*tailcfg.Node) { @@ -523,9 +533,7 @@ func (s *Server) getUser(nodeKey key.NodePublic) (*tailcfg.User, *tailcfg.Login) } user := &tailcfg.User{ ID: id, - LoginName: loginName, DisplayName: displayName, - Logins: []tailcfg.LoginID{login.ID}, } s.users[nodeKey] = user s.logins[nodeKey] = login @@ -1001,13 +1009,7 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, sort.Slice(res.Peers, func(i, j int) bool { return res.Peers[i].ID < res.Peers[j].ID }) - for _, u := range s.AllUsers() { - res.UserProfiles = append(res.UserProfiles, tailcfg.UserProfile{ - ID: u.ID, - LoginName: u.LoginName, - DisplayName: u.DisplayName, - }) - } + res.UserProfiles = s.allUserProfiles() v4Prefix := netip.PrefixFrom(netaddr.IPv4(100, 64, uint8(tailcfg.NodeID(user.ID)>>8), uint8(tailcfg.NodeID(user.ID))), 32) v6Prefix := netip.PrefixFrom(tsaddr.Tailscale4To6(v4Prefix.Addr()), 128) From 07aae18bca5a26e459b3d3f906f4c099c3f75df0 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 3 Jan 2025 14:24:32 -0800 Subject: [PATCH 0279/1708] ipn/ipnlocal, util/goroutines: track goroutines for tests, shutdown Updates #14520 Updates #14517 (in that I pulled this out of there) Change-Id: Ibc28162816e083fcadf550586c06805c76e378fc Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 64 ++++++++++++++++++++++++++------- util/goroutines/goroutines.go | 2 +- util/goroutines/tracker.go | 66 +++++++++++++++++++++++++++++++++++ 3 files changed, 118 insertions(+), 14 deletions(-) create mode 100644 util/goroutines/tracker.go diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index fc7b997bc..0a1126309 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -96,6 +96,7 @@ import ( "tailscale.com/types/views" "tailscale.com/util/deephash" "tailscale.com/util/dnsname" + "tailscale.com/util/goroutines" "tailscale.com/util/httpm" "tailscale.com/util/mak" "tailscale.com/util/multierr" @@ -178,7 +179,7 @@ type watchSession struct { // state machine generates events back out to zero or more components. type LocalBackend struct { // Elements that are thread-safe or constant after construction. - ctx context.Context // canceled by Close + ctx context.Context // canceled by [LocalBackend.Shutdown] ctxCancel context.CancelFunc // cancels ctx logf logger.Logf // general logging keyLogf logger.Logf // for printing list of peers on change @@ -231,6 +232,10 @@ type LocalBackend struct { shouldInterceptTCPPortAtomic syncs.AtomicValue[func(uint16) bool] numClientStatusCalls atomic.Uint32 + // goTracker accounts for all goroutines started by LocalBacked, primarily + // for testing and graceful shutdown purposes. + goTracker goroutines.Tracker + // The mutex protects the following elements. 
mu sync.Mutex conf *conffile.Config // latest parsed config, or nil if not in declarative mode @@ -866,7 +871,7 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { // TODO(raggi,tailscale/corp#22574): authReconfig should be refactored such that we can call the // necessary operations here and avoid the need for asynchronous behavior that is racy and hard // to test here, and do less extra work in these conditions. - go b.authReconfig() + b.goTracker.Go(b.authReconfig) } } @@ -879,7 +884,7 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { want := b.netMap.GetAddresses().Len() if len(b.peerAPIListeners) < want { b.logf("linkChange: peerAPIListeners too low; trying again") - go b.initPeerAPIListener() + b.goTracker.Go(b.initPeerAPIListener) } } } @@ -1004,6 +1009,33 @@ func (b *LocalBackend) Shutdown() { b.ctxCancel() b.e.Close() <-b.e.Done() + b.awaitNoGoroutinesInTest() +} + +func (b *LocalBackend) awaitNoGoroutinesInTest() { + if !testenv.InTest() { + return + } + ctx, cancel := context.WithTimeout(context.Background(), 8*time.Second) + defer cancel() + + ch := make(chan bool, 1) + defer b.goTracker.AddDoneCallback(func() { ch <- true })() + + for { + n := b.goTracker.RunningGoroutines() + if n == 0 { + return + } + select { + case <-ctx.Done(): + // TODO(bradfitz): pass down some TB-like failer interface from + // tests, without depending on testing from here? + // But this is fine in tests too: + panic(fmt.Sprintf("timeout waiting for %d goroutines to stop", n)) + case <-ch: + } + } } func stripKeysFromPrefs(p ipn.PrefsView) ipn.PrefsView { @@ -2152,7 +2184,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { if b.portpoll != nil { b.portpollOnce.Do(func() { - go b.readPoller() + b.goTracker.Go(b.readPoller) }) } @@ -2366,7 +2398,7 @@ func (b *LocalBackend) updateFilterLocked(netMap *netmap.NetworkMap, prefs ipn.P b.e.SetJailedFilter(filter.NewShieldsUpFilter(localNets, logNets, oldJailedFilter, b.logf)) if b.sshServer != nil { - go b.sshServer.OnPolicyChange() + b.goTracker.Go(b.sshServer.OnPolicyChange) } } @@ -2843,7 +2875,7 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A // request every 2 seconds. // TODO(bradfitz): plumb this further and only send a Notify on change. if mask&ipn.NotifyWatchEngineUpdates != 0 { - go b.pollRequestEngineStatus(ctx) + b.goTracker.Go(func() { b.pollRequestEngineStatus(ctx) }) } // TODO(marwan-at-work): streaming background logs? 
@@ -3850,7 +3882,7 @@ func (b *LocalBackend) editPrefsLockedOnEntry(mp *ipn.MaskedPrefs, unlock unlock if mp.EggSet { mp.EggSet = false b.egg = true - go b.doSetHostinfoFilterServices() + b.goTracker.Go(b.doSetHostinfoFilterServices) } p0 := b.pm.CurrentPrefs() p1 := b.pm.CurrentPrefs().AsStruct() @@ -3943,7 +3975,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) if oldp.ShouldSSHBeRunning() && !newp.ShouldSSHBeRunning() { if b.sshServer != nil { - go b.sshServer.Shutdown() + b.goTracker.Go(b.sshServer.Shutdown) b.sshServer = nil } } @@ -4285,8 +4317,14 @@ func (b *LocalBackend) authReconfig() { dcfg := dnsConfigForNetmap(nm, b.peers, prefs, b.keyExpired, b.logf, version.OS()) // If the current node is an app connector, ensure the app connector machine is started b.reconfigAppConnectorLocked(nm, prefs) + closing := b.shutdownCalled b.mu.Unlock() + if closing { + b.logf("[v1] authReconfig: skipping because in shutdown") + return + } + if blocked { b.logf("[v1] authReconfig: blocked, skipping.") return @@ -4751,7 +4789,7 @@ func (b *LocalBackend) initPeerAPIListener() { b.peerAPIListeners = append(b.peerAPIListeners, pln) } - go b.doSetHostinfoFilterServices() + b.goTracker.Go(b.doSetHostinfoFilterServices) } // magicDNSRootDomains returns the subset of nm.DNS.Domains that are the search domains for MagicDNS. @@ -5020,7 +5058,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock // can be shut down if we transition away from Running. if b.captiveCancel == nil { b.captiveCtx, b.captiveCancel = context.WithCancel(b.ctx) - go b.checkCaptivePortalLoop(b.captiveCtx) + b.goTracker.Go(func() { b.checkCaptivePortalLoop(b.captiveCtx) }) } } else if oldState == ipn.Running { // Transitioning away from running. @@ -5272,7 +5310,7 @@ func (b *LocalBackend) requestEngineStatusAndWait() { b.statusLock.Lock() defer b.statusLock.Unlock() - go b.e.RequestStatus() + b.goTracker.Go(b.e.RequestStatus) b.logf("requestEngineStatusAndWait: waiting...") b.statusChanged.Wait() // temporarily releases lock while waiting b.logf("requestEngineStatusAndWait: got status update.") @@ -5383,7 +5421,7 @@ func (b *LocalBackend) setWebClientAtomicBoolLocked(nm *netmap.NetworkMap) { shouldRun := !nm.HasCap(tailcfg.NodeAttrDisableWebClient) wasRunning := b.webClientAtomicBool.Swap(shouldRun) if wasRunning && !shouldRun { - go b.webClientShutdown() // stop web client + b.goTracker.Go(b.webClientShutdown) // stop web client } } @@ -5900,7 +5938,7 @@ func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn. if wire := b.wantIngressLocked(); b.hostinfo != nil && b.hostinfo.WireIngress != wire { b.logf("Hostinfo.WireIngress changed to %v", wire) b.hostinfo.WireIngress = wire - go b.doSetHostinfoFilterServices() + b.goTracker.Go(b.doSetHostinfoFilterServices) } b.setTCPPortsIntercepted(handlePorts) diff --git a/util/goroutines/goroutines.go b/util/goroutines/goroutines.go index 9758b0758..d40cbecb1 100644 --- a/util/goroutines/goroutines.go +++ b/util/goroutines/goroutines.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// The goroutines package contains utilities for getting active goroutines. +// The goroutines package contains utilities for tracking and getting active goroutines. 
package goroutines import ( diff --git a/util/goroutines/tracker.go b/util/goroutines/tracker.go new file mode 100644 index 000000000..044843d33 --- /dev/null +++ b/util/goroutines/tracker.go @@ -0,0 +1,66 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package goroutines + +import ( + "sync" + "sync/atomic" + + "tailscale.com/util/set" +) + +// Tracker tracks a set of goroutines. +type Tracker struct { + started atomic.Int64 // counter + running atomic.Int64 // gauge + + mu sync.Mutex + onDone set.HandleSet[func()] +} + +func (t *Tracker) Go(f func()) { + t.started.Add(1) + t.running.Add(1) + go t.goAndDecr(f) +} + +func (t *Tracker) goAndDecr(f func()) { + defer t.decr() + f() +} + +func (t *Tracker) decr() { + t.running.Add(-1) + + t.mu.Lock() + defer t.mu.Unlock() + for _, f := range t.onDone { + go f() + } +} + +// AddDoneCallback adds a callback to be called in a new goroutine +// whenever a goroutine managed by t (excluding ones from this method) +// finishes. It returns a function to remove the callback. +func (t *Tracker) AddDoneCallback(f func()) (remove func()) { + t.mu.Lock() + defer t.mu.Unlock() + if t.onDone == nil { + t.onDone = set.HandleSet[func()]{} + } + h := t.onDone.Add(f) + return func() { + t.mu.Lock() + defer t.mu.Unlock() + delete(t.onDone, h) + } +} + +func (t *Tracker) RunningGoroutines() int64 { + return t.running.Load() +} + +func (t *Tracker) StartedGoroutines() int64 { + return t.started.Load() +} From 041622c92f124491c7d9ece71efa310adb0f238c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 3 Jan 2025 14:30:02 -0800 Subject: [PATCH 0280/1708] ipn/ipnlocal: move where auto exit node selection happens In the process, because I needed it for testing, make all LocalBackend-managed goroutines be accounted for. And then in tests, verify they're no longer running during LocalBackend.Shutdown. Updates tailscale/corp#19681 Change-Id: Iad873d4df7d30103a4a7863dfacf9e078c77e6a3 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 48 ++++++++++++++++++-------- ipn/ipnlocal/local_test.go | 69 +++++++++++++++++++++++++++----------- 2 files changed, 84 insertions(+), 33 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 0a1126309..4c58ae8ec 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -367,7 +367,7 @@ type LocalBackend struct { allowedSuggestedExitNodes set.Set[tailcfg.StableNodeID] // refreshAutoExitNode indicates if the exit node should be recomputed when the next netcheck report is available. - refreshAutoExitNode bool + refreshAutoExitNode bool // guarded by mu // captiveCtx and captiveCancel are used to control captive portal // detection. They are protected by 'mu' and can be changed during the @@ -1812,8 +1812,9 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo b.send(*notify) } }() - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() + if !b.updateNetmapDeltaLocked(muts) { return false } @@ -1821,14 +1822,8 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo if b.netMap != nil && mutationsAreWorthyOfTellingIPNBus(muts) { nm := ptr.To(*b.netMap) // shallow clone nm.Peers = make([]tailcfg.NodeView, 0, len(b.peers)) - shouldAutoExitNode := shouldAutoExitNode() for _, p := range b.peers { nm.Peers = append(nm.Peers, p) - // If the auto exit node currently set goes offline, find another auto exit node. 
- if shouldAutoExitNode && b.pm.prefs.ExitNodeID() == p.StableID() && p.Online() != nil && !*p.Online() { - b.setAutoExitNodeIDLockedOnEntry(unlock) - return false - } } slices.SortFunc(nm.Peers, func(a, b tailcfg.NodeView) int { return cmp.Compare(a.ID(), b.ID()) @@ -1859,6 +1854,20 @@ func mutationsAreWorthyOfTellingIPNBus(muts []netmap.NodeMutation) bool { return false } +// pickNewAutoExitNode picks a new automatic exit node if needed. +func (b *LocalBackend) pickNewAutoExitNode() { + unlock := b.lockAndGetUnlock() + defer unlock() + + newPrefs := b.setAutoExitNodeIDLockedOnEntry(unlock) + if !newPrefs.Valid() { + // Unchanged. + return + } + + b.send(ipn.Notify{Prefs: &newPrefs}) +} + func (b *LocalBackend) updateNetmapDeltaLocked(muts []netmap.NodeMutation) (handled bool) { if b.netMap == nil || len(b.peers) == 0 { return false @@ -1881,6 +1890,12 @@ func (b *LocalBackend) updateNetmapDeltaLocked(muts []netmap.NodeMutation) (hand mak.Set(&mutableNodes, nv.ID(), n) } m.Apply(n) + + // If our exit node went offline, we need to schedule picking + // a new one. + if mo, ok := m.(netmap.NodeMutationOnline); ok && !mo.Online && n.StableID == b.pm.prefs.ExitNodeID() && shouldAutoExitNode() { + b.goTracker.Go(b.pickNewAutoExitNode) + } } for nid, n := range mutableNodes { b.peers[nid] = n.View() @@ -5542,29 +5557,34 @@ func (b *LocalBackend) setNetInfo(ni *tailcfg.NetInfo) { } } -func (b *LocalBackend) setAutoExitNodeIDLockedOnEntry(unlock unlockOnce) { +func (b *LocalBackend) setAutoExitNodeIDLockedOnEntry(unlock unlockOnce) (newPrefs ipn.PrefsView) { + var zero ipn.PrefsView defer unlock() prefs := b.pm.CurrentPrefs() if !prefs.Valid() { b.logf("[unexpected]: received tailnet exit node ID pref change callback but current prefs are nil") - return + return zero } prefsClone := prefs.AsStruct() newSuggestion, err := b.suggestExitNodeLocked(nil) if err != nil { b.logf("setAutoExitNodeID: %v", err) - return + return zero + } + if prefsClone.ExitNodeID == newSuggestion.ID { + return zero } prefsClone.ExitNodeID = newSuggestion.ID - _, err = b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ + newPrefs, err = b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ Prefs: *prefsClone, ExitNodeIDSet: true, }, unlock) if err != nil { b.logf("setAutoExitNodeID: failed to apply exit node ID preference: %v", err) - return + return zero } + return newPrefs } // setNetMapLocked updates the LocalBackend state to reflect the newly diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index b1be86392..15766741b 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1867,16 +1867,16 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { PreferredDERP: 2, } tests := []struct { - name string - lastSuggestedExitNode tailcfg.StableNodeID - netmap *netmap.NetworkMap - muts []*tailcfg.PeerChange - exitNodeIDWant tailcfg.StableNodeID - updateNetmapDeltaResponse bool - report *netcheck.Report + name string + lastSuggestedExitNode tailcfg.StableNodeID + netmap *netmap.NetworkMap + muts []*tailcfg.PeerChange + exitNodeIDWant tailcfg.StableNodeID + report *netcheck.Report }{ { - name: "selected auto exit node goes offline", + // selected auto exit node goes offline + name: "exit-node-goes-offline", lastSuggestedExitNode: peer1.StableID(), netmap: &netmap.NetworkMap{ Peers: []tailcfg.NodeView{ @@ -1895,12 +1895,12 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { Online: ptr.To(true), }, }, - exitNodeIDWant: peer2.StableID(), - updateNetmapDeltaResponse: false, - report: report, + 
exitNodeIDWant: peer2.StableID(), + report: report, }, { - name: "other exit node goes offline doesn't change selected auto exit node that's still online", + // other exit node goes offline doesn't change selected auto exit node that's still online + name: "other-node-goes-offline", lastSuggestedExitNode: peer2.StableID(), netmap: &netmap.NetworkMap{ Peers: []tailcfg.NodeView{ @@ -1919,9 +1919,8 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { Online: ptr.To(true), }, }, - exitNodeIDWant: peer2.StableID(), - updateNetmapDeltaResponse: true, - report: report, + exitNodeIDWant: peer2.StableID(), + report: report, }, } @@ -1939,6 +1938,20 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { b.lastSuggestedExitNode = tt.lastSuggestedExitNode b.sys.MagicSock.Get().SetLastNetcheckReportForTest(b.ctx, tt.report) b.SetPrefsForTest(b.pm.CurrentPrefs().AsStruct()) + + allDone := make(chan bool, 1) + defer b.goTracker.AddDoneCallback(func() { + b.mu.Lock() + defer b.mu.Unlock() + if b.goTracker.RunningGoroutines() > 0 { + return + } + select { + case allDone <- true: + default: + } + })() + someTime := time.Unix(123, 0) muts, ok := netmap.MutationsFromMapResponse(&tailcfg.MapResponse{ PeersChangedPatch: tt.muts, @@ -1946,16 +1959,34 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { if !ok { t.Fatal("netmap.MutationsFromMapResponse failed") } + if b.pm.prefs.ExitNodeID() != tt.lastSuggestedExitNode { t.Fatalf("did not set exit node ID to last suggested exit node despite auto policy") } + was := b.goTracker.StartedGoroutines() got := b.UpdateNetmapDelta(muts) - if got != tt.updateNetmapDeltaResponse { - t.Fatalf("got %v expected %v from UpdateNetmapDelta", got, tt.updateNetmapDeltaResponse) + if !got { + t.Error("got false from UpdateNetmapDelta") + } + startedGoroutine := b.goTracker.StartedGoroutines() != was + + wantChange := tt.exitNodeIDWant != tt.lastSuggestedExitNode + if startedGoroutine != wantChange { + t.Errorf("got startedGoroutine %v, want %v", startedGoroutine, wantChange) } - if b.pm.prefs.ExitNodeID() != tt.exitNodeIDWant { - t.Fatalf("did not get expected exit node id after UpdateNetmapDelta") + if startedGoroutine { + select { + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for goroutine to finish") + case <-allDone: + } + } + b.mu.Lock() + gotExitNode := b.pm.prefs.ExitNodeID() + b.mu.Unlock() + if gotExitNode != tt.exitNodeIDWant { + t.Fatalf("exit node ID after UpdateNetmapDelta = %v; want %v", gotExitNode, tt.exitNodeIDWant) } }) } From 82e99fcf84e72cd49cea72adb5b20d7888bd6f6c Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 6 Jan 2025 16:02:53 -0800 Subject: [PATCH 0281/1708] client/systray: move cmd/systray to client/systray Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- {cmd => client}/systray/logo.go | 2 +- {cmd => client}/systray/systray.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) rename {cmd => client}/systray/logo.go (99%) rename {cmd => client}/systray/systray.go (99%) diff --git a/cmd/systray/logo.go b/client/systray/logo.go similarity index 99% rename from cmd/systray/logo.go rename to client/systray/logo.go index de60bcdbd..857a8a937 100644 --- a/cmd/systray/logo.go +++ b/client/systray/logo.go @@ -3,7 +3,7 @@ //go:build cgo || !darwin -package main +package systray import ( "bytes" diff --git a/cmd/systray/systray.go b/client/systray/systray.go similarity index 99% rename from cmd/systray/systray.go rename to client/systray/systray.go index 
7da83a7ea..782fc5420 100644 --- a/cmd/systray/systray.go +++ b/client/systray/systray.go @@ -3,8 +3,8 @@ //go:build cgo || !darwin -// The systray command is a minimal Tailscale systray application for Linux. -package main +// Package systray provides a minimal Tailscale systray application. +package systray import ( "context" @@ -44,8 +44,8 @@ var ( hideMullvadCities bool ) -func main() { - menu := new(Menu) +// Run starts the systray menu and blocks until the menu exits. +func (menu *Menu) Run() { menu.updateState() // exit cleanly on SIGINT and SIGTERM From b36984cb16ba297293a1b542e52f9bd23cb31042 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 6 Jan 2025 16:05:14 -0800 Subject: [PATCH 0282/1708] cmd/systray: add cmd/systray back as a small client/systray wrapper Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- cmd/systray/systray.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 cmd/systray/systray.go diff --git a/cmd/systray/systray.go b/cmd/systray/systray.go new file mode 100644 index 000000000..0185a1bc2 --- /dev/null +++ b/cmd/systray/systray.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build cgo || !darwin + +// systray is a minimal Tailscale systray application. +package main + +import ( + "tailscale.com/client/systray" +) + +func main() { + new(systray.Menu).Run() +} From cc4aa435eff873510f7e86e55ead12d7bd6d2346 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 7 Jan 2025 06:43:50 -0800 Subject: [PATCH 0283/1708] go.mod: bump github.com/tailscale/peercred for Solaris This pulls in Solaris/Illumos-specific: https://github.com/tailscale/peercred/pull/10 https://go-review.googlesource.com/c/sys/+/639755 Updates tailscale/peercred#10 (from @nshalman) Change-Id: I8211035fdcf84417009da352927149d68905c0f1 Signed-off-by: Brad Fitzpatrick --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 3c389b4de..e21647684 100644 --- a/go.mod +++ b/go.mod @@ -82,7 +82,7 @@ require ( github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a github.com/tailscale/mkctr v0.0.0-20241111153353-1a38f6676f10 github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 - github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 + github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 github.com/tailscale/wireguard-go v0.0.0-20241113014420-4e883d38c8d3 @@ -101,7 +101,7 @@ require ( golang.org/x/net v0.32.0 golang.org/x/oauth2 v0.16.0 golang.org/x/sync v0.10.0 - golang.org/x/sys v0.28.0 + golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab golang.org/x/term v0.27.0 golang.org/x/time v0.5.0 golang.org/x/tools v0.23.0 diff --git a/go.sum b/go.sum index 2ae4ce09d..353d8d5b8 100644 --- a/go.sum +++ b/go.sum @@ -935,8 +935,8 @@ github.com/tailscale/mkctr v0.0.0-20241111153353-1a38f6676f10 h1:ZB47BgnHcEHQJOD github.com/tailscale/mkctr v0.0.0-20241111153353-1a38f6676f10/go.mod h1:iDx/0Rr9VV/KanSUDpJ6I/ROf0sQ7OqljXc/esl0UIA= github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= -github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 
h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= -github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= +github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= +github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= @@ -1239,8 +1239,8 @@ golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4= +golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= From 6e45a8304eadf1b6aef9898a7484c846ff65e01d Mon Sep 17 00:00:00 2001 From: James Tucker Date: Mon, 6 Jan 2025 15:39:41 -0800 Subject: [PATCH 0284/1708] cmd/derper: improve logging on derp mesh connect Include the mesh log prefix in all mesh connection setup. 
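The change below routes the remaining connection-setup messages through the mesh-prefixed logf instead of bare log.Printf, so failures name the mesh target they belong to. A prefixed Logf of that kind can be built with tailscale.com/types/logger; the sketch below is only illustrative — the exact prefix string and where derper constructs it are assumptions, not taken from this patch.

    package main

    import (
        "fmt"
        "log"

        "tailscale.com/types/logger"
    )

    // meshLogf returns a Logf that tags every line with the mesh peer host,
    // so connection-setup errors are attributable to a specific mesh target.
    func meshLogf(host string) logger.Logf {
        return logger.WithPrefix(log.Printf, fmt.Sprintf("mesh(%q): ", host))
    }

    func main() {
        logf := meshLogf("derp1.example.com") // hypothetical mesh host
        logf("connected in %v", "1.2s")
    }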
Updates tailscale/corp#25653 Signed-off-by: James Tucker --- cmd/derper/mesh.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/cmd/derper/mesh.go b/cmd/derper/mesh.go index ee1807f00..c4218dd94 100644 --- a/cmd/derper/mesh.go +++ b/cmd/derper/mesh.go @@ -47,6 +47,7 @@ func startMeshWithHost(s *derp.Server, host string) error { c.SetURLDialer(func(ctx context.Context, network, addr string) (net.Conn, error) { host, port, err := net.SplitHostPort(addr) if err != nil { + logf("failed to split %q: %v", addr, err) return nil, err } var d net.Dialer @@ -55,15 +56,18 @@ func startMeshWithHost(s *derp.Server, host string) error { subCtx, cancel := context.WithTimeout(ctx, 2*time.Second) defer cancel() vpcHost := base + "-vpc.tailscale.com" - ips, _ := r.LookupIP(subCtx, "ip", vpcHost) + ips, err := r.LookupIP(subCtx, "ip", vpcHost) + if err != nil { + logf("failed to resolve %v: %v", vpcHost, err) + } if len(ips) > 0 { vpcAddr := net.JoinHostPort(ips[0].String(), port) c, err := d.DialContext(subCtx, network, vpcAddr) if err == nil { - log.Printf("connected to %v (%v) instead of %v", vpcHost, ips[0], base) + logf("connected to %v (%v) instead of %v", vpcHost, ips[0], base) return c, nil } - log.Printf("failed to connect to %v (%v): %v; trying non-VPC route", vpcHost, ips[0], err) + logf("failed to connect to %v (%v): %v; trying non-VPC route", vpcHost, ips[0], err) } } return d.DialContext(ctx, network, addr) From f4f57b815bf9804badf449e91a42ff80a08ea59d Mon Sep 17 00:00:00 2001 From: James Tucker Date: Mon, 6 Jan 2025 12:32:13 -0800 Subject: [PATCH 0285/1708] wgengine/magicsock: rebind on EPIPE/ECONNRESET Observed in the wild some macOS machines gain broken sockets coming out of sleep (we observe "time jumped", followed by EPIPE on sendto). The cause of this in the platform is unclear, but the fix is clear: always rebind if the socket is broken. This can also be created artificially on Linux via `ss -K`, and other conditions or software on a system could also lead to the same outcomes. Updates tailscale/corp#25648 Signed-off-by: James Tucker --- wgengine/magicsock/magicsock.go | 29 -------------- wgengine/magicsock/magicsock_notplan9.go | 49 ++++++++++++++++++++++++ wgengine/magicsock/magicsock_plan9.go | 12 ++++++ 3 files changed, 61 insertions(+), 29 deletions(-) create mode 100644 wgengine/magicsock/magicsock_notplan9.go create mode 100644 wgengine/magicsock/magicsock_plan9.go diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index bff905caa..188933c0e 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -21,7 +21,6 @@ import ( "strings" "sync" "sync/atomic" - "syscall" "time" "github.com/tailscale/wireguard-go/conn" @@ -1290,34 +1289,6 @@ func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte, isDisco bool) (sent bool, e return } -// maybeRebindOnError performs a rebind and restun if the error is defined and -// any conditionals are met. -func (c *Conn) maybeRebindOnError(os string, err error) bool { - switch { - case errors.Is(err, syscall.EPERM): - why := "operation-not-permitted-rebind" - switch os { - // We currently will only rebind and restun on a syscall.EPERM if it is experienced - // on a client running darwin. - // TODO(charlotte, raggi): expand os options if required. - case "darwin": - // TODO(charlotte): implement a backoff, so we don't end up in a rebind loop for persistent - // EPERMs. 
- if c.lastEPERMRebind.Load().Before(time.Now().Add(-5 * time.Second)) { - c.logf("magicsock: performing %q", why) - c.lastEPERMRebind.Store(time.Now()) - c.Rebind() - go c.ReSTUN(why) - return true - } - default: - c.logf("magicsock: not performing %q", why) - return false - } - } - return false -} - // sendUDPNetcheck sends b via UDP to addr. It is used exclusively by netcheck. // It returns the number of bytes sent along with any error encountered. It // returns errors.ErrUnsupported if the client is explicitly configured to only diff --git a/wgengine/magicsock/magicsock_notplan9.go b/wgengine/magicsock/magicsock_notplan9.go new file mode 100644 index 000000000..44f08cb1c --- /dev/null +++ b/wgengine/magicsock/magicsock_notplan9.go @@ -0,0 +1,49 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package magicsock + +import ( + "errors" + "syscall" + "time" +) + +// maybeRebindOnError performs a rebind and restun if the error is defined and +// any conditionals are met. +func (c *Conn) maybeRebindOnError(os string, err error) bool { + switch { + case errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ENOTCONN): + // EPIPE/ENOTCONN are common errors when a send fails due to a closed + // socket. There is some platform and version inconsistency in which + // error is returned, but the meaning is the same. + why := "broken-pipe-rebind" + c.logf("magicsock: performing %q", why) + c.Rebind() + go c.ReSTUN(why) + return true + case errors.Is(err, syscall.EPERM): + why := "operation-not-permitted-rebind" + switch os { + // We currently will only rebind and restun on a syscall.EPERM if it is experienced + // on a client running darwin. + // TODO(charlotte, raggi): expand os options if required. + case "darwin": + // TODO(charlotte): implement a backoff, so we don't end up in a rebind loop for persistent + // EPERMs. + if c.lastEPERMRebind.Load().Before(time.Now().Add(-5 * time.Second)) { + c.logf("magicsock: performing %q", why) + c.lastEPERMRebind.Store(time.Now()) + c.Rebind() + go c.ReSTUN(why) + return true + } + default: + c.logf("magicsock: not performing %q", why) + return false + } + } + return false +} diff --git a/wgengine/magicsock/magicsock_plan9.go b/wgengine/magicsock/magicsock_plan9.go new file mode 100644 index 000000000..23f710430 --- /dev/null +++ b/wgengine/magicsock/magicsock_plan9.go @@ -0,0 +1,12 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build plan9 + +package magicsock + +// maybeRebindOnError performs a rebind and restun if the error is defined and +// any conditionals are met. +func (c *Conn) maybeRebindOnError(os string, err error) bool { + return false +} From 6db220b47834d8c45fd2b9ab34eff860aa5a6d72 Mon Sep 17 00:00:00 2001 From: Andrea Gottardo Date: Tue, 7 Jan 2025 10:24:32 -0800 Subject: [PATCH 0286/1708] controlclient: do not set HTTPS port for any private coordination server IP (#14564) Fixes tailscale/tailscale#14563 When creating a NoiseClient, ensure that if any private IP address is provided, with both an `http` scheme and an explicit port number, we do not ever attempt to use HTTPS. We were only handling the case of `127.0.0.1` and `localhost`, but `192.168.x.y` is a private IP as well. This uses the `netip` package to check and adds some logging in case we ever need to troubleshoot this. 
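The check itself is a small net/netip classification; a minimal standalone sketch of it, mirroring what this patch adds (the function name and sample URLs below are illustrative):

    package main

    import (
        "fmt"
        "net/netip"
        "net/url"
    )

    // isPrivateControlHost reports whether the control server URL points at a
    // private or loopback IP, or the literal hostname "localhost". With an
    // http scheme and such a host, the client never sets an HTTPS port.
    func isPrivateControlHost(serverURL string) (bool, error) {
        u, err := url.Parse(serverURL)
        if err != nil {
            return false, err
        }
        addr, _ := netip.ParseAddr(u.Hostname()) // zero Addr (never private) if not an IP literal
        return addr.IsPrivate() || addr.IsLoopback() || u.Hostname() == "localhost", nil
    }

    func main() {
        for _, s := range []string{"http://192.168.2.3:8080", "http://localhost", "https://example.com"} {
            private, _ := isPrivateControlHost(s)
            fmt.Printf("%-28s private=%v\n", s, private)
        }
    }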
Signed-off-by: Andrea Gottardo --- control/controlclient/noise.go | 28 +++++-- control/controlclient/noise_test.go | 118 ++++++++++++++++++++++++++++ 2 files changed, 140 insertions(+), 6 deletions(-) diff --git a/control/controlclient/noise.go b/control/controlclient/noise.go index db77014a6..4bd8cfc25 100644 --- a/control/controlclient/noise.go +++ b/control/controlclient/noise.go @@ -11,6 +11,7 @@ import ( "errors" "math" "net/http" + "net/netip" "net/url" "sync" "time" @@ -111,24 +112,39 @@ type NoiseOpts struct { // netMon may be nil, if non-nil it's used to do faster interface lookups. // dialPlan may be nil func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) { + logf := opts.Logf u, err := url.Parse(opts.ServerURL) if err != nil { return nil, err } + + if u.Scheme != "http" && u.Scheme != "https" { + return nil, errors.New("invalid ServerURL scheme, must be http or https") + } + var httpPort string var httpsPort string + addr, _ := netip.ParseAddr(u.Hostname()) + isPrivateHost := addr.IsPrivate() || addr.IsLoopback() || u.Hostname() == "localhost" if port := u.Port(); port != "" { - // If there is an explicit port specified, trust the scheme and hope for the best - if u.Scheme == "http" { + // If there is an explicit port specified, entirely rely on the scheme, + // unless it's http with a private host in which case we never try using HTTPS. + if u.Scheme == "https" { + httpPort = "" + httpsPort = port + } else if u.Scheme == "http" { httpPort = port httpsPort = "443" - if u.Hostname() == "127.0.0.1" || u.Hostname() == "localhost" { + if isPrivateHost { + logf("setting empty HTTPS port with http scheme and private host %s", u.Hostname()) httpsPort = "" } - } else { - httpPort = "80" - httpsPort = port } + } else if u.Scheme == "http" && isPrivateHost { + // Whenever the scheme is http and the hostname is an IP address, do not set the HTTPS port, + // as there cannot be a TLS certificate issued for an IP, unless it's a public IP. + httpPort = "80" + httpsPort = "" } else { // Otherwise, use the standard ports httpPort = "80" diff --git a/control/controlclient/noise_test.go b/control/controlclient/noise_test.go index 69a3a6a36..dadf237df 100644 --- a/control/controlclient/noise_test.go +++ b/control/controlclient/noise_test.go @@ -54,6 +54,123 @@ func TestNoiseClientHTTP2Upgrade_earlyPayload(t *testing.T) { }.run(t) } +func makeClientWithURL(t *testing.T, url string) *NoiseClient { + nc, err := NewNoiseClient(NoiseOpts{ + Logf: t.Logf, + ServerURL: url, + }) + if err != nil { + t.Fatal(err) + } + return nc +} + +func TestNoiseClientPortsAreSet(t *testing.T) { + tests := []struct { + name string + url string + wantHTTPS string + wantHTTP string + }{ + { + name: "https-url", + url: "https://example.com", + wantHTTPS: "443", + wantHTTP: "80", + }, + { + name: "http-url", + url: "http://example.com", + wantHTTPS: "443", // TODO(bradfitz): questionable; change? + wantHTTP: "80", + }, + { + name: "https-url-custom-port", + url: "https://example.com:123", + wantHTTPS: "123", + wantHTTP: "", + }, + { + name: "http-url-custom-port", + url: "http://example.com:123", + wantHTTPS: "443", // TODO(bradfitz): questionable; change? 
+ wantHTTP: "123", + }, + { + name: "http-loopback-no-port", + url: "http://127.0.0.1", + wantHTTPS: "", + wantHTTP: "80", + }, + { + name: "http-loopback-custom-port", + url: "http://127.0.0.1:8080", + wantHTTPS: "", + wantHTTP: "8080", + }, + { + name: "http-localhost-no-port", + url: "http://localhost", + wantHTTPS: "", + wantHTTP: "80", + }, + { + name: "http-localhost-custom-port", + url: "http://localhost:8080", + wantHTTPS: "", + wantHTTP: "8080", + }, + { + name: "http-private-ip-no-port", + url: "http://192.168.2.3", + wantHTTPS: "", + wantHTTP: "80", + }, + { + name: "http-private-ip-custom-port", + url: "http://192.168.2.3:8080", + wantHTTPS: "", + wantHTTP: "8080", + }, + { + name: "http-public-ip", + url: "http://1.2.3.4", + wantHTTPS: "443", // TODO(bradfitz): questionable; change? + wantHTTP: "80", + }, + { + name: "http-public-ip-custom-port", + url: "http://1.2.3.4:8080", + wantHTTPS: "443", // TODO(bradfitz): questionable; change? + wantHTTP: "8080", + }, + { + name: "https-public-ip", + url: "https://1.2.3.4", + wantHTTPS: "443", + wantHTTP: "80", + }, + { + name: "https-public-ip-custom-port", + url: "https://1.2.3.4:8080", + wantHTTPS: "8080", + wantHTTP: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + nc := makeClientWithURL(t, tt.url) + if nc.httpsPort != tt.wantHTTPS { + t.Errorf("nc.httpsPort = %q; want %q", nc.httpsPort, tt.wantHTTPS) + } + if nc.httpPort != tt.wantHTTP { + t.Errorf("nc.httpPort = %q; want %q", nc.httpPort, tt.wantHTTP) + } + }) + } +} + func (tt noiseClientTest) run(t *testing.T) { serverPrivate := key.NewMachine() clientPrivate := key.NewMachine() @@ -81,6 +198,7 @@ func (tt noiseClientTest) run(t *testing.T) { ServerPubKey: serverPrivate.Public(), ServerURL: hs.URL, Dialer: dialer, + Logf: t.Logf, }) if err != nil { t.Fatal(err) From 2c07f5dfcd3bffd32aa70a08a9d85a90add474f0 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Mon, 6 Jan 2025 13:10:56 -0800 Subject: [PATCH 0287/1708] wgengine/magicsock: refactor maybeRebindOnError Remove the platform specificity, it is unnecessary complexity. Deduplicate repeated code as a result of reduced complexity. Split out error identification code. Update call-sites and tests. Updates #14551 Updates tailscale/corp#25648 Signed-off-by: James Tucker --- wgengine/magicsock/magicsock.go | 27 ++++++-- wgengine/magicsock/magicsock_notplan9.go | 48 +++++--------- wgengine/magicsock/magicsock_plan9.go | 8 +-- wgengine/magicsock/magicsock_test.go | 81 ++++++++++++++++-------- 4 files changed, 97 insertions(+), 67 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 188933c0e..d3075f55d 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -364,9 +364,9 @@ type Conn struct { // wireguard state by its public key. If nil, it's not used. getPeerByKey func(key.NodePublic) (_ wgint.Peer, ok bool) - // lastEPERMRebind tracks the last time a rebind was performed - // after experiencing a syscall.EPERM. - lastEPERMRebind syncs.AtomicValue[time.Time] + // lastErrRebind tracks the last time a rebind was performed after + // experiencing a write error, and is used to throttle the rate of rebinds. + lastErrRebind syncs.AtomicValue[time.Time] // staticEndpoints are user set endpoints that this node should // advertise amongst its wireguard endpoints. 
It is user's @@ -1258,7 +1258,7 @@ func (c *Conn) sendUDPBatch(addr netip.AddrPort, buffs [][]byte) (sent bool, err c.logf("magicsock: %s", errGSO.Error()) err = errGSO.RetryErr } else { - _ = c.maybeRebindOnError(runtime.GOOS, err) + c.maybeRebindOnError(err) } } return err == nil, err @@ -1273,7 +1273,7 @@ func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte, isDisco bool) (sent bool, e sent, err = c.sendUDPStd(ipp, b) if err != nil { metricSendUDPError.Add(1) - _ = c.maybeRebindOnError(runtime.GOOS, err) + c.maybeRebindOnError(err) } else { if sent && !isDisco { switch { @@ -1289,6 +1289,23 @@ func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte, isDisco bool) (sent bool, e return } +// maybeRebindOnError performs a rebind and restun if the error is one that is +// known to be healed by a rebind, and the rebind is not throttled. +func (c *Conn) maybeRebindOnError(err error) { + ok, reason := shouldRebind(err) + if !ok { + return + } + + if c.lastErrRebind.Load().Before(time.Now().Add(-5 * time.Second)) { + c.logf("magicsock: performing rebind due to %q", reason) + c.Rebind() + go c.ReSTUN(reason) + } else { + c.logf("magicsock: not performing %q rebind due to throttle", reason) + } +} + // sendUDPNetcheck sends b via UDP to addr. It is used exclusively by netcheck. // It returns the number of bytes sent along with any error encountered. It // returns errors.ErrUnsupported if the client is explicitly configured to only diff --git a/wgengine/magicsock/magicsock_notplan9.go b/wgengine/magicsock/magicsock_notplan9.go index 44f08cb1c..86d099ee7 100644 --- a/wgengine/magicsock/magicsock_notplan9.go +++ b/wgengine/magicsock/magicsock_notplan9.go @@ -8,42 +8,24 @@ package magicsock import ( "errors" "syscall" - "time" ) -// maybeRebindOnError performs a rebind and restun if the error is defined and -// any conditionals are met. -func (c *Conn) maybeRebindOnError(os string, err error) bool { +// shouldRebind returns if the error is one that is known to be healed by a +// rebind, and if so also returns a resason string for the rebind. +func shouldRebind(err error) (ok bool, reason string) { switch { - case errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ENOTCONN): - // EPIPE/ENOTCONN are common errors when a send fails due to a closed - // socket. There is some platform and version inconsistency in which - // error is returned, but the meaning is the same. - why := "broken-pipe-rebind" - c.logf("magicsock: performing %q", why) - c.Rebind() - go c.ReSTUN(why) - return true + // EPIPE/ENOTCONN are common errors when a send fails due to a closed + // socket. There is some platform and version inconsistency in which + // error is returned, but the meaning is the same. + case errors.Is(err, syscall.EPIPE), errors.Is(err, syscall.ENOTCONN): + return true, "broken-pipe" + + // EPERM is typically caused by EDR software, and has been observed to be + // transient, it seems that some versions of some EDR lose track of sockets + // at times, and return EPERM, but reconnects will establish appropriate + // rights associated with a new socket. case errors.Is(err, syscall.EPERM): - why := "operation-not-permitted-rebind" - switch os { - // We currently will only rebind and restun on a syscall.EPERM if it is experienced - // on a client running darwin. - // TODO(charlotte, raggi): expand os options if required. - case "darwin": - // TODO(charlotte): implement a backoff, so we don't end up in a rebind loop for persistent - // EPERMs. 
- if c.lastEPERMRebind.Load().Before(time.Now().Add(-5 * time.Second)) { - c.logf("magicsock: performing %q", why) - c.lastEPERMRebind.Store(time.Now()) - c.Rebind() - go c.ReSTUN(why) - return true - } - default: - c.logf("magicsock: not performing %q", why) - return false - } + return true, "operation-not-permitted" } - return false + return false, "" } diff --git a/wgengine/magicsock/magicsock_plan9.go b/wgengine/magicsock/magicsock_plan9.go index 23f710430..65714c3e1 100644 --- a/wgengine/magicsock/magicsock_plan9.go +++ b/wgengine/magicsock/magicsock_plan9.go @@ -5,8 +5,8 @@ package magicsock -// maybeRebindOnError performs a rebind and restun if the error is defined and -// any conditionals are met. -func (c *Conn) maybeRebindOnError(os string, err error) bool { - return false +// shouldRebind returns if the error is one that is known to be healed by a +// rebind, and if so also returns a resason string for the rebind. +func shouldRebind(err error) (ok bool, reason string) { + return false, "" } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 816600451..d4c9f0cbb 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -3050,37 +3050,68 @@ func TestMaybeSetNearestDERP(t *testing.T) { } } +func TestShouldRebind(t *testing.T) { + tests := []struct { + err error + ok bool + reason string + }{ + {nil, false, ""}, + {io.EOF, false, ""}, + {io.ErrUnexpectedEOF, false, ""}, + {io.ErrShortBuffer, false, ""}, + {&net.OpError{Err: syscall.EPERM}, true, "operation-not-permitted"}, + {&net.OpError{Err: syscall.EPIPE}, true, "broken-pipe"}, + {&net.OpError{Err: syscall.ENOTCONN}, true, "broken-pipe"}, + } + for _, tt := range tests { + t.Run(fmt.Sprintf("%s-%v", tt.err, tt.ok), func(t *testing.T) { + if got, reason := shouldRebind(tt.err); got != tt.ok || reason != tt.reason { + t.Errorf("errShouldRebind(%v) = %v, %q; want %v, %q", tt.err, got, reason, tt.ok, tt.reason) + } + }) + } +} + func TestMaybeRebindOnError(t *testing.T) { tstest.PanicOnLog() tstest.ResourceCheck(t) - err := fmt.Errorf("outer err: %w", syscall.EPERM) - - t.Run("darwin-rebind", func(t *testing.T) { - conn := newTestConn(t) - defer conn.Close() - rebound := conn.maybeRebindOnError("darwin", err) - if !rebound { - t.Errorf("darwin should rebind on syscall.EPERM") - } - }) - - t.Run("linux-not-rebind", func(t *testing.T) { - conn := newTestConn(t) - defer conn.Close() - rebound := conn.maybeRebindOnError("linux", err) - if rebound { - t.Errorf("linux should not rebind on syscall.EPERM") - } - }) + var rebindErrs []error + if runtime.GOOS != "plan9" { + rebindErrs = append(rebindErrs, + &net.OpError{Err: syscall.EPERM}, + &net.OpError{Err: syscall.EPIPE}, + &net.OpError{Err: syscall.ENOTCONN}, + ) + } + + for _, rebindErr := range rebindErrs { + t.Run(fmt.Sprintf("rebind-%s", rebindErr), func(t *testing.T) { + conn := newTestConn(t) + defer conn.Close() + + before := metricRebindCalls.Value() + conn.maybeRebindOnError(rebindErr) + after := metricRebindCalls.Value() + if before+1 != after { + t.Errorf("should rebind on %#v", rebindErr) + } + }) + } t.Run("no-frequent-rebind", func(t *testing.T) { - conn := newTestConn(t) - defer conn.Close() - conn.lastEPERMRebind.Store(time.Now().Add(-1 * time.Second)) - rebound := conn.maybeRebindOnError("darwin", err) - if rebound { - t.Errorf("darwin should not rebind on syscall.EPERM within 5 seconds of last") + if runtime.GOOS != "plan9" { + err := fmt.Errorf("outer err: %w", syscall.EPERM) + conn := 
newTestConn(t) + defer conn.Close() + conn.lastErrRebind.Store(time.Now().Add(-1 * time.Second)) + before := metricRebindCalls.Value() + conn.maybeRebindOnError(err) + after := metricRebindCalls.Value() + if before != after { + t.Errorf("should not rebind within 5 seconds of last") + } } }) } From 220dc56f01fc2e6bc9974ad3e3949c15e1d406e1 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 7 Jan 2025 11:18:05 -0800 Subject: [PATCH 0288/1708] go.mod: bump tailscale/wireguard-go for Solaris/Illumos Updates #14565 Change-Id: Ifb88ab2ee1997c00c3d4316be04f6f4cc71b2cd3 Signed-off-by: Brad Fitzpatrick --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e21647684..650ec4557 100644 --- a/go.mod +++ b/go.mod @@ -85,7 +85,7 @@ require ( github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 - github.com/tailscale/wireguard-go v0.0.0-20241113014420-4e883d38c8d3 + github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e github.com/tc-hib/winres v0.2.1 github.com/tcnksm/go-httpstat v0.2.0 diff --git a/go.sum b/go.sum index 353d8d5b8..ae6a09262 100644 --- a/go.sum +++ b/go.sum @@ -941,8 +941,8 @@ github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:t github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= -github.com/tailscale/wireguard-go v0.0.0-20241113014420-4e883d38c8d3 h1:dmoPb3dG27tZgMtrvqfD/LW4w7gA6BSWl8prCPNmkCQ= -github.com/tailscale/wireguard-go v0.0.0-20241113014420-4e883d38c8d3/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw= +github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= From de9d4b2f886b6bf5cf0fe9be6c17d080267acef1 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 5 Dec 2024 14:02:30 -0800 Subject: [PATCH 0289/1708] net/netmon: remove extra panic guard around ParseRIB This was an extra defense added for #14201 that is no longer required. Fixes #14201 Signed-off-by: James Tucker --- net/netmon/netmon_darwin.go | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/net/netmon/netmon_darwin.go b/net/netmon/netmon_darwin.go index e89e2d047..cc6301125 100644 --- a/net/netmon/netmon_darwin.go +++ b/net/netmon/netmon_darwin.go @@ -56,18 +56,7 @@ func (m *darwinRouteMon) Receive() (message, error) { if err != nil { return nil, err } - msgs, err := func() (msgs []route.Message, err error) { - defer func() { - // TODO(raggi,#14201): remove once we've got a fix from - // golang/go#70528. 
- msg := recover() - if msg != nil { - msgs = nil - err = fmt.Errorf("panic in route.ParseRIB: %s", msg) - } - }() - return route.ParseRIB(route.RIBTypeRoute, m.buf[:n]) - }() + msgs, err := route.ParseRIB(route.RIBTypeRoute, m.buf[:n]) if err != nil { if debugRouteMessages { m.logf("read %d bytes (% 02x), failed to parse RIB: %v", n, m.buf[:n], err) From 60daa2adb8eb2d496f3dd037d87f34060db3a072 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 6 Jan 2025 16:34:27 -0800 Subject: [PATCH 0290/1708] all: fix golangci-lint errors These erroneously blocked a recent PR, which I fixed by simply re-running CI. But we might as well fix them anyway. These are mostly `printf` to `print` and a couple of `!=` to `!Equal()` Updates #cleanup Signed-off-by: Will Norris --- cmd/addlicense/main.go | 4 ++-- cmd/k8s-operator/proxy.go | 2 +- cmd/tailscale/cli/risks.go | 2 +- cmd/tsconnect/tsconnect.go | 4 ++-- net/tshttpproxy/tshttpproxy_synology.go | 2 +- net/tshttpproxy/tshttpproxy_synology_test.go | 2 +- ssh/tailssh/incubator.go | 8 ++++---- tstest/integration/testcontrol/testcontrol.go | 2 +- 8 files changed, 13 insertions(+), 13 deletions(-) diff --git a/cmd/addlicense/main.go b/cmd/addlicense/main.go index a8fd9dd4a..1cd1b0f19 100644 --- a/cmd/addlicense/main.go +++ b/cmd/addlicense/main.go @@ -18,12 +18,12 @@ var ( ) func usage() { - fmt.Fprintf(os.Stderr, ` + fmt.Fprint(os.Stderr, ` usage: addlicense -file FILE `[1:]) flag.PrintDefaults() - fmt.Fprintf(os.Stderr, ` + fmt.Fprint(os.Stderr, ` addlicense adds a Tailscale license to the beginning of file. It is intended for use with 'go generate', so it also runs a subcommand, diff --git a/cmd/k8s-operator/proxy.go b/cmd/k8s-operator/proxy.go index 672f07b1f..4509c0dd8 100644 --- a/cmd/k8s-operator/proxy.go +++ b/cmd/k8s-operator/proxy.go @@ -311,7 +311,7 @@ func (h *apiserverProxy) addImpersonationHeadersAsRequired(r *http.Request) { // Now add the impersonation headers that we want. if err := addImpersonationHeaders(r, h.log); err != nil { - log.Printf("failed to add impersonation headers: " + err.Error()) + log.Print("failed to add impersonation headers: ", err.Error()) } } diff --git a/cmd/tailscale/cli/risks.go b/cmd/tailscale/cli/risks.go index acb50e723..c36ffafae 100644 --- a/cmd/tailscale/cli/risks.go +++ b/cmd/tailscale/cli/risks.go @@ -77,7 +77,7 @@ func presentRiskToUser(riskType, riskMessage, acceptedRisks string) error { for left := riskAbortTimeSeconds; left > 0; left-- { msg := fmt.Sprintf("\rContinuing in %d seconds...", left) msgLen = len(msg) - printf(msg) + printf("%s", msg) select { case <-interrupt: printf("\r%s\r", strings.Repeat("x", msgLen+1)) diff --git a/cmd/tsconnect/tsconnect.go b/cmd/tsconnect/tsconnect.go index 4c8a0a52e..ef55593b4 100644 --- a/cmd/tsconnect/tsconnect.go +++ b/cmd/tsconnect/tsconnect.go @@ -53,12 +53,12 @@ func main() { } func usage() { - fmt.Fprintf(os.Stderr, ` + fmt.Fprint(os.Stderr, ` usage: tsconnect {dev|build|serve} `[1:]) flag.PrintDefaults() - fmt.Fprintf(os.Stderr, ` + fmt.Fprint(os.Stderr, ` tsconnect implements development/build/serving workflows for Tailscale Connect. 
It can be invoked with one of three subcommands: diff --git a/net/tshttpproxy/tshttpproxy_synology.go b/net/tshttpproxy/tshttpproxy_synology.go index 2e50d26d3..e28844f7d 100644 --- a/net/tshttpproxy/tshttpproxy_synology.go +++ b/net/tshttpproxy/tshttpproxy_synology.go @@ -47,7 +47,7 @@ func synologyProxyFromConfigCached(req *http.Request) (*url.URL, error) { var err error modtime := mtime(synologyProxyConfigPath) - if modtime != cache.updated { + if !modtime.Equal(cache.updated) { cache.httpProxy, cache.httpsProxy, err = synologyProxiesFromConfig() cache.updated = modtime } diff --git a/net/tshttpproxy/tshttpproxy_synology_test.go b/net/tshttpproxy/tshttpproxy_synology_test.go index 3061740f3..b6e8b948c 100644 --- a/net/tshttpproxy/tshttpproxy_synology_test.go +++ b/net/tshttpproxy/tshttpproxy_synology_test.go @@ -41,7 +41,7 @@ func TestSynologyProxyFromConfigCached(t *testing.T) { t.Fatalf("got %s, %v; want nil, nil", val, err) } - if got, want := cache.updated, time.Unix(0, 0); got != want { + if got, want := cache.updated.UTC(), time.Unix(0, 0).UTC(); !got.Equal(want) { t.Fatalf("got %s, want %s", got, want) } if cache.httpProxy != nil { diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go index 3ff676d51..986b60bd3 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -1014,10 +1014,10 @@ func (ss *sshSession) startWithStdPipes() (err error) { func envForUser(u *userMeta) []string { return []string{ - fmt.Sprintf("SHELL=" + u.LoginShell()), - fmt.Sprintf("USER=" + u.Username), - fmt.Sprintf("HOME=" + u.HomeDir), - fmt.Sprintf("PATH=" + defaultPathForUser(&u.User)), + fmt.Sprintf("SHELL=%s", u.LoginShell()), + fmt.Sprintf("USER=%s", u.Username), + fmt.Sprintf("HOME=%s", u.HomeDir), + fmt.Sprintf("PATH=%s", defaultPathForUser(&u.User)), } } diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 92f74e244..386359f19 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -955,7 +955,7 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, if dns != nil && s.MagicDNSDomain != "" { dns = dns.Clone() dns.CertDomains = []string{ - fmt.Sprintf(node.Hostinfo.Hostname() + "." + s.MagicDNSDomain), + node.Hostinfo.Hostname() + "." + s.MagicDNSDomain, } } From 009da8a364ae2b1c807fe195b4ff55a8219890aa Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Mon, 6 Jan 2025 11:27:11 -0500 Subject: [PATCH 0291/1708] ipn/ipnlocal: connect serve config to c2n endpoint This commit updates the VIPService c2n endpoint on client to response with actual VIPService configuration stored in the serve config. 
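Part of this is turning the set of served TCP ports into tailcfg.ProtoPortRange values: dedupe, sort, then merge adjacent ports into contiguous ranges. A minimal standalone sketch of that coalescing step, with simplified types rather than the exact ones used in the patch:

    package main

    import (
        "fmt"
        "slices"
    )

    // portRange is a simplified stand-in for tailcfg.PortRange.
    type portRange struct{ First, Last uint16 }

    // coalescePorts dedupes and sorts ports, then merges adjacent ones into
    // contiguous ranges, e.g. [82 80 81 1212 1213] -> [{80 82} {1212 1213}].
    func coalescePorts(ports []uint16) []portRange {
        ports = slices.Clone(ports)
        slices.Sort(ports)
        ports = slices.Compact(ports)
        var ranges []portRange
        for _, p := range ports {
            if n := len(ranges); n > 0 && p == ranges[n-1].Last+1 {
                ranges[n-1].Last = p
                continue
            }
            ranges = append(ranges, portRange{First: p, Last: p})
        }
        return ranges
    }

    func main() {
        fmt.Println(coalescePorts([]uint16{82, 80, 81, 1212, 1213, 1214}))
    }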
Fixes tailscale/corp#24510 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- ipn/ipnlocal/local.go | 45 +++++++++------- ipn/ipnlocal/local_test.go | 103 ++++++++++++++++++++++++++++++++----- ipn/serve.go | 40 ++++++++++++++ 3 files changed, 154 insertions(+), 34 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 4c58ae8ec..0af40cfc7 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -11,6 +11,7 @@ import ( "context" "crypto/sha256" "encoding/base64" + "encoding/hex" "encoding/json" "errors" "fmt" @@ -5017,13 +5018,7 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip } hi.SSH_HostKeys = sshHostKeys - services := vipServicesFromPrefs(prefs) - if len(services) > 0 { - buf, _ := json.Marshal(services) - hi.ServicesHash = fmt.Sprintf("%02x", sha256.Sum256(buf)) - } else { - hi.ServicesHash = "" - } + hi.ServicesHash = b.vipServiceHashLocked(prefs) // The Hostinfo.WantIngress field tells control whether this node wants to // be wired up for ingress connections. If harmless if it's accidentally @@ -7659,28 +7654,38 @@ func maybeUsernameOf(actor ipnauth.Actor) string { func (b *LocalBackend) VIPServices() []*tailcfg.VIPService { b.mu.Lock() defer b.mu.Unlock() - return vipServicesFromPrefs(b.pm.CurrentPrefs()) + return b.vipServicesFromPrefsLocked(b.pm.CurrentPrefs()) +} + +func (b *LocalBackend) vipServiceHashLocked(prefs ipn.PrefsView) string { + services := b.vipServicesFromPrefsLocked(prefs) + if len(services) == 0 { + return "" + } + buf, err := json.Marshal(services) + if err != nil { + b.logf("vipServiceHashLocked: %v", err) + return "" + } + hash := sha256.Sum256(buf) + return hex.EncodeToString(hash[:]) } -func vipServicesFromPrefs(prefs ipn.PrefsView) []*tailcfg.VIPService { +func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService { // keyed by service name var services map[string]*tailcfg.VIPService - - // TODO(naman): this envknob will be replaced with service-specific port - // information once we start storing that. 
- var allPortsServices []string - if env := envknob.String("TS_DEBUG_ALLPORTS_SERVICES"); env != "" { - allPortsServices = strings.Split(env, ",") + if !b.serveConfig.Valid() { + return nil } - for _, s := range allPortsServices { - mak.Set(&services, s, &tailcfg.VIPService{ - Name: s, - Ports: []tailcfg.ProtoPortRange{{Ports: tailcfg.PortRangeAny}}, + for svc, config := range b.serveConfig.Services().All() { + mak.Set(&services, svc, &tailcfg.VIPService{ + Name: svc, + Ports: config.ServicePortRange(), }) } - for _, s := range prefs.AdvertiseServices().AsSlice() { + for _, s := range prefs.AdvertiseServices().All() { if services == nil || services[s] == nil { mak.Set(&services, s, &tailcfg.VIPService{ Name: s, diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 15766741b..f3ee24a6b 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -30,7 +30,6 @@ import ( "tailscale.com/control/controlclient" "tailscale.com/drive" "tailscale.com/drive/driveimpl" - "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -4509,15 +4508,15 @@ func TestConfigFileReload(t *testing.T) { func TestGetVIPServices(t *testing.T) { tests := []struct { - name string - advertised []string - mapped []string - want []*tailcfg.VIPService + name string + advertised []string + serveConfig *ipn.ServeConfig + want []*tailcfg.VIPService }{ { "advertised-only", []string{"svc:abc", "svc:def"}, - []string{}, + &ipn.ServeConfig{}, []*tailcfg.VIPService{ { Name: "svc:abc", @@ -4530,9 +4529,13 @@ func TestGetVIPServices(t *testing.T) { }, }, { - "mapped-only", + "served-only", []string{}, - []string{"svc:abc"}, + &ipn.ServeConfig{ + Services: map[string]*ipn.ServiceConfig{ + "svc:abc": {Tun: true}, + }, + }, []*tailcfg.VIPService{ { Name: "svc:abc", @@ -4541,9 +4544,13 @@ func TestGetVIPServices(t *testing.T) { }, }, { - "mapped-and-advertised", - []string{"svc:abc"}, + "served-and-advertised", []string{"svc:abc"}, + &ipn.ServeConfig{ + Services: map[string]*ipn.ServiceConfig{ + "svc:abc": {Tun: true}, + }, + }, []*tailcfg.VIPService{ { Name: "svc:abc", @@ -4553,9 +4560,13 @@ func TestGetVIPServices(t *testing.T) { }, }, { - "mapped-and-advertised-separately", + "served-and-advertised-different-service", []string{"svc:def"}, - []string{"svc:abc"}, + &ipn.ServeConfig{ + Services: map[string]*ipn.ServiceConfig{ + "svc:abc": {Tun: true}, + }, + }, []*tailcfg.VIPService{ { Name: "svc:abc", @@ -4567,14 +4578,78 @@ func TestGetVIPServices(t *testing.T) { }, }, }, + { + "served-with-port-ranges-one-range-single", + []string{}, + &ipn.ServeConfig{ + Services: map[string]*ipn.ServiceConfig{ + "svc:abc": {TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTPS: true}, + }}, + }, + }, + []*tailcfg.VIPService{ + { + Name: "svc:abc", + Ports: []tailcfg.ProtoPortRange{{Proto: 6, Ports: tailcfg.PortRange{First: 80, Last: 80}}}, + }, + }, + }, + { + "served-with-port-ranges-one-range-multiple", + []string{}, + &ipn.ServeConfig{ + Services: map[string]*ipn.ServiceConfig{ + "svc:abc": {TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTPS: true}, + 81: {HTTPS: true}, + 82: {HTTPS: true}, + }}, + }, + }, + []*tailcfg.VIPService{ + { + Name: "svc:abc", + Ports: []tailcfg.ProtoPortRange{{Proto: 6, Ports: tailcfg.PortRange{First: 80, Last: 82}}}, + }, + }, + }, + { + "served-with-port-ranges-multiple-ranges", + []string{}, + &ipn.ServeConfig{ + Services: map[string]*ipn.ServiceConfig{ + "svc:abc": {TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTPS: true}, + 81: {HTTPS: true}, + 
82: {HTTPS: true}, + 1212: {HTTPS: true}, + 1213: {HTTPS: true}, + 1214: {HTTPS: true}, + }}, + }, + }, + []*tailcfg.VIPService{ + { + Name: "svc:abc", + Ports: []tailcfg.ProtoPortRange{ + {Proto: 6, Ports: tailcfg.PortRange{First: 80, Last: 82}}, + {Proto: 6, Ports: tailcfg.PortRange{First: 1212, Last: 1214}}, + }, + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - envknob.Setenv("TS_DEBUG_ALLPORTS_SERVICES", strings.Join(tt.mapped, ",")) + lb := newLocalBackendWithTestControl(t, false, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + return newClient(tb, opts) + }) + lb.serveConfig = tt.serveConfig.View() prefs := &ipn.Prefs{ AdvertiseServices: tt.advertised, } - got := vipServicesFromPrefs(prefs.View()) + got := lb.vipServicesFromPrefsLocked(prefs.View()) slices.SortFunc(got, func(a, b *tailcfg.VIPService) int { return strings.Compare(a.Name, b.Name) }) diff --git a/ipn/serve.go b/ipn/serve.go index e82279db8..b7effa874 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -16,7 +16,9 @@ import ( "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" + "tailscale.com/types/ipproto" "tailscale.com/util/mak" + "tailscale.com/util/set" ) // ServeConfigKey returns a StateKey that stores the @@ -655,3 +657,41 @@ func (v ServeConfigView) HasFunnelForTarget(target HostPort) bool { } return false } + +// ServicePortRange returns the list of tailcfg.ProtoPortRange that represents +// the proto/ports pairs that are being served by the service. +// +// Right now Tun mode is the only thing supports UDP, otherwise serve only supports TCP. +func (v ServiceConfigView) ServicePortRange() []tailcfg.ProtoPortRange { + if v.Tun() { + // If the service is in Tun mode, means service accept TCP/UDP on all ports. + return []tailcfg.ProtoPortRange{{Ports: tailcfg.PortRangeAny}} + } + tcp := int(ipproto.TCP) + + // Deduplicate the ports. + servePorts := make(set.Set[uint16]) + for port := range v.TCP().All() { + if port > 0 { + servePorts.Add(uint16(port)) + } + } + dedupedServePorts := servePorts.Slice() + slices.Sort(dedupedServePorts) + + var ranges []tailcfg.ProtoPortRange + for _, p := range dedupedServePorts { + if n := len(ranges); n > 0 && p == ranges[n-1].Ports.Last+1 { + ranges[n-1].Ports.Last = p + continue + } + ranges = append(ranges, tailcfg.ProtoPortRange{ + Proto: tcp, + Ports: tailcfg.PortRange{ + First: p, + Last: p, + }, + }) + } + return ranges +} From 8d4ca13cf8093fdcff06140feedb0e32d42cbc91 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 8 Jan 2025 13:43:17 +0000 Subject: [PATCH 0292/1708] cmd/k8s-operator,k8s-operator: support ingress ProxyGroup type (#14548) Currently this does not yet do anything apart from creating the ProxyGroup resources like StatefulSet. 
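An ingress ProxyGroup is declared the same way as an existing egress one, only with Spec.Type set to ingress; the CRD validation added here also makes Type immutable once the resource is created. A small sketch mirroring the test fixtures in this patch (the object name and replica count are illustrative):

    package main

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
        "tailscale.com/types/ptr"
    )

    func main() {
        // A ProxyGroup of the new ingress type; changing Type later would be
        // rejected by the CRD's "self == oldSelf" validation rule.
        _ = &tsapi.ProxyGroup{
            ObjectMeta: metav1.ObjectMeta{Name: "ts-ingress"},
            Spec: tsapi.ProxyGroupSpec{
                Type:     tsapi.ProxyGroupTypeIngress,
                Replicas: ptr.To[int32](2), // defaults to 2 when unset
            },
        }
    }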
Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- .../crds/tailscale.com_proxygroups.yaml | 13 +- .../deploy/manifests/operator.yaml | 13 +- cmd/k8s-operator/egress-services.go | 14 +- cmd/k8s-operator/operator.go | 2 +- cmd/k8s-operator/proxygroup.go | 42 +++++- cmd/k8s-operator/proxygroup_specs.go | 15 +- cmd/k8s-operator/proxygroup_test.go | 138 +++++++++++++++++- k8s-operator/api.md | 6 +- .../apis/v1alpha1/types_proxygroup.go | 11 +- 9 files changed, 222 insertions(+), 32 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index 5e6b53785..d6a4fe741 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -20,6 +20,10 @@ spec: jsonPath: .status.conditions[?(@.type == "ProxyGroupReady")].reason name: Status type: string + - description: ProxyGroup type. + jsonPath: .spec.type + name: Type + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -84,6 +88,7 @@ spec: Defaults to 2. type: integer format: int32 + minimum: 0 tags: description: |- Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s]. @@ -97,10 +102,16 @@ spec: type: string pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ type: - description: Type of the ProxyGroup proxies. Currently the only supported type is egress. + description: |- + Type of the ProxyGroup proxies. Supported types are egress and ingress. + Type is immutable once a ProxyGroup is created. type: string enum: - egress + - ingress + x-kubernetes-validations: + - rule: self == oldSelf + message: ProxyGroup type is immutable status: description: |- ProxyGroupStatus describes the status of the ProxyGroup resources. This is diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index dd34c2a1e..2f5100ab6 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2721,6 +2721,10 @@ spec: jsonPath: .status.conditions[?(@.type == "ProxyGroupReady")].reason name: Status type: string + - description: ProxyGroup type. + jsonPath: .spec.type + name: Type + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -2778,6 +2782,7 @@ spec: Replicas specifies how many replicas to create the StatefulSet with. Defaults to 2. format: int32 + minimum: 0 type: integer tags: description: |- @@ -2792,10 +2797,16 @@ spec: type: string type: array type: - description: Type of the ProxyGroup proxies. Currently the only supported type is egress. + description: |- + Type of the ProxyGroup proxies. Supported types are egress and ingress. + Type is immutable once a ProxyGroup is created. 
enum: - egress + - ingress type: string + x-kubernetes-validations: + - message: ProxyGroup type is immutable + rule: self == oldSelf required: - type type: object diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index 7544376fb..55003ee91 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -495,13 +495,6 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, err } - if !tsoperator.ProxyGroupIsReady(pg) { - l.Infof("ProxyGroup %s is not ready, waiting...", proxyGroupName) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l) - tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) - return false, nil - } - if violations := validateEgressService(svc, pg); len(violations) > 0 { msg := fmt.Sprintf("invalid egress Service: %s", strings.Join(violations, ", ")) esr.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVICE", msg) @@ -510,6 +503,13 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, nil } + if !tsoperator.ProxyGroupIsReady(pg) { + l.Infof("ProxyGroup %s is not ready, waiting...", proxyGroupName) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l) + tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) + return false, nil + } + l.Debugf("egress service is valid") tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionTrue, reasonEgressSvcValid, reasonEgressSvcValid, esr.clock, l) return true, nil diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index ebb2c4578..b24839082 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -499,7 +499,7 @@ func runReconcilers(opts reconcilerOpts) { startlog.Fatalf("could not create Recorder reconciler: %v", err) } - // Recorder reconciler. + // ProxyGroup reconciler. ownedByProxyGroupFilter := handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &tsapi.ProxyGroup{}) proxyClassFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForProxyGroup(mgr.GetClient(), startlog)) err = builder.ControllerManagedBy(mgr). diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 60f470fc2..194474fb2 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -51,7 +51,10 @@ const ( optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" ) -var gaugeProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupEgressCount) +var ( + gaugeEgressProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupEgressCount) + gaugeIngressProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupIngressCount) +) // ProxyGroupReconciler ensures cluster resources for a ProxyGroup definition. 
type ProxyGroupReconciler struct { @@ -68,8 +71,9 @@ type ProxyGroupReconciler struct { tsFirewallMode string defaultProxyClass string - mu sync.Mutex // protects following - proxyGroups set.Slice[types.UID] // for proxygroups gauge + mu sync.Mutex // protects following + egressProxyGroups set.Slice[types.UID] // for egress proxygroups gauge + ingressProxyGroups set.Slice[types.UID] // for ingress proxygroups gauge } func (r *ProxyGroupReconciler) logger(name string) *zap.SugaredLogger { @@ -203,8 +207,7 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) error { logger := r.logger(pg.Name) r.mu.Lock() - r.proxyGroups.Add(pg.UID) - gaugeProxyGroupResources.Set(int64(r.proxyGroups.Len())) + r.ensureAddedToGaugeForProxyGroup(pg) r.mu.Unlock() cfgHash, err := r.ensureConfigSecretsCreated(ctx, pg, proxyClass) @@ -358,8 +361,7 @@ func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, pg *tsapi.Proxy logger.Infof("cleaned up ProxyGroup resources") r.mu.Lock() - r.proxyGroups.Remove(pg.UID) - gaugeProxyGroupResources.Set(int64(r.proxyGroups.Len())) + r.ensureRemovedFromGaugeForProxyGroup(pg) r.mu.Unlock() return true, nil } @@ -469,6 +471,32 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p return configSHA256Sum, nil } +// ensureAddedToGaugeForProxyGroup ensures the gauge metric for the ProxyGroup resource is updated when the ProxyGroup +// is created. r.mu must be held. +func (r *ProxyGroupReconciler) ensureAddedToGaugeForProxyGroup(pg *tsapi.ProxyGroup) { + switch pg.Spec.Type { + case tsapi.ProxyGroupTypeEgress: + r.egressProxyGroups.Add(pg.UID) + case tsapi.ProxyGroupTypeIngress: + r.ingressProxyGroups.Add(pg.UID) + } + gaugeEgressProxyGroupResources.Set(int64(r.egressProxyGroups.Len())) + gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len())) +} + +// ensureRemovedFromGaugeForProxyGroup ensures the gauge metric for the ProxyGroup resource type is updated when the +// ProxyGroup is deleted. r.mu must be held. 
+func (r *ProxyGroupReconciler) ensureRemovedFromGaugeForProxyGroup(pg *tsapi.ProxyGroup) { + switch pg.Spec.Type { + case tsapi.ProxyGroupTypeEgress: + r.egressProxyGroups.Remove(pg.UID) + case tsapi.ProxyGroupTypeIngress: + r.ingressProxyGroups.Remove(pg.UID) + } + gaugeEgressProxyGroupResources.Set(int64(r.egressProxyGroups.Len())) + gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len())) +} + func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32, authKey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ Version: "alpha0", diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index b47cb39b1..d602be814 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -138,10 +138,6 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHa Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)", }, - { - Name: "TS_INTERNAL_APP", - Value: kubetypes.AppProxyGroupEgress, - }, } if tsFirewallMode != "" { @@ -155,9 +151,18 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHa envs = append(envs, corev1.EnvVar{ Name: "TS_EGRESS_SERVICES_CONFIG_PATH", Value: fmt.Sprintf("/etc/proxies/%s", egressservices.KeyEgressServices), + }, + corev1.EnvVar{ + Name: "TS_INTERNAL_APP", + Value: kubetypes.AppProxyGroupEgress, + }, + ) + } else { + envs = append(envs, corev1.EnvVar{ + Name: "TS_INTERNAL_APP", + Value: kubetypes.AppProxyGroupIngress, }) } - return append(c.Env, envs...) }() diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 9c4df9e4f..bc0dccdff 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -25,6 +25,8 @@ import ( "tailscale.com/client/tailscale" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/egressservices" + "tailscale.com/kube/kubetypes" "tailscale.com/tstest" "tailscale.com/types/ptr" ) @@ -53,6 +55,9 @@ func TestProxyGroup(t *testing.T) { Name: "test", Finalizers: []string{"tailscale.com/finalizer"}, }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeEgress, + }, } fc := fake.NewClientBuilder(). 
@@ -112,8 +117,8 @@ func TestProxyGroup(t *testing.T) { tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg, nil) expectProxyGroupResources(t, fc, pg, true, initialCfgHash) - if expected := 1; reconciler.proxyGroups.Len() != expected { - t.Fatalf("expected %d recorders, got %d", expected, reconciler.proxyGroups.Len()) + if expected := 1; reconciler.egressProxyGroups.Len() != expected { + t.Fatalf("expected %d egress ProxyGroups, got %d", expected, reconciler.egressProxyGroups.Len()) } expectProxyGroupResources(t, fc, pg, true, initialCfgHash) keyReq := tailscale.KeyCapabilities{ @@ -227,8 +232,8 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) expectMissing[tsapi.ProxyGroup](t, fc, "", pg.Name) - if expected := 0; reconciler.proxyGroups.Len() != expected { - t.Fatalf("expected %d ProxyGroups, got %d", expected, reconciler.proxyGroups.Len()) + if expected := 0; reconciler.egressProxyGroups.Len() != expected { + t.Fatalf("expected %d ProxyGroups, got %d", expected, reconciler.egressProxyGroups.Len()) } // 2 nodes should get deleted as part of the scale down, and then finally // the first node gets deleted with the ProxyGroup cleanup. @@ -241,6 +246,131 @@ func TestProxyGroup(t *testing.T) { }) } +func TestProxyGroupTypes(t *testing.T) { + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + Build() + + zl, _ := zap.NewDevelopment() + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + proxyImage: testProxyImage, + Client: fc, + l: zl.Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), + } + + t.Run("egress_type", func(t *testing.T) { + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-egress", + UID: "test-egress-uid", + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeEgress, + Replicas: ptr.To[int32](0), + }, + } + if err := fc.Create(context.Background(), pg); err != nil { + t.Fatal(err) + } + + expectReconciled(t, reconciler, "", pg.Name) + verifyProxyGroupCounts(t, reconciler, 0, 1) + + sts := &appsv1.StatefulSet{} + if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + verifyEnvVar(t, sts, "TS_INTERNAL_APP", kubetypes.AppProxyGroupEgress) + verifyEnvVar(t, sts, "TS_EGRESS_SERVICES_CONFIG_PATH", fmt.Sprintf("/etc/proxies/%s", egressservices.KeyEgressServices)) + + // Verify that egress configuration has been set up. 
+ cm := &corev1.ConfigMap{} + cmName := fmt.Sprintf("%s-egress-config", pg.Name) + if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: cmName}, cm); err != nil { + t.Fatalf("failed to get ConfigMap: %v", err) + } + + expectedVolumes := []corev1.Volume{ + { + Name: cmName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: cmName, + }, + }, + }, + }, + } + + expectedVolumeMounts := []corev1.VolumeMount{ + { + Name: cmName, + MountPath: "/etc/proxies", + ReadOnly: true, + }, + } + + if diff := cmp.Diff(expectedVolumes, sts.Spec.Template.Spec.Volumes); diff != "" { + t.Errorf("unexpected volumes (-want +got):\n%s", diff) + } + + if diff := cmp.Diff(expectedVolumeMounts, sts.Spec.Template.Spec.Containers[0].VolumeMounts); diff != "" { + t.Errorf("unexpected volume mounts (-want +got):\n%s", diff) + } + }) + + t.Run("ingress_type", func(t *testing.T) { + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + UID: "test-ingress-uid", + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeIngress, + }, + } + if err := fc.Create(context.Background(), pg); err != nil { + t.Fatal(err) + } + + expectReconciled(t, reconciler, "", pg.Name) + verifyProxyGroupCounts(t, reconciler, 1, 1) + + sts := &appsv1.StatefulSet{} + if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + verifyEnvVar(t, sts, "TS_INTERNAL_APP", kubetypes.AppProxyGroupIngress) + }) +} + +func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, wantEgress int) { + t.Helper() + if r.ingressProxyGroups.Len() != wantIngress { + t.Errorf("expected %d ingress proxy groups, got %d", wantIngress, r.ingressProxyGroups.Len()) + } + if r.egressProxyGroups.Len() != wantEgress { + t.Errorf("expected %d egress proxy groups, got %d", wantEgress, r.egressProxyGroups.Len()) + } +} + +func verifyEnvVar(t *testing.T, sts *appsv1.StatefulSet, name, expectedValue string) { + t.Helper() + for _, env := range sts.Spec.Template.Spec.Containers[0].Env { + if env.Name == name { + if env.Value != expectedValue { + t.Errorf("expected %s=%s, got %s", name, expectedValue, env.Value) + } + return + } + } + t.Errorf("%s environment variable not found", name) +} + func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool, cfgHash string) { t.Helper() diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 327f95ea9..f52606989 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -568,9 +568,9 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Currently the only supported type is egress. | | Enum: [egress]
Type: string
| +| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Supported types are egress and ingress.
Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress]
Type: string
| | `tags` _[Tags](#tags)_ | Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s].
If you specify custom tags here, make sure you also make the operator
an owner of these tags.
See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
Tags cannot be changed once a ProxyGroup device has been created.
Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
Type: string
| -| `replicas` _integer_ | Replicas specifies how many replicas to create the StatefulSet with.
Defaults to 2. | | | +| `replicas` _integer_ | Replicas specifies how many replicas to create the StatefulSet with.
Defaults to 2. | | Minimum: 0
| | `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix is the hostname prefix to use for tailnet devices created
by the ProxyGroup. Each device will have the integer number from its
StatefulSet pod appended to this prefix to form the full hostname.
HostnamePrefix can contain lower case letters, numbers and dashes, it
must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$`
Type: string
| | `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that contains
configuration options that should be applied to the resources created
for this ProxyGroup. If unset, and there is no default ProxyClass
configured, the operator will create resources with the default
configuration. | | | @@ -599,7 +599,7 @@ _Underlying type:_ _string_ _Validation:_ -- Enum: [egress] +- Enum: [egress ingress] - Type: string _Appears in:_ diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index e7397f33e..f95fc58d0 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -13,6 +13,7 @@ import ( // +kubebuilder:subresource:status // +kubebuilder:resource:scope=Cluster,shortName=pg // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "ProxyGroupReady")].reason`,description="Status of the deployed ProxyGroup resources." +// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=`.spec.type`,description="ProxyGroup type." // ProxyGroup defines a set of Tailscale devices that will act as proxies. // Currently only egress ProxyGroups are supported. @@ -47,7 +48,9 @@ type ProxyGroupList struct { } type ProxyGroupSpec struct { - // Type of the ProxyGroup proxies. Currently the only supported type is egress. + // Type of the ProxyGroup proxies. Supported types are egress and ingress. + // Type is immutable once a ProxyGroup is created. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ProxyGroup type is immutable" Type ProxyGroupType `json:"type"` // Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s]. @@ -62,6 +65,7 @@ type ProxyGroupSpec struct { // Replicas specifies how many replicas to create the StatefulSet with. // Defaults to 2. // +optional + // +kubebuilder:validation:Minimum=0 Replicas *int32 `json:"replicas,omitempty"` // HostnamePrefix is the hostname prefix to use for tailnet devices created @@ -109,11 +113,12 @@ type TailnetDevice struct { } // +kubebuilder:validation:Type=string -// +kubebuilder:validation:Enum=egress +// +kubebuilder:validation:Enum=egress;ingress type ProxyGroupType string const ( - ProxyGroupTypeEgress ProxyGroupType = "egress" + ProxyGroupTypeEgress ProxyGroupType = "egress" + ProxyGroupTypeIngress ProxyGroupType = "ingress" ) // +kubebuilder:validation:Type=string From c81a95dd53d905b95c67568dd520db16ee1bdb8a Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Wed, 8 Jan 2025 11:44:10 -0600 Subject: [PATCH 0293/1708] prober: clone histogram buckets before handing to Prometheus for derp_qd_probe_delays_seconds Updates tailscale/corp#25697 Signed-off-by: Percy Wegmann --- prober/derp.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/prober/derp.go b/prober/derp.go index 5adc0c0b4..3cd6394ad 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -15,6 +15,7 @@ import ( "fmt" "io" "log" + "maps" "net" "net/http" "net/netip" @@ -350,7 +351,7 @@ func (d *derpProber) probeQueuingDelay(from, to string, packetsPerSecond int, pa qdh.mx.Lock() result := []prometheus.Metric{ prometheus.MustNewConstMetric(prometheus.NewDesc("derp_qd_probe_dropped_packets", "Total packets dropped", nil, l), prometheus.CounterValue, float64(packetsDropped.Value())), - prometheus.MustNewConstHistogram(prometheus.NewDesc("derp_qd_probe_delays_seconds", "Distribution of queuing delays", nil, l), qdh.count, qdh.sum, qdh.bucketedCounts), + prometheus.MustNewConstHistogram(prometheus.NewDesc("derp_qd_probe_delays_seconds", "Distribution of queuing delays", nil, l), qdh.count, qdh.sum, maps.Clone(qdh.bucketedCounts)), } qdh.mx.Unlock() return result From 8d6b9964831185fe07be5d420fa67b2b197421e6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 8 
Jan 2025 10:47:33 -0800 Subject: [PATCH 0294/1708] ipn/ipnlocal: add client metric gauge for number of IPNBus connections Updates #1708 Change-Id: Ic7e28d692b4c48e78c842c26234b861fe42a916e Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 0af40cfc7..8d2652e0a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -95,6 +95,7 @@ import ( "tailscale.com/types/preftype" "tailscale.com/types/ptr" "tailscale.com/types/views" + "tailscale.com/util/clientmetric" "tailscale.com/util/deephash" "tailscale.com/util/dnsname" "tailscale.com/util/goroutines" @@ -2863,6 +2864,9 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A mak.Set(&b.notifyWatchers, sessionID, session) b.mu.Unlock() + metricCurrentWatchIPNBus.Add(1) + defer metricCurrentWatchIPNBus.Add(-1) + defer func() { b.mu.Lock() delete(b.notifyWatchers, sessionID) @@ -7696,3 +7700,7 @@ func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcf return slicesx.MapValues(services) } + +var ( + metricCurrentWatchIPNBus = clientmetric.NewGauge("localbackend_current_watch_ipn_bus") +) From 1d4fd2fb34d0a731308f276924992e6b879c0fd7 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 8 Jan 2025 11:06:36 -0800 Subject: [PATCH 0295/1708] hostinfo: improve accuracy of Linux desktop detection heuristic DBus doesn't imply desktop. Updates #1708 Change-Id: Id43205aafb293533119256adf372a7d762aa7aca Signed-off-by: Brad Fitzpatrick --- hostinfo/hostinfo.go | 1 - 1 file changed, 1 deletion(-) diff --git a/hostinfo/hostinfo.go b/hostinfo/hostinfo.go index 3d4216922..89968e1e6 100644 --- a/hostinfo/hostinfo.go +++ b/hostinfo/hostinfo.go @@ -233,7 +233,6 @@ func desktop() (ret opt.Bool) { seenDesktop := false for lr := range lineiter.File("/proc/net/unix") { line, _ := lr.Value() - seenDesktop = seenDesktop || mem.Contains(mem.B(line), mem.S(" @/tmp/dbus-")) seenDesktop = seenDesktop || mem.Contains(mem.B(line), mem.S(".X11-unix")) seenDesktop = seenDesktop || mem.Contains(mem.B(line), mem.S("/wayland-1")) } From 9f17260e216905cce04d0e938f6767a391317275 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Wed, 8 Jan 2025 13:21:54 -0500 Subject: [PATCH 0296/1708] types/views: add MapViewsEqual and MapViewsEqualFunc Extracted from some code written in the other repo. Updates tailscale/corp#25479 Signed-off-by: Andrew Dunham Change-Id: I92c97a63a8f35cace6e89a730938ea587dcefd9b --- types/views/views.go | 41 +++++++++++++++++++ types/views/views_test.go | 85 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 126 insertions(+) diff --git a/types/views/views.go b/types/views/views.go index eae8c0b16..4addc6448 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -513,6 +513,47 @@ func (m Map[K, V]) AsMap() map[K]V { return maps.Clone(m.ж) } +// NOTE: the type constraints for MapViewsEqual and MapViewsEqualFunc are based +// on those for maps.Equal and maps.EqualFunc. + +// MapViewsEqual returns whether the two given [Map]s are equal. Both K and V +// must be comparable; if V is non-comparable, use [MapViewsEqualFunc] instead. 
+func MapViewsEqual[K, V comparable](a, b Map[K, V]) bool { + if a.Len() != b.Len() || a.IsNil() != b.IsNil() { + return false + } + if a.IsNil() { + return true // both nil; can exit early + } + + for k, v := range a.All() { + bv, ok := b.GetOk(k) + if !ok || v != bv { + return false + } + } + return true +} + +// MapViewsEqualFunc returns whether the two given [Map]s are equal, using the +// given function to compare two values. +func MapViewsEqualFunc[K comparable, V1, V2 any](a Map[K, V1], b Map[K, V2], eq func(V1, V2) bool) bool { + if a.Len() != b.Len() || a.IsNil() != b.IsNil() { + return false + } + if a.IsNil() { + return true // both nil; can exit early + } + + for k, v := range a.All() { + bv, ok := b.GetOk(k) + if !ok || !eq(v, bv) { + return false + } + } + return true +} + // MapRangeFn is the func called from a Map.Range call. // Implementations should return false to stop range. type MapRangeFn[K comparable, V any] func(k K, v V) (cont bool) diff --git a/types/views/views_test.go b/types/views/views_test.go index 8a1ff3fdd..51b086a4e 100644 --- a/types/views/views_test.go +++ b/types/views/views_test.go @@ -15,6 +15,7 @@ import ( "unsafe" qt "github.com/frankban/quicktest" + "tailscale.com/types/structs" ) type viewStruct struct { @@ -501,3 +502,87 @@ func TestMapFnIter(t *testing.T) { t.Errorf("got %q; want %q", got, want) } } + +func TestMapViewsEqual(t *testing.T) { + testCases := []struct { + name string + a, b map[string]string + want bool + }{ + { + name: "both_nil", + a: nil, + b: nil, + want: true, + }, + { + name: "both_empty", + a: map[string]string{}, + b: map[string]string{}, + want: true, + }, + { + name: "one_nil", + a: nil, + b: map[string]string{"a": "1"}, + want: false, + }, + { + name: "different_length", + a: map[string]string{"a": "1"}, + b: map[string]string{"a": "1", "b": "2"}, + want: false, + }, + { + name: "different_values", + a: map[string]string{"a": "1"}, + b: map[string]string{"a": "2"}, + want: false, + }, + { + name: "different_keys", + a: map[string]string{"a": "1"}, + b: map[string]string{"b": "1"}, + want: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got := MapViewsEqual(MapOf(tc.a), MapOf(tc.b)) + if got != tc.want { + t.Errorf("MapViewsEqual: got=%v, want %v", got, tc.want) + } + + got = MapViewsEqualFunc(MapOf(tc.a), MapOf(tc.b), func(a, b string) bool { + return a == b + }) + if got != tc.want { + t.Errorf("MapViewsEqualFunc: got=%v, want %v", got, tc.want) + } + }) + } +} + +func TestMapViewsEqualFunc(t *testing.T) { + // Test that we can compare maps with two different non-comparable + // values using a custom comparison function. 
+ type customStruct1 struct { + _ structs.Incomparable + Field1 string + } + type customStruct2 struct { + _ structs.Incomparable + Field2 string + } + + a := map[string]customStruct1{"a": {Field1: "1"}} + b := map[string]customStruct2{"a": {Field2: "1"}} + + got := MapViewsEqualFunc(MapOf(a), MapOf(b), func(a customStruct1, b customStruct2) bool { + return a.Field1 == b.Field2 + }) + if !got { + t.Errorf("MapViewsEqualFunc: got=%v, want true", got) + } +} From fa52035574fd6e9d6896b39f683ed67946a73dbd Mon Sep 17 00:00:00 2001 From: Will Norris Date: Wed, 8 Jan 2025 11:08:53 -0800 Subject: [PATCH 0297/1708] client/systray: record that systray is running Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/systray/systray.go | 1 + 1 file changed, 1 insertion(+) diff --git a/client/systray/systray.go b/client/systray/systray.go index 782fc5420..de2a37d8d 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -58,6 +58,7 @@ func (menu *Menu) Run() { case <-menu.bgCtx.Done(): } }() + go menu.lc.IncrementCounter(menu.bgCtx, "systray_start", 1) systray.Run(menu.onReady, menu.onExit) } From 0b4ba4074f584af28fd34945a4bf62d559b2c64d Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Wed, 8 Jan 2025 13:20:31 -0700 Subject: [PATCH 0298/1708] client/web: properly show "Log In" for web client on fresh install (#14569) Change the type of the `IPv4` and `IPv6` members in the `nodeData` struct to be `netip.Addr` instead of `string`. We were previously calling `String()` on this struct, which returns "invalid IP" when the `netip.Addr` is its zero value, and passing this value into the aforementioned attributes. This caused rendering issues on the frontend as we were assuming that the value for `IPv4` and `IPv6` would be falsy in this case. The zero value for a `netip.Addr` marshalls to an empty string instead which is the behaviour we want downstream. 
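For context, a minimal sketch (illustrative only, not part of this patch) of the standard-library behaviour the fix relies on: the zero netip.Addr marshals to an empty string via its TextMarshaler implementation, while its String method reports "invalid IP".

	package main

	import (
		"encoding/json"
		"fmt"
		"net/netip"
	)

	func main() {
		var zero netip.Addr        // zero value: no address set
		fmt.Println(zero.String()) // prints "invalid IP"

		// Marshalling a struct field of type netip.Addr goes through
		// MarshalText, so the zero value becomes an empty string that
		// the frontend can treat as falsy.
		out, _ := json.Marshal(struct{ IPv4 netip.Addr }{})
		fmt.Println(string(out)) // prints {"IPv4":""}
	}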
Updates https://github.com/tailscale/tailscale/issues/14568 Signed-off-by: Mario Minardi --- client/web/web.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/client/web/web.go b/client/web/web.go index 56c5c92e8..1e338b735 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -804,8 +804,8 @@ type nodeData struct { DeviceName string TailnetName string // TLS cert name DomainName string - IPv4 string - IPv6 string + IPv4 netip.Addr + IPv6 netip.Addr OS string IPNVersion string @@ -864,10 +864,14 @@ func (s *Server) serveGetNodeData(w http.ResponseWriter, r *http.Request) { return } filterRules, _ := s.lc.DebugPacketFilterRules(r.Context()) + ipv4, ipv6 := s.selfNodeAddresses(r, st) + data := &nodeData{ ID: st.Self.ID, Status: st.BackendState, DeviceName: strings.Split(st.Self.DNSName, ".")[0], + IPv4: ipv4, + IPv6: ipv6, OS: st.Self.OS, IPNVersion: strings.Split(st.Version, "-")[0], Profile: st.User[st.Self.UserID], @@ -887,10 +891,6 @@ func (s *Server) serveGetNodeData(w http.ResponseWriter, r *http.Request) { ACLAllowsAnyIncomingTraffic: s.aclsAllowAccess(filterRules), } - ipv4, ipv6 := s.selfNodeAddresses(r, st) - data.IPv4 = ipv4.String() - data.IPv6 = ipv6.String() - if hostinfo.GetEnvType() == hostinfo.HomeAssistantAddOn && data.URLPrefix == "" { // X-Ingress-Path is the path prefix in use for Home Assistant // https://developers.home-assistant.io/docs/add-ons/presentation#ingress From d8579a48b9ca1cf4636d646baf7ec51c945b9e70 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 8 Jan 2025 12:44:49 -0800 Subject: [PATCH 0299/1708] go.mod: bump go-git to v5.13.1 (#14584) govulncheck flagged a couple fresh vulns in that package: * https://pkg.go.dev/vuln/GO-2025-3367 * https://pkg.go.dev/vuln/GO-2025-3368 I don't believe these affect us, as we only do any git stuff from release tooling which is all internal and with hardcoded repo URLs. 
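For reference, the scan mentioned above can typically be reproduced with the Go vulnerability tooling (assumed invocation, not part of this change):

	go install golang.org/x/vuln/cmd/govulncheck@latest
	govulncheck ./...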
Updates #cleanup Signed-off-by: Andrew Lytvynov --- go.mod | 20 ++++++++++---------- go.sum | 59 ++++++++++++++++++++++++++-------------------------------- 2 files changed, 36 insertions(+), 43 deletions(-) diff --git a/go.mod b/go.mod index 650ec4557..62a431d18 100644 --- a/go.mod +++ b/go.mod @@ -95,10 +95,10 @@ require ( go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20220726221520-4f986261bf13 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.30.0 - golang.org/x/exp v0.0.0-20240119083558-1b970713d09a + golang.org/x/crypto v0.31.0 + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 golang.org/x/mod v0.19.0 - golang.org/x/net v0.32.0 + golang.org/x/net v0.33.0 golang.org/x/oauth2 v0.16.0 golang.org/x/sync v0.10.0 golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab @@ -135,7 +135,7 @@ require ( github.com/catenacyber/perfsprint v0.7.1 // indirect github.com/ccojocar/zxcvbn-go v1.0.2 // indirect github.com/ckaznocha/intrange v0.1.0 // indirect - github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/cyphar/filepath-securejoin v0.3.6 // indirect github.com/dave/astrid v0.0.0-20170323122508-8c2895878b14 // indirect github.com/dave/brenda v1.1.0 // indirect github.com/docker/go-connections v0.5.0 // indirect @@ -183,7 +183,7 @@ require ( github.com/Masterminds/semver v1.5.0 // indirect github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect - github.com/ProtonMail/go-crypto v1.0.0 // indirect + github.com/ProtonMail/go-crypto v1.1.3 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect github.com/ashanbrown/forbidigo v1.6.0 // indirect @@ -236,8 +236,8 @@ require ( github.com/fzipp/gocyclo v0.6.0 // indirect github.com/go-critic/go-critic v0.11.2 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.5.0 // indirect - github.com/go-git/go-git/v5 v5.11.0 // indirect + github.com/go-git/go-billy/v5 v5.6.1 // indirect + github.com/go-git/go-git/v5 v5.13.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect @@ -343,13 +343,13 @@ require ( github.com/sashamelentyev/interfacebloat v1.1.0 // indirect github.com/sashamelentyev/usestdlibvars v1.25.0 // indirect github.com/securego/gosec/v2 v2.19.0 // indirect - github.com/sergi/go-diff v1.3.1 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect github.com/shopspring/decimal v1.3.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sivchari/containedctx v1.0.3 // indirect github.com/sivchari/tenv v1.7.1 // indirect - github.com/skeema/knownhosts v1.2.1 // indirect + github.com/skeema/knownhosts v1.3.0 // indirect github.com/sonatard/noctx v0.0.2 // indirect github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/afero v1.11.0 // indirect @@ -361,7 +361,7 @@ require ( github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/subosito/gotenv v1.4.2 // indirect github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 diff --git a/go.sum 
b/go.sum index ae6a09262..efb20e63a 100644 --- a/go.sum +++ b/go.sum @@ -83,8 +83,8 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA= github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= -github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= -github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk= +github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k= github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw= github.com/ProtonMail/gopenpgp/v2 v2.7.1 h1:Awsg7MPc2gD3I7IFac2qE3Gdls0lZW8SzrFZ3k1oz0s= @@ -200,7 +200,6 @@ github.com/butuzov/ireturn v0.3.0 h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0 github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA= github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI= github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE= -github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/caarlos0/go-rpmutils v0.2.1-0.20211112020245-2cd62ff89b11 h1:IRrDwVlWQr6kS1U8/EtyA1+EHcc4yl8pndcqXWrEamg= github.com/caarlos0/go-rpmutils v0.2.1-0.20211112020245-2cd62ff89b11/go.mod h1:je2KZ+LxaCNvCoKg32jtOIULcFogJKcL1ZWUaIBjKj0= github.com/caarlos0/testfs v0.4.4 h1:3PHvzHi5Lt+g332CiShwS8ogTgS3HjrmzZxCm6JCDr8= @@ -231,7 +230,6 @@ github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P github.com/ckaznocha/intrange v0.1.0 h1:ZiGBhvrdsKpoEfzh9CjBfDSZof6QB0ORY5tXasUtiew= github.com/ckaznocha/intrange v0.1.0/go.mod h1:Vwa9Ekex2BrEQMg6zlrWwbs/FtYw7eS5838Q7UjK7TQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -251,8 +249,8 @@ github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM= +github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/daixiang0/gci v0.12.3 h1:yOZI7VAxAGPQmkb1eqt5g/11SUlwoat1fSblGLmdiQc= github.com/daixiang0/gci v0.12.3/go.mod 
h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= github.com/dave/astrid v0.0.0-20170323122508-8c2895878b14 h1:YI1gOOdmMk3xodBao7fehcvoZsEeOyy/cfhlpCSPgM4= @@ -293,8 +291,8 @@ github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI= github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40= github.com/elastic/crd-ref-docs v0.0.12 h1:F3seyncbzUz3rT3d+caeYWhumb5ojYQ6Bl0Z+zOp16M= github.com/elastic/crd-ref-docs v0.0.12/go.mod h1:X83mMBdJt05heJUYiS3T0yJ/JkCuliuhSUNav5Gjo/U= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/elazarl/goproxy v1.2.3 h1:xwIyKHbaP5yfT6O9KIeYJR5549MXRQkoQMRXGztz8YQ= +github.com/elazarl/goproxy v1.2.3/go.mod h1:YfEbZtqP4AetfO6d40vWchF3znWX7C7Vd6ZMfdL8z64= github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU= github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= @@ -335,18 +333,18 @@ github.com/ghostiam/protogetter v0.3.5 h1:+f7UiF8XNd4w3a//4DnusQ2SZjPkUjxkMEfjbx github.com/ghostiam/protogetter v0.3.5/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw= github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I= github.com/github/fakeca v0.1.0/go.mod h1:+bormgoGMMuamOscx7N91aOuUST7wdaJ2rNjeohylyo= -github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= -github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= +github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= +github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-critic/go-critic v0.11.2 h1:81xH/2muBphEgPtcwH1p6QD+KzXl2tMSi3hXjBSxDnM= github.com/go-critic/go-critic v0.11.2/go.mod h1:OePaicfjsf+KPy33yq4gzv6CO7TEQ9Rom6ns1KsJnl8= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= -github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/SoRA= +github.com/go-git/go-billy/v5 v5.6.1/go.mod h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= -github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= +github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M= +github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0qu3XXXVixc= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -745,8 +743,8 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= -github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= -github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -846,8 +844,8 @@ github.com/sashamelentyev/usestdlibvars v1.25.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7 github.com/securego/gosec/v2 v2.19.0 h1:gl5xMkOI0/E6Hxx0XCY2XujA3V7SNSefA8sC+3f1gnk= github.com/securego/gosec/v2 v2.19.0/go.mod h1:hOkDcHz9J/XIgIlPDXalxjeVYsHxoWUc5zJSHxcB8YM= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -865,8 +863,8 @@ github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+W github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak= github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= -github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= -github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= +github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU= @@ -909,8 +907,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/studio-b12/gowebdav v0.9.0 h1:1j1sc9gQnNxbXXM4M/CebPOX4aXYtr7MojAVcN4dHjU= github.com/studio-b12/gowebdav v0.9.0/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= @@ -1060,10 +1059,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= -golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1074,8 +1071,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8= @@ -1152,9 +1149,8 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= -golang.org/x/net v0.32.0/go.mod 
h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1234,7 +1230,6 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1247,7 +1242,6 @@ golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1261,7 +1255,6 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 68997e0dfac4b78ac2ebaa9ea9f0f075c250aae7 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 9 Jan 2025 07:15:19 +0000 Subject: [PATCH 0300/1708] cmd/k8s-operator,k8s-operator: allow users to set custom labels for the optional ServiceMonitor (#14475) * cmd/k8s-operator,k8s-operator: allow users to set custom labels for the optional ServiceMonitor Updates tailscale/tailscale#14381 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/connector_test.go | 2 +- .../crds/tailscale.com_proxyclasses.yaml | 14 +++ .../deploy/manifests/operator.yaml | 14 +++ cmd/k8s-operator/ingress_test.go | 115 ++++++++++-------- cmd/k8s-operator/metrics_resources.go | 37 ++++-- cmd/k8s-operator/operator_test.go | 103 +++++++++++++++- cmd/k8s-operator/proxyclass.go | 9 +- cmd/k8s-operator/proxyclass_test.go | 23 +++- cmd/k8s-operator/proxygroup_test.go | 1 + cmd/k8s-operator/sts.go | 4 +- cmd/k8s-operator/sts_test.go | 47 ++++--- 
cmd/k8s-operator/testutils_test.go | 21 ++-- k8s-operator/api.md | 36 +++++- .../apis/v1alpha1/types_proxyclass.go | 30 ++++- .../apis/v1alpha1/zz_generated.deepcopy.go | 34 +++++- 15 files changed, 389 insertions(+), 101 deletions(-) diff --git a/cmd/k8s-operator/connector_test.go b/cmd/k8s-operator/connector_test.go index 7cdd83115..242f1f99f 100644 --- a/cmd/k8s-operator/connector_test.go +++ b/cmd/k8s-operator/connector_test.go @@ -203,7 +203,7 @@ func TestConnectorWithProxyClass(t *testing.T) { pc := &tsapi.ProxyClass{ ObjectMeta: metav1.ObjectMeta{Name: "custom-metadata"}, Spec: tsapi.ProxyClassSpec{StatefulSet: &tsapi.StatefulSet{ - Labels: map[string]string{"foo": "bar"}, + Labels: tsapi.Labels{"foo": "bar"}, Annotations: map[string]string{"bar.io/foo": "some-val"}, Pod: &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": "some-val"}}}}, } diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index 9b45deedb..2e53d5ee8 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -99,6 +99,16 @@ spec: enable: description: If Enable is set to true, a Prometheus ServiceMonitor will be created. Enable can only be set to true if metrics are enabled. type: boolean + labels: + description: |- + Labels to add to the ServiceMonitor. + Labels must be valid Kubernetes labels. + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set + type: object + additionalProperties: + type: string + maxLength: 63 + pattern: ^(([a-zA-Z0-9][-._a-zA-Z0-9]*)?[a-zA-Z0-9])?$ x-kubernetes-validations: - rule: '!(has(self.serviceMonitor) && self.serviceMonitor.enable && !self.enable)' message: ServiceMonitor can only be enabled if metrics are enabled @@ -133,6 +143,8 @@ spec: type: object additionalProperties: type: string + maxLength: 63 + pattern: ^(([a-zA-Z0-9][-._a-zA-Z0-9]*)?[a-zA-Z0-9])?$ pod: description: Configuration for the proxy Pod. type: object @@ -1062,6 +1074,8 @@ spec: type: object additionalProperties: type: string + maxLength: 63 + pattern: ^(([a-zA-Z0-9][-._a-zA-Z0-9]*)?[a-zA-Z0-9])?$ nodeName: description: |- Proxy Pod's node name. diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 2f5100ab6..0026ffef5 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -563,6 +563,16 @@ spec: enable: description: If Enable is set to true, a Prometheus ServiceMonitor will be created. Enable can only be set to true if metrics are enabled. type: boolean + labels: + additionalProperties: + maxLength: 63 + pattern: ^(([a-zA-Z0-9][-._a-zA-Z0-9]*)?[a-zA-Z0-9])?$ + type: string + description: |- + Labels to add to the ServiceMonitor. + Labels must be valid Kubernetes labels. + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set + type: object required: - enable type: object @@ -592,6 +602,8 @@ spec: type: object labels: additionalProperties: + maxLength: 63 + pattern: ^(([a-zA-Z0-9][-._a-zA-Z0-9]*)?[a-zA-Z0-9])?$ type: string description: |- Labels that will be added to the StatefulSet created for the proxy. @@ -1522,6 +1534,8 @@ spec: type: array labels: additionalProperties: + maxLength: 63 + pattern: ^(([a-zA-Z0-9][-._a-zA-Z0-9]*)?[a-zA-Z0-9])?$ type: string description: |- Labels that will be added to the proxy Pod. 
diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index c4332908a..955258cc3 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -295,7 +295,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { pc := &tsapi.ProxyClass{ ObjectMeta: metav1.ObjectMeta{Name: "custom-metadata"}, Spec: tsapi.ProxyClassSpec{StatefulSet: &tsapi.StatefulSet{ - Labels: map[string]string{"foo": "bar"}, + Labels: tsapi.Labels{"foo": "bar"}, Annotations: map[string]string{"bar.io/foo": "some-val"}, Pod: &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": "some-val"}}}}, } @@ -424,12 +424,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { func TestTailscaleIngressWithServiceMonitor(t *testing.T) { pc := &tsapi.ProxyClass{ ObjectMeta: metav1.ObjectMeta{Name: "metrics", Generation: 1}, - Spec: tsapi.ProxyClassSpec{ - Metrics: &tsapi.Metrics{ - Enable: true, - ServiceMonitor: &tsapi.ServiceMonitor{Enable: true}, - }, - }, + Spec: tsapi.ProxyClassSpec{}, Status: tsapi.ProxyClassStatus{ Conditions: []metav1.Condition{{ Status: metav1.ConditionTrue, @@ -437,32 +432,6 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { ObservedGeneration: 1, }}}, } - crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} - tsIngressClass := &networkingv1.IngressClass{ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}} - fc := fake.NewClientBuilder(). - WithScheme(tsapi.GlobalScheme). - WithObjects(pc, tsIngressClass). - WithStatusSubresource(pc). - Build() - ft := &fakeTSClient{} - fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } - ingR := &IngressReconciler{ - Client: fc, - ssr: &tailscaleSTSReconciler{ - Client: fc, - tsClient: ft, - tsnetServer: fakeTsnetServer, - defaultTags: []string{"tag:k8s"}, - operatorNamespace: "operator-ns", - proxyImage: "tailscale/tailscale", - }, - logger: zl.Sugar(), - } - // 1. Enable metrics- expect metrics Service to be created ing := &networkingv1.Ingress{ TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{ @@ -491,8 +460,7 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { }, }, } - mustCreate(t, fc, ing) - mustCreate(t, fc, &corev1.Service{ + svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", @@ -504,11 +472,38 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { Name: "http"}, }, }, - }) - + } + crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} + tsIngressClass := &networkingv1.IngressClass{ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}} + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(pc, tsIngressClass, ing, svc). + WithStatusSubresource(pc). 
+ Build() + ft := &fakeTSClient{} + fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + ingR := &IngressReconciler{ + Client: fc, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + tsnetServer: fakeTsnetServer, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + logger: zl.Sugar(), + } expectReconciled(t, ingR, "default", "test") - fullName, shortName := findGenName(t, fc, "default", "test", "ingress") + serveConfig := &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}}, + } opts := configOpts{ stsName: shortName, secretName: fullName, @@ -517,27 +512,51 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { parentType: "ingress", hostname: "default-test", app: kubetypes.AppIngressResource, - enableMetrics: true, namespaced: true, proxyType: proxyTypeIngressResource, + serveConfig: serveConfig, + resourceVersion: "1", } - serveConfig := &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, - Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}}, - } - opts.serveConfig = serveConfig - expectEqual(t, fc, expectedSecret(t, fc, opts), nil) - expectEqual(t, fc, expectedHeadlessService(shortName, "ingress"), nil) + // 1. Enable metrics- expect metrics Service to be created + mustUpdate(t, fc, "", "metrics", func(proxyClass *tsapi.ProxyClass) { + proxyClass.Spec.Metrics = &tsapi.Metrics{Enable: true} + }) + opts.enableMetrics = true + + expectReconciled(t, ingR, "default", "test") + expectEqual(t, fc, expectedMetricsService(opts), nil) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation) + // 2. Enable ServiceMonitor - should not error when there is no ServiceMonitor CRD in cluster mustUpdate(t, fc, "", "metrics", func(pc *tsapi.ProxyClass) { - pc.Spec.Metrics.ServiceMonitor = &tsapi.ServiceMonitor{Enable: true} + pc.Spec.Metrics.ServiceMonitor = &tsapi.ServiceMonitor{Enable: true, Labels: tsapi.Labels{"foo": "bar"}} }) expectReconciled(t, ingR, "default", "test") + expectEqual(t, fc, expectedMetricsService(opts), nil) + // 3. Create ServiceMonitor CRD and reconcile- ServiceMonitor should get created mustCreate(t, fc, crd) expectReconciled(t, ingR, "default", "test") + opts.serviceMonitorLabels = tsapi.Labels{"foo": "bar"} + expectEqual(t, fc, expectedMetricsService(opts), nil) + expectEqualUnstructured(t, fc, expectedServiceMonitor(t, opts)) + + // 4. Update ServiceMonitor CRD and reconcile- ServiceMonitor should get updated + mustUpdate(t, fc, pc.Namespace, pc.Name, func(proxyClass *tsapi.ProxyClass) { + proxyClass.Spec.Metrics.ServiceMonitor.Labels = nil + }) + expectReconciled(t, ingR, "default", "test") + opts.serviceMonitorLabels = nil + opts.resourceVersion = "2" + expectEqual(t, fc, expectedMetricsService(opts), nil) expectEqualUnstructured(t, fc, expectedServiceMonitor(t, opts)) + + // 5. Disable metrics - metrics resources should get deleted. 
+ mustUpdate(t, fc, pc.Namespace, pc.Name, func(proxyClass *tsapi.ProxyClass) { + proxyClass.Spec.Metrics = nil + }) + expectReconciled(t, ingR, "default", "test") + expectMissing[corev1.Service](t, fc, "operator-ns", metricsResourceName(shortName)) + // ServiceMonitor gets garbage collected when the Service is deleted - we cannot test that here. } diff --git a/cmd/k8s-operator/metrics_resources.go b/cmd/k8s-operator/metrics_resources.go index 4881436e8..8516cf8be 100644 --- a/cmd/k8s-operator/metrics_resources.go +++ b/cmd/k8s-operator/metrics_resources.go @@ -8,6 +8,7 @@ package main import ( "context" "fmt" + "reflect" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" @@ -115,15 +116,15 @@ func reconcileMetricsResources(ctx context.Context, logger *zap.SugaredLogger, o return maybeCleanupServiceMonitor(ctx, cl, opts.proxyStsName, opts.tsNamespace) } - logger.Info("ensuring ServiceMonitor for metrics Service %s/%s", metricsSvc.Namespace, metricsSvc.Name) - svcMonitor, err := newServiceMonitor(metricsSvc) + logger.Infof("ensuring ServiceMonitor for metrics Service %s/%s", metricsSvc.Namespace, metricsSvc.Name) + svcMonitor, err := newServiceMonitor(metricsSvc, pc.Spec.Metrics.ServiceMonitor) if err != nil { return fmt.Errorf("error creating ServiceMonitor: %w", err) } - // We don't use createOrUpdate here because that does not work with unstructured types. We also do not update - // the ServiceMonitor because it is not expected that any of its fields would change. Currently this is good - // enough, but in future we might want to add logic to create-or-update unstructured types. - err = cl.Get(ctx, client.ObjectKeyFromObject(metricsSvc), svcMonitor.DeepCopy()) + + // We don't use createOrUpdate here because that does not work with unstructured types. + existing := svcMonitor.DeepCopy() + err = cl.Get(ctx, client.ObjectKeyFromObject(metricsSvc), existing) if apierrors.IsNotFound(err) { if err := cl.Create(ctx, svcMonitor); err != nil { return fmt.Errorf("error creating ServiceMonitor: %w", err) @@ -133,6 +134,13 @@ func reconcileMetricsResources(ctx context.Context, logger *zap.SugaredLogger, o if err != nil { return fmt.Errorf("error getting ServiceMonitor: %w", err) } + // Currently, we only update labels on the ServiceMonitor as those are the only values that can change. + if !reflect.DeepEqual(existing.GetLabels(), svcMonitor.GetLabels()) { + existing.SetLabels(svcMonitor.GetLabels()) + if err := cl.Update(ctx, existing); err != nil { + return fmt.Errorf("error updating ServiceMonitor: %w", err) + } + } return nil } @@ -165,9 +173,13 @@ func maybeCleanupServiceMonitor(ctx context.Context, cl client.Client, stsName, // newServiceMonitor takes a metrics Service created for a proxy and constructs and returns a ServiceMonitor for that // proxy that can be applied to the kube API server. // The ServiceMonitor is returned as Unstructured type - this allows us to avoid importing prometheus-operator API server client/schema. 
-func newServiceMonitor(metricsSvc *corev1.Service) (*unstructured.Unstructured, error) { +func newServiceMonitor(metricsSvc *corev1.Service, spec *tsapi.ServiceMonitor) (*unstructured.Unstructured, error) { sm := serviceMonitorTemplate(metricsSvc.Name, metricsSvc.Namespace) sm.ObjectMeta.Labels = metricsSvc.Labels + if spec != nil && len(spec.Labels) > 0 { + sm.ObjectMeta.Labels = mergeMapKeys(sm.ObjectMeta.Labels, spec.Labels.Parse()) + } + sm.ObjectMeta.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(metricsSvc, corev1.SchemeGroupVersion.WithKind("Service"))} sm.Spec = ServiceMonitorSpec{ Selector: metav1.LabelSelector{MatchLabels: metricsSvc.Labels}, @@ -270,3 +282,14 @@ type metricsOpts struct { func isNamespacedProxyType(typ string) bool { return typ == proxyTypeIngressResource || typ == proxyTypeIngressService } + +func mergeMapKeys(a, b map[string]string) map[string]string { + m := make(map[string]string, len(a)+len(b)) + for key, val := range b { + m[key] = val + } + for key, val := range a { + m[key] = val + } + return m +} diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index e46cdd7fe..d53269f05 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -16,6 +16,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -1129,7 +1130,7 @@ func TestProxyClassForService(t *testing.T) { AcceptRoutes: true, }, StatefulSet: &tsapi.StatefulSet{ - Labels: map[string]string{"foo": "bar"}, + Labels: tsapi.Labels{"foo": "bar"}, Annotations: map[string]string{"bar.io/foo": "some-val"}, Pod: &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": "some-val"}}}}, } @@ -1766,6 +1767,106 @@ func Test_externalNameService(t *testing.T) { expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) } +func Test_metricsResourceCreation(t *testing.T) { + pc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{Name: "metrics", Generation: 1}, + Spec: tsapi.ProxyClassSpec{}, + Status: tsapi.ProxyClassStatus{ + Conditions: []metav1.Condition{{ + Status: metav1.ConditionTrue, + Type: string(tsapi.ProxyClassReady), + ObservedGeneration: 1, + }}}, + } + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + UID: types.UID("1234-UID"), + Labels: map[string]string{LabelProxyClass: "metrics"}, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.20.30.40", + Type: corev1.ServiceTypeLoadBalancer, + LoadBalancerClass: ptr.To("tailscale"), + }, + } + crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(pc, svc). + WithStatusSubresource(pc). 
+ Build() + ft := &fakeTSClient{} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + clock := tstest.NewClock(tstest.ClockOpts{}) + sr := &ServiceReconciler{ + Client: fc, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + operatorNamespace: "operator-ns", + }, + logger: zl.Sugar(), + clock: clock, + } + expectReconciled(t, sr, "default", "test") + fullName, shortName := findGenName(t, fc, "default", "test", "svc") + opts := configOpts{ + stsName: shortName, + secretName: fullName, + namespace: "default", + parentType: "svc", + tailscaleNamespace: "operator-ns", + hostname: "default-test", + namespaced: true, + proxyType: proxyTypeIngressService, + app: kubetypes.AppIngressProxy, + resourceVersion: "1", + } + + // 1. Enable metrics- expect metrics Service to be created + mustUpdate(t, fc, "", "metrics", func(pc *tsapi.ProxyClass) { + pc.Spec = tsapi.ProxyClassSpec{Metrics: &tsapi.Metrics{Enable: true}} + }) + expectReconciled(t, sr, "default", "test") + opts.enableMetrics = true + expectEqual(t, fc, expectedMetricsService(opts), nil) + + // 2. Enable ServiceMonitor - should not error when there is no ServiceMonitor CRD in cluster + mustUpdate(t, fc, "", "metrics", func(pc *tsapi.ProxyClass) { + pc.Spec.Metrics.ServiceMonitor = &tsapi.ServiceMonitor{Enable: true} + }) + expectReconciled(t, sr, "default", "test") + + // 3. Create ServiceMonitor CRD and reconcile- ServiceMonitor should get created + mustCreate(t, fc, crd) + expectReconciled(t, sr, "default", "test") + expectEqualUnstructured(t, fc, expectedServiceMonitor(t, opts)) + + // 4. A change to ServiceMonitor config gets reflected in the ServiceMonitor resource + mustUpdate(t, fc, "", "metrics", func(pc *tsapi.ProxyClass) { + pc.Spec.Metrics.ServiceMonitor.Labels = tsapi.Labels{"foo": "bar"} + }) + expectReconciled(t, sr, "default", "test") + opts.serviceMonitorLabels = tsapi.Labels{"foo": "bar"} + opts.resourceVersion = "2" + expectEqual(t, fc, expectedMetricsService(opts), nil) + expectEqualUnstructured(t, fc, expectedServiceMonitor(t, opts)) + + // 5. Disable metrics- expect metrics Service to be deleted + mustUpdate(t, fc, "", "metrics", func(pc *tsapi.ProxyClass) { + pc.Spec.Metrics = nil + }) + expectReconciled(t, sr, "default", "test") + expectMissing[corev1.Service](t, fc, "operator-ns", metricsResourceName(opts.stsName)) + // ServiceMonitor gets garbage collected when Service gets deleted (it has OwnerReference of the Service + // object). We cannot test this using the fake client. +} + func toFQDN(t *testing.T, s string) dnsname.FQDN { t.Helper() fqdn, err := dnsname.ToFQDN(s) diff --git a/cmd/k8s-operator/proxyclass.go b/cmd/k8s-operator/proxyclass.go index b781af05a..5ec9897d0 100644 --- a/cmd/k8s-operator/proxyclass.go +++ b/cmd/k8s-operator/proxyclass.go @@ -115,7 +115,7 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re func (pcr *ProxyClassReconciler) validate(ctx context.Context, pc *tsapi.ProxyClass) (violations field.ErrorList) { if sts := pc.Spec.StatefulSet; sts != nil { if len(sts.Labels) > 0 { - if errs := metavalidation.ValidateLabels(sts.Labels, field.NewPath(".spec.statefulSet.labels")); errs != nil { + if errs := metavalidation.ValidateLabels(sts.Labels.Parse(), field.NewPath(".spec.statefulSet.labels")); errs != nil { violations = append(violations, errs...) 
} } @@ -126,7 +126,7 @@ func (pcr *ProxyClassReconciler) validate(ctx context.Context, pc *tsapi.ProxyCl } if pod := sts.Pod; pod != nil { if len(pod.Labels) > 0 { - if errs := metavalidation.ValidateLabels(pod.Labels, field.NewPath(".spec.statefulSet.pod.labels")); errs != nil { + if errs := metavalidation.ValidateLabels(pod.Labels.Parse(), field.NewPath(".spec.statefulSet.pod.labels")); errs != nil { violations = append(violations, errs...) } } @@ -178,6 +178,11 @@ func (pcr *ProxyClassReconciler) validate(ctx context.Context, pc *tsapi.ProxyCl violations = append(violations, field.TypeInvalid(field.NewPath("spec", "metrics", "serviceMonitor"), "enable", msg)) } } + if pc.Spec.Metrics != nil && pc.Spec.Metrics.ServiceMonitor != nil && len(pc.Spec.Metrics.ServiceMonitor.Labels) > 0 { + if errs := metavalidation.ValidateLabels(pc.Spec.Metrics.ServiceMonitor.Labels.Parse(), field.NewPath(".spec.metrics.serviceMonitor.labels")); errs != nil { + violations = append(violations, errs...) + } + } // We do not validate embedded fields (security context, resource // requirements etc) as we inherit upstream validation for those fields. // Invalid values would get rejected by upstream validations at apply diff --git a/cmd/k8s-operator/proxyclass_test.go b/cmd/k8s-operator/proxyclass_test.go index e6e16e9f9..78828107a 100644 --- a/cmd/k8s-operator/proxyclass_test.go +++ b/cmd/k8s-operator/proxyclass_test.go @@ -36,10 +36,10 @@ func TestProxyClass(t *testing.T) { }, Spec: tsapi.ProxyClassSpec{ StatefulSet: &tsapi.StatefulSet{ - Labels: map[string]string{"foo": "bar", "xyz1234": "abc567"}, + Labels: tsapi.Labels{"foo": "bar", "xyz1234": "abc567"}, Annotations: map[string]string{"foo.io/bar": "{'key': 'val1232'}"}, Pod: &tsapi.Pod{ - Labels: map[string]string{"foo": "bar", "xyz1234": "abc567"}, + Labels: tsapi.Labels{"foo": "bar", "xyz1234": "abc567"}, Annotations: map[string]string{"foo.io/bar": "{'key': 'val1232'}"}, TailscaleContainer: &tsapi.Container{ Env: []tsapi.Env{{Name: "FOO", Value: "BAR"}}, @@ -155,6 +155,25 @@ func TestProxyClass(t *testing.T) { expectReconciled(t, pcr, "", "test") tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionTrue, reasonProxyClassValid, reasonProxyClassValid, 0, cl, zl.Sugar()) expectEqual(t, fc, pc, nil) + + // 7. A ProxyClass with invalid ServiceMonitor labels gets its status updated to Invalid with an error message. + pc.Spec.Metrics.ServiceMonitor.Labels = tsapi.Labels{"foo": "bar!"} + mustUpdate(t, fc, "", "test", func(proxyClass *tsapi.ProxyClass) { + proxyClass.Spec.Metrics.ServiceMonitor.Labels = pc.Spec.Metrics.ServiceMonitor.Labels + }) + expectReconciled(t, pcr, "", "test") + msg = `ProxyClass is not valid: .spec.metrics.serviceMonitor.labels: Invalid value: "bar!": a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')` + tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) + expectEqual(t, fc, pc, nil) + + // 8. A ProxyClass with valid ServiceMonitor labels gets its status updated to Valid. 
+ pc.Spec.Metrics.ServiceMonitor.Labels = tsapi.Labels{"foo": "bar", "xyz1234": "abc567", "empty": "", "onechar": "a"} + mustUpdate(t, fc, "", "test", func(proxyClass *tsapi.ProxyClass) { + proxyClass.Spec.Metrics.ServiceMonitor.Labels = pc.Spec.Metrics.ServiceMonitor.Labels + }) + expectReconciled(t, pcr, "", "test") + tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionTrue, reasonProxyClassValid, reasonProxyClassValid, 0, cl, zl.Sugar()) + expectEqual(t, fc, pc, nil) } func TestValidateProxyClass(t *testing.T) { diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index bc0dccdff..6464a0b2d 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -88,6 +88,7 @@ func TestProxyGroup(t *testing.T) { stsName: pg.Name, parentType: "proxygroup", tailscaleNamespace: "tailscale", + resourceVersion: "1", } t.Run("proxyclass_not_ready", func(t *testing.T) { diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index ff7c074a8..b861bdfff 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -761,7 +761,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, } // Update StatefulSet metadata. - if wantsSSLabels := pc.Spec.StatefulSet.Labels; len(wantsSSLabels) > 0 { + if wantsSSLabels := pc.Spec.StatefulSet.Labels.Parse(); len(wantsSSLabels) > 0 { ss.ObjectMeta.Labels = mergeStatefulSetLabelsOrAnnots(ss.ObjectMeta.Labels, wantsSSLabels, tailscaleManagedLabels) } if wantsSSAnnots := pc.Spec.StatefulSet.Annotations; len(wantsSSAnnots) > 0 { @@ -773,7 +773,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, return ss } wantsPod := pc.Spec.StatefulSet.Pod - if wantsPodLabels := wantsPod.Labels; len(wantsPodLabels) > 0 { + if wantsPodLabels := wantsPod.Labels.Parse(); len(wantsPodLabels) > 0 { ss.Spec.Template.ObjectMeta.Labels = mergeStatefulSetLabelsOrAnnots(ss.Spec.Template.ObjectMeta.Labels, wantsPodLabels, tailscaleManagedLabels) } if wantsPodAnnots := wantsPod.Annotations; len(wantsPodAnnots) > 0 { diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index 05aafaee6..3d0cecc04 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -61,10 +61,10 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { proxyClassAllOpts := &tsapi.ProxyClass{ Spec: tsapi.ProxyClassSpec{ StatefulSet: &tsapi.StatefulSet{ - Labels: map[string]string{"foo": "bar"}, + Labels: tsapi.Labels{"foo": "bar"}, Annotations: map[string]string{"foo.io/bar": "foo"}, Pod: &tsapi.Pod{ - Labels: map[string]string{"bar": "foo"}, + Labels: tsapi.Labels{"bar": "foo"}, Annotations: map[string]string{"bar.io/foo": "foo"}, SecurityContext: &corev1.PodSecurityContext{ RunAsUser: ptr.To(int64(0)), @@ -116,10 +116,10 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { proxyClassJustLabels := &tsapi.ProxyClass{ Spec: tsapi.ProxyClassSpec{ StatefulSet: &tsapi.StatefulSet{ - Labels: map[string]string{"foo": "bar"}, + Labels: tsapi.Labels{"foo": "bar"}, Annotations: map[string]string{"foo.io/bar": "foo"}, Pod: &tsapi.Pod{ - Labels: map[string]string{"bar": "foo"}, + Labels: tsapi.Labels{"bar": "foo"}, Annotations: map[string]string{"bar.io/foo": "foo"}, }, }, @@ -146,7 +146,6 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { }, } } - var userspaceProxySS, nonUserspaceProxySS appsv1.StatefulSet if err := yaml.Unmarshal(userspaceProxyYaml, &userspaceProxySS); err != nil { t.Fatalf("unmarshaling userspace proxy 
template: %v", err) @@ -176,9 +175,9 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { // 1. Test that a ProxyClass with all fields set gets correctly applied // to a Statefulset built from non-userspace proxy template. wantSS := nonUserspaceProxySS.DeepCopy() - wantSS.ObjectMeta.Labels = mergeMapKeys(wantSS.ObjectMeta.Labels, proxyClassAllOpts.Spec.StatefulSet.Labels) - wantSS.ObjectMeta.Annotations = mergeMapKeys(wantSS.ObjectMeta.Annotations, proxyClassAllOpts.Spec.StatefulSet.Annotations) - wantSS.Spec.Template.Labels = proxyClassAllOpts.Spec.StatefulSet.Pod.Labels + updateMap(wantSS.ObjectMeta.Labels, proxyClassAllOpts.Spec.StatefulSet.Labels.Parse()) + updateMap(wantSS.ObjectMeta.Annotations, proxyClassAllOpts.Spec.StatefulSet.Annotations) + wantSS.Spec.Template.Labels = proxyClassAllOpts.Spec.StatefulSet.Pod.Labels.Parse() wantSS.Spec.Template.Annotations = proxyClassAllOpts.Spec.StatefulSet.Pod.Annotations wantSS.Spec.Template.Spec.SecurityContext = proxyClassAllOpts.Spec.StatefulSet.Pod.SecurityContext wantSS.Spec.Template.Spec.ImagePullSecrets = proxyClassAllOpts.Spec.StatefulSet.Pod.ImagePullSecrets @@ -207,9 +206,9 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { // StatefulSet and Pod set gets correctly applied to a Statefulset built // from non-userspace proxy template. wantSS = nonUserspaceProxySS.DeepCopy() - wantSS.ObjectMeta.Labels = mergeMapKeys(wantSS.ObjectMeta.Labels, proxyClassJustLabels.Spec.StatefulSet.Labels) - wantSS.ObjectMeta.Annotations = mergeMapKeys(wantSS.ObjectMeta.Annotations, proxyClassJustLabels.Spec.StatefulSet.Annotations) - wantSS.Spec.Template.Labels = proxyClassJustLabels.Spec.StatefulSet.Pod.Labels + updateMap(wantSS.ObjectMeta.Labels, proxyClassJustLabels.Spec.StatefulSet.Labels.Parse()) + updateMap(wantSS.ObjectMeta.Annotations, proxyClassJustLabels.Spec.StatefulSet.Annotations) + wantSS.Spec.Template.Labels = proxyClassJustLabels.Spec.StatefulSet.Pod.Labels.Parse() wantSS.Spec.Template.Annotations = proxyClassJustLabels.Spec.StatefulSet.Pod.Annotations gotSS = applyProxyClassToStatefulSet(proxyClassJustLabels, nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { @@ -219,9 +218,9 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { // 3. Test that a ProxyClass with all fields set gets correctly applied // to a Statefulset built from a userspace proxy template. wantSS = userspaceProxySS.DeepCopy() - wantSS.ObjectMeta.Labels = mergeMapKeys(wantSS.ObjectMeta.Labels, proxyClassAllOpts.Spec.StatefulSet.Labels) - wantSS.ObjectMeta.Annotations = mergeMapKeys(wantSS.ObjectMeta.Annotations, proxyClassAllOpts.Spec.StatefulSet.Annotations) - wantSS.Spec.Template.Labels = proxyClassAllOpts.Spec.StatefulSet.Pod.Labels + updateMap(wantSS.ObjectMeta.Labels, proxyClassAllOpts.Spec.StatefulSet.Labels.Parse()) + updateMap(wantSS.ObjectMeta.Annotations, proxyClassAllOpts.Spec.StatefulSet.Annotations) + wantSS.Spec.Template.Labels = proxyClassAllOpts.Spec.StatefulSet.Pod.Labels.Parse() wantSS.Spec.Template.Annotations = proxyClassAllOpts.Spec.StatefulSet.Pod.Annotations wantSS.Spec.Template.Spec.SecurityContext = proxyClassAllOpts.Spec.StatefulSet.Pod.SecurityContext wantSS.Spec.Template.Spec.ImagePullSecrets = proxyClassAllOpts.Spec.StatefulSet.Pod.ImagePullSecrets @@ -243,9 +242,9 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { // 4. 
Test that a ProxyClass with custom labels and annotations gets correctly applied // to a Statefulset built from a userspace proxy template. wantSS = userspaceProxySS.DeepCopy() - wantSS.ObjectMeta.Labels = mergeMapKeys(wantSS.ObjectMeta.Labels, proxyClassJustLabels.Spec.StatefulSet.Labels) - wantSS.ObjectMeta.Annotations = mergeMapKeys(wantSS.ObjectMeta.Annotations, proxyClassJustLabels.Spec.StatefulSet.Annotations) - wantSS.Spec.Template.Labels = proxyClassJustLabels.Spec.StatefulSet.Pod.Labels + updateMap(wantSS.ObjectMeta.Labels, proxyClassJustLabels.Spec.StatefulSet.Labels.Parse()) + updateMap(wantSS.ObjectMeta.Annotations, proxyClassJustLabels.Spec.StatefulSet.Annotations) + wantSS.Spec.Template.Labels = proxyClassJustLabels.Spec.StatefulSet.Pod.Labels.Parse() wantSS.Spec.Template.Annotations = proxyClassJustLabels.Spec.StatefulSet.Pod.Annotations gotSS = applyProxyClassToStatefulSet(proxyClassJustLabels, userspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { @@ -294,13 +293,6 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { } } -func mergeMapKeys(a, b map[string]string) map[string]string { - for key, val := range b { - a[key] = val - } - return a -} - func Test_mergeStatefulSetLabelsOrAnnots(t *testing.T) { tests := []struct { name string @@ -392,3 +384,10 @@ func Test_mergeStatefulSetLabelsOrAnnots(t *testing.T) { }) } } + +// updateMap updates map a with the values from map b. +func updateMap(a, b map[string]string) { + for key, val := range b { + a[key] = val + } +} diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index f6ae29b62..d43e75b1e 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -61,7 +61,10 @@ type configOpts struct { app string shouldRemoveAuthKey bool secretExtraData map[string][]byte - enableMetrics bool + resourceVersion string + + enableMetrics bool + serviceMonitorLabels tsapi.Labels } func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.StatefulSet { @@ -431,14 +434,17 @@ func metricsLabels(opts configOpts) map[string]string { func expectedServiceMonitor(t *testing.T, opts configOpts) *unstructured.Unstructured { t.Helper() - labels := metricsLabels(opts) + smLabels := metricsLabels(opts) + if len(opts.serviceMonitorLabels) != 0 { + smLabels = mergeMapKeys(smLabels, opts.serviceMonitorLabels.Parse()) + } name := metricsResourceName(opts.stsName) sm := &ServiceMonitor{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: opts.tailscaleNamespace, - Labels: labels, - ResourceVersion: "1", + Labels: smLabels, + ResourceVersion: opts.resourceVersion, OwnerReferences: []metav1.OwnerReference{{APIVersion: "v1", Kind: "Service", Name: name, BlockOwnerDeletion: ptr.To(true), Controller: ptr.To(true)}}, }, TypeMeta: metav1.TypeMeta{ @@ -446,7 +452,7 @@ func expectedServiceMonitor(t *testing.T, opts configOpts) *unstructured.Unstruc APIVersion: "monitoring.coreos.com/v1", }, Spec: ServiceMonitorSpec{ - Selector: metav1.LabelSelector{MatchLabels: labels}, + Selector: metav1.LabelSelector{MatchLabels: metricsLabels(opts)}, Endpoints: []ServiceMonitorEndpoint{{ Port: "metrics", }}, @@ -653,10 +659,11 @@ func expectEqualUnstructured(t *testing.T, client client.Client, want *unstructu func expectMissing[T any, O ptrObject[T]](t *testing.T, client client.Client, ns, name string) { t.Helper() obj := O(new(T)) - if err := client.Get(context.Background(), types.NamespacedName{ + err := 
client.Get(context.Background(), types.NamespacedName{ Name: name, Namespace: ns, - }, obj); !apierrors.IsNotFound(err) { + }, obj) + if !apierrors.IsNotFound(err) { t.Fatalf("%s %s/%s unexpectedly present, wanted missing", reflect.TypeOf(obj).Elem().Name(), ns, name) } } diff --git a/k8s-operator/api.md b/k8s-operator/api.md index f52606989..fae25b1f6 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -313,6 +313,37 @@ _Appears in:_ +#### LabelValue + +_Underlying type:_ _string_ + + + +_Validation:_ +- MaxLength: 63 +- Pattern: `^(([a-zA-Z0-9][-._a-zA-Z0-9]*)?[a-zA-Z0-9])?$` +- Type: string + +_Appears in:_ +- [Labels](#labels) + + + +#### Labels + +_Underlying type:_ _[map[string]LabelValue](#map[string]labelvalue)_ + + + + + +_Appears in:_ +- [Pod](#pod) +- [ServiceMonitor](#servicemonitor) +- [StatefulSet](#statefulset) + + + #### Metrics @@ -407,7 +438,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `labels` _object (keys:string, values:string)_ | Labels that will be added to the proxy Pod.
Any labels specified here will be merged with the default labels
applied to the Pod by the Tailscale Kubernetes operator.
Label keys and values must be valid Kubernetes label keys and values.
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set | | | +| `labels` _[Labels](#labels)_ | Labels that will be added to the proxy Pod.
Any labels specified here will be merged with the default labels
applied to the Pod by the Tailscale Kubernetes operator.
Label keys and values must be valid Kubernetes label keys and values.
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set | | | | `annotations` _object (keys:string, values:string)_ | Annotations that will be added to the proxy Pod.
Any annotations specified here will be merged with the default
annotations applied to the Pod by the Tailscale Kubernetes operator.
Annotations must be valid Kubernetes annotations.
https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set | | | | `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#affinity-v1-core)_ | Proxy Pod's affinity rules.
By default, the Tailscale Kubernetes operator does not apply any affinity rules.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#affinity | | | | `tailscaleContainer` _[Container](#container)_ | Configuration for the proxy container running tailscale. | | | @@ -864,6 +895,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `enable` _boolean_ | If Enable is set to true, a Prometheus ServiceMonitor will be created. Enable can only be set to true if metrics are enabled. | | | +| `labels` _[Labels](#labels)_ | Labels to add to the ServiceMonitor.
Labels must be valid Kubernetes labels.
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set | | | #### StatefulSet @@ -879,7 +911,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `labels` _object (keys:string, values:string)_ | Labels that will be added to the StatefulSet created for the proxy.
Any labels specified here will be merged with the default labels
applied to the StatefulSet by the Tailscale Kubernetes operator as
well as any other labels that might have been applied by other
actors.
Label keys and values must be valid Kubernetes label keys and values.
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set | | | +| `labels` _[Labels](#labels)_ | Labels that will be added to the StatefulSet created for the proxy.
Any labels specified here will be merged with the default labels
applied to the StatefulSet by the Tailscale Kubernetes operator as
well as any other labels that might have been applied by other
actors.
Label keys and values must be valid Kubernetes label keys and values.
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set | | | | `annotations` _object (keys:string, values:string)_ | Annotations that will be added to the StatefulSet created for the proxy.
Any Annotations specified here will be merged with the default annotations
applied to the StatefulSet by the Tailscale Kubernetes operator as
well as any other annotations that might have been applied by other
actors.
Annotations must be valid Kubernetes annotations.
https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set | | | | `pod` _[Pod](#pod)_ | Configuration for the proxy Pod. | | | diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index ef9a071d0..549234fef 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -87,7 +87,7 @@ type StatefulSet struct { // Label keys and values must be valid Kubernetes label keys and values. // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set // +optional - Labels map[string]string `json:"labels,omitempty"` + Labels Labels `json:"labels,omitempty"` // Annotations that will be added to the StatefulSet created for the proxy. // Any Annotations specified here will be merged with the default annotations // applied to the StatefulSet by the Tailscale Kubernetes operator as @@ -109,7 +109,7 @@ type Pod struct { // Label keys and values must be valid Kubernetes label keys and values. // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set // +optional - Labels map[string]string `json:"labels,omitempty"` + Labels Labels `json:"labels,omitempty"` // Annotations that will be added to the proxy Pod. // Any annotations specified here will be merged with the default // annotations applied to the Pod by the Tailscale Kubernetes operator. @@ -188,8 +188,34 @@ type Metrics struct { type ServiceMonitor struct { // If Enable is set to true, a Prometheus ServiceMonitor will be created. Enable can only be set to true if metrics are enabled. Enable bool `json:"enable"` + // Labels to add to the ServiceMonitor. + // Labels must be valid Kubernetes labels. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set + // +optional + Labels Labels `json:"labels"` +} + +type Labels map[string]LabelValue + +func (l Labels) Parse() map[string]string { + if l == nil { + return nil + } + m := make(map[string]string, len(l)) + for k, v := range l { + m[k] = string(v) + } + return m } +// We do not validate the values of the label keys here - it is done by the ProxyClass +// reconciler because the validation rules are too complex for a CRD validation markers regex. + +// +kubebuilder:validation:Type=string +// +kubebuilder:validation:Pattern=`^(([a-zA-Z0-9][-._a-zA-Z0-9]*)?[a-zA-Z0-9])?$` +// +kubebuilder:validation:MaxLength=63 +type LabelValue string + type Container struct { // List of environment variables to set in the container. // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index 29c71cb90..5e7e7455c 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -316,13 +316,34 @@ func (in *Env) DeepCopy() *Env { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Labels) DeepCopyInto(out *Labels) { + { + in := &in + *out = make(Labels, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Labels. 
+func (in Labels) DeepCopy() Labels { + if in == nil { + return nil + } + out := new(Labels) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Metrics) DeepCopyInto(out *Metrics) { *out = *in if in.ServiceMonitor != nil { in, out := &in.ServiceMonitor, &out.ServiceMonitor *out = new(ServiceMonitor) - **out = **in + (*in).DeepCopyInto(*out) } } @@ -391,7 +412,7 @@ func (in *Pod) DeepCopyInto(out *Pod) { *out = *in if in.Labels != nil { in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) + *out = make(Labels, len(*in)) for key, val := range *in { (*out)[key] = val } @@ -999,6 +1020,13 @@ func (in *S3Secret) DeepCopy() *S3Secret { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceMonitor) DeepCopyInto(out *ServiceMonitor) { *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(Labels, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitor. @@ -1016,7 +1044,7 @@ func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { *out = *in if in.Labels != nil { in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) + *out = make(Labels, len(*in)) for key, val := range *in { (*out)[key] = val } From a51672cafd8b6c4e87915a55bda1491eb7cbee84 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Wed, 8 Jan 2025 10:36:35 -0600 Subject: [PATCH 0301/1708] prober: record total bytes transferred in DERP bandwidth probes This will enable Prometheus queries to look at the bandwidth over time windows, for example 'increase(derp_bw_bytes_total)[1h] / increase(derp_bw_transfer_time_seconds_total)[1h]'. Updates tailscale/corp#25503 Signed-off-by: Percy Wegmann --- prober/derp.go | 1 + 1 file changed, 1 insertion(+) diff --git a/prober/derp.go b/prober/derp.go index 3cd6394ad..6bad35845 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -318,6 +318,7 @@ func (d *derpProber) probeBandwidth(from, to string, size int64) ProbeClass { return []prometheus.Metric{ prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_probe_size_bytes", "Payload size of the bandwidth prober", nil, l), prometheus.GaugeValue, float64(size)), prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_transfer_time_seconds_total", "Time it took to transfer data", nil, l), prometheus.CounterValue, transferTimeSeconds.Value()), + prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_bytes_total", "Amount of data transferred", nil, l), prometheus.CounterValue, float64(size)), } }, } From 7fa07f34169b11a02e44165fd875e48b2e2e211d Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Thu, 9 Jan 2025 16:03:52 -0500 Subject: [PATCH 0302/1708] types/views: add SliceEqualAnyOrderFunc Extracted from some code written in the other repo. 
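For orientation before the diff, a minimal usage sketch of the new helper, assuming the types/views package layout in this series; the peer struct and its values are illustrative only and not part of this patch:

    package main

    import (
        "fmt"

        "tailscale.com/types/views"
    )

    // peer stands in for an element type that we want to compare by a
    // derived key (here ID) rather than by == on the whole value.
    type peer struct {
        ID   int
        Name string
    }

    func main() {
        a := views.SliceOf([]peer{{ID: 1, Name: "x"}, {ID: 2, Name: "y"}})
        b := views.SliceOf([]peer{{ID: 2, Name: "y"}, {ID: 1, Name: "x"}})
        // Same elements in a different order compare as equal.
        fmt.Println(views.SliceEqualAnyOrderFunc(a, b, func(p peer) int { return p.ID })) // true
    }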
Updates tailscale/corp#25479 Signed-off-by: Andrew Dunham Change-Id: I6df062fdffa1705524caa44ac3b6f2788cf64595 --- types/views/views.go | 35 +++++++++++++++++++++++++++++++++++ types/views/views_test.go | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+) diff --git a/types/views/views.go b/types/views/views.go index 4addc6448..0f53313c7 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -360,6 +360,41 @@ func SliceEqualAnyOrder[T comparable](a, b Slice[T]) bool { return true } +// SliceEqualAnyOrderFunc reports whether a and b contain the same elements, +// regardless of order. The underlying slices for a and b can be nil. +// +// The provided function should return a comparable value for each element. +func SliceEqualAnyOrderFunc[T any, V comparable](a, b Slice[T], cmp func(T) V) bool { + if a.Len() != b.Len() { + return false + } + + var diffStart int // beginning index where a and b differ + for n := a.Len(); diffStart < n; diffStart++ { + av := cmp(a.At(diffStart)) + bv := cmp(b.At(diffStart)) + if av != bv { + break + } + } + if diffStart == a.Len() { + return true + } + + // count the occurrences of remaining values and compare + valueCount := make(map[V]int) + for i, n := diffStart, a.Len(); i < n; i++ { + valueCount[cmp(a.At(i))]++ + valueCount[cmp(b.At(i))]-- + } + for _, count := range valueCount { + if count != 0 { + return false + } + } + return true +} + // MapSlice is a view over a map whose values are slices. type MapSlice[K comparable, V any] struct { // ж is the underlying mutable value, named with a hard-to-type diff --git a/types/views/views_test.go b/types/views/views_test.go index 51b086a4e..f290670fb 100644 --- a/types/views/views_test.go +++ b/types/views/views_test.go @@ -153,6 +153,43 @@ func TestViewUtils(t *testing.T) { qt.Equals, true) } +func TestSliceEqualAnyOrderFunc(t *testing.T) { + type nc struct { + _ structs.Incomparable + v string + } + + // ncFrom returns a Slice[nc] from a slice of []string + ncFrom := func(s ...string) Slice[nc] { + var out []nc + for _, v := range s { + out = append(out, nc{v: v}) + } + return SliceOf(out) + } + + // cmp returns a comparable value for a nc + cmp := func(a nc) string { return a.v } + + v := ncFrom("foo", "bar") + c := qt.New(t) + + // Simple case of slice equal to itself. + c.Check(SliceEqualAnyOrderFunc(v, v, cmp), qt.Equals, true) + + // Different order. + c.Check(SliceEqualAnyOrderFunc(v, ncFrom("bar", "foo"), cmp), qt.Equals, true) + + // Different values, same length + c.Check(SliceEqualAnyOrderFunc(v, ncFrom("foo", "baz"), cmp), qt.Equals, false) + + // Different values, different length + c.Check(SliceEqualAnyOrderFunc(v, ncFrom("foo"), cmp), qt.Equals, false) + + // Nothing shared + c.Check(SliceEqualAnyOrderFunc(v, ncFrom("baz", "qux"), cmp), qt.Equals, false) +} + func TestSliceEqual(t *testing.T) { a := SliceOf([]string{"foo", "bar"}) b := SliceOf([]string{"foo", "bar"}) From 6ddeae755695de850c56c788cba10bd4736934bb Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Thu, 9 Jan 2025 16:55:07 -0500 Subject: [PATCH 0303/1708] types/views: optimize SliceEqualAnyOrderFunc for small slices If the total number of differences is less than a small amount, just do the dumb quadratic thing and compare every single object instead of allocating a map. 
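One way to observe the intended effect is an allocation-reporting benchmark along these lines (the benchmark name and data are illustrative and not part of this patch; it assumes the small differing region stays below the quadratic-path threshold):

    package views_test

    import (
        "testing"

        "tailscale.com/types/views"
    )

    func BenchmarkSliceEqualAnyOrderFuncSmall(b *testing.B) {
        // Only the last two elements are out of order, so the differing
        // region is small and should be handled by the quadratic path
        // without allocating the counting map.
        x := views.SliceOf([]string{"a", "b", "c", "e", "d"})
        y := views.SliceOf([]string{"a", "b", "c", "d", "e"})
        b.ReportAllocs()
        for i := 0; i < b.N; i++ {
            if !views.SliceEqualAnyOrderFunc(x, y, func(s string) string { return s }) {
                b.Fatal("expected slices to be equal in any order")
            }
        }
    }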
Updates tailscale/corp#25479 Signed-off-by: Andrew Dunham Change-Id: I8931b4355a2da4ec0f19739927311cf88711a840 --- types/views/views.go | 23 +++++++++++++++++++++++ types/views/views_test.go | 9 +++++++++ 2 files changed, 32 insertions(+) diff --git a/types/views/views.go b/types/views/views.go index 0f53313c7..40d8811f5 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -381,6 +381,29 @@ func SliceEqualAnyOrderFunc[T any, V comparable](a, b Slice[T], cmp func(T) V) b return true } + // For a small number of items, avoid the allocation of a map and just + // do the quadratic thing. We can also only check the items between + // diffStart and the end. + nRemain := a.Len() - diffStart + if nRemain <= 5 { + maxLen := a.Len() // same as b.Len() + for i := diffStart; i < maxLen; i++ { + av := cmp(a.At(i)) + found := false + for j := diffStart; j < maxLen; j++ { + bv := cmp(b.At(j)) + if av == bv { + found = true + break + } + } + if !found { + return false + } + } + return true + } + // count the occurrences of remaining values and compare valueCount := make(map[V]int) for i, n := diffStart, a.Len(); i < n; i++ { diff --git a/types/views/views_test.go b/types/views/views_test.go index f290670fb..70e021aa4 100644 --- a/types/views/views_test.go +++ b/types/views/views_test.go @@ -188,6 +188,15 @@ func TestSliceEqualAnyOrderFunc(t *testing.T) { // Nothing shared c.Check(SliceEqualAnyOrderFunc(v, ncFrom("baz", "qux"), cmp), qt.Equals, false) + + // Long slice that matches + longSlice := ncFrom("a", "b", "c", "d", "e", "f", "g", "h", "i", "j") + longSame := ncFrom("b", "a", "c", "d", "e", "f", "g", "h", "i", "j") // first 2 elems swapped + c.Check(SliceEqualAnyOrderFunc(longSlice, longSame, cmp), qt.Equals, true) + + // Long difference; past the quadratic limit + longDiff := ncFrom("b", "a", "c", "d", "e", "f", "g", "h", "i", "k") // differs at end + c.Check(SliceEqualAnyOrderFunc(longSlice, longDiff, cmp), qt.Equals, false) } func TestSliceEqual(t *testing.T) { From 9373a1b9026cf3419cbab202bde9d93e44e82091 Mon Sep 17 00:00:00 2001 From: Nahum Shalman Date: Sun, 23 Apr 2023 15:57:35 +0000 Subject: [PATCH 0304/1708] all: illumos/solaris userspace only support Updates #14565 Change-Id: I743148144938794db0a224873ce76c10dbe6fa5f Signed-off-by: Nahum Shalman --- .github/workflows/test.yml | 6 ++++++ cmd/tailscaled/tailscaled.go | 4 ++-- ipn/ipnlocal/local.go | 2 +- ipn/ipnserver/actor.go | 2 +- ipn/ipnstate/ipnstate.go | 2 ++ ipn/localapi/localapi.go | 4 ++-- ipn/localapi/localapi_test.go | 2 +- net/dns/manager_default.go | 2 +- net/dns/manager_solaris.go | 14 ++++++++++++++ net/dns/resolver/tsdns.go | 2 +- net/netutil/ip_forward.go | 26 ++++++++++++++++++++++++++ net/tstun/tstun_stub.go | 2 +- net/tstun/tun.go | 2 +- paths/paths_unix.go | 2 +- 14 files changed, 60 insertions(+), 12 deletions(-) create mode 100644 net/dns/manager_solaris.go diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a4dccd103..d4c73ab7c 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -313,6 +313,12 @@ jobs: # AIX - goos: aix goarch: ppc64 + # Solaris + - goos: solaris + goarch: amd64 + # illumos + - goos: illumos + goarch: amd64 runs-on: ubuntu-22.04 steps: diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 7a5ee0398..9dd00ddd9 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -81,7 +81,7 @@ func defaultTunName() string { // "utun" is recognized by wireguard-go/tun/tun_darwin.go // as a magic value that 
uses/creates any free number. return "utun" - case "plan9", "aix": + case "plan9", "aix", "solaris", "illumos": return "userspace-networking" case "linux": switch distro.Get() { @@ -665,7 +665,7 @@ func handleSubnetsInNetstack() bool { return true } switch runtime.GOOS { - case "windows", "darwin", "freebsd", "openbsd": + case "windows", "darwin", "freebsd", "openbsd", "solaris", "illumos": // Enable on Windows and tailscaled-on-macOS (this doesn't // affect the GUI clients), and on FreeBSD. return true diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8d2652e0a..ad3bbaef3 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4176,7 +4176,7 @@ func (b *LocalBackend) peerAPIServicesLocked() (ret []tailcfg.Service) { }) } switch runtime.GOOS { - case "linux", "freebsd", "openbsd", "illumos", "darwin", "windows", "android", "ios": + case "linux", "freebsd", "openbsd", "illumos", "solaris", "darwin", "windows", "android", "ios": // These are the platforms currently supported by // net/dns/resolver/tsdns.go:Resolver.HandleExitNodeDNSQuery. ret = append(ret, tailcfg.Service{ diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 63d4b183c..0e716009c 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -96,7 +96,7 @@ func (a *actor) Username() (string, error) { } defer tok.Close() return tok.Username() - case "darwin", "linux": + case "darwin", "linux", "illumos", "solaris": uid, ok := a.ci.Creds().UserID() if !ok { return "", errors.New("missing user ID") diff --git a/ipn/ipnstate/ipnstate.go b/ipn/ipnstate/ipnstate.go index 9f8bd34f6..37ab47714 100644 --- a/ipn/ipnstate/ipnstate.go +++ b/ipn/ipnstate/ipnstate.go @@ -650,6 +650,8 @@ func osEmoji(os string) string { return "🐡" case "illumos": return "☀️" + case "solaris": + return "🌤️" } return "👽" } diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 831f6a9b6..157f72a65 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -1097,7 +1097,7 @@ func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) { func authorizeServeConfigForGOOSAndUserContext(goos string, configIn *ipn.ServeConfig, h *Handler) error { switch goos { - case "windows", "linux", "darwin": + case "windows", "linux", "darwin", "illumos", "solaris": default: return nil } @@ -1117,7 +1117,7 @@ func authorizeServeConfigForGOOSAndUserContext(goos string, configIn *ipn.ServeC switch goos { case "windows": return errors.New("must be a Windows local admin to serve a path") - case "linux", "darwin": + case "linux", "darwin", "illumos", "solaris": return errors.New("must be root, or be an operator and able to run 'sudo tailscale' to serve a path") default: // We filter goos at the start of the func, this default case diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index 145910830..b7f0c416c 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -237,7 +237,7 @@ func TestShouldDenyServeConfigForGOOSAndUserContext(t *testing.T) { } for _, tt := range tests { - for _, goos := range []string{"linux", "windows", "darwin"} { + for _, goos := range []string{"linux", "windows", "darwin", "illumos", "solaris"} { t.Run(goos+"-"+tt.name, func(t *testing.T) { err := authorizeServeConfigForGOOSAndUserContext(goos, tt.configIn, tt.h) gotErr := err != nil diff --git a/net/dns/manager_default.go b/net/dns/manager_default.go index 11dea5ca8..99ff017da 100644 --- a/net/dns/manager_default.go +++ b/net/dns/manager_default.go @@ -1,7 +1,7 @@ // 
Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux && !freebsd && !openbsd && !windows && !darwin +//go:build !linux && !freebsd && !openbsd && !windows && !darwin && !illumos && !solaris package dns diff --git a/net/dns/manager_solaris.go b/net/dns/manager_solaris.go new file mode 100644 index 000000000..1f48efb9e --- /dev/null +++ b/net/dns/manager_solaris.go @@ -0,0 +1,14 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package dns + +import ( + "tailscale.com/control/controlknobs" + "tailscale.com/health" + "tailscale.com/types/logger" +) + +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs.Knobs, iface string) (OSConfigurator, error) { + return newDirectManager(logf, health), nil +} diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go index 43ba0acf1..107740b13 100644 --- a/net/dns/resolver/tsdns.go +++ b/net/dns/resolver/tsdns.go @@ -384,7 +384,7 @@ func (r *Resolver) HandlePeerDNSQuery(ctx context.Context, q []byte, from netip. // but for now that's probably good enough. Later we'll // want to blend in everything from scutil --dns. fallthrough - case "linux", "freebsd", "openbsd", "illumos", "ios": + case "linux", "freebsd", "openbsd", "illumos", "solaris", "ios": nameserver, err := stubResolverForOS() if err != nil { r.logf("stubResolverForOS: %v", err) diff --git a/net/netutil/ip_forward.go b/net/netutil/ip_forward.go index 48cee68ea..c64a9e426 100644 --- a/net/netutil/ip_forward.go +++ b/net/netutil/ip_forward.go @@ -63,6 +63,11 @@ func CheckIPForwarding(routes []netip.Prefix, state *netmon.State) (warn, err er switch runtime.GOOS { case "dragonfly", "freebsd", "netbsd", "openbsd": return fmt.Errorf("Subnet routing and exit nodes only work with additional manual configuration on %v, and is not currently officially supported.", runtime.GOOS), nil + case "illumos", "solaris": + _, err := ipForwardingEnabledSunOS(ipv4, "") + if err != nil { + return nil, fmt.Errorf("Couldn't check system's IP forwarding configuration, subnet routing/exit nodes may not work: %w%s", err, "") + } } return nil, nil } @@ -325,3 +330,24 @@ func reversePathFilterValueLinux(iface string) (int, error) { } return v, nil } + +func ipForwardingEnabledSunOS(p protocol, iface string) (bool, error) { + var proto string + if p == ipv4 { + proto = "ipv4" + } else if p == ipv6 { + proto = "ipv6" + } else { + return false, fmt.Errorf("unknown protocol") + } + + ipadmCmd := "\"ipadm show-prop " + proto + " -p forwarding -o CURRENT -c\"" + bs, err := exec.Command("ipadm", "show-prop", proto, "-p", "forwarding", "-o", "CURRENT", "-c").Output() + if err != nil { + return false, fmt.Errorf("couldn't check %s (%v).\nSubnet routes won't work without IP forwarding.", ipadmCmd, err) + } + if string(bs) != "on\n" { + return false, fmt.Errorf("IP forwarding is set to off. Subnet routes won't work. 
Try 'routeadm -u -e %s-forwarding'", proto) + } + return true, nil +} diff --git a/net/tstun/tstun_stub.go b/net/tstun/tstun_stub.go index 7a4f71a09..3119d647c 100644 --- a/net/tstun/tstun_stub.go +++ b/net/tstun/tstun_stub.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build plan9 || aix +//go:build plan9 || aix || solaris || illumos package tstun diff --git a/net/tstun/tun.go b/net/tstun/tun.go index 9f5d42ecc..56c66c83a 100644 --- a/net/tstun/tun.go +++ b/net/tstun/tun.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !wasm && !plan9 && !tamago && !aix +//go:build !wasm && !plan9 && !tamago && !aix && !solaris && !illumos // Package tun creates a tuntap device, working around OS-specific // quirks if necessary. diff --git a/paths/paths_unix.go b/paths/paths_unix.go index 6a2b28733..50a8b7ca5 100644 --- a/paths/paths_unix.go +++ b/paths/paths_unix.go @@ -22,7 +22,7 @@ func init() { func statePath() string { switch runtime.GOOS { - case "linux": + case "linux", "illumos", "solaris": return "/var/lib/tailscale/tailscaled.state" case "freebsd", "openbsd": return "/var/db/tailscale/tailscaled.state" From fc8b6d9c6a6c510227b21abf00bdfd24e53ab176 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 10 Jan 2025 06:33:58 +0000 Subject: [PATCH 0305/1708] ipn/conf.go: add VIPServices to tailscaled configfile (#14345) Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- ipn/conf.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ipn/conf.go b/ipn/conf.go index 1b2831b03..addeea79e 100644 --- a/ipn/conf.go +++ b/ipn/conf.go @@ -32,6 +32,8 @@ type ConfigVAlpha struct { AdvertiseRoutes []netip.Prefix `json:",omitempty"` DisableSNAT opt.Bool `json:",omitempty"` + AdvertiseServices []string `json:",omitempty"` + AppConnector *AppConnectorPrefs `json:",omitempty"` // advertise app connector; defaults to false (if nil or explicitly set to false) NetfilterMode *string `json:",omitempty"` // "on", "off", "nodivert" @@ -143,5 +145,9 @@ func (c *ConfigVAlpha) ToPrefs() (MaskedPrefs, error) { mp.AppConnector = *c.AppConnector mp.AppConnectorSet = true } + if c.AdvertiseServices != nil { + mp.AdvertiseServices = c.AdvertiseServices + mp.AdvertiseServicesSet = true + } return mp, nil } From 48a95c422ae5eb304a68ef95f4b62e14870f641a Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 10 Jan 2025 07:29:11 +0000 Subject: [PATCH 0306/1708] cmd/containerboot,cmd/k8s-operator: reload tailscaled config (#14342) cmd/{k8s-operator,containerboot}: reload tailscaled configfile when its contents have changed Instead of restarting the Kubernetes Operator proxies each time tailscaled config has changed, this dynamically reloads the configfile using the new reload endpoint. Older annotation based mechanism will be supported till 1.84 to ensure that proxy versions prior to 1.80 keep working with operator 1.80 and newer. 
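Stripped of the file-watching machinery in the diff below, the core interaction with the new reload endpoint is roughly the following (a sketch assuming the LocalClient call used by containerboot; error handling abbreviated):

    package main

    import (
        "context"
        "log"

        "tailscale.com/client/tailscale"
    )

    func main() {
        var lc tailscale.LocalClient // zero value talks to the local tailscaled
        ok, err := lc.ReloadConfig(context.Background())
        if err != nil {
            log.Fatalf("reloading tailscaled config: %v", err)
        }
        if ok {
            log.Printf("tailscaled config was reloaded")
        }
    }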
Updates tailscale/tailscale#13032 Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- cmd/containerboot/main.go | 8 +++ cmd/containerboot/tailscaled.go | 70 ++++++++++++++++++++++++++ cmd/k8s-operator/operator_test.go | 3 +- cmd/k8s-operator/proxygroup.go | 73 +++++++++++++++++++++++++--- cmd/k8s-operator/proxygroup_specs.go | 5 +- cmd/k8s-operator/proxygroup_test.go | 10 ++-- cmd/k8s-operator/sts.go | 62 ++++++++++++++++------- cmd/k8s-operator/testutils_test.go | 15 +++--- 8 files changed, 207 insertions(+), 39 deletions(-) diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 7411ea949..895be108b 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -359,6 +359,12 @@ authLoop: log.Fatalf("rewatching tailscaled for updates after auth: %v", err) } + // If tailscaled config was read from a mounted file, watch the file for updates and reload. + cfgWatchErrChan := make(chan error) + if cfg.TailscaledConfigFilePath != "" { + go watchTailscaledConfigChanges(ctx, cfg.TailscaledConfigFilePath, client, cfgWatchErrChan) + } + var ( startupTasksDone = false currentIPs deephash.Sum // tailscale IPs assigned to device @@ -452,6 +458,8 @@ runLoop: break runLoop case err := <-errChan: log.Fatalf("failed to read from tailscaled: %v", err) + case err := <-cfgWatchErrChan: + log.Fatalf("failed to watch tailscaled config: %v", err) case n := <-notifyChan: if n.State != nil && *n.State != ipn.Running { // Something's gone wrong and we've left the authenticated state. diff --git a/cmd/containerboot/tailscaled.go b/cmd/containerboot/tailscaled.go index d8da49b03..fc2092477 100644 --- a/cmd/containerboot/tailscaled.go +++ b/cmd/containerboot/tailscaled.go @@ -13,10 +13,13 @@ import ( "log" "os" "os/exec" + "path/filepath" + "reflect" "strings" "syscall" "time" + "github.com/fsnotify/fsnotify" "tailscale.com/client/tailscale" ) @@ -166,3 +169,70 @@ func tailscaleSet(ctx context.Context, cfg *settings) error { } return nil } + +func watchTailscaledConfigChanges(ctx context.Context, path string, lc *tailscale.LocalClient, errCh chan<- error) { + var ( + tickChan <-chan time.Time + tailscaledCfgDir = filepath.Dir(path) + prevTailscaledCfg []byte + ) + w, err := fsnotify.NewWatcher() + if err != nil { + log.Printf("tailscaled config watch: failed to create fsnotify watcher, timer-only mode: %v", err) + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + tickChan = ticker.C + } else { + defer w.Close() + if err := w.Add(tailscaledCfgDir); err != nil { + errCh <- fmt.Errorf("failed to add fsnotify watch: %w", err) + return + } + } + b, err := os.ReadFile(path) + if err != nil { + errCh <- fmt.Errorf("error reading configfile: %w", err) + return + } + prevTailscaledCfg = b + // kubelet mounts Secrets to Pods using a series of symlinks, one of + // which is /..data that Kubernetes recommends consumers to + // use if they need to monitor changes + // https://github.com/kubernetes/kubernetes/blob/v1.28.1/pkg/volume/util/atomic_writer.go#L39-L61 + const kubeletMountedCfg = "..data" + toWatch := filepath.Join(tailscaledCfgDir, kubeletMountedCfg) + for { + select { + case <-ctx.Done(): + return + case err := <-w.Errors: + errCh <- fmt.Errorf("watcher error: %w", err) + return + case <-tickChan: + case event := <-w.Events: + if event.Name != toWatch { + continue + } + } + b, err := os.ReadFile(path) + if err != nil { + errCh <- fmt.Errorf("error reading configfile: %w", err) + return + } + // For some proxy types the mounted volume also contains tailscaled state 
and other files. We + // don't want to reload config unnecessarily on unrelated changes to these files. + if reflect.DeepEqual(b, prevTailscaledCfg) { + continue + } + prevTailscaledCfg = b + log.Printf("tailscaled config watch: ensuring that config is up to date") + ok, err := lc.ReloadConfig(ctx) + if err != nil { + errCh <- fmt.Errorf("error reloading tailscaled config: %w", err) + return + } + if ok { + log.Printf("tailscaled config watch: config was reloaded") + } + } +} diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index d53269f05..1998fe3bc 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1379,6 +1379,7 @@ func TestTailscaledConfigfileHash(t *testing.T) { }, }) + expectReconciled(t, sr, "default", "test") expectReconciled(t, sr, "default", "test") fullName, shortName := findGenName(t, fc, "default", "test", "svc") @@ -1389,7 +1390,7 @@ func TestTailscaledConfigfileHash(t *testing.T) { parentType: "svc", hostname: "default-test", clusterTargetIP: "10.20.30.40", - confFileHash: "acf3467364b0a3ba9b8ee0dd772cb7c2f0bf585e288fa99b7fe4566009ed6041", + confFileHash: "848bff4b5ba83ac999e6984c8464e597156daba961ae045e7dbaef606d54ab5e", app: kubetypes.AppIngressProxy, } expectEqual(t, fc, expectedSTS(t, fc, o), nil) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 194474fb2..a4befa039 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -261,17 +261,44 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro return fmt.Errorf("error provisioning ConfigMap: %w", err) } } - ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, cfgHash) + ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode) if err != nil { return fmt.Errorf("error generating StatefulSet spec: %w", err) } ss = applyProxyClassToStatefulSet(proxyClass, ss, nil, logger) - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { + capver, err := r.capVerForPG(ctx, pg, logger) + if err != nil { + return fmt.Errorf("error getting device info: %w", err) + } + + updateSS := func(s *appsv1.StatefulSet) { + + // This is a temporary workaround to ensure that egress ProxyGroup proxies with capver older than 110 + // are restarted when tailscaled configfile contents have changed. + // This workaround ensures that: + // 1. The hash mechanism is used to trigger pod restarts for proxies below capver 110. + // 2. Proxies above capver are not unnecessarily restarted when the configfile contents change. + // 3. If the hash has alreay been set, but the capver is above 110, the old hash is preserved to avoid + // unnecessary pod restarts that could result in an update loop where capver cannot be determined for a + // restarting Pod and the hash is re-added again. + // Note that this workaround is only applied to egress ProxyGroups, because ingress ProxyGroup was added after capver 110. + // Note also that the hash annotation is only set on updates, not creation, because if the StatefulSet is + // being created, there is no need for a restart. + // TODO(irbekrm): remove this in 1.84. 
+ hash := cfgHash + if capver >= 110 { + hash = s.Spec.Template.GetAnnotations()[podAnnotationLastSetConfigFileHash] + } + s.Spec = ss.Spec + if hash != "" && pg.Spec.Type == tsapi.ProxyGroupTypeEgress { + mak.Set(&s.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, hash) + } + s.ObjectMeta.Labels = ss.ObjectMeta.Labels s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = ss.ObjectMeta.OwnerReferences - s.Spec = ss.Spec - }); err != nil { + } + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, updateSS); err != nil { return fmt.Errorf("error provisioning StatefulSet: %w", err) } mo := &metricsOpts{ @@ -564,12 +591,19 @@ func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr continue } - metadata = append(metadata, nodeMetadata{ + nm := nodeMetadata{ ordinal: ordinal, stateSecret: &secret, tsID: id, dnsName: dnsName, - }) + } + pod := &corev1.Pod{} + if err := r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: secret.Name}, pod); err != nil && !apierrors.IsNotFound(err) { + return nil, err + } else if err == nil { + nm.podUID = string(pod.UID) + } + metadata = append(metadata, nm) } return metadata, nil @@ -601,6 +635,29 @@ func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, pg *tsapi.Prox type nodeMetadata struct { ordinal int stateSecret *corev1.Secret - tsID tailcfg.StableNodeID - dnsName string + // podUID is the UID of the current Pod or empty if the Pod does not exist. + podUID string + tsID tailcfg.StableNodeID + dnsName string +} + +// capVerForPG returns best effort capability version for the given ProxyGroup. It attempts to find it by looking at the +// Secret + Pod for the replica with ordinal 0. Returns -1 if it is not possible to determine the capability version +// (i.e there is no Pod yet). +func (r *ProxyGroupReconciler) capVerForPG(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (tailcfg.CapabilityVersion, error) { + metas, err := r.getNodeMetadata(ctx, pg) + if err != nil { + return -1, fmt.Errorf("error getting node metadata: %w", err) + } + if len(metas) == 0 { + return -1, nil + } + dev, err := deviceInfo(metas[0].stateSecret, metas[0].podUID, logger) + if err != nil { + return -1, fmt.Errorf("error getting device info: %w", err) + } + if dev == nil { + return -1, nil + } + return dev.capver, nil } diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index d602be814..dc58b9f0e 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -21,7 +21,7 @@ import ( // Returns the base StatefulSet definition for a ProxyGroup. A ProxyClass may be // applied over the top after. 
-func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHash string) (*appsv1.StatefulSet, error) { +func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string) (*appsv1.StatefulSet, error) { ss := new(appsv1.StatefulSet) if err := yaml.Unmarshal(proxyYaml, &ss); err != nil { return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err) @@ -53,9 +53,6 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHa Namespace: namespace, Labels: pgLabels(pg.Name, nil), DeletionGracePeriodSeconds: ptr.To[int64](10), - Annotations: map[string]string{ - podAnnotationLastSetConfigFileHash: cfgHash, - }, } tmpl.Spec.ServiceAccountName = pg.Name tmpl.Spec.InitContainers[0].Image = image diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 6464a0b2d..96ffefbed 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -29,6 +29,7 @@ import ( "tailscale.com/kube/kubetypes" "tailscale.com/tstest" "tailscale.com/types/ptr" + "tailscale.com/util/mak" ) const testProxyImage = "tailscale/tailscale:test" @@ -117,11 +118,11 @@ func TestProxyGroup(t *testing.T) { tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg, nil) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash) + expectProxyGroupResources(t, fc, pg, true, "") if expected := 1; reconciler.egressProxyGroups.Len() != expected { t.Fatalf("expected %d egress ProxyGroups, got %d", expected, reconciler.egressProxyGroups.Len()) } - expectProxyGroupResources(t, fc, pg, true, initialCfgHash) + expectProxyGroupResources(t, fc, pg, true, "") keyReq := tailscale.KeyCapabilities{ Devices: tailscale.KeyDeviceCapabilities{ Create: tailscale.KeyDeviceCreateCapabilities{ @@ -378,11 +379,14 @@ func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.Prox role := pgRole(pg, tsNamespace) roleBinding := pgRoleBinding(pg, tsNamespace) serviceAccount := pgServiceAccount(pg, tsNamespace) - statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", cfgHash) + statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto") if err != nil { t.Fatal(err) } statefulSet.Annotations = defaultProxyClassAnnotations + if cfgHash != "" { + mak.Set(&statefulSet.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, cfgHash) + } if shouldExist { expectEqual(t, fc, role, nil) diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index b861bdfff..c2b925058 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -437,10 +437,10 @@ func sanitizeConfigBytes(c ipn.ConfigVAlpha) string { return string(sanitizedBytes) } -// DeviceInfo returns the device ID, hostname and IPs for the Tailscale device -// that acts as an operator proxy. It retrieves info from a Kubernetes Secret -// labeled with the provided labels. -// Either of device ID, hostname and IPs can be empty string if not found in the Secret. +// DeviceInfo returns the device ID, hostname, IPs and capver for the Tailscale device that acts as an operator proxy. +// It retrieves info from a Kubernetes Secret labeled with the provided labels. Capver is cross-validated against the +// Pod to ensure that it is the currently running Pod that set the capver. If the Pod or the Secret does not exist, the +// returned capver is -1. 
Either of device ID, hostname and IPs can be empty string if not found in the Secret. func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map[string]string, logger *zap.SugaredLogger) (dev *device, err error) { sec, err := getSingleObject[corev1.Secret](ctx, a.Client, a.operatorNamespace, childLabels) if err != nil { @@ -449,12 +449,14 @@ func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map if sec == nil { return dev, nil } + podUID := "" pod := new(corev1.Pod) if err := a.Get(ctx, types.NamespacedName{Namespace: sec.Namespace, Name: sec.Name}, pod); err != nil && !apierrors.IsNotFound(err) { - return dev, nil + return dev, err + } else if err == nil { + podUID = string(pod.ObjectMeta.UID) } - - return deviceInfo(sec, pod, logger) + return deviceInfo(sec, podUID, logger) } // device contains tailscale state of a proxy device as gathered from its tailscale state Secret. @@ -465,9 +467,10 @@ type device struct { // ingressDNSName is the L7 Ingress DNS name. In practice this will be the same value as hostname, but only set // when the device has been configured to serve traffic on it via 'tailscale serve'. ingressDNSName string + capver tailcfg.CapabilityVersion } -func deviceInfo(sec *corev1.Secret, pod *corev1.Pod, log *zap.SugaredLogger) (dev *device, err error) { +func deviceInfo(sec *corev1.Secret, podUID string, log *zap.SugaredLogger) (dev *device, err error) { id := tailcfg.StableNodeID(sec.Data[kubetypes.KeyDeviceID]) if id == "" { return dev, nil @@ -484,10 +487,12 @@ func deviceInfo(sec *corev1.Secret, pod *corev1.Pod, log *zap.SugaredLogger) (de // operator to clean up such devices. return dev, nil } + dev.ingressDNSName = dev.hostname + pcv := proxyCapVer(sec, podUID, log) + dev.capver = pcv // TODO(irbekrm): we fall back to using the hostname field to determine Ingress's hostname to ensure backwards // compatibility. In 1.82 we can remove this fallback mechanism. - dev.ingressDNSName = dev.hostname - if proxyCapVer(sec, pod, log) >= 109 { + if pcv >= 109 { dev.ingressDNSName = strings.TrimSuffix(string(sec.Data[kubetypes.KeyHTTPSEndpoint]), ".") if strings.EqualFold(dev.ingressDNSName, kubetypes.ValueNoHTTPS) { dev.ingressDNSName = "" @@ -584,8 +589,6 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S Value: "true", }) } - // Configure containeboot to run tailscaled with a configfile read from the state Secret. - mak.Set(&ss.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, tsConfigHash) configVolume := corev1.Volume{ Name: "tailscaledconfig", @@ -655,6 +658,12 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S }, }) } + + dev, err := a.DeviceInfo(ctx, sts.ChildResourceLabels, logger) + if err != nil { + return nil, fmt.Errorf("failed to get device info: %w", err) + } + app, err := appInfoForProxy(sts) if err != nil { // No need to error out if now or in future we end up in a @@ -673,7 +682,25 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S ss = applyProxyClassToStatefulSet(sts.ProxyClass, ss, sts, logger) } updateSS := func(s *appsv1.StatefulSet) { + // This is a temporary workaround to ensure that proxies with capver older than 110 + // are restarted when tailscaled configfile contents have changed. + // This workaround ensures that: + // 1. The hash mechanism is used to trigger pod restarts for proxies below capver 110. + // 2. 
Proxies above capver 110 are not unnecessarily restarted when the configfile contents change.
+		// 3. If the hash has already been set, but the capver is above 110, the old hash is preserved to avoid
+		// unnecessary pod restarts that could result in an update loop where capver cannot be determined for a
+		// restarting Pod and the hash is re-added again.
+		// Note that the hash annotation is only set on updates, not creation, because if the StatefulSet is
+		// being created, there is no need for a restart.
+		// TODO(irbekrm): remove this in 1.84.
+		hash := tsConfigHash
+		if dev != nil && dev.capver >= 110 {
+			hash = s.Spec.Template.GetAnnotations()[podAnnotationLastSetConfigFileHash]
+		}
 		s.Spec = ss.Spec
+		if hash != "" {
+			mak.Set(&s.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, hash)
+		}
 		s.ObjectMeta.Labels = ss.Labels
 		s.ObjectMeta.Annotations = ss.Annotations
 	}
@@ -1112,10 +1139,11 @@ func isValidFirewallMode(m string) bool {
 	return m == "auto" || m == "nftables" || m == "iptables"
 }
-// proxyCapVer accepts a proxy state Secret and a proxy Pod returns the capability version of a proxy Pod.
-// This is best effort - if the capability version can not (currently) be determined, it returns -1.
-func proxyCapVer(sec *corev1.Secret, pod *corev1.Pod, log *zap.SugaredLogger) tailcfg.CapabilityVersion {
-	if sec == nil || pod == nil {
+// proxyCapVer accepts a proxy state Secret and the UID of the current proxy Pod and returns the capability version of the
+// tailscale running in that Pod. This is best effort - if the capability version can not (currently) be determined, it
+// returns -1.
+func proxyCapVer(sec *corev1.Secret, podUID string, log *zap.SugaredLogger) tailcfg.CapabilityVersion {
+	if sec == nil || podUID == "" {
 		return tailcfg.CapabilityVersion(-1)
 	}
 	if len(sec.Data[kubetypes.KeyCapVer]) == 0 || len(sec.Data[kubetypes.KeyPodUID]) == 0 {
@@ -1126,7 +1154,7 @@ func proxyCapVer(sec *corev1.Secret, pod *corev1.Pod, log *zap.SugaredLogger) ta
 		log.Infof("[unexpected]: unexpected capability version in proxy's state Secret, expected an integer, got %q", string(sec.Data[kubetypes.KeyCapVer]))
 		return tailcfg.CapabilityVersion(-1)
 	}
-	if !strings.EqualFold(string(pod.ObjectMeta.UID), string(sec.Data[kubetypes.KeyPodUID])) {
+	if !strings.EqualFold(podUID, string(sec.Data[kubetypes.KeyPodUID])) {
 		return tailcfg.CapabilityVersion(-1)
 	}
 	return tailcfg.CapabilityVersion(capVer)
diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go
index d43e75b1e..277bd16df 100644
--- a/cmd/k8s-operator/testutils_test.go
+++ b/cmd/k8s-operator/testutils_test.go
@@ -95,7 +95,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef
 			Value: "true",
 		})
 	}
-	annots := make(map[string]string)
+	var annots map[string]string
 	var volumes []corev1.Volume
 	volumes = []corev1.Volume{
 		{
@@ -113,7 +113,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef
 		MountPath: "/etc/tsconfig",
 	}}
 	if opts.confFileHash != "" {
-		annots["tailscale.com/operator-last-set-config-file-hash"] = opts.confFileHash
+		mak.Set(&annots, "tailscale.com/operator-last-set-config-file-hash", opts.confFileHash)
 	}
 	if opts.firewallMode != "" {
 		tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{
@@ -122,13 +122,13 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef
 		})
 	}
 	if opts.tailnetTargetIP != "" {
-		annots["tailscale.com/operator-last-set-ts-tailnet-target-ip"] = opts.tailnetTargetIP
+		mak.Set(&annots,
"tailscale.com/operator-last-set-ts-tailnet-target-ip", opts.tailnetTargetIP) tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ Name: "TS_TAILNET_TARGET_IP", Value: opts.tailnetTargetIP, }) } else if opts.tailnetTargetFQDN != "" { - annots["tailscale.com/operator-last-set-ts-tailnet-target-fqdn"] = opts.tailnetTargetFQDN + mak.Set(&annots, "tailscale.com/operator-last-set-ts-tailnet-target-fqdn", opts.tailnetTargetFQDN) tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ Name: "TS_TAILNET_TARGET_FQDN", Value: opts.tailnetTargetFQDN, @@ -139,13 +139,13 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef Name: "TS_DEST_IP", Value: opts.clusterTargetIP, }) - annots["tailscale.com/operator-last-set-cluster-ip"] = opts.clusterTargetIP + mak.Set(&annots, "tailscale.com/operator-last-set-cluster-ip", opts.clusterTargetIP) } else if opts.clusterTargetDNS != "" { tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ Name: "TS_EXPERIMENTAL_DEST_DNS_NAME", Value: opts.clusterTargetDNS, }) - annots["tailscale.com/operator-last-set-cluster-dns-name"] = opts.clusterTargetDNS + mak.Set(&annots, "tailscale.com/operator-last-set-cluster-dns-name", opts.clusterTargetDNS) } if opts.serveConfig != nil { tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ @@ -794,6 +794,9 @@ func (c *fakeTSClient) Deleted() []string { // change to the configfile contents). func removeHashAnnotation(sts *appsv1.StatefulSet) { delete(sts.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash) + if len(sts.Spec.Template.Annotations) == 0 { + sts.Spec.Template.Annotations = nil + } } func removeTargetPortsFromSvc(svc *corev1.Service) { From 77017bae59c2b89d80a2428524abd53042948e9c Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 10 Jan 2025 07:31:28 +0000 Subject: [PATCH 0307/1708] cmd/containerboot: load containerboot serve config that does not contain HTTPS endpoint in tailnets with HTTPS disabled (#14538) cmd/containerboot: load containerboot serve config that does not contain HTTPS endpoint in tailnets with HTTPS disabled Fixes an issue where, if a tailnet has HTTPS disabled, no serve config set via TS_SERVE_CONFIG was loaded, even if it does not contain an HTTPS endpoint. Now for tailnets with HTTPS disabled serve config provided to containerboot is considered invalid (and therefore not loaded) only if there is an HTTPS endpoint defined in the config. 
Fixes tailscale/tailscale#14495 Signed-off-by: Irbe Krumina --- cmd/containerboot/serve.go | 34 ++-- cmd/containerboot/serve_test.go | 267 ++++++++++++++++++++++++++++++++ 2 files changed, 290 insertions(+), 11 deletions(-) create mode 100644 cmd/containerboot/serve_test.go diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index 14c7f00d7..1729e65b5 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -68,7 +68,6 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan if prevServeConfig != nil && reflect.DeepEqual(sc, prevServeConfig) { continue } - validateHTTPSServe(certDomain, sc) if err := updateServeConfig(ctx, sc, certDomain, lc); err != nil { log.Fatalf("serve proxy: error updating serve config: %v", err) } @@ -88,27 +87,34 @@ func certDomainFromNetmap(nm *netmap.NetworkMap) string { return nm.DNS.CertDomains[0] } -func updateServeConfig(ctx context.Context, sc *ipn.ServeConfig, certDomain string, lc *tailscale.LocalClient) error { - // TODO(irbekrm): This means that serve config that does not expose HTTPS endpoint will not be set for a tailnet - // that does not have HTTPS enabled. We probably want to fix this. - if certDomain == kubetypes.ValueNoHTTPS { +// localClient is a subset of tailscale.LocalClient that can be mocked for testing. +type localClient interface { + SetServeConfig(context.Context, *ipn.ServeConfig) error +} + +func updateServeConfig(ctx context.Context, sc *ipn.ServeConfig, certDomain string, lc localClient) error { + if !isValidHTTPSConfig(certDomain, sc) { return nil } log.Printf("serve proxy: applying serve config") return lc.SetServeConfig(ctx, sc) } -func validateHTTPSServe(certDomain string, sc *ipn.ServeConfig) { - if certDomain != kubetypes.ValueNoHTTPS || !hasHTTPSEndpoint(sc) { - return - } - log.Printf( - `serve proxy: this node is configured as a proxy that exposes an HTTPS endpoint to tailnet, +func isValidHTTPSConfig(certDomain string, sc *ipn.ServeConfig) bool { + if certDomain == kubetypes.ValueNoHTTPS && hasHTTPSEndpoint(sc) { + log.Printf( + `serve proxy: this node is configured as a proxy that exposes an HTTPS endpoint to tailnet, (perhaps a Kubernetes operator Ingress proxy) but it is not able to issue TLS certs, so this will likely not work. To make it work, ensure that HTTPS is enabled for your tailnet, see https://tailscale.com/kb/1153/enabling-https for more details.`) + return false + } + return true } func hasHTTPSEndpoint(cfg *ipn.ServeConfig) bool { + if cfg == nil { + return false + } for _, tcpCfg := range cfg.TCP { if tcpCfg.HTTPS { return true @@ -127,6 +133,12 @@ func readServeConfig(path, certDomain string) (*ipn.ServeConfig, error) { if err != nil { return nil, err } + // Serve config can be provided by users as well as the Kubernetes Operator (for its proxies). User-provided + // config could be empty for reasons. 
+ if len(j) == 0 { + log.Printf("serve proxy: serve config file is empty, skipping") + return nil, nil + } j = bytes.ReplaceAll(j, []byte("${TS_CERT_DOMAIN}"), []byte(certDomain)) var sc ipn.ServeConfig if err := json.Unmarshal(j, &sc); err != nil { diff --git a/cmd/containerboot/serve_test.go b/cmd/containerboot/serve_test.go new file mode 100644 index 000000000..4563c52fc --- /dev/null +++ b/cmd/containerboot/serve_test.go @@ -0,0 +1,267 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "tailscale.com/client/tailscale" + "tailscale.com/ipn" + "tailscale.com/kube/kubetypes" +) + +func TestUpdateServeConfig(t *testing.T) { + tests := []struct { + name string + sc *ipn.ServeConfig + certDomain string + wantCall bool + }{ + { + name: "no_https_no_cert_domain", + sc: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + }, + certDomain: kubetypes.ValueNoHTTPS, // tailnet has HTTPS disabled + wantCall: true, // should set serve config as it doesn't have HTTPS endpoints + }, + { + name: "https_with_cert_domain", + sc: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {HTTPS: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "${TS_CERT_DOMAIN}:443": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://10.0.1.100:8080"}, + }, + }, + }, + }, + certDomain: "test-node.tailnet.ts.net", + wantCall: true, + }, + { + name: "https_without_cert_domain", + sc: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {HTTPS: true}, + }, + }, + certDomain: kubetypes.ValueNoHTTPS, + wantCall: false, // incorrect configuration- should not set serve config + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeLC := &fakeLocalClient{} + err := updateServeConfig(context.Background(), tt.sc, tt.certDomain, fakeLC) + if err != nil { + t.Errorf("updateServeConfig() error = %v", err) + } + if fakeLC.setServeCalled != tt.wantCall { + t.Errorf("SetServeConfig() called = %v, want %v", fakeLC.setServeCalled, tt.wantCall) + } + }) + } +} + +func TestReadServeConfig(t *testing.T) { + tests := []struct { + name string + gotSC string + certDomain string + wantSC *ipn.ServeConfig + wantErr bool + }{ + { + name: "empty_file", + }, + { + name: "valid_config_with_cert_domain_placeholder", + gotSC: `{ + "TCP": { + "443": { + "HTTPS": true + } + }, + "Web": { + "${TS_CERT_DOMAIN}:443": { + "Handlers": { + "/api": { + "Proxy": "https://10.2.3.4/api" + }}}}}`, + certDomain: "example.com", + wantSC: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: { + HTTPS: true, + }, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + ipn.HostPort("example.com:443"): { + Handlers: map[string]*ipn.HTTPHandler{ + "/api": { + Proxy: "https://10.2.3.4/api", + }, + }, + }, + }, + }, + }, + { + name: "valid_config_for_http_proxy", + gotSC: `{ + "TCP": { + "80": { + "HTTP": true + } + }}`, + wantSC: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: { + HTTP: true, + }, + }, + }, + }, + { + name: "config_without_cert_domain", + gotSC: `{ + "TCP": { + "443": { + "HTTPS": true + } + }, + "Web": { + "localhost:443": { + "Handlers": { + "/api": { + "Proxy": "https://10.2.3.4/api" + }}}}}`, + certDomain: "", + wantErr: false, + wantSC: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: { + HTTPS: true, + }, + }, + Web: 
map[ipn.HostPort]*ipn.WebServerConfig{ + ipn.HostPort("localhost:443"): { + Handlers: map[string]*ipn.HTTPHandler{ + "/api": { + Proxy: "https://10.2.3.4/api", + }, + }, + }, + }, + }, + }, + { + name: "invalid_json", + gotSC: "invalid json", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "serve-config.json") + if err := os.WriteFile(path, []byte(tt.gotSC), 0644); err != nil { + t.Fatal(err) + } + + got, err := readServeConfig(path, tt.certDomain) + if (err != nil) != tt.wantErr { + t.Errorf("readServeConfig() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !cmp.Equal(got, tt.wantSC) { + t.Errorf("readServeConfig() diff (-got +want):\n%s", cmp.Diff(got, tt.wantSC)) + } + }) + } +} + +type fakeLocalClient struct { + *tailscale.LocalClient + setServeCalled bool +} + +func (m *fakeLocalClient) SetServeConfig(ctx context.Context, cfg *ipn.ServeConfig) error { + m.setServeCalled = true + return nil +} + +func TestHasHTTPSEndpoint(t *testing.T) { + tests := []struct { + name string + cfg *ipn.ServeConfig + want bool + }{ + { + name: "nil_config", + cfg: nil, + want: false, + }, + { + name: "empty_config", + cfg: &ipn.ServeConfig{}, + want: false, + }, + { + name: "no_https_endpoints", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: { + HTTPS: false, + }, + }, + }, + want: false, + }, + { + name: "has_https_endpoint", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: { + HTTPS: true, + }, + }, + }, + want: true, + }, + { + name: "mixed_endpoints", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTPS: false}, + 443: {HTTPS: true}, + }, + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := hasHTTPSEndpoint(tt.cfg) + if got != tt.want { + t.Errorf("hasHTTPSEndpoint() = %v, want %v", got, tt.want) + } + }) + } +} From a841f9d87be9490c0c94bcfcbe62549a2d2dfefd Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 10 Jan 2025 07:59:31 -0800 Subject: [PATCH 0308/1708] go.mod: bump some deps Most of these are effectively no-ops, but appease security scanners. At least one (x/net for x/net/html) only affect builds from the open source repo, since we already had it updated in our "corp" repo: golang.org/x/net v0.33.1-0.20241230221519-e9d95ba163f7 ... and that's where we do the official releases from. e.g. 
tailscale.io % go install tailscale.com/cmd/tailscaled tailscale.io % go version -m ~/go/bin/tailscaled | grep x/net dep golang.org/x/net v0.33.1-0.20241230221519-e9d95ba163f7 h1:raAbYgZplPuXQ6s7jPklBFBmmLh6LjnFaJdp3xR2ljY= tailscale.io % cd ../tailscale.com tailscale.com % go install tailscale.com/cmd/tailscaled tailscale.com % go version -m ~/go/bin/tailscaled | grep x/net dep golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= Updates #8043 Updates #14599 Change-Id: I6e238cef62ca22444145a5313554aab8709b33c9 Signed-off-by: Brad Fitzpatrick --- go.mod | 21 ++++++++++----------- go.sum | 43 ++++++++++++++++++++----------------------- 2 files changed, 30 insertions(+), 34 deletions(-) diff --git a/go.mod b/go.mod index 62a431d18..c28338caf 100644 --- a/go.mod +++ b/go.mod @@ -93,18 +93,18 @@ require ( github.com/u-root/u-root v0.12.0 github.com/vishvananda/netns v0.0.4 go.uber.org/zap v1.27.0 - go4.org/mem v0.0.0-20220726221520-4f986261bf13 + go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.31.0 - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/mod v0.19.0 - golang.org/x/net v0.33.0 - golang.org/x/oauth2 v0.16.0 + golang.org/x/crypto v0.32.0 + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 + golang.org/x/mod v0.22.0 + golang.org/x/net v0.34.0 + golang.org/x/oauth2 v0.25.0 golang.org/x/sync v0.10.0 golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab - golang.org/x/term v0.27.0 - golang.org/x/time v0.5.0 - golang.org/x/tools v0.23.0 + golang.org/x/term v0.28.0 + golang.org/x/time v0.9.0 + golang.org/x/tools v0.29.0 golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 golang.zx2c4.com/wireguard/windows v0.5.3 gopkg.in/square/go-jose.v2 v2.6.0 @@ -385,10 +385,9 @@ require ( gitlab.com/digitalxero/go-conventional-commit v1.0.7 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect - golang.org/x/image v0.18.0 // indirect + golang.org/x/image v0.23.0 // indirect golang.org/x/text v0.21.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.8 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index efb20e63a..be8f291a4 100644 --- a/go.sum +++ b/go.sum @@ -1045,8 +1045,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= -go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1059,8 +1059,8 @@ golang.org/x/crypto 
v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1071,16 +1071,16 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8= golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ= -golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E= +golang.org/x/image v0.23.0 h1:HseQ7c2OpPKTPVzNjG5fwJsOTCiiwS4QdsYi5XU6H68= +golang.org/x/image v0.23.0/go.mod h1:wJJBTdLfCCf3tiHa1fNxpZmUI4mmoZvwMCPP0ddoNKY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1108,8 +1108,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.19.0 
h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1149,16 +1149,16 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1242,8 +1242,8 @@ golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1251,7 +1251,6 @@ golang.org/x/text 
v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -1260,8 +1259,8 @@ golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -1326,8 +1325,8 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1362,8 +1361,6 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= From cd795d8a7f47f06fe3cc1aaf67a7a6cd26509649 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Fri, 10 Jan 2025 12:23:51 -0600 Subject: [PATCH 0309/1708] prober: support filtering regions by region ID in addition to code Updates tailscale/corp#25758 Signed-off-by: Percy Wegmann --- cmd/derpprobe/derpprobe.go | 6 +++--- prober/derp.go | 14 +++++++------- prober/derp_test.go | 24 ++++++++++++------------ 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/cmd/derpprobe/derpprobe.go b/cmd/derpprobe/derpprobe.go index 62b7d47a4..6e8c603b9 100644 --- a/cmd/derpprobe/derpprobe.go +++ b/cmd/derpprobe/derpprobe.go @@ -32,7 +32,7 @@ var ( bwTUNIPv4Address = flag.String("bw-tun-ipv4-addr", "", "if specified, bandwidth probes will be performed over a TUN device at this address in order to exercise TCP-in-TCP in similar fashion to TCP over Tailscale via DERP; we will use a /30 subnet including this IP address") qdPacketsPerSecond = flag.Int("qd-packets-per-second", 0, "if greater than 0, queuing delay will be measured continuously using 260 byte packets (approximate size of a CallMeMaybe packet) sent at this rate per second") qdPacketTimeout = flag.Duration("qd-packet-timeout", 5*time.Second, "queuing delay packets arriving after this period of time from being sent are treated like dropped packets and don't count toward queuing delay timings") - regionCode = flag.String("region-code", "", "probe only this region (e.g. 'lax'); if left blank, all regions will be probed") + regionCodeOrID = flag.String("region-code", "", "probe only this region (e.g. 'lax' or '17'); if left blank, all regions will be probed") ) func main() { @@ -52,8 +52,8 @@ func main() { if *bwInterval > 0 { opts = append(opts, prober.WithBandwidthProbing(*bwInterval, *bwSize, *bwTUNIPv4Address)) } - if *regionCode != "" { - opts = append(opts, prober.WithRegion(*regionCode)) + if *regionCodeOrID != "" { + opts = append(opts, prober.WithRegionCodeOrID(*regionCodeOrID)) } dp, err := prober.DERP(p, *derpMapURL, opts...) if err != nil { diff --git a/prober/derp.go b/prober/derp.go index 6bad35845..f405549ff 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -60,8 +60,8 @@ type derpProber struct { qdPacketsPerSecond int // in packets per second qdPacketTimeout time.Duration - // Optionally restrict probes to a single regionCode. - regionCode string + // Optionally restrict probes to a single regionCodeOrID. + regionCodeOrID string // Probe class for fetching & updating the DERP map. ProbeMap ProbeClass @@ -135,11 +135,11 @@ func WithTLSProbing(interval time.Duration) DERPOpt { } } -// WithRegion restricts probing to the specified region identified by its code -// (e.g. "lax"). This is case sensitive. -func WithRegion(regionCode string) DERPOpt { +// WithRegionCodeOrID restricts probing to the specified region identified by its code +// (e.g. "lax") or its id (e.g. "17"). This is case sensitive. 
+func WithRegionCodeOrID(regionCode string) DERPOpt { return func(d *derpProber) { - d.regionCode = regionCode + d.regionCodeOrID = regionCode } } @@ -598,7 +598,7 @@ func (d *derpProber) ProbeUDP(ipaddr string, port int) ProbeClass { } func (d *derpProber) skipRegion(region *tailcfg.DERPRegion) bool { - return d.regionCode != "" && region.RegionCode != d.regionCode + return d.regionCodeOrID != "" && region.RegionCode != d.regionCodeOrID && strconv.Itoa(region.RegionID) != d.regionCodeOrID } func derpProbeUDP(ctx context.Context, ipStr string, port int) error { diff --git a/prober/derp_test.go b/prober/derp_test.go index c084803e9..93b8d760b 100644 --- a/prober/derp_test.go +++ b/prober/derp_test.go @@ -71,17 +71,17 @@ func TestDerpProber(t *testing.T) { clk := newFakeTime() p := newForTest(clk.Now, clk.NewTicker) dp := &derpProber{ - p: p, - derpMapURL: srv.URL, - tlsInterval: time.Second, - tlsProbeFn: func(_ string) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, - udpInterval: time.Second, - udpProbeFn: func(_ string, _ int) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, - meshInterval: time.Second, - meshProbeFn: func(_, _ string) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, - nodes: make(map[string]*tailcfg.DERPNode), - probes: make(map[string]*Probe), - regionCode: "zero", + p: p, + derpMapURL: srv.URL, + tlsInterval: time.Second, + tlsProbeFn: func(_ string) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, + udpInterval: time.Second, + udpProbeFn: func(_ string, _ int) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, + meshInterval: time.Second, + meshProbeFn: func(_, _ string) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, + nodes: make(map[string]*tailcfg.DERPNode), + probes: make(map[string]*Probe), + regionCodeOrID: "zero", } if err := dp.probeMapFn(context.Background()); err != nil { t.Errorf("unexpected probeMapFn() error: %s", err) @@ -129,7 +129,7 @@ func TestDerpProber(t *testing.T) { } // Stop filtering regions. - dp.regionCode = "" + dp.regionCodeOrID = "" if err := dp.probeMapFn(context.Background()); err != nil { t.Errorf("unexpected probeMapFn() error: %s", err) } From 2af255790dd561ddbca8e1b8264b2b0e7f5f8976 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Thu, 9 Jan 2025 16:45:04 -0500 Subject: [PATCH 0310/1708] ipn/ipnlocal: add VIPServices hash to return body of vip-services c2n endpoint This commit updates the return body of c2n endpoint /vip-services to keep hash generation logic on client side. 
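With this change the endpoint responds with a wrapper object rather than the bare service list; the body is roughly of this shape (illustrative placeholder values; field names are those of the new tailcfg.C2NVIPServicesResponse type added below):

    {
      "VIPServices": [ ... ],
      "ServicesHash": "<hash matching the latest Hostinfo.ServicesHash>"
    }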
Updates tailscale/corp#24510 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- ipn/ipnlocal/c2n.go | 6 +++++- ipn/ipnlocal/local.go | 5 ++--- tailcfg/c2ntypes.go | 15 +++++++++++++++ 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index f3a4a3a3d..04f91954f 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -274,8 +274,12 @@ func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.R func handleC2NVIPServicesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { b.logf("c2n: GET /vip-services received") + var res tailcfg.C2NVIPServicesResponse + res.VIPServices = b.VIPServices() + res.ServicesHash = b.vipServiceHash(res.VIPServices) - json.NewEncoder(w).Encode(b.VIPServices()) + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) } func handleC2NUpdateGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ad3bbaef3..088f1ef75 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5022,7 +5022,7 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip } hi.SSH_HostKeys = sshHostKeys - hi.ServicesHash = b.vipServiceHashLocked(prefs) + hi.ServicesHash = b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) // The Hostinfo.WantIngress field tells control whether this node wants to // be wired up for ingress connections. If harmless if it's accidentally @@ -7661,8 +7661,7 @@ func (b *LocalBackend) VIPServices() []*tailcfg.VIPService { return b.vipServicesFromPrefsLocked(b.pm.CurrentPrefs()) } -func (b *LocalBackend) vipServiceHashLocked(prefs ipn.PrefsView) string { - services := b.vipServicesFromPrefsLocked(prefs) +func (b *LocalBackend) vipServiceHash(services []*tailcfg.VIPService) string { if len(services) == 0 { return "" } diff --git a/tailcfg/c2ntypes.go b/tailcfg/c2ntypes.go index 54efb736e..66f95785c 100644 --- a/tailcfg/c2ntypes.go +++ b/tailcfg/c2ntypes.go @@ -102,3 +102,18 @@ type C2NTLSCertInfo struct { // TODO(bradfitz): add fields for whether an ACME fetch is currently in // process and when it started, etc. } + +// C2NVIPServicesResponse is the response (from node to control) from the +// /vip-services handler. +// +// It returns the list of VIPServices that the node is currently serving with +// their port info and whether they are active or not. It also returns a hash of +// the response to allow the control server to detect changes. +type C2NVIPServicesResponse struct { + // VIPServices is the list of VIP services that the node is currently serving. + VIPServices []*VIPService `json:",omitempty"` + + // ServicesHash is the hash of VIPServices to allow the control server to detect + // changes. This value matches what is reported in latest [Hostinfo.ServicesHash]. + ServicesHash string +} From 5fdb4f83ad23f0ee7a9dc08ecc2a0ceeabd81fc3 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 10 Jan 2025 17:21:39 -0800 Subject: [PATCH 0311/1708] Dockerfile: bump base alpine image (#14604) Bump the versions to pick up some CVE patches. They don't affect us, but customer scanners will complain. 
Updates #cleanup Signed-off-by: Andrew Lytvynov --- ALPINE.txt | 2 +- Dockerfile | 2 +- Dockerfile.base | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ALPINE.txt b/ALPINE.txt index 55b698c77..f29702326 100644 --- a/ALPINE.txt +++ b/ALPINE.txt @@ -1 +1 @@ -3.18 \ No newline at end of file +3.21 \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 4ad3d88d9..7a5dbce5a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -62,7 +62,7 @@ RUN GOARCH=$TARGETARCH go install -ldflags="\ -X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \ -v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot -FROM alpine:3.18 +FROM alpine:3.21 RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables COPY --from=build-env /go/bin/* /usr/local/bin/ diff --git a/Dockerfile.base b/Dockerfile.base index eb4f0a02a..5186746a4 100644 --- a/Dockerfile.base +++ b/Dockerfile.base @@ -1,5 +1,5 @@ # Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause -FROM alpine:3.18 +FROM alpine:3.21 RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables iputils From 69b90742fe852cb83e0106b8f4fe36976c3ed3c8 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 12 Jan 2025 19:14:04 -0800 Subject: [PATCH 0312/1708] util/uniq,types/lazy,*: delete code that's now in Go std sync.OnceValue and slices.Compact were both added in Go 1.21. cmp.Or was added in Go 1.22. Updates #8632 Updates #11058 Change-Id: I89ba4c404f40188e1f8a9566c8aaa049be377754 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 1 - cmd/tailscaled/depaware.txt | 1 - ipn/ipnlocal/local.go | 3 +- ipn/localapi/debugderp.go | 23 +++---- types/lazy/lazy.go | 35 ----------- types/lazy/sync_test.go | 43 -------------- util/uniq/slice.go | 62 ------------------- util/uniq/slice_test.go | 102 -------------------------------- version/print.go | 5 +- version/prop.go | 3 +- version/version.go | 3 +- wgengine/magicsock/magicsock.go | 4 +- wgengine/pendopen.go | 4 +- 13 files changed, 18 insertions(+), 271 deletions(-) delete mode 100644 util/uniq/slice.go delete mode 100644 util/uniq/slice_test.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 0e42fe2b6..3489e5a60 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -817,7 +817,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail - tailscale.com/util/uniq from tailscale.com/ipn/ipnlocal+ tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 749c3f310..4dad47421 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -406,7 +406,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/ipn/ipnlocal+ tailscale.com/util/truncate from tailscale.com/logtail - tailscale.com/util/uniq from tailscale.com/ipn/ipnlocal+ tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ diff --git 
a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 088f1ef75..3a2a22c58 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -111,7 +111,6 @@ import ( "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/systemd" "tailscale.com/util/testenv" - "tailscale.com/util/uniq" "tailscale.com/util/usermetric" "tailscale.com/version" "tailscale.com/version/distro" @@ -3346,7 +3345,7 @@ func (b *LocalBackend) clearMachineKeyLocked() error { // incoming packet. func (b *LocalBackend) setTCPPortsIntercepted(ports []uint16) { slices.Sort(ports) - uniq.ModifySlice(&ports) + ports = slices.Compact(ports) var f func(uint16) bool switch len(ports) { case 0: diff --git a/ipn/localapi/debugderp.go b/ipn/localapi/debugderp.go index 85eb031e6..dbdf5cf79 100644 --- a/ipn/localapi/debugderp.go +++ b/ipn/localapi/debugderp.go @@ -4,6 +4,7 @@ package localapi import ( + "cmp" "context" "crypto/tls" "encoding/json" @@ -81,7 +82,7 @@ func (h *Handler) serveDebugDERPRegion(w http.ResponseWriter, r *http.Request) { client *http.Client = http.DefaultClient ) checkConn := func(derpNode *tailcfg.DERPNode) bool { - port := firstNonzero(derpNode.DERPPort, 443) + port := cmp.Or(derpNode.DERPPort, 443) var ( hasIPv4 bool @@ -89,7 +90,7 @@ func (h *Handler) serveDebugDERPRegion(w http.ResponseWriter, r *http.Request) { ) // Check IPv4 first - addr := net.JoinHostPort(firstNonzero(derpNode.IPv4, derpNode.HostName), strconv.Itoa(port)) + addr := net.JoinHostPort(cmp.Or(derpNode.IPv4, derpNode.HostName), strconv.Itoa(port)) conn, err := dialer.DialContext(ctx, "tcp4", addr) if err != nil { st.Errors = append(st.Errors, fmt.Sprintf("Error connecting to node %q @ %q over IPv4: %v", derpNode.HostName, addr, err)) @@ -98,7 +99,7 @@ func (h *Handler) serveDebugDERPRegion(w http.ResponseWriter, r *http.Request) { // Upgrade to TLS and verify that works properly. tlsConn := tls.Client(conn, &tls.Config{ - ServerName: firstNonzero(derpNode.CertName, derpNode.HostName), + ServerName: cmp.Or(derpNode.CertName, derpNode.HostName), }) if err := tlsConn.HandshakeContext(ctx); err != nil { st.Errors = append(st.Errors, fmt.Sprintf("Error upgrading connection to node %q @ %q to TLS over IPv4: %v", derpNode.HostName, addr, err)) @@ -108,7 +109,7 @@ func (h *Handler) serveDebugDERPRegion(w http.ResponseWriter, r *http.Request) { } // Check IPv6 - addr = net.JoinHostPort(firstNonzero(derpNode.IPv6, derpNode.HostName), strconv.Itoa(port)) + addr = net.JoinHostPort(cmp.Or(derpNode.IPv6, derpNode.HostName), strconv.Itoa(port)) conn, err = dialer.DialContext(ctx, "tcp6", addr) if err != nil { st.Errors = append(st.Errors, fmt.Sprintf("Error connecting to node %q @ %q over IPv6: %v", derpNode.HostName, addr, err)) @@ -117,7 +118,7 @@ func (h *Handler) serveDebugDERPRegion(w http.ResponseWriter, r *http.Request) { // Upgrade to TLS and verify that works properly. 
tlsConn := tls.Client(conn, &tls.Config{ - ServerName: firstNonzero(derpNode.CertName, derpNode.HostName), + ServerName: cmp.Or(derpNode.CertName, derpNode.HostName), // TODO(andrew-d): we should print more // detailed failure information on if/why TLS // verification fails @@ -166,7 +167,7 @@ func (h *Handler) serveDebugDERPRegion(w http.ResponseWriter, r *http.Request) { addr = addrs[0] } - addrPort := netip.AddrPortFrom(addr, uint16(firstNonzero(derpNode.STUNPort, 3478))) + addrPort := netip.AddrPortFrom(addr, uint16(cmp.Or(derpNode.STUNPort, 3478))) txID := stun.NewTxID() req := stun.Request(txID) @@ -292,13 +293,3 @@ func (h *Handler) serveDebugDERPRegion(w http.ResponseWriter, r *http.Request) { // issued in the first place, tell them specifically that the // cert is bad not just that the connection failed. } - -func firstNonzero[T comparable](items ...T) T { - var zero T - for _, item := range items { - if item != zero { - return item - } - } - return zero -} diff --git a/types/lazy/lazy.go b/types/lazy/lazy.go index 43325512d..c29a03db4 100644 --- a/types/lazy/lazy.go +++ b/types/lazy/lazy.go @@ -120,41 +120,6 @@ func (z *SyncValue[T]) PeekErr() (v T, err error, ok bool) { return zero, nil, false } -// SyncFunc wraps a function to make it lazy. -// -// The returned function calls fill the first time it's called, and returns -// fill's result on every subsequent call. -// -// The returned function is safe for concurrent use. -func SyncFunc[T any](fill func() T) func() T { - var ( - once sync.Once - v T - ) - return func() T { - once.Do(func() { v = fill() }) - return v - } -} - -// SyncFuncErr wraps a function to make it lazy. -// -// The returned function calls fill the first time it's called, and returns -// fill's results on every subsequent call. -// -// The returned function is safe for concurrent use. -func SyncFuncErr[T any](fill func() (T, error)) func() (T, error) { - var ( - once sync.Once - v T - err error - ) - return func() (T, error) { - once.Do(func() { v, err = fill() }) - return v, err - } -} - // TB is a subset of testing.TB that we use to set up test helpers. // It's defined here to avoid pulling in the testing package. 
type TB interface { diff --git a/types/lazy/sync_test.go b/types/lazy/sync_test.go index 5578eee0c..4d1278253 100644 --- a/types/lazy/sync_test.go +++ b/types/lazy/sync_test.go @@ -354,46 +354,3 @@ func TestSyncValueSetForTest(t *testing.T) { }) } } - -func TestSyncFunc(t *testing.T) { - f := SyncFunc(fortyTwo) - - n := int(testing.AllocsPerRun(1000, func() { - got := f() - if got != 42 { - t.Fatalf("got %v; want 42", got) - } - })) - if n != 0 { - t.Errorf("allocs = %v; want 0", n) - } -} - -func TestSyncFuncErr(t *testing.T) { - f := SyncFuncErr(func() (int, error) { - return 42, nil - }) - n := int(testing.AllocsPerRun(1000, func() { - got, err := f() - if got != 42 || err != nil { - t.Fatalf("got %v, %v; want 42, nil", got, err) - } - })) - if n != 0 { - t.Errorf("allocs = %v; want 0", n) - } - - wantErr := errors.New("test error") - f = SyncFuncErr(func() (int, error) { - return 0, wantErr - }) - n = int(testing.AllocsPerRun(1000, func() { - got, err := f() - if got != 0 || err != wantErr { - t.Fatalf("got %v, %v; want 0, %v", got, err, wantErr) - } - })) - if n != 0 { - t.Errorf("allocs = %v; want 0", n) - } -} diff --git a/util/uniq/slice.go b/util/uniq/slice.go deleted file mode 100644 index 4ab933a9d..000000000 --- a/util/uniq/slice.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package uniq provides removal of adjacent duplicate elements in slices. -// It is similar to the unix command uniq. -package uniq - -// ModifySlice removes adjacent duplicate elements from the given slice. It -// adjusts the length of the slice appropriately and zeros the tail. -// -// ModifySlice does O(len(*slice)) operations. -func ModifySlice[E comparable](slice *[]E) { - // Remove duplicates - dst := 0 - for i := 1; i < len(*slice); i++ { - if (*slice)[i] == (*slice)[dst] { - continue - } - dst++ - (*slice)[dst] = (*slice)[i] - } - - // Zero out the elements we removed at the end of the slice - end := dst + 1 - var zero E - for i := end; i < len(*slice); i++ { - (*slice)[i] = zero - } - - // Truncate the slice - if end < len(*slice) { - *slice = (*slice)[:end] - } -} - -// ModifySliceFunc is the same as ModifySlice except that it allows using a -// custom comparison function. -// -// eq should report whether the two provided elements are equal. 
-func ModifySliceFunc[E any](slice *[]E, eq func(i, j E) bool) { - // Remove duplicates - dst := 0 - for i := 1; i < len(*slice); i++ { - if eq((*slice)[dst], (*slice)[i]) { - continue - } - dst++ - (*slice)[dst] = (*slice)[i] - } - - // Zero out the elements we removed at the end of the slice - end := dst + 1 - var zero E - for i := end; i < len(*slice); i++ { - (*slice)[i] = zero - } - - // Truncate the slice - if end < len(*slice) { - *slice = (*slice)[:end] - } -} diff --git a/util/uniq/slice_test.go b/util/uniq/slice_test.go deleted file mode 100644 index 564fc0866..000000000 --- a/util/uniq/slice_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package uniq_test - -import ( - "reflect" - "strconv" - "testing" - - "tailscale.com/util/uniq" -) - -func runTests(t *testing.T, cb func(*[]uint32)) { - tests := []struct { - // Use uint32 to be different from an int-typed slice index - in []uint32 - want []uint32 - }{ - {in: []uint32{0, 1, 2}, want: []uint32{0, 1, 2}}, - {in: []uint32{0, 1, 2, 2}, want: []uint32{0, 1, 2}}, - {in: []uint32{0, 0, 1, 2}, want: []uint32{0, 1, 2}}, - {in: []uint32{0, 1, 0, 2}, want: []uint32{0, 1, 0, 2}}, - {in: []uint32{0}, want: []uint32{0}}, - {in: []uint32{0, 0}, want: []uint32{0}}, - {in: []uint32{}, want: []uint32{}}, - } - - for _, test := range tests { - in := make([]uint32, len(test.in)) - copy(in, test.in) - cb(&test.in) - if !reflect.DeepEqual(test.in, test.want) { - t.Errorf("uniq.Slice(%v) = %v, want %v", in, test.in, test.want) - } - start := len(test.in) - test.in = test.in[:cap(test.in)] - for i := start; i < len(in); i++ { - if test.in[i] != 0 { - t.Errorf("uniq.Slice(%v): non-0 in tail of %v at index %v", in, test.in, i) - } - } - } -} - -func TestModifySlice(t *testing.T) { - runTests(t, func(slice *[]uint32) { - uniq.ModifySlice(slice) - }) -} - -func TestModifySliceFunc(t *testing.T) { - runTests(t, func(slice *[]uint32) { - uniq.ModifySliceFunc(slice, func(i, j uint32) bool { - return i == j - }) - }) -} - -func Benchmark(b *testing.B) { - benches := []struct { - name string - reset func(s []byte) - }{ - {name: "AllDups", - reset: func(s []byte) { - for i := range s { - s[i] = '*' - } - }, - }, - {name: "NoDups", - reset: func(s []byte) { - for i := range s { - s[i] = byte(i) - } - }, - }, - } - - for _, bb := range benches { - b.Run(bb.name, func(b *testing.B) { - for size := 1; size <= 4096; size *= 16 { - b.Run(strconv.Itoa(size), func(b *testing.B) { - benchmark(b, 64, bb.reset) - }) - } - }) - } -} - -func benchmark(b *testing.B, size int64, reset func(s []byte)) { - b.ReportAllocs() - b.SetBytes(size) - s := make([]byte, size) - b.ResetTimer() - for range b.N { - s = s[:size] - reset(s) - uniq.ModifySlice(&s) - } -} diff --git a/version/print.go b/version/print.go index 7d8554279..be90432cc 100644 --- a/version/print.go +++ b/version/print.go @@ -7,11 +7,10 @@ import ( "fmt" "runtime" "strings" - - "tailscale.com/types/lazy" + "sync" ) -var stringLazy = lazy.SyncFunc(func() string { +var stringLazy = sync.OnceValue(func() string { var ret strings.Builder ret.WriteString(Short()) ret.WriteByte('\n') diff --git a/version/prop.go b/version/prop.go index fee76c65f..6026d1179 100644 --- a/version/prop.go +++ b/version/prop.go @@ -9,6 +9,7 @@ import ( "runtime" "strconv" "strings" + "sync" "tailscale.com/tailcfg" "tailscale.com/types/lazy" @@ -174,7 +175,7 @@ func IsUnstableBuild() bool { }) } -var isDev = lazy.SyncFunc(func() bool { +var isDev = 
sync.OnceValue(func() bool { return strings.Contains(Short(), "-dev") }) diff --git a/version/version.go b/version/version.go index 5edea22ca..2add25689 100644 --- a/version/version.go +++ b/version/version.go @@ -9,6 +9,7 @@ import ( "runtime/debug" "strconv" "strings" + "sync" tailscaleroot "tailscale.com" "tailscale.com/types/lazy" @@ -117,7 +118,7 @@ func (i embeddedInfo) commitAbbrev() string { return i.commit } -var getEmbeddedInfo = lazy.SyncFunc(func() embeddedInfo { +var getEmbeddedInfo = sync.OnceValue(func() embeddedInfo { bi, ok := debug.ReadBuildInfo() if !ok { return embeddedInfo{} diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index d3075f55d..6a49f091e 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -17,6 +17,7 @@ import ( "net/netip" "reflect" "runtime" + "slices" "strconv" "strings" "sync" @@ -59,7 +60,6 @@ import ( "tailscale.com/util/ringbuffer" "tailscale.com/util/set" "tailscale.com/util/testenv" - "tailscale.com/util/uniq" "tailscale.com/util/usermetric" "tailscale.com/wgengine/capture" "tailscale.com/wgengine/wgint" @@ -2666,7 +2666,7 @@ func (c *Conn) bindSocket(ruc *RebindingUDPConn, network string, curPortFate cur } ports = append(ports, 0) // Remove duplicates. (All duplicates are consecutive.) - uniq.ModifySlice(&ports) + ports = slices.Compact(ports) if debugBindSocket() { c.logf("magicsock: bindSocket: candidate ports: %+v", ports) diff --git a/wgengine/pendopen.go b/wgengine/pendopen.go index 7db07c685..308c3ede2 100644 --- a/wgengine/pendopen.go +++ b/wgengine/pendopen.go @@ -8,6 +8,7 @@ import ( "net/netip" "runtime" "strings" + "sync" "time" "github.com/gaissmai/bart" @@ -15,7 +16,6 @@ import ( "tailscale.com/net/packet" "tailscale.com/net/tstun" "tailscale.com/types/ipproto" - "tailscale.com/types/lazy" "tailscale.com/util/mak" "tailscale.com/wgengine/filter" ) @@ -91,7 +91,7 @@ func (e *userspaceEngine) trackOpenPreFilterIn(pp *packet.Parsed, t *tstun.Wrapp var ( appleIPRange = netip.MustParsePrefix("17.0.0.0/8") - canonicalIPs = lazy.SyncFunc(func() (checkIPFunc func(netip.Addr) bool) { + canonicalIPs = sync.OnceValue(func() (checkIPFunc func(netip.Addr) bool) { // https://bgp.he.net/AS41231#_prefixes t := &bart.Table[bool]{} for _, s := range strings.Fields(` From 60d19fa00d93f756fe9244a6c6b7ceb806696477 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 12 Jan 2025 21:03:43 -0800 Subject: [PATCH 0313/1708] all: use Go 1.21's binary.NativeEndian We still use josharian/native (hi @josharian!) 
via netlink, but I also sent https://github.com/mdlayher/netlink/pull/220 Updates #8632 Change-Id: I2eedcb7facb36ec894aee7f152c8a1f56d7fc8ba Signed-off-by: Brad Fitzpatrick --- go.mod | 2 +- net/dns/nm.go | 4 ++-- util/cstruct/cstruct.go | 9 ++++----- util/linuxfw/nftables.go | 6 +++--- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index c28338caf..f3adfd47a 100644 --- a/go.mod +++ b/go.mod @@ -52,7 +52,6 @@ require ( github.com/inetaf/tcpproxy v0.0.0-20240214030015-3ce58045626c github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 github.com/jellydator/ttlcache/v3 v3.1.0 - github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 github.com/jsimonetti/rtnetlink v1.4.0 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/klauspost/compress v1.17.11 @@ -152,6 +151,7 @@ require ( github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd // indirect github.com/gorilla/securecookie v1.1.2 // indirect github.com/jjti/go-spancheck v0.5.3 // indirect + github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 // indirect github.com/karamaru-alpha/copyloopvar v1.0.8 // indirect github.com/macabu/inamedparam v0.1.3 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect diff --git a/net/dns/nm.go b/net/dns/nm.go index adb33cdb7..ef07a90d8 100644 --- a/net/dns/nm.go +++ b/net/dns/nm.go @@ -7,6 +7,7 @@ package dns import ( "context" + "encoding/binary" "fmt" "net" "net/netip" @@ -14,7 +15,6 @@ import ( "time" "github.com/godbus/dbus/v5" - "github.com/josharian/native" "tailscale.com/net/tsaddr" "tailscale.com/util/dnsname" ) @@ -137,7 +137,7 @@ func (m *nmManager) trySet(ctx context.Context, config OSConfig) error { for _, ip := range config.Nameservers { b := ip.As16() if ip.Is4() { - dnsv4 = append(dnsv4, native.Endian.Uint32(b[12:])) + dnsv4 = append(dnsv4, binary.NativeEndian.Uint32(b[12:])) } else { dnsv6 = append(dnsv6, b[:]) } diff --git a/util/cstruct/cstruct.go b/util/cstruct/cstruct.go index 464dc5dc3..4d1d0a98b 100644 --- a/util/cstruct/cstruct.go +++ b/util/cstruct/cstruct.go @@ -6,10 +6,9 @@ package cstruct import ( + "encoding/binary" "errors" "io" - - "github.com/josharian/native" ) // Size of a pointer-typed value, in bits @@ -120,7 +119,7 @@ func (d *Decoder) Uint16() uint16 { d.err = err return 0 } - return native.Endian.Uint16(d.dbuf[0:2]) + return binary.NativeEndian.Uint16(d.dbuf[0:2]) } // Uint32 returns a uint32 decoded from the buffer. @@ -133,7 +132,7 @@ func (d *Decoder) Uint32() uint32 { d.err = err return 0 } - return native.Endian.Uint32(d.dbuf[0:4]) + return binary.NativeEndian.Uint32(d.dbuf[0:4]) } // Uint64 returns a uint64 decoded from the buffer. @@ -146,7 +145,7 @@ func (d *Decoder) Uint64() uint64 { d.err = err return 0 } - return native.Endian.Uint64(d.dbuf[0:8]) + return binary.NativeEndian.Uint64(d.dbuf[0:8]) } // Uintptr returns a uintptr decoded from the buffer. 
diff --git a/util/linuxfw/nftables.go b/util/linuxfw/nftables.go index 056563071..e8b267b5e 100644 --- a/util/linuxfw/nftables.go +++ b/util/linuxfw/nftables.go @@ -8,6 +8,7 @@ package linuxfw import ( "cmp" + "encoding/binary" "fmt" "sort" "strings" @@ -15,7 +16,6 @@ import ( "github.com/google/nftables" "github.com/google/nftables/expr" "github.com/google/nftables/xt" - "github.com/josharian/native" "golang.org/x/sys/unix" "tailscale.com/types/logger" ) @@ -235,8 +235,8 @@ func printMatchInfo(name string, info xt.InfoAny) string { break } - pkttype := int(native.Endian.Uint32(data[0:4])) - invert := int(native.Endian.Uint32(data[4:8])) + pkttype := int(binary.NativeEndian.Uint32(data[0:4])) + invert := int(binary.NativeEndian.Uint32(data[4:8])) var invertPrefix string if invert != 0 { invertPrefix = "!" From 377127c20ca5cb5660e7086d64bc1fcb3e9da9d8 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Mon, 13 Jan 2025 10:02:26 -0800 Subject: [PATCH 0314/1708] Revert "Dockerfile: bump base alpine image (#14604)" (#14620) This reverts commit 5fdb4f83ad23f0ee7a9dc08ecc2a0ceeabd81fc3. Signed-off-by: Andrew Lytvynov --- ALPINE.txt | 2 +- Dockerfile | 2 +- Dockerfile.base | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ALPINE.txt b/ALPINE.txt index f29702326..55b698c77 100644 --- a/ALPINE.txt +++ b/ALPINE.txt @@ -1 +1 @@ -3.21 \ No newline at end of file +3.18 \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 7a5dbce5a..4ad3d88d9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -62,7 +62,7 @@ RUN GOARCH=$TARGETARCH go install -ldflags="\ -X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \ -v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot -FROM alpine:3.21 +FROM alpine:3.18 RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables COPY --from=build-env /go/bin/* /usr/local/bin/ diff --git a/Dockerfile.base b/Dockerfile.base index 5186746a4..eb4f0a02a 100644 --- a/Dockerfile.base +++ b/Dockerfile.base @@ -1,5 +1,5 @@ # Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause -FROM alpine:3.21 +FROM alpine:3.18 RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables iputils From 6ccde369ffa0aa381fdaf6a735f3e3bbaba179b9 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Thu, 9 Jan 2025 11:50:11 -0600 Subject: [PATCH 0315/1708] prober: record total bytes transferred in DERP bandwidth probes This will enable Prometheus queries to look at the bandwidth over time windows, for example 'increase(derp_bw_bytes_total[1h]) / increase(derp_bw_transfer_time_seconds_total[1h])'. Fixes commit a51672cafd8b6c4e87915a55bda1491eb7cbee84.
Updates tailscale/corp#25503 Signed-off-by: Percy Wegmann --- prober/derp.go | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/prober/derp.go b/prober/derp.go index f405549ff..870460d96 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -301,13 +301,14 @@ func (d *derpProber) probeBandwidth(from, to string, size int64) ProbeClass { derpPath = "single" } var transferTimeSeconds expvar.Float + var totalBytesTransferred expvar.Float return ProbeClass{ Probe: func(ctx context.Context) error { fromN, toN, err := d.getNodePair(from, to) if err != nil { return err } - return derpProbeBandwidth(ctx, d.lastDERPMap, fromN, toN, size, &transferTimeSeconds, d.bwTUNIPv4Prefix) + return derpProbeBandwidth(ctx, d.lastDERPMap, fromN, toN, size, &transferTimeSeconds, &totalBytesTransferred, d.bwTUNIPv4Prefix) }, Class: "derp_bw", Labels: Labels{ @@ -315,11 +316,15 @@ func (d *derpProber) probeBandwidth(from, to string, size int64) ProbeClass { "tcp_in_tcp": strconv.FormatBool(d.bwTUNIPv4Prefix != nil), }, Metrics: func(l prometheus.Labels) []prometheus.Metric { - return []prometheus.Metric{ + metrics := []prometheus.Metric{ prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_probe_size_bytes", "Payload size of the bandwidth prober", nil, l), prometheus.GaugeValue, float64(size)), prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_transfer_time_seconds_total", "Time it took to transfer data", nil, l), prometheus.CounterValue, transferTimeSeconds.Value()), - prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_bytes_total", "Amount of data transferred", nil, l), prometheus.CounterValue, float64(size)), } + if d.bwTUNIPv4Prefix != nil { + // For TCP-in-TCP probes, also record cumulative bytes transferred. + metrics = append(metrics, prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_bytes_total", "Amount of data transferred", nil, l), prometheus.CounterValue, totalBytesTransferred.Value())) + } + return metrics }, } } @@ -655,7 +660,7 @@ func derpProbeUDP(ctx context.Context, ipStr string, port int) error { // DERP clients connected to two DERP servers.If tunIPv4Address is specified, // probes will use a TCP connection over a TUN device at this address in order // to exercise TCP-in-TCP in similar fashion to TCP over Tailscale via DERP. -func derpProbeBandwidth(ctx context.Context, dm *tailcfg.DERPMap, from, to *tailcfg.DERPNode, size int64, transferTimeSeconds *expvar.Float, tunIPv4Prefix *netip.Prefix) (err error) { +func derpProbeBandwidth(ctx context.Context, dm *tailcfg.DERPMap, from, to *tailcfg.DERPNode, size int64, transferTimeSeconds, totalBytesTransferred *expvar.Float, tunIPv4Prefix *netip.Prefix) (err error) { // This probe uses clients with isProber=false to avoid spamming the derper logs with every packet // sent by the bandwidth probe. fromc, err := newConn(ctx, dm, from, false) @@ -677,7 +682,7 @@ func derpProbeBandwidth(ctx context.Context, dm *tailcfg.DERPMap, from, to *tail } if tunIPv4Prefix != nil { - err = derpProbeBandwidthTUN(ctx, transferTimeSeconds, from, to, fromc, toc, size, tunIPv4Prefix) + err = derpProbeBandwidthTUN(ctx, transferTimeSeconds, totalBytesTransferred, from, to, fromc, toc, size, tunIPv4Prefix) } else { err = derpProbeBandwidthDirect(ctx, transferTimeSeconds, from, to, fromc, toc, size) } @@ -848,7 +853,7 @@ var derpProbeBandwidthTUNMu sync.Mutex // to another over a TUN device at an address at the start of the usable host IP // range that the given tunAddress lives in. 
The time taken to finish the transfer // is recorded in `transferTimeSeconds`. -func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds *expvar.Float, from, to *tailcfg.DERPNode, fromc, toc *derphttp.Client, size int64, prefix *netip.Prefix) error { +func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesTransferred *expvar.Float, from, to *tailcfg.DERPNode, fromc, toc *derphttp.Client, size int64, prefix *netip.Prefix) error { // Make sure all goroutines have finished. var wg sync.WaitGroup defer wg.Wait() @@ -1046,9 +1051,10 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds *expvar.Floa readFinishedC <- fmt.Errorf("unable to set read deadline: %w", err) } } - _, err = io.CopyN(io.Discard, readConn, size) - // Measure transfer time irrespective of whether it succeeded or failed. + n, err := io.CopyN(io.Discard, readConn, size) + // Measure transfer time and bytes transferred irrespective of whether it succeeded or failed. transferTimeSeconds.Add(time.Since(start).Seconds()) + totalBytesTransferred.Add(float64(n)) readFinishedC <- err }() From 64ab0ddff14cfcae55b66017ba104afd3640e422 Mon Sep 17 00:00:00 2001 From: Michael Stapelberg Date: Sun, 12 Jan 2025 09:57:38 +0100 Subject: [PATCH 0316/1708] cmd/tailscale/cli: only exit silently if len(args) == 0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This amends commit b7e48058c8d243adf1ff687e3e92d3fb02b035ea. That commit broke all documented ways of starting Tailscale on gokrazy: https://gokrazy.org/packages/tailscale/ — both Option A (tailscale up) and Option B (tailscale up --auth-key) rely on the tailscale CLI working. I verified that the tailscale CLI just prints its help when started without arguments, i.e. it does not stay running and is not restarted. I verified that the tailscale CLI successfully exits when started with tailscale up --auth-key, regardless of whether the node has joined the tailnet yet or not. I verified that the tailscale CLI successfully waits and exits when started with tailscale up, as expected. fixes https://github.com/gokrazy/gokrazy/issues/286 Signed-off-by: Michael Stapelberg --- cmd/tailscale/cli/cli.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 66961b2e0..542a2e464 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -84,9 +84,9 @@ var localClient = tailscale.LocalClient{ // Run runs the CLI. The args do not include the binary name. func Run(args []string) (err error) { - if runtime.GOOS == "linux" && os.Getenv("GOKRAZY_FIRST_START") == "1" && distro.Get() == distro.Gokrazy && os.Getppid() == 1 { - // We're running on gokrazy and it's the first start. - // Don't run the tailscale CLI as a service; just exit. + if runtime.GOOS == "linux" && os.Getenv("GOKRAZY_FIRST_START") == "1" && distro.Get() == distro.Gokrazy && os.Getppid() == 1 && len(args) == 0 { + // We're running on gokrazy and the user did not specify 'up'. + // Don't run the tailscale CLI and spam logs with usage; just exit. // See https://gokrazy.org/development/process-interface/ os.Exit(0) } From e4385f1c022dcce6809758c1bf0c06001a69c5dd Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Tue, 14 Jan 2025 12:12:05 +0000 Subject: [PATCH 0317/1708] cmd/tailscale/cli: add --posture-checking to tailscale up This will prevent `tailscale up` from resetting the posture checking client pref.
Fixes #12154 Signed-off-by: Anton Tolchanov --- cmd/tailscale/cli/cli_test.go | 14 ++++++++++++++ cmd/tailscale/cli/up.go | 5 +++++ 2 files changed, 19 insertions(+) diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 0444e914c..dccb69876 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -601,6 +601,19 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) { goos: "linux", want: "", }, + { + name: "losing_posture_checking", + flags: []string{"--accept-dns"}, + curPrefs: &ipn.Prefs{ + ControlURL: ipn.DefaultControlURL, + WantRunning: false, + CorpDNS: true, + PostureChecking: true, + NetfilterMode: preftype.NetfilterOn, + NoStatefulFiltering: opt.NewBool(true), + }, + want: accidentalUpPrefix + " --accept-dns --posture-checking", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -1045,6 +1058,7 @@ func TestUpdatePrefs(t *testing.T) { NoSNATSet: true, NoStatefulFilteringSet: true, OperatorUserSet: true, + PostureCheckingSet: true, RouteAllSet: true, RunSSHSet: true, ShieldsUpSet: true, diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index b907257cf..4af264d73 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -116,6 +116,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { upf.StringVar(&upArgs.advertiseRoutes, "advertise-routes", "", "routes to advertise to other nodes (comma-separated, e.g. \"10.0.0.0/8,192.168.0.0/24\") or empty string to not advertise routes") upf.BoolVar(&upArgs.advertiseConnector, "advertise-connector", false, "advertise this node as an app connector") upf.BoolVar(&upArgs.advertiseDefaultRoute, "advertise-exit-node", false, "offer to be an exit node for internet traffic for the tailnet") + upf.BoolVar(&upArgs.postureChecking, "posture-checking", false, hidden+"allow management plane to gather device posture information") if safesocket.GOOSUsesPeerCreds(goos) { upf.StringVar(&upArgs.opUser, "operator", "", "Unix username to allow to operate on tailscaled without sudo") @@ -194,6 +195,7 @@ type upArgsT struct { timeout time.Duration acceptedRisks string profileName string + postureChecking bool } func (a upArgsT) getAuthKey() (string, error) { @@ -304,6 +306,7 @@ func prefsFromUpArgs(upArgs upArgsT, warnf logger.Logf, st *ipnstate.Status, goo prefs.OperatorUser = upArgs.opUser prefs.ProfileName = upArgs.profileName prefs.AppConnector.Advertise = upArgs.advertiseConnector + prefs.PostureChecking = upArgs.postureChecking if goos == "linux" { prefs.NoSNAT = !upArgs.snat @@ -1053,6 +1056,8 @@ func prefsToFlags(env upCheckEnv, prefs *ipn.Prefs) (flagVal map[string]any) { set(prefs.NetfilterMode.String()) case "unattended": set(prefs.ForceDaemon) + case "posture-checking": + set(prefs.PostureChecking) } }) return ret From da9965d51ca89a393bbf4c52818a579320dca93c Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 8 Jan 2025 17:21:44 -0600 Subject: [PATCH 0318/1708] cmd/viewer,types/views,various: avoid allocations in pointer field getters whenever possible In this PR, we add a generic views.ValuePointer type that can be used as a view for pointers to basic types and struct types that do not require deep cloning and do not have corresponding view types. Its Get/GetOk methods return stack-allocated shallow copies of the underlying value. We then update the cmd/viewer codegen to produce getters that return either concrete views when available or ValuePointer views when not, for pointer fields in generated view types. 
This allows us to avoid unnecessary allocations compared to returning pointers to newly allocated shallow copies. Updates #14570 Signed-off-by: Nick Khyl --- cmd/tsconnect/wasm/wasm_js.go | 2 +- cmd/viewer/tests/tests.go | 9 +++- cmd/viewer/tests/tests_clone.go | 4 ++ cmd/viewer/tests/tests_view.go | 45 +++++-------------- cmd/viewer/viewer.go | 72 ++++++++++++++++++++++++----- control/controlclient/map.go | 14 +++--- ipn/ipnlocal/drive.go | 3 +- ipn/ipnlocal/expiry_test.go | 8 ++-- ipn/ipnlocal/local.go | 39 +++++++--------- ipn/ipnlocal/peerapi.go | 8 ++-- tailcfg/tailcfg_view.go | 75 +++++++------------------------ types/prefs/prefs_view_test.go | 11 +---- types/views/views.go | 80 +++++++++++++++++++++++++++++++++ wgengine/pendopen.go | 8 ++-- wgengine/wgcfg/nmcfg/nmcfg.go | 4 +- 15 files changed, 219 insertions(+), 163 deletions(-) diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index 4ea1cd897..a7e3e506b 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -282,7 +282,7 @@ func (i *jsIPN) run(jsCallbacks js.Value) { MachineKey: p.Machine().String(), NodeKey: p.Key().String(), }, - Online: p.Online(), + Online: p.Online().Clone(), TailscaleSSHEnabled: p.Hostinfo().TailscaleSSHEnabled(), } }), diff --git a/cmd/viewer/tests/tests.go b/cmd/viewer/tests/tests.go index 14a488861..ac094c53b 100644 --- a/cmd/viewer/tests/tests.go +++ b/cmd/viewer/tests/tests.go @@ -37,9 +37,14 @@ type Map struct { StructWithPtrKey map[StructWithPtrs]int `json:"-"` } +type StructWithNoView struct { + Value int +} + type StructWithPtrs struct { - Value *StructWithoutPtrs - Int *int + Value *StructWithoutPtrs + Int *int + NoView *StructWithNoView NoCloneValue *StructWithoutPtrs `codegen:"noclone"` } diff --git a/cmd/viewer/tests/tests_clone.go b/cmd/viewer/tests/tests_clone.go index 9131f5040..106a9b684 100644 --- a/cmd/viewer/tests/tests_clone.go +++ b/cmd/viewer/tests/tests_clone.go @@ -28,6 +28,9 @@ func (src *StructWithPtrs) Clone() *StructWithPtrs { if dst.Int != nil { dst.Int = ptr.To(*src.Int) } + if dst.NoView != nil { + dst.NoView = ptr.To(*src.NoView) + } return dst } @@ -35,6 +38,7 @@ func (src *StructWithPtrs) Clone() *StructWithPtrs { var _StructWithPtrsCloneNeedsRegeneration = StructWithPtrs(struct { Value *StructWithoutPtrs Int *int + NoView *StructWithNoView NoCloneValue *StructWithoutPtrs }{}) diff --git a/cmd/viewer/tests/tests_view.go b/cmd/viewer/tests/tests_view.go index 9c74c9426..41c1338ff 100644 --- a/cmd/viewer/tests/tests_view.go +++ b/cmd/viewer/tests/tests_view.go @@ -61,20 +61,11 @@ func (v *StructWithPtrsView) UnmarshalJSON(b []byte) error { return nil } -func (v StructWithPtrsView) Value() *StructWithoutPtrs { - if v.ж.Value == nil { - return nil - } - x := *v.ж.Value - return &x -} +func (v StructWithPtrsView) Value() StructWithoutPtrsView { return v.ж.Value.View() } +func (v StructWithPtrsView) Int() views.ValuePointer[int] { return views.ValuePointerOf(v.ж.Int) } -func (v StructWithPtrsView) Int() *int { - if v.ж.Int == nil { - return nil - } - x := *v.ж.Int - return &x +func (v StructWithPtrsView) NoView() views.ValuePointer[StructWithNoView] { + return views.ValuePointerOf(v.ж.NoView) } func (v StructWithPtrsView) NoCloneValue() *StructWithoutPtrs { return v.ж.NoCloneValue } @@ -85,6 +76,7 @@ func (v StructWithPtrsView) Equal(v2 StructWithPtrsView) bool { return v.ж.Equa var _StructWithPtrsViewNeedsRegeneration = StructWithPtrs(struct { Value *StructWithoutPtrs Int *int + NoView *StructWithNoView NoCloneValue 
*StructWithoutPtrs }{}) @@ -424,12 +416,8 @@ func (v *GenericIntStructView[T]) UnmarshalJSON(b []byte) error { } func (v GenericIntStructView[T]) Value() T { return v.ж.Value } -func (v GenericIntStructView[T]) Pointer() *T { - if v.ж.Pointer == nil { - return nil - } - x := *v.ж.Pointer - return &x +func (v GenericIntStructView[T]) Pointer() views.ValuePointer[T] { + return views.ValuePointerOf(v.ж.Pointer) } func (v GenericIntStructView[T]) Slice() views.Slice[T] { return views.SliceOf(v.ж.Slice) } @@ -500,12 +488,8 @@ func (v *GenericNoPtrsStructView[T]) UnmarshalJSON(b []byte) error { } func (v GenericNoPtrsStructView[T]) Value() T { return v.ж.Value } -func (v GenericNoPtrsStructView[T]) Pointer() *T { - if v.ж.Pointer == nil { - return nil - } - x := *v.ж.Pointer - return &x +func (v GenericNoPtrsStructView[T]) Pointer() views.ValuePointer[T] { + return views.ValuePointerOf(v.ж.Pointer) } func (v GenericNoPtrsStructView[T]) Slice() views.Slice[T] { return views.SliceOf(v.ж.Slice) } @@ -722,19 +706,14 @@ func (v *StructWithTypeAliasFieldsView) UnmarshalJSON(b []byte) error { return nil } -func (v StructWithTypeAliasFieldsView) WithPtr() StructWithPtrsView { return v.ж.WithPtr.View() } +func (v StructWithTypeAliasFieldsView) WithPtr() StructWithPtrsAliasView { return v.ж.WithPtr.View() } func (v StructWithTypeAliasFieldsView) WithoutPtr() StructWithoutPtrsAlias { return v.ж.WithoutPtr } func (v StructWithTypeAliasFieldsView) WithPtrByPtr() StructWithPtrsAliasView { return v.ж.WithPtrByPtr.View() } -func (v StructWithTypeAliasFieldsView) WithoutPtrByPtr() *StructWithoutPtrsAlias { - if v.ж.WithoutPtrByPtr == nil { - return nil - } - x := *v.ж.WithoutPtrByPtr - return &x +func (v StructWithTypeAliasFieldsView) WithoutPtrByPtr() StructWithoutPtrsAliasView { + return v.ж.WithoutPtrByPtr.View() } - func (v StructWithTypeAliasFieldsView) SliceWithPtrs() views.SliceView[*StructWithPtrsAlias, StructWithPtrsAliasView] { return views.SliceOfViews[*StructWithPtrsAlias, StructWithPtrsAliasView](v.ж.SliceWithPtrs) } diff --git a/cmd/viewer/viewer.go b/cmd/viewer/viewer.go index 0c5868f3a..e265defe0 100644 --- a/cmd/viewer/viewer.go +++ b/cmd/viewer/viewer.go @@ -79,13 +79,7 @@ func (v *{{.ViewName}}{{.TypeParamNames}}) UnmarshalJSON(b []byte) error { {{end}} {{define "makeViewField"}}func (v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() {{.FieldViewName}} { return {{.MakeViewFnName}}(&v.ж.{{.FieldName}}) } {{end}} -{{define "valuePointerField"}}func (v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() {{.FieldType}} { - if v.ж.{{.FieldName}} == nil { - return nil - } - x := *v.ж.{{.FieldName}} - return &x -} +{{define "valuePointerField"}}func (v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.ValuePointer[{{.FieldType}}] { return views.ValuePointerOf(v.ж.{{.FieldName}}) } {{end}} {{define "mapField"}} @@ -126,7 +120,7 @@ func requiresCloning(t types.Type) (shallow, deep bool, base types.Type) { return p, p, t } -func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, thisPkg *types.Package) { +func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ *types.Package) { t, ok := typ.Underlying().(*types.Struct) if !ok || codegen.IsViewType(t) { return @@ -354,10 +348,32 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, thi } else { writeTemplate("unsupportedField") } - } else { - args.FieldType = it.QualifiedName(ptr) - writeTemplate("valuePointerField") + continue + } + + // If a view type is already defined for 
the base type, use it as the field's view type. + if viewType := viewTypeForValueType(base); viewType != nil { + args.FieldType = it.QualifiedName(base) + args.FieldViewName = it.QualifiedName(viewType) + writeTemplate("viewField") + continue + } + + // Otherwise, if the unaliased base type is a named type whose view type will be generated by this viewer invocation, + // append the "View" suffix to the unaliased base type name and use it as the field's view type. + if base, ok := types.Unalias(base).(*types.Named); ok && slices.Contains(typeNames, it.QualifiedName(base)) { + baseTypeName := it.QualifiedName(base) + args.FieldType = baseTypeName + args.FieldViewName = appendNameSuffix(args.FieldType, "View") + writeTemplate("viewField") + continue } + + // Otherwise, if the base type does not require deep cloning, has no existing view type, + // and will not have a generated view type, use views.ValuePointer[T] as the field's view type. + // Its Get/GetOk methods return stack-allocated shallow copies of the field's value. + args.FieldType = it.QualifiedName(base) + writeTemplate("valuePointerField") continue case *types.Interface: // If fieldType is an interface with a "View() {ViewType}" method, it can be used to clone the field. @@ -405,6 +421,33 @@ func appendNameSuffix(name, suffix string) string { return name + suffix } +func typeNameOf(typ types.Type) (name *types.TypeName, ok bool) { + switch t := typ.(type) { + case *types.Alias: + return t.Obj(), true + case *types.Named: + return t.Obj(), true + default: + return nil, false + } +} + +func lookupViewType(typ types.Type) types.Type { + for { + if typeName, ok := typeNameOf(typ); ok && typeName.Pkg() != nil { + if viewTypeObj := typeName.Pkg().Scope().Lookup(typeName.Name() + "View"); viewTypeObj != nil { + return viewTypeObj.Type() + } + } + switch alias := typ.(type) { + case *types.Alias: + typ = alias.Rhs() + default: + return nil + } + } +} + func viewTypeForValueType(typ types.Type) types.Type { if ptr, ok := typ.(*types.Pointer); ok { return viewTypeForValueType(ptr.Elem()) @@ -417,7 +460,12 @@ func viewTypeForValueType(typ types.Type) types.Type { if !ok || sig.Results().Len() != 1 { return nil } - return sig.Results().At(0).Type() + viewType := sig.Results().At(0).Type() + // Check if the typ's package defines an alias for the view type, and use it if so. 
+ if viewTypeAlias, ok := lookupViewType(typ).(*types.Alias); ok && types.AssignableTo(viewType, viewTypeAlias) { + viewType = viewTypeAlias + } + return viewType } func viewTypeForContainerType(typ types.Type) (*types.Named, *types.Func) { diff --git a/control/controlclient/map.go b/control/controlclient/map.go index 97d49f90d..30c1da672 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -689,13 +689,11 @@ func peerChangeDiff(was tailcfg.NodeView, n *tailcfg.Node) (_ *tailcfg.PeerChang return nil, false } case "Online": - wasOnline := was.Online() - if n.Online != nil && wasOnline != nil && *n.Online != *wasOnline { + if wasOnline, ok := was.Online().GetOk(); ok && n.Online != nil && *n.Online != wasOnline { pc().Online = ptr.To(*n.Online) } case "LastSeen": - wasSeen := was.LastSeen() - if n.LastSeen != nil && wasSeen != nil && !wasSeen.Equal(*n.LastSeen) { + if wasSeen, ok := was.LastSeen().GetOk(); ok && n.LastSeen != nil && !wasSeen.Equal(*n.LastSeen) { pc().LastSeen = ptr.To(*n.LastSeen) } case "MachineAuthorized": @@ -720,18 +718,18 @@ func peerChangeDiff(was tailcfg.NodeView, n *tailcfg.Node) (_ *tailcfg.PeerChang } case "SelfNodeV4MasqAddrForThisPeer": va, vb := was.SelfNodeV4MasqAddrForThisPeer(), n.SelfNodeV4MasqAddrForThisPeer - if va == nil && vb == nil { + if !va.Valid() && vb == nil { continue } - if va == nil || vb == nil || *va != *vb { + if va, ok := va.GetOk(); !ok || vb == nil || va != *vb { return nil, false } case "SelfNodeV6MasqAddrForThisPeer": va, vb := was.SelfNodeV6MasqAddrForThisPeer(), n.SelfNodeV6MasqAddrForThisPeer - if va == nil && vb == nil { + if !va.Valid() && vb == nil { continue } - if va == nil || vb == nil || *va != *vb { + if va, ok := va.GetOk(); !ok || vb == nil || va != *vb { return nil, false } case "ExitNodeDNSResolvers": diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index fe3622ba4..8ae813ff2 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -347,8 +347,7 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem // TODO(oxtoacart): for some reason, this correctly // catches when a node goes from offline to online, // but not the other way around... 
- online := peer.Online() - if online == nil || !*online { + if !peer.Online().Get() { return false } diff --git a/ipn/ipnlocal/expiry_test.go b/ipn/ipnlocal/expiry_test.go index af1aa337b..a2b10fe32 100644 --- a/ipn/ipnlocal/expiry_test.go +++ b/ipn/ipnlocal/expiry_test.go @@ -283,11 +283,11 @@ func formatNodes(nodes []tailcfg.NodeView) string { } fmt.Fprintf(&sb, "(%d, %q", n.ID(), n.Name()) - if n.Online() != nil { - fmt.Fprintf(&sb, ", online=%v", *n.Online()) + if online, ok := n.Online().GetOk(); ok { + fmt.Fprintf(&sb, ", online=%v", online) } - if n.LastSeen() != nil { - fmt.Fprintf(&sb, ", lastSeen=%v", n.LastSeen().Unix()) + if lastSeen, ok := n.LastSeen().GetOk(); ok { + fmt.Fprintf(&sb, ", lastSeen=%v", lastSeen.Unix()) } if n.Key() != (key.NodePublic{}) { fmt.Fprintf(&sb, ", key=%v", n.Key().String()) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 3a2a22c58..4ebcd5d6d 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1117,13 +1117,9 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { } if !prefs.ExitNodeID().IsZero() { if exitPeer, ok := b.netMap.PeerWithStableID(prefs.ExitNodeID()); ok { - online := false - if v := exitPeer.Online(); v != nil { - online = *v - } s.ExitNodeStatus = &ipnstate.ExitNodeStatus{ ID: prefs.ExitNodeID(), - Online: online, + Online: exitPeer.Online().Get(), TailscaleIPs: exitPeer.Addresses().AsSlice(), } } @@ -1194,10 +1190,6 @@ func (b *LocalBackend) populatePeerStatusLocked(sb *ipnstate.StatusBuilder) { } exitNodeID := b.pm.CurrentPrefs().ExitNodeID() for _, p := range b.peers { - var lastSeen time.Time - if p.LastSeen() != nil { - lastSeen = *p.LastSeen() - } tailscaleIPs := make([]netip.Addr, 0, p.Addresses().Len()) for i := range p.Addresses().Len() { addr := p.Addresses().At(i) @@ -1205,7 +1197,6 @@ func (b *LocalBackend) populatePeerStatusLocked(sb *ipnstate.StatusBuilder) { tailscaleIPs = append(tailscaleIPs, addr.Addr()) } } - online := p.Online() ps := &ipnstate.PeerStatus{ InNetworkMap: true, UserID: p.User(), @@ -1214,12 +1205,12 @@ func (b *LocalBackend) populatePeerStatusLocked(sb *ipnstate.StatusBuilder) { HostName: p.Hostinfo().Hostname(), DNSName: p.Name(), OS: p.Hostinfo().OS(), - LastSeen: lastSeen, - Online: online != nil && *online, + LastSeen: p.LastSeen().Get(), + Online: p.Online().Get(), ShareeNode: p.Hostinfo().ShareeNode(), ExitNode: p.StableID() != "" && p.StableID() == exitNodeID, SSH_HostKeys: p.Hostinfo().SSH_HostKeys().AsSlice(), - Location: p.Hostinfo().Location(), + Location: p.Hostinfo().Location().AsStruct(), Capabilities: p.Capabilities().AsSlice(), } if cm := p.CapMap(); cm.Len() > 0 { @@ -7369,8 +7360,8 @@ func suggestExitNode(report *netcheck.Report, netMap *netmap.NetworkMap, prevSug if len(candidates) == 1 { peer := candidates[0] if hi := peer.Hostinfo(); hi.Valid() { - if loc := hi.Location(); loc != nil { - res.Location = loc.View() + if loc := hi.Location(); loc.Valid() { + res.Location = loc } } res.ID = peer.StableID() @@ -7414,10 +7405,10 @@ func suggestExitNode(report *netcheck.Report, netMap *netmap.NetworkMap, prevSug continue } loc := hi.Location() - if loc == nil { + if !loc.Valid() { continue } - distance := longLatDistance(preferredDERP.Latitude, preferredDERP.Longitude, loc.Latitude, loc.Longitude) + distance := longLatDistance(preferredDERP.Latitude, preferredDERP.Longitude, loc.Latitude(), loc.Longitude()) if distance < minDistance { minDistance = distance } @@ -7438,8 +7429,8 @@ func suggestExitNode(report *netcheck.Report, netMap 
*netmap.NetworkMap, prevSug res.ID = chosen.StableID() res.Name = chosen.Name() if hi := chosen.Hostinfo(); hi.Valid() { - if loc := hi.Location(); loc != nil { - res.Location = loc.View() + if loc := hi.Location(); loc.Valid() { + res.Location = loc } } return res, nil @@ -7468,8 +7459,8 @@ func suggestExitNode(report *netcheck.Report, netMap *netmap.NetworkMap, prevSug res.ID = chosen.StableID() res.Name = chosen.Name() if hi := chosen.Hostinfo(); hi.Valid() { - if loc := hi.Location(); loc != nil { - res.Location = loc.View() + if loc := hi.Location(); loc.Valid() { + res.Location = loc } } return res, nil @@ -7485,13 +7476,13 @@ func pickWeighted(candidates []tailcfg.NodeView) []tailcfg.NodeView { continue } loc := hi.Location() - if loc == nil || loc.Priority < maxWeight { + if !loc.Valid() || loc.Priority() < maxWeight { continue } - if maxWeight != loc.Priority { + if maxWeight != loc.Priority() { best = best[:0] } - maxWeight = loc.Priority + maxWeight = loc.Priority() best = append(best, c) } return best diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index aa18c3588..7aa677640 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -233,11 +233,11 @@ func (h *peerAPIHandler) logf(format string, a ...any) { // isAddressValid reports whether addr is a valid destination address for this // node originating from the peer. func (h *peerAPIHandler) isAddressValid(addr netip.Addr) bool { - if v := h.peerNode.SelfNodeV4MasqAddrForThisPeer(); v != nil { - return *v == addr + if v, ok := h.peerNode.SelfNodeV4MasqAddrForThisPeer().GetOk(); ok { + return v == addr } - if v := h.peerNode.SelfNodeV6MasqAddrForThisPeer(); v != nil { - return *v == addr + if v, ok := h.peerNode.SelfNodeV6MasqAddrForThisPeer().GetOk(); ok { + return v == addr } pfx := netip.PrefixFrom(addr, addr.BitLen()) return views.SliceContains(h.selfNode.Addresses(), pfx) diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 774a18258..53df3dcef 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -145,21 +145,11 @@ func (v NodeView) Created() time.Time { return v.ж.Create func (v NodeView) Cap() CapabilityVersion { return v.ж.Cap } func (v NodeView) Tags() views.Slice[string] { return views.SliceOf(v.ж.Tags) } func (v NodeView) PrimaryRoutes() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.PrimaryRoutes) } -func (v NodeView) LastSeen() *time.Time { - if v.ж.LastSeen == nil { - return nil - } - x := *v.ж.LastSeen - return &x +func (v NodeView) LastSeen() views.ValuePointer[time.Time] { + return views.ValuePointerOf(v.ж.LastSeen) } -func (v NodeView) Online() *bool { - if v.ж.Online == nil { - return nil - } - x := *v.ж.Online - return &x -} +func (v NodeView) Online() views.ValuePointer[bool] { return views.ValuePointerOf(v.ж.Online) } func (v NodeView) MachineAuthorized() bool { return v.ж.MachineAuthorized } func (v NodeView) Capabilities() views.Slice[NodeCapability] { return views.SliceOf(v.ж.Capabilities) } @@ -172,20 +162,12 @@ func (v NodeView) ComputedName() string { return v.ж.ComputedName } func (v NodeView) ComputedNameWithHost() string { return v.ж.ComputedNameWithHost } func (v NodeView) DataPlaneAuditLogID() string { return v.ж.DataPlaneAuditLogID } func (v NodeView) Expired() bool { return v.ж.Expired } -func (v NodeView) SelfNodeV4MasqAddrForThisPeer() *netip.Addr { - if v.ж.SelfNodeV4MasqAddrForThisPeer == nil { - return nil - } - x := *v.ж.SelfNodeV4MasqAddrForThisPeer - return &x +func (v NodeView) SelfNodeV4MasqAddrForThisPeer() 
views.ValuePointer[netip.Addr] { + return views.ValuePointerOf(v.ж.SelfNodeV4MasqAddrForThisPeer) } -func (v NodeView) SelfNodeV6MasqAddrForThisPeer() *netip.Addr { - if v.ж.SelfNodeV6MasqAddrForThisPeer == nil { - return nil - } - x := *v.ж.SelfNodeV6MasqAddrForThisPeer - return &x +func (v NodeView) SelfNodeV6MasqAddrForThisPeer() views.ValuePointer[netip.Addr] { + return views.ValuePointerOf(v.ж.SelfNodeV6MasqAddrForThisPeer) } func (v NodeView) IsWireGuardOnly() bool { return v.ж.IsWireGuardOnly } @@ -315,15 +297,8 @@ func (v HostinfoView) Userspace() opt.Bool { return v.ж.User func (v HostinfoView) UserspaceRouter() opt.Bool { return v.ж.UserspaceRouter } func (v HostinfoView) AppConnector() opt.Bool { return v.ж.AppConnector } func (v HostinfoView) ServicesHash() string { return v.ж.ServicesHash } -func (v HostinfoView) Location() *Location { - if v.ж.Location == nil { - return nil - } - x := *v.ж.Location - return &x -} - -func (v HostinfoView) Equal(v2 HostinfoView) bool { return v.ж.Equal(v2.ж) } +func (v HostinfoView) Location() LocationView { return v.ж.Location.View() } +func (v HostinfoView) Equal(v2 HostinfoView) bool { return v.ж.Equal(v2.ж) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _HostinfoViewNeedsRegeneration = Hostinfo(struct { @@ -699,12 +674,8 @@ func (v *RegisterResponseAuthView) UnmarshalJSON(b []byte) error { return nil } -func (v RegisterResponseAuthView) Oauth2Token() *Oauth2Token { - if v.ж.Oauth2Token == nil { - return nil - } - x := *v.ж.Oauth2Token - return &x +func (v RegisterResponseAuthView) Oauth2Token() views.ValuePointer[Oauth2Token] { + return views.ValuePointerOf(v.ж.Oauth2Token) } func (v RegisterResponseAuthView) AuthKey() string { return v.ж.AuthKey } @@ -774,12 +745,8 @@ func (v RegisterRequestView) NodeKeySignature() views.ByteSlice[tkatype.Marshale return views.ByteSliceOf(v.ж.NodeKeySignature) } func (v RegisterRequestView) SignatureType() SignatureType { return v.ж.SignatureType } -func (v RegisterRequestView) Timestamp() *time.Time { - if v.ж.Timestamp == nil { - return nil - } - x := *v.ж.Timestamp - return &x +func (v RegisterRequestView) Timestamp() views.ValuePointer[time.Time] { + return views.ValuePointerOf(v.ж.Timestamp) } func (v RegisterRequestView) DeviceCert() views.ByteSlice[[]byte] { @@ -1110,12 +1077,8 @@ func (v *SSHRuleView) UnmarshalJSON(b []byte) error { return nil } -func (v SSHRuleView) RuleExpires() *time.Time { - if v.ж.RuleExpires == nil { - return nil - } - x := *v.ж.RuleExpires - return &x +func (v SSHRuleView) RuleExpires() views.ValuePointer[time.Time] { + return views.ValuePointerOf(v.ж.RuleExpires) } func (v SSHRuleView) Principals() views.SliceView[*SSHPrincipal, SSHPrincipalView] { @@ -1189,12 +1152,8 @@ func (v SSHActionView) HoldAndDelegate() string { return v.ж.Hol func (v SSHActionView) AllowLocalPortForwarding() bool { return v.ж.AllowLocalPortForwarding } func (v SSHActionView) AllowRemotePortForwarding() bool { return v.ж.AllowRemotePortForwarding } func (v SSHActionView) Recorders() views.Slice[netip.AddrPort] { return views.SliceOf(v.ж.Recorders) } -func (v SSHActionView) OnRecordingFailure() *SSHRecorderFailureAction { - if v.ж.OnRecordingFailure == nil { - return nil - } - x := *v.ж.OnRecordingFailure - return &x +func (v SSHActionView) OnRecordingFailure() views.ValuePointer[SSHRecorderFailureAction] { + return views.ValuePointerOf(v.ж.OnRecordingFailure) } // A compilation failure here means this code must be regenerated, with 
the command at the top of this file. diff --git a/types/prefs/prefs_view_test.go b/types/prefs/prefs_view_test.go index d76eebb43..ef9f09603 100644 --- a/types/prefs/prefs_view_test.go +++ b/types/prefs/prefs_view_test.go @@ -162,15 +162,8 @@ func (v *TestBundleView) UnmarshalJSON(b []byte) error { return nil } -func (v TestBundleView) Name() string { return v.ж.Name } -func (v TestBundleView) Nested() *TestValueStruct { - if v.ж.Nested == nil { - return nil - } - x := *v.ж.Nested - return &x -} - +func (v TestBundleView) Name() string { return v.ж.Name } +func (v TestBundleView) Nested() TestValueStructView { return v.ж.Nested.View() } func (v TestBundleView) Equal(v2 TestBundleView) bool { return v.ж.Equal(v2.ж) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. diff --git a/types/views/views.go b/types/views/views.go index 40d8811f5..d8acf27ce 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -16,6 +16,7 @@ import ( "slices" "go4.org/mem" + "tailscale.com/types/ptr" ) func unmarshalSliceFromJSON[T any](b []byte, x *[]T) error { @@ -690,6 +691,85 @@ func (m MapFn[K, T, V]) All() iter.Seq2[K, V] { } } +// ValuePointer provides a read-only view of a pointer to a value type, +// such as a primitive type or an immutable struct. Its Get and GetOk +// methods return a stack-allocated shallow copy of the underlying value. +// It is the caller's responsibility to ensure that T +// is free from memory aliasing/mutation concerns. +type ValuePointer[T any] struct { + // ж is the underlying value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *T +} + +// Valid reports whether the underlying pointer is non-nil. +func (p ValuePointer[T]) Valid() bool { + return p.ж != nil +} + +// Get returns a shallow copy of the value if the underlying pointer is non-nil. +// Otherwise, it returns a zero value. +func (p ValuePointer[T]) Get() T { + v, _ := p.GetOk() + return v +} + +// GetOk returns a shallow copy of the underlying value and true if the underlying +// pointer is non-nil. Otherwise, it returns a zero value and false. +func (p ValuePointer[T]) GetOk() (value T, ok bool) { + if p.ж == nil { + return value, false // value holds a zero value + } + return *p.ж, true +} + +// GetOr returns a shallow copy of the underlying value if it is non-nil. +// Otherwise, it returns the provided default value. +func (p ValuePointer[T]) GetOr(def T) T { + if p.ж == nil { + return def + } + return *p.ж +} + +// Clone returns a shallow copy of the underlying value. +func (p ValuePointer[T]) Clone() *T { + if p.ж == nil { + return nil + } + return ptr.To(*p.ж) +} + +// String implements [fmt.Stringer]. +func (p ValuePointer[T]) String() string { + if p.ж == nil { + return "nil" + } + return fmt.Sprint(p.ж) +} + +// ValuePointerOf returns an immutable view of a pointer to an immutable value. +// It is the caller's responsibility to ensure that T +// is free from memory aliasing/mutation concerns. +func ValuePointerOf[T any](v *T) ValuePointer[T] { + return ValuePointer[T]{v} +} + +// MarshalJSON implements [json.Marshaler]. +func (p ValuePointer[T]) MarshalJSON() ([]byte, error) { + return json.Marshal(p.ж) +} + +// UnmarshalJSON implements [json.Unmarshaler].
+func (p *ValuePointer[T]) UnmarshalJSON(b []byte) error { + if p.ж != nil { + return errors.New("already initialized") + } + return json.Unmarshal(b, &p.ж) +} + // ContainsPointers reports whether T contains any pointers, // either explicitly or implicitly. // It has special handling for some types that contain pointers diff --git a/wgengine/pendopen.go b/wgengine/pendopen.go index 308c3ede2..f8e9198a5 100644 --- a/wgengine/pendopen.go +++ b/wgengine/pendopen.go @@ -239,15 +239,15 @@ func (e *userspaceEngine) onOpenTimeout(flow flowtrack.Tuple) { if n.IsWireGuardOnly() { online = "wg" } else { - if v := n.Online(); v != nil { - if *v { + if v, ok := n.Online().GetOk(); ok { + if v { online = "yes" } else { online = "no" } } - if n.LastSeen() != nil && online != "yes" { - online += fmt.Sprintf(", lastseen=%v", durFmt(*n.LastSeen())) + if lastSeen, ok := n.LastSeen().GetOk(); ok && online != "yes" { + online += fmt.Sprintf(", lastseen=%v", durFmt(lastSeen)) } } e.logf("open-conn-track: timeout opening %v to node %v; online=%v, lastRecv=%v", diff --git a/wgengine/wgcfg/nmcfg/nmcfg.go b/wgengine/wgcfg/nmcfg/nmcfg.go index e7d5edf15..97304aa41 100644 --- a/wgengine/wgcfg/nmcfg/nmcfg.go +++ b/wgengine/wgcfg/nmcfg/nmcfg.go @@ -106,8 +106,8 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, cpeer := &cfg.Peers[len(cfg.Peers)-1] didExitNodeWarn := false - cpeer.V4MasqAddr = peer.SelfNodeV4MasqAddrForThisPeer() - cpeer.V6MasqAddr = peer.SelfNodeV6MasqAddrForThisPeer() + cpeer.V4MasqAddr = peer.SelfNodeV4MasqAddrForThisPeer().Clone() + cpeer.V6MasqAddr = peer.SelfNodeV6MasqAddrForThisPeer().Clone() cpeer.IsJailed = peer.IsJailed() for _, allowedIP := range peer.AllowedIPs().All() { if allowedIP.Bits() == 0 && peer.StableID() != exitNode { From 414a01126a1c75564980d9077b1899f7e7956c9e Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 14 Jan 2025 08:02:58 -0800 Subject: [PATCH 0319/1708] go.mod: bump mdlayher/netlink and u-root/uio to use Go 1.21 NativeEndian This finishes the work started in #14616. 
Updates #8632 Change-Id: I4dc07d45b1e00c3db32217c03b21b8b1ec19e782 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 3 +-- cmd/k8s-operator/depaware.txt | 3 +-- cmd/tailscale/depaware.txt | 3 +-- cmd/tailscaled/depaware.txt | 3 +-- go.mod | 5 ++--- go.sum | 14 ++++---------- 6 files changed, 10 insertions(+), 21 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 076074f25..d4b406d9d 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -28,7 +28,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ L github.com/google/nftables/xt from github.com/google/nftables/expr+ github.com/hdevalence/ed25519consensus from tailscale.com/tka - L github.com/josharian/native from github.com/mdlayher/netlink+ L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ @@ -204,7 +203,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from net+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ - golang.org/x/sys/cpu from github.com/josharian/native+ + golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ LD golang.org/x/sys/unix from github.com/google/nftables+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 3489e5a60..f757cda18 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -147,7 +147,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ L github.com/insomniacslk/dhcp/rfc1035label from github.com/insomniacslk/dhcp/dhcpv4 L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm github.com/josharian/intern from github.com/mailru/easyjson/jlexer - L github.com/josharian/native from github.com/mdlayher/netlink+ L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink 💣 github.com/json-iterator/go from sigs.k8s.io/structured-merge-diff/v4/fieldpath+ @@ -877,7 +876,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/k8s-operator golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ - golang.org/x/sys/cpu from github.com/josharian/native+ + golang.org/x/sys/cpu from github.com/tailscale/certstore+ LD golang.org/x/sys/unix from github.com/fsnotify/fsnotify+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index ff2de13c0..e894e0674 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -26,7 +26,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/gorilla/csrf from tailscale.com/client/web github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L github.com/josharian/native from 
github.com/mdlayher/netlink+ L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/kballard/go-shellquote from tailscale.com/cmd/tailscale/cli @@ -219,7 +218,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/tailscale/cli golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ - golang.org/x/sys/cpu from github.com/josharian/native+ + golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ LD golang.org/x/sys/unix from github.com/google/nftables+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 4dad47421..19254b616 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -118,7 +118,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/insomniacslk/dhcp/rfc1035label from github.com/insomniacslk/dhcp/dhcpv4 github.com/jellydator/ttlcache/v3 from tailscale.com/drive/driveimpl/compositedav L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm - L github.com/josharian/native from github.com/mdlayher/netlink+ L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -464,7 +463,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de D golang.org/x/net/route from net+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sync/singleflight from github.com/jellydator/ttlcache/v3 - golang.org/x/sys/cpu from github.com/josharian/native+ + golang.org/x/sys/cpu from github.com/tailscale/certstore+ LD golang.org/x/sys/unix from github.com/google/nftables+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ diff --git a/go.mod b/go.mod index f3adfd47a..79374eb9c 100644 --- a/go.mod +++ b/go.mod @@ -59,7 +59,7 @@ require ( github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-isatty v0.0.20 github.com/mdlayher/genetlink v1.3.2 - github.com/mdlayher/netlink v1.7.2 + github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 github.com/mdlayher/sdnotify v1.0.0 github.com/miekg/dns v1.1.58 github.com/mitchellh/go-ps v1.0.0 @@ -151,7 +151,6 @@ require ( github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd // indirect github.com/gorilla/securecookie v1.1.2 // indirect github.com/jjti/go-spancheck v0.5.3 // indirect - github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 // indirect github.com/karamaru-alpha/copyloopvar v1.0.8 // indirect github.com/macabu/inamedparam v0.1.3 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect @@ -371,7 +370,7 @@ require ( github.com/timonwong/loggercheck v0.9.4 // indirect github.com/tomarrell/wrapcheck/v2 v2.8.3 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect - github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e // indirect + github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 // indirect github.com/ulikunitz/xz v0.5.11 // indirect github.com/ultraware/funlen v0.1.0 // indirect github.com/ultraware/whitespace v0.1.0 // 
indirect diff --git a/go.sum b/go.sum index be8f291a4..28315ad1e 100644 --- a/go.sum +++ b/go.sum @@ -594,9 +594,6 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= -github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk= -github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= @@ -684,8 +681,8 @@ github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= -github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= -github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= @@ -760,7 +757,6 @@ github.com/pelletier/go-toml/v2 v2.2.0 h1:QLgLl2yMN7N+ruc31VynXs1vhMZa7CeHHejIeB github.com/pelletier/go-toml/v2 v2.2.0/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= -github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= @@ -970,8 +966,8 @@ github.com/u-root/gobusybox/src v0.0.0-20231228173702-b69f654846aa h1:unMPGGK/CR github.com/u-root/gobusybox/src v0.0.0-20231228173702-b69f654846aa/go.mod h1:Zj4Tt22fJVn/nz/y6Ergm1SahR9dio1Zm/D2/S0TmXM= github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= -github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= -github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= +github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 
h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= +github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI= @@ -1222,7 +1218,6 @@ golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1231,7 +1226,6 @@ golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4= From cfda1ff70982f13594e1ae781e0464cb048fa931 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 14 Jan 2025 08:15:27 -0800 Subject: [PATCH 0320/1708] cmd/viewer,all: consistently use "read-only" instead of "readonly" Updates #cleanup Change-Id: I8e4e3497d3d0ec5b16a73aedda500fe5cfa37a67 Signed-off-by: Brad Fitzpatrick --- client/web/web.go | 14 ++-- cmd/viewer/tests/tests.go | 4 +- cmd/viewer/tests/tests_view.go | 44 +++++------ cmd/viewer/viewer.go | 6 +- drive/drive_view.go | 4 +- ipn/ipn_view.go | 24 +++--- ipn/ipnlocal/local.go | 2 +- tailcfg/tailcfg_view.go | 76 +++++++++---------- types/dnstype/dnstype_view.go | 4 +- types/persist/persist_view.go | 4 +- types/prefs/prefs.go | 4 +- .../prefs/prefs_example/prefs_example_view.go | 12 +-- types/prefs/prefs_view_test.go | 20 ++--- types/prefs/struct_map.go | 2 +- 14 files changed, 110 insertions(+), 110 deletions(-) diff --git a/client/web/web.go b/client/web/web.go index 1e338b735..4e4866923 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -89,8 +89,8 @@ type Server struct { type ServerMode string const ( - // LoginServerMode serves a readonly login client for logging a - // node into a tailnet, and viewing a readonly interface of the + // LoginServerMode serves a read-only login client for logging a + // node into a tailnet, and viewing a read-only interface of the // node's current Tailscale settings. // // In this mode, API calls are authenticated via platform auth. 
@@ -110,7 +110,7 @@ const ( // This mode restricts the app to only being assessible over Tailscale, // and API calls are authenticated via browser sessions associated with // the source's Tailscale identity. If the source browser does not have - // a valid session, a readonly version of the app is displayed. + // a valid session, a read-only version of the app is displayed. ManageServerMode ServerMode = "manage" ) @@ -695,16 +695,16 @@ func (s *Server) serveAPIAuth(w http.ResponseWriter, r *http.Request) { switch { case sErr != nil && errors.Is(sErr, errNotUsingTailscale): s.lc.IncrementCounter(r.Context(), "web_client_viewing_local", 1) - resp.Authorized = false // restricted to the readonly view + resp.Authorized = false // restricted to the read-only view case sErr != nil && errors.Is(sErr, errNotOwner): s.lc.IncrementCounter(r.Context(), "web_client_viewing_not_owner", 1) - resp.Authorized = false // restricted to the readonly view + resp.Authorized = false // restricted to the read-only view case sErr != nil && errors.Is(sErr, errTaggedLocalSource): s.lc.IncrementCounter(r.Context(), "web_client_viewing_local_tag", 1) - resp.Authorized = false // restricted to the readonly view + resp.Authorized = false // restricted to the read-only view case sErr != nil && errors.Is(sErr, errTaggedRemoteSource): s.lc.IncrementCounter(r.Context(), "web_client_viewing_remote_tag", 1) - resp.Authorized = false // restricted to the readonly view + resp.Authorized = false // restricted to the read-only view case sErr != nil && !errors.Is(sErr, errNoSession): // Any other error. http.Error(w, sErr.Error(), http.StatusInternalServerError) diff --git a/cmd/viewer/tests/tests.go b/cmd/viewer/tests/tests.go index ac094c53b..4020e5651 100644 --- a/cmd/viewer/tests/tests.go +++ b/cmd/viewer/tests/tests.go @@ -140,7 +140,7 @@ func (c *Container[T]) Clone() *Container[T] { panic(fmt.Errorf("%T contains pointers, but is not cloneable", c.Item)) } -// ContainerView is a pre-defined readonly view of a Container[T]. +// ContainerView is a pre-defined read-only view of a Container[T]. type ContainerView[T views.ViewCloner[T, V], V views.StructView[T]] struct { // ж is the underlying mutable value, named with a hard-to-type // character that looks pointy like a pointer. @@ -178,7 +178,7 @@ func (c *MapContainer[K, V]) Clone() *MapContainer[K, V] { return &MapContainer[K, V]{m} } -// MapContainerView is a pre-defined readonly view of a [MapContainer][K, T]. +// MapContainerView is a pre-defined read-only view of a [MapContainer][K, T]. type MapContainerView[K comparable, T views.ViewCloner[T, V], V views.StructView[T]] struct { // ж is the underlying mutable value, named with a hard-to-type // character that looks pointy like a pointer. diff --git a/cmd/viewer/tests/tests_view.go b/cmd/viewer/tests/tests_view.go index 41c1338ff..f1d8f424f 100644 --- a/cmd/viewer/tests/tests_view.go +++ b/cmd/viewer/tests/tests_view.go @@ -16,7 +16,7 @@ import ( //go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers,StructWithTypeAliasFields,GenericTypeAliasStruct -// View returns a readonly view of StructWithPtrs. +// View returns a read-only view of StructWithPtrs. 
func (p *StructWithPtrs) View() StructWithPtrsView { return StructWithPtrsView{ж: p} } @@ -32,7 +32,7 @@ type StructWithPtrsView struct { ж *StructWithPtrs } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v StructWithPtrsView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -80,7 +80,7 @@ var _StructWithPtrsViewNeedsRegeneration = StructWithPtrs(struct { NoCloneValue *StructWithoutPtrs }{}) -// View returns a readonly view of StructWithoutPtrs. +// View returns a read-only view of StructWithoutPtrs. func (p *StructWithoutPtrs) View() StructWithoutPtrsView { return StructWithoutPtrsView{ж: p} } @@ -96,7 +96,7 @@ type StructWithoutPtrsView struct { ж *StructWithoutPtrs } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v StructWithoutPtrsView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -134,7 +134,7 @@ var _StructWithoutPtrsViewNeedsRegeneration = StructWithoutPtrs(struct { Pfx netip.Prefix }{}) -// View returns a readonly view of Map. +// View returns a read-only view of Map. func (p *Map) View() MapView { return MapView{ж: p} } @@ -150,7 +150,7 @@ type MapView struct { ж *Map } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v MapView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -240,7 +240,7 @@ var _MapViewNeedsRegeneration = Map(struct { StructWithPtrKey map[StructWithPtrs]int }{}) -// View returns a readonly view of StructWithSlices. +// View returns a read-only view of StructWithSlices. func (p *StructWithSlices) View() StructWithSlicesView { return StructWithSlicesView{ж: p} } @@ -256,7 +256,7 @@ type StructWithSlicesView struct { ж *StructWithSlices } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v StructWithSlicesView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -314,7 +314,7 @@ var _StructWithSlicesViewNeedsRegeneration = StructWithSlices(struct { Ints []*int }{}) -// View returns a readonly view of StructWithEmbedded. +// View returns a read-only view of StructWithEmbedded. func (p *StructWithEmbedded) View() StructWithEmbeddedView { return StructWithEmbeddedView{ж: p} } @@ -330,7 +330,7 @@ type StructWithEmbeddedView struct { ж *StructWithEmbedded } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v StructWithEmbeddedView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -370,7 +370,7 @@ var _StructWithEmbeddedViewNeedsRegeneration = StructWithEmbedded(struct { StructWithSlices }{}) -// View returns a readonly view of GenericIntStruct. +// View returns a read-only view of GenericIntStruct. func (p *GenericIntStruct[T]) View() GenericIntStructView[T] { return GenericIntStructView[T]{ж: p} } @@ -386,7 +386,7 @@ type GenericIntStructView[T constraints.Integer] struct { ж *GenericIntStruct[T] } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. 
func (v GenericIntStructView[T]) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -442,7 +442,7 @@ func _GenericIntStructViewNeedsRegeneration[T constraints.Integer](GenericIntStr }{}) } -// View returns a readonly view of GenericNoPtrsStruct. +// View returns a read-only view of GenericNoPtrsStruct. func (p *GenericNoPtrsStruct[T]) View() GenericNoPtrsStructView[T] { return GenericNoPtrsStructView[T]{ж: p} } @@ -458,7 +458,7 @@ type GenericNoPtrsStructView[T StructWithoutPtrs | netip.Prefix | BasicType] str ж *GenericNoPtrsStruct[T] } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v GenericNoPtrsStructView[T]) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -514,7 +514,7 @@ func _GenericNoPtrsStructViewNeedsRegeneration[T StructWithoutPtrs | netip.Prefi }{}) } -// View returns a readonly view of GenericCloneableStruct. +// View returns a read-only view of GenericCloneableStruct. func (p *GenericCloneableStruct[T, V]) View() GenericCloneableStructView[T, V] { return GenericCloneableStructView[T, V]{ж: p} } @@ -530,7 +530,7 @@ type GenericCloneableStructView[T views.ViewCloner[T, V], V views.StructView[T]] ж *GenericCloneableStruct[T, V] } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v GenericCloneableStructView[T, V]) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -589,7 +589,7 @@ func _GenericCloneableStructViewNeedsRegeneration[T views.ViewCloner[T, V], V vi }{}) } -// View returns a readonly view of StructWithContainers. +// View returns a read-only view of StructWithContainers. func (p *StructWithContainers) View() StructWithContainersView { return StructWithContainersView{ж: p} } @@ -605,7 +605,7 @@ type StructWithContainersView struct { ж *StructWithContainers } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v StructWithContainersView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -661,7 +661,7 @@ var _StructWithContainersViewNeedsRegeneration = StructWithContainers(struct { CloneableGenericMap MapContainer[int, *GenericNoPtrsStruct[int]] }{}) -// View returns a readonly view of StructWithTypeAliasFields. +// View returns a read-only view of StructWithTypeAliasFields. func (p *StructWithTypeAliasFields) View() StructWithTypeAliasFieldsView { return StructWithTypeAliasFieldsView{ж: p} } @@ -677,7 +677,7 @@ type StructWithTypeAliasFieldsView struct { ж *StructWithTypeAliasFields } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v StructWithTypeAliasFieldsView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -759,7 +759,7 @@ var _StructWithTypeAliasFieldsViewNeedsRegeneration = StructWithTypeAliasFields( MapOfSlicesWithoutPtrs map[string][]*StructWithoutPtrsAlias }{}) -// View returns a readonly view of GenericTypeAliasStruct. +// View returns a read-only view of GenericTypeAliasStruct. 
func (p *GenericTypeAliasStruct[T, T2, V2]) View() GenericTypeAliasStructView[T, T2, V2] { return GenericTypeAliasStructView[T, T2, V2]{ж: p} } @@ -775,7 +775,7 @@ type GenericTypeAliasStructView[T integer, T2 views.ViewCloner[T2, V2], V2 views ж *GenericTypeAliasStruct[T, T2, V2] } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v GenericTypeAliasStructView[T, T2, V2]) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with diff --git a/cmd/viewer/viewer.go b/cmd/viewer/viewer.go index e265defe0..2d30cc2eb 100644 --- a/cmd/viewer/viewer.go +++ b/cmd/viewer/viewer.go @@ -21,7 +21,7 @@ import ( ) const viewTemplateStr = `{{define "common"}} -// View returns a readonly view of {{.StructName}}. +// View returns a read-only view of {{.StructName}}. func (p *{{.StructName}}{{.TypeParamNames}}) View() {{.ViewName}}{{.TypeParamNames}} { return {{.ViewName}}{{.TypeParamNames}}{ж: p} } @@ -37,7 +37,7 @@ type {{.ViewName}}{{.TypeParams}} struct { ж *{{.StructName}}{{.TypeParamNames}} } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v {{.ViewName}}{{.TypeParamNames}}) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -143,7 +143,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * MapValueView string MapFn string - // MakeViewFnName is the name of the function that accepts a value and returns a readonly view of it. + // MakeViewFnName is the name of the function that accepts a value and returns a read-only view of it. MakeViewFnName string }{ StructName: typ.Obj().Name(), diff --git a/drive/drive_view.go b/drive/drive_view.go index a6adfbc70..0f6686f24 100644 --- a/drive/drive_view.go +++ b/drive/drive_view.go @@ -14,7 +14,7 @@ import ( //go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=Share -// View returns a readonly view of Share. +// View returns a read-only view of Share. func (p *Share) View() ShareView { return ShareView{ж: p} } @@ -30,7 +30,7 @@ type ShareView struct { ж *Share } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v ShareView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index bc67531e4..9cd5a466a 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -20,7 +20,7 @@ import ( //go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=Prefs,ServeConfig,ServiceConfig,TCPPortHandler,HTTPHandler,WebServerConfig -// View returns a readonly view of Prefs. +// View returns a read-only view of Prefs. func (p *Prefs) View() PrefsView { return PrefsView{ж: p} } @@ -36,7 +36,7 @@ type PrefsView struct { ж *Prefs } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v PrefsView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -138,7 +138,7 @@ var _PrefsViewNeedsRegeneration = Prefs(struct { Persist *persist.Persist }{}) -// View returns a readonly view of ServeConfig. +// View returns a read-only view of ServeConfig. 
func (p *ServeConfig) View() ServeConfigView { return ServeConfigView{ж: p} } @@ -154,7 +154,7 @@ type ServeConfigView struct { ж *ServeConfig } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v ServeConfigView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -222,7 +222,7 @@ var _ServeConfigViewNeedsRegeneration = ServeConfig(struct { ETag string }{}) -// View returns a readonly view of ServiceConfig. +// View returns a read-only view of ServiceConfig. func (p *ServiceConfig) View() ServiceConfigView { return ServiceConfigView{ж: p} } @@ -238,7 +238,7 @@ type ServiceConfigView struct { ж *ServiceConfig } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v ServiceConfigView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -287,7 +287,7 @@ var _ServiceConfigViewNeedsRegeneration = ServiceConfig(struct { Tun bool }{}) -// View returns a readonly view of TCPPortHandler. +// View returns a read-only view of TCPPortHandler. func (p *TCPPortHandler) View() TCPPortHandlerView { return TCPPortHandlerView{ж: p} } @@ -303,7 +303,7 @@ type TCPPortHandlerView struct { ж *TCPPortHandler } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v TCPPortHandlerView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -345,7 +345,7 @@ var _TCPPortHandlerViewNeedsRegeneration = TCPPortHandler(struct { TerminateTLS string }{}) -// View returns a readonly view of HTTPHandler. +// View returns a read-only view of HTTPHandler. func (p *HTTPHandler) View() HTTPHandlerView { return HTTPHandlerView{ж: p} } @@ -361,7 +361,7 @@ type HTTPHandlerView struct { ж *HTTPHandler } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v HTTPHandlerView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -401,7 +401,7 @@ var _HTTPHandlerViewNeedsRegeneration = HTTPHandler(struct { Text string }{}) -// View returns a readonly view of WebServerConfig. +// View returns a read-only view of WebServerConfig. func (p *WebServerConfig) View() WebServerConfigView { return WebServerConfigView{ж: p} } @@ -417,7 +417,7 @@ type WebServerConfigView struct { ж *WebServerConfig } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v WebServerConfigView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 4ebcd5d6d..d33e2c9ee 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3952,7 +3952,7 @@ func (b *LocalBackend) wantIngressLocked() bool { // setPrefsLockedOnEntry requires b.mu be held to call it, but it // unlocks b.mu when done. newp ownership passes to this function. -// It returns a readonly copy of the new prefs. +// It returns a read-only copy of the new prefs. 
func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) ipn.PrefsView { defer unlock() diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 53df3dcef..8edd19c83 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -21,7 +21,7 @@ import ( //go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile -// View returns a readonly view of User. +// View returns a read-only view of User. func (p *User) View() UserView { return UserView{ж: p} } @@ -37,7 +37,7 @@ type UserView struct { ж *User } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v UserView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -79,7 +79,7 @@ var _UserViewNeedsRegeneration = User(struct { Created time.Time }{}) -// View returns a readonly view of Node. +// View returns a read-only view of Node. func (p *Node) View() NodeView { return NodeView{ж: p} } @@ -95,7 +95,7 @@ type NodeView struct { ж *Node } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v NodeView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -216,7 +216,7 @@ var _NodeViewNeedsRegeneration = Node(struct { ExitNodeDNSResolvers []*dnstype.Resolver }{}) -// View returns a readonly view of Hostinfo. +// View returns a read-only view of Hostinfo. func (p *Hostinfo) View() HostinfoView { return HostinfoView{ж: p} } @@ -232,7 +232,7 @@ type HostinfoView struct { ж *Hostinfo } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v HostinfoView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -341,7 +341,7 @@ var _HostinfoViewNeedsRegeneration = Hostinfo(struct { Location *Location }{}) -// View returns a readonly view of NetInfo. +// View returns a read-only view of NetInfo. func (p *NetInfo) View() NetInfoView { return NetInfoView{ж: p} } @@ -357,7 +357,7 @@ type NetInfoView struct { ж *NetInfo } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v NetInfoView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -421,7 +421,7 @@ var _NetInfoViewNeedsRegeneration = NetInfo(struct { FirewallMode string }{}) -// View returns a readonly view of Login. +// View returns a read-only view of Login. func (p *Login) View() LoginView { return LoginView{ж: p} } @@ -437,7 +437,7 @@ type LoginView struct { ж *Login } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v LoginView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -482,7 +482,7 @@ var _LoginViewNeedsRegeneration = Login(struct { ProfilePicURL string }{}) -// View returns a readonly view of DNSConfig. +// View returns a read-only view of DNSConfig. 
func (p *DNSConfig) View() DNSConfigView { return DNSConfigView{ж: p} } @@ -498,7 +498,7 @@ type DNSConfigView struct { ж *DNSConfig } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v DNSConfigView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -563,7 +563,7 @@ var _DNSConfigViewNeedsRegeneration = DNSConfig(struct { TempCorpIssue13969 string }{}) -// View returns a readonly view of RegisterResponse. +// View returns a read-only view of RegisterResponse. func (p *RegisterResponse) View() RegisterResponseView { return RegisterResponseView{ж: p} } @@ -579,7 +579,7 @@ type RegisterResponseView struct { ж *RegisterResponse } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v RegisterResponseView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -629,7 +629,7 @@ var _RegisterResponseViewNeedsRegeneration = RegisterResponse(struct { Error string }{}) -// View returns a readonly view of RegisterResponseAuth. +// View returns a read-only view of RegisterResponseAuth. func (p *RegisterResponseAuth) View() RegisterResponseAuthView { return RegisterResponseAuthView{ж: p} } @@ -645,7 +645,7 @@ type RegisterResponseAuthView struct { ж *RegisterResponseAuth } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v RegisterResponseAuthView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -687,7 +687,7 @@ var _RegisterResponseAuthViewNeedsRegeneration = RegisterResponseAuth(struct { AuthKey string }{}) -// View returns a readonly view of RegisterRequest. +// View returns a read-only view of RegisterRequest. func (p *RegisterRequest) View() RegisterRequestView { return RegisterRequestView{ж: p} } @@ -703,7 +703,7 @@ type RegisterRequestView struct { ж *RegisterRequest } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v RegisterRequestView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -777,7 +777,7 @@ var _RegisterRequestViewNeedsRegeneration = RegisterRequest(struct { Tailnet string }{}) -// View returns a readonly view of DERPHomeParams. +// View returns a read-only view of DERPHomeParams. func (p *DERPHomeParams) View() DERPHomeParamsView { return DERPHomeParamsView{ж: p} } @@ -793,7 +793,7 @@ type DERPHomeParamsView struct { ж *DERPHomeParams } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v DERPHomeParamsView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -831,7 +831,7 @@ var _DERPHomeParamsViewNeedsRegeneration = DERPHomeParams(struct { RegionScore map[int]float64 }{}) -// View returns a readonly view of DERPRegion. +// View returns a read-only view of DERPRegion. func (p *DERPRegion) View() DERPRegionView { return DERPRegionView{ж: p} } @@ -847,7 +847,7 @@ type DERPRegionView struct { ж *DERPRegion } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. 
func (v DERPRegionView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -897,7 +897,7 @@ var _DERPRegionViewNeedsRegeneration = DERPRegion(struct { Nodes []*DERPNode }{}) -// View returns a readonly view of DERPMap. +// View returns a read-only view of DERPMap. func (p *DERPMap) View() DERPMapView { return DERPMapView{ж: p} } @@ -913,7 +913,7 @@ type DERPMapView struct { ж *DERPMap } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v DERPMapView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -958,7 +958,7 @@ var _DERPMapViewNeedsRegeneration = DERPMap(struct { OmitDefaultRegions bool }{}) -// View returns a readonly view of DERPNode. +// View returns a read-only view of DERPNode. func (p *DERPNode) View() DERPNodeView { return DERPNodeView{ж: p} } @@ -974,7 +974,7 @@ type DERPNodeView struct { ж *DERPNode } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v DERPNodeView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -1032,7 +1032,7 @@ var _DERPNodeViewNeedsRegeneration = DERPNode(struct { CanPort80 bool }{}) -// View returns a readonly view of SSHRule. +// View returns a read-only view of SSHRule. func (p *SSHRule) View() SSHRuleView { return SSHRuleView{ж: p} } @@ -1048,7 +1048,7 @@ type SSHRuleView struct { ж *SSHRule } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v SSHRuleView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -1098,7 +1098,7 @@ var _SSHRuleViewNeedsRegeneration = SSHRule(struct { AcceptEnv []string }{}) -// View returns a readonly view of SSHAction. +// View returns a read-only view of SSHAction. func (p *SSHAction) View() SSHActionView { return SSHActionView{ж: p} } @@ -1114,7 +1114,7 @@ type SSHActionView struct { ж *SSHAction } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v SSHActionView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -1170,7 +1170,7 @@ var _SSHActionViewNeedsRegeneration = SSHAction(struct { OnRecordingFailure *SSHRecorderFailureAction }{}) -// View returns a readonly view of SSHPrincipal. +// View returns a read-only view of SSHPrincipal. func (p *SSHPrincipal) View() SSHPrincipalView { return SSHPrincipalView{ж: p} } @@ -1186,7 +1186,7 @@ type SSHPrincipalView struct { ж *SSHPrincipal } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v SSHPrincipalView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -1232,7 +1232,7 @@ var _SSHPrincipalViewNeedsRegeneration = SSHPrincipal(struct { UnusedPubKeys []string }{}) -// View returns a readonly view of ControlDialPlan. +// View returns a read-only view of ControlDialPlan. func (p *ControlDialPlan) View() ControlDialPlanView { return ControlDialPlanView{ж: p} } @@ -1248,7 +1248,7 @@ type ControlDialPlanView struct { ж *ControlDialPlan } -// Valid reports whether underlying value is non-nil. 
+// Valid reports whether v's underlying value is non-nil. func (v ControlDialPlanView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -1286,7 +1286,7 @@ var _ControlDialPlanViewNeedsRegeneration = ControlDialPlan(struct { Candidates []ControlIPCandidate }{}) -// View returns a readonly view of Location. +// View returns a read-only view of Location. func (p *Location) View() LocationView { return LocationView{ж: p} } @@ -1302,7 +1302,7 @@ type LocationView struct { ж *Location } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v LocationView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -1350,7 +1350,7 @@ var _LocationViewNeedsRegeneration = Location(struct { Priority int }{}) -// View returns a readonly view of UserProfile. +// View returns a read-only view of UserProfile. func (p *UserProfile) View() UserProfileView { return UserProfileView{ж: p} } @@ -1366,7 +1366,7 @@ type UserProfileView struct { ж *UserProfile } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v UserProfileView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with diff --git a/types/dnstype/dnstype_view.go b/types/dnstype/dnstype_view.go index c0e2b28ff..c77ff9a40 100644 --- a/types/dnstype/dnstype_view.go +++ b/types/dnstype/dnstype_view.go @@ -15,7 +15,7 @@ import ( //go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=Resolver -// View returns a readonly view of Resolver. +// View returns a read-only view of Resolver. func (p *Resolver) View() ResolverView { return ResolverView{ж: p} } @@ -31,7 +31,7 @@ type ResolverView struct { ж *Resolver } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v ResolverView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with diff --git a/types/persist/persist_view.go b/types/persist/persist_view.go index 1d479b3bf..ce600be3e 100644 --- a/types/persist/persist_view.go +++ b/types/persist/persist_view.go @@ -17,7 +17,7 @@ import ( //go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=Persist -// View returns a readonly view of Persist. +// View returns a read-only view of Persist. func (p *Persist) View() PersistView { return PersistView{ж: p} } @@ -33,7 +33,7 @@ type PersistView struct { ж *Persist } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v PersistView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with diff --git a/types/prefs/prefs.go b/types/prefs/prefs.go index 3bbd237fe..4f7902077 100644 --- a/types/prefs/prefs.go +++ b/types/prefs/prefs.go @@ -29,8 +29,8 @@ import ( var ( // ErrManaged is the error returned when attempting to modify a managed preference. ErrManaged = errors.New("cannot modify a managed preference") - // ErrReadOnly is the error returned when attempting to modify a readonly preference. - ErrReadOnly = errors.New("cannot modify a readonly preference") + // ErrReadOnly is the error returned when attempting to modify a read-only preference. 
+ ErrReadOnly = errors.New("cannot modify a read-only preference") ) // metadata holds type-agnostic preference metadata. diff --git a/types/prefs/prefs_example/prefs_example_view.go b/types/prefs/prefs_example/prefs_example_view.go index 0256bd7e6..9aaac6e9c 100644 --- a/types/prefs/prefs_example/prefs_example_view.go +++ b/types/prefs/prefs_example/prefs_example_view.go @@ -20,7 +20,7 @@ import ( //go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=Prefs,AutoUpdatePrefs,AppConnectorPrefs -// View returns a readonly view of Prefs. +// View returns a read-only view of Prefs. func (p *Prefs) View() PrefsView { return PrefsView{ж: p} } @@ -36,7 +36,7 @@ type PrefsView struct { ж *Prefs } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v PrefsView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -132,7 +132,7 @@ var _PrefsViewNeedsRegeneration = Prefs(struct { Persist *persist.Persist }{}) -// View returns a readonly view of AutoUpdatePrefs. +// View returns a read-only view of AutoUpdatePrefs. func (p *AutoUpdatePrefs) View() AutoUpdatePrefsView { return AutoUpdatePrefsView{ж: p} } @@ -148,7 +148,7 @@ type AutoUpdatePrefsView struct { ж *AutoUpdatePrefs } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v AutoUpdatePrefsView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -186,7 +186,7 @@ var _AutoUpdatePrefsViewNeedsRegeneration = AutoUpdatePrefs(struct { Apply prefs.Item[opt.Bool] }{}) -// View returns a readonly view of AppConnectorPrefs. +// View returns a read-only view of AppConnectorPrefs. func (p *AppConnectorPrefs) View() AppConnectorPrefsView { return AppConnectorPrefsView{ж: p} } @@ -202,7 +202,7 @@ type AppConnectorPrefsView struct { ж *AppConnectorPrefs } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v AppConnectorPrefsView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with diff --git a/types/prefs/prefs_view_test.go b/types/prefs/prefs_view_test.go index ef9f09603..f6cfc918d 100644 --- a/types/prefs/prefs_view_test.go +++ b/types/prefs/prefs_view_test.go @@ -13,7 +13,7 @@ import ( //go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=TestPrefs,TestBundle,TestValueStruct,TestGenericStruct,TestPrefsGroup -tags=test -// View returns a readonly view of TestPrefs. +// View returns a read-only view of TestPrefs. func (p *TestPrefs) View() TestPrefsView { return TestPrefsView{ж: p} } @@ -29,7 +29,7 @@ type TestPrefsView struct { ж *TestPrefs } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v TestPrefsView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -117,7 +117,7 @@ var _TestPrefsViewNeedsRegeneration = TestPrefs(struct { Group TestPrefsGroup }{}) -// View returns a readonly view of TestBundle. +// View returns a read-only view of TestBundle. func (p *TestBundle) View() TestBundleView { return TestBundleView{ж: p} } @@ -133,7 +133,7 @@ type TestBundleView struct { ж *TestBundle } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. 
func (v TestBundleView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -172,7 +172,7 @@ var _TestBundleViewNeedsRegeneration = TestBundle(struct { Nested *TestValueStruct }{}) -// View returns a readonly view of TestValueStruct. +// View returns a read-only view of TestValueStruct. func (p *TestValueStruct) View() TestValueStructView { return TestValueStructView{ж: p} } @@ -188,7 +188,7 @@ type TestValueStructView struct { ж *TestValueStruct } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v TestValueStructView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -225,7 +225,7 @@ var _TestValueStructViewNeedsRegeneration = TestValueStruct(struct { Value int }{}) -// View returns a readonly view of TestGenericStruct. +// View returns a read-only view of TestGenericStruct. func (p *TestGenericStruct[T]) View() TestGenericStructView[T] { return TestGenericStructView[T]{ж: p} } @@ -241,7 +241,7 @@ type TestGenericStructView[T ImmutableType] struct { ж *TestGenericStruct[T] } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v TestGenericStructView[T]) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with @@ -280,7 +280,7 @@ func _TestGenericStructViewNeedsRegeneration[T ImmutableType](TestGenericStruct[ }{}) } -// View returns a readonly view of TestPrefsGroup. +// View returns a read-only view of TestPrefsGroup. func (p *TestPrefsGroup) View() TestPrefsGroupView { return TestPrefsGroupView{ж: p} } @@ -296,7 +296,7 @@ type TestPrefsGroupView struct { ж *TestPrefsGroup } -// Valid reports whether underlying value is non-nil. +// Valid reports whether v's underlying value is non-nil. func (v TestPrefsGroupView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with diff --git a/types/prefs/struct_map.go b/types/prefs/struct_map.go index 2003eebe3..4d55da7a0 100644 --- a/types/prefs/struct_map.go +++ b/types/prefs/struct_map.go @@ -83,7 +83,7 @@ type StructMapView[K MapKeyType, T views.ViewCloner[T, V], V views.StructView[T] ж *StructMap[K, T] } -// StructMapViewOf returns a readonly view of m. +// StructMapViewOf returns a read-only view of m. // It is used by [tailscale.com/cmd/viewer]. func StructMapViewOf[K MapKeyType, T views.ViewCloner[T, V], V views.StructView[T]](m *StructMap[K, T]) StructMapView[K, T, V] { return StructMapView[K, T, V]{m} From 66269dc934058cd41ddff7a5fd94c6425dbb28f8 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 14 Jan 2025 11:04:55 -0600 Subject: [PATCH 0321/1708] ipn/ipnlocal: allow Peer API access via either V4MasqAddr or V6MasqAddr when both are set This doesn't seem to have any immediate impact, but not allowing access via the IPv6 masquerade address when an IPv4 masquerade address is also set seems like a bug. 
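To make the behavior change concrete, here is an illustrative, self-contained sketch (not taken from the patch; the real check lives on peerAPIHandler and also falls back to the node's own addresses when no masquerade address is set). The masquerade addresses below are hypothetical values:

package main

import (
	"fmt"
	"net/netip"
)

// isValidOld models the previous logic: when a V4 masquerade address is set,
// it is the only address ever accepted, even if a V6 masquerade address is
// also configured.
func isValidOld(addr, v4Masq, v6Masq netip.Addr) bool {
	if v4Masq.IsValid() {
		return addr == v4Masq
	}
	if v6Masq.IsValid() {
		return addr == v6Masq
	}
	return false
}

// isValidNew models the patched logic: if either masquerade address is set,
// the destination may match either one.
func isValidNew(addr, v4Masq, v6Masq netip.Addr) bool {
	if v4Masq.IsValid() || v6Masq.IsValid() {
		return addr == v4Masq || addr == v6Masq
	}
	return false
}

func main() {
	v4 := netip.MustParseAddr("100.100.100.100")   // hypothetical V4MasqAddr
	v6 := netip.MustParseAddr("fd7a:115c:a1e0::1") // hypothetical V6MasqAddr
	fmt.Println(isValidOld(v6, v4, v6)) // false: the V6 masquerade address was rejected
	fmt.Println(isValidNew(v6, v4, v6)) // true: either masquerade address is now accepted
}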
Updates #cleanup Updates #14570 (found when working on it) Signed-off-by: Nick Khyl --- ipn/ipnlocal/peerapi.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 7aa677640..4d0548917 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -233,11 +233,13 @@ func (h *peerAPIHandler) logf(format string, a ...any) { // isAddressValid reports whether addr is a valid destination address for this // node originating from the peer. func (h *peerAPIHandler) isAddressValid(addr netip.Addr) bool { - if v, ok := h.peerNode.SelfNodeV4MasqAddrForThisPeer().GetOk(); ok { - return v == addr + if !addr.IsValid() { + return false } - if v, ok := h.peerNode.SelfNodeV6MasqAddrForThisPeer().GetOk(); ok { - return v == addr + v4MasqAddr, hasMasqV4 := h.peerNode.SelfNodeV4MasqAddrForThisPeer().GetOk() + v6MasqAddr, hasMasqV6 := h.peerNode.SelfNodeV6MasqAddrForThisPeer().GetOk() + if hasMasqV4 || hasMasqV6 { + return addr == v4MasqAddr || addr == v6MasqAddr } pfx := netip.PrefixFrom(addr, addr.BitLen()) return views.SliceContains(h.selfNode.Addresses(), pfx) From 2fc4455e6dd9ab7f879d4e2f7cffc2be81f14077 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 14 Jan 2025 10:19:52 -0800 Subject: [PATCH 0322/1708] all: add Node.HomeDERP int, phase out "127.3.3.40:$region" hack [capver 111] This deprecates the old "DERP string" packing a DERP region ID into an IP:port of 127.3.3.40:$REGION_ID and just uses an integer, like PeerChange.DERPRegion does. We still support servers sending the old form; they're converted to the new form internally right when they're read off the network. Updates #14636 Change-Id: I9427ec071f02a2c6d75ccb0fcbf0ecff9f19f26f Signed-off-by: Brad Fitzpatrick --- control/controlbase/conn_test.go | 2 +- control/controlclient/map.go | 49 ++++++++--- control/controlclient/map_test.go | 83 +++++++++++++++---- ipn/ipnlocal/expiry.go | 2 +- ipn/ipnlocal/local.go | 10 +-- ipn/ipnlocal/local_test.go | 12 +-- tailcfg/tailcfg.go | 25 ++++-- tailcfg/tailcfg_clone.go | 3 +- tailcfg/tailcfg_test.go | 11 ++- tailcfg/tailcfg_view.go | 6 +- tstest/integration/testcontrol/testcontrol.go | 2 +- types/netmap/netmap.go | 9 +- types/netmap/netmap_test.go | 36 ++++---- types/netmap/nodemut.go | 3 +- wgengine/magicsock/endpoint.go | 4 +- wgengine/magicsock/magicsock.go | 5 +- wgengine/magicsock/magicsock_test.go | 2 +- wgengine/pendopen.go | 2 +- wgengine/wgcfg/nmcfg/nmcfg.go | 2 +- 19 files changed, 171 insertions(+), 97 deletions(-) diff --git a/control/controlbase/conn_test.go b/control/controlbase/conn_test.go index 8a0f46967..ed4642d3b 100644 --- a/control/controlbase/conn_test.go +++ b/control/controlbase/conn_test.go @@ -280,7 +280,7 @@ func TestConnMemoryOverhead(t *testing.T) { growthTotal := int64(ms.HeapAlloc) - int64(ms0.HeapAlloc) growthEach := float64(growthTotal) / float64(num) t.Logf("Alloced %v bytes, %.2f B/each", growthTotal, growthEach) - const max = 2000 + const max = 2048 if growthEach > max { t.Errorf("allocated more than expected; want max %v bytes/each", max) } diff --git a/control/controlclient/map.go b/control/controlclient/map.go index 30c1da672..d5fd84c6d 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -7,7 +7,6 @@ import ( "cmp" "context" "encoding/json" - "fmt" "maps" "net" "reflect" @@ -166,6 +165,7 @@ func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *t // For responses that mutate the self node, check for updated nodeAttrs. 
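	// For illustration: the legacy wire form packs the home DERP region ID
	// into a fake IP:port, so "127.3.3.40:17" means region 17. A minimal
	// standalone sketch of the conversion that upgradeNode (below) performs,
	// using a hypothetical helper name and only the standard library
	// (net, strconv):
	//
	//	func legacyDERPRegion(s string) (region int, ok bool) {
	//		ip, portStr, err := net.SplitHostPort(s) // e.g. "127.3.3.40:17"
	//		if err != nil || ip != "127.3.3.40" {    // tailcfg.DerpMagicIP
	//			return 0, false
	//		}
	//		r, err := strconv.Atoi(portStr)
	//		if err != nil {
	//			return 0, false
	//		}
	//		return r, true // stored as Node.HomeDERP
	//	}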
if resp.Node != nil { + upgradeNode(resp.Node) if DevKnob.StripCaps() { resp.Node.Capabilities = nil resp.Node.CapMap = nil @@ -181,6 +181,13 @@ func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *t ms.controlKnobs.UpdateFromNodeAttributes(resp.Node.CapMap) } + for _, p := range resp.Peers { + upgradeNode(p) + } + for _, p := range resp.PeersChanged { + upgradeNode(p) + } + // Call Node.InitDisplayNames on any changed nodes. initDisplayNames(cmp.Or(resp.Node.View(), ms.lastNode), resp) @@ -216,6 +223,26 @@ func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *t return nil } +// upgradeNode upgrades Node fields from the server into the modern forms +// not using deprecated fields. +func upgradeNode(n *tailcfg.Node) { + if n == nil { + return + } + if n.LegacyDERPString != "" { + if n.HomeDERP == 0 { + ip, portStr, err := net.SplitHostPort(n.LegacyDERPString) + if ip == tailcfg.DerpMagicIP && err == nil { + port, err := strconv.Atoi(portStr) + if err == nil { + n.HomeDERP = port + } + } + } + n.LegacyDERPString = "" + } +} + func (ms *mapSession) tryHandleIncrementally(res *tailcfg.MapResponse) bool { if ms.controlKnobs != nil && ms.controlKnobs.DisableDeltaUpdates.Load() { return false @@ -443,7 +470,7 @@ func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (s stats.changed++ mut := vp.AsStruct() if pc.DERPRegion != 0 { - mut.DERP = fmt.Sprintf("%s:%v", tailcfg.DerpMagicIP, pc.DERPRegion) + mut.HomeDERP = pc.DERPRegion patchDERPRegion.Add(1) } if pc.Cap != 0 { @@ -631,17 +658,13 @@ func peerChangeDiff(was tailcfg.NodeView, n *tailcfg.Node) (_ *tailcfg.PeerChang if !views.SliceEqual(was.Endpoints(), views.SliceOf(n.Endpoints)) { pc().Endpoints = slices.Clone(n.Endpoints) } - case "DERP": - if was.DERP() != n.DERP { - ip, portStr, err := net.SplitHostPort(n.DERP) - if err != nil || ip != "127.3.3.40" { - return nil, false - } - port, err := strconv.Atoi(portStr) - if err != nil || port < 1 || port > 65535 { - return nil, false - } - pc().DERPRegion = port + case "LegacyDERPString": + if was.LegacyDERPString() != "" || n.LegacyDERPString != "" { + panic("unexpected; caller should've already called upgradeNode") + } + case "HomeDERP": + if was.HomeDERP() != n.HomeDERP { + pc().DERPRegion = n.HomeDERP } case "Hostinfo": if !was.Hostinfo().Valid() && !n.Hostinfo.Valid() { diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index ad8f7dd6e..9c8c0c3aa 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -50,9 +50,9 @@ func TestUpdatePeersStateFromResponse(t *testing.T) { n.LastSeen = &t } } - withDERP := func(d string) func(*tailcfg.Node) { + withDERP := func(regionID int) func(*tailcfg.Node) { return func(n *tailcfg.Node) { - n.DERP = d + n.HomeDERP = regionID } } withEP := func(ep string) func(*tailcfg.Node) { @@ -189,14 +189,14 @@ func TestUpdatePeersStateFromResponse(t *testing.T) { }, { name: "ep_change_derp", - prev: peers(n(1, "foo", withDERP("127.3.3.40:3"))), + prev: peers(n(1, "foo", withDERP(3))), mapRes: &tailcfg.MapResponse{ PeersChangedPatch: []*tailcfg.PeerChange{{ NodeID: 1, DERPRegion: 4, }}, }, - want: peers(n(1, "foo", withDERP("127.3.3.40:4"))), + want: peers(n(1, "foo", withDERP(4))), wantStats: updateStats{changed: 1}, }, { @@ -213,19 +213,19 @@ func TestUpdatePeersStateFromResponse(t *testing.T) { }, { name: "ep_change_udp_2", - prev: peers(n(1, "foo", withDERP("127.3.3.40:3"), withEP("1.2.3.4:111"))), + prev: peers(n(1, "foo", 
withDERP(3), withEP("1.2.3.4:111"))), mapRes: &tailcfg.MapResponse{ PeersChangedPatch: []*tailcfg.PeerChange{{ NodeID: 1, Endpoints: eps("1.2.3.4:56"), }}, }, - want: peers(n(1, "foo", withDERP("127.3.3.40:3"), withEP("1.2.3.4:56"))), + want: peers(n(1, "foo", withDERP(3), withEP("1.2.3.4:56"))), wantStats: updateStats{changed: 1}, }, { name: "ep_change_both", - prev: peers(n(1, "foo", withDERP("127.3.3.40:3"), withEP("1.2.3.4:111"))), + prev: peers(n(1, "foo", withDERP(3), withEP("1.2.3.4:111"))), mapRes: &tailcfg.MapResponse{ PeersChangedPatch: []*tailcfg.PeerChange{{ NodeID: 1, @@ -233,7 +233,7 @@ func TestUpdatePeersStateFromResponse(t *testing.T) { Endpoints: eps("1.2.3.4:56"), }}, }, - want: peers(n(1, "foo", withDERP("127.3.3.40:2"), withEP("1.2.3.4:56"))), + want: peers(n(1, "foo", withDERP(2), withEP("1.2.3.4:56"))), wantStats: updateStats{changed: 1}, }, { @@ -744,8 +744,8 @@ func TestPeerChangeDiff(t *testing.T) { }, { name: "patch-derp", - a: &tailcfg.Node{ID: 1, DERP: "127.3.3.40:1"}, - b: &tailcfg.Node{ID: 1, DERP: "127.3.3.40:2"}, + a: &tailcfg.Node{ID: 1, HomeDERP: 1}, + b: &tailcfg.Node{ID: 1, HomeDERP: 2}, want: &tailcfg.PeerChange{NodeID: 1, DERPRegion: 2}, }, { @@ -929,23 +929,23 @@ func TestPatchifyPeersChanged(t *testing.T) { mr0: &tailcfg.MapResponse{ Node: &tailcfg.Node{Name: "foo.bar.ts.net."}, Peers: []*tailcfg.Node{ - {ID: 1, DERP: "127.3.3.40:1", Hostinfo: hi}, - {ID: 2, DERP: "127.3.3.40:2", Hostinfo: hi}, - {ID: 3, DERP: "127.3.3.40:3", Hostinfo: hi}, + {ID: 1, HomeDERP: 1, Hostinfo: hi}, + {ID: 2, HomeDERP: 2, Hostinfo: hi}, + {ID: 3, HomeDERP: 3, Hostinfo: hi}, }, }, mr1: &tailcfg.MapResponse{ PeersChanged: []*tailcfg.Node{ - {ID: 1, DERP: "127.3.3.40:11", Hostinfo: hi}, + {ID: 1, HomeDERP: 11, Hostinfo: hi}, {ID: 2, StableID: "other-change", Hostinfo: hi}, - {ID: 3, DERP: "127.3.3.40:33", Hostinfo: hi}, - {ID: 4, DERP: "127.3.3.40:4", Hostinfo: hi}, + {ID: 3, HomeDERP: 33, Hostinfo: hi}, + {ID: 4, HomeDERP: 4, Hostinfo: hi}, }, }, want: &tailcfg.MapResponse{ PeersChanged: []*tailcfg.Node{ {ID: 2, StableID: "other-change", Hostinfo: hi}, - {ID: 4, DERP: "127.3.3.40:4", Hostinfo: hi}, + {ID: 4, HomeDERP: 4, Hostinfo: hi}, }, PeersChangedPatch: []*tailcfg.PeerChange{ {NodeID: 1, DERPRegion: 11}, @@ -1006,6 +1006,53 @@ func TestPatchifyPeersChanged(t *testing.T) { } } +func TestUpgradeNode(t *testing.T) { + tests := []struct { + name string + in *tailcfg.Node + want *tailcfg.Node + }{ + { + name: "nil", + in: nil, + want: nil, + }, + { + name: "empty", + in: new(tailcfg.Node), + want: new(tailcfg.Node), + }, + { + name: "derp-both", + in: &tailcfg.Node{HomeDERP: 1, LegacyDERPString: tailcfg.DerpMagicIP + ":2"}, + want: &tailcfg.Node{HomeDERP: 1}, + }, + { + name: "derp-str-only", + in: &tailcfg.Node{LegacyDERPString: tailcfg.DerpMagicIP + ":2"}, + want: &tailcfg.Node{HomeDERP: 2}, + }, + { + name: "derp-int-only", + in: &tailcfg.Node{HomeDERP: 2}, + want: &tailcfg.Node{HomeDERP: 2}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var got *tailcfg.Node + if tt.in != nil { + got = ptr.To(*tt.in) // shallow clone + } + upgradeNode(got) + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("wrong result (-want +got):\n%s", diff) + } + }) + } + +} + func BenchmarkMapSessionDelta(b *testing.B) { for _, size := range []int{10, 100, 1_000, 10_000} { b.Run(fmt.Sprintf("size_%d", size), func(b *testing.B) { @@ -1022,7 +1069,7 @@ func BenchmarkMapSessionDelta(b *testing.B) { res.Peers = append(res.Peers, &tailcfg.Node{ ID: 
tailcfg.NodeID(i + 2), Name: fmt.Sprintf("peer%d.bar.ts.net.", i), - DERP: "127.3.3.40:10", + HomeDERP: 10, Addresses: []netip.Prefix{netip.MustParsePrefix("100.100.2.3/32"), netip.MustParsePrefix("fd7a:115c:a1e0::123/128")}, AllowedIPs: []netip.Prefix{netip.MustParsePrefix("100.100.2.3/32"), netip.MustParsePrefix("fd7a:115c:a1e0::123/128")}, Endpoints: eps("192.168.1.2:345", "192.168.1.3:678"), diff --git a/ipn/ipnlocal/expiry.go b/ipn/ipnlocal/expiry.go index 04c10226d..d11199815 100644 --- a/ipn/ipnlocal/expiry.go +++ b/ipn/ipnlocal/expiry.go @@ -116,7 +116,7 @@ func (em *expiryManager) flagExpiredPeers(netmap *netmap.NetworkMap, localNow ti // since we discover endpoints via DERP, and due to DERP return // path optimization. mut.Endpoints = nil - mut.DERP = "" + mut.HomeDERP = 0 // Defense-in-depth: break the node's public key as well, in // case something tries to communicate. diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index d33e2c9ee..81a62045a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7381,15 +7381,7 @@ func suggestExitNode(report *netcheck.Report, netMap *netmap.NetworkMap, prevSug } distances := make([]nodeDistance, 0, len(candidates)) for _, c := range candidates { - if c.DERP() != "" { - ipp, err := netip.ParseAddrPort(c.DERP()) - if err != nil { - continue - } - if ipp.Addr() != tailcfg.DerpMagicIPAddr { - continue - } - regionID := int(ipp.Port()) + if regionID := c.HomeDERP(); regionID != 0 { candidatesByRegion[regionID] = append(candidatesByRegion[regionID], c) continue } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index f3ee24a6b..f9a967bea 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1007,8 +1007,8 @@ func TestUpdateNetmapDelta(t *testing.T) { wants := []*tailcfg.Node{ { - ID: 1, - DERP: "127.3.3.40:1", + ID: 1, + HomeDERP: 1, }, { ID: 2, @@ -2021,7 +2021,7 @@ func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { netip.MustParsePrefix("100.64.1.1/32"), netip.MustParsePrefix("fe70::1/128"), }, - DERP: "127.3.3.40:2", + HomeDERP: 2, } defaultDERPMap := &tailcfg.DERPMap{ Regions: map[int]*tailcfg.DERPRegion{ @@ -2985,7 +2985,7 @@ func makePeer(id tailcfg.NodeID, opts ...peerOptFunc) tailcfg.NodeView { ID: id, StableID: tailcfg.StableNodeID(fmt.Sprintf("stable%d", id)), Name: fmt.Sprintf("peer%d", id), - DERP: fmt.Sprintf("127.3.3.40:%d", id), + HomeDERP: int(id), } for _, opt := range opts { opt(node) @@ -3001,13 +3001,13 @@ func withName(name string) peerOptFunc { func withDERP(region int) peerOptFunc { return func(n *tailcfg.Node) { - n.DERP = fmt.Sprintf("127.3.3.40:%d", region) + n.HomeDERP = region } } func withoutDERP() peerOptFunc { return func(n *tailcfg.Node) { - n.DERP = "" + n.HomeDERP = 0 } } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 1ede0bd9b..76945ec10 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -153,7 +153,8 @@ type CapabilityVersion int // - 108: 2024-11-08: Client sends ServicesHash in Hostinfo, understands c2n GET /vip-services. // - 109: 2024-11-18: Client supports filtertype.Match.SrcCaps (issue #12542) // - 110: 2024-12-12: removed never-before-used Tailscale SSH public key support (#14373) -const CurrentCapabilityVersion CapabilityVersion = 110 +// - 111: 2025-01-14: Client supports a peer having Node.HomeDERP (issue #14636) +const CurrentCapabilityVersion CapabilityVersion = 111 // ID is an integer ID for a user, node, or login allocated by the // control plane. 
@@ -346,15 +347,24 @@ type Node struct { AllowedIPs []netip.Prefix // range of IP addresses to route to this node Endpoints []netip.AddrPort `json:",omitempty"` // IP+port (public via STUN, and local LANs) - // DERP is this node's home DERP region ID integer, but shoved into an + // LegacyDERPString is this node's home LegacyDERPString region ID integer, but shoved into an // IP:port string for legacy reasons. The IP address is always "127.3.3.40" // (a loopback address (127) followed by the digits over the letters DERP on - // a QWERTY keyboard (3.3.40)). The "port number" is the home DERP region ID + // a QWERTY keyboard (3.3.40)). The "port number" is the home LegacyDERPString region ID // integer. // - // TODO(bradfitz): simplify this legacy mess; add a new HomeDERPRegionID int - // field behind a new capver bump. - DERP string `json:",omitempty"` // DERP-in-IP:port ("127.3.3.40:N") endpoint + // Deprecated: HomeDERP has replaced this, but old servers might still send + // this field. See tailscale/tailscale#14636. Do not use this field in code + // other than in the upgradeNode func, which canonicalizes it to HomeDERP + // if it arrives as a LegacyDERPString string on the wire. + LegacyDERPString string `json:"DERP,omitempty"` // DERP-in-IP:port ("127.3.3.40:N") endpoint + + // HomeDERP is the modern version of the DERP string field, with just an + // integer. The client advertises support for this as of capver 111. + // + // HomeDERP may be zero if not (yet) known, but ideally always be non-zero + // for magicsock connectivity to function normally. + HomeDERP int `json:",omitempty"` // DERP region ID of the node's home DERP Hostinfo HostinfoView Created time.Time @@ -2162,7 +2172,8 @@ func (n *Node) Equal(n2 *Node) bool { slicesx.EqualSameNil(n.AllowedIPs, n2.AllowedIPs) && slicesx.EqualSameNil(n.PrimaryRoutes, n2.PrimaryRoutes) && slicesx.EqualSameNil(n.Endpoints, n2.Endpoints) && - n.DERP == n2.DERP && + n.LegacyDERPString == n2.LegacyDERPString && + n.HomeDERP == n2.HomeDERP && n.Cap == n2.Cap && n.Hostinfo.Equal(n2.Hostinfo) && n.Created.Equal(n2.Created) && diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index d282719b7..42cef1598 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -99,7 +99,8 @@ var _NodeCloneNeedsRegeneration = Node(struct { Addresses []netip.Prefix AllowedIPs []netip.Prefix Endpoints []netip.AddrPort - DERP string + LegacyDERPString string + HomeDERP int Hostinfo HostinfoView Created time.Time Cap CapabilityVersion diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index b9a204ead..560e28933 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -367,7 +367,7 @@ func TestNodeEqual(t *testing.T) { nodeHandles := []string{ "ID", "StableID", "Name", "User", "Sharer", "Key", "KeyExpiry", "KeySignature", "Machine", "DiscoKey", - "Addresses", "AllowedIPs", "Endpoints", "DERP", "Hostinfo", + "Addresses", "AllowedIPs", "Endpoints", "LegacyDERPString", "HomeDERP", "Hostinfo", "Created", "Cap", "Tags", "PrimaryRoutes", "LastSeen", "Online", "MachineAuthorized", "Capabilities", "CapMap", @@ -530,8 +530,13 @@ func TestNodeEqual(t *testing.T) { true, }, { - &Node{DERP: "foo"}, - &Node{DERP: "bar"}, + &Node{LegacyDERPString: "foo"}, + &Node{LegacyDERPString: "bar"}, + false, + }, + { + &Node{HomeDERP: 1}, + &Node{HomeDERP: 2}, false, }, { diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 8edd19c83..3770f272f 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -139,7 +139,8 
@@ func (v NodeView) DiscoKey() key.DiscoPublic { return v.ж.DiscoK func (v NodeView) Addresses() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.Addresses) } func (v NodeView) AllowedIPs() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.AllowedIPs) } func (v NodeView) Endpoints() views.Slice[netip.AddrPort] { return views.SliceOf(v.ж.Endpoints) } -func (v NodeView) DERP() string { return v.ж.DERP } +func (v NodeView) LegacyDERPString() string { return v.ж.LegacyDERPString } +func (v NodeView) HomeDERP() int { return v.ж.HomeDERP } func (v NodeView) Hostinfo() HostinfoView { return v.ж.Hostinfo } func (v NodeView) Created() time.Time { return v.ж.Created } func (v NodeView) Cap() CapabilityVersion { return v.ж.Cap } @@ -192,7 +193,8 @@ var _NodeViewNeedsRegeneration = Node(struct { Addresses []netip.Prefix AllowedIPs []netip.Prefix Endpoints []netip.AddrPort - DERP string + LegacyDERPString string + HomeDERP int Hostinfo HostinfoView Created time.Time Cap CapabilityVersion diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 386359f19..e127087a6 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -805,7 +805,7 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi node.Hostinfo = req.Hostinfo.View() if ni := node.Hostinfo.NetInfo(); ni.Valid() { if ni.PreferredDERP() != 0 { - node.DERP = fmt.Sprintf("127.3.3.40:%d", ni.PreferredDERP()) + node.HomeDERP = ni.PreferredDERP() } } } diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index b1ac612de..7662e145e 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -287,11 +287,8 @@ func printPeerConcise(buf *strings.Builder, p tailcfg.NodeView) { epStrs[i] = fmt.Sprintf("%21v", e+strings.Repeat(" ", spaces)) } - derp := p.DERP() - const derpPrefix = "127.3.3.40:" - if strings.HasPrefix(derp, derpPrefix) { - derp = "D" + derp[len(derpPrefix):] - } + derp := fmt.Sprintf("D%d", p.HomeDERP()) + var discoShort string if !p.DiscoKey().IsZero() { discoShort = p.DiscoKey().ShortString() + " " @@ -311,7 +308,7 @@ func printPeerConcise(buf *strings.Builder, p tailcfg.NodeView) { // nodeConciseEqual reports whether a and b are equal for the fields accessed by printPeerConcise. 
func nodeConciseEqual(a, b tailcfg.NodeView) bool { return a.Key() == b.Key() && - a.DERP() == b.DERP() && + a.HomeDERP() == b.HomeDERP() && a.DiscoKey() == b.DiscoKey() && views.SliceEqual(a.AllowedIPs(), b.AllowedIPs()) && views.SliceEqual(a.Endpoints(), b.Endpoints()) diff --git a/types/netmap/netmap_test.go b/types/netmap/netmap_test.go index e7e2d1957..40f504741 100644 --- a/types/netmap/netmap_test.go +++ b/types/netmap/netmap_test.go @@ -63,12 +63,12 @@ func TestNetworkMapConcise(t *testing.T) { Peers: nodeViews([]*tailcfg.Node{ { Key: testNodeKey(2), - DERP: "127.3.3.40:2", + HomeDERP: 2, Endpoints: eps("192.168.0.100:12", "192.168.0.100:12354"), }, { Key: testNodeKey(3), - DERP: "127.3.3.40:4", + HomeDERP: 4, Endpoints: eps("10.2.0.100:12", "10.1.0.100:12345"), }, }), @@ -102,7 +102,7 @@ func TestConciseDiffFrom(t *testing.T) { Peers: nodeViews([]*tailcfg.Node{ { Key: testNodeKey(2), - DERP: "127.3.3.40:2", + HomeDERP: 2, Endpoints: eps("192.168.0.100:12", "192.168.0.100:12354"), }, }), @@ -112,7 +112,7 @@ func TestConciseDiffFrom(t *testing.T) { Peers: nodeViews([]*tailcfg.Node{ { Key: testNodeKey(2), - DERP: "127.3.3.40:2", + HomeDERP: 2, Endpoints: eps("192.168.0.100:12", "192.168.0.100:12354"), }, }), @@ -126,7 +126,7 @@ func TestConciseDiffFrom(t *testing.T) { Peers: nodeViews([]*tailcfg.Node{ { Key: testNodeKey(2), - DERP: "127.3.3.40:2", + HomeDERP: 2, Endpoints: eps("192.168.0.100:12", "192.168.0.100:12354"), }, }), @@ -136,7 +136,7 @@ func TestConciseDiffFrom(t *testing.T) { Peers: nodeViews([]*tailcfg.Node{ { Key: testNodeKey(2), - DERP: "127.3.3.40:2", + HomeDERP: 2, Endpoints: eps("192.168.0.100:12", "192.168.0.100:12354"), }, }), @@ -151,7 +151,7 @@ func TestConciseDiffFrom(t *testing.T) { { ID: 2, Key: testNodeKey(2), - DERP: "127.3.3.40:2", + HomeDERP: 2, Endpoints: eps("192.168.0.100:12", "192.168.0.100:12354"), }, }), @@ -162,19 +162,19 @@ func TestConciseDiffFrom(t *testing.T) { { ID: 1, Key: testNodeKey(1), - DERP: "127.3.3.40:1", + HomeDERP: 1, Endpoints: eps("192.168.0.100:12", "192.168.0.100:12354"), }, { ID: 2, Key: testNodeKey(2), - DERP: "127.3.3.40:2", + HomeDERP: 2, Endpoints: eps("192.168.0.100:12", "192.168.0.100:12354"), }, { ID: 3, Key: testNodeKey(3), - DERP: "127.3.3.40:3", + HomeDERP: 3, Endpoints: eps("192.168.0.100:12", "192.168.0.100:12354"), }, }), @@ -189,19 +189,19 @@ func TestConciseDiffFrom(t *testing.T) { { ID: 1, Key: testNodeKey(1), - DERP: "127.3.3.40:1", + HomeDERP: 1, Endpoints: eps("192.168.0.100:12", "192.168.0.100:12354"), }, { ID: 2, Key: testNodeKey(2), - DERP: "127.3.3.40:2", + HomeDERP: 2, Endpoints: eps("192.168.0.100:12", "192.168.0.100:12354"), }, { ID: 3, Key: testNodeKey(3), - DERP: "127.3.3.40:3", + HomeDERP: 3, Endpoints: eps("192.168.0.100:12", "192.168.0.100:12354"), }, }), @@ -212,7 +212,7 @@ func TestConciseDiffFrom(t *testing.T) { { ID: 2, Key: testNodeKey(2), - DERP: "127.3.3.40:2", + HomeDERP: 2, Endpoints: eps("192.168.0.100:12", "192.168.0.100:12354"), }, }), @@ -227,7 +227,7 @@ func TestConciseDiffFrom(t *testing.T) { { ID: 2, Key: testNodeKey(2), - DERP: "127.3.3.40:2", + HomeDERP: 2, Endpoints: eps("192.168.0.100:12", "1.1.1.1:1"), }, }), @@ -238,7 +238,7 @@ func TestConciseDiffFrom(t *testing.T) { { ID: 2, Key: testNodeKey(2), - DERP: "127.3.3.40:2", + HomeDERP: 2, Endpoints: eps("192.168.0.100:12", "1.1.1.1:2"), }, }), @@ -253,7 +253,7 @@ func TestConciseDiffFrom(t *testing.T) { { ID: 2, Key: testNodeKey(2), - DERP: "127.3.3.40:2", + HomeDERP: 2, Endpoints: eps("192.168.0.100:41641", "1.1.1.1:41641"), 
DiscoKey: testDiscoKey("f00f00f00f"), AllowedIPs: []netip.Prefix{netip.PrefixFrom(netaddr.IPv4(100, 102, 103, 104), 32)}, @@ -266,7 +266,7 @@ func TestConciseDiffFrom(t *testing.T) { { ID: 2, Key: testNodeKey(2), - DERP: "127.3.3.40:2", + HomeDERP: 2, Endpoints: eps("192.168.0.100:41641", "1.1.1.1:41641"), DiscoKey: testDiscoKey("ba4ba4ba4b"), AllowedIPs: []netip.Prefix{netip.PrefixFrom(netaddr.IPv4(100, 102, 103, 104), 32)}, diff --git a/types/netmap/nodemut.go b/types/netmap/nodemut.go index 46fbaefc6..6f116059e 100644 --- a/types/netmap/nodemut.go +++ b/types/netmap/nodemut.go @@ -5,7 +5,6 @@ package netmap import ( "cmp" - "fmt" "net/netip" "reflect" "slices" @@ -35,7 +34,7 @@ type NodeMutationDERPHome struct { } func (m NodeMutationDERPHome) Apply(n *tailcfg.Node) { - n.DERP = fmt.Sprintf("127.3.3.40:%v", m.DERPRegion) + n.HomeDERP = m.DERPRegion } // NodeMutation is a NodeMutation that says a node's endpoints have changed. diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index df4299b72..7780c7db6 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1359,7 +1359,7 @@ func (de *endpoint) updateFromNode(n tailcfg.NodeView, heartbeatDisabled bool, p }) de.resetLocked() } - if n.DERP() == "" { + if n.HomeDERP() == 0 { if de.derpAddr.IsValid() { de.debugUpdates.Add(EndpointChange{ When: time.Now(), @@ -1369,7 +1369,7 @@ func (de *endpoint) updateFromNode(n tailcfg.NodeView, heartbeatDisabled bool, p } de.derpAddr = netip.AddrPort{} } else { - newDerp, _ := netip.ParseAddrPort(n.DERP()) + newDerp := netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, uint16(n.HomeDERP())) if de.derpAddr != newDerp { de.debugUpdates.Add(EndpointChange{ When: time.Now(), diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 6a49f091e..98cb63b88 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2337,10 +2337,7 @@ func devPanicf(format string, a ...any) { func (c *Conn) logEndpointCreated(n tailcfg.NodeView) { c.logf("magicsock: created endpoint key=%s: disco=%s; %v", n.Key().ShortString(), n.DiscoKey().ShortString(), logger.ArgWriter(func(w *bufio.Writer) { - const derpPrefix = "127.3.3.40:" - if strings.HasPrefix(n.DERP(), derpPrefix) { - ipp, _ := netip.ParseAddrPort(n.DERP()) - regionID := int(ipp.Port()) + if regionID := n.HomeDERP(); regionID != 0 { code := c.derpRegionCodeLocked(regionID) if code != "" { code = "(" + code + ")" diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index d4c9f0cbb..090c1218f 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -314,7 +314,7 @@ func meshStacks(logf logger.Logf, mutateNetmap func(idx int, nm *netmap.NetworkM Addresses: addrs, AllowedIPs: addrs, Endpoints: epFromTyped(eps[i]), - DERP: "127.3.3.40:1", + HomeDERP: 1, } nm.Peers = append(nm.Peers, peer.View()) } diff --git a/wgengine/pendopen.go b/wgengine/pendopen.go index f8e9198a5..28d1f4f9d 100644 --- a/wgengine/pendopen.go +++ b/wgengine/pendopen.go @@ -198,7 +198,7 @@ func (e *userspaceEngine) onOpenTimeout(flow flowtrack.Tuple) { e.logf("open-conn-track: timeout opening %v; peer node %v running pre-0.100", flow, n.Key().ShortString()) return } - if n.DERP() == "" { + if n.HomeDERP() == 0 { e.logf("open-conn-track: timeout opening %v; peer node %v not connected to any DERP relay", flow, n.Key().ShortString()) return } diff --git a/wgengine/wgcfg/nmcfg/nmcfg.go b/wgengine/wgcfg/nmcfg/nmcfg.go index 
97304aa41..45c235b4d 100644 --- a/wgengine/wgcfg/nmcfg/nmcfg.go +++ b/wgengine/wgcfg/nmcfg/nmcfg.go @@ -85,7 +85,7 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, skippedSubnets := new(bytes.Buffer) for _, peer := range nm.Peers { - if peer.DiscoKey().IsZero() && peer.DERP() == "" && !peer.IsWireGuardOnly() { + if peer.DiscoKey().IsZero() && peer.HomeDERP() == 0 && !peer.IsWireGuardOnly() { // Peer predates both DERP and active discovery, we cannot // communicate with it. logf("[v1] wgcfg: skipped peer %s, doesn't offer DERP or disco", peer.Key().ShortString()) From 27477983e333eda4cab540778d97fe64203c91ac Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 14 Jan 2025 12:36:09 -0800 Subject: [PATCH 0323/1708] control/controlclient: remove misleading TS_DEBUG_NETMAP, make it TS_DEBUG_MAP=2 (or more) Updates #cleanup Change-Id: Ic1edaed46b7b451ab58bb2303640225223eba9ce Signed-off-by: Brad Fitzpatrick --- control/controlclient/direct.go | 36 ++++++++++++++++----------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index dd361c4a2..c436bc8b1 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -650,7 +650,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new c.logf("RegisterReq sign error: %v", err) } } - if debugRegister() { + if DevKnob.DumpRegister() { j, _ := json.MarshalIndent(request, "", "\t") c.logf("RegisterRequest: %s", j) } @@ -691,7 +691,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new c.logf("error decoding RegisterResponse with server key %s and machine key %s: %v", serverKey, machinePrivKey.Public(), err) return regen, opt.URL, nil, fmt.Errorf("register request: %v", err) } - if debugRegister() { + if DevKnob.DumpRegister() { j, _ := json.MarshalIndent(resp, "", "\t") c.logf("RegisterResponse: %s", j) } @@ -877,7 +877,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap c.logf("[v1] PollNetMap: stream=%v ep=%v", isStreaming, epStrs) vlogf := logger.Discard - if DevKnob.DumpNetMaps() { + if DevKnob.DumpNetMapsVerbose() { // TODO(bradfitz): update this to use "[v2]" prefix perhaps? but we don't // want to upload it always. vlogf = c.logf @@ -1170,11 +1170,6 @@ func decode(res *http.Response, v any) error { return json.Unmarshal(msg, v) } -var ( - debugMap = envknob.RegisterBool("TS_DEBUG_MAP") - debugRegister = envknob.RegisterBool("TS_DEBUG_REGISTER") -) - var jsonEscapedZero = []byte(`\u0000`) // decodeMsg is responsible for uncompressing msg and unmarshaling into v. 
@@ -1183,7 +1178,7 @@ func (c *Direct) decodeMsg(compressedMsg []byte, v any) error { if err != nil { return err } - if debugMap() { + if DevKnob.DumpNetMaps() { var buf bytes.Buffer json.Indent(&buf, b, "", " ") log.Printf("MapResponse: %s", buf.Bytes()) @@ -1205,7 +1200,7 @@ func encode(v any) ([]byte, error) { if err != nil { return nil, err } - if debugMap() { + if DevKnob.DumpNetMaps() { if _, ok := v.(*tailcfg.MapRequest); ok { log.Printf("MapRequest: %s", b) } @@ -1253,18 +1248,23 @@ func loadServerPubKeys(ctx context.Context, httpc *http.Client, serverURL string var DevKnob = initDevKnob() type devKnobs struct { - DumpNetMaps func() bool - ForceProxyDNS func() bool - StripEndpoints func() bool // strip endpoints from control (only use disco messages) - StripCaps func() bool // strip all local node's control-provided capabilities + DumpRegister func() bool + DumpNetMaps func() bool + DumpNetMapsVerbose func() bool + ForceProxyDNS func() bool + StripEndpoints func() bool // strip endpoints from control (only use disco messages) + StripCaps func() bool // strip all local node's control-provided capabilities } func initDevKnob() devKnobs { + nm := envknob.RegisterInt("TS_DEBUG_MAP") return devKnobs{ - DumpNetMaps: envknob.RegisterBool("TS_DEBUG_NETMAP"), - ForceProxyDNS: envknob.RegisterBool("TS_DEBUG_PROXY_DNS"), - StripEndpoints: envknob.RegisterBool("TS_DEBUG_STRIP_ENDPOINTS"), - StripCaps: envknob.RegisterBool("TS_DEBUG_STRIP_CAPS"), + DumpNetMaps: func() bool { return nm() > 0 }, + DumpNetMapsVerbose: func() bool { return nm() > 1 }, + DumpRegister: envknob.RegisterBool("TS_DEBUG_REGISTER"), + ForceProxyDNS: envknob.RegisterBool("TS_DEBUG_PROXY_DNS"), + StripEndpoints: envknob.RegisterBool("TS_DEBUG_STRIP_ENDPOINTS"), + StripCaps: envknob.RegisterBool("TS_DEBUG_STRIP_CAPS"), } } From d818a58a7772698358a078684d57b27098b28dad Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Mon, 13 Jan 2025 13:47:56 -0700 Subject: [PATCH 0324/1708] net/dns: ensure the Windows configurator does not touch the hosts file unless the configuration actually changed We build up maps of both the existing MagicDNS configuration in hosts and the desired MagicDNS configuration, compare the two, and only write out a new one if there are changes. The comparison doesn't need to be perfect, as the occasional false-positive is fine, but this should greatly reduce rewrites of the hosts file. I also changed the hosts updating code to remove the CRLF/LF conversion stuff, and use Fprintf instead of Frintln to let us write those inline. 
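For reference, the change detection boils down to comparing two address-to-hostnames maps and skipping the write when they match. A minimal standalone sketch of that comparison (the hostsUnchanged helper and the sample entries are illustrative only, not code from this patch):

    package main

    import (
        "fmt"
        "maps"
        "net/netip"
        "slices"
    )

    // hostsUnchanged reports whether the MagicDNS entries parsed out of the
    // existing hosts file already match the desired entries. The patch below
    // performs the same comparison inline with maps.EqualFunc.
    func hostsUnchanged(prev, next map[netip.Addr][]string) bool {
        return maps.EqualFunc(prev, next, func(a, b []string) bool {
            return slices.Equal(a, b)
        })
    }

    func main() {
        prev := map[netip.Addr][]string{
            netip.MustParseAddr("100.100.1.1"): {"node-a.example.ts.net", "node-a"},
        }
        next := map[netip.Addr][]string{
            netip.MustParseAddr("100.100.1.1"): {"node-a.example.ts.net", "node-a"},
        }
        // Prints true: the hosts file is left untouched, avoiding a rewrite.
        fmt.Println(hostsUnchanged(prev, next))
    }
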
Updates #14428 Signed-off-by: Aaron Klotz --- net/dns/manager_windows.go | 64 ++++++++++++++++++++++++++------- net/dns/manager_windows_test.go | 52 +++++++++++++++++++++++++-- 2 files changed, 102 insertions(+), 14 deletions(-) diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index 250a25573..effdf23ca 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -8,10 +8,12 @@ import ( "bytes" "errors" "fmt" + "maps" "net/netip" "os" "os/exec" "path/filepath" + "slices" "sort" "strings" "sync" @@ -140,9 +142,8 @@ func (m *windowsManager) setSplitDNS(resolvers []netip.Addr, domains []dnsname.F return m.nrptDB.WriteSplitDNSConfig(servers, domains) } -func setTailscaleHosts(prevHostsFile []byte, hosts []*HostEntry) ([]byte, error) { - b := bytes.ReplaceAll(prevHostsFile, []byte("\r\n"), []byte("\n")) - sc := bufio.NewScanner(bytes.NewReader(b)) +func setTailscaleHosts(logf logger.Logf, prevHostsFile []byte, hosts []*HostEntry) ([]byte, error) { + sc := bufio.NewScanner(bytes.NewReader(prevHostsFile)) const ( header = "# TailscaleHostsSectionStart" footer = "# TailscaleHostsSectionEnd" @@ -151,6 +152,32 @@ func setTailscaleHosts(prevHostsFile []byte, hosts []*HostEntry) ([]byte, error) "# This section contains MagicDNS entries for Tailscale.", "# Do not edit this section manually.", } + + prevEntries := make(map[netip.Addr][]string) + addPrevEntry := func(line string) { + if line == "" || line[0] == '#' { + return + } + + parts := strings.Split(line, " ") + if len(parts) < 1 { + return + } + + addr, err := netip.ParseAddr(parts[0]) + if err != nil { + logf("Parsing address from hosts: %v", err) + return + } + + prevEntries[addr] = parts[1:] + } + + nextEntries := make(map[netip.Addr][]string, len(hosts)) + for _, he := range hosts { + nextEntries[he.Addr] = he.Hosts + } + var out bytes.Buffer var inSection bool for sc.Scan() { @@ -164,26 +191,34 @@ func setTailscaleHosts(prevHostsFile []byte, hosts []*HostEntry) ([]byte, error) continue } if inSection { + addPrevEntry(line) continue } - fmt.Fprintln(&out, line) + fmt.Fprintf(&out, "%s\r\n", line) } if err := sc.Err(); err != nil { return nil, err } + + unchanged := maps.EqualFunc(prevEntries, nextEntries, func(a, b []string) bool { + return slices.Equal(a, b) + }) + if unchanged { + return nil, nil + } + if len(hosts) > 0 { - fmt.Fprintln(&out, header) + fmt.Fprintf(&out, "%s\r\n", header) for _, c := range comments { - fmt.Fprintln(&out, c) + fmt.Fprintf(&out, "%s\r\n", c) } - fmt.Fprintln(&out) + fmt.Fprintf(&out, "\r\n") for _, he := range hosts { - fmt.Fprintf(&out, "%s %s\n", he.Addr, strings.Join(he.Hosts, " ")) + fmt.Fprintf(&out, "%s %s\r\n", he.Addr, strings.Join(he.Hosts, " ")) } - fmt.Fprintln(&out) - fmt.Fprintln(&out, footer) + fmt.Fprintf(&out, "\r\n%s\r\n", footer) } - return bytes.ReplaceAll(out.Bytes(), []byte("\n"), []byte("\r\n")), nil + return out.Bytes(), nil } // setHosts sets the hosts file to contain the given host entries. @@ -197,10 +232,15 @@ func (m *windowsManager) setHosts(hosts []*HostEntry) error { if err != nil { return err } - outB, err := setTailscaleHosts(b, hosts) + outB, err := setTailscaleHosts(m.logf, b, hosts) if err != nil { return err } + if outB == nil { + // No change to hosts file, therefore no write necessary. + return nil + } + const fileMode = 0 // ignored on windows. 
// This can fail spuriously with an access denied error, so retry it a diff --git a/net/dns/manager_windows_test.go b/net/dns/manager_windows_test.go index 62c4dd9fb..edcf24ec0 100644 --- a/net/dns/manager_windows_test.go +++ b/net/dns/manager_windows_test.go @@ -15,6 +15,7 @@ import ( "golang.org/x/sys/windows" "golang.org/x/sys/windows/registry" + "tailscale.com/types/logger" "tailscale.com/util/dnsname" "tailscale.com/util/winutil" "tailscale.com/util/winutil/gp" @@ -24,9 +25,56 @@ const testGPRuleID = "{7B1B6151-84E6-41A3-8967-62F7F7B45687}" func TestHostFileNewLines(t *testing.T) { in := []byte("#foo\r\n#bar\n#baz\n") - want := []byte("#foo\r\n#bar\r\n#baz\r\n") + want := []byte("#foo\r\n#bar\r\n#baz\r\n# TailscaleHostsSectionStart\r\n# This section contains MagicDNS entries for Tailscale.\r\n# Do not edit this section manually.\r\n\r\n192.168.1.1 aaron\r\n\r\n# TailscaleHostsSectionEnd\r\n") - got, err := setTailscaleHosts(in, nil) + he := []*HostEntry{ + &HostEntry{ + Addr: netip.MustParseAddr("192.168.1.1"), + Hosts: []string{"aaron"}, + }, + } + got, err := setTailscaleHosts(logger.Discard, in, he) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(got, want) { + t.Errorf("got %q, want %q\n", got, want) + } +} + +func TestHostFileUnchanged(t *testing.T) { + in := []byte("#foo\r\n#bar\r\n#baz\r\n# TailscaleHostsSectionStart\r\n# This section contains MagicDNS entries for Tailscale.\r\n# Do not edit this section manually.\r\n\r\n192.168.1.1 aaron\r\n\r\n# TailscaleHostsSectionEnd\r\n") + + he := []*HostEntry{ + &HostEntry{ + Addr: netip.MustParseAddr("192.168.1.1"), + Hosts: []string{"aaron"}, + }, + } + got, err := setTailscaleHosts(logger.Discard, in, he) + if err != nil { + t.Fatal(err) + } + if got != nil { + t.Errorf("got %q, want nil\n", got) + } +} + +func TestHostFileChanged(t *testing.T) { + in := []byte("#foo\r\n#bar\r\n#baz\r\n# TailscaleHostsSectionStart\r\n# This section contains MagicDNS entries for Tailscale.\r\n# Do not edit this section manually.\r\n\r\n192.168.1.1 aaron1\r\n\r\n# TailscaleHostsSectionEnd\r\n") + want := []byte("#foo\r\n#bar\r\n#baz\r\n# TailscaleHostsSectionStart\r\n# This section contains MagicDNS entries for Tailscale.\r\n# Do not edit this section manually.\r\n\r\n192.168.1.1 aaron1\r\n192.168.1.2 aaron2\r\n\r\n# TailscaleHostsSectionEnd\r\n") + + he := []*HostEntry{ + &HostEntry{ + Addr: netip.MustParseAddr("192.168.1.1"), + Hosts: []string{"aaron1"}, + }, + &HostEntry{ + Addr: netip.MustParseAddr("192.168.1.2"), + Hosts: []string{"aaron2"}, + }, + } + got, err := setTailscaleHosts(logger.Discard, in, he) if err != nil { t.Fatal(err) } From d0ba91bdb22e2cfe8b0f35a37cc0b540842f0011 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Sat, 11 Jan 2025 17:58:27 -0600 Subject: [PATCH 0325/1708] ipn/ipnserver: use ipnauth.Actor instead of *ipnserver.actor whenever possible In preparation for adding test coverage for ipn/ipnserver.Server, we update it to use ipnauth.Actor instead of its concrete implementation where possible. 
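The pattern applied throughout this change is to program against the ipnauth.Actor interface and type-assert back to the concrete actor only where concrete-only details (such as the pid) are needed. A small sketch of that pattern with stand-in types (Actor and localActor here are illustrative, not the real ipnauth/ipnserver types):

    package main

    import "fmt"

    // Actor stands in for ipnauth.Actor; localActor stands in for the
    // concrete *ipnserver.actor.
    type Actor interface {
        Username() (string, error)
    }

    type localActor struct {
        name string
        pid  int
    }

    func (a *localActor) Username() (string, error) { return a.name, nil }

    // describe accepts the interface and reaches for the pid only via a
    // type assertion, mirroring how server.go handles Permissions and pid()
    // in the diff below.
    func describe(a Actor) string {
        name, _ := a.Username()
        if la, ok := a.(*localActor); ok {
            return fmt.Sprintf("%s (pid %d)", name, la.pid)
        }
        return name
    }

    func main() {
        fmt.Println(describe(&localActor{name: "alice", pid: 4242}))
    }
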
Updates tailscale/corp#25804 Signed-off-by: Nick Khyl --- ipn/ipnserver/actor.go | 8 ++++---- ipn/ipnserver/server.go | 24 ++++++++++++++++-------- 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 0e716009c..2df8986c3 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -112,11 +112,11 @@ func (a *actor) Username() (string, error) { } type actorOrError struct { - actor *actor + actor ipnauth.Actor err error } -func (a actorOrError) unwrap() (*actor, error) { +func (a actorOrError) unwrap() (ipnauth.Actor, error) { return a.actor, a.err } @@ -131,9 +131,9 @@ func contextWithActor(ctx context.Context, logf logger.Logf, c net.Conn) context return actorKey.WithValue(ctx, actorOrError{actor: actor, err: err}) } -// actorFromContext returns an [actor] associated with ctx, +// actorFromContext returns an [ipnauth.Actor] associated with ctx, // or an error if the context does not carry an actor's identity. -func actorFromContext(ctx context.Context) (*actor, error) { +func actorFromContext(ctx context.Context) (ipnauth.Actor, error) { return actorKey.Value(ctx).unwrap() } diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index 73b5e82ab..574d1a55c 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -22,6 +22,7 @@ import ( "tailscale.com/envknob" "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/localapi" "tailscale.com/net/netmon" @@ -30,6 +31,7 @@ import ( "tailscale.com/util/mak" "tailscale.com/util/set" "tailscale.com/util/systemd" + "tailscale.com/util/testenv" ) // Server is an IPN backend and its set of 0 or more active localhost @@ -50,7 +52,7 @@ type Server struct { // lock order: mu, then LocalBackend.mu mu sync.Mutex lastUserID ipn.WindowsUserID // tracks last userid; on change, Reset state for paranoia - activeReqs map[*http.Request]*actor + activeReqs map[*http.Request]ipnauth.Actor backendWaiter waiterSet // of LocalBackend waiters zeroReqWaiter waiterSet // of blockUntilZeroConnections waiters } @@ -195,8 +197,12 @@ func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) { if strings.HasPrefix(r.URL.Path, "/localapi/") { lah := localapi.NewHandler(lb, s.logf, s.backendLogID) - lah.PermitRead, lah.PermitWrite = ci.Permissions(lb.OperatorUserID()) - lah.PermitCert = ci.CanFetchCerts() + if actor, ok := ci.(*actor); ok { + lah.PermitRead, lah.PermitWrite = actor.Permissions(lb.OperatorUserID()) + lah.PermitCert = actor.CanFetchCerts() + } else if testenv.InTest() { + lah.PermitRead, lah.PermitWrite = true, true + } lah.Actor = ci lah.ServeHTTP(w, r) return @@ -230,11 +236,11 @@ func (e inUseOtherUserError) Unwrap() error { return e.error } // The returned error, when non-nil, will be of type inUseOtherUserError. // // s.mu must be held. -func (s *Server) checkConnIdentityLocked(ci *actor) error { +func (s *Server) checkConnIdentityLocked(ci ipnauth.Actor) error { // If clients are already connected, verify they're the same user. // This mostly matters on Windows at the moment. 
if len(s.activeReqs) > 0 { - var active *actor + var active ipnauth.Actor for _, active = range s.activeReqs { break } @@ -251,7 +257,9 @@ func (s *Server) checkConnIdentityLocked(ci *actor) error { if username, err := active.Username(); err == nil { fmt.Fprintf(&b, " by %s", username) } - fmt.Fprintf(&b, ", pid %d", active.pid()) + if active, ok := active.(*actor); ok { + fmt.Fprintf(&b, ", pid %d", active.pid()) + } return inUseOtherUserError{errors.New(b.String())} } } @@ -267,7 +275,7 @@ func (s *Server) checkConnIdentityLocked(ci *actor) error { // // This is primarily used for the Windows GUI, to block until one user's done // controlling the tailscaled process. -func (s *Server) blockWhileIdentityInUse(ctx context.Context, actor *actor) error { +func (s *Server) blockWhileIdentityInUse(ctx context.Context, actor ipnauth.Actor) error { inUse := func() bool { s.mu.Lock() defer s.mu.Unlock() @@ -361,7 +369,7 @@ func (a *actor) CanFetchCerts() bool { // The returned error may be of type [inUseOtherUserError]. // // onDone must be called when the HTTP request is done. -func (s *Server) addActiveHTTPRequest(req *http.Request, actor *actor) (onDone func(), err error) { +func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (onDone func(), err error) { if actor == nil { return nil, errors.New("internal error: nil actor") } From c3c4c964898716511142e448f6e1006daace28f8 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 13 Jan 2025 17:37:29 -0600 Subject: [PATCH 0326/1708] ipn/{ipnauth,ipnlocal,ipnserver}, client/tailscale: make ipnserver.Server testable We update client/tailscale.LocalClient to allow specifying an optional Transport (http.RoundTripper) for LocalAPI HTTP requests, and implement one that injects an ipnauth.TestActor via request headers. We also add several functions and types to make testing an ipn/ipnserver.Server possible (or at least easier). We then use these updates to write basic tests for ipnserver.Server, ensuring it works on non-Windows platforms and correctly sets and unsets the LocalBackend's current user when a Windows user connects and disconnects. We intentionally omit tests for switching between different OS users and will add them in follow-up commits. Updates tailscale/corp#25804 Signed-off-by: Nick Khyl --- client/tailscale/localclient.go | 12 +- ipn/ipnauth/actor.go | 13 ++ ipn/ipnlocal/local.go | 9 + ipn/ipnserver/server_test.go | 358 ++++++++++++++++++++++++++++++++ 4 files changed, 389 insertions(+), 3 deletions(-) diff --git a/client/tailscale/localclient.go b/client/tailscale/localclient.go index 4e452f894..baa211d1f 100644 --- a/client/tailscale/localclient.go +++ b/client/tailscale/localclient.go @@ -62,6 +62,12 @@ type LocalClient struct { // machine's tailscaled or equivalent. If nil, a default is used. Dial func(ctx context.Context, network, addr string) (net.Conn, error) + // Transport optionally specified an alternate [http.RoundTripper] + // used to execute HTTP requests. If nil, a default [http.Transport] is used, + // potentially with custom dialing logic from [Dial]. + // It is primarily used for testing. + Transport http.RoundTripper + // Socket specifies an alternate path to the local Tailscale socket. // If empty, a platform-specific default is used. 
Socket string @@ -129,9 +135,9 @@ func (lc *LocalClient) DoLocalRequest(req *http.Request) (*http.Response, error) req.Header.Set("Tailscale-Cap", strconv.Itoa(int(tailcfg.CurrentCapabilityVersion))) lc.tsClientOnce.Do(func() { lc.tsClient = &http.Client{ - Transport: &http.Transport{ - DialContext: lc.dialer(), - }, + Transport: cmp.Or(lc.Transport, http.RoundTripper( + &http.Transport{DialContext: lc.dialer()}), + ), } }) if !lc.OmitAuth { diff --git a/ipn/ipnauth/actor.go b/ipn/ipnauth/actor.go index 107017268..040d9b522 100644 --- a/ipn/ipnauth/actor.go +++ b/ipn/ipnauth/actor.go @@ -4,6 +4,7 @@ package ipnauth import ( + "encoding/json" "fmt" "tailscale.com/ipn" @@ -76,3 +77,15 @@ func (id ClientID) String() string { } return fmt.Sprint(id.v) } + +// MarshalJSON implements [json.Marshaler]. +// It is primarily used for testing. +func (id ClientID) MarshalJSON() ([]byte, error) { + return json.Marshal(id.v) +} + +// UnmarshalJSON implements [json.Unmarshaler]. +// It is primarily used for testing. +func (id *ClientID) UnmarshalJSON(b []byte) error { + return json.Unmarshal(b, &id.v) +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 81a62045a..576f01b6b 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3659,6 +3659,15 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) (ipn.WindowsUserID, e return uid, nil } +// CurrentUserForTest returns the current user and the associated WindowsUserID. +// It is used for testing only, and will be removed along with the rest of the +// "current user" functionality as we progress on the multi-user improvements (tailscale/corp#18342). +func (b *LocalBackend) CurrentUserForTest() (ipn.WindowsUserID, ipnauth.Actor) { + b.mu.Lock() + defer b.mu.Unlock() + return b.pm.CurrentUserID(), b.currentUser +} + func (b *LocalBackend) CheckPrefs(p *ipn.Prefs) error { b.mu.Lock() defer b.mu.Unlock() diff --git a/ipn/ipnserver/server_test.go b/ipn/ipnserver/server_test.go index b7d5ea144..8a9324fab 100644 --- a/ipn/ipnserver/server_test.go +++ b/ipn/ipnserver/server_test.go @@ -5,8 +5,32 @@ package ipnserver import ( "context" + "encoding/json" + "errors" + "fmt" + "net" + "net/http" + "net/http/httptest" + "runtime" "sync" + "sync/atomic" "testing" + + "tailscale.com/client/tailscale" + "tailscale.com/client/tailscale/apitype" + "tailscale.com/control/controlclient" + "tailscale.com/envknob" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/store/mem" + "tailscale.com/tsd" + "tailscale.com/tstest" + "tailscale.com/types/logger" + "tailscale.com/types/logid" + "tailscale.com/types/ptr" + "tailscale.com/util/mak" + "tailscale.com/wgengine" ) func TestWaiterSet(t *testing.T) { @@ -44,3 +68,337 @@ func TestWaiterSet(t *testing.T) { cleanup() wantLen(0, "at end") } + +func TestUserConnectDisconnectNonWindows(t *testing.T) { + enableLogging := false + if runtime.GOOS == "windows" { + setGOOSForTest(t, "linux") + } + + ctx := context.Background() + server := startDefaultTestIPNServer(t, ctx, enableLogging) + + // UserA connects and starts watching the IPN bus. + clientA := server.getClientAs("UserA") + watcherA, _ := clientA.WatchIPNBus(ctx, 0) + + // The concept of "current user" is only relevant on Windows + // and it should not be set on non-Windows platforms. + server.checkCurrentUser(nil) + + // Additionally, a different user should be able to connect and use the LocalAPI. 
+ clientB := server.getClientAs("UserB") + if _, gotErr := clientB.Status(ctx); gotErr != nil { + t.Fatalf("Status(%q): want nil; got %v", clientB.User.Name, gotErr) + } + + // Watching the IPN bus should also work for UserB. + watcherB, _ := clientB.WatchIPNBus(ctx, 0) + + // And if we send a notification, both users should receive it. + wantErrMessage := "test error" + testNotify := ipn.Notify{ErrMessage: ptr.To(wantErrMessage)} + server.mustBackend().DebugNotify(testNotify) + + if n, err := watcherA.Next(); err != nil { + t.Fatalf("IPNBusWatcher.Next(%q): %v", clientA.User.Name, err) + } else if gotErrMessage := n.ErrMessage; gotErrMessage == nil || *gotErrMessage != wantErrMessage { + t.Fatalf("IPNBusWatcher.Next(%q): want %v; got %v", clientA.User.Name, wantErrMessage, gotErrMessage) + } + + if n, err := watcherB.Next(); err != nil { + t.Fatalf("IPNBusWatcher.Next(%q): %v", clientB.User.Name, err) + } else if gotErrMessage := n.ErrMessage; gotErrMessage == nil || *gotErrMessage != wantErrMessage { + t.Fatalf("IPNBusWatcher.Next(%q): want %v; got %v", clientB.User.Name, wantErrMessage, gotErrMessage) + } +} + +func TestUserConnectDisconnectOnWindows(t *testing.T) { + enableLogging := false + setGOOSForTest(t, "windows") + + ctx := context.Background() + server := startDefaultTestIPNServer(t, ctx, enableLogging) + + client := server.getClientAs("User") + _, cancelWatcher := client.WatchIPNBus(ctx, 0) + + // On Windows, however, the current user should be set to the user that connected. + server.checkCurrentUser(client.User) + + // Cancel the IPN bus watcher request and wait for the server to unblock. + cancelWatcher() + server.blockWhileInUse(ctx) + + // The current user should not be set after a disconnect, as no one is + // currently using the server. + server.checkCurrentUser(nil) +} + +func TestIPNAlreadyInUseOnWindows(t *testing.T) { + enableLogging := false + setGOOSForTest(t, "windows") + + ctx := context.Background() + server := startDefaultTestIPNServer(t, ctx, enableLogging) + + // UserA connects and starts watching the IPN bus. + clientA := server.getClientAs("UserA") + clientA.WatchIPNBus(ctx, 0) + + // While UserA is connected, UserB should not be able to connect. + clientB := server.getClientAs("UserB") + if _, gotErr := clientB.Status(ctx); gotErr == nil { + t.Fatalf("Status(%q): want error; got nil", clientB.User.Name) + } else if wantError := "401 Unauthorized: Tailscale already in use by UserA"; gotErr.Error() != wantError { + t.Fatalf("Status(%q): want %q; got %q", clientB.User.Name, wantError, gotErr.Error()) + } + + // Current user should still be UserA. + server.checkCurrentUser(clientA.User) +} + +func setGOOSForTest(tb testing.TB, goos string) { + tb.Helper() + envknob.Setenv("TS_DEBUG_FAKE_GOOS", goos) + tb.Cleanup(func() { envknob.Setenv("TS_DEBUG_FAKE_GOOS", "") }) +} + +func testLogger(tb testing.TB, enableLogging bool) logger.Logf { + tb.Helper() + if enableLogging { + return tstest.WhileTestRunningLogger(tb) + } + return logger.Discard +} + +// newTestIPNServer creates a new IPN server for testing, using the specified local backend. 
+func newTestIPNServer(tb testing.TB, lb *ipnlocal.LocalBackend, enableLogging bool) *Server { + tb.Helper() + server := New(testLogger(tb, enableLogging), logid.PublicID{}, lb.NetMon()) + server.lb.Store(lb) + return server +} + +type testIPNClient struct { + tb testing.TB + *tailscale.LocalClient + User *ipnauth.TestActor +} + +func (c *testIPNClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*tailscale.IPNBusWatcher, context.CancelFunc) { + c.tb.Helper() + ctx, cancelWatcher := context.WithCancel(ctx) + c.tb.Cleanup(cancelWatcher) + watcher, err := c.LocalClient.WatchIPNBus(ctx, mask) + if err != nil { + c.tb.Fatalf("WatchIPNBus(%q): %v", c.User.Name, err) + } + c.tb.Cleanup(func() { watcher.Close() }) + return watcher, cancelWatcher +} + +func pumpIPNBus(watcher *tailscale.IPNBusWatcher) { + for { + _, err := watcher.Next() + if err != nil { + break + } + } +} + +type testIPNServer struct { + tb testing.TB + *Server + clientID atomic.Int64 + getClient func(*ipnauth.TestActor) *tailscale.LocalClient + + actorsMu sync.Mutex + actors map[string]*ipnauth.TestActor +} + +func (s *testIPNServer) getClientAs(name string) *testIPNClient { + clientID := fmt.Sprintf("Client-%d", 1+s.clientID.Add(1)) + user := s.makeTestUser(name, clientID) + return &testIPNClient{ + tb: s.tb, + LocalClient: s.getClient(user), + User: user, + } +} + +func (s *testIPNServer) makeTestUser(name string, clientID string) *ipnauth.TestActor { + s.actorsMu.Lock() + defer s.actorsMu.Unlock() + actor := s.actors[name] + if actor == nil { + actor = &ipnauth.TestActor{Name: name} + if envknob.GOOS() == "windows" { + // Historically, as of 2025-01-13, IPN does not distinguish between + // different users on non-Windows devices. Therefore, the UID, which is + // an [ipn.WindowsUserID], should only be populated when the actual or + // fake GOOS is Windows. + actor.UID = ipn.WindowsUserID(fmt.Sprintf("S-1-5-21-1-0-0-%d", 1001+len(s.actors))) + } + mak.Set(&s.actors, name, actor) + s.tb.Cleanup(func() { delete(s.actors, name) }) + } + actor = ptr.To(*actor) + actor.CID = ipnauth.ClientIDFrom(clientID) + return actor +} + +func (s *testIPNServer) blockWhileInUse(ctx context.Context) error { + ready, cleanup := s.zeroReqWaiter.add(&s.mu, ctx) + <-ready + cleanup() + return ctx.Err() +} + +func (s *testIPNServer) checkCurrentUser(want *ipnauth.TestActor) { + s.tb.Helper() + var wantUID ipn.WindowsUserID + if want != nil { + wantUID = want.UID + } + gotUID, gotActor := s.mustBackend().CurrentUserForTest() + if gotUID != wantUID { + s.tb.Errorf("CurrentUser: got UID %q; want %q", gotUID, wantUID) + } + if gotActor, ok := gotActor.(*ipnauth.TestActor); ok != (want != nil) || (want != nil && *gotActor != *want) { + s.tb.Errorf("CurrentUser: got %v; want %v", gotActor, want) + } +} + +// startTestIPNServer starts a [httptest.Server] that hosts the specified IPN server for the +// duration of the test, using the specified base context for incoming requests. +// It returns a function that creates a [tailscale.LocalClient] as a given [ipnauth.TestActor]. 
+func startTestIPNServer(tb testing.TB, baseContext context.Context, server *Server) *testIPNServer { + tb.Helper() + ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + actor, err := extractActorFromHeader(r.Header) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + tb.Errorf("extractActorFromHeader: %v", err) + return + } + ctx := newTestContextWithActor(r.Context(), actor) + server.serveHTTP(w, r.Clone(ctx)) + })) + ts.Config.Addr = "http://" + apitype.LocalAPIHost + ts.Config.BaseContext = func(_ net.Listener) context.Context { return baseContext } + ts.Config.ErrorLog = logger.StdLogger(logger.WithPrefix(server.logf, "ipnserver: ")) + ts.Start() + tb.Cleanup(ts.Close) + return &testIPNServer{ + tb: tb, + Server: server, + getClient: func(actor *ipnauth.TestActor) *tailscale.LocalClient { + return &tailscale.LocalClient{Transport: newTestRoundTripper(ts, actor)} + }, + } +} + +func startDefaultTestIPNServer(tb testing.TB, ctx context.Context, enableLogging bool) *testIPNServer { + tb.Helper() + lb := newLocalBackendWithTestControl(tb, newUnreachableControlClient, enableLogging) + ctx, stopServer := context.WithCancel(ctx) + tb.Cleanup(stopServer) + return startTestIPNServer(tb, ctx, newTestIPNServer(tb, lb, enableLogging)) +} + +type testRoundTripper struct { + transport http.RoundTripper + actor *ipnauth.TestActor +} + +// newTestRoundTripper creates a new [http.RoundTripper] that sends requests +// to the specified test server as the specified actor. +func newTestRoundTripper(ts *httptest.Server, actor *ipnauth.TestActor) *testRoundTripper { + return &testRoundTripper{ + transport: &http.Transport{DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + var std net.Dialer + return std.DialContext(ctx, network, ts.Listener.Addr().(*net.TCPAddr).String()) + }}, + actor: actor, + } +} + +const testActorHeaderName = "TS-Test-Actor" + +// RoundTrip implements [http.RoundTripper] by forwarding the request to the underlying transport +// and including the test actor's identity in the request headers. +func (rt *testRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + actorJSON, err := json.Marshal(&rt.actor) + if err != nil { + // An [http.RoundTripper] must always close the request body, including on error. + if r.Body != nil { + r.Body.Close() + } + return nil, err + } + + r = r.Clone(r.Context()) + r.Header.Set(testActorHeaderName, string(actorJSON)) + return rt.transport.RoundTrip(r) +} + +// extractActorFromHeader extracts a test actor from the specified request headers. 
+func extractActorFromHeader(h http.Header) (*ipnauth.TestActor, error) { + actorJSON := h.Get(testActorHeaderName) + if actorJSON == "" { + return nil, errors.New("missing Test-Actor header") + } + actor := &ipnauth.TestActor{} + if err := json.Unmarshal([]byte(actorJSON), &actor); err != nil { + return nil, fmt.Errorf("invalid Test-Actor header: %v", err) + } + return actor, nil +} + +type newControlClientFn func(tb testing.TB, opts controlclient.Options) controlclient.Client + +func newLocalBackendWithTestControl(tb testing.TB, newControl newControlClientFn, enableLogging bool) *ipnlocal.LocalBackend { + tb.Helper() + + sys := &tsd.System{} + store := &mem.Store{} + sys.Set(store) + + logf := testLogger(tb, enableLogging) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) + if err != nil { + tb.Fatalf("NewFakeUserspaceEngine: %v", err) + } + tb.Cleanup(e.Close) + sys.Set(e) + + b, err := ipnlocal.NewLocalBackend(logf, logid.PublicID{}, sys, 0) + if err != nil { + tb.Fatalf("NewLocalBackend: %v", err) + } + tb.Cleanup(b.Shutdown) + b.DisablePortMapperForTest() + + b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { + return newControl(tb, opts), nil + }) + return b +} + +func newUnreachableControlClient(tb testing.TB, opts controlclient.Options) controlclient.Client { + tb.Helper() + opts.ServerURL = "https://127.0.0.1:1" + cc, err := controlclient.New(opts) + if err != nil { + tb.Fatal(err) + } + return cc +} + +// newTestContextWithActor returns a new context that carries the identity +// of the specified actor and can be used for testing. +// It can be retrieved with [actorFromContext]. +func newTestContextWithActor(ctx context.Context, actor ipnauth.Actor) context.Context { + return actorKey.WithValue(ctx, actorOrError{actor: actor}) +} From f33f5f99c00f187644bf807b5336e2008e0228b7 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 13 Jan 2025 17:42:12 -0600 Subject: [PATCH 0327/1708] ipn/{ipnlocal,ipnserver}: remove redundant (*LocalBackend).ResetForClientDisconnect In this commit, we add a failing test to verify that ipn/ipnserver.Server correctly sets and unsets the current user when two different users connect sequentially (A connects, A disconnects, B connects, B disconnects). We then fix the test by updating (*ipn/ipnserver.Server).addActiveHTTPRequest to avoid calling (*LocalBackend).ResetForClientDisconnect again after a new user has connected and been set as the current user with (*LocalBackend).SetCurrentUser(). Since ipn/ipnserver.Server does not allow simultaneous connections from different Windows users and relies on the LocalBackend's current user, and since we already reset the LocalBackend's state by calling ResetForClientDisconnect when the last active request completes (indicating the server is idle and can accept connections from any Windows user), it is unnecessary to track the last connected user on the ipnserver.Server side or call ResetForClientDisconnect again when the user changes. Additionally, the second call to ResetForClientDisconnect occurs after the new user has been set as the current user, resetting the correct state for the new user instead of the old state of the now-disconnected user, causing issues. 
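To make the intended sequencing concrete, here is a toy sketch of the behavior this change leaves in place (backend below is a stand-in, not the real ipnlocal.LocalBackend): profile state is reset only when the effective user ID actually changes, and a client disconnect is handled by the server going idle rather than by a second reset on the next connection.

    package main

    import "fmt"

    // backend is a toy stand-in used only to illustrate the
    // reset-on-user-change behavior described above.
    type backend struct {
        currentUID string
    }

    func (b *backend) SetCurrentUser(uid string) {
        if b.currentUID == uid {
            return // same user: nothing to reset
        }
        fmt.Printf("user %q -> %q: resetting profile state once\n", b.currentUID, uid)
        b.currentUID = uid
    }

    func main() {
        b := &backend{}
        b.SetCurrentUser("S-1-5-21-A") // UserA connects
        b.SetCurrentUser("S-1-5-21-A") // another request from UserA: no reset
        b.SetCurrentUser("S-1-5-21-B") // UserB connects after UserA disconnects: one reset
    }
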
Updates tailscale/corp#25804 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 26 ++++++++++++-------------- ipn/ipnserver/server.go | 23 +---------------------- ipn/ipnserver/server_test.go | 29 +++++++++++++++++++++++++++++ 3 files changed, 42 insertions(+), 36 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 576f01b6b..c506f1376 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3620,7 +3620,7 @@ func (b *LocalBackend) shouldUploadServices() bool { } // SetCurrentUser is used to implement support for multi-user systems (only -// Windows 2022-11-25). On such systems, the uid is used to determine which +// Windows 2022-11-25). On such systems, the actor is used to determine which // user's state should be used. The current user is maintained by active // connections open to the backend. // @@ -3634,11 +3634,8 @@ func (b *LocalBackend) shouldUploadServices() bool { // unattended mode. The user must disable unattended mode before the user can be // changed. // -// On non-multi-user systems, the user should be set to nil. -// -// SetCurrentUser returns the ipn.WindowsUserID associated with the user -// when successful. -func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) (ipn.WindowsUserID, error) { +// On non-multi-user systems, the actor should be set to nil. +func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { var uid ipn.WindowsUserID if actor != nil { uid = actor.UserID() @@ -3647,16 +3644,17 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) (ipn.WindowsUserID, e unlock := b.lockAndGetUnlock() defer unlock() - if b.pm.CurrentUserID() == uid { - return uid, nil + if actor != b.currentUser { + if c, ok := b.currentUser.(ipnauth.ActorCloser); ok { + c.Close() + } + b.currentUser = actor } - b.pm.SetCurrentUserID(uid) - if c, ok := b.currentUser.(ipnauth.ActorCloser); ok { - c.Close() + + if b.pm.CurrentUserID() != uid { + b.pm.SetCurrentUserID(uid) + b.resetForProfileChangeLockedOnEntry(unlock) } - b.currentUser = actor - b.resetForProfileChangeLockedOnEntry(unlock) - return uid, nil } // CurrentUserForTest returns the current user and the associated WindowsUserID. diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index 574d1a55c..c0e99f5d8 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -21,7 +21,6 @@ import ( "unicode" "tailscale.com/envknob" - "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/localapi" @@ -51,7 +50,6 @@ type Server struct { // mu guards the fields that follow. // lock order: mu, then LocalBackend.mu mu sync.Mutex - lastUserID ipn.WindowsUserID // tracks last userid; on change, Reset state for paranoia activeReqs map[*http.Request]ipnauth.Actor backendWaiter waiterSet // of LocalBackend waiters zeroReqWaiter waiterSet // of blockUntilZeroConnections waiters @@ -376,16 +374,6 @@ func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (o lb := s.mustBackend() - // If the connected user changes, reset the backend server state to make - // sure node keys don't leak between users. - var doReset bool - defer func() { - if doReset { - s.logf("identity changed; resetting server") - lb.ResetForClientDisconnect() - } - }() - s.mu.Lock() defer s.mu.Unlock() @@ -400,16 +388,7 @@ func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (o // Tell the LocalBackend about the identity we're now running as, // unless its the SYSTEM user. 
That user is not a real account and // doesn't have a home directory. - uid, err := lb.SetCurrentUser(actor) - if err != nil { - return nil, err - } - if s.lastUserID != uid { - if s.lastUserID != "" { - doReset = true - } - s.lastUserID = uid - } + lb.SetCurrentUser(actor) } } diff --git a/ipn/ipnserver/server_test.go b/ipn/ipnserver/server_test.go index 8a9324fab..7f6131328 100644 --- a/ipn/ipnserver/server_test.go +++ b/ipn/ipnserver/server_test.go @@ -158,6 +158,35 @@ func TestIPNAlreadyInUseOnWindows(t *testing.T) { server.checkCurrentUser(clientA.User) } +func TestSequentialOSUserSwitchingOnWindows(t *testing.T) { + enableLogging := false + setGOOSForTest(t, "windows") + + ctx := context.Background() + server := startDefaultTestIPNServer(t, ctx, enableLogging) + + connectDisconnectAsUser := func(name string) { + // User connects and starts watching the IPN bus. + client := server.getClientAs(name) + watcher, cancelWatcher := client.WatchIPNBus(ctx, 0) + defer cancelWatcher() + go pumpIPNBus(watcher) + + // It should be the current user from the LocalBackend's perspective... + server.checkCurrentUser(client.User) + // until it disconnects. + cancelWatcher() + server.blockWhileInUse(ctx) + // Now, the current user should be unset. + server.checkCurrentUser(nil) + } + + // UserA logs in, uses Tailscale for a bit, then logs out. + connectDisconnectAsUser("UserA") + // Same for UserB. + connectDisconnectAsUser("UserB") +} + func setGOOSForTest(tb testing.TB, goos string) { tb.Helper() envknob.Setenv("TS_DEBUG_FAKE_GOOS", goos) From 6fac2903e1146164b2a587fd36d53a083ac7e98c Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 13 Jan 2025 18:20:09 -0600 Subject: [PATCH 0328/1708] ipn/ipnserver: fix race condition where LocalBackend is reset after a different user connects In this commit, we add a failing test to verify that ipn/ipnserver.Server correctly sets and unsets the current user when two different clients send requests concurrently (A sends request, B sends request, A's request completes, B's request completes). The expectation is that the user who wins the race becomes the current user from the LocalBackend's perspective, remaining in this state until they disconnect, after which a different user should be able to connect and use the LocalBackend. We then fix the second of two bugs in (*Server).addActiveHTTPRequest, where a race condition causes the LocalBackend's state to be reset after a new client connects, instead of after the last active request of the previous client completes and the server becomes idle. Fixes tailscale/corp#25804 Signed-off-by: Nick Khyl --- ipn/ipnserver/server.go | 15 ++++---- ipn/ipnserver/server_test.go | 67 ++++++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+), 8 deletions(-) diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index c0e99f5d8..a69e43067 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -394,11 +394,14 @@ func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (o onDone = func() { s.mu.Lock() + defer s.mu.Unlock() delete(s.activeReqs, req) - remain := len(s.activeReqs) - s.mu.Unlock() + if len(s.activeReqs) != 0 { + // The server is not idle yet. 
+ return + } - if remain == 0 && s.resetOnZero { + if s.resetOnZero { if lb.InServerMode() { s.logf("client disconnected; staying alive in server mode") } else { @@ -408,11 +411,7 @@ func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (o } // Wake up callers waiting for the server to be idle: - if remain == 0 { - s.mu.Lock() - s.zeroReqWaiter.wakeAll() - s.mu.Unlock() - } + s.zeroReqWaiter.wakeAll() } return onDone, nil diff --git a/ipn/ipnserver/server_test.go b/ipn/ipnserver/server_test.go index 7f6131328..97a616db8 100644 --- a/ipn/ipnserver/server_test.go +++ b/ipn/ipnserver/server_test.go @@ -12,6 +12,7 @@ import ( "net/http" "net/http/httptest" "runtime" + "strconv" "sync" "sync/atomic" "testing" @@ -187,6 +188,72 @@ func TestSequentialOSUserSwitchingOnWindows(t *testing.T) { connectDisconnectAsUser("UserB") } +func TestConcurrentOSUserSwitchingOnWindows(t *testing.T) { + enableLogging := false + setGOOSForTest(t, "windows") + + ctx := context.Background() + server := startDefaultTestIPNServer(t, ctx, enableLogging) + + connectDisconnectAsUser := func(name string) { + // User connects and starts watching the IPN bus. + client := server.getClientAs(name) + watcher, cancelWatcher := client.WatchIPNBus(ctx, ipn.NotifyInitialState) + defer cancelWatcher() + + runtime.Gosched() + + // Get the current user from the LocalBackend's perspective + // as soon as we're connected. + gotUID, gotActor := server.mustBackend().CurrentUserForTest() + + // Wait for the first notification to arrive. + // It will either be the initial state we've requested via [ipn.NotifyInitialState], + // returned by an actual handler, or a "fake" notification sent by the server + // itself to indicate that it is being used by someone else. + n, err := watcher.Next() + if err != nil { + t.Fatal(err) + } + + // If our user lost the race and the IPN is in use by another user, + // we should just return. For the sake of this test, we're not + // interested in waiting for the server to become idle. + if n.State != nil && *n.State == ipn.InUseOtherUser { + return + } + + // Otherwise, our user should have been the current user since the time we connected. + if gotUID != client.User.UID { + t.Errorf("CurrentUser(Initial): got UID %q; want %q", gotUID, client.User.UID) + return + } + if gotActor, ok := gotActor.(*ipnauth.TestActor); !ok || *gotActor != *client.User { + t.Errorf("CurrentUser(Initial): got %v; want %v", gotActor, client.User) + return + } + + // And should still be the current user (as they're still connected)... + server.checkCurrentUser(client.User) + } + + numIterations := 10 + for range numIterations { + numGoRoutines := 100 + var wg sync.WaitGroup + wg.Add(numGoRoutines) + for i := range numGoRoutines { + // User logs in, uses Tailscale for a bit, then logs out + // in parallel with other users doing the same. 
+ go func() { + defer wg.Done() + connectDisconnectAsUser("User-" + strconv.Itoa(i)) + }() + } + wg.Wait() + } +} + func setGOOSForTest(tb testing.TB, goos string) { tb.Helper() envknob.Setenv("TS_DEBUG_FAKE_GOOS", goos) From 2ac189800c4d2eefe182ee55222c61629ccd31da Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 14 Jan 2025 16:50:04 -0600 Subject: [PATCH 0329/1708] client/tailscale: fix typo in comment Updates #cleanup Signed-off-by: Nick Khyl --- client/tailscale/localclient.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/tailscale/localclient.go b/client/tailscale/localclient.go index baa211d1f..f440b19a8 100644 --- a/client/tailscale/localclient.go +++ b/client/tailscale/localclient.go @@ -62,7 +62,7 @@ type LocalClient struct { // machine's tailscaled or equivalent. If nil, a default is used. Dial func(ctx context.Context, network, addr string) (net.Conn, error) - // Transport optionally specified an alternate [http.RoundTripper] + // Transport optionally specifies an alternate [http.RoundTripper] // used to execute HTTP requests. If nil, a default [http.Transport] is used, // potentially with custom dialing logic from [Dial]. // It is primarily used for testing. From 6364b5f1e0f643cabefec3534e3b4e5b5847ae3b Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 10 Jan 2025 14:17:16 -0800 Subject: [PATCH 0330/1708] net/netmon: trim IPv6 endpoints in already routable subnets We have observed some clients with extremely large lists of IPv6 endpoints, in some cases from subnets where the machine also has the zero address for a whole /48 with then arbitrary addresses additionally assigned within that /48. It is in general unnecessary for reachability to report all of these addresses, typically only one will be necessary for reachability. We report two, to cover some other common cases such as some styles of IPv6 private address rotations. Updates tailscale/corp#25850 Signed-off-by: James Tucker --- net/netmon/state.go | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/net/netmon/state.go b/net/netmon/state.go index d9b360f5e..a612dd06d 100644 --- a/net/netmon/state.go +++ b/net/netmon/state.go @@ -19,8 +19,14 @@ import ( "tailscale.com/net/netaddr" "tailscale.com/net/tsaddr" "tailscale.com/net/tshttpproxy" + "tailscale.com/util/mak" ) +// forceAllIPv6Endpoints is a debug knob that when set forces the client to +// report all IPv6 endpoints rather than trim endpoints that are siblings on the +// same interface and subnet. +var forceAllIPv6Endpoints = envknob.RegisterBool("TS_DEBUG_FORCE_ALL_IPV6_ENDPOINTS") + // LoginEndpointForProxyDetermination is the URL used for testing // which HTTP proxy the system should use. var LoginEndpointForProxyDetermination = "https://controlplane.tailscale.com/" @@ -65,6 +71,7 @@ func LocalAddresses() (regular, loopback []netip.Addr, err error) { if err != nil { return nil, nil, err } + var subnets map[netip.Addr]int for _, a := range addrs { switch v := a.(type) { case *net.IPNet: @@ -102,7 +109,15 @@ func LocalAddresses() (regular, loopback []netip.Addr, err error) { if ip.Is4() { regular4 = append(regular4, ip) } else { - regular6 = append(regular6, ip) + curMask, _ := netip.AddrFromSlice(v.IP.Mask(v.Mask)) + // Limit the number of addresses reported per subnet for + // IPv6, as we have seen some nodes with extremely large + // numbers of assigned addresses being carved out of + // same-subnet allocations. 
+ if forceAllIPv6Endpoints() || subnets[curMask] < 2 { + regular6 = append(regular6, ip) + } + mak.Set(&subnets, curMask, subnets[curMask]+1) } } } From 7ecb69e32e003a15d093333980e1acaf0eacc8e2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 14 Jan 2025 15:10:15 -0800 Subject: [PATCH 0331/1708] tailcfg,control/controlclient: treat nil AllowedIPs as Addresses [capver 112] Updates #14635 Change-Id: I21e2bd1ec4eb384eb7a3fc8379f0788a684893f3 Signed-off-by: Brad Fitzpatrick --- control/controlclient/map.go | 4 ++++ control/controlclient/map_test.go | 32 +++++++++++++++++++++++++++++++ tailcfg/tailcfg.go | 18 +++++++++++++---- 3 files changed, 50 insertions(+), 4 deletions(-) diff --git a/control/controlclient/map.go b/control/controlclient/map.go index d5fd84c6d..13b11d6df 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -241,6 +241,10 @@ func upgradeNode(n *tailcfg.Node) { } n.LegacyDERPString = "" } + + if n.AllowedIPs == nil { + n.AllowedIPs = slices.Clone(n.Addresses) + } } func (ms *mapSession) tryHandleIncrementally(res *tailcfg.MapResponse) bool { diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index 9c8c0c3aa..09441d066 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -1007,10 +1007,16 @@ func TestPatchifyPeersChanged(t *testing.T) { } func TestUpgradeNode(t *testing.T) { + a1 := netip.MustParsePrefix("0.0.0.1/32") + a2 := netip.MustParsePrefix("0.0.0.2/32") + a3 := netip.MustParsePrefix("0.0.0.3/32") + a4 := netip.MustParsePrefix("0.0.0.4/32") + tests := []struct { name string in *tailcfg.Node want *tailcfg.Node + also func(t *testing.T, got *tailcfg.Node) // optional }{ { name: "nil", @@ -1037,6 +1043,29 @@ func TestUpgradeNode(t *testing.T) { in: &tailcfg.Node{HomeDERP: 2}, want: &tailcfg.Node{HomeDERP: 2}, }, + { + name: "implicit-allowed-ips-all-set", + in: &tailcfg.Node{Addresses: []netip.Prefix{a1, a2}, AllowedIPs: []netip.Prefix{a3, a4}}, + want: &tailcfg.Node{Addresses: []netip.Prefix{a1, a2}, AllowedIPs: []netip.Prefix{a3, a4}}, + }, + { + name: "implicit-allowed-ips-only-address-set", + in: &tailcfg.Node{Addresses: []netip.Prefix{a1, a2}}, + want: &tailcfg.Node{Addresses: []netip.Prefix{a1, a2}, AllowedIPs: []netip.Prefix{a1, a2}}, + also: func(t *testing.T, got *tailcfg.Node) { + if t.Failed() { + return + } + if &got.Addresses[0] == &got.AllowedIPs[0] { + t.Error("Addresses and AllowIPs alias the same memory") + } + }, + }, + { + name: "implicit-allowed-ips-set-empty-slice", + in: &tailcfg.Node{Addresses: []netip.Prefix{a1, a2}, AllowedIPs: []netip.Prefix{}}, + want: &tailcfg.Node{Addresses: []netip.Prefix{a1, a2}, AllowedIPs: []netip.Prefix{}}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -1048,6 +1077,9 @@ func TestUpgradeNode(t *testing.T) { if diff := cmp.Diff(tt.want, got); diff != "" { t.Errorf("wrong result (-want +got):\n%s", diff) } + if tt.also != nil { + tt.also(t, got) + } }) } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 76945ec10..9b26e8883 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -154,7 +154,8 @@ type CapabilityVersion int // - 109: 2024-11-18: Client supports filtertype.Match.SrcCaps (issue #12542) // - 110: 2024-12-12: removed never-before-used Tailscale SSH public key support (#14373) // - 111: 2025-01-14: Client supports a peer having Node.HomeDERP (issue #14636) -const CurrentCapabilityVersion CapabilityVersion = 111 +// - 112: 2025-01-14: Client interprets AllowedIPs of nil as 
meaning same as Addresses +const CurrentCapabilityVersion CapabilityVersion = 112 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -343,9 +344,18 @@ type Node struct { KeySignature tkatype.MarshaledSignature `json:",omitempty"` Machine key.MachinePublic DiscoKey key.DiscoPublic - Addresses []netip.Prefix // IP addresses of this Node directly - AllowedIPs []netip.Prefix // range of IP addresses to route to this node - Endpoints []netip.AddrPort `json:",omitempty"` // IP+port (public via STUN, and local LANs) + + // Addresses are the IP addresses of this Node directly. + Addresses []netip.Prefix + + // AllowedIPs are the IP ranges to route to this node. + // + // As of CapabilityVersion 112, this may be nil (null or undefined) on the wire + // to mean the same as Addresses. Internally, it is always filled in with + // its possibly-implicit value. + AllowedIPs []netip.Prefix + + Endpoints []netip.AddrPort `json:",omitempty"` // IP+port (public via STUN, and local LANs) // LegacyDERPString is this node's home LegacyDERPString region ID integer, but shoved into an // IP:port string for legacy reasons. The IP address is always "127.3.3.40" From db05e83efc4c2e8e12fb8f16665827986839b381 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Tue, 7 Jan 2025 05:34:07 -0600 Subject: [PATCH 0332/1708] cmd/derper: support explicit configuration of mesh dial hosts The --mesh-with flag now supports the specification of hostname tuples like derp1a.tailscale.com/derp1a-vpc.tailscale.com, which instructs derp to mesh with host 'derp1a.tailscale.com' but dial TCP connections to 'derp1a-vpc.tailscale.com'. For backwards compatibility, --mesh-with still supports individual hostnames. The logic which attempts to auto-discover '[host]-vpc.tailscale.com' dial hosts has been removed. Updates tailscale/corp#25653 Signed-off-by: Percy Wegmann --- cmd/derper/derper.go | 2 +- cmd/derper/mesh.go | 57 +++++++++++++++++++++----------------------- 2 files changed, 28 insertions(+), 31 deletions(-) diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 6e24e0ab1..46ff644b2 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -63,7 +63,7 @@ var ( runDERP = flag.Bool("derp", true, "whether to run a DERP server. The only reason to set this false is if you're decommissioning a server but want to keep its bootstrap DNS functionality still running.") meshPSKFile = flag.String("mesh-psk-file", defaultMeshPSKFile(), "if non-empty, path to file containing the mesh pre-shared key file. It should contain some hex string; whitespace is trimmed.") - meshWith = flag.String("mesh-with", "", "optional comma-separated list of hostnames to mesh with; the server's own hostname can be in the list") + meshWith = flag.String("mesh-with", "", "optional comma-separated list of hostnames to mesh with; the server's own hostname can be in the list. If an entry contains a slash, the second part names a hostname to be used when dialing the target.") bootstrapDNS = flag.String("bootstrap-dns-names", "", "optional comma-separated list of hostnames to make available at /bootstrap-dns") unpublishedDNS = flag.String("unpublished-bootstrap-dns-names", "", "optional comma-separated list of hostnames to make available at /bootstrap-dns and not publish in the list. 
If an entry contains a slash, the second part names a DNS record to poll for its TXT record with a `0` to `100` value for rollout percentage.") verifyClients = flag.Bool("verify-clients", false, "verify clients to this DERP server through a local tailscaled instance.") diff --git a/cmd/derper/mesh.go b/cmd/derper/mesh.go index c4218dd94..1d8e3ef93 100644 --- a/cmd/derper/mesh.go +++ b/cmd/derper/mesh.go @@ -10,7 +10,6 @@ import ( "log" "net" "strings" - "time" "tailscale.com/derp" "tailscale.com/derp/derphttp" @@ -25,15 +24,28 @@ func startMesh(s *derp.Server) error { if !s.HasMeshKey() { return errors.New("--mesh-with requires --mesh-psk-file") } - for _, host := range strings.Split(*meshWith, ",") { - if err := startMeshWithHost(s, host); err != nil { + for _, hostTuple := range strings.Split(*meshWith, ",") { + if err := startMeshWithHost(s, hostTuple); err != nil { return err } } return nil } -func startMeshWithHost(s *derp.Server, host string) error { +func startMeshWithHost(s *derp.Server, hostTuple string) error { + var host string + var dialHost string + hostParts := strings.Split(hostTuple, "/") + if len(hostParts) > 2 { + return fmt.Errorf("too many components in host tuple %q", hostTuple) + } + host = hostParts[0] + if len(hostParts) == 2 { + dialHost = hostParts[1] + } else { + dialHost = hostParts[0] + } + logf := logger.WithPrefix(log.Printf, fmt.Sprintf("mesh(%q): ", host)) netMon := netmon.NewStatic() // good enough for cmd/derper; no need for netns fanciness c, err := derphttp.NewClient(s.PrivateKey(), "https://"+host+"/derp", logf, netMon) @@ -43,35 +55,20 @@ func startMeshWithHost(s *derp.Server, host string) error { c.MeshKey = s.MeshKey() c.WatchConnectionChanges = true - // For meshed peers within a region, connect via VPC addresses. 
- c.SetURLDialer(func(ctx context.Context, network, addr string) (net.Conn, error) { - host, port, err := net.SplitHostPort(addr) - if err != nil { - logf("failed to split %q: %v", addr, err) - return nil, err - } + logf("will dial %q for %q", dialHost, host) + if dialHost != host { var d net.Dialer - var r net.Resolver - if base, ok := strings.CutSuffix(host, ".tailscale.com"); ok && port == "443" { - subCtx, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - vpcHost := base + "-vpc.tailscale.com" - ips, err := r.LookupIP(subCtx, "ip", vpcHost) + c.SetURLDialer(func(ctx context.Context, network, addr string) (net.Conn, error) { + _, port, err := net.SplitHostPort(addr) if err != nil { - logf("failed to resolve %v: %v", vpcHost, err) + logf("failed to split %q: %v", addr, err) + return nil, err } - if len(ips) > 0 { - vpcAddr := net.JoinHostPort(ips[0].String(), port) - c, err := d.DialContext(subCtx, network, vpcAddr) - if err == nil { - logf("connected to %v (%v) instead of %v", vpcHost, ips[0], base) - return c, nil - } - logf("failed to connect to %v (%v): %v; trying non-VPC route", vpcHost, ips[0], err) - } - } - return d.DialContext(ctx, network, addr) - }) + dialAddr := net.JoinHostPort(dialHost, port) + logf("dialing %q instead of %q", dialAddr, addr) + return d.DialContext(ctx, network, dialAddr) + }) + } add := func(m derp.PeerPresentMessage) { s.AddPacketForwarder(m.Key, c) } remove := func(m derp.PeerGoneMessage) { s.RemovePacketForwarder(m.Peer, c) } From beb951c74400f108544f555e17a31a2da662ef48 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 10:53:38 -0700 Subject: [PATCH 0333/1708] .github: Bump actions/setup-go from 5.1.0 to 5.2.0 (#14391) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.1.0 to 5.2.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed...3041bf56c941b39c61721a86cd11f3bb1338122a) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/golangci-lint.yml | 2 +- .github/workflows/test.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index ba21e8fe9..6f551f140 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -49,7 +49,7 @@ jobs: # Install a more recent Go that understands modern go.mod content. 
- name: Install Go - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 + uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 with: go-version-file: go.mod diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 6630e8de8..9f1f2b9d1 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -25,7 +25,7 @@ jobs: steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 + - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 with: go-version-file: go.mod cache: false diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d4c73ab7c..92ef57b50 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -153,7 +153,7 @@ jobs: uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install Go - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 + uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 with: go-version-file: go.mod cache: false From 3431ab17202ed84ba5f9048c9f5afb9b7dfc63d6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 10:54:34 -0700 Subject: [PATCH 0334/1708] .github: Bump github/codeql-action from 3.27.6 to 3.28.1 (#14618) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.27.6 to 3.28.1. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/aa578102511db1f4524ed59b8cc2bae4f6e88195...b6a472f63d85b9c78a3ac5e89422239fc15e9b3c) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 6f551f140..605f0939b 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@aa578102511db1f4524ed59b8cc2bae4f6e88195 # v3.27.6 + uses: github/codeql-action/init@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c # v3.28.1 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@aa578102511db1f4524ed59b8cc2bae4f6e88195 # v3.27.6 + uses: github/codeql-action/autobuild@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c # v3.28.1 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@aa578102511db1f4524ed59b8cc2bae4f6e88195 # v3.27.6 + uses: github/codeql-action/analyze@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c # v3.28.1 From fcf90260cef795fc4f4ce98d425da6c69070eecb Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Mon, 13 Jan 2025 13:02:47 -0700 Subject: [PATCH 0335/1708] atomicfile: use ReplaceFile on Windows so that attributes and ACLs are preserved I moved the actual rename into separate, GOOS-specific files. On non-Windows, we do a simple os.Rename. On Windows, we first try ReplaceFile with a fallback to os.Rename if the target file does not exist. ReplaceFile is the recommended way to rename the file in this use case, as it preserves attributes and ACLs set on the target file. Updates #14428 Signed-off-by: Aaron Klotz --- atomicfile/atomicfile.go | 7 +- atomicfile/atomicfile_notwindows.go | 14 +++ atomicfile/atomicfile_windows.go | 33 ++++++ atomicfile/atomicfile_windows_test.go | 146 ++++++++++++++++++++++++++ atomicfile/mksyscall.go | 8 ++ atomicfile/zsyscall_windows.go | 52 +++++++++ cmd/derper/depaware.txt | 2 +- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- 10 files changed, 261 insertions(+), 7 deletions(-) create mode 100644 atomicfile/atomicfile_notwindows.go create mode 100644 atomicfile/atomicfile_windows.go create mode 100644 atomicfile/atomicfile_windows_test.go create mode 100644 atomicfile/mksyscall.go create mode 100644 atomicfile/zsyscall_windows.go diff --git a/atomicfile/atomicfile.go b/atomicfile/atomicfile.go index 5c18e85a8..b3c8c93da 100644 --- a/atomicfile/atomicfile.go +++ b/atomicfile/atomicfile.go @@ -15,8 +15,9 @@ import ( ) // WriteFile writes data to filename+some suffix, then renames it into filename. -// The perm argument is ignored on Windows. If the target filename already -// exists but is not a regular file, WriteFile returns an error. +// The perm argument is ignored on Windows, but if the target filename already +// exists then the target file's attributes and ACLs are preserved. If the target +// filename already exists but is not a regular file, WriteFile returns an error. 
func WriteFile(filename string, data []byte, perm os.FileMode) (err error) { fi, err := os.Stat(filename) if err == nil && !fi.Mode().IsRegular() { @@ -47,5 +48,5 @@ func WriteFile(filename string, data []byte, perm os.FileMode) (err error) { if err := f.Close(); err != nil { return err } - return os.Rename(tmpName, filename) + return rename(tmpName, filename) } diff --git a/atomicfile/atomicfile_notwindows.go b/atomicfile/atomicfile_notwindows.go new file mode 100644 index 000000000..1ce2bb8ac --- /dev/null +++ b/atomicfile/atomicfile_notwindows.go @@ -0,0 +1,14 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !windows + +package atomicfile + +import ( + "os" +) + +func rename(srcFile, destFile string) error { + return os.Rename(srcFile, destFile) +} diff --git a/atomicfile/atomicfile_windows.go b/atomicfile/atomicfile_windows.go new file mode 100644 index 000000000..c67762df2 --- /dev/null +++ b/atomicfile/atomicfile_windows.go @@ -0,0 +1,33 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package atomicfile + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func rename(srcFile, destFile string) error { + // Use replaceFile when possible to preserve the original file's attributes and ACLs. + if err := replaceFile(destFile, srcFile); err == nil || err != windows.ERROR_FILE_NOT_FOUND { + return err + } + // destFile doesn't exist. Just do a normal rename. + return os.Rename(srcFile, destFile) +} + +func replaceFile(destFile, srcFile string) error { + destFile16, err := windows.UTF16PtrFromString(destFile) + if err != nil { + return err + } + + srcFile16, err := windows.UTF16PtrFromString(srcFile) + if err != nil { + return err + } + + return replaceFileW(destFile16, srcFile16, nil, 0, nil, nil) +} diff --git a/atomicfile/atomicfile_windows_test.go b/atomicfile/atomicfile_windows_test.go new file mode 100644 index 000000000..4dec1493e --- /dev/null +++ b/atomicfile/atomicfile_windows_test.go @@ -0,0 +1,146 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package atomicfile + +import ( + "os" + "testing" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _SECURITY_RESOURCE_MANAGER_AUTHORITY = windows.SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 9}} + +// makeRandomSID generates a SID derived from a v4 GUID. +// This is basically the same algorithm used by browser sandboxes for generating +// random SIDs. 
+func makeRandomSID() (*windows.SID, error) { + guid, err := windows.GenerateGUID() + if err != nil { + return nil, err + } + + rids := *((*[4]uint32)(unsafe.Pointer(&guid))) + + var pSID *windows.SID + if err := windows.AllocateAndInitializeSid(&_SECURITY_RESOURCE_MANAGER_AUTHORITY, 4, rids[0], rids[1], rids[2], rids[3], 0, 0, 0, 0, &pSID); err != nil { + return nil, err + } + defer windows.FreeSid(pSID) + + // Make a copy that lives on the Go heap + return pSID.Copy() +} + +func getExistingFileSD(name string) (*windows.SECURITY_DESCRIPTOR, error) { + const infoFlags = windows.DACL_SECURITY_INFORMATION + return windows.GetNamedSecurityInfo(name, windows.SE_FILE_OBJECT, infoFlags) +} + +func getExistingFileDACL(name string) (*windows.ACL, error) { + sd, err := getExistingFileSD(name) + if err != nil { + return nil, err + } + + dacl, _, err := sd.DACL() + return dacl, err +} + +func addDenyACEForRandomSID(dacl *windows.ACL) (*windows.ACL, error) { + randomSID, err := makeRandomSID() + if err != nil { + return nil, err + } + + randomSIDTrustee := windows.TRUSTEE{nil, windows.NO_MULTIPLE_TRUSTEE, + windows.TRUSTEE_IS_SID, windows.TRUSTEE_IS_UNKNOWN, + windows.TrusteeValueFromSID(randomSID)} + + entries := []windows.EXPLICIT_ACCESS{ + { + windows.GENERIC_ALL, + windows.DENY_ACCESS, + windows.NO_INHERITANCE, + randomSIDTrustee, + }, + } + + return windows.ACLFromEntries(entries, dacl) +} + +func setExistingFileDACL(name string, dacl *windows.ACL) error { + return windows.SetNamedSecurityInfo(name, windows.SE_FILE_OBJECT, + windows.DACL_SECURITY_INFORMATION, nil, nil, dacl, nil) +} + +// makeOrigFileWithCustomDACL creates a new, temporary file with a custom +// DACL that we can check for later. It returns the name of the temporary +// file and the security descriptor for the file in SDDL format. +func makeOrigFileWithCustomDACL() (name, sddl string, err error) { + f, err := os.CreateTemp("", "foo*.tmp") + if err != nil { + return "", "", err + } + name = f.Name() + if err := f.Close(); err != nil { + return "", "", err + } + f = nil + defer func() { + if err != nil { + os.Remove(name) + } + }() + + dacl, err := getExistingFileDACL(name) + if err != nil { + return "", "", err + } + + // Add a harmless, deny-only ACE for a random SID that isn't used for anything + // (but that we can check for later). + dacl, err = addDenyACEForRandomSID(dacl) + if err != nil { + return "", "", err + } + + if err := setExistingFileDACL(name, dacl); err != nil { + return "", "", err + } + + sd, err := getExistingFileSD(name) + if err != nil { + return "", "", err + } + + return name, sd.String(), nil +} + +func TestPreserveSecurityInfo(t *testing.T) { + // Make a test file with a custom ACL. + origFileName, want, err := makeOrigFileWithCustomDACL() + if err != nil { + t.Fatalf("makeOrigFileWithCustomDACL returned %v", err) + } + t.Cleanup(func() { + os.Remove(origFileName) + }) + + if err := WriteFile(origFileName, []byte{}, 0); err != nil { + t.Fatalf("WriteFile returned %v", err) + } + + // We expect origFileName's security descriptor to be unchanged despite + // the WriteFile call. 
+ sd, err := getExistingFileSD(origFileName) + if err != nil { + t.Fatalf("getExistingFileSD(%q) returned %v", origFileName, err) + } + + if got := sd.String(); got != want { + t.Errorf("security descriptor comparison failed: got %q, want %q", got, want) + } +} diff --git a/atomicfile/mksyscall.go b/atomicfile/mksyscall.go new file mode 100644 index 000000000..d8951a77c --- /dev/null +++ b/atomicfile/mksyscall.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package atomicfile + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go mksyscall.go + +//sys replaceFileW(replaced *uint16, replacement *uint16, backup *uint16, flags uint32, exclude unsafe.Pointer, reserved unsafe.Pointer) (err error) [int32(failretval)==0] = kernel32.ReplaceFileW diff --git a/atomicfile/zsyscall_windows.go b/atomicfile/zsyscall_windows.go new file mode 100644 index 000000000..f2f0b6d08 --- /dev/null +++ b/atomicfile/zsyscall_windows.go @@ -0,0 +1,52 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package atomicfile + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procReplaceFileW = modkernel32.NewProc("ReplaceFileW") +) + +func replaceFileW(replaced *uint16, replacement *uint16, backup *uint16, flags uint32, exclude unsafe.Pointer, reserved unsafe.Pointer) (err error) { + r1, _, e1 := syscall.Syscall6(procReplaceFileW.Addr(), 6, uintptr(unsafe.Pointer(replaced)), uintptr(unsafe.Pointer(replacement)), uintptr(unsafe.Pointer(backup)), uintptr(flags), uintptr(exclude), uintptr(reserved)) + if int32(r1) == 0 { + err = errnoErr(e1) + } + return +} diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index d4b406d9d..729122d79 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -85,7 +85,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa google.golang.org/protobuf/runtime/protoimpl from github.com/prometheus/client_model/go+ google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version - tailscale.com/atomicfile from tailscale.com/cmd/derper+ + 💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+ tailscale.com/client/tailscale from tailscale.com/derp tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale tailscale.com/derp from tailscale.com/cmd/derper+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index f757cda18..cb02038e3 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -643,7 +643,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ sigs.k8s.io/yaml/goyaml.v2 from sigs.k8s.io/yaml tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal - tailscale.com/atomicfile from tailscale.com/ipn+ + 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/tailscale from tailscale.com/client/web+ tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index e894e0674..9ccd6eebd 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -69,7 +69,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep software.sslmate.com/src/go-pkcs12 from tailscale.com/cmd/tailscale/cli software.sslmate.com/src/go-pkcs12/internal/rc2 from software.sslmate.com/src/go-pkcs12 tailscale.com from tailscale.com/version - tailscale.com/atomicfile from tailscale.com/cmd/tailscale/cli+ + 💣 tailscale.com/atomicfile from tailscale.com/cmd/tailscale/cli+ tailscale.com/client/tailscale from tailscale.com/client/web+ tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/cmd/tailscale/cli diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 19254b616..8af347319 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -232,7 +232,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal - tailscale.com/atomicfile from tailscale.com/ipn+ + 💣 tailscale.com/atomicfile from tailscale.com/ipn+ LD tailscale.com/chirp from tailscale.com/cmd/tailscaled tailscale.com/client/tailscale from tailscale.com/client/web+ 
tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ From 1b303ee5baef3ddab40be4d1c2b8caa284fd811d Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 15 Jan 2025 13:32:13 -0800 Subject: [PATCH 0336/1708] ipn/ipnlocal: re-advertise appc routes on startup (#14609) There's at least one example of stored routes and advertised routes getting out of sync. I don't know how they got there yet, but this would backfill missing advertised routes on startup from stored routes. Also add logging in LocalBackend.AdvertiseRoute to record when new routes actually get put into prefs. Updates #14606 Signed-off-by: Andrew Lytvynov --- ipn/ipnlocal/local.go | 35 +++++++++++++++++++++++++--- ipn/ipnlocal/local_test.go | 47 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+), 3 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c506f1376..92d2f123f 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4319,6 +4319,33 @@ func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs i b.appConnector.UpdateDomainsAndRoutes(domains, routes) } +func (b *LocalBackend) readvertiseAppConnectorRoutes() { + var domainRoutes map[string][]netip.Addr + b.mu.Lock() + if b.appConnector != nil { + domainRoutes = b.appConnector.DomainRoutes() + } + b.mu.Unlock() + if domainRoutes == nil { + return + } + + // Re-advertise the stored routes, in case stored state got out of + // sync with previously advertised routes in prefs. + var prefixes []netip.Prefix + for _, ips := range domainRoutes { + for _, ip := range ips { + prefixes = append(prefixes, netip.PrefixFrom(ip, ip.BitLen())) + } + } + // Note: AdvertiseRoute will trim routes that are already + // advertised, so if everything is already being advertised this is + // a noop. + if err := b.AdvertiseRoute(prefixes...); err != nil { + b.logf("error advertising stored app connector routes: %v", err) + } +} + // authReconfig pushes a new configuration into wgengine, if engine // updates are not currently blocked, based on the cached netmap and // user prefs. @@ -4397,6 +4424,7 @@ func (b *LocalBackend) authReconfig() { } b.initPeerAPIListener() + b.readvertiseAppConnectorRoutes() } // shouldUseOneCGNATRoute reports whether we should prefer to make one big @@ -7111,7 +7139,7 @@ var ErrDisallowedAutoRoute = errors.New("route is not allowed") // If the route is disallowed, ErrDisallowedAutoRoute is returned. func (b *LocalBackend) AdvertiseRoute(ipps ...netip.Prefix) error { finalRoutes := b.Prefs().AdvertiseRoutes().AsSlice() - newRoutes := false + var newRoutes []netip.Prefix for _, ipp := range ipps { if !allowedAutoRoute(ipp) { @@ -7127,13 +7155,14 @@ func (b *LocalBackend) AdvertiseRoute(ipps ...netip.Prefix) error { } finalRoutes = append(finalRoutes, ipp) - newRoutes = true + newRoutes = append(newRoutes, ipp) } - if !newRoutes { + if len(newRoutes) == 0 { return nil } + b.logf("advertising new app connector routes: %v", newRoutes) _, err := b.EditPrefs(&ipn.MaskedPrefs{ Prefs: ipn.Prefs{ AdvertiseRoutes: finalRoutes, diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index f9a967bea..5e8a3172c 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1501,6 +1501,53 @@ func TestReconfigureAppConnector(t *testing.T) { } } +func TestBackfillAppConnectorRoutes(t *testing.T) { + // Create backend with an empty app connector. 
+ b := newTestBackend(t) + if err := b.Start(ipn.Options{}); err != nil { + t.Fatal(err) + } + if _, err := b.EditPrefs(&ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + AppConnector: ipn.AppConnectorPrefs{Advertise: true}, + }, + AppConnectorSet: true, + }); err != nil { + t.Fatal(err) + } + b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs) + + // Smoke check that AdvertiseRoutes doesn't have the test IP. + ip := netip.MustParseAddr("1.2.3.4") + routes := b.Prefs().AdvertiseRoutes().AsSlice() + if slices.Contains(routes, netip.PrefixFrom(ip, ip.BitLen())) { + t.Fatalf("AdvertiseRoutes %v on a fresh backend already contains advertised route for %v", routes, ip) + } + + // Store the test IP in profile data, but not in Prefs.AdvertiseRoutes. + b.ControlKnobs().AppCStoreRoutes.Store(true) + if err := b.storeRouteInfo(&appc.RouteInfo{ + Domains: map[string][]netip.Addr{ + "example.com": {ip}, + }, + }); err != nil { + t.Fatal(err) + } + + // Mimic b.authReconfigure for the app connector bits. + b.mu.Lock() + b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs) + b.mu.Unlock() + b.readvertiseAppConnectorRoutes() + + // Check that Prefs.AdvertiseRoutes got backfilled with routes stored in + // profile data. + routes = b.Prefs().AdvertiseRoutes().AsSlice() + if !slices.Contains(routes, netip.PrefixFrom(ip, ip.BitLen())) { + t.Fatalf("AdvertiseRoutes %v was not backfilled from stored app connector routes with %v", routes, ip) + } +} + func resolversEqual(t *testing.T, a, b []*dnstype.Resolver) bool { if a == nil && b == nil { return true From f023c8603a8b519846b567052119739774e5ac57 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 14 Jan 2025 19:36:27 -0600 Subject: [PATCH 0337/1708] types/lazy: fix flaky TestDeferAfterDo This test verifies, among other things, that init functions cannot be deferred after (*DeferredFuncs).Do has already been called and that all subsequent calls to (*DeferredFuncs).Defer return false. However, the initial implementation of this check was racy: by the time (*DeferredFuncs).Do returned, not all goroutines that successfully deferred an init function may have incremented the atomic variable tracking the number of deferred functions. As a result, the variable's value could differ immediately after (*DeferredFuncs).Do returned and after all goroutines had completed execution (i.e., after wg.Wait()). In this PR, we replace the original racy check with a different one. Although this new check is also racy, it can only produce false negatives. This means that if the test fails, it indicates an actual bug rather than a flaky test. Fixes #14039 Signed-off-by: Nick Khyl --- types/lazy/deferred.go | 9 ++++++++- types/lazy/deferred_test.go | 32 ++++++++++++++++++++++++++------ 2 files changed, 34 insertions(+), 7 deletions(-) diff --git a/types/lazy/deferred.go b/types/lazy/deferred.go index 964553cef..973082914 100644 --- a/types/lazy/deferred.go +++ b/types/lazy/deferred.go @@ -22,7 +22,14 @@ type DeferredInit struct { // until the owner's [DeferredInit.Do] method is called // for the first time. // -// DeferredFuncs is safe for concurrent use. +// DeferredFuncs is safe for concurrent use. The execution +// order of functions deferred by different goroutines is +// unspecified and must not be relied upon. +// However, functions deferred by the same goroutine are +// executed in the same relative order they were deferred. +// Warning: this is the opposite of the behavior of Go's +// defer statement, which executes deferred functions in +// reverse order. 
type DeferredFuncs struct { m sync.Mutex funcs []func() error diff --git a/types/lazy/deferred_test.go b/types/lazy/deferred_test.go index 9de16c67a..98cacbfce 100644 --- a/types/lazy/deferred_test.go +++ b/types/lazy/deferred_test.go @@ -205,16 +205,38 @@ func TestDeferredErr(t *testing.T) { } } +// TestDeferAfterDo checks all of the following: +// - Deferring a function before [DeferredInit.Do] is called should always succeed. +// - All successfully deferred functions are executed by the time [DeferredInit.Do] completes. +// - No functions can be deferred after [DeferredInit.Do] is called, meaning: +// - [DeferredInit.Defer] should return false. +// - The deferred function should not be executed. +// +// This test is intentionally racy as it attempts to defer functions from multiple goroutines +// and then calls [DeferredInit.Do] without waiting for them to finish. Waiting would alter +// the observable behavior and render the test pointless. func TestDeferAfterDo(t *testing.T) { var di DeferredInit var deferred, called atomic.Int32 + // deferOnce defers a test function once and fails the test + // if [DeferredInit.Defer] returns true after [DeferredInit.Do] + // has already been called and any deferred functions have been executed. + // It's called concurrently by multiple goroutines. deferOnce := func() bool { + // canDefer is whether it's acceptable for Defer to return true. + // (but not it necessarily must return true) + // If its func has run before, it's definitely not okay for it to + // accept more Defer funcs. + canDefer := called.Load() == 0 ok := di.Defer(func() error { called.Add(1) return nil }) if ok { + if !canDefer { + t.Error("An init function was deferred after DeferredInit.Do() was already called") + } deferred.Add(1) } return ok @@ -242,19 +264,17 @@ func TestDeferAfterDo(t *testing.T) { if err := di.Do(); err != nil { t.Fatalf("DeferredInit.Do() failed: %v", err) } - wantDeferred, wantCalled := deferred.Load(), called.Load() + // The number of called funcs should remain unchanged after [DeferredInit.Do] returns. + wantCalled := called.Load() if deferOnce() { t.Error("An init func was deferred after DeferredInit.Do() returned") } // Wait for the goroutines deferring init funcs to exit. - // No funcs should be deferred after DeferredInit.Do() has returned, - // so the deferred and called counters should remain unchanged. + // No funcs should be called after DeferredInit.Do() has returned, + // and the number of called funcs should be equal to the number of deferred funcs. wg.Wait() - if gotDeferred := deferred.Load(); gotDeferred != wantDeferred { - t.Errorf("An init func was deferred after DeferredInit.Do() returned. Got %d, want %d", gotDeferred, wantDeferred) - } if gotCalled := called.Load(); gotCalled != wantCalled { t.Errorf("An init func was called after DeferredInit.Do() returned. Got %d, want %d", gotCalled, wantCalled) } From d8b00e39ef5ee560d0ff27d38d3c64d34c2f7d22 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 15 Jan 2025 13:43:36 -0800 Subject: [PATCH 0338/1708] cmd/tailscaled: add some more depchecker dep tests As we look to add github.com/prometheus/client_golang/prometheus to more parts of the codebase, lock in that we don't use it in tailscaled, primarily for binary size reasons. 
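For reference, the guard is just a table of forbidden import paths checked from an ordinary Go test; a minimal sketch of the pattern (the Prometheus BadDeps entry is the one added in this change, while the surrounding file layout and the tstest/deptest import path are assumed from the existing TestDeps) is:

    package main

    import (
        "testing"

        "tailscale.com/tstest/deptest"
    )

    func TestDeps(t *testing.T) {
        deptest.DepChecker{
            GOOS:   "linux",
            GOARCH: "arm64",
            BadDeps: map[string]string{
                "github.com/prometheus/client_golang/prometheus": "use tailscale.com/metrics in tailscaled",
            },
        }.Check(t)
    }

Running something like "go test ./cmd/tailscaled -run TestDeps" exercises just this check.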
Updates #12614 Change-Id: I03c100d12a05019a22bdc23ce5c4df63d5a03ec6 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/tailscaled_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cmd/tailscaled/tailscaled_test.go b/cmd/tailscaled/tailscaled_test.go index 5045468d6..f36120f13 100644 --- a/cmd/tailscaled/tailscaled_test.go +++ b/cmd/tailscaled/tailscaled_test.go @@ -29,8 +29,10 @@ func TestDeps(t *testing.T) { GOOS: "linux", GOARCH: "arm64", BadDeps: map[string]string{ - "testing": "do not use testing package in production code", - "gvisor.dev/gvisor/pkg/hostarch": "will crash on non-4K page sizes; see https://github.com/tailscale/tailscale/issues/8658", + "testing": "do not use testing package in production code", + "gvisor.dev/gvisor/pkg/hostarch": "will crash on non-4K page sizes; see https://github.com/tailscale/tailscale/issues/8658", + "google.golang.org/protobuf/proto": "unexpected", + "github.com/prometheus/client_golang/prometheus": "use tailscale.com/metrics in tailscaled", }, }.Check(t) } From 62fb85785710f9249e943eb8f248facf28eda6f7 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 15 Jan 2025 14:22:14 -0600 Subject: [PATCH 0339/1708] ipn/ipnserver: fix TestConcurrentOSUserSwitchingOnWindows I made a last-minute change in #14626 to split a single loop that created 1_000 concurrent connections into an inner and outer loop that create 100 concurrent connections 10 times. This introduced a race because the last user's connection may still be active (from the server's perspective) when a new outer iteration begins. Since every new client gets a unique ClientID, but we reuse usernames and UIDs, the server may let a user in (as the UID matches, which is fine), but the test might then fail due to a ClientID mismatch: server_test.go:232: CurrentUser(Initial): got &{S-1-5-21-1-0-0-1001 User-4 Client-2 false false}; want &{S-1-5-21-1-0-0-1001 User-4 Client-114 false false} In this PR, we update (*testIPNServer).blockWhileInUse to check whether the server is currently busy and wait until it frees up. We then call blockWhileInUse at the end of each outer iteration so that the server is always in a known idle state at the beginning of the inner loop. We also check that the current user is not set when the server is idle. 
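The shape of the corrected wait is worth spelling out, since it is easy to get backwards: snapshot the busy state and the completion signal together, and only block if the server really is busy. A minimal sketch of that idea (illustrative only; the real helper uses the server's mu, activeReqs and zeroReqWaiter rather than the single idle channel shown here):

    package sketch

    import (
        "context"
        "sync"
    )

    type server struct {
        mu         sync.Mutex
        activeReqs map[uint64]bool
        idle       chan struct{} // assumed to be closed whenever activeReqs drains to zero
    }

    func (s *server) blockWhileInUse(ctx context.Context) error {
        // Read the busy flag and grab the notification channel under the same lock,
        // so the two cannot go out of sync between the check and the wait.
        s.mu.Lock()
        busy := len(s.activeReqs) != 0
        idle := s.idle
        s.mu.Unlock()
        if !busy {
            return ctx.Err()
        }
        select {
        case <-idle:
        case <-ctx.Done():
        }
        return ctx.Err()
    }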
Updates tailscale/corp#25804 Updates #14655 (found when working on it) Signed-off-by: Nick Khyl --- ipn/ipnserver/server_test.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/ipn/ipnserver/server_test.go b/ipn/ipnserver/server_test.go index 97a616db8..e77901c35 100644 --- a/ipn/ipnserver/server_test.go +++ b/ipn/ipnserver/server_test.go @@ -251,6 +251,12 @@ func TestConcurrentOSUserSwitchingOnWindows(t *testing.T) { }() } wg.Wait() + + if err := server.blockWhileInUse(ctx); err != nil { + t.Fatalf("blockWhileInUse: %v", err) + } + + server.checkCurrentUser(nil) } } @@ -346,7 +352,14 @@ func (s *testIPNServer) makeTestUser(name string, clientID string) *ipnauth.Test func (s *testIPNServer) blockWhileInUse(ctx context.Context) error { ready, cleanup := s.zeroReqWaiter.add(&s.mu, ctx) - <-ready + + s.mu.Lock() + busy := len(s.activeReqs) != 0 + s.mu.Unlock() + + if busy { + <-ready + } cleanup() return ctx.Err() } From 0481042738b6320fd328cfcd4998bdaae1c93534 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 15 Jan 2025 16:03:21 -0600 Subject: [PATCH 0340/1708] ipn/ipnserver: fix a deadlock in (*Server).blockWhileIdentityInUse If the server was in use at the time of the initial check, but disconnected and was removed from the activeReqs map by the time we registered a waiter, the ready channel will never be closed, resulting in a deadlock. To avoid this, we check whether the server is still busy after registering the wait. Fixes #14655 Signed-off-by: Nick Khyl --- ipn/ipnserver/server.go | 13 +++++++++- ipn/ipnserver/server_test.go | 46 ++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 1 deletion(-) diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index a69e43067..3d9c9e3d4 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -283,7 +283,18 @@ func (s *Server) blockWhileIdentityInUse(ctx context.Context, actor ipnauth.Acto for inUse() { // Check whenever the connection count drops down to zero. ready, cleanup := s.zeroReqWaiter.add(&s.mu, ctx) - <-ready + if inUse() { + // If the server was in use at the time of the initial check, + // but disconnected and was removed from the activeReqs map + // by the time we registered a waiter, the ready channel + // will never be closed, resulting in a deadlock. To avoid + // this, we can check again after registering the waiter. + // + // This method is planned for complete removal as part of the + // multi-user improvements in tailscale/corp#18342, + // and this approach should be fine as a temporary solution. + <-ready + } cleanup() if err := ctx.Err(); err != nil { return err diff --git a/ipn/ipnserver/server_test.go b/ipn/ipnserver/server_test.go index e77901c35..e56ae8dab 100644 --- a/ipn/ipnserver/server_test.go +++ b/ipn/ipnserver/server_test.go @@ -260,6 +260,52 @@ func TestConcurrentOSUserSwitchingOnWindows(t *testing.T) { } } +func TestBlockWhileIdentityInUse(t *testing.T) { + enableLogging := false + setGOOSForTest(t, "windows") + + ctx := context.Background() + server := startDefaultTestIPNServer(t, ctx, enableLogging) + + // connectWaitDisconnectAsUser connects as a user with the specified name + // and keeps the IPN bus watcher alive until the context is canceled. + // It returns a channel that is closed when done. 
+ connectWaitDisconnectAsUser := func(ctx context.Context, name string) <-chan struct{} { + client := server.getClientAs(name) + watcher, cancelWatcher := client.WatchIPNBus(ctx, 0) + + done := make(chan struct{}) + go func() { + defer cancelWatcher() + defer close(done) + for { + _, err := watcher.Next() + if err != nil { + // There's either an error or the request has been canceled. + break + } + } + }() + return done + } + + for range 100 { + // Connect as UserA, and keep the connection alive + // until disconnectUserA is called. + userAContext, disconnectUserA := context.WithCancel(ctx) + userADone := connectWaitDisconnectAsUser(userAContext, "UserA") + disconnectUserA() + // Check if userB can connect. Calling it directly increases + // the likelihood of triggering a deadlock due to a race condition + // in blockWhileIdentityInUse. But the issue also occurs during + // the normal execution path when UserB connects to the IPN server + // while UserA is disconnecting. + userB := server.makeTestUser("UserB", "ClientB") + server.blockWhileIdentityInUse(ctx, userB) + <-userADone + } +} + func setGOOSForTest(tb testing.TB, goos string) { tb.Helper() envknob.Setenv("TS_DEBUG_FAKE_GOOS", goos) From 84b0379dd5869bad00fb3c2d4f4cfe59c8204559 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 15 Jan 2025 15:47:26 -0800 Subject: [PATCH 0341/1708] prober: remove per-packet DERP pub key copying overheads (#14658) Updates tailscale/corp#25883 Signed-off-by: Jordan Whited --- prober/derp.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/prober/derp.go b/prober/derp.go index 870460d96..e811d41ff 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -925,6 +925,7 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT destinationAddrBytes := destinationAddr.AsSlice() scratch := make([]byte, 4) + toPubDERPKey := toc.SelfPublicKey() for { n, err := dev.Read(bufs, sizes, tunStartOffset) if err != nil { @@ -953,7 +954,7 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT copy(pkt[12:16], pkt[16:20]) copy(pkt[16:20], scratch) - if err := fromc.Send(toc.SelfPublicKey(), pkt); err != nil { + if err := fromc.Send(toPubDERPKey, pkt); err != nil { tunReadErrC <- err return } @@ -971,6 +972,7 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT buf := make([]byte, mtu+tunStartOffset) bufs := make([][]byte, 1) + fromDERPPubKey := fromc.SelfPublicKey() for { m, err := toc.Recv() if err != nil { @@ -979,7 +981,7 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT } switch v := m.(type) { case derp.ReceivedPacket: - if v.Source != fromc.SelfPublicKey() { + if v.Source != fromDERPPubKey { recvErrC <- fmt.Errorf("got data packet from unexpected source, %v", v.Source) return } From 00bd906797fe35d53db8fda24a36ba2bc3f34852 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 15 Jan 2025 16:28:49 -0800 Subject: [PATCH 0342/1708] prober: remove DERP pub key copying overheads in qd and non-tun measures (#14659) Updates tailscale/corp#25883 Signed-off-by: Jordan Whited --- prober/derp.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/prober/derp.go b/prober/derp.go index e811d41ff..995a69626 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -431,6 +431,7 @@ func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg. 
t := time.NewTicker(time.Second / time.Duration(packetsPerSecond)) defer t.Stop() + toDERPPubKey := toc.SelfPublicKey() seq := uint64(0) for { select { @@ -446,7 +447,7 @@ func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg. txRecordsMu.Unlock() binary.BigEndian.PutUint64(pkt, seq) seq++ - if err := fromc.Send(toc.SelfPublicKey(), pkt); err != nil { + if err := fromc.Send(toDERPPubKey, pkt); err != nil { sendErrC <- fmt.Errorf("sending packet %w", err) return } @@ -460,6 +461,7 @@ func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg. go func() { defer wg.Done() defer close(recvFinishedC) // to break out of 'select' below. + fromDERPPubKey := fromc.SelfPublicKey() for { m, err := toc.Recv() if err != nil { @@ -469,7 +471,7 @@ func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg. switch v := m.(type) { case derp.ReceivedPacket: now := time.Now() - if v.Source != fromc.SelfPublicKey() { + if v.Source != fromDERPPubKey { recvFinishedC <- fmt.Errorf("got data packet from unexpected source, %v", v.Source) return } @@ -767,9 +769,10 @@ func runDerpProbeNodePair(ctx context.Context, from, to *tailcfg.DERPNode, fromc // Send the packets. sendc := make(chan error, 1) go func() { + toDERPPubKey := toc.SelfPublicKey() for idx, pkt := range pkts { inFlight.AcquireContext(ctx) - if err := fromc.Send(toc.SelfPublicKey(), pkt); err != nil { + if err := fromc.Send(toDERPPubKey, pkt); err != nil { sendc <- fmt.Errorf("sending packet %d: %w", idx, err) return } @@ -781,6 +784,7 @@ func runDerpProbeNodePair(ctx context.Context, from, to *tailcfg.DERPNode, fromc go func() { defer close(recvc) // to break out of 'select' below. idx := 0 + fromDERPPubKey := fromc.SelfPublicKey() for { m, err := toc.Recv() if err != nil { @@ -790,7 +794,7 @@ func runDerpProbeNodePair(ctx context.Context, from, to *tailcfg.DERPNode, fromc switch v := m.(type) { case derp.ReceivedPacket: inFlight.Release() - if v.Source != fromc.SelfPublicKey() { + if v.Source != fromDERPPubKey { recvc <- fmt.Errorf("got data packet %d from unexpected source, %v", idx, v.Source) return } @@ -925,7 +929,7 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT destinationAddrBytes := destinationAddr.AsSlice() scratch := make([]byte, 4) - toPubDERPKey := toc.SelfPublicKey() + toDERPPubKey := toc.SelfPublicKey() for { n, err := dev.Read(bufs, sizes, tunStartOffset) if err != nil { @@ -954,7 +958,7 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT copy(pkt[12:16], pkt[16:20]) copy(pkt[16:20], scratch) - if err := fromc.Send(toPubDERPKey, pkt); err != nil { + if err := fromc.Send(toDERPPubKey, pkt); err != nil { tunReadErrC <- err return } From 2d1f6f18cc4f8ed1cb09aa6bee0a922219ba6aa6 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 16 Jan 2025 11:15:36 +0000 Subject: [PATCH 0343/1708] cmd/k8s-operator: require namespace config (#14648) Most users should not run into this because it's set in the helm chart and the deploy manifest, but if namespace is not set we get confusing authz errors because the kube client tries to fetch some namespaced resources as though they're cluster-scoped and reports permission denied. Try to detect namespace from the default projected volume, and otherwise fatal. 
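The intended fallback order is: use the configured namespace (OPERATOR_NAMESPACE) when present, otherwise read the projected service-account file, otherwise give up. A condensed sketch of that logic (the function name is hypothetical and error handling is simplified; the operator itself logs fatally rather than returning an error):

    package sketch

    import (
        "os"
        "strings"
    )

    // operatorNamespace is a hypothetical helper; the real change wires this
    // logic directly into the operator's startup path.
    func operatorNamespace() (string, error) {
        if ns := os.Getenv("OPERATOR_NAMESPACE"); ns != "" {
            return ns, nil
        }
        const namespaceFile = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
        b, err := os.ReadFile(namespaceFile)
        if err != nil {
            return "", err
        }
        return strings.TrimSpace(string(b)), nil
    }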
Fixes #cleanup Change-Id: I64b34191e440b61204b9ad30bbfa117abbbe09c3 Signed-off-by: Tom Proctor --- cmd/k8s-operator/operator.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index b24839082..7f8f94673 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -88,6 +88,15 @@ func main() { zlog := kzap.NewRaw(opts...).Sugar() logf.SetLogger(zapr.NewLogger(zlog.Desugar())) + if tsNamespace == "" { + const namespaceFile = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + b, err := os.ReadFile(namespaceFile) + if err != nil { + zlog.Fatalf("Could not get operator namespace from OPERATOR_NAMESPACE environment variable or default projected volume: %v", err) + } + tsNamespace = strings.TrimSpace(string(b)) + } + // The operator can run either as a plain operator or it can // additionally act as api-server proxy // https://tailscale.com/kb/1236/kubernetes-operator/?q=kubernetes#accessing-the-kubernetes-control-plane-using-an-api-server-proxy. From 7d73a38b40a57e39080eb8b74aed08c69c0fb562 Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Fri, 10 Jan 2025 14:42:11 -0700 Subject: [PATCH 0344/1708] net/dns: only populate OSConfig.Hosts when MagicDNS is enabled Previously we were doing this unconditionally. Updates #14428 Signed-off-by: Aaron Klotz --- net/dns/manager.go | 4 ++- net/dns/manager_test.go | 70 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 1 deletion(-) diff --git a/net/dns/manager.go b/net/dns/manager.go index 5ac2f69fc..ebf91811a 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -246,8 +246,10 @@ func (m *Manager) compileConfig(cfg Config) (rcfg resolver.Config, ocfg OSConfig // the OS. rcfg.Hosts = cfg.Hosts routes := map[dnsname.FQDN][]*dnstype.Resolver{} // assigned conditionally to rcfg.Routes below. + var propagateHostsToOS bool for suffix, resolvers := range cfg.Routes { if len(resolvers) == 0 { + propagateHostsToOS = true rcfg.LocalDomains = append(rcfg.LocalDomains, suffix) } else { routes[suffix] = resolvers @@ -256,7 +258,7 @@ func (m *Manager) compileConfig(cfg Config) (rcfg resolver.Config, ocfg OSConfig // Similarly, the OS always gets search paths. 
ocfg.SearchDomains = cfg.SearchDomains - if m.goos == "windows" { + if propagateHostsToOS && m.goos == "windows" { ocfg.Hosts = compileHostEntries(cfg) } diff --git a/net/dns/manager_test.go b/net/dns/manager_test.go index 366e08bbf..2bdbc72e2 100644 --- a/net/dns/manager_test.go +++ b/net/dns/manager_test.go @@ -836,6 +836,76 @@ func TestManager(t *testing.T) { }, goos: "darwin", }, + { + name: "populate-hosts-magicdns", + in: Config{ + Routes: upstreams( + "corp.com", "2.2.2.2", + "ts.com", ""), + Hosts: hosts( + "dave.ts.com.", "1.2.3.4", + "bradfitz.ts.com.", "2.3.4.5"), + SearchDomains: fqdns("ts.com", "universe.tf"), + }, + split: true, + os: OSConfig{ + Hosts: []*HostEntry{ + { + Addr: netip.MustParseAddr("2.3.4.5"), + Hosts: []string{ + "bradfitz.ts.com.", + "bradfitz", + }, + }, + { + Addr: netip.MustParseAddr("1.2.3.4"), + Hosts: []string{ + "dave.ts.com.", + "dave", + }, + }, + }, + Nameservers: mustIPs("100.100.100.100"), + SearchDomains: fqdns("ts.com", "universe.tf"), + MatchDomains: fqdns("corp.com", "ts.com"), + }, + rs: resolver.Config{ + Routes: upstreams("corp.com.", "2.2.2.2"), + Hosts: hosts( + "dave.ts.com.", "1.2.3.4", + "bradfitz.ts.com.", "2.3.4.5"), + LocalDomains: fqdns("ts.com."), + }, + goos: "windows", + }, + { + // Regression test for https://github.com/tailscale/tailscale/issues/14428 + name: "nopopulate-hosts-nomagicdns", + in: Config{ + Routes: upstreams( + "corp.com", "2.2.2.2", + "ts.com", "1.1.1.1"), + Hosts: hosts( + "dave.ts.com.", "1.2.3.4", + "bradfitz.ts.com.", "2.3.4.5"), + SearchDomains: fqdns("ts.com", "universe.tf"), + }, + split: true, + os: OSConfig{ + Nameservers: mustIPs("100.100.100.100"), + SearchDomains: fqdns("ts.com", "universe.tf"), + MatchDomains: fqdns("corp.com", "ts.com"), + }, + rs: resolver.Config{ + Routes: upstreams( + "corp.com.", "2.2.2.2", + "ts.com", "1.1.1.1"), + Hosts: hosts( + "dave.ts.com.", "1.2.3.4", + "bradfitz.ts.com.", "2.3.4.5"), + }, + goos: "windows", + }, } trIP := cmp.Transformer("ipStr", func(ip netip.Addr) string { return ip.String() }) From de5683f7c61098337fe2825e2febe98b6809b291 Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Thu, 16 Jan 2025 12:21:33 -0700 Subject: [PATCH 0345/1708] derp: change packets_dropped metric to also have reason and kind labels (#14651) Metrics currently exist for dropped packets by reason, and total received packets by kind (e.g., `disco` or `other`), but relating these two together to gleam information about the drop rate for specific reasons on a per-kind basis is not currently possible. Change `derp_packets_dropped` to use a `metrics.MultiLabelMap` to track both the `reason` and `kind` in the same metric to allow for this desired level of granularity. Drop metrics that this makes unnecessary (namely `packetsDroppedReason` and `packetsDroppedType`). 
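The resulting pattern is one MultiLabelMap keyed by a small label struct and incremented once per drop; a condensed sketch of what this change introduces (the package name is a placeholder and recordDrop stands in for the server method):

    package sketch

    import "tailscale.com/metrics"

    type dropReasonKindLabels struct {
        Reason string // e.g. "queue_tail"
        Kind   string // "disco" or "other"
    }

    var packetsDropped = metrics.NewMultiLabelMap[dropReasonKindLabels](
        "derp_packets_dropped",
        "counter",
        "DERP packets dropped by reason and by kind")

    // recordDrop bumps the counter for one dropped packet.
    func recordDrop(reason string, looksDisco bool) {
        kind := "other"
        if looksDisco {
            kind = "disco"
        }
        packetsDropped.Add(dropReasonKindLabels{Reason: reason, Kind: kind}, 1)
    }

A single metric carrying both labels makes per-reason drop rates directly comparable per kind, which the two separate counters could not offer.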
Updates https://github.com/tailscale/corp/issues/25489 Signed-off-by: Mario Minardi --- derp/derp_server.go | 224 +++++++++++++++++++++----------------- derp/dropreason_string.go | 33 ------ 2 files changed, 126 insertions(+), 131 deletions(-) delete mode 100644 derp/dropreason_string.go diff --git a/derp/derp_server.go b/derp/derp_server.go index 8066b7f19..08fd280a9 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -112,6 +112,14 @@ const ( disableFighters ) +// packetKind is the kind of packet being sent through DERP +type packetKind string + +const ( + packetKindDisco packetKind = "disco" + packetKindOther packetKind = "other" +) + type align64 [0]atomic.Int64 // for side effect of its 64-bit alignment // Server is a DERP server. @@ -131,44 +139,37 @@ type Server struct { debug bool // Counters: - packetsSent, bytesSent expvar.Int - packetsRecv, bytesRecv expvar.Int - packetsRecvByKind metrics.LabelMap - packetsRecvDisco *expvar.Int - packetsRecvOther *expvar.Int - _ align64 - packetsDropped expvar.Int - packetsDroppedReason metrics.LabelMap - packetsDroppedReasonCounters []*expvar.Int // indexed by dropReason - packetsDroppedType metrics.LabelMap - packetsDroppedTypeDisco *expvar.Int - packetsDroppedTypeOther *expvar.Int - _ align64 - packetsForwardedOut expvar.Int - packetsForwardedIn expvar.Int - peerGoneDisconnectedFrames expvar.Int // number of peer disconnected frames sent - peerGoneNotHereFrames expvar.Int // number of peer not here frames sent - gotPing expvar.Int // number of ping frames from client - sentPong expvar.Int // number of pong frames enqueued to client - accepts expvar.Int - curClients expvar.Int - curClientsNotIdeal expvar.Int - curHomeClients expvar.Int // ones with preferred - dupClientKeys expvar.Int // current number of public keys we have 2+ connections for - dupClientConns expvar.Int // current number of connections sharing a public key - dupClientConnTotal expvar.Int // total number of accepted connections when a dup key existed - unknownFrames expvar.Int - homeMovesIn expvar.Int // established clients announce home server moves in - homeMovesOut expvar.Int // established clients announce home server moves out - multiForwarderCreated expvar.Int - multiForwarderDeleted expvar.Int - removePktForwardOther expvar.Int - sclientWriteTimeouts expvar.Int - avgQueueDuration *uint64 // In milliseconds; accessed atomically - tcpRtt metrics.LabelMap // histogram - meshUpdateBatchSize *metrics.Histogram - meshUpdateLoopCount *metrics.Histogram - bufferedWriteFrames *metrics.Histogram // how many sendLoop frames (or groups of related frames) get written per flush + packetsSent, bytesSent expvar.Int + packetsRecv, bytesRecv expvar.Int + packetsRecvByKind metrics.LabelMap + packetsRecvDisco *expvar.Int + packetsRecvOther *expvar.Int + _ align64 + packetsForwardedOut expvar.Int + packetsForwardedIn expvar.Int + peerGoneDisconnectedFrames expvar.Int // number of peer disconnected frames sent + peerGoneNotHereFrames expvar.Int // number of peer not here frames sent + gotPing expvar.Int // number of ping frames from client + sentPong expvar.Int // number of pong frames enqueued to client + accepts expvar.Int + curClients expvar.Int + curClientsNotIdeal expvar.Int + curHomeClients expvar.Int // ones with preferred + dupClientKeys expvar.Int // current number of public keys we have 2+ connections for + dupClientConns expvar.Int // current number of connections sharing a public key + dupClientConnTotal expvar.Int // total number of accepted connections when a dup key 
existed + unknownFrames expvar.Int + homeMovesIn expvar.Int // established clients announce home server moves in + homeMovesOut expvar.Int // established clients announce home server moves out + multiForwarderCreated expvar.Int + multiForwarderDeleted expvar.Int + removePktForwardOther expvar.Int + sclientWriteTimeouts expvar.Int + avgQueueDuration *uint64 // In milliseconds; accessed atomically + tcpRtt metrics.LabelMap // histogram + meshUpdateBatchSize *metrics.Histogram + meshUpdateLoopCount *metrics.Histogram + bufferedWriteFrames *metrics.Histogram // how many sendLoop frames (or groups of related frames) get written per flush // verifyClientsLocalTailscaled only accepts client connections to the DERP // server if the clientKey is a known peer in the network, as specified by a @@ -351,6 +352,11 @@ type Conn interface { SetWriteDeadline(time.Time) error } +var packetsDropped = metrics.NewMultiLabelMap[dropReasonKindLabels]( + "derp_packets_dropped", + "counter", + "DERP packets dropped by reason and by kind") + // NewServer returns a new DERP server. It doesn't listen on its own. // Connections are given to it via Server.Accept. func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { @@ -358,61 +364,81 @@ func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { runtime.ReadMemStats(&ms) s := &Server{ - debug: envknob.Bool("DERP_DEBUG_LOGS"), - privateKey: privateKey, - publicKey: privateKey.Public(), - logf: logf, - limitedLogf: logger.RateLimitedFn(logf, 30*time.Second, 5, 100), - packetsRecvByKind: metrics.LabelMap{Label: "kind"}, - packetsDroppedReason: metrics.LabelMap{Label: "reason"}, - packetsDroppedType: metrics.LabelMap{Label: "type"}, - clients: map[key.NodePublic]*clientSet{}, - clientsMesh: map[key.NodePublic]PacketForwarder{}, - netConns: map[Conn]chan struct{}{}, - memSys0: ms.Sys, - watchers: set.Set[*sclient]{}, - peerGoneWatchers: map[key.NodePublic]set.HandleSet[func(key.NodePublic)]{}, - avgQueueDuration: new(uint64), - tcpRtt: metrics.LabelMap{Label: "le"}, - meshUpdateBatchSize: metrics.NewHistogram([]float64{0, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000}), - meshUpdateLoopCount: metrics.NewHistogram([]float64{0, 1, 2, 5, 10, 20, 50, 100}), - bufferedWriteFrames: metrics.NewHistogram([]float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 50, 100}), - keyOfAddr: map[netip.AddrPort]key.NodePublic{}, - clock: tstime.StdClock{}, + debug: envknob.Bool("DERP_DEBUG_LOGS"), + privateKey: privateKey, + publicKey: privateKey.Public(), + logf: logf, + limitedLogf: logger.RateLimitedFn(logf, 30*time.Second, 5, 100), + packetsRecvByKind: metrics.LabelMap{Label: "kind"}, + clients: map[key.NodePublic]*clientSet{}, + clientsMesh: map[key.NodePublic]PacketForwarder{}, + netConns: map[Conn]chan struct{}{}, + memSys0: ms.Sys, + watchers: set.Set[*sclient]{}, + peerGoneWatchers: map[key.NodePublic]set.HandleSet[func(key.NodePublic)]{}, + avgQueueDuration: new(uint64), + tcpRtt: metrics.LabelMap{Label: "le"}, + meshUpdateBatchSize: metrics.NewHistogram([]float64{0, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000}), + meshUpdateLoopCount: metrics.NewHistogram([]float64{0, 1, 2, 5, 10, 20, 50, 100}), + bufferedWriteFrames: metrics.NewHistogram([]float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 50, 100}), + keyOfAddr: map[netip.AddrPort]key.NodePublic{}, + clock: tstime.StdClock{}, } s.initMetacert() - s.packetsRecvDisco = s.packetsRecvByKind.Get("disco") - s.packetsRecvOther = s.packetsRecvByKind.Get("other") + s.packetsRecvDisco = 
s.packetsRecvByKind.Get(string(packetKindDisco)) + s.packetsRecvOther = s.packetsRecvByKind.Get(string(packetKindOther)) - s.packetsDroppedReasonCounters = s.genPacketsDroppedReasonCounters() - - s.packetsDroppedTypeDisco = s.packetsDroppedType.Get("disco") - s.packetsDroppedTypeOther = s.packetsDroppedType.Get("other") + genPacketsDroppedCounters() s.perClientSendQueueDepth = getPerClientSendQueueDepth() return s } -func (s *Server) genPacketsDroppedReasonCounters() []*expvar.Int { - getMetric := s.packetsDroppedReason.Get - ret := []*expvar.Int{ - dropReasonUnknownDest: getMetric("unknown_dest"), - dropReasonUnknownDestOnFwd: getMetric("unknown_dest_on_fwd"), - dropReasonGoneDisconnected: getMetric("gone_disconnected"), - dropReasonQueueHead: getMetric("queue_head"), - dropReasonQueueTail: getMetric("queue_tail"), - dropReasonWriteError: getMetric("write_error"), - dropReasonDupClient: getMetric("dup_client"), +func genPacketsDroppedCounters() { + initMetrics := func(reason dropReason) { + packetsDropped.Add(dropReasonKindLabels{ + Kind: string(packetKindDisco), + Reason: string(reason), + }, 0) + packetsDropped.Add(dropReasonKindLabels{ + Kind: string(packetKindOther), + Reason: string(reason), + }, 0) + } + getMetrics := func(reason dropReason) []expvar.Var { + return []expvar.Var{ + packetsDropped.Get(dropReasonKindLabels{ + Kind: string(packetKindDisco), + Reason: string(reason), + }), + packetsDropped.Get(dropReasonKindLabels{ + Kind: string(packetKindOther), + Reason: string(reason), + }), + } } - if len(ret) != int(numDropReasons) { - panic("dropReason metrics out of sync") + + dropReasons := []dropReason{ + dropReasonUnknownDest, + dropReasonUnknownDestOnFwd, + dropReasonGoneDisconnected, + dropReasonQueueHead, + dropReasonQueueTail, + dropReasonWriteError, + dropReasonDupClient, } - for i := range numDropReasons { - if ret[i] == nil { + + for _, dr := range dropReasons { + initMetrics(dr) + m := getMetrics(dr) + if len(m) != 2 { + panic("dropReason metrics out of sync") + } + + if m[0] == nil || m[1] == nil { panic("dropReason metrics out of sync") } } - return ret } // SetMesh sets the pre-shared key that regional DERP servers used to mesh @@ -1152,31 +1178,36 @@ func (c *sclient) debugLogf(format string, v ...any) { } } -// dropReason is why we dropped a DERP frame. -type dropReason int +type dropReasonKindLabels struct { + Reason string // metric label corresponding to a given dropReason + Kind string // either `disco` or `other` +} -//go:generate go run tailscale.com/cmd/addlicense -file dropreason_string.go go run golang.org/x/tools/cmd/stringer -type=dropReason -trimprefix=dropReason +// dropReason is why we dropped a DERP frame. 
+type dropReason string const ( - dropReasonUnknownDest dropReason = iota // unknown destination pubkey - dropReasonUnknownDestOnFwd // unknown destination pubkey on a derp-forwarded packet - dropReasonGoneDisconnected // destination tailscaled disconnected before we could send - dropReasonQueueHead // destination queue is full, dropped packet at queue head - dropReasonQueueTail // destination queue is full, dropped packet at queue tail - dropReasonWriteError // OS write() failed - dropReasonDupClient // the public key is connected 2+ times (active/active, fighting) - numDropReasons // unused; keep last + dropReasonUnknownDest dropReason = "unknown_dest" // unknown destination pubkey + dropReasonUnknownDestOnFwd dropReason = "unknown_dest_on_fwd" // unknown destination pubkey on a derp-forwarded packet + dropReasonGoneDisconnected dropReason = "gone_disconnected" // destination tailscaled disconnected before we could send + dropReasonQueueHead dropReason = "queue_head" // destination queue is full, dropped packet at queue head + dropReasonQueueTail dropReason = "queue_tail" // destination queue is full, dropped packet at queue tail + dropReasonWriteError dropReason = "write_error" // OS write() failed + dropReasonDupClient dropReason = "dup_client" // the public key is connected 2+ times (active/active, fighting) ) func (s *Server) recordDrop(packetBytes []byte, srcKey, dstKey key.NodePublic, reason dropReason) { - s.packetsDropped.Add(1) - s.packetsDroppedReasonCounters[reason].Add(1) + labels := dropReasonKindLabels{ + Reason: string(reason), + } looksDisco := disco.LooksLikeDiscoWrapper(packetBytes) if looksDisco { - s.packetsDroppedTypeDisco.Add(1) + labels.Kind = string(packetKindDisco) } else { - s.packetsDroppedTypeOther.Add(1) + labels.Kind = string(packetKindOther) } + packetsDropped.Add(labels, 1) + if verboseDropKeys[dstKey] { // Preformat the log string prior to calling limitedLogf. The // limiter acts based on the format string, and we want to @@ -2095,9 +2126,6 @@ func (s *Server) ExpVar() expvar.Var { m.Set("accepts", &s.accepts) m.Set("bytes_received", &s.bytesRecv) m.Set("bytes_sent", &s.bytesSent) - m.Set("packets_dropped", &s.packetsDropped) - m.Set("counter_packets_dropped_reason", &s.packetsDroppedReason) - m.Set("counter_packets_dropped_type", &s.packetsDroppedType) m.Set("counter_packets_received_kind", &s.packetsRecvByKind) m.Set("packets_sent", &s.packetsSent) m.Set("packets_received", &s.packetsRecv) diff --git a/derp/dropreason_string.go b/derp/dropreason_string.go deleted file mode 100644 index 3ad072819..000000000 --- a/derp/dropreason_string.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Code generated by "stringer -type=dropReason -trimprefix=dropReason"; DO NOT EDIT. - -package derp - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[dropReasonUnknownDest-0] - _ = x[dropReasonUnknownDestOnFwd-1] - _ = x[dropReasonGoneDisconnected-2] - _ = x[dropReasonQueueHead-3] - _ = x[dropReasonQueueTail-4] - _ = x[dropReasonWriteError-5] - _ = x[dropReasonDupClient-6] - _ = x[numDropReasons-7] -} - -const _dropReason_name = "UnknownDestUnknownDestOnFwdGoneDisconnectedQueueHeadQueueTailWriteErrorDupClientnumDropReasons" - -var _dropReason_index = [...]uint8{0, 11, 27, 43, 52, 61, 71, 80, 94} - -func (i dropReason) String() string { - if i < 0 || i >= dropReason(len(_dropReason_index)-1) { - return "dropReason(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _dropReason_name[_dropReason_index[i]:_dropReason_index[i+1]] -} From d912a49be6cca5252612e52e20fdbce6a89486ec Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 16 Jan 2025 16:04:35 -0800 Subject: [PATCH 0346/1708] net/tstun: add logging to aid developers missing Start calls Since 5297bd2cff8ed03679, tstun.Wrapper has required its Start method to be called for it to function. Failure to do so just results in weird hangs and I've wasted too much time multiple times now debugging. Hopefully this prevents more lost time. Updates tailscale/corp#24454 Change-Id: I87f4539f7be7dc154627f8835a37a8db88c31be0 Signed-off-by: Brad Fitzpatrick --- net/tstun/wrap.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index deb8bc094..e4ff36b49 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -904,9 +904,23 @@ func (t *Wrapper) IdleDuration() time.Duration { return mono.Since(t.lastActivityAtomic.LoadAtomic()) } +func (t *Wrapper) awaitStart() { + for { + select { + case <-t.startCh: + return + case <-time.After(1 * time.Second): + // Multiple times while remixing tailscaled I (Brad) have forgotten + // to call Start and then wasted far too much time debugging. + // I do not wish that debugging on anyone else. Hopefully this'll help: + t.logf("tstun: awaiting Wrapper.Start call") + } + } +} + func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { if !t.started.Load() { - <-t.startCh + t.awaitStart() } // packet from OS read and sent to WG res, ok := <-t.vectorOutbound From 97a44d6453e83c966cfe109df77f9863830344ff Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 17 Jan 2025 05:37:53 +0000 Subject: [PATCH 0347/1708] go.{mod,sum},cmd/{k8s-operator,derper,stund}/depaware.txt: bump kube deps (#14601) Updates kube deps and mkctr, regenerates kube yamls with the updated tooling. 
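[Editor's note on the derp_server.go change above: the per-reason and per-kind drop counters are now one typed-label metric instead of two LabelMaps plus a slice indexed by dropReason. Below is a minimal, self-contained sketch of that pattern using tailscale.com/metrics, mirroring the calls shown in the diff (NewMultiLabelMap, Add, Get). The names exampleDropLabels and example_packets_dropped are invented for illustration only; the real metric in derp is derp_packets_dropped with dropReasonKindLabels.

package main

import (
	"fmt"

	"tailscale.com/metrics"
)

// exampleDropLabels mirrors dropReasonKindLabels: each exported string
// field becomes a label on the exported metric.
type exampleDropLabels struct {
	Reason string
	Kind   string
}

// examplePacketsDropped is a hypothetical counter created with the same
// constructor the derp change uses for derp_packets_dropped.
var examplePacketsDropped = metrics.NewMultiLabelMap[exampleDropLabels](
	"example_packets_dropped",
	"counter",
	"Example packets dropped by reason and by kind")

func main() {
	labels := exampleDropLabels{Reason: "queue_head", Kind: "disco"}
	// Add creates the labeled counter on first use; seeding each
	// (reason, kind) pair with 0, as genPacketsDroppedCounters does,
	// makes every series visible before the first real drop.
	examplePacketsDropped.Add(labels, 0)
	examplePacketsDropped.Add(labels, 1)
	fmt.Println(examplePacketsDropped.Get(labels))
}

Because the label set is a plain struct, adding a new reason is just another dropReason string constant plus its call sites; there is no generated stringer file or index-aligned counter slice to keep in sync, which is why dropreason_string.go could be deleted.]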
Updates#cleanup Signed-off-by: Irbe Krumina --- cmd/derper/depaware.txt | 4 +- cmd/k8s-operator/connector_test.go | 36 ++-- cmd/k8s-operator/depaware.txt | 167 +++++++++++++++-- .../deploy/crds/tailscale.com_connectors.yaml | 2 +- .../deploy/crds/tailscale.com_dnsconfigs.yaml | 2 +- .../crds/tailscale.com_proxyclasses.yaml | 82 +++++++-- .../crds/tailscale.com_proxygroups.yaml | 2 +- .../deploy/crds/tailscale.com_recorders.yaml | 74 ++++++-- .../deploy/manifests/operator.yaml | 162 +++++++++++++---- cmd/k8s-operator/egress-eps_test.go | 4 +- .../egress-services-readiness_test.go | 14 +- cmd/k8s-operator/egress-services_test.go | 6 +- cmd/k8s-operator/ingress_test.go | 44 ++--- cmd/k8s-operator/nameserver_test.go | 10 +- cmd/k8s-operator/operator_test.go | 141 ++++++++------- cmd/k8s-operator/proxyclass_test.go | 16 +- cmd/k8s-operator/proxygroup_test.go | 24 +-- cmd/k8s-operator/testutils_test.go | 10 +- cmd/k8s-operator/tsrecorder_test.go | 18 +- cmd/stund/depaware.txt | 3 +- go.mod | 64 +++---- go.sum | 169 +++++++++--------- 22 files changed, 690 insertions(+), 364 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 729122d79..498677a49 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -35,11 +35,11 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa L github.com/mdlayher/netlink/nltest from github.com/google/nftables L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket + github.com/munnerz/goautoneg from github.com/prometheus/common/expfmt 💣 github.com/prometheus/client_golang/prometheus from tailscale.com/tsweb/promvarz github.com/prometheus/client_golang/prometheus/internal from github.com/prometheus/client_golang/prometheus github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+ - github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg from github.com/prometheus/common/expfmt github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+ LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs @@ -264,7 +264,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa html/template from tailscale.com/cmd/derper io from bufio+ io/fs from crypto/x509+ - io/ioutil from github.com/mitchellh/go-ps+ + L io/ioutil from github.com/mitchellh/go-ps+ iter from maps+ log from expvar+ log/internal from log diff --git a/cmd/k8s-operator/connector_test.go b/cmd/k8s-operator/connector_test.go index 242f1f99f..f32fe3282 100644 --- a/cmd/k8s-operator/connector_test.go +++ b/cmd/k8s-operator/connector_test.go @@ -79,8 +79,8 @@ func TestConnector(t *testing.T) { subnetRoutes: "10.40.0.0/14", app: kubetypes.AppConnector, } - expectEqual(t, fc, expectedSecret(t, fc, opts), nil) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, opts)) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) // Connector status should get updated with the IP/hostname info when available. 
const hostname = "foo.tailnetxyz.ts.net" @@ -106,7 +106,7 @@ func TestConnector(t *testing.T) { opts.subnetRoutes = "10.40.0.0/14,10.44.0.0/20" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) // Remove a route. mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -114,7 +114,7 @@ func TestConnector(t *testing.T) { }) opts.subnetRoutes = "10.44.0.0/20" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) // Remove the subnet router. mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -122,7 +122,7 @@ func TestConnector(t *testing.T) { }) opts.subnetRoutes = "" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) // Re-add the subnet router. mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -132,7 +132,7 @@ func TestConnector(t *testing.T) { }) opts.subnetRoutes = "10.44.0.0/20" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) // Delete the Connector. if err = fc.Delete(context.Background(), cn); err != nil { @@ -175,8 +175,8 @@ func TestConnector(t *testing.T) { hostname: "test-connector", app: kubetypes.AppConnector, } - expectEqual(t, fc, expectedSecret(t, fc, opts), nil) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, opts)) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) // Add an exit node. mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -184,7 +184,7 @@ func TestConnector(t *testing.T) { }) opts.isExitNode = true expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) // Delete the Connector. if err = fc.Delete(context.Background(), cn); err != nil { @@ -261,8 +261,8 @@ func TestConnectorWithProxyClass(t *testing.T) { subnetRoutes: "10.40.0.0/14", app: kubetypes.AppConnector, } - expectEqual(t, fc, expectedSecret(t, fc, opts), nil) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, opts)) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) // 2. Update Connector to specify a ProxyClass. ProxyClass is not yet // ready, so its configuration is NOT applied to the Connector @@ -271,7 +271,7 @@ func TestConnectorWithProxyClass(t *testing.T) { conn.Spec.ProxyClass = "custom-metadata" }) expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) // 3. ProxyClass is set to Ready by proxy-class reconciler. 
Connector // get reconciled and configuration from the ProxyClass is applied to @@ -286,7 +286,7 @@ func TestConnectorWithProxyClass(t *testing.T) { }) opts.proxyClass = pc.Name expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) // 4. Connector.spec.proxyClass field is unset, Connector gets // reconciled and configuration from the ProxyClass is removed from the @@ -296,7 +296,7 @@ func TestConnectorWithProxyClass(t *testing.T) { }) opts.proxyClass = "" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) } func TestConnectorWithAppConnector(t *testing.T) { @@ -351,8 +351,8 @@ func TestConnectorWithAppConnector(t *testing.T) { app: kubetypes.AppConnector, isAppConnector: true, } - expectEqual(t, fc, expectedSecret(t, fc, opts), nil) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, opts)) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) // Connector's ready condition should be set to true cn.ObjectMeta.Finalizers = append(cn.ObjectMeta.Finalizers, "tailscale.com/finalizer") @@ -364,7 +364,7 @@ func TestConnectorWithAppConnector(t *testing.T) { Reason: reasonConnectorCreated, Message: reasonConnectorCreated, }} - expectEqual(t, fc, cn, nil) + expectEqual(t, fc, cn) // 2. Connector with invalid app connector routes has status set to invalid mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -379,7 +379,7 @@ func TestConnectorWithAppConnector(t *testing.T) { Reason: reasonConnectorInvalid, Message: "Connector is invalid: route 1.2.3.4/5 has non-address bits set; expected 0.0.0.0/5", }} - expectEqual(t, fc, cn, nil) + expectEqual(t, fc, cn) // 3. 
Connector with valid app connnector routes becomes ready mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index cb02038e3..80c9f0c06 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -94,7 +94,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/evanphx/json-patch/v5 from sigs.k8s.io/controller-runtime/pkg/client github.com/evanphx/json-patch/v5/internal/json from github.com/evanphx/json-patch/v5 💣 github.com/fsnotify/fsnotify from sigs.k8s.io/controller-runtime/pkg/certwatcher - github.com/fxamacker/cbor/v2 from tailscale.com/tka + github.com/fxamacker/cbor/v2 from tailscale.com/tka+ github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json/internal/jsonflags+ @@ -110,11 +110,11 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/go-openapi/jsonpointer from github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference from k8s.io/kube-openapi/pkg/internal+ github.com/go-openapi/jsonreference/internal from github.com/go-openapi/jsonreference - github.com/go-openapi/swag from github.com/go-openapi/jsonpointer+ + 💣 github.com/go-openapi/swag from github.com/go-openapi/jsonpointer+ L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns 💣 github.com/gogo/protobuf/proto from k8s.io/api/admission/v1+ github.com/gogo/protobuf/sortkeys from k8s.io/api/admission/v1+ - github.com/golang/groupcache/lru from k8s.io/client-go/tools/record+ + github.com/golang/groupcache/lru from tailscale.com/net/dnscache github.com/golang/protobuf/proto from k8s.io/client-go/discovery+ github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ github.com/google/gnostic-models/compiler from github.com/google/gnostic-models/openapiv2+ @@ -140,7 +140,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L 💣 github.com/illarion/gonotify/v2 from tailscale.com/net/dns - github.com/imdario/mergo from k8s.io/client-go/tools/clientcmd L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/net/tstun L github.com/insomniacslk/dhcp/iana from github.com/insomniacslk/dhcp/dhcpv4 L github.com/insomniacslk/dhcp/interfaces from github.com/insomniacslk/dhcp/dhcpv4 @@ -171,7 +170,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket github.com/modern-go/concurrent from github.com/json-iterator/go 💣 github.com/modern-go/reflect2 from github.com/json-iterator/go - github.com/munnerz/goautoneg from k8s.io/kube-openapi/pkg/handler3 + github.com/munnerz/goautoneg from k8s.io/kube-openapi/pkg/handler3+ github.com/opencontainers/go-digest from github.com/distribution/reference L github.com/pierrec/lz4/v4 from github.com/u-root/uio/uio L github.com/pierrec/lz4/v4/internal/lz4block from github.com/pierrec/lz4/v4+ @@ -186,7 +185,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/prometheus/client_golang/prometheus/promhttp from sigs.k8s.io/controller-runtime/pkg/metrics/server+ github.com/prometheus/client_model/go from 
github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+ - github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg from github.com/prometheus/common/expfmt github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+ LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs @@ -250,6 +248,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ google.golang.org/protobuf/internal/descopts from google.golang.org/protobuf/internal/filedesc+ google.golang.org/protobuf/internal/detrand from google.golang.org/protobuf/internal/descfmt+ google.golang.org/protobuf/internal/editiondefaults from google.golang.org/protobuf/internal/filedesc+ + google.golang.org/protobuf/internal/editionssupport from google.golang.org/protobuf/reflect/protodesc google.golang.org/protobuf/internal/encoding/defval from google.golang.org/protobuf/internal/encoding/tag+ google.golang.org/protobuf/internal/encoding/messageset from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/encoding/tag from google.golang.org/protobuf/internal/impl @@ -275,8 +274,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ google.golang.org/protobuf/types/gofeaturespb from google.golang.org/protobuf/reflect/protodesc google.golang.org/protobuf/types/known/anypb from github.com/google/gnostic-models/compiler+ google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ + gopkg.in/evanphx/json-patch.v4 from k8s.io/client-go/testing gopkg.in/inf.v0 from k8s.io/apimachinery/pkg/api/resource - gopkg.in/yaml.v2 from k8s.io/kube-openapi/pkg/util/proto+ gopkg.in/yaml.v3 from github.com/go-openapi/swag+ gvisor.dev/gvisor/pkg/atomicbitops from gvisor.dev/gvisor/pkg/buffer+ gvisor.dev/gvisor/pkg/bits from gvisor.dev/gvisor/pkg/buffer @@ -345,6 +344,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/api/certificates/v1alpha1 from k8s.io/client-go/applyconfigurations/certificates/v1alpha1+ k8s.io/api/certificates/v1beta1 from k8s.io/client-go/applyconfigurations/certificates/v1beta1+ k8s.io/api/coordination/v1 from k8s.io/client-go/applyconfigurations/coordination/v1+ + k8s.io/api/coordination/v1alpha2 from k8s.io/client-go/applyconfigurations/coordination/v1alpha2+ k8s.io/api/coordination/v1beta1 from k8s.io/client-go/applyconfigurations/coordination/v1beta1+ k8s.io/api/core/v1 from k8s.io/api/apps/v1+ k8s.io/api/discovery/v1 from k8s.io/client-go/applyconfigurations/discovery/v1+ @@ -367,7 +367,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/api/rbac/v1 from k8s.io/client-go/applyconfigurations/rbac/v1+ k8s.io/api/rbac/v1alpha1 from k8s.io/client-go/applyconfigurations/rbac/v1alpha1+ k8s.io/api/rbac/v1beta1 from k8s.io/client-go/applyconfigurations/rbac/v1beta1+ - k8s.io/api/resource/v1alpha2 from k8s.io/client-go/applyconfigurations/resource/v1alpha2+ + k8s.io/api/resource/v1alpha3 from k8s.io/client-go/applyconfigurations/resource/v1alpha3+ + k8s.io/api/resource/v1beta1 from k8s.io/client-go/applyconfigurations/resource/v1beta1+ k8s.io/api/scheduling/v1 from k8s.io/client-go/applyconfigurations/scheduling/v1+ k8s.io/api/scheduling/v1alpha1 from k8s.io/client-go/applyconfigurations/scheduling/v1alpha1+ 
k8s.io/api/scheduling/v1beta1 from k8s.io/client-go/applyconfigurations/scheduling/v1beta1+ @@ -380,10 +381,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/apimachinery/pkg/api/equality from k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1+ k8s.io/apimachinery/pkg/api/errors from k8s.io/apimachinery/pkg/util/managedfields/internal+ k8s.io/apimachinery/pkg/api/meta from k8s.io/apimachinery/pkg/api/validation+ + k8s.io/apimachinery/pkg/api/meta/testrestmapper from k8s.io/client-go/testing k8s.io/apimachinery/pkg/api/resource from k8s.io/api/autoscaling/v1+ k8s.io/apimachinery/pkg/api/validation from k8s.io/apimachinery/pkg/util/managedfields/internal+ 💣 k8s.io/apimachinery/pkg/apis/meta/internalversion from k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme+ k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme from k8s.io/client-go/metadata + k8s.io/apimachinery/pkg/apis/meta/internalversion/validation from k8s.io/client-go/util/watchlist 💣 k8s.io/apimachinery/pkg/apis/meta/v1 from k8s.io/api/admission/v1+ k8s.io/apimachinery/pkg/apis/meta/v1/unstructured from k8s.io/apimachinery/pkg/runtime/serializer/versioning+ k8s.io/apimachinery/pkg/apis/meta/v1/validation from k8s.io/apimachinery/pkg/api/validation+ @@ -395,6 +398,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/apimachinery/pkg/runtime from k8s.io/api/admission/v1+ k8s.io/apimachinery/pkg/runtime/schema from k8s.io/api/admission/v1+ k8s.io/apimachinery/pkg/runtime/serializer from k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme+ + k8s.io/apimachinery/pkg/runtime/serializer/cbor from k8s.io/client-go/dynamic+ + k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct from k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1+ + k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes from k8s.io/apimachinery/pkg/runtime/serializer/cbor+ k8s.io/apimachinery/pkg/runtime/serializer/json from k8s.io/apimachinery/pkg/runtime/serializer+ k8s.io/apimachinery/pkg/runtime/serializer/protobuf from k8s.io/apimachinery/pkg/runtime/serializer k8s.io/apimachinery/pkg/runtime/serializer/recognizer from k8s.io/apimachinery/pkg/runtime/serializer+ @@ -446,6 +452,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/applyconfigurations/certificates/v1alpha1 from k8s.io/client-go/kubernetes/typed/certificates/v1alpha1 k8s.io/client-go/applyconfigurations/certificates/v1beta1 from k8s.io/client-go/kubernetes/typed/certificates/v1beta1 k8s.io/client-go/applyconfigurations/coordination/v1 from k8s.io/client-go/kubernetes/typed/coordination/v1 + k8s.io/client-go/applyconfigurations/coordination/v1alpha2 from k8s.io/client-go/kubernetes/typed/coordination/v1alpha2 k8s.io/client-go/applyconfigurations/coordination/v1beta1 from k8s.io/client-go/kubernetes/typed/coordination/v1beta1 k8s.io/client-go/applyconfigurations/core/v1 from k8s.io/client-go/applyconfigurations/apps/v1+ k8s.io/client-go/applyconfigurations/discovery/v1 from k8s.io/client-go/kubernetes/typed/discovery/v1 @@ -470,7 +477,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/applyconfigurations/rbac/v1 from k8s.io/client-go/kubernetes/typed/rbac/v1 k8s.io/client-go/applyconfigurations/rbac/v1alpha1 from k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 k8s.io/client-go/applyconfigurations/rbac/v1beta1 from k8s.io/client-go/kubernetes/typed/rbac/v1beta1 - 
k8s.io/client-go/applyconfigurations/resource/v1alpha2 from k8s.io/client-go/kubernetes/typed/resource/v1alpha2 + k8s.io/client-go/applyconfigurations/resource/v1alpha3 from k8s.io/client-go/kubernetes/typed/resource/v1alpha3 + k8s.io/client-go/applyconfigurations/resource/v1beta1 from k8s.io/client-go/kubernetes/typed/resource/v1beta1 k8s.io/client-go/applyconfigurations/scheduling/v1 from k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/applyconfigurations/scheduling/v1alpha1 from k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 k8s.io/client-go/applyconfigurations/scheduling/v1beta1 from k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 @@ -480,8 +488,80 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1 from k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1 k8s.io/client-go/discovery from k8s.io/client-go/applyconfigurations/meta/v1+ k8s.io/client-go/dynamic from sigs.k8s.io/controller-runtime/pkg/cache/internal+ - k8s.io/client-go/features from k8s.io/client-go/tools/cache - k8s.io/client-go/kubernetes from k8s.io/client-go/tools/leaderelection/resourcelock + k8s.io/client-go/features from k8s.io/client-go/tools/cache+ + k8s.io/client-go/gentype from k8s.io/client-go/kubernetes/typed/admissionregistration/v1+ + k8s.io/client-go/informers from k8s.io/client-go/tools/leaderelection + k8s.io/client-go/informers/admissionregistration from k8s.io/client-go/informers + k8s.io/client-go/informers/admissionregistration/v1 from k8s.io/client-go/informers/admissionregistration + k8s.io/client-go/informers/admissionregistration/v1alpha1 from k8s.io/client-go/informers/admissionregistration + k8s.io/client-go/informers/admissionregistration/v1beta1 from k8s.io/client-go/informers/admissionregistration + k8s.io/client-go/informers/apiserverinternal from k8s.io/client-go/informers + k8s.io/client-go/informers/apiserverinternal/v1alpha1 from k8s.io/client-go/informers/apiserverinternal + k8s.io/client-go/informers/apps from k8s.io/client-go/informers + k8s.io/client-go/informers/apps/v1 from k8s.io/client-go/informers/apps + k8s.io/client-go/informers/apps/v1beta1 from k8s.io/client-go/informers/apps + k8s.io/client-go/informers/apps/v1beta2 from k8s.io/client-go/informers/apps + k8s.io/client-go/informers/autoscaling from k8s.io/client-go/informers + k8s.io/client-go/informers/autoscaling/v1 from k8s.io/client-go/informers/autoscaling + k8s.io/client-go/informers/autoscaling/v2 from k8s.io/client-go/informers/autoscaling + k8s.io/client-go/informers/autoscaling/v2beta1 from k8s.io/client-go/informers/autoscaling + k8s.io/client-go/informers/autoscaling/v2beta2 from k8s.io/client-go/informers/autoscaling + k8s.io/client-go/informers/batch from k8s.io/client-go/informers + k8s.io/client-go/informers/batch/v1 from k8s.io/client-go/informers/batch + k8s.io/client-go/informers/batch/v1beta1 from k8s.io/client-go/informers/batch + k8s.io/client-go/informers/certificates from k8s.io/client-go/informers + k8s.io/client-go/informers/certificates/v1 from k8s.io/client-go/informers/certificates + k8s.io/client-go/informers/certificates/v1alpha1 from k8s.io/client-go/informers/certificates + k8s.io/client-go/informers/certificates/v1beta1 from k8s.io/client-go/informers/certificates + k8s.io/client-go/informers/coordination from k8s.io/client-go/informers + k8s.io/client-go/informers/coordination/v1 from k8s.io/client-go/informers/coordination + 
k8s.io/client-go/informers/coordination/v1alpha2 from k8s.io/client-go/informers/coordination + k8s.io/client-go/informers/coordination/v1beta1 from k8s.io/client-go/informers/coordination + k8s.io/client-go/informers/core from k8s.io/client-go/informers + k8s.io/client-go/informers/core/v1 from k8s.io/client-go/informers/core + k8s.io/client-go/informers/discovery from k8s.io/client-go/informers + k8s.io/client-go/informers/discovery/v1 from k8s.io/client-go/informers/discovery + k8s.io/client-go/informers/discovery/v1beta1 from k8s.io/client-go/informers/discovery + k8s.io/client-go/informers/events from k8s.io/client-go/informers + k8s.io/client-go/informers/events/v1 from k8s.io/client-go/informers/events + k8s.io/client-go/informers/events/v1beta1 from k8s.io/client-go/informers/events + k8s.io/client-go/informers/extensions from k8s.io/client-go/informers + k8s.io/client-go/informers/extensions/v1beta1 from k8s.io/client-go/informers/extensions + k8s.io/client-go/informers/flowcontrol from k8s.io/client-go/informers + k8s.io/client-go/informers/flowcontrol/v1 from k8s.io/client-go/informers/flowcontrol + k8s.io/client-go/informers/flowcontrol/v1beta1 from k8s.io/client-go/informers/flowcontrol + k8s.io/client-go/informers/flowcontrol/v1beta2 from k8s.io/client-go/informers/flowcontrol + k8s.io/client-go/informers/flowcontrol/v1beta3 from k8s.io/client-go/informers/flowcontrol + k8s.io/client-go/informers/internalinterfaces from k8s.io/client-go/informers+ + k8s.io/client-go/informers/networking from k8s.io/client-go/informers + k8s.io/client-go/informers/networking/v1 from k8s.io/client-go/informers/networking + k8s.io/client-go/informers/networking/v1alpha1 from k8s.io/client-go/informers/networking + k8s.io/client-go/informers/networking/v1beta1 from k8s.io/client-go/informers/networking + k8s.io/client-go/informers/node from k8s.io/client-go/informers + k8s.io/client-go/informers/node/v1 from k8s.io/client-go/informers/node + k8s.io/client-go/informers/node/v1alpha1 from k8s.io/client-go/informers/node + k8s.io/client-go/informers/node/v1beta1 from k8s.io/client-go/informers/node + k8s.io/client-go/informers/policy from k8s.io/client-go/informers + k8s.io/client-go/informers/policy/v1 from k8s.io/client-go/informers/policy + k8s.io/client-go/informers/policy/v1beta1 from k8s.io/client-go/informers/policy + k8s.io/client-go/informers/rbac from k8s.io/client-go/informers + k8s.io/client-go/informers/rbac/v1 from k8s.io/client-go/informers/rbac + k8s.io/client-go/informers/rbac/v1alpha1 from k8s.io/client-go/informers/rbac + k8s.io/client-go/informers/rbac/v1beta1 from k8s.io/client-go/informers/rbac + k8s.io/client-go/informers/resource from k8s.io/client-go/informers + k8s.io/client-go/informers/resource/v1alpha3 from k8s.io/client-go/informers/resource + k8s.io/client-go/informers/resource/v1beta1 from k8s.io/client-go/informers/resource + k8s.io/client-go/informers/scheduling from k8s.io/client-go/informers + k8s.io/client-go/informers/scheduling/v1 from k8s.io/client-go/informers/scheduling + k8s.io/client-go/informers/scheduling/v1alpha1 from k8s.io/client-go/informers/scheduling + k8s.io/client-go/informers/scheduling/v1beta1 from k8s.io/client-go/informers/scheduling + k8s.io/client-go/informers/storage from k8s.io/client-go/informers + k8s.io/client-go/informers/storage/v1 from k8s.io/client-go/informers/storage + k8s.io/client-go/informers/storage/v1alpha1 from k8s.io/client-go/informers/storage + k8s.io/client-go/informers/storage/v1beta1 from 
k8s.io/client-go/informers/storage + k8s.io/client-go/informers/storagemigration from k8s.io/client-go/informers + k8s.io/client-go/informers/storagemigration/v1alpha1 from k8s.io/client-go/informers/storagemigration + k8s.io/client-go/kubernetes from k8s.io/client-go/tools/leaderelection/resourcelock+ k8s.io/client-go/kubernetes/scheme from k8s.io/client-go/discovery+ k8s.io/client-go/kubernetes/typed/admissionregistration/v1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 from k8s.io/client-go/kubernetes @@ -505,6 +585,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/kubernetes/typed/certificates/v1alpha1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/certificates/v1beta1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/coordination/v1 from k8s.io/client-go/kubernetes+ + k8s.io/client-go/kubernetes/typed/coordination/v1alpha2 from k8s.io/client-go/kubernetes+ k8s.io/client-go/kubernetes/typed/coordination/v1beta1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/core/v1 from k8s.io/client-go/kubernetes+ k8s.io/client-go/kubernetes/typed/discovery/v1 from k8s.io/client-go/kubernetes @@ -527,7 +608,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/kubernetes/typed/rbac/v1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/rbac/v1beta1 from k8s.io/client-go/kubernetes - k8s.io/client-go/kubernetes/typed/resource/v1alpha2 from k8s.io/client-go/kubernetes + k8s.io/client-go/kubernetes/typed/resource/v1alpha3 from k8s.io/client-go/kubernetes + k8s.io/client-go/kubernetes/typed/resource/v1beta1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/scheduling/v1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 from k8s.io/client-go/kubernetes @@ -535,6 +617,56 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/kubernetes/typed/storage/v1alpha1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/storage/v1beta1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1 from k8s.io/client-go/kubernetes + k8s.io/client-go/listers from k8s.io/client-go/listers/admissionregistration/v1+ + k8s.io/client-go/listers/admissionregistration/v1 from k8s.io/client-go/informers/admissionregistration/v1 + k8s.io/client-go/listers/admissionregistration/v1alpha1 from k8s.io/client-go/informers/admissionregistration/v1alpha1 + k8s.io/client-go/listers/admissionregistration/v1beta1 from k8s.io/client-go/informers/admissionregistration/v1beta1 + k8s.io/client-go/listers/apiserverinternal/v1alpha1 from k8s.io/client-go/informers/apiserverinternal/v1alpha1 + k8s.io/client-go/listers/apps/v1 from k8s.io/client-go/informers/apps/v1 + k8s.io/client-go/listers/apps/v1beta1 from k8s.io/client-go/informers/apps/v1beta1 + k8s.io/client-go/listers/apps/v1beta2 from k8s.io/client-go/informers/apps/v1beta2 + k8s.io/client-go/listers/autoscaling/v1 from k8s.io/client-go/informers/autoscaling/v1 + k8s.io/client-go/listers/autoscaling/v2 from k8s.io/client-go/informers/autoscaling/v2 + k8s.io/client-go/listers/autoscaling/v2beta1 from k8s.io/client-go/informers/autoscaling/v2beta1 + 
k8s.io/client-go/listers/autoscaling/v2beta2 from k8s.io/client-go/informers/autoscaling/v2beta2 + k8s.io/client-go/listers/batch/v1 from k8s.io/client-go/informers/batch/v1 + k8s.io/client-go/listers/batch/v1beta1 from k8s.io/client-go/informers/batch/v1beta1 + k8s.io/client-go/listers/certificates/v1 from k8s.io/client-go/informers/certificates/v1 + k8s.io/client-go/listers/certificates/v1alpha1 from k8s.io/client-go/informers/certificates/v1alpha1 + k8s.io/client-go/listers/certificates/v1beta1 from k8s.io/client-go/informers/certificates/v1beta1 + k8s.io/client-go/listers/coordination/v1 from k8s.io/client-go/informers/coordination/v1 + k8s.io/client-go/listers/coordination/v1alpha2 from k8s.io/client-go/informers/coordination/v1alpha2 + k8s.io/client-go/listers/coordination/v1beta1 from k8s.io/client-go/informers/coordination/v1beta1 + k8s.io/client-go/listers/core/v1 from k8s.io/client-go/informers/core/v1 + k8s.io/client-go/listers/discovery/v1 from k8s.io/client-go/informers/discovery/v1 + k8s.io/client-go/listers/discovery/v1beta1 from k8s.io/client-go/informers/discovery/v1beta1 + k8s.io/client-go/listers/events/v1 from k8s.io/client-go/informers/events/v1 + k8s.io/client-go/listers/events/v1beta1 from k8s.io/client-go/informers/events/v1beta1 + k8s.io/client-go/listers/extensions/v1beta1 from k8s.io/client-go/informers/extensions/v1beta1 + k8s.io/client-go/listers/flowcontrol/v1 from k8s.io/client-go/informers/flowcontrol/v1 + k8s.io/client-go/listers/flowcontrol/v1beta1 from k8s.io/client-go/informers/flowcontrol/v1beta1 + k8s.io/client-go/listers/flowcontrol/v1beta2 from k8s.io/client-go/informers/flowcontrol/v1beta2 + k8s.io/client-go/listers/flowcontrol/v1beta3 from k8s.io/client-go/informers/flowcontrol/v1beta3 + k8s.io/client-go/listers/networking/v1 from k8s.io/client-go/informers/networking/v1 + k8s.io/client-go/listers/networking/v1alpha1 from k8s.io/client-go/informers/networking/v1alpha1 + k8s.io/client-go/listers/networking/v1beta1 from k8s.io/client-go/informers/networking/v1beta1 + k8s.io/client-go/listers/node/v1 from k8s.io/client-go/informers/node/v1 + k8s.io/client-go/listers/node/v1alpha1 from k8s.io/client-go/informers/node/v1alpha1 + k8s.io/client-go/listers/node/v1beta1 from k8s.io/client-go/informers/node/v1beta1 + k8s.io/client-go/listers/policy/v1 from k8s.io/client-go/informers/policy/v1 + k8s.io/client-go/listers/policy/v1beta1 from k8s.io/client-go/informers/policy/v1beta1 + k8s.io/client-go/listers/rbac/v1 from k8s.io/client-go/informers/rbac/v1 + k8s.io/client-go/listers/rbac/v1alpha1 from k8s.io/client-go/informers/rbac/v1alpha1 + k8s.io/client-go/listers/rbac/v1beta1 from k8s.io/client-go/informers/rbac/v1beta1 + k8s.io/client-go/listers/resource/v1alpha3 from k8s.io/client-go/informers/resource/v1alpha3 + k8s.io/client-go/listers/resource/v1beta1 from k8s.io/client-go/informers/resource/v1beta1 + k8s.io/client-go/listers/scheduling/v1 from k8s.io/client-go/informers/scheduling/v1 + k8s.io/client-go/listers/scheduling/v1alpha1 from k8s.io/client-go/informers/scheduling/v1alpha1 + k8s.io/client-go/listers/scheduling/v1beta1 from k8s.io/client-go/informers/scheduling/v1beta1 + k8s.io/client-go/listers/storage/v1 from k8s.io/client-go/informers/storage/v1 + k8s.io/client-go/listers/storage/v1alpha1 from k8s.io/client-go/informers/storage/v1alpha1 + k8s.io/client-go/listers/storage/v1beta1 from k8s.io/client-go/informers/storage/v1beta1 + k8s.io/client-go/listers/storagemigration/v1alpha1 from k8s.io/client-go/informers/storagemigration/v1alpha1 
k8s.io/client-go/metadata from sigs.k8s.io/controller-runtime/pkg/cache/internal+ k8s.io/client-go/openapi from k8s.io/client-go/discovery k8s.io/client-go/pkg/apis/clientauthentication from k8s.io/client-go/pkg/apis/clientauthentication/install+ @@ -546,6 +678,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/rest from k8s.io/client-go/discovery+ k8s.io/client-go/rest/watch from k8s.io/client-go/rest k8s.io/client-go/restmapper from sigs.k8s.io/controller-runtime/pkg/client/apiutil + k8s.io/client-go/testing from k8s.io/client-go/gentype k8s.io/client-go/tools/auth from k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/cache from sigs.k8s.io/controller-runtime/pkg/cache+ k8s.io/client-go/tools/cache/synctrack from k8s.io/client-go/tools/cache @@ -562,11 +695,14 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/tools/record/util from k8s.io/client-go/tools/record k8s.io/client-go/tools/reference from k8s.io/client-go/kubernetes/typed/core/v1+ k8s.io/client-go/transport from k8s.io/client-go/plugin/pkg/client/auth/exec+ + k8s.io/client-go/util/apply from k8s.io/client-go/dynamic+ k8s.io/client-go/util/cert from k8s.io/client-go/rest+ k8s.io/client-go/util/connrotation from k8s.io/client-go/plugin/pkg/client/auth/exec+ + k8s.io/client-go/util/consistencydetector from k8s.io/client-go/dynamic+ k8s.io/client-go/util/flowcontrol from k8s.io/client-go/kubernetes+ k8s.io/client-go/util/homedir from k8s.io/client-go/tools/clientcmd k8s.io/client-go/util/keyutil from k8s.io/client-go/util/cert + k8s.io/client-go/util/watchlist from k8s.io/client-go/dynamic+ k8s.io/client-go/util/workqueue from k8s.io/client-go/transport+ k8s.io/klog/v2 from k8s.io/apimachinery/pkg/api/meta+ k8s.io/klog/v2/internal/buffer from k8s.io/klog/v2 @@ -587,11 +723,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/utils/buffer from k8s.io/client-go/tools/cache k8s.io/utils/clock from k8s.io/apimachinery/pkg/util/cache+ k8s.io/utils/clock/testing from k8s.io/client-go/util/flowcontrol + k8s.io/utils/internal/third_party/forked/golang/golang-lru from k8s.io/utils/lru k8s.io/utils/internal/third_party/forked/golang/net from k8s.io/utils/net + k8s.io/utils/lru from k8s.io/client-go/tools/record k8s.io/utils/net from k8s.io/apimachinery/pkg/util/net+ k8s.io/utils/pointer from k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1+ k8s.io/utils/ptr from k8s.io/client-go/tools/cache+ - k8s.io/utils/strings/slices from k8s.io/apimachinery/pkg/labels k8s.io/utils/trace from k8s.io/client-go/tools/cache sigs.k8s.io/controller-runtime/pkg/builder from tailscale.com/cmd/k8s-operator sigs.k8s.io/controller-runtime/pkg/cache from sigs.k8s.io/controller-runtime/pkg/cluster+ @@ -624,12 +761,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ sigs.k8s.io/controller-runtime/pkg/metrics from sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics+ sigs.k8s.io/controller-runtime/pkg/metrics/server from sigs.k8s.io/controller-runtime/pkg/manager sigs.k8s.io/controller-runtime/pkg/predicate from sigs.k8s.io/controller-runtime/pkg/builder+ - sigs.k8s.io/controller-runtime/pkg/ratelimiter from sigs.k8s.io/controller-runtime/pkg/controller+ sigs.k8s.io/controller-runtime/pkg/reconcile from sigs.k8s.io/controller-runtime/pkg/builder+ sigs.k8s.io/controller-runtime/pkg/recorder from sigs.k8s.io/controller-runtime/pkg/leaderelection+ 
sigs.k8s.io/controller-runtime/pkg/source from sigs.k8s.io/controller-runtime/pkg/builder+ sigs.k8s.io/controller-runtime/pkg/webhook from sigs.k8s.io/controller-runtime/pkg/manager sigs.k8s.io/controller-runtime/pkg/webhook/admission from sigs.k8s.io/controller-runtime/pkg/builder+ + sigs.k8s.io/controller-runtime/pkg/webhook/admission/metrics from sigs.k8s.io/controller-runtime/pkg/webhook/admission sigs.k8s.io/controller-runtime/pkg/webhook/conversion from sigs.k8s.io/controller-runtime/pkg/builder sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics from sigs.k8s.io/controller-runtime/pkg/webhook+ sigs.k8s.io/json from k8s.io/apimachinery/pkg/runtime/serializer/json+ @@ -640,7 +777,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ sigs.k8s.io/structured-merge-diff/v4/typed from k8s.io/apimachinery/pkg/util/managedfields+ sigs.k8s.io/structured-merge-diff/v4/value from k8s.io/apimachinery/pkg/runtime+ sigs.k8s.io/yaml from k8s.io/apimachinery/pkg/runtime/serializer/json+ - sigs.k8s.io/yaml/goyaml.v2 from sigs.k8s.io/yaml + sigs.k8s.io/yaml/goyaml.v2 from sigs.k8s.io/yaml+ tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml index 4434c1283..1917e31de 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.1-0.20240618033008-7824932b0cab + controller-gen.kubebuilder.io/version: v0.17.0 name: connectors.tailscale.com spec: group: tailscale.com diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml index 13aee9b9e..242debd27 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.1-0.20240618033008-7824932b0cab + controller-gen.kubebuilder.io/version: v0.17.0 name: dnsconfigs.tailscale.com spec: group: tailscale.com diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index 2e53d5ee8..a620c3887 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.1-0.20240618033008-7824932b0cab + controller-gen.kubebuilder.io/version: v0.17.0 name: proxyclasses.tailscale.com spec: group: tailscale.com @@ -428,7 +428,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
type: array items: type: string @@ -443,7 +443,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -600,7 +600,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -615,7 +615,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -773,7 +773,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -788,7 +788,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -945,7 +945,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -960,7 +960,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -1174,6 +1174,32 @@ spec: Note that this field cannot be set when spec.os.name is windows. type: integer format: int64 + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. 
+ It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string seLinuxOptions: description: |- The SELinux context to be applied to all containers. @@ -1222,18 +1248,28 @@ spec: type: string supplementalGroups: description: |- - A list of groups applied to the first process run in each container, in addition - to the container's primary GID, the fsGroup (if specified), and group memberships - defined in the container image for the uid of the container process. If unspecified, - no additional groups are added to any container. Note that group memberships - defined in the container image for the uid of the container process are still effective, - even if they are not included in this list. + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows. type: array items: type: integer format: int64 x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string sysctls: description: |- Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported @@ -1389,6 +1425,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. 
type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string x-kubernetes-list-map-keys: - name x-kubernetes-list-type: map @@ -1493,7 +1535,7 @@ spec: procMount: description: |- procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for + The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. @@ -1713,6 +1755,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string x-kubernetes-list-map-keys: - name x-kubernetes-list-type: map @@ -1817,7 +1865,7 @@ spec: procMount: description: |- procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for + The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index d6a4fe741..86e74e441 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.1-0.20240618033008-7824932b0cab + controller-gen.kubebuilder.io/version: v0.17.0 name: proxygroups.tailscale.com spec: group: tailscale.com diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml index 5b22297d8..22bbed810 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.1-0.20240618033008-7824932b0cab + controller-gen.kubebuilder.io/version: v0.17.0 name: recorders.tailscale.com spec: group: tailscale.com @@ -372,7 +372,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -387,7 +387,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -544,7 +544,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -559,7 +559,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -717,7 +717,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -732,7 +732,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -889,7 +889,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -904,7 +904,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -1066,6 +1066,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string x-kubernetes-list-map-keys: - name x-kubernetes-list-type: map @@ -1165,7 +1171,7 @@ spec: procMount: description: |- procMount denotes the type of proc mount to use for the containers. 
- The default is DefaultProcMount which uses the container runtime defaults for + The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. @@ -1401,6 +1407,32 @@ spec: Note that this field cannot be set when spec.os.name is windows. type: integer format: int64 + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string seLinuxOptions: description: |- The SELinux context to be applied to all containers. @@ -1449,18 +1481,28 @@ spec: type: string supplementalGroups: description: |- - A list of groups applied to the first process run in each container, in addition - to the container's primary GID, the fsGroup (if specified), and group memberships - defined in the container image for the uid of the container process. If unspecified, - no additional groups are added to any container. Note that group memberships - defined in the container image for the uid of the container process are still effective, - even if they are not included in this list. + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows. 
type: array items: type: integer format: int64 x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string sysctls: description: |- Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 0026ffef5..def5716f6 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -31,7 +31,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.1-0.20240618033008-7824932b0cab + controller-gen.kubebuilder.io/version: v0.17.0 name: connectors.tailscale.com spec: group: tailscale.com @@ -294,7 +294,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.1-0.20240618033008-7824932b0cab + controller-gen.kubebuilder.io/version: v0.17.0 name: dnsconfigs.tailscale.com spec: group: tailscale.com @@ -476,7 +476,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.1-0.20240618033008-7824932b0cab + controller-gen.kubebuilder.io/version: v0.17.0 name: proxyclasses.tailscale.com spec: group: tailscale.com @@ -886,7 +886,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -901,7 +901,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1062,7 +1062,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1077,7 +1077,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1231,7 +1231,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1246,7 +1246,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1407,7 +1407,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1422,7 +1422,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1641,6 +1641,32 @@ spec: Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. 
+ + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string seLinuxOptions: description: |- The SELinux context to be applied to all containers. @@ -1689,18 +1715,28 @@ spec: type: object supplementalGroups: description: |- - A list of groups applied to the first process run in each container, in addition - to the container's primary GID, the fsGroup (if specified), and group memberships - defined in the container image for the uid of the container process. If unspecified, - no additional groups are added to any container. Note that group memberships - defined in the container image for the uid of the container process are still effective, - even if they are not included in this list. + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer type: array x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string sysctls: description: |- Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported @@ -1851,6 +1887,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -1959,7 +2001,7 @@ spec: procMount: description: |- procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for + The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. @@ -2175,6 +2217,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -2283,7 +2331,7 @@ spec: procMount: description: |- procMount denotes the type of proc mount to use for the containers. 
- The default is DefaultProcMount which uses the container runtime defaults for + The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. @@ -2717,7 +2765,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.1-0.20240618033008-7824932b0cab + controller-gen.kubebuilder.io/version: v0.17.0 name: proxygroups.tailscale.com spec: group: tailscale.com @@ -2927,7 +2975,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.1-0.20240618033008-7824932b0cab + controller-gen.kubebuilder.io/version: v0.17.0 name: recorders.tailscale.com spec: group: tailscale.com @@ -3281,7 +3329,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3296,7 +3344,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3457,7 +3505,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3472,7 +3520,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3626,7 +3674,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3641,7 +3689,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3802,7 +3850,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3817,7 +3865,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3979,6 +4027,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -4082,7 +4136,7 @@ spec: procMount: description: |- procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for + The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. @@ -4319,6 +4373,32 @@ spec: Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. 
+ + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string seLinuxOptions: description: |- The SELinux context to be applied to all containers. @@ -4367,18 +4447,28 @@ spec: type: object supplementalGroups: description: |- - A list of groups applied to the first process run in each container, in addition - to the container's primary GID, the fsGroup (if specified), and group memberships - defined in the container image for the uid of the container process. If unspecified, - no additional groups are added to any container. Note that group memberships - defined in the container image for the uid of the container process are still effective, - even if they are not included in this list. + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer type: array x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string sysctls: description: |- Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported diff --git a/cmd/k8s-operator/egress-eps_test.go b/cmd/k8s-operator/egress-eps_test.go index a64f3e4e1..bd81071cb 100644 --- a/cmd/k8s-operator/egress-eps_test.go +++ b/cmd/k8s-operator/egress-eps_test.go @@ -112,7 +112,7 @@ func TestTailscaleEgressEndpointSlices(t *testing.T) { Terminating: pointer.ToBool(false), }, }) - expectEqual(t, fc, eps, nil) + expectEqual(t, fc, eps) }) t.Run("status_does_not_match_pod_ip", func(t *testing.T) { _, stateS := podAndSecretForProxyGroup("foo") // replica Pod has IP 10.0.0.1 @@ -122,7 +122,7 @@ func TestTailscaleEgressEndpointSlices(t *testing.T) { }) expectReconciled(t, er, "operator-ns", "foo") eps.Endpoints = []discoveryv1.Endpoint{} - expectEqual(t, fc, eps, nil) + expectEqual(t, fc, eps) }) } diff --git a/cmd/k8s-operator/egress-services-readiness_test.go b/cmd/k8s-operator/egress-services-readiness_test.go index 052eb1a49..ce947329d 100644 --- a/cmd/k8s-operator/egress-services-readiness_test.go +++ b/cmd/k8s-operator/egress-services-readiness_test.go @@ -67,24 +67,24 @@ func TestEgressServiceReadiness(t *testing.T) { setClusterNotReady(egressSvc, cl, zl.Sugar()) t.Run("endpointslice_does_not_exist", func(t *testing.T) { expectReconciled(t, rec, "dev", "my-app") - expectEqual(t, fc, egressSvc, nil) // not ready + expectEqual(t, fc, egressSvc) // not ready }) t.Run("proxy_group_does_not_exist", func(t *testing.T) { mustCreate(t, fc, eps) expectReconciled(t, rec, "dev", "my-app") - expectEqual(t, fc, egressSvc, nil) // still not ready + expectEqual(t, fc, egressSvc) // still not ready }) t.Run("proxy_group_not_ready", func(t *testing.T) { mustCreate(t, fc, pg) expectReconciled(t, rec, "dev", "my-app") - expectEqual(t, fc, egressSvc, nil) // still not ready + expectEqual(t, fc, egressSvc) // still not ready }) t.Run("no_ready_replicas", func(t *testing.T) { setPGReady(pg, cl, zl.Sugar()) mustUpdateStatus(t, fc, pg.Namespace, pg.Name, func(p *tsapi.ProxyGroup) { p.Status = pg.Status }) - expectEqual(t, fc, pg, nil) + expectEqual(t, fc, pg) for i := range pgReplicas(pg) { p := pod(pg, i) mustCreate(t, fc, p) @@ -94,7 +94,7 @@ func TestEgressServiceReadiness(t *testing.T) { } expectReconciled(t, rec, "dev", "my-app") setNotReady(egressSvc, cl, zl.Sugar(), pgReplicas(pg)) - expectEqual(t, fc, egressSvc, nil) // still not ready + expectEqual(t, fc, egressSvc) // still not ready }) t.Run("one_ready_replica", func(t *testing.T) { setEndpointForReplica(pg, 0, eps) @@ -103,7 +103,7 @@ func TestEgressServiceReadiness(t *testing.T) { }) setReady(egressSvc, cl, zl.Sugar(), pgReplicas(pg), 1) expectReconciled(t, rec, "dev", "my-app") - expectEqual(t, fc, egressSvc, nil) // partially ready + expectEqual(t, fc, egressSvc) // partially ready }) t.Run("all_replicas_ready", func(t *testing.T) { for i := range pgReplicas(pg) { @@ -114,7 +114,7 @@ func TestEgressServiceReadiness(t *testing.T) { }) setReady(egressSvc, cl, zl.Sugar(), pgReplicas(pg), pgReplicas(pg)) expectReconciled(t, rec, "dev", "my-app") - expectEqual(t, fc, egressSvc, nil) // ready + expectEqual(t, fc, egressSvc) // ready }) } diff --git a/cmd/k8s-operator/egress-services_test.go b/cmd/k8s-operator/egress-services_test.go index 06fe977ec..ab0008ca0 100644 --- a/cmd/k8s-operator/egress-services_test.go +++ b/cmd/k8s-operator/egress-services_test.go @@ -96,7 +96,7 @@ func TestTailscaleEgressServices(t *testing.T) { expectReconciled(t, esr, "default", "test") // Service should have EgressSvcValid condition set to Unknown. 
svc.Status.Conditions = []metav1.Condition{condition(tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, clock)} - expectEqual(t, fc, svc, nil) + expectEqual(t, fc, svc) }) t.Run("proxy_group_ready", func(t *testing.T) { @@ -162,7 +162,7 @@ func validateReadyService(t *testing.T, fc client.WithWatch, esr *egressSvcsReco expectEqual(t, fc, clusterIPSvc(name, svc), removeTargetPortsFromSvc) clusterSvc := mustGetClusterIPSvc(t, fc, name) // Verify that an EndpointSlice has been created. - expectEqual(t, fc, endpointSlice(name, svc, clusterSvc), nil) + expectEqual(t, fc, endpointSlice(name, svc, clusterSvc)) // Verify that ConfigMap contains configuration for the new egress service. mustHaveConfigForSvc(t, fc, svc, clusterSvc, cm) r := svcConfiguredReason(svc, true, zl.Sugar()) @@ -174,7 +174,7 @@ func validateReadyService(t *testing.T, fc client.WithWatch, esr *egressSvcsReco } svc.ObjectMeta.Finalizers = []string{"tailscale.com/finalizer"} svc.Spec.ExternalName = fmt.Sprintf("%s.operator-ns.svc.cluster.local", name) - expectEqual(t, fc, svc, nil) + expectEqual(t, fc, svc) } diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index 955258cc3..74eddff56 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -103,9 +103,9 @@ func TestTailscaleIngress(t *testing.T) { } opts.serveConfig = serveConfig - expectEqual(t, fc, expectedSecret(t, fc, opts), nil) - expectEqual(t, fc, expectedHeadlessService(shortName, "ingress"), nil) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, opts)) + expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) // 2. Ingress status gets updated with ingress proxy's MagicDNS name // once that becomes available. @@ -120,7 +120,7 @@ func TestTailscaleIngress(t *testing.T) { {Hostname: "foo.tailnetxyz.ts.net", Ports: []networkingv1.IngressPortStatus{{Port: 443, Protocol: "TCP"}}}, }, } - expectEqual(t, fc, ing, nil) + expectEqual(t, fc, ing) // 3. Resources get created for Ingress that should allow forwarding // cluster traffic @@ -129,7 +129,7 @@ func TestTailscaleIngress(t *testing.T) { }) opts.shouldEnableForwardingClusterTrafficViaIngress = true expectReconciled(t, ingR, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) // 4. Resources get cleaned up when Ingress class is unset mustUpdate(t, fc, "default", "test", func(ing *networkingv1.Ingress) { @@ -229,9 +229,9 @@ func TestTailscaleIngressHostname(t *testing.T) { } opts.serveConfig = serveConfig - expectEqual(t, fc, expectedSecret(t, fc, opts), nil) - expectEqual(t, fc, expectedHeadlessService(shortName, "ingress"), nil) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, opts)) + expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) // 2. 
Ingress proxy with capability version >= 110 does not have an HTTPS endpoint set mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) { @@ -243,7 +243,7 @@ func TestTailscaleIngressHostname(t *testing.T) { expectReconciled(t, ingR, "default", "test") ing.Finalizers = append(ing.Finalizers, "tailscale.com/finalizer") - expectEqual(t, fc, ing, nil) + expectEqual(t, fc, ing) // 3. Ingress proxy with capability version >= 110 advertises HTTPS endpoint mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) { @@ -259,7 +259,7 @@ func TestTailscaleIngressHostname(t *testing.T) { {Hostname: "foo.tailnetxyz.ts.net", Ports: []networkingv1.IngressPortStatus{{Port: 443, Protocol: "TCP"}}}, }, } - expectEqual(t, fc, ing, nil) + expectEqual(t, fc, ing) // 4. Ingress proxy with capability version >= 110 does not have an HTTPS endpoint ready mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) { @@ -271,7 +271,7 @@ func TestTailscaleIngressHostname(t *testing.T) { }) expectReconciled(t, ingR, "default", "test") ing.Status.LoadBalancer.Ingress = nil - expectEqual(t, fc, ing, nil) + expectEqual(t, fc, ing) // 5. Ingress proxy's state has https_endpoints set, but its capver is not matching Pod UID (downgrade) mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) { @@ -287,7 +287,7 @@ func TestTailscaleIngressHostname(t *testing.T) { }, } expectReconciled(t, ingR, "default", "test") - expectEqual(t, fc, ing, nil) + expectEqual(t, fc, ing) } func TestTailscaleIngressWithProxyClass(t *testing.T) { @@ -383,9 +383,9 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { } opts.serveConfig = serveConfig - expectEqual(t, fc, expectedSecret(t, fc, opts), nil) - expectEqual(t, fc, expectedHeadlessService(shortName, "ingress"), nil) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, opts)) + expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) // 2. Ingress is updated to specify a ProxyClass, ProxyClass is not yet // ready, so proxy resource configuration does not change. @@ -393,7 +393,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { mak.Set(&ing.ObjectMeta.Labels, LabelProxyClass, "custom-metadata") }) expectReconciled(t, ingR, "default", "test") - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) // 3. ProxyClass is set to Ready by proxy-class reconciler. Ingress get // reconciled and configuration from the ProxyClass is applied to the @@ -408,7 +408,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { }) expectReconciled(t, ingR, "default", "test") opts.proxyClass = pc.Name - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) // 4. 
tailscale.com/proxy-class label is removed from the Ingress, the // Ingress gets reconciled and the custom ProxyClass configuration is @@ -418,7 +418,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { }) expectReconciled(t, ingR, "default", "test") opts.proxyClass = "" - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) } func TestTailscaleIngressWithServiceMonitor(t *testing.T) { @@ -526,20 +526,20 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { expectReconciled(t, ingR, "default", "test") - expectEqual(t, fc, expectedMetricsService(opts), nil) + expectEqual(t, fc, expectedMetricsService(opts)) // 2. Enable ServiceMonitor - should not error when there is no ServiceMonitor CRD in cluster mustUpdate(t, fc, "", "metrics", func(pc *tsapi.ProxyClass) { pc.Spec.Metrics.ServiceMonitor = &tsapi.ServiceMonitor{Enable: true, Labels: tsapi.Labels{"foo": "bar"}} }) expectReconciled(t, ingR, "default", "test") - expectEqual(t, fc, expectedMetricsService(opts), nil) + expectEqual(t, fc, expectedMetricsService(opts)) // 3. Create ServiceMonitor CRD and reconcile- ServiceMonitor should get created mustCreate(t, fc, crd) expectReconciled(t, ingR, "default", "test") opts.serviceMonitorLabels = tsapi.Labels{"foo": "bar"} - expectEqual(t, fc, expectedMetricsService(opts), nil) + expectEqual(t, fc, expectedMetricsService(opts)) expectEqualUnstructured(t, fc, expectedServiceMonitor(t, opts)) // 4. Update ServiceMonitor CRD and reconcile- ServiceMonitor should get updated @@ -549,7 +549,7 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { expectReconciled(t, ingR, "default", "test") opts.serviceMonitorLabels = nil opts.resourceVersion = "2" - expectEqual(t, fc, expectedMetricsService(opts), nil) + expectEqual(t, fc, expectedMetricsService(opts)) expectEqualUnstructured(t, fc, expectedServiceMonitor(t, opts)) // 5. Disable metrics - metrics resources should get deleted. diff --git a/cmd/k8s-operator/nameserver_test.go b/cmd/k8s-operator/nameserver_test.go index 695710212..cec95b84e 100644 --- a/cmd/k8s-operator/nameserver_test.go +++ b/cmd/k8s-operator/nameserver_test.go @@ -69,7 +69,7 @@ func TestNameserverReconciler(t *testing.T) { wantsDeploy.Namespace = "tailscale" labels := nameserverResourceLabels("test", "tailscale") wantsDeploy.ObjectMeta.Labels = labels - expectEqual(t, fc, wantsDeploy, nil) + expectEqual(t, fc, wantsDeploy) // Verify that DNSConfig advertizes the nameserver's Service IP address, // has the ready status condition and tailscale finalizer. @@ -88,7 +88,7 @@ func TestNameserverReconciler(t *testing.T) { Message: reasonNameserverCreated, LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, }) - expectEqual(t, fc, dnsCfg, nil) + expectEqual(t, fc, dnsCfg) // // Verify that nameserver image gets updated to match DNSConfig spec. mustUpdate(t, fc, "", "test", func(dnsCfg *tsapi.DNSConfig) { @@ -96,7 +96,7 @@ func TestNameserverReconciler(t *testing.T) { }) expectReconciled(t, nr, "", "test") wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.2" - expectEqual(t, fc, wantsDeploy, nil) + expectEqual(t, fc, wantsDeploy) // Verify that when another actor sets ConfigMap data, it does not get // overwritten by nameserver reconciler. 
@@ -114,7 +114,7 @@ func TestNameserverReconciler(t *testing.T) { TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"}, Data: map[string]string{"records.json": string(bs)}, } - expectEqual(t, fc, wantCm, nil) + expectEqual(t, fc, wantCm) // Verify that if dnsconfig.spec.nameserver.image.{repo,tag} are unset, // the nameserver image defaults to tailscale/k8s-nameserver:unstable. @@ -123,5 +123,5 @@ func TestNameserverReconciler(t *testing.T) { }) expectReconciled(t, nr, "", "test") wantsDeploy.Spec.Template.Spec.Containers[0].Image = "tailscale/k8s-nameserver:unstable" - expectEqual(t, fc, wantsDeploy, nil) + expectEqual(t, fc, wantsDeploy) } diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 1998fe3bc..2fa14e33b 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -106,7 +106,7 @@ func TestLoadBalancerClass(t *testing.T) { }}, }, } - expectEqual(t, fc, want, nil) + expectEqual(t, fc, want) // Delete the misconfiguration so the proxy starts getting created on the // next reconcile. @@ -128,9 +128,9 @@ func TestLoadBalancerClass(t *testing.T) { app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSecret(t, fc, opts), nil) - expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, opts)) + expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) want.Annotations = nil want.ObjectMeta.Finalizers = []string{"tailscale.com/finalizer"} @@ -143,7 +143,7 @@ func TestLoadBalancerClass(t *testing.T) { Message: "no Tailscale hostname known yet, waiting for proxy pod to finish auth", }}, } - expectEqual(t, fc, want, nil) + expectEqual(t, fc, want) // Normally the Tailscale proxy pod would come up here and write its info // into the secret. Simulate that, then verify reconcile again and verify @@ -169,7 +169,7 @@ func TestLoadBalancerClass(t *testing.T) { }, }, } - expectEqual(t, fc, want, nil) + expectEqual(t, fc, want) // Turn the service back into a ClusterIP service, which should make the // operator clean up. 
@@ -206,7 +206,7 @@ func TestLoadBalancerClass(t *testing.T) { Type: corev1.ServiceTypeClusterIP, }, } - expectEqual(t, fc, want, nil) + expectEqual(t, fc, want) } func TestTailnetTargetFQDNAnnotation(t *testing.T) { @@ -266,9 +266,9 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { app: kubetypes.AppEgressProxy, } - expectEqual(t, fc, expectedSecret(t, fc, o), nil) - expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, o)) + expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) + expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) want := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -288,10 +288,10 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { Conditions: proxyCreatedCondition(clock), }, } - expectEqual(t, fc, want, nil) - expectEqual(t, fc, expectedSecret(t, fc, o), nil) - expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation) + expectEqual(t, fc, want) + expectEqual(t, fc, expectedSecret(t, fc, o)) + expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) + expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) // Change the tailscale-target-fqdn annotation which should update the // StatefulSet @@ -378,9 +378,9 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { app: kubetypes.AppEgressProxy, } - expectEqual(t, fc, expectedSecret(t, fc, o), nil) - expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, o)) + expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) + expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) want := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -400,10 +400,10 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { Conditions: proxyCreatedCondition(clock), }, } - expectEqual(t, fc, want, nil) - expectEqual(t, fc, expectedSecret(t, fc, o), nil) - expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation) + expectEqual(t, fc, want) + expectEqual(t, fc, expectedSecret(t, fc, o)) + expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) + expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) // Change the tailscale-target-ip annotation which should update the // StatefulSet @@ -501,7 +501,7 @@ func TestTailnetTargetIPAnnotation_IPCouldNotBeParsed(t *testing.T) { }, } - expectEqual(t, fc, want, nil) + expectEqual(t, fc, want) } func TestTailnetTargetIPAnnotation_InvalidIP(t *testing.T) { @@ -572,7 +572,7 @@ func TestTailnetTargetIPAnnotation_InvalidIP(t *testing.T) { }, } - expectEqual(t, fc, want, nil) + expectEqual(t, fc, want) } func TestAnnotations(t *testing.T) { @@ -629,9 +629,9 @@ func TestAnnotations(t *testing.T) { app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSecret(t, fc, o), nil) - expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, o)) + expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) + expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) want := &corev1.Service{ ObjectMeta: 
metav1.ObjectMeta{ Name: "test", @@ -650,7 +650,7 @@ func TestAnnotations(t *testing.T) { Conditions: proxyCreatedCondition(clock), }, } - expectEqual(t, fc, want, nil) + expectEqual(t, fc, want) // Turn the service back into a ClusterIP service, which should make the // operator clean up. @@ -678,7 +678,7 @@ func TestAnnotations(t *testing.T) { Type: corev1.ServiceTypeClusterIP, }, } - expectEqual(t, fc, want, nil) + expectEqual(t, fc, want) } func TestAnnotationIntoLB(t *testing.T) { @@ -735,9 +735,9 @@ func TestAnnotationIntoLB(t *testing.T) { app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSecret(t, fc, o), nil) - expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, o)) + expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) + expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) // Normally the Tailscale proxy pod would come up here and write its info // into the secret. Simulate that, since it would have normally happened at @@ -769,7 +769,7 @@ func TestAnnotationIntoLB(t *testing.T) { Conditions: proxyCreatedCondition(clock), }, } - expectEqual(t, fc, want, nil) + expectEqual(t, fc, want) // Remove Tailscale's annotation, and at the same time convert the service // into a tailscale LoadBalancer. @@ -780,8 +780,8 @@ func TestAnnotationIntoLB(t *testing.T) { }) expectReconciled(t, sr, "default", "test") // None of the proxy machinery should have changed... - expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation) + expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) + expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) // ... but the service should have a LoadBalancer status. want = &corev1.Service{ @@ -810,7 +810,7 @@ func TestAnnotationIntoLB(t *testing.T) { Conditions: proxyCreatedCondition(clock), }, } - expectEqual(t, fc, want, nil) + expectEqual(t, fc, want) } func TestLBIntoAnnotation(t *testing.T) { @@ -865,9 +865,9 @@ func TestLBIntoAnnotation(t *testing.T) { app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSecret(t, fc, o), nil) - expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, o)) + expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) + expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) // Normally the Tailscale proxy pod would come up here and write its info // into the secret. Simulate that, then verify reconcile again and verify @@ -907,7 +907,7 @@ func TestLBIntoAnnotation(t *testing.T) { Conditions: proxyCreatedCondition(clock), }, } - expectEqual(t, fc, want, nil) + expectEqual(t, fc, want) // Turn the service back into a ClusterIP service, but also add the // tailscale annotation. 
@@ -926,8 +926,8 @@ func TestLBIntoAnnotation(t *testing.T) { }) expectReconciled(t, sr, "default", "test") - expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation) + expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) + expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) want = &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -947,7 +947,7 @@ func TestLBIntoAnnotation(t *testing.T) { Conditions: proxyCreatedCondition(clock), }, } - expectEqual(t, fc, want, nil) + expectEqual(t, fc, want) } func TestCustomHostname(t *testing.T) { @@ -1005,9 +1005,9 @@ func TestCustomHostname(t *testing.T) { app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSecret(t, fc, o), nil) - expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, o)) + expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) + expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) want := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -1027,7 +1027,7 @@ func TestCustomHostname(t *testing.T) { Conditions: proxyCreatedCondition(clock), }, } - expectEqual(t, fc, want, nil) + expectEqual(t, fc, want) // Turn the service back into a ClusterIP service, which should make the // operator clean up. @@ -1058,7 +1058,7 @@ func TestCustomHostname(t *testing.T) { Type: corev1.ServiceTypeClusterIP, }, } - expectEqual(t, fc, want, nil) + expectEqual(t, fc, want) } func TestCustomPriorityClassName(t *testing.T) { @@ -1118,7 +1118,7 @@ func TestCustomPriorityClassName(t *testing.T) { app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation) + expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) } func TestProxyClassForService(t *testing.T) { @@ -1186,9 +1186,9 @@ func TestProxyClassForService(t *testing.T) { clusterTargetIP: "10.20.30.40", app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSecret(t, fc, opts), nil) - expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, opts)) + expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) // 2. The Service gets updated with tailscale.com/proxy-class label // pointing at the 'custom-metadata' ProxyClass. The ProxyClass is not @@ -1197,8 +1197,8 @@ func TestProxyClassForService(t *testing.T) { mak.Set(&svc.Labels, LabelProxyClass, "custom-metadata") }) expectReconciled(t, sr, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) - expectEqual(t, fc, expectedSecret(t, fc, opts), nil) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSecret(t, fc, opts)) // 3. 
ProxyClass is set to Ready, the Service gets reconciled by the // services-reconciler and the customization from the ProxyClass is @@ -1213,7 +1213,7 @@ func TestProxyClassForService(t *testing.T) { }) opts.proxyClass = pc.Name expectReconciled(t, sr, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) expectEqual(t, fc, expectedSecret(t, fc, opts), removeAuthKeyIfExistsModifier(t)) // 4. tailscale.com/proxy-class label is removed from the Service, the @@ -1224,7 +1224,7 @@ func TestProxyClassForService(t *testing.T) { }) opts.proxyClass = "" expectReconciled(t, sr, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) } func TestDefaultLoadBalancer(t *testing.T) { @@ -1270,7 +1270,7 @@ func TestDefaultLoadBalancer(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") - expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) + expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) o := configOpts{ stsName: shortName, secretName: fullName, @@ -1280,8 +1280,7 @@ func TestDefaultLoadBalancer(t *testing.T) { clusterTargetIP: "10.20.30.40", app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation) - + expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) } func TestProxyFirewallMode(t *testing.T) { @@ -1337,7 +1336,7 @@ func TestProxyFirewallMode(t *testing.T) { clusterTargetIP: "10.20.30.40", app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation) + expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) } func TestTailscaledConfigfileHash(t *testing.T) { @@ -1393,7 +1392,7 @@ func TestTailscaledConfigfileHash(t *testing.T) { confFileHash: "848bff4b5ba83ac999e6984c8464e597156daba961ae045e7dbaef606d54ab5e", app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSTS(t, fc, o), nil) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) // 2. Hostname gets changed, configfile is updated and a new hash value // is produced. @@ -1403,7 +1402,7 @@ func TestTailscaledConfigfileHash(t *testing.T) { o.hostname = "another-test" o.confFileHash = "d4cc13f09f55f4f6775689004f9a466723325b84d2b590692796bfe22aeaa389" expectReconciled(t, sr, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, o), nil) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) } func Test_isMagicDNSName(t *testing.T) { tests := []struct { @@ -1681,9 +1680,9 @@ func Test_authKeyRemoval(t *testing.T) { app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSecret(t, fc, opts), nil) - expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, opts)) + expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) // 2. Apply update to the Secret that imitates the proxy setting device_id. 
s := expectedSecret(t, fc, opts) @@ -1695,7 +1694,7 @@ func Test_authKeyRemoval(t *testing.T) { expectReconciled(t, sr, "default", "test") opts.shouldRemoveAuthKey = true opts.secretExtraData = map[string][]byte{"device_id": []byte("dkkdi4CNTRL")} - expectEqual(t, fc, expectedSecret(t, fc, opts), nil) + expectEqual(t, fc, expectedSecret(t, fc, opts)) } func Test_externalNameService(t *testing.T) { @@ -1755,9 +1754,9 @@ func Test_externalNameService(t *testing.T) { app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSecret(t, fc, opts), nil) - expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSecret(t, fc, opts)) + expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) // 2. Change the ExternalName and verify that changes get propagated. mustUpdate(t, sr, "default", "test", func(s *corev1.Service) { @@ -1765,7 +1764,7 @@ func Test_externalNameService(t *testing.T) { }) expectReconciled(t, sr, "default", "test") opts.clusterTargetDNS = "bar.com" - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) } func Test_metricsResourceCreation(t *testing.T) { @@ -1835,7 +1834,7 @@ func Test_metricsResourceCreation(t *testing.T) { }) expectReconciled(t, sr, "default", "test") opts.enableMetrics = true - expectEqual(t, fc, expectedMetricsService(opts), nil) + expectEqual(t, fc, expectedMetricsService(opts)) // 2. Enable ServiceMonitor - should not error when there is no ServiceMonitor CRD in cluster mustUpdate(t, fc, "", "metrics", func(pc *tsapi.ProxyClass) { @@ -1855,7 +1854,7 @@ func Test_metricsResourceCreation(t *testing.T) { expectReconciled(t, sr, "default", "test") opts.serviceMonitorLabels = tsapi.Labels{"foo": "bar"} opts.resourceVersion = "2" - expectEqual(t, fc, expectedMetricsService(opts), nil) + expectEqual(t, fc, expectedMetricsService(opts)) expectEqualUnstructured(t, fc, expectedServiceMonitor(t, opts)) // 5. Disable metrics- expect metrics Service to be deleted diff --git a/cmd/k8s-operator/proxyclass_test.go b/cmd/k8s-operator/proxyclass_test.go index 78828107a..48290eea7 100644 --- a/cmd/k8s-operator/proxyclass_test.go +++ b/cmd/k8s-operator/proxyclass_test.go @@ -78,7 +78,7 @@ func TestProxyClass(t *testing.T) { LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, }) - expectEqual(t, fc, pc, nil) + expectEqual(t, fc, pc) // 2. A ProxyClass resource with invalid labels gets its status updated to Invalid with an error message. pc.Spec.StatefulSet.Labels["foo"] = "?!someVal" @@ -88,7 +88,7 @@ func TestProxyClass(t *testing.T) { expectReconciled(t, pcr, "", "test") msg := `ProxyClass is not valid: .spec.statefulSet.labels: Invalid value: "?!someVal": a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 
'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')` tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) - expectEqual(t, fc, pc, nil) + expectEqual(t, fc, pc) expectedEvent := "Warning ProxyClassInvalid ProxyClass is not valid: .spec.statefulSet.labels: Invalid value: \"?!someVal\": a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')" expectEvents(t, fr, []string{expectedEvent}) @@ -102,7 +102,7 @@ func TestProxyClass(t *testing.T) { expectReconciled(t, pcr, "", "test") msg = `ProxyClass is not valid: spec.statefulSet.pod.tailscaleContainer.image: Invalid value: "FOO bar": invalid reference format: repository name (library/FOO bar) must be lowercase` tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) - expectEqual(t, fc, pc, nil) + expectEqual(t, fc, pc) expectedEvent = `Warning ProxyClassInvalid ProxyClass is not valid: spec.statefulSet.pod.tailscaleContainer.image: Invalid value: "FOO bar": invalid reference format: repository name (library/FOO bar) must be lowercase` expectEvents(t, fr, []string{expectedEvent}) @@ -121,7 +121,7 @@ func TestProxyClass(t *testing.T) { expectReconciled(t, pcr, "", "test") msg = `ProxyClass is not valid: spec.statefulSet.pod.tailscaleInitContainer.image: Invalid value: "FOO bar": invalid reference format: repository name (library/FOO bar) must be lowercase` tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) - expectEqual(t, fc, pc, nil) + expectEqual(t, fc, pc) expectedEvent = `Warning ProxyClassInvalid ProxyClass is not valid: spec.statefulSet.pod.tailscaleInitContainer.image: Invalid value: "FOO bar": invalid reference format: repository name (library/FOO bar) must be lowercase` expectEvents(t, fr, []string{expectedEvent}) @@ -145,7 +145,7 @@ func TestProxyClass(t *testing.T) { expectReconciled(t, pcr, "", "test") msg = `ProxyClass is not valid: spec.metrics.serviceMonitor: Invalid value: "enable": ProxyClass defines that a ServiceMonitor custom resource should be created, but "servicemonitors.monitoring.coreos.com" CRD was not found` tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) - expectEqual(t, fc, pc, nil) + expectEqual(t, fc, pc) expectedEvent = "Warning ProxyClassInvalid " + msg expectEvents(t, fr, []string{expectedEvent}) @@ -154,7 +154,7 @@ func TestProxyClass(t *testing.T) { mustCreate(t, fc, crd) expectReconciled(t, pcr, "", "test") tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionTrue, reasonProxyClassValid, reasonProxyClassValid, 0, cl, zl.Sugar()) - expectEqual(t, fc, pc, nil) + expectEqual(t, fc, pc) // 7. A ProxyClass with invalid ServiceMonitor labels gets its status updated to Invalid with an error message. 
pc.Spec.Metrics.ServiceMonitor.Labels = tsapi.Labels{"foo": "bar!"} @@ -164,7 +164,7 @@ func TestProxyClass(t *testing.T) { expectReconciled(t, pcr, "", "test") msg = `ProxyClass is not valid: .spec.metrics.serviceMonitor.labels: Invalid value: "bar!": a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')` tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) - expectEqual(t, fc, pc, nil) + expectEqual(t, fc, pc) // 8. A ProxyClass with valid ServiceMonitor labels gets its status updated to Valid. pc.Spec.Metrics.ServiceMonitor.Labels = tsapi.Labels{"foo": "bar", "xyz1234": "abc567", "empty": "", "onechar": "a"} @@ -173,7 +173,7 @@ func TestProxyClass(t *testing.T) { }) expectReconciled(t, pcr, "", "test") tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionTrue, reasonProxyClassValid, reasonProxyClassValid, 0, cl, zl.Sugar()) - expectEqual(t, fc, pc, nil) + expectEqual(t, fc, pc) } func TestValidateProxyClass(t *testing.T) { diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 96ffefbed..c920c90d1 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -96,7 +96,7 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass default-pc is not yet in a ready state, waiting...", 0, cl, zl.Sugar()) - expectEqual(t, fc, pg, nil) + expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, false, "") }) @@ -117,7 +117,7 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) - expectEqual(t, fc, pg, nil) + expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, "") if expected := 1; reconciler.egressProxyGroups.Len() != expected { t.Fatalf("expected %d egress ProxyGroups, got %d", expected, reconciler.egressProxyGroups.Len()) @@ -153,7 +153,7 @@ func TestProxyGroup(t *testing.T) { }, } tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) - expectEqual(t, fc, pg, nil) + expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, initialCfgHash) }) @@ -164,7 +164,7 @@ func TestProxyGroup(t *testing.T) { }) expectReconciled(t, reconciler, "", pg.Name) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) - expectEqual(t, fc, pg, nil) + expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, initialCfgHash) addNodeIDToStateSecrets(t, fc, pg) @@ -174,7 +174,7 @@ func TestProxyGroup(t *testing.T) { Hostname: "hostname-nodeid-2", TailnetIPs: []string{"1.2.3.4", "::1"}, }) - expectEqual(t, fc, pg, nil) + expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, initialCfgHash) }) @@ -187,7 +187,7 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) pg.Status.Devices = pg.Status.Devices[:1] // truncate to only 
the first device. - expectEqual(t, fc, pg, nil) + expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, initialCfgHash) }) @@ -201,7 +201,7 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) - expectEqual(t, fc, pg, nil) + expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, "518a86e9fae64f270f8e0ec2a2ea6ca06c10f725035d3d6caca132cd61e42a74") }) @@ -211,7 +211,7 @@ func TestProxyGroup(t *testing.T) { p.Spec = pc.Spec }) expectReconciled(t, reconciler, "", pg.Name) - expectEqual(t, fc, expectedMetricsService(opts), nil) + expectEqual(t, fc, expectedMetricsService(opts)) }) t.Run("enable_service_monitor_no_crd", func(t *testing.T) { pc.Spec.Metrics.ServiceMonitor = &tsapi.ServiceMonitor{Enable: true} @@ -389,10 +389,10 @@ func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.Prox } if shouldExist { - expectEqual(t, fc, role, nil) - expectEqual(t, fc, roleBinding, nil) - expectEqual(t, fc, serviceAccount, nil) - expectEqual(t, fc, statefulSet, nil) + expectEqual(t, fc, role) + expectEqual(t, fc, roleBinding) + expectEqual(t, fc, serviceAccount) + expectEqual(t, fc, statefulSet, removeResourceReqs) } else { expectMissing[rbacv1.Role](t, fc, role.Namespace, role.Name) expectMissing[rbacv1.RoleBinding](t, fc, roleBinding.Namespace, roleBinding.Name) diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 277bd16df..240a7df15 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -618,7 +618,7 @@ func mustUpdateStatus[T any, O ptrObject[T]](t *testing.T, client client.Client, // modify func to ensure that they are removed from the cluster object and the // object passed as 'want'. If no such modifications are needed, you can pass // nil in place of the modify function. -func expectEqual[T any, O ptrObject[T]](t *testing.T, client client.Client, want O, modifier func(O)) { +func expectEqual[T any, O ptrObject[T]](t *testing.T, client client.Client, want O, modifiers ...func(O)) { t.Helper() got := O(new(T)) if err := client.Get(context.Background(), types.NamespacedName{ @@ -632,7 +632,7 @@ func expectEqual[T any, O ptrObject[T]](t *testing.T, client client.Client, want // so just remove it from both got and want. 
got.SetResourceVersion("") want.SetResourceVersion("") - if modifier != nil { + for _, modifier := range modifiers { modifier(want) modifier(got) } @@ -799,6 +799,12 @@ func removeHashAnnotation(sts *appsv1.StatefulSet) { } } +func removeResourceReqs(sts *appsv1.StatefulSet) { + if sts != nil { + sts.Spec.Template.Spec.Resources = nil + } +} + func removeTargetPortsFromSvc(svc *corev1.Service) { newPorts := make([]corev1.ServicePort, 0) for _, p := range svc.Spec.Ports { diff --git a/cmd/k8s-operator/tsrecorder_test.go b/cmd/k8s-operator/tsrecorder_test.go index bd73e8fb9..4de1089a9 100644 --- a/cmd/k8s-operator/tsrecorder_test.go +++ b/cmd/k8s-operator/tsrecorder_test.go @@ -57,7 +57,7 @@ func TestRecorder(t *testing.T) { msg := "Recorder is invalid: must either enable UI or use S3 storage to ensure recordings are accessible" tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, metav1.ConditionFalse, reasonRecorderInvalid, msg, 0, cl, zl.Sugar()) - expectEqual(t, fc, tsr, nil) + expectEqual(t, fc, tsr) if expected := 0; reconciler.recorders.Len() != expected { t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len()) } @@ -76,7 +76,7 @@ func TestRecorder(t *testing.T) { expectReconciled(t, reconciler, "", tsr.Name) tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, metav1.ConditionTrue, reasonRecorderCreated, reasonRecorderCreated, 0, cl, zl.Sugar()) - expectEqual(t, fc, tsr, nil) + expectEqual(t, fc, tsr) if expected := 1; reconciler.recorders.Len() != expected { t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len()) } @@ -112,7 +112,7 @@ func TestRecorder(t *testing.T) { URL: "https://test-0.example.ts.net", }, } - expectEqual(t, fc, tsr, nil) + expectEqual(t, fc, tsr) }) t.Run("delete the Recorder and observe cleanup", func(t *testing.T) { @@ -145,12 +145,12 @@ func expectRecorderResources(t *testing.T, fc client.WithWatch, tsr *tsapi.Recor statefulSet := tsrStatefulSet(tsr, tsNamespace) if shouldExist { - expectEqual(t, fc, auth, nil) - expectEqual(t, fc, state, nil) - expectEqual(t, fc, role, nil) - expectEqual(t, fc, roleBinding, nil) - expectEqual(t, fc, serviceAccount, nil) - expectEqual(t, fc, statefulSet, nil) + expectEqual(t, fc, auth) + expectEqual(t, fc, state) + expectEqual(t, fc, role) + expectEqual(t, fc, roleBinding) + expectEqual(t, fc, serviceAccount) + expectEqual(t, fc, statefulSet, removeResourceReqs) } else { expectMissing[corev1.Secret](t, fc, auth.Namespace, auth.Name) expectMissing[corev1.Secret](t, fc, state.Namespace, state.Name) diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 9599f6a01..52d649a1d 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -8,11 +8,11 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ + github.com/munnerz/goautoneg from github.com/prometheus/common/expfmt 💣 github.com/prometheus/client_golang/prometheus from tailscale.com/tsweb/promvarz github.com/prometheus/client_golang/prometheus/internal from github.com/prometheus/client_golang/prometheus github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+ - 
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg from github.com/prometheus/common/expfmt github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+ LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs @@ -155,7 +155,6 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar html from net/http/pprof+ io from bufio+ io/fs from crypto/x509+ - io/ioutil from google.golang.org/protobuf/internal/impl iter from maps+ log from expvar+ log/internal from log diff --git a/go.mod b/go.mod index 79374eb9c..92dd1bf65 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/evanw/esbuild v0.19.11 github.com/fogleman/gg v1.3.0 github.com/frankban/quicktest v1.14.6 - github.com/fxamacker/cbor/v2 v2.6.0 + github.com/fxamacker/cbor/v2 v2.7.0 github.com/gaissmai/bart v0.11.1 github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 github.com/go-logr/zapr v1.3.0 @@ -68,7 +68,7 @@ require ( github.com/pkg/sftp v1.13.6 github.com/prometheus-community/pro-bing v0.4.0 github.com/prometheus/client_golang v1.19.1 - github.com/prometheus/common v0.48.0 + github.com/prometheus/common v0.55.0 github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff github.com/safchain/ethtool v0.3.0 github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e @@ -79,7 +79,7 @@ require ( github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a - github.com/tailscale/mkctr v0.0.0-20241111153353-1a38f6676f10 + github.com/tailscale/mkctr v0.0.0-20250110151924-54977352e4a6 github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 @@ -109,12 +109,12 @@ require ( gopkg.in/square/go-jose.v2 v2.6.0 gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 honnef.co/go/tools v0.5.1 - k8s.io/api v0.30.3 - k8s.io/apimachinery v0.30.3 - k8s.io/apiserver v0.30.3 - k8s.io/client-go v0.30.3 - sigs.k8s.io/controller-runtime v0.18.4 - sigs.k8s.io/controller-tools v0.15.1-0.20240618033008-7824932b0cab + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/apiserver v0.32.0 + k8s.io/client-go v0.32.0 + sigs.k8s.io/controller-runtime v0.19.4 + sigs.k8s.io/controller-tools v0.17.0 sigs.k8s.io/yaml v1.4.0 software.sslmate.com/src/go-pkcs12 v0.4.0 ) @@ -143,12 +143,11 @@ require ( github.com/ghostiam/protogetter v0.3.5 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect - github.com/gobuffalo/flect v1.0.2 // indirect + github.com/gobuffalo/flect v1.0.3 // indirect github.com/goccy/go-yaml v1.12.0 // indirect github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect github.com/golangci/plugin-module-register v0.1.1 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect - github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd // indirect github.com/gorilla/securecookie v1.1.2 // indirect github.com/jjti/go-spancheck v0.5.3 // indirect github.com/karamaru-alpha/copyloopvar v1.0.8 // indirect @@ -159,12 +158,14 @@ require ( github.com/ykadowak/zerologlint v0.1.5 // indirect go-simpler.org/musttag v0.9.0 // indirect 
go-simpler.org/sloglint v0.5.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect go.uber.org/automaxprocs v1.5.3 // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect ) require ( @@ -210,25 +211,24 @@ require ( github.com/breml/errchkjson v0.3.6 // indirect github.com/butuzov/ireturn v0.3.0 // indirect github.com/cavaliergopher/cpio v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charithe/durationcheck v0.0.10 // indirect github.com/chavacava/garif v0.1.0 // indirect github.com/cloudflare/circl v1.3.7 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect github.com/curioswitch/go-reassign v0.2.0 // indirect github.com/daixiang0/gci v0.12.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect - github.com/docker/cli v27.3.1+incompatible // indirect + github.com/docker/cli v27.4.1+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v27.3.1+incompatible // indirect + github.com/docker/docker v27.4.1+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/emicklei/go-restful/v3 v3.11.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/ettle/strcase v0.2.0 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect - github.com/fatih/color v1.17.0 // indirect + github.com/fatih/color v1.18.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/firefart/nonamedreturns v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 @@ -238,9 +238,9 @@ require ( github.com/go-git/go-billy/v5 v5.6.1 // indirect github.com/go-git/go-git/v5 v5.13.1 // indirect github.com/go-logr/logr v1.4.2 // indirect - github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect - github.com/go-openapi/swag v0.22.7 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect github.com/go-toolsmith/astequal v1.2.0 // indirect @@ -328,14 +328,14 @@ require ( github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polyfloyd/go-errorlint v1.4.8 // indirect - github.com/prometheus/client_model v0.5.0 - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/client_model v0.6.1 + github.com/prometheus/procfs v0.15.1 // indirect github.com/quasilyte/go-ruleguard v0.4.2 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect github.com/quasilyte/regex/syntax 
v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect github.com/rivo/uniseg v0.4.4 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/ryancurrah/gomodguard v1.3.1 // indirect github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect @@ -387,19 +387,19 @@ require ( golang.org/x/image v0.23.0 // indirect golang.org/x/text v0.21.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 howett.net/plist v1.0.0 // indirect - k8s.io/apiextensions-apiserver v0.30.3 + k8s.io/apiextensions-apiserver v0.32.0 k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect - k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 + k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 mvdan.cc/gofumpt v0.6.0 // indirect mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect ) diff --git a/go.sum b/go.sum index 28315ad1e..0354c3364 100644 --- a/go.sum +++ b/go.sum @@ -211,13 +211,13 @@ github.com/cavaliergopher/cpio v1.0.1/go.mod h1:pBdaqQjnvXxdS/6CvNDwIANIFSP0xRKI github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg= github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= @@ -237,8 +237,8 @@ github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NA github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= 
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU= -github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk= +github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= +github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= @@ -275,12 +275,12 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= -github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ= -github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v27.4.1+incompatible h1:VzPiUlRJ/xh+otB75gva3r05isHMo5wXDfPRi5/b4hI= +github.com/docker/cli v27.4.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= -github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4= +github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= @@ -309,8 +309,8 @@ github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0 github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/evanw/esbuild v0.19.11 h1:mbPO1VJ/df//jjUd+p/nRLYCpizXxXb2w/zZMShxa2k= github.com/evanw/esbuild v0.19.11/go.mod h1:D2vIQZqV/vIf/VRHtViaUtViZmG7o+kKmlBfVQuRi48= -github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= -github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= 
@@ -323,8 +323,8 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= -github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= @@ -365,12 +365,12 @@ github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= -github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= -github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8= -github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= @@ -381,7 +381,8 @@ github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7 github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= @@ -405,8 +406,8 @@ github.com/go-viper/mapstructure/v2 
v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsM github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= -github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= -github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= +github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4= +github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-yaml v1.12.0 h1:/1WHjnMsI1dlIBQutrvSMGZRQufVO3asrHfTwfACoPM= @@ -508,8 +509,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo= -github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/rpmpack v0.5.0 h1:L16KZ3QvkFGpYhmp23iQip+mx1X39foEsqszjMNBm8A= github.com/google/rpmpack v0.5.0/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI= @@ -545,8 +546,8 @@ github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= @@ -738,10 +739,10 @@ github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.17.1 
h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= -github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= -github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= -github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= +github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -790,21 +791,21 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1: github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= -github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff h1:X1Tly81aZ22DA1fxBdfvR3iw8+yFoUBUHMEd+AX/ZXI= github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff/go.mod 
h1:FvE8dtQ1Ww63IlyKBn1V4s+zMwF9kHkVNkQBR1pM4CU= github.com/quasilyte/go-ruleguard v0.4.2 h1:htXcXDK6/rO12kiTHKfHuqR4kr3Y4M0J0rOL6CH/BYs= @@ -820,8 +821,8 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.3.1 h1:fH+fUg+ngsQO0ruZXXHnA/2aNllWA1whly4a6UvyzGE= github.com/ryancurrah/gomodguard v1.3.1/go.mod h1:DGFHzEhi6iJ0oIDfMuo3TgrS+L9gZvrEfmjjuelnRU0= @@ -926,8 +927,8 @@ github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPx github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= -github.com/tailscale/mkctr v0.0.0-20241111153353-1a38f6676f10 h1:ZB47BgnHcEHQJODkDubs5ZiNeJxMhcgzefV3lykRwVQ= -github.com/tailscale/mkctr v0.0.0-20241111153353-1a38f6676f10/go.mod h1:iDx/0Rr9VV/KanSUDpJ6I/ROf0sQ7OqljXc/esl0UIA= +github.com/tailscale/mkctr v0.0.0-20250110151924-54977352e4a6 h1:9SuADtKJAGQkIpnpg5znEJ86QaxacN25pHkiEXTDjzg= +github.com/tailscale/mkctr v0.0.0-20250110151924-54977352e4a6/go.mod h1:qTslktI+Qh9hXo7ZP8xLkl5V8AxUMfxG0xLtkCFLxnw= github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= @@ -1017,22 +1018,24 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod 
h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= -go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= -go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -1384,11 +1387,11 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg= -google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac h1:OZkkudMUu9LVQMCoRUbI/1p5VCo9BOrlvkqMvWtqa6s= -google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:B5xPO//w8qmBDjGReYLpR6UJPnkldGkCSMoH/2vxJeg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= 
+google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= +google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1401,8 +1404,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1415,8 +1418,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1424,6 +1427,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= 
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= @@ -1461,22 +1466,22 @@ honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I= honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= -k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ= -k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04= -k8s.io/apiextensions-apiserver v0.30.3 h1:oChu5li2vsZHx2IvnGP3ah8Nj3KyqG3kRSaKmijhB9U= -k8s.io/apiextensions-apiserver v0.30.3/go.mod h1:uhXxYDkMAvl6CJw4lrDN4CPbONkF3+XL9cacCT44kV4= -k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc= -k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= -k8s.io/apiserver v0.30.3 h1:QZJndA9k2MjFqpnyYv/PH+9PE0SHhx3hBho4X0vE65g= -k8s.io/apiserver v0.30.3/go.mod h1:6Oa88y1CZqnzetd2JdepO0UXzQX4ZnOekx2/PtEjrOg= -k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= -k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= +k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= +k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0= +k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0= +k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw= +k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg= +k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apiserver v0.32.0 h1:VJ89ZvQZ8p1sLeiWdRJpRD6oLozNZD2+qVSLi+ft5Qs= +k8s.io/apiserver v0.32.0/go.mod h1:HFh+dM1/BE/Hm4bS4nTXHVfN6Z6tFIZPi649n83b4Ag= +k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= +k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= mvdan.cc/gofumpt v0.6.0 h1:G3QvahNDmpD+Aek/bNOLrFR2XC6ZAdo62dZu65gmwGo= mvdan.cc/gofumpt v0.6.0/go.mod h1:4L0wf+kgIPZtcCWXynNS2e6bhmj73umwnuXSZarixzA= mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14 h1:zCr3iRRgdk5eIikZNDphGcM6KGVTx3Yu+/Uu9Es254w= @@ -1484,14 +1489,14 @@ mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14/go.mod h1:ZzZjEpJDOmx8TdVU6u rsc.io/binaryregexp v0.2.0/go.mod 
h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= -sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= -sigs.k8s.io/controller-tools v0.15.1-0.20240618033008-7824932b0cab h1:Fq4VD28nejtsijBNTeRRy9Tt3FVwq+o6NB7fIxja8uY= -sigs.k8s.io/controller-tools v0.15.1-0.20240618033008-7824932b0cab/go.mod h1:egedX5jq2KrZ3A2zaOz3e2DSsh5BhFyyjvNcBRIQel8= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo= +sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/controller-tools v0.17.0 h1:KaEQZbhrdY6J3zLBHplt+0aKUp8PeIttlhtF2UDo6bI= +sigs.k8s.io/controller-tools v0.17.0/go.mod h1:SKoWY8rwGWDzHtfnhmOwljn6fViG0JF7/xmnxpklgjo= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= From c79b736a856b63adc76610a82e7080fc3a468f29 Mon Sep 17 00:00:00 2001 From: Andrea Gottardo Date: Fri, 17 Jan 2025 14:52:47 -0800 Subject: [PATCH 0348/1708] ipnlocal: allow overriding os.Hostname() via syspolicy (#14676) Updates tailscale/corp#25936 This defines a new syspolicy 'Hostname' and allows an IT administrator to override the value we normally read from os.Hostname(). This is particularly useful on Android and iOS devices, where the hostname we get from the OS is really just the device model (a platform restriction to prevent fingerprinting). If we don't implement this, all devices on the customer's side will look like `google-pixel-7a-1`, `google-pixel-7a-2`, `google-pixel-7a-3`, etc. and it is not feasible for the customer to use the API or worse the admin console to manually fix these names. 
Apply code review comment by @nickkhyl Signed-off-by: Andrea Gottardo Co-authored-by: Nick Khyl <1761190+nickkhyl@users.noreply.github.com> --- ipn/ipnlocal/local.go | 31 +++++++++++++++++++++++++++++++ util/syspolicy/policy_keys.go | 6 ++++++ 2 files changed, 37 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 92d2f123f..c59df833d 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1703,6 +1703,37 @@ func applySysPolicy(prefs *ipn.Prefs, lastSuggestedExitNode tailcfg.StableNodeID anyChange = true } + const sentinel = "HostnameDefaultValue" + hostnameFromPolicy, _ := syspolicy.GetString(syspolicy.Hostname, sentinel) + switch hostnameFromPolicy { + case sentinel: + // An empty string for this policy value means that the admin wants to delete + // the hostname stored in the ipn.Prefs. To make that work, we need to + // distinguish between an empty string and a policy that was not set. + // We cannot do that with the current implementation of syspolicy.GetString. + // It currently does not return an error if a policy was not configured. + // Instead, it returns the default value provided as the second argument. + // This behavior makes it impossible to distinguish between a policy that + // was not set and a policy that was set to an empty default value. + // Checking for sentinel here is a workaround to distinguish between + // the two cases. If we get it, we do nothing because the policy was not set. + // + // TODO(angott,nickkhyl): clean up this behavior once syspolicy.GetString starts + // properly returning errors. + case "": + // The policy was set to an empty string, which means the admin intends + // to clear the hostname stored in preferences. + prefs.Hostname = "" + anyChange = true + default: + // The policy was set to a non-empty string, which means the admin wants + // to override the hostname stored in preferences. + if prefs.Hostname != hostnameFromPolicy { + prefs.Hostname = hostnameFromPolicy + anyChange = true + } + } + if exitNodeIDStr, _ := syspolicy.GetString(syspolicy.ExitNodeID, ""); exitNodeIDStr != "" { exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) if shouldAutoExitNode() && lastSuggestedExitNode != "" { diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index bb9a5d6cc..35a36130e 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -123,6 +123,11 @@ const ( // Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA" MachineCertificateSubject Key = "MachineCertificateSubject" + // Hostname is the hostname of the device that is running Tailscale. + // When this policy is set, it overrides the hostname that the client + // would otherwise obtain from the OS, e.g. by calling os.Hostname(). + Hostname Key = "Hostname" + // Keys with a string array value. // AllowedSuggestedExitNodes's string array value is a list of exit node IDs that restricts which exit nodes are considered when generating suggestions for exit nodes. 
AllowedSuggestedExitNodes Key = "AllowedSuggestedExitNodes" @@ -148,6 +153,7 @@ var implicitDefinitions = []*setting.Definition{ setting.NewDefinition(ExitNodeID, setting.DeviceSetting, setting.StringValue), setting.NewDefinition(ExitNodeIP, setting.DeviceSetting, setting.StringValue), setting.NewDefinition(FlushDNSOnSessionUnlock, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(Hostname, setting.DeviceSetting, setting.StringValue), setting.NewDefinition(LogSCMInteractions, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(LogTarget, setting.DeviceSetting, setting.StringValue), setting.NewDefinition(MachineCertificateSubject, setting.DeviceSetting, setting.StringValue), From 6c30840cac13f184474654c90b7b0cc314069b25 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Sun, 19 Jan 2025 19:00:21 +0000 Subject: [PATCH 0349/1708] ipn: [serve] warn that foreground funnel won't work if shields are up (#14685) We throw an error early with a warning if users attempt to enable background funnel for a node that does not allow incoming connections (shields up), but if it is done in foreground mode, we just silently fail (the funnel command succeeds, but the connections are not allowed). This change makes sure that we also error early in foreground mode. Updates tailscale/tailscale#11049 Signed-off-by: Irbe Krumina --- ipn/serve.go | 20 ++++++----- ipn/serve_test.go | 85 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 97 insertions(+), 8 deletions(-) diff --git a/ipn/serve.go b/ipn/serve.go index b7effa874..176c6d984 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -63,12 +63,12 @@ type ServeConfig struct { // traffic is allowed, from trusted ingress peers. AllowFunnel map[HostPort]bool `json:",omitempty"` - // Foreground is a map of an IPN Bus session ID to an alternate foreground - // serve config that's valid for the life of that WatchIPNBus session ID. - // This. This allows the config to specify ephemeral configs that are - // used in the CLI's foreground mode to ensure ungraceful shutdowns - // of either the client or the LocalBackend does not expose ports - // that users are not aware of. + // Foreground is a map of an IPN Bus session ID to an alternate foreground serve config that's valid for the + // life of that WatchIPNBus session ID. This allows the config to specify ephemeral configs that are used + // in the CLI's foreground mode to ensure ungraceful shutdowns of either the client or the LocalBackend does not + // expose ports that users are not aware of. In practice this contains any serve config set via 'tailscale + // serve' command run without the '--bg' flag. ServeConfig contained by Foreground is not expected itself to contain + // another Foreground block. Foreground map[string]*ServeConfig `json:",omitempty"` // ETag is the checksum of the serve config that's populated @@ -389,8 +389,7 @@ func (sc *ServeConfig) RemoveTCPForwarding(port uint16) { // View version of ServeConfig.IsFunnelOn. func (v ServeConfigView) IsFunnelOn() bool { return v.ж.IsFunnelOn() } -// IsFunnelOn reports whether if ServeConfig is currently allowing funnel -// traffic for any host:port. +// IsFunnelOn reports whether any funnel endpoint is currently enabled for this node.
func (sc *ServeConfig) IsFunnelOn() bool { if sc == nil { return false @@ -400,6 +399,11 @@ func (sc *ServeConfig) IsFunnelOn() bool { return true } } + for _, conf := range sc.Foreground { + if conf.IsFunnelOn() { + return true + } + } return false } diff --git a/ipn/serve_test.go b/ipn/serve_test.go index e9d8e8f32..ae1d56eef 100644 --- a/ipn/serve_test.go +++ b/ipn/serve_test.go @@ -182,3 +182,88 @@ func TestExpandProxyTargetDev(t *testing.T) { }) } } + +func TestIsFunnelOn(t *testing.T) { + tests := []struct { + name string + sc *ServeConfig + want bool + }{ + { + name: "nil_config", + }, + { + name: "empty_config", + sc: &ServeConfig{}, + }, + { + name: "funnel_enabled_in_background", + sc: &ServeConfig{ + AllowFunnel: map[HostPort]bool{ + "tailnet.xyz:443": true, + }, + }, + want: true, + }, + { + name: "funnel_disabled_in_background", + sc: &ServeConfig{ + AllowFunnel: map[HostPort]bool{ + "tailnet.xyz:443": false, + }, + }, + }, + { + name: "funnel_enabled_in_foreground", + sc: &ServeConfig{ + Foreground: map[string]*ServeConfig{ + "abc123": { + AllowFunnel: map[HostPort]bool{ + "tailnet.xyz:443": true, + }, + }, + }, + }, + want: true, + }, + { + name: "funnel_disabled_in_both", + sc: &ServeConfig{ + AllowFunnel: map[HostPort]bool{ + "tailnet.xyz:443": false, + }, + Foreground: map[string]*ServeConfig{ + "abc123": { + AllowFunnel: map[HostPort]bool{ + "tailnet.xyz:8443": false, + }, + }, + }, + }, + }, + { + name: "funnel_enabled_in_both", + sc: &ServeConfig{ + AllowFunnel: map[HostPort]bool{ + "tailnet.xyz:443": true, + }, + Foreground: map[string]*ServeConfig{ + "abc123": { + AllowFunnel: map[HostPort]bool{ + "tailnet.xyz:8443": true, + }, + }, + }, + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.sc.IsFunnelOn(); got != tt.want { + t.Errorf("ServeConfig.IsFunnelOn() = %v, want %v", got, tt.want) + } + }) + } +} From 6e3c746942b5f5c65ef813713f861449b0c7f54e Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Mon, 20 Jan 2025 12:31:26 -0500 Subject: [PATCH 0350/1708] derp: add bytes dropped metric (#14698) Add bytes dropped counter metric by reason and kind. Fixes tailscale/corp#25918 Signed-off-by: Mike O'Driscoll --- derp/derp_server.go | 35 ++++++++++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/derp/derp_server.go b/derp/derp_server.go index 08fd280a9..983b5dc00 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -357,6 +357,12 @@ var packetsDropped = metrics.NewMultiLabelMap[dropReasonKindLabels]( "counter", "DERP packets dropped by reason and by kind") +var bytesDropped = metrics.NewMultiLabelMap[dropReasonKindLabels]( + "derp_bytes_dropped", + "counter", + "DERP bytes dropped by reason and by kind", +) + // NewServer returns a new DERP server. It doesn't listen on its own. // Connections are given to it via Server.Accept. 
func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { @@ -388,13 +394,13 @@ func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { s.packetsRecvDisco = s.packetsRecvByKind.Get(string(packetKindDisco)) s.packetsRecvOther = s.packetsRecvByKind.Get(string(packetKindOther)) - genPacketsDroppedCounters() + genDroppedCounters() s.perClientSendQueueDepth = getPerClientSendQueueDepth() return s } -func genPacketsDroppedCounters() { +func genDroppedCounters() { initMetrics := func(reason dropReason) { packetsDropped.Add(dropReasonKindLabels{ Kind: string(packetKindDisco), @@ -404,6 +410,14 @@ func genPacketsDroppedCounters() { Kind: string(packetKindOther), Reason: string(reason), }, 0) + bytesDropped.Add(dropReasonKindLabels{ + Kind: string(packetKindDisco), + Reason: string(reason), + }, 0) + bytesDropped.Add(dropReasonKindLabels{ + Kind: string(packetKindOther), + Reason: string(reason), + }, 0) } getMetrics := func(reason dropReason) []expvar.Var { return []expvar.Var{ @@ -415,6 +429,14 @@ func genPacketsDroppedCounters() { Kind: string(packetKindOther), Reason: string(reason), }), + bytesDropped.Get(dropReasonKindLabels{ + Kind: string(packetKindDisco), + Reason: string(reason), + }), + bytesDropped.Get(dropReasonKindLabels{ + Kind: string(packetKindOther), + Reason: string(reason), + }), } } @@ -431,12 +453,14 @@ func genPacketsDroppedCounters() { for _, dr := range dropReasons { initMetrics(dr) m := getMetrics(dr) - if len(m) != 2 { + if len(m) != 4 { panic("dropReason metrics out of sync") } - if m[0] == nil || m[1] == nil { - panic("dropReason metrics out of sync") + for _, v := range m { + if v == nil { + panic("dropReason metrics out of sync") + } } } } @@ -1207,6 +1231,7 @@ func (s *Server) recordDrop(packetBytes []byte, srcKey, dstKey key.NodePublic, r labels.Kind = string(packetKindOther) } packetsDropped.Add(labels, 1) + bytesDropped.Add(labels, int64(len(packetBytes))) if verboseDropKeys[dstKey] { // Preformat the log string prior to calling limitedLogf. The From 174af763eb36d31a75ff397a1ed2b1789f41b16b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 10:57:18 -0700 Subject: [PATCH 0351/1708] .github: Bump actions/upload-artifact from 4.4.3 to 4.6.0 (#14697) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.4.3 to 4.6.0. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882...65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 92ef57b50..20f215cd0 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -467,7 +467,7 @@ jobs: run: | echo "artifacts_path=$(realpath .)" >> $GITHUB_ENV - name: upload crash - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 if: steps.run.outcome != 'success' && steps.build.outcome == 'success' with: name: artifacts From 33e62a31bdd9c0e919adbc9c6dbc42aba65e7f23 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 11:18:42 -0700 Subject: [PATCH 0352/1708] .github: Bump peter-evans/create-pull-request from 7.0.5 to 7.0.6 (#14695) Bumps [peter-evans/create-pull-request](https://github.com/peter-evans/create-pull-request) from 7.0.5 to 7.0.6. - [Release notes](https://github.com/peter-evans/create-pull-request/releases) - [Commits](https://github.com/peter-evans/create-pull-request/compare/5e914681df9dc83aa4e4905692ca88beb2f9e91f...67ccf781d68cd99b580ae25a5c18a1cc84ffff1f) --- updated-dependencies: - dependency-name: peter-evans/create-pull-request dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/update-flake.yml | 2 +- .github/workflows/update-webclient-prebuilt.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index f79248c1e..151ed6bab 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -36,7 +36,7 @@ jobs: private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }} - name: Send pull request - uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f #v7.0.5 + uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f #v7.0.6 with: token: ${{ steps.generate-token.outputs.token }} author: Flakes Updater diff --git a/.github/workflows/update-webclient-prebuilt.yml b/.github/workflows/update-webclient-prebuilt.yml index a0ae95cd7..11665460b 100644 --- a/.github/workflows/update-webclient-prebuilt.yml +++ b/.github/workflows/update-webclient-prebuilt.yml @@ -35,7 +35,7 @@ jobs: - name: Send pull request id: pull-request - uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f #v7.0.5 + uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f #v7.0.6 with: token: ${{ steps.generate-token.outputs.token }} author: OSS Updater From 682c06a0e7921e979ef8f24e57a14078eb5dd115 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 12:48:50 -0700 Subject: [PATCH 0353/1708] .github: Bump golangci/golangci-lint-action from 6.1.0 to 6.2.0 (#14696) Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.1.0 to 6.2.0. 
- [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/aaa42aa0628b4ae2578232a66b541047968fac86...ec5d18412c0aeab7936cb16880d708ba2a64e1ae) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/golangci-lint.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 9f1f2b9d1..ad135f784 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -31,8 +31,7 @@ jobs: cache: false - name: golangci-lint - # Note: this is the 'v6.1.0' tag as of 2024-08-21 - uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 + uses: golangci/golangci-lint-action@ec5d18412c0aeab7936cb16880d708ba2a64e1ae # v6.2.0 with: version: v1.60 From 70c7b0d77f134d33943a220a4f63949266c83373 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 13:05:37 -0700 Subject: [PATCH 0354/1708] build(deps): bump nanoid from 3.3.4 to 3.3.8 in /cmd/tsconnect (#14352) Bumps [nanoid](https://github.com/ai/nanoid) from 3.3.4 to 3.3.8. - [Release notes](https://github.com/ai/nanoid/releases) - [Changelog](https://github.com/ai/nanoid/blob/main/CHANGELOG.md) - [Commits](https://github.com/ai/nanoid/compare/3.3.4...3.3.8) --- updated-dependencies: - dependency-name: nanoid dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- cmd/tsconnect/yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/tsconnect/yarn.lock b/cmd/tsconnect/yarn.lock index 663a1244e..811eddeb7 100644 --- a/cmd/tsconnect/yarn.lock +++ b/cmd/tsconnect/yarn.lock @@ -349,9 +349,9 @@ minimist@^1.2.6: integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q== nanoid@^3.3.4: - version "3.3.4" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.4.tgz#730b67e3cd09e2deacf03c027c81c9d9dbc5e8ab" - integrity sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw== + version "3.3.8" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.8.tgz#b1be3030bee36aaff18bacb375e5cce521684baf" + integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w== normalize-path@^3.0.0, normalize-path@~3.0.0: version "3.0.0" From 69a985fb1edfae8c887e42711d5df814ce7353a7 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 21 Jan 2025 05:17:27 +0000 Subject: [PATCH 0355/1708] ipn/ipnlocal,tailcfg: communicate to control whether funnel is enabled (#14688) Adds a new Hostinfo.IngressEnabled bool field that holds whether funnel is currently enabled for the node. Triggers control update when this value changes. Bumps capver so that control can distinguish the new field being false vs non-existant in previous clients. This is part of a fix for an issue where nodes with any AllowFunnel block set in their serve config are being displayed as if actively routing funnel traffic in the admin panel. 
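As a rough sketch of the mechanism (hypothetical method name, simplified from the updateIngressLocked/hasIngressEnabledLocked changes in the diff below): the client derives IngressEnabled from its serve config and pushes a Hostinfo update to control only when the value actually changes.

	// syncIngressEnabled is a hypothetical, simplified version of the bookkeeping
	// this patch adds to the existing LocalBackend type; it is not the code in the
	// diff itself. It recomputes IngressEnabled from the serve config and notifies
	// control only on change.
	func (b *LocalBackend) syncIngressEnabled() {
		b.mu.Lock()
		enabled := b.serveConfig.Valid() && b.serveConfig.IsFunnelOn()
		changed := b.hostinfo != nil && b.hostinfo.IngressEnabled != enabled
		if changed {
			b.hostinfo.IngressEnabled = enabled
		}
		b.mu.Unlock()
		if changed {
			// doSetHostinfoFilterServices re-sends the updated Hostinfo to the control plane.
			b.goTracker.Go(b.doSetHostinfoFilterServices)
		}
	}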
Updates tailscale/tailscale#11572 Updates tailscale/corp#25931 Signed-off-by: Irbe Krumina --- ipn/ipnlocal/local.go | 42 ++++++++++- ipn/ipnlocal/local_test.go | 151 +++++++++++++++++++++++++++++++++++++ tailcfg/tailcfg.go | 4 +- tailcfg/tailcfg_clone.go | 1 + tailcfg/tailcfg_test.go | 21 ++++++ tailcfg/tailcfg_view.go | 2 + 6 files changed, 216 insertions(+), 5 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c59df833d..214d3a4e4 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3988,6 +3988,12 @@ func (b *LocalBackend) wantIngressLocked() bool { return b.serveConfig.Valid() && b.serveConfig.HasAllowFunnel() } +// hasIngressEnabledLocked reports whether the node has any funnel endpoint enabled. This bool is sent to control (in +// Hostinfo.IngressEnabled) to determine whether 'Funnel' badge should be displayed on this node in the admin panel. +func (b *LocalBackend) hasIngressEnabledLocked() bool { + return b.serveConfig.Valid() && b.serveConfig.IsFunnelOn() +} + // setPrefsLockedOnEntry requires b.mu be held to call it, but it // unlocks b.mu when done. newp ownership passes to this function. // It returns a read-only copy of the new prefs. @@ -5086,7 +5092,12 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // if this is accidentally false, then control may not configure DNS // properly. This exists as an optimization to control to program fewer DNS // records that have ingress enabled but are not actually being used. + // TODO(irbekrm): once control knows that if hostinfo.IngressEnabled is true, + // then wireIngress can be considered true, don't send wireIngress in that case. hi.WireIngress = b.wantIngressLocked() + // The Hostinfo.IngressEnabled field is used to communicate to control whether + // the funnel is actually enabled. + hi.IngressEnabled = b.hasIngressEnabledLocked() hi.AppConnector.Set(prefs.AppConnector().Advertise) } @@ -6009,14 +6020,37 @@ func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn. b.updateServeTCPPortNetMapAddrListenersLocked(servePorts) } } - // Kick off a Hostinfo update to control if WireIngress changed. - if wire := b.wantIngressLocked(); b.hostinfo != nil && b.hostinfo.WireIngress != wire { + + // Update funnel info in hostinfo and kick off control update if needed. + b.updateIngressLocked() + b.setTCPPortsIntercepted(handlePorts) +} + +// updateIngressLocked updates the hostinfo.WireIngress and hostinfo.IngressEnabled fields and kicks off a Hostinfo +// update if the values have changed. +// TODO(irbekrm): once control knows that if hostinfo.IngressEnabled is true, then wireIngress can be considered true, +// we can stop sending hostinfo.WireIngress in that case. +// +// b.mu must be held. +func (b *LocalBackend) updateIngressLocked() { + if b.hostinfo == nil { + return + } + hostInfoChanged := false + if wire := b.wantIngressLocked(); b.hostinfo.WireIngress != wire { b.logf("Hostinfo.WireIngress changed to %v", wire) b.hostinfo.WireIngress = wire + hostInfoChanged = true + } + if ie := b.hasIngressEnabledLocked(); b.hostinfo.IngressEnabled != ie { + b.logf("Hostinfo.IngressEnabled changed to %v", ie) + b.hostinfo.IngressEnabled = ie + hostInfoChanged = true + } + // Kick off a Hostinfo update to control if ingress status has changed. 
+ if hostInfoChanged { b.goTracker.Go(b.doSetHostinfoFilterServices) } - - b.setTCPPortsIntercepted(handlePorts) } // setServeProxyHandlersLocked ensures there is an http proxy handler for each diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 5e8a3172c..348bdcab3 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4838,3 +4838,154 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { }) } } + +func TestUpdateIngressLocked(t *testing.T) { + tests := []struct { + name string + hi *tailcfg.Hostinfo + sc *ipn.ServeConfig + wantIngress bool + wantWireIngress bool + wantControlUpdate bool + }{ + { + name: "no_hostinfo_no_serve_config", + hi: nil, + }, + { + name: "empty_hostinfo_no_serve_config", + hi: &tailcfg.Hostinfo{}, + }, + { + name: "empty_hostinfo_funnel_enabled", + hi: &tailcfg.Hostinfo{}, + sc: &ipn.ServeConfig{ + AllowFunnel: map[ipn.HostPort]bool{ + "tailnet.xyz:443": true, + }, + }, + wantIngress: true, + wantWireIngress: true, + wantControlUpdate: true, + }, + { + name: "empty_hostinfo_funnel_disabled", + hi: &tailcfg.Hostinfo{}, + sc: &ipn.ServeConfig{ + AllowFunnel: map[ipn.HostPort]bool{ + "tailnet.xyz:443": false, + }, + }, + wantWireIngress: true, // true if there is any AllowFunnel block + wantControlUpdate: true, + }, + { + name: "empty_hostinfo_no_funnel", + hi: &tailcfg.Hostinfo{}, + sc: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTPS: true}, + }, + }, + }, + { + name: "funnel_enabled_no_change", + hi: &tailcfg.Hostinfo{ + IngressEnabled: true, + WireIngress: true, + }, + sc: &ipn.ServeConfig{ + AllowFunnel: map[ipn.HostPort]bool{ + "tailnet.xyz:443": true, + }, + }, + wantIngress: true, + wantWireIngress: true, + }, + { + name: "funnel_disabled_no_change", + hi: &tailcfg.Hostinfo{ + WireIngress: true, + }, + sc: &ipn.ServeConfig{ + AllowFunnel: map[ipn.HostPort]bool{ + "tailnet.xyz:443": false, + }, + }, + wantWireIngress: true, // true if there is any AllowFunnel block + }, + { + name: "funnel_changes_to_disabled", + hi: &tailcfg.Hostinfo{ + IngressEnabled: true, + WireIngress: true, + }, + sc: &ipn.ServeConfig{ + AllowFunnel: map[ipn.HostPort]bool{ + "tailnet.xyz:443": false, + }, + }, + wantWireIngress: true, // true if there is any AllowFunnel block + wantControlUpdate: true, + }, + { + name: "funnel_changes_to_enabled", + hi: &tailcfg.Hostinfo{ + WireIngress: true, + }, + sc: &ipn.ServeConfig{ + AllowFunnel: map[ipn.HostPort]bool{ + "tailnet.xyz:443": true, + }, + }, + wantWireIngress: true, + wantIngress: true, + wantControlUpdate: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := newTestLocalBackend(t) + b.hostinfo = tt.hi + b.serveConfig = tt.sc.View() + allDone := make(chan bool, 1) + defer b.goTracker.AddDoneCallback(func() { + b.mu.Lock() + defer b.mu.Unlock() + if b.goTracker.RunningGoroutines() > 0 { + return + } + select { + case allDone <- true: + default: + } + })() + + was := b.goTracker.StartedGoroutines() + b.updateIngressLocked() + + if tt.hi != nil { + if tt.hi.IngressEnabled != tt.wantIngress { + t.Errorf("IngressEnabled = %v, want %v", tt.hi.IngressEnabled, tt.wantIngress) + } + if tt.hi.WireIngress != tt.wantWireIngress { + t.Errorf("WireIngress = %v, want %v", tt.hi.WireIngress, tt.wantWireIngress) + } + } + + startedGoroutine := b.goTracker.StartedGoroutines() != was + if startedGoroutine != tt.wantControlUpdate { + t.Errorf("control update triggered = %v, want %v", startedGoroutine, tt.wantControlUpdate) + } + + if 
startedGoroutine { + select { + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for goroutine to finish") + case <-allDone: + } + } + }) + } +} diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 9b26e8883..937f619e6 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -155,7 +155,8 @@ type CapabilityVersion int // - 110: 2024-12-12: removed never-before-used Tailscale SSH public key support (#14373) // - 111: 2025-01-14: Client supports a peer having Node.HomeDERP (issue #14636) // - 112: 2025-01-14: Client interprets AllowedIPs of nil as meaning same as Addresses -const CurrentCapabilityVersion CapabilityVersion = 112 +// - 113: 2025-01-20: Client communicates to control whether funnel is enabled by sending Hostinfo.IngressEnabled (#14688) +const CurrentCapabilityVersion CapabilityVersion = 113 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -869,6 +870,7 @@ type Hostinfo struct { ShareeNode bool `json:",omitempty"` // indicates this node exists in netmap because it's owned by a shared-to user NoLogsNoSupport bool `json:",omitempty"` // indicates that the user has opted out of sending logs and support WireIngress bool `json:",omitempty"` // indicates that the node wants the option to receive ingress connections + IngressEnabled bool `json:",omitempty"` // if the node has any funnel endpoint enabled AllowsUpdate bool `json:",omitempty"` // indicates that the node has opted-in to admin-console-drive remote updates Machine string `json:",omitempty"` // the current host's machine type (uname -m) GoArch string `json:",omitempty"` // GOARCH value (of the built binary) diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 42cef1598..f7126ca41 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -166,6 +166,7 @@ var _HostinfoCloneNeedsRegeneration = Hostinfo(struct { ShareeNode bool NoLogsNoSupport bool WireIngress bool + IngressEnabled bool AllowsUpdate bool Machine string GoArch string diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index 560e28933..da5873847 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -51,6 +51,7 @@ func TestHostinfoEqual(t *testing.T) { "ShareeNode", "NoLogsNoSupport", "WireIngress", + "IngressEnabled", "AllowsUpdate", "Machine", "GoArch", @@ -251,6 +252,26 @@ func TestHostinfoEqual(t *testing.T) { &Hostinfo{}, false, }, + { + &Hostinfo{IngressEnabled: true}, + &Hostinfo{}, + false, + }, + { + &Hostinfo{IngressEnabled: true}, + &Hostinfo{IngressEnabled: true}, + true, + }, + { + &Hostinfo{IngressEnabled: false}, + &Hostinfo{}, + true, + }, + { + &Hostinfo{IngressEnabled: false}, + &Hostinfo{IngressEnabled: true}, + false, + }, } for i, tt := range tests { got := tt.a.Equal(tt.b) diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 3770f272f..55c244fbf 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -283,6 +283,7 @@ func (v HostinfoView) ShieldsUp() bool { return v.ж.Shie func (v HostinfoView) ShareeNode() bool { return v.ж.ShareeNode } func (v HostinfoView) NoLogsNoSupport() bool { return v.ж.NoLogsNoSupport } func (v HostinfoView) WireIngress() bool { return v.ж.WireIngress } +func (v HostinfoView) IngressEnabled() bool { return v.ж.IngressEnabled } func (v HostinfoView) AllowsUpdate() bool { return v.ж.AllowsUpdate } func (v HostinfoView) Machine() string { return v.ж.Machine } func (v HostinfoView) GoArch() string { return v.ж.GoArch } @@ -324,6 +325,7 @@ var _HostinfoViewNeedsRegeneration 
= Hostinfo(struct { ShareeNode bool NoLogsNoSupport bool WireIngress bool + IngressEnabled bool AllowsUpdate bool Machine string GoArch string From 817ba1c300ad8378bb87f14e2c1709e428a54372 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 21 Jan 2025 05:21:03 +0000 Subject: [PATCH 0356/1708] cmd/{k8s-operator,containerboot},kube/kubetypes: parse Ingresses for ingress ProxyGroup (#14583) cmd/k8s-operator: add logic to parse L7 Ingresses in HA mode - Wrap the Tailscale API client used by the Kubernetes Operator into a client that knows how to manage VIPServices. - Create/Delete VIPServices and update serve config for L7 Ingresses for ProxyGroup. - Ensure that ingress ProxyGroup proxies mount serve config from a shared ConfigMap. Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- cmd/containerboot/serve.go | 7 + cmd/k8s-operator/ingress-for-pg.go | 567 ++++++++++++++++++++++++ cmd/k8s-operator/ingress-for-pg_test.go | 337 ++++++++++++++ cmd/k8s-operator/ingress.go | 166 +++---- cmd/k8s-operator/operator.go | 103 +++-- cmd/k8s-operator/proxygroup.go | 11 +- cmd/k8s-operator/proxygroup_specs.go | 52 ++- cmd/k8s-operator/proxygroup_test.go | 31 +- cmd/k8s-operator/sts.go | 4 +- cmd/k8s-operator/testutils_test.go | 50 +++ cmd/k8s-operator/tsclient.go | 185 ++++++++ kube/kubetypes/types.go | 5 +- 12 files changed, 1391 insertions(+), 127 deletions(-) create mode 100644 cmd/k8s-operator/ingress-for-pg.go create mode 100644 cmd/k8s-operator/ingress-for-pg_test.go create mode 100644 cmd/k8s-operator/tsclient.go diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index 1729e65b5..aad22820b 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -65,6 +65,10 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan if err != nil { log.Fatalf("serve proxy: failed to read serve config: %v", err) } + if sc == nil { + log.Printf("serve proxy: no serve config at %q, skipping", path) + continue + } if prevServeConfig != nil && reflect.DeepEqual(sc, prevServeConfig) { continue } @@ -131,6 +135,9 @@ func readServeConfig(path, certDomain string) (*ipn.ServeConfig, error) { } j, err := os.ReadFile(path) if err != nil { + if os.IsNotExist(err) { + return nil, nil + } return nil, err } // Serve config can be provided by users as well as the Kubernetes Operator (for its proxies). 
User-provided diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go new file mode 100644 index 000000000..4dcaf7c6d --- /dev/null +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -0,0 +1,567 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "reflect" + "slices" + "strings" + "sync" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" + "tailscale.com/util/clientmetric" + "tailscale.com/util/dnsname" + "tailscale.com/util/mak" + "tailscale.com/util/set" +) + +const ( + serveConfigKey = "serve-config.json" + VIPSvcOwnerRef = "tailscale.com/k8s-operator:owned-by:%s" + // FinalizerNamePG is the finalizer used by the IngressPGReconciler + FinalizerNamePG = "tailscale.com/ingress-pg-finalizer" +) + +var gaugePGIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressPGResourceCount) + +// IngressPGReconciler is a controller that reconciles Tailscale Ingresses should be exposed on an ingress ProxyGroup +// (in HA mode). +type IngressPGReconciler struct { + client.Client + + recorder record.EventRecorder + logger *zap.SugaredLogger + tsClient tsClient + tsnetServer tsnetServer + tsNamespace string + lc localClient + defaultTags []string + + mu sync.Mutex // protects following + // managedIngresses is a set of all ingress resources that we're currently + // managing. This is only used for metrics. + managedIngresses set.Slice[types.UID] +} + +// Reconcile reconciles Ingresses that should be exposed over Tailscale in HA mode (on a ProxyGroup). It looks at all +// Ingresses with tailscale.com/proxy-group annotation. For each such Ingress, it ensures that a VIPService named after +// the hostname of the Ingress exists and is up to date. It also ensures that the serve config for the ingress +// ProxyGroup is updated to route traffic for the VIPService to the Ingress's backend Services. +// When an Ingress is deleted or unexposed, the VIPService and the associated serve config are cleaned up. +// Ingress hostname change also results in the VIPService for the previous hostname being cleaned up and a new VIPService +// being created for the new hostname. +func (a *IngressPGReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { + logger := a.logger.With("Ingress", req.NamespacedName) + logger.Debugf("starting reconcile") + defer logger.Debugf("reconcile finished") + + ing := new(networkingv1.Ingress) + err = a.Get(ctx, req.NamespacedName, ing) + if apierrors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. 
+ logger.Debugf("Ingress not found, assuming it was deleted") + return res, nil + } else if err != nil { + return res, fmt.Errorf("failed to get Ingress: %w", err) + } + + // hostname is the name of the VIPService that will be created for this Ingress as well as the first label in + // the MagicDNS name of the Ingress. + hostname := hostnameForIngress(ing) + logger = logger.With("hostname", hostname) + + if !ing.DeletionTimestamp.IsZero() || !a.shouldExpose(ing) { + return res, a.maybeCleanup(ctx, hostname, ing, logger) + } + + if err := a.maybeProvision(ctx, hostname, ing, logger); err != nil { + return res, fmt.Errorf("failed to provision: %w", err) + } + return res, nil +} + +// maybeProvision ensures that the VIPService and serve config for the Ingress are created or updated. +func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { + if err := validateIngressClass(ctx, a.Client); err != nil { + logger.Infof("error validating tailscale IngressClass: %v.", err) + return nil + } + + // Get and validate ProxyGroup readiness + pgName := ing.Annotations[AnnotationProxyGroup] + if pgName == "" { + logger.Infof("[unexpected] no ProxyGroup annotation, skipping VIPService provisioning") + return nil + } + pg := &tsapi.ProxyGroup{} + if err := a.Get(ctx, client.ObjectKey{Name: pgName}, pg); err != nil { + if apierrors.IsNotFound(err) { + logger.Infof("ProxyGroup %q does not exist", pgName) + return nil + } + return fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) + } + if !tsoperator.ProxyGroupIsReady(pg) { + // TODO(irbekrm): we need to reconcile ProxyGroup Ingresses on ProxyGroup changes to not miss the status update + // in this case. + logger.Infof("ProxyGroup %q is not ready", pgName) + return nil + } + + // Validate Ingress configuration + if err := a.validateIngress(ing, pg); err != nil { + logger.Infof("invalid Ingress configuration: %v", err) + a.recorder.Event(ing, corev1.EventTypeWarning, "InvalidIngressConfiguration", err.Error()) + return nil + } + + if !IsHTTPSEnabledOnTailnet(a.tsnetServer) { + a.recorder.Event(ing, corev1.EventTypeWarning, "HTTPSNotEnabled", "HTTPS is not enabled on the tailnet; ingress may not work") + } + + logger = logger.With("proxy-group", pg) + + if !slices.Contains(ing.Finalizers, FinalizerNamePG) { + // This log line is printed exactly once during initial provisioning, + // because once the finalizer is in place this block gets skipped. So, + // this is a nice place to tell the operator that the high level, + // multi-reconcile operation is underway. + logger.Infof("exposing Ingress over tailscale") + ing.Finalizers = append(ing.Finalizers, FinalizerNamePG) + if err := a.Update(ctx, ing); err != nil { + return fmt.Errorf("failed to add finalizer: %w", err) + } + a.mu.Lock() + a.managedIngresses.Add(ing.UID) + gaugePGIngressResources.Set(int64(a.managedIngresses.Len())) + a.mu.Unlock() + } + + // 1. Ensure that if Ingress' hostname has changed, any VIPService resources corresponding to the old hostname + // are cleaned up. + // In practice, this function will ensure that any VIPServices that are associated with the provided ProxyGroup + // and no longer owned by an Ingress are cleaned up. This is fine- it is not expensive and ensures that in edge + // cases (a single update changed both hostname and removed ProxyGroup annotation) the VIPService is more likely + // to be (eventually) removed. 
+ if err := a.maybeCleanupProxyGroup(ctx, pgName, logger); err != nil { + return fmt.Errorf("failed to cleanup VIPService resources for ProxyGroup: %w", err) + } + + // 2. Ensure that there isn't a VIPService with the same hostname already created and not owned by this Ingress. + // TODO(irbekrm): perhaps in future we could have record names being stored on VIPServices. I am not certain if + // there might not be edge cases (custom domains, etc?) where attempting to determine the DNS name of the + // VIPService in this way won't be incorrect. + tcd, err := a.tailnetCertDomain(ctx) + if err != nil { + return fmt.Errorf("error determining DNS name base: %w", err) + } + dnsName := hostname + "." + tcd + existingVIPSvc, err := a.tsClient.getVIPServiceByName(ctx, hostname) + // TODO(irbekrm): here and when creating the VIPService, verify if the error is not terminal (and therefore + // should not be reconciled). For example, if the hostname is already a hostname of a Tailscale node, the GET + // here will fail. + if err != nil { + errResp := &tailscale.ErrResponse{} + if ok := errors.As(err, errResp); ok && errResp.Status != http.StatusNotFound { + return fmt.Errorf("error getting VIPService %q: %w", hostname, err) + } + } + if existingVIPSvc != nil && !isVIPServiceForIngress(existingVIPSvc, ing) { + logger.Infof("VIPService %q for MagicDNS name %q already exists, but is not owned by this Ingress. Please delete it manually and recreate this Ingress to proceed or create an Ingress for a different MagicDNS name", hostname, dnsName) + a.recorder.Event(ing, corev1.EventTypeWarning, "ConflictingVIPServiceExists", fmt.Sprintf("VIPService %q for MagicDNS name %q already exists, but is not owned by this Ingress. Please delete it manually to proceed or create an Ingress for a different MagicDNS name", hostname, dnsName)) + return nil + } + + // 3. Ensure that the serve config for the ProxyGroup contains the VIPService + cm, cfg, err := a.proxyGroupServeConfig(ctx, pgName) + if err != nil { + return fmt.Errorf("error getting ingress serve config: %w", err) + } + if cm == nil { + logger.Infof("no ingress serve config ConfigMap found, unable to update serve config. Ensure that ProxyGroup is healthy.") + return nil + } + ep := ipn.HostPort(fmt.Sprintf("%s:443", dnsName)) + handlers, err := handlersForIngress(ctx, ing, a.Client, a.recorder, dnsName, logger) + if err != nil { + return fmt.Errorf("failed to get handlers for ingress: %w", err) + } + ingCfg := &ipn.ServiceConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: { + HTTPS: true, + }, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + ep: { + Handlers: handlers, + }, + }, + } + var gotCfg *ipn.ServiceConfig + if cfg != nil && cfg.Services != nil { + gotCfg = cfg.Services[hostname] + } + if !reflect.DeepEqual(gotCfg, ingCfg) { + logger.Infof("Updating serve config") + mak.Set(&cfg.Services, hostname, ingCfg) + cfgBytes, err := json.Marshal(cfg) + if err != nil { + return fmt.Errorf("error marshaling serve config: %w", err) + } + mak.Set(&cm.BinaryData, serveConfigKey, cfgBytes) + if err := a.Update(ctx, cm); err != nil { + return fmt.Errorf("error updating serve config: %w", err) + } + } + + // 4. Ensure that the VIPService exists and is up to date. 
+ tags := a.defaultTags + if tstr, ok := ing.Annotations[AnnotationTags]; ok { + tags = strings.Split(tstr, ",") + } + + vipSvc := &VIPService{ + Name: hostname, + Tags: tags, + Ports: []string{"443"}, // always 443 for Ingress + Comment: fmt.Sprintf(VIPSvcOwnerRef, ing.UID), + } + if existingVIPSvc != nil { + vipSvc.Addrs = existingVIPSvc.Addrs + } + if existingVIPSvc == nil || !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) { + logger.Infof("Ensuring VIPService %q exists and is up to date", hostname) + if err := a.tsClient.createOrUpdateVIPServiceByName(ctx, vipSvc); err != nil { + logger.Infof("error creating VIPService: %v", err) + return fmt.Errorf("error creating VIPService: %w", err) + } + } + + // 5. Update Ingress status + oldStatus := ing.Status.DeepCopy() + // TODO(irbekrm): once we have ingress ProxyGroup, we can determine if instances are ready to route traffic to the VIPService + ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{ + { + Hostname: dnsName, + Ports: []networkingv1.IngressPortStatus{ + { + Protocol: "TCP", + Port: 443, + }, + }, + }, + } + if apiequality.Semantic.DeepEqual(oldStatus, ing.Status) { + return nil + } + if err := a.Status().Update(ctx, ing); err != nil { + return fmt.Errorf("failed to update Ingress status: %w", err) + } + return nil +} + +// maybeCleanupProxyGroup ensures that if an Ingress hostname has changed, any VIPService resources created for the +// Ingress' ProxyGroup corresponding to the old hostname are cleaned up. A run of this function will ensure that any +// VIPServices that are associated with the provided ProxyGroup and no longer owned by an Ingress are cleaned up. +func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger) error { + // Get serve config for the ProxyGroup + cm, cfg, err := a.proxyGroupServeConfig(ctx, proxyGroupName) + if err != nil { + return fmt.Errorf("getting serve config: %w", err) + } + if cfg == nil { + return nil // ProxyGroup does not have any VIPServices + } + + ingList := &networkingv1.IngressList{} + if err := a.List(ctx, ingList); err != nil { + return fmt.Errorf("listing Ingresses: %w", err) + } + serveConfigChanged := false + // For each VIPService in serve config... 
+ for vipHostname := range cfg.Services { + // ...check if there is currently an Ingress with this hostname + found := false + for _, i := range ingList.Items { + ingressHostname := hostnameForIngress(&i) + if ingressHostname == vipHostname { + found = true + break + } + } + + if !found { + logger.Infof("VIPService %q is not owned by any Ingress, cleaning up", vipHostname) + svc, err := a.getVIPService(ctx, vipHostname, logger) + if err != nil { + errResp := &tailscale.ErrResponse{} + if errors.As(err, &errResp) && errResp.Status == http.StatusNotFound { + delete(cfg.Services, vipHostname) + serveConfigChanged = true + continue + } + return err + } + if isVIPServiceForAnyIngress(svc) { + logger.Infof("cleaning up orphaned VIPService %q", vipHostname) + if err := a.tsClient.deleteVIPServiceByName(ctx, vipHostname); err != nil { + errResp := &tailscale.ErrResponse{} + if !errors.As(err, &errResp) || errResp.Status != http.StatusNotFound { + return fmt.Errorf("deleting VIPService %q: %w", vipHostname, err) + } + } + } + delete(cfg.Services, vipHostname) + serveConfigChanged = true + } + } + + if serveConfigChanged { + cfgBytes, err := json.Marshal(cfg) + if err != nil { + return fmt.Errorf("marshaling serve config: %w", err) + } + mak.Set(&cm.BinaryData, serveConfigKey, cfgBytes) + if err := a.Update(ctx, cm); err != nil { + return fmt.Errorf("updating serve config: %w", err) + } + } + return nil +} + +// maybeCleanup ensures that any resources, such as a VIPService created for this Ingress, are cleaned up when the +// Ingress is being deleted or is unexposed. +func (a *IngressPGReconciler) maybeCleanup(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { + logger.Debugf("Ensuring any resources for Ingress are cleaned up") + ix := slices.Index(ing.Finalizers, FinalizerNamePG) + if ix < 0 { + logger.Debugf("no finalizer, nothing to do") + a.mu.Lock() + defer a.mu.Unlock() + a.managedIngresses.Remove(ing.UID) + gaugePGIngressResources.Set(int64(a.managedIngresses.Len())) + return nil + } + + // 1. Check if there is a VIPService created for this Ingress. + pg := ing.Annotations[AnnotationProxyGroup] + cm, cfg, err := a.proxyGroupServeConfig(ctx, pg) + if err != nil { + return fmt.Errorf("error getting ProxyGroup serve config: %w", err) + } + // VIPService is always first added to serve config and only then created in the Tailscale API, so if it is not + // found in the serve config, we can assume that there is no VIPService. TODO(irbekrm): once we have ingress + // ProxyGroup, we will probably add currently exposed VIPServices to its status. At that point, we can use the + // status rather than checking the serve config each time. + if cfg == nil || cfg.Services == nil || cfg.Services[hostname] == nil { + return nil + } + logger.Infof("Ensuring that VIPService %q configuration is cleaned up", hostname) + + // 2. Delete the VIPService. + if err := a.deleteVIPServiceIfExists(ctx, hostname, ing, logger); err != nil { + return fmt.Errorf("error deleting VIPService: %w", err) + } + + // 3. Remove the VIPService from the serve config for the ProxyGroup. 
+ logger.Infof("Removing VIPService %q from serve config for ProxyGroup %q", hostname, pg) + delete(cfg.Services, hostname) + cfgBytes, err := json.Marshal(cfg) + if err != nil { + return fmt.Errorf("error marshaling serve config: %w", err) + } + mak.Set(&cm.BinaryData, serveConfigKey, cfgBytes) + if err := a.Update(ctx, cm); err != nil { + return fmt.Errorf("error updating ConfigMap %q: %w", cm.Name, err) + } + + if err := a.deleteFinalizer(ctx, ing, logger); err != nil { + return fmt.Errorf("failed to remove finalizer: %w", err) + } + a.mu.Lock() + defer a.mu.Unlock() + a.managedIngresses.Remove(ing.UID) + gaugePGIngressResources.Set(int64(a.managedIngresses.Len())) + return nil +} + +func (a *IngressPGReconciler) deleteFinalizer(ctx context.Context, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { + found := false + ing.Finalizers = slices.DeleteFunc(ing.Finalizers, func(f string) bool { + found = true + return f == FinalizerNamePG + }) + if !found { + return nil + } + logger.Debug("ensure %q finalizer is removed", FinalizerNamePG) + + if err := a.Update(ctx, ing); err != nil { + return fmt.Errorf("failed to remove finalizer %q: %w", FinalizerNamePG, err) + } + return nil +} + +func pgIngressCMName(pg string) string { + return fmt.Sprintf("%s-ingress-config", pg) +} + +func (a *IngressPGReconciler) proxyGroupServeConfig(ctx context.Context, pg string) (cm *corev1.ConfigMap, cfg *ipn.ServeConfig, err error) { + name := pgIngressCMName(pg) + cm = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: a.tsNamespace, + }, + } + if err := a.Get(ctx, client.ObjectKeyFromObject(cm), cm); err != nil && !apierrors.IsNotFound(err) { + return nil, nil, fmt.Errorf("error retrieving ingress serve config ConfigMap %s: %v", name, err) + } + if apierrors.IsNotFound(err) { + return nil, nil, nil + } + cfg = &ipn.ServeConfig{} + if len(cm.BinaryData[serveConfigKey]) != 0 { + if err := json.Unmarshal(cm.BinaryData[serveConfigKey], cfg); err != nil { + return nil, nil, fmt.Errorf("error unmarshaling ingress serve config %v: %w", cm.BinaryData[serveConfigKey], err) + } + } + return cm, cfg, nil +} + +type localClient interface { + StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) +} + +// tailnetCertDomain returns the base domain (TCD) of the current tailnet. 
+func (a *IngressPGReconciler) tailnetCertDomain(ctx context.Context) (string, error) { + st, err := a.lc.StatusWithoutPeers(ctx) + if err != nil { + return "", fmt.Errorf("error getting tailscale status: %w", err) + } + return st.CurrentTailnet.MagicDNSSuffix, nil +} + +// shouldExpose returns true if the Ingress should be exposed over Tailscale in HA mode (on a ProxyGroup) +func (a *IngressPGReconciler) shouldExpose(ing *networkingv1.Ingress) bool { + isTSIngress := ing != nil && + ing.Spec.IngressClassName != nil && + *ing.Spec.IngressClassName == tailscaleIngressClassName + pgAnnot := ing.Annotations[AnnotationProxyGroup] + return isTSIngress && pgAnnot != "" +} + +func (a *IngressPGReconciler) getVIPService(ctx context.Context, hostname string, logger *zap.SugaredLogger) (*VIPService, error) { + svc, err := a.tsClient.getVIPServiceByName(ctx, hostname) + if err != nil { + errResp := &tailscale.ErrResponse{} + if ok := errors.As(err, errResp); ok && errResp.Status != http.StatusNotFound { + logger.Infof("error getting VIPService %q: %v", hostname, err) + return nil, fmt.Errorf("error getting VIPService %q: %w", hostname, err) + } + } + return svc, nil +} + +func isVIPServiceForIngress(svc *VIPService, ing *networkingv1.Ingress) bool { + if svc == nil || ing == nil { + return false + } + return strings.EqualFold(svc.Comment, fmt.Sprintf(VIPSvcOwnerRef, ing.UID)) +} + +func isVIPServiceForAnyIngress(svc *VIPService) bool { + if svc == nil { + return false + } + return strings.HasPrefix(svc.Comment, "tailscale.com/k8s-operator:owned-by:") +} + +// validateIngress validates that the Ingress is properly configured. +// Currently validates: +// - Any tags provided via tailscale.com/tags annotation are valid Tailscale ACL tags +// - The derived hostname is a valid DNS label +// - The referenced ProxyGroup exists and is of type 'ingress' +// - Ingress' TLS block is invalid +func (a *IngressPGReconciler) validateIngress(ing *networkingv1.Ingress, pg *tsapi.ProxyGroup) error { + var errs []error + + // Validate tags if present + if tstr, ok := ing.Annotations[AnnotationTags]; ok { + tags := strings.Split(tstr, ",") + for _, tag := range tags { + tag = strings.TrimSpace(tag) + if err := tailcfg.CheckTag(tag); err != nil { + errs = append(errs, fmt.Errorf("tailscale.com/tags annotation contains invalid tag %q: %w", tag, err)) + } + } + } + + // Validate TLS configuration + if ing.Spec.TLS != nil && len(ing.Spec.TLS) > 0 && (len(ing.Spec.TLS) > 1 || len(ing.Spec.TLS[0].Hosts) > 1) { + errs = append(errs, fmt.Errorf("Ingress contains invalid TLS block %v: only a single TLS entry with a single host is allowed", ing.Spec.TLS)) + } + + // Validate that the hostname will be a valid DNS label + hostname := hostnameForIngress(ing) + if err := dnsname.ValidLabel(hostname); err != nil { + errs = append(errs, fmt.Errorf("invalid hostname %q: %w. Ensure that the hostname is a valid DNS label", hostname, err)) + } + + // Validate ProxyGroup type + if pg.Spec.Type != tsapi.ProxyGroupTypeIngress { + errs = append(errs, fmt.Errorf("ProxyGroup %q is of type %q but must be of type %q", + pg.Name, pg.Spec.Type, tsapi.ProxyGroupTypeIngress)) + } + + // Validate ProxyGroup readiness + if !tsoperator.ProxyGroupIsReady(pg) { + errs = append(errs, fmt.Errorf("ProxyGroup %q is not ready", pg.Name)) + } + + return errors.Join(errs...) +} + +// deleteVIPServiceIfExists attempts to delete the VIPService if it exists and is owned by the given Ingress. 
+func (a *IngressPGReconciler) deleteVIPServiceIfExists(ctx context.Context, name string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { + svc, err := a.getVIPService(ctx, name, logger) + if err != nil { + return fmt.Errorf("error getting VIPService: %w", err) + } + + // isVIPServiceForIngress handles nil svc, so we don't need to check it here + if !isVIPServiceForIngress(svc, ing) { + return nil + } + + logger.Infof("Deleting VIPService %q", name) + if err = a.tsClient.deleteVIPServiceByName(ctx, name); err != nil { + return fmt.Errorf("error deleting VIPService: %w", err) + } + return nil +} diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go new file mode 100644 index 000000000..2cd340962 --- /dev/null +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -0,0 +1,337 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "encoding/json" + "testing" + + "slices" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/types/ptr" +) + +func TestIngressPGReconciler(t *testing.T) { + tsIngressClass := &networkingv1.IngressClass{ + ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, + Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}, + } + + // Pre-create the ProxyGroup + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg", + Generation: 1, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeIngress, + }, + } + + // Pre-create the ConfigMap for the ProxyGroup + pgConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg-ingress-config", + Namespace: "operator-ns", + }, + BinaryData: map[string][]byte{ + "serve-config.json": []byte(`{"Services":{}}`), + }, + } + + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(pg, pgConfigMap, tsIngressClass). + WithStatusSubresource(pg). 
+ Build() + mustUpdateStatus(t, fc, "", pg.Name, func(pg *tsapi.ProxyGroup) { + pg.Status.Conditions = []metav1.Condition{ + { + Type: string(tsapi.ProxyGroupReady), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + } + }) + ft := &fakeTSClient{} + fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + lc := &fakeLocalClient{ + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{ + MagicDNSSuffix: "ts.net", + }, + }, + } + ingPGR := &IngressPGReconciler{ + Client: fc, + tsClient: ft, + tsnetServer: fakeTsnetServer, + defaultTags: []string{"tag:k8s"}, + tsNamespace: "operator-ns", + logger: zl.Sugar(), + recorder: record.NewFakeRecorder(10), + lc: lc, + } + + // Test 1: Default tags + ing := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 8080, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"my-svc.tailnetxyz.ts.net"}}, + }, + }, + } + mustCreate(t, fc, ing) + + // Verify initial reconciliation + expectReconciled(t, ingPGR, "default", "test-ingress") + + // Get and verify the ConfigMap was updated + cm := &corev1.ConfigMap{} + if err := fc.Get(context.Background(), types.NamespacedName{ + Name: "test-pg-ingress-config", + Namespace: "operator-ns", + }, cm); err != nil { + t.Fatalf("getting ConfigMap: %v", err) + } + + cfg := &ipn.ServeConfig{} + if err := json.Unmarshal(cm.BinaryData[serveConfigKey], cfg); err != nil { + t.Fatalf("unmarshaling serve config: %v", err) + } + + if cfg.Services["my-svc"] == nil { + t.Error("expected serve config to contain VIPService configuration") + } + + // Verify VIPService uses default tags + vipSvc, err := ft.getVIPServiceByName(context.Background(), "my-svc") + if err != nil { + t.Fatalf("getting VIPService: %v", err) + } + if vipSvc == nil { + t.Fatal("VIPService not created") + } + wantTags := []string{"tag:k8s"} // default tags + if !slices.Equal(vipSvc.Tags, wantTags) { + t.Errorf("incorrect VIPService tags: got %v, want %v", vipSvc.Tags, wantTags) + } + + // Test 2: Custom tags + mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { + ing.Annotations["tailscale.com/tags"] = "tag:custom,tag:test" + }) + expectReconciled(t, ingPGR, "default", "test-ingress") + + // Verify VIPService uses custom tags + vipSvc, err = ft.getVIPServiceByName(context.Background(), "my-svc") + if err != nil { + t.Fatalf("getting VIPService: %v", err) + } + if vipSvc == nil { + t.Fatal("VIPService not created") + } + wantTags = []string{"tag:custom", "tag:test"} // custom tags only + gotTags := slices.Clone(vipSvc.Tags) + slices.Sort(gotTags) + slices.Sort(wantTags) + if !slices.Equal(gotTags, wantTags) { + t.Errorf("incorrect VIPService tags: got %v, want %v", gotTags, wantTags) + } + + // Delete the Ingress and verify cleanup + if err := fc.Delete(context.Background(), ing); err != nil { + t.Fatalf("deleting Ingress: %v", err) + } + + expectReconciled(t, ingPGR, "default", "test-ingress") + + // Verify the ConfigMap was cleaned 
up + cm = &corev1.ConfigMap{} + if err := fc.Get(context.Background(), types.NamespacedName{ + Name: "test-pg-ingress-config", + Namespace: "operator-ns", + }, cm); err != nil { + t.Fatalf("getting ConfigMap: %v", err) + } + + cfg = &ipn.ServeConfig{} + if err := json.Unmarshal(cm.BinaryData[serveConfigKey], cfg); err != nil { + t.Fatalf("unmarshaling serve config: %v", err) + } + + if len(cfg.Services) > 0 { + t.Error("serve config not cleaned up") + } +} + +func TestValidateIngress(t *testing.T) { + baseIngress := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + }, + } + + readyProxyGroup := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg", + Generation: 1, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeIngress, + }, + Status: tsapi.ProxyGroupStatus{ + Conditions: []metav1.Condition{ + { + Type: string(tsapi.ProxyGroupReady), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + } + + tests := []struct { + name string + ing *networkingv1.Ingress + pg *tsapi.ProxyGroup + wantErr string + }{ + { + name: "valid_ingress_with_hostname", + ing: &networkingv1.Ingress{ + ObjectMeta: baseIngress.ObjectMeta, + Spec: networkingv1.IngressSpec{ + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"test.example.com"}}, + }, + }, + }, + pg: readyProxyGroup, + }, + { + name: "valid_ingress_with_default_hostname", + ing: baseIngress, + pg: readyProxyGroup, + }, + { + name: "invalid_tags", + ing: &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: baseIngress.Name, + Namespace: baseIngress.Namespace, + Annotations: map[string]string{ + AnnotationTags: "tag:invalid!", + }, + }, + }, + pg: readyProxyGroup, + wantErr: "tailscale.com/tags annotation contains invalid tag \"tag:invalid!\": tag names can only contain numbers, letters, or dashes", + }, + { + name: "multiple_TLS_entries", + ing: &networkingv1.Ingress{ + ObjectMeta: baseIngress.ObjectMeta, + Spec: networkingv1.IngressSpec{ + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"test1.example.com"}}, + {Hosts: []string{"test2.example.com"}}, + }, + }, + }, + pg: readyProxyGroup, + wantErr: "Ingress contains invalid TLS block [{[test1.example.com] } {[test2.example.com] }]: only a single TLS entry with a single host is allowed", + }, + { + name: "multiple_hosts_in_TLS_entry", + ing: &networkingv1.Ingress{ + ObjectMeta: baseIngress.ObjectMeta, + Spec: networkingv1.IngressSpec{ + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"test1.example.com", "test2.example.com"}}, + }, + }, + }, + pg: readyProxyGroup, + wantErr: "Ingress contains invalid TLS block [{[test1.example.com test2.example.com] }]: only a single TLS entry with a single host is allowed", + }, + { + name: "wrong_proxy_group_type", + ing: baseIngress, + pg: &tsapi.ProxyGroup{ + ObjectMeta: readyProxyGroup.ObjectMeta, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupType("foo"), + }, + Status: readyProxyGroup.Status, + }, + wantErr: "ProxyGroup \"test-pg\" is of type \"foo\" but must be of type \"ingress\"", + }, + { + name: "proxy_group_not_ready", + ing: baseIngress, + pg: &tsapi.ProxyGroup{ + ObjectMeta: readyProxyGroup.ObjectMeta, + Spec: readyProxyGroup.Spec, + Status: tsapi.ProxyGroupStatus{ + Conditions: []metav1.Condition{ + { + Type: string(tsapi.ProxyGroupReady), + Status: metav1.ConditionFalse, + ObservedGeneration: 1, + }, + }, + }, + }, + wantErr: "ProxyGroup \"test-pg\" is not ready", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + r := &IngressPGReconciler{} + err := r.validateIngress(tt.ing, tt.pg) + if (err == nil && tt.wantErr != "") || (err != nil && err.Error() != tt.wantErr) { + t.Errorf("validateIngress() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index 3eb47dfb0..7cadaecc4 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -26,6 +26,7 @@ import ( "tailscale.com/kube/kubetypes" "tailscale.com/types/opt" "tailscale.com/util/clientmetric" + "tailscale.com/util/mak" "tailscale.com/util/set" ) @@ -58,7 +59,7 @@ var ( ) func (a *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { - logger := a.logger.With("ingress-ns", req.Namespace, "ingress-name", req.Name) + logger := a.logger.With("Ingress", req.NamespacedName) logger.Debugf("starting reconcile") defer logger.Debugf("reconcile finished") @@ -128,9 +129,8 @@ func (a *IngressReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare // This function adds a finalizer to ing, ensuring that we can handle orderly // deprovisioning later. func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.SugaredLogger, ing *networkingv1.Ingress) error { - if err := a.validateIngressClass(ctx); err != nil { + if err := validateIngressClass(ctx, a.Client); err != nil { logger.Warnf("error validating tailscale IngressClass: %v. In future this might be a terminal error.", err) - } if !slices.Contains(ing.Finalizers, FinalizerName) { // This log line is printed exactly once during initial provisioning, @@ -159,7 +159,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga gaugeIngressResources.Set(int64(a.managedIngresses.Len())) a.mu.Unlock() - if !a.ssr.IsHTTPSEnabledOnTailnet() { + if !IsHTTPSEnabledOnTailnet(a.ssr.tsnetServer) { a.recorder.Event(ing, corev1.EventTypeWarning, "HTTPSNotEnabled", "HTTPS is not enabled on the tailnet; ingress may not work") } @@ -185,73 +185,16 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga } web := sc.Web[magic443] - addIngressBackend := func(b *networkingv1.IngressBackend, path string) { - if b == nil { - return - } - if b.Service == nil { - a.recorder.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "backend for path %q is missing service", path) - return - } - var svc corev1.Service - if err := a.Get(ctx, types.NamespacedName{Namespace: ing.Namespace, Name: b.Service.Name}, &svc); err != nil { - a.recorder.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "failed to get service %q for path %q: %v", b.Service.Name, path, err) - return - } - if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" { - a.recorder.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "backend for path %q has invalid ClusterIP", path) - return - } - var port int32 - if b.Service.Port.Name != "" { - for _, p := range svc.Spec.Ports { - if p.Name == b.Service.Port.Name { - port = p.Port - break - } - } - } else { - port = b.Service.Port.Number - } - if port == 0 { - a.recorder.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "backend for path %q has invalid port", path) - return - } - proto := "http://" - if port == 443 || b.Service.Port.Name == "https" { - proto = "https+insecure://" - } - web.Handlers[path] = &ipn.HTTPHandler{ - Proxy: proto + svc.Spec.ClusterIP + ":" + fmt.Sprint(port) + path, - } - } - addIngressBackend(ing.Spec.DefaultBackend, 
"/") var tlsHost string // hostname or FQDN or empty if ing.Spec.TLS != nil && len(ing.Spec.TLS) > 0 && len(ing.Spec.TLS[0].Hosts) > 0 { tlsHost = ing.Spec.TLS[0].Hosts[0] } - for _, rule := range ing.Spec.Rules { - // Host is optional, but if it's present it must match the TLS host - // otherwise we ignore the rule. - if rule.Host != "" && rule.Host != tlsHost { - a.recorder.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "rule with host %q ignored, unsupported", rule.Host) - continue - } - for _, p := range rule.HTTP.Paths { - // Send a warning if folks use Exact path type - to make - // it easier for us to support Exact path type matching - // in the future if needed. - // https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types - if *p.PathType == networkingv1.PathTypeExact { - msg := "Exact path type strict matching is currently not supported and requests will be routed as for Prefix path type. This behaviour might change in the future." - logger.Warnf(fmt.Sprintf("Unsupported Path type exact for path %s. %s", p.Path, msg)) - a.recorder.Eventf(ing, corev1.EventTypeWarning, "UnsupportedPathTypeExact", msg) - } - addIngressBackend(&p.Backend, p.Path) - } + handlers, err := handlersForIngress(ctx, ing, a.Client, a.recorder, tlsHost, logger) + if err != nil { + return fmt.Errorf("failed to get handlers for ingress: %w", err) } - + web.Handlers = handlers if len(web.Handlers) == 0 { logger.Warn("Ingress contains no valid backends") a.recorder.Eventf(ing, corev1.EventTypeWarning, "NoValidBackends", "no valid backends") @@ -263,10 +206,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga if tstr, ok := ing.Annotations[AnnotationTags]; ok { tags = strings.Split(tstr, ",") } - hostname := ing.Namespace + "-" + ing.Name + "-ingress" - if tlsHost != "" { - hostname, _, _ = strings.Cut(tlsHost, ".") - } + hostname := hostnameForIngress(ing) sts := &tailscaleSTSConfig{ Hostname: hostname, @@ -322,28 +262,106 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga func (a *IngressReconciler) shouldExpose(ing *networkingv1.Ingress) bool { return ing != nil && ing.Spec.IngressClassName != nil && - *ing.Spec.IngressClassName == tailscaleIngressClassName + *ing.Spec.IngressClassName == tailscaleIngressClassName && + ing.Annotations[AnnotationProxyGroup] == "" } // validateIngressClass attempts to validate that 'tailscale' IngressClass // included in Tailscale installation manifests exists and has not been modified // to attempt to enable features that we do not support. -func (a *IngressReconciler) validateIngressClass(ctx context.Context) error { +func validateIngressClass(ctx context.Context, cl client.Client) error { ic := &networkingv1.IngressClass{ ObjectMeta: metav1.ObjectMeta{ Name: tailscaleIngressClassName, }, } - if err := a.Get(ctx, client.ObjectKeyFromObject(ic), ic); apierrors.IsNotFound(err) { - return errors.New("Tailscale IngressClass not found in cluster. Latest installation manifests include a tailscale IngressClass - please update") + if err := cl.Get(ctx, client.ObjectKeyFromObject(ic), ic); apierrors.IsNotFound(err) { + return errors.New("'tailscale' IngressClass not found in cluster.") } else if err != nil { return fmt.Errorf("error retrieving 'tailscale' IngressClass: %w", err) } if ic.Spec.Controller != tailscaleIngressControllerName { - return fmt.Errorf("Tailscale Ingress class controller name %s does not match tailscale Ingress controller name %s. 
Ensure that you are using 'tailscale' IngressClass from latest Tailscale installation manifests", ic.Spec.Controller, tailscaleIngressControllerName) + return fmt.Errorf("'tailscale' Ingress class controller name %s does not match tailscale Ingress controller name %s. Ensure that you are using 'tailscale' IngressClass from latest Tailscale installation manifests", ic.Spec.Controller, tailscaleIngressControllerName) } if ic.GetAnnotations()[ingressClassDefaultAnnotation] != "" { return fmt.Errorf("%s annotation is set on 'tailscale' IngressClass, but Tailscale Ingress controller does not support default Ingress class. Ensure that you are using 'tailscale' IngressClass from latest Tailscale installation manifests", ingressClassDefaultAnnotation) } return nil } + +func handlersForIngress(ctx context.Context, ing *networkingv1.Ingress, cl client.Client, rec record.EventRecorder, tlsHost string, logger *zap.SugaredLogger) (handlers map[string]*ipn.HTTPHandler, err error) { + addIngressBackend := func(b *networkingv1.IngressBackend, path string) { + if b == nil { + return + } + if b.Service == nil { + rec.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "backend for path %q is missing service", path) + return + } + var svc corev1.Service + if err := cl.Get(ctx, types.NamespacedName{Namespace: ing.Namespace, Name: b.Service.Name}, &svc); err != nil { + rec.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "failed to get service %q for path %q: %v", b.Service.Name, path, err) + return + } + if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" { + rec.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "backend for path %q has invalid ClusterIP", path) + return + } + var port int32 + if b.Service.Port.Name != "" { + for _, p := range svc.Spec.Ports { + if p.Name == b.Service.Port.Name { + port = p.Port + break + } + } + } else { + port = b.Service.Port.Number + } + if port == 0 { + rec.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "backend for path %q has invalid port", path) + return + } + proto := "http://" + if port == 443 || b.Service.Port.Name == "https" { + proto = "https+insecure://" + } + mak.Set(&handlers, path, &ipn.HTTPHandler{ + Proxy: proto + svc.Spec.ClusterIP + ":" + fmt.Sprint(port) + path, + }) + } + addIngressBackend(ing.Spec.DefaultBackend, "/") + for _, rule := range ing.Spec.Rules { + // Host is optional, but if it's present it must match the TLS host + // otherwise we ignore the rule. + if rule.Host != "" && rule.Host != tlsHost { + rec.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "rule with host %q ignored, unsupported", rule.Host) + continue + } + for _, p := range rule.HTTP.Paths { + // Send a warning if folks use Exact path type - to make + // it easier for us to support Exact path type matching + // in the future if needed. + // https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types + if *p.PathType == networkingv1.PathTypeExact { + msg := "Exact path type strict matching is currently not supported and requests will be routed as for Prefix path type. This behaviour might change in the future." + logger.Warnf(fmt.Sprintf("Unsupported Path type exact for path %s. %s", p.Path, msg)) + rec.Eventf(ing, corev1.EventTypeWarning, "UnsupportedPathTypeExact", msg) + } + addIngressBackend(&p.Backend, p.Path) + } + } + return handlers, nil +} + +// hostnameForIngress returns the hostname for an Ingress resource. 
+// If the Ingress has TLS configured with a host, it returns the first component of that host. +// Otherwise, it returns a hostname derived from the Ingress name and namespace. +func hostnameForIngress(ing *networkingv1.Ingress) string { + if ing.Spec.TLS != nil && len(ing.Spec.TLS) > 0 && len(ing.Spec.TLS[0].Hosts) > 0 { + h := ing.Spec.TLS[0].Hosts[0] + hostname, _, _ := strings.Cut(h, ".") + return hostname + } + return ing.Namespace + "-" + ing.Name + "-ingress" +} diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 7f8f94673..6368698d8 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -18,7 +18,6 @@ import ( "github.com/go-logr/zapr" "go.uber.org/zap" "go.uber.org/zap/zapcore" - "golang.org/x/oauth2/clientcredentials" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -107,14 +106,14 @@ func main() { hostinfo.SetApp(kubetypes.AppAPIServerProxy) } - s, tsClient := initTSNet(zlog) + s, tsc := initTSNet(zlog) defer s.Close() restConfig := config.GetConfigOrDie() maybeLaunchAPIServerProxy(zlog, restConfig, s, mode) rOpts := reconcilerOpts{ log: zlog, tsServer: s, - tsClient: tsClient, + tsClient: tsc, tailscaleNamespace: tsNamespace, restConfig: restConfig, proxyImage: image, @@ -130,7 +129,7 @@ func main() { // initTSNet initializes the tsnet.Server and logs in to Tailscale. It uses the // CLIENT_ID_FILE and CLIENT_SECRET_FILE environment variables to authenticate // with Tailscale. -func initTSNet(zlog *zap.SugaredLogger) (*tsnet.Server, *tailscale.Client) { +func initTSNet(zlog *zap.SugaredLogger) (*tsnet.Server, tsClient) { var ( clientIDPath = defaultEnv("CLIENT_ID_FILE", "") clientSecretPath = defaultEnv("CLIENT_SECRET_FILE", "") @@ -142,23 +141,10 @@ func initTSNet(zlog *zap.SugaredLogger) (*tsnet.Server, *tailscale.Client) { if clientIDPath == "" || clientSecretPath == "" { startlog.Fatalf("CLIENT_ID_FILE and CLIENT_SECRET_FILE must be set") } - clientID, err := os.ReadFile(clientIDPath) + tsc, err := newTSClient(context.Background(), clientIDPath, clientSecretPath) if err != nil { - startlog.Fatalf("reading client ID %q: %v", clientIDPath, err) + startlog.Fatalf("error creating Tailscale client: %v", err) } - clientSecret, err := os.ReadFile(clientSecretPath) - if err != nil { - startlog.Fatalf("reading client secret %q: %v", clientSecretPath, err) - } - credentials := clientcredentials.Config{ - ClientID: string(clientID), - ClientSecret: string(clientSecret), - TokenURL: "https://login.tailscale.com/api/v2/oauth/token", - } - tsClient := tailscale.NewClient("-", nil) - tsClient.UserAgent = "tailscale-k8s-operator" - tsClient.HTTPClient = credentials.Client(context.Background()) - s := &tsnet.Server{ Hostname: hostname, Logf: zlog.Named("tailscaled").Debugf, @@ -211,7 +197,7 @@ waitOnline: }, }, } - authkey, _, err := tsClient.CreateKey(ctx, caps) + authkey, _, err := tsc.CreateKey(ctx, caps) if err != nil { startlog.Fatalf("creating operator authkey: %v", err) } @@ -235,7 +221,7 @@ waitOnline: } time.Sleep(time.Second) } - return s, tsClient + return s, tsc } // runReconcilers starts the controller-runtime manager and registers the @@ -343,6 +329,27 @@ func runReconcilers(opts reconcilerOpts) { if err != nil { startlog.Fatalf("could not create ingress reconciler: %v", err) } + lc, err := opts.tsServer.LocalClient() + if err != nil { + startlog.Fatalf("could not get local client: %v", err) + } + err = builder. + ControllerManagedBy(mgr). + For(&networkingv1.Ingress{}). 
+ Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog))). + Complete(&IngressPGReconciler{ + recorder: eventRecorder, + tsClient: opts.tsClient, + tsnetServer: opts.tsServer, + defaultTags: strings.Split(opts.proxyTags, ","), + Client: mgr.GetClient(), + logger: opts.log.Named("ingress-pg-reconciler"), + lc: lc, + tsNamespace: opts.tailscaleNamespace, + }) + if err != nil { + startlog.Fatalf("could not create ingress-pg-reconciler: %v", err) + } connectorFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("connector")) // If a ProxyClassChanges, enqueue all Connectors that have @@ -514,6 +521,7 @@ func runReconcilers(opts reconcilerOpts) { err = builder.ControllerManagedBy(mgr). For(&tsapi.ProxyGroup{}). Watches(&appsv1.StatefulSet{}, ownedByProxyGroupFilter). + Watches(&corev1.ConfigMap{}, ownedByProxyGroupFilter). Watches(&corev1.ServiceAccount{}, ownedByProxyGroupFilter). Watches(&corev1.Secret{}, ownedByProxyGroupFilter). Watches(&rbacv1.Role{}, ownedByProxyGroupFilter). @@ -545,7 +553,7 @@ func runReconcilers(opts reconcilerOpts) { type reconcilerOpts struct { log *zap.SugaredLogger tsServer *tsnet.Server - tsClient *tailscale.Client + tsClient tsClient tailscaleNamespace string // namespace in which operator resources will be deployed restConfig *rest.Config // config for connecting to the kube API server proxyImage string // : @@ -670,12 +678,6 @@ func dnsRecordsReconcilerIngressHandler(ns string, isDefaultLoadBalancer bool, c } } -type tsClient interface { - CreateKey(ctx context.Context, caps tailscale.KeyCapabilities) (string, *tailscale.Key, error) - Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error) - DeleteDevice(ctx context.Context, nodeStableID string) error -} - func isManagedResource(o client.Object) bool { ls := o.GetLabels() return ls[LabelManaged] == "true" @@ -811,6 +813,10 @@ func serviceHandlerForIngress(cl client.Client, logger *zap.SugaredLogger) handl if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != tailscaleIngressClassName { return nil } + if hasProxyGroupAnnotation(&ing) { + // We don't want to reconcile backend Services for Ingresses for ProxyGroups. + continue + } if ing.Spec.DefaultBackend != nil && ing.Spec.DefaultBackend.Service != nil && ing.Spec.DefaultBackend.Service.Name == o.GetName() { reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) } @@ -1094,3 +1100,44 @@ func indexEgressServices(o client.Object) []string { } return []string{o.GetAnnotations()[AnnotationProxyGroup]} } + +// serviceHandlerForIngressPG returns a handler for Service events that ensures that if the Service +// associated with an event is a backend Service for a tailscale Ingress with ProxyGroup annotation, +// the associated Ingress gets reconciled. 
+func serviceHandlerForIngressPG(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + ingList := networkingv1.IngressList{} + if err := cl.List(ctx, &ingList, client.InNamespace(o.GetNamespace())); err != nil { + logger.Debugf("error listing Ingresses: %v", err) + return nil + } + reqs := make([]reconcile.Request, 0) + for _, ing := range ingList.Items { + if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != tailscaleIngressClassName { + continue + } + if !hasProxyGroupAnnotation(&ing) { + continue + } + if ing.Spec.DefaultBackend != nil && ing.Spec.DefaultBackend.Service != nil && ing.Spec.DefaultBackend.Service.Name == o.GetName() { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) + } + for _, rule := range ing.Spec.Rules { + if rule.HTTP == nil { + continue + } + for _, path := range rule.HTTP.Paths { + if path.Backend.Service != nil && path.Backend.Service.Name == o.GetName() { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) + } + } + } + } + return reqs + } +} + +func hasProxyGroupAnnotation(obj client.Object) bool { + ing := obj.(*networkingv1.Ingress) + return ing.Annotations[AnnotationProxyGroup] != "" +} diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index a4befa039..f6de31727 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -258,7 +258,16 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro existing.ObjectMeta.Labels = cm.ObjectMeta.Labels existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences }); err != nil { - return fmt.Errorf("error provisioning ConfigMap: %w", err) + return fmt.Errorf("error provisioning egress ConfigMap %q: %w", cm.Name, err) + } + } + if pg.Spec.Type == tsapi.ProxyGroupTypeIngress { + cm := pgIngressCM(pg, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, cm, func(existing *corev1.ConfigMap) { + existing.ObjectMeta.Labels = cm.ObjectMeta.Labels + existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences + }); err != nil { + return fmt.Errorf("error provisioning ingress ConfigMap %q: %w", cm.Name, err) } } ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode) diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index dc58b9f0e..556a2ed76 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -56,6 +56,10 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string } tmpl.Spec.ServiceAccountName = pg.Name tmpl.Spec.InitContainers[0].Image = image + proxyConfigVolName := pgEgressCMName(pg.Name) + if pg.Spec.Type == tsapi.ProxyGroupTypeIngress { + proxyConfigVolName = pgIngressCMName(pg.Name) + } tmpl.Spec.Volumes = func() []corev1.Volume { var volumes []corev1.Volume for i := range pgReplicas(pg) { @@ -69,18 +73,16 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string }) } - if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { - volumes = append(volumes, corev1.Volume{ - Name: pgEgressCMName(pg.Name), - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: pgEgressCMName(pg.Name), - }, + volumes = append(volumes, corev1.Volume{ + Name: proxyConfigVolName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: 
&corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: proxyConfigVolName, }, }, - }) - } + }, + }) return volumes }() @@ -102,13 +104,11 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string }) } - if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { - mounts = append(mounts, corev1.VolumeMount{ - Name: pgEgressCMName(pg.Name), - MountPath: "/etc/proxies", - ReadOnly: true, - }) - } + mounts = append(mounts, corev1.VolumeMount{ + Name: proxyConfigVolName, + MountPath: "/etc/proxies", + ReadOnly: true, + }) return mounts }() @@ -154,11 +154,15 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string Value: kubetypes.AppProxyGroupEgress, }, ) - } else { + } else { // ingress envs = append(envs, corev1.EnvVar{ Name: "TS_INTERNAL_APP", Value: kubetypes.AppProxyGroupIngress, - }) + }, + corev1.EnvVar{ + Name: "TS_SERVE_CONFIG", + Value: fmt.Sprintf("/etc/proxies/%s", serveConfigKey), + }) } return append(c.Env, envs...) }() @@ -264,6 +268,16 @@ func pgEgressCM(pg *tsapi.ProxyGroup, namespace string) *corev1.ConfigMap { }, } } +func pgIngressCM(pg *tsapi.ProxyGroup, namespace string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgIngressCMName(pg.Name), + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + }, + } +} func pgSecretLabels(pgName, typ string) map[string]string { return pgLabels(pgName, map[string]string{ diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index c920c90d1..e7c85d387 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -332,7 +332,8 @@ func TestProxyGroupTypes(t *testing.T) { UID: "test-ingress-uid", }, Spec: tsapi.ProxyGroupSpec{ - Type: tsapi.ProxyGroupTypeIngress, + Type: tsapi.ProxyGroupTypeIngress, + Replicas: ptr.To[int32](0), }, } if err := fc.Create(context.Background(), pg); err != nil { @@ -347,6 +348,34 @@ func TestProxyGroupTypes(t *testing.T) { t.Fatalf("failed to get StatefulSet: %v", err) } verifyEnvVar(t, sts, "TS_INTERNAL_APP", kubetypes.AppProxyGroupIngress) + verifyEnvVar(t, sts, "TS_SERVE_CONFIG", "/etc/proxies/serve-config.json") + + // Verify ConfigMap volume mount + cmName := fmt.Sprintf("%s-ingress-config", pg.Name) + expectedVolume := corev1.Volume{ + Name: cmName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: cmName, + }, + }, + }, + } + + expectedVolumeMount := corev1.VolumeMount{ + Name: cmName, + MountPath: "/etc/proxies", + ReadOnly: true, + } + + if diff := cmp.Diff([]corev1.Volume{expectedVolume}, sts.Spec.Template.Spec.Volumes); diff != "" { + t.Errorf("unexpected volumes (-want +got):\n%s", diff) + } + + if diff := cmp.Diff([]corev1.VolumeMount{expectedVolumeMount}, sts.Spec.Template.Spec.Containers[0].VolumeMounts); diff != "" { + t.Errorf("unexpected volume mounts (-want +got):\n%s", diff) + } }) } diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index c2b925058..fce6bfdd7 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -172,8 +172,8 @@ func (sts tailscaleSTSReconciler) validate() error { } // IsHTTPSEnabledOnTailnet reports whether HTTPS is enabled on the tailnet. 
-func (a *tailscaleSTSReconciler) IsHTTPSEnabledOnTailnet() bool { - return len(a.tsnetServer.CertDomains()) > 0 +func IsHTTPSEnabledOnTailnet(tsnetServer tsnetServer) bool { + return len(tsnetServer.CertDomains()) > 0 } // Provision ensures that the StatefulSet for the given service is running and diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 240a7df15..160f24ec9 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -9,6 +9,7 @@ import ( "context" "encoding/json" "fmt" + "net/http" "net/netip" "reflect" "strings" @@ -29,6 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "tailscale.com/client/tailscale" "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/types/ptr" "tailscale.com/util/mak" @@ -737,6 +739,7 @@ type fakeTSClient struct { sync.Mutex keyRequests []tailscale.KeyCapabilities deleted []string + vipServices map[string]*VIPService } type fakeTSNetServer struct { certDomains []string @@ -842,3 +845,50 @@ func removeAuthKeyIfExistsModifier(t *testing.T) func(s *corev1.Secret) { } } } + +func (c *fakeTSClient) getVIPServiceByName(ctx context.Context, name string) (*VIPService, error) { + c.Lock() + defer c.Unlock() + if c.vipServices == nil { + return nil, &tailscale.ErrResponse{Status: http.StatusNotFound} + } + svc, ok := c.vipServices[name] + if !ok { + return nil, &tailscale.ErrResponse{Status: http.StatusNotFound} + } + return svc, nil +} + +func (c *fakeTSClient) createOrUpdateVIPServiceByName(ctx context.Context, svc *VIPService) error { + c.Lock() + defer c.Unlock() + if c.vipServices == nil { + c.vipServices = make(map[string]*VIPService) + } + c.vipServices[svc.Name] = svc + return nil +} + +func (c *fakeTSClient) deleteVIPServiceByName(ctx context.Context, name string) error { + c.Lock() + defer c.Unlock() + if c.vipServices != nil { + delete(c.vipServices, name) + } + return nil +} + +type fakeLocalClient struct { + status *ipnstate.Status +} + +func (f *fakeLocalClient) StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { + if f.status == nil { + return &ipnstate.Status{ + Self: &ipnstate.PeerStatus{ + DNSName: "test-node.test.ts.net.", + }, + }, nil + } + return f.status, nil +} diff --git a/cmd/k8s-operator/tsclient.go b/cmd/k8s-operator/tsclient.go new file mode 100644 index 000000000..5352629de --- /dev/null +++ b/cmd/k8s-operator/tsclient.go @@ -0,0 +1,185 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + + "golang.org/x/oauth2/clientcredentials" + "tailscale.com/client/tailscale" + "tailscale.com/util/httpm" +) + +// defaultTailnet is a value that can be used in Tailscale API calls instead of tailnet name to indicate that the API +// call should be performed on the default tailnet for the provided credentials. 
+const ( + defaultTailnet = "-" + defaultBaseURL = "https://api.tailscale.com" +) + +func newTSClient(ctx context.Context, clientIDPath, clientSecretPath string) (tsClient, error) { + clientID, err := os.ReadFile(clientIDPath) + if err != nil { + return nil, fmt.Errorf("error reading client ID %q: %w", clientIDPath, err) + } + clientSecret, err := os.ReadFile(clientSecretPath) + if err != nil { + return nil, fmt.Errorf("reading client secret %q: %w", clientSecretPath, err) + } + credentials := clientcredentials.Config{ + ClientID: string(clientID), + ClientSecret: string(clientSecret), + TokenURL: "https://login.tailscale.com/api/v2/oauth/token", + } + c := tailscale.NewClient(defaultTailnet, nil) + c.UserAgent = "tailscale-k8s-operator" + c.HTTPClient = credentials.Client(ctx) + tsc := &tsClientImpl{ + Client: c, + baseURL: defaultBaseURL, + tailnet: defaultTailnet, + } + return tsc, nil +} + +type tsClient interface { + CreateKey(ctx context.Context, caps tailscale.KeyCapabilities) (string, *tailscale.Key, error) + Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error) + DeleteDevice(ctx context.Context, nodeStableID string) error + getVIPServiceByName(ctx context.Context, name string) (*VIPService, error) + createOrUpdateVIPServiceByName(ctx context.Context, svc *VIPService) error + deleteVIPServiceByName(ctx context.Context, name string) error +} + +type tsClientImpl struct { + *tailscale.Client + baseURL string + tailnet string +} + +// VIPService is a Tailscale VIPService with Tailscale API JSON representation. +type VIPService struct { + // Name is the leftmost label of the DNS name of the VIP service. + // Name is required. + Name string `json:"name,omitempty"` + // Addrs are the IP addresses of the VIP Service. There are two addresses: + // the first is IPv4 and the second is IPv6. + // When creating a new VIP Service, the IP addresses are optional: if no + // addresses are specified then they will be selected. If an IPv4 address is + // specified at index 0, then that address will attempt to be used. An IPv6 + // address can not be specified upon creation. + Addrs []string `json:"addrs,omitempty"` + // Comment is an optional text string for display in the admin panel. + Comment string `json:"comment,omitempty"` + // Ports are the ports of a VIPService that will be configured via Tailscale serve config. + // If set, any node wishing to advertise this VIPService must have this port configured via Tailscale serve. + Ports []string `json:"ports,omitempty"` + // Tags are optional ACL tags that will be applied to the VIPService. + Tags []string `json:"tags,omitempty"` +} + +// GetVIPServiceByName retrieves a VIPService by its name. It returns 404 if the VIPService is not found. +func (c *tsClientImpl) getVIPServiceByName(ctx context.Context, name string) (*VIPService, error) { + path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/by-name/%s", c.baseURL, c.tailnet, url.PathEscape(name)) + req, err := http.NewRequestWithContext(ctx, httpm.GET, path, nil) + if err != nil { + return nil, fmt.Errorf("error creating new HTTP request: %w", err) + } + b, resp, err := c.sendRequest(req) + if err != nil { + return nil, fmt.Errorf("error making Tailsale API request: %w", err) + } + // If status code was not successful, return the error. + // TODO: Change the check for the StatusCode to include other 2XX success codes. 
+ if resp.StatusCode != http.StatusOK { + return nil, handleErrorResponse(b, resp) + } + svc := &VIPService{} + if err := json.Unmarshal(b, svc); err != nil { + return nil, err + } + return svc, nil +} + +// CreateOrUpdateVIPServiceByName creates or updates a VIPService by its name. Caller must ensure that, if the +// VIPService already exists, the VIPService is fetched first to ensure that any auto-allocated IP addresses are not +// lost during the update. If the VIPService was created without any IP addresses explicitly set (so that they were +// auto-allocated by Tailscale) any subsequent request to this function that does not set any IP addresses will error. +func (c *tsClientImpl) createOrUpdateVIPServiceByName(ctx context.Context, svc *VIPService) error { + data, err := json.Marshal(svc) + if err != nil { + return err + } + path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/by-name/%s", c.baseURL, c.tailnet, url.PathEscape(svc.Name)) + req, err := http.NewRequestWithContext(ctx, httpm.PUT, path, bytes.NewBuffer(data)) + if err != nil { + return fmt.Errorf("error creating new HTTP request: %w", err) + } + b, resp, err := c.sendRequest(req) + if err != nil { + return fmt.Errorf("error making Tailscale API request: %w", err) + } + // If status code was not successful, return the error. + // TODO: Change the check for the StatusCode to include other 2XX success codes. + if resp.StatusCode != http.StatusOK { + return handleErrorResponse(b, resp) + } + return nil +} + +// DeleteVIPServiceByName deletes a VIPService by its name. It returns an error if the VIPService +// does not exist or if the deletion fails. +func (c *tsClientImpl) deleteVIPServiceByName(ctx context.Context, name string) error { + path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/by-name/%s", c.baseURL, c.tailnet, url.PathEscape(name)) + req, err := http.NewRequestWithContext(ctx, httpm.DELETE, path, nil) + if err != nil { + return fmt.Errorf("error creating new HTTP request: %w", err) + } + b, resp, err := c.sendRequest(req) + if err != nil { + return fmt.Errorf("error making Tailscale API request: %w", err) + } + // If status code was not successful, return the error. + if resp.StatusCode != http.StatusOK { + return handleErrorResponse(b, resp) + } + return nil +} + +// sendRequest add the authentication key to the request and sends it. It +// receives the response and reads up to 10MB of it. +func (c *tsClientImpl) sendRequest(req *http.Request) ([]byte, *http.Response, error) { + resp, err := c.Do(req) + if err != nil { + return nil, resp, fmt.Errorf("error actually doing request: %w", err) + } + defer resp.Body.Close() + + // Read response + b, err := io.ReadAll(resp.Body) + if err != nil { + err = fmt.Errorf("error reading response body: %v", err) + } + return b, resp, err +} + +// handleErrorResponse decodes the error message from the server and returns +// an ErrResponse from it. 
+func handleErrorResponse(b []byte, resp *http.Response) error { + var errResp tailscale.ErrResponse + if err := json.Unmarshal(b, &errResp); err != nil { + return err + } + errResp.Status = resp.StatusCode + return errResp +} diff --git a/kube/kubetypes/types.go b/kube/kubetypes/types.go index 3c97d8c7d..afc489018 100644 --- a/kube/kubetypes/types.go +++ b/kube/kubetypes/types.go @@ -15,8 +15,9 @@ const ( AppProxyGroupIngress = "k8s-operator-proxygroup-ingress" // Clientmetrics for Tailscale Kubernetes Operator components - MetricIngressProxyCount = "k8s_ingress_proxies" // L3 - MetricIngressResourceCount = "k8s_ingress_resources" // L7 + MetricIngressProxyCount = "k8s_ingress_proxies" // L3 + MetricIngressResourceCount = "k8s_ingress_resources" // L7 + MetricIngressPGResourceCount = "k8s_ingress_pg_resources" // L7 on ProxyGroup MetricEgressProxyCount = "k8s_egress_proxies" MetricConnectorResourceCount = "k8s_connector_resources" MetricConnectorWithSubnetRouterCount = "k8s_connector_subnetrouter_resources" From bcc262269f058923fe7e88c577aabaf212bafde1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 22:24:13 -0700 Subject: [PATCH 0357/1708] build(deps): bump braces from 3.0.2 to 3.0.3 in /cmd/tsconnect (#12468) Bumps [braces](https://github.com/micromatch/braces) from 3.0.2 to 3.0.3. - [Changelog](https://github.com/micromatch/braces/blob/master/CHANGELOG.md) - [Commits](https://github.com/micromatch/braces/compare/3.0.2...3.0.3) --- updated-dependencies: - dependency-name: braces dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- cmd/tsconnect/yarn.lock | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cmd/tsconnect/yarn.lock b/cmd/tsconnect/yarn.lock index 811eddeb7..d9d9db32f 100644 --- a/cmd/tsconnect/yarn.lock +++ b/cmd/tsconnect/yarn.lock @@ -90,11 +90,11 @@ binary-extensions@^2.0.0: integrity sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA== braces@^3.0.2, braces@~3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" - integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== + version "3.0.3" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" + integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== dependencies: - fill-range "^7.0.1" + fill-range "^7.1.1" camelcase-css@^2.0.1: version "2.0.1" @@ -231,10 +231,10 @@ fastq@^1.6.0: dependencies: reusify "^1.0.4" -fill-range@^7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" - integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== +fill-range@^7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" + integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== dependencies: to-regex-range "^5.0.1" From 51adaec35a3e4d25df88d81e6264584e151bd33d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 21 Jan 2025 08:02:24 -0800 Subject: [PATCH 0358/1708] Revert "ipn/ipnlocal: 
re-advertise appc routes on startup (#14609)" This reverts commit 1b303ee5baef3ddab40be4d1c2 (#14609). It caused a deadlock; see tailscale/corp#25965 Updates tailscale/corp#25965 Updates #13680 Updates #14606 --- ipn/ipnlocal/local.go | 35 +++------------------------- ipn/ipnlocal/local_test.go | 47 -------------------------------------- 2 files changed, 3 insertions(+), 79 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 214d3a4e4..bb84012fd 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4356,33 +4356,6 @@ func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs i b.appConnector.UpdateDomainsAndRoutes(domains, routes) } -func (b *LocalBackend) readvertiseAppConnectorRoutes() { - var domainRoutes map[string][]netip.Addr - b.mu.Lock() - if b.appConnector != nil { - domainRoutes = b.appConnector.DomainRoutes() - } - b.mu.Unlock() - if domainRoutes == nil { - return - } - - // Re-advertise the stored routes, in case stored state got out of - // sync with previously advertised routes in prefs. - var prefixes []netip.Prefix - for _, ips := range domainRoutes { - for _, ip := range ips { - prefixes = append(prefixes, netip.PrefixFrom(ip, ip.BitLen())) - } - } - // Note: AdvertiseRoute will trim routes that are already - // advertised, so if everything is already being advertised this is - // a noop. - if err := b.AdvertiseRoute(prefixes...); err != nil { - b.logf("error advertising stored app connector routes: %v", err) - } -} - // authReconfig pushes a new configuration into wgengine, if engine // updates are not currently blocked, based on the cached netmap and // user prefs. @@ -4461,7 +4434,6 @@ func (b *LocalBackend) authReconfig() { } b.initPeerAPIListener() - b.readvertiseAppConnectorRoutes() } // shouldUseOneCGNATRoute reports whether we should prefer to make one big @@ -7204,7 +7176,7 @@ var ErrDisallowedAutoRoute = errors.New("route is not allowed") // If the route is disallowed, ErrDisallowedAutoRoute is returned. func (b *LocalBackend) AdvertiseRoute(ipps ...netip.Prefix) error { finalRoutes := b.Prefs().AdvertiseRoutes().AsSlice() - var newRoutes []netip.Prefix + newRoutes := false for _, ipp := range ipps { if !allowedAutoRoute(ipp) { @@ -7220,14 +7192,13 @@ func (b *LocalBackend) AdvertiseRoute(ipps ...netip.Prefix) error { } finalRoutes = append(finalRoutes, ipp) - newRoutes = append(newRoutes, ipp) + newRoutes = true } - if len(newRoutes) == 0 { + if !newRoutes { return nil } - b.logf("advertising new app connector routes: %v", newRoutes) _, err := b.EditPrefs(&ipn.MaskedPrefs{ Prefs: ipn.Prefs{ AdvertiseRoutes: finalRoutes, diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 348bdcab3..415791c60 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1501,53 +1501,6 @@ func TestReconfigureAppConnector(t *testing.T) { } } -func TestBackfillAppConnectorRoutes(t *testing.T) { - // Create backend with an empty app connector. - b := newTestBackend(t) - if err := b.Start(ipn.Options{}); err != nil { - t.Fatal(err) - } - if _, err := b.EditPrefs(&ipn.MaskedPrefs{ - Prefs: ipn.Prefs{ - AppConnector: ipn.AppConnectorPrefs{Advertise: true}, - }, - AppConnectorSet: true, - }); err != nil { - t.Fatal(err) - } - b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs) - - // Smoke check that AdvertiseRoutes doesn't have the test IP. 
- ip := netip.MustParseAddr("1.2.3.4") - routes := b.Prefs().AdvertiseRoutes().AsSlice() - if slices.Contains(routes, netip.PrefixFrom(ip, ip.BitLen())) { - t.Fatalf("AdvertiseRoutes %v on a fresh backend already contains advertised route for %v", routes, ip) - } - - // Store the test IP in profile data, but not in Prefs.AdvertiseRoutes. - b.ControlKnobs().AppCStoreRoutes.Store(true) - if err := b.storeRouteInfo(&appc.RouteInfo{ - Domains: map[string][]netip.Addr{ - "example.com": {ip}, - }, - }); err != nil { - t.Fatal(err) - } - - // Mimic b.authReconfigure for the app connector bits. - b.mu.Lock() - b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs) - b.mu.Unlock() - b.readvertiseAppConnectorRoutes() - - // Check that Prefs.AdvertiseRoutes got backfilled with routes stored in - // profile data. - routes = b.Prefs().AdvertiseRoutes().AsSlice() - if !slices.Contains(routes, netip.PrefixFrom(ip, ip.BitLen())) { - t.Fatalf("AdvertiseRoutes %v was not backfilled from stored app connector routes with %v", routes, ip) - } -} - func resolversEqual(t *testing.T, a, b []*dnstype.Resolver) bool { if a == nil && b == nil { return true From 7f3c1932b54fb6af2d8d1e367e0e456ff7fa40fd Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 21 Jan 2025 10:23:58 -0800 Subject: [PATCH 0359/1708] tsnet: fix panic on race between listener.Close and incoming packet I saw this panic while writing a new test for #14715: panic: send on closed channel goroutine 826 [running]: tailscale.com/tsnet.(*listener).handle(0x1400031a500, {0x1035fbb00, 0x14000b82300}) /Users/bradfitz/src/tailscale.com/tsnet/tsnet.go:1317 +0xac tailscale.com/wgengine/netstack.(*Impl).acceptTCP(0x14000204700, 0x14000882100) /Users/bradfitz/src/tailscale.com/wgengine/netstack/netstack.go:1320 +0x6dc created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*Forwarder).HandlePacket in goroutine 807 /Users/bradfitz/go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/forwarder.go:98 +0x32c FAIL tailscale.com/tsnet 0.927s Updates #14715 Change-Id: I9924e0a6c2b801d46ee44eb8eeea0da2f9ea17c4 Signed-off-by: Brad Fitzpatrick --- tsnet/tsnet.go | 25 ++++++++++++++----------- tsnet/tsnet_test.go | 19 +++++++++++++++++++ 2 files changed, 33 insertions(+), 11 deletions(-) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 5f1d8073a..fd894c38a 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -1180,7 +1180,8 @@ func (s *Server) listen(network, addr string, lnOn listenOn) (net.Listener, erro keys: keys, addr: addr, - conn: make(chan net.Conn), + closedc: make(chan struct{}), + conn: make(chan net.Conn), } s.mu.Lock() for _, key := range keys { @@ -1243,11 +1244,12 @@ type listenKey struct { } type listener struct { - s *Server - keys []listenKey - addr string - conn chan net.Conn - closed bool // guarded by s.mu + s *Server + keys []listenKey + addr string + conn chan net.Conn // unbuffered, never closed + closedc chan struct{} // closed on [listener.Close] + closed bool // guarded by s.mu } func (ln *listener) Accept() (net.Conn, error) { @@ -1277,21 +1279,22 @@ func (ln *listener) closeLocked() error { delete(ln.s.listeners, key) } } - close(ln.conn) + close(ln.closedc) ln.closed = true return nil } func (ln *listener) handle(c net.Conn) { - t := time.NewTimer(time.Second) - defer t.Stop() select { case ln.conn <- c: - case <-t.C: + return + case <-ln.closedc: + case <-ln.s.shutdownCtx.Done(): + case <-time.After(time.Second): // TODO(bradfitz): this isn't ideal. Think about how // we how we want to do pushback. 
- c.Close() } + c.Close() } // Server returns the tsnet Server associated with the listener. diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 14d600817..c2f27d0f3 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -494,6 +494,25 @@ func TestListenerCleanup(t *testing.T) { if err := ln.Close(); !errors.Is(err, net.ErrClosed) { t.Fatalf("second ln.Close error: %v, want net.ErrClosed", err) } + + // Verify that handling a connection from gVisor (from a packet arriving) + // after a listener closed doesn't panic (previously: sending on a closed + // channel) or hang. + c := &closeTrackConn{} + ln.(*listener).handle(c) + if !c.closed { + t.Errorf("c.closed = false, want true") + } +} + +type closeTrackConn struct { + net.Conn + closed bool +} + +func (wc *closeTrackConn) Close() error { + wc.closed = true + return nil } // tests https://github.com/tailscale/tailscale/issues/6973 -- that we can start a tsnet server, From 27299426389e7fb7eab54994596cae09c0e11ec9 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Sat, 18 Jan 2025 20:41:36 -0600 Subject: [PATCH 0360/1708] prober: fix nil pointer access in tcp-in-tcp probes If unable to accept a connection from the bandwidth probe listener, return from the goroutine immediately since the accepted connection will be nil. Updates tailscale/corp#25958 Signed-off-by: Percy Wegmann --- prober/derp.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/prober/derp.go b/prober/derp.go index 995a69626..05cc8f05c 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -1048,6 +1048,7 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT readConn, err := l.Accept() if err != nil { readFinishedC <- err + return } defer readConn.Close() deadline, ok := ctx.Deadline() @@ -1055,6 +1056,7 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT // Don't try reading past our context's deadline. if err := readConn.SetReadDeadline(deadline); err != nil { readFinishedC <- fmt.Errorf("unable to set read deadline: %w", err) + return } } n, err := io.CopyN(io.Discard, readConn, size) From b50d32059f1b33311dbba96a57c82d33a28f0e1f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 21 Jan 2025 09:50:45 -0800 Subject: [PATCH 0361/1708] tsnet: block in Server.Dial until backend is Running Updates #14715 Change-Id: I8c91e94fd1c6278c7f94a6b890274ed8a01e6f25 Signed-off-by: Brad Fitzpatrick --- tsnet/tsnet.go | 32 ++++++++++++++++++++++++++++++++ tsnet/tsnet_test.go | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index fd894c38a..b769e719c 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -169,9 +169,41 @@ func (s *Server) Dial(ctx context.Context, network, address string) (net.Conn, e if err := s.Start(); err != nil { return nil, err } + if err := s.awaitRunning(ctx); err != nil { + return nil, err + } return s.dialer.UserDial(ctx, network, address) } +// awaitRunning waits until the backend is in state Running. +// If the backend is in state Starting, it blocks until it reaches +// a terminal state (such as Stopped, NeedsMachineAuth) +// or the context expires. +func (s *Server) awaitRunning(ctx context.Context) error { + st := s.lb.State() + for { + if err := ctx.Err(); err != nil { + return err + } + switch st { + case ipn.Running: + return nil + case ipn.NeedsLogin, ipn.Starting: + // Even after LocalBackend.Start, the state machine is still briefly + // in the "NeedsLogin" state. 
So treat that as also "Starting" and + // wait for us to get out of that state. + s.lb.WatchNotifications(ctx, ipn.NotifyInitialState, nil, func(n *ipn.Notify) (keepGoing bool) { + if n.State != nil { + st = *n.State + } + return st == ipn.NeedsLogin || st == ipn.Starting + }) + default: + return fmt.Errorf("tsnet: backend in state %v", st) + } + } +} + // HTTPClient returns an HTTP client that is configured to connect over Tailscale. // // This is useful if you need to have your tsnet services connect to other devices on diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index c2f27d0f3..552e8dbee 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -232,6 +232,46 @@ func startServer(t *testing.T, ctx context.Context, controlURL, hostname string) return s, status.TailscaleIPs[0], status.Self.PublicKey } +func TestDialBlocks(t *testing.T) { + tstest.ResourceCheck(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + controlURL, _ := startControl(t) + + // Make one tsnet that blocks until it's up. + s1, _, _ := startServer(t, ctx, controlURL, "s1") + + ln, err := s1.Listen("tcp", ":8080") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + // Then make another tsnet node that will only be woken up + // upon the first dial. + tmp := filepath.Join(t.TempDir(), "s2") + os.MkdirAll(tmp, 0755) + s2 := &Server{ + Dir: tmp, + ControlURL: controlURL, + Hostname: "s2", + Store: new(mem.Store), + Ephemeral: true, + getCertForTesting: testCertRoot.getCert, + } + if *verboseNodes { + s2.Logf = log.Printf + } + t.Cleanup(func() { s2.Close() }) + + c, err := s2.Dial(ctx, "tcp", "s1:8080") + if err != nil { + t.Fatal(err) + } + defer c.Close() +} + func TestConn(t *testing.T) { tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) From 8b9d5fd6bc9fd1bd13ec77903f2c23d1189c1a7d Mon Sep 17 00:00:00 2001 From: James Tucker Date: Tue, 21 Jan 2025 10:34:28 -0800 Subject: [PATCH 0362/1708] go.mod: bump github.com/inetaf/tcpproxy Updates tailscale/corp#25169 Signed-off-by: James Tucker --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 92dd1bf65..4265953a4 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,7 @@ require ( github.com/goreleaser/nfpm/v2 v2.33.1 github.com/hdevalence/ed25519consensus v0.2.0 github.com/illarion/gonotify/v2 v2.0.3 - github.com/inetaf/tcpproxy v0.0.0-20240214030015-3ce58045626c + github.com/inetaf/tcpproxy v0.0.0-20250121183218-48c7e53d7ac4 github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 github.com/jellydator/ttlcache/v3 v3.1.0 github.com/jsimonetti/rtnetlink v1.4.0 diff --git a/go.sum b/go.sum index 0354c3364..2623cb6e9 100644 --- a/go.sum +++ b/go.sum @@ -572,8 +572,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/inetaf/tcpproxy v0.0.0-20240214030015-3ce58045626c h1:gYfYE403/nlrGNYj6BEOs9ucLCAGB9gstlSk92DttTg= -github.com/inetaf/tcpproxy v0.0.0-20240214030015-3ce58045626c/go.mod h1:Di7LXRyUcnvAcLicFhtM9/MlZl/TNgRSDHORM2c6CMI= +github.com/inetaf/tcpproxy v0.0.0-20250121183218-48c7e53d7ac4 h1:5u/LhBmv8Y+BhTTADTuh8ma0DcZ3zzx+GINbMeMG9nM= +github.com/inetaf/tcpproxy 
v0.0.0-20250121183218-48c7e53d7ac4/go.mod h1:Di7LXRyUcnvAcLicFhtM9/MlZl/TNgRSDHORM2c6CMI= github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= From e12b2a7267afbd8189c7834b840d2fcdb8786d64 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 21 Jan 2025 15:42:12 -0800 Subject: [PATCH 0363/1708] cmd/tailscale/cli: clean up how optional commands get registered Both @agottardo and I tripped over this today. Updates #cleanup Change-Id: I64380a03bfc952b9887b1512dbcadf26499ff1cd Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/advertise.go | 26 ++++++++++++-------------- cmd/tailscale/cli/cli.go | 10 ++++++++-- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/cmd/tailscale/cli/advertise.go b/cmd/tailscale/cli/advertise.go index c9474c427..00b5024f0 100644 --- a/cmd/tailscale/cli/advertise.go +++ b/cmd/tailscale/cli/advertise.go @@ -21,23 +21,21 @@ var advertiseArgs struct { // TODO(naman): This flag may move to set.go or serve_v2.go after the WIPCode // envknob is not needed. -var advertiseCmd = &ffcli.Command{ - Name: "advertise", - ShortUsage: "tailscale advertise --services=", - ShortHelp: "Advertise this node as a destination for a service", - Exec: runAdvertise, - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("advertise") - fs.StringVar(&advertiseArgs.services, "services", "", "comma-separated services to advertise; each must start with \"svc:\" (e.g. \"svc:idp,svc:nas,svc:database\")") - return fs - })(), -} - -func maybeAdvertiseCmd() []*ffcli.Command { +func advertiseCmd() *ffcli.Command { if !envknob.UseWIPCode() { return nil } - return []*ffcli.Command{advertiseCmd} + return &ffcli.Command{ + Name: "advertise", + ShortUsage: "tailscale advertise --services=", + ShortHelp: "Advertise this node as a destination for a service", + Exec: runAdvertise, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("advertise") + fs.StringVar(&advertiseArgs.services, "services", "", "comma-separated services to advertise; each must start with \"svc:\" (e.g. \"svc:idp,svc:nas,svc:database\")") + return fs + })(), + } } func runAdvertise(ctx context.Context, args []string) error { diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 542a2e464..645859038 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -25,6 +25,7 @@ import ( "tailscale.com/cmd/tailscale/cli/ffcomplete" "tailscale.com/envknob" "tailscale.com/paths" + "tailscale.com/util/slicesx" "tailscale.com/version/distro" ) @@ -182,7 +183,7 @@ For help on subcommands, add --help after: "tailscale status --help". This CLI is still under active development. Commands and flags will change in the future. `), - Subcommands: append([]*ffcli.Command{ + Subcommands: nonNilCmds( upCmd, downCmd, setCmd, @@ -214,7 +215,8 @@ change in the future. debugCmd, driveCmd, idTokenCmd, - }, maybeAdvertiseCmd()...), + advertiseCmd(), + ), FlagSet: rootfs, Exec: func(ctx context.Context, args []string) error { if len(args) > 0 { @@ -239,6 +241,10 @@ change in the future. return rootCmd } +func nonNilCmds(cmds ...*ffcli.Command) []*ffcli.Command { + return slicesx.Filter(cmds[:0], cmds, func(c *ffcli.Command) bool { return c != nil }) +} + func fatalf(format string, a ...any) { if Fatalf != nil { Fatalf(format, a...) 
From 150cd30b1d28613b50cebde9f18595ef78a2a803 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 21 Jan 2025 15:30:55 -0800 Subject: [PATCH 0364/1708] ipn/ipnlocal: also use LetsEncrypt-baked-in roots for cert validation We previously baked in the LetsEncrypt x509 root CA for our tlsdial package. This moves that out into a new "bakedroots" package and is now also shared by ipn/ipnlocal's cert validation code (validCertPEM) that decides whether it's time to fetch a new cert. Otherwise, a machine without LetsEncrypt roots locally in its system roots is unable to use tailscale cert/serve and fetch certs. Fixes #14690 Change-Id: Ic88b3bdaabe25d56b9ff07ada56a27e3f11d7159 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + ipn/ipnlocal/cert.go | 19 ++++- net/bakedroots/bakedroots.go | 122 ++++++++++++++++++++++++++++++ net/bakedroots/bakedroots_test.go | 15 ++++ net/tlsdial/tlsdial.go | 86 +-------------------- net/tlsdial/tlsdial_test.go | 28 +------ 9 files changed, 164 insertions(+), 110 deletions(-) create mode 100644 net/bakedroots/bakedroots.go create mode 100644 net/bakedroots/bakedroots_test.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 498677a49..3a730dd99 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -99,6 +99,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/ipn/ipnstate from tailscale.com/client/tailscale+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/metrics from tailscale.com/cmd/derper+ + tailscale.com/net/bakedroots from tailscale.com/net/tlsdial tailscale.com/net/dnscache from tailscale.com/derp/derphttp tailscale.com/net/ktimeout from tailscale.com/cmd/derper tailscale.com/net/netaddr from tailscale.com/ipn+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 80c9f0c06..a27e1761d 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -835,6 +835,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 9ccd6eebd..774d97d8e 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -97,6 +97,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web+ tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/bakedroots from tailscale.com/net/tlsdial tailscale.com/net/captivedetection from tailscale.com/net/netcheck tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dnscache from tailscale.com/control/controlhttp+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 8af347319..1fc1b8d70 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -286,6 +286,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by 
github.com/tailscale/de tailscale.com/logtail/backoff from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index d87374bbb..0d92c7cf8 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -40,6 +40,7 @@ import ( "tailscale.com/ipn/ipnstate" "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" + "tailscale.com/net/bakedroots" "tailscale.com/types/logger" "tailscale.com/util/testenv" "tailscale.com/version" @@ -665,7 +666,7 @@ func acmeClient(cs certStore) (*acme.Client, error) { // validCertPEM reports whether the given certificate is valid for domain at now. // // If roots != nil, it is used instead of the system root pool. This is meant -// to support testing, and production code should pass roots == nil. +// to support testing; production code should pass roots == nil. func validCertPEM(domain string, keyPEM, certPEM []byte, roots *x509.CertPool, now time.Time) bool { if len(keyPEM) == 0 || len(certPEM) == 0 { return false @@ -688,15 +689,29 @@ func validCertPEM(domain string, keyPEM, certPEM []byte, roots *x509.CertPool, n intermediates.AddCert(cert) } } + return validateLeaf(leaf, intermediates, domain, now, roots) +} + +// validateLeaf is a helper for [validCertPEM]. +// +// If called with roots == nil, it will use the system root pool as well as the +// baked-in roots. If non-nil, only those roots are used. +func validateLeaf(leaf *x509.Certificate, intermediates *x509.CertPool, domain string, now time.Time, roots *x509.CertPool) bool { if leaf == nil { return false } - _, err = leaf.Verify(x509.VerifyOptions{ + _, err := leaf.Verify(x509.VerifyOptions{ DNSName: domain, CurrentTime: now, Roots: roots, Intermediates: intermediates, }) + if err != nil && roots == nil { + // If validation failed and they specified nil for roots (meaning to use + // the system roots), then give it another chance to validate using the + // binary's baked-in roots (LetsEncrypt). See tailscale/tailscale#14690. + return validateLeaf(leaf, intermediates, domain, now, bakedroots.Get()) + } return err == nil } diff --git a/net/bakedroots/bakedroots.go b/net/bakedroots/bakedroots.go new file mode 100644 index 000000000..f7e4fa21e --- /dev/null +++ b/net/bakedroots/bakedroots.go @@ -0,0 +1,122 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package bakedroots contains WebPKI CA roots we bake into the tailscaled binary, +// lest the system's CA roots be missing them (or entirely empty). +package bakedroots + +import ( + "crypto/x509" + "sync" + + "tailscale.com/util/testenv" +) + +// Get returns the baked-in roots. +// +// As of 2025-01-21, this includes only the LetsEncrypt ISRG Root X1 root. +func Get() *x509.CertPool { + roots.once.Do(func() { roots.parsePEM([]byte(letsEncryptX1)) }) + return roots.p +} + +// testingTB is a subset of testing.TB needed +// to verify the caller isn't in a parallel test. +type testingTB interface { + // Setenv panics if it's in a parallel test. + Setenv(k, v string) +} + +// ResetForTest resets the cached roots for testing, +// optionally setting them to caPEM if non-nil. 
+func ResetForTest(tb testingTB, caPEM []byte) { + if !testenv.InTest() { + panic("not in test") + } + tb.Setenv("ASSERT_NOT_PARALLEL_TEST", "1") // panics if tb's Parallel was called + + roots = rootsOnce{} + if caPEM != nil { + roots.once.Do(func() { roots.parsePEM(caPEM) }) + } +} + +var roots rootsOnce + +type rootsOnce struct { + once sync.Once + p *x509.CertPool +} + +func (r *rootsOnce) parsePEM(caPEM []byte) { + p := x509.NewCertPool() + if !p.AppendCertsFromPEM(caPEM) { + panic("bogus PEM") + } + r.p = p +} + +/* +letsEncryptX1 is the LetsEncrypt X1 root: + +Certificate: + + Data: + Version: 3 (0x2) + Serial Number: + 82:10:cf:b0:d2:40:e3:59:44:63:e0:bb:63:82:8b:00 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C = US, O = Internet Security Research Group, CN = ISRG Root X1 + Validity + Not Before: Jun 4 11:04:38 2015 GMT + Not After : Jun 4 11:04:38 2035 GMT + Subject: C = US, O = Internet Security Research Group, CN = ISRG Root X1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public-Key: (4096 bit) + +We bake it into the binary as a fallback verification root, +in case the system we're running on doesn't have it. +(Tailscale runs on some ancient devices.) + +To test that this code is working on Debian/Ubuntu: + +$ sudo mv /usr/share/ca-certificates/mozilla/ISRG_Root_X1.crt{,.old} +$ sudo update-ca-certificates + +Then restart tailscaled. To also test dnsfallback's use of it, nuke +your /etc/resolv.conf and it should still start & run fine. +*/ +const letsEncryptX1 = ` +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc +oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- +` diff --git 
a/net/bakedroots/bakedroots_test.go b/net/bakedroots/bakedroots_test.go new file mode 100644 index 000000000..9aa4366c8 --- /dev/null +++ b/net/bakedroots/bakedroots_test.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package bakedroots + +import "testing" + +func TestBakedInRoots(t *testing.T) { + ResetForTest(t, nil) + p := Get() + got := p.Subjects() + if len(got) != 1 { + t.Errorf("subjects = %v; want 1", len(got)) + } +} diff --git a/net/tlsdial/tlsdial.go b/net/tlsdial/tlsdial.go index 2a109c790..2af87bd02 100644 --- a/net/tlsdial/tlsdial.go +++ b/net/tlsdial/tlsdial.go @@ -27,6 +27,7 @@ import ( "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/hostinfo" + "tailscale.com/net/bakedroots" "tailscale.com/net/tlsdial/blockblame" ) @@ -154,7 +155,7 @@ func Config(host string, ht *health.Tracker, base *tls.Config) *tls.Config { // Always verify with our baked-in Let's Encrypt certificate, // so we can log an informational message. This is useful for // detecting SSL MiTM. - opts.Roots = bakedInRoots() + opts.Roots = bakedroots.Get() _, bakedErr := cs.PeerCertificates[0].Verify(opts) if debug() { log.Printf("tlsdial(bake %q): %v", host, bakedErr) @@ -233,7 +234,7 @@ func SetConfigExpectedCert(c *tls.Config, certDNSName string) { if errSys == nil { return nil } - opts.Roots = bakedInRoots() + opts.Roots = bakedroots.Get() _, err := certs[0].Verify(opts) if debug() { log.Printf("tlsdial(bake %q/%q): %v", c.ServerName, certDNSName, err) @@ -260,84 +261,3 @@ func NewTransport() *http.Transport { }, } } - -/* -letsEncryptX1 is the LetsEncrypt X1 root: - -Certificate: - - Data: - Version: 3 (0x2) - Serial Number: - 82:10:cf:b0:d2:40:e3:59:44:63:e0:bb:63:82:8b:00 - Signature Algorithm: sha256WithRSAEncryption - Issuer: C = US, O = Internet Security Research Group, CN = ISRG Root X1 - Validity - Not Before: Jun 4 11:04:38 2015 GMT - Not After : Jun 4 11:04:38 2035 GMT - Subject: C = US, O = Internet Security Research Group, CN = ISRG Root X1 - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - RSA Public-Key: (4096 bit) - -We bake it into the binary as a fallback verification root, -in case the system we're running on doesn't have it. -(Tailscale runs on some ancient devices.) - -To test that this code is working on Debian/Ubuntu: - -$ sudo mv /usr/share/ca-certificates/mozilla/ISRG_Root_X1.crt{,.old} -$ sudo update-ca-certificates - -Then restart tailscaled. To also test dnsfallback's use of it, nuke -your /etc/resolv.conf and it should still start & run fine. 
-*/ -const letsEncryptX1 = ` ------BEGIN CERTIFICATE----- -MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw -TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh -cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 -WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu -ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY -MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc -h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ -0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U -A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW -T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH -B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC -B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv -KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn -OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn -jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw -qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI -rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV -HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq -hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL -ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ -3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK -NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 -ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur -TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC -jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc -oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq -4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA -mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d -emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= ------END CERTIFICATE----- -` - -var bakedInRootsOnce struct { - sync.Once - p *x509.CertPool -} - -func bakedInRoots() *x509.CertPool { - bakedInRootsOnce.Do(func() { - p := x509.NewCertPool() - if !p.AppendCertsFromPEM([]byte(letsEncryptX1)) { - panic("bogus PEM") - } - bakedInRootsOnce.p = p - }) - return bakedInRootsOnce.p -} diff --git a/net/tlsdial/tlsdial_test.go b/net/tlsdial/tlsdial_test.go index 26814ebbd..6723b82e0 100644 --- a/net/tlsdial/tlsdial_test.go +++ b/net/tlsdial/tlsdial_test.go @@ -4,37 +4,22 @@ package tlsdial import ( - "crypto/x509" "io" "net" "net/http" "os" "os/exec" "path/filepath" - "reflect" "runtime" "sync/atomic" "testing" "tailscale.com/health" + "tailscale.com/net/bakedroots" ) -func resetOnce() { - rv := reflect.ValueOf(&bakedInRootsOnce).Elem() - rv.Set(reflect.Zero(rv.Type())) -} - -func TestBakedInRoots(t *testing.T) { - resetOnce() - p := bakedInRoots() - got := p.Subjects() - if len(got) != 1 { - t.Errorf("subjects = %v; want 1", len(got)) - } -} - func TestFallbackRootWorks(t *testing.T) { - defer resetOnce() + defer bakedroots.ResetForTest(t, nil) const debug = false if runtime.GOOS != "linux" { @@ -69,14 +54,7 @@ func TestFallbackRootWorks(t *testing.T) { if err != nil { t.Fatal(err) } - resetOnce() - bakedInRootsOnce.Do(func() { - p := x509.NewCertPool() - if !p.AppendCertsFromPEM(caPEM) { - t.Fatal("failed to add") - } - bakedInRootsOnce.p = p - }) + bakedroots.ResetForTest(t, caPEM) ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { From 042ed6bf693da7061c37bf62b5d823a7b35ae9b5 Mon Sep 17 00:00:00 2001 
From: Brad Fitzpatrick Date: Tue, 21 Jan 2025 17:19:42 -0800 Subject: [PATCH 0365/1708] net/bakedroots: add LetsEncrypt ISRG Root X2 Updates #14690 Change-Id: Ib85e318d48450fc6534f7b0c1d4cc4335de7c0ff Signed-off-by: Brad Fitzpatrick --- net/bakedroots/bakedroots.go | 29 ++++++++++++++++++++++++++++- net/bakedroots/bakedroots_test.go | 23 ++++++++++++++++++++--- 2 files changed, 48 insertions(+), 4 deletions(-) diff --git a/net/bakedroots/bakedroots.go b/net/bakedroots/bakedroots.go index f7e4fa21e..42e70c0dd 100644 --- a/net/bakedroots/bakedroots.go +++ b/net/bakedroots/bakedroots.go @@ -16,7 +16,12 @@ import ( // // As of 2025-01-21, this includes only the LetsEncrypt ISRG Root X1 root. func Get() *x509.CertPool { - roots.once.Do(func() { roots.parsePEM([]byte(letsEncryptX1)) }) + roots.once.Do(func() { + roots.parsePEM(append( + []byte(letsEncryptX1), + letsEncryptX2..., + )) + }) return roots.p } @@ -120,3 +125,25 @@ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= -----END CERTIFICATE----- ` + +// letsEncryptX2 is the ISRG Root X2. +// +// Subject: O = Internet Security Research Group, CN = ISRG Root X2 +// Key type: ECDSA P-384 +// Validity: until 2035-09-04 (generated 2020-09-04) +const letsEncryptX2 = ` +-----BEGIN CERTIFICATE----- +MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQsw +CQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2gg +R3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00 +MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVTMSkwJwYDVQQKEyBJbnRlcm5ldCBT +ZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNSRyBSb290IFgyMHYw +EAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0HttwW ++1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9 +ItgKbppbd9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZI +zj0EAwMDaAAwZQIwe3lORlCEwkSHRhtFcP9Ymd70/aTSVaYgLXTWNLxBo1BfASdW +tL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5U6VR5CmD1/iQMVtCnwr1 +/q4AaOeMSQ+2b1tbFfLn +-----END CERTIFICATE----- +` diff --git a/net/bakedroots/bakedroots_test.go b/net/bakedroots/bakedroots_test.go index 9aa4366c8..8ba502a78 100644 --- a/net/bakedroots/bakedroots_test.go +++ b/net/bakedroots/bakedroots_test.go @@ -3,13 +3,30 @@ package bakedroots -import "testing" +import ( + "slices" + "testing" +) func TestBakedInRoots(t *testing.T) { ResetForTest(t, nil) p := Get() got := p.Subjects() - if len(got) != 1 { - t.Errorf("subjects = %v; want 1", len(got)) + if len(got) != 2 { + t.Errorf("subjects = %v; want 2", len(got)) + } + + // TODO(bradfitz): is there a way to easily make this test prettier without + // writing a DER decoder? I'm not seeing how. 
+ var name []string + for _, der := range got { + name = append(name, string(der)) + } + want := []string{ + "0O1\v0\t\x06\x03U\x04\x06\x13\x02US1)0'\x06\x03U\x04\n\x13 Internet Security Research Group1\x150\x13\x06\x03U\x04\x03\x13\fISRG Root X1", + "0O1\v0\t\x06\x03U\x04\x06\x13\x02US1)0'\x06\x03U\x04\n\x13 Internet Security Research Group1\x150\x13\x06\x03U\x04\x03\x13\fISRG Root X2", + } + if !slices.Equal(name, want) { + t.Errorf("subjects = %q; want %q", name, want) } } From cb3b1a1dcf84b467ec25821efd2faca0cb3af93f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 22 Jan 2025 06:20:14 -0800 Subject: [PATCH 0366/1708] tsweb: add missing debug pprof endpoints Updates tailscale/corp#26016 Change-Id: I47a5671e881cc092d83c1e992e2271f90afcae7e Signed-off-by: Brad Fitzpatrick --- tsweb/debug.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tsweb/debug.go b/tsweb/debug.go index 6db3f25cf..9e6ce4df4 100644 --- a/tsweb/debug.go +++ b/tsweb/debug.go @@ -52,15 +52,15 @@ func Debugger(mux *http.ServeMux) *DebugHandler { ret.KV("Version", version.Long()) ret.Handle("vars", "Metrics (Go)", expvar.Handler()) ret.Handle("varz", "Metrics (Prometheus)", http.HandlerFunc(promvarz.Handler)) + + // pprof.Index serves everything that runtime/pprof.Lookup finds: + // goroutine, threadcreate, heap, allocs, block, mutex ret.Handle("pprof/", "pprof (index)", http.HandlerFunc(pprof.Index)) - // the CPU profile handler is special because it responds - // streamily, unlike every other pprof handler. This means it's - // not made available through pprof.Index the way all the other - // pprof types are, you have to register the CPU profile handler - // separately. Use HandleSilent for that to not pollute the human - // debug list with a link that produces streaming line noise if - // you click it. + // But register the other ones from net/http/pprof directly: + ret.HandleSilent("pprof/cmdline", http.HandlerFunc(pprof.Cmdline)) ret.HandleSilent("pprof/profile", http.HandlerFunc(pprof.Profile)) + ret.HandleSilent("pprof/symbol", http.HandlerFunc(pprof.Symbol)) + ret.HandleSilent("pprof/trace", http.HandlerFunc(pprof.Trace)) ret.URL("/debug/pprof/goroutine?debug=1", "Goroutines (collapsed)") ret.URL("/debug/pprof/goroutine?debug=2", "Goroutines (full)") ret.Handle("gc", "force GC", http.HandlerFunc(gcHandler)) From 8c8750f1b3e69aa3ca5ac0ebd15f3b406818c5d2 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Mon, 20 Jan 2025 12:02:53 -0500 Subject: [PATCH 0367/1708] ipn/ipnlocal: Support TCP and Web VIP services This commit intend to provide support for TCP and Web VIP services and also allow user to use Tun for VIP services if they want to. The commit includes: 1.Setting TCP intercept function for VIP Services. 2.Update netstack to send packet written from WG to netStack handler for VIP service. 3.Return correct TCP hander for VIP services when netstack acceptTCP. This commit also includes unit tests for if the local backend setServeConfig would set correct TCP intercept function and test if a hander gets returned when getting TCPHandlerForDst. The shouldProcessInbound check is not unit tested since the test result just depends on mocked functions. There should be an integration test to cover shouldProcessInbound and if the returned TCP handler actually does what the serveConfig says. 
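For reference, the intercept check installed for VIP services has roughly this shape (a simplified, self-contained sketch of generateInterceptVIPServicesTCPPortFunc, not the exact code): each VIP address maps to a per-service port predicate, and a packet is only handled in-process when both the destination address and port match.

    package main

    import (
        "fmt"
        "net/netip"
    )

    // interceptFunc returns the per-packet check: look up the destination
    // address among the VIP service addresses, then ask that service's
    // port predicate whether the destination port is being served.
    func interceptFunc(svcAddrPorts map[netip.Addr]func(uint16) bool) func(netip.AddrPort) bool {
        return func(ap netip.AddrPort) bool {
            if f, ok := svcAddrPorts[ap.Addr()]; ok {
                return f(ap.Port())
            }
            return false
        }
    }

    func main() {
        served := map[uint16]bool{80: true, 443: true}
        svcAddrPorts := map[netip.Addr]func(uint16) bool{
            netip.MustParseAddr("100.101.101.101"): func(p uint16) bool { return served[p] },
        }
        should := interceptFunc(svcAddrPorts)
        fmt.Println(should(netip.MustParseAddrPort("100.101.101.101:80"))) // true: VIP addr and served port
        fmt.Println(should(netip.MustParseAddrPort("100.101.101.101:22"))) // false: port not served
        fmt.Println(should(netip.MustParseAddrPort("100.99.99.99:80")))    // false: not a VIP service addr
    }
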
Updates tailscale/corp#24604 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- ipn/ipnlocal/local.go | 105 +++++++++++++-- ipn/ipnlocal/local_test.go | 215 ++++++++++++++++++++++++++++++ ipn/ipnlocal/serve.go | 123 ++++++++++++++++- ipn/ipnlocal/serve_test.go | 197 +++++++++++++++++++++++++++ ipn/serve.go | 54 +++++++- tailcfg/tailcfg.go | 16 +++ types/netmap/IPServiceMappings.go | 19 +++ types/netmap/netmap.go | 48 +++++++ wgengine/netstack/netstack.go | 27 +++- 9 files changed, 783 insertions(+), 21 deletions(-) create mode 100644 types/netmap/IPServiceMappings.go diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index bb84012fd..05f56fcbd 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -228,10 +228,11 @@ type LocalBackend struct { // is never called. getTCPHandlerForFunnelFlow func(srcAddr netip.AddrPort, dstPort uint16) (handler func(net.Conn)) - filterAtomic atomic.Pointer[filter.Filter] - containsViaIPFuncAtomic syncs.AtomicValue[func(netip.Addr) bool] - shouldInterceptTCPPortAtomic syncs.AtomicValue[func(uint16) bool] - numClientStatusCalls atomic.Uint32 + filterAtomic atomic.Pointer[filter.Filter] + containsViaIPFuncAtomic syncs.AtomicValue[func(netip.Addr) bool] + shouldInterceptTCPPortAtomic syncs.AtomicValue[func(uint16) bool] + shouldInterceptVIPServicesTCPPortAtomic syncs.AtomicValue[func(netip.AddrPort) bool] + numClientStatusCalls atomic.Uint32 // goTracker accounts for all goroutines started by LocalBacked, primarily // for testing and graceful shutdown purposes. @@ -317,8 +318,9 @@ type LocalBackend struct { offlineAutoUpdateCancel func() // ServeConfig fields. (also guarded by mu) - lastServeConfJSON mem.RO // last JSON that was parsed into serveConfig - serveConfig ipn.ServeConfigView // or !Valid if none + lastServeConfJSON mem.RO // last JSON that was parsed into serveConfig + serveConfig ipn.ServeConfigView // or !Valid if none + ipVIPServiceMap netmap.IPServiceMappings // map of VIPService IPs to their corresponding service names webClient webClient webClientListeners map[netip.AddrPort]*localListener // listeners for local web client traffic @@ -523,6 +525,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.e.SetJailedFilter(noneFilter) b.setTCPPortsIntercepted(nil) + b.setVIPServicesTCPPortsIntercepted(nil) b.statusChanged = sync.NewCond(&b.statusLock) b.e.SetStatusCallback(b.setWgengineStatus) @@ -3362,10 +3365,7 @@ func (b *LocalBackend) clearMachineKeyLocked() error { return nil } -// setTCPPortsIntercepted populates b.shouldInterceptTCPPortAtomic with an -// efficient func for ShouldInterceptTCPPort to use, which is called on every -// incoming packet. -func (b *LocalBackend) setTCPPortsIntercepted(ports []uint16) { +func generateInterceptTCPPortFunc(ports []uint16) func(uint16) bool { slices.Sort(ports) ports = slices.Compact(ports) var f func(uint16) bool @@ -3396,7 +3396,61 @@ func (b *LocalBackend) setTCPPortsIntercepted(ports []uint16) { } } } - b.shouldInterceptTCPPortAtomic.Store(f) + return f +} + +// setTCPPortsIntercepted populates b.shouldInterceptTCPPortAtomic with an +// efficient func for ShouldInterceptTCPPort to use, which is called on every +// incoming packet. 
+func (b *LocalBackend) setTCPPortsIntercepted(ports []uint16) { + b.shouldInterceptTCPPortAtomic.Store(generateInterceptTCPPortFunc(ports)) +} + +func generateInterceptVIPServicesTCPPortFunc(svcAddrPorts map[netip.Addr]func(uint16) bool) func(netip.AddrPort) bool { + return func(ap netip.AddrPort) bool { + if f, ok := svcAddrPorts[ap.Addr()]; ok { + return f(ap.Port()) + } + return false + } +} + +// setVIPServicesTCPPortsIntercepted populates b.shouldInterceptVIPServicesTCPPortAtomic with an +// efficient func for ShouldInterceptTCPPort to use, which is called on every incoming packet. +func (b *LocalBackend) setVIPServicesTCPPortsIntercepted(svcPorts map[string][]uint16) { + b.mu.Lock() + defer b.mu.Unlock() + b.setVIPServicesTCPPortsInterceptedLocked(svcPorts) +} + +func (b *LocalBackend) setVIPServicesTCPPortsInterceptedLocked(svcPorts map[string][]uint16) { + if len(svcPorts) == 0 { + b.shouldInterceptVIPServicesTCPPortAtomic.Store(func(netip.AddrPort) bool { return false }) + return + } + nm := b.netMap + if nm == nil { + b.logf("can't set intercept function for Service TCP Ports, netMap is nil") + return + } + vipServiceIPMap := nm.GetVIPServiceIPMap() + if len(vipServiceIPMap) == 0 { + // No approved VIP Services + return + } + + svcAddrPorts := make(map[netip.Addr]func(uint16) bool) + // Only set the intercept function if the service has been assigned a VIP. + for svcName, ports := range svcPorts { + if addrs, ok := vipServiceIPMap[svcName]; ok { + interceptFn := generateInterceptTCPPortFunc(ports) + for _, addr := range addrs { + svcAddrPorts[addr] = interceptFn + } + } + } + + b.shouldInterceptVIPServicesTCPPortAtomic.Store(generateInterceptVIPServicesTCPPortFunc(svcAddrPorts)) } // setAtomicValuesFromPrefsLocked populates sshAtomicBool, containsViaIPFuncAtomic, @@ -3409,6 +3463,7 @@ func (b *LocalBackend) setAtomicValuesFromPrefsLocked(p ipn.PrefsView) { if !p.Valid() { b.containsViaIPFuncAtomic.Store(ipset.FalseContainsIPFunc()) b.setTCPPortsIntercepted(nil) + b.setVIPServicesTCPPortsInterceptedLocked(nil) b.lastServeConfJSON = mem.B(nil) b.serveConfig = ipn.ServeConfigView{} } else { @@ -4159,6 +4214,11 @@ func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c } } + // TODO(corp#26001): Get handler for VIP services and Local IPs using + // the same function. + if handler := b.tcpHandlerForVIPService(dst, src); handler != nil { + return handler, opts + } // Then handle external connections to the local IP. if !b.isLocalIP(dst.Addr()) { return nil, nil @@ -5676,6 +5736,7 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { netns.SetDisableBindConnToInterface(nm.HasCap(tailcfg.CapabilityDebugDisableBindConnToInterface)) b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(b.pm.CurrentPrefs()) + b.ipVIPServiceMap = nm.GetIPVIPServiceMap() if nm == nil { b.nodeByAddr = nil @@ -5962,6 +6023,7 @@ func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) { // b.mu must be held. func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn.PrefsView) { handlePorts := make([]uint16, 0, 4) + vipServicesPorts := make(map[string][]uint16) if prefs.Valid() && prefs.RunSSH() && envknob.CanSSHD() { handlePorts = append(handlePorts, 22) @@ -5985,6 +6047,20 @@ func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn. } handlePorts = append(handlePorts, servePorts...) 
+ for svc, cfg := range b.serveConfig.Services().All() { + servicePorts := make([]uint16, 0, 3) + for port := range cfg.TCP().All() { + if port > 0 { + servicePorts = append(servicePorts, uint16(port)) + } + } + if _, ok := vipServicesPorts[svc]; !ok { + vipServicesPorts[svc] = servicePorts + } else { + vipServicesPorts[svc] = append(vipServicesPorts[svc], servicePorts...) + } + } + b.setServeProxyHandlersLocked() // don't listen on netmap addresses if we're in userspace mode @@ -5996,6 +6072,7 @@ func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn. // Update funnel info in hostinfo and kick off control update if needed. b.updateIngressLocked() b.setTCPPortsIntercepted(handlePorts) + b.setVIPServicesTCPPortsInterceptedLocked(vipServicesPorts) } // updateIngressLocked updates the hostinfo.WireIngress and hostinfo.IngressEnabled fields and kicks off a Hostinfo @@ -6854,6 +6931,12 @@ func (b *LocalBackend) ShouldInterceptTCPPort(port uint16) bool { return b.shouldInterceptTCPPortAtomic.Load()(port) } +// ShouldInterceptVIPServiceTCPPort reports whether the given TCP port number +// to a VIP service should be intercepted by Tailscaled and handled in-process. +func (b *LocalBackend) ShouldInterceptVIPServiceTCPPort(ap netip.AddrPort) bool { + return b.shouldInterceptVIPServicesTCPPortAtomic.Load()(ap) +} + // SwitchProfile switches to the profile with the given id. // It will restart the backend on success. // If the profile is not known, it returns an errProfileNotFound. diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 415791c60..f851bb0f8 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2615,6 +2615,150 @@ func TestOnTailnetDefaultAutoUpdate(t *testing.T) { func TestTCPHandlerForDst(t *testing.T) { b := newTestBackend(t) + tests := []struct { + desc string + dst string + intercept bool + }{ + { + desc: "intercept port 80 (Web UI) on quad100 IPv4", + dst: "100.100.100.100:80", + intercept: true, + }, + { + desc: "intercept port 80 (Web UI) on quad100 IPv6", + dst: "[fd7a:115c:a1e0::53]:80", + intercept: true, + }, + { + desc: "don't intercept port 80 on local ip", + dst: "100.100.103.100:80", + intercept: false, + }, + { + desc: "intercept port 8080 (Taildrive) on quad100 IPv4", + dst: "[fd7a:115c:a1e0::53]:8080", + intercept: true, + }, + { + desc: "don't intercept port 8080 on local ip", + dst: "100.100.103.100:8080", + intercept: false, + }, + { + desc: "don't intercept port 9080 on quad100 IPv4", + dst: "100.100.100.100:9080", + intercept: false, + }, + { + desc: "don't intercept port 9080 on quad100 IPv6", + dst: "[fd7a:115c:a1e0::53]:9080", + intercept: false, + }, + { + desc: "don't intercept port 9080 on local ip", + dst: "100.100.103.100:9080", + intercept: false, + }, + } + for _, tt := range tests { + t.Run(tt.dst, func(t *testing.T) { + t.Log(tt.desc) + src := netip.MustParseAddrPort("100.100.102.100:51234") + h, _ := b.TCPHandlerForDst(src, netip.MustParseAddrPort(tt.dst)) + if !tt.intercept && h != nil { + t.Error("intercepted traffic we shouldn't have") + } else if tt.intercept && h == nil { + t.Error("failed to intercept traffic we should have") + } + }) + } +} + +func TestTCPHandlerForDstWithVIPService(t *testing.T) { + b := newTestBackend(t) + svcIPMap := tailcfg.ServiceIPMappings{ + "svc:foo": []netip.Addr{ + netip.MustParseAddr("100.101.101.101"), + netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:cd96:6565:6565"), + }, + "svc:bar": []netip.Addr{ + netip.MustParseAddr("100.99.99.99"), + 
netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:cd96:626b:628b"), + }, + "svc:baz": []netip.Addr{ + netip.MustParseAddr("100.133.133.133"), + netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:cd96:8585:8585"), + }, + } + svcIPMapJSON, err := json.Marshal(svcIPMap) + if err != nil { + t.Fatal(err) + } + b.setNetMapLocked( + &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Name: "example.ts.net", + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{tailcfg.RawMessage(svcIPMapJSON)}, + }, + }).View(), + UserProfiles: map[tailcfg.UserID]tailcfg.UserProfile{ + tailcfg.UserID(1): { + LoginName: "someone@example.com", + DisplayName: "Some One", + ProfilePicURL: "https://example.com/photo.jpg", + }, + }, + }, + ) + + err = b.setServeConfigLocked( + &ipn.ServeConfig{ + Services: map[string]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 882: {HTTP: true}, + 883: {HTTPS: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.example.ts.net:882": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://127.0.0.1:3000"}, + }, + }, + "foo.example.ts.net:883": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Text: "test"}, + }, + }, + }, + }, + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 990: {TCPForward: "127.0.0.1:8443"}, + 991: {TCPForward: "127.0.0.1:5432", TerminateTLS: "bar.test.ts.net"}, + }, + }, + "svc:qux": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 600: {HTTPS: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "qux.example.ts.net:600": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Text: "qux"}, + }, + }, + }, + }, + }, + }, + "", + ) + if err != nil { + t.Fatal(err) + } tests := []struct { desc string @@ -2666,6 +2810,77 @@ func TestTCPHandlerForDst(t *testing.T) { dst: "100.100.103.100:9080", intercept: false, }, + // VIP service destinations + { + desc: "intercept port 882 (HTTP) on service foo IPv4", + dst: "100.101.101.101:882", + intercept: true, + }, + { + desc: "intercept port 882 (HTTP) on service foo IPv6", + dst: "[fd7a:115c:a1e0:ab12:4843:cd96:6565:6565]:882", + intercept: true, + }, + { + desc: "intercept port 883 (HTTPS) on service foo IPv4", + dst: "100.101.101.101:883", + intercept: true, + }, + { + desc: "intercept port 883 (HTTPS) on service foo IPv6", + dst: "[fd7a:115c:a1e0:ab12:4843:cd96:6565:6565]:883", + intercept: true, + }, + { + desc: "intercept port 990 (TCPForward) on service bar IPv4", + dst: "100.99.99.99:990", + intercept: true, + }, + { + desc: "intercept port 990 (TCPForward) on service bar IPv6", + dst: "[fd7a:115c:a1e0:ab12:4843:cd96:626b:628b]:990", + intercept: true, + }, + { + desc: "intercept port 991 (TCPForward with TerminateTLS) on service bar IPv4", + dst: "100.99.99.99:990", + intercept: true, + }, + { + desc: "intercept port 991 (TCPForward with TerminateTLS) on service bar IPv6", + dst: "[fd7a:115c:a1e0:ab12:4843:cd96:626b:628b]:990", + intercept: true, + }, + { + desc: "don't intercept port 4444 on service foo IPv4", + dst: "100.101.101.101:4444", + intercept: false, + }, + { + desc: "don't intercept port 4444 on service foo IPv6", + dst: "[fd7a:115c:a1e0:ab12:4843:cd96:6565:6565]:4444", + intercept: false, + }, + { + desc: "don't intercept port 600 on unknown service IPv4", + dst: "100.22.22.22:883", + intercept: false, + }, + { + desc: "don't intercept port 600 on unknown service IPv6", + dst: "[fd7a:115c:a1e0:ab12:4843:cd96:626b:628b]:883", + intercept: false, + }, + { + desc: "don't intercept port 600 (HTTPS) on service baz IPv4", + dst: 
"100.133.133.133:600", + intercept: false, + }, + { + desc: "don't intercept port 600 (HTTPS) on service baz IPv6", + dst: "[fd7a:115c:a1e0:ab12:4843:cd96:8585:8585]:600", + intercept: false, + }, } for _, tt := range tests { diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index c144fa529..c20172a42 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -54,8 +54,9 @@ var ErrETagMismatch = errors.New("etag mismatch") var serveHTTPContextKey ctxkey.Key[*serveHTTPContext] type serveHTTPContext struct { - SrcAddr netip.AddrPort - DestPort uint16 + SrcAddr netip.AddrPort + ForVIPService bool + DestPort uint16 // provides funnel-specific context, nil if not funneled Funnel *funnelFlow @@ -275,6 +276,12 @@ func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string return errors.New("can't reconfigure tailscaled when using a config file; config file is locked") } + if config != nil { + if err := config.CheckValidServicesConfig(); err != nil { + return err + } + } + nm := b.netMap if nm == nil { return errors.New("netMap is nil") @@ -432,6 +439,105 @@ func (b *LocalBackend) HandleIngressTCPConn(ingressPeer tailcfg.NodeView, target handler(c) } +// tcpHandlerForVIPService returns a handler for a TCP connection to a VIP service +// that is being served via the ipn.ServeConfig. It returns nil if the destination +// address is not a VIP service or if the VIP service does not have a TCP handler set. +func (b *LocalBackend) tcpHandlerForVIPService(dstAddr, srcAddr netip.AddrPort) (handler func(net.Conn) error) { + b.mu.Lock() + sc := b.serveConfig + ipVIPServiceMap := b.ipVIPServiceMap + b.mu.Unlock() + + if !sc.Valid() { + return nil + } + + dport := dstAddr.Port() + + dstSvc, ok := ipVIPServiceMap[dstAddr.Addr()] + if !ok { + return nil + } + + tcph, ok := sc.FindServiceTCP(dstSvc, dstAddr.Port()) + if !ok { + b.logf("The destination service doesn't have a TCP handler set.") + return nil + } + + if tcph.HTTPS() || tcph.HTTP() { + hs := &http.Server{ + Handler: http.HandlerFunc(b.serveWebHandler), + BaseContext: func(_ net.Listener) context.Context { + return serveHTTPContextKey.WithValue(context.Background(), &serveHTTPContext{ + SrcAddr: srcAddr, + ForVIPService: true, + DestPort: dport, + }) + }, + } + if tcph.HTTPS() { + // TODO(kevinliang10): just leaving this TLS cert creation as if we don't have other + // hostnames, but for services this getTLSServeCetForPort will need a version that also take + // in the hostname. How to store the TLS cert is still being discussed. 
+ hs.TLSConfig = &tls.Config{ + GetCertificate: b.getTLSServeCertForPort(dport, true), + } + return func(c net.Conn) error { + return hs.ServeTLS(netutil.NewOneConnListener(c, nil), "", "") + } + } + + return func(c net.Conn) error { + return hs.Serve(netutil.NewOneConnListener(c, nil)) + } + } + + if backDst := tcph.TCPForward(); backDst != "" { + return func(conn net.Conn) error { + defer conn.Close() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + backConn, err := b.dialer.SystemDial(ctx, "tcp", backDst) + cancel() + if err != nil { + b.logf("localbackend: failed to TCP proxy port %v (from %v) to %s: %v", dport, srcAddr, backDst, err) + return nil + } + defer backConn.Close() + if sni := tcph.TerminateTLS(); sni != "" { + conn = tls.Server(conn, &tls.Config{ + GetCertificate: func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + pair, err := b.GetCertPEM(ctx, sni) + if err != nil { + return nil, err + } + cert, err := tls.X509KeyPair(pair.CertPEM, pair.KeyPEM) + if err != nil { + return nil, err + } + return &cert, nil + }, + }) + } + + errc := make(chan error, 1) + go func() { + _, err := io.Copy(backConn, conn) + errc <- err + }() + go func() { + _, err := io.Copy(conn, backConn) + errc <- err + }() + return <-errc + } + } + + return nil +} + // tcpHandlerForServe returns a handler for a TCP connection to be served via // the ipn.ServeConfig. The funnelFlow can be nil if this is not a funneled // connection. @@ -462,7 +568,7 @@ func (b *LocalBackend) tcpHandlerForServe(dport uint16, srcAddr netip.AddrPort, } if tcph.HTTPS() { hs.TLSConfig = &tls.Config{ - GetCertificate: b.getTLSServeCertForPort(dport), + GetCertificate: b.getTLSServeCertForPort(dport, false), } return func(c net.Conn) error { return hs.ServeTLS(netutil.NewOneConnListener(c, nil), "", "") @@ -542,7 +648,7 @@ func (b *LocalBackend) getServeHandler(r *http.Request) (_ ipn.HTTPHandlerView, b.logf("[unexpected] localbackend: no serveHTTPContext in request") return z, "", false } - wsc, ok := b.webServerConfig(hostname, sctx.DestPort) + wsc, ok := b.webServerConfig(hostname, sctx.ForVIPService, sctx.DestPort) if !ok { return z, "", false } @@ -900,7 +1006,7 @@ func allNumeric(s string) bool { return s != "" } -func (b *LocalBackend) webServerConfig(hostname string, port uint16) (c ipn.WebServerConfigView, ok bool) { +func (b *LocalBackend) webServerConfig(hostname string, forVIPService bool, port uint16) (c ipn.WebServerConfigView, ok bool) { key := ipn.HostPort(fmt.Sprintf("%s:%v", hostname, port)) b.mu.Lock() @@ -909,15 +1015,18 @@ func (b *LocalBackend) webServerConfig(hostname string, port uint16) (c ipn.WebS if !b.serveConfig.Valid() { return c, false } + if forVIPService { + return b.serveConfig.FindServiceWeb(key) + } return b.serveConfig.FindWeb(key) } -func (b *LocalBackend) getTLSServeCertForPort(port uint16) func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { +func (b *LocalBackend) getTLSServeCertForPort(port uint16, forVIPService bool) func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { return func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { if hi == nil || hi.ServerName == "" { return nil, errors.New("no SNI ServerName") } - _, ok := b.webServerConfig(hi.ServerName, port) + _, ok := b.webServerConfig(hi.ServerName, forVIPService, port) if !ok { return nil, errors.New("no webserver configured for name/port") } diff --git a/ipn/ipnlocal/serve_test.go 
b/ipn/ipnlocal/serve_test.go index 73e66c2b9..f2ea8e5cd 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -296,6 +296,203 @@ func TestServeConfigForeground(t *testing.T) { } } +// TestServeConfigServices tests the side effects of setting the +// Services field in a ServeConfig. The Services field is a map +// of all services the current service host is serving. Unlike what we +// serve for node itself, there is no foreground and no local handlers +// for the services. So the only things we need to test are if the +// services configured are valid and if they correctly set intercept +// functions for netStack. +func TestServeConfigServices(t *testing.T) { + b := newTestBackend(t) + svcIPMap := tailcfg.ServiceIPMappings{ + "svc:foo": []netip.Addr{ + netip.MustParseAddr("100.101.101.101"), + netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:cd96:6565:6565"), + }, + "svc:bar": []netip.Addr{ + netip.MustParseAddr("100.99.99.99"), + netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:cd96:626b:628b"), + }, + } + svcIPMapJSON, err := json.Marshal(svcIPMap) + if err != nil { + t.Fatal(err) + } + + b.netMap = &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Name: "example.ts.net", + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{tailcfg.RawMessage(svcIPMapJSON)}, + }, + }).View(), + UserProfiles: map[tailcfg.UserID]tailcfg.UserProfile{ + tailcfg.UserID(1): { + LoginName: "someone@example.com", + DisplayName: "Some One", + ProfilePicURL: "https://example.com/photo.jpg", + }, + }, + } + + tests := []struct { + name string + conf *ipn.ServeConfig + expectedErr error + packetDstAddrPort []netip.AddrPort + intercepted bool + }{ + { + name: "no-services", + conf: &ipn.ServeConfig{}, + packetDstAddrPort: []netip.AddrPort{ + netip.MustParseAddrPort("100.101.101.101:443"), + }, + intercepted: false, + }, + { + name: "one-incorrectly-configured-service", + conf: &ipn.ServeConfig{ + Services: map[string]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Tun: true, + }, + }, + }, + expectedErr: ipn.ErrServiceConfigHasBothTCPAndTun, + }, + { + // one correctly configured service with packet should be intercepted + name: "one-service-intercept-packet", + conf: &ipn.ServeConfig{ + Services: map[string]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + 81: {HTTPS: true}, + }, + }, + }, + }, + packetDstAddrPort: []netip.AddrPort{ + netip.MustParseAddrPort("100.101.101.101:80"), + netip.MustParseAddrPort("[fd7a:115c:a1e0:ab12:4843:cd96:6565:6565]:80"), + }, + intercepted: true, + }, + { + // one correctly configured service with packet should not be intercepted + name: "one-service-not-intercept-packet", + conf: &ipn.ServeConfig{ + Services: map[string]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + 81: {HTTPS: true}, + }, + }, + }, + }, + packetDstAddrPort: []netip.AddrPort{ + netip.MustParseAddrPort("100.99.99.99:80"), + netip.MustParseAddrPort("[fd7a:115c:a1e0:ab12:4843:cd96:626b:628b]:80"), + netip.MustParseAddrPort("100.101.101.101:82"), + netip.MustParseAddrPort("[fd7a:115c:a1e0:ab12:4843:cd96:6565:6565]:82"), + }, + intercepted: false, + }, + { + //multiple correctly configured service with packet should be intercepted + name: "multiple-service-intercept-packet", + conf: &ipn.ServeConfig{ + Services: map[string]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + 81: {HTTPS: 
true}, + }, + }, + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + 81: {HTTPS: true}, + 82: {HTTPS: true}, + }, + }, + }, + }, + packetDstAddrPort: []netip.AddrPort{ + netip.MustParseAddrPort("100.99.99.99:80"), + netip.MustParseAddrPort("[fd7a:115c:a1e0:ab12:4843:cd96:626b:628b]:80"), + netip.MustParseAddrPort("100.101.101.101:81"), + netip.MustParseAddrPort("[fd7a:115c:a1e0:ab12:4843:cd96:6565:6565]:81"), + }, + intercepted: true, + }, + { + // multiple correctly configured service with packet should not be intercepted + name: "multiple-service-not-intercept-packet", + conf: &ipn.ServeConfig{ + Services: map[string]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + 81: {HTTPS: true}, + }, + }, + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + 81: {HTTPS: true}, + 82: {HTTPS: true}, + }, + }, + }, + }, + packetDstAddrPort: []netip.AddrPort{ + // ips in capmap but port is not hosting service + netip.MustParseAddrPort("100.99.99.99:77"), + netip.MustParseAddrPort("[fd7a:115c:a1e0:ab12:4843:cd96:626b:628b]:77"), + netip.MustParseAddrPort("100.101.101.101:85"), + netip.MustParseAddrPort("[fd7a:115c:a1e0:ab12:4843:cd96:6565:6565]:85"), + // ips not in capmap + netip.MustParseAddrPort("100.102.102.102:80"), + netip.MustParseAddrPort("[fd7a:115c:a1e0:ab12:4843:cd96:6666:6666]:80"), + }, + intercepted: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := b.SetServeConfig(tt.conf, "") + if err != nil && tt.expectedErr != nil { + if !errors.Is(err, tt.expectedErr) { + t.Fatalf("expected error %v,\n got %v", tt.expectedErr, err) + } + return + } + if err != nil { + t.Fatal(err) + } + for _, addrPort := range tt.packetDstAddrPort { + if tt.intercepted != b.ShouldInterceptVIPServiceTCPPort(addrPort) { + if tt.intercepted { + t.Fatalf("expected packet to be intercepted") + } else { + t.Fatalf("expected packet not to be intercepted") + } + } + } + }) + } + +} + func TestServeConfigETag(t *testing.T) { b := newTestBackend(t) diff --git a/ipn/serve.go b/ipn/serve.go index 176c6d984..472b327a3 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -55,8 +55,8 @@ type ServeConfig struct { // keyed by mount point ("/", "/foo", etc) Web map[HostPort]*WebServerConfig `json:",omitempty"` - // Services maps from service name to a ServiceConfig. Which describes the - // L3, L4, and L7 forwarding information for the service. + // Services maps from service name (in the form "svc:dns-label") to a ServiceConfig. + // Which describes the L3, L4, and L7 forwarding information for the service. Services map[string]*ServiceConfig `json:",omitempty"` // AllowFunnel is the set of SNI:port values for which funnel @@ -607,7 +607,32 @@ func (v ServeConfigView) Webs() iter.Seq2[HostPort, WebServerConfigView] { } } } + for _, service := range v.Services().All() { + for k, v := range service.Web().All() { + if !yield(k, v) { + return + } + } + } + } +} + +// FindServiceTCP return the TCPPortHandlerView for the given service name and port. 
+func (v ServeConfigView) FindServiceTCP(svcName string, port uint16) (res TCPPortHandlerView, ok bool) { + svcCfg, ok := v.Services().GetOk(svcName) + if !ok { + return res, ok } + return svcCfg.TCP().GetOk(port) +} + +func (v ServeConfigView) FindServiceWeb(hp HostPort) (res WebServerConfigView, ok bool) { + for _, service := range v.Services().All() { + if res, ok := service.Web().GetOk(hp); ok { + return res, ok + } + } + return res, ok } // FindTCP returns the first TCP that matches with the given port. It @@ -662,6 +687,17 @@ func (v ServeConfigView) HasFunnelForTarget(target HostPort) bool { return false } +// CheckValidServicesConfig reports whether the ServeConfig has +// invalid service configurations. +func (sc *ServeConfig) CheckValidServicesConfig() error { + for svcName, service := range sc.Services { + if err := service.checkValidConfig(); err != nil { + return fmt.Errorf("invalid service configuration for %q: %w", svcName, err) + } + } + return nil +} + // ServicePortRange returns the list of tailcfg.ProtoPortRange that represents // the proto/ports pairs that are being served by the service. // @@ -699,3 +735,17 @@ func (v ServiceConfigView) ServicePortRange() []tailcfg.ProtoPortRange { } return ranges } + +// ErrServiceConfigHasBothTCPAndTun signals that a service +// in Tun mode cannot also has TCP or Web handlers set. +var ErrServiceConfigHasBothTCPAndTun = errors.New("the VIP Service configuration can not set TUN at the same time as TCP or Web") + +// checkValidConfig checks if the service configuration is valid. +// Currently, the only invalid configuration is when the service is in Tun mode +// and has TCP or Web handlers. +func (v *ServiceConfig) checkValidConfig() error { + if v.Tun && (len(v.TCP) > 0 || len(v.Web) > 0) { + return ErrServiceConfigHasBothTCPAndTun + } + return nil +} diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 937f619e6..b69139d34 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2997,3 +2997,19 @@ const LBHeader = "Ts-Lb" // correspond to those IPs. Any services that don't correspond to a service // this client is hosting can be ignored. type ServiceIPMappings map[string][]netip.Addr + +// IPServiceMappings maps IP addresses to service names. This is the inverse of +// [ServiceIPMappings], and is used to inform clients which services is an VIP +// address associated with. This is set to b.ipVIPServiceMap every time the +// netmap is updated. This is used to reduce the cost for looking up the service +// name for the dst IP address in the netStack packet processing workflow. +// +// This is of the form: +// +// { +// "100.65.32.1": "svc:samba", +// "fd7a:115c:a1e0::1234": "svc:samba", +// "100.102.42.3": "svc:web", +// "fd7a:115c:a1e0::abcd": "svc:web", +// } +type IPServiceMappings map[netip.Addr]string diff --git a/types/netmap/IPServiceMappings.go b/types/netmap/IPServiceMappings.go new file mode 100644 index 000000000..0cd207fb8 --- /dev/null +++ b/types/netmap/IPServiceMappings.go @@ -0,0 +1,19 @@ +package netmap + +import "net/netip" + +// IPServiceMappings maps IP addresses to service names. This is the inverse of +// [ServiceIPMappings], and is used to inform clients which services is an VIP +// address associated with. This is set to b.ipVIPServiceMap every time the +// netmap is updated. This is used to reduce the cost for looking up the service +// name for the dst IP address in the netStack packet processing workflow. 
+// +// This is of the form: +// +// { +// "100.65.32.1": "svc:samba", +// "fd7a:115c:a1e0::1234": "svc:samba", +// "100.102.42.3": "svc:web", +// "fd7a:115c:a1e0::abcd": "svc:web", +// } +type IPServiceMappings map[netip.Addr]string diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index 7662e145e..c9f909b1a 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -101,6 +101,54 @@ func (nm *NetworkMap) GetAddresses() views.Slice[netip.Prefix] { return nm.SelfNode.Addresses() } +// GetVIPServiceIPMap returns a map of service names to the slice of +// VIP addresses that correspond to the service. The service names are +// with the prefix "svc:". +// +// TODO(corp##25997): cache the result of decoding the capmap so that +// we don't have to decode it multiple times after each netmap update. +func (nm *NetworkMap) GetVIPServiceIPMap() tailcfg.ServiceIPMappings { + if nm == nil { + return nil + } + if !nm.SelfNode.Valid() { + return nil + } + + ipMaps, err := tailcfg.UnmarshalNodeCapJSON[tailcfg.ServiceIPMappings](nm.SelfNode.CapMap().AsMap(), tailcfg.NodeAttrServiceHost) + if len(ipMaps) != 1 || err != nil { + return nil + } + + return ipMaps[0] +} + +// GetIPVIPServiceMap returns a map of VIP addresses to the service +// names that has the VIP address. The service names are with the +// prefix "svc:". +func (nm *NetworkMap) GetIPVIPServiceMap() IPServiceMappings { + var res IPServiceMappings + if nm == nil { + return res + } + + if !nm.SelfNode.Valid() { + return res + } + + serviceIPMap := nm.GetVIPServiceIPMap() + if serviceIPMap == nil { + return res + } + res = make(IPServiceMappings) + for svc, addrs := range serviceIPMap { + for _, addr := range addrs { + res[addr] = svc + } + } + return res +} + // AnyPeersAdvertiseRoutes reports whether any peer is advertising non-exit node routes. func (nm *NetworkMap) AnyPeersAdvertiseRoutes() bool { for _, p := range nm.Peers { diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 20eac06e6..0b8c67b06 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -50,6 +50,7 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/nettype" "tailscale.com/util/clientmetric" + "tailscale.com/util/set" "tailscale.com/version" "tailscale.com/wgengine" "tailscale.com/wgengine/filter" @@ -200,6 +201,8 @@ type Impl struct { // updates. atomicIsLocalIPFunc syncs.AtomicValue[func(netip.Addr) bool] + atomicIsVIPServiceIPFunc syncs.AtomicValue[func(netip.Addr) bool] + // forwardDialFunc, if non-nil, is the net.Dialer.DialContext-style // function that is used to make outgoing connections when forwarding a // TCP connection to another host (e.g. in subnet router mode). @@ -387,6 +390,7 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi } ns.ctx, ns.ctxCancel = context.WithCancel(context.Background()) ns.atomicIsLocalIPFunc.Store(ipset.FalseContainsIPFunc()) + ns.atomicIsVIPServiceIPFunc.Store(ipset.FalseContainsIPFunc()) ns.tundev.PostFilterPacketInboundFromWireGuard = ns.injectInbound ns.tundev.PreFilterPacketOutboundToWireGuardNetstackIntercept = ns.handleLocalPackets stacksForMetrics.Store(ns, struct{}{}) @@ -532,7 +536,7 @@ func (ns *Impl) wrapTCPProtocolHandler(h protocolHandlerFunc) protocolHandlerFun // Dynamically reconfigure ns's subnet addresses as needed for // outbound traffic. 
- if !ns.isLocalIP(localIP) { + if !ns.isLocalIP(localIP) && !ns.isVIPServiceIP(localIP) { ns.addSubnetAddress(localIP) } @@ -621,10 +625,17 @@ var v4broadcast = netaddr.IPv4(255, 255, 255, 255) func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) { var selfNode tailcfg.NodeView if nm != nil { + vipServiceIPMap := nm.GetVIPServiceIPMap() + serviceAddrSet := set.Set[netip.Addr]{} + for _, addrs := range vipServiceIPMap { + serviceAddrSet.AddSlice(addrs) + } ns.atomicIsLocalIPFunc.Store(ipset.NewContainsIPFunc(nm.GetAddresses())) + ns.atomicIsVIPServiceIPFunc.Store(serviceAddrSet.Contains) selfNode = nm.SelfNode } else { ns.atomicIsLocalIPFunc.Store(ipset.FalseContainsIPFunc()) + ns.atomicIsVIPServiceIPFunc.Store(ipset.FalseContainsIPFunc()) } oldPfx := make(map[netip.Prefix]bool) @@ -952,6 +963,12 @@ func (ns *Impl) isLocalIP(ip netip.Addr) bool { return ns.atomicIsLocalIPFunc.Load()(ip) } +// isVIPServiceIP reports whether ip is an IP address that's +// assigned to a VIP service. +func (ns *Impl) isVIPServiceIP(ip netip.Addr) bool { + return ns.atomicIsVIPServiceIPFunc.Load()(ip) +} + func (ns *Impl) peerAPIPortAtomic(ip netip.Addr) *atomic.Uint32 { if ip.Is4() { return &ns.peerapiPort4Atomic @@ -968,6 +985,7 @@ func (ns *Impl) shouldProcessInbound(p *packet.Parsed, t *tstun.Wrapper) bool { // Handle incoming peerapi connections in netstack. dstIP := p.Dst.Addr() isLocal := ns.isLocalIP(dstIP) + isService := ns.isVIPServiceIP(dstIP) // Handle TCP connection to the Tailscale IP(s) in some cases: if ns.lb != nil && p.IPProto == ipproto.TCP && isLocal { @@ -990,6 +1008,13 @@ func (ns *Impl) shouldProcessInbound(p *packet.Parsed, t *tstun.Wrapper) bool { return true } } + if ns.lb != nil && p.IPProto == ipproto.TCP && isService { + // An assumption holds for this to work: when tun mode is on for a service, + // its tcp and web are not set. This is enforced in b.setServeConfigLocked. 
+ if ns.lb.ShouldInterceptVIPServiceTCPPort(p.Dst) { + return true + } + } if p.IPVersion == 6 && !isLocal && viaRange.Contains(dstIP) { return ns.lb != nil && ns.lb.ShouldHandleViaIP(dstIP) } From ccd16430439e4cfc943fc7bfc40eebbfef0a9195 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Tue, 21 Jan 2025 16:47:22 -0500 Subject: [PATCH 0368/1708] add copyright header Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- types/netmap/IPServiceMappings.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/types/netmap/IPServiceMappings.go b/types/netmap/IPServiceMappings.go index 0cd207fb8..4f02924ab 100644 --- a/types/netmap/IPServiceMappings.go +++ b/types/netmap/IPServiceMappings.go @@ -1,3 +1,5 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause package netmap import "net/netip" From 0a57051f2eb7b3c98f32c98bfabc27a229b440bf Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Tue, 21 Jan 2025 16:54:27 -0500 Subject: [PATCH 0369/1708] add blank line Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- types/netmap/IPServiceMappings.go | 1 + 1 file changed, 1 insertion(+) diff --git a/types/netmap/IPServiceMappings.go b/types/netmap/IPServiceMappings.go index 4f02924ab..04e71b0bf 100644 --- a/types/netmap/IPServiceMappings.go +++ b/types/netmap/IPServiceMappings.go @@ -1,5 +1,6 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause + package netmap import "net/netip" From 550923d95330d7809f28cf28daa41bb91a70770f Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Wed, 22 Jan 2025 09:24:49 -0500 Subject: [PATCH 0370/1708] fix handler related and some nit Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- ipn/ipnlocal/local.go | 20 +++++++++++--------- ipn/ipnlocal/serve.go | 16 ++++++++-------- ipn/serve.go | 6 +++--- types/netmap/IPServiceMappings.go | 22 ---------------------- types/netmap/netmap.go | 18 +++++++++++++++++- 5 files changed, 39 insertions(+), 43 deletions(-) delete mode 100644 types/netmap/IPServiceMappings.go diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 05f56fcbd..470824fde 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3442,11 +3442,13 @@ func (b *LocalBackend) setVIPServicesTCPPortsInterceptedLocked(svcPorts map[stri svcAddrPorts := make(map[netip.Addr]func(uint16) bool) // Only set the intercept function if the service has been assigned a VIP. for svcName, ports := range svcPorts { - if addrs, ok := vipServiceIPMap[svcName]; ok { - interceptFn := generateInterceptTCPPortFunc(ports) - for _, addr := range addrs { - svcAddrPorts[addr] = interceptFn - } + addrs, ok := vipServiceIPMap[svcName] + if !ok { + continue + } + interceptFn := generateInterceptTCPPortFunc(ports) + for _, addr := range addrs { + svcAddrPorts[addr] = interceptFn } } @@ -4214,7 +4216,7 @@ func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c } } - // TODO(corp#26001): Get handler for VIP services and Local IPs using + // TODO(tailscale/corp#26001): Get handler for VIP services and Local IPs using // the same function. if handler := b.tcpHandlerForVIPService(dst, src); handler != nil { return handler, opts @@ -6023,7 +6025,7 @@ func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) { // b.mu must be held. 
func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn.PrefsView) { handlePorts := make([]uint16, 0, 4) - vipServicesPorts := make(map[string][]uint16) + var vipServicesPorts map[string][]uint16 if prefs.Valid() && prefs.RunSSH() && envknob.CanSSHD() { handlePorts = append(handlePorts, 22) @@ -6055,9 +6057,9 @@ func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn. } } if _, ok := vipServicesPorts[svc]; !ok { - vipServicesPorts[svc] = servicePorts + mak.Set(&vipServicesPorts, svc, servicePorts) } else { - vipServicesPorts[svc] = append(vipServicesPorts[svc], servicePorts...) + mak.Set(&vipServicesPorts, svc, append(vipServicesPorts[svc], servicePorts...)) } } diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index c20172a42..a5247dd8c 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -55,7 +55,7 @@ var serveHTTPContextKey ctxkey.Key[*serveHTTPContext] type serveHTTPContext struct { SrcAddr netip.AddrPort - ForVIPService bool + ForVIPService string // VIP service name, empty string means local DestPort uint16 // provides funnel-specific context, nil if not funneled @@ -471,7 +471,7 @@ func (b *LocalBackend) tcpHandlerForVIPService(dstAddr, srcAddr netip.AddrPort) BaseContext: func(_ net.Listener) context.Context { return serveHTTPContextKey.WithValue(context.Background(), &serveHTTPContext{ SrcAddr: srcAddr, - ForVIPService: true, + ForVIPService: dstSvc, DestPort: dport, }) }, @@ -481,7 +481,7 @@ func (b *LocalBackend) tcpHandlerForVIPService(dstAddr, srcAddr netip.AddrPort) // hostnames, but for services this getTLSServeCetForPort will need a version that also take // in the hostname. How to store the TLS cert is still being discussed. hs.TLSConfig = &tls.Config{ - GetCertificate: b.getTLSServeCertForPort(dport, true), + GetCertificate: b.getTLSServeCertForPort(dport, dstSvc), } return func(c net.Conn) error { return hs.ServeTLS(netutil.NewOneConnListener(c, nil), "", "") @@ -568,7 +568,7 @@ func (b *LocalBackend) tcpHandlerForServe(dport uint16, srcAddr netip.AddrPort, } if tcph.HTTPS() { hs.TLSConfig = &tls.Config{ - GetCertificate: b.getTLSServeCertForPort(dport, false), + GetCertificate: b.getTLSServeCertForPort(dport, ""), } return func(c net.Conn) error { return hs.ServeTLS(netutil.NewOneConnListener(c, nil), "", "") @@ -1006,7 +1006,7 @@ func allNumeric(s string) bool { return s != "" } -func (b *LocalBackend) webServerConfig(hostname string, forVIPService bool, port uint16) (c ipn.WebServerConfigView, ok bool) { +func (b *LocalBackend) webServerConfig(hostname string, forVIPService string, port uint16) (c ipn.WebServerConfigView, ok bool) { key := ipn.HostPort(fmt.Sprintf("%s:%v", hostname, port)) b.mu.Lock() @@ -1015,13 +1015,13 @@ func (b *LocalBackend) webServerConfig(hostname string, forVIPService bool, port if !b.serveConfig.Valid() { return c, false } - if forVIPService { - return b.serveConfig.FindServiceWeb(key) + if forVIPService != "" { + return b.serveConfig.FindServiceWeb(forVIPService, key) } return b.serveConfig.FindWeb(key) } -func (b *LocalBackend) getTLSServeCertForPort(port uint16, forVIPService bool) func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { +func (b *LocalBackend) getTLSServeCertForPort(port uint16, forVIPService string) func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { return func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { if hi == nil || hi.ServerName == "" { return nil, errors.New("no SNI ServerName") diff --git a/ipn/serve.go b/ipn/serve.go index 
472b327a3..4c2d2f158 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -626,9 +626,9 @@ func (v ServeConfigView) FindServiceTCP(svcName string, port uint16) (res TCPPor return svcCfg.TCP().GetOk(port) } -func (v ServeConfigView) FindServiceWeb(hp HostPort) (res WebServerConfigView, ok bool) { - for _, service := range v.Services().All() { - if res, ok := service.Web().GetOk(hp); ok { +func (v ServeConfigView) FindServiceWeb(svcName string, hp HostPort) (res WebServerConfigView, ok bool) { + if svcCfg, ok := v.Services().GetOk(svcName); ok { + if res, ok := svcCfg.Web().GetOk(hp); ok { return res, ok } } diff --git a/types/netmap/IPServiceMappings.go b/types/netmap/IPServiceMappings.go deleted file mode 100644 index 04e71b0bf..000000000 --- a/types/netmap/IPServiceMappings.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package netmap - -import "net/netip" - -// IPServiceMappings maps IP addresses to service names. This is the inverse of -// [ServiceIPMappings], and is used to inform clients which services is an VIP -// address associated with. This is set to b.ipVIPServiceMap every time the -// netmap is updated. This is used to reduce the cost for looking up the service -// name for the dst IP address in the netStack packet processing workflow. -// -// This is of the form: -// -// { -// "100.65.32.1": "svc:samba", -// "fd7a:115c:a1e0::1234": "svc:samba", -// "100.102.42.3": "svc:web", -// "fd7a:115c:a1e0::abcd": "svc:web", -// } -type IPServiceMappings map[netip.Addr]string diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index c9f909b1a..1482e534e 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -105,7 +105,7 @@ func (nm *NetworkMap) GetAddresses() views.Slice[netip.Prefix] { // VIP addresses that correspond to the service. The service names are // with the prefix "svc:". // -// TODO(corp##25997): cache the result of decoding the capmap so that +// TODO(tailscale/corp##25997): cache the result of decoding the capmap so that // we don't have to decode it multiple times after each netmap update. func (nm *NetworkMap) GetVIPServiceIPMap() tailcfg.ServiceIPMappings { if nm == nil { @@ -425,3 +425,19 @@ const ( _ WGConfigFlags = 1 << iota AllowSubnetRoutes ) + +// IPServiceMappings maps IP addresses to service names. This is the inverse of +// [ServiceIPMappings], and is used to inform clients which services is an VIP +// address associated with. This is set to b.ipVIPServiceMap every time the +// netmap is updated. This is used to reduce the cost for looking up the service +// name for the dst IP address in the netStack packet processing workflow. +// +// This is of the form: +// +// { +// "100.65.32.1": "svc:samba", +// "fd7a:115c:a1e0::1234": "svc:samba", +// "100.102.42.3": "svc:web", +// "fd7a:115c:a1e0::abcd": "svc:web", +// } +type IPServiceMappings map[netip.Addr]string From e4779146b50f05bbcf07b4e99018902669ddd6cc Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Wed, 22 Jan 2025 10:45:30 -0500 Subject: [PATCH 0371/1708] delete extra struct in tailcfg Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- tailcfg/tailcfg.go | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index b69139d34..937f619e6 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2997,19 +2997,3 @@ const LBHeader = "Ts-Lb" // correspond to those IPs. 
Any services that don't correspond to a service // this client is hosting can be ignored. type ServiceIPMappings map[string][]netip.Addr - -// IPServiceMappings maps IP addresses to service names. This is the inverse of -// [ServiceIPMappings], and is used to inform clients which services is an VIP -// address associated with. This is set to b.ipVIPServiceMap every time the -// netmap is updated. This is used to reduce the cost for looking up the service -// name for the dst IP address in the netStack packet processing workflow. -// -// This is of the form: -// -// { -// "100.65.32.1": "svc:samba", -// "fd7a:115c:a1e0::1234": "svc:samba", -// "100.102.42.3": "svc:web", -// "fd7a:115c:a1e0::abcd": "svc:web", -// } -type IPServiceMappings map[netip.Addr]string From 17022ad0e907d033418dbfc604999bedae8a3978 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 22 Jan 2025 06:33:05 -0800 Subject: [PATCH 0372/1708] tailcfg: remove now-unused TailscaleFunnelEnabled method As of tailscale/corp#26003 Updates tailscale/tailscale#11572 Change-Id: I5de2a0951b7b8972744178abc1b0e7948087d412 Signed-off-by: Brad Fitzpatrick --- tailcfg/tailcfg.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 937f619e6..e1259b3f5 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -927,14 +927,6 @@ func (hi *Hostinfo) TailscaleSSHEnabled() bool { func (v HostinfoView) TailscaleSSHEnabled() bool { return v.ж.TailscaleSSHEnabled() } -// TailscaleFunnelEnabled reports whether or not this node has explicitly -// enabled Funnel. -func (hi *Hostinfo) TailscaleFunnelEnabled() bool { - return hi != nil && hi.WireIngress -} - -func (v HostinfoView) TailscaleFunnelEnabled() bool { return v.ж.TailscaleFunnelEnabled() } - // NetInfo contains information about the host's network state. type NetInfo struct { // MappingVariesByDestIP says whether the host's NAT mappings From 8b65598614569b060a1af44258a706aa04d7aa5c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 22 Jan 2025 10:05:43 -0800 Subject: [PATCH 0373/1708] util/slicesx: add AppendNonzero By request of @agottardo. Updates #cleanup Change-Id: I2f02314eb9533b1581e47b66b45b6fb8ac257bb7 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/cli.go | 2 +- util/slicesx/slicesx.go | 11 +++++++++++ util/slicesx/slicesx_test.go | 13 +++++++++++++ util/syspolicy/internal/metrics/metrics.go | 4 +--- 4 files changed, 26 insertions(+), 4 deletions(-) diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 645859038..b419417f9 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -242,7 +242,7 @@ change in the future. } func nonNilCmds(cmds ...*ffcli.Command) []*ffcli.Command { - return slicesx.Filter(cmds[:0], cmds, func(c *ffcli.Command) bool { return c != nil }) + return slicesx.AppendNonzero(cmds[:0], cmds) } func fatalf(format string, a ...any) { diff --git a/util/slicesx/slicesx.go b/util/slicesx/slicesx.go index 1a7e18d91..ff9d47375 100644 --- a/util/slicesx/slicesx.go +++ b/util/slicesx/slicesx.go @@ -95,6 +95,17 @@ func Filter[S ~[]T, T any](dst, src S, fn func(T) bool) S { return dst } +// AppendNonzero appends all non-zero elements of src to dst. +func AppendNonzero[S ~[]T, T comparable](dst, src S) S { + var zero T + for _, v := range src { + if v != zero { + dst = append(dst, v) + } + } + return dst +} + // AppendMatching appends elements in ps to dst if f(x) is true. 
func AppendMatching[T any](dst, ps []T, f func(T) bool) []T { for _, p := range ps { diff --git a/util/slicesx/slicesx_test.go b/util/slicesx/slicesx_test.go index 597b22b83..346449284 100644 --- a/util/slicesx/slicesx_test.go +++ b/util/slicesx/slicesx_test.go @@ -137,6 +137,19 @@ func TestFilterNoAllocations(t *testing.T) { } } +func TestAppendNonzero(t *testing.T) { + v := []string{"one", "two", "", "four"} + got := AppendNonzero(nil, v) + want := []string{"one", "two", "four"} + if !reflect.DeepEqual(got, want) { + t.Errorf("got %v; want %v", got, want) + } + got = AppendNonzero(v[:0], v) + if !reflect.DeepEqual(got, want) { + t.Errorf("got %v; want %v", got, want) + } +} + func TestAppendMatching(t *testing.T) { v := []string{"one", "two", "three", "four"} got := AppendMatching(v[:0], v, func(s string) bool { return len(s) > 3 }) diff --git a/util/syspolicy/internal/metrics/metrics.go b/util/syspolicy/internal/metrics/metrics.go index 0a2aa1192..d8ba271a8 100644 --- a/util/syspolicy/internal/metrics/metrics.go +++ b/util/syspolicy/internal/metrics/metrics.go @@ -289,7 +289,7 @@ func newSettingMetric(key setting.Key, scope setting.Scope, suffix string, typ c } func newMetric(nameParts []string, typ clientmetric.Type) metric { - name := strings.Join(slicesx.Filter([]string{internal.OS(), "syspolicy"}, nameParts, isNonEmpty), "_") + name := strings.Join(slicesx.AppendNonzero([]string{internal.OS(), "syspolicy"}, nameParts), "_") switch { case !ShouldReport(): return &funcMetric{name: name, typ: typ} @@ -304,8 +304,6 @@ func newMetric(nameParts []string, typ clientmetric.Type) metric { } } -func isNonEmpty(s string) bool { return s != "" } - func metricScopeName(scope setting.Scope) string { switch scope { case setting.DeviceSetting: From d1b378504c11cc2a6db896a02a4b963818b07170 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Jan 2025 11:46:13 -0700 Subject: [PATCH 0374/1708] .github: Bump slackapi/slack-github-action from 1.27.0 to 2.0.0 (#14141) Bumps [slackapi/slack-github-action](https://github.com/slackapi/slack-github-action) from 1.27.0 to 2.0.0. - [Release notes](https://github.com/slackapi/slack-github-action/releases) - [Commits](https://github.com/slackapi/slack-github-action/compare/37ebaef184d7626c5f204ab8d3baff4262dd30f0...485a9d42d3a73031f12ec201c457e2162c45d02d) --- updated-dependencies: - dependency-name: slackapi/slack-github-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/govulncheck.yml | 8 ++++---- .github/workflows/test.yml | 7 +++---- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index 4a5ad54f3..989e55fb1 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -24,13 +24,13 @@ jobs: - name: Post to slack if: failure() && github.event_name == 'schedule' - uses: slackapi/slack-github-action@37ebaef184d7626c5f204ab8d3baff4262dd30f0 # v1.27.0 - env: - SLACK_BOT_TOKEN: ${{ secrets.GOVULNCHECK_BOT_TOKEN }} + uses: slackapi/slack-github-action@485a9d42d3a73031f12ec201c457e2162c45d02d # v2.0.0 with: - channel-id: 'C05PXRM304B' + method: chat.postMessage + token: ${{ secrets.GOVULNCHECK_BOT_TOKEN }} payload: | { + "channel": "C05PXRM304B", "blocks": [ { "type": "section", diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 20f215cd0..d049323a3 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -569,8 +569,10 @@ jobs: # By having the job always run, but skipping its only step as needed, we # let the CI output collapse nicely in PRs. if: failure() && github.event_name == 'push' - uses: slackapi/slack-github-action@37ebaef184d7626c5f204ab8d3baff4262dd30f0 # v1.27.0 + uses: slackapi/slack-github-action@485a9d42d3a73031f12ec201c457e2162c45d02d # v2.0.0 with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook payload: | { "attachments": [{ @@ -582,9 +584,6 @@ jobs: "color": "danger" }] } - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK check_mergeability: if: always() From 0fa7b4a236bc492d2c83e4ec319f4d0614d37774 Mon Sep 17 00:00:00 2001 From: Adrian Dewhurst Date: Tue, 21 Jan 2025 17:07:34 -0500 Subject: [PATCH 0375/1708] tailcfg: add ServiceName Rather than using a string everywhere and needing to clarify that the string should have the svc: prefix, create a separate type for Service names. 
Updates tailscale/corp#24607 Change-Id: I720e022f61a7221644bb60955b72cacf42f59960 Signed-off-by: Adrian Dewhurst --- cmd/k8s-operator/ingress-for-pg.go | 16 +++--- cmd/k8s-operator/ingress-for-pg_test.go | 2 +- cmd/tailscale/cli/advertise.go | 2 +- ipn/ipn_clone.go | 4 +- ipn/ipn_view.go | 4 +- ipn/ipnlocal/local.go | 17 +++--- ipn/ipnlocal/local_test.go | 16 +++--- ipn/ipnlocal/serve.go | 6 +-- ipn/ipnlocal/serve_test.go | 12 ++--- ipn/serve.go | 6 +-- tailcfg/tailcfg.go | 72 ++++++++++++++++--------- types/netmap/netmap.go | 10 ++-- 12 files changed, 95 insertions(+), 72 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 4dcaf7c6d..e90187d58 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -222,13 +222,14 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin }, }, } + serviceName := tailcfg.ServiceName("svc:" + hostname) var gotCfg *ipn.ServiceConfig if cfg != nil && cfg.Services != nil { - gotCfg = cfg.Services[hostname] + gotCfg = cfg.Services[serviceName] } if !reflect.DeepEqual(gotCfg, ingCfg) { logger.Infof("Updating serve config") - mak.Set(&cfg.Services, hostname, ingCfg) + mak.Set(&cfg.Services, serviceName, ingCfg) cfgBytes, err := json.Marshal(cfg) if err != nil { return fmt.Errorf("error marshaling serve config: %w", err) @@ -309,7 +310,7 @@ func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG found := false for _, i := range ingList.Items { ingressHostname := hostnameForIngress(&i) - if ingressHostname == vipHostname { + if ingressHostname == vipHostname.WithoutPrefix() { found = true break } @@ -317,7 +318,7 @@ func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG if !found { logger.Infof("VIPService %q is not owned by any Ingress, cleaning up", vipHostname) - svc, err := a.getVIPService(ctx, vipHostname, logger) + svc, err := a.getVIPService(ctx, vipHostname.WithoutPrefix(), logger) if err != nil { errResp := &tailscale.ErrResponse{} if errors.As(err, &errResp) && errResp.Status == http.StatusNotFound { @@ -329,7 +330,7 @@ func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG } if isVIPServiceForAnyIngress(svc) { logger.Infof("cleaning up orphaned VIPService %q", vipHostname) - if err := a.tsClient.deleteVIPServiceByName(ctx, vipHostname); err != nil { + if err := a.tsClient.deleteVIPServiceByName(ctx, vipHostname.WithoutPrefix()); err != nil { errResp := &tailscale.ErrResponse{} if !errors.As(err, &errResp) || errResp.Status != http.StatusNotFound { return fmt.Errorf("deleting VIPService %q: %w", vipHostname, err) @@ -374,11 +375,12 @@ func (a *IngressPGReconciler) maybeCleanup(ctx context.Context, hostname string, if err != nil { return fmt.Errorf("error getting ProxyGroup serve config: %w", err) } + serviceName := tailcfg.ServiceName("svc:" + hostname) // VIPService is always first added to serve config and only then created in the Tailscale API, so if it is not // found in the serve config, we can assume that there is no VIPService. TODO(irbekrm): once we have ingress // ProxyGroup, we will probably add currently exposed VIPServices to its status. At that point, we can use the // status rather than checking the serve config each time. 
- if cfg == nil || cfg.Services == nil || cfg.Services[hostname] == nil { + if cfg == nil || cfg.Services == nil || cfg.Services[serviceName] == nil { return nil } logger.Infof("Ensuring that VIPService %q configuration is cleaned up", hostname) @@ -390,7 +392,7 @@ func (a *IngressPGReconciler) maybeCleanup(ctx context.Context, hostname string, // 3. Remove the VIPService from the serve config for the ProxyGroup. logger.Infof("Removing VIPService %q from serve config for ProxyGroup %q", hostname, pg) - delete(cfg.Services, hostname) + delete(cfg.Services, serviceName) cfgBytes, err := json.Marshal(cfg) if err != nil { return fmt.Errorf("error marshaling serve config: %w", err) diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 2cd340962..9ef36f696 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -137,7 +137,7 @@ func TestIngressPGReconciler(t *testing.T) { t.Fatalf("unmarshaling serve config: %v", err) } - if cfg.Services["my-svc"] == nil { + if cfg.Services["svc:my-svc"] == nil { t.Error("expected serve config to contain VIPService configuration") } diff --git a/cmd/tailscale/cli/advertise.go b/cmd/tailscale/cli/advertise.go index 00b5024f0..83d1a35aa 100644 --- a/cmd/tailscale/cli/advertise.go +++ b/cmd/tailscale/cli/advertise.go @@ -66,7 +66,7 @@ func parseServiceNames(servicesArg string) ([]string, error) { if servicesArg != "" { services = strings.Split(servicesArg, ",") for _, svc := range services { - err := tailcfg.CheckServiceName(svc) + err := tailcfg.ServiceName(svc).Validate() if err != nil { return nil, fmt.Errorf("service %q: %s", svc, err) } diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 34d7ba9a6..47cca71d0 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -106,7 +106,7 @@ func (src *ServeConfig) Clone() *ServeConfig { } } if dst.Services != nil { - dst.Services = map[string]*ServiceConfig{} + dst.Services = map[tailcfg.ServiceName]*ServiceConfig{} for k, v := range src.Services { if v == nil { dst.Services[k] = nil @@ -133,7 +133,7 @@ func (src *ServeConfig) Clone() *ServeConfig { var _ServeConfigCloneNeedsRegeneration = ServeConfig(struct { TCP map[uint16]*TCPPortHandler Web map[HostPort]*WebServerConfig - Services map[string]*ServiceConfig + Services map[tailcfg.ServiceName]*ServiceConfig AllowFunnel map[HostPort]bool Foreground map[string]*ServeConfig ETag string diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 9cd5a466a..41b4ddbc8 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -195,7 +195,7 @@ func (v ServeConfigView) Web() views.MapFn[HostPort, *WebServerConfig, WebServer }) } -func (v ServeConfigView) Services() views.MapFn[string, *ServiceConfig, ServiceConfigView] { +func (v ServeConfigView) Services() views.MapFn[tailcfg.ServiceName, *ServiceConfig, ServiceConfigView] { return views.MapFnOf(v.ж.Services, func(t *ServiceConfig) ServiceConfigView { return t.View() }) @@ -216,7 +216,7 @@ func (v ServeConfigView) ETag() string { return v.ж.ETag } var _ServeConfigViewNeedsRegeneration = ServeConfig(struct { TCP map[uint16]*TCPPortHandler Web map[HostPort]*WebServerConfig - Services map[string]*ServiceConfig + Services map[tailcfg.ServiceName]*ServiceConfig AllowFunnel map[HostPort]bool Foreground map[string]*ServeConfig ETag string diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 470824fde..2bd46b852 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3417,13 +3417,13 @@ func 
generateInterceptVIPServicesTCPPortFunc(svcAddrPorts map[netip.Addr]func(ui // setVIPServicesTCPPortsIntercepted populates b.shouldInterceptVIPServicesTCPPortAtomic with an // efficient func for ShouldInterceptTCPPort to use, which is called on every incoming packet. -func (b *LocalBackend) setVIPServicesTCPPortsIntercepted(svcPorts map[string][]uint16) { +func (b *LocalBackend) setVIPServicesTCPPortsIntercepted(svcPorts map[tailcfg.ServiceName][]uint16) { b.mu.Lock() defer b.mu.Unlock() b.setVIPServicesTCPPortsInterceptedLocked(svcPorts) } -func (b *LocalBackend) setVIPServicesTCPPortsInterceptedLocked(svcPorts map[string][]uint16) { +func (b *LocalBackend) setVIPServicesTCPPortsInterceptedLocked(svcPorts map[tailcfg.ServiceName][]uint16) { if len(svcPorts) == 0 { b.shouldInterceptVIPServicesTCPPortAtomic.Store(func(netip.AddrPort) bool { return false }) return @@ -6025,7 +6025,7 @@ func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) { // b.mu must be held. func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn.PrefsView) { handlePorts := make([]uint16, 0, 4) - var vipServicesPorts map[string][]uint16 + var vipServicesPorts map[tailcfg.ServiceName][]uint16 if prefs.Valid() && prefs.RunSSH() && envknob.CanSSHD() { handlePorts = append(handlePorts, 22) @@ -7815,7 +7815,7 @@ func (b *LocalBackend) vipServiceHash(services []*tailcfg.VIPService) string { func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService { // keyed by service name - var services map[string]*tailcfg.VIPService + var services map[tailcfg.ServiceName]*tailcfg.VIPService if !b.serveConfig.Valid() { return nil } @@ -7828,12 +7828,13 @@ func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcf } for _, s := range prefs.AdvertiseServices().All() { - if services == nil || services[s] == nil { - mak.Set(&services, s, &tailcfg.VIPService{ - Name: s, + sn := tailcfg.ServiceName(s) + if services == nil || services[sn] == nil { + mak.Set(&services, sn, &tailcfg.VIPService{ + Name: sn, }) } - services[s].Active = true + services[sn].Active = true } return slicesx.MapValues(services) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index f851bb0f8..b1a79860d 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2715,7 +2715,7 @@ func TestTCPHandlerForDstWithVIPService(t *testing.T) { err = b.setServeConfigLocked( &ipn.ServeConfig{ - Services: map[string]*ipn.ServiceConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ "svc:foo": { TCP: map[uint16]*ipn.TCPPortHandler{ 882: {HTTP: true}, @@ -4747,7 +4747,7 @@ func TestGetVIPServices(t *testing.T) { "served-only", []string{}, &ipn.ServeConfig{ - Services: map[string]*ipn.ServiceConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ "svc:abc": {Tun: true}, }, }, @@ -4762,7 +4762,7 @@ func TestGetVIPServices(t *testing.T) { "served-and-advertised", []string{"svc:abc"}, &ipn.ServeConfig{ - Services: map[string]*ipn.ServiceConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ "svc:abc": {Tun: true}, }, }, @@ -4778,7 +4778,7 @@ func TestGetVIPServices(t *testing.T) { "served-and-advertised-different-service", []string{"svc:def"}, &ipn.ServeConfig{ - Services: map[string]*ipn.ServiceConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ "svc:abc": {Tun: true}, }, }, @@ -4797,7 +4797,7 @@ func TestGetVIPServices(t *testing.T) { "served-with-port-ranges-one-range-single", []string{}, &ipn.ServeConfig{ - Services: 
map[string]*ipn.ServiceConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ "svc:abc": {TCP: map[uint16]*ipn.TCPPortHandler{ 80: {HTTPS: true}, }}, @@ -4814,7 +4814,7 @@ func TestGetVIPServices(t *testing.T) { "served-with-port-ranges-one-range-multiple", []string{}, &ipn.ServeConfig{ - Services: map[string]*ipn.ServiceConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ "svc:abc": {TCP: map[uint16]*ipn.TCPPortHandler{ 80: {HTTPS: true}, 81: {HTTPS: true}, @@ -4833,7 +4833,7 @@ func TestGetVIPServices(t *testing.T) { "served-with-port-ranges-multiple-ranges", []string{}, &ipn.ServeConfig{ - Services: map[string]*ipn.ServiceConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ "svc:abc": {TCP: map[uint16]*ipn.TCPPortHandler{ 80: {HTTPS: true}, 81: {HTTPS: true}, @@ -4866,7 +4866,7 @@ func TestGetVIPServices(t *testing.T) { } got := lb.vipServicesFromPrefsLocked(prefs.View()) slices.SortFunc(got, func(a, b *tailcfg.VIPService) int { - return strings.Compare(a.Name, b.Name) + return strings.Compare(a.Name.String(), b.Name.String()) }) if !reflect.DeepEqual(tt.want, got) { t.Logf("want:") diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index a5247dd8c..63cb2ef55 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -55,7 +55,7 @@ var serveHTTPContextKey ctxkey.Key[*serveHTTPContext] type serveHTTPContext struct { SrcAddr netip.AddrPort - ForVIPService string // VIP service name, empty string means local + ForVIPService tailcfg.ServiceName // "" means local DestPort uint16 // provides funnel-specific context, nil if not funneled @@ -1006,7 +1006,7 @@ func allNumeric(s string) bool { return s != "" } -func (b *LocalBackend) webServerConfig(hostname string, forVIPService string, port uint16) (c ipn.WebServerConfigView, ok bool) { +func (b *LocalBackend) webServerConfig(hostname string, forVIPService tailcfg.ServiceName, port uint16) (c ipn.WebServerConfigView, ok bool) { key := ipn.HostPort(fmt.Sprintf("%s:%v", hostname, port)) b.mu.Lock() @@ -1021,7 +1021,7 @@ func (b *LocalBackend) webServerConfig(hostname string, forVIPService string, po return b.serveConfig.FindWeb(key) } -func (b *LocalBackend) getTLSServeCertForPort(port uint16, forVIPService string) func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { +func (b *LocalBackend) getTLSServeCertForPort(port uint16, forVIPService tailcfg.ServiceName) func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { return func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { if hi == nil || hi.ServerName == "" { return nil, errors.New("no SNI ServerName") diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index f2ea8e5cd..eb8169390 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -354,7 +354,7 @@ func TestServeConfigServices(t *testing.T) { { name: "one-incorrectly-configured-service", conf: &ipn.ServeConfig{ - Services: map[string]*ipn.ServiceConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ "svc:foo": { TCP: map[uint16]*ipn.TCPPortHandler{ 80: {HTTP: true}, @@ -369,7 +369,7 @@ func TestServeConfigServices(t *testing.T) { // one correctly configured service with packet should be intercepted name: "one-service-intercept-packet", conf: &ipn.ServeConfig{ - Services: map[string]*ipn.ServiceConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ "svc:foo": { TCP: map[uint16]*ipn.TCPPortHandler{ 80: {HTTP: true}, @@ -388,7 +388,7 @@ func TestServeConfigServices(t *testing.T) { // one correctly configured service with packet 
should not be intercepted name: "one-service-not-intercept-packet", conf: &ipn.ServeConfig{ - Services: map[string]*ipn.ServiceConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ "svc:foo": { TCP: map[uint16]*ipn.TCPPortHandler{ 80: {HTTP: true}, @@ -406,10 +406,10 @@ func TestServeConfigServices(t *testing.T) { intercepted: false, }, { - //multiple correctly configured service with packet should be intercepted + // multiple correctly configured service with packet should be intercepted name: "multiple-service-intercept-packet", conf: &ipn.ServeConfig{ - Services: map[string]*ipn.ServiceConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ "svc:foo": { TCP: map[uint16]*ipn.TCPPortHandler{ 80: {HTTP: true}, @@ -437,7 +437,7 @@ func TestServeConfigServices(t *testing.T) { // multiple correctly configured service with packet should not be intercepted name: "multiple-service-not-intercept-packet", conf: &ipn.ServeConfig{ - Services: map[string]*ipn.ServiceConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ "svc:foo": { TCP: map[uint16]*ipn.TCPPortHandler{ 80: {HTTP: true}, diff --git a/ipn/serve.go b/ipn/serve.go index 4c2d2f158..ac92287bd 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -57,7 +57,7 @@ type ServeConfig struct { // Services maps from service name (in the form "svc:dns-label") to a ServiceConfig. // Which describes the L3, L4, and L7 forwarding information for the service. - Services map[string]*ServiceConfig `json:",omitempty"` + Services map[tailcfg.ServiceName]*ServiceConfig `json:",omitempty"` // AllowFunnel is the set of SNI:port values for which funnel // traffic is allowed, from trusted ingress peers. @@ -618,7 +618,7 @@ func (v ServeConfigView) Webs() iter.Seq2[HostPort, WebServerConfigView] { } // FindServiceTCP return the TCPPortHandlerView for the given service name and port. -func (v ServeConfigView) FindServiceTCP(svcName string, port uint16) (res TCPPortHandlerView, ok bool) { +func (v ServeConfigView) FindServiceTCP(svcName tailcfg.ServiceName, port uint16) (res TCPPortHandlerView, ok bool) { svcCfg, ok := v.Services().GetOk(svcName) if !ok { return res, ok @@ -626,7 +626,7 @@ func (v ServeConfigView) FindServiceTCP(svcName string, port uint16) (res TCPPor return svcCfg.TCP().GetOk(port) } -func (v ServeConfigView) FindServiceWeb(svcName string, hp HostPort) (res WebServerConfigView, ok bool) { +func (v ServeConfigView) FindServiceWeb(svcName tailcfg.ServiceName, hp HostPort) (res WebServerConfigView, ok bool) { if svcCfg, ok := v.Services().GetOk(svcName); ok { if res, ok := svcCfg.Web().GetOk(hp); ok { return res, ok diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index e1259b3f5..c921a0c7d 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -717,21 +717,6 @@ func CheckTag(tag string) error { return nil } -// CheckServiceName validates svc for use as a service name. -// We only allow valid DNS labels, since the expectation is that these will be -// used as parts of domain names. -func CheckServiceName(svc string) error { - var ok bool - svc, ok = strings.CutPrefix(svc, "svc:") - if !ok { - return errors.New("services must start with 'svc:'") - } - if svc == "" { - return errors.New("service names must not be empty") - } - return dnsname.ValidLabel(svc) -} - // CheckRequestTags checks that all of h.RequestTags are valid. func (h *Hostinfo) CheckRequestTags() error { if h == nil { @@ -897,16 +882,51 @@ type Hostinfo struct { // require changes to Hostinfo.Equal. 
} +// ServiceName is the name of a service, of the form `svc:dns-label`. Services +// represent some kind of application provided for users of the tailnet with a +// MagicDNS name and possibly dedicated IP addresses. Currently (2024-01-21), +// the only type of service is [VIPService]. +// This is not related to the older [Service] used in [Hostinfo.Services]. +type ServiceName string + +// Validate validates if the service name is formatted correctly. +// We only allow valid DNS labels, since the expectation is that these will be +// used as parts of domain names. +func (sn ServiceName) Validate() error { + bareName, ok := strings.CutPrefix(string(sn), "svc:") + if !ok { + return errors.New("services must start with 'svc:'") + } + if bareName == "" { + return errors.New("service names must not be empty") + } + return dnsname.ValidLabel(bareName) +} + +// String implements [fmt.Stringer]. +func (sn ServiceName) String() string { + return string(sn) +} + +// WithoutPrefix is the name of the service without the `svc:` prefix, used for +// DNS names. If the name does not include the prefix (which means +// [ServiceName.Validate] would return an error) then it returns "". +func (sn ServiceName) WithoutPrefix() string { + bareName, ok := strings.CutPrefix(string(sn), "svc:") + if !ok { + return "" + } + return bareName +} + // VIPService represents a service created on a tailnet from the // perspective of a node providing that service. These services // have an virtual IP (VIP) address pair distinct from the node's IPs. type VIPService struct { - // Name is the name of the service, of the form `svc:dns-label`. - // See CheckServiceName for a validation func. - // Name uniquely identifies a service on a particular tailnet, - // and so also corresponds uniquely to the pair of IP addresses - // belonging to the VIP service. - Name string + // Name is the name of the service. The Name uniquely identifies a service + // on a particular tailnet, and so also corresponds uniquely to the pair of + // IP addresses belonging to the VIP service. + Name ServiceName // Ports specify which ProtoPorts are made available by this node // on the service's IPs. @@ -2972,10 +2992,10 @@ type EarlyNoise struct { // vs NodeKey) const LBHeader = "Ts-Lb" -// ServiceIPMappings maps service names (strings that conform to -// [CheckServiceName]) to lists of IP addresses. This is used as the value of -// the [NodeAttrServiceHost] capability, to inform service hosts what IP -// addresses they need to listen on for each service that they are advertising. +// ServiceIPMappings maps ServiceName to lists of IP addresses. This is used +// as the value of the [NodeAttrServiceHost] capability, to inform service hosts +// what IP addresses they need to listen on for each service that they are +// advertising. // // This is of the form: // @@ -2988,4 +3008,4 @@ const LBHeader = "Ts-Lb" // provided in AllowedIPs, but this lets the client know which services // correspond to those IPs. Any services that don't correspond to a service // this client is hosting can be ignored. -type ServiceIPMappings map[string][]netip.Addr +type ServiceIPMappings map[ServiceName][]netip.Addr diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index 1482e534e..ab22eec3e 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -427,10 +427,10 @@ const ( ) // IPServiceMappings maps IP addresses to service names. 
This is the inverse of -// [ServiceIPMappings], and is used to inform clients which services is an VIP -// address associated with. This is set to b.ipVIPServiceMap every time the -// netmap is updated. This is used to reduce the cost for looking up the service -// name for the dst IP address in the netStack packet processing workflow. +// [tailcfg.ServiceIPMappings], and is used to inform track which service a VIP +// is associated with. This is set to b.ipVIPServiceMap every time the netmap is +// updated. This is used to reduce the cost for looking up the service name for +// the dst IP address in the netStack packet processing workflow. // // This is of the form: // @@ -440,4 +440,4 @@ const ( // "100.102.42.3": "svc:web", // "fd7a:115c:a1e0::abcd": "svc:web", // } -type IPServiceMappings map[netip.Addr]string +type IPServiceMappings map[netip.Addr]tailcfg.ServiceName From 3dabea0fc2c224249b2a503431fc610f8883d3e1 Mon Sep 17 00:00:00 2001 From: Andrea Gottardo Date: Wed, 22 Jan 2025 16:01:07 -0800 Subject: [PATCH 0376/1708] cmd/tailscale: define CLI tools to manipulate macOS network and system extensions (#14727) Updates tailscale/corp#25278 Adds definitions for new CLI commands getting added in v1.80. Refactors some pre-existing CLI commands within the `configure` tree to clean up code. Signed-off-by: Andrea Gottardo --- cmd/tailscale/cli/cli.go | 7 +- cmd/tailscale/cli/configure-kube.go | 28 +++--- cmd/tailscale/cli/configure-kube_omit.go | 13 +++ cmd/tailscale/cli/configure-synology-cert.go | 27 +++--- cmd/tailscale/cli/configure-synology.go | 55 +++++++---- cmd/tailscale/cli/configure.go | 43 +++++---- cmd/tailscale/cli/configure_apple-all.go | 11 +++ cmd/tailscale/cli/configure_apple.go | 97 ++++++++++++++++++++ 8 files changed, 213 insertions(+), 68 deletions(-) create mode 100644 cmd/tailscale/cli/configure-kube_omit.go create mode 100644 cmd/tailscale/cli/configure_apple-all.go create mode 100644 cmd/tailscale/cli/configure_apple.go diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index b419417f9..fd39b3b67 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -190,7 +190,7 @@ change in the future. loginCmd, logoutCmd, switchCmd, - configureCmd, + configureCmd(), syspolicyCmd, netcheckCmd, ipCmd, @@ -216,6 +216,7 @@ change in the future. driveCmd, idTokenCmd, advertiseCmd(), + configureHostCmd(), ), FlagSet: rootfs, Exec: func(ctx context.Context, args []string) error { @@ -226,10 +227,6 @@ change in the future. 
}, } - if runtime.GOOS == "linux" && distro.Get() == distro.Synology { - rootCmd.Subcommands = append(rootCmd.Subcommands, configureHostCmd) - } - walkCommands(rootCmd, func(w cmdWalk) bool { if w.UsageFunc == nil { w.UsageFunc = usageFunc diff --git a/cmd/tailscale/cli/configure-kube.go b/cmd/tailscale/cli/configure-kube.go index 6af15e3d9..6bc4e202e 100644 --- a/cmd/tailscale/cli/configure-kube.go +++ b/cmd/tailscale/cli/configure-kube.go @@ -20,33 +20,31 @@ import ( "tailscale.com/version" ) -func init() { - configureCmd.Subcommands = append(configureCmd.Subcommands, configureKubeconfigCmd) -} - -var configureKubeconfigCmd = &ffcli.Command{ - Name: "kubeconfig", - ShortHelp: "[ALPHA] Connect to a Kubernetes cluster using a Tailscale Auth Proxy", - ShortUsage: "tailscale configure kubeconfig ", - LongHelp: strings.TrimSpace(` +func configureKubeconfigCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "kubeconfig", + ShortHelp: "[ALPHA] Connect to a Kubernetes cluster using a Tailscale Auth Proxy", + ShortUsage: "tailscale configure kubeconfig ", + LongHelp: strings.TrimSpace(` Run this command to configure kubectl to connect to a Kubernetes cluster over Tailscale. The hostname argument should be set to the Tailscale hostname of the peer running as an auth proxy in the cluster. See: https://tailscale.com/s/k8s-auth-proxy `), - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("kubeconfig") - return fs - })(), - Exec: runConfigureKubeconfig, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("kubeconfig") + return fs + })(), + Exec: runConfigureKubeconfig, + } } // kubeconfigPath returns the path to the kubeconfig file for the current user. func kubeconfigPath() (string, error) { if kubeconfig := os.Getenv("KUBECONFIG"); kubeconfig != "" { if version.IsSandboxedMacOS() { - return "", errors.New("$KUBECONFIG is incompatible with the App Store version") + return "", errors.New("cannot read $KUBECONFIG on GUI builds of the macOS client: this requires the open-source tailscaled distribution") } var out string for _, out = range filepath.SplitList(kubeconfig) { diff --git a/cmd/tailscale/cli/configure-kube_omit.go b/cmd/tailscale/cli/configure-kube_omit.go new file mode 100644 index 000000000..130f2870f --- /dev/null +++ b/cmd/tailscale/cli/configure-kube_omit.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_kube + +package cli + +import "github.com/peterbourgon/ff/v3/ffcli" + +func configureKubeconfigCmd() *ffcli.Command { + // omitted from the build when the ts_omit_kube build tag is set + return nil +} diff --git a/cmd/tailscale/cli/configure-synology-cert.go b/cmd/tailscale/cli/configure-synology-cert.go index aabcb8dfa..663d0c879 100644 --- a/cmd/tailscale/cli/configure-synology-cert.go +++ b/cmd/tailscale/cli/configure-synology-cert.go @@ -22,22 +22,27 @@ import ( "tailscale.com/version/distro" ) -var synologyConfigureCertCmd = &ffcli.Command{ - Name: "synology-cert", - Exec: runConfigureSynologyCert, - ShortHelp: "Configure Synology with a TLS certificate for your tailnet", - ShortUsage: "synology-cert [--domain ]", - LongHelp: strings.TrimSpace(` +func synologyConfigureCertCmd() *ffcli.Command { + if runtime.GOOS != "linux" || distro.Get() != distro.Synology { + return nil + } + return &ffcli.Command{ + Name: "synology-cert", + Exec: runConfigureSynologyCert, + ShortHelp: "Configure Synology with a TLS certificate for your tailnet", + ShortUsage: "synology-cert [--domain ]", + LongHelp: 
strings.TrimSpace(` This command is intended to run periodically as root on a Synology device to create or refresh the TLS certificate for the tailnet domain. See: https://tailscale.com/kb/1153/enabling-https `), - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("synology-cert") - fs.StringVar(&synologyConfigureCertArgs.domain, "domain", "", "Tailnet domain to create or refresh certificates for. Ignored if only one domain exists.") - return fs - })(), + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("synology-cert") + fs.StringVar(&synologyConfigureCertArgs.domain, "domain", "", "Tailnet domain to create or refresh certificates for. Ignored if only one domain exists.") + return fs + })(), + } } var synologyConfigureCertArgs struct { diff --git a/cmd/tailscale/cli/configure-synology.go b/cmd/tailscale/cli/configure-synology.go index 9d674e56d..f0f05f757 100644 --- a/cmd/tailscale/cli/configure-synology.go +++ b/cmd/tailscale/cli/configure-synology.go @@ -21,34 +21,49 @@ import ( // configureHostCmd is the "tailscale configure-host" command which was once // used to configure Synology devices, but is now a compatibility alias to // "tailscale configure synology". -var configureHostCmd = &ffcli.Command{ - Name: "configure-host", - Exec: runConfigureSynology, - ShortUsage: "tailscale configure-host\n" + synologyConfigureCmd.ShortUsage, - ShortHelp: synologyConfigureCmd.ShortHelp, - LongHelp: hidden + synologyConfigureCmd.LongHelp, - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("configure-host") - return fs - })(), +// +// It returns nil if the actual "tailscale configure synology" command is not +// available. +func configureHostCmd() *ffcli.Command { + synologyConfigureCmd := synologyConfigureCmd() + if synologyConfigureCmd == nil { + // No need to offer this compatibility alias if the actual command is not available. + return nil + } + return &ffcli.Command{ + Name: "configure-host", + Exec: runConfigureSynology, + ShortUsage: "tailscale configure-host\n" + synologyConfigureCmd.ShortUsage, + ShortHelp: synologyConfigureCmd.ShortHelp, + LongHelp: hidden + synologyConfigureCmd.LongHelp, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("configure-host") + return fs + })(), + } } -var synologyConfigureCmd = &ffcli.Command{ - Name: "synology", - Exec: runConfigureSynology, - ShortUsage: "tailscale configure synology", - ShortHelp: "Configure Synology to enable outbound connections", - LongHelp: strings.TrimSpace(` +func synologyConfigureCmd() *ffcli.Command { + if runtime.GOOS != "linux" || distro.Get() != distro.Synology { + return nil + } + return &ffcli.Command{ + Name: "synology", + Exec: runConfigureSynology, + ShortUsage: "tailscale configure synology", + ShortHelp: "Configure Synology to enable outbound connections", + LongHelp: strings.TrimSpace(` This command is intended to run at boot as root on a Synology device to create the /dev/net/tun device and give the tailscaled binary permission to use it. 
See: https://tailscale.com/s/synology-outbound `), - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("synology") - return fs - })(), + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("synology") + return fs + })(), + } } func runConfigureSynology(ctx context.Context, args []string) error { diff --git a/cmd/tailscale/cli/configure.go b/cmd/tailscale/cli/configure.go index fd136d766..acb416755 100644 --- a/cmd/tailscale/cli/configure.go +++ b/cmd/tailscale/cli/configure.go @@ -5,32 +5,41 @@ package cli import ( "flag" - "runtime" "strings" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/version/distro" ) -var configureCmd = &ffcli.Command{ - Name: "configure", - ShortUsage: "tailscale configure ", - ShortHelp: "[ALPHA] Configure the host to enable more Tailscale features", - LongHelp: strings.TrimSpace(` +func configureCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "configure", + ShortUsage: "tailscale configure ", + ShortHelp: "Configure the host to enable more Tailscale features", + LongHelp: strings.TrimSpace(` The 'configure' set of commands are intended to provide a way to enable different services on the host to use Tailscale in more ways. `), - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("configure") - return fs - })(), - Subcommands: configureSubcommands(), + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("configure") + return fs + })(), + Subcommands: nonNilCmds( + configureKubeconfigCmd(), + synologyConfigureCmd(), + synologyConfigureCertCmd(), + ccall(maybeSysExtCmd), + ccall(maybeVPNConfigCmd), + ), + } } -func configureSubcommands() (out []*ffcli.Command) { - if runtime.GOOS == "linux" && distro.Get() == distro.Synology { - out = append(out, synologyConfigureCmd) - out = append(out, synologyConfigureCertCmd) +// ccall calls the function f if it is non-nil, and returns its result. +// +// It returns the zero value of the type T if f is nil. +func ccall[T any](f func() T) T { + var zero T + if f == nil { + return zero } - return out + return f() } diff --git a/cmd/tailscale/cli/configure_apple-all.go b/cmd/tailscale/cli/configure_apple-all.go new file mode 100644 index 000000000..5f0da9b95 --- /dev/null +++ b/cmd/tailscale/cli/configure_apple-all.go @@ -0,0 +1,11 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import "github.com/peterbourgon/ff/v3/ffcli" + +var ( + maybeSysExtCmd func() *ffcli.Command // non-nil only on macOS, see configure_apple.go + maybeVPNConfigCmd func() *ffcli.Command // non-nil only on macOS, see configure_apple.go +) diff --git a/cmd/tailscale/cli/configure_apple.go b/cmd/tailscale/cli/configure_apple.go new file mode 100644 index 000000000..edd9ec1ab --- /dev/null +++ b/cmd/tailscale/cli/configure_apple.go @@ -0,0 +1,97 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build darwin + +package cli + +import ( + "context" + "errors" + + "github.com/peterbourgon/ff/v3/ffcli" +) + +func init() { + maybeSysExtCmd = sysExtCmd + maybeVPNConfigCmd = vpnConfigCmd +} + +// Functions in this file provide a dummy Exec function that only prints an error message for users of the open-source +// tailscaled distribution. On GUI builds, the Swift code in the macOS client handles these commands by not passing the +// flow of execution to the CLI. + +// sysExtCmd returns a command for managing the Tailscale system extension on macOS +// (for the Standalone variant of the client only). 
+func sysExtCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "sysext", + ShortUsage: "tailscale configure sysext [activate|deactivate|status]", + ShortHelp: "Manages the system extension for macOS (Standalone variant)", + LongHelp: "The sysext set of commands provides a way to activate, deactivate, or manage the state of the Tailscale system extension on macOS. " + + "This is only relevant if you are running the Standalone variant of the Tailscale client for macOS. " + + "To access more detailed information about system extensions installed on this Mac, run 'systemextensionsctl list'.", + Subcommands: []*ffcli.Command{ + { + Name: "activate", + ShortUsage: "tailscale sysext activate", + ShortHelp: "Register the Tailscale system extension with macOS.", + LongHelp: "This command registers the Tailscale system extension with macOS. To run Tailscale, you'll also need to install the VPN configuration separately (run `tailscale configure vpn-config install`). After running this command, you need to approve the extension in System Settings > Login Items and Extensions > Network Extensions.", + Exec: requiresStandalone, + }, + { + Name: "deactivate", + ShortUsage: "tailscale sysext deactivate", + ShortHelp: "Deactivate the Tailscale system extension on macOS", + LongHelp: "This command deactivates the Tailscale system extension on macOS. To completely remove Tailscale, you'll also need to delete the VPN configuration separately (use `tailscale configure vpn-config uninstall`).", + Exec: requiresStandalone, + }, + { + Name: "status", + ShortUsage: "tailscale sysext status", + ShortHelp: "Print the enablement status of the Tailscale system extension", + LongHelp: "This command prints the enablement status of the Tailscale system extension. If the extension is not enabled, run `tailscale sysext activate` to enable it.", + Exec: requiresStandalone, + }, + }, + Exec: requiresStandalone, + } +} + +// vpnConfigCmd returns a command for managing the Tailscale VPN configuration on macOS +// (the entry that appears in System Settings > VPN). +func vpnConfigCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "mac-vpn", + ShortUsage: "tailscale configure mac-vpn [install|uninstall]", + ShortHelp: "Manage the VPN configuration on macOS (App Store and Standalone variants)", + LongHelp: "The vpn-config set of commands provides a way to add or remove the Tailscale VPN configuration from the macOS settings. This is the entry that appears in System Settings > VPN.", + Subcommands: []*ffcli.Command{ + { + Name: "install", + ShortUsage: "tailscale mac-vpn install", + ShortHelp: "Write the Tailscale VPN configuration to the macOS settings", + LongHelp: "This command writes the Tailscale VPN configuration to the macOS settings. This is the entry that appears in System Settings > VPN. If you are running the Standalone variant of the client, you'll also need to install the system extension separately (run `tailscale configure sysext activate`).", + Exec: requiresGUI, + }, + { + Name: "uninstall", + ShortUsage: "tailscale mac-vpn uninstall", + ShortHelp: "Delete the Tailscale VPN configuration from the macOS settings", + LongHelp: "This command removes the Tailscale VPN configuration from the macOS settings. This is the entry that appears in System Settings > VPN. 
If you are running the Standalone variant of the client, you'll also need to deactivate the system extension separately (run `tailscale configure sysext deactivate`).", + Exec: requiresGUI, + }, + }, + Exec: func(ctx context.Context, args []string) error { + return errors.New("unsupported command: requires a GUI build of the macOS client") + }, + } +} + +func requiresStandalone(ctx context.Context, args []string) error { + return errors.New("unsupported command: requires the Standalone (.pkg installer) GUI build of the client") +} + +func requiresGUI(ctx context.Context, args []string) error { + return errors.New("unsupported command: requires a GUI build of the macOS client") +} From 3fb8a1f6bf4bdc8f438430014721486f3a6f20f6 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 22 Jan 2025 16:50:25 -0800 Subject: [PATCH 0377/1708] ipn/ipnlocal: re-advertise appc routes on startup, take 2 (#14740) * Reapply "ipn/ipnlocal: re-advertise appc routes on startup (#14609)" This reverts commit 51adaec35a3e4d25df88d81e6264584e151bd33d. Signed-off-by: Andrew Lytvynov * ipn/ipnlocal: fix a deadlock in readvertiseAppConnectorRoutes Don't hold LocalBackend.mu while calling the methods of appc.AppConnector. Those methods could call back into LocalBackend and try to acquire it's mutex. Fixes https://github.com/tailscale/corp/issues/25965 Fixes #14606 Signed-off-by: Andrew Lytvynov --------- Signed-off-by: Andrew Lytvynov --- ipn/ipnlocal/local.go | 43 +++++++++++++++++++++++++++++++--- ipn/ipnlocal/local_test.go | 47 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 87 insertions(+), 3 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 2bd46b852..4ff3f3db4 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4418,6 +4418,41 @@ func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs i b.appConnector.UpdateDomainsAndRoutes(domains, routes) } +func (b *LocalBackend) readvertiseAppConnectorRoutes() { + // Note: we should never call b.appConnector methods while holding b.mu. + // This can lead to a deadlock, like + // https://github.com/tailscale/corp/issues/25965. + // + // Grab a copy of the field, since b.mu only guards access to the + // b.appConnector field itself. + b.mu.Lock() + appConnector := b.appConnector + b.mu.Unlock() + + if appConnector == nil { + return + } + domainRoutes := appConnector.DomainRoutes() + if domainRoutes == nil { + return + } + + // Re-advertise the stored routes, in case stored state got out of + // sync with previously advertised routes in prefs. + var prefixes []netip.Prefix + for _, ips := range domainRoutes { + for _, ip := range ips { + prefixes = append(prefixes, netip.PrefixFrom(ip, ip.BitLen())) + } + } + // Note: AdvertiseRoute will trim routes that are already + // advertised, so if everything is already being advertised this is + // a noop. + if err := b.AdvertiseRoute(prefixes...); err != nil { + b.logf("error advertising stored app connector routes: %v", err) + } +} + // authReconfig pushes a new configuration into wgengine, if engine // updates are not currently blocked, based on the cached netmap and // user prefs. @@ -4496,6 +4531,7 @@ func (b *LocalBackend) authReconfig() { } b.initPeerAPIListener() + b.readvertiseAppConnectorRoutes() } // shouldUseOneCGNATRoute reports whether we should prefer to make one big @@ -7261,7 +7297,7 @@ var ErrDisallowedAutoRoute = errors.New("route is not allowed") // If the route is disallowed, ErrDisallowedAutoRoute is returned. 
func (b *LocalBackend) AdvertiseRoute(ipps ...netip.Prefix) error {
 	finalRoutes := b.Prefs().AdvertiseRoutes().AsSlice()
-	newRoutes := false
+	var newRoutes []netip.Prefix
 
 	for _, ipp := range ipps {
 		if !allowedAutoRoute(ipp) {
@@ -7277,13 +7313,14 @@ func (b *LocalBackend) AdvertiseRoute(ipps ...netip.Prefix) error {
 		}
 
 		finalRoutes = append(finalRoutes, ipp)
-		newRoutes = true
+		newRoutes = append(newRoutes, ipp)
 	}
 
-	if !newRoutes {
+	if len(newRoutes) == 0 {
 		return nil
 	}
 
+	b.logf("advertising new app connector routes: %v", newRoutes)
 	_, err := b.EditPrefs(&ipn.MaskedPrefs{
 		Prefs: ipn.Prefs{
 			AdvertiseRoutes: finalRoutes,
diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go
index b1a79860d..b7b81ada8 100644
--- a/ipn/ipnlocal/local_test.go
+++ b/ipn/ipnlocal/local_test.go
@@ -1501,6 +1501,53 @@ func TestReconfigureAppConnector(t *testing.T) {
 	}
 }
 
+func TestBackfillAppConnectorRoutes(t *testing.T) {
+	// Create backend with an empty app connector.
+	b := newTestBackend(t)
+	if err := b.Start(ipn.Options{}); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := b.EditPrefs(&ipn.MaskedPrefs{
+		Prefs: ipn.Prefs{
+			AppConnector: ipn.AppConnectorPrefs{Advertise: true},
+		},
+		AppConnectorSet: true,
+	}); err != nil {
+		t.Fatal(err)
+	}
+	b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs)
+
+	// Smoke check that AdvertiseRoutes doesn't have the test IP.
+	ip := netip.MustParseAddr("1.2.3.4")
+	routes := b.Prefs().AdvertiseRoutes().AsSlice()
+	if slices.Contains(routes, netip.PrefixFrom(ip, ip.BitLen())) {
+		t.Fatalf("AdvertiseRoutes %v on a fresh backend already contains advertised route for %v", routes, ip)
+	}
+
+	// Store the test IP in profile data, but not in Prefs.AdvertiseRoutes.
+	b.ControlKnobs().AppCStoreRoutes.Store(true)
+	if err := b.storeRouteInfo(&appc.RouteInfo{
+		Domains: map[string][]netip.Addr{
+			"example.com": {ip},
+		},
+	}); err != nil {
+		t.Fatal(err)
+	}
+
+	// Mimic b.authReconfigure for the app connector bits.
+	b.mu.Lock()
+	b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs)
+	b.mu.Unlock()
+	b.readvertiseAppConnectorRoutes()
+
+	// Check that Prefs.AdvertiseRoutes got backfilled with routes stored in
+	// profile data.
+	routes = b.Prefs().AdvertiseRoutes().AsSlice()
+	if !slices.Contains(routes, netip.PrefixFrom(ip, ip.BitLen())) {
+		t.Fatalf("AdvertiseRoutes %v was not backfilled from stored app connector routes with %v", routes, ip)
+	}
+}
+
 func resolversEqual(t *testing.T, a, b []*dnstype.Resolver) bool {
 	if a == nil && b == nil {
 		return true

From 1562a6f2f2b8017a65ae147e48f23e1ec113ac2f Mon Sep 17 00:00:00 2001
From: Brad Fitzpatrick
Date: Wed, 22 Jan 2025 11:56:36 -0800
Subject: [PATCH 0378/1708] feature/*: make Wake-on-LAN conditional, start supporting modular features

This pulls the Wake-on-LAN (WoL) code out into its own package
(feature/wakeonlan) that registers itself with various new hooks
around tailscaled.

Then a new build tag (ts_omit_wakeonlan) causes the package to not
even be linked in the binary.

Other new packages include:

* feature: to just record which features are loaded. Future:
  dependencies between features.
* feature/condregister: the package with all the build tags that
  tailscaled, tsnet, and the Tailscale Xcode project extension can
  empty (underscore) import to load features as a function of the
  defined build tags.

Future commits will move more of our "ts_omit_foo" build tags into this
style.
Updates #12614 Change-Id: I9c5378dafb1113b62b816aabef02714db3fc9c4a Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 2 +- cmd/k8s-operator/depaware.txt | 5 +- cmd/tailscaled/depaware.txt | 5 +- cmd/tailscaled/tailscaled.go | 1 + feature/condregister/condregister.go | 7 + feature/condregister/maybe_wakeonlan.go | 8 + feature/feature.go | 15 ++ feature/wakeonlan/wakeonlan.go | 243 ++++++++++++++++++ hostinfo/hostinfo.go | 15 +- hostinfo/wol.go | 106 -------- ipn/ipnlocal/c2n.go | 67 +---- ipn/ipnlocal/peerapi.go | 117 +++------ tsnet/tsnet.go | 1 + .../tailscaled_deps_test_darwin.go | 1 + .../tailscaled_deps_test_freebsd.go | 1 + .../integration/tailscaled_deps_test_linux.go | 1 + .../tailscaled_deps_test_openbsd.go | 1 + .../tailscaled_deps_test_windows.go | 1 + 18 files changed, 355 insertions(+), 242 deletions(-) create mode 100644 feature/condregister/condregister.go create mode 100644 feature/condregister/maybe_wakeonlan.go create mode 100644 feature/feature.go create mode 100644 feature/wakeonlan/wakeonlan.go delete mode 100644 hostinfo/wol.go diff --git a/build_dist.sh b/build_dist.sh index 66afa8f74..9a29e5201 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -37,7 +37,7 @@ while [ "$#" -gt 1 ]; do --extra-small) shift ldflags="$ldflags -w -s" - tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion" + tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan" ;; --box) shift diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index a27e1761d..bdcf3417a 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -156,7 +156,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd - github.com/kortschak/wol from tailscale.com/ipn/ipnlocal + github.com/kortschak/wol from tailscale.com/feature/wakeonlan github.com/mailru/easyjson/buffer from github.com/mailru/easyjson/jwriter 💣 github.com/mailru/easyjson/jlexer from github.com/go-openapi/swag github.com/mailru/easyjson/jwriter from github.com/go-openapi/swag @@ -801,6 +801,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/drive from tailscale.com/client/tailscale+ tailscale.com/envknob from tailscale.com/client/tailscale+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ + tailscale.com/feature from tailscale.com/feature/wakeonlan + tailscale.com/feature/condregister from tailscale.com/tsnet + tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 1fc1b8d70..5246b82b9 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -127,7 +127,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd - 
github.com/kortschak/wol from tailscale.com/ipn/ipnlocal + github.com/kortschak/wol from tailscale.com/feature/wakeonlan LD github.com/kr/fs from github.com/pkg/sftp L github.com/mdlayher/genetlink from tailscale.com/net/tstun L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ @@ -259,6 +259,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/drive/driveimpl/shared from tailscale.com/drive/driveimpl+ tailscale.com/envknob from tailscale.com/client/tailscale+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ + tailscale.com/feature from tailscale.com/feature/wakeonlan + tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 9dd00ddd9..bab3bc75a 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -35,6 +35,7 @@ import ( "tailscale.com/control/controlclient" "tailscale.com/drive/driveimpl" "tailscale.com/envknob" + _ "tailscale.com/feature/condregister" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/conffile" diff --git a/feature/condregister/condregister.go b/feature/condregister/condregister.go new file mode 100644 index 000000000..f90250951 --- /dev/null +++ b/feature/condregister/condregister.go @@ -0,0 +1,7 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// The condregister package registers all conditional features guarded +// by build tags. It is one central package that callers can empty import +// to ensure all conditional features are registered. +package condregister diff --git a/feature/condregister/maybe_wakeonlan.go b/feature/condregister/maybe_wakeonlan.go new file mode 100644 index 000000000..14cae605d --- /dev/null +++ b/feature/condregister/maybe_wakeonlan.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_wakeonlan + +package condregister + +import _ "tailscale.com/feature/wakeonlan" diff --git a/feature/feature.go b/feature/feature.go new file mode 100644 index 000000000..ea290c43a --- /dev/null +++ b/feature/feature.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package feature tracks which features are linked into the binary. +package feature + +var in = map[string]bool{} + +// Register notes that the named feature is linked into the binary. +func Register(name string) { + if _, ok := in[name]; ok { + panic("duplicate feature registration for " + name) + } + in[name] = true +} diff --git a/feature/wakeonlan/wakeonlan.go b/feature/wakeonlan/wakeonlan.go new file mode 100644 index 000000000..96c424084 --- /dev/null +++ b/feature/wakeonlan/wakeonlan.go @@ -0,0 +1,243 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package wakeonlan registers the Wake-on-LAN feature. 
+package wakeonlan + +import ( + "encoding/json" + "log" + "net" + "net/http" + "runtime" + "sort" + "strings" + "unicode" + + "github.com/kortschak/wol" + "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/hostinfo" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/tailcfg" + "tailscale.com/util/clientmetric" +) + +func init() { + feature.Register("wakeonlan") + ipnlocal.RegisterC2N("POST /wol", handleC2NWoL) + ipnlocal.RegisterPeerAPIHandler("/v0/wol", handlePeerAPIWakeOnLAN) + hostinfo.RegisterHostinfoNewHook(func(h *tailcfg.Hostinfo) { + h.WoLMACs = getWoLMACs() + }) +} + +func handleC2NWoL(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + r.ParseForm() + var macs []net.HardwareAddr + for _, macStr := range r.Form["mac"] { + mac, err := net.ParseMAC(macStr) + if err != nil { + http.Error(w, "bad 'mac' param", http.StatusBadRequest) + return + } + macs = append(macs, mac) + } + var res struct { + SentTo []string + Errors []string + } + st := b.NetMon().InterfaceState() + if st == nil { + res.Errors = append(res.Errors, "no interface state") + writeJSON(w, &res) + return + } + var password []byte // TODO(bradfitz): support? does anything use WoL passwords? + for _, mac := range macs { + for ifName, ips := range st.InterfaceIPs { + for _, ip := range ips { + if ip.Addr().IsLoopback() || ip.Addr().Is6() { + continue + } + local := &net.UDPAddr{ + IP: ip.Addr().AsSlice(), + Port: 0, + } + remote := &net.UDPAddr{ + IP: net.IPv4bcast, + Port: 0, + } + if err := wol.Wake(mac, password, local, remote); err != nil { + res.Errors = append(res.Errors, err.Error()) + } else { + res.SentTo = append(res.SentTo, ifName) + } + break // one per interface is enough + } + } + } + sort.Strings(res.SentTo) + writeJSON(w, &res) +} + +func writeJSON(w http.ResponseWriter, v any) { + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(v) +} + +func canWakeOnLAN(h ipnlocal.PeerAPIHandler) bool { + if h.Peer().UnsignedPeerAPIOnly() { + return false + } + return h.IsSelfUntagged() || h.PeerCaps().HasCapability(tailcfg.PeerCapabilityWakeOnLAN) +} + +var metricWakeOnLANCalls = clientmetric.NewCounter("peerapi_wol") + +func handlePeerAPIWakeOnLAN(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + metricWakeOnLANCalls.Add(1) + if !canWakeOnLAN(h) { + http.Error(w, "no WoL access", http.StatusForbidden) + return + } + if r.Method != "POST" { + http.Error(w, "bad method", http.StatusMethodNotAllowed) + return + } + macStr := r.FormValue("mac") + if macStr == "" { + http.Error(w, "missing 'mac' param", http.StatusBadRequest) + return + } + mac, err := net.ParseMAC(macStr) + if err != nil { + http.Error(w, "bad 'mac' param", http.StatusBadRequest) + return + } + var password []byte // TODO(bradfitz): support? does anything use WoL passwords? 
+ st := h.LocalBackend().NetMon().InterfaceState() + if st == nil { + http.Error(w, "failed to get interfaces state", http.StatusInternalServerError) + return + } + var res struct { + SentTo []string + Errors []string + } + for ifName, ips := range st.InterfaceIPs { + for _, ip := range ips { + if ip.Addr().IsLoopback() || ip.Addr().Is6() { + continue + } + local := &net.UDPAddr{ + IP: ip.Addr().AsSlice(), + Port: 0, + } + remote := &net.UDPAddr{ + IP: net.IPv4bcast, + Port: 0, + } + if err := wol.Wake(mac, password, local, remote); err != nil { + res.Errors = append(res.Errors, err.Error()) + } else { + res.SentTo = append(res.SentTo, ifName) + } + break // one per interface is enough + } + } + sort.Strings(res.SentTo) + writeJSON(w, res) +} + +// TODO(bradfitz): this is all too simplistic and static. It needs to run +// continuously in response to netmon events (USB ethernet adapters might get +// plugged in) and look for the media type/status/etc. Right now on macOS it +// still detects a half dozen "up" en0, en1, en2, en3 etc interfaces that don't +// have any media. We should only report the one that's actually connected. +// But it works for now (2023-10-05) for fleshing out the rest. + +var wakeMAC = envknob.RegisterString("TS_WAKE_MAC") // mac address, "false" or "auto". for https://github.com/tailscale/tailscale/issues/306 + +// getWoLMACs returns up to 10 MAC address of the local machine to send +// wake-on-LAN packets to in order to wake it up. The returned MACs are in +// lowercase hex colon-separated form ("xx:xx:xx:xx:xx:xx"). +// +// If TS_WAKE_MAC=auto, it tries to automatically find the MACs based on the OS +// type and interface properties. (TODO(bradfitz): incomplete) If TS_WAKE_MAC is +// set to a MAC address, that sole MAC address is returned. +func getWoLMACs() (macs []string) { + switch runtime.GOOS { + case "ios", "android": + return nil + } + if s := wakeMAC(); s != "" { + switch s { + case "auto": + ifs, _ := net.Interfaces() + for _, iface := range ifs { + if iface.Flags&net.FlagLoopback != 0 { + continue + } + if iface.Flags&net.FlagBroadcast == 0 || + iface.Flags&net.FlagRunning == 0 || + iface.Flags&net.FlagUp == 0 { + continue + } + if keepMAC(iface.Name, iface.HardwareAddr) { + macs = append(macs, iface.HardwareAddr.String()) + } + if len(macs) == 10 { + break + } + } + return macs + case "false", "off": // fast path before ParseMAC error + return nil + } + mac, err := net.ParseMAC(s) + if err != nil { + log.Printf("invalid MAC %q", s) + return nil + } + return []string{mac.String()} + } + return nil +} + +var ignoreWakeOUI = map[[3]byte]bool{ + {0x00, 0x15, 0x5d}: true, // Hyper-V + {0x00, 0x50, 0x56}: true, // VMware + {0x00, 0x1c, 0x14}: true, // VMware + {0x00, 0x05, 0x69}: true, // VMware + {0x00, 0x0c, 0x29}: true, // VMware + {0x00, 0x1c, 0x42}: true, // Parallels + {0x08, 0x00, 0x27}: true, // VirtualBox + {0x00, 0x21, 0xf6}: true, // VirtualBox + {0x00, 0x14, 0x4f}: true, // VirtualBox + {0x00, 0x0f, 0x4b}: true, // VirtualBox + {0x52, 0x54, 0x00}: true, // VirtualBox/Vagrant +} + +func keepMAC(ifName string, mac []byte) bool { + if len(mac) != 6 { + return false + } + base := strings.TrimRightFunc(ifName, unicode.IsNumber) + switch runtime.GOOS { + case "darwin": + switch base { + case "llw", "awdl", "utun", "bridge", "lo", "gif", "stf", "anpi", "ap": + return false + } + } + if mac[0] == 0x02 && mac[1] == 0x42 { + // Docker container. 
+ return false + } + oui := [3]byte{mac[0], mac[1], mac[2]} + if ignoreWakeOUI[oui] { + return false + } + return true +} diff --git a/hostinfo/hostinfo.go b/hostinfo/hostinfo.go index 89968e1e6..d952ce603 100644 --- a/hostinfo/hostinfo.go +++ b/hostinfo/hostinfo.go @@ -32,11 +32,19 @@ import ( var started = time.Now() +var newHooks []func(*tailcfg.Hostinfo) + +// RegisterHostinfoNewHook registers a callback to be called on a non-nil +// [tailcfg.Hostinfo] before it is returned by [New]. +func RegisterHostinfoNewHook(f func(*tailcfg.Hostinfo)) { + newHooks = append(newHooks, f) +} + // New returns a partially populated Hostinfo for the current host. func New() *tailcfg.Hostinfo { hostname, _ := os.Hostname() hostname = dnsname.FirstLabel(hostname) - return &tailcfg.Hostinfo{ + hi := &tailcfg.Hostinfo{ IPNVersion: version.Long(), Hostname: hostname, App: appTypeCached(), @@ -57,8 +65,11 @@ func New() *tailcfg.Hostinfo { Cloud: string(cloudenv.Get()), NoLogsNoSupport: envknob.NoLogsNoSupport(), AllowsUpdate: envknob.AllowsRemoteUpdate(), - WoLMACs: getWoLMACs(), } + for _, f := range newHooks { + f(hi) + } + return hi } // non-nil on some platforms diff --git a/hostinfo/wol.go b/hostinfo/wol.go deleted file mode 100644 index 3a30af2fe..000000000 --- a/hostinfo/wol.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package hostinfo - -import ( - "log" - "net" - "runtime" - "strings" - "unicode" - - "tailscale.com/envknob" -) - -// TODO(bradfitz): this is all too simplistic and static. It needs to run -// continuously in response to netmon events (USB ethernet adapaters might get -// plugged in) and look for the media type/status/etc. Right now on macOS it -// still detects a half dozen "up" en0, en1, en2, en3 etc interfaces that don't -// have any media. We should only report the one that's actually connected. -// But it works for now (2023-10-05) for fleshing out the rest. - -var wakeMAC = envknob.RegisterString("TS_WAKE_MAC") // mac address, "false" or "auto". for https://github.com/tailscale/tailscale/issues/306 - -// getWoLMACs returns up to 10 MAC address of the local machine to send -// wake-on-LAN packets to in order to wake it up. The returned MACs are in -// lowercase hex colon-separated form ("xx:xx:xx:xx:xx:xx"). -// -// If TS_WAKE_MAC=auto, it tries to automatically find the MACs based on the OS -// type and interface properties. (TODO(bradfitz): incomplete) If TS_WAKE_MAC is -// set to a MAC address, that sole MAC address is returned. 
-func getWoLMACs() (macs []string) { - switch runtime.GOOS { - case "ios", "android": - return nil - } - if s := wakeMAC(); s != "" { - switch s { - case "auto": - ifs, _ := net.Interfaces() - for _, iface := range ifs { - if iface.Flags&net.FlagLoopback != 0 { - continue - } - if iface.Flags&net.FlagBroadcast == 0 || - iface.Flags&net.FlagRunning == 0 || - iface.Flags&net.FlagUp == 0 { - continue - } - if keepMAC(iface.Name, iface.HardwareAddr) { - macs = append(macs, iface.HardwareAddr.String()) - } - if len(macs) == 10 { - break - } - } - return macs - case "false", "off": // fast path before ParseMAC error - return nil - } - mac, err := net.ParseMAC(s) - if err != nil { - log.Printf("invalid MAC %q", s) - return nil - } - return []string{mac.String()} - } - return nil -} - -var ignoreWakeOUI = map[[3]byte]bool{ - {0x00, 0x15, 0x5d}: true, // Hyper-V - {0x00, 0x50, 0x56}: true, // VMware - {0x00, 0x1c, 0x14}: true, // VMware - {0x00, 0x05, 0x69}: true, // VMware - {0x00, 0x0c, 0x29}: true, // VMware - {0x00, 0x1c, 0x42}: true, // Parallels - {0x08, 0x00, 0x27}: true, // VirtualBox - {0x00, 0x21, 0xf6}: true, // VirtualBox - {0x00, 0x14, 0x4f}: true, // VirtualBox - {0x00, 0x0f, 0x4b}: true, // VirtualBox - {0x52, 0x54, 0x00}: true, // VirtualBox/Vagrant -} - -func keepMAC(ifName string, mac []byte) bool { - if len(mac) != 6 { - return false - } - base := strings.TrimRightFunc(ifName, unicode.IsNumber) - switch runtime.GOOS { - case "darwin": - switch base { - case "llw", "awdl", "utun", "bridge", "lo", "gif", "stf", "anpi", "ap": - return false - } - } - if mac[0] == 0x02 && mac[1] == 0x42 { - // Docker container. - return false - } - oui := [3]byte{mac[0], mac[1], mac[2]} - if ignoreWakeOUI[oui] { - return false - } - return true -} diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 04f91954f..e91921533 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -10,19 +10,16 @@ import ( "errors" "fmt" "io" - "net" "net/http" "os" "os/exec" "path" "path/filepath" "runtime" - "sort" "strconv" "strings" "time" - "github.com/kortschak/wol" "tailscale.com/clientupdate" "tailscale.com/envknob" "tailscale.com/ipn" @@ -66,9 +63,6 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ req("GET /update"): handleC2NUpdateGet, req("POST /update"): handleC2NUpdatePost, - // Wake-on-LAN. - req("POST /wol"): handleC2NWoL, - // Device posture. req("GET /posture/identity"): handleC2NPostureIdentityGet, @@ -82,6 +76,18 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ req("GET /vip-services"): handleC2NVIPServicesGet, } +// RegisterC2N registers a new c2n handler for the given pattern. +// +// A pattern is like "GET /foo" (specific to an HTTP method) or "/foo" (all +// methods). It panics if the pattern is already registered. 
+func RegisterC2N(pattern string, h func(*LocalBackend, http.ResponseWriter, *http.Request)) { + k := req(pattern) + if _, ok := c2nHandlers[k]; ok { + panic(fmt.Sprintf("c2n: duplicate handler for %q", pattern)) + } + c2nHandlers[k] = h +} + type c2nHandler func(*LocalBackend, http.ResponseWriter, *http.Request) type methodAndPath struct { @@ -503,55 +509,6 @@ func regularFileExists(path string) bool { return err == nil && fi.Mode().IsRegular() } -func handleC2NWoL(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - r.ParseForm() - var macs []net.HardwareAddr - for _, macStr := range r.Form["mac"] { - mac, err := net.ParseMAC(macStr) - if err != nil { - http.Error(w, "bad 'mac' param", http.StatusBadRequest) - return - } - macs = append(macs, mac) - } - var res struct { - SentTo []string - Errors []string - } - st := b.sys.NetMon.Get().InterfaceState() - if st == nil { - res.Errors = append(res.Errors, "no interface state") - writeJSON(w, &res) - return - } - var password []byte // TODO(bradfitz): support? does anything use WoL passwords? - for _, mac := range macs { - for ifName, ips := range st.InterfaceIPs { - for _, ip := range ips { - if ip.Addr().IsLoopback() || ip.Addr().Is6() { - continue - } - local := &net.UDPAddr{ - IP: ip.Addr().AsSlice(), - Port: 0, - } - remote := &net.UDPAddr{ - IP: net.IPv4bcast, - Port: 0, - } - if err := wol.Wake(mac, password, local, remote); err != nil { - res.Errors = append(res.Errors, err.Error()) - } else { - res.SentTo = append(res.SentTo, ifName) - } - break // one per interface is enough - } - } - } - sort.Strings(res.SentTo) - writeJSON(w, &res) -} - // handleC2NTLSCertStatus returns info about the last TLS certificate issued for the // provided domain. This can be called by the controlplane to clean up DNS TXT // records when they're no longer needed by LetsEncrypt. diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 4d0548917..f79fb200b 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -20,13 +20,11 @@ import ( "path/filepath" "runtime" "slices" - "sort" "strconv" "strings" "sync" "time" - "github.com/kortschak/wol" "golang.org/x/net/dns/dnsmessage" "golang.org/x/net/http/httpguts" "tailscale.com/drive" @@ -226,6 +224,23 @@ type peerAPIHandler struct { peerUser tailcfg.UserProfile // profile of peerNode } +// PeerAPIHandler is the interface implemented by [peerAPIHandler] and needed by +// module features registered via tailscale.com/feature/*. +type PeerAPIHandler interface { + Peer() tailcfg.NodeView + PeerCaps() tailcfg.PeerCapMap + Self() tailcfg.NodeView + LocalBackend() *LocalBackend + IsSelfUntagged() bool // whether the peer is untagged and the same as this user +} + +func (h *peerAPIHandler) IsSelfUntagged() bool { + return !h.selfNode.IsTagged() && !h.peerNode.IsTagged() && h.isSelf +} +func (h *peerAPIHandler) Peer() tailcfg.NodeView { return h.peerNode } +func (h *peerAPIHandler) Self() tailcfg.NodeView { return h.selfNode } +func (h *peerAPIHandler) LocalBackend() *LocalBackend { return h.ps.b } + func (h *peerAPIHandler) logf(format string, a ...any) { h.ps.b.logf("peerapi: "+format, a...) } @@ -302,6 +317,20 @@ func peerAPIRequestShouldGetSecurityHeaders(r *http.Request) bool { return false } +// RegisterPeerAPIHandler registers a PeerAPI handler. +// +// The path should be of the form "/v0/foo". +// +// It panics if the path is already registered. 
+func RegisterPeerAPIHandler(path string, f func(PeerAPIHandler, http.ResponseWriter, *http.Request)) { + if _, ok := peerAPIHandlers[path]; ok { + panic(fmt.Sprintf("duplicate PeerAPI handler %q", path)) + } + peerAPIHandlers[path] = f +} + +var peerAPIHandlers = map[string]func(PeerAPIHandler, http.ResponseWriter, *http.Request){} // by URL.Path + func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err := h.validatePeerAPIRequest(r); err != nil { metricInvalidRequests.Add(1) @@ -346,10 +375,6 @@ func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { case "/v0/dnsfwd": h.handleServeDNSFwd(w, r) return - case "/v0/wol": - metricWakeOnLANCalls.Add(1) - h.handleWakeOnLAN(w, r) - return case "/v0/interfaces": h.handleServeInterfaces(w, r) return @@ -364,6 +389,10 @@ func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.handleServeIngress(w, r) return } + if ph, ok := peerAPIHandlers[r.URL.Path]; ok { + ph(h, w, r) + return + } who := h.peerUser.DisplayName fmt.Fprintf(w, ` @@ -624,14 +653,6 @@ func (h *peerAPIHandler) canDebug() bool { return h.isSelf || h.peerHasCap(tailcfg.PeerCapabilityDebugPeer) } -// canWakeOnLAN reports whether h can send a Wake-on-LAN packet from this node. -func (h *peerAPIHandler) canWakeOnLAN() bool { - if h.peerNode.UnsignedPeerAPIOnly() { - return false - } - return h.isSelf || h.peerHasCap(tailcfg.PeerCapabilityWakeOnLAN) -} - var allowSelfIngress = envknob.RegisterBool("TS_ALLOW_SELF_INGRESS") // canIngress reports whether h can send ingress requests to this node. @@ -640,10 +661,10 @@ func (h *peerAPIHandler) canIngress() bool { } func (h *peerAPIHandler) peerHasCap(wantCap tailcfg.PeerCapability) bool { - return h.peerCaps().HasCapability(wantCap) + return h.PeerCaps().HasCapability(wantCap) } -func (h *peerAPIHandler) peerCaps() tailcfg.PeerCapMap { +func (h *peerAPIHandler) PeerCaps() tailcfg.PeerCapMap { return h.ps.b.PeerCaps(h.remoteAddr.Addr()) } @@ -817,61 +838,6 @@ func (h *peerAPIHandler) handleServeDNSFwd(w http.ResponseWriter, r *http.Reques dh.ServeHTTP(w, r) } -func (h *peerAPIHandler) handleWakeOnLAN(w http.ResponseWriter, r *http.Request) { - if !h.canWakeOnLAN() { - http.Error(w, "no WoL access", http.StatusForbidden) - return - } - if r.Method != "POST" { - http.Error(w, "bad method", http.StatusMethodNotAllowed) - return - } - macStr := r.FormValue("mac") - if macStr == "" { - http.Error(w, "missing 'mac' param", http.StatusBadRequest) - return - } - mac, err := net.ParseMAC(macStr) - if err != nil { - http.Error(w, "bad 'mac' param", http.StatusBadRequest) - return - } - var password []byte // TODO(bradfitz): support? does anything use WoL passwords? 
- st := h.ps.b.sys.NetMon.Get().InterfaceState() - if st == nil { - http.Error(w, "failed to get interfaces state", http.StatusInternalServerError) - return - } - var res struct { - SentTo []string - Errors []string - } - for ifName, ips := range st.InterfaceIPs { - for _, ip := range ips { - if ip.Addr().IsLoopback() || ip.Addr().Is6() { - continue - } - local := &net.UDPAddr{ - IP: ip.Addr().AsSlice(), - Port: 0, - } - remote := &net.UDPAddr{ - IP: net.IPv4bcast, - Port: 0, - } - if err := wol.Wake(mac, password, local, remote); err != nil { - res.Errors = append(res.Errors, err.Error()) - } else { - res.SentTo = append(res.SentTo, ifName) - } - break // one per interface is enough - } - } - sort.Strings(res.SentTo) - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - func (h *peerAPIHandler) replyToDNSQueries() bool { if h.isSelf { // If the peer is owned by the same user, just allow it @@ -1150,7 +1116,7 @@ func (h *peerAPIHandler) handleServeDrive(w http.ResponseWriter, r *http.Request return } - capsMap := h.peerCaps() + capsMap := h.PeerCaps() driveCaps, ok := capsMap[tailcfg.PeerCapabilityTaildrive] if !ok { h.logf("taildrive: not permitted") @@ -1274,8 +1240,7 @@ var ( metricInvalidRequests = clientmetric.NewCounter("peerapi_invalid_requests") // Non-debug PeerAPI endpoints. - metricPutCalls = clientmetric.NewCounter("peerapi_put") - metricDNSCalls = clientmetric.NewCounter("peerapi_dns") - metricWakeOnLANCalls = clientmetric.NewCounter("peerapi_wol") - metricIngressCalls = clientmetric.NewCounter("peerapi_ingress") + metricPutCalls = clientmetric.NewCounter("peerapi_put") + metricDNSCalls = clientmetric.NewCounter("peerapi_dns") + metricIngressCalls = clientmetric.NewCounter("peerapi_ingress") ) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index b769e719c..3505c9453 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -29,6 +29,7 @@ import ( "tailscale.com/client/tailscale" "tailscale.com/control/controlclient" "tailscale.com/envknob" + _ "tailscale.com/feature/condregister" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index 6676ee22c..d04dc6aa1 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -17,6 +17,7 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index 6676ee22c..d04dc6aa1 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -17,6 +17,7 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index 6676ee22c..d04dc6aa1 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -17,6 +17,7 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature/condregister" _ 
"tailscale.com/health" _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index 6676ee22c..d04dc6aa1 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -17,6 +17,7 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index bbf46d8c2..5eda22327 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -24,6 +24,7 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" From 3033a96b02c7bad11a1260c92e06668f0ac970bc Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 23 Jan 2025 10:47:21 +0000 Subject: [PATCH 0379/1708] cmd/k8s-operator: fix reconciler name clash (#14712) The new ProxyGroup-based Ingress reconciler is causing a fatal log at startup because it has the same name as the existing Ingress reconciler. Explicitly name both to ensure they have unique names that are consistent with other explicitly named reconcilers. Updates #14583 Change-Id: Ie76e3eaf3a96b1cec3d3615ea254a847447372ea Signed-off-by: Tom Proctor --- cmd/k8s-operator/operator.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 6368698d8..f349e7848 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -315,6 +315,7 @@ func runReconcilers(opts reconcilerOpts) { err = builder. ControllerManagedBy(mgr). For(&networkingv1.Ingress{}). + Named("ingress-reconciler"). Watches(&appsv1.StatefulSet{}, ingressChildFilter). Watches(&corev1.Secret{}, ingressChildFilter). Watches(&corev1.Service{}, svcHandlerForIngress). @@ -336,6 +337,7 @@ func runReconcilers(opts reconcilerOpts) { err = builder. ControllerManagedBy(mgr). For(&networkingv1.Ingress{}). + Named("ingress-pg-reconciler"). Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog))). Complete(&IngressPGReconciler{ recorder: eventRecorder, @@ -357,6 +359,7 @@ func runReconcilers(opts reconcilerOpts) { proxyClassFilterForConnector := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForConnector(mgr.GetClient(), startlog)) err = builder.ControllerManagedBy(mgr). For(&tsapi.Connector{}). + Named("connector-reconciler"). Watches(&appsv1.StatefulSet{}, connectorFilter). Watches(&corev1.Secret{}, connectorFilter). Watches(&tsapi.ProxyClass{}, proxyClassFilterForConnector). @@ -376,6 +379,7 @@ func runReconcilers(opts reconcilerOpts) { nameserverFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("nameserver")) err = builder.ControllerManagedBy(mgr). For(&tsapi.DNSConfig{}). + Named("nameserver-reconciler"). Watches(&appsv1.Deployment{}, nameserverFilter). Watches(&corev1.ConfigMap{}, nameserverFilter). Watches(&corev1.Service{}, nameserverFilter). 
@@ -455,6 +459,7 @@ func runReconcilers(opts reconcilerOpts) { serviceMonitorFilter := handler.EnqueueRequestsFromMapFunc(proxyClassesWithServiceMonitor(mgr.GetClient(), opts.log)) err = builder.ControllerManagedBy(mgr). For(&tsapi.ProxyClass{}). + Named("proxyclass-reconciler"). Watches(&apiextensionsv1.CustomResourceDefinition{}, serviceMonitorFilter). Complete(&ProxyClassReconciler{ Client: mgr.GetClient(), @@ -498,6 +503,7 @@ func runReconcilers(opts reconcilerOpts) { recorderFilter := handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &tsapi.Recorder{}) err = builder.ControllerManagedBy(mgr). For(&tsapi.Recorder{}). + Named("recorder-reconciler"). Watches(&appsv1.StatefulSet{}, recorderFilter). Watches(&corev1.ServiceAccount{}, recorderFilter). Watches(&corev1.Secret{}, recorderFilter). @@ -520,6 +526,7 @@ func runReconcilers(opts reconcilerOpts) { proxyClassFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForProxyGroup(mgr.GetClient(), startlog)) err = builder.ControllerManagedBy(mgr). For(&tsapi.ProxyGroup{}). + Named("proxygroup-reconciler"). Watches(&appsv1.StatefulSet{}, ownedByProxyGroupFilter). Watches(&corev1.ConfigMap{}, ownedByProxyGroupFilter). Watches(&corev1.ServiceAccount{}, ownedByProxyGroupFilter). From a00623e8c4f4fa94ae705ac992f361a594b7caa1 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Thu, 23 Jan 2025 09:04:03 -0500 Subject: [PATCH 0380/1708] derp,wgengine/magicsock: remove unexpected label (#14711) Remove "unexpected" labelling of PeerGoneReasonNotHere. A peer being no longer connected to a DERP server is not an unexpected case and causes confusion in looking at logs. Fixes tailscale/corp#25609 Signed-off-by: Mike O'Driscoll --- derp/derp.go | 7 +++---- wgengine/magicsock/derp.go | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/derp/derp.go b/derp/derp.go index 878188cd2..6a7b3b735 100644 --- a/derp/derp.go +++ b/derp/derp.go @@ -79,8 +79,7 @@ const ( // framePeerGone to B so B can forget that a reverse path // exists on that connection to get back to A. It is also sent // if A tries to send a CallMeMaybe to B and the server has no - // record of B (which currently would only happen if there was - // a bug). + // record of B framePeerGone = frameType(0x08) // 32B pub key of peer that's gone + 1 byte reason // framePeerPresent is like framePeerGone, but for other members of the DERP @@ -131,8 +130,8 @@ const ( type PeerGoneReasonType byte const ( - PeerGoneReasonDisconnected = PeerGoneReasonType(0x00) // peer disconnected from this server - PeerGoneReasonNotHere = PeerGoneReasonType(0x01) // server doesn't know about this peer, unexpected + PeerGoneReasonDisconnected = PeerGoneReasonType(0x00) // is only sent when a peer disconnects from this server + PeerGoneReasonNotHere = PeerGoneReasonType(0x01) // server doesn't know about this peer PeerGoneReasonMeshConnBroke = PeerGoneReasonType(0xf0) // invented by Client.RunWatchConnectionLoop on disconnect; not sent on the wire ) diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index e9f070862..7c8ffc01a 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -632,7 +632,7 @@ func (c *Conn) runDerpReader(ctx context.Context, regionID int, dc *derphttp.Cli // Do nothing. 
case derp.PeerGoneReasonNotHere: metricRecvDiscoDERPPeerNotHere.Add(1) - c.logf("[unexpected] magicsock: derp-%d does not know about peer %s, removing route", + c.logf("magicsock: derp-%d does not know about peer %s, removing route", regionID, key.NodePublic(m.Peer).ShortString()) default: metricRecvDiscoDERPPeerGoneUnknown.Add(1) From f1710f4a429911b461fb5e25a1e33642317bdedf Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 23 Jan 2025 09:03:56 -0800 Subject: [PATCH 0381/1708] appc,ipn/ipnlocal: log DNS parsing errors in app connectors (#14607) If we fail to parse the upstream DNS response in an app connector, we might miss new IPs for the target domain. Log parsing errors to be able to diagnose that. Updates #14606 Signed-off-by: Andrew Lytvynov --- appc/appconnector.go | 21 +++++------ appc/appconnector_test.go | 72 ++++++++++++++++++++++++++++---------- ipn/ipnlocal/local.go | 6 ++-- ipn/ipnlocal/local_test.go | 8 +++-- ipn/ipnlocal/peerapi.go | 6 +++- 5 files changed, 78 insertions(+), 35 deletions(-) diff --git a/appc/appconnector.go b/appc/appconnector.go index 063381cd7..f4857fcc6 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -374,13 +374,13 @@ func (e *AppConnector) DomainRoutes() map[string][]netip.Addr { // response is being returned over the PeerAPI. The response is parsed and // matched against the configured domains, if matched the routeAdvertiser is // advised to advertise the discovered route. -func (e *AppConnector) ObserveDNSResponse(res []byte) { +func (e *AppConnector) ObserveDNSResponse(res []byte) error { var p dnsmessage.Parser if _, err := p.Start(res); err != nil { - return + return err } if err := p.SkipAllQuestions(); err != nil { - return + return err } // cnameChain tracks a chain of CNAMEs for a given query in order to reverse @@ -399,12 +399,12 @@ func (e *AppConnector) ObserveDNSResponse(res []byte) { break } if err != nil { - return + return err } if h.Class != dnsmessage.ClassINET { if err := p.SkipAnswer(); err != nil { - return + return err } continue } @@ -413,7 +413,7 @@ func (e *AppConnector) ObserveDNSResponse(res []byte) { case dnsmessage.TypeCNAME, dnsmessage.TypeA, dnsmessage.TypeAAAA: default: if err := p.SkipAnswer(); err != nil { - return + return err } continue @@ -427,7 +427,7 @@ func (e *AppConnector) ObserveDNSResponse(res []byte) { if h.Type == dnsmessage.TypeCNAME { res, err := p.CNAMEResource() if err != nil { - return + return err } cname := strings.TrimSuffix(strings.ToLower(res.CNAME.String()), ".") if len(cname) == 0 { @@ -441,20 +441,20 @@ func (e *AppConnector) ObserveDNSResponse(res []byte) { case dnsmessage.TypeA: r, err := p.AResource() if err != nil { - return + return err } addr := netip.AddrFrom4(r.A) mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) case dnsmessage.TypeAAAA: r, err := p.AAAAResource() if err != nil { - return + return err } addr := netip.AddrFrom16(r.AAAA) mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) default: if err := p.SkipAnswer(); err != nil { - return + return err } continue } @@ -485,6 +485,7 @@ func (e *AppConnector) ObserveDNSResponse(res []byte) { e.scheduleAdvertisement(domain, toAdvertise...) 
} } + return nil } // starting from the given domain that resolved to an address, find it, or any diff --git a/appc/appconnector_test.go b/appc/appconnector_test.go index 36ec7a119..fd0001224 100644 --- a/appc/appconnector_test.go +++ b/appc/appconnector_test.go @@ -69,7 +69,9 @@ func TestUpdateRoutes(t *testing.T) { a.updateDomains([]string{"*.example.com"}) // This route should be collapsed into the range - a.ObserveDNSResponse(dnsResponse("a.example.com.", "192.0.2.1")) + if err := a.ObserveDNSResponse(dnsResponse("a.example.com.", "192.0.2.1")); err != nil { + t.Errorf("ObserveDNSResponse: %v", err) + } a.Wait(ctx) if !slices.Equal(rc.Routes(), []netip.Prefix{netip.MustParsePrefix("192.0.2.1/32")}) { @@ -77,7 +79,9 @@ func TestUpdateRoutes(t *testing.T) { } // This route should not be collapsed or removed - a.ObserveDNSResponse(dnsResponse("b.example.com.", "192.0.0.1")) + if err := a.ObserveDNSResponse(dnsResponse("b.example.com.", "192.0.0.1")); err != nil { + t.Errorf("ObserveDNSResponse: %v", err) + } a.Wait(ctx) routes := []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24"), netip.MustParsePrefix("192.0.0.1/32")} @@ -130,7 +134,9 @@ func TestDomainRoutes(t *testing.T) { a = NewAppConnector(t.Logf, rc, nil, nil) } a.updateDomains([]string{"example.com"}) - a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")) + if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { + t.Errorf("ObserveDNSResponse: %v", err) + } a.Wait(context.Background()) want := map[string][]netip.Addr{ @@ -155,7 +161,9 @@ func TestObserveDNSResponse(t *testing.T) { } // a has no domains configured, so it should not advertise any routes - a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")) + if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { + t.Errorf("ObserveDNSResponse: %v", err) + } if got, want := rc.Routes(), ([]netip.Prefix)(nil); !slices.Equal(got, want) { t.Errorf("got %v; want %v", got, want) } @@ -163,7 +171,9 @@ func TestObserveDNSResponse(t *testing.T) { wantRoutes := []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")} a.updateDomains([]string{"example.com"}) - a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")) + if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { + t.Errorf("ObserveDNSResponse: %v", err) + } a.Wait(ctx) if got, want := rc.Routes(), wantRoutes; !slices.Equal(got, want) { t.Errorf("got %v; want %v", got, want) @@ -172,7 +182,9 @@ func TestObserveDNSResponse(t *testing.T) { // a CNAME record chain should result in a route being added if the chain // matches a routed domain. 
a.updateDomains([]string{"www.example.com", "example.com"}) - a.ObserveDNSResponse(dnsCNAMEResponse("192.0.0.9", "www.example.com.", "chain.example.com.", "example.com.")) + if err := a.ObserveDNSResponse(dnsCNAMEResponse("192.0.0.9", "www.example.com.", "chain.example.com.", "example.com.")); err != nil { + t.Errorf("ObserveDNSResponse: %v", err) + } a.Wait(ctx) wantRoutes = append(wantRoutes, netip.MustParsePrefix("192.0.0.9/32")) if got, want := rc.Routes(), wantRoutes; !slices.Equal(got, want) { @@ -181,7 +193,9 @@ func TestObserveDNSResponse(t *testing.T) { // a CNAME record chain should result in a route being added if the chain // even if only found in the middle of the chain - a.ObserveDNSResponse(dnsCNAMEResponse("192.0.0.10", "outside.example.org.", "www.example.com.", "example.org.")) + if err := a.ObserveDNSResponse(dnsCNAMEResponse("192.0.0.10", "outside.example.org.", "www.example.com.", "example.org.")); err != nil { + t.Errorf("ObserveDNSResponse: %v", err) + } a.Wait(ctx) wantRoutes = append(wantRoutes, netip.MustParsePrefix("192.0.0.10/32")) if got, want := rc.Routes(), wantRoutes; !slices.Equal(got, want) { @@ -190,14 +204,18 @@ func TestObserveDNSResponse(t *testing.T) { wantRoutes = append(wantRoutes, netip.MustParsePrefix("2001:db8::1/128")) - a.ObserveDNSResponse(dnsResponse("example.com.", "2001:db8::1")) + if err := a.ObserveDNSResponse(dnsResponse("example.com.", "2001:db8::1")); err != nil { + t.Errorf("ObserveDNSResponse: %v", err) + } a.Wait(ctx) if got, want := rc.Routes(), wantRoutes; !slices.Equal(got, want) { t.Errorf("got %v; want %v", got, want) } // don't re-advertise routes that have already been advertised - a.ObserveDNSResponse(dnsResponse("example.com.", "2001:db8::1")) + if err := a.ObserveDNSResponse(dnsResponse("example.com.", "2001:db8::1")); err != nil { + t.Errorf("ObserveDNSResponse: %v", err) + } a.Wait(ctx) if !slices.Equal(rc.Routes(), wantRoutes) { t.Errorf("rc.Routes(): got %v; want %v", rc.Routes(), wantRoutes) @@ -207,7 +225,9 @@ func TestObserveDNSResponse(t *testing.T) { pfx := netip.MustParsePrefix("192.0.2.0/24") a.updateRoutes([]netip.Prefix{pfx}) wantRoutes = append(wantRoutes, pfx) - a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.2.1")) + if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.2.1")); err != nil { + t.Errorf("ObserveDNSResponse: %v", err) + } a.Wait(ctx) if !slices.Equal(rc.Routes(), wantRoutes) { t.Errorf("rc.Routes(): got %v; want %v", rc.Routes(), wantRoutes) @@ -230,7 +250,9 @@ func TestWildcardDomains(t *testing.T) { } a.updateDomains([]string{"*.example.com"}) - a.ObserveDNSResponse(dnsResponse("foo.example.com.", "192.0.0.8")) + if err := a.ObserveDNSResponse(dnsResponse("foo.example.com.", "192.0.0.8")); err != nil { + t.Errorf("ObserveDNSResponse: %v", err) + } a.Wait(ctx) if got, want := rc.Routes(), []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")}; !slices.Equal(got, want) { t.Errorf("routes: got %v; want %v", got, want) @@ -438,10 +460,16 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { // adding domains doesn't immediately cause any routes to be advertised assertRoutes("update domains", []netip.Prefix{}, []netip.Prefix{}) - a.ObserveDNSResponse(dnsResponse("a.example.com.", "1.2.3.1")) - a.ObserveDNSResponse(dnsResponse("a.example.com.", "1.2.3.2")) - a.ObserveDNSResponse(dnsResponse("b.example.com.", "1.2.3.3")) - a.ObserveDNSResponse(dnsResponse("b.example.com.", "1.2.3.4")) + for _, res := range [][]byte{ + dnsResponse("a.example.com.", "1.2.3.1"), + 
dnsResponse("a.example.com.", "1.2.3.2"), + dnsResponse("b.example.com.", "1.2.3.3"), + dnsResponse("b.example.com.", "1.2.3.4"), + } { + if err := a.ObserveDNSResponse(res); err != nil { + t.Errorf("ObserveDNSResponse: %v", err) + } + } a.Wait(ctx) // observing dns responses causes routes to be advertised assertRoutes("observed dns", prefixes("1.2.3.1/32", "1.2.3.2/32", "1.2.3.3/32", "1.2.3.4/32"), []netip.Prefix{}) @@ -487,10 +515,16 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { // adding domains doesn't immediately cause any routes to be advertised assertRoutes("update domains", []netip.Prefix{}, []netip.Prefix{}) - a.ObserveDNSResponse(dnsResponse("a.example.com.", "1.2.3.1")) - a.ObserveDNSResponse(dnsResponse("a.example.com.", "1.2.3.2")) - a.ObserveDNSResponse(dnsResponse("1.b.example.com.", "1.2.3.3")) - a.ObserveDNSResponse(dnsResponse("2.b.example.com.", "1.2.3.4")) + for _, res := range [][]byte{ + dnsResponse("a.example.com.", "1.2.3.1"), + dnsResponse("a.example.com.", "1.2.3.2"), + dnsResponse("1.b.example.com.", "1.2.3.3"), + dnsResponse("2.b.example.com.", "1.2.3.4"), + } { + if err := a.ObserveDNSResponse(res); err != nil { + t.Errorf("ObserveDNSResponse: %v", err) + } + } a.Wait(ctx) // observing dns responses causes routes to be advertised assertRoutes("observed dns", prefixes("1.2.3.1/32", "1.2.3.2/32", "1.2.3.3/32", "1.2.3.4/32"), []netip.Prefix{}) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 4ff3f3db4..33ce9f331 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7276,17 +7276,17 @@ func (b *LocalBackend) DoSelfUpdate() { // ObserveDNSResponse passes a DNS response from the PeerAPI DNS server to the // App Connector to enable route discovery. -func (b *LocalBackend) ObserveDNSResponse(res []byte) { +func (b *LocalBackend) ObserveDNSResponse(res []byte) error { var appConnector *appc.AppConnector b.mu.Lock() if b.appConnector == nil { b.mu.Unlock() - return + return nil } appConnector = b.appConnector b.mu.Unlock() - appConnector.ObserveDNSResponse(res) + return appConnector.ObserveDNSResponse(res) } // ErrDisallowedAutoRoute is returned by AdvertiseRoute when a route that is not allowed is requested. 
diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index b7b81ada8..de9ebf9fb 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1372,7 +1372,9 @@ func TestObserveDNSResponse(t *testing.T) { b := newTestBackend(t) // ensure no error when no app connector is configured - b.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")) + if err := b.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { + t.Errorf("ObserveDNSResponse: %v", err) + } rc := &appctest.RouteCollector{} if shouldStore { @@ -1383,7 +1385,9 @@ func TestObserveDNSResponse(t *testing.T) { b.appConnector.UpdateDomains([]string{"example.com"}) b.appConnector.Wait(context.Background()) - b.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")) + if err := b.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { + t.Errorf("ObserveDNSResponse: %v", err) + } b.appConnector.Wait(context.Background()) wantRoutes := []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")} if !slices.Equal(rc.Routes(), wantRoutes) { diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index f79fb200b..ab2093c13 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -932,7 +932,11 @@ func (h *peerAPIHandler) handleDNSQuery(w http.ResponseWriter, r *http.Request) // instead to avoid re-parsing the DNS response for improved performance in // the future. if h.ps.b.OfferingAppConnector() { - h.ps.b.ObserveDNSResponse(res) + if err := h.ps.b.ObserveDNSResponse(res); err != nil { + h.logf("ObserveDNSResponse error: %v", err) + // This is not fatal, we probably just failed to parse the upstream + // response. Return it to the caller anyway. + } } if pretty { From d6abbc2e610f605cdb114f97182e14d54794dc87 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 22 Jan 2025 18:07:57 -0800 Subject: [PATCH 0382/1708] net/tstun: move TAP support out to separate package feature/tap Still behind the same ts_omit_tap build tag. See #14738 for background on the pattern. 
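The pattern here is an optional hook that the feature package fills in from
init() and that core code checks before use. A minimal, self-contained
sketch (the Greet hook is hypothetical and only illustrates the feature.Hook
API added below; the real hook is tstun.CreateTAP, set by feature/tap):

    package main

    import "tailscale.com/feature"

    // Greet is an optional hook that a conditionally compiled feature
    // package would fill in from its init() when linked into the binary.
    var Greet feature.Hook[func(name string) string]

    func main() {
            // Stands in for the optional package's init(); Set panics if
            // called twice or with a zero value.
            Greet.Set(func(name string) string { return "hello, " + name })

            if Greet.IsSet() {
                    println(Greet.Get()("world")) // prints "hello, world"
            }
    }

Core code that cannot assume the feature is linked in (tstun.New below, for
example) checks IsSet and returns a "not supported in this build" error
rather than panicking in Get.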
Updates #12614 Change-Id: I03fb3d2bf137111e727415bd8e713d8568156ecc Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 7 +++-- cmd/tailscaled/depaware.txt | 7 +++-- feature/condregister/maybe_tap.go | 8 +++++ feature/feature.go | 39 +++++++++++++++++++++++++ {net/tstun => feature/tap}/tap_linux.go | 24 +++++++++++---- net/tstun/tun.go | 9 +++--- net/tstun/wrap.go | 11 ++++--- 7 files changed, 85 insertions(+), 20 deletions(-) create mode 100644 feature/condregister/maybe_tap.go rename {net/tstun => feature/tap}/tap_linux.go (95%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index bdcf3417a..11a9201d4 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -140,7 +140,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L 💣 github.com/illarion/gonotify/v2 from tailscale.com/net/dns - L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/net/tstun + L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/feature/tap L github.com/insomniacslk/dhcp/iana from github.com/insomniacslk/dhcp/dhcpv4 L github.com/insomniacslk/dhcp/interfaces from github.com/insomniacslk/dhcp/dhcpv4 L github.com/insomniacslk/dhcp/rfc1035label from github.com/insomniacslk/dhcp/dhcpv4 @@ -302,7 +302,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ - gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/net/tstun+ + gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/feature/tap+ gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack+ gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ @@ -801,8 +801,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/drive from tailscale.com/client/tailscale+ tailscale.com/envknob from tailscale.com/client/tailscale+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ - tailscale.com/feature from tailscale.com/feature/wakeonlan + tailscale.com/feature from tailscale.com/feature/wakeonlan+ tailscale.com/feature/condregister from tailscale.com/tsnet + L tailscale.com/feature/tap from tailscale.com/feature/condregister tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 5246b82b9..4f81d93dd 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -112,7 +112,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L 💣 github.com/illarion/gonotify/v2 from tailscale.com/net/dns - L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/net/tstun + L github.com/insomniacslk/dhcp/dhcpv4 from 
tailscale.com/feature/tap L github.com/insomniacslk/dhcp/iana from github.com/insomniacslk/dhcp/dhcpv4 L github.com/insomniacslk/dhcp/interfaces from github.com/insomniacslk/dhcp/dhcpv4 L github.com/insomniacslk/dhcp/rfc1035label from github.com/insomniacslk/dhcp/dhcpv4 @@ -214,7 +214,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ - gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/net/tstun+ + gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/feature/tap+ gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack+ gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ @@ -259,8 +259,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/drive/driveimpl/shared from tailscale.com/drive/driveimpl+ tailscale.com/envknob from tailscale.com/client/tailscale+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ - tailscale.com/feature from tailscale.com/feature/wakeonlan + tailscale.com/feature from tailscale.com/feature/wakeonlan+ tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + L tailscale.com/feature/tap from tailscale.com/feature/condregister tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal diff --git a/feature/condregister/maybe_tap.go b/feature/condregister/maybe_tap.go new file mode 100644 index 000000000..eca4fc3ac --- /dev/null +++ b/feature/condregister/maybe_tap.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !ts_omit_tap + +package condregister + +import _ "tailscale.com/feature/tap" diff --git a/feature/feature.go b/feature/feature.go index ea290c43a..6415cfc4a 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -4,6 +4,8 @@ // Package feature tracks which features are linked into the binary. package feature +import "reflect" + var in = map[string]bool{} // Register notes that the named feature is linked into the binary. @@ -13,3 +15,40 @@ func Register(name string) { } in[name] = true } + +// Hook is a func that can only be set once. +// +// It is not safe for concurrent use. +type Hook[Func any] struct { + f Func + ok bool +} + +// IsSet reports whether the hook has been set. +func (h *Hook[Func]) IsSet() bool { + return h.ok +} + +// Set sets the hook function, panicking if it's already been set +// or f is the zero value. +// +// It's meant to be called in init. +func (h *Hook[Func]) Set(f Func) { + if h.ok { + panic("Set on already-set feature hook") + } + if reflect.ValueOf(f).IsZero() { + panic("Set with zero value") + } + h.f = f + h.ok = true +} + +// Get returns the hook function, or panics if it hasn't been set. +// Use IsSet to check if it's been set. 
+func (h *Hook[Func]) Get() Func { + if !h.ok { + panic("Get on unset feature hook, without IsSet") + } + return h.f +} diff --git a/net/tstun/tap_linux.go b/feature/tap/tap_linux.go similarity index 95% rename from net/tstun/tap_linux.go rename to feature/tap/tap_linux.go index 8a00a9692..58ac00593 100644 --- a/net/tstun/tap_linux.go +++ b/feature/tap/tap_linux.go @@ -1,9 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ts_omit_tap - -package tstun +// Package tap registers Tailscale's experimental (demo) Linux TAP (Layer 2) support. +package tap import ( "bytes" @@ -26,6 +25,7 @@ import ( "tailscale.com/net/netaddr" "tailscale.com/net/packet" "tailscale.com/net/tsaddr" + "tailscale.com/net/tstun" "tailscale.com/syncs" "tailscale.com/types/ipproto" "tailscale.com/types/logger" @@ -38,7 +38,11 @@ import ( // For now just hard code it. var ourMAC = net.HardwareAddr{0x30, 0x2D, 0x66, 0xEC, 0x7A, 0x93} -func init() { createTAP = createTAPLinux } +const tapDebug = tstun.TAPDebug + +func init() { + tstun.CreateTAP.Set(createTAPLinux) +} func createTAPLinux(logf logger.Logf, tapName, bridgeName string) (tun.Device, error) { fd, err := unix.Open("/dev/net/tun", unix.O_RDWR, 0) @@ -87,7 +91,10 @@ var ( etherTypeIPv6 = etherType{0x86, 0xDD} ) -const ipv4HeaderLen = 20 +const ( + ipv4HeaderLen = 20 + ethernetFrameSize = 14 // 2 six byte MACs, 2 bytes ethertype +) const ( consumePacket = true @@ -186,6 +193,11 @@ var ( cgnatNetMask = net.IPMask(net.ParseIP("255.192.0.0").To4()) ) +// parsedPacketPool holds a pool of Parsed structs for use in filtering. +// This is needed because escape analysis cannot see that parsed packets +// do not escape through {Pre,Post}Filter{In,Out}. +var parsedPacketPool = sync.Pool{New: func() any { return new(packet.Parsed) }} + // handleDHCPRequest handles receiving a raw TAP ethernet frame and reports whether // it's been handled as a DHCP request. That is, it reports whether the frame should // be ignored by the caller and not passed on. @@ -392,7 +404,7 @@ type tapDevice struct { destMACAtomic syncs.AtomicValue[[6]byte] } -var _ setIPer = (*tapDevice)(nil) +var _ tstun.SetIPer = (*tapDevice)(nil) func (t *tapDevice) SetIP(ipV4, ipV6TODO netip.Addr) error { t.clientIPv4.Store(ipV4.String()) diff --git a/net/tstun/tun.go b/net/tstun/tun.go index 56c66c83a..44ccdfc99 100644 --- a/net/tstun/tun.go +++ b/net/tstun/tun.go @@ -14,11 +14,12 @@ import ( "time" "github.com/tailscale/wireguard-go/tun" + "tailscale.com/feature" "tailscale.com/types/logger" ) -// createTAP is non-nil on Linux. -var createTAP func(logf logger.Logf, tapName, bridgeName string) (tun.Device, error) +// CrateTAP is the hook set by feature/tap. +var CreateTAP feature.Hook[func(logf logger.Logf, tapName, bridgeName string) (tun.Device, error)] // New returns a tun.Device for the requested device name, along with // the OS-dependent name that was allocated to the device. 
@@ -29,7 +30,7 @@ func New(logf logger.Logf, tunName string) (tun.Device, string, error) { if runtime.GOOS != "linux" { return nil, "", errors.New("tap only works on Linux") } - if createTAP == nil { // if the ts_omit_tap tag is used + if !CreateTAP.IsSet() { // if the ts_omit_tap tag is used return nil, "", errors.New("tap is not supported in this build") } f := strings.Split(tunName, ":") @@ -42,7 +43,7 @@ func New(logf logger.Logf, tunName string) (tun.Device, string, error) { default: return nil, "", errors.New("bogus tap argument") } - dev, err = createTAP(logf, tapName, bridgeName) + dev, err = CreateTAP.Get()(logf, tapName, bridgeName) } else { dev, err = tun.CreateTUN(tunName, int(DefaultTUNMTU())) } diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index e4ff36b49..2d56f3768 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -53,7 +53,8 @@ const PacketStartOffset = device.MessageTransportHeaderSize // of a packet that can be injected into a tstun.Wrapper. const MaxPacketSize = device.MaxContentSize -const tapDebug = false // for super verbose TAP debugging +// TAPDebug is whether super verbose TAP debugging is enabled. +const TAPDebug = false var ( // ErrClosed is returned when attempting an operation on a closed Wrapper. @@ -459,7 +460,7 @@ func (t *Wrapper) pollVector() { return } n, err = reader(t.vectorBuffer[:], sizes, readOffset) - if t.isTAP && tapDebug { + if t.isTAP && TAPDebug { s := fmt.Sprintf("% x", t.vectorBuffer[0][:]) for strings.HasSuffix(s, " 00") { s = strings.TrimSuffix(s, " 00") @@ -792,7 +793,9 @@ func (pc *peerConfigTable) outboundPacketIsJailed(p *packet.Parsed) bool { return c.jailed } -type setIPer interface { +// SetIPer is the interface expected to be implemented by the TAP implementation +// of tun.Device. +type SetIPer interface { // SetIP sets the IP addresses of the TAP device. SetIP(ipV4, ipV6 netip.Addr) error } @@ -800,7 +803,7 @@ type setIPer interface { // SetWGConfig is called when a new NetworkMap is received. func (t *Wrapper) SetWGConfig(wcfg *wgcfg.Config) { if t.isTAP { - if sip, ok := t.tdev.(setIPer); ok { + if sip, ok := t.tdev.(SetIPer); ok { sip.SetIP(findV4(wcfg.Addresses), findV6(wcfg.Addresses)) } } From 413fb5b93311972e3a8d724bb696607ef3afe6f2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 23 Jan 2025 12:45:06 -0800 Subject: [PATCH 0383/1708] control/controlclient: delete unreferenced mapSession UserProfiles This was a slow memory leak on busy tailnets with lots of tagged ephemeral nodes. Updates tailscale/corp#26058 Change-Id: I298e7d438e3ffbb3cde795640e344671d244c632 Signed-off-by: Brad Fitzpatrick --- control/controlclient/map.go | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/control/controlclient/map.go b/control/controlclient/map.go index 13b11d6df..1a54fc543 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -195,6 +195,10 @@ func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *t ms.updateStateFromResponse(resp) + // Occasionally clean up old userprofile if it grows too much + // from e.g. ephemeral tagged nodes. + ms.cleanLastUserProfile() + if ms.tryHandleIncrementally(resp) { ms.occasionallyPrintSummary(ms.lastNetmapSummary) return nil @@ -292,7 +296,6 @@ func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) { for _, up := range resp.UserProfiles { ms.lastUserProfile[up.ID] = up } - // TODO(bradfitz): clean up old user profiles? maybe not worth it. 
if dm := resp.DERPMap; dm != nil { ms.vlogf("netmap: new map contains DERP map") @@ -532,6 +535,32 @@ func (ms *mapSession) addUserProfile(nm *netmap.NetworkMap, userID tailcfg.UserI } } +// cleanLastUserProfile deletes any entries from lastUserProfile +// that are not referenced by any peer or the self node. +// +// This is expensive enough that we don't do this on every message +// from the server, but only when it's grown enough to matter. +func (ms *mapSession) cleanLastUserProfile() { + if len(ms.lastUserProfile) < len(ms.peers)*2 { + // Hasn't grown enough to be worth cleaning. + return + } + + keep := set.Set[tailcfg.UserID]{} + if node := ms.lastNode; node.Valid() { + keep.Add(node.User()) + } + for _, n := range ms.peers { + keep.Add(n.User()) + keep.Add(n.Sharer()) + } + for userID := range ms.lastUserProfile { + if !keep.Contains(userID) { + delete(ms.lastUserProfile, userID) + } + } +} + var debugPatchifyPeer = envknob.RegisterBool("TS_DEBUG_PATCHIFY_PEER") // patchifyPeersChanged mutates resp to promote PeersChanged entries to PeersChangedPatch From f0db47338e61dbf803cbfe3beba936282fa04c2a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 16 Jan 2025 15:48:07 -0600 Subject: [PATCH 0384/1708] cmd/tailscaled,util/syspolicy/source,util/winutil/gp: disallow acquiring the GP lock during service startup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In v1.78, we started acquiring the GP lock when reading policy settings. This led to a deadlock during Tailscale installation via Group Policy Software Installation because the GP engine holds the write lock for the duration of policy processing, which in turn waits for the installation to complete, which in turn waits for the service to enter the running state. In this PR, we prevent the acquisition of GP locks (aka EnterCriticalPolicySection) during service startup and update the Windows Registry-based util/syspolicy/source.PlatformPolicyStore to handle this failure gracefully. The GP lock is somewhat optional; it’s safe to read policy settings without it, but acquiring the lock is recommended when reading multiple values to prevent the Group Policy engine from modifying settings mid-read and to avoid inconsistent results. Fixes #14416 Signed-off-by: Nick Khyl --- cmd/tailscaled/tailscaled_windows.go | 25 +++++- .../tailscaled_deps_test_windows.go | 1 + util/syspolicy/source/policy_store_windows.go | 82 ++++++++++++++++++- util/winutil/gp/policylock_windows.go | 38 ++++++++- 4 files changed, 138 insertions(+), 8 deletions(-) diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 786c5d833..7208e03da 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -55,6 +55,7 @@ import ( "tailscale.com/util/osdiag" "tailscale.com/util/syspolicy" "tailscale.com/util/winutil" + "tailscale.com/util/winutil/gp" "tailscale.com/version" "tailscale.com/wf" ) @@ -70,6 +71,22 @@ func init() { } } +// permitPolicyLocks is a function to be called to lift the restriction on acquiring +// [gp.PolicyLock]s once the service is running. +// It is safe to be called multiple times. +var permitPolicyLocks = func() {} + +func init() { + if isWindowsService() { + // We prevent [gp.PolicyLock]s from being acquired until the service enters the running state. 
+ // Otherwise, if tailscaled starts due to a GPSI policy installing Tailscale, it may deadlock + // while waiting for the write counterpart of the GP lock to be released by Group Policy, + // which is itself waiting for the installation to complete and tailscaled to start. + // See tailscale/tailscale#14416 for more information. + permitPolicyLocks = gp.RestrictPolicyLocks() + } +} + const serviceName = "Tailscale" // Application-defined command codes between 128 and 255 @@ -109,13 +126,13 @@ func tstunNewWithWindowsRetries(logf logger.Logf, tunName string) (_ tun.Device, } } -func isWindowsService() bool { +var isWindowsService = sync.OnceValue(func() bool { v, err := svc.IsWindowsService() if err != nil { log.Fatalf("svc.IsWindowsService failed: %v", err) } return v -} +}) // syslogf is a logger function that writes to the Windows event log (ie, the // one that you see in the Windows Event Viewer). tailscaled may optionally @@ -180,6 +197,10 @@ func (service *ipnService) Execute(args []string, r <-chan svc.ChangeRequest, ch changes <- svc.Status{State: svc.Running, Accepts: svcAccepts} syslogf("Service running") + // It is safe to allow GP locks to be acquired now that the service + // is running. + permitPolicyLocks() + for { select { case <-doneCh: diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index 5eda22327..b0d1c8968 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -60,6 +60,7 @@ import ( _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy" _ "tailscale.com/util/winutil" + _ "tailscale.com/util/winutil/gp" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wf" diff --git a/util/syspolicy/source/policy_store_windows.go b/util/syspolicy/source/policy_store_windows.go index 86e2254e0..621701e84 100644 --- a/util/syspolicy/source/policy_store_windows.go +++ b/util/syspolicy/source/policy_store_windows.go @@ -12,6 +12,7 @@ import ( "golang.org/x/sys/windows" "golang.org/x/sys/windows/registry" "tailscale.com/util/set" + "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/winutil/gp" ) @@ -29,6 +30,18 @@ var ( _ Expirable = (*PlatformPolicyStore)(nil) ) +// lockableCloser is a [Lockable] that can also be closed. +// It is implemented by [gp.PolicyLock] and [optionalPolicyLock]. +type lockableCloser interface { + Lockable + Close() error +} + +var ( + _ lockableCloser = (*gp.PolicyLock)(nil) + _ lockableCloser = (*optionalPolicyLock)(nil) +) + // PlatformPolicyStore implements [Store] by providing read access to // Registry-based Tailscale policies, such as those configured via Group Policy or MDM. // For better performance and consistency, it is recommended to lock it when @@ -55,7 +68,7 @@ type PlatformPolicyStore struct { // they are being read. // // When both policyLock and mu need to be taken, mu must be taken before policyLock. - policyLock *gp.PolicyLock + policyLock lockableCloser mu sync.Mutex tsKeys []registry.Key // or nil if the [PlatformPolicyStore] hasn't been locked. 
@@ -108,7 +121,7 @@ func newPlatformPolicyStore(scope gp.Scope, softwareKey registry.Key, policyLock scope: scope, softwareKey: softwareKey, done: make(chan struct{}), - policyLock: policyLock, + policyLock: &optionalPolicyLock{PolicyLock: policyLock}, } } @@ -448,3 +461,68 @@ func tailscaleKeyNamesFor(scope gp.Scope) []string { panic("unreachable") } } + +type gpLockState int + +const ( + gpUnlocked = gpLockState(iota) + gpLocked + gpLockRestricted // the lock could not be acquired due to a restriction in place +) + +// optionalPolicyLock is a wrapper around [gp.PolicyLock] that locks +// and unlocks the underlying [gp.PolicyLock]. +// +// If the [gp.PolicyLock.Lock] returns [gp.ErrLockRestricted], the error is ignored, +// and calling [optionalPolicyLock.Unlock] is a no-op. +// +// The underlying GP lock is kinda optional: it is safe to read policy settings +// from the Registry without acquiring it, but it is recommended to lock it anyway +// when reading multiple policy settings to avoid potentially inconsistent results. +// +// It is not safe for concurrent use. +type optionalPolicyLock struct { + *gp.PolicyLock + state gpLockState +} + +// Lock acquires the underlying [gp.PolicyLock], returning an error on failure. +// If the lock cannot be acquired due to a restriction in place +// (e.g., attempting to acquire a lock while the service is starting), +// the lock is considered to be held, the method returns nil, and a subsequent +// call to [Unlock] is a no-op. +// It is a runtime error to call Lock when the lock is already held. +func (o *optionalPolicyLock) Lock() error { + if o.state != gpUnlocked { + panic("already locked") + } + switch err := o.PolicyLock.Lock(); err { + case nil: + o.state = gpLocked + return nil + case gp.ErrLockRestricted: + loggerx.Errorf("GP lock not acquired: %v", err) + o.state = gpLockRestricted + return nil + default: + return err + } +} + +// Unlock releases the underlying [gp.PolicyLock], if it was previously acquired. +// It is a runtime error to call Unlock when the lock is not held. +func (o *optionalPolicyLock) Unlock() { + switch o.state { + case gpLocked: + o.PolicyLock.Unlock() + case gpLockRestricted: + // The GP lock wasn't acquired due to a restriction in place + // when [optionalPolicyLock.Lock] was called. Unlock is a no-op. + case gpUnlocked: + panic("not locked") + default: + panic("unreachable") + } + + o.state = gpUnlocked +} diff --git a/util/winutil/gp/policylock_windows.go b/util/winutil/gp/policylock_windows.go index 95453aa16..69c5ff016 100644 --- a/util/winutil/gp/policylock_windows.go +++ b/util/winutil/gp/policylock_windows.go @@ -48,10 +48,35 @@ type policyLockResult struct { } var ( - // ErrInvalidLockState is returned by (*PolicyLock).Lock if the lock has a zero value or has already been closed. + // ErrInvalidLockState is returned by [PolicyLock.Lock] if the lock has a zero value or has already been closed. ErrInvalidLockState = errors.New("the lock has not been created or has already been closed") + // ErrLockRestricted is returned by [PolicyLock.Lock] if the lock cannot be acquired due to a restriction in place, + // such as when [RestrictPolicyLocks] has been called. + ErrLockRestricted = errors.New("the lock cannot be acquired due to a restriction in place") ) +var policyLockRestricted atomic.Int32 + +// RestrictPolicyLocks forces all [PolicyLock.Lock] calls to return [ErrLockRestricted] +// until the returned function is called to remove the restriction. 
+// +// It is safe to call the returned function multiple times, but the restriction will only +// be removed once. If [RestrictPolicyLocks] is called multiple times, each call must be +// matched by a corresponding call to the returned function to fully remove the restrictions. +// +// It is primarily used to prevent certain deadlocks, such as when tailscaled attempts to acquire +// a policy lock during startup. If the service starts due to Tailscale being installed by GPSI, +// the write lock will be held by the Group Policy service throughout the installation, +// preventing tailscaled from acquiring the read lock. Since Group Policy waits for the installation +// to complete, and therefore for tailscaled to start, before releasing the write lock, this scenario +// would result in a deadlock. See tailscale/tailscale#14416 for more information. +func RestrictPolicyLocks() (removeRestriction func()) { + policyLockRestricted.Add(1) + return sync.OnceFunc(func() { + policyLockRestricted.Add(-1) + }) +} + // NewMachinePolicyLock creates a PolicyLock that facilitates pausing the // application of computer policy. To avoid deadlocks when acquiring both // machine and user locks, acquire the user lock before the machine lock. @@ -103,13 +128,18 @@ func NewUserPolicyLock(token windows.Token) (*PolicyLock, error) { } // Lock locks l. -// It returns ErrNotInitialized if l has a zero value or has already been closed, -// or an Errno if the underlying Group Policy lock cannot be acquired. +// It returns [ErrInvalidLockState] if l has a zero value or has already been closed, +// [ErrLockRestricted] if the lock cannot be acquired due to a restriction in place, +// or a [syscall.Errno] if the underlying Group Policy lock cannot be acquired. // -// As a special case, it fails with windows.ERROR_ACCESS_DENIED +// As a special case, it fails with [windows.ERROR_ACCESS_DENIED] // if l is a user policy lock, and the corresponding user is not logged in // interactively at the time of the call. func (l *PolicyLock) Lock() error { + if policyLockRestricted.Load() > 0 { + return ErrLockRestricted + } + l.mu.Lock() defer l.mu.Unlock() if l.lockCnt.Add(2)&1 == 0 { From 61bea750928b291d5e7bca4ea87d64d503d8a7ac Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 23 Jan 2025 18:40:17 -0800 Subject: [PATCH 0385/1708] cmd/tailscale: fix, test some recent doc inconsistencies 3dabea0fc2c added some docs with inconsistent usage docs. This fixes them, and adds a test. It also adds some other tests and fixes other verb tense inconsistencies. 
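For readers skimming the diff below, the convention the new TestDocs check enforces can be sketched as follows. This snippet is illustrative only and not part of the patch; the "frobnicate" command and its Exec func are hypothetical. ShortUsage starts with the full command path, and ShortHelp starts with an upper-case imperative verb ("Print", not "Prints") with no trailing period:

package cli

import (
	"context"
	"fmt"

	"github.com/peterbourgon/ff/v3/ffcli"
)

// frobnicateCmd is a hypothetical command used only to illustrate the
// documentation convention checked by TestDocs.
var frobnicateCmd = &ffcli.Command{
	Name:       "frobnicate",
	ShortUsage: "tailscale debug frobnicate <target>",
	ShortHelp:  "Print frobnication state for a target",
	Exec: func(ctx context.Context, args []string) error {
		fmt.Println("would frobnicate:", args)
		return nil
	},
}
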
Updates tailscale/corp#25278 Change-Id: I94c2a8940791bddd7c35c1c3d5fb791a317370c2 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/cli_test.go | 43 ++++++++++++++++++++++++++++ cmd/tailscale/cli/configure_apple.go | 12 ++++---- cmd/tailscale/cli/debug.go | 8 +++--- cmd/tailscale/cli/dns.go | 2 +- cmd/tailscale/cli/exitnode.go | 2 +- cmd/tailscale/cli/metrics.go | 4 +-- cmd/tailscale/cli/network-lock.go | 19 ++++++------ cmd/tailscale/cli/switch.go | 2 +- cmd/tailscale/cli/syspolicy.go | 4 +-- 9 files changed, 68 insertions(+), 28 deletions(-) diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index dccb69876..6f43814e8 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -17,6 +17,7 @@ import ( qt "github.com/frankban/quicktest" "github.com/google/go-cmp/cmp" + "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/envknob" "tailscale.com/health/healthmsg" "tailscale.com/ipn" @@ -1525,3 +1526,45 @@ func TestHelpAlias(t *testing.T) { t.Fatalf("Run: %v", err) } } + +func TestDocs(t *testing.T) { + root := newRootCmd() + check := func(t *testing.T, c *ffcli.Command) { + shortVerb, _, ok := strings.Cut(c.ShortHelp, " ") + if !ok || shortVerb == "" { + t.Errorf("couldn't find verb+space in ShortHelp") + } else { + if strings.HasSuffix(shortVerb, ".") { + t.Errorf("ShortHelp shouldn't end in period; got %q", c.ShortHelp) + } + if b := shortVerb[0]; b >= 'a' && b <= 'z' { + t.Errorf("ShortHelp should start with upper-case letter; got %q", c.ShortHelp) + } + if strings.HasSuffix(shortVerb, "s") && shortVerb != "Does" { + t.Errorf("verb %q ending in 's' is unexpected, from %q", shortVerb, c.ShortHelp) + } + } + + name := t.Name() + wantPfx := strings.ReplaceAll(strings.TrimPrefix(name, "TestDocs/"), "/", " ") + switch name { + case "TestDocs/tailscale/completion/bash", + "TestDocs/tailscale/completion/zsh": + wantPfx = "" // special-case exceptions + } + if !strings.HasPrefix(c.ShortUsage, wantPfx) { + t.Errorf("ShortUsage should start with %q; got %q", wantPfx, c.ShortUsage) + } + } + + var walk func(t *testing.T, c *ffcli.Command) + walk = func(t *testing.T, c *ffcli.Command) { + t.Run(c.Name, func(t *testing.T) { + check(t, c) + for _, sub := range c.Subcommands { + walk(t, sub) + } + }) + } + walk(t, root) +} diff --git a/cmd/tailscale/cli/configure_apple.go b/cmd/tailscale/cli/configure_apple.go index edd9ec1ab..c0d99b90a 100644 --- a/cmd/tailscale/cli/configure_apple.go +++ b/cmd/tailscale/cli/configure_apple.go @@ -27,28 +27,28 @@ func sysExtCmd() *ffcli.Command { return &ffcli.Command{ Name: "sysext", ShortUsage: "tailscale configure sysext [activate|deactivate|status]", - ShortHelp: "Manages the system extension for macOS (Standalone variant)", + ShortHelp: "Manage the system extension for macOS (Standalone variant)", LongHelp: "The sysext set of commands provides a way to activate, deactivate, or manage the state of the Tailscale system extension on macOS. " + "This is only relevant if you are running the Standalone variant of the Tailscale client for macOS. " + "To access more detailed information about system extensions installed on this Mac, run 'systemextensionsctl list'.", Subcommands: []*ffcli.Command{ { Name: "activate", - ShortUsage: "tailscale sysext activate", + ShortUsage: "tailscale configure sysext activate", ShortHelp: "Register the Tailscale system extension with macOS.", LongHelp: "This command registers the Tailscale system extension with macOS. 
To run Tailscale, you'll also need to install the VPN configuration separately (run `tailscale configure vpn-config install`). After running this command, you need to approve the extension in System Settings > Login Items and Extensions > Network Extensions.", Exec: requiresStandalone, }, { Name: "deactivate", - ShortUsage: "tailscale sysext deactivate", + ShortUsage: "tailscale configure sysext deactivate", ShortHelp: "Deactivate the Tailscale system extension on macOS", LongHelp: "This command deactivates the Tailscale system extension on macOS. To completely remove Tailscale, you'll also need to delete the VPN configuration separately (use `tailscale configure vpn-config uninstall`).", Exec: requiresStandalone, }, { Name: "status", - ShortUsage: "tailscale sysext status", + ShortUsage: "tailscale configure sysext status", ShortHelp: "Print the enablement status of the Tailscale system extension", LongHelp: "This command prints the enablement status of the Tailscale system extension. If the extension is not enabled, run `tailscale sysext activate` to enable it.", Exec: requiresStandalone, @@ -69,14 +69,14 @@ func vpnConfigCmd() *ffcli.Command { Subcommands: []*ffcli.Command{ { Name: "install", - ShortUsage: "tailscale mac-vpn install", + ShortUsage: "tailscale configure mac-vpn install", ShortHelp: "Write the Tailscale VPN configuration to the macOS settings", LongHelp: "This command writes the Tailscale VPN configuration to the macOS settings. This is the entry that appears in System Settings > VPN. If you are running the Standalone variant of the client, you'll also need to install the system extension separately (run `tailscale configure sysext activate`).", Exec: requiresGUI, }, { Name: "uninstall", - ShortUsage: "tailscale mac-vpn uninstall", + ShortUsage: "tailscale configure mac-vpn uninstall", ShortHelp: "Delete the Tailscale VPN configuration from the macOS settings", LongHelp: "This command removes the Tailscale VPN configuration from the macOS settings. This is the entry that appears in System Settings > VPN. 
If you are running the Standalone variant of the client, you'll also need to deactivate the system extension separately (run `tailscale configure sysext deactivate`).", Exec: requiresGUI, diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 04b343e76..f84dd25f0 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -289,7 +289,7 @@ var debugCmd = &ffcli.Command{ Name: "capture", ShortUsage: "tailscale debug capture", Exec: runCapture, - ShortHelp: "Streams pcaps for debugging", + ShortHelp: "Stream pcaps for debugging", FlagSet: (func() *flag.FlagSet { fs := newFlagSet("capture") fs.StringVar(&captureArgs.outFile, "o", "", "path to stream the pcap (or - for stdout), leave empty to start wireshark") @@ -315,13 +315,13 @@ var debugCmd = &ffcli.Command{ Name: "peer-endpoint-changes", ShortUsage: "tailscale debug peer-endpoint-changes ", Exec: runPeerEndpointChanges, - ShortHelp: "Prints debug information about a peer's endpoint changes", + ShortHelp: "Print debug information about a peer's endpoint changes", }, { Name: "dial-types", ShortUsage: "tailscale debug dial-types ", Exec: runDebugDialTypes, - ShortHelp: "Prints debug information about connecting to a given host or IP", + ShortHelp: "Print debug information about connecting to a given host or IP", FlagSet: (func() *flag.FlagSet { fs := newFlagSet("dial-types") fs.StringVar(&debugDialTypesArgs.network, "network", "tcp", `network type to dial ("tcp", "udp", etc.)`) @@ -342,7 +342,7 @@ var debugCmd = &ffcli.Command{ { Name: "go-buildinfo", ShortUsage: "tailscale debug go-buildinfo", - ShortHelp: "Prints Go's runtime/debug.BuildInfo", + ShortHelp: "Print Go's runtime/debug.BuildInfo", Exec: runGoBuildInfo, }, }, diff --git a/cmd/tailscale/cli/dns.go b/cmd/tailscale/cli/dns.go index 042ce1a94..402f0cedf 100644 --- a/cmd/tailscale/cli/dns.go +++ b/cmd/tailscale/cli/dns.go @@ -20,7 +20,7 @@ var dnsCmd = &ffcli.Command{ Name: "status", ShortUsage: "tailscale dns status [--all]", Exec: runDNSStatus, - ShortHelp: "Prints the current DNS status and configuration", + ShortHelp: "Print the current DNS status and configuration", LongHelp: dnsStatusLongHelp(), FlagSet: (func() *flag.FlagSet { fs := newFlagSet("status") diff --git a/cmd/tailscale/cli/exitnode.go b/cmd/tailscale/cli/exitnode.go index 941c6be8d..ad7a8ccee 100644 --- a/cmd/tailscale/cli/exitnode.go +++ b/cmd/tailscale/cli/exitnode.go @@ -41,7 +41,7 @@ func exitNodeCmd() *ffcli.Command { { Name: "suggest", ShortUsage: "tailscale exit-node suggest", - ShortHelp: "Suggests the best available exit node", + ShortHelp: "Suggest the best available exit node", Exec: runExitNodeSuggest, }}, (func() []*ffcli.Command { diff --git a/cmd/tailscale/cli/metrics.go b/cmd/tailscale/cli/metrics.go index d5fe9ad81..dbdedd5a6 100644 --- a/cmd/tailscale/cli/metrics.go +++ b/cmd/tailscale/cli/metrics.go @@ -33,13 +33,13 @@ https://tailscale.com/s/client-metrics Name: "print", ShortUsage: "tailscale metrics print", Exec: runMetricsPrint, - ShortHelp: "Prints current metric values in the Prometheus text exposition format", + ShortHelp: "Print current metric values in Prometheus text format", }, { Name: "write", ShortUsage: "tailscale metrics write ", Exec: runMetricsWrite, - ShortHelp: "Writes metric values to a file", + ShortHelp: "Write metric values to a file", LongHelp: strings.TrimSpace(` The 'tailscale metrics write' command writes metric values to a text file provided as its diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go 
index 45f989f10..c77767074 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -191,8 +191,7 @@ var nlStatusArgs struct { var nlStatusCmd = &ffcli.Command{ Name: "status", ShortUsage: "tailscale lock status", - ShortHelp: "Outputs the state of tailnet lock", - LongHelp: "Outputs the state of tailnet lock", + ShortHelp: "Output the state of tailnet lock", Exec: runNetworkLockStatus, FlagSet: (func() *flag.FlagSet { fs := newFlagSet("lock status") @@ -293,8 +292,7 @@ func runNetworkLockStatus(ctx context.Context, args []string) error { var nlAddCmd = &ffcli.Command{ Name: "add", ShortUsage: "tailscale lock add ...", - ShortHelp: "Adds one or more trusted signing keys to tailnet lock", - LongHelp: "Adds one or more trusted signing keys to tailnet lock", + ShortHelp: "Add one or more trusted signing keys to tailnet lock", Exec: func(ctx context.Context, args []string) error { return runNetworkLockModify(ctx, args, nil) }, @@ -307,8 +305,7 @@ var nlRemoveArgs struct { var nlRemoveCmd = &ffcli.Command{ Name: "remove", ShortUsage: "tailscale lock remove [--re-sign=false] ...", - ShortHelp: "Removes one or more trusted signing keys from tailnet lock", - LongHelp: "Removes one or more trusted signing keys from tailnet lock", + ShortHelp: "Remove one or more trusted signing keys from tailnet lock", Exec: runNetworkLockRemove, FlagSet: (func() *flag.FlagSet { fs := newFlagSet("lock remove") @@ -448,7 +445,7 @@ func runNetworkLockModify(ctx context.Context, addArgs, removeArgs []string) err var nlSignCmd = &ffcli.Command{ Name: "sign", ShortUsage: "tailscale lock sign []\ntailscale lock sign ", - ShortHelp: "Signs a node or pre-approved auth key", + ShortHelp: "Sign a node or pre-approved auth key", LongHelp: `Either: - signs a node key and transmits the signature to the coordination server, or @@ -510,7 +507,7 @@ func runNetworkLockSign(ctx context.Context, args []string) error { var nlDisableCmd = &ffcli.Command{ Name: "disable", ShortUsage: "tailscale lock disable ", - ShortHelp: "Consumes a disablement secret to shut down tailnet lock for the tailnet", + ShortHelp: "Consume a disablement secret to shut down tailnet lock for the tailnet", LongHelp: strings.TrimSpace(` The 'tailscale lock disable' command uses the specified disablement @@ -539,7 +536,7 @@ func runNetworkLockDisable(ctx context.Context, args []string) error { var nlLocalDisableCmd = &ffcli.Command{ Name: "local-disable", ShortUsage: "tailscale lock local-disable", - ShortHelp: "Disables tailnet lock for this node only", + ShortHelp: "Disable tailnet lock for this node only", LongHelp: strings.TrimSpace(` The 'tailscale lock local-disable' command disables tailnet lock for only @@ -561,8 +558,8 @@ func runNetworkLockLocalDisable(ctx context.Context, args []string) error { var nlDisablementKDFCmd = &ffcli.Command{ Name: "disablement-kdf", ShortUsage: "tailscale lock disablement-kdf ", - ShortHelp: "Computes a disablement value from a disablement secret (advanced users only)", - LongHelp: "Computes a disablement value from a disablement secret (advanced users only)", + ShortHelp: "Compute a disablement value from a disablement secret (advanced users only)", + LongHelp: "Compute a disablement value from a disablement secret (advanced users only)", Exec: runNetworkLockDisablementKDF, } diff --git a/cmd/tailscale/cli/switch.go b/cmd/tailscale/cli/switch.go index 731492daa..af8b51326 100644 --- a/cmd/tailscale/cli/switch.go +++ b/cmd/tailscale/cli/switch.go @@ -20,7 +20,7 @@ import ( var switchCmd = 
&ffcli.Command{ Name: "switch", ShortUsage: "tailscale switch ", - ShortHelp: "Switches to a different Tailscale account", + ShortHelp: "Switch to a different Tailscale account", LongHelp: `"tailscale switch" switches between logged in accounts. You can use the ID that's returned from 'tailnet switch -list' to pick which profile you want to switch to. Alternatively, you diff --git a/cmd/tailscale/cli/syspolicy.go b/cmd/tailscale/cli/syspolicy.go index 0e903db39..a71952a9f 100644 --- a/cmd/tailscale/cli/syspolicy.go +++ b/cmd/tailscale/cli/syspolicy.go @@ -31,7 +31,7 @@ var syspolicyCmd = &ffcli.Command{ Name: "list", ShortUsage: "tailscale syspolicy list", Exec: runSysPolicyList, - ShortHelp: "Prints effective policy settings", + ShortHelp: "Print effective policy settings", LongHelp: "The 'tailscale syspolicy list' subcommand displays the effective policy settings and their sources (e.g., MDM or environment variables).", FlagSet: (func() *flag.FlagSet { fs := newFlagSet("syspolicy list") @@ -43,7 +43,7 @@ var syspolicyCmd = &ffcli.Command{ Name: "reload", ShortUsage: "tailscale syspolicy reload", Exec: runSysPolicyReload, - ShortHelp: "Forces a reload of policy settings, even if no changes are detected, and prints the result", + ShortHelp: "Force a reload of policy settings, even if no changes are detected, and prints the result", LongHelp: "The 'tailscale syspolicy reload' subcommand forces a reload of policy settings, even if no changes are detected, and prints the result.", FlagSet: (func() *flag.FlagSet { fs := newFlagSet("syspolicy reload") From 3a39f08735abd5d9e757f8ac2dd7b0e8eb359c03 Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Fri, 20 Dec 2024 09:09:53 +0100 Subject: [PATCH 0386/1708] util/usermetric: add more drop labels Updates #14280 Signed-off-by: Kristoffer Dalby --- util/usermetric/metrics.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/util/usermetric/metrics.go b/util/usermetric/metrics.go index 7f85989ff..0c5511759 100644 --- a/util/usermetric/metrics.go +++ b/util/usermetric/metrics.go @@ -28,6 +28,19 @@ const ( // ReasonACL means that the packet was not permitted by ACL. ReasonACL DropReason = "acl" + // ReasonMulticast means that the packet was dropped because it was a multicast packet. + ReasonMulticast DropReason = "multicast" + + // ReasonLinkLocalUnicast means that the packet was dropped because it was a link-local unicast packet. + ReasonLinkLocalUnicast DropReason = "link_local_unicast" + + // ReasonTooShort means that the packet was dropped because it was a bad packet, + // this could be due to a short packet. + ReasonTooShort DropReason = "too_short" + + // ReasonFragment means that the packet was dropped because it was an IP fragment. + ReasonFragment DropReason = "fragment" + // ReasonError means that the packet was dropped because of an error. 
ReasonError DropReason = "error" ) From 5756bc17049fbfa65c531eff08a8db5aea66b14a Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Fri, 20 Dec 2024 09:09:06 +0100 Subject: [PATCH 0387/1708] wgengine/filter: return drop reason for metrics Updates #14280 Signed-off-by: Kristoffer Dalby --- wgengine/filter/filter.go | 25 +++++++++++++------------ wgengine/filter/filter_test.go | 32 +++++++++++++++++--------------- 2 files changed, 30 insertions(+), 27 deletions(-) diff --git a/wgengine/filter/filter.go b/wgengine/filter/filter.go index 9e5d8a37f..6269b08eb 100644 --- a/wgengine/filter/filter.go +++ b/wgengine/filter/filter.go @@ -24,6 +24,7 @@ import ( "tailscale.com/types/views" "tailscale.com/util/mak" "tailscale.com/util/slicesx" + "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter/filtertype" ) @@ -410,7 +411,7 @@ func (f *Filter) ShieldsUp() bool { return f.shieldsUp } // Tailscale peer. func (f *Filter) RunIn(q *packet.Parsed, rf RunFlags) Response { dir := in - r := f.pre(q, rf, dir) + r, _ := f.pre(q, rf, dir) if r == Accept || r == Drop { // already logged return r @@ -431,16 +432,16 @@ func (f *Filter) RunIn(q *packet.Parsed, rf RunFlags) Response { // RunOut determines whether this node is allowed to send q to a // Tailscale peer. -func (f *Filter) RunOut(q *packet.Parsed, rf RunFlags) Response { +func (f *Filter) RunOut(q *packet.Parsed, rf RunFlags) (Response, usermetric.DropReason) { dir := out - r := f.pre(q, rf, dir) + r, reason := f.pre(q, rf, dir) if r == Accept || r == Drop { // already logged - return r + return r, reason } r, why := f.runOut(q) f.logRateLimit(rf, q, dir, r, why) - return r + return r, "" } var unknownProtoStringCache sync.Map // ipproto.Proto -> string @@ -610,33 +611,33 @@ var gcpDNSAddr = netaddr.IPv4(169, 254, 169, 254) // pre runs the direction-agnostic filter logic. dir is only used for // logging. -func (f *Filter) pre(q *packet.Parsed, rf RunFlags, dir direction) Response { +func (f *Filter) pre(q *packet.Parsed, rf RunFlags, dir direction) (Response, usermetric.DropReason) { if len(q.Buffer()) == 0 { // wireguard keepalive packet, always permit. - return Accept + return Accept, "" } if len(q.Buffer()) < 20 { f.logRateLimit(rf, q, dir, Drop, "too short") - return Drop + return Drop, usermetric.ReasonTooShort } if q.Dst.Addr().IsMulticast() { f.logRateLimit(rf, q, dir, Drop, "multicast") - return Drop + return Drop, usermetric.ReasonMulticast } if q.Dst.Addr().IsLinkLocalUnicast() && q.Dst.Addr() != gcpDNSAddr { f.logRateLimit(rf, q, dir, Drop, "link-local-unicast") - return Drop + return Drop, usermetric.ReasonLinkLocalUnicast } if q.IPProto == ipproto.Fragment { // Fragments after the first always need to be passed through. // Very small fragments are considered Junk by Parsed. f.logRateLimit(rf, q, dir, Accept, "fragment") - return Accept + return Accept, "" } - return noVerdict + return noVerdict, "" } // loggingAllowed reports whether p can appear in logs at all. 
diff --git a/wgengine/filter/filter_test.go b/wgengine/filter/filter_test.go index e7f71e6a4..68f206778 100644 --- a/wgengine/filter/filter_test.go +++ b/wgengine/filter/filter_test.go @@ -30,6 +30,7 @@ import ( "tailscale.com/types/views" "tailscale.com/util/must" "tailscale.com/util/slicesx" + "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter/filtertype" ) @@ -211,7 +212,7 @@ func TestUDPState(t *testing.T) { t.Fatalf("incoming initial packet not dropped, got=%v: %v", got, a4) } // We talk to that peer - if got := acl.RunOut(&b4, flags); got != Accept { + if got, _ := acl.RunOut(&b4, flags); got != Accept { t.Fatalf("outbound packet didn't egress, got=%v: %v", got, b4) } // Now, the same packet as before is allowed back. @@ -227,7 +228,7 @@ func TestUDPState(t *testing.T) { t.Fatalf("incoming initial packet not dropped: %v", a4) } // We talk to that peer - if got := acl.RunOut(&b6, flags); got != Accept { + if got, _ := acl.RunOut(&b6, flags); got != Accept { t.Fatalf("outbound packet didn't egress: %v", b4) } // Now, the same packet as before is allowed back. @@ -382,25 +383,26 @@ func BenchmarkFilter(b *testing.B) { func TestPreFilter(t *testing.T) { packets := []struct { - desc string - want Response - b []byte + desc string + want Response + wantReason usermetric.DropReason + b []byte }{ - {"empty", Accept, []byte{}}, - {"short", Drop, []byte("short")}, - {"junk", Drop, raw4default(ipproto.Unknown, 10)}, - {"fragment", Accept, raw4default(ipproto.Fragment, 40)}, - {"tcp", noVerdict, raw4default(ipproto.TCP, 0)}, - {"udp", noVerdict, raw4default(ipproto.UDP, 0)}, - {"icmp", noVerdict, raw4default(ipproto.ICMPv4, 0)}, + {"empty", Accept, "", []byte{}}, + {"short", Drop, usermetric.ReasonTooShort, []byte("short")}, + {"junk", Drop, "", raw4default(ipproto.Unknown, 10)}, + {"fragment", Accept, "", raw4default(ipproto.Fragment, 40)}, + {"tcp", noVerdict, "", raw4default(ipproto.TCP, 0)}, + {"udp", noVerdict, "", raw4default(ipproto.UDP, 0)}, + {"icmp", noVerdict, "", raw4default(ipproto.ICMPv4, 0)}, } f := NewAllowNone(t.Logf, &netipx.IPSet{}) for _, testPacket := range packets { p := &packet.Parsed{} p.Decode(testPacket.b) - got := f.pre(p, LogDrops|LogAccepts, in) - if got != testPacket.want { - t.Errorf("%q got=%v want=%v packet:\n%s", testPacket.desc, got, testPacket.want, packet.Hexdump(testPacket.b)) + got, gotReason := f.pre(p, LogDrops|LogAccepts, in) + if got != testPacket.want || gotReason != testPacket.wantReason { + t.Errorf("%q got=%v want=%v gotReason=%s wantReason=%s packet:\n%s", testPacket.desc, got, testPacket.want, gotReason, testPacket.wantReason, packet.Hexdump(testPacket.b)) } } } From f39ee8e5200a3c65491f024b968e5dddee97d872 Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Fri, 20 Dec 2024 09:12:37 +0100 Subject: [PATCH 0388/1708] net/tstun: add back outgoing drop metric Using new labels returned from the filter Updates #14280 Signed-off-by: Kristoffer Dalby --- net/tstun/wrap.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 2d56f3768..b26239632 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -877,12 +877,13 @@ func (t *Wrapper) filterPacketOutboundToWireGuard(p *packet.Parsed, pc *peerConf return filter.Drop, gro } - if filt.RunOut(p, t.filterFlags) != filter.Accept { + if resp, reason := filt.RunOut(p, t.filterFlags); resp != filter.Accept { metricPacketOutDropFilter.Add(1) - // TODO(#14280): increment a t.metrics.outboundDroppedPacketsTotal here - // once we 
figure out & document what labels to use for multicast, - // link-local-unicast, IP fragments, etc. But they're not - // usermetric.ReasonACL. + if reason != "" { + t.metrics.outboundDroppedPacketsTotal.Add(usermetric.DropLabels{ + Reason: reason, + }, 1) + } return filter.Drop, gro } From f0b63d0eecd01dc2562d14b7eb25710c3d351218 Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Wed, 8 Jan 2025 10:53:07 +0100 Subject: [PATCH 0389/1708] wgengine/filter: add check for unknown proto Updates #14280 Signed-off-by: Kristoffer Dalby --- util/usermetric/metrics.go | 3 +++ wgengine/filter/filter.go | 5 +++++ wgengine/filter/filter_test.go | 3 ++- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/util/usermetric/metrics.go b/util/usermetric/metrics.go index 0c5511759..044b4d65f 100644 --- a/util/usermetric/metrics.go +++ b/util/usermetric/metrics.go @@ -41,6 +41,9 @@ const ( // ReasonFragment means that the packet was dropped because it was an IP fragment. ReasonFragment DropReason = "fragment" + // ReasonUnknownProtocol means that the packet was dropped because it was an unknown protocol. + ReasonUnknownProtocol DropReason = "unknown_protocol" + // ReasonError means that the packet was dropped because of an error. ReasonError DropReason = "error" ) diff --git a/wgengine/filter/filter.go b/wgengine/filter/filter.go index 6269b08eb..987fcee01 100644 --- a/wgengine/filter/filter.go +++ b/wgengine/filter/filter.go @@ -621,6 +621,11 @@ func (f *Filter) pre(q *packet.Parsed, rf RunFlags, dir direction) (Response, us return Drop, usermetric.ReasonTooShort } + if q.IPProto == ipproto.Unknown { + f.logRateLimit(rf, q, dir, Drop, "unknown proto") + return Drop, usermetric.ReasonUnknownProtocol + } + if q.Dst.Addr().IsMulticast() { f.logRateLimit(rf, q, dir, Drop, "multicast") return Drop, usermetric.ReasonMulticast diff --git a/wgengine/filter/filter_test.go b/wgengine/filter/filter_test.go index 68f206778..ae39eeb08 100644 --- a/wgengine/filter/filter_test.go +++ b/wgengine/filter/filter_test.go @@ -390,7 +390,8 @@ func TestPreFilter(t *testing.T) { }{ {"empty", Accept, "", []byte{}}, {"short", Drop, usermetric.ReasonTooShort, []byte("short")}, - {"junk", Drop, "", raw4default(ipproto.Unknown, 10)}, + {"short-junk", Drop, usermetric.ReasonTooShort, raw4default(ipproto.Unknown, 10)}, + {"long-junk", Drop, usermetric.ReasonUnknownProtocol, raw4default(ipproto.Unknown, 21)}, {"fragment", Accept, "", raw4default(ipproto.Fragment, 40)}, {"tcp", noVerdict, "", raw4default(ipproto.TCP, 0)}, {"udp", noVerdict, "", raw4default(ipproto.UDP, 0)}, From 5e9056a35641d06bacc0e73864cba6d659869f9c Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Thu, 23 Jan 2025 09:08:54 -0600 Subject: [PATCH 0390/1708] derp: move Conn interface to derp.go This interface is used both by the DERP client as well as the server. Defining the interface in derp.go makes it clear that it is shared. Updates tailscale/corp#26045 Signed-off-by: Percy Wegmann --- derp/derp.go | 12 ++++++++++++ derp/derp_server.go | 12 ------------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/derp/derp.go b/derp/derp.go index 6a7b3b735..65acd4321 100644 --- a/derp/derp.go +++ b/derp/derp.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "io" + "net" "time" ) @@ -254,3 +255,14 @@ func writeFrame(bw *bufio.Writer, t frameType, b []byte) error { } return bw.Flush() } + +// Conn is the subset of the underlying net.Conn the DERP Server needs. +// It is a defined type so that non-net connections can be used. 
+type Conn interface { + io.WriteCloser + LocalAddr() net.Addr + // The *Deadline methods follow the semantics of net.Conn. + SetDeadline(time.Time) error + SetReadDeadline(time.Time) error + SetWriteDeadline(time.Time) error +} diff --git a/derp/derp_server.go b/derp/derp_server.go index 983b5dc00..4b5cc2f78 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -23,7 +23,6 @@ import ( "math" "math/big" "math/rand/v2" - "net" "net/http" "net/netip" "os" @@ -341,17 +340,6 @@ type PacketForwarder interface { String() string } -// Conn is the subset of the underlying net.Conn the DERP Server needs. -// It is a defined type so that non-net connections can be used. -type Conn interface { - io.WriteCloser - LocalAddr() net.Addr - // The *Deadline methods follow the semantics of net.Conn. - SetDeadline(time.Time) error - SetReadDeadline(time.Time) error - SetWriteDeadline(time.Time) error -} - var packetsDropped = metrics.NewMultiLabelMap[dropReasonKindLabels]( "derp_packets_dropped", "counter", From 450bc9a6b8a70904aa58614999c5400eed6d273c Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Thu, 23 Jan 2025 14:32:22 -0600 Subject: [PATCH 0391/1708] cmd/derper,derp: make TCP write timeout configurable The timeout still defaults to 2 seconds, but can now be changed via command-line flag. Updates tailscale/corp#26045 Signed-off-by: Percy Wegmann --- cmd/derper/derper.go | 3 +++ derp/derp_server.go | 19 ++++++++++++++++--- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 46ff644b2..2c6ecd175 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -77,6 +77,8 @@ var ( tcpKeepAlive = flag.Duration("tcp-keepalive-time", 10*time.Minute, "TCP keepalive time") // tcpUserTimeout is intentionally short, so that hung connections are cleaned up promptly. DERPs should be nearby users. tcpUserTimeout = flag.Duration("tcp-user-timeout", 15*time.Second, "TCP user timeout") + // tcpWriteTimeout is the timeout for writing to client TCP connections. It does not apply to mesh connections. + tcpWriteTimeout = flag.Duration("tcp-write-timeout", derp.DefaultTCPWiteTimeout, "TCP write timeout; 0 results in no timeout being set on writes") ) var ( @@ -173,6 +175,7 @@ func main() { s.SetVerifyClient(*verifyClients) s.SetVerifyClientURL(*verifyClientURL) s.SetVerifyClientURLFailOpen(*verifyFailOpen) + s.SetTCPWriteTimeout(*tcpWriteTimeout) if *meshPSKFile != "" { b, err := os.ReadFile(*meshPSKFile) diff --git a/derp/derp_server.go b/derp/derp_server.go index 4b5cc2f78..0389eed64 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -84,7 +84,7 @@ func init() { const ( defaultPerClientSendQueueDepth = 32 // default packets buffered for sending - writeTimeout = 2 * time.Second + DefaultTCPWiteTimeout = 2 * time.Second privilegedWriteTimeout = 30 * time.Second // for clients with the mesh key ) @@ -201,6 +201,8 @@ type Server struct { // Sets the client send queue depth for the server. 
perClientSendQueueDepth int + tcpWriteTimeout time.Duration + clock tstime.Clock } @@ -377,6 +379,7 @@ func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { bufferedWriteFrames: metrics.NewHistogram([]float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 50, 100}), keyOfAddr: map[netip.AddrPort]key.NodePublic{}, clock: tstime.StdClock{}, + tcpWriteTimeout: DefaultTCPWiteTimeout, } s.initMetacert() s.packetsRecvDisco = s.packetsRecvByKind.Get(string(packetKindDisco)) @@ -481,6 +484,13 @@ func (s *Server) SetVerifyClientURLFailOpen(v bool) { s.verifyClientsURLFailOpen = v } +// SetTCPWriteTimeout sets the timeout for writing to connected clients. +// This timeout does not apply to mesh connections. +// Defaults to 2 seconds. +func (s *Server) SetTCPWriteTimeout(d time.Duration) { + s.tcpWriteTimeout = d +} + // HasMeshKey reports whether the server is configured with a mesh key. func (s *Server) HasMeshKey() bool { return s.meshKey != "" } @@ -1805,7 +1815,7 @@ func (c *sclient) sendLoop(ctx context.Context) error { } func (c *sclient) setWriteDeadline() { - d := writeTimeout + d := c.s.tcpWriteTimeout if c.canMesh { // Trusted peers get more tolerance. // @@ -1817,7 +1827,10 @@ func (c *sclient) setWriteDeadline() { // of connected peers. d = privilegedWriteTimeout } - c.nc.SetWriteDeadline(time.Now().Add(d)) + // Ignore the error from setting the write deadline. In practice, + // setting the deadline will only fail if the connection is closed + // or closing, so the subsequent Write() will fail anyway. + _ = c.nc.SetWriteDeadline(time.Now().Add(d)) } // sendKeepAlive sends a keep-alive frame, without flushing. From 05afa31df3e3840549d946ff90392cf6d73ccf12 Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Fri, 24 Jan 2025 12:51:01 +0100 Subject: [PATCH 0392/1708] util/clientmetric: use counter in aggcounter Fixes #14743 Signed-off-by: Kristoffer Dalby --- util/clientmetric/clientmetric.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/clientmetric/clientmetric.go b/util/clientmetric/clientmetric.go index 584a24f73..5c1116019 100644 --- a/util/clientmetric/clientmetric.go +++ b/util/clientmetric/clientmetric.go @@ -270,7 +270,7 @@ func (c *AggregateCounter) UnregisterAll() { // a sum of expvar variables registered with it. func NewAggregateCounter(name string) *AggregateCounter { c := &AggregateCounter{counters: set.Set[*expvar.Int]{}} - NewGaugeFunc(name, c.Value) + NewCounterFunc(name, c.Value) return c } From d69c70ee5b446f74721ce20fd3677b8c6e642cb5 Mon Sep 17 00:00:00 2001 From: Adrian Dewhurst Date: Thu, 23 Jan 2025 14:26:16 -0500 Subject: [PATCH 0393/1708] tailcfg: adjust ServiceName.Validate to use vizerror Updates #cleanup Change-Id: I163b3f762b9d45c2155afe1c0a36860606833a22 Signed-off-by: Adrian Dewhurst --- tailcfg/tailcfg.go | 7 ++++--- util/dnsname/dnsname.go | 3 ++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index c921a0c7d..738c8a5dc 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -27,6 +27,7 @@ import ( "tailscale.com/types/tkatype" "tailscale.com/util/dnsname" "tailscale.com/util/slicesx" + "tailscale.com/util/vizerror" ) // CapabilityVersion represents the client's capability level. That @@ -891,14 +892,14 @@ type ServiceName string // Validate validates if the service name is formatted correctly. // We only allow valid DNS labels, since the expectation is that these will be -// used as parts of domain names. +// used as parts of domain names. 
All errors are [vizerror.Error]. func (sn ServiceName) Validate() error { bareName, ok := strings.CutPrefix(string(sn), "svc:") if !ok { - return errors.New("services must start with 'svc:'") + return vizerror.Errorf("%q is not a valid service name: must start with 'svc:'", sn) } if bareName == "" { - return errors.New("service names must not be empty") + return vizerror.Errorf("%q is not a valid service name: must not be empty after the 'svc:' prefix", sn) } return dnsname.ValidLabel(bareName) } diff --git a/util/dnsname/dnsname.go b/util/dnsname/dnsname.go index 131bdd14b..6404a9af1 100644 --- a/util/dnsname/dnsname.go +++ b/util/dnsname/dnsname.go @@ -94,7 +94,8 @@ func (f FQDN) Contains(other FQDN) bool { return strings.HasSuffix(other.WithTrailingDot(), cmp) } -// ValidLabel reports whether label is a valid DNS label. +// ValidLabel reports whether label is a valid DNS label. All errors are +// [vizerror.Error]. func ValidLabel(label string) error { if len(label) == 0 { return vizerror.New("empty DNS label") From 69bc164c621b8dc920b4208b389bd4a8f87c3d9f Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Fri, 24 Jan 2025 17:04:26 +0000 Subject: [PATCH 0394/1708] ipn/ipnlocal: include DNS SAN in cert CSR (#14764) The CN field is technically deprecated; set the requested name in a DNS SAN extension in addition to maximise compatibility with RFC 8555. Fixes #14762 Change-Id: If5d27f1e7abc519ec86489bf034ac98b2e613043 Signed-off-by: Tom Proctor --- ipn/ipnlocal/cert.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index 0d92c7cf8..71ae8ac86 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -556,6 +556,7 @@ func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger } logf("requesting cert...") + traceACME(csr) der, _, err := ac.CreateOrderCert(ctx, order.FinalizeURL, csr, true) if err != nil { return nil, fmt.Errorf("CreateOrder: %v", err) @@ -578,10 +579,10 @@ func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger } // certRequest generates a CSR for the given common name cn and optional SANs. -func certRequest(key crypto.Signer, cn string, ext []pkix.Extension, san ...string) ([]byte, error) { +func certRequest(key crypto.Signer, name string, ext []pkix.Extension) ([]byte, error) { req := &x509.CertificateRequest{ - Subject: pkix.Name{CommonName: cn}, - DNSNames: san, + Subject: pkix.Name{CommonName: name}, + DNSNames: []string{name}, ExtraExtensions: ext, } return x509.CreateCertificateRequest(rand.Reader, req, key) From 716e4fcc97759308f79875ff1809da945df70574 Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Fri, 24 Jan 2025 16:29:58 -0700 Subject: [PATCH 0395/1708] client/web: remove advanced options from web client login (#14770) Removing the advanced options collapsible from the web client login for now ahead of our next client release. 
Updates https://github.com/tailscale/tailscale/issues/14568 Signed-off-by: Mario Minardi --- .../web/src/components/views/login-view.tsx | 36 +------------------ 1 file changed, 1 insertion(+), 35 deletions(-) diff --git a/client/web/src/components/views/login-view.tsx b/client/web/src/components/views/login-view.tsx index b2868bb46..f8c15b16d 100644 --- a/client/web/src/components/views/login-view.tsx +++ b/client/web/src/components/views/login-view.tsx @@ -1,13 +1,11 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -import React, { useState } from "react" +import React from "react" import { useAPI } from "src/api" import TailscaleIcon from "src/assets/icons/tailscale-icon.svg?react" import { NodeData } from "src/types" import Button from "src/ui/button" -import Collapsible from "src/ui/collapsible" -import Input from "src/ui/input" /** * LoginView is rendered when the client is not authenticated @@ -15,8 +13,6 @@ import Input from "src/ui/input" */ export default function LoginView({ data }: { data: NodeData }) { const api = useAPI() - const [controlURL, setControlURL] = useState("") - const [authKey, setAuthKey] = useState("") return (
@@ -88,8 +84,6 @@ export default function LoginView({ data }: { data: NodeData }) { action: "up", data: { Reauthenticate: true, - ControlURL: controlURL, - AuthKey: authKey, }, }) } @@ -98,34 +92,6 @@ export default function LoginView({ data }: { data: NodeData }) { > Log In - -

-        [removed JSX (markup lost in extraction): the "Advanced options" collapsible containing an "Auth Key" heading, the note "Connect with a pre-authenticated key. Learn more →", an auth key input with placeholder "tskey-auth-XXX", a "Server URL" heading, the note "Base URL of control server.", and a control URL input with placeholder "https://login.tailscale.com/"]
)}
From cbf1a9abe188ae3c121b98aad00b3cc4439fe677 Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Fri, 24 Jan 2025 17:04:12 -0700 Subject: [PATCH 0396/1708] go.{mod,sum}: update web-client-prebuilt (#14772) Manually update the `web-client-prebuilt` package as the GitHub action is failing for some reason. Updates https://github.com/tailscale/tailscale/issues/14568 Signed-off-by: Mario Minardi --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4265953a4..22193ee6e 100644 --- a/go.mod +++ b/go.mod @@ -82,7 +82,7 @@ require ( github.com/tailscale/mkctr v0.0.0-20250110151924-54977352e4a6 github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc - github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 + github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e diff --git a/go.sum b/go.sum index 2623cb6e9..20dbe7306 100644 --- a/go.sum +++ b/go.sum @@ -933,8 +933,8 @@ github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4 github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= -github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= -github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= +github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= +github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw= From 1a7274fccb0617f6d0bc31a45d835b61a9d5c5b7 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 24 Jan 2025 13:09:21 -0800 Subject: [PATCH 0397/1708] control/controlclient: skip SetControlClientStatus when queue has newer results later Updates #1909 Updates #12542 Updates tailscale/corp#26058 Change-Id: I3033d235ca49f9739fdf3deaf603eea4ec3e407e Signed-off-by: Brad Fitzpatrick --- control/controlclient/auto.go | 71 ++++++++++++++++- control/controlclient/controlclient_test.go | 85 +++++++++++++++++++++ control/controlknobs/controlknobs.go | 8 ++ tailcfg/tailcfg.go | 5 ++ 4 files changed, 167 insertions(+), 2 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index edd0ae29c..a5397594e 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -21,6 +21,7 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/persist" "tailscale.com/types/structs" + "tailscale.com/util/clientmetric" "tailscale.com/util/execqueue" ) @@ -131,6 +132,8 @@ 
type Auto struct { // the server. lastUpdateGen updateGen + lastStatus atomic.Pointer[Status] + paused bool // whether we should stop making HTTP requests unpauseWaiters []chan bool // chans that gets sent true (once) on wake, or false on Shutdown loggedIn bool // true if currently logged in @@ -596,21 +599,85 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM // not logged in. nm = nil } - new := Status{ + newSt := &Status{ URL: url, Persist: p, NetMap: nm, Err: err, state: state, } + c.lastStatus.Store(newSt) // Launch a new goroutine to avoid blocking the caller while the observer // does its thing, which may result in a call back into the client. + metricQueued.Add(1) c.observerQueue.Add(func() { - c.observer.SetControlClientStatus(c, new) + if canSkipStatus(newSt, c.lastStatus.Load()) { + metricSkippable.Add(1) + if !c.direct.controlKnobs.DisableSkipStatusQueue.Load() { + metricSkipped.Add(1) + return + } + } + c.observer.SetControlClientStatus(c, *newSt) + // Best effort stop retaining the memory now that + // we've sent it to the observer (LocalBackend). + // We CAS here because the caller goroutine is + // doing a Store which we want to want to win + // a race. This is only a memory optimization + // and is for correctness: + c.lastStatus.CompareAndSwap(newSt, nil) }) } +var ( + metricQueued = clientmetric.NewCounter("controlclient_auto_status_queued") + metricSkippable = clientmetric.NewCounter("controlclient_auto_status_queue_skippable") + metricSkipped = clientmetric.NewCounter("controlclient_auto_status_queue_skipped") +) + +// canSkipStatus reports whether we can skip sending s1, knowing +// that s2 is enqueued sometime in the future after s1. +// +// s1 must be non-nil. s2 may be nil. +func canSkipStatus(s1, s2 *Status) bool { + if s2 == nil { + // Nothing in the future. + return false + } + if s1 == s2 { + // If the last item in the queue is the same as s1, + // we can't skip it. + return false + } + if s1.Err != nil || s1.URL != "" { + // If s1 has an error or a URL, we shouldn't skip it, lest the error go + // away in s2 or in-between. We want to make sure all the subsystems see + // it. Plus there aren't many of these, so not worth skipping. + return false + } + if !s1.Persist.Equals(s2.Persist) || s1.state != s2.state { + // If s1 has a different Persist or state than s2, + // don't skip it. We only care about skipping the typical + // entries where the only difference is the NetMap. + return false + } + // If nothing above precludes it, and both s1 and s2 have NetMaps, then + // we can skip it, because s2's NetMap is a newer version and we can + // jump straight from whatever state we had before to s2's state, + // without passing through s1's state first. A NetMap is regrettably a + // full snapshot of the state, not an incremental delta. We're slowly + // moving towards passing around only deltas around internally at all + // layers, but this is explicitly the case where we didn't have a delta + // path for the message we received over the wire and had to resort + // to the legacy full NetMap path. And then we can get behind processing + // these full NetMap snapshots in LocalBackend/wgengine/magicsock/netstack + // and this path (when it returns true) lets us skip over useless work + // and not get behind in the queue. This matters in particular for tailnets + // that are both very large + very churny. 
+ return s1.NetMap != nil && s2.NetMap != nil +} + func (c *Auto) Login(flags LoginFlags) { c.logf("client.Login(%v)", flags) diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index b37623451..6885b5851 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -4,8 +4,13 @@ package controlclient import ( + "io" "reflect" + "slices" "testing" + + "tailscale.com/types/netmap" + "tailscale.com/types/persist" ) func fieldsOf(t reflect.Type) (fields []string) { @@ -62,3 +67,83 @@ func TestStatusEqual(t *testing.T) { } } } + +// tests [canSkipStatus]. +func TestCanSkipStatus(t *testing.T) { + st := new(Status) + nm1 := &netmap.NetworkMap{} + nm2 := &netmap.NetworkMap{} + + tests := []struct { + name string + s1, s2 *Status + want bool + }{ + { + name: "nil-s2", + s1: st, + s2: nil, + want: false, + }, + { + name: "equal", + s1: st, + s2: st, + want: false, + }, + { + name: "s1-error", + s1: &Status{Err: io.EOF, NetMap: nm1}, + s2: &Status{NetMap: nm2}, + want: false, + }, + { + name: "s1-url", + s1: &Status{URL: "foo", NetMap: nm1}, + s2: &Status{NetMap: nm2}, + want: false, + }, + { + name: "s1-persist-diff", + s1: &Status{Persist: new(persist.Persist).View(), NetMap: nm1}, + s2: &Status{NetMap: nm2}, + want: false, + }, + { + name: "s1-state-diff", + s1: &Status{state: 123, NetMap: nm1}, + s2: &Status{NetMap: nm2}, + want: false, + }, + { + name: "s1-no-netmap1", + s1: &Status{NetMap: nil}, + s2: &Status{NetMap: nm2}, + want: false, + }, + { + name: "s1-no-netmap2", + s1: &Status{NetMap: nm1}, + s2: &Status{NetMap: nil}, + want: false, + }, + { + name: "skip", + s1: &Status{NetMap: nm1}, + s2: &Status{NetMap: nm2}, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := canSkipStatus(tt.s1, tt.s2); got != tt.want { + t.Errorf("canSkipStatus = %v, want %v", got, tt.want) + } + }) + } + + want := []string{"Err", "URL", "NetMap", "Persist", "state"} + if f := fieldsOf(reflect.TypeFor[Status]()); !slices.Equal(f, want) { + t.Errorf("Status fields = %q; this code was only written to handle fields %q", f, want) + } +} diff --git a/control/controlknobs/controlknobs.go b/control/controlknobs/controlknobs.go index dd76a3abd..c7933be5a 100644 --- a/control/controlknobs/controlknobs.go +++ b/control/controlknobs/controlknobs.go @@ -103,6 +103,11 @@ type Knobs struct { // DisableCaptivePortalDetection is whether the node should not perform captive portal detection // automatically when the network state changes. DisableCaptivePortalDetection atomic.Bool + + // DisableSkipStatusQueue is whether the node should disable skipping + // of queued netmap.NetworkMap between the controlclient and LocalBackend. + // See tailscale/tailscale#14768. 
+ DisableSkipStatusQueue atomic.Bool } // UpdateFromNodeAttributes updates k (if non-nil) based on the provided self @@ -132,6 +137,7 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { disableLocalDNSOverrideViaNRPT = has(tailcfg.NodeAttrDisableLocalDNSOverrideViaNRPT) disableCryptorouting = has(tailcfg.NodeAttrDisableMagicSockCryptoRouting) disableCaptivePortalDetection = has(tailcfg.NodeAttrDisableCaptivePortalDetection) + disableSkipStatusQueue = has(tailcfg.NodeAttrDisableSkipStatusQueue) ) if has(tailcfg.NodeAttrOneCGNATEnable) { @@ -159,6 +165,7 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { k.DisableLocalDNSOverrideViaNRPT.Store(disableLocalDNSOverrideViaNRPT) k.DisableCryptorouting.Store(disableCryptorouting) k.DisableCaptivePortalDetection.Store(disableCaptivePortalDetection) + k.DisableSkipStatusQueue.Store(disableSkipStatusQueue) } // AsDebugJSON returns k as something that can be marshalled with json.Marshal @@ -187,5 +194,6 @@ func (k *Knobs) AsDebugJSON() map[string]any { "DisableLocalDNSOverrideViaNRPT": k.DisableLocalDNSOverrideViaNRPT.Load(), "DisableCryptorouting": k.DisableCryptorouting.Load(), "DisableCaptivePortalDetection": k.DisableCaptivePortalDetection.Load(), + "DisableSkipStatusQueue": k.DisableSkipStatusQueue.Load(), } } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 738c8a5dc..c17cd5f45 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2470,6 +2470,11 @@ const ( // automatically when the network state changes. NodeAttrDisableCaptivePortalDetection NodeCapability = "disable-captive-portal-detection" + // NodeAttrDisableSkipStatusQueue is set when the node should disable skipping + // of queued netmap.NetworkMap between the controlclient and LocalBackend. + // See tailscale/tailscale#14768. + NodeAttrDisableSkipStatusQueue NodeCapability = "disable-skip-status-queue" + // NodeAttrSSHEnvironmentVariables enables logic for handling environment variables sent // via SendEnv in the SSH server and applying them to the SSH session. NodeAttrSSHEnvironmentVariables NodeCapability = "ssh-env-vars" From ca39c4e150366b0cdcb766a62c9c8bc3fb116083 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 23 Jan 2025 16:23:41 -0800 Subject: [PATCH 0398/1708] cmd/natc,wgengine/netstack: tune buffer size and segment lifetime in natc Some natc instances have been observed with excessive memory growth, dominant in gvisor buffers. It is likely that the connection buffers are sticking around for too long due to the default long segment time, and uptuned buffer size applied by default in wgengine/netstack. Apply configurations in natc specifically which are a better match for the natc use case, most notably a 5s maximum segment lifetime. 
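In outline, the tuning is applied by setting gvisor transport protocol options on natc's netstack before it starts serving traffic. The fragment below is a sketch of just the segment-lifetime piece, mirroring the diff that follows: ns stands for the *netstack.Impl obtained from the tsnet server, SetTransportProtocolOption is the forwarder added to wgengine/netstack in this patch, and the option types come from gvisor's tcpip and tcpip/transport/tcp packages. The receive/send buffer ranges are set the same way, as the full diff shows.

	// Cap gvisor's TCP TIME-WAIT timeout (the "maximum segment lifetime"
	// referred to above) at 5s so closed connections release their buffers
	// quickly instead of lingering at the default lifetime.
	mslOpt := tcpip.TCPTimeWaitTimeoutOption(5 * time.Second)
	if err := ns.SetTransportProtocolOption(tcp.ProtocolNumber, &mslOpt); err != nil {
		log.Fatalf("could not set TCP segment lifetime: %v", err)
	}
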
Updates tailscale/corp#25169 Signed-off-by: James Tucker --- cmd/natc/natc.go | 31 +++++++++++++++++++++++++++++++ wgengine/netstack/netstack.go | 8 ++++++++ 2 files changed, 39 insertions(+) diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index d94523c6e..b28f4a1d5 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -26,6 +26,8 @@ import ( "github.com/inetaf/tcpproxy" "github.com/peterbourgon/ff/v3" "golang.org/x/net/dns/dnsmessage" + "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" "tailscale.com/client/tailscale" "tailscale.com/envknob" "tailscale.com/hostinfo" @@ -37,6 +39,7 @@ import ( "tailscale.com/tsweb" "tailscale.com/util/dnsname" "tailscale.com/util/mak" + "tailscale.com/wgengine/netstack" ) func main() { @@ -112,6 +115,7 @@ func main() { ts.Port = uint16(*wgPort) } defer ts.Close() + if *verboseTSNet { ts.Logf = log.Printf } @@ -129,6 +133,33 @@ func main() { log.Fatalf("debug serve: %v", http.Serve(dln, mux)) }() } + + if err := ts.Start(); err != nil { + log.Fatalf("ts.Start: %v", err) + } + // TODO(raggi): this is not a public interface or guarantee. + ns := ts.Sys().Netstack.Get().(*netstack.Impl) + tcpRXBufOpt := tcpip.TCPReceiveBufferSizeRangeOption{ + Min: tcp.MinBufferSize, + Default: tcp.DefaultReceiveBufferSize, + Max: tcp.MaxBufferSize, + } + if err := ns.SetTransportProtocolOption(tcp.ProtocolNumber, &tcpRXBufOpt); err != nil { + log.Fatalf("could not set TCP RX buf size: %v", err) + } + tcpTXBufOpt := tcpip.TCPSendBufferSizeRangeOption{ + Min: tcp.MinBufferSize, + Default: tcp.DefaultSendBufferSize, + Max: tcp.MaxBufferSize, + } + if err := ns.SetTransportProtocolOption(tcp.ProtocolNumber, &tcpTXBufOpt); err != nil { + log.Fatalf("could not set TCP TX buf size: %v", err) + } + mslOpt := tcpip.TCPTimeWaitTimeoutOption(5 * time.Second) + if err := ns.SetTransportProtocolOption(tcp.ProtocolNumber, &mslOpt); err != nil { + log.Fatalf("could not set TCP MSL: %v", err) + } + lc, err := ts.LocalClient() if err != nil { log.Fatalf("LocalClient() failed: %v", err) diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 0b8c67b06..f0c4c5271 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -405,6 +405,14 @@ func (ns *Impl) Close() error { return nil } +// SetTransportProtocolOption forwards to the underlying +// [stack.Stack.SetTransportProtocolOption]. Callers are responsible for +// ensuring that the options are valid, compatible and appropriate for their use +// case. Compatibility may change at any version. +func (ns *Impl) SetTransportProtocolOption(transport tcpip.TransportProtocolNumber, option tcpip.SettableTransportProtocolOption) tcpip.Error { + return ns.ipstack.SetTransportProtocolOption(transport, option) +} + // A single process might have several netstacks running at the same time. // Exported clientmetric counters will have a sum of counters of all of them. var stacksForMetrics syncs.Map[*Impl, struct{}] From 2089f4b603e36501dd1a7497ab4de691b1560dd7 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Sat, 25 Jan 2025 00:29:00 +0000 Subject: [PATCH 0399/1708] ipn/ipnlocal: add debug envknob for ACME directory URL (#14771) Adds an envknob setting for changing the client's ACME directory URL. This allows testing cert issuing against LE's staging environment, as well as enabling local-only test environments, which is useful for avoiding the production rate limits in test and development scenarios. 
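For example, a development tailscaled can be pointed at Let's Encrypt's staging directory by starting it with TS_DEBUG_ACME_DIRECTORY_URL=https://acme-staging-v02.api.letsencrypt.org/directory in its environment (the same staging URL exercised by the test below); leaving the variable unset keeps the ACME library's default directory, so existing deployments are unaffected.
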
Fixes #14761 Change-Id: I191c840c0ca143a20e4fa54ea3b2f9b7cbfc889f Signed-off-by: Tom Proctor --- ipn/ipnlocal/cert.go | 5 +++-- ipn/ipnlocal/cert_test.go | 16 ++++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index 71ae8ac86..3361fc70b 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -659,8 +659,9 @@ func acmeClient(cs certStore) (*acme.Client, error) { // LetsEncrypt), we should make sure that they support ARI extension (see // shouldStartDomainRenewalARI). return &acme.Client{ - Key: key, - UserAgent: "tailscaled/" + version.Long(), + Key: key, + UserAgent: "tailscaled/" + version.Long(), + DirectoryURL: envknob.String("TS_DEBUG_ACME_DIRECTORY_URL"), }, nil } diff --git a/ipn/ipnlocal/cert_test.go b/ipn/ipnlocal/cert_test.go index 3ae7870e3..21741ca95 100644 --- a/ipn/ipnlocal/cert_test.go +++ b/ipn/ipnlocal/cert_test.go @@ -199,3 +199,19 @@ func TestShouldStartDomainRenewal(t *testing.T) { }) } } + +func TestDebugACMEDirectoryURL(t *testing.T) { + for _, tc := range []string{"", "https://acme-staging-v02.api.letsencrypt.org/directory"} { + const setting = "TS_DEBUG_ACME_DIRECTORY_URL" + t.Run(tc, func(t *testing.T) { + t.Setenv(setting, tc) + ac, err := acmeClient(certStateStore{StateStore: new(mem.Store)}) + if err != nil { + t.Fatalf("acmeClient creation err: %v", err) + } + if ac.DirectoryURL != tc { + t.Fatalf("acmeClient.DirectoryURL = %q, want %q", ac.DirectoryURL, tc) + } + }) + } +} From 82e41ddc427aa6d41b875642788cb12f765ed40c Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 23 Jan 2025 16:31:40 -0800 Subject: [PATCH 0400/1708] cmd/natc: expose netstack metrics in client metrics in natc Updates tailscale/corp#25169 Signed-off-by: James Tucker --- cmd/natc/natc.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index b28f4a1d5..069eabefd 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -10,6 +10,7 @@ import ( "context" "encoding/binary" "errors" + "expvar" "flag" "fmt" "log" @@ -159,6 +160,9 @@ func main() { if err := ns.SetTransportProtocolOption(tcp.ProtocolNumber, &mslOpt); err != nil { log.Fatalf("could not set TCP MSL: %v", err) } + if *debugPort != 0 { + expvar.Publish("netstack", ns.ExpVar()) + } lc, err := ts.LocalClient() if err != nil { From 2c98c44d9a7c0b67aef7e72e7fed0766a7e7b1e6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 23 Jan 2025 13:58:40 -0800 Subject: [PATCH 0401/1708] control/controlclient: sanitize invalid DERPMap nil Region from control Fixes #14752 Change-Id: If364603eefb9ac6dc5ec6df84a0d5e16c94dda8d Signed-off-by: Brad Fitzpatrick --- control/controlclient/map.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/control/controlclient/map.go b/control/controlclient/map.go index 1a54fc543..f0a11bdf1 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -300,6 +300,15 @@ func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) { if dm := resp.DERPMap; dm != nil { ms.vlogf("netmap: new map contains DERP map") + // Guard against the control server accidentally sending + // a nil region definition, which at least Headscale was + // observed to send. + for rid, r := range dm.Regions { + if r == nil { + delete(dm.Regions, rid) + } + } + // Zero-valued fields in a DERPMap mean that we're not changing // anything and are using the previous value(s). 
if ldm := ms.lastDERPMap; ldm != nil { From 68a66ee81b8e59de355a4b1a0688f28adf2c59b6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 23 Jan 2025 20:39:28 -0800 Subject: [PATCH 0402/1708] feature/capture: move packet capture to feature/*, out of iOS + CLI We had the debug packet capture code + Lua dissector in the CLI + the iOS app. Now we don't, with tests to lock it in. As a bonus, tailscale.com/net/packet and tailscale.com/net/flowtrack no longer appear in the CLI's binary either. A new build tag ts_omit_capture disables the packet capture code and was added to build_dist.sh's --extra-small mode. Updates #12614 Change-Id: I79b0628c0d59911bd4d510c732284d97b0160f10 Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 2 +- cmd/k8s-operator/depaware.txt | 4 +- cmd/tailscale/cli/cli.go | 2 +- cmd/tailscale/cli/cli_test.go | 30 + cmd/tailscale/cli/debug-capture.go | 80 +++ cmd/tailscale/cli/debug.go | 638 ++++++++---------- cmd/tailscale/depaware.txt | 6 +- cmd/tailscaled/depaware.txt | 4 +- {wgengine => feature}/capture/capture.go | 74 +- feature/capture/dissector/dissector.go | 12 + .../capture/dissector}/ts-dissector.lua | 0 feature/condregister/maybe_capture.go | 8 + ipn/ipnlocal/local.go | 80 +-- ipn/localapi/localapi.go | 34 +- net/packet/capture.go | 75 ++ net/packet/packet.go | 8 - net/tstun/wrap.go | 15 +- net/tstun/wrap_test.go | 13 +- tstest/iosdeps/iosdeps_test.go | 1 + wgengine/magicsock/magicsock.go | 7 +- wgengine/userspace.go | 3 +- wgengine/watchdog.go | 4 +- wgengine/wgengine.go | 4 +- 23 files changed, 620 insertions(+), 484 deletions(-) create mode 100644 cmd/tailscale/cli/debug-capture.go rename {wgengine => feature}/capture/capture.go (79%) create mode 100644 feature/capture/dissector/dissector.go rename {wgengine/capture => feature/capture/dissector}/ts-dissector.lua (100%) create mode 100644 feature/condregister/maybe_capture.go create mode 100644 net/packet/capture.go diff --git a/build_dist.sh b/build_dist.sh index 9a29e5201..ccd4ac8b1 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -37,7 +37,7 @@ while [ "$#" -gt 1 ]; do --extra-small) shift ldflags="$ldflags -w -s" - tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan" + tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture" ;; --box) shift diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 11a9201d4..fc2f8854a 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -802,6 +802,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/envknob from tailscale.com/client/tailscale+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/feature/wakeonlan+ + tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/tsnet L tailscale.com/feature/tap from tailscale.com/feature/condregister tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister @@ -814,7 +815,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnstate from tailscale.com/client/tailscale+ - tailscale.com/ipn/localapi from tailscale.com/tsnet + tailscale.com/ipn/localapi from tailscale.com/tsnet+ 
tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store @@ -969,7 +970,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/version from tailscale.com/client/web+ tailscale.com/version/distro from tailscale.com/client/web+ tailscale.com/wgengine from tailscale.com/ipn/ipnlocal+ - tailscale.com/wgengine/capture from tailscale.com/ipn/ipnlocal+ tailscale.com/wgengine/filter from tailscale.com/control/controlclient+ tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+ 💣 tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index fd39b3b67..d80d0c02f 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -212,7 +212,7 @@ change in the future. exitNodeCmd(), updateCmd, whoisCmd, - debugCmd, + debugCmd(), driveCmd, idTokenCmd, advertiseCmd(), diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 6f43814e8..2d02b6b7a 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -25,10 +25,12 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tka" "tailscale.com/tstest" + "tailscale.com/tstest/deptest" "tailscale.com/types/logger" "tailscale.com/types/opt" "tailscale.com/types/persist" "tailscale.com/types/preftype" + "tailscale.com/util/set" "tailscale.com/version/distro" ) @@ -1568,3 +1570,31 @@ func TestDocs(t *testing.T) { } walk(t, root) } + +func TestDeps(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "arm64", + WantDeps: set.Of( + "tailscale.com/feature/capture/dissector", // want the Lua by default + ), + BadDeps: map[string]string{ + "tailscale.com/feature/capture": "don't link capture code", + "tailscale.com/net/packet": "why we passing packets in the CLI?", + "tailscale.com/net/flowtrack": "why we tracking flows in the CLI?", + }, + }.Check(t) +} + +func TestDepsNoCapture(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "arm64", + Tags: "ts_omit_capture", + BadDeps: map[string]string{ + "tailscale.com/feature/capture": "don't link capture code", + "tailscale.com/feature/capture/dissector": "don't like the Lua", + }, + }.Check(t) + +} diff --git a/cmd/tailscale/cli/debug-capture.go b/cmd/tailscale/cli/debug-capture.go new file mode 100644 index 000000000..a54066fa6 --- /dev/null +++ b/cmd/tailscale/cli/debug-capture.go @@ -0,0 +1,80 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios && !ts_omit_capture + +package cli + +import ( + "context" + "flag" + "fmt" + "io" + "os" + "os/exec" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/feature/capture/dissector" +) + +func init() { + debugCaptureCmd = mkDebugCaptureCmd +} + +func mkDebugCaptureCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "capture", + ShortUsage: "tailscale debug capture", + Exec: runCapture, + ShortHelp: "Stream pcaps for debugging", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("capture") + fs.StringVar(&captureArgs.outFile, "o", "", "path to stream the pcap (or - for stdout), leave empty to start wireshark") + return fs + })(), + } +} + +var captureArgs struct { + outFile string +} + +func runCapture(ctx context.Context, args []string) error { + stream, err := localClient.StreamDebugCapture(ctx) + if err != nil { + return err + } + defer stream.Close() + + switch captureArgs.outFile 
{ + case "-": + fmt.Fprintln(Stderr, "Press Ctrl-C to stop the capture.") + _, err = io.Copy(os.Stdout, stream) + return err + case "": + lua, err := os.CreateTemp("", "ts-dissector") + if err != nil { + return err + } + defer os.Remove(lua.Name()) + io.WriteString(lua, dissector.Lua) + if err := lua.Close(); err != nil { + return err + } + + wireshark := exec.CommandContext(ctx, "wireshark", "-X", "lua_script:"+lua.Name(), "-k", "-i", "-") + wireshark.Stdin = stream + wireshark.Stdout = os.Stdout + wireshark.Stderr = os.Stderr + return wireshark.Run() + } + + f, err := os.OpenFile(captureArgs.outFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + defer f.Close() + fmt.Fprintln(Stderr, "Press Ctrl-C to stop the capture.") + _, err = io.Copy(f, stream) + return err +} diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index f84dd25f0..ce5edd8d3 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -20,7 +20,6 @@ import ( "net/netip" "net/url" "os" - "os/exec" "runtime" "runtime/debug" "strconv" @@ -45,307 +44,302 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/util/must" - "tailscale.com/wgengine/capture" ) -var debugCmd = &ffcli.Command{ - Name: "debug", - Exec: runDebug, - ShortUsage: "tailscale debug ", - ShortHelp: "Debug commands", - LongHelp: hidden + `"tailscale debug" contains misc debug facilities; it is not a stable interface.`, - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("debug") - fs.StringVar(&debugArgs.file, "file", "", "get, delete:NAME, or NAME") - fs.StringVar(&debugArgs.cpuFile, "cpu-profile", "", "if non-empty, grab a CPU profile for --profile-seconds seconds and write it to this file; - for stdout") - fs.StringVar(&debugArgs.memFile, "mem-profile", "", "if non-empty, grab a memory profile and write it to this file; - for stdout") - fs.IntVar(&debugArgs.cpuSec, "profile-seconds", 15, "number of seconds to run a CPU profile for, when --cpu-profile is non-empty") - return fs - })(), - Subcommands: []*ffcli.Command{ - { - Name: "derp-map", - ShortUsage: "tailscale debug derp-map", - Exec: runDERPMap, - ShortHelp: "Print DERP map", - }, - { - Name: "component-logs", - ShortUsage: "tailscale debug component-logs [" + strings.Join(ipn.DebuggableComponents, "|") + "]", - Exec: runDebugComponentLogs, - ShortHelp: "Enable/disable debug logs for a component", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("component-logs") - fs.DurationVar(&debugComponentLogsArgs.forDur, "for", time.Hour, "how long to enable debug logs for; zero or negative means to disable") - return fs - })(), - }, - { - Name: "daemon-goroutines", - ShortUsage: "tailscale debug daemon-goroutines", - Exec: runDaemonGoroutines, - ShortHelp: "Print tailscaled's goroutines", - }, - { - Name: "daemon-logs", - ShortUsage: "tailscale debug daemon-logs", - Exec: runDaemonLogs, - ShortHelp: "Watch tailscaled's server logs", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("daemon-logs") - fs.IntVar(&daemonLogsArgs.verbose, "verbose", 0, "verbosity level") - fs.BoolVar(&daemonLogsArgs.time, "time", false, "include client time") - return fs - })(), - }, - { - Name: "metrics", - ShortUsage: "tailscale debug metrics", - Exec: runDaemonMetrics, - ShortHelp: "Print tailscaled's metrics", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("metrics") - fs.BoolVar(&metricsArgs.watch, "watch", false, "print JSON dump of delta values") - return fs - })(), - }, - { - Name: "env", - ShortUsage: "tailscale 
debug env", - Exec: runEnv, - ShortHelp: "Print cmd/tailscale environment", - }, - { - Name: "stat", - ShortUsage: "tailscale debug stat ", - Exec: runStat, - ShortHelp: "Stat a file", - }, - { - Name: "hostinfo", - ShortUsage: "tailscale debug hostinfo", - Exec: runHostinfo, - ShortHelp: "Print hostinfo", - }, - { - Name: "local-creds", - ShortUsage: "tailscale debug local-creds", - Exec: runLocalCreds, - ShortHelp: "Print how to access Tailscale LocalAPI", - }, - { - Name: "restun", - ShortUsage: "tailscale debug restun", - Exec: localAPIAction("restun"), - ShortHelp: "Force a magicsock restun", - }, - { - Name: "rebind", - ShortUsage: "tailscale debug rebind", - Exec: localAPIAction("rebind"), - ShortHelp: "Force a magicsock rebind", - }, - { - Name: "derp-set-on-demand", - ShortUsage: "tailscale debug derp-set-on-demand", - Exec: localAPIAction("derp-set-homeless"), - ShortHelp: "Enable DERP on-demand mode (breaks reachability)", - }, - { - Name: "derp-unset-on-demand", - ShortUsage: "tailscale debug derp-unset-on-demand", - Exec: localAPIAction("derp-unset-homeless"), - ShortHelp: "Disable DERP on-demand mode", - }, - { - Name: "break-tcp-conns", - ShortUsage: "tailscale debug break-tcp-conns", - Exec: localAPIAction("break-tcp-conns"), - ShortHelp: "Break any open TCP connections from the daemon", - }, - { - Name: "break-derp-conns", - ShortUsage: "tailscale debug break-derp-conns", - Exec: localAPIAction("break-derp-conns"), - ShortHelp: "Break any open DERP connections from the daemon", - }, - { - Name: "pick-new-derp", - ShortUsage: "tailscale debug pick-new-derp", - Exec: localAPIAction("pick-new-derp"), - ShortHelp: "Switch to some other random DERP home region for a short time", - }, - { - Name: "force-prefer-derp", - ShortUsage: "tailscale debug force-prefer-derp", - Exec: forcePreferDERP, - ShortHelp: "Prefer the given region ID if reachable (until restart, or 0 to clear)", - }, - { - Name: "force-netmap-update", - ShortUsage: "tailscale debug force-netmap-update", - Exec: localAPIAction("force-netmap-update"), - ShortHelp: "Force a full no-op netmap update (for load testing)", - }, - { - // TODO(bradfitz,maisem): eventually promote this out of debug - Name: "reload-config", - ShortUsage: "tailscale debug reload-config", - Exec: reloadConfig, - ShortHelp: "Reload config", - }, - { - Name: "control-knobs", - ShortUsage: "tailscale debug control-knobs", - Exec: debugControlKnobs, - ShortHelp: "See current control knobs", - }, - { - Name: "prefs", - ShortUsage: "tailscale debug prefs", - Exec: runPrefs, - ShortHelp: "Print prefs", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("prefs") - fs.BoolVar(&prefsArgs.pretty, "pretty", false, "If true, pretty-print output") - return fs - })(), - }, - { - Name: "watch-ipn", - ShortUsage: "tailscale debug watch-ipn", - Exec: runWatchIPN, - ShortHelp: "Subscribe to IPN message bus", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("watch-ipn") - fs.BoolVar(&watchIPNArgs.netmap, "netmap", true, "include netmap in messages") - fs.BoolVar(&watchIPNArgs.initial, "initial", false, "include initial status") - fs.BoolVar(&watchIPNArgs.rateLimit, "rate-limit", true, "rate limit messags") - fs.BoolVar(&watchIPNArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap") - fs.IntVar(&watchIPNArgs.count, "count", 0, "exit after printing this many statuses, or 0 to keep going forever") - return fs - })(), - }, - { - Name: "netmap", - ShortUsage: "tailscale debug netmap", - Exec: runNetmap, - ShortHelp: 
"Print the current network map", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("netmap") - fs.BoolVar(&netmapArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap") - return fs - })(), - }, - { - Name: "via", - ShortUsage: "tailscale debug via \n" + - "tailscale debug via ", - Exec: runVia, - ShortHelp: "Convert between site-specific IPv4 CIDRs and IPv6 'via' routes", - }, - { - Name: "ts2021", - ShortUsage: "tailscale debug ts2021", - Exec: runTS2021, - ShortHelp: "Debug ts2021 protocol connectivity", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("ts2021") - fs.StringVar(&ts2021Args.host, "host", "controlplane.tailscale.com", "hostname of control plane") - fs.IntVar(&ts2021Args.version, "version", int(tailcfg.CurrentCapabilityVersion), "protocol version") - fs.BoolVar(&ts2021Args.verbose, "verbose", false, "be extra verbose") - return fs - })(), - }, - { - Name: "set-expire", - ShortUsage: "tailscale debug set-expire --in=1m", - Exec: runSetExpire, - ShortHelp: "Manipulate node key expiry for testing", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("set-expire") - fs.DurationVar(&setExpireArgs.in, "in", 0, "if non-zero, set node key to expire this duration from now") - return fs - })(), - }, - { - Name: "dev-store-set", - ShortUsage: "tailscale debug dev-store-set", - Exec: runDevStoreSet, - ShortHelp: "Set a key/value pair during development", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("store-set") - fs.BoolVar(&devStoreSetArgs.danger, "danger", false, "accept danger") - return fs - })(), - }, - { - Name: "derp", - ShortUsage: "tailscale debug derp", - Exec: runDebugDERP, - ShortHelp: "Test a DERP configuration", - }, - { - Name: "capture", - ShortUsage: "tailscale debug capture", - Exec: runCapture, - ShortHelp: "Stream pcaps for debugging", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("capture") - fs.StringVar(&captureArgs.outFile, "o", "", "path to stream the pcap (or - for stdout), leave empty to start wireshark") - return fs - })(), - }, - { - Name: "portmap", - ShortUsage: "tailscale debug portmap", - Exec: debugPortmap, - ShortHelp: "Run portmap debugging", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("portmap") - fs.DurationVar(&debugPortmapArgs.duration, "duration", 5*time.Second, "timeout for port mapping") - fs.StringVar(&debugPortmapArgs.ty, "type", "", `portmap debug type (one of "", "pmp", "pcp", or "upnp")`) - fs.StringVar(&debugPortmapArgs.gatewayAddr, "gateway-addr", "", `override gateway IP (must also pass --self-addr)`) - fs.StringVar(&debugPortmapArgs.selfAddr, "self-addr", "", `override self IP (must also pass --gateway-addr)`) - fs.BoolVar(&debugPortmapArgs.logHTTP, "log-http", false, `print all HTTP requests and responses to the log`) - return fs - })(), - }, - { - Name: "peer-endpoint-changes", - ShortUsage: "tailscale debug peer-endpoint-changes ", - Exec: runPeerEndpointChanges, - ShortHelp: "Print debug information about a peer's endpoint changes", - }, - { - Name: "dial-types", - ShortUsage: "tailscale debug dial-types ", - Exec: runDebugDialTypes, - ShortHelp: "Print debug information about connecting to a given host or IP", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("dial-types") - fs.StringVar(&debugDialTypesArgs.network, "network", "tcp", `network type to dial ("tcp", "udp", etc.)`) - return fs - })(), - }, - { - Name: "resolve", - ShortUsage: "tailscale debug resolve ", - Exec: runDebugResolve, - ShortHelp: "Does a DNS lookup", - FlagSet: (func() 
*flag.FlagSet { - fs := newFlagSet("resolve") - fs.StringVar(&resolveArgs.net, "net", "ip", "network type to resolve (ip, ip4, ip6)") - return fs - })(), - }, - { - Name: "go-buildinfo", - ShortUsage: "tailscale debug go-buildinfo", - ShortHelp: "Print Go's runtime/debug.BuildInfo", - Exec: runGoBuildInfo, - }, - }, +var ( + debugCaptureCmd func() *ffcli.Command // or nil +) + +func debugCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "debug", + Exec: runDebug, + ShortUsage: "tailscale debug ", + ShortHelp: "Debug commands", + LongHelp: hidden + `"tailscale debug" contains misc debug facilities; it is not a stable interface.`, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("debug") + fs.StringVar(&debugArgs.file, "file", "", "get, delete:NAME, or NAME") + fs.StringVar(&debugArgs.cpuFile, "cpu-profile", "", "if non-empty, grab a CPU profile for --profile-seconds seconds and write it to this file; - for stdout") + fs.StringVar(&debugArgs.memFile, "mem-profile", "", "if non-empty, grab a memory profile and write it to this file; - for stdout") + fs.IntVar(&debugArgs.cpuSec, "profile-seconds", 15, "number of seconds to run a CPU profile for, when --cpu-profile is non-empty") + return fs + })(), + Subcommands: nonNilCmds([]*ffcli.Command{ + { + Name: "derp-map", + ShortUsage: "tailscale debug derp-map", + Exec: runDERPMap, + ShortHelp: "Print DERP map", + }, + { + Name: "component-logs", + ShortUsage: "tailscale debug component-logs [" + strings.Join(ipn.DebuggableComponents, "|") + "]", + Exec: runDebugComponentLogs, + ShortHelp: "Enable/disable debug logs for a component", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("component-logs") + fs.DurationVar(&debugComponentLogsArgs.forDur, "for", time.Hour, "how long to enable debug logs for; zero or negative means to disable") + return fs + })(), + }, + { + Name: "daemon-goroutines", + ShortUsage: "tailscale debug daemon-goroutines", + Exec: runDaemonGoroutines, + ShortHelp: "Print tailscaled's goroutines", + }, + { + Name: "daemon-logs", + ShortUsage: "tailscale debug daemon-logs", + Exec: runDaemonLogs, + ShortHelp: "Watch tailscaled's server logs", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("daemon-logs") + fs.IntVar(&daemonLogsArgs.verbose, "verbose", 0, "verbosity level") + fs.BoolVar(&daemonLogsArgs.time, "time", false, "include client time") + return fs + })(), + }, + { + Name: "metrics", + ShortUsage: "tailscale debug metrics", + Exec: runDaemonMetrics, + ShortHelp: "Print tailscaled's metrics", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("metrics") + fs.BoolVar(&metricsArgs.watch, "watch", false, "print JSON dump of delta values") + return fs + })(), + }, + { + Name: "env", + ShortUsage: "tailscale debug env", + Exec: runEnv, + ShortHelp: "Print cmd/tailscale environment", + }, + { + Name: "stat", + ShortUsage: "tailscale debug stat ", + Exec: runStat, + ShortHelp: "Stat a file", + }, + { + Name: "hostinfo", + ShortUsage: "tailscale debug hostinfo", + Exec: runHostinfo, + ShortHelp: "Print hostinfo", + }, + { + Name: "local-creds", + ShortUsage: "tailscale debug local-creds", + Exec: runLocalCreds, + ShortHelp: "Print how to access Tailscale LocalAPI", + }, + { + Name: "restun", + ShortUsage: "tailscale debug restun", + Exec: localAPIAction("restun"), + ShortHelp: "Force a magicsock restun", + }, + { + Name: "rebind", + ShortUsage: "tailscale debug rebind", + Exec: localAPIAction("rebind"), + ShortHelp: "Force a magicsock rebind", + }, + { + Name: "derp-set-on-demand", + ShortUsage: "tailscale 
debug derp-set-on-demand", + Exec: localAPIAction("derp-set-homeless"), + ShortHelp: "Enable DERP on-demand mode (breaks reachability)", + }, + { + Name: "derp-unset-on-demand", + ShortUsage: "tailscale debug derp-unset-on-demand", + Exec: localAPIAction("derp-unset-homeless"), + ShortHelp: "Disable DERP on-demand mode", + }, + { + Name: "break-tcp-conns", + ShortUsage: "tailscale debug break-tcp-conns", + Exec: localAPIAction("break-tcp-conns"), + ShortHelp: "Break any open TCP connections from the daemon", + }, + { + Name: "break-derp-conns", + ShortUsage: "tailscale debug break-derp-conns", + Exec: localAPIAction("break-derp-conns"), + ShortHelp: "Break any open DERP connections from the daemon", + }, + { + Name: "pick-new-derp", + ShortUsage: "tailscale debug pick-new-derp", + Exec: localAPIAction("pick-new-derp"), + ShortHelp: "Switch to some other random DERP home region for a short time", + }, + { + Name: "force-prefer-derp", + ShortUsage: "tailscale debug force-prefer-derp", + Exec: forcePreferDERP, + ShortHelp: "Prefer the given region ID if reachable (until restart, or 0 to clear)", + }, + { + Name: "force-netmap-update", + ShortUsage: "tailscale debug force-netmap-update", + Exec: localAPIAction("force-netmap-update"), + ShortHelp: "Force a full no-op netmap update (for load testing)", + }, + { + // TODO(bradfitz,maisem): eventually promote this out of debug + Name: "reload-config", + ShortUsage: "tailscale debug reload-config", + Exec: reloadConfig, + ShortHelp: "Reload config", + }, + { + Name: "control-knobs", + ShortUsage: "tailscale debug control-knobs", + Exec: debugControlKnobs, + ShortHelp: "See current control knobs", + }, + { + Name: "prefs", + ShortUsage: "tailscale debug prefs", + Exec: runPrefs, + ShortHelp: "Print prefs", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("prefs") + fs.BoolVar(&prefsArgs.pretty, "pretty", false, "If true, pretty-print output") + return fs + })(), + }, + { + Name: "watch-ipn", + ShortUsage: "tailscale debug watch-ipn", + Exec: runWatchIPN, + ShortHelp: "Subscribe to IPN message bus", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("watch-ipn") + fs.BoolVar(&watchIPNArgs.netmap, "netmap", true, "include netmap in messages") + fs.BoolVar(&watchIPNArgs.initial, "initial", false, "include initial status") + fs.BoolVar(&watchIPNArgs.rateLimit, "rate-limit", true, "rate limit messags") + fs.BoolVar(&watchIPNArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap") + fs.IntVar(&watchIPNArgs.count, "count", 0, "exit after printing this many statuses, or 0 to keep going forever") + return fs + })(), + }, + { + Name: "netmap", + ShortUsage: "tailscale debug netmap", + Exec: runNetmap, + ShortHelp: "Print the current network map", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("netmap") + fs.BoolVar(&netmapArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap") + return fs + })(), + }, + { + Name: "via", + ShortUsage: "tailscale debug via \n" + + "tailscale debug via ", + Exec: runVia, + ShortHelp: "Convert between site-specific IPv4 CIDRs and IPv6 'via' routes", + }, + { + Name: "ts2021", + ShortUsage: "tailscale debug ts2021", + Exec: runTS2021, + ShortHelp: "Debug ts2021 protocol connectivity", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("ts2021") + fs.StringVar(&ts2021Args.host, "host", "controlplane.tailscale.com", "hostname of control plane") + fs.IntVar(&ts2021Args.version, "version", int(tailcfg.CurrentCapabilityVersion), "protocol 
version") + fs.BoolVar(&ts2021Args.verbose, "verbose", false, "be extra verbose") + return fs + })(), + }, + { + Name: "set-expire", + ShortUsage: "tailscale debug set-expire --in=1m", + Exec: runSetExpire, + ShortHelp: "Manipulate node key expiry for testing", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("set-expire") + fs.DurationVar(&setExpireArgs.in, "in", 0, "if non-zero, set node key to expire this duration from now") + return fs + })(), + }, + { + Name: "dev-store-set", + ShortUsage: "tailscale debug dev-store-set", + Exec: runDevStoreSet, + ShortHelp: "Set a key/value pair during development", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("store-set") + fs.BoolVar(&devStoreSetArgs.danger, "danger", false, "accept danger") + return fs + })(), + }, + { + Name: "derp", + ShortUsage: "tailscale debug derp", + Exec: runDebugDERP, + ShortHelp: "Test a DERP configuration", + }, + ccall(debugCaptureCmd), + { + Name: "portmap", + ShortUsage: "tailscale debug portmap", + Exec: debugPortmap, + ShortHelp: "Run portmap debugging", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("portmap") + fs.DurationVar(&debugPortmapArgs.duration, "duration", 5*time.Second, "timeout for port mapping") + fs.StringVar(&debugPortmapArgs.ty, "type", "", `portmap debug type (one of "", "pmp", "pcp", or "upnp")`) + fs.StringVar(&debugPortmapArgs.gatewayAddr, "gateway-addr", "", `override gateway IP (must also pass --self-addr)`) + fs.StringVar(&debugPortmapArgs.selfAddr, "self-addr", "", `override self IP (must also pass --gateway-addr)`) + fs.BoolVar(&debugPortmapArgs.logHTTP, "log-http", false, `print all HTTP requests and responses to the log`) + return fs + })(), + }, + { + Name: "peer-endpoint-changes", + ShortUsage: "tailscale debug peer-endpoint-changes ", + Exec: runPeerEndpointChanges, + ShortHelp: "Print debug information about a peer's endpoint changes", + }, + { + Name: "dial-types", + ShortUsage: "tailscale debug dial-types ", + Exec: runDebugDialTypes, + ShortHelp: "Print debug information about connecting to a given host or IP", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("dial-types") + fs.StringVar(&debugDialTypesArgs.network, "network", "tcp", `network type to dial ("tcp", "udp", etc.)`) + return fs + })(), + }, + { + Name: "resolve", + ShortUsage: "tailscale debug resolve ", + Exec: runDebugResolve, + ShortHelp: "Does a DNS lookup", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("resolve") + fs.StringVar(&resolveArgs.net, "net", "ip", "network type to resolve (ip, ip4, ip6)") + return fs + })(), + }, + { + Name: "go-buildinfo", + ShortUsage: "tailscale debug go-buildinfo", + ShortHelp: "Print Go's runtime/debug.BuildInfo", + Exec: runGoBuildInfo, + }, + }...), + } } func runGoBuildInfo(ctx context.Context, args []string) error { @@ -1036,50 +1030,6 @@ func runSetExpire(ctx context.Context, args []string) error { return localClient.DebugSetExpireIn(ctx, setExpireArgs.in) } -var captureArgs struct { - outFile string -} - -func runCapture(ctx context.Context, args []string) error { - stream, err := localClient.StreamDebugCapture(ctx) - if err != nil { - return err - } - defer stream.Close() - - switch captureArgs.outFile { - case "-": - fmt.Fprintln(Stderr, "Press Ctrl-C to stop the capture.") - _, err = io.Copy(os.Stdout, stream) - return err - case "": - lua, err := os.CreateTemp("", "ts-dissector") - if err != nil { - return err - } - defer os.Remove(lua.Name()) - lua.Write([]byte(capture.DissectorLua)) - if err := lua.Close(); err != nil { - return err - } 
- - wireshark := exec.CommandContext(ctx, "wireshark", "-X", "lua_script:"+lua.Name(), "-k", "-i", "-") - wireshark.Stdin = stream - wireshark.Stdout = os.Stdout - wireshark.Stderr = os.Stderr - return wireshark.Run() - } - - f, err := os.OpenFile(captureArgs.outFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return err - } - defer f.Close() - fmt.Fprintln(Stderr, "Press Ctrl-C to stop the capture.") - _, err = io.Copy(f, stream) - return err -} - var debugPortmapArgs struct { duration time.Duration gatewayAddr string diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 774d97d8e..47ba03cb9 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -88,6 +88,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/drive from tailscale.com/client/tailscale+ tailscale.com/envknob from tailscale.com/client/tailscale+ tailscale.com/envknob/featureknob from tailscale.com/client/web + tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli tailscale.com/hostinfo from tailscale.com/client/web+ @@ -102,7 +103,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dnscache from tailscale.com/control/controlhttp+ tailscale.com/net/dnsfallback from tailscale.com/control/controlhttp+ - tailscale.com/net/flowtrack from tailscale.com/net/packet tailscale.com/net/netaddr from tailscale.com/ipn+ tailscale.com/net/netcheck from tailscale.com/cmd/tailscale/cli tailscale.com/net/neterror from tailscale.com/net/netcheck+ @@ -110,7 +110,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep 💣 tailscale.com/net/netmon from tailscale.com/cmd/tailscale/cli+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ tailscale.com/net/netutil from tailscale.com/client/tailscale+ - tailscale.com/net/packet from tailscale.com/wgengine/capture tailscale.com/net/ping from tailscale.com/net/netcheck tailscale.com/net/portmapper from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/sockstats from tailscale.com/control/controlhttp+ @@ -133,7 +132,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/tsweb/varz from tailscale.com/util/usermetric tailscale.com/types/dnstype from tailscale.com/tailcfg+ tailscale.com/types/empty from tailscale.com/ipn - tailscale.com/types/ipproto from tailscale.com/net/flowtrack+ + tailscale.com/types/ipproto from tailscale.com/ipn+ tailscale.com/types/key from tailscale.com/client/tailscale+ tailscale.com/types/lazy from tailscale.com/util/testenv+ tailscale.com/types/logger from tailscale.com/client/web+ @@ -185,7 +184,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ tailscale.com/version from tailscale.com/client/web+ tailscale.com/version/distro from tailscale.com/client/web+ - tailscale.com/wgengine/capture from tailscale.com/cmd/tailscale/cli tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 4f81d93dd..1e0b2061a 100644 --- 
a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -260,6 +260,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/envknob from tailscale.com/client/tailscale+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/feature/wakeonlan+ + tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled L tailscale.com/feature/tap from tailscale.com/feature/condregister tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister @@ -273,7 +274,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled tailscale.com/ipn/ipnstate from tailscale.com/client/tailscale+ - tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver + tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver+ tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/cmd/tailscaled+ L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store @@ -422,7 +423,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/version/distro from tailscale.com/client/web+ W tailscale.com/wf from tailscale.com/cmd/tailscaled tailscale.com/wgengine from tailscale.com/cmd/tailscaled+ - tailscale.com/wgengine/capture from tailscale.com/ipn/ipnlocal+ tailscale.com/wgengine/filter from tailscale.com/control/controlclient+ tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+ 💣 tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+ diff --git a/wgengine/capture/capture.go b/feature/capture/capture.go similarity index 79% rename from wgengine/capture/capture.go rename to feature/capture/capture.go index 6ea5a9549..e5e150de8 100644 --- a/wgengine/capture/capture.go +++ b/feature/capture/capture.go @@ -13,21 +13,44 @@ import ( "sync" "time" - _ "embed" - + "tailscale.com/feature" + "tailscale.com/ipn/localapi" "tailscale.com/net/packet" "tailscale.com/util/set" ) -//go:embed ts-dissector.lua -var DissectorLua string +func init() { + feature.Register("capture") + localapi.Register("debug-capture", serveLocalAPIDebugCapture) +} + +func serveLocalAPIDebugCapture(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + if r.Method != "POST" { + http.Error(w, "POST required", http.StatusMethodNotAllowed) + return + } + + w.WriteHeader(http.StatusOK) + w.(http.Flusher).Flush() + + b := h.LocalBackend() + s := b.GetOrSetCaptureSink(newSink) -// Callback describes a function which is called to -// record packets when debugging packet-capture. -// Such callbacks must not take ownership of the -// provided data slice: it may only copy out of it -// within the lifetime of the function. -type Callback func(Path, time.Time, []byte, packet.CaptureMeta) + unregister := s.RegisterOutput(w) + + select { + case <-ctx.Done(): + case <-s.WaitCh(): + } + unregister() + + b.ClearCaptureSink() +} var bufferPool = sync.Pool{ New: func() any { @@ -57,29 +80,8 @@ func writePktHeader(w *bytes.Buffer, when time.Time, length int) { binary.Write(w, binary.LittleEndian, uint32(length)) // total length } -// Path describes where in the data path the packet was captured. 
-type Path uint8 - -// Valid Path values. -const ( - // FromLocal indicates the packet was logged as it traversed the FromLocal path: - // i.e.: A packet from the local system into the TUN. - FromLocal Path = 0 - // FromPeer indicates the packet was logged upon reception from a remote peer. - FromPeer Path = 1 - // SynthesizedToLocal indicates the packet was generated from within tailscaled, - // and is being routed to the local machine's network stack. - SynthesizedToLocal Path = 2 - // SynthesizedToPeer indicates the packet was generated from within tailscaled, - // and is being routed to a remote Wireguard peer. - SynthesizedToPeer Path = 3 - - // PathDisco indicates the packet is information about a disco frame. - PathDisco Path = 254 -) - -// New creates a new capture sink. -func New() *Sink { +// newSink creates a new capture sink. +func newSink() packet.CaptureSink { ctx, c := context.WithCancel(context.Background()) return &Sink{ ctx: ctx, @@ -126,6 +128,10 @@ func (s *Sink) RegisterOutput(w io.Writer) (unregister func()) { } } +func (s *Sink) CaptureCallback() packet.CaptureCallback { + return s.LogPacket +} + // NumOutputs returns the number of outputs registered with the sink. func (s *Sink) NumOutputs() int { s.mu.Lock() @@ -174,7 +180,7 @@ func customDataLen(meta packet.CaptureMeta) int { // LogPacket is called to insert a packet into the capture. // // This function does not take ownership of the provided data slice. -func (s *Sink) LogPacket(path Path, when time.Time, data []byte, meta packet.CaptureMeta) { +func (s *Sink) LogPacket(path packet.CapturePath, when time.Time, data []byte, meta packet.CaptureMeta) { select { case <-s.ctx.Done(): return diff --git a/feature/capture/dissector/dissector.go b/feature/capture/dissector/dissector.go new file mode 100644 index 000000000..ab2f6c2ec --- /dev/null +++ b/feature/capture/dissector/dissector.go @@ -0,0 +1,12 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package dissector contains the Lua dissector for Tailscale packets. +package dissector + +import ( + _ "embed" +) + +//go:embed ts-dissector.lua +var Lua string diff --git a/wgengine/capture/ts-dissector.lua b/feature/capture/dissector/ts-dissector.lua similarity index 100% rename from wgengine/capture/ts-dissector.lua rename to feature/capture/dissector/ts-dissector.lua diff --git a/feature/condregister/maybe_capture.go b/feature/condregister/maybe_capture.go new file mode 100644 index 000000000..0c68331f1 --- /dev/null +++ b/feature/condregister/maybe_capture.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios && !ts_omit_capture + +package condregister + +import _ "tailscale.com/feature/capture" diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 33ce9f331..58cd4025f 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -73,6 +73,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/net/netns" "tailscale.com/net/netutil" + "tailscale.com/net/packet" "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" "tailscale.com/paths" @@ -115,7 +116,6 @@ import ( "tailscale.com/version" "tailscale.com/version/distro" "tailscale.com/wgengine" - "tailscale.com/wgengine/capture" "tailscale.com/wgengine/filter" "tailscale.com/wgengine/magicsock" "tailscale.com/wgengine/router" @@ -209,7 +209,7 @@ type LocalBackend struct { // Tailscale on port 5252. 
exposeRemoteWebClientAtomicBool atomic.Bool shutdownCalled bool // if Shutdown has been called - debugSink *capture.Sink + debugSink packet.CaptureSink sockstatLogger *sockstatlog.Logger // getTCPHandlerForFunnelFlow returns a handler for an incoming TCP flow for @@ -948,6 +948,40 @@ func (b *LocalBackend) onHealthChange(w *health.Warnable, us *health.UnhealthySt } } +// GetOrSetCaptureSink returns the current packet capture sink, creating it +// with the provided newSink function if it does not already exist. +func (b *LocalBackend) GetOrSetCaptureSink(newSink func() packet.CaptureSink) packet.CaptureSink { + b.mu.Lock() + defer b.mu.Unlock() + + if b.debugSink != nil { + return b.debugSink + } + s := newSink() + b.debugSink = s + b.e.InstallCaptureHook(s.CaptureCallback()) + return s +} + +func (b *LocalBackend) ClearCaptureSink() { + // Shut down & uninstall the sink if there are no longer + // any outputs on it. + b.mu.Lock() + defer b.mu.Unlock() + + select { + case <-b.ctx.Done(): + return + default: + } + if b.debugSink != nil && b.debugSink.NumOutputs() == 0 { + s := b.debugSink + b.e.InstallCaptureHook(nil) + b.debugSink = nil + s.Close() + } +} + // Shutdown halts the backend and all its sub-components. The backend // can no longer be used after Shutdown returns. func (b *LocalBackend) Shutdown() { @@ -7154,48 +7188,6 @@ func (b *LocalBackend) ResetAuth() error { return b.resetForProfileChangeLockedOnEntry(unlock) } -// StreamDebugCapture writes a pcap stream of packets traversing -// tailscaled to the provided response writer. -func (b *LocalBackend) StreamDebugCapture(ctx context.Context, w io.Writer) error { - var s *capture.Sink - - b.mu.Lock() - if b.debugSink == nil { - s = capture.New() - b.debugSink = s - b.e.InstallCaptureHook(s.LogPacket) - } else { - s = b.debugSink - } - b.mu.Unlock() - - unregister := s.RegisterOutput(w) - - select { - case <-ctx.Done(): - case <-s.WaitCh(): - } - unregister() - - // Shut down & uninstall the sink if there are no longer - // any outputs on it. - b.mu.Lock() - defer b.mu.Unlock() - - select { - case <-b.ctx.Done(): - return nil - default: - } - if b.debugSink != nil && b.debugSink.NumOutputs() == 0 { - s := b.debugSink - b.e.InstallCaptureHook(nil) - b.debugSink = nil - return s.Close() - } - return nil -} - func (b *LocalBackend) GetPeerEndpointChanges(ctx context.Context, ip netip.Addr) ([]magicsock.EndpointChange, error) { pip, ok := b.e.PeerForIP(ip) if !ok { diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 157f72a65..e6b537d8f 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -68,12 +68,12 @@ import ( "tailscale.com/wgengine/magicsock" ) -type localAPIHandler func(*Handler, http.ResponseWriter, *http.Request) +type LocalAPIHandler func(*Handler, http.ResponseWriter, *http.Request) // handler is the set of LocalAPI handlers, keyed by the part of the // Request.URL.Path after "/localapi/v0/". If the key ends with a trailing slash // then it's a prefix match. 
-var handler = map[string]localAPIHandler{ +var handler = map[string]LocalAPIHandler{ // The prefix match handlers end with a slash: "cert/": (*Handler).serveCert, "file-put/": (*Handler).serveFilePut, @@ -90,7 +90,6 @@ var handler = map[string]localAPIHandler{ "check-udp-gro-forwarding": (*Handler).serveCheckUDPGROForwarding, "component-debug-logging": (*Handler).serveComponentDebugLogging, "debug": (*Handler).serveDebug, - "debug-capture": (*Handler).serveDebugCapture, "debug-derp-region": (*Handler).serveDebugDERPRegion, "debug-dial-types": (*Handler).serveDebugDialTypes, "debug-log": (*Handler).serveDebugLog, @@ -152,6 +151,14 @@ var handler = map[string]localAPIHandler{ "whois": (*Handler).serveWhoIs, } +// Register registers a new LocalAPI handler for the given name. +func Register(name string, fn LocalAPIHandler) { + if _, ok := handler[name]; ok { + panic("duplicate LocalAPI handler registration: " + name) + } + handler[name] = fn +} + var ( // The clientmetrics package is stateful, but we want to expose a simple // imperative API to local clients, so we need to keep track of @@ -196,6 +203,10 @@ type Handler struct { clock tstime.Clock } +func (h *Handler) LocalBackend() *ipnlocal.LocalBackend { + return h.b +} + func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if h.b == nil { http.Error(w, "server has no local backend", http.StatusInternalServerError) @@ -260,7 +271,7 @@ func (h *Handler) validHost(hostname string) bool { // handlerForPath returns the LocalAPI handler for the provided Request.URI.Path. // (the path doesn't include any query parameters) -func handlerForPath(urlPath string) (h localAPIHandler, ok bool) { +func handlerForPath(urlPath string) (h LocalAPIHandler, ok bool) { if urlPath == "/" { return (*Handler).serveLocalAPIRoot, true } @@ -2689,21 +2700,6 @@ func defBool(a string, def bool) bool { return v } -func (h *Handler) serveDebugCapture(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - if r.Method != "POST" { - http.Error(w, "POST required", http.StatusMethodNotAllowed) - return - } - - w.WriteHeader(http.StatusOK) - w.(http.Flusher).Flush() - h.b.StreamDebugCapture(r.Context(), w) -} - func (h *Handler) serveDebugLog(w http.ResponseWriter, r *http.Request) { if !h.PermitRead { http.Error(w, "debug-log access denied", http.StatusForbidden) diff --git a/net/packet/capture.go b/net/packet/capture.go new file mode 100644 index 000000000..dd0ca411f --- /dev/null +++ b/net/packet/capture.go @@ -0,0 +1,75 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package packet + +import ( + "io" + "net/netip" + "time" +) + +// Callback describes a function which is called to +// record packets when debugging packet-capture. +// Such callbacks must not take ownership of the +// provided data slice: it may only copy out of it +// within the lifetime of the function. +type CaptureCallback func(CapturePath, time.Time, []byte, CaptureMeta) + +// CaptureSink is the minimal interface from [tailscale.com/feature/capture]'s +// Sink type that is needed by the core (magicsock/LocalBackend/wgengine/etc). +// This lets the relativel heavy feature/capture package be optionally linked. +type CaptureSink interface { + // Close closes + Close() error + + // NumOutputs returns the number of outputs registered with the sink. + NumOutputs() int + + // CaptureCallback returns a callback which can be used to + // write packets to the sink. 
+ CaptureCallback() CaptureCallback + + // WaitCh returns a channel which blocks until + // the sink is closed. + WaitCh() <-chan struct{} + + // RegisterOutput connects an output to this sink, which + // will be written to with a pcap stream as packets are logged. + // A function is returned which unregisters the output when + // called. + // + // If w implements io.Closer, it will be closed upon error + // or when the sink is closed. If w implements http.Flusher, + // it will be flushed periodically. + RegisterOutput(w io.Writer) (unregister func()) +} + +// CaptureMeta contains metadata that is used when debugging. +type CaptureMeta struct { + DidSNAT bool // SNAT was performed & the address was updated. + OriginalSrc netip.AddrPort // The source address before SNAT was performed. + DidDNAT bool // DNAT was performed & the address was updated. + OriginalDst netip.AddrPort // The destination address before DNAT was performed. +} + +// CapturePath describes where in the data path the packet was captured. +type CapturePath uint8 + +// CapturePath values +const ( + // FromLocal indicates the packet was logged as it traversed the FromLocal path: + // i.e.: A packet from the local system into the TUN. + FromLocal CapturePath = 0 + // FromPeer indicates the packet was logged upon reception from a remote peer. + FromPeer CapturePath = 1 + // SynthesizedToLocal indicates the packet was generated from within tailscaled, + // and is being routed to the local machine's network stack. + SynthesizedToLocal CapturePath = 2 + // SynthesizedToPeer indicates the packet was generated from within tailscaled, + // and is being routed to a remote Wireguard peer. + SynthesizedToPeer CapturePath = 3 + + // PathDisco indicates the packet is information about a disco frame. + PathDisco CapturePath = 254 +) diff --git a/net/packet/packet.go b/net/packet/packet.go index c9521ad46..b683b2212 100644 --- a/net/packet/packet.go +++ b/net/packet/packet.go @@ -34,14 +34,6 @@ const ( TCPECNBits TCPFlag = TCPECNEcho | TCPCWR ) -// CaptureMeta contains metadata that is used when debugging. -type CaptureMeta struct { - DidSNAT bool // SNAT was performed & the address was updated. - OriginalSrc netip.AddrPort // The source address before SNAT was performed. - DidDNAT bool // DNAT was performed & the address was updated. - OriginalDst netip.AddrPort // The destination address before DNAT was performed. -} - // Parsed is a minimal decoding of a packet suitable for use in filters. type Parsed struct { // b is the byte buffer that this decodes. diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index b26239632..442184065 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -36,7 +36,6 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/util/usermetric" - "tailscale.com/wgengine/capture" "tailscale.com/wgengine/filter" "tailscale.com/wgengine/netstack/gro" "tailscale.com/wgengine/wgcfg" @@ -208,7 +207,7 @@ type Wrapper struct { // stats maintains per-connection counters. 
stats atomic.Pointer[connstats.Statistics] - captureHook syncs.AtomicValue[capture.Callback] + captureHook syncs.AtomicValue[packet.CaptureCallback] metrics *metrics } @@ -955,7 +954,7 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { } } if captHook != nil { - captHook(capture.FromLocal, t.now(), p.Buffer(), p.CaptureMeta) + captHook(packet.FromLocal, t.now(), p.Buffer(), p.CaptureMeta) } if !t.disableFilter { var response filter.Response @@ -1101,9 +1100,9 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i return n, err } -func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook capture.Callback, pc *peerConfigTable, gro *gro.GRO) (filter.Response, *gro.GRO) { +func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook packet.CaptureCallback, pc *peerConfigTable, gro *gro.GRO) (filter.Response, *gro.GRO) { if captHook != nil { - captHook(capture.FromPeer, t.now(), p.Buffer(), p.CaptureMeta) + captHook(packet.FromPeer, t.now(), p.Buffer(), p.CaptureMeta) } if p.IPProto == ipproto.TSMP { @@ -1317,7 +1316,7 @@ func (t *Wrapper) InjectInboundPacketBuffer(pkt *stack.PacketBuffer, buffs [][]b p.Decode(buf) captHook := t.captureHook.Load() if captHook != nil { - captHook(capture.SynthesizedToLocal, t.now(), p.Buffer(), p.CaptureMeta) + captHook(packet.SynthesizedToLocal, t.now(), p.Buffer(), p.CaptureMeta) } invertGSOChecksum(buf, pkt.GSOOptions) @@ -1449,7 +1448,7 @@ func (t *Wrapper) InjectOutboundPacketBuffer(pkt *stack.PacketBuffer) error { } if capt := t.captureHook.Load(); capt != nil { b := pkt.ToBuffer() - capt(capture.SynthesizedToPeer, t.now(), b.Flatten(), packet.CaptureMeta{}) + capt(packet.SynthesizedToPeer, t.now(), b.Flatten(), packet.CaptureMeta{}) } t.injectOutbound(tunInjectedRead{packet: pkt}) @@ -1491,6 +1490,6 @@ var ( metricPacketOutDropSelfDisco = clientmetric.NewCounter("tstun_out_to_wg_drop_self_disco") ) -func (t *Wrapper) InstallCaptureHook(cb capture.Callback) { +func (t *Wrapper) InstallCaptureHook(cb packet.CaptureCallback) { t.captureHook.Store(cb) } diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index a3dfe7d86..223ee34f4 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -40,7 +40,6 @@ import ( "tailscale.com/types/views" "tailscale.com/util/must" "tailscale.com/util/usermetric" - "tailscale.com/wgengine/capture" "tailscale.com/wgengine/filter" "tailscale.com/wgengine/wgcfg" ) @@ -871,14 +870,14 @@ func TestPeerCfg_NAT(t *testing.T) { // with the correct parameters when various packet operations are performed. func TestCaptureHook(t *testing.T) { type captureRecord struct { - path capture.Path + path packet.CapturePath now time.Time pkt []byte meta packet.CaptureMeta } var captured []captureRecord - hook := func(path capture.Path, now time.Time, pkt []byte, meta packet.CaptureMeta) { + hook := func(path packet.CapturePath, now time.Time, pkt []byte, meta packet.CaptureMeta) { captured = append(captured, captureRecord{ path: path, now: now, @@ -935,19 +934,19 @@ func TestCaptureHook(t *testing.T) { // Assert that the right packets are captured. 
want := []captureRecord{ { - path: capture.FromPeer, + path: packet.FromPeer, pkt: []byte("Write1"), }, { - path: capture.FromPeer, + path: packet.FromPeer, pkt: []byte("Write2"), }, { - path: capture.SynthesizedToLocal, + path: packet.SynthesizedToLocal, pkt: []byte("InjectInboundPacketBuffer"), }, { - path: capture.SynthesizedToPeer, + path: packet.SynthesizedToPeer, pkt: []byte("InjectOutboundPacketBuffer"), }, } diff --git a/tstest/iosdeps/iosdeps_test.go b/tstest/iosdeps/iosdeps_test.go index ab69f1c2b..b533724eb 100644 --- a/tstest/iosdeps/iosdeps_test.go +++ b/tstest/iosdeps/iosdeps_test.go @@ -24,6 +24,7 @@ func TestDeps(t *testing.T) { "github.com/google/uuid": "see tailscale/tailscale#13760", "tailscale.com/clientupdate/distsign": "downloads via AppStore, not distsign", "github.com/tailscale/hujson": "no config file support on iOS", + "tailscale.com/feature/capture": "no debug packet capture on iOS", }, }.Check(t) } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 98cb63b88..acf7114e1 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -61,7 +61,6 @@ import ( "tailscale.com/util/set" "tailscale.com/util/testenv" "tailscale.com/util/usermetric" - "tailscale.com/wgengine/capture" "tailscale.com/wgengine/wgint" ) @@ -238,7 +237,7 @@ type Conn struct { stats atomic.Pointer[connstats.Statistics] // captureHook, if non-nil, is the pcap logging callback when capturing. - captureHook syncs.AtomicValue[capture.Callback] + captureHook syncs.AtomicValue[packet.CaptureCallback] // discoPrivate is the private naclbox key used for active // discovery traffic. It is always present, and immutable. @@ -655,7 +654,7 @@ func deregisterMetrics(m *metrics) { // log debug information into the pcap stream. This function // can be called with a nil argument to uninstall the capture // hook. -func (c *Conn) InstallCaptureHook(cb capture.Callback) { +func (c *Conn) InstallCaptureHook(cb packet.CaptureCallback) { c.captureHook.Store(cb) } @@ -1709,7 +1708,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke // Emit information about the disco frame into the pcap stream // if a capture hook is installed. 
if cb := c.captureHook.Load(); cb != nil { - cb(capture.PathDisco, time.Now(), disco.ToPCAPFrame(src, derpNodeSrc, payload), packet.CaptureMeta{}) + cb(packet.PathDisco, time.Now(), disco.ToPCAPFrame(src, derpNodeSrc, payload), packet.CaptureMeta{}) } dm, err := disco.Parse(payload) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 81f8000e0..b51b2c8ea 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -51,7 +51,6 @@ import ( "tailscale.com/util/testenv" "tailscale.com/util/usermetric" "tailscale.com/version" - "tailscale.com/wgengine/capture" "tailscale.com/wgengine/filter" "tailscale.com/wgengine/magicsock" "tailscale.com/wgengine/netlog" @@ -1594,7 +1593,7 @@ var ( metricNumMinorChanges = clientmetric.NewCounter("wgengine_minor_changes") ) -func (e *userspaceEngine) InstallCaptureHook(cb capture.Callback) { +func (e *userspaceEngine) InstallCaptureHook(cb packet.CaptureCallback) { e.tundev.InstallCaptureHook(cb) e.magicConn.InstallCaptureHook(cb) } diff --git a/wgengine/watchdog.go b/wgengine/watchdog.go index 232591f5e..74a191748 100644 --- a/wgengine/watchdog.go +++ b/wgengine/watchdog.go @@ -17,10 +17,10 @@ import ( "tailscale.com/envknob" "tailscale.com/ipn/ipnstate" "tailscale.com/net/dns" + "tailscale.com/net/packet" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/netmap" - "tailscale.com/wgengine/capture" "tailscale.com/wgengine/filter" "tailscale.com/wgengine/router" "tailscale.com/wgengine/wgcfg" @@ -162,7 +162,7 @@ func (e *watchdogEngine) Done() <-chan struct{} { return e.wrap.Done() } -func (e *watchdogEngine) InstallCaptureHook(cb capture.Callback) { +func (e *watchdogEngine) InstallCaptureHook(cb packet.CaptureCallback) { e.wrap.InstallCaptureHook(cb) } diff --git a/wgengine/wgengine.go b/wgengine/wgengine.go index c165ccdf3..6aaf567ad 100644 --- a/wgengine/wgengine.go +++ b/wgengine/wgengine.go @@ -11,10 +11,10 @@ import ( "tailscale.com/ipn/ipnstate" "tailscale.com/net/dns" + "tailscale.com/net/packet" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/netmap" - "tailscale.com/wgengine/capture" "tailscale.com/wgengine/filter" "tailscale.com/wgengine/router" "tailscale.com/wgengine/wgcfg" @@ -129,5 +129,5 @@ type Engine interface { // InstallCaptureHook registers a function to be called to capture // packets traversing the data path. The hook can be uninstalled by // calling this function with a nil value. 
- InstallCaptureHook(capture.Callback) + InstallCaptureHook(packet.CaptureCallback) } From 66b2e9fd07f2c635b809aa82d657fd82de3f9323 Mon Sep 17 00:00:00 2001 From: Derek Kaser <11674153+dkaser@users.noreply.github.com> Date: Sun, 26 Jan 2025 10:35:58 -0500 Subject: [PATCH 0403/1708] envknob/featureknob: allow use of exit node on unraid (#14754) Fixes #14372 Signed-off-by: Derek Kaser <11674153+dkaser@users.noreply.github.com> --- envknob/featureknob/featureknob.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/envknob/featureknob/featureknob.go b/envknob/featureknob/featureknob.go index d7af80d23..210414bfe 100644 --- a/envknob/featureknob/featureknob.go +++ b/envknob/featureknob/featureknob.go @@ -55,8 +55,7 @@ func CanRunTailscaleSSH() error { func CanUseExitNode() error { switch dist := distro.Get(); dist { case distro.Synology, // see https://github.com/tailscale/tailscale/issues/1995 - distro.QNAP, - distro.Unraid: + distro.QNAP: return errors.New("Tailscale exit nodes cannot be used on " + string(dist)) } From e701fde6b389a4a69b4d33aace8969530b25de8d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 26 Jan 2025 18:23:38 +0000 Subject: [PATCH 0404/1708] control/controlknobs: make Knobs.AsDebugJSON automatic, not require maintenance The AsDebugJSON method (used only for a LocalAPI debug call) always needed to be updated whenever a new controlknob was added. We had a test for it, which was nice, but it was a tedious step we don't need to do. Use reflect instead. Updates #14788 Change-Id: If59cd776920f3ce7c748f86ed2eddd9323039a0b Signed-off-by: Brad Fitzpatrick --- control/controlknobs/controlknobs.go | 37 ++++++++++------------- control/controlknobs/controlknobs_test.go | 3 ++ 2 files changed, 19 insertions(+), 21 deletions(-) diff --git a/control/controlknobs/controlknobs.go b/control/controlknobs/controlknobs.go index c7933be5a..a86f0af53 100644 --- a/control/controlknobs/controlknobs.go +++ b/control/controlknobs/controlknobs.go @@ -6,6 +6,8 @@ package controlknobs import ( + "fmt" + "reflect" "sync/atomic" "tailscale.com/syncs" @@ -174,26 +176,19 @@ func (k *Knobs) AsDebugJSON() map[string]any { if k == nil { return nil } - return map[string]any{ - "DisableUPnP": k.DisableUPnP.Load(), - "KeepFullWGConfig": k.KeepFullWGConfig.Load(), - "RandomizeClientPort": k.RandomizeClientPort.Load(), - "OneCGNAT": k.OneCGNAT.Load(), - "ForceBackgroundSTUN": k.ForceBackgroundSTUN.Load(), - "DisableDeltaUpdates": k.DisableDeltaUpdates.Load(), - "PeerMTUEnable": k.PeerMTUEnable.Load(), - "DisableDNSForwarderTCPRetries": k.DisableDNSForwarderTCPRetries.Load(), - "SilentDisco": k.SilentDisco.Load(), - "LinuxForceIPTables": k.LinuxForceIPTables.Load(), - "LinuxForceNfTables": k.LinuxForceNfTables.Load(), - "SeamlessKeyRenewal": k.SeamlessKeyRenewal.Load(), - "ProbeUDPLifetime": k.ProbeUDPLifetime.Load(), - "AppCStoreRoutes": k.AppCStoreRoutes.Load(), - "UserDialUseRoutes": k.UserDialUseRoutes.Load(), - "DisableSplitDNSWhenNoCustomResolvers": k.DisableSplitDNSWhenNoCustomResolvers.Load(), - "DisableLocalDNSOverrideViaNRPT": k.DisableLocalDNSOverrideViaNRPT.Load(), - "DisableCryptorouting": k.DisableCryptorouting.Load(), - "DisableCaptivePortalDetection": k.DisableCaptivePortalDetection.Load(), - "DisableSkipStatusQueue": k.DisableSkipStatusQueue.Load(), + ret := map[string]any{} + rt := reflect.TypeFor[Knobs]() + rv := reflect.ValueOf(k).Elem() // of *k + for i := 0; i < rt.NumField(); i++ { + name := rt.Field(i).Name + switch v := rv.Field(i).Addr().Interface().(type) { + 
case *atomic.Bool:
+ ret[name] = v.Load()
+ case *syncs.AtomicValue[opt.Bool]:
+ ret[name] = v.Load()
+ default:
+ panic(fmt.Sprintf("unknown field type %T for %v", v, name))
+ }
 }
+ return ret
 }
diff --git a/control/controlknobs/controlknobs_test.go b/control/controlknobs/controlknobs_test.go
index a78a486f3..7618b7121 100644
--- a/control/controlknobs/controlknobs_test.go
+++ b/control/controlknobs/controlknobs_test.go
@@ -6,6 +6,8 @@ package controlknobs
 import (
 "reflect"
 "testing"
+
+ "tailscale.com/types/logger"
 )
 func TestAsDebugJSON(t *testing.T) {
@@ -18,4 +20,5 @@ func TestAsDebugJSON(t *testing.T) {
 if want := reflect.TypeFor[Knobs]().NumField(); len(got) != want {
 t.Errorf("AsDebugJSON map has %d fields; want %v", len(got), want)
 }
+ t.Logf("Got: %v", logger.AsJSON(got))
 }

From 04029b857f5ef8699e5cd2c80f57048b34b32825 Mon Sep 17 00:00:00 2001
From: Brad Fitzpatrick
Date: Sun, 26 Jan 2025 18:12:46 +0000
Subject: [PATCH 0405/1708] tstest/deptest: verify that tailscale.com BadDeps actually exist

This protects against rearranging packages and not catching that a BadDeps package got moved. That would then effectively remove a test.

Updates #12614

Change-Id: I257f1eeda9e3569c867b7628d5bfb252d3354ba6
Signed-off-by: Brad Fitzpatrick
---
 tstest/deptest/deptest.go | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/tstest/deptest/deptest.go b/tstest/deptest/deptest.go
index 00faa8a38..2393733e6 100644
--- a/tstest/deptest/deptest.go
+++ b/tstest/deptest/deptest.go
@@ -15,6 +15,7 @@ import (
 "runtime"
 "slices"
 "strings"
+ "sync"
 "testing"
 "tailscale.com/util/set"
 )
@@ -54,11 +55,35 @@ func (c DepChecker) Check(t *testing.T) {
 t.Fatal(err)
 }
+ tsRoot := sync.OnceValue(func() string {
+ out, err := exec.Command("go", "list", "-f", "{{.Dir}}", "tailscale.com").Output()
+ if err != nil {
+ t.Fatalf("failed to find tailscale.com root: %v", err)
+ }
+ return strings.TrimSpace(string(out))
+ })
+
 for _, dep := range res.Deps {
 if why, ok := c.BadDeps[dep]; ok {
 t.Errorf("package %q is not allowed as a dependency (env: %q); reason: %s", dep, extraEnv, why)
 }
 }
+ // Make sure the BadDeps packages actually exist. If they got renamed or
+ // moved around, we should update the test referencing the old name.
+ // Doing this in the general case requires network access at runtime
+ // (resolving a package path to its module, possibly doing the ?go-get=1
+ // meta tag dance), so we just check the common case of
+ // "tailscale.com/*" packages for now, with the assumption that all
+ // "tailscale.com/*" packages are in the same module, which isn't
+ // necessarily true in the general case.
+ for dep := range c.BadDeps {
+ if suf, ok := strings.CutPrefix(dep, "tailscale.com/"); ok {
+ pkgDir := filepath.Join(tsRoot(), suf)
+ if _, err := os.Stat(pkgDir); err != nil {
+ t.Errorf("listed BadDep %q doesn't seem to exist anymore: %v", dep, err)
+ }
+ }
+ }
 for dep := range c.WantDeps {
 if !slices.Contains(res.Deps, dep) {
 t.Errorf("expected package %q to be a dependency (env: %q)", dep, extraEnv)

From 8c925899e115ce18e47ba4ec4c630696140e63df Mon Sep 17 00:00:00 2001
From: Brad Fitzpatrick
Date: Sun, 26 Jan 2025 17:09:44 +0000
Subject: [PATCH 0406/1708] go.mod: bump depaware, add --internal flag to stop hiding internal packages

The hiding of internal packages has hidden things I wanted to see a few times now. Stop hiding them.

This makes depaware.txt output a bit longer, but not too much.
Plus we only really look at it with diffs & greps anyway; it's not like anybody reads the whole thing. Updates #12614 Change-Id: I868c89eeeddcaaab63e82371651003629bc9bda8 Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 2 +- Makefile | 4 +-- cmd/derper/depaware.txt | 59 +++++++++++++++++++++++++++++++ cmd/k8s-operator/depaware.txt | 66 +++++++++++++++++++++++++++++++++++ cmd/stund/depaware.txt | 58 ++++++++++++++++++++++++++++++ cmd/tailscale/depaware.txt | 61 ++++++++++++++++++++++++++++++++ cmd/tailscaled/depaware.txt | 64 +++++++++++++++++++++++++++++++++ go.mod | 2 +- go.sum | 4 +-- 9 files changed, 314 insertions(+), 6 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d049323a3..cc773e4a9 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -481,7 +481,7 @@ jobs: - name: check depaware run: | export PATH=$(./tool/go env GOROOT)/bin:$PATH - find . -name 'depaware.txt' | xargs -n1 dirname | xargs ./tool/go run github.com/tailscale/depaware --check + find . -name 'depaware.txt' | xargs -n1 dirname | xargs ./tool/go run github.com/tailscale/depaware --check --internal go_generate: runs-on: ubuntu-22.04 diff --git a/Makefile b/Makefile index d3e50af05..30ac5327a 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ lint: ## Run golangci-lint updatedeps: ## Update depaware deps # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" # it finds in its $$PATH is the right one. - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --internal \ tailscale.com/cmd/tailscaled \ tailscale.com/cmd/tailscale \ tailscale.com/cmd/derper \ @@ -27,7 +27,7 @@ updatedeps: ## Update depaware deps depaware: ## Run depaware checks # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" # it finds in its $$PATH is the right one. 
- PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --internal \ tailscale.com/cmd/tailscaled \ tailscale.com/cmd/tailscale \ tailscale.com/cmd/derper \ diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 3a730dd99..5a39c110e 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -189,6 +189,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ golang.org/x/crypto/hkdf from crypto/tls+ + golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ @@ -201,6 +203,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/net/http/httpproxy from net/http+ golang.org/x/net/http2/hpack from net/http golang.org/x/net/idna from golang.org/x/crypto/acme/autocert+ + golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from net+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ @@ -232,6 +235,18 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ crypto/hmac from crypto/tls+ + crypto/internal/alias from crypto/aes+ + crypto/internal/bigmod from crypto/ecdsa+ + crypto/internal/boring from crypto/aes+ + crypto/internal/boring/bbig from crypto/ecdsa+ + crypto/internal/boring/sig from crypto/internal/boring + crypto/internal/edwards25519 from crypto/ed25519 + crypto/internal/edwards25519/field from crypto/ecdh+ + crypto/internal/hpke from crypto/tls + crypto/internal/mlkem768 from crypto/tls + crypto/internal/nistec from crypto/ecdh+ + crypto/internal/nistec/fiat from crypto/internal/nistec + crypto/internal/randutil from crypto/dsa+ crypto/md5 from crypto/tls+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls @@ -242,6 +257,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa crypto/subtle from crypto/aes+ crypto/tls from golang.org/x/crypto/acme+ crypto/x509 from crypto/tls+ + D crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509+ embed from crypto/internal/nistec+ encoding from encoding/json+ @@ -263,6 +279,44 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa hash/maphash from go4.org/mem html from net/http/pprof+ html/template from tailscale.com/cmd/derper + internal/abi from crypto/x509/internal/macos+ + internal/asan from syscall + internal/bisect from internal/godebug + internal/bytealg from bytes+ + internal/byteorder from crypto/aes+ + internal/chacha8rand from math/rand/v2+ + internal/concurrent from unique + internal/coverage/rtcov from runtime + internal/cpu from crypto/aes+ + internal/filepathlite from os+ + internal/fmtsort from fmt+ + internal/goarch from crypto/aes+ + internal/godebug from crypto/tls+ + internal/godebugs from internal/godebug+ + internal/goexperiment from runtime + internal/goos from crypto/x509+ + internal/itoa from internal/poll+ + 
internal/msan from syscall + internal/nettrace from net+ + internal/oserror from io/fs+ + internal/poll from net+ + internal/profile from net/http/pprof + internal/profilerecord from runtime+ + internal/race from internal/poll+ + internal/reflectlite from context+ + internal/runtime/atomic from internal/runtime/exithook+ + internal/runtime/exithook from runtime + L internal/runtime/syscall from runtime+ + internal/singleflight from net + internal/stringslite from embed+ + internal/syscall/execenv from os+ + LD internal/syscall/unix from crypto/rand+ + W internal/syscall/windows from crypto/rand+ + W internal/syscall/windows/registry from mime+ + W internal/syscall/windows/sysdll from internal/syscall/windows+ + internal/testlog from os + internal/unsafeheader from internal/reflectlite+ + internal/weak from unique io from bufio+ io/fs from crypto/x509+ L io/ioutil from github.com/mitchellh/go-ps+ @@ -282,6 +336,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa net/http from expvar+ net/http/httptrace from net/http+ net/http/internal from net/http + net/http/internal/ascii from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ net/textproto from golang.org/x/net/http/httpguts+ @@ -295,7 +350,10 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa reflect from crypto/x509+ regexp from github.com/coreos/go-iptables/iptables+ regexp/syntax from regexp + runtime from crypto/internal/nistec+ runtime/debug from github.com/prometheus/client_golang/prometheus+ + runtime/internal/math from runtime + runtime/internal/sys from runtime runtime/metrics from github.com/prometheus/client_golang/prometheus+ runtime/pprof from net/http/pprof runtime/trace from net/http/pprof @@ -314,3 +372,4 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ unique from net/netip + unsafe from bytes+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index fc2f8854a..32af3b25e 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -992,6 +992,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from github.com/tailscale/golang-x-crypto/ssh+ golang.org/x/crypto/hkdf from crypto/tls+ + golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device @@ -1009,6 +1011,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ + golang.org/x/net/internal/iana from golang.org/x/net/icmp+ + golang.org/x/net/internal/socket from golang.org/x/net/icmp+ + golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/ipv4 from github.com/miekg/dns+ golang.org/x/net/ipv6 from github.com/miekg/dns+ golang.org/x/net/proxy from tailscale.com/net/netns @@ -1050,6 +1055,18 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/ed25519 from 
crypto/tls+ crypto/elliptic from crypto/ecdsa+ crypto/hmac from crypto/tls+ + crypto/internal/alias from crypto/aes+ + crypto/internal/bigmod from crypto/ecdsa+ + crypto/internal/boring from crypto/aes+ + crypto/internal/boring/bbig from crypto/ecdsa+ + crypto/internal/boring/sig from crypto/internal/boring + crypto/internal/edwards25519 from crypto/ed25519 + crypto/internal/edwards25519/field from crypto/ecdh+ + crypto/internal/hpke from crypto/tls + crypto/internal/mlkem768 from crypto/tls + crypto/internal/nistec from crypto/ecdh+ + crypto/internal/nistec/fiat from crypto/internal/nistec + crypto/internal/randutil from crypto/dsa+ crypto/md5 from crypto/tls+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls+ @@ -1060,6 +1077,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/subtle from crypto/aes+ crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ crypto/x509 from crypto/tls+ + D crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509+ database/sql from github.com/prometheus/client_golang/prometheus/collectors database/sql/driver from database/sql+ @@ -1085,6 +1103,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ go/build/constraint from go/parser go/doc from k8s.io/apimachinery/pkg/runtime go/doc/comment from go/doc + go/internal/typeparams from go/parser go/parser from k8s.io/apimachinery/pkg/runtime go/scanner from go/ast+ go/token from go/ast+ @@ -1095,6 +1114,46 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ hash/maphash from go4.org/mem html from html/template+ html/template from github.com/gorilla/csrf + internal/abi from crypto/x509/internal/macos+ + internal/asan from syscall + internal/bisect from internal/godebug + internal/bytealg from bytes+ + internal/byteorder from crypto/aes+ + internal/chacha8rand from math/rand/v2+ + internal/concurrent from unique + internal/coverage/rtcov from runtime + internal/cpu from crypto/aes+ + internal/filepathlite from os+ + internal/fmtsort from fmt+ + internal/goarch from crypto/aes+ + internal/godebug from archive/tar+ + internal/godebugs from internal/godebug+ + internal/goexperiment from runtime + internal/goos from crypto/x509+ + internal/itoa from internal/poll+ + internal/lazyregexp from go/doc + internal/msan from syscall + internal/nettrace from net+ + internal/oserror from io/fs+ + internal/poll from net+ + internal/profile from net/http/pprof + internal/profilerecord from runtime+ + internal/race from internal/poll+ + internal/reflectlite from context+ + internal/runtime/atomic from internal/runtime/exithook+ + internal/runtime/exithook from runtime + L internal/runtime/syscall from runtime+ + internal/saferio from debug/pe+ + internal/singleflight from net + internal/stringslite from embed+ + internal/syscall/execenv from os+ + LD internal/syscall/unix from crypto/rand+ + W internal/syscall/windows from crypto/rand+ + W internal/syscall/windows/registry from mime+ + W internal/syscall/windows/sysdll from internal/syscall/windows+ + internal/testlog from os + internal/unsafeheader from internal/reflectlite+ + internal/weak from unique io from archive/tar+ io/fs from archive/tar+ io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ @@ -1103,6 +1162,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ log/internal from log+ log/slog from github.com/go-logr/logr+ log/slog/internal from log/slog + log/slog/internal/buffer from 
log/slog maps from sigs.k8s.io/controller-runtime/pkg/predicate+ math from archive/tar+ math/big from crypto/dsa+ @@ -1118,6 +1178,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ net/http/httptrace from github.com/prometheus-community/pro-bing+ net/http/httputil from github.com/aws/smithy-go/transport/http+ net/http/internal from net/http+ + net/http/internal/ascii from net/http+ + net/http/internal/testcert from net/http/httptest net/http/pprof from sigs.k8s.io/controller-runtime/pkg/manager+ net/netip from github.com/gaissmai/bart+ net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ @@ -1131,7 +1193,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ reflect from archive/tar+ regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints+ regexp/syntax from regexp + runtime from archive/tar+ runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ + runtime/internal/math from runtime + runtime/internal/sys from runtime runtime/metrics from github.com/prometheus/client_golang/prometheus+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof @@ -1150,3 +1215,4 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ unique from net/netip + unsafe from bytes+ diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 52d649a1d..c553b9be5 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -89,6 +89,8 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ golang.org/x/crypto/hkdf from crypto/tls+ + golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ @@ -123,6 +125,18 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ crypto/hmac from crypto/tls+ + crypto/internal/alias from crypto/aes+ + crypto/internal/bigmod from crypto/ecdsa+ + crypto/internal/boring from crypto/aes+ + crypto/internal/boring/bbig from crypto/ecdsa+ + crypto/internal/boring/sig from crypto/internal/boring + crypto/internal/edwards25519 from crypto/ed25519 + crypto/internal/edwards25519/field from crypto/ecdh+ + crypto/internal/hpke from crypto/tls + crypto/internal/mlkem768 from crypto/tls + crypto/internal/nistec from crypto/ecdh+ + crypto/internal/nistec/fiat from crypto/internal/nistec + crypto/internal/randutil from crypto/dsa+ crypto/md5 from crypto/tls+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls @@ -133,6 +147,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar crypto/subtle from crypto/aes+ crypto/tls from net/http+ crypto/x509 from crypto/tls + D crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509 embed from crypto/internal/nistec+ encoding from encoding/json+ @@ -153,6 +168,44 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar hash/fnv from google.golang.org/protobuf/internal/detrand hash/maphash from go4.org/mem html from net/http/pprof+ + 
internal/abi from crypto/x509/internal/macos+ + internal/asan from syscall + internal/bisect from internal/godebug + internal/bytealg from bytes+ + internal/byteorder from crypto/aes+ + internal/chacha8rand from math/rand/v2+ + internal/concurrent from unique + internal/coverage/rtcov from runtime + internal/cpu from crypto/aes+ + internal/filepathlite from os+ + internal/fmtsort from fmt + internal/goarch from crypto/aes+ + internal/godebug from crypto/tls+ + internal/godebugs from internal/godebug+ + internal/goexperiment from runtime + internal/goos from crypto/x509+ + internal/itoa from internal/poll+ + internal/msan from syscall + internal/nettrace from net+ + internal/oserror from io/fs+ + internal/poll from net+ + internal/profile from net/http/pprof + internal/profilerecord from runtime+ + internal/race from internal/poll+ + internal/reflectlite from context+ + internal/runtime/atomic from internal/runtime/exithook+ + internal/runtime/exithook from runtime + L internal/runtime/syscall from runtime+ + internal/singleflight from net + internal/stringslite from embed+ + internal/syscall/execenv from os + LD internal/syscall/unix from crypto/rand+ + W internal/syscall/windows from crypto/rand+ + W internal/syscall/windows/registry from mime+ + W internal/syscall/windows/sysdll from internal/syscall/windows+ + internal/testlog from os + internal/unsafeheader from internal/reflectlite+ + internal/weak from unique io from bufio+ io/fs from crypto/x509+ iter from maps+ @@ -171,6 +224,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar net/http from expvar+ net/http/httptrace from net/http net/http/internal from net/http + net/http/internal/ascii from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ net/textproto from golang.org/x/net/http/httpguts+ @@ -182,7 +236,10 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar reflect from crypto/x509+ regexp from github.com/prometheus/client_golang/prometheus/internal+ regexp/syntax from regexp + runtime from crypto/internal/nistec+ runtime/debug from github.com/prometheus/client_golang/prometheus+ + runtime/internal/math from runtime + runtime/internal/sys from runtime runtime/metrics from github.com/prometheus/client_golang/prometheus+ runtime/pprof from net/http/pprof runtime/trace from net/http/pprof @@ -199,3 +256,4 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ unique from net/netip + unsafe from bytes+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 47ba03cb9..6d1fcfd03 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -194,6 +194,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ golang.org/x/crypto/hkdf from crypto/tls+ + golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/pbkdf2 from software.sslmate.com/src/go-pkcs12 @@ -209,6 +211,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/net/http2/hpack from net/http+ golang.org/x/net/icmp from 
tailscale.com/net/ping golang.org/x/net/idna from golang.org/x/net/http/httpguts+ + golang.org/x/net/internal/iana from golang.org/x/net/icmp+ + golang.org/x/net/internal/socket from golang.org/x/net/icmp+ + golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/ipv4 from github.com/miekg/dns+ golang.org/x/net/ipv6 from github.com/miekg/dns+ golang.org/x/net/proxy from tailscale.com/net/netns @@ -247,6 +252,18 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ crypto/hmac from crypto/tls+ + crypto/internal/alias from crypto/aes+ + crypto/internal/bigmod from crypto/ecdsa+ + crypto/internal/boring from crypto/aes+ + crypto/internal/boring/bbig from crypto/ecdsa+ + crypto/internal/boring/sig from crypto/internal/boring + crypto/internal/edwards25519 from crypto/ed25519 + crypto/internal/edwards25519/field from crypto/ecdh+ + crypto/internal/hpke from crypto/tls + crypto/internal/mlkem768 from crypto/tls + crypto/internal/nistec from crypto/ecdh+ + crypto/internal/nistec/fiat from crypto/internal/nistec + crypto/internal/randutil from crypto/dsa+ crypto/md5 from crypto/tls+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls @@ -257,6 +274,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/subtle from crypto/aes+ crypto/tls from github.com/miekg/dns+ crypto/x509 from crypto/tls+ + D crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509+ DW database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe @@ -285,6 +303,44 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep image from github.com/skip2/go-qrcode+ image/color from github.com/skip2/go-qrcode+ image/png from github.com/skip2/go-qrcode + internal/abi from crypto/x509/internal/macos+ + internal/asan from syscall + internal/bisect from internal/godebug + internal/bytealg from bytes+ + internal/byteorder from crypto/aes+ + internal/chacha8rand from math/rand/v2+ + internal/concurrent from unique + internal/coverage/rtcov from runtime + internal/cpu from crypto/aes+ + internal/filepathlite from os+ + internal/fmtsort from fmt+ + internal/goarch from crypto/aes+ + internal/godebug from archive/tar+ + internal/godebugs from internal/godebug+ + internal/goexperiment from runtime + internal/goos from crypto/x509+ + internal/itoa from internal/poll+ + internal/msan from syscall + internal/nettrace from net+ + internal/oserror from io/fs+ + internal/poll from net+ + internal/profilerecord from runtime + internal/race from internal/poll+ + internal/reflectlite from context+ + internal/runtime/atomic from internal/runtime/exithook+ + internal/runtime/exithook from runtime + L internal/runtime/syscall from runtime+ + internal/saferio from debug/pe+ + internal/singleflight from net + internal/stringslite from embed+ + internal/syscall/execenv from os+ + LD internal/syscall/unix from crypto/rand+ + W internal/syscall/windows from crypto/rand+ + W internal/syscall/windows/registry from mime+ + W internal/syscall/windows/sysdll from internal/syscall/windows+ + internal/testlog from os + internal/unsafeheader from internal/reflectlite+ + internal/weak from unique io from archive/tar+ io/fs from archive/tar+ io/ioutil from github.com/mitchellh/go-ps+ @@ -306,6 +362,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep net/http/httptrace from golang.org/x/net/http2+ net/http/httputil 
from tailscale.com/client/web+ net/http/internal from net/http+ + net/http/internal/ascii from net/http+ net/netip from go4.org/netipx+ net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ @@ -318,7 +375,10 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep reflect from archive/tar+ regexp from github.com/coreos/go-iptables/iptables+ regexp/syntax from regexp + runtime from archive/tar+ runtime/debug from tailscale.com+ + runtime/internal/math from runtime + runtime/internal/sys from runtime slices from tailscale.com/client/web+ sort from compress/flate+ strconv from archive/tar+ @@ -334,3 +394,4 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ unique from net/netip + unsafe from bytes+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 1e0b2061a..e0ed51ebb 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -445,12 +445,15 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from github.com/tailscale/golang-x-crypto/ssh+ golang.org/x/crypto/hkdf from crypto/tls+ + golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ LD golang.org/x/crypto/ssh from github.com/pkg/sftp+ + LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ @@ -462,6 +465,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ + golang.org/x/net/internal/iana from golang.org/x/net/icmp+ + golang.org/x/net/internal/socket from golang.org/x/net/icmp+ + golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/ipv4 from github.com/miekg/dns+ golang.org/x/net/ipv6 from github.com/miekg/dns+ golang.org/x/net/proxy from tailscale.com/net/netns @@ -501,6 +507,18 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ crypto/hmac from crypto/tls+ + crypto/internal/alias from crypto/aes+ + crypto/internal/bigmod from crypto/ecdsa+ + crypto/internal/boring from crypto/aes+ + crypto/internal/boring/bbig from crypto/ecdsa+ + crypto/internal/boring/sig from crypto/internal/boring + crypto/internal/edwards25519 from crypto/ed25519 + crypto/internal/edwards25519/field from crypto/ecdh+ + crypto/internal/hpke from crypto/tls + crypto/internal/mlkem768 from crypto/tls + crypto/internal/nistec from crypto/ecdh+ + crypto/internal/nistec/fiat from crypto/internal/nistec + crypto/internal/randutil from crypto/dsa+ crypto/md5 from crypto/tls+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls+ @@ 
-511,6 +529,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/subtle from crypto/aes+ crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ crypto/x509 from crypto/tls+ + D crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509+ DW database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe @@ -536,6 +555,45 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de hash/maphash from go4.org/mem html from html/template+ html/template from github.com/gorilla/csrf + internal/abi from crypto/x509/internal/macos+ + internal/asan from syscall + internal/bisect from internal/godebug + internal/bytealg from bytes+ + internal/byteorder from crypto/aes+ + internal/chacha8rand from math/rand/v2+ + internal/concurrent from unique + internal/coverage/rtcov from runtime + internal/cpu from crypto/aes+ + internal/filepathlite from os+ + internal/fmtsort from fmt+ + internal/goarch from crypto/aes+ + internal/godebug from archive/tar+ + internal/godebugs from internal/godebug+ + internal/goexperiment from runtime + internal/goos from crypto/x509+ + internal/itoa from internal/poll+ + internal/msan from syscall + internal/nettrace from net+ + internal/oserror from io/fs+ + internal/poll from net+ + internal/profile from net/http/pprof + internal/profilerecord from runtime+ + internal/race from internal/poll+ + internal/reflectlite from context+ + internal/runtime/atomic from internal/runtime/exithook+ + internal/runtime/exithook from runtime + L internal/runtime/syscall from runtime+ + internal/saferio from debug/pe+ + internal/singleflight from net + internal/stringslite from embed+ + internal/syscall/execenv from os+ + LD internal/syscall/unix from crypto/rand+ + W internal/syscall/windows from crypto/rand+ + W internal/syscall/windows/registry from mime+ + W internal/syscall/windows/sysdll from internal/syscall/windows+ + internal/testlog from os + internal/unsafeheader from internal/reflectlite+ + internal/weak from unique io from archive/tar+ io/fs from archive/tar+ io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ @@ -558,6 +616,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net/http/httptrace from github.com/prometheus-community/pro-bing+ net/http/httputil from github.com/aws/smithy-go/transport/http+ net/http/internal from net/http+ + net/http/internal/ascii from net/http+ + net/http/internal/testcert from net/http/httptest net/http/pprof from tailscale.com/cmd/tailscaled+ net/netip from github.com/tailscale/wireguard-go/conn+ net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ @@ -571,7 +631,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de reflect from archive/tar+ regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn+ regexp/syntax from regexp + runtime from archive/tar+ runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ + runtime/internal/math from runtime + runtime/internal/sys from runtime runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof slices from tailscale.com/appc+ @@ -589,3 +652,4 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ unique from net/netip + unsafe from bytes+ diff --git a/go.mod b/go.mod index 22193ee6e..8e52a9ab3 100644 --- a/go.mod +++ b/go.mod @@ -74,7 +74,7 @@ require ( github.com/skip2/go-qrcode 
v0.0.0-20200617195104-da1b6568686e github.com/studio-b12/gowebdav v0.9.0 github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e - github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502 + github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 diff --git a/go.sum b/go.sum index 20dbe7306..c1c82ad77 100644 --- a/go.sum +++ b/go.sum @@ -915,8 +915,8 @@ github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplB github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP5LWHEY//SWsYkSO3RWRZo4OSWagh3YD2vQ= github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e/go.mod h1:XrBNfAFN+pwoWuksbFS9Ccxnopa15zJGgXRFN90l3K4= -github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502 h1:34icjjmqJ2HPjrSuJYEkdZ+0ItmGQAQ75cRHIiftIyE= -github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8= +github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b h1:ewWb4cA+YO9/3X+v5UhdV+eKFsNBOPcGRh39Glshx/4= +github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8= github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8Jj4P4c1a3CtQyMaTVCznlkLZI++hok4= github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg= github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 h1:/V2rCMMWcsjYaYO2MeovLw+ClP63OtXgCF2Y1eb8+Ns= From bce05ec6c3f3cb2dad1086472e99e2e69b2cfadc Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 26 Jan 2025 17:06:06 +0000 Subject: [PATCH 0407/1708] control/controlclient,tempfork/httprec: don't link httptest, test certs for c2n The c2n handling code was using the Go httptest package's ResponseRecorder code but that's in a test package which brings in Go's test certs, etc. This forks the httptest recorder type into its own package that only has the recorder and adds a test that we don't re-introduce a dependency on httptest. 
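As a rough usage sketch (illustrative only, not part of the diff below; the
c2nHandler and hreq names come from the control/controlclient hunk), callers
construct and use the forked recorder exactly as they did the httptest one:

    // Run an HTTP handler against the forked recorder, then inspect
    // the recorded response.
    rec := httprec.NewRecorder()
    c2nHandler.ServeHTTP(rec, hreq)
    res := rec.Result() // *http.Response with StatusCode, Header, and Body populated

Only the import moves from net/http/httptest to tailscale.com/tempfork/httprec;
the recorder's methods (NewRecorder, Header, Write, WriteHeader, Flush, Result)
keep the same behavior, since the package is a copy of the standard library type.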
Updates #12614 Change-Id: I3546f49972981e21813ece9064cc2be0b74f4b16 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 3 +- cmd/tailscaled/depaware.txt | 5 +- cmd/tailscaled/tailscaled_test.go | 2 + control/controlclient/direct.go | 4 +- tempfork/httprec/httprec.go | 258 ++++++++++++++++++++++++++++++ 5 files changed, 265 insertions(+), 7 deletions(-) create mode 100644 tempfork/httprec/httprec.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 32af3b25e..fab29ba03 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -889,6 +889,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/tailcfg from tailscale.com/client/tailscale+ tailscale.com/taildrop from tailscale.com/ipn/ipnlocal+ tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock + tailscale.com/tempfork/httprec from tailscale.com/control/controlclient tailscale.com/tka from tailscale.com/client/tailscale+ tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tsd from tailscale.com/ipn/ipnlocal+ @@ -1174,12 +1175,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ - net/http/httptest from tailscale.com/control/controlclient net/http/httptrace from github.com/prometheus-community/pro-bing+ net/http/httputil from github.com/aws/smithy-go/transport/http+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ - net/http/internal/testcert from net/http/httptest net/http/pprof from sigs.k8s.io/controller-runtime/pkg/manager+ net/netip from github.com/gaissmai/bart+ net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index e0ed51ebb..36b6063d5 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -341,6 +341,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/taildrop from tailscale.com/ipn/ipnlocal+ LD tailscale.com/tempfork/gliderlabs/ssh from tailscale.com/ssh/tailssh tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock + tailscale.com/tempfork/httprec from tailscale.com/control/controlclient tailscale.com/tka from tailscale.com/client/tailscale+ tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tsd from tailscale.com/cmd/tailscaled+ @@ -547,7 +548,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ errors from archive/tar+ expvar from tailscale.com/derp+ - flag from net/http/httptest+ + flag from tailscale.com/cmd/tailscaled+ fmt from archive/tar+ hash from compress/zlib+ hash/adler32 from compress/zlib+ @@ -612,12 +613,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ - net/http/httptest from tailscale.com/control/controlclient net/http/httptrace from github.com/prometheus-community/pro-bing+ net/http/httputil from github.com/aws/smithy-go/transport/http+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ - net/http/internal/testcert from net/http/httptest net/http/pprof from tailscale.com/cmd/tailscaled+ net/netip from github.com/tailscale/wireguard-go/conn+ net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ diff --git 
a/cmd/tailscaled/tailscaled_test.go b/cmd/tailscaled/tailscaled_test.go index f36120f13..c50c23759 100644 --- a/cmd/tailscaled/tailscaled_test.go +++ b/cmd/tailscaled/tailscaled_test.go @@ -22,6 +22,8 @@ func TestDeps(t *testing.T) { BadDeps: map[string]string{ "testing": "do not use testing package in production code", "gvisor.dev/gvisor/pkg/hostarch": "will crash on non-4K page sizes; see https://github.com/tailscale/tailscale/issues/8658", + "net/http/httptest": "do not use httptest in production code", + "net/http/internal/testcert": "do not use httptest in production code", }, }.Check(t) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index c436bc8b1..f327ecc2a 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -15,7 +15,6 @@ import ( "log" "net" "net/http" - "net/http/httptest" "net/netip" "net/url" "os" @@ -42,6 +41,7 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/net/tshttpproxy" "tailscale.com/tailcfg" + "tailscale.com/tempfork/httprec" "tailscale.com/tka" "tailscale.com/tstime" "tailscale.com/types/key" @@ -1384,7 +1384,7 @@ func answerC2NPing(logf logger.Logf, c2nHandler http.Handler, c *http.Client, pr handlerCtx, cancel := context.WithTimeout(context.Background(), handlerTimeout) defer cancel() hreq = hreq.WithContext(handlerCtx) - rec := httptest.NewRecorder() + rec := httprec.NewRecorder() c2nHandler.ServeHTTP(rec, hreq) cancel() diff --git a/tempfork/httprec/httprec.go b/tempfork/httprec/httprec.go new file mode 100644 index 000000000..13786aaf6 --- /dev/null +++ b/tempfork/httprec/httprec.go @@ -0,0 +1,258 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httprec is a copy of the Go standard library's httptest.ResponseRecorder +// type, which we want to use in non-test code without pulling in the rest of +// the httptest package and its test certs, etc. +package httprec + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/textproto" + "strconv" + "strings" + + "golang.org/x/net/http/httpguts" +) + +// ResponseRecorder is an implementation of [http.ResponseWriter] that +// records its mutations for later inspection in tests. +type ResponseRecorder struct { + // Code is the HTTP response code set by WriteHeader. + // + // Note that if a Handler never calls WriteHeader or Write, + // this might end up being 0, rather than the implicit + // http.StatusOK. To get the implicit value, use the Result + // method. + Code int + + // HeaderMap contains the headers explicitly set by the Handler. + // It is an internal detail. + // + // Deprecated: HeaderMap exists for historical compatibility + // and should not be used. To access the headers returned by a handler, + // use the Response.Header map as returned by the Result method. + HeaderMap http.Header + + // Body is the buffer to which the Handler's Write calls are sent. + // If nil, the Writes are silently discarded. + Body *bytes.Buffer + + // Flushed is whether the Handler called Flush. + Flushed bool + + result *http.Response // cache of Result's return value + snapHeader http.Header // snapshot of HeaderMap at first Write + wroteHeader bool +} + +// NewRecorder returns an initialized [ResponseRecorder]. 
+func NewRecorder() *ResponseRecorder { + return &ResponseRecorder{ + HeaderMap: make(http.Header), + Body: new(bytes.Buffer), + Code: 200, + } +} + +// DefaultRemoteAddr is the default remote address to return in RemoteAddr if +// an explicit DefaultRemoteAddr isn't set on [ResponseRecorder]. +const DefaultRemoteAddr = "1.2.3.4" + +// Header implements [http.ResponseWriter]. It returns the response +// headers to mutate within a handler. To test the headers that were +// written after a handler completes, use the [ResponseRecorder.Result] method and see +// the returned Response value's Header. +func (rw *ResponseRecorder) Header() http.Header { + m := rw.HeaderMap + if m == nil { + m = make(http.Header) + rw.HeaderMap = m + } + return m +} + +// writeHeader writes a header if it was not written yet and +// detects Content-Type if needed. +// +// bytes or str are the beginning of the response body. +// We pass both to avoid unnecessarily generate garbage +// in rw.WriteString which was created for performance reasons. +// Non-nil bytes win. +func (rw *ResponseRecorder) writeHeader(b []byte, str string) { + if rw.wroteHeader { + return + } + if len(str) > 512 { + str = str[:512] + } + + m := rw.Header() + + _, hasType := m["Content-Type"] + hasTE := m.Get("Transfer-Encoding") != "" + if !hasType && !hasTE { + if b == nil { + b = []byte(str) + } + m.Set("Content-Type", http.DetectContentType(b)) + } + + rw.WriteHeader(200) +} + +// Write implements http.ResponseWriter. The data in buf is written to +// rw.Body, if not nil. +func (rw *ResponseRecorder) Write(buf []byte) (int, error) { + rw.writeHeader(buf, "") + if rw.Body != nil { + rw.Body.Write(buf) + } + return len(buf), nil +} + +// WriteString implements [io.StringWriter]. The data in str is written +// to rw.Body, if not nil. +func (rw *ResponseRecorder) WriteString(str string) (int, error) { + rw.writeHeader(nil, str) + if rw.Body != nil { + rw.Body.WriteString(str) + } + return len(str), nil +} + +func checkWriteHeaderCode(code int) { + // Issue 22880: require valid WriteHeader status codes. + // For now we only enforce that it's three digits. + // In the future we might block things over 599 (600 and above aren't defined + // at https://httpwg.org/specs/rfc7231.html#status.codes) + // and we might block under 200 (once we have more mature 1xx support). + // But for now any three digits. + // + // We used to send "HTTP/1.1 000 0" on the wire in responses but there's + // no equivalent bogus thing we can realistically send in HTTP/2, + // so we'll consistently panic instead and help people find their bugs + // early. (We can't return an error from WriteHeader even if we wanted to.) + if code < 100 || code > 999 { + panic(fmt.Sprintf("invalid WriteHeader code %v", code)) + } +} + +// WriteHeader implements [http.ResponseWriter]. +func (rw *ResponseRecorder) WriteHeader(code int) { + if rw.wroteHeader { + return + } + + checkWriteHeaderCode(code) + rw.Code = code + rw.wroteHeader = true + if rw.HeaderMap == nil { + rw.HeaderMap = make(http.Header) + } + rw.snapHeader = rw.HeaderMap.Clone() +} + +// Flush implements [http.Flusher]. To test whether Flush was +// called, see rw.Flushed. +func (rw *ResponseRecorder) Flush() { + if !rw.wroteHeader { + rw.WriteHeader(200) + } + rw.Flushed = true +} + +// Result returns the response generated by the handler. +// +// The returned Response will have at least its StatusCode, +// Header, Body, and optionally Trailer populated. 
+// More fields may be populated in the future, so callers should +// not DeepEqual the result in tests. +// +// The Response.Header is a snapshot of the headers at the time of the +// first write call, or at the time of this call, if the handler never +// did a write. +// +// The Response.Body is guaranteed to be non-nil and Body.Read call is +// guaranteed to not return any error other than [io.EOF]. +// +// Result must only be called after the handler has finished running. +func (rw *ResponseRecorder) Result() *http.Response { + if rw.result != nil { + return rw.result + } + if rw.snapHeader == nil { + rw.snapHeader = rw.HeaderMap.Clone() + } + res := &http.Response{ + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + StatusCode: rw.Code, + Header: rw.snapHeader, + } + rw.result = res + if res.StatusCode == 0 { + res.StatusCode = 200 + } + res.Status = fmt.Sprintf("%03d %s", res.StatusCode, http.StatusText(res.StatusCode)) + if rw.Body != nil { + res.Body = io.NopCloser(bytes.NewReader(rw.Body.Bytes())) + } else { + res.Body = http.NoBody + } + res.ContentLength = parseContentLength(res.Header.Get("Content-Length")) + + if trailers, ok := rw.snapHeader["Trailer"]; ok { + res.Trailer = make(http.Header, len(trailers)) + for _, k := range trailers { + for _, k := range strings.Split(k, ",") { + k = http.CanonicalHeaderKey(textproto.TrimString(k)) + if !httpguts.ValidTrailerHeader(k) { + // Ignore since forbidden by RFC 7230, section 4.1.2. + continue + } + vv, ok := rw.HeaderMap[k] + if !ok { + continue + } + vv2 := make([]string, len(vv)) + copy(vv2, vv) + res.Trailer[k] = vv2 + } + } + } + for k, vv := range rw.HeaderMap { + if !strings.HasPrefix(k, http.TrailerPrefix) { + continue + } + if res.Trailer == nil { + res.Trailer = make(http.Header) + } + for _, v := range vv { + res.Trailer.Add(strings.TrimPrefix(k, http.TrailerPrefix), v) + } + } + return res +} + +// parseContentLength trims whitespace from s and returns -1 if no value +// is set, or the value if it's >= 0. +// +// This a modified version of same function found in net/http/transfer.go. This +// one just ignores an invalid header. +func parseContentLength(cl string) int64 { + cl = textproto.TrimString(cl) + if cl == "" { + return -1 + } + n, err := strconv.ParseUint(cl, 10, 63) + if err != nil { + return -1 + } + return int64(n) +} From 3fec806523dbc650afa30f4a500b02f0fe40d641 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Jan 2025 12:36:04 -0700 Subject: [PATCH 0408/1708] .github: Bump actions/setup-go from 5.2.0 to 5.3.0 (#14793) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.2.0 to 5.3.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/3041bf56c941b39c61721a86cd11f3bb1338122a...f111f3307d8850f501ac008e886eec1fd1932a34) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/golangci-lint.yml | 2 +- .github/workflows/test.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 605f0939b..ecac2851c 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -49,7 +49,7 @@ jobs: # Install a more recent Go that understands modern go.mod content. - name: Install Go - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: go-version-file: go.mod diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index ad135f784..58e611591 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -25,7 +25,7 @@ jobs: steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: go-version-file: go.mod cache: false diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index cc773e4a9..a6ef6c36e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -153,7 +153,7 @@ jobs: uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install Go - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: go-version-file: go.mod cache: false From 76dc028b389b72f111e976af2cff8ed1080996c4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Jan 2025 12:36:42 -0700 Subject: [PATCH 0409/1708] .github: Bump github/codeql-action from 3.28.1 to 3.28.5 (#14794) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.1 to 3.28.5. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/b6a472f63d85b9c78a3ac5e89422239fc15e9b3c...f6091c0113d1dcf9b98e269ee48e8a7e51b7bdd4) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index ecac2851c..928240c53 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c # v3.28.1 + uses: github/codeql-action/init@f6091c0113d1dcf9b98e269ee48e8a7e51b7bdd4 # v3.28.5 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c # v3.28.1 + uses: github/codeql-action/autobuild@f6091c0113d1dcf9b98e269ee48e8a7e51b7bdd4 # v3.28.5 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c # v3.28.1 + uses: github/codeql-action/analyze@f6091c0113d1dcf9b98e269ee48e8a7e51b7bdd4 # v3.28.5 From bfde8079a0919269fd9f435ba0c6e1bfc35988c2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 27 Jan 2025 13:37:49 +0000 Subject: [PATCH 0410/1708] health: do Warnable dependency filtering in tailscaled Previously we were depending on the GUI(s) to do it. By doing it in tailscaled, GUIs can be simplified and be guaranteed to render consistent results. If warnable A depends on warnable B and both A & B are unhealthy, only B will be shown to the GUI as unhealthy. Once B clears up, only then will A be presented as unhealthy. Updates #14687 Change-Id: Id8566f2672d8d2d699740fa053d4e2a2c8009e83 Signed-off-by: Brad Fitzpatrick --- health/health.go | 9 +++++++-- health/health_test.go | 8 +++++++- health/state.go | 25 +++++++++++++++++++++++++ 3 files changed, 39 insertions(+), 3 deletions(-) diff --git a/health/health.go b/health/health.go index 079b3195c..fa608ea73 100644 --- a/health/health.go +++ b/health/health.go @@ -214,9 +214,11 @@ type Warnable struct { // TODO(angott): turn this into a SeverityFunc, which allows the Warnable to change its severity based on // the Args of the unhappy state, just like we do in the Text function. Severity Severity - // DependsOn is a set of Warnables that this Warnable depends, on and need to be healthy - // before this Warnable can also be healthy again. The GUI can use this information to ignore + // DependsOn is a set of Warnables that this Warnable depends on and need to be healthy + // before this Warnable is relevant. The GUI can use this information to ignore // this Warnable if one of its dependencies is unhealthy. + // That is, if any of these Warnables are unhealthy, then this Warnable is not relevant + // and should be considered healthy, so as not to bother the user about it. DependsOn []*Warnable // MapDebugFlag is a MapRequest.DebugFlag that is sent to control when this Warnable is unhealthy @@ -940,6 +942,9 @@ func (t *Tracker) stringsLocked() []string { // Do not append invisible warnings. 
continue } + if t.isEffectivelyHealthyLocked(w) { + continue + } if ws.Args == nil { result = append(result, w.Text(Args{})) } else { diff --git a/health/health_test.go b/health/health_test.go index ebdddc988..cc7b9d5aa 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -257,9 +257,15 @@ func TestCheckDependsOnAppearsInUnhealthyState(t *testing.T) { } ht.SetUnhealthy(w2, Args{ArgError: "w2 is also unhealthy now"}) us2, ok := ht.CurrentState().Warnings[w2.Code] + if ok { + t.Fatalf("Saw w2 being unhealthy but it shouldn't be, as it depends on unhealthy w1") + } + ht.SetHealthy(w1) + us2, ok = ht.CurrentState().Warnings[w2.Code] if !ok { - t.Fatalf("Expected an UnhealthyState for w2, got nothing") + t.Fatalf("w2 wasn't unhealthy; want it to be unhealthy now that w1 is back healthy") } + wantDependsOn = slices.Concat([]WarnableCode{w1.Code}, wantDependsOn) if !reflect.DeepEqual(us2.DependsOn, wantDependsOn) { t.Fatalf("Expected DependsOn = %v in the unhealthy state, got: %v", wantDependsOn, us2.DependsOn) } diff --git a/health/state.go b/health/state.go index 17a646794..3bfa6f99b 100644 --- a/health/state.go +++ b/health/state.go @@ -90,6 +90,11 @@ func (t *Tracker) CurrentState() *State { // Skip invisible Warnables. continue } + if t.isEffectivelyHealthyLocked(w) { + // Skip Warnables that are unhealthy if they have dependencies + // that are unhealthy. + continue + } wm[w.Code] = *w.unhealthyState(ws) } @@ -97,3 +102,23 @@ func (t *Tracker) CurrentState() *State { Warnings: wm, } } + +// isEffectivelyHealthyLocked reports whether w is effectively healthy. +// That means it's either actually healthy or it has a dependency +// that's unhealthy, so we should treat w as healthy to not spam users +// with multiple warnings when only the root cause is relevant. +func (t *Tracker) isEffectivelyHealthyLocked(w *Warnable) bool { + if _, ok := t.warnableVal[w]; !ok { + // Warnable not found in the tracker. So healthy. + return true + } + for _, d := range w.DependsOn { + if !t.isEffectivelyHealthyLocked(d) { + // If one of our deps is unhealthy, we're healthy. + return true + } + } + // If we have no unhealthy deps and had warnableVal set, + // we're unhealthy. + return false +} From bd9725c5f80b2e47da8ddb09a788036822531488 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 27 Jan 2025 14:21:25 +0000 Subject: [PATCH 0411/1708] health: relax no-derp-home warnable to not fire if not in map poll Fixes #14687 Change-Id: I05035df7e075e94dd39b2192bee34d878c15310d Signed-off-by: Brad Fitzpatrick --- health/health.go | 64 ++++++++++++++++++++++++++++--------------- health/health_test.go | 45 ++++++++++++++++++++++++++++++ health/state.go | 2 +- 3 files changed, 88 insertions(+), 23 deletions(-) diff --git a/health/health.go b/health/health.go index fa608ea73..b0733f353 100644 --- a/health/health.go +++ b/health/health.go @@ -22,6 +22,7 @@ import ( "tailscale.com/envknob" "tailscale.com/metrics" "tailscale.com/tailcfg" + "tailscale.com/tstime" "tailscale.com/types/opt" "tailscale.com/util/cibuild" "tailscale.com/util/mak" @@ -73,6 +74,8 @@ type Tracker struct { // mu should not be held during init. initOnce sync.Once + testClock tstime.Clock // nil means use time.Now / tstime.StdClock{} + // mu guards everything that follows. 
mu sync.Mutex @@ -80,13 +83,13 @@ type Tracker struct { warnableVal map[*Warnable]*warningState // pendingVisibleTimers contains timers for Warnables that are unhealthy, but are // not visible to the user yet, because they haven't been unhealthy for TimeToVisible - pendingVisibleTimers map[*Warnable]*time.Timer + pendingVisibleTimers map[*Warnable]tstime.TimerController // sysErr maps subsystems to their current error (or nil if the subsystem is healthy) // Deprecated: using Warnables should be preferred sysErr map[Subsystem]error watchers set.HandleSet[func(*Warnable, *UnhealthyState)] // opt func to run if error state changes - timer *time.Timer + timer tstime.TimerController latestVersion *tailcfg.ClientVersion // or nil checkForUpdates bool @@ -115,6 +118,20 @@ type Tracker struct { metricHealthMessage *metrics.MultiLabelMap[metricHealthMessageLabel] } +func (t *Tracker) now() time.Time { + if t.testClock != nil { + return t.testClock.Now() + } + return time.Now() +} + +func (t *Tracker) clock() tstime.Clock { + if t.testClock != nil { + return t.testClock + } + return tstime.StdClock{} +} + // Subsystem is the name of a subsystem whose health can be monitored. // // Deprecated: Registering a Warnable using Register() and updating its health state @@ -311,11 +328,11 @@ func (ws *warningState) Equal(other *warningState) bool { // IsVisible returns whether the Warnable should be visible to the user, based on the TimeToVisible // field of the Warnable and the BrokenSince time when the Warnable became unhealthy. -func (w *Warnable) IsVisible(ws *warningState) bool { +func (w *Warnable) IsVisible(ws *warningState, clockNow func() time.Time) bool { if ws == nil || w.TimeToVisible == 0 { return true } - return time.Since(ws.BrokenSince) >= w.TimeToVisible + return clockNow().Sub(ws.BrokenSince) >= w.TimeToVisible } // SetMetricsRegistry sets up the metrics for the Tracker. It takes @@ -365,7 +382,7 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { // If we already have a warningState for this Warnable with an earlier BrokenSince time, keep that // BrokenSince time. - brokenSince := time.Now() + brokenSince := t.now() if existingWS := t.warnableVal[w]; existingWS != nil { brokenSince = existingWS.BrokenSince } @@ -384,15 +401,15 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { // If the Warnable has been unhealthy for more than its TimeToVisible, the callback should be // executed immediately. Otherwise, the callback should be enqueued to run once the Warnable // becomes visible. - if w.IsVisible(ws) { + if w.IsVisible(ws, t.now) { go cb(w, w.unhealthyState(ws)) continue } // The time remaining until the Warnable will be visible to the user is the TimeToVisible // minus the time that has already passed since the Warnable became unhealthy. 
- visibleIn := w.TimeToVisible - time.Since(brokenSince) - mak.Set(&t.pendingVisibleTimers, w, time.AfterFunc(visibleIn, func() { + visibleIn := w.TimeToVisible - t.now().Sub(brokenSince) + var tc tstime.TimerController = t.clock().AfterFunc(visibleIn, func() { t.mu.Lock() defer t.mu.Unlock() // Check if the Warnable is still unhealthy, as it could have become healthy between the time @@ -401,7 +418,8 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { go cb(w, w.unhealthyState(ws)) delete(t.pendingVisibleTimers, w) } - })) + }) + mak.Set(&t.pendingVisibleTimers, w, tc) } } } @@ -476,7 +494,7 @@ func (t *Tracker) RegisterWatcher(cb func(w *Warnable, r *UnhealthyState)) (unre } handle := t.watchers.Add(cb) if t.timer == nil { - t.timer = time.AfterFunc(time.Minute, t.timerSelfCheck) + t.timer = t.clock().AfterFunc(time.Minute, t.timerSelfCheck) } return func() { t.mu.Lock() @@ -640,10 +658,10 @@ func (t *Tracker) GotStreamedMapResponse() { } t.mu.Lock() defer t.mu.Unlock() - t.lastStreamedMapResponse = time.Now() + t.lastStreamedMapResponse = t.now() if !t.inMapPoll { t.inMapPoll = true - t.inMapPollSince = time.Now() + t.inMapPollSince = t.now() } t.selfCheckLocked() } @@ -660,7 +678,7 @@ func (t *Tracker) SetOutOfPollNetMap() { return } t.inMapPoll = false - t.lastMapPollEndedAt = time.Now() + t.lastMapPollEndedAt = t.now() t.selfCheckLocked() } @@ -702,7 +720,7 @@ func (t *Tracker) NoteMapRequestHeard(mr *tailcfg.MapRequest) { // against SetMagicSockDERPHome and // SetDERPRegionConnectedState - t.lastMapRequestHeard = time.Now() + t.lastMapRequestHeard = t.now() t.selfCheckLocked() } @@ -740,7 +758,7 @@ func (t *Tracker) NoteDERPRegionReceivedFrame(region int) { } t.mu.Lock() defer t.mu.Unlock() - mak.Set(&t.derpRegionLastFrame, region, time.Now()) + mak.Set(&t.derpRegionLastFrame, region, t.now()) t.selfCheckLocked() } @@ -799,9 +817,9 @@ func (t *Tracker) SetIPNState(state string, wantRunning bool) { // The first time we see wantRunning=true and it used to be false, it means the user requested // the backend to start. We store this timestamp and use it to silence some warnings that are // expected during startup. - t.ipnWantRunningLastTrue = time.Now() + t.ipnWantRunningLastTrue = t.now() t.setUnhealthyLocked(warmingUpWarnable, nil) - time.AfterFunc(warmingUpWarnableDuration, func() { + t.clock().AfterFunc(warmingUpWarnableDuration, func() { t.mu.Lock() t.updateWarmingUpWarnableLocked() t.mu.Unlock() @@ -938,7 +956,7 @@ func (t *Tracker) Strings() []string { func (t *Tracker) stringsLocked() []string { result := []string{} for w, ws := range t.warnableVal { - if !w.IsVisible(ws) { + if !w.IsVisible(ws, t.now) { // Do not append invisible warnings. continue } @@ -1010,7 +1028,7 @@ func (t *Tracker) updateBuiltinWarnablesLocked() { t.setHealthyLocked(localLogWarnable) } - now := time.Now() + now := t.now() // How long we assume we'll have heard a DERP frame or a MapResponse // KeepAlive by. @@ -1020,8 +1038,10 @@ func (t *Tracker) updateBuiltinWarnablesLocked() { recentlyOn := now.Sub(t.ipnWantRunningLastTrue) < 5*time.Second homeDERP := t.derpHomeRegion - if recentlyOn { + if recentlyOn || !t.inMapPoll { // If user just turned Tailscale on, don't warn for a bit. 
+ // Also, if we're not in a map poll, that means we don't yet + // have a DERPMap or aren't in a state where we even want + // a DERP home yet, so don't warn. t.setHealthyLocked(noDERPHomeWarnable) t.setHealthyLocked(noDERPConnectionWarnable) t.setHealthyLocked(derpTimeoutWarnable) @@ -1170,7 +1190,7 @@ func (t *Tracker) updateBuiltinWarnablesLocked() { // updateWarmingUpWarnableLocked ensures the warmingUpWarnable is healthy if wantRunning has been set to true // for more than warmingUpWarnableDuration. func (t *Tracker) updateWarmingUpWarnableLocked() { - if !t.ipnWantRunningLastTrue.IsZero() && time.Now().After(t.ipnWantRunningLastTrue.Add(warmingUpWarnableDuration)) { + if !t.ipnWantRunningLastTrue.IsZero() && t.now().After(t.ipnWantRunningLastTrue.Add(warmingUpWarnableDuration)) { t.setHealthyLocked(warmingUpWarnable) } } @@ -1282,7 +1302,7 @@ func (t *Tracker) LastNoiseDialWasRecent() bool { t.mu.Lock() defer t.mu.Unlock() - now := time.Now() + now := t.now() dur := now.Sub(t.lastNoiseDial) t.lastNoiseDial = now return dur < 2*time.Minute diff --git a/health/health_test.go b/health/health_test.go index cc7b9d5aa..abc0ec07e 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -12,6 +12,7 @@ import ( "time" "tailscale.com/tailcfg" + "tailscale.com/tstest" "tailscale.com/types/opt" "tailscale.com/util/usermetric" "tailscale.com/version" ) @@ -406,3 +407,47 @@ func TestHealthMetric(t *testing.T) { }) } } + +// TestNoDERPHomeWarnable checks that we don't +// complain about no DERP home if we're not in a +// map poll. +func TestNoDERPHomeWarnable(t *testing.T) { + t.Skip("TODO: fix https://github.com/tailscale/tailscale/issues/14798 to make this test not deadlock") + clock := tstest.NewClock(tstest.ClockOpts{ + Start: time.Unix(123, 0), + FollowRealTime: false, + }) + ht := &Tracker{ + testClock: clock, + } + ht.SetIPNState("NeedsLogin", true) + + // Advance 30 seconds to get past the "recentlyLoggedIn" check. + clock.Advance(30 * time.Second) + ht.updateBuiltinWarnablesLocked() + + // Advance to get past the TimeToVisible delay. + clock.Advance(noDERPHomeWarnable.TimeToVisible * 2) + + ht.updateBuiltinWarnablesLocked() + if ws, ok := ht.CurrentState().Warnings[noDERPHomeWarnable.Code]; ok { + t.Fatalf("got unexpected noDERPHomeWarnable warnable: %v", ws) + } +} + +// TestNoDERPHomeWarnableManual is like TestNoDERPHomeWarnable +// but doesn't use tstest.Clock so avoids the deadlock +// I hit: https://github.com/tailscale/tailscale/issues/14798 +func TestNoDERPHomeWarnableManual(t *testing.T) { + ht := &Tracker{} + ht.SetIPNState("NeedsLogin", true) + + // Avoid wantRunning: + ht.ipnWantRunningLastTrue = ht.ipnWantRunningLastTrue.Add(-10 * time.Second) + ht.updateBuiltinWarnablesLocked() + + ws, ok := ht.warnableVal[noDERPHomeWarnable] + if ok { + t.Fatalf("got unexpected noDERPHomeWarnable warnable: %v", ws) + } +} diff --git a/health/state.go b/health/state.go index 3bfa6f99b..c06f6ef59 100644 --- a/health/state.go +++ b/health/state.go @@ -86,7 +86,7 @@ func (t *Tracker) CurrentState() *State { wm := map[WarnableCode]UnhealthyState{} for w, ws := range t.warnableVal { - if !w.IsVisible(ws) { + if !w.IsVisible(ws, t.now) { // Skip invisible Warnables. 
continue } From 2691b9f6be2925188159d914411298e13dc409df Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 27 Jan 2025 03:07:21 +0000 Subject: [PATCH 0412/1708] tempfork/acme: add new package for x/crypto package acme fork, move We've been maintaining temporary dev forks of golang.org/x/crypto/{acme,ssh} in https://github.com/tailscale/golang-x-crypto instead of using this repo's tempfork directory as we do with other packages. The reason we were doing that was because x/crypto/ssh depended on x/crypto/ssh/internal/poly1305 and I hadn't noticed there are forwarding wrappers already available in x/crypto/poly1305. It also depended on internal/bcrypt_pbkdf but we don't use that so it's easy to just delete that calling code in our tempfork/ssh. Now that our SSH changes have been upstreamed, we can soon unfork from SSH. That leaves ACME remaining. This change copies our tailscale/golang-x-crypto/acme code to tempfork/acme but adds a test that our vendored copy still matches our tailscale/golang-x-crypto repo, where we can continue to do development work and rebases with upstream. A comment on the new test describes the expected workflow. While we could continue to just import & use tailscale/golang-x-crypto/acme, it seems a bit nicer to not have that entire-fork-of-x-crypto visible at all in our transitive deps and the questions that this invites. Showing just a fork of an ACME client is much less scary. It does add a step to the process of hacking on the ACME client code, but we do that approximately never anyway, and the extra step is very incremental compared to the existing tedious steps. Updates #8593 Updates #10238 Change-Id: I8af4378c04c1f82e63d31bf4d16dba9f510f9199 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- ipn/ipnlocal/cert.go | 2 +- tempfork/acme/README.md | 14 + tempfork/acme/acme.go | 861 ++++++++++++++++++++ tempfork/acme/acme_test.go | 973 +++++++++++++++++++++++ tempfork/acme/http.go | 325 ++++++++ tempfork/acme/http_test.go | 255 ++++++ tempfork/acme/jws.go | 257 ++++++ tempfork/acme/jws_test.go | 550 +++++++++++++ tempfork/acme/rfc8555.go | 476 +++++++++++ tempfork/acme/rfc8555_test.go | 1017 ++++++++++++++++++++++++ tempfork/acme/sync_to_upstream_test.go | 70 ++ tempfork/acme/types.go | 632 +++++++++++++++ tempfork/acme/types_test.go | 219 +++++ tempfork/acme/version_go112.go | 27 + 16 files changed, 5679 insertions(+), 3 deletions(-) create mode 100644 tempfork/acme/README.md create mode 100644 tempfork/acme/acme.go create mode 100644 tempfork/acme/acme_test.go create mode 100644 tempfork/acme/http.go create mode 100644 tempfork/acme/http_test.go create mode 100644 tempfork/acme/jws.go create mode 100644 tempfork/acme/jws_test.go create mode 100644 tempfork/acme/rfc8555.go create mode 100644 tempfork/acme/rfc8555_test.go create mode 100644 tempfork/acme/sync_to_upstream_test.go create mode 100644 tempfork/acme/types.go create mode 100644 tempfork/acme/types_test.go create mode 100644 tempfork/acme/version_go112.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index fab29ba03..e32fd4a2b 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -197,7 +197,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W 
github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - github.com/tailscale/golang-x-crypto/acme from tailscale.com/ipn/ipnlocal LD github.com/tailscale/golang-x-crypto/internal/poly1305 from github.com/tailscale/golang-x-crypto/ssh LD github.com/tailscale/golang-x-crypto/ssh from tailscale.com/ipn/ipnlocal LD github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf from github.com/tailscale/golang-x-crypto/ssh @@ -888,6 +887,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/syncs from tailscale.com/control/controlknobs+ tailscale.com/tailcfg from tailscale.com/client/tailscale+ tailscale.com/taildrop from tailscale.com/ipn/ipnlocal+ + tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tempfork/httprec from tailscale.com/control/controlclient tailscale.com/tka from tailscale.com/client/tailscale+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 36b6063d5..a7ad83818 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -152,7 +152,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - github.com/tailscale/golang-x-crypto/acme from tailscale.com/ipn/ipnlocal LD github.com/tailscale/golang-x-crypto/internal/poly1305 from github.com/tailscale/golang-x-crypto/ssh LD github.com/tailscale/golang-x-crypto/ssh from tailscale.com/ipn/ipnlocal+ LD github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf from github.com/tailscale/golang-x-crypto/ssh @@ -339,6 +338,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/syncs from tailscale.com/cmd/tailscaled+ tailscale.com/tailcfg from tailscale.com/client/tailscale+ tailscale.com/taildrop from tailscale.com/ipn/ipnlocal+ + tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal LD tailscale.com/tempfork/gliderlabs/ssh from tailscale.com/ssh/tailssh tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tempfork/httprec from tailscale.com/control/controlclient diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index 3361fc70b..cfa4fe1ba 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -32,7 +32,6 @@ import ( "sync" "time" - "github.com/tailscale/golang-x-crypto/acme" "tailscale.com/atomicfile" "tailscale.com/envknob" "tailscale.com/hostinfo" @@ -41,6 +40,7 @@ import ( "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" "tailscale.com/net/bakedroots" + "tailscale.com/tempfork/acme" "tailscale.com/types/logger" "tailscale.com/util/testenv" "tailscale.com/version" diff --git a/tempfork/acme/README.md b/tempfork/acme/README.md new file mode 100644 index 000000000..def357fc1 --- /dev/null +++ b/tempfork/acme/README.md @@ -0,0 +1,14 @@ +# tempfork/acme + +This is a vendored copy of Tailscale's https://github.com/tailscale/golang-x-crypto, +which is a fork of golang.org/x/crypto/acme. + +See https://github.com/tailscale/tailscale/issues/10238 for unforking +status. 
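The sync check referenced below is meant to keep this directory byte-for-byte identical to the fork. As a rough illustrative sketch only (the real check lives in sync_to_upstream_test.go and its details, including how it locates the fork and which files it compares, may differ), a test of that general shape could look like:

```go
package acme

import (
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"testing"
)

// TestSyncedToUpstreamSketch is a hypothetical stand-in for the real sync
// test: it resolves the tailscale/golang-x-crypto module on disk via the
// go tool and diffs each vendored .go file against the fork's acme package.
func TestSyncedToUpstreamSketch(t *testing.T) {
	out, err := exec.Command("go", "list", "-m", "-f", "{{.Dir}}",
		"github.com/tailscale/golang-x-crypto").Output()
	if err != nil || strings.TrimSpace(string(out)) == "" {
		t.Skipf("fork module not available locally: %v", err)
	}
	forkDir := filepath.Join(strings.TrimSpace(string(out)), "acme")

	files, err := filepath.Glob("*.go")
	if err != nil {
		t.Fatal(err)
	}
	for _, name := range files {
		if name == "sync_to_upstream_test.go" {
			continue // exists only in the vendored copy
		}
		local, err := os.ReadFile(name)
		if err != nil {
			t.Fatal(err)
		}
		fork, err := os.ReadFile(filepath.Join(forkDir, name))
		if err != nil {
			t.Errorf("%s: no matching file in the fork: %v", name, err)
			continue
		}
		if string(local) != string(fork) {
			t.Errorf("%s differs from the fork; re-run the sync step", name)
		}
	}
}
```

Resolving the fork through the `go` tool keeps the comparison pinned to whatever version go.mod records, rather than whatever happens to be checked out elsewhere.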
+ +The https://github.com/tailscale/golang-x-crypto location exists to +let us do rebases from upstream easily, and then we update tempfork/acme +in the same commit we go get github.com/tailscale/golang-x-crypto@main. +See the comment on the TestSyncedToUpstream test for details. That +test should catch that forgotten step. + diff --git a/tempfork/acme/acme.go b/tempfork/acme/acme.go new file mode 100644 index 000000000..8bc2ac16e --- /dev/null +++ b/tempfork/acme/acme.go @@ -0,0 +1,861 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package acme provides an implementation of the +// Automatic Certificate Management Environment (ACME) spec, +// most famously used by Let's Encrypt. +// +// The initial implementation of this package was based on an early version +// of the spec. The current implementation supports only the modern +// RFC 8555 but some of the old API surface remains for compatibility. +// While code using the old API will still compile, it will return an error. +// Note the deprecation comments to update your code. +// +// See https://tools.ietf.org/html/rfc8555 for the spec. +// +// Most common scenarios will want to use autocert subdirectory instead, +// which provides automatic access to certificates from Let's Encrypt +// and any other ACME-based CA. +package acme + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net/http" + "strings" + "sync" + "time" +) + +const ( + // LetsEncryptURL is the Directory endpoint of Let's Encrypt CA. + LetsEncryptURL = "https://acme-v02.api.letsencrypt.org/directory" + + // ALPNProto is the ALPN protocol name used by a CA server when validating + // tls-alpn-01 challenges. + // + // Package users must ensure their servers can negotiate the ACME ALPN in + // order for tls-alpn-01 challenge verifications to succeed. + // See the crypto/tls package's Config.NextProtos field. + ALPNProto = "acme-tls/1" +) + +// idPeACMEIdentifier is the OID for the ACME extension for the TLS-ALPN challenge. +// https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-05#section-5.1 +var idPeACMEIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 31} + +const ( + maxChainLen = 5 // max depth and breadth of a certificate chain + maxCertSize = 1 << 20 // max size of a certificate, in DER bytes + // Used for decoding certs from application/pem-certificate-chain response, + // the default when in RFC mode. + maxCertChainSize = maxCertSize * maxChainLen + + // Max number of collected nonces kept in memory. + // Expect usual peak of 1 or 2. + maxNonces = 100 +) + +// Client is an ACME client. +// +// The only required field is Key. An example of creating a client with a new key +// is as follows: +// +// key, err := rsa.GenerateKey(rand.Reader, 2048) +// if err != nil { +// log.Fatal(err) +// } +// client := &Client{Key: key} +type Client struct { + // Key is the account key used to register with a CA and sign requests. + // Key.Public() must return a *rsa.PublicKey or *ecdsa.PublicKey. + // + // The following algorithms are supported: + // RS256, ES256, ES384 and ES512. + // See RFC 7518 for more details about the algorithms. 
+ Key crypto.Signer + + // HTTPClient optionally specifies an HTTP client to use + // instead of http.DefaultClient. + HTTPClient *http.Client + + // DirectoryURL points to the CA directory endpoint. + // If empty, LetsEncryptURL is used. + // Mutating this value after a successful call of Client's Discover method + // will have no effect. + DirectoryURL string + + // RetryBackoff computes the duration after which the nth retry of a failed request + // should occur. The value of n for the first call on failure is 1. + // The values of r and resp are the request and response of the last failed attempt. + // If the returned value is negative or zero, no more retries are done and an error + // is returned to the caller of the original method. + // + // Requests which result in a 4xx client error are not retried, + // except for 400 Bad Request due to "bad nonce" errors and 429 Too Many Requests. + // + // If RetryBackoff is nil, a truncated exponential backoff algorithm + // with the ceiling of 10 seconds is used, where each subsequent retry n + // is done after either ("Retry-After" + jitter) or (2^n seconds + jitter), + // preferring the former if "Retry-After" header is found in the resp. + // The jitter is a random value up to 1 second. + RetryBackoff func(n int, r *http.Request, resp *http.Response) time.Duration + + // UserAgent is prepended to the User-Agent header sent to the ACME server, + // which by default is this package's name and version. + // + // Reusable libraries and tools in particular should set this value to be + // identifiable by the server, in case they are causing issues. + UserAgent string + + cacheMu sync.Mutex + dir *Directory // cached result of Client's Discover method + // KID is the key identifier provided by the CA. If not provided it will be + // retrieved from the CA by making a call to the registration endpoint. + KID KeyID + + noncesMu sync.Mutex + nonces map[string]struct{} // nonces collected from previous responses +} + +// accountKID returns a key ID associated with c.Key, the account identity +// provided by the CA during RFC based registration. +// It assumes c.Discover has already been called. +// +// accountKID requires at most one network roundtrip. +// It caches only successful result. +// +// When in pre-RFC mode or when c.getRegRFC responds with an error, accountKID +// returns noKeyID. +func (c *Client) accountKID(ctx context.Context) KeyID { + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + if c.KID != noKeyID { + return c.KID + } + a, err := c.getRegRFC(ctx) + if err != nil { + return noKeyID + } + c.KID = KeyID(a.URI) + return c.KID +} + +var errPreRFC = errors.New("acme: server does not support the RFC 8555 version of ACME") + +// Discover performs ACME server discovery using c.DirectoryURL. +// +// It caches successful result. So, subsequent calls will not result in +// a network round-trip. This also means mutating c.DirectoryURL after successful call +// of this method will have no effect. 
+func (c *Client) Discover(ctx context.Context) (Directory, error) { + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + if c.dir != nil { + return *c.dir, nil + } + + res, err := c.get(ctx, c.directoryURL(), wantStatus(http.StatusOK)) + if err != nil { + return Directory{}, err + } + defer res.Body.Close() + c.addNonce(res.Header) + + var v struct { + Reg string `json:"newAccount"` + Authz string `json:"newAuthz"` + Order string `json:"newOrder"` + Revoke string `json:"revokeCert"` + Nonce string `json:"newNonce"` + KeyChange string `json:"keyChange"` + RenewalInfo string `json:"renewalInfo"` + Meta struct { + Terms string `json:"termsOfService"` + Website string `json:"website"` + CAA []string `json:"caaIdentities"` + ExternalAcct bool `json:"externalAccountRequired"` + } + } + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return Directory{}, err + } + if v.Order == "" { + return Directory{}, errPreRFC + } + c.dir = &Directory{ + RegURL: v.Reg, + AuthzURL: v.Authz, + OrderURL: v.Order, + RevokeURL: v.Revoke, + NonceURL: v.Nonce, + KeyChangeURL: v.KeyChange, + RenewalInfoURL: v.RenewalInfo, + Terms: v.Meta.Terms, + Website: v.Meta.Website, + CAA: v.Meta.CAA, + ExternalAccountRequired: v.Meta.ExternalAcct, + } + return *c.dir, nil +} + +func (c *Client) directoryURL() string { + if c.DirectoryURL != "" { + return c.DirectoryURL + } + return LetsEncryptURL +} + +// CreateCert was part of the old version of ACME. It is incompatible with RFC 8555. +// +// Deprecated: this was for the pre-RFC 8555 version of ACME. Callers should use CreateOrderCert. +func (c *Client) CreateCert(ctx context.Context, csr []byte, exp time.Duration, bundle bool) (der [][]byte, certURL string, err error) { + return nil, "", errPreRFC +} + +// FetchCert retrieves already issued certificate from the given url, in DER format. +// It retries the request until the certificate is successfully retrieved, +// context is cancelled by the caller or an error response is received. +// +// If the bundle argument is true, the returned value also contains the CA (issuer) +// certificate chain. +// +// FetchCert returns an error if the CA's response or chain was unreasonably large. +// Callers are encouraged to parse the returned value to ensure the certificate is valid +// and has expected features. +func (c *Client) FetchCert(ctx context.Context, url string, bundle bool) ([][]byte, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + return c.fetchCertRFC(ctx, url, bundle) +} + +// RevokeCert revokes a previously issued certificate cert, provided in DER format. +// +// The key argument, used to sign the request, must be authorized +// to revoke the certificate. It's up to the CA to decide which keys are authorized. +// For instance, the key pair of the certificate may be authorized. +// If the key is nil, c.Key is used instead. +func (c *Client) RevokeCert(ctx context.Context, key crypto.Signer, cert []byte, reason CRLReasonCode) error { + if _, err := c.Discover(ctx); err != nil { + return err + } + return c.revokeCertRFC(ctx, key, cert, reason) +} + +// FetchRenewalInfo retrieves the RenewalInfo from Directory.RenewalInfoURL. 
+func (c *Client) FetchRenewalInfo(ctx context.Context, leaf []byte) (*RenewalInfo, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + parsedLeaf, err := x509.ParseCertificate(leaf) + if err != nil { + return nil, fmt.Errorf("parsing leaf certificate: %w", err) + } + + renewalURL, err := c.getRenewalURL(parsedLeaf) + if err != nil { + return nil, fmt.Errorf("generating renewal info URL: %w", err) + } + + res, err := c.get(ctx, renewalURL, wantStatus(http.StatusOK)) + if err != nil { + return nil, fmt.Errorf("fetching renewal info: %w", err) + } + defer res.Body.Close() + + var info RenewalInfo + if err := json.NewDecoder(res.Body).Decode(&info); err != nil { + return nil, fmt.Errorf("parsing renewal info response: %w", err) + } + return &info, nil +} + +func (c *Client) getRenewalURL(cert *x509.Certificate) (string, error) { + // See https://www.ietf.org/archive/id/draft-ietf-acme-ari-04.html#name-the-renewalinfo-resource + // for how the request URL is built. + url := c.dir.RenewalInfoURL + if !strings.HasSuffix(url, "/") { + url += "/" + } + aki := base64.RawURLEncoding.EncodeToString(cert.AuthorityKeyId) + serial := base64.RawURLEncoding.EncodeToString(cert.SerialNumber.Bytes()) + return fmt.Sprintf("%s%s.%s", url, aki, serial), nil +} + +// AcceptTOS always returns true to indicate the acceptance of a CA's Terms of Service +// during account registration. See Register method of Client for more details. +func AcceptTOS(tosURL string) bool { return true } + +// Register creates a new account with the CA using c.Key. +// It returns the registered account. The account acct is not modified. +// +// The registration may require the caller to agree to the CA's Terms of Service (TOS). +// If so, and the account has not indicated the acceptance of the terms (see Account for details), +// Register calls prompt with a TOS URL provided by the CA. Prompt should report +// whether the caller agrees to the terms. To always accept the terms, the caller can use AcceptTOS. +// +// When interfacing with an RFC-compliant CA, non-RFC 8555 fields of acct are ignored +// and prompt is called if Directory's Terms field is non-zero. +// Also see Error's Instance field for when a CA requires already registered accounts to agree +// to an updated Terms of Service. +func (c *Client) Register(ctx context.Context, acct *Account, prompt func(tosURL string) bool) (*Account, error) { + if c.Key == nil { + return nil, errors.New("acme: client.Key must be set to Register") + } + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + return c.registerRFC(ctx, acct, prompt) +} + +// GetReg retrieves an existing account associated with c.Key. +// +// The url argument is a legacy artifact of the pre-RFC 8555 API +// and is ignored. +func (c *Client) GetReg(ctx context.Context, url string) (*Account, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + return c.getRegRFC(ctx) +} + +// UpdateReg updates an existing registration. +// It returns an updated account copy. The provided account is not modified. +// +// The account's URI is ignored and the account URL associated with +// c.Key is used instead. +func (c *Client) UpdateReg(ctx context.Context, acct *Account) (*Account, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + return c.updateRegRFC(ctx, acct) +} + +// AccountKeyRollover attempts to transition a client's account key to a new key. +// On success client's Key is updated which is not concurrency safe. 
+// On failure an error will be returned. +// The new key is already registered with the ACME provider if the following is true: +// - error is of type acme.Error +// - StatusCode should be 409 (Conflict) +// - Location header will have the KID of the associated account +// +// More about account key rollover can be found at +// https://tools.ietf.org/html/rfc8555#section-7.3.5. +func (c *Client) AccountKeyRollover(ctx context.Context, newKey crypto.Signer) error { + return c.accountKeyRollover(ctx, newKey) +} + +// Authorize performs the initial step in the pre-authorization flow, +// as opposed to order-based flow. +// The caller will then need to choose from and perform a set of returned +// challenges using c.Accept in order to successfully complete authorization. +// +// Once complete, the caller can use AuthorizeOrder which the CA +// should provision with the already satisfied authorization. +// For pre-RFC CAs, the caller can proceed directly to requesting a certificate +// using CreateCert method. +// +// If an authorization has been previously granted, the CA may return +// a valid authorization which has its Status field set to StatusValid. +// +// More about pre-authorization can be found at +// https://tools.ietf.org/html/rfc8555#section-7.4.1. +func (c *Client) Authorize(ctx context.Context, domain string) (*Authorization, error) { + return c.authorize(ctx, "dns", domain) +} + +// AuthorizeIP is the same as Authorize but requests IP address authorization. +// Clients which successfully obtain such authorization may request to issue +// a certificate for IP addresses. +// +// See the ACME spec extension for more details about IP address identifiers: +// https://tools.ietf.org/html/draft-ietf-acme-ip. +func (c *Client) AuthorizeIP(ctx context.Context, ipaddr string) (*Authorization, error) { + return c.authorize(ctx, "ip", ipaddr) +} + +func (c *Client) authorize(ctx context.Context, typ, val string) (*Authorization, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + type authzID struct { + Type string `json:"type"` + Value string `json:"value"` + } + req := struct { + Resource string `json:"resource"` + Identifier authzID `json:"identifier"` + }{ + Resource: "new-authz", + Identifier: authzID{Type: typ, Value: val}, + } + res, err := c.post(ctx, nil, c.dir.AuthzURL, req, wantStatus(http.StatusCreated)) + if err != nil { + return nil, err + } + defer res.Body.Close() + + var v wireAuthz + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + if v.Status != StatusPending && v.Status != StatusValid { + return nil, fmt.Errorf("acme: unexpected status: %s", v.Status) + } + return v.authorization(res.Header.Get("Location")), nil +} + +// GetAuthorization retrieves an authorization identified by the given URL. +// +// If a caller needs to poll an authorization until its status is final, +// see the WaitAuthorization method. 
+func (c *Client) GetAuthorization(ctx context.Context, url string) (*Authorization, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + defer res.Body.Close() + var v wireAuthz + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.authorization(url), nil +} + +// RevokeAuthorization relinquishes an existing authorization identified +// by the given URL. +// The url argument is an Authorization.URI value. +// +// If successful, the caller will be required to obtain a new authorization +// using the Authorize or AuthorizeOrder methods before being able to request +// a new certificate for the domain associated with the authorization. +// +// It does not revoke existing certificates. +func (c *Client) RevokeAuthorization(ctx context.Context, url string) error { + if _, err := c.Discover(ctx); err != nil { + return err + } + + req := struct { + Resource string `json:"resource"` + Status string `json:"status"` + Delete bool `json:"delete"` + }{ + Resource: "authz", + Status: "deactivated", + Delete: true, + } + res, err := c.post(ctx, nil, url, req, wantStatus(http.StatusOK)) + if err != nil { + return err + } + defer res.Body.Close() + return nil +} + +// WaitAuthorization polls an authorization at the given URL +// until it is in one of the final states, StatusValid or StatusInvalid, +// the ACME CA responded with a 4xx error code, or the context is done. +// +// It returns a non-nil Authorization only if its Status is StatusValid. +// In all other cases WaitAuthorization returns an error. +// If the Status is StatusInvalid, the returned error is of type *AuthorizationError. +func (c *Client) WaitAuthorization(ctx context.Context, url string) (*Authorization, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + for { + res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) + if err != nil { + return nil, err + } + + var raw wireAuthz + err = json.NewDecoder(res.Body).Decode(&raw) + res.Body.Close() + switch { + case err != nil: + // Skip and retry. + case raw.Status == StatusValid: + return raw.authorization(url), nil + case raw.Status == StatusInvalid: + return nil, raw.error(url) + } + + // Exponential backoff is implemented in c.get above. + // This is just to prevent continuously hitting the CA + // while waiting for a final authorization status. + d := retryAfter(res.Header.Get("Retry-After")) + if d == 0 { + // Given that the fastest challenges TLS-SNI and HTTP-01 + // require a CA to make at least 1 network round trip + // and most likely persist a challenge state, + // this default delay seems reasonable. + d = time.Second + } + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return nil, ctx.Err() + case <-t.C: + // Retry. + } + } +} + +// GetChallenge retrieves the current status of an challenge. +// +// A client typically polls a challenge status using this method. 
+func (c *Client) GetChallenge(ctx context.Context, url string) (*Challenge, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) + if err != nil { + return nil, err + } + + defer res.Body.Close() + v := wireChallenge{URI: url} + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.challenge(), nil +} + +// Accept informs the server that the client accepts one of its challenges +// previously obtained with c.Authorize. +// +// The server will then perform the validation asynchronously. +func (c *Client) Accept(ctx context.Context, chal *Challenge) (*Challenge, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + res, err := c.post(ctx, nil, chal.URI, json.RawMessage("{}"), wantStatus( + http.StatusOK, // according to the spec + http.StatusAccepted, // Let's Encrypt: see https://goo.gl/WsJ7VT (acme-divergences.md) + )) + if err != nil { + return nil, err + } + defer res.Body.Close() + + var v wireChallenge + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.challenge(), nil +} + +// DNS01ChallengeRecord returns a DNS record value for a dns-01 challenge response. +// A TXT record containing the returned value must be provisioned under +// "_acme-challenge" name of the domain being validated. +// +// The token argument is a Challenge.Token value. +func (c *Client) DNS01ChallengeRecord(token string) (string, error) { + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return "", err + } + b := sha256.Sum256([]byte(ka)) + return base64.RawURLEncoding.EncodeToString(b[:]), nil +} + +// HTTP01ChallengeResponse returns the response for an http-01 challenge. +// Servers should respond with the value to HTTP requests at the URL path +// provided by HTTP01ChallengePath to validate the challenge and prove control +// over a domain name. +// +// The token argument is a Challenge.Token value. +func (c *Client) HTTP01ChallengeResponse(token string) (string, error) { + return keyAuth(c.Key.Public(), token) +} + +// HTTP01ChallengePath returns the URL path at which the response for an http-01 challenge +// should be provided by the servers. +// The response value can be obtained with HTTP01ChallengeResponse. +// +// The token argument is a Challenge.Token value. +func (c *Client) HTTP01ChallengePath(token string) string { + return "/.well-known/acme-challenge/" + token +} + +// TLSSNI01ChallengeCert creates a certificate for TLS-SNI-01 challenge response. +// +// Deprecated: This challenge type is unused in both draft-02 and RFC versions of the ACME spec. +func (c *Client) TLSSNI01ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return tls.Certificate{}, "", err + } + b := sha256.Sum256([]byte(ka)) + h := hex.EncodeToString(b[:]) + name = fmt.Sprintf("%s.%s.acme.invalid", h[:32], h[32:]) + cert, err = tlsChallengeCert([]string{name}, opt) + if err != nil { + return tls.Certificate{}, "", err + } + return cert, name, nil +} + +// TLSSNI02ChallengeCert creates a certificate for TLS-SNI-02 challenge response. +// +// Deprecated: This challenge type is unused in both draft-02 and RFC versions of the ACME spec. 
+func (c *Client) TLSSNI02ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { + b := sha256.Sum256([]byte(token)) + h := hex.EncodeToString(b[:]) + sanA := fmt.Sprintf("%s.%s.token.acme.invalid", h[:32], h[32:]) + + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return tls.Certificate{}, "", err + } + b = sha256.Sum256([]byte(ka)) + h = hex.EncodeToString(b[:]) + sanB := fmt.Sprintf("%s.%s.ka.acme.invalid", h[:32], h[32:]) + + cert, err = tlsChallengeCert([]string{sanA, sanB}, opt) + if err != nil { + return tls.Certificate{}, "", err + } + return cert, sanA, nil +} + +// TLSALPN01ChallengeCert creates a certificate for TLS-ALPN-01 challenge response. +// Servers can present the certificate to validate the challenge and prove control +// over a domain name. For more details on TLS-ALPN-01 see +// https://tools.ietf.org/html/draft-shoemaker-acme-tls-alpn-00#section-3 +// +// The token argument is a Challenge.Token value. +// If a WithKey option is provided, its private part signs the returned cert, +// and the public part is used to specify the signee. +// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. +// +// The returned certificate is valid for the next 24 hours and must be presented only when +// the server name in the TLS ClientHello matches the domain, and the special acme-tls/1 ALPN protocol +// has been specified. +func (c *Client) TLSALPN01ChallengeCert(token, domain string, opt ...CertOption) (cert tls.Certificate, err error) { + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return tls.Certificate{}, err + } + shasum := sha256.Sum256([]byte(ka)) + extValue, err := asn1.Marshal(shasum[:]) + if err != nil { + return tls.Certificate{}, err + } + acmeExtension := pkix.Extension{ + Id: idPeACMEIdentifier, + Critical: true, + Value: extValue, + } + + tmpl := defaultTLSChallengeCertTemplate() + + var newOpt []CertOption + for _, o := range opt { + switch o := o.(type) { + case *certOptTemplate: + t := *(*x509.Certificate)(o) // shallow copy is ok + tmpl = &t + default: + newOpt = append(newOpt, o) + } + } + tmpl.ExtraExtensions = append(tmpl.ExtraExtensions, acmeExtension) + newOpt = append(newOpt, WithTemplate(tmpl)) + return tlsChallengeCert([]string{domain}, newOpt) +} + +// popNonce returns a nonce value previously stored with c.addNonce +// or fetches a fresh one from c.dir.NonceURL. +// If NonceURL is empty, it first tries c.directoryURL() and, failing that, +// the provided url. +func (c *Client) popNonce(ctx context.Context, url string) (string, error) { + c.noncesMu.Lock() + defer c.noncesMu.Unlock() + if len(c.nonces) == 0 { + if c.dir != nil && c.dir.NonceURL != "" { + return c.fetchNonce(ctx, c.dir.NonceURL) + } + dirURL := c.directoryURL() + v, err := c.fetchNonce(ctx, dirURL) + if err != nil && url != dirURL { + v, err = c.fetchNonce(ctx, url) + } + return v, err + } + var nonce string + for nonce = range c.nonces { + delete(c.nonces, nonce) + break + } + return nonce, nil +} + +// clearNonces clears any stored nonces +func (c *Client) clearNonces() { + c.noncesMu.Lock() + defer c.noncesMu.Unlock() + c.nonces = make(map[string]struct{}) +} + +// addNonce stores a nonce value found in h (if any) for future use. 
+func (c *Client) addNonce(h http.Header) { + v := nonceFromHeader(h) + if v == "" { + return + } + c.noncesMu.Lock() + defer c.noncesMu.Unlock() + if len(c.nonces) >= maxNonces { + return + } + if c.nonces == nil { + c.nonces = make(map[string]struct{}) + } + c.nonces[v] = struct{}{} +} + +func (c *Client) fetchNonce(ctx context.Context, url string) (string, error) { + r, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return "", err + } + resp, err := c.doNoRetry(ctx, r) + if err != nil { + return "", err + } + defer resp.Body.Close() + nonce := nonceFromHeader(resp.Header) + if nonce == "" { + if resp.StatusCode > 299 { + return "", responseError(resp) + } + return "", errors.New("acme: nonce not found") + } + return nonce, nil +} + +func nonceFromHeader(h http.Header) string { + return h.Get("Replay-Nonce") +} + +// linkHeader returns URI-Reference values of all Link headers +// with relation-type rel. +// See https://tools.ietf.org/html/rfc5988#section-5 for details. +func linkHeader(h http.Header, rel string) []string { + var links []string + for _, v := range h["Link"] { + parts := strings.Split(v, ";") + for _, p := range parts { + p = strings.TrimSpace(p) + if !strings.HasPrefix(p, "rel=") { + continue + } + if v := strings.Trim(p[4:], `"`); v == rel { + links = append(links, strings.Trim(parts[0], "<>")) + } + } + } + return links +} + +// keyAuth generates a key authorization string for a given token. +func keyAuth(pub crypto.PublicKey, token string) (string, error) { + th, err := JWKThumbprint(pub) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", token, th), nil +} + +// defaultTLSChallengeCertTemplate is a template used to create challenge certs for TLS challenges. +func defaultTLSChallengeCertTemplate() *x509.Certificate { + return &x509.Certificate{ + SerialNumber: big.NewInt(1), + NotBefore: time.Now(), + NotAfter: time.Now().Add(24 * time.Hour), + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + } +} + +// tlsChallengeCert creates a temporary certificate for TLS-SNI challenges +// with the given SANs and auto-generated public/private key pair. +// The Subject Common Name is set to the first SAN to aid debugging. +// To create a cert with a custom key pair, specify WithKey option. +func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) { + var key crypto.Signer + tmpl := defaultTLSChallengeCertTemplate() + for _, o := range opt { + switch o := o.(type) { + case *certOptKey: + if key != nil { + return tls.Certificate{}, errors.New("acme: duplicate key option") + } + key = o.key + case *certOptTemplate: + t := *(*x509.Certificate)(o) // shallow copy is ok + tmpl = &t + default: + // package's fault, if we let this happen: + panic(fmt.Sprintf("unsupported option type %T", o)) + } + } + if key == nil { + var err error + if key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader); err != nil { + return tls.Certificate{}, err + } + } + tmpl.DNSNames = san + if len(san) > 0 { + tmpl.Subject.CommonName = san[0] + } + + der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + if err != nil { + return tls.Certificate{}, err + } + return tls.Certificate{ + Certificate: [][]byte{der}, + PrivateKey: key, + }, nil +} + +// encodePEM returns b encoded as PEM with block of type typ. 
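+//
+// For reference (a worked illustration, not part of the upstream file): keyAuth
+// above yields "<token>.<JWK thumbprint>", so for token "xxx" and thumbprint
+// "abc" the key authorization is "xxx.abc". HTTP01ChallengeResponse serves that
+// string verbatim, while DNS01ChallengeRecord publishes
+// base64url(SHA-256(keyAuth)) instead.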
+func encodePEM(typ string, b []byte) []byte { + pb := &pem.Block{Type: typ, Bytes: b} + return pem.EncodeToMemory(pb) +} + +// timeNow is time.Now, except in tests which can mess with it. +var timeNow = time.Now diff --git a/tempfork/acme/acme_test.go b/tempfork/acme/acme_test.go new file mode 100644 index 000000000..dcd214896 --- /dev/null +++ b/tempfork/acme/acme_test.go @@ -0,0 +1,973 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "bytes" + "context" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "fmt" + "io" + "math/big" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "sort" + "strings" + "testing" + "time" +) + +// newTestClient creates a client with a non-nil Directory so that it skips +// the discovery which is otherwise done on the first call of almost every +// exported method. +func newTestClient() *Client { + return &Client{ + Key: testKeyEC, + dir: &Directory{}, // skip discovery + } +} + +// newTestClientWithMockDirectory creates a client with a non-nil Directory +// that contains mock field values. +func newTestClientWithMockDirectory() *Client { + return &Client{ + Key: testKeyEC, + dir: &Directory{ + RenewalInfoURL: "https://example.com/acme/renewal-info/", + }, + } +} + +// Decodes a JWS-encoded request and unmarshals the decoded JSON into a provided +// interface. +func decodeJWSRequest(t *testing.T, v interface{}, r io.Reader) { + // Decode request + var req struct{ Payload string } + if err := json.NewDecoder(r).Decode(&req); err != nil { + t.Fatal(err) + } + payload, err := base64.RawURLEncoding.DecodeString(req.Payload) + if err != nil { + t.Fatal(err) + } + err = json.Unmarshal(payload, v) + if err != nil { + t.Fatal(err) + } +} + +type jwsHead struct { + Alg string + Nonce string + URL string `json:"url"` + KID string `json:"kid"` + JWK map[string]string `json:"jwk"` +} + +func decodeJWSHead(r io.Reader) (*jwsHead, error) { + var req struct{ Protected string } + if err := json.NewDecoder(r).Decode(&req); err != nil { + return nil, err + } + b, err := base64.RawURLEncoding.DecodeString(req.Protected) + if err != nil { + return nil, err + } + var head jwsHead + if err := json.Unmarshal(b, &head); err != nil { + return nil, err + } + return &head, nil +} + +func TestRegisterWithoutKey(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("Replay-Nonce", "test-nonce") + return + } + w.WriteHeader(http.StatusCreated) + fmt.Fprint(w, `{}`) + })) + defer ts.Close() + // First verify that using a complete client results in success. 
+ c := Client{ + Key: testKeyEC, + DirectoryURL: ts.URL, + dir: &Directory{RegURL: ts.URL}, + } + if _, err := c.Register(context.Background(), &Account{}, AcceptTOS); err != nil { + t.Fatalf("c.Register() = %v; want success with a complete test client", err) + } + c.Key = nil + if _, err := c.Register(context.Background(), &Account{}, AcceptTOS); err == nil { + t.Error("c.Register() from client without key succeeded, wanted error") + } +} + +func TestAuthorize(t *testing.T) { + tt := []struct{ typ, value string }{ + {"dns", "example.com"}, + {"ip", "1.2.3.4"}, + } + for _, test := range tt { + t.Run(test.typ, func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("Replay-Nonce", "test-nonce") + return + } + if r.Method != "POST" { + t.Errorf("r.Method = %q; want POST", r.Method) + } + + var j struct { + Resource string + Identifier struct { + Type string + Value string + } + } + decodeJWSRequest(t, &j, r.Body) + + // Test request + if j.Resource != "new-authz" { + t.Errorf("j.Resource = %q; want new-authz", j.Resource) + } + if j.Identifier.Type != test.typ { + t.Errorf("j.Identifier.Type = %q; want %q", j.Identifier.Type, test.typ) + } + if j.Identifier.Value != test.value { + t.Errorf("j.Identifier.Value = %q; want %q", j.Identifier.Value, test.value) + } + + w.Header().Set("Location", "https://ca.tld/acme/auth/1") + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{ + "identifier": {"type":%q,"value":%q}, + "status":"pending", + "challenges":[ + { + "type":"http-01", + "status":"pending", + "uri":"https://ca.tld/acme/challenge/publickey/id1", + "token":"token1" + }, + { + "type":"tls-sni-01", + "status":"pending", + "uri":"https://ca.tld/acme/challenge/publickey/id2", + "token":"token2" + } + ], + "combinations":[[0],[1]] + }`, test.typ, test.value) + })) + defer ts.Close() + + var ( + auth *Authorization + err error + ) + cl := Client{ + Key: testKeyEC, + DirectoryURL: ts.URL, + dir: &Directory{AuthzURL: ts.URL}, + } + switch test.typ { + case "dns": + auth, err = cl.Authorize(context.Background(), test.value) + case "ip": + auth, err = cl.AuthorizeIP(context.Background(), test.value) + default: + t.Fatalf("unknown identifier type: %q", test.typ) + } + if err != nil { + t.Fatal(err) + } + + if auth.URI != "https://ca.tld/acme/auth/1" { + t.Errorf("URI = %q; want https://ca.tld/acme/auth/1", auth.URI) + } + if auth.Status != "pending" { + t.Errorf("Status = %q; want pending", auth.Status) + } + if auth.Identifier.Type != test.typ { + t.Errorf("Identifier.Type = %q; want %q", auth.Identifier.Type, test.typ) + } + if auth.Identifier.Value != test.value { + t.Errorf("Identifier.Value = %q; want %q", auth.Identifier.Value, test.value) + } + + if n := len(auth.Challenges); n != 2 { + t.Fatalf("len(auth.Challenges) = %d; want 2", n) + } + + c := auth.Challenges[0] + if c.Type != "http-01" { + t.Errorf("c.Type = %q; want http-01", c.Type) + } + if c.URI != "https://ca.tld/acme/challenge/publickey/id1" { + t.Errorf("c.URI = %q; want https://ca.tld/acme/challenge/publickey/id1", c.URI) + } + if c.Token != "token1" { + t.Errorf("c.Token = %q; want token1", c.Token) + } + + c = auth.Challenges[1] + if c.Type != "tls-sni-01" { + t.Errorf("c.Type = %q; want tls-sni-01", c.Type) + } + if c.URI != "https://ca.tld/acme/challenge/publickey/id2" { + t.Errorf("c.URI = %q; want https://ca.tld/acme/challenge/publickey/id2", c.URI) + } + if c.Token != "token2" { + t.Errorf("c.Token = %q; want token2", 
c.Token) + } + + combs := [][]int{{0}, {1}} + if !reflect.DeepEqual(auth.Combinations, combs) { + t.Errorf("auth.Combinations: %+v\nwant: %+v\n", auth.Combinations, combs) + } + + }) + } +} + +func TestAuthorizeValid(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("Replay-Nonce", "nonce") + return + } + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"status":"valid"}`)) + })) + defer ts.Close() + client := Client{ + Key: testKey, + DirectoryURL: ts.URL, + dir: &Directory{AuthzURL: ts.URL}, + } + _, err := client.Authorize(context.Background(), "example.com") + if err != nil { + t.Errorf("err = %v", err) + } +} + +func TestWaitAuthorization(t *testing.T) { + t.Run("wait loop", func(t *testing.T) { + var count int + authz, err := runWaitAuthorization(context.Background(), t, func(w http.ResponseWriter, r *http.Request) { + count++ + w.Header().Set("Retry-After", "0") + if count > 1 { + fmt.Fprintf(w, `{"status":"valid"}`) + return + } + fmt.Fprintf(w, `{"status":"pending"}`) + }) + if err != nil { + t.Fatalf("non-nil error: %v", err) + } + if authz == nil { + t.Fatal("authz is nil") + } + }) + t.Run("invalid status", func(t *testing.T) { + _, err := runWaitAuthorization(context.Background(), t, func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{"status":"invalid"}`) + }) + if _, ok := err.(*AuthorizationError); !ok { + t.Errorf("err is %v (%T); want non-nil *AuthorizationError", err, err) + } + }) + t.Run("invalid status with error returns the authorization error", func(t *testing.T) { + _, err := runWaitAuthorization(context.Background(), t, func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ + "type": "dns-01", + "status": "invalid", + "error": { + "type": "urn:ietf:params:acme:error:caa", + "detail": "CAA record for prevents issuance", + "status": 403 + }, + "url": "https://acme-v02.api.letsencrypt.org/acme/chall-v3/xxx/xxx", + "token": "xxx", + "validationRecord": [ + { + "hostname": "" + } + ] + }`) + }) + + want := &AuthorizationError{ + Errors: []error{ + (&wireError{ + Status: 403, + Type: "urn:ietf:params:acme:error:caa", + Detail: "CAA record for prevents issuance", + }).error(nil), + }, + } + + _, ok := err.(*AuthorizationError) + if !ok { + t.Errorf("err is %T; want non-nil *AuthorizationError", err) + } + + if err.Error() != want.Error() { + t.Errorf("err is %v; want %v", err, want) + } + }) + t.Run("non-retriable error", func(t *testing.T) { + const code = http.StatusBadRequest + _, err := runWaitAuthorization(context.Background(), t, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(code) + }) + res, ok := err.(*Error) + if !ok { + t.Fatalf("err is %v (%T); want a non-nil *Error", err, err) + } + if res.StatusCode != code { + t.Errorf("res.StatusCode = %d; want %d", res.StatusCode, code) + } + }) + for _, code := range []int{http.StatusTooManyRequests, http.StatusInternalServerError} { + t.Run(fmt.Sprintf("retriable %d error", code), func(t *testing.T) { + var count int + authz, err := runWaitAuthorization(context.Background(), t, func(w http.ResponseWriter, r *http.Request) { + count++ + w.Header().Set("Retry-After", "0") + if count > 1 { + fmt.Fprintf(w, `{"status":"valid"}`) + return + } + w.WriteHeader(code) + }) + if err != nil { + t.Fatalf("non-nil error: %v", err) + } + if authz == nil { + t.Fatal("authz is nil") + } + }) + } + t.Run("context cancel", func(t *testing.T) { + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + _, err := runWaitAuthorization(ctx, t, func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Retry-After", "60") + fmt.Fprintf(w, `{"status":"pending"}`) + time.AfterFunc(1*time.Millisecond, cancel) + }) + if err == nil { + t.Error("err is nil") + } + }) +} + +func runWaitAuthorization(ctx context.Context, t *testing.T, h http.HandlerFunc) (*Authorization, error) { + t.Helper() + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Replay-Nonce", fmt.Sprintf("bad-test-nonce-%v", time.Now().UnixNano())) + h(w, r) + })) + defer ts.Close() + + client := &Client{ + Key: testKey, + DirectoryURL: ts.URL, + dir: &Directory{}, + KID: "some-key-id", // set to avoid lookup attempt + } + return client.WaitAuthorization(ctx, ts.URL) +} + +func TestRevokeAuthorization(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("Replay-Nonce", "nonce") + return + } + switch r.URL.Path { + case "/1": + var req struct { + Resource string + Status string + Delete bool + } + decodeJWSRequest(t, &req, r.Body) + if req.Resource != "authz" { + t.Errorf("req.Resource = %q; want authz", req.Resource) + } + if req.Status != "deactivated" { + t.Errorf("req.Status = %q; want deactivated", req.Status) + } + if !req.Delete { + t.Errorf("req.Delete is false") + } + case "/2": + w.WriteHeader(http.StatusBadRequest) + } + })) + defer ts.Close() + client := &Client{ + Key: testKey, + DirectoryURL: ts.URL, // don't dial outside of localhost + dir: &Directory{}, // don't do discovery + } + ctx := context.Background() + if err := client.RevokeAuthorization(ctx, ts.URL+"/1"); err != nil { + t.Errorf("err = %v", err) + } + if client.RevokeAuthorization(ctx, ts.URL+"/2") == nil { + t.Error("nil error") + } +} + +func TestFetchCertCancel(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + <-r.Context().Done() + w.Header().Set("Retry-After", "0") + w.WriteHeader(http.StatusBadRequest) + })) + defer ts.Close() + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + var err error + go func() { + cl := newTestClient() + _, err = cl.FetchCert(ctx, ts.URL, false) + close(done) + }() + cancel() + <-done + if err != context.Canceled { + t.Errorf("err = %v; want %v", err, context.Canceled) + } +} + +func TestFetchCertDepth(t *testing.T) { + var count byte + var ts *httptest.Server + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count++ + if count > maxChainLen+1 { + t.Errorf("count = %d; want at most %d", count, maxChainLen+1) + w.WriteHeader(http.StatusInternalServerError) + } + w.Header().Set("Link", fmt.Sprintf("<%s>;rel=up", ts.URL)) + w.Write([]byte{count}) + })) + defer ts.Close() + cl := newTestClient() + _, err := cl.FetchCert(context.Background(), ts.URL, true) + if err == nil { + t.Errorf("err is nil") + } +} + +func TestFetchCertBreadth(t *testing.T) { + var ts *httptest.Server + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for i := 0; i < maxChainLen+1; i++ { + w.Header().Add("Link", fmt.Sprintf("<%s>;rel=up", ts.URL)) + } + w.Write([]byte{1}) + })) + defer ts.Close() + cl := newTestClient() + _, err := cl.FetchCert(context.Background(), ts.URL, true) + if err == nil { + t.Errorf("err is nil") + } +} + +func TestFetchCertSize(t 
*testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + b := bytes.Repeat([]byte{1}, maxCertSize+1) + w.Write(b) + })) + defer ts.Close() + cl := newTestClient() + _, err := cl.FetchCert(context.Background(), ts.URL, false) + if err == nil { + t.Errorf("err is nil") + } +} + +const ( + leafPEM = `-----BEGIN CERTIFICATE----- +MIIEizCCAvOgAwIBAgIRAITApw7R8HSs7GU7cj8dEyUwDQYJKoZIhvcNAQELBQAw +gYUxHjAcBgNVBAoTFW1rY2VydCBkZXZlbG9wbWVudCBDQTEtMCsGA1UECwwkY3Bh +bG1lckBwdW1wa2luLmxvY2FsIChDaHJpcyBQYWxtZXIpMTQwMgYDVQQDDCtta2Nl +cnQgY3BhbG1lckBwdW1wa2luLmxvY2FsIChDaHJpcyBQYWxtZXIpMB4XDTIzMDcx +MjE4MjIxNloXDTI1MTAxMjE4MjIxNlowWDEnMCUGA1UEChMebWtjZXJ0IGRldmVs +b3BtZW50IGNlcnRpZmljYXRlMS0wKwYDVQQLDCRjcGFsbWVyQHB1bXBraW4ubG9j +YWwgKENocmlzIFBhbG1lcikwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDNDO8P4MI9jaqVcPtF8C4GgHnTP5EK3U9fgyGApKGxTpicMQkA6z4GXwUP/Fvq +7RuCU9Wg7By5VetKIHF7FxkxWkUMrssr7mV8v6mRCh/a5GqDs14aj5ucjLQAJV74 +tLAdrCiijQ1fkPWc82fob+LkfKWGCWw7Cxf6ZtEyC8jz/DnfQXUvOiZS729ndGF7 +FobKRfIoirD+GI2NTYIp3LAUFSPR6HXTe7HAg8J81VoUKli8z504+FebfMmHePm/ +zIfiI0njAj4czOlZD56/oLsV0WRUizFjafHHUFz1HVdfFw8Qf9IOOTydYOe8M5i0 +lVbVO5G+HP+JDn3cr9MT41B9AgMBAAGjgaEwgZ4wDgYDVR0PAQH/BAQDAgWgMBMG +A1UdJQQMMAoGCCsGAQUFBwMBMB8GA1UdIwQYMBaAFPpL4Q0O7Z7voTkjn2rrFCsf +s8TbMFYGA1UdEQRPME2CC2V4YW1wbGUuY29tgg0qLmV4YW1wbGUuY29tggxleGFt +cGxlLnRlc3SCCWxvY2FsaG9zdIcEfwAAAYcQAAAAAAAAAAAAAAAAAAAAATANBgkq +hkiG9w0BAQsFAAOCAYEAMlOb7lrHuSxwcnAu7mL1ysTGqKn1d2TyDJAN5W8YFY+4 +XLpofNkK2UzZ0t9LQRnuFUcjmfqmfplh5lpC7pKmtL4G5Qcdc+BczQWcopbxd728 +sht9BKRkH+Bo1I+1WayKKNXW+5bsMv4CH641zxaMBlzjEnPvwKkNaGLMH3x5lIeX +GGgkKNXwVtINmyV+lTNVtu2IlHprxJGCjRfEuX7mEv6uRnqz3Wif+vgyh3MBgM/1 +dUOsTBNH4a6Jl/9VPSOfRdQOStqIlwTa/J1bhTvivsYt1+eWjLnsQJLgZQqwKvYH +BJ30gAk1oNnuSkx9dHbx4mO+4mB9oIYUALXUYakb8JHTOnuMSj9qelVj5vjVxl9q +KRitptU+kLYRA4HSgUXrhDIm4Q6D/w8/ascPqQ3HxPIDFLe+gTofEjqnnsnQB29L +gWpI8l5/MtXAOMdW69eEovnADc2pgaiif0T+v9nNKBc5xfDZHnrnqIqVzQEwL5Qv +niQI8IsWD5LcQ1Eg7kCq +-----END CERTIFICATE-----` +) + +func TestGetRenewalURL(t *testing.T) { + leaf, _ := pem.Decode([]byte(leafPEM)) + + parsedLeaf, err := x509.ParseCertificate(leaf.Bytes) + if err != nil { + t.Fatal(err) + } + + client := newTestClientWithMockDirectory() + urlString, err := client.getRenewalURL(parsedLeaf) + if err != nil { + t.Fatal(err) + } + + parsedURL, err := url.Parse(urlString) + if err != nil { + t.Fatal(err) + } + if scheme := parsedURL.Scheme; scheme == "" { + t.Fatalf("malformed URL scheme: %q from %q", scheme, urlString) + } + if host := parsedURL.Host; host == "" { + t.Fatalf("malformed URL host: %q from %q", host, urlString) + } + if parsedURL.RawQuery != "" { + t.Fatalf("malformed URL: should not have a query") + } + path := parsedURL.EscapedPath() + slash := strings.LastIndex(path, "/") + if slash == -1 { + t.Fatalf("malformed URL path: %q from %q", path, urlString) + } + certID := path[slash+1:] + if certID == "" { + t.Fatalf("missing certificate identifier in URL path: %q from %q", path, urlString) + } + certIDParts := strings.Split(certID, ".") + if len(certIDParts) != 2 { + t.Fatalf("certificate identifier should consist of 2 base64-encoded values separated by a dot: %q from %q", certID, urlString) + } + if _, err := base64.RawURLEncoding.DecodeString(certIDParts[0]); err != nil { + t.Fatalf("malformed AKI part in certificate identifier: %q from %q: %v", certIDParts[0], urlString, err) + } + if _, err := base64.RawURLEncoding.DecodeString(certIDParts[1]); err != nil { + t.Fatalf("malformed Serial part in certificate 
identifier: %q from %q: %v", certIDParts[1], urlString, err) + } + +} + +func TestUnmarshalRenewalInfo(t *testing.T) { + renewalInfoJSON := `{ + "suggestedWindow": { + "start": "2021-01-03T00:00:00Z", + "end": "2021-01-07T00:00:00Z" + }, + "explanationURL": "https://example.com/docs/example-mass-reissuance-event" + }` + expectedStart := time.Date(2021, time.January, 3, 0, 0, 0, 0, time.UTC) + expectedEnd := time.Date(2021, time.January, 7, 0, 0, 0, 0, time.UTC) + + var info RenewalInfo + if err := json.Unmarshal([]byte(renewalInfoJSON), &info); err != nil { + t.Fatal(err) + } + if _, err := url.Parse(info.ExplanationURL); err != nil { + t.Fatal(err) + } + if !info.SuggestedWindow.Start.Equal(expectedStart) { + t.Fatalf("%v != %v", expectedStart, info.SuggestedWindow.Start) + } + if !info.SuggestedWindow.End.Equal(expectedEnd) { + t.Fatalf("%v != %v", expectedEnd, info.SuggestedWindow.End) + } +} + +func TestNonce_add(t *testing.T) { + var c Client + c.addNonce(http.Header{"Replay-Nonce": {"nonce"}}) + c.addNonce(http.Header{"Replay-Nonce": {}}) + c.addNonce(http.Header{"Replay-Nonce": {"nonce"}}) + + nonces := map[string]struct{}{"nonce": {}} + if !reflect.DeepEqual(c.nonces, nonces) { + t.Errorf("c.nonces = %q; want %q", c.nonces, nonces) + } +} + +func TestNonce_addMax(t *testing.T) { + c := &Client{nonces: make(map[string]struct{})} + for i := 0; i < maxNonces; i++ { + c.nonces[fmt.Sprintf("%d", i)] = struct{}{} + } + c.addNonce(http.Header{"Replay-Nonce": {"nonce"}}) + if n := len(c.nonces); n != maxNonces { + t.Errorf("len(c.nonces) = %d; want %d", n, maxNonces) + } +} + +func TestNonce_fetch(t *testing.T) { + tests := []struct { + code int + nonce string + }{ + {http.StatusOK, "nonce1"}, + {http.StatusBadRequest, "nonce2"}, + {http.StatusOK, ""}, + } + var i int + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "HEAD" { + t.Errorf("%d: r.Method = %q; want HEAD", i, r.Method) + } + w.Header().Set("Replay-Nonce", tests[i].nonce) + w.WriteHeader(tests[i].code) + })) + defer ts.Close() + for ; i < len(tests); i++ { + test := tests[i] + c := newTestClient() + n, err := c.fetchNonce(context.Background(), ts.URL) + if n != test.nonce { + t.Errorf("%d: n=%q; want %q", i, n, test.nonce) + } + switch { + case err == nil && test.nonce == "": + t.Errorf("%d: n=%q, err=%v; want non-nil error", i, n, err) + case err != nil && test.nonce != "": + t.Errorf("%d: n=%q, err=%v; want %q", i, n, err, test.nonce) + } + } +} + +func TestNonce_fetchError(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusTooManyRequests) + })) + defer ts.Close() + c := newTestClient() + _, err := c.fetchNonce(context.Background(), ts.URL) + e, ok := err.(*Error) + if !ok { + t.Fatalf("err is %T; want *Error", err) + } + if e.StatusCode != http.StatusTooManyRequests { + t.Errorf("e.StatusCode = %d; want %d", e.StatusCode, http.StatusTooManyRequests) + } +} + +func TestNonce_popWhenEmpty(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "HEAD" { + t.Errorf("r.Method = %q; want HEAD", r.Method) + } + switch r.URL.Path { + case "/dir-with-nonce": + w.Header().Set("Replay-Nonce", "dirnonce") + case "/new-nonce": + w.Header().Set("Replay-Nonce", "newnonce") + case "/dir-no-nonce", "/empty": + // No nonce in the header. 
+ default: + t.Errorf("Unknown URL: %s", r.URL) + } + })) + defer ts.Close() + ctx := context.Background() + + tt := []struct { + dirURL, popURL, nonce string + wantOK bool + }{ + {ts.URL + "/dir-with-nonce", ts.URL + "/new-nonce", "dirnonce", true}, + {ts.URL + "/dir-no-nonce", ts.URL + "/new-nonce", "newnonce", true}, + {ts.URL + "/dir-no-nonce", ts.URL + "/empty", "", false}, + } + for _, test := range tt { + t.Run(fmt.Sprintf("nonce:%s wantOK:%v", test.nonce, test.wantOK), func(t *testing.T) { + c := Client{DirectoryURL: test.dirURL} + v, err := c.popNonce(ctx, test.popURL) + if !test.wantOK { + if err == nil { + t.Fatalf("c.popNonce(%q) returned nil error", test.popURL) + } + return + } + if err != nil { + t.Fatalf("c.popNonce(%q): %v", test.popURL, err) + } + if v != test.nonce { + t.Errorf("c.popNonce(%q) = %q; want %q", test.popURL, v, test.nonce) + } + }) + } +} + +func TestLinkHeader(t *testing.T) { + h := http.Header{"Link": { + `;rel="next"`, + `; rel=recover`, + `; foo=bar; rel="terms-of-service"`, + `;rel="next"`, + }} + tests := []struct { + rel string + out []string + }{ + {"next", []string{"https://example.com/acme/new-authz", "dup"}}, + {"recover", []string{"https://example.com/acme/recover-reg"}}, + {"terms-of-service", []string{"https://example.com/acme/terms"}}, + {"empty", nil}, + } + for i, test := range tests { + if v := linkHeader(h, test.rel); !reflect.DeepEqual(v, test.out) { + t.Errorf("%d: linkHeader(%q): %v; want %v", i, test.rel, v, test.out) + } + } +} + +func TestTLSSNI01ChallengeCert(t *testing.T) { + const ( + token = "evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA" + // echo -n | shasum -a 256 + san = "dbbd5eefe7b4d06eb9d1d9f5acb4c7cd.a27d320e4b30332f0b6cb441734ad7b0.acme.invalid" + ) + + tlscert, name, err := newTestClient().TLSSNI01ChallengeCert(token) + if err != nil { + t.Fatal(err) + } + + if n := len(tlscert.Certificate); n != 1 { + t.Fatalf("len(tlscert.Certificate) = %d; want 1", n) + } + cert, err := x509.ParseCertificate(tlscert.Certificate[0]) + if err != nil { + t.Fatal(err) + } + if len(cert.DNSNames) != 1 || cert.DNSNames[0] != san { + t.Fatalf("cert.DNSNames = %v; want %q", cert.DNSNames, san) + } + if cert.DNSNames[0] != name { + t.Errorf("cert.DNSNames[0] != name: %q vs %q", cert.DNSNames[0], name) + } + if cn := cert.Subject.CommonName; cn != san { + t.Errorf("cert.Subject.CommonName = %q; want %q", cn, san) + } +} + +func TestTLSSNI02ChallengeCert(t *testing.T) { + const ( + token = "evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA" + // echo -n evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA | shasum -a 256 + sanA = "7ea0aaa69214e71e02cebb18bb867736.09b730209baabf60e43d4999979ff139.token.acme.invalid" + // echo -n | shasum -a 256 + sanB = "dbbd5eefe7b4d06eb9d1d9f5acb4c7cd.a27d320e4b30332f0b6cb441734ad7b0.ka.acme.invalid" + ) + + tlscert, name, err := newTestClient().TLSSNI02ChallengeCert(token) + if err != nil { + t.Fatal(err) + } + + if n := len(tlscert.Certificate); n != 1 { + t.Fatalf("len(tlscert.Certificate) = %d; want 1", n) + } + cert, err := x509.ParseCertificate(tlscert.Certificate[0]) + if err != nil { + t.Fatal(err) + } + names := []string{sanA, sanB} + if !reflect.DeepEqual(cert.DNSNames, names) { + t.Fatalf("cert.DNSNames = %v;\nwant %v", cert.DNSNames, names) + } + sort.Strings(cert.DNSNames) + i := sort.SearchStrings(cert.DNSNames, name) + if i >= len(cert.DNSNames) || cert.DNSNames[i] != name { + t.Errorf("%v doesn't have %q", cert.DNSNames, name) + } + if cn := cert.Subject.CommonName; cn != sanA { + t.Errorf("CommonName = %q; 
want %q", cn, sanA) + } +} + +func TestTLSALPN01ChallengeCert(t *testing.T) { + const ( + token = "evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA" + keyAuth = "evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA." + testKeyECThumbprint + // echo -n | shasum -a 256 + h = "0420dbbd5eefe7b4d06eb9d1d9f5acb4c7cda27d320e4b30332f0b6cb441734ad7b0" + domain = "example.com" + ) + + extValue, err := hex.DecodeString(h) + if err != nil { + t.Fatal(err) + } + + tlscert, err := newTestClient().TLSALPN01ChallengeCert(token, domain) + if err != nil { + t.Fatal(err) + } + + if n := len(tlscert.Certificate); n != 1 { + t.Fatalf("len(tlscert.Certificate) = %d; want 1", n) + } + cert, err := x509.ParseCertificate(tlscert.Certificate[0]) + if err != nil { + t.Fatal(err) + } + names := []string{domain} + if !reflect.DeepEqual(cert.DNSNames, names) { + t.Fatalf("cert.DNSNames = %v;\nwant %v", cert.DNSNames, names) + } + if cn := cert.Subject.CommonName; cn != domain { + t.Errorf("CommonName = %q; want %q", cn, domain) + } + acmeExts := []pkix.Extension{} + for _, ext := range cert.Extensions { + if idPeACMEIdentifier.Equal(ext.Id) { + acmeExts = append(acmeExts, ext) + } + } + if len(acmeExts) != 1 { + t.Errorf("acmeExts = %v; want exactly one", acmeExts) + } + if !acmeExts[0].Critical { + t.Errorf("acmeExt.Critical = %v; want true", acmeExts[0].Critical) + } + if bytes.Compare(acmeExts[0].Value, extValue) != 0 { + t.Errorf("acmeExt.Value = %v; want %v", acmeExts[0].Value, extValue) + } + +} + +func TestTLSChallengeCertOpt(t *testing.T) { + key, err := rsa.GenerateKey(rand.Reader, 512) + if err != nil { + t.Fatal(err) + } + tmpl := &x509.Certificate{ + SerialNumber: big.NewInt(2), + Subject: pkix.Name{Organization: []string{"Test"}}, + DNSNames: []string{"should-be-overwritten"}, + } + opts := []CertOption{WithKey(key), WithTemplate(tmpl)} + + client := newTestClient() + cert1, _, err := client.TLSSNI01ChallengeCert("token", opts...) + if err != nil { + t.Fatal(err) + } + cert2, _, err := client.TLSSNI02ChallengeCert("token", opts...) + if err != nil { + t.Fatal(err) + } + + for i, tlscert := range []tls.Certificate{cert1, cert2} { + // verify generated cert private key + tlskey, ok := tlscert.PrivateKey.(*rsa.PrivateKey) + if !ok { + t.Errorf("%d: tlscert.PrivateKey is %T; want *rsa.PrivateKey", i, tlscert.PrivateKey) + continue + } + if tlskey.D.Cmp(key.D) != 0 { + t.Errorf("%d: tlskey.D = %v; want %v", i, tlskey.D, key.D) + } + // verify generated cert public key + x509Cert, err := x509.ParseCertificate(tlscert.Certificate[0]) + if err != nil { + t.Errorf("%d: %v", i, err) + continue + } + tlspub, ok := x509Cert.PublicKey.(*rsa.PublicKey) + if !ok { + t.Errorf("%d: x509Cert.PublicKey is %T; want *rsa.PublicKey", i, x509Cert.PublicKey) + continue + } + if tlspub.N.Cmp(key.N) != 0 { + t.Errorf("%d: tlspub.N = %v; want %v", i, tlspub.N, key.N) + } + // verify template option + sn := big.NewInt(2) + if x509Cert.SerialNumber.Cmp(sn) != 0 { + t.Errorf("%d: SerialNumber = %v; want %v", i, x509Cert.SerialNumber, sn) + } + org := []string{"Test"} + if !reflect.DeepEqual(x509Cert.Subject.Organization, org) { + t.Errorf("%d: Subject.Organization = %+v; want %+v", i, x509Cert.Subject.Organization, org) + } + for _, v := range x509Cert.DNSNames { + if !strings.HasSuffix(v, ".acme.invalid") { + t.Errorf("%d: invalid DNSNames element: %q", i, v) + } + } + } +} + +func TestHTTP01Challenge(t *testing.T) { + const ( + token = "xxx" + // thumbprint is precomputed for testKeyEC in jws_test.go + value = token + "." 
+ testKeyECThumbprint
+		urlpath = "/.well-known/acme-challenge/" + token
+	)
+	client := newTestClient()
+	val, err := client.HTTP01ChallengeResponse(token)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if val != value {
+		t.Errorf("val = %q; want %q", val, value)
+	}
+	if path := client.HTTP01ChallengePath(token); path != urlpath {
+		t.Errorf("path = %q; want %q", path, urlpath)
+	}
+}
+
+func TestDNS01ChallengeRecord(t *testing.T) {
+	// echo -n xxx.<testKeyECThumbprint> | \
+	//	openssl dgst -binary -sha256 | \
+	//	base64 | tr -d '=' | tr '/+' '_-'
+	const value = "8DERMexQ5VcdJ_prpPiA0mVdp7imgbCgjsG4SqqNMIo"
+
+	val, err := newTestClient().DNS01ChallengeRecord("xxx")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if val != value {
+		t.Errorf("val = %q; want %q", val, value)
+	}
+}
diff --git a/tempfork/acme/http.go b/tempfork/acme/http.go
new file mode 100644
index 000000000..58836e5d3
--- /dev/null
+++ b/tempfork/acme/http.go
@@ -0,0 +1,325 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package acme
+
+import (
+	"bytes"
+	"context"
+	"crypto"
+	"crypto/rand"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math/big"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// retryTimer encapsulates common logic for retrying unsuccessful requests.
+// It is not safe for concurrent use.
+type retryTimer struct {
+	// backoffFn provides backoff delay sequence for retries.
+	// See Client.RetryBackoff doc comment.
+	backoffFn func(n int, r *http.Request, res *http.Response) time.Duration
+	// n is the current retry attempt.
+	n int
+}
+
+func (t *retryTimer) inc() {
+	t.n++
+}
+
+// backoff pauses the current goroutine as described in Client.RetryBackoff.
+func (t *retryTimer) backoff(ctx context.Context, r *http.Request, res *http.Response) error {
+	d := t.backoffFn(t.n, r, res)
+	if d <= 0 {
+		return fmt.Errorf("acme: no more retries for %s; tried %d time(s)", r.URL, t.n)
+	}
+	wakeup := time.NewTimer(d)
+	defer wakeup.Stop()
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-wakeup.C:
+		return nil
+	}
+}
+
+func (c *Client) retryTimer() *retryTimer {
+	f := c.RetryBackoff
+	if f == nil {
+		f = defaultBackoff
+	}
+	return &retryTimer{backoffFn: f}
+}
+
+// defaultBackoff provides default Client.RetryBackoff implementation
+// using a truncated exponential backoff algorithm,
+// as described in Client.RetryBackoff.
+//
+// The n argument is always bounded between 1 and 30.
+// The returned value is always greater than 0.
+func defaultBackoff(n int, r *http.Request, res *http.Response) time.Duration {
+	const max = 10 * time.Second
+	var jitter time.Duration
+	if x, err := rand.Int(rand.Reader, big.NewInt(1000)); err == nil {
+		// Set the minimum to 1ms to avoid a case where
+		// an invalid Retry-After value is parsed into 0 below,
+		// resulting in the 0 returned value which would unintentionally
+		// stop the retries.
+		jitter = (1 + time.Duration(x.Int64())) * time.Millisecond
+	}
+	if v, ok := res.Header["Retry-After"]; ok {
+		return retryAfter(v[0]) + jitter
+	}
+
+	if n < 1 {
+		n = 1
+	}
+	if n > 30 {
+		n = 30
+	}
+	d := time.Duration(1<<uint(n-1))*time.Second + jitter
+	if d > max {
+		return max
+	}
+	return d
+}
+
+// retryAfter parses a Retry-After HTTP header value,
+// trying to convert v into an int (seconds) or use http.ParseTime otherwise.
+// It returns zero value if v cannot be parsed.
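+//
+// A sketch of overriding the default policy above (an illustration, not part of
+// the upstream file; the key variable and limits are placeholders). With
+// defaultBackoff, attempt n=1 waits about 1s, n=2 about 2s, n=3 about 4s, capped
+// at 10s, plus up to 1s of jitter, unless the server sends a Retry-After header.
+// A caller can substitute its own policy:
+//
+//	cl := &Client{
+//		Key: key,
+//		RetryBackoff: func(n int, r *http.Request, res *http.Response) time.Duration {
+//			if n > 5 {
+//				return 0 // a non-positive duration stops the retries
+//			}
+//			return time.Duration(n) * time.Second
+//		},
+//	}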
+func retryAfter(v string) time.Duration { + if i, err := strconv.Atoi(v); err == nil { + return time.Duration(i) * time.Second + } + t, err := http.ParseTime(v) + if err != nil { + return 0 + } + return t.Sub(timeNow()) +} + +// resOkay is a function that reports whether the provided response is okay. +// It is expected to keep the response body unread. +type resOkay func(*http.Response) bool + +// wantStatus returns a function which reports whether the code +// matches the status code of a response. +func wantStatus(codes ...int) resOkay { + return func(res *http.Response) bool { + for _, code := range codes { + if code == res.StatusCode { + return true + } + } + return false + } +} + +// get issues an unsigned GET request to the specified URL. +// It returns a non-error value only when ok reports true. +// +// get retries unsuccessful attempts according to c.RetryBackoff +// until the context is done or a non-retriable error is received. +func (c *Client) get(ctx context.Context, url string, ok resOkay) (*http.Response, error) { + retry := c.retryTimer() + for { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + res, err := c.doNoRetry(ctx, req) + switch { + case err != nil: + return nil, err + case ok(res): + return res, nil + case isRetriable(res.StatusCode): + retry.inc() + resErr := responseError(res) + res.Body.Close() + // Ignore the error value from retry.backoff + // and return the one from last retry, as received from the CA. + if retry.backoff(ctx, req, res) != nil { + return nil, resErr + } + default: + defer res.Body.Close() + return nil, responseError(res) + } + } +} + +// postAsGet is POST-as-GET, a replacement for GET in RFC 8555 +// as described in https://tools.ietf.org/html/rfc8555#section-6.3. +// It makes a POST request in KID form with zero JWS payload. +// See nopayload doc comments in jws.go. +func (c *Client) postAsGet(ctx context.Context, url string, ok resOkay) (*http.Response, error) { + return c.post(ctx, nil, url, noPayload, ok) +} + +// post issues a signed POST request in JWS format using the provided key +// to the specified URL. If key is nil, c.Key is used instead. +// It returns a non-error value only when ok reports true. +// +// post retries unsuccessful attempts according to c.RetryBackoff +// until the context is done or a non-retriable error is received. +// It uses postNoRetry to make individual requests. +func (c *Client) post(ctx context.Context, key crypto.Signer, url string, body interface{}, ok resOkay) (*http.Response, error) { + retry := c.retryTimer() + for { + res, req, err := c.postNoRetry(ctx, key, url, body) + if err != nil { + return nil, err + } + if ok(res) { + return res, nil + } + resErr := responseError(res) + res.Body.Close() + switch { + // Check for bad nonce before isRetriable because it may have been returned + // with an unretriable response code such as 400 Bad Request. + case isBadNonce(resErr): + // Consider any previously stored nonce values to be invalid. + c.clearNonces() + case !isRetriable(res.StatusCode): + return nil, resErr + } + retry.inc() + // Ignore the error value from retry.backoff + // and return the one from last retry, as received from the CA. + if err := retry.backoff(ctx, req, res); err != nil { + return nil, resErr + } + } +} + +// postNoRetry signs the body with the given key and POSTs it to the provided url. +// It is used by c.post to retry unsuccessful attempts. +// The body argument must be JSON-serializable. 
+// +// If key argument is nil, c.Key is used to sign the request. +// If key argument is nil and c.accountKID returns a non-zero keyID, +// the request is sent in KID form. Otherwise, JWK form is used. +// +// In practice, when interfacing with RFC-compliant CAs most requests are sent in KID form +// and JWK is used only when KID is unavailable: new account endpoint and certificate +// revocation requests authenticated by a cert key. +// See jwsEncodeJSON for other details. +func (c *Client) postNoRetry(ctx context.Context, key crypto.Signer, url string, body interface{}) (*http.Response, *http.Request, error) { + kid := noKeyID + if key == nil { + if c.Key == nil { + return nil, nil, errors.New("acme: Client.Key must be populated to make POST requests") + } + key = c.Key + kid = c.accountKID(ctx) + } + nonce, err := c.popNonce(ctx, url) + if err != nil { + return nil, nil, err + } + b, err := jwsEncodeJSON(body, key, kid, nonce, url) + if err != nil { + return nil, nil, err + } + req, err := http.NewRequest("POST", url, bytes.NewReader(b)) + if err != nil { + return nil, nil, err + } + req.Header.Set("Content-Type", "application/jose+json") + res, err := c.doNoRetry(ctx, req) + if err != nil { + return nil, nil, err + } + c.addNonce(res.Header) + return res, req, nil +} + +// doNoRetry issues a request req, replacing its context (if any) with ctx. +func (c *Client) doNoRetry(ctx context.Context, req *http.Request) (*http.Response, error) { + req.Header.Set("User-Agent", c.userAgent()) + res, err := c.httpClient().Do(req.WithContext(ctx)) + if err != nil { + select { + case <-ctx.Done(): + // Prefer the unadorned context error. + // (The acme package had tests assuming this, previously from ctxhttp's + // behavior, predating net/http supporting contexts natively) + // TODO(bradfitz): reconsider this in the future. But for now this + // requires no test updates. + return nil, ctx.Err() + default: + return nil, err + } + } + return res, nil +} + +func (c *Client) httpClient() *http.Client { + if c.HTTPClient != nil { + return c.HTTPClient + } + return http.DefaultClient +} + +// packageVersion is the version of the module that contains this package, for +// sending as part of the User-Agent header. It's set in version_go112.go. +var packageVersion string + +// userAgent returns the User-Agent header value. It includes the package name, +// the module version (if available), and the c.UserAgent value (if set). +func (c *Client) userAgent() string { + ua := "golang.org/x/crypto/acme" + if packageVersion != "" { + ua += "@" + packageVersion + } + if c.UserAgent != "" { + ua = c.UserAgent + " " + ua + } + return ua +} + +// isBadNonce reports whether err is an ACME "badnonce" error. +func isBadNonce(err error) bool { + // According to the spec badNonce is urn:ietf:params:acme:error:badNonce. + // However, ACME servers in the wild return their versions of the error. + // See https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.4 + // and https://github.com/letsencrypt/boulder/blob/0e07eacb/docs/acme-divergences.md#section-66. + ae, ok := err.(*Error) + return ok && strings.HasSuffix(strings.ToLower(ae.ProblemType), ":badnonce") +} + +// isRetriable reports whether a request can be retried +// based on the response status code. +// +// Note that a "bad nonce" error is returned with a non-retriable 400 Bad Request code. +// Callers should parse the response and check with isBadNonce. 
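+//
+// A sketch of how the two hooks above are supplied by callers (an illustration,
+// not part of the upstream file; the timeout and UA string are placeholders):
+//
+//	cl := &Client{
+//		Key:        key,
+//		HTTPClient: &http.Client{Timeout: 30 * time.Second},
+//		UserAgent:  "my-issuer/1.0",
+//	}
+//
+// Requests are then issued through the supplied http.Client and carry a
+// User-Agent like "my-issuer/1.0 golang.org/x/crypto/acme".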
+func isRetriable(code int) bool { + return code <= 399 || code >= 500 || code == http.StatusTooManyRequests +} + +// responseError creates an error of Error type from resp. +func responseError(resp *http.Response) error { + // don't care if ReadAll returns an error: + // json.Unmarshal will fail in that case anyway + b, _ := io.ReadAll(resp.Body) + e := &wireError{Status: resp.StatusCode} + if err := json.Unmarshal(b, e); err != nil { + // this is not a regular error response: + // populate detail with anything we received, + // e.Status will already contain HTTP response code value + e.Detail = string(b) + if e.Detail == "" { + e.Detail = resp.Status + } + } + return e.error(resp.Header) +} diff --git a/tempfork/acme/http_test.go b/tempfork/acme/http_test.go new file mode 100644 index 000000000..d124e4e21 --- /dev/null +++ b/tempfork/acme/http_test.go @@ -0,0 +1,255 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "testing" + "time" +) + +func TestDefaultBackoff(t *testing.T) { + tt := []struct { + nretry int + retryAfter string // Retry-After header + out time.Duration // expected min; max = min + jitter + }{ + {-1, "", time.Second}, // verify the lower bound is 1 + {0, "", time.Second}, // verify the lower bound is 1 + {100, "", 10 * time.Second}, // verify the ceiling + {1, "3600", time.Hour}, // verify the header value is used + {1, "", 1 * time.Second}, + {2, "", 2 * time.Second}, + {3, "", 4 * time.Second}, + {4, "", 8 * time.Second}, + } + for i, test := range tt { + r := httptest.NewRequest("GET", "/", nil) + resp := &http.Response{Header: http.Header{}} + if test.retryAfter != "" { + resp.Header.Set("Retry-After", test.retryAfter) + } + d := defaultBackoff(test.nretry, r, resp) + max := test.out + time.Second // + max jitter + if d < test.out || max < d { + t.Errorf("%d: defaultBackoff(%v) = %v; want between %v and %v", i, test.nretry, d, test.out, max) + } + } +} + +func TestErrorResponse(t *testing.T) { + s := `{ + "status": 400, + "type": "urn:acme:error:xxx", + "detail": "text" + }` + res := &http.Response{ + StatusCode: 400, + Status: "400 Bad Request", + Body: io.NopCloser(strings.NewReader(s)), + Header: http.Header{"X-Foo": {"bar"}}, + } + err := responseError(res) + v, ok := err.(*Error) + if !ok { + t.Fatalf("err = %+v (%T); want *Error type", err, err) + } + if v.StatusCode != 400 { + t.Errorf("v.StatusCode = %v; want 400", v.StatusCode) + } + if v.ProblemType != "urn:acme:error:xxx" { + t.Errorf("v.ProblemType = %q; want urn:acme:error:xxx", v.ProblemType) + } + if v.Detail != "text" { + t.Errorf("v.Detail = %q; want text", v.Detail) + } + if !reflect.DeepEqual(v.Header, res.Header) { + t.Errorf("v.Header = %+v; want %+v", v.Header, res.Header) + } +} + +func TestPostWithRetries(t *testing.T) { + var count int + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count++ + w.Header().Set("Replay-Nonce", fmt.Sprintf("nonce%d", count)) + if r.Method == "HEAD" { + // We expect the client to do 2 head requests to fetch + // nonces, one to start and another after getting badNonce + return + } + + head, err := decodeJWSHead(r.Body) + switch { + case err != nil: + t.Errorf("decodeJWSHead: %v", err) + case head.Nonce == "": + t.Error("head.Nonce is empty") + case head.Nonce == "nonce1": + // Return a badNonce 
error to force the call to retry. + w.Header().Set("Retry-After", "0") + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(`{"type":"urn:ietf:params:acme:error:badNonce"}`)) + return + } + // Make client.Authorize happy; we're not testing its result. + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"status":"valid"}`)) + })) + defer ts.Close() + + client := &Client{ + Key: testKey, + DirectoryURL: ts.URL, + dir: &Directory{AuthzURL: ts.URL}, + } + // This call will fail with badNonce, causing a retry + if _, err := client.Authorize(context.Background(), "example.com"); err != nil { + t.Errorf("client.Authorize 1: %v", err) + } + if count != 3 { + t.Errorf("total requests count: %d; want 3", count) + } +} + +func TestRetryErrorType(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Replay-Nonce", "nonce") + w.WriteHeader(http.StatusTooManyRequests) + w.Write([]byte(`{"type":"rateLimited"}`)) + })) + defer ts.Close() + + client := &Client{ + Key: testKey, + RetryBackoff: func(n int, r *http.Request, res *http.Response) time.Duration { + // Do no retries. + return 0 + }, + dir: &Directory{AuthzURL: ts.URL}, + } + + t.Run("post", func(t *testing.T) { + testRetryErrorType(t, func() error { + _, err := client.Authorize(context.Background(), "example.com") + return err + }) + }) + t.Run("get", func(t *testing.T) { + testRetryErrorType(t, func() error { + _, err := client.GetAuthorization(context.Background(), ts.URL) + return err + }) + }) +} + +func testRetryErrorType(t *testing.T, callClient func() error) { + t.Helper() + err := callClient() + if err == nil { + t.Fatal("client.Authorize returned nil error") + } + acmeErr, ok := err.(*Error) + if !ok { + t.Fatalf("err is %v (%T); want *Error", err, err) + } + if acmeErr.StatusCode != http.StatusTooManyRequests { + t.Errorf("acmeErr.StatusCode = %d; want %d", acmeErr.StatusCode, http.StatusTooManyRequests) + } + if acmeErr.ProblemType != "rateLimited" { + t.Errorf("acmeErr.ProblemType = %q; want 'rateLimited'", acmeErr.ProblemType) + } +} + +func TestRetryBackoffArgs(t *testing.T) { + const resCode = http.StatusInternalServerError + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Replay-Nonce", "test-nonce") + w.WriteHeader(resCode) + })) + defer ts.Close() + + // Canceled in backoff. 
+ ctx, cancel := context.WithCancel(context.Background()) + + var nretry int + backoff := func(n int, r *http.Request, res *http.Response) time.Duration { + nretry++ + if n != nretry { + t.Errorf("n = %d; want %d", n, nretry) + } + if nretry == 3 { + cancel() + } + + if r == nil { + t.Error("r is nil") + } + if res.StatusCode != resCode { + t.Errorf("res.StatusCode = %d; want %d", res.StatusCode, resCode) + } + return time.Millisecond + } + + client := &Client{ + Key: testKey, + RetryBackoff: backoff, + dir: &Directory{AuthzURL: ts.URL}, + } + if _, err := client.Authorize(ctx, "example.com"); err == nil { + t.Error("err is nil") + } + if nretry != 3 { + t.Errorf("nretry = %d; want 3", nretry) + } +} + +func TestUserAgent(t *testing.T) { + for _, custom := range []string{"", "CUSTOM_UA"} { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Log(r.UserAgent()) + if s := "golang.org/x/crypto/acme"; !strings.Contains(r.UserAgent(), s) { + t.Errorf("expected User-Agent to contain %q, got %q", s, r.UserAgent()) + } + if !strings.Contains(r.UserAgent(), custom) { + t.Errorf("expected User-Agent to contain %q, got %q", custom, r.UserAgent()) + } + + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"newOrder": "sure"}`)) + })) + defer ts.Close() + + client := &Client{ + Key: testKey, + DirectoryURL: ts.URL, + UserAgent: custom, + } + if _, err := client.Discover(context.Background()); err != nil { + t.Errorf("client.Discover: %v", err) + } + } +} + +func TestAccountKidLoop(t *testing.T) { + // if Client.postNoRetry is called with a nil key argument + // then Client.Key must be set, otherwise we fall into an + // infinite loop (which also causes a deadlock). + client := &Client{dir: &Directory{OrderURL: ":)"}} + _, _, err := client.postNoRetry(context.Background(), nil, "", nil) + if err == nil { + t.Fatal("Client.postNoRetry didn't fail with a nil key") + } + expected := "acme: Client.Key must be populated to make POST requests" + if err.Error() != expected { + t.Fatalf("Unexpected error returned: wanted %q, got %q", expected, err.Error()) + } +} diff --git a/tempfork/acme/jws.go b/tempfork/acme/jws.go new file mode 100644 index 000000000..b38828d85 --- /dev/null +++ b/tempfork/acme/jws.go @@ -0,0 +1,257 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "crypto" + "crypto/ecdsa" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + _ "crypto/sha512" // need for EC keys + "encoding/asn1" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "math/big" +) + +// KeyID is the account key identity provided by a CA during registration. +type KeyID string + +// noKeyID indicates that jwsEncodeJSON should compute and use JWK instead of a KID. +// See jwsEncodeJSON for details. +const noKeyID = KeyID("") + +// noPayload indicates jwsEncodeJSON will encode zero-length octet string +// in a JWS request. This is called POST-as-GET in RFC 8555 and is used to make +// authenticated GET requests via POSTing with an empty payload. +// See https://tools.ietf.org/html/rfc8555#section-6.3 for more details. +const noPayload = "" + +// noNonce indicates that the nonce should be omitted from the protected header. +// See jwsEncodeJSON for details. +const noNonce = "" + +// jsonWebSignature can be easily serialized into a JWS following +// https://tools.ietf.org/html/rfc7515#section-3.2. 
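+//
+// For illustration (values elided, not part of the upstream file), a serialized
+// JWS produced from this struct looks like:
+//
+//	{
+//		"protected": "<base64url of the protected header JSON>",
+//		"payload":   "<base64url of the claimset JSON>",
+//		"signature": "<base64url signature over protected.payload>"
+//	}
+//
+// where "protected" and "payload" are base64url-encoded JSON and "signature" is
+// the base64url-encoded signature over the string "protected" + "." + "payload".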
+type jsonWebSignature struct { + Protected string `json:"protected"` + Payload string `json:"payload"` + Sig string `json:"signature"` +} + +// jwsEncodeJSON signs claimset using provided key and a nonce. +// The result is serialized in JSON format containing either kid or jwk +// fields based on the provided KeyID value. +// +// The claimset is marshalled using json.Marshal unless it is a string. +// In which case it is inserted directly into the message. +// +// If kid is non-empty, its quoted value is inserted in the protected header +// as "kid" field value. Otherwise, JWK is computed using jwkEncode and inserted +// as "jwk" field value. The "jwk" and "kid" fields are mutually exclusive. +// +// If nonce is non-empty, its quoted value is inserted in the protected header. +// +// See https://tools.ietf.org/html/rfc7515#section-7. +func jwsEncodeJSON(claimset interface{}, key crypto.Signer, kid KeyID, nonce, url string) ([]byte, error) { + if key == nil { + return nil, errors.New("nil key") + } + alg, sha := jwsHasher(key.Public()) + if alg == "" || !sha.Available() { + return nil, ErrUnsupportedKey + } + headers := struct { + Alg string `json:"alg"` + KID string `json:"kid,omitempty"` + JWK json.RawMessage `json:"jwk,omitempty"` + Nonce string `json:"nonce,omitempty"` + URL string `json:"url"` + }{ + Alg: alg, + Nonce: nonce, + URL: url, + } + switch kid { + case noKeyID: + jwk, err := jwkEncode(key.Public()) + if err != nil { + return nil, err + } + headers.JWK = json.RawMessage(jwk) + default: + headers.KID = string(kid) + } + phJSON, err := json.Marshal(headers) + if err != nil { + return nil, err + } + phead := base64.RawURLEncoding.EncodeToString([]byte(phJSON)) + var payload string + if val, ok := claimset.(string); ok { + payload = val + } else { + cs, err := json.Marshal(claimset) + if err != nil { + return nil, err + } + payload = base64.RawURLEncoding.EncodeToString(cs) + } + hash := sha.New() + hash.Write([]byte(phead + "." + payload)) + sig, err := jwsSign(key, sha, hash.Sum(nil)) + if err != nil { + return nil, err + } + enc := jsonWebSignature{ + Protected: phead, + Payload: payload, + Sig: base64.RawURLEncoding.EncodeToString(sig), + } + return json.Marshal(&enc) +} + +// jwsWithMAC creates and signs a JWS using the given key and the HS256 +// algorithm. kid and url are included in the protected header. rawPayload +// should not be base64-URL-encoded. +func jwsWithMAC(key []byte, kid, url string, rawPayload []byte) (*jsonWebSignature, error) { + if len(key) == 0 { + return nil, errors.New("acme: cannot sign JWS with an empty MAC key") + } + header := struct { + Algorithm string `json:"alg"` + KID string `json:"kid"` + URL string `json:"url,omitempty"` + }{ + // Only HMAC-SHA256 is supported. + Algorithm: "HS256", + KID: kid, + URL: url, + } + rawProtected, err := json.Marshal(header) + if err != nil { + return nil, err + } + protected := base64.RawURLEncoding.EncodeToString(rawProtected) + payload := base64.RawURLEncoding.EncodeToString(rawPayload) + + h := hmac.New(sha256.New, key) + if _, err := h.Write([]byte(protected + "." + payload)); err != nil { + return nil, err + } + mac := h.Sum(nil) + + return &jsonWebSignature{ + Protected: protected, + Payload: payload, + Sig: base64.RawURLEncoding.EncodeToString(mac), + }, nil +} + +// jwkEncode encodes public part of an RSA or ECDSA key into a JWK. +// The result is also suitable for creating a JWK thumbprint. 
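+//
+// For a P-256 key the encoded result looks like (a worked illustration, values
+// elided, not part of the upstream file):
+//
+//	{"crv":"P-256","kty":"EC","x":"<base64url X>","y":"<base64url Y>"}
+//
+// The fixed field order matters because the same string is hashed to form the
+// RFC 7638 thumbprint. The JWK format itself is defined in: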
+// https://tools.ietf.org/html/rfc7517 +func jwkEncode(pub crypto.PublicKey) (string, error) { + switch pub := pub.(type) { + case *rsa.PublicKey: + // https://tools.ietf.org/html/rfc7518#section-6.3.1 + n := pub.N + e := big.NewInt(int64(pub.E)) + // Field order is important. + // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. + return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`, + base64.RawURLEncoding.EncodeToString(e.Bytes()), + base64.RawURLEncoding.EncodeToString(n.Bytes()), + ), nil + case *ecdsa.PublicKey: + // https://tools.ietf.org/html/rfc7518#section-6.2.1 + p := pub.Curve.Params() + n := p.BitSize / 8 + if p.BitSize%8 != 0 { + n++ + } + x := pub.X.Bytes() + if n > len(x) { + x = append(make([]byte, n-len(x)), x...) + } + y := pub.Y.Bytes() + if n > len(y) { + y = append(make([]byte, n-len(y)), y...) + } + // Field order is important. + // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. + return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`, + p.Name, + base64.RawURLEncoding.EncodeToString(x), + base64.RawURLEncoding.EncodeToString(y), + ), nil + } + return "", ErrUnsupportedKey +} + +// jwsSign signs the digest using the given key. +// The hash is unused for ECDSA keys. +func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) { + switch pub := key.Public().(type) { + case *rsa.PublicKey: + return key.Sign(rand.Reader, digest, hash) + case *ecdsa.PublicKey: + sigASN1, err := key.Sign(rand.Reader, digest, hash) + if err != nil { + return nil, err + } + + var rs struct{ R, S *big.Int } + if _, err := asn1.Unmarshal(sigASN1, &rs); err != nil { + return nil, err + } + + rb, sb := rs.R.Bytes(), rs.S.Bytes() + size := pub.Params().BitSize / 8 + if size%8 > 0 { + size++ + } + sig := make([]byte, size*2) + copy(sig[size-len(rb):], rb) + copy(sig[size*2-len(sb):], sb) + return sig, nil + } + return nil, ErrUnsupportedKey +} + +// jwsHasher indicates suitable JWS algorithm name and a hash function +// to use for signing a digest with the provided key. +// It returns ("", 0) if the key is not supported. +func jwsHasher(pub crypto.PublicKey) (string, crypto.Hash) { + switch pub := pub.(type) { + case *rsa.PublicKey: + return "RS256", crypto.SHA256 + case *ecdsa.PublicKey: + switch pub.Params().Name { + case "P-256": + return "ES256", crypto.SHA256 + case "P-384": + return "ES384", crypto.SHA384 + case "P-521": + return "ES512", crypto.SHA512 + } + } + return "", 0 +} + +// JWKThumbprint creates a JWK thumbprint out of pub +// as specified in https://tools.ietf.org/html/rfc7638. +func JWKThumbprint(pub crypto.PublicKey) (string, error) { + jwk, err := jwkEncode(pub) + if err != nil { + return "", err + } + b := sha256.Sum256([]byte(jwk)) + return base64.RawURLEncoding.EncodeToString(b[:]), nil +} diff --git a/tempfork/acme/jws_test.go b/tempfork/acme/jws_test.go new file mode 100644 index 000000000..d5f00ba2d --- /dev/null +++ b/tempfork/acme/jws_test.go @@ -0,0 +1,550 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package acme + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "io" + "math/big" + "testing" +) + +// The following shell command alias is used in the comments +// throughout this file: +// alias b64raw="base64 -w0 | tr -d '=' | tr '/+' '_-'" + +const ( + // Modulus in raw base64: + // 4xgZ3eRPkwoRvy7qeRUbmMDe0V-xH9eWLdu0iheeLlrmD2mqWXfP9IeSKApbn34 + // g8TuAS9g5zhq8ELQ3kmjr-KV86GAMgI6VAcGlq3QrzpTCf_30Ab7-zawrfRaFON + // a1HwEzPY1KHnGVkxJc85gNkwYI9SY2RHXtvln3zs5wITNrdosqEXeaIkVYBEhbh + // Nu54pp3kxo6TuWLi9e6pXeWetEwmlBwtWZlPoib2j3TxLBksKZfoyFyek380mHg + // JAumQ_I2fjj98_97mk3ihOY4AgVdCDj1z_GCoZkG5Rq7nbCGyosyKWyDX00Zs-n + // NqVhoLeIvXC4nnWdJMZ6rogxyQQ + testKeyPEM = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA4xgZ3eRPkwoRvy7qeRUbmMDe0V+xH9eWLdu0iheeLlrmD2mq +WXfP9IeSKApbn34g8TuAS9g5zhq8ELQ3kmjr+KV86GAMgI6VAcGlq3QrzpTCf/30 +Ab7+zawrfRaFONa1HwEzPY1KHnGVkxJc85gNkwYI9SY2RHXtvln3zs5wITNrdosq +EXeaIkVYBEhbhNu54pp3kxo6TuWLi9e6pXeWetEwmlBwtWZlPoib2j3TxLBksKZf +oyFyek380mHgJAumQ/I2fjj98/97mk3ihOY4AgVdCDj1z/GCoZkG5Rq7nbCGyosy +KWyDX00Zs+nNqVhoLeIvXC4nnWdJMZ6rogxyQQIDAQABAoIBACIEZTOI1Kao9nmV +9IeIsuaR1Y61b9neOF/MLmIVIZu+AAJFCMB4Iw11FV6sFodwpEyeZhx2WkpWVN+H +r19eGiLX3zsL0DOdqBJoSIHDWCCMxgnYJ6nvS0nRxX3qVrBp8R2g12Ub+gNPbmFm +ecf/eeERIVxfifd9VsyRu34eDEvcmKFuLYbElFcPh62xE3x12UZvV/sN7gXbawpP +G+w255vbE5MoaKdnnO83cTFlcHvhn24M/78qP7Te5OAeelr1R89kYxQLpuGe4fbS +zc6E3ym5Td6urDetGGrSY1Eu10/8sMusX+KNWkm+RsBRbkyKq72ks/qKpOxOa+c6 +9gm+Y8ECgYEA/iNUyg1ubRdH11p82l8KHtFC1DPE0V1gSZsX29TpM5jS4qv46K+s +8Ym1zmrORM8x+cynfPx1VQZQ34EYeCMIX212ryJ+zDATl4NE0I4muMvSiH9vx6Xc +7FmhNnaYzPsBL5Tm9nmtQuP09YEn8poiOJFiDs/4olnD5ogA5O4THGkCgYEA5MIL +qWYBUuqbEWLRtMruUtpASclrBqNNsJEsMGbeqBJmoMxdHeSZckbLOrqm7GlMyNRJ +Ne/5uWRGSzaMYuGmwsPpERzqEvYFnSrpjW5YtXZ+JtxFXNVfm9Z1gLLgvGpOUCIU +RbpoDckDe1vgUuk3y5+DjZihs+rqIJ45XzXTzBkCgYBWuf3segruJZy5rEKhTv+o +JqeUvRn0jNYYKFpLBeyTVBrbie6GkbUGNIWbrK05pC+c3K9nosvzuRUOQQL1tJbd +4gA3oiD9U4bMFNr+BRTHyZ7OQBcIXdz3t1qhuHVKtnngIAN1p25uPlbRFUNpshnt +jgeVoHlsBhApcs5DUc+pyQKBgDzeHPg/+g4z+nrPznjKnktRY1W+0El93kgi+J0Q +YiJacxBKEGTJ1MKBb8X6sDurcRDm22wMpGfd9I5Cv2v4GsUsF7HD/cx5xdih+G73 +c4clNj/k0Ff5Nm1izPUno4C+0IOl7br39IPmfpSuR6wH/h6iHQDqIeybjxyKvT1G +N0rRAoGBAKGD+4ZI/E1MoJ5CXB8cDDMHagbE3cq/DtmYzE2v1DFpQYu5I4PCm5c7 +EQeIP6dZtv8IMgtGIb91QX9pXvP0aznzQKwYIA8nZgoENCPfiMTPiEDT9e/0lObO +9XWsXpbSTsRPj0sv1rB+UzBJ0PgjK4q2zOF0sNo7b1+6nlM3BWPx +-----END RSA PRIVATE KEY----- +` + + // This thumbprint is for the testKey defined above. 
+ testKeyThumbprint = "6nicxzh6WETQlrvdchkz-U3e3DOQZ4heJKU63rfqMqQ" + + // openssl ecparam -name secp256k1 -genkey -noout + testKeyECPEM = ` +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIK07hGLr0RwyUdYJ8wbIiBS55CjnkMD23DWr+ccnypWLoAoGCCqGSM49 +AwEHoUQDQgAE5lhEug5xK4xBDZ2nAbaxLtaLiv85bxJ7ePd1dkO23HThqIrvawF5 +QAaS/RNouybCiRhRjI3EaxLkQwgrCw0gqQ== +-----END EC PRIVATE KEY----- +` + // openssl ecparam -name secp384r1 -genkey -noout + testKeyEC384PEM = ` +-----BEGIN EC PRIVATE KEY----- +MIGkAgEBBDAQ4lNtXRORWr1bgKR1CGysr9AJ9SyEk4jiVnlUWWUChmSNL+i9SLSD +Oe/naPqXJ6CgBwYFK4EEACKhZANiAAQzKtj+Ms0vHoTX5dzv3/L5YMXOWuI5UKRj +JigpahYCqXD2BA1j0E/2xt5vlPf+gm0PL+UHSQsCokGnIGuaHCsJAp3ry0gHQEke +WYXapUUFdvaK1R2/2hn5O+eiQM8YzCg= +-----END EC PRIVATE KEY----- +` + // openssl ecparam -name secp521r1 -genkey -noout + testKeyEC512PEM = ` +-----BEGIN EC PRIVATE KEY----- +MIHcAgEBBEIBSNZKFcWzXzB/aJClAb305ibalKgtDA7+70eEkdPt28/3LZMM935Z +KqYHh/COcxuu3Kt8azRAUz3gyr4zZKhlKUSgBwYFK4EEACOhgYkDgYYABAHUNKbx +7JwC7H6pa2sV0tERWhHhB3JmW+OP6SUgMWryvIKajlx73eS24dy4QPGrWO9/ABsD +FqcRSkNVTXnIv6+0mAF25knqIBIg5Q8M9BnOu9GGAchcwt3O7RDHmqewnJJDrbjd +GGnm6rb+NnWR9DIopM0nKNkToWoF/hzopxu4Ae/GsQ== +-----END EC PRIVATE KEY----- +` + // 1. openssl ec -in key.pem -noout -text + // 2. remove first byte, 04 (the header); the rest is X and Y + // 3. convert each with: echo | xxd -r -p | b64raw + testKeyECPubX = "5lhEug5xK4xBDZ2nAbaxLtaLiv85bxJ7ePd1dkO23HQ" + testKeyECPubY = "4aiK72sBeUAGkv0TaLsmwokYUYyNxGsS5EMIKwsNIKk" + testKeyEC384PubX = "MyrY_jLNLx6E1-Xc79_y-WDFzlriOVCkYyYoKWoWAqlw9gQNY9BP9sbeb5T3_oJt" + testKeyEC384PubY = "Dy_lB0kLAqJBpyBrmhwrCQKd68tIB0BJHlmF2qVFBXb2itUdv9oZ-TvnokDPGMwo" + testKeyEC512PubX = "AdQ0pvHsnALsfqlraxXS0RFaEeEHcmZb44_pJSAxavK8gpqOXHvd5Lbh3LhA8atY738AGwMWpxFKQ1VNeci_r7SY" + testKeyEC512PubY = "AXbmSeogEiDlDwz0Gc670YYByFzC3c7tEMeap7CckkOtuN0Yaebqtv42dZH0MiikzSco2ROhagX-HOinG7gB78ax" + + // echo -n '{"crv":"P-256","kty":"EC","x":"","y":""}' | \ + // openssl dgst -binary -sha256 | b64raw + testKeyECThumbprint = "zedj-Bd1Zshp8KLePv2MB-lJ_Hagp7wAwdkA0NUTniU" +) + +var ( + testKey *rsa.PrivateKey + testKeyEC *ecdsa.PrivateKey + testKeyEC384 *ecdsa.PrivateKey + testKeyEC512 *ecdsa.PrivateKey +) + +func init() { + testKey = parseRSA(testKeyPEM, "testKeyPEM") + testKeyEC = parseEC(testKeyECPEM, "testKeyECPEM") + testKeyEC384 = parseEC(testKeyEC384PEM, "testKeyEC384PEM") + testKeyEC512 = parseEC(testKeyEC512PEM, "testKeyEC512PEM") +} + +func decodePEM(s, name string) []byte { + d, _ := pem.Decode([]byte(s)) + if d == nil { + panic("no block found in " + name) + } + return d.Bytes +} + +func parseRSA(s, name string) *rsa.PrivateKey { + b := decodePEM(s, name) + k, err := x509.ParsePKCS1PrivateKey(b) + if err != nil { + panic(fmt.Sprintf("%s: %v", name, err)) + } + return k +} + +func parseEC(s, name string) *ecdsa.PrivateKey { + b := decodePEM(s, name) + k, err := x509.ParseECPrivateKey(b) + if err != nil { + panic(fmt.Sprintf("%s: %v", name, err)) + } + return k +} + +func TestJWSEncodeJSON(t *testing.T) { + claims := struct{ Msg string }{"Hello JWS"} + // JWS signed with testKey and "nonce" as the nonce value + // JSON-serialized JWS fields are split for easier testing + const ( + // {"alg":"RS256","jwk":{"e":"AQAB","kty":"RSA","n":"..."},"nonce":"nonce","url":"url"} + protected = "eyJhbGciOiJSUzI1NiIsImp3ayI6eyJlIjoiQVFBQiIsImt0eSI6" + + "IlJTQSIsIm4iOiI0eGdaM2VSUGt3b1J2eTdxZVJVYm1NRGUwVi14" + + "SDllV0xkdTBpaGVlTGxybUQybXFXWGZQOUllU0tBcGJuMzRnOFR1" + + "QVM5ZzV6aHE4RUxRM2ttanItS1Y4NkdBTWdJNlZBY0dscTNRcnpw" + + 
"VENmXzMwQWI3LXphd3JmUmFGT05hMUh3RXpQWTFLSG5HVmt4SmM4" + + "NWdOa3dZSTlTWTJSSFh0dmxuM3pzNXdJVE5yZG9zcUVYZWFJa1ZZ" + + "QkVoYmhOdTU0cHAza3hvNlR1V0xpOWU2cFhlV2V0RXdtbEJ3dFda" + + "bFBvaWIyajNUeExCa3NLWmZveUZ5ZWszODBtSGdKQXVtUV9JMmZq" + + "ajk4Xzk3bWszaWhPWTRBZ1ZkQ0RqMXpfR0NvWmtHNVJxN25iQ0d5" + + "b3N5S1d5RFgwMFpzLW5OcVZob0xlSXZYQzRubldkSk1aNnJvZ3h5" + + "UVEifSwibm9uY2UiOiJub25jZSIsInVybCI6InVybCJ9" + // {"Msg":"Hello JWS"} + payload = "eyJNc2ciOiJIZWxsbyBKV1MifQ" + // printf '.' | openssl dgst -binary -sha256 -sign testKey | b64raw + signature = "YFyl_xz1E7TR-3E1bIuASTr424EgCvBHjt25WUFC2VaDjXYV0Rj_" + + "Hd3dJ_2IRqBrXDZZ2n4ZeA_4mm3QFwmwyeDwe2sWElhb82lCZ8iX" + + "uFnjeOmSOjx-nWwPa5ibCXzLq13zZ-OBV1Z4oN_TuailQeRoSfA3" + + "nO8gG52mv1x2OMQ5MAFtt8jcngBLzts4AyhI6mBJ2w7Yaj3ZCriq" + + "DWA3GLFvvHdW1Ba9Z01wtGT2CuZI7DUk_6Qj1b3BkBGcoKur5C9i" + + "bUJtCkABwBMvBQNyD3MmXsrRFRTgvVlyU_yMaucYm7nmzEr_2PaQ" + + "50rFt_9qOfJ4sfbLtG1Wwae57BQx1g" + ) + + b, err := jwsEncodeJSON(claims, testKey, noKeyID, "nonce", "url") + if err != nil { + t.Fatal(err) + } + var jws struct{ Protected, Payload, Signature string } + if err := json.Unmarshal(b, &jws); err != nil { + t.Fatal(err) + } + if jws.Protected != protected { + t.Errorf("protected:\n%s\nwant:\n%s", jws.Protected, protected) + } + if jws.Payload != payload { + t.Errorf("payload:\n%s\nwant:\n%s", jws.Payload, payload) + } + if jws.Signature != signature { + t.Errorf("signature:\n%s\nwant:\n%s", jws.Signature, signature) + } +} + +func TestJWSEncodeNoNonce(t *testing.T) { + kid := KeyID("https://example.org/account/1") + claims := "RawString" + const ( + // {"alg":"ES256","kid":"https://example.org/account/1","nonce":"nonce","url":"url"} + protected = "eyJhbGciOiJFUzI1NiIsImtpZCI6Imh0dHBzOi8vZXhhbXBsZS5vcmcvYWNjb3VudC8xIiwidXJsIjoidXJsIn0" + // "Raw String" + payload = "RawString" + ) + + b, err := jwsEncodeJSON(claims, testKeyEC, kid, "", "url") + if err != nil { + t.Fatal(err) + } + var jws struct{ Protected, Payload, Signature string } + if err := json.Unmarshal(b, &jws); err != nil { + t.Fatal(err) + } + if jws.Protected != protected { + t.Errorf("protected:\n%s\nwant:\n%s", jws.Protected, protected) + } + if jws.Payload != payload { + t.Errorf("payload:\n%s\nwant:\n%s", jws.Payload, payload) + } + + sig, err := base64.RawURLEncoding.DecodeString(jws.Signature) + if err != nil { + t.Fatalf("jws.Signature: %v", err) + } + r, s := big.NewInt(0), big.NewInt(0) + r.SetBytes(sig[:len(sig)/2]) + s.SetBytes(sig[len(sig)/2:]) + h := sha256.Sum256([]byte(protected + "." 
+ payload)) + if !ecdsa.Verify(testKeyEC.Public().(*ecdsa.PublicKey), h[:], r, s) { + t.Error("invalid signature") + } +} + +func TestJWSEncodeKID(t *testing.T) { + kid := KeyID("https://example.org/account/1") + claims := struct{ Msg string }{"Hello JWS"} + // JWS signed with testKeyEC + const ( + // {"alg":"ES256","kid":"https://example.org/account/1","nonce":"nonce","url":"url"} + protected = "eyJhbGciOiJFUzI1NiIsImtpZCI6Imh0dHBzOi8vZXhhbXBsZS5" + + "vcmcvYWNjb3VudC8xIiwibm9uY2UiOiJub25jZSIsInVybCI6InVybCJ9" + // {"Msg":"Hello JWS"} + payload = "eyJNc2ciOiJIZWxsbyBKV1MifQ" + ) + + b, err := jwsEncodeJSON(claims, testKeyEC, kid, "nonce", "url") + if err != nil { + t.Fatal(err) + } + var jws struct{ Protected, Payload, Signature string } + if err := json.Unmarshal(b, &jws); err != nil { + t.Fatal(err) + } + if jws.Protected != protected { + t.Errorf("protected:\n%s\nwant:\n%s", jws.Protected, protected) + } + if jws.Payload != payload { + t.Errorf("payload:\n%s\nwant:\n%s", jws.Payload, payload) + } + + sig, err := base64.RawURLEncoding.DecodeString(jws.Signature) + if err != nil { + t.Fatalf("jws.Signature: %v", err) + } + r, s := big.NewInt(0), big.NewInt(0) + r.SetBytes(sig[:len(sig)/2]) + s.SetBytes(sig[len(sig)/2:]) + h := sha256.Sum256([]byte(protected + "." + payload)) + if !ecdsa.Verify(testKeyEC.Public().(*ecdsa.PublicKey), h[:], r, s) { + t.Error("invalid signature") + } +} + +func TestJWSEncodeJSONEC(t *testing.T) { + tt := []struct { + key *ecdsa.PrivateKey + x, y string + alg, crv string + }{ + {testKeyEC, testKeyECPubX, testKeyECPubY, "ES256", "P-256"}, + {testKeyEC384, testKeyEC384PubX, testKeyEC384PubY, "ES384", "P-384"}, + {testKeyEC512, testKeyEC512PubX, testKeyEC512PubY, "ES512", "P-521"}, + } + for i, test := range tt { + claims := struct{ Msg string }{"Hello JWS"} + b, err := jwsEncodeJSON(claims, test.key, noKeyID, "nonce", "url") + if err != nil { + t.Errorf("%d: %v", i, err) + continue + } + var jws struct{ Protected, Payload, Signature string } + if err := json.Unmarshal(b, &jws); err != nil { + t.Errorf("%d: %v", i, err) + continue + } + + b, err = base64.RawURLEncoding.DecodeString(jws.Protected) + if err != nil { + t.Errorf("%d: jws.Protected: %v", i, err) + } + var head struct { + Alg string + Nonce string + URL string `json:"url"` + KID string `json:"kid"` + JWK struct { + Crv string + Kty string + X string + Y string + } `json:"jwk"` + } + if err := json.Unmarshal(b, &head); err != nil { + t.Errorf("%d: jws.Protected: %v", i, err) + } + if head.Alg != test.alg { + t.Errorf("%d: head.Alg = %q; want %q", i, head.Alg, test.alg) + } + if head.Nonce != "nonce" { + t.Errorf("%d: head.Nonce = %q; want nonce", i, head.Nonce) + } + if head.URL != "url" { + t.Errorf("%d: head.URL = %q; want 'url'", i, head.URL) + } + if head.KID != "" { + // We used noKeyID in jwsEncodeJSON: expect no kid value. 
+ t.Errorf("%d: head.KID = %q; want empty", i, head.KID) + } + if head.JWK.Crv != test.crv { + t.Errorf("%d: head.JWK.Crv = %q; want %q", i, head.JWK.Crv, test.crv) + } + if head.JWK.Kty != "EC" { + t.Errorf("%d: head.JWK.Kty = %q; want EC", i, head.JWK.Kty) + } + if head.JWK.X != test.x { + t.Errorf("%d: head.JWK.X = %q; want %q", i, head.JWK.X, test.x) + } + if head.JWK.Y != test.y { + t.Errorf("%d: head.JWK.Y = %q; want %q", i, head.JWK.Y, test.y) + } + } +} + +type customTestSigner struct { + sig []byte + pub crypto.PublicKey +} + +func (s *customTestSigner) Public() crypto.PublicKey { return s.pub } +func (s *customTestSigner) Sign(io.Reader, []byte, crypto.SignerOpts) ([]byte, error) { + return s.sig, nil +} + +func TestJWSEncodeJSONCustom(t *testing.T) { + claims := struct{ Msg string }{"hello"} + const ( + // printf '{"Msg":"hello"}' | b64raw + payload = "eyJNc2ciOiJoZWxsbyJ9" + // printf 'testsig' | b64raw + testsig = "dGVzdHNpZw" + + // the example P256 curve point from https://tools.ietf.org/html/rfc7515#appendix-A.3.1 + // encoded as ASN.1… + es256stdsig = "MEUCIA7RIVN5Y2xIPC9/FVgH1AKjsigDOvl8fheBmsMWnqZlAiEA" + + "xQoH04w8cOXY8S2vCEpUgKZlkMXyk1Cajz9/ioOjVNU" + // …and RFC7518 (https://tools.ietf.org/html/rfc7518#section-3.4) + es256jwsig = "DtEhU3ljbEg8L38VWAfUAqOyKAM6-Xx-F4GawxaepmXFCgfTjDxw" + + "5djxLa8ISlSApmWQxfKTUJqPP3-Kg6NU1Q" + + // printf '{"alg":"ES256","jwk":{"crv":"P-256","kty":"EC","x":,"y":},"nonce":"nonce","url":"url"}' | b64raw + es256phead = "eyJhbGciOiJFUzI1NiIsImp3ayI6eyJjcnYiOiJQLTI1NiIsImt0" + + "eSI6IkVDIiwieCI6IjVsaEV1ZzV4SzR4QkRaMm5BYmF4THRhTGl2" + + "ODVieEo3ZVBkMWRrTzIzSFEiLCJ5IjoiNGFpSzcyc0JlVUFHa3Yw" + + "VGFMc213b2tZVVl5TnhHc1M1RU1JS3dzTklLayJ9LCJub25jZSI6" + + "Im5vbmNlIiwidXJsIjoidXJsIn0" + + // {"alg":"RS256","jwk":{"e":"AQAB","kty":"RSA","n":"..."},"nonce":"nonce","url":"url"} + rs256phead = "eyJhbGciOiJSUzI1NiIsImp3ayI6eyJlIjoiQVFBQiIsImt0eSI6" + + "IlJTQSIsIm4iOiI0eGdaM2VSUGt3b1J2eTdxZVJVYm1NRGUwVi14" + + "SDllV0xkdTBpaGVlTGxybUQybXFXWGZQOUllU0tBcGJuMzRnOFR1" + + "QVM5ZzV6aHE4RUxRM2ttanItS1Y4NkdBTWdJNlZBY0dscTNRcnpw" + + "VENmXzMwQWI3LXphd3JmUmFGT05hMUh3RXpQWTFLSG5HVmt4SmM4" + + "NWdOa3dZSTlTWTJSSFh0dmxuM3pzNXdJVE5yZG9zcUVYZWFJa1ZZ" + + "QkVoYmhOdTU0cHAza3hvNlR1V0xpOWU2cFhlV2V0RXdtbEJ3dFda" + + "bFBvaWIyajNUeExCa3NLWmZveUZ5ZWszODBtSGdKQXVtUV9JMmZq" + + "ajk4Xzk3bWszaWhPWTRBZ1ZkQ0RqMXpfR0NvWmtHNVJxN25iQ0d5" + + "b3N5S1d5RFgwMFpzLW5OcVZob0xlSXZYQzRubldkSk1aNnJvZ3h5" + + "UVEifSwibm9uY2UiOiJub25jZSIsInVybCI6InVybCJ9" + ) + + tt := []struct { + alg, phead string + pub crypto.PublicKey + stdsig, jwsig string + }{ + {"ES256", es256phead, testKeyEC.Public(), es256stdsig, es256jwsig}, + {"RS256", rs256phead, testKey.Public(), testsig, testsig}, + } + for _, tc := range tt { + tc := tc + t.Run(tc.alg, func(t *testing.T) { + stdsig, err := base64.RawStdEncoding.DecodeString(tc.stdsig) + if err != nil { + t.Errorf("couldn't decode test vector: %v", err) + } + signer := &customTestSigner{ + sig: stdsig, + pub: tc.pub, + } + + b, err := jwsEncodeJSON(claims, signer, noKeyID, "nonce", "url") + if err != nil { + t.Fatal(err) + } + var j jsonWebSignature + if err := json.Unmarshal(b, &j); err != nil { + t.Fatal(err) + } + if j.Protected != tc.phead { + t.Errorf("j.Protected = %q\nwant %q", j.Protected, tc.phead) + } + if j.Payload != payload { + t.Errorf("j.Payload = %q\nwant %q", j.Payload, payload) + } + if j.Sig != tc.jwsig { + t.Errorf("j.Sig = %q\nwant %q", j.Sig, tc.jwsig) + } + }) + } +} + +func TestJWSWithMAC(t *testing.T) { + // Example from RFC 
7520 Section 4.4.3. + // https://tools.ietf.org/html/rfc7520#section-4.4.3 + b64Key := "hJtXIZ2uSN5kbQfbtTNWbpdmhkV8FJG-Onbc6mxCcYg" + rawPayload := []byte("It\xe2\x80\x99s a dangerous business, Frodo, going out your " + + "door. You step onto the road, and if you don't keep your feet, " + + "there\xe2\x80\x99s no knowing where you might be swept off " + + "to.") + protected := "eyJhbGciOiJIUzI1NiIsImtpZCI6IjAxOGMwYWU1LTRkOWItNDcxYi1iZmQ2LW" + + "VlZjMxNGJjNzAzNyJ9" + payload := "SXTigJlzIGEgZGFuZ2Vyb3VzIGJ1c2luZXNzLCBGcm9kbywg" + + "Z29pbmcgb3V0IHlvdXIgZG9vci4gWW91IHN0ZXAgb250byB0aGUgcm9h" + + "ZCwgYW5kIGlmIHlvdSBkb24ndCBrZWVwIHlvdXIgZmVldCwgdGhlcmXi" + + "gJlzIG5vIGtub3dpbmcgd2hlcmUgeW91IG1pZ2h0IGJlIHN3ZXB0IG9m" + + "ZiB0by4" + sig := "s0h6KThzkfBBBkLspW1h84VsJZFTsPPqMDA7g1Md7p0" + + key, err := base64.RawURLEncoding.DecodeString(b64Key) + if err != nil { + t.Fatalf("unable to decode key: %q", b64Key) + } + got, err := jwsWithMAC(key, "018c0ae5-4d9b-471b-bfd6-eef314bc7037", "", rawPayload) + if err != nil { + t.Fatalf("jwsWithMAC() = %q", err) + } + if got.Protected != protected { + t.Errorf("got.Protected = %q\nwant %q", got.Protected, protected) + } + if got.Payload != payload { + t.Errorf("got.Payload = %q\nwant %q", got.Payload, payload) + } + if got.Sig != sig { + t.Errorf("got.Signature = %q\nwant %q", got.Sig, sig) + } +} + +func TestJWSWithMACError(t *testing.T) { + p := "{}" + if _, err := jwsWithMAC(nil, "", "", []byte(p)); err == nil { + t.Errorf("jwsWithMAC(nil, ...) = success; want err") + } +} + +func TestJWKThumbprintRSA(t *testing.T) { + // Key example from RFC 7638 + const base64N = "0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAt" + + "VT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMstn6" + + "4tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FD" + + "W2QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n9" + + "1CbOpbISD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINH" + + "aQ-G_xBniIqbw0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw" + const base64E = "AQAB" + const expected = "NzbLsXh8uDCcd-6MNwXF4W_7noWXFZAfHkxZsRGC9Xs" + + b, err := base64.RawURLEncoding.DecodeString(base64N) + if err != nil { + t.Fatalf("Error parsing example key N: %v", err) + } + n := new(big.Int).SetBytes(b) + + b, err = base64.RawURLEncoding.DecodeString(base64E) + if err != nil { + t.Fatalf("Error parsing example key E: %v", err) + } + e := new(big.Int).SetBytes(b) + + pub := &rsa.PublicKey{N: n, E: int(e.Uint64())} + th, err := JWKThumbprint(pub) + if err != nil { + t.Error(err) + } + if th != expected { + t.Errorf("thumbprint = %q; want %q", th, expected) + } +} + +func TestJWKThumbprintEC(t *testing.T) { + // Key example from RFC 7520 + // expected was computed with + // printf '{"crv":"P-521","kty":"EC","x":"","y":""}' | \ + // openssl dgst -binary -sha256 | b64raw + const ( + base64X = "AHKZLLOsCOzz5cY97ewNUajB957y-C-U88c3v13nmGZx6sYl_oJXu9A5RkT" + + "KqjqvjyekWF-7ytDyRXYgCF5cj0Kt" + base64Y = "AdymlHvOiLxXkEhayXQnNCvDX4h9htZaCJN34kfmC6pV5OhQHiraVySsUda" + + "QkAgDPrwQrJmbnX9cwlGfP-HqHZR1" + expected = "dHri3SADZkrush5HU_50AoRhcKFryN-PI6jPBtPL55M" + ) + + b, err := base64.RawURLEncoding.DecodeString(base64X) + if err != nil { + t.Fatalf("Error parsing example key X: %v", err) + } + x := new(big.Int).SetBytes(b) + + b, err = base64.RawURLEncoding.DecodeString(base64Y) + if err != nil { + t.Fatalf("Error parsing example key Y: %v", err) + } + y := new(big.Int).SetBytes(b) + + pub := &ecdsa.PublicKey{Curve: elliptic.P521(), X: x, Y: y} + th, err := JWKThumbprint(pub) + if 
err != nil { + t.Error(err) + } + if th != expected { + t.Errorf("thumbprint = %q; want %q", th, expected) + } +} + +func TestJWKThumbprintErrUnsupportedKey(t *testing.T) { + _, err := JWKThumbprint(struct{}{}) + if err != ErrUnsupportedKey { + t.Errorf("err = %q; want %q", err, ErrUnsupportedKey) + } +} diff --git a/tempfork/acme/rfc8555.go b/tempfork/acme/rfc8555.go new file mode 100644 index 000000000..3152e531b --- /dev/null +++ b/tempfork/acme/rfc8555.go @@ -0,0 +1,476 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "context" + "crypto" + "encoding/base64" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "net/http" + "time" +) + +// DeactivateReg permanently disables an existing account associated with c.Key. +// A deactivated account can no longer request certificate issuance or access +// resources related to the account, such as orders or authorizations. +// +// It only works with CAs implementing RFC 8555. +func (c *Client) DeactivateReg(ctx context.Context) error { + if _, err := c.Discover(ctx); err != nil { // required by c.accountKID + return err + } + url := string(c.accountKID(ctx)) + if url == "" { + return ErrNoAccount + } + req := json.RawMessage(`{"status": "deactivated"}`) + res, err := c.post(ctx, nil, url, req, wantStatus(http.StatusOK)) + if err != nil { + return err + } + res.Body.Close() + return nil +} + +// registerRFC is equivalent to c.Register but for CAs implementing RFC 8555. +// It expects c.Discover to have already been called. +func (c *Client) registerRFC(ctx context.Context, acct *Account, prompt func(tosURL string) bool) (*Account, error) { + c.cacheMu.Lock() // guard c.kid access + defer c.cacheMu.Unlock() + + req := struct { + TermsAgreed bool `json:"termsOfServiceAgreed,omitempty"` + Contact []string `json:"contact,omitempty"` + ExternalAccountBinding *jsonWebSignature `json:"externalAccountBinding,omitempty"` + }{ + Contact: acct.Contact, + } + if c.dir.Terms != "" { + req.TermsAgreed = prompt(c.dir.Terms) + } + + // set 'externalAccountBinding' field if requested + if acct.ExternalAccountBinding != nil { + eabJWS, err := c.encodeExternalAccountBinding(acct.ExternalAccountBinding) + if err != nil { + return nil, fmt.Errorf("acme: failed to encode external account binding: %v", err) + } + req.ExternalAccountBinding = eabJWS + } + + res, err := c.post(ctx, c.Key, c.dir.RegURL, req, wantStatus( + http.StatusOK, // account with this key already registered + http.StatusCreated, // new account created + )) + if err != nil { + return nil, err + } + + defer res.Body.Close() + a, err := responseAccount(res) + if err != nil { + return nil, err + } + // Cache Account URL even if we return an error to the caller. + // It is by all means a valid and usable "kid" value for future requests. + c.KID = KeyID(a.URI) + if res.StatusCode == http.StatusOK { + return nil, ErrAccountAlreadyExists + } + return a, nil +} + +// encodeExternalAccountBinding will encode an external account binding stanza +// as described in https://tools.ietf.org/html/rfc8555#section-7.3.4. +func (c *Client) encodeExternalAccountBinding(eab *ExternalAccountBinding) (*jsonWebSignature, error) { + jwk, err := jwkEncode(c.Key.Public()) + if err != nil { + return nil, err + } + return jwsWithMAC(eab.Key, eab.KID, c.dir.RegURL, []byte(jwk)) +} + +// updateRegRFC is equivalent to c.UpdateReg but for CAs implementing RFC 8555. 
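+// Only the account's contact list is sent in the update; the request is posted
+// to the account URL ("kid") that the client has cached.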
+// It expects c.Discover to have already been called. +func (c *Client) updateRegRFC(ctx context.Context, a *Account) (*Account, error) { + url := string(c.accountKID(ctx)) + if url == "" { + return nil, ErrNoAccount + } + req := struct { + Contact []string `json:"contact,omitempty"` + }{ + Contact: a.Contact, + } + res, err := c.post(ctx, nil, url, req, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + defer res.Body.Close() + return responseAccount(res) +} + +// getRegRFC is equivalent to c.GetReg but for CAs implementing RFC 8555. +// It expects c.Discover to have already been called. +func (c *Client) getRegRFC(ctx context.Context) (*Account, error) { + req := json.RawMessage(`{"onlyReturnExisting": true}`) + res, err := c.post(ctx, c.Key, c.dir.RegURL, req, wantStatus(http.StatusOK)) + if e, ok := err.(*Error); ok && e.ProblemType == "urn:ietf:params:acme:error:accountDoesNotExist" { + return nil, ErrNoAccount + } + if err != nil { + return nil, err + } + + defer res.Body.Close() + return responseAccount(res) +} + +func responseAccount(res *http.Response) (*Account, error) { + var v struct { + Status string + Contact []string + Orders string + } + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid account response: %v", err) + } + return &Account{ + URI: res.Header.Get("Location"), + Status: v.Status, + Contact: v.Contact, + OrdersURL: v.Orders, + }, nil +} + +// accountKeyRollover attempts to perform account key rollover. +// On success it will change client.Key to the new key. +func (c *Client) accountKeyRollover(ctx context.Context, newKey crypto.Signer) error { + dir, err := c.Discover(ctx) // Also required by c.accountKID + if err != nil { + return err + } + kid := c.accountKID(ctx) + if kid == noKeyID { + return ErrNoAccount + } + oldKey, err := jwkEncode(c.Key.Public()) + if err != nil { + return err + } + payload := struct { + Account string `json:"account"` + OldKey json.RawMessage `json:"oldKey"` + }{ + Account: string(kid), + OldKey: json.RawMessage(oldKey), + } + inner, err := jwsEncodeJSON(payload, newKey, noKeyID, noNonce, dir.KeyChangeURL) + if err != nil { + return err + } + + res, err := c.post(ctx, nil, dir.KeyChangeURL, base64.RawURLEncoding.EncodeToString(inner), wantStatus(http.StatusOK)) + if err != nil { + return err + } + defer res.Body.Close() + c.Key = newKey + return nil +} + +// AuthorizeOrder initiates the order-based application for certificate issuance, +// as opposed to pre-authorization in Authorize. +// It is only supported by CAs implementing RFC 8555. +// +// The caller then needs to fetch each authorization with GetAuthorization, +// identify those with StatusPending status and fulfill a challenge using Accept. +// Once all authorizations are satisfied, the caller will typically want to poll +// order status using WaitOrder until it's in StatusReady state. +// To finalize the order and obtain a certificate, the caller submits a CSR with CreateOrderCert. 
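+//
+// A non-authoritative sketch of that flow (error handling, challenge
+// fulfillment, and CSR construction are elided; "client" is assumed to be an
+// already configured *Client):
+//
+//	order, _ := client.AuthorizeOrder(ctx, DomainIDs("example.org"))
+//	for _, u := range order.AuthzURLs {
+//		z, _ := client.GetAuthorization(ctx, u)
+//		// Fulfill one challenge from z.Challenges, then accept it:
+//		// client.Accept(ctx, chal)
+//	}
+//	order, _ = client.WaitOrder(ctx, order.URI)
+//	der, certURL, _ := client.CreateOrderCert(ctx, order.FinalizeURL, csr, true)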
+func (c *Client) AuthorizeOrder(ctx context.Context, id []AuthzID, opt ...OrderOption) (*Order, error) { + dir, err := c.Discover(ctx) + if err != nil { + return nil, err + } + + req := struct { + Identifiers []wireAuthzID `json:"identifiers"` + NotBefore string `json:"notBefore,omitempty"` + NotAfter string `json:"notAfter,omitempty"` + }{} + for _, v := range id { + req.Identifiers = append(req.Identifiers, wireAuthzID{ + Type: v.Type, + Value: v.Value, + }) + } + for _, o := range opt { + switch o := o.(type) { + case orderNotBeforeOpt: + req.NotBefore = time.Time(o).Format(time.RFC3339) + case orderNotAfterOpt: + req.NotAfter = time.Time(o).Format(time.RFC3339) + default: + // Package's fault if we let this happen. + panic(fmt.Sprintf("unsupported order option type %T", o)) + } + } + + res, err := c.post(ctx, nil, dir.OrderURL, req, wantStatus(http.StatusCreated)) + if err != nil { + return nil, err + } + defer res.Body.Close() + return responseOrder(res) +} + +// GetOrder retrives an order identified by the given URL. +// For orders created with AuthorizeOrder, the url value is Order.URI. +// +// If a caller needs to poll an order until its status is final, +// see the WaitOrder method. +func (c *Client) GetOrder(ctx context.Context, url string) (*Order, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + defer res.Body.Close() + return responseOrder(res) +} + +// WaitOrder polls an order from the given URL until it is in one of the final states, +// StatusReady, StatusValid or StatusInvalid, the CA responded with a non-retryable error +// or the context is done. +// +// It returns a non-nil Order only if its Status is StatusReady or StatusValid. +// In all other cases WaitOrder returns an error. +// If the Status is StatusInvalid, the returned error is of type *OrderError. +func (c *Client) WaitOrder(ctx context.Context, url string) (*Order, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + for { + res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + o, err := responseOrder(res) + res.Body.Close() + switch { + case err != nil: + // Skip and retry. + case o.Status == StatusInvalid: + return nil, &OrderError{OrderURL: o.URI, Status: o.Status} + case o.Status == StatusReady || o.Status == StatusValid: + return o, nil + } + + d := retryAfter(res.Header.Get("Retry-After")) + if d == 0 { + // Default retry-after. + // Same reasoning as in WaitAuthorization. + d = time.Second + } + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return nil, ctx.Err() + case <-t.C: + // Retry. 
+ } + } +} + +func responseOrder(res *http.Response) (*Order, error) { + var v struct { + Status string + Expires time.Time + Identifiers []wireAuthzID + NotBefore time.Time + NotAfter time.Time + Error *wireError + Authorizations []string + Finalize string + Certificate string + } + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: error reading order: %v", err) + } + o := &Order{ + URI: res.Header.Get("Location"), + Status: v.Status, + Expires: v.Expires, + NotBefore: v.NotBefore, + NotAfter: v.NotAfter, + AuthzURLs: v.Authorizations, + FinalizeURL: v.Finalize, + CertURL: v.Certificate, + } + for _, id := range v.Identifiers { + o.Identifiers = append(o.Identifiers, AuthzID{Type: id.Type, Value: id.Value}) + } + if v.Error != nil { + o.Error = v.Error.error(nil /* headers */) + } + return o, nil +} + +// CreateOrderCert submits the CSR (Certificate Signing Request) to a CA at the specified URL. +// The URL is the FinalizeURL field of an Order created with AuthorizeOrder. +// +// If the bundle argument is true, the returned value also contain the CA (issuer) +// certificate chain. Otherwise, only a leaf certificate is returned. +// The returned URL can be used to re-fetch the certificate using FetchCert. +// +// This method is only supported by CAs implementing RFC 8555. See CreateCert for pre-RFC CAs. +// +// CreateOrderCert returns an error if the CA's response is unreasonably large. +// Callers are encouraged to parse the returned value to ensure the certificate is valid and has the expected features. +func (c *Client) CreateOrderCert(ctx context.Context, url string, csr []byte, bundle bool) (der [][]byte, certURL string, err error) { + if _, err := c.Discover(ctx); err != nil { // required by c.accountKID + return nil, "", err + } + + // RFC describes this as "finalize order" request. + req := struct { + CSR string `json:"csr"` + }{ + CSR: base64.RawURLEncoding.EncodeToString(csr), + } + res, err := c.post(ctx, nil, url, req, wantStatus(http.StatusOK)) + if err != nil { + return nil, "", err + } + defer res.Body.Close() + o, err := responseOrder(res) + if err != nil { + return nil, "", err + } + + // Wait for CA to issue the cert if they haven't. + if o.Status != StatusValid { + o, err = c.WaitOrder(ctx, o.URI) + } + if err != nil { + return nil, "", err + } + // The only acceptable status post finalize and WaitOrder is "valid". + if o.Status != StatusValid { + return nil, "", &OrderError{OrderURL: o.URI, Status: o.Status} + } + crt, err := c.fetchCertRFC(ctx, o.CertURL, bundle) + return crt, o.CertURL, err +} + +// fetchCertRFC downloads issued certificate from the given URL. +// It expects the CA to respond with PEM-encoded certificate chain. +// +// The URL argument is the CertURL field of Order. +func (c *Client) fetchCertRFC(ctx context.Context, url string, bundle bool) ([][]byte, error) { + res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + defer res.Body.Close() + + // Get all the bytes up to a sane maximum. + // Account very roughly for base64 overhead. + const max = maxCertChainSize + maxCertChainSize/33 + b, err := io.ReadAll(io.LimitReader(res.Body, max+1)) + if err != nil { + return nil, fmt.Errorf("acme: fetch cert response stream: %v", err) + } + if len(b) > max { + return nil, errors.New("acme: certificate chain is too big") + } + + // Decode PEM chain. 
+ var chain [][]byte + for { + var p *pem.Block + p, b = pem.Decode(b) + if p == nil { + break + } + if p.Type != "CERTIFICATE" { + return nil, fmt.Errorf("acme: invalid PEM cert type %q", p.Type) + } + + chain = append(chain, p.Bytes) + if !bundle { + return chain, nil + } + if len(chain) > maxChainLen { + return nil, errors.New("acme: certificate chain is too long") + } + } + if len(chain) == 0 { + return nil, errors.New("acme: certificate chain is empty") + } + return chain, nil +} + +// sends a cert revocation request in either JWK form when key is non-nil or KID form otherwise. +func (c *Client) revokeCertRFC(ctx context.Context, key crypto.Signer, cert []byte, reason CRLReasonCode) error { + req := &struct { + Cert string `json:"certificate"` + Reason int `json:"reason"` + }{ + Cert: base64.RawURLEncoding.EncodeToString(cert), + Reason: int(reason), + } + res, err := c.post(ctx, key, c.dir.RevokeURL, req, wantStatus(http.StatusOK)) + if err != nil { + if isAlreadyRevoked(err) { + // Assume it is not an error to revoke an already revoked cert. + return nil + } + return err + } + defer res.Body.Close() + return nil +} + +func isAlreadyRevoked(err error) bool { + e, ok := err.(*Error) + return ok && e.ProblemType == "urn:ietf:params:acme:error:alreadyRevoked" +} + +// ListCertAlternates retrieves any alternate certificate chain URLs for the +// given certificate chain URL. These alternate URLs can be passed to FetchCert +// in order to retrieve the alternate certificate chains. +// +// If there are no alternate issuer certificate chains, a nil slice will be +// returned. +func (c *Client) ListCertAlternates(ctx context.Context, url string) ([]string, error) { + if _, err := c.Discover(ctx); err != nil { // required by c.accountKID + return nil, err + } + + res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + defer res.Body.Close() + + // We don't need the body but we need to discard it so we don't end up + // preventing keep-alive + if _, err := io.Copy(io.Discard, res.Body); err != nil { + return nil, fmt.Errorf("acme: cert alternates response stream: %v", err) + } + alts := linkHeader(res.Header, "alternate") + return alts, nil +} diff --git a/tempfork/acme/rfc8555_test.go b/tempfork/acme/rfc8555_test.go new file mode 100644 index 000000000..d65720a35 --- /dev/null +++ b/tempfork/acme/rfc8555_test.go @@ -0,0 +1,1017 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "sync" + "testing" + "time" +) + +// While contents of this file is pertinent only to RFC8555, +// it is complementary to the tests in the other _test.go files +// many of which are valid for both pre- and RFC8555. +// This will make it easier to clean up the tests once non-RFC compliant +// code is removed. 
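+//
+// Most tests below follow the same shape (an informal sketch; the acmeServer
+// helper and its methods are defined further down in this file):
+//
+//	s := newACMEServer()
+//	s.handle("/acme/new-account", func(w http.ResponseWriter, r *http.Request) {
+//		w.Header().Set("Location", s.url("/accounts/1"))
+//		w.Write([]byte(`{"status": "valid"}`))
+//	})
+//	s.start()
+//	defer s.close()
+//	cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")}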
+ +func TestRFC_Discover(t *testing.T) { + const ( + nonce = "https://example.com/acme/new-nonce" + reg = "https://example.com/acme/new-acct" + order = "https://example.com/acme/new-order" + authz = "https://example.com/acme/new-authz" + revoke = "https://example.com/acme/revoke-cert" + keychange = "https://example.com/acme/key-change" + metaTerms = "https://example.com/acme/terms/2017-5-30" + metaWebsite = "https://www.example.com/" + metaCAA = "example.com" + ) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, `{ + "newNonce": %q, + "newAccount": %q, + "newOrder": %q, + "newAuthz": %q, + "revokeCert": %q, + "keyChange": %q, + "meta": { + "termsOfService": %q, + "website": %q, + "caaIdentities": [%q], + "externalAccountRequired": true + } + }`, nonce, reg, order, authz, revoke, keychange, metaTerms, metaWebsite, metaCAA) + })) + defer ts.Close() + c := &Client{DirectoryURL: ts.URL} + dir, err := c.Discover(context.Background()) + if err != nil { + t.Fatal(err) + } + if dir.NonceURL != nonce { + t.Errorf("dir.NonceURL = %q; want %q", dir.NonceURL, nonce) + } + if dir.RegURL != reg { + t.Errorf("dir.RegURL = %q; want %q", dir.RegURL, reg) + } + if dir.OrderURL != order { + t.Errorf("dir.OrderURL = %q; want %q", dir.OrderURL, order) + } + if dir.AuthzURL != authz { + t.Errorf("dir.AuthzURL = %q; want %q", dir.AuthzURL, authz) + } + if dir.RevokeURL != revoke { + t.Errorf("dir.RevokeURL = %q; want %q", dir.RevokeURL, revoke) + } + if dir.KeyChangeURL != keychange { + t.Errorf("dir.KeyChangeURL = %q; want %q", dir.KeyChangeURL, keychange) + } + if dir.Terms != metaTerms { + t.Errorf("dir.Terms = %q; want %q", dir.Terms, metaTerms) + } + if dir.Website != metaWebsite { + t.Errorf("dir.Website = %q; want %q", dir.Website, metaWebsite) + } + if len(dir.CAA) == 0 || dir.CAA[0] != metaCAA { + t.Errorf("dir.CAA = %q; want [%q]", dir.CAA, metaCAA) + } + if !dir.ExternalAccountRequired { + t.Error("dir.Meta.ExternalAccountRequired is false") + } +} + +func TestRFC_popNonce(t *testing.T) { + var count int + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // The Client uses only Directory.NonceURL when specified. + // Expect no other URL paths. + if r.URL.Path != "/new-nonce" { + t.Errorf("r.URL.Path = %q; want /new-nonce", r.URL.Path) + } + if count > 0 { + w.WriteHeader(http.StatusTooManyRequests) + return + } + count++ + w.Header().Set("Replay-Nonce", "second") + })) + cl := &Client{ + DirectoryURL: ts.URL, + dir: &Directory{NonceURL: ts.URL + "/new-nonce"}, + } + cl.addNonce(http.Header{"Replay-Nonce": {"first"}}) + + for i, nonce := range []string{"first", "second"} { + v, err := cl.popNonce(context.Background(), "") + if err != nil { + t.Errorf("%d: cl.popNonce: %v", i, err) + } + if v != nonce { + t.Errorf("%d: cl.popNonce = %q; want %q", i, v, nonce) + } + } + // No more nonces and server replies with an error past first nonce fetch. + // Expected to fail. 
+ if _, err := cl.popNonce(context.Background(), ""); err == nil { + t.Error("last cl.popNonce returned nil error") + } +} + +func TestRFC_postKID(t *testing.T) { + var ts *httptest.Server + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/new-nonce": + w.Header().Set("Replay-Nonce", "nonce") + case "/new-account": + w.Header().Set("Location", "/account-1") + w.Write([]byte(`{"status":"valid"}`)) + case "/post": + b, _ := io.ReadAll(r.Body) // check err later in decodeJWSxxx + head, err := decodeJWSHead(bytes.NewReader(b)) + if err != nil { + t.Errorf("decodeJWSHead: %v", err) + return + } + if head.KID != "/account-1" { + t.Errorf("head.KID = %q; want /account-1", head.KID) + } + if len(head.JWK) != 0 { + t.Errorf("head.JWK = %q; want zero map", head.JWK) + } + if v := ts.URL + "/post"; head.URL != v { + t.Errorf("head.URL = %q; want %q", head.URL, v) + } + + var payload struct{ Msg string } + decodeJWSRequest(t, &payload, bytes.NewReader(b)) + if payload.Msg != "ping" { + t.Errorf("payload.Msg = %q; want ping", payload.Msg) + } + w.Write([]byte("pong")) + default: + t.Errorf("unhandled %s %s", r.Method, r.URL) + w.WriteHeader(http.StatusBadRequest) + } + })) + defer ts.Close() + + ctx := context.Background() + cl := &Client{ + Key: testKey, + DirectoryURL: ts.URL, + dir: &Directory{ + NonceURL: ts.URL + "/new-nonce", + RegURL: ts.URL + "/new-account", + OrderURL: "/force-rfc-mode", + }, + } + req := json.RawMessage(`{"msg":"ping"}`) + res, err := cl.post(ctx, nil /* use kid */, ts.URL+"/post", req, wantStatus(http.StatusOK)) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + b, _ := io.ReadAll(res.Body) // don't care about err - just checking b + if string(b) != "pong" { + t.Errorf("res.Body = %q; want pong", b) + } +} + +// acmeServer simulates a subset of RFC 8555 compliant CA. +// +// TODO: We also have x/crypto/acme/autocert/acmetest and startACMEServerStub in autocert_test.go. +// It feels like this acmeServer is a sweet spot between usefulness and added complexity. +// Also, acmetest and startACMEServerStub were both written for draft-02, no RFC support. +// The goal is to consolidate all into one ACME test server. +type acmeServer struct { + ts *httptest.Server + handler map[string]http.HandlerFunc // keyed by r.URL.Path + + mu sync.Mutex + nnonce int +} + +func newACMEServer() *acmeServer { + return &acmeServer{handler: make(map[string]http.HandlerFunc)} +} + +func (s *acmeServer) handle(path string, f func(http.ResponseWriter, *http.Request)) { + s.handler[path] = http.HandlerFunc(f) +} + +func (s *acmeServer) start() { + s.ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + // Directory request. + if r.URL.Path == "/" { + fmt.Fprintf(w, `{ + "newNonce": %q, + "newAccount": %q, + "newOrder": %q, + "newAuthz": %q, + "revokeCert": %q, + "keyChange": %q, + "meta": {"termsOfService": %q} + }`, + s.url("/acme/new-nonce"), + s.url("/acme/new-account"), + s.url("/acme/new-order"), + s.url("/acme/new-authz"), + s.url("/acme/revoke-cert"), + s.url("/acme/key-change"), + s.url("/terms"), + ) + return + } + + // All other responses contain a nonce value unconditionally. 
+ w.Header().Set("Replay-Nonce", s.nonce()) + if r.URL.Path == "/acme/new-nonce" { + return + } + + h := s.handler[r.URL.Path] + if h == nil { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "Unhandled %s", r.URL.Path) + return + } + h.ServeHTTP(w, r) + })) +} + +func (s *acmeServer) close() { + s.ts.Close() +} + +func (s *acmeServer) url(path string) string { + return s.ts.URL + path +} + +func (s *acmeServer) nonce() string { + s.mu.Lock() + defer s.mu.Unlock() + s.nnonce++ + return fmt.Sprintf("nonce%d", s.nnonce) +} + +func (s *acmeServer) error(w http.ResponseWriter, e *wireError) { + w.WriteHeader(e.Status) + json.NewEncoder(w).Encode(e) +} + +func TestRFC_Register(t *testing.T) { + const email = "mailto:user@example.org" + + s := newACMEServer() + s.handle("/acme/new-account", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", s.url("/accounts/1")) + w.WriteHeader(http.StatusCreated) // 201 means new account created + fmt.Fprintf(w, `{ + "status": "valid", + "contact": [%q], + "orders": %q + }`, email, s.url("/accounts/1/orders")) + + b, _ := io.ReadAll(r.Body) // check err later in decodeJWSxxx + head, err := decodeJWSHead(bytes.NewReader(b)) + if err != nil { + t.Errorf("decodeJWSHead: %v", err) + return + } + if len(head.JWK) == 0 { + t.Error("head.JWK is empty") + } + + var req struct{ Contact []string } + decodeJWSRequest(t, &req, bytes.NewReader(b)) + if len(req.Contact) != 1 || req.Contact[0] != email { + t.Errorf("req.Contact = %q; want [%q]", req.Contact, email) + } + }) + s.start() + defer s.close() + + ctx := context.Background() + cl := &Client{ + Key: testKeyEC, + DirectoryURL: s.url("/"), + } + + var didPrompt bool + a := &Account{Contact: []string{email}} + acct, err := cl.Register(ctx, a, func(tos string) bool { + didPrompt = true + terms := s.url("/terms") + if tos != terms { + t.Errorf("tos = %q; want %q", tos, terms) + } + return true + }) + if err != nil { + t.Fatal(err) + } + okAccount := &Account{ + URI: s.url("/accounts/1"), + Status: StatusValid, + Contact: []string{email}, + OrdersURL: s.url("/accounts/1/orders"), + } + if !reflect.DeepEqual(acct, okAccount) { + t.Errorf("acct = %+v; want %+v", acct, okAccount) + } + if !didPrompt { + t.Error("tos prompt wasn't called") + } + if v := cl.accountKID(ctx); v != KeyID(okAccount.URI) { + t.Errorf("account kid = %q; want %q", v, okAccount.URI) + } +} + +func TestRFC_RegisterExternalAccountBinding(t *testing.T) { + eab := &ExternalAccountBinding{ + KID: "kid-1", + Key: []byte("secret"), + } + + type protected struct { + Algorithm string `json:"alg"` + KID string `json:"kid"` + URL string `json:"url"` + } + const email = "mailto:user@example.org" + + s := newACMEServer() + s.handle("/acme/new-account", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", s.url("/accounts/1")) + if r.Method != "POST" { + t.Errorf("r.Method = %q; want POST", r.Method) + } + + var j struct { + Protected string + Contact []string + TermsOfServiceAgreed bool + ExternalaccountBinding struct { + Protected string + Payload string + Signature string + } + } + decodeJWSRequest(t, &j, r.Body) + protData, err := base64.RawURLEncoding.DecodeString(j.ExternalaccountBinding.Protected) + if err != nil { + t.Fatal(err) + } + + var prot protected + err = json.Unmarshal(protData, &prot) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(j.Contact, []string{email}) { + t.Errorf("j.Contact = %v; want %v", j.Contact, []string{email}) + } + if !j.TermsOfServiceAgreed { + 
t.Error("j.TermsOfServiceAgreed = false; want true") + } + + // Ensure same KID. + if prot.KID != eab.KID { + t.Errorf("j.ExternalAccountBinding.KID = %s; want %s", prot.KID, eab.KID) + } + // Ensure expected Algorithm. + if prot.Algorithm != "HS256" { + t.Errorf("j.ExternalAccountBinding.Alg = %s; want %s", + prot.Algorithm, "HS256") + } + + // Ensure same URL as outer JWS. + url := fmt.Sprintf("http://%s/acme/new-account", r.Host) + if prot.URL != url { + t.Errorf("j.ExternalAccountBinding.URL = %s; want %s", + prot.URL, url) + } + + // Ensure payload is base64URL encoded string of JWK in outer JWS + jwk, err := jwkEncode(testKeyEC.Public()) + if err != nil { + t.Fatal(err) + } + decodedPayload, err := base64.RawURLEncoding.DecodeString(j.ExternalaccountBinding.Payload) + if err != nil { + t.Fatal(err) + } + if jwk != string(decodedPayload) { + t.Errorf("j.ExternalAccountBinding.Payload = %s; want %s", decodedPayload, jwk) + } + + // Check signature on inner external account binding JWS + hmac := hmac.New(sha256.New, []byte("secret")) + _, err = hmac.Write([]byte(j.ExternalaccountBinding.Protected + "." + j.ExternalaccountBinding.Payload)) + if err != nil { + t.Fatal(err) + } + mac := hmac.Sum(nil) + encodedMAC := base64.RawURLEncoding.EncodeToString(mac) + + if !bytes.Equal([]byte(encodedMAC), []byte(j.ExternalaccountBinding.Signature)) { + t.Errorf("j.ExternalAccountBinding.Signature = %v; want %v", + []byte(j.ExternalaccountBinding.Signature), encodedMAC) + } + + w.Header().Set("Location", s.url("/accounts/1")) + w.WriteHeader(http.StatusCreated) + b, _ := json.Marshal([]string{email}) + fmt.Fprintf(w, `{"status":"valid","orders":"%s","contact":%s}`, s.url("/accounts/1/orders"), b) + }) + s.start() + defer s.close() + + ctx := context.Background() + cl := &Client{ + Key: testKeyEC, + DirectoryURL: s.url("/"), + } + + var didPrompt bool + a := &Account{Contact: []string{email}, ExternalAccountBinding: eab} + acct, err := cl.Register(ctx, a, func(tos string) bool { + didPrompt = true + terms := s.url("/terms") + if tos != terms { + t.Errorf("tos = %q; want %q", tos, terms) + } + return true + }) + if err != nil { + t.Fatal(err) + } + okAccount := &Account{ + URI: s.url("/accounts/1"), + Status: StatusValid, + Contact: []string{email}, + OrdersURL: s.url("/accounts/1/orders"), + } + if !reflect.DeepEqual(acct, okAccount) { + t.Errorf("acct = %+v; want %+v", acct, okAccount) + } + if !didPrompt { + t.Error("tos prompt wasn't called") + } + if v := cl.accountKID(ctx); v != KeyID(okAccount.URI) { + t.Errorf("account kid = %q; want %q", v, okAccount.URI) + } +} + +func TestRFC_RegisterExisting(t *testing.T) { + s := newACMEServer() + s.handle("/acme/new-account", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", s.url("/accounts/1")) + w.WriteHeader(http.StatusOK) // 200 means account already exists + w.Write([]byte(`{"status": "valid"}`)) + }) + s.start() + defer s.close() + + cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")} + _, err := cl.Register(context.Background(), &Account{}, AcceptTOS) + if err != ErrAccountAlreadyExists { + t.Errorf("err = %v; want %v", err, ErrAccountAlreadyExists) + } + kid := KeyID(s.url("/accounts/1")) + if v := cl.accountKID(context.Background()); v != kid { + t.Errorf("account kid = %q; want %q", v, kid) + } +} + +func TestRFC_UpdateReg(t *testing.T) { + const email = "mailto:user@example.org" + + s := newACMEServer() + s.handle("/acme/new-account", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", 
s.url("/accounts/1")) + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"status": "valid"}`)) + }) + var didUpdate bool + s.handle("/accounts/1", func(w http.ResponseWriter, r *http.Request) { + didUpdate = true + w.Header().Set("Location", s.url("/accounts/1")) + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"status": "valid"}`)) + + b, _ := io.ReadAll(r.Body) // check err later in decodeJWSxxx + head, err := decodeJWSHead(bytes.NewReader(b)) + if err != nil { + t.Errorf("decodeJWSHead: %v", err) + return + } + if len(head.JWK) != 0 { + t.Error("head.JWK is non-zero") + } + kid := s.url("/accounts/1") + if head.KID != kid { + t.Errorf("head.KID = %q; want %q", head.KID, kid) + } + + var req struct{ Contact []string } + decodeJWSRequest(t, &req, bytes.NewReader(b)) + if len(req.Contact) != 1 || req.Contact[0] != email { + t.Errorf("req.Contact = %q; want [%q]", req.Contact, email) + } + }) + s.start() + defer s.close() + + cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")} + _, err := cl.UpdateReg(context.Background(), &Account{Contact: []string{email}}) + if err != nil { + t.Error(err) + } + if !didUpdate { + t.Error("UpdateReg didn't update the account") + } +} + +func TestRFC_GetReg(t *testing.T) { + s := newACMEServer() + s.handle("/acme/new-account", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", s.url("/accounts/1")) + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"status": "valid"}`)) + + head, err := decodeJWSHead(r.Body) + if err != nil { + t.Errorf("decodeJWSHead: %v", err) + return + } + if len(head.JWK) == 0 { + t.Error("head.JWK is empty") + } + }) + s.start() + defer s.close() + + cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")} + acct, err := cl.GetReg(context.Background(), "") + if err != nil { + t.Fatal(err) + } + okAccount := &Account{ + URI: s.url("/accounts/1"), + Status: StatusValid, + } + if !reflect.DeepEqual(acct, okAccount) { + t.Errorf("acct = %+v; want %+v", acct, okAccount) + } +} + +func TestRFC_GetRegNoAccount(t *testing.T) { + s := newACMEServer() + s.handle("/acme/new-account", func(w http.ResponseWriter, r *http.Request) { + s.error(w, &wireError{ + Status: http.StatusBadRequest, + Type: "urn:ietf:params:acme:error:accountDoesNotExist", + }) + }) + s.start() + defer s.close() + + cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")} + if _, err := cl.GetReg(context.Background(), ""); err != ErrNoAccount { + t.Errorf("err = %v; want %v", err, ErrNoAccount) + } +} + +func TestRFC_GetRegOtherError(t *testing.T) { + s := newACMEServer() + s.handle("/acme/new-account", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + }) + s.start() + defer s.close() + + cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")} + if _, err := cl.GetReg(context.Background(), ""); err == nil || err == ErrNoAccount { + t.Errorf("GetReg: %v; want any other non-nil err", err) + } +} + +func TestRFC_AccountKeyRollover(t *testing.T) { + s := newACMEServer() + s.handle("/acme/new-account", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", s.url("/accounts/1")) + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"status": "valid"}`)) + }) + s.handle("/acme/key-change", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + s.start() + defer s.close() + + cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")} + if err := cl.AccountKeyRollover(context.Background(), testKeyEC384); err != nil { + t.Errorf("AccountKeyRollover: %v, wanted no error", err) + } 
else if cl.Key != testKeyEC384 { + t.Error("AccountKeyRollover did not rotate the client key") + } +} + +func TestRFC_DeactivateReg(t *testing.T) { + const email = "mailto:user@example.org" + curStatus := StatusValid + + type account struct { + Status string `json:"status"` + Contact []string `json:"contact"` + AcceptTOS bool `json:"termsOfServiceAgreed"` + Orders string `json:"orders"` + } + + s := newACMEServer() + s.handle("/acme/new-account", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", s.url("/accounts/1")) + w.WriteHeader(http.StatusOK) // 200 means existing account + json.NewEncoder(w).Encode(account{ + Status: curStatus, + Contact: []string{email}, + AcceptTOS: true, + Orders: s.url("/accounts/1/orders"), + }) + + b, _ := io.ReadAll(r.Body) // check err later in decodeJWSxxx + head, err := decodeJWSHead(bytes.NewReader(b)) + if err != nil { + t.Errorf("decodeJWSHead: %v", err) + return + } + if len(head.JWK) == 0 { + t.Error("head.JWK is empty") + } + + var req struct { + Status string `json:"status"` + Contact []string `json:"contact"` + AcceptTOS bool `json:"termsOfServiceAgreed"` + OnlyExisting bool `json:"onlyReturnExisting"` + } + decodeJWSRequest(t, &req, bytes.NewReader(b)) + if !req.OnlyExisting { + t.Errorf("req.OnlyReturnExisting = %t; want = %t", req.OnlyExisting, true) + } + }) + s.handle("/accounts/1", func(w http.ResponseWriter, r *http.Request) { + if curStatus == StatusValid { + curStatus = StatusDeactivated + w.WriteHeader(http.StatusOK) + } else { + s.error(w, &wireError{ + Status: http.StatusUnauthorized, + Type: "urn:ietf:params:acme:error:unauthorized", + }) + } + var req account + b, _ := io.ReadAll(r.Body) // check err later in decodeJWSxxx + head, err := decodeJWSHead(bytes.NewReader(b)) + if err != nil { + t.Errorf("decodeJWSHead: %v", err) + return + } + if len(head.JWK) != 0 { + t.Error("head.JWK is not empty") + } + if !strings.HasSuffix(head.KID, "/accounts/1") { + t.Errorf("head.KID = %q; want suffix /accounts/1", head.KID) + } + + decodeJWSRequest(t, &req, bytes.NewReader(b)) + if req.Status != StatusDeactivated { + t.Errorf("req.Status = %q; want = %q", req.Status, StatusDeactivated) + } + }) + s.start() + defer s.close() + + cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")} + if err := cl.DeactivateReg(context.Background()); err != nil { + t.Errorf("DeactivateReg: %v, wanted no error", err) + } + if err := cl.DeactivateReg(context.Background()); err == nil { + t.Errorf("DeactivateReg: %v, wanted error for unauthorized", err) + } +} + +func TestRF_DeactivateRegNoAccount(t *testing.T) { + s := newACMEServer() + s.handle("/acme/new-account", func(w http.ResponseWriter, r *http.Request) { + s.error(w, &wireError{ + Status: http.StatusBadRequest, + Type: "urn:ietf:params:acme:error:accountDoesNotExist", + }) + }) + s.start() + defer s.close() + + cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")} + if err := cl.DeactivateReg(context.Background()); !errors.Is(err, ErrNoAccount) { + t.Errorf("DeactivateReg: %v, wanted ErrNoAccount", err) + } +} + +func TestRFC_AuthorizeOrder(t *testing.T) { + s := newACMEServer() + s.handle("/acme/new-account", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", s.url("/accounts/1")) + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"status": "valid"}`)) + }) + s.handle("/acme/new-order", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", s.url("/orders/1")) + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{ + "status": 
"pending", + "expires": "2019-09-01T00:00:00Z", + "notBefore": "2019-08-31T00:00:00Z", + "notAfter": "2019-09-02T00:00:00Z", + "identifiers": [{"type":"dns", "value":"example.org"}], + "authorizations": [%q] + }`, s.url("/authz/1")) + }) + s.start() + defer s.close() + + cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")} + o, err := cl.AuthorizeOrder(context.Background(), DomainIDs("example.org"), + WithOrderNotBefore(time.Date(2019, 8, 31, 0, 0, 0, 0, time.UTC)), + WithOrderNotAfter(time.Date(2019, 9, 2, 0, 0, 0, 0, time.UTC)), + ) + if err != nil { + t.Fatal(err) + } + okOrder := &Order{ + URI: s.url("/orders/1"), + Status: StatusPending, + Expires: time.Date(2019, 9, 1, 0, 0, 0, 0, time.UTC), + NotBefore: time.Date(2019, 8, 31, 0, 0, 0, 0, time.UTC), + NotAfter: time.Date(2019, 9, 2, 0, 0, 0, 0, time.UTC), + Identifiers: []AuthzID{AuthzID{Type: "dns", Value: "example.org"}}, + AuthzURLs: []string{s.url("/authz/1")}, + } + if !reflect.DeepEqual(o, okOrder) { + t.Errorf("AuthorizeOrder = %+v; want %+v", o, okOrder) + } +} + +func TestRFC_GetOrder(t *testing.T) { + s := newACMEServer() + s.handle("/acme/new-account", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", s.url("/accounts/1")) + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"status": "valid"}`)) + }) + s.handle("/orders/1", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", s.url("/orders/1")) + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{ + "status": "invalid", + "expires": "2019-09-01T00:00:00Z", + "notBefore": "2019-08-31T00:00:00Z", + "notAfter": "2019-09-02T00:00:00Z", + "identifiers": [{"type":"dns", "value":"example.org"}], + "authorizations": ["/authz/1"], + "finalize": "/orders/1/fin", + "certificate": "/orders/1/cert", + "error": {"type": "badRequest"} + }`)) + }) + s.start() + defer s.close() + + cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")} + o, err := cl.GetOrder(context.Background(), s.url("/orders/1")) + if err != nil { + t.Fatal(err) + } + okOrder := &Order{ + URI: s.url("/orders/1"), + Status: StatusInvalid, + Expires: time.Date(2019, 9, 1, 0, 0, 0, 0, time.UTC), + NotBefore: time.Date(2019, 8, 31, 0, 0, 0, 0, time.UTC), + NotAfter: time.Date(2019, 9, 2, 0, 0, 0, 0, time.UTC), + Identifiers: []AuthzID{AuthzID{Type: "dns", Value: "example.org"}}, + AuthzURLs: []string{"/authz/1"}, + FinalizeURL: "/orders/1/fin", + CertURL: "/orders/1/cert", + Error: &Error{ProblemType: "badRequest"}, + } + if !reflect.DeepEqual(o, okOrder) { + t.Errorf("GetOrder = %+v\nwant %+v", o, okOrder) + } +} + +func TestRFC_WaitOrder(t *testing.T) { + for _, st := range []string{StatusReady, StatusValid} { + t.Run(st, func(t *testing.T) { + testWaitOrderStatus(t, st) + }) + } +} + +func testWaitOrderStatus(t *testing.T, okStatus string) { + s := newACMEServer() + s.handle("/acme/new-account", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", s.url("/accounts/1")) + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"status": "valid"}`)) + }) + var count int + s.handle("/orders/1", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", s.url("/orders/1")) + w.WriteHeader(http.StatusOK) + s := StatusPending + if count > 0 { + s = okStatus + } + fmt.Fprintf(w, `{"status": %q}`, s) + count++ + }) + s.start() + defer s.close() + + cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")} + order, err := cl.WaitOrder(context.Background(), s.url("/orders/1")) + if err != nil { + t.Fatalf("WaitOrder: %v", err) + } + if order.Status != 
okStatus { + t.Errorf("order.Status = %q; want %q", order.Status, okStatus) + } +} + +func TestRFC_WaitOrderError(t *testing.T) { + s := newACMEServer() + s.handle("/acme/new-account", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", s.url("/accounts/1")) + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"status": "valid"}`)) + }) + var count int + s.handle("/orders/1", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", s.url("/orders/1")) + w.WriteHeader(http.StatusOK) + s := StatusPending + if count > 0 { + s = StatusInvalid + } + fmt.Fprintf(w, `{"status": %q}`, s) + count++ + }) + s.start() + defer s.close() + + cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")} + _, err := cl.WaitOrder(context.Background(), s.url("/orders/1")) + if err == nil { + t.Fatal("WaitOrder returned nil error") + } + e, ok := err.(*OrderError) + if !ok { + t.Fatalf("err = %v (%T); want OrderError", err, err) + } + if e.OrderURL != s.url("/orders/1") { + t.Errorf("e.OrderURL = %q; want %q", e.OrderURL, s.url("/orders/1")) + } + if e.Status != StatusInvalid { + t.Errorf("e.Status = %q; want %q", e.Status, StatusInvalid) + } +} + +func TestRFC_CreateOrderCert(t *testing.T) { + q := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "example.org"}, + } + csr, err := x509.CreateCertificateRequest(rand.Reader, q, testKeyEC) + if err != nil { + t.Fatal(err) + } + + tmpl := &x509.Certificate{SerialNumber: big.NewInt(1)} + leaf, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &testKeyEC.PublicKey, testKeyEC) + if err != nil { + t.Fatal(err) + } + + s := newACMEServer() + s.handle("/acme/new-account", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", s.url("/accounts/1")) + w.Write([]byte(`{"status": "valid"}`)) + }) + var count int + s.handle("/pleaseissue", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Location", s.url("/pleaseissue")) + st := StatusProcessing + if count > 0 { + st = StatusValid + } + fmt.Fprintf(w, `{"status":%q, "certificate":%q}`, st, s.url("/crt")) + count++ + }) + s.handle("/crt", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/pem-certificate-chain") + pem.Encode(w, &pem.Block{Type: "CERTIFICATE", Bytes: leaf}) + }) + s.start() + defer s.close() + ctx := context.Background() + + cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")} + cert, curl, err := cl.CreateOrderCert(ctx, s.url("/pleaseissue"), csr, true) + if err != nil { + t.Fatalf("CreateOrderCert: %v", err) + } + if _, err := x509.ParseCertificate(cert[0]); err != nil { + t.Errorf("ParseCertificate: %v", err) + } + if !reflect.DeepEqual(cert[0], leaf) { + t.Errorf("cert and leaf bytes don't match") + } + if u := s.url("/crt"); curl != u { + t.Errorf("curl = %q; want %q", curl, u) + } +} + +func TestRFC_AlreadyRevokedCert(t *testing.T) { + s := newACMEServer() + s.handle("/acme/revoke-cert", func(w http.ResponseWriter, r *http.Request) { + s.error(w, &wireError{ + Status: http.StatusBadRequest, + Type: "urn:ietf:params:acme:error:alreadyRevoked", + }) + }) + s.start() + defer s.close() + + cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")} + err := cl.RevokeCert(context.Background(), testKeyEC, []byte{0}, CRLReasonUnspecified) + if err != nil { + t.Fatalf("RevokeCert: %v", err) + } +} + +func TestRFC_ListCertAlternates(t *testing.T) { + s := newACMEServer() + s.handle("/crt", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", 
"application/pem-certificate-chain") + w.Header().Add("Link", `;rel="alternate"`) + w.Header().Add("Link", `; rel="alternate"`) + w.Header().Add("Link", `; rel="index"`) + }) + s.handle("/crt2", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/pem-certificate-chain") + }) + s.start() + defer s.close() + + cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")} + crts, err := cl.ListCertAlternates(context.Background(), s.url("/crt")) + if err != nil { + t.Fatalf("ListCertAlternates: %v", err) + } + want := []string{"https://example.com/crt/2", "https://example.com/crt/3"} + if !reflect.DeepEqual(crts, want) { + t.Errorf("ListCertAlternates(/crt): %v; want %v", crts, want) + } + crts, err = cl.ListCertAlternates(context.Background(), s.url("/crt2")) + if err != nil { + t.Fatalf("ListCertAlternates: %v", err) + } + if crts != nil { + t.Errorf("ListCertAlternates(/crt2): %v; want nil", crts) + } +} diff --git a/tempfork/acme/sync_to_upstream_test.go b/tempfork/acme/sync_to_upstream_test.go new file mode 100644 index 000000000..d6bea7a11 --- /dev/null +++ b/tempfork/acme/sync_to_upstream_test.go @@ -0,0 +1,70 @@ +package acme + +import ( + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + _ "github.com/tailscale/golang-x-crypto/acme" // so it's on disk for the test +) + +// Verify that the files tempfork/acme/*.go (other than this test file) match the +// files in "github.com/tailscale/golang-x-crypto/acme" which is where we develop +// our fork of golang.org/x/crypto/acme and merge with upstream, but then we vendor +// just its acme package into tailscale.com/tempfork/acme. +// +// Development workflow: +// +// - make a change in github.com/tailscale/golang-x-crypto/acme +// - merge it (ideally with golang.org/x/crypto/acme too) +// - rebase github.com/tailscale/golang-x-crypto/acme with upstream x/crypto/acme +// as needed +// - in the tailscale.com repo, run "go get github.com/tailscale/golang-x-crypto/acme@main" +// - run go test ./tempfork/acme to watch it fail; the failure includes +// a shell command you should run to copy the *.go files from tailscale/golang-x-crypto +// to tailscale.com. +// - watch tests pass. git add it all. +// - send PR to tailscale.com +func TestSyncedToUpstream(t *testing.T) { + const pkg = "github.com/tailscale/golang-x-crypto/acme" + out, err := exec.Command("go", "list", "-f", "{{.Dir}}", pkg).Output() + if err != nil { + t.Fatalf("failed to find %s's location o disk: %v", pkg, err) + } + xDir := strings.TrimSpace(string(out)) + + t.Logf("at %s", xDir) + scanDir := func(dir string) map[string]string { + m := map[string]string{} // filename => Go contents + ents, err := os.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + for _, de := range ents { + name := de.Name() + if name == "sync_to_upstream_test.go" { + continue + } + if !strings.HasSuffix(name, ".go") { + continue + } + b, err := os.ReadFile(filepath.Join(dir, name)) + if err != nil { + t.Fatal(err) + } + m[name] = string(b) + } + + return m + } + + want := scanDir(xDir) + got := scanDir(".") + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("files differ (-want +got):\n%s", diff) + t.Errorf("to fix, run from module root:\n\ncp %s/*.go ./tempfork/acme && ./tool/go mod tidy\n", xDir) + } +} diff --git a/tempfork/acme/types.go b/tempfork/acme/types.go new file mode 100644 index 000000000..9fad800b4 --- /dev/null +++ b/tempfork/acme/types.go @@ -0,0 +1,632 @@ +// Copyright 2016 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "crypto" + "crypto/x509" + "errors" + "fmt" + "net/http" + "strings" + "time" +) + +// ACME status values of Account, Order, Authorization and Challenge objects. +// See https://tools.ietf.org/html/rfc8555#section-7.1.6 for details. +const ( + StatusDeactivated = "deactivated" + StatusExpired = "expired" + StatusInvalid = "invalid" + StatusPending = "pending" + StatusProcessing = "processing" + StatusReady = "ready" + StatusRevoked = "revoked" + StatusUnknown = "unknown" + StatusValid = "valid" +) + +// CRLReasonCode identifies the reason for a certificate revocation. +type CRLReasonCode int + +// CRL reason codes as defined in RFC 5280. +const ( + CRLReasonUnspecified CRLReasonCode = 0 + CRLReasonKeyCompromise CRLReasonCode = 1 + CRLReasonCACompromise CRLReasonCode = 2 + CRLReasonAffiliationChanged CRLReasonCode = 3 + CRLReasonSuperseded CRLReasonCode = 4 + CRLReasonCessationOfOperation CRLReasonCode = 5 + CRLReasonCertificateHold CRLReasonCode = 6 + CRLReasonRemoveFromCRL CRLReasonCode = 8 + CRLReasonPrivilegeWithdrawn CRLReasonCode = 9 + CRLReasonAACompromise CRLReasonCode = 10 +) + +var ( + // ErrUnsupportedKey is returned when an unsupported key type is encountered. + ErrUnsupportedKey = errors.New("acme: unknown key type; only RSA and ECDSA are supported") + + // ErrAccountAlreadyExists indicates that the Client's key has already been registered + // with the CA. It is returned by Register method. + ErrAccountAlreadyExists = errors.New("acme: account already exists") + + // ErrNoAccount indicates that the Client's key has not been registered with the CA. + ErrNoAccount = errors.New("acme: account does not exist") +) + +// A Subproblem describes an ACME subproblem as reported in an Error. +type Subproblem struct { + // Type is a URI reference that identifies the problem type, + // typically in a "urn:acme:error:xxx" form. + Type string + // Detail is a human-readable explanation specific to this occurrence of the problem. + Detail string + // Instance indicates a URL that the client should direct a human user to visit + // in order for instructions on how to agree to the updated Terms of Service. + // In such an event CA sets StatusCode to 403, Type to + // "urn:ietf:params:acme:error:userActionRequired", and adds a Link header with relation + // "terms-of-service" containing the latest TOS URL. + Instance string + // Identifier may contain the ACME identifier that the error is for. + Identifier *AuthzID +} + +func (sp Subproblem) String() string { + str := fmt.Sprintf("%s: ", sp.Type) + if sp.Identifier != nil { + str += fmt.Sprintf("[%s: %s] ", sp.Identifier.Type, sp.Identifier.Value) + } + str += sp.Detail + return str +} + +// Error is an ACME error, defined in Problem Details for HTTP APIs doc +// http://tools.ietf.org/html/draft-ietf-appsawg-http-problem. +type Error struct { + // StatusCode is The HTTP status code generated by the origin server. + StatusCode int + // ProblemType is a URI reference that identifies the problem type, + // typically in a "urn:acme:error:xxx" form. + ProblemType string + // Detail is a human-readable explanation specific to this occurrence of the problem. + Detail string + // Instance indicates a URL that the client should direct a human user to visit + // in order for instructions on how to agree to the updated Terms of Service. 
+ // In such an event CA sets StatusCode to 403, ProblemType to + // "urn:ietf:params:acme:error:userActionRequired" and a Link header with relation + // "terms-of-service" containing the latest TOS URL. + Instance string + // Header is the original server error response headers. + // It may be nil. + Header http.Header + // Subproblems may contain more detailed information about the individual problems + // that caused the error. This field is only sent by RFC 8555 compatible ACME + // servers. Defined in RFC 8555 Section 6.7.1. + Subproblems []Subproblem +} + +func (e *Error) Error() string { + str := fmt.Sprintf("%d %s: %s", e.StatusCode, e.ProblemType, e.Detail) + if len(e.Subproblems) > 0 { + str += fmt.Sprintf("; subproblems:") + for _, sp := range e.Subproblems { + str += fmt.Sprintf("\n\t%s", sp) + } + } + return str +} + +// AuthorizationError indicates that an authorization for an identifier +// did not succeed. +// It contains all errors from Challenge items of the failed Authorization. +type AuthorizationError struct { + // URI uniquely identifies the failed Authorization. + URI string + + // Identifier is an AuthzID.Value of the failed Authorization. + Identifier string + + // Errors is a collection of non-nil error values of Challenge items + // of the failed Authorization. + Errors []error +} + +func (a *AuthorizationError) Error() string { + e := make([]string, len(a.Errors)) + for i, err := range a.Errors { + e[i] = err.Error() + } + + if a.Identifier != "" { + return fmt.Sprintf("acme: authorization error for %s: %s", a.Identifier, strings.Join(e, "; ")) + } + + return fmt.Sprintf("acme: authorization error: %s", strings.Join(e, "; ")) +} + +// OrderError is returned from Client's order related methods. +// It indicates the order is unusable and the clients should start over with +// AuthorizeOrder. +// +// The clients can still fetch the order object from CA using GetOrder +// to inspect its state. +type OrderError struct { + OrderURL string + Status string +} + +func (oe *OrderError) Error() string { + return fmt.Sprintf("acme: order %s status: %s", oe.OrderURL, oe.Status) +} + +// RateLimit reports whether err represents a rate limit error and +// any Retry-After duration returned by the server. +// +// See the following for more details on rate limiting: +// https://tools.ietf.org/html/draft-ietf-acme-acme-05#section-5.6 +func RateLimit(err error) (time.Duration, bool) { + e, ok := err.(*Error) + if !ok { + return 0, false + } + // Some CA implementations may return incorrect values. + // Use case-insensitive comparison. + if !strings.HasSuffix(strings.ToLower(e.ProblemType), ":ratelimited") { + return 0, false + } + if e.Header == nil { + return 0, true + } + return retryAfter(e.Header.Get("Retry-After")), true +} + +// Account is a user account. It is associated with a private key. +// Non-RFC 8555 fields are empty when interfacing with a compliant CA. +type Account struct { + // URI is the account unique ID, which is also a URL used to retrieve + // account data from the CA. + // When interfacing with RFC 8555-compliant CAs, URI is the "kid" field + // value in JWS signed requests. + URI string + + // Contact is a slice of contact info used during registration. + // See https://tools.ietf.org/html/rfc8555#section-7.3 for supported + // formats. + Contact []string + + // Status indicates current account status as returned by the CA. + // Possible values are StatusValid, StatusDeactivated, and StatusRevoked. 
+ Status string + + // OrdersURL is a URL from which a list of orders submitted by this account + // can be fetched. + OrdersURL string + + // The terms user has agreed to. + // A value not matching CurrentTerms indicates that the user hasn't agreed + // to the actual Terms of Service of the CA. + // + // It is non-RFC 8555 compliant. Package users can store the ToS they agree to + // during Client's Register call in the prompt callback function. + AgreedTerms string + + // Actual terms of a CA. + // + // It is non-RFC 8555 compliant. Use Directory's Terms field. + // When a CA updates their terms and requires an account agreement, + // a URL at which instructions to do so is available in Error's Instance field. + CurrentTerms string + + // Authz is the authorization URL used to initiate a new authz flow. + // + // It is non-RFC 8555 compliant. Use Directory's AuthzURL or OrderURL. + Authz string + + // Authorizations is a URI from which a list of authorizations + // granted to this account can be fetched via a GET request. + // + // It is non-RFC 8555 compliant and is obsoleted by OrdersURL. + Authorizations string + + // Certificates is a URI from which a list of certificates + // issued for this account can be fetched via a GET request. + // + // It is non-RFC 8555 compliant and is obsoleted by OrdersURL. + Certificates string + + // ExternalAccountBinding represents an arbitrary binding to an account of + // the CA which the ACME server is tied to. + // See https://tools.ietf.org/html/rfc8555#section-7.3.4 for more details. + ExternalAccountBinding *ExternalAccountBinding +} + +// ExternalAccountBinding contains the data needed to form a request with +// an external account binding. +// See https://tools.ietf.org/html/rfc8555#section-7.3.4 for more details. +type ExternalAccountBinding struct { + // KID is the Key ID of the symmetric MAC key that the CA provides to + // identify an external account from ACME. + KID string + + // Key is the bytes of the symmetric key that the CA provides to identify + // the account. Key must correspond to the KID. + Key []byte +} + +func (e *ExternalAccountBinding) String() string { + return fmt.Sprintf("&{KID: %q, Key: redacted}", e.KID) +} + +// Directory is ACME server discovery data. +// See https://tools.ietf.org/html/rfc8555#section-7.1.1 for more details. +type Directory struct { + // NonceURL indicates an endpoint where to fetch fresh nonce values from. + NonceURL string + + // RegURL is an account endpoint URL, allowing for creating new accounts. + // Pre-RFC 8555 CAs also allow modifying existing accounts at this URL. + RegURL string + + // OrderURL is used to initiate the certificate issuance flow + // as described in RFC 8555. + OrderURL string + + // AuthzURL is used to initiate identifier pre-authorization flow. + // Empty string indicates the flow is unsupported by the CA. + AuthzURL string + + // CertURL is a new certificate issuance endpoint URL. + // It is non-RFC 8555 compliant and is obsoleted by OrderURL. + CertURL string + + // RevokeURL is used to initiate a certificate revocation flow. + RevokeURL string + + // KeyChangeURL allows to perform account key rollover flow. + KeyChangeURL string + + // RenewalInfoURL allows to perform certificate renewal using the ACME + // Renewal Information (ARI) Extension. + RenewalInfoURL string + + // Term is a URI identifying the current terms of service. + Terms string + + // Website is an HTTP or HTTPS URL locating a website + // providing more information about the ACME server. 
+ Website string + + // CAA consists of lowercase hostname elements, which the ACME server + // recognises as referring to itself for the purposes of CAA record validation + // as defined in RFC 6844. + CAA []string + + // ExternalAccountRequired indicates that the CA requires for all account-related + // requests to include external account binding information. + ExternalAccountRequired bool +} + +// Order represents a client's request for a certificate. +// It tracks the request flow progress through to issuance. +type Order struct { + // URI uniquely identifies an order. + URI string + + // Status represents the current status of the order. + // It indicates which action the client should take. + // + // Possible values are StatusPending, StatusReady, StatusProcessing, StatusValid and StatusInvalid. + // Pending means the CA does not believe that the client has fulfilled the requirements. + // Ready indicates that the client has fulfilled all the requirements and can submit a CSR + // to obtain a certificate. This is done with Client's CreateOrderCert. + // Processing means the certificate is being issued. + // Valid indicates the CA has issued the certificate. It can be downloaded + // from the Order's CertURL. This is done with Client's FetchCert. + // Invalid means the certificate will not be issued. Users should consider this order + // abandoned. + Status string + + // Expires is the timestamp after which CA considers this order invalid. + Expires time.Time + + // Identifiers contains all identifier objects which the order pertains to. + Identifiers []AuthzID + + // NotBefore is the requested value of the notBefore field in the certificate. + NotBefore time.Time + + // NotAfter is the requested value of the notAfter field in the certificate. + NotAfter time.Time + + // AuthzURLs represents authorizations to complete before a certificate + // for identifiers specified in the order can be issued. + // It also contains unexpired authorizations that the client has completed + // in the past. + // + // Authorization objects can be fetched using Client's GetAuthorization method. + // + // The required authorizations are dictated by CA policies. + // There may not be a 1:1 relationship between the identifiers and required authorizations. + // Required authorizations can be identified by their StatusPending status. + // + // For orders in the StatusValid or StatusInvalid state these are the authorizations + // which were completed. + AuthzURLs []string + + // FinalizeURL is the endpoint at which a CSR is submitted to obtain a certificate + // once all the authorizations are satisfied. + FinalizeURL string + + // CertURL points to the certificate that has been issued in response to this order. + CertURL string + + // The error that occurred while processing the order as received from a CA, if any. + Error *Error +} + +// OrderOption allows customizing Client.AuthorizeOrder call. +type OrderOption interface { + privateOrderOpt() +} + +// WithOrderNotBefore sets order's NotBefore field. +func WithOrderNotBefore(t time.Time) OrderOption { + return orderNotBeforeOpt(t) +} + +// WithOrderNotAfter sets order's NotAfter field. +func WithOrderNotAfter(t time.Time) OrderOption { + return orderNotAfterOpt(t) +} + +type orderNotBeforeOpt time.Time + +func (orderNotBeforeOpt) privateOrderOpt() {} + +type orderNotAfterOpt time.Time + +func (orderNotAfterOpt) privateOrderOpt() {} + +// Authorization encodes an authorization response. 
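//
// A rough, caller-side sketch (not part of the vendored file) of how
// authorizations are usually reached, via Order.AuthzURLs and the client's
// GetAuthorization method; error handling elided:
//
//	order, _ := client.AuthorizeOrder(ctx, DomainIDs("example.org"))
//	for _, u := range order.AuthzURLs {
//		authz, _ := client.GetAuthorization(ctx, u)
//		// pick one of authz.Challenges and fulfill it (see Challenge below)
//	}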
+type Authorization struct { + // URI uniquely identifies a authorization. + URI string + + // Status is the current status of an authorization. + // Possible values are StatusPending, StatusValid, StatusInvalid, StatusDeactivated, + // StatusExpired and StatusRevoked. + Status string + + // Identifier is what the account is authorized to represent. + Identifier AuthzID + + // The timestamp after which the CA considers the authorization invalid. + Expires time.Time + + // Wildcard is true for authorizations of a wildcard domain name. + Wildcard bool + + // Challenges that the client needs to fulfill in order to prove possession + // of the identifier (for pending authorizations). + // For valid authorizations, the challenge that was validated. + // For invalid authorizations, the challenge that was attempted and failed. + // + // RFC 8555 compatible CAs require users to fuflfill only one of the challenges. + Challenges []*Challenge + + // A collection of sets of challenges, each of which would be sufficient + // to prove possession of the identifier. + // Clients must complete a set of challenges that covers at least one set. + // Challenges are identified by their indices in the challenges array. + // If this field is empty, the client needs to complete all challenges. + // + // This field is unused in RFC 8555. + Combinations [][]int +} + +// AuthzID is an identifier that an account is authorized to represent. +type AuthzID struct { + Type string // The type of identifier, "dns" or "ip". + Value string // The identifier itself, e.g. "example.org". +} + +// DomainIDs creates a slice of AuthzID with "dns" identifier type. +func DomainIDs(names ...string) []AuthzID { + a := make([]AuthzID, len(names)) + for i, v := range names { + a[i] = AuthzID{Type: "dns", Value: v} + } + return a +} + +// IPIDs creates a slice of AuthzID with "ip" identifier type. +// Each element of addr is textual form of an address as defined +// in RFC 1123 Section 2.1 for IPv4 and in RFC 5952 Section 4 for IPv6. +func IPIDs(addr ...string) []AuthzID { + a := make([]AuthzID, len(addr)) + for i, v := range addr { + a[i] = AuthzID{Type: "ip", Value: v} + } + return a +} + +// wireAuthzID is ACME JSON representation of authorization identifier objects. +type wireAuthzID struct { + Type string `json:"type"` + Value string `json:"value"` +} + +// wireAuthz is ACME JSON representation of Authorization objects. +type wireAuthz struct { + Identifier wireAuthzID + Status string + Expires time.Time + Wildcard bool + Challenges []wireChallenge + Combinations [][]int + Error *wireError +} + +func (z *wireAuthz) authorization(uri string) *Authorization { + a := &Authorization{ + URI: uri, + Status: z.Status, + Identifier: AuthzID{Type: z.Identifier.Type, Value: z.Identifier.Value}, + Expires: z.Expires, + Wildcard: z.Wildcard, + Challenges: make([]*Challenge, len(z.Challenges)), + Combinations: z.Combinations, // shallow copy + } + for i, v := range z.Challenges { + a.Challenges[i] = v.challenge() + } + return a +} + +func (z *wireAuthz) error(uri string) *AuthorizationError { + err := &AuthorizationError{ + URI: uri, + Identifier: z.Identifier.Value, + } + + if z.Error != nil { + err.Errors = append(err.Errors, z.Error.error(nil)) + } + + for _, raw := range z.Challenges { + if raw.Error != nil { + err.Errors = append(err.Errors, raw.Error.error(nil)) + } + } + + return err +} + +// Challenge encodes a returned CA challenge. +// Its Error field may be non-nil if the challenge is part of an Authorization +// with StatusInvalid. 
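//
// A minimal sketch (assuming the vendored client keeps x/crypto/acme's
// challenge helpers) of answering an "http-01" challenge ch belonging to an
// authorization authz; error handling elided:
//
//	resp, _ := client.HTTP01ChallengeResponse(ch.Token)
//	// serve resp at client.HTTP01ChallengePath(ch.Token) on port 80, then:
//	client.Accept(ctx, ch)
//	client.WaitAuthorization(ctx, authz.URI)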
+type Challenge struct { + // Type is the challenge type, e.g. "http-01", "tls-alpn-01", "dns-01". + Type string + + // URI is where a challenge response can be posted to. + URI string + + // Token is a random value that uniquely identifies the challenge. + Token string + + // Status identifies the status of this challenge. + // In RFC 8555, possible values are StatusPending, StatusProcessing, StatusValid, + // and StatusInvalid. + Status string + + // Validated is the time at which the CA validated this challenge. + // Always zero value in pre-RFC 8555. + Validated time.Time + + // Error indicates the reason for an authorization failure + // when this challenge was used. + // The type of a non-nil value is *Error. + Error error +} + +// wireChallenge is ACME JSON challenge representation. +type wireChallenge struct { + URL string `json:"url"` // RFC + URI string `json:"uri"` // pre-RFC + Type string + Token string + Status string + Validated time.Time + Error *wireError +} + +func (c *wireChallenge) challenge() *Challenge { + v := &Challenge{ + URI: c.URL, + Type: c.Type, + Token: c.Token, + Status: c.Status, + } + if v.URI == "" { + v.URI = c.URI // c.URL was empty; use legacy + } + if v.Status == "" { + v.Status = StatusPending + } + if c.Error != nil { + v.Error = c.Error.error(nil) + } + return v +} + +// wireError is a subset of fields of the Problem Details object +// as described in https://tools.ietf.org/html/rfc7807#section-3.1. +type wireError struct { + Status int + Type string + Detail string + Instance string + Subproblems []Subproblem +} + +func (e *wireError) error(h http.Header) *Error { + err := &Error{ + StatusCode: e.Status, + ProblemType: e.Type, + Detail: e.Detail, + Instance: e.Instance, + Header: h, + Subproblems: e.Subproblems, + } + return err +} + +// CertOption is an optional argument type for the TLS ChallengeCert methods for +// customizing a temporary certificate for TLS-based challenges. +type CertOption interface { + privateCertOpt() +} + +// WithKey creates an option holding a private/public key pair. +// The private part signs a certificate, and the public part represents the signee. +func WithKey(key crypto.Signer) CertOption { + return &certOptKey{key} +} + +type certOptKey struct { + key crypto.Signer +} + +func (*certOptKey) privateCertOpt() {} + +// WithTemplate creates an option for specifying a certificate template. +// See x509.CreateCertificate for template usage details. +// +// In TLS ChallengeCert methods, the template is also used as parent, +// resulting in a self-signed certificate. +// The DNSNames field of t is always overwritten for tls-sni challenge certs. +func WithTemplate(t *x509.Certificate) CertOption { + return (*certOptTemplate)(t) +} + +type certOptTemplate x509.Certificate + +func (*certOptTemplate) privateCertOpt() {} + +// RenewalInfoWindow describes the time frame during which the ACME client +// should attempt to renew, using the ACME Renewal Info Extension. +type RenewalInfoWindow struct { + Start time.Time `json:"start"` + End time.Time `json:"end"` +} + +// RenewalInfo describes the suggested renewal window for a given certificate, +// returned from an ACME server, using the ACME Renewal Info Extension. 
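//
// A minimal, illustrative sketch of acting on a RenewalInfo value ri once it
// has been obtained from the CA (how ri is fetched is out of scope here):
//
//	now := time.Now()
//	if now.After(ri.SuggestedWindow.Start) && now.Before(ri.SuggestedWindow.End) {
//		// attempt renewal now
//	}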
+type RenewalInfo struct { + SuggestedWindow RenewalInfoWindow `json:"suggestedWindow"` + ExplanationURL string `json:"explanationURL"` +} diff --git a/tempfork/acme/types_test.go b/tempfork/acme/types_test.go new file mode 100644 index 000000000..59ce7e760 --- /dev/null +++ b/tempfork/acme/types_test.go @@ -0,0 +1,219 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "errors" + "net/http" + "reflect" + "testing" + "time" +) + +func TestExternalAccountBindingString(t *testing.T) { + eab := ExternalAccountBinding{ + KID: "kid", + Key: []byte("key"), + } + got := eab.String() + want := `&{KID: "kid", Key: redacted}` + if got != want { + t.Errorf("eab.String() = %q, want: %q", got, want) + } +} + +func TestRateLimit(t *testing.T) { + now := time.Date(2017, 04, 27, 10, 0, 0, 0, time.UTC) + f := timeNow + defer func() { timeNow = f }() + timeNow = func() time.Time { return now } + + h120, hTime := http.Header{}, http.Header{} + h120.Set("Retry-After", "120") + hTime.Set("Retry-After", "Tue Apr 27 11:00:00 2017") + + err1 := &Error{ + ProblemType: "urn:ietf:params:acme:error:nolimit", + Header: h120, + } + err2 := &Error{ + ProblemType: "urn:ietf:params:acme:error:rateLimited", + Header: h120, + } + err3 := &Error{ + ProblemType: "urn:ietf:params:acme:error:rateLimited", + Header: nil, + } + err4 := &Error{ + ProblemType: "urn:ietf:params:acme:error:rateLimited", + Header: hTime, + } + + tt := []struct { + err error + res time.Duration + ok bool + }{ + {nil, 0, false}, + {errors.New("dummy"), 0, false}, + {err1, 0, false}, + {err2, 2 * time.Minute, true}, + {err3, 0, true}, + {err4, time.Hour, true}, + } + for i, test := range tt { + res, ok := RateLimit(test.err) + if ok != test.ok { + t.Errorf("%d: RateLimit(%+v): ok = %v; want %v", i, test.err, ok, test.ok) + continue + } + if res != test.res { + t.Errorf("%d: RateLimit(%+v) = %v; want %v", i, test.err, res, test.res) + } + } +} + +func TestAuthorizationError(t *testing.T) { + tests := []struct { + desc string + err *AuthorizationError + msg string + }{ + { + desc: "when auth error identifier is set", + err: &AuthorizationError{ + Identifier: "domain.com", + Errors: []error{ + (&wireError{ + Status: 403, + Type: "urn:ietf:params:acme:error:caa", + Detail: "CAA record for domain.com prevents issuance", + }).error(nil), + }, + }, + msg: "acme: authorization error for domain.com: 403 urn:ietf:params:acme:error:caa: CAA record for domain.com prevents issuance", + }, + + { + desc: "when auth error identifier is unset", + err: &AuthorizationError{ + Errors: []error{ + (&wireError{ + Status: 403, + Type: "urn:ietf:params:acme:error:caa", + Detail: "CAA record for domain.com prevents issuance", + }).error(nil), + }, + }, + msg: "acme: authorization error: 403 urn:ietf:params:acme:error:caa: CAA record for domain.com prevents issuance", + }, + } + + for _, tt := range tests { + if tt.err.Error() != tt.msg { + t.Errorf("got: %s\nwant: %s", tt.err, tt.msg) + } + } +} + +func TestSubproblems(t *testing.T) { + tests := []struct { + wire wireError + expectedOut Error + }{ + { + wire: wireError{ + Status: 1, + Type: "urn:error", + Detail: "it's an error", + }, + expectedOut: Error{ + StatusCode: 1, + ProblemType: "urn:error", + Detail: "it's an error", + }, + }, + { + wire: wireError{ + Status: 1, + Type: "urn:error", + Detail: "it's an error", + Subproblems: []Subproblem{ + { + Type: "urn:error:sub", + Detail: 
"it's a subproblem", + }, + }, + }, + expectedOut: Error{ + StatusCode: 1, + ProblemType: "urn:error", + Detail: "it's an error", + Subproblems: []Subproblem{ + { + Type: "urn:error:sub", + Detail: "it's a subproblem", + }, + }, + }, + }, + { + wire: wireError{ + Status: 1, + Type: "urn:error", + Detail: "it's an error", + Subproblems: []Subproblem{ + { + Type: "urn:error:sub", + Detail: "it's a subproblem", + Identifier: &AuthzID{Type: "dns", Value: "example"}, + }, + }, + }, + expectedOut: Error{ + StatusCode: 1, + ProblemType: "urn:error", + Detail: "it's an error", + Subproblems: []Subproblem{ + { + Type: "urn:error:sub", + Detail: "it's a subproblem", + Identifier: &AuthzID{Type: "dns", Value: "example"}, + }, + }, + }, + }, + } + + for _, tc := range tests { + out := tc.wire.error(nil) + if !reflect.DeepEqual(*out, tc.expectedOut) { + t.Errorf("Unexpected error: wanted %v, got %v", tc.expectedOut, *out) + } + } +} + +func TestErrorStringerWithSubproblems(t *testing.T) { + err := Error{ + StatusCode: 1, + ProblemType: "urn:error", + Detail: "it's an error", + Subproblems: []Subproblem{ + { + Type: "urn:error:sub", + Detail: "it's a subproblem", + }, + { + Type: "urn:error:sub", + Detail: "it's a subproblem", + Identifier: &AuthzID{Type: "dns", Value: "example"}, + }, + }, + } + expectedStr := "1 urn:error: it's an error; subproblems:\n\turn:error:sub: it's a subproblem\n\turn:error:sub: [dns: example] it's a subproblem" + if err.Error() != expectedStr { + t.Errorf("Unexpected error string: wanted %q, got %q", expectedStr, err.Error()) + } +} diff --git a/tempfork/acme/version_go112.go b/tempfork/acme/version_go112.go new file mode 100644 index 000000000..cc5fab604 --- /dev/null +++ b/tempfork/acme/version_go112.go @@ -0,0 +1,27 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.12 + +package acme + +import "runtime/debug" + +func init() { + // Set packageVersion if the binary was built in modules mode and x/crypto + // was not replaced with a different module. + info, ok := debug.ReadBuildInfo() + if !ok { + return + } + for _, m := range info.Deps { + if m.Path != "golang.org/x/crypto" { + continue + } + if m.Replace == nil { + packageVersion = m.Version + } + break + } +} From ba1f9a3918f092e5428620640e571ba36ab646e9 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 24 Jan 2025 19:56:33 -0800 Subject: [PATCH 0413/1708] types/persist: remove Persist.LegacyFrontendPrivateMachineKey It was a temporary migration over four years ago. It's no longer relevant. 
Updates #610 Change-Id: I1f00c9485fab13ede6f77603f7d4235222c2a481 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 17 ++--------------- ipn/ipnlocal/state_test.go | 6 ------ ipn/prefs_test.go | 9 +-------- types/persist/persist.go | 22 +++------------------- types/persist/persist_clone.go | 15 +++++++-------- types/persist/persist_test.go | 14 +------------- types/persist/persist_view.go | 18 +++++++----------- 7 files changed, 21 insertions(+), 80 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 58cd4025f..a6e3f1952 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1082,7 +1082,6 @@ func stripKeysFromPrefs(p ipn.PrefsView) ipn.PrefsView { } p2 := p.AsStruct() - p2.Persist.LegacyFrontendPrivateMachineKey = key.MachinePrivate{} p2.Persist.PrivateNodeKey = key.NodePrivate{} p2.Persist.OldPrivateNodeKey = key.NodePrivate{} p2.Persist.NetworkLockKey = key.NLPrivate{} @@ -3343,11 +3342,6 @@ func (b *LocalBackend) initMachineKeyLocked() (err error) { return nil } - var legacyMachineKey key.MachinePrivate - if p := b.pm.CurrentPrefs().Persist(); p.Valid() { - legacyMachineKey = p.LegacyFrontendPrivateMachineKey() - } - keyText, err := b.store.ReadState(ipn.MachineKeyStateKey) if err == nil { if err := b.machinePrivKey.UnmarshalText(keyText); err != nil { @@ -3356,9 +3350,6 @@ func (b *LocalBackend) initMachineKeyLocked() (err error) { if b.machinePrivKey.IsZero() { return fmt.Errorf("invalid zero key stored in %v key of %v", ipn.MachineKeyStateKey, b.store) } - if !legacyMachineKey.IsZero() && !legacyMachineKey.Equal(b.machinePrivKey) { - b.logf("frontend-provided legacy machine key ignored; used value from server state") - } return nil } if err != ipn.ErrStateNotExist { @@ -3368,12 +3359,8 @@ func (b *LocalBackend) initMachineKeyLocked() (err error) { // If we didn't find one already on disk and the prefs already // have a legacy machine key, use that. Otherwise generate a // new one. 
- if !legacyMachineKey.IsZero() { - b.machinePrivKey = legacyMachineKey - } else { - b.logf("generating new machine key") - b.machinePrivKey = key.NewMachine() - } + b.logf("generating new machine key") + b.machinePrivKey = key.NewMachine() keyText, _ = b.machinePrivKey.MarshalText() if err := ipn.WriteState(b.store, ipn.MachineKeyStateKey, keyText); err != nil { diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index ef4b0ed62..1b3b43af6 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -949,8 +949,6 @@ func TestEditPrefsHasNoKeys(t *testing.T) { Persist: &persist.Persist{ PrivateNodeKey: key.NewNode(), OldPrivateNodeKey: key.NewNode(), - - LegacyFrontendPrivateMachineKey: key.NewMachine(), }, }).View(), ipn.NetworkProfile{}) if p := b.pm.CurrentPrefs().Persist(); !p.Valid() || p.PrivateNodeKey().IsZero() { @@ -977,10 +975,6 @@ func TestEditPrefsHasNoKeys(t *testing.T) { t.Errorf("OldPrivateNodeKey = %v; want zero", p.Persist().OldPrivateNodeKey()) } - if !p.Persist().LegacyFrontendPrivateMachineKey().IsZero() { - t.Errorf("LegacyFrontendPrivateMachineKey = %v; want zero", p.Persist().LegacyFrontendPrivateMachineKey()) - } - if !p.Persist().NetworkLockKey().IsZero() { t.Errorf("NetworkLockKey= %v; want zero", p.Persist().NetworkLockKey()) } diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 31671c0f8..91b835e3e 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -467,13 +467,6 @@ func TestPrefsPretty(t *testing.T) { "darwin", `Prefs{ra=false dns=false want=true tags=tag:foo,tag:bar url="http://localhost:1234" update=off Persist=nil}`, }, - { - Prefs{ - Persist: &persist.Persist{}, - }, - "linux", - `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{lm=, o=, n= u=""}}`, - }, { Prefs{ Persist: &persist.Persist{ @@ -481,7 +474,7 @@ func TestPrefsPretty(t *testing.T) { }, }, "linux", - `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{lm=, o=, n=[B1VKl] u=""}}`, + `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u=""}}`, }, { Prefs{ diff --git a/types/persist/persist.go b/types/persist/persist.go index 8b555abd4..d888a6afb 100644 --- a/types/persist/persist.go +++ b/types/persist/persist.go @@ -21,17 +21,6 @@ import ( type Persist struct { _ structs.Incomparable - // LegacyFrontendPrivateMachineKey is here temporarily - // (starting 2020-09-28) during migration of Windows users' - // machine keys from frontend storage to the backend. On the - // first LocalBackend.Start call, the backend will initialize - // the real (backend-owned) machine key from the frontend's - // provided value (if non-zero), picking a new random one if - // needed. This field should be considered read-only from GUI - // frontends. The real value should not be written back in - // this field, lest the frontend persist it to disk. 
- LegacyFrontendPrivateMachineKey key.MachinePrivate `json:"PrivateMachineKey"` - PrivateNodeKey key.NodePrivate OldPrivateNodeKey key.NodePrivate // needed to request key rotation UserProfile tailcfg.UserProfile @@ -95,8 +84,7 @@ func (p *Persist) Equals(p2 *Persist) bool { return false } - return p.LegacyFrontendPrivateMachineKey.Equal(p2.LegacyFrontendPrivateMachineKey) && - p.PrivateNodeKey.Equal(p2.PrivateNodeKey) && + return p.PrivateNodeKey.Equal(p2.PrivateNodeKey) && p.OldPrivateNodeKey.Equal(p2.OldPrivateNodeKey) && p.UserProfile.Equal(&p2.UserProfile) && p.NetworkLockKey.Equal(p2.NetworkLockKey) && @@ -106,18 +94,14 @@ func (p *Persist) Equals(p2 *Persist) bool { func (p *Persist) Pretty() string { var ( - mk key.MachinePublic ok, nk key.NodePublic ) - if !p.LegacyFrontendPrivateMachineKey.IsZero() { - mk = p.LegacyFrontendPrivateMachineKey.Public() - } if !p.OldPrivateNodeKey.IsZero() { ok = p.OldPrivateNodeKey.Public() } if !p.PrivateNodeKey.IsZero() { nk = p.PublicNodeKey() } - return fmt.Sprintf("Persist{lm=%v, o=%v, n=%v u=%#v}", - mk.ShortString(), ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName) + return fmt.Sprintf("Persist{o=%v, n=%v u=%#v}", + ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName) } diff --git a/types/persist/persist_clone.go b/types/persist/persist_clone.go index 95dd65ac1..680419ff2 100644 --- a/types/persist/persist_clone.go +++ b/types/persist/persist_clone.go @@ -25,12 +25,11 @@ func (src *Persist) Clone() *Persist { // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _PersistCloneNeedsRegeneration = Persist(struct { - _ structs.Incomparable - LegacyFrontendPrivateMachineKey key.MachinePrivate - PrivateNodeKey key.NodePrivate - OldPrivateNodeKey key.NodePrivate - UserProfile tailcfg.UserProfile - NetworkLockKey key.NLPrivate - NodeID tailcfg.StableNodeID - DisallowedTKAStateIDs []string + _ structs.Incomparable + PrivateNodeKey key.NodePrivate + OldPrivateNodeKey key.NodePrivate + UserProfile tailcfg.UserProfile + NetworkLockKey key.NLPrivate + NodeID tailcfg.StableNodeID + DisallowedTKAStateIDs []string }{}) diff --git a/types/persist/persist_test.go b/types/persist/persist_test.go index 6b159573d..dbf2a6d8c 100644 --- a/types/persist/persist_test.go +++ b/types/persist/persist_test.go @@ -21,13 +21,12 @@ func fieldsOf(t reflect.Type) (fields []string) { } func TestPersistEqual(t *testing.T) { - persistHandles := []string{"LegacyFrontendPrivateMachineKey", "PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "DisallowedTKAStateIDs"} + persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "DisallowedTKAStateIDs"} if have := fieldsOf(reflect.TypeFor[Persist]()); !reflect.DeepEqual(have, persistHandles) { t.Errorf("Persist.Equal check might be out of sync\nfields: %q\nhandled: %q\n", have, persistHandles) } - m1 := key.NewMachine() k1 := key.NewNode() nl1 := key.NewNLPrivate() tests := []struct { @@ -39,17 +38,6 @@ func TestPersistEqual(t *testing.T) { {&Persist{}, nil, false}, {&Persist{}, &Persist{}, true}, - { - &Persist{LegacyFrontendPrivateMachineKey: m1}, - &Persist{LegacyFrontendPrivateMachineKey: key.NewMachine()}, - false, - }, - { - &Persist{LegacyFrontendPrivateMachineKey: m1}, - &Persist{LegacyFrontendPrivateMachineKey: m1}, - true, - }, - { &Persist{PrivateNodeKey: k1}, &Persist{PrivateNodeKey: key.NewNode()}, diff --git a/types/persist/persist_view.go 
b/types/persist/persist_view.go index ce600be3e..55eb40c51 100644 --- a/types/persist/persist_view.go +++ b/types/persist/persist_view.go @@ -62,9 +62,6 @@ func (v *PersistView) UnmarshalJSON(b []byte) error { return nil } -func (v PersistView) LegacyFrontendPrivateMachineKey() key.MachinePrivate { - return v.ж.LegacyFrontendPrivateMachineKey -} func (v PersistView) PrivateNodeKey() key.NodePrivate { return v.ж.PrivateNodeKey } func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } @@ -76,12 +73,11 @@ func (v PersistView) DisallowedTKAStateIDs() views.Slice[string] { // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _PersistViewNeedsRegeneration = Persist(struct { - _ structs.Incomparable - LegacyFrontendPrivateMachineKey key.MachinePrivate - PrivateNodeKey key.NodePrivate - OldPrivateNodeKey key.NodePrivate - UserProfile tailcfg.UserProfile - NetworkLockKey key.NLPrivate - NodeID tailcfg.StableNodeID - DisallowedTKAStateIDs []string + _ structs.Incomparable + PrivateNodeKey key.NodePrivate + OldPrivateNodeKey key.NodePrivate + UserProfile tailcfg.UserProfile + NetworkLockKey key.NLPrivate + NodeID tailcfg.StableNodeID + DisallowedTKAStateIDs []string }{}) From 079973de8280c23b7a3f74f6c2d0ac5b7d963d9d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 27 Jan 2025 22:03:22 +0000 Subject: [PATCH 0414/1708] tempfork/acme: fix TestSyncedToUpstream with Windows line endings Updates #10238 Change-Id: Ic85811c267679a9f79377f376d77dee3a9d92ce7 Signed-off-by: Brad Fitzpatrick --- tempfork/acme/sync_to_upstream_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tempfork/acme/sync_to_upstream_test.go b/tempfork/acme/sync_to_upstream_test.go index d6bea7a11..e22c8c1a8 100644 --- a/tempfork/acme/sync_to_upstream_test.go +++ b/tempfork/acme/sync_to_upstream_test.go @@ -55,7 +55,7 @@ func TestSyncedToUpstream(t *testing.T) { if err != nil { t.Fatal(err) } - m[name] = string(b) + m[name] = strings.ReplaceAll(string(b), "\r", "") } return m From 6f10fe8ab1f7d4f212610719a02c5b612575b858 Mon Sep 17 00:00:00 2001 From: yejingchen Date: Tue, 28 Jan 2025 18:05:49 +0800 Subject: [PATCH 0415/1708] cmd/tailscale: add warning to help text of `--force-reauth` (#14778) The warning text is adapted from https://tailscale.com/kb/1028/key-expiry#renewing-keys-for-an-expired-device . There is already https://github.com/tailscale/tailscale/pull/7575 which presents a warning when connected over Tailscale, however the detection is done by checking SSH environment variables, which are absent within systemd's run0*. That means `--force-reauth` will happily bring down Tailscale connection, leaving the user in despair. Changing only the help text is by no means a complete solution, but hopefully it will stop users from blindly trying it out, and motivate them to search for a proper solution. *: https://www.freedesktop.org/software/systemd/man/devel/run0.html Updates #3849 Signed-off-by: yejingchen --- cmd/tailscale/cli/up.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 4af264d73..da3780e39 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -139,7 +139,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { // Some flags are only for "up", not "login". 
upf.BoolVar(&upArgs.json, "json", false, "output in JSON format (WARNING: format subject to change)") upf.BoolVar(&upArgs.reset, "reset", false, "reset unspecified settings to their default values") - upf.BoolVar(&upArgs.forceReauth, "force-reauth", false, "force reauthentication") + upf.BoolVar(&upArgs.forceReauth, "force-reauth", false, "force reauthentication (WARNING: this will bring down the Tailscale connection and thus should not be done remotely over SSH or RDP)") registerAcceptRiskFlag(upf, &upArgs.acceptedRisks) } From 3abfbf50aebbe3ba57dc749165edb56be6715c0a Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Tue, 28 Jan 2025 12:10:28 +0000 Subject: [PATCH 0416/1708] tsnet: return from Accept when the listener gets closed Fixes #14808 Signed-off-by: Anton Tolchanov --- tsnet/tsnet.go | 7 ++++--- tsnet/tsnet_test.go | 31 +++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 3 deletions(-) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 3505c9453..23a9f9a98 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -1286,11 +1286,12 @@ type listener struct { } func (ln *listener) Accept() (net.Conn, error) { - c, ok := <-ln.conn - if !ok { + select { + case c := <-ln.conn: + return c, nil + case <-ln.closedc: return nil, fmt.Errorf("tsnet: %w", net.ErrClosed) } - return c, nil } func (ln *listener) Addr() net.Addr { return addr{ln} } diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 552e8dbee..0f245b015 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -667,6 +667,37 @@ func TestFunnel(t *testing.T) { } } +func TestListenerClose(t *testing.T) { + ctx := context.Background() + controlURL, _ := startControl(t) + + s1, _, _ := startServer(t, ctx, controlURL, "s1") + + ln, err := s1.Listen("tcp", ":8080") + if err != nil { + t.Fatal(err) + } + + errc := make(chan error, 1) + go func() { + c, err := ln.Accept() + if c != nil { + c.Close() + } + errc <- err + }() + + ln.Close() + select { + case err := <-errc: + if !errors.Is(err, net.ErrClosed) { + t.Errorf("unexpected error: %v", err) + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for Accept to return") + } +} + func dialIngressConn(from, to *Server, target string) (net.Conn, error) { toLC := must.Get(to.LocalClient()) toStatus := must.Get(toLC.StatusWithoutPeers(context.Background())) From 46fd4e58a27495263336b86ee961ee28d8c332b7 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Mon, 27 Jan 2025 13:05:27 -0600 Subject: [PATCH 0417/1708] ssh,tempfork/gliderlabs/ssh: replace github.com/tailscale/golang-x-crypto/ssh with golang.org/x/crypto/ssh The upstream crypto package now supports sending banners at any time during authentication, so the Tailscale fork of crypto/ssh is no longer necessary. github.com/tailscale/golang-x-crypto is still needed for some custom ACME autocert functionality. tempfork/gliderlabs is still necessary because of a few other customizations, mostly related to TTY handling. 
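As a minimal sketch of the upstream API this migration relies on (not taken verbatim from this change; gossh aliases golang.org/x/crypto/ssh as in the diffs below, and policyDenies stands in for real policy evaluation), banners are now sent through a ServerPreAuthConn captured in PreAuthConnCallback, and a denial can return a BannerError so the message still reaches the client; the cmd/ssh-auth-none-demo changes later in this patch use the same pattern:

	var spac gossh.ServerPreAuthConn
	cfg := &gossh.ServerConfig{
		NoClientAuth: true, // required for NoClientAuthCallback to run
		PreAuthConnCallback: func(conn gossh.ServerPreAuthConn) {
			spac = conn // banners can only be sent before auth completes
		},
		NoClientAuthCallback: func(cm gossh.ConnMetadata) (*gossh.Permissions, error) {
			spac.SendAuthBanner("# evaluating Tailscale SSH policy...\r\n")
			if policyDenies(cm.User()) {
				return nil, &gossh.BannerError{Message: "tailscale: access denied\n"}
			}
			return &gossh.Permissions{}, nil
		},
	}
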
Updates #8593 Signed-off-by: Percy Wegmann --- cmd/k8s-operator/depaware.txt | 11 +- cmd/ssh-auth-none-demo/ssh-auth-none-demo.go | 24 +- cmd/tailscaled/depaware.txt | 7 +- cmd/tailscaled/deps_test.go | 1 - go.mod | 2 +- go.sum | 4 +- ipn/ipnlocal/ssh.go | 2 +- ssh/tailssh/tailssh.go | 310 ++++++++----------- ssh/tailssh/tailssh_integration_test.go | 2 +- ssh/tailssh/tailssh_test.go | 5 +- tempfork/gliderlabs/ssh/agent.go | 2 +- tempfork/gliderlabs/ssh/context.go | 11 +- tempfork/gliderlabs/ssh/options.go | 2 +- tempfork/gliderlabs/ssh/options_test.go | 2 +- tempfork/gliderlabs/ssh/server.go | 2 +- tempfork/gliderlabs/ssh/session.go | 2 +- tempfork/gliderlabs/ssh/session_test.go | 2 +- tempfork/gliderlabs/ssh/ssh.go | 4 +- tempfork/gliderlabs/ssh/tcpip.go | 2 +- tempfork/gliderlabs/ssh/tcpip_test.go | 2 +- tempfork/gliderlabs/ssh/util.go | 2 +- tempfork/gliderlabs/ssh/wrap.go | 2 +- 22 files changed, 172 insertions(+), 231 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index e32fd4a2b..972dbfc2c 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -197,9 +197,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - LD github.com/tailscale/golang-x-crypto/internal/poly1305 from github.com/tailscale/golang-x-crypto/ssh - LD github.com/tailscale/golang-x-crypto/ssh from tailscale.com/ipn/ipnlocal - LD github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf from github.com/tailscale/golang-x-crypto/ssh github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ @@ -986,12 +983,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ - LD golang.org/x/crypto/blowfish from github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf - golang.org/x/crypto/chacha20 from github.com/tailscale/golang-x-crypto/ssh+ + LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf + golang.org/x/crypto/chacha20 from golang.org/x/crypto/ssh+ golang.org/x/crypto/chacha20poly1305 from crypto/tls+ golang.org/x/crypto/cryptobyte from crypto/ecdsa+ golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ - golang.org/x/crypto/curve25519 from github.com/tailscale/golang-x-crypto/ssh+ + golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ golang.org/x/crypto/hkdf from crypto/tls+ golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ @@ -1000,6 +997,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ + LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal + LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf 
from golang.org/x/crypto/ssh golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+ golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+ diff --git a/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go b/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go index ee929299a..39af584ec 100644 --- a/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go +++ b/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go @@ -6,6 +6,9 @@ // highlight the unique parts of the Tailscale SSH server so SSH // client authors can hit it easily and fix their SSH clients without // needing to set up Tailscale and Tailscale SSH. +// +// Connections are allowed using any username except for "denyme". Connecting as +// "denyme" will result in an authentication failure with error message. package main import ( @@ -16,6 +19,7 @@ import ( "crypto/rsa" "crypto/x509" "encoding/pem" + "errors" "flag" "fmt" "io" @@ -24,7 +28,7 @@ import ( "path/filepath" "time" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" "tailscale.com/tempfork/gliderlabs/ssh" ) @@ -62,13 +66,21 @@ func main() { Handler: handleSessionPostSSHAuth, ServerConfigCallback: func(ctx ssh.Context) *gossh.ServerConfig { start := time.Now() + var spac gossh.ServerPreAuthConn return &gossh.ServerConfig{ - NextAuthMethodCallback: func(conn gossh.ConnMetadata, prevErrors []error) []string { - return []string{"tailscale"} + PreAuthConnCallback: func(conn gossh.ServerPreAuthConn) { + spac = conn }, NoClientAuth: true, // required for the NoClientAuthCallback to run NoClientAuthCallback: func(cm gossh.ConnMetadata) (*gossh.Permissions, error) { - cm.SendAuthBanner(fmt.Sprintf("# Banner: doing none auth at %v\r\n", time.Since(start))) + spac.SendAuthBanner(fmt.Sprintf("# Banner: doing none auth at %v\r\n", time.Since(start))) + + if cm.User() == "denyme" { + return nil, &gossh.BannerError{ + Err: errors.New("denying access"), + Message: "denyme is not allowed to access this machine\n", + } + } totalBanners := 2 if cm.User() == "banners" { @@ -77,9 +89,9 @@ func main() { for banner := 2; banner <= totalBanners; banner++ { time.Sleep(time.Second) if banner == totalBanners { - cm.SendAuthBanner(fmt.Sprintf("# Banner%d: access granted at %v\r\n", banner, time.Since(start))) + spac.SendAuthBanner(fmt.Sprintf("# Banner%d: access granted at %v\r\n", banner, time.Since(start))) } else { - cm.SendAuthBanner(fmt.Sprintf("# Banner%d at %v\r\n", banner, time.Since(start))) + spac.SendAuthBanner(fmt.Sprintf("# Banner%d at %v\r\n", banner, time.Since(start))) } } return nil, nil diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index a7ad83818..a6fae54ff 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -152,9 +152,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - LD github.com/tailscale/golang-x-crypto/internal/poly1305 from github.com/tailscale/golang-x-crypto/ssh - LD github.com/tailscale/golang-x-crypto/ssh from tailscale.com/ipn/ipnlocal+ - LD github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf from github.com/tailscale/golang-x-crypto/ssh github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ 
github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ @@ -439,12 +436,12 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ - LD golang.org/x/crypto/blowfish from github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf+ + LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ golang.org/x/crypto/chacha20poly1305 from crypto/tls+ golang.org/x/crypto/cryptobyte from crypto/ecdsa+ golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ - golang.org/x/crypto/curve25519 from github.com/tailscale/golang-x-crypto/ssh+ + golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ golang.org/x/crypto/hkdf from crypto/tls+ golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 2b4bc280d..7f06abc6c 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -17,7 +17,6 @@ func TestOmitSSH(t *testing.T) { Tags: "ts_omit_ssh", BadDeps: map[string]string{ "tailscale.com/ssh/tailssh": msg, - "golang.org/x/crypto/ssh": msg, "tailscale.com/sessionrecording": msg, "github.com/anmitsu/go-shlex": msg, "github.com/creack/pty": msg, diff --git a/go.mod b/go.mod index 8e52a9ab3..2489e34d7 100644 --- a/go.mod +++ b/go.mod @@ -94,7 +94,7 @@ require ( go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.32.0 + golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 golang.org/x/mod v0.22.0 golang.org/x/net v0.34.0 diff --git a/go.sum b/go.sum index c1c82ad77..b10e98da2 100644 --- a/go.sum +++ b/go.sum @@ -1058,8 +1058,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= -golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 h1:Z+Zg+aXJYq6f4TK2E4H+vZkQ4dJAWnInXDR6hM9znxo= +golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= diff --git a/ipn/ipnlocal/ssh.go b/ipn/ipnlocal/ssh.go index 383d03f5a..47a74e282 100644 --- a/ipn/ipnlocal/ssh.go +++ b/ipn/ipnlocal/ssh.go @@ -24,8 +24,8 @@ import ( "strings" "sync" - "github.com/tailscale/golang-x-crypto/ssh" "go4.org/mem" + "golang.org/x/crypto/ssh" "tailscale.com/tailcfg" 
"tailscale.com/util/lineiter" "tailscale.com/util/mak" diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index 7f21ccd11..638ff99b8 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -29,7 +29,7 @@ import ( "syscall" "time" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" "tailscale.com/envknob" "tailscale.com/ipn/ipnlocal" "tailscale.com/logtail/backoff" @@ -198,8 +198,11 @@ func (srv *server) OnPolicyChange() { // Setup and discover server info // - ServerConfigCallback // -// Do the user auth -// - NoClientAuthHandler +// Get access to a ServerPreAuthConn (useful for sending banners) +// +// Do the user auth with a NoClientAuthCallback. If user specified +// a username ending in "+password", follow this with password auth +// (to work around buggy SSH clients that don't work with noauth). // // Once auth is done, the conn can be multiplexed with multiple sessions and // channels concurrently. At which point any of the following can be called @@ -219,15 +222,12 @@ type conn struct { idH string connID string // ID that's shared with control - // anyPasswordIsOkay is whether the client is authorized but has requested - // password-based auth to work around their buggy SSH client. When set, we - // accept any password in the PasswordHandler. - anyPasswordIsOkay bool // set by NoClientAuthCallback + // spac is a [gossh.ServerPreAuthConn] used for sending auth banners. + // Banners cannot be sent after auth completes. + spac gossh.ServerPreAuthConn - action0 *tailcfg.SSHAction // set by doPolicyAuth; first matching action - currentAction *tailcfg.SSHAction // set by doPolicyAuth, updated by resolveNextAction - finalAction *tailcfg.SSHAction // set by doPolicyAuth or resolveNextAction - finalActionErr error // set by doPolicyAuth or resolveNextAction + action0 *tailcfg.SSHAction // set by clientAuth + finalAction *tailcfg.SSHAction // set by clientAuth info *sshConnInfo // set by setInfo localUser *userMeta // set by doPolicyAuth @@ -254,141 +254,142 @@ func (c *conn) vlogf(format string, args ...any) { } } -// isAuthorized walks through the action chain and returns nil if the connection -// is authorized. If the connection is not authorized, it returns -// errDenied. If the action chain resolution fails, it returns the -// resolution error. -func (c *conn) isAuthorized(ctx ssh.Context) error { - action := c.currentAction - for { - if action.Accept { - return nil - } - if action.Reject || action.HoldAndDelegate == "" { - return errDenied - } - var err error - action, err = c.resolveNextAction(ctx) - if err != nil { - return err - } - if action.Message != "" { - if err := ctx.SendAuthBanner(action.Message); err != nil { - return err - } - } +// errDenied is returned by auth callbacks when a connection is denied by the +// policy. It returns a gossh.BannerError to make sure the message gets +// displayed as an auth banner. +func errDenied(message string) error { + if message == "" { + message = "tailscale: access denied" + } + return &gossh.BannerError{ + Message: message, } } -// errDenied is returned by auth callbacks when a connection is denied by the -// policy. -var errDenied = errors.New("ssh: access denied") +// bannerError creates a gossh.BannerError that will result in the given +// message being displayed to the client. If err != nil, this also logs +// message:error. The contents of err is not leaked to clients in the banner. 
+func (c *conn) bannerError(message string, err error) error { + if err != nil { + c.logf("%s: %s", message, err) + } + return &gossh.BannerError{ + Err: err, + Message: fmt.Sprintf("tailscale: %s", message), + } +} -// NoClientAuthCallback implements gossh.NoClientAuthCallback and is called by -// the ssh.Server when the client first connects with the "none" -// authentication method. +// clientAuth is responsible for performing client authentication. // -// It is responsible for continuing policy evaluation from BannerCallback (or -// starting it afresh). It returns an error if the policy evaluation fails, or -// if the decision is "reject" -// -// It either returns nil (accept) or errDenied (reject). The errors may be wrapped. -func (c *conn) NoClientAuthCallback(ctx ssh.Context) error { +// If policy evaluation fails, it returns an error. +// If access is denied, it returns an error. +func (c *conn) clientAuth(cm gossh.ConnMetadata) (*gossh.Permissions, error) { if c.insecureSkipTailscaleAuth { - return nil - } - if err := c.doPolicyAuth(ctx); err != nil { - return err - } - if err := c.isAuthorized(ctx); err != nil { - return err + return &gossh.Permissions{}, nil } - // Let users specify a username ending in +password to force password auth. - // This exists for buggy SSH clients that get confused by success from - // "none" auth. - if strings.HasSuffix(ctx.User(), forcePasswordSuffix) { - c.anyPasswordIsOkay = true - return errors.New("any password please") // not shown to users + if err := c.setInfo(cm); err != nil { + return nil, c.bannerError("failed to get connection info", err) } - return nil -} -func (c *conn) nextAuthMethodCallback(cm gossh.ConnMetadata, prevErrors []error) (nextMethod []string) { - switch { - case c.anyPasswordIsOkay: - nextMethod = append(nextMethod, "password") + action, localUser, acceptEnv, err := c.evaluatePolicy() + if err != nil { + return nil, c.bannerError("failed to evaluate SSH policy", err) } - // The fake "tailscale" method is always appended to next so OpenSSH renders - // that in parens as the final failure. (It also shows up in "ssh -v", etc) - nextMethod = append(nextMethod, "tailscale") - return -} - -// fakePasswordHandler is our implementation of the PasswordHandler hook that -// checks whether the user's password is correct. But we don't actually use -// passwords. This exists only for when the user's username ends in "+password" -// to signal that their SSH client is buggy and gets confused by auth type -// "none" succeeding and they want our SSH server to require a dummy password -// prompt instead. We then accept any password since we've already authenticated -// & authorized them. -func (c *conn) fakePasswordHandler(ctx ssh.Context, password string) bool { - return c.anyPasswordIsOkay -} + c.action0 = action -// doPolicyAuth verifies that conn can proceed. -// It returns nil if the matching policy action is Accept or -// HoldAndDelegate. Otherwise, it returns errDenied. 
-func (c *conn) doPolicyAuth(ctx ssh.Context) error { - if err := c.setInfo(ctx); err != nil { - c.logf("failed to get conninfo: %v", err) - return errDenied - } - a, localUser, acceptEnv, err := c.evaluatePolicy() - if err != nil { - return fmt.Errorf("%w: %v", errDenied, err) - } - c.action0 = a - c.currentAction = a - c.acceptEnv = acceptEnv - if a.Message != "" { - if err := ctx.SendAuthBanner(a.Message); err != nil { - return fmt.Errorf("SendBanner: %w", err) - } - } - if a.Accept || a.HoldAndDelegate != "" { - if a.Accept { - c.finalAction = a - } + if action.Accept || action.HoldAndDelegate != "" { + // Immediately look up user information for purposes of generating + // hold and delegate URL (if necessary). lu, err := userLookup(localUser) if err != nil { - c.logf("failed to look up %v: %v", localUser, err) - ctx.SendAuthBanner(fmt.Sprintf("failed to look up %v\r\n", localUser)) - return err + return nil, c.bannerError(fmt.Sprintf("failed to look up local user %q ", localUser), err) } gids, err := lu.GroupIds() if err != nil { - c.logf("failed to look up local user's group IDs: %v", err) - return err + return nil, c.bannerError("failed to look up local user's group IDs", err) } c.userGroupIDs = gids c.localUser = lu - return nil + c.acceptEnv = acceptEnv } - if a.Reject { - c.finalAction = a - return errDenied + + for { + switch { + case action.Accept: + metricTerminalAccept.Add(1) + if action.Message != "" { + if err := c.spac.SendAuthBanner(action.Message); err != nil { + return nil, fmt.Errorf("error sending auth welcome message: %w", err) + } + } + c.finalAction = action + return &gossh.Permissions{}, nil + case action.Reject: + metricTerminalReject.Add(1) + c.finalAction = action + return nil, errDenied(action.Message) + case action.HoldAndDelegate != "": + if action.Message != "" { + if err := c.spac.SendAuthBanner(action.Message); err != nil { + return nil, fmt.Errorf("error sending hold and delegate message: %w", err) + } + } + + url := action.HoldAndDelegate + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer cancel() + + metricHolds.Add(1) + url = c.expandDelegateURLLocked(url) + + var err error + action, err = c.fetchSSHAction(ctx, url) + if err != nil { + metricTerminalFetchError.Add(1) + return nil, c.bannerError("failed to fetch next SSH action", fmt.Errorf("fetch failed from %s: %w", url, err)) + } + default: + metricTerminalMalformed.Add(1) + return nil, c.bannerError("reached Action that had neither Accept, Reject, nor HoldAndDelegate", nil) + } } - // Shouldn't get here, but: - return errDenied } // ServerConfig implements ssh.ServerConfigCallback. func (c *conn) ServerConfig(ctx ssh.Context) *gossh.ServerConfig { return &gossh.ServerConfig{ - NoClientAuth: true, // required for the NoClientAuthCallback to run - NextAuthMethodCallback: c.nextAuthMethodCallback, + PreAuthConnCallback: func(spac gossh.ServerPreAuthConn) { + c.spac = spac + }, + NoClientAuth: true, // required for the NoClientAuthCallback to run + NoClientAuthCallback: func(cm gossh.ConnMetadata) (*gossh.Permissions, error) { + // First perform client authentication, which can potentially + // involve multiple steps (for example prompting user to log in to + // Tailscale admin panel to confirm identity). + perms, err := c.clientAuth(cm) + if err != nil { + return nil, err + } + + // Authentication succeeded. Buggy SSH clients get confused by + // success from the "none" auth method. 
As a workaround, let users + // specify a username ending in "+password" to force password auth. + // The actual value of the password doesn't matter. + if strings.HasSuffix(cm.User(), forcePasswordSuffix) { + return nil, &gossh.PartialSuccessError{ + Next: gossh.ServerAuthCallbacks{ + PasswordCallback: func(_ gossh.ConnMetadata, password []byte) (*gossh.Permissions, error) { + return &gossh.Permissions{}, nil + }, + }, + } + } + + return perms, nil + }, } } @@ -399,7 +400,7 @@ func (srv *server) newConn() (*conn, error) { // Stop accepting new connections. // Connections in the auth phase are handled in handleConnPostSSHAuth. // Existing sessions are terminated by Shutdown. - return nil, errDenied + return nil, errDenied("tailscale: server is shutting down") } srv.mu.Unlock() c := &conn{srv: srv} @@ -410,9 +411,6 @@ func (srv *server) newConn() (*conn, error) { Version: "Tailscale", ServerConfigCallback: c.ServerConfig, - NoClientAuthHandler: c.NoClientAuthCallback, - PasswordHandler: c.fakePasswordHandler, - Handler: c.handleSessionPostSSHAuth, LocalPortForwardingCallback: c.mayForwardLocalPortTo, ReversePortForwardingCallback: c.mayReversePortForwardTo, @@ -523,16 +521,16 @@ func toIPPort(a net.Addr) (ipp netip.AddrPort) { return netip.AddrPortFrom(tanetaddr.Unmap(), uint16(ta.Port)) } -// connInfo returns a populated sshConnInfo from the provided arguments, +// connInfo populates the sshConnInfo from the provided arguments, // validating only that they represent a known Tailscale identity. -func (c *conn) setInfo(ctx ssh.Context) error { +func (c *conn) setInfo(cm gossh.ConnMetadata) error { if c.info != nil { return nil } ci := &sshConnInfo{ - sshUser: strings.TrimSuffix(ctx.User(), forcePasswordSuffix), - src: toIPPort(ctx.RemoteAddr()), - dst: toIPPort(ctx.LocalAddr()), + sshUser: strings.TrimSuffix(cm.User(), forcePasswordSuffix), + src: toIPPort(cm.RemoteAddr()), + dst: toIPPort(cm.LocalAddr()), } if !tsaddr.IsTailscaleIP(ci.dst.Addr()) { return fmt.Errorf("tailssh: rejecting non-Tailscale local address %v", ci.dst) @@ -547,7 +545,7 @@ func (c *conn) setInfo(ctx ssh.Context) error { ci.node = node ci.uprof = uprof - c.idH = ctx.SessionID() + c.idH = string(cm.SessionID()) c.info = ci c.logf("handling conn: %v", ci.String()) return nil @@ -594,62 +592,6 @@ func (c *conn) handleSessionPostSSHAuth(s ssh.Session) { ss.run() } -// resolveNextAction starts at c.currentAction and makes it way through the -// action chain one step at a time. An action without a HoldAndDelegate is -// considered the final action. Once a final action is reached, this function -// will keep returning that action. It updates c.currentAction to the next -// action in the chain. When the final action is reached, it also sets -// c.finalAction to the final action. -func (c *conn) resolveNextAction(sctx ssh.Context) (action *tailcfg.SSHAction, err error) { - if c.finalAction != nil || c.finalActionErr != nil { - return c.finalAction, c.finalActionErr - } - - defer func() { - if action != nil { - c.currentAction = action - if action.Accept || action.Reject { - c.finalAction = action - } - } - if err != nil { - c.finalActionErr = err - } - }() - - ctx, cancel := context.WithCancel(sctx) - defer cancel() - - // Loop processing/fetching Actions until one reaches a - // terminal state (Accept, Reject, or invalid Action), or - // until fetchSSHAction times out due to the context being - // done (client disconnect) or its 30 minute timeout passes. 
- // (Which is a long time for somebody to see login - // instructions and go to a URL to do something.) - action = c.currentAction - if action.Accept || action.Reject { - if action.Reject { - metricTerminalReject.Add(1) - } else { - metricTerminalAccept.Add(1) - } - return action, nil - } - url := action.HoldAndDelegate - if url == "" { - metricTerminalMalformed.Add(1) - return nil, errors.New("reached Action that lacked Accept, Reject, and HoldAndDelegate") - } - metricHolds.Add(1) - url = c.expandDelegateURLLocked(url) - nextAction, err := c.fetchSSHAction(ctx, url) - if err != nil { - metricTerminalFetchError.Add(1) - return nil, fmt.Errorf("fetching SSHAction from %s: %w", url, err) - } - return nextAction, nil -} - func (c *conn) expandDelegateURLLocked(actionURL string) string { nm := c.srv.lb.NetMap() ci := c.info diff --git a/ssh/tailssh/tailssh_integration_test.go b/ssh/tailssh/tailssh_integration_test.go index 1799d3400..5c4f533b1 100644 --- a/ssh/tailssh/tailssh_integration_test.go +++ b/ssh/tailssh/tailssh_integration_test.go @@ -32,8 +32,8 @@ import ( "github.com/bramvdbogaerde/go-scp" "github.com/google/go-cmp/cmp" "github.com/pkg/sftp" - gossh "github.com/tailscale/golang-x-crypto/ssh" "golang.org/x/crypto/ssh" + gossh "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 9f3616d8c..207136659 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -31,7 +31,7 @@ import ( "testing" "time" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" "tailscale.com/ipn/ipnlocal" @@ -805,7 +805,8 @@ func TestSSHAuthFlow(t *testing.T) { state: &localState{ sshEnabled: true, }, - authErr: true, + authErr: true, + wantBanners: []string{"tailscale: failed to evaluate SSH policy"}, }, { name: "accept", diff --git a/tempfork/gliderlabs/ssh/agent.go b/tempfork/gliderlabs/ssh/agent.go index 86a5bce7f..99e84c1e5 100644 --- a/tempfork/gliderlabs/ssh/agent.go +++ b/tempfork/gliderlabs/ssh/agent.go @@ -7,7 +7,7 @@ import ( "path" "sync" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) const ( diff --git a/tempfork/gliderlabs/ssh/context.go b/tempfork/gliderlabs/ssh/context.go index d43de6f09..505a43dbf 100644 --- a/tempfork/gliderlabs/ssh/context.go +++ b/tempfork/gliderlabs/ssh/context.go @@ -6,7 +6,7 @@ import ( "net" "sync" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) // contextKey is a value for use with context.WithValue. It's used as @@ -55,8 +55,6 @@ var ( // ContextKeyPublicKey is a context key for use with Contexts in this package. // The associated value will be of type PublicKey. ContextKeyPublicKey = &contextKey{"public-key"} - - ContextKeySendAuthBanner = &contextKey{"send-auth-banner"} ) // Context is a package specific context interface. It exposes connection @@ -91,8 +89,6 @@ type Context interface { // SetValue allows you to easily write new values into the underlying context. 
SetValue(key, value interface{}) - - SendAuthBanner(banner string) error } type sshContext struct { @@ -121,7 +117,6 @@ func applyConnMetadata(ctx Context, conn gossh.ConnMetadata) { ctx.SetValue(ContextKeyUser, conn.User()) ctx.SetValue(ContextKeyLocalAddr, conn.LocalAddr()) ctx.SetValue(ContextKeyRemoteAddr, conn.RemoteAddr()) - ctx.SetValue(ContextKeySendAuthBanner, conn.SendAuthBanner) } func (ctx *sshContext) SetValue(key, value interface{}) { @@ -158,7 +153,3 @@ func (ctx *sshContext) LocalAddr() net.Addr { func (ctx *sshContext) Permissions() *Permissions { return ctx.Value(ContextKeyPermissions).(*Permissions) } - -func (ctx *sshContext) SendAuthBanner(msg string) error { - return ctx.Value(ContextKeySendAuthBanner).(func(string) error)(msg) -} diff --git a/tempfork/gliderlabs/ssh/options.go b/tempfork/gliderlabs/ssh/options.go index aa87a4f39..29c8ef141 100644 --- a/tempfork/gliderlabs/ssh/options.go +++ b/tempfork/gliderlabs/ssh/options.go @@ -3,7 +3,7 @@ package ssh import ( "os" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) // PasswordAuth returns a functional option that sets PasswordHandler on the server. diff --git a/tempfork/gliderlabs/ssh/options_test.go b/tempfork/gliderlabs/ssh/options_test.go index 7cf6f376c..47342b0f6 100644 --- a/tempfork/gliderlabs/ssh/options_test.go +++ b/tempfork/gliderlabs/ssh/options_test.go @@ -8,7 +8,7 @@ import ( "sync/atomic" "testing" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) func newTestSessionWithOptions(t *testing.T, srv *Server, cfg *gossh.ClientConfig, options ...Option) (*gossh.Session, *gossh.Client, func()) { diff --git a/tempfork/gliderlabs/ssh/server.go b/tempfork/gliderlabs/ssh/server.go index 1086a72ca..473e5fbd6 100644 --- a/tempfork/gliderlabs/ssh/server.go +++ b/tempfork/gliderlabs/ssh/server.go @@ -8,7 +8,7 @@ import ( "sync" "time" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) // ErrServerClosed is returned by the Server's Serve, ListenAndServe, diff --git a/tempfork/gliderlabs/ssh/session.go b/tempfork/gliderlabs/ssh/session.go index 0a4a21e53..a7a9a3eeb 100644 --- a/tempfork/gliderlabs/ssh/session.go +++ b/tempfork/gliderlabs/ssh/session.go @@ -9,7 +9,7 @@ import ( "sync" "github.com/anmitsu/go-shlex" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) // Session provides access to information about an SSH session and methods diff --git a/tempfork/gliderlabs/ssh/session_test.go b/tempfork/gliderlabs/ssh/session_test.go index a60be5ec1..fe61a9d96 100644 --- a/tempfork/gliderlabs/ssh/session_test.go +++ b/tempfork/gliderlabs/ssh/session_test.go @@ -9,7 +9,7 @@ import ( "net" "testing" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) func (srv *Server) serveOnce(l net.Listener) error { diff --git a/tempfork/gliderlabs/ssh/ssh.go b/tempfork/gliderlabs/ssh/ssh.go index 644cb257d..54bd31ec2 100644 --- a/tempfork/gliderlabs/ssh/ssh.go +++ b/tempfork/gliderlabs/ssh/ssh.go @@ -4,7 +4,7 @@ import ( "crypto/subtle" "net" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) type Signal string @@ -105,7 +105,7 @@ type Pty struct { // requested by the client as part of the pty-req. These are outlined as // part of https://datatracker.ietf.org/doc/html/rfc4254#section-8. // - // The opcodes are defined as constants in github.com/tailscale/golang-x-crypto/ssh (VINTR,VQUIT,etc.). 
+ // The opcodes are defined as constants in golang.org/x/crypto/ssh (VINTR,VQUIT,etc.). // Boolean opcodes have values 0 or 1. Modes gossh.TerminalModes } diff --git a/tempfork/gliderlabs/ssh/tcpip.go b/tempfork/gliderlabs/ssh/tcpip.go index 056a0c734..335fda657 100644 --- a/tempfork/gliderlabs/ssh/tcpip.go +++ b/tempfork/gliderlabs/ssh/tcpip.go @@ -7,7 +7,7 @@ import ( "strconv" "sync" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) const ( diff --git a/tempfork/gliderlabs/ssh/tcpip_test.go b/tempfork/gliderlabs/ssh/tcpip_test.go index 118b5d53a..b3ba60a9b 100644 --- a/tempfork/gliderlabs/ssh/tcpip_test.go +++ b/tempfork/gliderlabs/ssh/tcpip_test.go @@ -10,7 +10,7 @@ import ( "strings" "testing" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) var sampleServerResponse = []byte("Hello world") diff --git a/tempfork/gliderlabs/ssh/util.go b/tempfork/gliderlabs/ssh/util.go index e3b5716a3..3bee06dcd 100644 --- a/tempfork/gliderlabs/ssh/util.go +++ b/tempfork/gliderlabs/ssh/util.go @@ -5,7 +5,7 @@ import ( "crypto/rsa" "encoding/binary" - "github.com/tailscale/golang-x-crypto/ssh" + "golang.org/x/crypto/ssh" ) func generateSigner() (ssh.Signer, error) { diff --git a/tempfork/gliderlabs/ssh/wrap.go b/tempfork/gliderlabs/ssh/wrap.go index 17867d751..d1f2b161e 100644 --- a/tempfork/gliderlabs/ssh/wrap.go +++ b/tempfork/gliderlabs/ssh/wrap.go @@ -1,6 +1,6 @@ package ssh -import gossh "github.com/tailscale/golang-x-crypto/ssh" +import gossh "golang.org/x/crypto/ssh" // PublicKey is an abstraction of different types of public keys. type PublicKey interface { From f1514a944a167fde05888273440342598112050c Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Tue, 28 Jan 2025 14:35:24 -0700 Subject: [PATCH 0418/1708] go.toolchain.rev: bump from Go 1.23.3 to 1.23.5 (#14814) Update Go toolchain to 1.23.5. Updates #cleanup Signed-off-by: Mario Minardi --- go.toolchain.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index e90440d41..900450dca 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -161c3b79ed91039e65eb148f2547dea6b91e2247 +64f7854906c3121fe3ada3d05f1936d3420d6ffa From 0aa54151f290df4675714a338c0e067bf5fd050c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 15:03:13 -0700 Subject: [PATCH 0419/1708] .github: Bump actions/checkout from 3.6.0 to 4.2.2 (#14139) Bumps [actions/checkout](https://github.com/actions/checkout) from 3.6.0 to 4.2.2. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v3.6.0...11bd71901bbe5b1630ceea73d27597364c9af683) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/checklocks.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/docker-file-build.yml | 2 +- .github/workflows/flakehub-publish-tagged.yml | 2 +- .github/workflows/golangci-lint.yml | 2 +- .github/workflows/govulncheck.yml | 2 +- .github/workflows/installer.yml | 6 +--- .github/workflows/kubemanifests.yaml | 2 +- .github/workflows/ssh-integrationtest.yml | 2 +- .github/workflows/test.yml | 34 +++++++++---------- .github/workflows/update-flake.yml | 2 +- .../workflows/update-webclient-prebuilt.yml | 2 +- .github/workflows/webclient.yml | 2 +- 13 files changed, 29 insertions(+), 33 deletions(-) diff --git a/.github/workflows/checklocks.yml b/.github/workflows/checklocks.yml index 064797c88..7464524ce 100644 --- a/.github/workflows/checklocks.yml +++ b/.github/workflows/checklocks.yml @@ -18,7 +18,7 @@ jobs: runs-on: [ ubuntu-latest ] steps: - name: Check out code - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Build checklocks run: ./tool/go build -o /tmp/checklocks gvisor.dev/gvisor/tools/checklocks/cmd/checklocks diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 928240c53..425175218 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -45,7 +45,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 # Install a more recent Go that understands modern go.mod content. - name: Install Go diff --git a/.github/workflows/docker-file-build.yml b/.github/workflows/docker-file-build.yml index c53575572..04611e172 100644 --- a/.github/workflows/docker-file-build.yml +++ b/.github/workflows/docker-file-build.yml @@ -10,6 +10,6 @@ jobs: deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: "Build Docker image" run: docker build . 
diff --git a/.github/workflows/flakehub-publish-tagged.yml b/.github/workflows/flakehub-publish-tagged.yml index 60fdba91c..9ff12c6a3 100644 --- a/.github/workflows/flakehub-publish-tagged.yml +++ b/.github/workflows/flakehub-publish-tagged.yml @@ -17,7 +17,7 @@ jobs: id-token: "write" contents: "read" steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: ref: "${{ (inputs.tag != null) && format('refs/tags/{0}', inputs.tag) || '' }}" - uses: "DeterminateSystems/nix-installer-action@main" diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 58e611591..b9a9eb33d 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -23,7 +23,7 @@ jobs: name: lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index 989e55fb1..47d278e1c 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -14,7 +14,7 @@ jobs: steps: - name: Check out code into the Go module directory - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install govulncheck run: ./tool/go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/.github/workflows/installer.yml b/.github/workflows/installer.yml index 1c39e4d74..adc4a0a60 100644 --- a/.github/workflows/installer.yml +++ b/.github/workflows/installer.yml @@ -36,7 +36,6 @@ jobs: - "ubuntu:24.04" - "elementary/docker:stable" - "elementary/docker:unstable" - - "parrotsec/core:lts-amd64" - "parrotsec/core:latest" - "kalilinux/kali-rolling" - "kalilinux/kali-dev" @@ -92,10 +91,7 @@ jobs: || contains(matrix.image, 'parrotsec') || contains(matrix.image, 'kalilinux') - name: checkout - # We cannot use v4, as it requires a newer glibc version than some of the - # tested images provide. 
See - # https://github.com/actions/checkout/issues/1487 - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: run installer run: scripts/installer.sh # Package installation can fail in docker because systemd is not running diff --git a/.github/workflows/kubemanifests.yaml b/.github/workflows/kubemanifests.yaml index f943ccb52..5b100a276 100644 --- a/.github/workflows/kubemanifests.yaml +++ b/.github/workflows/kubemanifests.yaml @@ -17,7 +17,7 @@ jobs: runs-on: [ ubuntu-latest ] steps: - name: Check out code - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Build and lint Helm chart run: | eval `./tool/go run ./cmd/mkversion` diff --git a/.github/workflows/ssh-integrationtest.yml b/.github/workflows/ssh-integrationtest.yml index a82696307..829d10ab8 100644 --- a/.github/workflows/ssh-integrationtest.yml +++ b/.github/workflows/ssh-integrationtest.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out code - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Run SSH integration tests run: | make sshintegrationtest \ No newline at end of file diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a6ef6c36e..a368afc67 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -50,7 +50,7 @@ jobs: - shard: '4/4' steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: build test wrapper run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper - name: integration tests as root @@ -78,7 +78,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Cache uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: @@ -150,7 +150,7 @@ jobs: runs-on: windows-2022 steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Go uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 @@ -190,7 +190,7 @@ jobs: options: --privileged steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: chown run: chown -R $(id -u):$(id -g) $PWD - name: privileged tests @@ -202,7 +202,7 @@ jobs: if: github.repository == 'tailscale/tailscale' steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Run VM tests run: ./tool/go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2004 env: @@ -214,7 +214,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: build all run: ./tool/go install -race ./cmd/... 
- name: build tests @@ -258,7 +258,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Cache uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: @@ -295,7 +295,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: build some run: ./tool/go build ./ipn/... ./wgengine/ ./types/... ./control/controlclient env: @@ -323,7 +323,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Cache uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: @@ -356,7 +356,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 # Super minimal Android build that doesn't even use CGO and doesn't build everything that's needed # and is only arm64. But it's a smoke build: it's not meant to catch everything. But it'll catch # some Android breakages early. @@ -371,7 +371,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Cache uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: @@ -405,7 +405,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: test tailscale_go run: ./tool/go test -tags=tailscale_go,ts_enable_sockstats ./net/sockstats/... @@ -477,7 +477,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: check depaware run: | export PATH=$(./tool/go env GOROOT)/bin:$PATH @@ -487,7 +487,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: check that 'go generate' is clean run: | pkgs=$(./tool/go list ./... | grep -Ev 'dnsfallback|k8s-operator|xdp') @@ -500,7 +500,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: check that 'go mod tidy' is clean run: | ./tool/go mod tidy @@ -512,7 +512,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: check licenses run: ./scripts/check_license_headers.sh . 
@@ -528,7 +528,7 @@ jobs: goarch: "386" steps: - name: checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: install staticcheck run: GOBIN=~/.local/bin ./tool/go install honnef.co/go/tools/cmd/staticcheck - name: run staticcheck diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index 151ed6bab..4d9db490b 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Run update-flakes run: ./update-flake.sh diff --git a/.github/workflows/update-webclient-prebuilt.yml b/.github/workflows/update-webclient-prebuilt.yml index 11665460b..f2d1e65a5 100644 --- a/.github/workflows/update-webclient-prebuilt.yml +++ b/.github/workflows/update-webclient-prebuilt.yml @@ -14,7 +14,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Run go get run: | diff --git a/.github/workflows/webclient.yml b/.github/workflows/webclient.yml index 9afb7730d..b1cfb7620 100644 --- a/.github/workflows/webclient.yml +++ b/.github/workflows/webclient.yml @@ -24,7 +24,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install deps run: ./tool/yarn --cwd client/web - name: Run lint From eb299302ba454e8b7e2fc65a972d9710090bbea8 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Fri, 24 Jan 2025 13:26:08 -0500 Subject: [PATCH 0420/1708] types/views: fix SliceEqualAnyOrderFunc short optimization This was flagged by @tkhattra on the merge commit; thanks! Updates tailscale/corp#25479 Signed-off-by: Andrew Dunham Change-Id: Ia8045640f02bd4dcc0fe7433249fd72ac6b9cf52 --- types/views/views.go | 38 ++++++++++++++++++++++++++++++++------ types/views/views_test.go | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 6 deletions(-) diff --git a/types/views/views.go b/types/views/views.go index d8acf27ce..ae776c3b2 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -386,14 +386,32 @@ func SliceEqualAnyOrderFunc[T any, V comparable](a, b Slice[T], cmp func(T) V) b // do the quadratic thing. We can also only check the items between // diffStart and the end. nRemain := a.Len() - diffStart - if nRemain <= 5 { - maxLen := a.Len() // same as b.Len() - for i := diffStart; i < maxLen; i++ { - av := cmp(a.At(i)) + const shortOptLen = 5 + if nRemain <= shortOptLen { + // These track which elements in a and b have been matched, so + // that we don't treat arrays with differing number of + // duplicate elements as equal (e.g. [1, 1, 2] and [1, 2, 2]). + var aMatched, bMatched [shortOptLen]bool + + // Compare each element in a to each element in b + for i := range nRemain { + av := cmp(a.At(i + diffStart)) found := false - for j := diffStart; j < maxLen; j++ { - bv := cmp(b.At(j)) + for j := range nRemain { + // Skip elements in b that have already been + // used to match an item in a. 
+ if bMatched[j] { + continue + } + + bv := cmp(b.At(j + diffStart)) if av == bv { + // Mark these elements as already + // matched, so that a future loop + // iteration (of a duplicate element) + // doesn't match it again. + aMatched[i] = true + bMatched[j] = true found = true break } @@ -402,6 +420,14 @@ func SliceEqualAnyOrderFunc[T any, V comparable](a, b Slice[T], cmp func(T) V) b return false } } + + // Verify all elements were matched exactly once. + for i := range nRemain { + if !aMatched[i] || !bMatched[i] { + return false + } + } + return true } diff --git a/types/views/views_test.go b/types/views/views_test.go index 70e021aa4..7837a89d6 100644 --- a/types/views/views_test.go +++ b/types/views/views_test.go @@ -197,6 +197,38 @@ func TestSliceEqualAnyOrderFunc(t *testing.T) { // Long difference; past the quadratic limit longDiff := ncFrom("b", "a", "c", "d", "e", "f", "g", "h", "i", "k") // differs at end c.Check(SliceEqualAnyOrderFunc(longSlice, longDiff, cmp), qt.Equals, false) + + // The short slice optimization had a bug where it wouldn't handle + // duplicate elements; test various cases here driven by code coverage. + shortTestCases := []struct { + name string + s1, s2 Slice[nc] + want bool + }{ + { + name: "duplicates_same_length", + s1: ncFrom("a", "a", "b"), + s2: ncFrom("a", "b", "b"), + want: false, + }, + { + name: "duplicates_different_matched", + s1: ncFrom("x", "y", "a", "a", "b"), + s2: ncFrom("x", "y", "b", "a", "a"), + want: true, + }, + { + name: "item_in_a_not_b", + s1: ncFrom("x", "y", "a", "b", "c"), + s2: ncFrom("x", "y", "b", "c", "q"), + want: false, + }, + } + for _, tc := range shortTestCases { + t.Run("short_"+tc.name, func(t *testing.T) { + c.Check(SliceEqualAnyOrderFunc(tc.s1, tc.s2, cmp), qt.Equals, tc.want) + }) + } } func TestSliceEqual(t *testing.T) { From b406f209c380f4c20cca5709a01f66275143b867 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 29 Jan 2025 09:35:50 +0200 Subject: [PATCH 0421/1708] cmd/{k8s-operator,containerboot},kube: ensure egress ProxyGroup proxies don't terminate while cluster traffic is still routed to them (#14436) cmd/{containerboot,k8s-operator},kube: add preshutdown hook for egress PG proxies This change is part of work towards minimizing downtime during update rollouts of egress ProxyGroup replicas. This change: - updates the containerboot health check logic to return Pod IP in headers, if set - always runs the health check for egress PG proxies - updates ClusterIP Services created for PG egress endpoints to include the health check endpoint - implements preshutdown endpoint in proxies. The preshutdown endpoint logic waits till, for all currently configured egress services, the ClusterIP Service health check endpoint is no longer returned by the shutting-down Pod (by looking at the new Pod IP header). - ensures that kubelet is configured to call the preshutdown endpoint This reduces the possibility that, as replicas are terminated during an update, a replica gets terminated to which cluster traffic is still being routed via the ClusterIP Service because kube proxy has not yet updated routig rules. This is not a perfect check as in practice, it only checks that the kube proxy on the node on which the proxy runs has updated rules. However, overall this might be good enough. The preshutdown logic is disabled if users have configured a custom health check port via TS_LOCAL_ADDR_PORT env var. 
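The per-service check is roughly the following (a minimal sketch of the idea,
not the code added by this change; the real implementation, with retries,
logging and handling of the no-healthy-backends case, is lookup/lookupPodRoute
in cmd/containerboot/services.go further down, and the function name here is
illustrative only):

package sketch

import (
	"context"
	"net/http"
	"strings"

	"tailscale.com/kube/kubetypes"
)

// stillRoutedHere reports whether a probe of the egress Service's health
// check endpoint hep was answered by the Pod with IP podIP, i.e. whether
// cluster traffic for that Service can still reach this Pod.
// (Sketch only; the real logic lives in containerboot.)
func stillRoutedHere(ctx context.Context, hep, podIP string, client *http.Client) (bool, error) {
	req, err := http.NewRequestWithContext(ctx, "GET", hep, nil)
	if err != nil {
		return false, err
	}
	// Close the TCP connection after the probe so that a follow-up probe
	// can be load balanced to a different backend.
	req.Close = true
	resp, err := client.Do(req)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	return strings.EqualFold(podIP, resp.Header.Get(kubetypes.PodIPv4Header)), nil
}
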
This change throws a warnign if so and in future setting of that env var for operator proxies might be disallowed (as users shouldn't need to configure this for a Pod directly). This is backwards compatible with earlier proxy versions. Updates tailscale/tailscale#14326 Signed-off-by: Irbe Krumina --- cmd/containerboot/healthz.go | 13 +- cmd/containerboot/main.go | 22 ++- cmd/containerboot/main_test.go | 127 ++++++++---- cmd/containerboot/services.go | 212 +++++++++++++++++++-- cmd/containerboot/services_test.go | 149 +++++++++++++++ cmd/containerboot/settings.go | 34 ++-- cmd/k8s-operator/egress-eps.go | 13 +- cmd/k8s-operator/egress-services.go | 94 +++++++-- cmd/k8s-operator/egress-services_test.go | 54 +++--- cmd/k8s-operator/operator.go | 10 +- cmd/k8s-operator/proxygroup.go | 32 +++- cmd/k8s-operator/proxygroup_specs.go | 75 +++++++- cmd/k8s-operator/proxygroup_test.go | 99 ++++++++-- cmd/k8s-operator/sts.go | 3 + k8s-operator/conditions.go | 10 - kube/egressservices/egressservices.go | 13 +- kube/egressservices/egressservices_test.go | 2 +- kube/kubetypes/types.go | 5 + 18 files changed, 791 insertions(+), 176 deletions(-) diff --git a/cmd/containerboot/healthz.go b/cmd/containerboot/healthz.go index 895290733..6d03bd6d3 100644 --- a/cmd/containerboot/healthz.go +++ b/cmd/containerboot/healthz.go @@ -6,9 +6,12 @@ package main import ( + "fmt" "log" "net/http" "sync" + + "tailscale.com/kube/kubetypes" ) // healthz is a simple health check server, if enabled it returns 200 OK if @@ -17,6 +20,7 @@ import ( type healthz struct { sync.Mutex hasAddrs bool + podIPv4 string } func (h *healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -24,7 +28,10 @@ func (h *healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer h.Unlock() if h.hasAddrs { - w.Write([]byte("ok")) + w.Header().Add(kubetypes.PodIPv4Header, h.podIPv4) + if _, err := w.Write([]byte("ok")); err != nil { + http.Error(w, fmt.Sprintf("error writing status: %v", err), http.StatusInternalServerError) + } } else { http.Error(w, "node currently has no tailscale IPs", http.StatusServiceUnavailable) } @@ -43,8 +50,8 @@ func (h *healthz) update(healthy bool) { // healthHandlers registers a simple health handler at /healthz. // A containerized tailscale instance is considered healthy if // it has at least one tailnet IP address. 
-func healthHandlers(mux *http.ServeMux) *healthz { - h := &healthz{} +func healthHandlers(mux *http.ServeMux, podIPv4 string) *healthz { + h := &healthz{podIPv4: podIPv4} mux.Handle("GET /healthz", h) return h } diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 895be108b..0aca27f5f 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -191,17 +191,18 @@ func main() { defer killTailscaled() var healthCheck *healthz + ep := &egressProxy{} if cfg.HealthCheckAddrPort != "" { mux := http.NewServeMux() log.Printf("Running healthcheck endpoint at %s/healthz", cfg.HealthCheckAddrPort) - healthCheck = healthHandlers(mux) + healthCheck = healthHandlers(mux, cfg.PodIPv4) close := runHTTPServer(mux, cfg.HealthCheckAddrPort) defer close() } - if cfg.localMetricsEnabled() || cfg.localHealthEnabled() { + if cfg.localMetricsEnabled() || cfg.localHealthEnabled() || cfg.egressSvcsTerminateEPEnabled() { mux := http.NewServeMux() if cfg.localMetricsEnabled() { @@ -211,7 +212,11 @@ func main() { if cfg.localHealthEnabled() { log.Printf("Running healthcheck endpoint at %s/healthz", cfg.LocalAddrPort) - healthCheck = healthHandlers(mux) + healthCheck = healthHandlers(mux, cfg.PodIPv4) + } + if cfg.EgressProxiesCfgPath != "" { + log.Printf("Running preshutdown hook at %s%s", cfg.LocalAddrPort, kubetypes.EgessServicesPreshutdownEP) + ep.registerHandlers(mux) } close := runHTTPServer(mux, cfg.LocalAddrPort) @@ -639,20 +644,21 @@ runLoop: // will then continuously monitor the config file and netmap updates and // reconfigure the firewall rules as needed. If any of its operations fail, it // will crash this node. - if cfg.EgressSvcsCfgPath != "" { - log.Printf("configuring egress proxy using configuration file at %s", cfg.EgressSvcsCfgPath) + if cfg.EgressProxiesCfgPath != "" { + log.Printf("configuring egress proxy using configuration file at %s", cfg.EgressProxiesCfgPath) egressSvcsNotify = make(chan ipn.Notify) - ep := egressProxy{ - cfgPath: cfg.EgressSvcsCfgPath, + opts := egressProxyRunOpts{ + cfgPath: cfg.EgressProxiesCfgPath, nfr: nfr, kc: kc, + tsClient: client, stateSecret: cfg.KubeSecret, netmapChan: egressSvcsNotify, podIPv4: cfg.PodIPv4, tailnetAddrs: addrs, } go func() { - if err := ep.run(ctx, n); err != nil { + if err := ep.run(ctx, n, opts); err != nil { egressSvcsErrorChan <- err } }() diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index dacfb5bc6..c8066f2c1 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -32,6 +32,8 @@ import ( "golang.org/x/sys/unix" "tailscale.com/ipn" "tailscale.com/kube/egressservices" + "tailscale.com/kube/kubeclient" + "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/types/netmap" @@ -54,20 +56,9 @@ func TestContainerBoot(t *testing.T) { defer kube.Close() tailscaledConf := &ipn.ConfigVAlpha{AuthKey: ptr.To("foo"), Version: "alpha0"} - tailscaledConfBytes, err := json.Marshal(tailscaledConf) - if err != nil { - t.Fatalf("error unmarshaling tailscaled config: %v", err) - } serveConf := ipn.ServeConfig{TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}} - serveConfBytes, err := json.Marshal(serveConf) - if err != nil { - t.Fatalf("error unmarshaling serve config: %v", err) - } - egressSvcsCfg := egressservices.Configs{"foo": {TailnetTarget: egressservices.TailnetTarget{FQDN: "foo.tailnetxyx.ts.net"}}} - egressSvcsCfgBytes, err := json.Marshal(egressSvcsCfg) - if err != nil { - t.Fatalf("error unmarshaling egress services 
config: %v", err) - } + egressCfg := egressSvcConfig("foo", "foo.tailnetxyz.ts.net") + egressStatus := egressSvcStatus("foo", "foo.tailnetxyz.ts.net") dirs := []string{ "var/lib", @@ -84,16 +75,17 @@ func TestContainerBoot(t *testing.T) { } } files := map[string][]byte{ - "usr/bin/tailscaled": fakeTailscaled, - "usr/bin/tailscale": fakeTailscale, - "usr/bin/iptables": fakeTailscale, - "usr/bin/ip6tables": fakeTailscale, - "dev/net/tun": []byte(""), - "proc/sys/net/ipv4/ip_forward": []byte("0"), - "proc/sys/net/ipv6/conf/all/forwarding": []byte("0"), - "etc/tailscaled/cap-95.hujson": tailscaledConfBytes, - "etc/tailscaled/serve-config.json": serveConfBytes, - "etc/tailscaled/egress-services-config.json": egressSvcsCfgBytes, + "usr/bin/tailscaled": fakeTailscaled, + "usr/bin/tailscale": fakeTailscale, + "usr/bin/iptables": fakeTailscale, + "usr/bin/ip6tables": fakeTailscale, + "dev/net/tun": []byte(""), + "proc/sys/net/ipv4/ip_forward": []byte("0"), + "proc/sys/net/ipv6/conf/all/forwarding": []byte("0"), + "etc/tailscaled/cap-95.hujson": mustJSON(t, tailscaledConf), + "etc/tailscaled/serve-config.json": mustJSON(t, serveConf), + filepath.Join("etc/tailscaled/", egressservices.KeyEgressServices): mustJSON(t, egressCfg), + filepath.Join("etc/tailscaled/", egressservices.KeyHEPPings): []byte("4"), } resetFiles := func() { for path, content := range files { @@ -132,6 +124,9 @@ func TestContainerBoot(t *testing.T) { healthURL := func(port int) string { return fmt.Sprintf("http://127.0.0.1:%d/healthz", port) } + egressSvcTerminateURL := func(port int) string { + return fmt.Sprintf("http://127.0.0.1:%d%s", port, kubetypes.EgessServicesPreshutdownEP) + } capver := fmt.Sprintf("%d", tailcfg.CurrentCapabilityVersion) @@ -896,9 +891,10 @@ func TestContainerBoot(t *testing.T) { { Name: "egress_svcs_config_kube", Env: map[string]string{ - "KUBERNETES_SERVICE_HOST": kube.Host, - "KUBERNETES_SERVICE_PORT_HTTPS": kube.Port, - "TS_EGRESS_SERVICES_CONFIG_PATH": filepath.Join(d, "etc/tailscaled/egress-services-config.json"), + "KUBERNETES_SERVICE_HOST": kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": kube.Port, + "TS_EGRESS_PROXIES_CONFIG_PATH": filepath.Join(d, "etc/tailscaled"), + "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort), }, KubeSecret: map[string]string{ "authkey": "tskey-key", @@ -912,28 +908,35 @@ func TestContainerBoot(t *testing.T) { WantKubeSecret: map[string]string{ "authkey": "tskey-key", }, + EndpointStatuses: map[string]int{ + egressSvcTerminateURL(localAddrPort): 200, + }, }, { Notify: runningNotify, WantKubeSecret: map[string]string{ + "egress-services": mustBase64(t, egressStatus), "authkey": "tskey-key", "device_fqdn": "test-node.test.ts.net", "device_id": "myID", "device_ips": `["100.64.0.1"]`, "tailscale_capver": capver, }, + EndpointStatuses: map[string]int{ + egressSvcTerminateURL(localAddrPort): 200, + }, }, }, }, { Name: "egress_svcs_config_no_kube", Env: map[string]string{ - "TS_EGRESS_SERVICES_CONFIG_PATH": filepath.Join(d, "etc/tailscaled/egress-services-config.json"), - "TS_AUTHKEY": "tskey-key", + "TS_EGRESS_PROXIES_CONFIG_PATH": filepath.Join(d, "etc/tailscaled"), + "TS_AUTHKEY": "tskey-key", }, Phases: []phase{ { - WantFatalLog: "TS_EGRESS_SERVICES_CONFIG_PATH is only supported for Tailscale running on Kubernetes", + WantFatalLog: "TS_EGRESS_PROXIES_CONFIG_PATH is only supported for Tailscale running on Kubernetes", }, }, }, @@ -1394,13 +1397,31 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) { panic(fmt.Sprintf("json decode failed: %v. 
Body:\n\n%s", err, string(bs))) } for _, op := range req { - if op.Op != "remove" { + if op.Op == "remove" { + if !strings.HasPrefix(op.Path, "/data/") { + panic(fmt.Sprintf("unsupported json-patch path %q", op.Path)) + } + delete(k.secret, strings.TrimPrefix(op.Path, "/data/")) + } else if op.Op == "replace" { + path, ok := strings.CutPrefix(op.Path, "/data/") + if !ok { + panic(fmt.Sprintf("unsupported json-patch path %q", op.Path)) + } + req := make([]kubeclient.JSONPatch, 0) + if err := json.Unmarshal(bs, &req); err != nil { + panic(fmt.Sprintf("json decode failed: %v. Body:\n\n%s", err, string(bs))) + } + + for _, patch := range req { + val, ok := patch.Value.(string) + if !ok { + panic(fmt.Sprintf("unsupported json patch value %v: cannot be converted to string", patch.Value)) + } + k.secret[path] = val + } + } else { panic(fmt.Sprintf("unsupported json-patch op %q", op.Op)) } - if !strings.HasPrefix(op.Path, "/data/") { - panic(fmt.Sprintf("unsupported json-patch path %q", op.Path)) - } - delete(k.secret, strings.TrimPrefix(op.Path, "/data/")) } case "application/strategic-merge-patch+json": req := struct { @@ -1419,3 +1440,41 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) { panic(fmt.Sprintf("unhandled HTTP method %q", r.Method)) } } + +func mustBase64(t *testing.T, v any) string { + b := mustJSON(t, v) + s := base64.StdEncoding.WithPadding('=').EncodeToString(b) + return s +} + +func mustJSON(t *testing.T, v any) []byte { + b, err := json.Marshal(v) + if err != nil { + t.Fatalf("error converting %v to json: %v", v, err) + } + return b +} + +// egress services status given one named tailnet target specified by FQDN. As written by the proxy to its state Secret. +func egressSvcStatus(name, fqdn string) egressservices.Status { + return egressservices.Status{ + Services: map[string]*egressservices.ServiceStatus{ + name: { + TailnetTarget: egressservices.TailnetTarget{ + FQDN: fqdn, + }, + }, + }, + } +} + +// egress config given one named tailnet target specified by FQDN. +func egressSvcConfig(name, fqdn string) egressservices.Configs { + return egressservices.Configs{ + name: egressservices.Config{ + TailnetTarget: egressservices.TailnetTarget{ + FQDN: fqdn, + }, + }, + } +} diff --git a/cmd/containerboot/services.go b/cmd/containerboot/services.go index aed00250d..177cb2d50 100644 --- a/cmd/containerboot/services.go +++ b/cmd/containerboot/services.go @@ -11,18 +11,24 @@ import ( "errors" "fmt" "log" + "net/http" "net/netip" "os" "path/filepath" "reflect" + "strconv" "strings" "time" "github.com/fsnotify/fsnotify" + "tailscale.com/client/tailscale" "tailscale.com/ipn" "tailscale.com/kube/egressservices" "tailscale.com/kube/kubeclient" + "tailscale.com/kube/kubetypes" + "tailscale.com/syncs" "tailscale.com/tailcfg" + "tailscale.com/util/httpm" "tailscale.com/util/linuxfw" "tailscale.com/util/mak" ) @@ -37,13 +43,15 @@ const tailscaleTunInterface = "tailscale0" // egressProxy knows how to configure firewall rules to route cluster traffic to // one or more tailnet services. 
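// Note on cfgPath: it now names a directory rather than a single file. The
// proxy reads its egress configuration from files in that directory, keyed by
// the egressservices package constants, roughly:
//
//	<cfgPath>/<egressservices.KeyEgressServices>  // egress services to configure
//	<cfgPath>/<egressservices.KeyHEPPings>        // presumably the health check ping count used by the preshutdown hook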
type egressProxy struct { - cfgPath string // path to egress service config file + cfgPath string // path to a directory with egress services config files nfr linuxfw.NetfilterRunner // never nil kc kubeclient.Client // never nil stateSecret string // name of the kube state Secret + tsClient *tailscale.LocalClient // never nil + netmapChan chan ipn.Notify // chan to receive netmap updates on podIPv4 string // never empty string, currently only IPv4 is supported @@ -55,15 +63,29 @@ type egressProxy struct { // memory at all. targetFQDNs map[string][]netip.Prefix - // used to configure firewall rules. - tailnetAddrs []netip.Prefix + tailnetAddrs []netip.Prefix // tailnet IPs of this tailnet device + + // shortSleep is the backoff sleep between healthcheck endpoint calls - can be overridden in tests. + shortSleep time.Duration + // longSleep is the time to sleep after the routing rules are updated to increase the chance that kube + // proxies on all nodes have updated their routing configuration. It can be configured to 0 in + // tests. + longSleep time.Duration + // client is a client that can send HTTP requests. + client httpClient +} + +// httpClient is a client that can send HTTP requests and can be mocked in tests. +type httpClient interface { + Do(*http.Request) (*http.Response, error) } // run configures egress proxy firewall rules and ensures that the firewall rules are reconfigured when: // - the mounted egress config has changed // - the proxy's tailnet IP addresses have changed // - tailnet IPs have changed for any backend targets specified by tailnet FQDN -func (ep *egressProxy) run(ctx context.Context, n ipn.Notify) error { +func (ep *egressProxy) run(ctx context.Context, n ipn.Notify, opts egressProxyRunOpts) error { + ep.configure(opts) var tickChan <-chan time.Time var eventChan <-chan fsnotify.Event // TODO (irbekrm): take a look if this can be pulled into a single func @@ -75,7 +97,7 @@ func (ep *egressProxy) run(ctx context.Context, n ipn.Notify) error { tickChan = ticker.C } else { defer w.Close() - if err := w.Add(filepath.Dir(ep.cfgPath)); err != nil { + if err := w.Add(ep.cfgPath); err != nil { return fmt.Errorf("failed to add fsnotify watch: %w", err) } eventChan = w.Events @@ -85,28 +107,52 @@ func (ep *egressProxy) run(ctx context.Context, n ipn.Notify) error { return err } for { - var err error select { case <-ctx.Done(): return nil case <-tickChan: - err = ep.sync(ctx, n) + log.Printf("periodic sync, ensuring firewall config is up to date...") case <-eventChan: log.Printf("config file change detected, ensuring firewall config is up to date...") - err = ep.sync(ctx, n) case n = <-ep.netmapChan: shouldResync := ep.shouldResync(n) - if shouldResync { - log.Printf("netmap change detected, ensuring firewall config is up to date...") - err = ep.sync(ctx, n) + if !shouldResync { + continue } + log.Printf("netmap change detected, ensuring firewall config is up to date...") } - if err != nil { + if err := ep.sync(ctx, n); err != nil { return fmt.Errorf("error syncing egress service config: %w", err) } } } +type egressProxyRunOpts struct { + cfgPath string + nfr linuxfw.NetfilterRunner + kc kubeclient.Client + tsClient *tailscale.LocalClient + stateSecret string + netmapChan chan ipn.Notify + podIPv4 string + tailnetAddrs []netip.Prefix +} + +// applyOpts configures egress proxy using the provided options. 
+func (ep *egressProxy) configure(opts egressProxyRunOpts) { + ep.cfgPath = opts.cfgPath + ep.nfr = opts.nfr + ep.kc = opts.kc + ep.tsClient = opts.tsClient + ep.stateSecret = opts.stateSecret + ep.netmapChan = opts.netmapChan + ep.podIPv4 = opts.podIPv4 + ep.tailnetAddrs = opts.tailnetAddrs + ep.client = &http.Client{} // default HTTP client + ep.shortSleep = time.Second + ep.longSleep = time.Second * 10 +} + // sync triggers an egress proxy config resync. The resync calculates the diff between config and status to determine if // any firewall rules need to be updated. Currently using status in state Secret as a reference for what is the current // firewall configuration is good enough because - the status is keyed by the Pod IP - we crash the Pod on errors such @@ -327,7 +373,8 @@ func (ep *egressProxy) deleteUnnecessaryServices(cfgs *egressservices.Configs, s // getConfigs gets the mounted egress service configuration. func (ep *egressProxy) getConfigs() (*egressservices.Configs, error) { - j, err := os.ReadFile(ep.cfgPath) + svcsCfg := filepath.Join(ep.cfgPath, egressservices.KeyEgressServices) + j, err := os.ReadFile(svcsCfg) if os.IsNotExist(err) { return nil, nil } @@ -569,3 +616,142 @@ func servicesStatusIsEqual(st, st1 *egressservices.Status) bool { st1.PodIPv4 = "" return reflect.DeepEqual(*st, *st1) } + +// registerHandlers adds a new handler to the provided ServeMux that can be called as a Kubernetes prestop hook to +// delay shutdown till it's safe to do so. +func (ep *egressProxy) registerHandlers(mux *http.ServeMux) { + mux.Handle(fmt.Sprintf("GET %s", kubetypes.EgessServicesPreshutdownEP), ep) +} + +// ServeHTTP serves /internal-egress-services-preshutdown endpoint, when it receives a request, it periodically polls +// the configured health check endpoint for each egress service till it the health check endpoint no longer hits this +// proxy Pod. It uses the Pod-IPv4 header to verify if health check response is received from this Pod. +func (ep *egressProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { + cfgs, err := ep.getConfigs() + if err != nil { + http.Error(w, fmt.Sprintf("error retrieving egress services configs: %v", err), http.StatusInternalServerError) + return + } + if cfgs == nil { + if _, err := w.Write([]byte("safe to terminate")); err != nil { + http.Error(w, fmt.Sprintf("error writing termination status: %v", err), http.StatusInternalServerError) + return + } + } + hp, err := ep.getHEPPings() + if err != nil { + http.Error(w, fmt.Sprintf("error determining the number of times health check endpoint should be pinged: %v", err), http.StatusInternalServerError) + return + } + ep.waitTillSafeToShutdown(r.Context(), cfgs, hp) +} + +// waitTillSafeToShutdown looks up all egress targets configured to be proxied via this instance and, for each target +// whose configuration includes a healthcheck endpoint, pings the endpoint till none of the responses +// are returned by this instance or till the HTTP request times out. In practice, the endpoint will be a Kubernetes Service for whom one of the backends +// would normally be this Pod. When this Pod is being deleted, the operator should have removed it from the Service +// backends and eventually kube proxy routing rules should be updated to no longer route traffic for the Service to this +// Pod. 
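The endpoint polled by waitTillSafeToShutdown is containerboot's local health check, reached through the ClusterIP Service that fronts the ProxyGroup replicas; the poller tells which backend answered by reading the Pod-IPv4 response header (the kubetypes.PodIPv4Header constant added later in this patch). A minimal sketch of such a responder, assuming it simply echoes its own Pod IP (the real containerboot handler is not part of this patch):

	package main

	import (
		"log"
		"net/http"
	)

	func main() {
		podIPv4 := "10.0.0.1" // in practice this would be the Pod's own IP
		http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
			// Identify the answering Pod so a draining replica can detect when
			// cluster traffic stops reaching it.
			w.Header().Set("Pod-IPv4", podIPv4)
			w.Write([]byte("ok"))
		})
		log.Fatal(http.ListenAndServe(":9002", nil)) // 9002 is defaultLocalAddrPort in this patch
	}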
+func (ep *egressProxy) waitTillSafeToShutdown(ctx context.Context, cfgs *egressservices.Configs, hp int) { + if cfgs == nil || len(*cfgs) == 0 { // avoid sleeping if no services are configured + return + } + log.Printf("Ensuring that cluster traffic for egress targets is no longer routed via this Pod...") + wg := syncs.WaitGroup{} + + for s, cfg := range *cfgs { + hep := cfg.HealthCheckEndpoint + if hep == "" { + log.Printf("Tailnet target %q does not have a cluster healthcheck specified, unable to verify if cluster traffic for the target is still routed via this Pod", s) + continue + } + svc := s + wg.Go(func() { + log.Printf("Ensuring that cluster traffic is no longer routed to %q via this Pod...", svc) + for { + if ctx.Err() != nil { // kubelet's HTTP request timeout + log.Printf("Cluster traffic for %s did not stop being routed to this Pod.", svc) + return + } + found, err := lookupPodRoute(ctx, hep, ep.podIPv4, hp, ep.client) + if err != nil { + log.Printf("unable to reach endpoint %q, assuming the routing rules for this Pod have been deleted: %v", hep, err) + break + } + if !found { + log.Printf("service %q is no longer routed through this Pod", svc) + break + } + log.Printf("service %q is still routed through this Pod, waiting...", svc) + time.Sleep(ep.shortSleep) + } + }) + } + wg.Wait() + // The check above really only checked that the routing rules are updated on this node. Sleep for a bit to + // ensure that the routing rules are updated on other nodes. TODO(irbekrm): this may or may not be good enough. + // If it's not good enough, we'd probably want to do something more complex, where the proxies check each other. + log.Printf("Sleeping for %s before shutdown to ensure that kube proxies on all nodes have updated routing configuration", ep.longSleep) + time.Sleep(ep.longSleep) +} + +// lookupPodRoute calls the healthcheck endpoint repeat times and returns true if the endpoint returns with the podIP +// header at least once. +func lookupPodRoute(ctx context.Context, hep, podIP string, repeat int, client httpClient) (bool, error) { + for range repeat { + f, err := lookup(ctx, hep, podIP, client) + if err != nil { + return false, err + } + if f { + return true, nil + } + } + return false, nil +} + +// lookup calls the healthcheck endpoint and returns true if the response contains the podIP header. +func lookup(ctx context.Context, hep, podIP string, client httpClient) (bool, error) { + req, err := http.NewRequestWithContext(ctx, httpm.GET, hep, nil) + if err != nil { + return false, fmt.Errorf("error creating new HTTP request: %v", err) + } + + // Close the TCP connection to ensure that the next request is routed to a different backend. + req.Close = true + + resp, err := client.Do(req) + if err != nil { + log.Printf("Endpoint %q can not be reached: %v, likely because there are no (more) healthy backends", hep, err) + return true, nil + } + defer resp.Body.Close() + gotIP := resp.Header.Get(kubetypes.PodIPv4Header) + return strings.EqualFold(podIP, gotIP), nil +} + +// getHEPPings gets the number of pings that should be sent to a health check endpoint to ensure that each configured +// backend is hit. This assumes that a health check endpoint is a Kubernetes Service and traffic to backend Pods is +// round robin load balanced. 
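How many probes count as "enough" is decided on the operator side: the hepPings helper added further down in this patch writes three times the replica count into the ConfigMap's hep-pings key, over-provisioning because the Service's load balancing cannot be assumed to be perfectly round robin. A small worked example of that arithmetic (values illustrative only):

	package main

	import "fmt"

	func main() {
		// Mirrors hepPings() later in this patch: with a 2-replica ProxyGroup the
		// operator writes "6", so a draining replica probes each healthcheck
		// endpoint up to 6 times per round before concluding it is no longer a backend.
		replicas := 2
		fmt.Println(replicas * 3) // 6
	}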
+func (ep *egressProxy) getHEPPings() (int, error) { + hepPingsPath := filepath.Join(ep.cfgPath, egressservices.KeyHEPPings) + j, err := os.ReadFile(hepPingsPath) + if os.IsNotExist(err) { + return 0, nil + } + if err != nil { + return -1, err + } + if len(j) == 0 || string(j) == "" { + return 0, nil + } + hp, err := strconv.Atoi(string(j)) + if err != nil { + return -1, fmt.Errorf("error parsing hep pings as int: %v", err) + } + if hp < 0 { + log.Printf("[unexpected] hep pings is negative: %d", hp) + return 0, nil + } + return hp, nil +} diff --git a/cmd/containerboot/services_test.go b/cmd/containerboot/services_test.go index 46f6db1cf..724626b07 100644 --- a/cmd/containerboot/services_test.go +++ b/cmd/containerboot/services_test.go @@ -6,11 +6,18 @@ package main import ( + "context" + "fmt" + "io" + "net/http" "net/netip" "reflect" + "strings" + "sync" "testing" "tailscale.com/kube/egressservices" + "tailscale.com/kube/kubetypes" ) func Test_updatesForSvc(t *testing.T) { @@ -173,3 +180,145 @@ func Test_updatesForSvc(t *testing.T) { }) } } + +// A failure of this test will most likely look like a timeout. +func TestWaitTillSafeToShutdown(t *testing.T) { + podIP := "10.0.0.1" + anotherIP := "10.0.0.2" + + tests := []struct { + name string + // services is a map of service name to the number of calls to make to the healthcheck endpoint before + // returning a response that does NOT contain this Pod's IP in headers. + services map[string]int + replicas int + healthCheckSet bool + }{ + { + name: "no_configs", + }, + { + name: "one_service_immediately_safe_to_shutdown", + services: map[string]int{ + "svc1": 0, + }, + replicas: 2, + healthCheckSet: true, + }, + { + name: "multiple_services_immediately_safe_to_shutdown", + services: map[string]int{ + "svc1": 0, + "svc2": 0, + "svc3": 0, + }, + replicas: 2, + healthCheckSet: true, + }, + { + name: "multiple_services_no_healthcheck_endpoints", + services: map[string]int{ + "svc1": 0, + "svc2": 0, + "svc3": 0, + }, + replicas: 2, + }, + { + name: "one_service_eventually_safe_to_shutdown", + services: map[string]int{ + "svc1": 3, // After 3 calls to health check endpoint, no longer returns this Pod's IP + }, + replicas: 2, + healthCheckSet: true, + }, + { + name: "multiple_services_eventually_safe_to_shutdown", + services: map[string]int{ + "svc1": 1, // After 1 call to health check endpoint, no longer returns this Pod's IP + "svc2": 3, // After 3 calls to health check endpoint, no longer returns this Pod's IP + "svc3": 5, // After 5 calls to the health check endpoint, no longer returns this Pod's IP + }, + replicas: 2, + healthCheckSet: true, + }, + { + name: "multiple_services_eventually_safe_to_shutdown_with_higher_replica_count", + services: map[string]int{ + "svc1": 7, + "svc2": 10, + }, + replicas: 5, + healthCheckSet: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfgs := &egressservices.Configs{} + switches := make(map[string]int) + + for svc, callsToSwitch := range tt.services { + endpoint := fmt.Sprintf("http://%s.local", svc) + if tt.healthCheckSet { + (*cfgs)[svc] = egressservices.Config{ + HealthCheckEndpoint: endpoint, + } + } + switches[endpoint] = callsToSwitch + } + + ep := &egressProxy{ + podIPv4: podIP, + client: &mockHTTPClient{ + podIP: podIP, + anotherIP: anotherIP, + switches: switches, + }, + } + + ep.waitTillSafeToShutdown(context.Background(), cfgs, tt.replicas) + }) + } +} + +// mockHTTPClient is a client that receives an HTTP call for an egress service endpoint and returns a 
response with an +// IP address in a 'Pod-IPv4' header. It can be configured to return one IP address for N calls, then switch to another +// IP address to simulate a scenario where an IP is eventually no longer a backend for an endpoint. +// TODO(irbekrm): to test this more thoroughly, we should have the client take into account the number of replicas and +// return as if traffic was round robin load balanced across different Pods. +type mockHTTPClient struct { + // podIP - initial IP address to return, that matches the current proxy's IP address. + podIP string + anotherIP string + // after how many calls to an endpoint, the client should start returning 'anotherIP' instead of 'podIP. + switches map[string]int + mu sync.Mutex // protects the following + // calls tracks the number of calls received. + calls map[string]int +} + +func (m *mockHTTPClient) Do(req *http.Request) (*http.Response, error) { + m.mu.Lock() + if m.calls == nil { + m.calls = make(map[string]int) + } + + endpoint := req.URL.String() + m.calls[endpoint]++ + calls := m.calls[endpoint] + m.mu.Unlock() + + resp := &http.Response{ + StatusCode: http.StatusOK, + Header: make(http.Header), + Body: io.NopCloser(strings.NewReader("")), + } + + if calls <= m.switches[endpoint] { + resp.Header.Set(kubetypes.PodIPv4Header, m.podIP) // Pod is still routable + } else { + resp.Header.Set(kubetypes.PodIPv4Header, m.anotherIP) // Pod is no longer routable + } + return resp, nil +} diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index 5fc6cc3f0..0da18e52c 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -64,16 +64,16 @@ type settings struct { // when setting up rules to proxy cluster traffic to cluster ingress // target. // Deprecated: use PodIPv4, PodIPv6 instead to support dual stack clusters - PodIP string - PodIPv4 string - PodIPv6 string - PodUID string - HealthCheckAddrPort string - LocalAddrPort string - MetricsEnabled bool - HealthCheckEnabled bool - DebugAddrPort string - EgressSvcsCfgPath string + PodIP string + PodIPv4 string + PodIPv6 string + PodUID string + HealthCheckAddrPort string + LocalAddrPort string + MetricsEnabled bool + HealthCheckEnabled bool + DebugAddrPort string + EgressProxiesCfgPath string } func configFromEnv() (*settings, error) { @@ -107,7 +107,7 @@ func configFromEnv() (*settings, error) { MetricsEnabled: defaultBool("TS_ENABLE_METRICS", false), HealthCheckEnabled: defaultBool("TS_ENABLE_HEALTH_CHECK", false), DebugAddrPort: defaultEnv("TS_DEBUG_ADDR_PORT", ""), - EgressSvcsCfgPath: defaultEnv("TS_EGRESS_SERVICES_CONFIG_PATH", ""), + EgressProxiesCfgPath: defaultEnv("TS_EGRESS_PROXIES_CONFIG_PATH", ""), PodUID: defaultEnv("POD_UID", ""), } podIPs, ok := os.LookupEnv("POD_IPS") @@ -186,7 +186,7 @@ func (s *settings) validate() error { return fmt.Errorf("error parsing TS_HEALTHCHECK_ADDR_PORT value %q: %w", s.HealthCheckAddrPort, err) } } - if s.localMetricsEnabled() || s.localHealthEnabled() { + if s.localMetricsEnabled() || s.localHealthEnabled() || s.EgressProxiesCfgPath != "" { if _, err := netip.ParseAddrPort(s.LocalAddrPort); err != nil { return fmt.Errorf("error parsing TS_LOCAL_ADDR_PORT value %q: %w", s.LocalAddrPort, err) } @@ -199,8 +199,8 @@ func (s *settings) validate() error { if s.HealthCheckEnabled && s.HealthCheckAddrPort != "" { return errors.New("TS_HEALTHCHECK_ADDR_PORT is deprecated and will be removed in 1.82.0, use TS_ENABLE_HEALTH_CHECK and optionally TS_LOCAL_ADDR_PORT") } - if s.EgressSvcsCfgPath != "" && 
!(s.InKubernetes && s.KubeSecret != "") { - return errors.New("TS_EGRESS_SERVICES_CONFIG_PATH is only supported for Tailscale running on Kubernetes") + if s.EgressProxiesCfgPath != "" && !(s.InKubernetes && s.KubeSecret != "") { + return errors.New("TS_EGRESS_PROXIES_CONFIG_PATH is only supported for Tailscale running on Kubernetes") } return nil } @@ -291,7 +291,7 @@ func isOneStepConfig(cfg *settings) bool { // as an L3 proxy, proxying to an endpoint provided via one of the config env // vars. func isL3Proxy(cfg *settings) bool { - return cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" || cfg.AllowProxyingClusterTrafficViaIngress || cfg.EgressSvcsCfgPath != "" + return cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" || cfg.AllowProxyingClusterTrafficViaIngress || cfg.EgressProxiesCfgPath != "" } // hasKubeStateStore returns true if the state must be stored in a Kubernetes @@ -308,6 +308,10 @@ func (cfg *settings) localHealthEnabled() bool { return cfg.LocalAddrPort != "" && cfg.HealthCheckEnabled } +func (cfg *settings) egressSvcsTerminateEPEnabled() bool { + return cfg.LocalAddrPort != "" && cfg.EgressProxiesCfgPath != "" +} + // defaultEnv returns the value of the given envvar name, or defVal if // unset. func defaultEnv(name, defVal string) string { diff --git a/cmd/k8s-operator/egress-eps.go b/cmd/k8s-operator/egress-eps.go index 85992abed..3441e12ba 100644 --- a/cmd/k8s-operator/egress-eps.go +++ b/cmd/k8s-operator/egress-eps.go @@ -20,7 +20,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - tsoperator "tailscale.com/k8s-operator" "tailscale.com/kube/egressservices" "tailscale.com/types/ptr" ) @@ -71,25 +70,27 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ if err != nil { return res, fmt.Errorf("error retrieving ExternalName Service: %w", err) } - if !tsoperator.EgressServiceIsValidAndConfigured(svc) { - l.Infof("Cluster resources for ExternalName Service %s/%s are not yet configured", svc.Namespace, svc.Name) - return res, nil - } // TODO(irbekrm): currently this reconcile loop runs all the checks every time it's triggered, which is // wasteful. Once we have a Ready condition for ExternalName Services for ProxyGroup, use the condition to // determine if a reconcile is needed. oldEps := eps.DeepCopy() - proxyGroupName := eps.Labels[labelProxyGroup] tailnetSvc := tailnetSvcName(svc) l = l.With("tailnet-service-name", tailnetSvc) // Retrieve the desired tailnet service configuration from the ConfigMap. + proxyGroupName := eps.Labels[labelProxyGroup] _, cfgs, err := egressSvcsConfigs(ctx, er.Client, proxyGroupName, er.tsNamespace) if err != nil { return res, fmt.Errorf("error retrieving tailnet services configuration: %w", err) } + if cfgs == nil { + // TODO(irbekrm): this path would be hit if egress service was once exposed on a ProxyGroup that later + // got deleted. Probably the EndpointSlices then need to be deleted too- need to rethink this flow. 
+ l.Debugf("No egress config found, likely because ProxyGroup has not been created") + return res, nil + } cfg, ok := (*cfgs)[tailnetSvc] if !ok { l.Infof("[unexpected] configuration for tailnet service %s not found", tailnetSvc) diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index 55003ee91..cf218ba4f 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -59,6 +59,8 @@ const ( maxPorts = 1000 indexEgressProxyGroup = ".metadata.annotations.egress-proxy-group" + + tsHealthCheckPortName = "tailscale-health-check" ) var gaugeEgressServices = clientmetric.NewGauge(kubetypes.MetricEgressServiceCount) @@ -229,15 +231,16 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s found := false for _, wantsPM := range svc.Spec.Ports { if wantsPM.Port == pm.Port && strings.EqualFold(string(wantsPM.Protocol), string(pm.Protocol)) { - // We don't use the port name to distinguish this port internally, but Kubernetes - // require that, for Service ports with more than one name each port is uniquely named. - // So we can always pick the port name from the ExternalName Service as at this point we - // know that those are valid names because Kuberentes already validated it once. Note - // that users could have changed an unnamed port to a named port and might have changed - // port names- this should still work. + // We want to both preserve the user set port names for ease of debugging, but also + // ensure that we name all unnamed ports as the ClusterIP Service that we create will + // always have at least two ports. // https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services // See also https://github.com/tailscale/tailscale/issues/13406#issuecomment-2507230388 - clusterIPSvc.Spec.Ports[i].Name = wantsPM.Name + if wantsPM.Name != "" { + clusterIPSvc.Spec.Ports[i].Name = wantsPM.Name + } else { + clusterIPSvc.Spec.Ports[i].Name = "tailscale-unnamed" + } found = true break } @@ -252,6 +255,12 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s // ClusterIP Service produce new target port and add a portmapping to // the ClusterIP Service. for _, wantsPM := range svc.Spec.Ports { + // Because we add a healthcheck port of our own, we will always have at least two ports. That + // means that we cannot have ports with name not set. 
+ // https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services + if wantsPM.Name == "" { + wantsPM.Name = "tailscale-unnamed" + } found := false for _, gotPM := range clusterIPSvc.Spec.Ports { if wantsPM.Port == gotPM.Port && strings.EqualFold(string(wantsPM.Protocol), string(gotPM.Protocol)) { @@ -278,6 +287,25 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s }) } } + var healthCheckPort int32 = defaultLocalAddrPort + + for { + if !slices.ContainsFunc(svc.Spec.Ports, func(p corev1.ServicePort) bool { + return p.Port == healthCheckPort + }) { + break + } + healthCheckPort++ + if healthCheckPort > 10002 { + return nil, false, fmt.Errorf("unable to find a free port for internal health check in range [9002, 10002]") + } + } + clusterIPSvc.Spec.Ports = append(clusterIPSvc.Spec.Ports, corev1.ServicePort{ + Name: tsHealthCheckPortName, + Port: healthCheckPort, + TargetPort: intstr.FromInt(defaultLocalAddrPort), + Protocol: "TCP", + }) if !reflect.DeepEqual(clusterIPSvc, oldClusterIPSvc) { if clusterIPSvc, err = createOrUpdate(ctx, esr.Client, esr.tsNamespace, clusterIPSvc, func(svc *corev1.Service) { svc.Labels = clusterIPSvc.Labels @@ -320,7 +348,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s } tailnetSvc := tailnetSvcName(svc) gotCfg := (*cfgs)[tailnetSvc] - wantsCfg := egressSvcCfg(svc, clusterIPSvc) + wantsCfg := egressSvcCfg(svc, clusterIPSvc, esr.tsNamespace, l) if !reflect.DeepEqual(gotCfg, wantsCfg) { l.Debugf("updating egress services ConfigMap %s", cm.Name) mak.Set(cfgs, tailnetSvc, wantsCfg) @@ -504,10 +532,8 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s return false, nil } if !tsoperator.ProxyGroupIsReady(pg) { - l.Infof("ProxyGroup %s is not ready, waiting...", proxyGroupName) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) - return false, nil } l.Debugf("egress service is valid") @@ -515,6 +541,24 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s return true, nil } +func egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service, ns string, l *zap.SugaredLogger) egressservices.Config { + d := retrieveClusterDomain(ns, l) + tt := tailnetTargetFromSvc(externalNameSvc) + hep := healthCheckForSvc(clusterIPSvc, d) + cfg := egressservices.Config{ + TailnetTarget: tt, + HealthCheckEndpoint: hep, + } + for _, svcPort := range clusterIPSvc.Spec.Ports { + if svcPort.Name == tsHealthCheckPortName { + continue // exclude healthcheck from egress svcs configs + } + pm := portMap(svcPort) + mak.Set(&cfg.Ports, pm, struct{}{}) + } + return cfg +} + func validateEgressService(svc *corev1.Service, pg *tsapi.ProxyGroup) []string { violations := validateService(svc) @@ -584,16 +628,6 @@ func tailnetTargetFromSvc(svc *corev1.Service) egressservices.TailnetTarget { } } -func egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service) egressservices.Config { - tt := tailnetTargetFromSvc(externalNameSvc) - cfg := egressservices.Config{TailnetTarget: tt} - for _, svcPort := range clusterIPSvc.Spec.Ports { - pm := portMap(svcPort) - mak.Set(&cfg.Ports, pm, struct{}{}) - } - return cfg -} - func portMap(p corev1.ServicePort) egressservices.PortMap { // TODO (irbekrm): out of bounds check? 
return egressservices.PortMap{Protocol: string(p.Protocol), MatchPort: uint16(p.TargetPort.IntVal), TargetPort: uint16(p.Port)} @@ -618,7 +652,11 @@ func egressSvcsConfigs(ctx context.Context, cl client.Client, proxyGroupName, ts Namespace: tsNamespace, }, } - if err := cl.Get(ctx, client.ObjectKeyFromObject(cm), cm); err != nil { + err = cl.Get(ctx, client.ObjectKeyFromObject(cm), cm) + if apierrors.IsNotFound(err) { // ProxyGroup resources have not been created (yet) + return nil, nil, nil + } + if err != nil { return nil, nil, fmt.Errorf("error retrieving egress services ConfigMap %s: %v", name, err) } cfgs = &egressservices.Configs{} @@ -740,3 +778,17 @@ func (esr *egressSvcsReconciler) updateSvcSpec(ctx context.Context, svc *corev1. svc.Status = *st return err } + +// healthCheckForSvc return the URL of the containerboot's health check endpoint served by this Service or empty string. +func healthCheckForSvc(svc *corev1.Service, clusterDomain string) string { + // This version of the operator always sets health check port on the egress Services. However, it is possible + // that this reconcile loops runs during a proxy upgrade from a version that did not set the health check port + // and parses a Service that does not have the port set yet. + i := slices.IndexFunc(svc.Spec.Ports, func(port corev1.ServicePort) bool { + return port.Name == tsHealthCheckPortName + }) + if i == -1 { + return "" + } + return fmt.Sprintf("http://%s.%s.svc.%s:%d/healthz", svc.Name, svc.Namespace, clusterDomain, svc.Spec.Ports[i].Port) +} diff --git a/cmd/k8s-operator/egress-services_test.go b/cmd/k8s-operator/egress-services_test.go index ab0008ca0..d8a5dfd32 100644 --- a/cmd/k8s-operator/egress-services_test.go +++ b/cmd/k8s-operator/egress-services_test.go @@ -18,6 +18,7 @@ import ( discoveryv1 "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" @@ -78,42 +79,16 @@ func TestTailscaleEgressServices(t *testing.T) { Selector: nil, Ports: []corev1.ServicePort{ { - Name: "http", Protocol: "TCP", Port: 80, }, - { - Name: "https", - Protocol: "TCP", - Port: 443, - }, }, }, } - t.Run("proxy_group_not_ready", func(t *testing.T) { + t.Run("service_one_unnamed_port", func(t *testing.T) { mustCreate(t, fc, svc) expectReconciled(t, esr, "default", "test") - // Service should have EgressSvcValid condition set to Unknown. 
- svc.Status.Conditions = []metav1.Condition{condition(tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, clock)} - expectEqual(t, fc, svc) - }) - - t.Run("proxy_group_ready", func(t *testing.T) { - mustUpdateStatus(t, fc, "", "foo", func(pg *tsapi.ProxyGroup) { - pg.Status.Conditions = []metav1.Condition{ - condition(tsapi.ProxyGroupReady, metav1.ConditionTrue, "", "", clock), - } - }) - expectReconciled(t, esr, "default", "test") - validateReadyService(t, fc, esr, svc, clock, zl, cm) - }) - t.Run("service_retain_one_unnamed_port", func(t *testing.T) { - svc.Spec.Ports = []corev1.ServicePort{{Protocol: "TCP", Port: 80}} - mustUpdate(t, fc, "default", "test", func(s *corev1.Service) { - s.Spec.Ports = svc.Spec.Ports - }) - expectReconciled(t, esr, "default", "test") validateReadyService(t, fc, esr, svc, clock, zl, cm) }) t.Run("service_add_two_named_ports", func(t *testing.T) { @@ -164,7 +139,7 @@ func validateReadyService(t *testing.T, fc client.WithWatch, esr *egressSvcsReco // Verify that an EndpointSlice has been created. expectEqual(t, fc, endpointSlice(name, svc, clusterSvc)) // Verify that ConfigMap contains configuration for the new egress service. - mustHaveConfigForSvc(t, fc, svc, clusterSvc, cm) + mustHaveConfigForSvc(t, fc, svc, clusterSvc, cm, zl) r := svcConfiguredReason(svc, true, zl.Sugar()) // Verify that the user-created ExternalName Service has Configured set to true and ExternalName pointing to the // CluterIP Service. @@ -203,6 +178,23 @@ func findGenNameForEgressSvcResources(t *testing.T, client client.Client, svc *c func clusterIPSvc(name string, extNSvc *corev1.Service) *corev1.Service { labels := egressSvcChildResourceLabels(extNSvc) + ports := make([]corev1.ServicePort, len(extNSvc.Spec.Ports)) + for i, port := range extNSvc.Spec.Ports { + ports[i] = corev1.ServicePort{ // Copy the port to avoid modifying the original. 
+ Name: port.Name, + Port: port.Port, + Protocol: port.Protocol, + } + if port.Name == "" { + ports[i].Name = "tailscale-unnamed" + } + } + ports = append(ports, corev1.ServicePort{ + Name: "tailscale-health-check", + Port: 9002, + TargetPort: intstr.FromInt(9002), + Protocol: "TCP", + }) return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -212,7 +204,7 @@ func clusterIPSvc(name string, extNSvc *corev1.Service) *corev1.Service { }, Spec: corev1.ServiceSpec{ Type: corev1.ServiceTypeClusterIP, - Ports: extNSvc.Spec.Ports, + Ports: ports, }, } } @@ -257,9 +249,9 @@ func portsForEndpointSlice(svc *corev1.Service) []discoveryv1.EndpointPort { return ports } -func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap) { +func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap, l *zap.Logger) { t.Helper() - wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc) + wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc, clusterIPSvc.Namespace, l.Sugar()) if err := cl.Get(context.Background(), client.ObjectKeyFromObject(cm), cm); err != nil { t.Fatalf("Error retrieving ConfigMap: %v", err) } diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index f349e7848..6631c4f98 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -777,7 +777,7 @@ func proxyClassHandlerForConnector(cl client.Client, logger *zap.SugaredLogger) } } -// proxyClassHandlerForConnector returns a handler that, for a given ProxyClass, +// proxyClassHandlerForProxyGroup returns a handler that, for a given ProxyClass, // returns a list of reconcile requests for all Connectors that have // .spec.proxyClass set. func proxyClassHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { @@ -998,7 +998,7 @@ func reconcileRequestsForPG(pg string, cl client.Client, ns string) []reconcile. // egressSvcsFromEgressProxyGroup is an event handler for egress ProxyGroups. It returns reconcile requests for all // user-created ExternalName Services that should be exposed on this ProxyGroup. func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { - return func(_ context.Context, o client.Object) []reconcile.Request { + return func(ctx context.Context, o client.Object) []reconcile.Request { pg, ok := o.(*tsapi.ProxyGroup) if !ok { logger.Infof("[unexpected] ProxyGroup handler triggered for an object that is not a ProxyGroup") @@ -1008,7 +1008,7 @@ func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) return nil } svcList := &corev1.ServiceList{} - if err := cl.List(context.Background(), svcList, client.MatchingFields{indexEgressProxyGroup: pg.Name}); err != nil { + if err := cl.List(ctx, svcList, client.MatchingFields{indexEgressProxyGroup: pg.Name}); err != nil { logger.Infof("error listing Services: %v, skipping a reconcile for event on ProxyGroup %s", err, pg.Name) return nil } @@ -1028,7 +1028,7 @@ func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) // epsFromExternalNameService is an event handler for ExternalName Services that define a Tailscale egress service that // should be exposed on a ProxyGroup. It returns reconcile requests for EndpointSlices created for this Service. 
func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger, ns string) handler.MapFunc { - return func(_ context.Context, o client.Object) []reconcile.Request { + return func(ctx context.Context, o client.Object) []reconcile.Request { svc, ok := o.(*corev1.Service) if !ok { logger.Infof("[unexpected] Service handler triggered for an object that is not a Service") @@ -1038,7 +1038,7 @@ func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger, ns return nil } epsList := &discoveryv1.EndpointSliceList{} - if err := cl.List(context.Background(), epsList, client.InNamespace(ns), + if err := cl.List(ctx, epsList, client.InNamespace(ns), client.MatchingLabels(egressSvcChildResourceLabels(svc))); err != nil { logger.Infof("error listing EndpointSlices: %v, skipping a reconcile for event on Service %s", err, svc.Name) return nil diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index f6de31727..4b17d3470 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -32,6 +32,7 @@ import ( "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/egressservices" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" "tailscale.com/tstime" @@ -166,6 +167,7 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error()) return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, err.Error()) } + validateProxyClassForPG(logger, pg, proxyClass) if !tsoperator.ProxyClassIsReady(proxyClass) { message := fmt.Sprintf("the ProxyGroup's ProxyClass %s is not yet in a ready state, waiting...", proxyClassName) logger.Info(message) @@ -204,6 +206,31 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ return setStatusReady(pg, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady) } +// validateProxyClassForPG applies custom validation logic for ProxyClass applied to ProxyGroup. +func validateProxyClassForPG(logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) { + if pg.Spec.Type == tsapi.ProxyGroupTypeIngress { + return + } + // Our custom logic for ensuring minimum downtime ProxyGroup update rollouts relies on the local health check + // beig accessible on the replica Pod IP:9002. This address can also be modified by users, via + // TS_LOCAL_ADDR_PORT env var. + // + // Currently TS_LOCAL_ADDR_PORT controls Pod's health check and metrics address. _Probably_ there is no need for + // users to set this to a custom value. Users who want to consume metrics, should integrate with the metrics + // Service and/or ServiceMonitor, rather than Pods directly. The health check is likely not useful to integrate + // directly with for operator proxies (and we should aim for unified lifecycle logic in the operator, users + // shouldn't need to set their own). + // + // TODO(irbekrm): maybe disallow configuring this env var in future (in Tailscale 1.84 or later). 
+ if hasLocalAddrPortSet(pc) { + msg := fmt.Sprintf("ProxyClass %s applied to an egress ProxyGroup has TS_LOCAL_ADDR_PORT env var set to a custom value."+ + "This will disable the ProxyGroup graceful failover mechanism, so you might experience downtime when ProxyGroup pods are restarted."+ + "In future we will remove the ability to set custom TS_LOCAL_ADDR_PORT for egress ProxyGroups."+ + "Please raise an issue if you expect that this will cause issues for your workflow.", pc.Name) + logger.Warn(msg) + } +} + func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) error { logger := r.logger(pg.Name) r.mu.Lock() @@ -253,10 +280,11 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro return fmt.Errorf("error provisioning RoleBinding: %w", err) } if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { - cm := pgEgressCM(pg, r.tsNamespace) + cm, hp := pgEgressCM(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, cm, func(existing *corev1.ConfigMap) { existing.ObjectMeta.Labels = cm.ObjectMeta.Labels existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences + mak.Set(&existing.BinaryData, egressservices.KeyHEPPings, hp) }); err != nil { return fmt.Errorf("error provisioning egress ConfigMap %q: %w", cm.Name, err) } @@ -270,7 +298,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro return fmt.Errorf("error provisioning ingress ConfigMap %q: %w", cm.Name, err) } } - ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode) + ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, proxyClass) if err != nil { return fmt.Errorf("error generating StatefulSet spec: %w", err) } diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 556a2ed76..1ea91004b 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -7,11 +7,14 @@ package main import ( "fmt" + "slices" + "strconv" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/yaml" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/egressservices" @@ -19,9 +22,12 @@ import ( "tailscale.com/types/ptr" ) +// deletionGracePeriodSeconds is set to 6 minutes to ensure that the pre-stop hook of these proxies have enough chance to terminate gracefully. +const deletionGracePeriodSeconds int64 = 360 + // Returns the base StatefulSet definition for a ProxyGroup. A ProxyClass may be // applied over the top after. 
-func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string) (*appsv1.StatefulSet, error) { +func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string, proxyClass *tsapi.ProxyClass) (*appsv1.StatefulSet, error) { ss := new(appsv1.StatefulSet) if err := yaml.Unmarshal(proxyYaml, &ss); err != nil { return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err) @@ -145,15 +151,25 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string } if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { - envs = append(envs, corev1.EnvVar{ - Name: "TS_EGRESS_SERVICES_CONFIG_PATH", - Value: fmt.Sprintf("/etc/proxies/%s", egressservices.KeyEgressServices), - }, + envs = append(envs, + // TODO(irbekrm): in 1.80 we deprecated TS_EGRESS_SERVICES_CONFIG_PATH in favour of + // TS_EGRESS_PROXIES_CONFIG_PATH. Remove it in 1.84. + corev1.EnvVar{ + Name: "TS_EGRESS_SERVICES_CONFIG_PATH", + Value: fmt.Sprintf("/etc/proxies/%s", egressservices.KeyEgressServices), + }, + corev1.EnvVar{ + Name: "TS_EGRESS_PROXIES_CONFIG_PATH", + Value: "/etc/proxies", + }, corev1.EnvVar{ Name: "TS_INTERNAL_APP", Value: kubetypes.AppProxyGroupEgress, }, - ) + corev1.EnvVar{ + Name: "TS_ENABLE_HEALTH_CHECK", + Value: "true", + }) } else { // ingress envs = append(envs, corev1.EnvVar{ Name: "TS_INTERNAL_APP", @@ -167,6 +183,25 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string return append(c.Env, envs...) }() + // The pre-stop hook is used to ensure that a replica does not get terminated while cluster traffic for egress + // services is still being routed to it. + // + // This mechanism currently (2025-01-26) rely on the local health check being accessible on the Pod's + // IP, so they are not supported for ProxyGroups where users have configured TS_LOCAL_ADDR_PORT to a custom + // value. + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress && !hasLocalAddrPortSet(proxyClass) { + c.Lifecycle = &corev1.Lifecycle{ + PreStop: &corev1.LifecycleHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: kubetypes.EgessServicesPreshutdownEP, + Port: intstr.FromInt(defaultLocalAddrPort), + }, + }, + } + // Set the deletion grace period to 6 minutes to ensure that the pre-stop hook has enough time to terminate + // gracefully. + ss.Spec.Template.DeletionGracePeriodSeconds = ptr.To(deletionGracePeriodSeconds) + } return ss, nil } @@ -258,7 +293,9 @@ func pgStateSecrets(pg *tsapi.ProxyGroup, namespace string) (secrets []*corev1.S return secrets } -func pgEgressCM(pg *tsapi.ProxyGroup, namespace string) *corev1.ConfigMap { +func pgEgressCM(pg *tsapi.ProxyGroup, namespace string) (*corev1.ConfigMap, []byte) { + hp := hepPings(pg) + hpBs := []byte(strconv.Itoa(hp)) return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: pgEgressCMName(pg.Name), @@ -266,8 +303,10 @@ func pgEgressCM(pg *tsapi.ProxyGroup, namespace string) *corev1.ConfigMap { Labels: pgLabels(pg.Name, nil), OwnerReferences: pgOwnerReference(pg), }, - } + BinaryData: map[string][]byte{egressservices.KeyHEPPings: hpBs}, + }, hpBs } + func pgIngressCM(pg *tsapi.ProxyGroup, namespace string) *corev1.ConfigMap { return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -313,3 +352,23 @@ func pgReplicas(pg *tsapi.ProxyGroup) int32 { func pgEgressCMName(pg string) string { return fmt.Sprintf("%s-egress-config", pg) } + +// hasLocalAddrPortSet returns true if the proxyclass has the TS_LOCAL_ADDR_PORT env var set. 
For egress ProxyGroups, +// currently (2025-01-26) this means that the ProxyGroup does not support graceful failover. +func hasLocalAddrPortSet(proxyClass *tsapi.ProxyClass) bool { + if proxyClass == nil || proxyClass.Spec.StatefulSet == nil || proxyClass.Spec.StatefulSet.Pod == nil || proxyClass.Spec.StatefulSet.Pod.TailscaleContainer == nil { + return false + } + return slices.ContainsFunc(proxyClass.Spec.StatefulSet.Pod.TailscaleContainer.Env, func(env tsapi.Env) bool { + return env.Name == envVarTSLocalAddrPort + }) +} + +// hepPings returns the number of times a health check endpoint exposed by a Service fronting ProxyGroup replicas should +// be pinged to ensure that all currently configured backend replicas are hit. +func hepPings(pg *tsapi.ProxyGroup) int { + rc := pgReplicas(pg) + // Assuming a Service implemented using round robin load balancing, number-of-replica-times should be enough, but in + // practice, we cannot assume that the requests will be load balanced perfectly. + return int(rc) * 3 +} diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index e7c85d387..29100de1d 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -19,13 +19,13 @@ import ( rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "tailscale.com/client/tailscale" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" - "tailscale.com/kube/egressservices" "tailscale.com/kube/kubetypes" "tailscale.com/tstest" "tailscale.com/types/ptr" @@ -97,7 +97,7 @@ func TestProxyGroup(t *testing.T) { tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass default-pc is not yet in a ready state, waiting...", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, false, "") + expectProxyGroupResources(t, fc, pg, false, "", pc) }) t.Run("observe_ProxyGroupCreating_status_reason", func(t *testing.T) { @@ -118,11 +118,11 @@ func TestProxyGroup(t *testing.T) { tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, "") + expectProxyGroupResources(t, fc, pg, true, "", pc) if expected := 1; reconciler.egressProxyGroups.Len() != expected { t.Fatalf("expected %d egress ProxyGroups, got %d", expected, reconciler.egressProxyGroups.Len()) } - expectProxyGroupResources(t, fc, pg, true, "") + expectProxyGroupResources(t, fc, pg, true, "", pc) keyReq := tailscale.KeyCapabilities{ Devices: tailscale.KeyDeviceCapabilities{ Create: tailscale.KeyDeviceCreateCapabilities{ @@ -154,7 +154,7 @@ func TestProxyGroup(t *testing.T) { } tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash) + expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc) }) t.Run("scale_up_to_3", func(t *testing.T) { @@ -165,7 +165,7 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) 
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash) + expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc) addNodeIDToStateSecrets(t, fc, pg) expectReconciled(t, reconciler, "", pg.Name) @@ -175,7 +175,7 @@ func TestProxyGroup(t *testing.T) { TailnetIPs: []string{"1.2.3.4", "::1"}, }) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash) + expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc) }) t.Run("scale_down_to_1", func(t *testing.T) { @@ -188,7 +188,7 @@ func TestProxyGroup(t *testing.T) { pg.Status.Devices = pg.Status.Devices[:1] // truncate to only the first device. expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash) + expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc) }) t.Run("trigger_config_change_and_observe_new_config_hash", func(t *testing.T) { @@ -202,7 +202,7 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, "518a86e9fae64f270f8e0ec2a2ea6ca06c10f725035d3d6caca132cd61e42a74") + expectProxyGroupResources(t, fc, pg, true, "518a86e9fae64f270f8e0ec2a2ea6ca06c10f725035d3d6caca132cd61e42a74", pc) }) t.Run("enable_metrics", func(t *testing.T) { @@ -246,12 +246,29 @@ func TestProxyGroup(t *testing.T) { // The fake client does not clean up objects whose owner has been // deleted, so we can't test for the owned resources getting deleted. }) + } func TestProxyGroupTypes(t *testing.T) { + pc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Generation: 1, + }, + Spec: tsapi.ProxyClassSpec{}, + } fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). + WithObjects(pc). + WithStatusSubresource(pc). Build() + mustUpdateStatus(t, fc, "", pc.Name, func(p *tsapi.ProxyClass) { + p.Status.Conditions = []metav1.Condition{{ + Type: string(tsapi.ProxyClassReady), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }} + }) zl, _ := zap.NewDevelopment() reconciler := &ProxyGroupReconciler{ @@ -274,9 +291,7 @@ func TestProxyGroupTypes(t *testing.T) { Replicas: ptr.To[int32](0), }, } - if err := fc.Create(context.Background(), pg); err != nil { - t.Fatal(err) - } + mustCreate(t, fc, pg) expectReconciled(t, reconciler, "", pg.Name) verifyProxyGroupCounts(t, reconciler, 0, 1) @@ -286,7 +301,8 @@ func TestProxyGroupTypes(t *testing.T) { t.Fatalf("failed to get StatefulSet: %v", err) } verifyEnvVar(t, sts, "TS_INTERNAL_APP", kubetypes.AppProxyGroupEgress) - verifyEnvVar(t, sts, "TS_EGRESS_SERVICES_CONFIG_PATH", fmt.Sprintf("/etc/proxies/%s", egressservices.KeyEgressServices)) + verifyEnvVar(t, sts, "TS_EGRESS_PROXIES_CONFIG_PATH", "/etc/proxies") + verifyEnvVar(t, sts, "TS_ENABLE_HEALTH_CHECK", "true") // Verify that egress configuration has been set up. 
cm := &corev1.ConfigMap{} @@ -323,6 +339,57 @@ func TestProxyGroupTypes(t *testing.T) { if diff := cmp.Diff(expectedVolumeMounts, sts.Spec.Template.Spec.Containers[0].VolumeMounts); diff != "" { t.Errorf("unexpected volume mounts (-want +got):\n%s", diff) } + + expectedLifecycle := corev1.Lifecycle{ + PreStop: &corev1.LifecycleHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: kubetypes.EgessServicesPreshutdownEP, + Port: intstr.FromInt(defaultLocalAddrPort), + }, + }, + } + if diff := cmp.Diff(expectedLifecycle, *sts.Spec.Template.Spec.Containers[0].Lifecycle); diff != "" { + t.Errorf("unexpected lifecycle (-want +got):\n%s", diff) + } + if *sts.Spec.Template.DeletionGracePeriodSeconds != deletionGracePeriodSeconds { + t.Errorf("unexpected deletion grace period seconds %d, want %d", *sts.Spec.Template.DeletionGracePeriodSeconds, deletionGracePeriodSeconds) + } + }) + t.Run("egress_type_no_lifecycle_hook_when_local_addr_port_set", func(t *testing.T) { + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-egress-no-lifecycle", + UID: "test-egress-no-lifecycle-uid", + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeEgress, + Replicas: ptr.To[int32](0), + ProxyClass: "test", + }, + } + mustCreate(t, fc, pg) + mustUpdate(t, fc, "", pc.Name, func(p *tsapi.ProxyClass) { + p.Spec.StatefulSet = &tsapi.StatefulSet{ + Pod: &tsapi.Pod{ + TailscaleContainer: &tsapi.Container{ + Env: []tsapi.Env{{ + Name: "TS_LOCAL_ADDR_PORT", + Value: "127.0.0.1:8080", + }}, + }, + }, + } + }) + expectReconciled(t, reconciler, "", pg.Name) + + sts := &appsv1.StatefulSet{} + if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + + if sts.Spec.Template.Spec.Containers[0].Lifecycle != nil { + t.Error("lifecycle hook was set when TS_LOCAL_ADDR_PORT was configured via ProxyClass") + } }) t.Run("ingress_type", func(t *testing.T) { @@ -341,7 +408,7 @@ func TestProxyGroupTypes(t *testing.T) { } expectReconciled(t, reconciler, "", pg.Name) - verifyProxyGroupCounts(t, reconciler, 1, 1) + verifyProxyGroupCounts(t, reconciler, 1, 2) sts := &appsv1.StatefulSet{} if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { @@ -402,13 +469,13 @@ func verifyEnvVar(t *testing.T, sts *appsv1.StatefulSet, name, expectedValue str t.Errorf("%s environment variable not found", name) } -func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool, cfgHash string) { +func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool, cfgHash string, proxyClass *tsapi.ProxyClass) { t.Helper() role := pgRole(pg, tsNamespace) roleBinding := pgRoleBinding(pg, tsNamespace) serviceAccount := pgServiceAccount(pg, tsNamespace) - statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto") + statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", proxyClass) if err != nil { t.Fatal(err) } diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index fce6bfdd7..c1d13f33d 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -101,6 +101,9 @@ const ( proxyTypeIngressResource = "ingress_resource" proxyTypeConnector = "connector" proxyTypeProxyGroup = "proxygroup" + + envVarTSLocalAddrPort = "TS_LOCAL_ADDR_PORT" + defaultLocalAddrPort = 9002 // metrics and health check port ) var ( diff --git a/k8s-operator/conditions.go 
b/k8s-operator/conditions.go index 1ecedfc07..abe8f7f9c 100644 --- a/k8s-operator/conditions.go +++ b/k8s-operator/conditions.go @@ -75,16 +75,6 @@ func RemoveServiceCondition(svc *corev1.Service, conditionType tsapi.ConditionTy }) } -func EgressServiceIsValidAndConfigured(svc *corev1.Service) bool { - for _, typ := range []tsapi.ConditionType{tsapi.EgressSvcValid, tsapi.EgressSvcConfigured} { - cond := GetServiceCondition(svc, typ) - if cond == nil || cond.Status != metav1.ConditionTrue { - return false - } - } - return true -} - // SetRecorderCondition ensures that Recorder status has a condition with the // given attributes. LastTransitionTime gets set every time condition's status // changes. diff --git a/kube/egressservices/egressservices.go b/kube/egressservices/egressservices.go index 04a1c362b..2515f1bf3 100644 --- a/kube/egressservices/egressservices.go +++ b/kube/egressservices/egressservices.go @@ -13,9 +13,15 @@ import ( "net/netip" ) -// KeyEgressServices is name of the proxy state Secret field that contains the -// currently applied egress proxy config. -const KeyEgressServices = "egress-services" +const ( + // KeyEgressServices is name of the proxy state Secret field that contains the + // currently applied egress proxy config. + KeyEgressServices = "egress-services" + + // KeyHEPPings is the number of times an egress service health check endpoint needs to be pinged to ensure that + // each currently configured backend is hit. In practice, it depends on the number of ProxyGroup replicas. + KeyHEPPings = "hep-pings" +) // Configs contains the desired configuration for egress services keyed by // service name. @@ -24,6 +30,7 @@ type Configs map[string]Config // Config is an egress service configuration. // TODO(irbekrm): version this? type Config struct { + HealthCheckEndpoint string `json:"healthCheckEndpoint"` // TailnetTarget is the target to which cluster traffic for this service // should be proxied. TailnetTarget TailnetTarget `json:"tailnetTarget"` diff --git a/kube/egressservices/egressservices_test.go b/kube/egressservices/egressservices_test.go index d6f952ea0..806ad91be 100644 --- a/kube/egressservices/egressservices_test.go +++ b/kube/egressservices/egressservices_test.go @@ -55,7 +55,7 @@ func Test_jsonMarshalConfig(t *testing.T) { protocol: "tcp", matchPort: 4003, targetPort: 80, - wantsBs: []byte(`{"tailnetTarget":{"ip":"","fqdn":""},"ports":[{"protocol":"tcp","matchPort":4003,"targetPort":80}]}`), + wantsBs: []byte(`{"healthCheckEndpoint":"","tailnetTarget":{"ip":"","fqdn":""},"ports":[{"protocol":"tcp","matchPort":4003,"targetPort":80}]}`), }, } for _, tt := range tests { diff --git a/kube/kubetypes/types.go b/kube/kubetypes/types.go index afc489018..894cbb41d 100644 --- a/kube/kubetypes/types.go +++ b/kube/kubetypes/types.go @@ -43,4 +43,9 @@ const ( // that cluster workloads behind the Ingress can now be accessed via the given DNS name over HTTPS. KeyHTTPSEndpoint string = "https_endpoint" ValueNoHTTPS string = "no-https" + + // Pod's IPv4 address header key as returned by containerboot health check endpoint. 
+ PodIPv4Header string = "Pod-IPv4" + + EgessServicesPreshutdownEP = "/internal-egress-services-preshutdown" ) From 52f88f782a45652d9db25b1563e5defae1e42897 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 29 Jan 2025 17:48:05 +0200 Subject: [PATCH 0422/1708] cmd/k8s-operator: don't set deprecated configfile hash on new proxies (#14817) Fixes the configfile reload logic- if the tailscale capver can not yet be determined because the device info is not yet written to the state Secret, don't assume that the proxy is pre-110. Updates tailscale/tailscale#13032 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/operator_test.go | 65 ------------------------------- cmd/k8s-operator/sts.go | 2 +- 2 files changed, 1 insertion(+), 66 deletions(-) diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 2fa14e33b..73c795bb3 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1339,71 +1339,6 @@ func TestProxyFirewallMode(t *testing.T) { expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) } -func TestTailscaledConfigfileHash(t *testing.T) { - fc := fake.NewFakeClient() - ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } - clock := tstest.NewClock(tstest.ClockOpts{}) - sr := &ServiceReconciler{ - Client: fc, - ssr: &tailscaleSTSReconciler{ - Client: fc, - tsClient: ft, - defaultTags: []string{"tag:k8s"}, - operatorNamespace: "operator-ns", - proxyImage: "tailscale/tailscale", - }, - logger: zl.Sugar(), - clock: clock, - isDefaultLoadBalancer: true, - } - - // Create a service that we should manage, and check that the initial round - // of objects looks right. - mustCreate(t, fc, &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "default", - // The apiserver is supposed to set the UID, but the fake client - // doesn't. So, set it explicitly because other code later depends - // on it being set. - UID: types.UID("1234-UID"), - }, - Spec: corev1.ServiceSpec{ - ClusterIP: "10.20.30.40", - Type: corev1.ServiceTypeLoadBalancer, - }, - }) - - expectReconciled(t, sr, "default", "test") - expectReconciled(t, sr, "default", "test") - - fullName, shortName := findGenName(t, fc, "default", "test", "svc") - o := configOpts{ - stsName: shortName, - secretName: fullName, - namespace: "default", - parentType: "svc", - hostname: "default-test", - clusterTargetIP: "10.20.30.40", - confFileHash: "848bff4b5ba83ac999e6984c8464e597156daba961ae045e7dbaef606d54ab5e", - app: kubetypes.AppIngressProxy, - } - expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) - - // 2. Hostname gets changed, configfile is updated and a new hash value - // is produced. - mustUpdate(t, fc, "default", "test", func(svc *corev1.Service) { - mak.Set(&svc.Annotations, AnnotationHostname, "another-test") - }) - o.hostname = "another-test" - o.confFileHash = "d4cc13f09f55f4f6775689004f9a466723325b84d2b590692796bfe22aeaa389" - expectReconciled(t, sr, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) -} func Test_isMagicDNSName(t *testing.T) { tests := []struct { in string diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index c1d13f33d..0bc9d6fb9 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -697,7 +697,7 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S // being created, there is no need for a restart. // TODO(irbekrm): remove this in 1.84. 
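	// In effect (per the commit message above): only a proxy whose device info
	// is already present in the state Secret and that reports capver < 110 gets
	// the freshly computed config hash; if the device info is not yet written
	// (dev == nil), the previously set annotation value (empty for a brand new
	// proxy) is kept, so the deprecated hash is never set on new proxies.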
hash := tsConfigHash - if dev != nil && dev.capver >= 110 { + if dev == nil || dev.capver >= 110 { hash = s.Spec.Template.GetAnnotations()[podAnnotationLastSetConfigFileHash] } s.Spec = ss.Spec From b60f6b849af1fae1cf343be98f7fb1714c9ea165 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Wed, 29 Jan 2025 10:25:50 -0600 Subject: [PATCH 0423/1708] Revert "ssh,tempfork/gliderlabs/ssh: replace github.com/tailscale/golang-x-crypto/ssh with golang.org/x/crypto/ssh" This reverts commit 46fd4e58a27495263336b86ee961ee28d8c332b7. We don't want to include this in 1.80 yet, but can add it back post 1.80. Updates #8593 Signed-off-by: Percy Wegmann --- cmd/k8s-operator/depaware.txt | 11 +- cmd/ssh-auth-none-demo/ssh-auth-none-demo.go | 24 +- cmd/tailscaled/depaware.txt | 7 +- cmd/tailscaled/deps_test.go | 1 + go.mod | 2 +- go.sum | 4 +- ipn/ipnlocal/ssh.go | 2 +- ssh/tailssh/tailssh.go | 310 +++++++++++-------- ssh/tailssh/tailssh_integration_test.go | 2 +- ssh/tailssh/tailssh_test.go | 5 +- tempfork/gliderlabs/ssh/agent.go | 2 +- tempfork/gliderlabs/ssh/context.go | 11 +- tempfork/gliderlabs/ssh/options.go | 2 +- tempfork/gliderlabs/ssh/options_test.go | 2 +- tempfork/gliderlabs/ssh/server.go | 2 +- tempfork/gliderlabs/ssh/session.go | 2 +- tempfork/gliderlabs/ssh/session_test.go | 2 +- tempfork/gliderlabs/ssh/ssh.go | 4 +- tempfork/gliderlabs/ssh/tcpip.go | 2 +- tempfork/gliderlabs/ssh/tcpip_test.go | 2 +- tempfork/gliderlabs/ssh/util.go | 2 +- tempfork/gliderlabs/ssh/wrap.go | 2 +- 22 files changed, 231 insertions(+), 172 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 972dbfc2c..e32fd4a2b 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -197,6 +197,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ + LD github.com/tailscale/golang-x-crypto/internal/poly1305 from github.com/tailscale/golang-x-crypto/ssh + LD github.com/tailscale/golang-x-crypto/ssh from tailscale.com/ipn/ipnlocal + LD github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf from github.com/tailscale/golang-x-crypto/ssh github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ @@ -983,12 +986,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ - LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf - golang.org/x/crypto/chacha20 from golang.org/x/crypto/ssh+ + LD golang.org/x/crypto/blowfish from github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf + golang.org/x/crypto/chacha20 from github.com/tailscale/golang-x-crypto/ssh+ golang.org/x/crypto/chacha20poly1305 from crypto/tls+ golang.org/x/crypto/cryptobyte from crypto/ecdsa+ golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ - golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ + golang.org/x/crypto/curve25519 from github.com/tailscale/golang-x-crypto/ssh+ 
golang.org/x/crypto/hkdf from crypto/tls+ golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ @@ -997,8 +1000,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ - LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal - LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+ golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+ diff --git a/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go b/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go index 39af584ec..ee929299a 100644 --- a/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go +++ b/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go @@ -6,9 +6,6 @@ // highlight the unique parts of the Tailscale SSH server so SSH // client authors can hit it easily and fix their SSH clients without // needing to set up Tailscale and Tailscale SSH. -// -// Connections are allowed using any username except for "denyme". Connecting as -// "denyme" will result in an authentication failure with error message. package main import ( @@ -19,7 +16,6 @@ import ( "crypto/rsa" "crypto/x509" "encoding/pem" - "errors" "flag" "fmt" "io" @@ -28,7 +24,7 @@ import ( "path/filepath" "time" - gossh "golang.org/x/crypto/ssh" + gossh "github.com/tailscale/golang-x-crypto/ssh" "tailscale.com/tempfork/gliderlabs/ssh" ) @@ -66,21 +62,13 @@ func main() { Handler: handleSessionPostSSHAuth, ServerConfigCallback: func(ctx ssh.Context) *gossh.ServerConfig { start := time.Now() - var spac gossh.ServerPreAuthConn return &gossh.ServerConfig{ - PreAuthConnCallback: func(conn gossh.ServerPreAuthConn) { - spac = conn + NextAuthMethodCallback: func(conn gossh.ConnMetadata, prevErrors []error) []string { + return []string{"tailscale"} }, NoClientAuth: true, // required for the NoClientAuthCallback to run NoClientAuthCallback: func(cm gossh.ConnMetadata) (*gossh.Permissions, error) { - spac.SendAuthBanner(fmt.Sprintf("# Banner: doing none auth at %v\r\n", time.Since(start))) - - if cm.User() == "denyme" { - return nil, &gossh.BannerError{ - Err: errors.New("denying access"), - Message: "denyme is not allowed to access this machine\n", - } - } + cm.SendAuthBanner(fmt.Sprintf("# Banner: doing none auth at %v\r\n", time.Since(start))) totalBanners := 2 if cm.User() == "banners" { @@ -89,9 +77,9 @@ func main() { for banner := 2; banner <= totalBanners; banner++ { time.Sleep(time.Second) if banner == totalBanners { - spac.SendAuthBanner(fmt.Sprintf("# Banner%d: access granted at %v\r\n", banner, time.Since(start))) + cm.SendAuthBanner(fmt.Sprintf("# Banner%d: access granted at %v\r\n", banner, time.Since(start))) } else { - spac.SendAuthBanner(fmt.Sprintf("# Banner%d at %v\r\n", banner, time.Since(start))) + cm.SendAuthBanner(fmt.Sprintf("# Banner%d at %v\r\n", banner, time.Since(start))) } } return nil, nil diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index a6fae54ff..a7ad83818 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -152,6 +152,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de W 💣 github.com/tailscale/go-winio/internal/socket 
from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ + LD github.com/tailscale/golang-x-crypto/internal/poly1305 from github.com/tailscale/golang-x-crypto/ssh + LD github.com/tailscale/golang-x-crypto/ssh from tailscale.com/ipn/ipnlocal+ + LD github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf from github.com/tailscale/golang-x-crypto/ssh github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ @@ -436,12 +439,12 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ - LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf + LD golang.org/x/crypto/blowfish from github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf+ golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ golang.org/x/crypto/chacha20poly1305 from crypto/tls+ golang.org/x/crypto/cryptobyte from crypto/ecdsa+ golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ - golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ + golang.org/x/crypto/curve25519 from github.com/tailscale/golang-x-crypto/ssh+ golang.org/x/crypto/hkdf from crypto/tls+ golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 7f06abc6c..2b4bc280d 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -17,6 +17,7 @@ func TestOmitSSH(t *testing.T) { Tags: "ts_omit_ssh", BadDeps: map[string]string{ "tailscale.com/ssh/tailssh": msg, + "golang.org/x/crypto/ssh": msg, "tailscale.com/sessionrecording": msg, "github.com/anmitsu/go-shlex": msg, "github.com/creack/pty": msg, diff --git a/go.mod b/go.mod index 2489e34d7..8e52a9ab3 100644 --- a/go.mod +++ b/go.mod @@ -94,7 +94,7 @@ require ( go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 + golang.org/x/crypto v0.32.0 golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 golang.org/x/mod v0.22.0 golang.org/x/net v0.34.0 diff --git a/go.sum b/go.sum index b10e98da2..c1c82ad77 100644 --- a/go.sum +++ b/go.sum @@ -1058,8 +1058,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 h1:Z+Zg+aXJYq6f4TK2E4H+vZkQ4dJAWnInXDR6hM9znxo= -golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= diff --git a/ipn/ipnlocal/ssh.go b/ipn/ipnlocal/ssh.go index 47a74e282..383d03f5a 100644 --- a/ipn/ipnlocal/ssh.go +++ b/ipn/ipnlocal/ssh.go @@ -24,8 +24,8 @@ import ( "strings" "sync" + "github.com/tailscale/golang-x-crypto/ssh" "go4.org/mem" - "golang.org/x/crypto/ssh" "tailscale.com/tailcfg" "tailscale.com/util/lineiter" "tailscale.com/util/mak" diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index 638ff99b8..7f21ccd11 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -29,7 +29,7 @@ import ( "syscall" "time" - gossh "golang.org/x/crypto/ssh" + gossh "github.com/tailscale/golang-x-crypto/ssh" "tailscale.com/envknob" "tailscale.com/ipn/ipnlocal" "tailscale.com/logtail/backoff" @@ -198,11 +198,8 @@ func (srv *server) OnPolicyChange() { // Setup and discover server info // - ServerConfigCallback // -// Get access to a ServerPreAuthConn (useful for sending banners) -// -// Do the user auth with a NoClientAuthCallback. If user specified -// a username ending in "+password", follow this with password auth -// (to work around buggy SSH clients that don't work with noauth). +// Do the user auth +// - NoClientAuthHandler // // Once auth is done, the conn can be multiplexed with multiple sessions and // channels concurrently. At which point any of the following can be called @@ -222,12 +219,15 @@ type conn struct { idH string connID string // ID that's shared with control - // spac is a [gossh.ServerPreAuthConn] used for sending auth banners. - // Banners cannot be sent after auth completes. - spac gossh.ServerPreAuthConn + // anyPasswordIsOkay is whether the client is authorized but has requested + // password-based auth to work around their buggy SSH client. When set, we + // accept any password in the PasswordHandler. + anyPasswordIsOkay bool // set by NoClientAuthCallback - action0 *tailcfg.SSHAction // set by clientAuth - finalAction *tailcfg.SSHAction // set by clientAuth + action0 *tailcfg.SSHAction // set by doPolicyAuth; first matching action + currentAction *tailcfg.SSHAction // set by doPolicyAuth, updated by resolveNextAction + finalAction *tailcfg.SSHAction // set by doPolicyAuth or resolveNextAction + finalActionErr error // set by doPolicyAuth or resolveNextAction info *sshConnInfo // set by setInfo localUser *userMeta // set by doPolicyAuth @@ -254,142 +254,141 @@ func (c *conn) vlogf(format string, args ...any) { } } -// errDenied is returned by auth callbacks when a connection is denied by the -// policy. It returns a gossh.BannerError to make sure the message gets -// displayed as an auth banner. -func errDenied(message string) error { - if message == "" { - message = "tailscale: access denied" - } - return &gossh.BannerError{ - Message: message, +// isAuthorized walks through the action chain and returns nil if the connection +// is authorized. If the connection is not authorized, it returns +// errDenied. If the action chain resolution fails, it returns the +// resolution error. 
+func (c *conn) isAuthorized(ctx ssh.Context) error { + action := c.currentAction + for { + if action.Accept { + return nil + } + if action.Reject || action.HoldAndDelegate == "" { + return errDenied + } + var err error + action, err = c.resolveNextAction(ctx) + if err != nil { + return err + } + if action.Message != "" { + if err := ctx.SendAuthBanner(action.Message); err != nil { + return err + } + } } } -// bannerError creates a gossh.BannerError that will result in the given -// message being displayed to the client. If err != nil, this also logs -// message:error. The contents of err is not leaked to clients in the banner. -func (c *conn) bannerError(message string, err error) error { - if err != nil { - c.logf("%s: %s", message, err) - } - return &gossh.BannerError{ - Err: err, - Message: fmt.Sprintf("tailscale: %s", message), - } -} +// errDenied is returned by auth callbacks when a connection is denied by the +// policy. +var errDenied = errors.New("ssh: access denied") -// clientAuth is responsible for performing client authentication. +// NoClientAuthCallback implements gossh.NoClientAuthCallback and is called by +// the ssh.Server when the client first connects with the "none" +// authentication method. // -// If policy evaluation fails, it returns an error. -// If access is denied, it returns an error. -func (c *conn) clientAuth(cm gossh.ConnMetadata) (*gossh.Permissions, error) { +// It is responsible for continuing policy evaluation from BannerCallback (or +// starting it afresh). It returns an error if the policy evaluation fails, or +// if the decision is "reject" +// +// It either returns nil (accept) or errDenied (reject). The errors may be wrapped. +func (c *conn) NoClientAuthCallback(ctx ssh.Context) error { if c.insecureSkipTailscaleAuth { - return &gossh.Permissions{}, nil + return nil + } + if err := c.doPolicyAuth(ctx); err != nil { + return err + } + if err := c.isAuthorized(ctx); err != nil { + return err } - if err := c.setInfo(cm); err != nil { - return nil, c.bannerError("failed to get connection info", err) + // Let users specify a username ending in +password to force password auth. + // This exists for buggy SSH clients that get confused by success from + // "none" auth. + if strings.HasSuffix(ctx.User(), forcePasswordSuffix) { + c.anyPasswordIsOkay = true + return errors.New("any password please") // not shown to users } + return nil +} - action, localUser, acceptEnv, err := c.evaluatePolicy() - if err != nil { - return nil, c.bannerError("failed to evaluate SSH policy", err) +func (c *conn) nextAuthMethodCallback(cm gossh.ConnMetadata, prevErrors []error) (nextMethod []string) { + switch { + case c.anyPasswordIsOkay: + nextMethod = append(nextMethod, "password") } - c.action0 = action + // The fake "tailscale" method is always appended to next so OpenSSH renders + // that in parens as the final failure. (It also shows up in "ssh -v", etc) + nextMethod = append(nextMethod, "tailscale") + return +} + +// fakePasswordHandler is our implementation of the PasswordHandler hook that +// checks whether the user's password is correct. But we don't actually use +// passwords. This exists only for when the user's username ends in "+password" +// to signal that their SSH client is buggy and gets confused by auth type +// "none" succeeding and they want our SSH server to require a dummy password +// prompt instead. We then accept any password since we've already authenticated +// & authorized them. 
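To make the workaround concrete: a user whose client mishandles success from the "none" method appends the suffix to their username (for example "alice+password"). A rough sketch of how that suffix is consumed, using forcePasswordSuffix and the trimming done later in setInfo:

    // Sketch only; the real handling is NoClientAuthCallback plus the handler below.
    user := "alice+password"
    if strings.HasSuffix(user, forcePasswordSuffix) {
        // Offer a dummy password prompt; any value is accepted because policy
        // evaluation has already authorized this connection.
        sshUser := strings.TrimSuffix(user, forcePasswordSuffix) // "alice"
        _ = sshUser
    }
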
+func (c *conn) fakePasswordHandler(ctx ssh.Context, password string) bool { + return c.anyPasswordIsOkay +} - if action.Accept || action.HoldAndDelegate != "" { - // Immediately look up user information for purposes of generating - // hold and delegate URL (if necessary). +// doPolicyAuth verifies that conn can proceed. +// It returns nil if the matching policy action is Accept or +// HoldAndDelegate. Otherwise, it returns errDenied. +func (c *conn) doPolicyAuth(ctx ssh.Context) error { + if err := c.setInfo(ctx); err != nil { + c.logf("failed to get conninfo: %v", err) + return errDenied + } + a, localUser, acceptEnv, err := c.evaluatePolicy() + if err != nil { + return fmt.Errorf("%w: %v", errDenied, err) + } + c.action0 = a + c.currentAction = a + c.acceptEnv = acceptEnv + if a.Message != "" { + if err := ctx.SendAuthBanner(a.Message); err != nil { + return fmt.Errorf("SendBanner: %w", err) + } + } + if a.Accept || a.HoldAndDelegate != "" { + if a.Accept { + c.finalAction = a + } lu, err := userLookup(localUser) if err != nil { - return nil, c.bannerError(fmt.Sprintf("failed to look up local user %q ", localUser), err) + c.logf("failed to look up %v: %v", localUser, err) + ctx.SendAuthBanner(fmt.Sprintf("failed to look up %v\r\n", localUser)) + return err } gids, err := lu.GroupIds() if err != nil { - return nil, c.bannerError("failed to look up local user's group IDs", err) + c.logf("failed to look up local user's group IDs: %v", err) + return err } c.userGroupIDs = gids c.localUser = lu - c.acceptEnv = acceptEnv + return nil } - - for { - switch { - case action.Accept: - metricTerminalAccept.Add(1) - if action.Message != "" { - if err := c.spac.SendAuthBanner(action.Message); err != nil { - return nil, fmt.Errorf("error sending auth welcome message: %w", err) - } - } - c.finalAction = action - return &gossh.Permissions{}, nil - case action.Reject: - metricTerminalReject.Add(1) - c.finalAction = action - return nil, errDenied(action.Message) - case action.HoldAndDelegate != "": - if action.Message != "" { - if err := c.spac.SendAuthBanner(action.Message); err != nil { - return nil, fmt.Errorf("error sending hold and delegate message: %w", err) - } - } - - url := action.HoldAndDelegate - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) - defer cancel() - - metricHolds.Add(1) - url = c.expandDelegateURLLocked(url) - - var err error - action, err = c.fetchSSHAction(ctx, url) - if err != nil { - metricTerminalFetchError.Add(1) - return nil, c.bannerError("failed to fetch next SSH action", fmt.Errorf("fetch failed from %s: %w", url, err)) - } - default: - metricTerminalMalformed.Add(1) - return nil, c.bannerError("reached Action that had neither Accept, Reject, nor HoldAndDelegate", nil) - } + if a.Reject { + c.finalAction = a + return errDenied } + // Shouldn't get here, but: + return errDenied } // ServerConfig implements ssh.ServerConfigCallback. func (c *conn) ServerConfig(ctx ssh.Context) *gossh.ServerConfig { return &gossh.ServerConfig{ - PreAuthConnCallback: func(spac gossh.ServerPreAuthConn) { - c.spac = spac - }, - NoClientAuth: true, // required for the NoClientAuthCallback to run - NoClientAuthCallback: func(cm gossh.ConnMetadata) (*gossh.Permissions, error) { - // First perform client authentication, which can potentially - // involve multiple steps (for example prompting user to log in to - // Tailscale admin panel to confirm identity). - perms, err := c.clientAuth(cm) - if err != nil { - return nil, err - } - - // Authentication succeeded. 
Buggy SSH clients get confused by - // success from the "none" auth method. As a workaround, let users - // specify a username ending in "+password" to force password auth. - // The actual value of the password doesn't matter. - if strings.HasSuffix(cm.User(), forcePasswordSuffix) { - return nil, &gossh.PartialSuccessError{ - Next: gossh.ServerAuthCallbacks{ - PasswordCallback: func(_ gossh.ConnMetadata, password []byte) (*gossh.Permissions, error) { - return &gossh.Permissions{}, nil - }, - }, - } - } - - return perms, nil - }, + NoClientAuth: true, // required for the NoClientAuthCallback to run + NextAuthMethodCallback: c.nextAuthMethodCallback, } } @@ -400,7 +399,7 @@ func (srv *server) newConn() (*conn, error) { // Stop accepting new connections. // Connections in the auth phase are handled in handleConnPostSSHAuth. // Existing sessions are terminated by Shutdown. - return nil, errDenied("tailscale: server is shutting down") + return nil, errDenied } srv.mu.Unlock() c := &conn{srv: srv} @@ -411,6 +410,9 @@ func (srv *server) newConn() (*conn, error) { Version: "Tailscale", ServerConfigCallback: c.ServerConfig, + NoClientAuthHandler: c.NoClientAuthCallback, + PasswordHandler: c.fakePasswordHandler, + Handler: c.handleSessionPostSSHAuth, LocalPortForwardingCallback: c.mayForwardLocalPortTo, ReversePortForwardingCallback: c.mayReversePortForwardTo, @@ -521,16 +523,16 @@ func toIPPort(a net.Addr) (ipp netip.AddrPort) { return netip.AddrPortFrom(tanetaddr.Unmap(), uint16(ta.Port)) } -// connInfo populates the sshConnInfo from the provided arguments, +// connInfo returns a populated sshConnInfo from the provided arguments, // validating only that they represent a known Tailscale identity. -func (c *conn) setInfo(cm gossh.ConnMetadata) error { +func (c *conn) setInfo(ctx ssh.Context) error { if c.info != nil { return nil } ci := &sshConnInfo{ - sshUser: strings.TrimSuffix(cm.User(), forcePasswordSuffix), - src: toIPPort(cm.RemoteAddr()), - dst: toIPPort(cm.LocalAddr()), + sshUser: strings.TrimSuffix(ctx.User(), forcePasswordSuffix), + src: toIPPort(ctx.RemoteAddr()), + dst: toIPPort(ctx.LocalAddr()), } if !tsaddr.IsTailscaleIP(ci.dst.Addr()) { return fmt.Errorf("tailssh: rejecting non-Tailscale local address %v", ci.dst) @@ -545,7 +547,7 @@ func (c *conn) setInfo(cm gossh.ConnMetadata) error { ci.node = node ci.uprof = uprof - c.idH = string(cm.SessionID()) + c.idH = ctx.SessionID() c.info = ci c.logf("handling conn: %v", ci.String()) return nil @@ -592,6 +594,62 @@ func (c *conn) handleSessionPostSSHAuth(s ssh.Session) { ss.run() } +// resolveNextAction starts at c.currentAction and makes it way through the +// action chain one step at a time. An action without a HoldAndDelegate is +// considered the final action. Once a final action is reached, this function +// will keep returning that action. It updates c.currentAction to the next +// action in the chain. When the final action is reached, it also sets +// c.finalAction to the final action. 
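As a concrete illustration of the chain being walked, a typical check-mode flow is one delegating action followed by a terminal one (these values are illustrative, not a real control-plane response):

    // Hypothetical two-step chain: hold and delegate first, then accept.
    first := &tailcfg.SSHAction{
        Message:         "# Additional verification required; check your browser.\r\n",
        HoldAndDelegate: "https://control.example.com/ssh/action/check", // placeholder URL
    }
    final := &tailcfg.SSHAction{Accept: true}
    _ = []*tailcfg.SSHAction{first, final}
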
+func (c *conn) resolveNextAction(sctx ssh.Context) (action *tailcfg.SSHAction, err error) { + if c.finalAction != nil || c.finalActionErr != nil { + return c.finalAction, c.finalActionErr + } + + defer func() { + if action != nil { + c.currentAction = action + if action.Accept || action.Reject { + c.finalAction = action + } + } + if err != nil { + c.finalActionErr = err + } + }() + + ctx, cancel := context.WithCancel(sctx) + defer cancel() + + // Loop processing/fetching Actions until one reaches a + // terminal state (Accept, Reject, or invalid Action), or + // until fetchSSHAction times out due to the context being + // done (client disconnect) or its 30 minute timeout passes. + // (Which is a long time for somebody to see login + // instructions and go to a URL to do something.) + action = c.currentAction + if action.Accept || action.Reject { + if action.Reject { + metricTerminalReject.Add(1) + } else { + metricTerminalAccept.Add(1) + } + return action, nil + } + url := action.HoldAndDelegate + if url == "" { + metricTerminalMalformed.Add(1) + return nil, errors.New("reached Action that lacked Accept, Reject, and HoldAndDelegate") + } + metricHolds.Add(1) + url = c.expandDelegateURLLocked(url) + nextAction, err := c.fetchSSHAction(ctx, url) + if err != nil { + metricTerminalFetchError.Add(1) + return nil, fmt.Errorf("fetching SSHAction from %s: %w", url, err) + } + return nextAction, nil +} + func (c *conn) expandDelegateURLLocked(actionURL string) string { nm := c.srv.lb.NetMap() ci := c.info diff --git a/ssh/tailssh/tailssh_integration_test.go b/ssh/tailssh/tailssh_integration_test.go index 5c4f533b1..1799d3400 100644 --- a/ssh/tailssh/tailssh_integration_test.go +++ b/ssh/tailssh/tailssh_integration_test.go @@ -32,8 +32,8 @@ import ( "github.com/bramvdbogaerde/go-scp" "github.com/google/go-cmp/cmp" "github.com/pkg/sftp" + gossh "github.com/tailscale/golang-x-crypto/ssh" "golang.org/x/crypto/ssh" - gossh "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 207136659..9f3616d8c 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -31,7 +31,7 @@ import ( "testing" "time" - gossh "golang.org/x/crypto/ssh" + gossh "github.com/tailscale/golang-x-crypto/ssh" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" "tailscale.com/ipn/ipnlocal" @@ -805,8 +805,7 @@ func TestSSHAuthFlow(t *testing.T) { state: &localState{ sshEnabled: true, }, - authErr: true, - wantBanners: []string{"tailscale: failed to evaluate SSH policy"}, + authErr: true, }, { name: "accept", diff --git a/tempfork/gliderlabs/ssh/agent.go b/tempfork/gliderlabs/ssh/agent.go index 99e84c1e5..86a5bce7f 100644 --- a/tempfork/gliderlabs/ssh/agent.go +++ b/tempfork/gliderlabs/ssh/agent.go @@ -7,7 +7,7 @@ import ( "path" "sync" - gossh "golang.org/x/crypto/ssh" + gossh "github.com/tailscale/golang-x-crypto/ssh" ) const ( diff --git a/tempfork/gliderlabs/ssh/context.go b/tempfork/gliderlabs/ssh/context.go index 505a43dbf..d43de6f09 100644 --- a/tempfork/gliderlabs/ssh/context.go +++ b/tempfork/gliderlabs/ssh/context.go @@ -6,7 +6,7 @@ import ( "net" "sync" - gossh "golang.org/x/crypto/ssh" + gossh "github.com/tailscale/golang-x-crypto/ssh" ) // contextKey is a value for use with context.WithValue. It's used as @@ -55,6 +55,8 @@ var ( // ContextKeyPublicKey is a context key for use with Contexts in this package. // The associated value will be of type PublicKey. 
ContextKeyPublicKey = &contextKey{"public-key"} + + ContextKeySendAuthBanner = &contextKey{"send-auth-banner"} ) // Context is a package specific context interface. It exposes connection @@ -89,6 +91,8 @@ type Context interface { // SetValue allows you to easily write new values into the underlying context. SetValue(key, value interface{}) + + SendAuthBanner(banner string) error } type sshContext struct { @@ -117,6 +121,7 @@ func applyConnMetadata(ctx Context, conn gossh.ConnMetadata) { ctx.SetValue(ContextKeyUser, conn.User()) ctx.SetValue(ContextKeyLocalAddr, conn.LocalAddr()) ctx.SetValue(ContextKeyRemoteAddr, conn.RemoteAddr()) + ctx.SetValue(ContextKeySendAuthBanner, conn.SendAuthBanner) } func (ctx *sshContext) SetValue(key, value interface{}) { @@ -153,3 +158,7 @@ func (ctx *sshContext) LocalAddr() net.Addr { func (ctx *sshContext) Permissions() *Permissions { return ctx.Value(ContextKeyPermissions).(*Permissions) } + +func (ctx *sshContext) SendAuthBanner(msg string) error { + return ctx.Value(ContextKeySendAuthBanner).(func(string) error)(msg) +} diff --git a/tempfork/gliderlabs/ssh/options.go b/tempfork/gliderlabs/ssh/options.go index 29c8ef141..aa87a4f39 100644 --- a/tempfork/gliderlabs/ssh/options.go +++ b/tempfork/gliderlabs/ssh/options.go @@ -3,7 +3,7 @@ package ssh import ( "os" - gossh "golang.org/x/crypto/ssh" + gossh "github.com/tailscale/golang-x-crypto/ssh" ) // PasswordAuth returns a functional option that sets PasswordHandler on the server. diff --git a/tempfork/gliderlabs/ssh/options_test.go b/tempfork/gliderlabs/ssh/options_test.go index 47342b0f6..7cf6f376c 100644 --- a/tempfork/gliderlabs/ssh/options_test.go +++ b/tempfork/gliderlabs/ssh/options_test.go @@ -8,7 +8,7 @@ import ( "sync/atomic" "testing" - gossh "golang.org/x/crypto/ssh" + gossh "github.com/tailscale/golang-x-crypto/ssh" ) func newTestSessionWithOptions(t *testing.T, srv *Server, cfg *gossh.ClientConfig, options ...Option) (*gossh.Session, *gossh.Client, func()) { diff --git a/tempfork/gliderlabs/ssh/server.go b/tempfork/gliderlabs/ssh/server.go index 473e5fbd6..1086a72ca 100644 --- a/tempfork/gliderlabs/ssh/server.go +++ b/tempfork/gliderlabs/ssh/server.go @@ -8,7 +8,7 @@ import ( "sync" "time" - gossh "golang.org/x/crypto/ssh" + gossh "github.com/tailscale/golang-x-crypto/ssh" ) // ErrServerClosed is returned by the Server's Serve, ListenAndServe, diff --git a/tempfork/gliderlabs/ssh/session.go b/tempfork/gliderlabs/ssh/session.go index a7a9a3eeb..0a4a21e53 100644 --- a/tempfork/gliderlabs/ssh/session.go +++ b/tempfork/gliderlabs/ssh/session.go @@ -9,7 +9,7 @@ import ( "sync" "github.com/anmitsu/go-shlex" - gossh "golang.org/x/crypto/ssh" + gossh "github.com/tailscale/golang-x-crypto/ssh" ) // Session provides access to information about an SSH session and methods diff --git a/tempfork/gliderlabs/ssh/session_test.go b/tempfork/gliderlabs/ssh/session_test.go index fe61a9d96..a60be5ec1 100644 --- a/tempfork/gliderlabs/ssh/session_test.go +++ b/tempfork/gliderlabs/ssh/session_test.go @@ -9,7 +9,7 @@ import ( "net" "testing" - gossh "golang.org/x/crypto/ssh" + gossh "github.com/tailscale/golang-x-crypto/ssh" ) func (srv *Server) serveOnce(l net.Listener) error { diff --git a/tempfork/gliderlabs/ssh/ssh.go b/tempfork/gliderlabs/ssh/ssh.go index 54bd31ec2..644cb257d 100644 --- a/tempfork/gliderlabs/ssh/ssh.go +++ b/tempfork/gliderlabs/ssh/ssh.go @@ -4,7 +4,7 @@ import ( "crypto/subtle" "net" - gossh "golang.org/x/crypto/ssh" + gossh "github.com/tailscale/golang-x-crypto/ssh" ) type Signal string @@ 
-105,7 +105,7 @@ type Pty struct { // requested by the client as part of the pty-req. These are outlined as // part of https://datatracker.ietf.org/doc/html/rfc4254#section-8. // - // The opcodes are defined as constants in golang.org/x/crypto/ssh (VINTR,VQUIT,etc.). + // The opcodes are defined as constants in github.com/tailscale/golang-x-crypto/ssh (VINTR,VQUIT,etc.). // Boolean opcodes have values 0 or 1. Modes gossh.TerminalModes } diff --git a/tempfork/gliderlabs/ssh/tcpip.go b/tempfork/gliderlabs/ssh/tcpip.go index 335fda657..056a0c734 100644 --- a/tempfork/gliderlabs/ssh/tcpip.go +++ b/tempfork/gliderlabs/ssh/tcpip.go @@ -7,7 +7,7 @@ import ( "strconv" "sync" - gossh "golang.org/x/crypto/ssh" + gossh "github.com/tailscale/golang-x-crypto/ssh" ) const ( diff --git a/tempfork/gliderlabs/ssh/tcpip_test.go b/tempfork/gliderlabs/ssh/tcpip_test.go index b3ba60a9b..118b5d53a 100644 --- a/tempfork/gliderlabs/ssh/tcpip_test.go +++ b/tempfork/gliderlabs/ssh/tcpip_test.go @@ -10,7 +10,7 @@ import ( "strings" "testing" - gossh "golang.org/x/crypto/ssh" + gossh "github.com/tailscale/golang-x-crypto/ssh" ) var sampleServerResponse = []byte("Hello world") diff --git a/tempfork/gliderlabs/ssh/util.go b/tempfork/gliderlabs/ssh/util.go index 3bee06dcd..e3b5716a3 100644 --- a/tempfork/gliderlabs/ssh/util.go +++ b/tempfork/gliderlabs/ssh/util.go @@ -5,7 +5,7 @@ import ( "crypto/rsa" "encoding/binary" - "golang.org/x/crypto/ssh" + "github.com/tailscale/golang-x-crypto/ssh" ) func generateSigner() (ssh.Signer, error) { diff --git a/tempfork/gliderlabs/ssh/wrap.go b/tempfork/gliderlabs/ssh/wrap.go index d1f2b161e..17867d751 100644 --- a/tempfork/gliderlabs/ssh/wrap.go +++ b/tempfork/gliderlabs/ssh/wrap.go @@ -1,6 +1,6 @@ package ssh -import gossh "golang.org/x/crypto/ssh" +import gossh "github.com/tailscale/golang-x-crypto/ssh" // PublicKey is an abstraction of different types of public keys. type PublicKey interface { From 8bd04bdd3a6ceca64dfd04b49035cc16cbe2b2e1 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 29 Jan 2025 20:44:01 +0000 Subject: [PATCH 0424/1708] go.mod: bump gorilla/csrf for security fix (#14822) For https://github.com/gorilla/csrf/commit/9dd6af1f6d30fc79fb0d972394deebdabad6b5eb Update client/web and safeweb to correctly signal to the csrf middleware whether the request is being served over TLS. This determines whether Origin and Referer header checks are strictly enforced. The gorilla library previously did not enforce these checks due to a logic bug based on erroneous use of the net/http.Request API. The patch to fix this also inverts the library behavior to presume that every request is being served over TLS, necessitating these changes. Updates tailscale/corp#25340 Signed-off-by: Patrick O'Doherty Co-authored-by: Patrick O'Doherty --- client/web/web.go | 16 +++++++++++++--- go.mod | 2 +- go.sum | 4 ++-- safeweb/http.go | 6 ++++++ 4 files changed, 22 insertions(+), 6 deletions(-) diff --git a/client/web/web.go b/client/web/web.go index 4e4866923..3a7feea40 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -211,15 +211,25 @@ func NewServer(opts ServerOpts) (s *Server, err error) { // The client is secured by limiting the interface it listens on, // or by authenticating requests before they reach the web client. csrfProtect := csrf.Protect(s.csrfKey(), csrf.Secure(false)) + + // signal to the CSRF middleware that the request is being served over + // plaintext HTTP to skip TLS-only header checks. 
+ withSetPlaintext := func(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r = csrf.PlaintextHTTPRequest(r) + h.ServeHTTP(w, r) + }) + } + switch s.mode { case LoginServerMode: - s.apiHandler = csrfProtect(http.HandlerFunc(s.serveLoginAPI)) + s.apiHandler = csrfProtect(withSetPlaintext(http.HandlerFunc(s.serveLoginAPI))) metric = "web_login_client_initialization" case ReadOnlyServerMode: - s.apiHandler = csrfProtect(http.HandlerFunc(s.serveLoginAPI)) + s.apiHandler = csrfProtect(withSetPlaintext(http.HandlerFunc(s.serveLoginAPI))) metric = "web_readonly_client_initialization" case ManageServerMode: - s.apiHandler = csrfProtect(http.HandlerFunc(s.serveAPI)) + s.apiHandler = csrfProtect(withSetPlaintext(http.HandlerFunc(s.serveAPI))) metric = "web_client_initialization" } diff --git a/go.mod b/go.mod index 8e52a9ab3..6a5080585 100644 --- a/go.mod +++ b/go.mod @@ -265,7 +265,7 @@ require ( github.com/gordonklaus/ineffassign v0.1.0 // indirect github.com/goreleaser/chglog v0.5.0 // indirect github.com/goreleaser/fileglob v1.3.0 // indirect - github.com/gorilla/csrf v1.7.2 + github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 github.com/gostaticanalysis/analysisutil v0.7.1 // indirect github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect diff --git a/go.sum b/go.sum index c1c82ad77..c38c96029 100644 --- a/go.sum +++ b/go.sum @@ -529,8 +529,8 @@ github.com/goreleaser/fileglob v1.3.0 h1:/X6J7U8lbDpQtBvGcwwPS6OpzkNVlVEsFUVRx9+ github.com/goreleaser/fileglob v1.3.0/go.mod h1:Jx6BoXv3mbYkEzwm9THo7xbr5egkAraxkGorbJb4RxU= github.com/goreleaser/nfpm/v2 v2.33.1 h1:EkdAzZyVhAI9JC1vjmjjbmnNzyH1J6Cu4JCsA7YcQuc= github.com/goreleaser/nfpm/v2 v2.33.1/go.mod h1:8wwWWvJWmn84xo/Sqiv0aMvEGTHlHZTXTEuVSgQpkIM= -github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= -github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= +github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 h1:fiJdrgVBkjZ5B1HJ2WQwNOaXB+QyYcNXTA3t1XYLz0M= +github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= diff --git a/safeweb/http.go b/safeweb/http.go index 983ff2fad..143c4dcee 100644 --- a/safeweb/http.go +++ b/safeweb/http.go @@ -318,6 +318,12 @@ func checkHandlerType(apiPattern, browserPattern string) handlerType { } func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // if we are not in a secure context, signal to the CSRF middleware that + // TLS-only header checks should be skipped + if !s.Config.SecureContext { + r = csrf.PlaintextHTTPRequest(r) + } + _, bp := s.BrowserMux.Handler(r) _, ap := s.APIMux.Handler(r) switch { From 3f39211f987c4127b447be0c29e2e4ab08176b11 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 30 Jan 2025 10:47:45 +0200 Subject: [PATCH 0425/1708] cmd/k8s-operator: check that cluster traffic is routed to egress ProxyGroup Pod before marking it as ready (#14792) This change builds on top of #14436 to ensure minimum downtime during egress ProxyGroup update rollouts: - adds a readiness gate for ProxyGroup replicas that prevents kubelet from marking the replica Pod as ready 
before a corresponding readiness condition has been added to the Pod - adds a reconciler that reconciles egress ProxyGroup Pods and, for each that is not ready, if cluster traffic for relevant egress endpoints is routed via this Pod- if so add the readiness condition to allow kubelet to mark the Pod as ready. During the sequenced StatefulSet update rollouts kubelet does not restart a Pod before the previous replica has been updated and marked as ready, so ensuring that a replica is not marked as ready allows to avoid a temporary post-update situation where all replicas have been restarted, but none of the new ones are yet set up as an endpoint for the egress service, so cluster traffic is dropped. Updates tailscale/tailscale#14326 Signed-off-by: Irbe Krumina --- .../deploy/chart/templates/operator-rbac.yaml | 5 +- .../deploy/manifests/operator.yaml | 7 + cmd/k8s-operator/egress-pod-readiness.go | 274 +++++++++ cmd/k8s-operator/egress-pod-readiness_test.go | 525 ++++++++++++++++++ cmd/k8s-operator/egress-services-readiness.go | 15 +- cmd/k8s-operator/operator.go | 70 +++ cmd/k8s-operator/testutils_test.go | 28 + 7 files changed, 916 insertions(+), 8 deletions(-) create mode 100644 cmd/k8s-operator/egress-pod-readiness.go create mode 100644 cmd/k8s-operator/egress-pod-readiness_test.go diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml index 637bdf793..7056ef42f 100644 --- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml @@ -63,7 +63,10 @@ rules: verbs: ["create","delete","deletecollection","get","list","patch","update","watch"] - apiGroups: [""] resources: ["pods"] - verbs: ["get","list","watch"] + verbs: ["get","list","watch", "update"] +- apiGroups: [""] + resources: ["pods/status"] + verbs: ["update"] - apiGroups: ["apps"] resources: ["statefulsets", "deployments"] verbs: ["create","delete","deletecollection","get","list","patch","update","watch"] diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index def5716f6..e966ef559 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -4854,6 +4854,13 @@ rules: - get - list - watch + - update + - apiGroups: + - "" + resources: + - pods/status + verbs: + - update - apiGroups: - apps resources: diff --git a/cmd/k8s-operator/egress-pod-readiness.go b/cmd/k8s-operator/egress-pod-readiness.go new file mode 100644 index 000000000..a6c57bf9d --- /dev/null +++ b/cmd/k8s-operator/egress-pod-readiness.go @@ -0,0 +1,274 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "errors" + "fmt" + "net/http" + "slices" + "strings" + "sync/atomic" + "time" + + "go.uber.org/zap" + xslices "golang.org/x/exp/slices" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" + "tailscale.com/logtail/backoff" + "tailscale.com/tstime" + "tailscale.com/util/httpm" +) + +const tsEgressReadinessGate = "tailscale.com/egress-services" + +// egressPodsReconciler is responsible for setting tailscale.com/egress-services condition 
on egress ProxyGroup Pods. +// The condition is used as a readiness gate for the Pod, meaning that kubelet will not mark the Pod as ready before the +// condition is set. The ProxyGroup StatefulSet updates are rolled out in such a way that no Pod is restarted, before +// the previous Pod is marked as ready, so ensuring that the Pod does not get marked as ready when it is not yet able to +// route traffic for egress service prevents downtime during restarts caused by no available endpoints left because +// every Pod has been recreated and is not yet added to endpoints. +// https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-readiness-gate +type egressPodsReconciler struct { + client.Client + logger *zap.SugaredLogger + tsNamespace string + clock tstime.Clock + httpClient doer // http client that can be set to a mock client in tests + maxBackoff time.Duration // max backoff period between health check calls +} + +// Reconcile reconciles an egress ProxyGroup Pods on changes to those Pods and ProxyGroup EndpointSlices. It ensures +// that for each Pod who is ready to route traffic to all egress services for the ProxyGroup, the Pod has a +// tailscale.com/egress-services condition to set, so that kubelet will mark the Pod as ready. +// +// For the Pod to be ready +// to route traffic to the egress service, the kube proxy needs to have set up the Pod's IP as an endpoint for the +// ClusterIP Service corresponding to the egress service. +// +// Note that the endpoints for the ClusterIP Service are configured by the operator itself using custom +// EndpointSlices(egress-eps-reconciler), so the routing is not blocked on Pod's readiness. +// +// Each egress service has a corresponding ClusterIP Service, that exposes all user configured +// tailnet ports, as well as a health check port for the proxy. +// +// The reconciler calls the health check endpoint of each Service up to N number of times, where N is the number of +// replicas for the ProxyGroup x 3, and checks if the received response is healthy response from the Pod being reconciled. +// +// The health check response contains a header with the +// Pod's IP address- this is used to determine whether the response is received from this Pod. +// +// If the Pod does not appear to be serving the health check endpoint (pre-v1.80 proxies), the reconciler just sets the +// readiness condition for backwards compatibility reasons. +func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { + l := er.logger.With("Pod", req.NamespacedName) + l.Debugf("starting reconcile") + defer l.Debugf("reconcile finished") + + pod := new(corev1.Pod) + err = er.Get(ctx, req.NamespacedName, pod) + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to get Pod: %w", err) + } + if !pod.DeletionTimestamp.IsZero() { + l.Debugf("Pod is being deleted, do nothing") + return res, nil + } + if pod.Labels[LabelParentType] != proxyTypeProxyGroup { + l.Infof("[unexpected] reconciler called for a Pod that is not a ProxyGroup Pod") + return res, nil + } + + // If the Pod does not have the readiness gate set, there is no need to add the readiness condition. In practice + // this will happen if the user has configured custom TS_LOCAL_ADDR_PORT, thus disabling the graceful failover. 
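For context, the gate checked here has to be declared on the proxy Pod spec by the code that builds the ProxyGroup StatefulSet (outside this file); in corev1 terms that declaration is roughly:

    // Sketch: declare the readiness gate so kubelet waits for the
    // tailscale.com/egress-services condition before marking the Pod Ready.
    pod.Spec.ReadinessGates = append(pod.Spec.ReadinessGates, corev1.PodReadinessGate{
        ConditionType: tsEgressReadinessGate,
    })
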
+ if !slices.ContainsFunc(pod.Spec.ReadinessGates, func(r corev1.PodReadinessGate) bool { + return r.ConditionType == tsEgressReadinessGate + }) { + l.Debug("Pod does not have egress readiness gate set, skipping") + return res, nil + } + + proxyGroupName := pod.Labels[LabelParentName] + pg := new(tsapi.ProxyGroup) + if err := er.Get(ctx, types.NamespacedName{Name: proxyGroupName}, pg); err != nil { + return res, fmt.Errorf("error getting ProxyGroup %q: %w", proxyGroupName, err) + } + if pg.Spec.Type != typeEgress { + l.Infof("[unexpected] reconciler called for %q ProxyGroup Pod", pg.Spec.Type) + return res, nil + } + // Get all ClusterIP Services for all egress targets exposed to cluster via this ProxyGroup. + lbls := map[string]string{ + LabelManaged: "true", + labelProxyGroup: proxyGroupName, + labelSvcType: typeEgress, + } + svcs := &corev1.ServiceList{} + if err := er.List(ctx, svcs, client.InNamespace(er.tsNamespace), client.MatchingLabels(lbls)); err != nil { + return res, fmt.Errorf("error listing ClusterIP Services") + } + + idx := xslices.IndexFunc(pod.Status.Conditions, func(c corev1.PodCondition) bool { + return c.Type == tsEgressReadinessGate + }) + if idx != -1 { + l.Debugf("Pod is already ready, do nothing") + return res, nil + } + + var routesMissing atomic.Bool + errChan := make(chan error, len(svcs.Items)) + for _, svc := range svcs.Items { + s := svc + go func() { + ll := l.With("service_name", s.Name) + d := retrieveClusterDomain(er.tsNamespace, ll) + healthCheckAddr := healthCheckForSvc(&s, d) + if healthCheckAddr == "" { + ll.Debugf("ClusterIP Service does not expose a health check endpoint, unable to verify if routing is set up") + errChan <- nil + return + } + + var routesSetup bool + bo := backoff.NewBackoff(s.Name, ll.Infof, er.maxBackoff) + for range numCalls(pgReplicas(pg)) { + if ctx.Err() != nil { + errChan <- nil + return + } + state, err := er.lookupPodRouteViaSvc(ctx, pod, healthCheckAddr, ll) + if err != nil { + errChan <- fmt.Errorf("error validating if routing has been set up for Pod: %w", err) + return + } + if state == healthy || state == cannotVerify { + routesSetup = true + break + } + if state == unreachable || state == unhealthy || state == podNotReady { + bo.BackOff(ctx, errors.New("backoff")) + } + } + if !routesSetup { + ll.Debugf("Pod is not yet configured as Service endpoint") + routesMissing.Store(true) + } + errChan <- nil + }() + } + for range len(svcs.Items) { + e := <-errChan + err = errors.Join(err, e) + } + if err != nil { + return res, fmt.Errorf("error verifying conectivity: %w", err) + } + if rm := routesMissing.Load(); rm { + l.Info("Pod is not yet added as an endpoint for all egress targets, waiting...") + return reconcile.Result{RequeueAfter: shortRequeue}, nil + } + if err := er.setPodReady(ctx, pod, l); err != nil { + return res, fmt.Errorf("error setting Pod as ready: %w", err) + } + return res, nil +} + +func (er *egressPodsReconciler) setPodReady(ctx context.Context, pod *corev1.Pod, l *zap.SugaredLogger) error { + if slices.ContainsFunc(pod.Status.Conditions, func(c corev1.PodCondition) bool { + return c.Type == tsEgressReadinessGate + }) { + return nil + } + l.Infof("Pod is ready to route traffic to all egress targets") + pod.Status.Conditions = append(pod.Status.Conditions, corev1.PodCondition{ + Type: tsEgressReadinessGate, + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{Time: er.clock.Now()}, + }) + return er.Status().Update(ctx, pod) +} + +// healthCheckState is the result of a single request to an 
egress Service health check endpoint with a goal to hit a +// specific backend Pod. +type healthCheckState int8 + +const ( + cannotVerify healthCheckState = iota // not verifiable for this setup (i.e earlier proxy version) + unreachable // no backends or another network error + notFound // hit another backend + unhealthy // not 200 + podNotReady // Pod is not ready, i.e does not have an IP address yet + healthy // 200 +) + +// lookupPodRouteViaSvc attempts to reach a Pod using a health check endpoint served by a Service and returns the state of the health check. +func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *corev1.Pod, healthCheckAddr string, l *zap.SugaredLogger) (healthCheckState, error) { + if !slices.ContainsFunc(pod.Spec.Containers[0].Env, func(e corev1.EnvVar) bool { + return e.Name == "TS_ENABLE_HEALTH_CHECK" && e.Value == "true" + }) { + l.Debugf("Pod does not have health check enabled, unable to verify if it is currently routable via Service") + return cannotVerify, nil + } + wantsIP, err := podIPv4(pod) + if err != nil { + return -1, fmt.Errorf("error determining Pod's IP address: %w", err) + } + if wantsIP == "" { + return podNotReady, nil + } + + ctx, cancel := context.WithTimeout(ctx, time.Second*3) + defer cancel() + req, err := http.NewRequestWithContext(ctx, httpm.GET, healthCheckAddr, nil) + if err != nil { + return -1, fmt.Errorf("error creating new HTTP request: %w", err) + } + // Do not re-use the same connection for the next request so to maximize the chance of hitting all backends equally. + req.Close = true + resp, err := er.httpClient.Do(req) + if err != nil { + // This is most likely because this is the first Pod and is not yet added to Service endoints. Other + // error types are possible, but checking for those would likely make the system too fragile. + return unreachable, nil + } + defer resp.Body.Close() + gotIP := resp.Header.Get(kubetypes.PodIPv4Header) + if gotIP == "" { + l.Debugf("Health check does not return Pod's IP header, unable to verify if Pod is currently routable via Service") + return cannotVerify, nil + } + if !strings.EqualFold(wantsIP, gotIP) { + return notFound, nil + } + if resp.StatusCode != http.StatusOK { + return unhealthy, nil + } + return healthy, nil +} + +// numCalls return the number of times an endpoint on a ProxyGroup Service should be called till it can be safely +// assumed that, if none of the responses came back from a specific Pod then traffic for the Service is currently not +// being routed to that Pod. This assumes that traffic for the Service is routed via round robin, so +// InternalTrafficPolicy must be 'Cluster' and session affinity must be None. +func numCalls(replicas int32) int32 { + return replicas * 3 +} + +// doer is an interface for HTTP client that can be set to a mock client in tests. 
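The doer indirection below exists so tests can inject canned responses; in the running operator, something like the following wiring would satisfy it (illustrative only; the real setup is in operator.go, which is not shown in this hunk):

    // *http.Client already implements doer, so production wiring can simply pass
    // a client with a sensible timeout.
    rec := &egressPodsReconciler{
        httpClient: &http.Client{Timeout: 5 * time.Second},
        maxBackoff: time.Second * 5,
    }
    _ = rec
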
+type doer interface { + Do(*http.Request) (*http.Response, error) +} diff --git a/cmd/k8s-operator/egress-pod-readiness_test.go b/cmd/k8s-operator/egress-pod-readiness_test.go new file mode 100644 index 000000000..5e6fa2bb4 --- /dev/null +++ b/cmd/k8s-operator/egress-pod-readiness_test.go @@ -0,0 +1,525 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "bytes" + "errors" + "fmt" + "io" + "log" + "net/http" + "sync" + "testing" + "time" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" + "tailscale.com/tstest" + "tailscale.com/types/ptr" +) + +func TestEgressPodReadiness(t *testing.T) { + // We need to pass a Pod object to WithStatusSubresource because of some quirks in how the fake client + // works. Without this code we would not be able to update Pod's status further down. + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithStatusSubresource(&corev1.Pod{}). + Build() + zl, _ := zap.NewDevelopment() + cl := tstest.NewClock(tstest.ClockOpts{}) + rec := &egressPodsReconciler{ + tsNamespace: "operator-ns", + Client: fc, + logger: zl.Sugar(), + clock: cl, + } + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dev", + }, + Spec: tsapi.ProxyGroupSpec{ + Type: "egress", + Replicas: ptr.To(int32(3)), + }, + } + mustCreate(t, fc, pg) + podIP := "10.0.0.2" + podTemplate := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "operator-ns", + Name: "pod", + Labels: map[string]string{ + LabelParentType: "proxygroup", + LabelParentName: "dev", + }, + }, + Spec: corev1.PodSpec{ + ReadinessGates: []corev1.PodReadinessGate{{ + ConditionType: tsEgressReadinessGate, + }}, + Containers: []corev1.Container{{ + Name: "tailscale", + Env: []corev1.EnvVar{{ + Name: "TS_ENABLE_HEALTH_CHECK", + Value: "true", + }}, + }}, + }, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{{IP: podIP}}, + }, + } + + t.Run("no_egress_services", func(t *testing.T) { + pod := podTemplate.DeepCopy() + mustCreate(t, fc, pod) + expectReconciled(t, rec, "operator-ns", pod.Name) + + // Pod should have readiness gate condition set. + podSetReady(pod, cl) + expectEqual(t, fc, pod) + mustDeleteAll(t, fc, pod) + }) + t.Run("one_svc_already_routed_to", func(t *testing.T) { + pod := podTemplate.DeepCopy() + + svc, hep := newSvc("svc", 9002) + mustCreateAll(t, fc, svc, pod) + resp := readyResps(podIP, 1) + httpCl := fakeHTTPClient{ + t: t, + state: map[string][]fakeResponse{hep: resp}, + } + rec.httpClient = &httpCl + expectReconciled(t, rec, "operator-ns", pod.Name) + + // Pod should have readiness gate condition set. + podSetReady(pod, cl) + expectEqual(t, fc, pod) + + // A subsequent reconcile should not change the Pod. + expectReconciled(t, rec, "operator-ns", pod.Name) + expectEqual(t, fc, pod) + + mustDeleteAll(t, fc, pod, svc) + }) + t.Run("one_svc_many_backends_eventually_routed_to", func(t *testing.T) { + pod := podTemplate.DeepCopy() + + svc, hep := newSvc("svc", 9002) + mustCreateAll(t, fc, svc, pod) + // For a 3 replica ProxyGroup the healthcheck endpoint should be called 9 times, make the 9th time only + // return with the right Pod IP. + resps := append(readyResps("10.0.0.3", 4), append(readyResps("10.0.0.4", 4), readyResps(podIP, 1)...)...) 
+ httpCl := fakeHTTPClient{ + t: t, + state: map[string][]fakeResponse{hep: resps}, + } + rec.httpClient = &httpCl + expectReconciled(t, rec, "operator-ns", pod.Name) + + // Pod should have readiness gate condition set. + podSetReady(pod, cl) + expectEqual(t, fc, pod) + mustDeleteAll(t, fc, pod, svc) + }) + t.Run("one_svc_one_backend_eventually_healthy", func(t *testing.T) { + pod := podTemplate.DeepCopy() + + svc, hep := newSvc("svc", 9002) + mustCreateAll(t, fc, svc, pod) + // For a 3 replica ProxyGroup the healthcheck endpoint should be called 9 times, make the 9th time only + // return with 200 status code. + resps := append(unreadyResps(podIP, 8), readyResps(podIP, 1)...) + httpCl := fakeHTTPClient{ + t: t, + state: map[string][]fakeResponse{hep: resps}, + } + rec.httpClient = &httpCl + expectReconciled(t, rec, "operator-ns", pod.Name) + + // Pod should have readiness gate condition set. + podSetReady(pod, cl) + expectEqual(t, fc, pod) + mustDeleteAll(t, fc, pod, svc) + }) + t.Run("one_svc_one_backend_never_routable", func(t *testing.T) { + pod := podTemplate.DeepCopy() + + svc, hep := newSvc("svc", 9002) + mustCreateAll(t, fc, svc, pod) + // For a 3 replica ProxyGroup the healthcheck endpoint should be called 9 times and Pod should be + // requeued if neither of those succeed. + resps := readyResps("10.0.0.3", 9) + httpCl := fakeHTTPClient{ + t: t, + state: map[string][]fakeResponse{hep: resps}, + } + rec.httpClient = &httpCl + expectRequeue(t, rec, "operator-ns", pod.Name) + + // Pod should not have readiness gate condition set. + expectEqual(t, fc, pod) + mustDeleteAll(t, fc, pod, svc) + }) + t.Run("one_svc_many_backends_already_routable", func(t *testing.T) { + pod := podTemplate.DeepCopy() + + svc, hep := newSvc("svc", 9002) + svc2, hep2 := newSvc("svc-2", 9002) + svc3, hep3 := newSvc("svc-3", 9002) + mustCreateAll(t, fc, svc, svc2, svc3, pod) + resps := readyResps(podIP, 1) + httpCl := fakeHTTPClient{ + t: t, + state: map[string][]fakeResponse{ + hep: resps, + hep2: resps, + hep3: resps, + }, + } + rec.httpClient = &httpCl + expectReconciled(t, rec, "operator-ns", pod.Name) + + // Pod should not have readiness gate condition set. + podSetReady(pod, cl) + expectEqual(t, fc, pod) + mustDeleteAll(t, fc, pod, svc, svc2, svc3) + }) + t.Run("one_svc_many_backends_eventually_routable_and_healthy", func(t *testing.T) { + pod := podTemplate.DeepCopy() + svc, hep := newSvc("svc", 9002) + svc2, hep2 := newSvc("svc-2", 9002) + svc3, hep3 := newSvc("svc-3", 9002) + mustCreateAll(t, fc, svc, svc2, svc3, pod) + resps := append(readyResps("10.0.0.3", 7), readyResps(podIP, 1)...) + resps2 := append(readyResps("10.0.0.3", 5), readyResps(podIP, 1)...) + resps3 := append(unreadyResps(podIP, 4), readyResps(podIP, 1)...) + httpCl := fakeHTTPClient{ + t: t, + state: map[string][]fakeResponse{ + hep: resps, + hep2: resps2, + hep3: resps3, + }, + } + rec.httpClient = &httpCl + expectReconciled(t, rec, "operator-ns", pod.Name) + + // Pod should have readiness gate condition set. + podSetReady(pod, cl) + expectEqual(t, fc, pod) + mustDeleteAll(t, fc, pod, svc, svc2, svc3) + }) + t.Run("one_svc_many_backends_never_routable_and_healthy", func(t *testing.T) { + pod := podTemplate.DeepCopy() + + svc, hep := newSvc("svc", 9002) + svc2, hep2 := newSvc("svc-2", 9002) + svc3, hep3 := newSvc("svc-3", 9002) + mustCreateAll(t, fc, svc, svc2, svc3, pod) + // For a ProxyGroup with 3 replicas, each Service's health endpoint will be tried 9 times and the Pod + // will be requeued if neither succeeds. 
+ resps := readyResps("10.0.0.3", 9) + resps2 := append(readyResps("10.0.0.3", 5), readyResps("10.0.0.4", 4)...) + resps3 := unreadyResps(podIP, 9) + httpCl := fakeHTTPClient{ + t: t, + state: map[string][]fakeResponse{ + hep: resps, + hep2: resps2, + hep3: resps3, + }, + } + rec.httpClient = &httpCl + expectRequeue(t, rec, "operator-ns", pod.Name) + + // Pod should not have readiness gate condition set. + expectEqual(t, fc, pod) + mustDeleteAll(t, fc, pod, svc, svc2, svc3) + }) + t.Run("one_svc_many_backends_one_never_routable", func(t *testing.T) { + pod := podTemplate.DeepCopy() + + svc, hep := newSvc("svc", 9002) + svc2, hep2 := newSvc("svc-2", 9002) + svc3, hep3 := newSvc("svc-3", 9002) + mustCreateAll(t, fc, svc, svc2, svc3, pod) + // For a ProxyGroup with 3 replicas, each Service's health endpoint will be tried 9 times and the Pod + // will be requeued if any one never succeeds. + resps := readyResps(podIP, 9) + resps2 := readyResps(podIP, 9) + resps3 := append(readyResps("10.0.0.3", 5), readyResps("10.0.0.4", 4)...) + httpCl := fakeHTTPClient{ + t: t, + state: map[string][]fakeResponse{ + hep: resps, + hep2: resps2, + hep3: resps3, + }, + } + rec.httpClient = &httpCl + expectRequeue(t, rec, "operator-ns", pod.Name) + + // Pod should not have readiness gate condition set. + expectEqual(t, fc, pod) + mustDeleteAll(t, fc, pod, svc, svc2, svc3) + }) + t.Run("one_svc_many_backends_one_never_healthy", func(t *testing.T) { + pod := podTemplate.DeepCopy() + + svc, hep := newSvc("svc", 9002) + svc2, hep2 := newSvc("svc-2", 9002) + svc3, hep3 := newSvc("svc-3", 9002) + mustCreateAll(t, fc, svc, svc2, svc3, pod) + // For a ProxyGroup with 3 replicas, each Service's health endpoint will be tried 9 times and the Pod + // will be requeued if any one never succeeds. + resps := readyResps(podIP, 9) + resps2 := unreadyResps(podIP, 9) + resps3 := readyResps(podIP, 9) + httpCl := fakeHTTPClient{ + t: t, + state: map[string][]fakeResponse{ + hep: resps, + hep2: resps2, + hep3: resps3, + }, + } + rec.httpClient = &httpCl + expectRequeue(t, rec, "operator-ns", pod.Name) + + // Pod should not have readiness gate condition set. + expectEqual(t, fc, pod) + mustDeleteAll(t, fc, pod, svc, svc2, svc3) + }) + t.Run("one_svc_many_backends_different_ports_eventually_healthy_and_routable", func(t *testing.T) { + pod := podTemplate.DeepCopy() + + svc, hep := newSvc("svc", 9003) + svc2, hep2 := newSvc("svc-2", 9004) + svc3, hep3 := newSvc("svc-3", 9010) + mustCreateAll(t, fc, svc, svc2, svc3, pod) + // For a ProxyGroup with 3 replicas, each Service's health endpoint will be tried up to 9 times and + // marked as success as soon as one try succeeds. + resps := append(readyResps("10.0.0.3", 7), readyResps(podIP, 1)...) + resps2 := append(readyResps("10.0.0.3", 5), readyResps(podIP, 1)...) + resps3 := append(unreadyResps(podIP, 4), readyResps(podIP, 1)...) + httpCl := fakeHTTPClient{ + t: t, + state: map[string][]fakeResponse{ + hep: resps, + hep2: resps2, + hep3: resps3, + }, + } + rec.httpClient = &httpCl + expectReconciled(t, rec, "operator-ns", pod.Name) + + // Pod should have readiness gate condition set. + podSetReady(pod, cl) + expectEqual(t, fc, pod) + mustDeleteAll(t, fc, pod, svc, svc2, svc3) + }) + // Proxies of 1.78 and earlier did not set the Pod IP header. 
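The header relied on in this subtest is kubetypes.PodIPv4Header ("Pod-IPv4"); on 1.80+ proxies the health endpoint is expected to echo the Pod's own IP in it, roughly along these lines (a sketch of assumed proxy-side behaviour, not code from this patch):

    // Health handler that identifies which backend Pod answered.
    func healthHandler(selfIPv4 string) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            w.Header().Set(kubetypes.PodIPv4Header, selfIPv4) // "Pod-IPv4"
            w.WriteHeader(http.StatusOK)
        }
    }
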
+ t.Run("pod_does_not_return_ip_header", func(t *testing.T) { + pod := podTemplate.DeepCopy() + pod.Name = "foo-bar" + + svc, hep := newSvc("foo-bar", 9002) + mustCreateAll(t, fc, svc, pod) + // If a response does not contain Pod IP header, we assume that this is an earlier proxy version, + // readiness cannot be verified so the readiness gate is just set to true. + resps := unreadyResps("", 1) + httpCl := fakeHTTPClient{ + t: t, + state: map[string][]fakeResponse{ + hep: resps, + }, + } + rec.httpClient = &httpCl + expectReconciled(t, rec, "operator-ns", pod.Name) + + // Pod should have readiness gate condition set. + podSetReady(pod, cl) + expectEqual(t, fc, pod) + mustDeleteAll(t, fc, pod, svc) + }) + t.Run("one_svc_one_backend_eventually_healthy_and_routable", func(t *testing.T) { + pod := podTemplate.DeepCopy() + + svc, hep := newSvc("svc", 9002) + mustCreateAll(t, fc, svc, pod) + // If a response errors, it is probably because the Pod is not yet properly running, so retry. + resps := append(erroredResps(8), readyResps(podIP, 1)...) + httpCl := fakeHTTPClient{ + t: t, + state: map[string][]fakeResponse{ + hep: resps, + }, + } + rec.httpClient = &httpCl + expectReconciled(t, rec, "operator-ns", pod.Name) + + // Pod should have readiness gate condition set. + podSetReady(pod, cl) + expectEqual(t, fc, pod) + mustDeleteAll(t, fc, pod, svc) + }) + t.Run("one_svc_one_backend_svc_does_not_have_health_port", func(t *testing.T) { + pod := podTemplate.DeepCopy() + + // If a Service does not have health port set, we assume that it is not possible to determine Pod's + // readiness and set it to ready. + svc, _ := newSvc("svc", -1) + mustCreateAll(t, fc, svc, pod) + rec.httpClient = nil + expectReconciled(t, rec, "operator-ns", pod.Name) + + // Pod should have readiness gate condition set. + podSetReady(pod, cl) + expectEqual(t, fc, pod) + mustDeleteAll(t, fc, pod, svc) + }) + t.Run("error_setting_up_healthcheck", func(t *testing.T) { + pod := podTemplate.DeepCopy() + // This is not a realistic reason for error, but we are just testing the behaviour of a healthcheck + // lookup failing. + pod.Status.PodIPs = []corev1.PodIP{{IP: "not-an-ip"}} + + svc, _ := newSvc("svc", 9002) + svc2, _ := newSvc("svc-2", 9002) + svc3, _ := newSvc("svc-3", 9002) + mustCreateAll(t, fc, svc, svc2, svc3, pod) + rec.httpClient = nil + expectError(t, rec, "operator-ns", pod.Name) + + // Pod should not have readiness gate condition set. + expectEqual(t, fc, pod) + mustDeleteAll(t, fc, pod, svc, svc2, svc3) + }) + t.Run("pod_does_not_have_an_ip_address", func(t *testing.T) { + pod := podTemplate.DeepCopy() + pod.Status.PodIPs = nil + + svc, _ := newSvc("svc", 9002) + svc2, _ := newSvc("svc-2", 9002) + svc3, _ := newSvc("svc-3", 9002) + mustCreateAll(t, fc, svc, svc2, svc3, pod) + rec.httpClient = nil + expectRequeue(t, rec, "operator-ns", pod.Name) + + // Pod should not have readiness gate condition set. 
+ expectEqual(t, fc, pod) + mustDeleteAll(t, fc, pod, svc, svc2, svc3) + }) +} + +func readyResps(ip string, num int) (resps []fakeResponse) { + for range num { + resps = append(resps, fakeResponse{statusCode: 200, podIP: ip}) + } + return resps +} + +func unreadyResps(ip string, num int) (resps []fakeResponse) { + for range num { + resps = append(resps, fakeResponse{statusCode: 503, podIP: ip}) + } + return resps +} + +func erroredResps(num int) (resps []fakeResponse) { + for range num { + resps = append(resps, fakeResponse{err: errors.New("timeout")}) + } + return resps +} + +func newSvc(name string, port int32) (*corev1.Service, string) { + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "operator-ns", + Name: name, + Labels: map[string]string{ + LabelManaged: "true", + labelProxyGroup: "dev", + labelSvcType: typeEgress, + }, + }, + Spec: corev1.ServiceSpec{}, + } + if port != -1 { + svc.Spec.Ports = []corev1.ServicePort{ + { + Name: tsHealthCheckPortName, + Port: port, + TargetPort: intstr.FromInt(9002), + Protocol: "TCP", + }, + } + } + return svc, fmt.Sprintf("http://%s.operator-ns.svc.cluster.local:%d/healthz", name, port) +} + +func podSetReady(pod *corev1.Pod, cl *tstest.Clock) { + pod.Status.Conditions = append(pod.Status.Conditions, corev1.PodCondition{ + Type: tsEgressReadinessGate, + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + }) +} + +// fakeHTTPClient is a mock HTTP client with a preset map of request URLs to list of responses. When it receives a +// request for a specific URL, it returns the preset response for that URL. It errors if an unexpected request is +// received. +type fakeHTTPClient struct { + t *testing.T + mu sync.Mutex // protects following + state map[string][]fakeResponse +} + +func (f *fakeHTTPClient) Do(req *http.Request) (*http.Response, error) { + f.mu.Lock() + resps := f.state[req.URL.String()] + if len(resps) == 0 { + f.mu.Unlock() + log.Printf("\n\n\nURL %q\n\n\n", req.URL) + f.t.Fatalf("fakeHTTPClient received an unexpected request for %q", req.URL) + } + defer func() { + if len(resps) == 1 { + delete(f.state, req.URL.String()) + f.mu.Unlock() + return + } + f.state[req.URL.String()] = f.state[req.URL.String()][1:] + f.mu.Unlock() + }() + + resp := resps[0] + if resp.err != nil { + return nil, resp.err + } + r := http.Response{ + StatusCode: resp.statusCode, + Header: make(http.Header), + Body: io.NopCloser(bytes.NewReader([]byte{})), + } + r.Header.Add(kubetypes.PodIPv4Header, resp.podIP) + return &r, nil +} + +type fakeResponse struct { + err error + statusCode int + podIP string // for the Pod IP header +} diff --git a/cmd/k8s-operator/egress-services-readiness.go b/cmd/k8s-operator/egress-services-readiness.go index f1964d452..5e95a5279 100644 --- a/cmd/k8s-operator/egress-services-readiness.go +++ b/cmd/k8s-operator/egress-services-readiness.go @@ -48,11 +48,12 @@ type egressSvcsReadinessReconciler struct { // service to determine how many replicas are currently able to route traffic. 
func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { l := esrr.logger.With("Service", req.NamespacedName) - defer l.Info("reconcile finished") + l.Debugf("starting reconcile") + defer l.Debugf("reconcile finished") svc := new(corev1.Service) if err = esrr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) { - l.Info("Service not found") + l.Debugf("Service not found") return res, nil } else if err != nil { return res, fmt.Errorf("failed to get Service: %w", err) @@ -127,16 +128,16 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re return res, err } if pod == nil { - l.Infof("[unexpected] ProxyGroup is ready, but replica %d was not found", i) + l.Warnf("[unexpected] ProxyGroup is ready, but replica %d was not found", i) reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady return res, nil } - l.Infof("looking at Pod with IPs %v", pod.Status.PodIPs) + l.Debugf("looking at Pod with IPs %v", pod.Status.PodIPs) ready := false for _, ep := range eps.Endpoints { - l.Infof("looking at endpoint with addresses %v", ep.Addresses) + l.Debugf("looking at endpoint with addresses %v", ep.Addresses) if endpointReadyForPod(&ep, pod, l) { - l.Infof("endpoint is ready for Pod") + l.Debugf("endpoint is ready for Pod") ready = true break } @@ -165,7 +166,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re func endpointReadyForPod(ep *discoveryv1.Endpoint, pod *corev1.Pod, l *zap.SugaredLogger) bool { podIP, err := podIPv4(pod) if err != nil { - l.Infof("[unexpected] error retrieving Pod's IPv4 address: %v", err) + l.Warnf("[unexpected] error retrieving Pod's IPv4 address: %v", err) return false } // Currently we only ever set a single address on and Endpoint and nothing else is meant to modify this. diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 6631c4f98..8fa979094 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -9,6 +9,7 @@ package main import ( "context" + "net/http" "os" "regexp" "strconv" @@ -453,6 +454,24 @@ func runReconcilers(opts reconcilerOpts) { startlog.Fatalf("could not create egress EndpointSlices reconciler: %v", err) } + podsForEps := handler.EnqueueRequestsFromMapFunc(podsFromEgressEps(mgr.GetClient(), opts.log, opts.tailscaleNamespace)) + podsER := handler.EnqueueRequestsFromMapFunc(egressPodsHandler) + err = builder. + ControllerManagedBy(mgr). + Named("egress-pods-readiness-reconciler"). + Watches(&discoveryv1.EndpointSlice{}, podsForEps). + Watches(&corev1.Pod{}, podsER). + Complete(&egressPodsReconciler{ + Client: mgr.GetClient(), + tsNamespace: opts.tailscaleNamespace, + clock: tstime.DefaultClock{}, + logger: opts.log.Named("egress-pods-readiness-reconciler"), + httpClient: http.DefaultClient, + }) + if err != nil { + startlog.Fatalf("could not create egress Pods readiness reconciler: %v", err) + } + // ProxyClass reconciler gets triggered on ServiceMonitor CRD changes to ensure that any ProxyClasses, that // define that a ServiceMonitor should be created, were set to invalid because the CRD did not exist get // reconciled if the CRD is applied at a later point. 
@@ -906,6 +925,20 @@ func egressEpsHandler(_ context.Context, o client.Object) []reconcile.Request { } } +func egressPodsHandler(_ context.Context, o client.Object) []reconcile.Request { + if typ := o.GetLabels()[LabelParentType]; typ != proxyTypeProxyGroup { + return nil + } + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: o.GetNamespace(), + Name: o.GetName(), + }, + }, + } +} + // egressEpsFromEgressPods returns a Pod event handler that checks if Pod is a replica for a ProxyGroup and if it is, // returns reconciler requests for all egress EndpointSlices for that ProxyGroup. func egressEpsFromPGPods(cl client.Client, ns string) handler.MapFunc { @@ -1056,6 +1089,43 @@ func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger, ns } } +func podsFromEgressEps(cl client.Client, logger *zap.SugaredLogger, ns string) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + eps, ok := o.(*discoveryv1.EndpointSlice) + if !ok { + logger.Infof("[unexpected] EndpointSlice handler triggered for an object that is not a EndpointSlice") + return nil + } + if eps.Labels[labelProxyGroup] == "" { + return nil + } + if eps.Labels[labelSvcType] != "egress" { + return nil + } + podLabels := map[string]string{ + LabelManaged: "true", + LabelParentType: "proxygroup", + LabelParentName: eps.Labels[labelProxyGroup], + } + podList := &corev1.PodList{} + if err := cl.List(ctx, podList, client.InNamespace(ns), + client.MatchingLabels(podLabels)); err != nil { + logger.Infof("error listing EndpointSlices: %v, skipping a reconcile for event on EndpointSlice %s", err, eps.Name) + return nil + } + reqs := make([]reconcile.Request, 0) + for _, pod := range podList.Items { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: pod.Namespace, + Name: pod.Name, + }, + }) + } + return reqs + } +} + // proxyClassesWithServiceMonitor returns an event handler that, given that the event is for the Prometheus // ServiceMonitor CRD, returns all ProxyClasses that define that a ServiceMonitor should be created. 
func proxyClassesWithServiceMonitor(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 160f24ec9..83c42cb76 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -583,6 +583,21 @@ func mustCreate(t *testing.T, client client.Client, obj client.Object) { t.Fatalf("creating %q: %v", obj.GetName(), err) } } +func mustCreateAll(t *testing.T, client client.Client, objs ...client.Object) { + t.Helper() + for _, obj := range objs { + mustCreate(t, client, obj) + } +} + +func mustDeleteAll(t *testing.T, client client.Client, objs ...client.Object) { + t.Helper() + for _, obj := range objs { + if err := client.Delete(context.Background(), obj); err != nil { + t.Fatalf("deleting %q: %v", obj.GetName(), err) + } + } +} func mustUpdate[T any, O ptrObject[T]](t *testing.T, client client.Client, ns, name string, update func(O)) { t.Helper() @@ -706,6 +721,19 @@ func expectRequeue(t *testing.T, sr reconcile.Reconciler, ns, name string) { t.Fatalf("expected timed requeue, got success") } } +func expectError(t *testing.T, sr reconcile.Reconciler, ns, name string) { + t.Helper() + req := reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: name, + Namespace: ns, + }, + } + _, err := sr.Reconcile(context.Background(), req) + if err == nil { + t.Error("Reconcile: expected error but did not get one") + } +} // expectEvents accepts a test recorder and a list of events, tests that expected // events are sent down the recorder's channel. Waits for 5s for each event. From ed8bb3b56438a10e9f7bf4b3bb20e0c8ebf6dffb Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 30 Jan 2025 07:22:52 +0000 Subject: [PATCH 0426/1708] control/controlclient: add missing word in comment Found by review.ai. Updates #cleanup Change-Id: Ib9126de7327527b8b3818d92cc774bb1c7b6f974 Signed-off-by: Brad Fitzpatrick --- control/controlclient/auto.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index a5397594e..92db9382e 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -625,7 +625,7 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM // We CAS here because the caller goroutine is // doing a Store which we want to want to win // a race. This is only a memory optimization - // and is for correctness: + // and is not for correctness: c.lastStatus.CompareAndSwap(newSt, nil) }) } From 0ed4aa028fbd3f0cd4a5a2d86a962f354ae954e4 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 30 Jan 2025 07:23:36 +0000 Subject: [PATCH 0427/1708] control/controlclient: flesh out a recently added comment Updates tailscale/corp#26058 Change-Id: Ib46161fbb2e79c080f886083665961f02cbf5949 --- control/controlclient/auto.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 92db9382e..da123f8c4 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -620,12 +620,17 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM } } c.observer.SetControlClientStatus(c, *newSt) - // Best effort stop retaining the memory now that - // we've sent it to the observer (LocalBackend). - // We CAS here because the caller goroutine is - // doing a Store which we want to want to win - // a race. 
This is only a memory optimization - // and is not for correctness: + + // Best effort stop retaining the memory now that we've sent it to the + // observer (LocalBackend). We CAS here because the caller goroutine is + // doing a Store which we want to win a race. This is only a memory + // optimization and is not for correctness. + // + // If the CAS fails, that means somebody else's Store replaced our + // pointer (so mission accomplished: our netmap is no longer retained in + // any case) and that Store caller will be responsible for removing + // their own netmap (or losing their race too, down the chain). + // Eventually the last caller will win this CAS and zero lastStatus. c.lastStatus.CompareAndSwap(newSt, nil) }) } From a49af98b3167d325bc9c0d43e61d6dc6c494c544 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 30 Jan 2025 13:36:33 +0200 Subject: [PATCH 0428/1708] cmd/k8s-operator: temporarily disable HA Ingress controller (#14833) The HA Ingress functionality is not actually doing anything valuable yet, so don't run the controller in 1.80 release yet. Updates tailscale/tailscale#24795 Signed-off-by: Irbe Krumina --- .../crds/tailscale.com_proxygroups.yaml | 2 +- .../deploy/manifests/operator.yaml | 2 +- cmd/k8s-operator/operator.go | 58 ------------------- k8s-operator/api.md | 2 +- .../apis/v1alpha1/types_proxygroup.go | 2 +- 5 files changed, 4 insertions(+), 62 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index 86e74e441..e101c201f 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -103,7 +103,7 @@ spec: pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ type: description: |- - Type of the ProxyGroup proxies. Supported types are egress and ingress. + Type of the ProxyGroup proxies. Currently the only supported type is egress. Type is immutable once a ProxyGroup is created. type: string enum: diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index e966ef559..54b32bef0 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2860,7 +2860,7 @@ spec: type: array type: description: |- - Type of the ProxyGroup proxies. Supported types are egress and ingress. + Type of the ProxyGroup proxies. Currently the only supported type is egress. Type is immutable once a ProxyGroup is created. enum: - egress diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 8fa979094..8fcd1342c 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -331,28 +331,6 @@ func runReconcilers(opts reconcilerOpts) { if err != nil { startlog.Fatalf("could not create ingress reconciler: %v", err) } - lc, err := opts.tsServer.LocalClient() - if err != nil { - startlog.Fatalf("could not get local client: %v", err) - } - err = builder. - ControllerManagedBy(mgr). - For(&networkingv1.Ingress{}). - Named("ingress-pg-reconciler"). - Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog))). 
- Complete(&IngressPGReconciler{ - recorder: eventRecorder, - tsClient: opts.tsClient, - tsnetServer: opts.tsServer, - defaultTags: strings.Split(opts.proxyTags, ","), - Client: mgr.GetClient(), - logger: opts.log.Named("ingress-pg-reconciler"), - lc: lc, - tsNamespace: opts.tailscaleNamespace, - }) - if err != nil { - startlog.Fatalf("could not create ingress-pg-reconciler: %v", err) - } connectorFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("connector")) // If a ProxyClassChanges, enqueue all Connectors that have @@ -1178,42 +1156,6 @@ func indexEgressServices(o client.Object) []string { return []string{o.GetAnnotations()[AnnotationProxyGroup]} } -// serviceHandlerForIngressPG returns a handler for Service events that ensures that if the Service -// associated with an event is a backend Service for a tailscale Ingress with ProxyGroup annotation, -// the associated Ingress gets reconciled. -func serviceHandlerForIngressPG(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { - return func(ctx context.Context, o client.Object) []reconcile.Request { - ingList := networkingv1.IngressList{} - if err := cl.List(ctx, &ingList, client.InNamespace(o.GetNamespace())); err != nil { - logger.Debugf("error listing Ingresses: %v", err) - return nil - } - reqs := make([]reconcile.Request, 0) - for _, ing := range ingList.Items { - if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != tailscaleIngressClassName { - continue - } - if !hasProxyGroupAnnotation(&ing) { - continue - } - if ing.Spec.DefaultBackend != nil && ing.Spec.DefaultBackend.Service != nil && ing.Spec.DefaultBackend.Service.Name == o.GetName() { - reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) - } - for _, rule := range ing.Spec.Rules { - if rule.HTTP == nil { - continue - } - for _, path := range rule.HTTP.Paths { - if path.Backend.Service != nil && path.Backend.Service.Name == o.GetName() { - reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) - } - } - } - } - return reqs - } -} - func hasProxyGroupAnnotation(obj client.Object) bool { ing := obj.(*networkingv1.Ingress) return ing.Annotations[AnnotationProxyGroup] != "" diff --git a/k8s-operator/api.md b/k8s-operator/api.md index fae25b1f6..64756c8f1 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -599,7 +599,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Supported types are egress and ingress.
Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress]
Type: string
| +| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Currently the only supported type is egress.
Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress]
Type: string
| | `tags` _[Tags](#tags)_ | Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s].
If you specify custom tags here, make sure you also make the operator
an owner of these tags.
See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
Tags cannot be changed once a ProxyGroup device has been created.
Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
Type: string
| | `replicas` _integer_ | Replicas specifies how many replicas to create the StatefulSet with.
Defaults to 2. | | Minimum: 0
| | `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix is the hostname prefix to use for tailnet devices created
by the ProxyGroup. Each device will have the integer number from its
StatefulSet pod appended to this prefix to form the full hostname.
HostnamePrefix can contain lower case letters, numbers and dashes, it
must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$`
Type: string
| diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index f95fc58d0..cb9f678f8 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -48,7 +48,7 @@ type ProxyGroupList struct { } type ProxyGroupSpec struct { - // Type of the ProxyGroup proxies. Supported types are egress and ingress. + // Type of the ProxyGroup proxies. Currently the only supported type is egress. // Type is immutable once a ProxyGroup is created. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ProxyGroup type is immutable" Type ProxyGroupType `json:"type"` From c2af1cd9e347abbe7fa7ef52ca21df3230abbfe1 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Wed, 29 Jan 2025 15:35:37 +0000 Subject: [PATCH 0429/1708] prober: support multiple probes running concurrently Some probes might need to run for longer than their scheduling interval, so this change relaxes the 1-at-a-time restriction, allowing us to configure probe concurrency and timeout separately. The default values remain the same (concurrency of 1; timeout of 80% of interval). Updates tailscale/corp#25479 Signed-off-by: Anton Tolchanov --- prober/prober.go | 48 ++++++++++++++++++++--------- prober/prober_test.go | 72 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 105 insertions(+), 15 deletions(-) diff --git a/prober/prober.go b/prober/prober.go index e3860e7b9..d80db773a 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -7,6 +7,7 @@ package prober import ( + "cmp" "container/ring" "context" "encoding/json" @@ -20,6 +21,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + "tailscale.com/syncs" "tailscale.com/tsweb" ) @@ -44,6 +46,14 @@ type ProbeClass struct { // exposed by this probe class. Labels Labels + // Timeout is the maximum time the probe function is allowed to run before + // its context is cancelled. Defaults to 80% of the scheduling interval. + Timeout time.Duration + + // Concurrency is the maximum number of concurrent probe executions + // allowed for this probe class. Defaults to 1. + Concurrency int + // Metrics allows a probe class to export custom Metrics. Can be nil. Metrics func(prometheus.Labels) []prometheus.Metric } @@ -131,9 +141,12 @@ func newProbe(p *Prober, name string, interval time.Duration, l prometheus.Label cancel: cancel, stopped: make(chan struct{}), + runSema: syncs.NewSemaphore(cmp.Or(pc.Concurrency, 1)), + name: name, probeClass: pc, interval: interval, + timeout: cmp.Or(pc.Timeout, time.Duration(float64(interval)*0.8)), initialDelay: initialDelay(name, interval), successHist: ring.New(recentHistSize), latencyHist: ring.New(recentHistSize), @@ -226,11 +239,12 @@ type Probe struct { ctx context.Context cancel context.CancelFunc // run to initiate shutdown stopped chan struct{} // closed when shutdown is complete - runMu sync.Mutex // ensures only one probe runs at a time + runSema syncs.Semaphore // restricts concurrency per probe name string probeClass ProbeClass interval time.Duration + timeout time.Duration initialDelay time.Duration tick ticker @@ -282,17 +296,15 @@ func (p *Probe) loop() { t := p.prober.newTicker(p.initialDelay) select { case <-t.Chan(): - p.run() case <-p.ctx.Done(): t.Stop() return } t.Stop() - } else { - p.run() } if p.prober.once { + p.run() return } @@ -315,9 +327,12 @@ func (p *Probe) loop() { p.tick = p.prober.newTicker(p.interval) defer p.tick.Stop() for { + // Run the probe in a new goroutine every tick. 
Default concurrency & timeout + // settings will ensure that only one probe is running at a time. + go p.run() + select { case <-p.tick.Chan(): - p.run() case <-p.ctx.Done(): return } @@ -331,8 +346,13 @@ func (p *Probe) loop() { // that the probe either succeeds or fails before the next cycle is scheduled to // start. func (p *Probe) run() (pi ProbeInfo, err error) { - p.runMu.Lock() - defer p.runMu.Unlock() + // Probes are scheduled each p.interval, so we don't wait longer than that. + semaCtx, cancel := context.WithTimeout(p.ctx, p.interval) + defer cancel() + if !p.runSema.AcquireContext(semaCtx) { + return pi, fmt.Errorf("probe %s: context cancelled", p.name) + } + defer p.runSema.Release() p.recordStart() defer func() { @@ -344,19 +364,21 @@ func (p *Probe) run() (pi ProbeInfo, err error) { if r := recover(); r != nil { log.Printf("probe %s panicked: %v", p.name, r) err = fmt.Errorf("panic: %v", r) - p.recordEnd(err) + p.recordEndLocked(err) } }() ctx := p.ctx if !p.IsContinuous() { - timeout := time.Duration(float64(p.interval) * 0.8) var cancel func() - ctx, cancel = context.WithTimeout(ctx, timeout) + ctx, cancel = context.WithTimeout(ctx, p.timeout) defer cancel() } err = p.probeClass.Probe(ctx) - p.recordEnd(err) + + p.mu.Lock() + defer p.mu.Unlock() + p.recordEndLocked(err) if err != nil { log.Printf("probe %s: %v", p.name, err) } @@ -370,10 +392,8 @@ func (p *Probe) recordStart() { p.mu.Unlock() } -func (p *Probe) recordEnd(err error) { +func (p *Probe) recordEndLocked(err error) { end := p.prober.now() - p.mu.Lock() - defer p.mu.Unlock() p.end = end p.succeeded = err == nil p.lastErr = err diff --git a/prober/prober_test.go b/prober/prober_test.go index 3905bfbc9..109953b65 100644 --- a/prober/prober_test.go +++ b/prober/prober_test.go @@ -149,6 +149,74 @@ func TestProberTimingSpread(t *testing.T) { notCalled() } +func TestProberTimeout(t *testing.T) { + clk := newFakeTime() + p := newForTest(clk.Now, clk.NewTicker) + + var done sync.WaitGroup + done.Add(1) + pfunc := FuncProbe(func(ctx context.Context) error { + defer done.Done() + select { + case <-ctx.Done(): + return ctx.Err() + } + }) + pfunc.Timeout = time.Microsecond + probe := p.Run("foo", 30*time.Second, nil, pfunc) + waitActiveProbes(t, p, clk, 1) + done.Wait() + probe.mu.Lock() + info := probe.probeInfoLocked() + probe.mu.Unlock() + wantInfo := ProbeInfo{ + Name: "foo", + Interval: 30 * time.Second, + Labels: map[string]string{"class": "", "name": "foo"}, + Status: ProbeStatusFailed, + Error: "context deadline exceeded", + RecentResults: []bool{false}, + RecentLatencies: nil, + } + if diff := cmp.Diff(wantInfo, info, cmpopts.IgnoreFields(ProbeInfo{}, "Start", "End", "Latency")); diff != "" { + t.Fatalf("unexpected ProbeInfo (-want +got):\n%s", diff) + } + if got := info.Latency; got > time.Second { + t.Errorf("info.Latency = %v, want at most 1s", got) + } +} + +func TestProberConcurrency(t *testing.T) { + clk := newFakeTime() + p := newForTest(clk.Now, clk.NewTicker) + + var ran atomic.Int64 + stopProbe := make(chan struct{}) + pfunc := FuncProbe(func(ctx context.Context) error { + ran.Add(1) + <-stopProbe + return nil + }) + pfunc.Timeout = time.Hour + pfunc.Concurrency = 3 + p.Run("foo", time.Second, nil, pfunc) + waitActiveProbes(t, p, clk, 1) + + for range 50 { + clk.Advance(time.Second) + } + + if err := tstest.WaitFor(convergenceTimeout, func() error { + if got, want := ran.Load(), int64(3); got != want { + return fmt.Errorf("expected %d probes to run concurrently, got %d", want, got) + } + return nil + }); 
err != nil { + t.Fatal(err) + } + close(stopProbe) +} + func TestProberRun(t *testing.T) { clk := newFakeTime() p := newForTest(clk.Now, clk.NewTicker) @@ -450,9 +518,11 @@ func TestProbeInfoRecent(t *testing.T) { for _, r := range tt.results { probe.recordStart() clk.Advance(r.latency) - probe.recordEnd(r.err) + probe.recordEndLocked(r.err) } + probe.mu.Lock() info := probe.probeInfoLocked() + probe.mu.Unlock() if diff := cmp.Diff(tt.wantProbeInfo, info, cmpopts.IgnoreFields(ProbeInfo{}, "Start", "End", "Interval")); diff != "" { t.Fatalf("unexpected ProbeInfo (-want +got):\n%s", diff) } From 138a83efe11659220643f5975b28c4a030fefe9a Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 30 Jan 2025 13:51:10 +0000 Subject: [PATCH 0430/1708] cmd/containerboot: wait for consistent state on shutdown (#14263) tailscaled's ipn package writes a collection of keys to state after authenticating to control, but one at a time. If containerboot happens to send a SIGTERM signal to tailscaled in the middle of writing those keys, it may shut down with an inconsistent state Secret and never recover. While we can't durably fix this with our current single-use auth keys (no atomic operation to auth + write state), we can reduce the window for this race condition by checking for partial state before sending SIGTERM to tailscaled. Best effort only. Updates #14080 Change-Id: I0532d51b6f0b7d391e538468bd6a0a80dbe1d9f7 Signed-off-by: Tom Proctor --- cmd/containerboot/kube.go | 66 +++++++++++++++ cmd/containerboot/kube_test.go | 33 ++++++++ cmd/containerboot/main.go | 124 ++++++++++++++++++---------- cmd/containerboot/main_test.go | 142 +++++++++++++++++++++++++------- cmd/containerboot/tailscaled.go | 4 +- 5 files changed, 294 insertions(+), 75 deletions(-) diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go index 4d00687ee..0a2dfa1bf 100644 --- a/cmd/containerboot/kube.go +++ b/cmd/containerboot/kube.go @@ -8,15 +8,22 @@ package main import ( "context" "encoding/json" + "errors" "fmt" + "log" "net/http" "net/netip" "os" + "strings" + "time" + "tailscale.com/ipn" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" "tailscale.com/kube/kubetypes" + "tailscale.com/logtail/backoff" "tailscale.com/tailcfg" + "tailscale.com/types/logger" ) // kubeClient is a wrapper around Tailscale's internal kube client that knows how to talk to the kube API server. We use @@ -126,3 +133,62 @@ func (kc *kubeClient) storeCapVerUID(ctx context.Context, podUID string) error { } return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container") } + +// waitForConsistentState waits for tailscaled to finish writing state if it +// looks like it's started. It is designed to reduce the likelihood that +// tailscaled gets shut down in the window between authenticating to control +// and finishing writing state. However, it's not bullet proof because we can't +// atomically authenticate and write state. 
+func (kc *kubeClient) waitForConsistentState(ctx context.Context) error {
+	var logged bool
+
+	bo := backoff.NewBackoff("", logger.Discard, 2*time.Second)
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+		secret, err := kc.GetSecret(ctx, kc.stateSecret)
+		if ctx.Err() != nil || kubeclient.IsNotFoundErr(err) {
+			return nil
+		}
+		if err != nil {
+			return fmt.Errorf("getting Secret %q: %v", kc.stateSecret, err)
+		}
+
+		if hasConsistentState(secret.Data) {
+			return nil
+		}
+
+		if !logged {
+			log.Printf("Waiting for tailscaled to finish writing state to Secret %q", kc.stateSecret)
+			logged = true
+		}
+		bo.BackOff(ctx, errors.New("")) // Fake error to trigger actual sleep.
+	}
+}
+
+// hasConsistentState returns true if there is either no state or the full set
+// of expected keys is present.
+func hasConsistentState(d map[string][]byte) bool {
+	var (
+		_, hasCurrent = d[string(ipn.CurrentProfileStateKey)]
+		_, hasKnown = d[string(ipn.KnownProfilesStateKey)]
+		_, hasMachine = d[string(ipn.MachineKeyStateKey)]
+		hasProfile bool
+	)
+
+	for k := range d {
+		if strings.HasPrefix(k, "profile-") {
+			if hasProfile {
+				return false // We only expect one profile.
+			}
+			hasProfile = true
+		}
+	}
+
+	// Approximate check, we don't want to reimplement all of profileManager.
+	return (hasCurrent && hasKnown && hasMachine && hasProfile) ||
+		(!hasCurrent && !hasKnown && !hasMachine && !hasProfile)
+}
diff --git a/cmd/containerboot/kube_test.go b/cmd/containerboot/kube_test.go
index 2ba69af7c..413971bc6 100644
--- a/cmd/containerboot/kube_test.go
+++ b/cmd/containerboot/kube_test.go
@@ -9,8 +9,10 @@ import (
 	"context"
 	"errors"
 	"testing"
+	"time"
 
 	"github.com/google/go-cmp/cmp"
+	"tailscale.com/ipn"
 	"tailscale.com/kube/kubeapi"
 	"tailscale.com/kube/kubeclient"
 )
@@ -205,3 +207,34 @@ func TestSetupKube(t *testing.T) {
 		})
 	}
 }
+
+func TestWaitForConsistentState(t *testing.T) {
+	data := map[string][]byte{
+		// Missing _current-profile.
+ string(ipn.KnownProfilesStateKey): []byte(""), + string(ipn.MachineKeyStateKey): []byte(""), + "profile-foo": []byte(""), + } + kc := &kubeClient{ + Client: &kubeclient.FakeClient{ + GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { + return &kubeapi.Secret{ + Data: data, + }, nil + }, + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + if err := kc.waitForConsistentState(ctx); err != context.DeadlineExceeded { + t.Fatalf("expected DeadlineExceeded, got %v", err) + } + + ctx, cancel = context.WithTimeout(context.Background(), time.Second) + defer cancel() + data[string(ipn.CurrentProfileStateKey)] = []byte("") + if err := kc.waitForConsistentState(ctx); err != nil { + t.Fatalf("expected nil, got %v", err) + } +} diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 0aca27f5f..cf4bd8620 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -137,53 +137,83 @@ func newNetfilterRunner(logf logger.Logf) (linuxfw.NetfilterRunner, error) { } func main() { + if err := run(); err != nil && !errors.Is(err, context.Canceled) { + log.Fatal(err) + } +} + +func run() error { log.SetPrefix("boot: ") tailscale.I_Acknowledge_This_API_Is_Unstable = true cfg, err := configFromEnv() if err != nil { - log.Fatalf("invalid configuration: %v", err) + return fmt.Errorf("invalid configuration: %w", err) } if !cfg.UserspaceMode { if err := ensureTunFile(cfg.Root); err != nil { - log.Fatalf("Unable to create tuntap device file: %v", err) + return fmt.Errorf("unable to create tuntap device file: %w", err) } if cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.Routes != nil || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" { if err := ensureIPForwarding(cfg.Root, cfg.ProxyTargetIP, cfg.TailnetTargetIP, cfg.TailnetTargetFQDN, cfg.Routes); err != nil { log.Printf("Failed to enable IP forwarding: %v", err) log.Printf("To run tailscale as a proxy or router container, IP forwarding must be enabled.") if cfg.InKubernetes { - log.Fatalf("You can either set the sysctls as a privileged initContainer, or run the tailscale container with privileged=true.") + return fmt.Errorf("you can either set the sysctls as a privileged initContainer, or run the tailscale container with privileged=true.") } else { - log.Fatalf("You can fix this by running the container with privileged=true, or the equivalent in your container runtime that permits access to sysctls.") + return fmt.Errorf("you can fix this by running the container with privileged=true, or the equivalent in your container runtime that permits access to sysctls.") } } } } - // Context is used for all setup stuff until we're in steady + // Root context for the whole containerboot process, used to make sure + // shutdown signals are promptly and cleanly handled. + ctx, cancel := contextWithExitSignalWatch() + defer cancel() + + // bootCtx is used for all setup stuff until we're in steady // state, so that if something is hanging we eventually time out // and crashloop the container. 
- bootCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + bootCtx, cancel := context.WithTimeout(ctx, 60*time.Second) defer cancel() var kc *kubeClient if cfg.InKubernetes { kc, err = newKubeClient(cfg.Root, cfg.KubeSecret) if err != nil { - log.Fatalf("error initializing kube client: %v", err) + return fmt.Errorf("error initializing kube client: %w", err) } if err := cfg.setupKube(bootCtx, kc); err != nil { - log.Fatalf("error setting up for running on Kubernetes: %v", err) + return fmt.Errorf("error setting up for running on Kubernetes: %w", err) } } client, daemonProcess, err := startTailscaled(bootCtx, cfg) if err != nil { - log.Fatalf("failed to bring up tailscale: %v", err) + return fmt.Errorf("failed to bring up tailscale: %w", err) } killTailscaled := func() { + if hasKubeStateStore(cfg) { + // Check we're not shutting tailscaled down while it's still writing + // state. If we authenticate and fail to write all the state, we'll + // never recover automatically. + // + // The default termination grace period for a Pod is 30s. We wait 25s at + // most so that we still reserve some of that budget for tailscaled + // to receive and react to a SIGTERM before the SIGKILL that k8s + // will send at the end of the grace period. + ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second) + defer cancel() + + log.Printf("Checking for consistent state") + err := kc.waitForConsistentState(ctx) + if err != nil { + log.Printf("Error waiting for consistent state on shutdown: %v", err) + } + } + log.Printf("Sending SIGTERM to tailscaled") if err := daemonProcess.Signal(unix.SIGTERM); err != nil { log.Fatalf("error shutting tailscaled down: %v", err) } @@ -231,7 +261,7 @@ func main() { w, err := client.WatchIPNBus(bootCtx, ipn.NotifyInitialNetMap|ipn.NotifyInitialPrefs|ipn.NotifyInitialState) if err != nil { - log.Fatalf("failed to watch tailscaled for updates: %v", err) + return fmt.Errorf("failed to watch tailscaled for updates: %w", err) } // Now that we've started tailscaled, we can symlink the socket to the @@ -267,18 +297,18 @@ func main() { didLogin = true w.Close() if err := tailscaleUp(bootCtx, cfg); err != nil { - return fmt.Errorf("failed to auth tailscale: %v", err) + return fmt.Errorf("failed to auth tailscale: %w", err) } w, err = client.WatchIPNBus(bootCtx, ipn.NotifyInitialNetMap|ipn.NotifyInitialState) if err != nil { - return fmt.Errorf("rewatching tailscaled for updates after auth: %v", err) + return fmt.Errorf("rewatching tailscaled for updates after auth: %w", err) } return nil } if isTwoStepConfigAlwaysAuth(cfg) { if err := authTailscale(); err != nil { - log.Fatalf("failed to auth tailscale: %v", err) + return fmt.Errorf("failed to auth tailscale: %w", err) } } @@ -286,7 +316,7 @@ authLoop: for { n, err := w.Next() if err != nil { - log.Fatalf("failed to read from tailscaled: %v", err) + return fmt.Errorf("failed to read from tailscaled: %w", err) } if n.State != nil { @@ -295,10 +325,10 @@ authLoop: if isOneStepConfig(cfg) { // This could happen if this is the first time tailscaled was run for this // device and the auth key was not passed via the configfile. 
- log.Fatalf("invalid state: tailscaled daemon started with a config file, but tailscale is not logged in: ensure you pass a valid auth key in the config file.") + return fmt.Errorf("invalid state: tailscaled daemon started with a config file, but tailscale is not logged in: ensure you pass a valid auth key in the config file.") } if err := authTailscale(); err != nil { - log.Fatalf("failed to auth tailscale: %v", err) + return fmt.Errorf("failed to auth tailscale: %w", err) } case ipn.NeedsMachineAuth: log.Printf("machine authorization required, please visit the admin panel") @@ -318,14 +348,11 @@ authLoop: w.Close() - ctx, cancel := contextWithExitSignalWatch() - defer cancel() - if isTwoStepConfigAuthOnce(cfg) { // Now that we are authenticated, we can set/reset any of the // settings that we need to. if err := tailscaleSet(ctx, cfg); err != nil { - log.Fatalf("failed to auth tailscale: %v", err) + return fmt.Errorf("failed to auth tailscale: %w", err) } } @@ -334,11 +361,11 @@ authLoop: if cfg.ServeConfigPath != "" { log.Printf("serve proxy: unsetting previous config") if err := client.SetServeConfig(ctx, new(ipn.ServeConfig)); err != nil { - log.Fatalf("failed to unset serve config: %v", err) + return fmt.Errorf("failed to unset serve config: %w", err) } if hasKubeStateStore(cfg) { if err := kc.storeHTTPSEndpoint(ctx, ""); err != nil { - log.Fatalf("failed to update HTTPS endpoint in tailscale state: %v", err) + return fmt.Errorf("failed to update HTTPS endpoint in tailscale state: %w", err) } } } @@ -349,19 +376,19 @@ authLoop: // wipe it, but it's good hygiene. log.Printf("Deleting authkey from kube secret") if err := kc.deleteAuthKey(ctx); err != nil { - log.Fatalf("deleting authkey from kube secret: %v", err) + return fmt.Errorf("deleting authkey from kube secret: %w", err) } } if hasKubeStateStore(cfg) { if err := kc.storeCapVerUID(ctx, cfg.PodUID); err != nil { - log.Fatalf("storing capability version and UID: %v", err) + return fmt.Errorf("storing capability version and UID: %w", err) } } w, err = client.WatchIPNBus(ctx, ipn.NotifyInitialNetMap|ipn.NotifyInitialState) if err != nil { - log.Fatalf("rewatching tailscaled for updates after auth: %v", err) + return fmt.Errorf("rewatching tailscaled for updates after auth: %w", err) } // If tailscaled config was read from a mounted file, watch the file for updates and reload. @@ -391,7 +418,7 @@ authLoop: if isL3Proxy(cfg) { nfr, err = newNetfilterRunner(log.Printf) if err != nil { - log.Fatalf("error creating new netfilter runner: %v", err) + return fmt.Errorf("error creating new netfilter runner: %w", err) } } @@ -462,9 +489,9 @@ runLoop: killTailscaled() break runLoop case err := <-errChan: - log.Fatalf("failed to read from tailscaled: %v", err) + return fmt.Errorf("failed to read from tailscaled: %w", err) case err := <-cfgWatchErrChan: - log.Fatalf("failed to watch tailscaled config: %v", err) + return fmt.Errorf("failed to watch tailscaled config: %w", err) case n := <-notifyChan: if n.State != nil && *n.State != ipn.Running { // Something's gone wrong and we've left the authenticated state. @@ -472,7 +499,7 @@ runLoop: // control flow required to make it work now is hard. So, just crash // the container and rely on the container runtime to restart us, // whereupon we'll go through initial auth again. 
- log.Fatalf("tailscaled left running state (now in state %q), exiting", *n.State) + return fmt.Errorf("tailscaled left running state (now in state %q), exiting", *n.State) } if n.NetMap != nil { addrs = n.NetMap.SelfNode.Addresses().AsSlice() @@ -490,7 +517,7 @@ runLoop: deviceID := n.NetMap.SelfNode.StableID() if hasKubeStateStore(cfg) && deephash.Update(¤tDeviceID, &deviceID) { if err := kc.storeDeviceID(ctx, n.NetMap.SelfNode.StableID()); err != nil { - log.Fatalf("storing device ID in Kubernetes Secret: %v", err) + return fmt.Errorf("storing device ID in Kubernetes Secret: %w", err) } } if cfg.TailnetTargetFQDN != "" { @@ -527,12 +554,12 @@ runLoop: rulesInstalled = true log.Printf("Installing forwarding rules for destination %v", ea.String()) if err := installEgressForwardingRule(ctx, ea.String(), addrs, nfr); err != nil { - log.Fatalf("installing egress proxy rules for destination %s: %v", ea.String(), err) + return fmt.Errorf("installing egress proxy rules for destination %s: %v", ea.String(), err) } } } if !rulesInstalled { - log.Fatalf("no forwarding rules for egress addresses %v, host supports IPv6: %v", egressAddrs, nfr.HasIPV6NAT()) + return fmt.Errorf("no forwarding rules for egress addresses %v, host supports IPv6: %v", egressAddrs, nfr.HasIPV6NAT()) } } currentEgressIPs = newCurentEgressIPs @@ -540,7 +567,7 @@ runLoop: if cfg.ProxyTargetIP != "" && len(addrs) != 0 && ipsHaveChanged { log.Printf("Installing proxy rules") if err := installIngressForwardingRule(ctx, cfg.ProxyTargetIP, addrs, nfr); err != nil { - log.Fatalf("installing ingress proxy rules: %v", err) + return fmt.Errorf("installing ingress proxy rules: %w", err) } } if cfg.ProxyTargetDNSName != "" && len(addrs) != 0 && ipsHaveChanged { @@ -556,7 +583,7 @@ runLoop: if backendsHaveChanged { log.Printf("installing ingress proxy rules for backends %v", newBackendAddrs) if err := installIngressForwardingRuleForDNSTarget(ctx, newBackendAddrs, addrs, nfr); err != nil { - log.Fatalf("error installing ingress proxy rules: %v", err) + return fmt.Errorf("error installing ingress proxy rules: %w", err) } } resetTimer(false) @@ -578,7 +605,7 @@ runLoop: if cfg.TailnetTargetIP != "" && ipsHaveChanged && len(addrs) != 0 { log.Printf("Installing forwarding rules for destination %v", cfg.TailnetTargetIP) if err := installEgressForwardingRule(ctx, cfg.TailnetTargetIP, addrs, nfr); err != nil { - log.Fatalf("installing egress proxy rules: %v", err) + return fmt.Errorf("installing egress proxy rules: %w", err) } } // If this is a L7 cluster ingress proxy (set up @@ -590,7 +617,7 @@ runLoop: if cfg.AllowProxyingClusterTrafficViaIngress && cfg.ServeConfigPath != "" && ipsHaveChanged && len(addrs) != 0 { log.Printf("installing rules to forward traffic for %s to node's tailnet IP", cfg.PodIP) if err := installTSForwardingRuleForDestination(ctx, cfg.PodIP, addrs, nfr); err != nil { - log.Fatalf("installing rules to forward traffic to node's tailnet IP: %v", err) + return fmt.Errorf("installing rules to forward traffic to node's tailnet IP: %w", err) } } currentIPs = newCurrentIPs @@ -609,7 +636,7 @@ runLoop: deviceEndpoints := []any{n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses()} if hasKubeStateStore(cfg) && deephash.Update(¤tDeviceEndpoints, &deviceEndpoints) { if err := kc.storeDeviceEndpoints(ctx, n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses().AsSlice()); err != nil { - log.Fatalf("storing device IPs and FQDN in Kubernetes Secret: %v", err) + return fmt.Errorf("storing device IPs and FQDN in Kubernetes Secret: %w", 
err) } } @@ -700,16 +727,18 @@ runLoop: if backendsHaveChanged && len(addrs) != 0 { log.Printf("Backend address change detected, installing proxy rules for backends %v", newBackendAddrs) if err := installIngressForwardingRuleForDNSTarget(ctx, newBackendAddrs, addrs, nfr); err != nil { - log.Fatalf("installing ingress proxy rules for DNS target %s: %v", cfg.ProxyTargetDNSName, err) + return fmt.Errorf("installing ingress proxy rules for DNS target %s: %v", cfg.ProxyTargetDNSName, err) } } backendAddrs = newBackendAddrs resetTimer(false) case e := <-egressSvcsErrorChan: - log.Fatalf("egress proxy failed: %v", e) + return fmt.Errorf("egress proxy failed: %v", e) } } wg.Wait() + + return nil } // ensureTunFile checks that /dev/net/tun exists, creating it if @@ -738,13 +767,13 @@ func resolveDNS(ctx context.Context, name string) ([]net.IP, error) { ip4s, err := net.DefaultResolver.LookupIP(ctx, "ip4", name) if err != nil { if e, ok := err.(*net.DNSError); !(ok && e.IsNotFound) { - return nil, fmt.Errorf("error looking up IPv4 addresses: %v", err) + return nil, fmt.Errorf("error looking up IPv4 addresses: %w", err) } } ip6s, err := net.DefaultResolver.LookupIP(ctx, "ip6", name) if err != nil { if e, ok := err.(*net.DNSError); !(ok && e.IsNotFound) { - return nil, fmt.Errorf("error looking up IPv6 addresses: %v", err) + return nil, fmt.Errorf("error looking up IPv6 addresses: %w", err) } } if len(ip4s) == 0 && len(ip6s) == 0 { @@ -757,7 +786,7 @@ func resolveDNS(ctx context.Context, name string) ([]net.IP, error) { // context that gets cancelled when a signal is received and a cancel function // that can be called to free the resources when the watch should be stopped. func contextWithExitSignalWatch() (context.Context, func()) { - closeChan := make(chan string) + closeChan := make(chan struct{}) ctx, cancel := context.WithCancel(context.Background()) signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM) @@ -769,8 +798,11 @@ func contextWithExitSignalWatch() (context.Context, func()) { return } }() + closeOnce := sync.Once{} f := func() { - closeChan <- "goodbye" + closeOnce.Do(func() { + close(closeChan) + }) } return ctx, f } @@ -823,7 +855,11 @@ func runHTTPServer(mux *http.ServeMux, addr string) (close func() error) { go func() { if err := srv.Serve(ln); err != nil { - log.Fatalf("failed running server: %v", err) + if err != http.ErrServerClosed { + log.Fatalf("failed running server: %v", err) + } else { + log.Printf("HTTP server at %s closed", addr) + } } }() diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index c8066f2c1..bc158dac5 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -25,6 +25,7 @@ import ( "strconv" "strings" "sync" + "syscall" "testing" "time" @@ -50,9 +51,7 @@ func TestContainerBoot(t *testing.T) { defer lapi.Close() kube := kubeServer{FSRoot: d} - if err := kube.Start(); err != nil { - t.Fatal(err) - } + kube.Start(t) defer kube.Close() tailscaledConf := &ipn.ConfigVAlpha{AuthKey: ptr.To("foo"), Version: "alpha0"} @@ -138,15 +137,29 @@ func TestContainerBoot(t *testing.T) { // WantCmds is the commands that containerboot should run in this phase. WantCmds []string + // WantKubeSecret is the secret keys/values that should exist in the // kube secret. WantKubeSecret map[string]string + + // Update the kube secret with these keys/values at the beginning of the + // phase (simulates our fake tailscaled doing it). 
+ UpdateKubeSecret map[string]string + // WantFiles files that should exist in the container and their // contents. WantFiles map[string]string - // WantFatalLog is the fatal log message we expect from containerboot. - // If set for a phase, the test will finish on that phase. - WantFatalLog string + + // WantLog is a log message we expect from containerboot. + WantLog string + + // If set for a phase, the test will expect containerboot to exit with + // this error code, and the test will finish on that phase without + // waiting for the successful startup log message. + WantExitCode *int + + // The signal to send to containerboot at the start of the phase. + Signal *syscall.Signal EndpointStatuses map[string]int } @@ -434,7 +447,8 @@ func TestContainerBoot(t *testing.T) { }, }, }, - WantFatalLog: "no forwarding rules for egress addresses [::1/128], host supports IPv6: false", + WantLog: "no forwarding rules for egress addresses [::1/128], host supports IPv6: false", + WantExitCode: ptr.To(1), }, }, }, @@ -936,7 +950,64 @@ func TestContainerBoot(t *testing.T) { }, Phases: []phase{ { - WantFatalLog: "TS_EGRESS_PROXIES_CONFIG_PATH is only supported for Tailscale running on Kubernetes", + WantLog: "TS_EGRESS_PROXIES_CONFIG_PATH is only supported for Tailscale running on Kubernetes", + WantExitCode: ptr.To(1), + }, + }, + }, + { + Name: "kube_shutdown_during_state_write", + Env: map[string]string{ + "KUBERNETES_SERVICE_HOST": kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": kube.Port, + "TS_ENABLE_HEALTH_CHECK": "true", + }, + KubeSecret: map[string]string{ + "authkey": "tskey-key", + }, + Phases: []phase{ + { + // Normal startup. + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + }, + }, + { + // SIGTERM before state is finished writing, should wait for + // consistent state before propagating SIGTERM to tailscaled. + Signal: ptr.To(unix.SIGTERM), + UpdateKubeSecret: map[string]string{ + "_machinekey": "foo", + "_profiles": "foo", + "profile-baff": "foo", + // Missing "_current-profile" key. + }, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + "_machinekey": "foo", + "_profiles": "foo", + "profile-baff": "foo", + }, + WantLog: "Waiting for tailscaled to finish writing state to Secret \"tailscale\"", + }, + { + // tailscaled has finished writing state, should propagate SIGTERM. 
+ UpdateKubeSecret: map[string]string{ + "_current-profile": "foo", + }, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + "_machinekey": "foo", + "_profiles": "foo", + "profile-baff": "foo", + "_current-profile": "foo", + }, + WantLog: "HTTP server at [::]:9002 closed", + WantExitCode: ptr.To(0), }, }, }, @@ -984,26 +1055,36 @@ func TestContainerBoot(t *testing.T) { var wantCmds []string for i, p := range test.Phases { + for k, v := range p.UpdateKubeSecret { + kube.SetSecret(k, v) + } lapi.Notify(p.Notify) - if p.WantFatalLog != "" { + if p.Signal != nil { + cmd.Process.Signal(*p.Signal) + } + if p.WantLog != "" { err := tstest.WaitFor(2*time.Second, func() error { - state, err := cmd.Process.Wait() - if err != nil { - return err - } - if state.ExitCode() != 1 { - return fmt.Errorf("process exited with code %d but wanted %d", state.ExitCode(), 1) - } - waitLogLine(t, time.Second, cbOut, p.WantFatalLog) + waitLogLine(t, time.Second, cbOut, p.WantLog) return nil }) if err != nil { t.Fatal(err) } + } + + if p.WantExitCode != nil { + state, err := cmd.Process.Wait() + if err != nil { + t.Fatal(err) + } + if state.ExitCode() != *p.WantExitCode { + t.Fatalf("phase %d: want exit code %d, got %d", i, *p.WantExitCode, state.ExitCode()) + } // Early test return, we don't expect the successful startup log message. return } + wantCmds = append(wantCmds, p.WantCmds...) waitArgs(t, 2*time.Second, d, argFile, strings.Join(wantCmds, "\n")) err := tstest.WaitFor(2*time.Second, func() error { @@ -1059,6 +1140,9 @@ func TestContainerBoot(t *testing.T) { } } waitLogLine(t, 2*time.Second, cbOut, "Startup complete, waiting for shutdown signal") + if cmd.ProcessState != nil { + t.Fatalf("containerboot should be running but exited with exit code %d", cmd.ProcessState.ExitCode()) + } }) } } @@ -1290,18 +1374,18 @@ func (k *kubeServer) Reset() { k.secret = map[string]string{} } -func (k *kubeServer) Start() error { +func (k *kubeServer) Start(t *testing.T) { root := filepath.Join(k.FSRoot, "var/run/secrets/kubernetes.io/serviceaccount") if err := os.MkdirAll(root, 0700); err != nil { - return err + t.Fatal(err) } if err := os.WriteFile(filepath.Join(root, "namespace"), []byte("default"), 0600); err != nil { - return err + t.Fatal(err) } if err := os.WriteFile(filepath.Join(root, "token"), []byte("bearer_token"), 0600); err != nil { - return err + t.Fatal(err) } k.srv = httptest.NewTLSServer(k) @@ -1310,13 +1394,11 @@ func (k *kubeServer) Start() error { var cert bytes.Buffer if err := pem.Encode(&cert, &pem.Block{Type: "CERTIFICATE", Bytes: k.srv.Certificate().Raw}); err != nil { - return err + t.Fatal(err) } if err := os.WriteFile(filepath.Join(root, "ca.crt"), cert.Bytes(), 0600); err != nil { - return err + t.Fatal(err) } - - return nil } func (k *kubeServer) Close() { @@ -1365,6 +1447,7 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) { http.Error(w, fmt.Sprintf("reading request body: %v", err), http.StatusInternalServerError) return } + defer r.Body.Close() switch r.Method { case "GET": @@ -1397,12 +1480,13 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) { panic(fmt.Sprintf("json decode failed: %v. 
Body:\n\n%s", err, string(bs))) } for _, op := range req { - if op.Op == "remove" { + switch op.Op { + case "remove": if !strings.HasPrefix(op.Path, "/data/") { panic(fmt.Sprintf("unsupported json-patch path %q", op.Path)) } delete(k.secret, strings.TrimPrefix(op.Path, "/data/")) - } else if op.Op == "replace" { + case "replace": path, ok := strings.CutPrefix(op.Path, "/data/") if !ok { panic(fmt.Sprintf("unsupported json-patch path %q", op.Path)) @@ -1419,7 +1503,7 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) { } k.secret[path] = val } - } else { + default: panic(fmt.Sprintf("unsupported json-patch op %q", op.Op)) } } @@ -1437,7 +1521,7 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) { panic(fmt.Sprintf("unknown content type %q", r.Header.Get("Content-Type"))) } default: - panic(fmt.Sprintf("unhandled HTTP method %q", r.Method)) + panic(fmt.Sprintf("unhandled HTTP request %s %s", r.Method, r.URL)) } } diff --git a/cmd/containerboot/tailscaled.go b/cmd/containerboot/tailscaled.go index fc2092477..1ff068b97 100644 --- a/cmd/containerboot/tailscaled.go +++ b/cmd/containerboot/tailscaled.go @@ -42,14 +42,14 @@ func startTailscaled(ctx context.Context, cfg *settings) (*tailscale.LocalClient log.Printf("Waiting for tailscaled socket") for { if ctx.Err() != nil { - log.Fatalf("Timed out waiting for tailscaled socket") + return nil, nil, errors.New("timed out waiting for tailscaled socket") } _, err := os.Stat(cfg.Socket) if errors.Is(err, fs.ErrNotExist) { time.Sleep(100 * time.Millisecond) continue } else if err != nil { - log.Fatalf("Waiting for tailscaled socket: %v", err) + return nil, nil, fmt.Errorf("error waiting for tailscaled socket: %w", err) } break } From 08dd4994d09cf4b960663321136b4fde3f5ccb18 Mon Sep 17 00:00:00 2001 From: Andrea Gottardo Date: Thu, 30 Jan 2025 13:04:29 -0800 Subject: [PATCH 0431/1708] VERSION.txt: this is v1.81.0 (#14838) Signed-off-by: Andrea Gottardo --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index b3a8c61e6..dbd41264a 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.79.0 +1.81.0 From 8ee72cd33c10fd549570f2b9d2611d1b1da781f0 Mon Sep 17 00:00:00 2001 From: Andrea Gottardo Date: Thu, 30 Jan 2025 14:21:32 -0800 Subject: [PATCH 0432/1708] cli/funnel: fix comment typo (#14840) Updates #cleanup Signed-off-by: Andrea Gottardo --- cmd/tailscale/cli/funnel.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/funnel.go b/cmd/tailscale/cli/funnel.go index a95f9e270..f4a1c6bfd 100644 --- a/cmd/tailscale/cli/funnel.go +++ b/cmd/tailscale/cli/funnel.go @@ -19,7 +19,7 @@ import ( var funnelCmd = func() *ffcli.Command { se := &serveEnv{lc: &localClient} // previously used to serve legacy newFunnelCommand unless useWIPCode is true - // change is limited to make a revert easier and full cleanup to come after the relase. + // change is limited to make a revert easier and full cleanup to come after the release. // TODO(tylersmalley): cleanup and removal of newFunnelCommand as of 2023-10-16 return newServeV2Command(se, funnel) } From 7d5fe13d27126b96d81b0c654bd552a19db8a312 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 30 Jan 2025 08:46:21 +0000 Subject: [PATCH 0433/1708] types/views: make SliceEqualAnyOrder also do short slice optimization SliceEqualAnyOrderFunc had an optimization missing from SliceEqualAnyOrder. Now they share the same code and both have the optimization. 
Updates #14593 Change-Id: I550726e0964fc4006e77bb44addc67be989c131c Signed-off-by: Brad Fitzpatrick --- types/views/views.go | 148 ++++++++++++++++++++++---------------- types/views/views_test.go | 77 ++++++++++++++++++++ 2 files changed, 164 insertions(+), 61 deletions(-) diff --git a/types/views/views.go b/types/views/views.go index ae776c3b2..3911f1112 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -330,6 +330,12 @@ func SliceEqual[T comparable](a, b Slice[T]) bool { return slices.Equal(a.ж, b.ж) } +// shortOOOLen (short Out-of-Order length) is the slice length at or +// under which we attempt to compare two slices quadratically rather +// than allocating memory for a map in SliceEqualAnyOrder and +// SliceEqualAnyOrderFunc. +const shortOOOLen = 5 + // SliceEqualAnyOrder reports whether a and b contain the same elements, regardless of order. // The underlying slices for a and b can be nil. func SliceEqualAnyOrder[T comparable](a, b Slice[T]) bool { @@ -347,18 +353,15 @@ func SliceEqualAnyOrder[T comparable](a, b Slice[T]) bool { return true } - // count the occurrences of remaining values and compare - valueCount := make(map[T]int) - for i, n := diffStart, a.Len(); i < n; i++ { - valueCount[a.At(i)]++ - valueCount[b.At(i)]-- - } - for _, count := range valueCount { - if count != 0 { - return false - } + a, b = a.SliceFrom(diffStart), b.SliceFrom(diffStart) + cmp := func(v T) T { return v } + + // For a small number of items, avoid the allocation of a map and just + // do the quadratic thing. + if a.Len() <= shortOOOLen { + return unorderedSliceEqualAnyOrderSmall(a, b, cmp) } - return true + return unorderedSliceEqualAnyOrder(a, b, cmp) } // SliceEqualAnyOrderFunc reports whether a and b contain the same elements, @@ -382,66 +385,89 @@ func SliceEqualAnyOrderFunc[T any, V comparable](a, b Slice[T], cmp func(T) V) b return true } + a, b = a.SliceFrom(diffStart), b.SliceFrom(diffStart) // For a small number of items, avoid the allocation of a map and just - // do the quadratic thing. We can also only check the items between - // diffStart and the end. - nRemain := a.Len() - diffStart - const shortOptLen = 5 - if nRemain <= shortOptLen { - // These track which elements in a and b have been matched, so - // that we don't treat arrays with differing number of - // duplicate elements as equal (e.g. [1, 1, 2] and [1, 2, 2]). - var aMatched, bMatched [shortOptLen]bool - - // Compare each element in a to each element in b - for i := range nRemain { - av := cmp(a.At(i + diffStart)) - found := false - for j := range nRemain { - // Skip elements in b that have already been - // used to match an item in a. - if bMatched[j] { - continue - } - - bv := cmp(b.At(j + diffStart)) - if av == bv { - // Mark these elements as already - // matched, so that a future loop - // iteration (of a duplicate element) - // doesn't match it again. - aMatched[i] = true - bMatched[j] = true - found = true - break - } - } - if !found { - return false - } + // do the quadratic thing. + if a.Len() <= shortOOOLen { + return unorderedSliceEqualAnyOrderSmall(a, b, cmp) + } + return unorderedSliceEqualAnyOrder(a, b, cmp) +} + +// unorderedSliceEqualAnyOrder reports whether a and b contain the same elements +// using a map. The cmp function maps from a T slice element to a comparable +// value. 
+func unorderedSliceEqualAnyOrder[T any, V comparable](a, b Slice[T], cmp func(T) V) bool { + if a.Len() != b.Len() { + panic("internal error") + } + if a.Len() == 0 { + return true + } + m := make(map[V]int) + for i := range a.Len() { + m[cmp(a.At(i))]++ + m[cmp(b.At(i))]-- + } + for _, count := range m { + if count != 0 { + return false } + } + return true +} - // Verify all elements were matched exactly once. - for i := range nRemain { - if !aMatched[i] || !bMatched[i] { - return false +// unorderedSliceEqualAnyOrderSmall reports whether a and b (which must be the +// same length, and shortOOOLen or shorter) contain the same elements (using cmp +// to map from T to a comparable value) in some order. +// +// This is the quadratic-time implementation for small slices that doesn't +// allocate. +func unorderedSliceEqualAnyOrderSmall[T any, V comparable](a, b Slice[T], cmp func(T) V) bool { + if a.Len() != b.Len() || a.Len() > shortOOOLen { + panic("internal error") + } + + // These track which elements in a and b have been matched, so + // that we don't treat arrays with differing number of + // duplicate elements as equal (e.g. [1, 1, 2] and [1, 2, 2]). + var aMatched, bMatched [shortOOOLen]bool + + // Compare each element in a to each element in b + for i := range a.Len() { + av := cmp(a.At(i)) + found := false + for j := range a.Len() { + // Skip elements in b that have already been + // used to match an item in a. + if bMatched[j] { + continue } - } - return true + bv := cmp(b.At(j)) + if av == bv { + // Mark these elements as already + // matched, so that a future loop + // iteration (of a duplicate element) + // doesn't match it again. + aMatched[i] = true + bMatched[j] = true + found = true + break + } + } + if !found { + return false + } } - // count the occurrences of remaining values and compare - valueCount := make(map[V]int) - for i, n := diffStart, a.Len(); i < n; i++ { - valueCount[cmp(a.At(i))]++ - valueCount[cmp(b.At(i))]-- - } - for _, count := range valueCount { - if count != 0 { + // Verify all elements were matched exactly once. + for i := range a.Len() { + if !aMatched[i] || !bMatched[i] { return false } } + return true } diff --git a/types/views/views_test.go b/types/views/views_test.go index 7837a89d6..2205cbc03 100644 --- a/types/views/views_test.go +++ b/types/views/views_test.go @@ -231,6 +231,83 @@ func TestSliceEqualAnyOrderFunc(t *testing.T) { } } +func TestSliceEqualAnyOrderAllocs(t *testing.T) { + ss := func(s ...string) Slice[string] { return SliceOf(s) } + cmp := func(s string) string { return s } + + t.Run("no-allocs-short-unordered", func(t *testing.T) { + // No allocations for short comparisons + short1 := ss("a", "b", "c") + short2 := ss("c", "b", "a") + if n := testing.AllocsPerRun(1000, func() { + if !SliceEqualAnyOrder(short1, short2) { + t.Fatal("not equal") + } + if !SliceEqualAnyOrderFunc(short1, short2, cmp) { + t.Fatal("not equal") + } + }); n > 0 { + t.Fatalf("allocs = %v; want 0", n) + } + }) + + t.Run("no-allocs-long-match", func(t *testing.T) { + long1 := ss("a", "b", "c", "d", "e", "f", "g", "h", "i", "j") + long2 := ss("a", "b", "c", "d", "e", "f", "g", "h", "i", "j") + + if n := testing.AllocsPerRun(1000, func() { + if !SliceEqualAnyOrder(long1, long2) { + t.Fatal("not equal") + } + if !SliceEqualAnyOrderFunc(long1, long2, cmp) { + t.Fatal("not equal") + } + }); n > 0 { + t.Fatalf("allocs = %v; want 0", n) + } + }) + + t.Run("allocs-long-unordered", func(t *testing.T) { + // We do unfortunately allocate for long comparisons. 
+ long1 := ss("a", "b", "c", "d", "e", "f", "g", "h", "i", "j") + long2 := ss("c", "b", "a", "e", "d", "f", "g", "h", "i", "j") + + if n := testing.AllocsPerRun(1000, func() { + if !SliceEqualAnyOrder(long1, long2) { + t.Fatal("not equal") + } + if !SliceEqualAnyOrderFunc(long1, long2, cmp) { + t.Fatal("not equal") + } + }); n == 0 { + t.Fatalf("unexpectedly didn't allocate") + } + }) +} + +func BenchmarkSliceEqualAnyOrder(b *testing.B) { + b.Run("short", func(b *testing.B) { + b.ReportAllocs() + s1 := SliceOf([]string{"foo", "bar"}) + s2 := SliceOf([]string{"bar", "foo"}) + for range b.N { + if !SliceEqualAnyOrder(s1, s2) { + b.Fatal() + } + } + }) + b.Run("long", func(b *testing.B) { + b.ReportAllocs() + s1 := SliceOf([]string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"}) + s2 := SliceOf([]string{"c", "b", "a", "e", "d", "f", "g", "h", "i", "j"}) + for range b.N { + if !SliceEqualAnyOrder(s1, s2) { + b.Fatal() + } + } + }) +} + func TestSliceEqual(t *testing.T) { a := SliceOf([]string{"foo", "bar"}) b := SliceOf([]string{"foo", "bar"}) From 4e7f4086b2b7ded76e43bdb4ad12d9dd253edba4 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 30 Jan 2025 11:24:25 -0600 Subject: [PATCH 0434/1708] ipn: generate LoginProfileView and use it instead of *LoginProfile where appropriate Conventionally, we use views (e.g., ipn.PrefsView, tailcfg.NodeView, etc.) when dealing with structs that shouldn't be mutated. However, ipn.LoginProfile has been an exception so far, with a mix of passing and returning LoginProfile by reference (allowing accidental mutations) and by value (which is wasteful, given its current size of 192 bytes). In this PR, we generate an ipn.LoginProfileView and use it instead of passing/returning LoginProfiles by mutable reference or copying them when passing/returning by value. Now, LoginProfiles can only be mutated by (*profileManager).setProfilePrefs. Updates #14823 Signed-off-by: Nick Khyl --- ipn/doc.go | 2 +- ipn/ipn_clone.go | 23 ++++ ipn/ipn_view.go | 68 +++++++++- ipn/ipnlocal/local.go | 30 ++--- ipn/ipnlocal/local_test.go | 6 +- ipn/ipnlocal/network-lock.go | 4 +- ipn/ipnlocal/network-lock_test.go | 16 +-- ipn/ipnlocal/profiles.go | 204 ++++++++++++++++-------------- ipn/ipnlocal/profiles_test.go | 44 +++---- ipn/ipnlocal/serve.go | 2 +- ipn/ipnlocal/serve_test.go | 2 +- ipn/localapi/localapi.go | 4 +- 12 files changed, 254 insertions(+), 151 deletions(-) diff --git a/ipn/doc.go b/ipn/doc.go index 9a0bbb800..c98c7e8b3 100644 --- a/ipn/doc.go +++ b/ipn/doc.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:generate go run tailscale.com/cmd/viewer -type=Prefs,ServeConfig,ServiceConfig,TCPPortHandler,HTTPHandler,WebServerConfig +//go:generate go run tailscale.com/cmd/viewer -type=LoginProfile,Prefs,ServeConfig,ServiceConfig,TCPPortHandler,HTTPHandler,WebServerConfig // Package ipn implements the interactions between the Tailscale cloud // control plane and the local network stack. diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 47cca71d0..4050fec46 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -17,6 +17,29 @@ import ( "tailscale.com/types/ptr" ) +// Clone makes a deep copy of LoginProfile. +// The result aliases no memory with the original. +func (src *LoginProfile) Clone() *LoginProfile { + if src == nil { + return nil + } + dst := new(LoginProfile) + *dst = *src + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. 
+var _LoginProfileCloneNeedsRegeneration = LoginProfile(struct { + ID ProfileID + Name string + NetworkProfile NetworkProfile + Key StateKey + UserProfile tailcfg.UserProfile + NodeID tailcfg.StableNodeID + LocalUserID WindowsUserID + ControlURL string +}{}) + // Clone makes a deep copy of Prefs. // The result aliases no memory with the original. func (src *Prefs) Clone() *Prefs { diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 41b4ddbc8..e633a2633 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -18,7 +18,73 @@ import ( "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=Prefs,ServeConfig,ServiceConfig,TCPPortHandler,HTTPHandler,WebServerConfig +//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=LoginProfile,Prefs,ServeConfig,ServiceConfig,TCPPortHandler,HTTPHandler,WebServerConfig + +// View returns a read-only view of LoginProfile. +func (p *LoginProfile) View() LoginProfileView { + return LoginProfileView{ж: p} +} + +// LoginProfileView provides a read-only view over LoginProfile. +// +// Its methods should only be called if `Valid()` returns true. +type LoginProfileView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *LoginProfile +} + +// Valid reports whether v's underlying value is non-nil. +func (v LoginProfileView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v LoginProfileView) AsStruct() *LoginProfile { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +func (v LoginProfileView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } + +func (v *LoginProfileView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x LoginProfile + if err := json.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +func (v LoginProfileView) ID() ProfileID { return v.ж.ID } +func (v LoginProfileView) Name() string { return v.ж.Name } +func (v LoginProfileView) NetworkProfile() NetworkProfile { return v.ж.NetworkProfile } +func (v LoginProfileView) Key() StateKey { return v.ж.Key } +func (v LoginProfileView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } +func (v LoginProfileView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } +func (v LoginProfileView) LocalUserID() WindowsUserID { return v.ж.LocalUserID } +func (v LoginProfileView) ControlURL() string { return v.ж.ControlURL } + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _LoginProfileViewNeedsRegeneration = LoginProfile(struct { + ID ProfileID + Name string + NetworkProfile NetworkProfile + Key StateKey + UserProfile tailcfg.UserProfile + NodeID tailcfg.StableNodeID + LocalUserID WindowsUserID + ControlURL string +}{}) // View returns a read-only view of Prefs. func (p *Prefs) View() PrefsView { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index a6e3f1952..5766365b1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4045,7 +4045,7 @@ func (b *LocalBackend) checkProfileNameLocked(p *ipn.Prefs) error { // No profile with that name exists. That's fine. 
return nil } - if id != b.pm.CurrentProfile().ID { + if id != b.pm.CurrentProfile().ID() { // Name is already in use by another profile. return fmt.Errorf("profile name %q already in use", p.ProfileName) } @@ -4127,7 +4127,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) } prefs := newp.View() - np := b.pm.CurrentProfile().NetworkProfile + np := b.pm.CurrentProfile().NetworkProfile() if netMap != nil { np = ipn.NetworkProfile{ MagicDNSName: b.netMap.MagicDNSSuffix(), @@ -5663,7 +5663,7 @@ func (b *LocalBackend) Logout(ctx context.Context) error { unlock = b.lockAndGetUnlock() defer unlock() - if err := b.pm.DeleteProfile(profile.ID); err != nil { + if err := b.pm.DeleteProfile(profile.ID()); err != nil { b.logf("error deleting profile: %v", err) return err } @@ -6039,7 +6039,7 @@ func (b *LocalBackend) setDebugLogsByCapabilityLocked(nm *netmap.NetworkMap) { // the method to only run the reset-logic and not reload the store from memory to ensure // foreground sessions are not removed if they are not saved on disk. func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) { - if b.netMap == nil || !b.netMap.SelfNode.Valid() || !prefs.Valid() || b.pm.CurrentProfile().ID == "" { + if b.netMap == nil || !b.netMap.SelfNode.Valid() || !prefs.Valid() || b.pm.CurrentProfile().ID() == "" { // We're not logged in, so we don't have a profile. // Don't try to load the serve config. b.lastServeConfJSON = mem.B(nil) @@ -6047,7 +6047,7 @@ func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) { return } - confKey := ipn.ServeConfigKey(b.pm.CurrentProfile().ID) + confKey := ipn.ServeConfigKey(b.pm.CurrentProfile().ID()) // TODO(maisem,bradfitz): prevent reading the config from disk // if the profile has not changed. confj, err := b.store.ReadState(confKey) @@ -7000,7 +7000,7 @@ func (b *LocalBackend) ShouldInterceptVIPServiceTCPPort(ap netip.AddrPort) bool // It will restart the backend on success. // If the profile is not known, it returns an errProfileNotFound. func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { - if b.CurrentProfile().ID == profile { + if b.CurrentProfile().ID() == profile { return nil } unlock := b.lockAndGetUnlock() @@ -7023,12 +7023,12 @@ func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { func (b *LocalBackend) initTKALocked() error { cp := b.pm.CurrentProfile() - if cp.ID == "" { + if cp.ID() == "" { b.tka = nil return nil } if b.tka != nil { - if b.tka.profile == cp.ID { + if b.tka.profile == cp.ID() { // Already initialized. return nil } @@ -7058,7 +7058,7 @@ func (b *LocalBackend) initTKALocked() error { } b.tka = &tkaState{ - profile: cp.ID, + profile: cp.ID(), authority: authority, storage: storage, } @@ -7111,7 +7111,7 @@ func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { unlock := b.lockAndGetUnlock() defer unlock() - needToRestart := b.pm.CurrentProfile().ID == p + needToRestart := b.pm.CurrentProfile().ID() == p if err := b.pm.DeleteProfile(p); err != nil { if err == errProfileNotFound { return nil @@ -7126,7 +7126,7 @@ func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { // CurrentProfile returns the current LoginProfile. // The value may be zero if the profile is not persisted. 
-func (b *LocalBackend) CurrentProfile() ipn.LoginProfile { +func (b *LocalBackend) CurrentProfile() ipn.LoginProfileView { b.mu.Lock() defer b.mu.Unlock() return b.pm.CurrentProfile() @@ -7147,7 +7147,7 @@ func (b *LocalBackend) NewProfile() error { } // ListProfiles returns a list of all LoginProfiles. -func (b *LocalBackend) ListProfiles() []ipn.LoginProfile { +func (b *LocalBackend) ListProfiles() []ipn.LoginProfileView { b.mu.Lock() defer b.mu.Unlock() return b.pm.Profiles() @@ -7353,7 +7353,7 @@ func (b *LocalBackend) UnadvertiseRoute(toRemove ...netip.Prefix) error { // namespace a key with the profile manager's current profile key, if any func namespaceKeyForCurrentProfile(pm *profileManager, key ipn.StateKey) ipn.StateKey { - return pm.CurrentProfile().Key + "||" + key + return pm.CurrentProfile().Key() + "||" + key } const routeInfoStateStoreKey ipn.StateKey = "_routeInfo" @@ -7361,7 +7361,7 @@ const routeInfoStateStoreKey ipn.StateKey = "_routeInfo" func (b *LocalBackend) storeRouteInfo(ri *appc.RouteInfo) error { b.mu.Lock() defer b.mu.Unlock() - if b.pm.CurrentProfile().ID == "" { + if b.pm.CurrentProfile().ID() == "" { return nil } key := namespaceKeyForCurrentProfile(b.pm, routeInfoStateStoreKey) @@ -7373,7 +7373,7 @@ func (b *LocalBackend) storeRouteInfo(ri *appc.RouteInfo) error { } func (b *LocalBackend) readRouteInfoLocked() (*appc.RouteInfo, error) { - if b.pm.CurrentProfile().ID == "" { + if b.pm.CurrentProfile().ID() == "" { return &appc.RouteInfo{}, nil } key := namespaceKeyForCurrentProfile(b.pm, routeInfoStateStoreKey) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index de9ebf9fb..3455cab1f 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4087,9 +4087,9 @@ func TestReadWriteRouteInfo(t *testing.T) { b := newTestBackend(t) prof1 := ipn.LoginProfile{ID: "id1", Key: "key1"} prof2 := ipn.LoginProfile{ID: "id2", Key: "key2"} - b.pm.knownProfiles["id1"] = &prof1 - b.pm.knownProfiles["id2"] = &prof2 - b.pm.currentProfile = &prof1 + b.pm.knownProfiles["id1"] = prof1.View() + b.pm.knownProfiles["id2"] = prof2.View() + b.pm.currentProfile = prof1.View() // set up routeInfo ri1 := &appc.RouteInfo{} diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index bf14d339e..e1583dab7 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -407,7 +407,7 @@ func (b *LocalBackend) tkaApplyDisablementLocked(secret []byte) error { // // b.mu must be held. 
func (b *LocalBackend) chonkPathLocked() string { - return filepath.Join(b.TailscaleVarRoot(), "tka-profiles", string(b.pm.CurrentProfile().ID)) + return filepath.Join(b.TailscaleVarRoot(), "tka-profiles", string(b.pm.CurrentProfile().ID())) } // tkaBootstrapFromGenesisLocked initializes the local (on-disk) state of the @@ -455,7 +455,7 @@ func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, per } b.tka = &tkaState{ - profile: b.pm.CurrentProfile().ID, + profile: b.pm.CurrentProfile().ID(), authority: authority, storage: chonk, } diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 4b79136c8..838f16cb9 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -202,7 +202,7 @@ func TestTKADisablementFlow(t *testing.T) { }).View(), ipn.NetworkProfile{})) temp := t.TempDir() - tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID)) + tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID())) os.Mkdir(tkaPath, 0755) chonk, err := tka.ChonkDir(tkaPath) if err != nil { @@ -410,7 +410,7 @@ func TestTKASync(t *testing.T) { } temp := t.TempDir() - tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID)) + tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID())) os.Mkdir(tkaPath, 0755) // Setup the TKA authority on the node. nodeStorage, err := tka.ChonkDir(tkaPath) @@ -710,7 +710,7 @@ func TestTKADisable(t *testing.T) { }).View(), ipn.NetworkProfile{})) temp := t.TempDir() - tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID)) + tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID())) os.Mkdir(tkaPath, 0755) key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} chonk, err := tka.ChonkDir(tkaPath) @@ -770,7 +770,7 @@ func TestTKADisable(t *testing.T) { ccAuto: cc, logf: t.Logf, tka: &tkaState{ - profile: pm.CurrentProfile().ID, + profile: pm.CurrentProfile().ID(), authority: authority, storage: chonk, }, @@ -805,7 +805,7 @@ func TestTKASign(t *testing.T) { key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} temp := t.TempDir() - tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID)) + tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID())) os.Mkdir(tkaPath, 0755) chonk, err := tka.ChonkDir(tkaPath) if err != nil { @@ -890,7 +890,7 @@ func TestTKAForceDisable(t *testing.T) { }).View(), ipn.NetworkProfile{})) temp := t.TempDir() - tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID)) + tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID())) os.Mkdir(tkaPath, 0755) chonk, err := tka.ChonkDir(tkaPath) if err != nil { @@ -989,7 +989,7 @@ func TestTKAAffectedSigs(t *testing.T) { tkaKey := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} temp := t.TempDir() - tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID)) + tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID())) os.Mkdir(tkaPath, 0755) chonk, err := tka.ChonkDir(tkaPath) if err != nil { @@ -1124,7 +1124,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { compromisedKey := tka.Key{Kind: tka.Key25519, Public: compromisedPriv.Public().Verifier(), Votes: 1} temp := t.TempDir() - tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID)) + tkaPath := filepath.Join(temp, "tka-profile", 
string(pm.CurrentProfile().ID())) os.Mkdir(tkaPath, 0755) chonk, err := tka.ChonkDir(tkaPath) if err != nil { diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index b13f921d6..858623025 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -35,9 +35,9 @@ type profileManager struct { health *health.Tracker currentUserID ipn.WindowsUserID - knownProfiles map[ipn.ProfileID]*ipn.LoginProfile // always non-nil - currentProfile *ipn.LoginProfile // always non-nil - prefs ipn.PrefsView // always Valid. + knownProfiles map[ipn.ProfileID]ipn.LoginProfileView // always non-nil + currentProfile ipn.LoginProfileView // always Valid. + prefs ipn.PrefsView // always Valid. } func (pm *profileManager) dlogf(format string, args ...any) { @@ -89,7 +89,7 @@ func (pm *profileManager) DefaultUserProfileID(uid ipn.WindowsUserID) ipn.Profil pm.dlogf("DefaultUserProfileID: windows: migrating from legacy preferences") profile, err := pm.migrateFromLegacyPrefs(uid, false) if err == nil { - return profile.ID + return profile.ID() } pm.logf("failed to migrate from legacy preferences: %v", err) } @@ -98,17 +98,17 @@ func (pm *profileManager) DefaultUserProfileID(uid ipn.WindowsUserID) ipn.Profil pk := ipn.StateKey(string(b)) prof := pm.findProfileByKey(pk) - if prof == nil { + if !prof.Valid() { pm.dlogf("DefaultUserProfileID: no profile found for key: %q", pk) return "" } - return prof.ID + return prof.ID() } // checkProfileAccess returns an [errProfileAccessDenied] if the current user // does not have access to the specified profile. -func (pm *profileManager) checkProfileAccess(profile *ipn.LoginProfile) error { - if pm.currentUserID != "" && profile.LocalUserID != pm.currentUserID { +func (pm *profileManager) checkProfileAccess(profile ipn.LoginProfileView) error { + if pm.currentUserID != "" && profile.LocalUserID() != pm.currentUserID { return errProfileAccessDenied } return nil @@ -116,21 +116,21 @@ func (pm *profileManager) checkProfileAccess(profile *ipn.LoginProfile) error { // allProfiles returns all profiles accessible to the current user. // The returned profiles are sorted by Name. -func (pm *profileManager) allProfiles() (out []*ipn.LoginProfile) { +func (pm *profileManager) allProfiles() (out []ipn.LoginProfileView) { for _, p := range pm.knownProfiles { if pm.checkProfileAccess(p) == nil { out = append(out, p) } } - slices.SortFunc(out, func(a, b *ipn.LoginProfile) int { - return cmp.Compare(a.Name, b.Name) + slices.SortFunc(out, func(a, b ipn.LoginProfileView) int { + return cmp.Compare(a.Name(), b.Name()) }) return out } // matchingProfiles is like [profileManager.allProfiles], but returns only profiles // matching the given predicate. -func (pm *profileManager) matchingProfiles(f func(*ipn.LoginProfile) bool) (out []*ipn.LoginProfile) { +func (pm *profileManager) matchingProfiles(f func(ipn.LoginProfileView) bool) (out []ipn.LoginProfileView) { all := pm.allProfiles() out = all[:0] for _, p := range all { @@ -144,11 +144,11 @@ func (pm *profileManager) matchingProfiles(f func(*ipn.LoginProfile) bool) (out // findMatchingProfiles returns all profiles accessible to the current user // that represent the same node/user as prefs. // The returned profiles are sorted by Name. 
-func (pm *profileManager) findMatchingProfiles(prefs ipn.PrefsView) []*ipn.LoginProfile { - return pm.matchingProfiles(func(p *ipn.LoginProfile) bool { - return p.ControlURL == prefs.ControlURL() && - (p.UserProfile.ID == prefs.Persist().UserProfile().ID || - p.NodeID == prefs.Persist().NodeID()) +func (pm *profileManager) findMatchingProfiles(prefs ipn.PrefsView) []ipn.LoginProfileView { + return pm.matchingProfiles(func(p ipn.LoginProfileView) bool { + return p.ControlURL() == prefs.ControlURL() && + (p.UserProfile().ID == prefs.Persist().UserProfile().ID || + p.NodeID() == prefs.Persist().NodeID()) }) } @@ -157,18 +157,18 @@ func (pm *profileManager) findMatchingProfiles(prefs ipn.PrefsView) []*ipn.Login // accessible to the current user. func (pm *profileManager) ProfileIDForName(name string) ipn.ProfileID { p := pm.findProfileByName(name) - if p == nil { + if !p.Valid() { return "" } - return p.ID + return p.ID() } -func (pm *profileManager) findProfileByName(name string) *ipn.LoginProfile { - out := pm.matchingProfiles(func(p *ipn.LoginProfile) bool { - return p.Name == name +func (pm *profileManager) findProfileByName(name string) ipn.LoginProfileView { + out := pm.matchingProfiles(func(p ipn.LoginProfileView) bool { + return p.Name() == name }) if len(out) == 0 { - return nil + return ipn.LoginProfileView{} } if len(out) > 1 { pm.logf("[unexpected] multiple profiles with the same name") @@ -176,12 +176,12 @@ func (pm *profileManager) findProfileByName(name string) *ipn.LoginProfile { return out[0] } -func (pm *profileManager) findProfileByKey(key ipn.StateKey) *ipn.LoginProfile { - out := pm.matchingProfiles(func(p *ipn.LoginProfile) bool { - return p.Key == key +func (pm *profileManager) findProfileByKey(key ipn.StateKey) ipn.LoginProfileView { + out := pm.matchingProfiles(func(p ipn.LoginProfileView) bool { + return p.Key() == key }) if len(out) == 0 { - return nil + return ipn.LoginProfileView{} } if len(out) > 1 { pm.logf("[unexpected] multiple profiles with the same key") @@ -194,8 +194,8 @@ func (pm *profileManager) setUnattendedModeAsConfigured() error { return nil } - if pm.currentProfile.Key != "" && pm.prefs.ForceDaemon() { - return pm.WriteState(ipn.ServerModeStartKey, []byte(pm.currentProfile.Key)) + if pm.currentProfile.Key() != "" && pm.prefs.ForceDaemon() { + return pm.WriteState(ipn.ServerModeStartKey, []byte(pm.currentProfile.Key())) } else { return pm.WriteState(ipn.ServerModeStartKey, nil) } @@ -229,29 +229,36 @@ func (pm *profileManager) SetPrefs(prefsIn ipn.PrefsView, np ipn.NetworkProfile) existing = existing[1:] for _, p := range existing { // Clear the state. - if err := pm.store.WriteState(p.Key, nil); err != nil { + if err := pm.store.WriteState(p.Key(), nil); err != nil { // We couldn't delete the state, so keep the profile around. continue } // Remove the profile, knownProfiles will be persisted // in [profileManager.setProfilePrefs] below. - delete(pm.knownProfiles, p.ID) + delete(pm.knownProfiles, p.ID()) } } pm.currentProfile = cp - if err := pm.SetProfilePrefs(cp, prefsIn, np); err != nil { + cp, err := pm.setProfilePrefs(nil, prefsIn, np) + if err != nil { return err } return pm.setProfileAsUserDefault(cp) } -// SetProfilePrefs is like [profileManager.SetPrefs], but sets prefs for the specified [ipn.LoginProfile] -// which is not necessarily the [profileManager.CurrentProfile]. It returns an [errProfileAccessDenied] -// if the specified profile is not accessible by the current user. 
-func (pm *profileManager) SetProfilePrefs(lp *ipn.LoginProfile, prefsIn ipn.PrefsView, np ipn.NetworkProfile) error { - if err := pm.checkProfileAccess(lp); err != nil { - return err +// setProfilePrefs is like [profileManager.SetPrefs], but sets prefs for the specified [ipn.LoginProfile], +// returning a read-only view of the updated profile on success. If the specified profile is nil, +// it defaults to the current profile. If the profile is not accessible by the current user, +// the method returns an [errProfileAccessDenied]. +func (pm *profileManager) setProfilePrefs(lp *ipn.LoginProfile, prefsIn ipn.PrefsView, np ipn.NetworkProfile) (ipn.LoginProfileView, error) { + isCurrentProfile := lp == nil || (lp.ID != "" && lp.ID == pm.currentProfile.ID()) + if isCurrentProfile { + lp = pm.CurrentProfile().AsStruct() + } + + if err := pm.checkProfileAccess(lp.View()); err != nil { + return ipn.LoginProfileView{}, err } // An empty profile.ID indicates that the profile is new, the node info wasn't available, @@ -291,23 +298,29 @@ func (pm *profileManager) SetProfilePrefs(lp *ipn.LoginProfile, prefsIn ipn.Pref lp.UserProfile = up lp.NetworkProfile = np + // Update the current profile view to reflect the changes + // if the specified profile is the current profile. + if isCurrentProfile { + pm.currentProfile = lp.View() + } + // An empty profile.ID indicates that the node info is not available yet, // and the profile doesn't need to be saved on disk. if lp.ID != "" { - pm.knownProfiles[lp.ID] = lp + pm.knownProfiles[lp.ID] = lp.View() if err := pm.writeKnownProfiles(); err != nil { - return err + return ipn.LoginProfileView{}, err } // Clone prefsIn and create a read-only view as a safety measure to // prevent accidental preference mutations, both externally and internally. - if err := pm.setProfilePrefsNoPermCheck(lp, prefsIn.AsStruct().View()); err != nil { - return err + if err := pm.setProfilePrefsNoPermCheck(lp.View(), prefsIn.AsStruct().View()); err != nil { + return ipn.LoginProfileView{}, err } } - return nil + return lp.View(), nil } -func newUnusedID(knownProfiles map[ipn.ProfileID]*ipn.LoginProfile) (ipn.ProfileID, ipn.StateKey) { +func newUnusedID(knownProfiles map[ipn.ProfileID]ipn.LoginProfileView) (ipn.ProfileID, ipn.StateKey) { var idb [2]byte for { rand.Read(idb[:]) @@ -326,14 +339,14 @@ func newUnusedID(knownProfiles map[ipn.ProfileID]*ipn.LoginProfile) (ipn.Profile // The method does not perform any additional checks on the specified // profile, such as verifying the caller's access rights or checking // if another profile for the same node already exists. -func (pm *profileManager) setProfilePrefsNoPermCheck(profile *ipn.LoginProfile, clonedPrefs ipn.PrefsView) error { +func (pm *profileManager) setProfilePrefsNoPermCheck(profile ipn.LoginProfileView, clonedPrefs ipn.PrefsView) error { isCurrentProfile := pm.currentProfile == profile if isCurrentProfile { pm.prefs = clonedPrefs pm.updateHealth() } - if profile.Key != "" { - if err := pm.writePrefsToStore(profile.Key, clonedPrefs); err != nil { + if profile.Key() != "" { + if err := pm.writePrefsToStore(profile.Key(), clonedPrefs); err != nil { return err } } else if !isCurrentProfile { @@ -362,11 +375,11 @@ func (pm *profileManager) writePrefsToStore(key ipn.StateKey, prefs ipn.PrefsVie } // Profiles returns the list of known profiles accessible to the current user. 
-func (pm *profileManager) Profiles() []ipn.LoginProfile { +func (pm *profileManager) Profiles() []ipn.LoginProfileView { allProfiles := pm.allProfiles() - out := make([]ipn.LoginProfile, len(allProfiles)) + out := make([]ipn.LoginProfileView, len(allProfiles)) for i, p := range allProfiles { - out[i] = *p + out[i] = p } return out } @@ -374,26 +387,26 @@ func (pm *profileManager) Profiles() []ipn.LoginProfile { // ProfileByID returns a profile with the given id, if it is accessible to the current user. // If the profile exists but is not accessible to the current user, it returns an [errProfileAccessDenied]. // If the profile does not exist, it returns an [errProfileNotFound]. -func (pm *profileManager) ProfileByID(id ipn.ProfileID) (ipn.LoginProfile, error) { +func (pm *profileManager) ProfileByID(id ipn.ProfileID) (ipn.LoginProfileView, error) { kp, err := pm.profileByIDNoPermCheck(id) if err != nil { - return ipn.LoginProfile{}, err + return ipn.LoginProfileView{}, err } if err := pm.checkProfileAccess(kp); err != nil { - return ipn.LoginProfile{}, err + return ipn.LoginProfileView{}, err } - return *kp, nil + return kp, nil } // profileByIDNoPermCheck is like [profileManager.ProfileByID], but it doesn't // check user's access rights to the profile. -func (pm *profileManager) profileByIDNoPermCheck(id ipn.ProfileID) (*ipn.LoginProfile, error) { - if id == pm.currentProfile.ID { +func (pm *profileManager) profileByIDNoPermCheck(id ipn.ProfileID) (ipn.LoginProfileView, error) { + if id == pm.currentProfile.ID() { return pm.currentProfile, nil } kp, ok := pm.knownProfiles[id] if !ok { - return nil, errProfileNotFound + return ipn.LoginProfileView{}, errProfileNotFound } return kp, nil } @@ -412,11 +425,11 @@ func (pm *profileManager) ProfilePrefs(id ipn.ProfileID) (ipn.PrefsView, error) return pm.profilePrefs(kp) } -func (pm *profileManager) profilePrefs(p *ipn.LoginProfile) (ipn.PrefsView, error) { - if p.ID == pm.currentProfile.ID { +func (pm *profileManager) profilePrefs(p ipn.LoginProfileView) (ipn.PrefsView, error) { + if p.ID() == pm.currentProfile.ID() { return pm.prefs, nil } - return pm.loadSavedPrefs(p.Key) + return pm.loadSavedPrefs(p.Key()) } // SwitchProfile switches to the profile with the given id. @@ -429,14 +442,14 @@ func (pm *profileManager) SwitchProfile(id ipn.ProfileID) error { if !ok { return errProfileNotFound } - if pm.currentProfile != nil && kp.ID == pm.currentProfile.ID && pm.prefs.Valid() { + if pm.currentProfile.Valid() && kp.ID() == pm.currentProfile.ID() && pm.prefs.Valid() { return nil } if err := pm.checkProfileAccess(kp); err != nil { return fmt.Errorf("%w: profile %q is not accessible to the current user", err, id) } - prefs, err := pm.loadSavedPrefs(kp.Key) + prefs, err := pm.loadSavedPrefs(kp.Key()) if err != nil { return err } @@ -459,8 +472,8 @@ func (pm *profileManager) SwitchToDefaultProfile() error { // setProfileAsUserDefault sets the specified profile as the default for the current user. // It returns an [errProfileAccessDenied] if the specified profile is not accessible to the current user. -func (pm *profileManager) setProfileAsUserDefault(profile *ipn.LoginProfile) error { - if profile.Key == "" { +func (pm *profileManager) setProfileAsUserDefault(profile ipn.LoginProfileView) error { + if profile.Key() == "" { // The profile has not been persisted yet; ignore it for now. 
return nil } @@ -468,7 +481,7 @@ func (pm *profileManager) setProfileAsUserDefault(profile *ipn.LoginProfile) err return errProfileAccessDenied } k := ipn.CurrentProfileKey(string(pm.currentUserID)) - return pm.WriteState(k, []byte(profile.Key)) + return pm.WriteState(k, []byte(profile.Key())) } func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error) { @@ -507,10 +520,10 @@ func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error return savedPrefs.View(), nil } -// CurrentProfile returns the current LoginProfile. +// CurrentProfile returns a read-only [ipn.LoginProfileView] of the current profile. // The value may be zero if the profile is not persisted. -func (pm *profileManager) CurrentProfile() ipn.LoginProfile { - return *pm.currentProfile +func (pm *profileManager) CurrentProfile() ipn.LoginProfileView { + return pm.currentProfile } // errProfileNotFound is returned by methods that accept a ProfileID @@ -533,7 +546,7 @@ var errProfileAccessDenied = errors.New("profile access denied") // recommended to call [profileManager.SwitchProfile] first. func (pm *profileManager) DeleteProfile(id ipn.ProfileID) error { metricDeleteProfile.Add(1) - if id == pm.currentProfile.ID { + if id == pm.currentProfile.ID() { return pm.deleteCurrentProfile() } kp, ok := pm.knownProfiles[id] @@ -550,7 +563,7 @@ func (pm *profileManager) deleteCurrentProfile() error { if err := pm.checkProfileAccess(pm.currentProfile); err != nil { return err } - if pm.currentProfile.ID == "" { + if pm.currentProfile.ID() == "" { // Deleting the in-memory only new profile, just create a new one. pm.NewProfile() return nil @@ -560,14 +573,14 @@ func (pm *profileManager) deleteCurrentProfile() error { // deleteProfileNoPermCheck is like [profileManager.DeleteProfile], // but it doesn't check user's access rights to the profile. -func (pm *profileManager) deleteProfileNoPermCheck(profile *ipn.LoginProfile) error { - if profile.ID == pm.currentProfile.ID { +func (pm *profileManager) deleteProfileNoPermCheck(profile ipn.LoginProfileView) error { + if profile.ID() == pm.currentProfile.ID() { pm.NewProfile() } - if err := pm.WriteState(profile.Key, nil); err != nil { + if err := pm.WriteState(profile.Key(), nil); err != nil { return err } - delete(pm.knownProfiles, profile.ID) + delete(pm.knownProfiles, profile.ID()) return pm.writeKnownProfiles() } @@ -578,7 +591,7 @@ func (pm *profileManager) DeleteAllProfilesForUser() error { currentProfileDeleted := false writeKnownProfiles := func() error { - if currentProfileDeleted || pm.currentProfile.ID == "" { + if currentProfileDeleted || pm.currentProfile.ID() == "" { pm.NewProfile() } return pm.writeKnownProfiles() @@ -589,14 +602,14 @@ func (pm *profileManager) DeleteAllProfilesForUser() error { // Skip profiles we don't have access to. continue } - if err := pm.WriteState(kp.Key, nil); err != nil { + if err := pm.WriteState(kp.Key(), nil); err != nil { // Write to remove references to profiles we've already deleted, but // return the original error. 
writeKnownProfiles() return err } - delete(pm.knownProfiles, kp.ID) - if kp.ID == pm.currentProfile.ID { + delete(pm.knownProfiles, kp.ID()) + if kp.ID() == pm.currentProfile.ID() { currentProfileDeleted = true } } @@ -633,26 +646,27 @@ func (pm *profileManager) NewProfileForUser(uid ipn.WindowsUserID) { pm.prefs = defaultPrefs pm.updateHealth() - pm.currentProfile = &ipn.LoginProfile{LocalUserID: uid} + newProfile := &ipn.LoginProfile{LocalUserID: uid} + pm.currentProfile = newProfile.View() } // newProfileWithPrefs creates a new profile with the specified prefs and assigns // the specified uid as the profile owner. If switchNow is true, it switches to the // newly created profile immediately. It returns the newly created profile on success, // or an error on failure. -func (pm *profileManager) newProfileWithPrefs(uid ipn.WindowsUserID, prefs ipn.PrefsView, switchNow bool) (*ipn.LoginProfile, error) { +func (pm *profileManager) newProfileWithPrefs(uid ipn.WindowsUserID, prefs ipn.PrefsView, switchNow bool) (ipn.LoginProfileView, error) { metricNewProfile.Add(1) - profile := &ipn.LoginProfile{LocalUserID: uid} - if err := pm.SetProfilePrefs(profile, prefs, ipn.NetworkProfile{}); err != nil { - return nil, err + profile, err := pm.setProfilePrefs(&ipn.LoginProfile{LocalUserID: uid}, prefs, ipn.NetworkProfile{}) + if err != nil { + return ipn.LoginProfileView{}, err } if switchNow { pm.currentProfile = profile pm.prefs = prefs.AsStruct().View() pm.updateHealth() if err := pm.setProfileAsUserDefault(profile); err != nil { - return nil, err + return ipn.LoginProfileView{}, err } } return profile, nil @@ -711,8 +725,8 @@ func readAutoStartKey(store ipn.StateStore, goos string) (ipn.StateKey, error) { return ipn.StateKey(autoStartKey), nil } -func readKnownProfiles(store ipn.StateStore) (map[ipn.ProfileID]*ipn.LoginProfile, error) { - var knownProfiles map[ipn.ProfileID]*ipn.LoginProfile +func readKnownProfiles(store ipn.StateStore) (map[ipn.ProfileID]ipn.LoginProfileView, error) { + var knownProfiles map[ipn.ProfileID]ipn.LoginProfileView prfB, err := store.ReadState(ipn.KnownProfilesStateKey) switch err { case nil: @@ -720,7 +734,7 @@ func readKnownProfiles(store ipn.StateStore) (map[ipn.ProfileID]*ipn.LoginProfil return nil, fmt.Errorf("unmarshaling known profiles: %w", err) } case ipn.ErrStateNotExist: - knownProfiles = make(map[ipn.ProfileID]*ipn.LoginProfile) + knownProfiles = make(map[ipn.ProfileID]ipn.LoginProfileView) default: return nil, fmt.Errorf("calling ReadState on state store: %w", err) } @@ -749,17 +763,17 @@ func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *healt if stateKey != "" { for _, v := range knownProfiles { - if v.Key == stateKey { + if v.Key() == stateKey { pm.currentProfile = v } } - if pm.currentProfile == nil { + if !pm.currentProfile.Valid() { if suf, ok := strings.CutPrefix(string(stateKey), "user-"); ok { pm.currentUserID = ipn.WindowsUserID(suf) } pm.NewProfile() } else { - pm.currentUserID = pm.currentProfile.LocalUserID + pm.currentUserID = pm.currentProfile.LocalUserID() } prefs, err := pm.loadSavedPrefs(stateKey) if err != nil { @@ -788,18 +802,18 @@ func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *healt return pm, nil } -func (pm *profileManager) migrateFromLegacyPrefs(uid ipn.WindowsUserID, switchNow bool) (*ipn.LoginProfile, error) { +func (pm *profileManager) migrateFromLegacyPrefs(uid ipn.WindowsUserID, switchNow bool) (ipn.LoginProfileView, error) { metricMigration.Add(1) sentinel, prefs, err := 
pm.loadLegacyPrefs(uid) if err != nil { metricMigrationError.Add(1) - return nil, fmt.Errorf("load legacy prefs: %w", err) + return ipn.LoginProfileView{}, fmt.Errorf("load legacy prefs: %w", err) } pm.dlogf("loaded legacy preferences; sentinel=%q", sentinel) profile, err := pm.newProfileWithPrefs(uid, prefs, switchNow) if err != nil { metricMigrationError.Add(1) - return nil, fmt.Errorf("migrating _daemon profile: %w", err) + return ipn.LoginProfileView{}, fmt.Errorf("migrating _daemon profile: %w", err) } pm.completeMigration(sentinel) pm.dlogf("completed legacy preferences migration with sentinel=%q", sentinel) @@ -809,8 +823,8 @@ func (pm *profileManager) migrateFromLegacyPrefs(uid ipn.WindowsUserID, switchNo func (pm *profileManager) requiresBackfill() bool { return pm != nil && - pm.currentProfile != nil && - pm.currentProfile.NetworkProfile.RequiresBackfill() + pm.currentProfile.Valid() && + pm.currentProfile.NetworkProfile().RequiresBackfill() } var ( diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 73e4f6535..5c4f1fd4c 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -52,11 +52,11 @@ func TestProfileCurrentUserSwitch(t *testing.T) { pm.SetCurrentUserID("user1") newProfile(t, "user1") cp := pm.currentProfile - pm.DeleteProfile(cp.ID) - if pm.currentProfile == nil { + pm.DeleteProfile(cp.ID()) + if !pm.currentProfile.Valid() { t.Fatal("currentProfile is nil") - } else if pm.currentProfile.ID != "" { - t.Fatalf("currentProfile.ID = %q, want empty", pm.currentProfile.ID) + } else if pm.currentProfile.ID() != "" { + t.Fatalf("currentProfile.ID = %q, want empty", pm.currentProfile.ID()) } if !pm.CurrentPrefs().Equals(defaultPrefs) { t.Fatalf("CurrentPrefs() = %v, want emptyPrefs", pm.CurrentPrefs().Pretty()) @@ -67,10 +67,10 @@ func TestProfileCurrentUserSwitch(t *testing.T) { t.Fatal(err) } pm.SetCurrentUserID("user1") - if pm.currentProfile == nil { + if !pm.currentProfile.Valid() { t.Fatal("currentProfile is nil") - } else if pm.currentProfile.ID != "" { - t.Fatalf("currentProfile.ID = %q, want empty", pm.currentProfile.ID) + } else if pm.currentProfile.ID() != "" { + t.Fatalf("currentProfile.ID = %q, want empty", pm.currentProfile.ID()) } if !pm.CurrentPrefs().Equals(defaultPrefs) { t.Fatalf("CurrentPrefs() = %v, want emptyPrefs", pm.CurrentPrefs().Pretty()) @@ -110,8 +110,8 @@ func TestProfileList(t *testing.T) { t.Fatalf("got %d profiles, want %d", len(got), len(want)) } for i, w := range want { - if got[i].Name != w { - t.Errorf("got profile %d name %q, want %q", i, got[i].Name, w) + if got[i].Name() != w { + t.Errorf("got profile %d name %q, want %q", i, got[i].Name(), w) } } } @@ -129,10 +129,10 @@ func TestProfileList(t *testing.T) { pm.SetCurrentUserID("user1") checkProfiles(t, "alice", "bob") - if lp := pm.findProfileByKey(carol.Key); lp != nil { + if lp := pm.findProfileByKey(carol.Key()); lp.Valid() { t.Fatalf("found profile for user2 in user1's profile list") } - if lp := pm.findProfileByName(carol.Name); lp != nil { + if lp := pm.findProfileByName(carol.Name()); lp.Valid() { t.Fatalf("found profile for user2 in user1's profile list") } @@ -294,7 +294,7 @@ func TestProfileDupe(t *testing.T) { profs := pm.Profiles() var got []*persist.Persist for _, p := range profs { - prefs, err := pm.loadSavedPrefs(p.Key) + prefs, err := pm.loadSavedPrefs(p.Key()) if err != nil { t.Fatal(err) } @@ -328,9 +328,9 @@ func TestProfileManagement(t *testing.T) { checkProfiles := func(t *testing.T) { t.Helper() prof := 
pm.CurrentProfile() - t.Logf("\tCurrentProfile = %q", prof) - if prof.Name != wantCurProfile { - t.Fatalf("CurrentProfile = %q; want %q", prof, wantCurProfile) + t.Logf("\tCurrentProfile = %q", prof.Name()) + if prof.Name() != wantCurProfile { + t.Fatalf("CurrentProfile = %q; want %q", prof.Name(), wantCurProfile) } profiles := pm.Profiles() wantLen := len(wantProfiles) @@ -349,13 +349,13 @@ func TestProfileManagement(t *testing.T) { t.Fatalf("CurrentPrefs = %v; want %v", p.Pretty(), wantProfiles[wantCurProfile].Pretty()) } for _, p := range profiles { - got, err := pm.loadSavedPrefs(p.Key) + got, err := pm.loadSavedPrefs(p.Key()) if err != nil { t.Fatal(err) } // Use Hostname as a proxy for all prefs. - if !got.Equals(wantProfiles[p.Name]) { - t.Fatalf("Prefs for profile %q =\n got=%+v\nwant=%v", p, got.Pretty(), wantProfiles[p.Name].Pretty()) + if !got.Equals(wantProfiles[p.Name()]) { + t.Fatalf("Prefs for profile %q =\n got=%+v\nwant=%v", p.Name(), got.Pretty(), wantProfiles[p.Name()].Pretty()) } } } @@ -422,7 +422,7 @@ func TestProfileManagement(t *testing.T) { checkProfiles(t) t.Logf("Delete default profile") - if err := pm.DeleteProfile(pm.findProfileByName("user@1.example.com").ID); err != nil { + if err := pm.DeleteProfile(pm.ProfileIDForName("user@1.example.com")); err != nil { t.Fatal(err) } delete(wantProfiles, "user@1.example.com") @@ -506,9 +506,9 @@ func TestProfileManagementWindows(t *testing.T) { checkProfiles := func(t *testing.T) { t.Helper() prof := pm.CurrentProfile() - t.Logf("\tCurrentProfile = %q", prof) - if prof.Name != wantCurProfile { - t.Fatalf("CurrentProfile = %q; want %q", prof, wantCurProfile) + t.Logf("\tCurrentProfile = %q", prof.Name()) + if prof.Name() != wantCurProfile { + t.Fatalf("CurrentProfile = %q; want %q", prof.Name(), wantCurProfile) } if p := pm.CurrentPrefs(); !p.Equals(wantProfiles[wantCurProfile]) { t.Fatalf("CurrentPrefs = %+v; want %+v", p.Pretty(), wantProfiles[wantCurProfile].Pretty()) diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 63cb2ef55..638b26a36 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -318,7 +318,7 @@ func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string bs = j } - profileID := b.pm.CurrentProfile().ID + profileID := b.pm.CurrentProfile().ID() confKey := ipn.ServeConfigKey(profileID) if err := b.store.WriteState(confKey, bs); err != nil { return fmt.Errorf("writing ServeConfig to StateStore: %w", err) diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index eb8169390..7f457e560 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -898,7 +898,7 @@ func newTestBackend(t *testing.T) *LocalBackend { b.SetVarRoot(dir) pm := must.Get(newProfileManager(new(mem.Store), logf, new(health.Tracker))) - pm.currentProfile = &ipn.LoginProfile{ID: "id0"} + pm.currentProfile = (&ipn.LoginProfile{ID: "id0"}).View() b.pm = pm b.netMap = &netmap.NetworkMap{ diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index e6b537d8f..154d309a1 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -2601,8 +2601,8 @@ func (h *Handler) serveProfiles(w http.ResponseWriter, r *http.Request) { switch r.Method { case httpm.GET: profiles := h.b.ListProfiles() - profileIndex := slices.IndexFunc(profiles, func(p ipn.LoginProfile) bool { - return p.ID == profileID + profileIndex := slices.IndexFunc(profiles, func(p ipn.LoginProfileView) bool { + return p.ID() == profileID }) if profileIndex == -1 { http.Error(w, "Profile not 
found", http.StatusNotFound) From 081595de6310db7f5dff71502e266caf3e203ada Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 29 Jan 2025 15:34:20 -0600 Subject: [PATCH 0435/1708] ipn/{ipnauth, ipnserver}: extend the ipnauth.Actor interface with a CheckProfileAccess method The implementations define it to verify whether the actor has the requested access to a login profile. Updates #14823 Signed-off-by: Nick Khyl --- ipn/ipnauth/access.go | 8 ++++++++ ipn/ipnauth/actor.go | 4 ++++ ipn/ipnauth/test_actor.go | 8 +++++++- ipn/ipnserver/actor.go | 8 ++++++++ 4 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 ipn/ipnauth/access.go diff --git a/ipn/ipnauth/access.go b/ipn/ipnauth/access.go new file mode 100644 index 000000000..4d0aeb850 --- /dev/null +++ b/ipn/ipnauth/access.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnauth + +// ProfileAccess is a bitmask representing the requested, required, or granted +// access rights to an [ipn.LoginProfile]. +type ProfileAccess uint32 diff --git a/ipn/ipnauth/actor.go b/ipn/ipnauth/actor.go index 040d9b522..2c713e441 100644 --- a/ipn/ipnauth/actor.go +++ b/ipn/ipnauth/actor.go @@ -27,6 +27,10 @@ type Actor interface { // a connected LocalAPI client. Otherwise, it returns a zero value and false. ClientID() (_ ClientID, ok bool) + // CheckProfileAccess checks whether the actor has the requested access rights + // to the specified Tailscale profile. It returns an error if the access is denied. + CheckProfileAccess(profile ipn.LoginProfileView, requestedAccess ProfileAccess) error + // IsLocalSystem reports whether the actor is the Windows' Local System account. // // Deprecated: this method exists for compatibility with the current (as of 2024-08-27) diff --git a/ipn/ipnauth/test_actor.go b/ipn/ipnauth/test_actor.go index d38aa2196..0d4a0e37d 100644 --- a/ipn/ipnauth/test_actor.go +++ b/ipn/ipnauth/test_actor.go @@ -4,6 +4,8 @@ package ipnauth import ( + "errors" + "tailscale.com/ipn" ) @@ -17,7 +19,6 @@ type TestActor struct { CID ClientID // non-zero if the actor represents a connected LocalAPI client LocalSystem bool // whether the actor represents the special Local System account on Windows LocalAdmin bool // whether the actor has local admin access - } // UserID implements [Actor]. @@ -29,6 +30,11 @@ func (a *TestActor) Username() (string, error) { return a.Name, a.NameErr } // ClientID implements [Actor]. func (a *TestActor) ClientID() (_ ClientID, ok bool) { return a.CID, a.CID != NoClientID } +// CheckProfileAccess implements [Actor]. +func (a *TestActor) CheckProfileAccess(profile ipn.LoginProfileView, _ ProfileAccess) error { + return errors.New("profile access denied") +} + // IsLocalSystem implements [Actor]. func (a *TestActor) IsLocalSystem() bool { return a.LocalSystem } diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 2df8986c3..8f743a3eb 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -58,6 +58,14 @@ func newActor(logf logger.Logf, c net.Conn) (*actor, error) { return &actor{logf: logf, ci: ci, clientID: clientID, isLocalSystem: connIsLocalSystem(ci)}, nil } +// CheckProfileAccess implements [ipnauth.Actor]. 
+func (a *actor) CheckProfileAccess(profile ipn.LoginProfileView, requestedAccess ipnauth.ProfileAccess) error { + if profile.LocalUserID() != a.UserID() { + return errors.New("the target profile does not belong to the user") + } + return errors.New("the requested operation is not allowed") +} + // IsLocalSystem implements [ipnauth.Actor]. func (a *actor) IsLocalSystem() bool { return a.isLocalSystem From 535a3dbebd7b75dc59eb042888bf828205b97d8b Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 29 Jan 2025 15:49:31 -0600 Subject: [PATCH 0436/1708] ipn/ipnauth: implement an Actor representing tailscaled itself Updates #14823 Signed-off-by: Nick Khyl --- ipn/ipnauth/self.go | 46 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 ipn/ipnauth/self.go diff --git a/ipn/ipnauth/self.go b/ipn/ipnauth/self.go new file mode 100644 index 000000000..d8ece45c5 --- /dev/null +++ b/ipn/ipnauth/self.go @@ -0,0 +1,46 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnauth + +import ( + "tailscale.com/ipn" +) + +// Self is a caller identity that represents the tailscaled itself and therefore +// has unlimited access. +var Self Actor = unrestricted{} + +// unrestricted is an [Actor] that has unlimited access to the currently running +// tailscaled instance. It's typically used for operations performed by tailscaled +// on its own, or upon a request from the control plane, rather on behalf of a user. +type unrestricted struct{} + +// UserID implements [Actor]. +func (u unrestricted) UserID() ipn.WindowsUserID { return "" } + +// Username implements [Actor]. +func (u unrestricted) Username() (string, error) { return "", nil } + +// ClientID implements [Actor]. +// It always returns (NoClientID, false) because the tailscaled itself +// is not a connected LocalAPI client. +func (u unrestricted) ClientID() (_ ClientID, ok bool) { return NoClientID, false } + +// CheckProfileAccess implements [Actor]. +func (u unrestricted) CheckProfileAccess(_ ipn.LoginProfileView, _ ProfileAccess) error { + // Unrestricted access to all profiles. + return nil +} + +// IsLocalSystem implements [Actor]. +// +// Deprecated: this method exists for compatibility with the current (as of 2025-01-28) +// permission model and will be removed as we progress on tailscale/corp#18342. +func (u unrestricted) IsLocalSystem() bool { return false } + +// IsLocalAdmin implements [Actor]. +// +// Deprecated: this method exists for compatibility with the current (as of 2025-01-28) +// permission model and will be removed as we progress on tailscale/corp#18342. +func (u unrestricted) IsLocalAdmin(operatorUID string) bool { return false } From 02ad21717fdc8ef9e95bccb4cda7fa77a7d2cd4e Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 30 Jan 2025 18:29:02 -0600 Subject: [PATCH 0437/1708] ipn/ipn{auth,server,local}: initial support for the always-on mode In this PR, we update LocalBackend to set WantRunning=true when applying policy settings to the current profile's prefs, if the "always-on" mode is enabled. We also implement a new (*LocalBackend).EditPrefsAs() method, which is like EditPrefs but accepts an actor (e.g., a LocalAPI client's identity) that initiated the change. If WantRunning is being set to false, the new EditPrefsAs method checks whether the actor has ipnauth.Disconnect access to the profile and propagates an error if they do not. 
Finally, we update (*ipnserver.actor).CheckProfileAccess to allow a disconnect only if the "always-on" mode is not enabled by the AlwaysOn policy setting. This is not a comprehensive solution to the "always-on" mode across platforms, as instead of disconnecting a user could achieve the same effect by creating a new empty profile, initiating a reauth, or by deleting the profile. These are the things we should address in future PRs. Updates #14823 Signed-off-by: Nick Khyl --- ipn/ipnauth/access.go | 6 ++++++ ipn/ipnlocal/local.go | 25 +++++++++++++++++++++++++ ipn/ipnserver/actor.go | 13 ++++++++++++- ipn/localapi/localapi.go | 2 +- util/syspolicy/policy_keys.go | 10 ++++++++++ 5 files changed, 54 insertions(+), 2 deletions(-) diff --git a/ipn/ipnauth/access.go b/ipn/ipnauth/access.go index 4d0aeb850..53934c64b 100644 --- a/ipn/ipnauth/access.go +++ b/ipn/ipnauth/access.go @@ -6,3 +6,9 @@ package ipnauth // ProfileAccess is a bitmask representing the requested, required, or granted // access rights to an [ipn.LoginProfile]. type ProfileAccess uint32 + +// Define access rights that might be granted or denied on a per-profile basis. +const ( + // Disconnect is required to disconnect (or switch from) a Tailscale profile. + Disconnect = ProfileAccess(1 << iota) +) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5766365b1..fc4bd6e4e 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1795,6 +1795,11 @@ func applySysPolicy(prefs *ipn.Prefs, lastSuggestedExitNode tailcfg.StableNodeID } } + if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); alwaysOn && !prefs.WantRunning { + prefs.WantRunning = true + anyChange = true + } + for _, opt := range preferencePolicies { if po, err := syspolicy.GetPreferenceOption(opt.key); err == nil { curVal := opt.get(prefs.View()) @@ -3984,7 +3989,15 @@ func (b *LocalBackend) MaybeClearAppConnector(mp *ipn.MaskedPrefs) error { return err } +// EditPrefs applies the changes in mp to the current prefs, +// acting as the tailscaled itself rather than a specific user. func (b *LocalBackend) EditPrefs(mp *ipn.MaskedPrefs) (ipn.PrefsView, error) { + return b.EditPrefsAs(mp, ipnauth.Self) +} + +// EditPrefsAs is like EditPrefs, but makes the change as the specified actor. +// It returns an error if the actor is not allowed to make the change. +func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ipn.PrefsView, error) { if mp.SetsInternal() { return ipn.PrefsView{}, errors.New("can't set Internal fields") } @@ -3995,8 +4008,20 @@ func (b *LocalBackend) EditPrefs(mp *ipn.MaskedPrefs) (ipn.PrefsView, error) { mp.InternalExitNodePriorSet = true } + // Acquire the lock before checking the profile access to prevent + // TOCTOU issues caused by the current profile changing between the + // check and the actual edit. unlock := b.lockAndGetUnlock() defer unlock() + if mp.WantRunningSet && !mp.WantRunning && b.pm.CurrentPrefs().WantRunning() { + if err := actor.CheckProfileAccess(b.pm.CurrentProfile(), ipnauth.Disconnect); err != nil { + return ipn.PrefsView{}, err + } + + // TODO(nickkhyl): check the ReconnectAfter policy here. If configured, + // start a timer to automatically reconnect after the specified duration. 
+ } + return b.editPrefsLockedOnEntry(mp, unlock) } diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 8f743a3eb..7ff96699a 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -17,6 +17,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/ctxkey" "tailscale.com/util/osuser" + "tailscale.com/util/syspolicy" "tailscale.com/version" ) @@ -63,7 +64,17 @@ func (a *actor) CheckProfileAccess(profile ipn.LoginProfileView, requestedAccess if profile.LocalUserID() != a.UserID() { return errors.New("the target profile does not belong to the user") } - return errors.New("the requested operation is not allowed") + switch requestedAccess { + case ipnauth.Disconnect: + if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); alwaysOn { + // TODO(nickkhyl): check if disconnecting with justifications is allowed + // and whether a justification is included in the request. + return errors.New("profile access denied: always-on mode is enabled") + } + return nil + default: + return errors.New("the requested operation is not allowed") + } } // IsLocalSystem implements [ipnauth.Actor]. diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 154d309a1..c75f732b6 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -1381,7 +1381,7 @@ func (h *Handler) servePrefs(w http.ResponseWriter, r *http.Request) { return } var err error - prefs, err = h.b.EditPrefs(mp) + prefs, err = h.b.EditPrefsAs(mp, h.Actor) if err != nil { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusBadRequest) diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index 35a36130e..d970a4a3c 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -26,6 +26,15 @@ const ( ControlURL Key = "LoginURL" // default ""; if blank, ipn uses ipn.DefaultControlURL. LogTarget Key = "LogTarget" // default ""; if blank logging uses logtail.DefaultHost. Tailnet Key = "Tailnet" // default ""; if blank, no tailnet name is sent to the server. + + // AlwaysOn is a boolean key that controls whether Tailscale + // should always remain in a connected state, and the user should + // not be able to disconnect at their discretion. + // + // Warning: This policy setting is experimental and may change or be removed in the future. + // It may also not be fully supported by all Tailscale clients until it is out of experimental status. + AlwaysOn Key = "AlwaysOn" + // ExitNodeID is the exit node's node id. default ""; if blank, no exit node is forced. // Exit node ID takes precedence over exit node IP. // To find the node ID, go to /api.md#device. 
@@ -139,6 +148,7 @@ const ( var implicitDefinitions = []*setting.Definition{ // Device policy settings (can only be configured on a per-device basis): setting.NewDefinition(AllowedSuggestedExitNodes, setting.DeviceSetting, setting.StringListValue), + setting.NewDefinition(AlwaysOn, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(ApplyUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), setting.NewDefinition(AuthKey, setting.DeviceSetting, setting.StringValue), setting.NewDefinition(CheckUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), From 0a51bbc7651a2414cde4b1e40ad726f7f19e3dc7 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 31 Jan 2025 11:22:15 -0600 Subject: [PATCH 0438/1708] ipn/ipnauth,util/syspolicy: improve comments Updates #cleanup Updates #14823 Signed-off-by: Nick Khyl --- ipn/ipnauth/access.go | 5 ++++- ipn/ipnauth/actor.go | 5 +++-- util/syspolicy/policy_keys.go | 1 + 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/ipn/ipnauth/access.go b/ipn/ipnauth/access.go index 53934c64b..74c663922 100644 --- a/ipn/ipnauth/access.go +++ b/ipn/ipnauth/access.go @@ -5,7 +5,10 @@ package ipnauth // ProfileAccess is a bitmask representing the requested, required, or granted // access rights to an [ipn.LoginProfile]. -type ProfileAccess uint32 +// +// It is not to be written to disk or transmitted over the network in its integer form, +// but rather serialized to a string or other format if ever needed. +type ProfileAccess uint // Define access rights that might be granted or denied on a per-profile basis. const ( diff --git a/ipn/ipnauth/actor.go b/ipn/ipnauth/actor.go index 2c713e441..92e3b202f 100644 --- a/ipn/ipnauth/actor.go +++ b/ipn/ipnauth/actor.go @@ -27,8 +27,9 @@ type Actor interface { // a connected LocalAPI client. Otherwise, it returns a zero value and false. ClientID() (_ ClientID, ok bool) - // CheckProfileAccess checks whether the actor has the requested access rights - // to the specified Tailscale profile. It returns an error if the access is denied. + // CheckProfileAccess checks whether the actor has the necessary access rights + // to perform a given action on the specified Tailscale profile. + // It returns an error if access is denied. CheckProfileAccess(profile ipn.LoginProfileView, requestedAccess ProfileAccess) error // IsLocalSystem reports whether the actor is the Windows' Local System account. diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index d970a4a3c..ec5e83b18 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -33,6 +33,7 @@ const ( // // Warning: This policy setting is experimental and may change or be removed in the future. // It may also not be fully supported by all Tailscale clients until it is out of experimental status. + // See tailscale/corp#26247, tailscale/corp#26248 and tailscale/corp#26249 for more information. AlwaysOn Key = "AlwaysOn" // ExitNodeID is the exit node's node id. default ""; if blank, no exit node is forced. From 2e95313b8bb08e4dca1c0a27854fb3d65d40194f Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Fri, 31 Jan 2025 12:19:22 -0600 Subject: [PATCH 0439/1708] ssh,tempfork/gliderlabs/ssh: replace github.com/tailscale/golang-x-crypto/ssh with golang.org/x/crypto/ssh The upstream crypto package now supports sending banners at any time during authentication, so the Tailscale fork of crypto/ssh is no longer necessary. 
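In condensed form, the upstream hooks this relies on look roughly like the following (a sketch distilled from the ssh-auth-none-demo change below; the banner text and the "denyme" check are placeholders):

    package example

    import (
        "errors"

        gossh "golang.org/x/crypto/ssh"
    )

    // newServerConfig shows the upstream replacements for the forked APIs:
    // PreAuthConnCallback hands over a ServerPreAuthConn that can send auth
    // banners at any time during authentication, and BannerError carries a
    // user-visible message when authentication is rejected.
    func newServerConfig() *gossh.ServerConfig {
        var pre gossh.ServerPreAuthConn
        return &gossh.ServerConfig{
            PreAuthConnCallback: func(conn gossh.ServerPreAuthConn) { pre = conn },
            NoClientAuth:        true, // required for NoClientAuthCallback to run
            NoClientAuthCallback: func(cm gossh.ConnMetadata) (*gossh.Permissions, error) {
                pre.SendAuthBanner("# doing none auth\r\n")
                if cm.User() == "denyme" { // placeholder for a real policy check
                    return nil, &gossh.BannerError{
                        Err:     errors.New("access denied"),
                        Message: "denyme is not allowed to access this machine\n",
                    }
                }
                return &gossh.Permissions{}, nil
            },
        }
    }
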
github.com/tailscale/golang-x-crypto is still needed for some custom ACME autocert functionality. tempfork/gliderlabs is still necessary because of a few other customizations, mostly related to TTY handling. Originally implemented in 46fd4e58a27495263336b86ee961ee28d8c332b7, which was reverted in b60f6b849af1fae1cf343be98f7fb1714c9ea165 to keep the change out of v1.80. Updates #8593 Signed-off-by: Percy Wegmann --- cmd/k8s-operator/depaware.txt | 11 +- cmd/ssh-auth-none-demo/ssh-auth-none-demo.go | 24 +- cmd/tailscaled/depaware.txt | 7 +- cmd/tailscaled/deps_test.go | 1 - go.mod | 2 +- go.sum | 4 +- ipn/ipnlocal/ssh.go | 2 +- ssh/tailssh/tailssh.go | 310 ++++++++----------- ssh/tailssh/tailssh_integration_test.go | 2 +- ssh/tailssh/tailssh_test.go | 5 +- tempfork/gliderlabs/ssh/agent.go | 2 +- tempfork/gliderlabs/ssh/context.go | 11 +- tempfork/gliderlabs/ssh/options.go | 2 +- tempfork/gliderlabs/ssh/options_test.go | 2 +- tempfork/gliderlabs/ssh/server.go | 2 +- tempfork/gliderlabs/ssh/session.go | 2 +- tempfork/gliderlabs/ssh/session_test.go | 2 +- tempfork/gliderlabs/ssh/ssh.go | 4 +- tempfork/gliderlabs/ssh/tcpip.go | 2 +- tempfork/gliderlabs/ssh/tcpip_test.go | 2 +- tempfork/gliderlabs/ssh/util.go | 2 +- tempfork/gliderlabs/ssh/wrap.go | 2 +- 22 files changed, 172 insertions(+), 231 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index e32fd4a2b..972dbfc2c 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -197,9 +197,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - LD github.com/tailscale/golang-x-crypto/internal/poly1305 from github.com/tailscale/golang-x-crypto/ssh - LD github.com/tailscale/golang-x-crypto/ssh from tailscale.com/ipn/ipnlocal - LD github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf from github.com/tailscale/golang-x-crypto/ssh github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ @@ -986,12 +983,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ - LD golang.org/x/crypto/blowfish from github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf - golang.org/x/crypto/chacha20 from github.com/tailscale/golang-x-crypto/ssh+ + LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf + golang.org/x/crypto/chacha20 from golang.org/x/crypto/ssh+ golang.org/x/crypto/chacha20poly1305 from crypto/tls+ golang.org/x/crypto/cryptobyte from crypto/ecdsa+ golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ - golang.org/x/crypto/curve25519 from github.com/tailscale/golang-x-crypto/ssh+ + golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ golang.org/x/crypto/hkdf from crypto/tls+ golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ @@ -1000,6 +997,8 @@ 
tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ + LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal + LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+ golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+ diff --git a/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go b/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go index ee929299a..39af584ec 100644 --- a/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go +++ b/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go @@ -6,6 +6,9 @@ // highlight the unique parts of the Tailscale SSH server so SSH // client authors can hit it easily and fix their SSH clients without // needing to set up Tailscale and Tailscale SSH. +// +// Connections are allowed using any username except for "denyme". Connecting as +// "denyme" will result in an authentication failure with error message. package main import ( @@ -16,6 +19,7 @@ import ( "crypto/rsa" "crypto/x509" "encoding/pem" + "errors" "flag" "fmt" "io" @@ -24,7 +28,7 @@ import ( "path/filepath" "time" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" "tailscale.com/tempfork/gliderlabs/ssh" ) @@ -62,13 +66,21 @@ func main() { Handler: handleSessionPostSSHAuth, ServerConfigCallback: func(ctx ssh.Context) *gossh.ServerConfig { start := time.Now() + var spac gossh.ServerPreAuthConn return &gossh.ServerConfig{ - NextAuthMethodCallback: func(conn gossh.ConnMetadata, prevErrors []error) []string { - return []string{"tailscale"} + PreAuthConnCallback: func(conn gossh.ServerPreAuthConn) { + spac = conn }, NoClientAuth: true, // required for the NoClientAuthCallback to run NoClientAuthCallback: func(cm gossh.ConnMetadata) (*gossh.Permissions, error) { - cm.SendAuthBanner(fmt.Sprintf("# Banner: doing none auth at %v\r\n", time.Since(start))) + spac.SendAuthBanner(fmt.Sprintf("# Banner: doing none auth at %v\r\n", time.Since(start))) + + if cm.User() == "denyme" { + return nil, &gossh.BannerError{ + Err: errors.New("denying access"), + Message: "denyme is not allowed to access this machine\n", + } + } totalBanners := 2 if cm.User() == "banners" { @@ -77,9 +89,9 @@ func main() { for banner := 2; banner <= totalBanners; banner++ { time.Sleep(time.Second) if banner == totalBanners { - cm.SendAuthBanner(fmt.Sprintf("# Banner%d: access granted at %v\r\n", banner, time.Since(start))) + spac.SendAuthBanner(fmt.Sprintf("# Banner%d: access granted at %v\r\n", banner, time.Since(start))) } else { - cm.SendAuthBanner(fmt.Sprintf("# Banner%d at %v\r\n", banner, time.Since(start))) + spac.SendAuthBanner(fmt.Sprintf("# Banner%d at %v\r\n", banner, time.Since(start))) } } return nil, nil diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index a7ad83818..a6fae54ff 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -152,9 +152,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - 
LD github.com/tailscale/golang-x-crypto/internal/poly1305 from github.com/tailscale/golang-x-crypto/ssh - LD github.com/tailscale/golang-x-crypto/ssh from tailscale.com/ipn/ipnlocal+ - LD github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf from github.com/tailscale/golang-x-crypto/ssh github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ @@ -439,12 +436,12 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ - LD golang.org/x/crypto/blowfish from github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf+ + LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ golang.org/x/crypto/chacha20poly1305 from crypto/tls+ golang.org/x/crypto/cryptobyte from crypto/ecdsa+ golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ - golang.org/x/crypto/curve25519 from github.com/tailscale/golang-x-crypto/ssh+ + golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ golang.org/x/crypto/hkdf from crypto/tls+ golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 2b4bc280d..7f06abc6c 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -17,7 +17,6 @@ func TestOmitSSH(t *testing.T) { Tags: "ts_omit_ssh", BadDeps: map[string]string{ "tailscale.com/ssh/tailssh": msg, - "golang.org/x/crypto/ssh": msg, "tailscale.com/sessionrecording": msg, "github.com/anmitsu/go-shlex": msg, "github.com/creack/pty": msg, diff --git a/go.mod b/go.mod index 6a5080585..4d50f5985 100644 --- a/go.mod +++ b/go.mod @@ -94,7 +94,7 @@ require ( go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.32.0 + golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 golang.org/x/mod v0.22.0 golang.org/x/net v0.34.0 diff --git a/go.sum b/go.sum index c38c96029..7e21f7c20 100644 --- a/go.sum +++ b/go.sum @@ -1058,8 +1058,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= -golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 h1:Z+Zg+aXJYq6f4TK2E4H+vZkQ4dJAWnInXDR6hM9znxo= +golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp 
v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= diff --git a/ipn/ipnlocal/ssh.go b/ipn/ipnlocal/ssh.go index 383d03f5a..47a74e282 100644 --- a/ipn/ipnlocal/ssh.go +++ b/ipn/ipnlocal/ssh.go @@ -24,8 +24,8 @@ import ( "strings" "sync" - "github.com/tailscale/golang-x-crypto/ssh" "go4.org/mem" + "golang.org/x/crypto/ssh" "tailscale.com/tailcfg" "tailscale.com/util/lineiter" "tailscale.com/util/mak" diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index 7f21ccd11..638ff99b8 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -29,7 +29,7 @@ import ( "syscall" "time" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" "tailscale.com/envknob" "tailscale.com/ipn/ipnlocal" "tailscale.com/logtail/backoff" @@ -198,8 +198,11 @@ func (srv *server) OnPolicyChange() { // Setup and discover server info // - ServerConfigCallback // -// Do the user auth -// - NoClientAuthHandler +// Get access to a ServerPreAuthConn (useful for sending banners) +// +// Do the user auth with a NoClientAuthCallback. If user specified +// a username ending in "+password", follow this with password auth +// (to work around buggy SSH clients that don't work with noauth). // // Once auth is done, the conn can be multiplexed with multiple sessions and // channels concurrently. At which point any of the following can be called @@ -219,15 +222,12 @@ type conn struct { idH string connID string // ID that's shared with control - // anyPasswordIsOkay is whether the client is authorized but has requested - // password-based auth to work around their buggy SSH client. When set, we - // accept any password in the PasswordHandler. - anyPasswordIsOkay bool // set by NoClientAuthCallback + // spac is a [gossh.ServerPreAuthConn] used for sending auth banners. + // Banners cannot be sent after auth completes. + spac gossh.ServerPreAuthConn - action0 *tailcfg.SSHAction // set by doPolicyAuth; first matching action - currentAction *tailcfg.SSHAction // set by doPolicyAuth, updated by resolveNextAction - finalAction *tailcfg.SSHAction // set by doPolicyAuth or resolveNextAction - finalActionErr error // set by doPolicyAuth or resolveNextAction + action0 *tailcfg.SSHAction // set by clientAuth + finalAction *tailcfg.SSHAction // set by clientAuth info *sshConnInfo // set by setInfo localUser *userMeta // set by doPolicyAuth @@ -254,141 +254,142 @@ func (c *conn) vlogf(format string, args ...any) { } } -// isAuthorized walks through the action chain and returns nil if the connection -// is authorized. If the connection is not authorized, it returns -// errDenied. If the action chain resolution fails, it returns the -// resolution error. -func (c *conn) isAuthorized(ctx ssh.Context) error { - action := c.currentAction - for { - if action.Accept { - return nil - } - if action.Reject || action.HoldAndDelegate == "" { - return errDenied - } - var err error - action, err = c.resolveNextAction(ctx) - if err != nil { - return err - } - if action.Message != "" { - if err := ctx.SendAuthBanner(action.Message); err != nil { - return err - } - } +// errDenied is returned by auth callbacks when a connection is denied by the +// policy. It returns a gossh.BannerError to make sure the message gets +// displayed as an auth banner. 
+func errDenied(message string) error { + if message == "" { + message = "tailscale: access denied" + } + return &gossh.BannerError{ + Message: message, } } -// errDenied is returned by auth callbacks when a connection is denied by the -// policy. -var errDenied = errors.New("ssh: access denied") +// bannerError creates a gossh.BannerError that will result in the given +// message being displayed to the client. If err != nil, this also logs +// message:error. The contents of err is not leaked to clients in the banner. +func (c *conn) bannerError(message string, err error) error { + if err != nil { + c.logf("%s: %s", message, err) + } + return &gossh.BannerError{ + Err: err, + Message: fmt.Sprintf("tailscale: %s", message), + } +} -// NoClientAuthCallback implements gossh.NoClientAuthCallback and is called by -// the ssh.Server when the client first connects with the "none" -// authentication method. +// clientAuth is responsible for performing client authentication. // -// It is responsible for continuing policy evaluation from BannerCallback (or -// starting it afresh). It returns an error if the policy evaluation fails, or -// if the decision is "reject" -// -// It either returns nil (accept) or errDenied (reject). The errors may be wrapped. -func (c *conn) NoClientAuthCallback(ctx ssh.Context) error { +// If policy evaluation fails, it returns an error. +// If access is denied, it returns an error. +func (c *conn) clientAuth(cm gossh.ConnMetadata) (*gossh.Permissions, error) { if c.insecureSkipTailscaleAuth { - return nil - } - if err := c.doPolicyAuth(ctx); err != nil { - return err - } - if err := c.isAuthorized(ctx); err != nil { - return err + return &gossh.Permissions{}, nil } - // Let users specify a username ending in +password to force password auth. - // This exists for buggy SSH clients that get confused by success from - // "none" auth. - if strings.HasSuffix(ctx.User(), forcePasswordSuffix) { - c.anyPasswordIsOkay = true - return errors.New("any password please") // not shown to users + if err := c.setInfo(cm); err != nil { + return nil, c.bannerError("failed to get connection info", err) } - return nil -} -func (c *conn) nextAuthMethodCallback(cm gossh.ConnMetadata, prevErrors []error) (nextMethod []string) { - switch { - case c.anyPasswordIsOkay: - nextMethod = append(nextMethod, "password") + action, localUser, acceptEnv, err := c.evaluatePolicy() + if err != nil { + return nil, c.bannerError("failed to evaluate SSH policy", err) } - // The fake "tailscale" method is always appended to next so OpenSSH renders - // that in parens as the final failure. (It also shows up in "ssh -v", etc) - nextMethod = append(nextMethod, "tailscale") - return -} - -// fakePasswordHandler is our implementation of the PasswordHandler hook that -// checks whether the user's password is correct. But we don't actually use -// passwords. This exists only for when the user's username ends in "+password" -// to signal that their SSH client is buggy and gets confused by auth type -// "none" succeeding and they want our SSH server to require a dummy password -// prompt instead. We then accept any password since we've already authenticated -// & authorized them. -func (c *conn) fakePasswordHandler(ctx ssh.Context, password string) bool { - return c.anyPasswordIsOkay -} + c.action0 = action -// doPolicyAuth verifies that conn can proceed. -// It returns nil if the matching policy action is Accept or -// HoldAndDelegate. Otherwise, it returns errDenied. 
-func (c *conn) doPolicyAuth(ctx ssh.Context) error { - if err := c.setInfo(ctx); err != nil { - c.logf("failed to get conninfo: %v", err) - return errDenied - } - a, localUser, acceptEnv, err := c.evaluatePolicy() - if err != nil { - return fmt.Errorf("%w: %v", errDenied, err) - } - c.action0 = a - c.currentAction = a - c.acceptEnv = acceptEnv - if a.Message != "" { - if err := ctx.SendAuthBanner(a.Message); err != nil { - return fmt.Errorf("SendBanner: %w", err) - } - } - if a.Accept || a.HoldAndDelegate != "" { - if a.Accept { - c.finalAction = a - } + if action.Accept || action.HoldAndDelegate != "" { + // Immediately look up user information for purposes of generating + // hold and delegate URL (if necessary). lu, err := userLookup(localUser) if err != nil { - c.logf("failed to look up %v: %v", localUser, err) - ctx.SendAuthBanner(fmt.Sprintf("failed to look up %v\r\n", localUser)) - return err + return nil, c.bannerError(fmt.Sprintf("failed to look up local user %q ", localUser), err) } gids, err := lu.GroupIds() if err != nil { - c.logf("failed to look up local user's group IDs: %v", err) - return err + return nil, c.bannerError("failed to look up local user's group IDs", err) } c.userGroupIDs = gids c.localUser = lu - return nil + c.acceptEnv = acceptEnv } - if a.Reject { - c.finalAction = a - return errDenied + + for { + switch { + case action.Accept: + metricTerminalAccept.Add(1) + if action.Message != "" { + if err := c.spac.SendAuthBanner(action.Message); err != nil { + return nil, fmt.Errorf("error sending auth welcome message: %w", err) + } + } + c.finalAction = action + return &gossh.Permissions{}, nil + case action.Reject: + metricTerminalReject.Add(1) + c.finalAction = action + return nil, errDenied(action.Message) + case action.HoldAndDelegate != "": + if action.Message != "" { + if err := c.spac.SendAuthBanner(action.Message); err != nil { + return nil, fmt.Errorf("error sending hold and delegate message: %w", err) + } + } + + url := action.HoldAndDelegate + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer cancel() + + metricHolds.Add(1) + url = c.expandDelegateURLLocked(url) + + var err error + action, err = c.fetchSSHAction(ctx, url) + if err != nil { + metricTerminalFetchError.Add(1) + return nil, c.bannerError("failed to fetch next SSH action", fmt.Errorf("fetch failed from %s: %w", url, err)) + } + default: + metricTerminalMalformed.Add(1) + return nil, c.bannerError("reached Action that had neither Accept, Reject, nor HoldAndDelegate", nil) + } } - // Shouldn't get here, but: - return errDenied } // ServerConfig implements ssh.ServerConfigCallback. func (c *conn) ServerConfig(ctx ssh.Context) *gossh.ServerConfig { return &gossh.ServerConfig{ - NoClientAuth: true, // required for the NoClientAuthCallback to run - NextAuthMethodCallback: c.nextAuthMethodCallback, + PreAuthConnCallback: func(spac gossh.ServerPreAuthConn) { + c.spac = spac + }, + NoClientAuth: true, // required for the NoClientAuthCallback to run + NoClientAuthCallback: func(cm gossh.ConnMetadata) (*gossh.Permissions, error) { + // First perform client authentication, which can potentially + // involve multiple steps (for example prompting user to log in to + // Tailscale admin panel to confirm identity). + perms, err := c.clientAuth(cm) + if err != nil { + return nil, err + } + + // Authentication succeeded. Buggy SSH clients get confused by + // success from the "none" auth method. 
As a workaround, let users + // specify a username ending in "+password" to force password auth. + // The actual value of the password doesn't matter. + if strings.HasSuffix(cm.User(), forcePasswordSuffix) { + return nil, &gossh.PartialSuccessError{ + Next: gossh.ServerAuthCallbacks{ + PasswordCallback: func(_ gossh.ConnMetadata, password []byte) (*gossh.Permissions, error) { + return &gossh.Permissions{}, nil + }, + }, + } + } + + return perms, nil + }, } } @@ -399,7 +400,7 @@ func (srv *server) newConn() (*conn, error) { // Stop accepting new connections. // Connections in the auth phase are handled in handleConnPostSSHAuth. // Existing sessions are terminated by Shutdown. - return nil, errDenied + return nil, errDenied("tailscale: server is shutting down") } srv.mu.Unlock() c := &conn{srv: srv} @@ -410,9 +411,6 @@ func (srv *server) newConn() (*conn, error) { Version: "Tailscale", ServerConfigCallback: c.ServerConfig, - NoClientAuthHandler: c.NoClientAuthCallback, - PasswordHandler: c.fakePasswordHandler, - Handler: c.handleSessionPostSSHAuth, LocalPortForwardingCallback: c.mayForwardLocalPortTo, ReversePortForwardingCallback: c.mayReversePortForwardTo, @@ -523,16 +521,16 @@ func toIPPort(a net.Addr) (ipp netip.AddrPort) { return netip.AddrPortFrom(tanetaddr.Unmap(), uint16(ta.Port)) } -// connInfo returns a populated sshConnInfo from the provided arguments, +// connInfo populates the sshConnInfo from the provided arguments, // validating only that they represent a known Tailscale identity. -func (c *conn) setInfo(ctx ssh.Context) error { +func (c *conn) setInfo(cm gossh.ConnMetadata) error { if c.info != nil { return nil } ci := &sshConnInfo{ - sshUser: strings.TrimSuffix(ctx.User(), forcePasswordSuffix), - src: toIPPort(ctx.RemoteAddr()), - dst: toIPPort(ctx.LocalAddr()), + sshUser: strings.TrimSuffix(cm.User(), forcePasswordSuffix), + src: toIPPort(cm.RemoteAddr()), + dst: toIPPort(cm.LocalAddr()), } if !tsaddr.IsTailscaleIP(ci.dst.Addr()) { return fmt.Errorf("tailssh: rejecting non-Tailscale local address %v", ci.dst) @@ -547,7 +545,7 @@ func (c *conn) setInfo(ctx ssh.Context) error { ci.node = node ci.uprof = uprof - c.idH = ctx.SessionID() + c.idH = string(cm.SessionID()) c.info = ci c.logf("handling conn: %v", ci.String()) return nil @@ -594,62 +592,6 @@ func (c *conn) handleSessionPostSSHAuth(s ssh.Session) { ss.run() } -// resolveNextAction starts at c.currentAction and makes it way through the -// action chain one step at a time. An action without a HoldAndDelegate is -// considered the final action. Once a final action is reached, this function -// will keep returning that action. It updates c.currentAction to the next -// action in the chain. When the final action is reached, it also sets -// c.finalAction to the final action. -func (c *conn) resolveNextAction(sctx ssh.Context) (action *tailcfg.SSHAction, err error) { - if c.finalAction != nil || c.finalActionErr != nil { - return c.finalAction, c.finalActionErr - } - - defer func() { - if action != nil { - c.currentAction = action - if action.Accept || action.Reject { - c.finalAction = action - } - } - if err != nil { - c.finalActionErr = err - } - }() - - ctx, cancel := context.WithCancel(sctx) - defer cancel() - - // Loop processing/fetching Actions until one reaches a - // terminal state (Accept, Reject, or invalid Action), or - // until fetchSSHAction times out due to the context being - // done (client disconnect) or its 30 minute timeout passes. 
- // (Which is a long time for somebody to see login - // instructions and go to a URL to do something.) - action = c.currentAction - if action.Accept || action.Reject { - if action.Reject { - metricTerminalReject.Add(1) - } else { - metricTerminalAccept.Add(1) - } - return action, nil - } - url := action.HoldAndDelegate - if url == "" { - metricTerminalMalformed.Add(1) - return nil, errors.New("reached Action that lacked Accept, Reject, and HoldAndDelegate") - } - metricHolds.Add(1) - url = c.expandDelegateURLLocked(url) - nextAction, err := c.fetchSSHAction(ctx, url) - if err != nil { - metricTerminalFetchError.Add(1) - return nil, fmt.Errorf("fetching SSHAction from %s: %w", url, err) - } - return nextAction, nil -} - func (c *conn) expandDelegateURLLocked(actionURL string) string { nm := c.srv.lb.NetMap() ci := c.info diff --git a/ssh/tailssh/tailssh_integration_test.go b/ssh/tailssh/tailssh_integration_test.go index 1799d3400..5c4f533b1 100644 --- a/ssh/tailssh/tailssh_integration_test.go +++ b/ssh/tailssh/tailssh_integration_test.go @@ -32,8 +32,8 @@ import ( "github.com/bramvdbogaerde/go-scp" "github.com/google/go-cmp/cmp" "github.com/pkg/sftp" - gossh "github.com/tailscale/golang-x-crypto/ssh" "golang.org/x/crypto/ssh" + gossh "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 9f3616d8c..207136659 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -31,7 +31,7 @@ import ( "testing" "time" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" "tailscale.com/ipn/ipnlocal" @@ -805,7 +805,8 @@ func TestSSHAuthFlow(t *testing.T) { state: &localState{ sshEnabled: true, }, - authErr: true, + authErr: true, + wantBanners: []string{"tailscale: failed to evaluate SSH policy"}, }, { name: "accept", diff --git a/tempfork/gliderlabs/ssh/agent.go b/tempfork/gliderlabs/ssh/agent.go index 86a5bce7f..99e84c1e5 100644 --- a/tempfork/gliderlabs/ssh/agent.go +++ b/tempfork/gliderlabs/ssh/agent.go @@ -7,7 +7,7 @@ import ( "path" "sync" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) const ( diff --git a/tempfork/gliderlabs/ssh/context.go b/tempfork/gliderlabs/ssh/context.go index d43de6f09..505a43dbf 100644 --- a/tempfork/gliderlabs/ssh/context.go +++ b/tempfork/gliderlabs/ssh/context.go @@ -6,7 +6,7 @@ import ( "net" "sync" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) // contextKey is a value for use with context.WithValue. It's used as @@ -55,8 +55,6 @@ var ( // ContextKeyPublicKey is a context key for use with Contexts in this package. // The associated value will be of type PublicKey. ContextKeyPublicKey = &contextKey{"public-key"} - - ContextKeySendAuthBanner = &contextKey{"send-auth-banner"} ) // Context is a package specific context interface. It exposes connection @@ -91,8 +89,6 @@ type Context interface { // SetValue allows you to easily write new values into the underlying context. 
SetValue(key, value interface{}) - - SendAuthBanner(banner string) error } type sshContext struct { @@ -121,7 +117,6 @@ func applyConnMetadata(ctx Context, conn gossh.ConnMetadata) { ctx.SetValue(ContextKeyUser, conn.User()) ctx.SetValue(ContextKeyLocalAddr, conn.LocalAddr()) ctx.SetValue(ContextKeyRemoteAddr, conn.RemoteAddr()) - ctx.SetValue(ContextKeySendAuthBanner, conn.SendAuthBanner) } func (ctx *sshContext) SetValue(key, value interface{}) { @@ -158,7 +153,3 @@ func (ctx *sshContext) LocalAddr() net.Addr { func (ctx *sshContext) Permissions() *Permissions { return ctx.Value(ContextKeyPermissions).(*Permissions) } - -func (ctx *sshContext) SendAuthBanner(msg string) error { - return ctx.Value(ContextKeySendAuthBanner).(func(string) error)(msg) -} diff --git a/tempfork/gliderlabs/ssh/options.go b/tempfork/gliderlabs/ssh/options.go index aa87a4f39..29c8ef141 100644 --- a/tempfork/gliderlabs/ssh/options.go +++ b/tempfork/gliderlabs/ssh/options.go @@ -3,7 +3,7 @@ package ssh import ( "os" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) // PasswordAuth returns a functional option that sets PasswordHandler on the server. diff --git a/tempfork/gliderlabs/ssh/options_test.go b/tempfork/gliderlabs/ssh/options_test.go index 7cf6f376c..47342b0f6 100644 --- a/tempfork/gliderlabs/ssh/options_test.go +++ b/tempfork/gliderlabs/ssh/options_test.go @@ -8,7 +8,7 @@ import ( "sync/atomic" "testing" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) func newTestSessionWithOptions(t *testing.T, srv *Server, cfg *gossh.ClientConfig, options ...Option) (*gossh.Session, *gossh.Client, func()) { diff --git a/tempfork/gliderlabs/ssh/server.go b/tempfork/gliderlabs/ssh/server.go index 1086a72ca..473e5fbd6 100644 --- a/tempfork/gliderlabs/ssh/server.go +++ b/tempfork/gliderlabs/ssh/server.go @@ -8,7 +8,7 @@ import ( "sync" "time" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) // ErrServerClosed is returned by the Server's Serve, ListenAndServe, diff --git a/tempfork/gliderlabs/ssh/session.go b/tempfork/gliderlabs/ssh/session.go index 0a4a21e53..a7a9a3eeb 100644 --- a/tempfork/gliderlabs/ssh/session.go +++ b/tempfork/gliderlabs/ssh/session.go @@ -9,7 +9,7 @@ import ( "sync" "github.com/anmitsu/go-shlex" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) // Session provides access to information about an SSH session and methods diff --git a/tempfork/gliderlabs/ssh/session_test.go b/tempfork/gliderlabs/ssh/session_test.go index a60be5ec1..fe61a9d96 100644 --- a/tempfork/gliderlabs/ssh/session_test.go +++ b/tempfork/gliderlabs/ssh/session_test.go @@ -9,7 +9,7 @@ import ( "net" "testing" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) func (srv *Server) serveOnce(l net.Listener) error { diff --git a/tempfork/gliderlabs/ssh/ssh.go b/tempfork/gliderlabs/ssh/ssh.go index 644cb257d..54bd31ec2 100644 --- a/tempfork/gliderlabs/ssh/ssh.go +++ b/tempfork/gliderlabs/ssh/ssh.go @@ -4,7 +4,7 @@ import ( "crypto/subtle" "net" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) type Signal string @@ -105,7 +105,7 @@ type Pty struct { // requested by the client as part of the pty-req. These are outlined as // part of https://datatracker.ietf.org/doc/html/rfc4254#section-8. // - // The opcodes are defined as constants in github.com/tailscale/golang-x-crypto/ssh (VINTR,VQUIT,etc.). 
+ // The opcodes are defined as constants in golang.org/x/crypto/ssh (VINTR,VQUIT,etc.). // Boolean opcodes have values 0 or 1. Modes gossh.TerminalModes } diff --git a/tempfork/gliderlabs/ssh/tcpip.go b/tempfork/gliderlabs/ssh/tcpip.go index 056a0c734..335fda657 100644 --- a/tempfork/gliderlabs/ssh/tcpip.go +++ b/tempfork/gliderlabs/ssh/tcpip.go @@ -7,7 +7,7 @@ import ( "strconv" "sync" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) const ( diff --git a/tempfork/gliderlabs/ssh/tcpip_test.go b/tempfork/gliderlabs/ssh/tcpip_test.go index 118b5d53a..b3ba60a9b 100644 --- a/tempfork/gliderlabs/ssh/tcpip_test.go +++ b/tempfork/gliderlabs/ssh/tcpip_test.go @@ -10,7 +10,7 @@ import ( "strings" "testing" - gossh "github.com/tailscale/golang-x-crypto/ssh" + gossh "golang.org/x/crypto/ssh" ) var sampleServerResponse = []byte("Hello world") diff --git a/tempfork/gliderlabs/ssh/util.go b/tempfork/gliderlabs/ssh/util.go index e3b5716a3..3bee06dcd 100644 --- a/tempfork/gliderlabs/ssh/util.go +++ b/tempfork/gliderlabs/ssh/util.go @@ -5,7 +5,7 @@ import ( "crypto/rsa" "encoding/binary" - "github.com/tailscale/golang-x-crypto/ssh" + "golang.org/x/crypto/ssh" ) func generateSigner() (ssh.Signer, error) { diff --git a/tempfork/gliderlabs/ssh/wrap.go b/tempfork/gliderlabs/ssh/wrap.go index 17867d751..d1f2b161e 100644 --- a/tempfork/gliderlabs/ssh/wrap.go +++ b/tempfork/gliderlabs/ssh/wrap.go @@ -1,6 +1,6 @@ package ssh -import gossh "github.com/tailscale/golang-x-crypto/ssh" +import gossh "golang.org/x/crypto/ssh" // PublicKey is an abstraction of different types of public keys. type PublicKey interface { From a0537dc0270505bf34736c5bd8595b9770111e1a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 31 Jan 2025 18:27:37 -0600 Subject: [PATCH 0440/1708] ipn/ipnlocal: fix a panic in setPrefsLockedOnEntry when cc is nil The AlwaysOn policy can be applied by (*LocalBackend).applySysPolicy, flipping WantRunning from false to true before (*LocalBackend).Start() has been called for the first time and set a control client in b.cc. This results in a nil pointer dereference and a panic when setPrefsLockedOnEntry applies the change and calls controlclient.Client.Login(). In this PR, we fix it by only doing a login if b.cc has been set. Updates #14823 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index fc4bd6e4e..b13dfd0e4 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4181,7 +4181,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) b.MagicConn().SetDERPMap(netMap.DERPMap) } - if !oldp.WantRunning() && newp.WantRunning { + if !oldp.WantRunning() && newp.WantRunning && cc != nil { b.logf("transitioning to running; doing Login...") cc.Login(controlclient.LoginDefault) } From 2c02f712d1961b1260fcdf488d7971d7c833fabe Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 31 Jan 2025 16:09:15 -0600 Subject: [PATCH 0441/1708] util/syspolicy/internal/metrics: replace dots with underscores for metric names MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Dots are not allowed in metric names and cause panics. Since we use dots in names like AlwaysOn.OverrideWithReason, let's replace them with underscores. We don’t want to use setting.KeyPathSeparator here just yet to make it fully hierarchical, but we will decide as we progress on the (experimental) AlwaysOn.* policy settings. 
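For example, the sanitization added here turns the dotted key into a valid metric name component (a standalone sketch, not the actual metrics code):

    package example

    import "strings"

    // metricNameComponent mirrors the new replacement in newSettingMetric:
    // dots in a policy setting key become underscores so the key can be used
    // in a clientmetric name without panicking.
    func metricNameComponent(key string) string {
        return strings.ReplaceAll(key, ".", "_")
    }

    // metricNameComponent("AlwaysOn.OverrideWithReason") == "AlwaysOn_OverrideWithReason"
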
tailscale/corp#26146 Signed-off-by: Nick Khyl --- util/syspolicy/internal/metrics/metrics.go | 1 + 1 file changed, 1 insertion(+) diff --git a/util/syspolicy/internal/metrics/metrics.go b/util/syspolicy/internal/metrics/metrics.go index d8ba271a8..770a34d29 100644 --- a/util/syspolicy/internal/metrics/metrics.go +++ b/util/syspolicy/internal/metrics/metrics.go @@ -285,6 +285,7 @@ func SetHooksForTest(tb internal.TB, addMetric, setMetric metricFn) { func newSettingMetric(key setting.Key, scope setting.Scope, suffix string, typ clientmetric.Type) metric { name := strings.ReplaceAll(string(key), string(setting.KeyPathSeparator), "_") + name = strings.ReplaceAll(name, ".", "_") // dots are not allowed in metric names return newMetric([]string{name, metricScopeName(scope), suffix}, typ) } From d8324674610231c36dc010854e82f0c087637df1 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 31 Jan 2025 16:14:13 -0600 Subject: [PATCH 0442/1708] client/tailscale,ipn/ipn{local,server},util/syspolicy: implement the AlwaysOn.OverrideWithReason policy setting In this PR, we update client/tailscale.LocalClient to allow sending requests with an optional X-Tailscale-Reason header. We then update ipn/ipnserver.{actor,Server} to retrieve this reason, if specified, and use it to determine whether ipnauth.Disconnect is allowed when the AlwaysOn.OverrideWithReason policy setting is enabled. For now, we log the reason, along with the profile and OS username, to the backend log. Finally, we update LocalBackend to remember when a disconnect was permitted and do not reconnect automatically unless the policy changes. Updates tailscale/corp#26146 Signed-off-by: Nick Khyl --- client/tailscale/apitype/apitype.go | 18 ++++++++++++ client/tailscale/localclient.go | 13 ++++++++- ipn/ipnlocal/local.go | 44 +++++++++++++++++++++++++---- ipn/ipnlocal/local_test.go | 6 ++-- ipn/ipnserver/actor.go | 40 ++++++++++++++++++++++---- ipn/ipnserver/server.go | 12 +++++++- util/syspolicy/policy_keys.go | 9 +++++- 7 files changed, 125 insertions(+), 17 deletions(-) diff --git a/client/tailscale/apitype/apitype.go b/client/tailscale/apitype/apitype.go index b1c273a4f..5ef838039 100644 --- a/client/tailscale/apitype/apitype.go +++ b/client/tailscale/apitype/apitype.go @@ -7,11 +7,29 @@ package apitype import ( "tailscale.com/tailcfg" "tailscale.com/types/dnstype" + "tailscale.com/util/ctxkey" ) // LocalAPIHost is the Host header value used by the LocalAPI. const LocalAPIHost = "local-tailscaled.sock" +// RequestReasonHeader is the header used to pass justification for a LocalAPI request, +// such as when a user wants to perform an action they don't have permission for, +// and a policy allows it with justification. As of 2025-01-29, it is only used to +// allow a user to disconnect Tailscale when the "always-on" mode is enabled. +// +// The header value is base64-encoded using the standard encoding defined in RFC 4648. +// +// See tailscale/corp#26146. +const RequestReasonHeader = "X-Tailscale-Reason" + +// RequestReasonKey is the context key used to pass the request reason +// when making a LocalAPI request via [tailscale.LocalClient]. +// It's value is a raw string. An empty string means no reason was provided. +// +// See tailscale/corp#26146. +var RequestReasonKey = ctxkey.New(RequestReasonHeader, "") + // WhoIsResponse is the JSON type returned by tailscaled debug server's /whois?ip=$IP handler. // In successful whois responses, Node and UserProfile are never nil. 
type WhoIsResponse struct { diff --git a/client/tailscale/localclient.go b/client/tailscale/localclient.go index f440b19a8..eecd05dfd 100644 --- a/client/tailscale/localclient.go +++ b/client/tailscale/localclient.go @@ -10,6 +10,7 @@ import ( "cmp" "context" "crypto/tls" + "encoding/base64" "encoding/json" "errors" "fmt" @@ -238,7 +239,12 @@ func SetVersionMismatchHandler(f func(clientVer, serverVer string)) { } func (lc *LocalClient) send(ctx context.Context, method, path string, wantStatus int, body io.Reader) ([]byte, error) { - slurp, _, err := lc.sendWithHeaders(ctx, method, path, wantStatus, body, nil) + var headers http.Header + if reason := apitype.RequestReasonKey.Value(ctx); reason != "" { + reasonBase64 := base64.StdEncoding.EncodeToString([]byte(reason)) + headers = http.Header{apitype.RequestReasonHeader: {reasonBase64}} + } + slurp, _, err := lc.sendWithHeaders(ctx, method, path, wantStatus, body, headers) return slurp, err } @@ -824,6 +830,11 @@ func (lc *LocalClient) GetPrefs(ctx context.Context) (*ipn.Prefs, error) { return &p, nil } +// EditPrefs updates the [ipn.Prefs] of the current Tailscale profile, applying the changes in mp. +// It returns an error if the changes cannot be applied, such as due to the caller's access rights +// or a policy restriction. An optional reason or justification for the request can be +// provided as a context value using [apitype.RequestReasonKey]. If permitted by policy, +// access may be granted, and the reason will be logged for auditing purposes. func (lc *LocalClient) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) { body, err := lc.send(ctx, "PATCH", "/localapi/v0/prefs", http.StatusOK, jsonBody(mp)) if err != nil { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index b13dfd0e4..fb7cc98a3 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -386,6 +386,14 @@ type LocalBackend struct { // backend is healthy and captive portal detection is not required // (sending false). needsCaptiveDetection chan bool + + // overrideAlwaysOn is whether [syspolicy.AlwaysOn] is overridden by the user + // and should have no impact on the WantRunning state until the policy changes, + // or the user re-connects manually, switches to a different profile, etc. + // Notably, this is true when [syspolicy.AlwaysOnOverrideWithReason] is enabled, + // and the user has disconnected with a reason. + // See tailscale/corp#26146. + overrideAlwaysOn bool } // HealthTracker returns the health tracker for the backend. @@ -1564,7 +1572,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.logf("SetControlClientStatus failed to select auto exit node: %v", err) } } - if applySysPolicy(prefs, b.lastSuggestedExitNode) { + if applySysPolicy(prefs, b.lastSuggestedExitNode, b.overrideAlwaysOn) { prefsChanged = true } if setExitNodeID(prefs, curNetMap) { @@ -1733,7 +1741,7 @@ var preferencePolicies = []preferencePolicyInfo{ // applySysPolicy overwrites configured preferences with policies that may be // configured by the system administrator in an OS-specific way. 
-func applySysPolicy(prefs *ipn.Prefs, lastSuggestedExitNode tailcfg.StableNodeID) (anyChange bool) { +func applySysPolicy(prefs *ipn.Prefs, lastSuggestedExitNode tailcfg.StableNodeID, overrideAlwaysOn bool) (anyChange bool) { if controlURL, err := syspolicy.GetString(syspolicy.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { prefs.ControlURL = controlURL anyChange = true @@ -1795,7 +1803,7 @@ func applySysPolicy(prefs *ipn.Prefs, lastSuggestedExitNode tailcfg.StableNodeID } } - if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); alwaysOn && !prefs.WantRunning { + if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); alwaysOn && !overrideAlwaysOn && !prefs.WantRunning { prefs.WantRunning = true anyChange = true } @@ -1834,7 +1842,7 @@ func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { func (b *LocalBackend) applySysPolicy() (_ ipn.PrefsView, anyChange bool) { unlock := b.lockAndGetUnlock() prefs := b.pm.CurrentPrefs().AsStruct() - if !applySysPolicy(prefs, b.lastSuggestedExitNode) { + if !applySysPolicy(prefs, b.lastSuggestedExitNode, b.overrideAlwaysOn) { unlock.UnlockEarly() return prefs.View(), false } @@ -1844,6 +1852,15 @@ func (b *LocalBackend) applySysPolicy() (_ ipn.PrefsView, anyChange bool) { // sysPolicyChanged is a callback triggered by syspolicy when it detects // a change in one or more syspolicy settings. func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { + if policy.HasChanged(syspolicy.AlwaysOn) || policy.HasChanged(syspolicy.AlwaysOnOverrideWithReason) { + // If the AlwaysOn or the AlwaysOnOverrideWithReason policy has changed, + // we should reset the overrideAlwaysOn flag, as the override might + // no longer be valid. + b.mu.Lock() + b.overrideAlwaysOn = false + b.mu.Unlock() + } + if policy.HasChanged(syspolicy.AllowedSuggestedExitNodes) { b.refreshAllowedSuggestions() // Re-evaluate exit node suggestion now that the policy setting has changed. @@ -4018,6 +4035,12 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip return ipn.PrefsView{}, err } + // If a user has enough rights to disconnect, such as when [syspolicy.AlwaysOn] + // is disabled, or [syspolicy.AlwaysOnOverrideWithReason] is also set and the user + // provides a reason for disconnecting, then we should not force the "always on" + // mode on them until the policy changes, they switch to a different profile, etc. + b.overrideAlwaysOn = true + // TODO(nickkhyl): check the ReconnectAfter policy here. If configured, // start a timer to automatically reconnect after the specified duration. } @@ -4025,6 +4048,10 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip return b.editPrefsLockedOnEntry(mp, unlock) } +func (b *LocalBackend) resetAlwaysOnOverrideLocked() { + b.overrideAlwaysOn = false +} + // Warning: b.mu must be held on entry, but it unlocks it on the way out. // TODO(bradfitz): redo the locking on all these weird methods like this. func (b *LocalBackend) editPrefsLockedOnEntry(mp *ipn.MaskedPrefs, unlock unlockOnce) (ipn.PrefsView, error) { @@ -4113,7 +4140,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) // applySysPolicyToPrefsLocked returns whether it updated newp, // but everything in this function treats b.prefs as completely new // anyway, so its return value can be ignored here. 
- applySysPolicy(newp, b.lastSuggestedExitNode) + applySysPolicy(newp, b.lastSuggestedExitNode, b.overrideAlwaysOn) // setExitNodeID does likewise. No-op if no exit node resolution is needed. setExitNodeID(newp, netMap) // We do this to avoid holding the lock while doing everything else. @@ -4161,6 +4188,11 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) } if err := b.pm.SetPrefs(prefs, np); err != nil { b.logf("failed to save new controlclient state: %v", err) + } else if prefs.WantRunning() { + // Reset the always-on override if WantRunning is true in the new prefs, + // such as when the user toggles the Connected switch in the GUI + // or runs `tailscale up`. + b.resetAlwaysOnOverrideLocked() } if newp.AutoUpdate.Apply.EqualBool(true) { @@ -5587,6 +5619,7 @@ func (b *LocalBackend) ResetForClientDisconnect() { b.resetAuthURLLocked() b.activeLogin = "" b.resetDialPlan() + b.resetAlwaysOnOverrideLocked() b.setAtomicValuesFromPrefsLocked(ipn.PrefsView{}) b.enterStateLockedOnEntry(ipn.Stopped, unlock) } @@ -7125,6 +7158,7 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err b.lastServeConfJSON = mem.B(nil) b.serveConfig = ipn.ServeConfigView{} b.lastSuggestedExitNode = "" + b.resetAlwaysOnOverrideLocked() b.enterStateLockedOnEntry(ipn.NoState, unlock) // Reset state; releases b.mu b.health.SetLocalLogConfigHealth(nil) return b.Start(ipn.Options{}) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 3455cab1f..dfc2e45bd 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1861,7 +1861,7 @@ func TestSetExitNodeIDPolicy(t *testing.T) { b.lastSuggestedExitNode = test.lastSuggestedExitNode prefs := b.pm.prefs.AsStruct() - if changed := applySysPolicy(prefs, test.lastSuggestedExitNode) || setExitNodeID(prefs, test.nm); changed != test.prefsChanged { + if changed := applySysPolicy(prefs, test.lastSuggestedExitNode, false) || setExitNodeID(prefs, test.nm); changed != test.prefsChanged { t.Errorf("wanted prefs changed %v, got prefs changed %v", test.prefsChanged, changed) } @@ -2421,7 +2421,7 @@ func TestApplySysPolicy(t *testing.T) { t.Run("unit", func(t *testing.T) { prefs := tt.prefs.Clone() - gotAnyChange := applySysPolicy(prefs, "") + gotAnyChange := applySysPolicy(prefs, "", false) if gotAnyChange && prefs.Equals(&tt.prefs) { t.Errorf("anyChange but prefs is unchanged: %v", prefs.Pretty()) @@ -2569,7 +2569,7 @@ func TestPreferencePolicyInfo(t *testing.T) { prefs := defaultPrefs.AsStruct() pp.set(prefs, tt.initialValue) - gotAnyChange := applySysPolicy(prefs, "") + gotAnyChange := applySysPolicy(prefs, "", false) if gotAnyChange != tt.wantChange { t.Errorf("anyChange=%v, want %v", gotAnyChange, tt.wantChange) diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 7ff96699a..652716670 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -32,8 +32,12 @@ type actor struct { logf logger.Logf ci *ipnauth.ConnIdentity - clientID ipnauth.ClientID - isLocalSystem bool // whether the actor is the Windows' Local System identity. + clientID ipnauth.ClientID + // accessOverrideReason specifies the reason for overriding certain access restrictions, + // such as permitting a user to disconnect when the always-on mode is enabled, + // provided that such justification is allowed by the policy. + accessOverrideReason string + isLocalSystem bool // whether the actor is the Windows' Local System identity. 
} func newActor(logf logger.Logf, c net.Conn) (*actor, error) { @@ -59,19 +63,43 @@ func newActor(logf logger.Logf, c net.Conn) (*actor, error) { return &actor{logf: logf, ci: ci, clientID: clientID, isLocalSystem: connIsLocalSystem(ci)}, nil } +// actorWithAccessOverride returns a new actor that carries the specified +// reason for overriding certain access restrictions, if permitted by the +// policy. If the reason is "", it returns the base actor. +func actorWithAccessOverride(baseActor *actor, reason string) *actor { + if reason == "" { + return baseActor + } + return &actor{ + logf: baseActor.logf, + ci: baseActor.ci, + clientID: baseActor.clientID, + accessOverrideReason: reason, + isLocalSystem: baseActor.isLocalSystem, + } +} + // CheckProfileAccess implements [ipnauth.Actor]. func (a *actor) CheckProfileAccess(profile ipn.LoginProfileView, requestedAccess ipnauth.ProfileAccess) error { + // TODO(nickkhyl): return errors of more specific types and have them + // translated to the appropriate HTTP status codes in the API handler. if profile.LocalUserID() != a.UserID() { return errors.New("the target profile does not belong to the user") } switch requestedAccess { case ipnauth.Disconnect: if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); alwaysOn { - // TODO(nickkhyl): check if disconnecting with justifications is allowed - // and whether a justification is included in the request. - return errors.New("profile access denied: always-on mode is enabled") + if allowWithReason, _ := syspolicy.GetBoolean(syspolicy.AlwaysOnOverrideWithReason, false); !allowWithReason { + return errors.New("disconnect not allowed: always-on mode is enabled") + } + if a.accessOverrideReason == "" { + return errors.New("disconnect not allowed: reason required") + } + maybeUsername, _ := a.Username() // best-effort + a.logf("Tailscale (%q) is being disconnected by %q: %v", profile.Name(), maybeUsername, a.accessOverrideReason) + // TODO(nickkhyl): Log the reason to the audit log once we have one. } - return nil + return nil // disconnect is allowed default: return errors.New("the requested operation is not allowed") } diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index 3d9c9e3d4..a08643667 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -7,6 +7,7 @@ package ipnserver import ( "context" + "encoding/base64" "encoding/json" "errors" "fmt" @@ -20,6 +21,7 @@ import ( "sync/atomic" "unicode" + "tailscale.com/client/tailscale/apitype" "tailscale.com/envknob" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" @@ -198,10 +200,18 @@ func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) { if actor, ok := ci.(*actor); ok { lah.PermitRead, lah.PermitWrite = actor.Permissions(lb.OperatorUserID()) lah.PermitCert = actor.CanFetchCerts() + reason, err := base64.StdEncoding.DecodeString(r.Header.Get(apitype.RequestReasonHeader)) + if err != nil { + http.Error(w, "invalid reason header", http.StatusBadRequest) + return + } + lah.Actor = actorWithAccessOverride(actor, string(reason)) } else if testenv.InTest() { lah.PermitRead, lah.PermitWrite = true, true } - lah.Actor = ci + if lah.Actor == nil { + lah.Actor = ci + } lah.ServeHTTP(w, r) return } diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index ec5e83b18..a955ce094 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -34,7 +34,13 @@ const ( // Warning: This policy setting is experimental and may change or be removed in the future. 
// It may also not be fully supported by all Tailscale clients until it is out of experimental status. // See tailscale/corp#26247, tailscale/corp#26248 and tailscale/corp#26249 for more information. - AlwaysOn Key = "AlwaysOn" + AlwaysOn Key = "AlwaysOn.Enabled" + + // AlwaysOnOverrideWithReason is a boolean key that alters the behavior + // of [AlwaysOn]. When true, the user is allowed to disconnect Tailscale + // by providing a reason. The reason is logged and sent to the control + // for auditing purposes. It has no effect when [AlwaysOn] is false. + AlwaysOnOverrideWithReason Key = "AlwaysOn.OverrideWithReason" // ExitNodeID is the exit node's node id. default ""; if blank, no exit node is forced. // Exit node ID takes precedence over exit node IP. @@ -150,6 +156,7 @@ var implicitDefinitions = []*setting.Definition{ // Device policy settings (can only be configured on a per-device basis): setting.NewDefinition(AllowedSuggestedExitNodes, setting.DeviceSetting, setting.StringListValue), setting.NewDefinition(AlwaysOn, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(AlwaysOnOverrideWithReason, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(ApplyUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), setting.NewDefinition(AuthKey, setting.DeviceSetting, setting.StringValue), setting.NewDefinition(CheckUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), From 496347c724a8aab76e06bb8a899b3620bd119d10 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 3 Feb 2025 08:53:23 -0800 Subject: [PATCH 0443/1708] go.mod: bump inetaf/tcpproxy To fix a logging crash. Updates tailscale/corp#20503 Change-Id: I1beafe34afeb577aaaf6800a408faf6454b16912 Signed-off-by: Brad Fitzpatrick --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4d50f5985..e09f22a72 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,7 @@ require ( github.com/goreleaser/nfpm/v2 v2.33.1 github.com/hdevalence/ed25519consensus v0.2.0 github.com/illarion/gonotify/v2 v2.0.3 - github.com/inetaf/tcpproxy v0.0.0-20250121183218-48c7e53d7ac4 + github.com/inetaf/tcpproxy v0.0.0-20250203165043-ded522cbd03f github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 github.com/jellydator/ttlcache/v3 v3.1.0 github.com/jsimonetti/rtnetlink v1.4.0 diff --git a/go.sum b/go.sum index 7e21f7c20..e1709fef4 100644 --- a/go.sum +++ b/go.sum @@ -572,8 +572,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/inetaf/tcpproxy v0.0.0-20250121183218-48c7e53d7ac4 h1:5u/LhBmv8Y+BhTTADTuh8ma0DcZ3zzx+GINbMeMG9nM= -github.com/inetaf/tcpproxy v0.0.0-20250121183218-48c7e53d7ac4/go.mod h1:Di7LXRyUcnvAcLicFhtM9/MlZl/TNgRSDHORM2c6CMI= +github.com/inetaf/tcpproxy v0.0.0-20250203165043-ded522cbd03f h1:hPcDyz0u+Zo14n0fpJggxL9JMAmZIK97TVLcLJLPMDI= +github.com/inetaf/tcpproxy v0.0.0-20250203165043-ded522cbd03f/go.mod h1:Di7LXRyUcnvAcLicFhtM9/MlZl/TNgRSDHORM2c6CMI= github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= github.com/jbenet/go-context 
v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= From 17ca2b7721bbaec6dd61019633a51d07b20fc2e7 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 31 Jan 2025 16:12:32 -0600 Subject: [PATCH 0444/1708] cmd/tailscale/cli: update tailscale down to accept an optional --reason If specified, the reason is sent via the LocalAPI for auditing purposes. Updates tailscale/corp#26146 Signed-off-by: Nick Khyl --- cmd/tailscale/cli/down.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/tailscale/cli/down.go b/cmd/tailscale/cli/down.go index 1eb85a13e..224198a98 100644 --- a/cmd/tailscale/cli/down.go +++ b/cmd/tailscale/cli/down.go @@ -9,6 +9,7 @@ import ( "fmt" "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn" ) @@ -23,10 +24,12 @@ var downCmd = &ffcli.Command{ var downArgs struct { acceptedRisks string + reason string } func newDownFlagSet() *flag.FlagSet { downf := newFlagSet("down") + downf.StringVar(&downArgs.reason, "reason", "", "reason for the disconnect, if required by a policy") registerAcceptRiskFlag(downf, &downArgs.acceptedRisks) return downf } @@ -50,6 +53,7 @@ func runDown(ctx context.Context, args []string) error { fmt.Fprintf(Stderr, "Tailscale was already stopped.\n") return nil } + ctx = apitype.RequestReasonKey.WithValue(ctx, downArgs.reason) _, err = localClient.EditPrefs(ctx, &ipn.MaskedPrefs{ Prefs: ipn.Prefs{ WantRunning: false, From 10fe10ea10f88aaa1ffbb79aa5dd864ca69391ba Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 31 Jan 2025 12:54:50 -0800 Subject: [PATCH 0445/1708] derp/derphttp,ipn/localapi,net/captivedetection: add cache resistance to captive portal detection Observed on some airlines (British Airways, WestJet), Squid is configured to cache and transform these results, which is disruptive. The server and client should both actively request that this is not done by setting Cache-Control headers. Send a timestamp parameter to further work against caches that do not respect the cache-control headers. Updates #14856 Signed-off-by: James Tucker --- derp/derphttp/derphttp_server.go | 3 +- ipn/localapi/debugderp.go | 10 ++- net/captivedetection/captivedetection.go | 17 ++++- net/captivedetection/captivedetection_test.go | 71 +++++++++++++++++++ 4 files changed, 97 insertions(+), 4 deletions(-) diff --git a/derp/derphttp/derphttp_server.go b/derp/derphttp/derphttp_server.go index ed7d3d707..50aba774a 100644 --- a/derp/derphttp/derphttp_server.go +++ b/derp/derphttp/derphttp_server.go @@ -98,6 +98,7 @@ func ServeNoContent(w http.ResponseWriter, r *http.Request) { w.Header().Set(NoContentResponseHeader, "response "+challenge) } } + w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate, no-transform, max-age=0") w.WriteHeader(http.StatusNoContent) } @@ -105,7 +106,7 @@ func isChallengeChar(c rune) bool { // Semi-randomly chosen as a limited set of valid characters return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || - c == '.' || c == '-' || c == '_' + c == '.' 
|| c == '-' || c == '_' || c == ':' } const ( diff --git a/ipn/localapi/debugderp.go b/ipn/localapi/debugderp.go index dbdf5cf79..6636fd253 100644 --- a/ipn/localapi/debugderp.go +++ b/ipn/localapi/debugderp.go @@ -231,8 +231,14 @@ func (h *Handler) serveDebugDERPRegion(w http.ResponseWriter, r *http.Request) { connSuccess := checkConn(derpNode) // Verify that the /generate_204 endpoint works - captivePortalURL := "http://" + derpNode.HostName + "/generate_204" - resp, err := client.Get(captivePortalURL) + captivePortalURL := fmt.Sprintf("http://%s/generate_204?t=%d", derpNode.HostName, time.Now().Unix()) + req, err := http.NewRequest("GET", captivePortalURL, nil) + if err != nil { + st.Warnings = append(st.Warnings, fmt.Sprintf("Internal error creating request for captive portal check: %v", err)) + continue + } + req.Header.Set("Cache-Control", "no-cache, no-store, must-revalidate, no-transform, max-age=0") + resp, err := client.Do(req) if err != nil { st.Warnings = append(st.Warnings, fmt.Sprintf("Error making request to the captive portal check %q; is port 80 blocked?", captivePortalURL)) } else { diff --git a/net/captivedetection/captivedetection.go b/net/captivedetection/captivedetection.go index 7d598d853..a06362a5b 100644 --- a/net/captivedetection/captivedetection.go +++ b/net/captivedetection/captivedetection.go @@ -11,6 +11,7 @@ import ( "net" "net/http" "runtime" + "strconv" "strings" "sync" "syscall" @@ -23,6 +24,7 @@ import ( // Detector checks whether the system is behind a captive portal. type Detector struct { + clock func() time.Time // httpClient is the HTTP client that is used for captive portal detection. It is configured // to not follow redirects, have a short timeout and no keep-alive. @@ -52,6 +54,13 @@ func NewDetector(logf logger.Logf) *Detector { return d } +func (d *Detector) Now() time.Time { + if d.clock != nil { + return d.clock() + } + return time.Now() +} + // Timeout is the timeout for captive portal detection requests. Because the captive portal intercepting our requests // is usually located on the LAN, this is a relatively short timeout. const Timeout = 3 * time.Second @@ -187,10 +196,16 @@ func (d *Detector) verifyCaptivePortalEndpoint(ctx context.Context, e Endpoint, ctx, cancel := context.WithTimeout(ctx, Timeout) defer cancel() - req, err := http.NewRequestWithContext(ctx, "GET", e.URL.String(), nil) + u := *e.URL + v := u.Query() + v.Add("t", strconv.Itoa(int(d.Now().Unix()))) + u.RawQuery = v.Encode() + + req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) if err != nil { return false, err } + req.Header.Set("Cache-Control", "no-cache, no-store, must-revalidate, no-transform, max-age=0") // Attach the Tailscale challenge header if the endpoint supports it. Not all captive portal detection endpoints // support this, so we only attach it if the endpoint does. 
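
Taken together, the hunks above apply one cache-busting pattern in three places: a per-request timestamp query parameter plus a restrictive Cache-Control request header on the probe. A minimal, self-contained sketch of that pattern, outside this patch and with illustrative names only (the probe package and newProbeRequest are not identifiers from the change), could look like:

package probe

import (
	"context"
	"net/http"
	"net/url"
	"strconv"
	"time"
)

// newProbeRequest builds a GET request for a /generate_204-style probe URL.
// The timestamp parameter makes each probe URL unique for caches that ignore
// Cache-Control, and the Cache-Control header asks well-behaved intermediaries
// not to cache or transform the response.
func newProbeRequest(ctx context.Context, base *url.URL, now func() time.Time) (*http.Request, error) {
	u := *base // copy so the caller's URL is not mutated
	q := u.Query()
	q.Set("t", strconv.FormatInt(now().Unix(), 10))
	u.RawQuery = q.Encode()
	req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Cache-Control", "no-cache, no-store, must-revalidate, no-transform, max-age=0")
	return req, nil
}

Both measures are needed because, as the commit message notes, some intermediaries cache and transform these responses even when asked not to, so the timestamp acts as a backstop for caches that disregard the Cache-Control headers.
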
diff --git a/net/captivedetection/captivedetection_test.go b/net/captivedetection/captivedetection_test.go index 29a197d31..064a86c8c 100644 --- a/net/captivedetection/captivedetection_test.go +++ b/net/captivedetection/captivedetection_test.go @@ -5,14 +5,21 @@ package captivedetection import ( "context" + "net/http" + "net/http/httptest" + "net/url" "runtime" + "strconv" "sync" "sync/atomic" "testing" + "time" + "tailscale.com/derp/derphttp" "tailscale.com/net/netmon" "tailscale.com/syncs" "tailscale.com/tstest/nettest" + "tailscale.com/util/must" ) func TestAvailableEndpointsAlwaysAtLeastTwo(t *testing.T) { @@ -81,3 +88,67 @@ func TestEndpointsAreUpAndReturnExpectedResponse(t *testing.T) { t.Errorf("no good endpoints found") } } + +func TestCaptivePortalRequest(t *testing.T) { + d := NewDetector(t.Logf) + now := time.Now() + d.clock = func() time.Time { return now } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + t.Errorf("expected GET, got %q", r.Method) + } + if r.URL.Path != "/generate_204" { + t.Errorf("expected /generate_204, got %q", r.URL.Path) + } + q := r.URL.Query() + if got, want := q.Get("t"), strconv.Itoa(int(now.Unix())); got != want { + t.Errorf("timestamp param; got %v, want %v", got, want) + } + w.Header().Set("X-Tailscale-Response", "response "+r.Header.Get("X-Tailscale-Challenge")) + + w.WriteHeader(http.StatusNoContent) + })) + defer s.Close() + + e := Endpoint{ + URL: must.Get(url.Parse(s.URL + "/generate_204")), + StatusCode: 204, + ExpectedContent: "", + SupportsTailscaleChallenge: true, + } + + found, err := d.verifyCaptivePortalEndpoint(ctx, e, 0) + if err != nil { + t.Fatalf("verifyCaptivePortalEndpoint = %v, %v", found, err) + } + if found { + t.Errorf("verifyCaptivePortalEndpoint = %v, want false", found) + } +} + +func TestAgainstDERPHandler(t *testing.T) { + d := NewDetector(t.Logf) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := httptest.NewServer(http.HandlerFunc(derphttp.ServeNoContent)) + defer s.Close() + e := Endpoint{ + URL: must.Get(url.Parse(s.URL + "/generate_204")), + StatusCode: 204, + ExpectedContent: "", + SupportsTailscaleChallenge: true, + } + found, err := d.verifyCaptivePortalEndpoint(ctx, e, 0) + if err != nil { + t.Fatalf("verifyCaptivePortalEndpoint = %v, %v", found, err) + } + if found { + t.Errorf("verifyCaptivePortalEndpoint = %v, want false", found) + } +} From 95e2353294f666d15a52fb1a0c3b396d2ab21ece Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 3 Feb 2025 09:59:07 -0800 Subject: [PATCH 0446/1708] wgengine/wgcfg/nmcfg: coalesce, limit some debug logs Updates #14881 Change-Id: I708d29244fe901ab037203a5d7c2cae3c77e4c78 Signed-off-by: Brad Fitzpatrick --- wgengine/wgcfg/nmcfg/nmcfg.go | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/wgengine/wgcfg/nmcfg/nmcfg.go b/wgengine/wgcfg/nmcfg/nmcfg.go index 45c235b4d..1add608e4 100644 --- a/wgengine/wgcfg/nmcfg/nmcfg.go +++ b/wgengine/wgcfg/nmcfg/nmcfg.go @@ -81,8 +81,8 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, // Logging buffers skippedUnselected := new(bytes.Buffer) - skippedIPs := new(bytes.Buffer) skippedSubnets := new(bytes.Buffer) + skippedExpired := new(bytes.Buffer) for _, peer := range nm.Peers { if peer.DiscoKey().IsZero() && peer.HomeDERP() == 0 && !peer.IsWireGuardOnly() { @@ -95,7 +95,16 @@ func WGCfg(nm 
*netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, // anyway, since control intentionally breaks node keys for // expired peers so that we can't discover endpoints via DERP. if peer.Expired() { - logf("[v1] wgcfg: skipped expired peer %s", peer.Key().ShortString()) + if skippedExpired.Len() >= 1<<10 { + if !bytes.HasSuffix(skippedExpired.Bytes(), []byte("...")) { + skippedExpired.WriteString("...") + } + } else { + if skippedExpired.Len() > 0 { + skippedExpired.WriteString(", ") + } + fmt.Fprintf(skippedExpired, "%s/%v", peer.StableID(), peer.Key().ShortString()) + } continue } @@ -137,12 +146,11 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, if skippedUnselected.Len() > 0 { logf("[v1] wgcfg: skipped unselected default routes from: %s", skippedUnselected.Bytes()) } - if skippedIPs.Len() > 0 { - logf("[v1] wgcfg: skipped node IPs: %s", skippedIPs) - } if skippedSubnets.Len() > 0 { logf("[v1] wgcfg: did not accept subnet routes: %s", skippedSubnets) } - + if skippedExpired.Len() > 0 { + logf("[v1] wgcfg: skipped expired peer: %s", skippedExpired) + } return cfg, nil } From 600f25dac99aa8fbecb5b43f0ec9df516fbb28f6 Mon Sep 17 00:00:00 2001 From: Adrian Dewhurst Date: Mon, 3 Feb 2025 14:08:26 -0500 Subject: [PATCH 0447/1708] tailcfg: add JSON unmarshal helper for view of node/peer capabilities Many places that need to work with node/peer capabilities end up with a something-View and need to either reimplement the helper code or make an expensive copy. We have the machinery to easily handle this now. Updates #cleanup Change-Id: Ic3f55be329f0fc6c178de26b34359d0e8c6ca5fc Signed-off-by: Adrian Dewhurst --- cmd/sniproxy/sniproxy.go | 4 +--- ipn/ipnlocal/local.go | 5 +---- tailcfg/tailcfg.go | 27 +++++++++++++++++++++------ types/netmap/netmap.go | 2 +- 4 files changed, 24 insertions(+), 14 deletions(-) diff --git a/cmd/sniproxy/sniproxy.go b/cmd/sniproxy/sniproxy.go index fa83aaf4a..c1af977f6 100644 --- a/cmd/sniproxy/sniproxy.go +++ b/cmd/sniproxy/sniproxy.go @@ -157,10 +157,8 @@ func run(ctx context.Context, ts *tsnet.Server, wgPort int, hostname string, pro // NetMap contains app-connector configuration if nm := msg.NetMap; nm != nil && nm.SelfNode.Valid() { - sn := nm.SelfNode.AsStruct() - var c appctype.AppConnectorConfig - nmConf, err := tailcfg.UnmarshalNodeCapJSON[appctype.AppConnectorConfig](sn.CapMap, configCapKey) + nmConf, err := tailcfg.UnmarshalNodeCapViewJSON[appctype.AppConnectorConfig](nm.SelfNode.CapMap(), configCapKey) if err != nil { log.Printf("failed to read app connector configuration from coordination server: %v", err) } else if len(nmConf) > 0 { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index fb7cc98a3..faf5d13db 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4463,10 +4463,7 @@ func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs i return } - // TODO(raggi): rework the view infrastructure so the large deep clone is no - // longer required - sn := nm.SelfNode.AsStruct() - attrs, err := tailcfg.UnmarshalNodeCapJSON[appctype.AppConnectorAttr](sn.CapMap, appConnectorCapName) + attrs, err := tailcfg.UnmarshalNodeCapViewJSON[appctype.AppConnectorAttr](nm.SelfNode.CapMap(), appConnectorCapName) if err != nil { b.logf("[unexpected] error parsing app connector mapcap: %v", err) return diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index c17cd5f45..8251b5058 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -25,6 +25,7 @@ import ( "tailscale.com/types/opt" 
"tailscale.com/types/structs" "tailscale.com/types/tkatype" + "tailscale.com/types/views" "tailscale.com/util/dnsname" "tailscale.com/util/slicesx" "tailscale.com/util/vizerror" @@ -1547,12 +1548,19 @@ func (c NodeCapMap) Equal(c2 NodeCapMap) bool { // If cap does not exist in cm, it returns (nil, nil). // It returns an error if the values cannot be unmarshaled into the provided type. func UnmarshalNodeCapJSON[T any](cm NodeCapMap, cap NodeCapability) ([]T, error) { - vals, ok := cm[cap] + return UnmarshalNodeCapViewJSON[T](views.MapSliceOf(cm), cap) +} + +// UnmarshalNodeCapViewJSON unmarshals each JSON value in cm.Get(cap) as T. +// If cap does not exist in cm, it returns (nil, nil). +// It returns an error if the values cannot be unmarshaled into the provided type. +func UnmarshalNodeCapViewJSON[T any](cm views.MapSlice[NodeCapability, RawMessage], cap NodeCapability) ([]T, error) { + vals, ok := cm.GetOk(cap) if !ok { return nil, nil } - out := make([]T, 0, len(vals)) - for _, v := range vals { + out := make([]T, 0, vals.Len()) + for _, v := range vals.All() { var t T if err := json.Unmarshal([]byte(v), &t); err != nil { return nil, err @@ -1582,12 +1590,19 @@ type PeerCapMap map[PeerCapability][]RawMessage // If cap does not exist in cm, it returns (nil, nil). // It returns an error if the values cannot be unmarshaled into the provided type. func UnmarshalCapJSON[T any](cm PeerCapMap, cap PeerCapability) ([]T, error) { - vals, ok := cm[cap] + return UnmarshalCapViewJSON[T](views.MapSliceOf(cm), cap) +} + +// UnmarshalCapViewJSON unmarshals each JSON value in cm.Get(cap) as T. +// If cap does not exist in cm, it returns (nil, nil). +// It returns an error if the values cannot be unmarshaled into the provided type. +func UnmarshalCapViewJSON[T any](cm views.MapSlice[PeerCapability, RawMessage], cap PeerCapability) ([]T, error) { + vals, ok := cm.GetOk(cap) if !ok { return nil, nil } - out := make([]T, 0, len(vals)) - for _, v := range vals { + out := make([]T, 0, vals.Len()) + for _, v := range vals.All() { var t T if err := json.Unmarshal([]byte(v), &t); err != nil { return nil, err diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index ab22eec3e..051b0f0dc 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -115,7 +115,7 @@ func (nm *NetworkMap) GetVIPServiceIPMap() tailcfg.ServiceIPMappings { return nil } - ipMaps, err := tailcfg.UnmarshalNodeCapJSON[tailcfg.ServiceIPMappings](nm.SelfNode.CapMap().AsMap(), tailcfg.NodeAttrServiceHost) + ipMaps, err := tailcfg.UnmarshalNodeCapViewJSON[tailcfg.ServiceIPMappings](nm.SelfNode.CapMap(), tailcfg.NodeAttrServiceHost) if len(ipMaps) != 1 || err != nil { return nil } From 97c4c0ecf02c792b3fcc7a9d009c2204c00c6d18 Mon Sep 17 00:00:00 2001 From: Adrian Dewhurst Date: Fri, 31 Jan 2025 15:25:48 -0500 Subject: [PATCH 0448/1708] ipn/ipnlocal: add VIP service IPs to localnets Without adding this, the packet filter rejects traffic to VIP service addresses before checking the filters sent in the netmap. 
Fixes tailscale/corp#26241 Change-Id: Idd54448048e9b786cf4873fd33b3b21e03d3ad4c Signed-off-by: Adrian Dewhurst --- ipn/ipnlocal/local.go | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index faf5d13db..373da9881 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2368,6 +2368,29 @@ func (b *LocalBackend) Start(opts ipn.Options) error { return nil } +// addServiceIPs adds the IP addresses of any VIP Services sent from the +// coordination server to the list of addresses that we expect to handle. +func addServiceIPs(localNetsB *netipx.IPSetBuilder, selfNode tailcfg.NodeView) error { + if !selfNode.Valid() { + return nil + } + + serviceMap, err := tailcfg.UnmarshalNodeCapViewJSON[tailcfg.ServiceIPMappings](selfNode.CapMap(), tailcfg.NodeAttrServiceHost) + if err != nil { + return err + } + + for _, sm := range serviceMap { // typically there will be exactly one of these + for _, serviceAddrs := range sm { + for _, addr := range serviceAddrs { // typically there will be exactly two of these + localNetsB.Add(addr) + } + } + } + + return nil +} + // invalidPacketFilterWarnable is a Warnable to warn the user that the control server sent an invalid packet filter. var invalidPacketFilterWarnable = health.Register(&health.Warnable{ Code: "invalid-packet-filter", @@ -2411,6 +2434,10 @@ func (b *LocalBackend) updateFilterLocked(netMap *netmap.NetworkMap, prefs ipn.P } else { b.health.SetHealthy(invalidPacketFilterWarnable) } + + if err := addServiceIPs(&localNetsB, netMap.SelfNode); err != nil { + b.logf("addServiceIPs: %v", err) + } } if prefs.Valid() { for _, r := range prefs.AdvertiseRoutes().All() { From 80a100b3cb7033bbb0d8bc0846719430a8d37d99 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Mon, 3 Feb 2025 10:24:42 -0800 Subject: [PATCH 0449/1708] net/netmon: add extra panic guard around ParseRIB We once again have a report of a panic from ParseRIB. This panic guard should probably remain permanent. Updates #14201 This reverts commit de9d4b2f886b6bf5cf0fe9be6c17d080267acef1. Signed-off-by: James Tucker --- net/netmon/netmon_darwin.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/net/netmon/netmon_darwin.go b/net/netmon/netmon_darwin.go index cc6301125..8a521919b 100644 --- a/net/netmon/netmon_darwin.go +++ b/net/netmon/netmon_darwin.go @@ -56,7 +56,19 @@ func (m *darwinRouteMon) Receive() (message, error) { if err != nil { return nil, err } - msgs, err := route.ParseRIB(route.RIBTypeRoute, m.buf[:n]) + msgs, err := func() (msgs []route.Message, err error) { + defer func() { + // #14201: permanent panic protection, as we have been burned by + // ParseRIB panics too many times. 
+ msg := recover() + if msg != nil { + msgs = nil + m.logf("[unexpected] netmon: panic in route.ParseRIB from % 02x", m.buf[:n]) + err = fmt.Errorf("panic in route.ParseRIB: %s", msg) + } + }() + return route.ParseRIB(route.RIBTypeRoute, m.buf[:n]) + }() if err != nil { if debugRouteMessages { m.logf("read %d bytes (% 02x), failed to parse RIB: %v", n, m.buf[:n], err) From cfe578870d5e3c36dc3337a4cd37ed4ad07293b8 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 3 Feb 2025 15:14:16 -0800 Subject: [PATCH 0450/1708] derp: tcp-write-timeout=0 should disable write deadline (#14895) Updates tailscale/corp#26316 Signed-off-by: Jordan Whited --- derp/derp_server.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/derp/derp_server.go b/derp/derp_server.go index 0389eed64..15fc0dfb8 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -1827,6 +1827,14 @@ func (c *sclient) setWriteDeadline() { // of connected peers. d = privilegedWriteTimeout } + if d == 0 { + // A zero value should disable the write deadline per + // --tcp-write-timeout docs. The flag should only be applicable for + // non-mesh connections, again per its docs. If mesh happened to use a + // zero value constant above it would be a bug, so we don't bother + // with a condition on c.canMesh. + return + } // Ignore the error from setting the write deadline. In practice, // setting the deadline will only fail if the connection is closed // or closing, so the subsequent Write() will fail anyway. From 5ef934b62d1a8a4c81e4b167c36c9b8c7dd46463 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 4 Feb 2025 15:09:43 +0200 Subject: [PATCH 0451/1708] cmd/k8s-operator: reinstate HA Ingress reconciler (#14887) This change: - reinstates the HA Ingress controller that was disabled for 1.80 release - fixes the API calls to manage VIPServices as the API was changed - triggers the HA Ingress reconciler on ProxyGroup changes Updates tailscale/tailscale#24795 Signed-off-by: Irbe Krumina --- .../crds/tailscale.com_proxygroups.yaml | 2 +- .../deploy/manifests/operator.yaml | 2 +- cmd/k8s-operator/ingress-for-pg.go | 42 ++++---- cmd/k8s-operator/ingress-for-pg_test.go | 4 +- cmd/k8s-operator/operator.go | 102 ++++++++++++++++++ cmd/k8s-operator/testutils_test.go | 11 +- cmd/k8s-operator/tsclient.go | 26 ++--- k8s-operator/api.md | 2 +- .../apis/v1alpha1/types_proxygroup.go | 2 +- 9 files changed, 149 insertions(+), 44 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index e101c201f..86e74e441 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -103,7 +103,7 @@ spec: pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ type: description: |- - Type of the ProxyGroup proxies. Currently the only supported type is egress. + Type of the ProxyGroup proxies. Supported types are egress and ingress. Type is immutable once a ProxyGroup is created. type: string enum: diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 54b32bef0..e966ef559 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2860,7 +2860,7 @@ spec: type: array type: description: |- - Type of the ProxyGroup proxies. Currently the only supported type is egress. + Type of the ProxyGroup proxies. Supported types are egress and ingress. 
Type is immutable once a ProxyGroup is created. enum: - egress diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index e90187d58..5a67a891f 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -44,6 +44,8 @@ const ( VIPSvcOwnerRef = "tailscale.com/k8s-operator:owned-by:%s" // FinalizerNamePG is the finalizer used by the IngressPGReconciler FinalizerNamePG = "tailscale.com/ingress-pg-finalizer" + + indexIngressProxyGroup = ".metadata.annotations.ingress-proxy-group" ) var gaugePGIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressPGResourceCount) @@ -180,7 +182,8 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin return fmt.Errorf("error determining DNS name base: %w", err) } dnsName := hostname + "." + tcd - existingVIPSvc, err := a.tsClient.getVIPServiceByName(ctx, hostname) + serviceName := tailcfg.ServiceName("svc:" + hostname) + existingVIPSvc, err := a.tsClient.getVIPService(ctx, serviceName) // TODO(irbekrm): here and when creating the VIPService, verify if the error is not terminal (and therefore // should not be reconciled). For example, if the hostname is already a hostname of a Tailscale node, the GET // here will fail. @@ -222,7 +225,6 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin }, }, } - serviceName := tailcfg.ServiceName("svc:" + hostname) var gotCfg *ipn.ServiceConfig if cfg != nil && cfg.Services != nil { gotCfg = cfg.Services[serviceName] @@ -247,7 +249,7 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin } vipSvc := &VIPService{ - Name: hostname, + Name: serviceName, Tags: tags, Ports: []string{"443"}, // always 443 for Ingress Comment: fmt.Sprintf(VIPSvcOwnerRef, ing.UID), @@ -257,7 +259,7 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin } if existingVIPSvc == nil || !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) { logger.Infof("Ensuring VIPService %q exists and is up to date", hostname) - if err := a.tsClient.createOrUpdateVIPServiceByName(ctx, vipSvc); err != nil { + if err := a.tsClient.createOrUpdateVIPService(ctx, vipSvc); err != nil { logger.Infof("error creating VIPService: %v", err) return fmt.Errorf("error creating VIPService: %w", err) } @@ -305,39 +307,39 @@ func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG } serveConfigChanged := false // For each VIPService in serve config... 
- for vipHostname := range cfg.Services { + for vipServiceName := range cfg.Services { // ...check if there is currently an Ingress with this hostname found := false for _, i := range ingList.Items { ingressHostname := hostnameForIngress(&i) - if ingressHostname == vipHostname.WithoutPrefix() { + if ingressHostname == vipServiceName.WithoutPrefix() { found = true break } } if !found { - logger.Infof("VIPService %q is not owned by any Ingress, cleaning up", vipHostname) - svc, err := a.getVIPService(ctx, vipHostname.WithoutPrefix(), logger) + logger.Infof("VIPService %q is not owned by any Ingress, cleaning up", vipServiceName) + svc, err := a.getVIPService(ctx, vipServiceName, logger) if err != nil { errResp := &tailscale.ErrResponse{} if errors.As(err, &errResp) && errResp.Status == http.StatusNotFound { - delete(cfg.Services, vipHostname) + delete(cfg.Services, vipServiceName) serveConfigChanged = true continue } return err } if isVIPServiceForAnyIngress(svc) { - logger.Infof("cleaning up orphaned VIPService %q", vipHostname) - if err := a.tsClient.deleteVIPServiceByName(ctx, vipHostname.WithoutPrefix()); err != nil { + logger.Infof("cleaning up orphaned VIPService %q", vipServiceName) + if err := a.tsClient.deleteVIPService(ctx, vipServiceName); err != nil { errResp := &tailscale.ErrResponse{} if !errors.As(err, &errResp) || errResp.Status != http.StatusNotFound { - return fmt.Errorf("deleting VIPService %q: %w", vipHostname, err) + return fmt.Errorf("deleting VIPService %q: %w", vipServiceName, err) } } } - delete(cfg.Services, vipHostname) + delete(cfg.Services, vipServiceName) serveConfigChanged = true } } @@ -386,7 +388,7 @@ func (a *IngressPGReconciler) maybeCleanup(ctx context.Context, hostname string, logger.Infof("Ensuring that VIPService %q configuration is cleaned up", hostname) // 2. Delete the VIPService. - if err := a.deleteVIPServiceIfExists(ctx, hostname, ing, logger); err != nil { + if err := a.deleteVIPServiceIfExists(ctx, serviceName, ing, logger); err != nil { return fmt.Errorf("error deleting VIPService: %w", err) } @@ -478,13 +480,13 @@ func (a *IngressPGReconciler) shouldExpose(ing *networkingv1.Ingress) bool { return isTSIngress && pgAnnot != "" } -func (a *IngressPGReconciler) getVIPService(ctx context.Context, hostname string, logger *zap.SugaredLogger) (*VIPService, error) { - svc, err := a.tsClient.getVIPServiceByName(ctx, hostname) +func (a *IngressPGReconciler) getVIPService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (*VIPService, error) { + svc, err := a.tsClient.getVIPService(ctx, name) if err != nil { errResp := &tailscale.ErrResponse{} if ok := errors.As(err, errResp); ok && errResp.Status != http.StatusNotFound { - logger.Infof("error getting VIPService %q: %v", hostname, err) - return nil, fmt.Errorf("error getting VIPService %q: %w", hostname, err) + logger.Infof("error getting VIPService %q: %v", name, err) + return nil, fmt.Errorf("error getting VIPService %q: %w", name, err) } } return svc, nil @@ -550,7 +552,7 @@ func (a *IngressPGReconciler) validateIngress(ing *networkingv1.Ingress, pg *tsa } // deleteVIPServiceIfExists attempts to delete the VIPService if it exists and is owned by the given Ingress. 
-func (a *IngressPGReconciler) deleteVIPServiceIfExists(ctx context.Context, name string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { +func (a *IngressPGReconciler) deleteVIPServiceIfExists(ctx context.Context, name tailcfg.ServiceName, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { svc, err := a.getVIPService(ctx, name, logger) if err != nil { return fmt.Errorf("error getting VIPService: %w", err) @@ -562,7 +564,7 @@ func (a *IngressPGReconciler) deleteVIPServiceIfExists(ctx context.Context, name } logger.Infof("Deleting VIPService %q", name) - if err = a.tsClient.deleteVIPServiceByName(ctx, name); err != nil { + if err = a.tsClient.deleteVIPService(ctx, name); err != nil { return fmt.Errorf("error deleting VIPService: %w", err) } return nil diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 9ef36f696..9317a44d4 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -142,7 +142,7 @@ func TestIngressPGReconciler(t *testing.T) { } // Verify VIPService uses default tags - vipSvc, err := ft.getVIPServiceByName(context.Background(), "my-svc") + vipSvc, err := ft.getVIPService(context.Background(), "svc:my-svc") if err != nil { t.Fatalf("getting VIPService: %v", err) } @@ -161,7 +161,7 @@ func TestIngressPGReconciler(t *testing.T) { expectReconciled(t, ingPGR, "default", "test-ingress") // Verify VIPService uses custom tags - vipSvc, err = ft.getVIPServiceByName(context.Background(), "my-svc") + vipSvc, err = ft.getVIPService(context.Background(), "svc:my-svc") if err != nil { t.Fatalf("getting VIPService: %v", err) } diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 8fcd1342c..37e37a96e 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -331,6 +331,33 @@ func runReconcilers(opts reconcilerOpts) { if err != nil { startlog.Fatalf("could not create ingress reconciler: %v", err) } + lc, err := opts.tsServer.LocalClient() + if err != nil { + startlog.Fatalf("could not get local client: %v", err) + } + ingressProxyGroupFilter := handler.EnqueueRequestsFromMapFunc(ingressesFromIngressProxyGroup(mgr.GetClient(), opts.log)) + err = builder. + ControllerManagedBy(mgr). + For(&networkingv1.Ingress{}). + Named("ingress-pg-reconciler"). + Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog))). + Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). + Complete(&IngressPGReconciler{ + recorder: eventRecorder, + tsClient: opts.tsClient, + tsnetServer: opts.tsServer, + defaultTags: strings.Split(opts.proxyTags, ","), + Client: mgr.GetClient(), + logger: opts.log.Named("ingress-pg-reconciler"), + lc: lc, + tsNamespace: opts.tailscaleNamespace, + }) + if err != nil { + startlog.Fatalf("could not create ingress-pg-reconciler: %v", err) + } + if err := mgr.GetFieldIndexer().IndexField(context.Background(), new(networkingv1.Ingress), indexIngressProxyGroup, indexPGIngresses); err != nil { + startlog.Fatalf("failed setting up indexer for HA Ingresses: %v", err) + } connectorFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("connector")) // If a ProxyClassChanges, enqueue all Connectors that have @@ -1036,6 +1063,36 @@ func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) } } +// ingressesFromIngressProxyGroup is an event handler for ingress ProxyGroups. 
It returns reconcile requests for all +// user-created Ingresses that should be exposed on this ProxyGroup. +func ingressesFromIngressProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + pg, ok := o.(*tsapi.ProxyGroup) + if !ok { + logger.Infof("[unexpected] ProxyGroup handler triggered for an object that is not a ProxyGroup") + return nil + } + if pg.Spec.Type != tsapi.ProxyGroupTypeIngress { + return nil + } + ingList := &networkingv1.IngressList{} + if err := cl.List(ctx, ingList, client.MatchingFields{indexIngressProxyGroup: pg.Name}); err != nil { + logger.Infof("error listing Ingresses: %v, skipping a reconcile for event on ProxyGroup %s", err, pg.Name) + return nil + } + reqs := make([]reconcile.Request, 0) + for _, svc := range ingList.Items { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: svc.Namespace, + Name: svc.Name, + }, + }) + } + return reqs + } +} + // epsFromExternalNameService is an event handler for ExternalName Services that define a Tailscale egress service that // should be exposed on a ProxyGroup. It returns reconcile requests for EndpointSlices created for this Service. func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger, ns string) handler.MapFunc { @@ -1156,6 +1213,51 @@ func indexEgressServices(o client.Object) []string { return []string{o.GetAnnotations()[AnnotationProxyGroup]} } +// indexPGIngresses adds a local index to a cached Tailscale Ingresses meant to be exposed on a ProxyGroup. The index is +// used a list filter. +func indexPGIngresses(o client.Object) []string { + if !hasProxyGroupAnnotation(o) { + return nil + } + return []string{o.GetAnnotations()[AnnotationProxyGroup]} +} + +// serviceHandlerForIngressPG returns a handler for Service events that ensures that if the Service +// associated with an event is a backend Service for a tailscale Ingress with ProxyGroup annotation, +// the associated Ingress gets reconciled. 
+func serviceHandlerForIngressPG(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + ingList := networkingv1.IngressList{} + if err := cl.List(ctx, &ingList, client.InNamespace(o.GetNamespace())); err != nil { + logger.Debugf("error listing Ingresses: %v", err) + return nil + } + reqs := make([]reconcile.Request, 0) + for _, ing := range ingList.Items { + if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != tailscaleIngressClassName { + continue + } + if !hasProxyGroupAnnotation(&ing) { + continue + } + if ing.Spec.DefaultBackend != nil && ing.Spec.DefaultBackend.Service != nil && ing.Spec.DefaultBackend.Service.Name == o.GetName() { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) + } + for _, rule := range ing.Spec.Rules { + if rule.HTTP == nil { + continue + } + for _, path := range rule.HTTP.Paths { + if path.Backend.Service != nil && path.Backend.Service.Name == o.GetName() { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) + } + } + } + } + return reqs + } +} + func hasProxyGroupAnnotation(obj client.Object) bool { ing := obj.(*networkingv1.Ingress) return ing.Annotations[AnnotationProxyGroup] != "" diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 83c42cb76..386005b1f 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -32,6 +32,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/tailcfg" "tailscale.com/types/ptr" "tailscale.com/util/mak" ) @@ -767,7 +768,7 @@ type fakeTSClient struct { sync.Mutex keyRequests []tailscale.KeyCapabilities deleted []string - vipServices map[string]*VIPService + vipServices map[tailcfg.ServiceName]*VIPService } type fakeTSNetServer struct { certDomains []string @@ -874,7 +875,7 @@ func removeAuthKeyIfExistsModifier(t *testing.T) func(s *corev1.Secret) { } } -func (c *fakeTSClient) getVIPServiceByName(ctx context.Context, name string) (*VIPService, error) { +func (c *fakeTSClient) getVIPService(ctx context.Context, name tailcfg.ServiceName) (*VIPService, error) { c.Lock() defer c.Unlock() if c.vipServices == nil { @@ -887,17 +888,17 @@ func (c *fakeTSClient) getVIPServiceByName(ctx context.Context, name string) (*V return svc, nil } -func (c *fakeTSClient) createOrUpdateVIPServiceByName(ctx context.Context, svc *VIPService) error { +func (c *fakeTSClient) createOrUpdateVIPService(ctx context.Context, svc *VIPService) error { c.Lock() defer c.Unlock() if c.vipServices == nil { - c.vipServices = make(map[string]*VIPService) + c.vipServices = make(map[tailcfg.ServiceName]*VIPService) } c.vipServices[svc.Name] = svc return nil } -func (c *fakeTSClient) deleteVIPServiceByName(ctx context.Context, name string) error { +func (c *fakeTSClient) deleteVIPService(ctx context.Context, name tailcfg.ServiceName) error { c.Lock() defer c.Unlock() if c.vipServices != nil { diff --git a/cmd/k8s-operator/tsclient.go b/cmd/k8s-operator/tsclient.go index 5352629de..2381438b2 100644 --- a/cmd/k8s-operator/tsclient.go +++ b/cmd/k8s-operator/tsclient.go @@ -17,6 +17,7 @@ import ( "golang.org/x/oauth2/clientcredentials" "tailscale.com/client/tailscale" + "tailscale.com/tailcfg" "tailscale.com/util/httpm" ) @@ -56,9 +57,9 @@ type tsClient interface { CreateKey(ctx context.Context, caps tailscale.KeyCapabilities) (string, *tailscale.Key, 
error) Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error) DeleteDevice(ctx context.Context, nodeStableID string) error - getVIPServiceByName(ctx context.Context, name string) (*VIPService, error) - createOrUpdateVIPServiceByName(ctx context.Context, svc *VIPService) error - deleteVIPServiceByName(ctx context.Context, name string) error + getVIPService(ctx context.Context, name tailcfg.ServiceName) (*VIPService, error) + createOrUpdateVIPService(ctx context.Context, svc *VIPService) error + deleteVIPService(ctx context.Context, name tailcfg.ServiceName) error } type tsClientImpl struct { @@ -69,9 +70,8 @@ type tsClientImpl struct { // VIPService is a Tailscale VIPService with Tailscale API JSON representation. type VIPService struct { - // Name is the leftmost label of the DNS name of the VIP service. - // Name is required. - Name string `json:"name,omitempty"` + // Name is a VIPService name in form svc:. + Name tailcfg.ServiceName `json:"name,omitempty"` // Addrs are the IP addresses of the VIP Service. There are two addresses: // the first is IPv4 and the second is IPv6. // When creating a new VIP Service, the IP addresses are optional: if no @@ -89,8 +89,8 @@ type VIPService struct { } // GetVIPServiceByName retrieves a VIPService by its name. It returns 404 if the VIPService is not found. -func (c *tsClientImpl) getVIPServiceByName(ctx context.Context, name string) (*VIPService, error) { - path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/by-name/%s", c.baseURL, c.tailnet, url.PathEscape(name)) +func (c *tsClientImpl) getVIPService(ctx context.Context, name tailcfg.ServiceName) (*VIPService, error) { + path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/%s", c.baseURL, c.tailnet, url.PathEscape(name.String())) req, err := http.NewRequestWithContext(ctx, httpm.GET, path, nil) if err != nil { return nil, fmt.Errorf("error creating new HTTP request: %w", err) @@ -111,16 +111,16 @@ func (c *tsClientImpl) getVIPServiceByName(ctx context.Context, name string) (*V return svc, nil } -// CreateOrUpdateVIPServiceByName creates or updates a VIPService by its name. Caller must ensure that, if the +// createOrUpdateVIPService creates or updates a VIPService by its name. Caller must ensure that, if the // VIPService already exists, the VIPService is fetched first to ensure that any auto-allocated IP addresses are not // lost during the update. If the VIPService was created without any IP addresses explicitly set (so that they were // auto-allocated by Tailscale) any subsequent request to this function that does not set any IP addresses will error. -func (c *tsClientImpl) createOrUpdateVIPServiceByName(ctx context.Context, svc *VIPService) error { +func (c *tsClientImpl) createOrUpdateVIPService(ctx context.Context, svc *VIPService) error { data, err := json.Marshal(svc) if err != nil { return err } - path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/by-name/%s", c.baseURL, c.tailnet, url.PathEscape(svc.Name)) + path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/%s", c.baseURL, c.tailnet, url.PathEscape(svc.Name.String())) req, err := http.NewRequestWithContext(ctx, httpm.PUT, path, bytes.NewBuffer(data)) if err != nil { return fmt.Errorf("error creating new HTTP request: %w", err) @@ -139,8 +139,8 @@ func (c *tsClientImpl) createOrUpdateVIPServiceByName(ctx context.Context, svc * // DeleteVIPServiceByName deletes a VIPService by its name. It returns an error if the VIPService // does not exist or if the deletion fails. 
-func (c *tsClientImpl) deleteVIPServiceByName(ctx context.Context, name string) error { - path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/by-name/%s", c.baseURL, c.tailnet, url.PathEscape(name)) +func (c *tsClientImpl) deleteVIPService(ctx context.Context, name tailcfg.ServiceName) error { + path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/%s", c.baseURL, c.tailnet, url.PathEscape(name.String())) req, err := http.NewRequestWithContext(ctx, httpm.DELETE, path, nil) if err != nil { return fmt.Errorf("error creating new HTTP request: %w", err) diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 64756c8f1..fae25b1f6 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -599,7 +599,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Currently the only supported type is egress.
Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress]
Type: string
| +| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Supported types are egress and ingress.
Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress]
Type: string
| | `tags` _[Tags](#tags)_ | Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s].
If you specify custom tags here, make sure you also make the operator
an owner of these tags.
See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
Tags cannot be changed once a ProxyGroup device has been created.
Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
Type: string
| | `replicas` _integer_ | Replicas specifies how many replicas to create the StatefulSet with.
Defaults to 2. | | Minimum: 0
| | `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix is the hostname prefix to use for tailnet devices created
by the ProxyGroup. Each device will have the integer number from its
StatefulSet pod appended to this prefix to form the full hostname.
HostnamePrefix can contain lower case letters, numbers and dashes, it
must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$`
Type: string
| diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index cb9f678f8..f95fc58d0 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -48,7 +48,7 @@ type ProxyGroupList struct { } type ProxyGroupSpec struct { - // Type of the ProxyGroup proxies. Currently the only supported type is egress. + // Type of the ProxyGroup proxies. Supported types are egress and ingress. // Type is immutable once a ProxyGroup is created. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ProxyGroup type is immutable" Type ProxyGroupType `json:"type"` From 00fe8845b1e2f4ef5dedfe5989812d8beddfb1f9 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 4 Feb 2025 14:20:06 -0600 Subject: [PATCH 0452/1708] ipn/{ipnauth,ipnlocal,ipnserver}: move the AlwaysOn policy check from ipnserver to ipnauth In this PR, we move the code that checks the AlwaysOn policy from ipnserver.actor to ipnauth. It is intended to be used by ipnauth.Actor implementations, and we temporarily make it exported while these implementations reside in ipnserver and in corp. We'll unexport it later. We also update [ipnauth.Actor.CheckProfileAccess] to accept an auditLogger, which is called to write details about the action to the audit log when required by the policy, and update LocalBackend.EditPrefsAs to use an auditLogger that writes to the regular backend log. Updates tailscale/corp#26146 Signed-off-by: Nick Khyl --- ipn/ipnauth/actor.go | 10 ++++++++- ipn/ipnauth/policy.go | 46 +++++++++++++++++++++++++++++++++++++++ ipn/ipnauth/self.go | 2 +- ipn/ipnauth/test_actor.go | 2 +- ipn/ipnlocal/local.go | 4 +++- ipn/ipnserver/actor.go | 17 +++------------ 6 files changed, 63 insertions(+), 18 deletions(-) create mode 100644 ipn/ipnauth/policy.go diff --git a/ipn/ipnauth/actor.go b/ipn/ipnauth/actor.go index 92e3b202f..446cb4635 100644 --- a/ipn/ipnauth/actor.go +++ b/ipn/ipnauth/actor.go @@ -10,6 +10,11 @@ import ( "tailscale.com/ipn" ) +// AuditLogFunc is any function that can be used to log audit actions performed by an [Actor]. +// +// TODO(nickkhyl,barnstar): define a named string type for the action (in tailcfg?) and use it here. +type AuditLogFunc func(action, details string) + // Actor is any actor using the [ipnlocal.LocalBackend]. // // It typically represents a specific OS user, indicating that an operation @@ -30,7 +35,10 @@ type Actor interface { // CheckProfileAccess checks whether the actor has the necessary access rights // to perform a given action on the specified Tailscale profile. // It returns an error if access is denied. - CheckProfileAccess(profile ipn.LoginProfileView, requestedAccess ProfileAccess) error + // + // If the auditLogger is non-nil, it is used to write details about the action + // to the audit log when required by the policy. + CheckProfileAccess(profile ipn.LoginProfileView, requestedAccess ProfileAccess, auditLogger AuditLogFunc) error // IsLocalSystem reports whether the actor is the Windows' Local System account. 
// diff --git a/ipn/ipnauth/policy.go b/ipn/ipnauth/policy.go new file mode 100644 index 000000000..c61f9cd89 --- /dev/null +++ b/ipn/ipnauth/policy.go @@ -0,0 +1,46 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnauth + +import ( + "errors" + "fmt" + + "tailscale.com/ipn" + "tailscale.com/util/syspolicy" +) + +// CheckDisconnectPolicy checks if the policy allows the specified actor to disconnect +// Tailscale with the given optional reason. It returns nil if the operation is allowed, +// or an error if it is not. If auditLogger is non-nil, it is called to log the action +// when required by the policy. +// +// Note: this function only checks the policy and does not check whether the actor has +// the necessary access rights to the device or profile. It is intended to be used by +// [Actor] implementations on platforms where [syspolicy] is supported. +// +// TODO(nickkhyl): unexport it when we move [ipn.Actor] implementations from [ipnserver] +// and corp to this package. +func CheckDisconnectPolicy(actor Actor, profile ipn.LoginProfileView, reason string, auditLogger AuditLogFunc) error { + if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); !alwaysOn { + return nil + } + if allowWithReason, _ := syspolicy.GetBoolean(syspolicy.AlwaysOnOverrideWithReason, false); !allowWithReason { + return errors.New("disconnect not allowed: always-on mode is enabled") + } + if reason == "" { + return errors.New("disconnect not allowed: reason required") + } + if auditLogger != nil { + var details string + if username, _ := actor.Username(); username != "" { // best-effort; we don't have it on all platforms + details = fmt.Sprintf("%q is being disconnected by %q: %v", profile.Name(), username, reason) + } else { + details = fmt.Sprintf("%q is being disconnected: %v", profile.Name(), reason) + } + // TODO(nickkhyl,barnstar): use a const for DISCONNECT_NODE. + auditLogger("DISCONNECT_NODE", details) + } + return nil +} diff --git a/ipn/ipnauth/self.go b/ipn/ipnauth/self.go index d8ece45c5..271be9815 100644 --- a/ipn/ipnauth/self.go +++ b/ipn/ipnauth/self.go @@ -28,7 +28,7 @@ func (u unrestricted) Username() (string, error) { return "", nil } func (u unrestricted) ClientID() (_ ClientID, ok bool) { return NoClientID, false } // CheckProfileAccess implements [Actor]. -func (u unrestricted) CheckProfileAccess(_ ipn.LoginProfileView, _ ProfileAccess) error { +func (u unrestricted) CheckProfileAccess(_ ipn.LoginProfileView, _ ProfileAccess, _ AuditLogFunc) error { // Unrestricted access to all profiles. return nil } diff --git a/ipn/ipnauth/test_actor.go b/ipn/ipnauth/test_actor.go index 0d4a0e37d..ba4e03c93 100644 --- a/ipn/ipnauth/test_actor.go +++ b/ipn/ipnauth/test_actor.go @@ -31,7 +31,7 @@ func (a *TestActor) Username() (string, error) { return a.Name, a.NameErr } func (a *TestActor) ClientID() (_ ClientID, ok bool) { return a.CID, a.CID != NoClientID } // CheckProfileAccess implements [Actor]. 
-func (a *TestActor) CheckProfileAccess(profile ipn.LoginProfileView, _ ProfileAccess) error { +func (a *TestActor) CheckProfileAccess(profile ipn.LoginProfileView, _ ProfileAccess, _ AuditLogFunc) error { return errors.New("profile access denied") } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 373da9881..38bcfaaa2 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4058,7 +4058,9 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip unlock := b.lockAndGetUnlock() defer unlock() if mp.WantRunningSet && !mp.WantRunning && b.pm.CurrentPrefs().WantRunning() { - if err := actor.CheckProfileAccess(b.pm.CurrentProfile(), ipnauth.Disconnect); err != nil { + // TODO(barnstar,nickkhyl): replace loggerFn with the actual audit logger. + loggerFn := func(action, details string) { b.logf("[audit]: %s: %s", action, details) } + if err := actor.CheckProfileAccess(b.pm.CurrentProfile(), ipnauth.Disconnect, loggerFn); err != nil { return ipn.PrefsView{}, err } diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 652716670..6ee7a04d7 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -17,7 +17,6 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/ctxkey" "tailscale.com/util/osuser" - "tailscale.com/util/syspolicy" "tailscale.com/version" ) @@ -80,7 +79,7 @@ func actorWithAccessOverride(baseActor *actor, reason string) *actor { } // CheckProfileAccess implements [ipnauth.Actor]. -func (a *actor) CheckProfileAccess(profile ipn.LoginProfileView, requestedAccess ipnauth.ProfileAccess) error { +func (a *actor) CheckProfileAccess(profile ipn.LoginProfileView, requestedAccess ipnauth.ProfileAccess, auditLogger ipnauth.AuditLogFunc) error { // TODO(nickkhyl): return errors of more specific types and have them // translated to the appropriate HTTP status codes in the API handler. if profile.LocalUserID() != a.UserID() { @@ -88,18 +87,8 @@ func (a *actor) CheckProfileAccess(profile ipn.LoginProfileView, requestedAccess } switch requestedAccess { case ipnauth.Disconnect: - if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); alwaysOn { - if allowWithReason, _ := syspolicy.GetBoolean(syspolicy.AlwaysOnOverrideWithReason, false); !allowWithReason { - return errors.New("disconnect not allowed: always-on mode is enabled") - } - if a.accessOverrideReason == "" { - return errors.New("disconnect not allowed: reason required") - } - maybeUsername, _ := a.Username() // best-effort - a.logf("Tailscale (%q) is being disconnected by %q: %v", profile.Name(), maybeUsername, a.accessOverrideReason) - // TODO(nickkhyl): Log the reason to the audit log once we have one. - } - return nil // disconnect is allowed + // Disconnect is allowed if a user owns the profile and the policy permits it. 
+ return ipnauth.CheckDisconnectPolicy(a, profile, a.accessOverrideReason, auditLogger) default: return errors.New("the requested operation is not allowed") } From 0b7087c4012096ea5329b021606b0fc012ef6856 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Tue, 4 Feb 2025 12:51:27 -0800 Subject: [PATCH 0453/1708] logpolicy: expose MaxBufferSize and MaxUploadSize options (#14903) Updates tailscale/corp#26342 Signed-off-by: Joe Tsai --- logpolicy/logpolicy.go | 26 ++++++++++++++++++++------ logtail/logtail.go | 11 ++++++++--- 2 files changed, 28 insertions(+), 9 deletions(-) diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index b9b813718..1419fff65 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -503,6 +503,18 @@ type Options struct { // If nil, [TransportOptions.New] is used to construct a new client // with that particular transport sending logs to the default logs server. HTTPC *http.Client + + // MaxBufferSize is the maximum size of the log buffer. + // This controls the amount of logs that can be temporarily stored + // before the logs can be successfully upload. + // If zero, a default buffer size is chosen. + MaxBufferSize int + + // MaxUploadSize is the maximum size per upload. + // This should only be set by clients that have been authenticated + // with the logging service as having a higher upload limit. + // If zero, a default upload size is chosen. + MaxUploadSize int } // New returns a new log policy (a logger and its instance ID). @@ -603,10 +615,11 @@ func (opts Options) New() *Policy { } conf := logtail.Config{ - Collection: newc.Collection, - PrivateID: newc.PrivateID, - Stderr: logWriter{console}, - CompressLogs: true, + Collection: newc.Collection, + PrivateID: newc.PrivateID, + Stderr: logWriter{console}, + CompressLogs: true, + MaxUploadSize: opts.MaxUploadSize, } if opts.Collection == logtail.CollectionNode { conf.MetricsDelta = clientmetric.EncodeLogTailMetricsDelta @@ -620,7 +633,7 @@ func (opts Options) New() *Policy { } else { // Only attach an on-disk filch buffer if we are going to be sending logs. // No reason to persist them locally just to drop them later. - attachFilchBuffer(&conf, opts.Dir, opts.CmdName, opts.Logf) + attachFilchBuffer(&conf, opts.Dir, opts.CmdName, opts.MaxBufferSize, opts.Logf) conf.HTTPC = opts.HTTPC if conf.HTTPC == nil { @@ -676,9 +689,10 @@ func (opts Options) New() *Policy { // attachFilchBuffer creates an on-disk ring buffer using filch and attaches // it to the logtail config. Note that this is optional; if no buffer is set, // logtail will use an in-memory buffer. 
-func attachFilchBuffer(conf *logtail.Config, dir, cmdName string, logf logger.Logf) { +func attachFilchBuffer(conf *logtail.Config, dir, cmdName string, maxFileSize int, logf logger.Logf) { filchOptions := filch.Options{ ReplaceStderr: redirectStderrToLogPanics(), + MaxFileSize: maxFileSize, } filchPrefix := filepath.Join(dir, cmdName) diff --git a/logtail/logtail.go b/logtail/logtail.go index 0e9c4f288..a617397f9 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -6,6 +6,7 @@ package logtail import ( "bytes" + "cmp" "context" "crypto/rand" "encoding/binary" @@ -78,6 +79,7 @@ type Config struct { StderrLevel int // max verbosity level to write to stderr; 0 means the non-verbose messages only Buffer Buffer // temp storage, if nil a MemoryBuffer CompressLogs bool // whether to compress the log uploads + MaxUploadSize int // maximum upload size; 0 means using the default // MetricsDelta, if non-nil, is a func that returns an encoding // delta in clientmetrics to upload alongside existing logs. @@ -157,6 +159,7 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger { url: cfg.BaseURL + "/c/" + cfg.Collection + "/" + cfg.PrivateID.String() + urlSuffix, lowMem: cfg.LowMemory, buffer: cfg.Buffer, + maxUploadSize: cfg.MaxUploadSize, skipClientTime: cfg.SkipClientTime, drainWake: make(chan struct{}, 1), sentinel: make(chan int32, 16), @@ -192,6 +195,7 @@ type Logger struct { skipClientTime bool netMonitor *netmon.Monitor buffer Buffer + maxUploadSize int drainWake chan struct{} // signal to speed up drain drainBuf []byte // owned by drainPending for reuse flushDelayFn func() time.Duration // negative or zero return value to upload aggressively, or >0 to batch at this delay @@ -325,7 +329,7 @@ func (l *Logger) drainPending() (b []byte) { } }() - maxLen := maxSize + maxLen := cmp.Or(l.maxUploadSize, maxSize) if l.lowMem { // When operating in a low memory environment, it is better to upload // in multiple operations than it is to allocate a large body and OOM. @@ -775,9 +779,10 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { // That's okay as the Tailscale log service limit is actually 2*maxSize. // However, so long as logging applications aim to target the maxSize limit, // there should be no trouble eventually uploading logs. - if len(src) > maxSize { + maxLen := cmp.Or(l.maxUploadSize, maxSize) + if len(src) > maxLen { errDetail := fmt.Sprintf("entry too large: %d bytes", len(src)) - errData := appendTruncatedString(nil, src, maxSize/len(`\uffff`)) // escaping could increase size + errData := appendTruncatedString(nil, src, maxLen/len(`\uffff`)) // escaping could increase size dst = append(dst, '{') dst = l.appendMetadata(dst, l.skipClientTime, true, l.procID, l.procSequence, errDetail, errData, level) From 9726e1f2089beadff6a0fd86fe1f8fe99d3daf5a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 4 Feb 2025 15:52:21 -0600 Subject: [PATCH 0454/1708] ipn/{ipnserver,localapi},tsnet: use ipnauth.Self as the actor in tsnet localapi handlers With #14843 merged, (*localapi.Handler).servePrefs() now requires a non-nil actor, and other places may soon require it as well. In this PR, we update localapi.NewHandler with a new required parameter for the actor. We then update tsnet to use ipnauth.Self. We also rearrange the code in (*ipnserver.Server).serveHTTP() to pass the actor via Handler's constructor instead of the field. 
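For embedders tracking this API change, the call-site difference is small. A rough sketch follows; it is not part of this patch, and newSelfHandler and its parameters are placeholder names for values the caller already holds:

import (
	"tailscale.com/ipn/ipnauth"
	"tailscale.com/ipn/ipnlocal"
	"tailscale.com/ipn/localapi"
	"tailscale.com/types/logger"
	"tailscale.com/types/logid"
)

// newSelfHandler shows how an in-process caller acting on its own behalf
// (as tsnet does) constructs the LocalAPI handler now that the actor is a
// required constructor argument rather than a field set afterwards.
func newSelfHandler(lb *ipnlocal.LocalBackend, logf logger.Logf, logID logid.PublicID) *localapi.Handler {
	lah := localapi.NewHandler(ipnauth.Self, lb, logf, logID)
	lah.PermitRead = true
	lah.PermitWrite = true
	return lah
}

The diff below makes the equivalent change in ipnserver and in both tsnet call sites.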
Updates #14823 Signed-off-by: Nick Khyl --- ipn/ipnserver/server.go | 14 +++++++------- ipn/localapi/localapi.go | 7 +++---- tsnet/tsnet.go | 5 +++-- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index a08643667..5df9375a4 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -196,22 +196,22 @@ func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) { defer onDone() if strings.HasPrefix(r.URL.Path, "/localapi/") { - lah := localapi.NewHandler(lb, s.logf, s.backendLogID) if actor, ok := ci.(*actor); ok { - lah.PermitRead, lah.PermitWrite = actor.Permissions(lb.OperatorUserID()) - lah.PermitCert = actor.CanFetchCerts() reason, err := base64.StdEncoding.DecodeString(r.Header.Get(apitype.RequestReasonHeader)) if err != nil { http.Error(w, "invalid reason header", http.StatusBadRequest) return } - lah.Actor = actorWithAccessOverride(actor, string(reason)) + ci = actorWithAccessOverride(actor, string(reason)) + } + + lah := localapi.NewHandler(ci, lb, s.logf, s.backendLogID) + if actor, ok := ci.(*actor); ok { + lah.PermitRead, lah.PermitWrite = actor.Permissions(lb.OperatorUserID()) + lah.PermitCert = actor.CanFetchCerts() } else if testenv.InTest() { lah.PermitRead, lah.PermitWrite = true, true } - if lah.Actor == nil { - lah.Actor = ci - } lah.ServeHTTP(w, r) return } diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index c75f732b6..d1f07ea4e 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -169,10 +169,9 @@ var ( metrics = map[string]*clientmetric.Metric{} ) -// NewHandler creates a new LocalAPI HTTP handler. All parameters except netMon -// are required (if non-nil it's used to do faster interface lookups). -func NewHandler(b *ipnlocal.LocalBackend, logf logger.Logf, logID logid.PublicID) *Handler { - return &Handler{b: b, logf: logf, backendLogID: logID, clock: tstime.StdClock{}} +// NewHandler creates a new LocalAPI HTTP handler. All parameters are required. +func NewHandler(actor ipnauth.Actor, b *ipnlocal.LocalBackend, logf logger.Logf, logID logid.PublicID) *Handler { + return &Handler{Actor: actor, b: b, logf: logf, backendLogID: logID, clock: tstime.StdClock{}} } type Handler struct { diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 23a9f9a98..e1494c65f 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -33,6 +33,7 @@ import ( "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnstate" "tailscale.com/ipn/localapi" @@ -272,7 +273,7 @@ func (s *Server) Loopback() (addr string, proxyCred, localAPICred string, err er // out the CONNECT code from tailscaled/proxy.go that uses // httputil.ReverseProxy and adding auth support. go func() { - lah := localapi.NewHandler(s.lb, s.logf, s.logid) + lah := localapi.NewHandler(ipnauth.Self, s.lb, s.logf, s.logid) lah.PermitWrite = true lah.PermitRead = true lah.RequiredPassword = s.localAPICred @@ -667,7 +668,7 @@ func (s *Server) start() (reterr error) { go s.printAuthURLLoop() // Run the localapi handler, to allow fetching LetsEncrypt certs. 
- lah := localapi.NewHandler(lb, tsLogf, s.logid) + lah := localapi.NewHandler(ipnauth.Self, lb, tsLogf, s.logid) lah.PermitWrite = true lah.PermitRead = true From e19c01f5b3e9760a010872b0ffa2075b897b9ad4 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Tue, 4 Feb 2025 15:51:03 -0800 Subject: [PATCH 0455/1708] clientupdate: refuse to update in tsnet binaries (#14911) When running via tsnet, c2n will be hooked up so requests to update can reach the node. But it will then apply whatever OS-specific update function, upgrading the local tailscaled instead. We can't update tsnet automatically, so refuse it. Fixes #14892 Signed-off-by: Andrew Lytvynov --- clientupdate/clientupdate.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/clientupdate/clientupdate.go b/clientupdate/clientupdate.go index 7fa84d67f..c5baeb8e9 100644 --- a/clientupdate/clientupdate.go +++ b/clientupdate/clientupdate.go @@ -27,6 +27,7 @@ import ( "strconv" "strings" + "tailscale.com/hostinfo" "tailscale.com/types/logger" "tailscale.com/util/cmpver" "tailscale.com/version" @@ -169,6 +170,12 @@ func NewUpdater(args Arguments) (*Updater, error) { type updateFunction func() error func (up *Updater) getUpdateFunction() (fn updateFunction, canAutoUpdate bool) { + hi := hostinfo.New() + // We don't know how to update custom tsnet binaries, it's up to the user. + if hi.Package == "tsnet" { + return nil, false + } + switch runtime.GOOS { case "windows": return up.updateWindows, true From d5316a4fbb4a1105ce2ba6f92d9688452b7747cd Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Wed, 5 Feb 2025 10:41:18 -0500 Subject: [PATCH 0456/1708] cmd/derper: add setec secret support (#14890) Add setec secret support for derper. Support dev mode via env var, and setec via secrets URL. For backwards compatibility use setec load from file also. 
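The core of this change is the setec lookup path. A minimal sketch of that flow is below; the server URL, cache directory, and secret name are caller-supplied placeholders, and this helper does not exist in the patch itself:

import (
	"context"
	"fmt"

	"github.com/tailscale/setec/client/setec"
)

// meshKeyFromSetec is an illustrative helper mirroring the lookup the patch
// performs in main() when --secrets-url is set: it opens an on-disk cache,
// creates a setec store for the one secret, and returns the raw key string
// for the caller to validate.
func meshKeyFromSetec(ctx context.Context, secretsURL, cacheDir, secretName string) (string, error) {
	fc, err := setec.NewFileCache(cacheDir) // on-disk cache, required alongside a secrets URL
	if err != nil {
		return "", fmt.Errorf("NewFileCache: %w", err)
	}
	st, err := setec.NewStore(ctx, setec.StoreConfig{
		Client:  setec.Client{Server: secretsURL},
		Secrets: []string{secretName},
		Cache:   fc,
	})
	if err != nil {
		return "", fmt.Errorf("NewStore: %w", err)
	}
	return st.Secret(secretName).GetString(), nil
}

In the patch the same calls run inline in main(), with --dev mode reading the TAILSCALE_DERPER_MESH_KEY environment variable instead and the existing --mesh-psk-file path kept as a file-based fallback.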
Updates tailscale/corp#25756 Signed-off-by: Mike O'Driscoll --- cmd/derper/depaware.txt | 3 ++ cmd/derper/derper.go | 70 +++++++++++++++++++++++--- cmd/derper/derper_test.go | 43 ++++++++++++++++ cmd/k8s-operator/depaware.txt | 5 +- cmd/tailscaled/depaware.txt | 5 +- go.mod | 39 +++++++------- go.sum | 95 ++++++++++++++++------------------- 7 files changed, 181 insertions(+), 79 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 5a39c110e..82dd08e63 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -51,6 +51,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink + github.com/tailscale/setec/client/setec from tailscale.com/cmd/derper + github.com/tailscale/setec/types/api from github.com/tailscale/setec/client/setec L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/tailscale+ @@ -207,6 +209,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from net+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ + golang.org/x/sync/singleflight from github.com/tailscale/setec/client/setec golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ LD golang.org/x/sys/unix from github.com/google/nftables+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 2c6ecd175..b36fad59a 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -27,6 +27,7 @@ import ( "net/http" "os" "os/signal" + "path" "path/filepath" "regexp" "runtime" @@ -36,6 +37,7 @@ import ( "syscall" "time" + "github.com/tailscale/setec/client/setec" "golang.org/x/time/rate" "tailscale.com/atomicfile" "tailscale.com/derp" @@ -64,6 +66,9 @@ var ( meshPSKFile = flag.String("mesh-psk-file", defaultMeshPSKFile(), "if non-empty, path to file containing the mesh pre-shared key file. It should contain some hex string; whitespace is trimmed.") meshWith = flag.String("mesh-with", "", "optional comma-separated list of hostnames to mesh with; the server's own hostname can be in the list. If an entry contains a slash, the second part names a hostname to be used when dialing the target.") + secretsURL = flag.String("secrets-url", "", "SETEC server URL for secrets retrieval of mesh key") + secretPrefix = flag.String("secrets-path-prefix", "prod/derp", "setec path prefix for \""+setecMeshKeyName+"\" secret for DERP mesh key") + secretsCacheDir = flag.String("secrets-cache-dir", defaultSetecCacheDir(), "directory to cache setec secrets in (required if --secrets-url is set)") bootstrapDNS = flag.String("bootstrap-dns-names", "", "optional comma-separated list of hostnames to make available at /bootstrap-dns") unpublishedDNS = flag.String("unpublished-bootstrap-dns-names", "", "optional comma-separated list of hostnames to make available at /bootstrap-dns and not publish in the list. 
If an entry contains a slash, the second part names a DNS record to poll for its TXT record with a `0` to `100` value for rollout percentage.") verifyClients = flag.Bool("verify-clients", false, "verify clients to this DERP server through a local tailscaled instance.") @@ -84,8 +89,14 @@ var ( var ( tlsRequestVersion = &metrics.LabelMap{Label: "version"} tlsActiveVersion = &metrics.LabelMap{Label: "version"} + + // Exactly 64 hexadecimal lowercase digits. + validMeshKey = regexp.MustCompile(`^[0-9a-f]{64}$`) ) +const setecMeshKeyName = "meshkey" +const meshKeyEnvVar = "TAILSCALE_DERPER_MESH_KEY" + func init() { expvar.Publish("derper_tls_request_version", tlsRequestVersion) expvar.Publish("gauge_derper_tls_active_version", tlsActiveVersion) @@ -141,6 +152,14 @@ func writeNewConfig() config { return cfg } +func checkMeshKey(key string) (string, error) { + key = strings.TrimSpace(key) + if !validMeshKey.MatchString(key) { + return "", fmt.Errorf("key in %q must contain 64+ hex digits", key) + } + return key, nil +} + func main() { flag.Parse() if *versionFlag { @@ -177,18 +196,51 @@ func main() { s.SetVerifyClientURLFailOpen(*verifyFailOpen) s.SetTCPWriteTimeout(*tcpWriteTimeout) - if *meshPSKFile != "" { - b, err := os.ReadFile(*meshPSKFile) + var meshKey string + if *dev { + meshKey = os.Getenv(meshKeyEnvVar) + if meshKey == "" { + log.Printf("No mesh key specified for dev via %s\n", meshKeyEnvVar) + } else { + log.Printf("Set mesh key from %s\n", meshKeyEnvVar) + } + } else if *secretsURL != "" { + meshKeySecret := path.Join(*secretPrefix, setecMeshKeyName) + fc, err := setec.NewFileCache(*secretsCacheDir) if err != nil { - log.Fatal(err) + log.Fatalf("NewFileCache: %v", err) } - key := strings.TrimSpace(string(b)) - if matched, _ := regexp.MatchString(`(?i)^[0-9a-f]{64,}$`, key); !matched { - log.Fatalf("key in %s must contain 64+ hex digits", *meshPSKFile) + st, err := setec.NewStore(ctx, + setec.StoreConfig{ + Client: setec.Client{Server: *secretsURL}, + Secrets: []string{ + meshKeySecret, + }, + Cache: fc, + }) + if err != nil { + log.Fatalf("NewStore: %v", err) + } + meshKey = st.Secret(meshKeySecret).GetString() + log.Println("Got mesh key from setec store") + } else if *meshPSKFile != "" { + b, err := setec.StaticFile(*meshPSKFile) + if err != nil { + log.Fatalf("StaticFile failed to get key: %v", err) } + log.Println("Got mesh key from static file") + meshKey = b.GetString() + } + + if meshKey == "" && *dev { + log.Printf("No mesh key configured for --dev mode") + } else if key, err := checkMeshKey(meshKey); err != nil { + log.Fatalf("invalid mesh key: %v", err) + } else { s.SetMeshKey(key) - log.Printf("DERP mesh key configured") + log.Println("DERP mesh key configured") } + if err := startMesh(s); err != nil { log.Fatalf("startMesh: %v", err) } @@ -382,6 +434,10 @@ func prodAutocertHostPolicy(_ context.Context, host string) error { return errors.New("invalid hostname") } +func defaultSetecCacheDir() string { + return filepath.Join(os.Getenv("HOME"), ".cache", "derper-secrets") +} + func defaultMeshPSKFile() string { try := []string{ "/home/derp/keys/derp-mesh.key", diff --git a/cmd/derper/derper_test.go b/cmd/derper/derper_test.go index 6dce1fcdf..12686ce4e 100644 --- a/cmd/derper/derper_test.go +++ b/cmd/derper/derper_test.go @@ -138,3 +138,46 @@ func TestTemplate(t *testing.T) { t.Error("Output is missing debug info") } } + +func TestCheckMeshKey(t *testing.T) { + testCases := []struct { + name string + input string + want string + wantErr bool + }{ + { + name: "KeyOkay", + 
input: "f1ffafffffffffffffffffffffffffffffffffffffffffffffffff2ffffcfff6", + want: "f1ffafffffffffffffffffffffffffffffffffffffffffffffffff2ffffcfff6", + wantErr: false, + }, + { + name: "TrimKeyOkay", + input: " f1ffafffffffffffffffffffffffffffffffffffffffffffffffff2ffffcfff6 ", + want: "f1ffafffffffffffffffffffffffffffffffffffffffffffffffff2ffffcfff6", + wantErr: false, + }, + { + name: "NotAKey", + input: "zzthisisnotakey", + want: "", + wantErr: true, + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + k, err := checkMeshKey(tt.input) + if err != nil && !tt.wantErr { + t.Errorf("unexpected error: %v", err) + } + if k != tt.want && err == nil { + t.Errorf("want: %s doesn't match expected: %s", tt.want, k) + } + + }) + } + +} diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 972dbfc2c..2eab8e123 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -9,7 +9,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts @@ -31,10 +30,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ L github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds @@ -69,11 +70,13 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ L 
github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index a6fae54ff..31a2ec0ba 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -10,7 +10,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts @@ -32,10 +31,12 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ L github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds @@ -70,11 +71,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ L github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config L github.com/aws/smithy-go/ptr from 
github.com/aws/aws-sdk-go-v2/aws+ L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm diff --git a/go.mod b/go.mod index e09f22a72..dc34d84ca 100644 --- a/go.mod +++ b/go.mod @@ -10,10 +10,10 @@ require ( github.com/andybalholm/brotli v1.1.0 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be github.com/atotto/clipboard v0.1.4 - github.com/aws/aws-sdk-go-v2 v1.24.1 - github.com/aws/aws-sdk-go-v2/config v1.26.5 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.64 - github.com/aws/aws-sdk-go-v2/service/s3 v1.33.0 + github.com/aws/aws-sdk-go-v2 v1.36.0 + github.com/aws/aws-sdk-go-v2/config v1.29.5 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.58 + github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 github.com/bramvdbogaerde/go-scp v1.4.0 github.com/cilium/ebpf v0.15.0 @@ -82,6 +82,7 @@ require ( github.com/tailscale/mkctr v0.0.0-20250110151924-54977352e4a6 github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc + github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 @@ -188,21 +189,21 @@ require ( github.com/alingse/asasalint v0.0.11 // indirect github.com/ashanbrown/forbidigo v1.6.0 // indirect github.com/ashanbrown/makezero v1.1.1 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.16.16 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.25 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.28 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect - github.com/aws/smithy-go v1.19.0 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.58 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 // indirect + 
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 // indirect + github.com/aws/smithy-go v1.22.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bkielbasa/cyclop v1.2.1 // indirect github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb // indirect diff --git a/go.sum b/go.sum index e1709fef4..2666faf90 100644 --- a/go.sum +++ b/go.sum @@ -123,59 +123,46 @@ github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5Fc github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= -github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= -github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno= -github.com/aws/aws-sdk-go-v2/config v1.18.22/go.mod h1:mN7Li1wxaPxSSy4Xkr6stFuinJGf3VZW3ZSNvO0q6sI= -github.com/aws/aws-sdk-go-v2/config v1.26.5 h1:lodGSevz7d+kkFJodfauThRxK9mdJbyutUxGq1NNhvw= -github.com/aws/aws-sdk-go-v2/config v1.26.5/go.mod h1:DxHrz6diQJOc9EwDslVRh84VjjrE17g+pVZXUeSxaDU= -github.com/aws/aws-sdk-go-v2/credentials v1.13.21/go.mod h1:90Dk1lJoMyspa/EDUrldTxsPns0wn6+KpRKpdAWc0uA= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.64 h1:9QJQs36z61YB8nxGwRDfWXEDYbU6H7jdI6zFiAX1vag= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.64/go.mod h1:4Q7R9MFpXRdjO3YnAfUTdnuENs32WzBkASt6VxSYDYQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= -github.com/aws/aws-sdk-go-v2/internal/ini 
v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.25 h1:AzwRi5OKKwo4QNqPf7TjeO+tK8AyOK3GVSwmRPo7/Cs= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.25/go.mod h1:SUbB4wcbSEyCvqBxv/O/IBf93RbEze7U7OnoTlpPB+g= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.28 h1:vGWm5vTpMr39tEZfQeDiDAMgk+5qsnvRny3FjLpnH5w= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.28/go.mod h1:spfrICMD6wCAhjhzHuy6DOZZ+LAIY10UxhUmLzpJTTs= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.2 h1:NbWkRxEEIRSCqxhsHQuMiTH7yo+JZW1gp8v3elSVMTQ= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.2/go.mod h1:4tfW5l4IAB32VWCDEBxCRtR9T4BWy4I4kr1spr8NgZM= -github.com/aws/aws-sdk-go-v2/service/s3 v1.33.0 h1:L5h2fymEdVJYvn6hYO8Jx48YmC6xVmjmgHJV3oGKgmc= -github.com/aws/aws-sdk-go-v2/service/s3 v1.33.0/go.mod h1:J9kLNzEiHSeGMyN7238EjJmBpCniVzFda75Gxl/NqB8= +github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= +github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg= +github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= +github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.58 h1:/BsEGAyMai+KdXS+CMHlLhB5miAO19wOqE6tj8azWPM= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.58/go.mod h1:KHM3lfl/sAJBCoLI1Lsg5w4SD2VDYWwQi7vxbKhw7TI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod 
h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 h1:8IwBjuLdqIO1dGB+dZ9zJEl8wzY3bVYxcs0Xyu/Lsc0= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31/go.mod h1:8tMBcuVjL4kP/ECEIWTCWtwV2kj6+ouEKl4cqR4iWLw= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 h1:siiQ+jummya9OLPDEyHVb2dLW4aOMe22FGDd0sAfuSw= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5/go.mod h1:iHVx2J9pWzITdP5MJY6qWfG34TfD9EA+Qi3eV6qQCXw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 h1:tkVNm99nkJnFo1H9IIQb5QkCiPcvCDn3Pos+IeTbGRA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12/go.mod h1:dIVlquSPUMqEJtx2/W17SM2SuESRaVEhEV9alcMqxjw= +github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 h1:JBod0SnNqcWQ0+uAyzeRFG1zCHotW8DukumYYyNy0zo= +github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3/go.mod h1:FHSHmyEUkzRbaFFqqm6bkLAOQHgqhsLmfCahvCBMiyA= github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE= github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.9/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.9/go.mod h1:AFvkxc8xfBe8XA+5St5XIHHrQQtkxqrRincx4hmMHOk= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= -github.com/aws/aws-sdk-go-v2/service/sts v1.18.10/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= -github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= -github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc= 
+github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= +github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -244,6 +231,8 @@ github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creachadair/mds v0.17.1 h1:lXQbTGKmb3nE3aK6OEp29L1gCx6B5ynzlQ6c1KOBurc= +github.com/creachadair/mds v0.17.1/go.mod h1:4b//mUiL8YldH6TImXjmW45myzTLNS1LLjOmrk888eg= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= @@ -933,6 +922,8 @@ github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4 github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= +github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb h1:Rtklwm6HUlCtf/MR2MB9iY4FoA16acWWlC5pLrTVa90= +github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb/go.mod h1:R8iCVJnbOB05pGexHK/bKHneIRHpZ3jLl7wMQ0OM/jw= github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= @@ -957,6 +948,8 @@ github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+n github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4= github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= +github.com/tink-crypto/tink-go/v2 v2.1.0 h1:QXFBguwMwTIaU17EgZpEJWsUSc60b1BAGTzBIoMdmok= +github.com/tink-crypto/tink-go/v2 v2.1.0/go.mod h1:y1TnYFt1i2eZVfx4OGc+C+EMp4CoKWAw2VSEuoicHHI= github.com/tomarrell/wrapcheck/v2 v2.8.3 h1:5ov+Cbhlgi7s/a42BprYoxsr73CbdMUTzE3bRDFASUs= github.com/tomarrell/wrapcheck/v2 v2.8.3/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= From e6e00012b25dda7b13c4b6910d9567dd771c97ae Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Wed, 5 Feb 2025 11:36:05 -0500 Subject: [PATCH 0457/1708] cmd/derper: remove logging of mesh key (#14915) A previous PR accidentally logged 
the key as part of an error. Remove logging of the key. Add log print for Setec store steup. Updates tailscale/corp#25756 Signed-off-by: Mike O'Driscoll --- cmd/derper/derper.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index b36fad59a..f1d848a5f 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -155,7 +155,7 @@ func writeNewConfig() config { func checkMeshKey(key string) (string, error) { key = strings.TrimSpace(key) if !validMeshKey.MatchString(key) { - return "", fmt.Errorf("key in %q must contain 64+ hex digits", key) + return "", errors.New("key must contain exactly 64 hex digits") } return key, nil } @@ -210,6 +210,7 @@ func main() { if err != nil { log.Fatalf("NewFileCache: %v", err) } + log.Printf("Setting up setec store from %q", *secretsURL) st, err := setec.NewStore(ctx, setec.StoreConfig{ Client: setec.Client{Server: *secretsURL}, From e4bee94857f102871fa84f999f25886818ef24f3 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Mon, 3 Feb 2025 16:22:05 -0600 Subject: [PATCH 0458/1708] ssh: don't use -l option for shells on FreeBSD Shells on FreeBSD don't support the -l option. This means that when handling SSH in-process, we can't give the user a login shell, but this change at least allows connecting at all. Updates #13338 Signed-off-by: Percy Wegmann --- ssh/tailssh/incubator.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go index 986b60bd3..c720f52d9 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -1089,6 +1089,10 @@ func (ia *incubatorArgs) loginArgs(loginCmdPath string) []string { func shellArgs(isShell bool, cmd string) []string { if isShell { + if runtime.GOOS == "freebsd" { + // freebsd's shells don't support the "-l" option, so we can't run as a login shell + return []string{} + } return []string{"-l"} } else { return []string{"-c", cmd} From 82878422690f87e585b0672c4ef0e076f966efd3 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Tue, 4 Feb 2025 10:08:19 -0600 Subject: [PATCH 0459/1708] ssh: refactor OS names into constants Updates #13338 Signed-off-by: Percy Wegmann --- ssh/tailssh/incubator.go | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go index c720f52d9..f55dcbe45 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -43,6 +43,13 @@ import ( "tailscale.com/version/distro" ) +const ( + linux = "linux" + darwin = "darwin" + freebsd = "freebsd" + openbsd = "openbsd" +) + func init() { childproc.Add("ssh", beIncubator) childproc.Add("sftp", beSFTP) @@ -126,7 +133,7 @@ func (ss *sshSession) newIncubatorCommand(logf logger.Logf) (cmd *exec.Cmd, err // We have to check the below outside of the incubator process, because it // relies on the "getenforce" command being on the PATH, which it is not // when in the incubator. - if runtime.GOOS == "linux" && hostinfo.IsSELinuxEnforcing() { + if runtime.GOOS == linux && hostinfo.IsSELinuxEnforcing() { incubatorArgs = append(incubatorArgs, "--is-selinux-enforcing") } @@ -428,13 +435,13 @@ func tryExecLogin(dlogf logger.Logf, ia incubatorArgs) error { // Only the macOS version of the login command supports executing a // command, all other versions only support launching a shell without // taking any arguments. 
- if !ia.isShell && runtime.GOOS != "darwin" { + if !ia.isShell && runtime.GOOS != darwin { dlogf("won't use login because we're not in a shell or on macOS") return nil } switch runtime.GOOS { - case "linux", "freebsd", "openbsd": + case linux, freebsd, openbsd: if !ia.hasTTY { dlogf("can't use login because of missing TTY") // We can only use the login command if a shell was requested with @@ -523,7 +530,7 @@ func trySU(dlogf logger.Logf, ia incubatorArgs) (handled bool, err error) { func findSU(dlogf logger.Logf, ia incubatorArgs) string { // Currently, we only support falling back to su on Linux. This // potentially could work on BSDs as well, but requires testing. - if runtime.GOOS != "linux" { + if runtime.GOOS != linux { return "" } @@ -659,7 +666,7 @@ func doDropPrivileges(dlogf logger.Logf, wantUid, wantGid int, supplementaryGrou euid := os.Geteuid() egid := os.Getegid() - if runtime.GOOS == "darwin" || runtime.GOOS == "freebsd" { + if runtime.GOOS == darwin || runtime.GOOS == freebsd { // On FreeBSD and Darwin, the first entry returned from the // getgroups(2) syscall is the egid, and changing it with // setgroups(2) changes the egid of the process. This is @@ -1051,7 +1058,7 @@ func fileExists(path string) bool { // loginArgs returns the arguments to use to exec the login binary. func (ia *incubatorArgs) loginArgs(loginCmdPath string) []string { switch runtime.GOOS { - case "darwin": + case darwin: args := []string{ loginCmdPath, "-f", // already authenticated @@ -1071,7 +1078,7 @@ func (ia *incubatorArgs) loginArgs(loginCmdPath string) []string { } return args - case "linux": + case linux: if distro.Get() == distro.Arch && !fileExists("/etc/pam.d/remote") { // See https://github.com/tailscale/tailscale/issues/4924 // @@ -1081,7 +1088,7 @@ func (ia *incubatorArgs) loginArgs(loginCmdPath string) []string { return []string{loginCmdPath, "-f", ia.localUser, "-p"} } return []string{loginCmdPath, "-f", ia.localUser, "-h", ia.remoteIP, "-p"} - case "freebsd", "openbsd": + case freebsd, openbsd: return []string{loginCmdPath, "-fp", "-h", ia.remoteIP, ia.localUser} } panic("unimplemented") @@ -1089,7 +1096,7 @@ func (ia *incubatorArgs) loginArgs(loginCmdPath string) []string { func shellArgs(isShell bool, cmd string) []string { if isShell { - if runtime.GOOS == "freebsd" { + if runtime.GOOS == freebsd { // freebsd's shells don't support the "-l" option, so we can't run as a login shell return []string{} } @@ -1100,7 +1107,7 @@ func shellArgs(isShell bool, cmd string) []string { } func setGroups(groupIDs []int) error { - if runtime.GOOS == "darwin" && len(groupIDs) > 16 { + if runtime.GOOS == darwin && len(groupIDs) > 16 { // darwin returns "invalid argument" if more than 16 groups are passed to syscall.Setgroups // some info can be found here: // https://opensource.apple.com/source/samba/samba-187.8/patches/support-darwin-initgroups-syscall.auto.html From 0e6d99cc361a4928a9618f1c0b57ce32be6a2d4a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 5 Feb 2025 12:04:32 -0600 Subject: [PATCH 0460/1708] docs/windows/policy: remove an extra closing > Something I accidentally added in #14217. It doesn't seem to impact Intune or the Administrative Templates MMC extension, but it should still be fixed. 
Updates #cleanup Signed-off-by: Nick Khyl --- docs/windows/policy/tailscale.admx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/windows/policy/tailscale.admx b/docs/windows/policy/tailscale.admx index f941525c4..9cf27bddc 100644 --- a/docs/windows/policy/tailscale.admx +++ b/docs/windows/policy/tailscale.admx @@ -98,7 +98,7 @@ - > + From 3f2bec5f64a802b311af12c3e9061d89985e78ac Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Wed, 5 Feb 2025 12:01:01 -0600 Subject: [PATCH 0461/1708] ssh: don't use -l option for shells on OpenBSD Shells on OpenBSD don't support the -l option. This means that when handling SSH in-process, we can't give the user a login shell, but this change at least allows connecting at all. Updates #13338 Signed-off-by: Percy Wegmann --- ssh/tailssh/incubator.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go index f55dcbe45..e809e9185 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -1096,8 +1096,8 @@ func (ia *incubatorArgs) loginArgs(loginCmdPath string) []string { func shellArgs(isShell bool, cmd string) []string { if isShell { - if runtime.GOOS == freebsd { - // freebsd's shells don't support the "-l" option, so we can't run as a login shell + if runtime.GOOS == freebsd || runtime.GOOS == openbsd { + // bsd shells don't support the "-l" option, so we can't run as a login shell return []string{} } return []string{"-l"} From f57fa3cbc30e94ec5c39fa8b70f976ead8d420f1 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Wed, 5 Feb 2025 10:37:59 -0600 Subject: [PATCH 0462/1708] client,localclient: move localclient.go to client/local package Updates tailscale/corp#22748 Signed-off-by: Percy Wegmann --- .../localclient.go => local/local.go} | 221 +++++++++--------- .../local_test.go} | 4 +- client/tailscale/localclient_aliases.go | 77 ++++++ client/tailscale/tailscale.go | 3 +- cmd/derper/depaware.txt | 27 +-- cmd/k8s-operator/depaware.txt | 25 +- cmd/tailscale/depaware.txt | 23 +- cmd/tailscaled/depaware.txt | 23 +- 8 files changed, 242 insertions(+), 161 deletions(-) rename client/{tailscale/localclient.go => local/local.go} (83%) rename client/{tailscale/localclient_test.go => local/local_test.go} (98%) create mode 100644 client/tailscale/localclient_aliases.go diff --git a/client/tailscale/localclient.go b/client/local/local.go similarity index 83% rename from client/tailscale/localclient.go rename to client/local/local.go index eecd05dfd..5312c1d0a 100644 --- a/client/tailscale/localclient.go +++ b/client/local/local.go @@ -3,7 +3,8 @@ //go:build go1.22 -package tailscale +// Package local contains a Go client for the Tailscale LocalAPI. +package local import ( "bytes" @@ -44,11 +45,11 @@ import ( "tailscale.com/util/syspolicy/setting" ) -// defaultLocalClient is the default LocalClient when using the legacy +// defaultClient is the default Client when using the legacy // package-level functions. -var defaultLocalClient LocalClient +var defaultClient Client -// LocalClient is a client to Tailscale's "LocalAPI", communicating with the +// Client is a client to Tailscale's "LocalAPI", communicating with the // Tailscale daemon on the local machine. Its API is not necessarily stable and // subject to changes between releases. Some API calls have stricter // compatibility guarantees, once they've been widely adopted. 
See method docs @@ -58,7 +59,7 @@ var defaultLocalClient LocalClient // // Any exported fields should be set before using methods on the type // and not changed thereafter. -type LocalClient struct { +type Client struct { // Dial optionally specifies an alternate func that connects to the local // machine's tailscaled or equivalent. If nil, a default is used. Dial func(ctx context.Context, network, addr string) (net.Conn, error) @@ -92,21 +93,21 @@ type LocalClient struct { tsClientOnce sync.Once } -func (lc *LocalClient) socket() string { +func (lc *Client) socket() string { if lc.Socket != "" { return lc.Socket } return paths.DefaultTailscaledSocket() } -func (lc *LocalClient) dialer() func(ctx context.Context, network, addr string) (net.Conn, error) { +func (lc *Client) dialer() func(ctx context.Context, network, addr string) (net.Conn, error) { if lc.Dial != nil { return lc.Dial } return lc.defaultDialer } -func (lc *LocalClient) defaultDialer(ctx context.Context, network, addr string) (net.Conn, error) { +func (lc *Client) defaultDialer(ctx context.Context, network, addr string) (net.Conn, error) { if addr != "local-tailscaled.sock:80" { return nil, fmt.Errorf("unexpected URL address %q", addr) } @@ -132,7 +133,7 @@ func (lc *LocalClient) defaultDialer(ctx context.Context, network, addr string) // authenticating to the local Tailscale daemon vary by platform. // // DoLocalRequest may mutate the request to add Authorization headers. -func (lc *LocalClient) DoLocalRequest(req *http.Request) (*http.Response, error) { +func (lc *Client) DoLocalRequest(req *http.Request) (*http.Response, error) { req.Header.Set("Tailscale-Cap", strconv.Itoa(int(tailcfg.CurrentCapabilityVersion))) lc.tsClientOnce.Do(func() { lc.tsClient = &http.Client{ @@ -149,7 +150,7 @@ func (lc *LocalClient) DoLocalRequest(req *http.Request) (*http.Response, error) return lc.tsClient.Do(req) } -func (lc *LocalClient) doLocalRequestNiceError(req *http.Request) (*http.Response, error) { +func (lc *Client) doLocalRequestNiceError(req *http.Request) (*http.Response, error) { res, err := lc.DoLocalRequest(req) if err == nil { if server := res.Header.Get("Tailscale-Version"); server != "" && server != envknob.IPCVersion() && onVersionMismatch != nil { @@ -238,7 +239,7 @@ func SetVersionMismatchHandler(f func(clientVer, serverVer string)) { onVersionMismatch = f } -func (lc *LocalClient) send(ctx context.Context, method, path string, wantStatus int, body io.Reader) ([]byte, error) { +func (lc *Client) send(ctx context.Context, method, path string, wantStatus int, body io.Reader) ([]byte, error) { var headers http.Header if reason := apitype.RequestReasonKey.Value(ctx); reason != "" { reasonBase64 := base64.StdEncoding.EncodeToString([]byte(reason)) @@ -248,7 +249,7 @@ func (lc *LocalClient) send(ctx context.Context, method, path string, wantStatus return slurp, err } -func (lc *LocalClient) sendWithHeaders( +func (lc *Client) sendWithHeaders( ctx context.Context, method, path string, @@ -287,15 +288,15 @@ type httpStatusError struct { HTTPStatus int } -func (lc *LocalClient) get200(ctx context.Context, path string) ([]byte, error) { +func (lc *Client) get200(ctx context.Context, path string) ([]byte, error) { return lc.send(ctx, "GET", path, 200, nil) } // WhoIs returns the owner of the remoteAddr, which must be an IP or IP:port. // -// Deprecated: use LocalClient.WhoIs. +// Deprecated: use Client.WhoIs. 
func WhoIs(ctx context.Context, remoteAddr string) (*apitype.WhoIsResponse, error) { - return defaultLocalClient.WhoIs(ctx, remoteAddr) + return defaultClient.WhoIs(ctx, remoteAddr) } func decodeJSON[T any](b []byte) (ret T, err error) { @@ -313,7 +314,7 @@ func decodeJSON[T any](b []byte) (ret T, err error) { // For connections proxied by tailscaled, this looks up the owner of the given // address as TCP first, falling back to UDP; if you want to only check a // specific address family, use WhoIsProto. -func (lc *LocalClient) WhoIs(ctx context.Context, remoteAddr string) (*apitype.WhoIsResponse, error) { +func (lc *Client) WhoIs(ctx context.Context, remoteAddr string) (*apitype.WhoIsResponse, error) { body, err := lc.get200(ctx, "/localapi/v0/whois?addr="+url.QueryEscape(remoteAddr)) if err != nil { if hs, ok := err.(httpStatusError); ok && hs.HTTPStatus == http.StatusNotFound { @@ -330,7 +331,7 @@ var ErrPeerNotFound = errors.New("peer not found") // WhoIsNodeKey returns the owner of the given wireguard public key. // // If not found, the error is ErrPeerNotFound. -func (lc *LocalClient) WhoIsNodeKey(ctx context.Context, key key.NodePublic) (*apitype.WhoIsResponse, error) { +func (lc *Client) WhoIsNodeKey(ctx context.Context, key key.NodePublic) (*apitype.WhoIsResponse, error) { body, err := lc.get200(ctx, "/localapi/v0/whois?addr="+url.QueryEscape(key.String())) if err != nil { if hs, ok := err.(httpStatusError); ok && hs.HTTPStatus == http.StatusNotFound { @@ -345,7 +346,7 @@ func (lc *LocalClient) WhoIsNodeKey(ctx context.Context, key key.NodePublic) (*a // IP:port, for the given protocol (tcp or udp). // // If not found, the error is ErrPeerNotFound. -func (lc *LocalClient) WhoIsProto(ctx context.Context, proto, remoteAddr string) (*apitype.WhoIsResponse, error) { +func (lc *Client) WhoIsProto(ctx context.Context, proto, remoteAddr string) (*apitype.WhoIsResponse, error) { body, err := lc.get200(ctx, "/localapi/v0/whois?proto="+url.QueryEscape(proto)+"&addr="+url.QueryEscape(remoteAddr)) if err != nil { if hs, ok := err.(httpStatusError); ok && hs.HTTPStatus == http.StatusNotFound { @@ -357,19 +358,19 @@ func (lc *LocalClient) WhoIsProto(ctx context.Context, proto, remoteAddr string) } // Goroutines returns a dump of the Tailscale daemon's current goroutines. -func (lc *LocalClient) Goroutines(ctx context.Context) ([]byte, error) { +func (lc *Client) Goroutines(ctx context.Context) ([]byte, error) { return lc.get200(ctx, "/localapi/v0/goroutines") } // DaemonMetrics returns the Tailscale daemon's metrics in // the Prometheus text exposition format. -func (lc *LocalClient) DaemonMetrics(ctx context.Context) ([]byte, error) { +func (lc *Client) DaemonMetrics(ctx context.Context) ([]byte, error) { return lc.get200(ctx, "/localapi/v0/metrics") } // UserMetrics returns the user metrics in // the Prometheus text exposition format. -func (lc *LocalClient) UserMetrics(ctx context.Context) ([]byte, error) { +func (lc *Client) UserMetrics(ctx context.Context) ([]byte, error) { return lc.get200(ctx, "/localapi/v0/usermetrics") } @@ -378,7 +379,7 @@ func (lc *LocalClient) UserMetrics(ctx context.Context) ([]byte, error) { // metric is created and initialized to delta. // // IncrementCounter does not support gauge metrics or negative delta values. 
-func (lc *LocalClient) IncrementCounter(ctx context.Context, name string, delta int) error { +func (lc *Client) IncrementCounter(ctx context.Context, name string, delta int) error { type metricUpdate struct { Name string `json:"name"` Type string `json:"type"` @@ -397,7 +398,7 @@ func (lc *LocalClient) IncrementCounter(ctx context.Context, name string, delta // TailDaemonLogs returns a stream the Tailscale daemon's logs as they arrive. // Close the context to stop the stream. -func (lc *LocalClient) TailDaemonLogs(ctx context.Context) (io.Reader, error) { +func (lc *Client) TailDaemonLogs(ctx context.Context) (io.Reader, error) { req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/logtap", nil) if err != nil { return nil, err @@ -413,7 +414,7 @@ func (lc *LocalClient) TailDaemonLogs(ctx context.Context) (io.Reader, error) { } // Pprof returns a pprof profile of the Tailscale daemon. -func (lc *LocalClient) Pprof(ctx context.Context, pprofType string, sec int) ([]byte, error) { +func (lc *Client) Pprof(ctx context.Context, pprofType string, sec int) ([]byte, error) { var secArg string if sec < 0 || sec > 300 { return nil, errors.New("duration out of range") @@ -446,7 +447,7 @@ type BugReportOpts struct { // // The opts type specifies options to pass to the Tailscale daemon when // generating this bug report. -func (lc *LocalClient) BugReportWithOpts(ctx context.Context, opts BugReportOpts) (string, error) { +func (lc *Client) BugReportWithOpts(ctx context.Context, opts BugReportOpts) (string, error) { qparams := make(url.Values) if opts.Note != "" { qparams.Set("note", opts.Note) @@ -491,13 +492,13 @@ func (lc *LocalClient) BugReportWithOpts(ctx context.Context, opts BugReportOpts // // This is the same as calling BugReportWithOpts and only specifying the Note // field. -func (lc *LocalClient) BugReport(ctx context.Context, note string) (string, error) { +func (lc *Client) BugReport(ctx context.Context, note string) (string, error) { return lc.BugReportWithOpts(ctx, BugReportOpts{Note: note}) } // DebugAction invokes a debug action, such as "rebind" or "restun". // These are development tools and subject to change or removal over time. -func (lc *LocalClient) DebugAction(ctx context.Context, action string) error { +func (lc *Client) DebugAction(ctx context.Context, action string) error { body, err := lc.send(ctx, "POST", "/localapi/v0/debug?action="+url.QueryEscape(action), 200, nil) if err != nil { return fmt.Errorf("error %w: %s", err, body) @@ -508,7 +509,7 @@ func (lc *LocalClient) DebugAction(ctx context.Context, action string) error { // DebugActionBody invokes a debug action with a body parameter, such as // "debug-force-prefer-derp". // These are development tools and subject to change or removal over time. -func (lc *LocalClient) DebugActionBody(ctx context.Context, action string, rbody io.Reader) error { +func (lc *Client) DebugActionBody(ctx context.Context, action string, rbody io.Reader) error { body, err := lc.send(ctx, "POST", "/localapi/v0/debug?action="+url.QueryEscape(action), 200, rbody) if err != nil { return fmt.Errorf("error %w: %s", err, body) @@ -518,7 +519,7 @@ func (lc *LocalClient) DebugActionBody(ctx context.Context, action string, rbody // DebugResultJSON invokes a debug action and returns its result as something JSON-able. // These are development tools and subject to change or removal over time. 
-func (lc *LocalClient) DebugResultJSON(ctx context.Context, action string) (any, error) { +func (lc *Client) DebugResultJSON(ctx context.Context, action string) (any, error) { body, err := lc.send(ctx, "POST", "/localapi/v0/debug?action="+url.QueryEscape(action), 200, nil) if err != nil { return nil, fmt.Errorf("error %w: %s", err, body) @@ -561,7 +562,7 @@ type DebugPortmapOpts struct { // process. // // opts can be nil; if so, default values will be used. -func (lc *LocalClient) DebugPortmap(ctx context.Context, opts *DebugPortmapOpts) (io.ReadCloser, error) { +func (lc *Client) DebugPortmap(ctx context.Context, opts *DebugPortmapOpts) (io.ReadCloser, error) { vals := make(url.Values) if opts == nil { opts = &DebugPortmapOpts{} @@ -596,7 +597,7 @@ func (lc *LocalClient) DebugPortmap(ctx context.Context, opts *DebugPortmapOpts) // SetDevStoreKeyValue set a statestore key/value. It's only meant for development. // The schema (including when keys are re-read) is not a stable interface. -func (lc *LocalClient) SetDevStoreKeyValue(ctx context.Context, key, value string) error { +func (lc *Client) SetDevStoreKeyValue(ctx context.Context, key, value string) error { body, err := lc.send(ctx, "POST", "/localapi/v0/dev-set-state-store?"+(url.Values{ "key": {key}, "value": {value}, @@ -610,7 +611,7 @@ func (lc *LocalClient) SetDevStoreKeyValue(ctx context.Context, key, value strin // SetComponentDebugLogging sets component's debug logging enabled for // the provided duration. If the duration is in the past, the debug logging // is disabled. -func (lc *LocalClient) SetComponentDebugLogging(ctx context.Context, component string, d time.Duration) error { +func (lc *Client) SetComponentDebugLogging(ctx context.Context, component string, d time.Duration) error { body, err := lc.send(ctx, "POST", fmt.Sprintf("/localapi/v0/component-debug-logging?component=%s&secs=%d", url.QueryEscape(component), int64(d.Seconds())), 200, nil) @@ -631,25 +632,25 @@ func (lc *LocalClient) SetComponentDebugLogging(ctx context.Context, component s // Status returns the Tailscale daemon's status. func Status(ctx context.Context) (*ipnstate.Status, error) { - return defaultLocalClient.Status(ctx) + return defaultClient.Status(ctx) } // Status returns the Tailscale daemon's status. -func (lc *LocalClient) Status(ctx context.Context) (*ipnstate.Status, error) { +func (lc *Client) Status(ctx context.Context) (*ipnstate.Status, error) { return lc.status(ctx, "") } // StatusWithoutPeers returns the Tailscale daemon's status, without the peer info. func StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { - return defaultLocalClient.StatusWithoutPeers(ctx) + return defaultClient.StatusWithoutPeers(ctx) } // StatusWithoutPeers returns the Tailscale daemon's status, without the peer info. -func (lc *LocalClient) StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { +func (lc *Client) StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { return lc.status(ctx, "?peers=false") } -func (lc *LocalClient) status(ctx context.Context, queryString string) (*ipnstate.Status, error) { +func (lc *Client) status(ctx context.Context, queryString string) (*ipnstate.Status, error) { body, err := lc.get200(ctx, "/localapi/v0/status"+queryString) if err != nil { return nil, err @@ -660,7 +661,7 @@ func (lc *LocalClient) status(ctx context.Context, queryString string) (*ipnstat // IDToken is a request to get an OIDC ID token for an audience. 
// The token can be presented to any resource provider which offers OIDC // Federation. -func (lc *LocalClient) IDToken(ctx context.Context, aud string) (*tailcfg.TokenResponse, error) { +func (lc *Client) IDToken(ctx context.Context, aud string) (*tailcfg.TokenResponse, error) { body, err := lc.get200(ctx, "/localapi/v0/id-token?aud="+url.QueryEscape(aud)) if err != nil { return nil, err @@ -672,14 +673,14 @@ func (lc *LocalClient) IDToken(ctx context.Context, aud string) (*tailcfg.TokenR // received by the Tailscale daemon in its staging/cache directory but not yet // transferred by the user's CLI or GUI client and written to a user's home // directory somewhere. -func (lc *LocalClient) WaitingFiles(ctx context.Context) ([]apitype.WaitingFile, error) { +func (lc *Client) WaitingFiles(ctx context.Context) ([]apitype.WaitingFile, error) { return lc.AwaitWaitingFiles(ctx, 0) } // AwaitWaitingFiles is like WaitingFiles but takes a duration to await for an answer. // If the duration is 0, it will return immediately. The duration is respected at second // granularity only. If no files are available, it returns (nil, nil). -func (lc *LocalClient) AwaitWaitingFiles(ctx context.Context, d time.Duration) ([]apitype.WaitingFile, error) { +func (lc *Client) AwaitWaitingFiles(ctx context.Context, d time.Duration) ([]apitype.WaitingFile, error) { path := "/localapi/v0/files/?waitsec=" + fmt.Sprint(int(d.Seconds())) body, err := lc.get200(ctx, path) if err != nil { @@ -688,12 +689,12 @@ func (lc *LocalClient) AwaitWaitingFiles(ctx context.Context, d time.Duration) ( return decodeJSON[[]apitype.WaitingFile](body) } -func (lc *LocalClient) DeleteWaitingFile(ctx context.Context, baseName string) error { +func (lc *Client) DeleteWaitingFile(ctx context.Context, baseName string) error { _, err := lc.send(ctx, "DELETE", "/localapi/v0/files/"+url.PathEscape(baseName), http.StatusNoContent, nil) return err } -func (lc *LocalClient) GetWaitingFile(ctx context.Context, baseName string) (rc io.ReadCloser, size int64, err error) { +func (lc *Client) GetWaitingFile(ctx context.Context, baseName string) (rc io.ReadCloser, size int64, err error) { req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/files/"+url.PathEscape(baseName), nil) if err != nil { return nil, 0, err @@ -714,7 +715,7 @@ func (lc *LocalClient) GetWaitingFile(ctx context.Context, baseName string) (rc return res.Body, res.ContentLength, nil } -func (lc *LocalClient) FileTargets(ctx context.Context) ([]apitype.FileTarget, error) { +func (lc *Client) FileTargets(ctx context.Context) ([]apitype.FileTarget, error) { body, err := lc.get200(ctx, "/localapi/v0/file-targets") if err != nil { return nil, err @@ -726,7 +727,7 @@ func (lc *LocalClient) FileTargets(ctx context.Context) ([]apitype.FileTarget, e // // A size of -1 means unknown. // The name parameter is the original filename, not escaped. 
-func (lc *LocalClient) PushFile(ctx context.Context, target tailcfg.StableNodeID, size int64, name string, r io.Reader) error { +func (lc *Client) PushFile(ctx context.Context, target tailcfg.StableNodeID, size int64, name string, r io.Reader) error { req, err := http.NewRequestWithContext(ctx, "PUT", "http://"+apitype.LocalAPIHost+"/localapi/v0/file-put/"+string(target)+"/"+url.PathEscape(name), r) if err != nil { return err @@ -749,7 +750,7 @@ func (lc *LocalClient) PushFile(ctx context.Context, target tailcfg.StableNodeID // CheckIPForwarding asks the local Tailscale daemon whether it looks like the // machine is properly configured to forward IP packets as a subnet router // or exit node. -func (lc *LocalClient) CheckIPForwarding(ctx context.Context) error { +func (lc *Client) CheckIPForwarding(ctx context.Context) error { body, err := lc.get200(ctx, "/localapi/v0/check-ip-forwarding") if err != nil { return err @@ -769,7 +770,7 @@ func (lc *LocalClient) CheckIPForwarding(ctx context.Context) error { // CheckUDPGROForwarding asks the local Tailscale daemon whether it looks like // the machine is optimally configured to forward UDP packets as a subnet router // or exit node. -func (lc *LocalClient) CheckUDPGROForwarding(ctx context.Context) error { +func (lc *Client) CheckUDPGROForwarding(ctx context.Context) error { body, err := lc.get200(ctx, "/localapi/v0/check-udp-gro-forwarding") if err != nil { return err @@ -790,7 +791,7 @@ func (lc *LocalClient) CheckUDPGROForwarding(ctx context.Context) error { // node. This can be done to improve performance of tailnet nodes acting as exit // nodes or subnet routers. // See https://tailscale.com/kb/1320/performance-best-practices#linux-optimizations-for-subnet-routers-and-exit-nodes -func (lc *LocalClient) SetUDPGROForwarding(ctx context.Context) error { +func (lc *Client) SetUDPGROForwarding(ctx context.Context) error { body, err := lc.get200(ctx, "/localapi/v0/set-udp-gro-forwarding") if err != nil { return err @@ -813,12 +814,12 @@ func (lc *LocalClient) SetUDPGROForwarding(ctx context.Context) error { // work. Currently (2022-04-18) this only checks for SSH server compatibility. // Note that EditPrefs does the same validation as this, so call CheckPrefs before // EditPrefs is not necessary. -func (lc *LocalClient) CheckPrefs(ctx context.Context, p *ipn.Prefs) error { +func (lc *Client) CheckPrefs(ctx context.Context, p *ipn.Prefs) error { _, err := lc.send(ctx, "POST", "/localapi/v0/check-prefs", http.StatusOK, jsonBody(p)) return err } -func (lc *LocalClient) GetPrefs(ctx context.Context) (*ipn.Prefs, error) { +func (lc *Client) GetPrefs(ctx context.Context) (*ipn.Prefs, error) { body, err := lc.get200(ctx, "/localapi/v0/prefs") if err != nil { return nil, err @@ -835,7 +836,7 @@ func (lc *LocalClient) GetPrefs(ctx context.Context) (*ipn.Prefs, error) { // or a policy restriction. An optional reason or justification for the request can be // provided as a context value using [apitype.RequestReasonKey]. If permitted by policy, // access may be granted, and the reason will be logged for auditing purposes. 
-func (lc *LocalClient) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) { +func (lc *Client) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) { body, err := lc.send(ctx, "PATCH", "/localapi/v0/prefs", http.StatusOK, jsonBody(mp)) if err != nil { return nil, err @@ -844,7 +845,7 @@ func (lc *LocalClient) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn } // GetEffectivePolicy returns the effective policy for the specified scope. -func (lc *LocalClient) GetEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { +func (lc *Client) GetEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { scopeID, err := scope.MarshalText() if err != nil { return nil, err @@ -858,7 +859,7 @@ func (lc *LocalClient) GetEffectivePolicy(ctx context.Context, scope setting.Pol // ReloadEffectivePolicy reloads the effective policy for the specified scope // by reading and merging policy settings from all applicable policy sources. -func (lc *LocalClient) ReloadEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { +func (lc *Client) ReloadEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { scopeID, err := scope.MarshalText() if err != nil { return nil, err @@ -872,7 +873,7 @@ func (lc *LocalClient) ReloadEffectivePolicy(ctx context.Context, scope setting. // GetDNSOSConfig returns the system DNS configuration for the current device. // That is, it returns the DNS configuration that the system would use if Tailscale weren't being used. -func (lc *LocalClient) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig, error) { +func (lc *Client) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig, error) { body, err := lc.get200(ctx, "/localapi/v0/dns-osconfig") if err != nil { return nil, err @@ -887,7 +888,7 @@ func (lc *LocalClient) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig // QueryDNS executes a DNS query for a name (`google.com.`) and query type (`CNAME`). // It returns the raw DNS response bytes and the resolvers that were used to answer the query // (often just one, but can be more if we raced multiple resolvers). -func (lc *LocalClient) QueryDNS(ctx context.Context, name string, queryType string) (bytes []byte, resolvers []*dnstype.Resolver, err error) { +func (lc *Client) QueryDNS(ctx context.Context, name string, queryType string) (bytes []byte, resolvers []*dnstype.Resolver, err error) { body, err := lc.get200(ctx, fmt.Sprintf("/localapi/v0/dns-query?name=%s&type=%s", url.QueryEscape(name), queryType)) if err != nil { return nil, nil, err @@ -900,20 +901,20 @@ func (lc *LocalClient) QueryDNS(ctx context.Context, name string, queryType stri } // StartLoginInteractive starts an interactive login. -func (lc *LocalClient) StartLoginInteractive(ctx context.Context) error { +func (lc *Client) StartLoginInteractive(ctx context.Context) error { _, err := lc.send(ctx, "POST", "/localapi/v0/login-interactive", http.StatusNoContent, nil) return err } // Start applies the configuration specified in opts, and starts the // state machine. -func (lc *LocalClient) Start(ctx context.Context, opts ipn.Options) error { +func (lc *Client) Start(ctx context.Context, opts ipn.Options) error { _, err := lc.send(ctx, "POST", "/localapi/v0/start", http.StatusNoContent, jsonBody(opts)) return err } // Logout logs out the current node. 
-func (lc *LocalClient) Logout(ctx context.Context) error { +func (lc *Client) Logout(ctx context.Context) error { _, err := lc.send(ctx, "POST", "/localapi/v0/logout", http.StatusNoContent, nil) return err } @@ -932,7 +933,7 @@ func (lc *LocalClient) Logout(ctx context.Context) error { // This is a low-level interface; it's expected that most Tailscale // users use a higher level interface to getting/using TLS // certificates. -func (lc *LocalClient) SetDNS(ctx context.Context, name, value string) error { +func (lc *Client) SetDNS(ctx context.Context, name, value string) error { v := url.Values{} v.Set("name", name) v.Set("value", value) @@ -946,7 +947,7 @@ func (lc *LocalClient) SetDNS(ctx context.Context, name, value string) error { // tailscaled), a FQDN, or an IP address. // // The ctx is only used for the duration of the call, not the lifetime of the net.Conn. -func (lc *LocalClient) DialTCP(ctx context.Context, host string, port uint16) (net.Conn, error) { +func (lc *Client) DialTCP(ctx context.Context, host string, port uint16) (net.Conn, error) { return lc.UserDial(ctx, "tcp", host, port) } @@ -957,7 +958,7 @@ func (lc *LocalClient) DialTCP(ctx context.Context, host string, port uint16) (n // // The ctx is only used for the duration of the call, not the lifetime of the // net.Conn. -func (lc *LocalClient) UserDial(ctx context.Context, network, host string, port uint16) (net.Conn, error) { +func (lc *Client) UserDial(ctx context.Context, network, host string, port uint16) (net.Conn, error) { connCh := make(chan net.Conn, 1) trace := httptrace.ClientTrace{ GotConn: func(info httptrace.GotConnInfo) { @@ -1008,7 +1009,7 @@ func (lc *LocalClient) UserDial(ctx context.Context, network, host string, port // CurrentDERPMap returns the current DERPMap that is being used by the local tailscaled. // It is intended to be used with netcheck to see availability of DERPs. -func (lc *LocalClient) CurrentDERPMap(ctx context.Context) (*tailcfg.DERPMap, error) { +func (lc *Client) CurrentDERPMap(ctx context.Context) (*tailcfg.DERPMap, error) { var derpMap tailcfg.DERPMap res, err := lc.send(ctx, "GET", "/localapi/v0/derpmap", 200, nil) if err != nil { @@ -1024,9 +1025,9 @@ func (lc *LocalClient) CurrentDERPMap(ctx context.Context) (*tailcfg.DERPMap, er // // It returns a cached certificate from disk if it's still valid. // -// Deprecated: use LocalClient.CertPair. +// Deprecated: use Client.CertPair. func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { - return defaultLocalClient.CertPair(ctx, domain) + return defaultClient.CertPair(ctx, domain) } // CertPair returns a cert and private key for the provided DNS domain. @@ -1034,7 +1035,7 @@ func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err e // It returns a cached certificate from disk if it's still valid. // // API maturity: this is considered a stable API. -func (lc *LocalClient) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { +func (lc *Client) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { return lc.CertPairWithValidity(ctx, domain, 0) } @@ -1047,7 +1048,7 @@ func (lc *LocalClient) CertPair(ctx context.Context, domain string) (certPEM, ke // valid, but for less than minValidity, it will be synchronously renewed. // // API maturity: this is considered a stable API. 
-func (lc *LocalClient) CertPairWithValidity(ctx context.Context, domain string, minValidity time.Duration) (certPEM, keyPEM []byte, err error) { +func (lc *Client) CertPairWithValidity(ctx context.Context, domain string, minValidity time.Duration) (certPEM, keyPEM []byte, err error) { res, err := lc.send(ctx, "GET", fmt.Sprintf("/localapi/v0/cert/%s?type=pair&min_validity=%s", domain, minValidity), 200, nil) if err != nil { return nil, nil, err @@ -1073,9 +1074,9 @@ func (lc *LocalClient) CertPairWithValidity(ctx context.Context, domain string, // It's the right signature to use as the value of // tls.Config.GetCertificate. // -// Deprecated: use LocalClient.GetCertificate. +// Deprecated: use Client.GetCertificate. func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { - return defaultLocalClient.GetCertificate(hi) + return defaultClient.GetCertificate(hi) } // GetCertificate fetches a TLS certificate for the TLS ClientHello in hi. @@ -1086,7 +1087,7 @@ func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { // tls.Config.GetCertificate. // // API maturity: this is considered a stable API. -func (lc *LocalClient) GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { +func (lc *Client) GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { if hi == nil || hi.ServerName == "" { return nil, errors.New("no SNI ServerName") } @@ -1112,13 +1113,13 @@ func (lc *LocalClient) GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate // ExpandSNIName expands bare label name into the most likely actual TLS cert name. // -// Deprecated: use LocalClient.ExpandSNIName. +// Deprecated: use Client.ExpandSNIName. func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { - return defaultLocalClient.ExpandSNIName(ctx, name) + return defaultClient.ExpandSNIName(ctx, name) } // ExpandSNIName expands bare label name into the most likely actual TLS cert name. -func (lc *LocalClient) ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { +func (lc *Client) ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { st, err := lc.StatusWithoutPeers(ctx) if err != nil { return "", false @@ -1146,7 +1147,7 @@ type PingOpts struct { // Ping sends a ping of the provided type to the provided IP and waits // for its response. The opts type specifies additional options. -func (lc *LocalClient) PingWithOpts(ctx context.Context, ip netip.Addr, pingtype tailcfg.PingType, opts PingOpts) (*ipnstate.PingResult, error) { +func (lc *Client) PingWithOpts(ctx context.Context, ip netip.Addr, pingtype tailcfg.PingType, opts PingOpts) (*ipnstate.PingResult, error) { v := url.Values{} v.Set("ip", ip.String()) v.Set("size", strconv.Itoa(opts.Size)) @@ -1160,12 +1161,12 @@ func (lc *LocalClient) PingWithOpts(ctx context.Context, ip netip.Addr, pingtype // Ping sends a ping of the provided type to the provided IP and waits // for its response. -func (lc *LocalClient) Ping(ctx context.Context, ip netip.Addr, pingtype tailcfg.PingType) (*ipnstate.PingResult, error) { +func (lc *Client) Ping(ctx context.Context, ip netip.Addr, pingtype tailcfg.PingType) (*ipnstate.PingResult, error) { return lc.PingWithOpts(ctx, ip, pingtype, PingOpts{}) } // NetworkLockStatus fetches information about the tailnet key authority, if one is configured. 
-func (lc *LocalClient) NetworkLockStatus(ctx context.Context) (*ipnstate.NetworkLockStatus, error) { +func (lc *Client) NetworkLockStatus(ctx context.Context) (*ipnstate.NetworkLockStatus, error) { body, err := lc.send(ctx, "GET", "/localapi/v0/tka/status", 200, nil) if err != nil { return nil, fmt.Errorf("error: %w", err) @@ -1176,7 +1177,7 @@ func (lc *LocalClient) NetworkLockStatus(ctx context.Context) (*ipnstate.Network // NetworkLockInit initializes the tailnet key authority. // // TODO(tom): Plumb through disablement secrets. -func (lc *LocalClient) NetworkLockInit(ctx context.Context, keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) (*ipnstate.NetworkLockStatus, error) { +func (lc *Client) NetworkLockInit(ctx context.Context, keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) (*ipnstate.NetworkLockStatus, error) { var b bytes.Buffer type initRequest struct { Keys []tka.Key @@ -1197,7 +1198,7 @@ func (lc *LocalClient) NetworkLockInit(ctx context.Context, keys []tka.Key, disa // NetworkLockWrapPreauthKey wraps a pre-auth key with information to // enable unattended bringup in the locked tailnet. -func (lc *LocalClient) NetworkLockWrapPreauthKey(ctx context.Context, preauthKey string, tkaKey key.NLPrivate) (string, error) { +func (lc *Client) NetworkLockWrapPreauthKey(ctx context.Context, preauthKey string, tkaKey key.NLPrivate) (string, error) { encodedPrivate, err := tkaKey.MarshalText() if err != nil { return "", err @@ -1220,7 +1221,7 @@ func (lc *LocalClient) NetworkLockWrapPreauthKey(ctx context.Context, preauthKey } // NetworkLockModify adds and/or removes key(s) to the tailnet key authority. -func (lc *LocalClient) NetworkLockModify(ctx context.Context, addKeys, removeKeys []tka.Key) error { +func (lc *Client) NetworkLockModify(ctx context.Context, addKeys, removeKeys []tka.Key) error { var b bytes.Buffer type modifyRequest struct { AddKeys []tka.Key @@ -1239,7 +1240,7 @@ func (lc *LocalClient) NetworkLockModify(ctx context.Context, addKeys, removeKey // NetworkLockSign signs the specified node-key and transmits that signature to the control plane. // rotationPublic, if specified, must be an ed25519 public key. -func (lc *LocalClient) NetworkLockSign(ctx context.Context, nodeKey key.NodePublic, rotationPublic []byte) error { +func (lc *Client) NetworkLockSign(ctx context.Context, nodeKey key.NodePublic, rotationPublic []byte) error { var b bytes.Buffer type signRequest struct { NodeKey key.NodePublic @@ -1257,7 +1258,7 @@ func (lc *LocalClient) NetworkLockSign(ctx context.Context, nodeKey key.NodePubl } // NetworkLockAffectedSigs returns all signatures signed by the specified keyID. -func (lc *LocalClient) NetworkLockAffectedSigs(ctx context.Context, keyID tkatype.KeyID) ([]tkatype.MarshaledSignature, error) { +func (lc *Client) NetworkLockAffectedSigs(ctx context.Context, keyID tkatype.KeyID) ([]tkatype.MarshaledSignature, error) { body, err := lc.send(ctx, "POST", "/localapi/v0/tka/affected-sigs", 200, bytes.NewReader(keyID)) if err != nil { return nil, fmt.Errorf("error: %w", err) @@ -1266,7 +1267,7 @@ func (lc *LocalClient) NetworkLockAffectedSigs(ctx context.Context, keyID tkatyp } // NetworkLockLog returns up to maxEntries number of changes to network-lock state. 
-func (lc *LocalClient) NetworkLockLog(ctx context.Context, maxEntries int) ([]ipnstate.NetworkLockUpdate, error) { +func (lc *Client) NetworkLockLog(ctx context.Context, maxEntries int) ([]ipnstate.NetworkLockUpdate, error) { v := url.Values{} v.Set("limit", fmt.Sprint(maxEntries)) body, err := lc.send(ctx, "GET", "/localapi/v0/tka/log?"+v.Encode(), 200, nil) @@ -1277,7 +1278,7 @@ func (lc *LocalClient) NetworkLockLog(ctx context.Context, maxEntries int) ([]ip } // NetworkLockForceLocalDisable forcibly shuts down network lock on this node. -func (lc *LocalClient) NetworkLockForceLocalDisable(ctx context.Context) error { +func (lc *Client) NetworkLockForceLocalDisable(ctx context.Context) error { // This endpoint expects an empty JSON stanza as the payload. var b bytes.Buffer if err := json.NewEncoder(&b).Encode(struct{}{}); err != nil { @@ -1292,7 +1293,7 @@ func (lc *LocalClient) NetworkLockForceLocalDisable(ctx context.Context) error { // NetworkLockVerifySigningDeeplink verifies the network lock deeplink contained // in url and returns information extracted from it. -func (lc *LocalClient) NetworkLockVerifySigningDeeplink(ctx context.Context, url string) (*tka.DeeplinkValidationResult, error) { +func (lc *Client) NetworkLockVerifySigningDeeplink(ctx context.Context, url string) (*tka.DeeplinkValidationResult, error) { vr := struct { URL string }{url} @@ -1306,7 +1307,7 @@ func (lc *LocalClient) NetworkLockVerifySigningDeeplink(ctx context.Context, url } // NetworkLockGenRecoveryAUM generates an AUM for recovering from a tailnet-lock key compromise. -func (lc *LocalClient) NetworkLockGenRecoveryAUM(ctx context.Context, removeKeys []tkatype.KeyID, forkFrom tka.AUMHash) ([]byte, error) { +func (lc *Client) NetworkLockGenRecoveryAUM(ctx context.Context, removeKeys []tkatype.KeyID, forkFrom tka.AUMHash) ([]byte, error) { vr := struct { Keys []tkatype.KeyID ForkFrom string @@ -1321,7 +1322,7 @@ func (lc *LocalClient) NetworkLockGenRecoveryAUM(ctx context.Context, removeKeys } // NetworkLockCosignRecoveryAUM co-signs a recovery AUM using the node's tailnet lock key. -func (lc *LocalClient) NetworkLockCosignRecoveryAUM(ctx context.Context, aum tka.AUM) ([]byte, error) { +func (lc *Client) NetworkLockCosignRecoveryAUM(ctx context.Context, aum tka.AUM) ([]byte, error) { r := bytes.NewReader(aum.Serialize()) body, err := lc.send(ctx, "POST", "/localapi/v0/tka/cosign-recovery-aum", 200, r) if err != nil { @@ -1332,7 +1333,7 @@ func (lc *LocalClient) NetworkLockCosignRecoveryAUM(ctx context.Context, aum tka } // NetworkLockSubmitRecoveryAUM submits a recovery AUM to the control plane. -func (lc *LocalClient) NetworkLockSubmitRecoveryAUM(ctx context.Context, aum tka.AUM) error { +func (lc *Client) NetworkLockSubmitRecoveryAUM(ctx context.Context, aum tka.AUM) error { r := bytes.NewReader(aum.Serialize()) _, err := lc.send(ctx, "POST", "/localapi/v0/tka/submit-recovery-aum", 200, r) if err != nil { @@ -1343,7 +1344,7 @@ func (lc *LocalClient) NetworkLockSubmitRecoveryAUM(ctx context.Context, aum tka // SetServeConfig sets or replaces the serving settings. // If config is nil, settings are cleared and serving is disabled. 
-func (lc *LocalClient) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error { +func (lc *Client) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error { h := make(http.Header) if config != nil { h.Set("If-Match", config.ETag) @@ -1358,7 +1359,7 @@ func (lc *LocalClient) SetServeConfig(ctx context.Context, config *ipn.ServeConf // DisconnectControl shuts down all connections to control, thus making control consider this node inactive. This can be // run on HA subnet router or app connector replicas before shutting them down to ensure peers get told to switch over // to another replica whilst there is still some grace period for the existing connections to terminate. -func (lc *LocalClient) DisconnectControl(ctx context.Context) error { +func (lc *Client) DisconnectControl(ctx context.Context) error { _, _, err := lc.sendWithHeaders(ctx, "POST", "/localapi/v0/disconnect-control", 200, nil, nil) if err != nil { return fmt.Errorf("error disconnecting control: %w", err) @@ -1367,7 +1368,7 @@ func (lc *LocalClient) DisconnectControl(ctx context.Context) error { } // NetworkLockDisable shuts down network-lock across the tailnet. -func (lc *LocalClient) NetworkLockDisable(ctx context.Context, secret []byte) error { +func (lc *Client) NetworkLockDisable(ctx context.Context, secret []byte) error { if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/disable", 200, bytes.NewReader(secret)); err != nil { return fmt.Errorf("error: %w", err) } @@ -1377,7 +1378,7 @@ func (lc *LocalClient) NetworkLockDisable(ctx context.Context, secret []byte) er // GetServeConfig return the current serve config. // // If the serve config is empty, it returns (nil, nil). -func (lc *LocalClient) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) { +func (lc *Client) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) { body, h, err := lc.sendWithHeaders(ctx, "GET", "/localapi/v0/serve-config", 200, nil, nil) if err != nil { return nil, fmt.Errorf("getting serve config: %w", err) @@ -1452,7 +1453,7 @@ func (r jsonReader) Read(p []byte) (n int, err error) { } // ProfileStatus returns the current profile and the list of all profiles. -func (lc *LocalClient) ProfileStatus(ctx context.Context) (current ipn.LoginProfile, all []ipn.LoginProfile, err error) { +func (lc *Client) ProfileStatus(ctx context.Context) (current ipn.LoginProfile, all []ipn.LoginProfile, err error) { body, err := lc.send(ctx, "GET", "/localapi/v0/profiles/current", 200, nil) if err != nil { return @@ -1470,7 +1471,7 @@ func (lc *LocalClient) ProfileStatus(ctx context.Context) (current ipn.LoginProf } // ReloadConfig reloads the config file, if possible. -func (lc *LocalClient) ReloadConfig(ctx context.Context) (ok bool, err error) { +func (lc *Client) ReloadConfig(ctx context.Context) (ok bool, err error) { body, err := lc.send(ctx, "POST", "/localapi/v0/reload-config", 200, nil) if err != nil { return @@ -1488,13 +1489,13 @@ func (lc *LocalClient) ReloadConfig(ctx context.Context) (ok bool, err error) { // SwitchToEmptyProfile creates and switches to a new unnamed profile. The new // profile is not assigned an ID until it is persisted after a successful login. // In order to login to the new profile, the user must call LoginInteractive. 
-func (lc *LocalClient) SwitchToEmptyProfile(ctx context.Context) error { +func (lc *Client) SwitchToEmptyProfile(ctx context.Context) error { _, err := lc.send(ctx, "PUT", "/localapi/v0/profiles/", http.StatusCreated, nil) return err } // SwitchProfile switches to the given profile. -func (lc *LocalClient) SwitchProfile(ctx context.Context, profile ipn.ProfileID) error { +func (lc *Client) SwitchProfile(ctx context.Context, profile ipn.ProfileID) error { _, err := lc.send(ctx, "POST", "/localapi/v0/profiles/"+url.PathEscape(string(profile)), 204, nil) return err } @@ -1502,7 +1503,7 @@ func (lc *LocalClient) SwitchProfile(ctx context.Context, profile ipn.ProfileID) // DeleteProfile removes the profile with the given ID. // If the profile is the current profile, an empty profile // will be selected as if SwitchToEmptyProfile was called. -func (lc *LocalClient) DeleteProfile(ctx context.Context, profile ipn.ProfileID) error { +func (lc *Client) DeleteProfile(ctx context.Context, profile ipn.ProfileID) error { _, err := lc.send(ctx, "DELETE", "/localapi/v0/profiles"+url.PathEscape(string(profile)), http.StatusNoContent, nil) return err } @@ -1519,7 +1520,7 @@ func (lc *LocalClient) DeleteProfile(ctx context.Context, profile ipn.ProfileID) // to block until the feature has been enabled. // // 2023-08-09: Valid feature values are "serve" and "funnel". -func (lc *LocalClient) QueryFeature(ctx context.Context, feature string) (*tailcfg.QueryFeatureResponse, error) { +func (lc *Client) QueryFeature(ctx context.Context, feature string) (*tailcfg.QueryFeatureResponse, error) { v := url.Values{"feature": {feature}} body, err := lc.send(ctx, "POST", "/localapi/v0/query-feature?"+v.Encode(), 200, nil) if err != nil { @@ -1528,7 +1529,7 @@ func (lc *LocalClient) QueryFeature(ctx context.Context, feature string) (*tailc return decodeJSON[*tailcfg.QueryFeatureResponse](body) } -func (lc *LocalClient) DebugDERPRegion(ctx context.Context, regionIDOrCode string) (*ipnstate.DebugDERPRegionReport, error) { +func (lc *Client) DebugDERPRegion(ctx context.Context, regionIDOrCode string) (*ipnstate.DebugDERPRegionReport, error) { v := url.Values{"region": {regionIDOrCode}} body, err := lc.send(ctx, "POST", "/localapi/v0/debug-derp-region?"+v.Encode(), 200, nil) if err != nil { @@ -1538,7 +1539,7 @@ func (lc *LocalClient) DebugDERPRegion(ctx context.Context, regionIDOrCode strin } // DebugPacketFilterRules returns the packet filter rules for the current device. -func (lc *LocalClient) DebugPacketFilterRules(ctx context.Context) ([]tailcfg.FilterRule, error) { +func (lc *Client) DebugPacketFilterRules(ctx context.Context) ([]tailcfg.FilterRule, error) { body, err := lc.send(ctx, "POST", "/localapi/v0/debug-packet-filter-rules", 200, nil) if err != nil { return nil, fmt.Errorf("error %w: %s", err, body) @@ -1549,7 +1550,7 @@ func (lc *LocalClient) DebugPacketFilterRules(ctx context.Context) ([]tailcfg.Fi // DebugSetExpireIn marks the current node key to expire in d. // // This is meant primarily for debug and testing. 
-func (lc *LocalClient) DebugSetExpireIn(ctx context.Context, d time.Duration) error { +func (lc *Client) DebugSetExpireIn(ctx context.Context, d time.Duration) error { v := url.Values{"expiry": {fmt.Sprint(time.Now().Add(d).Unix())}} _, err := lc.send(ctx, "POST", "/localapi/v0/set-expiry-sooner?"+v.Encode(), 200, nil) return err @@ -1559,7 +1560,7 @@ func (lc *LocalClient) DebugSetExpireIn(ctx context.Context, d time.Duration) er // // The provided context does not determine the lifetime of the // returned io.ReadCloser. -func (lc *LocalClient) StreamDebugCapture(ctx context.Context) (io.ReadCloser, error) { +func (lc *Client) StreamDebugCapture(ctx context.Context) (io.ReadCloser, error) { req, err := http.NewRequestWithContext(ctx, "POST", "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-capture", nil) if err != nil { return nil, err @@ -1585,7 +1586,7 @@ func (lc *LocalClient) StreamDebugCapture(ctx context.Context) (io.ReadCloser, e // resources. // // A default set of ipn.Notify messages are returned but the set can be modified by mask. -func (lc *LocalClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*IPNBusWatcher, error) { +func (lc *Client) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*IPNBusWatcher, error) { req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/watch-ipn-bus?mask="+fmt.Sprint(mask), nil) @@ -1611,7 +1612,7 @@ func (lc *LocalClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) // CheckUpdate returns a tailcfg.ClientVersion indicating whether or not an update is available // to be installed via the LocalAPI. In case the LocalAPI can't install updates, it returns a // ClientVersion that says that we are up to date. -func (lc *LocalClient) CheckUpdate(ctx context.Context) (*tailcfg.ClientVersion, error) { +func (lc *Client) CheckUpdate(ctx context.Context) (*tailcfg.ClientVersion, error) { body, err := lc.get200(ctx, "/localapi/v0/update/check") if err != nil { return nil, err @@ -1627,7 +1628,7 @@ func (lc *LocalClient) CheckUpdate(ctx context.Context) (*tailcfg.ClientVersion, // To turn it on, there must have been a previously used exit node. // The most previously used one is reused. // This is a convenience method for GUIs. To select an actual one, update the prefs. -func (lc *LocalClient) SetUseExitNode(ctx context.Context, on bool) error { +func (lc *Client) SetUseExitNode(ctx context.Context, on bool) error { _, err := lc.send(ctx, "POST", "/localapi/v0/set-use-exit-node-enabled?enabled="+strconv.FormatBool(on), http.StatusOK, nil) return err } @@ -1635,7 +1636,7 @@ func (lc *LocalClient) SetUseExitNode(ctx context.Context, on bool) error { // DriveSetServerAddr instructs Taildrive to use the server at addr to access // the filesystem. This is used on platforms like Windows and MacOS to let // Taildrive know to use the file server running in the GUI app. -func (lc *LocalClient) DriveSetServerAddr(ctx context.Context, addr string) error { +func (lc *Client) DriveSetServerAddr(ctx context.Context, addr string) error { _, err := lc.send(ctx, "PUT", "/localapi/v0/drive/fileserver-address", http.StatusCreated, strings.NewReader(addr)) return err } @@ -1643,14 +1644,14 @@ func (lc *LocalClient) DriveSetServerAddr(ctx context.Context, addr string) erro // DriveShareSet adds or updates the given share in the list of shares that // Taildrive will serve to remote nodes. If a share with the same name already // exists, the existing share is replaced/updated. 
-func (lc *LocalClient) DriveShareSet(ctx context.Context, share *drive.Share) error { +func (lc *Client) DriveShareSet(ctx context.Context, share *drive.Share) error { _, err := lc.send(ctx, "PUT", "/localapi/v0/drive/shares", http.StatusCreated, jsonBody(share)) return err } // DriveShareRemove removes the share with the given name from the list of // shares that Taildrive will serve to remote nodes. -func (lc *LocalClient) DriveShareRemove(ctx context.Context, name string) error { +func (lc *Client) DriveShareRemove(ctx context.Context, name string) error { _, err := lc.send( ctx, "DELETE", @@ -1661,7 +1662,7 @@ func (lc *LocalClient) DriveShareRemove(ctx context.Context, name string) error } // DriveShareRename renames the share from old to new name. -func (lc *LocalClient) DriveShareRename(ctx context.Context, oldName, newName string) error { +func (lc *Client) DriveShareRename(ctx context.Context, oldName, newName string) error { _, err := lc.send( ctx, "POST", @@ -1673,7 +1674,7 @@ func (lc *LocalClient) DriveShareRename(ctx context.Context, oldName, newName st // DriveShareList returns the list of shares that drive is currently serving // to remote nodes. -func (lc *LocalClient) DriveShareList(ctx context.Context) ([]*drive.Share, error) { +func (lc *Client) DriveShareList(ctx context.Context) ([]*drive.Share, error) { result, err := lc.get200(ctx, "/localapi/v0/drive/shares") if err != nil { return nil, err @@ -1684,7 +1685,7 @@ func (lc *LocalClient) DriveShareList(ctx context.Context) ([]*drive.Share, erro } // IPNBusWatcher is an active subscription (watch) of the local tailscaled IPN bus. -// It's returned by LocalClient.WatchIPNBus. +// It's returned by Client.WatchIPNBus. // // It must be closed when done. type IPNBusWatcher struct { @@ -1708,7 +1709,7 @@ func (w *IPNBusWatcher) Close() error { } // Next returns the next ipn.Notify from the stream. -// If the context from LocalClient.WatchIPNBus is done, that error is returned. +// If the context from Client.WatchIPNBus is done, that error is returned. func (w *IPNBusWatcher) Next() (ipn.Notify, error) { var n ipn.Notify if err := w.dec.Decode(&n); err != nil { @@ -1721,7 +1722,7 @@ func (w *IPNBusWatcher) Next() (ipn.Notify, error) { } // SuggestExitNode requests an exit node suggestion and returns the exit node's details. 
-func (lc *LocalClient) SuggestExitNode(ctx context.Context) (apitype.ExitNodeSuggestionResponse, error) {
+func (lc *Client) SuggestExitNode(ctx context.Context) (apitype.ExitNodeSuggestionResponse, error) {
 	body, err := lc.get200(ctx, "/localapi/v0/suggest-exit-node")
 	if err != nil {
 		return apitype.ExitNodeSuggestionResponse{}, err
diff --git a/client/tailscale/localclient_test.go b/client/local/local_test.go
similarity index 98%
rename from client/tailscale/localclient_test.go
rename to client/local/local_test.go
index 950a22f47..4322e4dde 100644
--- a/client/tailscale/localclient_test.go
+++ b/client/local/local_test.go
@@ -3,7 +3,7 @@
 
 //go:build go1.19
 
-package tailscale
+package local
 
 import (
 	"context"
@@ -41,7 +41,7 @@ func TestWhoIsPeerNotFound(t *testing.T) {
 	}))
 	defer ts.Close()
 
-	lc := &LocalClient{
+	lc := &Client{
 		Dial: func(ctx context.Context, network, addr string) (net.Conn, error) {
 			var std net.Dialer
 			return std.DialContext(ctx, network, ts.Listener.Addr().(*net.TCPAddr).String())
diff --git a/client/tailscale/localclient_aliases.go b/client/tailscale/localclient_aliases.go
new file mode 100644
index 000000000..2e77b22e3
--- /dev/null
+++ b/client/tailscale/localclient_aliases.go
@@ -0,0 +1,77 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package tailscale
+
+import (
+	"context"
+	"crypto/tls"
+
+	"tailscale.com/client/local"
+	"tailscale.com/client/tailscale/apitype"
+)
+
+// ErrPeerNotFound is an alias for tailscale.com/client/local.
+//
+// Deprecated: import tailscale.com/client/local instead.
+var ErrPeerNotFound = local.ErrPeerNotFound
+
+// LocalClient is an alias for tailscale.com/client/local.
+//
+// Deprecated: import tailscale.com/client/local instead.
+type LocalClient = local.Client
+
+// IPNBusWatcher is an alias for tailscale.com/client/local.
+//
+// Deprecated: import tailscale.com/client/local instead.
+type IPNBusWatcher = local.IPNBusWatcher
+
+// BugReportOpts is an alias for tailscale.com/client/local.
+//
+// Deprecated: import tailscale.com/client/local instead.
+type BugReportOpts = local.BugReportOpts
+
+// DebugPortmapOpts is an alias for tailscale.com/client/local.
+//
+// Deprecated: import tailscale.com/client/local instead.
+type DebugPortmapOpts = local.DebugPortmapOpts
+
+// PingOpts is an alias for tailscale.com/client/local.
+//
+// Deprecated: import tailscale.com/client/local instead.
+type PingOpts = local.PingOpts
+
+// GetCertificate is an alias for tailscale.com/client/local.
+//
+// Deprecated: import tailscale.com/client/local instead.
+func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {
+	return local.GetCertificate(hi)
+}
+
+// SetVersionMismatchHandler is an alias for tailscale.com/client/local.
+//
+// Deprecated: import tailscale.com/client/local instead.
+func SetVersionMismatchHandler(f func(clientVer, serverVer string)) {
+	local.SetVersionMismatchHandler(f)
+}
+
+// IsAccessDeniedError is an alias for tailscale.com/client/local.
+//
+// Deprecated: import tailscale.com/client/local instead.
+func IsAccessDeniedError(err error) bool {
+	return local.IsAccessDeniedError(err)
+}
+
+// IsPreconditionsFailedError is an alias for tailscale.com/client/local.
+//
+// Deprecated: import tailscale.com/client/local instead.
+func IsPreconditionsFailedError(err error) bool {
+	return local.IsPreconditionsFailedError(err)
+}
+
+// WhoIs is an alias for tailscale.com/client/local.
+//
+// Deprecated: import tailscale.com/client/local instead.
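+//
+// A minimal usage sketch of the replacement API (illustrative only; the
+// context and address below are placeholders, not part of this change):
+//
+//	var lc local.Client
+//	who, err := lc.WhoIs(context.Background(), "100.64.0.1:22")
+//	if err != nil {
+//		// handle lookup failure
+//	}
+//	_ = who.UserProfile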
+func WhoIs(ctx context.Context, remoteAddr string) (*apitype.WhoIsResponse, error) { + return local.WhoIs(ctx, remoteAddr) +} diff --git a/client/tailscale/tailscale.go b/client/tailscale/tailscale.go index 8533b4712..9d001d376 100644 --- a/client/tailscale/tailscale.go +++ b/client/tailscale/tailscale.go @@ -3,8 +3,7 @@ //go:build go1.19 -// Package tailscale contains Go clients for the Tailscale LocalAPI and -// Tailscale control plane API. +// Package tailscale contains a Go client for the Tailscale control plane API. // // Warning: this package is in development and makes no API compatibility // promises as of 2022-04-29. It is subject to change at any time. diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 82dd08e63..91891463c 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -55,7 +55,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa github.com/tailscale/setec/types/api from github.com/tailscale/setec/client/setec L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 - 💣 go4.org/mem from tailscale.com/client/tailscale+ + 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/net/tsaddr W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/netmon+ google.golang.org/protobuf/encoding/protodelim from github.com/prometheus/common/expfmt @@ -88,17 +88,18 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version 💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+ + tailscale.com/client/local from tailscale.com/client/tailscale tailscale.com/client/tailscale from tailscale.com/derp - tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale + tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/derp from tailscale.com/cmd/derper+ tailscale.com/derp/derphttp from tailscale.com/cmd/derper tailscale.com/disco from tailscale.com/derp - tailscale.com/drive from tailscale.com/client/tailscale+ - tailscale.com/envknob from tailscale.com/client/tailscale+ + tailscale.com/drive from tailscale.com/client/local+ + tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/hostinfo from tailscale.com/net/netmon+ - tailscale.com/ipn from tailscale.com/client/tailscale - tailscale.com/ipn/ipnstate from tailscale.com/client/tailscale+ + tailscale.com/ipn from tailscale.com/client/local + tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/metrics from tailscale.com/cmd/derper+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial @@ -108,7 +109,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/net/netknob from tailscale.com/net/netns 💣 tailscale.com/net/netmon from tailscale.com/derp/derphttp+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp - tailscale.com/net/netutil from tailscale.com/client/tailscale + tailscale.com/net/netutil from tailscale.com/client/local tailscale.com/net/sockstats from tailscale.com/derp/derphttp tailscale.com/net/stun from tailscale.com/net/stunserver tailscale.com/net/stunserver from tailscale.com/cmd/derper @@ -118,11 +119,11 @@ tailscale.com/cmd/derper 
dependencies: (generated by github.com/tailscale/depawa tailscale.com/net/tsaddr from tailscale.com/ipn+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/derp/derphttp+ tailscale.com/net/wsconn from tailscale.com/cmd/derper - tailscale.com/paths from tailscale.com/client/tailscale - 💣 tailscale.com/safesocket from tailscale.com/client/tailscale + tailscale.com/paths from tailscale.com/client/local + 💣 tailscale.com/safesocket from tailscale.com/client/local tailscale.com/syncs from tailscale.com/cmd/derper+ - tailscale.com/tailcfg from tailscale.com/client/tailscale+ - tailscale.com/tka from tailscale.com/client/tailscale+ + tailscale.com/tailcfg from tailscale.com/client/local+ + tailscale.com/tka from tailscale.com/client/local+ W tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tstime from tailscale.com/derp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate @@ -133,7 +134,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/types/dnstype from tailscale.com/tailcfg+ tailscale.com/types/empty from tailscale.com/ipn tailscale.com/types/ipproto from tailscale.com/tailcfg+ - tailscale.com/types/key from tailscale.com/client/tailscale+ + tailscale.com/types/key from tailscale.com/client/local+ tailscale.com/types/lazy from tailscale.com/version+ tailscale.com/types/logger from tailscale.com/cmd/derper+ tailscale.com/types/netmap from tailscale.com/ipn @@ -143,7 +144,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/types/ptr from tailscale.com/hostinfo+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/ipn+ - tailscale.com/types/tkatype from tailscale.com/client/tailscale+ + tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/ipn+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/net/netmon+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 2eab8e123..43ad0598f 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -235,7 +235,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ go.uber.org/zap/internal/pool from go.uber.org/zap+ go.uber.org/zap/internal/stacktrace from go.uber.org/zap go.uber.org/zap/zapcore from github.com/go-logr/zapr+ - 💣 go4.org/mem from tailscale.com/client/tailscale+ + 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ W 💣 golang.zx2c4.com/wintun from github.com/tailscale/wireguard-go/tun W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/dns+ @@ -780,6 +780,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ + tailscale.com/client/local from tailscale.com/client/tailscale tailscale.com/client/tailscale from tailscale.com/client/web+ tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal @@ -797,8 +798,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal 
- tailscale.com/drive from tailscale.com/client/tailscale+ - tailscale.com/envknob from tailscale.com/client/tailscale+ + tailscale.com/drive from tailscale.com/client/local+ + tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/feature/wakeonlan+ tailscale.com/feature/capture from tailscale.com/feature/condregister @@ -809,11 +810,11 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient - tailscale.com/ipn from tailscale.com/client/tailscale+ + tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ - tailscale.com/ipn/ipnstate from tailscale.com/client/tailscale+ + tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet+ tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ @@ -860,7 +861,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/net/netmon from tailscale.com/control/controlclient+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ W 💣 tailscale.com/net/netstat from tailscale.com/portlist - tailscale.com/net/netutil from tailscale.com/client/tailscale+ + tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/packet from tailscale.com/net/connstats+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ @@ -878,19 +879,19 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/omit from tailscale.com/ipn/conffile - tailscale.com/paths from tailscale.com/client/tailscale+ + tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ - 💣 tailscale.com/safesocket from tailscale.com/client/tailscale+ + 💣 tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/sessionrecording from tailscale.com/k8s-operator/sessionrecording+ tailscale.com/syncs from tailscale.com/control/controlknobs+ - tailscale.com/tailcfg from tailscale.com/client/tailscale+ + tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/taildrop from tailscale.com/ipn/ipnlocal+ tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tempfork/httprec from tailscale.com/control/controlclient - tailscale.com/tka from tailscale.com/client/tailscale+ + tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tsd from tailscale.com/ipn/ipnlocal+ tailscale.com/tsnet from tailscale.com/cmd/k8s-operator+ @@ -902,7 +903,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from 
tailscale.com/ipn+ tailscale.com/types/ipproto from tailscale.com/net/flowtrack+ - tailscale.com/types/key from tailscale.com/client/tailscale+ + tailscale.com/types/key from tailscale.com/client/local+ tailscale.com/types/lazy from tailscale.com/ipn/ipnlocal+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ @@ -915,7 +916,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/ptr from tailscale.com/cmd/k8s-operator+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/control/controlclient+ - tailscale.com/types/tkatype from tailscale.com/client/tailscale+ + tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/cmd/k8s-operator+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 6d1fcfd03..5533683af 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -60,7 +60,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 - 💣 go4.org/mem from tailscale.com/client/tailscale+ + 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/net/tsaddr W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/netmon+ k8s.io/client-go/util/homedir from tailscale.com/cmd/tailscale/cli @@ -70,6 +70,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep software.sslmate.com/src/go-pkcs12/internal/rc2 from software.sslmate.com/src/go-pkcs12 tailscale.com from tailscale.com/version 💣 tailscale.com/atomicfile from tailscale.com/cmd/tailscale/cli+ + tailscale.com/client/local from tailscale.com/client/tailscale tailscale.com/client/tailscale from tailscale.com/client/web+ tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/cmd/tailscale/cli @@ -85,16 +86,16 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/derp from tailscale.com/derp/derphttp tailscale.com/derp/derphttp from tailscale.com/net/netcheck tailscale.com/disco from tailscale.com/derp - tailscale.com/drive from tailscale.com/client/tailscale+ - tailscale.com/envknob from tailscale.com/client/tailscale+ + tailscale.com/drive from tailscale.com/client/local+ + tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/noiseconn from tailscale.com/cmd/tailscale/cli - tailscale.com/ipn from tailscale.com/client/tailscale+ - tailscale.com/ipn/ipnstate from tailscale.com/client/tailscale+ + tailscale.com/ipn from tailscale.com/client/local+ + tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web+ tailscale.com/metrics from 
tailscale.com/derp+ @@ -109,7 +110,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/netknob from tailscale.com/net/netns 💣 tailscale.com/net/netmon from tailscale.com/cmd/tailscale/cli+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ - tailscale.com/net/netutil from tailscale.com/client/tailscale+ + tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/ping from tailscale.com/net/netcheck tailscale.com/net/portmapper from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/sockstats from tailscale.com/control/controlhttp+ @@ -119,12 +120,12 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ - tailscale.com/paths from tailscale.com/client/tailscale+ - 💣 tailscale.com/safesocket from tailscale.com/client/tailscale+ + tailscale.com/paths from tailscale.com/client/local+ + 💣 tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/syncs from tailscale.com/cmd/tailscale/cli+ - tailscale.com/tailcfg from tailscale.com/client/tailscale+ + tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/spf13/cobra from tailscale.com/cmd/tailscale/cli/ffcomplete+ - tailscale.com/tka from tailscale.com/client/tailscale+ + tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tstime from tailscale.com/control/controlhttp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate @@ -133,7 +134,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/types/dnstype from tailscale.com/tailcfg+ tailscale.com/types/empty from tailscale.com/ipn tailscale.com/types/ipproto from tailscale.com/ipn+ - tailscale.com/types/key from tailscale.com/client/tailscale+ + tailscale.com/types/key from tailscale.com/client/local+ tailscale.com/types/lazy from tailscale.com/util/testenv+ tailscale.com/types/logger from tailscale.com/client/web+ tailscale.com/types/netmap from tailscale.com/ipn+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 31a2ec0ba..d5beb789c 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -184,7 +184,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/u-root/uio/uio from github.com/insomniacslk/dhcp/dhcpv4+ L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 - 💣 go4.org/mem from tailscale.com/client/tailscale+ + 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from github.com/tailscale/wf+ W 💣 golang.zx2c4.com/wintun from github.com/tailscale/wireguard-go/tun+ W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/cmd/tailscaled+ @@ -233,6 +233,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ LD tailscale.com/chirp from tailscale.com/cmd/tailscaled + tailscale.com/client/local from tailscale.com/client/tailscale tailscale.com/client/tailscale from tailscale.com/client/web+ tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal @@ 
-251,12 +252,12 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal - tailscale.com/drive from tailscale.com/client/tailscale+ + tailscale.com/drive from tailscale.com/client/local+ tailscale.com/drive/driveimpl from tailscale.com/cmd/tailscaled tailscale.com/drive/driveimpl/compositedav from tailscale.com/drive/driveimpl tailscale.com/drive/driveimpl/dirfs from tailscale.com/drive/driveimpl+ tailscale.com/drive/driveimpl/shared from tailscale.com/drive/driveimpl+ - tailscale.com/envknob from tailscale.com/client/tailscale+ + tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/feature/wakeonlan+ tailscale.com/feature/capture from tailscale.com/feature/condregister @@ -267,12 +268,12 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient - tailscale.com/ipn from tailscale.com/client/tailscale+ + tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled - tailscale.com/ipn/ipnstate from tailscale.com/client/tailscale+ + tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver+ tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/cmd/tailscaled+ @@ -310,7 +311,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/net/netmon from tailscale.com/cmd/tailscaled+ 💣 tailscale.com/net/netns from tailscale.com/cmd/tailscaled+ W 💣 tailscale.com/net/netstat from tailscale.com/portlist - tailscale.com/net/netutil from tailscale.com/client/tailscale+ + tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/packet from tailscale.com/net/connstats+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ @@ -328,21 +329,21 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/omit from tailscale.com/ipn/conffile - tailscale.com/paths from tailscale.com/client/tailscale+ + tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ - 💣 tailscale.com/safesocket from tailscale.com/client/tailscale+ + 💣 tailscale.com/safesocket from tailscale.com/client/local+ LD tailscale.com/sessionrecording from tailscale.com/ssh/tailssh LD 💣 tailscale.com/ssh/tailssh from tailscale.com/cmd/tailscaled tailscale.com/syncs from tailscale.com/cmd/tailscaled+ - tailscale.com/tailcfg from tailscale.com/client/tailscale+ + tailscale.com/tailcfg from tailscale.com/client/local+ 
tailscale.com/taildrop from tailscale.com/ipn/ipnlocal+ tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal LD tailscale.com/tempfork/gliderlabs/ssh from tailscale.com/ssh/tailssh tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tempfork/httprec from tailscale.com/control/controlclient - tailscale.com/tka from tailscale.com/client/tailscale+ + tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tsd from tailscale.com/cmd/tailscaled+ tailscale.com/tstime from tailscale.com/control/controlclient+ @@ -354,7 +355,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled tailscale.com/types/ipproto from tailscale.com/net/flowtrack+ - tailscale.com/types/key from tailscale.com/client/tailscale+ + tailscale.com/types/key from tailscale.com/client/local+ tailscale.com/types/lazy from tailscale.com/ipn/ipnlocal+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ From 8ecce0e98dde566f3b65e244267d3a7693fd7b8a Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Wed, 5 Feb 2025 15:06:20 -0600 Subject: [PATCH 0463/1708] client: add missing localclient aliases (#14921) localclient_aliases.go was missing some package level functions from client/local. This adds them. Updates tailscale/corp#22748 Signed-off-by: Percy Wegmann --- client/tailscale/localclient_aliases.go | 29 +++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/client/tailscale/localclient_aliases.go b/client/tailscale/localclient_aliases.go index 2e77b22e3..28d597232 100644 --- a/client/tailscale/localclient_aliases.go +++ b/client/tailscale/localclient_aliases.go @@ -9,6 +9,7 @@ import ( "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" + "tailscale.com/ipn/ipnstate" ) // ErrPeerNotFound is an alias for tailscale.com/client/local. @@ -75,3 +76,31 @@ func IsPreconditionsFailedError(err error) bool { func WhoIs(ctx context.Context, remoteAddr string) (*apitype.WhoIsResponse, error) { return local.WhoIs(ctx, remoteAddr) } + +// Status is an alias for tailscale.com/client/local. +// +// Deprecated: import tailscale.com/client/local instead. +func Status(ctx context.Context) (*ipnstate.Status, error) { + return local.Status(ctx) +} + +// StatusWithoutPeers is an alias for tailscale.com/client/local. +// +// Deprecated: import tailscale.com/client/local instead. +func StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { + return local.StatusWithoutPeers(ctx) +} + +// CertPair is an alias for tailscale.com/client/local. +// +// Deprecated: import tailscale.com/client/local instead. +func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { + return local.CertPair(ctx, domain) +} + +// ExpandSNIName is an alias for tailscale.com/client/local. +// +// Deprecated: import tailscale.com/client/local instead. +func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { + return local.ExpandSNIName(ctx, name) +} From 05ac21ebe452eeaccf38fd3caa9d037615ad48ca Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 5 Feb 2025 10:53:06 -0800 Subject: [PATCH 0464/1708] all: use new LocalAPI client package location It was moved in f57fa3cbc30e. 
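For code outside this repository, the migration is typically just the import path and type name. A minimal sketch, not taken from this patch, assuming a caller with a context and no special socket configuration:

    import (
        "context"

        "tailscale.com/client/local"
    )

    func status(ctx context.Context) error {
        var lc local.Client // previously tailscale.LocalClient
        _, err := lc.Status(ctx) // method set is unchanged by the move
        return err
    }
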
Updates tailscale/corp#22748 Change-Id: I19f965e6bded1d4c919310aa5b864f2de0cd6220 Signed-off-by: Brad Fitzpatrick --- client/systray/systray.go | 4 ++-- client/tailscale/apitype/apitype.go | 2 +- client/tailscale/example/servetls/servetls.go | 5 +++-- client/web/web.go | 10 +++++----- client/web/web_test.go | 14 +++++++------- cmd/containerboot/metrics.go | 6 +++--- cmd/containerboot/serve.go | 6 +++--- cmd/containerboot/serve_test.go | 4 ++-- cmd/containerboot/services.go | 6 +++--- cmd/containerboot/tailscaled.go | 8 ++++---- cmd/derper/depaware.txt | 2 +- cmd/hello/hello.go | 4 ++-- cmd/k8s-operator/depaware.txt | 4 ++-- cmd/k8s-operator/proxy.go | 4 ++-- cmd/natc/natc.go | 6 +++--- cmd/pgproxy/pgproxy.go | 6 +++--- cmd/proxy-to-grafana/proxy-to-grafana.go | 6 +++--- cmd/sniproxy/sniproxy.go | 4 ++-- cmd/tailscale/cli/cli.go | 3 ++- cmd/tailscale/cli/serve_legacy.go | 2 +- cmd/tailscale/cli/serve_legacy_test.go | 2 +- cmd/tailscale/depaware.txt | 4 ++-- cmd/tailscaled/depaware.txt | 4 ++-- cmd/tailscaled/tailscaled.go | 4 ++-- cmd/tl-longchain/tl-longchain.go | 4 ++-- cmd/tsidp/tsidp.go | 12 ++++++------ cmd/tta/tta.go | 4 ++-- derp/derp_server.go | 3 ++- ipn/ipnlocal/web_client.go | 10 +++++----- ipn/ipnlocal/web_client_stub.go | 4 ++-- ipn/ipnserver/server_test.go | 19 ++++++++++--------- prober/derp.go | 4 ++-- tsnet/tsnet.go | 15 ++++++++------- tsnet/tsnet_test.go | 4 ++-- tstest/integration/integration_test.go | 7 ++++--- .../tailscaled_deps_test_darwin.go | 2 +- .../tailscaled_deps_test_freebsd.go | 2 +- .../integration/tailscaled_deps_test_linux.go | 2 +- .../tailscaled_deps_test_openbsd.go | 2 +- .../tailscaled_deps_test_windows.go | 2 +- tstest/natlab/vnet/vnet.go | 6 +++--- 41 files changed, 114 insertions(+), 108 deletions(-) diff --git a/client/systray/systray.go b/client/systray/systray.go index de2a37d8d..ac64b9958 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -26,7 +26,7 @@ import ( "github.com/atotto/clipboard" dbus "github.com/godbus/dbus/v5" "github.com/toqueteos/webbrowser" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" @@ -67,7 +67,7 @@ func (menu *Menu) Run() { type Menu struct { mu sync.Mutex // protects the entire Menu - lc tailscale.LocalClient + lc local.Client status *ipnstate.Status curProfile ipn.LoginProfile allProfiles []ipn.LoginProfile diff --git a/client/tailscale/apitype/apitype.go b/client/tailscale/apitype/apitype.go index 5ef838039..58cdcecc7 100644 --- a/client/tailscale/apitype/apitype.go +++ b/client/tailscale/apitype/apitype.go @@ -24,7 +24,7 @@ const LocalAPIHost = "local-tailscaled.sock" const RequestReasonHeader = "X-Tailscale-Reason" // RequestReasonKey is the context key used to pass the request reason -// when making a LocalAPI request via [tailscale.LocalClient]. +// when making a LocalAPI request via [local.Client]. // It's value is a raw string. An empty string means no reason was provided. // // See tailscale/corp#26146. 
diff --git a/client/tailscale/example/servetls/servetls.go b/client/tailscale/example/servetls/servetls.go index f48e90d16..0ade42088 100644 --- a/client/tailscale/example/servetls/servetls.go +++ b/client/tailscale/example/servetls/servetls.go @@ -11,13 +11,14 @@ import ( "log" "net/http" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" ) func main() { + var lc local.Client s := &http.Server{ TLSConfig: &tls.Config{ - GetCertificate: tailscale.GetCertificate, + GetCertificate: lc.GetCertificate, }, Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { io.WriteString(w, "
<h1>Hello from Tailscale!</h1>
It works.") diff --git a/client/web/web.go b/client/web/web.go index 3a7feea40..6203b4c18 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -22,7 +22,7 @@ import ( "time" "github.com/gorilla/csrf" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/clientupdate" "tailscale.com/envknob" @@ -50,7 +50,7 @@ type Server struct { mode ServerMode logf logger.Logf - lc *tailscale.LocalClient + lc *local.Client timeNow func() time.Time // devMode indicates that the server run with frontend assets @@ -125,9 +125,9 @@ type ServerOpts struct { // PathPrefix is the URL prefix added to requests by CGI or reverse proxy. PathPrefix string - // LocalClient is the tailscale.LocalClient to use for this web server. + // LocalClient is the local.Client to use for this web server. // If nil, a new one will be created. - LocalClient *tailscale.LocalClient + LocalClient *local.Client // TimeNow optionally provides a time function. // time.Now is used as default. @@ -166,7 +166,7 @@ func NewServer(opts ServerOpts) (s *Server, err error) { return nil, fmt.Errorf("invalid Mode provided") } if opts.LocalClient == nil { - opts.LocalClient = &tailscale.LocalClient{} + opts.LocalClient = &local.Client{} } s = &Server{ mode: opts.Mode, diff --git a/client/web/web_test.go b/client/web/web_test.go index 3c5543c12..b9242f6ac 100644 --- a/client/web/web_test.go +++ b/client/web/web_test.go @@ -20,7 +20,7 @@ import ( "time" "github.com/google/go-cmp/cmp" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" @@ -120,7 +120,7 @@ func TestServeAPI(t *testing.T) { s := &Server{ mode: ManageServerMode, - lc: &tailscale.LocalClient{Dial: lal.Dial}, + lc: &local.Client{Dial: lal.Dial}, timeNow: time.Now, } @@ -288,7 +288,7 @@ func TestGetTailscaleBrowserSession(t *testing.T) { s := &Server{ timeNow: time.Now, - lc: &tailscale.LocalClient{Dial: lal.Dial}, + lc: &local.Client{Dial: lal.Dial}, } // Add some browser sessions to cache state. 
@@ -457,7 +457,7 @@ func TestAuthorizeRequest(t *testing.T) { s := &Server{ mode: ManageServerMode, - lc: &tailscale.LocalClient{Dial: lal.Dial}, + lc: &local.Client{Dial: lal.Dial}, timeNow: time.Now, } validCookie := "ts-cookie" @@ -572,7 +572,7 @@ func TestServeAuth(t *testing.T) { s := &Server{ mode: ManageServerMode, - lc: &tailscale.LocalClient{Dial: lal.Dial}, + lc: &local.Client{Dial: lal.Dial}, timeNow: func() time.Time { return timeNow }, newAuthURL: mockNewAuthURL, waitAuthURL: mockWaitAuthURL, @@ -914,7 +914,7 @@ func TestServeAPIAuthMetricLogging(t *testing.T) { s := &Server{ mode: ManageServerMode, - lc: &tailscale.LocalClient{Dial: lal.Dial}, + lc: &local.Client{Dial: lal.Dial}, timeNow: func() time.Time { return timeNow }, newAuthURL: mockNewAuthURL, waitAuthURL: mockWaitAuthURL, @@ -1126,7 +1126,7 @@ func TestRequireTailscaleIP(t *testing.T) { s := &Server{ mode: ManageServerMode, - lc: &tailscale.LocalClient{Dial: lal.Dial}, + lc: &local.Client{Dial: lal.Dial}, timeNow: time.Now, logf: t.Logf, } diff --git a/cmd/containerboot/metrics.go b/cmd/containerboot/metrics.go index a8b9222a5..0bcd231ab 100644 --- a/cmd/containerboot/metrics.go +++ b/cmd/containerboot/metrics.go @@ -10,7 +10,7 @@ import ( "io" "net/http" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" ) @@ -18,7 +18,7 @@ import ( // the tailscaled's LocalAPI usermetrics endpoint at /localapi/v0/usermetrics. type metrics struct { debugEndpoint string - lc *tailscale.LocalClient + lc *local.Client } func proxy(w http.ResponseWriter, r *http.Request, url string, do func(*http.Request) (*http.Response, error)) { @@ -68,7 +68,7 @@ func (m *metrics) handleDebug(w http.ResponseWriter, r *http.Request) { // In 1.78.x and 1.80.x, it also proxies debug paths to tailscaled's debug // endpoint if configured to ease migration for a breaking change serving user // metrics instead of debug metrics on the "metrics" port. -func metricsHandlers(mux *http.ServeMux, lc *tailscale.LocalClient, debugAddrPort string) { +func metricsHandlers(mux *http.ServeMux, lc *local.Client, debugAddrPort string) { m := &metrics{ lc: lc, debugEndpoint: debugAddrPort, diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index aad22820b..fbfaba64a 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -17,7 +17,7 @@ import ( "time" "github.com/fsnotify/fsnotify" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/ipn" "tailscale.com/kube/kubetypes" "tailscale.com/types/netmap" @@ -28,7 +28,7 @@ import ( // applies it to lc. It exits when ctx is canceled. cdChanged is a channel that // is written to when the certDomain changes, causing the serve config to be // re-read and applied. -func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *tailscale.LocalClient, kc *kubeClient) { +func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *local.Client, kc *kubeClient) { if certDomainAtomic == nil { panic("certDomainAtomic must not be nil") } @@ -91,7 +91,7 @@ func certDomainFromNetmap(nm *netmap.NetworkMap) string { return nm.DNS.CertDomains[0] } -// localClient is a subset of tailscale.LocalClient that can be mocked for testing. +// localClient is a subset of [local.Client] that can be mocked for testing. 
type localClient interface { SetServeConfig(context.Context, *ipn.ServeConfig) error } diff --git a/cmd/containerboot/serve_test.go b/cmd/containerboot/serve_test.go index 4563c52fc..eb92a8dc8 100644 --- a/cmd/containerboot/serve_test.go +++ b/cmd/containerboot/serve_test.go @@ -12,7 +12,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/ipn" "tailscale.com/kube/kubetypes" ) @@ -197,7 +197,7 @@ func TestReadServeConfig(t *testing.T) { } type fakeLocalClient struct { - *tailscale.LocalClient + *local.Client setServeCalled bool } diff --git a/cmd/containerboot/services.go b/cmd/containerboot/services.go index 177cb2d50..21ae0f4e0 100644 --- a/cmd/containerboot/services.go +++ b/cmd/containerboot/services.go @@ -21,7 +21,7 @@ import ( "time" "github.com/fsnotify/fsnotify" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/ipn" "tailscale.com/kube/egressservices" "tailscale.com/kube/kubeclient" @@ -50,7 +50,7 @@ type egressProxy struct { kc kubeclient.Client // never nil stateSecret string // name of the kube state Secret - tsClient *tailscale.LocalClient // never nil + tsClient *local.Client // never nil netmapChan chan ipn.Notify // chan to receive netmap updates on @@ -131,7 +131,7 @@ type egressProxyRunOpts struct { cfgPath string nfr linuxfw.NetfilterRunner kc kubeclient.Client - tsClient *tailscale.LocalClient + tsClient *local.Client stateSecret string netmapChan chan ipn.Notify podIPv4 string diff --git a/cmd/containerboot/tailscaled.go b/cmd/containerboot/tailscaled.go index 1ff068b97..e73a7e94d 100644 --- a/cmd/containerboot/tailscaled.go +++ b/cmd/containerboot/tailscaled.go @@ -20,10 +20,10 @@ import ( "time" "github.com/fsnotify/fsnotify" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" ) -func startTailscaled(ctx context.Context, cfg *settings) (*tailscale.LocalClient, *os.Process, error) { +func startTailscaled(ctx context.Context, cfg *settings) (*local.Client, *os.Process, error) { args := tailscaledArgs(cfg) // tailscaled runs without context, since it needs to persist // beyond the startup timeout in ctx. 
@@ -54,7 +54,7 @@ func startTailscaled(ctx context.Context, cfg *settings) (*tailscale.LocalClient break } - tsClient := &tailscale.LocalClient{ + tsClient := &local.Client{ Socket: cfg.Socket, UseSocketOnly: true, } @@ -170,7 +170,7 @@ func tailscaleSet(ctx context.Context, cfg *settings) error { return nil } -func watchTailscaledConfigChanges(ctx context.Context, path string, lc *tailscale.LocalClient, errCh chan<- error) { +func watchTailscaledConfigChanges(ctx context.Context, path string, lc *local.Client, errCh chan<- error) { var ( tickChan <-chan time.Time tailscaledCfgDir = filepath.Dir(path) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 91891463c..e9df49b72 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -88,7 +88,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version 💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+ - tailscale.com/client/local from tailscale.com/client/tailscale + tailscale.com/client/local from tailscale.com/client/tailscale+ tailscale.com/client/tailscale from tailscale.com/derp tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/derp from tailscale.com/cmd/derper+ diff --git a/cmd/hello/hello.go b/cmd/hello/hello.go index e4b0ca827..86f885f54 100644 --- a/cmd/hello/hello.go +++ b/cmd/hello/hello.go @@ -18,7 +18,7 @@ import ( "strings" "time" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" ) @@ -31,7 +31,7 @@ var ( //go:embed hello.tmpl.html var embeddedTemplate string -var localClient tailscale.LocalClient +var localClient local.Client func main() { flag.Parse() diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 43ad0598f..aedd4265e 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -780,8 +780,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ - tailscale.com/client/local from tailscale.com/client/tailscale - tailscale.com/client/tailscale from tailscale.com/client/web+ + tailscale.com/client/local from tailscale.com/client/tailscale+ + tailscale.com/client/tailscale from tailscale.com/cmd/k8s-operator+ tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ diff --git a/cmd/k8s-operator/proxy.go b/cmd/k8s-operator/proxy.go index 4509c0dd8..01383a53d 100644 --- a/cmd/k8s-operator/proxy.go +++ b/cmd/k8s-operator/proxy.go @@ -20,7 +20,7 @@ import ( "go.uber.org/zap" "k8s.io/client-go/rest" "k8s.io/client-go/transport" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" ksr "tailscale.com/k8s-operator/sessionrecording" "tailscale.com/kube/kubetypes" @@ -189,7 +189,7 @@ func runAPIServerProxy(ts *tsnet.Server, rt http.RoundTripper, log *zap.SugaredL // LocalAPI and then proxies them to the Kubernetes API. 
type apiserverProxy struct { log *zap.SugaredLogger - lc *tailscale.LocalClient + lc *local.Client rp *httputil.ReverseProxy mode apiServerProxyMode diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index 069eabefd..818947a13 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -29,7 +29,7 @@ import ( "golang.org/x/net/dns/dnsmessage" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/envknob" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -186,9 +186,9 @@ func main() { type connector struct { // ts is the tsnet.Server used to host the connector. ts *tsnet.Server - // lc is the LocalClient used to interact with the tsnet.Server hosting this + // lc is the local.Client used to interact with the tsnet.Server hosting this // connector. - lc *tailscale.LocalClient + lc *local.Client // dnsAddr is the IPv4 address to listen on for DNS requests. It is used to // prevent the app connector from assigning it to a domain. diff --git a/cmd/pgproxy/pgproxy.go b/cmd/pgproxy/pgproxy.go index 468649ee2..e102c8ae4 100644 --- a/cmd/pgproxy/pgproxy.go +++ b/cmd/pgproxy/pgproxy.go @@ -24,7 +24,7 @@ import ( "strings" "time" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/metrics" "tailscale.com/tsnet" "tailscale.com/tsweb" @@ -105,7 +105,7 @@ type proxy struct { upstreamHost string // "my.database.com" upstreamCertPool *x509.CertPool downstreamCert []tls.Certificate - client *tailscale.LocalClient + client *local.Client activeSessions expvar.Int startedSessions expvar.Int @@ -115,7 +115,7 @@ type proxy struct { // newProxy returns a proxy that forwards connections to // upstreamAddr. The upstream's TLS session is verified using the CA // cert(s) in upstreamCAPath. 
-func newProxy(upstreamAddr, upstreamCAPath string, client *tailscale.LocalClient) (*proxy, error) { +func newProxy(upstreamAddr, upstreamCAPath string, client *local.Client) (*proxy, error) { bs, err := os.ReadFile(upstreamCAPath) if err != nil { return nil, err diff --git a/cmd/proxy-to-grafana/proxy-to-grafana.go b/cmd/proxy-to-grafana/proxy-to-grafana.go index f1c67bad5..849d184c6 100644 --- a/cmd/proxy-to-grafana/proxy-to-grafana.go +++ b/cmd/proxy-to-grafana/proxy-to-grafana.go @@ -36,7 +36,7 @@ import ( "strings" "time" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/tailcfg" "tailscale.com/tsnet" ) @@ -127,7 +127,7 @@ func main() { log.Fatal(http.Serve(ln, proxy)) } -func modifyRequest(req *http.Request, localClient *tailscale.LocalClient) { +func modifyRequest(req *http.Request, localClient *local.Client) { // with enable_login_token set to true, we get a cookie that handles // auth for paths that are not /login if req.URL.Path != "/login" { @@ -144,7 +144,7 @@ func modifyRequest(req *http.Request, localClient *tailscale.LocalClient) { req.Header.Set("X-Webauth-Name", user.DisplayName) } -func getTailscaleUser(ctx context.Context, localClient *tailscale.LocalClient, ipPort string) (*tailcfg.UserProfile, error) { +func getTailscaleUser(ctx context.Context, localClient *local.Client, ipPort string) (*tailcfg.UserProfile, error) { whois, err := localClient.WhoIs(ctx, ipPort) if err != nil { return nil, fmt.Errorf("failed to identify remote host: %w", err) diff --git a/cmd/sniproxy/sniproxy.go b/cmd/sniproxy/sniproxy.go index c1af977f6..c020b4a1f 100644 --- a/cmd/sniproxy/sniproxy.go +++ b/cmd/sniproxy/sniproxy.go @@ -22,7 +22,7 @@ import ( "strings" "github.com/peterbourgon/ff/v3" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/tailcfg" @@ -183,7 +183,7 @@ func run(ctx context.Context, ts *tsnet.Server, wgPort int, hostname string, pro type sniproxy struct { srv Server ts *tsnet.Server - lc *tailscale.LocalClient + lc *local.Client } func (s *sniproxy) advertiseRoutesFromConfig(ctx context.Context, c *appctype.AppConnectorConfig) error { diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index d80d0c02f..2a532f9d7 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -21,6 +21,7 @@ import ( "github.com/mattn/go-colorable" "github.com/mattn/go-isatty" "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/client/local" "tailscale.com/client/tailscale" "tailscale.com/cmd/tailscale/cli/ffcomplete" "tailscale.com/envknob" @@ -79,7 +80,7 @@ func CleanUpArgs(args []string) []string { return out } -var localClient = tailscale.LocalClient{ +var localClient = local.Client{ Socket: paths.DefaultTailscaledSocket(), } diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 5f55b1da6..96629b5ad 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -130,7 +130,7 @@ func (e *serveEnv) newFlags(name string, setup func(fs *flag.FlagSet)) *flag.Fla } // localServeClient is an interface conforming to the subset of -// tailscale.LocalClient. It includes only the methods used by the +// local.Client. It includes only the methods used by the // serve command. // // The purpose of this interface is to allow tests to provide a mock. 
diff --git a/cmd/tailscale/cli/serve_legacy_test.go b/cmd/tailscale/cli/serve_legacy_test.go index 2eb982ca0..df68b5edd 100644 --- a/cmd/tailscale/cli/serve_legacy_test.go +++ b/cmd/tailscale/cli/serve_legacy_test.go @@ -850,7 +850,7 @@ func TestVerifyFunnelEnabled(t *testing.T) { } } -// fakeLocalServeClient is a fake tailscale.LocalClient for tests. +// fakeLocalServeClient is a fake local.Client for tests. // It's not a full implementation, just enough to test the serve command. // // The fake client is stateful, and is used to test manipulating diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 5533683af..45221252e 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -70,8 +70,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep software.sslmate.com/src/go-pkcs12/internal/rc2 from software.sslmate.com/src/go-pkcs12 tailscale.com from tailscale.com/version 💣 tailscale.com/atomicfile from tailscale.com/cmd/tailscale/cli+ - tailscale.com/client/local from tailscale.com/client/tailscale - tailscale.com/client/tailscale from tailscale.com/client/web+ + tailscale.com/client/local from tailscale.com/client/tailscale+ + tailscale.com/client/tailscale from tailscale.com/cmd/tailscale/cli+ tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/cmd/tailscale/cli tailscale.com/clientupdate from tailscale.com/client/web+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index d5beb789c..21b7d32d2 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -233,8 +233,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ LD tailscale.com/chirp from tailscale.com/cmd/tailscaled - tailscale.com/client/local from tailscale.com/client/tailscale - tailscale.com/client/tailscale from tailscale.com/client/web+ + tailscale.com/client/local from tailscale.com/client/tailscale+ + tailscale.com/client/tailscale from tailscale.com/derp tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index bab3bc75a..237cdfb55 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -30,7 +30,7 @@ import ( "syscall" "time" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/cmd/tailscaled/childproc" "tailscale.com/control/controlclient" "tailscale.com/drive/driveimpl" @@ -621,7 +621,7 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID if root := lb.TailscaleVarRoot(); root != "" { dnsfallback.SetCachePath(filepath.Join(root, "derpmap.cached.json"), logf) } - lb.ConfigureWebClient(&tailscale.LocalClient{ + lb.ConfigureWebClient(&local.Client{ Socket: args.socketpath, UseSocketOnly: args.socketpath != paths.DefaultTailscaledSocket(), }) diff --git a/cmd/tl-longchain/tl-longchain.go b/cmd/tl-longchain/tl-longchain.go index c92714505..2a4dc10ba 100644 --- a/cmd/tl-longchain/tl-longchain.go +++ b/cmd/tl-longchain/tl-longchain.go @@ -22,7 +22,7 @@ import ( "log" "time" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/ipn/ipnstate" "tailscale.com/tka" "tailscale.com/types/key" @@ -37,7 +37,7 @@ 
var ( func main() { flag.Parse() - lc := tailscale.LocalClient{Socket: *flagSocket} + lc := local.Client{Socket: *flagSocket} if lc.Socket != "" { lc.UseSocketOnly = true } diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 1bdca8919..3eabef245 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -35,7 +35,7 @@ import ( "gopkg.in/square/go-jose.v2" "gopkg.in/square/go-jose.v2/jwt" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/envknob" "tailscale.com/ipn" @@ -75,7 +75,7 @@ func main() { } var ( - lc *tailscale.LocalClient + lc *local.Client st *ipnstate.Status err error watcherChan chan error @@ -84,7 +84,7 @@ func main() { lns []net.Listener ) if *flagUseLocalTailscaled { - lc = &tailscale.LocalClient{} + lc = &local.Client{} st, err = lc.StatusWithoutPeers(ctx) if err != nil { log.Fatalf("getting status: %v", err) @@ -212,7 +212,7 @@ func main() { // serveOnLocalTailscaled starts a serve session using an already-running // tailscaled instead of starting a fresh tsnet server, making something // listening on clientDNSName:dstPort accessible over serve/funnel. -func serveOnLocalTailscaled(ctx context.Context, lc *tailscale.LocalClient, st *ipnstate.Status, dstPort uint16, shouldFunnel bool) (cleanup func(), watcherChan chan error, err error) { +func serveOnLocalTailscaled(ctx context.Context, lc *local.Client, st *ipnstate.Status, dstPort uint16, shouldFunnel bool) (cleanup func(), watcherChan chan error, err error) { // In order to support funneling out in local tailscaled mode, we need // to add a serve config to forward the listeners we bound above and // allow those forwarders to be funneled out. @@ -275,7 +275,7 @@ func serveOnLocalTailscaled(ctx context.Context, lc *tailscale.LocalClient, st * } type idpServer struct { - lc *tailscale.LocalClient + lc *local.Client loopbackURL string serverURL string // "https://foo.bar.ts.net" funnel bool @@ -328,7 +328,7 @@ type authRequest struct { // allowRelyingParty validates that a relying party identified either by a // known remoteAddr or a valid client ID/secret pair is allowed to proceed // with the authorization flow associated with this authRequest. 
-func (ar *authRequest) allowRelyingParty(r *http.Request, lc *tailscale.LocalClient) error { +func (ar *authRequest) allowRelyingParty(r *http.Request, lc *local.Client) error { if ar.localRP { ra, err := netip.ParseAddrPort(r.RemoteAddr) if err != nil { diff --git a/cmd/tta/tta.go b/cmd/tta/tta.go index 4a4c4a6be..9f8f00295 100644 --- a/cmd/tta/tta.go +++ b/cmd/tta/tta.go @@ -30,7 +30,7 @@ import ( "time" "tailscale.com/atomicfile" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/hostinfo" "tailscale.com/util/mak" "tailscale.com/util/must" @@ -64,7 +64,7 @@ func serveCmd(w http.ResponseWriter, cmd string, args ...string) { } type localClientRoundTripper struct { - lc tailscale.LocalClient + lc local.Client } func (rt *localClientRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { diff --git a/derp/derp_server.go b/derp/derp_server.go index 15fc0dfb8..baca898d3 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -36,6 +36,7 @@ import ( "go4.org/mem" "golang.org/x/sync/errgroup" + "tailscale.com/client/local" "tailscale.com/client/tailscale" "tailscale.com/disco" "tailscale.com/envknob" @@ -1319,7 +1320,7 @@ func (c *sclient) requestMeshUpdate() { } } -var localClient tailscale.LocalClient +var localClient local.Client // isMeshPeer reports whether the client is a trusted mesh peer // node in the DERP region. diff --git a/ipn/ipnlocal/web_client.go b/ipn/ipnlocal/web_client.go index 37fc31819..219a4c535 100644 --- a/ipn/ipnlocal/web_client.go +++ b/ipn/ipnlocal/web_client.go @@ -17,7 +17,7 @@ import ( "sync" "time" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/client/web" "tailscale.com/logtail/backoff" "tailscale.com/net/netutil" @@ -36,16 +36,16 @@ type webClient struct { server *web.Server // or nil, initialized lazily - // lc optionally specifies a LocalClient to use to connect + // lc optionally specifies a local.Client to use to connect // to the localapi for this tailscaled instance. // If nil, a default is used. - lc *tailscale.LocalClient + lc *local.Client } // ConfigureWebClient configures b.web prior to use. -// Specifially, it sets b.web.lc to the provided LocalClient. +// Specifially, it sets b.web.lc to the provided local.Client. // If provided as nil, b.web.lc is cleared out. 
-func (b *LocalBackend) ConfigureWebClient(lc *tailscale.LocalClient) { +func (b *LocalBackend) ConfigureWebClient(lc *local.Client) { b.webClient.mu.Lock() defer b.webClient.mu.Unlock() b.webClient.lc = lc diff --git a/ipn/ipnlocal/web_client_stub.go b/ipn/ipnlocal/web_client_stub.go index 1dfc8c27c..31735de25 100644 --- a/ipn/ipnlocal/web_client_stub.go +++ b/ipn/ipnlocal/web_client_stub.go @@ -9,14 +9,14 @@ import ( "errors" "net" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" ) const webClientPort = 5252 type webClient struct{} -func (b *LocalBackend) ConfigureWebClient(lc *tailscale.LocalClient) {} +func (b *LocalBackend) ConfigureWebClient(lc *local.Client) {} func (b *LocalBackend) webClientGetOrInit() error { return errors.New("not implemented") diff --git a/ipn/ipnserver/server_test.go b/ipn/ipnserver/server_test.go index e56ae8dab..c51c2d4d1 100644 --- a/ipn/ipnserver/server_test.go +++ b/ipn/ipnserver/server_test.go @@ -17,6 +17,7 @@ import ( "sync/atomic" "testing" + "tailscale.com/client/local" "tailscale.com/client/tailscale" "tailscale.com/client/tailscale/apitype" "tailscale.com/control/controlclient" @@ -330,7 +331,7 @@ func newTestIPNServer(tb testing.TB, lb *ipnlocal.LocalBackend, enableLogging bo type testIPNClient struct { tb testing.TB - *tailscale.LocalClient + *local.Client User *ipnauth.TestActor } @@ -338,7 +339,7 @@ func (c *testIPNClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt c.tb.Helper() ctx, cancelWatcher := context.WithCancel(ctx) c.tb.Cleanup(cancelWatcher) - watcher, err := c.LocalClient.WatchIPNBus(ctx, mask) + watcher, err := c.Client.WatchIPNBus(ctx, mask) if err != nil { c.tb.Fatalf("WatchIPNBus(%q): %v", c.User.Name, err) } @@ -359,7 +360,7 @@ type testIPNServer struct { tb testing.TB *Server clientID atomic.Int64 - getClient func(*ipnauth.TestActor) *tailscale.LocalClient + getClient func(*ipnauth.TestActor) *local.Client actorsMu sync.Mutex actors map[string]*ipnauth.TestActor @@ -369,9 +370,9 @@ func (s *testIPNServer) getClientAs(name string) *testIPNClient { clientID := fmt.Sprintf("Client-%d", 1+s.clientID.Add(1)) user := s.makeTestUser(name, clientID) return &testIPNClient{ - tb: s.tb, - LocalClient: s.getClient(user), - User: user, + tb: s.tb, + Client: s.getClient(user), + User: user, } } @@ -427,7 +428,7 @@ func (s *testIPNServer) checkCurrentUser(want *ipnauth.TestActor) { // startTestIPNServer starts a [httptest.Server] that hosts the specified IPN server for the // duration of the test, using the specified base context for incoming requests. -// It returns a function that creates a [tailscale.LocalClient] as a given [ipnauth.TestActor]. +// It returns a function that creates a [local.Client] as a given [ipnauth.TestActor]. 
func startTestIPNServer(tb testing.TB, baseContext context.Context, server *Server) *testIPNServer { tb.Helper() ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -448,8 +449,8 @@ func startTestIPNServer(tb testing.TB, baseContext context.Context, server *Serv return &testIPNServer{ tb: tb, Server: server, - getClient: func(actor *ipnauth.TestActor) *tailscale.LocalClient { - return &tailscale.LocalClient{Transport: newTestRoundTripper(ts, actor)} + getClient: func(actor *ipnauth.TestActor) *local.Client { + return &local.Client{Transport: newTestRoundTripper(ts, actor)} }, } } diff --git a/prober/derp.go b/prober/derp.go index 05cc8f05c..01a7d3086 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -30,7 +30,7 @@ import ( "github.com/tailscale/wireguard-go/device" "github.com/tailscale/wireguard-go/tun" "go4.org/netipx" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/net/netmon" @@ -534,7 +534,7 @@ func (d *derpProber) getNodePair(n1, n2 string) (ret1, ret2 *tailcfg.DERPNode, _ return ret1, ret2, nil } -var tsLocalClient tailscale.LocalClient +var tsLocalClient local.Client // updateMap refreshes the locally-cached DERP map. func (d *derpProber) updateMap(ctx context.Context) error { diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index e1494c65f..8d5b89f84 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -26,6 +26,7 @@ import ( "sync" "time" + "tailscale.com/client/local" "tailscale.com/client/tailscale" "tailscale.com/control/controlclient" "tailscale.com/envknob" @@ -135,11 +136,11 @@ type Server struct { hostname string shutdownCtx context.Context shutdownCancel context.CancelFunc - proxyCred string // SOCKS5 proxy auth for loopbackListener - localAPICred string // basic auth password for loopbackListener - loopbackListener net.Listener // optional loopback for localapi and proxies - localAPIListener net.Listener // in-memory, used by localClient - localClient *tailscale.LocalClient // in-memory + proxyCred string // SOCKS5 proxy auth for loopbackListener + localAPICred string // basic auth password for loopbackListener + loopbackListener net.Listener // optional loopback for localapi and proxies + localAPIListener net.Listener // in-memory, used by localClient + localClient *local.Client // in-memory localAPIServer *http.Server logbuffer *filch.Filch logtail *logtail.Logger @@ -222,7 +223,7 @@ func (s *Server) HTTPClient() *http.Client { // // It will start the server if it has not been started yet. If the server's // already been started successfully, it doesn't return an error. -func (s *Server) LocalClient() (*tailscale.LocalClient, error) { +func (s *Server) LocalClient() (*local.Client, error) { if err := s.Start(); err != nil { return nil, err } @@ -676,7 +677,7 @@ func (s *Server) start() (reterr error) { // nettest.Listen provides a in-memory pipe based implementation for net.Conn. 
lal := memnet.Listen("local-tailscaled.sock:80") s.localAPIListener = lal - s.localClient = &tailscale.LocalClient{Dial: lal.Dial} + s.localClient = &local.Client{Dial: lal.Dial} s.localAPIServer = &http.Server{Handler: lah} s.lb.ConfigureWebClient(s.localClient) go func() { diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 0f245b015..4b73707c9 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -36,7 +36,7 @@ import ( dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "golang.org/x/net/proxy" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" @@ -1273,7 +1273,7 @@ func waitForCondition(t *testing.T, msg string, waitTime time.Duration, f func() } // mustDirect ensures there is a direct connection between LocalClient 1 and 2 -func mustDirect(t *testing.T, logf logger.Logf, lc1, lc2 *tailscale.LocalClient) { +func mustDirect(t *testing.T, logf logger.Logf, lc1, lc2 *local.Client) { t.Helper() lastLog := time.Now().Add(-time.Minute) // See https://github.com/tailscale/tailscale/issues/654 diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 70c5d68c3..770abd506 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -32,6 +32,7 @@ import ( "github.com/miekg/dns" "go4.org/mem" + "tailscale.com/client/local" "tailscale.com/client/tailscale" "tailscale.com/clientupdate" "tailscale.com/cmd/testwrapper/flakytest" @@ -755,11 +756,11 @@ func TestClientSideJailing(t *testing.T) { defer ln.Close() port := uint16(ln.Addr().(*net.TCPAddr).Port) - lc1 := &tailscale.LocalClient{ + lc1 := &local.Client{ Socket: n1.sockFile, UseSocketOnly: true, } - lc2 := &tailscale.LocalClient{ + lc2 := &local.Client{ Socket: n2.sockFile, UseSocketOnly: true, } @@ -789,7 +790,7 @@ func TestClientSideJailing(t *testing.T) { }, } - testDial := func(t *testing.T, lc *tailscale.LocalClient, ip netip.Addr, port uint16, shouldFail bool) { + testDial := func(t *testing.T, lc *local.Client, ip netip.Addr, port uint16, shouldFail bool) { t.Helper() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index d04dc6aa1..470085f5e 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -11,7 +11,7 @@ import ( // transitive deps when we run "go install tailscaled" in a child // process and can cache a prior success when a dependency changes. _ "tailscale.com/chirp" - _ "tailscale.com/client/tailscale" + _ "tailscale.com/client/local" _ "tailscale.com/cmd/tailscaled/childproc" _ "tailscale.com/control/controlclient" _ "tailscale.com/derp/derphttp" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index d04dc6aa1..470085f5e 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -11,7 +11,7 @@ import ( // transitive deps when we run "go install tailscaled" in a child // process and can cache a prior success when a dependency changes. 
_ "tailscale.com/chirp" - _ "tailscale.com/client/tailscale" + _ "tailscale.com/client/local" _ "tailscale.com/cmd/tailscaled/childproc" _ "tailscale.com/control/controlclient" _ "tailscale.com/derp/derphttp" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index d04dc6aa1..470085f5e 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -11,7 +11,7 @@ import ( // transitive deps when we run "go install tailscaled" in a child // process and can cache a prior success when a dependency changes. _ "tailscale.com/chirp" - _ "tailscale.com/client/tailscale" + _ "tailscale.com/client/local" _ "tailscale.com/cmd/tailscaled/childproc" _ "tailscale.com/control/controlclient" _ "tailscale.com/derp/derphttp" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index d04dc6aa1..470085f5e 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -11,7 +11,7 @@ import ( // transitive deps when we run "go install tailscaled" in a child // process and can cache a prior success when a dependency changes. _ "tailscale.com/chirp" - _ "tailscale.com/client/tailscale" + _ "tailscale.com/client/local" _ "tailscale.com/cmd/tailscaled/childproc" _ "tailscale.com/control/controlclient" _ "tailscale.com/derp/derphttp" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index b0d1c8968..6ea475e64 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -18,7 +18,7 @@ import ( _ "golang.org/x/sys/windows/svc/mgr" _ "golang.zx2c4.com/wintun" _ "golang.zx2c4.com/wireguard/windows/tunnel/winipcfg" - _ "tailscale.com/client/tailscale" + _ "tailscale.com/client/local" _ "tailscale.com/cmd/tailscaled/childproc" _ "tailscale.com/control/controlclient" _ "tailscale.com/derp/derphttp" diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index 586fd28e0..ead2bbb8b 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -50,7 +50,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/transport/icmp" "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" "gvisor.dev/gvisor/pkg/waiter" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/net/netutil" @@ -2123,7 +2123,7 @@ func (s *Server) takeAgentConnOne(n *node) (_ *agentConn, ok bool) { } type NodeAgentClient struct { - *tailscale.LocalClient + *local.Client HTTPClient *http.Client } @@ -2148,7 +2148,7 @@ func (s *Server) NodeAgentDialer(n *Node) DialFunc { func (s *Server) NodeAgentClient(n *Node) *NodeAgentClient { d := s.NodeAgentDialer(n) return &NodeAgentClient{ - LocalClient: &tailscale.LocalClient{ + Client: &local.Client{ UseSocketOnly: true, OmitAuth: true, Dial: d, From 1bf4c6481abc6eeda10780e676668f0a662166f9 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Thu, 6 Feb 2025 09:51:00 -0500 Subject: [PATCH 0465/1708] safesocket: add ability for Darwin clients to set explicit credentials (#14702) updates tailscale/corp#25687 The darwin appstore and standalone clients now support XPC and the keychain for passing user credentials securely between the gui process and an NEVPNExtension hosted tailscaled. 
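As a rough illustration of the hand-off (a sketch only, not part of this change's diff; it assumes the GUI has already fetched the token and port from the managed tailscaled, e.g. over XPC or from the keychain):

    import "tailscale.com/safesocket"

    // useExtensionCreds records credentials obtained from the managed
    // tailscaled so that later LocalAPI connections from this process
    // can authenticate without reading a sameuserproof file.
    func useExtensionCreds(token string, port int) {
        safesocket.SetCredentials(token, port)
    }
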
Clients that can communicate directly with the network extension, via XPC or the keychain, are now expected to call SetCredentials and supply credentials explicitly, fixing issues with the cli breaking if the current user cannot read the contents of /Library/Tailscale due to group membership restrictions. This matches how those clients source and supply credentials to the localAPI http client. Non-platform-specific code that has traditionally been in the client is moved to safesocket. /Libraray/Tailscaled/sameuserproof has its permissions changed to that it's readably only by users in the admin group. This restricts standalone CLI access for and direct use of localAPI to admins. Signed-off-by: Jonathan Nobels --- ipn/ipnserver/server.go | 7 + safesocket/safesocket_darwin.go | 369 ++++++++++++++++++++------- safesocket/safesocket_darwin_test.go | 149 +++++++++++ 3 files changed, 434 insertions(+), 91 deletions(-) create mode 100644 safesocket/safesocket_darwin_test.go diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index 5df9375a4..7bc2c7b3e 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -318,6 +318,13 @@ func (s *Server) blockWhileIdentityInUse(ctx context.Context, actor ipnauth.Acto // Unix-like platforms and specifies the ID of a local user // (in the os/user.User.Uid string form) who is allowed // to operate tailscaled without being root or using sudo. +// +// Sandboxed macos clients must directly supply, or be able to read, +// an explicit token. Permission is inferred by validating that +// token. Sandboxed macos clients also don't use ipnserver.actor at all +// (and prior to that, they didn't use ipnauth.ConnIdentity) +// +// See safesocket and safesocket_darwin. func (a *actor) Permissions(operatorUID string) (read, write bool) { switch envknob.GOOS() { case "windows": diff --git a/safesocket/safesocket_darwin.go b/safesocket/safesocket_darwin.go index 62e6f7e6d..fbcd7aaa6 100644 --- a/safesocket/safesocket_darwin.go +++ b/safesocket/safesocket_darwin.go @@ -6,8 +6,11 @@ package safesocket import ( "bufio" "bytes" + crand "crypto/rand" "errors" "fmt" + "io/fs" + "log" "net" "os" "os/exec" @@ -17,6 +20,7 @@ import ( "sync" "time" + "golang.org/x/sys/unix" "tailscale.com/version" ) @@ -24,96 +28,278 @@ func init() { localTCPPortAndToken = localTCPPortAndTokenDarwin } -// localTCPPortAndTokenMacsys returns the localhost TCP port number and auth token -// from /Library/Tailscale. -// -// In that case the files are: +const sameUserProofTokenLength = 10 + +type safesocketDarwin struct { + mu sync.Mutex + token string // safesocket auth token + port int // safesocket port + sameuserproofFD *os.File // file descriptor for macos app store sameuserproof file + sharedDir string // shared directory for location of sameuserproof file + + checkConn bool // Check macsys safesocket port before returning it + isMacSysExt func() bool // For testing only to force macsys +} + +var ssd = safesocketDarwin{ + isMacSysExt: version.IsMacSysExt, + checkConn: true, + sharedDir: "/Library/Tailscale", +} + +// There are three ways a Darwin binary can be run: as the Mac App Store (macOS) +// standalone notarized (macsys), or a separate CLI (tailscale) that was +// built or downloaded. 
// -// /Library/Tailscale/ipnport => $port (symlink with localhost port number target) -// /Library/Tailscale/sameuserproof-$port is a file with auth -func localTCPPortAndTokenMacsys() (port int, token string, err error) { +// The macOS and macsys binaries can communicate directly via XPC with +// the NEPacketTunnelProvider managed tailscaled process and are responsible for +// calling SetCredentials when they need to operate as a CLI. + +// A built/downloaded CLI binary will not be managing the NEPacketTunnelProvider +// hosting tailscaled directly and must source the credentials from a 'sameuserproof' file. +// This file is written to sharedDir when tailscaled/NEPacketTunnelProvider +// calls InitListenerDarwin. + +// localTCPPortAndTokenDarwin returns the localhost TCP port number and auth token +// either generated, or sourced from the NEPacketTunnelProvider managed tailscaled process. +func localTCPPortAndTokenDarwin() (port int, token string, err error) { + ssd.mu.Lock() + defer ssd.mu.Unlock() + + if ssd.port != 0 && ssd.token != "" { + return ssd.port, ssd.token, nil + } - const dir = "/Library/Tailscale" - portStr, err := os.Readlink(filepath.Join(dir, "ipnport")) + // Credentials were not explicitly, this is likely a standalone CLI binary. + // Fallback to reading the sameuserproof file. + return portAndTokenFromSameUserProof() +} + +// SetCredentials sets an token and port used to authenticate safesocket generated +// by the NEPacketTunnelProvider tailscaled process. This is only used when running +// the CLI via Tailscale.app. +func SetCredentials(token string, port int) { + ssd.mu.Lock() + defer ssd.mu.Unlock() + + if ssd.token != "" || ssd.port != 0 { + // Not fatal, but likely programmer error. Credentials do not change. + log.Printf("warning: SetCredentials credentials already set") + } + + ssd.token = token + ssd.port = port +} + +// InitListenerDarwin initializes the listener for the CLI commands +// and localapi HTTP server and sets the port/token. This will override +// any credentials set explicitly via SetCredentials(). Calling this mulitple times +// has no effect. The listener and it's corresponding token/port is initialized only once. +func InitListenerDarwin(sharedDir string) (*net.Listener, error) { + ssd.mu.Lock() + defer ssd.mu.Unlock() + + ln := onceListener.ln + if ln != nil { + return ln, nil + } + + var err error + ln, err = localhostListener() if err != nil { - return 0, "", err + log.Printf("InitListenerDarwin: listener initialization failed") + return nil, err } - port, err = strconv.Atoi(portStr) + + port, err := localhostTCPPort() if err != nil { - return 0, "", err + log.Printf("localhostTCPPort: listener initialization failed") + return nil, err } - authb, err := os.ReadFile(filepath.Join(dir, "sameuserproof-"+portStr)) + + token, err := getToken() if err != nil { - return 0, "", err + log.Printf("localhostTCPPort: getToken failed") + return nil, err } - auth := strings.TrimSpace(string(authb)) - if auth == "" { - return 0, "", errors.New("empty auth token in sameuserproof file") + + if port == 0 || token == "" { + log.Printf("localhostTCPPort: Invalid token or port") + return nil, fmt.Errorf("invalid localhostTCPPort: returned 0") } - // The above files exist forever after the first run of - // /Applications/Tailscale.app, so check we can connect to avoid returning a - // port nothing is listening on. Connect to "127.0.0.1" rather than - // "localhost" due to #7851. 
- conn, err := net.DialTimeout("tcp", "127.0.0.1:"+portStr, time.Second) + ssd.sharedDir = sharedDir + ssd.token = token + ssd.port = port + + // Write the port and token to a sameuserproof file + err = initSameUserProofToken(sharedDir, port, token) if err != nil { - return 0, "", err + // Not fatal + log.Printf("initSameUserProofToken: failed: %v", err) } - conn.Close() - return port, auth, nil + return ln, nil } -var warnAboutRootOnce sync.Once +var onceListener struct { + once sync.Once + ln *net.Listener +} -func localTCPPortAndTokenDarwin() (port int, token string, err error) { - // There are two ways this binary can be run: as the Mac App Store sandboxed binary, - // or a normal binary that somebody built or download and are being run from outside - // the sandbox. Detect which way we're running and then figure out how to connect - // to the local daemon. - - if dir := os.Getenv("TS_MACOS_CLI_SHARED_DIR"); dir != "" { - // First see if we're running as the non-AppStore "macsys" variant. - if version.IsMacSys() { - if port, token, err := localTCPPortAndTokenMacsys(); err == nil { - return port, token, nil +func localhostTCPPort() (int, error) { + if onceListener.ln == nil { + return 0, fmt.Errorf("listener not initialized") + } + + ln, err := localhostListener() + if err != nil { + return 0, err + } + + return (*ln).Addr().(*net.TCPAddr).Port, nil +} + +func localhostListener() (*net.Listener, error) { + onceListener.once.Do(func() { + ln, err := net.Listen("tcp4", "127.0.0.1:0") + if err != nil { + return + } + onceListener.ln = &ln + }) + if onceListener.ln == nil { + return nil, fmt.Errorf("failed to get TCP listener") + } + return onceListener.ln, nil +} + +var onceToken struct { + once sync.Once + token string +} + +func getToken() (string, error) { + onceToken.once.Do(func() { + buf := make([]byte, sameUserProofTokenLength) + if _, err := crand.Read(buf); err != nil { + return + } + t := fmt.Sprintf("%x", buf) + onceToken.token = t + }) + if onceToken.token == "" { + return "", fmt.Errorf("failed to generate token") + } + + return onceToken.token, nil +} + +// initSameUserProofToken writes the port and token to a sameuserproof +// file owned by the current user. We leave the file open to allow us +// to discover it via lsof. +// +// "sameuserproof" is intended to convey that the user attempting to read +// the credentials from the file is the same user that wrote them. For +// standalone macsys where tailscaled is running as root, we set group +// permissions to allow users in the admin group to read the file. +func initSameUserProofToken(sharedDir string, port int, token string) error { + var err error + + // Guard against bad sharedDir + old, err := os.ReadDir(sharedDir) + if err == os.ErrNotExist { + log.Printf("failed to read shared dir %s: %v", sharedDir, err) + return err + } + + // Remove all old sameuserproof files + for _, fi := range old { + if name := fi.Name(); strings.HasPrefix(name, "sameuserproof-") { + err := os.Remove(filepath.Join(sharedDir, name)) + if err != nil { + log.Printf("failed to remove %s: %v", name, err) } } + } - // The current binary (this process) is sandboxed. The user is - // running the CLI via /Applications/Tailscale.app/Contents/MacOS/Tailscale - // which sets the TS_MACOS_CLI_SHARED_DIR environment variable. 
- fis, err := os.ReadDir(dir) + var baseFile string + var perm fs.FileMode + if ssd.isMacSysExt() { + perm = 0640 // allow wheel to read + baseFile = fmt.Sprintf("sameuserproof-%d", port) + portFile := filepath.Join(sharedDir, "ipnport") + err := os.Remove(portFile) if err != nil { - return 0, "", err + log.Printf("failed to remove portfile %s: %v", portFile, err) } - for _, fi := range fis { - name := filepath.Base(fi.Name()) - // Look for name like "sameuserproof-61577-2ae2ec9e0aa2005784f1" - // to extract out the port number and token. - if strings.HasPrefix(name, "sameuserproof-") { - f := strings.SplitN(name, "-", 3) - if len(f) == 3 { - if port, err := strconv.Atoi(f[1]); err == nil { - return port, f[2], nil - } - } - } + symlinkErr := os.Symlink(fmt.Sprint(port), portFile) + if symlinkErr != nil { + log.Printf("failed to symlink portfile: %v", symlinkErr) } - if os.Geteuid() == 0 { - // Log a warning as the clue to the user, in case the error - // message is swallowed. Only do this once since we may retry - // multiple times to connect, and don't want to spam. - warnAboutRootOnce.Do(func() { - fmt.Fprintf(os.Stderr, "Warning: The CLI is running as root from within a sandboxed binary. It cannot reach the local tailscaled, please try again as a regular user.\n") - }) + } else { + perm = 0666 + baseFile = fmt.Sprintf("sameuserproof-%d-%s", port, token) + } + + path := filepath.Join(sharedDir, baseFile) + ssd.sameuserproofFD, err = os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, perm) + log.Printf("initSameUserProofToken : done=%v", err == nil) + + if ssd.isMacSysExt() && err == nil { + fmt.Fprintf(ssd.sameuserproofFD, "%s\n", token) + + // Macsys runs as root so ownership of this file will be + // root/wheel. Change ownership to root/admin which will let all members + // of the admin group to read it. + unix.Fchown(int(ssd.sameuserproofFD.Fd()), 0, 80 /* admin */) + } + + return err +} + +// readMacsysSameuserproof returns the localhost TCP port number and auth token +// from a sameuserproof file written to /Library/Tailscale. +// +// In that case the files are: +// +// /Library/Tailscale/ipnport => $port (symlink with localhost port number target) +// /Library/Tailscale/sameuserproof-$port is a file containing only the auth token as a hex string. +func readMacsysSameUserProof() (port int, token string, err error) { + portStr, err := os.Readlink(filepath.Join(ssd.sharedDir, "ipnport")) + if err != nil { + return 0, "", err + } + port, err = strconv.Atoi(portStr) + if err != nil { + return 0, "", err + } + authb, err := os.ReadFile(filepath.Join(ssd.sharedDir, "sameuserproof-"+portStr)) + if err != nil { + return 0, "", err + } + auth := strings.TrimSpace(string(authb)) + if auth == "" { + return 0, "", errors.New("empty auth token in sameuserproof file") + } + + if ssd.checkConn { + // Files may be stale and there is no guarantee that the sameuserproof + // derived port is open and valid. Check it before returning it. + conn, err := net.DialTimeout("tcp", "127.0.0.1:"+portStr, time.Second) + if err != nil { + return 0, "", err } - return 0, "", fmt.Errorf("failed to find sandboxed sameuserproof-* file in TS_MACOS_CLI_SHARED_DIR %q", dir) + conn.Close() } - // The current process is running outside the sandbox, so use - // lsof to find the IPNExtension (the Mac App Store variant). 
+ return port, auth, nil +} +// readMacosSameUserProof searches for open sameuserproof files belonging +// to the current user and the IPNExtension (macOS App Store) process and returns a +// port and token. +func readMacosSameUserProof() (port int, token string, err error) { cmd := exec.Command("lsof", "-n", // numeric sockets; don't do DNS lookups, etc "-a", // logical AND remaining options @@ -122,39 +308,40 @@ func localTCPPortAndTokenDarwin() (port int, token string, err error) { "-F", // machine-readable output ) out, err := cmd.Output() - if err != nil { - // Before returning an error, see if we're running the - // macsys variant at the normal location. - if port, token, err := localTCPPortAndTokenMacsys(); err == nil { + + if err == nil { + bs := bufio.NewScanner(bytes.NewReader(out)) + subStr := []byte(".tailscale.ipn.macos/sameuserproof-") + for bs.Scan() { + line := bs.Bytes() + i := bytes.Index(line, subStr) + if i == -1 { + continue + } + f := strings.SplitN(string(line[i+len(subStr):]), "-", 2) + if len(f) != 2 { + continue + } + portStr, token := f[0], f[1] + port, err := strconv.Atoi(portStr) + if err != nil { + return 0, "", fmt.Errorf("invalid port %q found in lsof", portStr) + } + return port, token, nil } - - return 0, "", fmt.Errorf("failed to run '%s' looking for IPNExtension: %w", cmd, err) } - bs := bufio.NewScanner(bytes.NewReader(out)) - subStr := []byte(".tailscale.ipn.macos/sameuserproof-") - for bs.Scan() { - line := bs.Bytes() - i := bytes.Index(line, subStr) - if i == -1 { - continue - } - f := strings.SplitN(string(line[i+len(subStr):]), "-", 2) - if len(f) != 2 { - continue - } - portStr, token := f[0], f[1] - port, err := strconv.Atoi(portStr) - if err != nil { - return 0, "", fmt.Errorf("invalid port %q found in lsof", portStr) - } + return 0, "", ErrTokenNotFound +} + +func portAndTokenFromSameUserProof() (port int, token string, err error) { + if port, token, err := readMacosSameUserProof(); err == nil { return port, token, nil } - // Before returning an error, see if we're running the - // macsys variant at the normal location. - if port, token, err := localTCPPortAndTokenMacsys(); err == nil { + if port, token, err := readMacsysSameUserProof(); err == nil { return port, token, nil } - return 0, "", ErrTokenNotFound + + return 0, "", err } diff --git a/safesocket/safesocket_darwin_test.go b/safesocket/safesocket_darwin_test.go new file mode 100644 index 000000000..80f0dcddd --- /dev/null +++ b/safesocket/safesocket_darwin_test.go @@ -0,0 +1,149 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package safesocket + +import ( + "os" + "strings" + "testing" + + "tailscale.com/tstest" +) + +// TestSetCredentials verifies that calling SetCredentials +// sets the port and token correctly and that LocalTCPPortAndToken +// returns the given values. +func TestSetCredentials(t *testing.T) { + wantPort := 123 + wantToken := "token" + SetCredentials(wantToken, wantPort) + + gotPort, gotToken, err := LocalTCPPortAndToken() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if gotPort != wantPort { + t.Errorf("got port %d, want %d", gotPort, wantPort) + } + + if gotToken != wantToken { + t.Errorf("got token %s, want %s", gotToken, wantToken) + } +} + +// TestInitListenerDarwin verifies that InitListenerDarwin +// returns a listener and a non-zero port and non-empty token. 
+func TestInitListenerDarwin(t *testing.T) { + temp := t.TempDir() + ln, err := InitListenerDarwin(temp) + if err != nil || ln == nil { + t.Fatalf("InitListenerDarwin failed: %v", err) + } + defer (*ln).Close() + + port, token, err := LocalTCPPortAndToken() + if err != nil { + t.Fatalf("LocalTCPPortAndToken failed: %v", err) + } + + if port == 0 { + t.Errorf("expected non-zero port, got %d", port) + } + + if token == "" { + t.Errorf("expected non-empty token, got empty string") + } +} + +// TestTokenGeneration verifies token generation behavior +func TestTokenGeneration(t *testing.T) { + token, err := getToken() + if err != nil { + t.Fatalf("getToken: %v", err) + } + + // Verify token length (hex string is 2x byte length) + wantLen := sameUserProofTokenLength * 2 + if got := len(token); got != wantLen { + t.Errorf("token length = %d, want %d", got, wantLen) + } + + // Verify token persistence + subsequentToken, err := getToken() + if err != nil { + t.Fatalf("subsequent getToken: %v", err) + } + if subsequentToken != token { + t.Errorf("subsequent token = %q, want %q", subsequentToken, token) + } +} + +// TestSameUserProofToken verifies that the sameuserproof file +// is created and read correctly for the macsys variant +func TestMacsysSameuserproof(t *testing.T) { + dir := t.TempDir() + + tstest.Replace(t, &ssd.isMacSysExt, func() bool { return true }) + tstest.Replace(t, &ssd.checkConn, false) + tstest.Replace(t, &ssd.sharedDir, dir) + + const ( + wantToken = "token" + wantPort = 123 + ) + + if err := initSameUserProofToken(dir, wantPort, wantToken); err != nil { + t.Fatalf("initSameUserProofToken: %v", err) + } + + gotPort, gotToken, err := readMacsysSameUserProof() + if err != nil { + t.Fatalf("readMacOSSameUserProof: %v", err) + } + + if gotPort != wantPort { + t.Errorf("got port = %d, want %d", gotPort, wantPort) + } + if wantToken != gotToken { + t.Errorf("got token = %s, want %s", wantToken, gotToken) + } + assertFileCount(t, dir, 1, "sameuserproof-") +} + +// TestMacosSameuserproof verifies that the sameuserproof file +// is created correctly for the macos variant +func TestMacosSameuserproof(t *testing.T) { + dir := t.TempDir() + wantToken := "token" + wantPort := 123 + + initSameUserProofToken(dir, wantPort, wantToken) + + // initSameUserProofToken should never leave duplicates + initSameUserProofToken(dir, wantPort, wantToken) + + // we can't just call readMacosSameUserProof because it relies on lsof + // and makes some assumptions about the user. But we can make sure + // the file exists + assertFileCount(t, dir, 1, "sameuserproof-") +} + +func assertFileCount(t *testing.T, dir string, want int, prefix string) { + t.Helper() + + files, err := os.ReadDir(dir) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + count := 0 + for _, file := range files { + if strings.HasPrefix(file.Name(), prefix) { + count += 1 + } + } + if count != want { + t.Errorf("expected 1 file, got %d", count) + } +} From 9a9ce12a3efdf9bd770e6c9e689d976292c040af Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Thu, 6 Feb 2025 10:52:42 -0500 Subject: [PATCH 0466/1708] cmd/derper: close setec after use (#14929) Since dynamic reload of setec is not supported in derper at this time, close the server after the secret is loaded. 
Updates tailscale/corp#25756

Signed-off-by: Mike O'Driscoll
---
 cmd/derper/derper.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go
index f1d848a5f..f08be7d08 100644
--- a/cmd/derper/derper.go
+++ b/cmd/derper/derper.go
@@ -224,6 +224,7 @@ func main() {
 		}
 		meshKey = st.Secret(meshKeySecret).GetString()
 		log.Println("Got mesh key from setec store")
+		st.Close()
 	} else if *meshPSKFile != "" {
 		b, err := setec.StaticFile(*meshPSKFile)
 		if err != nil {

From 12f1bc7c7737848d8ff75a47e19f99a134503a65 Mon Sep 17 00:00:00 2001
From: Mike O'Driscoll
Date: Thu, 6 Feb 2025 10:53:08 -0500
Subject: [PATCH 0467/1708] cmd/derper: support no mesh key (#14931)

Incorrectly disabled support for not having a mesh key in
d5316a4fbb4a1105ce2ba6f92d9688452b7747cd

Allow for no mesh key to be set.

Fixes #14928

Signed-off-by: Mike O'Driscoll
---
 cmd/derper/derper.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go
index f08be7d08..4af63e192 100644
--- a/cmd/derper/derper.go
+++ b/cmd/derper/derper.go
@@ -236,6 +236,8 @@ func main() {
 
 	if meshKey == "" && *dev {
 		log.Printf("No mesh key configured for --dev mode")
+	} else if meshKey == "" {
+		log.Printf("No mesh key configured")
 	} else if key, err := checkMeshKey(meshKey); err != nil {
 		log.Fatalf("invalid mesh key: %v", err)
 	} else {

From 431216017b465fbb6517a967b6c046293238f61a Mon Sep 17 00:00:00 2001
From: Erisa A
Date: Thu, 6 Feb 2025 16:32:51 +0000
Subject: [PATCH 0468/1708] scripts/installer.sh: add FreeBSD 14 (#14925)

Fixes #14745

Also adds --yes to pkg to match other package managers

Signed-off-by: Erisa A
---
 scripts/installer.sh | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/scripts/installer.sh b/scripts/installer.sh
index bdd425539..e74ce7886 100755
--- a/scripts/installer.sh
+++ b/scripts/installer.sh
@@ -390,7 +390,8 @@ main() {
 			;;
 		freebsd)
 			if [ "$VERSION" != "12" ] && \
-				[ "$VERSION" != "13" ]
+				[ "$VERSION" != "13" ] && \
+				[ "$VERSION" != "14" ]
 			then
 				OS_UNSUPPORTED=1
 			fi
@@ -572,7 +573,7 @@ main() {
 			;;
 		pkg)
 			set -x
-			$SUDO pkg install tailscale
+			$SUDO pkg install tailscale --yes
 			$SUDO service tailscaled enable
 			$SUDO service tailscaled start
 			set +x

From 83808029d8c6f54d11f9be7482634bd76fcdac15 Mon Sep 17 00:00:00 2001
From: James Tucker
Date: Mon, 3 Feb 2025 16:18:07 -0800
Subject: [PATCH 0469/1708] wgengine/netstack: disable RACK on all platforms

The gVisor RACK implementation appears to perform badly, particularly
in scenarios with higher BDP. This may have gone largely unnoticed as a
result of it being gated on SACK, which is not enabled by default in
upstream gVisor, but itself has a higher positive impact on performance.

Both the RACK and DACK implementations (which are now one) have
overlapping non-completion of tasks in their work streams on the public
tracker.

Updates #9707

Signed-off-by: James Tucker
---
 wgengine/netstack/netstack.go | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go
index f0c4c5271..ed03f666d 100644
--- a/wgengine/netstack/netstack.go
+++ b/wgengine/netstack/netstack.go
@@ -317,16 +317,14 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi
 	if tcpipErr != nil {
 		return nil, fmt.Errorf("could not enable TCP SACK: %v", tcpipErr)
 	}
-	if runtime.GOOS == "windows" {
-		// See https://github.com/tailscale/tailscale/issues/9707
-		// Windows w/RACK performs poorly.
ACKs do not appear to be handled in a - // timely manner, leading to spurious retransmissions and a reduced - // congestion window. - tcpRecoveryOpt := tcpip.TCPRecovery(0) - tcpipErr = ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &tcpRecoveryOpt) - if tcpipErr != nil { - return nil, fmt.Errorf("could not disable TCP RACK: %v", tcpipErr) - } + // See https://github.com/tailscale/tailscale/issues/9707 + // gVisor's RACK performs poorly. ACKs do not appear to be handled in a + // timely manner, leading to spurious retransmissions and a reduced + // congestion window. + tcpRecoveryOpt := tcpip.TCPRecovery(0) + tcpipErr = ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &tcpRecoveryOpt) + if tcpipErr != nil { + return nil, fmt.Errorf("could not disable TCP RACK: %v", tcpipErr) } err := setTCPBufSizes(ipstack) if err != nil { From 08a96a86af56561bbce6521fd8bf612729defc72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sandro=20J=C3=A4ckel?= Date: Sun, 12 May 2024 22:57:10 +0200 Subject: [PATCH 0470/1708] cmd/tailscale: make ssh command work when tailscaled is built with the ts_include_cli tag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #12125 Signed-off-by: Sandro Jäckel --- cmd/tailscale/cli/ssh.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/cmd/tailscale/cli/ssh.go b/cmd/tailscale/cli/ssh.go index 68a6193af..ba70e97e9 100644 --- a/cmd/tailscale/cli/ssh.go +++ b/cmd/tailscale/cli/ssh.go @@ -84,10 +84,6 @@ func runSSH(ctx context.Context, args []string) error { // of failing. But for now: return fmt.Errorf("no system 'ssh' command found: %w", err) } - tailscaleBin, err := os.Executable() - if err != nil { - return err - } knownHostsFile, err := writeKnownHosts(st) if err != nil { return err @@ -116,7 +112,9 @@ func runSSH(ctx context.Context, args []string) error { argv = append(argv, "-o", fmt.Sprintf("ProxyCommand %q %s nc %%h %%p", - tailscaleBin, + // os.Executable() would return the real running binary but in case tailscale is built with the ts_include_cli tag, + // we need to return the started symlink instead + os.Args[0], socketArg, )) } From caafe68eb2db88f16facecde777d8925dbe7cc99 Mon Sep 17 00:00:00 2001 From: Erisa A Date: Thu, 6 Feb 2025 22:19:16 +0000 Subject: [PATCH 0471/1708] scripts/installer.sh: add BigLinux as a Manjaro derivative (#14936) Fixes #13343 Signed-off-by: Erisa A --- scripts/installer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/installer.sh b/scripts/installer.sh index e74ce7886..3b76a7b88 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -250,7 +250,7 @@ main() { VERSION="" # rolling release PACKAGETYPE="pacman" ;; - manjaro|manjaro-arm) + manjaro|manjaro-arm|biglinux) OS="manjaro" VERSION="" # rolling release PACKAGETYPE="pacman" From 4903d6c80bba6de3e6b70f24ee80169ed4ac4e68 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 6 Feb 2025 10:35:16 -0800 Subject: [PATCH 0472/1708] wgengine/netstack: block link writes when full rather than drop Originally identified by Coder and documented in their blog post, this implementation differs slightly as our link endpoint was introduced for a different purpose, but the behavior is the same: apply backpressure rather than dropping packets. This reduces the negative impact of large packet count bursts substantially. 
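
(Illustrative sketch only, not the actual implementation; packetBuf stands
in for gVisor's *stack.PacketBuffer.) The write path now selects between
the bounded channel and a close signal, so a full buffer blocks the sender
rather than dropping the packet:

package main

import "errors"

// packetBuf stands in for gVisor's *stack.PacketBuffer in this sketch.
type packetBuf struct{}

// queue is a bounded outbound buffer. Write blocks while the buffer is
// full, applying backpressure, instead of dropping the packet.
type queue struct {
	c        chan *packetBuf
	closedCh chan struct{} // closed on shutdown so blocked writers wake up
}

func newQueue(size int) *queue {
	return &queue{c: make(chan *packetBuf, size), closedCh: make(chan struct{})}
}

func (q *queue) Write(pkt *packetBuf) error {
	select {
	case q.c <- pkt: // blocks until there is room in the buffer
		return nil
	case <-q.closedCh: // never block forever once the queue is closed
		return errors.New("queue closed for send")
	}
}

func main() {
	q := newQueue(512)
	_ = q.Write(&packetBuf{})
}
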
An alternative would be to swell the size of the channel buffer substantially, however that's largely just moving where buffering occurs and may lead to reduced signalling back to lower layer or upstream congestion controls. Updates #9707 Updates #10408 Updates #12393 Updates tailscale/corp#24483 Updates tailscale/corp#25169 Signed-off-by: James Tucker --- wgengine/netstack/link_endpoint.go | 49 ++++++++++++++++-------------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/wgengine/netstack/link_endpoint.go b/wgengine/netstack/link_endpoint.go index 485d829a3..39da64b55 100644 --- a/wgengine/netstack/link_endpoint.go +++ b/wgengine/netstack/link_endpoint.go @@ -16,19 +16,27 @@ import ( ) type queue struct { - // TODO(jwhited): evaluate performance with mu as Mutex and/or alternative - // non-channel buffer. - c chan *stack.PacketBuffer - mu sync.RWMutex // mu guards closed + // TODO(jwhited): evaluate performance with a non-channel buffer. + c chan *stack.PacketBuffer + + closeOnce sync.Once + closedCh chan struct{} + + mu sync.RWMutex closed bool } func (q *queue) Close() { + q.closeOnce.Do(func() { + close(q.closedCh) + }) + q.mu.Lock() defer q.mu.Unlock() - if !q.closed { - close(q.c) + if q.closed { + return } + close(q.c) q.closed = true } @@ -51,26 +59,27 @@ func (q *queue) ReadContext(ctx context.Context) *stack.PacketBuffer { } func (q *queue) Write(pkt *stack.PacketBuffer) tcpip.Error { - // q holds the PacketBuffer. q.mu.RLock() defer q.mu.RUnlock() if q.closed { return &tcpip.ErrClosedForSend{} } - - wrote := false select { case q.c <- pkt.IncRef(): - wrote = true - default: - // TODO(jwhited): reconsider/count + return nil + case <-q.closedCh: pkt.DecRef() + return &tcpip.ErrClosedForSend{} } +} - if wrote { - return nil +func (q *queue) Drain() int { + c := 0 + for pkt := range q.c { + pkt.DecRef() + c++ } - return &tcpip.ErrNoBufferSpace{} + return c } func (q *queue) Num() int { @@ -107,7 +116,8 @@ func newLinkEndpoint(size int, mtu uint32, linkAddr tcpip.LinkAddress, supported le := &linkEndpoint{ supportedGRO: supportedGRO, q: &queue{ - c: make(chan *stack.PacketBuffer, size), + c: make(chan *stack.PacketBuffer, size), + closedCh: make(chan struct{}), }, mtu: mtu, linkAddr: linkAddr, @@ -164,12 +174,7 @@ func (l *linkEndpoint) ReadContext(ctx context.Context) *stack.PacketBuffer { // Drain removes all outbound packets from the channel and counts them. func (l *linkEndpoint) Drain() int { - c := 0 - for pkt := l.Read(); pkt != nil; pkt = l.Read() { - pkt.DecRef() - c++ - } - return c + return l.q.Drain() } // NumQueued returns the number of packets queued for outbound. From e113b106a69080aace45e3d3d160ee87835ea75e Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 6 Feb 2025 10:45:45 -0800 Subject: [PATCH 0473/1708] go.mod,wgengine/netstack: use cubic congestion control, bump gvisor Cubic performs better than Reno in higher BDP scenarios, and enables the use of the hystart++ implementation contributed by Coder. This improves throughput on higher BDP links with a much faster ramp. gVisor is bumped as well for some fixes related to send queue processing and RTT tracking. 
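
For illustration only, a minimal standalone sketch of how the congestion
control option is applied to a gVisor stack, mirroring the netstack change
below; the stack here registers just IPv4 and TCP to keep the example small:

package main

import (
	"log"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
	"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
)

func main() {
	// Build a minimal stack with only IPv4 and TCP registered.
	s := stack.New(stack.Options{
		NetworkProtocols:   []stack.NetworkProtocolFactory{ipv4.NewProtocol},
		TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol},
	})
	// Ask the TCP protocol to use the cubic congestion control implementation
	// (Reno was used previously).
	opt := tcpip.CongestionControlOption("cubic")
	if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
		log.Fatalf("could not set cubic congestion control: %v", err)
	}
}
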
Updates #9707 Updates #10408 Updates #12393 Updates tailscale/corp#24483 Updates tailscale/corp#25169 Signed-off-by: James Tucker --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- go.mod | 2 +- go.sum | 4 ++-- wgengine/netstack/netstack.go | 5 +++++ 5 files changed, 10 insertions(+), 5 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index aedd4265e..19773761f 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -296,7 +296,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ gvisor.dev/gvisor/pkg/tcpip/hash/jenkins from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip/header from gvisor.dev/gvisor/pkg/tcpip/header/parse+ gvisor.dev/gvisor/pkg/tcpip/header/parse from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ - gvisor.dev/gvisor/pkg/tcpip/internal/tcp from gvisor.dev/gvisor/pkg/tcpip/stack+ + gvisor.dev/gvisor/pkg/tcpip/internal/tcp from gvisor.dev/gvisor/pkg/tcpip/transport/tcp gvisor.dev/gvisor/pkg/tcpip/network/hash from gvisor.dev/gvisor/pkg/tcpip/network/ipv4 gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 21b7d32d2..294f2944e 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -208,7 +208,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/hash/jenkins from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip/header from gvisor.dev/gvisor/pkg/tcpip/header/parse+ gvisor.dev/gvisor/pkg/tcpip/header/parse from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ - gvisor.dev/gvisor/pkg/tcpip/internal/tcp from gvisor.dev/gvisor/pkg/tcpip/stack+ + gvisor.dev/gvisor/pkg/tcpip/internal/tcp from gvisor.dev/gvisor/pkg/tcpip/transport/tcp gvisor.dev/gvisor/pkg/tcpip/network/hash from gvisor.dev/gvisor/pkg/tcpip/network/ipv4 gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ diff --git a/go.mod b/go.mod index dc34d84ca..4de2df640 100644 --- a/go.mod +++ b/go.mod @@ -108,7 +108,7 @@ require ( golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 golang.zx2c4.com/wireguard/windows v0.5.3 gopkg.in/square/go-jose.v2 v2.6.0 - gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 + gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 honnef.co/go/tools v0.5.1 k8s.io/api v0.32.0 k8s.io/apimachinery v0.32.0 diff --git a/go.sum b/go.sum index 2666faf90..4568ffb33 100644 --- a/go.sum +++ b/go.sum @@ -1446,8 +1446,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= -gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= +gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k= +gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod 
h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index ed03f666d..76c8754ac 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -326,6 +326,11 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi if tcpipErr != nil { return nil, fmt.Errorf("could not disable TCP RACK: %v", tcpipErr) } + cubicOpt := tcpip.CongestionControlOption("cubic") + tcpipErr = ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &cubicOpt) + if tcpipErr != nil { + return nil, fmt.Errorf("could not set cubic congestion control: %v", tcpipErr) + } err := setTCPBufSizes(ipstack) if err != nil { return nil, err From e1523fe6866bb375be2d245d267c9c2773b5b17d Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 6 Feb 2025 10:51:01 -0800 Subject: [PATCH 0474/1708] cmd/natc: remove speculative tuning from natc These tunings reduced memory usage while the implementation was struggling with earlier bugs, but will no longer be necessary after those bugs are addressed. Depends #14933 Depends #14934 Updates #9707 Updates #10408 Updates tailscale/corp#24483 Updates tailscale/corp#25169 Signed-off-by: James Tucker --- cmd/natc/natc.go | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index 818947a13..956d2455e 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -27,8 +27,6 @@ import ( "github.com/inetaf/tcpproxy" "github.com/peterbourgon/ff/v3" "golang.org/x/net/dns/dnsmessage" - "gvisor.dev/gvisor/pkg/tcpip" - "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" "tailscale.com/client/local" "tailscale.com/envknob" "tailscale.com/hostinfo" @@ -140,26 +138,6 @@ func main() { } // TODO(raggi): this is not a public interface or guarantee. ns := ts.Sys().Netstack.Get().(*netstack.Impl) - tcpRXBufOpt := tcpip.TCPReceiveBufferSizeRangeOption{ - Min: tcp.MinBufferSize, - Default: tcp.DefaultReceiveBufferSize, - Max: tcp.MaxBufferSize, - } - if err := ns.SetTransportProtocolOption(tcp.ProtocolNumber, &tcpRXBufOpt); err != nil { - log.Fatalf("could not set TCP RX buf size: %v", err) - } - tcpTXBufOpt := tcpip.TCPSendBufferSizeRangeOption{ - Min: tcp.MinBufferSize, - Default: tcp.DefaultSendBufferSize, - Max: tcp.MaxBufferSize, - } - if err := ns.SetTransportProtocolOption(tcp.ProtocolNumber, &tcpTXBufOpt); err != nil { - log.Fatalf("could not set TCP TX buf size: %v", err) - } - mslOpt := tcpip.TCPTimeWaitTimeoutOption(5 * time.Second) - if err := ns.SetTransportProtocolOption(tcp.ProtocolNumber, &mslOpt); err != nil { - log.Fatalf("could not set TCP MSL: %v", err) - } if *debugPort != 0 { expvar.Publish("netstack", ns.ExpVar()) } From 7b3e5b5df36276567109f6a924d2866d0f85e503 Mon Sep 17 00:00:00 2001 From: Adrian Dewhurst Date: Thu, 6 Feb 2025 17:21:00 -0500 Subject: [PATCH 0475/1708] wgengine/netstack: respond to service IPs in Linux tun mode When in tun mode on Linux, AllowedIPs are not automatically added to netstack because the kernel is responsible for handling subnet routes. This ensures that virtual IPs are always added to netstack. 
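
Concretely, each service address is installed as a single-address prefix
(/32 or /128). A small sketch with illustrative addresses, not real
service IPs:

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// A single-address prefix covers exactly one IP: /32 for IPv4, /128 for IPv6.
	for _, a := range []netip.Addr{
		netip.MustParseAddr("100.100.100.100"),      // example IPv4 address
		netip.MustParseAddr("fd7a:115c:a1e0::1234"), // example IPv6 address
	} {
		fmt.Println(netip.PrefixFrom(a, a.BitLen()))
	}
}
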
When in tun mode, pings were also not being handled, so this adds explicit support for ping as well. Fixes tailscale/corp#26387 Change-Id: I6af02848bf2572701288125f247d1eaa6f661107 Signed-off-by: Adrian Dewhurst --- wgengine/netstack/netstack.go | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 76c8754ac..0bbd20b79 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -635,9 +635,10 @@ var v4broadcast = netaddr.IPv4(255, 255, 255, 255) // address slice views. func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) { var selfNode tailcfg.NodeView + var serviceAddrSet set.Set[netip.Addr] if nm != nil { vipServiceIPMap := nm.GetVIPServiceIPMap() - serviceAddrSet := set.Set[netip.Addr]{} + serviceAddrSet = make(set.Set[netip.Addr], len(vipServiceIPMap)*2) for _, addrs := range vipServiceIPMap { serviceAddrSet.AddSlice(addrs) } @@ -675,6 +676,11 @@ func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) { } } + for addr := range serviceAddrSet { + p := netip.PrefixFrom(addr, addr.BitLen()) + newPfx[p] = true + } + pfxToAdd := make(map[netip.Prefix]bool) for p := range newPfx { if !oldPfx[p] { @@ -1019,12 +1025,18 @@ func (ns *Impl) shouldProcessInbound(p *packet.Parsed, t *tstun.Wrapper) bool { return true } } - if ns.lb != nil && p.IPProto == ipproto.TCP && isService { - // An assumption holds for this to work: when tun mode is on for a service, - // its tcp and web are not set. This is enforced in b.setServeConfigLocked. - if ns.lb.ShouldInterceptVIPServiceTCPPort(p.Dst) { + if isService { + if p.IsEchoRequest() { return true } + if ns.lb != nil && p.IPProto == ipproto.TCP { + // An assumption holds for this to work: when tun mode is on for a service, + // its tcp and web are not set. This is enforced in b.setServeConfigLocked. + if ns.lb.ShouldInterceptVIPServiceTCPPort(p.Dst) { + return true + } + } + return false } if p.IPVersion == 6 && !isLocal && viaRange.Contains(dstIP) { return ns.lb != nil && ns.lb.ShouldHandleViaIP(dstIP) From 532e38bdc82cd61d0e72cdca89093fd92baa1db2 Mon Sep 17 00:00:00 2001 From: Erisa A Date: Sat, 8 Feb 2025 14:45:41 +0000 Subject: [PATCH 0476/1708] scripts/installer.sh: fix --yes argument for freebsd (#14958) This argument apparently has to be before the package name Updates #14745 Signed-off-by: Erisa A --- scripts/installer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/installer.sh b/scripts/installer.sh index 3b76a7b88..22ba12b6b 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -573,7 +573,7 @@ main() { ;; pkg) set -x - $SUDO pkg install tailscale --yes + $SUDO pkg install --yes tailscale $SUDO service tailscaled enable $SUDO service tailscaled start set +x From 122255765a95b3af8b4e382bcad89ac7cccb37ce Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 7 Feb 2025 15:27:31 -0600 Subject: [PATCH 0477/1708] ipn/ipnlocal: fix (*profileManager).DefaultUserProfileID for users other than current Currently, profileManager filters profiles based on their creator/owner and the "current user"'s UID. This causes DefaultUserProfileID(uid) to work incorrectly when the UID doesn't match the current user. While we plan to remove the concept of the "current user" completely, we're not there yet. 
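
In outline, the fix described next threads an explicit UID through the
access check instead of always consulting the connected user. A simplified
sketch, with the profile and UID types reduced to plain strings:

package main

import (
	"errors"
	"fmt"
)

var errProfileAccessDenied = errors.New("profile access denied")

// profile is a stripped-down stand-in for ipn.LoginProfileView.
type profile struct {
	Name        string
	LocalUserID string
}

// checkProfileAccessAs reports whether the given user may use the profile.
// Passing the UID explicitly lets callers ask about users other than the
// currently connected one.
func checkProfileAccessAs(uid string, p profile) error {
	if uid != "" && p.LocalUserID != uid {
		return errProfileAccessDenied
	}
	return nil
}

func main() {
	p := profile{Name: "alice", LocalUserID: "S-1-5-21-1"}
	fmt.Println(checkProfileAccessAs("S-1-5-21-1", p)) // <nil>
	fmt.Println(checkProfileAccessAs("S-1-5-21-2", p)) // profile access denied
}
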
In this PR, we fix DefaultUserProfileID by updating profileManager to allow checking profile access for a given UID and modifying helper methods to accept UID as a parameter when returning matching profiles. Updates #14823 Signed-off-by: Nick Khyl --- ipn/ipnlocal/profiles.go | 50 ++++++++++++++++++----------------- ipn/ipnlocal/profiles_test.go | 4 +-- 2 files changed, 28 insertions(+), 26 deletions(-) diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 858623025..f988f8852 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -97,7 +97,7 @@ func (pm *profileManager) DefaultUserProfileID(uid ipn.WindowsUserID) ipn.Profil } pk := ipn.StateKey(string(b)) - prof := pm.findProfileByKey(pk) + prof := pm.findProfileByKey(uid, pk) if !prof.Valid() { pm.dlogf("DefaultUserProfileID: no profile found for key: %q", pk) return "" @@ -108,17 +108,24 @@ func (pm *profileManager) DefaultUserProfileID(uid ipn.WindowsUserID) ipn.Profil // checkProfileAccess returns an [errProfileAccessDenied] if the current user // does not have access to the specified profile. func (pm *profileManager) checkProfileAccess(profile ipn.LoginProfileView) error { - if pm.currentUserID != "" && profile.LocalUserID() != pm.currentUserID { + return pm.checkProfileAccessAs(pm.currentUserID, profile) +} + +// checkProfileAccessAs returns an [errProfileAccessDenied] if the specified user +// does not have access to the specified profile. +func (pm *profileManager) checkProfileAccessAs(uid ipn.WindowsUserID, profile ipn.LoginProfileView) error { + if uid != "" && profile.LocalUserID() != uid { return errProfileAccessDenied } return nil } -// allProfiles returns all profiles accessible to the current user. +// allProfilesFor returns all profiles accessible to the specified user. // The returned profiles are sorted by Name. -func (pm *profileManager) allProfiles() (out []ipn.LoginProfileView) { +func (pm *profileManager) allProfilesFor(uid ipn.WindowsUserID) []ipn.LoginProfileView { + out := make([]ipn.LoginProfileView, 0, len(pm.knownProfiles)) for _, p := range pm.knownProfiles { - if pm.checkProfileAccess(p) == nil { + if pm.checkProfileAccessAs(uid, p) == nil { out = append(out, p) } } @@ -128,10 +135,10 @@ func (pm *profileManager) allProfiles() (out []ipn.LoginProfileView) { return out } -// matchingProfiles is like [profileManager.allProfiles], but returns only profiles +// matchingProfiles is like [profileManager.allProfilesFor], but returns only profiles // matching the given predicate. -func (pm *profileManager) matchingProfiles(f func(ipn.LoginProfileView) bool) (out []ipn.LoginProfileView) { - all := pm.allProfiles() +func (pm *profileManager) matchingProfiles(uid ipn.WindowsUserID, f func(ipn.LoginProfileView) bool) (out []ipn.LoginProfileView) { + all := pm.allProfilesFor(uid) out = all[:0] for _, p := range all { if f(p) { @@ -144,8 +151,8 @@ func (pm *profileManager) matchingProfiles(f func(ipn.LoginProfileView) bool) (o // findMatchingProfiles returns all profiles accessible to the current user // that represent the same node/user as prefs. // The returned profiles are sorted by Name. 
-func (pm *profileManager) findMatchingProfiles(prefs ipn.PrefsView) []ipn.LoginProfileView { - return pm.matchingProfiles(func(p ipn.LoginProfileView) bool { +func (pm *profileManager) findMatchingProfiles(uid ipn.WindowsUserID, prefs ipn.PrefsView) []ipn.LoginProfileView { + return pm.matchingProfiles(uid, func(p ipn.LoginProfileView) bool { return p.ControlURL() == prefs.ControlURL() && (p.UserProfile().ID == prefs.Persist().UserProfile().ID || p.NodeID() == prefs.Persist().NodeID()) @@ -156,16 +163,16 @@ func (pm *profileManager) findMatchingProfiles(prefs ipn.PrefsView) []ipn.LoginP // given name. It returns "" if no such profile exists among profiles // accessible to the current user. func (pm *profileManager) ProfileIDForName(name string) ipn.ProfileID { - p := pm.findProfileByName(name) + p := pm.findProfileByName(pm.currentUserID, name) if !p.Valid() { return "" } return p.ID() } -func (pm *profileManager) findProfileByName(name string) ipn.LoginProfileView { - out := pm.matchingProfiles(func(p ipn.LoginProfileView) bool { - return p.Name() == name +func (pm *profileManager) findProfileByName(uid ipn.WindowsUserID, name string) ipn.LoginProfileView { + out := pm.matchingProfiles(uid, func(p ipn.LoginProfileView) bool { + return p.Name() == name && pm.checkProfileAccessAs(uid, p) == nil }) if len(out) == 0 { return ipn.LoginProfileView{} @@ -176,9 +183,9 @@ func (pm *profileManager) findProfileByName(name string) ipn.LoginProfileView { return out[0] } -func (pm *profileManager) findProfileByKey(key ipn.StateKey) ipn.LoginProfileView { - out := pm.matchingProfiles(func(p ipn.LoginProfileView) bool { - return p.Key() == key +func (pm *profileManager) findProfileByKey(uid ipn.WindowsUserID, key ipn.StateKey) ipn.LoginProfileView { + out := pm.matchingProfiles(uid, func(p ipn.LoginProfileView) bool { + return p.Key() == key && pm.checkProfileAccessAs(uid, p) == nil }) if len(out) == 0 { return ipn.LoginProfileView{} @@ -222,7 +229,7 @@ func (pm *profileManager) SetPrefs(prefsIn ipn.PrefsView, np ipn.NetworkProfile) } // Check if we already have an existing profile that matches the user/node. - if existing := pm.findMatchingProfiles(prefsIn); len(existing) > 0 { + if existing := pm.findMatchingProfiles(pm.currentUserID, prefsIn); len(existing) > 0 { // We already have a profile for this user/node we should reuse it. Also // cleanup any other duplicate profiles. cp = existing[0] @@ -376,12 +383,7 @@ func (pm *profileManager) writePrefsToStore(key ipn.StateKey, prefs ipn.PrefsVie // Profiles returns the list of known profiles accessible to the current user. func (pm *profileManager) Profiles() []ipn.LoginProfileView { - allProfiles := pm.allProfiles() - out := make([]ipn.LoginProfileView, len(allProfiles)) - for i, p := range allProfiles { - out[i] = p - } - return out + return pm.allProfilesFor(pm.currentUserID) } // ProfileByID returns a profile with the given id, if it is accessible to the current user. 
diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 5c4f1fd4c..33209d24c 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -129,10 +129,10 @@ func TestProfileList(t *testing.T) { pm.SetCurrentUserID("user1") checkProfiles(t, "alice", "bob") - if lp := pm.findProfileByKey(carol.Key()); lp.Valid() { + if lp := pm.findProfileByKey("user1", carol.Key()); lp.Valid() { t.Fatalf("found profile for user2 in user1's profile list") } - if lp := pm.findProfileByName(carol.Name()); lp.Valid() { + if lp := pm.findProfileByName("user1", carol.Name()); lp.Valid() { t.Fatalf("found profile for user2 in user1's profile list") } From 76fe556fcd0153eee719e694d3d97a8884b3097e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Feb 2025 09:58:08 -0700 Subject: [PATCH 0478/1708] .github: Bump github/codeql-action from 3.28.5 to 3.28.9 (#14962) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.5 to 3.28.9. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/f6091c0113d1dcf9b98e269ee48e8a7e51b7bdd4...9e8d0789d4a0fa9ceb6b1738f7e269594bdd67f0) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 425175218..a241d3578 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@f6091c0113d1dcf9b98e269ee48e8a7e51b7bdd4 # v3.28.5 + uses: github/codeql-action/init@9e8d0789d4a0fa9ceb6b1738f7e269594bdd67f0 # v3.28.9 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@f6091c0113d1dcf9b98e269ee48e8a7e51b7bdd4 # v3.28.5 + uses: github/codeql-action/autobuild@9e8d0789d4a0fa9ceb6b1738f7e269594bdd67f0 # v3.28.9 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@f6091c0113d1dcf9b98e269ee48e8a7e51b7bdd4 # v3.28.5 + uses: github/codeql-action/analyze@9e8d0789d4a0fa9ceb6b1738f7e269594bdd67f0 # v3.28.9 From 11cd98fab0b1f11fbad88ff6cb3e730c9cdc7245 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Feb 2025 10:09:44 -0700 Subject: [PATCH 0479/1708] .github: Bump golangci/golangci-lint-action from 6.2.0 to 6.3.1 (#14963) Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.2.0 to 6.3.1. 
- [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/ec5d18412c0aeab7936cb16880d708ba2a64e1ae...2e788936b09dd82dc280e845628a40d2ba6b204c) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index b9a9eb33d..176ee5f02 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -31,7 +31,7 @@ jobs: cache: false - name: golangci-lint - uses: golangci/golangci-lint-action@ec5d18412c0aeab7936cb16880d708ba2a64e1ae # v6.2.0 + uses: golangci/golangci-lint-action@2e788936b09dd82dc280e845628a40d2ba6b204c # v6.3.1 with: version: v1.60 From 48dd4bbe21e1b4ade2ca23acce86a115cb0f59ff Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 7 Feb 2025 11:18:57 -0600 Subject: [PATCH 0480/1708] ipn/ipn{local,server}: remove ResetForClientDisconnect in favor of SetCurrentUser(nil) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There’s (*LocalBackend).ResetForClientDisconnect, and there’s also (*LocalBackend).resetForProfileChangeLockedOnEntry. Both methods essentially did the same thing but in slightly different ways. For example, resetForProfileChangeLockedOnEntry didn’t reset the control client until (*LocalBackend).Start() was called at the very end and didn’t reset the keyExpired flag, while ResetForClientDisconnect didn’t reinitialize TKA. Since SetCurrentUser can be called with a nil argument to reset the currently connected user and internally calls resetForProfileChangeLockedOnEntry, we can remove ResetForClientDisconnect and let SetCurrentUser and resetForProfileChangeLockedOnEntry handle it. Updates #14823 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 41 ++++++----------------------------------- ipn/ipnserver/server.go | 2 +- 2 files changed, 7 insertions(+), 36 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 38bcfaaa2..811b978f7 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5615,41 +5615,6 @@ func (b *LocalBackend) resetAuthURLLocked() { b.authActor = nil } -// ResetForClientDisconnect resets the backend for GUI clients running -// in interactive (non-headless) mode. This is currently used only by -// Windows. This causes all state to be cleared, lest an unrelated user -// connect to tailscaled next. But it does not trigger a logout; we -// don't want to the user to have to reauthenticate in the future -// when they restart the GUI. -func (b *LocalBackend) ResetForClientDisconnect() { - b.logf("LocalBackend.ResetForClientDisconnect") - - unlock := b.lockAndGetUnlock() - defer unlock() - - prevCC := b.resetControlClientLocked() - if prevCC != nil { - // Needs to happen without b.mu held. 
- defer prevCC.Shutdown() - } - - b.setNetMapLocked(nil) - b.pm.Reset() - if b.currentUser != nil { - if c, ok := b.currentUser.(ipnauth.ActorCloser); ok { - c.Close() - } - b.currentUser = nil - } - b.keyExpired = false - b.resetAuthURLLocked() - b.activeLogin = "" - b.resetDialPlan() - b.resetAlwaysOnOverrideLocked() - b.setAtomicValuesFromPrefsLocked(ipn.PrefsView{}) - b.enterStateLockedOnEntry(ipn.Stopped, unlock) -} - func (b *LocalBackend) ShouldRunSSH() bool { return b.sshAtomicBool.Load() && envknob.CanSSHD() } // ShouldRunWebClient reports whether the web client is being run @@ -7178,13 +7143,19 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err b.setNetMapLocked(nil) // Reset netmap. // Reset the NetworkMap in the engine b.e.SetNetworkMap(new(netmap.NetworkMap)) + if prevCC := b.resetControlClientLocked(); prevCC != nil { + // Needs to happen without b.mu held. + defer prevCC.Shutdown() + } if err := b.initTKALocked(); err != nil { return err } b.lastServeConfJSON = mem.B(nil) b.serveConfig = ipn.ServeConfigView{} b.lastSuggestedExitNode = "" + b.keyExpired = false b.resetAlwaysOnOverrideLocked() + b.setAtomicValuesFromPrefsLocked(b.pm.CurrentPrefs()) b.enterStateLockedOnEntry(ipn.NoState, unlock) // Reset state; releases b.mu b.health.SetLocalLogConfigHealth(nil) return b.Start(ipn.Options{}) diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index 7bc2c7b3e..436b8404d 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -434,7 +434,7 @@ func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (o s.logf("client disconnected; staying alive in server mode") } else { s.logf("client disconnected; stopping server") - lb.ResetForClientDisconnect() + lb.SetCurrentUser(nil) } } From 1047d11102b27fd64383d3c9ebd96cefe0a7580a Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Mon, 10 Feb 2025 19:03:07 -0800 Subject: [PATCH 0481/1708] go.toolchain.rev: bump to Go 1.23.6 (#14976) Updates #cleanup Signed-off-by: Andrew Lytvynov --- go.toolchain.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index 900450dca..963e8a28e 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -64f7854906c3121fe3ada3d05f1936d3420d6ffa +65c3f5f3fc9d96f56a37a79cad4ebbd7ff985801 From 9706c9f4ffb8637670e3d2e152607c23be621a41 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 24 Jan 2025 19:41:30 -0800 Subject: [PATCH 0482/1708] types/netmap,*: pass around UserProfiles as views (pointers) instead Smaller. 
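
In practice that means map values hold a read-only view (internally a
pointer) and callers go through accessor methods, converting back to a
struct only where a mutable copy is needed. A small illustrative sketch:

package main

import (
	"fmt"

	"tailscale.com/tailcfg"
)

func main() {
	// View wraps a pointer to the struct and exposes read-only accessors.
	up := (&tailcfg.UserProfile{DisplayName: "Some One"}).View()
	fmt.Println(up.DisplayName()) // read through an accessor

	// AsStruct makes a mutable copy for the few places that still need one.
	clone := up.AsStruct()
	clone.DisplayName = "Someone Else"
	fmt.Println(up.DisplayName(), clone.DisplayName) // the view is unaffected
}
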
Updates tailscale/corp#26058 (@andrew-d noticed during this) Change-Id: Id33cddd171aaf8f042073b6d3c183b0a746e9931 Signed-off-by: Brad Fitzpatrick --- control/controlclient/direct.go | 4 +++- control/controlclient/map.go | 8 ++++---- ipn/ipnlocal/local.go | 25 +++++++++++++++++++------ ipn/ipnlocal/local_test.go | 16 ++++++++-------- ipn/ipnlocal/serve_test.go | 12 ++++++------ ipn/ipnstate/ipnstate.go | 4 ++-- types/netmap/netmap.go | 11 +++++++++-- 7 files changed, 51 insertions(+), 29 deletions(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index f327ecc2a..883a1a587 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1003,7 +1003,9 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap if persist == c.persist { newPersist := persist.AsStruct() newPersist.NodeID = nm.SelfNode.StableID() - newPersist.UserProfile = nm.UserProfiles[nm.User()] + if up, ok := nm.UserProfiles[nm.User()]; ok { + newPersist.UserProfile = *up.AsStruct() + } c.persist = newPersist.View() persist = c.persist diff --git a/control/controlclient/map.go b/control/controlclient/map.go index f0a11bdf1..d4283e490 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -77,7 +77,7 @@ type mapSession struct { peers map[tailcfg.NodeID]tailcfg.NodeView lastDNSConfig *tailcfg.DNSConfig lastDERPMap *tailcfg.DERPMap - lastUserProfile map[tailcfg.UserID]tailcfg.UserProfile + lastUserProfile map[tailcfg.UserID]tailcfg.UserProfileView lastPacketFilterRules views.Slice[tailcfg.FilterRule] // concatenation of all namedPacketFilters namedPacketFilters map[string]views.Slice[tailcfg.FilterRule] lastParsedPacketFilter []filter.Match @@ -104,7 +104,7 @@ func newMapSession(privateNodeKey key.NodePrivate, nu NetmapUpdater, controlKnob privateNodeKey: privateNodeKey, publicNodeKey: privateNodeKey.Public(), lastDNSConfig: new(tailcfg.DNSConfig), - lastUserProfile: map[tailcfg.UserID]tailcfg.UserProfile{}, + lastUserProfile: map[tailcfg.UserID]tailcfg.UserProfileView{}, // Non-nil no-op defaults, to be optionally overridden by the caller. logf: logger.Discard, @@ -294,7 +294,7 @@ func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) { } for _, up := range resp.UserProfiles { - ms.lastUserProfile[up.ID] = up + ms.lastUserProfile[up.ID] = up.View() } if dm := resp.DERPMap; dm != nil { @@ -837,7 +837,7 @@ func (ms *mapSession) netmap() *netmap.NetworkMap { PrivateKey: ms.privateNodeKey, MachineKey: ms.machinePubKey, Peers: peerViews, - UserProfiles: make(map[tailcfg.UserID]tailcfg.UserProfile), + UserProfiles: make(map[tailcfg.UserID]tailcfg.UserProfileView), Domain: ms.lastDomain, DomainAuditLogID: ms.lastDomainAuditLogID, DNS: *ms.lastDNSConfig, diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 811b978f7..c24bcbb7b 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1305,6 +1305,18 @@ func peerStatusFromNode(ps *ipnstate.PeerStatus, n tailcfg.NodeView) { } } +func profileFromView(v tailcfg.UserProfileView) tailcfg.UserProfile { + if v.Valid() { + return tailcfg.UserProfile{ + ID: v.ID(), + LoginName: v.LoginName(), + DisplayName: v.DisplayName(), + ProfilePicURL: v.ProfilePicURL(), + } + } + return tailcfg.UserProfile{} +} + // WhoIsNodeKey returns the peer info of given public key, if it exists. 
func (b *LocalBackend) WhoIsNodeKey(k key.NodePublic) (n tailcfg.NodeView, u tailcfg.UserProfile, ok bool) { b.mu.Lock() @@ -1314,11 +1326,12 @@ func (b *LocalBackend) WhoIsNodeKey(k key.NodePublic) (n tailcfg.NodeView, u tai return n, u, false } if self := b.netMap.SelfNode; self.Valid() && self.Key() == k { - return self, b.netMap.UserProfiles[self.User()], true + return self, profileFromView(b.netMap.UserProfiles[self.User()]), true } for _, n := range b.peers { if n.Key() == k { - u, ok = b.netMap.UserProfiles[n.User()] + up, ok := b.netMap.UserProfiles[n.User()] + u = profileFromView(up) return n, u, ok } } @@ -1388,11 +1401,11 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi } n = b.netMap.SelfNode } - u, ok = b.netMap.UserProfiles[n.User()] + up, ok := b.netMap.UserProfiles[n.User()] if !ok { return failf("no userprofile for node %v", n.Key()) } - return n, u, true + return n, profileFromView(up), true } // PeerCaps returns the capabilities that remote src IP has to @@ -4193,7 +4206,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) } } if netMap != nil { - newProfile := netMap.UserProfiles[netMap.User()] + newProfile := profileFromView(netMap.UserProfiles[netMap.User()]) if newLoginName := newProfile.LoginName; newLoginName != "" { if !oldp.Persist().Valid() { b.logf("active login: %s", newLoginName) @@ -5803,7 +5816,7 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { } var login string if nm != nil { - login = cmp.Or(nm.UserProfiles[nm.User()].LoginName, "") + login = cmp.Or(profileFromView(nm.UserProfiles[nm.User()]).LoginName, "") } b.netMap = nm b.updatePeersFromNetmapLocked(nm) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index dfc2e45bd..f0c712777 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1052,13 +1052,13 @@ func TestWhoIs(t *testing.T) { Addresses: []netip.Prefix{netip.MustParsePrefix("100.200.200.200/32")}, }).View(), }, - UserProfiles: map[tailcfg.UserID]tailcfg.UserProfile{ - 10: { + UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ + 10: (&tailcfg.UserProfile{ DisplayName: "Myself", - }, - 20: { + }).View(), + 20: (&tailcfg.UserProfile{ DisplayName: "Peer", - }, + }).View(), }, }) tests := []struct { @@ -2754,12 +2754,12 @@ func TestTCPHandlerForDstWithVIPService(t *testing.T) { tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{tailcfg.RawMessage(svcIPMapJSON)}, }, }).View(), - UserProfiles: map[tailcfg.UserID]tailcfg.UserProfile{ - tailcfg.UserID(1): { + UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ + tailcfg.UserID(1): (&tailcfg.UserProfile{ LoginName: "someone@example.com", DisplayName: "Some One", ProfilePicURL: "https://example.com/photo.jpg", - }, + }).View(), }, }, ) diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index 7f457e560..3c028c65e 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -327,12 +327,12 @@ func TestServeConfigServices(t *testing.T) { tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{tailcfg.RawMessage(svcIPMapJSON)}, }, }).View(), - UserProfiles: map[tailcfg.UserID]tailcfg.UserProfile{ - tailcfg.UserID(1): { + UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ + tailcfg.UserID(1): (&tailcfg.UserProfile{ LoginName: "someone@example.com", DisplayName: "Some One", ProfilePicURL: "https://example.com/photo.jpg", - }, + }).View(), }, } @@ -905,12 +905,12 @@ func newTestBackend(t *testing.T) *LocalBackend { SelfNode: (&tailcfg.Node{ 
Name: "example.ts.net", }).View(), - UserProfiles: map[tailcfg.UserID]tailcfg.UserProfile{ - tailcfg.UserID(1): { + UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ + tailcfg.UserID(1): (&tailcfg.UserProfile{ LoginName: "someone@example.com", DisplayName: "Some One", ProfilePicURL: "https://example.com/photo.jpg", - }, + }).View(), }, } b.peers = map[tailcfg.NodeID]tailcfg.NodeView{ diff --git a/ipn/ipnstate/ipnstate.go b/ipn/ipnstate/ipnstate.go index 37ab47714..5ab9b5bdf 100644 --- a/ipn/ipnstate/ipnstate.go +++ b/ipn/ipnstate/ipnstate.go @@ -367,7 +367,7 @@ func (sb *StatusBuilder) MutateSelfStatus(f func(*PeerStatus)) { } // AddUser adds a user profile to the status. -func (sb *StatusBuilder) AddUser(id tailcfg.UserID, up tailcfg.UserProfile) { +func (sb *StatusBuilder) AddUser(id tailcfg.UserID, up tailcfg.UserProfileView) { if sb.locked { log.Printf("[unexpected] ipnstate: AddUser after Locked") return @@ -377,7 +377,7 @@ func (sb *StatusBuilder) AddUser(id tailcfg.UserID, up tailcfg.UserProfile) { sb.st.User = make(map[tailcfg.UserID]tailcfg.UserProfile) } - sb.st.User[id] = up + sb.st.User[id] = *up.AsStruct() } // AddIP adds a Tailscale IP address to the status. diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index 051b0f0dc..94db7a477 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -76,7 +76,9 @@ type NetworkMap struct { // If this is empty, then data-plane audit logging is disabled. DomainAuditLogID string - UserProfiles map[tailcfg.UserID]tailcfg.UserProfile + // UserProfiles contains the profile information of UserIDs referenced + // in SelfNode and Peers. + UserProfiles map[tailcfg.UserID]tailcfg.UserProfileView // MaxKeyDuration describes the MaxKeyDuration setting for the tailnet. MaxKeyDuration time.Duration @@ -289,7 +291,12 @@ func (nm *NetworkMap) PeerWithStableID(pid tailcfg.StableNodeID) (_ tailcfg.Node func (nm *NetworkMap) printConciseHeader(buf *strings.Builder) { fmt.Fprintf(buf, "netmap: self: %v auth=%v", nm.NodeKey.ShortString(), nm.GetMachineStatus()) - login := nm.UserProfiles[nm.User()].LoginName + + var login string + up, ok := nm.UserProfiles[nm.User()] + if ok { + login = up.LoginName() + } if login == "" { if nm.User().IsZero() { login = "?" From 2f981978575688086c5618341c0d438314c87666 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 11 Feb 2025 07:26:49 -0800 Subject: [PATCH 0483/1708] tempfork/sshtest/ssh: add fork of golang.org/x/crypto/ssh for testing only This fork golang.org/x/crypto/ssh (at upstream x/crypto git rev e47973b1c1) into tailscale.com/tempfork/sshtest/ssh so we can hack up the client in weird ways to simulate other SSH clients seen in the wild. Two changes were made to the files when they were copied from x/crypto: * internal/poly1305 imports were replaced by the non-internal version; no code changes otherwise. It didn't need the internal one. * all decode-with-passphrase funcs were deleted, to avoid using the internal package x/crypto/ssh/internal/bcrypt_pbkdf Then the tests passed. 
Updates #14969 Change-Id: Ibf1abebfe608c75fef4da0255314f65e54ce5077 Signed-off-by: Brad Fitzpatrick --- ssh/tailssh/tailssh_test.go | 3 + tempfork/sshtest/README.md | 9 + tempfork/sshtest/ssh/benchmark_test.go | 127 ++ tempfork/sshtest/ssh/buffer.go | 97 + tempfork/sshtest/ssh/buffer_test.go | 87 + tempfork/sshtest/ssh/certs.go | 611 +++++++ tempfork/sshtest/ssh/certs_test.go | 406 ++++ tempfork/sshtest/ssh/channel.go | 645 +++++++ tempfork/sshtest/ssh/cipher.go | 789 ++++++++ tempfork/sshtest/ssh/cipher_test.go | 231 +++ tempfork/sshtest/ssh/client.go | 282 +++ tempfork/sshtest/ssh/client_auth.go | 796 ++++++++ tempfork/sshtest/ssh/client_auth_test.go | 1384 ++++++++++++++ tempfork/sshtest/ssh/client_test.go | 367 ++++ tempfork/sshtest/ssh/common.go | 476 +++++ tempfork/sshtest/ssh/common_test.go | 176 ++ tempfork/sshtest/ssh/connection.go | 143 ++ tempfork/sshtest/ssh/doc.go | 23 + tempfork/sshtest/ssh/example_test.go | 400 ++++ tempfork/sshtest/ssh/handshake.go | 816 +++++++++ tempfork/sshtest/ssh/handshake_test.go | 1021 +++++++++++ tempfork/sshtest/ssh/kex.go | 786 ++++++++ tempfork/sshtest/ssh/kex_test.go | 106 ++ tempfork/sshtest/ssh/keys.go | 1626 +++++++++++++++++ tempfork/sshtest/ssh/keys_test.go | 724 ++++++++ tempfork/sshtest/ssh/mac.go | 68 + tempfork/sshtest/ssh/mempipe_test.go | 124 ++ tempfork/sshtest/ssh/messages.go | 891 +++++++++ tempfork/sshtest/ssh/messages_test.go | 288 +++ tempfork/sshtest/ssh/mux.go | 357 ++++ tempfork/sshtest/ssh/mux_test.go | 839 +++++++++ tempfork/sshtest/ssh/server.go | 933 ++++++++++ .../sshtest/ssh/server_multi_auth_test.go | 412 +++++ tempfork/sshtest/ssh/server_test.go | 478 +++++ tempfork/sshtest/ssh/session.go | 647 +++++++ tempfork/sshtest/ssh/session_test.go | 892 +++++++++ tempfork/sshtest/ssh/ssh_gss.go | 139 ++ tempfork/sshtest/ssh/ssh_gss_test.go | 109 ++ tempfork/sshtest/ssh/streamlocal.go | 116 ++ tempfork/sshtest/ssh/tcpip.go | 509 ++++++ tempfork/sshtest/ssh/tcpip_test.go | 53 + tempfork/sshtest/ssh/testdata_test.go | 63 + tempfork/sshtest/ssh/transport.go | 380 ++++ tempfork/sshtest/ssh/transport_test.go | 113 ++ 44 files changed, 19542 insertions(+) create mode 100644 tempfork/sshtest/README.md create mode 100644 tempfork/sshtest/ssh/benchmark_test.go create mode 100644 tempfork/sshtest/ssh/buffer.go create mode 100644 tempfork/sshtest/ssh/buffer_test.go create mode 100644 tempfork/sshtest/ssh/certs.go create mode 100644 tempfork/sshtest/ssh/certs_test.go create mode 100644 tempfork/sshtest/ssh/channel.go create mode 100644 tempfork/sshtest/ssh/cipher.go create mode 100644 tempfork/sshtest/ssh/cipher_test.go create mode 100644 tempfork/sshtest/ssh/client.go create mode 100644 tempfork/sshtest/ssh/client_auth.go create mode 100644 tempfork/sshtest/ssh/client_auth_test.go create mode 100644 tempfork/sshtest/ssh/client_test.go create mode 100644 tempfork/sshtest/ssh/common.go create mode 100644 tempfork/sshtest/ssh/common_test.go create mode 100644 tempfork/sshtest/ssh/connection.go create mode 100644 tempfork/sshtest/ssh/doc.go create mode 100644 tempfork/sshtest/ssh/example_test.go create mode 100644 tempfork/sshtest/ssh/handshake.go create mode 100644 tempfork/sshtest/ssh/handshake_test.go create mode 100644 tempfork/sshtest/ssh/kex.go create mode 100644 tempfork/sshtest/ssh/kex_test.go create mode 100644 tempfork/sshtest/ssh/keys.go create mode 100644 tempfork/sshtest/ssh/keys_test.go create mode 100644 tempfork/sshtest/ssh/mac.go create mode 100644 tempfork/sshtest/ssh/mempipe_test.go create mode 100644 
tempfork/sshtest/ssh/messages.go create mode 100644 tempfork/sshtest/ssh/messages_test.go create mode 100644 tempfork/sshtest/ssh/mux.go create mode 100644 tempfork/sshtest/ssh/mux_test.go create mode 100644 tempfork/sshtest/ssh/server.go create mode 100644 tempfork/sshtest/ssh/server_multi_auth_test.go create mode 100644 tempfork/sshtest/ssh/server_test.go create mode 100644 tempfork/sshtest/ssh/session.go create mode 100644 tempfork/sshtest/ssh/session_test.go create mode 100644 tempfork/sshtest/ssh/ssh_gss.go create mode 100644 tempfork/sshtest/ssh/ssh_gss_test.go create mode 100644 tempfork/sshtest/ssh/streamlocal.go create mode 100644 tempfork/sshtest/ssh/tcpip.go create mode 100644 tempfork/sshtest/ssh/tcpip_test.go create mode 100644 tempfork/sshtest/ssh/testdata_test.go create mode 100644 tempfork/sshtest/ssh/transport.go create mode 100644 tempfork/sshtest/ssh/transport_test.go diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 207136659..d22dfe443 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -41,6 +41,7 @@ import ( "tailscale.com/sessionrecording" "tailscale.com/tailcfg" "tailscale.com/tempfork/gliderlabs/ssh" + sshtest "tailscale.com/tempfork/sshtest/ssh" "tailscale.com/tsd" "tailscale.com/tstest" "tailscale.com/types/key" @@ -55,6 +56,8 @@ import ( "tailscale.com/wgengine" ) +type _ = sshtest.Client // TODO(bradfitz,percy): sshtest; delete this line + func TestMatchRule(t *testing.T) { someAction := new(tailcfg.SSHAction) tests := []struct { diff --git a/tempfork/sshtest/README.md b/tempfork/sshtest/README.md new file mode 100644 index 000000000..30c74f525 --- /dev/null +++ b/tempfork/sshtest/README.md @@ -0,0 +1,9 @@ +# sshtest + +This contains packages that are forked & locally hacked up for use +in tests. + +Notably, `golang.org/x/crypto/ssh` was copied to +`tailscale.com/tempfork/sshtest/ssh` to permit adding behaviors specific +to testing (for testing Tailscale SSH) that aren't necessarily desirable +to have upstream. diff --git a/tempfork/sshtest/ssh/benchmark_test.go b/tempfork/sshtest/ssh/benchmark_test.go new file mode 100644 index 000000000..b356330b4 --- /dev/null +++ b/tempfork/sshtest/ssh/benchmark_test.go @@ -0,0 +1,127 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "errors" + "fmt" + "io" + "net" + "testing" +) + +type server struct { + *ServerConn + chans <-chan NewChannel +} + +func newServer(c net.Conn, conf *ServerConfig) (*server, error) { + sconn, chans, reqs, err := NewServerConn(c, conf) + if err != nil { + return nil, err + } + go DiscardRequests(reqs) + return &server{sconn, chans}, nil +} + +func (s *server) Accept() (NewChannel, error) { + n, ok := <-s.chans + if !ok { + return nil, io.EOF + } + return n, nil +} + +func sshPipe() (Conn, *server, error) { + c1, c2, err := netPipe() + if err != nil { + return nil, nil, err + } + + clientConf := ClientConfig{ + User: "user", + HostKeyCallback: InsecureIgnoreHostKey(), + } + serverConf := ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["ecdsa"]) + done := make(chan *server, 1) + go func() { + server, err := newServer(c2, &serverConf) + if err != nil { + done <- nil + } + done <- server + }() + + client, _, reqs, err := NewClientConn(c1, "", &clientConf) + if err != nil { + return nil, nil, err + } + + server := <-done + if server == nil { + return nil, nil, errors.New("server handshake failed.") + } + go DiscardRequests(reqs) + + return client, server, nil +} + +func BenchmarkEndToEnd(b *testing.B) { + b.StopTimer() + + client, server, err := sshPipe() + if err != nil { + b.Fatalf("sshPipe: %v", err) + } + + defer client.Close() + defer server.Close() + + size := (1 << 20) + input := make([]byte, size) + output := make([]byte, size) + b.SetBytes(int64(size)) + done := make(chan int, 1) + + go func() { + newCh, err := server.Accept() + if err != nil { + panic(fmt.Sprintf("Client: %v", err)) + } + ch, incoming, err := newCh.Accept() + if err != nil { + panic(fmt.Sprintf("Accept: %v", err)) + } + go DiscardRequests(incoming) + for i := 0; i < b.N; i++ { + if _, err := io.ReadFull(ch, output); err != nil { + panic(fmt.Sprintf("ReadFull: %v", err)) + } + } + ch.Close() + done <- 1 + }() + + ch, in, err := client.OpenChannel("speed", nil) + if err != nil { + b.Fatalf("OpenChannel: %v", err) + } + go DiscardRequests(in) + + b.ResetTimer() + b.StartTimer() + for i := 0; i < b.N; i++ { + if _, err := ch.Write(input); err != nil { + b.Fatalf("WriteFull: %v", err) + } + } + ch.Close() + b.StopTimer() + + <-done +} diff --git a/tempfork/sshtest/ssh/buffer.go b/tempfork/sshtest/ssh/buffer.go new file mode 100644 index 000000000..1ab07d078 --- /dev/null +++ b/tempfork/sshtest/ssh/buffer.go @@ -0,0 +1,97 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "io" + "sync" +) + +// buffer provides a linked list buffer for data exchange +// between producer and consumer. Theoretically the buffer is +// of unlimited capacity as it does no allocation of its own. +type buffer struct { + // protects concurrent access to head, tail and closed + *sync.Cond + + head *element // the buffer that will be read first + tail *element // the buffer that will be read last + + closed bool +} + +// An element represents a single link in a linked list. +type element struct { + buf []byte + next *element +} + +// newBuffer returns an empty buffer that is not closed. +func newBuffer() *buffer { + e := new(element) + b := &buffer{ + Cond: newCond(), + head: e, + tail: e, + } + return b +} + +// write makes buf available for Read to receive. +// buf must not be modified after the call to write. 
+func (b *buffer) write(buf []byte) { + b.Cond.L.Lock() + e := &element{buf: buf} + b.tail.next = e + b.tail = e + b.Cond.Signal() + b.Cond.L.Unlock() +} + +// eof closes the buffer. Reads from the buffer once all +// the data has been consumed will receive io.EOF. +func (b *buffer) eof() { + b.Cond.L.Lock() + b.closed = true + b.Cond.Signal() + b.Cond.L.Unlock() +} + +// Read reads data from the internal buffer in buf. Reads will block +// if no data is available, or until the buffer is closed. +func (b *buffer) Read(buf []byte) (n int, err error) { + b.Cond.L.Lock() + defer b.Cond.L.Unlock() + + for len(buf) > 0 { + // if there is data in b.head, copy it + if len(b.head.buf) > 0 { + r := copy(buf, b.head.buf) + buf, b.head.buf = buf[r:], b.head.buf[r:] + n += r + continue + } + // if there is a next buffer, make it the head + if len(b.head.buf) == 0 && b.head != b.tail { + b.head = b.head.next + continue + } + + // if at least one byte has been copied, return + if n > 0 { + break + } + + // if nothing was read, and there is nothing outstanding + // check to see if the buffer is closed. + if b.closed { + err = io.EOF + break + } + // out of buffers, wait for producer + b.Cond.Wait() + } + return +} diff --git a/tempfork/sshtest/ssh/buffer_test.go b/tempfork/sshtest/ssh/buffer_test.go new file mode 100644 index 000000000..d5781cb3d --- /dev/null +++ b/tempfork/sshtest/ssh/buffer_test.go @@ -0,0 +1,87 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "io" + "testing" +) + +var alphabet = []byte("abcdefghijklmnopqrstuvwxyz") + +func TestBufferReadwrite(t *testing.T) { + b := newBuffer() + b.write(alphabet[:10]) + r, _ := b.Read(make([]byte, 10)) + if r != 10 { + t.Fatalf("Expected written == read == 10, written: 10, read %d", r) + } + + b = newBuffer() + b.write(alphabet[:5]) + r, _ = b.Read(make([]byte, 10)) + if r != 5 { + t.Fatalf("Expected written == read == 5, written: 5, read %d", r) + } + + b = newBuffer() + b.write(alphabet[:10]) + r, _ = b.Read(make([]byte, 5)) + if r != 5 { + t.Fatalf("Expected written == 10, read == 5, written: 10, read %d", r) + } + + b = newBuffer() + b.write(alphabet[:5]) + b.write(alphabet[5:15]) + r, _ = b.Read(make([]byte, 10)) + r2, _ := b.Read(make([]byte, 10)) + if r != 10 || r2 != 5 || 15 != r+r2 { + t.Fatal("Expected written == read == 15") + } +} + +func TestBufferClose(t *testing.T) { + b := newBuffer() + b.write(alphabet[:10]) + b.eof() + _, err := b.Read(make([]byte, 5)) + if err != nil { + t.Fatal("expected read of 5 to not return EOF") + } + b = newBuffer() + b.write(alphabet[:10]) + b.eof() + r, err := b.Read(make([]byte, 5)) + r2, err2 := b.Read(make([]byte, 10)) + if r != 5 || r2 != 5 || err != nil || err2 != nil { + t.Fatal("expected reads of 5 and 5") + } + + b = newBuffer() + b.write(alphabet[:10]) + b.eof() + r, err = b.Read(make([]byte, 5)) + r2, err2 = b.Read(make([]byte, 10)) + r3, err3 := b.Read(make([]byte, 10)) + if r != 5 || r2 != 5 || r3 != 0 || err != nil || err2 != nil || err3 != io.EOF { + t.Fatal("expected reads of 5 and 5 and 0, with EOF") + } + + b = newBuffer() + b.write(make([]byte, 5)) + b.write(make([]byte, 10)) + b.eof() + r, err = b.Read(make([]byte, 9)) + r2, err2 = b.Read(make([]byte, 3)) + r3, err3 = b.Read(make([]byte, 3)) + r4, err4 := b.Read(make([]byte, 10)) + if err != nil || err2 != nil || err3 != nil || err4 != io.EOF { + t.Fatalf("Expected EOF on forth read only, 
err=%v, err2=%v, err3=%v, err4=%v", err, err2, err3, err4) + } + if r != 9 || r2 != 3 || r3 != 3 || r4 != 0 { + t.Fatal("Expected written == read == 15", r, r2, r3, r4) + } +} diff --git a/tempfork/sshtest/ssh/certs.go b/tempfork/sshtest/ssh/certs.go new file mode 100644 index 000000000..27d0e14aa --- /dev/null +++ b/tempfork/sshtest/ssh/certs.go @@ -0,0 +1,611 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "sort" + "time" +) + +// Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear +// in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms. +// Unlike key algorithm names, these are not passed to AlgorithmSigner nor +// returned by MultiAlgorithmSigner and don't appear in the Signature.Format +// field. +const ( + CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" + CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" + CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" + CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" + CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" + CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com" + CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" + CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" + + // CertAlgoRSASHA256v01 and CertAlgoRSASHA512v01 can't appear as a + // Certificate.Type (or PublicKey.Type), but only in + // ClientConfig.HostKeyAlgorithms. + CertAlgoRSASHA256v01 = "rsa-sha2-256-cert-v01@openssh.com" + CertAlgoRSASHA512v01 = "rsa-sha2-512-cert-v01@openssh.com" +) + +const ( + // Deprecated: use CertAlgoRSAv01. + CertSigAlgoRSAv01 = CertAlgoRSAv01 + // Deprecated: use CertAlgoRSASHA256v01. + CertSigAlgoRSASHA2256v01 = CertAlgoRSASHA256v01 + // Deprecated: use CertAlgoRSASHA512v01. + CertSigAlgoRSASHA2512v01 = CertAlgoRSASHA512v01 +) + +// Certificate types distinguish between host and user +// certificates. The values can be set in the CertType field of +// Certificate. +const ( + UserCert = 1 + HostCert = 2 +) + +// Signature represents a cryptographic signature. +type Signature struct { + Format string + Blob []byte + Rest []byte `ssh:"rest"` +} + +// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that +// a certificate does not expire. +const CertTimeInfinity = 1<<64 - 1 + +// An Certificate represents an OpenSSH certificate as defined in +// [PROTOCOL.certkeys]?rev=1.8. The Certificate type implements the +// PublicKey interface, so it can be unmarshaled using +// ParsePublicKey. +type Certificate struct { + Nonce []byte + Key PublicKey + Serial uint64 + CertType uint32 + KeyId string + ValidPrincipals []string + ValidAfter uint64 + ValidBefore uint64 + Permissions + Reserved []byte + SignatureKey PublicKey + Signature *Signature +} + +// genericCertData holds the key-independent part of the certificate data. +// Overall, certificates contain an nonce, public key fields and +// key-independent fields. +type genericCertData struct { + Serial uint64 + CertType uint32 + KeyId string + ValidPrincipals []byte + ValidAfter uint64 + ValidBefore uint64 + CriticalOptions []byte + Extensions []byte + Reserved []byte + SignatureKey []byte + Signature []byte +} + +func marshalStringList(namelist []string) []byte { + var to []byte + for _, name := range namelist { + s := struct{ N string }{name} + to = append(to, Marshal(&s)...) 
+ } + return to +} + +type optionsTuple struct { + Key string + Value []byte +} + +type optionsTupleValue struct { + Value string +} + +// serialize a map of critical options or extensions +// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, +// we need two length prefixes for a non-empty string value +func marshalTuples(tups map[string]string) []byte { + keys := make([]string, 0, len(tups)) + for key := range tups { + keys = append(keys, key) + } + sort.Strings(keys) + + var ret []byte + for _, key := range keys { + s := optionsTuple{Key: key} + if value := tups[key]; len(value) > 0 { + s.Value = Marshal(&optionsTupleValue{value}) + } + ret = append(ret, Marshal(&s)...) + } + return ret +} + +// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, +// we need two length prefixes for a non-empty option value +func parseTuples(in []byte) (map[string]string, error) { + tups := map[string]string{} + var lastKey string + var haveLastKey bool + + for len(in) > 0 { + var key, val, extra []byte + var ok bool + + if key, in, ok = parseString(in); !ok { + return nil, errShortRead + } + keyStr := string(key) + // according to [PROTOCOL.certkeys], the names must be in + // lexical order. + if haveLastKey && keyStr <= lastKey { + return nil, fmt.Errorf("ssh: certificate options are not in lexical order") + } + lastKey, haveLastKey = keyStr, true + // the next field is a data field, which if non-empty has a string embedded + if val, in, ok = parseString(in); !ok { + return nil, errShortRead + } + if len(val) > 0 { + val, extra, ok = parseString(val) + if !ok { + return nil, errShortRead + } + if len(extra) > 0 { + return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") + } + tups[keyStr] = string(val) + } else { + tups[keyStr] = "" + } + } + return tups, nil +} + +func parseCert(in []byte, privAlgo string) (*Certificate, error) { + nonce, rest, ok := parseString(in) + if !ok { + return nil, errShortRead + } + + key, rest, err := parsePubKey(rest, privAlgo) + if err != nil { + return nil, err + } + + var g genericCertData + if err := Unmarshal(rest, &g); err != nil { + return nil, err + } + + c := &Certificate{ + Nonce: nonce, + Key: key, + Serial: g.Serial, + CertType: g.CertType, + KeyId: g.KeyId, + ValidAfter: g.ValidAfter, + ValidBefore: g.ValidBefore, + } + + for principals := g.ValidPrincipals; len(principals) > 0; { + principal, rest, ok := parseString(principals) + if !ok { + return nil, errShortRead + } + c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) + principals = rest + } + + c.CriticalOptions, err = parseTuples(g.CriticalOptions) + if err != nil { + return nil, err + } + c.Extensions, err = parseTuples(g.Extensions) + if err != nil { + return nil, err + } + c.Reserved = g.Reserved + k, err := ParsePublicKey(g.SignatureKey) + if err != nil { + return nil, err + } + + c.SignatureKey = k + c.Signature, rest, ok = parseSignatureBody(g.Signature) + if !ok || len(rest) > 0 { + return nil, errors.New("ssh: signature parse error") + } + + return c, nil +} + +type openSSHCertSigner struct { + pub *Certificate + signer Signer +} + +type algorithmOpenSSHCertSigner struct { + *openSSHCertSigner + algorithmSigner AlgorithmSigner +} + +// NewCertSigner returns a Signer that signs with the given Certificate, whose +// private key is held by signer. It returns an error if the public key in cert +// doesn't match the key used by signer. 
+func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { + if !bytes.Equal(cert.Key.Marshal(), signer.PublicKey().Marshal()) { + return nil, errors.New("ssh: signer and cert have different public key") + } + + switch s := signer.(type) { + case MultiAlgorithmSigner: + return &multiAlgorithmSigner{ + AlgorithmSigner: &algorithmOpenSSHCertSigner{ + &openSSHCertSigner{cert, signer}, s}, + supportedAlgorithms: s.Algorithms(), + }, nil + case AlgorithmSigner: + return &algorithmOpenSSHCertSigner{ + &openSSHCertSigner{cert, signer}, s}, nil + default: + return &openSSHCertSigner{cert, signer}, nil + } +} + +func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + return s.signer.Sign(rand, data) +} + +func (s *openSSHCertSigner) PublicKey() PublicKey { + return s.pub +} + +func (s *algorithmOpenSSHCertSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + return s.algorithmSigner.SignWithAlgorithm(rand, data, algorithm) +} + +const sourceAddressCriticalOption = "source-address" + +// CertChecker does the work of verifying a certificate. Its methods +// can be plugged into ClientConfig.HostKeyCallback and +// ServerConfig.PublicKeyCallback. For the CertChecker to work, +// minimally, the IsAuthority callback should be set. +type CertChecker struct { + // SupportedCriticalOptions lists the CriticalOptions that the + // server application layer understands. These are only used + // for user certificates. + SupportedCriticalOptions []string + + // IsUserAuthority should return true if the key is recognized as an + // authority for the given user certificate. This allows for + // certificates to be signed by other certificates. This must be set + // if this CertChecker will be checking user certificates. + IsUserAuthority func(auth PublicKey) bool + + // IsHostAuthority should report whether the key is recognized as + // an authority for this host. This allows for certificates to be + // signed by other keys, and for those other keys to only be valid + // signers for particular hostnames. This must be set if this + // CertChecker will be checking host certificates. + IsHostAuthority func(auth PublicKey, address string) bool + + // Clock is used for verifying time stamps. If nil, time.Now + // is used. + Clock func() time.Time + + // UserKeyFallback is called when CertChecker.Authenticate encounters a + // public key that is not a certificate. It must implement validation + // of user keys or else, if nil, all such keys are rejected. + UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // HostKeyFallback is called when CertChecker.CheckHostKey encounters a + // public key that is not a certificate. It must implement host key + // validation or else, if nil, all such keys are rejected. + HostKeyFallback HostKeyCallback + + // IsRevoked is called for each certificate so that revocation checking + // can be implemented. It should return true if the given certificate + // is revoked and false otherwise. If nil, no certificates are + // considered to have been revoked. + IsRevoked func(cert *Certificate) bool +} + +// CheckHostKey checks a host key certificate. This method can be +// plugged into ClientConfig.HostKeyCallback. 
+func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { + cert, ok := key.(*Certificate) + if !ok { + if c.HostKeyFallback != nil { + return c.HostKeyFallback(addr, remote, key) + } + return errors.New("ssh: non-certificate host key") + } + if cert.CertType != HostCert { + return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) + } + if !c.IsHostAuthority(cert.SignatureKey, addr) { + return fmt.Errorf("ssh: no authorities for hostname: %v", addr) + } + + hostname, _, err := net.SplitHostPort(addr) + if err != nil { + return err + } + + // Pass hostname only as principal for host certificates (consistent with OpenSSH) + return c.CheckCert(hostname, cert) +} + +// Authenticate checks a user certificate. Authenticate can be used as +// a value for ServerConfig.PublicKeyCallback. +func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { + cert, ok := pubKey.(*Certificate) + if !ok { + if c.UserKeyFallback != nil { + return c.UserKeyFallback(conn, pubKey) + } + return nil, errors.New("ssh: normal key pairs not accepted") + } + + if cert.CertType != UserCert { + return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) + } + if !c.IsUserAuthority(cert.SignatureKey) { + return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority") + } + + if err := c.CheckCert(conn.User(), cert); err != nil { + return nil, err + } + + return &cert.Permissions, nil +} + +// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and +// the signature of the certificate. +func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { + if c.IsRevoked != nil && c.IsRevoked(cert) { + return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial) + } + + for opt := range cert.CriticalOptions { + // sourceAddressCriticalOption will be enforced by + // serverAuthenticate + if opt == sourceAddressCriticalOption { + continue + } + + found := false + for _, supp := range c.SupportedCriticalOptions { + if supp == opt { + found = true + break + } + } + if !found { + return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) + } + } + + if len(cert.ValidPrincipals) > 0 { + // By default, certs are valid for all users/hosts. + found := false + for _, p := range cert.ValidPrincipals { + if p == principal { + found = true + break + } + } + if !found { + return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) + } + } + + clock := c.Clock + if clock == nil { + clock = time.Now + } + + unixNow := clock().Unix() + if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { + return fmt.Errorf("ssh: cert is not yet valid") + } + if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { + return fmt.Errorf("ssh: cert has expired") + } + if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { + return fmt.Errorf("ssh: certificate signature does not verify") + } + + return nil +} + +// SignCert signs the certificate with an authority, setting the Nonce, +// SignatureKey, and Signature fields. If the authority implements the +// MultiAlgorithmSigner interface the first algorithm in the list is used. This +// is useful if you want to sign with a specific algorithm. 
+func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { + c.Nonce = make([]byte, 32) + if _, err := io.ReadFull(rand, c.Nonce); err != nil { + return err + } + c.SignatureKey = authority.PublicKey() + + if v, ok := authority.(MultiAlgorithmSigner); ok { + if len(v.Algorithms()) == 0 { + return errors.New("the provided authority has no signature algorithm") + } + // Use the first algorithm in the list. + sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), v.Algorithms()[0]) + if err != nil { + return err + } + c.Signature = sig + return nil + } else if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA { + // Default to KeyAlgoRSASHA512 for ssh-rsa signers. + // TODO: consider using KeyAlgoRSASHA256 as default. + sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512) + if err != nil { + return err + } + c.Signature = sig + return nil + } + + sig, err := authority.Sign(rand, c.bytesForSigning()) + if err != nil { + return err + } + c.Signature = sig + return nil +} + +// certKeyAlgoNames is a mapping from known certificate algorithm names to the +// corresponding public key signature algorithm. +// +// This map must be kept in sync with the one in agent/client.go. +var certKeyAlgoNames = map[string]string{ + CertAlgoRSAv01: KeyAlgoRSA, + CertAlgoRSASHA256v01: KeyAlgoRSASHA256, + CertAlgoRSASHA512v01: KeyAlgoRSASHA512, + CertAlgoDSAv01: KeyAlgoDSA, + CertAlgoECDSA256v01: KeyAlgoECDSA256, + CertAlgoECDSA384v01: KeyAlgoECDSA384, + CertAlgoECDSA521v01: KeyAlgoECDSA521, + CertAlgoSKECDSA256v01: KeyAlgoSKECDSA256, + CertAlgoED25519v01: KeyAlgoED25519, + CertAlgoSKED25519v01: KeyAlgoSKED25519, +} + +// underlyingAlgo returns the signature algorithm associated with algo (which is +// an advertised or negotiated public key or host key algorithm). These are +// usually the same, except for certificate algorithms. +func underlyingAlgo(algo string) string { + if a, ok := certKeyAlgoNames[algo]; ok { + return a + } + return algo +} + +// certificateAlgo returns the certificate algorithms that uses the provided +// underlying signature algorithm. +func certificateAlgo(algo string) (certAlgo string, ok bool) { + for certName, algoName := range certKeyAlgoNames { + if algoName == algo { + return certName, true + } + } + return "", false +} + +func (cert *Certificate) bytesForSigning() []byte { + c2 := *cert + c2.Signature = nil + out := c2.Marshal() + // Drop trailing signature length. + return out[:len(out)-4] +} + +// Marshal serializes c into OpenSSH's wire format. It is part of the +// PublicKey interface. +func (c *Certificate) Marshal() []byte { + generic := genericCertData{ + Serial: c.Serial, + CertType: c.CertType, + KeyId: c.KeyId, + ValidPrincipals: marshalStringList(c.ValidPrincipals), + ValidAfter: uint64(c.ValidAfter), + ValidBefore: uint64(c.ValidBefore), + CriticalOptions: marshalTuples(c.CriticalOptions), + Extensions: marshalTuples(c.Extensions), + Reserved: c.Reserved, + SignatureKey: c.SignatureKey.Marshal(), + } + if c.Signature != nil { + generic.Signature = Marshal(c.Signature) + } + genericBytes := Marshal(&generic) + keyBytes := c.Key.Marshal() + _, keyBytes, _ = parseString(keyBytes) + prefix := Marshal(&struct { + Name string + Nonce []byte + Key []byte `ssh:"rest"` + }{c.Type(), c.Nonce, keyBytes}) + + result := make([]byte, 0, len(prefix)+len(genericBytes)) + result = append(result, prefix...) + result = append(result, genericBytes...) + return result +} + +// Type returns the certificate algorithm name. 
It is part of the PublicKey interface. +func (c *Certificate) Type() string { + certName, ok := certificateAlgo(c.Key.Type()) + if !ok { + panic("unknown certificate type for key type " + c.Key.Type()) + } + return certName +} + +// Verify verifies a signature against the certificate's public +// key. It is part of the PublicKey interface. +func (c *Certificate) Verify(data []byte, sig *Signature) error { + return c.Key.Verify(data, sig) +} + +func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { + format, in, ok := parseString(in) + if !ok { + return + } + + out = &Signature{ + Format: string(format), + } + + if out.Blob, in, ok = parseString(in); !ok { + return + } + + switch out.Format { + case KeyAlgoSKECDSA256, CertAlgoSKECDSA256v01, KeyAlgoSKED25519, CertAlgoSKED25519v01: + out.Rest = in + return out, nil, ok + } + + return out, in, ok +} + +func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { + sigBytes, rest, ok := parseString(in) + if !ok { + return + } + + out, trailing, ok := parseSignatureBody(sigBytes) + if !ok || len(trailing) > 0 { + return nil, nil, false + } + return +} diff --git a/tempfork/sshtest/ssh/certs_test.go b/tempfork/sshtest/ssh/certs_test.go new file mode 100644 index 000000000..6208bb37a --- /dev/null +++ b/tempfork/sshtest/ssh/certs_test.go @@ -0,0 +1,406 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "fmt" + "io" + "net" + "reflect" + "testing" + "time" + + "golang.org/x/crypto/ssh/testdata" +) + +func TestParseCert(t *testing.T) { + authKeyBytes := bytes.TrimSuffix(testdata.SSHCertificates["rsa"], []byte(" host.example.com\n")) + + key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes) + if err != nil { + t.Fatalf("ParseAuthorizedKey: %v", err) + } + if len(rest) > 0 { + t.Errorf("rest: got %q, want empty", rest) + } + + if _, ok := key.(*Certificate); !ok { + t.Fatalf("got %v (%T), want *Certificate", key, key) + } + + marshaled := MarshalAuthorizedKey(key) + // Before comparison, remove the trailing newline that + // MarshalAuthorizedKey adds. 
+ marshaled = marshaled[:len(marshaled)-1] + if !bytes.Equal(authKeyBytes, marshaled) { + t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes) + } +} + +// Cert generated by ssh-keygen OpenSSH_6.8p1 OS X 10.10.3 +// % ssh-keygen -s ca -I testcert -O source-address=192.168.1.0/24 -O force-command=/bin/sleep user.pub +// user.pub key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMN +// Critical Options: +// +// force-command /bin/sleep +// source-address 192.168.1.0/24 +// +// Extensions: +// +// permit-X11-forwarding +// permit-agent-forwarding +// permit-port-forwarding +// permit-pty +// permit-user-rc +const exampleSSHCertWithOptions = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgDyysCJY0XrO1n03EeRRoITnTPdjENFmWDs9X58PP3VUAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMNAAAAAAAAAAAAAAABAAAACHRlc3RjZXJ0AAAAAAAAAAAAAAAA//////////8AAABLAAAADWZvcmNlLWNvbW1hbmQAAAAOAAAACi9iaW4vc2xlZXAAAAAOc291cmNlLWFkZHJlc3MAAAASAAAADjE5Mi4xNjguMS4wLzI0AAAAggAAABVwZXJtaXQtWDExLWZvcndhcmRpbmcAAAAAAAAAF3Blcm1pdC1hZ2VudC1mb3J3YXJkaW5nAAAAAAAAABZwZXJtaXQtcG9ydC1mb3J3YXJkaW5nAAAAAAAAAApwZXJtaXQtcHR5AAAAAAAAAA5wZXJtaXQtdXNlci1yYwAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEAwU+c5ui5A8+J/CFpjW8wCa52bEODA808WWQDCSuTG/eMXNf59v9Y8Pk0F1E9dGCosSNyVcB/hacUrc6He+i97+HJCyKavBsE6GDxrjRyxYqAlfcOXi/IVmaUGiO8OQ39d4GHrjToInKvExSUeleQyH4Y4/e27T/pILAqPFL3fyrvMLT5qU9QyIt6zIpa7GBP5+urouNavMprV3zsfIqNBbWypinOQAw823a5wN+zwXnhZrgQiHZ/USG09Y6k98y1dTVz8YHlQVR4D3lpTAsKDKJ5hCH9WU4fdf+lU8OyNGaJ/vz0XNqxcToe1l4numLTnaoSuH89pHryjqurB7lJKwAAAQ8AAAAHc3NoLXJzYQAAAQCaHvUIoPL1zWUHIXLvu96/HU1s/i4CAW2IIEuGgxCUCiFj6vyTyYtgxQxcmbfZf6eaITlS6XJZa7Qq4iaFZh75C1DXTX8labXhRSD4E2t//AIP9MC1rtQC5xo6FmbQ+BoKcDskr+mNACcbRSxs3IL3bwCfWDnIw2WbVox9ZdcthJKk4UoCW4ix4QwdHw7zlddlz++fGEEVhmTbll1SUkycGApPFBsAYRTMupUJcYPIeReBI/m8XfkoMk99bV8ZJQTAd7OekHY2/48Ff53jLmyDjP7kNw1F8OaPtkFs6dGJXta4krmaekPy87j+35In5hFj7yoOqvSbmYUkeX70/GGQ` + +func TestParseCertWithOptions(t *testing.T) { + opts := map[string]string{ + "source-address": "192.168.1.0/24", + "force-command": "/bin/sleep", + } + exts := map[string]string{ + "permit-X11-forwarding": "", + "permit-agent-forwarding": "", + "permit-port-forwarding": "", + "permit-pty": "", + "permit-user-rc": "", + } + authKeyBytes := []byte(exampleSSHCertWithOptions) + + key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes) + if err != nil { + t.Fatalf("ParseAuthorizedKey: %v", err) + } + if len(rest) > 0 { + t.Errorf("rest: got %q, want empty", rest) + } + cert, ok := key.(*Certificate) + if !ok { + t.Fatalf("got %v (%T), want *Certificate", key, key) + } + if !reflect.DeepEqual(cert.CriticalOptions, opts) { + t.Errorf("unexpected critical options - got %v, want %v", cert.CriticalOptions, opts) + } + if !reflect.DeepEqual(cert.Extensions, exts) { + t.Errorf("unexpected Extensions - got %v, want %v", cert.Extensions, exts) + } + 
marshaled := MarshalAuthorizedKey(key) + // Before comparison, remove the trailing newline that + // MarshalAuthorizedKey adds. + marshaled = marshaled[:len(marshaled)-1] + if !bytes.Equal(authKeyBytes, marshaled) { + t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes) + } +} + +func TestValidateCert(t *testing.T) { + key, _, _, _, err := ParseAuthorizedKey(testdata.SSHCertificates["rsa-user-testcertificate"]) + if err != nil { + t.Fatalf("ParseAuthorizedKey: %v", err) + } + validCert, ok := key.(*Certificate) + if !ok { + t.Fatalf("got %v (%T), want *Certificate", key, key) + } + checker := CertChecker{} + checker.IsUserAuthority = func(k PublicKey) bool { + return bytes.Equal(k.Marshal(), validCert.SignatureKey.Marshal()) + } + + if err := checker.CheckCert("testcertificate", validCert); err != nil { + t.Errorf("Unable to validate certificate: %v", err) + } + invalidCert := &Certificate{ + Key: testPublicKeys["rsa"], + SignatureKey: testPublicKeys["ecdsa"], + ValidBefore: CertTimeInfinity, + Signature: &Signature{}, + } + if err := checker.CheckCert("testcertificate", invalidCert); err == nil { + t.Error("Invalid cert signature passed validation") + } +} + +func TestValidateCertTime(t *testing.T) { + cert := Certificate{ + ValidPrincipals: []string{"user"}, + Key: testPublicKeys["rsa"], + ValidAfter: 50, + ValidBefore: 100, + } + + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + + for ts, ok := range map[int64]bool{ + 25: false, + 50: true, + 99: true, + 100: false, + 125: false, + } { + checker := CertChecker{ + Clock: func() time.Time { return time.Unix(ts, 0) }, + } + checker.IsUserAuthority = func(k PublicKey) bool { + return bytes.Equal(k.Marshal(), + testPublicKeys["ecdsa"].Marshal()) + } + + if v := checker.CheckCert("user", &cert); (v == nil) != ok { + t.Errorf("Authenticate(%d): %v", ts, v) + } + } +} + +// TODO(hanwen): tests for +// +// host keys: +// * fallbacks + +func TestHostKeyCert(t *testing.T) { + cert := &Certificate{ + ValidPrincipals: []string{"hostname", "hostname.domain", "otherhost"}, + Key: testPublicKeys["rsa"], + ValidBefore: CertTimeInfinity, + CertType: HostCert, + } + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + + checker := &CertChecker{ + IsHostAuthority: func(p PublicKey, addr string) bool { + return addr == "hostname:22" && bytes.Equal(testPublicKeys["ecdsa"].Marshal(), p.Marshal()) + }, + } + + certSigner, err := NewCertSigner(cert, testSigners["rsa"]) + if err != nil { + t.Errorf("NewCertSigner: %v", err) + } + + for _, test := range []struct { + addr string + succeed bool + certSignerAlgorithms []string // Empty means no algorithm restrictions. + clientHostKeyAlgorithms []string + }{ + {addr: "hostname:22", succeed: true}, + { + addr: "hostname:22", + succeed: true, + certSignerAlgorithms: []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512}, + clientHostKeyAlgorithms: []string{CertAlgoRSASHA512v01}, + }, + { + addr: "hostname:22", + succeed: false, + certSignerAlgorithms: []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512}, + clientHostKeyAlgorithms: []string{CertAlgoRSAv01}, + }, + { + addr: "hostname:22", + succeed: false, + certSignerAlgorithms: []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512}, + clientHostKeyAlgorithms: []string{KeyAlgoRSASHA512}, // Not a certificate algorithm. 
+ }, + {addr: "otherhost:22", succeed: false}, // The certificate is valid for 'otherhost' as hostname, but we only recognize the authority of the signer for the address 'hostname:22' + {addr: "lasthost:22", succeed: false}, + } { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + errc := make(chan error) + + go func() { + conf := ServerConfig{ + NoClientAuth: true, + } + if len(test.certSignerAlgorithms) > 0 { + mas, err := NewSignerWithAlgorithms(certSigner.(AlgorithmSigner), test.certSignerAlgorithms) + if err != nil { + errc <- err + return + } + conf.AddHostKey(mas) + } else { + conf.AddHostKey(certSigner) + } + _, _, _, err := NewServerConn(c1, &conf) + errc <- err + }() + + config := &ClientConfig{ + User: "user", + HostKeyCallback: checker.CheckHostKey, + HostKeyAlgorithms: test.clientHostKeyAlgorithms, + } + _, _, _, err = NewClientConn(c2, test.addr, config) + + if (err == nil) != test.succeed { + t.Errorf("NewClientConn(%q): %v", test.addr, err) + } + + err = <-errc + if (err == nil) != test.succeed { + t.Errorf("NewServerConn(%q): %v", test.addr, err) + } + } +} + +type legacyRSASigner struct { + Signer +} + +func (s *legacyRSASigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + v, ok := s.Signer.(AlgorithmSigner) + if !ok { + return nil, fmt.Errorf("invalid signer") + } + return v.SignWithAlgorithm(rand, data, KeyAlgoRSA) +} + +func TestCertTypes(t *testing.T) { + algorithmSigner, ok := testSigners["rsa"].(AlgorithmSigner) + if !ok { + t.Fatal("rsa test signer does not implement the AlgorithmSigner interface") + } + multiAlgoSignerSHA256, err := NewSignerWithAlgorithms(algorithmSigner, []string{KeyAlgoRSASHA256}) + if err != nil { + t.Fatalf("unable to create multi algorithm signer SHA256: %v", err) + } + // Algorithms are in order of preference, we expect rsa-sha2-512 to be used. 
+ multiAlgoSignerSHA512, err := NewSignerWithAlgorithms(algorithmSigner, []string{KeyAlgoRSASHA512, KeyAlgoRSASHA256}) + if err != nil { + t.Fatalf("unable to create multi algorithm signer SHA512: %v", err) + } + + var testVars = []struct { + name string + signer Signer + algo string + }{ + {CertAlgoECDSA256v01, testSigners["ecdsap256"], ""}, + {CertAlgoECDSA384v01, testSigners["ecdsap384"], ""}, + {CertAlgoECDSA521v01, testSigners["ecdsap521"], ""}, + {CertAlgoED25519v01, testSigners["ed25519"], ""}, + {CertAlgoRSAv01, testSigners["rsa"], KeyAlgoRSASHA256}, + {"legacyRSASigner", &legacyRSASigner{testSigners["rsa"]}, KeyAlgoRSA}, + {"multiAlgoRSASignerSHA256", multiAlgoSignerSHA256, KeyAlgoRSASHA256}, + {"multiAlgoRSASignerSHA512", multiAlgoSignerSHA512, KeyAlgoRSASHA512}, + {CertAlgoDSAv01, testSigners["dsa"], ""}, + } + + k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("error generating host key: %v", err) + } + + signer, err := NewSignerFromKey(k) + if err != nil { + t.Fatalf("error generating signer for ssh listener: %v", err) + } + + conf := &ServerConfig{ + PublicKeyCallback: func(c ConnMetadata, k PublicKey) (*Permissions, error) { + return new(Permissions), nil + }, + } + conf.AddHostKey(signer) + + for _, m := range testVars { + t.Run(m.name, func(t *testing.T) { + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewServerConn(c1, conf) + + priv := m.signer + if err != nil { + t.Fatalf("error generating ssh pubkey: %v", err) + } + + cert := &Certificate{ + CertType: UserCert, + Key: priv.PublicKey(), + } + cert.SignCert(rand.Reader, priv) + + certSigner, err := NewCertSigner(cert, priv) + if err != nil { + t.Fatalf("error generating cert signer: %v", err) + } + + if m.algo != "" && cert.Signature.Format != m.algo { + t.Errorf("expected %q signature format, got %q", m.algo, cert.Signature.Format) + } + + config := &ClientConfig{ + User: "user", + HostKeyCallback: func(h string, r net.Addr, k PublicKey) error { return nil }, + Auth: []AuthMethod{PublicKeys(certSigner)}, + } + + _, _, _, err = NewClientConn(c2, "", config) + if err != nil { + t.Fatalf("error connecting: %v", err) + } + }) + } +} + +func TestCertSignWithMultiAlgorithmSigner(t *testing.T) { + type testcase struct { + sigAlgo string + algorithms []string + } + cases := []testcase{ + { + sigAlgo: KeyAlgoRSA, + algorithms: []string{KeyAlgoRSA, KeyAlgoRSASHA512}, + }, + { + sigAlgo: KeyAlgoRSASHA256, + algorithms: []string{KeyAlgoRSASHA256, KeyAlgoRSA, KeyAlgoRSASHA512}, + }, + { + sigAlgo: KeyAlgoRSASHA512, + algorithms: []string{KeyAlgoRSASHA512, KeyAlgoRSASHA256}, + }, + } + + cert := &Certificate{ + Key: testPublicKeys["rsa"], + ValidBefore: CertTimeInfinity, + CertType: UserCert, + } + + for _, c := range cases { + t.Run(c.sigAlgo, func(t *testing.T) { + signer, err := NewSignerWithAlgorithms(testSigners["rsa"].(AlgorithmSigner), c.algorithms) + if err != nil { + t.Fatalf("NewSignerWithAlgorithms error: %v", err) + } + if err := cert.SignCert(rand.Reader, signer); err != nil { + t.Fatalf("SignCert error: %v", err) + } + if cert.Signature.Format != c.sigAlgo { + t.Fatalf("got signature format %q, want %q", cert.Signature.Format, c.sigAlgo) + } + }) + } +} diff --git a/tempfork/sshtest/ssh/channel.go b/tempfork/sshtest/ssh/channel.go new file mode 100644 index 000000000..cc0bb7ab6 --- /dev/null +++ b/tempfork/sshtest/ssh/channel.go @@ -0,0 +1,645 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "sync" +) + +const ( + minPacketLength = 9 + // channelMaxPacket contains the maximum number of bytes that will be + // sent in a single packet. As per RFC 4253, section 6.1, 32k is also + // the minimum. + channelMaxPacket = 1 << 15 + // We follow OpenSSH here. + channelWindowSize = 64 * channelMaxPacket +) + +// NewChannel represents an incoming request to a channel. It must either be +// accepted for use by calling Accept, or rejected by calling Reject. +type NewChannel interface { + // Accept accepts the channel creation request. It returns the Channel + // and a Go channel containing SSH requests. The Go channel must be + // serviced otherwise the Channel will hang. + Accept() (Channel, <-chan *Request, error) + + // Reject rejects the channel creation request. After calling + // this, no other methods on the Channel may be called. + Reject(reason RejectionReason, message string) error + + // ChannelType returns the type of the channel, as supplied by the + // client. + ChannelType() string + + // ExtraData returns the arbitrary payload for this channel, as supplied + // by the client. This data is specific to the channel type. + ExtraData() []byte +} + +// A Channel is an ordered, reliable, flow-controlled, duplex stream +// that is multiplexed over an SSH connection. +type Channel interface { + // Read reads up to len(data) bytes from the channel. + Read(data []byte) (int, error) + + // Write writes len(data) bytes to the channel. + Write(data []byte) (int, error) + + // Close signals end of channel use. No data may be sent after this + // call. + Close() error + + // CloseWrite signals the end of sending in-band + // data. Requests may still be sent, and the other side may + // still send data + CloseWrite() error + + // SendRequest sends a channel request. If wantReply is true, + // it will wait for a reply and return the result as a + // boolean, otherwise the return value will be false. Channel + // requests are out-of-band messages so they may be sent even + // if the data stream is closed or blocked by flow control. + // If the channel is closed before a reply is returned, io.EOF + // is returned. + SendRequest(name string, wantReply bool, payload []byte) (bool, error) + + // Stderr returns an io.ReadWriter that writes to this channel + // with the extended data type set to stderr. Stderr may + // safely be read and written from a different goroutine than + // Read and Write respectively. + Stderr() io.ReadWriter +} + +// Request is a request sent outside of the normal stream of +// data. Requests can either be specific to an SSH channel, or they +// can be global. +type Request struct { + Type string + WantReply bool + Payload []byte + + ch *channel + mux *mux +} + +// Reply sends a response to a request. It must be called for all requests +// where WantReply is true and is a no-op otherwise. The payload argument is +// ignored for replies to channel-specific requests. +func (r *Request) Reply(ok bool, payload []byte) error { + if !r.WantReply { + return nil + } + + if r.ch == nil { + return r.mux.ackRequest(ok, payload) + } + + return r.ch.ackRequest(ok) +} + +// RejectionReason is an enumeration used when rejecting channel creation +// requests. See RFC 4254, section 5.1. 
+type RejectionReason uint32 + +const ( + Prohibited RejectionReason = iota + 1 + ConnectionFailed + UnknownChannelType + ResourceShortage +) + +// String converts the rejection reason to human readable form. +func (r RejectionReason) String() string { + switch r { + case Prohibited: + return "administratively prohibited" + case ConnectionFailed: + return "connect failed" + case UnknownChannelType: + return "unknown channel type" + case ResourceShortage: + return "resource shortage" + } + return fmt.Sprintf("unknown reason %d", int(r)) +} + +func min(a uint32, b int) uint32 { + if a < uint32(b) { + return a + } + return uint32(b) +} + +type channelDirection uint8 + +const ( + channelInbound channelDirection = iota + channelOutbound +) + +// channel is an implementation of the Channel interface that works +// with the mux class. +type channel struct { + // R/O after creation + chanType string + extraData []byte + localId, remoteId uint32 + + // maxIncomingPayload and maxRemotePayload are the maximum + // payload sizes of normal and extended data packets for + // receiving and sending, respectively. The wire packet will + // be 9 or 13 bytes larger (excluding encryption overhead). + maxIncomingPayload uint32 + maxRemotePayload uint32 + + mux *mux + + // decided is set to true if an accept or reject message has been sent + // (for outbound channels) or received (for inbound channels). + decided bool + + // direction contains either channelOutbound, for channels created + // locally, or channelInbound, for channels created by the peer. + direction channelDirection + + // Pending internal channel messages. + msg chan interface{} + + // Since requests have no ID, there can be only one request + // with WantReply=true outstanding. This lock is held by a + // goroutine that has such an outgoing request pending. + sentRequestMu sync.Mutex + + incomingRequests chan *Request + + sentEOF bool + + // thread-safe data + remoteWin window + pending *buffer + extPending *buffer + + // windowMu protects myWindow, the flow-control window, and myConsumed, + // the number of bytes consumed since we last increased myWindow + windowMu sync.Mutex + myWindow uint32 + myConsumed uint32 + + // writeMu serializes calls to mux.conn.writePacket() and + // protects sentClose and packetPool. This mutex must be + // different from windowMu, as writePacket can block if there + // is a key exchange pending. + writeMu sync.Mutex + sentClose bool + + // packetPool has a buffer for each extended channel ID to + // save allocations during writes. + packetPool map[uint32][]byte +} + +// writePacket sends a packet. If the packet is a channel close, it updates +// sentClose. This method takes the lock c.writeMu. +func (ch *channel) writePacket(packet []byte) error { + ch.writeMu.Lock() + if ch.sentClose { + ch.writeMu.Unlock() + return io.EOF + } + ch.sentClose = (packet[0] == msgChannelClose) + err := ch.mux.conn.writePacket(packet) + ch.writeMu.Unlock() + return err +} + +func (ch *channel) sendMessage(msg interface{}) error { + if debugMux { + log.Printf("send(%d): %#v", ch.mux.chanList.offset, msg) + } + + p := Marshal(msg) + binary.BigEndian.PutUint32(p[1:], ch.remoteId) + return ch.writePacket(p) +} + +// WriteExtended writes data to a specific extended stream. These streams are +// used, for example, for stderr. 
+func (ch *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { + if ch.sentEOF { + return 0, io.EOF + } + // 1 byte message type, 4 bytes remoteId, 4 bytes data length + opCode := byte(msgChannelData) + headerLength := uint32(9) + if extendedCode > 0 { + headerLength += 4 + opCode = msgChannelExtendedData + } + + ch.writeMu.Lock() + packet := ch.packetPool[extendedCode] + // We don't remove the buffer from packetPool, so + // WriteExtended calls from different goroutines will be + // flagged as errors by the race detector. + ch.writeMu.Unlock() + + for len(data) > 0 { + space := min(ch.maxRemotePayload, len(data)) + if space, err = ch.remoteWin.reserve(space); err != nil { + return n, err + } + if want := headerLength + space; uint32(cap(packet)) < want { + packet = make([]byte, want) + } else { + packet = packet[:want] + } + + todo := data[:space] + + packet[0] = opCode + binary.BigEndian.PutUint32(packet[1:], ch.remoteId) + if extendedCode > 0 { + binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) + } + binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) + copy(packet[headerLength:], todo) + if err = ch.writePacket(packet); err != nil { + return n, err + } + + n += len(todo) + data = data[len(todo):] + } + + ch.writeMu.Lock() + ch.packetPool[extendedCode] = packet + ch.writeMu.Unlock() + + return n, err +} + +func (ch *channel) handleData(packet []byte) error { + headerLen := 9 + isExtendedData := packet[0] == msgChannelExtendedData + if isExtendedData { + headerLen = 13 + } + if len(packet) < headerLen { + // malformed data packet + return parseError(packet[0]) + } + + var extended uint32 + if isExtendedData { + extended = binary.BigEndian.Uint32(packet[5:]) + } + + length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) + if length == 0 { + return nil + } + if length > ch.maxIncomingPayload { + // TODO(hanwen): should send Disconnect? + return errors.New("ssh: incoming packet exceeds maximum payload size") + } + + data := packet[headerLen:] + if length != uint32(len(data)) { + return errors.New("ssh: wrong packet length") + } + + ch.windowMu.Lock() + if ch.myWindow < length { + ch.windowMu.Unlock() + // TODO(hanwen): should send Disconnect with reason? + return errors.New("ssh: remote side wrote too much") + } + ch.myWindow -= length + ch.windowMu.Unlock() + + if extended == 1 { + ch.extPending.write(data) + } else if extended > 0 { + // discard other extended data. + } else { + ch.pending.write(data) + } + return nil +} + +func (c *channel) adjustWindow(adj uint32) error { + c.windowMu.Lock() + // Since myConsumed and myWindow are managed on our side, and can never + // exceed the initial window setting, we don't worry about overflow. 
+ c.myConsumed += adj + var sendAdj uint32 + if (channelWindowSize-c.myWindow > 3*c.maxIncomingPayload) || + (c.myWindow < channelWindowSize/2) { + sendAdj = c.myConsumed + c.myConsumed = 0 + c.myWindow += sendAdj + } + c.windowMu.Unlock() + if sendAdj == 0 { + return nil + } + return c.sendMessage(windowAdjustMsg{ + AdditionalBytes: sendAdj, + }) +} + +func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { + switch extended { + case 1: + n, err = c.extPending.Read(data) + case 0: + n, err = c.pending.Read(data) + default: + return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) + } + + if n > 0 { + err = c.adjustWindow(uint32(n)) + // sendWindowAdjust can return io.EOF if the remote + // peer has closed the connection, however we want to + // defer forwarding io.EOF to the caller of Read until + // the buffer has been drained. + if n > 0 && err == io.EOF { + err = nil + } + } + + return n, err +} + +func (c *channel) close() { + c.pending.eof() + c.extPending.eof() + close(c.msg) + close(c.incomingRequests) + c.writeMu.Lock() + // This is not necessary for a normal channel teardown, but if + // there was another error, it is. + c.sentClose = true + c.writeMu.Unlock() + // Unblock writers. + c.remoteWin.close() +} + +// responseMessageReceived is called when a success or failure message is +// received on a channel to check that such a message is reasonable for the +// given channel. +func (ch *channel) responseMessageReceived() error { + if ch.direction == channelInbound { + return errors.New("ssh: channel response message received on inbound channel") + } + if ch.decided { + return errors.New("ssh: duplicate response received for channel") + } + ch.decided = true + return nil +} + +func (ch *channel) handlePacket(packet []byte) error { + switch packet[0] { + case msgChannelData, msgChannelExtendedData: + return ch.handleData(packet) + case msgChannelClose: + ch.sendMessage(channelCloseMsg{PeersID: ch.remoteId}) + ch.mux.chanList.remove(ch.localId) + ch.close() + return nil + case msgChannelEOF: + // RFC 4254 is mute on how EOF affects dataExt messages but + // it is logical to signal EOF at the same time. 
+ ch.extPending.eof() + ch.pending.eof() + return nil + } + + decoded, err := decode(packet) + if err != nil { + return err + } + + switch msg := decoded.(type) { + case *channelOpenFailureMsg: + if err := ch.responseMessageReceived(); err != nil { + return err + } + ch.mux.chanList.remove(msg.PeersID) + ch.msg <- msg + case *channelOpenConfirmMsg: + if err := ch.responseMessageReceived(); err != nil { + return err + } + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) + } + ch.remoteId = msg.MyID + ch.maxRemotePayload = msg.MaxPacketSize + ch.remoteWin.add(msg.MyWindow) + ch.msg <- msg + case *windowAdjustMsg: + if !ch.remoteWin.add(msg.AdditionalBytes) { + return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) + } + case *channelRequestMsg: + req := Request{ + Type: msg.Request, + WantReply: msg.WantReply, + Payload: msg.RequestSpecificData, + ch: ch, + } + + ch.incomingRequests <- &req + default: + ch.msg <- msg + } + return nil +} + +func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { + ch := &channel{ + remoteWin: window{Cond: newCond()}, + myWindow: channelWindowSize, + pending: newBuffer(), + extPending: newBuffer(), + direction: direction, + incomingRequests: make(chan *Request, chanSize), + msg: make(chan interface{}, chanSize), + chanType: chanType, + extraData: extraData, + mux: m, + packetPool: make(map[uint32][]byte), + } + ch.localId = m.chanList.add(ch) + return ch +} + +var errUndecided = errors.New("ssh: must Accept or Reject channel") +var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") + +type extChannel struct { + code uint32 + ch *channel +} + +func (e *extChannel) Write(data []byte) (n int, err error) { + return e.ch.WriteExtended(data, e.code) +} + +func (e *extChannel) Read(data []byte) (n int, err error) { + return e.ch.ReadExtended(data, e.code) +} + +func (ch *channel) Accept() (Channel, <-chan *Request, error) { + if ch.decided { + return nil, nil, errDecidedAlready + } + ch.maxIncomingPayload = channelMaxPacket + confirm := channelOpenConfirmMsg{ + PeersID: ch.remoteId, + MyID: ch.localId, + MyWindow: ch.myWindow, + MaxPacketSize: ch.maxIncomingPayload, + } + ch.decided = true + if err := ch.sendMessage(confirm); err != nil { + return nil, nil, err + } + + return ch, ch.incomingRequests, nil +} + +func (ch *channel) Reject(reason RejectionReason, message string) error { + if ch.decided { + return errDecidedAlready + } + reject := channelOpenFailureMsg{ + PeersID: ch.remoteId, + Reason: reason, + Message: message, + Language: "en", + } + ch.decided = true + return ch.sendMessage(reject) +} + +func (ch *channel) Read(data []byte) (int, error) { + if !ch.decided { + return 0, errUndecided + } + return ch.ReadExtended(data, 0) +} + +func (ch *channel) Write(data []byte) (int, error) { + if !ch.decided { + return 0, errUndecided + } + return ch.WriteExtended(data, 0) +} + +func (ch *channel) CloseWrite() error { + if !ch.decided { + return errUndecided + } + ch.sentEOF = true + return ch.sendMessage(channelEOFMsg{ + PeersID: ch.remoteId}) +} + +func (ch *channel) Close() error { + if !ch.decided { + return errUndecided + } + + return ch.sendMessage(channelCloseMsg{ + PeersID: ch.remoteId}) +} + +// Extended returns an io.ReadWriter that sends and receives data on the given, +// SSH extended stream. Such streams are used, for example, for stderr. 
+func (ch *channel) Extended(code uint32) io.ReadWriter { + if !ch.decided { + return nil + } + return &extChannel{code, ch} +} + +func (ch *channel) Stderr() io.ReadWriter { + return ch.Extended(1) +} + +func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + if !ch.decided { + return false, errUndecided + } + + if wantReply { + ch.sentRequestMu.Lock() + defer ch.sentRequestMu.Unlock() + } + + msg := channelRequestMsg{ + PeersID: ch.remoteId, + Request: name, + WantReply: wantReply, + RequestSpecificData: payload, + } + + if err := ch.sendMessage(msg); err != nil { + return false, err + } + + if wantReply { + m, ok := (<-ch.msg) + if !ok { + return false, io.EOF + } + switch m.(type) { + case *channelRequestFailureMsg: + return false, nil + case *channelRequestSuccessMsg: + return true, nil + default: + return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) + } + } + + return false, nil +} + +// ackRequest either sends an ack or nack to the channel request. +func (ch *channel) ackRequest(ok bool) error { + if !ch.decided { + return errUndecided + } + + var msg interface{} + if !ok { + msg = channelRequestFailureMsg{ + PeersID: ch.remoteId, + } + } else { + msg = channelRequestSuccessMsg{ + PeersID: ch.remoteId, + } + } + return ch.sendMessage(msg) +} + +func (ch *channel) ChannelType() string { + return ch.chanType +} + +func (ch *channel) ExtraData() []byte { + return ch.extraData +} diff --git a/tempfork/sshtest/ssh/cipher.go b/tempfork/sshtest/ssh/cipher.go new file mode 100644 index 000000000..0533786f4 --- /dev/null +++ b/tempfork/sshtest/ssh/cipher.go @@ -0,0 +1,789 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rc4" + "crypto/subtle" + "encoding/binary" + "errors" + "fmt" + "hash" + "io" + + "golang.org/x/crypto/chacha20" + "golang.org/x/crypto/poly1305" +) + +const ( + packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. + + // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations + // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC + // indicates implementations SHOULD be able to handle larger packet sizes, but then + // waffles on about reasonable limits. + // + // OpenSSH caps their maxPacket at 256kB so we choose to do + // the same. maxPacket is also used to ensure that uint32 + // length fields do not overflow, so it should remain well + // below 4G. + maxPacket = 256 * 1024 +) + +// noneCipher implements cipher.Stream and provides no encryption. It is used +// by the transport before the first key-exchange. 
+type noneCipher struct{} + +func (c noneCipher) XORKeyStream(dst, src []byte) { + copy(dst, src) +} + +func newAESCTR(key, iv []byte) (cipher.Stream, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + return cipher.NewCTR(c, iv), nil +} + +func newRC4(key, iv []byte) (cipher.Stream, error) { + return rc4.NewCipher(key) +} + +type cipherMode struct { + keySize int + ivSize int + create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) +} + +func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + stream, err := createFunc(key, iv) + if err != nil { + return nil, err + } + + var streamDump []byte + if skip > 0 { + streamDump = make([]byte, 512) + } + + for remainingToDump := skip; remainingToDump > 0; { + dumpThisTime := remainingToDump + if dumpThisTime > len(streamDump) { + dumpThisTime = len(streamDump) + } + stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) + remainingToDump -= dumpThisTime + } + + mac := macModes[algs.MAC].new(macKey) + return &streamPacketCipher{ + mac: mac, + etm: macModes[algs.MAC].etm, + macResult: make([]byte, mac.Size()), + cipher: stream, + }, nil + } +} + +// cipherModes documents properties of supported ciphers. Ciphers not included +// are not supported and will not be negotiated, even if explicitly requested in +// ClientConfig.Crypto.Ciphers. +var cipherModes = map[string]*cipherMode{ + // Ciphers from RFC 4344, which introduced many CTR-based ciphers. Algorithms + // are defined in the order specified in the RFC. + "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + + // Ciphers from RFC 4345, which introduces security-improved arcfour ciphers. + // They are defined in the order specified in the RFC. + "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, + "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, + + // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. + // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and + // RC4) has problems with weak keys, and should be used with caution." + // RFC 4345 introduces improved versions of Arcfour. + "arcfour": {16, 0, streamCipherMode(0, newRC4)}, + + // AEAD ciphers + gcm128CipherID: {16, 12, newGCMCipher}, + gcm256CipherID: {32, 12, newGCMCipher}, + chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, + + // CBC mode is insecure and so is not included in the default config. + // (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely + // needed, it's possible to specify a custom Config to enable it. + // You should expect that an active attacker can recover plaintext if + // you do. + aes128cbcID: {16, aes.BlockSize, newAESCBCCipher}, + + // 3des-cbc is insecure and is not included in the default + // config. + tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher}, +} + +// prefixLen is the length of the packet prefix that contains the packet length +// and number of padding bytes. +const prefixLen = 5 + +// streamPacketCipher is a packetCipher using a stream cipher. 
+type streamPacketCipher struct { + mac hash.Hash + cipher cipher.Stream + etm bool + + // The following members are to avoid per-packet allocations. + prefix [prefixLen]byte + seqNumBytes [4]byte + padding [2 * packetSizeMultiple]byte + packetData []byte + macResult []byte +} + +// readCipherPacket reads and decrypt a single packet from the reader argument. +func (s *streamPacketCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + if _, err := io.ReadFull(r, s.prefix[:]); err != nil { + return nil, err + } + + var encryptedPaddingLength [1]byte + if s.mac != nil && s.etm { + copy(encryptedPaddingLength[:], s.prefix[4:5]) + s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) + } else { + s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) + } + + length := binary.BigEndian.Uint32(s.prefix[0:4]) + paddingLength := uint32(s.prefix[4]) + + var macSize uint32 + if s.mac != nil { + s.mac.Reset() + binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) + s.mac.Write(s.seqNumBytes[:]) + if s.etm { + s.mac.Write(s.prefix[:4]) + s.mac.Write(encryptedPaddingLength[:]) + } else { + s.mac.Write(s.prefix[:]) + } + macSize = uint32(s.mac.Size()) + } + + if length <= paddingLength+1 { + return nil, errors.New("ssh: invalid packet length, packet too small") + } + + if length > maxPacket { + return nil, errors.New("ssh: invalid packet length, packet too large") + } + + // the maxPacket check above ensures that length-1+macSize + // does not overflow. + if uint32(cap(s.packetData)) < length-1+macSize { + s.packetData = make([]byte, length-1+macSize) + } else { + s.packetData = s.packetData[:length-1+macSize] + } + + if _, err := io.ReadFull(r, s.packetData); err != nil { + return nil, err + } + mac := s.packetData[length-1:] + data := s.packetData[:length-1] + + if s.mac != nil && s.etm { + s.mac.Write(data) + } + + s.cipher.XORKeyStream(data, data) + + if s.mac != nil { + if !s.etm { + s.mac.Write(data) + } + s.macResult = s.mac.Sum(s.macResult[:0]) + if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { + return nil, errors.New("ssh: MAC failure") + } + } + + return s.packetData[:length-paddingLength-1], nil +} + +// writeCipherPacket encrypts and sends a packet of data to the writer argument +func (s *streamPacketCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + if len(packet) > maxPacket { + return errors.New("ssh: packet too large") + } + + aadlen := 0 + if s.mac != nil && s.etm { + // packet length is not encrypted for EtM modes + aadlen = 4 + } + + paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple + if paddingLength < 4 { + paddingLength += packetSizeMultiple + } + + length := len(packet) + 1 + paddingLength + binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) + s.prefix[4] = byte(paddingLength) + padding := s.padding[:paddingLength] + if _, err := io.ReadFull(rand, padding); err != nil { + return err + } + + if s.mac != nil { + s.mac.Reset() + binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) + s.mac.Write(s.seqNumBytes[:]) + + if s.etm { + // For EtM algorithms, the packet length must stay unencrypted, + // but the following data (padding length) must be encrypted + s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) + } + + s.mac.Write(s.prefix[:]) + + if !s.etm { + // For non-EtM algorithms, the algorithm is applied on unencrypted data + s.mac.Write(packet) + s.mac.Write(padding) + } + } + + if !(s.mac != nil && s.etm) { + // For EtM algorithms, the padding length has already been encrypted + 
// and the packet length must remain unencrypted + s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) + } + + s.cipher.XORKeyStream(packet, packet) + s.cipher.XORKeyStream(padding, padding) + + if s.mac != nil && s.etm { + // For EtM algorithms, packet and padding must be encrypted + s.mac.Write(packet) + s.mac.Write(padding) + } + + if _, err := w.Write(s.prefix[:]); err != nil { + return err + } + if _, err := w.Write(packet); err != nil { + return err + } + if _, err := w.Write(padding); err != nil { + return err + } + + if s.mac != nil { + s.macResult = s.mac.Sum(s.macResult[:0]) + if _, err := w.Write(s.macResult); err != nil { + return err + } + } + + return nil +} + +type gcmCipher struct { + aead cipher.AEAD + prefix [4]byte + iv []byte + buf []byte +} + +func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + aead, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + + return &gcmCipher{ + aead: aead, + iv: iv, + }, nil +} + +const gcmTagSize = 16 + +func (c *gcmCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + // Pad out to multiple of 16 bytes. This is different from the + // stream cipher because that encrypts the length too. + padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) + if padding < 4 { + padding += packetSizeMultiple + } + + length := uint32(len(packet) + int(padding) + 1) + binary.BigEndian.PutUint32(c.prefix[:], length) + if _, err := w.Write(c.prefix[:]); err != nil { + return err + } + + if cap(c.buf) < int(length) { + c.buf = make([]byte, length) + } else { + c.buf = c.buf[:length] + } + + c.buf[0] = padding + copy(c.buf[1:], packet) + if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { + return err + } + c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) + if _, err := w.Write(c.buf); err != nil { + return err + } + c.incIV() + + return nil +} + +func (c *gcmCipher) incIV() { + for i := 4 + 7; i >= 4; i-- { + c.iv[i]++ + if c.iv[i] != 0 { + break + } + } +} + +func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + if _, err := io.ReadFull(r, c.prefix[:]); err != nil { + return nil, err + } + length := binary.BigEndian.Uint32(c.prefix[:]) + if length > maxPacket { + return nil, errors.New("ssh: max packet length exceeded") + } + + if cap(c.buf) < int(length+gcmTagSize) { + c.buf = make([]byte, length+gcmTagSize) + } else { + c.buf = c.buf[:length+gcmTagSize] + } + + if _, err := io.ReadFull(r, c.buf); err != nil { + return nil, err + } + + plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) + if err != nil { + return nil, err + } + c.incIV() + + if len(plain) == 0 { + return nil, errors.New("ssh: empty packet") + } + + padding := plain[0] + if padding < 4 { + // padding is a byte, so it automatically satisfies + // the maximum size, which is 255. + return nil, fmt.Errorf("ssh: illegal padding %d", padding) + } + + if int(padding+1) >= len(plain) { + return nil, fmt.Errorf("ssh: padding %d too large", padding) + } + plain = plain[1 : length-uint32(padding)] + return plain, nil +} + +// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 +type cbcCipher struct { + mac hash.Hash + macSize uint32 + decrypter cipher.BlockMode + encrypter cipher.BlockMode + + // The following members are to avoid per-packet allocations. 
+ seqNumBytes [4]byte + packetData []byte + macResult []byte + + // Amount of data we should still read to hide which + // verification error triggered. + oracleCamouflage uint32 +} + +func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + cbc := &cbcCipher{ + mac: macModes[algs.MAC].new(macKey), + decrypter: cipher.NewCBCDecrypter(c, iv), + encrypter: cipher.NewCBCEncrypter(c, iv), + packetData: make([]byte, 1024), + } + if cbc.mac != nil { + cbc.macSize = uint32(cbc.mac.Size()) + } + + return cbc, nil +} + +func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, key, iv, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := des.NewTripleDESCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, key, iv, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func maxUInt32(a, b int) uint32 { + if a > b { + return uint32(a) + } + return uint32(b) +} + +const ( + cbcMinPacketSizeMultiple = 8 + cbcMinPacketSize = 16 + cbcMinPaddingSize = 4 +) + +// cbcError represents a verification error that may leak information. +type cbcError string + +func (e cbcError) Error() string { return string(e) } + +func (c *cbcCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + p, err := c.readCipherPacketLeaky(seqNum, r) + if err != nil { + if _, ok := err.(cbcError); ok { + // Verification error: read a fixed amount of + // data, to make distinguishing between + // failing MAC and failing length check more + // difficult. + io.CopyN(io.Discard, r, int64(c.oracleCamouflage)) + } + } + return p, err +} + +func (c *cbcCipher) readCipherPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { + blockSize := c.decrypter.BlockSize() + + // Read the header, which will include some of the subsequent data in the + // case of block ciphers - this is copied back to the payload later. + // How many bytes of payload/padding will be read with this first read. + firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) + firstBlock := c.packetData[:firstBlockLength] + if _, err := io.ReadFull(r, firstBlock); err != nil { + return nil, err + } + + c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength + + c.decrypter.CryptBlocks(firstBlock, firstBlock) + length := binary.BigEndian.Uint32(firstBlock[:4]) + if length > maxPacket { + return nil, cbcError("ssh: packet too large") + } + if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { + // The minimum size of a packet is 16 (or the cipher block size, whichever + // is larger) bytes. + return nil, cbcError("ssh: packet too small") + } + // The length of the packet (including the length field but not the MAC) must + // be a multiple of the block size or 8, whichever is larger. 
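+	// Note that length is the packet_length field, which excludes the
+	// 4-byte length field itself, hence the +4 below for the full size.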
+ if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { + return nil, cbcError("ssh: invalid packet length multiple") + } + + paddingLength := uint32(firstBlock[4]) + if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { + return nil, cbcError("ssh: invalid packet length") + } + + // Positions within the c.packetData buffer: + macStart := 4 + length + paddingStart := macStart - paddingLength + + // Entire packet size, starting before length, ending at end of mac. + entirePacketSize := macStart + c.macSize + + // Ensure c.packetData is large enough for the entire packet data. + if uint32(cap(c.packetData)) < entirePacketSize { + // Still need to upsize and copy, but this should be rare at runtime, only + // on upsizing the packetData buffer. + c.packetData = make([]byte, entirePacketSize) + copy(c.packetData, firstBlock) + } else { + c.packetData = c.packetData[:entirePacketSize] + } + + n, err := io.ReadFull(r, c.packetData[firstBlockLength:]) + if err != nil { + return nil, err + } + c.oracleCamouflage -= uint32(n) + + remainingCrypted := c.packetData[firstBlockLength:macStart] + c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) + + mac := c.packetData[macStart:] + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData[:macStart]) + c.macResult = c.mac.Sum(c.macResult[:0]) + if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { + return nil, cbcError("ssh: MAC failure") + } + } + + return c.packetData[prefixLen:paddingStart], nil +} + +func (c *cbcCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) + + // Length of encrypted portion of the packet (header, payload, padding). + // Enforce minimum padding and packet size. + encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) + // Enforce block size. + encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize + + length := encLength - 4 + paddingLength := int(length) - (1 + len(packet)) + + // Overall buffer contains: header, payload, padding, mac. + // Space for the MAC is reserved in the capacity but not the slice length. + bufferSize := encLength + c.macSize + if uint32(cap(c.packetData)) < bufferSize { + c.packetData = make([]byte, encLength, bufferSize) + } else { + c.packetData = c.packetData[:encLength] + } + + p := c.packetData + + // Packet header. + binary.BigEndian.PutUint32(p, length) + p = p[4:] + p[0] = byte(paddingLength) + + // Payload. + p = p[1:] + copy(p, packet) + + // Padding. + p = p[len(packet):] + if _, err := io.ReadFull(rand, p); err != nil { + return err + } + + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData) + // The MAC is now appended into the capacity reserved for it earlier. 
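+		// hash.Hash.Sum appends to the provided slice, so this grows
+		// c.packetData into the spare capacity without reallocating.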
+ c.packetData = c.mac.Sum(c.packetData) + } + + c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) + + if _, err := w.Write(c.packetData); err != nil { + return err + } + + return nil +} + +const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" + +// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com +// AEAD, which is described here: +// +// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 +// +// the methods here also implement padding, which RFC 4253 Section 6 +// also requires of stream ciphers. +type chacha20Poly1305Cipher struct { + lengthKey [32]byte + contentKey [32]byte + buf []byte +} + +func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { + if len(key) != 64 { + panic(len(key)) + } + + c := &chacha20Poly1305Cipher{ + buf: make([]byte, 256), + } + + copy(c.contentKey[:], key[:32]) + copy(c.lengthKey[:], key[32:]) + return c, nil +} + +func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + nonce := make([]byte, 12) + binary.BigEndian.PutUint32(nonce[8:], seqNum) + s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) + if err != nil { + return nil, err + } + var polyKey, discardBuf [32]byte + s.XORKeyStream(polyKey[:], polyKey[:]) + s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes + + encryptedLength := c.buf[:4] + if _, err := io.ReadFull(r, encryptedLength); err != nil { + return nil, err + } + + var lenBytes [4]byte + ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) + if err != nil { + return nil, err + } + ls.XORKeyStream(lenBytes[:], encryptedLength) + + length := binary.BigEndian.Uint32(lenBytes[:]) + if length > maxPacket { + return nil, errors.New("ssh: invalid packet length, packet too large") + } + + contentEnd := 4 + length + packetEnd := contentEnd + poly1305.TagSize + if uint32(cap(c.buf)) < packetEnd { + c.buf = make([]byte, packetEnd) + copy(c.buf[:], encryptedLength) + } else { + c.buf = c.buf[:packetEnd] + } + + if _, err := io.ReadFull(r, c.buf[4:packetEnd]); err != nil { + return nil, err + } + + var mac [poly1305.TagSize]byte + copy(mac[:], c.buf[contentEnd:packetEnd]) + if !poly1305.Verify(&mac, c.buf[:contentEnd], &polyKey) { + return nil, errors.New("ssh: MAC failure") + } + + plain := c.buf[4:contentEnd] + s.XORKeyStream(plain, plain) + + if len(plain) == 0 { + return nil, errors.New("ssh: empty packet") + } + + padding := plain[0] + if padding < 4 { + // padding is a byte, so it automatically satisfies + // the maximum size, which is 255. + return nil, fmt.Errorf("ssh: illegal padding %d", padding) + } + + if int(padding)+1 >= len(plain) { + return nil, fmt.Errorf("ssh: padding %d too large", padding) + } + + plain = plain[1 : len(plain)-int(padding)] + + return plain, nil +} + +func (c *chacha20Poly1305Cipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error { + nonce := make([]byte, 12) + binary.BigEndian.PutUint32(nonce[8:], seqNum) + s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) + if err != nil { + return err + } + var polyKey, discardBuf [32]byte + s.XORKeyStream(polyKey[:], polyKey[:]) + s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes + + // There is no blocksize, so fall back to multiple of 8 byte + // padding, as described in RFC 4253, Sec 6. 
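+	// This local constant shadows the package-level packetSizeMultiple
+	// (16) for this cipher only.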
+ const packetSizeMultiple = 8 + + padding := packetSizeMultiple - (1+len(payload))%packetSizeMultiple + if padding < 4 { + padding += packetSizeMultiple + } + + // size (4 bytes), padding (1), payload, padding, tag. + totalLength := 4 + 1 + len(payload) + padding + poly1305.TagSize + if cap(c.buf) < totalLength { + c.buf = make([]byte, totalLength) + } else { + c.buf = c.buf[:totalLength] + } + + binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding)) + ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) + if err != nil { + return err + } + ls.XORKeyStream(c.buf, c.buf[:4]) + c.buf[4] = byte(padding) + copy(c.buf[5:], payload) + packetEnd := 5 + len(payload) + padding + if _, err := io.ReadFull(rand, c.buf[5+len(payload):packetEnd]); err != nil { + return err + } + + s.XORKeyStream(c.buf[4:], c.buf[4:packetEnd]) + + var mac [poly1305.TagSize]byte + poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey) + + copy(c.buf[packetEnd:], mac[:]) + + if _, err := w.Write(c.buf); err != nil { + return err + } + return nil +} diff --git a/tempfork/sshtest/ssh/cipher_test.go b/tempfork/sshtest/ssh/cipher_test.go new file mode 100644 index 000000000..fe339862c --- /dev/null +++ b/tempfork/sshtest/ssh/cipher_test.go @@ -0,0 +1,231 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto" + "crypto/rand" + "encoding/binary" + "io" + "testing" + + "golang.org/x/crypto/chacha20" + "golang.org/x/crypto/poly1305" +) + +func TestDefaultCiphersExist(t *testing.T) { + for _, cipherAlgo := range supportedCiphers { + if _, ok := cipherModes[cipherAlgo]; !ok { + t.Errorf("supported cipher %q is unknown", cipherAlgo) + } + } + for _, cipherAlgo := range preferredCiphers { + if _, ok := cipherModes[cipherAlgo]; !ok { + t.Errorf("preferred cipher %q is unknown", cipherAlgo) + } + } +} + +func TestPacketCiphers(t *testing.T) { + defaultMac := "hmac-sha2-256" + defaultCipher := "aes128-ctr" + for cipher := range cipherModes { + t.Run("cipher="+cipher, + func(t *testing.T) { testPacketCipher(t, cipher, defaultMac) }) + } + for mac := range macModes { + t.Run("mac="+mac, + func(t *testing.T) { testPacketCipher(t, defaultCipher, mac) }) + } +} + +func testPacketCipher(t *testing.T, cipher, mac string) { + kr := &kexResult{Hash: crypto.SHA1} + algs := directionAlgorithms{ + Cipher: cipher, + MAC: mac, + Compression: "none", + } + client, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Fatalf("newPacketCipher(client, %q, %q): %v", cipher, mac, err) + } + server, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Fatalf("newPacketCipher(client, %q, %q): %v", cipher, mac, err) + } + + want := "bla bla" + input := []byte(want) + buf := &bytes.Buffer{} + if err := client.writeCipherPacket(0, buf, rand.Reader, input); err != nil { + t.Fatalf("writeCipherPacket(%q, %q): %v", cipher, mac, err) + } + + packet, err := server.readCipherPacket(0, buf) + if err != nil { + t.Fatalf("readCipherPacket(%q, %q): %v", cipher, mac, err) + } + + if string(packet) != want { + t.Errorf("roundtrip(%q, %q): got %q, want %q", cipher, mac, packet, want) + } +} + +func TestCBCOracleCounterMeasure(t *testing.T) { + kr := &kexResult{Hash: crypto.SHA1} + algs := directionAlgorithms{ + Cipher: aes128cbcID, + MAC: "hmac-sha1", + Compression: "none", + } + client, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + 
t.Fatalf("newPacketCipher(client): %v", err) + } + + want := "bla bla" + input := []byte(want) + buf := &bytes.Buffer{} + if err := client.writeCipherPacket(0, buf, rand.Reader, input); err != nil { + t.Errorf("writeCipherPacket: %v", err) + } + + packetSize := buf.Len() + buf.Write(make([]byte, 2*maxPacket)) + + // We corrupt each byte, but this usually will only test the + // 'packet too large' or 'MAC failure' cases. + lastRead := -1 + for i := 0; i < packetSize; i++ { + server, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Fatalf("newPacketCipher(client): %v", err) + } + + fresh := &bytes.Buffer{} + fresh.Write(buf.Bytes()) + fresh.Bytes()[i] ^= 0x01 + + before := fresh.Len() + _, err = server.readCipherPacket(0, fresh) + if err == nil { + t.Errorf("corrupt byte %d: readCipherPacket succeeded ", i) + continue + } + if _, ok := err.(cbcError); !ok { + t.Errorf("corrupt byte %d: got %v (%T), want cbcError", i, err, err) + continue + } + + after := fresh.Len() + bytesRead := before - after + if bytesRead < maxPacket { + t.Errorf("corrupt byte %d: read %d bytes, want more than %d", i, bytesRead, maxPacket) + continue + } + + if i > 0 && bytesRead != lastRead { + t.Errorf("corrupt byte %d: read %d bytes, want %d bytes read", i, bytesRead, lastRead) + } + lastRead = bytesRead + } +} + +func TestCVE202143565(t *testing.T) { + tests := []struct { + cipher string + constructPacket func(packetCipher) io.Reader + }{ + { + cipher: gcm128CipherID, + constructPacket: func(client packetCipher) io.Reader { + internalCipher := client.(*gcmCipher) + b := &bytes.Buffer{} + prefix := [4]byte{} + if _, err := b.Write(prefix[:]); err != nil { + t.Fatal(err) + } + internalCipher.buf = internalCipher.aead.Seal(internalCipher.buf[:0], internalCipher.iv, []byte{}, prefix[:]) + if _, err := b.Write(internalCipher.buf); err != nil { + t.Fatal(err) + } + internalCipher.incIV() + + return b + }, + }, + { + cipher: chacha20Poly1305ID, + constructPacket: func(client packetCipher) io.Reader { + internalCipher := client.(*chacha20Poly1305Cipher) + b := &bytes.Buffer{} + + nonce := make([]byte, 12) + s, err := chacha20.NewUnauthenticatedCipher(internalCipher.contentKey[:], nonce) + if err != nil { + t.Fatal(err) + } + var polyKey, discardBuf [32]byte + s.XORKeyStream(polyKey[:], polyKey[:]) + s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes + + internalCipher.buf = make([]byte, 4+poly1305.TagSize) + binary.BigEndian.PutUint32(internalCipher.buf, 0) + ls, err := chacha20.NewUnauthenticatedCipher(internalCipher.lengthKey[:], nonce) + if err != nil { + t.Fatal(err) + } + ls.XORKeyStream(internalCipher.buf, internalCipher.buf[:4]) + if _, err := io.ReadFull(rand.Reader, internalCipher.buf[4:4]); err != nil { + t.Fatal(err) + } + + s.XORKeyStream(internalCipher.buf[4:], internalCipher.buf[4:4]) + + var tag [poly1305.TagSize]byte + poly1305.Sum(&tag, internalCipher.buf[:4], &polyKey) + + copy(internalCipher.buf[4:], tag[:]) + + if _, err := b.Write(internalCipher.buf); err != nil { + t.Fatal(err) + } + + return b + }, + }, + } + + for _, tc := range tests { + mac := "hmac-sha2-256" + + kr := &kexResult{Hash: crypto.SHA1} + algs := directionAlgorithms{ + Cipher: tc.cipher, + MAC: mac, + Compression: "none", + } + client, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Fatalf("newPacketCipher(client, %q, %q): %v", tc.cipher, mac, err) + } + server, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Fatalf("newPacketCipher(client, %q, %q): %v", 
tc.cipher, mac, err) + } + + b := tc.constructPacket(client) + + wantErr := "ssh: empty packet" + _, err = server.readCipherPacket(0, b) + if err == nil { + t.Fatalf("readCipherPacket(%q, %q): didn't fail with empty packet", tc.cipher, mac) + } else if err.Error() != wantErr { + t.Fatalf("readCipherPacket(%q, %q): unexpected error, got %q, want %q", tc.cipher, mac, err, wantErr) + } + } +} diff --git a/tempfork/sshtest/ssh/client.go b/tempfork/sshtest/ssh/client.go new file mode 100644 index 000000000..fd8c49749 --- /dev/null +++ b/tempfork/sshtest/ssh/client.go @@ -0,0 +1,282 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "net" + "os" + "sync" + "time" +) + +// Client implements a traditional SSH client that supports shells, +// subprocesses, TCP port/streamlocal forwarding and tunneled dialing. +type Client struct { + Conn + + handleForwardsOnce sync.Once // guards calling (*Client).handleForwards + + forwards forwardList // forwarded tcpip connections from the remote side + mu sync.Mutex + channelHandlers map[string]chan NewChannel +} + +// HandleChannelOpen returns a channel on which NewChannel requests +// for the given type are sent. If the type already is being handled, +// nil is returned. The channel is closed when the connection is closed. +func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { + c.mu.Lock() + defer c.mu.Unlock() + if c.channelHandlers == nil { + // The SSH channel has been closed. + c := make(chan NewChannel) + close(c) + return c + } + + ch := c.channelHandlers[channelType] + if ch != nil { + return nil + } + + ch = make(chan NewChannel, chanSize) + c.channelHandlers[channelType] = ch + return ch +} + +// NewClient creates a Client on top of the given connection. +func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { + conn := &Client{ + Conn: c, + channelHandlers: make(map[string]chan NewChannel, 1), + } + + go conn.handleGlobalRequests(reqs) + go conn.handleChannelOpens(chans) + go func() { + conn.Wait() + conn.forwards.closeAll() + }() + return conn +} + +// NewClientConn establishes an authenticated SSH connection using c +// as the underlying transport. The Request and NewChannel channels +// must be serviced or the connection will hang. +func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + if fullConf.HostKeyCallback == nil { + c.Close() + return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback") + } + + conn := &connection{ + sshConn: sshConn{conn: c, user: fullConf.User}, + } + + if err := conn.clientHandshake(addr, &fullConf); err != nil { + c.Close() + return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %w", err) + } + conn.mux = newMux(conn.transport) + return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil +} + +// clientHandshake performs the client side key exchange. See RFC 4253 Section +// 7. 
+func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { + if config.ClientVersion != "" { + c.clientVersion = []byte(config.ClientVersion) + } else { + c.clientVersion = []byte(packageVersion) + } + var err error + c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) + if err != nil { + return err + } + + c.transport = newClientTransport( + newTransport(c.sshConn.conn, config.Rand, true /* is client */), + c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) + if err := c.transport.waitSession(); err != nil { + return err + } + + c.sessionID = c.transport.getSessionID() + return c.clientAuthenticate(config) +} + +// verifyHostKeySignature verifies the host key obtained in the key exchange. +// algo is the negotiated algorithm, and may be a certificate type. +func verifyHostKeySignature(hostKey PublicKey, algo string, result *kexResult) error { + sig, rest, ok := parseSignatureBody(result.Signature) + if len(rest) > 0 || !ok { + return errors.New("ssh: signature parse error") + } + + if a := underlyingAlgo(algo); sig.Format != a { + return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, a) + } + + return hostKey.Verify(result.H, sig) +} + +// NewSession opens a new Session for this client. (A session is a remote +// execution of a program.) +func (c *Client) NewSession() (*Session, error) { + ch, in, err := c.OpenChannel("session", nil) + if err != nil { + return nil, err + } + return newSession(ch, in) +} + +func (c *Client) handleGlobalRequests(incoming <-chan *Request) { + for r := range incoming { + // This handles keepalive messages and matches + // the behaviour of OpenSSH. + r.Reply(false, nil) + } +} + +// handleChannelOpens channel open messages from the remote side. +func (c *Client) handleChannelOpens(in <-chan NewChannel) { + for ch := range in { + c.mu.Lock() + handler := c.channelHandlers[ch.ChannelType()] + c.mu.Unlock() + + if handler != nil { + handler <- ch + } else { + ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) + } + } + + c.mu.Lock() + for _, ch := range c.channelHandlers { + close(ch) + } + c.channelHandlers = nil + c.mu.Unlock() +} + +// Dial starts a client connection to the given SSH server. It is a +// convenience function that connects to the given network address, +// initiates the SSH handshake, and then sets up a Client. For access +// to incoming channels and requests, use net.Dial with NewClientConn +// instead. +func Dial(network, addr string, config *ClientConfig) (*Client, error) { + conn, err := net.DialTimeout(network, addr, config.Timeout) + if err != nil { + return nil, err + } + c, chans, reqs, err := NewClientConn(conn, addr, config) + if err != nil { + return nil, err + } + return NewClient(c, chans, reqs), nil +} + +// HostKeyCallback is the function type used for verifying server +// keys. A HostKeyCallback must return nil if the host key is OK, or +// an error to reject it. It receives the hostname as passed to Dial +// or NewClientConn. The remote address is the RemoteAddr of the +// net.Conn underlying the SSH connection. +type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error + +// BannerCallback is the function type used for treat the banner sent by +// the server. A BannerCallback receives the message sent by the remote server. +type BannerCallback func(message string) error + +// A ClientConfig structure is used to configure a Client. 
It must not be +// modified after having been passed to an SSH function. +type ClientConfig struct { + // Config contains configuration that is shared between clients and + // servers. + Config + + // User contains the username to authenticate as. + User string + + // Auth contains possible authentication methods to use with the + // server. Only the first instance of a particular RFC 4252 method will + // be used during authentication. + Auth []AuthMethod + + // HostKeyCallback is called during the cryptographic + // handshake to validate the server's host key. The client + // configuration must supply this callback for the connection + // to succeed. The functions InsecureIgnoreHostKey or + // FixedHostKey can be used for simplistic host key checks. + HostKeyCallback HostKeyCallback + + // BannerCallback is called during the SSH dance to display a custom + // server's message. The client configuration can supply this callback to + // handle it as wished. The function BannerDisplayStderr can be used for + // simplistic display on Stderr. + BannerCallback BannerCallback + + // ClientVersion contains the version identification string that will + // be used for the connection. If empty, a reasonable default is used. + ClientVersion string + + // HostKeyAlgorithms lists the public key algorithms that the client will + // accept from the server for host key authentication, in order of + // preference. If empty, a reasonable default is used. Any + // string returned from a PublicKey.Type method may be used, or + // any of the CertAlgo and KeyAlgo constants. + HostKeyAlgorithms []string + + // Timeout is the maximum amount of time for the TCP connection to establish. + // + // A Timeout of zero means no timeout. + Timeout time.Duration +} + +// InsecureIgnoreHostKey returns a function that can be used for +// ClientConfig.HostKeyCallback to accept any host key. It should +// not be used for production code. +func InsecureIgnoreHostKey() HostKeyCallback { + return func(hostname string, remote net.Addr, key PublicKey) error { + return nil + } +} + +type fixedHostKey struct { + key PublicKey +} + +func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error { + if f.key == nil { + return fmt.Errorf("ssh: required host key was nil") + } + if !bytes.Equal(key.Marshal(), f.key.Marshal()) { + return fmt.Errorf("ssh: host key mismatch") + } + return nil +} + +// FixedHostKey returns a function for use in +// ClientConfig.HostKeyCallback to accept only a specific host key. +func FixedHostKey(key PublicKey) HostKeyCallback { + hk := &fixedHostKey{key} + return hk.check +} + +// BannerDisplayStderr returns a function that can be used for +// ClientConfig.BannerCallback to display banners on os.Stderr. +func BannerDisplayStderr() BannerCallback { + return func(banner string) error { + _, err := os.Stderr.WriteString(banner) + + return err + } +} diff --git a/tempfork/sshtest/ssh/client_auth.go b/tempfork/sshtest/ssh/client_auth.go new file mode 100644 index 000000000..b86dde151 --- /dev/null +++ b/tempfork/sshtest/ssh/client_auth.go @@ -0,0 +1,796 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "strings" +) + +type authResult int + +const ( + authFailure authResult = iota + authPartialSuccess + authSuccess +) + +// clientAuthenticate authenticates with the remote server. See RFC 4252. 
+func (c *connection) clientAuthenticate(config *ClientConfig) error { + // initiate user auth session + if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { + return err + } + packet, err := c.transport.readPacket() + if err != nil { + return err + } + // The server may choose to send a SSH_MSG_EXT_INFO at this point (if we + // advertised willingness to receive one, which we always do) or not. See + // RFC 8308, Section 2.4. + extensions := make(map[string][]byte) + if len(packet) > 0 && packet[0] == msgExtInfo { + var extInfo extInfoMsg + if err := Unmarshal(packet, &extInfo); err != nil { + return err + } + payload := extInfo.Payload + for i := uint32(0); i < extInfo.NumExtensions; i++ { + name, rest, ok := parseString(payload) + if !ok { + return parseError(msgExtInfo) + } + value, rest, ok := parseString(rest) + if !ok { + return parseError(msgExtInfo) + } + extensions[string(name)] = value + payload = rest + } + packet, err = c.transport.readPacket() + if err != nil { + return err + } + } + var serviceAccept serviceAcceptMsg + if err := Unmarshal(packet, &serviceAccept); err != nil { + return err + } + + // during the authentication phase the client first attempts the "none" method + // then any untried methods suggested by the server. + var tried []string + var lastMethods []string + + sessionID := c.transport.getSessionID() + for auth := AuthMethod(new(noneAuth)); auth != nil; { + ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions) + if err != nil { + // On disconnect, return error immediately + if _, ok := err.(*disconnectMsg); ok { + return err + } + // We return the error later if there is no other method left to + // try. + ok = authFailure + } + if ok == authSuccess { + // success + return nil + } else if ok == authFailure { + if m := auth.method(); !contains(tried, m) { + tried = append(tried, m) + } + } + if methods == nil { + methods = lastMethods + } + lastMethods = methods + + auth = nil + + findNext: + for _, a := range config.Auth { + candidateMethod := a.method() + if contains(tried, candidateMethod) { + continue + } + for _, meth := range methods { + if meth == candidateMethod { + auth = a + break findNext + } + } + } + + if auth == nil && err != nil { + // We have an error and there are no other authentication methods to + // try, so we return it. + return err + } + } + return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried) +} + +func contains(list []string, e string) bool { + for _, s := range list { + if s == e { + return true + } + } + return false +} + +// An AuthMethod represents an instance of an RFC 4252 authentication method. +type AuthMethod interface { + // auth authenticates user over transport t. + // Returns true if authentication is successful. + // If authentication is not successful, a []string of alternative + // method names is returned. If the slice is nil, it will be ignored + // and the previous set of possible methods will be reused. + auth(session []byte, user string, p packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) + + // method returns the RFC 4252 method name. + method() string +} + +// "none" authentication, RFC 4252 section 5.2. 
+type noneAuth int + +func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { + if err := c.writePacket(Marshal(&userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: "none", + })); err != nil { + return authFailure, nil, err + } + + return handleAuthResponse(c) +} + +func (n *noneAuth) method() string { + return "none" +} + +// passwordCallback is an AuthMethod that fetches the password through +// a function call, e.g. by prompting the user. +type passwordCallback func() (password string, err error) + +func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { + type passwordAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + Reply bool + Password string + } + + pw, err := cb() + // REVIEW NOTE: is there a need to support skipping a password attempt? + // The program may only find out that the user doesn't have a password + // when prompting. + if err != nil { + return authFailure, nil, err + } + + if err := c.writePacket(Marshal(&passwordAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + Reply: false, + Password: pw, + })); err != nil { + return authFailure, nil, err + } + + return handleAuthResponse(c) +} + +func (cb passwordCallback) method() string { + return "password" +} + +// Password returns an AuthMethod using the given password. +func Password(secret string) AuthMethod { + return passwordCallback(func() (string, error) { return secret, nil }) +} + +// PasswordCallback returns an AuthMethod that uses a callback for +// fetching a password. +func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { + return passwordCallback(prompt) +} + +type publickeyAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + // HasSig indicates to the receiver packet that the auth request is signed and + // should be used for authentication of the request. + HasSig bool + Algoname string + PubKey []byte + // Sig is tagged with "rest" so Marshal will exclude it during + // validateKey + Sig []byte `ssh:"rest"` +} + +// publicKeyCallback is an AuthMethod that uses a set of key +// pairs for authentication. +type publicKeyCallback func() ([]Signer, error) + +func (cb publicKeyCallback) method() string { + return "publickey" +} + +func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiAlgorithmSigner, string, error) { + var as MultiAlgorithmSigner + keyFormat := signer.PublicKey().Type() + + // If the signer implements MultiAlgorithmSigner we use the algorithms it + // support, if it implements AlgorithmSigner we assume it supports all + // algorithms, otherwise only the key format one. + switch s := signer.(type) { + case MultiAlgorithmSigner: + as = s + case AlgorithmSigner: + as = &multiAlgorithmSigner{ + AlgorithmSigner: s, + supportedAlgorithms: algorithmsForKeyFormat(underlyingAlgo(keyFormat)), + } + default: + as = &multiAlgorithmSigner{ + AlgorithmSigner: algorithmSignerWrapper{signer}, + supportedAlgorithms: []string{underlyingAlgo(keyFormat)}, + } + } + + getFallbackAlgo := func() (string, error) { + // Fallback to use if there is no "server-sig-algs" extension or a + // common algorithm cannot be found. We use the public key format if the + // MultiAlgorithmSigner supports it, otherwise we return an error. 
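+		// keyFormat may be a certificate type; underlyingAlgo maps it to
+		// the plain signature algorithm the signer must support.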
+ if !contains(as.Algorithms(), underlyingAlgo(keyFormat)) { + return "", fmt.Errorf("ssh: no common public key signature algorithm, server only supports %q for key type %q, signer only supports %v", + underlyingAlgo(keyFormat), keyFormat, as.Algorithms()) + } + return keyFormat, nil + } + + extPayload, ok := extensions["server-sig-algs"] + if !ok { + // If there is no "server-sig-algs" extension use the fallback + // algorithm. + algo, err := getFallbackAlgo() + return as, algo, err + } + + // The server-sig-algs extension only carries underlying signature + // algorithm, but we are trying to select a protocol-level public key + // algorithm, which might be a certificate type. Extend the list of server + // supported algorithms to include the corresponding certificate algorithms. + serverAlgos := strings.Split(string(extPayload), ",") + for _, algo := range serverAlgos { + if certAlgo, ok := certificateAlgo(algo); ok { + serverAlgos = append(serverAlgos, certAlgo) + } + } + + // Filter algorithms based on those supported by MultiAlgorithmSigner. + var keyAlgos []string + for _, algo := range algorithmsForKeyFormat(keyFormat) { + if contains(as.Algorithms(), underlyingAlgo(algo)) { + keyAlgos = append(keyAlgos, algo) + } + } + + algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos) + if err != nil { + // If there is no overlap, return the fallback algorithm to support + // servers that fail to list all supported algorithms. + algo, err := getFallbackAlgo() + return as, algo, err + } + return as, algo, nil +} + +func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) { + // Authentication is performed by sending an enquiry to test if a key is + // acceptable to the remote. If the key is acceptable, the client will + // attempt to authenticate with the valid key. If not the client will repeat + // the process with the remaining keys. + + signers, err := cb() + if err != nil { + return authFailure, nil, err + } + var methods []string + var errSigAlgo error + + origSignersLen := len(signers) + for idx := 0; idx < len(signers); idx++ { + signer := signers[idx] + pub := signer.PublicKey() + as, algo, err := pickSignatureAlgorithm(signer, extensions) + if err != nil && errSigAlgo == nil { + // If we cannot negotiate a signature algorithm store the first + // error so we can return it to provide a more meaningful message if + // no other signers work. + errSigAlgo = err + continue + } + ok, err := validateKey(pub, algo, user, c) + if err != nil { + return authFailure, nil, err + } + // OpenSSH 7.2-7.7 advertises support for rsa-sha2-256 and rsa-sha2-512 + // in the "server-sig-algs" extension but doesn't support these + // algorithms for certificate authentication, so if the server rejects + // the key try to use the obtained algorithm as if "server-sig-algs" had + // not been implemented if supported from the algorithm signer. + if !ok && idx < origSignersLen && isRSACert(algo) && algo != CertAlgoRSAv01 { + if contains(as.Algorithms(), KeyAlgoRSA) { + // We retry using the compat algorithm after all signers have + // been tried normally. 
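+				// Appending extends the signers slice that the enclosing
+				// loop iterates over, so this compat signer is tried last.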
+ signers = append(signers, &multiAlgorithmSigner{ + AlgorithmSigner: as, + supportedAlgorithms: []string{KeyAlgoRSA}, + }) + } + } + if !ok { + continue + } + + pubKey := pub.Marshal() + data := buildDataSignedForAuth(session, userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + }, algo, pubKey) + sign, err := as.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) + if err != nil { + return authFailure, nil, err + } + + // manually wrap the serialized signature in a string + s := Marshal(sign) + sig := make([]byte, stringLength(len(s))) + marshalString(sig, s) + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + HasSig: true, + Algoname: algo, + PubKey: pubKey, + Sig: sig, + } + p := Marshal(&msg) + if err := c.writePacket(p); err != nil { + return authFailure, nil, err + } + var success authResult + success, methods, err = handleAuthResponse(c) + if err != nil { + return authFailure, nil, err + } + + // If authentication succeeds or the list of available methods does not + // contain the "publickey" method, do not attempt to authenticate with any + // other keys. According to RFC 4252 Section 7, the latter can occur when + // additional authentication methods are required. + if success == authSuccess || !contains(methods, cb.method()) { + return success, methods, err + } + } + + return authFailure, methods, errSigAlgo +} + +// validateKey validates the key provided is acceptable to the server. +func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, error) { + pubKey := key.Marshal() + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: "publickey", + HasSig: false, + Algoname: algo, + PubKey: pubKey, + } + if err := c.writePacket(Marshal(&msg)); err != nil { + return false, err + } + + return confirmKeyAck(key, c) +} + +func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { + pubKey := key.Marshal() + + for { + packet, err := c.readPacket() + if err != nil { + return false, err + } + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return false, err + } + case msgUserAuthPubKeyOk: + var msg userAuthPubKeyOkMsg + if err := Unmarshal(packet, &msg); err != nil { + return false, err + } + // According to RFC 4252 Section 7 the algorithm in + // SSH_MSG_USERAUTH_PK_OK should match that of the request but some + // servers send the key type instead. OpenSSH allows any algorithm + // that matches the public key, so we do the same. + // https://github.com/openssh/openssh-portable/blob/86bdd385/sshconnect2.c#L709 + if !contains(algorithmsForKeyFormat(key.Type()), msg.Algo) { + return false, nil + } + if !bytes.Equal(msg.PubKey, pubKey) { + return false, nil + } + return true, nil + case msgUserAuthFailure: + return false, nil + default: + return false, unexpectedMessageError(msgUserAuthPubKeyOk, packet[0]) + } + } +} + +// PublicKeys returns an AuthMethod that uses the given key +// pairs. +func PublicKeys(signers ...Signer) AuthMethod { + return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) +} + +// PublicKeysCallback returns an AuthMethod that runs the given +// function to obtain a list of key pairs. 
+func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { + return publicKeyCallback(getSigners) +} + +// handleAuthResponse returns whether the preceding authentication request succeeded +// along with a list of remaining authentication methods to try next and +// an error if an unexpected response was received. +func handleAuthResponse(c packetConn) (authResult, []string, error) { + gotMsgExtInfo := false + for { + packet, err := c.readPacket() + if err != nil { + return authFailure, nil, err + } + + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return authFailure, nil, err + } + case msgExtInfo: + // Ignore post-authentication RFC 8308 extensions, once. + if gotMsgExtInfo { + return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + gotMsgExtInfo = true + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + return authFailure, msg.Methods, nil + case msgUserAuthSuccess: + return authSuccess, nil, nil + default: + return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + } +} + +func handleBannerResponse(c packetConn, packet []byte) error { + var msg userAuthBannerMsg + if err := Unmarshal(packet, &msg); err != nil { + return err + } + + transport, ok := c.(*handshakeTransport) + if !ok { + return nil + } + + if transport.bannerCallback != nil { + return transport.bannerCallback(msg.Message) + } + + return nil +} + +// KeyboardInteractiveChallenge should print questions, optionally +// disabling echoing (e.g. for passwords), and return all the answers. +// Challenge may be called multiple times in a single session. After +// successful authentication, the server may send a challenge with no +// questions, for which the name and instruction messages should be +// printed. RFC 4256 section 3.3 details how the UI should behave for +// both CLI and GUI environments. +type KeyboardInteractiveChallenge func(name, instruction string, questions []string, echos []bool) (answers []string, err error) + +// KeyboardInteractive returns an AuthMethod using a prompt/response +// sequence controlled by the server. +func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { + return challenge +} + +func (cb KeyboardInteractiveChallenge) method() string { + return "keyboard-interactive" +} + +func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { + type initiateMsg struct { + User string `sshtype:"50"` + Service string + Method string + Language string + Submethods string + } + + if err := c.writePacket(Marshal(&initiateMsg{ + User: user, + Service: serviceSSH, + Method: "keyboard-interactive", + })); err != nil { + return authFailure, nil, err + } + + gotMsgExtInfo := false + gotUserAuthInfoRequest := false + for { + packet, err := c.readPacket() + if err != nil { + return authFailure, nil, err + } + + // like handleAuthResponse, but with less options. + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return authFailure, nil, err + } + continue + case msgExtInfo: + // Ignore post-authentication RFC 8308 extensions, once. 
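+			// A second SSH_MSG_EXT_INFO at this point is unexpected and
+			// is treated as a protocol error.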
+ if gotMsgExtInfo { + return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) + } + gotMsgExtInfo = true + continue + case msgUserAuthInfoRequest: + // OK + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + if !gotUserAuthInfoRequest { + return authFailure, msg.Methods, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) + } + return authFailure, msg.Methods, nil + case msgUserAuthSuccess: + return authSuccess, nil, nil + default: + return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) + } + + var msg userAuthInfoRequestMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + gotUserAuthInfoRequest = true + + // Manually unpack the prompt/echo pairs. + rest := msg.Prompts + var prompts []string + var echos []bool + for i := 0; i < int(msg.NumPrompts); i++ { + prompt, r, ok := parseString(rest) + if !ok || len(r) == 0 { + return authFailure, nil, errors.New("ssh: prompt format error") + } + prompts = append(prompts, string(prompt)) + echos = append(echos, r[0] != 0) + rest = r[1:] + } + + if len(rest) != 0 { + return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") + } + + answers, err := cb(msg.Name, msg.Instruction, prompts, echos) + if err != nil { + return authFailure, nil, err + } + + if len(answers) != len(prompts) { + return authFailure, nil, fmt.Errorf("ssh: incorrect number of answers from keyboard-interactive callback %d (expected %d)", len(answers), len(prompts)) + } + responseLength := 1 + 4 + for _, a := range answers { + responseLength += stringLength(len(a)) + } + serialized := make([]byte, responseLength) + p := serialized + p[0] = msgUserAuthInfoResponse + p = p[1:] + p = marshalUint32(p, uint32(len(answers))) + for _, a := range answers { + p = marshalString(p, []byte(a)) + } + + if err := c.writePacket(serialized); err != nil { + return authFailure, nil, err + } + } +} + +type retryableAuthMethod struct { + authMethod AuthMethod + maxTries int +} + +func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (ok authResult, methods []string, err error) { + for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { + ok, methods, err = r.authMethod.auth(session, user, c, rand, extensions) + if ok != authFailure || err != nil { // either success, partial success or error terminate + return ok, methods, err + } + } + return ok, methods, err +} + +func (r *retryableAuthMethod) method() string { + return r.authMethod.method() +} + +// RetryableAuthMethod is a decorator for other auth methods enabling them to +// be retried up to maxTries before considering that AuthMethod itself failed. +// If maxTries is <= 0, will retry indefinitely +// +// This is useful for interactive clients using challenge/response type +// authentication (e.g. Keyboard-Interactive, Password, etc) where the user +// could mistype their response resulting in the server issuing a +// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4 +// [keyboard-interactive]); Without this decorator, the non-retryable +// AuthMethod would be removed from future consideration, and never tried again +// (and so the user would never be able to retry their entry). 
+func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { + return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} +} + +// GSSAPIWithMICAuthMethod is an AuthMethod with "gssapi-with-mic" authentication. +// See RFC 4462 section 3 +// gssAPIClient is implementation of the GSSAPIClient interface, see the definition of the interface for details. +// target is the server host you want to log in to. +func GSSAPIWithMICAuthMethod(gssAPIClient GSSAPIClient, target string) AuthMethod { + if gssAPIClient == nil { + panic("gss-api client must be not nil with enable gssapi-with-mic") + } + return &gssAPIWithMICCallback{gssAPIClient: gssAPIClient, target: target} +} + +type gssAPIWithMICCallback struct { + gssAPIClient GSSAPIClient + target string +} + +func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { + m := &userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: g.method(), + } + // The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST. + // See RFC 4462 section 3.2. + m.Payload = appendU32(m.Payload, 1) + m.Payload = appendString(m.Payload, string(krb5OID)) + if err := c.writePacket(Marshal(m)); err != nil { + return authFailure, nil, err + } + // The server responds to the SSH_MSG_USERAUTH_REQUEST with either an + // SSH_MSG_USERAUTH_FAILURE if none of the mechanisms are supported or + // with an SSH_MSG_USERAUTH_GSSAPI_RESPONSE. + // See RFC 4462 section 3.3. + // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication,so I don't want to check + // selected mech if it is valid. + packet, err := c.readPacket() + if err != nil { + return authFailure, nil, err + } + userAuthGSSAPIResp := &userAuthGSSAPIResponse{} + if err := Unmarshal(packet, userAuthGSSAPIResp); err != nil { + return authFailure, nil, err + } + // Start the loop into the exchange token. + // See RFC 4462 section 3.4. + var token []byte + defer g.gssAPIClient.DeleteSecContext() + for { + // Initiates the establishment of a security context between the application and a remote peer. 
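+		// On each round the token received from the server (empty on the first
+		// round) is passed to InitSecContext; any output token is forwarded to
+		// the server, and the loop ends once the client reports that no further
+		// exchange is needed (RFC 4462 section 3.4).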
+ nextToken, needContinue, err := g.gssAPIClient.InitSecContext("host@"+g.target, token, false) + if err != nil { + return authFailure, nil, err + } + if len(nextToken) > 0 { + if err := c.writePacket(Marshal(&userAuthGSSAPIToken{ + Token: nextToken, + })); err != nil { + return authFailure, nil, err + } + } + if !needContinue { + break + } + packet, err = c.readPacket() + if err != nil { + return authFailure, nil, err + } + switch packet[0] { + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + return authFailure, msg.Methods, nil + case msgUserAuthGSSAPIError: + userAuthGSSAPIErrorResp := &userAuthGSSAPIError{} + if err := Unmarshal(packet, userAuthGSSAPIErrorResp); err != nil { + return authFailure, nil, err + } + return authFailure, nil, fmt.Errorf("GSS-API Error:\n"+ + "Major Status: %d\n"+ + "Minor Status: %d\n"+ + "Error Message: %s\n", userAuthGSSAPIErrorResp.MajorStatus, userAuthGSSAPIErrorResp.MinorStatus, + userAuthGSSAPIErrorResp.Message) + case msgUserAuthGSSAPIToken: + userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} + if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { + return authFailure, nil, err + } + token = userAuthGSSAPITokenReq.Token + } + } + // Binding Encryption Keys. + // See RFC 4462 section 3.5. + micField := buildMIC(string(session), user, "ssh-connection", "gssapi-with-mic") + micToken, err := g.gssAPIClient.GetMIC(micField) + if err != nil { + return authFailure, nil, err + } + if err := c.writePacket(Marshal(&userAuthGSSAPIMIC{ + MIC: micToken, + })); err != nil { + return authFailure, nil, err + } + return handleAuthResponse(c) +} + +func (g *gssAPIWithMICCallback) method() string { + return "gssapi-with-mic" +} diff --git a/tempfork/sshtest/ssh/client_auth_test.go b/tempfork/sshtest/ssh/client_auth_test.go new file mode 100644 index 000000000..ec27133a3 --- /dev/null +++ b/tempfork/sshtest/ssh/client_auth_test.go @@ -0,0 +1,1384 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto/rand" + "errors" + "fmt" + "io" + "log" + "net" + "os" + "runtime" + "strings" + "testing" +) + +type keyboardInteractive map[string]string + +func (cr keyboardInteractive) Challenge(user string, instruction string, questions []string, echos []bool) ([]string, error) { + var answers []string + for _, q := range questions { + answers = append(answers, cr[q]) + } + return answers, nil +} + +// reused internally by tests +var clientPassword = "tiger" + +// tryAuth runs a handshake with a given config against an SSH server +// with config serverConfig. Returns both client and server side errors. +func tryAuth(t *testing.T, config *ClientConfig) error { + err, _ := tryAuthBothSides(t, config, nil) + return err +} + +// tryAuthWithGSSAPIWithMICConfig runs a handshake with a given config against an SSH server +// with a given GSSAPIWithMICConfig and config serverConfig. Returns both client and server side errors. +func tryAuthWithGSSAPIWithMICConfig(t *testing.T, clientConfig *ClientConfig, gssAPIWithMICConfig *GSSAPIWithMICConfig) error { + err, _ := tryAuthBothSides(t, clientConfig, gssAPIWithMICConfig) + return err +} + +// tryAuthBothSides runs the handshake and returns the resulting errors from both sides of the connection. 
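+// Server-side errors are gathered through ServerConfig.AuthLogCallback, so a
+// test can inspect every authentication attempt the server observed rather
+// than only the final result.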
+func tryAuthBothSides(t *testing.T, config *ClientConfig, gssAPIWithMICConfig *GSSAPIWithMICConfig) (clientError error, serverAuthErrors []error) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + certChecker := CertChecker{ + IsUserAuthority: func(k PublicKey) bool { + return bytes.Equal(k.Marshal(), testPublicKeys["ecdsa"].Marshal()) + }, + UserKeyFallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) { + if conn.User() == "testuser" && bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) { + return nil, nil + } + + return nil, fmt.Errorf("pubkey for %q not acceptable", conn.User()) + }, + IsRevoked: func(c *Certificate) bool { + return c.Serial == 666 + }, + } + serverConfig := &ServerConfig{ + PasswordCallback: func(conn ConnMetadata, pass []byte) (*Permissions, error) { + if conn.User() == "testuser" && string(pass) == clientPassword { + return nil, nil + } + return nil, errors.New("password auth failed") + }, + PublicKeyCallback: certChecker.Authenticate, + KeyboardInteractiveCallback: func(conn ConnMetadata, challenge KeyboardInteractiveChallenge) (*Permissions, error) { + ans, err := challenge("user", + "instruction", + []string{"question1", "question2"}, + []bool{true, true}) + if err != nil { + return nil, err + } + ok := conn.User() == "testuser" && ans[0] == "answer1" && ans[1] == "answer2" + if ok { + challenge("user", "motd", nil, nil) + return nil, nil + } + return nil, errors.New("keyboard-interactive failed") + }, + GSSAPIWithMICConfig: gssAPIWithMICConfig, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + serverConfig.AuthLogCallback = func(conn ConnMetadata, method string, err error) { + serverAuthErrors = append(serverAuthErrors, err) + } + + go newServer(c1, serverConfig) + _, _, _, err = NewClientConn(c2, "", config) + return err, serverAuthErrors +} + +type loggingAlgorithmSigner struct { + used []string + AlgorithmSigner +} + +func (l *loggingAlgorithmSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + l.used = append(l.used, "[Sign]") + return l.AlgorithmSigner.Sign(rand, data) +} + +func (l *loggingAlgorithmSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + l.used = append(l.used, algorithm) + return l.AlgorithmSigner.SignWithAlgorithm(rand, data, algorithm) +} + +func TestClientAuthPublicKey(t *testing.T) { + signer := &loggingAlgorithmSigner{AlgorithmSigner: testSigners["rsa"].(AlgorithmSigner)} + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(signer), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } + if len(signer.used) != 1 || signer.used[0] != KeyAlgoRSASHA256 { + t.Errorf("unexpected Sign/SignWithAlgorithm calls: %q", signer.used) + } +} + +// TestClientAuthNoSHA2 tests a ssh-rsa Signer that doesn't implement AlgorithmSigner. +func TestClientAuthNoSHA2(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(&legacyRSASigner{testSigners["rsa"]}), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +// TestClientAuthThirdKey checks that the third configured can succeed. If we +// were to do three attempts for each key (rsa-sha2-256, rsa-sha2-512, ssh-rsa), +// we'd hit the six maximum attempts before reaching it. 
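+// The client is therefore expected to settle on a single signature algorithm
+// per key (for example via the server-sig-algs extension) so that the third
+// key is reached within the server's attempt budget.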
+func TestClientAuthThirdKey(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa-openssh-format"], + testSigners["rsa-openssh-format"], testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestAuthMethodPassword(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + Password(clientPassword), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestAuthMethodFallback(t *testing.T) { + var passwordCalled bool + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + PasswordCallback( + func() (string, error) { + passwordCalled = true + return "WRONG", nil + }), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } + + if passwordCalled { + t.Errorf("password auth tried before public-key auth.") + } +} + +func TestAuthMethodWrongPassword(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + Password("wrong"), + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestAuthMethodKeyboardInteractive(t *testing.T) { + answers := keyboardInteractive(map[string]string{ + "question1": "answer1", + "question2": "answer2", + }) + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + KeyboardInteractive(answers.Challenge), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestAuthMethodWrongKeyboardInteractive(t *testing.T) { + answers := keyboardInteractive(map[string]string{ + "question1": "answer1", + "question2": "WRONG", + }) + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + KeyboardInteractive(answers.Challenge), + }, + } + + if err := tryAuth(t, config); err == nil { + t.Fatalf("wrong answers should not have authenticated with KeyboardInteractive") + } +} + +// the mock server will only authenticate ssh-rsa keys +func TestAuthMethodInvalidPublicKey(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["dsa"]), + }, + } + + if err := tryAuth(t, config); err == nil { + t.Fatalf("dsa private key should not have authenticated with rsa public key") + } +} + +// the client should authenticate with the second key +func TestAuthMethodRSAandDSA(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["dsa"], testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, config); err != nil { + t.Fatalf("client could not authenticate with rsa key: %v", err) + } +} + +type invalidAlgSigner struct { + Signer +} + +func (s *invalidAlgSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + sig, err := s.Signer.Sign(rand, data) + if sig != nil { + sig.Format = "invalid" + } + return sig, err +} + +func TestMethodInvalidAlgorithm(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + 
PublicKeys(&invalidAlgSigner{testSigners["rsa"]}), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + err, serverErrors := tryAuthBothSides(t, config, nil) + if err == nil { + t.Fatalf("login succeeded") + } + + found := false + want := "algorithm \"invalid\"" + + var errStrings []string + for _, err := range serverErrors { + found = found || (err != nil && strings.Contains(err.Error(), want)) + errStrings = append(errStrings, err.Error()) + } + if !found { + t.Errorf("server got error %q, want substring %q", errStrings, want) + } +} + +func TestClientHMAC(t *testing.T) { + for _, mac := range supportedMACs { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + Config: Config{ + MACs: []string{mac}, + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, config); err != nil { + t.Fatalf("client could not authenticate with mac algo %s: %v", mac, err) + } + } +} + +// issue 4285. +func TestClientUnsupportedCipher(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(), + }, + Config: Config{ + Ciphers: []string{"aes128-cbc"}, // not currently supported + }, + } + if err := tryAuth(t, config); err == nil { + t.Errorf("expected no ciphers in common") + } +} + +func TestClientUnsupportedKex(t *testing.T) { + if os.Getenv("GO_BUILDER_NAME") != "" { + t.Skip("skipping known-flaky test on the Go build dashboard; see golang.org/issue/15198") + } + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(), + }, + Config: Config{ + KeyExchanges: []string{"non-existent-kex"}, + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, config); err == nil || !strings.Contains(err.Error(), "common algorithm") { + t.Errorf("got %v, expected 'common algorithm'", err) + } +} + +func TestClientLoginCert(t *testing.T) { + cert := &Certificate{ + Key: testPublicKeys["rsa"], + ValidBefore: CertTimeInfinity, + CertType: UserCert, + } + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + certSigner, err := NewCertSigner(cert, testSigners["rsa"]) + if err != nil { + t.Fatalf("NewCertSigner: %v", err) + } + + clientConfig := &ClientConfig{ + User: "user", + HostKeyCallback: InsecureIgnoreHostKey(), + } + clientConfig.Auth = append(clientConfig.Auth, PublicKeys(certSigner)) + + // should succeed + if err := tryAuth(t, clientConfig); err != nil { + t.Errorf("cert login failed: %v", err) + } + + // corrupted signature + cert.Signature.Blob[0]++ + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login passed with corrupted sig") + } + + // revoked + cert.Serial = 666 + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("revoked cert login succeeded") + } + cert.Serial = 1 + + // sign with wrong key + cert.SignCert(rand.Reader, testSigners["dsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login passed with non-authoritative key") + } + + // host cert + cert.CertType = HostCert + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login passed with wrong type") + } + cert.CertType = UserCert + + // principal specified + cert.ValidPrincipals = []string{"user"} + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err != nil { + t.Errorf("cert login failed: %v", err) + } + + // wrong principal specified + cert.ValidPrincipals = []string{"fred"} + 
cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login passed with wrong principal") + } + cert.ValidPrincipals = nil + + // added critical option + cert.CriticalOptions = map[string]string{"root-access": "yes"} + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login passed with unrecognized critical option") + } + + // allowed source address + cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42/24,::42/120"} + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err != nil { + t.Errorf("cert login with source-address failed: %v", err) + } + + // disallowed source address + cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42,::42"} + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login with source-address succeeded") + } +} + +func testPermissionsPassing(withPermissions bool, t *testing.T) { + serverConfig := &ServerConfig{ + PublicKeyCallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) { + if conn.User() == "nopermissions" { + return nil, nil + } + return &Permissions{}, nil + }, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + clientConfig := &ClientConfig{ + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if withPermissions { + clientConfig.User = "permissions" + } else { + clientConfig.User = "nopermissions" + } + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewClientConn(c2, "", clientConfig) + serverConn, err := newServer(c1, serverConfig) + if err != nil { + t.Fatal(err) + } + if p := serverConn.Permissions; (p != nil) != withPermissions { + t.Fatalf("withPermissions is %t, but Permissions object is %#v", withPermissions, p) + } +} + +func TestPermissionsPassing(t *testing.T) { + testPermissionsPassing(true, t) +} + +func TestNoPermissionsPassing(t *testing.T) { + testPermissionsPassing(false, t) +} + +func TestRetryableAuth(t *testing.T) { + n := 0 + passwords := []string{"WRONG1", "WRONG2"} + + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + RetryableAuthMethod(PasswordCallback(func() (string, error) { + p := passwords[n] + n++ + return p, nil + }), 2), + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } + if n != 2 { + t.Fatalf("Did not try all passwords") + } +} + +func ExampleRetryableAuthMethod() { + user := "testuser" + NumberOfPrompts := 3 + + // Normally this would be a callback that prompts the user to answer the + // provided questions + Cb := func(user, instruction string, questions []string, echos []bool) (answers []string, err error) { + return []string{"answer1", "answer2"}, nil + } + + config := &ClientConfig{ + HostKeyCallback: InsecureIgnoreHostKey(), + User: user, + Auth: []AuthMethod{ + RetryableAuthMethod(KeyboardInteractiveChallenge(Cb), NumberOfPrompts), + }, + } + + host := "mysshserver" + netConn, err := net.Dial("tcp", host) + if err != nil { + log.Fatal(err) + } + + sshConn, _, _, err := NewClientConn(netConn, host, config) + if err != nil { + log.Fatal(err) + } + _ = sshConn +} + +// Test if username is received on server side when NoClientAuth is used +func 
TestClientAuthNone(t *testing.T) { + user := "testuser" + serverConfig := &ServerConfig{ + NoClientAuth: true, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + clientConfig := &ClientConfig{ + User: user, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewClientConn(c2, "", clientConfig) + serverConn, err := newServer(c1, serverConfig) + if err != nil { + t.Fatalf("newServer: %v", err) + } + if serverConn.User() != user { + t.Fatalf("server: got %q, want %q", serverConn.User(), user) + } +} + +// Test if authentication attempts are limited on server when MaxAuthTries is set +func TestClientAuthMaxAuthTries(t *testing.T) { + user := "testuser" + + serverConfig := &ServerConfig{ + MaxAuthTries: 2, + PasswordCallback: func(conn ConnMetadata, pass []byte) (*Permissions, error) { + if conn.User() == "testuser" && string(pass) == "right" { + return nil, nil + } + return nil, errors.New("password auth failed") + }, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + expectedErr := fmt.Errorf("ssh: handshake failed: %v", &disconnectMsg{ + Reason: 2, + Message: "too many authentication failures", + }) + + for tries := 2; tries < 4; tries++ { + n := tries + clientConfig := &ClientConfig{ + User: user, + Auth: []AuthMethod{ + RetryableAuthMethod(PasswordCallback(func() (string, error) { + n-- + if n == 0 { + return "right", nil + } + return "wrong", nil + }), tries), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + errCh := make(chan error, 1) + + go func() { + _, err := newServer(c1, serverConfig) + errCh <- err + }() + _, _, _, cliErr := NewClientConn(c2, "", clientConfig) + srvErr := <-errCh + + if tries > serverConfig.MaxAuthTries { + if cliErr == nil { + t.Fatalf("client: got no error, want %s", expectedErr) + } else if cliErr.Error() != expectedErr.Error() { + t.Fatalf("client: got %s, want %s", err, expectedErr) + } + var authErr *ServerAuthError + if !errors.As(srvErr, &authErr) { + t.Errorf("expected ServerAuthError, got: %v", srvErr) + } + } else { + if cliErr != nil { + t.Fatalf("client: got %s, want no error", cliErr) + } + } + } +} + +// Test if authentication attempts are correctly limited on server +// when more public keys are provided then MaxAuthTries +func TestClientAuthMaxAuthTriesPublicKey(t *testing.T) { + signers := []Signer{} + for i := 0; i < 6; i++ { + signers = append(signers, testSigners["dsa"]) + } + + validConfig := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(append([]Signer{testSigners["rsa"]}, signers...)...), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, validConfig); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } + + expectedErr := fmt.Errorf("ssh: handshake failed: %v", &disconnectMsg{ + Reason: 2, + Message: "too many authentication failures", + }) + invalidConfig := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(append(signers, testSigners["rsa"])...), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, invalidConfig); err == nil { + t.Fatalf("client: got no error, want %s", expectedErr) + } else if err.Error() != expectedErr.Error() { + // On Windows we can see a WSAECONNABORTED error + // if the client writes another authentication request + // before the client goroutine reads the 
disconnection + // message. See issue 50805. + if runtime.GOOS == "windows" && strings.Contains(err.Error(), "wsarecv: An established connection was aborted") { + // OK. + } else { + t.Fatalf("client: got %s, want %s", err, expectedErr) + } + } +} + +// Test whether authentication errors are being properly logged if all +// authentication methods have been exhausted +func TestClientAuthErrorList(t *testing.T) { + publicKeyErr := errors.New("This is an error from PublicKeyCallback") + + clientConfig := &ClientConfig{ + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + serverConfig := &ServerConfig{ + PublicKeyCallback: func(_ ConnMetadata, _ PublicKey) (*Permissions, error) { + return nil, publicKeyErr + }, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewClientConn(c2, "", clientConfig) + _, err = newServer(c1, serverConfig) + if err == nil { + t.Fatal("newServer: got nil, expected errors") + } + + authErrs, ok := err.(*ServerAuthError) + if !ok { + t.Fatalf("errors: got %T, want *ssh.ServerAuthError", err) + } + for i, e := range authErrs.Errors { + switch i { + case 0: + if e != ErrNoAuth { + t.Fatalf("errors: got error %v, want ErrNoAuth", e) + } + case 1: + if e != publicKeyErr { + t.Fatalf("errors: got %v, want %v", e, publicKeyErr) + } + default: + t.Fatalf("errors: got %v, expected 2 errors", authErrs.Errors) + } + } +} + +func TestAuthMethodGSSAPIWithMIC(t *testing.T) { + type testcase struct { + config *ClientConfig + gssConfig *GSSAPIWithMICConfig + clientWantErr string + serverWantErr string + } + testcases := []*testcase{ + { + config: &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + GSSAPIWithMICAuthMethod( + &FakeClient{ + exchanges: []*exchange{ + { + outToken: "client-valid-token-1", + }, + { + expectedToken: "server-valid-token-1", + }, + }, + mic: []byte("valid-mic"), + maxRound: 2, + }, "testtarget", + ), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + }, + gssConfig: &GSSAPIWithMICConfig{ + AllowLogin: func(conn ConnMetadata, srcName string) (*Permissions, error) { + if srcName != conn.User()+"@DOMAIN" { + return nil, fmt.Errorf("srcName is %s, conn user is %s", srcName, conn.User()) + } + return nil, nil + }, + Server: &FakeServer{ + exchanges: []*exchange{ + { + outToken: "server-valid-token-1", + expectedToken: "client-valid-token-1", + }, + }, + maxRound: 1, + expectedMIC: []byte("valid-mic"), + srcName: "testuser@DOMAIN", + }, + }, + }, + { + config: &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + GSSAPIWithMICAuthMethod( + &FakeClient{ + exchanges: []*exchange{ + { + outToken: "client-valid-token-1", + }, + { + expectedToken: "server-valid-token-1", + }, + }, + mic: []byte("valid-mic"), + maxRound: 2, + }, "testtarget", + ), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + }, + gssConfig: &GSSAPIWithMICConfig{ + AllowLogin: func(conn ConnMetadata, srcName string) (*Permissions, error) { + return nil, fmt.Errorf("user is not allowed to login") + }, + Server: &FakeServer{ + exchanges: []*exchange{ + { + outToken: "server-valid-token-1", + expectedToken: "client-valid-token-1", + }, + }, + maxRound: 1, + expectedMIC: []byte("valid-mic"), + srcName: "testuser@DOMAIN", + }, + }, + serverWantErr: "user is not allowed to login", + clientWantErr: "ssh: handshake failed: ssh: unable to authenticate", + }, + { + config: &ClientConfig{ + User: "testuser", + Auth: 
[]AuthMethod{ + GSSAPIWithMICAuthMethod( + &FakeClient{ + exchanges: []*exchange{ + { + outToken: "client-valid-token-1", + }, + { + expectedToken: "server-valid-token-1", + }, + }, + mic: []byte("valid-mic"), + maxRound: 2, + }, "testtarget", + ), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + }, + gssConfig: &GSSAPIWithMICConfig{ + AllowLogin: func(conn ConnMetadata, srcName string) (*Permissions, error) { + if srcName != conn.User() { + return nil, fmt.Errorf("srcName is %s, conn user is %s", srcName, conn.User()) + } + return nil, nil + }, + Server: &FakeServer{ + exchanges: []*exchange{ + { + outToken: "server-invalid-token-1", + expectedToken: "client-valid-token-1", + }, + }, + maxRound: 1, + expectedMIC: []byte("valid-mic"), + srcName: "testuser@DOMAIN", + }, + }, + clientWantErr: "ssh: handshake failed: got \"server-invalid-token-1\", want token \"server-valid-token-1\"", + }, + { + config: &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + GSSAPIWithMICAuthMethod( + &FakeClient{ + exchanges: []*exchange{ + { + outToken: "client-valid-token-1", + }, + { + expectedToken: "server-valid-token-1", + }, + }, + mic: []byte("invalid-mic"), + maxRound: 2, + }, "testtarget", + ), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + }, + gssConfig: &GSSAPIWithMICConfig{ + AllowLogin: func(conn ConnMetadata, srcName string) (*Permissions, error) { + if srcName != conn.User() { + return nil, fmt.Errorf("srcName is %s, conn user is %s", srcName, conn.User()) + } + return nil, nil + }, + Server: &FakeServer{ + exchanges: []*exchange{ + { + outToken: "server-valid-token-1", + expectedToken: "client-valid-token-1", + }, + }, + maxRound: 1, + expectedMIC: []byte("valid-mic"), + srcName: "testuser@DOMAIN", + }, + }, + serverWantErr: "got MICToken \"invalid-mic\", want \"valid-mic\"", + clientWantErr: "ssh: handshake failed: ssh: unable to authenticate", + }, + } + + for i, c := range testcases { + clientErr, serverErrs := tryAuthBothSides(t, c.config, c.gssConfig) + if (c.clientWantErr == "") != (clientErr == nil) { + t.Fatalf("client got %v, want %s, case %d", clientErr, c.clientWantErr, i) + } + if (c.serverWantErr == "") != (len(serverErrs) == 2 && serverErrs[1] == nil || len(serverErrs) == 1) { + t.Fatalf("server got err %v, want %s", serverErrs, c.serverWantErr) + } + if c.clientWantErr != "" { + if clientErr != nil && !strings.Contains(clientErr.Error(), c.clientWantErr) { + t.Fatalf("client got %v, want %s, case %d", clientErr, c.clientWantErr, i) + } + } + found := false + var errStrings []string + if c.serverWantErr != "" { + for _, err := range serverErrs { + found = found || (err != nil && strings.Contains(err.Error(), c.serverWantErr)) + errStrings = append(errStrings, err.Error()) + } + if !found { + t.Errorf("server got error %q, want substring %q, case %d", errStrings, c.serverWantErr, i) + } + } + } +} + +func TestCompatibleAlgoAndSignatures(t *testing.T) { + type testcase struct { + algo string + sigFormat string + compatible bool + } + testcases := []*testcase{ + { + KeyAlgoRSA, + KeyAlgoRSA, + true, + }, + { + KeyAlgoRSA, + KeyAlgoRSASHA256, + true, + }, + { + KeyAlgoRSA, + KeyAlgoRSASHA512, + true, + }, + { + KeyAlgoRSASHA256, + KeyAlgoRSA, + true, + }, + { + KeyAlgoRSASHA512, + KeyAlgoRSA, + true, + }, + { + KeyAlgoRSASHA512, + KeyAlgoRSASHA256, + true, + }, + { + KeyAlgoRSASHA256, + KeyAlgoRSASHA512, + true, + }, + { + KeyAlgoRSASHA512, + KeyAlgoRSASHA512, + true, + }, + { + CertAlgoRSAv01, + KeyAlgoRSA, + true, + }, + { + CertAlgoRSAv01, + KeyAlgoRSASHA256, + true, 
+ }, + { + CertAlgoRSAv01, + KeyAlgoRSASHA512, + true, + }, + { + CertAlgoRSASHA256v01, + KeyAlgoRSASHA512, + true, + }, + { + CertAlgoRSASHA512v01, + KeyAlgoRSASHA512, + true, + }, + { + CertAlgoRSASHA512v01, + KeyAlgoRSASHA256, + true, + }, + { + CertAlgoRSASHA256v01, + CertAlgoRSAv01, + true, + }, + { + CertAlgoRSAv01, + CertAlgoRSASHA512v01, + true, + }, + { + KeyAlgoECDSA256, + KeyAlgoRSA, + false, + }, + { + KeyAlgoECDSA256, + KeyAlgoECDSA521, + false, + }, + { + KeyAlgoECDSA256, + KeyAlgoECDSA256, + true, + }, + { + KeyAlgoECDSA256, + KeyAlgoED25519, + false, + }, + { + KeyAlgoED25519, + KeyAlgoED25519, + true, + }, + } + + for _, c := range testcases { + if isAlgoCompatible(c.algo, c.sigFormat) != c.compatible { + t.Errorf("algorithm %q, signature format %q, expected compatible to be %t", c.algo, c.sigFormat, c.compatible) + } + } +} + +func TestPickSignatureAlgorithm(t *testing.T) { + type testcase struct { + name string + extensions map[string][]byte + } + cases := []testcase{ + { + name: "server with empty server-sig-algs", + extensions: map[string][]byte{ + "server-sig-algs": []byte(``), + }, + }, + { + name: "server with no server-sig-algs", + extensions: nil, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + signer, ok := testSigners["rsa"].(MultiAlgorithmSigner) + if !ok { + t.Fatalf("rsa test signer does not implement the MultiAlgorithmSigner interface") + } + // The signer supports the public key algorithm which is then returned. + _, algo, err := pickSignatureAlgorithm(signer, c.extensions) + if err != nil { + t.Fatalf("got %v, want no error", err) + } + if algo != signer.PublicKey().Type() { + t.Fatalf("got algo %q, want %q", algo, signer.PublicKey().Type()) + } + // Test a signer that uses a certificate algorithm as the public key + // type. + cert := &Certificate{ + CertType: UserCert, + Key: signer.PublicKey(), + } + cert.SignCert(rand.Reader, signer) + + certSigner, err := NewCertSigner(cert, signer) + if err != nil { + t.Fatalf("error generating cert signer: %v", err) + } + // The signer supports the public key algorithm and the + // public key format is a certificate type so the cerificate + // algorithm matching the key format must be returned + _, algo, err = pickSignatureAlgorithm(certSigner, c.extensions) + if err != nil { + t.Fatalf("got %v, want no error", err) + } + if algo != certSigner.PublicKey().Type() { + t.Fatalf("got algo %q, want %q", algo, certSigner.PublicKey().Type()) + } + signer, err = NewSignerWithAlgorithms(signer.(AlgorithmSigner), []string{KeyAlgoRSASHA512, KeyAlgoRSASHA256}) + if err != nil { + t.Fatalf("unable to create signer with algorithms: %v", err) + } + // The signer does not support the public key algorithm so an error + // is returned. + _, _, err = pickSignatureAlgorithm(signer, c.extensions) + if err == nil { + t.Fatal("got no error, no common public key signature algorithm error expected") + } + }) + } +} + +// configurablePublicKeyCallback is a public key callback that allows to +// configure the signature algorithm and format. This way we can emulate the +// behavior of buggy clients. 
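+// One example, exercised by TestClientAuthGPGAgentCompat below, is a client
+// that advertises the rsa-sha2-512 algorithm but returns a signature in the
+// legacy ssh-rsa format, as some gpg-agent versions do.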
+type configurablePublicKeyCallback struct { + signer AlgorithmSigner + signatureAlgo string + signatureFormat string +} + +func (cb configurablePublicKeyCallback) method() string { + return "publickey" +} + +func (cb configurablePublicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) { + pub := cb.signer.PublicKey() + + ok, err := validateKey(pub, cb.signatureAlgo, user, c) + if err != nil { + return authFailure, nil, err + } + if !ok { + return authFailure, nil, fmt.Errorf("invalid public key") + } + + pubKey := pub.Marshal() + data := buildDataSignedForAuth(session, userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + }, cb.signatureAlgo, pubKey) + sign, err := cb.signer.SignWithAlgorithm(rand, data, underlyingAlgo(cb.signatureFormat)) + if err != nil { + return authFailure, nil, err + } + + s := Marshal(sign) + sig := make([]byte, stringLength(len(s))) + marshalString(sig, s) + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + HasSig: true, + Algoname: cb.signatureAlgo, + PubKey: pubKey, + Sig: sig, + } + p := Marshal(&msg) + if err := c.writePacket(p); err != nil { + return authFailure, nil, err + } + var success authResult + success, methods, err := handleAuthResponse(c) + if err != nil { + return authFailure, nil, err + } + if success == authSuccess || !contains(methods, cb.method()) { + return success, methods, err + } + + return authFailure, methods, nil +} + +func TestPublicKeyAndAlgoCompatibility(t *testing.T) { + cert := &Certificate{ + Key: testPublicKeys["rsa"], + ValidBefore: CertTimeInfinity, + CertType: UserCert, + } + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + certSigner, err := NewCertSigner(cert, testSigners["rsa"]) + if err != nil { + t.Fatalf("NewCertSigner: %v", err) + } + + clientConfig := &ClientConfig{ + User: "user", + HostKeyCallback: InsecureIgnoreHostKey(), + Auth: []AuthMethod{ + configurablePublicKeyCallback{ + signer: certSigner.(AlgorithmSigner), + signatureAlgo: KeyAlgoRSASHA256, + signatureFormat: KeyAlgoRSASHA256, + }, + }, + } + if err := tryAuth(t, clientConfig); err == nil { + t.Error("cert login passed with incompatible public key type and algorithm") + } +} + +func TestClientAuthGPGAgentCompat(t *testing.T) { + clientConfig := &ClientConfig{ + User: "testuser", + HostKeyCallback: InsecureIgnoreHostKey(), + Auth: []AuthMethod{ + // algorithm rsa-sha2-512 and signature format ssh-rsa. + configurablePublicKeyCallback{ + signer: testSigners["rsa"].(AlgorithmSigner), + signatureAlgo: KeyAlgoRSASHA512, + signatureFormat: KeyAlgoRSA, + }, + }, + } + if err := tryAuth(t, clientConfig); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestCertAuthOpenSSHCompat(t *testing.T) { + cert := &Certificate{ + Key: testPublicKeys["rsa"], + ValidBefore: CertTimeInfinity, + CertType: UserCert, + } + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + certSigner, err := NewCertSigner(cert, testSigners["rsa"]) + if err != nil { + t.Fatalf("NewCertSigner: %v", err) + } + + clientConfig := &ClientConfig{ + User: "user", + HostKeyCallback: InsecureIgnoreHostKey(), + Auth: []AuthMethod{ + // algorithm ssh-rsa-cert-v01@openssh.com and signature format + // rsa-sha2-256. 
+ configurablePublicKeyCallback{ + signer: certSigner.(AlgorithmSigner), + signatureAlgo: CertAlgoRSAv01, + signatureFormat: KeyAlgoRSASHA256, + }, + }, + } + if err := tryAuth(t, clientConfig); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestKeyboardInteractiveAuthEarlyFail(t *testing.T) { + const maxAuthTries = 2 + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + // Start testserver + serverConfig := &ServerConfig{ + MaxAuthTries: maxAuthTries, + KeyboardInteractiveCallback: func(c ConnMetadata, + client KeyboardInteractiveChallenge) (*Permissions, error) { + // Fail keyboard-interactive authentication early before + // any prompt is sent to client. + return nil, errors.New("keyboard-interactive auth failed") + }, + PasswordCallback: func(c ConnMetadata, + pass []byte) (*Permissions, error) { + if string(pass) == clientPassword { + return nil, nil + } + return nil, errors.New("password auth failed") + }, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + serverDone := make(chan struct{}) + go func() { + defer func() { serverDone <- struct{}{} }() + conn, chans, reqs, err := NewServerConn(c2, serverConfig) + if err != nil { + return + } + _ = conn.Close() + + discarderDone := make(chan struct{}) + go func() { + defer func() { discarderDone <- struct{}{} }() + DiscardRequests(reqs) + }() + for newChannel := range chans { + newChannel.Reject(Prohibited, + "testserver not accepting requests") + } + + <-discarderDone + }() + + // Connect to testserver, expect KeyboardInteractive() to be not called, + // PasswordCallback() to be called and connection to succeed. + passwordCallbackCalled := false + clientConfig := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + RetryableAuthMethod(KeyboardInteractive(func(name, + instruction string, questions []string, + echos []bool) ([]string, error) { + t.Errorf("unexpected call to KeyboardInteractive()") + return []string{clientPassword}, nil + }), maxAuthTries), + RetryableAuthMethod(PasswordCallback(func() (secret string, + err error) { + t.Logf("PasswordCallback()") + passwordCallbackCalled = true + return clientPassword, nil + }), maxAuthTries), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + conn, _, _, err := NewClientConn(c1, "", clientConfig) + if err != nil { + t.Errorf("unexpected NewClientConn() error: %v", err) + } + if conn != nil { + conn.Close() + } + + // Wait for server to finish. + <-serverDone + + if !passwordCallbackCalled { + t.Errorf("expected PasswordCallback() to be called") + } +} diff --git a/tempfork/sshtest/ssh/client_test.go b/tempfork/sshtest/ssh/client_test.go new file mode 100644 index 000000000..2621f0ea5 --- /dev/null +++ b/tempfork/sshtest/ssh/client_test.go @@ -0,0 +1,367 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "bytes" + "crypto/rand" + "errors" + "fmt" + "net" + "strings" + "testing" +) + +func TestClientVersion(t *testing.T) { + for _, tt := range []struct { + name string + version string + multiLine string + wantErr bool + }{ + { + name: "default version", + version: packageVersion, + }, + { + name: "custom version", + version: "SSH-2.0-CustomClientVersionString", + }, + { + name: "good multi line version", + version: packageVersion, + multiLine: strings.Repeat("ignored\r\n", 20), + }, + { + name: "bad multi line version", + version: packageVersion, + multiLine: "bad multi line version", + wantErr: true, + }, + { + name: "long multi line version", + version: packageVersion, + multiLine: strings.Repeat("long multi line version\r\n", 50)[:256], + wantErr: true, + }, + } { + t.Run(tt.name, func(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + go func() { + if tt.multiLine != "" { + c1.Write([]byte(tt.multiLine)) + } + NewClientConn(c1, "", &ClientConfig{ + ClientVersion: tt.version, + HostKeyCallback: InsecureIgnoreHostKey(), + }) + c1.Close() + }() + conf := &ServerConfig{NoClientAuth: true} + conf.AddHostKey(testSigners["rsa"]) + conn, _, _, err := NewServerConn(c2, conf) + if err == nil == tt.wantErr { + t.Fatalf("got err %v; wantErr %t", err, tt.wantErr) + } + if tt.wantErr { + // Don't verify the version on an expected error. + return + } + if got := string(conn.ClientVersion()); got != tt.version { + t.Fatalf("got %q; want %q", got, tt.version) + } + }) + } +} + +func TestHostKeyCheck(t *testing.T) { + for _, tt := range []struct { + name string + wantError string + key PublicKey + }{ + {"no callback", "must specify HostKeyCallback", nil}, + {"correct key", "", testSigners["rsa"].PublicKey()}, + {"mismatch", "mismatch", testSigners["ecdsa"].PublicKey()}, + } { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + serverConf := &ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["rsa"]) + + go NewServerConn(c1, serverConf) + clientConf := ClientConfig{ + User: "user", + } + if tt.key != nil { + clientConf.HostKeyCallback = FixedHostKey(tt.key) + } + + _, _, _, err = NewClientConn(c2, "", &clientConf) + if err != nil { + if tt.wantError == "" || !strings.Contains(err.Error(), tt.wantError) { + t.Errorf("%s: got error %q, missing %q", tt.name, err.Error(), tt.wantError) + } + } else if tt.wantError != "" { + t.Errorf("%s: succeeded, but want error string %q", tt.name, tt.wantError) + } + } +} + +func TestVerifyHostKeySignature(t *testing.T) { + for _, tt := range []struct { + key string + signAlgo string + verifyAlgo string + wantError string + }{ + {"rsa", KeyAlgoRSA, KeyAlgoRSA, ""}, + {"rsa", KeyAlgoRSASHA256, KeyAlgoRSASHA256, ""}, + {"rsa", KeyAlgoRSA, KeyAlgoRSASHA512, `ssh: invalid signature algorithm "ssh-rsa", expected "rsa-sha2-512"`}, + {"ed25519", KeyAlgoED25519, KeyAlgoED25519, ""}, + } { + key := testSigners[tt.key].PublicKey() + s, ok := testSigners[tt.key].(AlgorithmSigner) + if !ok { + t.Fatalf("needed an AlgorithmSigner") + } + sig, err := s.SignWithAlgorithm(rand.Reader, []byte("test"), tt.signAlgo) + if err != nil { + t.Fatalf("couldn't sign: %q", err) + } + + b := bytes.Buffer{} + writeString(&b, []byte(sig.Format)) + writeString(&b, sig.Blob) + + result := kexResult{Signature: b.Bytes(), H: []byte("test")} + + err = verifyHostKeySignature(key, tt.verifyAlgo, &result) + 
if err != nil { + if tt.wantError == "" || !strings.Contains(err.Error(), tt.wantError) { + t.Errorf("got error %q, expecting %q", err.Error(), tt.wantError) + } + } else if tt.wantError != "" { + t.Errorf("succeeded, but want error string %q", tt.wantError) + } + } +} + +func TestBannerCallback(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + serverConf := &ServerConfig{ + PasswordCallback: func(conn ConnMetadata, password []byte) (*Permissions, error) { + return &Permissions{}, nil + }, + BannerCallback: func(conn ConnMetadata) string { + return "Hello World" + }, + } + serverConf.AddHostKey(testSigners["rsa"]) + go NewServerConn(c1, serverConf) + + var receivedBanner string + var bannerCount int + clientConf := ClientConfig{ + Auth: []AuthMethod{ + Password("123"), + }, + User: "user", + HostKeyCallback: InsecureIgnoreHostKey(), + BannerCallback: func(message string) error { + bannerCount++ + receivedBanner = message + return nil + }, + } + + _, _, _, err = NewClientConn(c2, "", &clientConf) + if err != nil { + t.Fatal(err) + } + + if bannerCount != 1 { + t.Errorf("got %d banners; want 1", bannerCount) + } + + expected := "Hello World" + if receivedBanner != expected { + t.Fatalf("got %s; want %s", receivedBanner, expected) + } +} + +func TestNewClientConn(t *testing.T) { + errHostKeyMismatch := errors.New("host key mismatch") + + for _, tt := range []struct { + name string + user string + simulateHostKeyMismatch HostKeyCallback + }{ + { + name: "good user field for ConnMetadata", + user: "testuser", + }, + { + name: "empty user field for ConnMetadata", + user: "", + }, + { + name: "host key mismatch", + user: "testuser", + simulateHostKeyMismatch: func(hostname string, remote net.Addr, key PublicKey) error { + return fmt.Errorf("%w: %s", errHostKeyMismatch, bytes.TrimSpace(MarshalAuthorizedKey(key))) + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + serverConf := &ServerConfig{ + PasswordCallback: func(conn ConnMetadata, password []byte) (*Permissions, error) { + return &Permissions{}, nil + }, + } + serverConf.AddHostKey(testSigners["rsa"]) + go NewServerConn(c1, serverConf) + + clientConf := &ClientConfig{ + User: tt.user, + Auth: []AuthMethod{ + Password("testpw"), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + if tt.simulateHostKeyMismatch != nil { + clientConf.HostKeyCallback = tt.simulateHostKeyMismatch + } + + clientConn, _, _, err := NewClientConn(c2, "", clientConf) + if err != nil { + if tt.simulateHostKeyMismatch != nil && errors.Is(err, errHostKeyMismatch) { + return + } + t.Fatal(err) + } + + if userGot := clientConn.User(); userGot != tt.user { + t.Errorf("got user %q; want user %q", userGot, tt.user) + } + }) + } +} + +func TestUnsupportedAlgorithm(t *testing.T) { + for _, tt := range []struct { + name string + config Config + wantError string + }{ + { + "unsupported KEX", + Config{ + KeyExchanges: []string{"unsupported"}, + }, + "no common algorithm", + }, + { + "unsupported and supported KEXs", + Config{ + KeyExchanges: []string{"unsupported", kexAlgoCurve25519SHA256}, + }, + "", + }, + { + "unsupported cipher", + Config{ + Ciphers: []string{"unsupported"}, + }, + "no common algorithm", + }, + { + "unsupported and supported ciphers", + Config{ + Ciphers: []string{"unsupported", chacha20Poly1305ID}, + }, + "", + }, + { + "unsupported MAC", + Config{ 
+ MACs: []string{"unsupported"}, + // MAC is used for non AAED ciphers. + Ciphers: []string{"aes256-ctr"}, + }, + "no common algorithm", + }, + { + "unsupported and supported MACs", + Config{ + MACs: []string{"unsupported", "hmac-sha2-256-etm@openssh.com"}, + // MAC is used for non AAED ciphers. + Ciphers: []string{"aes256-ctr"}, + }, + "", + }, + } { + t.Run(tt.name, func(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + serverConf := &ServerConfig{ + Config: tt.config, + PasswordCallback: func(conn ConnMetadata, password []byte) (*Permissions, error) { + return &Permissions{}, nil + }, + } + serverConf.AddHostKey(testSigners["rsa"]) + go NewServerConn(c1, serverConf) + + clientConf := &ClientConfig{ + User: "testuser", + Config: tt.config, + Auth: []AuthMethod{ + Password("testpw"), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + _, _, _, err = NewClientConn(c2, "", clientConf) + if err != nil { + if tt.wantError == "" || !strings.Contains(err.Error(), tt.wantError) { + t.Errorf("%s: got error %q, missing %q", tt.name, err.Error(), tt.wantError) + } + } else if tt.wantError != "" { + t.Errorf("%s: succeeded, but want error string %q", tt.name, tt.wantError) + } + }) + } +} diff --git a/tempfork/sshtest/ssh/common.go b/tempfork/sshtest/ssh/common.go new file mode 100644 index 000000000..7e9c2cbc6 --- /dev/null +++ b/tempfork/sshtest/ssh/common.go @@ -0,0 +1,476 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/rand" + "fmt" + "io" + "math" + "sync" + + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" +) + +// These are string constants in the SSH protocol. +const ( + compressionNone = "none" + serviceUserAuth = "ssh-userauth" + serviceSSH = "ssh-connection" +) + +// supportedCiphers lists ciphers we support but might not recommend. +var supportedCiphers = []string{ + "aes128-ctr", "aes192-ctr", "aes256-ctr", + "aes128-gcm@openssh.com", gcm256CipherID, + chacha20Poly1305ID, + "arcfour256", "arcfour128", "arcfour", + aes128cbcID, + tripledescbcID, +} + +// preferredCiphers specifies the default preference for ciphers. +var preferredCiphers = []string{ + "aes128-gcm@openssh.com", gcm256CipherID, + chacha20Poly1305ID, + "aes128-ctr", "aes192-ctr", "aes256-ctr", +} + +// supportedKexAlgos specifies the supported key-exchange algorithms in +// preference order. +var supportedKexAlgos = []string{ + kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, + // P384 and P521 are not constant-time yet, but since we don't + // reuse ephemeral keys, using them for ECDH should be OK. + kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, + kexAlgoDH14SHA256, kexAlgoDH16SHA512, kexAlgoDH14SHA1, + kexAlgoDH1SHA1, +} + +// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden +// for the server half. +var serverForbiddenKexAlgos = map[string]struct{}{ + kexAlgoDHGEXSHA1: {}, // server half implementation is only minimal to satisfy the automated tests + kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests +} + +// preferredKexAlgos specifies the default preference for key-exchange +// algorithms in preference order. The diffie-hellman-group16-sha512 algorithm +// is disabled by default because it is a bit slower than the others. 
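+//
+// Callers who want it (or any other supported, non-default algorithm) can opt
+// in explicitly; a sketch:
+//
+//	cfg := ClientConfig{
+//		Config: Config{
+//			KeyExchanges: []string{"diffie-hellman-group16-sha512", "curve25519-sha256"},
+//		},
+//	}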
+var preferredKexAlgos = []string{ + kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, + kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, + kexAlgoDH14SHA256, kexAlgoDH14SHA1, +} + +// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods +// of authenticating servers) in preference order. +var supportedHostKeyAlgos = []string{ + CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, + CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, + + KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, + KeyAlgoRSASHA256, KeyAlgoRSASHA512, + KeyAlgoRSA, KeyAlgoDSA, + + KeyAlgoED25519, +} + +// supportedMACs specifies a default set of MAC algorithms in preference order. +// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed +// because they have reached the end of their useful life. +var supportedMACs = []string{ + "hmac-sha2-256-etm@openssh.com", "hmac-sha2-512-etm@openssh.com", "hmac-sha2-256", "hmac-sha2-512", "hmac-sha1", "hmac-sha1-96", +} + +var supportedCompressions = []string{compressionNone} + +// hashFuncs keeps the mapping of supported signature algorithms to their +// respective hashes needed for signing and verification. +var hashFuncs = map[string]crypto.Hash{ + KeyAlgoRSA: crypto.SHA1, + KeyAlgoRSASHA256: crypto.SHA256, + KeyAlgoRSASHA512: crypto.SHA512, + KeyAlgoDSA: crypto.SHA1, + KeyAlgoECDSA256: crypto.SHA256, + KeyAlgoECDSA384: crypto.SHA384, + KeyAlgoECDSA521: crypto.SHA512, + // KeyAlgoED25519 doesn't pre-hash. + KeyAlgoSKECDSA256: crypto.SHA256, + KeyAlgoSKED25519: crypto.SHA256, +} + +// algorithmsForKeyFormat returns the supported signature algorithms for a given +// public key format (PublicKey.Type), in order of preference. See RFC 8332, +// Section 2. See also the note in sendKexInit on backwards compatibility. +func algorithmsForKeyFormat(keyFormat string) []string { + switch keyFormat { + case KeyAlgoRSA: + return []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA} + case CertAlgoRSAv01: + return []string{CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, CertAlgoRSAv01} + default: + return []string{keyFormat} + } +} + +// isRSA returns whether algo is a supported RSA algorithm, including certificate +// algorithms. +func isRSA(algo string) bool { + algos := algorithmsForKeyFormat(KeyAlgoRSA) + return contains(algos, underlyingAlgo(algo)) +} + +func isRSACert(algo string) bool { + _, ok := certKeyAlgoNames[algo] + if !ok { + return false + } + return isRSA(algo) +} + +// supportedPubKeyAuthAlgos specifies the supported client public key +// authentication algorithms. Note that this doesn't include certificate types +// since those use the underlying algorithm. This list is sent to the client if +// it supports the server-sig-algs extension. Order is irrelevant. +var supportedPubKeyAuthAlgos = []string{ + KeyAlgoED25519, + KeyAlgoSKED25519, KeyAlgoSKECDSA256, + KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, + KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA, + KeyAlgoDSA, +} + +// unexpectedMessageError results when the SSH message that we received didn't +// match what we wanted. +func unexpectedMessageError(expected, got uint8) error { + return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected) +} + +// parseError results from a malformed SSH message. 
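+// For example, parseError(msgUserAuthRequest) yields an error whose message
+// is "ssh: parse error in message type 50".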
+func parseError(tag uint8) error { + return fmt.Errorf("ssh: parse error in message type %d", tag) +} + +func findCommon(what string, client []string, server []string) (common string, err error) { + for _, c := range client { + for _, s := range server { + if c == s { + return c, nil + } + } + } + return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) +} + +// directionAlgorithms records algorithm choices in one direction (either read or write) +type directionAlgorithms struct { + Cipher string + MAC string + Compression string +} + +// rekeyBytes returns a rekeying intervals in bytes. +func (a *directionAlgorithms) rekeyBytes() int64 { + // According to RFC 4344 block ciphers should rekey after + // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is + // 128. + switch a.Cipher { + case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcm128CipherID, gcm256CipherID, aes128cbcID: + return 16 * (1 << 32) + + } + + // For others, stick with RFC 4253 recommendation to rekey after 1 Gb of data. + return 1 << 30 +} + +var aeadCiphers = map[string]bool{ + gcm128CipherID: true, + gcm256CipherID: true, + chacha20Poly1305ID: true, +} + +type algorithms struct { + kex string + hostKey string + w directionAlgorithms + r directionAlgorithms +} + +func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) { + result := &algorithms{} + + result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos) + if err != nil { + return + } + + result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) + if err != nil { + return + } + + stoc, ctos := &result.w, &result.r + if isClient { + ctos, stoc = stoc, ctos + } + + ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) + if err != nil { + return + } + + stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) + if err != nil { + return + } + + if !aeadCiphers[ctos.Cipher] { + ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) + if err != nil { + return + } + } + + if !aeadCiphers[stoc.Cipher] { + stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) + if err != nil { + return + } + } + + ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) + if err != nil { + return + } + + stoc.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) + if err != nil { + return + } + + return result, nil +} + +// If rekeythreshold is too small, we can't make any progress sending +// stuff. +const minRekeyThreshold uint64 = 256 + +// Config contains configuration data common to both ServerConfig and +// ClientConfig. +type Config struct { + // Rand provides the source of entropy for cryptographic + // primitives. If Rand is nil, the cryptographic random reader + // in package crypto/rand will be used. + Rand io.Reader + + // The maximum number of bytes sent or received after which a + // new key is negotiated. It must be at least 256. If + // unspecified, a size suitable for the chosen cipher is used. 
+ RekeyThreshold uint64 + + // The allowed key exchanges algorithms. If unspecified then a default set + // of algorithms is used. Unsupported values are silently ignored. + KeyExchanges []string + + // The allowed cipher algorithms. If unspecified then a sensible default is + // used. Unsupported values are silently ignored. + Ciphers []string + + // The allowed MAC algorithms. If unspecified then a sensible default is + // used. Unsupported values are silently ignored. + MACs []string +} + +// SetDefaults sets sensible values for unset fields in config. This is +// exported for testing: Configs passed to SSH functions are copied and have +// default values set automatically. +func (c *Config) SetDefaults() { + if c.Rand == nil { + c.Rand = rand.Reader + } + if c.Ciphers == nil { + c.Ciphers = preferredCiphers + } + var ciphers []string + for _, c := range c.Ciphers { + if cipherModes[c] != nil { + // Ignore the cipher if we have no cipherModes definition. + ciphers = append(ciphers, c) + } + } + c.Ciphers = ciphers + + if c.KeyExchanges == nil { + c.KeyExchanges = preferredKexAlgos + } + var kexs []string + for _, k := range c.KeyExchanges { + if kexAlgoMap[k] != nil { + // Ignore the KEX if we have no kexAlgoMap definition. + kexs = append(kexs, k) + } + } + c.KeyExchanges = kexs + + if c.MACs == nil { + c.MACs = supportedMACs + } + var macs []string + for _, m := range c.MACs { + if macModes[m] != nil { + // Ignore the MAC if we have no macModes definition. + macs = append(macs, m) + } + } + c.MACs = macs + + if c.RekeyThreshold == 0 { + // cipher specific default + } else if c.RekeyThreshold < minRekeyThreshold { + c.RekeyThreshold = minRekeyThreshold + } else if c.RekeyThreshold >= math.MaxInt64 { + // Avoid weirdness if somebody uses -1 as a threshold. + c.RekeyThreshold = math.MaxInt64 + } +} + +// buildDataSignedForAuth returns the data that is signed in order to prove +// possession of a private key. See RFC 4252, section 7. algo is the advertised +// algorithm, and may be a certificate type. +func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo string, pubKey []byte) []byte { + data := struct { + Session []byte + Type byte + User string + Service string + Method string + Sign bool + Algo string + PubKey []byte + }{ + sessionID, + msgUserAuthRequest, + req.User, + req.Service, + req.Method, + true, + algo, + pubKey, + } + return Marshal(data) +} + +func appendU16(buf []byte, n uint16) []byte { + return append(buf, byte(n>>8), byte(n)) +} + +func appendU32(buf []byte, n uint32) []byte { + return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) +} + +func appendU64(buf []byte, n uint64) []byte { + return append(buf, + byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), + byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) +} + +func appendInt(buf []byte, n int) []byte { + return appendU32(buf, uint32(n)) +} + +func appendString(buf []byte, s string) []byte { + buf = appendU32(buf, uint32(len(s))) + buf = append(buf, s...) + return buf +} + +func appendBool(buf []byte, b bool) []byte { + if b { + return append(buf, 1) + } + return append(buf, 0) +} + +// newCond is a helper to hide the fact that there is no usable zero +// value for sync.Cond. +func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } + +// window represents the buffer available to clients +// wishing to write to a channel. 
+type window struct { + *sync.Cond + win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 + writeWaiters int + closed bool +} + +// add adds win to the amount of window available +// for consumers. +func (w *window) add(win uint32) bool { + // a zero sized window adjust is a noop. + if win == 0 { + return true + } + w.L.Lock() + if w.win+win < win { + w.L.Unlock() + return false + } + w.win += win + // It is unusual that multiple goroutines would be attempting to reserve + // window space, but not guaranteed. Use broadcast to notify all waiters + // that additional window is available. + w.Broadcast() + w.L.Unlock() + return true +} + +// close sets the window to closed, so all reservations fail +// immediately. +func (w *window) close() { + w.L.Lock() + w.closed = true + w.Broadcast() + w.L.Unlock() +} + +// reserve reserves win from the available window capacity. +// If no capacity remains, reserve will block. reserve may +// return less than requested. +func (w *window) reserve(win uint32) (uint32, error) { + var err error + w.L.Lock() + w.writeWaiters++ + w.Broadcast() + for w.win == 0 && !w.closed { + w.Wait() + } + w.writeWaiters-- + if w.win < win { + win = w.win + } + w.win -= win + if w.closed { + err = io.EOF + } + w.L.Unlock() + return win, err +} + +// waitWriterBlocked waits until some goroutine is blocked for further +// writes. It is used in tests only. +func (w *window) waitWriterBlocked() { + w.Cond.L.Lock() + for w.writeWaiters == 0 { + w.Cond.Wait() + } + w.Cond.L.Unlock() +} diff --git a/tempfork/sshtest/ssh/common_test.go b/tempfork/sshtest/ssh/common_test.go new file mode 100644 index 000000000..a7beee8e8 --- /dev/null +++ b/tempfork/sshtest/ssh/common_test.go @@ -0,0 +1,176 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "reflect" + "testing" +) + +func TestFindAgreedAlgorithms(t *testing.T) { + initKex := func(k *kexInitMsg) { + if k.KexAlgos == nil { + k.KexAlgos = []string{"kex1"} + } + if k.ServerHostKeyAlgos == nil { + k.ServerHostKeyAlgos = []string{"hostkey1"} + } + if k.CiphersClientServer == nil { + k.CiphersClientServer = []string{"cipher1"} + + } + if k.CiphersServerClient == nil { + k.CiphersServerClient = []string{"cipher1"} + + } + if k.MACsClientServer == nil { + k.MACsClientServer = []string{"mac1"} + + } + if k.MACsServerClient == nil { + k.MACsServerClient = []string{"mac1"} + + } + if k.CompressionClientServer == nil { + k.CompressionClientServer = []string{"compression1"} + + } + if k.CompressionServerClient == nil { + k.CompressionServerClient = []string{"compression1"} + + } + if k.LanguagesClientServer == nil { + k.LanguagesClientServer = []string{"language1"} + + } + if k.LanguagesServerClient == nil { + k.LanguagesServerClient = []string{"language1"} + + } + } + + initDirAlgs := func(a *directionAlgorithms) { + if a.Cipher == "" { + a.Cipher = "cipher1" + } + if a.MAC == "" { + a.MAC = "mac1" + } + if a.Compression == "" { + a.Compression = "compression1" + } + } + + initAlgs := func(a *algorithms) { + if a.kex == "" { + a.kex = "kex1" + } + if a.hostKey == "" { + a.hostKey = "hostkey1" + } + initDirAlgs(&a.r) + initDirAlgs(&a.w) + } + + type testcase struct { + name string + clientIn, serverIn kexInitMsg + wantClient, wantServer algorithms + wantErr bool + } + + cases := []testcase{ + { + name: "standard", + }, + + { + name: "no common hostkey", + serverIn: kexInitMsg{ + ServerHostKeyAlgos: []string{"hostkey2"}, + }, + wantErr: true, + }, + + { + name: "no common kex", + serverIn: kexInitMsg{ + KexAlgos: []string{"kex2"}, + }, + wantErr: true, + }, + + { + name: "no common cipher", + serverIn: kexInitMsg{ + CiphersClientServer: []string{"cipher2"}, + }, + wantErr: true, + }, + + { + name: "client decides cipher", + serverIn: kexInitMsg{ + CiphersClientServer: []string{"cipher1", "cipher2"}, + CiphersServerClient: []string{"cipher2", "cipher3"}, + }, + clientIn: kexInitMsg{ + CiphersClientServer: []string{"cipher2", "cipher1"}, + CiphersServerClient: []string{"cipher3", "cipher2"}, + }, + wantClient: algorithms{ + r: directionAlgorithms{ + Cipher: "cipher3", + }, + w: directionAlgorithms{ + Cipher: "cipher2", + }, + }, + wantServer: algorithms{ + w: directionAlgorithms{ + Cipher: "cipher3", + }, + r: directionAlgorithms{ + Cipher: "cipher2", + }, + }, + }, + + // TODO(hanwen): fix and add tests for AEAD ignoring + // the MACs field + } + + for i := range cases { + initKex(&cases[i].clientIn) + initKex(&cases[i].serverIn) + initAlgs(&cases[i].wantClient) + initAlgs(&cases[i].wantServer) + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + serverAlgs, serverErr := findAgreedAlgorithms(false, &c.clientIn, &c.serverIn) + clientAlgs, clientErr := findAgreedAlgorithms(true, &c.clientIn, &c.serverIn) + + serverHasErr := serverErr != nil + clientHasErr := clientErr != nil + if c.wantErr != serverHasErr || c.wantErr != clientHasErr { + t.Fatalf("got client/server error (%v, %v), want hasError %v", + clientErr, serverErr, c.wantErr) + + } + if c.wantErr { + return + } + + if !reflect.DeepEqual(serverAlgs, &c.wantServer) { + t.Errorf("server: got algs %#v, want %#v", serverAlgs, &c.wantServer) + } + if !reflect.DeepEqual(clientAlgs, &c.wantClient) { + t.Errorf("server: got algs %#v, want %#v", clientAlgs, &c.wantClient) + } + }) + } +} diff 
--git a/tempfork/sshtest/ssh/connection.go b/tempfork/sshtest/ssh/connection.go new file mode 100644 index 000000000..8f345ee92 --- /dev/null +++ b/tempfork/sshtest/ssh/connection.go @@ -0,0 +1,143 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "fmt" + "net" +) + +// OpenChannelError is returned if the other side rejects an +// OpenChannel request. +type OpenChannelError struct { + Reason RejectionReason + Message string +} + +func (e *OpenChannelError) Error() string { + return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message) +} + +// ConnMetadata holds metadata for the connection. +type ConnMetadata interface { + // User returns the user ID for this connection. + User() string + + // SessionID returns the session hash, also denoted by H. + SessionID() []byte + + // ClientVersion returns the client's version string as hashed + // into the session ID. + ClientVersion() []byte + + // ServerVersion returns the server's version string as hashed + // into the session ID. + ServerVersion() []byte + + // RemoteAddr returns the remote address for this connection. + RemoteAddr() net.Addr + + // LocalAddr returns the local address for this connection. + LocalAddr() net.Addr +} + +// Conn represents an SSH connection for both server and client roles. +// Conn is the basis for implementing an application layer, such +// as ClientConn, which implements the traditional shell access for +// clients. +type Conn interface { + ConnMetadata + + // SendRequest sends a global request, and returns the + // reply. If wantReply is true, it returns the response status + // and payload. See also RFC 4254, section 4. + SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) + + // OpenChannel tries to open an channel. If the request is + // rejected, it returns *OpenChannelError. On success it returns + // the SSH Channel and a Go channel for incoming, out-of-band + // requests. The Go channel must be serviced, or the + // connection will hang. + OpenChannel(name string, data []byte) (Channel, <-chan *Request, error) + + // Close closes the underlying network connection + Close() error + + // Wait blocks until the connection has shut down, and returns the + // error causing the shutdown. + Wait() error + + // TODO(hanwen): consider exposing: + // RequestKeyChange + // Disconnect +} + +// DiscardRequests consumes and rejects all requests from the +// passed-in channel. +func DiscardRequests(in <-chan *Request) { + for req := range in { + if req.WantReply { + req.Reply(false, nil) + } + } +} + +// A connection represents an incoming connection. +type connection struct { + transport *handshakeTransport + sshConn + + // The connection protocol. + *mux +} + +func (c *connection) Close() error { + return c.sshConn.conn.Close() +} + +// sshConn provides net.Conn metadata, but disallows direct reads and +// writes. 
+type sshConn struct { + conn net.Conn + + user string + sessionID []byte + clientVersion []byte + serverVersion []byte +} + +func dup(src []byte) []byte { + dst := make([]byte, len(src)) + copy(dst, src) + return dst +} + +func (c *sshConn) User() string { + return c.user +} + +func (c *sshConn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *sshConn) Close() error { + return c.conn.Close() +} + +func (c *sshConn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c *sshConn) SessionID() []byte { + return dup(c.sessionID) +} + +func (c *sshConn) ClientVersion() []byte { + return dup(c.clientVersion) +} + +func (c *sshConn) ServerVersion() []byte { + return dup(c.serverVersion) +} diff --git a/tempfork/sshtest/ssh/doc.go b/tempfork/sshtest/ssh/doc.go new file mode 100644 index 000000000..f5d352fe3 --- /dev/null +++ b/tempfork/sshtest/ssh/doc.go @@ -0,0 +1,23 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package ssh implements an SSH client and server. + +SSH is a transport security protocol, an authentication protocol and a +family of application protocols. The most typical application level +protocol is a remote shell and this is specifically implemented. However, +the multiplexed nature of SSH is exposed to users that wish to support +others. + +References: + + [PROTOCOL]: https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=HEAD + [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD + [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 + +This package does not fall under the stability promise of the Go language itself, +so its API may be changed when pressing needs arise. +*/ +package ssh diff --git a/tempfork/sshtest/ssh/example_test.go b/tempfork/sshtest/ssh/example_test.go new file mode 100644 index 000000000..97b3b6aba --- /dev/null +++ b/tempfork/sshtest/ssh/example_test.go @@ -0,0 +1,400 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh_test + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/rsa" + "fmt" + "log" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "sync" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/terminal" +) + +func ExampleNewServerConn() { + // Public key authentication is done by comparing + // the public key of a received connection + // with the entries in the authorized_keys file. + authorizedKeysBytes, err := os.ReadFile("authorized_keys") + if err != nil { + log.Fatalf("Failed to load authorized_keys, err: %v", err) + } + + authorizedKeysMap := map[string]bool{} + for len(authorizedKeysBytes) > 0 { + pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(authorizedKeysBytes) + if err != nil { + log.Fatal(err) + } + + authorizedKeysMap[string(pubKey.Marshal())] = true + authorizedKeysBytes = rest + } + + // An SSH server is represented by a ServerConfig, which holds + // certificate details and handles authentication of ServerConns. + config := &ssh.ServerConfig{ + // Remove to disable password auth. + PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { + // Should use constant-time compare (or better, salt+hash) in + // a production setting. 
+ if c.User() == "testuser" && string(pass) == "tiger" { + return nil, nil + } + return nil, fmt.Errorf("password rejected for %q", c.User()) + }, + + // Remove to disable public key auth. + PublicKeyCallback: func(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) { + if authorizedKeysMap[string(pubKey.Marshal())] { + return &ssh.Permissions{ + // Record the public key used for authentication. + Extensions: map[string]string{ + "pubkey-fp": ssh.FingerprintSHA256(pubKey), + }, + }, nil + } + return nil, fmt.Errorf("unknown public key for %q", c.User()) + }, + } + + privateBytes, err := os.ReadFile("id_rsa") + if err != nil { + log.Fatal("Failed to load private key: ", err) + } + + private, err := ssh.ParsePrivateKey(privateBytes) + if err != nil { + log.Fatal("Failed to parse private key: ", err) + } + config.AddHostKey(private) + + // Once a ServerConfig has been configured, connections can be + // accepted. + listener, err := net.Listen("tcp", "0.0.0.0:2022") + if err != nil { + log.Fatal("failed to listen for connection: ", err) + } + nConn, err := listener.Accept() + if err != nil { + log.Fatal("failed to accept incoming connection: ", err) + } + + // Before use, a handshake must be performed on the incoming + // net.Conn. + conn, chans, reqs, err := ssh.NewServerConn(nConn, config) + if err != nil { + log.Fatal("failed to handshake: ", err) + } + log.Printf("logged in with key %s", conn.Permissions.Extensions["pubkey-fp"]) + + var wg sync.WaitGroup + defer wg.Wait() + + // The incoming Request channel must be serviced. + wg.Add(1) + go func() { + ssh.DiscardRequests(reqs) + wg.Done() + }() + + // Service the incoming Channel channel. + for newChannel := range chans { + // Channels have a type, depending on the application level + // protocol intended. In the case of a shell, the type is + // "session" and ServerShell may be used to present a simple + // terminal interface. + if newChannel.ChannelType() != "session" { + newChannel.Reject(ssh.UnknownChannelType, "unknown channel type") + continue + } + channel, requests, err := newChannel.Accept() + if err != nil { + log.Fatalf("Could not accept channel: %v", err) + } + + // Sessions have out-of-band requests such as "shell", + // "pty-req" and "env". Here we handle only the + // "shell" request. + wg.Add(1) + go func(in <-chan *ssh.Request) { + for req := range in { + req.Reply(req.Type == "shell", nil) + } + wg.Done() + }(requests) + + term := terminal.NewTerminal(channel, "> ") + + wg.Add(1) + go func() { + defer func() { + channel.Close() + wg.Done() + }() + for { + line, err := term.ReadLine() + if err != nil { + break + } + fmt.Println(line) + } + }() + } +} + +func ExampleServerConfig_AddHostKey() { + // Minimal ServerConfig supporting only password authentication. + config := &ssh.ServerConfig{ + PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { + // Should use constant-time compare (or better, salt+hash) in + // a production setting. + if c.User() == "testuser" && string(pass) == "tiger" { + return nil, nil + } + return nil, fmt.Errorf("password rejected for %q", c.User()) + }, + } + + privateBytes, err := os.ReadFile("id_rsa") + if err != nil { + log.Fatal("Failed to load private key: ", err) + } + + private, err := ssh.ParsePrivateKey(privateBytes) + if err != nil { + log.Fatal("Failed to parse private key: ", err) + } + // Restrict host key algorithms to disable ssh-rsa. 
+ signer, err := ssh.NewSignerWithAlgorithms(private.(ssh.AlgorithmSigner), []string{ssh.KeyAlgoRSASHA256, ssh.KeyAlgoRSASHA512}) + if err != nil { + log.Fatal("Failed to create private key with restricted algorithms: ", err) + } + config.AddHostKey(signer) +} + +func ExampleClientConfig_HostKeyCallback() { + // Every client must provide a host key check. Here is a + // simple-minded parse of OpenSSH's known_hosts file + host := "hostname" + file, err := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "known_hosts")) + if err != nil { + log.Fatal(err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + var hostKey ssh.PublicKey + for scanner.Scan() { + fields := strings.Split(scanner.Text(), " ") + if len(fields) != 3 { + continue + } + if strings.Contains(fields[0], host) { + var err error + hostKey, _, _, _, err = ssh.ParseAuthorizedKey(scanner.Bytes()) + if err != nil { + log.Fatalf("error parsing %q: %v", fields[2], err) + } + break + } + } + + if hostKey == nil { + log.Fatalf("no hostkey for %s", host) + } + + config := ssh.ClientConfig{ + User: os.Getenv("USER"), + HostKeyCallback: ssh.FixedHostKey(hostKey), + } + + _, err = ssh.Dial("tcp", host+":22", &config) + log.Println(err) +} + +func ExampleDial() { + var hostKey ssh.PublicKey + // An SSH client is represented with a ClientConn. + // + // To authenticate with the remote server you must pass at least one + // implementation of AuthMethod via the Auth field in ClientConfig, + // and provide a HostKeyCallback. + config := &ssh.ClientConfig{ + User: "username", + Auth: []ssh.AuthMethod{ + ssh.Password("yourpassword"), + }, + HostKeyCallback: ssh.FixedHostKey(hostKey), + } + client, err := ssh.Dial("tcp", "yourserver.com:22", config) + if err != nil { + log.Fatal("Failed to dial: ", err) + } + defer client.Close() + + // Each ClientConn can support multiple interactive sessions, + // represented by a Session. + session, err := client.NewSession() + if err != nil { + log.Fatal("Failed to create session: ", err) + } + defer session.Close() + + // Once a Session is created, you can execute a single command on + // the remote side using the Run method. + var b bytes.Buffer + session.Stdout = &b + if err := session.Run("/usr/bin/whoami"); err != nil { + log.Fatal("Failed to run: " + err.Error()) + } + fmt.Println(b.String()) +} + +func ExamplePublicKeys() { + var hostKey ssh.PublicKey + // A public key may be used to authenticate against the remote + // server by using an unencrypted PEM-encoded private key file. + // + // If you have an encrypted private key, the crypto/x509 package + // can be used to decrypt it. + key, err := os.ReadFile("/home/user/.ssh/id_rsa") + if err != nil { + log.Fatalf("unable to read private key: %v", err) + } + + // Create the Signer for this private key. + signer, err := ssh.ParsePrivateKey(key) + if err != nil { + log.Fatalf("unable to parse private key: %v", err) + } + + config := &ssh.ClientConfig{ + User: "user", + Auth: []ssh.AuthMethod{ + // Use the PublicKeys method for remote authentication. + ssh.PublicKeys(signer), + }, + HostKeyCallback: ssh.FixedHostKey(hostKey), + } + + // Connect to the remote server and perform the SSH handshake. 
+ client, err := ssh.Dial("tcp", "host.com:22", config) + if err != nil { + log.Fatalf("unable to connect: %v", err) + } + defer client.Close() +} + +func ExampleClient_Listen() { + var hostKey ssh.PublicKey + config := &ssh.ClientConfig{ + User: "username", + Auth: []ssh.AuthMethod{ + ssh.Password("password"), + }, + HostKeyCallback: ssh.FixedHostKey(hostKey), + } + // Dial your ssh server. + conn, err := ssh.Dial("tcp", "localhost:22", config) + if err != nil { + log.Fatal("unable to connect: ", err) + } + defer conn.Close() + + // Request the remote side to open port 8080 on all interfaces. + l, err := conn.Listen("tcp", "0.0.0.0:8080") + if err != nil { + log.Fatal("unable to register tcp forward: ", err) + } + defer l.Close() + + // Serve HTTP with your SSH server acting as a reverse proxy. + http.Serve(l, http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + fmt.Fprintf(resp, "Hello world!\n") + })) +} + +func ExampleSession_RequestPty() { + var hostKey ssh.PublicKey + // Create client config + config := &ssh.ClientConfig{ + User: "username", + Auth: []ssh.AuthMethod{ + ssh.Password("password"), + }, + HostKeyCallback: ssh.FixedHostKey(hostKey), + } + // Connect to ssh server + conn, err := ssh.Dial("tcp", "localhost:22", config) + if err != nil { + log.Fatal("unable to connect: ", err) + } + defer conn.Close() + // Create a session + session, err := conn.NewSession() + if err != nil { + log.Fatal("unable to create session: ", err) + } + defer session.Close() + // Set up terminal modes + modes := ssh.TerminalModes{ + ssh.ECHO: 0, // disable echoing + ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud + ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud + } + // Request pseudo terminal + if err := session.RequestPty("xterm", 40, 80, modes); err != nil { + log.Fatal("request for pseudo terminal failed: ", err) + } + // Start remote shell + if err := session.Shell(); err != nil { + log.Fatal("failed to start shell: ", err) + } +} + +func ExampleCertificate_SignCert() { + // Sign a certificate with a specific algorithm. + privateKey, err := rsa.GenerateKey(rand.Reader, 3072) + if err != nil { + log.Fatal("unable to generate RSA key: ", err) + } + publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey) + if err != nil { + log.Fatal("unable to get RSA public key: ", err) + } + caKey, err := rsa.GenerateKey(rand.Reader, 3072) + if err != nil { + log.Fatal("unable to generate CA key: ", err) + } + signer, err := ssh.NewSignerFromKey(caKey) + if err != nil { + log.Fatal("unable to generate signer from key: ", err) + } + mas, err := ssh.NewSignerWithAlgorithms(signer.(ssh.AlgorithmSigner), []string{ssh.KeyAlgoRSASHA256}) + if err != nil { + log.Fatal("unable to create signer with algorithms: ", err) + } + certificate := ssh.Certificate{ + Key: publicKey, + CertType: ssh.UserCert, + } + if err := certificate.SignCert(rand.Reader, mas); err != nil { + log.Fatal("unable to sign certificate: ", err) + } + // Save the public key to a file and check that rsa-sha-256 is used for + // signing: + // ssh-keygen -L -f + fmt.Println(string(ssh.MarshalAuthorizedKey(&certificate))) +} diff --git a/tempfork/sshtest/ssh/handshake.go b/tempfork/sshtest/ssh/handshake.go new file mode 100644 index 000000000..fef687db0 --- /dev/null +++ b/tempfork/sshtest/ssh/handshake.go @@ -0,0 +1,816 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "crypto/rand" + "errors" + "fmt" + "io" + "log" + "net" + "strings" + "sync" +) + +// debugHandshake, if set, prints messages sent and received. Key +// exchange messages are printed as if DH were used, so the debug +// messages are wrong when using ECDH. +const debugHandshake = false + +// chanSize sets the amount of buffering SSH connections. This is +// primarily for testing: setting chanSize=0 uncovers deadlocks more +// quickly. +const chanSize = 16 + +// keyingTransport is a packet based transport that supports key +// changes. It need not be thread-safe. It should pass through +// msgNewKeys in both directions. +type keyingTransport interface { + packetConn + + // prepareKeyChange sets up a key change. The key change for a + // direction will be effected if a msgNewKeys message is sent + // or received. + prepareKeyChange(*algorithms, *kexResult) error + + // setStrictMode sets the strict KEX mode, notably triggering + // sequence number resets on sending or receiving msgNewKeys. + // If the sequence number is already > 1 when setStrictMode + // is called, an error is returned. + setStrictMode() error + + // setInitialKEXDone indicates to the transport that the initial key exchange + // was completed + setInitialKEXDone() +} + +// handshakeTransport implements rekeying on top of a keyingTransport +// and offers a thread-safe writePacket() interface. +type handshakeTransport struct { + conn keyingTransport + config *Config + + serverVersion []byte + clientVersion []byte + + // hostKeys is non-empty if we are the server. In that case, + // it contains all host keys that can be used to sign the + // connection. + hostKeys []Signer + + // publicKeyAuthAlgorithms is non-empty if we are the server. In that case, + // it contains the supported client public key authentication algorithms. + publicKeyAuthAlgorithms []string + + // hostKeyAlgorithms is non-empty if we are the client. In that case, + // we accept these key types from the server as host key. + hostKeyAlgorithms []string + + // On read error, incoming is closed, and readError is set. + incoming chan []byte + readError error + + mu sync.Mutex + writeError error + sentInitPacket []byte + sentInitMsg *kexInitMsg + pendingPackets [][]byte // Used when a key exchange is in progress. + writePacketsLeft uint32 + writeBytesLeft int64 + userAuthComplete bool // whether the user authentication phase is complete + + // If the read loop wants to schedule a kex, it pings this + // channel, and the write loop will send out a kex + // message. + requestKex chan struct{} + + // If the other side requests or confirms a kex, its kexInit + // packet is sent here for the write loop to find it. + startKex chan *pendingKex + kexLoopDone chan struct{} // closed (with writeError non-nil) when kexLoop exits + + // data for host key checking + hostKeyCallback HostKeyCallback + dialAddress string + remoteAddr net.Addr + + // bannerCallback is non-empty if we are the client and it has been set in + // ClientConfig. In that case it is called during the user authentication + // dance to handle a custom server's message. + bannerCallback BannerCallback + + // Algorithms agreed in the last key exchange. + algorithms *algorithms + + // Counters exclusively owned by readLoop. + readPacketsLeft uint32 + readBytesLeft int64 + + // The session ID or nil if first kex did not complete yet. 
+ sessionID []byte + + // strictMode indicates if the other side of the handshake indicated + // that we should be following the strict KEX protocol restrictions. + strictMode bool +} + +type pendingKex struct { + otherInit []byte + done chan error +} + +func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { + t := &handshakeTransport{ + conn: conn, + serverVersion: serverVersion, + clientVersion: clientVersion, + incoming: make(chan []byte, chanSize), + requestKex: make(chan struct{}, 1), + startKex: make(chan *pendingKex), + kexLoopDone: make(chan struct{}), + + config: config, + } + t.resetReadThresholds() + t.resetWriteThresholds() + + // We always start with a mandatory key exchange. + t.requestKex <- struct{}{} + return t +} + +func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.dialAddress = dialAddr + t.remoteAddr = addr + t.hostKeyCallback = config.HostKeyCallback + t.bannerCallback = config.BannerCallback + if config.HostKeyAlgorithms != nil { + t.hostKeyAlgorithms = config.HostKeyAlgorithms + } else { + t.hostKeyAlgorithms = supportedHostKeyAlgos + } + go t.readLoop() + go t.kexLoop() + return t +} + +func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.hostKeys = config.hostKeys + t.publicKeyAuthAlgorithms = config.PublicKeyAuthAlgorithms + go t.readLoop() + go t.kexLoop() + return t +} + +func (t *handshakeTransport) getSessionID() []byte { + return t.sessionID +} + +// waitSession waits for the session to be established. This should be +// the first thing to call after instantiating handshakeTransport. +func (t *handshakeTransport) waitSession() error { + p, err := t.readPacket() + if err != nil { + return err + } + if p[0] != msgNewKeys { + return fmt.Errorf("ssh: first packet should be msgNewKeys") + } + + return nil +} + +func (t *handshakeTransport) id() string { + if len(t.hostKeys) > 0 { + return "server" + } + return "client" +} + +func (t *handshakeTransport) printPacket(p []byte, write bool) { + action := "got" + if write { + action = "sent" + } + + if p[0] == msgChannelData || p[0] == msgChannelExtendedData { + log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) + } else { + msg, err := decode(p) + log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) + } +} + +func (t *handshakeTransport) readPacket() ([]byte, error) { + p, ok := <-t.incoming + if !ok { + return nil, t.readError + } + return p, nil +} + +func (t *handshakeTransport) readLoop() { + first := true + for { + p, err := t.readOnePacket(first) + first = false + if err != nil { + t.readError = err + close(t.incoming) + break + } + // If this is the first kex, and strict KEX mode is enabled, + // we don't ignore any messages, as they may be used to manipulate + // the packet sequence numbers. + if !(t.sessionID == nil && t.strictMode) && (p[0] == msgIgnore || p[0] == msgDebug) { + continue + } + t.incoming <- p + } + + // Stop writers too. + t.recordWriteError(t.readError) + + // Unblock the writer should it wait for this. + close(t.startKex) + + // Don't close t.requestKex; it's also written to from writePacket. 
+} + +func (t *handshakeTransport) pushPacket(p []byte) error { + if debugHandshake { + t.printPacket(p, true) + } + return t.conn.writePacket(p) +} + +func (t *handshakeTransport) getWriteError() error { + t.mu.Lock() + defer t.mu.Unlock() + return t.writeError +} + +func (t *handshakeTransport) recordWriteError(err error) { + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError == nil && err != nil { + t.writeError = err + } +} + +func (t *handshakeTransport) requestKeyExchange() { + select { + case t.requestKex <- struct{}{}: + default: + // something already requested a kex, so do nothing. + } +} + +func (t *handshakeTransport) resetWriteThresholds() { + t.writePacketsLeft = packetRekeyThreshold + if t.config.RekeyThreshold > 0 { + t.writeBytesLeft = int64(t.config.RekeyThreshold) + } else if t.algorithms != nil { + t.writeBytesLeft = t.algorithms.w.rekeyBytes() + } else { + t.writeBytesLeft = 1 << 30 + } +} + +func (t *handshakeTransport) kexLoop() { + +write: + for t.getWriteError() == nil { + var request *pendingKex + var sent bool + + for request == nil || !sent { + var ok bool + select { + case request, ok = <-t.startKex: + if !ok { + break write + } + case <-t.requestKex: + break + } + + if !sent { + if err := t.sendKexInit(); err != nil { + t.recordWriteError(err) + break + } + sent = true + } + } + + if err := t.getWriteError(); err != nil { + if request != nil { + request.done <- err + } + break + } + + // We're not servicing t.requestKex, but that is OK: + // we never block on sending to t.requestKex. + + // We're not servicing t.startKex, but the remote end + // has just sent us a kexInitMsg, so it can't send + // another key change request, until we close the done + // channel on the pendingKex request. + + err := t.enterKeyExchange(request.otherInit) + + t.mu.Lock() + t.writeError = err + t.sentInitPacket = nil + t.sentInitMsg = nil + + t.resetWriteThresholds() + + // we have completed the key exchange. Since the + // reader is still blocked, it is safe to clear out + // the requestKex channel. This avoids the situation + // where: 1) we consumed our own request for the + // initial kex, and 2) the kex from the remote side + // caused another send on the requestKex channel, + clear: + for { + select { + case <-t.requestKex: + // + default: + break clear + } + } + + request.done <- t.writeError + + // kex finished. Push packets that we received while + // the kex was in progress. Don't look at t.startKex + // and don't increment writtenSinceKex: if we trigger + // another kex while we are still busy with the last + // one, things will become very confusing. + for _, p := range t.pendingPackets { + t.writeError = t.pushPacket(p) + if t.writeError != nil { + break + } + } + t.pendingPackets = t.pendingPackets[:0] + t.mu.Unlock() + } + + // Unblock reader. + t.conn.Close() + + // drain startKex channel. We don't service t.requestKex + // because nobody does blocking sends there. + for request := range t.startKex { + request.done <- t.getWriteError() + } + + // Mark that the loop is done so that Close can return. + close(t.kexLoopDone) +} + +// The protocol uses uint32 for packet counters, so we can't let them +// reach 1<<32. We will actually read and write more packets than +// this, though: the other side may send more packets, and after we +// hit this limit on writing we will send a few more packets for the +// key exchange itself. 
+const packetRekeyThreshold = (1 << 31) + +func (t *handshakeTransport) resetReadThresholds() { + t.readPacketsLeft = packetRekeyThreshold + if t.config.RekeyThreshold > 0 { + t.readBytesLeft = int64(t.config.RekeyThreshold) + } else if t.algorithms != nil { + t.readBytesLeft = t.algorithms.r.rekeyBytes() + } else { + t.readBytesLeft = 1 << 30 + } +} + +func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { + p, err := t.conn.readPacket() + if err != nil { + return nil, err + } + + if t.readPacketsLeft > 0 { + t.readPacketsLeft-- + } else { + t.requestKeyExchange() + } + + if t.readBytesLeft > 0 { + t.readBytesLeft -= int64(len(p)) + } else { + t.requestKeyExchange() + } + + if debugHandshake { + t.printPacket(p, false) + } + + if first && p[0] != msgKexInit { + return nil, fmt.Errorf("ssh: first packet should be msgKexInit") + } + + if p[0] != msgKexInit { + return p, nil + } + + firstKex := t.sessionID == nil + + kex := pendingKex{ + done: make(chan error, 1), + otherInit: p, + } + t.startKex <- &kex + err = <-kex.done + + if debugHandshake { + log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) + } + + if err != nil { + return nil, err + } + + t.resetReadThresholds() + + // By default, a key exchange is hidden from higher layers by + // translating it into msgIgnore. + successPacket := []byte{msgIgnore} + if firstKex { + // sendKexInit() for the first kex waits for + // msgNewKeys so the authentication process is + // guaranteed to happen over an encrypted transport. + successPacket = []byte{msgNewKeys} + } + + return successPacket, nil +} + +const ( + kexStrictClient = "kex-strict-c-v00@openssh.com" + kexStrictServer = "kex-strict-s-v00@openssh.com" +) + +// sendKexInit sends a key change message. +func (t *handshakeTransport) sendKexInit() error { + t.mu.Lock() + defer t.mu.Unlock() + if t.sentInitMsg != nil { + // kexInits may be sent either in response to the other side, + // or because our side wants to initiate a key change, so we + // may have already sent a kexInit. In that case, don't send a + // second kexInit. + return nil + } + + msg := &kexInitMsg{ + CiphersClientServer: t.config.Ciphers, + CiphersServerClient: t.config.Ciphers, + MACsClientServer: t.config.MACs, + MACsServerClient: t.config.MACs, + CompressionClientServer: supportedCompressions, + CompressionServerClient: supportedCompressions, + } + io.ReadFull(rand.Reader, msg.Cookie[:]) + + // We mutate the KexAlgos slice, in order to add the kex-strict extension algorithm, + // and possibly to add the ext-info extension algorithm. Since the slice may be the + // user owned KeyExchanges, we create our own slice in order to avoid using user + // owned memory by mistake. + msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+2) // room for kex-strict and ext-info + msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) + + isServer := len(t.hostKeys) > 0 + if isServer { + for _, k := range t.hostKeys { + // If k is a MultiAlgorithmSigner, we restrict the signature + // algorithms. If k is a AlgorithmSigner, presume it supports all + // signature algorithms associated with the key format. If k is not + // an AlgorithmSigner, we can only assume it only supports the + // algorithms that matches the key format. (This means that Sign + // can't pick a different default). 
+ keyFormat := k.PublicKey().Type() + + switch s := k.(type) { + case MultiAlgorithmSigner: + for _, algo := range algorithmsForKeyFormat(keyFormat) { + if contains(s.Algorithms(), underlyingAlgo(algo)) { + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo) + } + } + case AlgorithmSigner: + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...) + default: + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat) + } + } + + if t.sessionID == nil { + msg.KexAlgos = append(msg.KexAlgos, kexStrictServer) + } + } else { + msg.ServerHostKeyAlgos = t.hostKeyAlgorithms + + // As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what + // algorithms the server supports for public key authentication. See RFC + // 8308, Section 2.1. + // + // We also send the strict KEX mode extension algorithm, in order to opt + // into the strict KEX mode. + if firstKeyExchange := t.sessionID == nil; firstKeyExchange { + msg.KexAlgos = append(msg.KexAlgos, "ext-info-c") + msg.KexAlgos = append(msg.KexAlgos, kexStrictClient) + } + + } + + packet := Marshal(msg) + + // writePacket destroys the contents, so save a copy. + packetCopy := make([]byte, len(packet)) + copy(packetCopy, packet) + + if err := t.pushPacket(packetCopy); err != nil { + return err + } + + t.sentInitMsg = msg + t.sentInitPacket = packet + + return nil +} + +var errSendBannerPhase = errors.New("ssh: SendAuthBanner outside of authentication phase") + +func (t *handshakeTransport) writePacket(p []byte) error { + t.mu.Lock() + defer t.mu.Unlock() + + switch p[0] { + case msgKexInit: + return errors.New("ssh: only handshakeTransport can send kexInit") + case msgNewKeys: + return errors.New("ssh: only handshakeTransport can send newKeys") + case msgUserAuthBanner: + if t.userAuthComplete { + return errSendBannerPhase + } + case msgUserAuthSuccess: + t.userAuthComplete = true + } + + if t.writeError != nil { + return t.writeError + } + + if t.sentInitMsg != nil { + // Copy the packet so the writer can reuse the buffer. + cp := make([]byte, len(p)) + copy(cp, p) + t.pendingPackets = append(t.pendingPackets, cp) + return nil + } + + if t.writeBytesLeft > 0 { + t.writeBytesLeft -= int64(len(p)) + } else { + t.requestKeyExchange() + } + + if t.writePacketsLeft > 0 { + t.writePacketsLeft-- + } else { + t.requestKeyExchange() + } + + if err := t.pushPacket(p); err != nil { + t.writeError = err + } + + return nil +} + +func (t *handshakeTransport) Close() error { + // Close the connection. This should cause the readLoop goroutine to wake up + // and close t.startKex, which will shut down kexLoop if running. + err := t.conn.Close() + + // Wait for the kexLoop goroutine to complete. + // At that point we know that the readLoop goroutine is complete too, + // because kexLoop itself waits for readLoop to close the startKex channel. 
+ <-t.kexLoopDone + + return err +} + +func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { + if debugHandshake { + log.Printf("%s entered key exchange", t.id()) + } + + otherInit := &kexInitMsg{} + if err := Unmarshal(otherInitPacket, otherInit); err != nil { + return err + } + + magics := handshakeMagics{ + clientVersion: t.clientVersion, + serverVersion: t.serverVersion, + clientKexInit: otherInitPacket, + serverKexInit: t.sentInitPacket, + } + + clientInit := otherInit + serverInit := t.sentInitMsg + isClient := len(t.hostKeys) == 0 + if isClient { + clientInit, serverInit = serverInit, clientInit + + magics.clientKexInit = t.sentInitPacket + magics.serverKexInit = otherInitPacket + } + + var err error + t.algorithms, err = findAgreedAlgorithms(isClient, clientInit, serverInit) + if err != nil { + return err + } + + if t.sessionID == nil && ((isClient && contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && contains(clientInit.KexAlgos, kexStrictClient))) { + t.strictMode = true + if err := t.conn.setStrictMode(); err != nil { + return err + } + } + + // We don't send FirstKexFollows, but we handle receiving it. + // + // RFC 4253 section 7 defines the kex and the agreement method for + // first_kex_packet_follows. It states that the guessed packet + // should be ignored if the "kex algorithm and/or the host + // key algorithm is guessed wrong (server and client have + // different preferred algorithm), or if any of the other + // algorithms cannot be agreed upon". The other algorithms have + // already been checked above so the kex algorithm and host key + // algorithm are checked here. + if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { + // other side sent a kex message for the wrong algorithm, + // which we have to ignore. + if _, err := t.conn.readPacket(); err != nil { + return err + } + } + + kex, ok := kexAlgoMap[t.algorithms.kex] + if !ok { + return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) + } + + var result *kexResult + if len(t.hostKeys) > 0 { + result, err = t.server(kex, &magics) + } else { + result, err = t.client(kex, &magics) + } + + if err != nil { + return err + } + + firstKeyExchange := t.sessionID == nil + if firstKeyExchange { + t.sessionID = result.H + } + result.SessionID = t.sessionID + + if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil { + return err + } + if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { + return err + } + + // On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO + // message with the server-sig-algs extension if the client supports it. See + // RFC 8308, Sections 2.4 and 3.1, and [PROTOCOL], Section 1.9. + if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") { + supportedPubKeyAuthAlgosList := strings.Join(t.publicKeyAuthAlgorithms, ",") + extInfo := &extInfoMsg{ + NumExtensions: 2, + Payload: make([]byte, 0, 4+15+4+len(supportedPubKeyAuthAlgosList)+4+16+4+1), + } + extInfo.Payload = appendInt(extInfo.Payload, len("server-sig-algs")) + extInfo.Payload = append(extInfo.Payload, "server-sig-algs"...) + extInfo.Payload = appendInt(extInfo.Payload, len(supportedPubKeyAuthAlgosList)) + extInfo.Payload = append(extInfo.Payload, supportedPubKeyAuthAlgosList...) 
+ extInfo.Payload = appendInt(extInfo.Payload, len("ping@openssh.com")) + extInfo.Payload = append(extInfo.Payload, "ping@openssh.com"...) + extInfo.Payload = appendInt(extInfo.Payload, 1) + extInfo.Payload = append(extInfo.Payload, "0"...) + if err := t.conn.writePacket(Marshal(extInfo)); err != nil { + return err + } + } + + if packet, err := t.conn.readPacket(); err != nil { + return err + } else if packet[0] != msgNewKeys { + return unexpectedMessageError(msgNewKeys, packet[0]) + } + + if firstKeyExchange { + // Indicates to the transport that the first key exchange is completed + // after receiving SSH_MSG_NEWKEYS. + t.conn.setInitialKEXDone() + } + + return nil +} + +// algorithmSignerWrapper is an AlgorithmSigner that only supports the default +// key format algorithm. +// +// This is technically a violation of the AlgorithmSigner interface, but it +// should be unreachable given where we use this. Anyway, at least it returns an +// error instead of panicing or producing an incorrect signature. +type algorithmSignerWrapper struct { + Signer +} + +func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if algorithm != underlyingAlgo(a.PublicKey().Type()) { + return nil, errors.New("ssh: internal error: algorithmSignerWrapper invoked with non-default algorithm") + } + return a.Sign(rand, data) +} + +func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner { + for _, k := range hostKeys { + if s, ok := k.(MultiAlgorithmSigner); ok { + if !contains(s.Algorithms(), underlyingAlgo(algo)) { + continue + } + } + + if algo == k.PublicKey().Type() { + return algorithmSignerWrapper{k} + } + + k, ok := k.(AlgorithmSigner) + if !ok { + continue + } + for _, a := range algorithmsForKeyFormat(k.PublicKey().Type()) { + if algo == a { + return k + } + } + } + return nil +} + +func (t *handshakeTransport) server(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { + hostKey := pickHostKey(t.hostKeys, t.algorithms.hostKey) + if hostKey == nil { + return nil, errors.New("ssh: internal error: negotiated unsupported signature type") + } + + r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey, t.algorithms.hostKey) + return r, err +} + +func (t *handshakeTransport) client(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { + result, err := kex.Client(t.conn, t.config.Rand, magics) + if err != nil { + return nil, err + } + + hostKey, err := ParsePublicKey(result.HostKey) + if err != nil { + return nil, err + } + + if err := verifyHostKeySignature(hostKey, t.algorithms.hostKey, result); err != nil { + return nil, err + } + + err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) + if err != nil { + return nil, err + } + + return result, nil +} diff --git a/tempfork/sshtest/ssh/handshake_test.go b/tempfork/sshtest/ssh/handshake_test.go new file mode 100644 index 000000000..2bc607b64 --- /dev/null +++ b/tempfork/sshtest/ssh/handshake_test.go @@ -0,0 +1,1021 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "bytes" + "crypto/rand" + "errors" + "fmt" + "io" + "net" + "reflect" + "runtime" + "strings" + "sync" + "testing" +) + +type testChecker struct { + calls []string +} + +func (t *testChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error { + if dialAddr == "bad" { + return fmt.Errorf("dialAddr is bad") + } + + if tcpAddr, ok := addr.(*net.TCPAddr); !ok || tcpAddr == nil { + return fmt.Errorf("testChecker: got %T want *net.TCPAddr", addr) + } + + t.calls = append(t.calls, fmt.Sprintf("%s %v %s %x", dialAddr, addr, key.Type(), key.Marshal())) + + return nil +} + +// netPipe is analogous to net.Pipe, but it uses a real net.Conn, and +// therefore is buffered (net.Pipe deadlocks if both sides start with +// a write.) +func netPipe() (net.Conn, net.Conn, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + listener, err = net.Listen("tcp", "[::1]:0") + if err != nil { + return nil, nil, err + } + } + defer listener.Close() + c1, err := net.Dial("tcp", listener.Addr().String()) + if err != nil { + return nil, nil, err + } + + c2, err := listener.Accept() + if err != nil { + c1.Close() + return nil, nil, err + } + + return c1, c2, nil +} + +// noiseTransport inserts ignore messages to check that the read loop +// and the key exchange filters out these messages. +type noiseTransport struct { + keyingTransport +} + +func (t *noiseTransport) writePacket(p []byte) error { + ignore := []byte{msgIgnore} + if err := t.keyingTransport.writePacket(ignore); err != nil { + return err + } + debug := []byte{msgDebug, 1, 2, 3} + if err := t.keyingTransport.writePacket(debug); err != nil { + return err + } + + return t.keyingTransport.writePacket(p) +} + +func addNoiseTransport(t keyingTransport) keyingTransport { + return &noiseTransport{t} +} + +// handshakePair creates two handshakeTransports connected with each +// other. If the noise argument is true, both transports will try to +// confuse the other side by sending ignore and debug messages. +func handshakePair(clientConf *ClientConfig, addr string, noise bool) (client *handshakeTransport, server *handshakeTransport, err error) { + a, b, err := netPipe() + if err != nil { + return nil, nil, err + } + + var trC, trS keyingTransport + + trC = newTransport(a, rand.Reader, true) + trS = newTransport(b, rand.Reader, false) + if noise { + trC = addNoiseTransport(trC) + trS = addNoiseTransport(trS) + } + clientConf.SetDefaults() + + v := []byte("version") + client = newClientTransport(trC, v, v, clientConf, addr, a.RemoteAddr()) + + serverConf := &ServerConfig{} + serverConf.AddHostKey(testSigners["ecdsa"]) + serverConf.AddHostKey(testSigners["rsa"]) + serverConf.SetDefaults() + server = newServerTransport(trS, v, v, serverConf) + + if err := server.waitSession(); err != nil { + return nil, nil, fmt.Errorf("server.waitSession: %v", err) + } + if err := client.waitSession(); err != nil { + return nil, nil, fmt.Errorf("client.waitSession: %v", err) + } + + return client, server, nil +} + +func TestHandshakeBasic(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/7237") + } + + checker := &syncChecker{ + waitCall: make(chan int, 10), + called: make(chan int, 10), + } + + checker.waitCall <- 1 + trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr", false) + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + + defer trC.Close() + defer trS.Close() + + // Let first kex complete normally. 
+ <-checker.called + + clientDone := make(chan int, 0) + gotHalf := make(chan int, 0) + const N = 20 + errorCh := make(chan error, 1) + + go func() { + defer close(clientDone) + // Client writes a bunch of stuff, and does a key + // change in the middle. This should not confuse the + // handshake in progress. We do this twice, so we test + // that the packet buffer is reset correctly. + for i := 0; i < N; i++ { + p := []byte{msgRequestSuccess, byte(i)} + if err := trC.writePacket(p); err != nil { + errorCh <- err + trC.Close() + return + } + if (i % 10) == 5 { + <-gotHalf + // halfway through, we request a key change. + trC.requestKeyExchange() + + // Wait until we can be sure the key + // change has really started before we + // write more. + <-checker.called + } + if (i % 10) == 7 { + // write some packets until the kex + // completes, to test buffering of + // packets. + checker.waitCall <- 1 + } + } + errorCh <- nil + }() + + // Server checks that client messages come in cleanly + i := 0 + for ; i < N; i++ { + p, err := trS.readPacket() + if err != nil && err != io.EOF { + t.Fatalf("server error: %v", err) + } + if (i % 10) == 5 { + gotHalf <- 1 + } + + want := []byte{msgRequestSuccess, byte(i)} + if bytes.Compare(p, want) != 0 { + t.Errorf("message %d: got %v, want %v", i, p, want) + } + } + <-clientDone + if err := <-errorCh; err != nil { + t.Fatalf("sendPacket: %v", err) + } + if i != N { + t.Errorf("received %d messages, want 10.", i) + } + + close(checker.called) + if _, ok := <-checker.called; ok { + // If all went well, we registered exactly 2 key changes: one + // that establishes the session, and one that we requested + // additionally. + t.Fatalf("got another host key checks after 2 handshakes") + } +} + +func TestForceFirstKex(t *testing.T) { + // like handshakePair, but must access the keyingTransport. + checker := &testChecker{} + clientConf := &ClientConfig{HostKeyCallback: checker.Check} + a, b, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + + var trC, trS keyingTransport + + trC = newTransport(a, rand.Reader, true) + + // This is the disallowed packet: + trC.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})) + + // Rest of the setup. + trS = newTransport(b, rand.Reader, false) + clientConf.SetDefaults() + + v := []byte("version") + client := newClientTransport(trC, v, v, clientConf, "addr", a.RemoteAddr()) + + serverConf := &ServerConfig{} + serverConf.AddHostKey(testSigners["ecdsa"]) + serverConf.AddHostKey(testSigners["rsa"]) + serverConf.SetDefaults() + server := newServerTransport(trS, v, v, serverConf) + + defer client.Close() + defer server.Close() + + // We setup the initial key exchange, but the remote side + // tries to send serviceRequestMsg in cleartext, which is + // disallowed. 
+ + if err := server.waitSession(); err == nil { + t.Errorf("server first kex init should reject unexpected packet") + } +} + +func TestHandshakeAutoRekeyWrite(t *testing.T) { + checker := &syncChecker{ + called: make(chan int, 10), + waitCall: nil, + } + clientConf := &ClientConfig{HostKeyCallback: checker.Check} + clientConf.RekeyThreshold = 500 + trC, trS, err := handshakePair(clientConf, "addr", false) + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + defer trC.Close() + defer trS.Close() + + input := make([]byte, 251) + input[0] = msgRequestSuccess + + done := make(chan int, 1) + const numPacket = 5 + go func() { + defer close(done) + j := 0 + for ; j < numPacket; j++ { + if p, err := trS.readPacket(); err != nil { + break + } else if !bytes.Equal(input, p) { + t.Errorf("got packet type %d, want %d", p[0], input[0]) + } + } + + if j != numPacket { + t.Errorf("got %d, want 5 messages", j) + } + }() + + <-checker.called + + for i := 0; i < numPacket; i++ { + p := make([]byte, len(input)) + copy(p, input) + if err := trC.writePacket(p); err != nil { + t.Errorf("writePacket: %v", err) + } + if i == 2 { + // Make sure the kex is in progress. + <-checker.called + } + + } + <-done +} + +type syncChecker struct { + waitCall chan int + called chan int +} + +func (c *syncChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error { + c.called <- 1 + if c.waitCall != nil { + <-c.waitCall + } + return nil +} + +func TestHandshakeAutoRekeyRead(t *testing.T) { + sync := &syncChecker{ + called: make(chan int, 2), + waitCall: nil, + } + clientConf := &ClientConfig{ + HostKeyCallback: sync.Check, + } + clientConf.RekeyThreshold = 500 + + trC, trS, err := handshakePair(clientConf, "addr", false) + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + defer trC.Close() + defer trS.Close() + + packet := make([]byte, 501) + packet[0] = msgRequestSuccess + if err := trS.writePacket(packet); err != nil { + t.Fatalf("writePacket: %v", err) + } + + // While we read out the packet, a key change will be + // initiated. + errorCh := make(chan error, 1) + go func() { + _, err := trC.readPacket() + errorCh <- err + }() + + if err := <-errorCh; err != nil { + t.Fatalf("readPacket(client): %v", err) + } + + <-sync.called +} + +// errorKeyingTransport generates errors after a given number of +// read/write operations. 
+type errorKeyingTransport struct { + packetConn + readLeft, writeLeft int +} + +func (n *errorKeyingTransport) prepareKeyChange(*algorithms, *kexResult) error { + return nil +} + +func (n *errorKeyingTransport) getSessionID() []byte { + return nil +} + +func (n *errorKeyingTransport) writePacket(packet []byte) error { + if n.writeLeft == 0 { + n.Close() + return errors.New("barf") + } + + n.writeLeft-- + return n.packetConn.writePacket(packet) +} + +func (n *errorKeyingTransport) readPacket() ([]byte, error) { + if n.readLeft == 0 { + n.Close() + return nil, errors.New("barf") + } + + n.readLeft-- + return n.packetConn.readPacket() +} + +func (n *errorKeyingTransport) setStrictMode() error { return nil } + +func (n *errorKeyingTransport) setInitialKEXDone() {} + +func TestHandshakeErrorHandlingRead(t *testing.T) { + for i := 0; i < 20; i++ { + testHandshakeErrorHandlingN(t, i, -1, false) + } +} + +func TestHandshakeErrorHandlingWrite(t *testing.T) { + for i := 0; i < 20; i++ { + testHandshakeErrorHandlingN(t, -1, i, false) + } +} + +func TestHandshakeErrorHandlingReadCoupled(t *testing.T) { + for i := 0; i < 20; i++ { + testHandshakeErrorHandlingN(t, i, -1, true) + } +} + +func TestHandshakeErrorHandlingWriteCoupled(t *testing.T) { + for i := 0; i < 20; i++ { + testHandshakeErrorHandlingN(t, -1, i, true) + } +} + +// testHandshakeErrorHandlingN runs handshakes, injecting errors. If +// handshakeTransport deadlocks, the go runtime will detect it and +// panic. +func testHandshakeErrorHandlingN(t *testing.T, readLimit, writeLimit int, coupled bool) { + if (runtime.GOOS == "js" || runtime.GOOS == "wasip1") && runtime.GOARCH == "wasm" { + t.Skipf("skipping on %s/wasm; see golang.org/issue/32840", runtime.GOOS) + } + msg := Marshal(&serviceRequestMsg{strings.Repeat("x", int(minRekeyThreshold)/4)}) + + a, b := memPipe() + defer a.Close() + defer b.Close() + + key := testSigners["ecdsa"] + serverConf := Config{RekeyThreshold: minRekeyThreshold} + serverConf.SetDefaults() + serverConn := newHandshakeTransport(&errorKeyingTransport{a, readLimit, writeLimit}, &serverConf, []byte{'a'}, []byte{'b'}) + serverConn.hostKeys = []Signer{key} + go serverConn.readLoop() + go serverConn.kexLoop() + + clientConf := Config{RekeyThreshold: 10 * minRekeyThreshold} + clientConf.SetDefaults() + clientConn := newHandshakeTransport(&errorKeyingTransport{b, -1, -1}, &clientConf, []byte{'a'}, []byte{'b'}) + clientConn.hostKeyAlgorithms = []string{key.PublicKey().Type()} + clientConn.hostKeyCallback = InsecureIgnoreHostKey() + go clientConn.readLoop() + go clientConn.kexLoop() + + var wg sync.WaitGroup + + for _, hs := range []packetConn{serverConn, clientConn} { + if !coupled { + wg.Add(2) + go func(c packetConn) { + for i := 0; ; i++ { + str := fmt.Sprintf("%08x", i) + strings.Repeat("x", int(minRekeyThreshold)/4-8) + err := c.writePacket(Marshal(&serviceRequestMsg{str})) + if err != nil { + break + } + } + wg.Done() + c.Close() + }(hs) + go func(c packetConn) { + for { + _, err := c.readPacket() + if err != nil { + break + } + } + wg.Done() + }(hs) + } else { + wg.Add(1) + go func(c packetConn) { + for { + _, err := c.readPacket() + if err != nil { + break + } + if err := c.writePacket(msg); err != nil { + break + } + + } + wg.Done() + }(hs) + } + } + wg.Wait() +} + +func TestDisconnect(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/7237") + } + checker := &testChecker{} + trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr", false) + if err != nil { + 
t.Fatalf("handshakePair: %v", err) + } + + defer trC.Close() + defer trS.Close() + + trC.writePacket([]byte{msgRequestSuccess, 0, 0}) + errMsg := &disconnectMsg{ + Reason: 42, + Message: "such is life", + } + trC.writePacket(Marshal(errMsg)) + trC.writePacket([]byte{msgRequestSuccess, 0, 0}) + + packet, err := trS.readPacket() + if err != nil { + t.Fatalf("readPacket 1: %v", err) + } + if packet[0] != msgRequestSuccess { + t.Errorf("got packet %v, want packet type %d", packet, msgRequestSuccess) + } + + _, err = trS.readPacket() + if err == nil { + t.Errorf("readPacket 2 succeeded") + } else if !reflect.DeepEqual(err, errMsg) { + t.Errorf("got error %#v, want %#v", err, errMsg) + } + + _, err = trS.readPacket() + if err == nil { + t.Errorf("readPacket 3 succeeded") + } +} + +func TestHandshakeRekeyDefault(t *testing.T) { + clientConf := &ClientConfig{ + Config: Config{ + Ciphers: []string{"aes128-ctr"}, + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + trC, trS, err := handshakePair(clientConf, "addr", false) + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + defer trC.Close() + defer trS.Close() + + trC.writePacket([]byte{msgRequestSuccess, 0, 0}) + trC.Close() + + rgb := (1024 + trC.readBytesLeft) >> 30 + wgb := (1024 + trC.writeBytesLeft) >> 30 + + if rgb != 64 { + t.Errorf("got rekey after %dG read, want 64G", rgb) + } + if wgb != 64 { + t.Errorf("got rekey after %dG write, want 64G", wgb) + } +} + +func TestHandshakeAEADCipherNoMAC(t *testing.T) { + for _, cipher := range []string{chacha20Poly1305ID, gcm128CipherID} { + checker := &syncChecker{ + called: make(chan int, 1), + } + clientConf := &ClientConfig{ + Config: Config{ + Ciphers: []string{cipher}, + MACs: []string{}, + }, + HostKeyCallback: checker.Check, + } + trC, trS, err := handshakePair(clientConf, "addr", false) + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + defer trC.Close() + defer trS.Close() + + <-checker.called + } +} + +// TestNoSHA2Support tests a host key Signer that is not an AlgorithmSigner and +// therefore can't do SHA-2 signatures. Ensures the server does not advertise +// support for them in this case. 
+func TestNoSHA2Support(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + serverConf := &ServerConfig{ + PasswordCallback: func(conn ConnMetadata, password []byte) (*Permissions, error) { + return &Permissions{}, nil + }, + } + serverConf.AddHostKey(&legacyRSASigner{testSigners["rsa"]}) + go func() { + _, _, _, err := NewServerConn(c1, serverConf) + if err != nil { + t.Error(err) + } + }() + + clientConf := &ClientConfig{ + User: "test", + Auth: []AuthMethod{Password("testpw")}, + HostKeyCallback: FixedHostKey(testSigners["rsa"].PublicKey()), + } + + if _, _, _, err := NewClientConn(c2, "", clientConf); err != nil { + t.Fatal(err) + } +} + +func TestMultiAlgoSignerHandshake(t *testing.T) { + algorithmSigner, ok := testSigners["rsa"].(AlgorithmSigner) + if !ok { + t.Fatal("rsa test signer does not implement the AlgorithmSigner interface") + } + multiAlgoSigner, err := NewSignerWithAlgorithms(algorithmSigner, []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512}) + if err != nil { + t.Fatalf("unable to create multi algorithm signer: %v", err) + } + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + serverConf := &ServerConfig{ + PasswordCallback: func(conn ConnMetadata, password []byte) (*Permissions, error) { + return &Permissions{}, nil + }, + } + serverConf.AddHostKey(multiAlgoSigner) + go NewServerConn(c1, serverConf) + + clientConf := &ClientConfig{ + User: "test", + Auth: []AuthMethod{Password("testpw")}, + HostKeyCallback: FixedHostKey(testSigners["rsa"].PublicKey()), + HostKeyAlgorithms: []string{KeyAlgoRSASHA512}, + } + + if _, _, _, err := NewClientConn(c2, "", clientConf); err != nil { + t.Fatal(err) + } +} + +func TestMultiAlgoSignerNoCommonHostKeyAlgo(t *testing.T) { + algorithmSigner, ok := testSigners["rsa"].(AlgorithmSigner) + if !ok { + t.Fatal("rsa test signer does not implement the AlgorithmSigner interface") + } + multiAlgoSigner, err := NewSignerWithAlgorithms(algorithmSigner, []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512}) + if err != nil { + t.Fatalf("unable to create multi algorithm signer: %v", err) + } + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + // ssh-rsa is disabled server side + serverConf := &ServerConfig{ + PasswordCallback: func(conn ConnMetadata, password []byte) (*Permissions, error) { + return &Permissions{}, nil + }, + } + serverConf.AddHostKey(multiAlgoSigner) + go NewServerConn(c1, serverConf) + + // the client only supports ssh-rsa + clientConf := &ClientConfig{ + User: "test", + Auth: []AuthMethod{Password("testpw")}, + HostKeyCallback: FixedHostKey(testSigners["rsa"].PublicKey()), + HostKeyAlgorithms: []string{KeyAlgoRSA}, + } + + _, _, _, err = NewClientConn(c2, "", clientConf) + if err == nil { + t.Fatal("succeeded connecting with no common hostkey algorithm") + } +} + +func TestPickIncompatibleHostKeyAlgo(t *testing.T) { + algorithmSigner, ok := testSigners["rsa"].(AlgorithmSigner) + if !ok { + t.Fatal("rsa test signer does not implement the AlgorithmSigner interface") + } + multiAlgoSigner, err := NewSignerWithAlgorithms(algorithmSigner, []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512}) + if err != nil { + t.Fatalf("unable to create multi algorithm signer: %v", err) + } + signer := pickHostKey([]Signer{multiAlgoSigner}, KeyAlgoRSA) + if signer != nil { + t.Fatal("incompatible signer returned") + } +} + +func 
TestStrictKEXResetSeqFirstKEX(t *testing.T) {
+	if runtime.GOOS == "plan9" {
+		t.Skip("see golang.org/issue/7237")
+	}
+
+	checker := &syncChecker{
+		waitCall: make(chan int, 10),
+		called:   make(chan int, 10),
+	}
+
+	checker.waitCall <- 1
+	trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr", false)
+	if err != nil {
+		t.Fatalf("handshakePair: %v", err)
+	}
+	<-checker.called
+
+	t.Cleanup(func() {
+		trC.Close()
+		trS.Close()
+	})
+
+	// Throw away the msgExtInfo packet sent during the handshake by the server.
+	_, err = trC.readPacket()
+	if err != nil {
+		t.Fatalf("readPacket failed: %s", err)
+	}
+
+	// Close the handshake transports before checking the sequence numbers to
+	// avoid races.
+	trC.Close()
+	trS.Close()
+
+	// Check the sequence number counters: we reset after msgNewKeys, but
+	// then the server immediately writes msgExtInfo, and we close the
+	// transports, so we expect read 2, write 0 on the client and read 1, write 1
+	// on the server.
+	if trC.conn.(*transport).reader.seqNum != 2 || trC.conn.(*transport).writer.seqNum != 0 ||
+		trS.conn.(*transport).reader.seqNum != 1 || trS.conn.(*transport).writer.seqNum != 1 {
+		t.Errorf(
+			"unexpected sequence counters:\nclient: reader %d (expected 2), writer %d (expected 0)\nserver: reader %d (expected 1), writer %d (expected 1)",
+			trC.conn.(*transport).reader.seqNum,
+			trC.conn.(*transport).writer.seqNum,
+			trS.conn.(*transport).reader.seqNum,
+			trS.conn.(*transport).writer.seqNum,
+		)
+	}
+}
+
+func TestStrictKEXResetSeqSuccessiveKEX(t *testing.T) {
+	if runtime.GOOS == "plan9" {
+		t.Skip("see golang.org/issue/7237")
+	}
+
+	checker := &syncChecker{
+		waitCall: make(chan int, 10),
+		called:   make(chan int, 10),
+	}
+
+	checker.waitCall <- 1
+	trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr", false)
+	if err != nil {
+		t.Fatalf("handshakePair: %v", err)
+	}
+	<-checker.called
+
+	t.Cleanup(func() {
+		trC.Close()
+		trS.Close()
+	})
+
+	// Throw away the msgExtInfo packet sent during the handshake by the server.
+	_, err = trC.readPacket()
+	if err != nil {
+		t.Fatalf("readPacket failed: %s", err)
+	}
+
+	// Write and read five packets on either side to bump the sequence numbers.
+	for i := 0; i < 5; i++ {
+		if err := trC.writePacket([]byte{msgRequestSuccess}); err != nil {
+			t.Fatalf("writePacket failed: %s", err)
+		}
+		if _, err := trS.readPacket(); err != nil {
+			t.Fatalf("readPacket failed: %s", err)
+		}
+		if err := trS.writePacket([]byte{msgRequestSuccess}); err != nil {
+			t.Fatalf("writePacket failed: %s", err)
+		}
+		if _, err := trC.readPacket(); err != nil {
+			t.Fatalf("readPacket failed: %s", err)
+		}
+	}
+
+	// Request a key exchange, which should cause the sequence numbers to reset.
+	checker.waitCall <- 1
+	trC.requestKeyExchange()
+	<-checker.called
+
+	// Write a packet on the server and read it on the client, to verify that the key change has
+	// actually happened, since the HostKeyCallback is called _during_ the handshake and so isn't
+	// indicative of the handshake finishing.
+	dummyPacket := []byte{99}
+	if err := trS.writePacket(dummyPacket); err != nil {
+		t.Fatalf("writePacket failed: %s", err)
+	}
+	if p, err := trC.readPacket(); err != nil {
+		t.Fatalf("readPacket failed: %s", err)
+	} else if !bytes.Equal(p, dummyPacket) {
+		t.Fatalf("unexpected packet: got %x, want %x", p, dummyPacket)
+	}
+
+	// Close the handshake transports before checking the sequence numbers to
+	// avoid races.
+ trC.Close() + trS.Close() + + if trC.conn.(*transport).reader.seqNum != 2 || trC.conn.(*transport).writer.seqNum != 0 || + trS.conn.(*transport).reader.seqNum != 1 || trS.conn.(*transport).writer.seqNum != 1 { + t.Errorf( + "unexpected sequence counters:\nclient: reader %d (expected 2), writer %d (expected 0)\nserver: reader %d (expected 1), writer %d (expected 1)", + trC.conn.(*transport).reader.seqNum, + trC.conn.(*transport).writer.seqNum, + trS.conn.(*transport).reader.seqNum, + trS.conn.(*transport).writer.seqNum, + ) + } +} + +func TestSeqNumIncrease(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/7237") + } + + checker := &syncChecker{ + waitCall: make(chan int, 10), + called: make(chan int, 10), + } + + checker.waitCall <- 1 + trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr", false) + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + <-checker.called + + t.Cleanup(func() { + trC.Close() + trS.Close() + }) + + // Throw away the msgExtInfo packet sent during the handshake by the server + _, err = trC.readPacket() + if err != nil { + t.Fatalf("readPacket failed: %s", err) + } + + // write and read five packets on either side to bump the sequence numbers + for i := 0; i < 5; i++ { + if err := trC.writePacket([]byte{msgRequestSuccess}); err != nil { + t.Fatalf("writePacket failed: %s", err) + } + if _, err := trS.readPacket(); err != nil { + t.Fatalf("readPacket failed: %s", err) + } + if err := trS.writePacket([]byte{msgRequestSuccess}); err != nil { + t.Fatalf("writePacket failed: %s", err) + } + if _, err := trC.readPacket(); err != nil { + t.Fatalf("readPacket failed: %s", err) + } + } + + // close the handshake transports before checking the sequence number to + // avoid races. 
+	trC.Close()
+	trS.Close()
+
+	if trC.conn.(*transport).reader.seqNum != 7 || trC.conn.(*transport).writer.seqNum != 5 ||
+		trS.conn.(*transport).reader.seqNum != 6 || trS.conn.(*transport).writer.seqNum != 6 {
+		t.Errorf(
+			"unexpected sequence counters:\nclient: reader %d (expected 7), writer %d (expected 5)\nserver: reader %d (expected 6), writer %d (expected 6)",
+			trC.conn.(*transport).reader.seqNum,
+			trC.conn.(*transport).writer.seqNum,
+			trS.conn.(*transport).reader.seqNum,
+			trS.conn.(*transport).writer.seqNum,
+		)
+	}
+}
+
+func TestStrictKEXUnexpectedMsg(t *testing.T) {
+	if runtime.GOOS == "plan9" {
+		t.Skip("see golang.org/issue/7237")
+	}
+
+	// Check that unexpected messages during the handshake cause failure.
+	_, _, err := handshakePair(&ClientConfig{HostKeyCallback: func(hostname string, remote net.Addr, key PublicKey) error { return nil }}, "addr", true)
+	if err == nil {
+		t.Fatal("handshake should fail when there are unexpected messages during the handshake")
+	}
+
+	trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: func(hostname string, remote net.Addr, key PublicKey) error { return nil }}, "addr", false)
+	if err != nil {
+		t.Fatalf("handshake failed: %s", err)
+	}
+
+	// Check that ignore/debug packets are still ignored outside of the handshake.
+	if err := trC.writePacket([]byte{msgIgnore}); err != nil {
+		t.Fatalf("writePacket failed: %s", err)
+	}
+	if err := trC.writePacket([]byte{msgDebug}); err != nil {
+		t.Fatalf("writePacket failed: %s", err)
+	}
+	dummyPacket := []byte{99}
+	if err := trC.writePacket(dummyPacket); err != nil {
+		t.Fatalf("writePacket failed: %s", err)
+	}
+
+	if p, err := trS.readPacket(); err != nil {
+		t.Fatalf("readPacket failed: %s", err)
+	} else if !bytes.Equal(p, dummyPacket) {
+		t.Fatalf("unexpected packet: got %x, want %x", p, dummyPacket)
+	}
+}
+
+func TestStrictKEXMixed(t *testing.T) {
+	// Test that we still support a mixed connection, where one side sends kex-strict but the other
+	// side doesn't.
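+	// Strict KEX is negotiated through the pseudo kex algorithms
+	// "kex-strict-c-v00@openssh.com" (client) and "kex-strict-s-v00@openssh.com"
+	// (server) carried in the KEXINIT algorithm list; the hand-rolled
+	// kexInitMsg below deliberately omits the server-side marker.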
+ + a, b, err := netPipe() + if err != nil { + t.Fatalf("netPipe failed: %s", err) + } + + var trC, trS keyingTransport + + trC = newTransport(a, rand.Reader, true) + trS = newTransport(b, rand.Reader, false) + trS = addNoiseTransport(trS) + + clientConf := &ClientConfig{HostKeyCallback: func(hostname string, remote net.Addr, key PublicKey) error { return nil }} + clientConf.SetDefaults() + + v := []byte("version") + client := newClientTransport(trC, v, v, clientConf, "addr", a.RemoteAddr()) + + serverConf := &ServerConfig{} + serverConf.AddHostKey(testSigners["ecdsa"]) + serverConf.AddHostKey(testSigners["rsa"]) + serverConf.SetDefaults() + + transport := newHandshakeTransport(trS, &serverConf.Config, []byte("version"), []byte("version")) + transport.hostKeys = serverConf.hostKeys + transport.publicKeyAuthAlgorithms = serverConf.PublicKeyAuthAlgorithms + + readOneFailure := make(chan error, 1) + go func() { + if _, err := transport.readOnePacket(true); err != nil { + readOneFailure <- err + } + }() + + // Basically sendKexInit, but without the kex-strict extension algorithm + msg := &kexInitMsg{ + KexAlgos: transport.config.KeyExchanges, + CiphersClientServer: transport.config.Ciphers, + CiphersServerClient: transport.config.Ciphers, + MACsClientServer: transport.config.MACs, + MACsServerClient: transport.config.MACs, + CompressionClientServer: supportedCompressions, + CompressionServerClient: supportedCompressions, + ServerHostKeyAlgos: []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA}, + } + packet := Marshal(msg) + // writePacket destroys the contents, so save a copy. + packetCopy := make([]byte, len(packet)) + copy(packetCopy, packet) + if err := transport.pushPacket(packetCopy); err != nil { + t.Fatalf("pushPacket: %s", err) + } + transport.sentInitMsg = msg + transport.sentInitPacket = packet + + if err := transport.getWriteError(); err != nil { + t.Fatalf("getWriteError failed: %s", err) + } + var request *pendingKex + select { + case err = <-readOneFailure: + t.Fatalf("server readOnePacket failed: %s", err) + case request = <-transport.startKex: + break + } + + // We expect the following calls to fail if the side which does not support + // kex-strict sends unexpected/ignored packets during the handshake, even if + // the other side does support kex-strict. + + if err := transport.enterKeyExchange(request.otherInit); err != nil { + t.Fatalf("enterKeyExchange failed: %s", err) + } + if err := client.waitSession(); err != nil { + t.Fatalf("client.waitSession: %v", err) + } +} diff --git a/tempfork/sshtest/ssh/kex.go b/tempfork/sshtest/ssh/kex.go new file mode 100644 index 000000000..8a05f7990 --- /dev/null +++ b/tempfork/sshtest/ssh/kex.go @@ -0,0 +1,786 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/subtle" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + + "golang.org/x/crypto/curve25519" +) + +const ( + kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" + kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" + kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256" + kexAlgoDH16SHA512 = "diffie-hellman-group16-sha512" + kexAlgoECDH256 = "ecdh-sha2-nistp256" + kexAlgoECDH384 = "ecdh-sha2-nistp384" + kexAlgoECDH521 = "ecdh-sha2-nistp521" + kexAlgoCurve25519SHA256LibSSH = "curve25519-sha256@libssh.org" + kexAlgoCurve25519SHA256 = "curve25519-sha256" + + // For the following kex only the client half contains a production + // ready implementation. The server half only consists of a minimal + // implementation to satisfy the automated tests. + kexAlgoDHGEXSHA1 = "diffie-hellman-group-exchange-sha1" + kexAlgoDHGEXSHA256 = "diffie-hellman-group-exchange-sha256" +) + +// kexResult captures the outcome of a key exchange. +type kexResult struct { + // Session hash. See also RFC 4253, section 8. + H []byte + + // Shared secret. See also RFC 4253, section 8. + K []byte + + // Host key as hashed into H. + HostKey []byte + + // Signature of H. + Signature []byte + + // A cryptographic hash function that matches the security + // level of the key exchange algorithm. It is used for + // calculating H, and for deriving keys from H and K. + Hash crypto.Hash + + // The session ID, which is the first H computed. This is used + // to derive key material inside the transport. + SessionID []byte +} + +// handshakeMagics contains data that is always included in the +// session hash. +type handshakeMagics struct { + clientVersion, serverVersion []byte + clientKexInit, serverKexInit []byte +} + +func (m *handshakeMagics) write(w io.Writer) { + writeString(w, m.clientVersion) + writeString(w, m.serverVersion) + writeString(w, m.clientKexInit) + writeString(w, m.serverKexInit) +} + +// kexAlgorithm abstracts different key exchange algorithms. +type kexAlgorithm interface { + // Server runs server-side key agreement, signing the result + // with a hostkey. algo is the negotiated algorithm, and may + // be a certificate type. + Server(p packetConn, rand io.Reader, magics *handshakeMagics, s AlgorithmSigner, algo string) (*kexResult, error) + + // Client runs the client-side key agreement. Caller is + // responsible for verifying the host key signature. + Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) +} + +// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. 
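+// pMinus1 caches p-1 so that peer public values can be checked to lie
+// strictly between 1 and p-1 before exponentiation.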
+type dhGroup struct { + g, p, pMinus1 *big.Int + hashFunc crypto.Hash +} + +func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { + if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") + } + return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil +} + +func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { + var x *big.Int + for { + var err error + if x, err = rand.Int(randSource, group.pMinus1); err != nil { + return nil, err + } + if x.Sign() > 0 { + break + } + } + + X := new(big.Int).Exp(group.g, x, group.p) + kexDHInit := kexDHInitMsg{ + X: X, + } + if err := c.writePacket(Marshal(&kexDHInit)); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexDHReply kexDHReplyMsg + if err = Unmarshal(packet, &kexDHReply); err != nil { + return nil, err + } + + ki, err := group.diffieHellman(kexDHReply.Y, x) + if err != nil { + return nil, err + } + + h := group.hashFunc.New() + magics.write(h) + writeString(h, kexDHReply.HostKey) + writeInt(h, X) + writeInt(h, kexDHReply.Y) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: kexDHReply.HostKey, + Signature: kexDHReply.Signature, + Hash: group.hashFunc, + }, nil +} + +func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return + } + var kexDHInit kexDHInitMsg + if err = Unmarshal(packet, &kexDHInit); err != nil { + return + } + + var y *big.Int + for { + if y, err = rand.Int(randSource, group.pMinus1); err != nil { + return + } + if y.Sign() > 0 { + break + } + } + + Y := new(big.Int).Exp(group.g, y, group.p) + ki, err := group.diffieHellman(kexDHInit.X, y) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := group.hashFunc.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeInt(h, kexDHInit.X) + writeInt(h, Y) + + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. + sig, err := signAndMarshal(priv, randSource, H, algo) + if err != nil { + return nil, err + } + + kexDHReply := kexDHReplyMsg{ + HostKey: hostKeyBytes, + Y: Y, + Signature: sig, + } + packet = Marshal(&kexDHReply) + + err = c.writePacket(packet) + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: group.hashFunc, + }, err +} + +// ecdh performs Elliptic Curve Diffie-Hellman key exchange as +// described in RFC 5656, section 4. 
+type ecdh struct { + curve elliptic.Curve +} + +func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + kexInit := kexECDHInitMsg{ + ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), + } + + serialized := Marshal(&kexInit) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + + x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) + if err != nil { + return nil, err + } + + // generate shared secret + secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kexInit.ClientPubKey) + writeString(h, reply.EphemeralPubKey) + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: ecHash(kex.curve), + }, nil +} + +// unmarshalECKey parses and checks an EC key. +func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { + x, y = elliptic.Unmarshal(curve, pubkey) + if x == nil { + return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") + } + if !validateECPublicKey(curve, x, y) { + return nil, nil, errors.New("ssh: public key not on curve") + } + return x, y, nil +} + +// validateECPublicKey checks that the point is a valid public key for +// the given curve. See [SEC1], 3.2.2 +func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { + if x.Sign() == 0 && y.Sign() == 0 { + return false + } + + if x.Cmp(curve.Params().P) >= 0 { + return false + } + + if y.Cmp(curve.Params().P) >= 0 { + return false + } + + if !curve.IsOnCurve(x, y) { + return false + } + + // We don't check if N * PubKey == 0, since + // + // - the NIST curves have cofactor = 1, so this is implicit. + // (We don't foresee an implementation that supports non NIST + // curves) + // + // - for ephemeral keys, we don't need to worry about small + // subgroup attacks. + return true +} + +func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexECDHInit kexECDHInitMsg + if err = Unmarshal(packet, &kexECDHInit); err != nil { + return nil, err + } + + clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) + if err != nil { + return nil, err + } + + // We could cache this key across multiple users/multiple + // connection attempts, but the benefit is small. OpenSSH + // generates a new key for each incoming connection. 
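+	// The exchange hash computed below follows RFC 5656, section 4:
+	// H = hash(V_C || V_S || I_C || I_S || K_S || Q_C || Q_S || K), where the
+	// magics carry the version strings and KEXINIT payloads, K_S is the host
+	// key blob, Q_C and Q_S are the ephemeral public keys, and K is the
+	// shared secret.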
+ ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) + + // generate shared secret + secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexECDHInit.ClientPubKey) + writeString(h, serializedEphKey) + + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. + sig, err := signAndMarshal(priv, rand, H, algo) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: serializedEphKey, + HostKey: hostKeyBytes, + Signature: sig, + } + + serialized := Marshal(&reply) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + return &kexResult{ + H: H, + K: K, + HostKey: reply.HostKey, + Signature: sig, + Hash: ecHash(kex.curve), + }, nil +} + +// ecHash returns the hash to match the given elliptic curve, see RFC +// 5656, section 6.2.1 +func ecHash(curve elliptic.Curve) crypto.Hash { + bitSize := curve.Params().BitSize + switch { + case bitSize <= 256: + return crypto.SHA256 + case bitSize <= 384: + return crypto.SHA384 + } + return crypto.SHA512 +} + +var kexAlgoMap = map[string]kexAlgorithm{} + +func init() { + // This is the group called diffie-hellman-group1-sha1 in + // RFC 4253 and Oakley Group 2 in RFC 2409. + p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) + kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + hashFunc: crypto.SHA1, + } + + // This are the groups called diffie-hellman-group14-sha1 and + // diffie-hellman-group14-sha256 in RFC 4253 and RFC 8268, + // and Oakley Group 14 in RFC 3526. + p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) + group14 := &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + } + + kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + g: group14.g, p: group14.p, pMinus1: group14.pMinus1, + hashFunc: crypto.SHA1, + } + kexAlgoMap[kexAlgoDH14SHA256] = &dhGroup{ + g: group14.g, p: group14.p, pMinus1: group14.pMinus1, + hashFunc: crypto.SHA256, + } + + // This is the group called diffie-hellman-group16-sha512 in RFC + // 8268 and Oakley Group 16 in RFC 3526. 
+ p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF", 16) + + kexAlgoMap[kexAlgoDH16SHA512] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + hashFunc: crypto.SHA512, + } + + kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} + kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} + kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} + kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} + kexAlgoMap[kexAlgoCurve25519SHA256LibSSH] = &curve25519sha256{} + kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} + kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} +} + +// curve25519sha256 implements the curve25519-sha256 (formerly known as +// curve25519-sha256@libssh.org) key exchange method, as described in RFC 8731. +type curve25519sha256 struct{} + +type curve25519KeyPair struct { + priv [32]byte + pub [32]byte +} + +func (kp *curve25519KeyPair) generate(rand io.Reader) error { + if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { + return err + } + curve25519.ScalarBaseMult(&kp.pub, &kp.priv) + return nil +} + +// curve25519Zeros is just an array of 32 zero bytes so that we have something +// convenient to compare against in order to reject curve25519 points with the +// wrong order. 
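+// (RFC 8731 requires aborting the exchange if the derived shared secret is
+// all zeros; the constant-time comparisons against this value implement that
+// check.)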
+var curve25519Zeros [32]byte + +func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + if len(reply.EphemeralPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var servPub, secret [32]byte + copy(servPub[:], reply.EphemeralPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &servPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kp.pub[:]) + writeString(h, reply.EphemeralPubKey) + + ki := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: crypto.SHA256, + }, nil +} + +func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return + } + var kexInit kexECDHInitMsg + if err = Unmarshal(packet, &kexInit); err != nil { + return + } + + if len(kexInit.ClientPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + + var clientPub, secret [32]byte + copy(clientPub[:], kexInit.ClientPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &clientPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexInit.ClientPubKey) + writeString(h, kp.pub[:]) + + ki := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + H := h.Sum(nil) + + sig, err := signAndMarshal(priv, rand, H, algo) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: kp.pub[:], + HostKey: hostKeyBytes, + Signature: sig, + } + if err := c.writePacket(Marshal(&reply)); err != nil { + return nil, err + } + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA256, + }, nil +} + +// dhGEXSHA implements the diffie-hellman-group-exchange-sha1 and +// diffie-hellman-group-exchange-sha256 key agreement protocols, +// as described in RFC 4419 +type dhGEXSHA struct { + hashFunc crypto.Hash +} + +const ( + dhGroupExchangeMinimumBits = 2048 + dhGroupExchangePreferredBits = 2048 + dhGroupExchangeMaximumBits = 8192 +) + +func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { + // Send GexRequest + kexDHGexRequest := kexDHGexRequestMsg{ + MinBits: dhGroupExchangeMinimumBits, + PreferedBits: dhGroupExchangePreferredBits, + MaxBits: dhGroupExchangeMaximumBits, + } + if err := 
c.writePacket(Marshal(&kexDHGexRequest)); err != nil { + return nil, err + } + + // Receive GexGroup + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var msg kexDHGexGroupMsg + if err = Unmarshal(packet, &msg); err != nil { + return nil, err + } + + // reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits + if msg.P.BitLen() < dhGroupExchangeMinimumBits || msg.P.BitLen() > dhGroupExchangeMaximumBits { + return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", msg.P.BitLen()) + } + + // Check if g is safe by verifying that 1 < g < p-1 + pMinusOne := new(big.Int).Sub(msg.P, bigOne) + if msg.G.Cmp(bigOne) <= 0 || msg.G.Cmp(pMinusOne) >= 0 { + return nil, fmt.Errorf("ssh: server provided gex g is not safe") + } + + // Send GexInit + pHalf := new(big.Int).Rsh(msg.P, 1) + x, err := rand.Int(randSource, pHalf) + if err != nil { + return nil, err + } + X := new(big.Int).Exp(msg.G, x, msg.P) + kexDHGexInit := kexDHGexInitMsg{ + X: X, + } + if err := c.writePacket(Marshal(&kexDHGexInit)); err != nil { + return nil, err + } + + // Receive GexReply + packet, err = c.readPacket() + if err != nil { + return nil, err + } + + var kexDHGexReply kexDHGexReplyMsg + if err = Unmarshal(packet, &kexDHGexReply); err != nil { + return nil, err + } + + if kexDHGexReply.Y.Cmp(bigOne) <= 0 || kexDHGexReply.Y.Cmp(pMinusOne) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") + } + kInt := new(big.Int).Exp(kexDHGexReply.Y, x, msg.P) + + // Check if k is safe by verifying that k > 1 and k < p - 1 + if kInt.Cmp(bigOne) <= 0 || kInt.Cmp(pMinusOne) >= 0 { + return nil, fmt.Errorf("ssh: derived k is not safe") + } + + h := gex.hashFunc.New() + magics.write(h) + writeString(h, kexDHGexReply.HostKey) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) + writeInt(h, msg.P) + writeInt(h, msg.G) + writeInt(h, X) + writeInt(h, kexDHGexReply.Y) + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: kexDHGexReply.HostKey, + Signature: kexDHGexReply.Signature, + Hash: gex.hashFunc, + }, nil +} + +// Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. +// +// This is a minimal implementation to satisfy the automated tests. +func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { + // Receive GexRequest + packet, err := c.readPacket() + if err != nil { + return + } + var kexDHGexRequest kexDHGexRequestMsg + if err = Unmarshal(packet, &kexDHGexRequest); err != nil { + return + } + + // Send GexGroup + // This is the group called diffie-hellman-group14-sha1 in RFC + // 4253 and Oakley Group 14 in RFC 3526. 
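+	// A production server would instead pick a group whose size honours the
+	// client's min/preferred/max bits per RFC 4419; a fixed 2048-bit group is
+	// sufficient for the tests this implementation serves.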
+ p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) + g := big.NewInt(2) + + msg := &kexDHGexGroupMsg{ + P: p, + G: g, + } + if err := c.writePacket(Marshal(msg)); err != nil { + return nil, err + } + + // Receive GexInit + packet, err = c.readPacket() + if err != nil { + return + } + var kexDHGexInit kexDHGexInitMsg + if err = Unmarshal(packet, &kexDHGexInit); err != nil { + return + } + + pHalf := new(big.Int).Rsh(p, 1) + + y, err := rand.Int(randSource, pHalf) + if err != nil { + return + } + Y := new(big.Int).Exp(g, y, p) + + pMinusOne := new(big.Int).Sub(p, bigOne) + if kexDHGexInit.X.Cmp(bigOne) <= 0 || kexDHGexInit.X.Cmp(pMinusOne) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") + } + kInt := new(big.Int).Exp(kexDHGexInit.X, y, p) + + hostKeyBytes := priv.PublicKey().Marshal() + + h := gex.hashFunc.New() + magics.write(h) + writeString(h, hostKeyBytes) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) + writeInt(h, p) + writeInt(h, g) + writeInt(h, kexDHGexInit.X) + writeInt(h, Y) + + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. + sig, err := signAndMarshal(priv, randSource, H, algo) + if err != nil { + return nil, err + } + + kexDHGexReply := kexDHGexReplyMsg{ + HostKey: hostKeyBytes, + Y: Y, + Signature: sig, + } + packet = Marshal(&kexDHGexReply) + + err = c.writePacket(packet) + + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: gex.hashFunc, + }, err +} diff --git a/tempfork/sshtest/ssh/kex_test.go b/tempfork/sshtest/ssh/kex_test.go new file mode 100644 index 000000000..cb7f66a50 --- /dev/null +++ b/tempfork/sshtest/ssh/kex_test.go @@ -0,0 +1,106 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Key exchange tests. + +import ( + "crypto/rand" + "fmt" + "reflect" + "sync" + "testing" +) + +// Runs multiple key exchanges concurrent to detect potential data races with +// kex obtained from the global kexAlgoMap. +// This test needs to be executed using the race detector in order to detect +// race conditions. 
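+// Run it with the race detector enabled, for example: go test -race -run TestKexes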
+func TestKexes(t *testing.T) { + type kexResultErr struct { + result *kexResult + err error + } + + for name, kex := range kexAlgoMap { + t.Run(name, func(t *testing.T) { + wg := sync.WaitGroup{} + for i := 0; i < 3; i++ { + wg.Add(1) + go func() { + defer wg.Done() + a, b := memPipe() + + s := make(chan kexResultErr, 1) + c := make(chan kexResultErr, 1) + var magics handshakeMagics + go func() { + r, e := kex.Client(a, rand.Reader, &magics) + a.Close() + c <- kexResultErr{r, e} + }() + go func() { + r, e := kex.Server(b, rand.Reader, &magics, testSigners["ecdsa"].(AlgorithmSigner), testSigners["ecdsa"].PublicKey().Type()) + b.Close() + s <- kexResultErr{r, e} + }() + + clientRes := <-c + serverRes := <-s + if clientRes.err != nil { + t.Errorf("client: %v", clientRes.err) + } + if serverRes.err != nil { + t.Errorf("server: %v", serverRes.err) + } + if !reflect.DeepEqual(clientRes.result, serverRes.result) { + t.Errorf("kex %q: mismatch %#v, %#v", name, clientRes.result, serverRes.result) + } + }() + } + wg.Wait() + }) + } +} + +func BenchmarkKexes(b *testing.B) { + type kexResultErr struct { + result *kexResult + err error + } + + for name, kex := range kexAlgoMap { + b.Run(name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + t1, t2 := memPipe() + + s := make(chan kexResultErr, 1) + c := make(chan kexResultErr, 1) + var magics handshakeMagics + + go func() { + r, e := kex.Client(t1, rand.Reader, &magics) + t1.Close() + c <- kexResultErr{r, e} + }() + go func() { + r, e := kex.Server(t2, rand.Reader, &magics, testSigners["ecdsa"].(AlgorithmSigner), testSigners["ecdsa"].PublicKey().Type()) + t2.Close() + s <- kexResultErr{r, e} + }() + + clientRes := <-c + serverRes := <-s + + if clientRes.err != nil { + panic(fmt.Sprintf("client: %v", clientRes.err)) + } + if serverRes.err != nil { + panic(fmt.Sprintf("server: %v", serverRes.err)) + } + } + }) + } +} diff --git a/tempfork/sshtest/ssh/keys.go b/tempfork/sshtest/ssh/keys.go new file mode 100644 index 000000000..4a3d769d9 --- /dev/null +++ b/tempfork/sshtest/ssh/keys.go @@ -0,0 +1,1626 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/md5" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "strings" +) + +// Public key algorithms names. These values can appear in PublicKey.Type, +// ClientConfig.HostKeyAlgorithms, Signature.Format, or as AlgorithmSigner +// arguments. +const ( + KeyAlgoRSA = "ssh-rsa" + KeyAlgoDSA = "ssh-dss" + KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" + KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com" + KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" + KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" + KeyAlgoED25519 = "ssh-ed25519" + KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" + + // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, not + // public key formats, so they can't appear as a PublicKey.Type. The + // corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2. + KeyAlgoRSASHA256 = "rsa-sha2-256" + KeyAlgoRSASHA512 = "rsa-sha2-512" +) + +const ( + // Deprecated: use KeyAlgoRSA. + SigAlgoRSA = KeyAlgoRSA + // Deprecated: use KeyAlgoRSASHA256. 
+ SigAlgoRSASHA2256 = KeyAlgoRSASHA256 + // Deprecated: use KeyAlgoRSASHA512. + SigAlgoRSASHA2512 = KeyAlgoRSASHA512 +) + +// parsePubKey parses a public key of the given algorithm. +// Use ParsePublicKey for keys with prepended algorithm. +func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { + switch algo { + case KeyAlgoRSA: + return parseRSA(in) + case KeyAlgoDSA: + return parseDSA(in) + case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: + return parseECDSA(in) + case KeyAlgoSKECDSA256: + return parseSKECDSA(in) + case KeyAlgoED25519: + return parseED25519(in) + case KeyAlgoSKED25519: + return parseSKEd25519(in) + case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: + cert, err := parseCert(in, certKeyAlgoNames[algo]) + if err != nil { + return nil, nil, err + } + return cert, nil, nil + } + return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) +} + +// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format +// (see sshd(8) manual page) once the options and key type fields have been +// removed. +func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { + in = bytes.TrimSpace(in) + + i := bytes.IndexAny(in, " \t") + if i == -1 { + i = len(in) + } + base64Key := in[:i] + + key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) + n, err := base64.StdEncoding.Decode(key, base64Key) + if err != nil { + return nil, "", err + } + key = key[:n] + out, err = ParsePublicKey(key) + if err != nil { + return nil, "", err + } + comment = string(bytes.TrimSpace(in[i:])) + return out, comment, nil +} + +// ParseKnownHosts parses an entry in the format of the known_hosts file. +// +// The known_hosts format is documented in the sshd(8) manual page. This +// function will parse a single entry from in. On successful return, marker +// will contain the optional marker value (i.e. "cert-authority" or "revoked") +// or else be empty, hosts will contain the hosts that this entry matches, +// pubKey will contain the public key and comment will contain any trailing +// comment at the end of the line. See the sshd(8) manual page for the various +// forms that a host string can take. +// +// The unparsed remainder of the input will be returned in rest. This function +// can be called repeatedly to parse multiple entries. +// +// If no entries were found in the input then err will be io.EOF. Otherwise a +// non-nil err value indicates a parse error. +func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { + for len(in) > 0 { + end := bytes.IndexByte(in, '\n') + if end != -1 { + rest = in[end+1:] + in = in[:end] + } else { + rest = nil + } + + end = bytes.IndexByte(in, '\r') + if end != -1 { + in = in[:end] + } + + in = bytes.TrimSpace(in) + if len(in) == 0 || in[0] == '#' { + in = rest + continue + } + + i := bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + // Strip out the beginning of the known_host key. + // This is either an optional marker or a (set of) hostname(s). 
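+		// A known_hosts entry has the shape
+		//   [@marker] host1,host2 keytype base64-key [comment]
+		// e.g. "@cert-authority *.example.com ssh-ed25519 AAAA... office-ca",
+		// so splitting on whitespace should yield between 3 and 5 fields.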
+ keyFields := bytes.Fields(in) + if len(keyFields) < 3 || len(keyFields) > 5 { + return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") + } + + // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated + // list of hosts + marker := "" + if keyFields[0][0] == '@' { + marker = string(keyFields[0][1:]) + keyFields = keyFields[1:] + } + + hosts := string(keyFields[0]) + // keyFields[1] contains the key type (e.g. “ssh-rsa”). + // However, that information is duplicated inside the + // base64-encoded key and so is ignored here. + + key := bytes.Join(keyFields[2:], []byte(" ")) + if pubKey, comment, err = parseAuthorizedKey(key); err != nil { + return "", nil, nil, "", nil, err + } + + return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil + } + + return "", nil, nil, "", nil, io.EOF +} + +// ParseAuthorizedKey parses a public key from an authorized_keys +// file used in OpenSSH according to the sshd(8) manual page. +func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { + for len(in) > 0 { + end := bytes.IndexByte(in, '\n') + if end != -1 { + rest = in[end+1:] + in = in[:end] + } else { + rest = nil + } + + end = bytes.IndexByte(in, '\r') + if end != -1 { + in = in[:end] + } + + in = bytes.TrimSpace(in) + if len(in) == 0 || in[0] == '#' { + in = rest + continue + } + + i := bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + return out, comment, options, rest, nil + } + + // No key type recognised. Maybe there's an options field at + // the beginning. + var b byte + inQuote := false + var candidateOptions []string + optionStart := 0 + for i, b = range in { + isEnd := !inQuote && (b == ' ' || b == '\t') + if (b == ',' && !inQuote) || isEnd { + if i-optionStart > 0 { + candidateOptions = append(candidateOptions, string(in[optionStart:i])) + } + optionStart = i + 1 + } + if isEnd { + break + } + if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { + inQuote = !inQuote + } + } + for i < len(in) && (in[i] == ' ' || in[i] == '\t') { + i++ + } + if i == len(in) { + // Invalid line: unmatched quote + in = rest + continue + } + + in = in[i:] + i = bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + options = candidateOptions + return out, comment, options, rest, nil + } + + in = rest + continue + } + + return nil, "", nil, nil, errors.New("ssh: no key found") +} + +// ParsePublicKey parses an SSH public key formatted for use in +// the SSH wire protocol according to RFC 4253, section 6.6. +func ParsePublicKey(in []byte) (out PublicKey, err error) { + algo, in, ok := parseString(in) + if !ok { + return nil, errShortRead + } + var rest []byte + out, rest, err = parsePubKey(in, string(algo)) + if len(rest) > 0 { + return nil, errors.New("ssh: trailing junk in public key") + } + + return out, err +} + +// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH +// authorized_keys file. The return value ends with newline. +func MarshalAuthorizedKey(key PublicKey) []byte { + b := &bytes.Buffer{} + b.WriteString(key.Type()) + b.WriteByte(' ') + e := base64.NewEncoder(base64.StdEncoding, b) + e.Write(key.Marshal()) + e.Close() + b.WriteByte('\n') + return b.Bytes() +} + +// MarshalPrivateKey returns a PEM block with the private key serialized in the +// OpenSSH format. 
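+// The returned block can be serialized with pem.EncodeToMemory to produce a
+// "-----BEGIN OPENSSH PRIVATE KEY-----" file.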
+func MarshalPrivateKey(key crypto.PrivateKey, comment string) (*pem.Block, error) { + return marshalOpenSSHPrivateKey(key, comment, unencryptedOpenSSHMarshaler) +} + +// PublicKey represents a public key using an unspecified algorithm. +// +// Some PublicKeys provided by this package also implement CryptoPublicKey. +type PublicKey interface { + // Type returns the key format name, e.g. "ssh-rsa". + Type() string + + // Marshal returns the serialized key data in SSH wire format, with the name + // prefix. To unmarshal the returned data, use the ParsePublicKey function. + Marshal() []byte + + // Verify that sig is a signature on the given data using this key. This + // method will hash the data appropriately first. sig.Format is allowed to + // be any signature algorithm compatible with the key type, the caller + // should check if it has more stringent requirements. + Verify(data []byte, sig *Signature) error +} + +// CryptoPublicKey, if implemented by a PublicKey, +// returns the underlying crypto.PublicKey form of the key. +type CryptoPublicKey interface { + CryptoPublicKey() crypto.PublicKey +} + +// A Signer can create signatures that verify against a public key. +// +// Some Signers provided by this package also implement MultiAlgorithmSigner. +type Signer interface { + // PublicKey returns the associated PublicKey. + PublicKey() PublicKey + + // Sign returns a signature for the given data. This method will hash the + // data appropriately first. The signature algorithm is expected to match + // the key format returned by the PublicKey.Type method (and not to be any + // alternative algorithm supported by the key format). + Sign(rand io.Reader, data []byte) (*Signature, error) +} + +// An AlgorithmSigner is a Signer that also supports specifying an algorithm to +// use for signing. +// +// An AlgorithmSigner can't advertise the algorithms it supports, unless it also +// implements MultiAlgorithmSigner, so it should be prepared to be invoked with +// every algorithm supported by the public key format. +type AlgorithmSigner interface { + Signer + + // SignWithAlgorithm is like Signer.Sign, but allows specifying a desired + // signing algorithm. Callers may pass an empty string for the algorithm in + // which case the AlgorithmSigner will use a default algorithm. This default + // doesn't currently control any behavior in this package. + SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) +} + +// MultiAlgorithmSigner is an AlgorithmSigner that also reports the algorithms +// supported by that signer. +type MultiAlgorithmSigner interface { + AlgorithmSigner + + // Algorithms returns the available algorithms in preference order. The list + // must not be empty, and it must not include certificate types. + Algorithms() []string +} + +// NewSignerWithAlgorithms returns a signer restricted to the specified +// algorithms. The algorithms must be set in preference order. The list must not +// be empty, and it must not include certificate types. An error is returned if +// the specified algorithms are incompatible with the public key type. 
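+// For example, restricting an RSA signer to KeyAlgoRSASHA256 and
+// KeyAlgoRSASHA512 prevents it from producing legacy SHA-1 ssh-rsa
+// signatures.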
+func NewSignerWithAlgorithms(signer AlgorithmSigner, algorithms []string) (MultiAlgorithmSigner, error) { + if len(algorithms) == 0 { + return nil, errors.New("ssh: please specify at least one valid signing algorithm") + } + var signerAlgos []string + supportedAlgos := algorithmsForKeyFormat(underlyingAlgo(signer.PublicKey().Type())) + if s, ok := signer.(*multiAlgorithmSigner); ok { + signerAlgos = s.Algorithms() + } else { + signerAlgos = supportedAlgos + } + + for _, algo := range algorithms { + if !contains(supportedAlgos, algo) { + return nil, fmt.Errorf("ssh: algorithm %q is not supported for key type %q", + algo, signer.PublicKey().Type()) + } + if !contains(signerAlgos, algo) { + return nil, fmt.Errorf("ssh: algorithm %q is restricted for the provided signer", algo) + } + } + return &multiAlgorithmSigner{ + AlgorithmSigner: signer, + supportedAlgorithms: algorithms, + }, nil +} + +type multiAlgorithmSigner struct { + AlgorithmSigner + supportedAlgorithms []string +} + +func (s *multiAlgorithmSigner) Algorithms() []string { + return s.supportedAlgorithms +} + +func (s *multiAlgorithmSigner) isAlgorithmSupported(algorithm string) bool { + if algorithm == "" { + algorithm = underlyingAlgo(s.PublicKey().Type()) + } + for _, algo := range s.supportedAlgorithms { + if algorithm == algo { + return true + } + } + return false +} + +func (s *multiAlgorithmSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if !s.isAlgorithmSupported(algorithm) { + return nil, fmt.Errorf("ssh: algorithm %q is not supported: %v", algorithm, s.supportedAlgorithms) + } + return s.AlgorithmSigner.SignWithAlgorithm(rand, data, algorithm) +} + +type rsaPublicKey rsa.PublicKey + +func (r *rsaPublicKey) Type() string { + return "ssh-rsa" +} + +// parseRSA parses an RSA key according to RFC 4253, section 6.6. +func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + E *big.Int + N *big.Int + Rest []byte `ssh:"rest"` + } + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + if w.E.BitLen() > 24 { + return nil, nil, errors.New("ssh: exponent too large") + } + e := w.E.Int64() + if e < 3 || e&1 == 0 { + return nil, nil, errors.New("ssh: incorrect exponent") + } + + var key rsa.PublicKey + key.E = int(e) + key.N = w.N + return (*rsaPublicKey)(&key), w.Rest, nil +} + +func (r *rsaPublicKey) Marshal() []byte { + e := new(big.Int).SetInt64(int64(r.E)) + // RSA publickey struct layout should match the struct used by + // parseRSACert in the x/crypto/ssh/agent package. + wirekey := struct { + Name string + E *big.Int + N *big.Int + }{ + KeyAlgoRSA, + e, + r.N, + } + return Marshal(&wirekey) +} + +func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { + supportedAlgos := algorithmsForKeyFormat(r.Type()) + if !contains(supportedAlgos, sig.Format) { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) + } + hash := hashFuncs[sig.Format] + h := hash.New() + h.Write(data) + digest := h.Sum(nil) + + // Signatures in PKCS1v15 must match the key's modulus in + // length. However with SSH, some signers provide RSA + // signatures which are missing the MSB 0's of the bignum + // represented. With ssh-rsa signatures, this is encouraged by + // the spec (even though e.g. OpenSSH will give the full + // length unconditionally). With rsa-sha2-* signatures, the + // verifier is allowed to support these, even though they are + // out of spec. 
See RFC 4253 Section 6.6 for ssh-rsa and RFC + // 8332 Section 3 for rsa-sha2-* details. + // + // In practice: + // * OpenSSH always allows "short" signatures: + // https://github.com/openssh/openssh-portable/blob/V_9_8_P1/ssh-rsa.c#L526 + // but always generates padded signatures: + // https://github.com/openssh/openssh-portable/blob/V_9_8_P1/ssh-rsa.c#L439 + // + // * PuTTY versions 0.81 and earlier will generate short + // signatures for all RSA signature variants. Note that + // PuTTY is embedded in other software, such as WinSCP and + // FileZilla. At the time of writing, a patch has been + // applied to PuTTY to generate padded signatures for + // rsa-sha2-*, but not yet released: + // https://git.tartarus.org/?p=simon/putty.git;a=commitdiff;h=a5bcf3d384e1bf15a51a6923c3724cbbee022d8e + // + // * SSH.NET versions 2024.0.0 and earlier will generate short + // signatures for all RSA signature variants, fixed in 2024.1.0: + // https://github.com/sshnet/SSH.NET/releases/tag/2024.1.0 + // + // As a result, we pad these up to the key size by inserting + // leading 0's. + // + // Note that support for short signatures with rsa-sha2-* may + // be removed in the future due to such signatures not being + // allowed by the spec. + blob := sig.Blob + keySize := (*rsa.PublicKey)(r).Size() + if len(blob) < keySize { + padded := make([]byte, keySize) + copy(padded[keySize-len(blob):], blob) + blob = padded + } + return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, blob) +} + +func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*rsa.PublicKey)(r) +} + +type dsaPublicKey dsa.PublicKey + +func (k *dsaPublicKey) Type() string { + return "ssh-dss" +} + +func checkDSAParams(param *dsa.Parameters) error { + // SSH specifies FIPS 186-2, which only provided a single size + // (1024 bits) DSA key. FIPS 186-3 allows for larger key + // sizes, which would confuse SSH. + if l := param.P.BitLen(); l != 1024 { + return fmt.Errorf("ssh: unsupported DSA key size %d", l) + } + + return nil +} + +// parseDSA parses an DSA key according to RFC 4253, section 6.6. +func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + P, Q, G, Y *big.Int + Rest []byte `ssh:"rest"` + } + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + param := dsa.Parameters{ + P: w.P, + Q: w.Q, + G: w.G, + } + if err := checkDSAParams(¶m); err != nil { + return nil, nil, err + } + + key := &dsaPublicKey{ + Parameters: param, + Y: w.Y, + } + return key, w.Rest, nil +} + +func (k *dsaPublicKey) Marshal() []byte { + // DSA publickey struct layout should match the struct used by + // parseDSACert in the x/crypto/ssh/agent package. + w := struct { + Name string + P, Q, G, Y *big.Int + }{ + k.Type(), + k.P, + k.Q, + k.G, + k.Y, + } + + return Marshal(&w) +} + +func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + h := hashFuncs[sig.Format].New() + h.Write(data) + digest := h.Sum(nil) + + // Per RFC 4253, section 6.6, + // The value for 'dss_signature_blob' is encoded as a string containing + // r, followed by s (which are 160-bit integers, without lengths or + // padding, unsigned, and in network byte order). + // For DSS purposes, sig.Blob should be exactly 40 bytes in length. 
+ if len(sig.Blob) != 40 { + return errors.New("ssh: DSA signature parse error") + } + r := new(big.Int).SetBytes(sig.Blob[:20]) + s := new(big.Int).SetBytes(sig.Blob[20:]) + if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*dsa.PublicKey)(k) +} + +type dsaPrivateKey struct { + *dsa.PrivateKey +} + +func (k *dsaPrivateKey) PublicKey() PublicKey { + return (*dsaPublicKey)(&k.PrivateKey.PublicKey) +} + +func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { + return k.SignWithAlgorithm(rand, data, k.PublicKey().Type()) +} + +func (k *dsaPrivateKey) Algorithms() []string { + return []string{k.PublicKey().Type()} +} + +func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if algorithm != "" && algorithm != k.PublicKey().Type() { + return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) + } + + h := hashFuncs[k.PublicKey().Type()].New() + h.Write(data) + digest := h.Sum(nil) + r, s, err := dsa.Sign(rand, k.PrivateKey, digest) + if err != nil { + return nil, err + } + + sig := make([]byte, 40) + rb := r.Bytes() + sb := s.Bytes() + + copy(sig[20-len(rb):20], rb) + copy(sig[40-len(sb):], sb) + + return &Signature{ + Format: k.PublicKey().Type(), + Blob: sig, + }, nil +} + +type ecdsaPublicKey ecdsa.PublicKey + +func (k *ecdsaPublicKey) Type() string { + return "ecdsa-sha2-" + k.nistID() +} + +func (k *ecdsaPublicKey) nistID() string { + switch k.Params().BitSize { + case 256: + return "nistp256" + case 384: + return "nistp384" + case 521: + return "nistp521" + } + panic("ssh: unsupported ecdsa key size") +} + +type ed25519PublicKey ed25519.PublicKey + +func (k ed25519PublicKey) Type() string { + return KeyAlgoED25519 +} + +func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + if l := len(w.KeyBytes); l != ed25519.PublicKeySize { + return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) + } + + return ed25519PublicKey(w.KeyBytes), w.Rest, nil +} + +func (k ed25519PublicKey) Marshal() []byte { + w := struct { + Name string + KeyBytes []byte + }{ + KeyAlgoED25519, + []byte(k), + } + return Marshal(&w) +} + +func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + if l := len(k); l != ed25519.PublicKeySize { + return fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) + } + + if ok := ed25519.Verify(ed25519.PublicKey(k), b, sig.Blob); !ok { + return errors.New("ssh: signature did not verify") + } + + return nil +} + +func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { + return ed25519.PublicKey(k) +} + +func supportedEllipticCurve(curve elliptic.Curve) bool { + return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() +} + +// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. 
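+// The key-format name ("ecdsa-sha2-<curve>") has already been consumed by the
+// caller; what remains on the wire is the curve identifier (e.g. "nistp256")
+// as a string, followed by the encoded point Q as a string, matching the
+// struct unmarshaled below. elliptic.Unmarshal yields nil coordinates for a
+// malformed point, which is reported as an invalid curve point.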
+func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + Curve string + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := new(ecdsa.PublicKey) + + switch w.Curve { + case "nistp256": + key.Curve = elliptic.P256() + case "nistp384": + key.Curve = elliptic.P384() + case "nistp521": + key.Curve = elliptic.P521() + default: + return nil, nil, errors.New("ssh: unsupported curve") + } + + key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) + if key.X == nil || key.Y == nil { + return nil, nil, errors.New("ssh: invalid curve point") + } + return (*ecdsaPublicKey)(key), w.Rest, nil +} + +func (k *ecdsaPublicKey) Marshal() []byte { + // See RFC 5656, section 3.1. + keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) + // ECDSA publickey struct layout should match the struct used by + // parseECDSACert in the x/crypto/ssh/agent package. + w := struct { + Name string + ID string + Key []byte + }{ + k.Type(), + k.nistID(), + keyBytes, + } + + return Marshal(&w) +} + +func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + + h := hashFuncs[sig.Format].New() + h.Write(data) + digest := h.Sum(nil) + + // Per RFC 5656, section 3.1.2, + // The ecdsa_signature_blob value has the following specific encoding: + // mpint r + // mpint s + var ecSig struct { + R *big.Int + S *big.Int + } + + if err := Unmarshal(sig.Blob, &ecSig); err != nil { + return err + } + + if ecdsa.Verify((*ecdsa.PublicKey)(k), digest, ecSig.R, ecSig.S) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*ecdsa.PublicKey)(k) +} + +// skFields holds the additional fields present in U2F/FIDO2 signatures. +// See openssh/PROTOCOL.u2f 'SSH U2F Signatures' for details. +type skFields struct { + // Flags contains U2F/FIDO2 flags such as 'user present' + Flags byte + // Counter is a monotonic signature counter which can be + // used to detect concurrent use of a private key, should + // it be extracted from hardware. + Counter uint32 +} + +type skECDSAPublicKey struct { + // application is a URL-like string, typically "ssh:" for SSH. + // see openssh/PROTOCOL.u2f for details. + application string + ecdsa.PublicKey +} + +func (k *skECDSAPublicKey) Type() string { + return KeyAlgoSKECDSA256 +} + +func (k *skECDSAPublicKey) nistID() string { + return "nistp256" +} + +func parseSKECDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + Curve string + KeyBytes []byte + Application string + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := new(skECDSAPublicKey) + key.application = w.Application + + if w.Curve != "nistp256" { + return nil, nil, errors.New("ssh: unsupported curve") + } + key.Curve = elliptic.P256() + + key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) + if key.X == nil || key.Y == nil { + return nil, nil, errors.New("ssh: invalid curve point") + } + + return key, w.Rest, nil +} + +func (k *skECDSAPublicKey) Marshal() []byte { + // See RFC 5656, section 3.1. 
+ keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) + w := struct { + Name string + ID string + Key []byte + Application string + }{ + k.Type(), + k.nistID(), + keyBytes, + k.application, + } + + return Marshal(&w) +} + +func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + + h := hashFuncs[sig.Format].New() + h.Write([]byte(k.application)) + appDigest := h.Sum(nil) + + h.Reset() + h.Write(data) + dataDigest := h.Sum(nil) + + var ecSig struct { + R *big.Int + S *big.Int + } + if err := Unmarshal(sig.Blob, &ecSig); err != nil { + return err + } + + var skf skFields + if err := Unmarshal(sig.Rest, &skf); err != nil { + return err + } + + blob := struct { + ApplicationDigest []byte `ssh:"rest"` + Flags byte + Counter uint32 + MessageDigest []byte `ssh:"rest"` + }{ + appDigest, + skf.Flags, + skf.Counter, + dataDigest, + } + + original := Marshal(blob) + + h.Reset() + h.Write(original) + digest := h.Sum(nil) + + if ecdsa.Verify((*ecdsa.PublicKey)(&k.PublicKey), digest, ecSig.R, ecSig.S) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *skECDSAPublicKey) CryptoPublicKey() crypto.PublicKey { + return &k.PublicKey +} + +type skEd25519PublicKey struct { + // application is a URL-like string, typically "ssh:" for SSH. + // see openssh/PROTOCOL.u2f for details. + application string + ed25519.PublicKey +} + +func (k *skEd25519PublicKey) Type() string { + return KeyAlgoSKED25519 +} + +func parseSKEd25519(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + KeyBytes []byte + Application string + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + if l := len(w.KeyBytes); l != ed25519.PublicKeySize { + return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) + } + + key := new(skEd25519PublicKey) + key.application = w.Application + key.PublicKey = ed25519.PublicKey(w.KeyBytes) + + return key, w.Rest, nil +} + +func (k *skEd25519PublicKey) Marshal() []byte { + w := struct { + Name string + KeyBytes []byte + Application string + }{ + KeyAlgoSKED25519, + []byte(k.PublicKey), + k.application, + } + return Marshal(&w) +} + +func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + if l := len(k.PublicKey); l != ed25519.PublicKeySize { + return fmt.Errorf("invalid size %d for Ed25519 public key", l) + } + + h := hashFuncs[sig.Format].New() + h.Write([]byte(k.application)) + appDigest := h.Sum(nil) + + h.Reset() + h.Write(data) + dataDigest := h.Sum(nil) + + var edSig struct { + Signature []byte `ssh:"rest"` + } + + if err := Unmarshal(sig.Blob, &edSig); err != nil { + return err + } + + var skf skFields + if err := Unmarshal(sig.Rest, &skf); err != nil { + return err + } + + blob := struct { + ApplicationDigest []byte `ssh:"rest"` + Flags byte + Counter uint32 + MessageDigest []byte `ssh:"rest"` + }{ + appDigest, + skf.Flags, + skf.Counter, + dataDigest, + } + + original := Marshal(blob) + + if ok := ed25519.Verify(k.PublicKey, original, edSig.Signature); !ok { + return errors.New("ssh: signature did not verify") + } + + return nil +} + +func (k *skEd25519PublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, +// *ecdsa.PrivateKey or any 
other crypto.Signer and returns a +// corresponding Signer instance. ECDSA keys must use P-256, P-384 or +// P-521. DSA keys must use parameter size L1024N160. +func NewSignerFromKey(key interface{}) (Signer, error) { + switch key := key.(type) { + case crypto.Signer: + return NewSignerFromSigner(key) + case *dsa.PrivateKey: + return newDSAPrivateKey(key) + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } +} + +func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { + if err := checkDSAParams(&key.PublicKey.Parameters); err != nil { + return nil, err + } + + return &dsaPrivateKey{key}, nil +} + +type wrappedSigner struct { + signer crypto.Signer + pubKey PublicKey +} + +// NewSignerFromSigner takes any crypto.Signer implementation and +// returns a corresponding Signer interface. This can be used, for +// example, with keys kept in hardware modules. +func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { + pubKey, err := NewPublicKey(signer.Public()) + if err != nil { + return nil, err + } + + return &wrappedSigner{signer, pubKey}, nil +} + +func (s *wrappedSigner) PublicKey() PublicKey { + return s.pubKey +} + +func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + return s.SignWithAlgorithm(rand, data, s.pubKey.Type()) +} + +func (s *wrappedSigner) Algorithms() []string { + return algorithmsForKeyFormat(s.pubKey.Type()) +} + +func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if algorithm == "" { + algorithm = s.pubKey.Type() + } + + if !contains(s.Algorithms(), algorithm) { + return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type()) + } + + hashFunc := hashFuncs[algorithm] + var digest []byte + if hashFunc != 0 { + h := hashFunc.New() + h.Write(data) + digest = h.Sum(nil) + } else { + digest = data + } + + signature, err := s.signer.Sign(rand, digest, hashFunc) + if err != nil { + return nil, err + } + + // crypto.Signer.Sign is expected to return an ASN.1-encoded signature + // for ECDSA and DSA, but that's not the encoding expected by SSH, so + // re-encode. + switch s.pubKey.(type) { + case *ecdsaPublicKey, *dsaPublicKey: + type asn1Signature struct { + R, S *big.Int + } + asn1Sig := new(asn1Signature) + _, err := asn1.Unmarshal(signature, asn1Sig) + if err != nil { + return nil, err + } + + switch s.pubKey.(type) { + case *ecdsaPublicKey: + signature = Marshal(asn1Sig) + + case *dsaPublicKey: + signature = make([]byte, 40) + r := asn1Sig.R.Bytes() + s := asn1Sig.S.Bytes() + copy(signature[20-len(r):20], r) + copy(signature[40-len(s):40], s) + } + } + + return &Signature{ + Format: algorithm, + Blob: signature, + }, nil +} + +// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, +// or ed25519.PublicKey returns a corresponding PublicKey instance. +// ECDSA keys must use P-256, P-384 or P-521. 
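+//
+// Illustrative sketch (not upstream documentation): wrapping a freshly
+// generated Ed25519 key for use with this package.
+//
+//	pub, _, err := ed25519.GenerateKey(rand.Reader)
+//	if err != nil {
+//		return err
+//	}
+//	sshPub, err := ssh.NewPublicKey(pub)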
+func NewPublicKey(key interface{}) (PublicKey, error) { + switch key := key.(type) { + case *rsa.PublicKey: + return (*rsaPublicKey)(key), nil + case *ecdsa.PublicKey: + if !supportedEllipticCurve(key.Curve) { + return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported") + } + return (*ecdsaPublicKey)(key), nil + case *dsa.PublicKey: + return (*dsaPublicKey)(key), nil + case ed25519.PublicKey: + if l := len(key); l != ed25519.PublicKeySize { + return nil, fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) + } + return ed25519PublicKey(key), nil + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } +} + +// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports +// the same keys as ParseRawPrivateKey. If the private key is encrypted, it +// will return a PassphraseMissingError. +func ParsePrivateKey(pemBytes []byte) (Signer, error) { + key, err := ParseRawPrivateKey(pemBytes) + if err != nil { + return nil, err + } + + return NewSignerFromKey(key) +} + +// encryptedBlock tells whether a private key is +// encrypted by examining its Proc-Type header +// for a mention of ENCRYPTED +// according to RFC 1421 Section 4.6.1.1. +func encryptedBlock(block *pem.Block) bool { + return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") +} + +// A PassphraseMissingError indicates that parsing this private key requires a +// passphrase. Use ParsePrivateKeyWithPassphrase. +type PassphraseMissingError struct { + // PublicKey will be set if the private key format includes an unencrypted + // public key along with the encrypted private key. + PublicKey PublicKey +} + +func (*PassphraseMissingError) Error() string { + return "ssh: this private key is passphrase protected" +} + +// ParseRawPrivateKey returns a private key from a PEM encoded private key. It supports +// RSA, DSA, ECDSA, and Ed25519 private keys in PKCS#1, PKCS#8, OpenSSL, and OpenSSH +// formats. If the private key is encrypted, it will return a PassphraseMissingError. +func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("ssh: no key found") + } + + if encryptedBlock(block) { + return nil, &PassphraseMissingError{} + } + + switch block.Type { + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(block.Bytes) + // RFC5208 - https://tools.ietf.org/html/rfc5208 + case "PRIVATE KEY": + return x509.ParsePKCS8PrivateKey(block.Bytes) + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(block.Bytes) + case "DSA PRIVATE KEY": + return ParseDSAPrivateKey(block.Bytes) + case "OPENSSH PRIVATE KEY": + return parseOpenSSHPrivateKey(block.Bytes, unencryptedOpenSSHKey) + default: + return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) + } +} + +// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as +// specified by the OpenSSL DSA man page. 
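+// The expected DER layout is a SEQUENCE of the integers version, p, q, g,
+// pub (y) and priv (x), mirroring the struct that is unmarshaled below.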
+func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { + var k struct { + Version int + P *big.Int + Q *big.Int + G *big.Int + Pub *big.Int + Priv *big.Int + } + rest, err := asn1.Unmarshal(der, &k) + if err != nil { + return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) + } + if len(rest) > 0 { + return nil, errors.New("ssh: garbage after DSA key") + } + + return &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, + Q: k.Q, + G: k.G, + }, + Y: k.Pub, + }, + X: k.Priv, + }, nil +} + +func unencryptedOpenSSHKey(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { + if kdfName != "none" || cipherName != "none" { + return nil, &PassphraseMissingError{} + } + if kdfOpts != "" { + return nil, errors.New("ssh: invalid openssh private key") + } + return privKeyBlock, nil +} + +func unencryptedOpenSSHMarshaler(privKeyBlock []byte) ([]byte, string, string, string, error) { + key := generateOpenSSHPadding(privKeyBlock, 8) + return key, "none", "none", "", nil +} + +const privateKeyAuthMagic = "openssh-key-v1\x00" + +type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error) +type openSSHEncryptFunc func(PrivKeyBlock []byte) (ProtectedKeyBlock []byte, cipherName, kdfName, kdfOptions string, err error) + +type openSSHEncryptedPrivateKey struct { + CipherName string + KdfName string + KdfOpts string + NumKeys uint32 + PubKey []byte + PrivKeyBlock []byte +} + +type openSSHPrivateKey struct { + Check1 uint32 + Check2 uint32 + Keytype string + Rest []byte `ssh:"rest"` +} + +type openSSHRSAPrivateKey struct { + N *big.Int + E *big.Int + D *big.Int + Iqmp *big.Int + P *big.Int + Q *big.Int + Comment string + Pad []byte `ssh:"rest"` +} + +type openSSHEd25519PrivateKey struct { + Pub []byte + Priv []byte + Comment string + Pad []byte `ssh:"rest"` +} + +type openSSHECDSAPrivateKey struct { + Curve string + Pub []byte + D *big.Int + Comment string + Pad []byte `ssh:"rest"` +} + +// parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt +// function to unwrap the encrypted portion. unencryptedOpenSSHKey can be used +// as the decrypt function to parse an unencrypted private key. See +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key. +func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) { + if len(key) < len(privateKeyAuthMagic) || string(key[:len(privateKeyAuthMagic)]) != privateKeyAuthMagic { + return nil, errors.New("ssh: invalid openssh private key format") + } + remaining := key[len(privateKeyAuthMagic):] + + var w openSSHEncryptedPrivateKey + if err := Unmarshal(remaining, &w); err != nil { + return nil, err + } + if w.NumKeys != 1 { + // We only support single key files, and so does OpenSSH. 
+ // https://github.com/openssh/openssh-portable/blob/4103a3ec7/sshkey.c#L4171 + return nil, errors.New("ssh: multi-key files are not supported") + } + + privKeyBlock, err := decrypt(w.CipherName, w.KdfName, w.KdfOpts, w.PrivKeyBlock) + if err != nil { + if err, ok := err.(*PassphraseMissingError); ok { + pub, errPub := ParsePublicKey(w.PubKey) + if errPub != nil { + return nil, fmt.Errorf("ssh: failed to parse embedded public key: %v", errPub) + } + err.PublicKey = pub + } + return nil, err + } + + var pk1 openSSHPrivateKey + if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 { + if w.CipherName != "none" { + return nil, x509.IncorrectPasswordError + } + return nil, errors.New("ssh: malformed OpenSSH key") + } + + switch pk1.Keytype { + case KeyAlgoRSA: + var key openSSHRSAPrivateKey + if err := Unmarshal(pk1.Rest, &key); err != nil { + return nil, err + } + + if err := checkOpenSSHKeyPadding(key.Pad); err != nil { + return nil, err + } + + pk := &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: key.N, + E: int(key.E.Int64()), + }, + D: key.D, + Primes: []*big.Int{key.P, key.Q}, + } + + if err := pk.Validate(); err != nil { + return nil, err + } + + pk.Precompute() + + return pk, nil + case KeyAlgoED25519: + var key openSSHEd25519PrivateKey + if err := Unmarshal(pk1.Rest, &key); err != nil { + return nil, err + } + + if len(key.Priv) != ed25519.PrivateKeySize { + return nil, errors.New("ssh: private key unexpected length") + } + + if err := checkOpenSSHKeyPadding(key.Pad); err != nil { + return nil, err + } + + pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) + copy(pk, key.Priv) + return &pk, nil + case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: + var key openSSHECDSAPrivateKey + if err := Unmarshal(pk1.Rest, &key); err != nil { + return nil, err + } + + if err := checkOpenSSHKeyPadding(key.Pad); err != nil { + return nil, err + } + + var curve elliptic.Curve + switch key.Curve { + case "nistp256": + curve = elliptic.P256() + case "nistp384": + curve = elliptic.P384() + case "nistp521": + curve = elliptic.P521() + default: + return nil, errors.New("ssh: unhandled elliptic curve: " + key.Curve) + } + + X, Y := elliptic.Unmarshal(curve, key.Pub) + if X == nil || Y == nil { + return nil, errors.New("ssh: failed to unmarshal public key") + } + + if key.D.Cmp(curve.Params().N) >= 0 { + return nil, errors.New("ssh: scalar is out of range") + } + + x, y := curve.ScalarBaseMult(key.D.Bytes()) + if x.Cmp(X) != 0 || y.Cmp(Y) != 0 { + return nil, errors.New("ssh: public key does not match private key") + } + + return &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: curve, + X: X, + Y: Y, + }, + D: key.D, + }, nil + default: + return nil, errors.New("ssh: unhandled key type") + } +} + +func marshalOpenSSHPrivateKey(key crypto.PrivateKey, comment string, encrypt openSSHEncryptFunc) (*pem.Block, error) { + var w openSSHEncryptedPrivateKey + var pk1 openSSHPrivateKey + + // Random check bytes. + var check uint32 + if err := binary.Read(rand.Reader, binary.BigEndian, &check); err != nil { + return nil, err + } + + pk1.Check1 = check + pk1.Check2 = check + w.NumKeys = 1 + + // Use a []byte directly on ed25519 keys. + if k, ok := key.(*ed25519.PrivateKey); ok { + key = *k + } + + switch k := key.(type) { + case *rsa.PrivateKey: + E := new(big.Int).SetInt64(int64(k.PublicKey.E)) + // Marshal public key: + // E and N are in reversed order in the public and private key. 
+ pubKey := struct { + KeyType string + E *big.Int + N *big.Int + }{ + KeyAlgoRSA, + E, k.PublicKey.N, + } + w.PubKey = Marshal(pubKey) + + // Marshal private key. + key := openSSHRSAPrivateKey{ + N: k.PublicKey.N, + E: E, + D: k.D, + Iqmp: k.Precomputed.Qinv, + P: k.Primes[0], + Q: k.Primes[1], + Comment: comment, + } + pk1.Keytype = KeyAlgoRSA + pk1.Rest = Marshal(key) + case ed25519.PrivateKey: + pub := make([]byte, ed25519.PublicKeySize) + priv := make([]byte, ed25519.PrivateKeySize) + copy(pub, k[32:]) + copy(priv, k) + + // Marshal public key. + pubKey := struct { + KeyType string + Pub []byte + }{ + KeyAlgoED25519, pub, + } + w.PubKey = Marshal(pubKey) + + // Marshal private key. + key := openSSHEd25519PrivateKey{ + Pub: pub, + Priv: priv, + Comment: comment, + } + pk1.Keytype = KeyAlgoED25519 + pk1.Rest = Marshal(key) + case *ecdsa.PrivateKey: + var curve, keyType string + switch name := k.Curve.Params().Name; name { + case "P-256": + curve = "nistp256" + keyType = KeyAlgoECDSA256 + case "P-384": + curve = "nistp384" + keyType = KeyAlgoECDSA384 + case "P-521": + curve = "nistp521" + keyType = KeyAlgoECDSA521 + default: + return nil, errors.New("ssh: unhandled elliptic curve " + name) + } + + pub := elliptic.Marshal(k.Curve, k.PublicKey.X, k.PublicKey.Y) + + // Marshal public key. + pubKey := struct { + KeyType string + Curve string + Pub []byte + }{ + keyType, curve, pub, + } + w.PubKey = Marshal(pubKey) + + // Marshal private key. + key := openSSHECDSAPrivateKey{ + Curve: curve, + Pub: pub, + D: k.D, + Comment: comment, + } + pk1.Keytype = keyType + pk1.Rest = Marshal(key) + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", k) + } + + var err error + // Add padding and encrypt the key if necessary. + w.PrivKeyBlock, w.CipherName, w.KdfName, w.KdfOpts, err = encrypt(Marshal(pk1)) + if err != nil { + return nil, err + } + + b := Marshal(w) + block := &pem.Block{ + Type: "OPENSSH PRIVATE KEY", + Bytes: append([]byte(privateKeyAuthMagic), b...), + } + return block, nil +} + +func checkOpenSSHKeyPadding(pad []byte) error { + for i, b := range pad { + if int(b) != i+1 { + return errors.New("ssh: padding not as expected") + } + } + return nil +} + +func generateOpenSSHPadding(block []byte, blockSize int) []byte { + for i, l := 0, len(block); (l+i)%blockSize != 0; i++ { + block = append(block, byte(i+1)) + } + return block +} + +// FingerprintLegacyMD5 returns the user presentation of the key's +// fingerprint as described by RFC 4716 section 4. +func FingerprintLegacyMD5(pubKey PublicKey) string { + md5sum := md5.Sum(pubKey.Marshal()) + hexarray := make([]string, len(md5sum)) + for i, c := range md5sum { + hexarray[i] = hex.EncodeToString([]byte{c}) + } + return strings.Join(hexarray, ":") +} + +// FingerprintSHA256 returns the user presentation of the key's +// fingerprint as unpadded base64 encoded sha256 hash. +// This format was introduced from OpenSSH 6.8. +// https://www.openssh.com/txt/release-6.8 +// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) +func FingerprintSHA256(pubKey PublicKey) string { + sha256sum := sha256.Sum256(pubKey.Marshal()) + hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) + return "SHA256:" + hash +} diff --git a/tempfork/sshtest/ssh/keys_test.go b/tempfork/sshtest/ssh/keys_test.go new file mode 100644 index 000000000..bf1f0be1b --- /dev/null +++ b/tempfork/sshtest/ssh/keys_test.go @@ -0,0 +1,724 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "fmt" + "io" + "reflect" + "strings" + "testing" + + "golang.org/x/crypto/ssh/testdata" +) + +func rawKey(pub PublicKey) interface{} { + switch k := pub.(type) { + case *rsaPublicKey: + return (*rsa.PublicKey)(k) + case *dsaPublicKey: + return (*dsa.PublicKey)(k) + case *ecdsaPublicKey: + return (*ecdsa.PublicKey)(k) + case ed25519PublicKey: + return (ed25519.PublicKey)(k) + case *Certificate: + return k + } + panic("unknown key type") +} + +func TestKeyMarshalParse(t *testing.T) { + for _, priv := range testSigners { + pub := priv.PublicKey() + roundtrip, err := ParsePublicKey(pub.Marshal()) + if err != nil { + t.Errorf("ParsePublicKey(%T): %v", pub, err) + } + + k1 := rawKey(pub) + k2 := rawKey(roundtrip) + + if !reflect.DeepEqual(k1, k2) { + t.Errorf("got %#v in roundtrip, want %#v", k2, k1) + } + } +} + +func TestUnsupportedCurves(t *testing.T) { + raw, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader) + if err != nil { + t.Fatalf("GenerateKey: %v", err) + } + + if _, err = NewSignerFromKey(raw); err == nil || !strings.Contains(err.Error(), "only P-256") { + t.Fatalf("NewPrivateKey should not succeed with P-224, got: %v", err) + } + + if _, err = NewPublicKey(&raw.PublicKey); err == nil || !strings.Contains(err.Error(), "only P-256") { + t.Fatalf("NewPublicKey should not succeed with P-224, got: %v", err) + } +} + +func TestNewPublicKey(t *testing.T) { + for _, k := range testSigners { + raw := rawKey(k.PublicKey()) + // Skip certificates, as NewPublicKey does not support them. 
+ if _, ok := raw.(*Certificate); ok { + continue + } + pub, err := NewPublicKey(raw) + if err != nil { + t.Errorf("NewPublicKey(%#v): %v", raw, err) + } + if !reflect.DeepEqual(k.PublicKey(), pub) { + t.Errorf("NewPublicKey(%#v) = %#v, want %#v", raw, pub, k.PublicKey()) + } + } +} + +func TestKeySignVerify(t *testing.T) { + for _, priv := range testSigners { + pub := priv.PublicKey() + + data := []byte("sign me") + sig, err := priv.Sign(rand.Reader, data) + if err != nil { + t.Fatalf("Sign(%T): %v", priv, err) + } + + if err := pub.Verify(data, sig); err != nil { + t.Errorf("publicKey.Verify(%T): %v", priv, err) + } + sig.Blob[5]++ + if err := pub.Verify(data, sig); err == nil { + t.Errorf("publicKey.Verify on broken sig did not fail") + } + } +} + +func TestKeySignWithAlgorithmVerify(t *testing.T) { + for k, priv := range testSigners { + if algorithmSigner, ok := priv.(MultiAlgorithmSigner); !ok { + t.Errorf("Signers %q constructed by ssh package should always implement the MultiAlgorithmSigner interface: %T", k, priv) + } else { + pub := priv.PublicKey() + data := []byte("sign me") + + signWithAlgTestCase := func(algorithm string, expectedAlg string) { + sig, err := algorithmSigner.SignWithAlgorithm(rand.Reader, data, algorithm) + if err != nil { + t.Fatalf("Sign(%T): %v", priv, err) + } + if sig.Format != expectedAlg { + t.Errorf("signature format did not match requested signature algorithm: %s != %s", sig.Format, expectedAlg) + } + + if err := pub.Verify(data, sig); err != nil { + t.Errorf("publicKey.Verify(%T): %v", priv, err) + } + sig.Blob[5]++ + if err := pub.Verify(data, sig); err == nil { + t.Errorf("publicKey.Verify on broken sig did not fail") + } + } + + // Using the empty string as the algorithm name should result in the same signature format as the algorithm-free Sign method. 
+ defaultSig, err := priv.Sign(rand.Reader, data) + if err != nil { + t.Fatalf("Sign(%T): %v", priv, err) + } + signWithAlgTestCase("", defaultSig.Format) + + // RSA keys are the only ones which currently support more than one signing algorithm + if pub.Type() == KeyAlgoRSA { + for _, algorithm := range []string{KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512} { + signWithAlgTestCase(algorithm, algorithm) + } + } + } + } +} + +func TestKeySignWithShortSignature(t *testing.T) { + signer := testSigners["rsa"].(AlgorithmSigner) + pub := signer.PublicKey() + // Note: data obtained by empirically trying until a result + // starting with 0 appeared + tests := []struct { + algorithm string + data []byte + }{ + { + algorithm: KeyAlgoRSA, + data: []byte("sign me92"), + }, + { + algorithm: KeyAlgoRSASHA256, + data: []byte("sign me294"), + }, + { + algorithm: KeyAlgoRSASHA512, + data: []byte("sign me60"), + }, + } + + for _, tt := range tests { + sig, err := signer.SignWithAlgorithm(rand.Reader, tt.data, tt.algorithm) + if err != nil { + t.Fatalf("Sign(%T): %v", signer, err) + } + if sig.Blob[0] != 0 { + t.Errorf("%s: Expected signature with a leading 0", tt.algorithm) + } + sig.Blob = sig.Blob[1:] + if err := pub.Verify(tt.data, sig); err != nil { + t.Errorf("publicKey.Verify(%s): %v", tt.algorithm, err) + } + } +} + +func TestParseRSAPrivateKey(t *testing.T) { + key := testPrivateKeys["rsa"] + + rsa, ok := key.(*rsa.PrivateKey) + if !ok { + t.Fatalf("got %T, want *rsa.PrivateKey", rsa) + } + + if err := rsa.Validate(); err != nil { + t.Errorf("Validate: %v", err) + } +} + +func TestParseECPrivateKey(t *testing.T) { + key := testPrivateKeys["ecdsa"] + + ecKey, ok := key.(*ecdsa.PrivateKey) + if !ok { + t.Fatalf("got %T, want *ecdsa.PrivateKey", ecKey) + } + + if !validateECPublicKey(ecKey.Curve, ecKey.X, ecKey.Y) { + t.Fatalf("public key does not validate.") + } +} + +func TestParseDSA(t *testing.T) { + // We actually exercise the ParsePrivateKey codepath here, as opposed to + // using the ParseRawPrivateKey+NewSignerFromKey path that testdata_test.go + // uses. + s, err := ParsePrivateKey(testdata.PEMBytes["dsa"]) + if err != nil { + t.Fatalf("ParsePrivateKey returned error: %s", err) + } + + data := []byte("sign me") + sig, err := s.Sign(rand.Reader, data) + if err != nil { + t.Fatalf("dsa.Sign: %v", err) + } + + if err := s.PublicKey().Verify(data, sig); err != nil { + t.Errorf("Verify failed: %v", err) + } +} + +// Tests for authorized_keys parsing. + +// getTestKey returns a public key, and its base64 encoding. 
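+// The string is the second field of an authorized_keys entry, so the tests
+// below can build lines such as "ssh-rsa <base64> user@host" around it.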
+func getTestKey() (PublicKey, string) { + k := testPublicKeys["rsa"] + + b := &bytes.Buffer{} + e := base64.NewEncoder(base64.StdEncoding, b) + e.Write(k.Marshal()) + e.Close() + + return k, b.String() +} + +func TestMarshalParsePublicKey(t *testing.T) { + pub, pubSerialized := getTestKey() + line := fmt.Sprintf("%s %s user@host", pub.Type(), pubSerialized) + + authKeys := MarshalAuthorizedKey(pub) + actualFields := strings.Fields(string(authKeys)) + if len(actualFields) == 0 { + t.Fatalf("failed authKeys: %v", authKeys) + } + + // drop the comment + expectedFields := strings.Fields(line)[0:2] + + if !reflect.DeepEqual(actualFields, expectedFields) { + t.Errorf("got %v, expected %v", actualFields, expectedFields) + } + + actPub, _, _, _, err := ParseAuthorizedKey([]byte(line)) + if err != nil { + t.Fatalf("cannot parse %v: %v", line, err) + } + if !reflect.DeepEqual(actPub, pub) { + t.Errorf("got %v, expected %v", actPub, pub) + } +} + +func TestMarshalPrivateKey(t *testing.T) { + tests := []struct { + name string + }{ + {"rsa-openssh-format"}, + {"ed25519"}, + {"p256-openssh-format"}, + {"p384-openssh-format"}, + {"p521-openssh-format"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + expected, ok := testPrivateKeys[tt.name] + if !ok { + t.Fatalf("cannot find key %s", tt.name) + } + + block, err := MarshalPrivateKey(expected, "test@golang.org") + if err != nil { + t.Fatalf("cannot marshal %s: %v", tt.name, err) + } + + key, err := ParseRawPrivateKey(pem.EncodeToMemory(block)) + if err != nil { + t.Fatalf("cannot parse %s: %v", tt.name, err) + } + + if !reflect.DeepEqual(expected, key) { + t.Errorf("unexpected marshaled key %s", tt.name) + } + }) + } +} + +type testAuthResult struct { + pubKey PublicKey + options []string + comments string + rest string + ok bool +} + +func testAuthorizedKeys(t *testing.T, authKeys []byte, expected []testAuthResult) { + rest := authKeys + var values []testAuthResult + for len(rest) > 0 { + var r testAuthResult + var err error + r.pubKey, r.comments, r.options, rest, err = ParseAuthorizedKey(rest) + r.ok = (err == nil) + t.Log(err) + r.rest = string(rest) + values = append(values, r) + } + + if !reflect.DeepEqual(values, expected) { + t.Errorf("got %#v, expected %#v", values, expected) + } +} + +func TestAuthorizedKeyBasic(t *testing.T) { + pub, pubSerialized := getTestKey() + line := "ssh-rsa " + pubSerialized + " user@host" + testAuthorizedKeys(t, []byte(line), + []testAuthResult{ + {pub, nil, "user@host", "", true}, + }) +} + +func TestAuth(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithOptions := []string{ + `# comments to ignore before any keys...`, + ``, + `env="HOME=/home/root",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`, + `# comments to ignore, along with a blank line`, + ``, + `env="HOME=/home/root2" ssh-rsa ` + pubSerialized + ` user2@host2`, + ``, + `# more comments, plus a invalid entry`, + `ssh-rsa data-that-will-not-parse user@host3`, + } + for _, eol := range []string{"\n", "\r\n"} { + authOptions := strings.Join(authWithOptions, eol) + rest2 := strings.Join(authWithOptions[3:], eol) + rest3 := strings.Join(authWithOptions[6:], eol) + testAuthorizedKeys(t, []byte(authOptions), []testAuthResult{ + {pub, []string{`env="HOME=/home/root"`, "no-port-forwarding"}, "user@host", rest2, true}, + {pub, []string{`env="HOME=/home/root2"`}, "user2@host2", rest3, true}, + {nil, nil, "", "", false}, + }) + } +} + +func TestAuthWithQuotedSpaceInEnv(t *testing.T) { + pub, pubSerialized := getTestKey() 
+ authWithQuotedSpaceInEnv := []byte(`env="HOME=/home/root dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`) + testAuthorizedKeys(t, []byte(authWithQuotedSpaceInEnv), []testAuthResult{ + {pub, []string{`env="HOME=/home/root dir"`, "no-port-forwarding"}, "user@host", "", true}, + }) +} + +func TestAuthWithQuotedCommaInEnv(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithQuotedCommaInEnv := []byte(`env="HOME=/home/root,dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`) + testAuthorizedKeys(t, []byte(authWithQuotedCommaInEnv), []testAuthResult{ + {pub, []string{`env="HOME=/home/root,dir"`, "no-port-forwarding"}, "user@host", "", true}, + }) +} + +func TestAuthWithQuotedQuoteInEnv(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithQuotedQuoteInEnv := []byte(`env="HOME=/home/\"root dir",no-port-forwarding` + "\t" + `ssh-rsa` + "\t" + pubSerialized + ` user@host`) + authWithDoubleQuotedQuote := []byte(`no-port-forwarding,env="HOME=/home/ \"root dir\"" ssh-rsa ` + pubSerialized + "\t" + `user@host`) + testAuthorizedKeys(t, []byte(authWithQuotedQuoteInEnv), []testAuthResult{ + {pub, []string{`env="HOME=/home/\"root dir"`, "no-port-forwarding"}, "user@host", "", true}, + }) + + testAuthorizedKeys(t, []byte(authWithDoubleQuotedQuote), []testAuthResult{ + {pub, []string{"no-port-forwarding", `env="HOME=/home/ \"root dir\""`}, "user@host", "", true}, + }) +} + +func TestAuthWithInvalidSpace(t *testing.T) { + _, pubSerialized := getTestKey() + authWithInvalidSpace := []byte(`env="HOME=/home/root dir", no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host +#more to follow but still no valid keys`) + testAuthorizedKeys(t, []byte(authWithInvalidSpace), []testAuthResult{ + {nil, nil, "", "", false}, + }) +} + +func TestAuthWithMissingQuote(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithMissingQuote := []byte(`env="HOME=/home/root,no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host +env="HOME=/home/root",shared-control ssh-rsa ` + pubSerialized + ` user@host`) + + testAuthorizedKeys(t, []byte(authWithMissingQuote), []testAuthResult{ + {pub, []string{`env="HOME=/home/root"`, `shared-control`}, "user@host", "", true}, + }) +} + +func TestInvalidEntry(t *testing.T) { + authInvalid := []byte(`ssh-rsa`) + _, _, _, _, err := ParseAuthorizedKey(authInvalid) + if err == nil { + t.Errorf("got valid entry for %q", authInvalid) + } +} + +var knownHostsParseTests = []struct { + input string + err string + + marker string + comment string + hosts []string + rest string +}{ + { + "", + "EOF", + + "", "", nil, "", + }, + { + "# Just a comment", + "EOF", + + "", "", nil, "", + }, + { + " \t ", + "EOF", + + "", "", nil, "", + }, + { + "localhost ssh-rsa {RSAPUB}", + "", + + "", "", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}", + "", + + "", "", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment", + "", + + "", "comment comment", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment\n", + "", + + "", "comment comment", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment\r\n", + "", + + "", "comment comment", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment\r\nnext line", + "", + + "", "comment comment", []string{"localhost"}, "next line", + }, + { + "localhost,[host2:123]\tssh-rsa {RSAPUB}\tcomment comment", + "", + + "", "comment comment", []string{"localhost", "[host2:123]"}, "", + }, + { 
+ "@marker \tlocalhost,[host2:123]\tssh-rsa {RSAPUB}", + "", + + "marker", "", []string{"localhost", "[host2:123]"}, "", + }, + { + "@marker \tlocalhost,[host2:123]\tssh-rsa aabbccdd", + "short read", + + "", "", nil, "", + }, +} + +func TestKnownHostsParsing(t *testing.T) { + rsaPub, rsaPubSerialized := getTestKey() + + for i, test := range knownHostsParseTests { + var expectedKey PublicKey + const rsaKeyToken = "{RSAPUB}" + + input := test.input + if strings.Contains(input, rsaKeyToken) { + expectedKey = rsaPub + input = strings.Replace(test.input, rsaKeyToken, rsaPubSerialized, -1) + } + + marker, hosts, pubKey, comment, rest, err := ParseKnownHosts([]byte(input)) + if err != nil { + if len(test.err) == 0 { + t.Errorf("#%d: unexpectedly failed with %q", i, err) + } else if !strings.Contains(err.Error(), test.err) { + t.Errorf("#%d: expected error containing %q, but got %q", i, test.err, err) + } + continue + } else if len(test.err) != 0 { + t.Errorf("#%d: succeeded but expected error including %q", i, test.err) + continue + } + + if !reflect.DeepEqual(expectedKey, pubKey) { + t.Errorf("#%d: expected key %#v, but got %#v", i, expectedKey, pubKey) + } + + if marker != test.marker { + t.Errorf("#%d: expected marker %q, but got %q", i, test.marker, marker) + } + + if comment != test.comment { + t.Errorf("#%d: expected comment %q, but got %q", i, test.comment, comment) + } + + if !reflect.DeepEqual(test.hosts, hosts) { + t.Errorf("#%d: expected hosts %#v, but got %#v", i, test.hosts, hosts) + } + + if rest := string(rest); rest != test.rest { + t.Errorf("#%d: expected remaining input to be %q, but got %q", i, test.rest, rest) + } + } +} + +func TestFingerprintLegacyMD5(t *testing.T) { + pub, _ := getTestKey() + fingerprint := FingerprintLegacyMD5(pub) + want := "b7:ef:d3:d5:89:29:52:96:9f:df:47:41:4d:15:37:f4" // ssh-keygen -lf -E md5 rsa + if fingerprint != want { + t.Errorf("got fingerprint %q want %q", fingerprint, want) + } +} + +func TestFingerprintSHA256(t *testing.T) { + pub, _ := getTestKey() + fingerprint := FingerprintSHA256(pub) + want := "SHA256:fi5+D7UmDZDE9Q2sAVvvlpcQSIakN4DERdINgXd2AnE" // ssh-keygen -lf rsa + if fingerprint != want { + t.Errorf("got fingerprint %q want %q", fingerprint, want) + } +} + +func TestInvalidKeys(t *testing.T) { + keyTypes := []string{ + "RSA PRIVATE KEY", + "PRIVATE KEY", + "EC PRIVATE KEY", + "DSA PRIVATE KEY", + "OPENSSH PRIVATE KEY", + } + + for _, keyType := range keyTypes { + for _, dataLen := range []int{0, 1, 2, 5, 10, 20} { + data := make([]byte, dataLen) + if _, err := io.ReadFull(rand.Reader, data); err != nil { + t.Fatal(err) + } + + var buf bytes.Buffer + pem.Encode(&buf, &pem.Block{ + Type: keyType, + Bytes: data, + }) + + // This test is just to ensure that the function + // doesn't panic so the return value is ignored. 
+ ParseRawPrivateKey(buf.Bytes()) + } + } +} + +func TestSKKeys(t *testing.T) { + for _, d := range testdata.SKData { + pk, _, _, _, err := ParseAuthorizedKey(d.PubKey) + if err != nil { + t.Fatalf("parseAuthorizedKey returned error: %v", err) + } + + sigBuf := make([]byte, hex.DecodedLen(len(d.HexSignature))) + if _, err := hex.Decode(sigBuf, d.HexSignature); err != nil { + t.Fatalf("hex.Decode() failed: %v", err) + } + + dataBuf := make([]byte, hex.DecodedLen(len(d.HexData))) + if _, err := hex.Decode(dataBuf, d.HexData); err != nil { + t.Fatalf("hex.Decode() failed: %v", err) + } + + sig, _, ok := parseSignature(sigBuf) + if !ok { + t.Fatalf("parseSignature(%v) failed", sigBuf) + } + + // Test that good data and signature pass verification + if err := pk.Verify(dataBuf, sig); err != nil { + t.Errorf("%s: PublicKey.Verify(%v, %v) failed: %v", d.Name, dataBuf, sig, err) + } + + // Invalid data being passed in + invalidData := []byte("INVALID DATA") + if err := pk.Verify(invalidData, sig); err == nil { + t.Errorf("%s with invalid data: PublicKey.Verify(%v, %v) passed unexpectedly", d.Name, invalidData, sig) + } + + // Change byte in blob to corrup signature + sig.Blob[5] = byte('A') + // Corrupted data being passed in + if err := pk.Verify(dataBuf, sig); err == nil { + t.Errorf("%s with corrupted signature: PublicKey.Verify(%v, %v) passed unexpectedly", d.Name, dataBuf, sig) + } + } +} + +func TestNewSignerWithAlgos(t *testing.T) { + algorithSigner, ok := testSigners["rsa"].(AlgorithmSigner) + if !ok { + t.Fatal("rsa test signer does not implement the AlgorithmSigner interface") + } + _, err := NewSignerWithAlgorithms(algorithSigner, nil) + if err == nil { + t.Error("signer with algos created with no algorithms") + } + + _, err = NewSignerWithAlgorithms(algorithSigner, []string{KeyAlgoED25519}) + if err == nil { + t.Error("signer with algos created with invalid algorithms") + } + + _, err = NewSignerWithAlgorithms(algorithSigner, []string{CertAlgoRSASHA256v01}) + if err == nil { + t.Error("signer with algos created with certificate algorithms") + } + + mas, err := NewSignerWithAlgorithms(algorithSigner, []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512}) + if err != nil { + t.Errorf("unable to create signer with valid algorithms: %v", err) + } + + _, err = NewSignerWithAlgorithms(mas, []string{KeyAlgoRSA}) + if err == nil { + t.Error("signer with algos created with restricted algorithms") + } +} + +func TestCryptoPublicKey(t *testing.T) { + for _, priv := range testSigners { + p1 := priv.PublicKey() + key, ok := p1.(CryptoPublicKey) + if !ok { + continue + } + p2, err := NewPublicKey(key.CryptoPublicKey()) + if err != nil { + t.Fatalf("NewPublicKey(CryptoPublicKey) failed for %s, got: %v", p1.Type(), err) + } + if !reflect.DeepEqual(p1, p2) { + t.Errorf("got %#v in NewPublicKey, want %#v", p2, p1) + } + } + for _, d := range testdata.SKData { + p1, _, _, _, err := ParseAuthorizedKey(d.PubKey) + if err != nil { + t.Fatalf("parseAuthorizedKey returned error: %v", err) + } + k1, ok := p1.(CryptoPublicKey) + if !ok { + t.Fatalf("%T does not implement CryptoPublicKey", p1) + } + + var p2 PublicKey + switch pub := k1.CryptoPublicKey().(type) { + case *ecdsa.PublicKey: + p2 = &skECDSAPublicKey{ + application: "ssh:", + PublicKey: *pub, + } + case ed25519.PublicKey: + p2 = &skEd25519PublicKey{ + application: "ssh:", + PublicKey: pub, + } + default: + t.Fatalf("unexpected type %T from CryptoPublicKey()", pub) + } + if !reflect.DeepEqual(p1, p2) { + t.Errorf("got %#v, want %#v", p2, p1) + } + } +} diff 
--git a/tempfork/sshtest/ssh/mac.go b/tempfork/sshtest/ssh/mac.go new file mode 100644 index 000000000..06a1b2750 --- /dev/null +++ b/tempfork/sshtest/ssh/mac.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Message authentication support + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "hash" +) + +type macMode struct { + keySize int + etm bool + new func(key []byte) hash.Hash +} + +// truncatingMAC wraps around a hash.Hash and truncates the output digest to +// a given size. +type truncatingMAC struct { + length int + hmac hash.Hash +} + +func (t truncatingMAC) Write(data []byte) (int, error) { + return t.hmac.Write(data) +} + +func (t truncatingMAC) Sum(in []byte) []byte { + out := t.hmac.Sum(in) + return out[:len(in)+t.length] +} + +func (t truncatingMAC) Reset() { + t.hmac.Reset() +} + +func (t truncatingMAC) Size() int { + return t.length +} + +func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } + +var macModes = map[string]*macMode{ + "hmac-sha2-512-etm@openssh.com": {64, true, func(key []byte) hash.Hash { + return hmac.New(sha512.New, key) + }}, + "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha2-512": {64, false, func(key []byte) hash.Hash { + return hmac.New(sha512.New, key) + }}, + "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha1": {20, false, func(key []byte) hash.Hash { + return hmac.New(sha1.New, key) + }}, + "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { + return truncatingMAC{12, hmac.New(sha1.New, key)} + }}, +} diff --git a/tempfork/sshtest/ssh/mempipe_test.go b/tempfork/sshtest/ssh/mempipe_test.go new file mode 100644 index 000000000..f27339c51 --- /dev/null +++ b/tempfork/sshtest/ssh/mempipe_test.go @@ -0,0 +1,124 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "io" + "sync" + "testing" +) + +// An in-memory packetConn. It is safe to call Close and writePacket +// from different goroutines. 
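+// A connected pair is normally obtained from memPipe, as in TestMemPipe below:
+//
+//	a, b := memPipe()
+//	_ = a.writePacket([]byte{42}) // becomes readable via b.readPacket()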
+type memTransport struct { + eof bool + pending [][]byte + write *memTransport + writeCount uint64 + sync.Mutex + *sync.Cond +} + +func (t *memTransport) readPacket() ([]byte, error) { + t.Lock() + defer t.Unlock() + for { + if len(t.pending) > 0 { + r := t.pending[0] + t.pending = t.pending[1:] + return r, nil + } + if t.eof { + return nil, io.EOF + } + t.Cond.Wait() + } +} + +func (t *memTransport) closeSelf() error { + t.Lock() + defer t.Unlock() + if t.eof { + return io.EOF + } + t.eof = true + t.Cond.Broadcast() + return nil +} + +func (t *memTransport) Close() error { + err := t.write.closeSelf() + t.closeSelf() + return err +} + +func (t *memTransport) writePacket(p []byte) error { + t.write.Lock() + defer t.write.Unlock() + if t.write.eof { + return io.EOF + } + c := make([]byte, len(p)) + copy(c, p) + t.write.pending = append(t.write.pending, c) + t.write.Cond.Signal() + t.writeCount++ + return nil +} + +func (t *memTransport) getWriteCount() uint64 { + t.write.Lock() + defer t.write.Unlock() + return t.writeCount +} + +func memPipe() (a, b packetConn) { + t1 := memTransport{} + t2 := memTransport{} + t1.write = &t2 + t2.write = &t1 + t1.Cond = sync.NewCond(&t1.Mutex) + t2.Cond = sync.NewCond(&t2.Mutex) + return &t1, &t2 +} + +func TestMemPipe(t *testing.T) { + a, b := memPipe() + if err := a.writePacket([]byte{42}); err != nil { + t.Fatalf("writePacket: %v", err) + } + if wc := a.(*memTransport).getWriteCount(); wc != 1 { + t.Fatalf("got %v, want 1", wc) + } + if err := a.Close(); err != nil { + t.Fatal("Close: ", err) + } + p, err := b.readPacket() + if err != nil { + t.Fatal("readPacket: ", err) + } + if len(p) != 1 || p[0] != 42 { + t.Fatalf("got %v, want {42}", p) + } + p, err = b.readPacket() + if err != io.EOF { + t.Fatalf("got %v, %v, want EOF", p, err) + } + if wc := b.(*memTransport).getWriteCount(); wc != 0 { + t.Fatalf("got %v, want 0", wc) + } +} + +func TestDoubleClose(t *testing.T) { + a, _ := memPipe() + err := a.Close() + if err != nil { + t.Errorf("Close: %v", err) + } + err = a.Close() + if err != io.EOF { + t.Errorf("expect EOF on double close.") + } +} diff --git a/tempfork/sshtest/ssh/messages.go b/tempfork/sshtest/ssh/messages.go new file mode 100644 index 000000000..b55f86056 --- /dev/null +++ b/tempfork/sshtest/ssh/messages.go @@ -0,0 +1,891 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "strconv" + "strings" +) + +// These are SSH message type numbers. They are scattered around several +// documents but many were taken from [SSH-PARAMETERS]. +const ( + msgIgnore = 2 + msgUnimplemented = 3 + msgDebug = 4 + msgNewKeys = 21 +) + +// SSH messages: +// +// These structures mirror the wire format of the corresponding SSH messages. +// They are marshaled using reflection with the marshal and unmarshal functions +// in this file. The only wrinkle is that a final member of type []byte with a +// ssh tag of "rest" receives the remainder of a packet when unmarshaling. + +// See RFC 4253, section 11.1. +const msgDisconnect = 1 + +// disconnectMsg is the message that signals a disconnect. 
It is also +// the error type returned from mux.Wait() +type disconnectMsg struct { + Reason uint32 `sshtype:"1"` + Message string + Language string +} + +func (d *disconnectMsg) Error() string { + return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) +} + +// See RFC 4253, section 7.1. +const msgKexInit = 20 + +type kexInitMsg struct { + Cookie [16]byte `sshtype:"20"` + KexAlgos []string + ServerHostKeyAlgos []string + CiphersClientServer []string + CiphersServerClient []string + MACsClientServer []string + MACsServerClient []string + CompressionClientServer []string + CompressionServerClient []string + LanguagesClientServer []string + LanguagesServerClient []string + FirstKexFollows bool + Reserved uint32 +} + +// See RFC 4253, section 8. + +// Diffie-Hellman +const msgKexDHInit = 30 + +type kexDHInitMsg struct { + X *big.Int `sshtype:"30"` +} + +const msgKexECDHInit = 30 + +type kexECDHInitMsg struct { + ClientPubKey []byte `sshtype:"30"` +} + +const msgKexECDHReply = 31 + +type kexECDHReplyMsg struct { + HostKey []byte `sshtype:"31"` + EphemeralPubKey []byte + Signature []byte +} + +const msgKexDHReply = 31 + +type kexDHReplyMsg struct { + HostKey []byte `sshtype:"31"` + Y *big.Int + Signature []byte +} + +// See RFC 4419, section 5. +const msgKexDHGexGroup = 31 + +type kexDHGexGroupMsg struct { + P *big.Int `sshtype:"31"` + G *big.Int +} + +const msgKexDHGexInit = 32 + +type kexDHGexInitMsg struct { + X *big.Int `sshtype:"32"` +} + +const msgKexDHGexReply = 33 + +type kexDHGexReplyMsg struct { + HostKey []byte `sshtype:"33"` + Y *big.Int + Signature []byte +} + +const msgKexDHGexRequest = 34 + +type kexDHGexRequestMsg struct { + MinBits uint32 `sshtype:"34"` + PreferedBits uint32 + MaxBits uint32 +} + +// See RFC 4253, section 10. +const msgServiceRequest = 5 + +type serviceRequestMsg struct { + Service string `sshtype:"5"` +} + +// See RFC 4253, section 10. +const msgServiceAccept = 6 + +type serviceAcceptMsg struct { + Service string `sshtype:"6"` +} + +// See RFC 8308, section 2.3 +const msgExtInfo = 7 + +type extInfoMsg struct { + NumExtensions uint32 `sshtype:"7"` + Payload []byte `ssh:"rest"` +} + +// See RFC 4252, section 5. +const msgUserAuthRequest = 50 + +type userAuthRequestMsg struct { + User string `sshtype:"50"` + Service string + Method string + Payload []byte `ssh:"rest"` +} + +// Used for debug printouts of packets. +type userAuthSuccessMsg struct { +} + +// See RFC 4252, section 5.1 +const msgUserAuthFailure = 51 + +type userAuthFailureMsg struct { + Methods []string `sshtype:"51"` + PartialSuccess bool +} + +// See RFC 4252, section 5.1 +const msgUserAuthSuccess = 52 + +// See RFC 4252, section 5.4 +const msgUserAuthBanner = 53 + +type userAuthBannerMsg struct { + Message string `sshtype:"53"` + // unused, but required to allow message parsing + Language string +} + +// See RFC 4256, section 3.2 +const msgUserAuthInfoRequest = 60 +const msgUserAuthInfoResponse = 61 + +type userAuthInfoRequestMsg struct { + Name string `sshtype:"60"` + Instruction string + Language string + NumPrompts uint32 + Prompts []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpen = 90 + +type channelOpenMsg struct { + ChanType string `sshtype:"90"` + PeersID uint32 + PeersWindow uint32 + MaxPacketSize uint32 + TypeSpecificData []byte `ssh:"rest"` +} + +const msgChannelExtendedData = 95 +const msgChannelData = 94 + +// Used for debug print outs of packets. 
+type channelDataMsg struct { + PeersID uint32 `sshtype:"94"` + Length uint32 + Rest []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpenConfirm = 91 + +type channelOpenConfirmMsg struct { + PeersID uint32 `sshtype:"91"` + MyID uint32 + MyWindow uint32 + MaxPacketSize uint32 + TypeSpecificData []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpenFailure = 92 + +type channelOpenFailureMsg struct { + PeersID uint32 `sshtype:"92"` + Reason RejectionReason + Message string + Language string +} + +const msgChannelRequest = 98 + +type channelRequestMsg struct { + PeersID uint32 `sshtype:"98"` + Request string + WantReply bool + RequestSpecificData []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.4. +const msgChannelSuccess = 99 + +type channelRequestSuccessMsg struct { + PeersID uint32 `sshtype:"99"` +} + +// See RFC 4254, section 5.4. +const msgChannelFailure = 100 + +type channelRequestFailureMsg struct { + PeersID uint32 `sshtype:"100"` +} + +// See RFC 4254, section 5.3 +const msgChannelClose = 97 + +type channelCloseMsg struct { + PeersID uint32 `sshtype:"97"` +} + +// See RFC 4254, section 5.3 +const msgChannelEOF = 96 + +type channelEOFMsg struct { + PeersID uint32 `sshtype:"96"` +} + +// See RFC 4254, section 4 +const msgGlobalRequest = 80 + +type globalRequestMsg struct { + Type string `sshtype:"80"` + WantReply bool + Data []byte `ssh:"rest"` +} + +// See RFC 4254, section 4 +const msgRequestSuccess = 81 + +type globalRequestSuccessMsg struct { + Data []byte `ssh:"rest" sshtype:"81"` +} + +// See RFC 4254, section 4 +const msgRequestFailure = 82 + +type globalRequestFailureMsg struct { + Data []byte `ssh:"rest" sshtype:"82"` +} + +// See RFC 4254, section 5.2 +const msgChannelWindowAdjust = 93 + +type windowAdjustMsg struct { + PeersID uint32 `sshtype:"93"` + AdditionalBytes uint32 +} + +// See RFC 4252, section 7 +const msgUserAuthPubKeyOk = 60 + +type userAuthPubKeyOkMsg struct { + Algo string `sshtype:"60"` + PubKey []byte +} + +// See RFC 4462, section 3 +const msgUserAuthGSSAPIResponse = 60 + +type userAuthGSSAPIResponse struct { + SupportMech []byte `sshtype:"60"` +} + +const msgUserAuthGSSAPIToken = 61 + +type userAuthGSSAPIToken struct { + Token []byte `sshtype:"61"` +} + +const msgUserAuthGSSAPIMIC = 66 + +type userAuthGSSAPIMIC struct { + MIC []byte `sshtype:"66"` +} + +// See RFC 4462, section 3.9 +const msgUserAuthGSSAPIErrTok = 64 + +type userAuthGSSAPIErrTok struct { + ErrorToken []byte `sshtype:"64"` +} + +// See RFC 4462, section 3.8 +const msgUserAuthGSSAPIError = 65 + +type userAuthGSSAPIError struct { + MajorStatus uint32 `sshtype:"65"` + MinorStatus uint32 + Message string + LanguageTag string +} + +// Transport layer OpenSSH extension. See [PROTOCOL], section 1.9 +const msgPing = 192 + +type pingMsg struct { + Data string `sshtype:"192"` +} + +// Transport layer OpenSSH extension. See [PROTOCOL], section 1.9 +const msgPong = 193 + +type pongMsg struct { + Data string `sshtype:"193"` +} + +// typeTags returns the possible type bytes for the given reflect.Type, which +// should be a struct. The possible values are separated by a '|' character. 
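+// For example, a first field tagged `sshtype:"1|2"` yields the tags {1, 2};
+// entries that are not valid decimal numbers are skipped.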
+func typeTags(structType reflect.Type) (tags []byte) { + tagStr := structType.Field(0).Tag.Get("sshtype") + + for _, tag := range strings.Split(tagStr, "|") { + i, err := strconv.Atoi(tag) + if err == nil { + tags = append(tags, byte(i)) + } + } + + return tags +} + +func fieldError(t reflect.Type, field int, problem string) error { + if problem != "" { + problem = ": " + problem + } + return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) +} + +var errShortRead = errors.New("ssh: short read") + +// Unmarshal parses data in SSH wire format into a structure. The out +// argument should be a pointer to struct. If the first member of the +// struct has the "sshtype" tag set to a '|'-separated set of numbers +// in decimal, the packet must start with one of those numbers. In +// case of error, Unmarshal returns a ParseError or +// UnexpectedMessageError. +func Unmarshal(data []byte, out interface{}) error { + v := reflect.ValueOf(out).Elem() + structType := v.Type() + expectedTypes := typeTags(structType) + + var expectedType byte + if len(expectedTypes) > 0 { + expectedType = expectedTypes[0] + } + + if len(data) == 0 { + return parseError(expectedType) + } + + if len(expectedTypes) > 0 { + goodType := false + for _, e := range expectedTypes { + if e > 0 && data[0] == e { + goodType = true + break + } + } + if !goodType { + return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) + } + data = data[1:] + } + + var ok bool + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + t := field.Type() + switch t.Kind() { + case reflect.Bool: + if len(data) < 1 { + return errShortRead + } + field.SetBool(data[0] != 0) + data = data[1:] + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + return fieldError(structType, i, "array of unsupported type") + } + if len(data) < t.Len() { + return errShortRead + } + for j, n := 0, t.Len(); j < n; j++ { + field.Index(j).Set(reflect.ValueOf(data[j])) + } + data = data[t.Len():] + case reflect.Uint64: + var u64 uint64 + if u64, data, ok = parseUint64(data); !ok { + return errShortRead + } + field.SetUint(u64) + case reflect.Uint32: + var u32 uint32 + if u32, data, ok = parseUint32(data); !ok { + return errShortRead + } + field.SetUint(uint64(u32)) + case reflect.Uint8: + if len(data) < 1 { + return errShortRead + } + field.SetUint(uint64(data[0])) + data = data[1:] + case reflect.String: + var s []byte + if s, data, ok = parseString(data); !ok { + return fieldError(structType, i, "") + } + field.SetString(string(s)) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if structType.Field(i).Tag.Get("ssh") == "rest" { + field.Set(reflect.ValueOf(data)) + data = nil + } else { + var s []byte + if s, data, ok = parseString(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(s)) + } + case reflect.String: + var nl []string + if nl, data, ok = parseNameList(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(nl)) + default: + return fieldError(structType, i, "slice of unsupported type") + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + if n, data, ok = parseInt(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(n)) + } else { + return fieldError(structType, i, "pointer to unsupported type") + } + default: + return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) + } + } + + if len(data) != 0 { + return parseError(expectedType) + } + + return nil +} + +// Marshal 
serializes the message in msg to SSH wire format. The msg +// argument should be a struct or pointer to struct. If the first +// member has the "sshtype" tag set to a number in decimal, that +// number is prepended to the result. If the last of member has the +// "ssh" tag set to "rest", its contents are appended to the output. +func Marshal(msg interface{}) []byte { + out := make([]byte, 0, 64) + return marshalStruct(out, msg) +} + +func marshalStruct(out []byte, msg interface{}) []byte { + v := reflect.Indirect(reflect.ValueOf(msg)) + msgTypes := typeTags(v.Type()) + if len(msgTypes) > 0 { + out = append(out, msgTypes[0]) + } + + for i, n := 0, v.NumField(); i < n; i++ { + field := v.Field(i) + switch t := field.Type(); t.Kind() { + case reflect.Bool: + var v uint8 + if field.Bool() { + v = 1 + } + out = append(out, v) + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) + } + for j, l := 0, t.Len(); j < l; j++ { + out = append(out, uint8(field.Index(j).Uint())) + } + case reflect.Uint32: + out = appendU32(out, uint32(field.Uint())) + case reflect.Uint64: + out = appendU64(out, uint64(field.Uint())) + case reflect.Uint8: + out = append(out, uint8(field.Uint())) + case reflect.String: + s := field.String() + out = appendInt(out, len(s)) + out = append(out, s...) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if v.Type().Field(i).Tag.Get("ssh") != "rest" { + out = appendInt(out, field.Len()) + } + out = append(out, field.Bytes()...) + case reflect.String: + offset := len(out) + out = appendU32(out, 0) + if n := field.Len(); n > 0 { + for j := 0; j < n; j++ { + f := field.Index(j) + if j != 0 { + out = append(out, ',') + } + out = append(out, f.String()...) 
+ } + // overwrite length value + binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) + } + default: + panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + nValue := reflect.ValueOf(&n) + nValue.Elem().Set(field) + needed := intLength(n) + oldLength := len(out) + + if cap(out)-len(out) < needed { + newOut := make([]byte, len(out), 2*(len(out)+needed)) + copy(newOut, out) + out = newOut + } + out = out[:oldLength+needed] + marshalInt(out[oldLength:], n) + } else { + panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) + } + } + } + + return out +} + +var bigOne = big.NewInt(1) + +func parseString(in []byte) (out, rest []byte, ok bool) { + if len(in) < 4 { + return + } + length := binary.BigEndian.Uint32(in) + in = in[4:] + if uint32(len(in)) < length { + return + } + out = in[:length] + rest = in[length:] + ok = true + return +} + +var ( + comma = []byte{','} + emptyNameList = []string{} +) + +func parseNameList(in []byte) (out []string, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + if len(contents) == 0 { + out = emptyNameList + return + } + parts := bytes.Split(contents, comma) + out = make([]string, len(parts)) + for i, part := range parts { + out[i] = string(part) + } + return +} + +func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + out = new(big.Int) + + if len(contents) > 0 && contents[0]&0x80 == 0x80 { + // This is a negative number + notBytes := make([]byte, len(contents)) + for i := range notBytes { + notBytes[i] = ^contents[i] + } + out.SetBytes(notBytes) + out.Add(out, bigOne) + out.Neg(out) + } else { + // Positive number + out.SetBytes(contents) + } + ok = true + return +} + +func parseUint32(in []byte) (uint32, []byte, bool) { + if len(in) < 4 { + return 0, nil, false + } + return binary.BigEndian.Uint32(in), in[4:], true +} + +func parseUint64(in []byte) (uint64, []byte, bool) { + if len(in) < 8 { + return 0, nil, false + } + return binary.BigEndian.Uint64(in), in[8:], true +} + +func intLength(n *big.Int) int { + length := 4 /* length bytes */ + if n.Sign() < 0 { + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bitLen := nMinus1.BitLen() + if bitLen%8 == 0 { + // The number will need 0xff padding + length++ + } + length += (bitLen + 7) / 8 + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bitLen := n.BitLen() + if bitLen%8 == 0 { + // The number will need 0x00 padding + length++ + } + length += (bitLen + 7) / 8 + } + + return length +} + +func marshalUint32(to []byte, n uint32) []byte { + binary.BigEndian.PutUint32(to, n) + return to[4:] +} + +func marshalUint64(to []byte, n uint64) []byte { + binary.BigEndian.PutUint64(to, n) + return to[8:] +} + +func marshalInt(to []byte, n *big.Int) []byte { + lengthBytes := to + to = to[4:] + length := 0 + + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement + // form. So we'll subtract 1 and invert. If the + // most-significant-bit isn't set then we'll need to pad the + // beginning with 0xff in order to keep the number negative. 
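+		// For example, -1 marshals to the single content byte 0xff; the
+		// four-byte length prefix is filled in at the end of this function.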
+ nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + to[0] = 0xff + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bytes := n.Bytes() + if len(bytes) > 0 && bytes[0]&0x80 != 0 { + // We'll have to pad this with a 0x00 in order to + // stop it looking like a negative number. + to[0] = 0 + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } + + lengthBytes[0] = byte(length >> 24) + lengthBytes[1] = byte(length >> 16) + lengthBytes[2] = byte(length >> 8) + lengthBytes[3] = byte(length) + return to +} + +func writeInt(w io.Writer, n *big.Int) { + length := intLength(n) + buf := make([]byte, length) + marshalInt(buf, n) + w.Write(buf) +} + +func writeString(w io.Writer, s []byte) { + var lengthBytes [4]byte + lengthBytes[0] = byte(len(s) >> 24) + lengthBytes[1] = byte(len(s) >> 16) + lengthBytes[2] = byte(len(s) >> 8) + lengthBytes[3] = byte(len(s)) + w.Write(lengthBytes[:]) + w.Write(s) +} + +func stringLength(n int) int { + return 4 + n +} + +func marshalString(to []byte, s []byte) []byte { + to[0] = byte(len(s) >> 24) + to[1] = byte(len(s) >> 16) + to[2] = byte(len(s) >> 8) + to[3] = byte(len(s)) + to = to[4:] + copy(to, s) + return to[len(s):] +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)) + +// Decode a packet into its corresponding message. +func decode(packet []byte) (interface{}, error) { + var msg interface{} + switch packet[0] { + case msgDisconnect: + msg = new(disconnectMsg) + case msgServiceRequest: + msg = new(serviceRequestMsg) + case msgServiceAccept: + msg = new(serviceAcceptMsg) + case msgExtInfo: + msg = new(extInfoMsg) + case msgKexInit: + msg = new(kexInitMsg) + case msgKexDHInit: + msg = new(kexDHInitMsg) + case msgKexDHReply: + msg = new(kexDHReplyMsg) + case msgUserAuthRequest: + msg = new(userAuthRequestMsg) + case msgUserAuthSuccess: + return new(userAuthSuccessMsg), nil + case msgUserAuthFailure: + msg = new(userAuthFailureMsg) + case msgUserAuthPubKeyOk: + msg = new(userAuthPubKeyOkMsg) + case msgGlobalRequest: + msg = new(globalRequestMsg) + case msgRequestSuccess: + msg = new(globalRequestSuccessMsg) + case msgRequestFailure: + msg = new(globalRequestFailureMsg) + case msgChannelOpen: + msg = new(channelOpenMsg) + case msgChannelData: + msg = new(channelDataMsg) + case msgChannelOpenConfirm: + msg = new(channelOpenConfirmMsg) + case msgChannelOpenFailure: + msg = new(channelOpenFailureMsg) + case msgChannelWindowAdjust: + msg = new(windowAdjustMsg) + case msgChannelEOF: + msg = new(channelEOFMsg) + case msgChannelClose: + msg = new(channelCloseMsg) + case msgChannelRequest: + msg = new(channelRequestMsg) + case msgChannelSuccess: + msg = new(channelRequestSuccessMsg) + case msgChannelFailure: + msg = new(channelRequestFailureMsg) + case msgUserAuthGSSAPIToken: + msg = new(userAuthGSSAPIToken) + case msgUserAuthGSSAPIMIC: + msg = new(userAuthGSSAPIMIC) + case msgUserAuthGSSAPIErrTok: + msg = new(userAuthGSSAPIErrTok) + case msgUserAuthGSSAPIError: + msg = new(userAuthGSSAPIError) + default: + return nil, unexpectedMessageError(0, packet[0]) + } + if err := Unmarshal(packet, msg); err != nil { + return nil, err + } + return msg, nil +} + +var packetTypeNames = map[byte]string{ + msgDisconnect: "disconnectMsg", + msgServiceRequest: "serviceRequestMsg", + 
msgServiceAccept: "serviceAcceptMsg", + msgExtInfo: "extInfoMsg", + msgKexInit: "kexInitMsg", + msgKexDHInit: "kexDHInitMsg", + msgKexDHReply: "kexDHReplyMsg", + msgUserAuthRequest: "userAuthRequestMsg", + msgUserAuthSuccess: "userAuthSuccessMsg", + msgUserAuthFailure: "userAuthFailureMsg", + msgUserAuthPubKeyOk: "userAuthPubKeyOkMsg", + msgGlobalRequest: "globalRequestMsg", + msgRequestSuccess: "globalRequestSuccessMsg", + msgRequestFailure: "globalRequestFailureMsg", + msgChannelOpen: "channelOpenMsg", + msgChannelData: "channelDataMsg", + msgChannelOpenConfirm: "channelOpenConfirmMsg", + msgChannelOpenFailure: "channelOpenFailureMsg", + msgChannelWindowAdjust: "windowAdjustMsg", + msgChannelEOF: "channelEOFMsg", + msgChannelClose: "channelCloseMsg", + msgChannelRequest: "channelRequestMsg", + msgChannelSuccess: "channelRequestSuccessMsg", + msgChannelFailure: "channelRequestFailureMsg", +} diff --git a/tempfork/sshtest/ssh/messages_test.go b/tempfork/sshtest/ssh/messages_test.go new file mode 100644 index 000000000..e79076412 --- /dev/null +++ b/tempfork/sshtest/ssh/messages_test.go @@ -0,0 +1,288 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "math/big" + "math/rand" + "reflect" + "testing" + "testing/quick" +) + +var intLengthTests = []struct { + val, length int +}{ + {0, 4 + 0}, + {1, 4 + 1}, + {127, 4 + 1}, + {128, 4 + 2}, + {-1, 4 + 1}, +} + +func TestIntLength(t *testing.T) { + for _, test := range intLengthTests { + v := new(big.Int).SetInt64(int64(test.val)) + length := intLength(v) + if length != test.length { + t.Errorf("For %d, got length %d but expected %d", test.val, length, test.length) + } + } +} + +type msgAllTypes struct { + Bool bool `sshtype:"21"` + Array [16]byte + Uint64 uint64 + Uint32 uint32 + Uint8 uint8 + String string + Strings []string + Bytes []byte + Int *big.Int + Rest []byte `ssh:"rest"` +} + +func (t *msgAllTypes) Generate(rand *rand.Rand, size int) reflect.Value { + m := &msgAllTypes{} + m.Bool = rand.Intn(2) == 1 + randomBytes(m.Array[:], rand) + m.Uint64 = uint64(rand.Int63n(1<<63 - 1)) + m.Uint32 = uint32(rand.Intn((1 << 31) - 1)) + m.Uint8 = uint8(rand.Intn(1 << 8)) + m.String = string(m.Array[:]) + m.Strings = randomNameList(rand) + m.Bytes = m.Array[:] + m.Int = randomInt(rand) + m.Rest = m.Array[:] + return reflect.ValueOf(m) +} + +func TestMarshalUnmarshal(t *testing.T) { + rand := rand.New(rand.NewSource(0)) + iface := &msgAllTypes{} + ty := reflect.ValueOf(iface).Type() + + n := 100 + if testing.Short() { + n = 5 + } + for j := 0; j < n; j++ { + v, ok := quick.Value(ty, rand) + if !ok { + t.Errorf("failed to create value") + break + } + + m1 := v.Elem().Interface() + m2 := iface + + marshaled := Marshal(m1) + if err := Unmarshal(marshaled, m2); err != nil { + t.Errorf("Unmarshal %#v: %s", m1, err) + break + } + + if !reflect.DeepEqual(v.Interface(), m2) { + t.Errorf("got: %#v\nwant:%#v\n%x", m2, m1, marshaled) + break + } + } +} + +func TestUnmarshalEmptyPacket(t *testing.T) { + var b []byte + var m channelRequestSuccessMsg + if err := Unmarshal(b, &m); err == nil { + t.Fatalf("unmarshal of empty slice succeeded") + } +} + +func TestUnmarshalUnexpectedPacket(t *testing.T) { + type S struct { + I uint32 `sshtype:"43"` + S string + B bool + } + + s := S{11, "hello", true} + packet := Marshal(s) + packet[0] = 42 + roundtrip := S{} + err := Unmarshal(packet, &roundtrip) + if err == nil { + 
t.Fatal("expected error, not nil") + } +} + +func TestMarshalPtr(t *testing.T) { + s := struct { + S string + }{"hello"} + + m1 := Marshal(s) + m2 := Marshal(&s) + if !bytes.Equal(m1, m2) { + t.Errorf("got %q, want %q for marshaled pointer", m2, m1) + } +} + +func TestBareMarshalUnmarshal(t *testing.T) { + type S struct { + I uint32 + S string + B bool + } + + s := S{42, "hello", true} + packet := Marshal(s) + roundtrip := S{} + Unmarshal(packet, &roundtrip) + + if !reflect.DeepEqual(s, roundtrip) { + t.Errorf("got %#v, want %#v", roundtrip, s) + } +} + +func TestBareMarshal(t *testing.T) { + type S2 struct { + I uint32 + } + s := S2{42} + packet := Marshal(s) + i, rest, ok := parseUint32(packet) + if len(rest) > 0 || !ok { + t.Errorf("parseInt(%q): parse error", packet) + } + if i != s.I { + t.Errorf("got %d, want %d", i, s.I) + } +} + +func TestUnmarshalShortKexInitPacket(t *testing.T) { + // This used to panic. + // Issue 11348 + packet := []byte{0x14, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xff, 0xff, 0xff, 0xff} + kim := &kexInitMsg{} + if err := Unmarshal(packet, kim); err == nil { + t.Error("truncated packet unmarshaled without error") + } +} + +func TestMarshalMultiTag(t *testing.T) { + var res struct { + A uint32 `sshtype:"1|2"` + } + + good1 := struct { + A uint32 `sshtype:"1"` + }{ + 1, + } + good2 := struct { + A uint32 `sshtype:"2"` + }{ + 1, + } + + if e := Unmarshal(Marshal(good1), &res); e != nil { + t.Errorf("error unmarshaling multipart tag: %v", e) + } + + if e := Unmarshal(Marshal(good2), &res); e != nil { + t.Errorf("error unmarshaling multipart tag: %v", e) + } + + bad1 := struct { + A uint32 `sshtype:"3"` + }{ + 1, + } + if e := Unmarshal(Marshal(bad1), &res); e == nil { + t.Errorf("bad struct unmarshaled without error") + } +} + +func randomBytes(out []byte, rand *rand.Rand) { + for i := 0; i < len(out); i++ { + out[i] = byte(rand.Int31()) + } +} + +func randomNameList(rand *rand.Rand) []string { + ret := make([]string, rand.Int31()&15) + for i := range ret { + s := make([]byte, 1+(rand.Int31()&15)) + for j := range s { + s[j] = 'a' + uint8(rand.Int31()&15) + } + ret[i] = string(s) + } + return ret +} + +func randomInt(rand *rand.Rand) *big.Int { + return new(big.Int).SetInt64(int64(int32(rand.Uint32()))) +} + +func (*kexInitMsg) Generate(rand *rand.Rand, size int) reflect.Value { + ki := &kexInitMsg{} + randomBytes(ki.Cookie[:], rand) + ki.KexAlgos = randomNameList(rand) + ki.ServerHostKeyAlgos = randomNameList(rand) + ki.CiphersClientServer = randomNameList(rand) + ki.CiphersServerClient = randomNameList(rand) + ki.MACsClientServer = randomNameList(rand) + ki.MACsServerClient = randomNameList(rand) + ki.CompressionClientServer = randomNameList(rand) + ki.CompressionServerClient = randomNameList(rand) + ki.LanguagesClientServer = randomNameList(rand) + ki.LanguagesServerClient = randomNameList(rand) + if rand.Int31()&1 == 1 { + ki.FirstKexFollows = true + } + return reflect.ValueOf(ki) +} + +func (*kexDHInitMsg) Generate(rand *rand.Rand, size int) reflect.Value { + dhi := &kexDHInitMsg{} + dhi.X = randomInt(rand) + return reflect.ValueOf(dhi) +} + +var ( + _kexInitMsg = new(kexInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface() + _kexDHInitMsg = new(kexDHInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface() + + _kexInit = Marshal(_kexInitMsg) + _kexDHInit = Marshal(_kexDHInitMsg) +) + +func BenchmarkMarshalKexInitMsg(b *testing.B) { + for i := 0; i < b.N; i++ { + 
Marshal(_kexInitMsg) + } +} + +func BenchmarkUnmarshalKexInitMsg(b *testing.B) { + m := new(kexInitMsg) + for i := 0; i < b.N; i++ { + Unmarshal(_kexInit, m) + } +} + +func BenchmarkMarshalKexDHInitMsg(b *testing.B) { + for i := 0; i < b.N; i++ { + Marshal(_kexDHInitMsg) + } +} + +func BenchmarkUnmarshalKexDHInitMsg(b *testing.B) { + m := new(kexDHInitMsg) + for i := 0; i < b.N; i++ { + Unmarshal(_kexDHInit, m) + } +} diff --git a/tempfork/sshtest/ssh/mux.go b/tempfork/sshtest/ssh/mux.go new file mode 100644 index 000000000..d2d24c635 --- /dev/null +++ b/tempfork/sshtest/ssh/mux.go @@ -0,0 +1,357 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "fmt" + "io" + "log" + "sync" + "sync/atomic" +) + +// debugMux, if set, causes messages in the connection protocol to be +// logged. +const debugMux = false + +// chanList is a thread safe channel list. +type chanList struct { + // protects concurrent access to chans + sync.Mutex + + // chans are indexed by the local id of the channel, which the + // other side should send in the PeersId field. + chans []*channel + + // This is a debugging aid: it offsets all IDs by this + // amount. This helps distinguish otherwise identical + // server/client muxes + offset uint32 +} + +// Assigns a channel ID to the given channel. +func (c *chanList) add(ch *channel) uint32 { + c.Lock() + defer c.Unlock() + for i := range c.chans { + if c.chans[i] == nil { + c.chans[i] = ch + return uint32(i) + c.offset + } + } + c.chans = append(c.chans, ch) + return uint32(len(c.chans)-1) + c.offset +} + +// getChan returns the channel for the given ID. +func (c *chanList) getChan(id uint32) *channel { + id -= c.offset + + c.Lock() + defer c.Unlock() + if id < uint32(len(c.chans)) { + return c.chans[id] + } + return nil +} + +func (c *chanList) remove(id uint32) { + id -= c.offset + c.Lock() + if id < uint32(len(c.chans)) { + c.chans[id] = nil + } + c.Unlock() +} + +// dropAll forgets all channels it knows, returning them in a slice. +func (c *chanList) dropAll() []*channel { + c.Lock() + defer c.Unlock() + var r []*channel + + for _, ch := range c.chans { + if ch == nil { + continue + } + r = append(r, ch) + } + c.chans = nil + return r +} + +// mux represents the state for the SSH connection protocol, which +// multiplexes many channels onto a single packet transport. +type mux struct { + conn packetConn + chanList chanList + + incomingChannels chan NewChannel + + globalSentMu sync.Mutex + globalResponses chan interface{} + incomingRequests chan *Request + + errCond *sync.Cond + err error +} + +// When debugging, each new chanList instantiation has a different +// offset. +var globalOff uint32 + +func (m *mux) Wait() error { + m.errCond.L.Lock() + defer m.errCond.L.Unlock() + for m.err == nil { + m.errCond.Wait() + } + return m.err +} + +// newMux returns a mux that runs over the given connection. 
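+// It starts a goroutine that reads packets from the connection and
+// dispatches them to channels and request queues until an error occurs;
+// use Wait to observe that error.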
+func newMux(p packetConn) *mux { + m := &mux{ + conn: p, + incomingChannels: make(chan NewChannel, chanSize), + globalResponses: make(chan interface{}, 1), + incomingRequests: make(chan *Request, chanSize), + errCond: newCond(), + } + if debugMux { + m.chanList.offset = atomic.AddUint32(&globalOff, 1) + } + + go m.loop() + return m +} + +func (m *mux) sendMessage(msg interface{}) error { + p := Marshal(msg) + if debugMux { + log.Printf("send global(%d): %#v", m.chanList.offset, msg) + } + return m.conn.writePacket(p) +} + +func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { + if wantReply { + m.globalSentMu.Lock() + defer m.globalSentMu.Unlock() + } + + if err := m.sendMessage(globalRequestMsg{ + Type: name, + WantReply: wantReply, + Data: payload, + }); err != nil { + return false, nil, err + } + + if !wantReply { + return false, nil, nil + } + + msg, ok := <-m.globalResponses + if !ok { + return false, nil, io.EOF + } + switch msg := msg.(type) { + case *globalRequestFailureMsg: + return false, msg.Data, nil + case *globalRequestSuccessMsg: + return true, msg.Data, nil + default: + return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) + } +} + +// ackRequest must be called after processing a global request that +// has WantReply set. +func (m *mux) ackRequest(ok bool, data []byte) error { + if ok { + return m.sendMessage(globalRequestSuccessMsg{Data: data}) + } + return m.sendMessage(globalRequestFailureMsg{Data: data}) +} + +func (m *mux) Close() error { + return m.conn.Close() +} + +// loop runs the connection machine. It will process packets until an +// error is encountered. To synchronize on loop exit, use mux.Wait. +func (m *mux) loop() { + var err error + for err == nil { + err = m.onePacket() + } + + for _, ch := range m.chanList.dropAll() { + ch.close() + } + + close(m.incomingChannels) + close(m.incomingRequests) + close(m.globalResponses) + + m.conn.Close() + + m.errCond.L.Lock() + m.err = err + m.errCond.Broadcast() + m.errCond.L.Unlock() + + if debugMux { + log.Println("loop exit", err) + } +} + +// onePacket reads and processes one packet. +func (m *mux) onePacket() error { + packet, err := m.conn.readPacket() + if err != nil { + return err + } + + if debugMux { + if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { + log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) + } else { + p, _ := decode(packet) + log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) + } + } + + switch packet[0] { + case msgChannelOpen: + return m.handleChannelOpen(packet) + case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: + return m.handleGlobalPacket(packet) + case msgPing: + var msg pingMsg + if err := Unmarshal(packet, &msg); err != nil { + return fmt.Errorf("failed to unmarshal ping@openssh.com message: %w", err) + } + return m.sendMessage(pongMsg(msg)) + } + + // assume a channel packet. 
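+	// These packets carry a one-byte type followed by a uint32 recipient
+	// channel ID, so anything shorter than 5 bytes is malformed.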
+ if len(packet) < 5 { + return parseError(packet[0]) + } + id := binary.BigEndian.Uint32(packet[1:]) + ch := m.chanList.getChan(id) + if ch == nil { + return m.handleUnknownChannelPacket(id, packet) + } + + return ch.handlePacket(packet) +} + +func (m *mux) handleGlobalPacket(packet []byte) error { + msg, err := decode(packet) + if err != nil { + return err + } + + switch msg := msg.(type) { + case *globalRequestMsg: + m.incomingRequests <- &Request{ + Type: msg.Type, + WantReply: msg.WantReply, + Payload: msg.Data, + mux: m, + } + case *globalRequestSuccessMsg, *globalRequestFailureMsg: + m.globalResponses <- msg + default: + panic(fmt.Sprintf("not a global message %#v", msg)) + } + + return nil +} + +// handleChannelOpen schedules a channel to be Accept()ed. +func (m *mux) handleChannelOpen(packet []byte) error { + var msg channelOpenMsg + if err := Unmarshal(packet, &msg); err != nil { + return err + } + + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + failMsg := channelOpenFailureMsg{ + PeersID: msg.PeersID, + Reason: ConnectionFailed, + Message: "invalid request", + Language: "en_US.UTF-8", + } + return m.sendMessage(failMsg) + } + + c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) + c.remoteId = msg.PeersID + c.maxRemotePayload = msg.MaxPacketSize + c.remoteWin.add(msg.PeersWindow) + m.incomingChannels <- c + return nil +} + +func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { + ch, err := m.openChannel(chanType, extra) + if err != nil { + return nil, nil, err + } + + return ch, ch.incomingRequests, nil +} + +func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { + ch := m.newChannel(chanType, channelOutbound, extra) + + ch.maxIncomingPayload = channelMaxPacket + + open := channelOpenMsg{ + ChanType: chanType, + PeersWindow: ch.myWindow, + MaxPacketSize: ch.maxIncomingPayload, + TypeSpecificData: extra, + PeersID: ch.localId, + } + if err := m.sendMessage(open); err != nil { + return nil, err + } + + switch msg := (<-ch.msg).(type) { + case *channelOpenConfirmMsg: + return ch, nil + case *channelOpenFailureMsg: + return nil, &OpenChannelError{msg.Reason, msg.Message} + default: + return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) + } +} + +func (m *mux) handleUnknownChannelPacket(id uint32, packet []byte) error { + msg, err := decode(packet) + if err != nil { + return err + } + + switch msg := msg.(type) { + // RFC 4254 section 5.4 says unrecognized channel requests should + // receive a failure response. + case *channelRequestMsg: + if msg.WantReply { + return m.sendMessage(channelRequestFailureMsg{ + PeersID: msg.PeersID, + }) + } + return nil + default: + return fmt.Errorf("ssh: invalid channel %d", id) + } +} diff --git a/tempfork/sshtest/ssh/mux_test.go b/tempfork/sshtest/ssh/mux_test.go new file mode 100644 index 000000000..21f0ac3e3 --- /dev/null +++ b/tempfork/sshtest/ssh/mux_test.go @@ -0,0 +1,839 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "errors" + "fmt" + "io" + "sync" + "testing" +) + +func muxPair() (*mux, *mux) { + a, b := memPipe() + + s := newMux(a) + c := newMux(b) + + return s, c +} + +// Returns both ends of a channel, and the mux for the 2nd +// channel. 
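+// The server side accepts a single channel of type "chan" opened by the
+// client side; any other channel type is rejected and fails the test.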
+func channelPair(t *testing.T) (*channel, *channel, *mux) { + c, s := muxPair() + + res := make(chan *channel, 1) + go func() { + newCh, ok := <-s.incomingChannels + if !ok { + t.Error("no incoming channel") + close(res) + return + } + if newCh.ChannelType() != "chan" { + t.Errorf("got type %q want chan", newCh.ChannelType()) + newCh.Reject(Prohibited, fmt.Sprintf("got type %q want chan", newCh.ChannelType())) + close(res) + return + } + ch, _, err := newCh.Accept() + if err != nil { + t.Errorf("accept: %v", err) + close(res) + return + } + res <- ch.(*channel) + }() + + ch, err := c.openChannel("chan", nil) + if err != nil { + t.Fatalf("OpenChannel: %v", err) + } + w := <-res + if w == nil { + t.Fatal("unable to get write channel") + } + + return w, ch, c +} + +// Test that stderr and stdout can be addressed from different +// goroutines. This is intended for use with the race detector. +func TestMuxChannelExtendedThreadSafety(t *testing.T) { + writer, reader, mux := channelPair(t) + defer writer.Close() + defer reader.Close() + defer mux.Close() + + var wr, rd sync.WaitGroup + magic := "hello world" + + wr.Add(2) + go func() { + io.WriteString(writer, magic) + wr.Done() + }() + go func() { + io.WriteString(writer.Stderr(), magic) + wr.Done() + }() + + rd.Add(2) + go func() { + c, err := io.ReadAll(reader) + if string(c) != magic { + t.Errorf("stdout read got %q, want %q (error %s)", c, magic, err) + } + rd.Done() + }() + go func() { + c, err := io.ReadAll(reader.Stderr()) + if string(c) != magic { + t.Errorf("stderr read got %q, want %q (error %s)", c, magic, err) + } + rd.Done() + }() + + wr.Wait() + writer.CloseWrite() + rd.Wait() +} + +func TestMuxReadWrite(t *testing.T) { + s, c, mux := channelPair(t) + defer s.Close() + defer c.Close() + defer mux.Close() + + magic := "hello world" + magicExt := "hello stderr" + var wg sync.WaitGroup + t.Cleanup(wg.Wait) + wg.Add(1) + go func() { + defer wg.Done() + _, err := s.Write([]byte(magic)) + if err != nil { + t.Errorf("Write: %v", err) + return + } + _, err = s.Extended(1).Write([]byte(magicExt)) + if err != nil { + t.Errorf("Write: %v", err) + return + } + }() + + var buf [1024]byte + n, err := c.Read(buf[:]) + if err != nil { + t.Fatalf("server Read: %v", err) + } + got := string(buf[:n]) + if got != magic { + t.Fatalf("server: got %q want %q", got, magic) + } + + n, err = c.Extended(1).Read(buf[:]) + if err != nil { + t.Fatalf("server Read: %v", err) + } + + got = string(buf[:n]) + if got != magicExt { + t.Fatalf("server: got %q want %q", got, magic) + } +} + +func TestMuxChannelOverflow(t *testing.T) { + reader, writer, mux := channelPair(t) + defer reader.Close() + defer writer.Close() + defer mux.Close() + + var wg sync.WaitGroup + t.Cleanup(wg.Wait) + wg.Add(1) + go func() { + defer wg.Done() + if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil { + t.Errorf("could not fill window: %v", err) + } + writer.Write(make([]byte, 1)) + }() + writer.remoteWin.waitWriterBlocked() + + // Send 1 byte. 
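+	// Hand-craft a msgChannelData packet carrying one extra byte beyond the
+	// window the reader granted. This violates flow control, so the reader's
+	// mux should shut down and the SendRequest below should fail.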
+ packet := make([]byte, 1+4+4+1) + packet[0] = msgChannelData + marshalUint32(packet[1:], writer.remoteId) + marshalUint32(packet[5:], uint32(1)) + packet[9] = 42 + + if err := writer.mux.conn.writePacket(packet); err != nil { + t.Errorf("could not send packet") + } + if _, err := reader.SendRequest("hello", true, nil); err == nil { + t.Errorf("SendRequest succeeded.") + } +} + +func TestMuxChannelReadUnblock(t *testing.T) { + reader, writer, mux := channelPair(t) + defer reader.Close() + defer writer.Close() + defer mux.Close() + + var wg sync.WaitGroup + t.Cleanup(wg.Wait) + wg.Add(1) + go func() { + defer wg.Done() + if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil { + t.Errorf("could not fill window: %v", err) + } + if _, err := writer.Write(make([]byte, 1)); err != nil { + t.Errorf("Write: %v", err) + } + writer.Close() + }() + + writer.remoteWin.waitWriterBlocked() + + buf := make([]byte, 32768) + for { + _, err := reader.Read(buf) + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("Read: %v", err) + } + } +} + +func TestMuxChannelCloseWriteUnblock(t *testing.T) { + reader, writer, mux := channelPair(t) + defer reader.Close() + defer writer.Close() + defer mux.Close() + + var wg sync.WaitGroup + t.Cleanup(wg.Wait) + wg.Add(1) + go func() { + defer wg.Done() + if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil { + t.Errorf("could not fill window: %v", err) + } + if _, err := writer.Write(make([]byte, 1)); err != io.EOF { + t.Errorf("got %v, want EOF for unblock write", err) + } + }() + + writer.remoteWin.waitWriterBlocked() + reader.Close() +} + +func TestMuxConnectionCloseWriteUnblock(t *testing.T) { + reader, writer, mux := channelPair(t) + defer reader.Close() + defer writer.Close() + defer mux.Close() + + var wg sync.WaitGroup + t.Cleanup(wg.Wait) + wg.Add(1) + go func() { + defer wg.Done() + if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil { + t.Errorf("could not fill window: %v", err) + } + if _, err := writer.Write(make([]byte, 1)); err != io.EOF { + t.Errorf("got %v, want EOF for unblock write", err) + } + }() + + writer.remoteWin.waitWriterBlocked() + mux.Close() +} + +func TestMuxReject(t *testing.T) { + client, server := muxPair() + defer server.Close() + defer client.Close() + + var wg sync.WaitGroup + t.Cleanup(wg.Wait) + wg.Add(1) + go func() { + defer wg.Done() + + ch, ok := <-server.incomingChannels + if !ok { + t.Error("cannot accept channel") + return + } + if ch.ChannelType() != "ch" || string(ch.ExtraData()) != "extra" { + t.Errorf("unexpected channel: %q, %q", ch.ChannelType(), ch.ExtraData()) + ch.Reject(RejectionReason(UnknownChannelType), UnknownChannelType.String()) + return + } + ch.Reject(RejectionReason(42), "message") + }() + + ch, err := client.openChannel("ch", []byte("extra")) + if ch != nil { + t.Fatal("openChannel not rejected") + } + + ocf, ok := err.(*OpenChannelError) + if !ok { + t.Errorf("got %#v want *OpenChannelError", err) + } else if ocf.Reason != 42 || ocf.Message != "message" { + t.Errorf("got %#v, want {Reason: 42, Message: %q}", ocf, "message") + } + + want := "ssh: rejected: unknown reason 42 (message)" + if err.Error() != want { + t.Errorf("got %q, want %q", err.Error(), want) + } +} + +func TestMuxChannelRequest(t *testing.T) { + client, server, mux := channelPair(t) + defer server.Close() + defer client.Close() + defer mux.Close() + + var received int + var wg sync.WaitGroup + t.Cleanup(wg.Wait) + wg.Add(1) + go func() { + for r := range 
server.incomingRequests { + received++ + r.Reply(r.Type == "yes", nil) + } + wg.Done() + }() + _, err := client.SendRequest("yes", false, nil) + if err != nil { + t.Fatalf("SendRequest: %v", err) + } + ok, err := client.SendRequest("yes", true, nil) + if err != nil { + t.Fatalf("SendRequest: %v", err) + } + + if !ok { + t.Errorf("SendRequest(yes): %v", ok) + + } + + ok, err = client.SendRequest("no", true, nil) + if err != nil { + t.Fatalf("SendRequest: %v", err) + } + if ok { + t.Errorf("SendRequest(no): %v", ok) + } + + client.Close() + wg.Wait() + + if received != 3 { + t.Errorf("got %d requests, want %d", received, 3) + } +} + +func TestMuxUnknownChannelRequests(t *testing.T) { + clientPipe, serverPipe := memPipe() + client := newMux(clientPipe) + defer serverPipe.Close() + defer client.Close() + + kDone := make(chan error, 1) + go func() { + // Ignore unknown channel messages that don't want a reply. + err := serverPipe.writePacket(Marshal(channelRequestMsg{ + PeersID: 1, + Request: "keepalive@openssh.com", + WantReply: false, + RequestSpecificData: []byte{}, + })) + if err != nil { + kDone <- fmt.Errorf("send: %w", err) + return + } + + // Send a keepalive, which should get a channel failure message + // in response. + err = serverPipe.writePacket(Marshal(channelRequestMsg{ + PeersID: 2, + Request: "keepalive@openssh.com", + WantReply: true, + RequestSpecificData: []byte{}, + })) + if err != nil { + kDone <- fmt.Errorf("send: %w", err) + return + } + + packet, err := serverPipe.readPacket() + if err != nil { + kDone <- fmt.Errorf("read packet: %w", err) + return + } + decoded, err := decode(packet) + if err != nil { + kDone <- fmt.Errorf("decode failed: %w", err) + return + } + + switch msg := decoded.(type) { + case *channelRequestFailureMsg: + if msg.PeersID != 2 { + kDone <- fmt.Errorf("received response to wrong message: %v", msg) + return + + } + default: + kDone <- fmt.Errorf("unexpected channel message: %v", msg) + return + } + + kDone <- nil + + // Receive and respond to the keepalive to confirm the mux is + // still processing requests. + packet, err = serverPipe.readPacket() + if err != nil { + kDone <- fmt.Errorf("read packet: %w", err) + return + } + if packet[0] != msgGlobalRequest { + kDone <- errors.New("expected global request") + return + } + + err = serverPipe.writePacket(Marshal(globalRequestFailureMsg{ + Data: []byte{}, + })) + if err != nil { + kDone <- fmt.Errorf("failed to send failure msg: %w", err) + return + } + + close(kDone) + }() + + // Wait for the server to send the keepalive message and receive back a + // response. + if err := <-kDone; err != nil { + t.Fatal(err) + } + + // Confirm client hasn't closed. + if _, _, err := client.SendRequest("keepalive@golang.org", true, nil); err != nil { + t.Fatalf("failed to send keepalive: %v", err) + } + + // Wait for the server to shut down. + if err := <-kDone; err != nil { + t.Fatal(err) + } +} + +func TestMuxClosedChannel(t *testing.T) { + clientPipe, serverPipe := memPipe() + client := newMux(clientPipe) + defer serverPipe.Close() + defer client.Close() + + kDone := make(chan error, 1) + go func() { + // Open the channel. 
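+		// Read the raw msgChannelOpen that client.openChannel (in the
+		// main goroutine below) writes to the pipe.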
+ packet, err := serverPipe.readPacket() + if err != nil { + kDone <- fmt.Errorf("read packet: %w", err) + return + } + if packet[0] != msgChannelOpen { + kDone <- errors.New("expected chan open") + return + } + + var openMsg channelOpenMsg + if err := Unmarshal(packet, &openMsg); err != nil { + kDone <- fmt.Errorf("unmarshal: %w", err) + return + } + + // Send back the opened channel confirmation. + err = serverPipe.writePacket(Marshal(channelOpenConfirmMsg{ + PeersID: openMsg.PeersID, + MyID: 0, + MyWindow: 0, + MaxPacketSize: channelMaxPacket, + })) + if err != nil { + kDone <- fmt.Errorf("send: %w", err) + return + } + + // Close the channel. + err = serverPipe.writePacket(Marshal(channelCloseMsg{ + PeersID: openMsg.PeersID, + })) + if err != nil { + kDone <- fmt.Errorf("send: %w", err) + return + } + + // Send a keepalive message on the channel we just closed. + err = serverPipe.writePacket(Marshal(channelRequestMsg{ + PeersID: openMsg.PeersID, + Request: "keepalive@openssh.com", + WantReply: true, + RequestSpecificData: []byte{}, + })) + if err != nil { + kDone <- fmt.Errorf("send: %w", err) + return + } + + // Receive the channel closed response. + packet, err = serverPipe.readPacket() + if err != nil { + kDone <- fmt.Errorf("read packet: %w", err) + return + } + if packet[0] != msgChannelClose { + kDone <- errors.New("expected channel close") + return + } + + // Receive the keepalive response failure. + packet, err = serverPipe.readPacket() + if err != nil { + kDone <- fmt.Errorf("read packet: %w", err) + return + } + if packet[0] != msgChannelFailure { + kDone <- errors.New("expected channel failure") + return + } + kDone <- nil + + // Receive and respond to the keepalive to confirm the mux is + // still processing requests. + packet, err = serverPipe.readPacket() + if err != nil { + kDone <- fmt.Errorf("read packet: %w", err) + return + } + if packet[0] != msgGlobalRequest { + kDone <- errors.New("expected global request") + return + } + + err = serverPipe.writePacket(Marshal(globalRequestFailureMsg{ + Data: []byte{}, + })) + if err != nil { + kDone <- fmt.Errorf("failed to send failure msg: %w", err) + return + } + + close(kDone) + }() + + // Open a channel. + ch, err := client.openChannel("chan", nil) + if err != nil { + t.Fatalf("OpenChannel: %v", err) + } + defer ch.Close() + + // Wait for the server to close the channel and send the keepalive. + <-kDone + + // Make sure the channel closed. + if _, ok := <-ch.incomingRequests; ok { + t.Fatalf("channel not closed") + } + + // Confirm client hasn't closed + if _, _, err := client.SendRequest("keepalive@golang.org", true, nil); err != nil { + t.Fatalf("failed to send keepalive: %v", err) + } + + // Wait for the server to shut down. 
+ <-kDone +} + +func TestMuxGlobalRequest(t *testing.T) { + var sawPeek bool + var wg sync.WaitGroup + defer func() { + wg.Wait() + if !sawPeek { + t.Errorf("never saw 'peek' request") + } + }() + + clientMux, serverMux := muxPair() + defer serverMux.Close() + defer clientMux.Close() + + wg.Add(1) + go func() { + defer wg.Done() + for r := range serverMux.incomingRequests { + sawPeek = sawPeek || r.Type == "peek" + if r.WantReply { + err := r.Reply(r.Type == "yes", + append([]byte(r.Type), r.Payload...)) + if err != nil { + t.Errorf("AckRequest: %v", err) + } + } + } + }() + + _, _, err := clientMux.SendRequest("peek", false, nil) + if err != nil { + t.Errorf("SendRequest: %v", err) + } + + ok, data, err := clientMux.SendRequest("yes", true, []byte("a")) + if !ok || string(data) != "yesa" || err != nil { + t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v", + ok, data, err) + } + if ok, data, err := clientMux.SendRequest("yes", true, []byte("a")); !ok || string(data) != "yesa" || err != nil { + t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v", + ok, data, err) + } + + if ok, data, err := clientMux.SendRequest("no", true, []byte("a")); ok || string(data) != "noa" || err != nil { + t.Errorf("SendRequest(\"no\", true, \"a\"): %v %v %v", + ok, data, err) + } +} + +func TestMuxGlobalRequestUnblock(t *testing.T) { + clientMux, serverMux := muxPair() + defer serverMux.Close() + defer clientMux.Close() + + result := make(chan error, 1) + go func() { + _, _, err := clientMux.SendRequest("hello", true, nil) + result <- err + }() + + <-serverMux.incomingRequests + serverMux.conn.Close() + err := <-result + + if err != io.EOF { + t.Errorf("want EOF, got %v", io.EOF) + } +} + +func TestMuxChannelRequestUnblock(t *testing.T) { + a, b, connB := channelPair(t) + defer a.Close() + defer b.Close() + defer connB.Close() + + result := make(chan error, 1) + go func() { + _, err := a.SendRequest("hello", true, nil) + result <- err + }() + + <-b.incomingRequests + connB.conn.Close() + err := <-result + + if err != io.EOF { + t.Errorf("want EOF, got %v", err) + } +} + +func TestMuxCloseChannel(t *testing.T) { + r, w, mux := channelPair(t) + defer mux.Close() + defer r.Close() + defer w.Close() + + result := make(chan error, 1) + go func() { + var b [1024]byte + _, err := r.Read(b[:]) + result <- err + }() + if err := w.Close(); err != nil { + t.Errorf("w.Close: %v", err) + } + + if _, err := w.Write([]byte("hello")); err != io.EOF { + t.Errorf("got err %v, want io.EOF after Close", err) + } + + if err := <-result; err != io.EOF { + t.Errorf("got %v (%T), want io.EOF", err, err) + } +} + +func TestMuxCloseWriteChannel(t *testing.T) { + r, w, mux := channelPair(t) + defer mux.Close() + + result := make(chan error, 1) + go func() { + var b [1024]byte + _, err := r.Read(b[:]) + result <- err + }() + if err := w.CloseWrite(); err != nil { + t.Errorf("w.CloseWrite: %v", err) + } + + if _, err := w.Write([]byte("hello")); err != io.EOF { + t.Errorf("got err %v, want io.EOF after CloseWrite", err) + } + + if err := <-result; err != io.EOF { + t.Errorf("got %v (%T), want io.EOF", err, err) + } +} + +func TestMuxInvalidRecord(t *testing.T) { + a, b := muxPair() + defer a.Close() + defer b.Close() + + packet := make([]byte, 1+4+4+1) + packet[0] = msgChannelData + marshalUint32(packet[1:], 29348723 /* invalid channel id */) + marshalUint32(packet[5:], 1) + packet[9] = 42 + + a.conn.writePacket(packet) + go a.SendRequest("hello", false, nil) + // 'a' wrote an invalid packet, so 'b' has exited. 
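+	// Its incomingRequests channel is closed when the mux loop exits, so
+	// this receive should report a closed channel rather than a request.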
+ req, ok := <-b.incomingRequests + if ok { + t.Errorf("got request %#v after receiving invalid packet", req) + } +} + +func TestZeroWindowAdjust(t *testing.T) { + a, b, mux := channelPair(t) + defer a.Close() + defer b.Close() + defer mux.Close() + + go func() { + io.WriteString(a, "hello") + // bogus adjust. + a.sendMessage(windowAdjustMsg{}) + io.WriteString(a, "world") + a.Close() + }() + + want := "helloworld" + c, _ := io.ReadAll(b) + if string(c) != want { + t.Errorf("got %q want %q", c, want) + } +} + +func TestMuxMaxPacketSize(t *testing.T) { + a, b, mux := channelPair(t) + defer a.Close() + defer b.Close() + defer mux.Close() + + large := make([]byte, a.maxRemotePayload+1) + packet := make([]byte, 1+4+4+1+len(large)) + packet[0] = msgChannelData + marshalUint32(packet[1:], a.remoteId) + marshalUint32(packet[5:], uint32(len(large))) + packet[9] = 42 + + if err := a.mux.conn.writePacket(packet); err != nil { + t.Errorf("could not send packet") + } + + var wg sync.WaitGroup + t.Cleanup(wg.Wait) + wg.Add(1) + go func() { + a.SendRequest("hello", false, nil) + wg.Done() + }() + + _, ok := <-b.incomingRequests + if ok { + t.Errorf("connection still alive after receiving large packet.") + } +} + +func TestMuxChannelWindowDeferredUpdates(t *testing.T) { + s, c, mux := channelPair(t) + cTransport := mux.conn.(*memTransport) + defer s.Close() + defer c.Close() + defer mux.Close() + + var wg sync.WaitGroup + t.Cleanup(wg.Wait) + + data := make([]byte, 1024) + + wg.Add(1) + go func() { + defer wg.Done() + _, err := s.Write(data) + if err != nil { + t.Errorf("Write: %v", err) + return + } + }() + cWritesInit := cTransport.getWriteCount() + buf := make([]byte, 1) + for i := 0; i < len(data); i++ { + n, err := c.Read(buf) + if n != len(buf) || err != nil { + t.Fatalf("Read: %v, %v", n, err) + } + } + cWrites := cTransport.getWriteCount() - cWritesInit + // reading 1 KiB should not cause any window updates to be sent, but allow + // for some unexpected writes + if cWrites > 30 { + t.Fatalf("reading 1 KiB from channel caused %v writes", cWrites) + } +} + +// Don't ship code with debug=true. +func TestDebug(t *testing.T) { + if debugMux { + t.Error("mux debug switched on") + } + if debugHandshake { + t.Error("handshake debug switched on") + } + if debugTransport { + t.Error("transport debug switched on") + } +} diff --git a/tempfork/sshtest/ssh/server.go b/tempfork/sshtest/ssh/server.go new file mode 100644 index 000000000..1839ddc6a --- /dev/null +++ b/tempfork/sshtest/ssh/server.go @@ -0,0 +1,933 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "strings" +) + +// The Permissions type holds fine-grained permissions that are +// specific to a user or a specific authentication method for a user. +// The Permissions value for a successful authentication attempt is +// available in ServerConn, so it can be used to pass information from +// the user-authentication phase to the application layer. +type Permissions struct { + // CriticalOptions indicate restrictions to the default + // permissions, and are typically used in conjunction with + // user certificates. The standard for SSH certificates + // defines "force-command" (only allow the given command to + // execute) and "source-address" (only allow connections from + // the given address). 
The SSH package currently only enforces + // the "source-address" critical option. It is up to server + // implementations to enforce other critical options, such as + // "force-command", by checking them after the SSH handshake + // is successful. In general, SSH servers should reject + // connections that specify critical options that are unknown + // or not supported. + CriticalOptions map[string]string + + // Extensions are extra functionality that the server may + // offer on authenticated connections. Lack of support for an + // extension does not preclude authenticating a user. Common + // extensions are "permit-agent-forwarding", + // "permit-X11-forwarding". The Go SSH library currently does + // not act on any extension, and it is up to server + // implementations to honor them. Extensions can be used to + // pass data from the authentication callbacks to the server + // application layer. + Extensions map[string]string +} + +type GSSAPIWithMICConfig struct { + // AllowLogin, must be set, is called when gssapi-with-mic + // authentication is selected (RFC 4462 section 3). The srcName is from the + // results of the GSS-API authentication. The format is username@DOMAIN. + // GSSAPI just guarantees to the server who the user is, but not if they can log in, and with what permissions. + // This callback is called after the user identity is established with GSSAPI to decide if the user can login with + // which permissions. If the user is allowed to login, it should return a nil error. + AllowLogin func(conn ConnMetadata, srcName string) (*Permissions, error) + + // Server must be set. It's the implementation + // of the GSSAPIServer interface. See GSSAPIServer interface for details. + Server GSSAPIServer +} + +// SendAuthBanner implements [ServerPreAuthConn]. +func (s *connection) SendAuthBanner(msg string) error { + return s.transport.writePacket(Marshal(&userAuthBannerMsg{ + Message: msg, + })) +} + +func (*connection) unexportedMethodForFutureProofing() {} + +// ServerPreAuthConn is the interface available on an incoming server +// connection before authentication has completed. +type ServerPreAuthConn interface { + unexportedMethodForFutureProofing() // permits growing ServerPreAuthConn safely later, ala testing.TB + + ConnMetadata + + // SendAuthBanner sends a banner message to the client. + // It returns an error once the authentication phase has ended. + SendAuthBanner(string) error +} + +// ServerConfig holds server specific configuration data. +type ServerConfig struct { + // Config contains configuration shared between client and server. + Config + + // PublicKeyAuthAlgorithms specifies the supported client public key + // authentication algorithms. Note that this should not include certificate + // types since those use the underlying algorithm. This list is sent to the + // client if it supports the server-sig-algs extension. Order is irrelevant. + // If unspecified then a default set of algorithms is used. + PublicKeyAuthAlgorithms []string + + hostKeys []Signer + + // NoClientAuth is true if clients are allowed to connect without + // authenticating. + // To determine NoClientAuth at runtime, set NoClientAuth to true + // and the optional NoClientAuthCallback to a non-nil value. + NoClientAuth bool + + // NoClientAuthCallback, if non-nil, is called when a user + // attempts to authenticate with auth method "none". + // NoClientAuth must also be set to true for this be used, or + // this func is unused. 
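+	// On success, the returned Permissions are exposed to the caller as
+	// ServerConn.Permissions.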
+ NoClientAuthCallback func(ConnMetadata) (*Permissions, error) + + // MaxAuthTries specifies the maximum number of authentication attempts + // permitted per connection. If set to a negative number, the number of + // attempts are unlimited. If set to zero, the number of attempts are limited + // to 6. + MaxAuthTries int + + // PasswordCallback, if non-nil, is called when a user + // attempts to authenticate using a password. + PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) + + // PublicKeyCallback, if non-nil, is called when a client + // offers a public key for authentication. It must return a nil error + // if the given public key can be used to authenticate the + // given user. For example, see CertChecker.Authenticate. A + // call to this function does not guarantee that the key + // offered is in fact used to authenticate. To record any data + // depending on the public key, store it inside a + // Permissions.Extensions entry. + PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // KeyboardInteractiveCallback, if non-nil, is called when + // keyboard-interactive authentication is selected (RFC + // 4256). The client object's Challenge function should be + // used to query the user. The callback may offer multiple + // Challenge rounds. To avoid information leaks, the client + // should be presented a challenge even if the user is + // unknown. + KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) + + // AuthLogCallback, if non-nil, is called to log all authentication + // attempts. + AuthLogCallback func(conn ConnMetadata, method string, err error) + + // PreAuthConnCallback, if non-nil, is called upon receiving a new connection + // before any authentication has started. The provided ServerPreAuthConn + // can be used at any time before authentication is complete, including + // after this callback has returned. + PreAuthConnCallback func(ServerPreAuthConn) + + // ServerVersion is the version identification string to announce in + // the public handshake. + // If empty, a reasonable default is used. + // Note that RFC 4253 section 4.2 requires that this string start with + // "SSH-2.0-". + ServerVersion string + + // BannerCallback, if present, is called and the return string is sent to + // the client after key exchange completed but before authentication. + BannerCallback func(conn ConnMetadata) string + + // GSSAPIWithMICConfig includes gssapi server and callback, which if both non-nil, is used + // when gssapi-with-mic authentication is selected (RFC 4462 section 3). + GSSAPIWithMICConfig *GSSAPIWithMICConfig +} + +// AddHostKey adds a private key as a host key. If an existing host +// key exists with the same public key format, it is replaced. Each server +// config must have at least one host key. +func (s *ServerConfig) AddHostKey(key Signer) { + for i, k := range s.hostKeys { + if k.PublicKey().Type() == key.PublicKey().Type() { + s.hostKeys[i] = key + return + } + } + + s.hostKeys = append(s.hostKeys, key) +} + +// cachedPubKey contains the results of querying whether a public key is +// acceptable for a user. This is a FIFO cache. +type cachedPubKey struct { + user string + pubKeyData []byte + result error + perms *Permissions +} + +// maxCachedPubKeys is the number of cache entries we store. +// +// Due to consistent misuse of the PublicKeyCallback API, we have reduced this +// to 1, such that the only key in the cache is the most recently seen one. 
This +// forces the behavior that the last call to PublicKeyCallback will always be +// with the key that is used for authentication. +const maxCachedPubKeys = 1 + +// pubKeyCache caches tests for public keys. Since SSH clients +// will query whether a public key is acceptable before attempting to +// authenticate with it, we end up with duplicate queries for public +// key validity. The cache only applies to a single ServerConn. +type pubKeyCache struct { + keys []cachedPubKey +} + +// get returns the result for a given user/algo/key tuple. +func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { + for _, k := range c.keys { + if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { + return k, true + } + } + return cachedPubKey{}, false +} + +// add adds the given tuple to the cache. +func (c *pubKeyCache) add(candidate cachedPubKey) { + if len(c.keys) >= maxCachedPubKeys { + c.keys = c.keys[1:] + } + c.keys = append(c.keys, candidate) +} + +// ServerConn is an authenticated SSH connection, as seen from the +// server +type ServerConn struct { + Conn + + // If the succeeding authentication callback returned a + // non-nil Permissions pointer, it is stored here. + Permissions *Permissions +} + +// NewServerConn starts a new SSH server with c as the underlying +// transport. It starts with a handshake and, if the handshake is +// unsuccessful, it closes the connection and returns an error. The +// Request and NewChannel channels must be serviced, or the connection +// will hang. +// +// The returned error may be of type *ServerAuthError for +// authentication errors. +func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + if fullConf.MaxAuthTries == 0 { + fullConf.MaxAuthTries = 6 + } + if len(fullConf.PublicKeyAuthAlgorithms) == 0 { + fullConf.PublicKeyAuthAlgorithms = supportedPubKeyAuthAlgos + } else { + for _, algo := range fullConf.PublicKeyAuthAlgorithms { + if !contains(supportedPubKeyAuthAlgos, algo) { + c.Close() + return nil, nil, nil, fmt.Errorf("ssh: unsupported public key authentication algorithm %s", algo) + } + } + } + // Check if the config contains any unsupported key exchanges + for _, kex := range fullConf.KeyExchanges { + if _, ok := serverForbiddenKexAlgos[kex]; ok { + c.Close() + return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex) + } + } + + s := &connection{ + sshConn: sshConn{conn: c}, + } + perms, err := s.serverHandshake(&fullConf) + if err != nil { + c.Close() + return nil, nil, nil, err + } + return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil +} + +// signAndMarshal signs the data with the appropriate algorithm, +// and serializes the result in SSH wire format. algo is the negotiate +// algorithm and may be a certificate type. +func signAndMarshal(k AlgorithmSigner, rand io.Reader, data []byte, algo string) ([]byte, error) { + sig, err := k.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) + if err != nil { + return nil, err + } + + return Marshal(sig), nil +} + +// handshake performs key exchange and user authentication. 
+func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { + if len(config.hostKeys) == 0 { + return nil, errors.New("ssh: server has no host keys") + } + + if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && + config.KeyboardInteractiveCallback == nil && (config.GSSAPIWithMICConfig == nil || + config.GSSAPIWithMICConfig.AllowLogin == nil || config.GSSAPIWithMICConfig.Server == nil) { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + + if config.ServerVersion != "" { + s.serverVersion = []byte(config.ServerVersion) + } else { + s.serverVersion = []byte(packageVersion) + } + var err error + s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) + if err != nil { + return nil, err + } + + tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) + s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) + + if err := s.transport.waitSession(); err != nil { + return nil, err + } + + // We just did the key change, so the session ID is established. + s.sessionID = s.transport.getSessionID() + + var packet []byte + if packet, err = s.transport.readPacket(); err != nil { + return nil, err + } + + var serviceRequest serviceRequestMsg + if err = Unmarshal(packet, &serviceRequest); err != nil { + return nil, err + } + if serviceRequest.Service != serviceUserAuth { + return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") + } + serviceAccept := serviceAcceptMsg{ + Service: serviceUserAuth, + } + if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { + return nil, err + } + + perms, err := s.serverAuthenticate(config) + if err != nil { + return nil, err + } + s.mux = newMux(s.transport) + return perms, err +} + +func checkSourceAddress(addr net.Addr, sourceAddrs string) error { + if addr == nil { + return errors.New("ssh: no address known for client, but source-address match required") + } + + tcpAddr, ok := addr.(*net.TCPAddr) + if !ok { + return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr) + } + + for _, sourceAddr := range strings.Split(sourceAddrs, ",") { + if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { + if allowedIP.Equal(tcpAddr.IP) { + return nil + } + } else { + _, ipNet, err := net.ParseCIDR(sourceAddr) + if err != nil { + return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) + } + + if ipNet.Contains(tcpAddr.IP) { + return nil + } + } + } + + return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) +} + +func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, token []byte, s *connection, + sessionID []byte, userAuthReq userAuthRequestMsg) (authErr error, perms *Permissions, err error) { + gssAPIServer := gssapiConfig.Server + defer gssAPIServer.DeleteSecContext() + var srcName string + for { + var ( + outToken []byte + needContinue bool + ) + outToken, srcName, needContinue, err = gssAPIServer.AcceptSecContext(token) + if err != nil { + return err, nil, nil + } + if len(outToken) != 0 { + if err := s.transport.writePacket(Marshal(&userAuthGSSAPIToken{ + Token: outToken, + })); err != nil { + return nil, nil, err + } + } + if !needContinue { + break + } + packet, err := s.transport.readPacket() + if err != nil { + return nil, nil, err + } + userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} + if err := 
Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { + return nil, nil, err + } + token = userAuthGSSAPITokenReq.Token + } + packet, err := s.transport.readPacket() + if err != nil { + return nil, nil, err + } + userAuthGSSAPIMICReq := &userAuthGSSAPIMIC{} + if err := Unmarshal(packet, userAuthGSSAPIMICReq); err != nil { + return nil, nil, err + } + mic := buildMIC(string(sessionID), userAuthReq.User, userAuthReq.Service, userAuthReq.Method) + if err := gssAPIServer.VerifyMIC(mic, userAuthGSSAPIMICReq.MIC); err != nil { + return err, nil, nil + } + perms, authErr = gssapiConfig.AllowLogin(s, srcName) + return authErr, perms, nil +} + +// isAlgoCompatible checks if the signature format is compatible with the +// selected algorithm taking into account edge cases that occur with old +// clients. +func isAlgoCompatible(algo, sigFormat string) bool { + // Compatibility for old clients. + // + // For certificate authentication with OpenSSH 7.2-7.7 signature format can + // be rsa-sha2-256 or rsa-sha2-512 for the algorithm + // ssh-rsa-cert-v01@openssh.com. + // + // With gpg-agent < 2.2.6 the algorithm can be rsa-sha2-256 or rsa-sha2-512 + // for signature format ssh-rsa. + if isRSA(algo) && isRSA(sigFormat) { + return true + } + // Standard case: the underlying algorithm must match the signature format. + return underlyingAlgo(algo) == sigFormat +} + +// ServerAuthError represents server authentication errors and is +// sometimes returned by NewServerConn. It appends any authentication +// errors that may occur, and is returned if all of the authentication +// methods provided by the user failed to authenticate. +type ServerAuthError struct { + // Errors contains authentication errors returned by the authentication + // callback methods. The first entry is typically ErrNoAuth. + Errors []error +} + +func (l ServerAuthError) Error() string { + var errs []string + for _, err := range l.Errors { + errs = append(errs, err.Error()) + } + return "[" + strings.Join(errs, ", ") + "]" +} + +// ServerAuthCallbacks defines server-side authentication callbacks. +type ServerAuthCallbacks struct { + // PasswordCallback behaves like [ServerConfig.PasswordCallback]. + PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) + + // PublicKeyCallback behaves like [ServerConfig.PublicKeyCallback]. + PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // KeyboardInteractiveCallback behaves like [ServerConfig.KeyboardInteractiveCallback]. + KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) + + // GSSAPIWithMICConfig behaves like [ServerConfig.GSSAPIWithMICConfig]. + GSSAPIWithMICConfig *GSSAPIWithMICConfig +} + +// PartialSuccessError can be returned by any of the [ServerConfig] +// authentication callbacks to indicate to the client that authentication has +// partially succeeded, but further steps are required. +type PartialSuccessError struct { + // Next defines the authentication callbacks to apply to further steps. The + // available methods communicated to the client are based on the non-nil + // ServerAuthCallbacks fields. + Next ServerAuthCallbacks +} + +func (p *PartialSuccessError) Error() string { + return "ssh: authenticated with partial success" +} + +// ErrNoAuth is the error value returned if no +// authentication method has been passed yet. 
This happens as a normal +// part of the authentication loop, since the client first tries +// 'none' authentication to discover available methods. +// It is returned in ServerAuthError.Errors from NewServerConn. +var ErrNoAuth = errors.New("ssh: no auth passed yet") + +// BannerError is an error that can be returned by authentication handlers in +// ServerConfig to send a banner message to the client. +type BannerError struct { + Err error + Message string +} + +func (b *BannerError) Unwrap() error { + return b.Err +} + +func (b *BannerError) Error() string { + if b.Err == nil { + return b.Message + } + return b.Err.Error() +} + +func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { + if config.PreAuthConnCallback != nil { + config.PreAuthConnCallback(s) + } + + sessionID := s.transport.getSessionID() + var cache pubKeyCache + var perms *Permissions + + authFailures := 0 + noneAuthCount := 0 + var authErrs []error + var calledBannerCallback bool + partialSuccessReturned := false + // Set the initial authentication callbacks from the config. They can be + // changed if a PartialSuccessError is returned. + authConfig := ServerAuthCallbacks{ + PasswordCallback: config.PasswordCallback, + PublicKeyCallback: config.PublicKeyCallback, + KeyboardInteractiveCallback: config.KeyboardInteractiveCallback, + GSSAPIWithMICConfig: config.GSSAPIWithMICConfig, + } + +userAuthLoop: + for { + if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 { + discMsg := &disconnectMsg{ + Reason: 2, + Message: "too many authentication failures", + } + + if err := s.transport.writePacket(Marshal(discMsg)); err != nil { + return nil, err + } + authErrs = append(authErrs, discMsg) + return nil, &ServerAuthError{Errors: authErrs} + } + + var userAuthReq userAuthRequestMsg + if packet, err := s.transport.readPacket(); err != nil { + if err == io.EOF { + return nil, &ServerAuthError{Errors: authErrs} + } + return nil, err + } else if err = Unmarshal(packet, &userAuthReq); err != nil { + return nil, err + } + + if userAuthReq.Service != serviceSSH { + return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) + } + + if s.user != userAuthReq.User && partialSuccessReturned { + return nil, fmt.Errorf("ssh: client changed the user after a partial success authentication, previous user %q, current user %q", + s.user, userAuthReq.User) + } + + s.user = userAuthReq.User + + if !calledBannerCallback && config.BannerCallback != nil { + calledBannerCallback = true + if msg := config.BannerCallback(s); msg != "" { + if err := s.SendAuthBanner(msg); err != nil { + return nil, err + } + } + } + + perms = nil + authErr := ErrNoAuth + + switch userAuthReq.Method { + case "none": + noneAuthCount++ + // We don't allow none authentication after a partial success + // response. 
+ if config.NoClientAuth && !partialSuccessReturned { + if config.NoClientAuthCallback != nil { + perms, authErr = config.NoClientAuthCallback(s) + } else { + authErr = nil + } + } + case "password": + if authConfig.PasswordCallback == nil { + authErr = errors.New("ssh: password auth not configured") + break + } + payload := userAuthReq.Payload + if len(payload) < 1 || payload[0] != 0 { + return nil, parseError(msgUserAuthRequest) + } + payload = payload[1:] + password, payload, ok := parseString(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + + perms, authErr = authConfig.PasswordCallback(s, password) + case "keyboard-interactive": + if authConfig.KeyboardInteractiveCallback == nil { + authErr = errors.New("ssh: keyboard-interactive auth not configured") + break + } + + prompter := &sshClientKeyboardInteractive{s} + perms, authErr = authConfig.KeyboardInteractiveCallback(s, prompter.Challenge) + case "publickey": + if authConfig.PublicKeyCallback == nil { + authErr = errors.New("ssh: publickey auth not configured") + break + } + payload := userAuthReq.Payload + if len(payload) < 1 { + return nil, parseError(msgUserAuthRequest) + } + isQuery := payload[0] == 0 + payload = payload[1:] + algoBytes, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + algo := string(algoBytes) + if !contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) { + authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) + break + } + + pubKeyData, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + + pubKey, err := ParsePublicKey(pubKeyData) + if err != nil { + return nil, err + } + + candidate, ok := cache.get(s.user, pubKeyData) + if !ok { + candidate.user = s.user + candidate.pubKeyData = pubKeyData + candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey) + _, isPartialSuccessError := candidate.result.(*PartialSuccessError) + + if (candidate.result == nil || isPartialSuccessError) && + candidate.perms != nil && + candidate.perms.CriticalOptions != nil && + candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { + if err := checkSourceAddress( + s.RemoteAddr(), + candidate.perms.CriticalOptions[sourceAddressCriticalOption]); err != nil { + candidate.result = err + } + } + cache.add(candidate) + } + + if isQuery { + // The client can query if the given public key + // would be okay. + + if len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + _, isPartialSuccessError := candidate.result.(*PartialSuccessError) + if candidate.result == nil || isPartialSuccessError { + okMsg := userAuthPubKeyOkMsg{ + Algo: algo, + PubKey: pubKeyData, + } + if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { + return nil, err + } + continue userAuthLoop + } + authErr = candidate.result + } else { + sig, payload, ok := parseSignature(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + // Ensure the declared public key algo is compatible with the + // decoded one. This check will ensure we don't accept e.g. + // ssh-rsa-cert-v01@openssh.com algorithm with ssh-rsa public + // key type. The algorithm and public key type must be + // consistent: both must be certificate algorithms, or neither. 
+ if !contains(algorithmsForKeyFormat(pubKey.Type()), algo) { + authErr = fmt.Errorf("ssh: public key type %q not compatible with selected algorithm %q", + pubKey.Type(), algo) + break + } + // Ensure the public key algo and signature algo + // are supported. Compare the private key + // algorithm name that corresponds to algo with + // sig.Format. This is usually the same, but + // for certs, the names differ. + if !contains(config.PublicKeyAuthAlgorithms, sig.Format) { + authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) + break + } + if !isAlgoCompatible(algo, sig.Format) { + authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo) + break + } + + signedData := buildDataSignedForAuth(sessionID, userAuthReq, algo, pubKeyData) + + if err := pubKey.Verify(signedData, sig); err != nil { + return nil, err + } + + authErr = candidate.result + perms = candidate.perms + } + case "gssapi-with-mic": + if authConfig.GSSAPIWithMICConfig == nil { + authErr = errors.New("ssh: gssapi-with-mic auth not configured") + break + } + gssapiConfig := authConfig.GSSAPIWithMICConfig + userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) + if err != nil { + return nil, parseError(msgUserAuthRequest) + } + // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication. + if userAuthRequestGSSAPI.N == 0 { + authErr = fmt.Errorf("ssh: Mechanism negotiation is not supported") + break + } + var i uint32 + present := false + for i = 0; i < userAuthRequestGSSAPI.N; i++ { + if userAuthRequestGSSAPI.OIDS[i].Equal(krb5Mesh) { + present = true + break + } + } + if !present { + authErr = fmt.Errorf("ssh: GSSAPI authentication must use the Kerberos V5 mechanism") + break + } + // Initial server response, see RFC 4462 section 3.3. + if err := s.transport.writePacket(Marshal(&userAuthGSSAPIResponse{ + SupportMech: krb5OID, + })); err != nil { + return nil, err + } + // Exchange token, see RFC 4462 section 3.4. + packet, err := s.transport.readPacket() + if err != nil { + return nil, err + } + userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} + if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { + return nil, err + } + authErr, perms, err = gssExchangeToken(gssapiConfig, userAuthGSSAPITokenReq.Token, s, sessionID, + userAuthReq) + if err != nil { + return nil, err + } + default: + authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) + } + + authErrs = append(authErrs, authErr) + + if config.AuthLogCallback != nil { + config.AuthLogCallback(s, userAuthReq.Method, authErr) + } + + var bannerErr *BannerError + if errors.As(authErr, &bannerErr) { + if bannerErr.Message != "" { + if err := s.SendAuthBanner(bannerErr.Message); err != nil { + return nil, err + } + } + } + + if authErr == nil { + break userAuthLoop + } + + var failureMsg userAuthFailureMsg + + if partialSuccess, ok := authErr.(*PartialSuccessError); ok { + // After a partial success error we don't allow changing the user + // name and execute the NoClientAuthCallback. + partialSuccessReturned = true + + // In case a partial success is returned, the server may send + // a new set of authentication methods. + authConfig = partialSuccess.Next + + // Reset pubkey cache, as the new PublicKeyCallback might + // accept a different set of public keys. + cache = pubKeyCache{} + + // Send back a partial success message to the user. + failureMsg.PartialSuccess = true + } else { + // Allow initial attempt of 'none' without penalty. 
+ if authFailures > 0 || userAuthReq.Method != "none" || noneAuthCount != 1 { + authFailures++ + } + if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { + // If we have hit the max attempts, don't bother sending the + // final SSH_MSG_USERAUTH_FAILURE message, since there are + // no more authentication methods which can be attempted, + // and this message may cause the client to re-attempt + // authentication while we send the disconnect message. + // Continue, and trigger the disconnect at the start of + // the loop. + // + // The SSH specification is somewhat confusing about this, + // RFC 4252 Section 5.1 requires each authentication failure + // be responded to with a respective SSH_MSG_USERAUTH_FAILURE + // message, but Section 4 says the server should disconnect + // after some number of attempts, but it isn't explicit which + // message should take precedence (i.e. should there be a failure + // message than a disconnect message, or if we are going to + // disconnect, should we only send that message.) + // + // Either way, OpenSSH disconnects immediately after the last + // failed authentication attempt, and given they are typically + // considered the golden implementation it seems reasonable + // to match that behavior. + continue + } + } + + if authConfig.PasswordCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "password") + } + if authConfig.PublicKeyCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "publickey") + } + if authConfig.KeyboardInteractiveCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") + } + if authConfig.GSSAPIWithMICConfig != nil && authConfig.GSSAPIWithMICConfig.Server != nil && + authConfig.GSSAPIWithMICConfig.AllowLogin != nil { + failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") + } + + if len(failureMsg.Methods) == 0 { + return nil, errors.New("ssh: no authentication methods available") + } + + if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { + return nil, err + } + } + + if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { + return nil, err + } + return perms, nil +} + +// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by +// asking the client on the other side of a ServerConn. 
+type sshClientKeyboardInteractive struct { + *connection +} + +func (c *sshClientKeyboardInteractive) Challenge(name, instruction string, questions []string, echos []bool) (answers []string, err error) { + if len(questions) != len(echos) { + return nil, errors.New("ssh: echos and questions must have equal length") + } + + var prompts []byte + for i := range questions { + prompts = appendString(prompts, questions[i]) + prompts = appendBool(prompts, echos[i]) + } + + if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ + Name: name, + Instruction: instruction, + NumPrompts: uint32(len(questions)), + Prompts: prompts, + })); err != nil { + return nil, err + } + + packet, err := c.transport.readPacket() + if err != nil { + return nil, err + } + if packet[0] != msgUserAuthInfoResponse { + return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) + } + packet = packet[1:] + + n, packet, ok := parseUint32(packet) + if !ok || int(n) != len(questions) { + return nil, parseError(msgUserAuthInfoResponse) + } + + for i := uint32(0); i < n; i++ { + ans, rest, ok := parseString(packet) + if !ok { + return nil, parseError(msgUserAuthInfoResponse) + } + + answers = append(answers, string(ans)) + packet = rest + } + if len(packet) != 0 { + return nil, errors.New("ssh: junk at end of message") + } + + return answers, nil +} diff --git a/tempfork/sshtest/ssh/server_multi_auth_test.go b/tempfork/sshtest/ssh/server_multi_auth_test.go new file mode 100644 index 000000000..3b3980243 --- /dev/null +++ b/tempfork/sshtest/ssh/server_multi_auth_test.go @@ -0,0 +1,412 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "strings" + "testing" +) + +func doClientServerAuth(t *testing.T, serverConfig *ServerConfig, clientConfig *ClientConfig) ([]error, error) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + var serverAuthErrors []error + + serverConfig.AddHostKey(testSigners["rsa"]) + serverConfig.AuthLogCallback = func(conn ConnMetadata, method string, err error) { + serverAuthErrors = append(serverAuthErrors, err) + } + go newServer(c1, serverConfig) + c, _, _, err := NewClientConn(c2, "", clientConfig) + if err == nil { + c.Close() + } + return serverAuthErrors, err +} + +func TestMultiStepAuth(t *testing.T) { + // This user can login with password, public key or public key + password. + username := "testuser" + // This user can login with public key + password only. 
+ usernameSecondFactor := "testuser_second_factor" + errPwdAuthFailed := errors.New("password auth failed") + errWrongSequence := errors.New("wrong sequence") + + serverConfig := &ServerConfig{ + PasswordCallback: func(conn ConnMetadata, password []byte) (*Permissions, error) { + if conn.User() == usernameSecondFactor { + return nil, errWrongSequence + } + if conn.User() == username && string(password) == clientPassword { + return nil, nil + } + return nil, errPwdAuthFailed + }, + PublicKeyCallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) { + if bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) { + if conn.User() == usernameSecondFactor { + return nil, &PartialSuccessError{ + Next: ServerAuthCallbacks{ + PasswordCallback: func(conn ConnMetadata, password []byte) (*Permissions, error) { + if string(password) == clientPassword { + return nil, nil + } + return nil, errPwdAuthFailed + }, + }, + } + } + return nil, nil + } + return nil, fmt.Errorf("pubkey for %q not acceptable", conn.User()) + }, + } + + clientConfig := &ClientConfig{ + User: usernameSecondFactor, + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + Password(clientPassword), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + serverAuthErrors, err := doClientServerAuth(t, serverConfig, clientConfig) + if err != nil { + t.Fatalf("client login error: %s", err) + } + + // The error sequence is: + // - no auth passed yet + // - partial success + // - nil + if len(serverAuthErrors) != 3 { + t.Fatalf("unexpected number of server auth errors: %v, errors: %+v", len(serverAuthErrors), serverAuthErrors) + } + if _, ok := serverAuthErrors[1].(*PartialSuccessError); !ok { + t.Fatalf("expected partial success error, got: %v", serverAuthErrors[1]) + } + // Now test a wrong sequence. + clientConfig.Auth = []AuthMethod{ + Password(clientPassword), + PublicKeys(testSigners["rsa"]), + } + + serverAuthErrors, err = doClientServerAuth(t, serverConfig, clientConfig) + if err == nil { + t.Fatal("client login with wrong sequence must fail") + } + // The error sequence is: + // - no auth passed yet + // - wrong sequence + // - partial success + if len(serverAuthErrors) != 3 { + t.Fatalf("unexpected number of server auth errors: %v, errors: %+v", len(serverAuthErrors), serverAuthErrors) + } + if serverAuthErrors[1] != errWrongSequence { + t.Fatal("server not returned wrong sequence") + } + if _, ok := serverAuthErrors[2].(*PartialSuccessError); !ok { + t.Fatalf("expected partial success error, got: %v", serverAuthErrors[2]) + } + // Now test using a correct sequence but a wrong password before the right + // one. 
+ n := 0 + passwords := []string{"WRONG", "WRONG", clientPassword} + clientConfig.Auth = []AuthMethod{ + PublicKeys(testSigners["rsa"]), + RetryableAuthMethod(PasswordCallback(func() (string, error) { + p := passwords[n] + n++ + return p, nil + }), 3), + } + + serverAuthErrors, err = doClientServerAuth(t, serverConfig, clientConfig) + if err != nil { + t.Fatalf("client login error: %s", err) + } + // The error sequence is: + // - no auth passed yet + // - partial success + // - wrong password + // - wrong password + // - nil + if len(serverAuthErrors) != 5 { + t.Fatalf("unexpected number of server auth errors: %v, errors: %+v", len(serverAuthErrors), serverAuthErrors) + } + if _, ok := serverAuthErrors[1].(*PartialSuccessError); !ok { + t.Fatal("server not returned partial success") + } + if serverAuthErrors[2] != errPwdAuthFailed { + t.Fatal("server not returned password authentication failed") + } + if serverAuthErrors[3] != errPwdAuthFailed { + t.Fatal("server not returned password authentication failed") + } + // Only password authentication should fail. + clientConfig.Auth = []AuthMethod{ + Password(clientPassword), + } + + serverAuthErrors, err = doClientServerAuth(t, serverConfig, clientConfig) + if err == nil { + t.Fatal("client login with password only must fail") + } + // The error sequence is: + // - no auth passed yet + // - wrong sequence + if len(serverAuthErrors) != 2 { + t.Fatalf("unexpected number of server auth errors: %v, errors: %+v", len(serverAuthErrors), serverAuthErrors) + } + if serverAuthErrors[1] != errWrongSequence { + t.Fatal("server not returned wrong sequence") + } + + // Only public key authentication should fail. + clientConfig.Auth = []AuthMethod{ + PublicKeys(testSigners["rsa"]), + } + + serverAuthErrors, err = doClientServerAuth(t, serverConfig, clientConfig) + if err == nil { + t.Fatal("client login with public key only must fail") + } + // The error sequence is: + // - no auth passed yet + // - partial success + if len(serverAuthErrors) != 2 { + t.Fatalf("unexpected number of server auth errors: %v, errors: %+v", len(serverAuthErrors), serverAuthErrors) + } + if _, ok := serverAuthErrors[1].(*PartialSuccessError); !ok { + t.Fatal("server not returned partial success") + } + + // Public key and wrong password. + clientConfig.Auth = []AuthMethod{ + PublicKeys(testSigners["rsa"]), + Password("WRONG"), + } + + serverAuthErrors, err = doClientServerAuth(t, serverConfig, clientConfig) + if err == nil { + t.Fatal("client login with wrong password after public key must fail") + } + // The error sequence is: + // - no auth passed yet + // - partial success + // - password auth failed + if len(serverAuthErrors) != 3 { + t.Fatalf("unexpected number of server auth errors: %v, errors: %+v", len(serverAuthErrors), serverAuthErrors) + } + if _, ok := serverAuthErrors[1].(*PartialSuccessError); !ok { + t.Fatal("server not returned partial success") + } + if serverAuthErrors[2] != errPwdAuthFailed { + t.Fatal("server not returned password authentication failed") + } + + // Public key, public key again and then correct password. Public key + // authentication is attempted only once because the partial success error + // returns only "password" as the allowed authentication method. 
+ clientConfig.Auth = []AuthMethod{ + PublicKeys(testSigners["rsa"]), + PublicKeys(testSigners["rsa"]), + Password(clientPassword), + } + + serverAuthErrors, err = doClientServerAuth(t, serverConfig, clientConfig) + if err != nil { + t.Fatalf("client login error: %s", err) + } + // The error sequence is: + // - no auth passed yet + // - partial success + // - nil + if len(serverAuthErrors) != 3 { + t.Fatalf("unexpected number of server auth errors: %v, errors: %+v", len(serverAuthErrors), serverAuthErrors) + } + if _, ok := serverAuthErrors[1].(*PartialSuccessError); !ok { + t.Fatal("server not returned partial success") + } + + // The unrestricted username can do anything + clientConfig = &ClientConfig{ + User: username, + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + Password(clientPassword), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + _, err = doClientServerAuth(t, serverConfig, clientConfig) + if err != nil { + t.Fatalf("unrestricted client login error: %s", err) + } + + clientConfig = &ClientConfig{ + User: username, + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + _, err = doClientServerAuth(t, serverConfig, clientConfig) + if err != nil { + t.Fatalf("unrestricted client login error: %s", err) + } + + clientConfig = &ClientConfig{ + User: username, + Auth: []AuthMethod{ + Password(clientPassword), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + _, err = doClientServerAuth(t, serverConfig, clientConfig) + if err != nil { + t.Fatalf("unrestricted client login error: %s", err) + } +} + +func TestDynamicAuthCallbacks(t *testing.T) { + user1 := "user1" + user2 := "user2" + errInvalidCredentials := errors.New("invalid credentials") + + serverConfig := &ServerConfig{ + NoClientAuth: true, + NoClientAuthCallback: func(conn ConnMetadata) (*Permissions, error) { + switch conn.User() { + case user1: + return nil, &PartialSuccessError{ + Next: ServerAuthCallbacks{ + PasswordCallback: func(conn ConnMetadata, password []byte) (*Permissions, error) { + if conn.User() == user1 && string(password) == clientPassword { + return nil, nil + } + return nil, errInvalidCredentials + }, + }, + } + case user2: + return nil, &PartialSuccessError{ + Next: ServerAuthCallbacks{ + PublicKeyCallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) { + if bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) { + if conn.User() == user2 { + return nil, nil + } + } + return nil, errInvalidCredentials + }, + }, + } + default: + return nil, errInvalidCredentials + } + }, + } + + clientConfig := &ClientConfig{ + User: user1, + Auth: []AuthMethod{ + Password(clientPassword), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + serverAuthErrors, err := doClientServerAuth(t, serverConfig, clientConfig) + if err != nil { + t.Fatalf("client login error: %s", err) + } + // The error sequence is: + // - partial success + // - nil + if len(serverAuthErrors) != 2 { + t.Fatalf("unexpected number of server auth errors: %v, errors: %+v", len(serverAuthErrors), serverAuthErrors) + } + if _, ok := serverAuthErrors[0].(*PartialSuccessError); !ok { + t.Fatal("server not returned partial success") + } + + clientConfig = &ClientConfig{ + User: user2, + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + serverAuthErrors, err = doClientServerAuth(t, serverConfig, clientConfig) + if err != nil { + t.Fatalf("client login error: %s", err) + } + // The error sequence 
is: + // - partial success + // - nil + if len(serverAuthErrors) != 2 { + t.Fatalf("unexpected number of server auth errors: %v, errors: %+v", len(serverAuthErrors), serverAuthErrors) + } + if _, ok := serverAuthErrors[0].(*PartialSuccessError); !ok { + t.Fatal("server not returned partial success") + } + + // user1 cannot login with public key + clientConfig = &ClientConfig{ + User: user1, + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + serverAuthErrors, err = doClientServerAuth(t, serverConfig, clientConfig) + if err == nil { + t.Fatal("user1 login with public key must fail") + } + if !strings.Contains(err.Error(), "no supported methods remain") { + t.Errorf("got %v, expected 'no supported methods remain'", err) + } + if len(serverAuthErrors) != 1 { + t.Fatalf("unexpected number of server auth errors: %v, errors: %+v", len(serverAuthErrors), serverAuthErrors) + } + if _, ok := serverAuthErrors[0].(*PartialSuccessError); !ok { + t.Fatal("server not returned partial success") + } + // user2 cannot login with password + clientConfig = &ClientConfig{ + User: user2, + Auth: []AuthMethod{ + Password(clientPassword), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + serverAuthErrors, err = doClientServerAuth(t, serverConfig, clientConfig) + if err == nil { + t.Fatal("user2 login with password must fail") + } + if !strings.Contains(err.Error(), "no supported methods remain") { + t.Errorf("got %v, expected 'no supported methods remain'", err) + } + if len(serverAuthErrors) != 1 { + t.Fatalf("unexpected number of server auth errors: %v, errors: %+v", len(serverAuthErrors), serverAuthErrors) + } + if _, ok := serverAuthErrors[0].(*PartialSuccessError); !ok { + t.Fatal("server not returned partial success") + } +} diff --git a/tempfork/sshtest/ssh/server_test.go b/tempfork/sshtest/ssh/server_test.go new file mode 100644 index 000000000..c2b24f47c --- /dev/null +++ b/tempfork/sshtest/ssh/server_test.go @@ -0,0 +1,478 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "reflect" + "strings" + "sync/atomic" + "testing" + "time" +) + +func TestClientAuthRestrictedPublicKeyAlgos(t *testing.T) { + for _, tt := range []struct { + name string + key Signer + wantError bool + }{ + {"rsa", testSigners["rsa"], false}, + {"dsa", testSigners["dsa"], true}, + {"ed25519", testSigners["ed25519"], true}, + } { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + serverConf := &ServerConfig{ + PublicKeyAuthAlgorithms: []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512}, + PublicKeyCallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) { + return nil, nil + }, + } + serverConf.AddHostKey(testSigners["ecdsap256"]) + + done := make(chan struct{}) + go func() { + defer close(done) + NewServerConn(c1, serverConf) + }() + + clientConf := ClientConfig{ + User: "user", + Auth: []AuthMethod{ + PublicKeys(tt.key), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + _, _, _, err = NewClientConn(c2, "", &clientConf) + if err != nil { + if !tt.wantError { + t.Errorf("%s: got unexpected error %q", tt.name, err.Error()) + } + } else if tt.wantError { + t.Errorf("%s: succeeded, but want error", tt.name) + } + <-done + } +} + +func TestMaxAuthTriesNoneMethod(t *testing.T) { + username := "testuser" + serverConfig := &ServerConfig{ + MaxAuthTries: 2, + PasswordCallback: func(conn ConnMetadata, password []byte) (*Permissions, error) { + if conn.User() == username && string(password) == clientPassword { + return nil, nil + } + return nil, errors.New("invalid credentials") + }, + } + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + var serverAuthErrors []error + + serverConfig.AddHostKey(testSigners["rsa"]) + serverConfig.AuthLogCallback = func(conn ConnMetadata, method string, err error) { + serverAuthErrors = append(serverAuthErrors, err) + } + go newServer(c1, serverConfig) + + clientConfig := ClientConfig{ + User: username, + HostKeyCallback: InsecureIgnoreHostKey(), + } + clientConfig.SetDefaults() + // Our client will send 'none' auth only once, so we need to send the + // requests manually. 
+ c := &connection{ + sshConn: sshConn{ + conn: c2, + user: username, + clientVersion: []byte(packageVersion), + }, + } + c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) + if err != nil { + t.Fatalf("unable to exchange version: %v", err) + } + c.transport = newClientTransport( + newTransport(c.sshConn.conn, clientConfig.Rand, true /* is client */), + c.clientVersion, c.serverVersion, &clientConfig, "", c.sshConn.RemoteAddr()) + if err := c.transport.waitSession(); err != nil { + t.Fatalf("unable to wait session: %v", err) + } + c.sessionID = c.transport.getSessionID() + if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { + t.Fatalf("unable to send ssh-userauth message: %v", err) + } + packet, err := c.transport.readPacket() + if err != nil { + t.Fatal(err) + } + if len(packet) > 0 && packet[0] == msgExtInfo { + packet, err = c.transport.readPacket() + if err != nil { + t.Fatal(err) + } + } + var serviceAccept serviceAcceptMsg + if err := Unmarshal(packet, &serviceAccept); err != nil { + t.Fatal(err) + } + for i := 0; i <= serverConfig.MaxAuthTries; i++ { + auth := new(noneAuth) + _, _, err := auth.auth(c.sessionID, clientConfig.User, c.transport, clientConfig.Rand, nil) + if i < serverConfig.MaxAuthTries { + if err != nil { + t.Fatal(err) + } + continue + } + if err == nil { + t.Fatal("client: got no error") + } else if !strings.Contains(err.Error(), "too many authentication failures") { + t.Fatalf("client: got unexpected error: %v", err) + } + } + if len(serverAuthErrors) != 3 { + t.Fatalf("unexpected number of server auth errors: %v, errors: %+v", len(serverAuthErrors), serverAuthErrors) + } + for _, err := range serverAuthErrors { + if !errors.Is(err, ErrNoAuth) { + t.Errorf("go error: %v; want: %v", err, ErrNoAuth) + } + } +} + +func TestMaxAuthTriesFirstNoneAuthErrorIgnored(t *testing.T) { + username := "testuser" + serverConfig := &ServerConfig{ + MaxAuthTries: 1, + PasswordCallback: func(conn ConnMetadata, password []byte) (*Permissions, error) { + if conn.User() == username && string(password) == clientPassword { + return nil, nil + } + return nil, errors.New("invalid credentials") + }, + } + clientConfig := &ClientConfig{ + User: username, + Auth: []AuthMethod{ + Password(clientPassword), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + serverAuthErrors, err := doClientServerAuth(t, serverConfig, clientConfig) + if err != nil { + t.Fatalf("client login error: %s", err) + } + if len(serverAuthErrors) != 2 { + t.Fatalf("unexpected number of server auth errors: %v, errors: %+v", len(serverAuthErrors), serverAuthErrors) + } + if !errors.Is(serverAuthErrors[0], ErrNoAuth) { + t.Errorf("go error: %v; want: %v", serverAuthErrors[0], ErrNoAuth) + } + if serverAuthErrors[1] != nil { + t.Errorf("unexpected error: %v", serverAuthErrors[1]) + } +} + +func TestNewServerConnValidationErrors(t *testing.T) { + serverConf := &ServerConfig{ + PublicKeyAuthAlgorithms: []string{CertAlgoRSAv01}, + } + c := &markerConn{} + _, _, _, err := NewServerConn(c, serverConf) + if err == nil { + t.Fatal("NewServerConn with invalid public key auth algorithms succeeded") + } + if !c.isClosed() { + t.Fatal("NewServerConn with invalid public key auth algorithms left connection open") + } + if c.isUsed() { + t.Fatal("NewServerConn with invalid public key auth algorithms used connection") + } + + serverConf = &ServerConfig{ + Config: Config{ + KeyExchanges: []string{kexAlgoDHGEXSHA256}, + }, + } + c = &markerConn{} + _, _, _, err = 
NewServerConn(c, serverConf) + if err == nil { + t.Fatal("NewServerConn with unsupported key exchange succeeded") + } + if !c.isClosed() { + t.Fatal("NewServerConn with unsupported key exchange left connection open") + } + if c.isUsed() { + t.Fatal("NewServerConn with unsupported key exchange used connection") + } +} + +func TestBannerError(t *testing.T) { + serverConfig := &ServerConfig{ + BannerCallback: func(ConnMetadata) string { + return "banner from BannerCallback" + }, + NoClientAuth: true, + NoClientAuthCallback: func(ConnMetadata) (*Permissions, error) { + err := &BannerError{ + Err: errors.New("error from NoClientAuthCallback"), + Message: "banner from NoClientAuthCallback", + } + return nil, fmt.Errorf("wrapped: %w", err) + }, + PasswordCallback: func(conn ConnMetadata, password []byte) (*Permissions, error) { + return &Permissions{}, nil + }, + PublicKeyCallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) { + return nil, &BannerError{ + Err: errors.New("error from PublicKeyCallback"), + Message: "banner from PublicKeyCallback", + } + }, + KeyboardInteractiveCallback: func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) { + return nil, &BannerError{ + Err: nil, // make sure that a nil inner error is allowed + Message: "banner from KeyboardInteractiveCallback", + } + }, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + var banners []string + clientConfig := &ClientConfig{ + User: "test", + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + KeyboardInteractive(func(name, instruction string, questions []string, echos []bool) ([]string, error) { + return []string{"letmein"}, nil + }), + Password(clientPassword), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + BannerCallback: func(msg string) error { + banners = append(banners, msg) + return nil + }, + } + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + go newServer(c1, serverConfig) + c, _, _, err := NewClientConn(c2, "", clientConfig) + if err != nil { + t.Fatalf("client connection failed: %v", err) + } + defer c.Close() + + wantBanners := []string{ + "banner from BannerCallback", + "banner from NoClientAuthCallback", + "banner from PublicKeyCallback", + "banner from KeyboardInteractiveCallback", + } + if !reflect.DeepEqual(banners, wantBanners) { + t.Errorf("got banners:\n%q\nwant banners:\n%q", banners, wantBanners) + } +} + +func TestPublicKeyCallbackLastSeen(t *testing.T) { + var lastSeenKey PublicKey + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + serverConf := &ServerConfig{ + PublicKeyCallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) { + lastSeenKey = key + fmt.Printf("seen %#v\n", key) + if _, ok := key.(*dsaPublicKey); !ok { + return nil, errors.New("nope") + } + return nil, nil + }, + } + serverConf.AddHostKey(testSigners["ecdsap256"]) + + done := make(chan struct{}) + go func() { + defer close(done) + NewServerConn(c1, serverConf) + }() + + clientConf := ClientConfig{ + User: "user", + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"], testSigners["dsa"], testSigners["ed25519"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + _, _, _, err = NewClientConn(c2, "", &clientConf) + if err != nil { + t.Fatal(err) + } + <-done + + expectedPublicKey := testSigners["dsa"].PublicKey().Marshal() + lastSeenMarshalled := lastSeenKey.Marshal() + if !bytes.Equal(lastSeenMarshalled, expectedPublicKey) 
{ + t.Errorf("unexpected key: got %#v, want %#v", lastSeenKey, testSigners["dsa"].PublicKey()) + } +} + +func TestPreAuthConnAndBanners(t *testing.T) { + testDone := make(chan struct{}) + defer close(testDone) + + authConnc := make(chan ServerPreAuthConn, 1) + serverConfig := &ServerConfig{ + PreAuthConnCallback: func(c ServerPreAuthConn) { + t.Logf("got ServerPreAuthConn: %v", c) + authConnc <- c // for use later in the test + for _, s := range []string{"hello1", "hello2"} { + if err := c.SendAuthBanner(s); err != nil { + t.Errorf("failed to send banner %q: %v", s, err) + } + } + // Now start a goroutine to spam SendAuthBanner in hopes + // of hitting a race. + go func() { + for { + select { + case <-testDone: + return + default: + if err := c.SendAuthBanner("attempted-race"); err != nil && err != errSendBannerPhase { + t.Errorf("unexpected error from SendAuthBanner: %v", err) + } + time.Sleep(5 * time.Millisecond) + } + } + }() + }, + NoClientAuth: true, + NoClientAuthCallback: func(ConnMetadata) (*Permissions, error) { + t.Logf("got NoClientAuthCallback") + return &Permissions{}, nil + }, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + var banners []string + clientConfig := &ClientConfig{ + User: "test", + HostKeyCallback: InsecureIgnoreHostKey(), + BannerCallback: func(msg string) error { + if msg != "attempted-race" { + banners = append(banners, msg) + } + return nil + }, + } + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + go newServer(c1, serverConfig) + c, _, _, err := NewClientConn(c2, "", clientConfig) + if err != nil { + t.Fatalf("client connection failed: %v", err) + } + defer c.Close() + + wantBanners := []string{ + "hello1", + "hello2", + } + if !reflect.DeepEqual(banners, wantBanners) { + t.Errorf("got banners:\n%q\nwant banners:\n%q", banners, wantBanners) + } + + // Now that we're authenticated, verify that use of SendBanner + // is an error. 
+ var bc ServerPreAuthConn + select { + case bc = <-authConnc: + default: + t.Fatal("expected ServerPreAuthConn") + } + if err := bc.SendAuthBanner("wrong-phase"); err == nil { + t.Error("unexpected success of SendAuthBanner after authentication") + } else if err != errSendBannerPhase { + t.Errorf("unexpected error: %v; want %v", err, errSendBannerPhase) + } +} + +type markerConn struct { + closed uint32 + used uint32 +} + +func (c *markerConn) isClosed() bool { + return atomic.LoadUint32(&c.closed) != 0 +} + +func (c *markerConn) isUsed() bool { + return atomic.LoadUint32(&c.used) != 0 +} + +func (c *markerConn) Close() error { + atomic.StoreUint32(&c.closed, 1) + return nil +} + +func (c *markerConn) Read(b []byte) (n int, err error) { + atomic.StoreUint32(&c.used, 1) + if atomic.LoadUint32(&c.closed) != 0 { + return 0, net.ErrClosed + } else { + return 0, io.EOF + } +} + +func (c *markerConn) Write(b []byte) (n int, err error) { + atomic.StoreUint32(&c.used, 1) + if atomic.LoadUint32(&c.closed) != 0 { + return 0, net.ErrClosed + } else { + return 0, io.ErrClosedPipe + } +} + +func (*markerConn) LocalAddr() net.Addr { return nil } +func (*markerConn) RemoteAddr() net.Addr { return nil } + +func (*markerConn) SetDeadline(t time.Time) error { return nil } +func (*markerConn) SetReadDeadline(t time.Time) error { return nil } +func (*markerConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/tempfork/sshtest/ssh/session.go b/tempfork/sshtest/ssh/session.go new file mode 100644 index 000000000..acef62259 --- /dev/null +++ b/tempfork/sshtest/ssh/session.go @@ -0,0 +1,647 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Session implements an interactive session described in +// "RFC 4254, section 6". + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" +) + +type Signal string + +// POSIX signals as listed in RFC 4254 Section 6.10. +const ( + SIGABRT Signal = "ABRT" + SIGALRM Signal = "ALRM" + SIGFPE Signal = "FPE" + SIGHUP Signal = "HUP" + SIGILL Signal = "ILL" + SIGINT Signal = "INT" + SIGKILL Signal = "KILL" + SIGPIPE Signal = "PIPE" + SIGQUIT Signal = "QUIT" + SIGSEGV Signal = "SEGV" + SIGTERM Signal = "TERM" + SIGUSR1 Signal = "USR1" + SIGUSR2 Signal = "USR2" +) + +var signals = map[Signal]int{ + SIGABRT: 6, + SIGALRM: 14, + SIGFPE: 8, + SIGHUP: 1, + SIGILL: 4, + SIGINT: 2, + SIGKILL: 9, + SIGPIPE: 13, + SIGQUIT: 3, + SIGSEGV: 11, + SIGTERM: 15, +} + +type TerminalModes map[uint8]uint32 + +// POSIX terminal mode flags as listed in RFC 4254 Section 8. +const ( + tty_OP_END = 0 + VINTR = 1 + VQUIT = 2 + VERASE = 3 + VKILL = 4 + VEOF = 5 + VEOL = 6 + VEOL2 = 7 + VSTART = 8 + VSTOP = 9 + VSUSP = 10 + VDSUSP = 11 + VREPRINT = 12 + VWERASE = 13 + VLNEXT = 14 + VFLUSH = 15 + VSWTCH = 16 + VSTATUS = 17 + VDISCARD = 18 + IGNPAR = 30 + PARMRK = 31 + INPCK = 32 + ISTRIP = 33 + INLCR = 34 + IGNCR = 35 + ICRNL = 36 + IUCLC = 37 + IXON = 38 + IXANY = 39 + IXOFF = 40 + IMAXBEL = 41 + IUTF8 = 42 // RFC 8160 + ISIG = 50 + ICANON = 51 + XCASE = 52 + ECHO = 53 + ECHOE = 54 + ECHOK = 55 + ECHONL = 56 + NOFLSH = 57 + TOSTOP = 58 + IEXTEN = 59 + ECHOCTL = 60 + ECHOKE = 61 + PENDIN = 62 + OPOST = 70 + OLCUC = 71 + ONLCR = 72 + OCRNL = 73 + ONOCR = 74 + ONLRET = 75 + CS7 = 90 + CS8 = 91 + PARENB = 92 + PARODD = 93 + TTY_OP_ISPEED = 128 + TTY_OP_OSPEED = 129 +) + +// A Session represents a connection to a remote command or shell. 
+type Session struct { + // Stdin specifies the remote process's standard input. + // If Stdin is nil, the remote process reads from an empty + // bytes.Buffer. + Stdin io.Reader + + // Stdout and Stderr specify the remote process's standard + // output and error. + // + // If either is nil, Run connects the corresponding file + // descriptor to an instance of io.Discard. There is a + // fixed amount of buffering that is shared for the two streams. + // If either blocks it may eventually cause the remote + // command to block. + Stdout io.Writer + Stderr io.Writer + + ch Channel // the channel backing this session + started bool // true once Start, Run or Shell is invoked. + copyFuncs []func() error + errors chan error // one send per copyFunc + + // true if pipe method is active + stdinpipe, stdoutpipe, stderrpipe bool + + // stdinPipeWriter is non-nil if StdinPipe has not been called + // and Stdin was specified by the user; it is the write end of + // a pipe connecting Session.Stdin to the stdin channel. + stdinPipeWriter io.WriteCloser + + exitStatus chan error +} + +// SendRequest sends an out-of-band channel request on the SSH channel +// underlying the session. +func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + return s.ch.SendRequest(name, wantReply, payload) +} + +func (s *Session) Close() error { + return s.ch.Close() +} + +// RFC 4254 Section 6.4. +type setenvRequest struct { + Name string + Value string +} + +// Setenv sets an environment variable that will be applied to any +// command executed by Shell or Run. +func (s *Session) Setenv(name, value string) error { + msg := setenvRequest{ + Name: name, + Value: value, + } + ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: setenv failed") + } + return err +} + +// RFC 4254 Section 6.2. +type ptyRequestMsg struct { + Term string + Columns uint32 + Rows uint32 + Width uint32 + Height uint32 + Modelist string +} + +// RequestPty requests the association of a pty with the session on the remote host. +func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { + var tm []byte + for k, v := range termmodes { + kv := struct { + Key byte + Val uint32 + }{k, v} + + tm = append(tm, Marshal(&kv)...) + } + tm = append(tm, tty_OP_END) + req := ptyRequestMsg{ + Term: term, + Columns: uint32(w), + Rows: uint32(h), + Width: uint32(w * 8), + Height: uint32(h * 8), + Modelist: string(tm), + } + ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) + if err == nil && !ok { + err = errors.New("ssh: pty-req failed") + } + return err +} + +// RFC 4254 Section 6.5. +type subsystemRequestMsg struct { + Subsystem string +} + +// RequestSubsystem requests the association of a subsystem with the session on the remote host. +// A subsystem is a predefined command that runs in the background when the ssh session is initiated +func (s *Session) RequestSubsystem(subsystem string) error { + msg := subsystemRequestMsg{ + Subsystem: subsystem, + } + ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: subsystem request failed") + } + return err +} + +// RFC 4254 Section 6.7. +type ptyWindowChangeMsg struct { + Columns uint32 + Rows uint32 + Width uint32 + Height uint32 +} + +// WindowChange informs the remote host about a terminal window dimension change to h rows and w columns. 
+func (s *Session) WindowChange(h, w int) error { + req := ptyWindowChangeMsg{ + Columns: uint32(w), + Rows: uint32(h), + Width: uint32(w * 8), + Height: uint32(h * 8), + } + _, err := s.ch.SendRequest("window-change", false, Marshal(&req)) + return err +} + +// RFC 4254 Section 6.9. +type signalMsg struct { + Signal string +} + +// Signal sends the given signal to the remote process. +// sig is one of the SIG* constants. +func (s *Session) Signal(sig Signal) error { + msg := signalMsg{ + Signal: string(sig), + } + + _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) + return err +} + +// RFC 4254 Section 6.5. +type execMsg struct { + Command string +} + +// Start runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start or Shell. +func (s *Session) Start(cmd string) error { + if s.started { + return errors.New("ssh: session already started") + } + req := execMsg{ + Command: cmd, + } + + ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) + if err == nil && !ok { + err = fmt.Errorf("ssh: command %v failed", cmd) + } + if err != nil { + return err + } + return s.start() +} + +// Run runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start, Shell, Output, +// or CombinedOutput. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. +func (s *Session) Run(cmd string) error { + err := s.Start(cmd) + if err != nil { + return err + } + return s.Wait() +} + +// Output runs cmd on the remote host and returns its standard output. +func (s *Session) Output(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + var b bytes.Buffer + s.Stdout = &b + err := s.Run(cmd) + return b.Bytes(), err +} + +type singleWriter struct { + b bytes.Buffer + mu sync.Mutex +} + +func (w *singleWriter) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + return w.b.Write(p) +} + +// CombinedOutput runs cmd on the remote host and returns its combined +// standard output and standard error. +func (s *Session) CombinedOutput(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + var b singleWriter + s.Stdout = &b + s.Stderr = &b + err := s.Run(cmd) + return b.b.Bytes(), err +} + +// Shell starts a login shell on the remote host. A Session only +// accepts one call to Run, Start, Shell, Output, or CombinedOutput. 
+func (s *Session) Shell() error { + if s.started { + return errors.New("ssh: session already started") + } + + ok, err := s.ch.SendRequest("shell", true, nil) + if err == nil && !ok { + return errors.New("ssh: could not start shell") + } + if err != nil { + return err + } + return s.start() +} + +func (s *Session) start() error { + s.started = true + + type F func(*Session) + for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { + setupFd(s) + } + + s.errors = make(chan error, len(s.copyFuncs)) + for _, fn := range s.copyFuncs { + go func(fn func() error) { + s.errors <- fn() + }(fn) + } + return nil +} + +// Wait waits for the remote command to exit. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. +func (s *Session) Wait() error { + if !s.started { + return errors.New("ssh: session not started") + } + waitErr := <-s.exitStatus + + if s.stdinPipeWriter != nil { + s.stdinPipeWriter.Close() + } + var copyError error + for range s.copyFuncs { + if err := <-s.errors; err != nil && copyError == nil { + copyError = err + } + } + if waitErr != nil { + return waitErr + } + return copyError +} + +func (s *Session) wait(reqs <-chan *Request) error { + wm := Waitmsg{status: -1} + // Wait for msg channel to be closed before returning. + for msg := range reqs { + switch msg.Type { + case "exit-status": + wm.status = int(binary.BigEndian.Uint32(msg.Payload)) + case "exit-signal": + var sigval struct { + Signal string + CoreDumped bool + Error string + Lang string + } + if err := Unmarshal(msg.Payload, &sigval); err != nil { + return err + } + + // Must sanitize strings? + wm.signal = sigval.Signal + wm.msg = sigval.Error + wm.lang = sigval.Lang + default: + // This handles keepalives and matches + // OpenSSH's behaviour. + if msg.WantReply { + msg.Reply(false, nil) + } + } + } + if wm.status == 0 { + return nil + } + if wm.status == -1 { + // exit-status was never sent from server + if wm.signal == "" { + // signal was not sent either. RFC 4254 + // section 6.10 recommends against this + // behavior, but it is allowed, so we let + // clients handle it. + return &ExitMissingError{} + } + wm.status = 128 + if _, ok := signals[Signal(wm.signal)]; ok { + wm.status += signals[Signal(wm.signal)] + } + } + + return &ExitError{wm} +} + +// ExitMissingError is returned if a session is torn down cleanly, but +// the server sends no confirmation of the exit status. 
+type ExitMissingError struct{} + +func (e *ExitMissingError) Error() string { + return "wait: remote command exited without exit status or exit signal" +} + +func (s *Session) stdin() { + if s.stdinpipe { + return + } + var stdin io.Reader + if s.Stdin == nil { + stdin = new(bytes.Buffer) + } else { + r, w := io.Pipe() + go func() { + _, err := io.Copy(w, s.Stdin) + w.CloseWithError(err) + }() + stdin, s.stdinPipeWriter = r, w + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.ch, stdin) + if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { + err = err1 + } + return err + }) +} + +func (s *Session) stdout() { + if s.stdoutpipe { + return + } + if s.Stdout == nil { + s.Stdout = io.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stdout, s.ch) + return err + }) +} + +func (s *Session) stderr() { + if s.stderrpipe { + return + } + if s.Stderr == nil { + s.Stderr = io.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stderr, s.ch.Stderr()) + return err + }) +} + +// sessionStdin reroutes Close to CloseWrite. +type sessionStdin struct { + io.Writer + ch Channel +} + +func (s *sessionStdin) Close() error { + return s.ch.CloseWrite() +} + +// StdinPipe returns a pipe that will be connected to the +// remote command's standard input when the command starts. +func (s *Session) StdinPipe() (io.WriteCloser, error) { + if s.Stdin != nil { + return nil, errors.New("ssh: Stdin already set") + } + if s.started { + return nil, errors.New("ssh: StdinPipe after process started") + } + s.stdinpipe = true + return &sessionStdin{s.ch, s.ch}, nil +} + +// StdoutPipe returns a pipe that will be connected to the +// remote command's standard output when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StdoutPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. +func (s *Session) StdoutPipe() (io.Reader, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.started { + return nil, errors.New("ssh: StdoutPipe after process started") + } + s.stdoutpipe = true + return s.ch, nil +} + +// StderrPipe returns a pipe that will be connected to the +// remote command's standard error when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StderrPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. +func (s *Session) StderrPipe() (io.Reader, error) { + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + if s.started { + return nil, errors.New("ssh: StderrPipe after process started") + } + s.stderrpipe = true + return s.ch.Stderr(), nil +} + +// newSession returns a new interactive session on the remote host. +func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { + s := &Session{ + ch: ch, + } + s.exitStatus = make(chan error, 1) + go func() { + s.exitStatus <- s.wait(reqs) + }() + + return s, nil +} + +// An ExitError reports unsuccessful completion of a remote command. +type ExitError struct { + Waitmsg +} + +func (e *ExitError) Error() string { + return e.Waitmsg.String() +} + +// Waitmsg stores the information about an exited remote command +// as reported by Wait. 
+type Waitmsg struct { + status int + signal string + msg string + lang string +} + +// ExitStatus returns the exit status of the remote command. +func (w Waitmsg) ExitStatus() int { + return w.status +} + +// Signal returns the exit signal of the remote command if +// it was terminated violently. +func (w Waitmsg) Signal() string { + return w.signal +} + +// Msg returns the exit message given by the remote command +func (w Waitmsg) Msg() string { + return w.msg +} + +// Lang returns the language tag. See RFC 3066 +func (w Waitmsg) Lang() string { + return w.lang +} + +func (w Waitmsg) String() string { + str := fmt.Sprintf("Process exited with status %v", w.status) + if w.signal != "" { + str += fmt.Sprintf(" from signal %v", w.signal) + } + if w.msg != "" { + str += fmt.Sprintf(". Reason was: %v", w.msg) + } + return str +} diff --git a/tempfork/sshtest/ssh/session_test.go b/tempfork/sshtest/ssh/session_test.go new file mode 100644 index 000000000..807a913e5 --- /dev/null +++ b/tempfork/sshtest/ssh/session_test.go @@ -0,0 +1,892 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Session tests. + +import ( + "bytes" + crypto_rand "crypto/rand" + "errors" + "io" + "math/rand" + "net" + "sync" + "testing" + + "golang.org/x/crypto/ssh/terminal" +) + +type serverType func(Channel, <-chan *Request, *testing.T) + +// dial constructs a new test server and returns a *ClientConn. +func dial(handler serverType, t *testing.T) *Client { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + + var wg sync.WaitGroup + t.Cleanup(wg.Wait) + wg.Add(1) + go func() { + defer func() { + c1.Close() + wg.Done() + }() + conf := ServerConfig{ + NoClientAuth: true, + } + conf.AddHostKey(testSigners["rsa"]) + + conn, chans, reqs, err := NewServerConn(c1, &conf) + if err != nil { + t.Errorf("Unable to handshake: %v", err) + return + } + wg.Add(1) + go func() { + DiscardRequests(reqs) + wg.Done() + }() + + for newCh := range chans { + if newCh.ChannelType() != "session" { + newCh.Reject(UnknownChannelType, "unknown channel type") + continue + } + + ch, inReqs, err := newCh.Accept() + if err != nil { + t.Errorf("Accept: %v", err) + continue + } + wg.Add(1) + go func() { + handler(ch, inReqs, t) + wg.Done() + }() + } + if err := conn.Wait(); err != io.EOF { + t.Logf("server exit reason: %v", err) + } + }() + + config := &ClientConfig{ + User: "testuser", + HostKeyCallback: InsecureIgnoreHostKey(), + } + + conn, chans, reqs, err := NewClientConn(c2, "", config) + if err != nil { + t.Fatalf("unable to dial remote side: %v", err) + } + + return NewClient(conn, chans, reqs) +} + +// Test a simple string is returned to session.Stdout. +func TestSessionShell(t *testing.T) { + conn := dial(shellHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + stdout := new(bytes.Buffer) + session.Stdout = stdout + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %s", err) + } + if err := session.Wait(); err != nil { + t.Fatalf("Remote command did not exit cleanly: %v", err) + } + actual := stdout.String() + if actual != "golang" { + t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual) + } +} + +// TODO(dfc) add support for Std{in,err}Pipe when the Server supports it. 
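A minimal usage sketch of the Session API vendored above, for orientation only; it is not part of the upstream patch. It is written against the public golang.org/x/crypto/ssh package that this fork mirrors, and the host, user, and password values are placeholders. It shows the typical flow: dial a client, open a session, wire up Stdout, and run one remote command (a Session accepts a single call to Run, Start, Shell, Output, or CombinedOutput).

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Dial and authenticate. InsecureIgnoreHostKey is for illustration only;
	// real code should verify the host key.
	client, err := ssh.Dial("tcp", "example.com:22", &ssh.ClientConfig{
		User:            "user",
		Auth:            []ssh.AuthMethod{ssh.Password("password")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// One session per remote command.
	session, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	// Capture the remote command's stdout; stderr is discarded because it
	// is left nil.
	var stdout bytes.Buffer
	session.Stdout = &stdout
	if err := session.Run("uptime"); err != nil {
		// A non-zero exit status surfaces as *ssh.ExitError; a missing exit
		// status as *ssh.ExitMissingError.
		log.Fatal(err)
	}
	fmt.Print(stdout.String())
}

For the common cases, Output and CombinedOutput wrap this pattern and return the captured bytes directly, as exercised by the tests that follow.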
+ +// Test a simple string is returned via StdoutPipe. +func TestSessionStdoutPipe(t *testing.T) { + conn := dial(shellHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + stdout, err := session.StdoutPipe() + if err != nil { + t.Fatalf("Unable to request StdoutPipe(): %v", err) + } + var buf bytes.Buffer + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + done := make(chan bool, 1) + go func() { + if _, err := io.Copy(&buf, stdout); err != nil { + t.Errorf("Copy of stdout failed: %v", err) + } + done <- true + }() + if err := session.Wait(); err != nil { + t.Fatalf("Remote command did not exit cleanly: %v", err) + } + <-done + actual := buf.String() + if actual != "golang" { + t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual) + } +} + +// Test that a simple string is returned via the Output helper, +// and that stderr is discarded. +func TestSessionOutput(t *testing.T) { + conn := dial(fixedOutputHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + + buf, err := session.Output("") // cmd is ignored by fixedOutputHandler + if err != nil { + t.Error("Remote command did not exit cleanly:", err) + } + w := "this-is-stdout." + g := string(buf) + if g != w { + t.Error("Remote command did not return expected string:") + t.Logf("want %q", w) + t.Logf("got %q", g) + } +} + +// Test that both stdout and stderr are returned +// via the CombinedOutput helper. +func TestSessionCombinedOutput(t *testing.T) { + conn := dial(fixedOutputHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + + buf, err := session.CombinedOutput("") // cmd is ignored by fixedOutputHandler + if err != nil { + t.Error("Remote command did not exit cleanly:", err) + } + const stdout = "this-is-stdout." + const stderr = "this-is-stderr." + g := string(buf) + if g != stdout+stderr && g != stderr+stdout { + t.Error("Remote command did not return expected string:") + t.Logf("want %q, or %q", stdout+stderr, stderr+stdout) + t.Logf("got %q", g) + } +} + +// Test non-0 exit status is returned correctly. +func TestExitStatusNonZero(t *testing.T) { + conn := dial(exitStatusNonZeroHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + e, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected *ExitError but got %T", err) + } + if e.ExitStatus() != 15 { + t.Fatalf("expected command to exit with 15 but got %v", e.ExitStatus()) + } +} + +// Test 0 exit status is returned correctly. 
+func TestExitStatusZero(t *testing.T) { + conn := dial(exitStatusZeroHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err != nil { + t.Fatalf("expected nil but got %v", err) + } +} + +// Test exit signal and status are both returned correctly. +func TestExitSignalAndStatus(t *testing.T) { + conn := dial(exitSignalAndStatusHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + e, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected *ExitError but got %T", err) + } + if e.Signal() != "TERM" || e.ExitStatus() != 15 { + t.Fatalf("expected command to exit with signal TERM and status 15 but got signal %s and status %v", e.Signal(), e.ExitStatus()) + } +} + +// Test exit signal and status are both returned correctly. +func TestKnownExitSignalOnly(t *testing.T) { + conn := dial(exitSignalHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + e, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected *ExitError but got %T", err) + } + if e.Signal() != "TERM" || e.ExitStatus() != 143 { + t.Fatalf("expected command to exit with signal TERM and status 143 but got signal %s and status %v", e.Signal(), e.ExitStatus()) + } +} + +// Test exit signal and status are both returned correctly. +func TestUnknownExitSignal(t *testing.T) { + conn := dial(exitSignalUnknownHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + e, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected *ExitError but got %T", err) + } + if e.Signal() != "SYS" || e.ExitStatus() != 128 { + t.Fatalf("expected command to exit with signal SYS and status 128 but got signal %s and status %v", e.Signal(), e.ExitStatus()) + } +} + +func TestExitWithoutStatusOrSignal(t *testing.T) { + conn := dial(exitWithoutSignalOrStatus, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + if _, ok := err.(*ExitMissingError); !ok { + t.Fatalf("got %T want *ExitMissingError", err) + } +} + +// windowTestBytes is the number of bytes that we'll send to the SSH server. +const windowTestBytes = 16000 * 200 + +// TestServerWindow writes random data to the server. 
The server is expected to echo +// the same data back, which is compared against the original. +func TestServerWindow(t *testing.T) { + origBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes)) + io.CopyN(origBuf, crypto_rand.Reader, windowTestBytes) + origBytes := origBuf.Bytes() + + conn := dial(echoHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatal(err) + } + defer session.Close() + + serverStdin, err := session.StdinPipe() + if err != nil { + t.Fatalf("StdinPipe failed: %v", err) + } + + result := make(chan []byte) + go func() { + defer close(result) + echoedBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes)) + serverStdout, err := session.StdoutPipe() + if err != nil { + t.Errorf("StdoutPipe failed: %v", err) + return + } + n, err := copyNRandomly("stdout", echoedBuf, serverStdout, windowTestBytes) + if err != nil && err != io.EOF { + t.Errorf("Read only %d bytes from server, expected %d: %v", n, windowTestBytes, err) + } + result <- echoedBuf.Bytes() + }() + + written, err := copyNRandomly("stdin", serverStdin, origBuf, windowTestBytes) + if err != nil { + t.Errorf("failed to copy origBuf to serverStdin: %v", err) + } else if written != windowTestBytes { + t.Errorf("Wrote only %d of %d bytes to server", written, windowTestBytes) + } + + echoedBytes := <-result + + if !bytes.Equal(origBytes, echoedBytes) { + t.Fatalf("Echoed buffer differed from original, orig %d, echoed %d", len(origBytes), len(echoedBytes)) + } +} + +// Verify the client can handle a keepalive packet from the server. +func TestClientHandlesKeepalives(t *testing.T) { + conn := dial(channelKeepaliveSender, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatal(err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err != nil { + t.Fatalf("expected nil but got: %v", err) + } +} + +type exitStatusMsg struct { + Status uint32 +} + +type exitSignalMsg struct { + Signal string + CoreDumped bool + Errmsg string + Lang string +} + +func handleTerminalRequests(in <-chan *Request) { + for req := range in { + ok := false + switch req.Type { + case "shell": + ok = true + if len(req.Payload) > 0 { + // We don't accept any commands, only the default shell. 
+ ok = false + } + case "env": + ok = true + } + req.Reply(ok, nil) + } +} + +func newServerShell(ch Channel, in <-chan *Request, prompt string) *terminal.Terminal { + term := terminal.NewTerminal(ch, prompt) + go handleTerminalRequests(in) + return term +} + +func exitStatusZeroHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + // this string is returned to stdout + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendStatus(0, ch, t) +} + +func exitStatusNonZeroHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendStatus(15, ch, t) +} + +func exitSignalAndStatusHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendStatus(15, ch, t) + sendSignal("TERM", ch, t) +} + +func exitSignalHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendSignal("TERM", ch, t) +} + +func exitSignalUnknownHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendSignal("SYS", ch, t) +} + +func exitWithoutSignalOrStatus(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) +} + +func shellHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + // this string is returned to stdout + shell := newServerShell(ch, in, "golang") + readLine(shell, t) + sendStatus(0, ch, t) +} + +// Ignores the command, writes fixed strings to stderr and stdout. +// Strings are "this-is-stdout." and "this-is-stderr.". +func fixedOutputHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + _, err := ch.Read(nil) + + req, ok := <-in + if !ok { + t.Fatalf("error: expected channel request, got: %#v", err) + return + } + + // ignore request, always send some text + req.Reply(true, nil) + + _, err = io.WriteString(ch, "this-is-stdout.") + if err != nil { + t.Fatalf("error writing on server: %v", err) + } + _, err = io.WriteString(ch.Stderr(), "this-is-stderr.") + if err != nil { + t.Fatalf("error writing on server: %v", err) + } + sendStatus(0, ch, t) +} + +func readLine(shell *terminal.Terminal, t *testing.T) { + if _, err := shell.ReadLine(); err != nil && err != io.EOF { + t.Errorf("unable to read line: %v", err) + } +} + +func sendStatus(status uint32, ch Channel, t *testing.T) { + msg := exitStatusMsg{ + Status: status, + } + if _, err := ch.SendRequest("exit-status", false, Marshal(&msg)); err != nil { + t.Errorf("unable to send status: %v", err) + } +} + +func sendSignal(signal string, ch Channel, t *testing.T) { + sig := exitSignalMsg{ + Signal: signal, + CoreDumped: false, + Errmsg: "Process terminated", + Lang: "en-GB-oed", + } + if _, err := ch.SendRequest("exit-signal", false, Marshal(&sig)); err != nil { + t.Errorf("unable to send signal: %v", err) + } +} + +func discardHandler(ch Channel, t *testing.T) { + defer ch.Close() + io.Copy(io.Discard, ch) +} + +func echoHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + if n, err := copyNRandomly("echohandler", ch, ch, windowTestBytes); err != nil { + t.Errorf("short write, wrote %d, expected %d: %v ", n, windowTestBytes, err) + } +} + +// copyNRandomly copies n bytes from src to dst. It uses a variable, and random, +// buffer size to exercise more code paths. 
+func copyNRandomly(title string, dst io.Writer, src io.Reader, n int) (int, error) { + var ( + buf = make([]byte, 32*1024) + written int + remaining = n + ) + for remaining > 0 { + l := rand.Intn(1 << 15) + if remaining < l { + l = remaining + } + nr, er := src.Read(buf[:l]) + nw, ew := dst.Write(buf[:nr]) + remaining -= nw + written += nw + if ew != nil { + return written, ew + } + if nr != nw { + return written, io.ErrShortWrite + } + if er != nil && er != io.EOF { + return written, er + } + } + return written, nil +} + +func channelKeepaliveSender(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + if _, err := ch.SendRequest("keepalive@openssh.com", true, nil); err != nil { + t.Errorf("unable to send channel keepalive request: %v", err) + } + sendStatus(0, ch, t) +} + +func TestClientWriteEOF(t *testing.T) { + conn := dial(simpleEchoHandler, t) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatal(err) + } + defer session.Close() + stdin, err := session.StdinPipe() + if err != nil { + t.Fatalf("StdinPipe failed: %v", err) + } + stdout, err := session.StdoutPipe() + if err != nil { + t.Fatalf("StdoutPipe failed: %v", err) + } + + data := []byte(`0000`) + _, err = stdin.Write(data) + if err != nil { + t.Fatalf("Write failed: %v", err) + } + stdin.Close() + + res, err := io.ReadAll(stdout) + if err != nil { + t.Fatalf("Read failed: %v", err) + } + + if !bytes.Equal(data, res) { + t.Fatalf("Read differed from write, wrote: %v, read: %v", data, res) + } +} + +func simpleEchoHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + data, err := io.ReadAll(ch) + if err != nil { + t.Errorf("handler read error: %v", err) + } + _, err = ch.Write(data) + if err != nil { + t.Errorf("handler write error: %v", err) + } +} + +func TestSessionID(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + serverID := make(chan []byte, 1) + clientID := make(chan []byte, 1) + + serverConf := &ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["ecdsa"]) + clientConf := &ClientConfig{ + HostKeyCallback: InsecureIgnoreHostKey(), + User: "user", + } + + var wg sync.WaitGroup + t.Cleanup(wg.Wait) + + srvErrCh := make(chan error, 1) + wg.Add(1) + go func() { + defer wg.Done() + conn, chans, reqs, err := NewServerConn(c1, serverConf) + srvErrCh <- err + if err != nil { + return + } + serverID <- conn.SessionID() + wg.Add(1) + go func() { + DiscardRequests(reqs) + wg.Done() + }() + for ch := range chans { + ch.Reject(Prohibited, "") + } + }() + + cliErrCh := make(chan error, 1) + wg.Add(1) + go func() { + defer wg.Done() + conn, chans, reqs, err := NewClientConn(c2, "", clientConf) + cliErrCh <- err + if err != nil { + return + } + clientID <- conn.SessionID() + wg.Add(1) + go func() { + DiscardRequests(reqs) + wg.Done() + }() + for ch := range chans { + ch.Reject(Prohibited, "") + } + }() + + if err := <-srvErrCh; err != nil { + t.Fatalf("server handshake: %v", err) + } + + if err := <-cliErrCh; err != nil { + t.Fatalf("client handshake: %v", err) + } + + s := <-serverID + c := <-clientID + if bytes.Compare(s, c) != 0 { + t.Errorf("server session ID (%x) != client session ID (%x)", s, c) + } else if len(s) == 0 { + t.Errorf("client and server SessionID were empty.") + } +} + +type noReadConn struct { + readSeen bool + net.Conn +} + +func (c *noReadConn) Close() error { + return nil 
+} + +func (c *noReadConn) Read(b []byte) (int, error) { + c.readSeen = true + return 0, errors.New("noReadConn error") +} + +func TestInvalidServerConfiguration(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + serveConn := noReadConn{Conn: c1} + serverConf := &ServerConfig{} + + NewServerConn(&serveConn, serverConf) + if serveConn.readSeen { + t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing host key") + } + + serverConf.AddHostKey(testSigners["ecdsa"]) + + NewServerConn(&serveConn, serverConf) + if serveConn.readSeen { + t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing authentication method") + } +} + +func TestHostKeyAlgorithms(t *testing.T) { + serverConf := &ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["rsa"]) + serverConf.AddHostKey(testSigners["ecdsa"]) + + var wg sync.WaitGroup + t.Cleanup(wg.Wait) + connect := func(clientConf *ClientConfig, want string) { + var alg string + clientConf.HostKeyCallback = func(h string, a net.Addr, key PublicKey) error { + alg = key.Type() + return nil + } + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + wg.Add(1) + go func() { + NewServerConn(c1, serverConf) + wg.Done() + }() + _, _, _, err = NewClientConn(c2, "", clientConf) + if err != nil { + t.Fatalf("NewClientConn: %v", err) + } + if alg != want { + t.Errorf("selected key algorithm %s, want %s", alg, want) + } + } + + // By default, we get the preferred algorithm, which is ECDSA 256. + + clientConf := &ClientConfig{ + HostKeyCallback: InsecureIgnoreHostKey(), + } + connect(clientConf, KeyAlgoECDSA256) + + // Client asks for RSA explicitly. + clientConf.HostKeyAlgorithms = []string{KeyAlgoRSA} + connect(clientConf, KeyAlgoRSA) + + // Client asks for RSA-SHA2-512 explicitly. + clientConf.HostKeyAlgorithms = []string{KeyAlgoRSASHA512} + // We get back an "ssh-rsa" key but the verification happened + // with an RSA-SHA2-512 signature. 
+ connect(clientConf, KeyAlgoRSA) + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + wg.Add(1) + go func() { + NewServerConn(c1, serverConf) + wg.Done() + }() + clientConf.HostKeyAlgorithms = []string{"nonexistent-hostkey-algo"} + _, _, _, err = NewClientConn(c2, "", clientConf) + if err == nil { + t.Fatal("succeeded connecting with unknown hostkey algorithm") + } +} + +func TestServerClientAuthCallback(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + userCh := make(chan string, 1) + + serverConf := &ServerConfig{ + NoClientAuth: true, + NoClientAuthCallback: func(conn ConnMetadata) (*Permissions, error) { + userCh <- conn.User() + return nil, nil + }, + } + const someUsername = "some-username" + + serverConf.AddHostKey(testSigners["ecdsa"]) + clientConf := &ClientConfig{ + HostKeyCallback: InsecureIgnoreHostKey(), + User: someUsername, + } + + var wg sync.WaitGroup + t.Cleanup(wg.Wait) + wg.Add(1) + go func() { + defer wg.Done() + _, chans, reqs, err := NewServerConn(c1, serverConf) + if err != nil { + t.Errorf("server handshake: %v", err) + userCh <- "error" + return + } + wg.Add(1) + go func() { + DiscardRequests(reqs) + wg.Done() + }() + for ch := range chans { + ch.Reject(Prohibited, "") + } + }() + + conn, _, _, err := NewClientConn(c2, "", clientConf) + if err != nil { + t.Fatalf("client handshake: %v", err) + return + } + conn.Close() + + got := <-userCh + if got != someUsername { + t.Errorf("username = %q; want %q", got, someUsername) + } +} diff --git a/tempfork/sshtest/ssh/ssh_gss.go b/tempfork/sshtest/ssh/ssh_gss.go new file mode 100644 index 000000000..24bd7c8e8 --- /dev/null +++ b/tempfork/sshtest/ssh/ssh_gss.go @@ -0,0 +1,139 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/asn1" + "errors" +) + +var krb5OID []byte + +func init() { + krb5OID, _ = asn1.Marshal(krb5Mesh) +} + +// GSSAPIClient provides the API to plug-in GSSAPI authentication for client logins. +type GSSAPIClient interface { + // InitSecContext initiates the establishment of a security context for GSS-API between the + // ssh client and ssh server. Initially the token parameter should be specified as nil. + // The routine may return a outputToken which should be transferred to + // the ssh server, where the ssh server will present it to + // AcceptSecContext. If no token need be sent, InitSecContext will indicate this by setting + // needContinue to false. To complete the context + // establishment, one or more reply tokens may be required from the ssh + // server;if so, InitSecContext will return a needContinue which is true. + // In this case, InitSecContext should be called again when the + // reply token is received from the ssh server, passing the reply + // token to InitSecContext via the token parameters. + // See RFC 2743 section 2.2.1 and RFC 4462 section 3.4. + InitSecContext(target string, token []byte, isGSSDelegCreds bool) (outputToken []byte, needContinue bool, err error) + // GetMIC generates a cryptographic MIC for the SSH2 message, and places + // the MIC in a token for transfer to the ssh server. 
+ // The contents of the MIC field are obtained by calling GSS_GetMIC() + // over the following, using the GSS-API context that was just + // established: + // string session identifier + // byte SSH_MSG_USERAUTH_REQUEST + // string user name + // string service + // string "gssapi-with-mic" + // See RFC 2743 section 2.3.1 and RFC 4462 3.5. + GetMIC(micFiled []byte) ([]byte, error) + // Whenever possible, it should be possible for + // DeleteSecContext() calls to be successfully processed even + // if other calls cannot succeed, thereby enabling context-related + // resources to be released. + // In addition to deleting established security contexts, + // gss_delete_sec_context must also be able to delete "half-built" + // security contexts resulting from an incomplete sequence of + // InitSecContext()/AcceptSecContext() calls. + // See RFC 2743 section 2.2.3. + DeleteSecContext() error +} + +// GSSAPIServer provides the API to plug in GSSAPI authentication for server logins. +type GSSAPIServer interface { + // AcceptSecContext allows a remotely initiated security context between the application + // and a remote peer to be established by the ssh client. The routine may return a + // outputToken which should be transferred to the ssh client, + // where the ssh client will present it to InitSecContext. + // If no token need be sent, AcceptSecContext will indicate this + // by setting the needContinue to false. To + // complete the context establishment, one or more reply tokens may be + // required from the ssh client. if so, AcceptSecContext + // will return a needContinue which is true, in which case it + // should be called again when the reply token is received from the ssh + // client, passing the token to AcceptSecContext via the + // token parameters. + // The srcName return value is the authenticated username. + // See RFC 2743 section 2.2.2 and RFC 4462 section 3.4. + AcceptSecContext(token []byte) (outputToken []byte, srcName string, needContinue bool, err error) + // VerifyMIC verifies that a cryptographic MIC, contained in the token parameter, + // fits the supplied message is received from the ssh client. + // See RFC 2743 section 2.3.2. + VerifyMIC(micField []byte, micToken []byte) error + // Whenever possible, it should be possible for + // DeleteSecContext() calls to be successfully processed even + // if other calls cannot succeed, thereby enabling context-related + // resources to be released. + // In addition to deleting established security contexts, + // gss_delete_sec_context must also be able to delete "half-built" + // security contexts resulting from an incomplete sequence of + // InitSecContext()/AcceptSecContext() calls. + // See RFC 2743 section 2.2.3. + DeleteSecContext() error +} + +var ( + // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication, + // so we also support the krb5 mechanism only. + // See RFC 1964 section 1. + krb5Mesh = asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2} +) + +// The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST +// See RFC 4462 section 3.2. 
+type userAuthRequestGSSAPI struct { + N uint32 + OIDS []asn1.ObjectIdentifier +} + +func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) { + n, rest, ok := parseUint32(payload) + if !ok { + return nil, errors.New("parse uint32 failed") + } + s := &userAuthRequestGSSAPI{ + N: n, + OIDS: make([]asn1.ObjectIdentifier, n), + } + for i := 0; i < int(n); i++ { + var ( + desiredMech []byte + err error + ) + desiredMech, rest, ok = parseString(rest) + if !ok { + return nil, errors.New("parse string failed") + } + if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil { + return nil, err + } + + } + return s, nil +} + +// See RFC 4462 section 3.6. +func buildMIC(sessionID string, username string, service string, authMethod string) []byte { + out := make([]byte, 0, 0) + out = appendString(out, sessionID) + out = append(out, msgUserAuthRequest) + out = appendString(out, username) + out = appendString(out, service) + out = appendString(out, authMethod) + return out +} diff --git a/tempfork/sshtest/ssh/ssh_gss_test.go b/tempfork/sshtest/ssh/ssh_gss_test.go new file mode 100644 index 000000000..39a111288 --- /dev/null +++ b/tempfork/sshtest/ssh/ssh_gss_test.go @@ -0,0 +1,109 @@ +package ssh + +import ( + "fmt" + "testing" +) + +func TestParseGSSAPIPayload(t *testing.T) { + payload := []byte{0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0b, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02} + res, err := parseGSSAPIPayload(payload) + if err != nil { + t.Fatal(err) + } + if ok := res.OIDS[0].Equal(krb5Mesh); !ok { + t.Fatalf("got %v, want %v", res, krb5Mesh) + } +} + +func TestBuildMIC(t *testing.T) { + sessionID := []byte{134, 180, 134, 194, 62, 145, 171, 82, 119, 149, 254, 196, 125, 173, 177, 145, 187, 85, 53, + 183, 44, 150, 219, 129, 166, 195, 19, 33, 209, 246, 175, 121} + username := "testuser" + service := "ssh-connection" + authMethod := "gssapi-with-mic" + expected := []byte{0, 0, 0, 32, 134, 180, 134, 194, 62, 145, 171, 82, 119, 149, 254, 196, 125, 173, 177, 145, 187, 85, 53, 183, 44, 150, 219, 129, 166, 195, 19, 33, 209, 246, 175, 121, 50, 0, 0, 0, 8, 116, 101, 115, 116, 117, 115, 101, 114, 0, 0, 0, 14, 115, 115, 104, 45, 99, 111, 110, 110, 101, 99, 116, 105, 111, 110, 0, 0, 0, 15, 103, 115, 115, 97, 112, 105, 45, 119, 105, 116, 104, 45, 109, 105, 99} + result := buildMIC(string(sessionID), username, service, authMethod) + if string(result) != string(expected) { + t.Fatalf("buildMic: got %v, want %v", result, expected) + } +} + +type exchange struct { + outToken string + expectedToken string +} + +type FakeClient struct { + exchanges []*exchange + round int + mic []byte + maxRound int +} + +func (f *FakeClient) InitSecContext(target string, token []byte, isGSSDelegCreds bool) (outputToken []byte, needContinue bool, err error) { + if token == nil { + if f.exchanges[f.round].expectedToken != "" { + err = fmt.Errorf("got empty token, want %q", f.exchanges[f.round].expectedToken) + } else { + outputToken = []byte(f.exchanges[f.round].outToken) + } + } else { + if string(token) != string(f.exchanges[f.round].expectedToken) { + err = fmt.Errorf("got %q, want token %q", token, f.exchanges[f.round].expectedToken) + } else { + outputToken = []byte(f.exchanges[f.round].outToken) + } + } + f.round++ + needContinue = f.round < f.maxRound + return +} + +func (f *FakeClient) GetMIC(micField []byte) ([]byte, error) { + return f.mic, nil +} + +func (f *FakeClient) DeleteSecContext() error { + return nil +} + +type FakeServer struct { + exchanges []*exchange + round 
int + expectedMIC []byte + srcName string + maxRound int +} + +func (f *FakeServer) AcceptSecContext(token []byte) (outputToken []byte, srcName string, needContinue bool, err error) { + if token == nil { + if f.exchanges[f.round].expectedToken != "" { + err = fmt.Errorf("got empty token, want %q", f.exchanges[f.round].expectedToken) + } else { + outputToken = []byte(f.exchanges[f.round].outToken) + } + } else { + if string(token) != string(f.exchanges[f.round].expectedToken) { + err = fmt.Errorf("got %q, want token %q", token, f.exchanges[f.round].expectedToken) + } else { + outputToken = []byte(f.exchanges[f.round].outToken) + } + } + f.round++ + needContinue = f.round < f.maxRound + srcName = f.srcName + return +} + +func (f *FakeServer) VerifyMIC(micField []byte, micToken []byte) error { + if string(micToken) != string(f.expectedMIC) { + return fmt.Errorf("got MICToken %q, want %q", micToken, f.expectedMIC) + } + return nil +} + +func (f *FakeServer) DeleteSecContext() error { + return nil +} diff --git a/tempfork/sshtest/ssh/streamlocal.go b/tempfork/sshtest/ssh/streamlocal.go new file mode 100644 index 000000000..b171b330b --- /dev/null +++ b/tempfork/sshtest/ssh/streamlocal.go @@ -0,0 +1,116 @@ +package ssh + +import ( + "errors" + "io" + "net" +) + +// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message +// with "direct-streamlocal@openssh.com" string. +// +// See openssh-portable/PROTOCOL, section 2.4. connection: Unix domain socket forwarding +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235 +type streamLocalChannelOpenDirectMsg struct { + socketPath string + reserved0 string + reserved1 uint32 +} + +// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message +// with "forwarded-streamlocal@openssh.com" string. +type forwardedStreamLocalPayload struct { + SocketPath string + Reserved0 string +} + +// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message +// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string. +type streamLocalChannelForwardMsg struct { + socketPath string +} + +// ListenUnix is similar to ListenTCP but uses a Unix domain socket. +func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { + c.handleForwardsOnce.Do(c.handleForwards) + m := streamLocalChannelForwardMsg{ + socketPath, + } + // send message + ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m)) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") + } + ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) + + return &unixListener{socketPath, c, ch}, nil +} + +func (c *Client) dialStreamLocal(socketPath string) (Channel, error) { + msg := streamLocalChannelOpenDirectMsg{ + socketPath: socketPath, + } + ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg)) + if err != nil { + return nil, err + } + go DiscardRequests(in) + return ch, err +} + +type unixListener struct { + socketPath string + + conn *Client + in <-chan forward +} + +// Accept waits for and returns the next connection to the listener. 
+func (l *unixListener) Accept() (net.Conn, error) { + s, ok := <-l.in + if !ok { + return nil, io.EOF + } + ch, incoming, err := s.newCh.Accept() + if err != nil { + return nil, err + } + go DiscardRequests(incoming) + + return &chanConn{ + Channel: ch, + laddr: &net.UnixAddr{ + Name: l.socketPath, + Net: "unix", + }, + raddr: &net.UnixAddr{ + Name: "@", + Net: "unix", + }, + }, nil +} + +// Close closes the listener. +func (l *unixListener) Close() error { + // this also closes the listener. + l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) + m := streamLocalChannelForwardMsg{ + l.socketPath, + } + ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m)) + if err == nil && !ok { + err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed") + } + return err +} + +// Addr returns the listener's network address. +func (l *unixListener) Addr() net.Addr { + return &net.UnixAddr{ + Name: l.socketPath, + Net: "unix", + } +} diff --git a/tempfork/sshtest/ssh/tcpip.go b/tempfork/sshtest/ssh/tcpip.go new file mode 100644 index 000000000..ef5059a11 --- /dev/null +++ b/tempfork/sshtest/ssh/tcpip.go @@ -0,0 +1,509 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "context" + "errors" + "fmt" + "io" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" +) + +// Listen requests the remote peer open a listening socket on +// addr. Incoming connections will be available by calling Accept on +// the returned net.Listener. The listener must be serviced, or the +// SSH connection may hang. +// N must be "tcp", "tcp4", "tcp6", or "unix". +func (c *Client) Listen(n, addr string) (net.Listener, error) { + switch n { + case "tcp", "tcp4", "tcp6": + laddr, err := net.ResolveTCPAddr(n, addr) + if err != nil { + return nil, err + } + return c.ListenTCP(laddr) + case "unix": + return c.ListenUnix(addr) + default: + return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) + } +} + +// Automatic port allocation is broken with OpenSSH before 6.0. See +// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In +// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, +// rather than the actual port number. This means you can never open +// two different listeners with auto allocated ports. We work around +// this by trying explicit ports until we succeed. + +const openSSHPrefix = "OpenSSH_" + +var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) + +// isBrokenOpenSSHVersion returns true if the given version string +// specifies a version of OpenSSH that is known to have a bug in port +// forwarding. +func isBrokenOpenSSHVersion(versionStr string) bool { + i := strings.Index(versionStr, openSSHPrefix) + if i < 0 { + return false + } + i += len(openSSHPrefix) + j := i + for ; j < len(versionStr); j++ { + if versionStr[j] < '0' || versionStr[j] > '9' { + break + } + } + version, _ := strconv.Atoi(versionStr[i:j]) + return version < 6 +} + +// autoPortListenWorkaround simulates automatic port allocation by +// trying random ports repeatedly. 
+func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { + var sshListener net.Listener + var err error + const tries = 10 + for i := 0; i < tries; i++ { + addr := *laddr + addr.Port = 1024 + portRandomizer.Intn(60000) + sshListener, err = c.ListenTCP(&addr) + if err == nil { + laddr.Port = addr.Port + return sshListener, err + } + } + return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) +} + +// RFC 4254 7.1 +type channelForwardMsg struct { + addr string + rport uint32 +} + +// handleForwards starts goroutines handling forwarded connections. +// It's called on first use by (*Client).ListenTCP to not launch +// goroutines until needed. +func (c *Client) handleForwards() { + go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-tcpip")) + go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-streamlocal@openssh.com")) +} + +// ListenTCP requests the remote peer open a listening socket +// on laddr. Incoming connections will be available by calling +// Accept on the returned net.Listener. +func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { + c.handleForwardsOnce.Do(c.handleForwards) + if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { + return c.autoPortListenWorkaround(laddr) + } + + m := channelForwardMsg{ + laddr.IP.String(), + uint32(laddr.Port), + } + // send message + ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("ssh: tcpip-forward request denied by peer") + } + + // If the original port was 0, then the remote side will + // supply a real port number in the response. + if laddr.Port == 0 { + var p struct { + Port uint32 + } + if err := Unmarshal(resp, &p); err != nil { + return nil, err + } + laddr.Port = int(p.Port) + } + + // Register this forward, using the port number we obtained. + ch := c.forwards.add(laddr) + + return &tcpListener{laddr, c, ch}, nil +} + +// forwardList stores a mapping between remote +// forward requests and the tcpListeners. +type forwardList struct { + sync.Mutex + entries []forwardEntry +} + +// forwardEntry represents an established mapping of a laddr on a +// remote ssh server to a channel connected to a tcpListener. +type forwardEntry struct { + laddr net.Addr + c chan forward +} + +// forward represents an incoming forwarded tcpip connection. The +// arguments to add/remove/lookup should be address as specified in +// the original forward-request. +type forward struct { + newCh NewChannel // the ssh client channel underlying this forward + raddr net.Addr // the raddr of the incoming connection +} + +func (l *forwardList) add(addr net.Addr) chan forward { + l.Lock() + defer l.Unlock() + f := forwardEntry{ + laddr: addr, + c: make(chan forward, 1), + } + l.entries = append(l.entries, f) + return f.c +} + +// See RFC 4254, section 7.2 +type forwardedTCPPayload struct { + Addr string + Port uint32 + OriginAddr string + OriginPort uint32 +} + +// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. 
+func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { + if port == 0 || port > 65535 { + return nil, fmt.Errorf("ssh: port number out of range: %d", port) + } + ip := net.ParseIP(string(addr)) + if ip == nil { + return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) + } + return &net.TCPAddr{IP: ip, Port: int(port)}, nil +} + +func (l *forwardList) handleChannels(in <-chan NewChannel) { + for ch := range in { + var ( + laddr net.Addr + raddr net.Addr + err error + ) + switch channelType := ch.ChannelType(); channelType { + case "forwarded-tcpip": + var payload forwardedTCPPayload + if err = Unmarshal(ch.ExtraData(), &payload); err != nil { + ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) + continue + } + + // RFC 4254 section 7.2 specifies that incoming + // addresses should list the address, in string + // format. It is implied that this should be an IP + // address, as it would be impossible to connect to it + // otherwise. + laddr, err = parseTCPAddr(payload.Addr, payload.Port) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + + case "forwarded-streamlocal@openssh.com": + var payload forwardedStreamLocalPayload + if err = Unmarshal(ch.ExtraData(), &payload); err != nil { + ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) + continue + } + laddr = &net.UnixAddr{ + Name: payload.SocketPath, + Net: "unix", + } + raddr = &net.UnixAddr{ + Name: "@", + Net: "unix", + } + default: + panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) + } + if ok := l.forward(laddr, raddr, ch); !ok { + // Section 7.2, implementations MUST reject spurious incoming + // connections. + ch.Reject(Prohibited, "no forward for address") + continue + } + + } +} + +// remove removes the forward entry, and the channel feeding its +// listener. +func (l *forwardList) remove(addr net.Addr) { + l.Lock() + defer l.Unlock() + for i, f := range l.entries { + if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { + l.entries = append(l.entries[:i], l.entries[i+1:]...) + close(f.c) + return + } + } +} + +// closeAll closes and clears all forwards. +func (l *forwardList) closeAll() { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + close(f.c) + } + l.entries = nil +} + +func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { + f.c <- forward{newCh: ch, raddr: raddr} + return true + } + } + return false +} + +type tcpListener struct { + laddr *net.TCPAddr + + conn *Client + in <-chan forward +} + +// Accept waits for and returns the next connection to the listener. +func (l *tcpListener) Accept() (net.Conn, error) { + s, ok := <-l.in + if !ok { + return nil, io.EOF + } + ch, incoming, err := s.newCh.Accept() + if err != nil { + return nil, err + } + go DiscardRequests(incoming) + + return &chanConn{ + Channel: ch, + laddr: l.laddr, + raddr: s.raddr, + }, nil +} + +// Close closes the listener. +func (l *tcpListener) Close() error { + m := channelForwardMsg{ + l.laddr.IP.String(), + uint32(l.laddr.Port), + } + + // this also closes the listener. 
+ l.conn.forwards.remove(l.laddr) + ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) + if err == nil && !ok { + err = errors.New("ssh: cancel-tcpip-forward failed") + } + return err +} + +// Addr returns the listener's network address. +func (l *tcpListener) Addr() net.Addr { + return l.laddr +} + +// DialContext initiates a connection to the addr from the remote host. +// +// The provided Context must be non-nil. If the context expires before the +// connection is complete, an error is returned. Once successfully connected, +// any expiration of the context will not affect the connection. +// +// See func Dial for additional information. +func (c *Client) DialContext(ctx context.Context, n, addr string) (net.Conn, error) { + if err := ctx.Err(); err != nil { + return nil, err + } + type connErr struct { + conn net.Conn + err error + } + ch := make(chan connErr) + go func() { + conn, err := c.Dial(n, addr) + select { + case ch <- connErr{conn, err}: + case <-ctx.Done(): + if conn != nil { + conn.Close() + } + } + }() + select { + case res := <-ch: + return res.conn, res.err + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +// Dial initiates a connection to the addr from the remote host. +// The resulting connection has a zero LocalAddr() and RemoteAddr(). +func (c *Client) Dial(n, addr string) (net.Conn, error) { + var ch Channel + switch n { + case "tcp", "tcp4", "tcp6": + // Parse the address into host and numeric port. + host, portString, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.ParseUint(portString, 10, 16) + if err != nil { + return nil, err + } + ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port)) + if err != nil { + return nil, err + } + // Use a zero address for local and remote address. + zeroAddr := &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } + return &chanConn{ + Channel: ch, + laddr: zeroAddr, + raddr: zeroAddr, + }, nil + case "unix": + var err error + ch, err = c.dialStreamLocal(addr) + if err != nil { + return nil, err + } + return &chanConn{ + Channel: ch, + laddr: &net.UnixAddr{ + Name: "@", + Net: "unix", + }, + raddr: &net.UnixAddr{ + Name: addr, + Net: "unix", + }, + }, nil + default: + return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) + } +} + +// DialTCP connects to the remote address raddr on the network net, +// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used +// as the local address for the connection. +func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { + if laddr == nil { + laddr = &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } + } + ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) + if err != nil { + return nil, err + } + return &chanConn{ + Channel: ch, + laddr: laddr, + raddr: raddr, + }, nil +} + +// RFC 4254 7.2 +type channelOpenDirectMsg struct { + raddr string + rport uint32 + laddr string + lport uint32 +} + +func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { + msg := channelOpenDirectMsg{ + raddr: raddr, + rport: uint32(rport), + laddr: laddr, + lport: uint32(lport), + } + ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) + if err != nil { + return nil, err + } + go DiscardRequests(in) + return ch, err +} + +type tcpChan struct { + Channel // the backing channel +} + +// chanConn fulfills the net.Conn interface without +// the tcpChan having to hold laddr or raddr directly. 
+type chanConn struct { + Channel + laddr, raddr net.Addr +} + +// LocalAddr returns the local network address. +func (t *chanConn) LocalAddr() net.Addr { + return t.laddr +} + +// RemoteAddr returns the remote network address. +func (t *chanConn) RemoteAddr() net.Addr { + return t.raddr +} + +// SetDeadline sets the read and write deadlines associated +// with the connection. +func (t *chanConn) SetDeadline(deadline time.Time) error { + if err := t.SetReadDeadline(deadline); err != nil { + return err + } + return t.SetWriteDeadline(deadline) +} + +// SetReadDeadline sets the read deadline. +// A zero value for t means Read will not time out. +// After the deadline, the error from Read will implement net.Error +// with Timeout() == true. +func (t *chanConn) SetReadDeadline(deadline time.Time) error { + // for compatibility with previous version, + // the error message contains "tcpChan" + return errors.New("ssh: tcpChan: deadline not supported") +} + +// SetWriteDeadline exists to satisfy the net.Conn interface +// but is not implemented by this type. It always returns an error. +func (t *chanConn) SetWriteDeadline(deadline time.Time) error { + return errors.New("ssh: tcpChan: deadline not supported") +} diff --git a/tempfork/sshtest/ssh/tcpip_test.go b/tempfork/sshtest/ssh/tcpip_test.go new file mode 100644 index 000000000..4d8511472 --- /dev/null +++ b/tempfork/sshtest/ssh/tcpip_test.go @@ -0,0 +1,53 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "context" + "net" + "testing" + "time" +) + +func TestAutoPortListenBroken(t *testing.T) { + broken := "SSH-2.0-OpenSSH_5.9hh11" + works := "SSH-2.0-OpenSSH_6.1" + if !isBrokenOpenSSHVersion(broken) { + t.Errorf("version %q not marked as broken", broken) + } + if isBrokenOpenSSHVersion(works) { + t.Errorf("version %q marked as broken", works) + } +} + +func TestClientImplementsDialContext(t *testing.T) { + type ContextDialer interface { + DialContext(context.Context, string, string) (net.Conn, error) + } + // Belt and suspenders assertion, since package net does not + // declare a ContextDialer type. + var _ ContextDialer = &net.Dialer{} + var _ ContextDialer = &Client{} +} + +func TestClientDialContextWithCancel(t *testing.T) { + c := &Client{} + ctx, cancel := context.WithCancel(context.Background()) + cancel() + _, err := c.DialContext(ctx, "tcp", "localhost:1000") + if err != context.Canceled { + t.Errorf("DialContext: got nil error, expected %v", context.Canceled) + } +} + +func TestClientDialContextWithDeadline(t *testing.T) { + c := &Client{} + ctx, cancel := context.WithDeadline(context.Background(), time.Now()) + defer cancel() + _, err := c.DialContext(ctx, "tcp", "localhost:1000") + if err != context.DeadlineExceeded { + t.Errorf("DialContext: got nil error, expected %v", context.DeadlineExceeded) + } +} diff --git a/tempfork/sshtest/ssh/testdata_test.go b/tempfork/sshtest/ssh/testdata_test.go new file mode 100644 index 000000000..2da8c79dc --- /dev/null +++ b/tempfork/sshtest/ssh/testdata_test.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// IMPLEMENTATION NOTE: To avoid a package loop, this file is in three places: +// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three +// instances. 
+ +package ssh + +import ( + "crypto/rand" + "fmt" + + "golang.org/x/crypto/ssh/testdata" +) + +var ( + testPrivateKeys map[string]interface{} + testSigners map[string]Signer + testPublicKeys map[string]PublicKey +) + +func init() { + var err error + + n := len(testdata.PEMBytes) + testPrivateKeys = make(map[string]interface{}, n) + testSigners = make(map[string]Signer, n) + testPublicKeys = make(map[string]PublicKey, n) + for t, k := range testdata.PEMBytes { + testPrivateKeys[t], err = ParseRawPrivateKey(k) + if err != nil { + panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err)) + } + testSigners[t], err = NewSignerFromKey(testPrivateKeys[t]) + if err != nil { + panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err)) + } + testPublicKeys[t] = testSigners[t].PublicKey() + } + + // Create a cert and sign it for use in tests. + testCert := &Certificate{ + Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil + ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage + ValidAfter: 0, // unix epoch + ValidBefore: CertTimeInfinity, // The end of currently representable time. + Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil + Key: testPublicKeys["ecdsa"], + SignatureKey: testPublicKeys["rsa"], + Permissions: Permissions{ + CriticalOptions: map[string]string{}, + Extensions: map[string]string{}, + }, + } + testCert.SignCert(rand.Reader, testSigners["rsa"]) + testPrivateKeys["cert"] = testPrivateKeys["ecdsa"] + testSigners["cert"], err = NewCertSigner(testCert, testSigners["ecdsa"]) + if err != nil { + panic(fmt.Sprintf("Unable to create certificate signer: %v", err)) + } +} diff --git a/tempfork/sshtest/ssh/transport.go b/tempfork/sshtest/ssh/transport.go new file mode 100644 index 000000000..0424d2d37 --- /dev/null +++ b/tempfork/sshtest/ssh/transport.go @@ -0,0 +1,380 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bufio" + "bytes" + "errors" + "io" + "log" +) + +// debugTransport if set, will print packet types as they go over the +// wire. No message decoding is done, to minimize the impact on timing. +const debugTransport = false + +const ( + gcm128CipherID = "aes128-gcm@openssh.com" + gcm256CipherID = "aes256-gcm@openssh.com" + aes128cbcID = "aes128-cbc" + tripledescbcID = "3des-cbc" +) + +// packetConn represents a transport that implements packet based +// operations. +type packetConn interface { + // Encrypt and send a packet of data to the remote peer. + writePacket(packet []byte) error + + // Read a packet from the connection. The read is blocking, + // i.e. if error is nil, then the returned byte slice is + // always non-empty. + readPacket() ([]byte, error) + + // Close closes the write-side of the connection. + Close() error +} + +// transport is the keyingTransport that implements the SSH packet +// protocol. +type transport struct { + reader connectionState + writer connectionState + + bufReader *bufio.Reader + bufWriter *bufio.Writer + rand io.Reader + isClient bool + io.Closer + + strictMode bool + initialKEXDone bool +} + +// packetCipher represents a combination of SSH encryption/MAC +// protocol. A single instance should be used for one direction only. +type packetCipher interface { + // writeCipherPacket encrypts the packet and writes it to w. The + // contents of the packet are generally scrambled. 
+ writeCipherPacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error + + // readCipherPacket reads and decrypts a packet of data. The + // returned packet may be overwritten by future calls of + // readPacket. + readCipherPacket(seqnum uint32, r io.Reader) ([]byte, error) +} + +// connectionState represents one side (read or write) of the +// connection. This is necessary because each direction has its own +// keys, and can even have its own algorithms +type connectionState struct { + packetCipher + seqNum uint32 + dir direction + pendingKeyChange chan packetCipher +} + +func (t *transport) setStrictMode() error { + if t.reader.seqNum != 1 { + return errors.New("ssh: sequence number != 1 when strict KEX mode requested") + } + t.strictMode = true + return nil +} + +func (t *transport) setInitialKEXDone() { + t.initialKEXDone = true +} + +// prepareKeyChange sets up key material for a keychange. The key changes in +// both directions are triggered by reading and writing a msgNewKey packet +// respectively. +func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { + ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) + if err != nil { + return err + } + t.reader.pendingKeyChange <- ciph + + ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) + if err != nil { + return err + } + t.writer.pendingKeyChange <- ciph + + return nil +} + +func (t *transport) printPacket(p []byte, write bool) { + if len(p) == 0 { + return + } + who := "server" + if t.isClient { + who = "client" + } + what := "read" + if write { + what = "write" + } + + log.Println(what, who, p[0]) +} + +// Read and decrypt next packet. +func (t *transport) readPacket() (p []byte, err error) { + for { + p, err = t.reader.readPacket(t.bufReader, t.strictMode) + if err != nil { + break + } + // in strict mode we pass through DEBUG and IGNORE packets only during the initial KEX + if len(p) == 0 || (t.strictMode && !t.initialKEXDone) || (p[0] != msgIgnore && p[0] != msgDebug) { + break + } + } + if debugTransport { + t.printPacket(p, false) + } + + return p, err +} + +func (s *connectionState) readPacket(r *bufio.Reader, strictMode bool) ([]byte, error) { + packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) + s.seqNum++ + if err == nil && len(packet) == 0 { + err = errors.New("ssh: zero length packet") + } + + if len(packet) > 0 { + switch packet[0] { + case msgNewKeys: + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher + if strictMode { + s.seqNum = 0 + } + default: + return nil, errors.New("ssh: got bogus newkeys message") + } + + case msgDisconnect: + // Transform a disconnect message into an + // error. Since this is lowest level at which + // we interpret message types, doing it here + // ensures that we don't have to handle it + // elsewhere. + var msg disconnectMsg + if err := Unmarshal(packet, &msg); err != nil { + return nil, err + } + return nil, &msg + } + } + + // The packet may point to an internal buffer, so copy the + // packet out here. 
+ fresh := make([]byte, len(packet)) + copy(fresh, packet) + + return fresh, err +} + +func (t *transport) writePacket(packet []byte) error { + if debugTransport { + t.printPacket(packet, true) + } + return t.writer.writePacket(t.bufWriter, t.rand, packet, t.strictMode) +} + +func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte, strictMode bool) error { + changeKeys := len(packet) > 0 && packet[0] == msgNewKeys + + err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet) + if err != nil { + return err + } + if err = w.Flush(); err != nil { + return err + } + s.seqNum++ + if changeKeys { + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher + if strictMode { + s.seqNum = 0 + } + default: + panic("ssh: no key material for msgNewKeys") + } + } + return err +} + +func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { + t := &transport{ + bufReader: bufio.NewReader(rwc), + bufWriter: bufio.NewWriter(rwc), + rand: rand, + reader: connectionState{ + packetCipher: &streamPacketCipher{cipher: noneCipher{}}, + pendingKeyChange: make(chan packetCipher, 1), + }, + writer: connectionState{ + packetCipher: &streamPacketCipher{cipher: noneCipher{}}, + pendingKeyChange: make(chan packetCipher, 1), + }, + Closer: rwc, + } + t.isClient = isClient + + if isClient { + t.reader.dir = serverKeys + t.writer.dir = clientKeys + } else { + t.reader.dir = clientKeys + t.writer.dir = serverKeys + } + + return t +} + +type direction struct { + ivTag []byte + keyTag []byte + macKeyTag []byte +} + +var ( + serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} + clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} +) + +// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as +// described in RFC 4253, section 6.4. direction should either be serverKeys +// (to setup server->client keys) or clientKeys (for client->server keys). +func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { + cipherMode := cipherModes[algs.Cipher] + + iv := make([]byte, cipherMode.ivSize) + key := make([]byte, cipherMode.keySize) + + generateKeyMaterial(iv, d.ivTag, kex) + generateKeyMaterial(key, d.keyTag, kex) + + var macKey []byte + if !aeadCiphers[algs.Cipher] { + macMode := macModes[algs.MAC] + macKey = make([]byte, macMode.keySize) + generateKeyMaterial(macKey, d.macKeyTag, kex) + } + + return cipherModes[algs.Cipher].create(key, iv, macKey, algs) +} + +// generateKeyMaterial fills out with key material generated from tag, K, H +// and sessionId, as specified in RFC 4253, section 7.2. +func generateKeyMaterial(out, tag []byte, r *kexResult) { + var digestsSoFar []byte + + h := r.Hash.New() + for len(out) > 0 { + h.Reset() + h.Write(r.K) + h.Write(r.H) + + if len(digestsSoFar) == 0 { + h.Write(tag) + h.Write(r.SessionID) + } else { + h.Write(digestsSoFar) + } + + digest := h.Sum(nil) + n := copy(out, digest) + out = out[n:] + if len(out) > 0 { + digestsSoFar = append(digestsSoFar, digest...) + } + } +} + +const packageVersion = "SSH-2.0-Go" + +// Sends and receives a version line. The versionLine string should +// be US ASCII, start with "SSH-2.0-", and should not include a +// newline. exchangeVersions returns the other side's version line. 
+func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { + // Contrary to the RFC, we do not ignore lines that don't + // start with "SSH-2.0-" to make the library usable with + // nonconforming servers. + for _, c := range versionLine { + // The spec disallows non US-ASCII chars, and + // specifically forbids null chars. + if c < 32 { + return nil, errors.New("ssh: junk character in version line") + } + } + if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { + return + } + + them, err = readVersion(rw) + return them, err +} + +// maxVersionStringBytes is the maximum number of bytes that we'll +// accept as a version string. RFC 4253 section 4.2 limits this at 255 +// chars +const maxVersionStringBytes = 255 + +// Read version string as specified by RFC 4253, section 4.2. +func readVersion(r io.Reader) ([]byte, error) { + versionString := make([]byte, 0, 64) + var ok bool + var buf [1]byte + + for length := 0; length < maxVersionStringBytes; length++ { + _, err := io.ReadFull(r, buf[:]) + if err != nil { + return nil, err + } + // The RFC says that the version should be terminated with \r\n + // but several SSH servers actually only send a \n. + if buf[0] == '\n' { + if !bytes.HasPrefix(versionString, []byte("SSH-")) { + // RFC 4253 says we need to ignore all version string lines + // except the one containing the SSH version (provided that + // all the lines do not exceed 255 bytes in total). + versionString = versionString[:0] + continue + } + ok = true + break + } + + // non ASCII chars are disallowed, but we are lenient, + // since Go doesn't use null-terminated strings. + + // The RFC allows a comment after a space, however, + // all of it (version and comments) goes into the + // session hash. + versionString = append(versionString, buf[0]) + } + + if !ok { + return nil, errors.New("ssh: overflow reading version string") + } + + // There might be a '\r' on the end which we should remove. + if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { + versionString = versionString[:len(versionString)-1] + } + return versionString, nil +} diff --git a/tempfork/sshtest/ssh/transport_test.go b/tempfork/sshtest/ssh/transport_test.go new file mode 100644 index 000000000..8445e1e56 --- /dev/null +++ b/tempfork/sshtest/ssh/transport_test.go @@ -0,0 +1,113 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "strings" + "testing" +) + +func TestReadVersion(t *testing.T) { + longVersion := strings.Repeat("SSH-2.0-bla", 50)[:253] + multiLineVersion := strings.Repeat("ignored\r\n", 20) + "SSH-2.0-bla\r\n" + cases := map[string]string{ + "SSH-2.0-bla\r\n": "SSH-2.0-bla", + "SSH-2.0-bla\n": "SSH-2.0-bla", + multiLineVersion: "SSH-2.0-bla", + longVersion + "\r\n": longVersion, + } + + for in, want := range cases { + result, err := readVersion(bytes.NewBufferString(in)) + if err != nil { + t.Errorf("readVersion(%q): %s", in, err) + } + got := string(result) + if got != want { + t.Errorf("got %q, want %q", got, want) + } + } +} + +func TestReadVersionError(t *testing.T) { + longVersion := strings.Repeat("SSH-2.0-bla", 50)[:253] + multiLineVersion := strings.Repeat("ignored\r\n", 50) + "SSH-2.0-bla\r\n" + cases := []string{ + longVersion + "too-long\r\n", + multiLineVersion, + } + for _, in := range cases { + if _, err := readVersion(bytes.NewBufferString(in)); err == nil { + t.Errorf("readVersion(%q) should have failed", in) + } + } +} + +func TestExchangeVersionsBasic(t *testing.T) { + v := "SSH-2.0-bla" + buf := bytes.NewBufferString(v + "\r\n") + them, err := exchangeVersions(buf, []byte("xyz")) + if err != nil { + t.Errorf("exchangeVersions: %v", err) + } + + if want := "SSH-2.0-bla"; string(them) != want { + t.Errorf("got %q want %q for our version", them, want) + } +} + +func TestExchangeVersions(t *testing.T) { + cases := []string{ + "not\x000allowed", + "not allowed\x01\r\n", + } + for _, c := range cases { + buf := bytes.NewBufferString("SSH-2.0-bla\r\n") + if _, err := exchangeVersions(buf, []byte(c)); err == nil { + t.Errorf("exchangeVersions(%q): should have failed", c) + } + } +} + +type closerBuffer struct { + bytes.Buffer +} + +func (b *closerBuffer) Close() error { + return nil +} + +func TestTransportMaxPacketWrite(t *testing.T) { + buf := &closerBuffer{} + tr := newTransport(buf, rand.Reader, true) + huge := make([]byte, maxPacket+1) + err := tr.writePacket(huge) + if err == nil { + t.Errorf("transport accepted write for a huge packet.") + } +} + +func TestTransportMaxPacketReader(t *testing.T) { + var header [5]byte + huge := make([]byte, maxPacket+128) + binary.BigEndian.PutUint32(header[0:], uint32(len(huge))) + // padding. + header[4] = 0 + + buf := &closerBuffer{} + buf.Write(header[:]) + buf.Write(huge) + + tr := newTransport(buf, rand.Reader, true) + _, err := tr.readPacket() + if err == nil { + t.Errorf("transport succeeded reading huge packet.") + } else if !strings.Contains(err.Error(), "large") { + t.Errorf("got %q, should mention %q", err.Error(), "large") + } +} From 27f8e2e31d68ccf26f68216fcc94ba6a70dfa35a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 11 Feb 2025 08:56:04 -0800 Subject: [PATCH 0484/1708] go.mod: bump x/* deps Notably, this pulls in https://go.googlesource.com/net/+/2dab271ff1b7396498746703d88fefcddcc5cec7 for golang/go#71557. 
Updates #8043 Change-Id: I3637dbf27b90423dd4d54d147f12688b51f3ce36 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + go.mod | 24 +++++++++--------- go.sum | 48 +++++++++++++++++------------------ 5 files changed, 39 insertions(+), 36 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 19773761f..c67a02aa2 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -1015,6 +1015,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ + golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 45221252e..ad2e40611 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -212,6 +212,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/net/http2/hpack from net/http+ golang.org/x/net/icmp from tailscale.com/net/ping golang.org/x/net/idna from golang.org/x/net/http/httpguts+ + golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 294f2944e..9bae28107 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -467,6 +467,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ + golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy diff --git a/go.mod b/go.mod index 4de2df640..625105a86 100644 --- a/go.mod +++ b/go.mod @@ -95,16 +95,16 @@ require ( go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 - golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 - golang.org/x/mod v0.22.0 - golang.org/x/net v0.34.0 - golang.org/x/oauth2 v0.25.0 - golang.org/x/sync v0.10.0 - golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab - golang.org/x/term v0.28.0 - golang.org/x/time v0.9.0 - golang.org/x/tools v0.29.0 + golang.org/x/crypto v0.33.0 + golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac + golang.org/x/mod v0.23.0 + golang.org/x/net v0.35.0 + golang.org/x/oauth2 v0.26.0 + golang.org/x/sync v0.11.0 + golang.org/x/sys v0.30.0 + golang.org/x/term v0.29.0 + golang.org/x/time v0.10.0 + golang.org/x/tools v0.30.0 golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 golang.zx2c4.com/wireguard/windows v0.5.3 gopkg.in/square/go-jose.v2 v2.6.0 @@ -385,8 +385,8 @@ require ( gitlab.com/digitalxero/go-conventional-commit v1.0.7 // 
indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect - golang.org/x/image v0.23.0 // indirect - golang.org/x/text v0.21.0 // indirect + golang.org/x/image v0.24.0 // indirect + golang.org/x/text v0.22.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 4568ffb33..b375d1f58 100644 --- a/go.sum +++ b/go.sum @@ -1051,8 +1051,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 h1:Z+Zg+aXJYq6f4TK2E4H+vZkQ4dJAWnInXDR6hM9znxo= -golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= +golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1063,16 +1063,16 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= +golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac h1:l5+whBCLH3iH2ZNHYLbAe58bo7yrN4mVcnkHDYz5vvs= +golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac/go.mod h1:hH+7mtFmImwwcMvScyxUhjuVHR3HGaDPMn9rMSUUbxo= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8= golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.23.0 h1:HseQ7c2OpPKTPVzNjG5fwJsOTCiiwS4QdsYi5XU6H68= -golang.org/x/image v0.23.0/go.mod h1:wJJBTdLfCCf3tiHa1fNxpZmUI4mmoZvwMCPP0ddoNKY= +golang.org/x/image v0.24.0 h1:AN7zRgVsbvmTfNyqIbbOraYL8mSwcKncEj8ofjgzcMQ= +golang.org/x/image v0.24.0/go.mod h1:4b/ITuLfqYq1hqZcjofwctIhi7sZh2WaCjvsBNjjya8= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint 
v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1100,8 +1100,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= -golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= +golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1141,16 +1141,16 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= -golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= +golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE= +golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1164,8 +1164,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 
h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1224,16 +1224,16 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4= -golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= -golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= +golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= +golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1244,13 +1244,13 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= -golang.org/x/time v0.9.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4= +golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -1315,8 +1315,8 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= -golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= +golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= +golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 8b347060f84f7dffc7b4e8e3781e7e66d110b148 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Tue, 11 Feb 2025 10:23:36 -0800 Subject: [PATCH 0485/1708] types/bool: add Int (#14984) Add Int which converts a bool into an integer. Updates tailscale/corp#22024 Signed-off-by: Joe Tsai --- types/bools/bools.go | 11 ++++++++++- types/bools/bools_test.go | 9 +++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/types/bools/bools.go b/types/bools/bools.go index 962e39919..7cef17cf0 100644 --- a/types/bools/bools.go +++ b/types/bools/bools.go @@ -1,9 +1,18 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Package bools contains the [Compare] and [Select] functions. +// Package bools contains the [Int], [Compare], and [Select] functions. package bools +// Int returns 1 for true and 0 for false. +func Int(v bool) int { + if v { + return 1 + } else { + return 0 + } +} + // Compare compares two boolean values as if false is ordered before true. func Compare[T ~bool](x, y T) int { switch { diff --git a/types/bools/bools_test.go b/types/bools/bools_test.go index 1b466db17..67faf3bcc 100644 --- a/types/bools/bools_test.go +++ b/types/bools/bools_test.go @@ -5,6 +5,15 @@ package bools import "testing" +func TestInt(t *testing.T) { + if got := Int(true); got != 1 { + t.Errorf("Int(true) = %v, want 1", got) + } + if got := Int(false); got != 0 { + t.Errorf("Int(false) = %v, want 0", got) + } +} + func TestCompare(t *testing.T) { if got := Compare(false, false); got != 0 { t.Errorf("Compare(false, false) = %v, want 0", got) From b865ceea2056f5ebd6f98442aca82b1a277a6c6e Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 11 Feb 2025 10:09:29 -0800 Subject: [PATCH 0486/1708] tailcfg: update + clean up machine API docs, remove some dead code The machine API docs were still often referring to the nacl boxes which are no longer present in the client. Fix that up, fix the paths, add the HTTP methods. 
And then delete some unused code I found in the process. Updates #cleanup Change-Id: I1591274acbb00a08b7ca4879dfebd5e6b8a9fbcd Signed-off-by: Brad Fitzpatrick --- tailcfg/tailcfg.go | 114 +++++++++++----------------------------- tailcfg/tailcfg_test.go | 76 --------------------------- 2 files changed, 32 insertions(+), 158 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 8251b5058..b513f3d6c 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -1092,68 +1092,6 @@ func (h *Hostinfo) Equal(h2 *Hostinfo) bool { return reflect.DeepEqual(h, h2) } -// HowUnequal returns a list of paths through Hostinfo where h and h2 differ. -// If they differ in nil-ness, the path is "nil", otherwise the path is like -// "ShieldsUp" or "NetInfo.nil" or "NetInfo.PCP". -func (h *Hostinfo) HowUnequal(h2 *Hostinfo) (path []string) { - return appendStructPtrDiff(nil, "", reflect.ValueOf(h), reflect.ValueOf(h2)) -} - -func appendStructPtrDiff(base []string, pfx string, p1, p2 reflect.Value) (ret []string) { - ret = base - if p1.IsNil() && p2.IsNil() { - return base - } - mkPath := func(b string) string { - if pfx == "" { - return b - } - return pfx + "." + b - } - if p1.IsNil() || p2.IsNil() { - return append(base, mkPath("nil")) - } - v1, v2 := p1.Elem(), p2.Elem() - t := v1.Type() - for i, n := 0, t.NumField(); i < n; i++ { - sf := t.Field(i) - switch sf.Type.Kind() { - case reflect.String: - if v1.Field(i).String() != v2.Field(i).String() { - ret = append(ret, mkPath(sf.Name)) - } - continue - case reflect.Bool: - if v1.Field(i).Bool() != v2.Field(i).Bool() { - ret = append(ret, mkPath(sf.Name)) - } - continue - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if v1.Field(i).Int() != v2.Field(i).Int() { - ret = append(ret, mkPath(sf.Name)) - } - continue - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - if v1.Field(i).Uint() != v2.Field(i).Uint() { - ret = append(ret, mkPath(sf.Name)) - } - continue - case reflect.Slice, reflect.Map: - if !reflect.DeepEqual(v1.Field(i).Interface(), v2.Field(i).Interface()) { - ret = append(ret, mkPath(sf.Name)) - } - continue - case reflect.Ptr: - if sf.Type.Elem().Kind() == reflect.Struct { - ret = appendStructPtrDiff(ret, sf.Name, v1.Field(i), v2.Field(i)) - continue - } - } - panic(fmt.Sprintf("unsupported type at %s: %s", mkPath(sf.Name), sf.Type.String())) - } - return ret -} - // SignatureType specifies a scheme for signing RegisterRequest messages. It // specifies the crypto algorithms to use, the contents of what is signed, and // any other relevant details. Historically, requests were unsigned so the zero @@ -1234,11 +1172,11 @@ type RegisterResponseAuth struct { AuthKey string `json:",omitempty"` } -// RegisterRequest is sent by a client to register the key for a node. -// It is encoded to JSON, encrypted with golang.org/x/crypto/nacl/box, -// using the local machine key, and sent to: +// RegisterRequest is a request to register a key for a node. +// +// This is JSON-encoded and sent over the control plane connection to: // -// https://login.tailscale.com/machine/ +// POST https:///machine/register. type RegisterRequest struct { _ structs.Incomparable @@ -1354,10 +1292,9 @@ type Endpoint struct { // The request includes a copy of the client's current set of WireGuard // endpoints and general host information. 
// -// The request is encoded to JSON, encrypted with golang.org/x/crypto/nacl/box, -// using the local machine key, and sent to: +// This is JSON-encoded and sent over the control plane connection to: // -// https://login.tailscale.com/machine//map +// POST https:///machine/map type MapRequest struct { // Version is incremented whenever the client code changes enough that // we want to signal to the control server that we're capable of something @@ -1797,9 +1734,14 @@ const ( PingPeerAPI PingType = "peerapi" ) -// PingRequest with no IP and Types is a request to send an HTTP request to prove the -// long-polling client is still connected. -// PingRequest with Types and IP, will send a ping to the IP and send a POST +// PingRequest is a request from the control plane to the local node to probe +// something. +// +// A PingRequest with no IP and Types is a request from the control plane to the +// local node to send an HTTP request to a URL to prove the long-polling client +// is still connected. +// +// A PingRequest with Types and IP, will send a ping to the IP and send a POST // request containing a PingResponse to the URL containing results. type PingRequest struct { // URL is the URL to reply to the PingRequest to. @@ -2506,13 +2448,13 @@ const ( // SetDNSRequest is a request to add a DNS record. // -// This is used for ACME DNS-01 challenges (so people can use -// LetsEncrypt, etc). +// This is used to let tailscaled clients complete their ACME DNS-01 challenges +// (so people can use LetsEncrypt, etc) to get TLS certificates for +// their foo.bar.ts.net MagicDNS names. // -// The request is encoded to JSON, encrypted with golang.org/x/crypto/nacl/box, -// using the local machine key, and sent to: +// This is JSON-encoded and sent over the control plane connection to: // -// https://login.tailscale.com/machine//set-dns +// POST https:///machine/set-dns type SetDNSRequest struct { // Version is the client's capabilities // (CurrentCapabilityVersion) when using the Noise transport. @@ -2542,7 +2484,9 @@ type SetDNSRequest struct { type SetDNSResponse struct{} // HealthChangeRequest is the JSON request body type used to report -// node health changes to https:///machine//update-health. +// node health changes to: +// +// POST https:///machine/update-health. type HealthChangeRequest struct { Subsys string // a health.Subsystem value in string form Error string // or empty if cleared @@ -2557,6 +2501,10 @@ type HealthChangeRequest struct { // // As of 2024-12-30, this is an experimental dev feature // for internal testing. See tailscale/corp#24690. +// +// This is JSON-encoded and sent over the control plane connection to: +// +// PATCH https:///machine/set-device-attr type SetDeviceAttributesRequest struct { // Version is the current binary's [CurrentCapabilityVersion]. Version CapabilityVersion @@ -2746,6 +2694,8 @@ type SSHRecorderFailureAction struct { // SSHEventNotifyRequest is the JSON payload sent to the NotifyURL // for an SSH event. +// +// POST https:///[...varies, sent in SSH policy...] type SSHEventNotifyRequest struct { // EventType is the type of notify request being sent. EventType SSHEventType @@ -2806,9 +2756,9 @@ type SSHRecordingAttempt struct { FailureMessage string } -// QueryFeatureRequest is a request sent to "/machine/feature/query" -// to get instructions on how to enable a feature, such as Funnel, -// for the node's tailnet. 
+// QueryFeatureRequest is a request sent to "POST /machine/feature/query" to get +// instructions on how to enable a feature, such as Funnel, for the node's +// tailnet. // // See QueryFeatureResponse for response structure. type QueryFeatureRequest struct { @@ -2897,7 +2847,7 @@ type OverTLSPublicKeyResponse struct { // The token can be presented to any resource provider which offers OIDC // Federation. // -// It is JSON-encoded and sent over Noise to "/machine/id-token". +// It is JSON-encoded and sent over Noise to "POST /machine/id-token". type TokenRequest struct { // CapVersion is the client's current CapabilityVersion. CapVersion CapabilityVersion diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index da5873847..7532fc281 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -281,82 +281,6 @@ func TestHostinfoEqual(t *testing.T) { } } -func TestHostinfoHowEqual(t *testing.T) { - tests := []struct { - a, b *Hostinfo - want []string - }{ - { - a: nil, - b: nil, - want: nil, - }, - { - a: new(Hostinfo), - b: nil, - want: []string{"nil"}, - }, - { - a: nil, - b: new(Hostinfo), - want: []string{"nil"}, - }, - { - a: new(Hostinfo), - b: new(Hostinfo), - want: nil, - }, - { - a: &Hostinfo{ - IPNVersion: "1", - ShieldsUp: false, - RoutableIPs: []netip.Prefix{netip.MustParsePrefix("1.2.3.0/24")}, - }, - b: &Hostinfo{ - IPNVersion: "2", - ShieldsUp: true, - RoutableIPs: []netip.Prefix{netip.MustParsePrefix("1.2.3.0/25")}, - }, - want: []string{"IPNVersion", "ShieldsUp", "RoutableIPs"}, - }, - { - a: &Hostinfo{ - IPNVersion: "1", - }, - b: &Hostinfo{ - IPNVersion: "2", - NetInfo: new(NetInfo), - }, - want: []string{"IPNVersion", "NetInfo.nil"}, - }, - { - a: &Hostinfo{ - IPNVersion: "1", - NetInfo: &NetInfo{ - WorkingIPv6: "true", - HavePortMap: true, - LinkType: "foo", - PreferredDERP: 123, - DERPLatency: map[string]float64{ - "foo": 1.0, - }, - }, - }, - b: &Hostinfo{ - IPNVersion: "2", - NetInfo: &NetInfo{}, - }, - want: []string{"IPNVersion", "NetInfo.WorkingIPv6", "NetInfo.HavePortMap", "NetInfo.PreferredDERP", "NetInfo.LinkType", "NetInfo.DERPLatency"}, - }, - } - for i, tt := range tests { - got := tt.a.HowUnequal(tt.b) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%d. got %q; want %q", i, got, tt.want) - } - } -} - func TestHostinfoTailscaleSSHEnabled(t *testing.T) { tests := []struct { hi *Hostinfo From c4984632cac12abe28d9360e74ac0f63ce7463c8 Mon Sep 17 00:00:00 2001 From: Anton Date: Tue, 11 Feb 2025 17:26:07 +0000 Subject: [PATCH 0487/1708] net/dns: add a simple test for resolv.conf inotify watcher Updates #14699 Signed-off-by: Anton --- net/dns/direct_linux.go | 32 ++++++++++++--------- net/dns/direct_linux_test.go | 56 ++++++++++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+), 13 deletions(-) create mode 100644 net/dns/direct_linux_test.go diff --git a/net/dns/direct_linux.go b/net/dns/direct_linux.go index bdeefb352..20d96e2f1 100644 --- a/net/dns/direct_linux.go +++ b/net/dns/direct_linux.go @@ -6,20 +6,28 @@ package dns import ( "bytes" "context" + "fmt" "github.com/illarion/gonotify/v2" "tailscale.com/health" ) func (m *directManager) runFileWatcher() { - ctx, cancel := context.WithCancel(m.ctx) + if err := watchFile(m.ctx, "/etc/", resolvConf, m.checkForFileTrample); err != nil { + // This is all best effort for now, so surface warnings to users. + m.logf("dns: inotify: %s", err) + } +} + +// watchFile sets up an inotify watch for a given directory and +// calls the callback function every time a particular file is changed. 
+// The filename should be located in the provided directory. +func watchFile(ctx context.Context, dir, filename string, cb func()) error { + ctx, cancel := context.WithCancel(ctx) defer cancel() in, err := gonotify.NewInotify(ctx) if err != nil { - // Oh well, we tried. This is all best effort for now, to - // surface warnings to users. - m.logf("dns: inotify new: %v", err) - return + return fmt.Errorf("NewInotify: %w", err) } const events = gonotify.IN_ATTRIB | @@ -29,22 +37,20 @@ func (m *directManager) runFileWatcher() { gonotify.IN_MODIFY | gonotify.IN_MOVE - if err := in.AddWatch("/etc/", events); err != nil { - m.logf("dns: inotify addwatch: %v", err) - return + if err := in.AddWatch(dir, events); err != nil { + return fmt.Errorf("AddWatch: %w", err) } for { events, err := in.Read() if ctx.Err() != nil { - return + return ctx.Err() } if err != nil { - m.logf("dns: inotify read: %v", err) - return + return fmt.Errorf("Read: %w", err) } var match bool for _, ev := range events { - if ev.Name == resolvConf { + if ev.Name == filename { match = true break } @@ -52,7 +58,7 @@ func (m *directManager) runFileWatcher() { if !match { continue } - m.checkForFileTrample() + cb() } } diff --git a/net/dns/direct_linux_test.go b/net/dns/direct_linux_test.go new file mode 100644 index 000000000..079d060ed --- /dev/null +++ b/net/dns/direct_linux_test.go @@ -0,0 +1,56 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package dns + +import ( + "context" + "errors" + "fmt" + "os" + "sync/atomic" + "testing" + "time" + + "golang.org/x/sync/errgroup" +) + +func TestWatchFile(t *testing.T) { + dir := t.TempDir() + filepath := dir + "/test.txt" + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var callbackCalled atomic.Bool + callbackDone := make(chan bool) + callback := func() { + callbackDone <- true + callbackCalled.Store(true) + } + + var eg errgroup.Group + eg.Go(func() error { return watchFile(ctx, dir, filepath, callback) }) + + // Keep writing until we get a callback. 
+ func() { + for i := range 10000 { + if err := os.WriteFile(filepath, []byte(fmt.Sprintf("write%d", i)), 0644); err != nil { + t.Fatal(err) + } + select { + case <-callbackDone: + return + case <-time.After(10 * time.Millisecond): + } + } + }() + + cancel() + if err := eg.Wait(); err != nil && !errors.Is(err, context.Canceled) { + t.Error(err) + } + if !callbackCalled.Load() { + t.Error("callback was not called") + } +} From f35c49d2110c56354c8a131e32157e2d337815da Mon Sep 17 00:00:00 2001 From: Anton Date: Tue, 11 Feb 2025 17:39:53 +0000 Subject: [PATCH 0488/1708] net/dns: update to illarion/gonotify/v3 to fix a panic Fixes #14699 Signed-off-by: Anton --- cmd/k8s-operator/depaware.txt | 3 ++- cmd/tailscaled/depaware.txt | 3 ++- go.mod | 2 +- go.sum | 4 ++-- net/dns/direct_linux.go | 34 +++++++++++----------------------- 5 files changed, 18 insertions(+), 28 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index c67a02aa2..f0d572006 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -142,7 +142,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/gorilla/csrf from tailscale.com/client/web github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify/v2 from tailscale.com/net/dns + L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns + L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/feature/tap L github.com/insomniacslk/dhcp/iana from github.com/insomniacslk/dhcp/dhcpv4 L github.com/insomniacslk/dhcp/interfaces from github.com/insomniacslk/dhcp/dhcpv4 diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 9bae28107..d19fb5c96 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -114,7 +114,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/gorilla/csrf from tailscale.com/client/web github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify/v2 from tailscale.com/net/dns + L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns + L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/feature/tap L github.com/insomniacslk/dhcp/iana from github.com/insomniacslk/dhcp/dhcpv4 L github.com/insomniacslk/dhcp/interfaces from github.com/insomniacslk/dhcp/dhcpv4 diff --git a/go.mod b/go.mod index 625105a86..074482479 100644 --- a/go.mod +++ b/go.mod @@ -48,7 +48,7 @@ require ( github.com/google/uuid v1.6.0 github.com/goreleaser/nfpm/v2 v2.33.1 github.com/hdevalence/ed25519consensus v0.2.0 - github.com/illarion/gonotify/v2 v2.0.3 + github.com/illarion/gonotify/v3 v3.0.2 github.com/inetaf/tcpproxy v0.0.0-20250203165043-ded522cbd03f github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 github.com/jellydator/ttlcache/v3 v3.1.0 diff --git a/go.sum b/go.sum index b375d1f58..f12f5e514 100644 --- a/go.sum +++ b/go.sum @@ -554,8 +554,8 @@ github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq github.com/hugelgupf/vmtest v0.0.0-20240102225328-693afabdd27f h1:ov45/OzrJG8EKbGjn7jJZQJTN7Z1t73sFYNIRd64YlI= github.com/hugelgupf/vmtest 
v0.0.0-20240102225328-693afabdd27f/go.mod h1:JoDrYMZpDPYo6uH9/f6Peqms3zNNWT2XiGgioMOIGuI= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A= -github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= +github.com/illarion/gonotify/v3 v3.0.2 h1:O7S6vcopHexutmpObkeWsnzMJt/r1hONIEogeVNmJMk= +github.com/illarion/gonotify/v3 v3.0.2/go.mod h1:HWGPdPe817GfvY3w7cx6zkbzNZfi3QjcBm/wgVvEL1U= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= diff --git a/net/dns/direct_linux.go b/net/dns/direct_linux.go index 20d96e2f1..8dccc5bfb 100644 --- a/net/dns/direct_linux.go +++ b/net/dns/direct_linux.go @@ -8,7 +8,7 @@ import ( "context" "fmt" - "github.com/illarion/gonotify/v2" + "github.com/illarion/gonotify/v3" "tailscale.com/health" ) @@ -25,10 +25,6 @@ func (m *directManager) runFileWatcher() { func watchFile(ctx context.Context, dir, filename string, cb func()) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - in, err := gonotify.NewInotify(ctx) - if err != nil { - return fmt.Errorf("NewInotify: %w", err) - } const events = gonotify.IN_ATTRIB | gonotify.IN_CLOSE_WRITE | @@ -37,28 +33,20 @@ func watchFile(ctx context.Context, dir, filename string, cb func()) error { gonotify.IN_MODIFY | gonotify.IN_MOVE - if err := in.AddWatch(dir, events); err != nil { - return fmt.Errorf("AddWatch: %w", err) + watcher, err := gonotify.NewDirWatcher(ctx, events, dir) + if err != nil { + return fmt.Errorf("NewDirWatcher: %w", err) } + for { - events, err := in.Read() - if ctx.Err() != nil { - return ctx.Err() - } - if err != nil { - return fmt.Errorf("Read: %w", err) - } - var match bool - for _, ev := range events { - if ev.Name == filename { - match = true - break + select { + case event := <-watcher.C: + if event.Name == filename { + cb() } + case <-ctx.Done(): + return ctx.Err() } - if !match { - continue - } - cb() } } From 926a43fe518d33e3dfc5e8d82fd26e5d0e4f2853 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Tue, 11 Feb 2025 14:58:57 -0500 Subject: [PATCH 0489/1708] tailcfg: make NetPortRange.Bits omitempty This is deprecated anyway, and we don't need to be sending `"Bits":null` on the wire for the majority of clients. Updates tailscale/corp#20965 Updates tailscale/corp#26353 Signed-off-by: Andrew Dunham Change-Id: I95a3e3d72619389ae34a6547ebf47043445374e1 --- tailcfg/tailcfg.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index b513f3d6c..405eb1a41 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -1409,7 +1409,7 @@ var PortRangeAny = PortRange{0, 65535} type NetPortRange struct { _ structs.Incomparable IP string // IP, CIDR, Range, or "*" (same formats as FilterRule.SrcIPs) - Bits *int // deprecated; the 2020 way to turn IP into a CIDR. See FilterRule.SrcBits. + Bits *int `json:",omitempty"` // deprecated; the 2020 way to turn IP into a CIDR. See FilterRule.SrcBits. 
Ports PortRange } From 5a082fccecbf8bedcdc8bd427bbd1f9068556dd3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 11 Feb 2025 13:02:42 -0800 Subject: [PATCH 0490/1708] tailcfg: remove ancient UserProfiles.Roles field And add omitempty to the ProfilePicURL too while here. Plenty of users (and tagged devices) don't have profile pics. Updates #14988 Change-Id: I6534bc14edb58fe1034d2d35ae2395f09fd7dd0d Signed-off-by: Brad Fitzpatrick --- tailcfg/tailcfg.go | 16 +--------------- tailcfg/tailcfg_clone.go | 1 - tailcfg/tailcfg_test.go | 23 ----------------------- tailcfg/tailcfg_view.go | 2 -- 4 files changed, 1 insertion(+), 41 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 405eb1a41..35a35dd76 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -259,11 +259,7 @@ type UserProfile struct { ID UserID LoginName string // "alice@smith.com"; for display purposes only (provider is not listed) DisplayName string // "Alice Smith" - ProfilePicURL string - - // Roles exists for legacy reasons, to keep old macOS clients - // happy. It JSON marshals as []. - Roles emptyStructJSONSlice + ProfilePicURL string `json:",omitempty"` } func (p *UserProfile) Equal(p2 *UserProfile) bool { @@ -279,16 +275,6 @@ func (p *UserProfile) Equal(p2 *UserProfile) bool { p.ProfilePicURL == p2.ProfilePicURL } -type emptyStructJSONSlice struct{} - -var emptyJSONSliceBytes = []byte("[]") - -func (emptyStructJSONSlice) MarshalJSON() ([]byte, error) { - return emptyJSONSliceBytes, nil -} - -func (emptyStructJSONSlice) UnmarshalJSON([]byte) error { return nil } - // RawMessage is a raw encoded JSON value. It implements Marshaler and // Unmarshaler and can be used to delay JSON decoding or precompute a JSON // encoding. diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index f7126ca41..aeeacebec 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -623,7 +623,6 @@ var _UserProfileCloneNeedsRegeneration = UserProfile(struct { LoginName string DisplayName string ProfilePicURL string - Roles emptyStructJSONSlice }{}) // Clone duplicates src into dst and reports whether it succeeded. diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index 7532fc281..dd81af5d6 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -10,7 +10,6 @@ import ( "reflect" "regexp" "strconv" - "strings" "testing" "time" @@ -649,28 +648,6 @@ func TestCloneNode(t *testing.T) { } } -func TestUserProfileJSONMarshalForMac(t *testing.T) { - // Old macOS clients had a bug where they required - // UserProfile.Roles to be non-null. Lock that in - // 1.0.x/1.2.x clients are gone in the wild. - // See mac commit 0242c08a2ca496958027db1208f44251bff8488b (Sep 30). - // It was fixed in at least 1.4.x, and perhaps 1.2.x. 
- j, err := json.Marshal(UserProfile{}) - if err != nil { - t.Fatal(err) - } - const wantSub = `"Roles":[]` - if !strings.Contains(string(j), wantSub) { - t.Fatalf("didn't contain %#q; got: %s", wantSub, j) - } - - // And back: - var up UserProfile - if err := json.Unmarshal(j, &up); err != nil { - t.Fatalf("Unmarshal: %v", err) - } -} - func TestEndpointTypeMarshal(t *testing.T) { eps := []EndpointType{ EndpointUnknownType, diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 55c244fbf..4b56b8c09 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -1403,7 +1403,6 @@ func (v UserProfileView) ID() UserID { return v.ж.ID } func (v UserProfileView) LoginName() string { return v.ж.LoginName } func (v UserProfileView) DisplayName() string { return v.ж.DisplayName } func (v UserProfileView) ProfilePicURL() string { return v.ж.ProfilePicURL } -func (v UserProfileView) Roles() emptyStructJSONSlice { return v.ж.Roles } func (v UserProfileView) Equal(v2 UserProfileView) bool { return v.ж.Equal(v2.ж) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -1412,5 +1411,4 @@ var _UserProfileViewNeedsRegeneration = UserProfile(struct { LoginName string DisplayName string ProfilePicURL string - Roles emptyStructJSONSlice }{}) From e9e2bc5bd72190d288d16d8f8836605f48e46e57 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 7 Feb 2025 10:47:14 -0600 Subject: [PATCH 0491/1708] ipn/ipn{auth,server}: update ipnauth.Actor to carry a context The context carries additional information about the actor, such as the request reason, and is canceled when the actor is done. Additionally, we implement three new ipn.Actor types that wrap other actors to modify their behavior: - WithRequestReason, which adds a request reason to the actor; - WithoutClose, which narrows the actor's interface to prevent it from being closed; - WithPolicyChecks, which adds policy checks to the actor's CheckProfileAccess method. Updates #14823 Signed-off-by: Nick Khyl --- ipn/ipnauth/actor.go | 31 +++++++++++++++++++++++++++++++ ipn/ipnauth/policy.go | 26 ++++++++++++++++++++++++++ ipn/ipnauth/self.go | 17 +++++++++++------ ipn/ipnauth/test_actor.go | 6 ++++++ ipn/ipnserver/actor.go | 3 +++ 5 files changed, 77 insertions(+), 6 deletions(-) diff --git a/ipn/ipnauth/actor.go b/ipn/ipnauth/actor.go index 446cb4635..8a0e77645 100644 --- a/ipn/ipnauth/actor.go +++ b/ipn/ipnauth/actor.go @@ -4,9 +4,11 @@ package ipnauth import ( + "context" "encoding/json" "fmt" + "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn" ) @@ -32,6 +34,11 @@ type Actor interface { // a connected LocalAPI client. Otherwise, it returns a zero value and false. ClientID() (_ ClientID, ok bool) + // Context returns the context associated with the actor. + // It carries additional information about the actor + // and is canceled when the actor is done. + Context() context.Context + // CheckProfileAccess checks whether the actor has the necessary access rights // to perform a given action on the specified Tailscale profile. // It returns an error if access is denied. @@ -102,3 +109,27 @@ func (id ClientID) MarshalJSON() ([]byte, error) { func (id *ClientID) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &id.v) } + +type actorWithRequestReason struct { + Actor + ctx context.Context +} + +// WithRequestReason returns an [Actor] that wraps the given actor and +// carries the specified request reason in its context. 
+func WithRequestReason(actor Actor, requestReason string) Actor { + ctx := apitype.RequestReasonKey.WithValue(actor.Context(), requestReason) + return &actorWithRequestReason{Actor: actor, ctx: ctx} +} + +// Context implements [Actor]. +func (a *actorWithRequestReason) Context() context.Context { return a.ctx } + +type withoutCloseActor struct{ Actor } + +// WithoutClose returns an [Actor] that does not expose the [ActorCloser] interface. +// In other words, _, ok := WithoutClose(actor).(ActorCloser) will always be false, +// even if the original actor implements [ActorCloser]. +func WithoutClose(actor Actor) Actor { + return withoutCloseActor{actor} +} diff --git a/ipn/ipnauth/policy.go b/ipn/ipnauth/policy.go index c61f9cd89..f09be0fcb 100644 --- a/ipn/ipnauth/policy.go +++ b/ipn/ipnauth/policy.go @@ -7,10 +7,36 @@ import ( "errors" "fmt" + "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn" "tailscale.com/util/syspolicy" ) +type actorWithPolicyChecks struct{ Actor } + +// WithPolicyChecks returns an [Actor] that wraps the given actor and +// performs additional policy checks on top of the access checks +// implemented by the wrapped actor. +func WithPolicyChecks(actor Actor) Actor { + // TODO(nickkhyl): We should probably exclude the Windows Local System + // account from policy checks as well. + switch actor.(type) { + case unrestricted: + return actor + default: + return &actorWithPolicyChecks{Actor: actor} + } +} + +// CheckProfileAccess implements [Actor]. +func (a actorWithPolicyChecks) CheckProfileAccess(profile ipn.LoginProfileView, requestedAccess ProfileAccess, auditLogger AuditLogFunc) error { + if err := a.Actor.CheckProfileAccess(profile, requestedAccess, auditLogger); err != nil { + return err + } + requestReason := apitype.RequestReasonKey.Value(a.Context()) + return CheckDisconnectPolicy(a.Actor, profile, requestReason, auditLogger) +} + // CheckDisconnectPolicy checks if the policy allows the specified actor to disconnect // Tailscale with the given optional reason. It returns nil if the operation is allowed, // or an error if it is not. If auditLogger is non-nil, it is called to log the action diff --git a/ipn/ipnauth/self.go b/ipn/ipnauth/self.go index 271be9815..9b430dc6d 100644 --- a/ipn/ipnauth/self.go +++ b/ipn/ipnauth/self.go @@ -4,6 +4,8 @@ package ipnauth import ( + "context" + "tailscale.com/ipn" ) @@ -17,18 +19,21 @@ var Self Actor = unrestricted{} type unrestricted struct{} // UserID implements [Actor]. -func (u unrestricted) UserID() ipn.WindowsUserID { return "" } +func (unrestricted) UserID() ipn.WindowsUserID { return "" } // Username implements [Actor]. -func (u unrestricted) Username() (string, error) { return "", nil } +func (unrestricted) Username() (string, error) { return "", nil } + +// Context implements [Actor]. +func (unrestricted) Context() context.Context { return context.Background() } // ClientID implements [Actor]. // It always returns (NoClientID, false) because the tailscaled itself // is not a connected LocalAPI client. -func (u unrestricted) ClientID() (_ ClientID, ok bool) { return NoClientID, false } +func (unrestricted) ClientID() (_ ClientID, ok bool) { return NoClientID, false } // CheckProfileAccess implements [Actor]. -func (u unrestricted) CheckProfileAccess(_ ipn.LoginProfileView, _ ProfileAccess, _ AuditLogFunc) error { +func (unrestricted) CheckProfileAccess(_ ipn.LoginProfileView, _ ProfileAccess, _ AuditLogFunc) error { // Unrestricted access to all profiles. 
return nil } @@ -37,10 +42,10 @@ func (u unrestricted) CheckProfileAccess(_ ipn.LoginProfileView, _ ProfileAccess // // Deprecated: this method exists for compatibility with the current (as of 2025-01-28) // permission model and will be removed as we progress on tailscale/corp#18342. -func (u unrestricted) IsLocalSystem() bool { return false } +func (unrestricted) IsLocalSystem() bool { return false } // IsLocalAdmin implements [Actor]. // // Deprecated: this method exists for compatibility with the current (as of 2025-01-28) // permission model and will be removed as we progress on tailscale/corp#18342. -func (u unrestricted) IsLocalAdmin(operatorUID string) bool { return false } +func (unrestricted) IsLocalAdmin(operatorUID string) bool { return false } diff --git a/ipn/ipnauth/test_actor.go b/ipn/ipnauth/test_actor.go index ba4e03c93..80c5fcc8a 100644 --- a/ipn/ipnauth/test_actor.go +++ b/ipn/ipnauth/test_actor.go @@ -4,6 +4,8 @@ package ipnauth import ( + "cmp" + "context" "errors" "tailscale.com/ipn" @@ -17,6 +19,7 @@ type TestActor struct { Name string // username associated with the actor, or "" NameErr error // error to be returned by [TestActor.Username] CID ClientID // non-zero if the actor represents a connected LocalAPI client + Ctx context.Context // context associated with the actor LocalSystem bool // whether the actor represents the special Local System account on Windows LocalAdmin bool // whether the actor has local admin access } @@ -30,6 +33,9 @@ func (a *TestActor) Username() (string, error) { return a.Name, a.NameErr } // ClientID implements [Actor]. func (a *TestActor) ClientID() (_ ClientID, ok bool) { return a.CID, a.CID != NoClientID } +// Context implements [Actor]. +func (a *TestActor) Context() context.Context { return cmp.Or(a.Ctx, context.Background()) } + // CheckProfileAccess implements [Actor]. func (a *TestActor) CheckProfileAccess(profile ipn.LoginProfileView, _ ProfileAccess, _ AuditLogFunc) error { return errors.New("profile access denied") diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 6ee7a04d7..594ebf2d5 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -118,6 +118,9 @@ func (a *actor) ClientID() (_ ipnauth.ClientID, ok bool) { return a.clientID, a.clientID != ipnauth.NoClientID } +// Context implements [ipnauth.Actor]. +func (a *actor) Context() context.Context { return context.Background() } + // Username implements [ipnauth.Actor]. func (a *actor) Username() (string, error) { if a.ci == nil { From 5eacf6184466c42406f9edb9ff317c349b50746e Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 7 Feb 2025 14:01:52 -0600 Subject: [PATCH 0492/1708] ipn/ipnauth: implement WindowsActor WindowsActor is an ipnauth.Actor implementation that represents a logged-in Windows user by wrapping their Windows user token. Updates #14823 Signed-off-by: Nick Khyl --- ipn/ipnauth/actor_windows.go | 102 +++++++++++++++++++++++++++++++++ ipn/ipnauth/ipnauth_windows.go | 10 +++- 2 files changed, 109 insertions(+), 3 deletions(-) create mode 100644 ipn/ipnauth/actor_windows.go diff --git a/ipn/ipnauth/actor_windows.go b/ipn/ipnauth/actor_windows.go new file mode 100644 index 000000000..90d3bdd36 --- /dev/null +++ b/ipn/ipnauth/actor_windows.go @@ -0,0 +1,102 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnauth + +import ( + "context" + "errors" + + "golang.org/x/sys/windows" + "tailscale.com/ipn" + "tailscale.com/types/lazy" +) + +// WindowsActor implements [Actor]. 
+var _ Actor = (*WindowsActor)(nil) + +// WindowsActor represents a logged in Windows user. +type WindowsActor struct { + ctx context.Context + cancelCtx context.CancelFunc + token WindowsToken + uid ipn.WindowsUserID + username lazy.SyncValue[string] +} + +// NewWindowsActorWithToken returns a new [WindowsActor] for the user +// represented by the given [windows.Token]. +// It takes ownership of the token. +func NewWindowsActorWithToken(t windows.Token) (_ *WindowsActor, err error) { + tok := newToken(t) + uid, err := tok.UID() + if err != nil { + t.Close() + return nil, err + } + ctx, cancelCtx := context.WithCancel(context.Background()) + return &WindowsActor{ctx: ctx, cancelCtx: cancelCtx, token: tok, uid: uid}, nil +} + +// UserID implements [Actor]. +func (a *WindowsActor) UserID() ipn.WindowsUserID { + return a.uid +} + +// Username implements [Actor]. +func (a *WindowsActor) Username() (string, error) { + return a.username.GetErr(a.token.Username) +} + +// ClientID implements [Actor]. +func (a *WindowsActor) ClientID() (_ ClientID, ok bool) { + // TODO(nickkhyl): assign and return a client ID when the actor + // represents a connected LocalAPI client. + return NoClientID, false +} + +// Context implements [Actor]. +func (a *WindowsActor) Context() context.Context { + return a.ctx +} + +// CheckProfileAccess implements [Actor]. +func (a *WindowsActor) CheckProfileAccess(profile ipn.LoginProfileView, _ ProfileAccess, _ AuditLogFunc) error { + if profile.LocalUserID() != a.UserID() { + // TODO(nickkhyl): return errors of more specific types and have them + // translated to the appropriate HTTP status codes in the API handler. + return errors.New("the target profile does not belong to the user") + } + return nil +} + +// IsLocalSystem implements [Actor]. +// +// Deprecated: this method exists for compatibility with the current (as of 2025-02-06) +// permission model and will be removed as we progress on tailscale/corp#18342. +func (a *WindowsActor) IsLocalSystem() bool { + // https://web.archive.org/web/2024/https://learn.microsoft.com/en-us/windows-server/identity/ad-ds/manage/understand-security-identifiers + const systemUID = ipn.WindowsUserID("S-1-5-18") + return a.uid == systemUID +} + +// IsLocalAdmin implements [Actor]. +// +// Deprecated: this method exists for compatibility with the current (as of 2025-02-06) +// permission model and will be removed as we progress on tailscale/corp#18342. +func (a *WindowsActor) IsLocalAdmin(operatorUID string) bool { + return a.token.IsElevated() +} + +// Close releases resources associated with the actor +// and cancels its context. 
+func (a *WindowsActor) Close() error { + if a.token != nil { + if err := a.token.Close(); err != nil { + return err + } + a.token = nil + } + a.cancelCtx() + return nil +} diff --git a/ipn/ipnauth/ipnauth_windows.go b/ipn/ipnauth/ipnauth_windows.go index 9abd04cd1..1138bc23d 100644 --- a/ipn/ipnauth/ipnauth_windows.go +++ b/ipn/ipnauth/ipnauth_windows.go @@ -36,6 +36,12 @@ type token struct { t windows.Token } +func newToken(t windows.Token) *token { + tok := &token{t: t} + runtime.SetFinalizer(tok, func(t *token) { t.Close() }) + return tok +} + func (t *token) UID() (ipn.WindowsUserID, error) { sid, err := t.uid() if err != nil { @@ -184,7 +190,5 @@ func (ci *ConnIdentity) WindowsToken() (WindowsToken, error) { return nil, err } - result := &token{t: windows.Token(h)} - runtime.SetFinalizer(result, func(t *token) { t.Close() }) - return result, nil + return newToken(windows.Token(h)), nil } From bc0cd512ee112d31643ad9e326099e92139aa301 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 10 Feb 2025 17:47:10 -0600 Subject: [PATCH 0493/1708] ipn/desktop: add a new package for managing desktop sessions on Windows This PR adds a new package, ipn/desktop, which provides a platform-agnostic interface for enumerating desktop sessions and registering session callbacks. Currently, it is implemented only for Windows. Updates #14823 Signed-off-by: Nick Khyl --- ipn/desktop/doc.go | 6 + ipn/desktop/mksyscall.go | 24 ++ ipn/desktop/session.go | 58 +++ ipn/desktop/sessions.go | 60 +++ ipn/desktop/sessions_notwindows.go | 15 + ipn/desktop/sessions_windows.go | 672 +++++++++++++++++++++++++++++ ipn/desktop/zsyscall_windows.go | 159 +++++++ 7 files changed, 994 insertions(+) create mode 100644 ipn/desktop/doc.go create mode 100644 ipn/desktop/mksyscall.go create mode 100644 ipn/desktop/session.go create mode 100644 ipn/desktop/sessions.go create mode 100644 ipn/desktop/sessions_notwindows.go create mode 100644 ipn/desktop/sessions_windows.go create mode 100644 ipn/desktop/zsyscall_windows.go diff --git a/ipn/desktop/doc.go b/ipn/desktop/doc.go new file mode 100644 index 000000000..64a332792 --- /dev/null +++ b/ipn/desktop/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package desktop facilitates interaction with the desktop environment +// and user sessions. As of 2025-02-06, it is only implemented for Windows. 
+package desktop diff --git a/ipn/desktop/mksyscall.go b/ipn/desktop/mksyscall.go new file mode 100644 index 000000000..305138468 --- /dev/null +++ b/ipn/desktop/mksyscall.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package desktop + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go mksyscall.go +//go:generate go run golang.org/x/tools/cmd/goimports -w zsyscall_windows.go + +//sys setLastError(dwErrorCode uint32) = kernel32.SetLastError + +//sys registerClassEx(windowClass *_WNDCLASSEX) (atom uint16, err error) [atom==0] = user32.RegisterClassExW +//sys createWindowEx(dwExStyle uint32, lpClassName *uint16, lpWindowName *uint16, dwStyle uint32, x int32, y int32, nWidth int32, nHeight int32, hWndParent windows.HWND, hMenu windows.Handle, hInstance windows.Handle, lpParam unsafe.Pointer) (hWnd windows.HWND, err error) [hWnd==0] = user32.CreateWindowExW +//sys defWindowProc(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) (res uintptr) = user32.DefWindowProcW +//sys setWindowLongPtr(hwnd windows.HWND, index int32, newLong uintptr) (res uintptr, err error) [res==0 && e1!=0] = user32.SetWindowLongPtrW +//sys getWindowLongPtr(hwnd windows.HWND, index int32) (res uintptr, err error) [res==0 && e1!=0] = user32.GetWindowLongPtrW +//sys sendMessage(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) (res uintptr) = user32.SendMessageW +//sys getMessage(lpMsg *_MSG, hwnd windows.HWND, msgMin uint32, msgMax uint32) (ret int32) = user32.GetMessageW +//sys translateMessage(lpMsg *_MSG) (res bool) = user32.TranslateMessage +//sys dispatchMessage(lpMsg *_MSG) (res uintptr) = user32.DispatchMessageW +//sys destroyWindow(hwnd windows.HWND) (err error) [int32(failretval)==0] = user32.DestroyWindow +//sys postQuitMessage(exitCode int32) = user32.PostQuitMessage + +//sys registerSessionNotification(hServer windows.Handle, hwnd windows.HWND, flags uint32) (err error) [int32(failretval)==0] = wtsapi32.WTSRegisterSessionNotificationEx +//sys unregisterSessionNotification(hServer windows.Handle, hwnd windows.HWND) (err error) [int32(failretval)==0] = wtsapi32.WTSUnRegisterSessionNotificationEx diff --git a/ipn/desktop/session.go b/ipn/desktop/session.go new file mode 100644 index 000000000..c95378914 --- /dev/null +++ b/ipn/desktop/session.go @@ -0,0 +1,58 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package desktop + +import ( + "fmt" + + "tailscale.com/ipn/ipnauth" +) + +// SessionID is a unique identifier of a desktop session. +type SessionID uint + +// SessionStatus is the status of a desktop session. +type SessionStatus int + +const ( + // ClosedSession is a session that does not exist, is not yet initialized by the OS, + // or has been terminated. + ClosedSession SessionStatus = iota + // ForegroundSession is a session that a user can interact with, + // such as when attached to a physical console or an active, + // unlocked RDP connection. + ForegroundSession + // BackgroundSession indicates that the session is locked, disconnected, + // or otherwise running without user presence or interaction. + BackgroundSession +) + +// String implements [fmt.Stringer]. 
+func (s SessionStatus) String() string { + switch s { + case ClosedSession: + return "Closed" + case ForegroundSession: + return "Foreground" + case BackgroundSession: + return "Background" + default: + panic("unreachable") + } +} + +// Session is a state of a desktop session at a given point in time. +type Session struct { + ID SessionID // Identifier of the session; can be reused after the session is closed. + Status SessionStatus // The status of the session, such as foreground or background. + User ipnauth.Actor // User logged into the session. +} + +// Description returns a human-readable description of the session. +func (s *Session) Description() string { + if maybeUsername, _ := s.User.Username(); maybeUsername != "" { // best effort + return fmt.Sprintf("Session %d - %q (%s)", s.ID, maybeUsername, s.Status) + } + return fmt.Sprintf("Session %d (%s)", s.ID, s.Status) +} diff --git a/ipn/desktop/sessions.go b/ipn/desktop/sessions.go new file mode 100644 index 000000000..8bf7a75e2 --- /dev/null +++ b/ipn/desktop/sessions.go @@ -0,0 +1,60 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package desktop + +import ( + "errors" + "runtime" +) + +// ErrNotImplemented is returned by [NewSessionManager] when it is not +// implemented for the current GOOS. +var ErrNotImplemented = errors.New("not implemented for GOOS=" + runtime.GOOS) + +// SessionInitCallback is a function that is called once per [Session]. +// It returns an optional cleanup function that is called when the session +// is about to be destroyed, or nil if no cleanup is needed. +// It is not safe to call SessionManager methods from within the callback. +type SessionInitCallback func(session *Session) (cleanup func()) + +// SessionStateCallback is a function that reports the initial or updated +// state of a [Session], such as when it transitions between foreground and background. +// It is guaranteed to be called after all registered [SessionInitCallback] functions +// have completed, and before any cleanup functions are called for the same session. +// It is not safe to call SessionManager methods from within the callback. +type SessionStateCallback func(session *Session) + +// SessionManager is an interface that provides access to desktop sessions on the current platform. +// It is safe for concurrent use. +type SessionManager interface { + // Init explicitly initializes the receiver. + // Unless the receiver is explicitly initialized, it will be lazily initialized + // on the first call to any other method. + // It is safe to call Init multiple times. + Init() error + + // Sessions returns a session snapshot taken at the time of the call. + // Since sessions can be created or destroyed at any time, it may become + // outdated as soon as it is returned. + // + // It is primarily intended for logging and debugging. + // Prefer registering a [SessionInitCallback] or [SessionStateCallback] + // in contexts requiring stronger guarantees. + Sessions() (map[SessionID]*Session, error) + + // RegisterInitCallback registers a [SessionInitCallback] that is called for each existing session + // and for each new session that is created, until the returned unregister function is called. + // If the specified [SessionInitCallback] returns a cleanup function, it is called when the session + // is about to be destroyed. The callback function is guaranteed to be called once and only once + // for each existing and new session. 
+ RegisterInitCallback(cb SessionInitCallback) (unregister func(), err error) + + // RegisterStateCallback registers a [SessionStateCallback] that is called for each existing session + // and every time the state of a session changes, until the returned unregister function is called. + RegisterStateCallback(cb SessionStateCallback) (unregister func(), err error) + + // Close waits for all registered callbacks to complete + // and releases resources associated with the receiver. + Close() error +} diff --git a/ipn/desktop/sessions_notwindows.go b/ipn/desktop/sessions_notwindows.go new file mode 100644 index 000000000..da3230a45 --- /dev/null +++ b/ipn/desktop/sessions_notwindows.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !windows + +package desktop + +import "tailscale.com/types/logger" + +// NewSessionManager returns a new [SessionManager] for the current platform, +// [ErrNotImplemented] if the platform is not supported, or an error if the +// session manager could not be created. +func NewSessionManager(logger.Logf) (SessionManager, error) { + return nil, ErrNotImplemented +} diff --git a/ipn/desktop/sessions_windows.go b/ipn/desktop/sessions_windows.go new file mode 100644 index 000000000..f1b88d573 --- /dev/null +++ b/ipn/desktop/sessions_windows.go @@ -0,0 +1,672 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package desktop + +import ( + "context" + "errors" + "fmt" + "runtime" + "sync" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" + "tailscale.com/ipn/ipnauth" + "tailscale.com/types/logger" + "tailscale.com/util/must" + "tailscale.com/util/set" +) + +// wtsManager is a [SessionManager] implementation for Windows. +type wtsManager struct { + logf logger.Logf + ctx context.Context // cancelled when the manager is closed + ctxCancel context.CancelFunc + + initOnce func() error + watcher *sessionWatcher + + mu sync.Mutex + sessions map[SessionID]*wtsSession + initCbs set.HandleSet[SessionInitCallback] + stateCbs set.HandleSet[SessionStateCallback] +} + +// NewSessionManager returns a new [SessionManager] for the current platform, +func NewSessionManager(logf logger.Logf) (SessionManager, error) { + ctx, ctxCancel := context.WithCancel(context.Background()) + m := &wtsManager{ + logf: logf, + ctx: ctx, + ctxCancel: ctxCancel, + sessions: make(map[SessionID]*wtsSession), + } + m.watcher = newSessionWatcher(m.ctx, m.logf, m.sessionEventHandler) + + m.initOnce = sync.OnceValue(func() error { + if err := waitUntilWTSReady(m.ctx); err != nil { + return fmt.Errorf("WTS is not ready: %w", err) + } + + m.mu.Lock() + defer m.mu.Unlock() + if err := m.watcher.Start(); err != nil { + return fmt.Errorf("failed to start session watcher: %w", err) + } + + var err error + m.sessions, err = enumerateSessions() + return err // may be nil or non-nil + }) + return m, nil +} + +// Init implements [SessionManager]. +func (m *wtsManager) Init() error { + return m.initOnce() +} + +// Sessions implements [SessionManager]. +func (m *wtsManager) Sessions() (map[SessionID]*Session, error) { + if err := m.initOnce(); err != nil { + return nil, err + } + + m.mu.Lock() + defer m.mu.Unlock() + sessions := make(map[SessionID]*Session, len(m.sessions)) + for _, s := range m.sessions { + sessions[s.id] = s.AsSession() + } + return sessions, nil +} + +// RegisterInitCallback implements [SessionManager]. 
+func (m *wtsManager) RegisterInitCallback(cb SessionInitCallback) (unregister func(), err error) { + if err := m.initOnce(); err != nil { + return nil, err + } + if cb == nil { + return nil, errors.New("nil callback") + } + + m.mu.Lock() + defer m.mu.Unlock() + handle := m.initCbs.Add(cb) + + // TODO(nickkhyl): enqueue callbacks in a separate goroutine? + for _, s := range m.sessions { + if cleanup := cb(s.AsSession()); cleanup != nil { + s.cleanup = append(s.cleanup, cleanup) + } + } + + return func() { + m.mu.Lock() + defer m.mu.Unlock() + delete(m.initCbs, handle) + }, nil +} + +// RegisterStateCallback implements [SessionManager]. +func (m *wtsManager) RegisterStateCallback(cb SessionStateCallback) (unregister func(), err error) { + if err := m.initOnce(); err != nil { + return nil, err + } + if cb == nil { + return nil, errors.New("nil callback") + } + + m.mu.Lock() + defer m.mu.Unlock() + handle := m.stateCbs.Add(cb) + + // TODO(nickkhyl): enqueue callbacks in a separate goroutine? + for _, s := range m.sessions { + cb(s.AsSession()) + } + + return func() { + m.mu.Lock() + defer m.mu.Unlock() + delete(m.stateCbs, handle) + }, nil +} + +func (m *wtsManager) sessionEventHandler(id SessionID, event uint32) { + m.mu.Lock() + defer m.mu.Unlock() + switch event { + case windows.WTS_SESSION_LOGON: + // The session may have been created after we started watching, + // but before the initial enumeration was performed. + // Do not create a new session if it already exists. + if _, _, err := m.getOrCreateSessionLocked(id); err != nil { + m.logf("[unexpected] getOrCreateSessionLocked(%d): %v", id, err) + } + case windows.WTS_SESSION_LOCK: + if err := m.setSessionStatusLocked(id, BackgroundSession); err != nil { + m.logf("[unexpected] setSessionStatusLocked(%d, BackgroundSession): %v", id, err) + } + case windows.WTS_SESSION_UNLOCK: + if err := m.setSessionStatusLocked(id, ForegroundSession); err != nil { + m.logf("[unexpected] setSessionStatusLocked(%d, ForegroundSession): %v", id, err) + } + case windows.WTS_SESSION_LOGOFF: + if err := m.deleteSessionLocked(id); err != nil { + m.logf("[unexpected] deleteSessionLocked(%d): %v", id, err) + } + } +} + +func (m *wtsManager) getOrCreateSessionLocked(id SessionID) (_ *wtsSession, created bool, err error) { + if s, ok := m.sessions[id]; ok { + return s, false, nil + } + + s, err := newWTSSession(id, ForegroundSession) + if err != nil { + return nil, false, err + } + m.sessions[id] = s + + session := s.AsSession() + // TODO(nickkhyl): enqueue callbacks in a separate goroutine? + for _, cb := range m.initCbs { + if cleanup := cb(session); cleanup != nil { + s.cleanup = append(s.cleanup, cleanup) + } + } + for _, cb := range m.stateCbs { + cb(session) + } + + return s, true, err +} + +func (m *wtsManager) setSessionStatusLocked(id SessionID, status SessionStatus) error { + s, _, err := m.getOrCreateSessionLocked(id) + if err != nil { + return err + } + if s.status == status { + return nil + } + + s.status = status + session := s.AsSession() + // TODO(nickkhyl): enqueue callbacks in a separate goroutine? + for _, cb := range m.stateCbs { + cb(session) + } + return nil +} + +func (m *wtsManager) deleteSessionLocked(id SessionID) error { + s, ok := m.sessions[id] + if !ok { + return nil + } + + s.status = ClosedSession + session := s.AsSession() + // TODO(nickkhyl): enqueue callbacks (and [wtsSession.close]!) in a separate goroutine? 
+ for _, cb := range m.stateCbs { + cb(session) + } + + delete(m.sessions, id) + return s.close() +} + +func (m *wtsManager) Close() error { + m.ctxCancel() + + if m.watcher != nil { + err := m.watcher.Stop() + if err != nil { + return err + } + m.watcher = nil + } + + m.mu.Lock() + defer m.mu.Unlock() + m.initCbs = nil + m.stateCbs = nil + errs := make([]error, 0, len(m.sessions)) + for _, s := range m.sessions { + errs = append(errs, s.close()) + } + m.sessions = nil + return errors.Join(errs...) +} + +type wtsSession struct { + id SessionID + user *ipnauth.WindowsActor + + status SessionStatus + + cleanup []func() +} + +func newWTSSession(id SessionID, status SessionStatus) (*wtsSession, error) { + var token windows.Token + if err := windows.WTSQueryUserToken(uint32(id), &token); err != nil { + return nil, err + } + user, err := ipnauth.NewWindowsActorWithToken(token) + if err != nil { + return nil, err + } + return &wtsSession{id, user, status, nil}, nil +} + +// enumerateSessions returns a map of all active WTS sessions. +func enumerateSessions() (map[SessionID]*wtsSession, error) { + const reserved, version uint32 = 0, 1 + var numSessions uint32 + var sessionInfos *windows.WTS_SESSION_INFO + if err := windows.WTSEnumerateSessions(_WTS_CURRENT_SERVER_HANDLE, reserved, version, &sessionInfos, &numSessions); err != nil { + return nil, fmt.Errorf("WTSEnumerateSessions failed: %w", err) + } + defer windows.WTSFreeMemory(uintptr(unsafe.Pointer(sessionInfos))) + + sessions := make(map[SessionID]*wtsSession, numSessions) + for _, si := range unsafe.Slice(sessionInfos, numSessions) { + status := _WTS_CONNECTSTATE_CLASS(si.State).ToSessionStatus() + if status == ClosedSession { + // The session does not exist as far as we're concerned. + // It may be in the process of being created or destroyed, + // or be a special "listener" session, etc. + continue + } + id := SessionID(si.SessionID) + session, err := newWTSSession(id, status) + if err != nil { + continue + } + sessions[id] = session + } + return sessions, nil +} + +func (s *wtsSession) AsSession() *Session { + return &Session{ + ID: s.id, + Status: s.status, + // wtsSession owns the user; don't let the caller close it + User: ipnauth.WithoutClose(s.user), + } +} + +func (m *wtsSession) close() error { + for _, cleanup := range m.cleanup { + cleanup() + } + m.cleanup = nil + + if m.user != nil { + if err := m.user.Close(); err != nil { + return err + } + m.user = nil + } + return nil +} + +type sessionEventHandler func(id SessionID, event uint32) + +// TODO(nickkhyl): implement a sessionWatcher that does not use the message queue. +// One possible approach is to have the tailscaled service register a HandlerEx function +// and stream SERVICE_CONTROL_SESSIONCHANGE events to the tailscaled subprocess +// (the actual tailscaled backend), exposing these events via [sessionWatcher]/[wtsManager]. +// +// See tailscale/corp#26477 for details and tracking. 
+type sessionWatcher struct { + logf logger.Logf + ctx context.Context // canceled to stop the watcher + ctxCancel context.CancelFunc // cancels the watcher + hWnd windows.HWND // window handle for receiving session change notifications + handler sessionEventHandler // called on session events + + mu sync.Mutex + doneCh chan error // written to when the watcher exits; nil if not started +} + +func newSessionWatcher(ctx context.Context, logf logger.Logf, handler sessionEventHandler) *sessionWatcher { + ctx, cancel := context.WithCancel(ctx) + return &sessionWatcher{logf: logf, ctx: ctx, ctxCancel: cancel, handler: handler} +} + +func (sw *sessionWatcher) Start() error { + sw.mu.Lock() + defer sw.mu.Unlock() + + select { + case <-sw.ctx.Done(): + return fmt.Errorf("sessionWatcher already stopped: %w", sw.ctx.Err()) + default: + } + + if sw.doneCh != nil { + // Already started. + return nil + } + sw.doneCh = make(chan error, 1) + + startedCh := make(chan error, 1) + go sw.run(startedCh) + if err := <-startedCh; err != nil { + return err + } + + // Signal the window to unsubscribe from session notifications + // and shut down gracefully when the sessionWatcher is stopped. + context.AfterFunc(sw.ctx, func() { + sendMessage(sw.hWnd, _WM_CLOSE, 0, 0) + }) + return nil +} + +func (sw *sessionWatcher) run(started chan<- error) { + runtime.LockOSThread() + defer func() { + runtime.UnlockOSThread() + close(sw.doneCh) + }() + err := sw.createMessageWindow() + started <- err + if err != nil { + return + } + pumpThreadMessages() +} + +// Stop stops the session watcher and waits for it to exit. +func (sw *sessionWatcher) Stop() error { + sw.ctxCancel() + + sw.mu.Lock() + doneCh := sw.doneCh + sw.doneCh = nil + sw.mu.Unlock() + + if doneCh != nil { + return <-doneCh + } + return nil +} + +const watcherWindowClassName = "Tailscale-SessionManager" + +var watcherWindowClassName16 = sync.OnceValue(func() *uint16 { + return must.Get(syscall.UTF16PtrFromString(watcherWindowClassName)) +}) + +var registerSessionManagerWindowClass = sync.OnceValue(func() error { + var hInst windows.Handle + if err := windows.GetModuleHandleEx(0, nil, &hInst); err != nil { + return fmt.Errorf("GetModuleHandle: %w", err) + } + wc := _WNDCLASSEX{ + CbSize: uint32(unsafe.Sizeof(_WNDCLASSEX{})), + HInstance: hInst, + LpfnWndProc: syscall.NewCallback(sessionWatcherWndProc), + LpszClassName: watcherWindowClassName16(), + } + if _, err := registerClassEx(&wc); err != nil { + return fmt.Errorf("RegisterClassEx(%q): %w", watcherWindowClassName, err) + } + return nil +}) + +func (sw *sessionWatcher) createMessageWindow() error { + if err := registerSessionManagerWindowClass(); err != nil { + return err + } + _, err := createWindowEx( + 0, // dwExStyle + watcherWindowClassName16(), // lpClassName + nil, // lpWindowName + 0, // dwStyle + 0, // x + 0, // y + 0, // nWidth + 0, // nHeight + _HWND_MESSAGE, // hWndParent; message-only window + 0, // hMenu + 0, // hInstance + unsafe.Pointer(sw), // lpParam + ) + if err != nil { + return fmt.Errorf("CreateWindowEx: %w", err) + } + return nil +} + +func (sw *sessionWatcher) wndProc(hWnd windows.HWND, msg uint32, wParam, lParam uintptr) (result uintptr) { + switch msg { + case _WM_CREATE: + err := registerSessionNotification(_WTS_CURRENT_SERVER_HANDLE, hWnd, _NOTIFY_FOR_ALL_SESSIONS) + if err != nil { + sw.logf("[unexpected] failed to register for session notifications: %v", err) + return ^uintptr(0) + } + sw.logf("registered for session notifications") + case _WM_WTSSESSION_CHANGE: + 
sw.handler(SessionID(lParam), uint32(wParam)) + return 0 + case _WM_CLOSE: + if err := destroyWindow(hWnd); err != nil { + sw.logf("[unexpected] failed to destroy window: %v", err) + } + return 0 + case _WM_DESTROY: + err := unregisterSessionNotification(_WTS_CURRENT_SERVER_HANDLE, hWnd) + if err != nil { + sw.logf("[unexpected] failed to unregister session notifications callback: %v", err) + } + sw.logf("unregistered from session notifications") + return 0 + case _WM_NCDESTROY: + sw.hWnd = 0 + postQuitMessage(0) // quit the message loop for this thread + } + return defWindowProc(hWnd, msg, wParam, lParam) +} + +func (sw *sessionWatcher) setHandle(hwnd windows.HWND) error { + sw.hWnd = hwnd + setLastError(0) + _, err := setWindowLongPtr(sw.hWnd, _GWLP_USERDATA, uintptr(unsafe.Pointer(sw))) + return err // may be nil or non-nil +} + +func sessionWatcherByHandle(hwnd windows.HWND) *sessionWatcher { + val, _ := getWindowLongPtr(hwnd, _GWLP_USERDATA) + return (*sessionWatcher)(unsafe.Pointer(val)) +} + +func sessionWatcherWndProc(hWnd windows.HWND, msg uint32, wParam, lParam uintptr) (result uintptr) { + if msg == _WM_NCCREATE { + cs := (*_CREATESTRUCT)(unsafe.Pointer(lParam)) + sw := (*sessionWatcher)(unsafe.Pointer(cs.CreateParams)) + if sw == nil { + return 0 + } + if err := sw.setHandle(hWnd); err != nil { + return 0 + } + return defWindowProc(hWnd, msg, wParam, lParam) + } + if sw := sessionWatcherByHandle(hWnd); sw != nil { + return sw.wndProc(hWnd, msg, wParam, lParam) + } + return defWindowProc(hWnd, msg, wParam, lParam) +} + +func pumpThreadMessages() { + var msg _MSG + for getMessage(&msg, 0, 0, 0) != 0 { + translateMessage(&msg) + dispatchMessage(&msg) + } +} + +// waitUntilWTSReady waits until the Windows Terminal Services (WTS) is ready. +// This is necessary because the WTS API functions may fail if called before +// the WTS is ready. +// +// https://web.archive.org/web/20250207011738/https://learn.microsoft.com/en-us/windows/win32/api/wtsapi32/nf-wtsapi32-wtsregistersessionnotificationex +func waitUntilWTSReady(ctx context.Context) error { + eventName16, err := windows.UTF16PtrFromString(`Global\TermSrvReadyEvent`) + if err != nil { + return err + } + event, err := windows.OpenEvent(windows.SYNCHRONIZE, false, eventName16) + if err != nil { + return err + } + defer windows.CloseHandle(event) + return waitForContextOrHandle(ctx, event) +} + +// waitForContextOrHandle waits for either the context to be done or a handle to be signaled. +func waitForContextOrHandle(ctx context.Context, handle windows.Handle) error { + contextDoneEvent, cleanup, err := channelToEvent(ctx.Done()) + if err != nil { + return err + } + defer cleanup() + + handles := []windows.Handle{contextDoneEvent, handle} + waitCode, err := windows.WaitForMultipleObjects(handles, false, windows.INFINITE) + if err != nil { + return err + } + + waitCode -= windows.WAIT_OBJECT_0 + if waitCode == 0 { // contextDoneEvent + return ctx.Err() + } + return nil +} + +// channelToEvent returns an auto-reset event that is set when the channel +// becomes receivable, including when the channel is closed. 
+func channelToEvent[T any](c <-chan T) (evt windows.Handle, cleanup func(), err error) { + evt, err = windows.CreateEvent(nil, 0, 0, nil) + if err != nil { + return 0, nil, err + } + + cancel := make(chan struct{}) + + go func() { + select { + case <-cancel: + return + case <-c: + } + windows.SetEvent(evt) + }() + + cleanup = func() { + close(cancel) + windows.CloseHandle(evt) + } + + return evt, cleanup, nil +} + +type _WNDCLASSEX struct { + CbSize uint32 + Style uint32 + LpfnWndProc uintptr + CbClsExtra int32 + CbWndExtra int32 + HInstance windows.Handle + HIcon windows.Handle + HCursor windows.Handle + HbrBackground windows.Handle + LpszMenuName *uint16 + LpszClassName *uint16 + HIconSm windows.Handle +} + +type _CREATESTRUCT struct { + CreateParams uintptr + Instance windows.Handle + Menu windows.Handle + Parent windows.HWND + Cy int32 + Cx int32 + Y int32 + X int32 + Style int32 + Name *uint16 + ClassName *uint16 + ExStyle uint32 +} + +type _POINT struct { + X, Y int32 +} + +type _MSG struct { + HWnd windows.HWND + Message uint32 + WParam uintptr + LParam uintptr + Time uint32 + Pt _POINT +} + +const ( + _WM_CREATE = 1 + _WM_DESTROY = 2 + _WM_CLOSE = 16 + _WM_NCCREATE = 129 + _WM_QUIT = 18 + _WM_NCDESTROY = 130 + + // _WM_WTSSESSION_CHANGE is a message sent to windows that have registered + // for session change notifications, informing them of changes in session state. + // + // https://web.archive.org/web/20250207012421/https://learn.microsoft.com/en-us/windows/win32/termserv/wm-wtssession-change + _WM_WTSSESSION_CHANGE = 0x02B1 +) + +const _GWLP_USERDATA = -21 + +const _HWND_MESSAGE = ^windows.HWND(2) + +// _NOTIFY_FOR_ALL_SESSIONS indicates that the window should receive +// session change notifications for all sessions on the specified server. +const _NOTIFY_FOR_ALL_SESSIONS = 1 + +// _WTS_CURRENT_SERVER_HANDLE indicates that the window should receive +// session change notifications for the host itself rather than a remote server. +const _WTS_CURRENT_SERVER_HANDLE = windows.Handle(0) + +// _WTS_CONNECTSTATE_CLASS represents the connection state of a session. +// +// https://web.archive.org/web/20250206082427/https://learn.microsoft.com/en-us/windows/win32/api/wtsapi32/ne-wtsapi32-wts_connectstate_class +type _WTS_CONNECTSTATE_CLASS int32 + +// ToSessionStatus converts cs to a [SessionStatus]. +func (cs _WTS_CONNECTSTATE_CLASS) ToSessionStatus() SessionStatus { + switch cs { + case windows.WTSActive: + return ForegroundSession + case windows.WTSDisconnected: + return BackgroundSession + default: + // The session does not exist as far as we're concerned. + return ClosedSession + } +} diff --git a/ipn/desktop/zsyscall_windows.go b/ipn/desktop/zsyscall_windows.go new file mode 100644 index 000000000..222ab49e5 --- /dev/null +++ b/ipn/desktop/zsyscall_windows.go @@ -0,0 +1,159 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package desktop + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. 
+func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + moduser32 = windows.NewLazySystemDLL("user32.dll") + modwtsapi32 = windows.NewLazySystemDLL("wtsapi32.dll") + + procSetLastError = modkernel32.NewProc("SetLastError") + procCreateWindowExW = moduser32.NewProc("CreateWindowExW") + procDefWindowProcW = moduser32.NewProc("DefWindowProcW") + procDestroyWindow = moduser32.NewProc("DestroyWindow") + procDispatchMessageW = moduser32.NewProc("DispatchMessageW") + procGetMessageW = moduser32.NewProc("GetMessageW") + procGetWindowLongPtrW = moduser32.NewProc("GetWindowLongPtrW") + procPostQuitMessage = moduser32.NewProc("PostQuitMessage") + procRegisterClassExW = moduser32.NewProc("RegisterClassExW") + procSendMessageW = moduser32.NewProc("SendMessageW") + procSetWindowLongPtrW = moduser32.NewProc("SetWindowLongPtrW") + procTranslateMessage = moduser32.NewProc("TranslateMessage") + procWTSRegisterSessionNotificationEx = modwtsapi32.NewProc("WTSRegisterSessionNotificationEx") + procWTSUnRegisterSessionNotificationEx = modwtsapi32.NewProc("WTSUnRegisterSessionNotificationEx") +) + +func setLastError(dwErrorCode uint32) { + syscall.Syscall(procSetLastError.Addr(), 1, uintptr(dwErrorCode), 0, 0) + return +} + +func createWindowEx(dwExStyle uint32, lpClassName *uint16, lpWindowName *uint16, dwStyle uint32, x int32, y int32, nWidth int32, nHeight int32, hWndParent windows.HWND, hMenu windows.Handle, hInstance windows.Handle, lpParam unsafe.Pointer) (hWnd windows.HWND, err error) { + r0, _, e1 := syscall.Syscall12(procCreateWindowExW.Addr(), 12, uintptr(dwExStyle), uintptr(unsafe.Pointer(lpClassName)), uintptr(unsafe.Pointer(lpWindowName)), uintptr(dwStyle), uintptr(x), uintptr(y), uintptr(nWidth), uintptr(nHeight), uintptr(hWndParent), uintptr(hMenu), uintptr(hInstance), uintptr(lpParam)) + hWnd = windows.HWND(r0) + if hWnd == 0 { + err = errnoErr(e1) + } + return +} + +func defWindowProc(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) (res uintptr) { + r0, _, _ := syscall.Syscall6(procDefWindowProcW.Addr(), 4, uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam), 0, 0) + res = uintptr(r0) + return +} + +func destroyWindow(hwnd windows.HWND) (err error) { + r1, _, e1 := syscall.Syscall(procDestroyWindow.Addr(), 1, uintptr(hwnd), 0, 0) + if int32(r1) == 0 { + err = errnoErr(e1) + } + return +} + +func dispatchMessage(lpMsg *_MSG) (res uintptr) { + r0, _, _ := syscall.Syscall(procDispatchMessageW.Addr(), 1, uintptr(unsafe.Pointer(lpMsg)), 0, 0) + res = uintptr(r0) + return +} + +func getMessage(lpMsg *_MSG, hwnd windows.HWND, msgMin uint32, msgMax uint32) (ret int32) { + r0, _, _ := syscall.Syscall6(procGetMessageW.Addr(), 4, uintptr(unsafe.Pointer(lpMsg)), uintptr(hwnd), uintptr(msgMin), uintptr(msgMax), 0, 0) + ret = int32(r0) + return +} + +func getWindowLongPtr(hwnd windows.HWND, index int32) (res uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetWindowLongPtrW.Addr(), 2, uintptr(hwnd), uintptr(index), 0) + res = uintptr(r0) + if res == 0 && e1 != 0 { + err = errnoErr(e1) + } + return +} + +func postQuitMessage(exitCode int32) { + syscall.Syscall(procPostQuitMessage.Addr(), 1, uintptr(exitCode), 0, 0) + return +} + +func registerClassEx(windowClass 
*_WNDCLASSEX) (atom uint16, err error) { + r0, _, e1 := syscall.Syscall(procRegisterClassExW.Addr(), 1, uintptr(unsafe.Pointer(windowClass)), 0, 0) + atom = uint16(r0) + if atom == 0 { + err = errnoErr(e1) + } + return +} + +func sendMessage(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) (res uintptr) { + r0, _, _ := syscall.Syscall6(procSendMessageW.Addr(), 4, uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam), 0, 0) + res = uintptr(r0) + return +} + +func setWindowLongPtr(hwnd windows.HWND, index int32, newLong uintptr) (res uintptr, err error) { + r0, _, e1 := syscall.Syscall(procSetWindowLongPtrW.Addr(), 3, uintptr(hwnd), uintptr(index), uintptr(newLong)) + res = uintptr(r0) + if res == 0 && e1 != 0 { + err = errnoErr(e1) + } + return +} + +func translateMessage(lpMsg *_MSG) (res bool) { + r0, _, _ := syscall.Syscall(procTranslateMessage.Addr(), 1, uintptr(unsafe.Pointer(lpMsg)), 0, 0) + res = r0 != 0 + return +} + +func registerSessionNotification(hServer windows.Handle, hwnd windows.HWND, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procWTSRegisterSessionNotificationEx.Addr(), 3, uintptr(hServer), uintptr(hwnd), uintptr(flags)) + if int32(r1) == 0 { + err = errnoErr(e1) + } + return +} + +func unregisterSessionNotification(hServer windows.Handle, hwnd windows.HWND) (err error) { + r1, _, e1 := syscall.Syscall(procWTSUnRegisterSessionNotificationEx.Addr(), 2, uintptr(hServer), uintptr(hwnd), 0) + if int32(r1) == 0 { + err = errnoErr(e1) + } + return +} From 9b32ba7f549b8591aa69ba4e33ae99eaf42c3bf2 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 11 Feb 2025 12:53:20 -0600 Subject: [PATCH 0494/1708] ipn/ipn{local,server}: move "staying alive in server mode" from ipnserver to LocalBackend Currently, we disconnect Tailscale and reset LocalBackend on Windows when the last LocalAPI client disconnects, unless Unattended Mode is enabled for the current profile. And the implementation is somewhat racy since the current profile could theoretically change after (*ipnserver.Server).addActiveHTTPRequest checks (*LocalBackend).InServerMode() and before it calls (*LocalBackend).SetCurrentUser(nil) (or, previously, (*LocalBackend).ResetForClientDisconnect). Additionally, we might want to keep Tailscale running and connected while a user is logged in rather than tying it to whether a LocalAPI client is connected (i.e., while the GUI is running), even when Unattended Mode is disabled for a profile. This includes scenarios where the new AlwaysOn mode is enabled, as well as when Tailscale is used on headless Windows editions, such as Windows Server Core, where the GUI is not supported. It may also be desirable to switch to the "background" profile when a user logs off from their device or implement other similar features. To facilitate these improvements, we move the logic from ipnserver.Server to ipnlocal.LocalBackend, where it determines whether to keep Tailscale running when the current user disconnects. We also update the logic that determines whether a connection should be allowed to better reflect the fact that, currently, LocalAPI connections are not allowed unless: - the current UID is "", meaning that either we are not on a multi-user system or Tailscale is idle; - the LocalAPI client belongs to the current user (their UIDs are the same); - the LocalAPI client is Local System (special case; Local System is always allowed). Whether Unattended Mode is enabled only affects the error message returned to the Local API client when the connection is denied. 
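For reference, the resulting gate is roughly the following (an illustrative sketch only, with hypothetical names; the actual check is CheckIPNConnectionAllowed in the diff below):

    package main

    import (
        "errors"
        "fmt"
    )

    // connectionAllowed sketches the rule described above: a LocalAPI
    // connection is allowed when there is no current user yet, when the
    // caller is the Windows Local System account, or when the caller is
    // the current user. ForceDaemon (Unattended Mode) only changes the
    // error text returned when the connection is denied.
    func connectionAllowed(currentUID, callerUID string, callerIsLocalSystem, forceDaemon bool) error {
        if currentUID == "" {
            return nil // no current user; not multi-user or Tailscale is idle
        }
        if callerIsLocalSystem {
            return nil // Windows SYSTEM is always allowed
        }
        if callerUID == "" {
            return errors.New("empty user uid in connection identity")
        }
        if callerUID == currentUID {
            return nil // caller owns the current profile
        }
        reason := "already in use"
        if forceDaemon {
            reason = "running in server mode"
        }
        return fmt.Errorf("Tailscale %s; connection from another user not allowed", reason)
    }
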
Updates #14823 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 106 +++++++++++++++++++++++++-------------- ipn/ipnlocal/profiles.go | 42 ++++++++++++++++ ipn/ipnserver/server.go | 16 +----- 3 files changed, 112 insertions(+), 52 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c24bcbb7b..5c1a69e76 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3566,23 +3566,6 @@ func (b *LocalBackend) State() ipn.State { return b.state } -// InServerMode reports whether the Tailscale backend is explicitly running in -// "server mode" where it continues to run despite whatever the platform's -// default is. In practice, this is only used on Windows, where the default -// tailscaled behavior is to shut down whenever the GUI disconnects. -// -// On non-Windows platforms, this usually returns false (because people don't -// set unattended mode on other platforms) and also isn't checked on other -// platforms. -// -// TODO(bradfitz): rename to InWindowsUnattendedMode or something? Or make this -// return true on Linux etc and always be called? It's kinda messy now. -func (b *LocalBackend) InServerMode() bool { - b.mu.Lock() - defer b.mu.Unlock() - return b.pm.CurrentPrefs().ForceDaemon() -} - // CheckIPNConnectionAllowed returns an error if the specified actor should not // be allowed to connect or make requests to the LocalAPI currently. // @@ -3592,16 +3575,10 @@ func (b *LocalBackend) InServerMode() bool { func (b *LocalBackend) CheckIPNConnectionAllowed(actor ipnauth.Actor) error { b.mu.Lock() defer b.mu.Unlock() - serverModeUid := b.pm.CurrentUserID() - if serverModeUid == "" { - // Either this platform isn't a "multi-user" platform or we're not yet - // running as one. - return nil - } - if !b.pm.CurrentPrefs().ForceDaemon() { + if b.pm.CurrentUserID() == "" { + // There's no "current user" yet; allow the connection. return nil } - // Always allow Windows SYSTEM user to connect, // even if Tailscale is currently being used by another user. if actor.IsLocalSystem() { @@ -3612,10 +3589,21 @@ func (b *LocalBackend) CheckIPNConnectionAllowed(actor ipnauth.Actor) error { if uid == "" { return errors.New("empty user uid in connection identity") } - if uid != serverModeUid { - return fmt.Errorf("Tailscale running in server mode (%q); connection from %q not allowed", b.tryLookupUserName(string(serverModeUid)), b.tryLookupUserName(string(uid))) + if uid == b.pm.CurrentUserID() { + // The connection is from the current user; allow it. + return nil + } + + // The connection is from a different user; block it. + var reason string + if b.pm.CurrentPrefs().ForceDaemon() { + reason = "running in server mode" + } else { + reason = "already in use" } - return nil + return fmt.Errorf("Tailscale %s (%q); connection from %q not allowed", + reason, b.tryLookupUserName(string(b.pm.CurrentUserID())), + b.tryLookupUserName(string(uid))) } // tryLookupUserName tries to look up the username for the uid. 
@@ -3822,10 +3810,53 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { b.currentUser = actor } - if b.pm.CurrentUserID() != uid { - b.pm.SetCurrentUserID(uid) - b.resetForProfileChangeLockedOnEntry(unlock) + if b.pm.CurrentUserID() == uid { + return + } + + var profileID ipn.ProfileID + if actor != nil { + profileID = b.pm.DefaultUserProfileID(uid) + } else if uid, profileID = b.getBackgroundProfileIDLocked(); profileID != "" { + b.logf("client disconnected; staying alive in server mode") + } else { + b.logf("client disconnected; stopping server") + } + + if err := b.switchProfileLockedOnEntry(uid, profileID, unlock); err != nil { + b.logf("failed switching profile to %q: %v", profileID, err) + } +} + +// switchProfileLockedOnEntry is like [LocalBackend.SwitchProfile], +// but b.mu must held on entry, but it is released on exit. +func (b *LocalBackend) switchProfileLockedOnEntry(uid ipn.WindowsUserID, profileID ipn.ProfileID, unlock unlockOnce) error { + defer unlock() + if b.pm.CurrentUserID() == uid && b.pm.CurrentProfile().ID() == profileID { + return nil + } + oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() + if changed := b.pm.SetCurrentUserAndProfile(uid, profileID); !changed { + return nil + } + // As an optimization, only reset the dialPlan if the control URL changed. + if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(); oldControlURL != newControlURL { + b.resetDialPlan() + } + return b.resetForProfileChangeLockedOnEntry(unlock) +} + +// getBackgroundProfileIDLocked returns the profile ID to use when no GUI/CLI +// client is connected, or "" if Tailscale should not run in the background. +// As of 2025-02-07, it is only used on Windows. +func (b *LocalBackend) getBackgroundProfileIDLocked() (ipn.WindowsUserID, ipn.ProfileID) { + // If Unattended Mode is enabled for the current profile, keep using it. + if b.pm.CurrentPrefs().ForceDaemon() { + return b.pm.CurrentProfile().LocalUserID(), b.pm.CurrentProfile().ID() } + // Otherwise, switch to an empty profile and disconnect Tailscale + // until a GUI or CLI client connects. + return "", "" } // CurrentUserForTest returns the current user and the associated WindowsUserID. @@ -7062,21 +7093,20 @@ func (b *LocalBackend) ShouldInterceptVIPServiceTCPPort(ap netip.AddrPort) bool // It will restart the backend on success. // If the profile is not known, it returns an errProfileNotFound. func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { - if b.CurrentProfile().ID() == profile { - return nil - } unlock := b.lockAndGetUnlock() defer unlock() + if b.pm.CurrentProfile().ID() == profile { + return nil + } + oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() if err := b.pm.SwitchProfile(profile); err != nil { return err } - // As an optimization, only reset the dialPlan if the control URL - // changed; we treat an empty URL as "unknown" and always reset. - newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() - if oldControlURL != newControlURL || oldControlURL == "" || newControlURL == "" { + // As an optimization, only reset the dialPlan if the control URL changed. 
+ if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(); oldControlURL != newControlURL { b.resetDialPlan() } diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index f988f8852..65714874a 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -77,6 +77,48 @@ func (pm *profileManager) SetCurrentUserID(uid ipn.WindowsUserID) { } } +// SetCurrentUserAndProfile sets the current user ID and switches the specified +// profile, if it is accessible to the user. If the profile does not exist, +// or is not accessible, it switches to the user's default profile, +// creating a new one if necessary. +// +// It is a shorthand for [profileManager.SetCurrentUserID] followed by +// [profileManager.SwitchProfile], but it is more efficient as it switches +// directly to the specified profile rather than switching to the user's +// default profile first. +// +// As a special case, if the specified profile ID "", it creates a new +// profile for the user and switches to it, unless the current profile +// is already a new, empty profile owned by the user. +// +// It reports whether the call resulted in a profile switch. +func (pm *profileManager) SetCurrentUserAndProfile(uid ipn.WindowsUserID, profileID ipn.ProfileID) (changed bool) { + pm.currentUserID = uid + + if profileID == "" { + if pm.currentProfile.ID() == "" && pm.currentProfile.LocalUserID() == uid { + return false + } + pm.NewProfileForUser(uid) + return true + } + + if profile, err := pm.ProfileByID(profileID); err == nil { + if pm.CurrentProfile().ID() == profileID { + return false + } + if err := pm.SwitchProfile(profile.ID()); err == nil { + return true + } + } + + if err := pm.SwitchToDefaultProfile(); err != nil { + pm.logf("%q's default profile cannot be used; creating a new one: %v", uid, err) + pm.NewProfile() + } + return true +} + // DefaultUserProfileID returns [ipn.ProfileID] of the default (last used) profile for the specified user, // or an empty string if the specified user does not have a default profile. func (pm *profileManager) DefaultUserProfileID(uid ipn.WindowsUserID) ipn.ProfileID { diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index 436b8404d..63f03f79e 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -42,12 +42,6 @@ type Server struct { logf logger.Logf netMon *netmon.Monitor // must be non-nil backendLogID logid.PublicID - // resetOnZero is whether to call bs.Reset on transition from - // 1->0 active HTTP requests. That is, this is whether the backend is - // being run in "client mode" that requires an active GUI - // connection (such as on Windows by default). Even if this - // is true, the ForceDaemon pref can override this. - resetOnZero bool // mu guards the fields that follow. 
// lock order: mu, then LocalBackend.mu @@ -429,13 +423,8 @@ func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (o return } - if s.resetOnZero { - if lb.InServerMode() { - s.logf("client disconnected; staying alive in server mode") - } else { - s.logf("client disconnected; stopping server") - lb.SetCurrentUser(nil) - } + if envknob.GOOS() == "windows" && !actor.IsLocalSystem() { + lb.SetCurrentUser(nil) } // Wake up callers waiting for the server to be idle: @@ -459,7 +448,6 @@ func New(logf logger.Logf, logID logid.PublicID, netMon *netmon.Monitor) *Server backendLogID: logID, logf: logf, netMon: netMon, - resetOnZero: envknob.GOOS() == "windows", } } From 2994dde5356c7005e60478d84b883e193b130279 Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 10 Feb 2025 15:02:25 +0000 Subject: [PATCH 0495/1708] licenses: update license notices Signed-off-by: License Updater --- licenses/android.md | 44 ++++++++++++-------------- licenses/apple.md | 63 ++++++++++++++++++------------------- licenses/tailscale.md | 73 +++++++++++++++++++++---------------------- licenses/windows.md | 67 +++++++++++++++++++++------------------ 4 files changed, 122 insertions(+), 125 deletions(-) diff --git a/licenses/android.md b/licenses/android.md index 94aeb3fc0..378baa805 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -26,12 +26,11 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.19.0/LICENSE)) - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.19.0/internal/sync/singleflight/LICENSE)) - [github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) ([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE)) - - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.6.0/LICENSE)) + - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.11.1/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/2e55bd4e08b0/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/6a9a0fde9288/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) 
([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) @@ -41,45 +40,42 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - - [github.com/josharian/native](https://pkg.go.dev/github.com/josharian/native) ([MIT](https://github.com/josharian/native/blob/5c7d0dd6ab86/license)) - - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.4/LICENSE)) - - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.4/internal/snapref/LICENSE)) - - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.4/zstd/internal/xxhash/LICENSE.txt)) + - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE)) + - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE)) + - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - [github.com/mdlayher/genetlink](https://pkg.go.dev/github.com/mdlayher/genetlink) ([MIT](https://github.com/mdlayher/genetlink/blob/v1.3.2/LICENSE.md)) - - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/v1.7.2/LICENSE.md)) + - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) ([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) - [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) 
([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) - - [github.com/tailscale/golang-x-crypto](https://pkg.go.dev/github.com/tailscale/golang-x-crypto) ([BSD-3-Clause](https://github.com/tailscale/golang-x-crypto/blob/3fde5e568aa4/LICENSE)) - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE)) + - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - [github.com/tailscale/tailscale-android/libtailscale](https://pkg.go.dev/github.com/tailscale/tailscale-android/libtailscale) ([BSD-3-Clause](https://github.com/tailscale/tailscale-android/blob/HEAD/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/799c1978fafc/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/0b8b35511f19/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - - [github.com/tcnksm/go-httpstat](https://pkg.go.dev/github.com/tcnksm/go-httpstat) ([MIT](https://github.com/tcnksm/go-httpstat/blob/v0.2.0/LICENSE)) - - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/a3c409a6018e/LICENSE)) + - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/d2acac8f3701/LICENSE)) - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/intern](https://pkg.go.dev/go4.org/intern) ([BSD-3-Clause](https://github.com/go4org/intern/blob/ae77deb06f29/LICENSE)) - - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/4f986261bf13/LICENSE)) + - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [go4.org/unsafe/assume-no-moving-gc](https://pkg.go.dev/go4.org/unsafe/assume-no-moving-gc) ([BSD-3-Clause](https://github.com/go4org/unsafe-assume-no-moving-gc/blob/e7c30c78aeb2/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.26.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/1b970713:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/a8ea4be8:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) 
([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/7588d65b:LICENSE)) - [golang.org/x/mobile](https://pkg.go.dev/golang.org/x/mobile) ([BSD-3-Clause](https://cs.opensource.google/go/x/mobile/+/81131f64:LICENSE)) - - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.20.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.28.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.8.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.23.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.23.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.17.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.5.0:LICENSE)) - - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.24.0:LICENSE)) + - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.22.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.34.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.10.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/1c14dcad:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.28.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.21.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.9.0:LICENSE)) + - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.29.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/64c016c92987/LICENSE)) - [inet.af/netaddr](https://pkg.go.dev/inet.af/netaddr) ([BSD-3-Clause](Unknown)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index aae006c95..7741318f7 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -12,29 +12,29 @@ See also the dependencies in the [Tailscale CLI][]. 
- [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.32.4/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.27.28/config/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.28/credentials/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.12/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.23/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.23/internal/endpoints/v2/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.1/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.32.4/internal/sync/singleflight/LICENSE)) - - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.0/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.11.18/service/internal/presigned-url/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.31/internal/configsources/LICENSE.txt)) + - 
[github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.31/internal/endpoints/v2/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.45.0/service/ssm/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.22.5/service/sso/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.26.5/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.30.4/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.0/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.0/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) + - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) + - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - 
[github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) ([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.6.0/LICENSE)) + - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.11.1/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/2e55bd4e08b0/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/6a9a0fde9288/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) @@ -45,14 +45,13 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/15c9b8791914/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - - [github.com/josharian/native](https://pkg.go.dev/github.com/josharian/native) ([MIT](https://github.com/josharian/native/blob/5c7d0dd6ab86/license)) - [github.com/jsimonetti/rtnetlink](https://pkg.go.dev/github.com/jsimonetti/rtnetlink) ([MIT](https://github.com/jsimonetti/rtnetlink/blob/v1.4.1/LICENSE.md)) - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE)) - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE)) - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - [github.com/mdlayher/genetlink](https://pkg.go.dev/github.com/mdlayher/genetlink) ([MIT](https://github.com/mdlayher/genetlink/blob/v1.3.2/LICENSE.md)) - - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/v1.7.2/LICENSE.md)) + - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) ([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) @@ -60,27 +59,25 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) - [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) - - [github.com/tailscale/golang-x-crypto](https://pkg.go.dev/github.com/tailscale/golang-x-crypto) ([BSD-3-Clause](https://github.com/tailscale/golang-x-crypto/blob/3fde5e568aa4/LICENSE)) - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/799c1978fafc/LICENSE)) + - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/0b8b35511f19/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - - [github.com/tcnksm/go-httpstat](https://pkg.go.dev/github.com/tcnksm/go-httpstat) ([MIT](https://github.com/tcnksm/go-httpstat/blob/v0.2.0/LICENSE)) - - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/a3c409a6018e/LICENSE)) + - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/d2acac8f3701/LICENSE)) - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.28.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fc45aab8:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.30.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.9.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.27.0:LICENSE)) - - 
[golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.25.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.20.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.5.0:LICENSE)) - - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/64c016c92987/LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/a8ea4be8:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/7588d65b:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.34.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.10.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/1c14dcad:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.28.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.21.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.9.0:LICENSE)) + - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) ## Additional Dependencies diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 8f05acedc..ab79ee391 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -17,22 +17,22 @@ Some packages may only be included on certain architectures or operating systems - [github.com/akutz/memconn](https://pkg.go.dev/github.com/akutz/memconn) ([Apache-2.0](https://github.com/akutz/memconn/blob/v0.1.0/LICENSE)) - [github.com/alexbrainman/sspi](https://pkg.go.dev/github.com/alexbrainman/sspi) ([BSD-3-Clause](https://github.com/alexbrainman/sspi/blob/1a75b4708caa/LICENSE)) - [github.com/anmitsu/go-shlex](https://pkg.go.dev/github.com/anmitsu/go-shlex) ([MIT](https://github.com/anmitsu/go-shlex/blob/38f4b401e2be/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.24.1/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.26.5/config/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.16.16/credentials/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.14.11/feature/ec2/imds/LICENSE.txt)) - - 
[github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.2.10/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.5.10/internal/endpoints/v2/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.7.2/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.24.1/internal/sync/singleflight/LICENSE)) - - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.10.4/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.10.10/service/internal/presigned-url/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.31/internal/configsources/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.31/internal/endpoints/v2/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.44.7/service/ssm/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.18.7/service/sso/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.21.7/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.26.7/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.19.0/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.19.0/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) + - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) + - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - [github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) ([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE)) - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) @@ -40,23 +40,22 @@ Some packages may only be included on certain architectures or operating systems - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/a09d6be7affa/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) 
([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.6.0/LICENSE)) + - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.11.1/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/2e55bd4e08b0/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/6a9a0fde9288/LICENSE)) - [github.com/go-ole/go-ole](https://pkg.go.dev/github.com/go-ole/go-ole) ([MIT](https://github.com/go-ole/go-ole/blob/v1.3.0/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - - [github.com/gorilla/csrf](https://pkg.go.dev/github.com/gorilla/csrf) ([BSD-3-Clause](https://github.com/gorilla/csrf/blob/v1.7.2/LICENSE)) + - [github.com/gorilla/csrf](https://pkg.go.dev/github.com/gorilla/csrf) ([BSD-3-Clause](https://github.com/gorilla/csrf/blob/9dd6af1f6d30/LICENSE)) - [github.com/gorilla/securecookie](https://pkg.go.dev/github.com/gorilla/securecookie) ([BSD-3-Clause](https://github.com/gorilla/securecookie/blob/v1.1.2/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - [github.com/illarion/gonotify/v2](https://pkg.go.dev/github.com/illarion/gonotify/v2) ([MIT](https://github.com/illarion/gonotify/blob/v2.0.3/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - - [github.com/josharian/native](https://pkg.go.dev/github.com/josharian/native) ([MIT](https://github.com/josharian/native/blob/5c7d0dd6ab86/license)) - [github.com/kballard/go-shellquote](https://pkg.go.dev/github.com/kballard/go-shellquote) ([MIT](https://github.com/kballard/go-shellquote/blob/95032a82bc51/LICENSE)) - 
[github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE)) - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE)) @@ -66,7 +65,7 @@ Some packages may only be included on certain architectures or operating systems - [github.com/mattn/go-colorable](https://pkg.go.dev/github.com/mattn/go-colorable) ([MIT](https://github.com/mattn/go-colorable/blob/v0.1.13/LICENSE)) - [github.com/mattn/go-isatty](https://pkg.go.dev/github.com/mattn/go-isatty) ([MIT](https://github.com/mattn/go-isatty/blob/v0.0.20/LICENSE)) - [github.com/mdlayher/genetlink](https://pkg.go.dev/github.com/mdlayher/genetlink) ([MIT](https://github.com/mdlayher/genetlink/blob/v1.3.2/LICENSE.md)) - - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/v1.7.2/LICENSE.md)) + - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) ([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) @@ -79,34 +78,32 @@ Some packages may only be included on certain architectures or operating systems - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/certstore](https://pkg.go.dev/github.com/tailscale/certstore) ([MIT](https://github.com/tailscale/certstore/blob/d3fa0460f47e/LICENSE.md)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - - [github.com/tailscale/golang-x-crypto](https://pkg.go.dev/github.com/tailscale/golang-x-crypto) ([BSD-3-Clause](https://github.com/tailscale/golang-x-crypto/blob/3fde5e568aa4/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE)) - - [github.com/tailscale/web-client-prebuilt](https://pkg.go.dev/github.com/tailscale/web-client-prebuilt) ([BSD-3-Clause](https://github.com/tailscale/web-client-prebuilt/blob/5db17b287bf1/LICENSE)) + - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) + - [github.com/tailscale/web-client-prebuilt](https://pkg.go.dev/github.com/tailscale/web-client-prebuilt) ([BSD-3-Clause](https://github.com/tailscale/web-client-prebuilt/blob/d4cd19a26976/LICENSE)) - [github.com/tailscale/wf](https://pkg.go.dev/github.com/tailscale/wf) ([BSD-3-Clause](https://github.com/tailscale/wf/blob/6fbb0a674ee6/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) 
([MIT](https://github.com/tailscale/wireguard-go/blob/4e883d38c8d3/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/0b8b35511f19/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - - [github.com/tcnksm/go-httpstat](https://pkg.go.dev/github.com/tcnksm/go-httpstat) ([MIT](https://github.com/tcnksm/go-httpstat/blob/v0.2.0/LICENSE)) - [github.com/toqueteos/webbrowser](https://pkg.go.dev/github.com/toqueteos/webbrowser) ([MIT](https://github.com/toqueteos/webbrowser/blob/v1.2.0/LICENSE.md)) - [github.com/u-root/u-root/pkg/termios](https://pkg.go.dev/github.com/u-root/u-root/pkg/termios) ([BSD-3-Clause](https://github.com/u-root/u-root/blob/v0.12.0/LICENSE)) - - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/a3c409a6018e/LICENSE)) + - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/d2acac8f3701/LICENSE)) - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/4f986261bf13/LICENSE)) + - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.25.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/1b970713:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE)) - - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.16.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.9.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.27.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.5.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/a8ea4be8:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/7588d65b:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.34.0:LICENSE)) + - 
[golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.25.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.10.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/1c14dcad:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.28.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.21.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.9.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/64c016c92987/LICENSE)) - - [k8s.io/client-go/util/homedir](https://pkg.go.dev/k8s.io/client-go/util/homedir) ([Apache-2.0](https://github.com/kubernetes/client-go/blob/v0.30.3/LICENSE)) + - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) + - [k8s.io/client-go/util/homedir](https://pkg.go.dev/k8s.io/client-go/util/homedir) ([Apache-2.0](https://github.com/kubernetes/client-go/blob/v0.32.0/LICENSE)) - [sigs.k8s.io/yaml](https://pkg.go.dev/sigs.k8s.io/yaml) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE)) - [sigs.k8s.io/yaml/goyaml.v2](https://pkg.go.dev/sigs.k8s.io/yaml/goyaml.v2) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/goyaml.v2/LICENSE)) - [software.sslmate.com/src/go-pkcs12](https://pkg.go.dev/software.sslmate.com/src/go-pkcs12) ([BSD-3-Clause](https://github.com/SSLMate/go-pkcs12/blob/v0.4.0/LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index 4cb35e8de..8abbd52d5 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -13,27 +13,29 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/alexbrainman/sspi](https://pkg.go.dev/github.com/alexbrainman/sspi) ([BSD-3-Clause](https://github.com/alexbrainman/sspi/blob/1a75b4708caa/LICENSE)) - [github.com/apenwarr/fixconsole](https://pkg.go.dev/github.com/apenwarr/fixconsole) ([Apache-2.0](https://github.com/apenwarr/fixconsole/blob/5a9f6489cc29/LICENSE)) - [github.com/apenwarr/w32](https://pkg.go.dev/github.com/apenwarr/w32) ([BSD-3-Clause](https://github.com/apenwarr/w32/blob/aa00fece76ab/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.32.4/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.27.28/config/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.28/credentials/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.12/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.23/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.23/internal/endpoints/v2/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.1/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.32.4/internal/sync/singleflight/LICENSE)) - - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.0/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.11.18/service/internal/presigned-url/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.31/internal/configsources/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.31/internal/endpoints/v2/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.45.0/service/ssm/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.22.5/service/sso/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.26.5/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.30.4/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.0/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.0/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) + - 
[github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) + - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) + - [github.com/beorn7/perks/quantile](https://pkg.go.dev/github.com/beorn7/perks/quantile) ([MIT](https://github.com/beorn7/perks/blob/v1.0.1/LICENSE)) + - [github.com/cespare/xxhash/v2](https://pkg.go.dev/github.com/cespare/xxhash/v2) ([MIT](https://github.com/cespare/xxhash/blob/v2.3.0/LICENSE.txt)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.6.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/2e55bd4e08b0/LICENSE)) + - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/6a9a0fde9288/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) @@ -42,41 +44,46 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - - [github.com/josharian/native](https://pkg.go.dev/github.com/josharian/native) ([MIT](https://github.com/josharian/native/blob/5c7d0dd6ab86/license)) - [github.com/jsimonetti/rtnetlink](https://pkg.go.dev/github.com/jsimonetti/rtnetlink) ([MIT](https://github.com/jsimonetti/rtnetlink/blob/v1.4.1/LICENSE.md)) - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE)) - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE)) - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) - - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/v1.7.2/LICENSE.md)) + - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) + - [github.com/munnerz/goautoneg](https://pkg.go.dev/github.com/munnerz/goautoneg) ([BSD-3-Clause](https://github.com/munnerz/goautoneg/blob/a7dc8b61c822/LICENSE)) - [github.com/nfnt/resize](https://pkg.go.dev/github.com/nfnt/resize) ([ISC](https://github.com/nfnt/resize/blob/83c6a9932646/LICENSE)) - [github.com/peterbourgon/diskv](https://pkg.go.dev/github.com/peterbourgon/diskv) ([MIT](https://github.com/peterbourgon/diskv/blob/v2.0.1/LICENSE)) + - [github.com/prometheus/client_golang/prometheus](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) ([Apache-2.0](https://github.com/prometheus/client_golang/blob/v1.19.1/LICENSE)) + - [github.com/prometheus/client_model/go](https://pkg.go.dev/github.com/prometheus/client_model/go) ([Apache-2.0](https://github.com/prometheus/client_model/blob/v0.6.1/LICENSE)) + - [github.com/prometheus/common](https://pkg.go.dev/github.com/prometheus/common) ([Apache-2.0](https://github.com/prometheus/common/blob/v0.55.0/LICENSE)) - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) 
([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/8865133fd3ef/LICENSE)) - - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/28f7e73c7afb/LICENSE)) + - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/72f92d5087d4/LICENSE)) + - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/cfd3289ef17f/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.28.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fc45aab8:LICENSE)) - - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.18.0:LICENSE)) - - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.19.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.30.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.9.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.27.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.25.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.20.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/a8ea4be8:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/7588d65b:LICENSE)) + - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.23.0:LICENSE)) + - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.22.0:LICENSE)) + - 
[golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.34.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.10.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/1c14dcad:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.28.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.21.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) + - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.35.1/LICENSE)) - [gopkg.in/Knetic/govaluate.v3](https://pkg.go.dev/gopkg.in/Knetic/govaluate.v3) ([MIT](https://github.com/Knetic/govaluate/blob/v3.0.0/LICENSE)) + - [gopkg.in/yaml.v3](https://pkg.go.dev/gopkg.in/yaml.v3) ([MIT](https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) ## Additional Dependencies From 01efddea015a0d040f6dfe44163bb9764f16484b Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 11 Feb 2025 15:10:26 -0600 Subject: [PATCH 0496/1708] docs/windows/policy: update ADMX/ADML policy definitions to include the new Always On setting This adds a new policy definition for the AlwaysOn.Enabled policy setting as well as the AlwaysOn.OverrideWithReason sub-option. Updates #14823 Updates tailscale/corp#26247 Signed-off-by: Nick Khyl --- docs/windows/policy/en-US/tailscale.adml | 15 +++++++++++++ docs/windows/policy/tailscale.admx | 28 ++++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index 4d5893a32..8c375ca45 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -16,9 +16,12 @@ Tailscale version 1.62.0 and later Tailscale version 1.74.0 and later Tailscale version 1.78.0 and later + Tailscale version 1.82.0 and later Tailscale UI customization Settings + Allowed (with audit) + Not Allowed Require using a specific Tailscale coordination server + Restrict users from disconnecting Tailscale (always-on mode) + Allow Local Network Access when an Exit Node is in use Auth Key: + + The options below allow configuring exceptions where disconnecting Tailscale is permitted. + Disconnects with reason: + diff --git a/docs/windows/policy/tailscale.admx b/docs/windows/policy/tailscale.admx index 9cf27bddc..6a1ebc666 100644 --- a/docs/windows/policy/tailscale.admx +++ b/docs/windows/policy/tailscale.admx @@ -54,6 +54,10 @@ displayName="$(string.SINCE_V1_78)"> + + +
@@ -128,6 +132,30 @@ never + + + + + + + + + + + + + + + + + + + + + + + + From b7f508fccf8bb267bcab6d87b03e400b02161961 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 11 Feb 2025 14:00:50 -0800 Subject: [PATCH 0497/1708] Revert "control/controlclient: delete unreferenced mapSession UserProfiles" This reverts commit 413fb5b93311972e3a8d724bb696607ef3afe6f2. See long story in #14992 Updates #14992 Updates tailscale/corp#26058 Change-Id: I3de7d080443efe47cbf281ea20887a3caf202488 Signed-off-by: Brad Fitzpatrick --- control/controlclient/map.go | 31 +------------------------------ 1 file changed, 1 insertion(+), 30 deletions(-) diff --git a/control/controlclient/map.go b/control/controlclient/map.go index d4283e490..9f0e706cd 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -195,10 +195,6 @@ func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *t ms.updateStateFromResponse(resp) - // Occasionally clean up old userprofile if it grows too much - // from e.g. ephemeral tagged nodes. - ms.cleanLastUserProfile() - if ms.tryHandleIncrementally(resp) { ms.occasionallyPrintSummary(ms.lastNetmapSummary) return nil @@ -296,6 +292,7 @@ func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) { for _, up := range resp.UserProfiles { ms.lastUserProfile[up.ID] = up.View() } + // TODO(bradfitz): clean up old user profiles? maybe not worth it. if dm := resp.DERPMap; dm != nil { ms.vlogf("netmap: new map contains DERP map") @@ -544,32 +541,6 @@ func (ms *mapSession) addUserProfile(nm *netmap.NetworkMap, userID tailcfg.UserI } } -// cleanLastUserProfile deletes any entries from lastUserProfile -// that are not referenced by any peer or the self node. -// -// This is expensive enough that we don't do this on every message -// from the server, but only when it's grown enough to matter. -func (ms *mapSession) cleanLastUserProfile() { - if len(ms.lastUserProfile) < len(ms.peers)*2 { - // Hasn't grown enough to be worth cleaning. - return - } - - keep := set.Set[tailcfg.UserID]{} - if node := ms.lastNode; node.Valid() { - keep.Add(node.User()) - } - for _, n := range ms.peers { - keep.Add(n.User()) - keep.Add(n.Sharer()) - } - for userID := range ms.lastUserProfile { - if !keep.Contains(userID) { - delete(ms.lastUserProfile, userID) - } - } -} - var debugPatchifyPeer = envknob.RegisterBool("TS_DEBUG_PATCHIFY_PEER") // patchifyPeersChanged mutates resp to promote PeersChanged entries to PeersChangedPatch From 7aef4fd44d7c6a00cf880519a36be8ce543ee575 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 12 Feb 2025 17:43:53 -0600 Subject: [PATCH 0498/1708] ipn/ipn{local,server}: extract logic that determines the "best" Tailscale profile to use In this PR, we further refactor LocalBackend and Unattended Mode to extract the logic that determines which profile should be used at the time of the check, such as when a LocalAPI client connects or disconnects. We then update (*LocalBackend).switchProfileLockedOnEntry to to switch to the profile returned by (*LocalBackend).resolveBestProfileLocked() rather than to the caller-specified specified profile, and rename it to switchToBestProfileLockedOnEntry. This is done in preparation for updating (*LocalBackend).getBackgroundProfileIDLocked to support Always-On mode by determining which profile to use based on which users, if any, are currently logged in and have an active foreground desktop session. 
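As a rough sketch of that selection order (simplified, hypothetical parameters; the real
implementation is in the diff below and additionally consults profile ownership and policy):

    // bestProfile illustrates the precedence described above: a connected
    // GUI/CLI user's profile wins; otherwise Windows falls back to a
    // background profile ("" meaning "disconnect and stay idle"); other
    // platforms keep running with the current profile.
    func bestProfile(clientConnected bool, connectedUserProfile string, onWindows bool, backgroundProfile, currentProfile string) (profile string, isBackground bool) {
    	switch {
    	case clientConnected:
    		// Use the connected user's profile (their current or default one).
    		return connectedUserProfile, false
    	case onWindows:
    		// No client connected: fall back to the background profile, if any.
    		return backgroundProfile, true
    	default:
    		// Elsewhere, keep the current profile in the background.
    		return currentProfile, false
    	}
    }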
Updates #14823 Updates tailscale/corp#26247 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 134 +++++++++++++++++++++++++++++---------- ipn/ipnlocal/profiles.go | 15 +++-- ipn/ipnserver/actor.go | 12 +++- 3 files changed, 118 insertions(+), 43 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5c1a69e76..25f4d552f 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3795,14 +3795,15 @@ func (b *LocalBackend) shouldUploadServices() bool { // // On non-multi-user systems, the actor should be set to nil. func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { - var uid ipn.WindowsUserID - if actor != nil { - uid = actor.UserID() - } - unlock := b.lockAndGetUnlock() defer unlock() + var userIdentifier string + if user := cmp.Or(actor, b.currentUser); user != nil { + maybeUsername, _ := user.Username() + userIdentifier = cmp.Or(maybeUsername, string(user.UserID())) + } + if actor != b.currentUser { if c, ok := b.currentUser.(ipnauth.ActorCloser); ok { c.Close() @@ -3810,46 +3811,108 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { b.currentUser = actor } - if b.pm.CurrentUserID() == uid { - return - } - - var profileID ipn.ProfileID - if actor != nil { - profileID = b.pm.DefaultUserProfileID(uid) - } else if uid, profileID = b.getBackgroundProfileIDLocked(); profileID != "" { - b.logf("client disconnected; staying alive in server mode") + var action string + if actor == nil { + action = "disconnected" } else { - b.logf("client disconnected; stopping server") - } - - if err := b.switchProfileLockedOnEntry(uid, profileID, unlock); err != nil { - b.logf("failed switching profile to %q: %v", profileID, err) + action = "connected" } + reason := fmt.Sprintf("client %s (%s)", action, userIdentifier) + b.switchToBestProfileLockedOnEntry(reason, unlock) } -// switchProfileLockedOnEntry is like [LocalBackend.SwitchProfile], -// but b.mu must held on entry, but it is released on exit. -func (b *LocalBackend) switchProfileLockedOnEntry(uid ipn.WindowsUserID, profileID ipn.ProfileID, unlock unlockOnce) error { +// switchToBestProfileLockedOnEntry selects the best profile to use, +// as reported by [LocalBackend.resolveBestProfileLocked], and switches +// to it, unless it's already the current profile. The reason indicates +// why the profile is being switched, such as due to a client connecting +// or disconnecting and is used for logging. +// +// b.mu must held on entry. It is released on exit. 
+func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock unlockOnce) { defer unlock() - if b.pm.CurrentUserID() == uid && b.pm.CurrentProfile().ID() == profileID { - return nil - } oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() - if changed := b.pm.SetCurrentUserAndProfile(uid, profileID); !changed { - return nil + uid, profileID, background := b.resolveBestProfileLocked() + cp, switched := b.pm.SetCurrentUserAndProfile(uid, profileID) + switch { + case !switched && cp.ID() == "": + b.logf("%s: staying on empty profile", reason) + case !switched: + b.logf("%s: staying on profile %q (%s)", reason, cp.UserProfile().LoginName, cp.ID()) + case cp.ID() == "": + b.logf("%s: disconnecting Tailscale", reason) + case background: + b.logf("%s: switching to background profile %q (%s)", reason, cp.UserProfile().LoginName, cp.ID()) + default: + b.logf("%s: switching to profile %q (%s)", reason, cp.UserProfile().LoginName, cp.ID()) + } + if !switched { + return } // As an optimization, only reset the dialPlan if the control URL changed. if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(); oldControlURL != newControlURL { b.resetDialPlan() } - return b.resetForProfileChangeLockedOnEntry(unlock) + if err := b.resetForProfileChangeLockedOnEntry(unlock); err != nil { + // TODO(nickkhyl): The actual reset cannot fail. However, + // the TKA initialization or [LocalBackend.Start] can fail. + // These errors are not critical as far as we're concerned. + // But maybe we should post a notification to the API watchers? + b.logf("failed switching profile to %q: %v", profileID, err) + } +} + +// resolveBestProfileLocked returns the best profile to use based on the current +// state of the backend, such as whether a GUI/CLI client is connected and whether +// the unattended mode is enabled. +// +// It returns the user ID, profile ID, and whether the returned profile is +// considered a background profile. A background profile is used when no OS user +// is actively using Tailscale, such as when no GUI/CLI client is connected +// and Unattended Mode is enabled (see also [LocalBackend.getBackgroundProfileLocked]). +// An empty profile ID indicates that Tailscale should switch to an empty profile. +// +// b.mu must be held. +func (b *LocalBackend) resolveBestProfileLocked() (userID ipn.WindowsUserID, profileID ipn.ProfileID, isBackground bool) { + // If a GUI/CLI client is connected, use the connected user's profile, which means + // either the current profile if owned by the user, or their default profile. + if b.currentUser != nil { + cp := b.pm.CurrentProfile() + uid := b.currentUser.UserID() + + var profileID ipn.ProfileID + // TODO(nickkhyl): check if the current profile is allowed on the device, + // such as when [syspolicy.Tailnet] policy setting requires a specific Tailnet. + // See tailscale/corp#26249. + if cp.LocalUserID() == uid { + profileID = cp.ID() + } else { + profileID = b.pm.DefaultUserProfileID(uid) + } + return uid, profileID, false + } + + // Otherwise, if on Windows, use the background profile if one is set. + // This includes staying on the current profile if Unattended Mode is enabled. + // If the returned background profileID is "", Tailscale will disconnect + // and remain idle until a GUI or CLI client connects. 
+ if goos := envknob.GOOS(); goos == "windows" { + uid, profileID := b.getBackgroundProfileLocked() + return uid, profileID, true + } + + // On other platforms, however, Tailscale continues to run in the background + // using the current profile. + // + // TODO(nickkhyl): check if the current profile is allowed on the device, + // such as when [syspolicy.Tailnet] policy setting requires a specific Tailnet. + // See tailscale/corp#26249. + return b.pm.CurrentUserID(), b.pm.CurrentProfile().ID(), false } -// getBackgroundProfileIDLocked returns the profile ID to use when no GUI/CLI -// client is connected, or "" if Tailscale should not run in the background. +// getBackgroundProfileLocked returns the user and profile ID to use when no GUI/CLI +// client is connected, or "","" if Tailscale should not run in the background. // As of 2025-02-07, it is only used on Windows. -func (b *LocalBackend) getBackgroundProfileIDLocked() (ipn.WindowsUserID, ipn.ProfileID) { +func (b *LocalBackend) getBackgroundProfileLocked() (ipn.WindowsUserID, ipn.ProfileID) { // If Unattended Mode is enabled for the current profile, keep using it. if b.pm.CurrentPrefs().ForceDaemon() { return b.pm.CurrentProfile().LocalUserID(), b.pm.CurrentProfile().ID() @@ -7190,9 +7253,9 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err // Needs to happen without b.mu held. defer prevCC.Shutdown() } - if err := b.initTKALocked(); err != nil { - return err - } + // TKA errors should not prevent resetting the backend state. + // However, we should still return the error to the caller. + tkaErr := b.initTKALocked() b.lastServeConfJSON = mem.B(nil) b.serveConfig = ipn.ServeConfigView{} b.lastSuggestedExitNode = "" @@ -7201,6 +7264,9 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err b.setAtomicValuesFromPrefsLocked(b.pm.CurrentPrefs()) b.enterStateLockedOnEntry(ipn.NoState, unlock) // Reset state; releases b.mu b.health.SetLocalLogConfigHealth(nil) + if tkaErr != nil { + return tkaErr + } return b.Start(ipn.Options{}) } diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 65714874a..10a110e61 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -91,24 +91,25 @@ func (pm *profileManager) SetCurrentUserID(uid ipn.WindowsUserID) { // profile for the user and switches to it, unless the current profile // is already a new, empty profile owned by the user. // -// It reports whether the call resulted in a profile switch. -func (pm *profileManager) SetCurrentUserAndProfile(uid ipn.WindowsUserID, profileID ipn.ProfileID) (changed bool) { +// It returns the current profile and whether the call resulted +// in a profile switch. 
+func (pm *profileManager) SetCurrentUserAndProfile(uid ipn.WindowsUserID, profileID ipn.ProfileID) (cp ipn.LoginProfileView, changed bool) { pm.currentUserID = uid if profileID == "" { if pm.currentProfile.ID() == "" && pm.currentProfile.LocalUserID() == uid { - return false + return pm.currentProfile, false } pm.NewProfileForUser(uid) - return true + return pm.currentProfile, true } if profile, err := pm.ProfileByID(profileID); err == nil { if pm.CurrentProfile().ID() == profileID { - return false + return pm.currentProfile, false } if err := pm.SwitchProfile(profile.ID()); err == nil { - return true + return pm.currentProfile, true } } @@ -116,7 +117,7 @@ func (pm *profileManager) SetCurrentUserAndProfile(uid ipn.WindowsUserID, profil pm.logf("%q's default profile cannot be used; creating a new one: %v", uid, err) pm.NewProfile() } - return true + return pm.currentProfile, true } // DefaultUserProfileID returns [ipn.ProfileID] of the default (last used) profile for the specified user, diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 594ebf2d5..b0245b0a8 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -32,6 +32,7 @@ type actor struct { ci *ipnauth.ConnIdentity clientID ipnauth.ClientID + userID ipn.WindowsUserID // cached Windows user ID of the connected client process. // accessOverrideReason specifies the reason for overriding certain access restrictions, // such as permitting a user to disconnect when the always-on mode is enabled, // provided that such justification is allowed by the policy. @@ -59,7 +60,14 @@ func newActor(logf logger.Logf, c net.Conn) (*actor, error) { // connectivity on domain-joined devices and/or be slow. clientID = ipnauth.ClientIDFrom(pid) } - return &actor{logf: logf, ci: ci, clientID: clientID, isLocalSystem: connIsLocalSystem(ci)}, nil + return &actor{ + logf: logf, + ci: ci, + clientID: clientID, + userID: ci.WindowsUserID(), + isLocalSystem: connIsLocalSystem(ci), + }, + nil } // actorWithAccessOverride returns a new actor that carries the specified @@ -106,7 +114,7 @@ func (a *actor) IsLocalAdmin(operatorUID string) bool { // UserID implements [ipnauth.Actor]. func (a *actor) UserID() ipn.WindowsUserID { - return a.ci.WindowsUserID() + return a.userID } func (a *actor) pid() int { From f2f7fd12ebc273bf36b2ee43a3df7af14e6e08b5 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Wed, 12 Feb 2025 17:19:17 -0800 Subject: [PATCH 0499/1708] go.mod: bump bart Bart has had some substantial improvements in internal representation, update functions, and other optimizations to reduce memory usage and improve runtime performance. 
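The API surface this repository relies on is small; a minimal sketch of the kind of
longest-prefix-match lookup bart provides follows (it assumes the generic Table type's
Insert/Lookup methods; check the pinned version for the exact method set):

    package main

    import (
    	"fmt"
    	"net/netip"

    	"github.com/gaissmai/bart"
    )

    func main() {
    	var rt bart.Table[string] // zero value is assumed ready to use
    	rt.Insert(netip.MustParsePrefix("100.64.0.0/10"), "cgnat")
    	rt.Insert(netip.MustParsePrefix("10.0.0.0/8"), "private")

    	// Longest-prefix match for a single address.
    	if v, ok := rt.Lookup(netip.MustParseAddr("100.100.100.100")); ok {
    		fmt.Println("matched:", v) // cgnat
    	}
    }

As the depaware and go.sum changes below show, the new version also drops the external
bits-and-blooms/bitset dependency in favor of bart's own internal bitset and sparse packages.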
Updates tailscale/corp#26353 Signed-off-by: James Tucker --- cmd/k8s-operator/depaware.txt | 3 ++- cmd/tailscaled/depaware.txt | 3 ++- go.mod | 5 ++--- go.sum | 6 ++---- 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index f0d572006..b9082f966 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -81,7 +81,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus - github.com/bits-and-blooms/bitset from github.com/gaissmai/bart 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw 💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump @@ -99,6 +98,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 github.com/fsnotify/fsnotify from sigs.k8s.io/controller-runtime/pkg/certwatcher github.com/fxamacker/cbor/v2 from tailscale.com/tka+ github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json/internal/jsonflags+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json/internal/jsonopts+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index d19fb5c96..3eaa12d16 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -81,7 +81,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm - github.com/bits-and-blooms/bitset from github.com/gaissmai/bart L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw LD 💣 github.com/creack/pty from tailscale.com/ssh/tailssh W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ @@ -93,6 +92,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 github.com/djherbis/times from tailscale.com/drive/driveimpl github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/gaissmai/bart from tailscale.com/net/tstun+ + github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json/internal/jsonflags+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json/internal/jsonopts+ diff --git a/go.mod b/go.mod index 074482479..e0c945f83 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.23.1 +go 1.23.6 require ( filippo.io/mkcert v1.4.4 @@ -33,7 +33,7 @@ require ( github.com/fogleman/gg v1.3.0 github.com/frankban/quicktest 
v1.14.6 github.com/fxamacker/cbor/v2 v2.7.0 - github.com/gaissmai/bart v0.11.1 + github.com/gaissmai/bart v0.18.0 github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 github.com/go-logr/zapr v1.3.0 github.com/go-ole/go-ole v1.3.0 @@ -129,7 +129,6 @@ require ( github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect github.com/alecthomas/go-check-sumtype v0.1.4 // indirect github.com/alexkohler/nakedret/v2 v2.0.4 // indirect - github.com/bits-and-blooms/bitset v1.13.0 // indirect github.com/bombsimon/wsl/v4 v4.2.1 // indirect github.com/butuzov/mirror v1.1.0 // indirect github.com/catenacyber/perfsprint v0.7.1 // indirect diff --git a/go.sum b/go.sum index f12f5e514..6ea727014 100644 --- a/go.sum +++ b/go.sum @@ -167,8 +167,6 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= -github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY= github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb h1:m935MPodAbYS46DG4pJSv7WO+VECIWUQ7OJYSoTrMh4= @@ -316,8 +314,8 @@ github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= -github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= -github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= +github.com/gaissmai/bart v0.18.0 h1:jQLBT/RduJu0pv/tLwXE+xKPgtWJejbxuXAR+wLJafo= +github.com/gaissmai/bart v0.18.0/go.mod h1:JJzMAhNF5Rjo4SF4jWBrANuJfqY+FvsFhW7t1UZJ+XY= github.com/ghostiam/protogetter v0.3.5 h1:+f7UiF8XNd4w3a//4DnusQ2SZjPkUjxkMEfjbxOK4Ug= github.com/ghostiam/protogetter v0.3.5/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw= github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I= From db231107a2c2f4fbe89b543845f51e1a02593709 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Mon, 10 Feb 2025 11:43:08 -0600 Subject: [PATCH 0500/1708] ssh/tailssh: accept passwords and public keys Some clients don't request 'none' authentication. Instead, they immediately supply a password or public key. This change allows them to do so, but ignores the supplied credentials and authenticates using Tailscale instead. 
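The server-side pattern, sketched here with the upstream golang.org/x/crypto/ssh package purely
for illustration (the patch below implements it in the forked gossh/tempfork packages, with
Tailscale's policy evaluation as the check):

    package sshpolicy

    import "golang.org/x/crypto/ssh"

    // newServerConfig routes every auth method a client may attempt ("none",
    // password, or public key) into the same policy check. The credential
    // material itself is ignored; authorization comes from the policy.
    func newServerConfig(policyCheck func(ssh.ConnMetadata) (*ssh.Permissions, error)) *ssh.ServerConfig {
    	return &ssh.ServerConfig{
    		NoClientAuth: true, // required for NoClientAuthCallback to be consulted
    		NoClientAuthCallback: func(cm ssh.ConnMetadata) (*ssh.Permissions, error) {
    			return policyCheck(cm)
    		},
    		PasswordCallback: func(cm ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
    			return policyCheck(cm) // password contents ignored
    		},
    		PublicKeyCallback: func(cm ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
    			return policyCheck(cm) // key contents ignored
    		},
    	}
    }

The patch also changes how denials are reported: instead of returning a gossh.BannerError, the
server now sends the auth banner itself and returns an empty gossh.PartialSuccessError, so the
client is disconnected immediately rather than invited to retry with another method.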
Updates #14922 Signed-off-by: Percy Wegmann --- ssh/tailssh/tailssh.go | 98 +++++++--- ssh/tailssh/tailssh_integration_test.go | 43 ++++- ssh/tailssh/tailssh_test.go | 226 ++++++++++++++++-------- ssh/tailssh/testcontainers/Dockerfile | 11 +- tempfork/sshtest/ssh/client.go | 8 + tempfork/sshtest/ssh/client_auth.go | 11 +- 6 files changed, 289 insertions(+), 108 deletions(-) diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index 638ff99b8..9aae899c3 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -51,6 +51,11 @@ var ( sshDisableSFTP = envknob.RegisterBool("TS_SSH_DISABLE_SFTP") sshDisableForwarding = envknob.RegisterBool("TS_SSH_DISABLE_FORWARDING") sshDisablePTY = envknob.RegisterBool("TS_SSH_DISABLE_PTY") + + // errTerminal is an empty gossh.PartialSuccessError (with no 'Next' + // authentication methods that may proceed), which results in the SSH + // server immediately disconnecting the client. + errTerminal = &gossh.PartialSuccessError{} ) const ( @@ -230,8 +235,8 @@ type conn struct { finalAction *tailcfg.SSHAction // set by clientAuth info *sshConnInfo // set by setInfo - localUser *userMeta // set by doPolicyAuth - userGroupIDs []string // set by doPolicyAuth + localUser *userMeta // set by clientAuth + userGroupIDs []string // set by clientAuth acceptEnv []string // mu protects the following fields. @@ -255,46 +260,73 @@ func (c *conn) vlogf(format string, args ...any) { } // errDenied is returned by auth callbacks when a connection is denied by the -// policy. It returns a gossh.BannerError to make sure the message gets -// displayed as an auth banner. -func errDenied(message string) error { +// policy. It writes the message to an auth banner and then returns an empty +// gossh.PartialSuccessError in order to stop processing authentication +// attempts and immediately disconnect the client. +func (c *conn) errDenied(message string) error { if message == "" { message = "tailscale: access denied" } - return &gossh.BannerError{ - Message: message, + if err := c.spac.SendAuthBanner(message); err != nil { + c.logf("failed to send auth banner: %s", err) } + return errTerminal } -// bannerError creates a gossh.BannerError that will result in the given -// message being displayed to the client. If err != nil, this also logs -// message:error. The contents of err is not leaked to clients in the banner. -func (c *conn) bannerError(message string, err error) error { +// errBanner writes the given message to an auth banner and then returns an +// empty gossh.PartialSuccessError in order to stop processing authentication +// attempts and immediately disconnect the client. The contents of err is not +// leaked in the auth banner, but it is logged to the server's log. +func (c *conn) errBanner(message string, err error) error { if err != nil { c.logf("%s: %s", message, err) } - return &gossh.BannerError{ - Err: err, - Message: fmt.Sprintf("tailscale: %s", message), + if err := c.spac.SendAuthBanner("tailscale: " + message); err != nil { + c.logf("failed to send auth banner: %s", err) } + return errTerminal +} + +// errUnexpected is returned by auth callbacks that encounter an unexpected +// error, such as being unable to send an auth banner. It sends an empty +// gossh.PartialSuccessError to tell gossh.Server to stop processing +// authentication attempts and instead disconnect immediately. +func (c *conn) errUnexpected(err error) error { + c.logf("terminal error: %s", err) + return errTerminal } // clientAuth is responsible for performing client authentication. 
// // If policy evaluation fails, it returns an error. -// If access is denied, it returns an error. -func (c *conn) clientAuth(cm gossh.ConnMetadata) (*gossh.Permissions, error) { +// If access is denied, it returns an error. This must always be an empty +// gossh.PartialSuccessError to prevent further authentication methods from +// being tried. +func (c *conn) clientAuth(cm gossh.ConnMetadata) (perms *gossh.Permissions, retErr error) { + defer func() { + if pse, ok := retErr.(*gossh.PartialSuccessError); ok { + if pse.Next.GSSAPIWithMICConfig != nil || + pse.Next.KeyboardInteractiveCallback != nil || + pse.Next.PasswordCallback != nil || + pse.Next.PublicKeyCallback != nil { + panic("clientAuth attempted to return a non-empty PartialSuccessError") + } + } else if retErr != nil { + panic(fmt.Sprintf("clientAuth attempted to return a non-PartialSuccessError error of type: %t", retErr)) + } + }() + if c.insecureSkipTailscaleAuth { return &gossh.Permissions{}, nil } if err := c.setInfo(cm); err != nil { - return nil, c.bannerError("failed to get connection info", err) + return nil, c.errBanner("failed to get connection info", err) } action, localUser, acceptEnv, err := c.evaluatePolicy() if err != nil { - return nil, c.bannerError("failed to evaluate SSH policy", err) + return nil, c.errBanner("failed to evaluate SSH policy", err) } c.action0 = action @@ -304,11 +336,11 @@ func (c *conn) clientAuth(cm gossh.ConnMetadata) (*gossh.Permissions, error) { // hold and delegate URL (if necessary). lu, err := userLookup(localUser) if err != nil { - return nil, c.bannerError(fmt.Sprintf("failed to look up local user %q ", localUser), err) + return nil, c.errBanner(fmt.Sprintf("failed to look up local user %q ", localUser), err) } gids, err := lu.GroupIds() if err != nil { - return nil, c.bannerError("failed to look up local user's group IDs", err) + return nil, c.errBanner("failed to look up local user's group IDs", err) } c.userGroupIDs = gids c.localUser = lu @@ -321,7 +353,7 @@ func (c *conn) clientAuth(cm gossh.ConnMetadata) (*gossh.Permissions, error) { metricTerminalAccept.Add(1) if action.Message != "" { if err := c.spac.SendAuthBanner(action.Message); err != nil { - return nil, fmt.Errorf("error sending auth welcome message: %w", err) + return nil, c.errUnexpected(fmt.Errorf("error sending auth welcome message: %w", err)) } } c.finalAction = action @@ -329,11 +361,11 @@ func (c *conn) clientAuth(cm gossh.ConnMetadata) (*gossh.Permissions, error) { case action.Reject: metricTerminalReject.Add(1) c.finalAction = action - return nil, errDenied(action.Message) + return nil, c.errDenied(action.Message) case action.HoldAndDelegate != "": if action.Message != "" { if err := c.spac.SendAuthBanner(action.Message); err != nil { - return nil, fmt.Errorf("error sending hold and delegate message: %w", err) + return nil, c.errUnexpected(fmt.Errorf("error sending hold and delegate message: %w", err)) } } @@ -349,11 +381,11 @@ func (c *conn) clientAuth(cm gossh.ConnMetadata) (*gossh.Permissions, error) { action, err = c.fetchSSHAction(ctx, url) if err != nil { metricTerminalFetchError.Add(1) - return nil, c.bannerError("failed to fetch next SSH action", fmt.Errorf("fetch failed from %s: %w", url, err)) + return nil, c.errBanner("failed to fetch next SSH action", fmt.Errorf("fetch failed from %s: %w", url, err)) } default: metricTerminalMalformed.Add(1) - return nil, c.bannerError("reached Action that had neither Accept, Reject, nor HoldAndDelegate", nil) + return nil, c.errBanner("reached Action that had 
neither Accept, Reject, nor HoldAndDelegate", nil) } } } @@ -390,6 +422,20 @@ func (c *conn) ServerConfig(ctx ssh.Context) *gossh.ServerConfig { return perms, nil }, + PasswordCallback: func(cm gossh.ConnMetadata, pword []byte) (*gossh.Permissions, error) { + // Some clients don't request 'none' authentication. Instead, they + // immediately supply a password. We humor them by accepting the + // password, but authenticate as usual, ignoring the actual value of + // the password. + return c.clientAuth(cm) + }, + PublicKeyCallback: func(cm gossh.ConnMetadata, key gossh.PublicKey) (*gossh.Permissions, error) { + // Some clients don't request 'none' authentication. Instead, they + // immediately supply a public key. We humor them by accepting the + // key, but authenticate as usual, ignoring the actual content of + // the key. + return c.clientAuth(cm) + }, } } @@ -400,7 +446,7 @@ func (srv *server) newConn() (*conn, error) { // Stop accepting new connections. // Connections in the auth phase are handled in handleConnPostSSHAuth. // Existing sessions are terminated by Shutdown. - return nil, errDenied("tailscale: server is shutting down") + return nil, errors.New("server is shutting down") } srv.mu.Unlock() c := &conn{srv: srv} diff --git a/ssh/tailssh/tailssh_integration_test.go b/ssh/tailssh/tailssh_integration_test.go index 5c4f533b1..9ab26e169 100644 --- a/ssh/tailssh/tailssh_integration_test.go +++ b/ssh/tailssh/tailssh_integration_test.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: BSD-3-Clause //go:build integrationtest -// +build integrationtest package tailssh @@ -410,6 +409,48 @@ func TestSSHAgentForwarding(t *testing.T) { } } +// TestIntegrationParamiko attempts to connect to Tailscale SSH using the +// paramiko Python library. This library does not request 'none' auth. This +// test ensures that Tailscale SSH can correctly handle clients that don't +// request 'none' auth and instead immediately authenticate with a public key +// or password. 
+func TestIntegrationParamiko(t *testing.T) { + debugTest.Store(true) + t.Cleanup(func() { + debugTest.Store(false) + }) + + addr := testServer(t, "testuser", true, false) + host, port, err := net.SplitHostPort(addr) + if err != nil { + t.Fatalf("Failed to split addr %q: %s", addr, err) + } + + out, err := exec.Command("python3", "-c", fmt.Sprintf(` +import paramiko.client as pm +from paramiko.ecdsakey import ECDSAKey +client = pm.SSHClient() +client.set_missing_host_key_policy(pm.AutoAddPolicy) +client.connect('%s', port=%s, username='testuser', pkey=ECDSAKey.generate(), allow_agent=False, look_for_keys=False) +client.exec_command('pwd') +`, host, port)).CombinedOutput() + if err != nil { + t.Fatalf("failed to connect with Paramiko using public key auth: %s\n%q", err, string(out)) + } + + out, err = exec.Command("python3", "-c", fmt.Sprintf(` +import paramiko.client as pm +from paramiko.ecdsakey import ECDSAKey +client = pm.SSHClient() +client.set_missing_host_key_policy(pm.AutoAddPolicy) +client.connect('%s', port=%s, username='testuser', password='doesntmatter', allow_agent=False, look_for_keys=False) +client.exec_command('pwd') +`, host, port)).CombinedOutput() + if err != nil { + t.Fatalf("failed to connect with Paramiko using password auth: %s\n%q", err, string(out)) + } +} + func fallbackToSUAvailable() bool { if runtime.GOOS != "linux" { return false diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index d22dfe443..24f0e12a2 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -8,12 +8,15 @@ package tailssh import ( "bytes" "context" + "crypto/ecdsa" "crypto/ed25519" + "crypto/elliptic" "crypto/rand" "encoding/json" "errors" "fmt" "io" + "log" "net" "net/http" "net/http/httptest" @@ -41,7 +44,7 @@ import ( "tailscale.com/sessionrecording" "tailscale.com/tailcfg" "tailscale.com/tempfork/gliderlabs/ssh" - sshtest "tailscale.com/tempfork/sshtest/ssh" + testssh "tailscale.com/tempfork/sshtest/ssh" "tailscale.com/tsd" "tailscale.com/tstest" "tailscale.com/types/key" @@ -56,8 +59,6 @@ import ( "tailscale.com/wgengine" ) -type _ = sshtest.Client // TODO(bradfitz,percy): sshtest; delete this line - func TestMatchRule(t *testing.T) { someAction := new(tailcfg.SSHAction) tests := []struct { @@ -510,9 +511,9 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { defer s.Shutdown() const sshUser = "alice" - cfg := &gossh.ClientConfig{ + cfg := &testssh.ClientConfig{ User: sshUser, - HostKeyCallback: gossh.InsecureIgnoreHostKey(), + HostKeyCallback: testssh.InsecureIgnoreHostKey(), } tests := []struct { @@ -559,12 +560,12 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - c, chans, reqs, err := gossh.NewClientConn(sc, sc.RemoteAddr().String(), cfg) + c, chans, reqs, err := testssh.NewClientConn(sc, sc.RemoteAddr().String(), cfg) if err != nil { t.Errorf("client: %v", err) return } - client := gossh.NewClient(c, chans, reqs) + client := testssh.NewClient(c, chans, reqs) defer client.Close() session, err := client.NewSession() if err != nil { @@ -645,21 +646,21 @@ func TestMultipleRecorders(t *testing.T) { sc, dc := memnet.NewTCPConn(src, dst, 1024) const sshUser = "alice" - cfg := &gossh.ClientConfig{ + cfg := &testssh.ClientConfig{ User: sshUser, - HostKeyCallback: gossh.InsecureIgnoreHostKey(), + HostKeyCallback: testssh.InsecureIgnoreHostKey(), } var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() - c, chans, reqs, err := gossh.NewClientConn(sc, 
sc.RemoteAddr().String(), cfg) + c, chans, reqs, err := testssh.NewClientConn(sc, sc.RemoteAddr().String(), cfg) if err != nil { t.Errorf("client: %v", err) return } - client := gossh.NewClient(c, chans, reqs) + client := testssh.NewClient(c, chans, reqs) defer client.Close() session, err := client.NewSession() if err != nil { @@ -736,21 +737,21 @@ func TestSSHRecordingNonInteractive(t *testing.T) { sc, dc := memnet.NewTCPConn(src, dst, 1024) const sshUser = "alice" - cfg := &gossh.ClientConfig{ + cfg := &testssh.ClientConfig{ User: sshUser, - HostKeyCallback: gossh.InsecureIgnoreHostKey(), + HostKeyCallback: testssh.InsecureIgnoreHostKey(), } var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() - c, chans, reqs, err := gossh.NewClientConn(sc, sc.RemoteAddr().String(), cfg) + c, chans, reqs, err := testssh.NewClientConn(sc, sc.RemoteAddr().String(), cfg) if err != nil { t.Errorf("client: %v", err) return } - client := gossh.NewClient(c, chans, reqs) + client := testssh.NewClient(c, chans, reqs) defer client.Close() session, err := client.NewSession() if err != nil { @@ -886,80 +887,151 @@ func TestSSHAuthFlow(t *testing.T) { }, } s := &server{ - logf: logger.Discard, + logf: log.Printf, } defer s.Shutdown() src, dst := must.Get(netip.ParseAddrPort("100.100.100.101:2231")), must.Get(netip.ParseAddrPort("100.100.100.102:22")) for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - sc, dc := memnet.NewTCPConn(src, dst, 1024) - s.lb = tc.state - sshUser := "alice" - if tc.sshUser != "" { - sshUser = tc.sshUser - } - var passwordUsed atomic.Bool - cfg := &gossh.ClientConfig{ - User: sshUser, - HostKeyCallback: gossh.InsecureIgnoreHostKey(), - Auth: []gossh.AuthMethod{ - gossh.PasswordCallback(func() (secret string, err error) { - if !tc.usesPassword { - t.Error("unexpected use of PasswordCallback") - return "", errors.New("unexpected use of PasswordCallback") - } + for _, authMethods := range [][]string{nil, {"publickey", "password"}, {"password", "publickey"}} { + t.Run(fmt.Sprintf("%s-skip-none-auth-%v", tc.name, strings.Join(authMethods, "-then-")), func(t *testing.T) { + sc, dc := memnet.NewTCPConn(src, dst, 1024) + s.lb = tc.state + sshUser := "alice" + if tc.sshUser != "" { + sshUser = tc.sshUser + } + + wantBanners := slices.Clone(tc.wantBanners) + noneAuthEnabled := len(authMethods) == 0 + + var publicKeyUsed atomic.Bool + var passwordUsed atomic.Bool + var methods []testssh.AuthMethod + + for _, authMethod := range authMethods { + switch authMethod { + case "publickey": + methods = append(methods, + testssh.PublicKeysCallback(func() (signers []testssh.Signer, err error) { + publicKeyUsed.Store(true) + key, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + if err != nil { + return nil, err + } + sig, err := testssh.NewSignerFromKey(key) + if err != nil { + return nil, err + } + return []testssh.Signer{sig}, nil + })) + case "password": + methods = append(methods, testssh.PasswordCallback(func() (secret string, err error) { + passwordUsed.Store(true) + return "any-pass", nil + })) + } + } + + if noneAuthEnabled && tc.usesPassword { + methods = append(methods, testssh.PasswordCallback(func() (secret string, err error) { passwordUsed.Store(true) return "any-pass", nil - }), - }, - BannerCallback: func(message string) error { - if len(tc.wantBanners) == 0 { - t.Errorf("unexpected banner: %q", message) - } else if message != tc.wantBanners[0] { - t.Errorf("banner = %q; want %q", message, tc.wantBanners[0]) - } else { - t.Logf("banner = %q", message) - tc.wantBanners 
= tc.wantBanners[1:] + })) + } + + cfg := &testssh.ClientConfig{ + User: sshUser, + HostKeyCallback: testssh.InsecureIgnoreHostKey(), + SkipNoneAuth: !noneAuthEnabled, + Auth: methods, + BannerCallback: func(message string) error { + if len(wantBanners) == 0 { + t.Errorf("unexpected banner: %q", message) + } else if message != wantBanners[0] { + t.Errorf("banner = %q; want %q", message, wantBanners[0]) + } else { + t.Logf("banner = %q", message) + wantBanners = wantBanners[1:] + } + return nil + }, + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + c, chans, reqs, err := testssh.NewClientConn(sc, sc.RemoteAddr().String(), cfg) + if err != nil { + if !tc.authErr { + t.Errorf("client: %v", err) + } + return + } else if tc.authErr { + c.Close() + t.Errorf("client: expected error, got nil") + return } - return nil - }, - } - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - c, chans, reqs, err := gossh.NewClientConn(sc, sc.RemoteAddr().String(), cfg) - if err != nil { - if !tc.authErr { + client := testssh.NewClient(c, chans, reqs) + defer client.Close() + session, err := client.NewSession() + if err != nil { t.Errorf("client: %v", err) + return } - return - } else if tc.authErr { - c.Close() - t.Errorf("client: expected error, got nil") - return + defer session.Close() + _, err = session.CombinedOutput("echo Ran echo!") + if err != nil { + t.Errorf("client: %v", err) + } + }() + if err := s.HandleSSHConn(dc); err != nil { + t.Errorf("unexpected error: %v", err) } - client := gossh.NewClient(c, chans, reqs) - defer client.Close() - session, err := client.NewSession() - if err != nil { - t.Errorf("client: %v", err) - return + wg.Wait() + if len(wantBanners) > 0 { + t.Errorf("missing banners: %v", wantBanners) } - defer session.Close() - _, err = session.CombinedOutput("echo Ran echo!") - if err != nil { - t.Errorf("client: %v", err) + + // Check to see which callbacks were invoked. + // + // When `none` auth is enabled, the public key callback should + // never fire, and the password callback should only fire if + // authentication succeeded and the client was trying to force + // password authentication by connecting with the '-password' + // username suffix. + // + // When skipping `none` auth, the first callback should always + // fire, and the 2nd callback should fire only if + // authentication failed. 
+ wantPublicKey := false + wantPassword := false + if noneAuthEnabled { + wantPassword = !tc.authErr && tc.usesPassword + } else { + for i, authMethod := range authMethods { + switch authMethod { + case "publickey": + wantPublicKey = i == 0 || tc.authErr + case "password": + wantPassword = i == 0 || tc.authErr + } + } } - }() - if err := s.HandleSSHConn(dc); err != nil { - t.Errorf("unexpected error: %v", err) - } - wg.Wait() - if len(tc.wantBanners) > 0 { - t.Errorf("missing banners: %v", tc.wantBanners) - } - }) + + if wantPublicKey && !publicKeyUsed.Load() { + t.Error("public key should have been attempted") + } else if !wantPublicKey && publicKeyUsed.Load() { + t.Errorf("public key should not have been attempted") + } + + if wantPassword && !passwordUsed.Load() { + t.Error("password should have been attempted") + } else if !wantPassword && passwordUsed.Load() { + t.Error("password should not have been attempted") + } + }) + } } } diff --git a/ssh/tailssh/testcontainers/Dockerfile b/ssh/tailssh/testcontainers/Dockerfile index c94c961d3..4ef1c1eb0 100644 --- a/ssh/tailssh/testcontainers/Dockerfile +++ b/ssh/tailssh/testcontainers/Dockerfile @@ -3,9 +3,12 @@ FROM ${BASE} ARG BASE -RUN echo "Install openssh, needed for scp." -RUN if echo "$BASE" | grep "ubuntu:"; then apt-get update -y && apt-get install -y openssh-client; fi -RUN if echo "$BASE" | grep "alpine:"; then apk add openssh; fi +RUN echo "Install openssh, needed for scp. Also install python3" +RUN if echo "$BASE" | grep "ubuntu:"; then apt-get update -y && apt-get install -y openssh-client python3 python3-pip; fi +RUN if echo "$BASE" | grep "alpine:"; then apk add openssh python3 py3-pip; fi + +RUN echo "Install paramiko" +RUN pip3 install paramiko==3.5.1 || pip3 install --break-system-packages paramiko==3.5.1 # Note - on Ubuntu, we do not create the user's home directory, pam_mkhomedir will do that # for us, and we want to test that PAM gets triggered by Tailscale SSH. @@ -33,6 +36,8 @@ RUN if echo "$BASE" | grep "ubuntu:"; then rm -Rf /home/testuser; fi RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegrationSCP RUN if echo "$BASE" | grep "ubuntu:"; then rm -Rf /home/testuser; fi RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegrationSSH +RUN if echo "$BASE" | grep "ubuntu:"; then rm -Rf /home/testuser; fi +RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegrationParamiko RUN echo "Then run tests as non-root user testuser and make sure tests still pass." RUN touch /tmp/tailscalessh.log diff --git a/tempfork/sshtest/ssh/client.go b/tempfork/sshtest/ssh/client.go index fd8c49749..5876e6421 100644 --- a/tempfork/sshtest/ssh/client.go +++ b/tempfork/sshtest/ssh/client.go @@ -239,6 +239,14 @@ type ClientConfig struct { // // A Timeout of zero means no timeout. Timeout time.Duration + + // SkipNoneAuth allows skipping the initial "none" auth request. This is unusual + // behavior, but it is allowed by [RFC4252 5.2](https://datatracker.ietf.org/doc/html/rfc4252#section-5.2), + // and some clients in the wild behave like this. One such client is the paramiko Python + // library, which is used in pgadmin4 via the sshtunnel library. + // When SkipNoneAuth is true, the client will attempt all configured + // [AuthMethod]s until one works, or it runs out. 
+ SkipNoneAuth bool } // InsecureIgnoreHostKey returns a function that can be used for diff --git a/tempfork/sshtest/ssh/client_auth.go b/tempfork/sshtest/ssh/client_auth.go index b86dde151..af25a4f01 100644 --- a/tempfork/sshtest/ssh/client_auth.go +++ b/tempfork/sshtest/ssh/client_auth.go @@ -68,7 +68,16 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { var lastMethods []string sessionID := c.transport.getSessionID() - for auth := AuthMethod(new(noneAuth)); auth != nil; { + var auth AuthMethod + if !config.SkipNoneAuth { + auth = AuthMethod(new(noneAuth)) + } else if len(config.Auth) > 0 { + auth = config.Auth[0] + for _, a := range config.Auth { + lastMethods = append(lastMethods, a.method()) + } + } + for auth != nil { ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions) if err != nil { // On disconnect, return error immediately From 1d035db4df78bcca717417ec45cecfaf4d12ee1d Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Thu, 13 Feb 2025 16:12:00 -0800 Subject: [PATCH 0501/1708] types/bools: fix doc typo (#15021) The Select function was renamed as IfElse. Updates #cleanup Signed-off-by: Joe Tsai --- types/bools/bools.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/bools/bools.go b/types/bools/bools.go index 7cef17cf0..e64068746 100644 --- a/types/bools/bools.go +++ b/types/bools/bools.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Package bools contains the [Int], [Compare], and [Select] functions. +// Package bools contains the [Int], [Compare], and [IfElse] functions. package bools // Int returns 1 for true and 0 for false. From e1425713974c7c432ded6f2595f511c69a6064db Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Fri, 14 Feb 2025 15:25:48 +0000 Subject: [PATCH 0502/1708] ipn/ipnlocal: add GetFilterForTest (#15025) Needed to test full packet filter in e2e tests. See tailscale/corp#26596 Updates tailscale/corp#20514 Signed-off-by: James Sanderson --- ipn/ipnlocal/local.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 25f4d552f..e09061041 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1438,6 +1438,10 @@ func (b *LocalBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap { return nil } +func (b *LocalBackend) GetFilterForTest() *filter.Filter { + return b.filterAtomic.Load() +} + // SetControlClientStatus is the callback invoked by the control client whenever it posts a new status. // Among other things, this is where we update the netmap, packet filters, DNS and DERP maps. func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st controlclient.Status) { From 4c3c04a413cb25589faf27d8db0f33e58c71c709 Mon Sep 17 00:00:00 2001 From: kari-ts <135075563+kari-ts@users.noreply.github.com> Date: Fri, 14 Feb 2025 09:56:50 -0800 Subject: [PATCH 0503/1708] ipn, tailscale/cli: add TaildropTargetStatus and remove race with FileTargets (#15017) Introduce new TaildropTargetStatus in PeerStatus Refactor getTargetStableID to solely rely on Status() instead of calling FileTargets(). This removes a possible race condition between the two calls and provides more detailed failure information if a peer can't receive files. 
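For other LocalAPI consumers, use of the new field looks roughly like the sketch below
(it assumes the client/tailscale LocalClient as used by the CLI; it is not part of the patch):

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"tailscale.com/client/tailscale"
    	"tailscale.com/ipn/ipnstate"
    )

    func main() {
    	var lc tailscale.LocalClient
    	st, err := lc.Status(context.Background())
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, ps := range st.Peer {
    		if ps.TaildropTarget == ipnstate.TaildropTargetAvailable {
    			fmt.Printf("%s can receive files\n", ps.DNSName)
    		}
    	}
    }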
Updates tailscale/tailscale#14393 Signed-off-by: kari-ts --- cmd/tailscale/cli/file.go | 96 ++++++++++++++++++++++++++------------- ipn/ipnlocal/local.go | 36 +++++++++++++++ ipn/ipnstate/ipnstate.go | 21 +++++++++ 3 files changed, 121 insertions(+), 32 deletions(-) diff --git a/cmd/tailscale/cli/file.go b/cmd/tailscale/cli/file.go index cd7762446..3de5f9766 100644 --- a/cmd/tailscale/cli/file.go +++ b/cmd/tailscale/cli/file.go @@ -28,6 +28,7 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/cmd/tailscale/cli/ffcomplete" "tailscale.com/envknob" + "tailscale.com/ipn/ipnstate" "tailscale.com/net/tsaddr" "tailscale.com/syncs" "tailscale.com/tailcfg" @@ -268,46 +269,77 @@ func getTargetStableID(ctx context.Context, ipStr string) (id tailcfg.StableNode if err != nil { return "", false, err } - fts, err := localClient.FileTargets(ctx) + + st, err := localClient.Status(ctx) if err != nil { - return "", false, err - } - for _, ft := range fts { - n := ft.Node - for _, a := range n.Addresses { - if a.Addr() != ip { - continue + // This likely means tailscaled is unreachable or returned an error on /localapi/v0/status. + return "", false, fmt.Errorf("failed to get local status: %w", err) + } + if st == nil { + // Handle the case if the daemon returns nil with no error. + return "", false, errors.New("no status available") + } + if st.Self == nil { + // We have a status structure, but it doesn’t include Self info. Probably not connected. + return "", false, errors.New("local node is not configured or missing Self information") + } + + // Find the PeerStatus that corresponds to ip. + var foundPeer *ipnstate.PeerStatus +peerLoop: + for _, ps := range st.Peer { + for _, pip := range ps.TailscaleIPs { + if pip == ip { + foundPeer = ps + break peerLoop } - isOffline = n.Online != nil && !*n.Online - return n.StableID, isOffline, nil } } - return "", false, fileTargetErrorDetail(ctx, ip) -} -// fileTargetErrorDetail returns a non-nil error saying why ip is an -// invalid file sharing target. -func fileTargetErrorDetail(ctx context.Context, ip netip.Addr) error { - found := false - if st, err := localClient.Status(ctx); err == nil && st.Self != nil { - for _, peer := range st.Peer { - for _, pip := range peer.TailscaleIPs { - if pip == ip { - found = true - if peer.UserID != st.Self.UserID { - return errors.New("owned by different user; can only send files to your own devices") - } - } - } + // If we didn’t find a matching peer at all: + if foundPeer == nil { + if !tsaddr.IsTailscaleIP(ip) { + return "", false, fmt.Errorf("unknown target; %v is not a Tailscale IP address", ip) } + return "", false, errors.New("unknown target; not in your Tailnet") } - if found { - return errors.New("target seems to be running an old Tailscale version") - } - if !tsaddr.IsTailscaleIP(ip) { - return fmt.Errorf("unknown target; %v is not a Tailscale IP address", ip) + + // We found a peer. 
Decide whether we can send files to it: + isOffline = !foundPeer.Online + + switch foundPeer.TaildropTarget { + case ipnstate.TaildropTargetAvailable: + return foundPeer.ID, isOffline, nil + + case ipnstate.TaildropTargetNoNetmapAvailable: + return "", isOffline, errors.New("cannot send files: no netmap available on this node") + + case ipnstate.TaildropTargetIpnStateNotRunning: + return "", isOffline, errors.New("cannot send files: local Tailscale is not connected to the tailnet") + + case ipnstate.TaildropTargetMissingCap: + return "", isOffline, errors.New("cannot send files: missing required Taildrop capability") + + case ipnstate.TaildropTargetOffline: + return "", isOffline, errors.New("cannot send files: peer is offline") + + case ipnstate.TaildropTargetNoPeerInfo: + return "", isOffline, errors.New("cannot send files: invalid or unrecognized peer") + + case ipnstate.TaildropTargetUnsupportedOS: + return "", isOffline, errors.New("cannot send files: target's OS does not support Taildrop") + + case ipnstate.TaildropTargetNoPeerAPI: + return "", isOffline, errors.New("cannot send files: target is not advertising a file sharing API") + + case ipnstate.TaildropTargetOwnedByOtherUser: + return "", isOffline, errors.New("cannot send files: peer is owned by a different user") + + case ipnstate.TaildropTargetUnknown: + fallthrough + default: + return "", isOffline, fmt.Errorf("cannot send files: unknown or indeterminate reason") } - return errors.New("unknown target; not in your Tailnet") } const maxSniff = 4 << 20 diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index e09061041..3cd8d3c99 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1256,6 +1256,7 @@ func (b *LocalBackend) populatePeerStatusLocked(sb *ipnstate.StatusBuilder) { SSH_HostKeys: p.Hostinfo().SSH_HostKeys().AsSlice(), Location: p.Hostinfo().Location().AsStruct(), Capabilities: p.Capabilities().AsSlice(), + TaildropTarget: b.taildropTargetStatus(p), } if cm := p.CapMap(); cm.Len() > 0 { ps.CapMap = make(tailcfg.NodeCapMap, cm.Len()) @@ -6522,6 +6523,41 @@ func (b *LocalBackend) FileTargets() ([]*apitype.FileTarget, error) { return ret, nil } +func (b *LocalBackend) taildropTargetStatus(p tailcfg.NodeView) ipnstate.TaildropTargetStatus { + if b.netMap == nil || b.state != ipn.Running { + return ipnstate.TaildropTargetIpnStateNotRunning + } + if b.netMap == nil { + return ipnstate.TaildropTargetNoNetmapAvailable + } + if !b.capFileSharing { + return ipnstate.TaildropTargetMissingCap + } + + if !p.Online().Get() { + return ipnstate.TaildropTargetOffline + } + + if !p.Valid() { + return ipnstate.TaildropTargetNoPeerInfo + } + if b.netMap.User() != p.User() { + // Different user must have the explicit file sharing target capability + if p.Addresses().Len() == 0 || + !b.peerHasCapLocked(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) { + return ipnstate.TaildropTargetOwnedByOtherUser + } + } + + if p.Hostinfo().OS() == "tvOS" { + return ipnstate.TaildropTargetUnsupportedOS + } + if peerAPIBase(b.netMap, p) == "" { + return ipnstate.TaildropTargetNoPeerAPI + } + return ipnstate.TaildropTargetAvailable +} + // peerIsTaildropTargetLocked reports whether p is a valid Taildrop file // recipient from this node according to its ownership and the capabilities in // the netmap. 
diff --git a/ipn/ipnstate/ipnstate.go b/ipn/ipnstate/ipnstate.go index 5ab9b5bdf..bc1ba615d 100644 --- a/ipn/ipnstate/ipnstate.go +++ b/ipn/ipnstate/ipnstate.go @@ -270,6 +270,12 @@ type PeerStatus struct { // PeerAPIURL are the URLs of the node's PeerAPI servers. PeerAPIURL []string + // TaildropTargetStatus represents the node's eligibility to have files shared to it. + TaildropTarget TaildropTargetStatus + + // Reason why this peer cannot receive files. Empty if CanReceiveFiles=true + NoFileSharingReason string + // Capabilities are capabilities that the node has. // They're free-form strings, but should be in the form of URLs/URIs // such as: @@ -318,6 +324,21 @@ type PeerStatus struct { Location *tailcfg.Location `json:",omitempty"` } +type TaildropTargetStatus int + +const ( + TaildropTargetUnknown TaildropTargetStatus = iota + TaildropTargetAvailable + TaildropTargetNoNetmapAvailable + TaildropTargetIpnStateNotRunning + TaildropTargetMissingCap + TaildropTargetOffline + TaildropTargetNoPeerInfo + TaildropTargetUnsupportedOS + TaildropTargetNoPeerAPI + TaildropTargetOwnedByOtherUser +) + // HasCap reports whether ps has the given capability. func (ps *PeerStatus) HasCap(cap tailcfg.NodeCapability) bool { return ps.CapMap.Contains(cap) From 717fa68f3a1af5cef7a6ad63978161e5cc02ec72 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Wed, 29 Jan 2025 21:13:01 +0000 Subject: [PATCH 0504/1708] tailcfg: read max key duration from node cap map [capver 114] This will be used by clients to make better decisions on when to warn users about impending key expiry. Updates tailscale/corp#16016 Signed-off-by: James Sanderson --- tailcfg/tailcfg.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 35a35dd76..7038d8adc 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -158,7 +158,8 @@ type CapabilityVersion int // - 111: 2025-01-14: Client supports a peer having Node.HomeDERP (issue #14636) // - 112: 2025-01-14: Client interprets AllowedIPs of nil as meaning same as Addresses // - 113: 2025-01-20: Client communicates to control whether funnel is enabled by sending Hostinfo.IngressEnabled (#14688) -const CurrentCapabilityVersion CapabilityVersion = 113 +// - 114: 2025-01-30: NodeAttrMaxKeyDuration CapMap defined, clients might use it (no tailscaled code change) (#14829) +const CurrentCapabilityVersion CapabilityVersion = 114 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -2430,6 +2431,12 @@ const ( // If multiple values of this key exist, they should be merged in sequence // (replace conflicting keys). NodeAttrServiceHost NodeCapability = "service-host" + + // NodeAttrMaxKeyDuration represents the MaxKeyDuration setting on the + // tailnet. The value of this key in [NodeCapMap] will be only one entry of + // type float64 representing the duration in seconds. This cap will be + // omitted if the tailnet's MaxKeyDuration is the default. + NodeAttrMaxKeyDuration NodeCapability = "tailnet.maxKeyDuration" ) // SetDNSRequest is a request to add a DNS record. From 45f29a208a41b4da213e6f9c2433f727ab46aee7 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Thu, 30 Jan 2025 14:20:27 +0000 Subject: [PATCH 0505/1708] control/controlclient,tailcfg:types: remove MaxKeyduration from NetMap This reverts most of 124dc10261ea (#10401). Removing in favour of adding this in CapMaps instead (#14829). 
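For reference, the CapMap-based replacement (added in the previous patch as
NodeAttrMaxKeyDuration) carries the tailnet's MaxKeyDuration as a single
float64 number of seconds. A client-side sketch of reading it, assuming the
self node's tailcfg.NodeCapMap is at hand and the standard encoding/json and
time packages are imported (the real client may use a different decoding
helper):

    if vals, ok := capMap[tailcfg.NodeAttrMaxKeyDuration]; ok && len(vals) == 1 {
        var secs float64 // the cap value is documented as seconds
        if err := json.Unmarshal([]byte(vals[0]), &secs); err == nil {
            maxKeyDuration := time.Duration(secs * float64(time.Second))
            _ = maxKeyDuration // e.g. feed into key-expiry warning logic
        }
    }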
Updates tailscale/corp#16016 Signed-off-by: James Sanderson --- control/controlclient/map.go | 5 ----- tailcfg/tailcfg.go | 4 ---- types/netmap/netmap.go | 3 --- types/netmap/nodemut.go | 3 +-- 4 files changed, 1 insertion(+), 14 deletions(-) diff --git a/control/controlclient/map.go b/control/controlclient/map.go index 9f0e706cd..df2182c8b 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -89,7 +89,6 @@ type mapSession struct { lastPopBrowserURL string lastTKAInfo *tailcfg.TKAInfo lastNetmapSummary string // from NetworkMap.VeryConcise - lastMaxExpiry time.Duration } // newMapSession returns a mostly unconfigured new mapSession. @@ -384,9 +383,6 @@ func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) { if resp.TKAInfo != nil { ms.lastTKAInfo = resp.TKAInfo } - if resp.MaxKeyDuration > 0 { - ms.lastMaxExpiry = resp.MaxKeyDuration - } } var ( @@ -819,7 +815,6 @@ func (ms *mapSession) netmap() *netmap.NetworkMap { DERPMap: ms.lastDERPMap, ControlHealth: ms.lastHealth, TKAEnabled: ms.lastTKAInfo != nil && !ms.lastTKAInfo.Disabled, - MaxKeyDuration: ms.lastMaxExpiry, } if ms.lastTKAInfo != nil && ms.lastTKAInfo.Head != "" { diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 7038d8adc..4d30f6501 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2022,10 +2022,6 @@ type MapResponse struct { // auto-update setting doesn't change if the tailnet admin flips the // default after the node registered. DefaultAutoUpdate opt.Bool `json:",omitempty"` - - // MaxKeyDuration describes the MaxKeyDuration setting for the tailnet. - // If zero, the value is unchanged. - MaxKeyDuration time.Duration `json:",omitempty"` } // ClientVersion is information about the latest client version that's available diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index 94db7a477..b1eecaa8f 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -79,9 +79,6 @@ type NetworkMap struct { // UserProfiles contains the profile information of UserIDs referenced // in SelfNode and Peers. UserProfiles map[tailcfg.UserID]tailcfg.UserProfileView - - // MaxKeyDuration describes the MaxKeyDuration setting for the tailnet. - MaxKeyDuration time.Duration } // User returns nm.SelfNode.User if nm.SelfNode is non-nil, otherwise it returns diff --git a/types/netmap/nodemut.go b/types/netmap/nodemut.go index 6f116059e..e31c731be 100644 --- a/types/netmap/nodemut.go +++ b/types/netmap/nodemut.go @@ -176,6 +176,5 @@ func mapResponseContainsNonPatchFields(res *tailcfg.MapResponse) bool { // function is called, so it should never be set anyway. 
But for // completedness, and for tests, check it too: res.PeersChanged != nil || - res.DefaultAutoUpdate != "" || - res.MaxKeyDuration > 0 + res.DefaultAutoUpdate != "" } From e11ff284435d3789136e583b82c4e416f6c7f925 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 14 Feb 2025 18:07:17 +0000 Subject: [PATCH 0506/1708] cmd/k8s-operator: allow to optionally configure an HTTP endpoint for the HA Ingress (#14986) Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/ingress-for-pg.go | 61 +++- cmd/k8s-operator/ingress-for-pg_test.go | 401 ++++++++++++++++++------ 2 files changed, 352 insertions(+), 110 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 5a67a891f..b07882deb 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -46,6 +46,9 @@ const ( FinalizerNamePG = "tailscale.com/ingress-pg-finalizer" indexIngressProxyGroup = ".metadata.annotations.ingress-proxy-group" + // annotationHTTPEndpoint can be used to configure the Ingress to expose an HTTP endpoint to tailnet (as + // well as the default HTTPS endpoint). + annotationHTTPEndpoint = "tailscale.com/http-endpoint" ) var gaugePGIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressPGResourceCount) @@ -202,16 +205,16 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin // 3. Ensure that the serve config for the ProxyGroup contains the VIPService cm, cfg, err := a.proxyGroupServeConfig(ctx, pgName) if err != nil { - return fmt.Errorf("error getting ingress serve config: %w", err) + return fmt.Errorf("error getting Ingress serve config: %w", err) } if cm == nil { - logger.Infof("no ingress serve config ConfigMap found, unable to update serve config. Ensure that ProxyGroup is healthy.") + logger.Infof("no Ingress serve config ConfigMap found, unable to update serve config. Ensure that ProxyGroup is healthy.") return nil } ep := ipn.HostPort(fmt.Sprintf("%s:443", dnsName)) handlers, err := handlersForIngress(ctx, ing, a.Client, a.recorder, dnsName, logger) if err != nil { - return fmt.Errorf("failed to get handlers for ingress: %w", err) + return fmt.Errorf("failed to get handlers for Ingress: %w", err) } ingCfg := &ipn.ServiceConfig{ TCP: map[uint16]*ipn.TCPPortHandler{ @@ -225,6 +228,19 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin }, }, } + + // Add HTTP endpoint if configured. 
+ if isHTTPEndpointEnabled(ing) { + logger.Infof("exposing Ingress over HTTP") + epHTTP := ipn.HostPort(fmt.Sprintf("%s:80", dnsName)) + ingCfg.TCP[80] = &ipn.TCPPortHandler{ + HTTP: true, + } + ingCfg.Web[epHTTP] = &ipn.WebServerConfig{ + Handlers: handlers, + } + } + var gotCfg *ipn.ServiceConfig if cfg != nil && cfg.Services != nil { gotCfg = cfg.Services[serviceName] @@ -248,16 +264,23 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin tags = strings.Split(tstr, ",") } + vipPorts := []string{"443"} // always 443 for Ingress + if isHTTPEndpointEnabled(ing) { + vipPorts = append(vipPorts, "80") + } + vipSvc := &VIPService{ Name: serviceName, Tags: tags, - Ports: []string{"443"}, // always 443 for Ingress + Ports: vipPorts, Comment: fmt.Sprintf(VIPSvcOwnerRef, ing.UID), } if existingVIPSvc != nil { vipSvc.Addrs = existingVIPSvc.Addrs } - if existingVIPSvc == nil || !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) { + if existingVIPSvc == nil || + !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) || + !reflect.DeepEqual(vipSvc.Ports, existingVIPSvc.Ports) { logger.Infof("Ensuring VIPService %q exists and is up to date", hostname) if err := a.tsClient.createOrUpdateVIPService(ctx, vipSvc); err != nil { logger.Infof("error creating VIPService: %v", err) @@ -267,16 +290,22 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin // 5. Update Ingress status oldStatus := ing.Status.DeepCopy() - // TODO(irbekrm): once we have ingress ProxyGroup, we can determine if instances are ready to route traffic to the VIPService + ports := []networkingv1.IngressPortStatus{ + { + Protocol: "TCP", + Port: 443, + }, + } + if isHTTPEndpointEnabled(ing) { + ports = append(ports, networkingv1.IngressPortStatus{ + Protocol: "TCP", + Port: 80, + }) + } ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{ { Hostname: dnsName, - Ports: []networkingv1.IngressPortStatus{ - { - Protocol: "TCP", - Port: 443, - }, - }, + Ports: ports, }, } if apiequality.Semantic.DeepEqual(oldStatus, ing.Status) { @@ -569,3 +598,11 @@ func (a *IngressPGReconciler) deleteVIPServiceIfExists(ctx context.Context, name } return nil } + +// isHTTPEndpointEnabled returns true if the Ingress has been configured to expose an HTTP endpoint to tailnet. 
+func isHTTPEndpointEnabled(ing *networkingv1.Ingress) bool { + if ing == nil { + return false + } + return ing.Annotations[annotationHTTPEndpoint] == "enabled" +} diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 9317a44d4..ee8a94336 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -8,6 +8,8 @@ package main import ( "context" "encoding/json" + "maps" + "reflect" "testing" "slices" @@ -18,87 +20,78 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/tailcfg" "tailscale.com/types/ptr" ) func TestIngressPGReconciler(t *testing.T) { - tsIngressClass := &networkingv1.IngressClass{ - ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, - Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}, - } + ingPGR, fc, ft := setupIngressTest(t) - // Pre-create the ProxyGroup - pg := &tsapi.ProxyGroup{ + ing := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{ - Name: "test-pg", - Generation: 1, + Name: "test-ingress", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + }, }, - Spec: tsapi.ProxyGroupSpec{ - Type: tsapi.ProxyGroupTypeIngress, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 8080, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"my-svc.tailnetxyz.ts.net"}}, + }, }, } + mustCreate(t, fc, ing) - // Pre-create the ConfigMap for the ProxyGroup - pgConfigMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pg-ingress-config", - Namespace: "operator-ns", - }, - BinaryData: map[string][]byte{ - "serve-config.json": []byte(`{"Services":{}}`), - }, - } + // Verify initial reconciliation + expectReconciled(t, ingPGR, "default", "test-ingress") + verifyServeConfig(t, fc, "svc:my-svc", false) + verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) - fc := fake.NewClientBuilder(). - WithScheme(tsapi.GlobalScheme). - WithObjects(pg, pgConfigMap, tsIngressClass). - WithStatusSubresource(pg). 
- Build() - mustUpdateStatus(t, fc, "", pg.Name, func(pg *tsapi.ProxyGroup) { - pg.Status.Conditions = []metav1.Condition{ - { - Type: string(tsapi.ProxyGroupReady), - Status: metav1.ConditionTrue, - ObservedGeneration: 1, - }, - } + mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { + ing.Annotations["tailscale.com/tags"] = "tag:custom,tag:test" }) - ft := &fakeTSClient{} - fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} - zl, err := zap.NewDevelopment() + expectReconciled(t, ingPGR, "default", "test-ingress") + + // Verify VIPService uses custom tags + vipSvc, err := ft.getVIPService(context.Background(), "svc:my-svc") if err != nil { - t.Fatal(err) + t.Fatalf("getting VIPService: %v", err) } - - lc := &fakeLocalClient{ - status: &ipnstate.Status{ - CurrentTailnet: &ipnstate.TailnetStatus{ - MagicDNSSuffix: "ts.net", - }, - }, + if vipSvc == nil { + t.Fatal("VIPService not created") } - ingPGR := &IngressPGReconciler{ - Client: fc, - tsClient: ft, - tsnetServer: fakeTsnetServer, - defaultTags: []string{"tag:k8s"}, - tsNamespace: "operator-ns", - logger: zl.Sugar(), - recorder: record.NewFakeRecorder(10), - lc: lc, + wantTags := []string{"tag:custom", "tag:test"} // custom tags only + gotTags := slices.Clone(vipSvc.Tags) + slices.Sort(gotTags) + slices.Sort(wantTags) + if !slices.Equal(gotTags, wantTags) { + t.Errorf("incorrect VIPService tags: got %v, want %v", gotTags, wantTags) } - // Test 1: Default tags - ing := &networkingv1.Ingress{ + // Create second Ingress + ing2 := &networkingv1.Ingress{ TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{ - Name: "test-ingress", + Name: "my-other-ingress", Namespace: "default", - UID: types.UID("1234-UID"), + UID: types.UID("5678-UID"), Annotations: map[string]string{ "tailscale.com/proxy-group": "test-pg", }, @@ -114,16 +107,28 @@ func TestIngressPGReconciler(t *testing.T) { }, }, TLS: []networkingv1.IngressTLS{ - {Hosts: []string{"my-svc.tailnetxyz.ts.net"}}, + {Hosts: []string{"my-other-svc.tailnetxyz.ts.net"}}, }, }, } - mustCreate(t, fc, ing) + mustCreate(t, fc, ing2) - // Verify initial reconciliation - expectReconciled(t, ingPGR, "default", "test-ingress") + // Verify second Ingress reconciliation + expectReconciled(t, ingPGR, "default", "my-other-ingress") + verifyServeConfig(t, fc, "svc:my-other-svc", false) + verifyVIPService(t, ft, "svc:my-other-svc", []string{"443"}) - // Get and verify the ConfigMap was updated + // Verify first Ingress is still working + verifyServeConfig(t, fc, "svc:my-svc", false) + verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) + + // Delete second Ingress + if err := fc.Delete(context.Background(), ing2); err != nil { + t.Fatalf("deleting second Ingress: %v", err) + } + expectReconciled(t, ingPGR, "default", "my-other-ingress") + + // Verify second Ingress cleanup cm := &corev1.ConfigMap{} if err := fc.Get(context.Background(), types.NamespacedName{ Name: "test-pg-ingress-config", @@ -137,46 +142,16 @@ func TestIngressPGReconciler(t *testing.T) { t.Fatalf("unmarshaling serve config: %v", err) } + // Verify first Ingress is still configured if cfg.Services["svc:my-svc"] == nil { - t.Error("expected serve config to contain VIPService configuration") - } - - // Verify VIPService uses default tags - vipSvc, err := ft.getVIPService(context.Background(), "svc:my-svc") - if err != nil { - t.Fatalf("getting VIPService: %v", err) - } - if vipSvc == nil { - t.Fatal("VIPService not created") - } - wantTags 
:= []string{"tag:k8s"} // default tags - if !slices.Equal(vipSvc.Tags, wantTags) { - t.Errorf("incorrect VIPService tags: got %v, want %v", vipSvc.Tags, wantTags) + t.Error("first Ingress service config was incorrectly removed") } - - // Test 2: Custom tags - mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { - ing.Annotations["tailscale.com/tags"] = "tag:custom,tag:test" - }) - expectReconciled(t, ingPGR, "default", "test-ingress") - - // Verify VIPService uses custom tags - vipSvc, err = ft.getVIPService(context.Background(), "svc:my-svc") - if err != nil { - t.Fatalf("getting VIPService: %v", err) - } - if vipSvc == nil { - t.Fatal("VIPService not created") - } - wantTags = []string{"tag:custom", "tag:test"} // custom tags only - gotTags := slices.Clone(vipSvc.Tags) - slices.Sort(gotTags) - slices.Sort(wantTags) - if !slices.Equal(gotTags, wantTags) { - t.Errorf("incorrect VIPService tags: got %v, want %v", gotTags, wantTags) + // Verify second Ingress was cleaned up + if cfg.Services["svc:my-other-svc"] != nil { + t.Error("second Ingress service config was not cleaned up") } - // Delete the Ingress and verify cleanup + // Delete the first Ingress and verify cleanup if err := fc.Delete(context.Background(), ing); err != nil { t.Fatalf("deleting Ingress: %v", err) } @@ -335,3 +310,233 @@ func TestValidateIngress(t *testing.T) { }) } } + +func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { + ingPGR, fc, ft := setupIngressTest(t) + + // Create test Ingress with HTTP endpoint enabled + ing := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + "tailscale.com/http-endpoint": "enabled", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 8080, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"my-svc"}}, + }, + }, + } + if err := fc.Create(context.Background(), ing); err != nil { + t.Fatal(err) + } + + // Verify initial reconciliation with HTTP enabled + expectReconciled(t, ingPGR, "default", "test-ingress") + verifyVIPService(t, ft, "svc:my-svc", []string{"80", "443"}) + verifyServeConfig(t, fc, "svc:my-svc", true) + + // Verify Ingress status + ing = &networkingv1.Ingress{} + if err := fc.Get(context.Background(), types.NamespacedName{ + Name: "test-ingress", + Namespace: "default", + }, ing); err != nil { + t.Fatal(err) + } + + wantStatus := []networkingv1.IngressPortStatus{ + {Port: 443, Protocol: "TCP"}, + {Port: 80, Protocol: "TCP"}, + } + if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) { + t.Errorf("incorrect status ports: got %v, want %v", + ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) + } + + // Remove HTTP endpoint annotation + mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { + delete(ing.Annotations, "tailscale.com/http-endpoint") + }) + + // Verify reconciliation after removing HTTP + expectReconciled(t, ingPGR, "default", "test-ingress") + verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) + verifyServeConfig(t, fc, "svc:my-svc", false) + + // Verify Ingress status + ing = &networkingv1.Ingress{} + if err := 
fc.Get(context.Background(), types.NamespacedName{ + Name: "test-ingress", + Namespace: "default", + }, ing); err != nil { + t.Fatal(err) + } + + wantStatus = []networkingv1.IngressPortStatus{ + {Port: 443, Protocol: "TCP"}, + } + if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) { + t.Errorf("incorrect status ports: got %v, want %v", + ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) + } +} + +func verifyVIPService(t *testing.T, ft *fakeTSClient, serviceName string, wantPorts []string) { + t.Helper() + vipSvc, err := ft.getVIPService(context.Background(), tailcfg.ServiceName(serviceName)) + if err != nil { + t.Fatalf("getting VIPService %q: %v", serviceName, err) + } + if vipSvc == nil { + t.Fatalf("VIPService %q not created", serviceName) + } + gotPorts := slices.Clone(vipSvc.Ports) + slices.Sort(gotPorts) + slices.Sort(wantPorts) + if !slices.Equal(gotPorts, wantPorts) { + t.Errorf("incorrect ports for VIPService %q: got %v, want %v", serviceName, gotPorts, wantPorts) + } +} + +func verifyServeConfig(t *testing.T, fc client.Client, serviceName string, wantHTTP bool) { + t.Helper() + + cm := &corev1.ConfigMap{} + if err := fc.Get(context.Background(), types.NamespacedName{ + Name: "test-pg-ingress-config", + Namespace: "operator-ns", + }, cm); err != nil { + t.Fatalf("getting ConfigMap: %v", err) + } + + cfg := &ipn.ServeConfig{} + if err := json.Unmarshal(cm.BinaryData["serve-config.json"], cfg); err != nil { + t.Fatalf("unmarshaling serve config: %v", err) + } + + t.Logf("Looking for service %q in config: %+v", serviceName, cfg) + + svc := cfg.Services[tailcfg.ServiceName(serviceName)] + if svc == nil { + t.Fatalf("service %q not found in serve config, services: %+v", serviceName, maps.Keys(cfg.Services)) + } + + wantHandlers := 1 + if wantHTTP { + wantHandlers = 2 + } + + // Check TCP handlers + if len(svc.TCP) != wantHandlers { + t.Errorf("incorrect number of TCP handlers for service %q: got %d, want %d", serviceName, len(svc.TCP), wantHandlers) + } + if wantHTTP { + if h, ok := svc.TCP[uint16(80)]; !ok { + t.Errorf("HTTP (port 80) handler not found for service %q", serviceName) + } else if !h.HTTP { + t.Errorf("HTTP not enabled for port 80 handler for service %q", serviceName) + } + } + if h, ok := svc.TCP[uint16(443)]; !ok { + t.Errorf("HTTPS (port 443) handler not found for service %q", serviceName) + } else if !h.HTTPS { + t.Errorf("HTTPS not enabled for port 443 handler for service %q", serviceName) + } + + // Check Web handlers + if len(svc.Web) != wantHandlers { + t.Errorf("incorrect number of Web handlers for service %q: got %d, want %d", serviceName, len(svc.Web), wantHandlers) + } +} + +func setupIngressTest(t *testing.T) (*IngressPGReconciler, client.Client, *fakeTSClient) { + t.Helper() + + tsIngressClass := &networkingv1.IngressClass{ + ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, + Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}, + } + + // Pre-create the ProxyGroup + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg", + Generation: 1, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeIngress, + }, + } + + // Pre-create the ConfigMap for the ProxyGroup + pgConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg-ingress-config", + Namespace: "operator-ns", + }, + BinaryData: map[string][]byte{ + "serve-config.json": []byte(`{"Services":{}}`), + }, + } + + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). 
+ WithObjects(pg, pgConfigMap, tsIngressClass). + WithStatusSubresource(pg). + Build() + + // Set ProxyGroup status to ready + pg.Status.Conditions = []metav1.Condition{ + { + Type: string(tsapi.ProxyGroupReady), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + } + if err := fc.Status().Update(context.Background(), pg); err != nil { + t.Fatal(err) + } + + ft := &fakeTSClient{} + fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + lc := &fakeLocalClient{ + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{ + MagicDNSSuffix: "ts.net", + }, + }, + } + + ingPGR := &IngressPGReconciler{ + Client: fc, + tsClient: ft, + tsnetServer: fakeTsnetServer, + defaultTags: []string{"tag:k8s"}, + tsNamespace: "operator-ns", + logger: zl.Sugar(), + recorder: record.NewFakeRecorder(10), + lc: lc, + } + + return ingPGR, fc, ft +} From 6df5c8f32e63170fa8ccd2c32ac26cf19906b79a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 13 Feb 2025 16:46:24 -0600 Subject: [PATCH 0507/1708] various: keep tailscale connected when Always On mode is enabled on Windows In this PR, we enable the registration of LocalBackend extensions to exclude code specific to certain platforms or environments. We then introduce desktopSessionsExt, which is included only in Windows builds and only if the ts_omit_desktop_sessions tag is disabled for the build. This extension tracks desktop sessions and switches to (or remains on) the appropriate profile when a user signs in or out, locks their screen, or disconnects a remote session. As desktopSessionsExt requires an ipn/desktop.SessionManager, we register it with tsd.System for the tailscaled subprocess on Windows. We also fix a bug in the sessionWatcher implementation where it attempts to close a nil channel on stop. 
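The extension hook is intentionally generic, so other platform- or
build-specific features can attach to LocalBackend the same way. A minimal
sketch of the registration pattern, using a hypothetical "example" extension
(the real localBackendExtension interface and RegisterExtension are in the
ipn/ipnlocal diff below):

    //go:build windows && !ts_omit_example

    package ipnlocal

    import (
        "tailscale.com/tsd"
        "tailscale.com/types/logger"
    )

    func init() {
        // The name must be unique; RegisterExtension panics on duplicates.
        RegisterExtension("example", newExampleExt)
    }

    type exampleExt struct{ logf logger.Logf }

    func newExampleExt(logf logger.Logf, sys *tsd.System) (localBackendExtension, error) {
        // Returning an error here (e.g. if a required tsd.System subsystem is
        // missing) is non-fatal: the extension is logged and skipped.
        return &exampleExt{logf: logf}, nil
    }

    // Init is called when LocalBackend is created, before it starts running.
    func (e *exampleExt) Init(lb *LocalBackend) error { return nil }

    // Shutdown is called when LocalBackend shuts down, if Init succeeded.
    func (e *exampleExt) Shutdown() error { return nil }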
Updates #14823 Updates tailscale/corp#26247 Signed-off-by: Nick Khyl --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tailscaled/tailscaled_windows.go | 8 + ipn/desktop/sessions_windows.go | 6 +- ipn/ipnlocal/desktop_sessions.go | 178 ++++++++++++++++++ ipn/ipnlocal/local.go | 125 +++++++++++- ipn/ipnserver/actor.go | 1 + tsd/tsd.go | 4 + .../tailscaled_deps_test_windows.go | 1 + 9 files changed, 313 insertions(+), 12 deletions(-) create mode 100644 ipn/ipnlocal/desktop_sessions.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index b9082f966..2e96f03d0 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -814,6 +814,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/ipn/desktop from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 3eaa12d16..594ebeb17 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -272,6 +272,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ + 💣 tailscale.com/ipn/desktop from tailscale.com/cmd/tailscaled+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 7208e03da..3574fb5f4 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -44,6 +44,7 @@ import ( "golang.zx2c4.com/wireguard/windows/tunnel/winipcfg" "tailscale.com/drive/driveimpl" "tailscale.com/envknob" + "tailscale.com/ipn/desktop" "tailscale.com/logpolicy" "tailscale.com/logtail/backoff" "tailscale.com/net/dns" @@ -335,6 +336,13 @@ func beWindowsSubprocess() bool { sys.Set(driveimpl.NewFileSystemForRemote(log.Printf)) + if sessionManager, err := desktop.NewSessionManager(log.Printf); err == nil { + sys.Set(sessionManager) + } else { + // Errors creating the session manager are unexpected, but not fatal. 
+ log.Printf("[unexpected]: error creating a desktop session manager: %v", err) + } + publicLogID, _ := logid.ParsePublicID(logID) err = startIPNServer(ctx, log.Printf, publicLogID, sys) if err != nil { diff --git a/ipn/desktop/sessions_windows.go b/ipn/desktop/sessions_windows.go index f1b88d573..b26172d77 100644 --- a/ipn/desktop/sessions_windows.go +++ b/ipn/desktop/sessions_windows.go @@ -359,7 +359,7 @@ func (sw *sessionWatcher) Start() error { sw.doneCh = make(chan error, 1) startedCh := make(chan error, 1) - go sw.run(startedCh) + go sw.run(startedCh, sw.doneCh) if err := <-startedCh; err != nil { return err } @@ -372,11 +372,11 @@ func (sw *sessionWatcher) Start() error { return nil } -func (sw *sessionWatcher) run(started chan<- error) { +func (sw *sessionWatcher) run(started, done chan<- error) { runtime.LockOSThread() defer func() { runtime.UnlockOSThread() - close(sw.doneCh) + close(done) }() err := sw.createMessageWindow() started <- err diff --git a/ipn/ipnlocal/desktop_sessions.go b/ipn/ipnlocal/desktop_sessions.go new file mode 100644 index 000000000..23307f667 --- /dev/null +++ b/ipn/ipnlocal/desktop_sessions.go @@ -0,0 +1,178 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Both the desktop session manager and multi-user support +// are currently available only on Windows. +// This file does not need to be built for other platforms. + +//go:build windows && !ts_omit_desktop_sessions + +package ipnlocal + +import ( + "cmp" + "errors" + "fmt" + "sync" + + "tailscale.com/feature" + "tailscale.com/ipn" + "tailscale.com/ipn/desktop" + "tailscale.com/tsd" + "tailscale.com/types/logger" + "tailscale.com/util/syspolicy" +) + +func init() { + feature.Register("desktop-sessions") + RegisterExtension("desktop-sessions", newDesktopSessionsExt) +} + +// desktopSessionsExt implements [localBackendExtension]. +var _ localBackendExtension = (*desktopSessionsExt)(nil) + +// desktopSessionsExt extends [LocalBackend] with desktop session management. +// It keeps Tailscale running in the background if Always-On mode is enabled, +// and switches to an appropriate profile when a user signs in or out, +// locks their screen, or disconnects a remote session. +type desktopSessionsExt struct { + logf logger.Logf + sm desktop.SessionManager + + *LocalBackend // or nil, until Init is called + cleanup []func() // cleanup functions to call on shutdown + + // mu protects all following fields. + // When both mu and [LocalBackend.mu] need to be taken, + // [LocalBackend.mu] must be taken before mu. + mu sync.Mutex + id2sess map[desktop.SessionID]*desktop.Session +} + +// newDesktopSessionsExt returns a new [desktopSessionsExt], +// or an error if [desktop.SessionManager] is not available. +func newDesktopSessionsExt(logf logger.Logf, sys *tsd.System) (localBackendExtension, error) { + sm, ok := sys.SessionManager.GetOK() + if !ok { + return nil, errors.New("session manager is not available") + } + return &desktopSessionsExt{logf: logf, sm: sm, id2sess: make(map[desktop.SessionID]*desktop.Session)}, nil +} + +// Init implements [localBackendExtension]. 
+func (e *desktopSessionsExt) Init(lb *LocalBackend) (err error) { + e.LocalBackend = lb + unregisterResolver := lb.RegisterBackgroundProfileResolver(e.getBackgroundProfile) + unregisterSessionCb, err := e.sm.RegisterStateCallback(e.updateDesktopSessionState) + if err != nil { + unregisterResolver() + return fmt.Errorf("session callback registration failed: %w", err) + } + e.cleanup = []func(){unregisterResolver, unregisterSessionCb} + return nil +} + +// updateDesktopSessionState is a [desktop.SessionStateCallback] +// invoked by [desktop.SessionManager] once for each existing session +// and whenever the session state changes. It updates the session map +// and switches to the best profile if necessary. +func (e *desktopSessionsExt) updateDesktopSessionState(session *desktop.Session) { + e.mu.Lock() + if session.Status != desktop.ClosedSession { + e.id2sess[session.ID] = session + } else { + delete(e.id2sess, session.ID) + } + e.mu.Unlock() + + var action string + switch session.Status { + case desktop.ForegroundSession: + // The user has either signed in or unlocked their session. + // For remote sessions, this may also mean the user has connected. + // The distinction isn't important for our purposes, + // so let's always say "signed in". + action = "signed in to" + case desktop.BackgroundSession: + action = "locked" + case desktop.ClosedSession: + action = "signed out from" + default: + panic("unreachable") + } + maybeUsername, _ := session.User.Username() + userIdentifier := cmp.Or(maybeUsername, string(session.User.UserID()), "user") + reason := fmt.Sprintf("%s %s session %v", userIdentifier, action, session.ID) + + e.SwitchToBestProfile(reason) +} + +// getBackgroundProfile is a [profileResolver] that works as follows: +// +// If Always-On mode is disabled, it returns no profile ("","",false). +// +// If AlwaysOn mode is enabled, it returns the current profile unless: +// - The current user has signed out. +// - Another user has a foreground (i.e. active/unlocked) session. +// +// If the current user's session runs in the background and no other user +// has a foreground session, it returns the current profile. This applies +// when a locally signed-in user locks their screen or when a remote user +// disconnects without signing out. +// +// In all other cases, it returns no profile ("","",false). +// +// It is called with [LocalBackend.mu] locked. +func (e *desktopSessionsExt) getBackgroundProfile() (_ ipn.WindowsUserID, _ ipn.ProfileID, ok bool) { + e.mu.Lock() + defer e.mu.Unlock() + + if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); !alwaysOn { + return "", "", false + } + + isCurrentUserSingedIn := false + var foregroundUIDs []ipn.WindowsUserID + for _, s := range e.id2sess { + switch uid := s.User.UserID(); uid { + case e.pm.CurrentUserID(): + isCurrentUserSingedIn = true + if s.Status == desktop.ForegroundSession { + // Keep the current profile if the user has a foreground session. + return e.pm.CurrentUserID(), e.pm.CurrentProfile().ID(), true + } + default: + if s.Status == desktop.ForegroundSession { + foregroundUIDs = append(foregroundUIDs, uid) + } + } + } + + // If there's no current user (e.g., tailscaled just started), or if the current + // user has no foreground session, switch to the default profile of the first user + // with a foreground session, if any. 
+ for _, uid := range foregroundUIDs { + if profileID := e.pm.DefaultUserProfileID(uid); profileID != "" { + return uid, profileID, true + } + } + + // If no user has a foreground session but the current user is still signed in, + // keep the current profile even if the session is not in the foreground, + // such as when the screen is locked or a remote session is disconnected. + if len(foregroundUIDs) == 0 && isCurrentUserSingedIn { + return e.pm.CurrentUserID(), e.pm.CurrentProfile().ID(), true + } + + return "", "", false +} + +// Shutdown implements [localBackendExtension]. +func (e *desktopSessionsExt) Shutdown() error { + for _, f := range e.cleanup { + f() + } + e.cleanup = nil + e.LocalBackend = nil + return nil +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 3cd8d3c99..8b30484d6 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -168,6 +168,49 @@ type watchSession struct { cancel context.CancelFunc // to shut down the session } +// localBackendExtension extends [LocalBackend] with additional functionality. +type localBackendExtension interface { + // Init is called to initialize the extension when the [LocalBackend] is created + // and before it starts running. If the extension cannot be initialized, + // it must return an error, and the Shutdown method will not be called. + // Any returned errors are not fatal; they are used for logging. + // TODO(nickkhyl): should we allow returning a fatal error? + Init(*LocalBackend) error + + // Shutdown is called when the [LocalBackend] is shutting down, + // if the extension was initialized. Any returned errors are not fatal; + // they are used for logging. + Shutdown() error +} + +// newLocalBackendExtension is a function that instantiates a [localBackendExtension]. +type newLocalBackendExtension func(logger.Logf, *tsd.System) (localBackendExtension, error) + +// registeredExtensions is a map of registered local backend extensions, +// where the key is the name of the extension and the value is the function +// that instantiates the extension. +var registeredExtensions map[string]newLocalBackendExtension + +// RegisterExtension registers a function that creates a [localBackendExtension]. +// It panics if newExt is nil or if an extension with the same name has already been registered. +func RegisterExtension(name string, newExt newLocalBackendExtension) { + if newExt == nil { + panic(fmt.Sprintf("lb: newExt is nil: %q", name)) + } + if _, ok := registeredExtensions[name]; ok { + panic(fmt.Sprintf("lb: duplicate extensions: %q", name)) + } + mak.Set(®isteredExtensions, name, newExt) +} + +// profileResolver is any function that returns user and profile IDs +// along with a flag indicating whether it succeeded. Since an empty +// profile ID ("") represents an empty profile, the ok return parameter +// distinguishes between an empty profile and no profile. +// +// It is called with [LocalBackend.mu] held. +type profileResolver func() (_ ipn.WindowsUserID, _ ipn.ProfileID, ok bool) + // LocalBackend is the glue between the major pieces of the Tailscale // network software: the cloud control plane (via controlclient), the // network data plane (via wgengine), and the user-facing UIs and CLIs @@ -302,8 +345,12 @@ type LocalBackend struct { directFileRoot string componentLogUntil map[string]componentLogState // c2nUpdateStatus is the status of c2n-triggered client update. 
- c2nUpdateStatus updateStatus - currentUser ipnauth.Actor + c2nUpdateStatus updateStatus + currentUser ipnauth.Actor + + // backgroundProfileResolvers are optional background profile resolvers. + backgroundProfileResolvers set.HandleSet[profileResolver] + selfUpdateProgress []ipnstate.UpdateProgress lastSelfUpdateState ipnstate.SelfUpdateStatus // capForcedNetfilter is the netfilter that control instructs Linux clients @@ -394,6 +441,11 @@ type LocalBackend struct { // and the user has disconnected with a reason. // See tailscale/corp#26146. overrideAlwaysOn bool + + // shutdownCbs are the callbacks to be called when the backend is shutting down. + // Each callback is called exactly once in unspecified order and without b.mu held. + // Returned errors are logged but otherwise ignored and do not affect the shutdown process. + shutdownCbs set.HandleSet[func() error] } // HealthTracker returns the health tracker for the backend. @@ -575,6 +627,19 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } } + for name, newFn := range registeredExtensions { + ext, err := newFn(logf, sys) + if err != nil { + b.logf("lb: failed to create %q extension: %v", name, err) + continue + } + if err := ext.Init(b); err != nil { + b.logf("lb: failed to initialize %q extension: %v", name, err) + continue + } + b.shutdownCbs.Add(ext.Shutdown) + } + return b, nil } @@ -1033,9 +1098,17 @@ func (b *LocalBackend) Shutdown() { if b.notifyCancel != nil { b.notifyCancel() } + shutdownCbs := slices.Collect(maps.Values(b.shutdownCbs)) + b.shutdownCbs = nil b.mu.Unlock() b.webClientShutdown() + for _, cb := range shutdownCbs { + if err := cb(); err != nil { + b.logf("shutdown callback failed: %v", err) + } + } + if b.sockstatLogger != nil { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -3826,13 +3899,18 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { b.switchToBestProfileLockedOnEntry(reason, unlock) } -// switchToBestProfileLockedOnEntry selects the best profile to use, +// SwitchToBestProfile selects the best profile to use, // as reported by [LocalBackend.resolveBestProfileLocked], and switches // to it, unless it's already the current profile. The reason indicates // why the profile is being switched, such as due to a client connecting -// or disconnecting and is used for logging. -// -// b.mu must held on entry. It is released on exit. +// or disconnecting, or a change in the desktop session state, and is used +// for logging. +func (b *LocalBackend) SwitchToBestProfile(reason string) { + b.switchToBestProfileLockedOnEntry(reason, b.lockAndGetUnlock()) +} + +// switchToBestProfileLockedOnEntry is like [LocalBackend.SwitchToBestProfile], +// but b.mu must held on entry. It is released on exit. func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock unlockOnce) { defer unlock() oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() @@ -3867,8 +3945,9 @@ func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock un } // resolveBestProfileLocked returns the best profile to use based on the current -// state of the backend, such as whether a GUI/CLI client is connected and whether -// the unattended mode is enabled. +// state of the backend, such as whether a GUI/CLI client is connected, whether +// the unattended mode is enabled, the current state of the desktop sessions, +// and other factors. 
// // It returns the user ID, profile ID, and whether the returned profile is // considered a background profile. A background profile is used when no OS user @@ -3897,7 +3976,8 @@ func (b *LocalBackend) resolveBestProfileLocked() (userID ipn.WindowsUserID, pro } // Otherwise, if on Windows, use the background profile if one is set. - // This includes staying on the current profile if Unattended Mode is enabled. + // This includes staying on the current profile if Unattended Mode is enabled + // or if AlwaysOn mode is enabled and the current user is still signed in. // If the returned background profileID is "", Tailscale will disconnect // and remain idle until a GUI or CLI client connects. if goos := envknob.GOOS(); goos == "windows" { @@ -3914,14 +3994,41 @@ func (b *LocalBackend) resolveBestProfileLocked() (userID ipn.WindowsUserID, pro return b.pm.CurrentUserID(), b.pm.CurrentProfile().ID(), false } +// RegisterBackgroundProfileResolver registers a function to be used when +// resolving the background profile, until the returned unregister function is called. +func (b *LocalBackend) RegisterBackgroundProfileResolver(resolver profileResolver) (unregister func()) { + // TODO(nickkhyl): should we allow specifying some kind of priority/altitude for the resolver? + b.mu.Lock() + defer b.mu.Unlock() + handle := b.backgroundProfileResolvers.Add(resolver) + return func() { + b.mu.Lock() + defer b.mu.Unlock() + delete(b.backgroundProfileResolvers, handle) + } +} + // getBackgroundProfileLocked returns the user and profile ID to use when no GUI/CLI // client is connected, or "","" if Tailscale should not run in the background. // As of 2025-02-07, it is only used on Windows. func (b *LocalBackend) getBackgroundProfileLocked() (ipn.WindowsUserID, ipn.ProfileID) { + // TODO(nickkhyl): check if the returned profile is allowed on the device, + // such as when [syspolicy.Tailnet] policy setting requires a specific Tailnet. + // See tailscale/corp#26249. + // If Unattended Mode is enabled for the current profile, keep using it. if b.pm.CurrentPrefs().ForceDaemon() { return b.pm.CurrentProfile().LocalUserID(), b.pm.CurrentProfile().ID() } + + // Otherwise, attempt to resolve the background profile using the background + // profile resolvers available on the current platform. + for _, resolver := range b.backgroundProfileResolvers { + if uid, profileID, ok := resolver(); ok { + return uid, profileID + } + } + // Otherwise, switch to an empty profile and disconnect Tailscale // until a GUI or CLI client connects. 
return "", "" diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index b0245b0a8..9c203fc5f 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -81,6 +81,7 @@ func actorWithAccessOverride(baseActor *actor, reason string) *actor { logf: baseActor.logf, ci: baseActor.ci, clientID: baseActor.clientID, + userID: baseActor.userID, accessOverrideReason: reason, isLocalSystem: baseActor.isLocalSystem, } diff --git a/tsd/tsd.go b/tsd/tsd.go index acd09560c..1d1f35017 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -26,6 +26,7 @@ import ( "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/conffile" + "tailscale.com/ipn/desktop" "tailscale.com/net/dns" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" @@ -52,6 +53,7 @@ type System struct { Netstack SubSystem[NetstackImpl] // actually a *netstack.Impl DriveForLocal SubSystem[drive.FileSystemForLocal] DriveForRemote SubSystem[drive.FileSystemForRemote] + SessionManager SubSystem[desktop.SessionManager] // InitialConfig is initial server config, if any. // It is nil if the node is not in declarative mode. @@ -110,6 +112,8 @@ func (s *System) Set(v any) { s.DriveForLocal.Set(v) case drive.FileSystemForRemote: s.DriveForRemote.Set(v) + case desktop.SessionManager: + s.SessionManager.Set(v) default: panic(fmt.Sprintf("unknown type %T", v)) } diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index 6ea475e64..a6df2f9ff 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -29,6 +29,7 @@ import ( _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" _ "tailscale.com/ipn/conffile" + _ "tailscale.com/ipn/desktop" _ "tailscale.com/ipn/ipnlocal" _ "tailscale.com/ipn/ipnserver" _ "tailscale.com/ipn/store" From 606f7ef2c6823de8047f742fa28f1961b199e382 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 14 Feb 2025 17:15:20 -0800 Subject: [PATCH 0508/1708] net/netcheck: remove unnecessary custom map clone function Updates #8419 Updates #cleanup Signed-off-by: James Tucker --- net/netcheck/netcheck.go | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index c32eeee8b..107573e5d 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -172,25 +172,14 @@ func (r *Report) Clone() *Report { return nil } r2 := *r - r2.RegionLatency = cloneDurationMap(r2.RegionLatency) - r2.RegionV4Latency = cloneDurationMap(r2.RegionV4Latency) - r2.RegionV6Latency = cloneDurationMap(r2.RegionV6Latency) + r2.RegionLatency = maps.Clone(r2.RegionLatency) + r2.RegionV4Latency = maps.Clone(r2.RegionV4Latency) + r2.RegionV6Latency = maps.Clone(r2.RegionV6Latency) r2.GlobalV4Counters = maps.Clone(r2.GlobalV4Counters) r2.GlobalV6Counters = maps.Clone(r2.GlobalV6Counters) return &r2 } -func cloneDurationMap(m map[int]time.Duration) map[int]time.Duration { - if m == nil { - return nil - } - m2 := make(map[int]time.Duration, len(m)) - for k, v := range m { - m2[k] = v - } - return m2 -} - // Client generates Reports describing the result of both passive and active // network configuration probing. 
It provides two different modes of report, a // full report (see MakeNextReportFull) and a more lightweight incremental From b21eec7621bd0a20809684adba9b3c42245424d2 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Sun, 16 Feb 2025 09:38:02 +0000 Subject: [PATCH 0509/1708] ipn/ipnlocal,tailcfg: don't send WireIngress if IngressEnabled already true (#14960) Hostinfo.WireIngress is used as a hint that the node intends to use funnel. We now send another field, IngressEnabled, in cases where funnel is explicitly enabled, and the logic control-side has been changed to look at IngressEnabled as well as WireIngress in all cases where previously the hint was used - so we can now stop sending WireIngress when IngressEnabled is true to save some bandwidth. Updates tailscale/tailscale#11572 Updates tailscale/corp#25931 Signed-off-by: Irbe Krumina --- ipn/ipnlocal/local.go | 38 +++++++++++++++++++++----------------- ipn/ipnlocal/local_test.go | 8 +++----- tailcfg/tailcfg.go | 25 ++++++++++++++++--------- 3 files changed, 40 insertions(+), 31 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8b30484d6..43d82c900 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4372,6 +4372,12 @@ func (b *LocalBackend) hasIngressEnabledLocked() bool { return b.serveConfig.Valid() && b.serveConfig.IsFunnelOn() } +// shouldWireInactiveIngressLocked reports whether the node is in a state where funnel is not actively enabled, but it +// seems that it is intended to be used with funnel. +func (b *LocalBackend) shouldWireInactiveIngressLocked() bool { + return b.serveConfig.Valid() && !b.hasIngressEnabledLocked() && b.wantIngressLocked() +} + // setPrefsLockedOnEntry requires b.mu be held to call it, but it // unlocks b.mu when done. newp ownership passes to this function. // It returns a read-only copy of the new prefs. @@ -5479,18 +5485,18 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip hi.ServicesHash = b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) - // The Hostinfo.WantIngress field tells control whether this node wants to - // be wired up for ingress connections. If harmless if it's accidentally - // true; the actual policy is controlled in tailscaled by ServeConfig. But - // if this is accidentally false, then control may not configure DNS - // properly. This exists as an optimization to control to program fewer DNS - // records that have ingress enabled but are not actually being used. - // TODO(irbekrm): once control knows that if hostinfo.IngressEnabled is true, - // then wireIngress can be considered true, don't send wireIngress in that case. - hi.WireIngress = b.wantIngressLocked() // The Hostinfo.IngressEnabled field is used to communicate to control whether - // the funnel is actually enabled. + // the node has funnel enabled. hi.IngressEnabled = b.hasIngressEnabledLocked() + // The Hostinfo.WantIngress field tells control whether the user intends + // to use funnel with this node even though it is not currently enabled. + // This is an optimization to control- Funnel requires creation of DNS + // records and because DNS propagation can take time, we want to ensure + // that the records exist for any node that intends to use funnel even + // if it's not enabled. If hi.IngressEnabled is true, control knows that + // DNS records are needed, so we can save bandwidth and not send + // WireIngress. 
+ hi.WireIngress = b.shouldWireInactiveIngressLocked() hi.AppConnector.Set(prefs.AppConnector().Advertise) } @@ -6404,8 +6410,6 @@ func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn. // updateIngressLocked updates the hostinfo.WireIngress and hostinfo.IngressEnabled fields and kicks off a Hostinfo // update if the values have changed. -// TODO(irbekrm): once control knows that if hostinfo.IngressEnabled is true, then wireIngress can be considered true, -// we can stop sending hostinfo.WireIngress in that case. // // b.mu must be held. func (b *LocalBackend) updateIngressLocked() { @@ -6413,16 +6417,16 @@ func (b *LocalBackend) updateIngressLocked() { return } hostInfoChanged := false - if wire := b.wantIngressLocked(); b.hostinfo.WireIngress != wire { - b.logf("Hostinfo.WireIngress changed to %v", wire) - b.hostinfo.WireIngress = wire - hostInfoChanged = true - } if ie := b.hasIngressEnabledLocked(); b.hostinfo.IngressEnabled != ie { b.logf("Hostinfo.IngressEnabled changed to %v", ie) b.hostinfo.IngressEnabled = ie hostInfoChanged = true } + if wire := b.shouldWireInactiveIngressLocked(); b.hostinfo.WireIngress != wire { + b.logf("Hostinfo.WireIngress changed to %v", wire) + b.hostinfo.WireIngress = wire + hostInfoChanged = true + } // Kick off a Hostinfo update to control if ingress status has changed. if hostInfoChanged { b.goTracker.Go(b.doSetHostinfoFilterServices) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index f0c712777..35977e679 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -5084,7 +5084,7 @@ func TestUpdateIngressLocked(t *testing.T) { }, }, wantIngress: true, - wantWireIngress: true, + wantWireIngress: false, // implied by wantIngress wantControlUpdate: true, }, { @@ -5111,7 +5111,6 @@ func TestUpdateIngressLocked(t *testing.T) { name: "funnel_enabled_no_change", hi: &tailcfg.Hostinfo{ IngressEnabled: true, - WireIngress: true, }, sc: &ipn.ServeConfig{ AllowFunnel: map[ipn.HostPort]bool{ @@ -5119,7 +5118,7 @@ func TestUpdateIngressLocked(t *testing.T) { }, }, wantIngress: true, - wantWireIngress: true, + wantWireIngress: false, // implied by wantIngress }, { name: "funnel_disabled_no_change", @@ -5137,7 +5136,6 @@ func TestUpdateIngressLocked(t *testing.T) { name: "funnel_changes_to_disabled", hi: &tailcfg.Hostinfo{ IngressEnabled: true, - WireIngress: true, }, sc: &ipn.ServeConfig{ AllowFunnel: map[ipn.HostPort]bool{ @@ -5157,8 +5155,8 @@ func TestUpdateIngressLocked(t *testing.T) { "tailnet.xyz:443": true, }, }, - wantWireIngress: true, wantIngress: true, + wantWireIngress: false, // implied by wantIngress wantControlUpdate: true, }, } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 4d30f6501..f82c6eb81 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -835,15 +835,22 @@ type Hostinfo struct { // App is used to disambiguate Tailscale clients that run using tsnet. App string `json:",omitempty"` // "k8s-operator", "golinks", ... 
- Desktop opt.Bool `json:",omitempty"` // if a desktop was detected on Linux - Package string `json:",omitempty"` // Tailscale package to disambiguate ("choco", "appstore", etc; "" for unknown) - DeviceModel string `json:",omitempty"` // mobile phone model ("Pixel 3a", "iPhone12,3") - PushDeviceToken string `json:",omitempty"` // macOS/iOS APNs device token for notifications (and Android in the future) - Hostname string `json:",omitempty"` // name of the host the client runs on - ShieldsUp bool `json:",omitempty"` // indicates whether the host is blocking incoming connections - ShareeNode bool `json:",omitempty"` // indicates this node exists in netmap because it's owned by a shared-to user - NoLogsNoSupport bool `json:",omitempty"` // indicates that the user has opted out of sending logs and support - WireIngress bool `json:",omitempty"` // indicates that the node wants the option to receive ingress connections + Desktop opt.Bool `json:",omitempty"` // if a desktop was detected on Linux + Package string `json:",omitempty"` // Tailscale package to disambiguate ("choco", "appstore", etc; "" for unknown) + DeviceModel string `json:",omitempty"` // mobile phone model ("Pixel 3a", "iPhone12,3") + PushDeviceToken string `json:",omitempty"` // macOS/iOS APNs device token for notifications (and Android in the future) + Hostname string `json:",omitempty"` // name of the host the client runs on + ShieldsUp bool `json:",omitempty"` // indicates whether the host is blocking incoming connections + ShareeNode bool `json:",omitempty"` // indicates this node exists in netmap because it's owned by a shared-to user + NoLogsNoSupport bool `json:",omitempty"` // indicates that the user has opted out of sending logs and support + // WireIngress indicates that the node would like to be wired up server-side + // (DNS, etc) to be able to use Tailscale Funnel, even if it's not currently + // enabled. For example, the user might only use it for intermittent + // foreground CLI serve sessions, for which they'd like it to work right + // away, even if it's disabled most of the time. As an optimization, this is + // only sent if IngressEnabled is false, as IngressEnabled implies that this + // option is true. + WireIngress bool `json:",omitempty"` IngressEnabled bool `json:",omitempty"` // if the node has any funnel endpoint enabled AllowsUpdate bool `json:",omitempty"` // indicates that the node has opted-in to admin-console-drive remote updates Machine string `json:",omitempty"` // the current host's machine type (uname -m) From cbf3852b5d3081ec44c924b0045fc9476ea7aea1 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 17 Feb 2025 08:58:38 -0800 Subject: [PATCH 0510/1708] cmd/testwrapper: temporarily remove test coverage support testwrapper doesn't work with Go 1.24 and the coverage support is making it harder to debug. 
Updates #15015 Updates tailscale/corp#26659 Change-Id: I0125e881d08c92f1ecef88b57344f6bbb571b569 Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 8 +- cmd/testwrapper/testwrapper.go | 149 +-------------------------------- go.mod | 4 - go.sum | 8 -- 4 files changed, 2 insertions(+), 167 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a368afc67..7142c86b9 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -64,7 +64,6 @@ jobs: matrix: include: - goarch: amd64 - coverflags: "-coverprofile=/tmp/coverage.out" - goarch: amd64 buildflags: "-race" shard: '1/3' @@ -119,15 +118,10 @@ jobs: - name: build test wrapper run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper - name: test all - run: NOBASHDEBUG=true PATH=$PWD/tool:$PATH /tmp/testwrapper ${{matrix.coverflags}} ./... ${{matrix.buildflags}} + run: NOBASHDEBUG=true PATH=$PWD/tool:$PATH /tmp/testwrapper ./... ${{matrix.buildflags}} env: GOARCH: ${{ matrix.goarch }} TS_TEST_SHARD: ${{ matrix.shard }} - - name: Publish to coveralls.io - if: matrix.coverflags != '' # only publish results if we've tracked coverage - uses: shogo82148/actions-goveralls@v1 - with: - path-to-profile: /tmp/coverage.out - name: bench all run: ./tool/go test ${{matrix.buildflags}} -bench=. -benchtime=1x -run=^$ $(for x in $(git grep -l "^func Benchmark" | xargs dirname | sort | uniq); do echo "./$x"; done) env: diff --git a/cmd/testwrapper/testwrapper.go b/cmd/testwrapper/testwrapper.go index 91aea904e..67b8a1483 100644 --- a/cmd/testwrapper/testwrapper.go +++ b/cmd/testwrapper/testwrapper.go @@ -22,13 +22,7 @@ import ( "sort" "strings" "time" - "unicode" - "github.com/dave/courtney/scanner" - "github.com/dave/courtney/shared" - "github.com/dave/courtney/tester" - "github.com/dave/patsy" - "github.com/dave/patsy/vos" "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/util/slicesx" ) @@ -238,30 +232,6 @@ func main() { fmt.Printf("%s\t%s\t%.3fs\n", outcome, pkg, runtime.Seconds()) } - // Check for -coverprofile argument and filter it out - combinedCoverageFilename := "" - filteredGoTestArgs := make([]string, 0, len(goTestArgs)) - preceededByCoverProfile := false - for _, arg := range goTestArgs { - if arg == "-coverprofile" { - preceededByCoverProfile = true - } else if preceededByCoverProfile { - combinedCoverageFilename = strings.TrimSpace(arg) - preceededByCoverProfile = false - } else { - filteredGoTestArgs = append(filteredGoTestArgs, arg) - } - } - goTestArgs = filteredGoTestArgs - - runningWithCoverage := combinedCoverageFilename != "" - if runningWithCoverage { - fmt.Printf("Will log coverage to %v\n", combinedCoverageFilename) - } - - // Keep track of all test coverage files. With each retry, we'll end up - // with additional coverage files that will be combined when we finish. 
- coverageFiles := make([]string, 0) for len(toRun) > 0 { var thisRun *nextRun thisRun, toRun = toRun[0], toRun[1:] @@ -275,27 +245,13 @@ func main() { fmt.Printf("\n\nAttempt #%d: Retrying flaky tests:\n\nflakytest failures JSON: %s\n\n", thisRun.attempt, j) } - goTestArgsWithCoverage := testArgs - if runningWithCoverage { - coverageFile := fmt.Sprintf("/tmp/coverage_%d.out", thisRun.attempt) - coverageFiles = append(coverageFiles, coverageFile) - goTestArgsWithCoverage = make([]string, len(goTestArgs), len(goTestArgs)+2) - copy(goTestArgsWithCoverage, goTestArgs) - goTestArgsWithCoverage = append( - goTestArgsWithCoverage, - fmt.Sprintf("-coverprofile=%v", coverageFile), - "-covermode=set", - "-coverpkg=./...", - ) - } - toRetry := make(map[string][]*testAttempt) // pkg -> tests to retry for _, pt := range thisRun.tests { ch := make(chan *testAttempt) runErr := make(chan error, 1) go func() { defer close(runErr) - runErr <- runTests(ctx, thisRun.attempt, pt, goTestArgsWithCoverage, testArgs, ch) + runErr <- runTests(ctx, thisRun.attempt, pt, goTestArgs, testArgs, ch) }() var failed bool @@ -372,107 +328,4 @@ func main() { } toRun = append(toRun, nextRun) } - - if runningWithCoverage { - intermediateCoverageFilename := "/tmp/coverage.out_intermediate" - if err := combineCoverageFiles(intermediateCoverageFilename, coverageFiles); err != nil { - fmt.Printf("error combining coverage files: %v\n", err) - os.Exit(2) - } - - if err := processCoverageWithCourtney(intermediateCoverageFilename, combinedCoverageFilename, testArgs); err != nil { - fmt.Printf("error processing coverage with courtney: %v\n", err) - os.Exit(3) - } - - fmt.Printf("Wrote combined coverage to %v\n", combinedCoverageFilename) - } -} - -func combineCoverageFiles(intermediateCoverageFilename string, coverageFiles []string) error { - combinedCoverageFile, err := os.OpenFile(intermediateCoverageFilename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) - if err != nil { - return fmt.Errorf("create /tmp/coverage.out: %w", err) - } - defer combinedCoverageFile.Close() - w := bufio.NewWriter(combinedCoverageFile) - defer w.Flush() - - for fileNumber, coverageFile := range coverageFiles { - f, err := os.Open(coverageFile) - if err != nil { - return fmt.Errorf("open %v: %w", coverageFile, err) - } - defer f.Close() - in := bufio.NewReader(f) - line := 0 - for { - r, _, err := in.ReadRune() - if err != nil { - if err != io.EOF { - return fmt.Errorf("read %v: %w", coverageFile, err) - } - break - } - - // On all but the first coverage file, skip the coverage file header - if fileNumber > 0 && line == 0 { - continue - } - if r == '\n' { - line++ - } - - // filter for only printable characters because coverage file sometimes includes junk on 2nd line - if unicode.IsPrint(r) || r == '\n' { - if _, err := w.WriteRune(r); err != nil { - return fmt.Errorf("write %v: %w", combinedCoverageFile.Name(), err) - } - } - } - } - - return nil -} - -// processCoverageWithCourtney post-processes code coverage to exclude less -// meaningful sections like 'if err != nil { return err}', as well as -// anything marked with a '// notest' comment. -// -// instead of running the courtney as a separate program, this embeds -// courtney for easier integration. 
-func processCoverageWithCourtney(intermediateCoverageFilename, combinedCoverageFilename string, testArgs []string) error { - env := vos.Os() - - setup := &shared.Setup{ - Env: vos.Os(), - Paths: patsy.NewCache(env), - TestArgs: testArgs, - Load: intermediateCoverageFilename, - Output: combinedCoverageFilename, - } - if err := setup.Parse(testArgs); err != nil { - return fmt.Errorf("parse args: %w", err) - } - - s := scanner.New(setup) - if err := s.LoadProgram(); err != nil { - return fmt.Errorf("load program: %w", err) - } - if err := s.ScanPackages(); err != nil { - return fmt.Errorf("scan packages: %w", err) - } - - t := tester.New(setup) - if err := t.Load(); err != nil { - return fmt.Errorf("load: %w", err) - } - if err := t.ProcessExcludes(s.Excludes); err != nil { - return fmt.Errorf("process excludes: %w", err) - } - if err := t.Save(); err != nil { - return fmt.Errorf("save: %w", err) - } - - return nil } diff --git a/go.mod b/go.mod index e0c945f83..0845008bb 100644 --- a/go.mod +++ b/go.mod @@ -21,8 +21,6 @@ require ( github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf github.com/creack/pty v1.1.23 - github.com/dave/courtney v0.4.0 - github.com/dave/patsy v0.0.0-20210517141501-957256f50cba github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e github.com/distribution/reference v0.6.0 @@ -135,8 +133,6 @@ require ( github.com/ccojocar/zxcvbn-go v1.0.2 // indirect github.com/ckaznocha/intrange v0.1.0 // indirect github.com/cyphar/filepath-securejoin v0.3.6 // indirect - github.com/dave/astrid v0.0.0-20170323122508-8c2895878b14 // indirect - github.com/dave/brenda v1.1.0 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect diff --git a/go.sum b/go.sum index 6ea727014..7be4c3eaf 100644 --- a/go.sum +++ b/go.sum @@ -240,14 +240,6 @@ github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18C github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/daixiang0/gci v0.12.3 h1:yOZI7VAxAGPQmkb1eqt5g/11SUlwoat1fSblGLmdiQc= github.com/daixiang0/gci v0.12.3/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= -github.com/dave/astrid v0.0.0-20170323122508-8c2895878b14 h1:YI1gOOdmMk3xodBao7fehcvoZsEeOyy/cfhlpCSPgM4= -github.com/dave/astrid v0.0.0-20170323122508-8c2895878b14/go.mod h1:Sth2QfxfATb/nW4EsrSi2KyJmbcniZ8TgTaji17D6ms= -github.com/dave/brenda v1.1.0 h1:Sl1LlwXnbw7xMhq3y2x11McFu43AjDcwkllxxgZ3EZw= -github.com/dave/brenda v1.1.0/go.mod h1:4wCUr6gSlu5/1Tk7akE5X7UorwiQ8Rij0SKH3/BGMOM= -github.com/dave/courtney v0.4.0 h1:Vb8hi+k3O0h5++BR96FIcX0x3NovRbnhGd/dRr8inBk= -github.com/dave/courtney v0.4.0/go.mod h1:3WSU3yaloZXYAxRuWt8oRyVb9SaRiMBt5Kz/2J227tM= -github.com/dave/patsy v0.0.0-20210517141501-957256f50cba h1:1o36L4EKbZzazMk8iGC4kXpVnZ6TPxR2mZ9qVKjNNAs= -github.com/dave/patsy v0.0.0-20210517141501-957256f50cba/go.mod h1:qfR88CgEGLoiqDaE+xxDCi5QA5v4vUoW0UCX2Nd5Tlc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= From d923979e65237d281346e8269d78b48c84e51667 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Fri, 7 Feb 
2025 08:54:35 -0600 Subject: [PATCH 0511/1708] client/tailscale: mark control API client deprecated The official client for 3rd party use is at tailscale.com/client/tailscale/v2. Updates #22748 Signed-off-by: Percy Wegmann --- client/tailscale/tailscale.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/client/tailscale/tailscale.go b/client/tailscale/tailscale.go index 9d001d376..b81a7ee63 100644 --- a/client/tailscale/tailscale.go +++ b/client/tailscale/tailscale.go @@ -5,8 +5,10 @@ // Package tailscale contains a Go client for the Tailscale control plane API. // -// Warning: this package is in development and makes no API compatibility -// promises as of 2022-04-29. It is subject to change at any time. +// This package is only intended for internal and transitional use. +// +// Deprecated: the official control plane client is available at +// tailscale.com/client/tailscale/v2. package tailscale import ( @@ -18,10 +20,7 @@ import ( ) // I_Acknowledge_This_API_Is_Unstable must be set true to use this package -// for now. It was added 2022-04-29 when it was moved to this git repo -// and will be removed when the public API has settled. -// -// TODO(bradfitz): remove this after the we're happy with the public API. +// for now. This package is being replaced by tailscale.com/client/tailscale/v2. var I_Acknowledge_This_API_Is_Unstable = false // TODO: use url.PathEscape() for deviceID and tailnets when constructing requests. @@ -35,6 +34,8 @@ const maxReadSize = 10 << 20 // // Use NewClient to instantiate one. Exported fields should be set before // the client is used and not changed thereafter. +// +// Deprecated: use tailscale.com/client/tailscale/v2 instead. type Client struct { // tailnet is the globally unique identifier for a Tailscale network, such // as "example.com" or "user@gmail.com". @@ -97,6 +98,8 @@ func (c *Client) setAuth(r *http.Request) { // If httpClient is nil, then http.DefaultClient is used. // "api.tailscale.com" is set as the BaseURL for the returned client // and can be changed manually by the user. +// +// Deprecated: use tailscale.com/client/tailscale/v2 instead. func NewClient(tailnet string, auth AuthMethod) *Client { return &Client{ tailnet: tailnet, From 4f0222388ade008603567846fae3f4a35b168502 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Fri, 7 Feb 2025 09:09:36 -0600 Subject: [PATCH 0512/1708] cmd,tsnet,internal/client: create internal shim to deprecated control plane API Even after we remove the deprecated API, we will want to maintain a minimal API for internal use, in order to avoid importing the external tailscale.com/client/tailscale/v2 package. This shim exposes only the necessary parts of the deprecated API for internal use, which gains us the following: 1. It removes deprecation warnings for internal use of the API. 2. It gives us an inventory of which parts we will want to keep for internal use. 
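In practice the shim makes the change at each call site an import swap plus deleting the acknowledgment line, since the shim's init performs the acknowledgment itself. Below is a minimal, hypothetical caller sketching that usage; it assumes nothing beyond what the diff introduces, and note that tailscale.com/internal/client/tailscale is an internal package, so only code inside the tailscale.com module can import it (the real call sites are get-authkey, the k8s operator, and the CLI, as shown in the diff).

package main

import (
	"fmt"

	// Previously callers imported "tailscale.com/client/tailscale" and set
	// tailscale.I_Acknowledge_This_API_Is_Unstable = true themselves.
	// The internal shim sets that flag in its own init, so callers don't.
	"tailscale.com/internal/client/tailscale"
)

func main() {
	// A nil AuthMethod is enough for this sketch; real callers configure
	// an API key or OAuth credentials before making requests.
	c := tailscale.NewClient("example.com", nil)
	fmt.Printf("created client for tailnet %q: %T\n", "example.com", c)
}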
Updates tailscale/corp#22748 Signed-off-by: Percy Wegmann --- cmd/get-authkey/main.go | 6 +--- cmd/k8s-operator/depaware.txt | 1 + cmd/k8s-operator/e2e/main_test.go | 3 +- cmd/k8s-operator/tsclient.go | 2 +- cmd/tailscale/cli/up.go | 8 +---- cmd/tailscale/depaware.txt | 1 + internal/client/tailscale/tailscale.go | 48 ++++++++++++++++++++++++++ tsnet/tsnet.go | 3 ++ 8 files changed, 57 insertions(+), 15 deletions(-) create mode 100644 internal/client/tailscale/tailscale.go diff --git a/cmd/get-authkey/main.go b/cmd/get-authkey/main.go index 95c930756..ec7ab5d2c 100644 --- a/cmd/get-authkey/main.go +++ b/cmd/get-authkey/main.go @@ -16,14 +16,10 @@ import ( "strings" "golang.org/x/oauth2/clientcredentials" - "tailscale.com/client/tailscale" + "tailscale.com/internal/client/tailscale" ) func main() { - // Required to use our client API. We're fine with the instability since the - // client lives in the same repo as this code. - tailscale.I_Acknowledge_This_API_Is_Unstable = true - reusable := flag.Bool("reusable", false, "allocate a reusable authkey") ephemeral := flag.Bool("ephemeral", false, "allocate an ephemeral authkey") preauth := flag.Bool("preauth", true, "set the authkey as pre-authorized") diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 2e96f03d0..520595bf6 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -811,6 +811,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ + tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/k8s-operator/e2e/main_test.go b/cmd/k8s-operator/e2e/main_test.go index ae23c939c..5a1364e09 100644 --- a/cmd/k8s-operator/e2e/main_test.go +++ b/cmd/k8s-operator/e2e/main_test.go @@ -21,7 +21,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" kzap "sigs.k8s.io/controller-runtime/pkg/log/zap" - "tailscale.com/client/tailscale" + "tailscale.com/internal/client/tailscale" ) const ( @@ -64,7 +64,6 @@ func TestMain(m *testing.M) { func runTests(m *testing.M) (int, error) { zlog := kzap.NewRaw([]kzap.Opts{kzap.UseDevMode(true), kzap.Level(zapcore.DebugLevel)}...).Sugar() logf.SetLogger(zapr.NewLogger(zlog.Desugar())) - tailscale.I_Acknowledge_This_API_Is_Unstable = true if clientID := os.Getenv("TS_API_CLIENT_ID"); clientID != "" { cleanup, err := setupClientAndACLs() diff --git a/cmd/k8s-operator/tsclient.go b/cmd/k8s-operator/tsclient.go index 2381438b2..acbc96520 100644 --- a/cmd/k8s-operator/tsclient.go +++ b/cmd/k8s-operator/tsclient.go @@ -16,7 +16,7 @@ import ( "os" "golang.org/x/oauth2/clientcredentials" - "tailscale.com/client/tailscale" + "tailscale.com/internal/client/tailscale" "tailscale.com/tailcfg" "tailscale.com/util/httpm" ) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index da3780e39..31f7eb956 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -27,8 +27,8 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" qrcode "github.com/skip2/go-qrcode" "golang.org/x/oauth2/clientcredentials" - "tailscale.com/client/tailscale" "tailscale.com/health/healthmsg" + 
"tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/net/netutil" @@ -1097,12 +1097,6 @@ func exitNodeIP(p *ipn.Prefs, st *ipnstate.Status) (ip netip.Addr) { return } -func init() { - // Required to use our client API. We're fine with the instability since the - // client lives in the same repo as this code. - tailscale.I_Acknowledge_This_API_Is_Unstable = true -} - // resolveAuthKey either returns v unchanged (in the common case) or, if it // starts with "tskey-client-" (as Tailscale OAuth secrets do) parses it like // diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index ad2e40611..8c972aa69 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -93,6 +93,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli tailscale.com/hostinfo from tailscale.com/client/web+ + tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli tailscale.com/internal/noiseconn from tailscale.com/cmd/tailscale/cli tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ diff --git a/internal/client/tailscale/tailscale.go b/internal/client/tailscale/tailscale.go new file mode 100644 index 000000000..4745bef64 --- /dev/null +++ b/internal/client/tailscale/tailscale.go @@ -0,0 +1,48 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package tailscale provides a minimal control plane API client for internal +// use. A full client for 3rd party use is available at +// tailscale.com/client/tailscale/v2. The internal client is provided to avoid +// having to import that whole package. +package tailscale + +import ( + tsclient "tailscale.com/client/tailscale" +) + +func init() { + tsclient.I_Acknowledge_This_API_Is_Unstable = true +} + +// Client is an alias to tailscale.com/client/tailscale. +type Client = tsclient.Client + +// AuthMethod is an alias to tailscale.com/client/tailscale. +type AuthMethod = tsclient.AuthMethod + +// Device is an alias to tailscale.com/client/tailscale. +type Device = tsclient.Device + +// DeviceFieldsOpts is an alias to tailscale.com/client/tailscale. +type DeviceFieldsOpts = tsclient.DeviceFieldsOpts + +// Key is an alias to tailscale.com/client/tailscale. +type Key = tsclient.Key + +// KeyCapabilities is an alias to tailscale.com/client/tailscale. +type KeyCapabilities = tsclient.KeyCapabilities + +// KeyDeviceCapabilities is an alias to tailscale.com/client/tailscale. +type KeyDeviceCapabilities = tsclient.KeyDeviceCapabilities + +// KeyDeviceCreateCapabilities is an alias to tailscale.com/client/tailscale. +type KeyDeviceCreateCapabilities = tsclient.KeyDeviceCreateCapabilities + +// ErrResponse is an alias to tailscale.com/client/tailscale. +type ErrResponse = tsclient.ErrResponse + +// NewClient is an alias to tailscale.com/client/tailscale. +func NewClient(tailnet string, auth AuthMethod) *Client { + return tsclient.NewClient(tailnet, auth) +} diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 8d5b89f84..f5fcd416f 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -930,6 +930,9 @@ func getTSNetDir(logf logger.Logf, confDir, prog string) (string, error) { // APIClient returns a tailscale.Client that can be used to make authenticated // requests to the Tailscale control server. 
// It requires the user to set tailscale.I_Acknowledge_This_API_Is_Unstable. +// +// TODO: (percy) provide a way to use Noise for the official API at +// tailscale.com/client/tailscale/v2. func (s *Server) APIClient() (*tailscale.Client, error) { if !tailscale.I_Acknowledge_This_API_Is_Unstable { return nil, errors.New("use of Client without setting I_Acknowledge_This_API_Is_Unstable") From 8a792ab540f7744c669b6952a257c32a7f6f0ea2 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Fri, 7 Feb 2025 09:28:09 -0600 Subject: [PATCH 0513/1708] tsnet: provide AuthenticatedAPITransport for use with tailscale.com/client/tailscale/v2 This allows use of the officially supported control server API, authenticated with the tsnet node's nodekey. Updates tailscale/corp#22748 Signed-off-by: Percy Wegmann --- tsnet/tsnet.go | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index f5fcd416f..6fffa9aff 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -931,8 +931,7 @@ func getTSNetDir(logf logger.Logf, confDir, prog string) (string, error) { // requests to the Tailscale control server. // It requires the user to set tailscale.I_Acknowledge_This_API_Is_Unstable. // -// TODO: (percy) provide a way to use Noise for the official API at -// tailscale.com/client/tailscale/v2. +// Deprecated: use AuthenticatedAPITransport with tailscale.com/client/tailscale/v2 instead. func (s *Server) APIClient() (*tailscale.Client, error) { if !tailscale.I_Acknowledge_This_API_Is_Unstable { return nil, errors.New("use of Client without setting I_Acknowledge_This_API_Is_Unstable") @@ -947,6 +946,32 @@ func (s *Server) APIClient() (*tailscale.Client, error) { return c, nil } +// AuthenticatedAPITransport provides an HTTP transport that can be used with +// the control server API without needing additional authentication details. It +// authenticates using the current client's nodekey. +// +// For example: +// +// import "net/http" +// import "tailscale.com/client/tailscale/v2" +// import "tailscale.com/tsnet" +// +// var s *tsnet.Server +// ... +// rt, err := s.AuthenticatedAPITransport() +// // handler err ... +// var client tailscale.Client{HTTP: http.Client{ +// Timeout: 1*time.Minute, +// UserAgent: "your-useragent-here", +// Transport: rt, +// }} +func (s *Server) AuthenticatedAPITransport() (http.RoundTripper, error) { + if err := s.Start(); err != nil { + return nil, err + } + return s.lb.KeyProvingNoiseRoundTripper(), nil +} + // Listen announces only on the Tailscale network. // It will start the server if it has not been started yet. // From 9ae9de469a470c2f189ec82da57ae49ec2bb77da Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Wed, 12 Feb 2025 10:43:09 -0600 Subject: [PATCH 0514/1708] internal/client/tailscale: change Client from alias into wrapper This will allow Client to be extended with additional functions for internal use. Updates tailscale/corp#22748 Signed-off-by: Percy Wegmann --- internal/client/tailscale/tailscale.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/internal/client/tailscale/tailscale.go b/internal/client/tailscale/tailscale.go index 4745bef64..d29927e3f 100644 --- a/internal/client/tailscale/tailscale.go +++ b/internal/client/tailscale/tailscale.go @@ -15,9 +15,6 @@ func init() { tsclient.I_Acknowledge_This_API_Is_Unstable = true } -// Client is an alias to tailscale.com/client/tailscale. -type Client = tsclient.Client - // AuthMethod is an alias to tailscale.com/client/tailscale. 
type AuthMethod = tsclient.AuthMethod @@ -44,5 +41,12 @@ type ErrResponse = tsclient.ErrResponse // NewClient is an alias to tailscale.com/client/tailscale. func NewClient(tailnet string, auth AuthMethod) *Client { - return tsclient.NewClient(tailnet, auth) + return &Client{ + Client: tsclient.NewClient(tailnet, auth), + } +} + +// Client is a wrapper of tailscale.com/client/tailscale. +type Client struct { + *tsclient.Client } From 052eefbcceeb8a7df865c348aa9139a0a8cf64b1 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Fri, 14 Feb 2025 15:34:33 -0600 Subject: [PATCH 0515/1708] tsnet: require I_Acknowledge_This_API_Is_Experimental to use AuthenticatedAPITransport() It's not entirely clear whether this capability will be maintained, or in what form, so this serves as a warning to that effect. Updates tailscale/corp#22748 Signed-off-by: Percy Wegmann --- tsnet/tsnet.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 6fffa9aff..680825708 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -946,10 +946,16 @@ func (s *Server) APIClient() (*tailscale.Client, error) { return c, nil } +// I_Acknowledge_This_API_Is_Experimental must be set true to use AuthenticatedAPITransport() +// for now. +var I_Acknowledge_This_API_Is_Experimental = false + // AuthenticatedAPITransport provides an HTTP transport that can be used with // the control server API without needing additional authentication details. It // authenticates using the current client's nodekey. // +// It requires the user to set I_Acknowledge_This_API_Is_Experimental. +// // For example: // // import "net/http" @@ -966,6 +972,9 @@ func (s *Server) APIClient() (*tailscale.Client, error) { // Transport: rt, // }} func (s *Server) AuthenticatedAPITransport() (http.RoundTripper, error) { + if !I_Acknowledge_This_API_Is_Experimental { + return nil, errors.New("use of AuthenticatedAPITransport without setting I_Acknowledge_This_API_Is_Experimental") + } if err := s.Start(); err != nil { return nil, err } From ec5f04b274c3b2be2887a5ba58a9309ed624d1ed Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Tue, 18 Feb 2025 11:31:14 -0800 Subject: [PATCH 0516/1708] appc: fix a deadlock in route advertisements (#15031) `routeAdvertiser` is the `iplocal.LocalBackend`. Calls to `Advertise/UnadvertiseRoute` end up calling `EditPrefs` which in turn calls `authReconfig` which finally calls `readvertiseAppConnectorRoutes` which calls `AppConnector.DomainRoutes` and gets stuck on a mutex that was already held when `routeAdvertiser` was called. Make all calls to `routeAdvertiser` in `app.AppConnector` go through the execqueue instead as a short-term fix. 
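The shape of the bug and of the fix can be shown without any Tailscale types. The sketch below is illustrative only: connector, Routes, updateRoutesSync, and updateRoutesQueued are hypothetical names, and a buffered channel of funcs stands in for the real execqueue. The broken variant invokes the advertiser callback while the mutex is held, so a callback that re-enters the connector deadlocks on Go's non-reentrant sync.Mutex; the fixed variant hands the callback to the queue, which runs it on another goroutine after the lock is released.

package main

import (
	"fmt"
	"sync"
)

// connector stands in for appc.AppConnector: it guards its state with a
// mutex and notifies an advertiser callback when routes change.
type connector struct {
	mu        sync.Mutex
	routes    []string
	advertise func([]string) // may re-enter connector, as LocalBackend does
	queue     chan func()    // serial work queue, standing in for util/execqueue
}

// Routes needs the mutex, like AppConnector.DomainRoutes.
func (c *connector) Routes() []string {
	c.mu.Lock()
	defer c.mu.Unlock()
	return append([]string(nil), c.routes...)
}

// updateRoutesSync is the broken shape: the callback runs while mu is held,
// and if it calls Routes on the same goroutine, the mutex deadlocks.
func (c *connector) updateRoutesSync(rs []string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.routes = rs
	c.advertise(rs) // re-enters Routes -> deadlock
}

// updateRoutesQueued is the fixed shape: the callback is deferred to the
// queue and runs only after mu has been released.
func (c *connector) updateRoutesQueued(rs []string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.routes = rs
	c.queue <- func() { c.advertise(rs) }
}

func main() {
	done := make(chan struct{})
	c := &connector{queue: make(chan func(), 16)}
	c.advertise = func([]string) {
		_ = c.Routes() // the re-entrant call that used to deadlock
		close(done)
	}
	go func() {
		for f := range c.queue {
			f()
		}
	}()
	c.updateRoutesQueued([]string{"192.0.2.0/24"})
	<-done
	fmt.Println("advertised without deadlocking")
	// Calling c.updateRoutesSync here instead would hang forever.
}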
Updates tailscale/corp#25965 Signed-off-by: Andrew Lytvynov Co-authored-by: Irbe Krumina --- appc/appconnector.go | 24 ++++++++-------- appc/appconnector_test.go | 58 +++++++++++++++++++++++++++++++++++++++ appc/appctest/appctest.go | 13 +++++++++ 3 files changed, 84 insertions(+), 11 deletions(-) diff --git a/appc/appconnector.go b/appc/appconnector.go index f4857fcc6..89c6c9aeb 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -289,9 +289,11 @@ func (e *AppConnector) updateDomains(domains []string) { toRemove = append(toRemove, netip.PrefixFrom(a, a.BitLen())) } } - if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { - e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slicesx.MapKeys(oldDomains), toRemove, err) - } + e.queue.Add(func() { + if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { + e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slicesx.MapKeys(oldDomains), toRemove, err) + } + }) } e.logf("handling domains: %v and wildcards: %v", slicesx.MapKeys(e.domains), e.wildcards) @@ -310,11 +312,6 @@ func (e *AppConnector) updateRoutes(routes []netip.Prefix) { return } - if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { - e.logf("failed to advertise routes: %v: %v", routes, err) - return - } - var toRemove []netip.Prefix // If we're storing routes and know e.controlRoutes is a good @@ -338,9 +335,14 @@ nextRoute: } } - if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { - e.logf("failed to unadvertise routes: %v: %v", toRemove, err) - } + e.queue.Add(func() { + if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { + e.logf("failed to advertise routes: %v: %v", routes, err) + } + if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { + e.logf("failed to unadvertise routes: %v: %v", toRemove, err) + } + }) e.controlRoutes = routes if err := e.storeRoutesLocked(); err != nil { diff --git a/appc/appconnector_test.go b/appc/appconnector_test.go index fd0001224..c13835f39 100644 --- a/appc/appconnector_test.go +++ b/appc/appconnector_test.go @@ -8,6 +8,7 @@ import ( "net/netip" "reflect" "slices" + "sync/atomic" "testing" "time" @@ -86,6 +87,7 @@ func TestUpdateRoutes(t *testing.T) { routes := []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24"), netip.MustParsePrefix("192.0.0.1/32")} a.updateRoutes(routes) + a.Wait(ctx) slices.SortFunc(rc.Routes(), prefixCompare) rc.SetRoutes(slices.Compact(rc.Routes())) @@ -105,6 +107,7 @@ func TestUpdateRoutes(t *testing.T) { } func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { + ctx := context.Background() for _, shouldStore := range []bool{false, true} { rc := &appctest.RouteCollector{} var a *AppConnector @@ -117,6 +120,7 @@ func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { rc.SetRoutes([]netip.Prefix{netip.MustParsePrefix("192.0.2.1/32")}) routes := []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24")} a.updateRoutes(routes) + a.Wait(ctx) if !slices.EqualFunc(routes, rc.Routes(), prefixEqual) { t.Fatalf("got %v, want %v", rc.Routes(), routes) @@ -636,3 +640,57 @@ func TestMetricBucketsAreSorted(t *testing.T) { t.Errorf("metricStoreRoutesNBuckets must be in order") } } + +// TestUpdateRoutesDeadlock is a regression test for a deadlock in +// LocalBackend<->AppConnector interaction. When using real LocalBackend as the +// routeAdvertiser, calls to Advertise/UnadvertiseRoutes can end up calling +// back into AppConnector via authReconfig. 
If everything is called +// synchronously, this results in a deadlock on AppConnector.mu. +func TestUpdateRoutesDeadlock(t *testing.T) { + ctx := context.Background() + rc := &appctest.RouteCollector{} + a := NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + + advertiseCalled := new(atomic.Bool) + unadvertiseCalled := new(atomic.Bool) + rc.AdvertiseCallback = func() { + // Call something that requires a.mu to be held. + a.DomainRoutes() + advertiseCalled.Store(true) + } + rc.UnadvertiseCallback = func() { + // Call something that requires a.mu to be held. + a.DomainRoutes() + unadvertiseCalled.Store(true) + } + + a.updateDomains([]string{"example.com"}) + a.Wait(ctx) + + // Trigger rc.AdveriseRoute. + a.updateRoutes( + []netip.Prefix{ + netip.MustParsePrefix("127.0.0.1/32"), + netip.MustParsePrefix("127.0.0.2/32"), + }, + ) + a.Wait(ctx) + // Trigger rc.UnadveriseRoute. + a.updateRoutes( + []netip.Prefix{ + netip.MustParsePrefix("127.0.0.1/32"), + }, + ) + a.Wait(ctx) + + if !advertiseCalled.Load() { + t.Error("AdvertiseRoute was not called") + } + if !unadvertiseCalled.Load() { + t.Error("UnadvertiseRoute was not called") + } + + if want := []netip.Prefix{netip.MustParsePrefix("127.0.0.1/32")}; !slices.Equal(slices.Compact(rc.Routes()), want) { + t.Fatalf("got %v, want %v", rc.Routes(), want) + } +} diff --git a/appc/appctest/appctest.go b/appc/appctest/appctest.go index aa77bc3b4..9726a2b97 100644 --- a/appc/appctest/appctest.go +++ b/appc/appctest/appctest.go @@ -11,12 +11,22 @@ import ( // RouteCollector is a test helper that collects the list of routes advertised type RouteCollector struct { + // AdvertiseCallback (optional) is called synchronously from + // AdvertiseRoute. + AdvertiseCallback func() + // UnadvertiseCallback (optional) is called synchronously from + // UnadvertiseRoute. + UnadvertiseCallback func() + routes []netip.Prefix removedRoutes []netip.Prefix } func (rc *RouteCollector) AdvertiseRoute(pfx ...netip.Prefix) error { rc.routes = append(rc.routes, pfx...) + if rc.AdvertiseCallback != nil { + rc.AdvertiseCallback() + } return nil } @@ -30,6 +40,9 @@ func (rc *RouteCollector) UnadvertiseRoute(toRemove ...netip.Prefix) error { rc.removedRoutes = append(rc.removedRoutes, r) } } + if rc.UnadvertiseCallback != nil { + rc.UnadvertiseCallback() + } return nil } From 9c731b848b42a3bfaf1a8fc2f36b6ad3e804025c Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Tue, 18 Feb 2025 14:06:22 -0600 Subject: [PATCH 0517/1708] cmd/gitops-pusher: log error details when unable to fetch ACL ETag This will help debug unexpected issues encountered by consumers of the gitops-pusher. 
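The underlying pattern is generic: on an unexpected status code, read the response body (or a bounded prefix of it) and fold it into the returned error so the server's explanation is not lost. A small sketch of that pattern follows, using a hypothetical checkStatus helper rather than the gitops-pusher code itself; the real change reads the full body, while this sketch caps it at 4 KiB.

package main

import (
	"fmt"
	"io"
	"net/http"
)

// checkStatus returns nil for the wanted status code; otherwise it reads up
// to 4 KiB of the body and includes it in the error for easier debugging.
func checkStatus(resp *http.Response, want int) error {
	if resp.StatusCode == want {
		return nil
	}
	body, _ := io.ReadAll(io.LimitReader(resp.Body, 4<<10))
	return fmt.Errorf("wanted HTTP status code %d but got %d: %#q", want, resp.StatusCode, body)
}

func main() {
	resp, err := http.Get("https://example.com/does-not-exist")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	if err := checkStatus(resp, http.StatusOK); err != nil {
		fmt.Println(err) // error now carries the server's response text
	}
}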
Updates tailscale/corp#26664 Signed-off-by: Percy Wegmann --- cmd/gitops-pusher/gitops-pusher.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/gitops-pusher/gitops-pusher.go b/cmd/gitops-pusher/gitops-pusher.go index e7a0aeee1..690ca2870 100644 --- a/cmd/gitops-pusher/gitops-pusher.go +++ b/cmd/gitops-pusher/gitops-pusher.go @@ -13,6 +13,7 @@ import ( "encoding/json" "flag" "fmt" + "io" "log" "net/http" "os" @@ -405,7 +406,8 @@ func getACLETag(ctx context.Context, client *http.Client, tailnet, apiKey string got := resp.StatusCode want := http.StatusOK if got != want { - return "", fmt.Errorf("wanted HTTP status code %d but got %d", want, got) + errorDetails, _ := io.ReadAll(resp.Body) + return "", fmt.Errorf("wanted HTTP status code %d but got %d: %#q", want, got, string(errorDetails)) } return Shuck(resp.Header.Get("ETag")), nil From 1f1a26776bc182652cfa9a4dc2a2c60b1b5de8b8 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Wed, 12 Feb 2025 10:34:28 -0600 Subject: [PATCH 0518/1708] client/tailscale,cmd/k8s-operator,internal/client/tailscale: move VIP service client methods into internal control client Updates tailscale/corp#22748 Signed-off-by: Percy Wegmann --- client/tailscale/acl.go | 16 +-- client/tailscale/devices.go | 12 +- client/tailscale/dns.go | 8 +- client/tailscale/keys.go | 16 +-- client/tailscale/routes.go | 4 +- client/tailscale/tailnet.go | 5 +- client/tailscale/tailscale.go | 38 +++++- cmd/k8s-operator/ingress-for-pg.go | 20 ++-- cmd/k8s-operator/ingress-for-pg_test.go | 4 +- cmd/k8s-operator/testutils_test.go | 12 +- cmd/k8s-operator/tsclient.go | 141 +---------------------- internal/client/tailscale/tailscale.go | 31 +++++ internal/client/tailscale/vip_service.go | 103 +++++++++++++++++ 13 files changed, 222 insertions(+), 188 deletions(-) create mode 100644 internal/client/tailscale/vip_service.go diff --git a/client/tailscale/acl.go b/client/tailscale/acl.go index 8d8bdfc86..bef80d241 100644 --- a/client/tailscale/acl.go +++ b/client/tailscale/acl.go @@ -83,7 +83,7 @@ func (c *Client) ACL(ctx context.Context) (acl *ACL, err error) { } }() - path := fmt.Sprintf("%s/api/v2/tailnet/%s/acl", c.baseURL(), c.tailnet) + path := c.BuildTailnetURL("acl") req, err := http.NewRequestWithContext(ctx, "GET", path, nil) if err != nil { return nil, err @@ -97,7 +97,7 @@ func (c *Client) ACL(ctx context.Context) (acl *ACL, err error) { // If status code was not successful, return the error. // TODO: Change the check for the StatusCode to include other 2XX success codes. if resp.StatusCode != http.StatusOK { - return nil, handleErrorResponse(b, resp) + return nil, HandleErrorResponse(b, resp) } // Otherwise, try to decode the response. 
@@ -126,7 +126,7 @@ func (c *Client) ACLHuJSON(ctx context.Context) (acl *ACLHuJSON, err error) { } }() - path := fmt.Sprintf("%s/api/v2/tailnet/%s/acl?details=1", c.baseURL(), c.tailnet) + path := c.BuildTailnetURL("acl?details=1") req, err := http.NewRequestWithContext(ctx, "GET", path, nil) if err != nil { return nil, err @@ -138,7 +138,7 @@ func (c *Client) ACLHuJSON(ctx context.Context) (acl *ACLHuJSON, err error) { } if resp.StatusCode != http.StatusOK { - return nil, handleErrorResponse(b, resp) + return nil, HandleErrorResponse(b, resp) } data := struct { @@ -184,7 +184,7 @@ func (e ACLTestError) Error() string { } func (c *Client) aclPOSTRequest(ctx context.Context, body []byte, avoidCollisions bool, etag, acceptHeader string) ([]byte, string, error) { - path := fmt.Sprintf("%s/api/v2/tailnet/%s/acl", c.baseURL(), c.tailnet) + path := c.BuildTailnetURL("acl") req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(body)) if err != nil { return nil, "", err @@ -328,7 +328,7 @@ type ACLPreview struct { } func (c *Client) previewACLPostRequest(ctx context.Context, body []byte, previewType string, previewFor string) (res *ACLPreviewResponse, err error) { - path := fmt.Sprintf("%s/api/v2/tailnet/%s/acl/preview", c.baseURL(), c.tailnet) + path := c.BuildTailnetURL("acl/preview") req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(body)) if err != nil { return nil, err @@ -350,7 +350,7 @@ func (c *Client) previewACLPostRequest(ctx context.Context, body []byte, preview // If status code was not successful, return the error. // TODO: Change the check for the StatusCode to include other 2XX success codes. if resp.StatusCode != http.StatusOK { - return nil, handleErrorResponse(b, resp) + return nil, HandleErrorResponse(b, resp) } if err = json.Unmarshal(b, &res); err != nil { return nil, err @@ -488,7 +488,7 @@ func (c *Client) ValidateACLJSON(ctx context.Context, source, dest string) (test return nil, err } - path := fmt.Sprintf("%s/api/v2/tailnet/%s/acl/validate", c.baseURL(), c.tailnet) + path := c.BuildTailnetURL("acl/validate") req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(postData)) if err != nil { return nil, err diff --git a/client/tailscale/devices.go b/client/tailscale/devices.go index 9008d4d0d..b79191d53 100644 --- a/client/tailscale/devices.go +++ b/client/tailscale/devices.go @@ -131,7 +131,7 @@ func (c *Client) Devices(ctx context.Context, fields *DeviceFieldsOpts) (deviceL } }() - path := fmt.Sprintf("%s/api/v2/tailnet/%s/devices", c.baseURL(), c.tailnet) + path := c.BuildTailnetURL("devices") req, err := http.NewRequestWithContext(ctx, "GET", path, nil) if err != nil { return nil, err @@ -149,7 +149,7 @@ func (c *Client) Devices(ctx context.Context, fields *DeviceFieldsOpts) (deviceL // If status code was not successful, return the error. // TODO: Change the check for the StatusCode to include other 2XX success codes. if resp.StatusCode != http.StatusOK { - return nil, handleErrorResponse(b, resp) + return nil, HandleErrorResponse(b, resp) } var devices GetDevicesResponse @@ -188,7 +188,7 @@ func (c *Client) Device(ctx context.Context, deviceID string, fields *DeviceFiel // If status code was not successful, return the error. // TODO: Change the check for the StatusCode to include other 2XX success codes. 
if resp.StatusCode != http.StatusOK { - return nil, handleErrorResponse(b, resp) + return nil, HandleErrorResponse(b, resp) } err = json.Unmarshal(b, &device) @@ -221,7 +221,7 @@ func (c *Client) DeleteDevice(ctx context.Context, deviceID string) (err error) // If status code was not successful, return the error. // TODO: Change the check for the StatusCode to include other 2XX success codes. if resp.StatusCode != http.StatusOK { - return handleErrorResponse(b, resp) + return HandleErrorResponse(b, resp) } return nil } @@ -253,7 +253,7 @@ func (c *Client) SetAuthorized(ctx context.Context, deviceID string, authorized // If status code was not successful, return the error. // TODO: Change the check for the StatusCode to include other 2XX success codes. if resp.StatusCode != http.StatusOK { - return handleErrorResponse(b, resp) + return HandleErrorResponse(b, resp) } return nil @@ -281,7 +281,7 @@ func (c *Client) SetTags(ctx context.Context, deviceID string, tags []string) er // If status code was not successful, return the error. // TODO: Change the check for the StatusCode to include other 2XX success codes. if resp.StatusCode != http.StatusOK { - return handleErrorResponse(b, resp) + return HandleErrorResponse(b, resp) } return nil diff --git a/client/tailscale/dns.go b/client/tailscale/dns.go index f198742b3..bbdc7c56c 100644 --- a/client/tailscale/dns.go +++ b/client/tailscale/dns.go @@ -44,7 +44,7 @@ type DNSPreferences struct { } func (c *Client) dnsGETRequest(ctx context.Context, endpoint string) ([]byte, error) { - path := fmt.Sprintf("%s/api/v2/tailnet/%s/dns/%s", c.baseURL(), c.tailnet, endpoint) + path := c.BuildTailnetURL("dns", endpoint) req, err := http.NewRequestWithContext(ctx, "GET", path, nil) if err != nil { return nil, err @@ -57,14 +57,14 @@ func (c *Client) dnsGETRequest(ctx context.Context, endpoint string) ([]byte, er // If status code was not successful, return the error. // TODO: Change the check for the StatusCode to include other 2XX success codes. if resp.StatusCode != http.StatusOK { - return nil, handleErrorResponse(b, resp) + return nil, HandleErrorResponse(b, resp) } return b, nil } func (c *Client) dnsPOSTRequest(ctx context.Context, endpoint string, postData any) ([]byte, error) { - path := fmt.Sprintf("%s/api/v2/tailnet/%s/dns/%s", c.baseURL(), c.tailnet, endpoint) + path := c.BuildTailnetURL("dns", endpoint) data, err := json.Marshal(&postData) if err != nil { return nil, err @@ -84,7 +84,7 @@ func (c *Client) dnsPOSTRequest(ctx context.Context, endpoint string, postData a // If status code was not successful, return the error. // TODO: Change the check for the StatusCode to include other 2XX success codes. if resp.StatusCode != http.StatusOK { - return nil, handleErrorResponse(b, resp) + return nil, HandleErrorResponse(b, resp) } return b, nil diff --git a/client/tailscale/keys.go b/client/tailscale/keys.go index 84bcdfae6..79e19e998 100644 --- a/client/tailscale/keys.go +++ b/client/tailscale/keys.go @@ -40,7 +40,7 @@ type KeyDeviceCreateCapabilities struct { // Keys returns the list of keys for the current user. 
func (c *Client) Keys(ctx context.Context) ([]string, error) { - path := fmt.Sprintf("%s/api/v2/tailnet/%s/keys", c.baseURL(), c.tailnet) + path := c.BuildTailnetURL("keys") req, err := http.NewRequestWithContext(ctx, "GET", path, nil) if err != nil { return nil, err @@ -51,7 +51,7 @@ func (c *Client) Keys(ctx context.Context) ([]string, error) { return nil, err } if resp.StatusCode != http.StatusOK { - return nil, handleErrorResponse(b, resp) + return nil, HandleErrorResponse(b, resp) } var keys struct { @@ -99,7 +99,7 @@ func (c *Client) CreateKeyWithExpiry(ctx context.Context, caps KeyCapabilities, return "", nil, err } - path := fmt.Sprintf("%s/api/v2/tailnet/%s/keys", c.baseURL(), c.tailnet) + path := c.BuildTailnetURL("keys") req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewReader(bs)) if err != nil { return "", nil, err @@ -110,7 +110,7 @@ func (c *Client) CreateKeyWithExpiry(ctx context.Context, caps KeyCapabilities, return "", nil, err } if resp.StatusCode != http.StatusOK { - return "", nil, handleErrorResponse(b, resp) + return "", nil, HandleErrorResponse(b, resp) } var key struct { @@ -126,7 +126,7 @@ func (c *Client) CreateKeyWithExpiry(ctx context.Context, caps KeyCapabilities, // Key returns the metadata for the given key ID. Currently, capabilities are // only returned for auth keys, API keys only return general metadata. func (c *Client) Key(ctx context.Context, id string) (*Key, error) { - path := fmt.Sprintf("%s/api/v2/tailnet/%s/keys/%s", c.baseURL(), c.tailnet, id) + path := c.BuildTailnetURL("keys", id) req, err := http.NewRequestWithContext(ctx, "GET", path, nil) if err != nil { return nil, err @@ -137,7 +137,7 @@ func (c *Client) Key(ctx context.Context, id string) (*Key, error) { return nil, err } if resp.StatusCode != http.StatusOK { - return nil, handleErrorResponse(b, resp) + return nil, HandleErrorResponse(b, resp) } var key Key @@ -149,7 +149,7 @@ func (c *Client) Key(ctx context.Context, id string) (*Key, error) { // DeleteKey deletes the key with the given ID. func (c *Client) DeleteKey(ctx context.Context, id string) error { - path := fmt.Sprintf("%s/api/v2/tailnet/%s/keys/%s", c.baseURL(), c.tailnet, id) + path := c.BuildTailnetURL("keys", id) req, err := http.NewRequestWithContext(ctx, "DELETE", path, nil) if err != nil { return err @@ -160,7 +160,7 @@ func (c *Client) DeleteKey(ctx context.Context, id string) error { return err } if resp.StatusCode != http.StatusOK { - return handleErrorResponse(b, resp) + return HandleErrorResponse(b, resp) } return nil } diff --git a/client/tailscale/routes.go b/client/tailscale/routes.go index 5912fc46c..b72f2743f 100644 --- a/client/tailscale/routes.go +++ b/client/tailscale/routes.go @@ -44,7 +44,7 @@ func (c *Client) Routes(ctx context.Context, deviceID string) (routes *Routes, e // If status code was not successful, return the error. // TODO: Change the check for the StatusCode to include other 2XX success codes. if resp.StatusCode != http.StatusOK { - return nil, handleErrorResponse(b, resp) + return nil, HandleErrorResponse(b, resp) } var sr Routes @@ -84,7 +84,7 @@ func (c *Client) SetRoutes(ctx context.Context, deviceID string, subnets []netip // If status code was not successful, return the error. // TODO: Change the check for the StatusCode to include other 2XX success codes. 
if resp.StatusCode != http.StatusOK { - return nil, handleErrorResponse(b, resp) + return nil, HandleErrorResponse(b, resp) } var srr *Routes diff --git a/client/tailscale/tailnet.go b/client/tailscale/tailnet.go index 2539e7f23..9453962c9 100644 --- a/client/tailscale/tailnet.go +++ b/client/tailscale/tailnet.go @@ -9,7 +9,6 @@ import ( "context" "fmt" "net/http" - "net/url" "tailscale.com/util/httpm" ) @@ -22,7 +21,7 @@ func (c *Client) TailnetDeleteRequest(ctx context.Context, tailnetID string) (er } }() - path := fmt.Sprintf("%s/api/v2/tailnet/%s", c.baseURL(), url.PathEscape(string(tailnetID))) + path := c.BuildTailnetURL("tailnet") req, err := http.NewRequestWithContext(ctx, httpm.DELETE, path, nil) if err != nil { return err @@ -35,7 +34,7 @@ func (c *Client) TailnetDeleteRequest(ctx context.Context, tailnetID string) (er } if resp.StatusCode != http.StatusOK { - return handleErrorResponse(b, resp) + return HandleErrorResponse(b, resp) } return nil diff --git a/client/tailscale/tailscale.go b/client/tailscale/tailscale.go index b81a7ee63..f273023eb 100644 --- a/client/tailscale/tailscale.go +++ b/client/tailscale/tailscale.go @@ -17,6 +17,8 @@ import ( "fmt" "io" "net/http" + "net/url" + "path" ) // I_Acknowledge_This_API_Is_Unstable must be set true to use this package @@ -63,6 +65,36 @@ func (c *Client) httpClient() *http.Client { return http.DefaultClient } +// BuildURL builds a url to http(s):///api/v2/ +// using the given pathElements. It url escapes each path element, so the caller +// doesn't need to worry about that. +// +// For example, BuildURL(devices, 5) with the default server URL would result in +// https://api.tailscale.com/api/v2/devices/5. +func (c *Client) BuildURL(pathElements ...any) string { + elem := make([]string, 2, len(pathElements)+1) + elem[0] = c.baseURL() + elem[1] = "/api/v2" + for _, pathElement := range pathElements { + elem = append(elem, url.PathEscape(fmt.Sprint(pathElement))) + } + return path.Join(elem...) +} + +// BuildTailnetURL builds a url to http(s):///api/v2/tailnet// +// using the given pathElements. It url escapes each path element, so the +// caller doesn't need to worry about that. +// +// For example, BuildTailnetURL(policy, validate) with the default server URL and a tailnet of "example.com" +// would result in https://api.tailscale.com/api/v2/tailnet/example.com/policy/validate. +func (c *Client) BuildTailnetURL(pathElements ...any) string { + allElements := make([]any, 3, len(pathElements)+2) + allElements[0] = "tailnet" + allElements[1] = c.Tailnet + allElements = append(allElements, pathElements...) + return c.BuildURL(allElements...) +} + func (c *Client) baseURL() string { if c.BaseURL != "" { return c.BaseURL @@ -150,9 +182,11 @@ func (e ErrResponse) Error() string { return fmt.Sprintf("Status: %d, Message: %q", e.Status, e.Message) } -// handleErrorResponse decodes the error message from the server and returns +// HandleErrorResponse decodes the error message from the server and returns // an ErrResponse from it. -func handleErrorResponse(b []byte, resp *http.Response) error { +// +// Deprecated: use tailscale.com/client/tailscale/v2 instead. 
+func HandleErrorResponse(b []byte, resp *http.Response) error { var errResp ErrResponse if err := json.Unmarshal(b, &errResp); err != nil { return err diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index b07882deb..4fa0af2a2 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -26,7 +26,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "tailscale.com/client/tailscale" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" tsoperator "tailscale.com/k8s-operator" @@ -186,7 +186,7 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin } dnsName := hostname + "." + tcd serviceName := tailcfg.ServiceName("svc:" + hostname) - existingVIPSvc, err := a.tsClient.getVIPService(ctx, serviceName) + existingVIPSvc, err := a.tsClient.GetVIPService(ctx, serviceName) // TODO(irbekrm): here and when creating the VIPService, verify if the error is not terminal (and therefore // should not be reconciled). For example, if the hostname is already a hostname of a Tailscale node, the GET // here will fail. @@ -269,7 +269,7 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin vipPorts = append(vipPorts, "80") } - vipSvc := &VIPService{ + vipSvc := &tailscale.VIPService{ Name: serviceName, Tags: tags, Ports: vipPorts, @@ -282,7 +282,7 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) || !reflect.DeepEqual(vipSvc.Ports, existingVIPSvc.Ports) { logger.Infof("Ensuring VIPService %q exists and is up to date", hostname) - if err := a.tsClient.createOrUpdateVIPService(ctx, vipSvc); err != nil { + if err := a.tsClient.CreateOrUpdateVIPService(ctx, vipSvc); err != nil { logger.Infof("error creating VIPService: %v", err) return fmt.Errorf("error creating VIPService: %w", err) } @@ -361,7 +361,7 @@ func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG } if isVIPServiceForAnyIngress(svc) { logger.Infof("cleaning up orphaned VIPService %q", vipServiceName) - if err := a.tsClient.deleteVIPService(ctx, vipServiceName); err != nil { + if err := a.tsClient.DeleteVIPService(ctx, vipServiceName); err != nil { errResp := &tailscale.ErrResponse{} if !errors.As(err, &errResp) || errResp.Status != http.StatusNotFound { return fmt.Errorf("deleting VIPService %q: %w", vipServiceName, err) @@ -509,8 +509,8 @@ func (a *IngressPGReconciler) shouldExpose(ing *networkingv1.Ingress) bool { return isTSIngress && pgAnnot != "" } -func (a *IngressPGReconciler) getVIPService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (*VIPService, error) { - svc, err := a.tsClient.getVIPService(ctx, name) +func (a *IngressPGReconciler) getVIPService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (*tailscale.VIPService, error) { + svc, err := a.tsClient.GetVIPService(ctx, name) if err != nil { errResp := &tailscale.ErrResponse{} if ok := errors.As(err, errResp); ok && errResp.Status != http.StatusNotFound { @@ -521,14 +521,14 @@ func (a *IngressPGReconciler) getVIPService(ctx context.Context, name tailcfg.Se return svc, nil } -func isVIPServiceForIngress(svc *VIPService, ing *networkingv1.Ingress) bool { +func isVIPServiceForIngress(svc *tailscale.VIPService, ing *networkingv1.Ingress) bool { if svc == nil || ing == nil { return false } 
return strings.EqualFold(svc.Comment, fmt.Sprintf(VIPSvcOwnerRef, ing.UID)) } -func isVIPServiceForAnyIngress(svc *VIPService) bool { +func isVIPServiceForAnyIngress(svc *tailscale.VIPService) bool { if svc == nil { return false } @@ -593,7 +593,7 @@ func (a *IngressPGReconciler) deleteVIPServiceIfExists(ctx context.Context, name } logger.Infof("Deleting VIPService %q", name) - if err = a.tsClient.deleteVIPService(ctx, name); err != nil { + if err = a.tsClient.DeleteVIPService(ctx, name); err != nil { return fmt.Errorf("error deleting VIPService: %w", err) } return nil diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index ee8a94336..c432eb7e1 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -70,7 +70,7 @@ func TestIngressPGReconciler(t *testing.T) { expectReconciled(t, ingPGR, "default", "test-ingress") // Verify VIPService uses custom tags - vipSvc, err := ft.getVIPService(context.Background(), "svc:my-svc") + vipSvc, err := ft.GetVIPService(context.Background(), "svc:my-svc") if err != nil { t.Fatalf("getting VIPService: %v", err) } @@ -398,7 +398,7 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { func verifyVIPService(t *testing.T, ft *fakeTSClient, serviceName string, wantPorts []string) { t.Helper() - vipSvc, err := ft.getVIPService(context.Background(), tailcfg.ServiceName(serviceName)) + vipSvc, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName(serviceName)) if err != nil { t.Fatalf("getting VIPService %q: %v", serviceName, err) } diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 386005b1f..6b1a4f85b 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "tailscale.com/client/tailscale" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" @@ -768,7 +768,7 @@ type fakeTSClient struct { sync.Mutex keyRequests []tailscale.KeyCapabilities deleted []string - vipServices map[tailcfg.ServiceName]*VIPService + vipServices map[tailcfg.ServiceName]*tailscale.VIPService } type fakeTSNetServer struct { certDomains []string @@ -875,7 +875,7 @@ func removeAuthKeyIfExistsModifier(t *testing.T) func(s *corev1.Secret) { } } -func (c *fakeTSClient) getVIPService(ctx context.Context, name tailcfg.ServiceName) (*VIPService, error) { +func (c *fakeTSClient) GetVIPService(ctx context.Context, name tailcfg.ServiceName) (*tailscale.VIPService, error) { c.Lock() defer c.Unlock() if c.vipServices == nil { @@ -888,17 +888,17 @@ func (c *fakeTSClient) getVIPService(ctx context.Context, name tailcfg.ServiceNa return svc, nil } -func (c *fakeTSClient) createOrUpdateVIPService(ctx context.Context, svc *VIPService) error { +func (c *fakeTSClient) CreateOrUpdateVIPService(ctx context.Context, svc *tailscale.VIPService) error { c.Lock() defer c.Unlock() if c.vipServices == nil { - c.vipServices = make(map[tailcfg.ServiceName]*VIPService) + c.vipServices = make(map[tailcfg.ServiceName]*tailscale.VIPService) } c.vipServices[svc.Name] = svc return nil } -func (c *fakeTSClient) deleteVIPService(ctx context.Context, name tailcfg.ServiceName) error { +func (c *fakeTSClient) DeleteVIPService(ctx context.Context, name tailcfg.ServiceName) error { c.Lock() defer c.Unlock() if c.vipServices 
!= nil { diff --git a/cmd/k8s-operator/tsclient.go b/cmd/k8s-operator/tsclient.go index acbc96520..3101da75d 100644 --- a/cmd/k8s-operator/tsclient.go +++ b/cmd/k8s-operator/tsclient.go @@ -6,19 +6,13 @@ package main import ( - "bytes" "context" - "encoding/json" "fmt" - "io" - "net/http" - "net/url" "os" "golang.org/x/oauth2/clientcredentials" "tailscale.com/internal/client/tailscale" "tailscale.com/tailcfg" - "tailscale.com/util/httpm" ) // defaultTailnet is a value that can be used in Tailscale API calls instead of tailnet name to indicate that the API @@ -45,141 +39,14 @@ func newTSClient(ctx context.Context, clientIDPath, clientSecretPath string) (ts c := tailscale.NewClient(defaultTailnet, nil) c.UserAgent = "tailscale-k8s-operator" c.HTTPClient = credentials.Client(ctx) - tsc := &tsClientImpl{ - Client: c, - baseURL: defaultBaseURL, - tailnet: defaultTailnet, - } - return tsc, nil + return c, nil } type tsClient interface { CreateKey(ctx context.Context, caps tailscale.KeyCapabilities) (string, *tailscale.Key, error) Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error) DeleteDevice(ctx context.Context, nodeStableID string) error - getVIPService(ctx context.Context, name tailcfg.ServiceName) (*VIPService, error) - createOrUpdateVIPService(ctx context.Context, svc *VIPService) error - deleteVIPService(ctx context.Context, name tailcfg.ServiceName) error -} - -type tsClientImpl struct { - *tailscale.Client - baseURL string - tailnet string -} - -// VIPService is a Tailscale VIPService with Tailscale API JSON representation. -type VIPService struct { - // Name is a VIPService name in form svc:. - Name tailcfg.ServiceName `json:"name,omitempty"` - // Addrs are the IP addresses of the VIP Service. There are two addresses: - // the first is IPv4 and the second is IPv6. - // When creating a new VIP Service, the IP addresses are optional: if no - // addresses are specified then they will be selected. If an IPv4 address is - // specified at index 0, then that address will attempt to be used. An IPv6 - // address can not be specified upon creation. - Addrs []string `json:"addrs,omitempty"` - // Comment is an optional text string for display in the admin panel. - Comment string `json:"comment,omitempty"` - // Ports are the ports of a VIPService that will be configured via Tailscale serve config. - // If set, any node wishing to advertise this VIPService must have this port configured via Tailscale serve. - Ports []string `json:"ports,omitempty"` - // Tags are optional ACL tags that will be applied to the VIPService. - Tags []string `json:"tags,omitempty"` -} - -// GetVIPServiceByName retrieves a VIPService by its name. It returns 404 if the VIPService is not found. -func (c *tsClientImpl) getVIPService(ctx context.Context, name tailcfg.ServiceName) (*VIPService, error) { - path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/%s", c.baseURL, c.tailnet, url.PathEscape(name.String())) - req, err := http.NewRequestWithContext(ctx, httpm.GET, path, nil) - if err != nil { - return nil, fmt.Errorf("error creating new HTTP request: %w", err) - } - b, resp, err := c.sendRequest(req) - if err != nil { - return nil, fmt.Errorf("error making Tailsale API request: %w", err) - } - // If status code was not successful, return the error. - // TODO: Change the check for the StatusCode to include other 2XX success codes. 
- if resp.StatusCode != http.StatusOK { - return nil, handleErrorResponse(b, resp) - } - svc := &VIPService{} - if err := json.Unmarshal(b, svc); err != nil { - return nil, err - } - return svc, nil -} - -// createOrUpdateVIPService creates or updates a VIPService by its name. Caller must ensure that, if the -// VIPService already exists, the VIPService is fetched first to ensure that any auto-allocated IP addresses are not -// lost during the update. If the VIPService was created without any IP addresses explicitly set (so that they were -// auto-allocated by Tailscale) any subsequent request to this function that does not set any IP addresses will error. -func (c *tsClientImpl) createOrUpdateVIPService(ctx context.Context, svc *VIPService) error { - data, err := json.Marshal(svc) - if err != nil { - return err - } - path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/%s", c.baseURL, c.tailnet, url.PathEscape(svc.Name.String())) - req, err := http.NewRequestWithContext(ctx, httpm.PUT, path, bytes.NewBuffer(data)) - if err != nil { - return fmt.Errorf("error creating new HTTP request: %w", err) - } - b, resp, err := c.sendRequest(req) - if err != nil { - return fmt.Errorf("error making Tailscale API request: %w", err) - } - // If status code was not successful, return the error. - // TODO: Change the check for the StatusCode to include other 2XX success codes. - if resp.StatusCode != http.StatusOK { - return handleErrorResponse(b, resp) - } - return nil -} - -// DeleteVIPServiceByName deletes a VIPService by its name. It returns an error if the VIPService -// does not exist or if the deletion fails. -func (c *tsClientImpl) deleteVIPService(ctx context.Context, name tailcfg.ServiceName) error { - path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/%s", c.baseURL, c.tailnet, url.PathEscape(name.String())) - req, err := http.NewRequestWithContext(ctx, httpm.DELETE, path, nil) - if err != nil { - return fmt.Errorf("error creating new HTTP request: %w", err) - } - b, resp, err := c.sendRequest(req) - if err != nil { - return fmt.Errorf("error making Tailscale API request: %w", err) - } - // If status code was not successful, return the error. - if resp.StatusCode != http.StatusOK { - return handleErrorResponse(b, resp) - } - return nil -} - -// sendRequest add the authentication key to the request and sends it. It -// receives the response and reads up to 10MB of it. -func (c *tsClientImpl) sendRequest(req *http.Request) ([]byte, *http.Response, error) { - resp, err := c.Do(req) - if err != nil { - return nil, resp, fmt.Errorf("error actually doing request: %w", err) - } - defer resp.Body.Close() - - // Read response - b, err := io.ReadAll(resp.Body) - if err != nil { - err = fmt.Errorf("error reading response body: %v", err) - } - return b, resp, err -} - -// handleErrorResponse decodes the error message from the server and returns -// an ErrResponse from it. 
-func handleErrorResponse(b []byte, resp *http.Response) error { - var errResp tailscale.ErrResponse - if err := json.Unmarshal(b, &errResp); err != nil { - return err - } - errResp.Status = resp.StatusCode - return errResp + GetVIPService(ctx context.Context, name tailcfg.ServiceName) (*tailscale.VIPService, error) + CreateOrUpdateVIPService(ctx context.Context, svc *tailscale.VIPService) error + DeleteVIPService(ctx context.Context, name tailcfg.ServiceName) error } diff --git a/internal/client/tailscale/tailscale.go b/internal/client/tailscale/tailscale.go index d29927e3f..cba7228bb 100644 --- a/internal/client/tailscale/tailscale.go +++ b/internal/client/tailscale/tailscale.go @@ -8,9 +8,16 @@ package tailscale import ( + "errors" + "io" + "net/http" + tsclient "tailscale.com/client/tailscale" ) +// maxSize is the maximum read size (10MB) of responses from the server. +const maxReadSize = 10 << 20 + func init() { tsclient.I_Acknowledge_This_API_Is_Unstable = true } @@ -50,3 +57,27 @@ func NewClient(tailnet string, auth AuthMethod) *Client { type Client struct { *tsclient.Client } + +// HandleErrorResponse is an alias to tailscale.com/client/tailscale. +func HandleErrorResponse(b []byte, resp *http.Response) error { + return tsclient.HandleErrorResponse(b, resp) +} + +// SendRequest add the authentication key to the request and sends it. It +// receives the response and reads up to 10MB of it. +func SendRequest(c *Client, req *http.Request) ([]byte, *http.Response, error) { + resp, err := c.Do(req) + if err != nil { + return nil, resp, err + } + defer resp.Body.Close() + + // Read response. Limit the response to 10MB. + // This limit is carried over from client/tailscale/tailscale.go. + body := io.LimitReader(resp.Body, maxReadSize+1) + b, err := io.ReadAll(body) + if len(b) > maxReadSize { + err = errors.New("API response too large") + } + return b, resp, err +} diff --git a/internal/client/tailscale/vip_service.go b/internal/client/tailscale/vip_service.go new file mode 100644 index 000000000..958192c4d --- /dev/null +++ b/internal/client/tailscale/vip_service.go @@ -0,0 +1,103 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tailscale + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + + "tailscale.com/tailcfg" + "tailscale.com/util/httpm" +) + +// VIPService is a Tailscale VIPService with Tailscale API JSON representation. +type VIPService struct { + // Name is a VIPService name in form svc:. + Name tailcfg.ServiceName `json:"name,omitempty"` + // Addrs are the IP addresses of the VIP Service. There are two addresses: + // the first is IPv4 and the second is IPv6. + // When creating a new VIP Service, the IP addresses are optional: if no + // addresses are specified then they will be selected. If an IPv4 address is + // specified at index 0, then that address will attempt to be used. An IPv6 + // address can not be specified upon creation. + Addrs []string `json:"addrs,omitempty"` + // Comment is an optional text string for display in the admin panel. + Comment string `json:"comment,omitempty"` + // Ports are the ports of a VIPService that will be configured via Tailscale serve config. + // If set, any node wishing to advertise this VIPService must have this port configured via Tailscale serve. + Ports []string `json:"ports,omitempty"` + // Tags are optional ACL tags that will be applied to the VIPService. + Tags []string `json:"tags,omitempty"` +} + +// GetVIPService retrieves a VIPService by its name. 
It returns 404 if the VIPService is not found. +func (client *Client) GetVIPService(ctx context.Context, name tailcfg.ServiceName) (*VIPService, error) { + path := client.BuildTailnetURL("vip-services", name.String()) + req, err := http.NewRequestWithContext(ctx, httpm.GET, path, nil) + if err != nil { + return nil, fmt.Errorf("error creating new HTTP request: %w", err) + } + b, resp, err := SendRequest(client, req) + if err != nil { + return nil, fmt.Errorf("error making Tailsale API request: %w", err) + } + // If status code was not successful, return the error. + // TODO: Change the check for the StatusCode to include other 2XX success codes. + if resp.StatusCode != http.StatusOK { + return nil, HandleErrorResponse(b, resp) + } + svc := &VIPService{} + if err := json.Unmarshal(b, svc); err != nil { + return nil, err + } + return svc, nil +} + +// CreateOrUpdateVIPService creates or updates a VIPService by its name. Caller must ensure that, if the +// VIPService already exists, the VIPService is fetched first to ensure that any auto-allocated IP addresses are not +// lost during the update. If the VIPService was created without any IP addresses explicitly set (so that they were +// auto-allocated by Tailscale) any subsequent request to this function that does not set any IP addresses will error. +func (client *Client) CreateOrUpdateVIPService(ctx context.Context, svc *VIPService) error { + data, err := json.Marshal(svc) + if err != nil { + return err + } + path := client.BuildTailnetURL("vip-services", svc.Name.String()) + req, err := http.NewRequestWithContext(ctx, httpm.PUT, path, bytes.NewBuffer(data)) + if err != nil { + return fmt.Errorf("error creating new HTTP request: %w", err) + } + b, resp, err := SendRequest(client, req) + if err != nil { + return fmt.Errorf("error making Tailscale API request: %w", err) + } + // If status code was not successful, return the error. + // TODO: Change the check for the StatusCode to include other 2XX success codes. + if resp.StatusCode != http.StatusOK { + return HandleErrorResponse(b, resp) + } + return nil +} + +// DeleteVIPService deletes a VIPService by its name. It returns an error if the VIPService +// does not exist or if the deletion fails. +func (client *Client) DeleteVIPService(ctx context.Context, name tailcfg.ServiceName) error { + path := client.BuildTailnetURL("vip-services", name.String()) + req, err := http.NewRequestWithContext(ctx, httpm.DELETE, path, nil) + if err != nil { + return fmt.Errorf("error creating new HTTP request: %w", err) + } + b, resp, err := SendRequest(client, req) + if err != nil { + return fmt.Errorf("error making Tailscale API request: %w", err) + } + // If status code was not successful, return the error. + if resp.StatusCode != http.StatusOK { + return HandleErrorResponse(b, resp) + } + return nil +} From 09982e1918f27c740320a4879bbafdd9f379733a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 18 Feb 2025 15:52:10 -0600 Subject: [PATCH 0519/1708] ipn/ipnlocal: reset always-on override and apply policy settings on start We already reset the always-on override flag when switching profiles and in a few other cases. In this PR, we update (*LocalBackend).Start() to reset it as well. This is necessary to support scenarios where Start() is called explicitly, such as when the GUI starts or when tailscale up is used with additional flags and passes prefs via ipn.Options in a call to Start() rather than via EditPrefs. 
Additionally, we update it to apply policy settings to the current prefs, which is necessary for properly overriding prefs specified in ipn.Options. Updates #14823 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 43d82c900..bd5f595be 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2341,12 +2341,20 @@ func (b *LocalBackend) Start(opts ipn.Options) error { }); err != nil { b.logf("failed to save UpdatePrefs state: %v", err) } - b.setAtomicValuesFromPrefsLocked(pv) - } else { - b.setAtomicValuesFromPrefsLocked(b.pm.CurrentPrefs()) } + // Reset the always-on override whenever Start is called. + b.resetAlwaysOnOverrideLocked() + // And also apply syspolicy settings to the current profile. + // This is important in two cases: when opts.UpdatePrefs is not nil, + // and when Always Mode is enabled and we need to set WantRunning to true. + if newp := b.pm.CurrentPrefs().AsStruct(); applySysPolicy(newp, b.lastSuggestedExitNode, b.overrideAlwaysOn) { + setExitNodeID(newp, b.netMap) + b.pm.setPrefsNoPermCheck(newp.View()) + } prefs := b.pm.CurrentPrefs() + b.setAtomicValuesFromPrefsLocked(prefs) + wantRunning := prefs.WantRunning() if wantRunning { if err := b.initMachineKeyLocked(); err != nil { From 323747c3e0c1c79bfad1bd28c76b351b076f942d Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 19 Feb 2025 10:41:45 -0800 Subject: [PATCH 0520/1708] various: disable MPTCP when setting TCP_USER_TIMEOUT sockopt (#15063) There's nothing about it on https://github.com/multipath-tcp/mptcp_net-next/issues/ but empirically MPTCP doesn't support this option on awly's kernel 6.13.2 and in GitHub actions. Updates #15015 Signed-off-by: Andrew Lytvynov --- cmd/derper/derper.go | 3 +++ net/ktimeout/ktimeout_linux_test.go | 9 +++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 4af63e192..980870847 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -324,6 +324,9 @@ func main() { Control: ktimeout.UserTimeout(*tcpUserTimeout), KeepAlive: *tcpKeepAlive, } + // As of 2025-02-19, MPTCP does not support TCP_USER_TIMEOUT socket option + // set in ktimeout.UserTimeout above. + lc.SetMultipathTCP(false) quietLogger := log.New(logger.HTTPServerLogFilter{Inner: log.Printf}, "", 0) httpsrv := &http.Server{ diff --git a/net/ktimeout/ktimeout_linux_test.go b/net/ktimeout/ktimeout_linux_test.go index a367bfd4a..df4156745 100644 --- a/net/ktimeout/ktimeout_linux_test.go +++ b/net/ktimeout/ktimeout_linux_test.go @@ -4,17 +4,22 @@ package ktimeout import ( + "context" "net" "testing" "time" - "golang.org/x/net/nettest" "golang.org/x/sys/unix" "tailscale.com/util/must" ) func TestSetUserTimeout(t *testing.T) { - l := must.Get(nettest.NewLocalListener("tcp")) + lc := net.ListenConfig{} + // As of 2025-02-19, MPTCP does not support TCP_USER_TIMEOUT socket option + // set in ktimeout.UserTimeout above. 
+ lc.SetMultipathTCP(false) + + l := must.Get(lc.Listen(context.Background(), "tcp", "localhost:0")) defer l.Close() var err error From cc923713f6f1f1765bd5c07146596e59f0eb9e2f Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 19 Feb 2025 10:42:06 -0800 Subject: [PATCH 0521/1708] tempfork/acme: pull in latest changes for Go 1.24 (#15062) https://github.com/tailscale/golang-x-crypto/commit/9a281fd8facad954dae80ef984c5d5d763f8ff91 Updates #15015 Signed-off-by: Andrew Lytvynov --- go.mod | 2 +- go.sum | 4 ++-- tempfork/acme/acme.go | 6 +++++- tempfork/acme/acme_test.go | 2 +- tempfork/acme/http.go | 21 ++++++++++++++++++++- tempfork/acme/types.go | 13 ++++++++++++- 6 files changed, 41 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 0845008bb..c926e8428 100644 --- a/go.mod +++ b/go.mod @@ -74,7 +74,7 @@ require ( github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 - github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 + github.com/tailscale/golang-x-crypto v0.0.0-20250218230618-9a281fd8faca github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a github.com/tailscale/mkctr v0.0.0-20250110151924-54977352e4a6 diff --git a/go.sum b/go.sum index 7be4c3eaf..be5fc57bc 100644 --- a/go.sum +++ b/go.sum @@ -900,8 +900,8 @@ github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8 github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg= github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 h1:/V2rCMMWcsjYaYO2MeovLw+ClP63OtXgCF2Y1eb8+Ns= github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41/go.mod h1:/roCdA6gg6lQyw/Oz6gIIGu3ggJKYhF+WC/AQReE5XQ= -github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 h1:rXZGgEa+k2vJM8xT0PoSKfVXwFGPQ3z3CJfmnHJkZZw= -github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= +github.com/tailscale/golang-x-crypto v0.0.0-20250218230618-9a281fd8faca h1:ecjHwH73Yvqf/oIdQ2vxAX+zc6caQsYdPzsxNW1J3G8= +github.com/tailscale/golang-x-crypto v0.0.0-20250218230618-9a281fd8faca/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio= github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= diff --git a/tempfork/acme/acme.go b/tempfork/acme/acme.go index 8bc2ac16e..94234efe3 100644 --- a/tempfork/acme/acme.go +++ b/tempfork/acme/acme.go @@ -557,7 +557,11 @@ func (c *Client) Accept(ctx context.Context, chal *Challenge) (*Challenge, error return nil, err } - res, err := c.post(ctx, nil, chal.URI, json.RawMessage("{}"), wantStatus( + payload := json.RawMessage("{}") + if len(chal.Payload) != 0 { + payload = chal.Payload + } + res, err := c.post(ctx, nil, chal.URI, payload, wantStatus( http.StatusOK, // according to the spec http.StatusAccepted, // Let's Encrypt: see https://goo.gl/WsJ7VT (acme-divergences.md) )) diff --git a/tempfork/acme/acme_test.go b/tempfork/acme/acme_test.go index dcd214896..5473bbc2b 100644 --- a/tempfork/acme/acme_test.go +++ 
b/tempfork/acme/acme_test.go @@ -875,7 +875,7 @@ func TestTLSALPN01ChallengeCert(t *testing.T) { } func TestTLSChallengeCertOpt(t *testing.T) { - key, err := rsa.GenerateKey(rand.Reader, 512) + key, err := rsa.GenerateKey(rand.Reader, 1024) if err != nil { t.Fatal(err) } diff --git a/tempfork/acme/http.go b/tempfork/acme/http.go index 58836e5d3..d92ff232f 100644 --- a/tempfork/acme/http.go +++ b/tempfork/acme/http.go @@ -15,6 +15,7 @@ import ( "io" "math/big" "net/http" + "runtime/debug" "strconv" "strings" "time" @@ -271,9 +272,27 @@ func (c *Client) httpClient() *http.Client { } // packageVersion is the version of the module that contains this package, for -// sending as part of the User-Agent header. It's set in version_go112.go. +// sending as part of the User-Agent header. var packageVersion string +func init() { + // Set packageVersion if the binary was built in modules mode and x/crypto + // was not replaced with a different module. + info, ok := debug.ReadBuildInfo() + if !ok { + return + } + for _, m := range info.Deps { + if m.Path != "golang.org/x/crypto" { + continue + } + if m.Replace == nil { + packageVersion = m.Version + } + break + } +} + // userAgent returns the User-Agent header value. It includes the package name, // the module version (if available), and the c.UserAgent value (if set). func (c *Client) userAgent() string { diff --git a/tempfork/acme/types.go b/tempfork/acme/types.go index 9fad800b4..518fa2440 100644 --- a/tempfork/acme/types.go +++ b/tempfork/acme/types.go @@ -7,6 +7,7 @@ package acme import ( "crypto" "crypto/x509" + "encoding/json" "errors" "fmt" "net/http" @@ -292,7 +293,7 @@ type Directory struct { // Renewal Information (ARI) Extension. RenewalInfoURL string - // Term is a URI identifying the current terms of service. + // Terms is a URI identifying the current terms of service. Terms string // Website is an HTTP or HTTPS URL locating a website @@ -531,6 +532,16 @@ type Challenge struct { // when this challenge was used. // The type of a non-nil value is *Error. Error error + + // Payload is the JSON-formatted payload that the client sends + // to the server to indicate it is ready to respond to the challenge. + // When unset, it defaults to an empty JSON object: {}. + // For most challenges, the client must not set Payload, + // see https://tools.ietf.org/html/rfc8555#section-7.5.1. + // Payload is used only for newer challenges (such as "device-attest-01") + // where the client must send additional data for the server to validate + // the challenge. + Payload json.RawMessage } // wireChallenge is ACME JSON challenge representation. 
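A minimal, hypothetical sketch (not part of the patch above) of how a caller might use the new Challenge.Payload field when answering a "device-attest-01" challenge. The "attObj" key and the base64url-encoded CBOR attestation object follow the ACME device attestation draft, and the import path tailscale.com/tempfork/acme plus the helper name are assumptions for illustration only:

package attestexample // illustrative only

import (
	"context"
	"encoding/base64"
	"encoding/json"

	"tailscale.com/tempfork/acme"
)

// acceptDeviceAttestChallenge shows how Challenge.Payload could be populated
// before calling Accept. For standard challenges, Payload is left unset and
// Accept keeps sending the default empty JSON object "{}".
func acceptDeviceAttestChallenge(ctx context.Context, c *acme.Client, chal *acme.Challenge, attestation []byte) error {
	// Encode the platform's attestation object as base64url under "attObj"
	// (payload shape per the device attestation draft; an assumption here).
	payload, err := json.Marshal(map[string]string{
		"attObj": base64.RawURLEncoding.EncodeToString(attestation),
	})
	if err != nil {
		return err
	}
	chal.Payload = payload // Accept sends this instead of the default "{}"
	_, err = c.Accept(ctx, chal)
	return err
}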
From 836c01258de01a38fdd267957eeedab7faf0f4f2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 19 Feb 2025 10:55:49 -0800 Subject: [PATCH 0522/1708] go.toolchain.branch: update to Go 1.24 (#15016) * go.toolchain.branch: update to Go 1.24 Updates #15015 Change-Id: I29c934ec17e60c3ac3264f30fbbe68fc21422f4d Signed-off-by: Brad Fitzpatrick * cmd/testwrapper: fix for go1.24 Updates #15015 Signed-off-by: Paul Scott * go.mod,Dockerfile: bump to Go 1.24 Also bump golangci-lint to a version that was built with 1.24 Updates #15015 Signed-off-by: Andrew Lytvynov --------- Signed-off-by: Brad Fitzpatrick Signed-off-by: Paul Scott Signed-off-by: Andrew Lytvynov Co-authored-by: Paul Scott Co-authored-by: Andrew Lytvynov --- .github/workflows/golangci-lint.yml | 2 +- Dockerfile | 2 +- cmd/derper/depaware.txt | 82 ++++++++++++++++++---------- cmd/k8s-operator/depaware.txt | 78 ++++++++++++++++++--------- cmd/stund/depaware.txt | 84 +++++++++++++++++++---------- cmd/tailscale/depaware.txt | 77 +++++++++++++++++--------- cmd/tailscaled/depaware.txt | 77 +++++++++++++++++--------- cmd/testwrapper/testwrapper.go | 53 ++++++++++-------- cmd/testwrapper/testwrapper_test.go | 15 +++--- cmd/tsconnect/common.go | 4 ++ go.mod | 2 +- go.toolchain.branch | 2 +- go.toolchain.rev | 2 +- 13 files changed, 312 insertions(+), 168 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 176ee5f02..3ee6287b9 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -33,7 +33,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@2e788936b09dd82dc280e845628a40d2ba6b204c # v6.3.1 with: - version: v1.60 + version: v1.64 # Show only new issues if it's a pull request. only-new-issues: true diff --git a/Dockerfile b/Dockerfile index 4ad3d88d9..32cb92ab0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -27,7 +27,7 @@ # $ docker exec tailscaled tailscale status -FROM golang:1.23-alpine AS build-env +FROM golang:1.24-alpine AS build-env WORKDIR /go/src/tailscale diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index e9df49b72..1812a1a8d 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -191,13 +191,11 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/crypto/cryptobyte from crypto/ecdsa+ golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/hkdf from crypto/tls+ golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ W golang.org/x/exp/constraints from tailscale.com/util/winutil golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting+ L golang.org/x/net/bpf from github.com/mdlayher/netlink+ @@ -230,7 +228,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/ecdsa+ + crypto/aes from crypto/internal/hpke+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509 @@ -239,31 +237,58 @@ tailscale.com/cmd/derper dependencies: (generated by 
github.com/tailscale/depawa crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ crypto/hmac from crypto/tls+ - crypto/internal/alias from crypto/aes+ - crypto/internal/bigmod from crypto/ecdsa+ crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/edwards25519 from crypto/ed25519 - crypto/internal/edwards25519/field from crypto/ecdh+ + crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/fips140 from crypto/internal/fips140/aes+ + crypto/internal/fips140/aes from crypto/aes+ + crypto/internal/fips140/aes/gcm from crypto/cipher+ + crypto/internal/fips140/alias from crypto/cipher+ + crypto/internal/fips140/bigmod from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/check from crypto/internal/fips140/aes+ + crypto/internal/fips140/drbg from crypto/internal/fips140/aes/gcm+ + crypto/internal/fips140/ecdh from crypto/ecdh + crypto/internal/fips140/ecdsa from crypto/ecdsa + crypto/internal/fips140/ed25519 from crypto/ed25519 + crypto/internal/fips140/edwards25519 from crypto/internal/fips140/ed25519 + crypto/internal/fips140/edwards25519/field from crypto/ecdh+ + crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ + crypto/internal/fips140/hmac from crypto/hmac+ + crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/nistec from crypto/elliptic+ + crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec + crypto/internal/fips140/rsa from crypto/rsa + crypto/internal/fips140/sha256 from crypto/internal/fips140/check+ + crypto/internal/fips140/sha3 from crypto/internal/fips140/hmac+ + crypto/internal/fips140/sha512 from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ + crypto/internal/fips140/tls12 from crypto/tls + crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140hash from crypto/ecdsa+ + crypto/internal/fips140only from crypto/cipher+ crypto/internal/hpke from crypto/tls - crypto/internal/mlkem768 from crypto/tls - crypto/internal/nistec from crypto/ecdh+ - crypto/internal/nistec/fiat from crypto/internal/nistec + crypto/internal/impl from crypto/internal/fips140/aes+ crypto/internal/randutil from crypto/dsa+ + crypto/internal/sysrand from crypto/internal/entropy+ crypto/md5 from crypto/tls+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ + crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ - crypto/subtle from crypto/aes+ + crypto/subtle from crypto/cipher+ crypto/tls from golang.org/x/crypto/acme+ + crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509+ - embed from crypto/internal/nistec+ + embed from google.golang.org/protobuf/internal/editiondefaults+ encoding from encoding/json+ encoding/asn1 from crypto/x509+ encoding/base32 from github.com/fxamacker/cbor/v2+ @@ -284,23 +309,22 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa html from net/http/pprof+ html/template from tailscale.com/cmd/derper internal/abi from crypto/x509/internal/macos+ - internal/asan 
from syscall + internal/asan from syscall+ internal/bisect from internal/godebug internal/bytealg from bytes+ - internal/byteorder from crypto/aes+ + internal/byteorder from crypto/cipher+ internal/chacha8rand from math/rand/v2+ - internal/concurrent from unique internal/coverage/rtcov from runtime - internal/cpu from crypto/aes+ + internal/cpu from crypto/internal/fips140deps/cpu+ internal/filepathlite from os+ internal/fmtsort from fmt+ - internal/goarch from crypto/aes+ + internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from crypto/tls+ internal/godebugs from internal/godebug+ - internal/goexperiment from runtime + internal/goexperiment from runtime+ internal/goos from crypto/x509+ internal/itoa from internal/poll+ - internal/msan from syscall + internal/msan from syscall+ internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ @@ -310,17 +334,20 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa internal/reflectlite from context+ internal/runtime/atomic from internal/runtime/exithook+ internal/runtime/exithook from runtime + internal/runtime/maps from reflect+ + internal/runtime/math from internal/runtime/maps+ + internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ internal/singleflight from net internal/stringslite from embed+ + internal/sync from sync+ internal/syscall/execenv from os+ - LD internal/syscall/unix from crypto/rand+ - W internal/syscall/windows from crypto/rand+ + LD internal/syscall/unix from crypto/internal/sysrand+ + W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os internal/unsafeheader from internal/reflectlite+ - internal/weak from unique io from bufio+ io/fs from crypto/x509+ L io/ioutil from github.com/mitchellh/go-ps+ @@ -332,7 +359,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa math/big from crypto/dsa+ math/bits from compress/flate+ math/rand from github.com/mdlayher/netlink+ - math/rand/v2 from internal/concurrent+ + math/rand/v2 from crypto/ecdsa+ mime from github.com/prometheus/common/expfmt+ mime/multipart from net/http mime/quotedprintable from mime/multipart @@ -345,7 +372,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa net/netip from go4.org/netipx+ net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ - os from crypto/rand+ + os from crypto/internal/sysrand+ os/exec from github.com/coreos/go-iptables/iptables+ os/signal from tailscale.com/cmd/derper W os/user from tailscale.com/util/winutil+ @@ -354,10 +381,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa reflect from crypto/x509+ regexp from github.com/coreos/go-iptables/iptables+ regexp/syntax from regexp - runtime from crypto/internal/nistec+ + runtime from crypto/internal/fips140+ runtime/debug from github.com/prometheus/client_golang/prometheus+ - runtime/internal/math from runtime - runtime/internal/sys from runtime runtime/metrics from github.com/prometheus/client_golang/prometheus+ runtime/pprof from net/http/pprof runtime/trace from net/http/pprof @@ -367,7 +392,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa strings from bufio+ sync from compress/flate+ sync/atomic from context+ - syscall from crypto/rand+ + syscall from crypto/internal/sysrand+ text/tabwriter from 
runtime/pprof text/template from html/template text/template/parse from html/template+ @@ -377,3 +402,4 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ + weak from unique diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 520595bf6..54d9bd248 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -997,14 +997,13 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/cryptobyte from crypto/ecdsa+ golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ - golang.org/x/crypto/hkdf from crypto/tls+ + golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ @@ -1055,7 +1054,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/ecdsa+ + crypto/aes from crypto/internal/hpke+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509+ @@ -1064,27 +1063,54 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ crypto/hmac from crypto/tls+ - crypto/internal/alias from crypto/aes+ - crypto/internal/bigmod from crypto/ecdsa+ crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/edwards25519 from crypto/ed25519 - crypto/internal/edwards25519/field from crypto/ecdh+ + crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/fips140 from crypto/internal/fips140/aes+ + crypto/internal/fips140/aes from crypto/aes+ + crypto/internal/fips140/aes/gcm from crypto/cipher+ + crypto/internal/fips140/alias from crypto/cipher+ + crypto/internal/fips140/bigmod from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/check from crypto/internal/fips140/aes+ + crypto/internal/fips140/drbg from crypto/internal/fips140/aes/gcm+ + crypto/internal/fips140/ecdh from crypto/ecdh + crypto/internal/fips140/ecdsa from crypto/ecdsa + crypto/internal/fips140/ed25519 from crypto/ed25519 + crypto/internal/fips140/edwards25519 from crypto/internal/fips140/ed25519 + crypto/internal/fips140/edwards25519/field from crypto/ecdh+ + crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ + crypto/internal/fips140/hmac from crypto/hmac+ + crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/nistec from crypto/elliptic+ + crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec + crypto/internal/fips140/rsa from crypto/rsa + crypto/internal/fips140/sha256 from 
crypto/internal/fips140/check+ + crypto/internal/fips140/sha3 from crypto/internal/fips140/hmac+ + crypto/internal/fips140/sha512 from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ + crypto/internal/fips140/tls12 from crypto/tls + crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140hash from crypto/ecdsa+ + crypto/internal/fips140only from crypto/cipher+ crypto/internal/hpke from crypto/tls - crypto/internal/mlkem768 from crypto/tls - crypto/internal/nistec from crypto/ecdh+ - crypto/internal/nistec/fiat from crypto/internal/nistec + crypto/internal/impl from crypto/internal/fips140/aes+ crypto/internal/randutil from crypto/dsa+ + crypto/internal/sysrand from crypto/internal/entropy+ crypto/md5 from crypto/tls+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls+ crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ + crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ - crypto/subtle from crypto/aes+ + crypto/subtle from crypto/cipher+ crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ + crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509+ @@ -1092,7 +1118,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ database/sql/driver from database/sql+ W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe - embed from crypto/internal/nistec+ + embed from github.com/tailscale/web-client-prebuilt+ encoding from encoding/gob+ encoding/asn1 from crypto/x509+ encoding/base32 from github.com/fxamacker/cbor/v2+ @@ -1112,7 +1138,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ go/build/constraint from go/parser go/doc from k8s.io/apimachinery/pkg/runtime go/doc/comment from go/doc - go/internal/typeparams from go/parser go/parser from k8s.io/apimachinery/pkg/runtime go/scanner from go/ast+ go/token from go/ast+ @@ -1124,24 +1149,23 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ html from html/template+ html/template from github.com/gorilla/csrf internal/abi from crypto/x509/internal/macos+ - internal/asan from syscall + internal/asan from syscall+ internal/bisect from internal/godebug internal/bytealg from bytes+ - internal/byteorder from crypto/aes+ + internal/byteorder from crypto/cipher+ internal/chacha8rand from math/rand/v2+ - internal/concurrent from unique internal/coverage/rtcov from runtime - internal/cpu from crypto/aes+ + internal/cpu from crypto/internal/fips140deps/cpu+ internal/filepathlite from os+ internal/fmtsort from fmt+ - internal/goarch from crypto/aes+ + internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from archive/tar+ internal/godebugs from internal/godebug+ - internal/goexperiment from runtime + internal/goexperiment from runtime+ internal/goos from crypto/x509+ internal/itoa from internal/poll+ internal/lazyregexp from go/doc - internal/msan from syscall + internal/msan from syscall+ internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ @@ -1151,18 +1175,21 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 
internal/reflectlite from context+ internal/runtime/atomic from internal/runtime/exithook+ internal/runtime/exithook from runtime + internal/runtime/maps from reflect+ + internal/runtime/math from internal/runtime/maps+ + internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ + internal/sync from sync+ internal/syscall/execenv from os+ - LD internal/syscall/unix from crypto/rand+ - W internal/syscall/windows from crypto/rand+ + LD internal/syscall/unix from crypto/internal/sysrand+ + W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os internal/unsafeheader from internal/reflectlite+ - internal/weak from unique io from archive/tar+ io/fs from archive/tar+ io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ @@ -1191,7 +1218,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ net/netip from github.com/gaissmai/bart+ net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ net/url from crypto/x509+ - os from crypto/rand+ + os from crypto/internal/sysrand+ os/exec from github.com/aws/aws-sdk-go-v2/credentials/processcreds+ os/signal from sigs.k8s.io/controller-runtime/pkg/manager/signals os/user from archive/tar+ @@ -1202,8 +1229,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ regexp/syntax from regexp runtime from archive/tar+ runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ - runtime/internal/math from runtime - runtime/internal/sys from runtime runtime/metrics from github.com/prometheus/client_golang/prometheus+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof @@ -1223,3 +1248,4 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ + weak from unique diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index c553b9be5..1d0a093c4 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -88,13 +88,11 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/cryptobyte from crypto/ecdsa+ golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/hkdf from crypto/tls+ golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from net/http golang.org/x/net/http/httpproxy from net/http @@ -116,7 +114,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/ecdsa+ + crypto/aes from crypto/internal/hpke+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509 @@ -124,32 +122,59 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar crypto/ecdsa from crypto/tls+ 
crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ - crypto/hmac from crypto/tls+ - crypto/internal/alias from crypto/aes+ - crypto/internal/bigmod from crypto/ecdsa+ + crypto/hmac from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/edwards25519 from crypto/ed25519 - crypto/internal/edwards25519/field from crypto/ecdh+ + crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/fips140 from crypto/internal/fips140/aes+ + crypto/internal/fips140/aes from crypto/aes+ + crypto/internal/fips140/aes/gcm from crypto/cipher+ + crypto/internal/fips140/alias from crypto/cipher+ + crypto/internal/fips140/bigmod from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/check from crypto/internal/fips140/aes+ + crypto/internal/fips140/drbg from crypto/internal/fips140/aes/gcm+ + crypto/internal/fips140/ecdh from crypto/ecdh + crypto/internal/fips140/ecdsa from crypto/ecdsa + crypto/internal/fips140/ed25519 from crypto/ed25519 + crypto/internal/fips140/edwards25519 from crypto/internal/fips140/ed25519 + crypto/internal/fips140/edwards25519/field from crypto/ecdh+ + crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ + crypto/internal/fips140/hmac from crypto/hmac+ + crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/nistec from crypto/elliptic+ + crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec + crypto/internal/fips140/rsa from crypto/rsa + crypto/internal/fips140/sha256 from crypto/internal/fips140/check+ + crypto/internal/fips140/sha3 from crypto/internal/fips140/hmac+ + crypto/internal/fips140/sha512 from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ + crypto/internal/fips140/tls12 from crypto/tls + crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140hash from crypto/ecdsa+ + crypto/internal/fips140only from crypto/cipher+ crypto/internal/hpke from crypto/tls - crypto/internal/mlkem768 from crypto/tls - crypto/internal/nistec from crypto/ecdh+ - crypto/internal/nistec/fiat from crypto/internal/nistec + crypto/internal/impl from crypto/internal/fips140/aes+ crypto/internal/randutil from crypto/dsa+ + crypto/internal/sysrand from crypto/internal/entropy+ crypto/md5 from crypto/tls+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ + crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ - crypto/subtle from crypto/aes+ + crypto/subtle from crypto/cipher+ crypto/tls from net/http+ + crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls D crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509 - embed from crypto/internal/nistec+ + embed from google.golang.org/protobuf/internal/editiondefaults+ encoding from encoding/json+ encoding/asn1 from crypto/x509+ encoding/base32 from github.com/go-json-experiment/json @@ -169,23 +194,22 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar hash/maphash from go4.org/mem html from net/http/pprof+ internal/abi from crypto/x509/internal/macos+ - internal/asan from syscall + 
internal/asan from syscall+ internal/bisect from internal/godebug internal/bytealg from bytes+ - internal/byteorder from crypto/aes+ + internal/byteorder from crypto/cipher+ internal/chacha8rand from math/rand/v2+ - internal/concurrent from unique internal/coverage/rtcov from runtime - internal/cpu from crypto/aes+ + internal/cpu from crypto/internal/fips140deps/cpu+ internal/filepathlite from os+ internal/fmtsort from fmt - internal/goarch from crypto/aes+ + internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from crypto/tls+ internal/godebugs from internal/godebug+ - internal/goexperiment from runtime + internal/goexperiment from runtime+ internal/goos from crypto/x509+ internal/itoa from internal/poll+ - internal/msan from syscall + internal/msan from syscall+ internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ @@ -195,17 +219,20 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar internal/reflectlite from context+ internal/runtime/atomic from internal/runtime/exithook+ internal/runtime/exithook from runtime + internal/runtime/maps from reflect+ + internal/runtime/math from internal/runtime/maps+ + internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ internal/singleflight from net internal/stringslite from embed+ + internal/sync from sync+ internal/syscall/execenv from os - LD internal/syscall/unix from crypto/rand+ - W internal/syscall/windows from crypto/rand+ + LD internal/syscall/unix from crypto/internal/sysrand+ + W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os internal/unsafeheader from internal/reflectlite+ - internal/weak from unique io from bufio+ io/fs from crypto/x509+ iter from maps+ @@ -216,7 +243,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar math/big from crypto/dsa+ math/bits from compress/flate+ math/rand from math/big+ - math/rand/v2 from internal/concurrent+ + math/rand/v2 from crypto/ecdsa+ mime from github.com/prometheus/common/expfmt+ mime/multipart from net/http mime/quotedprintable from mime/multipart @@ -229,17 +256,15 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar net/netip from go4.org/netipx+ net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ - os from crypto/rand+ + os from crypto/internal/sysrand+ os/signal from tailscale.com/cmd/stund path from github.com/prometheus/client_golang/prometheus/internal+ path/filepath from crypto/x509+ reflect from crypto/x509+ regexp from github.com/prometheus/client_golang/prometheus/internal+ regexp/syntax from regexp - runtime from crypto/internal/nistec+ + runtime from crypto/internal/fips140+ runtime/debug from github.com/prometheus/client_golang/prometheus+ - runtime/internal/math from runtime - runtime/internal/sys from runtime runtime/metrics from github.com/prometheus/client_golang/prometheus+ runtime/pprof from net/http/pprof runtime/trace from net/http/pprof @@ -249,7 +274,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar strings from bufio+ sync from compress/flate+ sync/atomic from context+ - syscall from crypto/rand+ + syscall from crypto/internal/sysrand+ text/tabwriter from runtime/pprof time from compress/gzip+ unicode from bytes+ @@ -257,3 +282,4 @@ tailscale.com/cmd/stund dependencies: (generated by 
github.com/tailscale/depawar unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ + weak from unique diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 8c972aa69..afe62165c 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -195,14 +195,13 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/crypto/cryptobyte from crypto/ecdsa+ golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/hkdf from crypto/tls+ + golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/pbkdf2 from software.sslmate.com/src/go-pkcs12 golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ W golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from tailscale.com/util/syspolicy/internal/metrics+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ @@ -246,7 +245,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/ecdsa+ + crypto/aes from crypto/internal/hpke+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509 @@ -255,34 +254,61 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ crypto/hmac from crypto/tls+ - crypto/internal/alias from crypto/aes+ - crypto/internal/bigmod from crypto/ecdsa+ crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/edwards25519 from crypto/ed25519 - crypto/internal/edwards25519/field from crypto/ecdh+ + crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/fips140 from crypto/internal/fips140/aes+ + crypto/internal/fips140/aes from crypto/aes+ + crypto/internal/fips140/aes/gcm from crypto/cipher+ + crypto/internal/fips140/alias from crypto/cipher+ + crypto/internal/fips140/bigmod from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/check from crypto/internal/fips140/aes+ + crypto/internal/fips140/drbg from crypto/internal/fips140/aes/gcm+ + crypto/internal/fips140/ecdh from crypto/ecdh + crypto/internal/fips140/ecdsa from crypto/ecdsa + crypto/internal/fips140/ed25519 from crypto/ed25519 + crypto/internal/fips140/edwards25519 from crypto/internal/fips140/ed25519 + crypto/internal/fips140/edwards25519/field from crypto/ecdh+ + crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ + crypto/internal/fips140/hmac from crypto/hmac+ + crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/nistec from crypto/elliptic+ + crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec + crypto/internal/fips140/rsa from crypto/rsa + crypto/internal/fips140/sha256 from crypto/internal/fips140/check+ + crypto/internal/fips140/sha3 from crypto/internal/fips140/hmac+ + crypto/internal/fips140/sha512 from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/subtle 
from crypto/internal/fips140/aes+ + crypto/internal/fips140/tls12 from crypto/tls + crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140hash from crypto/ecdsa+ + crypto/internal/fips140only from crypto/cipher+ crypto/internal/hpke from crypto/tls - crypto/internal/mlkem768 from crypto/tls - crypto/internal/nistec from crypto/ecdh+ - crypto/internal/nistec/fiat from crypto/internal/nistec + crypto/internal/impl from crypto/internal/fips140/aes+ crypto/internal/randutil from crypto/dsa+ + crypto/internal/sysrand from crypto/internal/entropy+ crypto/md5 from crypto/tls+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ + crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ - crypto/subtle from crypto/aes+ + crypto/subtle from crypto/cipher+ crypto/tls from github.com/miekg/dns+ + crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509+ DW database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe - embed from crypto/internal/nistec+ + embed from github.com/peterbourgon/ff/v3+ encoding from encoding/gob+ encoding/asn1 from crypto/x509+ encoding/base32 from github.com/fxamacker/cbor/v2+ @@ -307,23 +333,22 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep image/color from github.com/skip2/go-qrcode+ image/png from github.com/skip2/go-qrcode internal/abi from crypto/x509/internal/macos+ - internal/asan from syscall + internal/asan from syscall+ internal/bisect from internal/godebug internal/bytealg from bytes+ - internal/byteorder from crypto/aes+ + internal/byteorder from crypto/cipher+ internal/chacha8rand from math/rand/v2+ - internal/concurrent from unique internal/coverage/rtcov from runtime - internal/cpu from crypto/aes+ + internal/cpu from crypto/internal/fips140deps/cpu+ internal/filepathlite from os+ internal/fmtsort from fmt+ - internal/goarch from crypto/aes+ + internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from archive/tar+ internal/godebugs from internal/godebug+ - internal/goexperiment from runtime + internal/goexperiment from runtime+ internal/goos from crypto/x509+ internal/itoa from internal/poll+ - internal/msan from syscall + internal/msan from syscall+ internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ @@ -332,18 +357,21 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep internal/reflectlite from context+ internal/runtime/atomic from internal/runtime/exithook+ internal/runtime/exithook from runtime + internal/runtime/maps from reflect+ + internal/runtime/math from internal/runtime/maps+ + internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ + internal/sync from sync+ internal/syscall/execenv from os+ - LD internal/syscall/unix from crypto/rand+ - W internal/syscall/windows from crypto/rand+ + LD internal/syscall/unix from crypto/internal/sysrand+ + W internal/syscall/windows from crypto/internal/sysrand+ W 
internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os internal/unsafeheader from internal/reflectlite+ - internal/weak from unique io from archive/tar+ io/fs from archive/tar+ io/ioutil from github.com/mitchellh/go-ps+ @@ -369,7 +397,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep net/netip from go4.org/netipx+ net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ - os from crypto/rand+ + os from crypto/internal/sysrand+ os/exec from github.com/coreos/go-iptables/iptables+ os/signal from tailscale.com/cmd/tailscale/cli os/user from archive/tar+ @@ -380,8 +408,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep regexp/syntax from regexp runtime from archive/tar+ runtime/debug from tailscale.com+ - runtime/internal/math from runtime - runtime/internal/sys from runtime slices from tailscale.com/client/web+ sort from compress/flate+ strconv from archive/tar+ @@ -398,3 +424,4 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ + weak from unique diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 594ebeb17..c0f592ea1 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -449,14 +449,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/cryptobyte from crypto/ecdsa+ golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ - golang.org/x/crypto/hkdf from crypto/tls+ + golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ LD golang.org/x/crypto/ssh from github.com/pkg/sftp+ LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ @@ -504,7 +503,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/ecdsa+ + crypto/aes from crypto/internal/hpke+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509+ @@ -513,34 +512,61 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ crypto/hmac from crypto/tls+ - crypto/internal/alias from crypto/aes+ - crypto/internal/bigmod from crypto/ecdsa+ crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/edwards25519 from crypto/ed25519 - crypto/internal/edwards25519/field from crypto/ecdh+ + crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/fips140 from crypto/internal/fips140/aes+ + crypto/internal/fips140/aes from crypto/aes+ + crypto/internal/fips140/aes/gcm from crypto/cipher+ + 
crypto/internal/fips140/alias from crypto/cipher+ + crypto/internal/fips140/bigmod from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/check from crypto/internal/fips140/aes+ + crypto/internal/fips140/drbg from crypto/internal/fips140/aes/gcm+ + crypto/internal/fips140/ecdh from crypto/ecdh + crypto/internal/fips140/ecdsa from crypto/ecdsa + crypto/internal/fips140/ed25519 from crypto/ed25519 + crypto/internal/fips140/edwards25519 from crypto/internal/fips140/ed25519 + crypto/internal/fips140/edwards25519/field from crypto/ecdh+ + crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ + crypto/internal/fips140/hmac from crypto/hmac+ + crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/nistec from crypto/elliptic+ + crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec + crypto/internal/fips140/rsa from crypto/rsa + crypto/internal/fips140/sha256 from crypto/internal/fips140/check+ + crypto/internal/fips140/sha3 from crypto/internal/fips140/hmac+ + crypto/internal/fips140/sha512 from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ + crypto/internal/fips140/tls12 from crypto/tls + crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140hash from crypto/ecdsa+ + crypto/internal/fips140only from crypto/cipher+ crypto/internal/hpke from crypto/tls - crypto/internal/mlkem768 from crypto/tls - crypto/internal/nistec from crypto/ecdh+ - crypto/internal/nistec/fiat from crypto/internal/nistec + crypto/internal/impl from crypto/internal/fips140/aes+ crypto/internal/randutil from crypto/dsa+ + crypto/internal/sysrand from crypto/internal/entropy+ crypto/md5 from crypto/tls+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls+ crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ + crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ - crypto/subtle from crypto/aes+ + crypto/subtle from crypto/cipher+ crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ + crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509+ DW database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe - embed from crypto/internal/nistec+ + embed from github.com/tailscale/web-client-prebuilt+ encoding from encoding/gob+ encoding/asn1 from crypto/x509+ encoding/base32 from github.com/fxamacker/cbor/v2+ @@ -562,23 +588,22 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de html from html/template+ html/template from github.com/gorilla/csrf internal/abi from crypto/x509/internal/macos+ - internal/asan from syscall + internal/asan from syscall+ internal/bisect from internal/godebug internal/bytealg from bytes+ - internal/byteorder from crypto/aes+ + internal/byteorder from crypto/cipher+ internal/chacha8rand from math/rand/v2+ - internal/concurrent from unique internal/coverage/rtcov from runtime - internal/cpu from crypto/aes+ + internal/cpu from crypto/internal/fips140deps/cpu+ internal/filepathlite from os+ internal/fmtsort from fmt+ - internal/goarch from crypto/aes+ + internal/goarch from crypto/internal/fips140deps/cpu+ 
internal/godebug from archive/tar+ internal/godebugs from internal/godebug+ - internal/goexperiment from runtime + internal/goexperiment from runtime+ internal/goos from crypto/x509+ internal/itoa from internal/poll+ - internal/msan from syscall + internal/msan from syscall+ internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ @@ -588,18 +613,21 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/reflectlite from context+ internal/runtime/atomic from internal/runtime/exithook+ internal/runtime/exithook from runtime + internal/runtime/maps from reflect+ + internal/runtime/math from internal/runtime/maps+ + internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ + internal/sync from sync+ internal/syscall/execenv from os+ - LD internal/syscall/unix from crypto/rand+ - W internal/syscall/windows from crypto/rand+ + LD internal/syscall/unix from crypto/internal/sysrand+ + W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os internal/unsafeheader from internal/reflectlite+ - internal/weak from unique io from archive/tar+ io/fs from archive/tar+ io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ @@ -626,7 +654,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net/netip from github.com/tailscale/wireguard-go/conn+ net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ net/url from crypto/x509+ - os from crypto/rand+ + os from crypto/internal/sysrand+ os/exec from github.com/aws/aws-sdk-go-v2/credentials/processcreds+ os/signal from tailscale.com/cmd/tailscaled os/user from archive/tar+ @@ -637,8 +665,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de regexp/syntax from regexp runtime from archive/tar+ runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ - runtime/internal/math from runtime - runtime/internal/sys from runtime runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof slices from tailscale.com/appc+ @@ -657,3 +683,4 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ + weak from unique diff --git a/cmd/testwrapper/testwrapper.go b/cmd/testwrapper/testwrapper.go index 67b8a1483..1df1ef11f 100644 --- a/cmd/testwrapper/testwrapper.go +++ b/cmd/testwrapper/testwrapper.go @@ -10,6 +10,7 @@ package main import ( "bufio" "bytes" + "cmp" "context" "encoding/json" "errors" @@ -59,11 +60,12 @@ type packageTests struct { } type goTestOutput struct { - Time time.Time - Action string - Package string - Test string - Output string + Time time.Time + Action string + ImportPath string + Package string + Test string + Output string } var debug = os.Getenv("TS_TESTWRAPPER_DEBUG") != "" @@ -111,42 +113,43 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te for s.Scan() { var goOutput goTestOutput if err := json.Unmarshal(s.Bytes(), &goOutput); err != nil { - if errors.Is(err, io.EOF) || errors.Is(err, os.ErrClosed) { - break - } - - // `go test -json` outputs invalid JSON when a build fails. - // In that case, discard the the output and start reading again. - // The build error will be printed to stderr. 
- // See: https://github.com/golang/go/issues/35169 - if _, ok := err.(*json.SyntaxError); ok { - fmt.Println(s.Text()) - continue - } - panic(err) + return fmt.Errorf("failed to parse go test output %q: %w", s.Bytes(), err) } - pkg := goOutput.Package + pkg := cmp.Or( + goOutput.Package, + "build:"+goOutput.ImportPath, // can be "./cmd" while Package is "tailscale.com/cmd" so use separate namespace + ) pkgTests := resultMap[pkg] if pkgTests == nil { - pkgTests = make(map[string]*testAttempt) + pkgTests = map[string]*testAttempt{ + "": {}, // Used for start time and build logs. + } resultMap[pkg] = pkgTests } if goOutput.Test == "" { switch goOutput.Action { case "start": - pkgTests[""] = &testAttempt{start: goOutput.Time} - case "fail", "pass", "skip": + pkgTests[""].start = goOutput.Time + case "build-output": + pkgTests[""].logs.WriteString(goOutput.Output) + case "build-fail", "fail", "pass", "skip": for _, test := range pkgTests { if test.testName != "" && test.outcome == "" { test.outcome = "fail" ch <- test } } + outcome := goOutput.Action + if outcome == "build-fail" { + outcome = "FAIL" + } + pkgTests[""].logs.WriteString(goOutput.Output) ch <- &testAttempt{ pkg: goOutput.Package, - outcome: goOutput.Action, + outcome: outcome, start: pkgTests[""].start, end: goOutput.Time, + logs: pkgTests[""].logs, pkgFinished: true, } } @@ -215,6 +218,9 @@ func main() { } toRun := []*nextRun{firstRun} printPkgOutcome := func(pkg, outcome string, attempt int, runtime time.Duration) { + if pkg == "" { + return // We reach this path on a build error. + } if outcome == "skip" { fmt.Printf("?\t%s [skipped/no tests] \n", pkg) return @@ -270,6 +276,7 @@ func main() { // when a package times out. failed = true } + os.Stdout.ReadFrom(&tr.logs) printPkgOutcome(tr.pkg, tr.outcome, thisRun.attempt, tr.end.Sub(tr.start)) continue } diff --git a/cmd/testwrapper/testwrapper_test.go b/cmd/testwrapper/testwrapper_test.go index fb2ed2c52..ace53ccd0 100644 --- a/cmd/testwrapper/testwrapper_test.go +++ b/cmd/testwrapper/testwrapper_test.go @@ -11,6 +11,7 @@ import ( "os/exec" "path/filepath" "regexp" + "strings" "sync" "testing" ) @@ -154,24 +155,24 @@ func TestBuildError(t *testing.T) { t.Fatalf("writing package: %s", err) } - buildErr := []byte("builderror_test.go:3:1: expected declaration, found derp\nFAIL command-line-arguments [setup failed]") + wantErr := "builderror_test.go:3:1: expected declaration, found derp\nFAIL" // Confirm `go test` exits with code 1. goOut, err := exec.Command("go", "test", testfile).CombinedOutput() if code, ok := errExitCode(err); !ok || code != 1 { - t.Fatalf("go test %s: expected error with exit code 0 but got: %v", testfile, err) + t.Fatalf("go test %s: got exit code %d, want 1 (err: %v)", testfile, code, err) } - if !bytes.Contains(goOut, buildErr) { - t.Fatalf("go test %s: expected build error containing %q but got:\n%s", testfile, buildErr, goOut) + if !strings.Contains(string(goOut), wantErr) { + t.Fatalf("go test %s: got output %q, want output containing %q", testfile, goOut, wantErr) } // Confirm `testwrapper` exits with code 1. 
twOut, err := cmdTestwrapper(t, testfile).CombinedOutput() if code, ok := errExitCode(err); !ok || code != 1 { - t.Fatalf("testwrapper %s: expected error with exit code 0 but got: %v", testfile, err) + t.Fatalf("testwrapper %s: got exit code %d, want 1 (err: %v)", testfile, code, err) } - if !bytes.Contains(twOut, buildErr) { - t.Fatalf("testwrapper %s: expected build error containing %q but got:\n%s", testfile, buildErr, twOut) + if !strings.Contains(string(twOut), wantErr) { + t.Fatalf("testwrapper %s: got output %q, want output containing %q", testfile, twOut, wantErr) } if testing.Verbose() { diff --git a/cmd/tsconnect/common.go b/cmd/tsconnect/common.go index 0b0813226..ff10e4efb 100644 --- a/cmd/tsconnect/common.go +++ b/cmd/tsconnect/common.go @@ -176,6 +176,10 @@ func runEsbuild(buildOptions esbuild.BuildOptions) esbuild.BuildResult { // wasm_exec.js runtime helper library from the Go toolchain. func setupEsbuildWasmExecJS(build esbuild.PluginBuild) { wasmExecSrcPath := filepath.Join(runtime.GOROOT(), "misc", "wasm", "wasm_exec.js") + if _, err := os.Stat(wasmExecSrcPath); os.IsNotExist(err) { + // Go 1.24+ location: + wasmExecSrcPath = filepath.Join(runtime.GOROOT(), "lib", "wasm", "wasm_exec.js") + } build.OnResolve(esbuild.OnResolveOptions{ Filter: "./wasm_exec$", }, func(args esbuild.OnResolveArgs) (esbuild.OnResolveResult, error) { diff --git a/go.mod b/go.mod index c926e8428..5aeefc9c9 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.23.6 +go 1.24.0 require ( filippo.io/mkcert v1.4.4 diff --git a/go.toolchain.branch b/go.toolchain.branch index 47469a20a..5e1cd0620 100644 --- a/go.toolchain.branch +++ b/go.toolchain.branch @@ -1 +1 @@ -tailscale.go1.23 +tailscale.go1.24 diff --git a/go.toolchain.rev b/go.toolchain.rev index 963e8a28e..aa4153ac4 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -65c3f5f3fc9d96f56a37a79cad4ebbd7ff985801 +a529f1c329a97596448310cd52ab64047294b9d5 From 2c3338c46be073c2d50da6ab7ef187cb3707419e Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 19 Feb 2025 17:19:54 -0800 Subject: [PATCH 0523/1708] client/tailscale: fix Client.BuildURL and Client.BuildTailnetURL (#15064) This method uses `path.Join` to build the URL. Turns out with 1.24 this started stripping consecutive "/" characters, so "http://..." in baseURL becomes "http:/...". Also, `c.Tailnet` is a function that returns `c.tailnet`. Using it as a path element would encode as a pointer instead of the tailnet name. Finally, provide a way to prevent escaping of path elements e.g. for `?` in `acl?details=1`. 
Updates #15015 Signed-off-by: Andrew Lytvynov --- client/tailscale/acl.go | 9 ++-- client/tailscale/tailscale.go | 36 ++++++++----- client/tailscale/tailscale_test.go | 86 ++++++++++++++++++++++++++++++ 3 files changed, 114 insertions(+), 17 deletions(-) create mode 100644 client/tailscale/tailscale_test.go diff --git a/client/tailscale/acl.go b/client/tailscale/acl.go index bef80d241..929ec2b3b 100644 --- a/client/tailscale/acl.go +++ b/client/tailscale/acl.go @@ -12,6 +12,7 @@ import ( "fmt" "net/http" "net/netip" + "net/url" ) // ACLRow defines a rule that grants access by a set of users or groups to a set @@ -126,7 +127,7 @@ func (c *Client) ACLHuJSON(ctx context.Context) (acl *ACLHuJSON, err error) { } }() - path := c.BuildTailnetURL("acl?details=1") + path := c.BuildTailnetURL("acl", url.Values{"details": {"1"}}) req, err := http.NewRequestWithContext(ctx, "GET", path, nil) if err != nil { return nil, err @@ -146,7 +147,7 @@ func (c *Client) ACLHuJSON(ctx context.Context) (acl *ACLHuJSON, err error) { Warnings []string `json:"warnings"` }{} if err := json.Unmarshal(b, &data); err != nil { - return nil, err + return nil, fmt.Errorf("json.Unmarshal %q: %w", b, err) } acl = &ACLHuJSON{ @@ -328,7 +329,7 @@ type ACLPreview struct { } func (c *Client) previewACLPostRequest(ctx context.Context, body []byte, previewType string, previewFor string) (res *ACLPreviewResponse, err error) { - path := c.BuildTailnetURL("acl/preview") + path := c.BuildTailnetURL("acl", "preview") req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(body)) if err != nil { return nil, err @@ -488,7 +489,7 @@ func (c *Client) ValidateACLJSON(ctx context.Context, source, dest string) (test return nil, err } - path := c.BuildTailnetURL("acl/validate") + path := c.BuildTailnetURL("acl", "validate") req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(postData)) if err != nil { return nil, err diff --git a/client/tailscale/tailscale.go b/client/tailscale/tailscale.go index f273023eb..4c6273c89 100644 --- a/client/tailscale/tailscale.go +++ b/client/tailscale/tailscale.go @@ -66,31 +66,41 @@ func (c *Client) httpClient() *http.Client { } // BuildURL builds a url to http(s):///api/v2/ -// using the given pathElements. It url escapes each path element, so the caller -// doesn't need to worry about that. +// using the given pathElements. It url escapes each path element, so the +// caller doesn't need to worry about that. The last item of pathElements can +// be of type url.Values to add a query string to the URL. // // For example, BuildURL(devices, 5) with the default server URL would result in // https://api.tailscale.com/api/v2/devices/5. func (c *Client) BuildURL(pathElements ...any) string { - elem := make([]string, 2, len(pathElements)+1) - elem[0] = c.baseURL() - elem[1] = "/api/v2" - for _, pathElement := range pathElements { - elem = append(elem, url.PathEscape(fmt.Sprint(pathElement))) + elem := make([]string, 1, len(pathElements)+1) + elem[0] = "/api/v2" + var query string + for i, pathElement := range pathElements { + if uv, ok := pathElement.(url.Values); ok && i == len(pathElements)-1 { + query = uv.Encode() + } else { + elem = append(elem, url.PathEscape(fmt.Sprint(pathElement))) + } } - return path.Join(elem...) + url := c.baseURL() + path.Join(elem...) + if query != "" { + url += "?" + query + } + return url } // BuildTailnetURL builds a url to http(s):///api/v2/tailnet// -// using the given pathElements. 
It url escapes each path element, so the -// caller doesn't need to worry about that. +// using the given pathElements. It url escapes each path element, so the +// caller doesn't need to worry about that. The last item of pathElements can +// be of type url.Values to add a query string to the URL. // // For example, BuildTailnetURL(policy, validate) with the default server URL and a tailnet of "example.com" // would result in https://api.tailscale.com/api/v2/tailnet/example.com/policy/validate. func (c *Client) BuildTailnetURL(pathElements ...any) string { - allElements := make([]any, 3, len(pathElements)+2) + allElements := make([]any, 2, len(pathElements)+2) allElements[0] = "tailnet" - allElements[1] = c.Tailnet + allElements[1] = c.tailnet allElements = append(allElements, pathElements...) return c.BuildURL(allElements...) } @@ -189,7 +199,7 @@ func (e ErrResponse) Error() string { func HandleErrorResponse(b []byte, resp *http.Response) error { var errResp ErrResponse if err := json.Unmarshal(b, &errResp); err != nil { - return err + return fmt.Errorf("json.Unmarshal %q: %w", b, err) } errResp.Status = resp.StatusCode return errResp diff --git a/client/tailscale/tailscale_test.go b/client/tailscale/tailscale_test.go new file mode 100644 index 000000000..67379293b --- /dev/null +++ b/client/tailscale/tailscale_test.go @@ -0,0 +1,86 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tailscale + +import ( + "net/url" + "testing" +) + +func TestClientBuildURL(t *testing.T) { + c := Client{BaseURL: "http://127.0.0.1:1234"} + for _, tt := range []struct { + desc string + elements []any + want string + }{ + { + desc: "single-element", + elements: []any{"devices"}, + want: "http://127.0.0.1:1234/api/v2/devices", + }, + { + desc: "multiple-elements", + elements: []any{"tailnet", "example.com"}, + want: "http://127.0.0.1:1234/api/v2/tailnet/example.com", + }, + { + desc: "escape-element", + elements: []any{"tailnet", "example dot com?foo=bar"}, + want: `http://127.0.0.1:1234/api/v2/tailnet/example%20dot%20com%3Ffoo=bar`, + }, + { + desc: "url.Values", + elements: []any{"tailnet", "example.com", "acl", url.Values{"details": {"1"}}}, + want: `http://127.0.0.1:1234/api/v2/tailnet/example.com/acl?details=1`, + }, + } { + t.Run(tt.desc, func(t *testing.T) { + got := c.BuildURL(tt.elements...) + if got != tt.want { + t.Errorf("got %q, want %q", got, tt.want) + } + }) + } +} + +func TestClientBuildTailnetURL(t *testing.T) { + c := Client{ + BaseURL: "http://127.0.0.1:1234", + tailnet: "example.com", + } + for _, tt := range []struct { + desc string + elements []any + want string + }{ + { + desc: "single-element", + elements: []any{"devices"}, + want: "http://127.0.0.1:1234/api/v2/tailnet/example.com/devices", + }, + { + desc: "multiple-elements", + elements: []any{"devices", 123}, + want: "http://127.0.0.1:1234/api/v2/tailnet/example.com/devices/123", + }, + { + desc: "escape-element", + elements: []any{"foo bar?baz=qux"}, + want: `http://127.0.0.1:1234/api/v2/tailnet/example.com/foo%20bar%3Fbaz=qux`, + }, + { + desc: "url.Values", + elements: []any{"acl", url.Values{"details": {"1"}}}, + want: `http://127.0.0.1:1234/api/v2/tailnet/example.com/acl?details=1`, + }, + } { + t.Run(tt.desc, func(t *testing.T) { + got := c.BuildTailnetURL(tt.elements...) 
+ if got != tt.want { + t.Errorf("got %q, want %q", got, tt.want) + } + }) + } +} From 074372d6c5fdcfc81a15b70df883a4f74525f8b3 Mon Sep 17 00:00:00 2001 From: Erisa A Date: Thu, 20 Feb 2025 18:22:08 +0000 Subject: [PATCH 0524/1708] scripts/installer.sh: add SparkyLinux as a Debian derivative (#15076) Fixes #15075 Signed-off-by: Erisa A --- scripts/installer.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/scripts/installer.sh b/scripts/installer.sh index 22ba12b6b..3bd392b93 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -186,6 +186,12 @@ main() { VERSION="$DEBIAN_CODENAME" fi ;; + sparky) + OS="debian" + PACKAGETYPE="apt" + VERSION="$DEBIAN_CODENAME" + APT_KEY_TYPE="keyring" + ;; centos) OS="$ID" VERSION="$VERSION_ID" From dcd7cd3c6af4c6dfae2a6d491e37b5cc60f54482 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Thu, 20 Feb 2025 15:55:42 -0800 Subject: [PATCH 0525/1708] client/systray: show message on localapi permission error When LocalAPI returns an AccessDeniedError, display a message in the menu and hide or disable most other menu items. This currently includes a placeholder KB link which I'll update if we end up using something different. I debated whether to change the app icon to indicate an error, but opted not to since there is actually nothing wrong with the client itself and Tailscale will continue to function normally. It's just that the systray app itself is in a read-only state. Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/systray/systray.go | 64 ++++++++++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 18 deletions(-) diff --git a/client/systray/systray.go b/client/systray/systray.go index ac64b9958..b5bde551c 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -72,6 +72,11 @@ type Menu struct { curProfile ipn.LoginProfile allProfiles []ipn.LoginProfile + // readonly is whether the systray app is running in read-only mode. + // This is set if LocalAPI returns a permission error, + // typically because the user needs to run `tailscale set --operator=$USER`. 
+ readonly bool + bgCtx context.Context // ctx for background tasks not involving menu item clicks bgCancel context.CancelFunc @@ -153,6 +158,8 @@ func (menu *Menu) updateState() { defer menu.mu.Unlock() menu.init() + menu.readonly = false + var err error menu.status, err = menu.lc.Status(menu.bgCtx) if err != nil { @@ -160,6 +167,9 @@ func (menu *Menu) updateState() { } menu.curProfile, menu.allProfiles, err = menu.lc.ProfileStatus(menu.bgCtx) if err != nil { + if local.IsAccessDeniedError(err) { + menu.readonly = true + } log.Print(err) } } @@ -182,6 +192,15 @@ func (menu *Menu) rebuild() { systray.ResetMenu() + if menu.readonly { + const readonlyMsg = "No permission to manage Tailscale.\nSee tailscale.com/s/cli-operator" + m := systray.AddMenuItem(readonlyMsg, "") + onClick(ctx, m, func(_ context.Context) { + webbrowser.Open("https://tailscale.com/s/cli-operator") + }) + systray.AddSeparator() + } + menu.connect = systray.AddMenuItem("Connect", "") menu.disconnect = systray.AddMenuItem("Disconnect", "") menu.disconnect.Hide() @@ -222,28 +241,35 @@ func (menu *Menu) rebuild() { setAppIcon(disconnected) } + if menu.readonly { + menu.connect.Disable() + menu.disconnect.Disable() + } + account := "Account" if pt := profileTitle(menu.curProfile); pt != "" { account = pt } - accounts := systray.AddMenuItem(account, "") - setRemoteIcon(accounts, menu.curProfile.UserProfile.ProfilePicURL) - time.Sleep(newMenuDelay) - for _, profile := range menu.allProfiles { - title := profileTitle(profile) - var item *systray.MenuItem - if profile.ID == menu.curProfile.ID { - item = accounts.AddSubMenuItemCheckbox(title, "", true) - } else { - item = accounts.AddSubMenuItem(title, "") - } - setRemoteIcon(item, profile.UserProfile.ProfilePicURL) - onClick(ctx, item, func(ctx context.Context) { - select { - case <-ctx.Done(): - case menu.accountsCh <- profile.ID: + if !menu.readonly { + accounts := systray.AddMenuItem(account, "") + setRemoteIcon(accounts, menu.curProfile.UserProfile.ProfilePicURL) + time.Sleep(newMenuDelay) + for _, profile := range menu.allProfiles { + title := profileTitle(profile) + var item *systray.MenuItem + if profile.ID == menu.curProfile.ID { + item = accounts.AddSubMenuItemCheckbox(title, "", true) + } else { + item = accounts.AddSubMenuItem(title, "") } - }) + setRemoteIcon(item, profile.UserProfile.ProfilePicURL) + onClick(ctx, item, func(ctx context.Context) { + select { + case <-ctx.Done(): + case menu.accountsCh <- profile.ID: + } + }) + } } if menu.status != nil && menu.status.Self != nil && len(menu.status.Self.TailscaleIPs) > 0 { @@ -255,7 +281,9 @@ func (menu *Menu) rebuild() { } systray.AddSeparator() - menu.rebuildExitNodeMenu(ctx) + if !menu.readonly { + menu.rebuildExitNodeMenu(ctx) + } if menu.status != nil { menu.more = systray.AddMenuItem("More settings", "") From f5997b3c570481279990a9dc2cfd75c2f64da63e Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 20 Feb 2025 08:14:49 -0800 Subject: [PATCH 0526/1708] go.toolchain.rev: bump Tailscale Go 1.24 for a Tailscale revert + upstream bump Diff: https://github.com/tailscale/go/commit/7c083839130f799407787d693dc95af44190cc83 This reverts our previous CGO_ENABLED change: https://github.com/tailscale/go/commit/c1d3e9e81461bf43e010bd5f79315b715ed68023 It was causing depaware problems and is no longer necessary it seems? Upstream cmd/go is static nowadays. 
And pulls in: [release-branch.go1.24] doc/godebug: mention GODEBUG=fips140 [release-branch.go1.24] cmd/compile: avoid infinite recursion when inlining closures [release-branch.go1.24] syscall: don't truncate newly created files on Windows [release-branch.go1.24] runtime: fix usleep on s390x/linux [release-branch.go1.24] runtime: add some linknames back for `github.com/bytedance/sonic` Of those, really the only the 2nd and 3rd might affect us. Updates #15015 Updates tailscale/go#52 Change-Id: I0fa479f8b2d39f43f2dcdff6c28289dbe50b0773 Signed-off-by: Brad Fitzpatrick --- go.toolchain.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index aa4153ac4..ddbabb3eb 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -a529f1c329a97596448310cd52ab64047294b9d5 +2b494987ff3c1a6a26e10570c490394ff0a77aa4 From 781c1e96247532256974e864c5ba6e653e103b9d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 24 Feb 2025 12:14:14 -0800 Subject: [PATCH 0527/1708] tstest/deptest: add DepChecker.ExtraEnv option for callers to set For tests (in another repo) that use cgo, we'd like to set CGO_ENABLED=1 explicitly when evaluating cross-compiled deps with "go list". Updates tailscale/corp#26717 Updates tailscale/corp#26737 Change-Id: Ic21a54379ae91688d2456985068a47e73d04a645 Signed-off-by: Brad Fitzpatrick --- tstest/deptest/deptest.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tstest/deptest/deptest.go b/tstest/deptest/deptest.go index 2393733e6..4effd4a78 100644 --- a/tstest/deptest/deptest.go +++ b/tstest/deptest/deptest.go @@ -27,6 +27,7 @@ type DepChecker struct { BadDeps map[string]string // package => why WantDeps set.Set[string] // packages expected Tags string // comma-separated + ExtraEnv []string // extra environment for "go list" (e.g. CGO_ENABLED=1) } func (c DepChecker) Check(t *testing.T) { @@ -43,6 +44,7 @@ func (c DepChecker) Check(t *testing.T) { if c.GOARCH != "" { extraEnv = append(extraEnv, "GOARCH="+c.GOARCH) } + extraEnv = append(extraEnv, c.ExtraEnv...) cmd.Env = append(os.Environ(), extraEnv...) out, err := cmd.Output() if err != nil { From d1b0e1af06d9c44c71413410dcf2527f349d4767 Mon Sep 17 00:00:00 2001 From: Paul Scott <408401+icio@users.noreply.github.com> Date: Mon, 24 Feb 2025 13:26:41 -0800 Subject: [PATCH 0528/1708] cmd/testwrapper/flakytest: add Marked to check if in flakytest (#15119) Updates tailscale/corp#26637 Signed-off-by: Paul Scott --- cmd/testwrapper/flakytest/flakytest.go | 29 +++++++++++++ cmd/testwrapper/flakytest/flakytest_test.go | 46 +++++++++++++++++++++ 2 files changed, 75 insertions(+) diff --git a/cmd/testwrapper/flakytest/flakytest.go b/cmd/testwrapper/flakytest/flakytest.go index 494ed080b..6302900cb 100644 --- a/cmd/testwrapper/flakytest/flakytest.go +++ b/cmd/testwrapper/flakytest/flakytest.go @@ -9,8 +9,12 @@ package flakytest import ( "fmt" "os" + "path" "regexp" + "sync" "testing" + + "tailscale.com/util/mak" ) // FlakyTestLogMessage is a sentinel value that is printed to stderr when a @@ -25,6 +29,11 @@ const FlakeAttemptEnv = "TS_TESTWRAPPER_ATTEMPT" var issueRegexp = regexp.MustCompile(`\Ahttps://github\.com/tailscale/[a-zA-Z0-9_.-]+/issues/\d+\z`) +var ( + rootFlakesMu sync.Mutex + rootFlakes map[string]bool +) + // Mark sets the current test as a flaky test, such that if it fails, it will // be retried a few times on failure. 
issue must be a GitHub issue that tracks // the status of the flaky test being marked, of the format: @@ -41,4 +50,24 @@ func Mark(t testing.TB, issue string) { fmt.Fprintf(os.Stderr, "%s: %s\n", FlakyTestLogMessage, issue) } t.Logf("flakytest: issue tracking this flaky test: %s", issue) + + // Record the root test name as flakey. + rootFlakesMu.Lock() + defer rootFlakesMu.Unlock() + mak.Set(&rootFlakes, t.Name(), true) +} + +// Marked reports whether the current test or one of its parents was marked flaky. +func Marked(t testing.TB) bool { + n := t.Name() + for { + if rootFlakes[n] { + return true + } + n = path.Dir(n) + if n == "." || n == "/" { + break + } + } + return false } diff --git a/cmd/testwrapper/flakytest/flakytest_test.go b/cmd/testwrapper/flakytest/flakytest_test.go index 85e77a939..64cbfd9a3 100644 --- a/cmd/testwrapper/flakytest/flakytest_test.go +++ b/cmd/testwrapper/flakytest/flakytest_test.go @@ -41,3 +41,49 @@ func TestFlakeRun(t *testing.T) { t.Fatal("First run in testwrapper, failing so that test is retried. This is expected.") } } + +func TestMarked_Root(t *testing.T) { + Mark(t, "https://github.com/tailscale/tailscale/issues/0") + + t.Run("child", func(t *testing.T) { + t.Run("grandchild", func(t *testing.T) { + if got, want := Marked(t), true; got != want { + t.Fatalf("Marked(t) = %t, want %t", got, want) + } + }) + + if got, want := Marked(t), true; got != want { + t.Fatalf("Marked(t) = %t, want %t", got, want) + } + }) + + if got, want := Marked(t), true; got != want { + t.Fatalf("Marked(t) = %t, want %t", got, want) + } +} + +func TestMarked_Subtest(t *testing.T) { + t.Run("flaky", func(t *testing.T) { + Mark(t, "https://github.com/tailscale/tailscale/issues/0") + + t.Run("child", func(t *testing.T) { + t.Run("grandchild", func(t *testing.T) { + if got, want := Marked(t), true; got != want { + t.Fatalf("Marked(t) = %t, want %t", got, want) + } + }) + + if got, want := Marked(t), true; got != want { + t.Fatalf("Marked(t) = %t, want %t", got, want) + } + }) + + if got, want := Marked(t), true; got != want { + t.Fatalf("Marked(t) = %t, want %t", got, want) + } + }) + + if got, want := Marked(t), false; got != want { + t.Fatalf("Marked(t) = %t, want %t", got, want) + } +} From 8d7033fe7f58ef4177df9903d664077ae6521682 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 24 Feb 2025 14:50:04 -0600 Subject: [PATCH 0529/1708] ipn/ipnlocal,util/syspolicy,docs/windows/policy: implement the ReconnectAfter policy setting In this PR, we update the LocalBackend so that when the ReconnectAfter policy setting is configured and a user disconnects Tailscale by setting WantRunning to false in the profile prefs, the LocalBackend will now start a timer to set WantRunning back to true once the ReconnectAfter timer expires. We also update the ADMX/ADML policy definitions to allow configuring this policy setting for Windows via Group Policy and Intune. 
Updates #14824 Signed-off-by: Nick Khyl --- docs/windows/policy/en-US/tailscale.adml | 14 +++++ docs/windows/policy/tailscale.admx | 7 +++ ipn/ipnlocal/local.go | 72 +++++++++++++++++++++++- util/syspolicy/policy_keys.go | 7 +++ 4 files changed, 97 insertions(+), 3 deletions(-) diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index 8c375ca45..eb6a520d1 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -109,6 +109,14 @@ If you enable this policy setting, users will not be allowed to disconnect Tails If necessary, it can be used along with Unattended Mode to keep Tailscale connected regardless of whether a user is logged in. This can be used to facilitate remote access to a device or ensure connectivity to a Domain Controller before a user logs in. If you disable or don't configure this policy setting, users will be allowed to disconnect Tailscale at their will.]]>
+ Configure automatic reconnect delay + Allow Local Network Access when an Exit Node is in use The options below allow configuring exceptions where disconnecting Tailscale is permitted. Disconnects with reason: + + The delay must be a valid Go duration string, such as 30s, 5m, or 1h30m, all without spaces or any other symbols. + + + + diff --git a/docs/windows/policy/tailscale.admx b/docs/windows/policy/tailscale.admx index 6a1ebc666..0ff311b40 100644 --- a/docs/windows/policy/tailscale.admx +++ b/docs/windows/policy/tailscale.admx @@ -156,6 +156,13 @@ + + + + + + + diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index bd5f595be..fec5c166f 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -442,6 +442,10 @@ type LocalBackend struct { // See tailscale/corp#26146. overrideAlwaysOn bool + // reconnectTimer is used to schedule a reconnect by setting [ipn.Prefs.WantRunning] + // to true after a delay, or nil if no reconnect is scheduled. + reconnectTimer tstime.TimerController + // shutdownCbs are the callbacks to be called when the backend is shutting down. // Each callback is called exactly once in unspecified order and without b.mu held. // Returned errors are logged but otherwise ignored and do not affect the shutdown process. @@ -1070,6 +1074,8 @@ func (b *LocalBackend) Shutdown() { b.captiveCancel() } + b.stopReconnectTimerLocked() + if b.loginFlags&controlclient.LoginEphemeral != 0 { b.mu.Unlock() ctx, cancel := context.WithTimeout(b.ctx, 5*time.Second) @@ -4297,15 +4303,75 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip // mode on them until the policy changes, they switch to a different profile, etc. b.overrideAlwaysOn = true - // TODO(nickkhyl): check the ReconnectAfter policy here. If configured, - // start a timer to automatically reconnect after the specified duration. + if reconnectAfter, _ := syspolicy.GetDuration(syspolicy.ReconnectAfter, 0); reconnectAfter > 0 { + b.startReconnectTimerLocked(reconnectAfter) + } } return b.editPrefsLockedOnEntry(mp, unlock) } +// startReconnectTimerLocked sets a timer to automatically set WantRunning to true +// after the specified duration. +func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { + if b.reconnectTimer != nil { + // Stop may return false if the timer has already fired, + // and the function has been called in its own goroutine, + // but lost the race to acquire b.mu. In this case, it'll + // end up as a no-op due to a reconnectTimer mismatch + // once it manages to acquire the lock. This is fine, and we + // don't need to check the return value. + b.reconnectTimer.Stop() + } + profileID := b.pm.CurrentProfile().ID() + var reconnectTimer tstime.TimerController + reconnectTimer = b.clock.AfterFunc(d, func() { + unlock := b.lockAndGetUnlock() + defer unlock() + + if b.reconnectTimer != reconnectTimer { + // We're either not the most recent timer, or we lost the race when + // the timer was stopped. No need to reconnect. + return + } + b.reconnectTimer = nil + + cp := b.pm.CurrentProfile() + if cp.ID() != profileID { + // The timer fired before the profile changed but we lost the race + // and acquired the lock shortly after. + // No need to reconnect. 
+ return + } + + mp := &ipn.MaskedPrefs{WantRunningSet: true, Prefs: ipn.Prefs{WantRunning: true}} + if _, err := b.editPrefsLockedOnEntry(mp, unlock); err != nil { + b.logf("failed to automatically reconnect as %q after %v: %v", cp.Name(), d, err) + } else { + b.logf("automatically reconnected as %q after %v", cp.Name(), d) + } + }) + b.reconnectTimer = reconnectTimer + b.logf("reconnect for %q has been scheduled and will be performed in %v", b.pm.CurrentProfile().Name(), d) +} + func (b *LocalBackend) resetAlwaysOnOverrideLocked() { b.overrideAlwaysOn = false + b.stopReconnectTimerLocked() +} + +func (b *LocalBackend) stopReconnectTimerLocked() { + if b.reconnectTimer != nil { + // Stop may return false if the timer has already fired, + // and the function has been called in its own goroutine, + // but lost the race to acquire b.mu. + // In this case, it'll end up as a no-op due to a reconnectTimer + // mismatch (see [LocalBackend.startReconnectTimerLocked]) + // once it manages to acquire the lock. This is fine, and we + // don't need to check the return value. + b.reconnectTimer.Stop() + b.reconnectTimer = nil + } } // Warning: b.mu must be held on entry, but it unlocks it on the way out. @@ -4399,7 +4465,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) if oldp.Valid() { newp.Persist = oldp.Persist().AsStruct() // caller isn't allowed to override this } - // applySysPolicyToPrefsLocked returns whether it updated newp, + // applySysPolicy returns whether it updated newp, // but everything in this function treats b.prefs as completely new // anyway, so its return value can be ignored here. applySysPolicy(newp, b.lastSuggestedExitNode, b.overrideAlwaysOn) diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index a955ce094..a81c1e5d5 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -42,6 +42,12 @@ const ( // for auditing purposes. It has no effect when [AlwaysOn] is false. AlwaysOnOverrideWithReason Key = "AlwaysOn.OverrideWithReason" + // ReconnectAfter is a string value formatted for use with time.ParseDuration() + // that defines the duration after which the client should automatically reconnect + // to the Tailscale network following a user-initiated disconnect. + // An empty string or a zero duration disables automatic reconnection. + ReconnectAfter Key = "ReconnectAfter" + // ExitNodeID is the exit node's node id. default ""; if blank, no exit node is forced. // Exit node ID takes precedence over exit node IP. // To find the node ID, go to /api.md#device. 
@@ -176,6 +182,7 @@ var implicitDefinitions = []*setting.Definition{ setting.NewDefinition(LogTarget, setting.DeviceSetting, setting.StringValue), setting.NewDefinition(MachineCertificateSubject, setting.DeviceSetting, setting.StringValue), setting.NewDefinition(PostureChecking, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(ReconnectAfter, setting.DeviceSetting, setting.DurationValue), setting.NewDefinition(Tailnet, setting.DeviceSetting, setting.StringValue), // User policy settings (can be configured on a user- or device-basis): From 83c104652d89717731774a2c7c95b4e47cc41383 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 24 Feb 2025 20:11:14 -0800 Subject: [PATCH 0530/1708] cmd/derper: add --socket flag to change unix socket path to tailscaled Fixes #10359 Change-Id: Ide49941c486d29856841016686827316878c9433 Signed-off-by: Brad Fitzpatrick --- cmd/derper/derper.go | 4 ++++ derp/derp_server.go | 17 +++++++++++++---- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 980870847..682ec0bba 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -71,10 +71,13 @@ var ( secretsCacheDir = flag.String("secrets-cache-dir", defaultSetecCacheDir(), "directory to cache setec secrets in (required if --secrets-url is set)") bootstrapDNS = flag.String("bootstrap-dns-names", "", "optional comma-separated list of hostnames to make available at /bootstrap-dns") unpublishedDNS = flag.String("unpublished-bootstrap-dns-names", "", "optional comma-separated list of hostnames to make available at /bootstrap-dns and not publish in the list. If an entry contains a slash, the second part names a DNS record to poll for its TXT record with a `0` to `100` value for rollout percentage.") + verifyClients = flag.Bool("verify-clients", false, "verify clients to this DERP server through a local tailscaled instance.") verifyClientURL = flag.String("verify-client-url", "", "if non-empty, an admission controller URL for permitting client connections; see tailcfg.DERPAdmitClientRequest") verifyFailOpen = flag.Bool("verify-client-url-fail-open", true, "whether we fail open if --verify-client-url is unreachable") + socket = flag.String("socket", "", "optional alternate path to tailscaled socket (only relevant when using --verify-clients)") + acceptConnLimit = flag.Float64("accept-connection-limit", math.Inf(+1), "rate limit for accepting new connection") acceptConnBurst = flag.Int("accept-connection-burst", math.MaxInt, "burst limit for accepting new connection") @@ -192,6 +195,7 @@ func main() { s := derp.NewServer(cfg.PrivateKey, log.Printf) s.SetVerifyClient(*verifyClients) + s.SetTailscaledSocketPath(*socket) s.SetVerifyClientURL(*verifyClientURL) s.SetVerifyClientURLFailOpen(*verifyFailOpen) s.SetTCPWriteTimeout(*tcpWriteTimeout) diff --git a/derp/derp_server.go b/derp/derp_server.go index baca898d3..c330572d2 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -137,6 +137,7 @@ type Server struct { metaCert []byte // the encoded x509 cert to send after LetsEncrypt cert+intermediate dupPolicy dupPolicy debug bool + localClient local.Client // Counters: packetsSent, bytesSent expvar.Int @@ -485,6 +486,16 @@ func (s *Server) SetVerifyClientURLFailOpen(v bool) { s.verifyClientsURLFailOpen = v } +// SetTailscaledSocketPath sets the unix socket path to use to talk to +// tailscaled if client verification is enabled. 
+// +// If unset or set to the empty string, the default path for the operating +// system is used. +func (s *Server) SetTailscaledSocketPath(path string) { + s.localClient.Socket = path + s.localClient.UseSocketOnly = path != "" +} + // SetTCPWriteTimeout sets the timeout for writing to connected clients. // This timeout does not apply to mesh connections. // Defaults to 2 seconds. @@ -1320,8 +1331,6 @@ func (c *sclient) requestMeshUpdate() { } } -var localClient local.Client - // isMeshPeer reports whether the client is a trusted mesh peer // node in the DERP region. func (s *Server) isMeshPeer(info *clientInfo) bool { @@ -1340,7 +1349,7 @@ func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, inf // tailscaled-based verification: if s.verifyClientsLocalTailscaled { - _, err := localClient.WhoIsNodeKey(ctx, clientKey) + _, err := s.localClient.WhoIsNodeKey(ctx, clientKey) if err == tailscale.ErrPeerNotFound { return fmt.Errorf("peer %v not authorized (not found in local tailscaled)", clientKey) } @@ -2240,7 +2249,7 @@ func (s *Server) ConsistencyCheck() error { func (s *Server) checkVerifyClientsLocalTailscaled() error { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - status, err := localClient.StatusWithoutPeers(ctx) + status, err := s.localClient.StatusWithoutPeers(ctx) if err != nil { return fmt.Errorf("localClient.Status: %w", err) } From d7508b24c64162e915e9f2c3da1052ac1d9f1ff2 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Tue, 25 Feb 2025 08:39:56 -0800 Subject: [PATCH 0531/1708] go.mod: bump golang.org/x/crypto (#15123) There were two recent CVEs. The one that sorta affects us is https://groups.google.com/g/golang-announce/c/qN_GDasRQSA (SSH DoS). Updates #15124 Signed-off-by: Andrew Lytvynov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5aeefc9c9..04264f9ce 100644 --- a/go.mod +++ b/go.mod @@ -93,7 +93,7 @@ require ( go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.33.0 + golang.org/x/crypto v0.35.0 golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac golang.org/x/mod v0.23.0 golang.org/x/net v0.35.0 diff --git a/go.sum b/go.sum index be5fc57bc..00a45edb9 100644 --- a/go.sum +++ b/go.sum @@ -1041,8 +1041,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= -golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= +golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= From 820bdb870a414d9a5d2131f80649d0fa98a74819 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 14 Feb 2025 18:36:24 -0800 Subject: [PATCH 0532/1708] 
maths: add exponentially weighted moving average type In order to improve latency tracking, we will use an exponentially weighted moving average that will smooth change over time and suppress large outlier values. Updates tailscale/corp#26649 Signed-off-by: James Tucker --- maths/ewma.go | 72 ++++++++++++++++++ maths/ewma_test.go | 178 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 250 insertions(+) create mode 100644 maths/ewma.go create mode 100644 maths/ewma_test.go diff --git a/maths/ewma.go b/maths/ewma.go new file mode 100644 index 000000000..0897b73e4 --- /dev/null +++ b/maths/ewma.go @@ -0,0 +1,72 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package maths contains additional mathematical functions or structures not +// found in the standard library. +package maths + +import ( + "math" + "time" +) + +// EWMA is an exponentially weighted moving average supporting updates at +// irregular intervals with at most nanosecond resolution. +// The zero value will compute a half-life of 1 second. +// It is not safe for concurrent use. +// TODO(raggi): de-duplicate with tstime/rate.Value, which has a more complex +// and synchronized interface and does not provide direct access to the stable +// value. +type EWMA struct { + value float64 // current value of the average + lastTime int64 // time of last update in unix nanos + halfLife float64 // half-life in seconds +} + +// NewEWMA creates a new EWMA with the specified half-life. If halfLifeSeconds +// is 0, it defaults to 1. +func NewEWMA(halfLifeSeconds float64) *EWMA { + return &EWMA{ + halfLife: halfLifeSeconds, + } +} + +// Update adds a new sample to the average. If t is zero or precedes the last +// update, the update is ignored. +func (e *EWMA) Update(value float64, t time.Time) { + if t.IsZero() { + return + } + hl := e.halfLife + if hl == 0 { + hl = 1 + } + tn := t.UnixNano() + if e.lastTime == 0 { + e.value = value + e.lastTime = tn + return + } + + dt := (time.Duration(tn-e.lastTime) * time.Nanosecond).Seconds() + if dt < 0 { + // drop out of order updates + return + } + + // decay = 2^(-dt/halfLife) + decay := math.Exp2(-dt / hl) + e.value = e.value*decay + value*(1-decay) + e.lastTime = tn +} + +// Get returns the current value of the average +func (e *EWMA) Get() float64 { + return e.value +} + +// Reset clears the EWMA to its initial state +func (e *EWMA) Reset() { + e.value = 0 + e.lastTime = 0 +} diff --git a/maths/ewma_test.go b/maths/ewma_test.go new file mode 100644 index 000000000..307078a38 --- /dev/null +++ b/maths/ewma_test.go @@ -0,0 +1,178 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package maths + +import ( + "slices" + "testing" + "time" +) + +// some real world latency samples. 
+var ( + latencyHistory1 = []int{ + 14, 12, 15, 6, 19, 12, 13, 13, 13, 16, 17, 11, 17, 11, 14, 15, 14, 15, + 16, 16, 17, 14, 12, 16, 18, 14, 14, 11, 15, 15, 25, 11, 15, 14, 12, 15, + 13, 12, 13, 15, 11, 13, 15, 14, 14, 15, 12, 15, 18, 12, 15, 22, 12, 13, + 10, 14, 16, 15, 16, 11, 14, 17, 18, 20, 16, 11, 16, 14, 5, 15, 17, 12, + 15, 11, 15, 20, 12, 17, 12, 17, 15, 12, 12, 11, 14, 15, 11, 20, 14, 13, + 11, 12, 13, 13, 11, 13, 11, 15, 13, 13, 14, 12, 11, 12, 12, 14, 11, 13, + 12, 12, 12, 19, 14, 13, 13, 14, 11, 12, 10, 11, 15, 12, 14, 11, 11, 14, + 14, 12, 12, 11, 14, 12, 11, 12, 14, 11, 12, 15, 12, 14, 12, 12, 21, 16, + 21, 12, 16, 9, 11, 16, 14, 13, 14, 12, 13, 16, + } + latencyHistory2 = []int{ + 18, 20, 21, 21, 20, 23, 18, 18, 20, 21, 20, 19, 22, 18, 20, 20, 19, 21, + 21, 22, 22, 19, 18, 22, 22, 19, 20, 17, 16, 11, 25, 16, 18, 21, 17, 22, + 19, 18, 22, 21, 20, 18, 22, 17, 17, 20, 19, 10, 19, 16, 19, 25, 17, 18, + 15, 20, 21, 20, 23, 22, 22, 22, 19, 22, 22, 17, 22, 20, 20, 19, 21, 22, + 20, 19, 17, 22, 16, 16, 20, 22, 17, 19, 21, 16, 20, 22, 19, 21, 20, 19, + 13, 14, 23, 19, 16, 10, 19, 15, 15, 17, 16, 18, 14, 16, 18, 22, 20, 18, + 18, 21, 15, 19, 18, 19, 18, 20, 17, 19, 21, 19, 20, 19, 20, 20, 17, 14, + 17, 17, 18, 21, 20, 18, 18, 17, 16, 17, 17, 20, 22, 19, 20, 21, 21, 20, + 21, 24, 20, 18, 12, 17, 18, 17, 19, 19, 19, + } +) + +func TestEWMALatencyHistory(t *testing.T) { + type result struct { + t time.Time + v float64 + s int + } + + for _, latencyHistory := range [][]int{latencyHistory1, latencyHistory2} { + startTime := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) + halfLife := 30.0 + + ewma := NewEWMA(halfLife) + + var results []result + sum := 0.0 + for i, latency := range latencyHistory { + t := startTime.Add(time.Duration(i) * time.Second) + ewma.Update(float64(latency), t) + sum += float64(latency) + + results = append(results, result{t, ewma.Get(), latency}) + } + mean := sum / float64(len(latencyHistory)) + min := float64(slices.Min(latencyHistory)) + max := float64(slices.Max(latencyHistory)) + + t.Logf("EWMA Latency History (half-life: %.1f seconds):", halfLife) + t.Logf("Mean latency: %.2f ms", mean) + t.Logf("Range: [%.1f, %.1f]", min, max) + + t.Log("Samples: ") + sparkline := []rune("▁▂▃▄▅▆▇█") + var sampleLine []rune + for _, r := range results { + idx := int(((float64(r.s) - min) / (max - min)) * float64(len(sparkline)-1)) + if idx >= len(sparkline) { + idx = len(sparkline) - 1 + } + sampleLine = append(sampleLine, sparkline[idx]) + } + t.Log(string(sampleLine)) + + t.Log("EWMA: ") + var ewmaLine []rune + for _, r := range results { + idx := int(((r.v - min) / (max - min)) * float64(len(sparkline)-1)) + if idx >= len(sparkline) { + idx = len(sparkline) - 1 + } + ewmaLine = append(ewmaLine, sparkline[idx]) + } + t.Log(string(ewmaLine)) + t.Log("") + + t.Logf("Time | Sample | Value | Value - Sample") + t.Logf("") + + for _, result := range results { + t.Logf("%10s | % 6d | % 5.2f | % 5.2f", result.t.Format("15:04:05"), result.s, result.v, result.v-float64(result.s)) + } + + // check that all results are greater than the min, and less than the max of the input, + // and they're all close to the mean. 
+ for _, result := range results { + if result.v < float64(min) || result.v > float64(max) { + t.Errorf("result %f out of range [%f, %f]", result.v, min, max) + } + + if result.v < mean*0.9 || result.v > mean*1.1 { + t.Errorf("result %f not close to mean %f", result.v, mean) + } + } + } +} + +func TestHalfLife(t *testing.T) { + start := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) + + ewma := NewEWMA(30.0) + ewma.Update(10, start) + ewma.Update(0, start.Add(30*time.Second)) + + if ewma.Get() != 5 { + t.Errorf("expected 5, got %f", ewma.Get()) + } + + ewma.Update(10, start.Add(60*time.Second)) + if ewma.Get() != 7.5 { + t.Errorf("expected 7.5, got %f", ewma.Get()) + } + + ewma.Update(10, start.Add(90*time.Second)) + if ewma.Get() != 8.75 { + t.Errorf("expected 8.75, got %f", ewma.Get()) + } +} + +func TestZeroValue(t *testing.T) { + start := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) + + var ewma EWMA + ewma.Update(10, start) + ewma.Update(0, start.Add(time.Second)) + + if ewma.Get() != 5 { + t.Errorf("expected 5, got %f", ewma.Get()) + } + + ewma.Update(10, start.Add(2*time.Second)) + if ewma.Get() != 7.5 { + t.Errorf("expected 7.5, got %f", ewma.Get()) + } + + ewma.Update(10, start.Add(3*time.Second)) + if ewma.Get() != 8.75 { + t.Errorf("expected 8.75, got %f", ewma.Get()) + } +} + +func TestReset(t *testing.T) { + start := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) + + ewma := NewEWMA(30.0) + ewma.Update(10, start) + ewma.Update(0, start.Add(30*time.Second)) + + if ewma.Get() != 5 { + t.Errorf("expected 5, got %f", ewma.Get()) + } + + ewma.Reset() + + if ewma.Get() != 0 { + t.Errorf("expected 0, got %f", ewma.Get()) + } + + ewma.Update(10, start.Add(90*time.Second)) + if ewma.Get() != 10 { + t.Errorf("expected 10, got %f", ewma.Get()) + } +} From c174d3c795a906214cf6bd63ffc3618555296db5 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 26 Feb 2025 09:02:40 -0800 Subject: [PATCH 0533/1708] scripts/installer.sh: ensure default umask for the installer (#15139) Ensures default Linux umask 022 for the installer script to make sure that files created by the installer can be accessed by other tools, such as apt. Updates tailscale/tailscale#15133 Signed-off-by: Irbe Krumina --- scripts/installer.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/scripts/installer.sh b/scripts/installer.sh index 3bd392b93..388dd5a56 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -7,6 +7,14 @@ set -eu +# Ensure that this script runs with the default umask for Linux. In practice, +# this means that files created by this script (such as keyring files) will be +# created with 644 permissions. This ensures that keyrings and other files +# created by this script are readable by installers on systems where the +# umask is set to a more restrictive value. +# See https://github.com/tailscale/tailscale/issues/15133 +umask 022 + # All the code is wrapped in a main function that gets called at the # bottom of the file, so that a truncated partial download doesn't end # up executing half a script. From ae303d41dd1850b4306848a5ada87ea8b14a088d Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Thu, 27 Feb 2025 11:35:54 -0800 Subject: [PATCH 0534/1708] go.mod: bump github.com/go-json-experiment/json (#15010) The upstream module has seen significant work making the v1 emulation layer a high fidelity re-implementation of v1 "encoding/json". 
This addresses several upstream breaking changes: * MarshalJSONV2 renamed as MarshalJSONTo * UnmarshalJSONV2 renamed as UnmarshalJSONFrom * Options argument removed from MarshalJSONV2 * Options argument removed from UnmarshalJSONV2 Updates tailscale/corp#791 Signed-off-by: Joe Tsai --- go.mod | 2 +- go.sum | 4 +-- types/opt/value.go | 16 +++++------ types/prefs/item.go | 16 +++++------ types/prefs/list.go | 16 +++++------ types/prefs/map.go | 16 +++++------ types/prefs/prefs.go | 16 +++++------ types/prefs/prefs_example/prefs_types.go | 20 +++++++------- types/prefs/prefs_test.go | 16 +++++------ types/prefs/struct_list.go | 16 +++++------ types/prefs/struct_map.go | 16 +++++------ util/syspolicy/internal/internal.go | 4 +-- util/syspolicy/setting/origin.go | 16 +++++------ util/syspolicy/setting/raw_item.go | 34 ++++++++++++------------ util/syspolicy/setting/snapshot.go | 16 +++++------ util/syspolicy/setting/summary.go | 16 +++++------ 16 files changed, 120 insertions(+), 120 deletions(-) diff --git a/go.mod b/go.mod index 04264f9ce..e6f3141a0 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/frankban/quicktest v1.14.6 github.com/fxamacker/cbor/v2 v2.7.0 github.com/gaissmai/bart v0.18.0 - github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 + github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 github.com/go-logr/zapr v1.3.0 github.com/go-ole/go-ole v1.3.0 github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 diff --git a/go.sum b/go.sum index 00a45edb9..0c8704674 100644 --- a/go.sum +++ b/go.sum @@ -327,8 +327,8 @@ github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0q github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84= -github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= +github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 h1:F8d1AJ6M9UQCavhwmO6ZsrYLfG8zVFWfEfMS2MXPkSY= +github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= diff --git a/types/opt/value.go b/types/opt/value.go index b47b03c81..c71c53e51 100644 --- a/types/opt/value.go +++ b/types/opt/value.go @@ -100,31 +100,31 @@ func (o Value[T]) Equal(v Value[T]) bool { return false } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (o Value[T]) MarshalJSONV2(enc *jsontext.Encoder, opts jsonv2.Options) error { +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (o Value[T]) MarshalJSONTo(enc *jsontext.Encoder) error { if !o.set { return enc.WriteToken(jsontext.Null) } - return jsonv2.MarshalEncode(enc, &o.value, opts) + return jsonv2.MarshalEncode(enc, &o.value) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. 
-func (o *Value[T]) UnmarshalJSONV2(dec *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (o *Value[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { if dec.PeekKind() == 'n' { *o = Value[T]{} _, err := dec.ReadToken() // read null return err } o.set = true - return jsonv2.UnmarshalDecode(dec, &o.value, opts) + return jsonv2.UnmarshalDecode(dec, &o.value) } // MarshalJSON implements [json.Marshaler]. func (o Value[T]) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(o) // uses MarshalJSONV2 + return jsonv2.Marshal(o) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (o *Value[T]) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, o) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, o) // uses UnmarshalJSONFrom } diff --git a/types/prefs/item.go b/types/prefs/item.go index 103204147..717a0c76c 100644 --- a/types/prefs/item.go +++ b/types/prefs/item.go @@ -152,15 +152,15 @@ func (iv ItemView[T, V]) Equal(iv2 ItemView[T, V]) bool { return iv.ж.Equal(*iv2.ж) } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (iv ItemView[T, V]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return iv.ж.MarshalJSONV2(out, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (iv ItemView[T, V]) MarshalJSONTo(out *jsontext.Encoder) error { + return iv.ж.MarshalJSONTo(out) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (iv *ItemView[T, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (iv *ItemView[T, V]) UnmarshalJSONFrom(in *jsontext.Decoder) error { var x Item[T] - if err := x.UnmarshalJSONV2(in, opts); err != nil { + if err := x.UnmarshalJSONFrom(in); err != nil { return err } iv.ж = &x @@ -169,10 +169,10 @@ func (iv *ItemView[T, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Opti // MarshalJSON implements [json.Marshaler]. func (iv ItemView[T, V]) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(iv) // uses MarshalJSONV2 + return jsonv2.Marshal(iv) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (iv *ItemView[T, V]) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, iv) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, iv) // uses UnmarshalJSONFrom } diff --git a/types/prefs/list.go b/types/prefs/list.go index 9830e79de..e9c1a1f33 100644 --- a/types/prefs/list.go +++ b/types/prefs/list.go @@ -157,15 +157,15 @@ func (lv ListView[T]) Equal(lv2 ListView[T]) bool { return lv.ж.Equal(*lv2.ж) } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (lv ListView[T]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return lv.ж.MarshalJSONV2(out, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (lv ListView[T]) MarshalJSONTo(out *jsontext.Encoder) error { + return lv.ж.MarshalJSONTo(out) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (lv *ListView[T]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (lv *ListView[T]) UnmarshalJSONFrom(in *jsontext.Decoder) error { var x List[T] - if err := x.UnmarshalJSONV2(in, opts); err != nil { + if err := x.UnmarshalJSONFrom(in); err != nil { return err } lv.ж = &x @@ -174,10 +174,10 @@ func (lv *ListView[T]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options // MarshalJSON implements [json.Marshaler]. 
func (lv ListView[T]) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(lv) // uses MarshalJSONV2 + return jsonv2.Marshal(lv) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (lv *ListView[T]) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, lv) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, lv) // uses UnmarshalJSONFrom } diff --git a/types/prefs/map.go b/types/prefs/map.go index 2bd32bfbd..4b64690ed 100644 --- a/types/prefs/map.go +++ b/types/prefs/map.go @@ -133,15 +133,15 @@ func (mv MapView[K, V]) Equal(mv2 MapView[K, V]) bool { return mv.ж.Equal(*mv2.ж) } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (mv MapView[K, V]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return mv.ж.MarshalJSONV2(out, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (mv MapView[K, V]) MarshalJSONTo(out *jsontext.Encoder) error { + return mv.ж.MarshalJSONTo(out) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (mv *MapView[K, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (mv *MapView[K, V]) UnmarshalJSONFrom(in *jsontext.Decoder) error { var x Map[K, V] - if err := x.UnmarshalJSONV2(in, opts); err != nil { + if err := x.UnmarshalJSONFrom(in); err != nil { return err } mv.ж = &x @@ -150,10 +150,10 @@ func (mv *MapView[K, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Optio // MarshalJSON implements [json.Marshaler]. func (mv MapView[K, V]) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(mv) // uses MarshalJSONV2 + return jsonv2.Marshal(mv) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (mv *MapView[K, V]) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, mv) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, mv) // uses UnmarshalJSONFrom } diff --git a/types/prefs/prefs.go b/types/prefs/prefs.go index 4f7902077..52cb464b6 100644 --- a/types/prefs/prefs.go +++ b/types/prefs/prefs.go @@ -158,22 +158,22 @@ func (p *preference[T]) SetReadOnly(readonly bool) { p.s.Metadata.ReadOnly = readonly } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (p preference[T]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return jsonv2.MarshalEncode(out, &p.s, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (p preference[T]) MarshalJSONTo(out *jsontext.Encoder) error { + return jsonv2.MarshalEncode(out, &p.s) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (p *preference[T]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { - return jsonv2.UnmarshalDecode(in, &p.s, opts) +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (p *preference[T]) UnmarshalJSONFrom(in *jsontext.Decoder) error { + return jsonv2.UnmarshalDecode(in, &p.s) } // MarshalJSON implements [json.Marshaler]. func (p preference[T]) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(p) // uses MarshalJSONV2 + return jsonv2.Marshal(p) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. 
func (p *preference[T]) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, p) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, p) // uses UnmarshalJSONFrom } diff --git a/types/prefs/prefs_example/prefs_types.go b/types/prefs/prefs_example/prefs_types.go index 49f0d8c3c..f88c29f94 100644 --- a/types/prefs/prefs_example/prefs_types.go +++ b/types/prefs/prefs_example/prefs_types.go @@ -48,10 +48,10 @@ import ( // the `omitzero` JSON tag option. This option is not supported by the // [encoding/json] package as of 2024-08-21; see golang/go#45669. // It is recommended that a prefs type implements both -// [jsonv2.MarshalerV2]/[jsonv2.UnmarshalerV2] and [json.Marshaler]/[json.Unmarshaler] +// [jsonv2.MarshalerTo]/[jsonv2.UnmarshalerFrom] and [json.Marshaler]/[json.Unmarshaler] // to ensure consistent and more performant marshaling, regardless of the JSON package // used at the call sites; the standard marshalers can be implemented via [jsonv2]. -// See [Prefs.MarshalJSONV2], [Prefs.UnmarshalJSONV2], [Prefs.MarshalJSON], +// See [Prefs.MarshalJSONTo], [Prefs.UnmarshalJSONFrom], [Prefs.MarshalJSON], // and [Prefs.UnmarshalJSON] for an example implementation. type Prefs struct { ControlURL prefs.Item[string] `json:",omitzero"` @@ -128,34 +128,34 @@ type AppConnectorPrefs struct { Advertise prefs.Item[bool] `json:",omitzero"` } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. +// MarshalJSONTo implements [jsonv2.MarshalerTo]. // It is implemented as a performance improvement and to enable omission of // unconfigured preferences from the JSON output. See the [Prefs] doc for details. -func (p Prefs) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { +func (p Prefs) MarshalJSONTo(out *jsontext.Encoder) error { // The prefs type shadows the Prefs's method set, // causing [jsonv2] to use the default marshaler and avoiding // infinite recursion. type prefs Prefs - return jsonv2.MarshalEncode(out, (*prefs)(&p), opts) + return jsonv2.MarshalEncode(out, (*prefs)(&p)) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (p *Prefs) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (p *Prefs) UnmarshalJSONFrom(in *jsontext.Decoder) error { // The prefs type shadows the Prefs's method set, // causing [jsonv2] to use the default unmarshaler and avoiding // infinite recursion. type prefs Prefs - return jsonv2.UnmarshalDecode(in, (*prefs)(p), opts) + return jsonv2.UnmarshalDecode(in, (*prefs)(p)) } // MarshalJSON implements [json.Marshaler]. func (p Prefs) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(p) // uses MarshalJSONV2 + return jsonv2.Marshal(p) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (p *Prefs) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, p) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, p) // uses UnmarshalJSONFrom } type marshalAsTrueInJSON struct{} diff --git a/types/prefs/prefs_test.go b/types/prefs/prefs_test.go index ea4729366..1201054d0 100644 --- a/types/prefs/prefs_test.go +++ b/types/prefs/prefs_test.go @@ -53,32 +53,32 @@ type TestPrefs struct { Group TestPrefsGroup `json:",omitzero"` } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (p TestPrefs) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (p TestPrefs) MarshalJSONTo(out *jsontext.Encoder) error { // The testPrefs type shadows the TestPrefs's method set, // causing jsonv2 to use the default marshaler and avoiding // infinite recursion. type testPrefs TestPrefs - return jsonv2.MarshalEncode(out, (*testPrefs)(&p), opts) + return jsonv2.MarshalEncode(out, (*testPrefs)(&p)) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (p *TestPrefs) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (p *TestPrefs) UnmarshalJSONFrom(in *jsontext.Decoder) error { // The testPrefs type shadows the TestPrefs's method set, // causing jsonv2 to use the default unmarshaler and avoiding // infinite recursion. type testPrefs TestPrefs - return jsonv2.UnmarshalDecode(in, (*testPrefs)(p), opts) + return jsonv2.UnmarshalDecode(in, (*testPrefs)(p)) } // MarshalJSON implements [json.Marshaler]. func (p TestPrefs) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(p) // uses MarshalJSONV2 + return jsonv2.Marshal(p) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (p *TestPrefs) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, p) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, p) // uses UnmarshalJSONFrom } // TestBundle is an example structure type that, diff --git a/types/prefs/struct_list.go b/types/prefs/struct_list.go index 872cb2326..65f11011a 100644 --- a/types/prefs/struct_list.go +++ b/types/prefs/struct_list.go @@ -169,15 +169,15 @@ func (lv StructListView[T, V]) Equal(lv2 StructListView[T, V]) bool { return lv.ж.Equal(*lv2.ж) } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (lv StructListView[T, V]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return lv.ж.MarshalJSONV2(out, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (lv StructListView[T, V]) MarshalJSONTo(out *jsontext.Encoder) error { + return lv.ж.MarshalJSONTo(out) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (lv *StructListView[T, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (lv *StructListView[T, V]) UnmarshalJSONFrom(in *jsontext.Decoder) error { var x StructList[T] - if err := x.UnmarshalJSONV2(in, opts); err != nil { + if err := x.UnmarshalJSONFrom(in); err != nil { return err } lv.ж = &x @@ -186,10 +186,10 @@ func (lv *StructListView[T, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv // MarshalJSON implements [json.Marshaler]. func (lv StructListView[T, V]) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(lv) // uses MarshalJSONV2 + return jsonv2.Marshal(lv) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (lv *StructListView[T, V]) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, lv) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, lv) // uses UnmarshalJSONFrom } diff --git a/types/prefs/struct_map.go b/types/prefs/struct_map.go index 4d55da7a0..a081f7c74 100644 --- a/types/prefs/struct_map.go +++ b/types/prefs/struct_map.go @@ -149,15 +149,15 @@ func (mv StructMapView[K, T, V]) Equal(mv2 StructMapView[K, T, V]) bool { return mv.ж.Equal(*mv2.ж) } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (mv StructMapView[K, T, V]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return mv.ж.MarshalJSONV2(out, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (mv StructMapView[K, T, V]) MarshalJSONTo(out *jsontext.Encoder) error { + return mv.ж.MarshalJSONTo(out) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (mv *StructMapView[K, T, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (mv *StructMapView[K, T, V]) UnmarshalJSONFrom(in *jsontext.Decoder) error { var x StructMap[K, T] - if err := x.UnmarshalJSONV2(in, opts); err != nil { + if err := x.UnmarshalJSONFrom(in); err != nil { return err } mv.ж = &x @@ -166,10 +166,10 @@ func (mv *StructMapView[K, T, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jso // MarshalJSON implements [json.Marshaler]. func (mv StructMapView[K, T, V]) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(mv) // uses MarshalJSONV2 + return jsonv2.Marshal(mv) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (mv *StructMapView[K, T, V]) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, mv) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, mv) // uses UnmarshalJSONFrom } diff --git a/util/syspolicy/internal/internal.go b/util/syspolicy/internal/internal.go index 8f2889625..2e1737e5b 100644 --- a/util/syspolicy/internal/internal.go +++ b/util/syspolicy/internal/internal.go @@ -56,10 +56,10 @@ func EqualJSONForTest(tb TB, j1, j2 jsontext.Value) (s1, s2 string, equal bool) return "", "", true } // Otherwise, format the values for display and return false. - if err := j1.Indent("", "\t"); err != nil { + if err := j1.Indent(); err != nil { tb.Fatal(err) } - if err := j2.Indent("", "\t"); err != nil { + if err := j2.Indent(); err != nil { tb.Fatal(err) } return j1.String(), j2.String(), false diff --git a/util/syspolicy/setting/origin.go b/util/syspolicy/setting/origin.go index 078ef758e..b5b28edf6 100644 --- a/util/syspolicy/setting/origin.go +++ b/util/syspolicy/setting/origin.go @@ -50,22 +50,22 @@ func (s Origin) String() string { return s.Scope().String() } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (s Origin) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return jsonv2.MarshalEncode(out, &s.data, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (s Origin) MarshalJSONTo(out *jsontext.Encoder) error { + return jsonv2.MarshalEncode(out, &s.data) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (s *Origin) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { - return jsonv2.UnmarshalDecode(in, &s.data, opts) +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (s *Origin) UnmarshalJSONFrom(in *jsontext.Decoder) error { + return jsonv2.UnmarshalDecode(in, &s.data) } // MarshalJSON implements [json.Marshaler]. func (s Origin) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(s) // uses MarshalJSONV2 + return jsonv2.Marshal(s) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (s *Origin) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONFrom } diff --git a/util/syspolicy/setting/raw_item.go b/util/syspolicy/setting/raw_item.go index cf46e54b7..82e5f634a 100644 --- a/util/syspolicy/setting/raw_item.go +++ b/util/syspolicy/setting/raw_item.go @@ -75,31 +75,31 @@ func (i RawItem) String() string { return fmt.Sprintf("%v%s", i.data.Value.Value, suffix) } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. 
-func (i RawItem) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return jsonv2.MarshalEncode(out, &i.data, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (i RawItem) MarshalJSONTo(out *jsontext.Encoder) error { + return jsonv2.MarshalEncode(out, &i.data) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (i *RawItem) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { - return jsonv2.UnmarshalDecode(in, &i.data, opts) +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (i *RawItem) UnmarshalJSONFrom(in *jsontext.Decoder) error { + return jsonv2.UnmarshalDecode(in, &i.data) } // MarshalJSON implements [json.Marshaler]. func (i RawItem) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(i) // uses MarshalJSONV2 + return jsonv2.Marshal(i) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (i *RawItem) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, i) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, i) // uses UnmarshalJSONFrom } // RawValue represents a raw policy setting value read from a policy store. // It is JSON-marshallable and facilitates unmarshalling of JSON values // into corresponding policy setting types, with special handling for JSON numbers // (unmarshalled as float64) and JSON string arrays (unmarshalled as []string). -// See also [RawValue.UnmarshalJSONV2]. +// See also [RawValue.UnmarshalJSONFrom]. type RawValue struct { opt.Value[any] } @@ -114,16 +114,16 @@ func RawValueOf[T RawValueType](v T) RawValue { return RawValue{opt.ValueOf[any](v)} } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (v RawValue) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return jsonv2.MarshalEncode(out, v.Value, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v RawValue) MarshalJSONTo(out *jsontext.Encoder) error { + return jsonv2.MarshalEncode(out, v.Value) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2] by attempting to unmarshal +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom] by attempting to unmarshal // a JSON value as one of the supported policy setting value types (bool, string, uint64, or []string), // based on the JSON value type. It fails if the JSON value is an object, if it's a JSON number that // cannot be represented as a uint64, or if a JSON array contains anything other than strings. -func (v *RawValue) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +func (v *RawValue) UnmarshalJSONFrom(in *jsontext.Decoder) error { var valPtr any switch k := in.PeekKind(); k { case 't', 'f': @@ -139,7 +139,7 @@ func (v *RawValue) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) er default: panic("unreachable") } - if err := jsonv2.UnmarshalDecode(in, valPtr, opts); err != nil { + if err := jsonv2.UnmarshalDecode(in, valPtr); err != nil { v.Value.Clear() return err } @@ -150,12 +150,12 @@ func (v *RawValue) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) er // MarshalJSON implements [json.Marshaler]. func (v RawValue) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(v) // uses MarshalJSONV2 + return jsonv2.Marshal(v) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (v *RawValue) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, v) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, v) // uses UnmarshalJSONFrom } // RawValues is a map of keyed setting values that can be read from a JSON. 
diff --git a/util/syspolicy/setting/snapshot.go b/util/syspolicy/setting/snapshot.go index 0af2bae0f..38642f7cc 100644 --- a/util/syspolicy/setting/snapshot.go +++ b/util/syspolicy/setting/snapshot.go @@ -147,23 +147,23 @@ type snapshotJSON struct { Settings map[Key]RawItem `json:",omitempty"` } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (s *Snapshot) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (s *Snapshot) MarshalJSONTo(out *jsontext.Encoder) error { data := &snapshotJSON{} if s != nil { data.Summary = s.summary data.Settings = s.m } - return jsonv2.MarshalEncode(out, data, opts) + return jsonv2.MarshalEncode(out, data) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (s *Snapshot) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (s *Snapshot) UnmarshalJSONFrom(in *jsontext.Decoder) error { if s == nil { return errors.New("s must not be nil") } data := &snapshotJSON{} - if err := jsonv2.UnmarshalDecode(in, data, opts); err != nil { + if err := jsonv2.UnmarshalDecode(in, data); err != nil { return err } *s = Snapshot{m: data.Settings, sig: deephash.Hash(&data.Settings), summary: data.Summary} @@ -172,12 +172,12 @@ func (s *Snapshot) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) er // MarshalJSON implements [json.Marshaler]. func (s *Snapshot) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(s) // uses MarshalJSONV2 + return jsonv2.Marshal(s) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (s *Snapshot) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONFrom } // MergeSnapshots returns a [Snapshot] that contains all [RawItem]s diff --git a/util/syspolicy/setting/summary.go b/util/syspolicy/setting/summary.go index 5ff20e0aa..d7c139a87 100644 --- a/util/syspolicy/setting/summary.go +++ b/util/syspolicy/setting/summary.go @@ -54,24 +54,24 @@ func (s Summary) String() string { return s.data.Scope.String() } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (s Summary) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return jsonv2.MarshalEncode(out, &s.data, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (s Summary) MarshalJSONTo(out *jsontext.Encoder) error { + return jsonv2.MarshalEncode(out, &s.data) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (s *Summary) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { - return jsonv2.UnmarshalDecode(in, &s.data, opts) +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (s *Summary) UnmarshalJSONFrom(in *jsontext.Decoder) error { + return jsonv2.UnmarshalDecode(in, &s.data) } // MarshalJSON implements [json.Marshaler]. func (s Summary) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(s) // uses MarshalJSONV2 + return jsonv2.Marshal(s) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. 
func (s *Summary) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONFrom } // SummaryOption is an option that configures [Summary] From f5522e62d1dde2ea966f2454df248a8ea2d43676 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Thu, 27 Feb 2025 11:58:45 -0800 Subject: [PATCH 0535/1708] client/web: fix CSRF handler order in web UI (#15143) Fix the order of the CSRF handlers (HTTP plaintext context setting, _then_ enforcement) in the construction of the web UI server. This resolves false-positive "invalid Origin" 403 exceptions when attempting to update settings in the web UI. Add unit test to exercise the CSRF protection failure and success cases for our web UI configuration. Updates #14822 Updates #14872 Signed-off-by: Patrick O'Doherty --- client/web/web.go | 53 +++++++++++++++------------ client/web/web_test.go | 82 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 112 insertions(+), 23 deletions(-) diff --git a/client/web/web.go b/client/web/web.go index 6203b4c18..e9810ccd0 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -203,15 +203,25 @@ func NewServer(opts ServerOpts) (s *Server, err error) { } s.assetsHandler, s.assetsCleanup = assetsHandler(s.devMode) - var metric string // clientmetric to report on startup + var metric string + s.apiHandler, metric = s.modeAPIHandler(s.mode) + s.apiHandler = s.withCSRF(s.apiHandler) - // Create handler for "/api" requests with CSRF protection. - // We don't require secure cookies, since the web client is regularly used - // on network appliances that are served on local non-https URLs. - // The client is secured by limiting the interface it listens on, - // or by authenticating requests before they reach the web client. + // Don't block startup on reporting metric. + // Report in separate go routine with 5 second timeout. + go func() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + s.lc.IncrementCounter(ctx, metric, 1) + }() + + return s, nil +} + +func (s *Server) withCSRF(h http.Handler) http.Handler { csrfProtect := csrf.Protect(s.csrfKey(), csrf.Secure(false)) + // ref https://github.com/tailscale/tailscale/pull/14822 // signal to the CSRF middleware that the request is being served over // plaintext HTTP to skip TLS-only header checks. withSetPlaintext := func(h http.Handler) http.Handler { @@ -221,27 +231,24 @@ func NewServer(opts ServerOpts) (s *Server, err error) { }) } - switch s.mode { + // NB: the order of the withSetPlaintext and csrfProtect calls is important + // to ensure that we signal to the CSRF middleware that the request is being + // served over plaintext HTTP and not over TLS as it presumes by default. 
+ return withSetPlaintext(csrfProtect(h)) +} + +func (s *Server) modeAPIHandler(mode ServerMode) (http.Handler, string) { + switch mode { case LoginServerMode: - s.apiHandler = csrfProtect(withSetPlaintext(http.HandlerFunc(s.serveLoginAPI))) - metric = "web_login_client_initialization" + return http.HandlerFunc(s.serveLoginAPI), "web_login_client_initialization" case ReadOnlyServerMode: - s.apiHandler = csrfProtect(withSetPlaintext(http.HandlerFunc(s.serveLoginAPI))) - metric = "web_readonly_client_initialization" + return http.HandlerFunc(s.serveLoginAPI), "web_readonly_client_initialization" case ManageServerMode: - s.apiHandler = csrfProtect(withSetPlaintext(http.HandlerFunc(s.serveAPI))) - metric = "web_client_initialization" + return http.HandlerFunc(s.serveAPI), "web_client_initialization" + default: // invalid mode + log.Fatalf("invalid mode: %v", mode) } - - // Don't block startup on reporting metric. - // Report in separate go routine with 5 second timeout. - go func() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - s.lc.IncrementCounter(ctx, metric, 1) - }() - - return s, nil + return nil, "" } func (s *Server) Shutdown() { diff --git a/client/web/web_test.go b/client/web/web_test.go index b9242f6ac..291356260 100644 --- a/client/web/web_test.go +++ b/client/web/web_test.go @@ -11,6 +11,7 @@ import ( "fmt" "io" "net/http" + "net/http/cookiejar" "net/http/httptest" "net/netip" "net/url" @@ -20,6 +21,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/gorilla/csrf" "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn" @@ -1477,3 +1479,83 @@ func mockWaitAuthURL(_ context.Context, id string, src tailcfg.NodeID) (*tailcfg return nil, errors.New("unknown id") } } + +func TestCSRFProtect(t *testing.T) { + s := &Server{} + + mux := http.NewServeMux() + mux.HandleFunc("GET /test/csrf-token", func(w http.ResponseWriter, r *http.Request) { + token := csrf.Token(r) + _, err := io.WriteString(w, token) + if err != nil { + t.Fatal(err) + } + }) + mux.HandleFunc("POST /test/csrf-protected", func(w http.ResponseWriter, r *http.Request) { + _, err := io.WriteString(w, "ok") + if err != nil { + t.Fatal(err) + } + }) + h := s.withCSRF(mux) + ser := httptest.NewServer(h) + defer ser.Close() + + jar, err := cookiejar.New(nil) + if err != nil { + t.Fatalf("unable to construct cookie jar: %v", err) + } + + client := ser.Client() + client.Jar = jar + + // make GET request to populate cookie jar + resp, err := client.Get(ser.URL + "/test/csrf-token") + if err != nil { + t.Fatalf("unable to make request: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Fatalf("unexpected status: %v", resp.Status) + } + tokenBytes, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("unable to read body: %v", err) + } + + csrfToken := strings.TrimSpace(string(tokenBytes)) + if csrfToken == "" { + t.Fatal("empty csrf token") + } + + // make a POST request without the CSRF header; ensure it fails + resp, err = client.Post(ser.URL+"/test/csrf-protected", "text/plain", nil) + if err != nil { + t.Fatalf("unable to make request: %v", err) + } + if resp.StatusCode != http.StatusForbidden { + t.Fatalf("unexpected status: %v", resp.Status) + } + + // make a POST request with the CSRF header; ensure it succeeds + req, err := http.NewRequest("POST", ser.URL+"/test/csrf-protected", nil) + if err != nil { + t.Fatalf("error building request: %v", err) + } + req.Header.Set("X-CSRF-Token", csrfToken) + 
resp, err = client.Do(req) + if err != nil { + t.Fatalf("unable to make request: %v", err) + } + if resp.StatusCode != http.StatusOK { + t.Fatalf("unexpected status: %v", resp.Status) + } + defer resp.Body.Close() + out, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("unable to read body: %v", err) + } + if string(out) != "ok" { + t.Fatalf("unexpected body: %q", out) + } +} From 3d28aa19cbf70a0b0e72d2ce37e83bd7e73a346c Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Thu, 27 Feb 2025 12:33:31 -0800 Subject: [PATCH 0536/1708] all: statically enforce json/v2 interface satisfaction (#15154) The json/v2 prototype is still in flux and the API can/will change. Statically enforce that types implementing the v2 methods satisfy the correct interface so that changes to the signature can be statically detected by the compiler. Updates tailscale/corp#791 Signed-off-by: Joe Tsai --- types/opt/value_test.go | 5 +++++ types/prefs/list.go | 5 +++++ types/prefs/prefs.go | 5 +++++ types/prefs/prefs_example/prefs_types.go | 5 +++++ types/prefs/prefs_test.go | 19 +++++++++++++++++++ util/syspolicy/setting/origin.go | 5 +++++ util/syspolicy/setting/raw_item.go | 10 ++++++++++ util/syspolicy/setting/snapshot.go | 5 +++++ util/syspolicy/setting/summary.go | 5 +++++ 9 files changed, 64 insertions(+) diff --git a/types/opt/value_test.go b/types/opt/value_test.go index dbd8b255f..890f9a579 100644 --- a/types/opt/value_test.go +++ b/types/opt/value_test.go @@ -13,6 +13,11 @@ import ( "tailscale.com/util/must" ) +var ( + _ jsonv2.MarshalerTo = (*Value[bool])(nil) + _ jsonv2.UnmarshalerFrom = (*Value[bool])(nil) +) + type testStruct struct { Int int `json:",omitempty,omitzero"` Str string `json:",omitempty"` diff --git a/types/prefs/list.go b/types/prefs/list.go index e9c1a1f33..7db473887 100644 --- a/types/prefs/list.go +++ b/types/prefs/list.go @@ -157,6 +157,11 @@ func (lv ListView[T]) Equal(lv2 ListView[T]) bool { return lv.ж.Equal(*lv2.ж) } +var ( + _ jsonv2.MarshalerTo = (*ListView[bool])(nil) + _ jsonv2.UnmarshalerFrom = (*ListView[bool])(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. func (lv ListView[T]) MarshalJSONTo(out *jsontext.Encoder) error { return lv.ж.MarshalJSONTo(out) diff --git a/types/prefs/prefs.go b/types/prefs/prefs.go index 52cb464b6..a6caf1283 100644 --- a/types/prefs/prefs.go +++ b/types/prefs/prefs.go @@ -158,6 +158,11 @@ func (p *preference[T]) SetReadOnly(readonly bool) { p.s.Metadata.ReadOnly = readonly } +var ( + _ jsonv2.MarshalerTo = (*preference[struct{}])(nil) + _ jsonv2.UnmarshalerFrom = (*preference[struct{}])(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. func (p preference[T]) MarshalJSONTo(out *jsontext.Encoder) error { return jsonv2.MarshalEncode(out, &p.s) diff --git a/types/prefs/prefs_example/prefs_types.go b/types/prefs/prefs_example/prefs_types.go index f88c29f94..c35f1f62f 100644 --- a/types/prefs/prefs_example/prefs_types.go +++ b/types/prefs/prefs_example/prefs_types.go @@ -128,6 +128,11 @@ type AppConnectorPrefs struct { Advertise prefs.Item[bool] `json:",omitzero"` } +var ( + _ jsonv2.MarshalerTo = (*Prefs)(nil) + _ jsonv2.UnmarshalerFrom = (*Prefs)(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. // It is implemented as a performance improvement and to enable omission of // unconfigured preferences from the JSON output. See the [Prefs] doc for details. 
diff --git a/types/prefs/prefs_test.go b/types/prefs/prefs_test.go index 1201054d0..d6af745bf 100644 --- a/types/prefs/prefs_test.go +++ b/types/prefs/prefs_test.go @@ -19,6 +19,20 @@ import ( //go:generate go run tailscale.com/cmd/viewer --tags=test --type=TestPrefs,TestBundle,TestValueStruct,TestGenericStruct,TestPrefsGroup +var ( + _ jsonv2.MarshalerTo = (*ItemView[*TestBundle, TestBundleView])(nil) + _ jsonv2.UnmarshalerFrom = (*ItemView[*TestBundle, TestBundleView])(nil) + + _ jsonv2.MarshalerTo = (*MapView[string, string])(nil) + _ jsonv2.UnmarshalerFrom = (*MapView[string, string])(nil) + + _ jsonv2.MarshalerTo = (*StructListView[*TestBundle, TestBundleView])(nil) + _ jsonv2.UnmarshalerFrom = (*StructListView[*TestBundle, TestBundleView])(nil) + + _ jsonv2.MarshalerTo = (*StructMapView[string, *TestBundle, TestBundleView])(nil) + _ jsonv2.UnmarshalerFrom = (*StructMapView[string, *TestBundle, TestBundleView])(nil) +) + type TestPrefs struct { Int32Item Item[int32] `json:",omitzero"` UInt64Item Item[uint64] `json:",omitzero"` @@ -53,6 +67,11 @@ type TestPrefs struct { Group TestPrefsGroup `json:",omitzero"` } +var ( + _ jsonv2.MarshalerTo = (*TestPrefs)(nil) + _ jsonv2.UnmarshalerFrom = (*TestPrefs)(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. func (p TestPrefs) MarshalJSONTo(out *jsontext.Encoder) error { // The testPrefs type shadows the TestPrefs's method set, diff --git a/util/syspolicy/setting/origin.go b/util/syspolicy/setting/origin.go index b5b28edf6..4c7cc7025 100644 --- a/util/syspolicy/setting/origin.go +++ b/util/syspolicy/setting/origin.go @@ -50,6 +50,11 @@ func (s Origin) String() string { return s.Scope().String() } +var ( + _ jsonv2.MarshalerTo = (*Origin)(nil) + _ jsonv2.UnmarshalerFrom = (*Origin)(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. func (s Origin) MarshalJSONTo(out *jsontext.Encoder) error { return jsonv2.MarshalEncode(out, &s.data) diff --git a/util/syspolicy/setting/raw_item.go b/util/syspolicy/setting/raw_item.go index 82e5f634a..9a96073b0 100644 --- a/util/syspolicy/setting/raw_item.go +++ b/util/syspolicy/setting/raw_item.go @@ -75,6 +75,11 @@ func (i RawItem) String() string { return fmt.Sprintf("%v%s", i.data.Value.Value, suffix) } +var ( + _ jsonv2.MarshalerTo = (*RawItem)(nil) + _ jsonv2.UnmarshalerFrom = (*RawItem)(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. func (i RawItem) MarshalJSONTo(out *jsontext.Encoder) error { return jsonv2.MarshalEncode(out, &i.data) @@ -114,6 +119,11 @@ func RawValueOf[T RawValueType](v T) RawValue { return RawValue{opt.ValueOf[any](v)} } +var ( + _ jsonv2.MarshalerTo = (*RawValue)(nil) + _ jsonv2.UnmarshalerFrom = (*RawValue)(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. func (v RawValue) MarshalJSONTo(out *jsontext.Encoder) error { return jsonv2.MarshalEncode(out, v.Value) diff --git a/util/syspolicy/setting/snapshot.go b/util/syspolicy/setting/snapshot.go index 38642f7cc..087325a04 100644 --- a/util/syspolicy/setting/snapshot.go +++ b/util/syspolicy/setting/snapshot.go @@ -147,6 +147,11 @@ type snapshotJSON struct { Settings map[Key]RawItem `json:",omitempty"` } +var ( + _ jsonv2.MarshalerTo = (*Snapshot)(nil) + _ jsonv2.UnmarshalerFrom = (*Snapshot)(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. 
func (s *Snapshot) MarshalJSONTo(out *jsontext.Encoder) error { data := &snapshotJSON{} diff --git a/util/syspolicy/setting/summary.go b/util/syspolicy/setting/summary.go index d7c139a87..9864822f7 100644 --- a/util/syspolicy/setting/summary.go +++ b/util/syspolicy/setting/summary.go @@ -54,6 +54,11 @@ func (s Summary) String() string { return s.data.Scope.String() } +var ( + _ jsonv2.MarshalerTo = (*Summary)(nil) + _ jsonv2.UnmarshalerFrom = (*Summary)(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. func (s Summary) MarshalJSONTo(out *jsontext.Encoder) error { return jsonv2.MarshalEncode(out, &s.data) From b85d18d14e9898261af00de60ebf069bc17a1a0b Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 27 Feb 2025 14:41:05 -0800 Subject: [PATCH 0537/1708] ipn/{ipnlocal,store},kube/kubeclient: store TLS cert and key pair to a Secret in a single operation. (#15147) To avoid duplicate issuances/slowness while the state Secret contains a mismatched cert and key. Updates tailscale/tailscale#15134 Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- ipn/ipnlocal/cert.go | 39 ++++-- ipn/ipnlocal/cert_test.go | 8 +- ipn/store/kubestore/store_kube.go | 76 ++++++---- ipn/store/kubestore/store_kube_test.go | 183 +++++++++++++++++++++++++ kube/kubeclient/fake_client.go | 15 +- 5 files changed, 278 insertions(+), 43 deletions(-) create mode 100644 ipn/store/kubestore/store_kube_test.go diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index cfa4fe1ba..d360ed79c 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -250,15 +250,13 @@ type certStore interface { // for now. If they're expired, it returns errCertExpired. // If they don't exist, it returns ipn.ErrStateNotExist. Read(domain string, now time.Time) (*TLSCertKeyPair, error) - // WriteCert writes the cert for domain. - WriteCert(domain string, cert []byte) error - // WriteKey writes the key for domain. - WriteKey(domain string, key []byte) error // ACMEKey returns the value previously stored via WriteACMEKey. // It is a PEM encoded ECDSA key. ACMEKey() ([]byte, error) // WriteACMEKey stores the provided PEM encoded ECDSA key. WriteACMEKey([]byte) error + // WriteTLSCertAndKey writes the cert and key for domain. + WriteTLSCertAndKey(domain string, cert, key []byte) error } var errCertExpired = errors.New("cert expired") @@ -344,6 +342,13 @@ func (f certFileStore) WriteKey(domain string, key []byte) error { return atomicfile.WriteFile(keyFile(f.dir, domain), key, 0600) } +func (f certFileStore) WriteTLSCertAndKey(domain string, cert, key []byte) error { + if err := f.WriteKey(domain, key); err != nil { + return err + } + return f.WriteCert(domain, cert) +} + // certStateStore implements certStore by storing the cert & key files in an ipn.StateStore. type certStateStore struct { ipn.StateStore @@ -384,6 +389,27 @@ func (s certStateStore) WriteACMEKey(key []byte) error { return ipn.WriteState(s.StateStore, ipn.StateKey(acmePEMName), key) } +// TLSCertKeyWriter is an interface implemented by state stores that can write the TLS +// cert and key in a single atomic operation. Currently this is only implemented +// by the kubestore.StoreKube. +type TLSCertKeyWriter interface { + WriteTLSCertAndKey(domain string, cert, key []byte) error +} + +// WriteTLSCertAndKey writes the TLS cert and key for domain to the current +// LocalBackend's StateStore. +func (s certStateStore) WriteTLSCertAndKey(domain string, cert, key []byte) error { + // If we're using a store that supports atomic writes, use that. 
+ if aw, ok := s.StateStore.(TLSCertKeyWriter); ok { + return aw.WriteTLSCertAndKey(domain, cert, key) + } + // Otherwise fall back to separate writes for cert and key. + if err := s.WriteKey(domain, key); err != nil { + return err + } + return s.WriteCert(domain, cert) +} + // TLSCertKeyPair is a TLS public and private key, and whether they were obtained // from cache or freshly obtained. type TLSCertKeyPair struct { @@ -546,9 +572,6 @@ func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger if err := encodeECDSAKey(&privPEM, certPrivKey); err != nil { return nil, err } - if err := cs.WriteKey(domain, privPEM.Bytes()); err != nil { - return nil, err - } csr, err := certRequest(certPrivKey, domain, nil) if err != nil { @@ -570,7 +593,7 @@ func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger return nil, err } } - if err := cs.WriteCert(domain, certPEM.Bytes()); err != nil { + if err := cs.WriteTLSCertAndKey(domain, certPEM.Bytes(), privPEM.Bytes()); err != nil { return nil, err } b.domainRenewed(domain) diff --git a/ipn/ipnlocal/cert_test.go b/ipn/ipnlocal/cert_test.go index 21741ca95..868808cd6 100644 --- a/ipn/ipnlocal/cert_test.go +++ b/ipn/ipnlocal/cert_test.go @@ -86,13 +86,9 @@ func TestCertStoreRoundTrip(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - if err := test.store.WriteCert(testDomain, testCert); err != nil { - t.Fatalf("WriteCert: unexpected error: %v", err) + if err := test.store.WriteTLSCertAndKey(testDomain, testCert, testKey); err != nil { + t.Fatalf("WriteTLSCertAndKey: unexpected error: %v", err) } - if err := test.store.WriteKey(testDomain, testKey); err != nil { - t.Fatalf("WriteKey: unexpected error: %v", err) - } - kp, err := test.store.Read(testDomain, testNow) if err != nil { t.Fatalf("Read: unexpected error: %v", err) diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 462e6d434..b4e14c6d3 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -18,6 +18,7 @@ import ( "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" "tailscale.com/types/logger" + "tailscale.com/util/mak" ) const ( @@ -83,10 +84,22 @@ func (s *Store) ReadState(id ipn.StateKey) ([]byte, error) { // WriteState implements the StateStore interface. func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) { + return s.updateStateSecret(map[string][]byte{string(id): bs}) +} + +// WriteTLSCertAndKey writes a TLS cert and key to domain.crt, domain.key fields of a Tailscale Kubernetes node's state +// Secret. +func (s *Store) WriteTLSCertAndKey(domain string, cert, key []byte) error { + return s.updateStateSecret(map[string][]byte{domain + ".crt": cert, domain + ".key": key}) +} + +func (s *Store) updateStateSecret(data map[string][]byte) (err error) { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer func() { if err == nil { - s.memory.WriteState(ipn.StateKey(sanitizeKey(id)), bs) + for id, bs := range data { + s.memory.WriteState(ipn.StateKey(id), bs) + } } if err != nil { if err := s.client.Event(ctx, eventTypeWarning, reasonTailscaleStateUpdateFailed, err.Error()); err != nil { @@ -99,9 +112,9 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) { } cancel() }() - secret, err := s.client.GetSecret(ctx, s.secretName) if err != nil { + // If the Secret does not exist, create it with the required data. 
if kubeclient.IsNotFoundErr(err) { return s.client.CreateSecret(ctx, &kubeapi.Secret{ TypeMeta: kubeapi.TypeMeta{ @@ -111,40 +124,53 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) { ObjectMeta: kubeapi.ObjectMeta{ Name: s.secretName, }, - Data: map[string][]byte{ - sanitizeKey(id): bs, - }, + Data: func(m map[string][]byte) map[string][]byte { + d := make(map[string][]byte, len(m)) + for key, val := range m { + d[sanitizeKey(key)] = val + } + return d + }(data), }) } return err } if s.canPatch { - if len(secret.Data) == 0 { // if user has pre-created a blank Secret - m := []kubeclient.JSONPatch{ + var m []kubeclient.JSONPatch + // If the user has pre-created a Secret with no data, we need to ensure the top level /data field. + if len(secret.Data) == 0 { + m = []kubeclient.JSONPatch{ { - Op: "add", - Path: "/data", - Value: map[string][]byte{sanitizeKey(id): bs}, + Op: "add", + Path: "/data", + Value: func(m map[string][]byte) map[string][]byte { + d := make(map[string][]byte, len(m)) + for key, val := range m { + d[sanitizeKey(key)] = val + } + return d + }(data), }, } - if err := s.client.JSONPatchResource(ctx, s.secretName, kubeclient.TypeSecrets, m); err != nil { - return fmt.Errorf("error patching Secret %s with a /data field: %v", s.secretName, err) + // If the Secret has data, patch it with the new data. + } else { + for key, val := range data { + m = append(m, kubeclient.JSONPatch{ + Op: "add", + Path: "/data/" + sanitizeKey(key), + Value: val, + }) } - return nil - } - m := []kubeclient.JSONPatch{ - { - Op: "add", - Path: "/data/" + sanitizeKey(id), - Value: bs, - }, } if err := s.client.JSONPatchResource(ctx, s.secretName, kubeclient.TypeSecrets, m); err != nil { - return fmt.Errorf("error patching Secret %s with /data/%s field: %v", s.secretName, sanitizeKey(id), err) + return fmt.Errorf("error patching Secret %s: %w", s.secretName, err) } return nil } - secret.Data[sanitizeKey(id)] = bs + // No patch permissions, use UPDATE instead. + for key, val := range data { + mak.Set(&secret.Data, sanitizeKey(key), val) + } if err := s.client.UpdateSecret(ctx, secret); err != nil { return err } @@ -172,9 +198,9 @@ func (s *Store) loadState() (err error) { return nil } -func sanitizeKey(k ipn.StateKey) string { - // The only valid characters in a Kubernetes secret key are alphanumeric, -, - // _, and . +// sanitizeKey converts any value that can be converted to a string into a valid Kubernetes secret key. +// Valid characters are alphanumeric, -, _, and . +func sanitizeKey[T ~string](k T) string { return strings.Map(func(r rune) rune { if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '-' || r == '_' || r == '.' 
{ return r diff --git a/ipn/store/kubestore/store_kube_test.go b/ipn/store/kubestore/store_kube_test.go new file mode 100644 index 000000000..f3c5ac9fb --- /dev/null +++ b/ipn/store/kubestore/store_kube_test.go @@ -0,0 +1,183 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package kubestore + +import ( + "context" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "tailscale.com/ipn" + "tailscale.com/ipn/store/mem" + "tailscale.com/kube/kubeapi" + "tailscale.com/kube/kubeclient" +) + +func TestUpdateStateSecret(t *testing.T) { + tests := []struct { + name string + initial map[string][]byte + updates map[string][]byte + wantData map[string][]byte + allowPatch bool + }{ + { + name: "basic_update", + initial: map[string][]byte{ + "existing": []byte("old"), + }, + updates: map[string][]byte{ + "foo": []byte("bar"), + }, + wantData: map[string][]byte{ + "existing": []byte("old"), + "foo": []byte("bar"), + }, + allowPatch: true, + }, + { + name: "update_existing", + initial: map[string][]byte{ + "foo": []byte("old"), + }, + updates: map[string][]byte{ + "foo": []byte("new"), + }, + wantData: map[string][]byte{ + "foo": []byte("new"), + }, + allowPatch: true, + }, + { + name: "multiple_updates", + initial: map[string][]byte{ + "keep": []byte("keep"), + }, + updates: map[string][]byte{ + "foo": []byte("bar"), + "baz": []byte("qux"), + }, + wantData: map[string][]byte{ + "keep": []byte("keep"), + "foo": []byte("bar"), + "baz": []byte("qux"), + }, + allowPatch: true, + }, + { + name: "create_new_secret", + updates: map[string][]byte{ + "foo": []byte("bar"), + }, + wantData: map[string][]byte{ + "foo": []byte("bar"), + }, + allowPatch: true, + }, + { + name: "patch_denied", + initial: map[string][]byte{ + "foo": []byte("old"), + }, + updates: map[string][]byte{ + "foo": []byte("new"), + }, + wantData: map[string][]byte{ + "foo": []byte("new"), + }, + allowPatch: false, + }, + { + name: "sanitize_keys", + initial: map[string][]byte{ + "clean-key": []byte("old"), + }, + updates: map[string][]byte{ + "dirty@key": []byte("new"), + "also/bad": []byte("value"), + "good.key": []byte("keep"), + }, + wantData: map[string][]byte{ + "clean-key": []byte("old"), + "dirty_key": []byte("new"), + "also_bad": []byte("value"), + "good.key": []byte("keep"), + }, + allowPatch: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + secret := tt.initial // track current state + client := &kubeclient.FakeClient{ + GetSecretImpl: func(ctx context.Context, name string) (*kubeapi.Secret, error) { + if secret == nil { + return nil, &kubeapi.Status{Code: 404} + } + return &kubeapi.Secret{Data: secret}, nil + }, + CheckSecretPermissionsImpl: func(ctx context.Context, name string) (bool, bool, error) { + return tt.allowPatch, true, nil + }, + CreateSecretImpl: func(ctx context.Context, s *kubeapi.Secret) error { + secret = s.Data + return nil + }, + UpdateSecretImpl: func(ctx context.Context, s *kubeapi.Secret) error { + secret = s.Data + return nil + }, + JSONPatchResourceImpl: func(ctx context.Context, name, resourceType string, patches []kubeclient.JSONPatch) error { + if !tt.allowPatch { + return &kubeapi.Status{Reason: "Forbidden"} + } + if secret == nil { + secret = make(map[string][]byte) + } + for _, p := range patches { + if p.Op == "add" && p.Path == "/data" { + secret = p.Value.(map[string][]byte) + } else if p.Op == "add" && strings.HasPrefix(p.Path, "/data/") { + key := strings.TrimPrefix(p.Path, "/data/") + secret[key] = 
p.Value.([]byte) + } + } + return nil + }, + } + + s := &Store{ + client: client, + canPatch: tt.allowPatch, + secretName: "test-secret", + memory: mem.Store{}, + } + + err := s.updateStateSecret(tt.updates) + if err != nil { + t.Errorf("updateStateSecret() error = %v", err) + return + } + + // Verify secret data + if diff := cmp.Diff(secret, tt.wantData); diff != "" { + t.Errorf("secret data mismatch (-got +want):\n%s", diff) + } + + // Verify memory store was updated + for k, v := range tt.updates { + got, err := s.memory.ReadState(ipn.StateKey(k)) + if err != nil { + t.Errorf("reading from memory store: %v", err) + continue + } + if !cmp.Equal(got, v) { + t.Errorf("memory store key %q = %v, want %v", k, got, v) + } + } + }) + } +} diff --git a/kube/kubeclient/fake_client.go b/kube/kubeclient/fake_client.go index 5716ca31b..aea786ea0 100644 --- a/kube/kubeclient/fake_client.go +++ b/kube/kubeclient/fake_client.go @@ -15,6 +15,9 @@ var _ Client = &FakeClient{} type FakeClient struct { GetSecretImpl func(context.Context, string) (*kubeapi.Secret, error) CheckSecretPermissionsImpl func(ctx context.Context, name string) (bool, bool, error) + CreateSecretImpl func(context.Context, *kubeapi.Secret) error + UpdateSecretImpl func(context.Context, *kubeapi.Secret) error + JSONPatchResourceImpl func(context.Context, string, string, []JSONPatch) error } func (fc *FakeClient) CheckSecretPermissions(ctx context.Context, name string) (bool, bool, error) { @@ -33,8 +36,12 @@ func (fc *FakeClient) Event(context.Context, string, string, string) error { return nil } -func (fc *FakeClient) JSONPatchResource(context.Context, string, string, []JSONPatch) error { - return nil +func (fc *FakeClient) JSONPatchResource(ctx context.Context, resource, name string, patches []JSONPatch) error { + return fc.JSONPatchResourceImpl(ctx, resource, name, patches) +} +func (fc *FakeClient) UpdateSecret(ctx context.Context, secret *kubeapi.Secret) error { + return fc.UpdateSecretImpl(ctx, secret) +} +func (fc *FakeClient) CreateSecret(ctx context.Context, secret *kubeapi.Secret) error { + return fc.CreateSecretImpl(ctx, secret) } -func (fc *FakeClient) UpdateSecret(context.Context, *kubeapi.Secret) error { return nil } -func (fc *FakeClient) CreateSecret(context.Context, *kubeapi.Secret) error { return nil } From 6df0aa58bbddedf6c6f0373f9dd2eb0693e01fd8 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 27 Feb 2025 15:05:04 -0800 Subject: [PATCH 0538/1708] cmd/containerboot: fix nil pointer exception (#15090) Updates tailscale/tailscale#15081 Signed-off-by: Irbe Krumina --- cmd/containerboot/serve.go | 2 ++ cmd/containerboot/tailscaled.go | 13 +++++++++---- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index fbfaba64a..4ea5a9c46 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -35,6 +35,8 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan var tickChan <-chan time.Time var eventChan <-chan fsnotify.Event if w, err := fsnotify.NewWatcher(); err != nil { + // Creating a new fsnotify watcher would fail for example if inotify was not able to create a new file descriptor. 
+ // See https://github.com/tailscale/tailscale/issues/15081 log.Printf("serve proxy: failed to create fsnotify watcher, timer-only mode: %v", err) ticker := time.NewTicker(5 * time.Second) defer ticker.Stop() diff --git a/cmd/containerboot/tailscaled.go b/cmd/containerboot/tailscaled.go index e73a7e94d..01ee96d3a 100644 --- a/cmd/containerboot/tailscaled.go +++ b/cmd/containerboot/tailscaled.go @@ -173,11 +173,14 @@ func tailscaleSet(ctx context.Context, cfg *settings) error { func watchTailscaledConfigChanges(ctx context.Context, path string, lc *local.Client, errCh chan<- error) { var ( tickChan <-chan time.Time + eventChan <-chan fsnotify.Event + errChan <-chan error tailscaledCfgDir = filepath.Dir(path) prevTailscaledCfg []byte ) - w, err := fsnotify.NewWatcher() - if err != nil { + if w, err := fsnotify.NewWatcher(); err != nil { + // Creating a new fsnotify watcher would fail for example if inotify was not able to create a new file descriptor. + // See https://github.com/tailscale/tailscale/issues/15081 log.Printf("tailscaled config watch: failed to create fsnotify watcher, timer-only mode: %v", err) ticker := time.NewTicker(5 * time.Second) defer ticker.Stop() @@ -188,6 +191,8 @@ func watchTailscaledConfigChanges(ctx context.Context, path string, lc *local.Cl errCh <- fmt.Errorf("failed to add fsnotify watch: %w", err) return } + eventChan = w.Events + errChan = w.Errors } b, err := os.ReadFile(path) if err != nil { @@ -205,11 +210,11 @@ func watchTailscaledConfigChanges(ctx context.Context, path string, lc *local.Cl select { case <-ctx.Done(): return - case err := <-w.Errors: + case err := <-errChan: errCh <- fmt.Errorf("watcher error: %w", err) return case <-tickChan: - case event := <-w.Events: + case event := <-eventChan: if event.Name != toWatch { continue } From 90273a7f70a16b4f8de0ac0f70ccc39ad4e1c5ff Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Thu, 27 Feb 2025 18:55:46 -0500 Subject: [PATCH 0539/1708] safesocket: return an error for LocalTCPPortAndToken for tailscaled (#15144) fixes tailscale/corp#26806 Fixes a regression where LocalTCPPortAndToken needs to error out early if we're not running as sandboxed macos so that we attempt to connect using the normal unix machinery. 
Signed-off-by: Jonathan Nobels --- safesocket/safesocket_darwin.go | 16 +++++++++++----- safesocket/safesocket_darwin_test.go | 3 +++ 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/safesocket/safesocket_darwin.go b/safesocket/safesocket_darwin.go index fbcd7aaa6..f6e46bc50 100644 --- a/safesocket/safesocket_darwin.go +++ b/safesocket/safesocket_darwin.go @@ -37,14 +37,16 @@ type safesocketDarwin struct { sameuserproofFD *os.File // file descriptor for macos app store sameuserproof file sharedDir string // shared directory for location of sameuserproof file - checkConn bool // Check macsys safesocket port before returning it - isMacSysExt func() bool // For testing only to force macsys + checkConn bool // Check macsys safesocket port before returning it + isMacSysExt func() bool // For testing only to force macsys + isSandboxedMacos func() bool // For testing only to force macOS sandbox } var ssd = safesocketDarwin{ - isMacSysExt: version.IsMacSysExt, - checkConn: true, - sharedDir: "/Library/Tailscale", + isMacSysExt: version.IsMacSysExt, + isSandboxedMacos: version.IsSandboxedMacOS, + checkConn: true, + sharedDir: "/Library/Tailscale", } // There are three ways a Darwin binary can be run: as the Mac App Store (macOS) @@ -66,6 +68,10 @@ func localTCPPortAndTokenDarwin() (port int, token string, err error) { ssd.mu.Lock() defer ssd.mu.Unlock() + if !ssd.isSandboxedMacos() { + return 0, "", ErrNoTokenOnOS + } + if ssd.port != 0 && ssd.token != "" { return ssd.port, ssd.token, nil } diff --git a/safesocket/safesocket_darwin_test.go b/safesocket/safesocket_darwin_test.go index 80f0dcddd..465ac0b68 100644 --- a/safesocket/safesocket_darwin_test.go +++ b/safesocket/safesocket_darwin_test.go @@ -17,6 +17,7 @@ import ( func TestSetCredentials(t *testing.T) { wantPort := 123 wantToken := "token" + tstest.Replace(t, &ssd.isSandboxedMacos, func() bool { return true }) SetCredentials(wantToken, wantPort) gotPort, gotToken, err := LocalTCPPortAndToken() @@ -37,6 +38,8 @@ func TestSetCredentials(t *testing.T) { // returns a listener and a non-zero port and non-empty token. func TestInitListenerDarwin(t *testing.T) { temp := t.TempDir() + tstest.Replace(t, &ssd.isSandboxedMacos, func() bool { return true }) + ln, err := InitListenerDarwin(temp) if err != nil || ln == nil { t.Fatalf("InitListenerDarwin failed: %v", err) From 7180812f47c8ebdee2a9837671b7a4b4d376a3f8 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Thu, 27 Feb 2025 13:40:43 -0800 Subject: [PATCH 0540/1708] licenses: add README Add description of the license reports in this directory and brief instructions for reviewers. I recently needed to convert these to CSV, so I also wanted to place to stash that regex so I didn't lose it. Updates tailscale/corp#5780 Signed-off-by: Will Norris --- licenses/README.md | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 licenses/README.md diff --git a/licenses/README.md b/licenses/README.md new file mode 100644 index 000000000..46fe8b77f --- /dev/null +++ b/licenses/README.md @@ -0,0 +1,35 @@ +# Licenses + +This directory contains a list of dependencies, and their licenses, that are included in the Tailscale clients. +These lists are generated using the [go-licenses] tool to analyze all Go packages in the Tailscale binaries, +as well as a set of custom output templates that includes any additional non-Go dependencies. +For example, the clients for macOS and iOS include some additional Swift libraries. 
+ +These lists are updated roughly every week, so it is possible to see the dependencies in a given release by looking at the release tag. +For example, the dependences for the 1.80.0 release of the macOS client can be seen at +. + +[go-licenses]: https://github.com/google/go-licenses + +## Other formats + +The go-licenses tool can output other formats like CSV, but that wouldn't include the non-Go dependencies. +We can generate a CSV file if that's really needed by running a regex over the markdown files: + +```sh +cat apple.md | grep "^ -" | sed -E "s/- \[(.*)\]\(.*?\) \(\[(.*)\]\((.*)\)\)/\1,\2,\3/" +``` + +## Reviewer instructions + +The majority of changes in this directory are from updating dependency versions. +In that case, only the URL for the license file will change to reflect the new version. +Occasionally, a dependency is added or removed, or the import path is changed. + +New dependencies require the closest review to ensure the license is acceptable. +Because we generate the license reports **after** dependencies are changed, +the new dependency would have already gone through one review when it was initially added. +This is just a secondary review to double-check the license. If in doubt, ask legal. + +Always do a normal GitHub code review on the license PR with a brief summary of what changed. +For example, see #13936 or #14064. Then approve and merge the PR. From 2791b5d5cc7f377da1c0a60e193e8cdaf37cd8b5 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 28 Feb 2025 01:28:08 -0800 Subject: [PATCH 0541/1708] go.{mod,sum}: bump mkctr (#15161) Updates tailscale/tailscale#15159 Signed-off-by: Irbe Krumina --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e6f3141a0..106538e94 100644 --- a/go.mod +++ b/go.mod @@ -77,7 +77,7 @@ require ( github.com/tailscale/golang-x-crypto v0.0.0-20250218230618-9a281fd8faca github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a - github.com/tailscale/mkctr v0.0.0-20250110151924-54977352e4a6 + github.com/tailscale/mkctr v0.0.0-20250228050937-c75ea1476830 github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb diff --git a/go.sum b/go.sum index 0c8704674..efbf8ae2b 100644 --- a/go.sum +++ b/go.sum @@ -906,8 +906,8 @@ github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPx github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= -github.com/tailscale/mkctr v0.0.0-20250110151924-54977352e4a6 h1:9SuADtKJAGQkIpnpg5znEJ86QaxacN25pHkiEXTDjzg= -github.com/tailscale/mkctr v0.0.0-20250110151924-54977352e4a6/go.mod h1:qTslktI+Qh9hXo7ZP8xLkl5V8AxUMfxG0xLtkCFLxnw= +github.com/tailscale/mkctr v0.0.0-20250228050937-c75ea1476830 h1:SwZ72kr1oRzzSPA5PYB4hzPh22UI0nm0dapn3bHaUPs= +github.com/tailscale/mkctr v0.0.0-20250228050937-c75ea1476830/go.mod h1:qTslktI+Qh9hXo7ZP8xLkl5V8AxUMfxG0xLtkCFLxnw= github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod 
h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= From 8c2717f96a54d1bf0d543a78afc766913a3cf9ac Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Fri, 28 Feb 2025 13:51:07 -0500 Subject: [PATCH 0542/1708] ipn/ipnlocal: send vipServices info via c2n even it's incomplete (#15166) This commit updates the logic of vipServicesFromPrefsLocked, so that it would return the vipServices list even when service host is only advertising the service but not yet serving anything. This makes control always get accurate state of service host in terms of serving a service. Fixes tailscale/corp#26843 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- ipn/ipnlocal/local.go | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index fec5c166f..4f94a55a1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -8222,15 +8222,13 @@ func (b *LocalBackend) vipServiceHash(services []*tailcfg.VIPService) string { func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService { // keyed by service name var services map[tailcfg.ServiceName]*tailcfg.VIPService - if !b.serveConfig.Valid() { - return nil - } - - for svc, config := range b.serveConfig.Services().All() { - mak.Set(&services, svc, &tailcfg.VIPService{ - Name: svc, - Ports: config.ServicePortRange(), - }) + if b.serveConfig.Valid() { + for svc, config := range b.serveConfig.Services().All() { + mak.Set(&services, svc, &tailcfg.VIPService{ + Name: svc, + Ports: config.ServicePortRange(), + }) + } } for _, s := range prefs.AdvertiseServices().All() { From ef906763ee5a7e5e22eaf5336dd020532d9b6964 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Thu, 27 Feb 2025 16:31:56 -0800 Subject: [PATCH 0543/1708] util/eventbus: initial implementation of an in-process event bus Updates #15160 Signed-off-by: David Anderson Co-authored-by: M. J. 
Fromberger --- go.mod | 1 + go.sum | 4 + util/eventbus/bus.go | 223 +++++++++++++++++++++++++++++++++++++ util/eventbus/bus_test.go | 196 ++++++++++++++++++++++++++++++++ util/eventbus/doc.go | 100 +++++++++++++++++ util/eventbus/publish.go | 79 +++++++++++++ util/eventbus/queue.go | 83 ++++++++++++++ util/eventbus/subscribe.go | 170 ++++++++++++++++++++++++++++ 8 files changed, 856 insertions(+) create mode 100644 util/eventbus/bus.go create mode 100644 util/eventbus/bus_test.go create mode 100644 util/eventbus/doc.go create mode 100644 util/eventbus/publish.go create mode 100644 util/eventbus/queue.go create mode 100644 util/eventbus/subscribe.go diff --git a/go.mod b/go.mod index 106538e94..970e2e63c 100644 --- a/go.mod +++ b/go.mod @@ -20,6 +20,7 @@ require ( github.com/coder/websocket v1.8.12 github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf + github.com/creachadair/taskgroup v0.13.2 github.com/creack/pty v1.1.23 github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e diff --git a/go.sum b/go.sum index efbf8ae2b..1707effd5 100644 --- a/go.sum +++ b/go.sum @@ -231,6 +231,8 @@ github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/mds v0.17.1 h1:lXQbTGKmb3nE3aK6OEp29L1gCx6B5ynzlQ6c1KOBurc= github.com/creachadair/mds v0.17.1/go.mod h1:4b//mUiL8YldH6TImXjmW45myzTLNS1LLjOmrk888eg= +github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc= +github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= @@ -298,6 +300,8 @@ github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phm github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go new file mode 100644 index 000000000..85d73b15e --- /dev/null +++ b/util/eventbus/bus.go @@ -0,0 +1,223 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +import ( + "context" + "reflect" + "slices" + "sync" + + "tailscale.com/util/set" +) + +// Bus is an event bus that distributes published events to interested +// subscribers. +type Bus struct { + write chan any + stop goroutineShutdownControl + snapshot chan chan []any + + topicsMu sync.Mutex // guards everything below. 
+ topics map[reflect.Type][]*Queue + + // Used for introspection/debugging only, not in the normal event + // publishing path. + publishers set.Set[publisher] + queues set.Set[*Queue] +} + +// New returns a new bus. Use [PublisherOf] to make event publishers, +// and [Bus.Queue] and [Subscribe] to make event subscribers. +func New() *Bus { + stopCtl, stopWorker := newGoroutineShutdown() + ret := &Bus{ + write: make(chan any), + stop: stopCtl, + snapshot: make(chan chan []any), + topics: map[reflect.Type][]*Queue{}, + publishers: set.Set[publisher]{}, + queues: set.Set[*Queue]{}, + } + go ret.pump(stopWorker) + return ret +} + +func (b *Bus) pump(stop goroutineShutdownWorker) { + defer stop.Done() + var vals queue + acceptCh := func() chan any { + if vals.Full() { + return nil + } + return b.write + } + for { + // Drain all pending events. Note that while we're draining + // events into subscriber queues, we continue to + // opportunistically accept more incoming events, if we have + // queue space for it. + for !vals.Empty() { + val := vals.Peek() + dests := b.dest(reflect.ValueOf(val).Type()) + for _, d := range dests { + deliverOne: + for { + select { + case d.write <- val: + break deliverOne + case <-d.stop.WaitChan(): + // Queue closed, don't block but continue + // delivering to others. + break deliverOne + case in := <-acceptCh(): + vals.Add(in) + case <-stop.Stop(): + return + case ch := <-b.snapshot: + ch <- vals.Snapshot() + } + } + } + vals.Drop() + } + + // Inbound queue empty, wait for at least 1 work item before + // resuming. + for vals.Empty() { + select { + case <-stop.Stop(): + return + case val := <-b.write: + vals.Add(val) + case ch := <-b.snapshot: + ch <- nil + } + } + } +} + +func (b *Bus) dest(t reflect.Type) []*Queue { + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + return b.topics[t] +} + +func (b *Bus) subscribe(t reflect.Type, q *Queue) (cancel func()) { + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + b.topics[t] = append(b.topics[t], q) + return func() { + b.unsubscribe(t, q) + } +} + +func (b *Bus) unsubscribe(t reflect.Type, q *Queue) { + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + // Topic slices are accessed by pump without holding a lock, so we + // have to replace the entire slice when unsubscribing. + // Unsubscribing should be infrequent enough that this won't + // matter. + i := slices.Index(b.topics[t], q) + if i < 0 { + return + } + b.topics[t] = slices.Delete(slices.Clone(b.topics[t]), i, i+1) +} + +func (b *Bus) Close() { + b.stop.StopAndWait() +} + +// Queue returns a new queue with no subscriptions. Use [Subscribe] to +// atach subscriptions to it. +// +// The queue's name should be a short, human-readable string that +// identifies this queue. The name is only visible through debugging +// APIs. 
+func (b *Bus) Queue(name string) *Queue { + return newQueue(b, name) +} + +func (b *Bus) addQueue(q *Queue) { + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + b.queues.Add(q) +} + +func (b *Bus) deleteQueue(q *Queue) { + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + b.queues.Delete(q) +} + +func (b *Bus) addPublisher(p publisher) { + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + b.publishers.Add(p) +} + +func (b *Bus) deletePublisher(p publisher) { + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + b.publishers.Delete(p) +} + +func newGoroutineShutdown() (goroutineShutdownControl, goroutineShutdownWorker) { + ctx, cancel := context.WithCancel(context.Background()) + + ctl := goroutineShutdownControl{ + startShutdown: cancel, + shutdownFinished: make(chan struct{}), + } + work := goroutineShutdownWorker{ + startShutdown: ctx.Done(), + shutdownFinished: ctl.shutdownFinished, + } + + return ctl, work +} + +// goroutineShutdownControl is a helper type to manage the shutdown of +// a worker goroutine. The worker goroutine should use the +// goroutineShutdownWorker related to this controller. +type goroutineShutdownControl struct { + startShutdown context.CancelFunc + shutdownFinished chan struct{} +} + +func (ctl *goroutineShutdownControl) Stop() { + ctl.startShutdown() +} + +func (ctl *goroutineShutdownControl) Wait() { + <-ctl.shutdownFinished +} + +func (ctl *goroutineShutdownControl) WaitChan() <-chan struct{} { + return ctl.shutdownFinished +} + +func (ctl *goroutineShutdownControl) StopAndWait() { + ctl.Stop() + ctl.Wait() +} + +// goroutineShutdownWorker is a helper type for a worker goroutine to +// be notified that it should shut down, and to report that shutdown +// has completed. The notification is triggered by the related +// goroutineShutdownControl. 
+type goroutineShutdownWorker struct { + startShutdown <-chan struct{} + shutdownFinished chan struct{} +} + +func (work *goroutineShutdownWorker) Stop() <-chan struct{} { + return work.startShutdown +} + +func (work *goroutineShutdownWorker) Done() { + close(work.shutdownFinished) +} diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go new file mode 100644 index 000000000..180f4164a --- /dev/null +++ b/util/eventbus/bus_test.go @@ -0,0 +1,196 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus_test + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/creachadair/taskgroup" + "github.com/google/go-cmp/cmp" + "tailscale.com/util/eventbus" +) + +type EventA struct { + Counter int +} + +type EventB struct { + Counter int +} + +func TestBus(t *testing.T) { + b := eventbus.New() + defer b.Close() + + q := b.Queue("TestBus") + defer q.Close() + s := eventbus.Subscribe[EventA](q) + + go func() { + pa := eventbus.PublisherOf[EventA](b, "TestBusA") + defer pa.Close() + pb := eventbus.PublisherOf[EventB](b, "TestBusB") + defer pb.Close() + pa.Publish(EventA{1}) + pb.Publish(EventB{2}) + pa.Publish(EventA{3}) + }() + + want := expectEvents(t, EventA{1}, EventA{3}) + for !want.Empty() { + select { + case got := <-s.Events(): + want.Got(got) + case <-q.Done(): + t.Fatalf("queue closed unexpectedly") + case <-time.After(time.Second): + t.Fatalf("timed out waiting for event") + } + } +} + +func TestBusMultipleConsumers(t *testing.T) { + b := eventbus.New() + defer b.Close() + + q1 := b.Queue("TestBusA") + defer q1.Close() + s1 := eventbus.Subscribe[EventA](q1) + + q2 := b.Queue("TestBusAB") + defer q2.Close() + s2A := eventbus.Subscribe[EventA](q2) + s2B := eventbus.Subscribe[EventB](q2) + + go func() { + pa := eventbus.PublisherOf[EventA](b, "TestBusA") + defer pa.Close() + pb := eventbus.PublisherOf[EventB](b, "TestBusB") + defer pb.Close() + pa.Publish(EventA{1}) + pb.Publish(EventB{2}) + pa.Publish(EventA{3}) + }() + + wantA := expectEvents(t, EventA{1}, EventA{3}) + wantB := expectEvents(t, EventA{1}, EventB{2}, EventA{3}) + for !wantA.Empty() || !wantB.Empty() { + select { + case got := <-s1.Events(): + wantA.Got(got) + case got := <-s2A.Events(): + wantB.Got(got) + case got := <-s2B.Events(): + wantB.Got(got) + case <-q1.Done(): + t.Fatalf("queue closed unexpectedly") + case <-q2.Done(): + t.Fatalf("queue closed unexpectedly") + case <-time.After(time.Second): + t.Fatalf("timed out waiting for event") + } + } +} + +func TestSpam(t *testing.T) { + b := eventbus.New() + defer b.Close() + + const ( + publishers = 100 + eventsPerPublisher = 20 + wantEvents = publishers * eventsPerPublisher + subscribers = 100 + ) + + var g taskgroup.Group + + received := make([][]EventA, subscribers) + for i := range subscribers { + q := b.Queue(fmt.Sprintf("Subscriber%d", i)) + defer q.Close() + s := eventbus.Subscribe[EventA](q) + g.Go(func() error { + for range wantEvents { + select { + case evt := <-s.Events(): + received[i] = append(received[i], evt) + case <-q.Done(): + t.Errorf("queue done before expected number of events received") + return errors.New("queue prematurely closed") + case <-time.After(5 * time.Second): + t.Errorf("timed out waiting for expected bus event after %d events", len(received[i])) + return errors.New("timeout") + } + } + return nil + }) + } + + published := make([][]EventA, publishers) + for i := range publishers { + g.Run(func() { + p := eventbus.PublisherOf[EventA](b, fmt.Sprintf("Publisher%d", i)) + 
for j := range eventsPerPublisher { + evt := EventA{i*eventsPerPublisher + j} + p.Publish(evt) + published[i] = append(published[i], evt) + } + }) + } + + if err := g.Wait(); err != nil { + t.Fatal(err) + } + var last []EventA + for i, got := range received { + if len(got) != wantEvents { + // Receiving goroutine already reported an error, we just need + // to fail early within the main test goroutine. + t.FailNow() + } + if last == nil { + continue + } + if diff := cmp.Diff(got, last); diff != "" { + t.Errorf("Subscriber %d did not see the same events as %d (-got+want):\n%s", i, i-1, diff) + } + last = got + } + for i, sent := range published { + if got := len(sent); got != eventsPerPublisher { + t.Fatalf("Publisher %d sent %d events, want %d", i, got, eventsPerPublisher) + } + } + + // TODO: check that the published sequences are proper + // subsequences of the received slices. +} + +type queueChecker struct { + t *testing.T + want []any +} + +func expectEvents(t *testing.T, want ...any) *queueChecker { + return &queueChecker{t, want} +} + +func (q *queueChecker) Got(v any) { + q.t.Helper() + if q.Empty() { + q.t.Fatalf("queue got unexpected %v", v) + } + if v != q.want[0] { + q.t.Fatalf("queue got %#v, want %#v", v, q.want[0]) + } + q.want = q.want[1:] +} + +func (q *queueChecker) Empty() bool { + return len(q.want) == 0 +} diff --git a/util/eventbus/doc.go b/util/eventbus/doc.go new file mode 100644 index 000000000..136823c42 --- /dev/null +++ b/util/eventbus/doc.go @@ -0,0 +1,100 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package eventbus provides an in-process event bus. +// +// The event bus connects publishers of typed events with subscribers +// interested in those events. +// +// # Usage +// +// To publish events, use [PublisherOf] to get a typed publisher for +// your event type, then call [Publisher.Publish] as needed. If your +// event is expensive to construct, you can optionally use +// [Publisher.ShouldPublish] to skip the work if nobody is listening +// for the event. +// +// To receive events, first use [Bus.Queue] to create an event +// delivery queue, then use [Subscribe] to get a [Subscriber] for each +// event type you're interested in. Receive the events themselves by +// selecting over all your [Subscriber.Chan] channels, as well as +// [Queue.Done] for shutdown notifications. +// +// # Concurrency properties +// +// The bus serializes all published events, and preserves that +// ordering when delivering to subscribers that are attached to the +// same Queue. In more detail: +// +// - An event is published to the bus at some instant between the +// start and end of the call to [Publisher.Publish]. +// - Events cannot be published at the same instant, and so are +// totally ordered by their publication time. Given two events E1 +// and E2, either E1 happens before E2, or E2 happens before E1. +// - Queues dispatch events to their Subscribers in publication +// order: if E1 happens before E2, the queue always delivers E1 +// before E2. +// - Queues do not synchronize with each other: given queues Q1 and +// Q2, both subscribed to events E1 and E2, Q1 may deliver both E1 +// and E2 before Q2 delivers E1. +// +// Less formally: there is one true timeline of all published events. +// If you make a Queue and subscribe to events on it, you will receive +// those events one at a time, in the same order as the one true +// timeline. 
You will "skip over" events you didn't subscribe to, but +// your view of the world always moves forward in time, never +// backwards, and you will observe events in the same order as +// everyone else. +// +// However, you cannot assume that what your subscribers on your queue +// see as "now" is the same as what other subscribers on other +// queues. Their queue may be further behind you in the timeline, or +// running ahead of you. This means you should be careful about +// reaching out to another component directly after receiving an +// event, as its view of the world may not yet (or ever) be exactly +// consistent with yours. +// +// To make your code more testable and understandable, you should try +// to structure it following the actor model: you have some local +// state over which you have authority, but your only way to interact +// with state elsewhere in the program is to receive and process +// events coming from elsewhere, or to emit events of your own. +// +// # Expected subscriber behavior +// +// Subscribers are expected to promptly receive their events on +// [Subscriber.Chan]. The bus has a small, fixed amount of internal +// buffering, meaning that a slow subscriber will eventually cause +// backpressure and block publication of all further events. +// +// In general, you should receive from your subscriber(s) in a loop, +// and only do fast state updates within that loop. Any heavier work +// should be offloaded to another goroutine. +// +// Causing publishers to block from backpressure is considered a bug +// in the slow subscriber causing the backpressure, and should be +// addressed there. Publishers should assume that Publish will not +// block for extended periods of time, and should not make exceptional +// effort to behave gracefully if they do get blocked. +// +// These blocking semantics are provisional and subject to +// change. Please speak up if this causes development pain, so that we +// can adapt the semantics to better suit our needs. +// +// # Debugging facilities +// +// (TODO, not implemented yet, sorry, I promise we're working on it next!) +// +// The bus comes with introspection facilities to help reason about +// the state of the client, and diagnose issues such as slow +// subscribers. +// +// The bus provide a tsweb debugging page that shows the current state +// of the bus, including all publishers, subscribers, and queued +// events. +// +// The bus also has a snooping and tracing facility, which lets you +// observe all events flowing through the bus, along with their +// source, destination(s) and timing information such as the time of +// delivery to each subscriber and end-to-end bus delays. +package eventbus diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go new file mode 100644 index 000000000..14828812b --- /dev/null +++ b/util/eventbus/publish.go @@ -0,0 +1,79 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +import ( + "context" + "reflect" +) + +// publisher is a uniformly typed wrapper around Publisher[T], so that +// debugging facilities can look at active publishers. +type publisher interface { + publisherName() string +} + +// A Publisher publishes events on the bus. +type Publisher[T any] struct { + bus *Bus + name string + stopCtx context.Context + stop context.CancelFunc +} + +// PublisherOf returns a publisher for event type T on the given bus. 
+// +// The publisher's name should be a short, human-readable string that +// identifies this event publisher. The name is only visible through +// debugging APIs. +func PublisherOf[T any](b *Bus, name string) *Publisher[T] { + ctx, cancel := context.WithCancel(context.Background()) + ret := &Publisher[T]{ + bus: b, + name: name, + stopCtx: ctx, + stop: cancel, + } + b.addPublisher(ret) + return ret +} + +func (p *Publisher[T]) publisherName() string { return p.name } + +// Publish publishes event v on the bus. +func (p *Publisher[T]) Publish(v T) { + // Check for just a stopped publisher or bus before trying to + // write, so that once closed Publish consistently does nothing. + select { + case <-p.stopCtx.Done(): + return + case <-p.bus.stop.WaitChan(): + return + default: + } + + select { + case p.bus.write <- v: + case <-p.stopCtx.Done(): + case <-p.bus.stop.WaitChan(): + } +} + +// ShouldPublish reports whether anyone is subscribed to events of +// type T. +// +// ShouldPublish can be used to skip expensive event construction if +// nobody seems to care. Publishers must not assume that someone will +// definitely receive an event if ShouldPublish returns true. +func (p *Publisher[T]) ShouldPublish() bool { + dests := p.bus.dest(reflect.TypeFor[T]()) + return len(dests) > 0 +} + +// Close closes the publisher, indicating that no further events will +// be published with it. +func (p *Publisher[T]) Close() { + p.stop() + p.bus.deletePublisher(p) +} diff --git a/util/eventbus/queue.go b/util/eventbus/queue.go new file mode 100644 index 000000000..8f6bda748 --- /dev/null +++ b/util/eventbus/queue.go @@ -0,0 +1,83 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +import ( + "slices" +) + +const maxQueuedItems = 16 + +// queue is an ordered queue of length up to maxQueuedItems. +type queue struct { + vals []any + start int +} + +// canAppend reports whether a value can be appended to q.vals without +// shifting values around. +func (q *queue) canAppend() bool { + return cap(q.vals) < maxQueuedItems || len(q.vals) < cap(q.vals) +} + +func (q *queue) Full() bool { + return q.start == 0 && !q.canAppend() +} + +func (q *queue) Empty() bool { + return q.start == len(q.vals) +} + +func (q *queue) Len() int { + return len(q.vals) - q.start +} + +// Add adds v to the end of the queue. Blocks until append can be +// done. +func (q *queue) Add(v any) { + if !q.canAppend() { + if q.start == 0 { + panic("Add on a full queue") + } + + // Slide remaining values back to the start of the array. + n := copy(q.vals, q.vals[q.start:]) + toClear := len(q.vals) - n + clear(q.vals[len(q.vals)-toClear:]) + q.vals = q.vals[:n] + q.start = 0 + } + + q.vals = append(q.vals, v) +} + +// Peek returns the first value in the queue, without removing it from +// the queue, or nil if the queue is empty. +func (q *queue) Peek() any { + if q.Empty() { + return nil + } + + return q.vals[q.start] +} + +// Drop discards the first value in the queue, if any. +func (q *queue) Drop() { + if q.Empty() { + return + } + + q.vals[q.start] = nil + q.start++ + if q.Empty() { + // Reset cursor to start of array, it's free to do. + q.start = 0 + q.vals = q.vals[:0] + } +} + +// Snapshot returns a copy of the queue's contents. 
+func (q *queue) Snapshot() []any { + return slices.Clone(q.vals[q.start:]) +} diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go new file mode 100644 index 000000000..ade834d77 --- /dev/null +++ b/util/eventbus/subscribe.go @@ -0,0 +1,170 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +import ( + "fmt" + "reflect" + "sync" +) + +type dispatchFn func(vals *queue, stop goroutineShutdownWorker, acceptCh func() chan any) bool + +// A Queue receives events from a Bus. +// +// To receive events through the queue, see [Subscribe]. Subscribers +// that share the same Queue receive events one at time, in the order +// they were published. +type Queue struct { + bus *Bus + name string + + write chan any + stop goroutineShutdownControl + snapshot chan chan []any + + outputsMu sync.Mutex + outputs map[reflect.Type]dispatchFn +} + +func newQueue(b *Bus, name string) *Queue { + stopCtl, stopWorker := newGoroutineShutdown() + ret := &Queue{ + bus: b, + name: name, + write: make(chan any), + stop: stopCtl, + snapshot: make(chan chan []any), + outputs: map[reflect.Type]dispatchFn{}, + } + b.addQueue(ret) + go ret.pump(stopWorker) + return ret +} + +func (q *Queue) pump(stop goroutineShutdownWorker) { + defer stop.Done() + var vals queue + acceptCh := func() chan any { + if vals.Full() { + return nil + } + return q.write + } + for { + if !vals.Empty() { + val := vals.Peek() + fn := q.dispatchFn(val) + if fn == nil { + // Raced with unsubscribe. + vals.Drop() + continue + } + if !fn(&vals, stop, acceptCh) { + return + } + } else { + // Keep the cases in this select in sync with + // Subscriber.dispatch below. The only different should be + // that this select doesn't deliver queued values to + // anyone, and unconditionally accepts new values. + select { + case val := <-q.write: + vals.Add(val) + case <-stop.Stop(): + return + case ch := <-q.snapshot: + ch <- vals.Snapshot() + } + } + } +} + +// A Subscriber delivers one type of event from a [Queue]. +type Subscriber[T any] struct { + recv *Queue + read chan T +} + +func (s *Subscriber[T]) dispatch(vals *queue, stop goroutineShutdownWorker, acceptCh func() chan any) bool { + t := vals.Peek().(T) + for { + // Keep the cases in this select in sync with Queue.pump + // above. The only different should be that this select + // delivers a value on s.read. + select { + case s.read <- t: + vals.Drop() + return true + case val := <-acceptCh(): + vals.Add(val) + case <-stop.Stop(): + return false + case ch := <-s.recv.snapshot: + ch <- vals.Snapshot() + } + } +} + +// Events returns a channel on which the subscriber's events are +// delivered. +func (s *Subscriber[T]) Events() <-chan T { + return s.read +} + +// Close shuts down the Subscriber, indicating the caller no longer +// wishes to receive these events. After Close, receives on +// [Subscriber.Chan] block for ever. 
+func (s *Subscriber[T]) Close() { + t := reflect.TypeFor[T]() + s.recv.bus.unsubscribe(t, s.recv) + s.recv.deleteDispatchFn(t) +} + +func (q *Queue) dispatchFn(val any) dispatchFn { + q.outputsMu.Lock() + defer q.outputsMu.Unlock() + return q.outputs[reflect.ValueOf(val).Type()] +} + +func (q *Queue) addDispatchFn(t reflect.Type, fn dispatchFn) { + q.outputsMu.Lock() + defer q.outputsMu.Unlock() + if q.outputs[t] != nil { + panic(fmt.Errorf("double subscription for event %s", t)) + } + q.outputs[t] = fn +} + +func (q *Queue) deleteDispatchFn(t reflect.Type) { + q.outputsMu.Lock() + defer q.outputsMu.Unlock() + delete(q.outputs, t) +} + +// Done returns a channel that is closed when the Queue is closed. +func (q *Queue) Done() <-chan struct{} { + return q.stop.WaitChan() +} + +// Close closes the queue. All Subscribers attached to the queue are +// implicitly closed, and any pending events are discarded. +func (q *Queue) Close() { + q.stop.StopAndWait() + q.bus.deleteQueue(q) +} + +// Subscribe requests delivery of events of type T through the given +// Queue. Panics if the queue already has a subscriber for T. +func Subscribe[T any](r *Queue) Subscriber[T] { + t := reflect.TypeFor[T]() + ret := Subscriber[T]{ + recv: r, + read: make(chan T), + } + r.addDispatchFn(t, ret.dispatch) + r.bus.subscribe(t, r) + + return ret +} From 74d7d8a77b14abb8ce31c9c81f5b2dbee03eec96 Mon Sep 17 00:00:00 2001 From: Lee Briggs Date: Fri, 24 Jan 2025 11:15:28 -0800 Subject: [PATCH 0544/1708] ipn/store/awsstore: allow providing a KMS key Implements a KMS input for AWS parameter to support encrypting Tailscale state Fixes #14765 Change-Id: I39c0fae4bfd60a9aec17c5ea6a61d0b57143d4ba Co-authored-by: Brad Fitzpatrick Signed-off-by: Lee Briggs --- ipn/store/awsstore/store_aws.go | 111 ++++++++++++++++++++++----- ipn/store/awsstore/store_aws_stub.go | 18 ----- ipn/store/awsstore/store_aws_test.go | 61 ++++++++++++++- ipn/store/store_aws.go | 10 ++- 4 files changed, 157 insertions(+), 43 deletions(-) delete mode 100644 ipn/store/awsstore/store_aws_stub.go diff --git a/ipn/store/awsstore/store_aws.go b/ipn/store/awsstore/store_aws.go index 0fb78d45a..40bbbf037 100644 --- a/ipn/store/awsstore/store_aws.go +++ b/ipn/store/awsstore/store_aws.go @@ -10,7 +10,9 @@ import ( "context" "errors" "fmt" + "net/url" "regexp" + "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/arn" @@ -28,6 +30,14 @@ const ( var parameterNameRx = regexp.MustCompile(parameterNameRxStr) +// Option defines a functional option type for configuring awsStore. +type Option func(*storeOptions) + +// storeOptions holds optional settings for creating a new awsStore. +type storeOptions struct { + kmsKey string +} + // awsSSMClient is an interface allowing us to mock the couple of // API calls we are leveraging with the AWSStore provider type awsSSMClient interface { @@ -46,6 +56,10 @@ type awsStore struct { ssmClient awsSSMClient ssmARN arn.ARN + // kmsKey is optional. If empty, the parameter is stored in plaintext. + // If non-empty, the parameter is encrypted with this KMS key. + kmsKey string + memory mem.Store } @@ -57,30 +71,80 @@ type awsStore struct { // Tailscaled to only only store new state in-memory and // restarting Tailscaled can fail until you delete your state // from the AWS Parameter Store. -func New(_ logger.Logf, ssmARN string) (ipn.StateStore, error) { - return newStore(ssmARN, nil) +// +// If you want to specify an optional KMS key, +// pass one or more Option objects, e.g. awsstore.WithKeyID("alias/my-key"). 
+func New(_ logger.Logf, ssmARN string, opts ...Option) (ipn.StateStore, error) { + // Apply all options to an empty storeOptions + var so storeOptions + for _, opt := range opts { + opt(&so) + } + + return newStore(ssmARN, so, nil) +} + +// WithKeyID sets the KMS key to be used for encryption. It can be +// a KeyID, an alias ("alias/my-key"), or a full ARN. +// +// If kmsKey is empty, the Option is a no-op. +func WithKeyID(kmsKey string) Option { + return func(o *storeOptions) { + o.kmsKey = kmsKey + } +} + +// ParseARNAndOpts parses an ARN and optional URL-encoded parameters +// from arg. +func ParseARNAndOpts(arg string) (ssmARN string, opts []Option, err error) { + ssmARN = arg + + // Support optional ?url-encoded-parameters. + if s, q, ok := strings.Cut(arg, "?"); ok { + ssmARN = s + q, err := url.ParseQuery(q) + if err != nil { + return "", nil, err + } + + for k := range q { + switch k { + default: + return "", nil, fmt.Errorf("unknown arn option parameter %q", k) + case "kmsKey": + // We allow an ARN, a key ID, or an alias name for kmsKeyID. + // If it doesn't look like an ARN and doesn't have a '/', + // prepend "alias/" for KMS alias references. + kmsKey := q.Get(k) + if kmsKey != "" && + !strings.Contains(kmsKey, "/") && + !strings.HasPrefix(kmsKey, "arn:") { + kmsKey = "alias/" + kmsKey + } + if kmsKey != "" { + opts = append(opts, WithKeyID(kmsKey)) + } + } + } + } + return ssmARN, opts, nil } // newStore is NewStore, but for tests. If client is non-nil, it's // used instead of making one. -func newStore(ssmARN string, client awsSSMClient) (ipn.StateStore, error) { +func newStore(ssmARN string, so storeOptions, client awsSSMClient) (ipn.StateStore, error) { s := &awsStore{ ssmClient: client, + kmsKey: so.kmsKey, } var err error - - // Parse the ARN if s.ssmARN, err = arn.Parse(ssmARN); err != nil { return nil, fmt.Errorf("unable to parse the ARN correctly: %v", err) } - - // Validate the ARN corresponds to the SSM service if s.ssmARN.Service != "ssm" { return nil, fmt.Errorf("invalid service %q, expected 'ssm'", s.ssmARN.Service) } - - // Validate the ARN corresponds to a parameter store resource if !parameterNameRx.MatchString(s.ssmARN.Resource) { return nil, fmt.Errorf("invalid resource %q, expected to match %v", s.ssmARN.Resource, parameterNameRxStr) } @@ -96,12 +160,11 @@ func newStore(ssmARN string, client awsSSMClient) (ipn.StateStore, error) { s.ssmClient = ssm.NewFromConfig(cfg) } - // Hydrate cache with the potentially current state + // Preload existing state, if any if err := s.LoadState(); err != nil { return nil, err } return s, nil - } // LoadState attempts to read the state from AWS SSM parameter store key. @@ -172,15 +235,21 @@ func (s *awsStore) persistState() error { // which is free. 
However, if it exceeds 4kb it switches the parameter to advanced tiering // doubling the capacity to 8kb per the following docs: // https://aws.amazon.com/about-aws/whats-new/2019/08/aws-systems-manager-parameter-store-announces-intelligent-tiering-to-enable-automatic-parameter-tier-selection/ - _, err = s.ssmClient.PutParameter( - context.TODO(), - &ssm.PutParameterInput{ - Name: aws.String(s.ParameterName()), - Value: aws.String(string(bs)), - Overwrite: aws.Bool(true), - Tier: ssmTypes.ParameterTierIntelligentTiering, - Type: ssmTypes.ParameterTypeSecureString, - }, - ) + in := &ssm.PutParameterInput{ + Name: aws.String(s.ParameterName()), + Value: aws.String(string(bs)), + Overwrite: aws.Bool(true), + Tier: ssmTypes.ParameterTierIntelligentTiering, + Type: ssmTypes.ParameterTypeSecureString, + } + + // If kmsKey is specified, encrypt with that key + // NOTE: this input allows any alias, keyID or ARN + // If this isn't specified, AWS will use the default KMS key + if s.kmsKey != "" { + in.KeyId = aws.String(s.kmsKey) + } + + _, err = s.ssmClient.PutParameter(context.TODO(), in) return err } diff --git a/ipn/store/awsstore/store_aws_stub.go b/ipn/store/awsstore/store_aws_stub.go deleted file mode 100644 index 8d2156ce9..000000000 --- a/ipn/store/awsstore/store_aws_stub.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux || ts_omit_aws - -package awsstore - -import ( - "fmt" - "runtime" - - "tailscale.com/ipn" - "tailscale.com/types/logger" -) - -func New(logger.Logf, string) (ipn.StateStore, error) { - return nil, fmt.Errorf("AWS store is not supported on %v", runtime.GOOS) -} diff --git a/ipn/store/awsstore/store_aws_test.go b/ipn/store/awsstore/store_aws_test.go index f6c8fedb3..3382635a7 100644 --- a/ipn/store/awsstore/store_aws_test.go +++ b/ipn/store/awsstore/store_aws_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux +//go:build linux && !ts_omit_aws package awsstore @@ -65,7 +65,11 @@ func TestNewAWSStore(t *testing.T) { Resource: "parameter/foo", } - s, err := newStore(storeParameterARN.String(), mc) + opts := storeOptions{ + kmsKey: "arn:aws:kms:eu-west-1:123456789:key/MyCustomKey", + } + + s, err := newStore(storeParameterARN.String(), opts, mc) if err != nil { t.Fatalf("creating aws store failed: %v", err) } @@ -73,7 +77,7 @@ func TestNewAWSStore(t *testing.T) { // Build a brand new file store and check that both IDs written // above are still there. 
- s2, err := newStore(storeParameterARN.String(), mc) + s2, err := newStore(storeParameterARN.String(), opts, mc) if err != nil { t.Fatalf("creating second aws store failed: %v", err) } @@ -162,3 +166,54 @@ func testStoreSemantics(t *testing.T, store ipn.StateStore) { } } } + +func TestParseARNAndOpts(t *testing.T) { + tests := []struct { + name string + arg string + wantARN string + wantKey string + }{ + { + name: "no-key", + arg: "arn:aws:ssm:us-east-1:123456789012:parameter/myTailscaleParam", + wantARN: "arn:aws:ssm:us-east-1:123456789012:parameter/myTailscaleParam", + }, + { + name: "custom-key", + arg: "arn:aws:ssm:us-east-1:123456789012:parameter/myTailscaleParam?kmsKey=alias/MyCustomKey", + wantARN: "arn:aws:ssm:us-east-1:123456789012:parameter/myTailscaleParam", + wantKey: "alias/MyCustomKey", + }, + { + name: "bare-name", + arg: "arn:aws:ssm:us-east-1:123456789012:parameter/myTailscaleParam?kmsKey=Bare", + wantARN: "arn:aws:ssm:us-east-1:123456789012:parameter/myTailscaleParam", + wantKey: "alias/Bare", + }, + { + name: "arn-arg", + arg: "arn:aws:ssm:us-east-1:123456789012:parameter/myTailscaleParam?kmsKey=arn:foo", + wantARN: "arn:aws:ssm:us-east-1:123456789012:parameter/myTailscaleParam", + wantKey: "arn:foo", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + arn, opts, err := ParseARNAndOpts(tt.arg) + if err != nil { + t.Fatalf("New: %v", err) + } + if arn != tt.wantARN { + t.Errorf("ARN = %q; want %q", arn, tt.wantARN) + } + var got storeOptions + for _, opt := range opts { + opt(&got) + } + if got.kmsKey != tt.wantKey { + t.Errorf("kmsKey = %q; want %q", got.kmsKey, tt.wantKey) + } + }) + } +} diff --git a/ipn/store/store_aws.go b/ipn/store/store_aws.go index e164f9de7..d39e84319 100644 --- a/ipn/store/store_aws.go +++ b/ipn/store/store_aws.go @@ -6,7 +6,9 @@ package store import ( + "tailscale.com/ipn" "tailscale.com/ipn/store/awsstore" + "tailscale.com/types/logger" ) func init() { @@ -14,5 +16,11 @@ func init() { } func registerAWSStore() { - Register("arn:", awsstore.New) + Register("arn:", func(logf logger.Logf, arg string) (ipn.StateStore, error) { + ssmARN, opts, err := awsstore.ParseARNAndOpts(arg) + if err != nil { + return nil, err + } + return awsstore.New(logf, ssmARN, opts...) + }) } From dc18091678ebf3928bf3ead518f2d6e979547526 Mon Sep 17 00:00:00 2001 From: kari-ts <135075563+kari-ts@users.noreply.github.com> Date: Fri, 28 Feb 2025 14:17:28 -0800 Subject: [PATCH 0545/1708] ipn: update AddPeer to include TaildropTarget (#15091) We previously were not merging in the TaildropTarget into the PeerStatus because we did not update AddPeer. 
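The fix (sketched here; see the diff below) mirrors how the other optional
fields are merged in AddPeer and copies the field only when it is set:

    if v := st.TaildropTarget; v != TaildropTargetUnknown {
        e.TaildropTarget = v
    }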
Updates tailscale/tailscale#14393 Signed-off-by: kari-ts --- ipn/ipnlocal/local.go | 2 +- ipn/ipnstate/ipnstate.go | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 4f94a55a1..74796a62a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6709,7 +6709,7 @@ func (b *LocalBackend) FileTargets() ([]*apitype.FileTarget, error) { } func (b *LocalBackend) taildropTargetStatus(p tailcfg.NodeView) ipnstate.TaildropTargetStatus { - if b.netMap == nil || b.state != ipn.Running { + if b.state != ipn.Running { return ipnstate.TaildropTargetIpnStateNotRunning } if b.netMap == nil { diff --git a/ipn/ipnstate/ipnstate.go b/ipn/ipnstate/ipnstate.go index bc1ba615d..89c6d7e24 100644 --- a/ipn/ipnstate/ipnstate.go +++ b/ipn/ipnstate/ipnstate.go @@ -216,6 +216,11 @@ type PeerStatusLite struct { } // PeerStatus describes a peer node and its current state. +// WARNING: The fields in PeerStatus are merged by the AddPeer method in the StatusBuilder. +// When adding a new field to PeerStatus, you must update AddPeer to handle merging +// the new field. The AddPeer function is responsible for combining multiple updates +// to the same peer, and any new field that is not merged properly may lead to +// inconsistencies or lost data in the peer status. type PeerStatus struct { ID tailcfg.StableNodeID PublicKey key.NodePublic @@ -533,6 +538,9 @@ func (sb *StatusBuilder) AddPeer(peer key.NodePublic, st *PeerStatus) { if v := st.Capabilities; v != nil { e.Capabilities = v } + if v := st.TaildropTarget; v != TaildropTargetUnknown { + e.TaildropTarget = v + } e.Location = st.Location } From 986daca5eeeffa04bdb184d1ee13f70d04d33ff1 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Sun, 2 Mar 2025 10:22:15 -0800 Subject: [PATCH 0546/1708] scripts/installer.sh: explicitly chmod 0644 installed files (#15171) Updates tailscale/tailscale#15133 Signed-off-by: Irbe Krumina --- scripts/installer.sh | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/scripts/installer.sh b/scripts/installer.sh index 388dd5a56..f3671aff8 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -7,14 +7,6 @@ set -eu -# Ensure that this script runs with the default umask for Linux. In practice, -# this means that files created by this script (such as keyring files) will be -# created with 644 permissions. This ensures that keyrings and other files -# created by this script are readable by installers on systems where the -# umask is set to a more restrictive value. -# See https://github.com/tailscale/tailscale/issues/15133 -umask 022 - # All the code is wrapped in a main function that gets called at the # bottom of the file, so that a truncated partial download doesn't end # up executing half a script. 
@@ -501,10 +493,13 @@ main() { legacy) $CURL "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION.asc" | $SUDO apt-key add - $CURL "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION.list" | $SUDO tee /etc/apt/sources.list.d/tailscale.list + $SUDO chmod 0644 /etc/apt/sources.list.d/tailscale.list ;; keyring) $CURL "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION.noarmor.gpg" | $SUDO tee /usr/share/keyrings/tailscale-archive-keyring.gpg >/dev/null + $SUDO chmod 0644 /usr/share/keyrings/tailscale-archive-keyring.gpg $CURL "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION.tailscale-keyring.list" | $SUDO tee /etc/apt/sources.list.d/tailscale.list + $SUDO chmod 0644 /etc/apt/sources.list.d/tailscale.list ;; esac $SUDO apt-get update From a567f56445d523a89922253ae4902ad19e71c1be Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Mon, 3 Mar 2025 08:04:18 -0800 Subject: [PATCH 0547/1708] ipn/store/kubestore: sanitize keys loaded to in-memory store (#15178) Reads use the sanitized form, so unsanitized keys being stored in memory resulted lookup failures, for example for serve config. Updates tailscale/tailscale#15134 Signed-off-by: Irbe Krumina --- ipn/store/kubestore/store_kube.go | 9 +++++++-- ipn/store/kubestore/store_kube_test.go | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index b4e14c6d3..ecd101c57 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -98,7 +98,11 @@ func (s *Store) updateStateSecret(data map[string][]byte) (err error) { defer func() { if err == nil { for id, bs := range data { - s.memory.WriteState(ipn.StateKey(id), bs) + // The in-memory store does not distinguish between values read from state Secret on + // init and values written to afterwards. Values read from the state + // Secret will always be sanitized, so we also need to sanitize values written to store + // later, so that the Read logic can just lookup keys in sanitized form. + s.memory.WriteState(ipn.StateKey(sanitizeKey(id)), bs) } } if err != nil { @@ -198,8 +202,9 @@ func (s *Store) loadState() (err error) { return nil } -// sanitizeKey converts any value that can be converted to a string into a valid Kubernetes secret key. +// sanitizeKey converts any value that can be converted to a string into a valid Kubernetes Secret key. // Valid characters are alphanumeric, -, _, and . +// https://kubernetes.io/docs/concepts/configuration/secret/#restriction-names-data. func sanitizeKey[T ~string](k T) string { return strings.Map(func(r rune) rune { if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '-' || r == '_' || r == '.' { diff --git a/ipn/store/kubestore/store_kube_test.go b/ipn/store/kubestore/store_kube_test.go index f3c5ac9fb..351458efb 100644 --- a/ipn/store/kubestore/store_kube_test.go +++ b/ipn/store/kubestore/store_kube_test.go @@ -169,7 +169,7 @@ func TestUpdateStateSecret(t *testing.T) { // Verify memory store was updated for k, v := range tt.updates { - got, err := s.memory.ReadState(ipn.StateKey(k)) + got, err := s.memory.ReadState(ipn.StateKey(sanitizeKey(k))) if err != nil { t.Errorf("reading from memory store: %v", err) continue From ce6ce81311cc53df4498f2e8757b52be50801d64 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Fri, 28 Feb 2025 18:30:14 -0600 Subject: [PATCH 0548/1708] ipn/ipnlocal: initialize Taildrive shares when starting backend Previously, it initialized when the backend was created. This caused two problems: 1. 
It would not properly switch when changing profiles. 2. If the backend was created before the profile had been selected, Taildrive's shares were uninitialized. Updates #14825 Signed-off-by: Percy Wegmann --- ipn/ipnlocal/local.go | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 74796a62a..1ce299371 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -618,19 +618,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } } - // initialize Taildrive shares from saved state - fs, ok := b.sys.DriveForRemote.GetOK() - if ok { - currentShares := b.pm.prefs.DriveShares() - if currentShares.Len() > 0 { - var shares []*drive.Share - for _, share := range currentShares.All() { - shares = append(shares, share.AsStruct()) - } - fs.SetShares(shares) - } - } - for name, newFn := range registeredExtensions { ext, err := newFn(logf, sys) if err != nil { @@ -2458,6 +2445,16 @@ func (b *LocalBackend) Start(opts ipn.Options) error { b.logf("Backend: logs: be:%v fe:%v", blid, opts.FrontendLogID) b.sendToLocked(ipn.Notify{Prefs: &prefs}, allClients) + // initialize Taildrive shares from saved state + if fs, ok := b.sys.DriveForRemote.GetOK(); ok { + currentShares := b.pm.CurrentPrefs().DriveShares() + var shares []*drive.Share + for _, share := range currentShares.All() { + shares = append(shares, share.AsStruct()) + } + fs.SetShares(shares) + } + if !loggedOut && (b.hasNodeKeyLocked() || confWantRunning) { // If we know that we're either logged in or meant to be // running, tell the controlclient that it should also assume From 5449aba94c51285ef5c2be322f47d1f7b636cd1b Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Mon, 3 Mar 2025 14:54:57 -0500 Subject: [PATCH 0549/1708] safesocket: correct logic for determining if we're a macOS GUI client (#15187) fixes tailscale/corp#26806 This was still slightly incorrect. We care only if the caller is the macSys or macOs app. isSandBoxedMacOS doesn't give us the correct answer for macSys because technically, macsys isn't sandboxed. 
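The GUI check is now expressed directly in terms of the two GUI variants
(a sketch of the helper added to ssd below; it lives on the struct so tests
can override it):

    isMacGUIApp: func() bool { return version.IsMacAppStore() || version.IsMacSysApp() },

so only the App Store and standalone GUI builds take the TCP
port-and-token path.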
Signed-off-by: Jonathan Nobels --- safesocket/safesocket_darwin.go | 16 ++++++++-------- safesocket/safesocket_darwin_test.go | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/safesocket/safesocket_darwin.go b/safesocket/safesocket_darwin.go index f6e46bc50..5c2717ecf 100644 --- a/safesocket/safesocket_darwin.go +++ b/safesocket/safesocket_darwin.go @@ -37,16 +37,16 @@ type safesocketDarwin struct { sameuserproofFD *os.File // file descriptor for macos app store sameuserproof file sharedDir string // shared directory for location of sameuserproof file - checkConn bool // Check macsys safesocket port before returning it - isMacSysExt func() bool // For testing only to force macsys - isSandboxedMacos func() bool // For testing only to force macOS sandbox + checkConn bool // Check macsys safesocket port before returning it + isMacSysExt func() bool // For testing only to force macsys + isMacGUIApp func() bool // For testing only to force macOS sandbox } var ssd = safesocketDarwin{ - isMacSysExt: version.IsMacSysExt, - isSandboxedMacos: version.IsSandboxedMacOS, - checkConn: true, - sharedDir: "/Library/Tailscale", + isMacSysExt: version.IsMacSysExt, + isMacGUIApp: func() bool { return version.IsMacAppStore() || version.IsMacSysApp() }, + checkConn: true, + sharedDir: "/Library/Tailscale", } // There are three ways a Darwin binary can be run: as the Mac App Store (macOS) @@ -68,7 +68,7 @@ func localTCPPortAndTokenDarwin() (port int, token string, err error) { ssd.mu.Lock() defer ssd.mu.Unlock() - if !ssd.isSandboxedMacos() { + if !ssd.isMacGUIApp() { return 0, "", ErrNoTokenOnOS } diff --git a/safesocket/safesocket_darwin_test.go b/safesocket/safesocket_darwin_test.go index 465ac0b68..2793d6aa3 100644 --- a/safesocket/safesocket_darwin_test.go +++ b/safesocket/safesocket_darwin_test.go @@ -17,7 +17,7 @@ import ( func TestSetCredentials(t *testing.T) { wantPort := 123 wantToken := "token" - tstest.Replace(t, &ssd.isSandboxedMacos, func() bool { return true }) + tstest.Replace(t, &ssd.isMacGUIApp, func() bool { return true }) SetCredentials(wantToken, wantPort) gotPort, gotToken, err := LocalTCPPortAndToken() @@ -38,7 +38,7 @@ func TestSetCredentials(t *testing.T) { // returns a listener and a non-zero port and non-empty token. func TestInitListenerDarwin(t *testing.T) { temp := t.TempDir() - tstest.Replace(t, &ssd.isSandboxedMacos, func() bool { return true }) + tstest.Replace(t, &ssd.isMacGUIApp, func() bool { return true }) ln, err := InitListenerDarwin(temp) if err != nil || ln == nil { From 16a920b96ed9dc1f76b844e340b04f32c89242bf Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Mon, 3 Mar 2025 18:28:26 -0500 Subject: [PATCH 0550/1708] safesocket: add isMacSysExt Check (#15192) fixes tailscale/corp#26806 IsMacSysApp is not returning the correct answer... It looks like the rest of the code base uses isMacSysExt (when what they really want to know is isMacSysApp). To fix the immediate issue (localAPI is broken entirely in corp), we'll add this check to safesocket which lines up with the other usages, despite the confusing naming. 
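Concretely, the GUI check gains the extension case (a one-line sketch of the
change below):

    isMacGUIApp: func() bool { return version.IsMacAppStore() || version.IsMacSysApp() || version.IsMacSysExt() },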
Signed-off-by: Jonathan Nobels --- safesocket/safesocket_darwin.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/safesocket/safesocket_darwin.go b/safesocket/safesocket_darwin.go index 5c2717ecf..fb35ad9df 100644 --- a/safesocket/safesocket_darwin.go +++ b/safesocket/safesocket_darwin.go @@ -44,7 +44,7 @@ type safesocketDarwin struct { var ssd = safesocketDarwin{ isMacSysExt: version.IsMacSysExt, - isMacGUIApp: func() bool { return version.IsMacAppStore() || version.IsMacSysApp() }, + isMacGUIApp: func() bool { return version.IsMacAppStore() || version.IsMacSysApp() || version.IsMacSysExt() }, checkConn: true, sharedDir: "/Library/Tailscale", } From e74a705c6713b325e44a875c0d850b4a5c02223a Mon Sep 17 00:00:00 2001 From: Brian Palmer Date: Tue, 4 Mar 2025 08:47:35 -0700 Subject: [PATCH 0551/1708] cmd/hello: display native ipv4 (#15191) We are soon going to start assigning shared-in nodes a CGNAT IPv4 in the Hello tailnet when necessary, the same way that normal node shares assign a new IPv4 on conflict. But Hello wants to display the node's native IPv4, the one it uses in its own tailnet. That IPv4 isn't available anywhere in the netmap today, because it's not normally needed for anything. We are going to start sending that native IPv4 in the peer node CapMap, only for Hello's netmap responses. This change enables Hello to display that native IPv4 instead, when available. Updates tailscale/corp#25393 Change-Id: I87480b6d318ab028b41ef149eb3ba618bd7f1e08 Signed-off-by: Brian Palmer --- cmd/hello/hello.go | 5 +++++ tailcfg/tailcfg.go | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/cmd/hello/hello.go b/cmd/hello/hello.go index 86f885f54..fa116b28b 100644 --- a/cmd/hello/hello.go +++ b/cmd/hello/hello.go @@ -20,6 +20,7 @@ import ( "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" + "tailscale.com/tailcfg" ) var ( @@ -134,6 +135,10 @@ func tailscaleIP(who *apitype.WhoIsResponse) string { if who == nil { return "" } + vals, err := tailcfg.UnmarshalNodeCapJSON[string](who.Node.CapMap, tailcfg.NodeAttrNativeIPV4) + if err == nil && len(vals) > 0 { + return vals[0] + } for _, nodeIP := range who.Node.Addresses { if nodeIP.Addr().Is4() && nodeIP.IsSingleIP() { return nodeIP.Addr().String() diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index f82c6eb81..b5f49c614 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2440,6 +2440,11 @@ const ( // type float64 representing the duration in seconds. This cap will be // omitted if the tailnet's MaxKeyDuration is the default. NodeAttrMaxKeyDuration NodeCapability = "tailnet.maxKeyDuration" + + // NodeAttrNativeIPV4 contains the IPV4 address of the node in its + // native tailnet. This is currently only sent to Hello, in its + // peer node list. + NodeAttrNativeIPV4 NodeCapability = "native-ipv4" ) // SetDNSRequest is a request to add a DNS record. 
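For context on the Hello change above, the new capability travels in the peer node's CapMap as a JSON string value. A small sketch of producing and consuming it (the map literal and IP are invented for illustration; only the UnmarshalNodeCapJSON call mirrors the hello.go change):

    package main

    import (
        "fmt"

        "tailscale.com/tailcfg"
    )

    func main() {
        // Illustrative only: a peer node whose CapMap carries the node's
        // IPv4 in its own ("native") tailnet as a JSON string.
        n := &tailcfg.Node{
            CapMap: tailcfg.NodeCapMap{
                tailcfg.NodeAttrNativeIPV4: {tailcfg.RawMessage(`"100.99.1.2"`)},
            },
        }

        // Consuming side, the same pattern hello.go uses above.
        if vals, err := tailcfg.UnmarshalNodeCapJSON[string](n.CapMap, tailcfg.NodeAttrNativeIPV4); err == nil && len(vals) > 0 {
            fmt.Println("native IPv4:", vals[0])
        }
    }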
From fa374fa852f6b91656c64f07892c554b27b83e49 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Tue, 4 Mar 2025 11:46:05 +0000 Subject: [PATCH 0552/1708] cmd/testwrapper: Display package-level output Updates tailscale/corp#26861 Signed-off-by: James Sanderson --- cmd/testwrapper/testwrapper.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/cmd/testwrapper/testwrapper.go b/cmd/testwrapper/testwrapper.go index 1df1ef11f..1501c7e97 100644 --- a/cmd/testwrapper/testwrapper.go +++ b/cmd/testwrapper/testwrapper.go @@ -141,7 +141,7 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te } outcome := goOutput.Action if outcome == "build-fail" { - outcome = "FAIL" + outcome = "fail" } pkgTests[""].logs.WriteString(goOutput.Output) ch <- &testAttempt{ @@ -152,7 +152,15 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te logs: pkgTests[""].logs, pkgFinished: true, } + case "output": + // Capture all output from the package except for the final + // "FAIL tailscale.io/control 0.684s" line, as + // printPkgOutcome will output a similar line + if !strings.HasPrefix(goOutput.Output, fmt.Sprintf("FAIL\t%s\t", goOutput.Package)) { + pkgTests[""].logs.WriteString(goOutput.Output) + } } + continue } testName := goOutput.Test @@ -276,7 +284,11 @@ func main() { // when a package times out. failed = true } - os.Stdout.ReadFrom(&tr.logs) + if testingVerbose || tr.outcome == "fail" { + // Output package-level output which is where e.g. + // panics outside tests will be printed + io.Copy(os.Stdout, &tr.logs) + } printPkgOutcome(tr.pkg, tr.outcome, thisRun.attempt, tr.end.Sub(tr.start)) continue } From cae5b97626a44ef425a0b0d3807269fec966a18d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 4 Mar 2025 07:41:44 -0800 Subject: [PATCH 0553/1708] cmd/derper: add --home flag to control home page behavior Updates #12897 Change-Id: I7e9c8de0d2daf92cc32e9f6121bc0874c6672540 Signed-off-by: Brad Fitzpatrick --- cmd/derper/derper.go | 52 +++++++++++++++++++++++++++++++++----------- 1 file changed, 39 insertions(+), 13 deletions(-) diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 682ec0bba..221ee0bff 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -63,6 +63,7 @@ var ( hostname = flag.String("hostname", "derp.tailscale.com", "LetsEncrypt host name, if addr's port is :443. When --certmode=manual, this can be an IP address to avoid SNI checks") runSTUN = flag.Bool("stun", true, "whether to run a STUN server. It will bind to the same IP (if any) as the --addr flag value.") runDERP = flag.Bool("derp", true, "whether to run a DERP server. The only reason to set this false is if you're decommissioning a server but want to keep its bootstrap DNS functionality still running.") + flagHome = flag.String("home", "", "what to serve at the root path. It may be left empty (the default, for a default homepage), \"blank\" for a blank page, or a URL to redirect to") meshPSKFile = flag.String("mesh-psk-file", defaultMeshPSKFile(), "if non-empty, path to file containing the mesh pre-shared key file. It should contain some hex string; whitespace is trimmed.") meshWith = flag.String("mesh-with", "", "optional comma-separated list of hostnames to mesh with; the server's own hostname can be in the list. 
If an entry contains a slash, the second part names a hostname to be used when dialing the target.") @@ -254,6 +255,11 @@ func main() { } expvar.Publish("derp", s.ExpVar()) + handleHome, ok := getHomeHandler(*flagHome) + if !ok { + log.Fatalf("unknown --home value %q", *flagHome) + } + mux := http.NewServeMux() if *runDERP { derpHandler := derphttp.Handler(s) @@ -274,19 +280,7 @@ func main() { mux.HandleFunc("/bootstrap-dns", tsweb.BrowserHeaderHandlerFunc(handleBootstrapDNS)) mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { tsweb.AddBrowserHeaders(w) - w.Header().Set("Content-Type", "text/html; charset=utf-8") - w.WriteHeader(200) - err := homePageTemplate.Execute(w, templateData{ - ShowAbuseInfo: validProdHostname.MatchString(*hostname), - Disabled: !*runDERP, - AllowDebug: tsweb.AllowDebugAccess(r), - }) - if err != nil { - if r.Context().Err() == nil { - log.Printf("homePageTemplate.Execute: %v", err) - } - return - } + handleHome.ServeHTTP(w, r) })) mux.Handle("/robots.txt", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { tsweb.AddBrowserHeaders(w) @@ -579,3 +573,35 @@ var homePageTemplate = template.Must(template.New("home").Parse(` `)) + +// getHomeHandler returns a handler for the home page based on a flag string +// as documented on the --home flag. +func getHomeHandler(val string) (_ http.Handler, ok bool) { + if val == "" { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(200) + err := homePageTemplate.Execute(w, templateData{ + ShowAbuseInfo: validProdHostname.MatchString(*hostname), + Disabled: !*runDERP, + AllowDebug: tsweb.AllowDebugAccess(r), + }) + if err != nil { + if r.Context().Err() == nil { + log.Printf("homePageTemplate.Execute: %v", err) + } + return + } + }), true + } + if val == "blank" { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(200) + }), true + } + if strings.HasPrefix(val, "http://") || strings.HasPrefix(val, "https://") { + return http.RedirectHandler(val, http.StatusFound), true + } + return nil, false +} From 1d2d449b57c7a04766d3da80d9e8ef52abe3ef70 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Mar 2025 11:56:15 +0000 Subject: [PATCH 0554/1708] .github: Bump actions/cache from 4.2.0 to 4.2.2 Bumps [actions/cache](https://github.com/actions/cache) from 4.2.0 to 4.2.2. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/1bd1e32a3bdc45362d1e726936510720a7c30a57...d4323d4df104b026a6aa633fdb11d772146be0bf) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/test.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7142c86b9..4ff2f2421 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -79,7 +79,7 @@ jobs: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Cache - uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 + uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -153,7 +153,7 @@ jobs: cache: false - name: Restore Cache - uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 + uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -254,7 +254,7 @@ jobs: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Cache - uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 + uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -319,7 +319,7 @@ jobs: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Cache - uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 + uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -367,7 +367,7 @@ jobs: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Cache - uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 + uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache From f840aad49e51b457f4d6cacb532313f4d28bfbd1 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Tue, 4 Mar 2025 16:17:57 -0800 Subject: [PATCH 0555/1708] go.toolchain.rev: bump to go1.24.1 (#15209) Bump to 1.24.1 to avail of security fixes. Updates https://github.com/tailscale/tailscale/issues/15015 Signed-off-by: Patrick O'Doherty --- go.toolchain.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index ddbabb3eb..69aec16e4 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -2b494987ff3c1a6a26e10570c490394ff0a77aa4 +4fdaeeb8fe43bcdb4e8cc736433b9cd9c0ddd221 From 3e184345953345ccb69958df355b9bd9fed4ac2e Mon Sep 17 00:00:00 2001 From: David Anderson Date: Tue, 4 Mar 2025 11:22:30 -0800 Subject: [PATCH 0556/1708] util/eventbus: rework to have a Client abstraction The Client carries both publishers and subscribers for a single actor. This makes the APIs for publish and subscribe look more similar, and this structure is a better fit for upcoming debug facilities. 
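To give a sense of the resulting API, a small self-contained sketch (the event type and client names are invented; the calls mirror what the updated tests below exercise):

    package main

    import (
        "fmt"

        "tailscale.com/util/eventbus"
    )

    type routeChanged struct{ Dst string }

    func main() {
        bus := eventbus.New()
        defer bus.Close()

        // One Client per actor; a client can both publish and subscribe.
        mon := bus.Client("route-monitor")
        defer mon.Close()
        pub := eventbus.Publish[routeChanged](mon)

        ui := bus.Client("ui")
        defer ui.Close()
        sub := eventbus.Subscribe[routeChanged](ui)

        go pub.Publish(routeChanged{Dst: "10.0.0.0/8"})

        select {
        case ev := <-sub.Events():
            fmt.Println("route changed:", ev.Dst)
        case <-sub.Done():
            // Our client was closed before the event arrived.
        }
    }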
Updates #15160 Signed-off-by: David Anderson --- util/eventbus/bus.go | 103 +++++++++++---------- util/eventbus/bus_test.go | 51 ++++++----- util/eventbus/client.go | 100 ++++++++++++++++++++ util/eventbus/doc.go | 65 ++++++------- util/eventbus/publish.go | 53 +++++------ util/eventbus/subscribe.go | 181 +++++++++++++++++++++---------------- 6 files changed, 345 insertions(+), 208 deletions(-) create mode 100644 util/eventbus/client.go diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 85d73b15e..393596d75 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -20,12 +20,11 @@ type Bus struct { snapshot chan chan []any topicsMu sync.Mutex // guards everything below. - topics map[reflect.Type][]*Queue + topics map[reflect.Type][]*subscribeState // Used for introspection/debugging only, not in the normal event // publishing path. - publishers set.Set[publisher] - queues set.Set[*Queue] + clients set.Set[*Client] } // New returns a new bus. Use [PublisherOf] to make event publishers, @@ -33,17 +32,53 @@ type Bus struct { func New() *Bus { stopCtl, stopWorker := newGoroutineShutdown() ret := &Bus{ - write: make(chan any), - stop: stopCtl, - snapshot: make(chan chan []any), - topics: map[reflect.Type][]*Queue{}, - publishers: set.Set[publisher]{}, - queues: set.Set[*Queue]{}, + write: make(chan any), + stop: stopCtl, + snapshot: make(chan chan []any), + topics: map[reflect.Type][]*subscribeState{}, + clients: set.Set[*Client]{}, } go ret.pump(stopWorker) return ret } +// Client returns a new client with no subscriptions. Use [Subscribe] +// to receive events, and [Publish] to emit events. +// +// The client's name is used only for debugging, to tell humans what +// piece of code a publisher/subscriber belongs to. Aim for something +// short but unique, for example "kernel-route-monitor" or "taildrop", +// not "watcher". +func (b *Bus) Client(name string) *Client { + ret := &Client{ + name: name, + bus: b, + pub: set.Set[publisher]{}, + } + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + b.clients.Add(ret) + return ret +} + +// Close closes the bus. Implicitly closes all clients, publishers and +// subscribers attached to the bus. +// +// Close blocks until the bus is fully shut down. The bus is +// permanently unusable after closing. 
+func (b *Bus) Close() { + b.stop.StopAndWait() + + var clients set.Set[*Client] + b.topicsMu.Lock() + clients, b.clients = b.clients, set.Set[*Client]{} + b.topicsMu.Unlock() + + for c := range clients { + c.Close() + } +} + func (b *Bus) pump(stop goroutineShutdownWorker) { defer stop.Done() var vals queue @@ -98,13 +133,19 @@ func (b *Bus) pump(stop goroutineShutdownWorker) { } } -func (b *Bus) dest(t reflect.Type) []*Queue { +func (b *Bus) dest(t reflect.Type) []*subscribeState { b.topicsMu.Lock() defer b.topicsMu.Unlock() return b.topics[t] } -func (b *Bus) subscribe(t reflect.Type, q *Queue) (cancel func()) { +func (b *Bus) shouldPublish(t reflect.Type) bool { + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + return len(b.topics[t]) > 0 +} + +func (b *Bus) subscribe(t reflect.Type, q *subscribeState) (cancel func()) { b.topicsMu.Lock() defer b.topicsMu.Unlock() b.topics[t] = append(b.topics[t], q) @@ -113,7 +154,7 @@ func (b *Bus) subscribe(t reflect.Type, q *Queue) (cancel func()) { } } -func (b *Bus) unsubscribe(t reflect.Type, q *Queue) { +func (b *Bus) unsubscribe(t reflect.Type, q *subscribeState) { b.topicsMu.Lock() defer b.topicsMu.Unlock() // Topic slices are accessed by pump without holding a lock, so we @@ -127,44 +168,6 @@ func (b *Bus) unsubscribe(t reflect.Type, q *Queue) { b.topics[t] = slices.Delete(slices.Clone(b.topics[t]), i, i+1) } -func (b *Bus) Close() { - b.stop.StopAndWait() -} - -// Queue returns a new queue with no subscriptions. Use [Subscribe] to -// atach subscriptions to it. -// -// The queue's name should be a short, human-readable string that -// identifies this queue. The name is only visible through debugging -// APIs. -func (b *Bus) Queue(name string) *Queue { - return newQueue(b, name) -} - -func (b *Bus) addQueue(q *Queue) { - b.topicsMu.Lock() - defer b.topicsMu.Unlock() - b.queues.Add(q) -} - -func (b *Bus) deleteQueue(q *Queue) { - b.topicsMu.Lock() - defer b.topicsMu.Unlock() - b.queues.Delete(q) -} - -func (b *Bus) addPublisher(p publisher) { - b.topicsMu.Lock() - defer b.topicsMu.Unlock() - b.publishers.Add(p) -} - -func (b *Bus) deletePublisher(p publisher) { - b.topicsMu.Lock() - defer b.topicsMu.Unlock() - b.publishers.Delete(p) -} - func newGoroutineShutdown() (goroutineShutdownControl, goroutineShutdownWorker) { ctx, cancel := context.WithCancel(context.Background()) diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index 180f4164a..e159b6a12 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -26,14 +26,16 @@ func TestBus(t *testing.T) { b := eventbus.New() defer b.Close() - q := b.Queue("TestBus") - defer q.Close() - s := eventbus.Subscribe[EventA](q) + c := b.Client("TestSub") + defer c.Close() + s := eventbus.Subscribe[EventA](c) go func() { - pa := eventbus.PublisherOf[EventA](b, "TestBusA") + p := b.Client("TestPub") + defer p.Close() + pa := eventbus.Publish[EventA](p) defer pa.Close() - pb := eventbus.PublisherOf[EventB](b, "TestBusB") + pb := eventbus.Publish[EventB](p) defer pb.Close() pa.Publish(EventA{1}) pb.Publish(EventB{2}) @@ -45,7 +47,7 @@ func TestBus(t *testing.T) { select { case got := <-s.Events(): want.Got(got) - case <-q.Done(): + case <-s.Done(): t.Fatalf("queue closed unexpectedly") case <-time.After(time.Second): t.Fatalf("timed out waiting for event") @@ -57,19 +59,21 @@ func TestBusMultipleConsumers(t *testing.T) { b := eventbus.New() defer b.Close() - q1 := b.Queue("TestBusA") - defer q1.Close() - s1 := eventbus.Subscribe[EventA](q1) + c1 := b.Client("TestSubA") + defer 
c1.Close() + s1 := eventbus.Subscribe[EventA](c1) - q2 := b.Queue("TestBusAB") - defer q2.Close() - s2A := eventbus.Subscribe[EventA](q2) - s2B := eventbus.Subscribe[EventB](q2) + c2 := b.Client("TestSubB") + defer c2.Close() + s2A := eventbus.Subscribe[EventA](c2) + s2B := eventbus.Subscribe[EventB](c2) go func() { - pa := eventbus.PublisherOf[EventA](b, "TestBusA") + p := b.Client("TestPub") + defer p.Close() + pa := eventbus.Publish[EventA](p) defer pa.Close() - pb := eventbus.PublisherOf[EventB](b, "TestBusB") + pb := eventbus.Publish[EventB](p) defer pb.Close() pa.Publish(EventA{1}) pb.Publish(EventB{2}) @@ -86,9 +90,11 @@ func TestBusMultipleConsumers(t *testing.T) { wantB.Got(got) case got := <-s2B.Events(): wantB.Got(got) - case <-q1.Done(): + case <-s1.Done(): t.Fatalf("queue closed unexpectedly") - case <-q2.Done(): + case <-s2A.Done(): + t.Fatalf("queue closed unexpectedly") + case <-s2B.Done(): t.Fatalf("queue closed unexpectedly") case <-time.After(time.Second): t.Fatalf("timed out waiting for event") @@ -111,15 +117,15 @@ func TestSpam(t *testing.T) { received := make([][]EventA, subscribers) for i := range subscribers { - q := b.Queue(fmt.Sprintf("Subscriber%d", i)) - defer q.Close() - s := eventbus.Subscribe[EventA](q) + c := b.Client(fmt.Sprintf("Subscriber%d", i)) + defer c.Close() + s := eventbus.Subscribe[EventA](c) g.Go(func() error { for range wantEvents { select { case evt := <-s.Events(): received[i] = append(received[i], evt) - case <-q.Done(): + case <-s.Done(): t.Errorf("queue done before expected number of events received") return errors.New("queue prematurely closed") case <-time.After(5 * time.Second): @@ -134,7 +140,8 @@ func TestSpam(t *testing.T) { published := make([][]EventA, publishers) for i := range publishers { g.Run(func() { - p := eventbus.PublisherOf[EventA](b, fmt.Sprintf("Publisher%d", i)) + c := b.Client(fmt.Sprintf("Publisher%d", i)) + p := eventbus.Publish[EventA](c) for j := range eventsPerPublisher { evt := EventA{i*eventsPerPublisher + j} p.Publish(evt) diff --git a/util/eventbus/client.go b/util/eventbus/client.go new file mode 100644 index 000000000..ff8eea6ee --- /dev/null +++ b/util/eventbus/client.go @@ -0,0 +1,100 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +import ( + "reflect" + "sync" + + "tailscale.com/util/set" +) + +// A Client can publish and subscribe to events on its attached +// bus. See [Publish] to publish events, and [Subscribe] to receive +// events. +// +// Subscribers that share the same client receive events one at a +// time, in the order they were published. +type Client struct { + name string + bus *Bus + + mu sync.Mutex + pub set.Set[publisher] + sub *subscribeState // Lazily created on first subscribe +} + +// Close closes the client. Implicitly closes all publishers and +// subscribers obtained from this client. 
+func (c *Client) Close() { + var ( + pub set.Set[publisher] + sub *subscribeState + ) + + c.mu.Lock() + pub, c.pub = c.pub, nil + sub, c.sub = c.sub, nil + c.mu.Unlock() + + if sub != nil { + sub.close() + } + for p := range pub { + p.Close() + } +} + +func (c *Client) subscribeState() *subscribeState { + c.mu.Lock() + defer c.mu.Unlock() + if c.sub == nil { + c.sub = newSubscribeState(c) + } + return c.sub +} + +func (c *Client) addPublisher(pub publisher) { + c.mu.Lock() + defer c.mu.Unlock() + c.pub.Add(pub) +} + +func (c *Client) deletePublisher(pub publisher) { + c.mu.Lock() + defer c.mu.Unlock() + c.pub.Delete(pub) +} + +func (c *Client) addSubscriber(t reflect.Type, s *subscribeState) { + c.bus.subscribe(t, s) +} + +func (c *Client) deleteSubscriber(t reflect.Type, s *subscribeState) { + c.bus.unsubscribe(t, s) +} + +func (c *Client) publish() chan<- any { + return c.bus.write +} + +func (c *Client) shouldPublish(t reflect.Type) bool { + return c.bus.shouldPublish(t) +} + +// Subscribe requests delivery of events of type T through the given +// Queue. Panics if the queue already has a subscriber for T. +func Subscribe[T any](c *Client) *Subscriber[T] { + return newSubscriber[T](c.subscribeState()) +} + +// Publisher returns a publisher for event type T using the given +// client. +func Publish[T any](c *Client) *Publisher[T] { + ret := newPublisher[T](c) + c.mu.Lock() + defer c.mu.Unlock() + c.pub.Add(ret) + return ret +} diff --git a/util/eventbus/doc.go b/util/eventbus/doc.go index 136823c42..b3509b48b 100644 --- a/util/eventbus/doc.go +++ b/util/eventbus/doc.go @@ -3,56 +3,59 @@ // Package eventbus provides an in-process event bus. // -// The event bus connects publishers of typed events with subscribers -// interested in those events. +// An event bus connects publishers of typed events with subscribers +// interested in those events. Typically, there is one global event +// bus per process. // // # Usage // -// To publish events, use [PublisherOf] to get a typed publisher for -// your event type, then call [Publisher.Publish] as needed. If your -// event is expensive to construct, you can optionally use -// [Publisher.ShouldPublish] to skip the work if nobody is listening -// for the event. +// To send or receive events, first use [Bus.Client] to register with +// the bus. Clients should register with a human-readable name that +// identifies the code using the client, to aid in debugging. // -// To receive events, first use [Bus.Queue] to create an event -// delivery queue, then use [Subscribe] to get a [Subscriber] for each -// event type you're interested in. Receive the events themselves by -// selecting over all your [Subscriber.Chan] channels, as well as -// [Queue.Done] for shutdown notifications. +// To publish events, use [Publish] on a Client to get a typed +// publisher for your event type, then call [Publisher.Publish] as +// needed. If your event is expensive to construct, you can optionally +// use [Publisher.ShouldPublish] to skip the work if nobody is +// listening for the event. +// +// To receive events, use [Subscribe] to get a typed subscriber for +// each event type you're interested in. Receive the events themselves +// by selecting over all your [Subscriber.Events] channels, as well as +// [Subscriber.Done] for shutdown notifications. // // # Concurrency properties // -// The bus serializes all published events, and preserves that -// ordering when delivering to subscribers that are attached to the -// same Queue. 
In more detail: +// The bus serializes all published events across all publishers, and +// preserves that ordering when delivering to subscribers that are +// attached to the same Client. In more detail: // // - An event is published to the bus at some instant between the // start and end of the call to [Publisher.Publish]. -// - Events cannot be published at the same instant, and so are +// - Two events cannot be published at the same instant, and so are // totally ordered by their publication time. Given two events E1 // and E2, either E1 happens before E2, or E2 happens before E1. -// - Queues dispatch events to their Subscribers in publication -// order: if E1 happens before E2, the queue always delivers E1 +// - Clients dispatch events to their Subscribers in publication +// order: if E1 happens before E2, the client always delivers E1 // before E2. -// - Queues do not synchronize with each other: given queues Q1 and -// Q2, both subscribed to events E1 and E2, Q1 may deliver both E1 -// and E2 before Q2 delivers E1. +// - Clients do not synchronize subscriptions with each other: given +// clients C1 and C2, both subscribed to events E1 and E2, C1 may +// deliver both E1 and E2 before C2 delivers E1. // // Less formally: there is one true timeline of all published events. -// If you make a Queue and subscribe to events on it, you will receive -// those events one at a time, in the same order as the one true +// If you make a Client and subscribe to events, you will receive +// events one at a time, in the same order as the one true // timeline. You will "skip over" events you didn't subscribe to, but // your view of the world always moves forward in time, never // backwards, and you will observe events in the same order as // everyone else. // -// However, you cannot assume that what your subscribers on your queue -// see as "now" is the same as what other subscribers on other -// queues. Their queue may be further behind you in the timeline, or -// running ahead of you. This means you should be careful about -// reaching out to another component directly after receiving an -// event, as its view of the world may not yet (or ever) be exactly -// consistent with yours. +// However, you cannot assume that what your client see as "now" is +// the same as what other clients. They may be further behind you in +// working through the timeline, or running ahead of you. This means +// you should be careful about reaching out to another component +// directly after receiving an event, as its view of the world may not +// yet (or ever) be exactly consistent with yours. // // To make your code more testable and understandable, you should try // to structure it following the actor model: you have some local @@ -63,7 +66,7 @@ // # Expected subscriber behavior // // Subscribers are expected to promptly receive their events on -// [Subscriber.Chan]. The bus has a small, fixed amount of internal +// [Subscriber.Events]. The bus has a small, fixed amount of internal // buffering, meaning that a slow subscriber will eventually cause // backpressure and block publication of all further events. // diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go index 14828812b..19ddc1256 100644 --- a/util/eventbus/publish.go +++ b/util/eventbus/publish.go @@ -11,35 +11,41 @@ import ( // publisher is a uniformly typed wrapper around Publisher[T], so that // debugging facilities can look at active publishers. 
type publisher interface { - publisherName() string + publishType() reflect.Type + Close() } -// A Publisher publishes events on the bus. +// A Publisher publishes typed events on a bus. type Publisher[T any] struct { - bus *Bus - name string + client *Client stopCtx context.Context stop context.CancelFunc } -// PublisherOf returns a publisher for event type T on the given bus. -// -// The publisher's name should be a short, human-readable string that -// identifies this event publisher. The name is only visible through -// debugging APIs. -func PublisherOf[T any](b *Bus, name string) *Publisher[T] { +func newPublisher[T any](c *Client) *Publisher[T] { ctx, cancel := context.WithCancel(context.Background()) ret := &Publisher[T]{ - bus: b, - name: name, + client: c, stopCtx: ctx, stop: cancel, } - b.addPublisher(ret) + c.addPublisher(ret) return ret } -func (p *Publisher[T]) publisherName() string { return p.name } +// Close closes the publisher. +// +// Calls to Publish after Close silently do nothing. +func (p *Publisher[T]) Close() { + // Just unblocks any active calls to Publish, no other + // synchronization needed. + p.stop() + p.client.deletePublisher(p) +} + +func (p *Publisher[T]) publishType() reflect.Type { + return reflect.TypeFor[T]() +} // Publish publishes event v on the bus. func (p *Publisher[T]) Publish(v T) { @@ -48,32 +54,21 @@ func (p *Publisher[T]) Publish(v T) { select { case <-p.stopCtx.Done(): return - case <-p.bus.stop.WaitChan(): - return default: } select { - case p.bus.write <- v: + case p.client.publish() <- v: case <-p.stopCtx.Done(): - case <-p.bus.stop.WaitChan(): } } -// ShouldPublish reports whether anyone is subscribed to events of -// type T. +// ShouldPublish reports whether anyone is subscribed to the events +// that this publisher emits. // // ShouldPublish can be used to skip expensive event construction if // nobody seems to care. Publishers must not assume that someone will // definitely receive an event if ShouldPublish returns true. func (p *Publisher[T]) ShouldPublish() bool { - dests := p.bus.dest(reflect.TypeFor[T]()) - return len(dests) > 0 -} - -// Close closes the publisher, indicating that no further events will -// be published with it. -func (p *Publisher[T]) Close() { - p.stop() - p.bus.deletePublisher(p) + return p.client.shouldPublish(reflect.TypeFor[T]()) } diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index ade834d77..896f0ce1f 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -4,46 +4,59 @@ package eventbus import ( + "context" "fmt" "reflect" "sync" ) -type dispatchFn func(vals *queue, stop goroutineShutdownWorker, acceptCh func() chan any) bool +// subscriber is a uniformly typed wrapper around Subscriber[T], so +// that debugging facilities can look at active subscribers. +type subscriber interface { + subscribeType() reflect.Type + // dispatch is a function that dispatches the head value in vals to + // a subscriber, while also handling stop and incoming queue write + // events. + // + // dispatch exists because of the strongly typed Subscriber[T] + // wrapper around subscriptions: within the bus events are boxed in an + // 'any', and need to be unpacked to their full type before delivery + // to the subscriber. 
This involves writing to a strongly-typed + // channel, so subscribeState cannot handle that dispatch by itself - + // but if that strongly typed send blocks, we also need to keep + // processing other potential sources of wakeups, which is how we end + // up at this awkward type signature and sharing of internal state + // through dispatch. + dispatch(vals *queue, stop goroutineShutdownWorker, acceptCh func() chan any) bool + Close() +} -// A Queue receives events from a Bus. -// -// To receive events through the queue, see [Subscribe]. Subscribers -// that share the same Queue receive events one at time, in the order -// they were published. -type Queue struct { - bus *Bus - name string +// subscribeState handles dispatching of events received from a Bus. +type subscribeState struct { + client *Client write chan any stop goroutineShutdownControl snapshot chan chan []any outputsMu sync.Mutex - outputs map[reflect.Type]dispatchFn + outputs map[reflect.Type]subscriber } -func newQueue(b *Bus, name string) *Queue { +func newSubscribeState(c *Client) *subscribeState { stopCtl, stopWorker := newGoroutineShutdown() - ret := &Queue{ - bus: b, - name: name, + ret := &subscribeState{ + client: c, write: make(chan any), stop: stopCtl, snapshot: make(chan chan []any), - outputs: map[reflect.Type]dispatchFn{}, + outputs: map[reflect.Type]subscriber{}, } - b.addQueue(ret) go ret.pump(stopWorker) return ret } -func (q *Queue) pump(stop goroutineShutdownWorker) { +func (q *subscribeState) pump(stop goroutineShutdownWorker) { defer stop.Done() var vals queue acceptCh := func() chan any { @@ -55,13 +68,13 @@ func (q *Queue) pump(stop goroutineShutdownWorker) { for { if !vals.Empty() { val := vals.Peek() - fn := q.dispatchFn(val) - if fn == nil { + sub := q.subscriberFor(val) + if sub == nil { // Raced with unsubscribe. vals.Drop() continue } - if !fn(&vals, stop, acceptCh) { + if !sub.dispatch(&vals, stop, acceptCh) { return } } else { @@ -81,16 +94,74 @@ func (q *Queue) pump(stop goroutineShutdownWorker) { } } -// A Subscriber delivers one type of event from a [Queue]. +func (s *subscribeState) addSubscriber(t reflect.Type, sub subscriber) { + s.outputsMu.Lock() + defer s.outputsMu.Unlock() + if s.outputs[t] != nil { + panic(fmt.Errorf("double subscription for event %s", t)) + } + s.outputs[t] = sub + s.client.addSubscriber(t, s) +} + +func (s *subscribeState) deleteSubscriber(t reflect.Type) { + s.outputsMu.Lock() + defer s.outputsMu.Unlock() + delete(s.outputs, t) + s.client.deleteSubscriber(t, s) +} + +func (q *subscribeState) subscriberFor(val any) subscriber { + q.outputsMu.Lock() + defer q.outputsMu.Unlock() + return q.outputs[reflect.TypeOf(val)] +} + +// Close closes the subscribeState. Implicitly closes all Subscribers +// linked to this state, and any pending events are discarded. +func (s *subscribeState) close() { + s.stop.StopAndWait() + + var subs map[reflect.Type]subscriber + s.outputsMu.Lock() + subs, s.outputs = s.outputs, nil + s.outputsMu.Unlock() + for _, sub := range subs { + sub.Close() + } +} + +// A Subscriber delivers one type of event from a [Client]. 
type Subscriber[T any] struct { - recv *Queue - read chan T + doneCtx context.Context + done context.CancelFunc + recv *subscribeState + read chan T +} + +func newSubscriber[T any](r *subscribeState) *Subscriber[T] { + t := reflect.TypeFor[T]() + + ctx, cancel := context.WithCancel(context.Background()) + ret := &Subscriber[T]{ + doneCtx: ctx, + done: cancel, + recv: r, + read: make(chan T), + } + r.addSubscriber(t, ret) + + return ret +} + +func (s *Subscriber[T]) subscribeType() reflect.Type { + return reflect.TypeFor[T]() } func (s *Subscriber[T]) dispatch(vals *queue, stop goroutineShutdownWorker, acceptCh func() chan any) bool { t := vals.Peek().(T) for { - // Keep the cases in this select in sync with Queue.pump + // Keep the cases in this select in sync with subscribeState.pump // above. The only different should be that this select // delivers a value on s.read. select { @@ -113,58 +184,16 @@ func (s *Subscriber[T]) Events() <-chan T { return s.read } -// Close shuts down the Subscriber, indicating the caller no longer -// wishes to receive these events. After Close, receives on -// [Subscriber.Chan] block for ever. -func (s *Subscriber[T]) Close() { - t := reflect.TypeFor[T]() - s.recv.bus.unsubscribe(t, s.recv) - s.recv.deleteDispatchFn(t) -} - -func (q *Queue) dispatchFn(val any) dispatchFn { - q.outputsMu.Lock() - defer q.outputsMu.Unlock() - return q.outputs[reflect.ValueOf(val).Type()] -} - -func (q *Queue) addDispatchFn(t reflect.Type, fn dispatchFn) { - q.outputsMu.Lock() - defer q.outputsMu.Unlock() - if q.outputs[t] != nil { - panic(fmt.Errorf("double subscription for event %s", t)) - } - q.outputs[t] = fn -} - -func (q *Queue) deleteDispatchFn(t reflect.Type) { - q.outputsMu.Lock() - defer q.outputsMu.Unlock() - delete(q.outputs, t) -} - -// Done returns a channel that is closed when the Queue is closed. -func (q *Queue) Done() <-chan struct{} { - return q.stop.WaitChan() -} - -// Close closes the queue. All Subscribers attached to the queue are -// implicitly closed, and any pending events are discarded. -func (q *Queue) Close() { - q.stop.StopAndWait() - q.bus.deleteQueue(q) +// Done returns a channel that is closed when the subscriber is +// closed. +func (s *Subscriber[T]) Done() <-chan struct{} { + return s.doneCtx.Done() } -// Subscribe requests delivery of events of type T through the given -// Queue. Panics if the queue already has a subscriber for T. -func Subscribe[T any](r *Queue) Subscriber[T] { - t := reflect.TypeFor[T]() - ret := Subscriber[T]{ - recv: r, - read: make(chan T), - } - r.addDispatchFn(t, ret.dispatch) - r.bus.subscribe(t, r) - - return ret +// Close closes the Subscriber, indicating the caller no longer wishes +// to receive this event type. After Close, receives on +// [Subscriber.Events] block for ever. 
+func (s *Subscriber[T]) Close() { + s.done() // unblock receivers + s.recv.deleteSubscriber(reflect.TypeFor[T]()) } From 5eafce7e25b100f4dd235f5256607fe11727e843 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 7 Feb 2025 20:25:43 -0800 Subject: [PATCH 0557/1708] gokrazy/natlab: update gokrazy, wire up natlab tests to GitHub CI Updates #13038 Change-Id: I610f9076816f44d59c0ca405a1b4f5eb4c6c0594 Signed-off-by: Brad Fitzpatrick --- .github/workflows/natlab-integrationtest.yml | 30 ++++++++ gokrazy/build.go | 40 ++++++++-- gokrazy/go.mod | 12 +-- gokrazy/go.sum | 12 +-- .../builddir/tailscale.com/go.sum | 70 ++++++++++++++++++ gokrazy/natlabapp.arm64/config.json | 4 + .../natlabapp/builddir/tailscale.com/go.sum | 68 +++++++++++++++++ gokrazy/natlabapp/config.json | 4 + gokrazy/tsapp/builddir/tailscale.com/go.sum | 74 +++++++++++++++++++ gokrazy/tsapp/config.json | 4 + tstest/integration/nat/nat_test.go | 20 ++++- 11 files changed, 318 insertions(+), 20 deletions(-) create mode 100644 .github/workflows/natlab-integrationtest.yml diff --git a/.github/workflows/natlab-integrationtest.yml b/.github/workflows/natlab-integrationtest.yml new file mode 100644 index 000000000..b8d99e668 --- /dev/null +++ b/.github/workflows/natlab-integrationtest.yml @@ -0,0 +1,30 @@ +# Run some natlab integration tests. +# See https://github.com/tailscale/tailscale/issues/13038 +name: "natlab-integrationtest" + +concurrency: + group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + pull_request: + paths: + - "tailcfg/**" + - "wgengine/**" + - "ipn/ipnlocal/**" + - ".github/workflows/natlab-integrationtest.yml" +jobs: + natlab-integrationtest: + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Install qemu + run: | + sudo rm /var/lib/man-db/auto-update + sudo apt-get -y update + sudo apt-get -y remove man-db + sudo apt-get install -y qemu-system-x86 qemu-utils + - name: Run natlab integration tests + run: | + ./tool/go test -v -run=^TestEasyEasy$ -timeout=3m -count=1 ./tstest/integration/nat --run-vm-tests diff --git a/gokrazy/build.go b/gokrazy/build.go index 2392af0cb..c1ee1cbeb 100644 --- a/gokrazy/build.go +++ b/gokrazy/build.go @@ -11,7 +11,6 @@ package main import ( "bytes" - "cmp" "encoding/json" "errors" "flag" @@ -30,7 +29,6 @@ import ( var ( app = flag.String("app", "tsapp", "appliance name; one of the subdirectories of gokrazy/") bucket = flag.String("bucket", "tskrazy-import", "S3 bucket to upload disk image to while making AMI") - goArch = flag.String("arch", cmp.Or(os.Getenv("GOARCH"), "amd64"), "GOARCH architecture to build for: arm64 or amd64") build = flag.Bool("build", false, "if true, just build locally and stop, without uploading") ) @@ -54,6 +52,26 @@ func findMkfsExt4() (string, error) { return "", errors.New("No mkfs.ext4 found on system") } +var conf gokrazyConfig + +// gokrazyConfig is the subset of gokrazy/internal/config.Struct +// that we care about. +type gokrazyConfig struct { + // Environment is os.Environment pairs to use when + // building userspace. 
+ // See https://gokrazy.org/userguide/instance-config/#environment + Environment []string +} + +func (c *gokrazyConfig) GOARCH() string { + for _, e := range c.Environment { + if v, ok := strings.CutPrefix(e, "GOARCH="); ok { + return v + } + } + return "" +} + func main() { flag.Parse() @@ -61,6 +79,19 @@ func main() { log.Fatalf("--app must be non-empty name such as 'tsapp' or 'natlabapp'") } + confJSON, err := os.ReadFile(filepath.Join(*app, "config.json")) + if err != nil { + log.Fatalf("reading config.json: %v", err) + } + if err := json.Unmarshal(confJSON, &conf); err != nil { + log.Fatalf("unmarshaling config.json: %v", err) + } + switch conf.GOARCH() { + case "amd64", "arm64": + default: + log.Fatalf("config.json GOARCH %q must be amd64 or arm64", conf.GOARCH()) + } + if err := buildImage(); err != nil { log.Fatalf("build image: %v", err) } @@ -106,7 +137,6 @@ func buildImage() error { // Build the tsapp.img var buf bytes.Buffer cmd := exec.Command("go", "run", - "-exec=env GOOS=linux GOARCH="+*goArch+" ", "github.com/gokrazy/tools/cmd/gok", "--parent_dir="+dir, "--instance="+*app, @@ -253,13 +283,13 @@ func waitForImportSnapshot(importTaskID string) (snapID string, err error) { func makeAMI(name, ebsSnapID string) (ami string, err error) { var arch string - switch *goArch { + switch conf.GOARCH() { case "arm64": arch = "arm64" case "amd64": arch = "x86_64" default: - return "", fmt.Errorf("unknown arch %q", *goArch) + return "", fmt.Errorf("unknown arch %q", conf.GOARCH()) } out, err := exec.Command("aws", "ec2", "register-image", "--name", name, diff --git a/gokrazy/go.mod b/gokrazy/go.mod index a9ba5a07d..f7483f41d 100644 --- a/gokrazy/go.mod +++ b/gokrazy/go.mod @@ -1,13 +1,13 @@ module tailscale.com/gokrazy -go 1.23.1 +go 1.23 -require github.com/gokrazy/tools v0.0.0-20240730192548-9f81add3a91e +require github.com/gokrazy/tools v0.0.0-20250128200151-63160424957c require ( github.com/breml/rootcerts v0.2.10 // indirect github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 // indirect - github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 // indirect + github.com/gokrazy/internal v0.0.0-20250126213949-423a5b587b57 // indirect github.com/gokrazy/updater v0.0.0-20230215172637-813ccc7f21e2 // indirect github.com/google/renameio/v2 v2.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -15,9 +15,5 @@ require ( github.com/spf13/pflag v1.0.5 // indirect golang.org/x/mod v0.11.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.20.0 // indirect + golang.org/x/sys v0.28.0 // indirect ) - -replace github.com/gokrazy/gokrazy => github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 - -replace github.com/gokrazy/tools => github.com/tailscale/gokrazy-tools v0.0.0-20240730192548-9f81add3a91e diff --git a/gokrazy/go.sum b/gokrazy/go.sum index dfac8ca37..170d15b3d 100644 --- a/gokrazy/go.sum +++ b/gokrazy/go.sum @@ -3,8 +3,10 @@ github.com/breml/rootcerts v0.2.10/go.mod h1:24FDtzYMpqIeYC7QzaE8VPRQaFZU5TIUDly github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 h1:C7t6eeMaEQVy6e8CarIhscYQlNmw5e3G36y7l7Y21Ao= github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0/go.mod h1:56wL82FO0bfMU5RvfXoIwSOP2ggqqxT+tAfNEIyxuHw= -github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 h1:XDklMxV0pE5jWiNaoo5TzvWfqdoiRRScmr4ZtDzE4Uw= -github.com/gokrazy/internal 
v0.0.0-20240629150625-a0f1dee26ef5/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= +github.com/gokrazy/internal v0.0.0-20250126213949-423a5b587b57 h1:f5bEvO4we3fbfiBkECrrUgWQ8OH6J3SdB2Dwxid/Yx4= +github.com/gokrazy/internal v0.0.0-20250126213949-423a5b587b57/go.mod h1:SJG1KwuJQXFEoBgryaNCkMbdISyovDgZd0xmXJRZmiw= +github.com/gokrazy/tools v0.0.0-20250128200151-63160424957c h1:iEbS8GrNOn671ze8J/AfrYFEVzf8qMx8aR5K0VxPK2w= +github.com/gokrazy/tools v0.0.0-20250128200151-63160424957c/go.mod h1:f2vZhnaPzy92+Bjpx1iuZHK7VuaJx6SNCWQWmu23HZA= github.com/gokrazy/updater v0.0.0-20230215172637-813ccc7f21e2 h1:kBY5R1tSf+EYZ+QaSrofLaVJtBqYsVNVBWkdMq3Smcg= github.com/gokrazy/updater v0.0.0-20230215172637-813ccc7f21e2/go.mod h1:PYOvzGOL4nlBmuxu7IyKQTFLaxr61+WPRNRzVtuYOHw= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= @@ -19,14 +21,12 @@ github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/tailscale/gokrazy-tools v0.0.0-20240730192548-9f81add3a91e h1:3/xIc1QCvnKL7BCLng9od98HEvxCadjvqiI/bN+Twso= -github.com/tailscale/gokrazy-tools v0.0.0-20240730192548-9f81add3a91e/go.mod h1:eTZ0QsugEPFU5UAQ/87bKMkPxQuTNa7+iFAIahOFwRg= golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum b/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum index 9123439ed..ae814f316 100644 --- a/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum +++ b/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum @@ -4,32 +4,58 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= +github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= +github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= github.com/aws/aws-sdk-go-v2/config v1.26.5 h1:lodGSevz7d+kkFJodfauThRxK9mdJbyutUxGq1NNhvw= github.com/aws/aws-sdk-go-v2/config v1.26.5/go.mod h1:DxHrz6diQJOc9EwDslVRh84VjjrE17g+pVZXUeSxaDU= +github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= +github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod 
h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg= github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE= github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= 
+github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= @@ -46,10 +72,14 @@ github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= +github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84= +github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -62,6 +92,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= +github.com/gorilla/csrf 
v1.7.3-0.20250123201450-9dd6af1f6d30 h1:fiJdrgVBkjZ5B1HJ2WQwNOaXB+QyYcNXTA3t1XYLz0M= +github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= @@ -70,6 +102,8 @@ github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwso github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= github.com/illarion/gonotify/v2 v2.0.2 h1:oDH5yvxq9oiQGWUeut42uShcWzOy/hsT9E7pvO95+kQ= github.com/illarion/gonotify/v2 v2.0.2/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= +github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A= +github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= @@ -84,6 +118,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= @@ -96,6 +132,8 @@ github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy5 github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= @@ -126,12 +164,18 @@ github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4 github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= 
+github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= +github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= +github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= +github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYwzZ7x4G4ioPNtKQmIY0G/3o4pVf8wCZq6blY= github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw= +github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= @@ -144,6 +188,8 @@ github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= +github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= +github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= @@ -152,42 +198,66 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.24.0 
h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 h1:Z+Zg+aXJYq6f4TK2E4H+vZkQ4dJAWnInXDR6hM9znxo= +golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4= +golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= 
+golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= +gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k= +gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM= k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= +k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= +k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/gokrazy/natlabapp.arm64/config.json b/gokrazy/natlabapp.arm64/config.json index 2577f61a5..2ba9a20f9 100644 --- a/gokrazy/natlabapp.arm64/config.json +++ b/gokrazy/natlabapp.arm64/config.json @@ -20,6 +20,10 @@ } } }, + "Environment": [ + "GOOS=linux", + "GOARCH=arm64" + ], "KernelPackage": "github.com/gokrazy/kernel.arm64", "FirmwarePackage": "github.com/gokrazy/kernel.arm64", "EEPROMPackage": "", diff --git a/gokrazy/natlabapp/builddir/tailscale.com/go.sum b/gokrazy/natlabapp/builddir/tailscale.com/go.sum index baa378c46..25f15059d 100644 --- a/gokrazy/natlabapp/builddir/tailscale.com/go.sum +++ b/gokrazy/natlabapp/builddir/tailscale.com/go.sum @@ -4,32 +4,58 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= +github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= +github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= github.com/aws/aws-sdk-go-v2/config v1.26.5 h1:lodGSevz7d+kkFJodfauThRxK9mdJbyutUxGq1NNhvw= github.com/aws/aws-sdk-go-v2/config v1.26.5/go.mod h1:DxHrz6diQJOc9EwDslVRh84VjjrE17g+pVZXUeSxaDU= +github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= +github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg= github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= 
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE= github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= 
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= @@ -46,10 +72,14 @@ github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= +github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84= +github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -62,6 +92,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= +github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 h1:fiJdrgVBkjZ5B1HJ2WQwNOaXB+QyYcNXTA3t1XYLz0M= +github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= @@ -86,6 +118,8 @@ 
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= @@ -98,6 +132,8 @@ github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy5 github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= @@ -128,14 +164,20 @@ github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4 github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= +github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= +github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= +github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= +github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYwzZ7x4G4ioPNtKQmIY0G/3o4pVf8wCZq6blY= github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc h1:cezaQN9pvKVaw56Ma5qr/G646uKIYP0yQf+OyWN/okc= 
github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw= +github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= @@ -148,6 +190,8 @@ github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= +github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= +github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= @@ -156,42 +200,66 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 h1:Z+Zg+aXJYq6f4TK2E4H+vZkQ4dJAWnInXDR6hM9znxo= +golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/net v0.27.0 
h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4= +golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= +gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k= +gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM= k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= +k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= +k8s.io/client-go 
v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/gokrazy/natlabapp/config.json b/gokrazy/natlabapp/config.json index 902f14acd..1968b2aac 100644 --- a/gokrazy/natlabapp/config.json +++ b/gokrazy/natlabapp/config.json @@ -20,6 +20,10 @@ } } }, + "Environment": [ + "GOOS=linux", + "GOARCH=amd64" + ], "KernelPackage": "github.com/tailscale/gokrazy-kernel", "FirmwarePackage": "", "EEPROMPackage": "", diff --git a/gokrazy/tsapp/builddir/tailscale.com/go.sum b/gokrazy/tsapp/builddir/tailscale.com/go.sum index b3b73e2d0..2ffef7bf7 100644 --- a/gokrazy/tsapp/builddir/tailscale.com/go.sum +++ b/gokrazy/tsapp/builddir/tailscale.com/go.sum @@ -4,48 +4,80 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= +github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= +github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= github.com/aws/aws-sdk-go-v2/config v1.26.5 h1:lodGSevz7d+kkFJodfauThRxK9mdJbyutUxGq1NNhvw= github.com/aws/aws-sdk-go-v2/config v1.26.5/go.mod h1:DxHrz6diQJOc9EwDslVRh84VjjrE17g+pVZXUeSxaDU= +github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= +github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg= github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod 
h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE= github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= github.com/coreos/go-iptables 
v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= +github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= +github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84= +github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -58,12 +90,16 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= +github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 h1:fiJdrgVBkjZ5B1HJ2WQwNOaXB+QyYcNXTA3t1XYLz0M= +github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwsoio= github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= +github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A= +github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 
h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= @@ -78,6 +114,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= @@ -90,6 +128,8 @@ github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy5 github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= @@ -116,14 +156,22 @@ github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29X github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk= github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= +github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= +github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= +github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= +github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= +github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= 
+github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYwzZ7x4G4ioPNtKQmIY0G/3o4pVf8wCZq6blY= github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw= +github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= @@ -136,6 +184,8 @@ github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= +github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= +github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= @@ -144,42 +194,66 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 h1:Z+Zg+aXJYq6f4TK2E4H+vZkQ4dJAWnInXDR6hM9znxo= +golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp 
v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4= +golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= +gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k= +gvisor.dev/gvisor 
v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM= k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= +k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= +k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/gokrazy/tsapp/config.json b/gokrazy/tsapp/config.json index 33dd98a96..b88be53a4 100644 --- a/gokrazy/tsapp/config.json +++ b/gokrazy/tsapp/config.json @@ -27,6 +27,10 @@ } } }, + "Environment": [ + "GOOS=linux", + "GOARCH=amd64" + ], "KernelPackage": "github.com/tailscale/gokrazy-kernel", "FirmwarePackage": "github.com/tailscale/gokrazy-kernel", "InternalCompatibilityFlags": {} diff --git a/tstest/integration/nat/nat_test.go b/tstest/integration/nat/nat_test.go index 535515588..9f77d31e9 100644 --- a/tstest/integration/nat/nat_test.go +++ b/tstest/integration/nat/nat_test.go @@ -32,6 +32,7 @@ import ( ) var ( + runVMTests = flag.Bool("run-vm-tests", false, "run tests that require a VM") logTailscaled = flag.Bool("log-tailscaled", false, "log tailscaled output") pcapFile = flag.String("pcap", "", "write pcap to file") ) @@ -59,8 +60,25 @@ func newNatTest(tb testing.TB) *natTest { base: filepath.Join(modRoot, "gokrazy/natlabapp.qcow2"), } + if !*runVMTests { + tb.Skip("skipping heavy test; set --run-vm-tests to run") + } + if _, err := os.Stat(nt.base); err != nil { - tb.Skipf("skipping test; base image %q not found", nt.base) + if !os.IsNotExist(err) { + tb.Fatal(err) + } + tb.Logf("building VM image...") + cmd := exec.Command("make", "natlab") + cmd.Dir = filepath.Join(modRoot, "gokrazy") + cmd.Stderr = os.Stderr + cmd.Stdout = os.Stdout + if err := cmd.Run(); err != nil { + tb.Fatalf("Error running 'make natlab' in gokrazy directory") + } + if _, err := os.Stat(nt.base); err != nil { + tb.Skipf("still can't find VM image: %v", err) + } } nt.kernel, err = findKernelPath(filepath.Join(modRoot, "gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod")) From 24d4846f007d34b160e2dba9fecf95a8357372d7 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Tue, 4 Mar 2025 12:08:32 -0800 Subject: [PATCH 0558/1708] util/eventbus: adjust worker goroutine management helpers This makes the helpers closer in behavior to cancelable contexts and taskgroup.Single, and makes the worker code use a more normal and easier to reason about context.Context for shutdown. Updates #15160 Signed-off-by: David Anderson --- util/eventbus/bus.go | 127 ++++++++++++++++++++++--------------- util/eventbus/publish.go | 17 ++--- util/eventbus/subscribe.go | 47 +++++++------- 3 files changed, 105 insertions(+), 86 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 393596d75..3520be828 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -15,8 +15,8 @@ import ( // Bus is an event bus that distributes published events to interested // subscribers. type Bus struct { + router *worker write chan any - stop goroutineShutdownControl snapshot chan chan []any topicsMu sync.Mutex // guards everything below. 
@@ -30,15 +30,13 @@ type Bus struct { // New returns a new bus. Use [PublisherOf] to make event publishers, // and [Bus.Queue] and [Subscribe] to make event subscribers. func New() *Bus { - stopCtl, stopWorker := newGoroutineShutdown() ret := &Bus{ write: make(chan any), - stop: stopCtl, snapshot: make(chan chan []any), topics: map[reflect.Type][]*subscribeState{}, clients: set.Set[*Client]{}, } - go ret.pump(stopWorker) + ret.router = runWorker(ret.pump) return ret } @@ -67,7 +65,7 @@ func (b *Bus) Client(name string) *Client { // Close blocks until the bus is fully shut down. The bus is // permanently unusable after closing. func (b *Bus) Close() { - b.stop.StopAndWait() + b.router.StopAndWait() var clients set.Set[*Client] b.topicsMu.Lock() @@ -79,8 +77,7 @@ func (b *Bus) Close() { } } -func (b *Bus) pump(stop goroutineShutdownWorker) { - defer stop.Done() +func (b *Bus) pump(ctx context.Context) { var vals queue acceptCh := func() chan any { if vals.Full() { @@ -102,13 +99,13 @@ func (b *Bus) pump(stop goroutineShutdownWorker) { select { case d.write <- val: break deliverOne - case <-d.stop.WaitChan(): + case <-d.closed(): // Queue closed, don't block but continue // delivering to others. break deliverOne case in := <-acceptCh(): vals.Add(in) - case <-stop.Stop(): + case <-ctx.Done(): return case ch := <-b.snapshot: ch <- vals.Snapshot() @@ -122,7 +119,7 @@ func (b *Bus) pump(stop goroutineShutdownWorker) { // resuming. for vals.Empty() { select { - case <-stop.Stop(): + case <-ctx.Done(): return case val := <-b.write: vals.Add(val) @@ -168,59 +165,89 @@ func (b *Bus) unsubscribe(t reflect.Type, q *subscribeState) { b.topics[t] = slices.Delete(slices.Clone(b.topics[t]), i, i+1) } -func newGoroutineShutdown() (goroutineShutdownControl, goroutineShutdownWorker) { - ctx, cancel := context.WithCancel(context.Background()) +// A worker runs a worker goroutine and helps coordinate its shutdown. +type worker struct { + ctx context.Context + stop context.CancelFunc + stopped chan struct{} +} - ctl := goroutineShutdownControl{ - startShutdown: cancel, - shutdownFinished: make(chan struct{}), - } - work := goroutineShutdownWorker{ - startShutdown: ctx.Done(), - shutdownFinished: ctl.shutdownFinished, +// runWorker creates a worker goroutine running fn. The context passed +// to fn is canceled by [worker.Stop]. +func runWorker(fn func(context.Context)) *worker { + ctx, stop := context.WithCancel(context.Background()) + ret := &worker{ + ctx: ctx, + stop: stop, + stopped: make(chan struct{}), } - - return ctl, work + go ret.run(fn) + return ret } -// goroutineShutdownControl is a helper type to manage the shutdown of -// a worker goroutine. The worker goroutine should use the -// goroutineShutdownWorker related to this controller. -type goroutineShutdownControl struct { - startShutdown context.CancelFunc - shutdownFinished chan struct{} +func (w *worker) run(fn func(context.Context)) { + defer close(w.stopped) + fn(w.ctx) } -func (ctl *goroutineShutdownControl) Stop() { - ctl.startShutdown() -} +// Stop signals the worker goroutine to shut down. +func (w *worker) Stop() { w.stop() } -func (ctl *goroutineShutdownControl) Wait() { - <-ctl.shutdownFinished -} +// Done returns a channel that is closed when the worker goroutine +// exits. +func (w *worker) Done() <-chan struct{} { return w.stopped } -func (ctl *goroutineShutdownControl) WaitChan() <-chan struct{} { - return ctl.shutdownFinished -} +// Wait waits until the worker goroutine has exited. 
+func (w *worker) Wait() { <-w.stopped } -func (ctl *goroutineShutdownControl) StopAndWait() { - ctl.Stop() - ctl.Wait() +// StopAndWait signals the worker goroutine to shut down, then waits +// for it to exit. +func (w *worker) StopAndWait() { + w.stop() + <-w.stopped } -// goroutineShutdownWorker is a helper type for a worker goroutine to -// be notified that it should shut down, and to report that shutdown -// has completed. The notification is triggered by the related -// goroutineShutdownControl. -type goroutineShutdownWorker struct { - startShutdown <-chan struct{} - shutdownFinished chan struct{} +// stopFlag is a value that can be watched for a notification. The +// zero value is ready for use. +// +// The flag is notified by running [stopFlag.Stop]. Stop can be called +// multiple times. Upon the first call to Stop, [stopFlag.Done] is +// closed, all pending [stopFlag.Wait] calls return, and future Wait +// calls return immediately. +// +// A stopFlag can only notify once, and is intended for use as a +// one-way shutdown signal that's lighter than a cancellable +// context.Context. +type stopFlag struct { + // guards the lazy construction of stopped, and the value of + // alreadyStopped. + mu sync.Mutex + stopped chan struct{} + alreadyStopped bool +} + +func (s *stopFlag) Stop() { + s.mu.Lock() + defer s.mu.Unlock() + if s.alreadyStopped { + return + } + s.alreadyStopped = true + if s.stopped == nil { + s.stopped = make(chan struct{}) + } + close(s.stopped) } -func (work *goroutineShutdownWorker) Stop() <-chan struct{} { - return work.startShutdown +func (s *stopFlag) Done() <-chan struct{} { + s.mu.Lock() + defer s.mu.Unlock() + if s.stopped == nil { + s.stopped = make(chan struct{}) + } + return s.stopped } -func (work *goroutineShutdownWorker) Done() { - close(work.shutdownFinished) +func (s *stopFlag) Wait() { + <-s.Done() } diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go index 19ddc1256..b2d0641d9 100644 --- a/util/eventbus/publish.go +++ b/util/eventbus/publish.go @@ -4,7 +4,6 @@ package eventbus import ( - "context" "reflect" ) @@ -17,17 +16,13 @@ type publisher interface { // A Publisher publishes typed events on a bus. type Publisher[T any] struct { - client *Client - stopCtx context.Context - stop context.CancelFunc + client *Client + stop stopFlag } func newPublisher[T any](c *Client) *Publisher[T] { - ctx, cancel := context.WithCancel(context.Background()) ret := &Publisher[T]{ - client: c, - stopCtx: ctx, - stop: cancel, + client: c, } c.addPublisher(ret) return ret @@ -39,7 +34,7 @@ func newPublisher[T any](c *Client) *Publisher[T] { func (p *Publisher[T]) Close() { // Just unblocks any active calls to Publish, no other // synchronization needed. - p.stop() + p.stop.Stop() p.client.deletePublisher(p) } @@ -52,14 +47,14 @@ func (p *Publisher[T]) Publish(v T) { // Check for just a stopped publisher or bus before trying to // write, so that once closed Publish consistently does nothing. select { - case <-p.stopCtx.Done(): + case <-p.stop.Done(): return default: } select { case p.client.publish() <- v: - case <-p.stopCtx.Done(): + case <-p.stop.Done(): } } diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 896f0ce1f..606410c8e 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -27,7 +27,7 @@ type subscriber interface { // processing other potential sources of wakeups, which is how we end // up at this awkward type signature and sharing of internal state // through dispatch. 
- dispatch(vals *queue, stop goroutineShutdownWorker, acceptCh func() chan any) bool + dispatch(ctx context.Context, vals *queue, acceptCh func() chan any) bool Close() } @@ -35,29 +35,26 @@ type subscriber interface { type subscribeState struct { client *Client - write chan any - stop goroutineShutdownControl - snapshot chan chan []any + dispatcher *worker + write chan any + snapshot chan chan []any outputsMu sync.Mutex outputs map[reflect.Type]subscriber } func newSubscribeState(c *Client) *subscribeState { - stopCtl, stopWorker := newGoroutineShutdown() ret := &subscribeState{ client: c, write: make(chan any), - stop: stopCtl, snapshot: make(chan chan []any), outputs: map[reflect.Type]subscriber{}, } - go ret.pump(stopWorker) + ret.dispatcher = runWorker(ret.pump) return ret } -func (q *subscribeState) pump(stop goroutineShutdownWorker) { - defer stop.Done() +func (q *subscribeState) pump(ctx context.Context) { var vals queue acceptCh := func() chan any { if vals.Full() { @@ -74,7 +71,7 @@ func (q *subscribeState) pump(stop goroutineShutdownWorker) { vals.Drop() continue } - if !sub.dispatch(&vals, stop, acceptCh) { + if !sub.dispatch(ctx, &vals, acceptCh) { return } } else { @@ -85,7 +82,7 @@ func (q *subscribeState) pump(stop goroutineShutdownWorker) { select { case val := <-q.write: vals.Add(val) - case <-stop.Stop(): + case <-ctx.Done(): return case ch := <-q.snapshot: ch <- vals.Snapshot() @@ -120,7 +117,7 @@ func (q *subscribeState) subscriberFor(val any) subscriber { // Close closes the subscribeState. Implicitly closes all Subscribers // linked to this state, and any pending events are discarded. func (s *subscribeState) close() { - s.stop.StopAndWait() + s.dispatcher.StopAndWait() var subs map[reflect.Type]subscriber s.outputsMu.Lock() @@ -131,23 +128,23 @@ func (s *subscribeState) close() { } } +func (s *subscribeState) closed() <-chan struct{} { + return s.dispatcher.Done() +} + // A Subscriber delivers one type of event from a [Client]. type Subscriber[T any] struct { - doneCtx context.Context - done context.CancelFunc - recv *subscribeState - read chan T + stop stopFlag + recv *subscribeState + read chan T } func newSubscriber[T any](r *subscribeState) *Subscriber[T] { t := reflect.TypeFor[T]() - ctx, cancel := context.WithCancel(context.Background()) ret := &Subscriber[T]{ - doneCtx: ctx, - done: cancel, - recv: r, - read: make(chan T), + recv: r, + read: make(chan T), } r.addSubscriber(t, ret) @@ -158,7 +155,7 @@ func (s *Subscriber[T]) subscribeType() reflect.Type { return reflect.TypeFor[T]() } -func (s *Subscriber[T]) dispatch(vals *queue, stop goroutineShutdownWorker, acceptCh func() chan any) bool { +func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue, acceptCh func() chan any) bool { t := vals.Peek().(T) for { // Keep the cases in this select in sync with subscribeState.pump @@ -170,7 +167,7 @@ func (s *Subscriber[T]) dispatch(vals *queue, stop goroutineShutdownWorker, acce return true case val := <-acceptCh(): vals.Add(val) - case <-stop.Stop(): + case <-ctx.Done(): return false case ch := <-s.recv.snapshot: ch <- vals.Snapshot() @@ -187,13 +184,13 @@ func (s *Subscriber[T]) Events() <-chan T { // Done returns a channel that is closed when the subscriber is // closed. func (s *Subscriber[T]) Done() <-chan struct{} { - return s.doneCtx.Done() + return s.stop.Done() } // Close closes the Subscriber, indicating the caller no longer wishes // to receive this event type. After Close, receives on // [Subscriber.Events] block for ever. 
func (s *Subscriber[T]) Close() { - s.done() // unblock receivers + s.stop.Stop() // unblock receivers s.recv.deleteSubscriber(reflect.TypeFor[T]()) } From c6b8e6f6b7a2a95edbc620bcaaa473bd21a68d5b Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 3 Mar 2025 15:02:35 +0000 Subject: [PATCH 0559/1708] licenses: update license notices Signed-off-by: License Updater --- licenses/android.md | 57 +++++++++++++++++++++---------------------- licenses/apple.md | 23 +++++++++-------- licenses/tailscale.md | 25 +++++++++---------- licenses/windows.md | 24 +++++++++--------- 4 files changed, 63 insertions(+), 66 deletions(-) diff --git a/licenses/android.md b/licenses/android.md index 378baa805..c3e9e989a 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -9,34 +9,33 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.24.1/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.26.5/config/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.16.16/credentials/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.14.11/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.2.10/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.5.10/internal/endpoints/v2/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.7.2/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.24.1/internal/sync/singleflight/LICENSE)) - - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.10.4/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.10.10/service/internal/presigned-url/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) + - 
[github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.31/internal/configsources/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.31/internal/endpoints/v2/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.44.7/service/ssm/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.18.7/service/sso/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.21.7/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.26.7/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.19.0/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.19.0/internal/sync/singleflight/LICENSE)) - - [github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) 
([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) + - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) + - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.11.1/LICENSE)) + - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/6a9a0fde9288/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - - [github.com/illarion/gonotify/v2](https://pkg.go.dev/github.com/illarion/gonotify/v2) ([MIT](https://github.com/illarion/gonotify/blob/v2.0.3/LICENSE)) + - [github.com/illarion/gonotify/v3](https://pkg.go.dev/github.com/illarion/gonotify/v3) ([MIT](https://github.com/illarion/gonotify/blob/v3.0.2/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) 
([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) @@ -65,17 +64,17 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [go4.org/unsafe/assume-no-moving-gc](https://pkg.go.dev/go4.org/unsafe/assume-no-moving-gc) ([BSD-3-Clause](https://github.com/go4org/unsafe-assume-no-moving-gc/blob/e7c30c78aeb2/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/a8ea4be8:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/7588d65b:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.33.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - [golang.org/x/mobile](https://pkg.go.dev/golang.org/x/mobile) ([BSD-3-Clause](https://cs.opensource.google/go/x/mobile/+/81131f64:LICENSE)) - - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.22.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.34.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.10.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/1c14dcad:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.28.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.21.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.9.0:LICENSE)) - - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.29.0:LICENSE)) - - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/64c016c92987/LICENSE)) + - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.23.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.35.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) + - 
[golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.30.0:LICENSE)) + - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [inet.af/netaddr](https://pkg.go.dev/inet.af/netaddr) ([BSD-3-Clause](Unknown)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index 7741318f7..a2984ea2e 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -28,20 +28,19 @@ See also the dependencies in the [Tailscale CLI][]. - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - - [github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) ([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.11.1/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/6a9a0fde9288/LICENSE)) + - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - 
[github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - - [github.com/illarion/gonotify/v2](https://pkg.go.dev/github.com/illarion/gonotify/v2) ([MIT](https://github.com/illarion/gonotify/blob/v2.0.3/LICENSE)) + - [github.com/illarion/gonotify/v3](https://pkg.go.dev/github.com/illarion/gonotify/v3) ([MIT](https://github.com/illarion/gonotify/blob/v3.0.2/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/15c9b8791914/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) @@ -69,14 +68,14 @@ See also the dependencies in the [Tailscale CLI][]. - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/a8ea4be8:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/7588d65b:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.34.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.10.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/1c14dcad:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.28.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.21.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.9.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.35.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) + - 
[golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index ab79ee391..777687be6 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -33,7 +33,6 @@ Some packages may only be included on certain architectures or operating systems - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - - [github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) ([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE)) - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/creack/pty](https://pkg.go.dev/github.com/creack/pty) ([MIT](https://github.com/creack/pty/blob/v1.1.23/LICENSE)) @@ -41,8 +40,8 @@ Some packages may only be included on certain architectures or operating systems - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.11.1/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/6a9a0fde9288/LICENSE)) + - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) - [github.com/go-ole/go-ole](https://pkg.go.dev/github.com/go-ole/go-ole) ([MIT](https://github.com/go-ole/go-ole/blob/v1.3.0/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) @@ 
-52,7 +51,7 @@ Some packages may only be included on certain architectures or operating systems - [github.com/gorilla/csrf](https://pkg.go.dev/github.com/gorilla/csrf) ([BSD-3-Clause](https://github.com/gorilla/csrf/blob/9dd6af1f6d30/LICENSE)) - [github.com/gorilla/securecookie](https://pkg.go.dev/github.com/gorilla/securecookie) ([BSD-3-Clause](https://github.com/gorilla/securecookie/blob/v1.1.2/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - - [github.com/illarion/gonotify/v2](https://pkg.go.dev/github.com/illarion/gonotify/v2) ([MIT](https://github.com/illarion/gonotify/blob/v2.0.3/LICENSE)) + - [github.com/illarion/gonotify/v3](https://pkg.go.dev/github.com/illarion/gonotify/v3) ([MIT](https://github.com/illarion/gonotify/blob/v3.0.2/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) @@ -91,15 +90,15 @@ Some packages may only be included on certain architectures or operating systems - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/a8ea4be8:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/7588d65b:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.34.0:LICENSE)) - - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.25.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.10.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/1c14dcad:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.28.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.21.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.9.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.35.0:LICENSE)) + - 
[golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.26.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index 8abbd52d5..78fdcf7fb 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -35,7 +35,7 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/6a9a0fde9288/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) @@ -62,23 +62,23 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/72f92d5087d4/LICENSE)) - - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/cfd3289ef17f/LICENSE)) + - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/04068c1cab63/LICENSE)) + - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/5992cb43ca35/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/a8ea4be8:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/7588d65b:LICENSE)) - - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.23.0:LICENSE)) - - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.22.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.34.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.10.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/1c14dcad:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.28.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.21.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) + - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) 
([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.24.0:LICENSE)) + - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.23.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.35.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.35.1/LICENSE)) From 27e0575f76f0d8c88ec04d770ed7d6ec86bcba91 Mon Sep 17 00:00:00 2001 From: Sam Linville Date: Wed, 5 Mar 2025 10:55:37 -0600 Subject: [PATCH 0560/1708] cmd/tsidp: add README and Dockerfile (#15205) --- cmd/tsidp/Dockerfile | 41 ++++++++++++++++++ cmd/tsidp/README.md | 100 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 141 insertions(+) create mode 100644 cmd/tsidp/Dockerfile create mode 100644 cmd/tsidp/README.md diff --git a/cmd/tsidp/Dockerfile b/cmd/tsidp/Dockerfile new file mode 100644 index 000000000..605a7ba2e --- /dev/null +++ b/cmd/tsidp/Dockerfile @@ -0,0 +1,41 @@ +# Build stage +FROM golang:alpine AS builder + +# Install build dependencies +RUN apk add --no-cache git + +# Set working directory +WORKDIR /src + +# Copy only go.mod and go.sum first to leverage Docker caching +COPY go.mod go.sum ./ +RUN go mod download + +# Copy the entire repository +COPY . . + +# Build the tsidp binary +RUN go build -o /bin/tsidp ./cmd/tsidp + +# Final stage +FROM alpine:latest + +# Create necessary directories +RUN mkdir -p /var/lib/tsidp + +# Copy binary from builder stage +COPY --from=builder /bin/tsidp /app/tsidp + +# Set working directory +WORKDIR /app + +# Environment variables +ENV TAILSCALE_USE_WIP_CODE=1 \ + TS_HOSTNAME=tsidp \ + TS_STATE_DIR=/var/lib/tsidp + +# Expose the default port +EXPOSE 443 + +# Run the application +ENTRYPOINT ["/app/tsidp"] \ No newline at end of file diff --git a/cmd/tsidp/README.md b/cmd/tsidp/README.md new file mode 100644 index 000000000..d51138b6d --- /dev/null +++ b/cmd/tsidp/README.md @@ -0,0 +1,100 @@ +# `tsidp` - Tailscale OpenID Connect (OIDC) Identity Provider + +[![status: experimental](https://img.shields.io/badge/status-experimental-blue)](https://tailscale.com/kb/1167/release-stages/#experimental) + +`tsidp` is an OIDC Identity Provider (IdP) server that integrates with your Tailscale network. It allows you to use Tailscale identities for authentication in applications that support OpenID Connect, enabling single sign-on (SSO) capabilities within your tailnet. 
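+
+Once deployed (see the installation steps below), you can sanity-check the server by
+fetching the standard OIDC discovery document from a device on your tailnet. This is an
+illustrative example; substitute your own tailnet name and the hostname you configured
+via `TS_HOSTNAME`:
+
+```bash
+# The discovery document lists the issuer and the authorization/token endpoints
+# that OIDC clients (for example Proxmox, below) use to complete the login flow.
+curl https://tsidp.your-tailnet.ts.net/.well-known/openid-configuration
+```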
+ +## Prerequisites + +- A Tailscale network (tailnet) with magicDNS and HTTPS enabled +- A Tailscale authentication key from your tailnet +- Docker installed on your system + +## Installation using Docker + +1. **Build the Docker Image** + + The Dockerfile uses a multi-stage build process to: + - Build the `tsidp` binary from source + - Create a minimal Alpine-based image with just the necessary components + + ```bash + # Clone the Tailscale repository + git clone https://github.com/tailscale/tailscale.git + cd tailscale + ``` + + ```bash + # Build the Docker image + docker build -t tsidp:latest -f cmd/tsidp/Dockerfile . + ``` + +2. **Run the Container** + + Replace `YOUR_TAILSCALE_AUTHKEY` with your Tailscale authentication key. + + ```bash + docker run -d \ + --name `tsidp` \ + -p 443:443 \ + -e TS_AUTHKEY=YOUR_TAILSCALE_AUTHKEY \ + -e TS_HOSTNAME=tsidp \ + -v tsidp-data:/var/lib/tsidp \ + tsidp:latest + ``` + +3. **Verify Installation** + ```bash + docker logs tsidp + ``` + + Visit `https://tsidp.tailnet.ts.net` to confirm the service is running. + +## Usage Example: Proxmox Integration + +Here's how to configure Proxmox to use `tsidp` for authentication: + +1. In Proxmox, navigate to Datacenter > Realms > Add OpenID Connect Server + +2. Configure the following settings: + - Issuer URL: `https://idp.velociraptor.ts.net` + - Realm: `tailscale` (or your preferred name) + - Client ID: `unused` + - Client Key: `unused` + - Default: `true` + - Autocreate users: `true` + - Username claim: `email` + +3. Set up user permissions: + - Go to Datacenter > Permissions > Groups + - Create a new group (e.g., "tsadmins") + - Click Permissions in the sidebar + - Add Group Permission + - Set Path to `/` for full admin access or scope as needed + - Set the group and role + - Add Tailscale-authenticated users to the group + +## Configuration Options + +The `tsidp` server supports several command-line flags: + +- `--verbose`: Enable verbose logging +- `--port`: Port to listen on (default: 443) +- `--local-port`: Allow requests from localhost +- `--use-local-tailscaled`: Use local tailscaled instead of tsnet +- `--dir`: tsnet state directory + +## Environment Variables + +- `TS_AUTHKEY`: Your Tailscale authentication key (required) +- `TS_HOSTNAME`: Hostname for the `tsidp` server (default: "idp") +- `TS_STATE_DIR`: State directory (default: "/var/lib/tsidp") +- `TAILSCALE_USE_WIP_CODE`: Enable work-in-progress code (default: "1") + +## Support + +This is an [experimental](https://tailscale.com/kb/1167/release-stages#experimental), work in progress feature. For issues or questions, file issues on the [GitHub repository](https://github.com/tailscale/tailscale) + +## License + +BSD-3-Clause License. See [LICENSE](../../LICENSE) for details. \ No newline at end of file From 96202a7c0cad1e2d63479339d8a99880d4c897e7 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 5 Mar 2025 14:14:19 -0800 Subject: [PATCH 0561/1708] .github/workflows: descope natlab CI for now until GitHub flakes are fixed The natlab VM tests are flaking on GitHub Actions. To not distract people, disable them for now (unless they're touched directly) until they're made more reliable, which will be some painful debugging probably. 
Updates #13038 Change-Id: I6570f1cd43f8f4d628a54af8481b67455ebe83dc Signed-off-by: Brad Fitzpatrick --- .github/workflows/natlab-integrationtest.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/workflows/natlab-integrationtest.yml b/.github/workflows/natlab-integrationtest.yml index b8d99e668..1de74cdaa 100644 --- a/.github/workflows/natlab-integrationtest.yml +++ b/.github/workflows/natlab-integrationtest.yml @@ -9,10 +9,7 @@ concurrency: on: pull_request: paths: - - "tailcfg/**" - - "wgengine/**" - - "ipn/ipnlocal/**" - - ".github/workflows/natlab-integrationtest.yml" + - "tstest/integration/nat/nat_test.go" jobs: natlab-integrationtest: runs-on: ubuntu-latest From bf40bc4fa0dc952a6c4f78997b14367b2eb96d4a Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 5 Mar 2025 10:33:35 -0800 Subject: [PATCH 0562/1708] util/eventbus: make internal queue a generic type In preparation for making the queues carry additional event metadata. Updates #15160 Signed-off-by: David Anderson --- util/eventbus/bus.go | 2 +- util/eventbus/queue.go | 26 ++++++++++++++------------ util/eventbus/subscribe.go | 6 +++--- 3 files changed, 18 insertions(+), 16 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 3520be828..9f6adbfb7 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -78,7 +78,7 @@ func (b *Bus) Close() { } func (b *Bus) pump(ctx context.Context) { - var vals queue + var vals queue[any] acceptCh := func() chan any { if vals.Full() { return nil diff --git a/util/eventbus/queue.go b/util/eventbus/queue.go index 8f6bda748..a62bf3c62 100644 --- a/util/eventbus/queue.go +++ b/util/eventbus/queue.go @@ -10,32 +10,32 @@ import ( const maxQueuedItems = 16 // queue is an ordered queue of length up to maxQueuedItems. -type queue struct { - vals []any +type queue[T any] struct { + vals []T start int } // canAppend reports whether a value can be appended to q.vals without // shifting values around. -func (q *queue) canAppend() bool { +func (q *queue[T]) canAppend() bool { return cap(q.vals) < maxQueuedItems || len(q.vals) < cap(q.vals) } -func (q *queue) Full() bool { +func (q *queue[T]) Full() bool { return q.start == 0 && !q.canAppend() } -func (q *queue) Empty() bool { +func (q *queue[T]) Empty() bool { return q.start == len(q.vals) } -func (q *queue) Len() int { +func (q *queue[T]) Len() int { return len(q.vals) - q.start } // Add adds v to the end of the queue. Blocks until append can be // done. -func (q *queue) Add(v any) { +func (q *queue[T]) Add(v T) { if !q.canAppend() { if q.start == 0 { panic("Add on a full queue") @@ -54,21 +54,23 @@ func (q *queue) Add(v any) { // Peek returns the first value in the queue, without removing it from // the queue, or nil if the queue is empty. -func (q *queue) Peek() any { +func (q *queue[T]) Peek() T { if q.Empty() { - return nil + var zero T + return zero } return q.vals[q.start] } // Drop discards the first value in the queue, if any. -func (q *queue) Drop() { +func (q *queue[T]) Drop() { if q.Empty() { return } - q.vals[q.start] = nil + var zero T + q.vals[q.start] = zero q.start++ if q.Empty() { // Reset cursor to start of array, it's free to do. @@ -78,6 +80,6 @@ func (q *queue) Drop() { } // Snapshot returns a copy of the queue's contents. 
-func (q *queue) Snapshot() []any { +func (q *queue[T]) Snapshot() []T { return slices.Clone(q.vals[q.start:]) } diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 606410c8e..85aa1ff6a 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -27,7 +27,7 @@ type subscriber interface { // processing other potential sources of wakeups, which is how we end // up at this awkward type signature and sharing of internal state // through dispatch. - dispatch(ctx context.Context, vals *queue, acceptCh func() chan any) bool + dispatch(ctx context.Context, vals *queue[any], acceptCh func() chan any) bool Close() } @@ -55,7 +55,7 @@ func newSubscribeState(c *Client) *subscribeState { } func (q *subscribeState) pump(ctx context.Context) { - var vals queue + var vals queue[any] acceptCh := func() chan any { if vals.Full() { return nil @@ -155,7 +155,7 @@ func (s *Subscriber[T]) subscribeType() reflect.Type { return reflect.TypeFor[T]() } -func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue, acceptCh func() chan any) bool { +func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[any], acceptCh func() chan any) bool { t := vals.Peek().(T) for { // Keep the cases in this select in sync with subscribeState.pump From a1192dd686fa4f2b53db4b1cba9030b01f80b891 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 5 Mar 2025 10:39:06 -0800 Subject: [PATCH 0563/1708] util/eventbus: track additional event context in publish queue Updates #15160 Signed-off-by: David Anderson --- util/eventbus/bus.go | 16 ++++++++-------- util/eventbus/client.go | 2 +- util/eventbus/publish.go | 15 ++++++++++++++- 3 files changed, 23 insertions(+), 10 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 9f6adbfb7..33c0ae84d 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -16,8 +16,8 @@ import ( // subscribers. type Bus struct { router *worker - write chan any - snapshot chan chan []any + write chan publishedEvent + snapshot chan chan []publishedEvent topicsMu sync.Mutex // guards everything below. topics map[reflect.Type][]*subscribeState @@ -31,8 +31,8 @@ type Bus struct { // and [Bus.Queue] and [Subscribe] to make event subscribers. func New() *Bus { ret := &Bus{ - write: make(chan any), - snapshot: make(chan chan []any), + write: make(chan publishedEvent), + snapshot: make(chan chan []publishedEvent), topics: map[reflect.Type][]*subscribeState{}, clients: set.Set[*Client]{}, } @@ -78,8 +78,8 @@ func (b *Bus) Close() { } func (b *Bus) pump(ctx context.Context) { - var vals queue[any] - acceptCh := func() chan any { + var vals queue[publishedEvent] + acceptCh := func() chan publishedEvent { if vals.Full() { return nil } @@ -92,12 +92,12 @@ func (b *Bus) pump(ctx context.Context) { // queue space for it. 
for !vals.Empty() { val := vals.Peek() - dests := b.dest(reflect.ValueOf(val).Type()) + dests := b.dest(reflect.ValueOf(val.Event).Type()) for _, d := range dests { deliverOne: for { select { - case d.write <- val: + case d.write <- val.Event: break deliverOne case <-d.closed(): // Queue closed, don't block but continue diff --git a/util/eventbus/client.go b/util/eventbus/client.go index ff8eea6ee..174cc5ea5 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -75,7 +75,7 @@ func (c *Client) deleteSubscriber(t reflect.Type, s *subscribeState) { c.bus.unsubscribe(t, s) } -func (c *Client) publish() chan<- any { +func (c *Client) publish() chan<- publishedEvent { return c.bus.write } diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go index b2d0641d9..fdabdcb23 100644 --- a/util/eventbus/publish.go +++ b/util/eventbus/publish.go @@ -5,8 +5,15 @@ package eventbus import ( "reflect" + "time" ) +type publishedEvent struct { + Event any + From *Client + Published time.Time +} + // publisher is a uniformly typed wrapper around Publisher[T], so that // debugging facilities can look at active publishers. type publisher interface { @@ -52,8 +59,14 @@ func (p *Publisher[T]) Publish(v T) { default: } + evt := publishedEvent{ + Event: v, + From: p.client, + Published: time.Now(), + } + select { - case p.client.publish() <- v: + case p.client.publish() <- evt: case <-p.stop.Done(): } } From cf5c788cf19001b09e71514c8a66593385e43ea9 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 5 Mar 2025 10:42:08 -0800 Subject: [PATCH 0564/1708] util/eventbus: track additional event context in subscribe queue Updates #15160 Signed-off-by: David Anderson --- util/eventbus/bus.go | 10 +++++++++- util/eventbus/subscribe.go | 28 ++++++++++++++++++---------- 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 33c0ae84d..b479f3940 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -8,6 +8,7 @@ import ( "reflect" "slices" "sync" + "time" "tailscale.com/util/set" ) @@ -93,11 +94,18 @@ func (b *Bus) pump(ctx context.Context) { for !vals.Empty() { val := vals.Peek() dests := b.dest(reflect.ValueOf(val.Event).Type()) + routed := time.Now() for _, d := range dests { + evt := queuedEvent{ + Event: val.Event, + From: val.From, + Published: val.Published, + Routed: routed, + } deliverOne: for { select { - case d.write <- val.Event: + case d.write <- evt: break deliverOne case <-d.closed(): // Queue closed, don't block but continue diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 85aa1ff6a..71201aa40 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -8,8 +8,16 @@ import ( "fmt" "reflect" "sync" + "time" ) +type queuedEvent struct { + Event any + From *Client + Published time.Time + Routed time.Time +} + // subscriber is a uniformly typed wrapper around Subscriber[T], so // that debugging facilities can look at active subscribers. type subscriber interface { @@ -27,7 +35,7 @@ type subscriber interface { // processing other potential sources of wakeups, which is how we end // up at this awkward type signature and sharing of internal state // through dispatch. 
- dispatch(ctx context.Context, vals *queue[any], acceptCh func() chan any) bool + dispatch(ctx context.Context, vals *queue[queuedEvent], acceptCh func() chan queuedEvent) bool Close() } @@ -36,8 +44,8 @@ type subscribeState struct { client *Client dispatcher *worker - write chan any - snapshot chan chan []any + write chan queuedEvent + snapshot chan chan []queuedEvent outputsMu sync.Mutex outputs map[reflect.Type]subscriber @@ -46,8 +54,8 @@ type subscribeState struct { func newSubscribeState(c *Client) *subscribeState { ret := &subscribeState{ client: c, - write: make(chan any), - snapshot: make(chan chan []any), + write: make(chan queuedEvent), + snapshot: make(chan chan []queuedEvent), outputs: map[reflect.Type]subscriber{}, } ret.dispatcher = runWorker(ret.pump) @@ -55,8 +63,8 @@ func newSubscribeState(c *Client) *subscribeState { } func (q *subscribeState) pump(ctx context.Context) { - var vals queue[any] - acceptCh := func() chan any { + var vals queue[queuedEvent] + acceptCh := func() chan queuedEvent { if vals.Full() { return nil } @@ -65,7 +73,7 @@ func (q *subscribeState) pump(ctx context.Context) { for { if !vals.Empty() { val := vals.Peek() - sub := q.subscriberFor(val) + sub := q.subscriberFor(val.Event) if sub == nil { // Raced with unsubscribe. vals.Drop() @@ -155,8 +163,8 @@ func (s *Subscriber[T]) subscribeType() reflect.Type { return reflect.TypeFor[T]() } -func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[any], acceptCh func() chan any) bool { - t := vals.Peek().(T) +func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[queuedEvent], acceptCh func() chan queuedEvent) bool { + t := vals.Peek().Event.(T) for { // Keep the cases in this select in sync with subscribeState.pump // above. The only different should be that this select From ffb0b66d5b99e018cdfc1b9fa9c79f6b3dd5542e Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 6 Mar 2025 06:05:41 -0800 Subject: [PATCH 0565/1708] cmd/k8s-operator: advertise VIPServices in ProxyGroup config (#14946) Now that packets flow for VIPServices, the last piece needed to start serving them from a ProxyGroup is config to tell the proxy Pods which services they should advertise. Updates tailscale/corp#24795 Change-Id: Ic7bbeac8e93c9503558107bc5f6123be02a84c77 Signed-off-by: Tom Proctor --- cmd/k8s-operator/egress-services.go | 6 +- cmd/k8s-operator/ingress-for-pg.go | 117 ++++++++++++++++++------ cmd/k8s-operator/ingress-for-pg_test.go | 43 ++++++++- cmd/k8s-operator/proxygroup.go | 27 +++++- cmd/k8s-operator/proxygroup_specs.go | 10 +- cmd/k8s-operator/proxygroup_test.go | 84 ++++++++++++++++- 6 files changed, 251 insertions(+), 36 deletions(-) diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index cf218ba4f..e997e5884 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -630,7 +630,11 @@ func tailnetTargetFromSvc(svc *corev1.Service) egressservices.TailnetTarget { func portMap(p corev1.ServicePort) egressservices.PortMap { // TODO (irbekrm): out of bounds check? 
- return egressservices.PortMap{Protocol: string(p.Protocol), MatchPort: uint16(p.TargetPort.IntVal), TargetPort: uint16(p.Port)} + return egressservices.PortMap{ + Protocol: string(p.Protocol), + MatchPort: uint16(p.TargetPort.IntVal), + TargetPort: uint16(p.Port), + } } func isEgressSvcForProxyGroup(obj client.Object) bool { diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 4fa0af2a2..1fa12eb59 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -99,7 +99,7 @@ func (a *IngressPGReconciler) Reconcile(ctx context.Context, req reconcile.Reque hostname := hostnameForIngress(ing) logger = logger.With("hostname", hostname) - if !ing.DeletionTimestamp.IsZero() || !a.shouldExpose(ing) { + if !ing.DeletionTimestamp.IsZero() || !shouldExpose(ing) { return res, a.maybeCleanup(ctx, hostname, ing, logger) } @@ -122,6 +122,8 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin logger.Infof("[unexpected] no ProxyGroup annotation, skipping VIPService provisioning") return nil } + logger = logger.With("ProxyGroup", pgName) + pg := &tsapi.ProxyGroup{} if err := a.Get(ctx, client.ObjectKey{Name: pgName}, pg); err != nil { if apierrors.IsNotFound(err) { @@ -148,8 +150,6 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin a.recorder.Event(ing, corev1.EventTypeWarning, "HTTPSNotEnabled", "HTTPS is not enabled on the tailnet; ingress may not work") } - logger = logger.With("proxy-group", pg) - if !slices.Contains(ing.Finalizers, FinalizerNamePG) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. So, @@ -288,7 +288,13 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin } } - // 5. Update Ingress status + // 5. Update tailscaled's AdvertiseServices config, which should add the VIPService + // IPs to the ProxyGroup Pods' AllowedIPs in the next netmap update if approved. + if err = a.maybeUpdateAdvertiseServicesConfig(ctx, pg.Name, serviceName, true, logger); err != nil { + return fmt.Errorf("failed to update tailscaled config: %w", err) + } + + // 6. Update Ingress status oldStatus := ing.Status.DeepCopy() ports := []networkingv1.IngressPortStatus{ { @@ -320,9 +326,9 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin // maybeCleanupProxyGroup ensures that if an Ingress hostname has changed, any VIPService resources created for the // Ingress' ProxyGroup corresponding to the old hostname are cleaned up. A run of this function will ensure that any // VIPServices that are associated with the provided ProxyGroup and no longer owned by an Ingress are cleaned up. 
-func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger) error { +func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, pgName string, logger *zap.SugaredLogger) error { // Get serve config for the ProxyGroup - cm, cfg, err := a.proxyGroupServeConfig(ctx, proxyGroupName) + cm, cfg, err := a.proxyGroupServeConfig(ctx, pgName) if err != nil { return fmt.Errorf("getting serve config: %w", err) } @@ -349,17 +355,16 @@ func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG if !found { logger.Infof("VIPService %q is not owned by any Ingress, cleaning up", vipServiceName) - svc, err := a.getVIPService(ctx, vipServiceName, logger) + + // Delete the VIPService from control if necessary. + svc, err := a.tsClient.GetVIPService(ctx, vipServiceName) if err != nil { errResp := &tailscale.ErrResponse{} - if errors.As(err, &errResp) && errResp.Status == http.StatusNotFound { - delete(cfg.Services, vipServiceName) - serveConfigChanged = true - continue + if ok := errors.As(err, errResp); !ok || errResp.Status != http.StatusNotFound { + return err } - return err } - if isVIPServiceForAnyIngress(svc) { + if svc != nil && isVIPServiceForAnyIngress(svc) { logger.Infof("cleaning up orphaned VIPService %q", vipServiceName) if err := a.tsClient.DeleteVIPService(ctx, vipServiceName); err != nil { errResp := &tailscale.ErrResponse{} @@ -368,6 +373,11 @@ func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG } } } + + // Make sure the VIPService is not advertised in tailscaled or serve config. + if err = a.maybeUpdateAdvertiseServicesConfig(ctx, pgName, vipServiceName, false, logger); err != nil { + return fmt.Errorf("failed to update tailscaled config services: %w", err) + } delete(cfg.Services, vipServiceName) serveConfigChanged = true } @@ -383,6 +393,7 @@ func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG return fmt.Errorf("updating serve config: %w", err) } } + return nil } @@ -421,7 +432,12 @@ func (a *IngressPGReconciler) maybeCleanup(ctx context.Context, hostname string, return fmt.Errorf("error deleting VIPService: %w", err) } - // 3. Remove the VIPService from the serve config for the ProxyGroup. + // 3. Unadvertise the VIPService in tailscaled config. + if err = a.maybeUpdateAdvertiseServicesConfig(ctx, pg, serviceName, false, logger); err != nil { + return fmt.Errorf("failed to update tailscaled config services: %w", err) + } + + // 4. Remove the VIPService from the serve config for the ProxyGroup. 
logger.Infof("Removing VIPService %q from serve config for ProxyGroup %q", hostname, pg) delete(cfg.Services, serviceName) cfgBytes, err := json.Marshal(cfg) @@ -501,7 +517,7 @@ func (a *IngressPGReconciler) tailnetCertDomain(ctx context.Context) (string, er } // shouldExpose returns true if the Ingress should be exposed over Tailscale in HA mode (on a ProxyGroup) -func (a *IngressPGReconciler) shouldExpose(ing *networkingv1.Ingress) bool { +func shouldExpose(ing *networkingv1.Ingress) bool { isTSIngress := ing != nil && ing.Spec.IngressClassName != nil && *ing.Spec.IngressClassName == tailscaleIngressClassName @@ -509,18 +525,6 @@ func (a *IngressPGReconciler) shouldExpose(ing *networkingv1.Ingress) bool { return isTSIngress && pgAnnot != "" } -func (a *IngressPGReconciler) getVIPService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (*tailscale.VIPService, error) { - svc, err := a.tsClient.GetVIPService(ctx, name) - if err != nil { - errResp := &tailscale.ErrResponse{} - if ok := errors.As(err, errResp); ok && errResp.Status != http.StatusNotFound { - logger.Infof("error getting VIPService %q: %v", name, err) - return nil, fmt.Errorf("error getting VIPService %q: %w", name, err) - } - } - return svc, nil -} - func isVIPServiceForIngress(svc *tailscale.VIPService, ing *networkingv1.Ingress) bool { if svc == nil || ing == nil { return false @@ -582,12 +586,16 @@ func (a *IngressPGReconciler) validateIngress(ing *networkingv1.Ingress, pg *tsa // deleteVIPServiceIfExists attempts to delete the VIPService if it exists and is owned by the given Ingress. func (a *IngressPGReconciler) deleteVIPServiceIfExists(ctx context.Context, name tailcfg.ServiceName, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { - svc, err := a.getVIPService(ctx, name, logger) + svc, err := a.tsClient.GetVIPService(ctx, name) if err != nil { + errResp := &tailscale.ErrResponse{} + if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { + return nil + } + return fmt.Errorf("error getting VIPService: %w", err) } - // isVIPServiceForIngress handles nil svc, so we don't need to check it here if !isVIPServiceForIngress(svc, ing) { return nil } @@ -606,3 +614,54 @@ func isHTTPEndpointEnabled(ing *networkingv1.Ingress) bool { } return ing.Annotations[annotationHTTPEndpoint] == "enabled" } + +func (a *IngressPGReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, pgName string, serviceName tailcfg.ServiceName, shouldBeAdvertised bool, logger *zap.SugaredLogger) (err error) { + logger.Debugf("Updating ProxyGroup tailscaled configs to advertise service %q: %v", serviceName, shouldBeAdvertised) + + // Get all config Secrets for this ProxyGroup. + secrets := &corev1.SecretList{} + if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "config"))); err != nil { + return fmt.Errorf("failed to list config Secrets: %w", err) + } + + for _, secret := range secrets.Items { + var updated bool + for fileName, confB := range secret.Data { + var conf ipn.ConfigVAlpha + if err := json.Unmarshal(confB, &conf); err != nil { + return fmt.Errorf("error unmarshalling ProxyGroup config: %w", err) + } + + // Update the services to advertise if required. + idx := slices.Index(conf.AdvertiseServices, serviceName.String()) + isAdvertised := idx >= 0 + switch { + case isAdvertised == shouldBeAdvertised: + // Already up to date. + continue + case isAdvertised: + // Needs to be removed. 
+ conf.AdvertiseServices = slices.Delete(conf.AdvertiseServices, idx, idx+1) + case shouldBeAdvertised: + // Needs to be added. + conf.AdvertiseServices = append(conf.AdvertiseServices, serviceName.String()) + } + + // Update the Secret. + confB, err := json.Marshal(conf) + if err != nil { + return fmt.Errorf("error marshalling ProxyGroup config: %w", err) + } + mak.Set(&secret.Data, fileName, confB) + updated = true + } + + if updated { + if err := a.Update(ctx, &secret); err != nil { + return fmt.Errorf("error updating ProxyGroup config Secret: %w", err) + } + } + } + + return nil +} diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index c432eb7e1..8c4ffb691 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -8,6 +8,7 @@ package main import ( "context" "encoding/json" + "fmt" "maps" "reflect" "testing" @@ -24,6 +25,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" + tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/tailcfg" "tailscale.com/types/ptr" @@ -63,6 +65,7 @@ func TestIngressPGReconciler(t *testing.T) { expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:my-svc", false) verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) + verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { ing.Annotations["tailscale.com/tags"] = "tag:custom,tag:test" @@ -122,6 +125,8 @@ func TestIngressPGReconciler(t *testing.T) { verifyServeConfig(t, fc, "svc:my-svc", false) verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) + verifyTailscaledConfig(t, fc, []string{"svc:my-svc", "svc:my-other-svc"}) + // Delete second Ingress if err := fc.Delete(context.Background(), ing2); err != nil { t.Fatalf("deleting second Ingress: %v", err) @@ -151,6 +156,8 @@ func TestIngressPGReconciler(t *testing.T) { t.Error("second Ingress service config was not cleaned up") } + verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) + // Delete the first Ingress and verify cleanup if err := fc.Delete(context.Background(), ing); err != nil { t.Fatalf("deleting Ingress: %v", err) @@ -175,6 +182,7 @@ func TestIngressPGReconciler(t *testing.T) { if len(cfg.Services) > 0 { t.Error("serve config not cleaned up") } + verifyTailscaledConfig(t, fc, nil) } func TestValidateIngress(t *testing.T) { @@ -464,6 +472,27 @@ func verifyServeConfig(t *testing.T, fc client.Client, serviceName string, wantH } } +func verifyTailscaledConfig(t *testing.T, fc client.Client, expectedServices []string) { + var expected string + if expectedServices != nil { + expectedServicesJSON, err := json.Marshal(expectedServices) + if err != nil { + t.Fatalf("marshaling expected services: %v", err) + } + expected = fmt.Sprintf(`,"AdvertiseServices":%s`, expectedServicesJSON) + } + expectEqual(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName("test-pg", 0), + Namespace: "operator-ns", + Labels: pgSecretLabels("test-pg", "config"), + }, + Data: map[string][]byte{ + tsoperator.TailscaledConfigFileName(106): []byte(fmt.Sprintf(`{"Version":""%s}`, expected)), + }, + }) +} + func setupIngressTest(t *testing.T) (*IngressPGReconciler, client.Client, *fakeTSClient) { t.Helper() @@ -494,9 +523,21 @@ func setupIngressTest(t *testing.T) (*IngressPGReconciler, client.Client, *fakeT }, } + // Pre-create a config Secret for 
the ProxyGroup + pgCfgSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName("test-pg", 0), + Namespace: "operator-ns", + Labels: pgSecretLabels("test-pg", "config"), + }, + Data: map[string][]byte{ + tsoperator.TailscaledConfigFileName(106): []byte("{}"), + }, + } + fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). - WithObjects(pg, pgConfigMap, tsIngressClass). + WithObjects(pg, pgCfgSecret, pgConfigMap, tsIngressClass). WithStatusSubresource(pg). Build() diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 4b17d3470..463d29249 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -452,7 +452,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p for i := range pgReplicas(pg) { cfgSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%d-config", pg.Name, i), + Name: pgConfigSecretName(pg.Name, i), Namespace: r.tsNamespace, Labels: pgSecretLabels(pg.Name, "config"), OwnerReferences: pgOwnerReference(pg), @@ -596,10 +596,35 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32 conf.AuthKey = key } capVerConfigs := make(map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) + + // AdvertiseServices config is set by ingress-pg-reconciler, so make sure we + // don't overwrite it here. + if err := copyAdvertiseServicesConfig(conf, oldSecret, 106); err != nil { + return nil, err + } capVerConfigs[106] = *conf return capVerConfigs, nil } +func copyAdvertiseServicesConfig(conf *ipn.ConfigVAlpha, oldSecret *corev1.Secret, capVer tailcfg.CapabilityVersion) error { + if oldSecret == nil { + return nil + } + + oldConfB := oldSecret.Data[tsoperator.TailscaledConfigFileName(capVer)] + if len(oldConfB) == 0 { + return nil + } + + var oldConf ipn.ConfigVAlpha + if err := json.Unmarshal(oldConfB, &oldConf); err != nil { + return fmt.Errorf("error unmarshalling existing config: %w", err) + } + conf.AdvertiseServices = oldConf.AdvertiseServices + + return nil +} + func (r *ProxyGroupReconciler) validate(_ *tsapi.ProxyGroup) error { return nil } diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 1ea91004b..40bbaec17 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -73,7 +73,7 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string Name: fmt.Sprintf("tailscaledconfig-%d", i), VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: fmt.Sprintf("%s-%d-config", pg.Name, i), + SecretName: pgConfigSecretName(pg.Name, i), }, }, }) @@ -236,8 +236,8 @@ func pgRole(pg *tsapi.ProxyGroup, namespace string) *rbacv1.Role { ResourceNames: func() (secrets []string) { for i := range pgReplicas(pg) { secrets = append(secrets, - fmt.Sprintf("%s-%d-config", pg.Name, i), // Config with auth key. - fmt.Sprintf("%s-%d", pg.Name, i), // State. + pgConfigSecretName(pg.Name, i), // Config with auth key. + fmt.Sprintf("%s-%d", pg.Name, i), // State. 
) } return secrets @@ -349,6 +349,10 @@ func pgReplicas(pg *tsapi.ProxyGroup) int32 { return 2 } +func pgConfigSecretName(pgName string, i int32) string { + return fmt.Sprintf("%s-%d-config", pgName, i) +} + func pgEgressCMName(pg string) string { return fmt.Sprintf("%s-egress-config", pg) } diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 29100de1d..6829b3929 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -24,6 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "tailscale.com/client/tailscale" + "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -446,6 +447,79 @@ func TestProxyGroupTypes(t *testing.T) { }) } +func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + Build() + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + proxyImage: testProxyImage, + Client: fc, + l: zap.Must(zap.NewDevelopment()).Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), + } + + existingServices := []string{"svc1", "svc2"} + existingConfigBytes, err := json.Marshal(ipn.ConfigVAlpha{ + AdvertiseServices: existingServices, + Version: "should-get-overwritten", + }) + if err != nil { + t.Fatal(err) + } + + const pgName = "test-ingress" + mustCreate(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName(pgName, 0), + Namespace: tsNamespace, + }, + // Write directly to Data because the fake client doesn't copy the write-only + // StringData field across to Data for us. + Data: map[string][]byte{ + tsoperator.TailscaledConfigFileName(106): existingConfigBytes, + }, + }) + + mustCreate(t, fc, &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgName, + UID: "test-ingress-uid", + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeIngress, + Replicas: ptr.To[int32](1), + }, + }) + expectReconciled(t, reconciler, "", pgName) + + expectedConfigBytes, err := json.Marshal(ipn.ConfigVAlpha{ + // Preserved. + AdvertiseServices: existingServices, + + // Everything else got updated in the reconcile: + Version: "alpha0", + AcceptDNS: "false", + AcceptRoutes: "false", + Locked: "false", + Hostname: ptr.To(fmt.Sprintf("%s-%d", pgName, 0)), + }) + if err != nil { + t.Fatal(err) + } + expectEqual(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName(pgName, 0), + Namespace: tsNamespace, + ResourceVersion: "2", + }, + StringData: map[string]string{ + tsoperator.TailscaledConfigFileName(106): string(expectedConfigBytes), + }, + }, omitSecretData) +} + func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, wantEgress int) { t.Helper() if r.ingressProxyGroups.Len() != wantIngress { @@ -501,7 +575,7 @@ func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.Prox for i := range pgReplicas(pg) { expectedSecrets = append(expectedSecrets, fmt.Sprintf("%s-%d", pg.Name, i), - fmt.Sprintf("%s-%d-config", pg.Name, i), + pgConfigSecretName(pg.Name, i), ) } } @@ -546,3 +620,11 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG }) } } + +// The operator mostly writes to StringData and reads from Data, but the fake +// client doesn't copy StringData across to Data on write. 
When comparing actual +// vs expected Secrets, use this function to only check what the operator writes +// to StringData. +func omitSecretData(secret *corev1.Secret) { + secret.Data = nil +} From 9d7f2719bb5e120d87fb51ac093534474d279cc4 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Thu, 6 Mar 2025 08:52:35 -0800 Subject: [PATCH 0566/1708] cmd/tsidp: use constant time comparison for client_id/secret (#15222) Use secure constant time comparisons for the client ID and secret values during the allowRelyingParty authorization check. Updates #cleanup Signed-off-by: Patrick O'Doherty --- cmd/tsidp/tsidp.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 3eabef245..96fac58fd 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -11,6 +11,7 @@ import ( "context" crand "crypto/rand" "crypto/rsa" + "crypto/subtle" "crypto/tls" "crypto/x509" "encoding/base64" @@ -345,7 +346,9 @@ func (ar *authRequest) allowRelyingParty(r *http.Request, lc *local.Client) erro clientID = r.FormValue("client_id") clientSecret = r.FormValue("client_secret") } - if ar.funnelRP.ID != clientID || ar.funnelRP.Secret != clientSecret { + clientIDcmp := subtle.ConstantTimeCompare([]byte(clientID), []byte(ar.funnelRP.ID)) + clientSecretcmp := subtle.ConstantTimeCompare([]byte(clientSecret), []byte(ar.funnelRP.Secret)) + if clientIDcmp != 1 || clientSecretcmp != 1 { return fmt.Errorf("tsidp: invalid client credentials") } return nil From 74a2373e1d0e7213d0a89b9b1b4d17f159bb2ba4 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 6 Mar 2025 15:13:10 -0800 Subject: [PATCH 0567/1708] cmd/k8s-operator: ensure HA Ingress can operate in multicluster mode. (#15157) cmd/k8s-operator: ensure HA Ingress can operate in multicluster mode. Update the owner reference mechanism so that: - if during HA Ingress resource creation, a VIPService with some other operator's owner reference is already found, just update the owner references to add one for this operator - if during HA Ingress deletion, the VIPService is found to have owner reference(s) from another operator, don't delete the VIPService, just remove this operator's owner reference - requeue after HA Ingress reconciles that resulted in VIPService updates, to guard against overwrites due to concurrent operations from different clusters. Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/ingress-for-pg.go | 504 +++++++++++++++--------- cmd/k8s-operator/ingress-for-pg_test.go | 142 ++++++- cmd/k8s-operator/ingress.go | 1 + cmd/k8s-operator/operator.go | 20 +- 4 files changed, 476 insertions(+), 191 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 1fa12eb59..85a64a336 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -15,6 +15,9 @@ import ( "slices" "strings" "sync" + "time" + + "math/rand/v2" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" @@ -53,9 +56,9 @@ const ( var gaugePGIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressPGResourceCount) -// IngressPGReconciler is a controller that reconciles Tailscale Ingresses should be exposed on an ingress ProxyGroup -// (in HA mode). -type IngressPGReconciler struct { +// HAIngressReconciler is a controller that reconciles Tailscale Ingresses +// should be exposed on an ingress ProxyGroup (in HA mode). 
+type HAIngressReconciler struct { client.Client recorder record.EventRecorder @@ -65,6 +68,7 @@ type IngressPGReconciler struct { tsNamespace string lc localClient defaultTags []string + operatorID string // stableID of the operator's Tailscale device mu sync.Mutex // protects following // managedIngresses is a set of all ingress resources that we're currently @@ -72,20 +76,29 @@ type IngressPGReconciler struct { managedIngresses set.Slice[types.UID] } -// Reconcile reconciles Ingresses that should be exposed over Tailscale in HA mode (on a ProxyGroup). It looks at all -// Ingresses with tailscale.com/proxy-group annotation. For each such Ingress, it ensures that a VIPService named after -// the hostname of the Ingress exists and is up to date. It also ensures that the serve config for the ingress -// ProxyGroup is updated to route traffic for the VIPService to the Ingress's backend Services. -// When an Ingress is deleted or unexposed, the VIPService and the associated serve config are cleaned up. -// Ingress hostname change also results in the VIPService for the previous hostname being cleaned up and a new VIPService -// being created for the new hostname. -func (a *IngressPGReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { - logger := a.logger.With("Ingress", req.NamespacedName) +// Reconcile reconciles Ingresses that should be exposed over Tailscale in HA +// mode (on a ProxyGroup). It looks at all Ingresses with +// tailscale.com/proxy-group annotation. For each such Ingress, it ensures that +// a VIPService named after the hostname of the Ingress exists and is up to +// date. It also ensures that the serve config for the ingress ProxyGroup is +// updated to route traffic for the VIPService to the Ingress's backend +// Services. Ingress hostname change also results in the VIPService for the +// previous hostname being cleaned up and a new VIPService being created for the +// new hostname. +// HA Ingresses support multi-cluster Ingress setup. +// Each VIPService contains a list of owner references that uniquely identify +// the Ingress resource and the operator. When an Ingress that acts as a +// backend is being deleted, the corresponding VIPService is only deleted if the +// only owner reference that it contains is for this Ingress. If other owner +// references are found, then cleanup operation only removes this Ingress' owner +// reference. +func (r *HAIngressReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { + logger := r.logger.With("Ingress", req.NamespacedName) logger.Debugf("starting reconcile") defer logger.Debugf("reconcile finished") ing := new(networkingv1.Ingress) - err = a.Get(ctx, req.NamespacedName, ing) + err = r.Get(ctx, req.NamespacedName, ing) if apierrors.IsNotFound(err) { // Request object not found, could have been deleted after reconcile request. logger.Debugf("Ingress not found, assuming it was deleted") @@ -99,57 +112,71 @@ func (a *IngressPGReconciler) Reconcile(ctx context.Context, req reconcile.Reque hostname := hostnameForIngress(ing) logger = logger.With("hostname", hostname) - if !ing.DeletionTimestamp.IsZero() || !shouldExpose(ing) { - return res, a.maybeCleanup(ctx, hostname, ing, logger) + // needsRequeue is set to true if the underlying VIPService has changed as a result of this reconcile. 
If that + // is the case, we reconcile the Ingress one more time to ensure that concurrent updates to the VIPService in a + // multi-cluster Ingress setup have not resulted in another actor overwriting our VIPService update. + needsRequeue := false + if !ing.DeletionTimestamp.IsZero() || !r.shouldExpose(ing) { + needsRequeue, err = r.maybeCleanup(ctx, hostname, ing, logger) + } else { + needsRequeue, err = r.maybeProvision(ctx, hostname, ing, logger) } - - if err := a.maybeProvision(ctx, hostname, ing, logger); err != nil { - return res, fmt.Errorf("failed to provision: %w", err) + if err != nil { + return res, err + } + if needsRequeue { + res = reconcile.Result{RequeueAfter: requeueInterval()} } return res, nil } -// maybeProvision ensures that the VIPService and serve config for the Ingress are created or updated. -func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { - if err := validateIngressClass(ctx, a.Client); err != nil { +// maybeProvision ensures that a VIPService for this Ingress exists and is up to date and that the serve config for the +// corresponding ProxyGroup contains the Ingress backend's definition. +// If a VIPService does not exist, it will be created. +// If a VIPService exists, but only with owner references from other operator instances, an owner reference for this +// operator instance is added. +// If a VIPService exists, but does not have an owner reference from any operator, we error +// out assuming that this is an owner reference created by an unknown actor. +// Returns true if the operation resulted in a VIPService update. +func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) (svcsChanged bool, err error) { + if err := validateIngressClass(ctx, r.Client); err != nil { logger.Infof("error validating tailscale IngressClass: %v.", err) - return nil + return false, nil } - // Get and validate ProxyGroup readiness pgName := ing.Annotations[AnnotationProxyGroup] if pgName == "" { logger.Infof("[unexpected] no ProxyGroup annotation, skipping VIPService provisioning") - return nil + return false, nil } logger = logger.With("ProxyGroup", pgName) pg := &tsapi.ProxyGroup{} - if err := a.Get(ctx, client.ObjectKey{Name: pgName}, pg); err != nil { + if err := r.Get(ctx, client.ObjectKey{Name: pgName}, pg); err != nil { if apierrors.IsNotFound(err) { logger.Infof("ProxyGroup %q does not exist", pgName) - return nil + return false, nil } - return fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) + return false, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) } if !tsoperator.ProxyGroupIsReady(pg) { - // TODO(irbekrm): we need to reconcile ProxyGroup Ingresses on ProxyGroup changes to not miss the status update - // in this case. 
- logger.Infof("ProxyGroup %q is not ready", pgName) - return nil + logger.Infof("ProxyGroup %q is not (yet) ready", pgName) + return false, nil } // Validate Ingress configuration - if err := a.validateIngress(ing, pg); err != nil { + if err := r.validateIngress(ctx, ing, pg); err != nil { logger.Infof("invalid Ingress configuration: %v", err) - a.recorder.Event(ing, corev1.EventTypeWarning, "InvalidIngressConfiguration", err.Error()) - return nil + r.recorder.Event(ing, corev1.EventTypeWarning, "InvalidIngressConfiguration", err.Error()) + return false, nil } - if !IsHTTPSEnabledOnTailnet(a.tsnetServer) { - a.recorder.Event(ing, corev1.EventTypeWarning, "HTTPSNotEnabled", "HTTPS is not enabled on the tailnet; ingress may not work") + if !IsHTTPSEnabledOnTailnet(r.tsnetServer) { + r.recorder.Event(ing, corev1.EventTypeWarning, "HTTPSNotEnabled", "HTTPS is not enabled on the tailnet; ingress may not work") } + logger = logger.With("proxy-group", pg.Name) + if !slices.Contains(ing.Finalizers, FinalizerNamePG) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. So, @@ -157,64 +184,78 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin // multi-reconcile operation is underway. logger.Infof("exposing Ingress over tailscale") ing.Finalizers = append(ing.Finalizers, FinalizerNamePG) - if err := a.Update(ctx, ing); err != nil { - return fmt.Errorf("failed to add finalizer: %w", err) + if err := r.Update(ctx, ing); err != nil { + return false, fmt.Errorf("failed to add finalizer: %w", err) } - a.mu.Lock() - a.managedIngresses.Add(ing.UID) - gaugePGIngressResources.Set(int64(a.managedIngresses.Len())) - a.mu.Unlock() - } - - // 1. Ensure that if Ingress' hostname has changed, any VIPService resources corresponding to the old hostname - // are cleaned up. - // In practice, this function will ensure that any VIPServices that are associated with the provided ProxyGroup - // and no longer owned by an Ingress are cleaned up. This is fine- it is not expensive and ensures that in edge - // cases (a single update changed both hostname and removed ProxyGroup annotation) the VIPService is more likely - // to be (eventually) removed. - if err := a.maybeCleanupProxyGroup(ctx, pgName, logger); err != nil { - return fmt.Errorf("failed to cleanup VIPService resources for ProxyGroup: %w", err) - } - - // 2. Ensure that there isn't a VIPService with the same hostname already created and not owned by this Ingress. - // TODO(irbekrm): perhaps in future we could have record names being stored on VIPServices. I am not certain if - // there might not be edge cases (custom domains, etc?) where attempting to determine the DNS name of the - // VIPService in this way won't be incorrect. - tcd, err := a.tailnetCertDomain(ctx) + r.mu.Lock() + r.managedIngresses.Add(ing.UID) + gaugePGIngressResources.Set(int64(r.managedIngresses.Len())) + r.mu.Unlock() + } + + // 1. Ensure that if Ingress' hostname has changed, any VIPService + // resources corresponding to the old hostname are cleaned up. + // In practice, this function will ensure that any VIPServices that are + // associated with the provided ProxyGroup and no longer owned by an + // Ingress are cleaned up. This is fine- it is not expensive and ensures + // that in edge cases (a single update changed both hostname and removed + // ProxyGroup annotation) the VIPService is more likely to be + // (eventually) removed. 
+ svcsChanged, err = r.maybeCleanupProxyGroup(ctx, pgName, logger) if err != nil { - return fmt.Errorf("error determining DNS name base: %w", err) + return false, fmt.Errorf("failed to cleanup VIPService resources for ProxyGroup: %w", err) + } + + // 2. Ensure that there isn't a VIPService with the same hostname + // already created and not owned by this Ingress. + // TODO(irbekrm): perhaps in future we could have record names being + // stored on VIPServices. I am not certain if there might not be edge + // cases (custom domains, etc?) where attempting to determine the DNS + // name of the VIPService in this way won't be incorrect. + tcd, err := r.tailnetCertDomain(ctx) + if err != nil { + return false, fmt.Errorf("error determining DNS name base: %w", err) } dnsName := hostname + "." + tcd serviceName := tailcfg.ServiceName("svc:" + hostname) - existingVIPSvc, err := a.tsClient.GetVIPService(ctx, serviceName) - // TODO(irbekrm): here and when creating the VIPService, verify if the error is not terminal (and therefore - // should not be reconciled). For example, if the hostname is already a hostname of a Tailscale node, the GET - // here will fail. + existingVIPSvc, err := r.tsClient.GetVIPService(ctx, serviceName) + // TODO(irbekrm): here and when creating the VIPService, verify if the + // error is not terminal (and therefore should not be reconciled). For + // example, if the hostname is already a hostname of a Tailscale node, + // the GET here will fail. if err != nil { errResp := &tailscale.ErrResponse{} if ok := errors.As(err, errResp); ok && errResp.Status != http.StatusNotFound { - return fmt.Errorf("error getting VIPService %q: %w", hostname, err) + return false, fmt.Errorf("error getting VIPService %q: %w", hostname, err) } } - if existingVIPSvc != nil && !isVIPServiceForIngress(existingVIPSvc, ing) { - logger.Infof("VIPService %q for MagicDNS name %q already exists, but is not owned by this Ingress. Please delete it manually and recreate this Ingress to proceed or create an Ingress for a different MagicDNS name", hostname, dnsName) - a.recorder.Event(ing, corev1.EventTypeWarning, "ConflictingVIPServiceExists", fmt.Sprintf("VIPService %q for MagicDNS name %q already exists, but is not owned by this Ingress. Please delete it manually to proceed or create an Ingress for a different MagicDNS name", hostname, dnsName)) - return nil + // Generate the VIPService comment for new or existing VIPService. This + // checks and ensures that VIPService's owner references are updated for + // this Ingress and errors if that is not possible (i.e. because it + // appears that the VIPService has been created by a non-operator + // actor). + svcComment, err := r.ownerRefsComment(existingVIPSvc) + if err != nil { + const instr = "To proceed, you can either manually delete the existing VIPService or choose a different MagicDNS name at `.spec.tls.hosts[0] in the Ingress definition" + msg := fmt.Sprintf("error ensuring ownership of VIPService %s: %v. %s", hostname, err, instr) + logger.Warn(msg) + r.recorder.Event(ing, corev1.EventTypeWarning, "InvalidVIPService", msg) + return false, nil } - // 3. Ensure that the serve config for the ProxyGroup contains the VIPService - cm, cfg, err := a.proxyGroupServeConfig(ctx, pgName) + // 3. Ensure that the serve config for the ProxyGroup contains the VIPService. 
+ cm, cfg, err := r.proxyGroupServeConfig(ctx, pgName) if err != nil { - return fmt.Errorf("error getting Ingress serve config: %w", err) + return false, fmt.Errorf("error getting Ingress serve config: %w", err) } if cm == nil { logger.Infof("no Ingress serve config ConfigMap found, unable to update serve config. Ensure that ProxyGroup is healthy.") - return nil + return svcsChanged, nil } ep := ipn.HostPort(fmt.Sprintf("%s:443", dnsName)) - handlers, err := handlersForIngress(ctx, ing, a.Client, a.recorder, dnsName, logger) + handlers, err := handlersForIngress(ctx, ing, r.Client, r.recorder, dnsName, logger) if err != nil { - return fmt.Errorf("failed to get handlers for Ingress: %w", err) + return false, fmt.Errorf("failed to get handlers for Ingress: %w", err) } ingCfg := &ipn.ServiceConfig{ TCP: map[uint16]*ipn.TCPPortHandler{ @@ -250,16 +291,16 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin mak.Set(&cfg.Services, serviceName, ingCfg) cfgBytes, err := json.Marshal(cfg) if err != nil { - return fmt.Errorf("error marshaling serve config: %w", err) + return false, fmt.Errorf("error marshaling serve config: %w", err) } mak.Set(&cm.BinaryData, serveConfigKey, cfgBytes) - if err := a.Update(ctx, cm); err != nil { - return fmt.Errorf("error updating serve config: %w", err) + if err := r.Update(ctx, cm); err != nil { + return false, fmt.Errorf("error updating serve config: %w", err) } } // 4. Ensure that the VIPService exists and is up to date. - tags := a.defaultTags + tags := r.defaultTags if tstr, ok := ing.Annotations[AnnotationTags]; ok { tags = strings.Split(tstr, ",") } @@ -273,27 +314,32 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin Name: serviceName, Tags: tags, Ports: vipPorts, - Comment: fmt.Sprintf(VIPSvcOwnerRef, ing.UID), + Comment: svcComment, } if existingVIPSvc != nil { vipSvc.Addrs = existingVIPSvc.Addrs } + // TODO(irbekrm): right now if two Ingress resources attempt to apply different VIPService configs (different + // tags, or HTTP endpoint settings) we can end up reconciling those in a loop. We should detect when an Ingress + // with the same generation number has been reconciled ~more than N times and stop attempting to apply updates. if existingVIPSvc == nil || !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) || - !reflect.DeepEqual(vipSvc.Ports, existingVIPSvc.Ports) { + !reflect.DeepEqual(vipSvc.Ports, existingVIPSvc.Ports) || + !strings.EqualFold(vipSvc.Comment, existingVIPSvc.Comment) { logger.Infof("Ensuring VIPService %q exists and is up to date", hostname) - if err := a.tsClient.CreateOrUpdateVIPService(ctx, vipSvc); err != nil { - logger.Infof("error creating VIPService: %v", err) - return fmt.Errorf("error creating VIPService: %w", err) + if err := r.tsClient.CreateOrUpdateVIPService(ctx, vipSvc); err != nil { + return false, fmt.Errorf("error creating VIPService: %w", err) } } // 5. Update tailscaled's AdvertiseServices config, which should add the VIPService // IPs to the ProxyGroup Pods' AllowedIPs in the next netmap update if approved. 
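	// For example, for an Ingress with hostname "my-svc" this step leaves the
	// ProxyGroup config Secret(s) containing "AdvertiseServices":["svc:my-svc"],
	// the shape asserted by verifyTailscaledConfig in the tests.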
- if err = a.maybeUpdateAdvertiseServicesConfig(ctx, pg.Name, serviceName, true, logger); err != nil { - return fmt.Errorf("failed to update tailscaled config: %w", err) + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg.Name, serviceName, true, logger); err != nil { + return false, fmt.Errorf("failed to update tailscaled config: %w", err) } + // TODO(irbekrm): check that the replicas are ready to route traffic for the VIPService before updating Ingress + // status. // 6. Update Ingress status oldStatus := ing.Status.DeepCopy() ports := []networkingv1.IngressPortStatus{ @@ -315,30 +361,29 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin }, } if apiequality.Semantic.DeepEqual(oldStatus, ing.Status) { - return nil + return svcsChanged, nil } - if err := a.Status().Update(ctx, ing); err != nil { - return fmt.Errorf("failed to update Ingress status: %w", err) + if err := r.Status().Update(ctx, ing); err != nil { + return false, fmt.Errorf("failed to update Ingress status: %w", err) } - return nil + return svcsChanged, nil } -// maybeCleanupProxyGroup ensures that if an Ingress hostname has changed, any VIPService resources created for the -// Ingress' ProxyGroup corresponding to the old hostname are cleaned up. A run of this function will ensure that any -// VIPServices that are associated with the provided ProxyGroup and no longer owned by an Ingress are cleaned up. -func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, pgName string, logger *zap.SugaredLogger) error { +// VIPServices that are associated with the provided ProxyGroup and no longer managed this operator's instance are deleted, if not owned by other operator instances, else the owner reference is cleaned up. +// Returns true if the operation resulted in existing VIPService updates (owner reference removal). +func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger) (svcsChanged bool, err error) { // Get serve config for the ProxyGroup - cm, cfg, err := a.proxyGroupServeConfig(ctx, pgName) + cm, cfg, err := r.proxyGroupServeConfig(ctx, proxyGroupName) if err != nil { - return fmt.Errorf("getting serve config: %w", err) + return false, fmt.Errorf("getting serve config: %w", err) } if cfg == nil { - return nil // ProxyGroup does not have any VIPServices + return false, nil // ProxyGroup does not have any VIPServices } ingList := &networkingv1.IngressList{} - if err := a.List(ctx, ingList); err != nil { - return fmt.Errorf("listing Ingresses: %w", err) + if err := r.List(ctx, ingList); err != nil { + return false, fmt.Errorf("listing Ingresses: %w", err) } serveConfigChanged := false // For each VIPService in serve config... @@ -357,26 +402,21 @@ func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, pgName logger.Infof("VIPService %q is not owned by any Ingress, cleaning up", vipServiceName) // Delete the VIPService from control if necessary. 
- svc, err := a.tsClient.GetVIPService(ctx, vipServiceName) - if err != nil { - errResp := &tailscale.ErrResponse{} - if ok := errors.As(err, errResp); !ok || errResp.Status != http.StatusNotFound { - return err - } - } + svc, _ := r.tsClient.GetVIPService(ctx, vipServiceName) if svc != nil && isVIPServiceForAnyIngress(svc) { logger.Infof("cleaning up orphaned VIPService %q", vipServiceName) - if err := a.tsClient.DeleteVIPService(ctx, vipServiceName); err != nil { + svcsChanged, err = r.cleanupVIPService(ctx, vipServiceName, logger) + if err != nil { errResp := &tailscale.ErrResponse{} if !errors.As(err, &errResp) || errResp.Status != http.StatusNotFound { - return fmt.Errorf("deleting VIPService %q: %w", vipServiceName, err) + return false, fmt.Errorf("deleting VIPService %q: %w", vipServiceName, err) } } } // Make sure the VIPService is not advertised in tailscaled or serve config. - if err = a.maybeUpdateAdvertiseServicesConfig(ctx, pgName, vipServiceName, false, logger); err != nil { - return fmt.Errorf("failed to update tailscaled config services: %w", err) + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, proxyGroupName, vipServiceName, false, logger); err != nil { + return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } delete(cfg.Services, vipServiceName) serveConfigChanged = true @@ -386,55 +426,67 @@ func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, pgName if serveConfigChanged { cfgBytes, err := json.Marshal(cfg) if err != nil { - return fmt.Errorf("marshaling serve config: %w", err) + return false, fmt.Errorf("marshaling serve config: %w", err) } mak.Set(&cm.BinaryData, serveConfigKey, cfgBytes) - if err := a.Update(ctx, cm); err != nil { - return fmt.Errorf("updating serve config: %w", err) + if err := r.Update(ctx, cm); err != nil { + return false, fmt.Errorf("updating serve config: %w", err) } } - - return nil + return svcsChanged, nil } // maybeCleanup ensures that any resources, such as a VIPService created for this Ingress, are cleaned up when the -// Ingress is being deleted or is unexposed. -func (a *IngressPGReconciler) maybeCleanup(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { +// Ingress is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the VIPService is only +// deleted if it does not contain any other owner references. If it does the cleanup only removes the owner reference +// corresponding to this Ingress. +func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) (svcChanged bool, err error) { logger.Debugf("Ensuring any resources for Ingress are cleaned up") ix := slices.Index(ing.Finalizers, FinalizerNamePG) if ix < 0 { logger.Debugf("no finalizer, nothing to do") - a.mu.Lock() - defer a.mu.Unlock() - a.managedIngresses.Remove(ing.UID) - gaugePGIngressResources.Set(int64(a.managedIngresses.Len())) - return nil + return false, nil } + logger.Infof("Ensuring that VIPService %q configuration is cleaned up", hostname) + + // Ensure that if cleanup succeeded Ingress finalizers are removed. + defer func() { + if err != nil { + return + } + if e := r.deleteFinalizer(ctx, ing, logger); err != nil { + err = errors.Join(err, e) + } + }() - // 1. Check if there is a VIPService created for this Ingress. + // 1. Check if there is a VIPService associated with this Ingress. 
pg := ing.Annotations[AnnotationProxyGroup] - cm, cfg, err := a.proxyGroupServeConfig(ctx, pg) + cm, cfg, err := r.proxyGroupServeConfig(ctx, pg) if err != nil { - return fmt.Errorf("error getting ProxyGroup serve config: %w", err) + return false, fmt.Errorf("error getting ProxyGroup serve config: %w", err) } serviceName := tailcfg.ServiceName("svc:" + hostname) + // VIPService is always first added to serve config and only then created in the Tailscale API, so if it is not - // found in the serve config, we can assume that there is no VIPService. TODO(irbekrm): once we have ingress - // ProxyGroup, we will probably add currently exposed VIPServices to its status. At that point, we can use the - // status rather than checking the serve config each time. - if cfg == nil || cfg.Services == nil || cfg.Services[serviceName] == nil { - return nil + // found in the serve config, we can assume that there is no VIPService. (If the serve config does not exist at + // all, it is possible that the ProxyGroup has been deleted before cleaning up the Ingress, so carry on with + // cleanup). + if cfg != nil && cfg.Services != nil && cfg.Services[serviceName] == nil { + return false, nil } - logger.Infof("Ensuring that VIPService %q configuration is cleaned up", hostname) - // 2. Delete the VIPService. - if err := a.deleteVIPServiceIfExists(ctx, serviceName, ing, logger); err != nil { - return fmt.Errorf("error deleting VIPService: %w", err) + // 2. Clean up the VIPService resources. + svcChanged, err = r.cleanupVIPService(ctx, serviceName, logger) + if err != nil { + return false, fmt.Errorf("error deleting VIPService: %w", err) + } + if cfg == nil || cfg.Services == nil { // user probably deleted the ProxyGroup + return svcChanged, nil } // 3. Unadvertise the VIPService in tailscaled config. - if err = a.maybeUpdateAdvertiseServicesConfig(ctx, pg, serviceName, false, logger); err != nil { - return fmt.Errorf("failed to update tailscaled config services: %w", err) + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg, serviceName, false, logger); err != nil { + return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } // 4. Remove the VIPService from the serve config for the ProxyGroup. 
@@ -442,24 +494,13 @@ func (a *IngressPGReconciler) maybeCleanup(ctx context.Context, hostname string, delete(cfg.Services, serviceName) cfgBytes, err := json.Marshal(cfg) if err != nil { - return fmt.Errorf("error marshaling serve config: %w", err) + return false, fmt.Errorf("error marshaling serve config: %w", err) } mak.Set(&cm.BinaryData, serveConfigKey, cfgBytes) - if err := a.Update(ctx, cm); err != nil { - return fmt.Errorf("error updating ConfigMap %q: %w", cm.Name, err) - } - - if err := a.deleteFinalizer(ctx, ing, logger); err != nil { - return fmt.Errorf("failed to remove finalizer: %w", err) - } - a.mu.Lock() - defer a.mu.Unlock() - a.managedIngresses.Remove(ing.UID) - gaugePGIngressResources.Set(int64(a.managedIngresses.Len())) - return nil + return svcChanged, r.Update(ctx, cm) } -func (a *IngressPGReconciler) deleteFinalizer(ctx context.Context, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { +func (r *HAIngressReconciler) deleteFinalizer(ctx context.Context, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { found := false ing.Finalizers = slices.DeleteFunc(ing.Finalizers, func(f string) bool { found = true @@ -470,9 +511,13 @@ func (a *IngressPGReconciler) deleteFinalizer(ctx context.Context, ing *networki } logger.Debug("ensure %q finalizer is removed", FinalizerNamePG) - if err := a.Update(ctx, ing); err != nil { + if err := r.Update(ctx, ing); err != nil { return fmt.Errorf("failed to remove finalizer %q: %w", FinalizerNamePG, err) } + r.mu.Lock() + defer r.mu.Unlock() + r.managedIngresses.Remove(ing.UID) + gaugePGIngressResources.Set(int64(r.managedIngresses.Len())) return nil } @@ -480,15 +525,15 @@ func pgIngressCMName(pg string) string { return fmt.Sprintf("%s-ingress-config", pg) } -func (a *IngressPGReconciler) proxyGroupServeConfig(ctx context.Context, pg string) (cm *corev1.ConfigMap, cfg *ipn.ServeConfig, err error) { +func (r *HAIngressReconciler) proxyGroupServeConfig(ctx context.Context, pg string) (cm *corev1.ConfigMap, cfg *ipn.ServeConfig, err error) { name := pgIngressCMName(pg) cm = &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: a.tsNamespace, + Namespace: r.tsNamespace, }, } - if err := a.Get(ctx, client.ObjectKeyFromObject(cm), cm); err != nil && !apierrors.IsNotFound(err) { + if err := r.Get(ctx, client.ObjectKeyFromObject(cm), cm); err != nil && !apierrors.IsNotFound(err) { return nil, nil, fmt.Errorf("error retrieving ingress serve config ConfigMap %s: %v", name, err) } if apierrors.IsNotFound(err) { @@ -508,16 +553,16 @@ type localClient interface { } // tailnetCertDomain returns the base domain (TCD) of the current tailnet. -func (a *IngressPGReconciler) tailnetCertDomain(ctx context.Context) (string, error) { - st, err := a.lc.StatusWithoutPeers(ctx) +func (r *HAIngressReconciler) tailnetCertDomain(ctx context.Context) (string, error) { + st, err := r.lc.StatusWithoutPeers(ctx) if err != nil { return "", fmt.Errorf("error getting tailscale status: %w", err) } return st.CurrentTailnet.MagicDNSSuffix, nil } -// shouldExpose returns true if the Ingress should be exposed over Tailscale in HA mode (on a ProxyGroup) -func shouldExpose(ing *networkingv1.Ingress) bool { +// shouldExpose returns true if the Ingress should be exposed over Tailscale in HA mode (on a ProxyGroup). 
+func (r *HAIngressReconciler) shouldExpose(ing *networkingv1.Ingress) bool { isTSIngress := ing != nil && ing.Spec.IngressClassName != nil && *ing.Spec.IngressClassName == tailscaleIngressClassName @@ -525,13 +570,6 @@ func shouldExpose(ing *networkingv1.Ingress) bool { return isTSIngress && pgAnnot != "" } -func isVIPServiceForIngress(svc *tailscale.VIPService, ing *networkingv1.Ingress) bool { - if svc == nil || ing == nil { - return false - } - return strings.EqualFold(svc.Comment, fmt.Sprintf(VIPSvcOwnerRef, ing.UID)) -} - func isVIPServiceForAnyIngress(svc *tailscale.VIPService) bool { if svc == nil { return false @@ -545,7 +583,7 @@ func isVIPServiceForAnyIngress(svc *tailscale.VIPService) bool { // - The derived hostname is a valid DNS label // - The referenced ProxyGroup exists and is of type 'ingress' // - Ingress' TLS block is invalid -func (a *IngressPGReconciler) validateIngress(ing *networkingv1.Ingress, pg *tsapi.ProxyGroup) error { +func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networkingv1.Ingress, pg *tsapi.ProxyGroup) error { var errs []error // Validate tags if present @@ -581,30 +619,66 @@ func (a *IngressPGReconciler) validateIngress(ing *networkingv1.Ingress, pg *tsa errs = append(errs, fmt.Errorf("ProxyGroup %q is not ready", pg.Name)) } + // It is invalid to have multiple Ingress resources for the same VIPService in one cluster. + ingList := &networkingv1.IngressList{} + if err := r.List(ctx, ingList); err != nil { + errs = append(errs, fmt.Errorf("[unexpected] error listing Ingresses: %w", err)) + return errors.Join(errs...) + } + for _, i := range ingList.Items { + if r.shouldExpose(&i) && hostnameForIngress(&i) == hostname && i.Name != ing.Name { + errs = append(errs, fmt.Errorf("found duplicate Ingress %q for hostname %q - multiple Ingresses for the same hostname in the same cluster are not allowed", i.Name, hostname)) + } + } return errors.Join(errs...) } -// deleteVIPServiceIfExists attempts to delete the VIPService if it exists and is owned by the given Ingress. -func (a *IngressPGReconciler) deleteVIPServiceIfExists(ctx context.Context, name tailcfg.ServiceName, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { - svc, err := a.tsClient.GetVIPService(ctx, name) +// cleanupVIPService deletes any VIPService by the provided name if it is not owned by operator instances other than this one. +// If a VIPService is found, but contains other owner references, only removes this operator's owner reference. +// If a VIPService by the given name is not found or does not contain this operator's owner reference, do nothing. +// It returns true if an existing VIPService was updated to remove owner reference, as well as any error that occurred. 
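// For illustration (the operator IDs below are placeholders): a VIPService
// co-owned by two operator instances carries a Comment such as
//
//	{"ownerRefs":[{"operatorID":"operator-a"},{"operatorID":"operator-b"}]}
//
// so this function deletes the VIPService only when its own reference is the
// last one remaining; otherwise it merely removes that reference.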
+func (r *HAIngressReconciler) cleanupVIPService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (updated bool, _ error) { + svc, err := r.tsClient.GetVIPService(ctx, name) if err != nil { errResp := &tailscale.ErrResponse{} if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { - return nil + return false, nil } - return fmt.Errorf("error getting VIPService: %w", err) + return false, fmt.Errorf("error getting VIPService: %w", err) } - - if !isVIPServiceForIngress(svc, ing) { - return nil + if svc == nil { + return false, nil } - + c, err := parseComment(svc) + if err != nil { + return false, fmt.Errorf("error parsing VIPService comment") + } + if c == nil || len(c.OwnerRefs) == 0 { + return false, nil + } + // Comparing with the operatorID only means that we will not be able to + // clean up VIPServices in cases where the operator was deleted from the + // cluster before deleting the Ingress. Perhaps the comparison could be + // 'if or.OperatorID === r.operatorID || or.ingressUID == r.ingressUID'. + ix := slices.IndexFunc(c.OwnerRefs, func(or OwnerRef) bool { + return or.OperatorID == r.operatorID + }) + if ix == -1 { + return false, nil + } + if len(c.OwnerRefs) == 1 { + logger.Infof("Deleting VIPService %q", name) + return false, r.tsClient.DeleteVIPService(ctx, name) + } + c.OwnerRefs = slices.Delete(c.OwnerRefs, ix, ix+1) logger.Infof("Deleting VIPService %q", name) - if err = a.tsClient.DeleteVIPService(ctx, name); err != nil { - return fmt.Errorf("error deleting VIPService: %w", err) + json, err := json.Marshal(c) + if err != nil { + return false, fmt.Errorf("error marshalling updated VIPService owner reference: %w", err) } - return nil + svc.Comment = string(json) + return true, r.tsClient.CreateOrUpdateVIPService(ctx, svc) } // isHTTPEndpointEnabled returns true if the Ingress has been configured to expose an HTTP endpoint to tailnet. @@ -615,7 +689,7 @@ func isHTTPEndpointEnabled(ing *networkingv1.Ingress) bool { return ing.Annotations[annotationHTTPEndpoint] == "enabled" } -func (a *IngressPGReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, pgName string, serviceName tailcfg.ServiceName, shouldBeAdvertised bool, logger *zap.SugaredLogger) (err error) { +func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, pgName string, serviceName tailcfg.ServiceName, shouldBeAdvertised bool, logger *zap.SugaredLogger) (err error) { logger.Debugf("Updating ProxyGroup tailscaled configs to advertise service %q: %v", serviceName, shouldBeAdvertised) // Get all config Secrets for this ProxyGroup. @@ -665,3 +739,75 @@ func (a *IngressPGReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con return nil } + +// OwnerRef is an owner reference that uniquely identifies a Tailscale +// Kubernetes operator instance. +type OwnerRef struct { + // OperatorID is the stable ID of the operator's Tailscale device. + OperatorID string `json:"operatorID,omitempty"` +} + +// comment is the content of the VIPService.Comment field. +type comment struct { + // OwnerRefs is a list of owner references that identify all operator + // instances that manage this VIPService. + OwnerRefs []OwnerRef `json:"ownerRefs,omitempty"` +} + +// ownerRefsComment return VIPService Comment that includes owner reference for this +// operator instance for the provided VIPService. If the VIPService is nil, a +// new comment with owner ref is returned. 
If the VIPService is not nil, the
+// existing comment is returned with the owner reference added, if not already
+// present. If the VIPService is not nil, but does not contain a comment, we
+// return an error as this likely means that the VIPService was created by
+// something other than a Tailscale Kubernetes operator.
+func (r *HAIngressReconciler) ownerRefsComment(svc *tailscale.VIPService) (string, error) {
+	ref := OwnerRef{
+		OperatorID: r.operatorID,
+	}
+	if svc == nil {
+		c := &comment{OwnerRefs: []OwnerRef{ref}}
+		json, err := json.Marshal(c)
+		if err != nil {
+			return "", fmt.Errorf("[unexpected] unable to marshal VIPService comment contents: %w, please report this", err)
+		}
+		return string(json), nil
+	}
+	c, err := parseComment(svc)
+	if err != nil {
+		return "", fmt.Errorf("error parsing existing VIPService comment: %w", err)
+	}
+	if c == nil || len(c.OwnerRefs) == 0 {
+		return "", fmt.Errorf("VIPService %s exists, but does not contain Comment field with owner references - not proceeding as this is likely a resource created by something other than a Tailscale Kubernetes Operator", svc.Name)
+	}
+	if slices.Contains(c.OwnerRefs, ref) { // up to date
+		return svc.Comment, nil
+	}
+	c.OwnerRefs = append(c.OwnerRefs, ref)
+	json, err := json.Marshal(c)
+	if err != nil {
+		return "", fmt.Errorf("error marshalling updated owner references: %w", err)
+	}
+	return string(json), nil
+}
+
+// parseComment returns VIPService comment or nil if none found or not matching the expected format.
+func parseComment(vipSvc *tailscale.VIPService) (*comment, error) {
+	if vipSvc.Comment == "" {
+		return nil, nil
+	}
+	c := &comment{}
+	if err := json.Unmarshal([]byte(vipSvc.Comment), c); err != nil {
+		return nil, fmt.Errorf("error parsing VIPService Comment field %q: %w", vipSvc.Comment, err)
+	}
+	return c, nil
+}
+
+// requeueInterval returns a time duration between 5 and 10 minutes, which is
+// the period of time after which an HA Ingress, whose VIPService has been newly
+// created or changed, needs to be requeued. This is to protect against
+// VIPService owner references being overwritten as a result of concurrent
+// updates during multi-cluster Ingress create/update operations.
+func requeueInterval() time.Duration { + return time.Duration(rand.N(5)+5) * time.Minute +} diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 8c4ffb691..7a995e169 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -23,6 +23,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" tsoperator "tailscale.com/k8s-operator" @@ -190,6 +191,15 @@ func TestValidateIngress(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-ingress", Namespace: "default", + Annotations: map[string]string{ + AnnotationProxyGroup: "test-pg", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"test"}}, + }, }, } @@ -213,10 +223,11 @@ func TestValidateIngress(t *testing.T) { } tests := []struct { - name string - ing *networkingv1.Ingress - pg *tsapi.ProxyGroup - wantErr string + name string + ing *networkingv1.Ingress + pg *tsapi.ProxyGroup + existingIngs []networkingv1.Ingress + wantErr string }{ { name: "valid_ingress_with_hostname", @@ -306,12 +317,38 @@ func TestValidateIngress(t *testing.T) { }, wantErr: "ProxyGroup \"test-pg\" is not ready", }, + { + name: "duplicate_hostname", + ing: baseIngress, + pg: readyProxyGroup, + existingIngs: []networkingv1.Ingress{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-ingress", + Namespace: "default", + Annotations: map[string]string{ + AnnotationProxyGroup: "test-pg", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"test"}}, + }, + }, + }}, + wantErr: `found duplicate Ingress "existing-ingress" for hostname "test" - multiple Ingresses for the same hostname in the same cluster are not allowed`, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &IngressPGReconciler{} - err := r.validateIngress(tt.ing, tt.pg) + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(tt.ing). + WithLists(&networkingv1.IngressList{Items: tt.existingIngs}). 
+ Build() + r := &HAIngressReconciler{Client: fc} + err := r.validateIngress(context.Background(), tt.ing, tt.pg) if (err == nil && tt.wantErr != "") || (err != nil && err.Error() != tt.wantErr) { t.Errorf("validateIngress() error = %v, wantErr %v", err, tt.wantErr) } @@ -493,8 +530,7 @@ func verifyTailscaledConfig(t *testing.T, fc client.Client, expectedServices []s }) } -func setupIngressTest(t *testing.T) (*IngressPGReconciler, client.Client, *fakeTSClient) { - t.Helper() +func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeTSClient) { tsIngressClass := &networkingv1.IngressClass{ ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, @@ -552,9 +588,9 @@ func setupIngressTest(t *testing.T) (*IngressPGReconciler, client.Client, *fakeT if err := fc.Status().Update(context.Background(), pg); err != nil { t.Fatal(err) } + fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} ft := &fakeTSClient{} - fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} zl, err := zap.NewDevelopment() if err != nil { t.Fatal(err) @@ -568,12 +604,12 @@ func setupIngressTest(t *testing.T) (*IngressPGReconciler, client.Client, *fakeT }, } - ingPGR := &IngressPGReconciler{ + ingPGR := &HAIngressReconciler{ Client: fc, tsClient: ft, - tsnetServer: fakeTsnetServer, defaultTags: []string{"tag:k8s"}, tsNamespace: "operator-ns", + tsnetServer: fakeTsnetServer, logger: zl.Sugar(), recorder: record.NewFakeRecorder(10), lc: lc, @@ -581,3 +617,87 @@ func setupIngressTest(t *testing.T) (*IngressPGReconciler, client.Client, *fakeT return ingPGR, fc, ft } + +func TestIngressPGReconciler_MultiCluster(t *testing.T) { + ingPGR, fc, ft := setupIngressTest(t) + ingPGR.operatorID = "operator-1" + + // Create initial Ingress + ing := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"my-svc"}}, + }, + }, + } + mustCreate(t, fc, ing) + + // Simulate existing VIPService from another cluster + existingVIPSvc := &tailscale.VIPService{ + Name: "svc:my-svc", + Comment: `{"ownerrefs":[{"operatorID":"operator-2"}]}`, + } + ft.vipServices = map[tailcfg.ServiceName]*tailscale.VIPService{ + "svc:my-svc": existingVIPSvc, + } + + // Verify reconciliation adds our operator reference + expectReconciled(t, ingPGR, "default", "test-ingress") + + vipSvc, err := ft.GetVIPService(context.Background(), "svc:my-svc") + if err != nil { + t.Fatalf("getting VIPService: %v", err) + } + if vipSvc == nil { + t.Fatal("VIPService not found") + } + + c := &comment{} + if err := json.Unmarshal([]byte(vipSvc.Comment), c); err != nil { + t.Fatalf("parsing comment: %v", err) + } + + wantOwnerRefs := []OwnerRef{ + {OperatorID: "operator-2"}, + {OperatorID: "operator-1"}, + } + if !reflect.DeepEqual(c.OwnerRefs, wantOwnerRefs) { + t.Errorf("incorrect owner refs\ngot: %+v\nwant: %+v", c.OwnerRefs, wantOwnerRefs) + } + + // Delete the Ingress and verify VIPService still exists with one owner ref + if err := fc.Delete(context.Background(), ing); err != nil { + t.Fatalf("deleting Ingress: %v", err) + } + expectRequeue(t, ingPGR, "default", "test-ingress") + + vipSvc, err = ft.GetVIPService(context.Background(), "svc:my-svc") + if err != nil { + 
t.Fatalf("getting VIPService after deletion: %v", err) + } + if vipSvc == nil { + t.Fatal("VIPService was incorrectly deleted") + } + + c = &comment{} + if err := json.Unmarshal([]byte(vipSvc.Comment), c); err != nil { + t.Fatalf("parsing comment after deletion: %v", err) + } + + wantOwnerRefs = []OwnerRef{ + {OperatorID: "operator-2"}, + } + if !reflect.DeepEqual(c.OwnerRefs, wantOwnerRefs) { + t.Errorf("incorrect owner refs after deletion\ngot: %+v\nwant: %+v", c.OwnerRefs, wantOwnerRefs) + } +} diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index 7cadaecc4..8c19a5e05 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -73,6 +73,7 @@ func (a *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request return reconcile.Result{}, fmt.Errorf("failed to get ing: %w", err) } if !ing.DeletionTimestamp.IsZero() || !a.shouldExpose(ing) { + // TODO(irbekrm): this message is confusing if the Ingress is an HA Ingress logger.Debugf("ingress is being deleted or should not be exposed, cleaning up") return reconcile.Result{}, a.maybeCleanup(ctx, logger, ing) } diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 37e37a96e..1dcd130fb 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -9,6 +9,7 @@ package main import ( "context" + "fmt" "net/http" "os" "regexp" @@ -39,6 +40,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/local" "tailscale.com/client/tailscale" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -335,6 +337,10 @@ func runReconcilers(opts reconcilerOpts) { if err != nil { startlog.Fatalf("could not get local client: %v", err) } + id, err := id(context.Background(), lc) + if err != nil { + startlog.Fatalf("error determining stable ID of the operator's Tailscale device: %v", err) + } ingressProxyGroupFilter := handler.EnqueueRequestsFromMapFunc(ingressesFromIngressProxyGroup(mgr.GetClient(), opts.log)) err = builder. ControllerManagedBy(mgr). @@ -342,7 +348,7 @@ func runReconcilers(opts reconcilerOpts) { Named("ingress-pg-reconciler"). Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog))). Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). 
- Complete(&IngressPGReconciler{ + Complete(&HAIngressReconciler{ recorder: eventRecorder, tsClient: opts.tsClient, tsnetServer: opts.tsServer, @@ -350,6 +356,7 @@ func runReconcilers(opts reconcilerOpts) { Client: mgr.GetClient(), logger: opts.log.Named("ingress-pg-reconciler"), lc: lc, + operatorID: id, tsNamespace: opts.tailscaleNamespace, }) if err != nil { @@ -1262,3 +1269,14 @@ func hasProxyGroupAnnotation(obj client.Object) bool { ing := obj.(*networkingv1.Ingress) return ing.Annotations[AnnotationProxyGroup] != "" } + +func id(ctx context.Context, lc *local.Client) (string, error) { + st, err := lc.StatusWithoutPeers(ctx) + if err != nil { + return "", fmt.Errorf("error getting tailscale status: %w", err) + } + if st.Self == nil { + return "", fmt.Errorf("unexpected: device's status does not contain node's metadata") + } + return string(st.Self.ID), nil +} From dd7166cb8e12261eafd43a06cd4ee31a7356d016 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 5 Mar 2025 18:57:14 -0800 Subject: [PATCH 0568/1708] util/eventbus: add internal hook type for debugging Publicly exposed debugging functions will use these hooks to observe dataflow in the bus. Updates #15160 Signed-off-by: David Anderson --- util/eventbus/debug.go | 62 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 util/eventbus/debug.go diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go new file mode 100644 index 000000000..912fe7623 --- /dev/null +++ b/util/eventbus/debug.go @@ -0,0 +1,62 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +import ( + "slices" + "sync" + "sync/atomic" +) + +// A hook collects hook functions that can be run as a group. +type hook[T any] struct { + sync.Mutex + fns []hookFn[T] +} + +var hookID atomic.Uint64 + +// add registers fn to be called when the hook is run. Returns an +// unregistration function that removes fn from the hook when called. +// +//lint:ignore U1000 Not used yet, but will be in an upcoming change +func (h *hook[T]) add(fn func(T)) (remove func()) { + id := hookID.Add(1) + h.Lock() + defer h.Unlock() + h.fns = append(h.fns, hookFn[T]{id, fn}) + return func() { h.remove(id) } +} + +// remove removes the function with the given ID from the hook. +// +//lint:ignore U1000 Not used yet, but will be in an upcoming change +func (h *hook[T]) remove(id uint64) { + h.Lock() + defer h.Unlock() + h.fns = slices.DeleteFunc(h.fns, func(f hookFn[T]) bool { return f.ID == id }) +} + +// active reports whether any functions are registered with the +// hook. This can be used to skip expensive work when the hook is +// inactive. +func (h *hook[T]) active() bool { + h.Lock() + defer h.Unlock() + return len(h.fns) > 0 +} + +// run calls all registered functions with the value v. 
+func (h *hook[T]) run(v T) { + h.Lock() + defer h.Unlock() + for _, fn := range h.fns { + fn.Fn(v) + } +} + +type hookFn[T any] struct { + ID uint64 + Fn func(T) +} From e80d2b4ad1e427c7700264a05d4bc8a6d95e29d7 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 5 Mar 2025 19:37:03 -0800 Subject: [PATCH 0569/1708] util/eventbus: add debug hooks to snoop on bus traffic Updates #15160 Signed-off-by: David Anderson --- util/eventbus/bus.go | 45 ++++++++++++++++++++++++++++---------- util/eventbus/client.go | 5 +++-- util/eventbus/publish.go | 12 ++-------- util/eventbus/subscribe.go | 22 ++++++++++++++----- 4 files changed, 56 insertions(+), 28 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index b479f3940..a9b6f0dec 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -8,17 +8,28 @@ import ( "reflect" "slices" "sync" - "time" "tailscale.com/util/set" ) +type publishedEvent struct { + Event any + From *Client +} + +type routedEvent struct { + Event any + From *Client + To []*Client +} + // Bus is an event bus that distributes published events to interested // subscribers. type Bus struct { - router *worker - write chan publishedEvent - snapshot chan chan []publishedEvent + router *worker + write chan publishedEvent + snapshot chan chan []publishedEvent + routeDebug hook[routedEvent] topicsMu sync.Mutex // guards everything below. topics map[reflect.Type][]*subscribeState @@ -94,13 +105,23 @@ func (b *Bus) pump(ctx context.Context) { for !vals.Empty() { val := vals.Peek() dests := b.dest(reflect.ValueOf(val.Event).Type()) - routed := time.Now() + + if b.routeDebug.active() { + clients := make([]*Client, len(dests)) + for i := range len(dests) { + clients[i] = dests[i].client + } + b.routeDebug.run(routedEvent{ + Event: val.Event, + From: val.From, + To: clients, + }) + } + for _, d := range dests { evt := queuedEvent{ - Event: val.Event, - From: val.From, - Published: val.Published, - Routed: routed, + Event: val.Event, + From: val.From, } deliverOne: for { @@ -113,6 +134,7 @@ func (b *Bus) pump(ctx context.Context) { break deliverOne case in := <-acceptCh(): vals.Add(in) + in.From.publishDebug.run(in) case <-ctx.Done(): return case ch := <-b.snapshot: @@ -129,8 +151,9 @@ func (b *Bus) pump(ctx context.Context) { select { case <-ctx.Done(): return - case val := <-b.write: - vals.Add(val) + case in := <-b.write: + vals.Add(in) + in.From.publishDebug.run(in) case ch := <-b.snapshot: ch <- nil } diff --git a/util/eventbus/client.go b/util/eventbus/client.go index 174cc5ea5..17f7e8608 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -17,8 +17,9 @@ import ( // Subscribers that share the same client receive events one at a // time, in the order they were published. type Client struct { - name string - bus *Bus + name string + bus *Bus + publishDebug hook[publishedEvent] mu sync.Mutex pub set.Set[publisher] diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go index fdabdcb23..b228708ac 100644 --- a/util/eventbus/publish.go +++ b/util/eventbus/publish.go @@ -5,15 +5,8 @@ package eventbus import ( "reflect" - "time" ) -type publishedEvent struct { - Event any - From *Client - Published time.Time -} - // publisher is a uniformly typed wrapper around Publisher[T], so that // debugging facilities can look at active publishers. 
type publisher interface { @@ -60,9 +53,8 @@ func (p *Publisher[T]) Publish(v T) { } evt := publishedEvent{ - Event: v, - From: p.client, - Published: time.Now(), + Event: v, + From: p.client, } select { diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 71201aa40..c38949d9d 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -8,14 +8,17 @@ import ( "fmt" "reflect" "sync" - "time" ) +type deliveredEvent struct { + Event any + From *Client + To *Client +} + type queuedEvent struct { - Event any - From *Client - Published time.Time - Routed time.Time + Event any + From *Client } // subscriber is a uniformly typed wrapper around Subscriber[T], so @@ -46,6 +49,7 @@ type subscribeState struct { dispatcher *worker write chan queuedEvent snapshot chan chan []queuedEvent + debug hook[deliveredEvent] outputsMu sync.Mutex outputs map[reflect.Type]subscriber @@ -82,6 +86,14 @@ func (q *subscribeState) pump(ctx context.Context) { if !sub.dispatch(ctx, &vals, acceptCh) { return } + + if q.debug.active() { + q.debug.run(deliveredEvent{ + Event: val.Event, + From: val.From, + To: q.client, + }) + } } else { // Keep the cases in this select in sync with // Subscriber.dispatch below. The only different should be From 7fac0175c08565076f92b9ae4d2742dc8abda9af Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 4 Mar 2025 13:41:12 -0800 Subject: [PATCH 0570/1708] cmd/derper, derp/derphttp: support, generate self-signed IP address certs For people who can't use LetsEncrypt because it's banned. Per https://github.com/tailscale/tailscale/issues/11776#issuecomment-2520955317 This does two things: 1) if you run derper with --certmode=manual and --hostname=$IP_ADDRESS we previously permitted, but now we also: * auto-generate the self-signed cert for you if it doesn't yet exist on disk * print out the derpmap configuration you need to use that self-signed cert 2) teaches derp/derphttp's derp dialer to verify the signature of self-signed TLS certs, if so declared in the existing DERPNode.CertName field, which previously existed for domain fronting, separating out the dial hostname from how certs are validates, so it's not overloaded much; that's what it was meant for. 
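For illustration, the DERPNode entry that derper now prints (and that an
operator would drop into their DERPMap) looks roughly like the sketch below.
The IP address is a made-up placeholder, and certDER is assumed to hold the
self-signed certificate's raw DER bytes; the "custom" name and region 900
mirror what the derper suggests.

	node := &tailcfg.DERPNode{
		Name:     "custom",
		RegionID: 900,
		HostName: "203.0.113.7", // raw IP literal, no DNS name
		// Hex SHA-256 of the presented cert, prefixed so clients pin
		// this exact cert instead of doing WebPKI validation.
		CertName: fmt.Sprintf("sha256-raw:%02x", sha256.Sum256(certDER)),
	}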
Fixes #11776 Change-Id: Ie72d12f209416bb7e8325fe0838cd2c66342c5cf Signed-off-by: Brad Fitzpatrick --- cmd/derper/cert.go | 102 ++++++++++++++++++++++++++++++- cmd/derper/cert_test.go | 73 ++++++++++++++++++++++ derp/derphttp/derphttp_client.go | 20 +++++- net/tlsdial/tlsdial.go | 41 +++++++++++++ tailcfg/derpmap.go | 6 ++ 5 files changed, 238 insertions(+), 4 deletions(-) diff --git a/cmd/derper/cert.go b/cmd/derper/cert.go index 623fa376f..b95755c64 100644 --- a/cmd/derper/cert.go +++ b/cmd/derper/cert.go @@ -4,16 +4,28 @@ package main import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" "crypto/tls" "crypto/x509" + "crypto/x509/pkix" + "encoding/json" + "encoding/pem" "errors" "fmt" + "log" + "math/big" "net" "net/http" + "os" "path/filepath" "regexp" + "time" "golang.org/x/crypto/acme/autocert" + "tailscale.com/tailcfg" ) var unsafeHostnameCharacters = regexp.MustCompile(`[^a-zA-Z0-9-\.]`) @@ -65,8 +77,18 @@ func NewManualCertManager(certdir, hostname string) (certProvider, error) { crtPath := filepath.Join(certdir, keyname+".crt") keyPath := filepath.Join(certdir, keyname+".key") cert, err := tls.LoadX509KeyPair(crtPath, keyPath) + hostnameIP := net.ParseIP(hostname) // or nil if hostname isn't an IP address if err != nil { - return nil, fmt.Errorf("can not load x509 key pair for hostname %q: %w", keyname, err) + // If the hostname is an IP address, automatically create a + // self-signed certificate for it. + var certp *tls.Certificate + if os.IsNotExist(err) && hostnameIP != nil { + certp, err = createSelfSignedIPCert(crtPath, keyPath, hostname) + } + if err != nil { + return nil, fmt.Errorf("can not load x509 key pair for hostname %q: %w", keyname, err) + } + cert = *certp } // ensure hostname matches with the certificate x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) @@ -76,6 +98,18 @@ func NewManualCertManager(certdir, hostname string) (certProvider, error) { if err := x509Cert.VerifyHostname(hostname); err != nil { return nil, fmt.Errorf("cert invalid for hostname %q: %w", hostname, err) } + if hostnameIP != nil { + // If the hostname is an IP address, print out information on how to + // confgure this in the derpmap. + dn := &tailcfg.DERPNode{ + Name: "custom", + RegionID: 900, + HostName: hostname, + CertName: fmt.Sprintf("sha256-raw:%-02x", sha256.Sum256(x509Cert.Raw)), + } + dnJSON, _ := json.Marshal(dn) + log.Printf("Using self-signed certificate for IP address %q. 
Configure it in DERPMap using: (https://tailscale.com/s/custom-derp)\n %s", hostname, dnJSON) + } return &manualCertManager{ cert: &cert, hostname: hostname, @@ -109,3 +143,69 @@ func (m *manualCertManager) getCertificate(hi *tls.ClientHelloInfo) (*tls.Certif func (m *manualCertManager) HTTPHandler(fallback http.Handler) http.Handler { return fallback } + +func createSelfSignedIPCert(crtPath, keyPath, ipStr string) (*tls.Certificate, error) { + ip := net.ParseIP(ipStr) + if ip == nil { + return nil, fmt.Errorf("invalid IP address: %s", ipStr) + } + + priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, fmt.Errorf("failed to generate EC private key: %v", err) + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, fmt.Errorf("failed to generate serial number: %v", err) + } + + now := time.Now() + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: ipStr, + }, + NotBefore: now, + NotAfter: now.AddDate(1, 0, 0), // expires in 1 year; a bit over that is rejected by macOS etc + + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + // Set the IP as a SAN. + template.IPAddresses = []net.IP{ip} + + // Create the self-signed certificate. + derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %v", err) + } + + certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + + keyBytes, err := x509.MarshalECPrivateKey(priv) + if err != nil { + return nil, fmt.Errorf("unable to marshal EC private key: %v", err) + } + + keyPEM := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}) + + if err := os.MkdirAll(filepath.Dir(crtPath), 0700); err != nil { + return nil, fmt.Errorf("failed to create directory for certificate: %v", err) + } + if err := os.WriteFile(crtPath, certPEM, 0644); err != nil { + return nil, fmt.Errorf("failed to write certificate to %s: %v", crtPath, err) + } + if err := os.WriteFile(keyPath, keyPEM, 0600); err != nil { + return nil, fmt.Errorf("failed to write key to %s: %v", keyPath, err) + } + + tlsCert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + return nil, fmt.Errorf("failed to create tls.Certificate: %v", err) + } + return &tlsCert, nil +} diff --git a/cmd/derper/cert_test.go b/cmd/derper/cert_test.go index a379e5c04..2ec7b756e 100644 --- a/cmd/derper/cert_test.go +++ b/cmd/derper/cert_test.go @@ -4,19 +4,29 @@ package main import ( + "context" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" + "crypto/sha256" "crypto/tls" "crypto/x509" "crypto/x509/pkix" "encoding/pem" + "fmt" "math/big" "net" + "net/http" "os" "path/filepath" "testing" "time" + + "tailscale.com/derp" + "tailscale.com/derp/derphttp" + "tailscale.com/net/netmon" + "tailscale.com/tailcfg" + "tailscale.com/types/key" ) // Verify that in --certmode=manual mode, we can use a bare IP address @@ -95,3 +105,66 @@ func TestCertIP(t *testing.T) { t.Fatalf("GetCertificate returned nil") } } + +// Test that we can dial a raw IP without using a hostname and without a WebPKI +// cert, validating the cert against the signature of the cert in the DERP map's +// DERPNode. +// +// See https://github.com/tailscale/tailscale/issues/11776. 
+func TestPinnedCertRawIP(t *testing.T) { + td := t.TempDir() + cp, err := NewManualCertManager(td, "127.0.0.1") + if err != nil { + t.Fatalf("NewManualCertManager: %v", err) + } + + cert, err := cp.TLSConfig().GetCertificate(&tls.ClientHelloInfo{ + ServerName: "127.0.0.1", + }) + if err != nil { + t.Fatalf("GetCertificate: %v", err) + } + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("Listen: %v", err) + } + defer ln.Close() + + ds := derp.NewServer(key.NewNode(), t.Logf) + + derpHandler := derphttp.Handler(ds) + mux := http.NewServeMux() + mux.Handle("/derp", derpHandler) + + var hs http.Server + hs.Handler = mux + hs.TLSConfig = cp.TLSConfig() + go hs.ServeTLS(ln, "", "") + + lnPort := ln.Addr().(*net.TCPAddr).Port + + reg := &tailcfg.DERPRegion{ + RegionID: 900, + Nodes: []*tailcfg.DERPNode{ + { + RegionID: 900, + HostName: "127.0.0.1", + CertName: fmt.Sprintf("sha256-raw:%-02x", sha256.Sum256(cert.Leaf.Raw)), + DERPPort: lnPort, + }, + }, + } + + netMon := netmon.NewStatic() + dc := derphttp.NewRegionClient(key.NewNode(), t.Logf, netMon, func() *tailcfg.DERPRegion { + return reg + }) + defer dc.Close() + + _, connClose, _, err := dc.DialRegionTLS(context.Background(), reg) + if err != nil { + t.Fatalf("DialRegionTLS: %v", err) + } + defer connClose.Close() +} diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index 7387b60b4..319c02429 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -652,7 +652,11 @@ func (c *Client) tlsClient(nc net.Conn, node *tailcfg.DERPNode) *tls.Conn { tlsConf.VerifyConnection = nil } if node.CertName != "" { - tlsdial.SetConfigExpectedCert(tlsConf, node.CertName) + if suf, ok := strings.CutPrefix(node.CertName, "sha256-raw:"); ok { + tlsdial.SetConfigExpectedCertHash(tlsConf, suf) + } else { + tlsdial.SetConfigExpectedCert(tlsConf, node.CertName) + } } } return tls.Client(nc, tlsConf) @@ -666,7 +670,7 @@ func (c *Client) tlsClient(nc net.Conn, node *tailcfg.DERPNode) *tls.Conn { func (c *Client) DialRegionTLS(ctx context.Context, reg *tailcfg.DERPRegion) (tlsConn *tls.Conn, connClose io.Closer, node *tailcfg.DERPNode, err error) { tcpConn, node, err := c.dialRegion(ctx, reg) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, fmt.Errorf("dialRegion(%d): %w", reg.RegionID, err) } done := make(chan bool) // unbuffered defer close(done) @@ -741,6 +745,17 @@ func (c *Client) dialNode(ctx context.Context, n *tailcfg.DERPNode) (net.Conn, e nwait := 0 startDial := func(dstPrimary, proto string) { + dst := cmp.Or(dstPrimary, n.HostName) + + // If dialing an IP address directly, check its address family + // and bail out before incrementing nwait. 
+ if ip, err := netip.ParseAddr(dst); err == nil { + if proto == "tcp4" && ip.Is6() || + proto == "tcp6" && ip.Is4() { + return + } + } + nwait++ go func() { if proto == "tcp4" && c.preferIPv6() { @@ -755,7 +770,6 @@ func (c *Client) dialNode(ctx context.Context, n *tailcfg.DERPNode) (net.Conn, e // Start v4 dial } } - dst := cmp.Or(dstPrimary, n.HostName) port := "443" if !c.useHTTPS() { port = "3340" diff --git a/net/tlsdial/tlsdial.go b/net/tlsdial/tlsdial.go index 2af87bd02..4d22383ef 100644 --- a/net/tlsdial/tlsdial.go +++ b/net/tlsdial/tlsdial.go @@ -12,6 +12,7 @@ package tlsdial import ( "bytes" "context" + "crypto/sha256" "crypto/tls" "crypto/x509" "errors" @@ -246,6 +247,46 @@ func SetConfigExpectedCert(c *tls.Config, certDNSName string) { } } +// SetConfigExpectedCertHash configures c's VerifyPeerCertificate function +// to require that exactly 1 cert is presented, and that the hex of its SHA256 hash +// is equal to wantFullCertSHA256Hex and that it's a valid cert for c.ServerName. +func SetConfigExpectedCertHash(c *tls.Config, wantFullCertSHA256Hex string) { + if c.VerifyPeerCertificate != nil { + panic("refusing to override tls.Config.VerifyPeerCertificate") + } + // Set InsecureSkipVerify to prevent crypto/tls from doing its + // own cert verification, but do the same work that it'd do + // (but using certDNSName) in the VerifyPeerCertificate hook. + c.InsecureSkipVerify = true + c.VerifyConnection = nil + c.VerifyPeerCertificate = func(rawCerts [][]byte, _ [][]*x509.Certificate) error { + if len(rawCerts) == 0 { + return errors.New("no certs presented") + } + if len(rawCerts) > 1 { + return errors.New("unexpected multiple certs presented") + } + if fmt.Sprintf("%02x", sha256.Sum256(rawCerts[0])) != wantFullCertSHA256Hex { + return fmt.Errorf("cert hash does not match expected cert hash") + } + cert, err := x509.ParseCertificate(rawCerts[0]) + if err != nil { + return fmt.Errorf("ParseCertificate: %w", err) + } + if err := cert.VerifyHostname(c.ServerName); err != nil { + return fmt.Errorf("cert does not match server name %q: %w", c.ServerName, err) + } + now := time.Now() + if now.After(cert.NotAfter) { + return fmt.Errorf("cert expired %v", cert.NotAfter) + } + if now.Before(cert.NotBefore) { + return fmt.Errorf("cert not yet valid until %v; is your clock correct?", cert.NotBefore) + } + return nil + } +} + // NewTransport returns a new HTTP transport that verifies TLS certs using this // package, including its baked-in LetsEncrypt fallback roots. func NewTransport() *http.Transport { diff --git a/tailcfg/derpmap.go b/tailcfg/derpmap.go index 056152157..b3e54983f 100644 --- a/tailcfg/derpmap.go +++ b/tailcfg/derpmap.go @@ -139,6 +139,12 @@ type DERPNode struct { // name. If empty, HostName is used. If CertName is non-empty, // HostName is only used for the TCP dial (if IPv4/IPv6 are // not present) + TLS ClientHello. + // + // As a special case, if CertName starts with "sha256-raw:", + // then the rest of the string is a hex-encoded SHA256 of the + // cert to expect. This is used for self-signed certs. + // In this case, the HostName field will typically be an IP + // address literal. CertName string `json:",omitempty"` // IPv4 optionally forces an IPv4 address to use, instead of using DNS. 
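Viewed from the client side, the dial path in this patch boils down to roughly
the following sketch (netConn stands in for the already-dialed TCP connection;
this is a condensed illustration of the derphttp/tlsdial changes, not the full
implementation):

	tlsConf := &tls.Config{ServerName: node.HostName}
	if node.CertName != "" {
		if suf, ok := strings.CutPrefix(node.CertName, "sha256-raw:"); ok {
			// Pin the exact presented cert by its SHA-256 hash.
			tlsdial.SetConfigExpectedCertHash(tlsConf, suf)
		} else {
			// Validate via WebPKI, but against CertName rather than
			// the dialed hostname (the pre-existing domain-fronting case).
			tlsdial.SetConfigExpectedCert(tlsConf, node.CertName)
		}
	}
	conn := tls.Client(netConn, tlsConf)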
From 75a03fc71903b6e161af5ec2fb135df99e85bd23 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 7 Feb 2025 19:45:20 -0800 Subject: [PATCH 0571/1708] wgengine/magicsock: use learned DERP route as send path of last resort If we get a packet in over some DERP and don't otherwise know how to reply (no known DERP home or UDP endpoint), this makes us use the DERP connection on which we received the packet to reply. This will almost always be our own home DERP region. This is particularly useful for large one-way nodes (such as hello.ts.net) that don't actively reach out to other nodes, so don't need to be told the DERP home of peers. They can instead learn the DERP home upon getting the first connection. This can also help nodes from a slow or misbehaving control plane. Updates tailscale/corp#26438 Change-Id: I6241ec92828bf45982e0eb83ad5c7404df5968bc Signed-off-by: Brad Fitzpatrick --- control/controlclient/direct.go | 2 ++ control/controlclient/map.go | 3 +++ tstest/integration/nat/nat_test.go | 36 ++++++++++++++++++++++++++++++ wgengine/magicsock/derp.go | 24 ++++++++++++++++++-- wgengine/magicsock/endpoint.go | 10 ++++++++- 5 files changed, 72 insertions(+), 3 deletions(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 883a1a587..e7d1d25f8 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1255,6 +1255,7 @@ type devKnobs struct { DumpNetMapsVerbose func() bool ForceProxyDNS func() bool StripEndpoints func() bool // strip endpoints from control (only use disco messages) + StripHomeDERP func() bool // strip Home DERP from control StripCaps func() bool // strip all local node's control-provided capabilities } @@ -1266,6 +1267,7 @@ func initDevKnob() devKnobs { DumpRegister: envknob.RegisterBool("TS_DEBUG_REGISTER"), ForceProxyDNS: envknob.RegisterBool("TS_DEBUG_PROXY_DNS"), StripEndpoints: envknob.RegisterBool("TS_DEBUG_STRIP_ENDPOINTS"), + StripHomeDERP: envknob.RegisterBool("TS_DEBUG_STRIP_HOME_DERP"), StripCaps: envknob.RegisterBool("TS_DEBUG_STRIP_CAPS"), } } diff --git a/control/controlclient/map.go b/control/controlclient/map.go index df2182c8b..769c8f1e3 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -240,6 +240,9 @@ func upgradeNode(n *tailcfg.Node) { } n.LegacyDERPString = "" } + if DevKnob.StripHomeDERP() { + n.HomeDERP = 0 + } if n.AllowedIPs == nil { n.AllowedIPs = slices.Clone(n.Addresses) diff --git a/tstest/integration/nat/nat_test.go b/tstest/integration/nat/nat_test.go index 9f77d31e9..15f126985 100644 --- a/tstest/integration/nat/nat_test.go +++ b/tstest/integration/nat/nat_test.go @@ -236,6 +236,22 @@ func hard(c *vnet.Config) *vnet.Node { fmt.Sprintf("10.0.%d.1/24", n), vnet.HardNAT)) } +func hardNoDERPOrEndoints(c *vnet.Config) *vnet.Node { + n := c.NumNodes() + 1 + return c.AddNode(c.AddNetwork( + fmt.Sprintf("2.%d.%d.%d", n, n, n), // public IP + fmt.Sprintf("10.0.%d.1/24", n), vnet.HardNAT), + vnet.TailscaledEnv{ + Key: "TS_DEBUG_STRIP_ENDPOINTS", + Value: "1", + }, + vnet.TailscaledEnv{ + Key: "TS_DEBUG_STRIP_HOME_DERP", + Value: "1", + }, + ) +} + func hardPMP(c *vnet.Config) *vnet.Node { n := c.NumNodes() + 1 return c.AddNode(c.AddNetwork( @@ -510,6 +526,26 @@ func TestEasyEasy(t *testing.T) { nt.want(routeDirect) } +// Issue tailscale/corp#26438: use learned DERP route as send path of last +// resort +// +// See (*magicsock.Conn).fallbackDERPRegionForPeer and its comment for +// background. 
+// +// This sets up a test with two nodes that must use DERP to communicate but the +// target of the ping (the second node) additionally is not getting DERP or +// Endpoint updates from the control plane. (Or rather, it's getting them but is +// configured to scrub them right when they come off the network before being +// processed) This then tests whether node2, upon receiving a packet, will be +// able to reply to node1 since it knows neither node1's endpoints nor its home +// DERP. The only reply route it can use is that fact that it just received a +// packet over a particular DERP from that peer. +func TestFallbackDERPRegionForPeer(t *testing.T) { + nt := newNatTest(t) + nt.runTest(hard, hardNoDERPOrEndoints) + nt.want(routeDERP) +} + func TestSingleJustIPv6(t *testing.T) { nt := newNatTest(t) nt.runTest(just6) diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index 7c8ffc01a..ffdff14a1 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -64,10 +64,30 @@ func (c *Conn) removeDerpPeerRoute(peer key.NodePublic, regionID int, dc *derpht // addDerpPeerRoute adds a DERP route entry, noting that peer was seen // on DERP node derpID, at least on the connection identified by dc. // See issue 150 for details. -func (c *Conn) addDerpPeerRoute(peer key.NodePublic, derpID int, dc *derphttp.Client) { +func (c *Conn) addDerpPeerRoute(peer key.NodePublic, regionID int, dc *derphttp.Client) { c.mu.Lock() defer c.mu.Unlock() - mak.Set(&c.derpRoute, peer, derpRoute{derpID, dc}) + mak.Set(&c.derpRoute, peer, derpRoute{regionID, dc}) +} + +// fallbackDERPRegionForPeer returns the DERP region ID we might be able to use +// to contact peer, learned from observing recent DERP traffic from them. +// +// This is used as a fallback when a peer receives a packet from a peer +// over DERP but doesn't known that peer's home DERP or any UDP endpoints. +// This is particularly useful for large one-way nodes (such as hello.ts.net) +// that don't actively reach out to other nodes, so don't need to be told +// the DERP home of peers. They can instead learn the DERP home upon getting the +// first connection. +// +// This can also help nodes from a slow or misbehaving control plane. +func (c *Conn) fallbackDERPRegionForPeer(peer key.NodePublic) (regionID int) { + c.mu.Lock() + defer c.mu.Unlock() + if dr, ok := c.derpRoute[peer]; ok { + return dr.regionID + } + return 0 } // activeDerp contains fields for an active DERP connection. diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 7780c7db6..0c48acddf 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -948,7 +948,15 @@ func (de *endpoint) send(buffs [][]byte) error { de.mu.Unlock() if !udpAddr.IsValid() && !derpAddr.IsValid() { - return errNoUDPOrDERP + // Make a last ditch effort to see if we have a DERP route for them. If + // they contacted us over DERP and we don't know their UDP endpoints or + // their DERP home, we can at least assume they're reachable over the + // DERP they used to contact us. 
+ if rid := de.c.fallbackDERPRegionForPeer(de.publicKey); rid != 0 { + derpAddr = netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, uint16(rid)) + } else { + return errNoUDPOrDERP + } } var err error if udpAddr.IsValid() { From a4b8c24834e4cd386d633a85d6df05de35c4d023 Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Fri, 7 Mar 2025 12:50:15 -0500 Subject: [PATCH 0572/1708] ipn: sort VIP services before hashing (#15035) We're computing the list of services to hash by iterating over the values of a map, the ordering of which is not guaranteed. This can cause the hash to fluctuate depending on the ordering if there's more than one service hosted by the same host. Updates tailscale/corp#25733. Signed-off-by: Naman Sood --- ipn/ipnlocal/local.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 1ce299371..e9f263996 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -8238,7 +8238,14 @@ func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcf services[sn].Active = true } - return slicesx.MapValues(services) + servicesList := slicesx.MapValues(services) + // [slicesx.MapValues] provides the values in an indeterminate order, but since we'll + // be hashing a representation of this list later we want it to be in a consistent + // order. + slices.SortFunc(servicesList, func(a, b *tailcfg.VIPService) int { + return strings.Compare(a.Name.String(), b.Name.String()) + }) + return servicesList } var ( From 5177fd2ccb4bb39f38efc01673d75186b5030181 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Thu, 6 Mar 2025 21:00:18 -0500 Subject: [PATCH 0573/1708] net/portmapper: retry UPnP when we get an "Invalid Args" We previously retried getting a UPnP mapping when the device returned error code 725, "OnlyPermanentLeasesSupported". However, we've seen devices in the wild also return 402, "Invalid Args", when given a lease duration. Fall back to the no-duration mapping method in these cases. 
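Concretely, the retry flow now looks roughly like this sketch (the helper and
variable names here are illustrative, not the exact portmapper internals;
soapErrorCode stands in for extracting the UPnP SOAP fault code from the error):

	newPort, err := addAnyPortMapping(ctx, client, externalPort, internalPort, internalAddr, leaseDuration)
	if err != nil {
		// 402: Invalid Args, 725: OnlyPermanentLeasesSupported
		if code, ok := soapErrorCode(err); ok && (code == 402 || code == 725) {
			// Ask again for a permanent (duration 0) lease.
			newPort, err = addAnyPortMapping(ctx, client, externalPort, internalPort, internalAddr, 0)
		}
	}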
Updates #15223 Signed-off-by: Andrew Dunham Change-Id: I6a25007c9eeac0dac83750dd3ae9bfcc287c8fcf --- net/portmapper/upnp.go | 5 +- net/portmapper/upnp_test.go | 107 ++++++++++++++++++++++++++++++++++++ 2 files changed, 110 insertions(+), 2 deletions(-) diff --git a/net/portmapper/upnp.go b/net/portmapper/upnp.go index f1199f0a6..134183135 100644 --- a/net/portmapper/upnp.go +++ b/net/portmapper/upnp.go @@ -610,8 +610,9 @@ func (c *Client) tryUPnPPortmapWithDevice( } // From the UPnP spec: http://upnp.org/specs/gw/UPnP-gw-WANIPConnection-v2-Service.pdf + // 402: Invalid Args (see: https://github.com/tailscale/tailscale/issues/15223) // 725: OnlyPermanentLeasesSupported - if ok && code == 725 { + if ok && (code == 402 || code == 725) { newPort, err = addAnyPortMapping( ctx, client, @@ -620,7 +621,7 @@ func (c *Client) tryUPnPPortmapWithDevice( internal.Addr().String(), 0, // permanent ) - c.vlogf("addAnyPortMapping: 725 retry %v, err=%q", newPort, err) + c.vlogf("addAnyPortMapping: errcode=%d retried: port=%v err=%v", code, newPort, err) } } if err != nil { diff --git a/net/portmapper/upnp_test.go b/net/portmapper/upnp_test.go index c41b535a5..0c296813f 100644 --- a/net/portmapper/upnp_test.go +++ b/net/portmapper/upnp_test.go @@ -628,6 +628,96 @@ func TestGetUPnPPortMapping(t *testing.T) { } } +func TestGetUPnPPortMapping_LeaseDuration(t *testing.T) { + testCases := []struct { + name string + resp string + }{ + {"only_permanent_leases", testAddPortMappingPermanentLease}, + {"invalid_args", testAddPortMappingPermanentLease_InvalidArgs}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + // This is a very basic fake UPnP server handler. + var sawRequestWithLease atomic.Bool + handlers := map[string]any{ + "AddPortMapping": func(body []byte) (int, string) { + // Decode a minimal body to determine whether we skip the request or not. + var req struct { + Protocol string `xml:"NewProtocol"` + InternalPort string `xml:"NewInternalPort"` + ExternalPort string `xml:"NewExternalPort"` + InternalClient string `xml:"NewInternalClient"` + LeaseDuration string `xml:"NewLeaseDuration"` + } + if err := xml.Unmarshal(body, &req); err != nil { + t.Errorf("bad request: %v", err) + return http.StatusBadRequest, "bad request" + } + + if req.Protocol != "UDP" { + t.Errorf(`got Protocol=%q, want "UDP"`, req.Protocol) + } + if req.LeaseDuration != "0" { + // Return a fake error to ensure that we fall back to a permanent lease. + sawRequestWithLease.Store(true) + return http.StatusOK, tc.resp + } + + return http.StatusOK, testAddPortMappingResponse + }, + "GetExternalIPAddress": testGetExternalIPAddressResponse, + "GetStatusInfo": testGetStatusInfoResponse, + "DeletePortMapping": "", // Do nothing for test + } + + igd, err := NewTestIGD(t.Logf, TestIGDOptions{UPnP: true}) + if err != nil { + t.Fatal(err) + } + defer igd.Close() + + igd.SetUPnPHandler(&upnpServer{ + t: t, + Desc: testRootDesc, + Control: map[string]map[string]any{ + "/ctl/IPConn": handlers, + "/upnp/control/yomkmsnooi/wanipconn-1": handlers, + }, + }) + + ctx := context.Background() + c := newTestClient(t, igd) + c.debug.VerboseLogs = true + t.Logf("Listening on upnp=%v", c.testUPnPPort) + defer c.Close() + + // Actually test the UPnP port mapping. 
+ mustProbeUPnP(t, ctx, c) + + gw, myIP, ok := c.gatewayAndSelfIP() + if !ok { + t.Fatalf("could not get gateway and self IP") + } + t.Logf("gw=%v myIP=%v", gw, myIP) + + ext, ok := c.getUPnPPortMapping(ctx, gw, netip.AddrPortFrom(myIP, 12345), 0) + if !ok { + t.Fatal("could not get UPnP port mapping") + } + if got, want := ext.Addr(), netip.MustParseAddr("123.123.123.123"); got != want { + t.Errorf("bad external address; got %v want %v", got, want) + } + if !sawRequestWithLease.Load() { + t.Errorf("wanted request with lease, but didn't see one") + } + t.Logf("external IP: %v", ext) + }) + } +} + // TestGetUPnPPortMapping_NoValidServices tests that getUPnPPortMapping doesn't // crash when a valid UPnP response with no supported services is discovered // and parsed. @@ -1045,6 +1135,23 @@ const testAddPortMappingPermanentLease = ` ` +const testAddPortMappingPermanentLease_InvalidArgs = ` + + + + SOAP:Client + UPnPError + + + 402 + Invalid Args + + + + + +` + const testAddPortMappingResponse = ` From 5ce8cd5fecc1745a63b0ff4474182af3c50baeec Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Thu, 6 Mar 2025 22:10:22 -0700 Subject: [PATCH 0574/1708] .github/workflows: tidy go caches before uploading Delete files from `$(go env GOCACHE)` and `$(go env GOMODCACHE)/cache` that have not been modified in >= 90 minutes as these files are not resulting in cache hits on the current branch. These deltions have resulted in the uploaded / downloaded compressed cache size to go down to ~1/3 of the original size in some instances with the extracted size being ~1/4 of the original extraced size. Updates https://github.com/tailscale/tailscale/issues/15238 Signed-off-by: Mario Minardi --- .github/workflows/test.yml | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4ff2f2421..87b8959ba 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -139,7 +139,11 @@ jobs: echo "Build/test created untracked files in the repo (file names above)." exit 1 fi - + - name: Tidy cache + shell: bash + run: | + find $(go env GOCACHE) -type f -mmin +90 -delete + find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete windows: runs-on: windows-2022 steps: @@ -176,6 +180,11 @@ jobs: # Somewhere in the layers (powershell?) # the equals signs cause great confusion. run: go test ./... -bench . -benchtime 1x -run "^$" + - name: Tidy cache + shell: bash + run: | + find $(go env GOCACHE) -type f -mmin +90 -delete + find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete privileged: runs-on: ubuntu-22.04 @@ -283,6 +292,11 @@ jobs: GOOS: ${{ matrix.goos }} GOARCH: ${{ matrix.goarch }} CGO_ENABLED: "0" + - name: Tidy cache + shell: bash + run: | + find $(go env GOCACHE) -type f -mmin +90 -delete + find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete ios: # similar to cross above, but iOS can't build most of the repo. So, just #make it build a few smoke packages. 
@@ -342,6 +356,11 @@ jobs: GOARCH: ${{ matrix.goarch }} GOARM: ${{ matrix.goarm }} CGO_ENABLED: "0" + - name: Tidy cache + shell: bash + run: | + find $(go env GOCACHE) -type f -mmin +90 -delete + find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete android: # similar to cross above, but android fails to build a few pieces of the @@ -394,6 +413,11 @@ jobs: run: | ./tool/go run ./cmd/tsconnect --fast-compression build ./tool/go run ./cmd/tsconnect --fast-compression build-pkg + - name: Tidy cache + shell: bash + run: | + find $(go env GOCACHE) -type f -mmin +90 -delete + find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete tailscale_go: # Subset of tests that depend on our custom Go toolchain. runs-on: ubuntu-22.04 From 853abf86619d2994157012fec3cd123b64475d5f Mon Sep 17 00:00:00 2001 From: David Anderson Date: Thu, 6 Mar 2025 21:51:18 -0800 Subject: [PATCH 0575/1708] util/eventbus: initial debugging facilities for the event bus Enables monitoring events as they flow, listing bus clients, and snapshotting internal queues to troubleshoot stalls. Updates #15160 Signed-off-by: David Anderson --- util/eventbus/bus.go | 63 +++++++++++++++-------- util/eventbus/client.go | 16 +++++- util/eventbus/debug.go | 103 +++++++++++++++++++++++++++++++++++-- util/eventbus/doc.go | 17 ++---- util/eventbus/publish.go | 2 +- util/eventbus/subscribe.go | 72 +++++++++++++++++--------- 6 files changed, 207 insertions(+), 66 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index a9b6f0dec..fc497add2 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -12,12 +12,12 @@ import ( "tailscale.com/util/set" ) -type publishedEvent struct { +type PublishedEvent struct { Event any From *Client } -type routedEvent struct { +type RoutedEvent struct { Event any From *Client To []*Client @@ -27,24 +27,25 @@ type routedEvent struct { // subscribers. type Bus struct { router *worker - write chan publishedEvent - snapshot chan chan []publishedEvent - routeDebug hook[routedEvent] + write chan PublishedEvent + snapshot chan chan []PublishedEvent + routeDebug hook[RoutedEvent] - topicsMu sync.Mutex // guards everything below. + topicsMu sync.Mutex topics map[reflect.Type][]*subscribeState // Used for introspection/debugging only, not in the normal event // publishing path. - clients set.Set[*Client] + clientsMu sync.Mutex + clients set.Set[*Client] } // New returns a new bus. Use [PublisherOf] to make event publishers, // and [Bus.Queue] and [Subscribe] to make event subscribers. func New() *Bus { ret := &Bus{ - write: make(chan publishedEvent), - snapshot: make(chan chan []publishedEvent), + write: make(chan PublishedEvent), + snapshot: make(chan chan []PublishedEvent), topics: map[reflect.Type][]*subscribeState{}, clients: set.Set[*Client]{}, } @@ -65,12 +66,17 @@ func (b *Bus) Client(name string) *Client { bus: b, pub: set.Set[publisher]{}, } - b.topicsMu.Lock() - defer b.topicsMu.Unlock() + b.clientsMu.Lock() + defer b.clientsMu.Unlock() b.clients.Add(ret) return ret } +// Debugger returns the debugging facility for the bus. +func (b *Bus) Debugger() Debugger { + return Debugger{b} +} + // Close closes the bus. Implicitly closes all clients, publishers and // subscribers attached to the bus. 
// @@ -79,19 +85,17 @@ func (b *Bus) Client(name string) *Client { func (b *Bus) Close() { b.router.StopAndWait() - var clients set.Set[*Client] - b.topicsMu.Lock() - clients, b.clients = b.clients, set.Set[*Client]{} - b.topicsMu.Unlock() - - for c := range clients { + b.clientsMu.Lock() + defer b.clientsMu.Unlock() + for c := range b.clients { c.Close() } + b.clients = nil } func (b *Bus) pump(ctx context.Context) { - var vals queue[publishedEvent] - acceptCh := func() chan publishedEvent { + var vals queue[PublishedEvent] + acceptCh := func() chan PublishedEvent { if vals.Full() { return nil } @@ -111,7 +115,7 @@ func (b *Bus) pump(ctx context.Context) { for i := range len(dests) { clients[i] = dests[i].client } - b.routeDebug.run(routedEvent{ + b.routeDebug.run(RoutedEvent{ Event: val.Event, From: val.From, To: clients, @@ -119,9 +123,10 @@ func (b *Bus) pump(ctx context.Context) { } for _, d := range dests { - evt := queuedEvent{ + evt := DeliveredEvent{ Event: val.Event, From: val.From, + To: d.client, } deliverOne: for { @@ -173,6 +178,22 @@ func (b *Bus) shouldPublish(t reflect.Type) bool { return len(b.topics[t]) > 0 } +func (b *Bus) listClients() []*Client { + b.clientsMu.Lock() + defer b.clientsMu.Unlock() + return b.clients.Slice() +} + +func (b *Bus) snapshotPublishQueue() []PublishedEvent { + resp := make(chan []PublishedEvent) + select { + case b.snapshot <- resp: + return <-resp + case <-b.router.Done(): + return nil + } +} + func (b *Bus) subscribe(t reflect.Type, q *subscribeState) (cancel func()) { b.topicsMu.Lock() defer b.topicsMu.Unlock() diff --git a/util/eventbus/client.go b/util/eventbus/client.go index 17f7e8608..5cf7f97f5 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -19,13 +19,15 @@ import ( type Client struct { name string bus *Bus - publishDebug hook[publishedEvent] + publishDebug hook[PublishedEvent] mu sync.Mutex pub set.Set[publisher] sub *subscribeState // Lazily created on first subscribe } +func (c *Client) Name() string { return c.name } + // Close closes the client. Implicitly closes all publishers and // subscribers obtained from this client. func (c *Client) Close() { @@ -47,6 +49,16 @@ func (c *Client) Close() { } } +func (c *Client) snapshotSubscribeQueue() []DeliveredEvent { + return c.peekSubscribeState().snapshotQueue() +} + +func (c *Client) peekSubscribeState() *subscribeState { + c.mu.Lock() + defer c.mu.Unlock() + return c.sub +} + func (c *Client) subscribeState() *subscribeState { c.mu.Lock() defer c.mu.Unlock() @@ -76,7 +88,7 @@ func (c *Client) deleteSubscriber(t reflect.Type, s *subscribeState) { c.bus.unsubscribe(t, s) } -func (c *Client) publish() chan<- publishedEvent { +func (c *Client) publish() chan<- PublishedEvent { return c.bus.write } diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index 912fe7623..d41fc0385 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -4,11 +4,110 @@ package eventbus import ( + "fmt" "slices" "sync" "sync/atomic" ) +// A Debugger offers access to a bus's privileged introspection and +// debugging facilities. +// +// The debugger's functionality is intended for humans and their tools +// to examine and troubleshoot bus clients, and should not be used in +// normal codepaths. +// +// In particular, the debugger provides access to information that is +// deliberately withheld from bus clients to encourage more robust and +// maintainable code - for example, the sender of an event, or the +// event streams of other clients. 
Please don't use the debugger to +// circumvent these restrictions for purposes other than debugging. +type Debugger struct { + bus *Bus +} + +// Clients returns a list of all clients attached to the bus. +func (d *Debugger) Clients() []*Client { + return d.bus.listClients() +} + +// PublishQueue returns the contents of the publish queue. +// +// The publish queue contains events that have been accepted by the +// bus from Publish() calls, but have not yet been routed to relevant +// subscribers. +// +// This queue is expected to be almost empty in normal operation. A +// full publish queue indicates that a slow subscriber downstream is +// causing backpressure and stalling the bus. +func (d *Debugger) PublishQueue() []PublishedEvent { + return d.bus.snapshotPublishQueue() +} + +// checkClient verifies that client is attached to the same bus as the +// Debugger, and panics if not. +func (d *Debugger) checkClient(client *Client) { + if client.bus != d.bus { + panic(fmt.Errorf("SubscribeQueue given client belonging to wrong bus")) + } +} + +// SubscribeQueue returns the contents of the given client's subscribe +// queue. +// +// The subscribe queue contains events that are to be delivered to the +// client, but haven't yet been handed off to the relevant +// [Subscriber]. +// +// This queue is expected to be almost empty in normal operation. A +// full subscribe queue indicates that the client is accepting events +// too slowly, and may be causing the rest of the bus to stall. +func (d *Debugger) SubscribeQueue(client *Client) []DeliveredEvent { + d.checkClient(client) + return client.snapshotSubscribeQueue() +} + +// WatchBus streams information about all events passing through the +// bus. +// +// Monitored events are delivered in the bus's global publication +// order (see "Concurrency properties" in the package docs). +// +// The caller must consume monitoring events promptly to avoid +// stalling the bus (see "Expected subscriber behavior" in the package +// docs). +func (d *Debugger) WatchBus() *Subscriber[RoutedEvent] { + return newMonitor(d.bus.routeDebug.add) +} + +// WatchPublish streams information about all events published by the +// given client. +// +// Monitored events are delivered in the bus's global publication +// order (see "Concurrency properties" in the package docs). +// +// The caller must consume monitoring events promptly to avoid +// stalling the bus (see "Expected subscriber behavior" in the package +// docs). +func (d *Debugger) WatchPublish(client *Client) *Subscriber[PublishedEvent] { + d.checkClient(client) + return newMonitor(client.publishDebug.add) +} + +// WatchSubscribe streams information about all events received by the +// given client. +// +// Monitored events are delivered in the bus's global publication +// order (see "Concurrency properties" in the package docs). +// +// The caller must consume monitoring events promptly to avoid +// stalling the bus (see "Expected subscriber behavior" in the package +// docs). +func (d *Debugger) WatchSubscribe(client *Client) *Subscriber[DeliveredEvent] { + d.checkClient(client) + return newMonitor(client.subscribeState().debug.add) +} + // A hook collects hook functions that can be run as a group. type hook[T any] struct { sync.Mutex @@ -19,8 +118,6 @@ var hookID atomic.Uint64 // add registers fn to be called when the hook is run. Returns an // unregistration function that removes fn from the hook when called. 
-// -//lint:ignore U1000 Not used yet, but will be in an upcoming change func (h *hook[T]) add(fn func(T)) (remove func()) { id := hookID.Add(1) h.Lock() @@ -30,8 +127,6 @@ func (h *hook[T]) add(fn func(T)) (remove func()) { } // remove removes the function with the given ID from the hook. -// -//lint:ignore U1000 Not used yet, but will be in an upcoming change func (h *hook[T]) remove(id uint64) { h.Lock() defer h.Unlock() diff --git a/util/eventbus/doc.go b/util/eventbus/doc.go index b3509b48b..964a686ea 100644 --- a/util/eventbus/doc.go +++ b/util/eventbus/doc.go @@ -86,18 +86,7 @@ // // # Debugging facilities // -// (TODO, not implemented yet, sorry, I promise we're working on it next!) -// -// The bus comes with introspection facilities to help reason about -// the state of the client, and diagnose issues such as slow -// subscribers. -// -// The bus provide a tsweb debugging page that shows the current state -// of the bus, including all publishers, subscribers, and queued -// events. -// -// The bus also has a snooping and tracing facility, which lets you -// observe all events flowing through the bus, along with their -// source, destination(s) and timing information such as the time of -// delivery to each subscriber and end-to-end bus delays. +// The [Debugger], obtained through [Bus.Debugger], provides +// introspection facilities to monitor events flowing through the bus, +// and inspect publisher and subscriber state. package eventbus diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go index b228708ac..9897114b6 100644 --- a/util/eventbus/publish.go +++ b/util/eventbus/publish.go @@ -52,7 +52,7 @@ func (p *Publisher[T]) Publish(v T) { default: } - evt := publishedEvent{ + evt := PublishedEvent{ Event: v, From: p.client, } diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index c38949d9d..60e91edd5 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -10,17 +10,12 @@ import ( "sync" ) -type deliveredEvent struct { +type DeliveredEvent struct { Event any From *Client To *Client } -type queuedEvent struct { - Event any - From *Client -} - // subscriber is a uniformly typed wrapper around Subscriber[T], so // that debugging facilities can look at active subscribers. type subscriber interface { @@ -38,7 +33,7 @@ type subscriber interface { // processing other potential sources of wakeups, which is how we end // up at this awkward type signature and sharing of internal state // through dispatch. 
- dispatch(ctx context.Context, vals *queue[queuedEvent], acceptCh func() chan queuedEvent) bool + dispatch(ctx context.Context, vals *queue[DeliveredEvent], acceptCh func() chan DeliveredEvent, snapshot chan chan []DeliveredEvent) bool Close() } @@ -47,9 +42,9 @@ type subscribeState struct { client *Client dispatcher *worker - write chan queuedEvent - snapshot chan chan []queuedEvent - debug hook[deliveredEvent] + write chan DeliveredEvent + snapshot chan chan []DeliveredEvent + debug hook[DeliveredEvent] outputsMu sync.Mutex outputs map[reflect.Type]subscriber @@ -58,8 +53,8 @@ type subscribeState struct { func newSubscribeState(c *Client) *subscribeState { ret := &subscribeState{ client: c, - write: make(chan queuedEvent), - snapshot: make(chan chan []queuedEvent), + write: make(chan DeliveredEvent), + snapshot: make(chan chan []DeliveredEvent), outputs: map[reflect.Type]subscriber{}, } ret.dispatcher = runWorker(ret.pump) @@ -67,8 +62,8 @@ func newSubscribeState(c *Client) *subscribeState { } func (q *subscribeState) pump(ctx context.Context) { - var vals queue[queuedEvent] - acceptCh := func() chan queuedEvent { + var vals queue[DeliveredEvent] + acceptCh := func() chan DeliveredEvent { if vals.Full() { return nil } @@ -83,12 +78,12 @@ func (q *subscribeState) pump(ctx context.Context) { vals.Drop() continue } - if !sub.dispatch(ctx, &vals, acceptCh) { + if !sub.dispatch(ctx, &vals, acceptCh, q.snapshot) { return } if q.debug.active() { - q.debug.run(deliveredEvent{ + q.debug.run(DeliveredEvent{ Event: val.Event, From: val.From, To: q.client, @@ -111,6 +106,20 @@ func (q *subscribeState) pump(ctx context.Context) { } } +func (s *subscribeState) snapshotQueue() []DeliveredEvent { + if s == nil { + return nil + } + + resp := make(chan []DeliveredEvent) + select { + case s.snapshot <- resp: + return <-resp + case <-s.dispatcher.Done(): + return nil + } +} + func (s *subscribeState) addSubscriber(t reflect.Type, sub subscriber) { s.outputsMu.Lock() defer s.outputsMu.Unlock() @@ -154,28 +163,43 @@ func (s *subscribeState) closed() <-chan struct{} { // A Subscriber delivers one type of event from a [Client]. 
type Subscriber[T any] struct { - stop stopFlag - recv *subscribeState - read chan T + stop stopFlag + read chan T + unregister func() } func newSubscriber[T any](r *subscribeState) *Subscriber[T] { t := reflect.TypeFor[T]() ret := &Subscriber[T]{ - recv: r, - read: make(chan T), + read: make(chan T), + unregister: func() { r.deleteSubscriber(t) }, } r.addSubscriber(t, ret) return ret } +func newMonitor[T any](attach func(fn func(T)) (cancel func())) *Subscriber[T] { + ret := &Subscriber[T]{ + read: make(chan T, 100), // arbitrary, large + } + ret.unregister = attach(ret.monitor) + return ret +} + func (s *Subscriber[T]) subscribeType() reflect.Type { return reflect.TypeFor[T]() } -func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[queuedEvent], acceptCh func() chan queuedEvent) bool { +func (s *Subscriber[T]) monitor(debugEvent T) { + select { + case s.read <- debugEvent: + case <-s.stop.Done(): + } +} + +func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent], acceptCh func() chan DeliveredEvent, snapshot chan chan []DeliveredEvent) bool { t := vals.Peek().Event.(T) for { // Keep the cases in this select in sync with subscribeState.pump @@ -189,7 +213,7 @@ func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[queuedEvent], vals.Add(val) case <-ctx.Done(): return false - case ch := <-s.recv.snapshot: + case ch := <-snapshot: ch <- vals.Snapshot() } } @@ -212,5 +236,5 @@ func (s *Subscriber[T]) Done() <-chan struct{} { // [Subscriber.Events] block for ever. func (s *Subscriber[T]) Close() { s.stop.Stop() // unblock receivers - s.recv.deleteSubscriber(reflect.TypeFor[T]()) + s.unregister() } From e71e95b841a1c37bafb69dd1fc355a5541a9bc65 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Fri, 7 Mar 2025 13:01:35 -0800 Subject: [PATCH 0576/1708] util/eventbus: don't allow publishers to skip events while debugging If any debugging hook might see an event, Publisher.ShouldPublish should tell its caller to publish even if there are no ordinary subscribers. Updates #15160 Signed-off-by: David Anderson --- util/eventbus/bus.go | 4 ++++ util/eventbus/client.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index fc497add2..96cafc98b 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -173,6 +173,10 @@ func (b *Bus) dest(t reflect.Type) []*subscribeState { } func (b *Bus) shouldPublish(t reflect.Type) bool { + if b.routeDebug.active() { + return true + } + b.topicsMu.Lock() defer b.topicsMu.Unlock() return len(b.topics[t]) > 0 diff --git a/util/eventbus/client.go b/util/eventbus/client.go index 5cf7f97f5..a9ef40771 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -93,7 +93,7 @@ func (c *Client) publish() chan<- PublishedEvent { } func (c *Client) shouldPublish(t reflect.Type) bool { - return c.bus.shouldPublish(t) + return c.publishDebug.active() || c.bus.shouldPublish(t) } // Subscribe requests delivery of events of type T through the given From 346a35f6123cfa04104c283ff28050a75627a074 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Fri, 7 Mar 2025 08:16:53 -0800 Subject: [PATCH 0577/1708] util/eventbus: add debugger methods to list pub/sub types This lets debug tools list the types that clients are wielding, so that they can build a dataflow graph and other debugging views. 
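A debugging tool might use these methods roughly as follows (illustrative sketch only; it assumes the Debugger is obtained via Bus.Debugger() as the package docs describe, and that fmt and tailscale.com/util/eventbus are imported):

    // dumpDataflow prints, for every client attached to the bus, the event
    // types it publishes and the event types it subscribes to.
    func dumpDataflow(d *eventbus.Debugger) {
        for _, c := range d.Clients() {
            for _, t := range d.PublishTypes(c) {
                fmt.Printf("client %p publishes %v\n", c, t)
            }
            for _, t := range d.SubscribeTypes(c) {
                fmt.Printf("client %p subscribes to %v\n", c, t)
            }
        }
    }

A real tool would correlate the publish and subscribe lists across clients to draw the edges of a dataflow graph; this sketch only lists the raw types.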
Updates #15160 Signed-off-by: David Anderson --- util/eventbus/client.go | 14 ++++++++++++++ util/eventbus/debug.go | 22 ++++++++++++++++++++++ util/eventbus/subscribe.go | 14 ++++++++++++++ 3 files changed, 50 insertions(+) diff --git a/util/eventbus/client.go b/util/eventbus/client.go index a9ef40771..a7a88c0a1 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -59,6 +59,20 @@ func (c *Client) peekSubscribeState() *subscribeState { return c.sub } +func (c *Client) publishTypes() []reflect.Type { + c.mu.Lock() + defer c.mu.Unlock() + ret := make([]reflect.Type, 0, len(c.pub)) + for pub := range c.pub { + ret = append(ret, pub.publishType()) + } + return ret +} + +func (c *Client) subscribeTypes() []reflect.Type { + return c.peekSubscribeState().subscribeTypes() +} + func (c *Client) subscribeState() *subscribeState { c.mu.Lock() defer c.mu.Unlock() diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index d41fc0385..31123e6ba 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -5,6 +5,7 @@ package eventbus import ( "fmt" + "reflect" "slices" "sync" "sync/atomic" @@ -108,6 +109,27 @@ func (d *Debugger) WatchSubscribe(client *Client) *Subscriber[DeliveredEvent] { return newMonitor(client.subscribeState().debug.add) } +// PublishTypes returns the list of types being published by client. +// +// The returned types are those for which the client has obtained a +// [Publisher]. The client may not have ever sent the type in +// question. +func (d *Debugger) PublishTypes(client *Client) []reflect.Type { + d.checkClient(client) + return client.publishTypes() +} + +// SubscribeTypes returns the list of types being subscribed to by +// client. +// +// The returned types are those for which the client has obtained a +// [Subscriber]. The client may not have ever received the type in +// question, and here may not be any publishers of the type. +func (d *Debugger) SubscribeTypes(client *Client) []reflect.Type { + d.checkClient(client) + return client.subscribeTypes() +} + // A hook collects hook functions that can be run as a group. 
type hook[T any] struct { sync.Mutex diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 60e91edd5..ba17e8548 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -120,6 +120,20 @@ func (s *subscribeState) snapshotQueue() []DeliveredEvent { } } +func (s *subscribeState) subscribeTypes() []reflect.Type { + if s == nil { + return nil + } + + s.outputsMu.Lock() + defer s.outputsMu.Unlock() + ret := make([]reflect.Type, 0, len(s.outputs)) + for t := range s.outputs { + ret = append(ret, t) + } + return ret +} + func (s *subscribeState) addSubscriber(t reflect.Type, sub subscriber) { s.outputsMu.Lock() defer s.outputsMu.Unlock() From eb3313e825c2d2e20f4c11bb7168ad72397e3d20 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 7 Mar 2025 17:12:07 -0700 Subject: [PATCH 0578/1708] tailcfg: add DERPRegion.NoMeasureNoHome, deprecate+document Avoid [cap 115] Fixes tailscale/corp#24697 Change-Id: Ib81994b5ded3dc87a1eef079eb268906a2acb3f8 Signed-off-by: Brad Fitzpatrick --- net/captivedetection/endpoints.go | 2 +- net/netcheck/netcheck.go | 5 ++++- net/netcheck/netcheck_test.go | 7 ++++--- tailcfg/derpmap.go | 28 ++++++++++++++++++++++++---- tailcfg/tailcfg.go | 3 ++- tailcfg/tailcfg_clone.go | 15 ++++++++------- tailcfg/tailcfg_view.go | 28 +++++++++++++++------------- 7 files changed, 58 insertions(+), 30 deletions(-) diff --git a/net/captivedetection/endpoints.go b/net/captivedetection/endpoints.go index 450ed4a1c..57b3e5335 100644 --- a/net/captivedetection/endpoints.go +++ b/net/captivedetection/endpoints.go @@ -89,7 +89,7 @@ func availableEndpoints(derpMap *tailcfg.DERPMap, preferredDERPRegionID int, log // Use the DERP IPs as captive portal detection endpoints. Using IPs is better than hostnames // because they do not depend on DNS resolution. 
for _, region := range derpMap.Regions { - if region.Avoid { + if region.Avoid || region.NoMeasureNoHome { continue } for _, node := range region.Nodes { diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 107573e5d..a33ca2209 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -387,6 +387,9 @@ type probePlan map[string][]probe func sortRegions(dm *tailcfg.DERPMap, last *Report, preferredDERP int) (prev []*tailcfg.DERPRegion) { prev = make([]*tailcfg.DERPRegion, 0, len(dm.Regions)) for _, reg := range dm.Regions { + if reg.NoMeasureNoHome { + continue + } // include an otherwise avoid region if it is the current preferred region if reg.Avoid && reg.RegionID != preferredDERP { continue @@ -533,7 +536,7 @@ func makeProbePlanInitial(dm *tailcfg.DERPMap, ifState *netmon.State) (plan prob plan = make(probePlan) for _, reg := range dm.Regions { - if len(reg.Nodes) == 0 { + if reg.NoMeasureNoHome || len(reg.Nodes) == 0 { continue } diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go index 88c19623d..3affa614d 100644 --- a/net/netcheck/netcheck_test.go +++ b/net/netcheck/netcheck_test.go @@ -455,7 +455,7 @@ func TestMakeProbePlan(t *testing.T) { basicMap := &tailcfg.DERPMap{ Regions: map[int]*tailcfg.DERPRegion{}, } - for rid := 1; rid <= 5; rid++ { + for rid := 1; rid <= 6; rid++ { var nodes []*tailcfg.DERPNode for nid := 0; nid < rid; nid++ { nodes = append(nodes, &tailcfg.DERPNode{ @@ -467,8 +467,9 @@ func TestMakeProbePlan(t *testing.T) { }) } basicMap.Regions[rid] = &tailcfg.DERPRegion{ - RegionID: rid, - Nodes: nodes, + RegionID: rid, + Nodes: nodes, + NoMeasureNoHome: rid == 6, } } diff --git a/tailcfg/derpmap.go b/tailcfg/derpmap.go index b3e54983f..e05559f3e 100644 --- a/tailcfg/derpmap.go +++ b/tailcfg/derpmap.go @@ -96,12 +96,32 @@ type DERPRegion struct { Latitude float64 `json:",omitempty"` Longitude float64 `json:",omitempty"` - // Avoid is whether the client should avoid picking this as its home - // region. The region should only be used if a peer is there. - // Clients already using this region as their home should migrate - // away to a new region without Avoid set. + // Avoid is whether the client should avoid picking this as its home region. + // The region should only be used if a peer is there. Clients already using + // this region as their home should migrate away to a new region without + // Avoid set. + // + // Deprecated: because of bugs in past implementations combined with unclear + // docs that caused people to think the bugs were intentional, this field is + // deprecated. It was never supposed to cause STUN/DERP measurement probes, + // but due to bugs, it sometimes did. And then some parts of the code began + // to rely on that property. But then we were unable to use this field for + // its original purpose, nor its later imagined purpose, because various + // parts of the codebase thought it meant one thing and others thought it + // meant another. But it did something in the middle instead. So we're retiring + // it. Use NoMeasureNoHome instead. Avoid bool `json:",omitempty"` + // NoMeasureNoHome says that this regions should not be measured for its + // latency distance (STUN, HTTPS, etc) or availability (e.g. captive portal + // checks) and should never be selected as the node's home region. However, + // if a peer declares this region as its home, then this client is allowed + // to connect to it for the purpose of communicating with that peer. 
+ // + // This is what the now deprecated Avoid bool was supposed to mean + // originally but had implementation bugs and documentation omissions. + NoMeasureNoHome bool `json:",omitempty"` + // Nodes are the DERP nodes running in this region, in // priority order for the current client. Client TLS // connections should ideally only go to the first entry diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index b5f49c614..7556ba3d0 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -159,7 +159,8 @@ type CapabilityVersion int // - 112: 2025-01-14: Client interprets AllowedIPs of nil as meaning same as Addresses // - 113: 2025-01-20: Client communicates to control whether funnel is enabled by sending Hostinfo.IngressEnabled (#14688) // - 114: 2025-01-30: NodeAttrMaxKeyDuration CapMap defined, clients might use it (no tailscaled code change) (#14829) -const CurrentCapabilityVersion CapabilityVersion = 114 +// - 115: 2025-03-07: Client understands DERPRegion.NoMeasureNoHome. +const CurrentCapabilityVersion CapabilityVersion = 115 // ID is an integer ID for a user, node, or login allocated by the // control plane. diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index aeeacebec..da1f4f374 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -416,13 +416,14 @@ func (src *DERPRegion) Clone() *DERPRegion { // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _DERPRegionCloneNeedsRegeneration = DERPRegion(struct { - RegionID int - RegionCode string - RegionName string - Latitude float64 - Longitude float64 - Avoid bool - Nodes []*DERPNode + RegionID int + RegionCode string + RegionName string + Latitude float64 + Longitude float64 + Avoid bool + NoMeasureNoHome bool + Nodes []*DERPNode }{}) // Clone makes a deep copy of DERPMap. diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 4b56b8c09..b1aacab23 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -880,25 +880,27 @@ func (v *DERPRegionView) UnmarshalJSON(b []byte) error { return nil } -func (v DERPRegionView) RegionID() int { return v.ж.RegionID } -func (v DERPRegionView) RegionCode() string { return v.ж.RegionCode } -func (v DERPRegionView) RegionName() string { return v.ж.RegionName } -func (v DERPRegionView) Latitude() float64 { return v.ж.Latitude } -func (v DERPRegionView) Longitude() float64 { return v.ж.Longitude } -func (v DERPRegionView) Avoid() bool { return v.ж.Avoid } +func (v DERPRegionView) RegionID() int { return v.ж.RegionID } +func (v DERPRegionView) RegionCode() string { return v.ж.RegionCode } +func (v DERPRegionView) RegionName() string { return v.ж.RegionName } +func (v DERPRegionView) Latitude() float64 { return v.ж.Latitude } +func (v DERPRegionView) Longitude() float64 { return v.ж.Longitude } +func (v DERPRegionView) Avoid() bool { return v.ж.Avoid } +func (v DERPRegionView) NoMeasureNoHome() bool { return v.ж.NoMeasureNoHome } func (v DERPRegionView) Nodes() views.SliceView[*DERPNode, DERPNodeView] { return views.SliceOfViews[*DERPNode, DERPNodeView](v.ж.Nodes) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
var _DERPRegionViewNeedsRegeneration = DERPRegion(struct { - RegionID int - RegionCode string - RegionName string - Latitude float64 - Longitude float64 - Avoid bool - Nodes []*DERPNode + RegionID int + RegionCode string + RegionName string + Latitude float64 + Longitude float64 + Avoid bool + NoMeasureNoHome bool + Nodes []*DERPNode }{}) // View returns a read-only view of DERPMap. From f67725c3ff9e4fd66914619d7becd172958bc424 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 9 Mar 2025 12:41:30 -0600 Subject: [PATCH 0579/1708] .github: Bump peter-evans/create-pull-request from 7.0.6 to 7.0.7 (#15113) Bumps [peter-evans/create-pull-request](https://github.com/peter-evans/create-pull-request) from 7.0.6 to 7.0.7. - [Release notes](https://github.com/peter-evans/create-pull-request/releases) - [Commits](https://github.com/peter-evans/create-pull-request/compare/67ccf781d68cd99b580ae25a5c18a1cc84ffff1f...dd2324fc52d5d43c699a5636bcf19fceaa70c284) --- updated-dependencies: - dependency-name: peter-evans/create-pull-request dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/update-flake.yml | 2 +- .github/workflows/update-webclient-prebuilt.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index 4d9db490b..84b10e254 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -36,7 +36,7 @@ jobs: private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }} - name: Send pull request - uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f #v7.0.6 + uses: peter-evans/create-pull-request@dd2324fc52d5d43c699a5636bcf19fceaa70c284 #v7.0.7 with: token: ${{ steps.generate-token.outputs.token }} author: Flakes Updater diff --git a/.github/workflows/update-webclient-prebuilt.yml b/.github/workflows/update-webclient-prebuilt.yml index f2d1e65a5..18d7ffdd9 100644 --- a/.github/workflows/update-webclient-prebuilt.yml +++ b/.github/workflows/update-webclient-prebuilt.yml @@ -35,7 +35,7 @@ jobs: - name: Send pull request id: pull-request - uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f #v7.0.6 + uses: peter-evans/create-pull-request@dd2324fc52d5d43c699a5636bcf19fceaa70c284 #v7.0.7 with: token: ${{ steps.generate-token.outputs.token }} author: OSS Updater From 5827e20fdf93c64ae15ef91d7936b18f2122889a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 9 Mar 2025 12:42:13 -0600 Subject: [PATCH 0580/1708] .github: Bump github/codeql-action from 3.28.9 to 3.28.10 (#15110) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.9 to 3.28.10. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/9e8d0789d4a0fa9ceb6b1738f7e269594bdd67f0...b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index a241d3578..318bc6698 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@9e8d0789d4a0fa9ceb6b1738f7e269594bdd67f0 # v3.28.9 + uses: github/codeql-action/init@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@9e8d0789d4a0fa9ceb6b1738f7e269594bdd67f0 # v3.28.9 + uses: github/codeql-action/autobuild@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@9e8d0789d4a0fa9ceb6b1738f7e269594bdd67f0 # v3.28.9 + uses: github/codeql-action/analyze@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10 From 71b1ae6bef921abc38ab13d70d7e30bd2170bde3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 9 Mar 2025 13:02:04 -0600 Subject: [PATCH 0581/1708] .github: Bump actions/upload-artifact from 4.6.0 to 4.6.1 (#15111) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.6.0 to 4.6.1. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08...4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 87b8959ba..b52a3af36 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -485,7 +485,7 @@ jobs: run: | echo "artifacts_path=$(realpath .)" >> $GITHUB_ENV - name: upload crash - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1 if: steps.run.outcome != 'success' && steps.build.outcome == 'success' with: name: artifacts From b9f4c5d2466f0a1196ad99fb3620d7095d8311cd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 9 Mar 2025 13:31:02 -0600 Subject: [PATCH 0582/1708] .github: Bump golangci/golangci-lint-action from 6.3.1 to 6.5.0 (#15046) Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.3.1 to 6.5.0. 
- [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/2e788936b09dd82dc280e845628a40d2ba6b204c...2226d7cb06a077cd73e56eedd38eecad18e5d837) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: Mario Minardi Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/golangci-lint.yml | 2 +- .golangci.yml | 7 ------- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 3ee6287b9..5318923d8 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -31,7 +31,7 @@ jobs: cache: false - name: golangci-lint - uses: golangci/golangci-lint-action@2e788936b09dd82dc280e845628a40d2ba6b204c # v6.3.1 + uses: golangci/golangci-lint-action@2226d7cb06a077cd73e56eedd38eecad18e5d837 # v6.5.0 with: version: v1.64 diff --git a/.golangci.yml b/.golangci.yml index 45248de16..15f8b5d83 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -26,16 +26,11 @@ issues: # Per-linter settings are contained in this top-level key linters-settings: - # Enable all rules by default; we don't use invisible unicode runes. - bidichk: - gofmt: rewrite-rules: - pattern: 'interface{}' replacement: 'any' - goimports: - govet: # Matches what we use in corp as of 2023-12-07 enable: @@ -78,8 +73,6 @@ linters-settings: # analyzer doesn't support type declarations #- github.com/tailscale/tailscale/types/logger.Logf - misspell: - revive: enable-all-rules: false ignore-generated-header: true From 69b27d2fcfeaa745de072f96dd6c30f4f085ecd9 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 7 Mar 2025 14:27:13 -0700 Subject: [PATCH 0583/1708] cmd/natc: error and log when IP range is exhausted natc itself can't immediately fix the problem, but it can more correctly error that return bad addresses. 
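Callers inside cmd/natc can then treat address-pool exhaustion as a distinct condition, roughly like this (illustrative sketch; ps and domain stand in for a perPeerState and the queried name, and the surrounding DNS handling is elided):

    addrs, err := ps.ipForDomain(domain)
    switch {
    case errors.Is(err, ErrNoIPsAvailable):
        // The per-peer IPv4 range is exhausted; log it and fail the
        // DNS query instead of returning a bogus address.
        log.Printf("ipForDomain(%q): %v", domain, err)
        return
    case err != nil:
        return
    }
    // ... use addrs to build the DNS response ...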
Updates tailscale/corp#26968 Signed-off-by: James Tucker --- cmd/natc/natc.go | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index 956d2455e..73ba116ff 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -41,6 +41,8 @@ import ( "tailscale.com/wgengine/netstack" ) +var ErrNoIPsAvailable = errors.New("no IPs available") + func main() { hostinfo.SetApp("natc") if !envknob.UseWIPCode() { @@ -277,14 +279,14 @@ func (c *connector) handleDNS(pc net.PacketConn, buf []byte, remoteAddr *net.UDP defer cancel() who, err := c.lc.WhoIs(ctx, remoteAddr.String()) if err != nil { - log.Printf("HandleDNS: WhoIs failed: %v\n", err) + log.Printf("HandleDNS(remote=%s): WhoIs failed: %v\n", remoteAddr.String(), err) return } var msg dnsmessage.Message err = msg.Unpack(buf) if err != nil { - log.Printf("HandleDNS: dnsmessage unpack failed: %v\n ", err) + log.Printf("HandleDNS(remote=%s): dnsmessage unpack failed: %v\n", remoteAddr.String(), err) return } @@ -297,19 +299,19 @@ func (c *connector) handleDNS(pc net.PacketConn, buf []byte, remoteAddr *net.UDP case dnsmessage.TypeAAAA, dnsmessage.TypeA: dstAddrs, err := lookupDestinationIP(q.Name.String()) if err != nil { - log.Printf("HandleDNS: lookup destination failed: %v\n ", err) + log.Printf("HandleDNS(remote=%s): lookup destination failed: %v\n", remoteAddr.String(), err) return } if c.ignoreDestination(dstAddrs) { bs, err := dnsResponse(&msg, dstAddrs) // TODO (fran): treat as SERVFAIL if err != nil { - log.Printf("HandleDNS: generate ignore response failed: %v\n", err) + log.Printf("HandleDNS(remote=%s): generate ignore response failed: %v\n", remoteAddr.String(), err) return } _, err = pc.WriteTo(bs, remoteAddr) if err != nil { - log.Printf("HandleDNS: write failed: %v\n", err) + log.Printf("HandleDNS(remote=%s): write failed: %v\n", remoteAddr.String(), err) } return } @@ -322,7 +324,7 @@ func (c *connector) handleDNS(pc net.PacketConn, buf []byte, remoteAddr *net.UDP resp, err := c.generateDNSResponse(&msg, who.Node.ID) // TODO (fran): treat as SERVFAIL if err != nil { - log.Printf("HandleDNS: connector handling failed: %v\n", err) + log.Printf("HandleDNS(remote=%s): connector handling failed: %v\n", remoteAddr.String(), err) return } // TODO (fran): treat as NXDOMAIN @@ -332,7 +334,7 @@ func (c *connector) handleDNS(pc net.PacketConn, buf []byte, remoteAddr *net.UDP // This connector handled the DNS request _, err = pc.WriteTo(resp, remoteAddr) if err != nil { - log.Printf("HandleDNS: write failed: %v\n", err) + log.Printf("HandleDNS(remote=%s): write failed: %v\n", remoteAddr.String(), err) } } @@ -529,6 +531,9 @@ func (ps *perPeerState) ipForDomain(domain string) ([]netip.Addr, error) { return addrs, nil } addrs := ps.assignAddrsLocked(domain) + if addrs == nil { + return nil, ErrNoIPsAvailable + } return addrs, nil } @@ -575,6 +580,9 @@ func (ps *perPeerState) assignAddrsLocked(domain string) []netip.Addr { ps.addrToDomain = &bart.Table[string]{} } v4 := ps.unusedIPv4Locked() + if !v4.IsValid() { + return nil + } as16 := ps.c.v6ULA.Addr().As16() as4 := v4.As4() copy(as16[12:], as4[:]) From e38e5c38cc55c7a2ba90429e7ce195e7ac7ec665 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 10 Mar 2025 13:03:10 -0700 Subject: [PATCH 0584/1708] ssh/tailssh: fix typo in forwardedEnviron method, add docs And don't return a comma-separated string. That's kinda weird signature-wise, and not needed by half the callers anyway. The callers that care can do the join themselves. 
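For reference, the encodedEnv argument that forwardedEnviron decodes is a strconv-quoted JSON array of KEY=VALUE strings. Producing one looks roughly like this (illustrative sketch of the inverse of the parsing in this change; the variable names are made up):

    extra := []string{"LANG=C.UTF-8", "TERM=xterm-256color"}
    j, err := json.Marshal(extra)
    if err != nil {
        return err
    }
    encodedEnv := strconv.Quote(string(j))
    // forwardedEnviron undoes this with strconv.Unquote followed by
    // json.Unmarshal, then appends the result to os.Environ().
    _ = encodedEnv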
Updates #cleanup Change-Id: Ib5ad51a3c6b663d868eba14fe9dc54b2609cfb0d Signed-off-by: Brad Fitzpatrick --- ssh/tailssh/incubator.go | 38 +++++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go index e809e9185..4f630186d 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -254,32 +254,44 @@ func parseIncubatorArgs(args []string) (incubatorArgs, error) { return ia, nil } -func (ia incubatorArgs) forwadedEnviron() ([]string, string, error) { +// forwardedEnviron returns the concatenation of the current environment with +// any environment variables specified in ia.encodedEnv. +// +// It also returns allowedExtraKeys, containing the env keys that were passed in +// to ia.encodedEnv. +func (ia incubatorArgs) forwardedEnviron() (env, allowedExtraKeys []string, err error) { environ := os.Environ() + // pass through SSH_AUTH_SOCK environment variable to support ssh agent forwarding - allowListKeys := "SSH_AUTH_SOCK" + // TODO(bradfitz,percy): why is this listed specially? If the parent wanted to included + // it, couldn't it have just passed it to the incubator in encodedEnv? + // If it didn't, no reason for us to pass it to "su -w ..." if it's not in our env + // anyway? (Surely we don't want to inherit the tailscaled parent SSH_AUTH_SOCK, if any) + allowedExtraKeys = []string{"SSH_AUTH_SOCK"} if ia.encodedEnv != "" { unquoted, err := strconv.Unquote(ia.encodedEnv) if err != nil { - return nil, "", fmt.Errorf("unable to parse encodedEnv %q: %w", ia.encodedEnv, err) + return nil, nil, fmt.Errorf("unable to parse encodedEnv %q: %w", ia.encodedEnv, err) } var extraEnviron []string err = json.Unmarshal([]byte(unquoted), &extraEnviron) if err != nil { - return nil, "", fmt.Errorf("unable to parse encodedEnv %q: %w", ia.encodedEnv, err) + return nil, nil, fmt.Errorf("unable to parse encodedEnv %q: %w", ia.encodedEnv, err) } environ = append(environ, extraEnviron...) - for _, v := range extraEnviron { - allowListKeys = fmt.Sprintf("%s,%s", allowListKeys, strings.Split(v, "=")[0]) + for _, kv := range extraEnviron { + if k, _, ok := strings.Cut(kv, "="); ok { + allowedExtraKeys = append(allowedExtraKeys, k) + } } } - return environ, allowListKeys, nil + return environ, allowedExtraKeys, nil } // beIncubator is the entrypoint to the `tailscaled be-child ssh` subcommand. @@ -459,7 +471,7 @@ func tryExecLogin(dlogf logger.Logf, ia incubatorArgs) error { loginArgs := ia.loginArgs(loginCmdPath) dlogf("logging in with %+v", loginArgs) - environ, _, err := ia.forwadedEnviron() + environ, _, err := ia.forwardedEnviron() if err != nil { return err } @@ -498,14 +510,14 @@ func trySU(dlogf logger.Logf, ia incubatorArgs) (handled bool, err error) { defer sessionCloser() } - environ, allowListEnvKeys, err := ia.forwadedEnviron() + environ, allowListEnvKeys, err := ia.forwardedEnviron() if err != nil { return false, err } loginArgs := []string{ su, - "-w", allowListEnvKeys, + "-w", strings.Join(allowListEnvKeys, ","), "-l", ia.localUser, } @@ -546,7 +558,7 @@ func findSU(dlogf logger.Logf, ia incubatorArgs) string { return "" } - _, allowListEnvKeys, err := ia.forwadedEnviron() + _, allowListEnvKeys, err := ia.forwardedEnviron() if err != nil { return "" } @@ -555,7 +567,7 @@ func findSU(dlogf logger.Logf, ia incubatorArgs) string { // to make sure su supports the necessary arguments. 
err = exec.Command( su, - "-w", allowListEnvKeys, + "-w", strings.Join(allowListEnvKeys, ","), "-l", ia.localUser, "-c", "true", @@ -582,7 +594,7 @@ func handleSSHInProcess(dlogf logger.Logf, ia incubatorArgs) error { return err } - environ, _, err := ia.forwadedEnviron() + environ, _, err := ia.forwardedEnviron() if err != nil { return err } From a6e19f2881c758eae518ce94e6e0b905ab8ccee0 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 11 Mar 2025 07:09:46 -0700 Subject: [PATCH 0585/1708] ipn/ipnlocal: allow cache hits for testing ACME certs (#15023) PR #14771 added support for getting certs from alternate ACME servers, but the certStore caching mechanism breaks unless you install the CA in system roots, because we check the validity of the cert before allowing a cache hit, which includes checking for a valid chain back to a trusted CA. For ease of testing, allow cert cache hits when the chain is unknown to avoid re-issuing the cert on every TLS request served. We will still get a cache miss when the cert has expired, as enforced by a test, and this makes it much easier to test against non-prod ACME servers compared to having to manage the installation of non-prod CAs on clients. Updates #14771 Change-Id: I74fe6593fe399bd135cc822195155e99985ec08a Signed-off-by: Tom Proctor --- ipn/ipnlocal/cert.go | 27 ++++++++++++++++++++++++++- ipn/ipnlocal/cert_test.go | 24 +++++++++++++++++------- 2 files changed, 43 insertions(+), 8 deletions(-) diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index d360ed79c..4c026a9e7 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -471,6 +471,10 @@ func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger return nil, err } + if !isDefaultDirectoryURL(ac.DirectoryURL) { + logf("acme: using Directory URL %q", ac.DirectoryURL) + } + a, err := ac.GetReg(ctx, "" /* pre-RFC param */) switch { case err == nil: @@ -737,7 +741,28 @@ func validateLeaf(leaf *x509.Certificate, intermediates *x509.CertPool, domain s // binary's baked-in roots (LetsEncrypt). See tailscale/tailscale#14690. return validateLeaf(leaf, intermediates, domain, now, bakedroots.Get()) } - return err == nil + + if err == nil { + return true + } + + // When pointed at a non-prod ACME server, we don't expect to have the CA + // in our system or baked-in roots. Verify only throws UnknownAuthorityError + // after first checking the leaf cert's expiry, hostnames etc, so we know + // that the only reason for an error is to do with constructing a full chain. + // Allow this error so that cert caching still works in testing environments. + if errors.As(err, &x509.UnknownAuthorityError{}) { + acmeURL := envknob.String("TS_DEBUG_ACME_DIRECTORY_URL") + if !isDefaultDirectoryURL(acmeURL) { + return true + } + } + + return false +} + +func isDefaultDirectoryURL(u string) bool { + return u == "" || u == acme.LetsEncryptURL } // validLookingCertDomain reports whether name looks like a valid domain name that diff --git a/ipn/ipnlocal/cert_test.go b/ipn/ipnlocal/cert_test.go index 868808cd6..c77570e87 100644 --- a/ipn/ipnlocal/cert_test.go +++ b/ipn/ipnlocal/cert_test.go @@ -47,10 +47,10 @@ var certTestFS embed.FS func TestCertStoreRoundTrip(t *testing.T) { const testDomain = "example.com" - // Use a fixed verification timestamp so validity doesn't fall off when the - // cert expires. If you update the test data below, this may also need to be - // updated. + // Use fixed verification timestamps so validity doesn't change over time. 
+ // If you update the test data below, these may also need to be updated. testNow := time.Date(2023, time.February, 10, 0, 0, 0, 0, time.UTC) + testExpired := time.Date(2026, time.February, 10, 0, 0, 0, 0, time.UTC) // To re-generate a root certificate and domain certificate for testing, // use: @@ -78,14 +78,20 @@ func TestCertStoreRoundTrip(t *testing.T) { } tests := []struct { - name string - store certStore + name string + store certStore + debugACMEURL bool }{ - {"FileStore", certFileStore{dir: t.TempDir(), testRoots: roots}}, - {"StateStore", certStateStore{StateStore: new(mem.Store), testRoots: roots}}, + {"FileStore", certFileStore{dir: t.TempDir(), testRoots: roots}, false}, + {"FileStore_UnknownCA", certFileStore{dir: t.TempDir()}, true}, + {"StateStore", certStateStore{StateStore: new(mem.Store), testRoots: roots}, false}, + {"StateStore_UnknownCA", certStateStore{StateStore: new(mem.Store)}, true}, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { + if test.debugACMEURL { + t.Setenv("TS_DEBUG_ACME_DIRECTORY_URL", "https://acme-staging-v02.api.letsencrypt.org/directory") + } if err := test.store.WriteTLSCertAndKey(testDomain, testCert, testKey); err != nil { t.Fatalf("WriteTLSCertAndKey: unexpected error: %v", err) } @@ -99,6 +105,10 @@ func TestCertStoreRoundTrip(t *testing.T) { if diff := cmp.Diff(kp.KeyPEM, testKey); diff != "" { t.Errorf("Key (-got, +want):\n%s", diff) } + unexpected, err := test.store.Read(testDomain, testExpired) + if err != errCertExpired { + t.Fatalf("Read: expected expiry error: %v", string(unexpected.CertPEM)) + } }) } } From 660b0515b9e37594aac049576660f3d7ceafcce2 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Tue, 11 Mar 2025 13:24:11 -0400 Subject: [PATCH 0586/1708] safesocket, version: fix safesocket_darwin behavior for cmd/tailscale (#15275) fixes tailscale/tailscale#15269 Fixes the various CLIs for all of the various flavors of tailscaled on darwin. The logic in version is updated so that we have methods that return true only for the actual GUI app (which can beCLI) and the order of the checks in localTCPPortAndTokenDarwin are corrected so that the logic works with all 5 combinations of CLI and tailscaled. 
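From the caller's side, the resulting lookup order on macOS is (rough sketch; error handling and the unix-socket transport live elsewhere in cmd/tailscale):

    // 1. Credentials set explicitly via safesocket.SetCredentials, used when
    //    the CLI is driven by the GUI app, which already knows the port and token.
    // 2. Otherwise the sameuserproof file written by the macOS/macsys
    //    tailscaled, read via portAndTokenFromSameUserProof.
    // 3. Otherwise ErrTokenNotFound, and cmd/tailscale falls back to the
    //    plain unix socket served by a standalone tailscaled.
    port, token, err := safesocket.LocalTCPPortAndToken()
    if err != nil {
        // fall back to the unix socket transport
    }
    _, _ = port, token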
Signed-off-by: Jonathan Nobels --- safesocket/safesocket_darwin.go | 42 ++++++++++------- safesocket/safesocket_darwin_test.go | 68 ++++++++++++++++++++++------ version/prop.go | 42 ++++++++++------- 3 files changed, 104 insertions(+), 48 deletions(-) diff --git a/safesocket/safesocket_darwin.go b/safesocket/safesocket_darwin.go index fb35ad9df..e2b3ea458 100644 --- a/safesocket/safesocket_darwin.go +++ b/safesocket/safesocket_darwin.go @@ -34,17 +34,17 @@ type safesocketDarwin struct { mu sync.Mutex token string // safesocket auth token port int // safesocket port - sameuserproofFD *os.File // file descriptor for macos app store sameuserproof file - sharedDir string // shared directory for location of sameuserproof file + sameuserproofFD *os.File // File descriptor for macos app store sameuserproof file + sharedDir string // Shared directory for location of sameuserproof file - checkConn bool // Check macsys safesocket port before returning it - isMacSysExt func() bool // For testing only to force macsys - isMacGUIApp func() bool // For testing only to force macOS sandbox + checkConn bool // If true, check macsys safesocket port before returning it + isMacSysExt func() bool // Reports true if this binary is the macOS System Extension + isMacGUIApp func() bool // Reports true if running as a macOS GUI app (Tailscale.app) } var ssd = safesocketDarwin{ isMacSysExt: version.IsMacSysExt, - isMacGUIApp: func() bool { return version.IsMacAppStore() || version.IsMacSysApp() || version.IsMacSysExt() }, + isMacGUIApp: func() bool { return version.IsMacAppStoreGUI() || version.IsMacSysGUI() }, checkConn: true, sharedDir: "/Library/Tailscale", } @@ -63,22 +63,25 @@ var ssd = safesocketDarwin{ // calls InitListenerDarwin. // localTCPPortAndTokenDarwin returns the localhost TCP port number and auth token -// either generated, or sourced from the NEPacketTunnelProvider managed tailscaled process. +// either from the sameuserproof mechanism, or source and set directly from the +// NEPacketTunnelProvider managed tailscaled process when the CLI is invoked +// from the Tailscale.app GUI. func localTCPPortAndTokenDarwin() (port int, token string, err error) { ssd.mu.Lock() defer ssd.mu.Unlock() - if !ssd.isMacGUIApp() { - return 0, "", ErrNoTokenOnOS - } - - if ssd.port != 0 && ssd.token != "" { + switch { + case ssd.port != 0 && ssd.token != "": + // If something has explicitly set our credentials (typically non-standalone macos binary), use them. return ssd.port, ssd.token, nil + case !ssd.isMacGUIApp(): + // We're not a GUI app (probably cmd/tailscale), so try falling back to sameuserproof. + // If portAndTokenFromSameUserProof returns an error here, cmd/tailscale will + // attempt to use the default unix socket mechanism supported by tailscaled. + return portAndTokenFromSameUserProof() + default: + return 0, "", ErrTokenNotFound } - - // Credentials were not explicitly, this is likely a standalone CLI binary. - // Fallback to reading the sameuserproof file. - return portAndTokenFromSameUserProof() } // SetCredentials sets an token and port used to authenticate safesocket generated @@ -341,6 +344,11 @@ func readMacosSameUserProof() (port int, token string, err error) { } func portAndTokenFromSameUserProof() (port int, token string, err error) { + // When we're cmd/tailscale, we have no idea what tailscaled is, so we'll try + // macos, then macsys and finally, fallback to tailscaled via a unix socket + // if both of those return an error. 
You can run macos or macsys and + // tailscaled at the same time, but we are forced to choose one and the GUI + // clients are first in line here. You cannot run macos and macsys simultaneously. if port, token, err := readMacosSameUserProof(); err == nil { return port, token, nil } @@ -349,5 +357,5 @@ func portAndTokenFromSameUserProof() (port int, token string, err error) { return port, token, nil } - return 0, "", err + return 0, "", ErrTokenNotFound } diff --git a/safesocket/safesocket_darwin_test.go b/safesocket/safesocket_darwin_test.go index 2793d6aa3..e52959ad5 100644 --- a/safesocket/safesocket_darwin_test.go +++ b/safesocket/safesocket_darwin_test.go @@ -15,9 +15,12 @@ import ( // sets the port and token correctly and that LocalTCPPortAndToken // returns the given values. func TestSetCredentials(t *testing.T) { - wantPort := 123 - wantToken := "token" - tstest.Replace(t, &ssd.isMacGUIApp, func() bool { return true }) + const ( + wantToken = "token" + wantPort = 123 + ) + + tstest.Replace(t, &ssd.isMacGUIApp, func() bool { return false }) SetCredentials(wantToken, wantPort) gotPort, gotToken, err := LocalTCPPortAndToken() @@ -26,11 +29,47 @@ func TestSetCredentials(t *testing.T) { } if gotPort != wantPort { - t.Errorf("got port %d, want %d", gotPort, wantPort) + t.Errorf("port: got %d, want %d", gotPort, wantPort) + } + + if gotToken != wantToken { + t.Errorf("token: got %s, want %s", gotToken, wantToken) + } +} + +// TestFallbackToSameuserproof verifies that we fallback to the +// sameuserproof file via LocalTCPPortAndToken when we're running +// +// s cmd/tailscale +func TestFallbackToSameuserproof(t *testing.T) { + dir := t.TempDir() + const ( + wantToken = "token" + wantPort = 123 + ) + + // Mimics cmd/tailscale falling back to sameuserproof + tstest.Replace(t, &ssd.isMacGUIApp, func() bool { return false }) + tstest.Replace(t, &ssd.sharedDir, dir) + tstest.Replace(t, &ssd.checkConn, false) + + // Behave as macSysExt when initializing sameuserproof + tstest.Replace(t, &ssd.isMacSysExt, func() bool { return true }) + if err := initSameUserProofToken(dir, wantPort, wantToken); err != nil { + t.Fatalf("initSameUserProofToken: %v", err) + } + + gotPort, gotToken, err := LocalTCPPortAndToken() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if gotPort != wantPort { + t.Errorf("port: got %d, want %d", gotPort, wantPort) } if gotToken != wantToken { - t.Errorf("got token %s, want %s", gotToken, wantToken) + t.Errorf("token: got %s, want %s", gotToken, wantToken) } } @@ -38,7 +77,7 @@ func TestSetCredentials(t *testing.T) { // returns a listener and a non-zero port and non-empty token. 
func TestInitListenerDarwin(t *testing.T) { temp := t.TempDir() - tstest.Replace(t, &ssd.isMacGUIApp, func() bool { return true }) + tstest.Replace(t, &ssd.isMacGUIApp, func() bool { return false }) ln, err := InitListenerDarwin(temp) if err != nil || ln == nil { @@ -52,15 +91,14 @@ func TestInitListenerDarwin(t *testing.T) { } if port == 0 { - t.Errorf("expected non-zero port, got %d", port) + t.Errorf("port: got %d, want non-zero", port) } if token == "" { - t.Errorf("expected non-empty token, got empty string") + t.Errorf("token: got %s, want non-empty", token) } } -// TestTokenGeneration verifies token generation behavior func TestTokenGeneration(t *testing.T) { token, err := getToken() if err != nil { @@ -70,7 +108,7 @@ func TestTokenGeneration(t *testing.T) { // Verify token length (hex string is 2x byte length) wantLen := sameUserProofTokenLength * 2 if got := len(token); got != wantLen { - t.Errorf("token length = %d, want %d", got, wantLen) + t.Errorf("token length: got %d, want %d", got, wantLen) } // Verify token persistence @@ -79,7 +117,7 @@ func TestTokenGeneration(t *testing.T) { t.Fatalf("subsequent getToken: %v", err) } if subsequentToken != token { - t.Errorf("subsequent token = %q, want %q", subsequentToken, token) + t.Errorf("subsequent token: got %q, want %q", subsequentToken, token) } } @@ -107,10 +145,10 @@ func TestMacsysSameuserproof(t *testing.T) { } if gotPort != wantPort { - t.Errorf("got port = %d, want %d", gotPort, wantPort) + t.Errorf("port: got %d, want %d", gotPort, wantPort) } if wantToken != gotToken { - t.Errorf("got token = %s, want %s", wantToken, gotToken) + t.Errorf("token: got %s, want %s", wantToken, gotToken) } assertFileCount(t, dir, 1, "sameuserproof-") } @@ -138,7 +176,7 @@ func assertFileCount(t *testing.T, dir string, want int, prefix string) { files, err := os.ReadDir(dir) if err != nil { - t.Fatalf("unexpected error: %v", err) + t.Fatalf("[unexpected] error: %v", err) } count := 0 for _, file := range files { @@ -147,6 +185,6 @@ func assertFileCount(t *testing.T, dir string, want int, prefix string) { } } if count != want { - t.Errorf("expected 1 file, got %d", count) + t.Errorf("files: got %d, want 1", count) } } diff --git a/version/prop.go b/version/prop.go index 6026d1179..9327e6fe6 100644 --- a/version/prop.go +++ b/version/prop.go @@ -62,26 +62,21 @@ func IsSandboxedMacOS() bool { // Tailscale for macOS, either the main GUI process (non-sandboxed) or the // system extension (sandboxed). func IsMacSys() bool { - return IsMacSysExt() || IsMacSysApp() + return IsMacSysExt() || IsMacSysGUI() } var isMacSysApp lazy.SyncValue[bool] -// IsMacSysApp reports whether this process is the main, non-sandboxed GUI process +// IsMacSysGUI reports whether this process is the main, non-sandboxed GUI process // that ships with the Standalone variant of Tailscale for macOS. -func IsMacSysApp() bool { +func IsMacSysGUI() bool { if runtime.GOOS != "darwin" { return false } return isMacSysApp.Get(func() bool { - exe, err := os.Executable() - if err != nil { - return false - } - // Check that this is the GUI binary, and it is not sandboxed. The GUI binary - // shipped in the App Store will always have the App Sandbox enabled. 
- return strings.HasSuffix(exe, "/Contents/MacOS/Tailscale") && !IsMacAppStore() + return strings.Contains(os.Getenv("HOME"), "/Containers/io.tailscale.ipn.macsys/") || + strings.Contains(os.Getenv("XPC_SERVICE_NAME"), "io.tailscale.ipn.macsys") }) } @@ -95,10 +90,6 @@ func IsMacSysExt() bool { return false } return isMacSysExt.Get(func() bool { - if strings.Contains(os.Getenv("HOME"), "/Containers/io.tailscale.ipn.macsys/") || - strings.Contains(os.Getenv("XPC_SERVICE_NAME"), "io.tailscale.ipn.macsys") { - return true - } exe, err := os.Executable() if err != nil { return false @@ -109,8 +100,8 @@ func IsMacSysExt() bool { var isMacAppStore lazy.SyncValue[bool] -// IsMacAppStore whether this binary is from the App Store version of Tailscale -// for macOS. +// IsMacAppStore returns whether this binary is from the App Store version of Tailscale +// for macOS. Returns true for both the network extension and the GUI app. func IsMacAppStore() bool { if runtime.GOOS != "darwin" { return false @@ -124,6 +115,25 @@ func IsMacAppStore() bool { }) } +var isMacAppStoreGUI lazy.SyncValue[bool] + +// IsMacAppStoreGUI reports whether this binary is the GUI app from the App Store +// version of Tailscale for macOS. +func IsMacAppStoreGUI() bool { + if runtime.GOOS != "darwin" { + return false + } + return isMacAppStoreGUI.Get(func() bool { + exe, err := os.Executable() + if err != nil { + return false + } + // Check that this is the GUI binary, and it is not sandboxed. The GUI binary + // shipped in the App Store will always have the App Sandbox enabled. + return strings.Contains(exe, "/Tailscale") && !IsMacSysGUI() + }) +} + var isAppleTV lazy.SyncValue[bool] // IsAppleTV reports whether this binary is part of the Tailscale network extension for tvOS. From ce0d8b0fb9897d2481e08287e0a4de2afccb44ae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 11:25:35 -0600 Subject: [PATCH 0587/1708] .github: Bump github/codeql-action from 3.28.10 to 3.28.11 (#15258) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.10 to 3.28.11. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d...6bb031afdd8eb862ea3fc1848194185e076637e5) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 318bc6698..f20719360 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10 + uses: github/codeql-action/init@6bb031afdd8eb862ea3fc1848194185e076637e5 # v3.28.11 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10 + uses: github/codeql-action/autobuild@6bb031afdd8eb862ea3fc1848194185e076637e5 # v3.28.11 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10 + uses: github/codeql-action/analyze@6bb031afdd8eb862ea3fc1848194185e076637e5 # v3.28.11 From 03f7f1860ed4f39707688ade3e61d59ba3693d2d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 11:31:52 -0600 Subject: [PATCH 0588/1708] .github: Bump peter-evans/create-pull-request from 7.0.7 to 7.0.8 (#15257) Bumps [peter-evans/create-pull-request](https://github.com/peter-evans/create-pull-request) from 7.0.7 to 7.0.8. - [Release notes](https://github.com/peter-evans/create-pull-request/releases) - [Commits](https://github.com/peter-evans/create-pull-request/compare/dd2324fc52d5d43c699a5636bcf19fceaa70c284...271a8d0340265f705b14b6d32b9829c1cb33d45e) --- updated-dependencies: - dependency-name: peter-evans/create-pull-request dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/update-flake.yml | 2 +- .github/workflows/update-webclient-prebuilt.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index 84b10e254..f695c578e 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -36,7 +36,7 @@ jobs: private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }} - name: Send pull request - uses: peter-evans/create-pull-request@dd2324fc52d5d43c699a5636bcf19fceaa70c284 #v7.0.7 + uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e #v7.0.8 with: token: ${{ steps.generate-token.outputs.token }} author: Flakes Updater diff --git a/.github/workflows/update-webclient-prebuilt.yml b/.github/workflows/update-webclient-prebuilt.yml index 18d7ffdd9..412836db7 100644 --- a/.github/workflows/update-webclient-prebuilt.yml +++ b/.github/workflows/update-webclient-prebuilt.yml @@ -35,7 +35,7 @@ jobs: - name: Send pull request id: pull-request - uses: peter-evans/create-pull-request@dd2324fc52d5d43c699a5636bcf19fceaa70c284 #v7.0.7 + uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e #v7.0.8 with: token: ${{ steps.generate-token.outputs.token }} author: OSS Updater From 8f0080c7a48ccf482eeebe7d5c4a9d80da1dba02 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Tue, 11 Mar 2025 13:10:22 -0700 Subject: [PATCH 0589/1708] cmd/tsidp: allow CORS requests to openid-configuration (#15229) Add support for Cross-Origin XHR requests to the openid-configuration endpoint to enable clients like Grafana's auto-population of OIDC setup data from its contents. 
Updates https://github.com/tailscale/tailscale/issues/10263 Signed-off-by: Patrick O'Doherty --- cmd/tsidp/tsidp.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 96fac58fd..95ab2b2eb 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -765,6 +765,18 @@ var ( ) func (s *idpServer) serveOpenIDConfig(w http.ResponseWriter, r *http.Request) { + h := w.Header() + h.Set("Access-Control-Allow-Origin", "*") + h.Set("Access-Control-Allow-Method", "GET, OPTIONS") + // allow all to prevent errors from client sending their own bespoke headers + // and having the server reject the request. + h.Set("Access-Control-Allow-Headers", "*") + + // early return for pre-flight OPTIONS requests. + if r.Method == "OPTIONS" { + w.WriteHeader(http.StatusOK) + return + } if r.URL.Path != oidcConfigPath { http.Error(w, "tsidp: not found", http.StatusNotFound) return From 5ebc135397acbc2a217986b95f693e6a2c211fd8 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Wed, 5 Mar 2025 10:25:30 -0800 Subject: [PATCH 0590/1708] tsnet,wgengine: fix src to primary Tailscale IP for TCP dials Ensure that the src address for a connection is one of the primary addresses assigned by Tailscale. Not, for example, a virtual IP address. Updates #14667 Signed-off-by: Fran Bull --- cmd/k8s-operator/depaware.txt | 1 + tsnet/tsnet.go | 9 ++++++-- wgengine/netstack/netstack.go | 43 +++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+), 2 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 54d9bd248..0a787a780 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -904,6 +904,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/tstime/rate from tailscale.com/derp+ tailscale.com/tsweb/varz from tailscale.com/util/usermetric tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/bools from tailscale.com/tsnet tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/ipproto from tailscale.com/net/flowtrack+ diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 680825708..15cf39cba 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -49,6 +49,7 @@ import ( "tailscale.com/net/socks5" "tailscale.com/net/tsdial" "tailscale.com/tsd" + "tailscale.com/types/bools" "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/nettype" @@ -601,7 +602,9 @@ func (s *Server) start() (reterr error) { // Note: don't just return ns.DialContextTCP or we'll return // *gonet.TCPConn(nil) instead of a nil interface which trips up // callers. - tcpConn, err := ns.DialContextTCP(ctx, dst) + v4, v6 := s.TailscaleIPs() + src := bools.IfElse(dst.Addr().Is6(), v6, v4) + tcpConn, err := ns.DialContextTCPWithBind(ctx, src, dst) if err != nil { return nil, err } @@ -611,7 +614,9 @@ func (s *Server) start() (reterr error) { // Note: don't just return ns.DialContextUDP or we'll return // *gonet.UDPConn(nil) instead of a nil interface which trips up // callers. 
- udpConn, err := ns.DialContextUDP(ctx, dst) + v4, v6 := s.TailscaleIPs() + src := bools.IfElse(dst.Addr().Is6(), v6, v4) + udpConn, err := ns.DialContextUDPWithBind(ctx, src, dst) if err != nil { return nil, err } diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 0bbd20b79..591bedde4 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -843,6 +843,27 @@ func (ns *Impl) DialContextTCP(ctx context.Context, ipp netip.AddrPort) (*gonet. return gonet.DialContextTCP(ctx, ns.ipstack, remoteAddress, ipType) } +// DialContextTCPWithBind creates a new gonet.TCPConn connected to the specified +// remoteAddress with its local address bound to localAddr on an available port. +func (ns *Impl) DialContextTCPWithBind(ctx context.Context, localAddr netip.Addr, remoteAddr netip.AddrPort) (*gonet.TCPConn, error) { + remoteAddress := tcpip.FullAddress{ + NIC: nicID, + Addr: tcpip.AddrFromSlice(remoteAddr.Addr().AsSlice()), + Port: remoteAddr.Port(), + } + localAddress := tcpip.FullAddress{ + NIC: nicID, + Addr: tcpip.AddrFromSlice(localAddr.AsSlice()), + } + var ipType tcpip.NetworkProtocolNumber + if remoteAddr.Addr().Is4() { + ipType = ipv4.ProtocolNumber + } else { + ipType = ipv6.ProtocolNumber + } + return gonet.DialTCPWithBind(ctx, ns.ipstack, localAddress, remoteAddress, ipType) +} + func (ns *Impl) DialContextUDP(ctx context.Context, ipp netip.AddrPort) (*gonet.UDPConn, error) { remoteAddress := &tcpip.FullAddress{ NIC: nicID, @@ -859,6 +880,28 @@ func (ns *Impl) DialContextUDP(ctx context.Context, ipp netip.AddrPort) (*gonet. return gonet.DialUDP(ns.ipstack, nil, remoteAddress, ipType) } +// DialContextUDPWithBind creates a new gonet.UDPConn. Connected to remoteAddr. +// With its local address bound to localAddr on an available port. +func (ns *Impl) DialContextUDPWithBind(ctx context.Context, localAddr netip.Addr, remoteAddr netip.AddrPort) (*gonet.UDPConn, error) { + remoteAddress := &tcpip.FullAddress{ + NIC: nicID, + Addr: tcpip.AddrFromSlice(remoteAddr.Addr().AsSlice()), + Port: remoteAddr.Port(), + } + localAddress := &tcpip.FullAddress{ + NIC: nicID, + Addr: tcpip.AddrFromSlice(localAddr.AsSlice()), + } + var ipType tcpip.NetworkProtocolNumber + if remoteAddr.Addr().Is4() { + ipType = ipv4.ProtocolNumber + } else { + ipType = ipv6.ProtocolNumber + } + + return gonet.DialUDP(ns.ipstack, localAddress, remoteAddress, ipType) +} + // getInjectInboundBuffsSizes returns packet memory and a sizes slice for usage // when calling tstun.Wrapper.InjectInboundPacketBuffer(). These are sized with // consideration for MTU and GSO support on ns.linkEP. 
They should be recycled From 06ae52d309843429df69bc903c318c448abc44d8 Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Tue, 11 Mar 2025 17:23:21 -0400 Subject: [PATCH 0591/1708] words: append to the tail of the wordlists (#15278) Updates tailscale/corp#14698 Signed-off-by: Naman Sood --- words/scales.txt | 41 +++++++++++++++++++++++++++++++++++++++++ words/tails.txt | 26 ++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) diff --git a/words/scales.txt b/words/scales.txt index 2fe849bb9..fb19cb88d 100644 --- a/words/scales.txt +++ b/words/scales.txt @@ -399,3 +399,44 @@ rankine piano ruler scoville +oratrice +teeth +cliff +degree +company +economy +court +justitia +themis +carat +carob +karat +barley +corn +penny +pound +mark +pence +mine +stairs +escalator +elevator +skilift +gondola +firefighter +newton +smoot +city +truck +everest +wall +fence +fort +trench +matrix +census +likert +sidemirror +wage +salary +fujita diff --git a/words/tails.txt b/words/tails.txt index 497533241..5b93bdd96 100644 --- a/words/tails.txt +++ b/words/tails.txt @@ -694,3 +694,29 @@ ussuri kitty tanuki neko +wind +airplane +time +gumiho +eel +moray +twin +hair +braid +gate +end +queue +miku +at +fin +solarflare +asymptote +reverse +bone +stern +quaver +note +mining +coat +follow +stalk From 52710945f524d96138c45d860139a544c39ee9d8 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 12 Mar 2025 10:37:03 -0400 Subject: [PATCH 0592/1708] control/controlclient, ipn: add client audit logging (#14950) updates tailscale/corp#26435 Adds client support for sending audit logs to control via /machine/audit-log. Specifically implements audit logging for user initiated disconnections. This will require further work to optimize the peristant storage and exclusion via build tags for mobile: tailscale/corp#27011 tailscale/corp#27012 Signed-off-by: Jonathan Nobels --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + control/controlclient/auto.go | 7 + control/controlclient/controlclient_test.go | 41 ++ control/controlclient/direct.go | 50 +- control/controlclient/errors.go | 51 +++ ipn/auditlog/auditlog.go | 466 +++++++++++++++++++ ipn/auditlog/auditlog_test.go | 481 ++++++++++++++++++++ ipn/ipnauth/actor.go | 7 +- ipn/ipnauth/policy.go | 10 +- ipn/ipnlocal/local.go | 68 ++- tailcfg/tailcfg.go | 30 ++ tsd/tsd.go | 4 + 13 files changed, 1204 insertions(+), 13 deletions(-) create mode 100644 control/controlclient/errors.go create mode 100644 ipn/auditlog/auditlog.go create mode 100644 ipn/auditlog/auditlog_test.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 0a787a780..1c27fddea 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -814,6 +814,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ + tailscale.com/ipn/auditlog from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/desktop from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index c0f592ea1..026758a47 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -271,6 +271,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 
tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ + tailscale.com/ipn/auditlog from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ 💣 tailscale.com/ipn/desktop from tailscale.com/cmd/tailscaled+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index da123f8c4..e0168c19d 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -119,6 +119,7 @@ type Auto struct { updateCh chan struct{} // readable when we should inform the server of a change observer Observer // called to update Client status; always non-nil observerQueue execqueue.ExecQueue + shutdownFn func() // to be called prior to shutdown or nil unregisterHealthWatch func() @@ -189,6 +190,7 @@ func NewNoStart(opts Options) (_ *Auto, err error) { mapDone: make(chan struct{}), updateDone: make(chan struct{}), observer: opts.Observer, + shutdownFn: opts.Shutdown, } c.authCtx, c.authCancel = context.WithCancel(context.Background()) c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, opts.Logf) @@ -755,6 +757,7 @@ func (c *Auto) Shutdown() { return } c.logf("client.Shutdown ...") + shutdownFn := c.shutdownFn direct := c.direct c.closed = true @@ -767,6 +770,10 @@ func (c *Auto) Shutdown() { c.unpauseWaiters = nil c.mu.Unlock() + if shutdownFn != nil { + shutdownFn() + } + c.unregisterHealthWatch() <-c.authDone <-c.mapDone diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index 6885b5851..f8882a4e7 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -4,6 +4,8 @@ package controlclient import ( + "errors" + "fmt" "io" "reflect" "slices" @@ -147,3 +149,42 @@ func TestCanSkipStatus(t *testing.T) { t.Errorf("Status fields = %q; this code was only written to handle fields %q", f, want) } } + +func TestRetryableErrors(t *testing.T) { + errorTests := []struct { + err error + want bool + }{ + {errNoNoiseClient, true}, + {errNoNodeKey, true}, + {fmt.Errorf("%w: %w", errNoNoiseClient, errors.New("no noise")), true}, + {fmt.Errorf("%w: %w", errHTTPPostFailure, errors.New("bad post")), true}, + {fmt.Errorf("%w: %w", errNoNodeKey, errors.New("not node key")), true}, + {errBadHTTPResponse(429, "too may requests"), true}, + {errBadHTTPResponse(500, "internal server eror"), true}, + {errBadHTTPResponse(502, "bad gateway"), true}, + {errBadHTTPResponse(503, "service unavailable"), true}, + {errBadHTTPResponse(504, "gateway timeout"), true}, + {errBadHTTPResponse(1234, "random error"), false}, + } + + for _, tt := range errorTests { + t.Run(tt.err.Error(), func(t *testing.T) { + if isRetryableErrorForTest(tt.err) != tt.want { + t.Fatalf("retriable: got %v, want %v", tt.err, tt.want) + } + }) + } +} + +type retryableForTest interface { + Retryable() bool +} + +func isRetryableErrorForTest(err error) bool { + var ae retryableForTest + if errors.As(err, &ae) { + return ae.Retryable() + } + return false +} diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index e7d1d25f8..68ab9ca17 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -156,6 +156,11 @@ type Options struct { // If we receive a new DialPlan from the server, this value will be // updated. 
DialPlan ControlDialPlanner + + // Shutdown is an optional function that will be called before client shutdown is + // attempted. It is used to allow the client to clean up any resources or complete any + // tasks that are dependent on a live client. + Shutdown func() } // ControlDialPlanner is the interface optionally supplied when creating a @@ -1662,11 +1667,11 @@ func (c *Auto) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpdate) err func (c *Direct) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpdate) error { nc, err := c.getNoiseClient() if err != nil { - return err + return fmt.Errorf("%w: %w", errNoNoiseClient, err) } nodeKey, ok := c.GetPersist().PublicNodeKeyOK() if !ok { - return errors.New("no node key") + return errNoNodeKey } if c.panicOnUse { panic("tainted client") @@ -1697,6 +1702,47 @@ func (c *Direct) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpdate) e return nil } +// SendAuditLog implements [auditlog.Transport] by sending an audit log synchronously to the control plane. +// +// See docs on [tailcfg.AuditLogRequest] and [auditlog.Logger] for background. +func (c *Auto) SendAuditLog(ctx context.Context, auditLog tailcfg.AuditLogRequest) (err error) { + return c.direct.sendAuditLog(ctx, auditLog) +} + +func (c *Direct) sendAuditLog(ctx context.Context, auditLog tailcfg.AuditLogRequest) (err error) { + nc, err := c.getNoiseClient() + if err != nil { + return fmt.Errorf("%w: %w", errNoNoiseClient, err) + } + + nodeKey, ok := c.GetPersist().PublicNodeKeyOK() + if !ok { + return errNoNodeKey + } + + req := &tailcfg.AuditLogRequest{ + Version: tailcfg.CurrentCapabilityVersion, + NodeKey: nodeKey, + Action: auditLog.Action, + Details: auditLog.Details, + } + + if c.panicOnUse { + panic("tainted client") + } + + res, err := nc.post(ctx, "/machine/audit-log", nodeKey, req) + if err != nil { + return fmt.Errorf("%w: %w", errHTTPPostFailure, err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + all, _ := io.ReadAll(res.Body) + return errBadHTTPResponse(res.StatusCode, string(all)) + } + return nil +} + func addLBHeader(req *http.Request, nodeKey key.NodePublic) { if !nodeKey.IsZero() { req.Header.Add(tailcfg.LBHeader, nodeKey.String()) diff --git a/control/controlclient/errors.go b/control/controlclient/errors.go new file mode 100644 index 000000000..9b4dab844 --- /dev/null +++ b/control/controlclient/errors.go @@ -0,0 +1,51 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package controlclient + +import ( + "errors" + "fmt" + "net/http" +) + +// apiResponseError is an error type that can be returned by controlclient +// api requests. +// +// It wraps an underlying error and a flag for clients to query if the +// error is retryable via the Retryable() method. +type apiResponseError struct { + err error + retryable bool +} + +// Error implements [error]. +func (e *apiResponseError) Error() string { + return e.err.Error() +} + +// Retryable reports whether the error is retryable. 
+func (e *apiResponseError) Retryable() bool { + return e.retryable +} + +func (e *apiResponseError) Unwrap() error { return e.err } + +var ( + errNoNodeKey = &apiResponseError{errors.New("no node key"), true} + errNoNoiseClient = &apiResponseError{errors.New("no noise client"), true} + errHTTPPostFailure = &apiResponseError{errors.New("http failure"), true} +) + +func errBadHTTPResponse(code int, msg string) error { + retryable := false + switch code { + case http.StatusTooManyRequests, + http.StatusInternalServerError, + http.StatusBadGateway, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout: + retryable = true + } + return &apiResponseError{fmt.Errorf("http error %d: %s", code, msg), retryable} +} diff --git a/ipn/auditlog/auditlog.go b/ipn/auditlog/auditlog.go new file mode 100644 index 000000000..30f39211f --- /dev/null +++ b/ipn/auditlog/auditlog.go @@ -0,0 +1,466 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package auditlog provides a mechanism for logging audit events. +package auditlog + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "sort" + "sync" + "time" + + "tailscale.com/ipn" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + "tailscale.com/util/rands" + "tailscale.com/util/set" +) + +// transaction represents an audit log that has not yet been sent to the control plane. +type transaction struct { + // EventID is the unique identifier for the event being logged. + // This is used on the client side only and is not sent to control. + EventID string `json:",omitempty"` + // Retries is the number of times the logger has attempted to send this log. + // This is used on the client side only and is not sent to control. + Retries int `json:",omitempty"` + + // Action is the action to be logged. It must correspond to a known action in the control plane. + Action tailcfg.ClientAuditAction `json:",omitempty"` + // Details is an opaque string specific to the action being logged. Empty strings may not + // be valid depending on the action being logged. + Details string `json:",omitempty"` + // TimeStamp is the time at which the audit log was generated on the node. + TimeStamp time.Time `json:",omitzero"` +} + +// Transport provides a means for a client to send audit logs to a consumer (typically the control plane). +type Transport interface { + // SendAuditLog sends an audit log to a consumer of audit logs. + // Errors should be checked with [IsRetryableError] for retryability. + SendAuditLog(context.Context, tailcfg.AuditLogRequest) error +} + +// LogStore provides a means for a [Logger] to persist logs to disk or memory. +type LogStore interface { + // Save saves the given data to a persistent store. Save will overwrite existing data + // for the given key. + save(key ipn.ProfileID, txns []*transaction) error + + // Load retrieves the data from a persistent store. Returns a nil slice and + // no error if no data exists for the given key. + load(key ipn.ProfileID) ([]*transaction, error) +} + +// Opts contains the configuration options for a [Logger]. +type Opts struct { + // RetryLimit is the maximum number of attempts the logger will make to send a log before giving up. + RetryLimit int + // Store is the persistent store used to save logs to disk. Must be non-nil. + Store LogStore + // Logf is the logger used to log messages from the audit logger. Must be non-nil. + Logf logger.Logf +} + +// IsRetryableError returns true if the given error is retryable +// See [controlclient.apiResponseError]. 
Potentially retryable errors implement the Retryable() method. +func IsRetryableError(err error) bool { + var retryable interface{ Retryable() bool } + return errors.As(err, &retryable) && retryable.Retryable() +} + +type backoffOpts struct { + min, max time.Duration + multiplier float64 +} + +// .5, 1, 2, 4, 8, 10, 10, 10, 10, 10... +var defaultBackoffOpts = backoffOpts{ + min: time.Millisecond * 500, + max: 10 * time.Second, + multiplier: 2, +} + +// Logger provides a queue-based mechanism for submitting audit logs to the control plane - or +// another suitable consumer. Logs are stored to disk and retried until they are successfully sent, +// or until they permanently fail. +// +// Each individual profile/controlclient tuple should construct and manage a unique [Logger] instance. +type Logger struct { + logf logger.Logf + retryLimit int // the maximum number of attempts to send a log before giving up. + flusher chan struct{} // channel used to signal a flush operation. + done chan struct{} // closed when the flush worker exits. + ctx context.Context // canceled when the logger is stopped. + ctxCancel context.CancelFunc // cancels ctx. + backoffOpts // backoff settings for retry operations. + + // mu protects the fields below. + mu sync.Mutex + store LogStore // persistent storage for unsent logs. + profileID ipn.ProfileID // empty if [Logger.SetProfileID] has not been called. + transport Transport // nil until [Logger.Start] is called. +} + +// NewLogger creates a new [Logger] with the given options. +func NewLogger(opts Opts) *Logger { + ctx, cancel := context.WithCancel(context.Background()) + + al := &Logger{ + retryLimit: opts.RetryLimit, + logf: logger.WithPrefix(opts.Logf, "auditlog: "), + store: opts.Store, + flusher: make(chan struct{}, 1), + done: make(chan struct{}), + ctx: ctx, + ctxCancel: cancel, + backoffOpts: defaultBackoffOpts, + } + al.logf("created") + return al +} + +// FlushAndStop synchronously flushes all pending logs and stops the audit logger. +// This will block until a final flush operation completes or context is done. +// If the logger is already stopped, this will return immediately. All unsent +// logs will be persisted to the store. +func (al *Logger) FlushAndStop(ctx context.Context) { + al.stop() + al.flush(ctx) +} + +// SetProfileID sets the profileID for the logger. This must be called before any logs can be enqueued. +// The profileID of a logger cannot be changed once set. +func (al *Logger) SetProfileID(profileID ipn.ProfileID) error { + al.mu.Lock() + defer al.mu.Unlock() + if al.profileID != "" { + return errors.New("profileID already set") + } + + al.profileID = profileID + return nil +} + +// Start starts the audit logger with the given transport. +// It returns an error if the logger is already started. +func (al *Logger) Start(t Transport) error { + al.mu.Lock() + defer al.mu.Unlock() + + if al.transport != nil { + return errors.New("already started") + } + + al.transport = t + pending, err := al.storedCountLocked() + if err != nil { + al.logf("[unexpected] failed to restore logs: %v", err) + } + go al.flushWorker() + if pending > 0 { + al.flushAsync() + } + return nil +} + +// ErrAuditLogStorageFailure is returned when the logger fails to persist logs to the store. +var ErrAuditLogStorageFailure = errors.New("audit log storage failure") + +// Enqueue queues an audit log to be sent to the control plane (or another suitable consumer/transport). 
+// This will return an error if the underlying store fails to save the log or we fail to generate a unique +// eventID for the log. +func (al *Logger) Enqueue(action tailcfg.ClientAuditAction, details string) error { + txn := &transaction{ + Action: action, + Details: details, + TimeStamp: time.Now(), + } + // Generate a suitably random eventID for the transaction. + txn.EventID = fmt.Sprint(txn.TimeStamp, rands.HexString(16)) + return al.enqueue(txn) +} + +// flushAsync requests an asynchronous flush. +// It is a no-op if a flush is already pending. +func (al *Logger) flushAsync() { + select { + case al.flusher <- struct{}{}: + default: + } +} + +func (al *Logger) flushWorker() { + defer close(al.done) + + var retryDelay time.Duration + retry := time.NewTimer(0) + retry.Stop() + + for { + select { + case <-al.ctx.Done(): + return + case <-al.flusher: + err := al.flush(al.ctx) + switch { + case errors.Is(err, context.Canceled): + // The logger was stopped, no need to retry. + return + case err != nil: + retryDelay = max(al.backoffOpts.min, min(retryDelay*time.Duration(al.backoffOpts.multiplier), al.backoffOpts.max)) + al.logf("retrying after %v, %v", retryDelay, err) + retry.Reset(retryDelay) + default: + retryDelay = 0 + retry.Stop() + } + case <-retry.C: + al.flushAsync() + } + } +} + +// flush attempts to send all pending logs to the control plane. +// l.mu must not be held. +func (al *Logger) flush(ctx context.Context) error { + al.mu.Lock() + pending, err := al.store.load(al.profileID) + t := al.transport + al.mu.Unlock() + + if err != nil { + // This will catch nil profileIDs + return fmt.Errorf("failed to restore pending logs: %w", err) + } + if len(pending) == 0 { + return nil + } + if t == nil { + return errors.New("no transport") + } + + complete, unsent := al.sendToTransport(ctx, pending, t) + al.markTransactionsDone(complete) + + al.mu.Lock() + defer al.mu.Unlock() + if err = al.appendToStoreLocked(unsent); err != nil { + al.logf("[unexpected] failed to persist logs: %v", err) + } + + if len(unsent) != 0 { + return fmt.Errorf("failed to send %d logs", len(unsent)) + } + + if len(complete) != 0 { + al.logf("complete %d audit log transactions", len(complete)) + } + return nil +} + +// sendToTransport sends all pending logs to the control plane. Returns a pair of slices +// containing the logs that were successfully sent (or failed permanently) and those that were not. +// +// This may require multiple round trips to the control plane and can be a long running transaction. +func (al *Logger) sendToTransport(ctx context.Context, pending []*transaction, t Transport) (complete []*transaction, unsent []*transaction) { + for i, txn := range pending { + req := tailcfg.AuditLogRequest{ + Action: tailcfg.ClientAuditAction(txn.Action), + Details: txn.Details, + Timestamp: txn.TimeStamp, + } + + if err := t.SendAuditLog(ctx, req); err != nil { + switch { + case errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded): + // The contex is done. All further attempts will fail. + unsent = append(unsent, pending[i:]...) + return complete, unsent + case IsRetryableError(err) && txn.Retries+1 < al.retryLimit: + // We permit a maximum number of retries for each log. All retriable + // errors should be transient and we should be able to send the log eventually, but + // we don't want logs to be persisted indefinitely. 
+ txn.Retries++ + unsent = append(unsent, txn) + default: + complete = append(complete, txn) + al.logf("failed permanently: %v", err) + } + } else { + // No error - we're done. + complete = append(complete, txn) + } + } + + return complete, unsent +} + +func (al *Logger) stop() { + al.mu.Lock() + t := al.transport + al.mu.Unlock() + + if t == nil { + // No transport means no worker goroutine and done will not be + // closed if we cancel the context. + return + } + + al.ctxCancel() + <-al.done + al.logf("stopped for profileID: %v", al.profileID) +} + +// appendToStoreLocked persists logs to the store. This will deduplicate +// logs so it is safe to call this with the same logs multiple time, to +// requeue failed transactions for example. +// +// l.mu must be held. +func (al *Logger) appendToStoreLocked(txns []*transaction) error { + if len(txns) == 0 { + return nil + } + + if al.profileID == "" { + return errors.New("no logId set") + } + + persisted, err := al.store.load(al.profileID) + if err != nil { + al.logf("[unexpected] append failed to restore logs: %v", err) + } + + // The order is important here. We want the latest transactions first, which will + // ensure when we dedup, the new transactions are seen and the older transactions + // are discarded. + txnsOut := append(txns, persisted...) + txnsOut = deduplicateAndSort(txnsOut) + + return al.store.save(al.profileID, txnsOut) +} + +// storedCountLocked returns the number of logs persisted to the store. +// al.mu must be held. +func (al *Logger) storedCountLocked() (int, error) { + persisted, err := al.store.load(al.profileID) + return len(persisted), err +} + +// markTransactionsDone removes logs from the store that are complete (sent or failed permanently). +// al.mu must not be held. +func (al *Logger) markTransactionsDone(sent []*transaction) { + al.mu.Lock() + defer al.mu.Unlock() + + ids := set.Set[string]{} + for _, txn := range sent { + ids.Add(txn.EventID) + } + + persisted, err := al.store.load(al.profileID) + if err != nil { + al.logf("[unexpected] markTransactionsDone failed to restore logs: %v", err) + } + var unsent []*transaction + for _, txn := range persisted { + if !ids.Contains(txn.EventID) { + unsent = append(unsent, txn) + } + } + al.store.save(al.profileID, unsent) +} + +// deduplicateAndSort removes duplicate logs from the given slice and sorts them by timestamp. +// The first log entry in the slice will be retained, subsequent logs with the same EventID will be discarded. +func deduplicateAndSort(txns []*transaction) []*transaction { + seen := set.Set[string]{} + deduped := make([]*transaction, 0, len(txns)) + for _, txn := range txns { + if !seen.Contains(txn.EventID) { + deduped = append(deduped, txn) + seen.Add(txn.EventID) + } + } + // Sort logs by timestamp - oldest to newest. This will put the oldest logs at + // the front of the queue. + sort.Slice(deduped, func(i, j int) bool { + return deduped[i].TimeStamp.Before(deduped[j].TimeStamp) + }) + return deduped +} + +func (al *Logger) enqueue(txn *transaction) error { + al.mu.Lock() + defer al.mu.Unlock() + + if err := al.appendToStoreLocked([]*transaction{txn}); err != nil { + return fmt.Errorf("%w: %w", ErrAuditLogStorageFailure, err) + } + + // If a.transport is nil if the logger is stopped. + if al.transport != nil { + al.flushAsync() + } + + return nil +} + +var _ LogStore = (*logStateStore)(nil) + +// logStateStore is a concrete implementation of [LogStore] +// using [ipn.StateStore] as the underlying storage. 
+type logStateStore struct { + store ipn.StateStore +} + +// NewLogStore creates a new LogStateStore with the given [ipn.StateStore]. +func NewLogStore(store ipn.StateStore) LogStore { + return &logStateStore{ + store: store, + } +} + +func (s *logStateStore) generateKey(key ipn.ProfileID) string { + return "auditlog-" + string(key) +} + +// Save saves the given logs to an [ipn.StateStore]. This overwrites +// any existing entries for the given key. +func (s *logStateStore) save(key ipn.ProfileID, txns []*transaction) error { + if key == "" { + return errors.New("empty key") + } + + data, err := json.Marshal(txns) + if err != nil { + return err + } + k := ipn.StateKey(s.generateKey(key)) + return s.store.WriteState(k, data) +} + +// Load retrieves the logs from an [ipn.StateStore]. +func (s *logStateStore) load(key ipn.ProfileID) ([]*transaction, error) { + if key == "" { + return nil, errors.New("empty key") + } + + k := ipn.StateKey(s.generateKey(key)) + data, err := s.store.ReadState(k) + + switch { + case errors.Is(err, ipn.ErrStateNotExist): + return nil, nil + case err != nil: + return nil, err + } + + var txns []*transaction + err = json.Unmarshal(data, &txns) + return txns, err +} diff --git a/ipn/auditlog/auditlog_test.go b/ipn/auditlog/auditlog_test.go new file mode 100644 index 000000000..3d3bf95cb --- /dev/null +++ b/ipn/auditlog/auditlog_test.go @@ -0,0 +1,481 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package auditlog + +import ( + "context" + "errors" + "fmt" + "sync" + "testing" + "time" + + qt "github.com/frankban/quicktest" + "tailscale.com/ipn/store/mem" + "tailscale.com/tailcfg" + "tailscale.com/tstest" +) + +// loggerForTest creates an auditLogger for you and cleans it up +// (and ensures no goroutines are leaked) when the test is done. +func loggerForTest(t *testing.T, opts Opts) *Logger { + t.Helper() + tstest.ResourceCheck(t) + + if opts.Logf == nil { + opts.Logf = t.Logf + } + + if opts.Store == nil { + t.Fatalf("opts.Store must be set") + } + + a := NewLogger(opts) + + t.Cleanup(func() { + a.FlushAndStop(context.Background()) + }) + return a +} + +func TestNonRetryableErrors(t *testing.T) { + errorTests := []struct { + desc string + err error + want bool + }{ + {"DeadlineExceeded", context.DeadlineExceeded, false}, + {"Canceled", context.Canceled, false}, + {"Canceled wrapped", fmt.Errorf("%w: %w", context.Canceled, errors.New("ctx cancelled")), false}, + {"Random error", errors.New("random error"), false}, + } + + for _, tt := range errorTests { + t.Run(tt.desc, func(t *testing.T) { + if IsRetryableError(tt.err) != tt.want { + t.Fatalf("retriable: got %v, want %v", !tt.want, tt.want) + } + }) + } +} + +// TestEnqueueAndFlush enqueues n logs and flushes them. +// We expect all logs to be flushed and for no +// logs to remain in the store once FlushAndStop returns. 
+func TestEnqueueAndFlush(t *testing.T) { + c := qt.New(t) + mockTransport := newMockTransport(nil) + al := loggerForTest(t, Opts{ + RetryLimit: 200, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + + c.Assert(al.SetProfileID("test"), qt.IsNil) + c.Assert(al.Start(mockTransport), qt.IsNil) + + wantSent := 10 + + for i := range wantSent { + err := al.Enqueue(tailcfg.AuditNodeDisconnect, fmt.Sprintf("log %d", i)) + c.Assert(err, qt.IsNil) + } + + al.FlushAndStop(context.Background()) + + al.mu.Lock() + defer al.mu.Unlock() + gotStored, err := al.storedCountLocked() + c.Assert(err, qt.IsNil) + + if wantStored := 0; gotStored != wantStored { + t.Fatalf("stored: got %d, want %d", gotStored, wantStored) + } + + if gotSent := mockTransport.sentCount(); gotSent != wantSent { + t.Fatalf("sent: got %d, want %d", gotSent, wantSent) + } +} + +// TestEnqueueAndFlushWithFlushCancel calls FlushAndCancel with a cancelled +// context. We expect nothing to be sent and all logs to be stored. +func TestEnqueueAndFlushWithFlushCancel(t *testing.T) { + c := qt.New(t) + mockTransport := newMockTransport(&retriableError) + al := loggerForTest(t, Opts{ + RetryLimit: 200, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + + c.Assert(al.SetProfileID("test"), qt.IsNil) + c.Assert(al.Start(mockTransport), qt.IsNil) + + for i := range 10 { + err := al.Enqueue(tailcfg.AuditNodeDisconnect, fmt.Sprintf("log %d", i)) + c.Assert(err, qt.IsNil) + } + + // Cancel the context before calling FlushAndStop - nothing should get sent. + // This mimics a timeout before flush() has a chance to execute. + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + al.FlushAndStop(ctx) + + al.mu.Lock() + defer al.mu.Unlock() + gotStored, err := al.storedCountLocked() + c.Assert(err, qt.IsNil) + + if wantStored := 10; gotStored != wantStored { + t.Fatalf("stored: got %d, want %d", gotStored, wantStored) + } + + if gotSent, wantSent := mockTransport.sentCount(), 0; gotSent != wantSent { + t.Fatalf("sent: got %d, want %d", gotSent, wantSent) + } +} + +// TestDeduplicateAndSort tests that the most recent log is kept when deduplicating logs +func TestDeduplicateAndSort(t *testing.T) { + c := qt.New(t) + al := loggerForTest(t, Opts{ + RetryLimit: 100, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + + c.Assert(al.SetProfileID("test"), qt.IsNil) + + logs := []*transaction{ + {EventID: "1", Details: "log 1", TimeStamp: time.Now().Add(-time.Minute * 1), Retries: 1}, + } + + al.mu.Lock() + defer al.mu.Unlock() + al.appendToStoreLocked(logs) + + // Update the transaction and re-append it + logs[0].Retries = 2 + al.appendToStoreLocked(logs) + + fromStore, err := al.store.load("test") + c.Assert(err, qt.IsNil) + + // We should see only one transaction + if wantStored, gotStored := len(logs), len(fromStore); gotStored != wantStored { + t.Fatalf("stored: got %d, want %d", gotStored, wantStored) + } + + // We should see the latest transaction + if wantRetryCount, gotRetryCount := 2, fromStore[0].Retries; gotRetryCount != wantRetryCount { + t.Fatalf("reties: got %d, want %d", gotRetryCount, wantRetryCount) + } +} + +func TestChangeProfileId(t *testing.T) { + c := qt.New(t) + al := loggerForTest(t, Opts{ + RetryLimit: 100, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + c.Assert(al.SetProfileID("test"), qt.IsNil) + + // Changing a profile ID must fail + c.Assert(al.SetProfileID("test"), qt.IsNotNil) +} + +// TestSendOnRestore pushes a n logs to the persistent store, and ensures they +// are sent as soon 
as Start is called then checks to ensure the sent logs no +// longer exist in the store. +func TestSendOnRestore(t *testing.T) { + c := qt.New(t) + mockTransport := newMockTransport(nil) + al := loggerForTest(t, Opts{ + RetryLimit: 100, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + al.SetProfileID("test") + + wantTotal := 10 + + for range 10 { + al.Enqueue(tailcfg.AuditNodeDisconnect, "log") + } + + c.Assert(al.Start(mockTransport), qt.IsNil) + + al.FlushAndStop(context.Background()) + + al.mu.Lock() + defer al.mu.Unlock() + gotStored, err := al.storedCountLocked() + c.Assert(err, qt.IsNil) + + if wantStored := 0; gotStored != wantStored { + t.Fatalf("stored: got %d, want %d", gotStored, wantStored) + } + + if gotSent, wantSent := mockTransport.sentCount(), wantTotal; gotSent != wantSent { + t.Fatalf("sent: got %d, want %d", gotSent, wantSent) + } +} + +// TestFailureExhaustion enqueues n logs, with the transport in a failable state. +// We then set it to a non-failing state, call FlushAndStop and expect all logs to be sent. +func TestFailureExhaustion(t *testing.T) { + c := qt.New(t) + mockTransport := newMockTransport(&retriableError) + + al := loggerForTest(t, Opts{ + RetryLimit: 1, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + + c.Assert(al.SetProfileID("test"), qt.IsNil) + c.Assert(al.Start(mockTransport), qt.IsNil) + + for range 10 { + err := al.Enqueue(tailcfg.AuditNodeDisconnect, "log") + c.Assert(err, qt.IsNil) + } + + al.FlushAndStop(context.Background()) + al.mu.Lock() + defer al.mu.Unlock() + gotStored, err := al.storedCountLocked() + c.Assert(err, qt.IsNil) + + if wantStored := 0; gotStored != wantStored { + t.Fatalf("stored: got %d, want %d", gotStored, wantStored) + } + + if gotSent, wantSent := mockTransport.sentCount(), 0; gotSent != wantSent { + t.Fatalf("sent: got %d, want %d", gotSent, wantSent) + } +} + +// TestEnqueueAndFailNoRetry enqueues a set of logs, all of which will fail and are not +// retriable. We then call FlushAndStop and expect all to be unsent. +func TestEnqueueAndFailNoRetry(t *testing.T) { + c := qt.New(t) + mockTransport := newMockTransport(&nonRetriableError) + + al := loggerForTest(t, Opts{ + RetryLimit: 100, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + + c.Assert(al.SetProfileID("test"), qt.IsNil) + c.Assert(al.Start(mockTransport), qt.IsNil) + + for i := range 10 { + err := al.Enqueue(tailcfg.AuditNodeDisconnect, fmt.Sprintf("log %d", i)) + c.Assert(err, qt.IsNil) + } + + al.FlushAndStop(context.Background()) + al.mu.Lock() + defer al.mu.Unlock() + gotStored, err := al.storedCountLocked() + c.Assert(err, qt.IsNil) + + if wantStored := 0; gotStored != wantStored { + t.Fatalf("stored: got %d, want %d", gotStored, wantStored) + } + + if gotSent, wantSent := mockTransport.sentCount(), 0; gotSent != wantSent { + t.Fatalf("sent: got %d, want %d", gotSent, wantSent) + } +} + +// TestEnqueueAndRetry enqueues a set of logs, all of which will fail and are retriable. +// Mid-test, we set the transport to not-fail and expect the queue to flush properly +// We set the backoff parameters to 0 seconds so retries are immediate. 
+func TestEnqueueAndRetry(t *testing.T) { + c := qt.New(t) + mockTransport := newMockTransport(&retriableError) + + al := loggerForTest(t, Opts{ + RetryLimit: 100, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + + al.backoffOpts = backoffOpts{ + min: 1 * time.Millisecond, + max: 4 * time.Millisecond, + multiplier: 2.0, + } + + c.Assert(al.SetProfileID("test"), qt.IsNil) + c.Assert(al.Start(mockTransport), qt.IsNil) + + err := al.Enqueue(tailcfg.AuditNodeDisconnect, fmt.Sprintf("log 1")) + c.Assert(err, qt.IsNil) + + // This will wait for at least 2 retries + gotRetried, wantRetried := mockTransport.waitForSendAttemptsToReach(3), true + if gotRetried != wantRetried { + t.Fatalf("retried: got %v, want %v", gotRetried, wantRetried) + } + + mockTransport.setErrorCondition(nil) + + al.FlushAndStop(context.Background()) + al.mu.Lock() + defer al.mu.Unlock() + + gotStored, err := al.storedCountLocked() + c.Assert(err, qt.IsNil) + + if wantStored := 0; gotStored != wantStored { + t.Fatalf("stored: got %d, want %d", gotStored, wantStored) + } + + if gotSent, wantSent := mockTransport.sentCount(), 1; gotSent != wantSent { + t.Fatalf("sent: got %d, want %d", gotSent, wantSent) + } +} + +// TestEnqueueBeforeSetProfileID tests that logs enqueued before SetProfileId are not sent +func TestEnqueueBeforeSetProfileID(t *testing.T) { + c := qt.New(t) + al := loggerForTest(t, Opts{ + RetryLimit: 100, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + + err := al.Enqueue(tailcfg.AuditNodeDisconnect, "log") + c.Assert(err, qt.IsNotNil) + al.FlushAndStop(context.Background()) + + al.mu.Lock() + defer al.mu.Unlock() + gotStored, err := al.storedCountLocked() + c.Assert(err, qt.IsNotNil) + + if wantStored := 0; gotStored != wantStored { + t.Fatalf("stored: got %d, want %d", gotStored, wantStored) + } +} + +// TestLogStoring tests that audit logs are persisted sorted by timestamp, oldest to newest +func TestLogSorting(t *testing.T) { + c := qt.New(t) + mockStore := NewLogStore(&mem.Store{}) + + logs := []*transaction{ + {EventID: "1", Details: "log 3", TimeStamp: time.Now().Add(-time.Minute * 1)}, + {EventID: "1", Details: "log 3", TimeStamp: time.Now().Add(-time.Minute * 2)}, + {EventID: "2", Details: "log 2", TimeStamp: time.Now().Add(-time.Minute * 3)}, + {EventID: "3", Details: "log 1", TimeStamp: time.Now().Add(-time.Minute * 4)}, + } + + wantLogs := []transaction{ + {Details: "log 1"}, + {Details: "log 2"}, + {Details: "log 3"}, + } + + mockStore.save("test", logs) + + gotLogs, err := mockStore.load("test") + c.Assert(err, qt.IsNil) + gotLogs = deduplicateAndSort(gotLogs) + + for i := range gotLogs { + if want, got := wantLogs[i].Details, gotLogs[i].Details; want != got { + t.Fatalf("Details: got %v, want %v", got, want) + } + } +} + +// mock implementations for testing + +// newMockTransport returns a mock transport for testing +// If err is no nil, SendAuditLog will return this error if the send is attempted +// before the context is cancelled. 
+func newMockTransport(err error) *mockAuditLogTransport { + return &mockAuditLogTransport{ + err: err, + attempts: make(chan int, 1), + } +} + +type mockAuditLogTransport struct { + attempts chan int // channel to notify of send attempts + + mu sync.Mutex + sendAttmpts int // number of attempts to send logs + sendCount int // number of logs sent by the transport + err error // error to return when sending logs +} + +// waitForSendAttemptsToReach blocks until the number of send attempts reaches n +// This should be use only in tests where the transport is expected to retry sending logs +func (t *mockAuditLogTransport) waitForSendAttemptsToReach(n int) bool { + for attempts := range t.attempts { + if attempts >= n { + return true + } + } + return false +} + +func (t *mockAuditLogTransport) setErrorCondition(err error) { + t.mu.Lock() + defer t.mu.Unlock() + t.err = err +} + +func (t *mockAuditLogTransport) sentCount() int { + t.mu.Lock() + defer t.mu.Unlock() + return t.sendCount +} + +func (t *mockAuditLogTransport) SendAuditLog(ctx context.Context, _ tailcfg.AuditLogRequest) (err error) { + t.mu.Lock() + t.sendAttmpts += 1 + defer func() { + a := t.sendAttmpts + t.mu.Unlock() + select { + case t.attempts <- a: + default: + } + }() + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if t.err != nil { + return t.err + } + t.sendCount += 1 + return nil +} + +var ( + retriableError = mockError{errors.New("retriable error")} + nonRetriableError = mockError{errors.New("permanent failure error")} +) + +type mockError struct { + error +} + +func (e mockError) Retryable() bool { + return e == retriableError +} diff --git a/ipn/ipnauth/actor.go b/ipn/ipnauth/actor.go index 8a0e77645..108bdd341 100644 --- a/ipn/ipnauth/actor.go +++ b/ipn/ipnauth/actor.go @@ -10,12 +10,11 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn" + "tailscale.com/tailcfg" ) // AuditLogFunc is any function that can be used to log audit actions performed by an [Actor]. -// -// TODO(nickkhyl,barnstar): define a named string type for the action (in tailcfg?) and use it here. -type AuditLogFunc func(action, details string) +type AuditLogFunc func(action tailcfg.ClientAuditAction, details string) error // Actor is any actor using the [ipnlocal.LocalBackend]. // @@ -45,7 +44,7 @@ type Actor interface { // // If the auditLogger is non-nil, it is used to write details about the action // to the audit log when required by the policy. - CheckProfileAccess(profile ipn.LoginProfileView, requestedAccess ProfileAccess, auditLogger AuditLogFunc) error + CheckProfileAccess(profile ipn.LoginProfileView, requestedAccess ProfileAccess, auditLogFn AuditLogFunc) error // IsLocalSystem reports whether the actor is the Windows' Local System account. // diff --git a/ipn/ipnauth/policy.go b/ipn/ipnauth/policy.go index f09be0fcb..aa4ec4100 100644 --- a/ipn/ipnauth/policy.go +++ b/ipn/ipnauth/policy.go @@ -9,6 +9,7 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn" + "tailscale.com/tailcfg" "tailscale.com/util/syspolicy" ) @@ -48,7 +49,7 @@ func (a actorWithPolicyChecks) CheckProfileAccess(profile ipn.LoginProfileView, // // TODO(nickkhyl): unexport it when we move [ipn.Actor] implementations from [ipnserver] // and corp to this package. 
-func CheckDisconnectPolicy(actor Actor, profile ipn.LoginProfileView, reason string, auditLogger AuditLogFunc) error { +func CheckDisconnectPolicy(actor Actor, profile ipn.LoginProfileView, reason string, auditFn AuditLogFunc) error { if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); !alwaysOn { return nil } @@ -58,15 +59,16 @@ func CheckDisconnectPolicy(actor Actor, profile ipn.LoginProfileView, reason str if reason == "" { return errors.New("disconnect not allowed: reason required") } - if auditLogger != nil { + if auditFn != nil { var details string if username, _ := actor.Username(); username != "" { // best-effort; we don't have it on all platforms details = fmt.Sprintf("%q is being disconnected by %q: %v", profile.Name(), username, reason) } else { details = fmt.Sprintf("%q is being disconnected: %v", profile.Name(), reason) } - // TODO(nickkhyl,barnstar): use a const for DISCONNECT_NODE. - auditLogger("DISCONNECT_NODE", details) + if err := auditFn(tailcfg.AuditNodeDisconnect, details); err != nil { + return err + } } return nil } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index e9f263996..f866527d1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -57,10 +57,12 @@ import ( "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" "tailscale.com/ipn" + "tailscale.com/ipn/auditlog" "tailscale.com/ipn/conffile" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnstate" "tailscale.com/ipn/policy" + memstore "tailscale.com/ipn/store/mem" "tailscale.com/log/sockstatlog" "tailscale.com/logpolicy" "tailscale.com/net/captivedetection" @@ -450,6 +452,12 @@ type LocalBackend struct { // Each callback is called exactly once in unspecified order and without b.mu held. // Returned errors are logged but otherwise ignored and do not affect the shutdown process. shutdownCbs set.HandleSet[func() error] + + // auditLogger, if non-nil, manages audit logging for the backend. + // + // It queues, persists, and sends audit logs + // to the control client. auditLogger has the same lifespan as b.cc. + auditLogger *auditlog.Logger } // HealthTracker returns the health tracker for the backend. @@ -1679,6 +1687,15 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.logf("Failed to save new controlclient state: %v", err) } } + + // Update the audit logger with the current profile ID. + if b.auditLogger != nil && prefsChanged { + pid := b.pm.CurrentProfile().ID() + if err := b.auditLogger.SetProfileID(pid); err != nil { + b.logf("Failed to set profile ID in audit logger: %v", err) + } + } + // initTKALocked is dependent on CurrentProfile.ID, which is initialized // (for new profiles) on the first call to b.pm.SetPrefs. if err := b.initTKALocked(); err != nil { @@ -2386,6 +2403,27 @@ func (b *LocalBackend) Start(opts ipn.Options) error { debugFlags = append([]string{"netstack"}, debugFlags...) } + var auditLogShutdown func() + // Audit logging is only available if the client has set up a proper persistent + // store for the logs in sys. + store, ok := b.sys.AuditLogStore.GetOK() + if !ok { + b.logf("auditlog: [unexpected] no persistent audit log storage configured. 
using memory store.") + store = auditlog.NewLogStore(&memstore.Store{}) + } + + al := auditlog.NewLogger(auditlog.Opts{ + Logf: b.logf, + RetryLimit: 32, + Store: store, + }) + b.auditLogger = al + auditLogShutdown = func() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + al.FlushAndStop(ctx) + } + // TODO(apenwarr): The only way to change the ServerURL is to // re-run b.Start, because this is the only place we create a // new controlclient. EditPrefs allows you to overwrite ServerURL, @@ -2411,6 +2449,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { C2NHandler: http.HandlerFunc(b.handleC2N), DialPlan: &b.dialPlan, // pointer because it can't be copied ControlKnobs: b.sys.ControlKnobs(), + Shutdown: auditLogShutdown, // Don't warn about broken Linux IP forwarding when // netstack is being used. @@ -4263,6 +4302,21 @@ func (b *LocalBackend) MaybeClearAppConnector(mp *ipn.MaskedPrefs) error { return err } +var errNoAuditLogger = errors.New("no audit logger configured") + +func (b *LocalBackend) getAuditLoggerLocked() ipnauth.AuditLogFunc { + logger := b.auditLogger + return func(action tailcfg.ClientAuditAction, details string) error { + if logger == nil { + return errNoAuditLogger + } + if err := logger.Enqueue(action, details); err != nil { + return fmt.Errorf("failed to enqueue audit log %v %q: %w", action, details, err) + } + return nil + } +} + // EditPrefs applies the changes in mp to the current prefs, // acting as the tailscaled itself rather than a specific user. func (b *LocalBackend) EditPrefs(mp *ipn.MaskedPrefs) (ipn.PrefsView, error) { @@ -4288,9 +4342,8 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip unlock := b.lockAndGetUnlock() defer unlock() if mp.WantRunningSet && !mp.WantRunning && b.pm.CurrentPrefs().WantRunning() { - // TODO(barnstar,nickkhyl): replace loggerFn with the actual audit logger. - loggerFn := func(action, details string) { b.logf("[audit]: %s: %s", action, details) } - if err := actor.CheckProfileAccess(b.pm.CurrentProfile(), ipnauth.Disconnect, loggerFn); err != nil { + if err := actor.CheckProfileAccess(b.pm.CurrentProfile(), ipnauth.Disconnect, b.getAuditLoggerLocked()); err != nil { + b.logf("check profile access failed: %v", err) return ipn.PrefsView{}, err } @@ -5874,6 +5927,15 @@ func (b *LocalBackend) requestEngineStatusAndWait() { func (b *LocalBackend) setControlClientLocked(cc controlclient.Client) { b.cc = cc b.ccAuto, _ = cc.(*controlclient.Auto) + if b.auditLogger != nil { + if err := b.auditLogger.SetProfileID(b.pm.CurrentProfile().ID()); err != nil { + b.logf("audit logger set profile ID failure: %v", err) + } + + if err := b.auditLogger.Start(b.ccAuto); err != nil { + b.logf("audit logger start failure: %v", err) + } + } } // resetControlClientLocked sets b.cc to nil and returns the old value. If the diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 7556ba3d0..83fab9c97 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2982,3 +2982,33 @@ const LBHeader = "Ts-Lb" // correspond to those IPs. Any services that don't correspond to a service // this client is hosting can be ignored. type ServiceIPMappings map[ServiceName][]netip.Addr + +// ClientAuditAction represents an auditable action that a client can report to the +// control plane. These actions must correspond to the supported actions +// in the control plane. 
+type ClientAuditAction string + +const ( + // AuditNodeDisconnect action is sent when a node has disconnected + // from the control plane. The details must include a reason in the Details + // field, either generated, or entered by the user. + AuditNodeDisconnect = ClientAuditAction("DISCONNECT_NODE") +) + +// AuditLogRequest represents an audit log request to be sent to the control plane. +// +// This is JSON-encoded and sent over the control plane connection to: +// POST https:///machine/audit-log +type AuditLogRequest struct { + // Version is the client's current CapabilityVersion. + Version CapabilityVersion `json:",omitempty"` + // NodeKey is the client's current node key. + NodeKey key.NodePublic `json:",omitzero"` + // Action is the action to be logged. It must correspond to a known action in the control plane. + Action ClientAuditAction `json:",omitempty"` + // Details is an opaque string, specific to the action being logged. Empty strings may not + // be valid depending on the action being logged. + Details string `json:",omitempty"` + // Timestamp is the time at which the audit log was generated on the node. + Timestamp time.Time `json:",omitzero"` +} diff --git a/tsd/tsd.go b/tsd/tsd.go index 1d1f35017..9ab35af55 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -25,6 +25,7 @@ import ( "tailscale.com/drive" "tailscale.com/health" "tailscale.com/ipn" + "tailscale.com/ipn/auditlog" "tailscale.com/ipn/conffile" "tailscale.com/ipn/desktop" "tailscale.com/net/dns" @@ -50,6 +51,7 @@ type System struct { Router SubSystem[router.Router] Tun SubSystem[*tstun.Wrapper] StateStore SubSystem[ipn.StateStore] + AuditLogStore SubSystem[auditlog.LogStore] Netstack SubSystem[NetstackImpl] // actually a *netstack.Impl DriveForLocal SubSystem[drive.FileSystemForLocal] DriveForRemote SubSystem[drive.FileSystemForRemote] @@ -106,6 +108,8 @@ func (s *System) Set(v any) { s.MagicSock.Set(v) case ipn.StateStore: s.StateStore.Set(v) + case auditlog.LogStore: + s.AuditLogStore.Set(v) case NetstackImpl: s.Netstack.Set(v) case drive.FileSystemForLocal: From 640b2fa3aebc6abf5ba4efb01f053b290886991c Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Wed, 12 Mar 2025 17:04:57 -0400 Subject: [PATCH 0593/1708] net/netmon, wgengine/magicsock: be quieter with portmapper logs This adds a new helper to the netmon package that allows us to rate-limit log messages, so that they only print once per (major) LinkChange event. We then use this when constructing the portmapper, so that we don't keep spamming logs forever on the same network. Updates #13145 Signed-off-by: Andrew Dunham Change-Id: I6e7162509148abea674f96efd76be9dffb373ae4 --- net/netmon/loghelper.go | 42 ++++++++++++++++++ net/netmon/loghelper_test.go | 78 +++++++++++++++++++++++++++++++++ wgengine/magicsock/magicsock.go | 12 ++++- 3 files changed, 131 insertions(+), 1 deletion(-) create mode 100644 net/netmon/loghelper.go create mode 100644 net/netmon/loghelper_test.go diff --git a/net/netmon/loghelper.go b/net/netmon/loghelper.go new file mode 100644 index 000000000..824faeef0 --- /dev/null +++ b/net/netmon/loghelper.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package netmon + +import ( + "sync" + + "tailscale.com/types/logger" +) + +// LinkChangeLogLimiter returns a new [logger.Logf] that logs each unique +// format string to the underlying logger only once per major LinkChange event. 
+// +// The returned function should be called when the logger is no longer needed, +// to release resources from the Monitor. +func LinkChangeLogLimiter(logf logger.Logf, nm *Monitor) (_ logger.Logf, unregister func()) { + var formatSeen sync.Map // map[string]bool + unregister = nm.RegisterChangeCallback(func(cd *ChangeDelta) { + // If we're in a major change or a time jump, clear the seen map. + if cd.Major || cd.TimeJumped { + formatSeen.Clear() + } + }) + + return func(format string, args ...any) { + // We only store 'true' in the map, so if it's present then it + // means we've already logged this format string. + _, loaded := formatSeen.LoadOrStore(format, true) + if loaded { + // TODO(andrew-d): we may still want to log this + // message every N minutes (1x/hour?) even if it's been + // seen, so that debugging doesn't require searching + // back in the logs for an unbounded amount of time. + // + // See: https://github.com/tailscale/tailscale/issues/13145 + return + } + + logf(format, args...) + }, unregister +} diff --git a/net/netmon/loghelper_test.go b/net/netmon/loghelper_test.go new file mode 100644 index 000000000..31777f4bc --- /dev/null +++ b/net/netmon/loghelper_test.go @@ -0,0 +1,78 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package netmon + +import ( + "bytes" + "fmt" + "testing" +) + +func TestLinkChangeLogLimiter(t *testing.T) { + mon, err := New(t.Logf) + if err != nil { + t.Fatal(err) + } + defer mon.Close() + + var logBuffer bytes.Buffer + logf := func(format string, args ...any) { + t.Logf("captured log: "+format, args...) + + if format[len(format)-1] != '\n' { + format += "\n" + } + fmt.Fprintf(&logBuffer, format, args...) + } + + logf, unregister := LinkChangeLogLimiter(logf, mon) + defer unregister() + + // Log once, which should write to our log buffer. + logf("hello %s", "world") + if got := logBuffer.String(); got != "hello world\n" { + t.Errorf("unexpected log buffer contents: %q", got) + } + + // Log again, which should not write to our log buffer. + logf("hello %s", "andrew") + if got := logBuffer.String(); got != "hello world\n" { + t.Errorf("unexpected log buffer contents: %q", got) + } + + // Log a different message, which should write to our log buffer. + logf("other message") + if got := logBuffer.String(); got != "hello world\nother message\n" { + t.Errorf("unexpected log buffer contents: %q", got) + } + + // Synthesize a fake major change event, which should clear the format + // string cache and allow the next log to write to our log buffer. + // + // InjectEvent doesn't work because it's not a major event, so we + // instead reach into the netmon and grab the callback, and then call + // it ourselves. + mon.mu.Lock() + var cb func(*ChangeDelta) + for _, c := range mon.cbs { + cb = c + break + } + mon.mu.Unlock() + + cb(&ChangeDelta{Major: true}) + + logf("hello %s", "world") + if got := logBuffer.String(); got != "hello world\nother message\nhello world\n" { + t.Errorf("unexpected log buffer contents: %q", got) + } + + // Unregistering the callback should clear our 'cbs' set. + unregister() + mon.mu.Lock() + if len(mon.cbs) != 0 { + t.Errorf("expected no callbacks, got %v", mon.cbs) + } + mon.mu.Unlock() +} diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index acf7114e1..e8e966582 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -177,6 +177,10 @@ type Conn struct { // port mappings from NAT devices. 
portMapper *portmapper.Client + // portMapperLogfUnregister is the function to call to unregister + // the portmapper log limiter. + portMapperLogfUnregister func() + // derpRecvCh is used by receiveDERP to read DERP messages. // It must have buffer size > 0; see issue 3736. derpRecvCh chan derpReadResult @@ -532,10 +536,15 @@ func NewConn(opts Options) (*Conn, error) { c.idleFunc = opts.IdleFunc c.testOnlyPacketListener = opts.TestOnlyPacketListener c.noteRecvActivity = opts.NoteRecvActivity + + // Don't log the same log messages possibly every few seconds in our + // portmapper. + portmapperLogf := logger.WithPrefix(c.logf, "portmapper: ") + portmapperLogf, c.portMapperLogfUnregister = netmon.LinkChangeLogLimiter(portmapperLogf, opts.NetMon) portMapOpts := &portmapper.DebugKnobs{ DisableAll: func() bool { return opts.DisablePortMapper || c.onlyTCP443.Load() }, } - c.portMapper = portmapper.NewClient(logger.WithPrefix(c.logf, "portmapper: "), opts.NetMon, portMapOpts, opts.ControlKnobs, c.onPortMapChanged) + c.portMapper = portmapper.NewClient(portmapperLogf, opts.NetMon, portMapOpts, opts.ControlKnobs, c.onPortMapChanged) c.portMapper.SetGatewayLookupFunc(opts.NetMon.GatewayAndSelfIP) c.netMon = opts.NetMon c.health = opts.HealthTracker @@ -2481,6 +2490,7 @@ func (c *Conn) Close() error { } c.stopPeriodicReSTUNTimerLocked() c.portMapper.Close() + c.portMapperLogfUnregister() c.peerMap.forEachEndpoint(func(ep *endpoint) { ep.stopAndReset() From d83024a63fe6f7ef6836ece13f13cf748014ebb9 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Fri, 7 Mar 2025 08:18:33 -0800 Subject: [PATCH 0594/1708] util/eventbus: add a debug HTTP handler for the bus Updates #15160 Signed-off-by: David Anderson --- util/eventbus/assets/event.html | 6 + util/eventbus/assets/htmx-websocket.min.js.gz | Bin 0 -> 4249 bytes util/eventbus/assets/htmx.min.js.gz | Bin 0 -> 16409 bytes util/eventbus/assets/main.html | 97 +++++++ util/eventbus/assets/monitor.html | 5 + util/eventbus/assets/style.css | 90 +++++++ util/eventbus/bus.go | 4 +- util/eventbus/debug.go | 11 +- util/eventbus/debughttp.go | 238 ++++++++++++++++++ util/eventbus/fetch-htmx.go | 93 +++++++ 10 files changed, 541 insertions(+), 3 deletions(-) create mode 100644 util/eventbus/assets/event.html create mode 100644 util/eventbus/assets/htmx-websocket.min.js.gz create mode 100644 util/eventbus/assets/htmx.min.js.gz create mode 100644 util/eventbus/assets/main.html create mode 100644 util/eventbus/assets/monitor.html create mode 100644 util/eventbus/assets/style.css create mode 100644 util/eventbus/debughttp.go create mode 100644 util/eventbus/fetch-htmx.go diff --git a/util/eventbus/assets/event.html b/util/eventbus/assets/event.html new file mode 100644 index 000000000..8e016f583 --- /dev/null +++ b/util/eventbus/assets/event.html @@ -0,0 +1,6 @@ +
  • +
    + {{.Count}}: {{.Type}} from {{.Event.From.Name}}, {{len .Event.To}} recipients + {{.Event.Event}} +
    +
  • diff --git a/util/eventbus/assets/htmx-websocket.min.js.gz b/util/eventbus/assets/htmx-websocket.min.js.gz new file mode 100644 index 0000000000000000000000000000000000000000..4ed53be492425280da60f662a48eebee9cc8e0b1 GIT binary patch literal 4249 zcmV;K5N7WmiwFP!00002|D-!zbK5wQ@A?&;JVaEip`5*{ebA0_<8d?_UnOzQ^2}V7 z&vgrtEeUH1U;xmH%A@~&g%5%NNh!`u9%374G#ZWmMsxCd@H;LSJbOSTVg6Mila#Z; z?8Lvp?S@Lg8Wu>NOORDra-m?w1q2ushmG2PO#v2&Fxl;P6TeVSb~4@OW=SQ<8i6vY zM3pH$nGRl027_c(G4ppaf~Nt%6X1As>&fRuDF082`aE6UIxH5u;Ya12j+-rzSs;Wbc36;KHoDGJPivkb?u+aLoK zpPx!tQ6UuwTvMq~V4l`UekzF|+h-9=N_vAUw{T*9Y7(a4SzsY7BMh+vSh?YQT3I zu1ilp9L!+2s*2)=e^aGU3_Vrm|8Yk=I5W(F4PkkK5=c=1>JbSgkrH!#7qO(&QqdL; zsRU)~12D41_MF+|0QjgP$#A;|!l4(BHC%IGE%_x8n#!4;Nfi-mRPe_i=C2$Tp+$;C zeFJhwRki_xDrDPa0=HtX&yy8gBB%|5w3li00SnxkgA*f*TmFc-Uz`z%Fw~r%E^);L zUKU7LRnG0A;dJ0ZEycC`z9WiQsZH0Yu7c6T9%>Y>)N&B6Om}?(X_cxEDm6z-Mgiy{ z_+6l?1PeC|m}0~kcDR(z>UFtN3Z9{sisO9wY(YNp9Al=exR{e{lYFUA>@SoObXh69 zX4(v2i^~lya)Hb-Wemfe{J{zKa5QcNB-4OxW+0Q5XjIVwg-jUzHA*D1{JY6mAj?mX z;&gIgh6d2sFIg5;U9;aeRG}*XRi_5XsQI=CZp6WOD6v-&n9|{LxgjU z>I1Gxw*S4p-(PP&Bv^R4(lT2Zh{a<58!0Mmq}KDujO$fn7;e7)u#-c37%jH(;#hqj z^^k;F&(T`T2L6g5F=_-eIR6Jw_|YLr3uJ4x`3KOmvylIO8<;uBJ9=O9WSXRBqe*iM znqC8RcYY7E*$jqW)S@eOjJBX}?gwq13a)T4@OJfeTM(RXNg&1@HRGHaE0m+8Yep32 zwYrU=l9a72HVxv&hZ0^kit!RieKYAGvbXPVKYnng>EMjq+#1N&@RL22ae>ATdm(A7 zkzZJIWio-QRY(tHoGC(?_HKGOII<;Cqh%Zl&ekXZzf!5>Sc9?}Va|bZ4?k)AV}vjD za`r+C<&x+7OS`oU$ZT%US-QX@PbTnBRL<&~P(Ug{=z61ICHS_*Wpm|TF?LeA@Zp$F zcp6%kYvAb)aLj;JMWG#7I5>mhFjgC<8}-ipI^3K-h$Z5ccE3=x1NHzM01Ju1Ya}ku^etNSe>L=_H|schc6W==!#+vzutW4awnxM$FJ9`WH=d)3|G!$ zrYwoX|0{{m(Llj7qD3s-aH+Jvhcih0po!%F6Nb|voIyM~)w~Y*pzkliqCXjx+W9jQv!^iD`>5 zWwk75W&*+$X>Rcuhtl-!sM`4CPxfA-57=Xj4KsW|uLNOo#l`l<%_jaV#&V3IV`L7a zki|N|De#J|xHFJj@_-9J*tJo5(Mpi@)<*U9l3(1A-{0?|FREUUW-df?u|PWvsHb5D z7ebJIs_lDsbrTNsMH{U>I`M|R2F{m@9VwH3r%>a#qxUvok~>oV!1;1G(#C(NM5SdZ z0&7?~>IpXj32=bhE#{OcEcQvWG*v`C$WZ|OE&0A!DHMTt_lWg(iE|z1)^akLz~`#v z({&ER&5#aPVD6o7R!POTp!L-H{l4s}k*OyA(Gg}yRVOerMVA~`@N{kJ0EdQ}W{HWT z1TRsjJv@1UI*{r>7$N)d$#cvm!ul#%J}=a?KH1^zp=RrUS7^hF2obBcOB4sK_F{j@ zE7sjB`Q|Pon7xHZPV&qio@_g!kh>m zx{>*}eRqRC%;n&PEZq95yPt8eS> z{_UYI@qjBQP2uU_QMB)H+6o1{1cVk>y8yMJGL=0Akzk@5aNE zVdvTb(E?c>MMGYg#=%g_aTt30!=H8l6kn_+qGm@}QmF}l_39af8&B6mcQ57NM|)If zI`m%gcid}ua)Bh@FSPZA7qeN!lFamluO7XV9vsDgMukpw2bF}E$jo^RBSs)O5RjCu3#<-`TsxeeRU7hjORFlx3jikw_2OK$ILYgY0vGJQ*d_$kOqN_b*1+_Kv+?7g$ z)M`T{ED@Qt4S5Qe0kAd!l*v+G&A*JG{|in42ueR4D z+n}tUWPLk@w%_#wx{eksikI$1Ie5Q2@qFhx8%{penWyI7*IZwc{8P`l&FWXLItX

    #5>|N2udB*J1?oKb5L)OY8MKTGhNR zKDR_4A=x0wwMhEq@9jXtjhgSK<;!z?6k!o4t3t_;h_X_~xCNMoC6{U#uQbttUg%Jy zb|A)1cAvFu>cC2aI#*7lq-&Nu9mamKZ98f0@U;{uCFSfl5*~COjVGN_fobWp9#kf|qFfE-Ogm%I)E7~zA@x(WeEWb3}hk*w-hT1Qk_Uspy)hgDV zpT_46!JU|dL-wM^VVvzWkg~OFD#S-M?iMD0=J|dK6|)Vr`H4E^bKYyaMdVHhjjfb> zMc1zU*l_aMDyp||Qv2$ax-cZZtw&#&w#omc4lW8kZo%78ze-DCe70*J?kU(ekE`u4 z;CQ~QJ+o~#vrWlJr0GcxyQUl8?n?o(<;1&hlywQs&H!JTIjW;yo#SrEBLXQ*VG$I0 zKn7*u6etXv9OACCKT!e*QvbR`t#M&5gFfh&@CPIT_|)!Z-u2wC-ou6Ehf}TAK2+A* z@m0NMW;(Df?VZ5b0wwK|#lho>kTh-FaSIfFEnx;9iQ1%F$^vX-_4Q1E@{}6=cD0G9mTt-l*Vm|wz^^p%6S~MTC`HpqHyhqKyCQ??)qa2aJ7O@*SD@d z)@oOft~g_AxTYz-K9$8<%ku5buXBqkr0L!Dd;#>VsUyvxSHG@B*5H!z`!E$f*tt1Ll z)Mto!okR_KKeebt(K4g4ocH$-xCFEkMVKIImj<)=WEy4+clRUT*q1ITBVvE6Eo|F7 z{_Lkhp6gW|mY;6D_u@{9!sJ5ImrJ!U8ZHU^Qla6nuc3T0-;P{;y8Q6#yQ@$CgxmLX z_XR`N#RE+dh`|mRvdvkM`=a_#0monD8q?5ng~zBWy=OtJ~Ur z{!&{KQb=AiLmuOXGcqL4wM05IS*J>h^QNO(Ywm_fPj@e!t|_1kS67ZgSEo0zkIvo# zH0{C*BqH0GyEUC};SFzz$~MPNWDsDXQp4s27MLk6E{Y-<-VNa_rWnp(ct3`_oG9|c z=~;6C{g?e^*i)Jfi#729%}mtl=H%W2^>m=&wk(K3xQ#{Z_U`@wM2L7axea=cR(11G vBy=^gL2j^g=XeZr0A8tOxBKGCr;Ow|A#b7B{w`9IyQ(18qC);S# z&_N?bHI-2@<*bo|AIX-px4fL?Y*W|iTJR}t8vNi)|DDhAhZNOfK?Q!4zn8c8YhJOL z`}Mr8@nc%^hBgvEuFHBuo7@hDrZU;$5Ez-l_ih)hc_X8w5L9ji8?X#(wW**heob}1 zv)M^qnlWOImmv9KQ3na-XaBRgDcAUGM&}iyr?--_rZRKI_Bx3*U)WjIeDR($YCr6H zOgWoZi+sDR8p*}oDJ!pPI?JWl&{U_Ll+z`>sQ#q+bxrlsTp znrd)Bl}W^vi4>$9+QcXo^p6c~q@SykBDDc@=I1~{ls2mSiY@#slNQ0cue-TAElSI} z<~OI;WsSpw&x@(3*7CAk*fG}?qnP@hvnkD^$Rw(%*|bthQC)2$Z7w;kNj{FIs^2J$ z8dWq(qnm0bmr)umX|-6&Xp-Jka(N=?j51l3wb=?;ttj8f{5ZX7&gqmhMyC>&IGS(j z`tbKkN-CoCrg>4ZQrunMt!W-z)%+?-XH}zs;DXk4D!Is`@$&XiCr;AAcvi~t&`XNa zW-54Hzo5(Vx>6M?*2t1cyX&v`bkkfwAw9qal>k;RHjSj>Ln+FY$*K8OS^regn@dr$ zrc&q5Olw8ulFypUWhp;4^rNVuVv~=f1(i`6&1g*}jV5VB>-jnEd~?R?JG~VP#s$5s z)Y4*uIPf=2a@FA(d%<}lv9QJ(7i&6gV5s`|;iTWNB^8w{)!t!Kf;MZ;8v43qGX*>O zcsu1Ynn&Lq{}tf~3%03FAAk2BlPuF&s!Nyi*Z*`m*g6%03oJE3ID1bUNoQyLN*$?2 zH?S^6L*Fn-g#t<#@NXKEQ304C`Yt=p{u;H#j~QZ6@_X@PrV%LlEFqoTQT2B|Cs*o+ zUJ9~Q9}0nzb5{JADYQv8`p5ISCbz|pnMR*vQT&)8Rw93@S{nV4A8a~)f3Mni$dCN2 zz76gpe=mN_)GCl~RM#EUM_w`8m`i-25)t*0pYW%`eB?JZO@DdFtoSkejnKm2^KChs zUE)+>6RF5AWO~JCcPE_5vSLkM({x(0=To&&wE$O=`!ak<@9zfXrq?k zr&2WeDN~8BX*r{!$>;F(hOIYJ4`-@VBo(ZzUzW1WD`*+e_nJ*OF6=A{+gzLY^b<1 z$maA`<|Rd>Oy8_lbXJv;)_3_2GHsT8Q_oJ+nXB`qG$-`*T6Ma#C-zhw6-=B{z>gL< zn*5b)^?nOtum)V;X|&#WC?t*6YH6d1F8!K_Y*nsfM+qcbyzc=!Mk6|keB6i$QJZAH z^NPh$I*5|OR36M!^=ckdLN*McdS==i*p#-Wav;h3@_nJoW=&m9X?*xyLP)e~qGT%x z0Q9Sxm(u1Rr-$DqNlWX74*Y75Rkep|Jx-EuAJhN1r{?O*HNX8o$56RbE@}0|s^)$9 zK9Pi=;1H440l+OqHObv%L^AAfb6IZ|Ss~;TD%s*wrVH_sJGTH4q zNfB&bG?Izetn3U6KX*I$;cB=FQ?<7UG?Gw-awwB}0%{)*U>cbGp3j1LUM$NQzd7ez zCZ<2Hq#-c9MPoF!L!0sGMs?zHSK-jr+NrixThD^8csu2*HE&=?l3cawTH)Br&9bU# zOozh*wWrCJlBu`1bhlI2jH^|tS}8M2A&6S@lqLE+ar};ujHvK7jgmsq|CP&g844f| z2$67Ze$0?;&Gv6&$ghUOxFUo^-$uJ#l{M?Sl5sSMlB{AgdV4mHS@M(|Cw6TF0P!owdMJ1-(|vq$a1aYp#WLN{m?kMp zk``t!4hYfhd&71eEJNm`|NO)>TPeIo8wLIPr@_6D)V%9S<9~cQ`gHvC)9rW1pX4X;iG7++zDTsa8wJbW=0=gXW`FqoGI}$3dG_vuLS4e1HKZ9{3BJ0J zqGAhm>LtCEM@zY?BX16G{bM4t>9VS4>TESxP1!;&pB^V$nw9G{WwRiEd^}0KF+C5* z6ernK&{9&phjG*ZD-k6{Lt~mbt|5RWnKeG5q!}w$G^NOzDu|MqnuQ{4C{uXK`zmBs zHg{}Ffb=Z<^R~y#sGgYmLGCZp?&GcZ4&DbA4&BrMQ03j4sRbh4CU3=O zo0hfyvyc-uVR`7#K!Zwc6}zRhFx5=}o<4?_}i^;doo_sn|IZuzOG>Y8FE@PP#fMeiLs0Bel-3kP7eMuN% zO;@alIts1th5uEBS)cOQ;KFzOq8bCpQ(F@f!rR*B}iV4#iV)>^iJzfMxHY_r!*_YMb}T$5%mh zDXLYRXd39~Q!_g9iC#@5t0mDB7*uS4)S6Ue_Sp^lAo!XJc^5N9K91Rh$T6GPb*8Sl z3%J6>o$38=7iaJBB96O=wFqc7+ro?3tTjbY%4?5ZzoddJg0siK-`QUp18 z*f7&}2}@@OqZ3ia;|B6kL5k|hj7DM1kLPa{YQ*!2T!LJx8+xP$QHtqOVU2^Me|$Rn z<|vc2k+DovN$D$9ag;HjY 
z9H`&?WBUy}hr}-|o3F^aAdcpMPOgyIqiKD3N=T4S}-P%?}3nDA~|htuZX}f$h9S z4&(kMhZfwTGrj0E0}|$K)d1ITKeBQ*Qy}4n@VyxVGKj%2#8(gF`&g)9h|Vw+3F10W zw%&)KBM=?=8F-x$LXK4hEanMIP&1(pq?^Rg0hs#pMGI z-y`do6hFksAp1=&Oq2fdQCxYDmwG2ur`Urz<60u8mTtF0cG{J1TP*+~bU575SRJS2 zzT2nn2ws}a6+(D?oSKDIcdmEY{(5f}sw*7cQ*%K>!U8X9HS;=_Y4k*~Q@^TTM^7U+ zA?fw-*beIOyAVzJH^@4csn8f&SOG|c`kFzMXh^||P*_U{|0_w=RhhJs+$lIS5}@&< z*rJH@SW|Y#MMzVLfTqHFH4$S9#9XZ~M;Z->61(O|y$Nq-ELA|j2pv6E-O7q>XltrI ziIS}xjVxyaiAT|J=sG@pJRFKK9X+0?yr-(d;USRp%$DizSkdK%vl$yHa-)7C9C1%9 zV2HehN^X|bT*e7={|suX zIWu~zSZ@1UAI^*Fs>Ul14x3D}{mi~0s8Ze#){3C+3?Mx?V4Xp%1--8LrZEFC-}ert zzd+xBOi6E0tL~uwyLV8AV1{lj^&AwzYa9yb8$HO@T(qkV6j@kCaE?8!V%al@C72#; zDUktvpJXZnk8-1-Gij0Dtngc1|J6Kh_TZ3`#wabUU|~w;u^B*8=oYdpdlyglNgah4 zt<1kwT=kkdG7srE3-j16OOn2Jz5*R^iw=0En3_^hbKQU7s6CFQ+FG?P*~_z&kMB<3 zU;g&t?BdPko3r=7eKymyIsk(+zxSmQP2##QA8c+9pCEaW12yyDa}leDa~|`iiR<*eU6#qlb*5MljL0Kj=nSy zZx~u5{)LfvteNT_dc{VsX|LRwZ{&I-eKqC{4RsCzzqL5t$O`4MYAjrgFR;wSm;DK% zppa<_)TcYo;zyTDI%xFy8bpsqZ1gA^a5ezx-vC>svjO7+Ek-`DrdZ=^8iKV5sMU*5 zwYQ-$k7a~Ino>mHkC6T+G+I>w(M}LV% zl*T-bs#%1Ijo(UvtPY7(&r!1Vl>U}7%A-UP1q})Lm#_p7*8#KQ=Z+q0fNltF`92T1 zL}R)`&aa}R=qX^#DZEk+WhlG{aE6Ejpi2Zh4%~&6sB*>gF$x3{FeF8wVCzZ=;-rvT zUxxw{L{JCkG;UG=AfoVGYX-DM`;-e~&XwxRho*C586#V0VTq?+u5NSAuTW>Cszs?t zwBo-|v;#JPSkX0}Z2tgu{Wlm#s&39IDK2T8v<;1`jMg%Q`>?y`b4i8ityQ;xuhIx4 zL|pIQ0|kmX^+*NV3E1Gg=V%}lrCY6X$$6Xfhd=JB(rLk6wIH5nu68UF_mexa=wDjDT)EdH#ELg zOF3S;vH3-Gbd9 zt)+ZE`@Nh}CKW1(qf!9`WwR)ap3JK2K{X?f?rZqb(>iTJv6ki%a8>})*VJJo_ z7Qhn-wDl1KiKvfy-_p2FX*z|~@6+Fot68w#z6}JJk-&PZ1sx7q57W)8NIiNKrO~HP zk0Q@VC6ndieYtW9*(irXdE}_Kd0Zz}=LyoLP^`>HYLMhnG!moe(F8|R4TseMAr?9s zB0cn!6mVl!Niof2*?iVKoJapbz9J&S<$CVRaRb31P2FX0O`((Gvqah^Dc;j~4nI76 z0rGwSAiNKibmMX(`J7KT%_Oog{Y{L*vu8Blm#~Mh%`&JQoVC(118>zxfVf4W3F|kj zFuTRFc_kVNpoH>4p7hU}hWdK0saBpbKBJd+YsD%7bY7nRa{1f)vzMpC;eO`H**oCL zRDQA*{!TqteFxf#H=5csk}f@?C)A3OB#GsdC;yoo%8|lmr8wa;`dr3rG)i{IJysMS zfR-~2f~L)pKKuGfm)A?G_ysg)Dl{>)W#kQw1yV=~UE(KXqEcpGSNb_1G%F)vW*KMN zx^{1feqKK{ ztGUjiL{D0d>BL6~R~;JQ3Sj=ErX|~~^}Q1%#Y?jc?k3eZw^t5-g&IOn+fI#;L@iRU z^_j({it>DM_VQQ92>oPgw`jcL5U>%@X&IhHv{MaG2bI;1lyQ|}D{z?2^(RQ>AqLGF z5e^bdBN=2sHWB=jq2YFqu`kF_WNYw|7!HLsN)$bFRu~RiG$J@bM9}Y*o`vkKEIqOW z3~TQr!~#{)FVyIg3?CUQzQ!#9WO{o3{qj=~m~_g$=xZ09m4b2~?`zMWd8f|r)Bsgs z&gImpI<3y)Uj5}yv}Ly#~oKa_`D&N;f{@Jr^%Lpu8~1D0=7y~Sn7zODPPsV z-?&b?7f9XH%aqjmHk&?lyHCGxyYtTNK26-Ux5v60VUP2FOSV6m<;0`>8}+Q1JC1V} zYJB?PPeTiB)hbJ3IT6Df`<#ePLo-p?y+cA%w9u7kx9ccki9w_x9}}|WYRZPHdu<`# z$gb#@cz^meynpLNmeP`co<#!U061>yV&KV^lWp5-4YRp8I2r(2ZPl|?+)dJA1&}&h zveqhT%Bifbja*dAR<B(>&D z+%k;_1-L>1gc!S6ZLu3A+rWaCXw?srh9AHtRGTI;ZR`ltF7|(R_QLG#hIom_-aU=n zX_n-;kWV(|oJn`iO0pTtiK&xE>33R%Ny_o@#M4T9BBH-IrJFBAdTfL$b5f2UPj)*L zQ|X;p64o`*{7W>#C0YXT&C_LCr0Z8{Z>VrbM2fXU!)mkD18a`Uf;qaiQ z_%j&}ss4$~v6;Zafp|wE(J8MtVokP+IHOEz*G_@l9DW^mzD>^#f~@?Yi~2kVQ4oe=wL(k#CNOqTfzvN+T)i(< zLx6EMHC^i-*P8EOoU|S#UOG`6M^Euul}Ycd;uq!(T84XO&ZbK)JeeSk@8Fc%EF%b{S?IzW zZ?9@T{hUVt15tWqeUi(Kr^kCHS<^HqVNLLS z`tWZ(B)vfmR2aP^VCxf5q;MNl4l`P4=pG*Uf&Jr80@97ogXMU z`qUh0Omj_+#-EyWV!pqp(hBd zj>iA^)YxLbs`}g5e*dVx+hlU&G@exYl7sQ2xL=x6&ExN>_D}+8&T|>FBth~`5T>jA zyjN!QSe0U&1bB8|yU8hQ*g?+J>lpX$YZyk>lb%`Aq{gwU=M4R~lJuCiZ!=Zxf zDD#wOOvWPRX@%!xd<^zo<47HQZGfeTm&XN?Jx0|$a8iM2RE?r!Ycq_00yy{GXXVup z)tfqcjp}luNVmg7qmMhbYICHdC6ObXW9tScMdkUca ze^F-*--szC(oop7yE%JwJk1@G7Ai=XNJQAi8l6h zd^~}pr>Ifg(7s?(NT*ta)8QKWl?Dw_w`D|);zyboI@oofC!q4LeOrD!x>7Nb>y1nY zioz=eEmLg`5jm_>$?i>JEe<)MQr#glzCDR~dShs}ni$Zu7G5gA)mk?Hj$O;x2N$!q!CA(K&6Q``Bb_ypwgHg*a?GG38KRz$7TLBcKDcb|l$ukS=%y zqBv5u0KrE#j!?b;{xw6xo9zpk*4u|S^9)Mw~ndscXm1=CRmSVb8wB{^Q4eQ?o 
zj*7LJ{zg+|O6-Z&p}D|L!R@QzS%$BU9gbfTqd}^7SHLdv^m=o(sOZw{A6$ z4ajl?S+#@>5z^{@F#(&lSWBCsM!Ow+&MP)EzqP$+6PRuKeHtbX`T(BI!;#I1M1yFw zY}>X3mrc@y)6sS^_b&z(@MhquE(HgY^MT5`$bF0X$A)emst@^kvBH{Q#(8hW@qBX6 zh-tIr_3YuAUVjQz(|!~!`De;rH7lUp*x#qvhk67p+4e~v2L-xdG1zQ|%R)48F zo^!Es2J>Tw?3&Hhs*+JV3CceLJUs?Cqno_WJVO+vmW}S`FDoUDm^4 zEDau07w~T9fBx3cdLAX1Xtimt&d%RO!=Z^$0EQ13Eh|crFdBlo+-6l-^M#`0jSV5} z6nc)QTx&yJ^UuNA8{*{@T-#OPYIVr`pyNW2>RA=`o`;Q;=qT(f>~=eje*9aG5;eD# zg3eoG`L3^lOvj6B;^>(=t()!PegeKn)}#mn0*0yafgP(n1x|_G^nT%|0UY(`g8Y= zUGi0yrL)kX9Wk7S>EV!mVZ^et#BlVcQq(`tJ9WZVRF*%`JKx1j?n`Yv%2KNrJ>O-4 zGBs62<3q<-Sx*4Xef3$>d5z!4Zt*tsNxo;P)-tZsX-d;gS~}XcqH&EEE3h*gAiE&e zQ(UL|%P=Y9deTA}P4XgUkE6P5Fs@isvB5k^w)4y;fy499wEX<8T(`3K2&zWB8riaJ z;!W6!x_07C>a~@$e)d9T!{rqFVNTjm`BgbCyxV_GL7!OlTF-B+$wp%lKsAqNYC&n@ zduNur9US`_*gq{Ee~ChFl+-TNJc4SWA&T^g7_up}13jD7boPo1D;e<9#*?;F%$}w? zd2$?kopj%SDN|I_@=IJ;R}q2I6`x!EDSVrKXsh6;|dZB-7kX zi_XpWxH0w@)(+ylgxR|x%lU4MxawQO*p zY6N!74>bC~s8k0tPmcXiQIwC<&>+eHZ8v#5X*)>>oR*Yz2hY0fG%06!)h0!??{DLu z(-uvMR#JyCQXjVxg>%l)M1qV!fK_ZJehss5x` zsL=rzugkhaJFg^kh$C(ECD3E*>X@N#$s>ha%9_p|O;W4)@5}9II8J5^Tt$Qe-WM5=mQ2S zea~k!N<-)k+e)_(R~N;bzBuq&I{Q+KE`rTCU|G$}nL7FA7gEQb3d_MgtR^ajdK5uC zHIaRIp!~V9T!Dhi%I&--#6@qoMu)NHJr8wm>-_?V9{%%v-19l8}6^2YR9advhE z*6*Ih;1|LJ*&sjKG>L(0kZF)D0H8(cPp!BTwEWz*Em%Zl9J|yDqm~PP=_RuZbs)2S zQ&G6f-TI-Cw_(z0Du7AH&@N!kB7hOU4!STGg103;TmBS?4wX`|1w!Z4)i=JN-X{BT zPy#8Cg~kKf;hJ2eBxkt%)lOMp%Zx!11RNyJa>WP1r$K0b8hjr16gi9@tP zHNSi)V!E2TO_#>dje?u{R1M@Qd||G#g?1JGbMhc}g6#%i!}k`C-Z2VC?Q2!=l3 z;?d~Q11%kXRZFVWWb(-K{5+G^3HTJlfoi25`V!c!IguH~$-*vr!7sG|CSh46sVF6V zPj6+MB(5FdJ6WE06|V~79*Xf2#-if$0rh+blI@uy^sH;8o=%T0^0l;P58dn7o#H4R zsGAwM&dH$k?C@Mt=}M@Nus~FJikprq{~dEo%R8pk_Kl)n%~%>^5*Vh&)_PV)HFMf^ zPI^3PUXR!)N(XZ;R_OpG2aiy4@Mw}oqX_0mZ>4W%s6QRy;b)qpXe6?EUCKCt5XzK1 zjnY9hq9!(Sp#a~=DE-5a(DPn)Ln&C#kb}^=SNM)13W(jO;u{ny;nt?pr*5v9 z&+^0JEv4se+gjUDg^;a+T?!im_Dx?HWO_~Rrj1}=vEMgKEtHAsmt z6x9c*z3$bKc#w#dmWU0Vm5lM(2j{wEco%fsdkGmXl>mnz12-F=Uva+65O&H6D>N^F z)@TQ4b0Z9W4;KW=PPMQ0+!S|46}qftO4M=rI>JBMBDvlb_=98_qMXY#QWy${pe4z} zVdD@@vV9Iwv{(}RWj`y3oSJMIpeHXsrs?)-b9H53!$CcpS`_{5$Mj4w2xBf=O>46ts0TC{ zd96nVlEWVLQz#psJo0Q|=GKOBiNB^a*3xO&LhN$ql@u(r4outw5T0L>Ue1Qb;8GIC znG|SM71dx7}fD0AEg&HU|(Q{(lY&o)gi5=CnfV!-NRiJ zLug6}gCmRu($2ZEq9FZl}49``oqEov-MMji?EFJA#cb1pR$%1rr8_ zwi{9BGBqI|SPILL3;ozq&RYe;T{V{;lp5vW)B2&%sY43FIn|3Nk{dVV(!i}B6LqEW{Y#)kVe zMeq!3VqkW7bq4_`x-#xQcQRci^3V>;tcRZGXy@aVkIrOGk{5Ef`!a&U+aF#9=tE17 zJhWtNmIu{7a`N&XdAHnGw|Reysy)pd_8df{TlMRH7v(jL(yf|nF3o#@r#0B-6dE(O zJs3?tBbewtPC_zdhqzz9-ZsYAsh!LMh?YXmPGwGU0xyq*ZyX@8Ot5DqUS@s#m@ z+ndfmyL8ZM5w`k+)NyKPpI?uAYsx2+zLzmig7D$7RZSBqHkoF8O<5d$e|j0E9PMNZz_hvp?dcM|{^ju8yw+$GNcWV};CXmX)nEG6 z^-eZIP$myQp1+OKed8h+EUS4r>!^g}vI4gol3*gPHRFb(}zX_?*5y=Uj)EY=CD7XMq5=%~&O zRRUL0uVTGR>p;=oS^el3X7FB~g52}p81m!f@G;U}^nq4Db`&&(KHYo1plq)21+1Hse%3z)p=crAW(Fzzp&`4Gq ziP+4D{VQ6}8Y;&oec~x5t(~c}c+@uPU`X6GJq=`m)v8?3Zla(K-77h-Y8pM%=X6P@ zpRf3B(5onC75B_oGblx8t-of@bC3CPhA`v=`1#prd7&Nw4OaT3q0GL_IjsP_mNdd|zE>E@%$6nYxj8n3i7aS|+oScP)rEd-|Q16t9Wn8#JhQ=;mh4Fk-ixreU9+!r zHZeW+GLmH5+^3^fXSgQx0cZzs;szSD&H8RYMi;K3+z~`)5zf zdTPHC?e5F^gJn6qMU>L!Hk^|_2Su3Mld( z@2BF5X)s#i)9w+*s?yqHb`ZWOK2~jh5PY?k(S(5ML*OA}ioC$jXrd2vk8kEuBhbA- z_v=!Wt7b3IJw&+kBGWrOXBBP|pH}X1z?9yLG2A^E1EDriDEHUIi!%}myWAT{7N>{X z96UV`U|{T^r(KM(?nSYJNcEt%V~*zQB{M~b%yc6WZTRf8ci?8hi_l-`tQfrhxaizO za<>zH+&0e~VQYg)(4K4%P|bU5tz zcpbo6sda9a{Dwr5uOt0Xnz=<^>DcXl(BOwc)7Wr?=A|jO%xbc^ee5c;b0@R7wl*oA z{fbu;rs>yp^Y&{0SAT6ON`|k(crCa@VyZuW?5^d$cDyK2UPE50Yw5d(Thi*hFdWg8 zxOwOYb*C;SoB_EF;n|w6V-o?w#b0>FwNYRmB@hv>5E=}~#{NXyNvhsaE%{AnOeJwU 
zqsLV4k7@qTjY)ypT>p4zJYWZR*t=FsJFZSC6GNy7qNb7M-fmD23NbEVtqoe^=p*~g z_zfGZ_^g^&R17LM&__QXM57warS{SM+Gn{e!hp*d-uHo5w}GiEg*;M}1HR3RFXm}7 z#o#E)rjDxWtL6G#S@s(37hs)V$(VJyEgROC6Ph8B0IDA&zL7NorSc#_guHaTcXor_VI;~F13o`8<+>wb?h zsDTmwZV^yn8NBpM?SjY2hQ=kx?!z1%qgtOCg6MEKjM;9-(LH*%V+FSY=QTO**1(sB zRVCq759+%exC8vAmdCzA#iAST%{b!)KL3%_|tIS__8W0zKTGNRpzj z>?k_?c>d<_`9|_nsO9_({o{1T3^!u6jvvq8TKmh7=Wh?-G-zfiy62T{zp}Fw>v^#I zAse8pa%S|PA4MIdXd>k|9ioEsV9!i+GRQz5Du&F^B7>~_~Q#R`h!tQzZmX=4#nau=ItjdWZ~<6VA0 zcRLdXL46H}2dXS4fZ+U^w&8HReESH*7X92sF0{wHNHwFZQt+1%a_9#vnzS<+ivlN%S5bfRq0H!>F;bL2m4JljB=u1Ube9CT|c5QqtU*RqNY zjE7j~MqzlP&@r1>w4*+Zl%TT*epVnrFt3o|>qw3}^qPcVb0BYvhQkBs(wo(bnqMVB zI~{qPDxX*th3icvyQ|NGu(u#dOlctB`5^f1T};#Mg38=~Id$)`P_|iJQ4yI{HcC^- zLvq;V8OGe?tAh~jKXDWs5-W1ysR_rAy{n!&%fA0sk}9McsPAH#mSO>5+o)ExG>NN= z=|NEs*P+*|mz`-Z1|OOq=& zw#~%}d8(hznG3!;Wl~hMF?vmRQ0=*qldl~XAIr%XN2VHVTld;vpbBQ;d;0pxQA2&$ zGtK&dgmoe0Epy?}n((l(@VK$QH-8a{N&`g9JdeRd2haefteJl>JY&}X~dOQvtxUr42?tKH7X4-RVZ1mAKRH;(?cTD2+*iMQQu zL*rA7iGX2i(ePnP$TiaD!bI?Zdd4DOha8ktyZYPrc$0!dfGpdyTFworsPm{P$!1?a z2w~$4ZhT89jZ2h{`4R8l+IR5CTRo-cUQruIubRiteFK*V0{Thv+`4(si7@j4^2)dc zza)W{NJm^JJs=-|)xiNxS`&etm?fL05F#Q4`$LvqQ*o8cRJ$TpYyr+dIZvsa=2coX z&4y|MFaIHhzo6s?Xk5DOF#}G&Pl$FR+5-aEizPt!*4-`tw3`dFjB902VF-g&nK6XO?cJYc7TddVq+fwL zyu4e}XhI@IxK_w?AKf0_+}s?324*8_ss(6Z7+~Zh$@Y#+X?&Nu6ureC$yD_&5w~8E zkLZ+YT)TcJA2VZ1g2V+1Sdwkc;@{Ihm`gD&{zU#@@hOblzV!ixf5JX)$dbj^+PdK( z&!6E?5^Fhh1Cjh9E*9YZ6FC1wDW&lgO)kvKQs``J_LONuI@T9{76*AjKw2L`&@;3N zG2{OYX#=C5hFP3Ck2t*3o6BV>KY}z6M&DReE0d?>ST}V-wz`%4w{+u`&QG1q&P}?u z>aYAci*J&2!_a;d!aZWCYcKIbQ2six60$oBu;v=qCPG)~O(mBng3c(DRarNAl}+8( zZg&xW+Szuhr8h-9#Gc56!Xmr*BqB^)#5PuG9>ygnFgSmbv<E>=7~ZuzTFO-&sr(KEk&|vpv9{sKPs}eh zfs2gs*9M_!b&D0-eI8V7fWpPQuGsFgHGlq<+>Jj^iW3&E)6eOzE`HK$vb|l3eC^JQ zbwyBcn4f5B*PUK1J^lqi8eH|ay2=NH@p;9{`rJ!8PmOv2OKj>o#mBGmv$j<$x96zl z6d01c1}CN0ndvX{TO(&IUMI!7SfsCEw_j(UDP6xYK0J-3KYx_*W%>ua-X;7H9`>lc z2D9BD?~I|N8#ZkiI2|L##r~4sqZ>4QVv>Q1*uGIwQ*AqsQDwvDjQfT|`-juE_5=p% z8Z&y%JZCc($%3}&>m(`UQt%sf55w9+mcRIVrHc6jE9K=il$oAhn^U941&ss!^KUl( z97yV(#?5K@*PGWZbQ|yBWOO9h~YySNPq~cnmQJwZuj@DRVE!r*u79rp_h?2es#j5kxaJ;rYC&0-T+aW8=DQ6 zlwz1rl>5Ox3a-ut%;>d2`F|s)&(t}{R|@5b^hRMoaCiJ$(qr`c zzH8#AXlOsko@YvRs^-30VZSMuqYardMM=>W;7YRD3 z((HB>a2^z)*?Dg?!>UA8J4=*{rVE zB6!^vd%m~qSc2;64OmBOhj1@ehf$t^iEXa6moDIyRYH34PU8r?iX#_p&|2XVEz{9X zyH=%&Py14T;a;*BEQzcmiNy`I6`EQ<^eUrrS~!_1QY!|b^8jU;>$FO=iVjgr14mgK zz2hjm51t$Bg+Lov782hr{MxECq!+4l=HZStS%P2-B`Ql$U;o z#pgN6{rzA|QBttsutU=B!w9i?z3LapmD2CWOfCmbUNzlqiR0rPM z1)2nd_m^Fvnyd_ZT$5E64zie&Ri=koEY)uhc4koAA8N5sNdZWh&8UkiL(fJsFRHBB zOsBMI=9`)v%+)|wnOiIO7`EIrgplw4@Bc}*HM|%1O2#}OVAtUNQnF^w#mWpjUZ+iJ zo>$(0<254O7KNJ&b*L7|uHAO|Vc%sSN|Nnu;4QFh>w6!*4xiEX9!K>^RHR&HWZD5Z zF^0Jb;K;*489MX& z0owpZXd#Gf6Z`x&ErOHxPzOQC949}x)<#yh&JnBU5e}Mq`6kMO)=kcX-7#0zo(w25 z*k^D7Sa<*>1}A(*2hqsj=20}53%(jeqsr$KEgK1V!O$}A7}>TioaHah+-O^|O7>Zx z;A=mAkoSDwRtXE~D4hK^Cur)jcTMb>n^}=VS~YK2T`{;V`$mSGO^P-hokHfdm9S{n zb?3g$^#jr_Dy(PO0|9H1L#m`Ybnn=hP86k~Rx_|`e*vS_oB$Mac^_4i#+sc4wh>IL zdTHoaYq&d7JkTV;z|)Fq_?>CxETG2y0Pru*-f2DLTM&r_ z&-MJyh<NIiEDDE_k*)9l*aV8hOp@(E@ojine@RPAhqrAGiC3{qRbo=3nwhem)`*+ zj|7H*>w5;w&y4!tY4FT%dc$qeP*%j;dksus4>0V3KWMZOgYTs&tL0Rjt@&Erko4JJ zN_JJTnGTe$y=(W%bRTO(IUB^i!8X(+lI_nj?ui@bse{a7M5(1UAjX1GFyoE6q;2BG n&tu7!VF0}1!N>DVC;Hu_HYv>Oe8-*v_K$6x6#qW}00960o7pi+ literal 0 HcmV?d00001 diff --git a/util/eventbus/assets/main.html b/util/eventbus/assets/main.html new file mode 100644 index 000000000..51d6b22ad --- /dev/null +++ b/util/eventbus/assets/main.html @@ -0,0 +1,97 @@ + + + + + + + + +

    Event bus

    + +
    +

    General

    + {{with $.PublishQueue}} + {{len .}} pending + {{end}} + + +
    + +
    +

    Clients

    + + + + + + + + + + + {{range .Clients}} + + + + + + + {{end}} +
    NamePublishingSubscribingPending
    {{.Name}} +
      + {{range .Publish}} +
    • {{.}}
    • + {{end}} +
    +
    +
      + {{range .Subscribe}} +
    • {{.}}
    • + {{end}} +
    +
    + {{len ($.SubscribeQueue .Client)}} +
    +
    + +
    +

    Types

    + + {{range .Types}} + +
    +

    {{.Name}}

    +

    Definition

    + {{prettyPrintStruct .}} + +

    Published by:

    + {{if len (.Publish)}} +
      + {{range .Publish}} +
    • {{.Name}}
    • + {{end}} +
    + {{else}} +
      +
    • No publishers.
    • +
    + {{end}} + +

    Received by:

    + {{if len (.Subscribe)}} +
      + {{range .Subscribe}} +
    • {{.Name}}
    • + {{end}} +
    + {{else}} +
      +
    • No subscribers.
    • +
    + {{end}} +
    + {{end}} + +
    + + diff --git a/util/eventbus/assets/monitor.html b/util/eventbus/assets/monitor.html new file mode 100644 index 000000000..1af5bdce6 --- /dev/null +++ b/util/eventbus/assets/monitor.html @@ -0,0 +1,5 @@ +
    +
      +
    + +
    diff --git a/util/eventbus/assets/style.css b/util/eventbus/assets/style.css new file mode 100644 index 000000000..690bd4f17 --- /dev/null +++ b/util/eventbus/assets/style.css @@ -0,0 +1,90 @@ +/* CSS reset, thanks Josh Comeau: https://www.joshwcomeau.com/css/custom-css-reset/ */ +*, *::before, *::after { box-sizing: border-box; } +* { margin: 0; } +input, button, textarea, select { font: inherit; } +p, h1, h2, h3, h4, h5, h6 { overflow-wrap: break-word; } +p { text-wrap: pretty; } +h1, h2, h3, h4, h5, h6 { text-wrap: balance; } +#root, #__next { isolation: isolate; } +body { + line-height: 1.5; + -webkit-font-smoothing: antialiased; +} +img, picture, video, canvas, svg { + display: block; + max-width: 100%; +} + +/* Local styling begins */ + +body { + padding: 12px; +} + +div { + width: 100%; +} + +section { + display: flex; + flex-direction: column; + flex-gap: 6px; + align-items: flex-start; + padding: 12px 0; +} + +section > * { + margin-left: 24px; +} + +section > h2, section > h3 { + margin-left: 0; + padding-bottom: 6px; + padding-top: 12px; +} + +details { + padding-bottom: 12px; +} + +table { + table-layout: fixed; + width: calc(100% - 48px); + border-collapse: collapse; + border: 1px solid black; +} + +th, td { + padding: 12px; + border: 1px solid black; +} + +td.list { + vertical-align: top; +} + +ul { + list-style: none; +} + +td ul { + margin: 0; + padding: 0; +} + +code { + padding: 12px; + white-space: pre; +} + +#monitor { + width: calc(100% - 48px); + resize: vertical; + padding: 12px; + overflow: scroll; + height: 15lh; + border: 1px inset; + min-height: 1em; + display: flex; + flex-direction: column-reverse; +} diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 96cafc98b..45d12da2f 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -73,8 +73,8 @@ func (b *Bus) Client(name string) *Client { } // Debugger returns the debugging facility for the bus. -func (b *Bus) Debugger() Debugger { - return Debugger{b} +func (b *Bus) Debugger() *Debugger { + return &Debugger{b} } // Close closes the bus. Implicitly closes all clients, publishers and diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index 31123e6ba..832d72ac0 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -4,11 +4,14 @@ package eventbus import ( + "cmp" "fmt" "reflect" "slices" "sync" "sync/atomic" + + "tailscale.com/tsweb" ) // A Debugger offers access to a bus's privileged introspection and @@ -29,7 +32,11 @@ type Debugger struct { // Clients returns a list of all clients attached to the bus. func (d *Debugger) Clients() []*Client { - return d.bus.listClients() + ret := d.bus.listClients() + slices.SortFunc(ret, func(a, b *Client) int { + return cmp.Compare(a.Name(), b.Name()) + }) + return ret } // PublishQueue returns the contents of the publish queue. @@ -130,6 +137,8 @@ func (d *Debugger) SubscribeTypes(client *Client) []reflect.Type { return client.subscribeTypes() } +func (d *Debugger) RegisterHTTP(td *tsweb.DebugHandler) { registerHTTPDebugger(d, td) } + // A hook collects hook functions that can be run as a group. 
type hook[T any] struct { sync.Mutex diff --git a/util/eventbus/debughttp.go b/util/eventbus/debughttp.go new file mode 100644 index 000000000..bbd929efb --- /dev/null +++ b/util/eventbus/debughttp.go @@ -0,0 +1,238 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +import ( + "bytes" + "cmp" + "embed" + "fmt" + "html/template" + "io" + "io/fs" + "log" + "net/http" + "path/filepath" + "reflect" + "slices" + "strings" + "sync" + + "github.com/coder/websocket" + "tailscale.com/tsweb" +) + +type httpDebugger struct { + *Debugger +} + +func registerHTTPDebugger(d *Debugger, td *tsweb.DebugHandler) { + dh := httpDebugger{d} + td.Handle("bus", "Event bus", dh) + td.HandleSilent("bus/monitor", http.HandlerFunc(dh.serveMonitor)) + td.HandleSilent("bus/style.css", serveStatic("style.css")) + td.HandleSilent("bus/htmx.min.js", serveStatic("htmx.min.js.gz")) + td.HandleSilent("bus/htmx-websocket.min.js", serveStatic("htmx-websocket.min.js.gz")) +} + +//go:embed assets/*.html +var templatesSrc embed.FS + +var templates = sync.OnceValue(func() *template.Template { + d, err := fs.Sub(templatesSrc, "assets") + if err != nil { + panic(fmt.Errorf("getting eventbus debughttp templates subdir: %w", err)) + } + ret := template.New("").Funcs(map[string]any{ + "prettyPrintStruct": prettyPrintStruct, + }) + return template.Must(ret.ParseFS(d, "*")) +}) + +//go:generate go run fetch-htmx.go + +//go:embed assets/*.css assets/*.min.js.gz +var static embed.FS + +func serveStatic(name string) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + case strings.HasSuffix(name, ".css"): + w.Header().Set("Content-Type", "text/css") + case strings.HasSuffix(name, ".min.js.gz"): + w.Header().Set("Content-Type", "text/javascript") + w.Header().Set("Content-Encoding", "gzip") + case strings.HasSuffix(name, ".js"): + w.Header().Set("Content-Type", "text/javascript") + default: + http.Error(w, "not found", http.StatusNotFound) + return + } + + f, err := static.Open(filepath.Join("assets", name)) + if err != nil { + http.Error(w, fmt.Sprintf("opening asset: %v", err), http.StatusInternalServerError) + return + } + defer f.Close() + if _, err := io.Copy(w, f); err != nil { + http.Error(w, fmt.Sprintf("serving asset: %v", err), http.StatusInternalServerError) + return + } + }) +} + +func render(w http.ResponseWriter, name string, data any) { + err := templates().ExecuteTemplate(w, name+".html", data) + if err != nil { + err := fmt.Errorf("rendering template: %v", err) + log.Print(err) + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +func (h httpDebugger) ServeHTTP(w http.ResponseWriter, r *http.Request) { + type clientInfo struct { + *Client + Publish []reflect.Type + Subscribe []reflect.Type + } + type typeInfo struct { + reflect.Type + Publish []*Client + Subscribe []*Client + } + type info struct { + *Debugger + Clients map[string]*clientInfo + Types map[string]*typeInfo + } + + data := info{ + Debugger: h.Debugger, + Clients: map[string]*clientInfo{}, + Types: map[string]*typeInfo{}, + } + + getTypeInfo := func(t reflect.Type) *typeInfo { + if data.Types[t.Name()] == nil { + data.Types[t.Name()] = &typeInfo{ + Type: t, + } + } + return data.Types[t.Name()] + } + + for _, c := range h.Clients() { + ci := &clientInfo{ + Client: c, + Publish: h.PublishTypes(c), + Subscribe: h.SubscribeTypes(c), + } + slices.SortFunc(ci.Publish, func(a, b reflect.Type) int { return cmp.Compare(a.Name(), b.Name()) }) + 
slices.SortFunc(ci.Subscribe, func(a, b reflect.Type) int { return cmp.Compare(a.Name(), b.Name()) }) + data.Clients[c.Name()] = ci + + for _, t := range ci.Publish { + ti := getTypeInfo(t) + ti.Publish = append(ti.Publish, c) + } + for _, t := range ci.Subscribe { + ti := getTypeInfo(t) + ti.Subscribe = append(ti.Subscribe, c) + } + } + + render(w, "main", data) +} + +func (h httpDebugger) serveMonitor(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("Upgrade") == "websocket" { + h.serveMonitorStream(w, r) + return + } + + render(w, "monitor", nil) +} + +func (h httpDebugger) serveMonitorStream(w http.ResponseWriter, r *http.Request) { + conn, err := websocket.Accept(w, r, nil) + if err != nil { + return + } + defer conn.CloseNow() + wsCtx := conn.CloseRead(r.Context()) + + mon := h.WatchBus() + defer mon.Close() + + i := 0 + for { + select { + case <-r.Context().Done(): + return + case <-wsCtx.Done(): + return + case <-mon.Done(): + return + case event := <-mon.Events(): + msg, err := conn.Writer(r.Context(), websocket.MessageText) + if err != nil { + return + } + data := map[string]any{ + "Count": i, + "Type": reflect.TypeOf(event.Event), + "Event": event, + } + i++ + if err := templates().ExecuteTemplate(msg, "event.html", data); err != nil { + log.Println(err) + return + } + if err := msg.Close(); err != nil { + return + } + } + } +} + +func prettyPrintStruct(t reflect.Type) string { + if t.Kind() != reflect.Struct { + return t.String() + } + var rec func(io.Writer, int, reflect.Type) + rec = func(out io.Writer, indent int, t reflect.Type) { + ind := strings.Repeat(" ", indent) + fmt.Fprintf(out, "%s", t.String()) + fs := collectFields(t) + if len(fs) > 0 { + io.WriteString(out, " {\n") + for _, f := range fs { + fmt.Fprintf(out, "%s %s ", ind, f.Name) + if f.Type.Kind() == reflect.Struct { + rec(out, indent+1, f.Type) + } else { + fmt.Fprint(out, f.Type) + } + io.WriteString(out, "\n") + } + fmt.Fprintf(out, "%s}", ind) + } + } + + var ret bytes.Buffer + rec(&ret, 0, t) + return ret.String() +} + +func collectFields(t reflect.Type) (ret []reflect.StructField) { + for _, f := range reflect.VisibleFields(t) { + if !f.IsExported() { + continue + } + ret = append(ret, f) + } + return ret +} diff --git a/util/eventbus/fetch-htmx.go b/util/eventbus/fetch-htmx.go new file mode 100644 index 000000000..f80d50257 --- /dev/null +++ b/util/eventbus/fetch-htmx.go @@ -0,0 +1,93 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ignore + +// Program fetch-htmx fetches and installs local copies of the HTMX +// library and its dependencies, used by the debug UI. It is meant to +// be run via go generate. +package main + +import ( + "compress/gzip" + "crypto/sha512" + "encoding/base64" + "fmt" + "io" + "log" + "net/http" + "os" +) + +func main() { + // Hash from https://htmx.org/docs/#installing + htmx, err := fetchHashed("https://unpkg.com/htmx.org@2.0.4", "HGfztofotfshcF7+8n44JQL2oJmowVChPTg48S+jvZoztPfvwD79OC/LTtG6dMp+") + if err != nil { + log.Fatalf("fetching htmx: %v", err) + } + + // Hash SHOULD be from https://htmx.org/extensions/ws/ , but the + // hash is currently incorrect, see + // https://github.com/bigskysoftware/htmx-extensions/issues/153 + // + // Until that bug is resolved, hash was obtained by rebuilding the + // extension from git source, and verifying that the hash matches + // what unpkg is serving. 
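	// As a rough sketch, the pinned value for a locally downloaded copy can be
	// recomputed the same way fetchHashed below verifies it ("buf" here is
	// assumed to hold the raw file bytes):
	//
	//	sum := sha512.Sum384(buf)
	//	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
	//
	// The printed string is what goes in the second argument to fetchHashed.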
+ ws, err := fetchHashed("https://unpkg.com/htmx-ext-ws@2.0.2", "932iIqjARv+Gy0+r6RTGrfCkCKS5MsF539Iqf6Vt8L4YmbnnWI2DSFoMD90bvXd0") + if err != nil { + log.Fatalf("fetching htmx-websockets: %v", err) + } + + if err := writeGz("assets/htmx.min.js.gz", htmx); err != nil { + log.Fatalf("writing htmx.min.js.gz: %v", err) + } + if err := writeGz("assets/htmx-websocket.min.js.gz", ws); err != nil { + log.Fatalf("writing htmx-websocket.min.js.gz: %v", err) + } +} + +func writeGz(path string, bs []byte) error { + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + + g, err := gzip.NewWriterLevel(f, gzip.BestCompression) + if err != nil { + return err + } + + if _, err := g.Write(bs); err != nil { + return err + } + + if err := g.Flush(); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + return nil +} + +func fetchHashed(url, wantHash string) ([]byte, error) { + resp, err := http.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("fetching %q returned error status: %s", url, resp.Status) + } + ret, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("reading file from %q: %v", url, err) + } + h := sha512.Sum384(ret) + got := base64.StdEncoding.EncodeToString(h[:]) + if got != wantHash { + return nil, fmt.Errorf("wrong hash for %q: got %q, want %q", url, got, wantHash) + } + return ret, nil +} From 6d217d81d166b1355f197f1feaba6f99598c82cc Mon Sep 17 00:00:00 2001 From: David Anderson Date: Fri, 7 Mar 2025 09:49:09 -0800 Subject: [PATCH 0595/1708] util/eventbus: add a helper program for bus development The demo program generates a stream of made up bus events between a number of bus actors, as a way to generate some interesting activity to show on the bus debug page. Signed-off-by: David Anderson --- util/eventbus/debug-demo/main.go | 103 +++++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) create mode 100644 util/eventbus/debug-demo/main.go diff --git a/util/eventbus/debug-demo/main.go b/util/eventbus/debug-demo/main.go new file mode 100644 index 000000000..a6d232d88 --- /dev/null +++ b/util/eventbus/debug-demo/main.go @@ -0,0 +1,103 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// debug-demo is a program that serves a bus's debug interface over +// HTTP, then generates some fake traffic from a handful of +// clients. It is an aid to development, to have something to present +// on the debug interfaces while writing them. 
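//
// A typical way to try it during development (the listen address and the
// redirect are hard-coded in main below):
//
//	go run ./util/eventbus/debug-demo
//
// and then browse to http://localhost:8185/debug/bus.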
+package main + +import ( + "log" + "math/rand/v2" + "net/http" + "net/netip" + "time" + + "tailscale.com/tsweb" + "tailscale.com/types/key" + "tailscale.com/util/eventbus" +) + +func main() { + b := eventbus.New() + c := b.Client("RouteMonitor") + go testPub[RouteAdded](c, 5*time.Second) + go testPub[RouteRemoved](c, 5*time.Second) + c = b.Client("ControlClient") + go testPub[PeerAdded](c, 3*time.Second) + go testPub[PeerRemoved](c, 6*time.Second) + c = b.Client("Portmapper") + go testPub[PortmapAcquired](c, 10*time.Second) + go testPub[PortmapLost](c, 15*time.Second) + go testSub[RouteAdded](c) + c = b.Client("WireguardConfig") + go testSub[PeerAdded](c) + go testSub[PeerRemoved](c) + c = b.Client("Magicsock") + go testPub[PeerPathChanged](c, 5*time.Second) + go testSub[RouteAdded](c) + go testSub[RouteRemoved](c) + go testSub[PortmapAcquired](c) + go testSub[PortmapLost](c) + + m := http.NewServeMux() + d := tsweb.Debugger(m) + b.Debugger().RegisterHTTP(d) + + m.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "/debug/bus", http.StatusFound) + }) + log.Printf("Serving debug interface at http://localhost:8185/debug/bus") + http.ListenAndServe(":8185", m) +} + +func testPub[T any](c *eventbus.Client, every time.Duration) { + p := eventbus.Publish[T](c) + for { + jitter := time.Duration(rand.N(2000)) * time.Millisecond + time.Sleep(jitter) + var zero T + log.Printf("%s publish: %T", c.Name(), zero) + p.Publish(zero) + time.Sleep(every) + } +} + +func testSub[T any](c *eventbus.Client) { + s := eventbus.Subscribe[T](c) + for v := range s.Events() { + log.Printf("%s received: %T", c.Name(), v) + } +} + +type RouteAdded struct { + Prefix netip.Prefix + Via netip.Addr + Priority int +} +type RouteRemoved struct { + Prefix netip.Addr +} + +type PeerAdded struct { + ID int + Key key.NodePublic +} +type PeerRemoved struct { + ID int + Key key.NodePublic +} + +type PortmapAcquired struct { + Endpoint netip.Addr +} +type PortmapLost struct { + Endpoint netip.Addr +} + +type PeerPathChanged struct { + ID int + EndpointID int + Quality int +} From 45ecc0f85a96d09b4a0ca9839b2598314ad7ac34 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Wed, 12 Mar 2025 15:00:26 -0700 Subject: [PATCH 0596/1708] tsweb: add title to DebugHandler and helper registration methods Allow customizing the title on the debug index page. Also add methods for registering http.HandlerFunc to make it a little easier on callers. Updates tailscale/corp#27058 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- tsweb/debug.go | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/tsweb/debug.go b/tsweb/debug.go index 9e6ce4df4..843324482 100644 --- a/tsweb/debug.go +++ b/tsweb/debug.go @@ -34,6 +34,7 @@ type DebugHandler struct { kvs []func(io.Writer) // output one
<li>...</li> each, see KV()
 	urls     []string                          // one <li>...</li> block with link each
 	sections []func(io.Writer, *http.Request) // invoked in registration order prior to outputting
+	title    string                            // title displayed on index page
 }
 
 // Debugger returns the DebugHandler registered on mux at /debug/,
@@ -44,7 +45,8 @@ func Debugger(mux *http.ServeMux) *DebugHandler {
 		return d
 	}
 	ret := &DebugHandler{
-		mux: mux,
+		mux:   mux,
+		title: fmt.Sprintf("%s debug", version.CmdName()),
 	}
 	mux.Handle("/debug/", ret)
@@ -85,7 +87,7 @@ func (d *DebugHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	AddBrowserHeaders(w)
 	f := func(format string, args ...any) { fmt.Fprintf(w, format, args...) }
-	f("<html><body><h1>%s debug</h1><ul>", version.CmdName())
+	f("<html><body><h1>%s</h1><ul>
        ", html.EscapeString(d.title)) for _, kv := range d.kvs { kv(w) } @@ -103,14 +105,20 @@ func (d *DebugHandler) handle(slug string, handler http.Handler) string { return href } -// Handle registers handler at /debug/ and creates a descriptive -// entry in /debug/ for it. +// Handle registers handler at /debug/ and adds a link to it +// on /debug/ with the provided description. func (d *DebugHandler) Handle(slug, desc string, handler http.Handler) { href := d.handle(slug, handler) d.URL(href, desc) } -// HandleSilent registers handler at /debug/. It does not create +// Handle registers handler at /debug/ and adds a link to it +// on /debug/ with the provided description. +func (d *DebugHandler) HandleFunc(slug, desc string, handler http.HandlerFunc) { + d.Handle(slug, desc, handler) +} + +// HandleSilent registers handler at /debug/. It does not add // a descriptive entry in /debug/ for it. This should be used // sparingly, for things that need to be registered but would pollute // the list of debug links. @@ -118,6 +126,14 @@ func (d *DebugHandler) HandleSilent(slug string, handler http.Handler) { d.handle(slug, handler) } +// HandleSilent registers handler at /debug/. It does not add +// a descriptive entry in /debug/ for it. This should be used +// sparingly, for things that need to be registered but would pollute +// the list of debug links. +func (d *DebugHandler) HandleSilentFunc(slug string, handler http.HandlerFunc) { + d.HandleSilent(slug, handler) +} + // KV adds a key/value list item to /debug/. func (d *DebugHandler) KV(k string, v any) { val := html.EscapeString(fmt.Sprintf("%v", v)) @@ -149,6 +165,11 @@ func (d *DebugHandler) Section(f func(w io.Writer, r *http.Request)) { d.sections = append(d.sections, f) } +// Title sets the title at the top of the debug page. +func (d *DebugHandler) Title(title string) { + d.title = title +} + func gcHandler(w http.ResponseWriter, r *http.Request) { w.Write([]byte("running GC...\n")) if f, ok := w.(http.Flusher); ok { From cd391b37a6b5bce82943dca32e9de05427c02a72 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 13 Mar 2025 14:14:03 +0000 Subject: [PATCH 0597/1708] ipn/ipnlocal, envknob: make it possible to configure the cert client to act in read-only mode (#15250) * ipn/ipnlocal,envknob: add some primitives for HA replica cert share. Add an envknob for configuring an instance's cert store as read-only, so that it does not attempt to issue or renew TLS credentials, only reads them from its cert store. This will be used by the Kubernetes Operator's HA Ingress to enable multiple replicas serving the same HTTPS endpoint to be able to share the same cert. Also some minor refactor to allow adding more tests for cert retrieval logic. Signed-off-by: Irbe Krumina --- envknob/envknob.go | 17 +++++ ipn/ipnlocal/cert.go | 37 ++++++++- ipn/ipnlocal/cert_test.go | 155 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 206 insertions(+), 3 deletions(-) diff --git a/envknob/envknob.go b/envknob/envknob.go index e74bfea71..2662da2b4 100644 --- a/envknob/envknob.go +++ b/envknob/envknob.go @@ -417,6 +417,23 @@ func App() string { return "" } +// IsCertShareReadOnlyMode returns true if this replica should never attempt to +// issue or renew TLS credentials for any of the HTTPS endpoints that it is +// serving. It should only return certs found in its cert store. 
Currently, +// this is used by the Kubernetes Operator's HA Ingress via VIPServices, where +// multiple Ingress proxy instances serve the same HTTPS endpoint with a shared +// TLS credentials. The TLS credentials should only be issued by one of the +// replicas. +// For HTTPS Ingress the operator and containerboot ensure +// that read-only replicas will not be serving the HTTPS endpoints before there +// is a shared cert available. +func IsCertShareReadOnlyMode() bool { + m := String("TS_CERT_SHARE_MODE") + return m == modeRO +} + +const modeRO = "ro" + // CrashOnUnexpected reports whether the Tailscale client should panic // on unexpected conditions. If TS_DEBUG_CRASH_ON_UNEXPECTED is set, that's // used. Otherwise the default value is true for unstable builds. diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index 4c026a9e7..111dc5a2d 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -119,6 +119,9 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string } if pair, err := getCertPEMCached(cs, domain, now); err == nil { + if envknob.IsCertShareReadOnlyMode() { + return pair, nil + } // If we got here, we have a valid unexpired cert. // Check whether we should start an async renewal. shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, pair, minValidity) @@ -134,7 +137,7 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string if minValidity == 0 { logf("starting async renewal") // Start renewal in the background, return current valid cert. - go b.getCertPEM(context.Background(), cs, logf, traceACME, domain, now, minValidity) + b.goTracker.Go(func() { getCertPEM(context.Background(), b, cs, logf, traceACME, domain, now, minValidity) }) return pair, nil } // If the caller requested a specific validity duration, fall through @@ -142,7 +145,11 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string logf("starting sync renewal") } - pair, err := b.getCertPEM(ctx, cs, logf, traceACME, domain, now, minValidity) + if envknob.IsCertShareReadOnlyMode() { + return nil, fmt.Errorf("retrieving cached TLS certificate failed and cert store is configured in read-only mode, not attempting to issue a new certificate: %w", err) + } + + pair, err := getCertPEM(ctx, b, cs, logf, traceACME, domain, now, minValidity) if err != nil { logf("getCertPEM: %v", err) return nil, err @@ -358,7 +365,29 @@ type certStateStore struct { testRoots *x509.CertPool } +// TLSCertKeyReader is an interface implemented by state stores where it makes +// sense to read the TLS cert and key in a single operation that can be +// distinguished from generic state value reads. Currently this is only implemented +// by the kubestore.Store, which, in some cases, need to read cert and key from a +// non-cached TLS Secret. 
+type TLSCertKeyReader interface { + ReadTLSCertAndKey(domain string) ([]byte, []byte, error) +} + func (s certStateStore) Read(domain string, now time.Time) (*TLSCertKeyPair, error) { + // If we're using a store that supports atomic reads, use that + if kr, ok := s.StateStore.(TLSCertKeyReader); ok { + cert, key, err := kr.ReadTLSCertAndKey(domain) + if err != nil { + return nil, err + } + if !validCertPEM(domain, key, cert, s.testRoots, now) { + return nil, errCertExpired + } + return &TLSCertKeyPair{CertPEM: cert, KeyPEM: key, Cached: true}, nil + } + + // Otherwise fall back to separate reads certPEM, err := s.ReadState(ipn.StateKey(domain + ".crt")) if err != nil { return nil, err @@ -446,7 +475,9 @@ func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKey return cs.Read(domain, now) } -func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger.Logf, traceACME func(any), domain string, now time.Time, minValidity time.Duration) (*TLSCertKeyPair, error) { +// getCertPem checks if a cert needs to be renewed and if so, renews it. +// It can be overridden in tests. +var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf logger.Logf, traceACME func(any), domain string, now time.Time, minValidity time.Duration) (*TLSCertKeyPair, error) { acmeMu.Lock() defer acmeMu.Unlock() diff --git a/ipn/ipnlocal/cert_test.go b/ipn/ipnlocal/cert_test.go index c77570e87..e2398f670 100644 --- a/ipn/ipnlocal/cert_test.go +++ b/ipn/ipnlocal/cert_test.go @@ -6,6 +6,7 @@ package ipnlocal import ( + "context" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" @@ -14,11 +15,17 @@ import ( "embed" "encoding/pem" "math/big" + "os" + "path/filepath" "testing" "time" "github.com/google/go-cmp/cmp" + "tailscale.com/envknob" "tailscale.com/ipn/store/mem" + "tailscale.com/tstest" + "tailscale.com/types/logger" + "tailscale.com/util/must" ) func TestValidLookingCertDomain(t *testing.T) { @@ -221,3 +228,151 @@ func TestDebugACMEDirectoryURL(t *testing.T) { }) } } + +func TestGetCertPEMWithValidity(t *testing.T) { + const testDomain = "example.com" + b := &LocalBackend{ + store: &mem.Store{}, + varRoot: t.TempDir(), + ctx: context.Background(), + logf: t.Logf, + } + certDir, err := b.certDir() + if err != nil { + t.Fatalf("certDir error: %v", err) + } + if _, err := b.getCertStore(); err != nil { + t.Fatalf("getCertStore error: %v", err) + } + testRoot, err := certTestFS.ReadFile("testdata/rootCA.pem") + if err != nil { + t.Fatal(err) + } + roots := x509.NewCertPool() + if !roots.AppendCertsFromPEM(testRoot) { + t.Fatal("Unable to add test CA to the cert pool") + } + testX509Roots = roots + defer func() { testX509Roots = nil }() + tests := []struct { + name string + now time.Time + // storeCerts is true if the test cert and key should be written to store. 
+ storeCerts bool + readOnlyMode bool // TS_READ_ONLY_CERTS env var + wantAsyncRenewal bool // async issuance should be started + wantIssuance bool // sync issuance should be started + wantErr bool + }{ + { + name: "valid_no_renewal", + now: time.Date(2023, time.February, 20, 0, 0, 0, 0, time.UTC), + storeCerts: true, + wantAsyncRenewal: false, + wantIssuance: false, + wantErr: false, + }, + { + name: "issuance_needed", + now: time.Date(2023, time.February, 20, 0, 0, 0, 0, time.UTC), + storeCerts: false, + wantAsyncRenewal: false, + wantIssuance: true, + wantErr: false, + }, + { + name: "renewal_needed", + now: time.Date(2025, time.May, 1, 0, 0, 0, 0, time.UTC), + storeCerts: true, + wantAsyncRenewal: true, + wantIssuance: false, + wantErr: false, + }, + { + name: "renewal_needed_read_only_mode", + now: time.Date(2025, time.May, 1, 0, 0, 0, 0, time.UTC), + storeCerts: true, + readOnlyMode: true, + wantAsyncRenewal: false, + wantIssuance: false, + wantErr: false, + }, + { + name: "no_certs_read_only_mode", + now: time.Date(2025, time.May, 1, 0, 0, 0, 0, time.UTC), + storeCerts: false, + readOnlyMode: true, + wantAsyncRenewal: false, + wantIssuance: false, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + if tt.readOnlyMode { + envknob.Setenv("TS_CERT_SHARE_MODE", "ro") + } + + os.RemoveAll(certDir) + if tt.storeCerts { + os.MkdirAll(certDir, 0755) + if err := os.WriteFile(filepath.Join(certDir, "example.com.crt"), + must.Get(os.ReadFile("testdata/example.com.pem")), 0644); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(certDir, "example.com.key"), + must.Get(os.ReadFile("testdata/example.com-key.pem")), 0644); err != nil { + t.Fatal(err) + } + } + + b.clock = tstest.NewClock(tstest.ClockOpts{Start: tt.now}) + + allDone := make(chan bool, 1) + defer b.goTracker.AddDoneCallback(func() { + b.mu.Lock() + defer b.mu.Unlock() + if b.goTracker.RunningGoroutines() > 0 { + return + } + select { + case allDone <- true: + default: + } + })() + + // Set to true if get getCertPEM is called. GetCertPEM can be called in a goroutine for async + // renewal or in the main goroutine if issuance is required to obtain valid TLS credentials. + getCertPemWasCalled := false + getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf logger.Logf, traceACME func(any), domain string, now time.Time, minValidity time.Duration) (*TLSCertKeyPair, error) { + getCertPemWasCalled = true + return nil, nil + } + prevGoRoutines := b.goTracker.StartedGoroutines() + _, err = b.GetCertPEMWithValidity(context.Background(), testDomain, 0) + if (err != nil) != tt.wantErr { + t.Errorf("b.GetCertPemWithValidity got err %v, wants error: '%v'", err, tt.wantErr) + } + // GetCertPEMWithValidity calls getCertPEM in a goroutine if async renewal is needed. That's the + // only goroutine it starts, so this can be used to test if async renewal was started. + gotAsyncRenewal := b.goTracker.StartedGoroutines()-prevGoRoutines != 0 + if gotAsyncRenewal { + select { + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for goroutines to finish") + case <-allDone: + } + } + // Verify that async renewal was triggered if expected. + if tt.wantAsyncRenewal != gotAsyncRenewal { + t.Fatalf("wants getCertPem to be called async: %v, got called %v", tt.wantAsyncRenewal, gotAsyncRenewal) + } + // Verify that (non-async) issuance was started if expected. 
+ gotIssuance := getCertPemWasCalled && !gotAsyncRenewal + if tt.wantIssuance != gotIssuance { + t.Errorf("wants getCertPem to be called: %v, got called %v", tt.wantIssuance, gotIssuance) + } + }) + } +} From eb680edbcea41342f0fb9659c2f6374c494b34d8 Mon Sep 17 00:00:00 2001 From: Paul Scott <408401+icio@users.noreply.github.com> Date: Thu, 13 Mar 2025 14:21:29 +0000 Subject: [PATCH 0598/1708] cmd/testwrapper: print failed tests preventing retry (#15270) Updates tailscale/corp#26637 Signed-off-by: Paul Scott --- cmd/testwrapper/testwrapper.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cmd/testwrapper/testwrapper.go b/cmd/testwrapper/testwrapper.go index 1501c7e97..53c1b1d05 100644 --- a/cmd/testwrapper/testwrapper.go +++ b/cmd/testwrapper/testwrapper.go @@ -259,6 +259,7 @@ func main() { fmt.Printf("\n\nAttempt #%d: Retrying flaky tests:\n\nflakytest failures JSON: %s\n\n", thisRun.attempt, j) } + fatalFailures := make(map[string]struct{}) // pkg.Test key toRetry := make(map[string][]*testAttempt) // pkg -> tests to retry for _, pt := range thisRun.tests { ch := make(chan *testAttempt) @@ -301,11 +302,24 @@ func main() { if tr.isMarkedFlaky { toRetry[tr.pkg] = append(toRetry[tr.pkg], tr) } else { + fatalFailures[tr.pkg+"."+tr.testName] = struct{}{} failed = true } } if failed { fmt.Println("\n\nNot retrying flaky tests because non-flaky tests failed.") + + // Print the list of non-flakytest failures. + // We will later analyze the retried GitHub Action runs to see + // if non-flakytest failures succeeded upon retry. This will + // highlight tests which are flaky but not yet flagged as such. + if len(fatalFailures) > 0 { + tests := slicesx.MapKeys(fatalFailures) + sort.Strings(tests) + j, _ := json.Marshal(tests) + fmt.Printf("non-flakytest failures: %s\n", j) + } + fmt.Println() os.Exit(1) } From 06634125592abd2b9c5727ae3cc4116580dab33d Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 13 Mar 2025 08:06:20 -0700 Subject: [PATCH 0599/1708] util/eventbus: add basic throughput benchmarks (#15284) Shovel small events through the pipeine as fast as possible in a few basic configurations, to establish some baseline performance numbers. Updates #15160 Change-Id: I1dcbbd1109abb7b93aa4dcb70da57f183eb0e60e Signed-off-by: M. J. Fromberger --- util/eventbus/bench_test.go | 125 ++++++++++++++++++++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 util/eventbus/bench_test.go diff --git a/util/eventbus/bench_test.go b/util/eventbus/bench_test.go new file mode 100644 index 000000000..25f5b8002 --- /dev/null +++ b/util/eventbus/bench_test.go @@ -0,0 +1,125 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus_test + +import ( + "math/rand/v2" + "testing" + + "tailscale.com/util/eventbus" +) + +func BenchmarkBasicThroughput(b *testing.B) { + bus := eventbus.New() + pcli := bus.Client(b.Name() + "-pub") + scli := bus.Client(b.Name() + "-sub") + + type emptyEvent [0]byte + + // One publisher and a corresponding subscriber shoveling events as fast as + // they can through the plumbing. 
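	// The benchmarks in this file can be run with the standard Go tooling, for
	// example:
	//
	//	go test -bench=Throughput -benchmem ./util/eventbus/
	//
	// -benchmem is optional and adds per-operation allocation figures.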
+ pub := eventbus.Publish[emptyEvent](pcli) + sub := eventbus.Subscribe[emptyEvent](scli) + + go func() { + for { + select { + case <-sub.Events(): + continue + case <-sub.Done(): + return + } + } + }() + + for b.Loop() { + pub.Publish(emptyEvent{}) + } + bus.Close() +} + +func BenchmarkSubsThroughput(b *testing.B) { + bus := eventbus.New() + pcli := bus.Client(b.Name() + "-pub") + scli1 := bus.Client(b.Name() + "-sub1") + scli2 := bus.Client(b.Name() + "-sub2") + + type emptyEvent [0]byte + + // One publisher and two subscribers shoveling events as fast as they can + // through the plumbing. + pub := eventbus.Publish[emptyEvent](pcli) + sub1 := eventbus.Subscribe[emptyEvent](scli1) + sub2 := eventbus.Subscribe[emptyEvent](scli2) + + for _, sub := range []*eventbus.Subscriber[emptyEvent]{sub1, sub2} { + go func() { + for { + select { + case <-sub.Events(): + continue + case <-sub.Done(): + return + } + } + }() + } + + for b.Loop() { + pub.Publish(emptyEvent{}) + } + bus.Close() +} + +func BenchmarkMultiThroughput(b *testing.B) { + bus := eventbus.New() + cli := bus.Client(b.Name()) + + type eventA struct{} + type eventB struct{} + + // Two disjoint event streams routed through the global order. + apub := eventbus.Publish[eventA](cli) + asub := eventbus.Subscribe[eventA](cli) + bpub := eventbus.Publish[eventB](cli) + bsub := eventbus.Subscribe[eventB](cli) + + go func() { + for { + select { + case <-asub.Events(): + continue + case <-asub.Done(): + return + } + } + }() + go func() { + for { + select { + case <-bsub.Events(): + continue + case <-bsub.Done(): + return + } + } + }() + + var rng uint64 + var bits int + for b.Loop() { + if bits == 0 { + rng = rand.Uint64() + bits = 64 + } + if rng&1 == 0 { + apub.Publish(eventA{}) + } else { + bpub.Publish(eventB{}) + } + rng >>= 1 + bits-- + } + bus.Close() +} From f0b395d851bbca03ad2712571898dbad0f9aad6a Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Thu, 13 Mar 2025 10:37:42 -0700 Subject: [PATCH 0600/1708] go.mod update golang.org/x/net to 0.36.0 for govulncheck (#15296) Updates #cleanup Signed-off-by: Patrick O'Doherty --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 970e2e63c..a566c941f 100644 --- a/go.mod +++ b/go.mod @@ -97,7 +97,7 @@ require ( golang.org/x/crypto v0.35.0 golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac golang.org/x/mod v0.23.0 - golang.org/x/net v0.35.0 + golang.org/x/net v0.36.0 golang.org/x/oauth2 v0.26.0 golang.org/x/sync v0.11.0 golang.org/x/sys v0.30.0 diff --git a/go.sum b/go.sum index 1707effd5..528e48c16 100644 --- a/go.sum +++ b/go.sum @@ -1135,8 +1135,8 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= -golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= +golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 
v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= From 8b1e7f646ee4730ad06c9b70c13e7861b964949b Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 13 Mar 2025 13:33:26 -0700 Subject: [PATCH 0601/1708] net/packet: implement Geneve header serialization (#15301) Updates tailscale/corp#27100 Signed-off-by: Jordan Whited --- net/packet/geneve.go | 104 ++++++++++++++++++++++++++++++++++++++ net/packet/geneve_test.go | 32 ++++++++++++ 2 files changed, 136 insertions(+) create mode 100644 net/packet/geneve.go create mode 100644 net/packet/geneve_test.go diff --git a/net/packet/geneve.go b/net/packet/geneve.go new file mode 100644 index 000000000..29970a8fd --- /dev/null +++ b/net/packet/geneve.go @@ -0,0 +1,104 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package packet + +import ( + "encoding/binary" + "errors" + "io" +) + +const ( + // GeneveFixedHeaderLength is the length of the fixed size portion of the + // Geneve header, in bytes. + GeneveFixedHeaderLength = 8 +) + +const ( + // GeneveProtocolDisco is the IEEE 802 Ethertype number used to represent + // the Tailscale Disco protocol in a Geneve header. + GeneveProtocolDisco uint16 = 0x7A11 + // GeneveProtocolWireGuard is the IEEE 802 Ethertype number used to represent the + // WireGuard protocol in a Geneve header. + GeneveProtocolWireGuard uint16 = 0x7A12 +) + +// GeneveHeader represents the fixed size Geneve header from RFC8926. +// TLVs/options are not implemented/supported. +// +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// |Ver| Opt Len |O|C| Rsvd. | Protocol Type | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Virtual Network Identifier (VNI) | Reserved | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +type GeneveHeader struct { + // Ver (2 bits): The current version number is 0. Packets received by a + // tunnel endpoint with an unknown version MUST be dropped. Transit devices + // interpreting Geneve packets with an unknown version number MUST treat + // them as UDP packets with an unknown payload. + Version uint8 + + // Protocol Type (16 bits): The type of protocol data unit appearing after + // the Geneve header. This follows the Ethertype [ETYPES] convention, with + // Ethernet itself being represented by the value 0x6558. + Protocol uint16 + + // Virtual Network Identifier (VNI) (24 bits): An identifier for a unique + // element of a virtual network. In many situations, this may represent an + // L2 segment; however, the control plane defines the forwarding semantics + // of decapsulated packets. The VNI MAY be used as part of ECMP forwarding + // decisions or MAY be used as a mechanism to distinguish between + // overlapping address spaces contained in the encapsulated packet when load + // balancing across CPUs. + VNI uint32 + + // O (1 bit): Control packet. This packet contains a control message. + // Control messages are sent between tunnel endpoints. Tunnel endpoints MUST + // NOT forward the payload, and transit devices MUST NOT attempt to + // interpret it. Since control messages are less frequent, it is RECOMMENDED + // that tunnel endpoints direct these packets to a high-priority control + // queue (for example, to direct the packet to a general purpose CPU from a + // forwarding Application-Specific Integrated Circuit (ASIC) or to separate + // out control traffic on a NIC). 
Transit devices MUST NOT alter forwarding + // behavior on the basis of this bit, such as ECMP link selection. + Control bool +} + +// Encode encodes GeneveHeader into b. If len(b) < GeneveFixedHeaderLength an +// io.ErrShortBuffer error is returned. +func (h *GeneveHeader) Encode(b []byte) error { + if len(b) < GeneveFixedHeaderLength { + return io.ErrShortBuffer + } + if h.Version > 3 { + return errors.New("version must be <= 3") + } + b[0] = 0 + b[1] = 0 + b[0] |= h.Version << 6 + if h.Control { + b[1] |= 0x80 + } + binary.BigEndian.PutUint16(b[2:], h.Protocol) + if h.VNI > 1<<24-1 { + return errors.New("VNI must be <= 2^24-1") + } + binary.BigEndian.PutUint32(b[4:], h.VNI<<8) + return nil +} + +// Decode decodes GeneveHeader from b. If len(b) < GeneveFixedHeaderLength an +// io.ErrShortBuffer error is returned. +func (h *GeneveHeader) Decode(b []byte) error { + if len(b) < GeneveFixedHeaderLength { + return io.ErrShortBuffer + } + h.Version = b[0] >> 6 + if b[1]&0x80 != 0 { + h.Control = true + } + h.Protocol = binary.BigEndian.Uint16(b[2:]) + h.VNI = binary.BigEndian.Uint32(b[4:]) >> 8 + return nil +} diff --git a/net/packet/geneve_test.go b/net/packet/geneve_test.go new file mode 100644 index 000000000..029638638 --- /dev/null +++ b/net/packet/geneve_test.go @@ -0,0 +1,32 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package packet + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestGeneveHeader(t *testing.T) { + in := GeneveHeader{ + Version: 3, + Protocol: GeneveProtocolDisco, + VNI: 1<<24 - 1, + Control: true, + } + b := make([]byte, GeneveFixedHeaderLength) + err := in.Encode(b) + if err != nil { + t.Fatal(err) + } + out := GeneveHeader{} + err = out.Decode(b) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(out, in); diff != "" { + t.Fatalf("wrong results (-got +want)\n%s", diff) + } +} From 299c5372bd2803bdbecbe7faf9e7112b55ef81d6 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 14 Mar 2025 17:33:08 +0000 Subject: [PATCH 0602/1708] cmd/containerboot: manage HA Ingress TLS certs from containerboot (#15303) cmd/containerboot: manage HA Ingress TLS certs from containerboot When ran as HA Ingress node, containerboot now can determine whether it should manage TLS certs for the HA Ingress replicas and call the LocalAPI cert endpoint to ensure initial issuance and renewal of the shared TLS certs. Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- cmd/containerboot/certs.go | 147 ++++++++++++++++++++ cmd/containerboot/certs_test.go | 229 ++++++++++++++++++++++++++++++++ cmd/containerboot/main.go | 2 +- cmd/containerboot/serve.go | 22 ++- cmd/containerboot/serve_test.go | 4 + cmd/containerboot/settings.go | 17 +++ cmd/containerboot/tailscaled.go | 3 + 7 files changed, 419 insertions(+), 5 deletions(-) create mode 100644 cmd/containerboot/certs.go create mode 100644 cmd/containerboot/certs_test.go diff --git a/cmd/containerboot/certs.go b/cmd/containerboot/certs.go new file mode 100644 index 000000000..7af0424a9 --- /dev/null +++ b/cmd/containerboot/certs.go @@ -0,0 +1,147 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import ( + "context" + "fmt" + "log" + "net" + "sync" + "time" + + "tailscale.com/ipn" + "tailscale.com/util/goroutines" + "tailscale.com/util/mak" +) + +// certManager is responsible for issuing certificates for known domains and for +// maintaining a loop that re-attempts issuance daily. 
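//
// In rough terms (a sketch; the exact wiring lives in the serve-config handling
// elsewhere in containerboot), the serve-config watcher is expected to drive it
// like this whenever an updated config arrives:
//
//	cm := &certManager{lc: lc}
//	if err := cm.ensureCertLoops(ctx, sc); err != nil {
//		log.Printf("error ensuring cert loops: %v", err)
//	}
//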
+// Currently cert manager logic is only run on ingress ProxyGroup replicas that are responsible for managing certs for +// HA Ingress HTTPS endpoints ('write' replicas). +type certManager struct { + lc localClient + tracker goroutines.Tracker // tracks running goroutines + mu sync.Mutex // guards the following + // certLoops contains a map of DNS names, for which we currently need to + // manage certs to cancel functions that allow stopping a goroutine when + // we no longer need to manage certs for the DNS name. + certLoops map[string]context.CancelFunc +} + +// ensureCertLoops ensures that, for all currently managed Service HTTPS +// endpoints, there is a cert loop responsible for issuing and ensuring the +// renewal of the TLS certs. +// ServeConfig must not be nil. +func (cm *certManager) ensureCertLoops(ctx context.Context, sc *ipn.ServeConfig) error { + if sc == nil { + return fmt.Errorf("[unexpected] ensureCertLoops called with nil ServeConfig") + } + currentDomains := make(map[string]bool) + const httpsPort = "443" + for _, service := range sc.Services { + for hostPort := range service.Web { + domain, port, err := net.SplitHostPort(string(hostPort)) + if err != nil { + return fmt.Errorf("[unexpected] unable to parse HostPort %s", hostPort) + } + if port != httpsPort { // HA Ingress' HTTP endpoint + continue + } + currentDomains[domain] = true + } + } + cm.mu.Lock() + defer cm.mu.Unlock() + for domain := range currentDomains { + if _, exists := cm.certLoops[domain]; !exists { + cancelCtx, cancel := context.WithCancel(ctx) + mak.Set(&cm.certLoops, domain, cancel) + cm.tracker.Go(func() { cm.runCertLoop(cancelCtx, domain) }) + } + } + + // Stop goroutines for domain names that are no longer in the config. + for domain, cancel := range cm.certLoops { + if !currentDomains[domain] { + cancel() + delete(cm.certLoops, domain) + } + } + return nil +} + +// runCertLoop: +// - calls localAPI certificate endpoint to ensure that certs are issued for the +// given domain name +// - calls localAPI certificate endpoint daily to ensure that certs are renewed +// - if certificate issuance failed retries after an exponential backoff period +// starting at 1 minute and capped at 24 hours. Reset the backoff once issuance succeeds. +// Note that renewal check also happens when the node receives an HTTPS request and it is possible that certs get +// renewed at that point. Renewal here is needed to prevent the shared certs from expiry in edge cases where the 'write' +// replica does not get any HTTPS requests. +// https://letsencrypt.org/docs/integration-guide/#retrying-failures +func (cm *certManager) runCertLoop(ctx context.Context, domain string) { + const ( + normalInterval = 24 * time.Hour // regular renewal check + initialRetry = 1 * time.Minute // initial backoff after a failure + maxRetryInterval = 24 * time.Hour // max backoff period + ) + timer := time.NewTimer(0) // fire off timer immediately + defer timer.Stop() + retryCount := 0 + for { + select { + case <-ctx.Done(): + return + case <-timer.C: + // We call the certificate endpoint, but don't do anything + // with the returned certs here. + // The call to the certificate endpoint will ensure that + // certs are issued/renewed as needed and stored in the + // relevant state store. For example, for HA Ingress + // 'write' replica, the cert and key will be stored in a + // Kubernetes Secret named after the domain for which we + // are issuing. 
+ // Note that renewals triggered by the call to the + // certificates endpoint here and by renewal check + // triggered during a call to node's HTTPS endpoint + // share the same state/renewal lock mechanism, so we + // should not run into redundant issuances during + // concurrent renewal checks. + // TODO(irbekrm): maybe it is worth adding a new + // issuance endpoint that explicitly only triggers + // issuance and stores certs in the relevant store, but + // does not return certs to the caller? + _, _, err := cm.lc.CertPair(ctx, domain) + if err != nil { + log.Printf("error refreshing certificate for %s: %v", domain, err) + } + var nextInterval time.Duration + // TODO(irbekrm): distinguish between LE rate limit + // errors and other error types like transient network + // errors. + if err == nil { + retryCount = 0 + nextInterval = normalInterval + } else { + retryCount++ + // Calculate backoff: initialRetry * 2^(retryCount-1) + // For retryCount=1: 1min * 2^0 = 1min + // For retryCount=2: 1min * 2^1 = 2min + // For retryCount=3: 1min * 2^2 = 4min + backoff := initialRetry * time.Duration(1<<(retryCount-1)) + if backoff > maxRetryInterval { + backoff = maxRetryInterval + } + nextInterval = backoff + log.Printf("Error refreshing certificate for %s (retry %d): %v. Will retry in %v\n", + domain, retryCount, err, nextInterval) + } + timer.Reset(nextInterval) + } + } +} diff --git a/cmd/containerboot/certs_test.go b/cmd/containerboot/certs_test.go new file mode 100644 index 000000000..577311ea3 --- /dev/null +++ b/cmd/containerboot/certs_test.go @@ -0,0 +1,229 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import ( + "context" + "testing" + "time" + + "tailscale.com/ipn" + "tailscale.com/tailcfg" +) + +// TestEnsureCertLoops tests that the certManager correctly starts and stops +// update loops for certs when the serve config changes. It tracks goroutine +// count and uses that as a validator that the expected number of cert loops are +// running. 
+func TestEnsureCertLoops(t *testing.T) { + tests := []struct { + name string + initialConfig *ipn.ServeConfig + updatedConfig *ipn.ServeConfig + initialGoroutines int64 // after initial serve config is applied + updatedGoroutines int64 // after updated serve config is applied + wantErr bool + }{ + { + name: "empty_serve_config", + initialConfig: &ipn.ServeConfig{}, + initialGoroutines: 0, + }, + { + name: "nil_serve_config", + initialConfig: nil, + initialGoroutines: 0, + wantErr: true, + }, + { + name: "empty_to_one_service", + initialConfig: &ipn.ServeConfig{}, + updatedConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + }, + }, + }, + }, + initialGoroutines: 0, + updatedGoroutines: 1, + }, + { + name: "single_service", + initialConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + }, + }, + }, + }, + initialGoroutines: 1, + }, + { + name: "multiple_services", + initialConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + }, + }, + "svc:my-other-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-other-app.tailnetxyz.ts.net:443": {}, + }, + }, + }, + }, + initialGoroutines: 2, // one loop per domain across all services + }, + { + name: "ignore_non_https_ports", + initialConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + "my-app.tailnetxyz.ts.net:80": {}, + }, + }, + }, + }, + initialGoroutines: 1, // only one loop for the 443 endpoint + }, + { + name: "remove_domain", + initialConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + }, + }, + "svc:my-other-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-other-app.tailnetxyz.ts.net:443": {}, + }, + }, + }, + }, + updatedConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + }, + }, + }, + }, + initialGoroutines: 2, // initially two loops (one per service) + updatedGoroutines: 1, // one loop after removing service2 + }, + { + name: "add_domain", + initialConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + }, + }, + }, + }, + updatedConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + }, + }, + "svc:my-other-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-other-app.tailnetxyz.ts.net:443": {}, + }, + }, + }, + }, + initialGoroutines: 1, + updatedGoroutines: 2, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cm := &certManager{ + lc: &fakeLocalClient{}, + certLoops: make(map[string]context.CancelFunc), + } + + allDone := make(chan bool, 1) + defer 
cm.tracker.AddDoneCallback(func() { + cm.mu.Lock() + defer cm.mu.Unlock() + if cm.tracker.RunningGoroutines() > 0 { + return + } + select { + case allDone <- true: + default: + } + })() + + err := cm.ensureCertLoops(ctx, tt.initialConfig) + if (err != nil) != tt.wantErr { + t.Fatalf("ensureCertLoops() error = %v", err) + } + + if got := cm.tracker.RunningGoroutines(); got != tt.initialGoroutines { + t.Errorf("after initial config: got %d running goroutines, want %d", got, tt.initialGoroutines) + } + + if tt.updatedConfig != nil { + if err := cm.ensureCertLoops(ctx, tt.updatedConfig); err != nil { + t.Fatalf("ensureCertLoops() error on update = %v", err) + } + + // Although starting goroutines and cancelling + // the context happens in the main goroutine, it + // the actual goroutine exit when a context is + // cancelled does not- so wait for a bit for the + // running goroutine count to reach the expected + // number. + deadline := time.After(5 * time.Second) + for { + if got := cm.tracker.RunningGoroutines(); got == tt.updatedGoroutines { + break + } + select { + case <-deadline: + t.Fatalf("timed out waiting for goroutine count to reach %d, currently at %d", + tt.updatedGoroutines, cm.tracker.RunningGoroutines()) + case <-time.After(10 * time.Millisecond): + continue + } + } + } + + if tt.updatedGoroutines == 0 { + return // no goroutines to wait for + } + // cancel context to make goroutines exit + cancel() + select { + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for goroutine to finish") + case <-allDone: + } + }) + } +} diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index cf4bd8620..5f8052bb9 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -646,7 +646,7 @@ runLoop: if cfg.ServeConfigPath != "" { triggerWatchServeConfigChanges.Do(func() { - go watchServeConfigChanges(ctx, cfg.ServeConfigPath, certDomainChanged, certDomain, client, kc) + go watchServeConfigChanges(ctx, certDomainChanged, certDomain, client, kc, cfg) }) } diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index 4ea5a9c46..37fd49777 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -28,10 +28,11 @@ import ( // applies it to lc. It exits when ctx is canceled. cdChanged is a channel that // is written to when the certDomain changes, causing the serve config to be // re-read and applied. 
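+// cfg provides the serve config path and the cert share mode that decides
+// whether this replica also runs the shared TLS cert loops.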
-func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *local.Client, kc *kubeClient) { +func watchServeConfigChanges(ctx context.Context, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *local.Client, kc *kubeClient, cfg *settings) { if certDomainAtomic == nil { panic("certDomainAtomic must not be nil") } + var tickChan <-chan time.Time var eventChan <-chan fsnotify.Event if w, err := fsnotify.NewWatcher(); err != nil { @@ -43,7 +44,7 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan tickChan = ticker.C } else { defer w.Close() - if err := w.Add(filepath.Dir(path)); err != nil { + if err := w.Add(filepath.Dir(cfg.ServeConfigPath)); err != nil { log.Fatalf("serve proxy: failed to add fsnotify watch: %v", err) } eventChan = w.Events @@ -51,6 +52,12 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan var certDomain string var prevServeConfig *ipn.ServeConfig + var cm certManager + if cfg.CertShareMode == "rw" { + cm = certManager{ + lc: lc, + } + } for { select { case <-ctx.Done(): @@ -63,12 +70,12 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan // k8s handles these mounts. So just re-read the file and apply it // if it's changed. } - sc, err := readServeConfig(path, certDomain) + sc, err := readServeConfig(cfg.ServeConfigPath, certDomain) if err != nil { log.Fatalf("serve proxy: failed to read serve config: %v", err) } if sc == nil { - log.Printf("serve proxy: no serve config at %q, skipping", path) + log.Printf("serve proxy: no serve config at %q, skipping", cfg.ServeConfigPath) continue } if prevServeConfig != nil && reflect.DeepEqual(sc, prevServeConfig) { @@ -83,6 +90,12 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan } } prevServeConfig = sc + if cfg.CertShareMode != "rw" { + continue + } + if err := cm.ensureCertLoops(ctx, sc); err != nil { + log.Fatalf("serve proxy: error ensuring cert loops: %v", err) + } } } @@ -96,6 +109,7 @@ func certDomainFromNetmap(nm *netmap.NetworkMap) string { // localClient is a subset of [local.Client] that can be mocked for testing. type localClient interface { SetServeConfig(context.Context, *ipn.ServeConfig) error + CertPair(context.Context, string) ([]byte, []byte, error) } func updateServeConfig(ctx context.Context, sc *ipn.ServeConfig, certDomain string, lc localClient) error { diff --git a/cmd/containerboot/serve_test.go b/cmd/containerboot/serve_test.go index eb92a8dc8..fc18f254d 100644 --- a/cmd/containerboot/serve_test.go +++ b/cmd/containerboot/serve_test.go @@ -206,6 +206,10 @@ func (m *fakeLocalClient) SetServeConfig(ctx context.Context, cfg *ipn.ServeConf return nil } +func (m *fakeLocalClient) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { + return nil, nil, nil +} + func TestHasHTTPSEndpoint(t *testing.T) { tests := []struct { name string diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index 0da18e52c..c62db5340 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -74,6 +74,12 @@ type settings struct { HealthCheckEnabled bool DebugAddrPort string EgressProxiesCfgPath string + // CertShareMode is set for Kubernetes Pods running cert share mode. 
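+	// It is derived from the TS_EXPERIMENTAL_CERT_SHARE and POD_NAME environment
+	// variables in configFromEnv.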
+ // Possible values are empty (containerboot doesn't run any certs + // logic), 'ro' (for Pods that shold never attempt to issue/renew + // certs) and 'rw' for Pods that should manage the TLS certs shared + // amongst the replicas. + CertShareMode string } func configFromEnv() (*settings, error) { @@ -128,6 +134,17 @@ func configFromEnv() (*settings, error) { cfg.PodIPv6 = parsed.String() } } + // If cert share is enabled, set the replica as read or write. Only 0th + // replica should be able to write. + isInCertShareMode := defaultBool("TS_EXPERIMENTAL_CERT_SHARE", false) + if isInCertShareMode { + cfg.CertShareMode = "ro" + podName := os.Getenv("POD_NAME") + if strings.HasSuffix(podName, "-0") { + cfg.CertShareMode = "rw" + } + } + if err := cfg.validate(); err != nil { return nil, fmt.Errorf("invalid configuration: %v", err) } diff --git a/cmd/containerboot/tailscaled.go b/cmd/containerboot/tailscaled.go index 01ee96d3a..654b34757 100644 --- a/cmd/containerboot/tailscaled.go +++ b/cmd/containerboot/tailscaled.go @@ -33,6 +33,9 @@ func startTailscaled(ctx context.Context, cfg *settings) (*local.Client, *os.Pro cmd.SysProcAttr = &syscall.SysProcAttr{ Setpgid: true, } + if cfg.CertShareMode != "" { + cmd.Env = append(os.Environ(), "TS_CERT_SHARE_MODE="+cfg.CertShareMode) + } log.Printf("Starting tailscaled") if err := cmd.Start(); err != nil { return nil, nil, fmt.Errorf("starting tailscaled failed: %v", err) From 3a4b62227654029384006b264ee21a9ab0e2d54b Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 14 Mar 2025 12:30:29 -0700 Subject: [PATCH 0603/1708] .github/workflows/govulncheck.yml: send messages to another channel (#15295) Updates #cleanup Signed-off-by: Andrew Lytvynov --- .github/workflows/govulncheck.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index 47d278e1c..10269ff0b 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -30,7 +30,7 @@ jobs: token: ${{ secrets.GOVULNCHECK_BOT_TOKEN }} payload: | { - "channel": "C05PXRM304B", + "channel": "C08FGKZCQTW", "blocks": [ { "type": "section", From 27ef9b666cd23c2ad5acb27c4f87294228219305 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Fri, 7 Mar 2025 15:07:00 +0000 Subject: [PATCH 0604/1708] ipn/ipnlocal: add test for CapMap packet filters Updates tailscale/corp#20514 Signed-off-by: James Sanderson --- ipn/ipnlocal/local_test.go | 59 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 35977e679..aa9137275 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -44,6 +44,7 @@ import ( "tailscale.com/tsd" "tailscale.com/tstest" "tailscale.com/types/dnstype" + "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" @@ -60,6 +61,7 @@ import ( "tailscale.com/util/syspolicy/source" "tailscale.com/wgengine" "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/filter/filtertype" "tailscale.com/wgengine/wgcfg" ) @@ -5206,3 +5208,60 @@ func TestUpdateIngressLocked(t *testing.T) { }) } } + +// TestSrcCapPacketFilter tests that LocalBackend handles packet filters with +// SrcCaps instead of Srcs (IPs) +func TestSrcCapPacketFilter(t *testing.T) { + lb := newLocalBackendWithTestControl(t, false, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + return newClient(tb, opts) + }) + if err := 
lb.Start(ipn.Options{}); err != nil { + t.Fatalf("(*LocalBackend).Start(): %v", err) + } + + var k key.NodePublic + must.Do(k.UnmarshalText([]byte("nodekey:5c8f86d5fc70d924e55f02446165a5dae8f822994ad26bcf4b08fd841f9bf261"))) + + controlClient := lb.cc.(*mockControl) + controlClient.send(nil, "", false, &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Addresses: []netip.Prefix{netip.MustParsePrefix("1.1.1.1/32")}, + }).View(), + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + Addresses: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + ID: 2, + Key: k, + CapMap: tailcfg.NodeCapMap{"cap-X": nil}, // node 2 has cap + }).View(), + (&tailcfg.Node{ + Addresses: []netip.Prefix{netip.MustParsePrefix("3.3.3.3/32")}, + ID: 3, + Key: k, + CapMap: tailcfg.NodeCapMap{}, // node 3 does not have the cap + }).View(), + }, + PacketFilter: []filtertype.Match{{ + IPProto: views.SliceOf([]ipproto.Proto{ipproto.TCP}), + SrcCaps: []tailcfg.NodeCapability{"cap-X"}, // cap in packet filter rule + Dsts: []filtertype.NetPortRange{{ + Net: netip.MustParsePrefix("1.1.1.1/32"), + Ports: filtertype.PortRange{ + First: 22, + Last: 22, + }, + }}, + }}, + }) + + f := lb.GetFilterForTest() + res := f.Check(netip.MustParseAddr("2.2.2.2"), netip.MustParseAddr("1.1.1.1"), 22, ipproto.TCP) + if res != filter.Accept { + t.Errorf("Check(2.2.2.2, ...) = %s, want %s", res, filter.Accept) + } + + res = f.Check(netip.MustParseAddr("3.3.3.3"), netip.MustParseAddr("1.1.1.1"), 22, ipproto.TCP) + if !res.IsDrop() { + t.Error("IsDrop() for node without cap = false, want true") + } +} From 25b059c0eec0ed8475239c640ceddf4a1bd17e98 Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 17 Mar 2025 15:02:33 +0000 Subject: [PATCH 0605/1708] licenses: update license notices Signed-off-by: License Updater --- licenses/android.md | 6 +++--- licenses/apple.md | 2 +- licenses/tailscale.md | 2 +- licenses/windows.md | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/licenses/android.md b/licenses/android.md index c3e9e989a..37961b74c 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -29,7 +29,7 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/6a9a0fde9288/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) @@ -64,11 +64,11 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [go4.org/unsafe/assume-no-moving-gc](https://pkg.go.dev/go4.org/unsafe/assume-no-moving-gc) ([BSD-3-Clause](https://github.com/go4org/unsafe-assume-no-moving-gc/blob/e7c30c78aeb2/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.33.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - [golang.org/x/mobile](https://pkg.go.dev/golang.org/x/mobile) ([BSD-3-Clause](https://cs.opensource.google/go/x/mobile/+/81131f64:LICENSE)) - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.23.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.35.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index a2984ea2e..814df22da 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -70,7 +70,7 @@ See also the dependencies in the [Tailscale CLI][]. 
- [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.35.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 777687be6..b3095f5b4 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -92,7 +92,7 @@ Some packages may only be included on certain architectures or operating systems - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.35.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.26.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index 78fdcf7fb..bdf965051 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -62,7 +62,7 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/04068c1cab63/LICENSE)) + - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/b2c15a420186/LICENSE)) - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/5992cb43ca35/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) @@ -74,7 +74,7 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.24.0:LICENSE)) - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.23.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.35.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) From b413b70ae27686746e461b0e51670d4ac5d3c987 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Sun, 9 Mar 2025 16:55:51 -0700 Subject: [PATCH 0606/1708] cmd/proxy-to-grafana: support setting Grafana role via grants This adds support for using ACL Grants to configure a role for the auto-provisioned user. Fixes tailscale/corp#14567 Signed-off-by: Anton Tolchanov --- cmd/proxy-to-grafana/proxy-to-grafana.go | 104 +++++++++++++++++++++-- 1 file changed, 97 insertions(+), 7 deletions(-) diff --git a/cmd/proxy-to-grafana/proxy-to-grafana.go b/cmd/proxy-to-grafana/proxy-to-grafana.go index 849d184c6..bdabd650f 100644 --- a/cmd/proxy-to-grafana/proxy-to-grafana.go +++ b/cmd/proxy-to-grafana/proxy-to-grafana.go @@ -19,8 +19,25 @@ // header_property = username // auto_sign_up = true // whitelist = 127.0.0.1 -// headers = Name:X-WEBAUTH-NAME +// headers = Email:X-Webauth-User, Name:X-Webauth-Name, Role:X-Webauth-Role // enable_login_token = true +// +// You can use grants in Tailscale ACL to give users different roles in Grafana. 
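+// The supported role values are "viewer", "editor" and "admin" (matched
+// case-insensitively).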
+// For example, to give group:eng the Editor role, add the following to your ACLs: +// +// "grants": [ +// { +// "src": ["group:eng"], +// "dst": ["tag:grafana"], +// "app": { +// "tailscale.com/cap/proxy-to-grafana": [{ +// "role": "editor", +// }], +// }, +// }, +// ], +// +// If multiple roles are specified, the most permissive role is used. package main import ( @@ -49,6 +66,57 @@ var ( loginServer = flag.String("login-server", "", "URL to alternative control server. If empty, the default Tailscale control is used.") ) +// aclCap is the Tailscale ACL capability used to configure proxy-to-grafana. +const aclCap tailcfg.PeerCapability = "tailscale.com/cap/proxy-to-grafana" + +// aclGrant is an access control rule that assigns Grafana permissions +// while provisioning a user. +type aclGrant struct { + // Role is one of: "viewer", "editor", "admin". + Role string `json:"role"` +} + +// grafanaRole defines possible Grafana roles. +type grafanaRole int + +const ( + // Roles are ordered by their permissions, with the least permissive role first. + // If a user has multiple roles, the most permissive role is used. + ViewerRole grafanaRole = iota + EditorRole + AdminRole +) + +// String returns the string representation of a grafanaRole. +// It is used as a header value in the HTTP request to Grafana. +func (r grafanaRole) String() string { + switch r { + case ViewerRole: + return "Viewer" + case EditorRole: + return "Editor" + case AdminRole: + return "Admin" + default: + // A safe default. + return "Viewer" + } +} + +// roleFromString converts a string to a grafanaRole. +// It is used to parse the role from the ACL grant. +func roleFromString(s string) (grafanaRole, error) { + switch strings.ToLower(s) { + case "viewer": + return ViewerRole, nil + case "editor": + return EditorRole, nil + case "admin": + return AdminRole, nil + } + return ViewerRole, fmt.Errorf("unknown role: %q", s) +} + func main() { flag.Parse() if *hostname == "" || strings.Contains(*hostname, ".") { @@ -134,7 +202,15 @@ func modifyRequest(req *http.Request, localClient *local.Client) { return } - user, err := getTailscaleUser(req.Context(), localClient, req.RemoteAddr) + // Delete any existing X-Webauth-* headers to prevent possible spoofing + // if getting Tailnet identity fails. 
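+	// Grafana's auth.proxy mode trusts these headers, so they must only ever be
+	// set by this proxy.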
+ for h := range req.Header { + if strings.HasPrefix(h, "X-Webauth-") { + req.Header.Del(h) + } + } + + user, role, err := getTailscaleIdentity(req.Context(), localClient, req.RemoteAddr) if err != nil { log.Printf("error getting Tailscale user: %v", err) return @@ -142,19 +218,33 @@ func modifyRequest(req *http.Request, localClient *local.Client) { req.Header.Set("X-Webauth-User", user.LoginName) req.Header.Set("X-Webauth-Name", user.DisplayName) + req.Header.Set("X-Webauth-Role", role.String()) } -func getTailscaleUser(ctx context.Context, localClient *local.Client, ipPort string) (*tailcfg.UserProfile, error) { +func getTailscaleIdentity(ctx context.Context, localClient *local.Client, ipPort string) (*tailcfg.UserProfile, grafanaRole, error) { whois, err := localClient.WhoIs(ctx, ipPort) if err != nil { - return nil, fmt.Errorf("failed to identify remote host: %w", err) + return nil, ViewerRole, fmt.Errorf("failed to identify remote host: %w", err) } if whois.Node.IsTagged() { - return nil, fmt.Errorf("tagged nodes are not users") + return nil, ViewerRole, fmt.Errorf("tagged nodes are not users") } if whois.UserProfile == nil || whois.UserProfile.LoginName == "" { - return nil, fmt.Errorf("failed to identify remote user") + return nil, ViewerRole, fmt.Errorf("failed to identify remote user") + } + + role := ViewerRole + grants, err := tailcfg.UnmarshalCapJSON[aclGrant](whois.CapMap, aclCap) + if err != nil { + return nil, ViewerRole, fmt.Errorf("failed to unmarshal ACL grants: %w", err) + } + for _, g := range grants { + r, err := roleFromString(g.Role) + if err != nil { + return nil, ViewerRole, fmt.Errorf("failed to parse role: %w", err) + } + role = max(role, r) } - return whois.UserProfile, nil + return whois.UserProfile, role, nil } From ef1e14250c40b28c68691f88dc1b6d1cc33425c0 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 18 Mar 2025 05:48:59 -0700 Subject: [PATCH 0607/1708] cmd/k8s-operator: ensure old VIPServices are cleaned up (#15344) When the Ingress is updated to a new hostname, the controller does not currently clean up the old VIPService from control. Fix this up to parse the ownership comment correctly and write a test to enforce the improved behaviour Updates tailscale/corp#24795 Change-Id: I792ae7684807d254bf2d3cc7aa54aa04a582d1f5 Signed-off-by: Tom Proctor --- cmd/k8s-operator/ingress-for-pg.go | 20 ++------- cmd/k8s-operator/ingress-for-pg_test.go | 57 +++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 17 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 85a64a336..cdbfecb35 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -402,16 +402,9 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG logger.Infof("VIPService %q is not owned by any Ingress, cleaning up", vipServiceName) // Delete the VIPService from control if necessary. 
- svc, _ := r.tsClient.GetVIPService(ctx, vipServiceName) - if svc != nil && isVIPServiceForAnyIngress(svc) { - logger.Infof("cleaning up orphaned VIPService %q", vipServiceName) - svcsChanged, err = r.cleanupVIPService(ctx, vipServiceName, logger) - if err != nil { - errResp := &tailscale.ErrResponse{} - if !errors.As(err, &errResp) || errResp.Status != http.StatusNotFound { - return false, fmt.Errorf("deleting VIPService %q: %w", vipServiceName, err) - } - } + svcsChanged, err = r.cleanupVIPService(ctx, vipServiceName, logger) + if err != nil { + return false, fmt.Errorf("deleting VIPService %q: %w", vipServiceName, err) } // Make sure the VIPService is not advertised in tailscaled or serve config. @@ -570,13 +563,6 @@ func (r *HAIngressReconciler) shouldExpose(ing *networkingv1.Ingress) bool { return isTSIngress && pgAnnot != "" } -func isVIPServiceForAnyIngress(svc *tailscale.VIPService) bool { - if svc == nil { - return false - } - return strings.HasPrefix(svc.Comment, "tailscale.com/k8s-operator:owned-by:") -} - // validateIngress validates that the Ingress is properly configured. // Currently validates: // - Any tags provided via tailscale.com/tags annotation are valid Tailscale ACL tags diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 7a995e169..2f675337e 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -8,8 +8,10 @@ package main import ( "context" "encoding/json" + "errors" "fmt" "maps" + "net/http" "reflect" "testing" @@ -186,6 +188,61 @@ func TestIngressPGReconciler(t *testing.T) { verifyTailscaledConfig(t, fc, nil) } +func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { + ingPGR, fc, ft := setupIngressTest(t) + + ing := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 8080, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"my-svc.tailnetxyz.ts.net"}}, + }, + }, + } + mustCreate(t, fc, ing) + + // Verify initial reconciliation + expectReconciled(t, ingPGR, "default", "test-ingress") + verifyServeConfig(t, fc, "svc:my-svc", false) + verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) + verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) + + // Update the Ingress hostname and make sure the original VIPService is deleted. 
+ mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { + ing.Spec.TLS[0].Hosts[0] = "updated-svc.tailnetxyz.ts.net" + }) + expectReconciled(t, ingPGR, "default", "test-ingress") + verifyServeConfig(t, fc, "svc:updated-svc", false) + verifyVIPService(t, ft, "svc:updated-svc", []string{"443"}) + verifyTailscaledConfig(t, fc, []string{"svc:updated-svc"}) + + _, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName("svc:my-svc")) + if err == nil { + t.Fatalf("svc:my-svc not cleaned up") + } + var errResp *tailscale.ErrResponse + if !errors.As(err, &errResp) || errResp.Status != http.StatusNotFound { + t.Fatalf("unexpected error: %v", err) + } +} + func TestValidateIngress(t *testing.T) { baseIngress := &networkingv1.Ingress{ ObjectMeta: metav1.ObjectMeta{ From 34734ba6351b76eaef525623ab6d17fd38f9b3d6 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 18 Mar 2025 15:09:22 +0000 Subject: [PATCH 0608/1708] ipn/store/kubestore,kube,envknob,cmd/tailscaled/depaware.txt: allow kubestore read/write custom TLS secrets (#15307) This PR adds some custom logic for reading and writing kube store values that are TLS certs and keys: 1) when store is initialized, lookup additional TLS Secrets for this node and if found, load TLS certs from there 2) if the node runs in certs 'read only' mode and TLS cert and key are not found in the in-memory store, look those up in a Secret 3) if the node runs in certs 'read only' mode, run a daily TLS certs reload to memory to get any renewed certs Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- cmd/tailscaled/depaware.txt | 2 +- envknob/envknob.go | 10 +- ipn/store/kubestore/store_kube.go | 266 +++++++++-- ipn/store/kubestore/store_kube_test.go | 634 +++++++++++++++++++++++-- kube/kubeapi/api.go | 8 + kube/kubeclient/client.go | 37 +- kube/kubeclient/fake_client.go | 7 + kube/kubetypes/types.go | 3 + 8 files changed, 878 insertions(+), 89 deletions(-) diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 026758a47..b47f43c76 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -286,7 +286,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ L tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore - tailscale.com/kube/kubetypes from tailscale.com/envknob + tailscale.com/kube/kubetypes from tailscale.com/envknob+ tailscale.com/licenses from tailscale.com/client/web tailscale.com/log/filelogger from tailscale.com/logpolicy tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal diff --git a/envknob/envknob.go b/envknob/envknob.go index 2662da2b4..e581eb27e 100644 --- a/envknob/envknob.go +++ b/envknob/envknob.go @@ -429,10 +429,16 @@ func App() string { // is a shared cert available. func IsCertShareReadOnlyMode() bool { m := String("TS_CERT_SHARE_MODE") - return m == modeRO + return m == "ro" } -const modeRO = "ro" +// IsCertShareReadWriteMode returns true if this instance is the replica +// responsible for issuing and renewing TLS certs in an HA setup with certs +// shared between multiple replicas. +func IsCertShareReadWriteMode() bool { + m := String("TS_CERT_SHARE_MODE") + return m == "rw" +} // CrashOnUnexpected reports whether the Tailscale client should panic // on unexpected conditions. 
If TS_DEBUG_CRASH_ON_UNEXPECTED is set, that's diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index ecd101c57..79e66d357 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -13,11 +13,14 @@ import ( "strings" "time" + "tailscale.com/envknob" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" + "tailscale.com/kube/kubetypes" "tailscale.com/types/logger" + "tailscale.com/util/dnsname" "tailscale.com/util/mak" ) @@ -32,21 +35,37 @@ const ( reasonTailscaleStateLoadFailed = "TailscaleStateLoadFailed" eventTypeWarning = "Warning" eventTypeNormal = "Normal" + + keyTLSCert = "tls.crt" + keyTLSKey = "tls.key" ) // Store is an ipn.StateStore that uses a Kubernetes Secret for persistence. type Store struct { - client kubeclient.Client - canPatch bool - secretName string + client kubeclient.Client + canPatch bool + secretName string // state Secret + certShareMode string // 'ro', 'rw', or empty + podName string - // memory holds the latest tailscale state. Writes write state to a kube Secret and memory, Reads read from - // memory. + // memory holds the latest tailscale state. Writes write state to a kube + // Secret and memory, Reads read from memory. memory mem.Store } -// New returns a new Store that persists to the named Secret. -func New(_ logger.Logf, secretName string) (*Store, error) { +// New returns a new Store that persists state to Kubernets Secret(s). +// Tailscale state is stored in a Secret named by the secretName parameter. +// TLS certs are stored and retrieved from state Secret or separate Secrets +// named after TLS endpoints if running in cert share mode. +func New(logf logger.Logf, secretName string) (*Store, error) { + c, err := newClient() + if err != nil { + return nil, err + } + return newWithClient(logf, c, secretName) +} + +func newClient() (kubeclient.Client, error) { c, err := kubeclient.New("tailscale-state-store") if err != nil { return nil, err @@ -55,6 +74,10 @@ func New(_ logger.Logf, secretName string) (*Store, error) { // Derive the API server address from the environment variables c.SetURL(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS"))) } + return c, nil +} + +func newWithClient(logf logger.Logf, c kubeclient.Client, secretName string) (*Store, error) { canPatch, _, err := c.CheckSecretPermissions(context.Background(), secretName) if err != nil { return nil, err @@ -63,11 +86,30 @@ func New(_ logger.Logf, secretName string) (*Store, error) { client: c, canPatch: canPatch, secretName: secretName, + podName: os.Getenv("POD_NAME"), } + if envknob.IsCertShareReadWriteMode() { + s.certShareMode = "rw" + } else if envknob.IsCertShareReadOnlyMode() { + s.certShareMode = "ro" + } + // Load latest state from kube Secret if it already exists. if err := s.loadState(); err != nil && err != ipn.ErrStateNotExist { return nil, fmt.Errorf("error loading state from kube Secret: %w", err) } + // If we are in cert share mode, pre-load existing shared certs. + if s.certShareMode == "rw" || s.certShareMode == "ro" { + sel := s.certSecretSelector() + if err := s.loadCerts(context.Background(), sel); err != nil { + // We will attempt to again retrieve the certs from Secrets when a request for an HTTPS endpoint + // is received. 
+ log.Printf("[unexpected] error loading TLS certs: %v", err) + } + } + if s.certShareMode == "ro" { + go s.runCertReload(context.Background(), logf) + } return s, nil } @@ -84,27 +126,101 @@ func (s *Store) ReadState(id ipn.StateKey) ([]byte, error) { // WriteState implements the StateStore interface. func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) { - return s.updateStateSecret(map[string][]byte{string(id): bs}) + defer func() { + if err == nil { + s.memory.WriteState(ipn.StateKey(sanitizeKey(id)), bs) + } + }() + return s.updateSecret(map[string][]byte{string(id): bs}, s.secretName) } -// WriteTLSCertAndKey writes a TLS cert and key to domain.crt, domain.key fields of a Tailscale Kubernetes node's state -// Secret. -func (s *Store) WriteTLSCertAndKey(domain string, cert, key []byte) error { - return s.updateStateSecret(map[string][]byte{domain + ".crt": cert, domain + ".key": key}) +// WriteTLSCertAndKey writes a TLS cert and key to domain.crt, domain.key fields +// of a Tailscale Kubernetes node's state Secret. +func (s *Store) WriteTLSCertAndKey(domain string, cert, key []byte) (err error) { + if s.certShareMode == "ro" { + log.Printf("[unexpected] TLS cert and key write in read-only mode") + } + if err := dnsname.ValidHostname(domain); err != nil { + return fmt.Errorf("invalid domain name %q: %w", domain, err) + } + defer func() { + // TODO(irbekrm): a read between these two separate writes would + // get a mismatched cert and key. Allow writing both cert and + // key to the memory store in a single, lock-protected operation. + if err == nil { + s.memory.WriteState(ipn.StateKey(domain+".crt"), cert) + s.memory.WriteState(ipn.StateKey(domain+".key"), key) + } + }() + secretName := s.secretName + data := map[string][]byte{ + domain + ".crt": cert, + domain + ".key": key, + } + // If we run in cert share mode, cert and key for a DNS name are written + // to a separate Secret. + if s.certShareMode == "rw" { + secretName = domain + data = map[string][]byte{ + keyTLSCert: cert, + keyTLSKey: key, + } + } + return s.updateSecret(data, secretName) } -func (s *Store) updateStateSecret(data map[string][]byte) (err error) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer func() { +// ReadTLSCertAndKey reads a TLS cert and key from memory or from a +// domain-specific Secret. It first checks the in-memory store, if not found in +// memory and running cert store in read-only mode, looks up a Secret. +func (s *Store) ReadTLSCertAndKey(domain string) (cert, key []byte, err error) { + if err := dnsname.ValidHostname(domain); err != nil { + return nil, nil, fmt.Errorf("invalid domain name %q: %w", domain, err) + } + certKey := domain + ".crt" + keyKey := domain + ".key" + + cert, err = s.memory.ReadState(ipn.StateKey(certKey)) + if err == nil { + key, err = s.memory.ReadState(ipn.StateKey(keyKey)) if err == nil { - for id, bs := range data { - // The in-memory store does not distinguish between values read from state Secret on - // init and values written to afterwards. Values read from the state - // Secret will always be sanitized, so we also need to sanitize values written to store - // later, so that the Read logic can just lookup keys in sanitized form. 
- s.memory.WriteState(ipn.StateKey(sanitizeKey(id)), bs) - } + return cert, key, nil + } + } + if s.certShareMode != "ro" { + return nil, nil, ipn.ErrStateNotExist + } + // If we are in cert share read only mode, it is possible that a write + // replica just issued the TLS cert for this DNS name and it has not + // been loaded to store yet, so check the Secret. + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + secret, err := s.client.GetSecret(ctx, domain) + if err != nil { + if kubeclient.IsNotFoundErr(err) { + // TODO(irbekrm): we should return a more specific error + // that wraps ipn.ErrStateNotExist here. + return nil, nil, ipn.ErrStateNotExist } + return nil, nil, fmt.Errorf("getting TLS Secret %q: %w", domain, err) + } + cert = secret.Data[keyTLSCert] + key = secret.Data[keyTLSKey] + if len(cert) == 0 || len(key) == 0 { + return nil, nil, ipn.ErrStateNotExist + } + // TODO(irbekrm): a read between these two separate writes would + // get a mismatched cert and key. Allow writing both cert and + // key to the memory store in a single lock-protected operation. + s.memory.WriteState(ipn.StateKey(certKey), cert) + s.memory.WriteState(ipn.StateKey(keyKey), key) + return cert, key, nil +} + +func (s *Store) updateSecret(data map[string][]byte, secretName string) (err error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer func() { if err != nil { if err := s.client.Event(ctx, eventTypeWarning, reasonTailscaleStateUpdateFailed, err.Error()); err != nil { log.Printf("kubestore: error creating tailscaled state update Event: %v", err) @@ -116,17 +232,17 @@ func (s *Store) updateStateSecret(data map[string][]byte) (err error) { } cancel() }() - secret, err := s.client.GetSecret(ctx, s.secretName) + secret, err := s.client.GetSecret(ctx, secretName) if err != nil { // If the Secret does not exist, create it with the required data. - if kubeclient.IsNotFoundErr(err) { + if kubeclient.IsNotFoundErr(err) && s.canCreateSecret(secretName) { return s.client.CreateSecret(ctx, &kubeapi.Secret{ TypeMeta: kubeapi.TypeMeta{ APIVersion: "v1", Kind: "Secret", }, ObjectMeta: kubeapi.ObjectMeta{ - Name: s.secretName, + Name: secretName, }, Data: func(m map[string][]byte) map[string][]byte { d := make(map[string][]byte, len(m)) @@ -137,9 +253,9 @@ func (s *Store) updateStateSecret(data map[string][]byte) (err error) { }(data), }) } - return err + return fmt.Errorf("error getting Secret %s: %w", secretName, err) } - if s.canPatch { + if s.canPatchSecret(secretName) { var m []kubeclient.JSONPatch // If the user has pre-created a Secret with no data, we need to ensure the top level /data field. 
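+		// (A JSON Patch add of /data/<key> fails if the parent /data object does
+		// not exist yet, so create it first.)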
if len(secret.Data) == 0 { @@ -166,7 +282,7 @@ func (s *Store) updateStateSecret(data map[string][]byte) (err error) { }) } } - if err := s.client.JSONPatchResource(ctx, s.secretName, kubeclient.TypeSecrets, m); err != nil { + if err := s.client.JSONPatchResource(ctx, secretName, kubeclient.TypeSecrets, m); err != nil { return fmt.Errorf("error patching Secret %s: %w", s.secretName, err) } return nil @@ -176,9 +292,9 @@ func (s *Store) updateStateSecret(data map[string][]byte) (err error) { mak.Set(&secret.Data, sanitizeKey(key), val) } if err := s.client.UpdateSecret(ctx, secret); err != nil { - return err + return fmt.Errorf("error updating Secret %s: %w", s.secretName, err) } - return err + return nil } func (s *Store) loadState() (err error) { @@ -202,6 +318,96 @@ func (s *Store) loadState() (err error) { return nil } +// runCertReload relists and reloads all TLS certs for endpoints shared by this +// node from Secrets other than the state Secret to ensure that renewed certs get eventually loaded. +// It is not critical to reload a cert immediately after +// renewal, so a daily check is acceptable. +// Currently (3/2025) this is only used for the shared HA Ingress certs on 'read' replicas. +// Note that if shared certs are not found in memory on an HTTPS request, we +// do a Secret lookup, so this mechanism does not need to ensure that newly +// added Ingresses' certs get loaded. +func (s *Store) runCertReload(ctx context.Context, logf logger.Logf) { + ticker := time.NewTicker(time.Hour * 24) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + sel := s.certSecretSelector() + if err := s.loadCerts(ctx, sel); err != nil { + logf("[unexpected] error reloading TLS certs: %v", err) + } + } + } +} + +// loadCerts lists all Secrets matching the provided selector and loads TLS +// certs and keys from those. +func (s *Store) loadCerts(ctx context.Context, sel map[string]string) error { + ss, err := s.client.ListSecrets(ctx, sel) + if err != nil { + return fmt.Errorf("error listing TLS Secrets: %w", err) + } + for _, secret := range ss.Items { + if !hasTLSData(&secret) { + continue + } + // Only load secrets that have valid domain names (ending in .ts.net) + if !strings.HasSuffix(secret.Name, ".ts.net") { + continue + } + s.memory.WriteState(ipn.StateKey(secret.Name)+".crt", secret.Data[keyTLSCert]) + s.memory.WriteState(ipn.StateKey(secret.Name)+".key", secret.Data[keyTLSKey]) + } + return nil +} + +// canCreateSecret returns true if this node should be allowed to create the given +// Secret in its namespace. +func (s *Store) canCreateSecret(secret string) bool { + // Only allow creating the state Secret (and not TLS Secrets). + return secret == s.secretName +} + +// canPatchSecret returns true if this node should be allowed to patch the given +// Secret. +func (s *Store) canPatchSecret(secret string) bool { + // For backwards compatibility reasons, setups where the proxies are not + // given PATCH permissions for state Secrets are allowed. For TLS + // Secrets, we should always have PATCH permissions. + if secret == s.secretName { + return s.canPatch + } + return true +} + +// certSecretSelector returns a label selector that can be used to list all +// Secrets that aren't Tailscale state Secrets and contain TLS certificates for +// HTTPS endpoints that this node serves. +// Currently (3/2025) this only applies to the Kubernetes Operator's ingress +// ProxyGroup. 
+func (s *Store) certSecretSelector() map[string]string { + if s.podName == "" { + return map[string]string{} + } + p := strings.LastIndex(s.podName, "-") + if p == -1 { + return map[string]string{} + } + pgName := s.podName[:p] + return map[string]string{ + kubetypes.LabelSecretType: "certs", + kubetypes.LabelManaged: "true", + "tailscale.com/proxy-group": pgName, + } +} + +// hasTLSData returns true if the provided Secret contains non-empty TLS cert and key. +func hasTLSData(s *kubeapi.Secret) bool { + return len(s.Data[keyTLSCert]) != 0 && len(s.Data[keyTLSKey]) != 0 +} + // sanitizeKey converts any value that can be converted to a string into a valid Kubernetes Secret key. // Valid characters are alphanumeric, -, _, and . // https://kubernetes.io/docs/concepts/configuration/secret/#restriction-names-data. diff --git a/ipn/store/kubestore/store_kube_test.go b/ipn/store/kubestore/store_kube_test.go index 351458efb..2ed16e77b 100644 --- a/ipn/store/kubestore/store_kube_test.go +++ b/ipn/store/kubestore/store_kube_test.go @@ -4,33 +4,37 @@ package kubestore import ( + "bytes" "context" + "encoding/json" + "fmt" "strings" "testing" "github.com/google/go-cmp/cmp" + "tailscale.com/envknob" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" ) -func TestUpdateStateSecret(t *testing.T) { +func TestWriteState(t *testing.T) { tests := []struct { name string initial map[string][]byte - updates map[string][]byte + key ipn.StateKey + value []byte wantData map[string][]byte allowPatch bool }{ { - name: "basic_update", + name: "basic_write", initial: map[string][]byte{ "existing": []byte("old"), }, - updates: map[string][]byte{ - "foo": []byte("bar"), - }, + key: "foo", + value: []byte("bar"), wantData: map[string][]byte{ "existing": []byte("old"), "foo": []byte("bar"), @@ -42,35 +46,17 @@ func TestUpdateStateSecret(t *testing.T) { initial: map[string][]byte{ "foo": []byte("old"), }, - updates: map[string][]byte{ - "foo": []byte("new"), - }, + key: "foo", + value: []byte("new"), wantData: map[string][]byte{ "foo": []byte("new"), }, allowPatch: true, }, { - name: "multiple_updates", - initial: map[string][]byte{ - "keep": []byte("keep"), - }, - updates: map[string][]byte{ - "foo": []byte("bar"), - "baz": []byte("qux"), - }, - wantData: map[string][]byte{ - "keep": []byte("keep"), - "foo": []byte("bar"), - "baz": []byte("qux"), - }, - allowPatch: true, - }, - { - name: "create_new_secret", - updates: map[string][]byte{ - "foo": []byte("bar"), - }, + name: "create_new_secret", + key: "foo", + value: []byte("bar"), wantData: map[string][]byte{ "foo": []byte("bar"), }, @@ -81,29 +67,23 @@ func TestUpdateStateSecret(t *testing.T) { initial: map[string][]byte{ "foo": []byte("old"), }, - updates: map[string][]byte{ - "foo": []byte("new"), - }, + key: "foo", + value: []byte("new"), wantData: map[string][]byte{ "foo": []byte("new"), }, allowPatch: false, }, { - name: "sanitize_keys", + name: "sanitize_key", initial: map[string][]byte{ "clean-key": []byte("old"), }, - updates: map[string][]byte{ - "dirty@key": []byte("new"), - "also/bad": []byte("value"), - "good.key": []byte("keep"), - }, + key: "dirty@key", + value: []byte("new"), wantData: map[string][]byte{ "clean-key": []byte("old"), "dirty_key": []byte("new"), - "also_bad": []byte("value"), - "good.key": []byte("keep"), }, allowPatch: true, }, @@ -152,13 +132,13 @@ func TestUpdateStateSecret(t *testing.T) { s := &Store{ client: client, canPatch: tt.allowPatch, - secretName: "test-secret", + 
secretName: "ts-state", memory: mem.Store{}, } - err := s.updateStateSecret(tt.updates) + err := s.WriteState(tt.key, tt.value) if err != nil { - t.Errorf("updateStateSecret() error = %v", err) + t.Errorf("WriteState() error = %v", err) return } @@ -168,16 +148,576 @@ func TestUpdateStateSecret(t *testing.T) { } // Verify memory store was updated - for k, v := range tt.updates { - got, err := s.memory.ReadState(ipn.StateKey(sanitizeKey(k))) + got, err := s.memory.ReadState(ipn.StateKey(sanitizeKey(string(tt.key)))) + if err != nil { + t.Errorf("reading from memory store: %v", err) + } + if !cmp.Equal(got, tt.value) { + t.Errorf("memory store key %q = %v, want %v", tt.key, got, tt.value) + } + }) + } +} + +func TestWriteTLSCertAndKey(t *testing.T) { + const ( + testDomain = "my-app.tailnetxyz.ts.net" + testCert = "fake-cert" + testKey = "fake-key" + ) + + tests := []struct { + name string + initial map[string][]byte // pre-existing cert and key + certShareMode string + allowPatch bool // whether client can patch the Secret + wantSecretName string // name of the Secret where cert and key should be written + wantSecretData map[string][]byte + wantMemoryStore map[ipn.StateKey][]byte + }{ + { + name: "basic_write", + initial: map[string][]byte{ + "existing": []byte("old"), + }, + allowPatch: true, + wantSecretName: "ts-state", + wantSecretData: map[string][]byte{ + "existing": []byte("old"), + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + wantMemoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + }, + { + name: "cert_share_mode_write", + certShareMode: "rw", + allowPatch: true, + wantSecretName: "my-app.tailnetxyz.ts.net", + wantSecretData: map[string][]byte{ + "tls.crt": []byte(testCert), + "tls.key": []byte(testKey), + }, + wantMemoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + }, + { + name: "cert_share_mode_write_update_existing", + initial: map[string][]byte{ + "tls.crt": []byte("old-cert"), + "tls.key": []byte("old-key"), + }, + certShareMode: "rw", + allowPatch: true, + wantSecretName: "my-app.tailnetxyz.ts.net", + wantSecretData: map[string][]byte{ + "tls.crt": []byte(testCert), + "tls.key": []byte(testKey), + }, + wantMemoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + }, + { + name: "update_existing", + initial: map[string][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte("old-cert"), + "my-app.tailnetxyz.ts.net.key": []byte("old-key"), + }, + certShareMode: "", + allowPatch: true, + wantSecretName: "ts-state", + wantSecretData: map[string][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + wantMemoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + }, + { + name: "patch_denied", + certShareMode: "", + allowPatch: false, + wantSecretName: "ts-state", + wantSecretData: map[string][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + wantMemoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + }, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + + // Set POD_NAME for testing selectors + envknob.Setenv("POD_NAME", "ingress-proxies-1") + defer envknob.Setenv("POD_NAME", "") + + secret := tt.initial // track current state + client := &kubeclient.FakeClient{ + GetSecretImpl: func(ctx context.Context, name string) (*kubeapi.Secret, error) { + if secret == nil { + return nil, &kubeapi.Status{Code: 404} + } + return &kubeapi.Secret{Data: secret}, nil + }, + CheckSecretPermissionsImpl: func(ctx context.Context, name string) (bool, bool, error) { + return tt.allowPatch, true, nil + }, + CreateSecretImpl: func(ctx context.Context, s *kubeapi.Secret) error { + if s.Name != tt.wantSecretName { + t.Errorf("CreateSecret called with wrong name, got %q, want %q", s.Name, tt.wantSecretName) + } + secret = s.Data + return nil + }, + UpdateSecretImpl: func(ctx context.Context, s *kubeapi.Secret) error { + if s.Name != tt.wantSecretName { + t.Errorf("UpdateSecret called with wrong name, got %q, want %q", s.Name, tt.wantSecretName) + } + secret = s.Data + return nil + }, + JSONPatchResourceImpl: func(ctx context.Context, name, resourceType string, patches []kubeclient.JSONPatch) error { + if !tt.allowPatch { + return &kubeapi.Status{Reason: "Forbidden"} + } + if name != tt.wantSecretName { + t.Errorf("JSONPatchResource called with wrong name, got %q, want %q", name, tt.wantSecretName) + } + if secret == nil { + secret = make(map[string][]byte) + } + for _, p := range patches { + if p.Op == "add" && p.Path == "/data" { + secret = p.Value.(map[string][]byte) + } else if p.Op == "add" && strings.HasPrefix(p.Path, "/data/") { + key := strings.TrimPrefix(p.Path, "/data/") + secret[key] = p.Value.([]byte) + } + } + return nil + }, + } + + s := &Store{ + client: client, + canPatch: tt.allowPatch, + secretName: tt.wantSecretName, + certShareMode: tt.certShareMode, + memory: mem.Store{}, + } + + err := s.WriteTLSCertAndKey(testDomain, []byte(testCert), []byte(testKey)) + if err != nil { + t.Errorf("WriteTLSCertAndKey() error = '%v'", err) + return + } + + // Verify secret data + if diff := cmp.Diff(secret, tt.wantSecretData); diff != "" { + t.Errorf("secret data mismatch (-got +want):\n%s", diff) + } + + // Verify memory store was updated + for key, want := range tt.wantMemoryStore { + got, err := s.memory.ReadState(key) if err != nil { t.Errorf("reading from memory store: %v", err) continue } - if !cmp.Equal(got, v) { - t.Errorf("memory store key %q = %v, want %v", k, got, v) + if !cmp.Equal(got, want) { + t.Errorf("memory store key %q = %v, want %v", key, got, want) } } }) } } + +func TestReadTLSCertAndKey(t *testing.T) { + const ( + testDomain = "my-app.tailnetxyz.ts.net" + testCert = "fake-cert" + testKey = "fake-key" + ) + + tests := []struct { + name string + memoryStore map[ipn.StateKey][]byte // pre-existing memory store state + certShareMode string + domain string + secretData map[string][]byte // data to return from mock GetSecret + secretGetErr error // error to return from mock GetSecret + wantCert []byte + wantKey []byte + wantErr error + // what should end up in memory store after the store is created + wantMemoryStore map[ipn.StateKey][]byte + }{ + { + name: "found", + memoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + domain: testDomain, + wantCert: []byte(testCert), + wantKey: []byte(testKey), + wantMemoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + 
"my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + }, + { + name: "not_found", + domain: testDomain, + wantErr: ipn.ErrStateNotExist, + }, + { + name: "cert_share_ro_mode_found_in_secret", + certShareMode: "ro", + domain: testDomain, + secretData: map[string][]byte{ + "tls.crt": []byte(testCert), + "tls.key": []byte(testKey), + }, + wantCert: []byte(testCert), + wantKey: []byte(testKey), + wantMemoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + }, + { + name: "cert_share_ro_mode_found_in_memory", + certShareMode: "ro", + memoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + domain: testDomain, + wantCert: []byte(testCert), + wantKey: []byte(testKey), + wantMemoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + }, + { + name: "cert_share_ro_mode_not_found", + certShareMode: "ro", + domain: testDomain, + secretGetErr: &kubeapi.Status{Code: 404}, + wantErr: ipn.ErrStateNotExist, + }, + { + name: "cert_share_ro_mode_empty_cert_in_secret", + certShareMode: "ro", + domain: testDomain, + secretData: map[string][]byte{ + "tls.crt": {}, + "tls.key": []byte(testKey), + }, + wantErr: ipn.ErrStateNotExist, + }, + { + name: "cert_share_ro_mode_kube_api_error", + certShareMode: "ro", + domain: testDomain, + secretGetErr: fmt.Errorf("api error"), + wantErr: fmt.Errorf("getting TLS Secret %q: api error", sanitizeKey(testDomain)), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + client := &kubeclient.FakeClient{ + GetSecretImpl: func(ctx context.Context, name string) (*kubeapi.Secret, error) { + if tt.secretGetErr != nil { + return nil, tt.secretGetErr + } + return &kubeapi.Secret{Data: tt.secretData}, nil + }, + } + + s := &Store{ + client: client, + secretName: "ts-state", + certShareMode: tt.certShareMode, + memory: mem.Store{}, + } + + // Initialize memory store + for k, v := range tt.memoryStore { + s.memory.WriteState(k, v) + } + + gotCert, gotKey, err := s.ReadTLSCertAndKey(tt.domain) + if tt.wantErr != nil { + if err == nil { + t.Errorf("ReadTLSCertAndKey() error = nil, want error containing %v", tt.wantErr) + return + } + if !strings.Contains(err.Error(), tt.wantErr.Error()) { + t.Errorf("ReadTLSCertAndKey() error = %v, want error containing %v", err, tt.wantErr) + } + return + } + if err != nil { + t.Errorf("ReadTLSCertAndKey() unexpected error: %v", err) + return + } + + if !bytes.Equal(gotCert, tt.wantCert) { + t.Errorf("ReadTLSCertAndKey() gotCert = %v, want %v", gotCert, tt.wantCert) + } + if !bytes.Equal(gotKey, tt.wantKey) { + t.Errorf("ReadTLSCertAndKey() gotKey = %v, want %v", gotKey, tt.wantKey) + } + + // Verify memory store contents after operation + if tt.wantMemoryStore != nil { + for key, want := range tt.wantMemoryStore { + got, err := s.memory.ReadState(key) + if err != nil { + t.Errorf("reading from memory store: %v", err) + continue + } + if !bytes.Equal(got, want) { + t.Errorf("memory store key %q = %v, want %v", key, got, want) + } + } + } + }) + } +} + +func TestNewWithClient(t *testing.T) { + const ( + secretName = "ts-state" + testCert = "fake-cert" + testKey = "fake-key" + ) + + certSecretsLabels := map[string]string{ + "tailscale.com/secret-type": "certs", + "tailscale.com/managed": "true", + "tailscale.com/proxy-group": "ingress-proxies", + } + + // Helper 
function to create Secret objects for testing + makeSecret := func(name string, labels map[string]string, certSuffix string) kubeapi.Secret { + return kubeapi.Secret{ + ObjectMeta: kubeapi.ObjectMeta{ + Name: name, + Labels: labels, + }, + Data: map[string][]byte{ + "tls.crt": []byte(testCert + certSuffix), + "tls.key": []byte(testKey + certSuffix), + }, + } + } + + tests := []struct { + name string + stateSecretContents map[string][]byte // data in state Secret + TLSSecrets []kubeapi.Secret // list of TLS cert Secrets + certMode string + secretGetErr error // error to return from GetSecret + secretsListErr error // error to return from ListSecrets + wantMemoryStoreContents map[ipn.StateKey][]byte + wantErr error + }{ + { + name: "empty_state_secret", + stateSecretContents: map[string][]byte{}, + wantMemoryStoreContents: map[ipn.StateKey][]byte{}, + }, + { + name: "state_secret_not_found", + secretGetErr: &kubeapi.Status{Code: 404}, + wantMemoryStoreContents: map[ipn.StateKey][]byte{}, + }, + { + name: "state_secret_get_error", + secretGetErr: fmt.Errorf("some error"), + wantErr: fmt.Errorf("error loading state from kube Secret: some error"), + }, + { + name: "load_existing_state", + stateSecretContents: map[string][]byte{ + "foo": []byte("bar"), + "baz": []byte("qux"), + }, + wantMemoryStoreContents: map[ipn.StateKey][]byte{ + "foo": []byte("bar"), + "baz": []byte("qux"), + }, + }, + { + name: "load_select_certs_in_read_only_mode", + certMode: "ro", + stateSecretContents: map[string][]byte{ + "foo": []byte("bar"), + }, + TLSSecrets: []kubeapi.Secret{ + makeSecret("app1.tailnetxyz.ts.net", certSecretsLabels, "1"), + makeSecret("app2.tailnetxyz.ts.net", certSecretsLabels, "2"), + makeSecret("some-other-secret", nil, "3"), + makeSecret("app3.other-proxies.ts.net", map[string]string{ + "tailscale.com/secret-type": "certs", + "tailscale.com/managed": "true", + "tailscale.com/proxy-group": "some-other-proxygroup", + }, "4"), + }, + wantMemoryStoreContents: map[ipn.StateKey][]byte{ + "foo": []byte("bar"), + "app1.tailnetxyz.ts.net.crt": []byte(testCert + "1"), + "app1.tailnetxyz.ts.net.key": []byte(testKey + "1"), + "app2.tailnetxyz.ts.net.crt": []byte(testCert + "2"), + "app2.tailnetxyz.ts.net.key": []byte(testKey + "2"), + }, + }, + { + name: "load_select_certs_in_read_write_mode", + certMode: "rw", + stateSecretContents: map[string][]byte{ + "foo": []byte("bar"), + }, + TLSSecrets: []kubeapi.Secret{ + makeSecret("app1.tailnetxyz.ts.net", certSecretsLabels, "1"), + makeSecret("app2.tailnetxyz.ts.net", certSecretsLabels, "2"), + makeSecret("some-other-secret", nil, "3"), + makeSecret("app3.other-proxies.ts.net", map[string]string{ + "tailscale.com/secret-type": "certs", + "tailscale.com/managed": "true", + "tailscale.com/proxy-group": "some-other-proxygroup", + }, "4"), + }, + wantMemoryStoreContents: map[ipn.StateKey][]byte{ + "foo": []byte("bar"), + "app1.tailnetxyz.ts.net.crt": []byte(testCert + "1"), + "app1.tailnetxyz.ts.net.key": []byte(testKey + "1"), + "app2.tailnetxyz.ts.net.crt": []byte(testCert + "2"), + "app2.tailnetxyz.ts.net.key": []byte(testKey + "2"), + }, + }, + { + name: "list_cert_secrets_fails", + certMode: "ro", + stateSecretContents: map[string][]byte{ + "foo": []byte("bar"), + }, + secretsListErr: fmt.Errorf("list error"), + // The error is logged but not returned, and state is still loaded + wantMemoryStoreContents: map[ipn.StateKey][]byte{ + "foo": []byte("bar"), + }, + }, + { + name: "cert_secrets_not_loaded_when_not_in_share_mode", + certMode: "", + 
stateSecretContents: map[string][]byte{ + "foo": []byte("bar"), + }, + TLSSecrets: []kubeapi.Secret{ + makeSecret("app1.tailnetxyz.ts.net", certSecretsLabels, "1"), + }, + wantMemoryStoreContents: map[ipn.StateKey][]byte{ + "foo": []byte("bar"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + envknob.Setenv("TS_CERT_SHARE_MODE", tt.certMode) + + t.Setenv("POD_NAME", "ingress-proxies-1") + + client := &kubeclient.FakeClient{ + GetSecretImpl: func(ctx context.Context, name string) (*kubeapi.Secret, error) { + if tt.secretGetErr != nil { + return nil, tt.secretGetErr + } + if name == secretName { + return &kubeapi.Secret{Data: tt.stateSecretContents}, nil + } + return nil, &kubeapi.Status{Code: 404} + }, + CheckSecretPermissionsImpl: func(ctx context.Context, name string) (bool, bool, error) { + return true, true, nil + }, + ListSecretsImpl: func(ctx context.Context, selector map[string]string) (*kubeapi.SecretList, error) { + if tt.secretsListErr != nil { + return nil, tt.secretsListErr + } + var matchingSecrets []kubeapi.Secret + for _, secret := range tt.TLSSecrets { + matches := true + for k, v := range selector { + if secret.Labels[k] != v { + matches = false + break + } + } + if matches { + matchingSecrets = append(matchingSecrets, secret) + } + } + return &kubeapi.SecretList{Items: matchingSecrets}, nil + }, + } + + s, err := newWithClient(t.Logf, client, secretName) + if tt.wantErr != nil { + if err == nil { + t.Errorf("NewWithClient() error = nil, want error containing %v", tt.wantErr) + return + } + if !strings.Contains(err.Error(), tt.wantErr.Error()) { + t.Errorf("NewWithClient() error = %v, want error containing %v", err, tt.wantErr) + } + return + } + + if err != nil { + t.Errorf("NewWithClient() unexpected error: %v", err) + return + } + + // Verify memory store contents + gotJSON, err := s.memory.ExportToJSON() + if err != nil { + t.Errorf("ExportToJSON failed: %v", err) + return + } + var got map[ipn.StateKey][]byte + if err := json.Unmarshal(gotJSON, &got); err != nil { + t.Errorf("failed to unmarshal memory store JSON: %v", err) + return + } + want := tt.wantMemoryStoreContents + if want == nil { + want = map[ipn.StateKey][]byte{} + } + if diff := cmp.Diff(got, want); diff != "" { + t.Errorf("memory store contents mismatch (-got +want):\n%s", diff) + } + }) + } +} diff --git a/kube/kubeapi/api.go b/kube/kubeapi/api.go index a2ae8cc79..e62bd6e2b 100644 --- a/kube/kubeapi/api.go +++ b/kube/kubeapi/api.go @@ -153,6 +153,14 @@ type Secret struct { Data map[string][]byte `json:"data,omitempty"` } +// SecretList is a list of Secret objects. +type SecretList struct { + TypeMeta `json:",inline"` + ObjectMeta `json:"metadata"` + + Items []Secret `json:"items,omitempty"` +} + // Event contains a subset of fields from corev1.Event. // https://github.com/kubernetes/api/blob/6cc44b8953ae704d6d9ec2adf32e7ae19199ea9f/core/v1/types.go#L7034 // It is copied here to avoid having to import kube libraries. diff --git a/kube/kubeclient/client.go b/kube/kubeclient/client.go index d4309448d..332b21106 100644 --- a/kube/kubeclient/client.go +++ b/kube/kubeclient/client.go @@ -60,6 +60,7 @@ func readFile(n string) ([]byte, error) { // It expects to be run inside a cluster. 
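// A minimal sketch of how the new ListSecrets method is intended to be used
// (the selector values shown are the ones kubestore uses elsewhere in this
// series, included here only for illustration):
//
//	secrets, err := cl.ListSecrets(ctx, map[string]string{
//		"tailscale.com/secret-type": "certs",
//		"tailscale.com/managed":     "true",
//	})
//	if err != nil {
//		// handle error
//	}
//	for _, s := range secrets.Items {
//		// s is a kubeapi.Secret matching all selector labels.
//	}
//
// The client joins the map into a comma-separated labelSelector query
// parameter on the namespaced Secrets list URL.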
type Client interface { GetSecret(context.Context, string) (*kubeapi.Secret, error) + ListSecrets(context.Context, map[string]string) (*kubeapi.SecretList, error) UpdateSecret(context.Context, *kubeapi.Secret) error CreateSecret(context.Context, *kubeapi.Secret) error // Event attempts to ensure an event with the specified options associated with the Pod in which we are @@ -248,21 +249,35 @@ func (c *client) newRequest(ctx context.Context, method, url string, in any) (*h // GetSecret fetches the secret from the Kubernetes API. func (c *client) GetSecret(ctx context.Context, name string) (*kubeapi.Secret, error) { s := &kubeapi.Secret{Data: make(map[string][]byte)} - if err := c.kubeAPIRequest(ctx, "GET", c.resourceURL(name, TypeSecrets), nil, s); err != nil { + if err := c.kubeAPIRequest(ctx, "GET", c.resourceURL(name, TypeSecrets, ""), nil, s); err != nil { return nil, err } return s, nil } +// ListSecrets fetches the secret from the Kubernetes API. +func (c *client) ListSecrets(ctx context.Context, selector map[string]string) (*kubeapi.SecretList, error) { + sl := new(kubeapi.SecretList) + s := make([]string, 0, len(selector)) + for key, val := range selector { + s = append(s, key+"="+url.QueryEscape(val)) + } + ss := strings.Join(s, ",") + if err := c.kubeAPIRequest(ctx, "GET", c.resourceURL("", TypeSecrets, ss), nil, sl); err != nil { + return nil, err + } + return sl, nil +} + // CreateSecret creates a secret in the Kubernetes API. func (c *client) CreateSecret(ctx context.Context, s *kubeapi.Secret) error { s.Namespace = c.ns - return c.kubeAPIRequest(ctx, "POST", c.resourceURL("", TypeSecrets), s, nil) + return c.kubeAPIRequest(ctx, "POST", c.resourceURL("", TypeSecrets, ""), s, nil) } // UpdateSecret updates a secret in the Kubernetes API. func (c *client) UpdateSecret(ctx context.Context, s *kubeapi.Secret) error { - return c.kubeAPIRequest(ctx, "PUT", c.resourceURL(s.Name, TypeSecrets), s, nil) + return c.kubeAPIRequest(ctx, "PUT", c.resourceURL(s.Name, TypeSecrets, ""), s, nil) } // JSONPatch is a JSON patch operation. @@ -283,14 +298,14 @@ func (c *client) JSONPatchResource(ctx context.Context, name, typ string, patche return fmt.Errorf("unsupported JSON patch operation: %q", p.Op) } } - return c.kubeAPIRequest(ctx, "PATCH", c.resourceURL(name, typ), patches, nil, setHeader("Content-Type", "application/json-patch+json")) + return c.kubeAPIRequest(ctx, "PATCH", c.resourceURL(name, typ, ""), patches, nil, setHeader("Content-Type", "application/json-patch+json")) } // StrategicMergePatchSecret updates a secret in the Kubernetes API using a // strategic merge patch. // If a fieldManager is provided, it will be used to track the patch. func (c *client) StrategicMergePatchSecret(ctx context.Context, name string, s *kubeapi.Secret, fieldManager string) error { - surl := c.resourceURL(name, TypeSecrets) + surl := c.resourceURL(name, TypeSecrets, "") if fieldManager != "" { uv := url.Values{ "fieldManager": {fieldManager}, @@ -342,7 +357,7 @@ func (c *client) Event(ctx context.Context, typ, reason, msg string) error { LastTimestamp: now, Count: 1, } - return c.kubeAPIRequest(ctx, "POST", c.resourceURL("", typeEvents), &ev, nil) + return c.kubeAPIRequest(ctx, "POST", c.resourceURL("", typeEvents, ""), &ev, nil) } // If the Event already exists, we patch its count and last timestamp. 
This ensures that when users run 'kubectl // describe pod...', they see the event just once (but with a message of how many times it has appeared over @@ -472,9 +487,13 @@ func (c *client) checkPermission(ctx context.Context, verb, typ, name string) (b // resourceURL returns a URL that can be used to interact with the given resource type and, if name is not empty string, // the named resource of that type. // Note that this only works for core/v1 resource types. -func (c *client) resourceURL(name, typ string) string { +func (c *client) resourceURL(name, typ, sel string) string { if name == "" { - return fmt.Sprintf("%s/api/v1/namespaces/%s/%s", c.url, c.ns, typ) + url := fmt.Sprintf("%s/api/v1/namespaces/%s/%s", c.url, c.ns, typ) + if sel != "" { + url += "?labelSelector=" + sel + } + return url } return fmt.Sprintf("%s/api/v1/namespaces/%s/%s/%s", c.url, c.ns, typ, name) } @@ -487,7 +506,7 @@ func (c *client) nameForEvent(reason string) string { // getEvent fetches the event from the Kubernetes API. func (c *client) getEvent(ctx context.Context, name string) (*kubeapi.Event, error) { e := &kubeapi.Event{} - if err := c.kubeAPIRequest(ctx, "GET", c.resourceURL(name, typeEvents), nil, e); err != nil { + if err := c.kubeAPIRequest(ctx, "GET", c.resourceURL(name, typeEvents, ""), nil, e); err != nil { return nil, err } return e, nil diff --git a/kube/kubeclient/fake_client.go b/kube/kubeclient/fake_client.go index aea786ea0..c21dc2bf8 100644 --- a/kube/kubeclient/fake_client.go +++ b/kube/kubeclient/fake_client.go @@ -18,6 +18,7 @@ type FakeClient struct { CreateSecretImpl func(context.Context, *kubeapi.Secret) error UpdateSecretImpl func(context.Context, *kubeapi.Secret) error JSONPatchResourceImpl func(context.Context, string, string, []JSONPatch) error + ListSecretsImpl func(context.Context, map[string]string) (*kubeapi.SecretList, error) } func (fc *FakeClient) CheckSecretPermissions(ctx context.Context, name string) (bool, bool, error) { @@ -45,3 +46,9 @@ func (fc *FakeClient) UpdateSecret(ctx context.Context, secret *kubeapi.Secret) func (fc *FakeClient) CreateSecret(ctx context.Context, secret *kubeapi.Secret) error { return fc.CreateSecretImpl(ctx, secret) } +func (fc *FakeClient) ListSecrets(ctx context.Context, selector map[string]string) (*kubeapi.SecretList, error) { + if fc.ListSecretsImpl != nil { + return fc.ListSecretsImpl(ctx, selector) + } + return nil, nil +} diff --git a/kube/kubetypes/types.go b/kube/kubetypes/types.go index 894cbb41d..e54e1c99f 100644 --- a/kube/kubetypes/types.go +++ b/kube/kubetypes/types.go @@ -48,4 +48,7 @@ const ( PodIPv4Header string = "Pod-IPv4" EgessServicesPreshutdownEP = "/internal-egress-services-preshutdown" + + LabelManaged = "tailscale.com/managed" + LabelSecretType = "tailscale.com/secret-type" // "config", "state" "certs" ) From 74ee7493866c10da0ba0ff58a9020313e006a712 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Mon, 17 Mar 2025 18:06:58 +0000 Subject: [PATCH 0609/1708] client/tailscale: add tailnet lock fields to Device struct These are documented, but have not yet been defined in the client. 
https://tailscale.com/api#tag/devices/GET/device/{deviceId} Updates tailscale/corp#27050 Signed-off-by: Anton Tolchanov --- client/tailscale/devices.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/client/tailscale/devices.go b/client/tailscale/devices.go index b79191d53..0664f9e63 100644 --- a/client/tailscale/devices.go +++ b/client/tailscale/devices.go @@ -79,6 +79,13 @@ type Device struct { // Tailscale have attempted to collect this from the device but it has not // opted in, PostureIdentity will have Disabled=true. PostureIdentity *DevicePostureIdentity `json:"postureIdentity"` + + // TailnetLockKey is the tailnet lock public key of the node as a hex string. + TailnetLockKey string `json:"tailnetLockKey,omitempty"` + + // TailnetLockErr indicates an issue with the tailnet lock node-key signature + // on this device. This field is only populated when tailnet lock is enabled. + TailnetLockErr string `json:"tailnetLockError,omitempty"` } type DevicePostureIdentity struct { From daa5635ba6226bef75d37867fb3449332a1a9758 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Thu, 13 Mar 2025 15:29:58 -0700 Subject: [PATCH 0610/1708] tsweb: split promvarz into an optional dependency Allows the use of tsweb without pulling in all of the heavy prometheus client libraries, protobuf and so on. Updates #15160 Signed-off-by: David Anderson --- cmd/derper/depaware.txt | 13 +++++++------ cmd/derper/derper.go | 3 +++ cmd/derpprobe/derpprobe.go | 3 +++ cmd/k8s-operator/depaware.txt | 6 +++--- cmd/stund/depaware.txt | 13 +++++++------ cmd/stund/stund.go | 3 +++ cmd/tailscale/depaware.txt | 6 +++--- cmd/tailscaled/depaware.txt | 6 +++--- cmd/xdpderper/xdpderper.go | 3 +++ tsweb/debug.go | 13 +++++++++++-- tsweb/promvarz/promvarz.go | 13 +++++++++++-- tsweb/promvarz/promvarz_test.go | 2 +- 12 files changed, 58 insertions(+), 26 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 1812a1a8d..5d375a515 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -96,6 +96,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/disco from tailscale.com/derp tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ + tailscale.com/feature from tailscale.com/tsweb tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/hostinfo from tailscale.com/net/netmon+ tailscale.com/ipn from tailscale.com/client/local @@ -128,8 +129,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/tstime from tailscale.com/derp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate tailscale.com/tstime/rate from tailscale.com/derp - tailscale.com/tsweb from tailscale.com/cmd/derper - tailscale.com/tsweb/promvarz from tailscale.com/tsweb + tailscale.com/tsweb from tailscale.com/cmd/derper+ + tailscale.com/tsweb/promvarz from tailscale.com/cmd/derper tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/dnstype from tailscale.com/tailcfg+ tailscale.com/types/empty from tailscale.com/ipn @@ -309,7 +310,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa html from net/http/pprof+ html/template from tailscale.com/cmd/derper internal/abi from crypto/x509/internal/macos+ - internal/asan from syscall+ + internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug internal/bytealg from bytes+ internal/byteorder from crypto/cipher+ @@ -319,12 +320,12 @@ 
tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa internal/filepathlite from os+ internal/fmtsort from fmt+ internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from crypto/tls+ + internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ - internal/goexperiment from runtime+ + internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ internal/itoa from internal/poll+ - internal/msan from syscall+ + internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 221ee0bff..3c6fda68c 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -49,6 +49,9 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/version" + + // Support for prometheus varz in tsweb + _ "tailscale.com/tsweb/promvarz" ) var ( diff --git a/cmd/derpprobe/derpprobe.go b/cmd/derpprobe/derpprobe.go index 6e8c603b9..899838462 100644 --- a/cmd/derpprobe/derpprobe.go +++ b/cmd/derpprobe/derpprobe.go @@ -15,6 +15,9 @@ import ( "tailscale.com/prober" "tailscale.com/tsweb" "tailscale.com/version" + + // Support for prometheus varz in tsweb + _ "tailscale.com/tsweb/promvarz" ) var ( diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 1c27fddea..978744947 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -1151,7 +1151,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ html from html/template+ html/template from github.com/gorilla/csrf internal/abi from crypto/x509/internal/macos+ - internal/asan from syscall+ + internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug internal/bytealg from bytes+ internal/byteorder from crypto/cipher+ @@ -1163,11 +1163,11 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from archive/tar+ internal/godebugs from internal/godebug+ - internal/goexperiment from runtime+ + internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ internal/itoa from internal/poll+ internal/lazyregexp from go/doc - internal/msan from syscall+ + internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 1d0a093c4..2326e3a24 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -49,6 +49,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version tailscale.com/envknob from tailscale.com/tsweb+ + tailscale.com/feature from tailscale.com/tsweb tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/metrics from tailscale.com/net/stunserver+ tailscale.com/net/netaddr from tailscale.com/net/tsaddr @@ -57,8 +58,8 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/tsaddr from tailscale.com/tsweb tailscale.com/syncs from tailscale.com/metrics tailscale.com/tailcfg from tailscale.com/version - tailscale.com/tsweb from tailscale.com/cmd/stund - tailscale.com/tsweb/promvarz from tailscale.com/tsweb + tailscale.com/tsweb from tailscale.com/cmd/stund+ + 
tailscale.com/tsweb/promvarz from tailscale.com/cmd/stund tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/dnstype from tailscale.com/tailcfg tailscale.com/types/ipproto from tailscale.com/tailcfg @@ -194,7 +195,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar hash/maphash from go4.org/mem html from net/http/pprof+ internal/abi from crypto/x509/internal/macos+ - internal/asan from syscall+ + internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug internal/bytealg from bytes+ internal/byteorder from crypto/cipher+ @@ -204,12 +205,12 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar internal/filepathlite from os+ internal/fmtsort from fmt internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from crypto/tls+ + internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ - internal/goexperiment from runtime+ + internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ internal/itoa from internal/poll+ - internal/msan from syscall+ + internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ diff --git a/cmd/stund/stund.go b/cmd/stund/stund.go index c38429169..1055d966f 100644 --- a/cmd/stund/stund.go +++ b/cmd/stund/stund.go @@ -15,6 +15,9 @@ import ( "tailscale.com/net/stunserver" "tailscale.com/tsweb" + + // Support for prometheus varz in tsweb + _ "tailscale.com/tsweb/promvarz" ) var ( diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index afe62165c..431bf7b71 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -333,7 +333,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep image/color from github.com/skip2/go-qrcode+ image/png from github.com/skip2/go-qrcode internal/abi from crypto/x509/internal/macos+ - internal/asan from syscall+ + internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug internal/bytealg from bytes+ internal/byteorder from crypto/cipher+ @@ -345,10 +345,10 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from archive/tar+ internal/godebugs from internal/godebug+ - internal/goexperiment from runtime+ + internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ internal/itoa from internal/poll+ - internal/msan from syscall+ + internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index b47f43c76..0a9c46831 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -589,7 +589,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de html from html/template+ html/template from github.com/gorilla/csrf internal/abi from crypto/x509/internal/macos+ - internal/asan from syscall+ + internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug internal/bytealg from bytes+ internal/byteorder from crypto/cipher+ @@ -601,10 +601,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from archive/tar+ internal/godebugs from internal/godebug+ - internal/goexperiment from runtime+ + internal/goexperiment from 
hash/maphash+ internal/goos from crypto/x509+ internal/itoa from internal/poll+ - internal/msan from syscall+ + internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ diff --git a/cmd/xdpderper/xdpderper.go b/cmd/xdpderper/xdpderper.go index 599034ae7..c127baf54 100644 --- a/cmd/xdpderper/xdpderper.go +++ b/cmd/xdpderper/xdpderper.go @@ -18,6 +18,9 @@ import ( "tailscale.com/derp/xdp" "tailscale.com/net/netutil" "tailscale.com/tsweb" + + // Support for prometheus varz in tsweb + _ "tailscale.com/tsweb/promvarz" ) var ( diff --git a/tsweb/debug.go b/tsweb/debug.go index 843324482..ac1981999 100644 --- a/tsweb/debug.go +++ b/tsweb/debug.go @@ -14,7 +14,7 @@ import ( "os" "runtime" - "tailscale.com/tsweb/promvarz" + "tailscale.com/feature" "tailscale.com/tsweb/varz" "tailscale.com/version" ) @@ -37,6 +37,11 @@ type DebugHandler struct { title string // title displayed on index page } +// PrometheusHandler is an optional hook to enable native Prometheus +// support in the debug handler. It is disabled by default. Import the +// tailscale.com/tsweb/promvarz package to enable this feature. +var PrometheusHandler feature.Hook[func(*DebugHandler)] + // Debugger returns the DebugHandler registered on mux at /debug/, // creating it if necessary. func Debugger(mux *http.ServeMux) *DebugHandler { @@ -53,7 +58,11 @@ func Debugger(mux *http.ServeMux) *DebugHandler { ret.KVFunc("Uptime", func() any { return varz.Uptime() }) ret.KV("Version", version.Long()) ret.Handle("vars", "Metrics (Go)", expvar.Handler()) - ret.Handle("varz", "Metrics (Prometheus)", http.HandlerFunc(promvarz.Handler)) + if PrometheusHandler.IsSet() { + PrometheusHandler.Get()(ret) + } else { + ret.Handle("varz", "Metrics (Prometheus)", http.HandlerFunc(varz.Handler)) + } // pprof.Index serves everything that runtime/pprof.Lookup finds: // goroutine, threadcreate, heap, allocs, block, mutex diff --git a/tsweb/promvarz/promvarz.go b/tsweb/promvarz/promvarz.go index d0e1e52ba..1d978c767 100644 --- a/tsweb/promvarz/promvarz.go +++ b/tsweb/promvarz/promvarz.go @@ -11,12 +11,21 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/expfmt" + "tailscale.com/tsweb" "tailscale.com/tsweb/varz" ) -// Handler returns Prometheus metrics exported by our expvar converter +func init() { + tsweb.PrometheusHandler.Set(registerVarz) +} + +func registerVarz(debug *tsweb.DebugHandler) { + debug.Handle("varz", "Metrics (Prometheus)", http.HandlerFunc(handler)) +} + +// handler returns Prometheus metrics exported by our expvar converter // and the official Prometheus client. 
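// For reference, binaries that still want the Prometheus-backed /debug/varz
// page opt in with a blank import, exactly as the derper, derpprobe, stund
// and xdpderper changes above do:
//
//	import _ "tailscale.com/tsweb/promvarz"
//
// Without that import, tsweb's DebugHandler falls back to the lighter
// expvar-based varz.Handler for the "varz" endpoint.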
-func Handler(w http.ResponseWriter, r *http.Request) { +func handler(w http.ResponseWriter, r *http.Request) { if err := gatherNativePrometheusMetrics(w); err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) diff --git a/tsweb/promvarz/promvarz_test.go b/tsweb/promvarz/promvarz_test.go index a3f4e66f1..9f91b5d12 100644 --- a/tsweb/promvarz/promvarz_test.go +++ b/tsweb/promvarz/promvarz_test.go @@ -23,7 +23,7 @@ func TestHandler(t *testing.T) { testVar1.Set(42) testVar2.Set(4242) - svr := httptest.NewServer(http.HandlerFunc(Handler)) + svr := httptest.NewServer(http.HandlerFunc(handler)) defer svr.Close() want := ` From e091e71937bd6cd2b1f9e2685991600211f28446 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Fri, 7 Mar 2025 09:49:09 -0800 Subject: [PATCH 0611/1708] util/eventbus: remove debug UI from iOS build The use of html/template causes reflect-based linker bloat. Longer term we have options to bring the UI back to iOS, but for now, cut it out. Updates #15297 Signed-off-by: David Anderson --- util/eventbus/debughttp.go | 2 ++ util/eventbus/debughttp_ios.go | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) create mode 100644 util/eventbus/debughttp_ios.go diff --git a/util/eventbus/debughttp.go b/util/eventbus/debughttp.go index bbd929efb..18888cc56 100644 --- a/util/eventbus/debughttp.go +++ b/util/eventbus/debughttp.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ios + package eventbus import ( diff --git a/util/eventbus/debughttp_ios.go b/util/eventbus/debughttp_ios.go new file mode 100644 index 000000000..a898898b7 --- /dev/null +++ b/util/eventbus/debughttp_ios.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ios + +package eventbus + +import "tailscale.com/tsweb" + +func registerHTTPDebugger(d *Debugger, td *tsweb.DebugHandler) { + // The event bus debugging UI uses html/template, which uses + // reflection for method lookups. This forces the compiler to + // retain a lot more code and information to make dynamic method + // dispatch work, which is unacceptable bloat for the iOS build. + // + // TODO: https://github.com/tailscale/tailscale/issues/15297 to + // bring the debug UI back to iOS somehow. 
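	// Keeping this a no-op means the event bus page is simply never
	// registered with the tsweb debug handler on iOS builds; the full
	// implementation in debughttp.go (now guarded by the !ios build tag)
	// is unchanged for every other platform.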
+} From b0095a5da4a0f10e85d9c6a0c5c8005a3d7ea3a1 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 19 Mar 2025 01:53:15 -0700 Subject: [PATCH 0612/1708] cmd/k8s-operator: wait for VIPService before updating HA Ingress status (#15343) Update the HA Ingress controller to wait until it sees AdvertisedServices config propagated into at least 1 Pod's prefs before it updates the status on the Ingress, to ensure the ProxyGroup Pods are ready to serve traffic before indicating that the Ingress is ready Updates tailscale/corp#24795 Change-Id: I1b8ce23c9e312d08f9d02e48d70bdebd9e1a4757 Signed-off-by: Tom Proctor --- cmd/k8s-operator/ingress-for-pg.go | 91 ++++++++++++++++++------- cmd/k8s-operator/ingress-for-pg_test.go | 25 +++++++ cmd/k8s-operator/operator.go | 42 +++++++++++- cmd/k8s-operator/proxygroup.go | 6 +- cmd/k8s-operator/proxygroup_specs.go | 4 +- cmd/k8s-operator/tsrecorder.go | 41 ++++++----- 6 files changed, 158 insertions(+), 51 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index cdbfecb35..fe85509ad 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -154,13 +154,13 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin pg := &tsapi.ProxyGroup{} if err := r.Get(ctx, client.ObjectKey{Name: pgName}, pg); err != nil { if apierrors.IsNotFound(err) { - logger.Infof("ProxyGroup %q does not exist", pgName) + logger.Infof("ProxyGroup does not exist") return false, nil } return false, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) } if !tsoperator.ProxyGroupIsReady(pg) { - logger.Infof("ProxyGroup %q is not (yet) ready", pgName) + logger.Infof("ProxyGroup is not (yet) ready") return false, nil } @@ -175,8 +175,6 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin r.recorder.Event(ing, corev1.EventTypeWarning, "HTTPSNotEnabled", "HTTPS is not enabled on the tailnet; ingress may not work") } - logger = logger.With("proxy-group", pg.Name) - if !slices.Contains(ing.Finalizers, FinalizerNamePG) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. So, @@ -326,7 +324,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) || !reflect.DeepEqual(vipSvc.Ports, existingVIPSvc.Ports) || !strings.EqualFold(vipSvc.Comment, existingVIPSvc.Comment) { - logger.Infof("Ensuring VIPService %q exists and is up to date", hostname) + logger.Infof("Ensuring VIPService exists and is up to date") if err := r.tsClient.CreateOrUpdateVIPService(ctx, vipSvc); err != nil { return false, fmt.Errorf("error creating VIPService: %w", err) } @@ -338,31 +336,48 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin return false, fmt.Errorf("failed to update tailscaled config: %w", err) } - // TODO(irbekrm): check that the replicas are ready to route traffic for the VIPService before updating Ingress - // status. - // 6. Update Ingress status - oldStatus := ing.Status.DeepCopy() - ports := []networkingv1.IngressPortStatus{ - { - Protocol: "TCP", - Port: 443, - }, + // 6. Update Ingress status if ProxyGroup Pods are ready. 
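	// A Pod counts as advertising here once its state Secret's current
	// profile lists the VIPService under AdvertiseServices (for example
	// "AdvertiseServices":["svc:my-svc"] in the tests for this change);
	// until at least one replica does, the load balancer status below is
	// left empty.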
+ count, err := r.numberPodsAdvertising(ctx, pg.Name, serviceName) + if err != nil { + return false, fmt.Errorf("failed to check if any Pods are configured: %w", err) } - if isHTTPEndpointEnabled(ing) { - ports = append(ports, networkingv1.IngressPortStatus{ - Protocol: "TCP", - Port: 80, - }) - } - ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{ - { - Hostname: dnsName, - Ports: ports, - }, + + oldStatus := ing.Status.DeepCopy() + + switch count { + case 0: + ing.Status.LoadBalancer.Ingress = nil + default: + ports := []networkingv1.IngressPortStatus{ + { + Protocol: "TCP", + Port: 443, + }, + } + if isHTTPEndpointEnabled(ing) { + ports = append(ports, networkingv1.IngressPortStatus{ + Protocol: "TCP", + Port: 80, + }) + } + ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{ + { + Hostname: dnsName, + Ports: ports, + }, + } } - if apiequality.Semantic.DeepEqual(oldStatus, ing.Status) { + if apiequality.Semantic.DeepEqual(oldStatus, &ing.Status) { return svcsChanged, nil } + + const prefix = "Updating Ingress status" + if count == 0 { + logger.Infof("%s. No Pods are advertising VIPService yet", prefix) + } else { + logger.Infof("%s. %d Pod(s) advertising VIPService", prefix, count) + } + if err := r.Status().Update(ctx, ing); err != nil { return false, fmt.Errorf("failed to update Ingress status: %w", err) } @@ -726,6 +741,30 @@ func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con return nil } +func (a *HAIngressReconciler) numberPodsAdvertising(ctx context.Context, pgName string, serviceName tailcfg.ServiceName) (int, error) { + // Get all state Secrets for this ProxyGroup. + secrets := &corev1.SecretList{} + if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "state"))); err != nil { + return 0, fmt.Errorf("failed to list ProxyGroup %q state Secrets: %w", pgName, err) + } + + var count int + for _, secret := range secrets.Items { + prefs, ok, err := getDevicePrefs(&secret) + if err != nil { + return 0, fmt.Errorf("error getting node metadata: %w", err) + } + if !ok { + continue + } + if slices.Contains(prefs.AdvertiseServices, serviceName.String()) { + count++ + } + } + + return count, nil +} + // OwnerRef is an owner reference that uniquely identifies a Tailscale // Kubernetes operator instance. type OwnerRef struct { diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 2f675337e..0e90ec980 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -461,6 +461,31 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { t.Fatal(err) } + // Status will be empty until the VIPService shows up in prefs. + if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress, []networkingv1.IngressLoadBalancerIngress(nil)) { + t.Errorf("incorrect Ingress status: got %v, want empty", + ing.Status.LoadBalancer.Ingress) + } + + // Add the VIPService to prefs to have the Ingress recognised as ready. + mustCreate(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg-0", + Namespace: "operator-ns", + Labels: pgSecretLabels("test-pg", "state"), + }, + Data: map[string][]byte{ + "_current-profile": []byte("profile-foo"), + "profile-foo": []byte(`{"AdvertiseServices":["svc:my-svc"],"Config":{"NodeID":"node-foo"}}`), + }, + }) + + // Reconcile and re-fetch Ingress. 
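	// (The first reconcile above ran before any state Secret advertised the
	// VIPService, so the status was empty; this pass should observe the
	// "svc:my-svc" entry written above and populate the load balancer
	// status.)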
+ expectReconciled(t, ingPGR, "default", "test-ingress") + if err := fc.Get(context.Background(), client.ObjectKeyFromObject(ing), ing); err != nil { + t.Fatal(err) + } + wantStatus := []networkingv1.IngressPortStatus{ {Port: 443, Protocol: "TCP"}, {Port: 80, Protocol: "TCP"}, diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 1dcd130fb..ff2a959bd 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -347,6 +347,7 @@ func runReconcilers(opts reconcilerOpts) { For(&networkingv1.Ingress{}). Named("ingress-pg-reconciler"). Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog))). + Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(ingressesFromPGStateSecret(mgr.GetClient(), startlog))). Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). Complete(&HAIngressReconciler{ recorder: eventRecorder, @@ -978,8 +979,6 @@ func egressEpsFromPGStateSecrets(cl client.Client, ns string) handler.MapFunc { if v, ok := o.GetLabels()[LabelManaged]; !ok || v != "true" { return nil } - // TODO(irbekrm): for now this is good enough as all ProxyGroups are egress. Add a type check once we - // have ingress ProxyGroups. if parentType := o.GetLabels()[LabelParentType]; parentType != "proxygroup" { return nil } @@ -1040,6 +1039,45 @@ func reconcileRequestsForPG(pg string, cl client.Client, ns string) []reconcile. return reqs } +func ingressesFromPGStateSecret(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + secret, ok := o.(*corev1.Secret) + if !ok { + logger.Infof("[unexpected] ProxyGroup handler triggered for an object that is not a ProxyGroup") + return nil + } + if secret.ObjectMeta.Labels[LabelManaged] != "true" { + return nil + } + if secret.ObjectMeta.Labels[LabelParentType] != "proxygroup" { + return nil + } + if secret.ObjectMeta.Labels[labelSecretType] != "state" { + return nil + } + pgName, ok := secret.ObjectMeta.Labels[LabelParentName] + if !ok { + return nil + } + + ingList := &networkingv1.IngressList{} + if err := cl.List(ctx, ingList, client.MatchingFields{indexIngressProxyGroup: pgName}); err != nil { + logger.Infof("error listing Ingresses, skipping a reconcile for event on Secret %s: %v", secret.Name, err) + return nil + } + reqs := make([]reconcile.Request, 0) + for _, ing := range ingList.Items { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: ing.Namespace, + Name: ing.Name, + }, + }) + } + return reqs + } +} + // egressSvcsFromEgressProxyGroup is an event handler for egress ProxyGroups. It returns reconcile requests for all // user-created ExternalName Services that should be exposed on this ProxyGroup. 
func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 463d29249..c961c0471 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -645,7 +645,7 @@ func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr return nil, fmt.Errorf("unexpected secret %s was labelled as owned by the ProxyGroup %s: %w", secret.Name, pg.Name, err) } - id, dnsName, ok, err := getNodeMetadata(ctx, &secret) + prefs, ok, err := getDevicePrefs(&secret) if err != nil { return nil, err } @@ -656,8 +656,8 @@ func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr nm := nodeMetadata{ ordinal: ordinal, stateSecret: &secret, - tsID: id, - dnsName: dnsName, + tsID: prefs.Config.NodeID, + dnsName: prefs.Config.UserProfile.LoginName, } pod := &corev1.Pod{} if err := r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: secret.Name}, pod); err != nil && !apierrors.IsNotFound(err) { diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 40bbaec17..8c17c7b6b 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -318,9 +318,9 @@ func pgIngressCM(pg *tsapi.ProxyGroup, namespace string) *corev1.ConfigMap { } } -func pgSecretLabels(pgName, typ string) map[string]string { +func pgSecretLabels(pgName, secretType string) map[string]string { return pgLabels(pgName, map[string]string{ - labelSecretType: typ, // "config" or "state". + labelSecretType: secretType, // "config" or "state". }) } diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index 44ce731fe..e9e6b2c6c 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -230,7 +230,7 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Recorder) (bool, error) { logger := r.logger(tsr.Name) - id, _, ok, err := r.getNodeMetadata(ctx, tsr.Name) + prefs, ok, err := r.getDevicePrefs(ctx, tsr.Name) if err != nil { return false, err } @@ -243,6 +243,7 @@ func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Record return true, nil } + id := string(prefs.Config.NodeID) logger.Debugf("deleting device %s from control", string(id)) if err := r.tsClient.DeleteDevice(ctx, string(id)); err != nil { errResp := &tailscale.ErrResponse{} @@ -327,34 +328,33 @@ func (r *RecorderReconciler) getStateSecret(ctx context.Context, tsrName string) return secret, nil } -func (r *RecorderReconciler) getNodeMetadata(ctx context.Context, tsrName string) (id tailcfg.StableNodeID, dnsName string, ok bool, err error) { +func (r *RecorderReconciler) getDevicePrefs(ctx context.Context, tsrName string) (prefs prefs, ok bool, err error) { secret, err := r.getStateSecret(ctx, tsrName) if err != nil || secret == nil { - return "", "", false, err + return prefs, false, err } - return getNodeMetadata(ctx, secret) + return getDevicePrefs(secret) } -// getNodeMetadata returns 'ok == true' iff the node ID is found. The dnsName +// getDevicePrefs returns 'ok == true' iff the node ID is found. The dnsName // is expected to always be non-empty if the node ID is, but not required. 
-func getNodeMetadata(ctx context.Context, secret *corev1.Secret) (id tailcfg.StableNodeID, dnsName string, ok bool, err error) { +func getDevicePrefs(secret *corev1.Secret) (prefs prefs, ok bool, err error) { // TODO(tomhjp): Should maybe use ipn to parse the following info instead. currentProfile, ok := secret.Data[currentProfileKey] if !ok { - return "", "", false, nil + return prefs, false, nil } profileBytes, ok := secret.Data[string(currentProfile)] if !ok { - return "", "", false, nil + return prefs, false, nil } - var profile profile - if err := json.Unmarshal(profileBytes, &profile); err != nil { - return "", "", false, fmt.Errorf("failed to extract node profile info from state Secret %s: %w", secret.Name, err) + if err := json.Unmarshal(profileBytes, &prefs); err != nil { + return prefs, false, fmt.Errorf("failed to extract node profile info from state Secret %s: %w", secret.Name, err) } - ok = profile.Config.NodeID != "" - return tailcfg.StableNodeID(profile.Config.NodeID), profile.Config.UserProfile.LoginName, ok, nil + ok = prefs.Config.NodeID != "" + return prefs, ok, nil } func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string) (d tsapi.RecorderTailnetDevice, ok bool, err error) { @@ -367,14 +367,14 @@ func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string) } func getDeviceInfo(ctx context.Context, tsClient tsClient, secret *corev1.Secret) (d tsapi.RecorderTailnetDevice, ok bool, err error) { - nodeID, dnsName, ok, err := getNodeMetadata(ctx, secret) + prefs, ok, err := getDevicePrefs(secret) if !ok || err != nil { return tsapi.RecorderTailnetDevice{}, false, err } // TODO(tomhjp): The profile info doesn't include addresses, which is why we // need the API. Should we instead update the profile to include addresses? - device, err := tsClient.Device(ctx, string(nodeID), nil) + device, err := tsClient.Device(ctx, string(prefs.Config.NodeID), nil) if err != nil { return tsapi.RecorderTailnetDevice{}, false, fmt.Errorf("failed to get device info from API: %w", err) } @@ -383,20 +383,25 @@ func getDeviceInfo(ctx context.Context, tsClient tsClient, secret *corev1.Secret Hostname: device.Hostname, TailnetIPs: device.Addresses, } - if dnsName != "" { + if dnsName := prefs.Config.UserProfile.LoginName; dnsName != "" { d.URL = fmt.Sprintf("https://%s", dnsName) } return d, true, nil } -type profile struct { +// [prefs] is a subset of the ipn.Prefs struct used for extracting information +// from the state Secret of Tailscale devices. +type prefs struct { Config struct { - NodeID string `json:"NodeID"` + NodeID tailcfg.StableNodeID `json:"NodeID"` UserProfile struct { + // LoginName is the MagicDNS name of the device, e.g. foo.tail-scale.ts.net. LoginName string `json:"LoginName"` } `json:"UserProfile"` } `json:"Config"` + + AdvertiseServices []string `json:"AdvertiseServices"` } func markedForDeletion(obj metav1.Object) bool { From f50d3b22db19f34e233063050581a89694e10622 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 19 Mar 2025 12:49:31 +0000 Subject: [PATCH 0613/1708] cmd/k8s-operator: configure proxies for HA Ingress to run in cert share mode (#15308) cmd/k8s-operator: configure HA Ingress replicas to share certs Creates TLS certs Secret and RBAC that allows HA Ingress replicas to read/write to the Secret. Configures HA Ingress replicas to run in read-only mode. 
Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- .../deploy/chart/templates/operator-rbac.yaml | 2 +- .../deploy/manifests/operator.yaml | 1 + cmd/k8s-operator/dnsrecords_test.go | 9 +- cmd/k8s-operator/egress-pod-readiness.go | 6 +- cmd/k8s-operator/egress-pod-readiness_test.go | 6 +- cmd/k8s-operator/egress-services.go | 12 +- cmd/k8s-operator/ingress-for-pg.go | 161 +++++++++++++++++- cmd/k8s-operator/ingress-for-pg_test.go | 19 +++ cmd/k8s-operator/metrics_resources.go | 3 +- cmd/k8s-operator/operator.go | 28 +-- cmd/k8s-operator/operator_test.go | 8 +- cmd/k8s-operator/proxygroup_specs.go | 21 ++- cmd/k8s-operator/proxygroup_test.go | 2 +- cmd/k8s-operator/sts.go | 4 +- cmd/k8s-operator/sts_test.go | 21 +-- cmd/k8s-operator/svc.go | 8 +- cmd/k8s-operator/testutils_test.go | 9 +- ipn/store/kubestore/store_kube.go | 2 +- 18 files changed, 255 insertions(+), 67 deletions(-) diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml index 7056ef42f..5bf50617e 100644 --- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml @@ -75,7 +75,7 @@ rules: verbs: ["get", "list", "watch", "create", "update", "deletecollection"] - apiGroups: ["rbac.authorization.k8s.io"] resources: ["roles", "rolebindings"] - verbs: ["get", "create", "patch", "update", "list", "watch"] + verbs: ["get", "create", "patch", "update", "list", "watch", "deletecollection"] - apiGroups: ["monitoring.coreos.com"] resources: ["servicemonitors"] verbs: ["get", "list", "update", "create", "delete"] diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index e966ef559..9ee3b441a 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -4898,6 +4898,7 @@ rules: - update - list - watch + - deletecollection - apiGroups: - monitoring.coreos.com resources: diff --git a/cmd/k8s-operator/dnsrecords_test.go b/cmd/k8s-operator/dnsrecords_test.go index 389461b85..4e73e6c9e 100644 --- a/cmd/k8s-operator/dnsrecords_test.go +++ b/cmd/k8s-operator/dnsrecords_test.go @@ -22,6 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" operatorutils "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" "tailscale.com/tstest" "tailscale.com/types/ptr" ) @@ -163,10 +164,10 @@ func headlessSvcForParent(o client.Object, typ string) *corev1.Service { Name: o.GetName(), Namespace: "tailscale", Labels: map[string]string{ - LabelManaged: "true", - LabelParentName: o.GetName(), - LabelParentNamespace: o.GetNamespace(), - LabelParentType: typ, + kubetypes.LabelManaged: "true", + LabelParentName: o.GetName(), + LabelParentNamespace: o.GetNamespace(), + LabelParentType: typ, }, }, Spec: corev1.ServiceSpec{ diff --git a/cmd/k8s-operator/egress-pod-readiness.go b/cmd/k8s-operator/egress-pod-readiness.go index a6c57bf9d..05cf1aa1a 100644 --- a/cmd/k8s-operator/egress-pod-readiness.go +++ b/cmd/k8s-operator/egress-pod-readiness.go @@ -112,9 +112,9 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req } // Get all ClusterIP Services for all egress targets exposed to cluster via this ProxyGroup. 
lbls := map[string]string{ - LabelManaged: "true", - labelProxyGroup: proxyGroupName, - labelSvcType: typeEgress, + kubetypes.LabelManaged: "true", + labelProxyGroup: proxyGroupName, + labelSvcType: typeEgress, } svcs := &corev1.ServiceList{} if err := er.List(ctx, svcs, client.InNamespace(er.tsNamespace), client.MatchingLabels(lbls)); err != nil { diff --git a/cmd/k8s-operator/egress-pod-readiness_test.go b/cmd/k8s-operator/egress-pod-readiness_test.go index 5e6fa2bb4..3c35d9043 100644 --- a/cmd/k8s-operator/egress-pod-readiness_test.go +++ b/cmd/k8s-operator/egress-pod-readiness_test.go @@ -450,9 +450,9 @@ func newSvc(name string, port int32) (*corev1.Service, string) { Namespace: "operator-ns", Name: name, Labels: map[string]string{ - LabelManaged: "true", - labelProxyGroup: "dev", - labelSvcType: typeEgress, + kubetypes.LabelManaged: "true", + labelProxyGroup: "dev", + labelSvcType: typeEgress, }, }, Spec: corev1.ServiceSpec{}, diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index e997e5884..7103205ac 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -680,12 +680,12 @@ func egressSvcsConfigs(ctx context.Context, cl client.Client, proxyGroupName, ts // should probably validate and truncate (?) the names is they are too long. func egressSvcChildResourceLabels(svc *corev1.Service) map[string]string { return map[string]string{ - LabelManaged: "true", - LabelParentType: "svc", - LabelParentName: svc.Name, - LabelParentNamespace: svc.Namespace, - labelProxyGroup: svc.Annotations[AnnotationProxyGroup], - labelSvcType: typeEgress, + kubetypes.LabelManaged: "true", + LabelParentType: "svc", + LabelParentName: svc.Name, + LabelParentNamespace: svc.Namespace, + labelProxyGroup: svc.Annotations[AnnotationProxyGroup], + labelSvcType: typeEgress, } } diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index fe85509ad..dc74a86a5 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -22,6 +22,7 @@ import ( "go.uber.org/zap" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -240,8 +241,12 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin r.recorder.Event(ing, corev1.EventTypeWarning, "InvalidVIPService", msg) return false, nil } + // 3. Ensure that TLS Secret and RBAC exists + if err := r.ensureCertResources(ctx, pgName, dnsName); err != nil { + return false, fmt.Errorf("error ensuring cert resources: %w", err) + } - // 3. Ensure that the serve config for the ProxyGroup contains the VIPService. + // 4. Ensure that the serve config for the ProxyGroup contains the VIPService. 
cm, cfg, err := r.proxyGroupServeConfig(ctx, pgName) if err != nil { return false, fmt.Errorf("error getting Ingress serve config: %w", err) @@ -426,8 +431,15 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG if err = r.maybeUpdateAdvertiseServicesConfig(ctx, proxyGroupName, vipServiceName, false, logger); err != nil { return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } - delete(cfg.Services, vipServiceName) - serveConfigChanged = true + _, ok := cfg.Services[vipServiceName] + if ok { + logger.Infof("Removing VIPService %q from serve config", vipServiceName) + delete(cfg.Services, vipServiceName) + serveConfigChanged = true + } + if err := r.cleanupCertResources(ctx, proxyGroupName, vipServiceName); err != nil { + return false, fmt.Errorf("failed to clean up cert resources: %w", err) + } } } @@ -488,16 +500,22 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, if err != nil { return false, fmt.Errorf("error deleting VIPService: %w", err) } + + // 3. Clean up any cluster resources + if err := r.cleanupCertResources(ctx, pg, serviceName); err != nil { + return false, fmt.Errorf("failed to clean up cert resources: %w", err) + } + if cfg == nil || cfg.Services == nil { // user probably deleted the ProxyGroup return svcChanged, nil } - // 3. Unadvertise the VIPService in tailscaled config. + // 4. Unadvertise the VIPService in tailscaled config. if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg, serviceName, false, logger); err != nil { return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } - // 4. Remove the VIPService from the serve config for the ProxyGroup. + // 5. Remove the VIPService from the serve config for the ProxyGroup. logger.Infof("Removing VIPService %q from serve config for ProxyGroup %q", hostname, pg) delete(cfg.Services, serviceName) cfgBytes, err := json.Marshal(cfg) @@ -816,6 +834,49 @@ func (r *HAIngressReconciler) ownerRefsComment(svc *tailscale.VIPService) (strin return string(json), nil } +// ensureCertResources ensures that the TLS Secret for an HA Ingress and RBAC +// resources that allow proxies to manage the Secret are created. +// Note that Tailscale VIPService name validation matches Kubernetes +// resource name validation, so we can be certain that the VIPService name +// (domain) is a valid Kubernetes resource name. 
+// https://github.com/tailscale/tailscale/blob/8b1e7f646ee4730ad06c9b70c13e7861b964949b/util/dnsname/dnsname.go#L99 +// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names +func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pgName, domain string) error { + secret := certSecret(pgName, r.tsNamespace, domain) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, secret, nil); err != nil { + return fmt.Errorf("failed to create or update Secret %s: %w", secret.Name, err) + } + role := certSecretRole(pgName, r.tsNamespace, domain) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, nil); err != nil { + return fmt.Errorf("failed to create or update Role %s: %w", role.Name, err) + } + rb := certSecretRoleBinding(pgName, r.tsNamespace, domain) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, rb, nil); err != nil { + return fmt.Errorf("failed to create or update RoleBinding %s: %w", rb.Name, err) + } + return nil +} + +// cleanupCertResources ensures that the TLS Secret and associated RBAC +// resources that allow proxies to read/write to the Secret are deleted. +func (r *HAIngressReconciler) cleanupCertResources(ctx context.Context, pgName string, name tailcfg.ServiceName) error { + domainName, err := r.dnsNameForService(ctx, tailcfg.ServiceName(name)) + if err != nil { + return fmt.Errorf("error getting DNS name for VIPService %s: %w", name, err) + } + labels := certResourceLabels(pgName, domainName) + if err := r.DeleteAllOf(ctx, &rbacv1.RoleBinding{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + return fmt.Errorf("error deleting RoleBinding for domain name %s: %w", domainName, err) + } + if err := r.DeleteAllOf(ctx, &rbacv1.Role{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + return fmt.Errorf("error deleting Role for domain name %s: %w", domainName, err) + } + if err := r.DeleteAllOf(ctx, &corev1.Secret{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + return fmt.Errorf("error deleting Secret for domain name %s: %w", domainName, err) + } + return nil +} + // parseComment returns VIPService comment or nil if none found or not matching the expected format. func parseComment(vipSvc *tailscale.VIPService) (*comment, error) { if vipSvc.Comment == "" { @@ -836,3 +897,93 @@ func parseComment(vipSvc *tailscale.VIPService) (*comment, error) { func requeueInterval() time.Duration { return time.Duration(rand.N(5)+5) * time.Minute } + +// certSecretRole creates a Role that will allow proxies to manage the TLS +// Secret for the given domain. Domain must be a valid Kubernetes resource name. +func certSecretRole(pgName, namespace, domain string) *rbacv1.Role { + return &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: domain, + Namespace: namespace, + Labels: certResourceLabels(pgName, domain), + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"secrets"}, + ResourceNames: []string{domain}, + Verbs: []string{ + "get", + "list", + "patch", + "update", + }, + }, + }, + } +} + +// certSecretRoleBinding creates a RoleBinding for Role that will allow proxies +// to manage the TLS Secret for the given domain. Domain must be a valid +// Kubernetes resource name. 
+func certSecretRoleBinding(pgName, namespace, domain string) *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: domain, + Namespace: namespace, + Labels: certResourceLabels(pgName, domain), + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: pgName, + Namespace: namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: domain, + }, + } +} + +// certSecret creates a Secret that will store the TLS certificate and private +// key for the given domain. Domain must be a valid Kubernetes resource name. +func certSecret(pgName, namespace, domain string) *corev1.Secret { + labels := certResourceLabels(pgName, domain) + labels[kubetypes.LabelSecretType] = "certs" + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: domain, + Namespace: namespace, + Labels: labels, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: nil, + corev1.TLSPrivateKeyKey: nil, + }, + Type: corev1.SecretTypeTLS, + } +} + +func certResourceLabels(pgName, domain string) map[string]string { + return map[string]string{ + kubetypes.LabelManaged: "true", + "tailscale.com/proxy-group": pgName, + "tailscale.com/domain": domain, + } +} + +// dnsNameForService returns the DNS name for the given VIPService name. +func (r *HAIngressReconciler) dnsNameForService(ctx context.Context, svc tailcfg.ServiceName) (string, error) { + s := svc.WithoutPrefix() + tcd, err := r.tailnetCertDomain(ctx) + if err != nil { + return "", fmt.Errorf("error determining DNS name base: %w", err) + } + return s + "." + tcd, nil +} diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 0e90ec980..5716c0bbf 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -20,6 +20,7 @@ import ( "go.uber.org/zap" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -70,6 +71,11 @@ func TestIngressPGReconciler(t *testing.T) { verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) + // Verify cert resources were created for the first Ingress + expectEqual(t, fc, certSecret("test-pg", "operator-ns", "my-svc.ts.net")) + expectEqual(t, fc, certSecretRole("test-pg", "operator-ns", "my-svc.ts.net")) + expectEqual(t, fc, certSecretRoleBinding("test-pg", "operator-ns", "my-svc.ts.net")) + mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { ing.Annotations["tailscale.com/tags"] = "tag:custom,tag:test" }) @@ -124,6 +130,11 @@ func TestIngressPGReconciler(t *testing.T) { verifyServeConfig(t, fc, "svc:my-other-svc", false) verifyVIPService(t, ft, "svc:my-other-svc", []string{"443"}) + // Verify cert resources were created for the second Ingress + expectEqual(t, fc, certSecret("test-pg", "operator-ns", "my-other-svc.ts.net")) + expectEqual(t, fc, certSecretRole("test-pg", "operator-ns", "my-other-svc.ts.net")) + expectEqual(t, fc, certSecretRoleBinding("test-pg", "operator-ns", "my-other-svc.ts.net")) + // Verify first Ingress is still working verifyServeConfig(t, fc, "svc:my-svc", false) verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) @@ -160,6 +171,9 @@ func TestIngressPGReconciler(t *testing.T) { } verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) + expectMissing[corev1.Secret](t, fc, 
"operator-ns", "my-other-svc.ts.net") + expectMissing[rbacv1.Role](t, fc, "operator-ns", "my-other-svc.ts.net") + expectMissing[rbacv1.RoleBinding](t, fc, "operator-ns", "my-other-svc.ts.net") // Delete the first Ingress and verify cleanup if err := fc.Delete(context.Background(), ing); err != nil { @@ -186,6 +200,11 @@ func TestIngressPGReconciler(t *testing.T) { t.Error("serve config not cleaned up") } verifyTailscaledConfig(t, fc, nil) + + // Add verification that cert resources were cleaned up + expectMissing[corev1.Secret](t, fc, "operator-ns", "my-svc.ts.net") + expectMissing[rbacv1.Role](t, fc, "operator-ns", "my-svc.ts.net") + expectMissing[rbacv1.RoleBinding](t, fc, "operator-ns", "my-svc.ts.net") } func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { diff --git a/cmd/k8s-operator/metrics_resources.go b/cmd/k8s-operator/metrics_resources.go index 8516cf8be..0579e3466 100644 --- a/cmd/k8s-operator/metrics_resources.go +++ b/cmd/k8s-operator/metrics_resources.go @@ -19,6 +19,7 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" ) const ( @@ -222,7 +223,7 @@ func metricsResourceName(stsName string) string { // proxy. func metricsResourceLabels(opts *metricsOpts) map[string]string { lbls := map[string]string{ - LabelManaged: "true", + kubetypes.LabelManaged: "true", labelMetricsTarget: opts.proxyStsName, labelPromProxyType: opts.proxyType, labelPromProxyParentName: opts.proxyLabels[LabelParentName], diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index ff2a959bd..b0f0b3576 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -637,8 +637,8 @@ func enqueueAllIngressEgressProxySvcsInNS(ns string, cl client.Client, logger *z // Get all headless Services for proxies configured using Service. svcProxyLabels := map[string]string{ - LabelManaged: "true", - LabelParentType: "svc", + kubetypes.LabelManaged: "true", + LabelParentType: "svc", } svcHeadlessSvcList := &corev1.ServiceList{} if err := cl.List(ctx, svcHeadlessSvcList, client.InNamespace(ns), client.MatchingLabels(svcProxyLabels)); err != nil { @@ -651,8 +651,8 @@ func enqueueAllIngressEgressProxySvcsInNS(ns string, cl client.Client, logger *z // Get all headless Services for proxies configured using Ingress. ingProxyLabels := map[string]string{ - LabelManaged: "true", - LabelParentType: "ingress", + kubetypes.LabelManaged: "true", + LabelParentType: "ingress", } ingHeadlessSvcList := &corev1.ServiceList{} if err := cl.List(ctx, ingHeadlessSvcList, client.InNamespace(ns), client.MatchingLabels(ingProxyLabels)); err != nil { @@ -719,7 +719,7 @@ func dnsRecordsReconcilerIngressHandler(ns string, isDefaultLoadBalancer bool, c func isManagedResource(o client.Object) bool { ls := o.GetLabels() - return ls[LabelManaged] == "true" + return ls[kubetypes.LabelManaged] == "true" } func isManagedByType(o client.Object, typ string) bool { @@ -956,7 +956,7 @@ func egressPodsHandler(_ context.Context, o client.Object) []reconcile.Request { // returns reconciler requests for all egress EndpointSlices for that ProxyGroup. 
func egressEpsFromPGPods(cl client.Client, ns string) handler.MapFunc { return func(_ context.Context, o client.Object) []reconcile.Request { - if v, ok := o.GetLabels()[LabelManaged]; !ok || v != "true" { + if v, ok := o.GetLabels()[kubetypes.LabelManaged]; !ok || v != "true" { return nil } // TODO(irbekrm): for now this is good enough as all ProxyGroups are egress. Add a type check once we @@ -976,13 +976,13 @@ func egressEpsFromPGPods(cl client.Client, ns string) handler.MapFunc { // returns reconciler requests for all egress EndpointSlices for that ProxyGroup. func egressEpsFromPGStateSecrets(cl client.Client, ns string) handler.MapFunc { return func(_ context.Context, o client.Object) []reconcile.Request { - if v, ok := o.GetLabels()[LabelManaged]; !ok || v != "true" { + if v, ok := o.GetLabels()[kubetypes.LabelManaged]; !ok || v != "true" { return nil } if parentType := o.GetLabels()[LabelParentType]; parentType != "proxygroup" { return nil } - if secretType := o.GetLabels()[labelSecretType]; secretType != "state" { + if secretType := o.GetLabels()[kubetypes.LabelSecretType]; secretType != "state" { return nil } pg, ok := o.GetLabels()[LabelParentName] @@ -999,7 +999,7 @@ func egressSvcFromEps(_ context.Context, o client.Object) []reconcile.Request { if typ := o.GetLabels()[labelSvcType]; typ != typeEgress { return nil } - if v, ok := o.GetLabels()[LabelManaged]; !ok || v != "true" { + if v, ok := o.GetLabels()[kubetypes.LabelManaged]; !ok || v != "true" { return nil } svcName, ok := o.GetLabels()[LabelParentName] @@ -1046,13 +1046,13 @@ func ingressesFromPGStateSecret(cl client.Client, logger *zap.SugaredLogger) han logger.Infof("[unexpected] ProxyGroup handler triggered for an object that is not a ProxyGroup") return nil } - if secret.ObjectMeta.Labels[LabelManaged] != "true" { + if secret.ObjectMeta.Labels[kubetypes.LabelManaged] != "true" { return nil } if secret.ObjectMeta.Labels[LabelParentType] != "proxygroup" { return nil } - if secret.ObjectMeta.Labels[labelSecretType] != "state" { + if secret.ObjectMeta.Labels[kubetypes.LabelSecretType] != "state" { return nil } pgName, ok := secret.ObjectMeta.Labels[LabelParentName] @@ -1183,9 +1183,9 @@ func podsFromEgressEps(cl client.Client, logger *zap.SugaredLogger, ns string) h return nil } podLabels := map[string]string{ - LabelManaged: "true", - LabelParentType: "proxygroup", - LabelParentName: eps.Labels[labelProxyGroup], + kubetypes.LabelManaged: "true", + LabelParentType: "proxygroup", + LabelParentName: eps.Labels[labelProxyGroup], } podList := &corev1.PodList{} if err := cl.List(ctx, podList, client.InNamespace(ns), diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 73c795bb3..175003ac7 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1387,10 +1387,10 @@ func Test_serviceHandlerForIngress(t *testing.T) { Name: "headless-1", Namespace: "tailscale", Labels: map[string]string{ - LabelManaged: "true", - LabelParentName: "ing-1", - LabelParentNamespace: "ns-1", - LabelParentType: "ingress", + kubetypes.LabelManaged: "true", + LabelParentName: "ing-1", + LabelParentNamespace: "ns-1", + LabelParentType: "ingress", }, }, } diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 8c17c7b6b..16deea278 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -178,7 +178,15 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string corev1.EnvVar{ Name: 
"TS_SERVE_CONFIG", Value: fmt.Sprintf("/etc/proxies/%s", serveConfigKey), - }) + }, + corev1.EnvVar{ + // Run proxies in cert share mode to + // ensure that only one TLS cert is + // issued for an HA Ingress. + Name: "TS_EXPERIMENTAL_CERT_SHARE", + Value: "true", + }, + ) } return append(c.Env, envs...) }() @@ -225,6 +233,13 @@ func pgRole(pg *tsapi.ProxyGroup, namespace string) *rbacv1.Role { OwnerReferences: pgOwnerReference(pg), }, Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"secrets"}, + Verbs: []string{ + "list", + }, + }, { APIGroups: []string{""}, Resources: []string{"secrets"}, @@ -320,7 +335,7 @@ func pgIngressCM(pg *tsapi.ProxyGroup, namespace string) *corev1.ConfigMap { func pgSecretLabels(pgName, secretType string) map[string]string { return pgLabels(pgName, map[string]string{ - labelSecretType: secretType, // "config" or "state". + kubetypes.LabelSecretType: secretType, // "config" or "state". }) } @@ -330,7 +345,7 @@ func pgLabels(pgName string, customLabels map[string]string) map[string]string { l[k] = v } - l[LabelManaged] = "true" + l[kubetypes.LabelManaged] = "true" l[LabelParentType] = "proxygroup" l[LabelParentName] = pgName diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 6829b3929..5b690a485 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -247,7 +247,6 @@ func TestProxyGroup(t *testing.T) { // The fake client does not clean up objects whose owner has been // deleted, so we can't test for the owned resources getting deleted. }) - } func TestProxyGroupTypes(t *testing.T) { @@ -417,6 +416,7 @@ func TestProxyGroupTypes(t *testing.T) { } verifyEnvVar(t, sts, "TS_INTERNAL_APP", kubetypes.AppProxyGroupIngress) verifyEnvVar(t, sts, "TS_SERVE_CONFIG", "/etc/proxies/serve-config.json") + verifyEnvVar(t, sts, "TS_EXPERIMENTAL_CERT_SHARE", "true") // Verify ConfigMap volume mount cmName := fmt.Sprintf("%s-ingress-config", pg.Name) diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 0bc9d6fb9..6327a073b 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -44,11 +44,9 @@ const ( // Labels that the operator sets on StatefulSets and Pods. If you add a // new label here, do also add it to tailscaleManagedLabels var to // ensure that it does not get overwritten by ProxyClass configuration. - LabelManaged = "tailscale.com/managed" LabelParentType = "tailscale.com/parent-resource-type" LabelParentName = "tailscale.com/parent-resource" LabelParentNamespace = "tailscale.com/parent-resource-ns" - labelSecretType = "tailscale.com/secret-type" // "config" or "state". // LabelProxyClass can be set by users on tailscale Ingresses and Services that define cluster ingress or // cluster egress, to specify that configuration in this ProxyClass should be applied to resources created for @@ -108,7 +106,7 @@ const ( var ( // tailscaleManagedLabels are label keys that tailscale operator sets on StatefulSets and Pods. - tailscaleManagedLabels = []string{LabelManaged, LabelParentType, LabelParentName, LabelParentNamespace, "app"} + tailscaleManagedLabels = []string{kubetypes.LabelManaged, LabelParentType, LabelParentName, LabelParentNamespace, "app"} // tailscaleManagedAnnotations are annotation keys that tailscale operator sets on StatefulSets and Pods. 
tailscaleManagedAnnotations = []string{podAnnotationLastSetClusterIP, podAnnotationLastSetTailnetTargetIP, podAnnotationLastSetTailnetTargetFQDN, podAnnotationLastSetConfigFileHash} ) diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index 3d0cecc04..35c512c8c 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -21,6 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/yaml" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" "tailscale.com/types/ptr" ) @@ -156,8 +157,8 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { // Set a couple additional fields so we can test that we don't // mistakenly override those. labels := map[string]string{ - LabelManaged: "true", - LabelParentName: "foo", + kubetypes.LabelManaged: "true", + LabelParentName: "foo", } annots := map[string]string{ podAnnotationLastSetClusterIP: "1.2.3.4", @@ -303,28 +304,28 @@ func Test_mergeStatefulSetLabelsOrAnnots(t *testing.T) { }{ { name: "no custom labels specified and none present in current labels, return current labels", - current: map[string]string{LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, - want: map[string]string{LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, + current: map[string]string{kubetypes.LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, + want: map[string]string{kubetypes.LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, managed: tailscaleManagedLabels, }, { name: "no custom labels specified, but some present in current labels, return tailscale managed labels only from the current labels", - current: map[string]string{"foo": "bar", "something.io/foo": "bar", LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, - want: map[string]string{LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, + current: map[string]string{"foo": "bar", "something.io/foo": "bar", kubetypes.LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, + want: map[string]string{kubetypes.LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, managed: tailscaleManagedLabels, }, { name: "custom labels specified, current labels only contain tailscale managed labels, return a union of both", - current: map[string]string{LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, + current: map[string]string{kubetypes.LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, custom: map[string]string{"foo": "bar", "something.io/foo": "bar"}, - want: map[string]string{"foo": "bar", "something.io/foo": "bar", LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, + want: map[string]string{"foo": "bar", "something.io/foo": "bar", kubetypes.LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, managed: tailscaleManagedLabels, }, { name: "custom labels specified, current labels contain tailscale managed labels and custom labels, some of which re not present in the new custom labels, return a union of managed labels and the desired custom labels", - current: 
map[string]string{"foo": "bar", "bar": "baz", "app": "1234", LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, + current: map[string]string{"foo": "bar", "bar": "baz", "app": "1234", kubetypes.LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, custom: map[string]string{"foo": "bar", "something.io/foo": "bar"}, - want: map[string]string{"foo": "bar", "something.io/foo": "bar", "app": "1234", LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, + want: map[string]string{"foo": "bar", "something.io/foo": "bar", "app": "1234", kubetypes.LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, managed: tailscaleManagedLabels, }, { diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index 70c810b25..d6a6f440f 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -84,10 +84,10 @@ func childResourceLabels(name, ns, typ string) map[string]string { // proxying. Instead, we have to do our own filtering and tracking with // labels. return map[string]string{ - LabelManaged: "true", - LabelParentName: name, - LabelParentNamespace: ns, - LabelParentType: typ, + kubetypes.LabelManaged: "true", + LabelParentName: name, + LabelParentNamespace: ns, + LabelParentType: typ, } } diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 6b1a4f85b..f47f96e44 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -32,6 +32,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" "tailscale.com/types/ptr" "tailscale.com/util/mak" @@ -563,10 +564,10 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec func findGenName(t *testing.T, client client.Client, ns, name, typ string) (full, noSuffix string) { t.Helper() labels := map[string]string{ - LabelManaged: "true", - LabelParentName: name, - LabelParentNamespace: ns, - LabelParentType: typ, + kubetypes.LabelManaged: "true", + LabelParentName: name, + LabelParentNamespace: ns, + LabelParentType: typ, } s, err := getSingleObject[corev1.Secret](context.Background(), client, "operator-ns", labels) if err != nil { diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 79e66d357..ed37f06c2 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -283,7 +283,7 @@ func (s *Store) updateSecret(data map[string][]byte, secretName string) (err err } } if err := s.client.JSONPatchResource(ctx, secretName, kubeclient.TypeSecrets, m); err != nil { - return fmt.Errorf("error patching Secret %s: %w", s.secretName, err) + return fmt.Errorf("error patching Secret %s: %w", secretName, err) } return nil } From 25d5f78c6efef25eec0f6f78a651bafb61a3833c Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 19 Mar 2025 09:21:37 -0400 Subject: [PATCH 0614/1708] net/dns: expose a function for recompiling the DNS configuration (#15346) updates tailscale/corp#27145 We require a means to trigger a recompilation of the DNS configuration to pick up new nameservers for platforms where we blend the interface nameservers from the OS into our DNS config. 
Notably, on Darwin, the only API we have at our disposal will, in rare instances, return a transient error when querying the interface nameservers on a link change if they have not been set when we get the AF_ROUTE messages for the link update. There's a corresponding change in corp for Darwin clients, to track the interface namservers during NEPathMonitor events, and call this when the nameservers change. This will also fix the slightly more obscure bug of changing nameservers while tailscaled is running. That change can now be reflected in magicDNS without having to stop the client. Signed-off-by: Jonathan Nobels --- net/dns/manager.go | 36 ++++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/net/dns/manager.go b/net/dns/manager.go index ebf91811a..1e9eb7fe7 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -35,6 +35,9 @@ import ( var ( errFullQueue = errors.New("request queue full") + // ErrNoDNSConfig is returned by RecompileDNSConfig when the Manager + // has no existing DNS configuration. + ErrNoDNSConfig = errors.New("no DNS configuration") ) // maxActiveQueries returns the maximal number of DNS requests that can @@ -91,21 +94,18 @@ func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, } // Rate limit our attempts to correct our DNS configuration. + // This is done on incoming queries, we don't want to spam it. limiter := rate.NewLimiter(1.0/5.0, 1) // This will recompile the DNS config, which in turn will requery the system // DNS settings. The recovery func should triggered only when we are missing // upstream nameservers and require them to forward a query. m.resolver.SetMissingUpstreamRecovery(func() { - m.mu.Lock() - defer m.mu.Unlock() - if m.config == nil { - return - } - if limiter.Allow() { - m.logf("DNS resolution failed due to missing upstream nameservers. Recompiling DNS configuration.") - m.setLocked(*m.config) + m.logf("resolution failed due to missing upstream nameservers. Recompiling DNS configuration.") + if err := m.RecompileDNSConfig(); err != nil { + m.logf("config recompilation failed: %v", err) + } } }) @@ -117,6 +117,26 @@ func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, // Resolver returns the Manager's DNS Resolver. func (m *Manager) Resolver() *resolver.Resolver { return m.resolver } +// RecompileDNSConfig sets the DNS config to the current value, which has +// the side effect of re-querying the OS's interface nameservers. This should be used +// on platforms where the interface nameservers can change. Darwin, for example, +// where the nameservers aren't always available when we process a major interface +// change event, or platforms where the nameservers may change while tunnel is up. +// +// This should be called if it is determined that [OSConfigurator.GetBaseConfig] may +// give a better or different result than when [Manager.Set] was last called. The +// logic for making that determination is up to the caller. +// +// It returns [ErrNoDNSConfig] if the [Manager] has no existing DNS configuration. 
+func (m *Manager) RecompileDNSConfig() error { + m.mu.Lock() + defer m.mu.Unlock() + if m.config == nil { + return ErrNoDNSConfig + } + return m.setLocked(*m.config) +} + func (m *Manager) Set(cfg Config) error { m.mu.Lock() defer m.mu.Unlock() From 8d84720edb471c639e0f6de3addf3490e78b7748 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 19 Mar 2025 06:49:36 -0700 Subject: [PATCH 0615/1708] cmd/k8s-operator: update ProxyGroup config Secrets instead of patch (#15353) There was a flaky failure case where renaming a TLS hostname for an ingress might leave the old hostname dangling in tailscaled config. This happened when the proxygroup reconciler loop had an outdated resource version of the config Secret in its cache after the ingress-pg-reconciler loop had very recently written it to delete the old hostname. As the proxygroup reconciler then did a patch, there was no conflict and it reinstated the old hostname. This commit updates the patch to an update operation so that if the resource version is out of date it will fail with an optimistic lock error. It also checks for equality to reduce the likelihood that we make the update API call in the first place, because most of the time the proxygroup reconciler is not even making an update to the Secret in the case that the hostname has changed. Updates tailscale/corp#24795 Change-Id: Ie23a97440063976c9a8475d24ab18253e1f89050 Signed-off-by: Tom Proctor --- cmd/k8s-operator/proxygroup.go | 16 +++++++++------- cmd/k8s-operator/proxygroup_test.go | 16 +++------------- 2 files changed, 12 insertions(+), 20 deletions(-) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index c961c0471..112e5e2b0 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -461,7 +461,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p var existingCfgSecret *corev1.Secret // unmodified copy of secret if err := r.Get(ctx, client.ObjectKeyFromObject(cfgSecret), cfgSecret); err == nil { - logger.Debugf("secret %s/%s already exists", cfgSecret.GetNamespace(), cfgSecret.GetName()) + logger.Debugf("Secret %s/%s already exists", cfgSecret.GetNamespace(), cfgSecret.GetName()) existingCfgSecret = cfgSecret.DeepCopy() } else if !apierrors.IsNotFound(err) { return "", err @@ -469,7 +469,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p var authKey string if existingCfgSecret == nil { - logger.Debugf("creating authkey for new ProxyGroup proxy") + logger.Debugf("Creating authkey for new ProxyGroup proxy") tags := pg.Spec.Tags.Stringify() if len(tags) == 0 { tags = r.defaultTags @@ -490,7 +490,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p if err != nil { return "", fmt.Errorf("error marshalling tailscaled config: %w", err) } - mak.Set(&cfgSecret.StringData, tsoperator.TailscaledConfigFileName(cap), string(cfgJSON)) + mak.Set(&cfgSecret.Data, tsoperator.TailscaledConfigFileName(cap), cfgJSON) } // The config sha256 sum is a value for a hash annotation used to trigger @@ -520,12 +520,14 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } if existingCfgSecret != nil { - logger.Debugf("patching the existing ProxyGroup config Secret %s", cfgSecret.Name) - if err := r.Patch(ctx, cfgSecret, client.MergeFrom(existingCfgSecret)); err != nil { - return "", err + if !apiequality.Semantic.DeepEqual(existingCfgSecret, cfgSecret) { + logger.Debugf("Updating the existing ProxyGroup config Secret %s", 
cfgSecret.Name) + if err := r.Update(ctx, cfgSecret); err != nil { + return "", err + } } } else { - logger.Debugf("creating a new config Secret %s for the ProxyGroup", cfgSecret.Name) + logger.Debugf("Creating a new config Secret %s for the ProxyGroup", cfgSecret.Name) if err := r.Create(ctx, cfgSecret); err != nil { return "", err } diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 5b690a485..1f1a39ab0 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -475,8 +475,6 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { Name: pgConfigSecretName(pgName, 0), Namespace: tsNamespace, }, - // Write directly to Data because the fake client doesn't copy the write-only - // StringData field across to Data for us. Data: map[string][]byte{ tsoperator.TailscaledConfigFileName(106): existingConfigBytes, }, @@ -514,10 +512,10 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { Namespace: tsNamespace, ResourceVersion: "2", }, - StringData: map[string]string{ - tsoperator.TailscaledConfigFileName(106): string(expectedConfigBytes), + Data: map[string][]byte{ + tsoperator.TailscaledConfigFileName(106): expectedConfigBytes, }, - }, omitSecretData) + }) } func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, wantEgress int) { @@ -620,11 +618,3 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG }) } } - -// The operator mostly writes to StringData and reads from Data, but the fake -// client doesn't copy StringData across to Data on write. When comparing actual -// vs expected Secrets, use this function to only check what the operator writes -// to StringData. -func omitSecretData(secret *corev1.Secret) { - secret.Data = nil -} From 3a2c92f08eac8cd8f50356ff288e40a28636ee42 Mon Sep 17 00:00:00 2001 From: klyubin Date: Wed, 19 Mar 2025 10:46:32 -0600 Subject: [PATCH 0616/1708] web: support Host 100.100.100.100:80 in tailscaled web server This makes the web server running inside tailscaled on 100.100.100.100:80 support requests with `Host: 100.100.100.100:80` and its IPv6 equivalent. Prior to this commit, the web server replied to such requests with a redirect to the node's Tailscale IP:5252. 
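The fix itself is a small Host-header normalization: strip a literal ":80" default-port suffix before comparing against the two service addresses. A standalone sketch of that comparison follows; the real handler in web.go uses the tsaddr constants rather than string literals:

    package example

    import "strings"

    // isServiceHost reports whether a request Host matches the quad-100
    // service address (or its IPv6 equivalent), with or without an explicit
    // ":80" port. Sketch only, for illustration.
    func isServiceHost(host string) bool {
    	host = strings.TrimSuffix(host, ":80")
    	return host == "100.100.100.100" || host == "[fd7a:115c:a1e0::53]"
    }
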
Fixes https://github.com/tailscale/tailscale/issues/14415 Signed-off-by: Alex Klyubin --- client/web/web.go | 3 ++- client/web/web_test.go | 10 ++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/client/web/web.go b/client/web/web.go index e9810ccd0..6eccdadcf 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -335,7 +335,8 @@ func (s *Server) requireTailscaleIP(w http.ResponseWriter, r *http.Request) (han ipv6ServiceHost = "[" + tsaddr.TailscaleServiceIPv6String + "]" ) // allow requests on quad-100 (or ipv6 equivalent) - if r.Host == ipv4ServiceHost || r.Host == ipv6ServiceHost { + host := strings.TrimSuffix(r.Host, ":80") + if host == ipv4ServiceHost || host == ipv6ServiceHost { return false } diff --git a/client/web/web_test.go b/client/web/web_test.go index 291356260..334b403a6 100644 --- a/client/web/web_test.go +++ b/client/web/web_test.go @@ -1177,6 +1177,16 @@ func TestRequireTailscaleIP(t *testing.T) { target: "http://[fd7a:115c:a1e0::53]/", wantHandled: false, }, + { + name: "quad-100:80", + target: "http://100.100.100.100:80/", + wantHandled: false, + }, + { + name: "ipv6-service-addr:80", + target: "http://[fd7a:115c:a1e0::53]:80/", + wantHandled: false, + }, } for _, tt := range tests { From f34e08e186ac00c6e86266080c47630f7e5a81d2 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 20 Mar 2025 14:40:36 +0000 Subject: [PATCH 0617/1708] ipn: ensure that conffile is source of truth for advertised services. (#15361) If conffile is used to configure tailscaled, always update currently advertised services from conffile, even if they are empty in the conffile, to ensure that it is possible to transition to a state where no services are advertised. Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- ipn/conf.go | 8 +- ipn/ipnlocal/local_test.go | 145 +++++++++++++++++++++++++++++++------ 2 files changed, 130 insertions(+), 23 deletions(-) diff --git a/ipn/conf.go b/ipn/conf.go index addeea79e..2c9fb2fd1 100644 --- a/ipn/conf.go +++ b/ipn/conf.go @@ -145,9 +145,15 @@ func (c *ConfigVAlpha) ToPrefs() (MaskedPrefs, error) { mp.AppConnector = *c.AppConnector mp.AppConnectorSet = true } + // Configfile should be the source of truth for whether this node + // advertises any services. We need to ensure that each reload updates + // currently advertised services as else the transition from 'some + // services are advertised' to 'advertised services are empty/unset in + // conffile' would have no effect (especially given that an empty + // service slice would be omitted from the JSON config). + mp.AdvertiseServicesSet = true if c.AdvertiseServices != nil { mp.AdvertiseServices = c.AdvertiseServices - mp.AdvertiseServicesSet = true } return mp, nil } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index aa9137275..bdccdb53d 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4745,32 +4745,133 @@ func TestLoginNotifications(t *testing.T) { // TestConfigFileReload tests that the LocalBackend reloads its configuration // when the configuration file changes. 
func TestConfigFileReload(t *testing.T) { - cfg1 := `{"Hostname": "foo", "Version": "alpha0"}` - f := filepath.Join(t.TempDir(), "cfg") - must.Do(os.WriteFile(f, []byte(cfg1), 0600)) - sys := new(tsd.System) - sys.InitialConfig = must.Get(conffile.Load(f)) - lb := newTestLocalBackendWithSys(t, sys) - must.Do(lb.Start(ipn.Options{})) - - lb.mu.Lock() - hn := lb.hostinfo.Hostname - lb.mu.Unlock() - if hn != "foo" { - t.Fatalf("got %q; want %q", hn, "foo") + type testCase struct { + name string + initial *conffile.Config + updated *conffile.Config + checkFn func(*testing.T, *LocalBackend) } - cfg2 := `{"Hostname": "bar", "Version": "alpha0"}` - must.Do(os.WriteFile(f, []byte(cfg2), 0600)) - if !must.Get(lb.ReloadConfig()) { - t.Fatal("reload failed") + tests := []testCase{ + { + name: "hostname_change", + initial: &conffile.Config{ + Parsed: ipn.ConfigVAlpha{ + Version: "alpha0", + Hostname: ptr.To("initial-host"), + }, + }, + updated: &conffile.Config{ + Parsed: ipn.ConfigVAlpha{ + Version: "alpha0", + Hostname: ptr.To("updated-host"), + }, + }, + checkFn: func(t *testing.T, b *LocalBackend) { + if got := b.Prefs().Hostname(); got != "updated-host" { + t.Errorf("hostname = %q; want updated-host", got) + } + }, + }, + { + name: "start_advertising_services", + initial: &conffile.Config{ + Parsed: ipn.ConfigVAlpha{ + Version: "alpha0", + }, + }, + updated: &conffile.Config{ + Parsed: ipn.ConfigVAlpha{ + Version: "alpha0", + AdvertiseServices: []string{"svc:abc", "svc:def"}, + }, + }, + checkFn: func(t *testing.T, b *LocalBackend) { + if got := b.Prefs().AdvertiseServices().AsSlice(); !reflect.DeepEqual(got, []string{"svc:abc", "svc:def"}) { + t.Errorf("AdvertiseServices = %v; want [svc:abc, svc:def]", got) + } + }, + }, + { + name: "change_advertised_services", + initial: &conffile.Config{ + Parsed: ipn.ConfigVAlpha{ + Version: "alpha0", + AdvertiseServices: []string{"svc:abc", "svc:def"}, + }, + }, + updated: &conffile.Config{ + Parsed: ipn.ConfigVAlpha{ + Version: "alpha0", + AdvertiseServices: []string{"svc:abc", "svc:ghi"}, + }, + }, + checkFn: func(t *testing.T, b *LocalBackend) { + if got := b.Prefs().AdvertiseServices().AsSlice(); !reflect.DeepEqual(got, []string{"svc:abc", "svc:ghi"}) { + t.Errorf("AdvertiseServices = %v; want [svc:abc, svc:ghi]", got) + } + }, + }, + { + name: "unset_advertised_services", + initial: &conffile.Config{ + Parsed: ipn.ConfigVAlpha{ + Version: "alpha0", + AdvertiseServices: []string{"svc:abc"}, + }, + }, + updated: &conffile.Config{ + Parsed: ipn.ConfigVAlpha{ + Version: "alpha0", + }, + }, + checkFn: func(t *testing.T, b *LocalBackend) { + if b.Prefs().AdvertiseServices().Len() != 0 { + t.Errorf("got %d AdvertiseServices wants none", b.Prefs().AdvertiseServices().Len()) + } + }, + }, } - lb.mu.Lock() - hn = lb.hostinfo.Hostname - lb.mu.Unlock() - if hn != "bar" { - t.Fatalf("got %q; want %q", hn, "bar") + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "tailscale.conf") + + // Write initial config + initialJSON, err := json.Marshal(tc.initial.Parsed) + if err != nil { + t.Fatal(err) + } + if err := os.WriteFile(path, initialJSON, 0644); err != nil { + t.Fatal(err) + } + + // Create backend with initial config + tc.initial.Path = path + tc.initial.Raw = initialJSON + sys := &tsd.System{ + InitialConfig: tc.initial, + } + b := newTestLocalBackendWithSys(t, sys) + + // Update config file + updatedJSON, err := json.Marshal(tc.updated.Parsed) + if err != nil { + t.Fatal(err) + } + if err := 
os.WriteFile(path, updatedJSON, 0644); err != nil { + t.Fatal(err) + } + + // Trigger reload + if ok, err := b.ReloadConfig(); !ok || err != nil { + t.Fatalf("ReloadConfig() = %v, %v; want true, nil", ok, err) + } + + // Check outcome + tc.checkFn(t, b) + }) } } From 984cd1cab0521a2f64fe6b17705f3438916857a3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 20 Mar 2025 07:39:51 -0700 Subject: [PATCH 0618/1708] cmd/tailscale: add CLI debug command to do raw LocalAPI requests This adds a portable way to do a raw LocalAPI request without worrying about the Unix-vs-macOS-vs-Windows ways of hitting the LocalAPI server. (It was already possible but tedious with 'tailscale debug local-creds') Updates tailscale/corp#24690 Change-Id: I0828ca55edaedf0565c8db192c10f24bebb95f1b Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/debug.go | 86 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index ce5edd8d3..9c77570d5 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -136,6 +136,17 @@ func debugCmd() *ffcli.Command { Exec: runLocalCreds, ShortHelp: "Print how to access Tailscale LocalAPI", }, + { + Name: "localapi", + ShortUsage: "tailscale debug localapi [] []", + Exec: runLocalAPI, + ShortHelp: "Call a LocalAPI method directly", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("localapi") + fs.BoolVar(&localAPIFlags.verbose, "v", false, "verbose; dump HTTP headers") + return fs + })(), + }, { Name: "restun", ShortUsage: "tailscale debug restun", @@ -451,6 +462,81 @@ func runLocalCreds(ctx context.Context, args []string) error { return nil } +func looksLikeHTTPMethod(s string) bool { + if len(s) > len("OPTIONS") { + return false + } + for _, r := range s { + if r < 'A' || r > 'Z' { + return false + } + } + return true +} + +var localAPIFlags struct { + verbose bool +} + +func runLocalAPI(ctx context.Context, args []string) error { + if len(args) == 0 { + return errors.New("expected at least one argument") + } + method := "GET" + if looksLikeHTTPMethod(args[0]) { + method = args[0] + args = args[1:] + if len(args) == 0 { + return errors.New("expected at least one argument after method") + } + } + path := args[0] + if !strings.HasPrefix(path, "/localapi/") { + if !strings.Contains(path, "/") { + path = "/localapi/v0/" + path + } else { + path = "/localapi/" + path + } + } + + var body io.Reader + if len(args) > 1 { + if args[1] == "-" { + fmt.Fprintf(Stderr, "# reading request body from stdin...\n") + all, err := io.ReadAll(os.Stdin) + if err != nil { + return fmt.Errorf("reading Stdin: %q", err) + } + body = bytes.NewReader(all) + } else { + body = strings.NewReader(args[1]) + } + } + req, err := http.NewRequest(method, "http://local-tailscaled.sock"+path, body) + if err != nil { + return err + } + fmt.Fprintf(Stderr, "# doing request %s %s\n", method, path) + + res, err := localClient.DoLocalRequest(req) + if err != nil { + return err + } + is2xx := res.StatusCode >= 200 && res.StatusCode <= 299 + if localAPIFlags.verbose { + res.Write(Stdout) + } else { + if !is2xx { + fmt.Fprintf(Stderr, "# Response status %s\n", res.Status) + } + io.Copy(Stdout, res.Body) + } + if is2xx { + return nil + } + return errors.New(res.Status) +} + type localClientRoundTripper struct{} func (localClientRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { From e07c1573f6458af8054e9009eb86e068411834f0 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 17 Mar 2025 14:58:25 
-0500 Subject: [PATCH 0619/1708] ipn/ipnlocal: do not reset the netmap and packet filter in (*LocalBackend).Start() Resetting LocalBackend's netmap without also unconfiguring wgengine to reset routes, DNS, and the killswitch firewall rules may cause connectivity issues until a new netmap is received. In some cases, such as when bootstrap DNS servers are inaccessible due to network restrictions or other reasons, or if the control plane is experiencing issues, this can result in a complete loss of connectivity until the user disconnects and reconnects to Tailscale. As LocalBackend handles state resets in (*LocalBackend).resetForProfileChangeLockedOnEntry(), and this includes resetting the netmap, resetting the current netmap in (*LocalBackend).Start() is not necessary. Moreover, it's harmful if (*LocalBackend).Start() is called more than once for the same profile. In this PR, we update resetForProfileChangeLockedOnEntry() to reset the packet filter and remove the redundant resetting of the netmap and packet filter from Start(). We also update the state machine tests and revise comments that became inaccurate due to previous test updates. Updates tailscale/corp#27173 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 3 +-- ipn/ipnlocal/local_test.go | 9 +++++++++ ipn/ipnlocal/state_test.go | 32 ++++++++++++++++++-------------- 3 files changed, 28 insertions(+), 16 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index f866527d1..4b47f20c4 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2380,12 +2380,10 @@ func (b *LocalBackend) Start(opts ipn.Options) error { } b.applyPrefsToHostinfoLocked(hostinfo, prefs) - b.setNetMapLocked(nil) persistv := prefs.Persist().AsStruct() if persistv == nil { persistv = new(persist.Persist) } - b.updateFilterLocked(nil, ipn.PrefsView{}) if b.portpoll != nil { b.portpollOnce.Do(func() { @@ -7531,6 +7529,7 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err return nil } b.setNetMapLocked(nil) // Reset netmap. + b.updateFilterLocked(nil, ipn.PrefsView{}) // Reset the NetworkMap in the engine b.e.SetNetworkMap(new(netmap.NetworkMap)) if prevCC := b.resetControlClientLocked(); prevCC != nil { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index bdccdb53d..5b74b8180 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1510,6 +1510,15 @@ func TestReconfigureAppConnector(t *testing.T) { func TestBackfillAppConnectorRoutes(t *testing.T) { // Create backend with an empty app connector. b := newTestBackend(t) + // newTestBackend creates a backend with a non-nil netmap, + // but this test requires a nil netmap. + // Otherwise, instead of backfilling, [LocalBackend.reconfigAppConnectorLocked] + // uses the domains and routes from netmap's [appctype.AppConnectorAttr]. + // Additionally, a non-nil netmap makes reconfigAppConnectorLocked + // asynchronous, resulting in a flaky test. + // Therefore, we set the netmap to nil to simulate a fresh backend start + // or a profile switch where the netmap is not yet available. + b.setNetMapLocked(nil) if err := b.Start(ipn.Options{}); err != nil { t.Fatal(err) } diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 1b3b43af6..a4180de86 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -735,12 +735,10 @@ func TestStateMachine(t *testing.T) { // b.Shutdown() explicitly ourselves. 
previousCC.assertShutdown(false) - // Note: unpause happens because ipn needs to get at least one netmap - // on startup, otherwise UIs can't show the node list, login - // name, etc when in state ipn.Stopped. - // Arguably they shouldn't try. But they currently do. nn := notifies.drain(2) - cc.assertCalls("New", "Login") + // We already have a netmap for this node, + // and WantRunning is false, so cc should be paused. + cc.assertCalls("New", "Login", "pause") c.Assert(nn[0].Prefs, qt.IsNotNil) c.Assert(nn[1].State, qt.IsNotNil) c.Assert(nn[0].Prefs.WantRunning(), qt.IsFalse) @@ -751,7 +749,11 @@ func TestStateMachine(t *testing.T) { // When logged in but !WantRunning, ipn leaves us unpaused to retrieve // the first netmap. Simulate that netmap being received, after which // it should pause us, to avoid wasting CPU retrieving unnecessarily - // additional netmap updates. + // additional netmap updates. Since our LocalBackend instance already + // has a netmap, we will reset it to nil to simulate the first netmap + // retrieval. + b.setNetMapLocked(nil) + cc.assertCalls("unpause") // // TODO: really the various GUIs and prefs should be refactored to // not require the netmap structure at all when starting while @@ -853,7 +855,7 @@ func TestStateMachine(t *testing.T) { // The last test case is the most common one: restarting when both // logged in and WantRunning. t.Logf("\n\nStart5") - notifies.expect(1) + notifies.expect(2) c.Assert(b.Start(ipn.Options{}), qt.IsNil) { // NOTE: cc.Shutdown() is correct here, since we didn't call @@ -861,30 +863,32 @@ func TestStateMachine(t *testing.T) { previousCC.assertShutdown(false) cc.assertCalls("New", "Login") - nn := notifies.drain(1) + nn := notifies.drain(2) cc.assertCalls() c.Assert(nn[0].Prefs, qt.IsNotNil) c.Assert(nn[0].Prefs.LoggedOut(), qt.IsFalse) c.Assert(nn[0].Prefs.WantRunning(), qt.IsTrue) - c.Assert(b.State(), qt.Equals, ipn.NoState) + // We're logged in and have a valid netmap, so we should + // be in the Starting state. + c.Assert(nn[1].State, qt.IsNotNil) + c.Assert(*nn[1].State, qt.Equals, ipn.Starting) + c.Assert(b.State(), qt.Equals, ipn.Starting) } // Control server accepts our valid key from before. t.Logf("\n\nLoginFinished5") - notifies.expect(1) + notifies.expect(0) cc.send(nil, "", true, &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), }) { - nn := notifies.drain(1) + notifies.drain(0) cc.assertCalls() // NOTE: No LoginFinished message since no interactive // login was needed. - c.Assert(nn[0].State, qt.IsNotNil) - c.Assert(ipn.Starting, qt.Equals, *nn[0].State) // NOTE: No prefs change this time. WantRunning stays true. // We were in Starting in the first place, so that doesn't - // change either. + // change either, so we don't expect any notifications. c.Assert(ipn.Starting, qt.Equals, b.State()) } t.Logf("\n\nExpireKey") From f3f2f72f9664261fc7952a0f5be4ce5d48222358 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 20 Mar 2025 15:15:23 -0500 Subject: [PATCH 0620/1708] ipn/ipnlocal: do not attempt to start the auditlogger with a nil transport (*LocalBackend).setControlClientLocked() is called to both set and reset b.cc. We shouldn't attempt to start the audit logger when b.cc is being reset (i.e., cc is nil). However, it's fine to start the audit logger if b.cc implements auditlog.Transport, even if it's not a controlclient.Auto but a mock control client. In this PR, we fix both issues and add an assertion that controlclient.Auto is an auditlog.Transport. 
This ensures a compile-time failure if controlclient.Auto ever stops being a valid transport due to future interface or implementation changes. Updates tailscale/corp#26435 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 4b47f20c4..622283acb 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5918,6 +5918,9 @@ func (b *LocalBackend) requestEngineStatusAndWait() { b.logf("requestEngineStatusAndWait: got status update.") } +// [controlclient.Auto] implements [auditlog.Transport]. +var _ auditlog.Transport = (*controlclient.Auto)(nil) + // setControlClientLocked sets the control client to cc, // which may be nil. // @@ -5925,12 +5928,12 @@ func (b *LocalBackend) requestEngineStatusAndWait() { func (b *LocalBackend) setControlClientLocked(cc controlclient.Client) { b.cc = cc b.ccAuto, _ = cc.(*controlclient.Auto) - if b.auditLogger != nil { + if t, ok := b.cc.(auditlog.Transport); ok && b.auditLogger != nil { if err := b.auditLogger.SetProfileID(b.pm.CurrentProfile().ID()); err != nil { b.logf("audit logger set profile ID failure: %v", err) } - if err := b.auditLogger.Start(b.ccAuto); err != nil { + if err := b.auditLogger.Start(t); err != nil { b.logf("audit logger start failure: %v", err) } } From 196ae1cd747f4f9f7e2b6dfb862915b1296d0beb Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 21 Mar 2025 08:53:41 +0000 Subject: [PATCH 0621/1708] cmd/k8s-operator,k8s-operator: allow optionally using LE staging endpoint for Ingress (#15360) cmd/k8s-operator,k8s-operator: allow using LE staging endpoint for Ingress Allow to optionally use LetsEncrypt staging endpoint to issue certs for Ingress/HA Ingress, so that it is easier to experiment with initial Ingress setup without hiting rate limits. Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- .../crds/tailscale.com_proxyclasses.yaml | 16 + .../deploy/manifests/operator.yaml | 16 + cmd/k8s-operator/ingress_test.go | 308 ++++++++---------- cmd/k8s-operator/proxygroup.go | 5 +- cmd/k8s-operator/proxygroup_test.go | 207 ++++++++++++ cmd/k8s-operator/sts.go | 13 + k8s-operator/api.md | 1 + .../apis/v1alpha1/types_proxyclass.go | 15 + 8 files changed, 413 insertions(+), 168 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index a620c3887..f89e38453 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -2215,6 +2215,22 @@ spec: https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-devices Defaults to false. type: boolean + useLetsEncryptStagingEnvironment: + description: |- + Set UseLetsEncryptStagingEnvironment to true to issue TLS + certificates for any HTTPS endpoints exposed to the tailnet from + LetsEncrypt's staging environment. + https://letsencrypt.org/docs/staging-environment/ + This setting only affects Tailscale Ingress resources. + By default Ingress TLS certificates are issued from LetsEncrypt's + production environment. + Changing this setting true -> false, will result in any + existing certs being re-issued from the production environment. + Changing this setting false (default) -> true, when certs have already + been provisioned from production environment will NOT result in certs + being re-issued from the staging environment before they need to be + renewed. 
+ type: boolean status: description: |- Status of the ProxyClass. This is set and managed automatically. diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 9ee3b441a..dc8d0634c 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2685,6 +2685,22 @@ spec: Defaults to false. type: boolean type: object + useLetsEncryptStagingEnvironment: + description: |- + Set UseLetsEncryptStagingEnvironment to true to issue TLS + certificates for any HTTPS endpoints exposed to the tailnet from + LetsEncrypt's staging environment. + https://letsencrypt.org/docs/staging-environment/ + This setting only affects Tailscale Ingress resources. + By default Ingress TLS certificates are issued from LetsEncrypt's + production environment. + Changing this setting true -> false, will result in any + existing certs being re-issued from the production environment. + Changing this setting false (default) -> true, when certs have already + been provisioned from production environment will NOT result in certs + being re-issued from the staging environment before they need to be + renewed. + type: boolean type: object status: description: |- diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index 74eddff56..f9623850c 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -6,6 +6,7 @@ package main import ( + "context" "testing" "go.uber.org/zap" @@ -15,17 +16,18 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "tailscale.com/ipn" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" + "tailscale.com/tstest" "tailscale.com/types/ptr" "tailscale.com/util/mak" ) func TestTailscaleIngress(t *testing.T) { - tsIngressClass := &networkingv1.IngressClass{ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}} - fc := fake.NewFakeClient(tsIngressClass) + fc := fake.NewFakeClient(ingressClass()) ft := &fakeTSClient{} fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} zl, err := zap.NewDevelopment() @@ -46,45 +48,8 @@ func TestTailscaleIngress(t *testing.T) { } // 1. Resources get created for regular Ingress - ing := &networkingv1.Ingress{ - TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "default", - // The apiserver is supposed to set the UID, but the fake client - // doesn't. So, set it explicitly because other code later depends - // on it being set. 
- UID: types.UID("1234-UID"), - }, - Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), - DefaultBackend: &networkingv1.IngressBackend{ - Service: &networkingv1.IngressServiceBackend{ - Name: "test", - Port: networkingv1.ServiceBackendPort{ - Number: 8080, - }, - }, - }, - TLS: []networkingv1.IngressTLS{ - {Hosts: []string{"default-test"}}, - }, - }, - } - mustCreate(t, fc, ing) - mustCreate(t, fc, &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - ClusterIP: "1.2.3.4", - Ports: []corev1.ServicePort{{ - Port: 8080, - Name: "http"}, - }, - }, - }) + mustCreate(t, fc, ingress()) + mustCreate(t, fc, service()) expectReconciled(t, ingR, "default", "test") @@ -114,6 +79,9 @@ func TestTailscaleIngress(t *testing.T) { mak.Set(&secret.Data, "device_fqdn", []byte("foo.tailnetxyz.ts.net")) }) expectReconciled(t, ingR, "default", "test") + + // Get the ingress and update it with expected changes + ing := ingress() ing.Finalizers = append(ing.Finalizers, "tailscale.com/finalizer") ing.Status.LoadBalancer = networkingv1.IngressLoadBalancerStatus{ Ingress: []networkingv1.IngressLoadBalancerIngress{ @@ -143,8 +111,7 @@ func TestTailscaleIngress(t *testing.T) { } func TestTailscaleIngressHostname(t *testing.T) { - tsIngressClass := &networkingv1.IngressClass{ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}} - fc := fake.NewFakeClient(tsIngressClass) + fc := fake.NewFakeClient(ingressClass()) ft := &fakeTSClient{} fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} zl, err := zap.NewDevelopment() @@ -165,45 +132,8 @@ func TestTailscaleIngressHostname(t *testing.T) { } // 1. Resources get created for regular Ingress - ing := &networkingv1.Ingress{ - TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "default", - // The apiserver is supposed to set the UID, but the fake client - // doesn't. So, set it explicitly because other code later depends - // on it being set. - UID: types.UID("1234-UID"), - }, - Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), - DefaultBackend: &networkingv1.IngressBackend{ - Service: &networkingv1.IngressServiceBackend{ - Name: "test", - Port: networkingv1.ServiceBackendPort{ - Number: 8080, - }, - }, - }, - TLS: []networkingv1.IngressTLS{ - {Hosts: []string{"default-test"}}, - }, - }, - } - mustCreate(t, fc, ing) - mustCreate(t, fc, &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - ClusterIP: "1.2.3.4", - Ports: []corev1.ServicePort{{ - Port: 8080, - Name: "http"}, - }, - }, - }) + mustCreate(t, fc, ingress()) + mustCreate(t, fc, service()) expectReconciled(t, ingR, "default", "test") @@ -241,8 +171,10 @@ func TestTailscaleIngressHostname(t *testing.T) { mak.Set(&secret.Data, "device_fqdn", []byte("foo.tailnetxyz.ts.net")) }) expectReconciled(t, ingR, "default", "test") - ing.Finalizers = append(ing.Finalizers, "tailscale.com/finalizer") + // Get the ingress and update it with expected changes + ing := ingress() + ing.Finalizers = append(ing.Finalizers, "tailscale.com/finalizer") expectEqual(t, fc, ing) // 3. 
Ingress proxy with capability version >= 110 advertises HTTPS endpoint @@ -299,10 +231,9 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { Annotations: map[string]string{"bar.io/foo": "some-val"}, Pod: &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": "some-val"}}}}, } - tsIngressClass := &networkingv1.IngressClass{ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}} fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). - WithObjects(pc, tsIngressClass). + WithObjects(pc, ingressClass()). WithStatusSubresource(pc). Build() ft := &fakeTSClient{} @@ -326,45 +257,8 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { // 1. Ingress is created with no ProxyClass specified, default proxy // resources get configured. - ing := &networkingv1.Ingress{ - TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "default", - // The apiserver is supposed to set the UID, but the fake client - // doesn't. So, set it explicitly because other code later depends - // on it being set. - UID: types.UID("1234-UID"), - }, - Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), - DefaultBackend: &networkingv1.IngressBackend{ - Service: &networkingv1.IngressServiceBackend{ - Name: "test", - Port: networkingv1.ServiceBackendPort{ - Number: 8080, - }, - }, - }, - TLS: []networkingv1.IngressTLS{ - {Hosts: []string{"default-test"}}, - }, - }, - } - mustCreate(t, fc, ing) - mustCreate(t, fc, &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - ClusterIP: "1.2.3.4", - Ports: []corev1.ServicePort{{ - Port: 8080, - Name: "http"}, - }, - }, - }) + mustCreate(t, fc, ingress()) + mustCreate(t, fc, service()) expectReconciled(t, ingR, "default", "test") @@ -432,54 +326,19 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { ObservedGeneration: 1, }}}, } - ing := &networkingv1.Ingress{ - TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "default", - // The apiserver is supposed to set the UID, but the fake client - // doesn't. So, set it explicitly because other code later depends - // on it being set. 
- UID: types.UID("1234-UID"), - Labels: map[string]string{ - "tailscale.com/proxy-class": "metrics", - }, - }, - Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), - DefaultBackend: &networkingv1.IngressBackend{ - Service: &networkingv1.IngressServiceBackend{ - Name: "test", - Port: networkingv1.ServiceBackendPort{ - Number: 8080, - }, - }, - }, - TLS: []networkingv1.IngressTLS{ - {Hosts: []string{"default-test"}}, - }, - }, - } - svc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - ClusterIP: "1.2.3.4", - Ports: []corev1.ServicePort{{ - Port: 8080, - Name: "http"}, - }, - }, - } crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} - tsIngressClass := &networkingv1.IngressClass{ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}} + + // Create fake client with ProxyClass, IngressClass, Ingress with metrics ProxyClass, and Service + ing := ingress() + ing.Labels = map[string]string{ + LabelProxyClass: "metrics", + } fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). - WithObjects(pc, tsIngressClass, ing, svc). + WithObjects(pc, ingressClass(), ing, service()). WithStatusSubresource(pc). Build() + ft := &fakeTSClient{} fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} zl, err := zap.NewDevelopment() @@ -560,3 +419,118 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { expectMissing[corev1.Service](t, fc, "operator-ns", metricsResourceName(shortName)) // ServiceMonitor gets garbage collected when the Service is deleted - we cannot test that here. } + +func TestIngressLetsEncryptStaging(t *testing.T) { + cl := tstest.NewClock(tstest.ClockOpts{}) + zl := zap.Must(zap.NewDevelopment()) + + pcLEStaging, pcLEStagingFalse, pcOther := proxyClassesForLEStagingTest() + + testCases := testCasesForLEStagingTests(pcLEStaging, pcLEStagingFalse, pcOther) + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + builder := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme) + + builder = builder.WithObjects(pcLEStaging, pcLEStagingFalse, pcOther). 
+ WithStatusSubresource(pcLEStaging, pcLEStagingFalse, pcOther) + + fc := builder.Build() + + if tt.proxyClassPerResource != "" || tt.defaultProxyClass != "" { + name := tt.proxyClassPerResource + if name == "" { + name = tt.defaultProxyClass + } + setProxyClassReady(t, fc, cl, name) + } + + mustCreate(t, fc, ingressClass()) + mustCreate(t, fc, service()) + ing := ingress() + if tt.proxyClassPerResource != "" { + ing.Labels = map[string]string{ + LabelProxyClass: tt.proxyClassPerResource, + } + } + mustCreate(t, fc, ing) + + ingR := &IngressReconciler{ + Client: fc, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: &fakeTSClient{}, + tsnetServer: &fakeTSNetServer{certDomains: []string{"test-host"}}, + defaultTags: []string{"tag:test"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale:test", + }, + logger: zl.Sugar(), + defaultProxyClass: tt.defaultProxyClass, + } + + expectReconciled(t, ingR, "default", "test") + + _, shortName := findGenName(t, fc, "default", "test", "ingress") + sts := &appsv1.StatefulSet{} + if err := fc.Get(context.Background(), client.ObjectKey{Namespace: "operator-ns", Name: shortName}, sts); err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + + if tt.useLEStagingEndpoint { + verifyEnvVar(t, sts, "TS_DEBUG_ACME_DIRECTORY_URL", letsEncryptStagingEndpoint) + } else { + verifyEnvVarNotPresent(t, sts, "TS_DEBUG_ACME_DIRECTORY_URL") + } + }) + } +} + +func ingressClass() *networkingv1.IngressClass { + return &networkingv1.IngressClass{ + ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, + Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}, + } +} + +func service() *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "1.2.3.4", + Ports: []corev1.ServicePort{{ + Port: 8080, + Name: "http"}, + }, + }, + } +} + +func ingress() *networkingv1.Ingress { + return &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + UID: types.UID("1234-UID"), + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 8080, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"default-test"}}, + }, + }, + } +} diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 112e5e2b0..f263829d7 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -302,7 +302,10 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro if err != nil { return fmt.Errorf("error generating StatefulSet spec: %w", err) } - ss = applyProxyClassToStatefulSet(proxyClass, ss, nil, logger) + cfg := &tailscaleSTSConfig{ + proxyType: string(pg.Spec.Type), + } + ss = applyProxyClassToStatefulSet(proxyClass, ss, cfg, logger) capver, err := r.capVerForPG(ctx, pg, logger) if err != nil { return fmt.Errorf("error getting device info: %w", err) diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 1f1a39ab0..159329eda 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -518,6 +518,60 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { }) } +func 
proxyClassesForLEStagingTest() (*tsapi.ProxyClass, *tsapi.ProxyClass, *tsapi.ProxyClass) { + pcLEStaging := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "le-staging", + Generation: 1, + }, + Spec: tsapi.ProxyClassSpec{ + UseLetsEncryptStagingEnvironment: true, + }, + } + + pcLEStagingFalse := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "le-staging-false", + Generation: 1, + }, + Spec: tsapi.ProxyClassSpec{ + UseLetsEncryptStagingEnvironment: false, + }, + } + + pcOther := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "other", + Generation: 1, + }, + Spec: tsapi.ProxyClassSpec{}, + } + + return pcLEStaging, pcLEStagingFalse, pcOther +} + +func setProxyClassReady(t *testing.T, fc client.Client, cl *tstest.Clock, name string) *tsapi.ProxyClass { + t.Helper() + pc := &tsapi.ProxyClass{} + if err := fc.Get(context.Background(), client.ObjectKey{Name: name}, pc); err != nil { + t.Fatal(err) + } + pc.Status = tsapi.ProxyClassStatus{ + Conditions: []metav1.Condition{{ + Type: string(tsapi.ProxyClassReady), + Status: metav1.ConditionTrue, + Reason: reasonProxyClassValid, + Message: reasonProxyClassValid, + LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + ObservedGeneration: pc.Generation, + }}, + } + if err := fc.Status().Update(context.Background(), pc); err != nil { + t.Fatal(err) + } + return pc +} + func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, wantEgress int) { t.Helper() if r.ingressProxyGroups.Len() != wantIngress { @@ -541,6 +595,16 @@ func verifyEnvVar(t *testing.T, sts *appsv1.StatefulSet, name, expectedValue str t.Errorf("%s environment variable not found", name) } +func verifyEnvVarNotPresent(t *testing.T, sts *appsv1.StatefulSet, name string) { + t.Helper() + for _, env := range sts.Spec.Template.Spec.Containers[0].Env { + if env.Name == name { + t.Errorf("environment variable %s should not be present", name) + return + } + } +} + func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool, cfgHash string, proxyClass *tsapi.ProxyClass) { t.Helper() @@ -618,3 +682,146 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG }) } } + +func TestProxyGroupLetsEncryptStaging(t *testing.T) { + cl := tstest.NewClock(tstest.ClockOpts{}) + zl := zap.Must(zap.NewDevelopment()) + + // Set up test cases- most are shared with non-HA Ingress. + type proxyGroupLETestCase struct { + leStagingTestCase + pgType tsapi.ProxyGroupType + } + pcLEStaging, pcLEStagingFalse, pcOther := proxyClassesForLEStagingTest() + sharedTestCases := testCasesForLEStagingTests(pcLEStaging, pcLEStagingFalse, pcOther) + var tests []proxyGroupLETestCase + for _, tt := range sharedTestCases { + tests = append(tests, proxyGroupLETestCase{ + leStagingTestCase: tt, + pgType: tsapi.ProxyGroupTypeIngress, + }) + } + tests = append(tests, proxyGroupLETestCase{ + leStagingTestCase: leStagingTestCase{ + name: "egress_pg_with_staging_proxyclass", + proxyClassPerResource: "le-staging", + useLEStagingEndpoint: false, + }, + pgType: tsapi.ProxyGroupTypeEgress, + }) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + builder := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme) + + // Pre-populate the fake client with ProxyClasses. + builder = builder.WithObjects(pcLEStaging, pcLEStagingFalse, pcOther). 
+ WithStatusSubresource(pcLEStaging, pcLEStagingFalse, pcOther) + + fc := builder.Build() + + // If the test case needs a ProxyClass to exist, ensure it is set to Ready. + if tt.proxyClassPerResource != "" || tt.defaultProxyClass != "" { + name := tt.proxyClassPerResource + if name == "" { + name = tt.defaultProxyClass + } + setProxyClassReady(t, fc, cl, name) + } + + // Create ProxyGroup + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tt.pgType, + Replicas: ptr.To[int32](1), + ProxyClass: tt.proxyClassPerResource, + }, + } + mustCreate(t, fc, pg) + + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + proxyImage: testProxyImage, + defaultTags: []string{"tag:test"}, + defaultProxyClass: tt.defaultProxyClass, + Client: fc, + tsClient: &fakeTSClient{}, + l: zl.Sugar(), + clock: cl, + } + + expectReconciled(t, reconciler, "", pg.Name) + + // Verify that the StatefulSet created for ProxyGrup has + // the expected setting for the staging endpoint. + sts := &appsv1.StatefulSet{} + if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + + if tt.useLEStagingEndpoint { + verifyEnvVar(t, sts, "TS_DEBUG_ACME_DIRECTORY_URL", letsEncryptStagingEndpoint) + } else { + verifyEnvVarNotPresent(t, sts, "TS_DEBUG_ACME_DIRECTORY_URL") + } + }) + } +} + +type leStagingTestCase struct { + name string + // ProxyClass set on ProxyGroup or Ingress resource. + proxyClassPerResource string + // Default ProxyClass. + defaultProxyClass string + useLEStagingEndpoint bool +} + +// Shared test cases for LE staging endpoint configuration for ProxyGroup and +// non-HA Ingress. +func testCasesForLEStagingTests(pcLEStaging, pcLEStagingFalse, pcOther *tsapi.ProxyClass) []leStagingTestCase { + return []leStagingTestCase{ + { + name: "with_staging_proxyclass", + proxyClassPerResource: "le-staging", + useLEStagingEndpoint: true, + }, + { + name: "with_staging_proxyclass_false", + proxyClassPerResource: "le-staging-false", + useLEStagingEndpoint: false, + }, + { + name: "with_other_proxyclass", + proxyClassPerResource: "other", + useLEStagingEndpoint: false, + }, + { + name: "no_proxyclass", + proxyClassPerResource: "", + useLEStagingEndpoint: false, + }, + { + name: "with_default_staging_proxyclass", + proxyClassPerResource: "", + defaultProxyClass: "le-staging", + useLEStagingEndpoint: true, + }, + { + name: "with_default_other_proxyclass", + proxyClassPerResource: "", + defaultProxyClass: "other", + useLEStagingEndpoint: false, + }, + { + name: "with_default_staging_proxyclass_false", + proxyClassPerResource: "", + defaultProxyClass: "le-staging-false", + useLEStagingEndpoint: false, + }, + } +} diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 6327a073b..7434ea79d 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -102,6 +102,8 @@ const ( envVarTSLocalAddrPort = "TS_LOCAL_ADDR_PORT" defaultLocalAddrPort = 9002 // metrics and health check port + + letsEncryptStagingEndpoint = "https://acme-staging-v02.api.letsencrypt.org/directory" ) var ( @@ -783,6 +785,17 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, enableEndpoints(ss, metricsEnabled, debugEnabled) } } + if pc.Spec.UseLetsEncryptStagingEnvironment && (stsCfg.proxyType == proxyTypeIngressResource || stsCfg.proxyType == string(tsapi.ProxyGroupTypeIngress)) { + for i, c := range 
ss.Spec.Template.Spec.Containers { + if c.Name == "tailscale" { + ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{ + Name: "TS_DEBUG_ACME_DIRECTORY_URL", + Value: letsEncryptStagingEndpoint, + }) + break + } + } + } if pc.Spec.StatefulSet == nil { return ss diff --git a/k8s-operator/api.md b/k8s-operator/api.md index fae25b1f6..190f99d24 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -517,6 +517,7 @@ _Appears in:_ | `statefulSet` _[StatefulSet](#statefulset)_ | Configuration parameters for the proxy's StatefulSet. Tailscale
        Kubernetes operator deploys a StatefulSet for each of the user
        configured proxies (Tailscale Ingress, Tailscale Service, Connector). | | | | `metrics` _[Metrics](#metrics)_ | Configuration for proxy metrics. Metrics are currently not supported
        for egress proxies and for Ingress proxies that have been configured
        with tailscale.com/experimental-forward-cluster-traffic-via-ingress
        annotation. Note that the metrics are currently considered unstable
        and will likely change in breaking ways in the future - we only
        recommend that you use those for debugging purposes. | | | | `tailscale` _[TailscaleConfig](#tailscaleconfig)_ | TailscaleConfig contains options to configure the tailscale-specific
        parameters of proxies. | | | +| `useLetsEncryptStagingEnvironment` _boolean_ | Set UseLetsEncryptStagingEnvironment to true to issue TLS
        certificates for any HTTPS endpoints exposed to the tailnet from
        LetsEncrypt's staging environment.
        https://letsencrypt.org/docs/staging-environment/
        This setting only affects Tailscale Ingress resources.
        By default Ingress TLS certificates are issued from LetsEncrypt's
        production environment.
        Changing this setting true -> false will result in any
        existing certs being re-issued from the production environment.
        Changing this setting false (default) -> true, when certs have already
        been provisioned from the production environment, will NOT result in certs
        being re-issued from the staging environment before they need to be
        renewed. | | | #### ProxyClassStatus diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index 549234fef..3fde0b37a 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -66,6 +66,21 @@ type ProxyClassSpec struct { // parameters of proxies. // +optional TailscaleConfig *TailscaleConfig `json:"tailscale,omitempty"` + // Set UseLetsEncryptStagingEnvironment to true to issue TLS + // certificates for any HTTPS endpoints exposed to the tailnet from + // LetsEncrypt's staging environment. + // https://letsencrypt.org/docs/staging-environment/ + // This setting only affects Tailscale Ingress resources. + // By default Ingress TLS certificates are issued from LetsEncrypt's + // production environment. + // Changing this setting true -> false, will result in any + // existing certs being re-issued from the production environment. + // Changing this setting false (default) -> true, when certs have already + // been provisioned from production environment will NOT result in certs + // being re-issued from the staging environment before they need to be + // renewed. + // +optional + UseLetsEncryptStagingEnvironment bool `json:"useLetsEncryptStagingEnvironment,omitempty"` } type TailscaleConfig struct { From 005e20a45ebe9d0ed2681c9bde23cb9e853489cc Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Fri, 21 Mar 2025 02:08:39 -0700 Subject: [PATCH 0622/1708] cmd/k8s-operator,internal/client/tailscale: use VIPService annotations for ownership tracking (#15356) Switch from using the Comment field to a ts-scoped annotation for tracking which operators are cooperating over ownership of a VIPService. Updates tailscale/corp#24795 Change-Id: I72d4a48685f85c0329aa068dc01a1a3c749017bf Signed-off-by: Tom Proctor --- cmd/k8s-operator/ingress-for-pg.go | 135 +++++++++++++---------- cmd/k8s-operator/ingress-for-pg_test.go | 26 +++-- internal/client/tailscale/vip_service.go | 2 + 3 files changed, 92 insertions(+), 71 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index dc74a86a5..5950a3db5 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -228,12 +228,11 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin return false, fmt.Errorf("error getting VIPService %q: %w", hostname, err) } } - // Generate the VIPService comment for new or existing VIPService. This - // checks and ensures that VIPService's owner references are updated for - // this Ingress and errors if that is not possible (i.e. because it - // appears that the VIPService has been created by a non-operator - // actor). - svcComment, err := r.ownerRefsComment(existingVIPSvc) + // Generate the VIPService owner annotation for new or existing VIPService. + // This checks and ensures that VIPService's owner references are updated + // for this Ingress and errors if that is not possible (i.e. because it + // appears that the VIPService has been created by a non-operator actor). + updatedAnnotations, err := r.ownerAnnotations(existingVIPSvc) if err != nil { const instr = "To proceed, you can either manually delete the existing VIPService or choose a different MagicDNS name at `.spec.tls.hosts[0] in the Ingress definition" msg := fmt.Sprintf("error ensuring ownership of VIPService %s: %v. 
%s", hostname, err, instr) @@ -313,11 +312,13 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin vipPorts = append(vipPorts, "80") } + const managedVIPServiceComment = "This VIPService is managed by the Tailscale Kubernetes Operator, do not modify" vipSvc := &tailscale.VIPService{ - Name: serviceName, - Tags: tags, - Ports: vipPorts, - Comment: svcComment, + Name: serviceName, + Tags: tags, + Ports: vipPorts, + Comment: managedVIPServiceComment, + Annotations: updatedAnnotations, } if existingVIPSvc != nil { vipSvc.Addrs = existingVIPSvc.Addrs @@ -328,7 +329,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin if existingVIPSvc == nil || !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) || !reflect.DeepEqual(vipSvc.Ports, existingVIPSvc.Ports) || - !strings.EqualFold(vipSvc.Comment, existingVIPSvc.Comment) { + !ownersAreSetAndEqual(vipSvc, existingVIPSvc) { logger.Infof("Ensuring VIPService exists and is up to date") if err := r.tsClient.CreateOrUpdateVIPService(ctx, vipSvc); err != nil { return false, fmt.Errorf("error creating VIPService: %w", err) @@ -669,34 +670,34 @@ func (r *HAIngressReconciler) cleanupVIPService(ctx context.Context, name tailcf if svc == nil { return false, nil } - c, err := parseComment(svc) + o, err := parseOwnerAnnotation(svc) if err != nil { - return false, fmt.Errorf("error parsing VIPService comment") + return false, fmt.Errorf("error parsing VIPService owner annotation") } - if c == nil || len(c.OwnerRefs) == 0 { + if o == nil || len(o.OwnerRefs) == 0 { return false, nil } // Comparing with the operatorID only means that we will not be able to // clean up VIPServices in cases where the operator was deleted from the // cluster before deleting the Ingress. Perhaps the comparison could be // 'if or.OperatorID === r.operatorID || or.ingressUID == r.ingressUID'. - ix := slices.IndexFunc(c.OwnerRefs, func(or OwnerRef) bool { + ix := slices.IndexFunc(o.OwnerRefs, func(or OwnerRef) bool { return or.OperatorID == r.operatorID }) if ix == -1 { return false, nil } - if len(c.OwnerRefs) == 1 { + if len(o.OwnerRefs) == 1 { logger.Infof("Deleting VIPService %q", name) return false, r.tsClient.DeleteVIPService(ctx, name) } - c.OwnerRefs = slices.Delete(c.OwnerRefs, ix, ix+1) + o.OwnerRefs = slices.Delete(o.OwnerRefs, ix, ix+1) logger.Infof("Deleting VIPService %q", name) - json, err := json.Marshal(c) + json, err := json.Marshal(o) if err != nil { return false, fmt.Errorf("error marshalling updated VIPService owner reference: %w", err) } - svc.Comment = string(json) + svc.Annotations[ownerAnnotation] = string(json) return true, r.tsClient.CreateOrUpdateVIPService(ctx, svc) } @@ -783,6 +784,15 @@ func (a *HAIngressReconciler) numberPodsAdvertising(ctx context.Context, pgName return count, nil } +const ownerAnnotation = "tailscale.com/owner-references" + +// ownerAnnotationValue is the content of the VIPService.Annotation[ownerAnnotation] field. +type ownerAnnotationValue struct { + // OwnerRefs is a list of owner references that identify all operator + // instances that manage this VIPService. + OwnerRefs []OwnerRef `json:"ownerRefs,omitempty"` +} + // OwnerRef is an owner reference that uniquely identifies a Tailscale // Kubernetes operator instance. type OwnerRef struct { @@ -790,48 +800,67 @@ type OwnerRef struct { OperatorID string `json:"operatorID,omitempty"` } -// comment is the content of the VIPService.Comment field. 
-type comment struct { - // OwnerRefs is a list of owner references that identify all operator - // instances that manage this VIPService. - OwnerRefs []OwnerRef `json:"ownerRefs,omitempty"` -} - -// ownerRefsComment return VIPService Comment that includes owner reference for this -// operator instance for the provided VIPService. If the VIPService is nil, a -// new comment with owner ref is returned. If the VIPService is not nil, the -// existing comment is returned with the owner reference added, if not already -// present. If the VIPService is not nil, but does not contain a comment we -// return an error as this likely means that the VIPService was created by -// somthing other than a Tailscale Kubernetes operator. -func (r *HAIngressReconciler) ownerRefsComment(svc *tailscale.VIPService) (string, error) { +// ownerAnnotations returns the updated annotations required to ensure this +// instance of the operator is included as an owner. If the VIPService is not +// nil, but does not contain an owner we return an error as this likely means +// that the VIPService was created by somthing other than a Tailscale +// Kubernetes operator. +func (r *HAIngressReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[string]string, error) { ref := OwnerRef{ OperatorID: r.operatorID, } if svc == nil { - c := &comment{OwnerRefs: []OwnerRef{ref}} + c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}} json, err := json.Marshal(c) if err != nil { - return "", fmt.Errorf("[unexpected] unable to marshal VIPService comment contents: %w, please report this", err) + return nil, fmt.Errorf("[unexpected] unable to marshal VIPService owner annotation contents: %w, please report this", err) } - return string(json), nil + return map[string]string{ + ownerAnnotation: string(json), + }, nil } - c, err := parseComment(svc) + o, err := parseOwnerAnnotation(svc) if err != nil { - return "", fmt.Errorf("error parsing existing VIPService comment: %w", err) + return nil, err } - if c == nil || len(c.OwnerRefs) == 0 { - return "", fmt.Errorf("VIPService %s exists, but does not contain Comment field with owner references- not proceeding as this is likely a resource created by something other than a Tailscale Kubernetes Operator", svc.Name) + if o == nil || len(o.OwnerRefs) == 0 { + return nil, fmt.Errorf("VIPService %s exists, but does not contain owner annotation with owner references; not proceeding as this is likely a resource created by something other than the Tailscale Kubernetes operator", svc.Name) } - if slices.Contains(c.OwnerRefs, ref) { // up to date - return svc.Comment, nil + if slices.Contains(o.OwnerRefs, ref) { // up to date + return svc.Annotations, nil } - c.OwnerRefs = append(c.OwnerRefs, ref) - json, err := json.Marshal(c) + o.OwnerRefs = append(o.OwnerRefs, ref) + json, err := json.Marshal(o) if err != nil { - return "", fmt.Errorf("error marshalling updated owner references: %w", err) + return nil, fmt.Errorf("error marshalling updated owner references: %w", err) + } + + newAnnots := make(map[string]string, len(svc.Annotations)+1) + for k, v := range svc.Annotations { + newAnnots[k] = v } - return string(json), nil + newAnnots[ownerAnnotation] = string(json) + return newAnnots, nil +} + +// parseOwnerAnnotation returns nil if no valid owner found. 
+func parseOwnerAnnotation(vipSvc *tailscale.VIPService) (*ownerAnnotationValue, error) { + if vipSvc.Annotations == nil || vipSvc.Annotations[ownerAnnotation] == "" { + return nil, nil + } + o := &ownerAnnotationValue{} + if err := json.Unmarshal([]byte(vipSvc.Annotations[ownerAnnotation]), o); err != nil { + return nil, fmt.Errorf("error parsing VIPService %s annotation %q: %w", ownerAnnotation, vipSvc.Annotations[ownerAnnotation], err) + } + return o, nil +} + +func ownersAreSetAndEqual(a, b *tailscale.VIPService) bool { + return a != nil && b != nil && + a.Annotations != nil && b.Annotations != nil && + a.Annotations[ownerAnnotation] != "" && + b.Annotations[ownerAnnotation] != "" && + strings.EqualFold(a.Annotations[ownerAnnotation], b.Annotations[ownerAnnotation]) } // ensureCertResources ensures that the TLS Secret for an HA Ingress and RBAC @@ -877,18 +906,6 @@ func (r *HAIngressReconciler) cleanupCertResources(ctx context.Context, pgName s return nil } -// parseComment returns VIPService comment or nil if none found or not matching the expected format. -func parseComment(vipSvc *tailscale.VIPService) (*comment, error) { - if vipSvc.Comment == "" { - return nil, nil - } - c := &comment{} - if err := json.Unmarshal([]byte(vipSvc.Comment), c); err != nil { - return nil, fmt.Errorf("error parsing VIPService Comment field %q: %w", vipSvc.Comment, err) - } - return c, nil -} - // requeueInterval returns a time duration between 5 and 10 minutes, which is // the period of time after which an HA Ingress, whose VIPService has been newly // created or changed, needs to be requeued. This is to protect against diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 5716c0bbf..705d157cc 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -745,8 +745,10 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) { // Simulate existing VIPService from another cluster existingVIPSvc := &tailscale.VIPService{ - Name: "svc:my-svc", - Comment: `{"ownerrefs":[{"operatorID":"operator-2"}]}`, + Name: "svc:my-svc", + Annotations: map[string]string{ + ownerAnnotation: `{"ownerrefs":[{"operatorID":"operator-2"}]}`, + }, } ft.vipServices = map[tailcfg.ServiceName]*tailscale.VIPService{ "svc:my-svc": existingVIPSvc, @@ -763,17 +765,17 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) { t.Fatal("VIPService not found") } - c := &comment{} - if err := json.Unmarshal([]byte(vipSvc.Comment), c); err != nil { - t.Fatalf("parsing comment: %v", err) + o, err := parseOwnerAnnotation(vipSvc) + if err != nil { + t.Fatalf("parsing owner annotation: %v", err) } wantOwnerRefs := []OwnerRef{ {OperatorID: "operator-2"}, {OperatorID: "operator-1"}, } - if !reflect.DeepEqual(c.OwnerRefs, wantOwnerRefs) { - t.Errorf("incorrect owner refs\ngot: %+v\nwant: %+v", c.OwnerRefs, wantOwnerRefs) + if !reflect.DeepEqual(o.OwnerRefs, wantOwnerRefs) { + t.Errorf("incorrect owner refs\ngot: %+v\nwant: %+v", o.OwnerRefs, wantOwnerRefs) } // Delete the Ingress and verify VIPService still exists with one owner ref @@ -790,15 +792,15 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) { t.Fatal("VIPService was incorrectly deleted") } - c = &comment{} - if err := json.Unmarshal([]byte(vipSvc.Comment), c); err != nil { - t.Fatalf("parsing comment after deletion: %v", err) + o, err = parseOwnerAnnotation(vipSvc) + if err != nil { + t.Fatalf("parsing owner annotation: %v", err) } wantOwnerRefs = []OwnerRef{ {OperatorID: "operator-2"}, } - 
if !reflect.DeepEqual(c.OwnerRefs, wantOwnerRefs) { - t.Errorf("incorrect owner refs after deletion\ngot: %+v\nwant: %+v", c.OwnerRefs, wantOwnerRefs) + if !reflect.DeepEqual(o.OwnerRefs, wantOwnerRefs) { + t.Errorf("incorrect owner refs after deletion\ngot: %+v\nwant: %+v", o.OwnerRefs, wantOwnerRefs) } } diff --git a/internal/client/tailscale/vip_service.go b/internal/client/tailscale/vip_service.go index 958192c4d..64fcfdf5e 100644 --- a/internal/client/tailscale/vip_service.go +++ b/internal/client/tailscale/vip_service.go @@ -27,6 +27,8 @@ type VIPService struct { Addrs []string `json:"addrs,omitempty"` // Comment is an optional text string for display in the admin panel. Comment string `json:"comment,omitempty"` + // Annotations are optional key-value pairs that can be used to store arbitrary metadata. + Annotations map[string]string `json:"annotations,omitempty"` // Ports are the ports of a VIPService that will be configured via Tailscale serve config. // If set, any node wishing to advertise this VIPService must have this port configured via Tailscale serve. Ports []string `json:"ports,omitempty"` From 5668de272c4e0fb5fda061564a297c7d530193bc Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Fri, 21 Mar 2025 11:56:48 +0000 Subject: [PATCH 0623/1708] tsnet: use test logger for testcontrol and node logs Updates #cleanup Signed-off-by: James Sanderson --- tsnet/tsnet_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 4b73707c9..d00628453 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -120,6 +120,7 @@ func startControl(t *testing.T) (controlURL string, control *testcontrol.Server) Proxied: true, }, MagicDNSDomain: "tail-scale.ts.net", + Logf: t.Logf, } control.HTTPTestServer = httptest.NewUnstartedServer(control) control.HTTPTestServer.Start() @@ -221,7 +222,7 @@ func startServer(t *testing.T, ctx context.Context, controlURL, hostname string) getCertForTesting: testCertRoot.getCert, } if *verboseNodes { - s.Logf = log.Printf + s.Logf = t.Logf } t.Cleanup(func() { s.Close() }) From c261fb198f1d3ee0c1bbe9db20ceef441f5be0e8 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Fri, 21 Mar 2025 12:00:01 +0000 Subject: [PATCH 0624/1708] tstest: make it clearer where AwaitRunning failed and why Signed-off-by: James Sanderson --- tstest/integration/integration_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 770abd506..81a1cd9dc 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -1942,6 +1942,8 @@ func (n *testNode) AwaitIP6() netip.Addr { // AwaitRunning waits for n to reach the IPN state "Running". func (n *testNode) AwaitRunning() { + t := n.env.t + t.Helper() n.AwaitBackendState("Running") } @@ -2015,7 +2017,7 @@ func (n *testNode) Status() (*ipnstate.Status, error) { } st := new(ipnstate.Status) if err := json.Unmarshal(out, st); err != nil { - return nil, fmt.Errorf("decoding tailscale status JSON: %w", err) + return nil, fmt.Errorf("decoding tailscale status JSON: %w\njson:\n%s", err, out) } return st, nil } From e1078686b3d821be75e0b827b9015cb5761a6f92 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 21 Mar 2025 10:36:18 -0700 Subject: [PATCH 0625/1708] safesocket: respect context timeout when sleeping for 250ms in retry loop Noticed while working on a dev tool that uses local.Client. 
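For illustration, the general shape of a context-aware retry delay is a select over the context and a timer instead of a bare time.Sleep. This is a minimal standalone sketch of the pattern, not the exact safesocket code; sleepCtx and the durations below are made up for the example:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // sleepCtx waits for d or until ctx is done, whichever comes first,
    // and returns the context's error if it fired early.
    func sleepCtx(ctx context.Context, d time.Duration) error {
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(d):
            return nil
        }
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
        defer cancel()
        // The 250ms retry delay is cut short by the 100ms context deadline.
        fmt.Println(sleepCtx(ctx, 250*time.Millisecond))
    }

In a retry loop this keeps the worst-case wait bounded by the caller's deadline rather than by the fixed per-attempt delay.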
Updates #cleanup Change-Id: I981efff74a5cac5f515755913668bd0508a4aa14 Signed-off-by: Brad Fitzpatrick --- safesocket/safesocket.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/safesocket/safesocket.go b/safesocket/safesocket.go index 991fddf5f..721b694dc 100644 --- a/safesocket/safesocket.go +++ b/safesocket/safesocket.go @@ -61,7 +61,11 @@ func ConnectContext(ctx context.Context, path string) (net.Conn, error) { if ctx.Err() != nil { return nil, ctx.Err() } - time.Sleep(250 * time.Millisecond) + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(250 * time.Millisecond): + } continue } return c, err From 6bbf98bef457b4403f27da79eb1861e6197ab539 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Fri, 21 Mar 2025 14:46:02 -0700 Subject: [PATCH 0626/1708] all: skip looking for package comments in .git/ repository (#15384) --- pkgdoc_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkgdoc_test.go b/pkgdoc_test.go index be08a358b..0f4a45528 100644 --- a/pkgdoc_test.go +++ b/pkgdoc_test.go @@ -26,6 +26,9 @@ func TestPackageDocs(t *testing.T) { if err != nil { return err } + if fi.Mode().IsDir() && path == ".git" { + return filepath.SkipDir // No documentation lives in .git + } if fi.Mode().IsRegular() && strings.HasSuffix(path, ".go") { if strings.HasSuffix(path, "_test.go") { return nil From d0c50c60720b1ef8569bc6f9becf3ab849f9197a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 23 Mar 2025 15:39:54 -0700 Subject: [PATCH 0627/1708] clientupdate: cache CanAutoUpdate, avoid log spam when false I noticed logs on one of my machines where it can't auto-update with scary log spam about "failed to apply tailnet-wide default for auto-updates". This avoids trying to do the EditPrefs if we know it's just going to fail anyway. Updates #282 Change-Id: Ib7db3b122185faa70efe08b60ebd05a6094eed8c Signed-off-by: Brad Fitzpatrick --- clientupdate/clientupdate.go | 7 ++++++- ipn/ipnlocal/local.go | 26 ++++++++++++++------------ 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/clientupdate/clientupdate.go b/clientupdate/clientupdate.go index c5baeb8e9..ffd3fb03b 100644 --- a/clientupdate/clientupdate.go +++ b/clientupdate/clientupdate.go @@ -28,6 +28,7 @@ import ( "strings" "tailscale.com/hostinfo" + "tailscale.com/types/lazy" "tailscale.com/types/logger" "tailscale.com/util/cmpver" "tailscale.com/version" @@ -249,9 +250,13 @@ func (up *Updater) getUpdateFunction() (fn updateFunction, canAutoUpdate bool) { return nil, false } +var canAutoUpdateCache lazy.SyncValue[bool] + // CanAutoUpdate reports whether auto-updating via the clientupdate package // is supported for the current os/distro. -func CanAutoUpdate() bool { +func CanAutoUpdate() bool { return canAutoUpdateCache.Get(canAutoUpdateUncached) } + +func canAutoUpdateUncached() bool { if version.IsMacSysExt() { // Macsys uses Sparkle for auto-updates, which doesn't have an update // function in this package. diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 622283acb..0a0b2280d 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3479,18 +3479,20 @@ func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { // can still manually enable auto-updates on this node. 
return } - b.logf("using tailnet default auto-update setting: %v", au) - prefsClone := prefs.AsStruct() - prefsClone.AutoUpdate.Apply = opt.NewBool(au) - _, err := b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ - Prefs: *prefsClone, - AutoUpdateSet: ipn.AutoUpdatePrefsMask{ - ApplySet: true, - }, - }, unlock) - if err != nil { - b.logf("failed to apply tailnet-wide default for auto-updates (%v): %v", au, err) - return + if clientupdate.CanAutoUpdate() { + b.logf("using tailnet default auto-update setting: %v", au) + prefsClone := prefs.AsStruct() + prefsClone.AutoUpdate.Apply = opt.NewBool(au) + _, err := b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ + Prefs: *prefsClone, + AutoUpdateSet: ipn.AutoUpdatePrefsMask{ + ApplySet: true, + }, + }, unlock) + if err != nil { + b.logf("failed to apply tailnet-wide default for auto-updates (%v): %v", au, err) + return + } } } From 5c0e08fbbd0982db61cc0baab8abfafbe22734cd Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 26 Feb 2025 15:42:24 -0800 Subject: [PATCH 0628/1708] tstest/mts: add multiple-tailscaled development tool To let you easily run multiple tailscaled instances for development and let you route CLI commands to the right one. Updates #15145 Change-Id: I06b6a7bf024f341c204f30705b4c3068ac89b1a2 Signed-off-by: Brad Fitzpatrick --- tstest/mts/mts.go | 599 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 599 insertions(+) create mode 100644 tstest/mts/mts.go diff --git a/tstest/mts/mts.go b/tstest/mts/mts.go new file mode 100644 index 000000000..c10d69d8d --- /dev/null +++ b/tstest/mts/mts.go @@ -0,0 +1,599 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux || darwin + +// The mts ("Multiple Tailscale") command runs multiple tailscaled instances for +// development, managing their directories and sockets, and lets you easily direct +// tailscale CLI commands to them. +package main + +import ( + "bufio" + "context" + "encoding/json" + "flag" + "fmt" + "io" + "log" + "maps" + "net" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "regexp" + "slices" + "strings" + "sync" + "syscall" + "time" + + "tailscale.com/client/local" + "tailscale.com/types/bools" + "tailscale.com/types/lazy" + "tailscale.com/util/mak" +) + +func usage(args ...any) { + var format string + if len(args) > 0 { + format, args = args[0].(string), args[1:] + } + if format != "" { + format = strings.TrimSpace(format) + "\n\n" + fmt.Fprintf(os.Stderr, format, args...) + } + io.WriteString(os.Stderr, strings.TrimSpace(` +usage: + + mts server # manage tailscaled instances + mts server run # run the mts server (parent process of all tailscaled) + mts server list # list all tailscaled and their state + mts server list # show details of named instance + mts server add # add+start new named tailscaled + mts server start # start a previously added tailscaled + mts server stop # stop & remove a named tailscaled + mts server rm # stop & remove a named tailscaled + mts server logs [-f] # get/follow tailscaled logs + + mts [tailscale CLI args] # run Tailscale CLI against a named instance + e.g. + mts gmail1 up + mts github2 status --json + `)+"\n") + os.Exit(1) +} + +func main() { + // Don't use flag.Parse here; we mostly just delegate through + // to the Tailscale CLI. 
+ + if len(os.Args) < 2 { + usage() + } + firstArg, args := os.Args[1], os.Args[2:] + if firstArg == "server" || firstArg == "s" { + if err := runMTSServer(args); err != nil { + log.Fatal(err) + } + } else { + var c Client + inst := firstArg + c.RunCommand(inst, args) + } +} + +func runMTSServer(args []string) error { + if len(args) == 0 { + usage() + } + cmd, args := args[0], args[1:] + if cmd == "run" { + var s Server + return s.Run() + } + + // Commands other than "run" all use the HTTP client to + // hit the mts server over its unix socket. + var c Client + + switch cmd { + default: + usage("unknown mts server subcommand %q", cmd) + case "list", "ls": + list, err := c.List() + if err != nil { + return err + } + if len(args) == 0 { + names := slices.Sorted(maps.Keys(list.Instances)) + for _, name := range names { + running := list.Instances[name].Running + fmt.Printf("%10s %s\n", bools.IfElse(running, "RUNNING", "stopped"), name) + } + } else { + for _, name := range args { + inst, ok := list.Instances[name] + if !ok { + return fmt.Errorf("no instance named %q", name) + } + je := json.NewEncoder(os.Stdout) + je.SetIndent("", " ") + if err := je.Encode(inst); err != nil { + return err + } + } + } + + case "rm": + if len(args) == 0 { + return fmt.Errorf("missing instance name(s) to remove") + } + log.SetFlags(0) + for _, name := range args { + ok, err := c.Remove(name) + if err != nil { + return err + } + if ok { + log.Printf("%s deleted.", name) + } else { + log.Printf("%s didn't exist.", name) + } + } + case "stop": + if len(args) == 0 { + return fmt.Errorf("missing instance name(s) to stop") + } + log.SetFlags(0) + for _, name := range args { + ok, err := c.Stop(name) + if err != nil { + return err + } + if ok { + log.Printf("%s stopped.", name) + } else { + log.Printf("%s didn't exist.", name) + } + } + case "start", "restart": + list, err := c.List() + if err != nil { + return err + } + shouldStop := cmd == "restart" + for _, arg := range args { + is, ok := list.Instances[arg] + if !ok { + return fmt.Errorf("no instance named %q", arg) + } + if is.Running { + if shouldStop { + if _, err := c.Stop(arg); err != nil { + return fmt.Errorf("stopping %q: %w", arg, err) + } + } else { + log.SetFlags(0) + log.Printf("%s already running.", arg) + continue + } + } + // Creating an existing one starts it up. 
+ if err := c.Create(arg); err != nil { + return fmt.Errorf("starting %q: %w", arg, err) + } + } + case "add": + if len(args) == 0 { + return fmt.Errorf("missing instance name(s) to add") + } + for _, name := range args { + if err := c.Create(name); err != nil { + return fmt.Errorf("creating %q: %w", name, err) + } + } + case "logs": + fs := flag.NewFlagSet("logs", flag.ExitOnError) + fs.Usage = func() { usage() } + follow := fs.Bool("f", false, "follow logs") + fs.Parse(args) + log.Printf("Parsed; following=%v, args=%q", *follow, fs.Args()) + if fs.NArg() != 1 { + usage() + } + cmd := bools.IfElse(*follow, "tail", "cat") + args := []string{cmd} + if *follow { + args = append(args, "-f") + } + path, err := exec.LookPath(cmd) + if err != nil { + return fmt.Errorf("looking up %q: %w", cmd, err) + } + args = append(args, instLogsFile(fs.Arg(0))) + log.Fatal(syscall.Exec(path, args, os.Environ())) + } + return nil +} + +type Client struct { +} + +func (c *Client) client() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + return net.Dial("unix", mtsSock()) + }, + }, + } +} + +func getJSON[T any](res *http.Response, err error) (T, error) { + var ret T + if err != nil { + return ret, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return ret, fmt.Errorf("unexpected status: %v: %s", res.Status, body) + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return ret, err + } + return ret, nil +} + +func (c *Client) List() (listResponse, error) { + return getJSON[listResponse](c.client().Get("http://mts/list")) +} + +func (c *Client) Remove(name string) (found bool, err error) { + return getJSON[bool](c.client().PostForm("http://mts/rm", url.Values{ + "name": []string{name}, + })) +} + +func (c *Client) Stop(name string) (found bool, err error) { + return getJSON[bool](c.client().PostForm("http://mts/stop", url.Values{ + "name": []string{name}, + })) +} + +func (c *Client) Create(name string) error { + req, err := http.NewRequest("POST", "http://mts/create/"+name, nil) + if err != nil { + return err + } + resp, err := c.client().Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("unexpected status: %v: %s", resp.Status, body) + } + return nil +} + +func (c *Client) RunCommand(name string, args []string) { + sock := instSock(name) + lc := &local.Client{ + Socket: sock, + UseSocketOnly: true, + } + probeCtx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) + defer cancel() + if _, err := lc.StatusWithoutPeers(probeCtx); err != nil { + log.Fatalf("instance %q not running? start with 'mts server start %q'; got error: %v", name, name, err) + } + args = append([]string{"run", "tailscale.com/cmd/tailscale", "--socket=" + sock}, args...) + cmd := exec.Command("go", args...) 
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Stdin = os.Stdin + err := cmd.Run() + if err == nil { + os.Exit(0) + } + if exitErr, ok := err.(*exec.ExitError); ok { + os.Exit(exitErr.ExitCode()) + } + panic(err) +} + +type Server struct { + lazyTailscaled lazy.GValue[string] + + mu sync.Mutex + cmds map[string]*exec.Cmd // running tailscaled instances +} + +func (s *Server) tailscaled() string { + v, err := s.lazyTailscaled.GetErr(func() (string, error) { + out, err := exec.Command("go", "list", "-f", "{{.Target}}", "tailscale.com/cmd/tailscaled").CombinedOutput() + if err != nil { + return "", err + } + return strings.TrimSpace(string(out)), nil + }) + if err != nil { + panic(err) + } + return v +} + +func (s *Server) Run() error { + if err := os.MkdirAll(mtsRoot(), 0700); err != nil { + return err + } + sock := mtsSock() + os.Remove(sock) + log.Printf("Multi-Tailscaled Server running; listening on %q ...", sock) + ln, err := net.Listen("unix", sock) + if err != nil { + return err + } + return http.Serve(ln, s) +} + +var validNameRx = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`) + +func validInstanceName(name string) bool { + return validNameRx.MatchString(name) +} + +func (s *Server) InstanceRunning(name string) bool { + s.mu.Lock() + defer s.mu.Unlock() + _, ok := s.cmds[name] + return ok +} + +func (s *Server) Stop(name string) { + s.mu.Lock() + defer s.mu.Unlock() + if cmd, ok := s.cmds[name]; ok { + if err := cmd.Process.Kill(); err != nil { + log.Printf("error killing %q: %v", name, err) + } + delete(s.cmds, name) + } +} + +func (s *Server) RunInstance(name string) error { + s.mu.Lock() + defer s.mu.Unlock() + + if _, ok := s.cmds[name]; ok { + return fmt.Errorf("instance %q already running", name) + } + + if !validInstanceName(name) { + return fmt.Errorf("invalid instance name %q", name) + } + dir := filepath.Join(mtsRoot(), name) + if err := os.MkdirAll(dir, 0700); err != nil { + return err + } + + env := os.Environ() + env = append(env, "TS_DEBUG_LOG_RATE=all") + if ef, err := os.Open(instEnvFile(name)); err == nil { + defer ef.Close() + sc := bufio.NewScanner(ef) + for sc.Scan() { + t := strings.TrimSpace(sc.Text()) + if strings.HasPrefix(t, "#") || !strings.Contains(t, "=") { + continue + } + env = append(env, t) + } + } else if os.IsNotExist(err) { + // Write an example one. + os.WriteFile(instEnvFile(name), fmt.Appendf(nil, "# Example mts env.txt file; uncomment/add stuff you want for %q\n\n#TS_DEBUG_MAP=1\n#TS_DEBUG_REGISTER=1\n#TS_NO_LOGS_NO_SUPPORT=1\n", name), 0600) + } + + extraArgs := []string{"--verbose=1"} + if af, err := os.Open(instArgsFile(name)); err == nil { + extraArgs = nil // clear default args + defer af.Close() + sc := bufio.NewScanner(af) + for sc.Scan() { + t := strings.TrimSpace(sc.Text()) + if strings.HasPrefix(t, "#") || t == "" { + continue + } + extraArgs = append(extraArgs, t) + } + } else if os.IsNotExist(err) { + // Write an example one. + os.WriteFile(instArgsFile(name), fmt.Appendf(nil, "# Example mts args.txt file for instance %q.\n# One line per extra arg to tailscaled; no magic string quoting\n\n--verbose=1\n#--socks5-server=127.0.0.1:5000\n", name), 0600) + } + + log.Printf("Running Tailscale daemon %q in %q", name, dir) + + args := []string{ + "--tun=userspace-networking", + "--statedir=" + filepath.Join(dir), + "--socket=" + filepath.Join(dir, "tailscaled.sock"), + } + args = append(args, extraArgs...) + + cmd := exec.Command(s.tailscaled(), args...) 
+ cmd.Dir = dir + cmd.Env = env + + out, err := cmd.StdoutPipe() + if err != nil { + return err + } + cmd.Stderr = cmd.Stdout + + logs := instLogsFile(name) + logFile, err := os.OpenFile(logs, os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_TRUNC, 0644) + if err != nil { + return fmt.Errorf("opening logs file: %w", err) + } + + go func() { + bs := bufio.NewScanner(out) + for bs.Scan() { + // TODO(bradfitz): record in memory too, serve via HTTP + line := strings.TrimSpace(bs.Text()) + fmt.Fprintf(logFile, "%s\n", line) + fmt.Printf("tailscaled[%s]: %s\n", name, line) + } + }() + + if err := cmd.Start(); err != nil { + return err + } + go func() { + err := cmd.Wait() + logFile.Close() + log.Printf("Tailscale daemon %q exited: %v", name, err) + s.mu.Lock() + defer s.mu.Unlock() + delete(s.cmds, name) + }() + + mak.Set(&s.cmds, name, cmd) + return nil +} + +type listResponse struct { + // Instances maps instance name to its details. + Instances map[string]listResponseInstance `json:"instances"` +} + +type listResponseInstance struct { + Name string `json:"name"` + Dir string `json:"dir"` + Sock string `json:"sock"` + Running bool `json:"running"` + Env string `json:"env"` + Args string `json:"args"` + Logs string `json:"logs"` +} + +func writeJSON(w http.ResponseWriter, v any) { + w.Header().Set("Content-Type", "application/json") + e := json.NewEncoder(w) + e.SetIndent("", " ") + e.Encode(v) +} + +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/list" { + var res listResponse + for _, name := range s.InstanceNames() { + mak.Set(&res.Instances, name, listResponseInstance{ + Name: name, + Dir: instDir(name), + Sock: instSock(name), + Running: s.InstanceRunning(name), + Env: instEnvFile(name), + Args: instArgsFile(name), + Logs: instLogsFile(name), + }) + } + writeJSON(w, res) + return + } + if r.URL.Path == "/rm" || r.URL.Path == "/stop" { + shouldRemove := r.URL.Path == "/rm" + if r.Method != "POST" { + http.Error(w, "POST required", http.StatusMethodNotAllowed) + return + } + target := r.FormValue("name") + var ok bool + for _, name := range s.InstanceNames() { + if name != target { + continue + } + ok = true + s.Stop(name) + if shouldRemove { + if err := os.RemoveAll(instDir(name)); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + break + } + writeJSON(w, ok) + return + } + if inst, ok := strings.CutPrefix(r.URL.Path, "/create/"); ok { + if !s.InstanceRunning(inst) { + if err := s.RunInstance(inst); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + fmt.Fprintf(w, "OK\n") + return + } + if r.URL.Path == "/" { + fmt.Fprintf(w, "This is mts, the multi-tailscaled server.\n") + return + } + http.NotFound(w, r) +} + +func (s *Server) InstanceNames() []string { + var ret []string + des, err := os.ReadDir(mtsRoot()) + if err != nil { + if os.IsNotExist(err) { + return nil + } + panic(err) + } + for _, de := range des { + if !de.IsDir() { + continue + } + ret = append(ret, de.Name()) + } + return ret +} + +func mtsRoot() string { + dir, err := os.UserConfigDir() + if err != nil { + panic(err) + } + return filepath.Join(dir, "multi-tailscale-dev") +} + +func instDir(name string) string { + return filepath.Join(mtsRoot(), name) +} + +func instSock(name string) string { + return filepath.Join(instDir(name), "tailscaled.sock") +} + +func instEnvFile(name string) string { + return filepath.Join(mtsRoot(), name, "env.txt") +} + +func instArgsFile(name string) string { + return 
filepath.Join(mtsRoot(), name, "args.txt") +} + +func instLogsFile(name string) string { + return filepath.Join(mtsRoot(), name, "logs.txt") +} + +func mtsSock() string { + return filepath.Join(mtsRoot(), "mts.sock") +} From 156cd53e7734407dc42e30af2f12cf6956cd9e24 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 24 Mar 2025 10:15:37 -0700 Subject: [PATCH 0629/1708] net/netmon: unexport GetState Baby step towards #15408. Updates #15408 Change-Id: I11fca6e677af2ad2f065d83aa0d83550143bff29 Signed-off-by: Brad Fitzpatrick --- net/netmon/interfaces_test.go | 2 +- net/netmon/netmon.go | 2 +- net/netmon/state.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/net/netmon/interfaces_test.go b/net/netmon/interfaces_test.go index edd4f6d6e..5fec274ca 100644 --- a/net/netmon/interfaces_test.go +++ b/net/netmon/interfaces_test.go @@ -13,7 +13,7 @@ import ( ) func TestGetState(t *testing.T) { - st, err := GetState() + st, err := getState() if err != nil { t.Fatal(err) } diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index 47b540d6a..b58356d33 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -161,7 +161,7 @@ func (m *Monitor) InterfaceState() *State { } func (m *Monitor) interfaceStateUncached() (*State, error) { - return GetState() + return getState() } // SetTailscaleInterfaceName sets the name of the Tailscale interface. For diff --git a/net/netmon/state.go b/net/netmon/state.go index a612dd06d..b6a6b2ab8 100644 --- a/net/netmon/state.go +++ b/net/netmon/state.go @@ -466,7 +466,7 @@ var getPAC func() string // It does not set the returned State.IsExpensive. The caller can populate that. // // Deprecated: use netmon.Monitor.InterfaceState instead. -func GetState() (*State, error) { +func getState() (*State, error) { s := &State{ InterfaceIPs: make(map[string][]netip.Prefix), Interface: make(map[string]Interface), From 14db99241f4f4191776ec22b4ff02f4563087b34 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 24 Mar 2025 10:57:57 -0700 Subject: [PATCH 0630/1708] net/netmon: use Monitor's tsIfName if set by SetTailscaleInterfaceName Currently nobody calls SetTailscaleInterfaceName yet, so this is a no-op. I checked oss, android, and the macOS/iOS client. Nobody calls this, or ever did. But I want to in the future. Updates #15408 Updates #9040 Change-Id: I05dfabe505174f9067b929e91c6e0d8bc42628d7 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 6 +++--- ipn/ipnlocal/peerapi.go | 2 +- net/netmon/interfaces_test.go | 2 +- net/netmon/netmon.go | 2 +- net/netmon/state.go | 14 ++++++++------ 5 files changed, 14 insertions(+), 12 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 0a0b2280d..1f9f7e8b2 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4968,7 +4968,7 @@ func (b *LocalBackend) authReconfig() { return } - oneCGNATRoute := shouldUseOneCGNATRoute(b.logf, b.sys.ControlKnobs(), version.OS()) + oneCGNATRoute := shouldUseOneCGNATRoute(b.logf, b.sys.NetMon.Get(), b.sys.ControlKnobs(), version.OS()) rcfg := b.routerConfig(cfg, prefs, oneCGNATRoute) err = b.e.Reconfig(cfg, rcfg, dcfg) @@ -4992,7 +4992,7 @@ func (b *LocalBackend) authReconfig() { // // The versionOS is a Tailscale-style version ("iOS", "macOS") and not // a runtime.GOOS. 
-func shouldUseOneCGNATRoute(logf logger.Logf, controlKnobs *controlknobs.Knobs, versionOS string) bool { +func shouldUseOneCGNATRoute(logf logger.Logf, mon *netmon.Monitor, controlKnobs *controlknobs.Knobs, versionOS string) bool { if controlKnobs != nil { // Explicit enabling or disabling always take precedence. if v, ok := controlKnobs.OneCGNAT.Load().Get(); ok { @@ -5007,7 +5007,7 @@ func shouldUseOneCGNATRoute(logf logger.Logf, controlKnobs *controlknobs.Knobs, // use fine-grained routes if another interfaces is also using the CGNAT // IP range. if versionOS == "macOS" { - hasCGNATInterface, err := netmon.HasCGNATInterface() + hasCGNATInterface, err := mon.HasCGNATInterface() if err != nil { logf("shouldUseOneCGNATRoute: Could not determine if any interfaces use CGNAT: %v", err) return false diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index ab2093c13..f20ea7524 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -481,7 +481,7 @@ func (h *peerAPIHandler) handleServeInterfaces(w http.ResponseWriter, r *http.Re fmt.Fprintf(w, "
Could not get the default route: %s\n", html.EscapeString(err.Error())) } - if hasCGNATInterface, err := netmon.HasCGNATInterface(); hasCGNATInterface { + if hasCGNATInterface, err := h.ps.b.sys.NetMon.Get().HasCGNATInterface(); hasCGNATInterface { fmt.Fprintln(w, "There is another interface using the CGNAT range.") } else if err != nil { fmt.Fprintf(w, "Could not check for CGNAT interfaces: %s
        \n", html.EscapeString(err.Error())) diff --git a/net/netmon/interfaces_test.go b/net/netmon/interfaces_test.go index 5fec274ca..e4274819f 100644 --- a/net/netmon/interfaces_test.go +++ b/net/netmon/interfaces_test.go @@ -13,7 +13,7 @@ import ( ) func TestGetState(t *testing.T) { - st, err := getState() + st, err := getState("") if err != nil { t.Fatal(err) } diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index b58356d33..bd62ab270 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -161,7 +161,7 @@ func (m *Monitor) InterfaceState() *State { } func (m *Monitor) interfaceStateUncached() (*State, error) { - return getState() + return getState(m.tsIfName) } // SetTailscaleInterfaceName sets the name of the Tailscale interface. For diff --git a/net/netmon/state.go b/net/netmon/state.go index b6a6b2ab8..bd0960768 100644 --- a/net/netmon/state.go +++ b/net/netmon/state.go @@ -461,21 +461,22 @@ func isTailscaleInterface(name string, ips []netip.Prefix) bool { // getPAC, if non-nil, returns the current PAC file URL. var getPAC func() string -// GetState returns the state of all the current machine's network interfaces. +// getState returns the state of all the current machine's network interfaces. // // It does not set the returned State.IsExpensive. The caller can populate that. // -// Deprecated: use netmon.Monitor.InterfaceState instead. -func getState() (*State, error) { +// optTSInterfaceName is the name of the Tailscale interface, if known. +func getState(optTSInterfaceName string) (*State, error) { s := &State{ InterfaceIPs: make(map[string][]netip.Prefix), Interface: make(map[string]Interface), } if err := ForeachInterface(func(ni Interface, pfxs []netip.Prefix) { + isTSInterfaceName := optTSInterfaceName != "" && ni.Name == optTSInterfaceName ifUp := ni.IsUp() s.Interface[ni.Name] = ni s.InterfaceIPs[ni.Name] = append(s.InterfaceIPs[ni.Name], pfxs...) - if !ifUp || isTailscaleInterface(ni.Name, pfxs) { + if !ifUp || isTSInterfaceName || isTailscaleInterface(ni.Name, pfxs) { return } for _, pfx := range pfxs { @@ -755,11 +756,12 @@ func DefaultRoute() (DefaultRouteDetails, error) { // HasCGNATInterface reports whether there are any non-Tailscale interfaces that // use a CGNAT IP range. -func HasCGNATInterface() (bool, error) { +func (m *Monitor) HasCGNATInterface() (bool, error) { hasCGNATInterface := false cgnatRange := tsaddr.CGNATRange() err := ForeachInterface(func(i Interface, pfxs []netip.Prefix) { - if hasCGNATInterface || !i.IsUp() || isTailscaleInterface(i.Name, pfxs) { + isTSInterfaceName := m.tsIfName != "" && i.Name == m.tsIfName + if hasCGNATInterface || !i.IsUp() || isTSInterfaceName || isTailscaleInterface(i.Name, pfxs) { return } for _, pfx := range pfxs { From b3455fa99a5e8d07133d5140017ec7c49f032a07 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 20 Mar 2025 11:12:20 -0700 Subject: [PATCH 0631/1708] cmd/natc: add some initial unit test coverage These tests aren't perfect, nor is this complete coverage, but this is a set of coverage that is at least stable. 
Updates #15367 Signed-off-by: James Tucker --- cmd/natc/natc_test.go | 365 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 365 insertions(+) create mode 100644 cmd/natc/natc_test.go diff --git a/cmd/natc/natc_test.go b/cmd/natc/natc_test.go new file mode 100644 index 000000000..1b6d7af7c --- /dev/null +++ b/cmd/natc/natc_test.go @@ -0,0 +1,365 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "net/netip" + "slices" + "testing" + + "github.com/gaissmai/bart" + "github.com/google/go-cmp/cmp" + "golang.org/x/net/dns/dnsmessage" + "tailscale.com/tailcfg" +) + +func prefixEqual(a, b netip.Prefix) bool { + return a.Bits() == b.Bits() && a.Addr() == b.Addr() +} + +func TestULA(t *testing.T) { + tests := []struct { + name string + siteID uint16 + expected string + }{ + {"zero", 0, "fd7a:115c:a1e0:a99c:0000::/80"}, + {"one", 1, "fd7a:115c:a1e0:a99c:0001::/80"}, + {"max", 65535, "fd7a:115c:a1e0:a99c:ffff::/80"}, + {"random", 12345, "fd7a:115c:a1e0:a99c:3039::/80"}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := ula(tc.siteID) + expected := netip.MustParsePrefix(tc.expected) + if !prefixEqual(got, expected) { + t.Errorf("ula(%d) = %s; want %s", tc.siteID, got, expected) + } + }) + } +} + +func TestRandV4(t *testing.T) { + pfx := netip.MustParsePrefix("100.64.1.0/24") + + for i := 0; i < 512; i++ { + ip := randV4(pfx) + if !pfx.Contains(ip) { + t.Errorf("randV4(%s) = %s; not contained in prefix", pfx, ip) + } + } +} + +func TestDNSResponse(t *testing.T) { + tests := []struct { + name string + questions []dnsmessage.Question + addrs []netip.Addr + wantEmpty bool + wantAnswers []struct { + name string + qType dnsmessage.Type + addr netip.Addr + } + }{ + { + name: "empty_request", + questions: []dnsmessage.Question{}, + addrs: []netip.Addr{}, + wantEmpty: false, + wantAnswers: nil, + }, + { + name: "a_record", + questions: []dnsmessage.Question{ + { + Name: dnsmessage.MustNewName("example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + }, + addrs: []netip.Addr{netip.MustParseAddr("100.64.1.5")}, + wantAnswers: []struct { + name string + qType dnsmessage.Type + addr netip.Addr + }{ + { + name: "example.com.", + qType: dnsmessage.TypeA, + addr: netip.MustParseAddr("100.64.1.5"), + }, + }, + }, + { + name: "aaaa_record", + questions: []dnsmessage.Question{ + { + Name: dnsmessage.MustNewName("example.com."), + Type: dnsmessage.TypeAAAA, + Class: dnsmessage.ClassINET, + }, + }, + addrs: []netip.Addr{netip.MustParseAddr("fd7a:115c:a1e0:a99c:0001:0505:0505:0505")}, + wantAnswers: []struct { + name string + qType dnsmessage.Type + addr netip.Addr + }{ + { + name: "example.com.", + qType: dnsmessage.TypeAAAA, + addr: netip.MustParseAddr("fd7a:115c:a1e0:a99c:0001:0505:0505:0505"), + }, + }, + }, + { + name: "soa_record", + questions: []dnsmessage.Question{ + { + Name: dnsmessage.MustNewName("example.com."), + Type: dnsmessage.TypeSOA, + Class: dnsmessage.ClassINET, + }, + }, + addrs: []netip.Addr{}, + wantAnswers: nil, + }, + { + name: "ns_record", + questions: []dnsmessage.Question{ + { + Name: dnsmessage.MustNewName("example.com."), + Type: dnsmessage.TypeNS, + Class: dnsmessage.ClassINET, + }, + }, + addrs: []netip.Addr{}, + wantAnswers: nil, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + req := &dnsmessage.Message{ + Header: dnsmessage.Header{ + ID: 1234, + }, + Questions: tc.questions, + } + + resp, err := dnsResponse(req, tc.addrs) + if err 
!= nil { + t.Fatalf("dnsResponse() error = %v", err) + } + + if tc.wantEmpty && len(resp) != 0 { + t.Errorf("dnsResponse() returned non-empty response when expected empty") + } + + if !tc.wantEmpty && len(resp) == 0 { + t.Errorf("dnsResponse() returned empty response when expected non-empty") + } + + if len(resp) > 0 { + var msg dnsmessage.Message + err = msg.Unpack(resp) + if err != nil { + t.Fatalf("Failed to unpack response: %v", err) + } + + if !msg.Header.Response { + t.Errorf("Response header is not set") + } + + if msg.Header.ID != req.Header.ID { + t.Errorf("Response ID = %d, want %d", msg.Header.ID, req.Header.ID) + } + + if len(tc.wantAnswers) > 0 { + if len(msg.Answers) != len(tc.wantAnswers) { + t.Errorf("got %d answers, want %d", len(msg.Answers), len(tc.wantAnswers)) + } else { + for i, want := range tc.wantAnswers { + ans := msg.Answers[i] + + gotName := ans.Header.Name.String() + if gotName != want.name { + t.Errorf("answer[%d] name = %s, want %s", i, gotName, want.name) + } + + if ans.Header.Type != want.qType { + t.Errorf("answer[%d] type = %v, want %v", i, ans.Header.Type, want.qType) + } + + var gotIP netip.Addr + switch want.qType { + case dnsmessage.TypeA: + if ans.Body.(*dnsmessage.AResource) == nil { + t.Errorf("answer[%d] not an A record", i) + continue + } + resource := ans.Body.(*dnsmessage.AResource) + gotIP = netip.AddrFrom4([4]byte(resource.A)) + case dnsmessage.TypeAAAA: + if ans.Body.(*dnsmessage.AAAAResource) == nil { + t.Errorf("answer[%d] not an AAAA record", i) + continue + } + resource := ans.Body.(*dnsmessage.AAAAResource) + gotIP = netip.AddrFrom16([16]byte(resource.AAAA)) + } + + if gotIP != want.addr { + t.Errorf("answer[%d] IP = %s, want %s", i, gotIP, want.addr) + } + } + } + } + } + }) + } +} + +func TestPerPeerState(t *testing.T) { + c := &connector{ + v4Ranges: []netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")}, + v6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), + dnsAddr: netip.MustParseAddr("100.64.1.1"), + } + + ps := &perPeerState{c: c} + + addrs, err := ps.ipForDomain("example.com") + if err != nil { + t.Fatalf("ipForDomain() error = %v", err) + } + + if len(addrs) != 2 { + t.Fatalf("ipForDomain() returned %d addresses, want 2", len(addrs)) + } + + v4 := addrs[0] + v6 := addrs[1] + + if !v4.Is4() { + t.Errorf("First address is not IPv4: %s", v4) + } + + if !v6.Is6() { + t.Errorf("Second address is not IPv6: %s", v6) + } + + if !c.v4Ranges[0].Contains(v4) { + t.Errorf("IPv4 address %s not in range %s", v4, c.v4Ranges[0]) + } + + domain, ok := ps.domainForIP(v4) + if !ok { + t.Errorf("domainForIP(%s) not found", v4) + } else if domain != "example.com" { + t.Errorf("domainForIP(%s) = %s, want %s", v4, domain, "example.com") + } + + domain, ok = ps.domainForIP(v6) + if !ok { + t.Errorf("domainForIP(%s) not found", v6) + } else if domain != "example.com" { + t.Errorf("domainForIP(%s) = %s, want %s", v6, domain, "example.com") + } + + addrs2, err := ps.ipForDomain("example.com") + if err != nil { + t.Fatalf("ipForDomain() second call error = %v", err) + } + + if !slices.Equal(addrs, addrs2) { + t.Errorf("ipForDomain() second call = %v, want %v", addrs2, addrs) + } +} + +func TestIgnoreDestination(t *testing.T) { + ignoreDstTable := &bart.Table[bool]{} + ignoreDstTable.Insert(netip.MustParsePrefix("192.168.1.0/24"), true) + ignoreDstTable.Insert(netip.MustParsePrefix("10.0.0.0/8"), true) + + c := &connector{ + ignoreDsts: ignoreDstTable, + } + + tests := []struct { + name string + addrs []netip.Addr + expected bool + }{ + { + 
name: "no_match", + addrs: []netip.Addr{netip.MustParseAddr("8.8.8.8"), netip.MustParseAddr("1.1.1.1")}, + expected: false, + }, + { + name: "one_match", + addrs: []netip.Addr{netip.MustParseAddr("8.8.8.8"), netip.MustParseAddr("192.168.1.5")}, + expected: true, + }, + { + name: "all_match", + addrs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("192.168.1.5")}, + expected: true, + }, + { + name: "empty_addrs", + addrs: []netip.Addr{}, + expected: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := c.ignoreDestination(tc.addrs) + if got != tc.expected { + t.Errorf("ignoreDestination(%v) = %v, want %v", tc.addrs, got, tc.expected) + } + }) + } +} + +func TestConnectorGenerateDNSResponse(t *testing.T) { + c := &connector{ + v4Ranges: []netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")}, + v6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), + dnsAddr: netip.MustParseAddr("100.64.1.1"), + } + + req := &dnsmessage.Message{ + Header: dnsmessage.Header{ID: 1234}, + Questions: []dnsmessage.Question{ + { + Name: dnsmessage.MustNewName("example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + }, + } + + nodeID := tailcfg.NodeID(12345) + + resp1, err := c.generateDNSResponse(req, nodeID) + if err != nil { + t.Fatalf("generateDNSResponse() error = %v", err) + } + if len(resp1) == 0 { + t.Fatalf("generateDNSResponse() returned empty response") + } + + resp2, err := c.generateDNSResponse(req, nodeID) + if err != nil { + t.Fatalf("generateDNSResponse() second call error = %v", err) + } + + if !cmp.Equal(resp1, resp2) { + t.Errorf("generateDNSResponse() responses differ between calls") + } +} From ea79dc161d4ebf8002c9c88c0644bd3abbbdd323 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Thu, 20 Mar 2025 15:43:54 +0000 Subject: [PATCH 0632/1708] tstest/integration/testcontrol: fix AddRawMapResponse race condition Only send a stored raw map message in reply to a streaming map response. Otherwise a non-streaming map response might pick it up first, and potentially drop it. This guarantees that a map response sent via AddRawMapResponse will be picked up by the main map response loop in the client. Fixes #15362 Signed-off-by: James Sanderson --- tstest/integration/testcontrol/testcontrol.go | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index e127087a6..52b96fe4d 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -839,15 +839,17 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi w.WriteHeader(200) for { - if resBytes, ok := s.takeRawMapMessage(req.NodeKey); ok { - if err := s.sendMapMsg(w, compress, resBytes); err != nil { - s.logf("sendMapMsg of raw message: %v", err) - return - } - if streaming { + // Only send raw map responses to the streaming poll, to avoid a + // non-streaming map request beating the streaming poll in a race and + // potentially dropping the map response. 
+ if streaming { + if resBytes, ok := s.takeRawMapMessage(req.NodeKey); ok { + if err := s.sendMapMsg(w, compress, resBytes); err != nil { + s.logf("sendMapMsg of raw message: %v", err) + return + } continue } - return } if s.canGenerateAutomaticMapResponseFor(req.NodeKey) { From e78055eb0106a612896c22f566468b0e6a1071d2 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Mon, 24 Mar 2025 15:49:58 -0500 Subject: [PATCH 0633/1708] ipn/ipnlocal: add more logging for initializing peerAPIListeners On Windows and Android, peerAPIListeners may be initialized after a link change. This commit adds log statements to make it easier to trace this flow. Updates #14393 Signed-off-by: Percy Wegmann --- ipn/ipnlocal/local.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 1f9f7e8b2..10a02d3cd 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -958,7 +958,9 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { if peerAPIListenAsync && b.netMap != nil && b.state == ipn.Running { want := b.netMap.GetAddresses().Len() - if len(b.peerAPIListeners) < want { + have := len(b.peerAPIListeners) + b.logf("[v1] linkChange: have %d peerAPIListeners, want %d", have, want) + if have < want { b.logf("linkChange: peerAPIListeners too low; trying again") b.goTracker.Go(b.initPeerAPIListener) } @@ -5369,6 +5371,7 @@ func (b *LocalBackend) initPeerAPIListener() { ln, err = ps.listen(a.Addr(), b.prevIfState) if err != nil { if peerAPIListenAsync { + b.logf("possibly transient peerapi listen(%q) error, will try again on linkChange: %v", a.Addr(), err) // Expected. But we fix it later in linkChange // ("peerAPIListeners too low"). continue From 08c8ccb48e45d8e5d7c719ce1a9d09756b64d376 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Tue, 25 Mar 2025 12:49:54 -0400 Subject: [PATCH 0634/1708] prober: add address family label for udp metrics (#15413) Add a label which differentiates the address family for STUN checks. Also initialize the derpprobe_attempts_total and derpprobe_seconds_total metrics by adding 0 for the alternate fail/ok case. 
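As a rough standalone illustration of why the zero-value initialization matters, here is a minimal sketch using the prometheus/client_golang API directly; the metric name and surrounding program are placeholders, not the prober package's actual wiring:

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        // Hypothetical counter with an ok/fail label, standing in for
        // derpprobe_attempts_total.
        attempts := prometheus.NewCounterVec(prometheus.CounterOpts{
            Name: "example_probe_attempts_total",
            Help: "Probe attempts, labeled by result.",
        }, []string{"status"})
        prometheus.MustRegister(attempts)

        // Pre-create both label values by adding 0, so both time series are
        // exported immediately. Without this, a probe that has only ever
        // succeeded exports no "fail" series, and ratio or alerting queries
        // see a missing series rather than a zero.
        attempts.WithLabelValues("ok").Add(0)
        attempts.WithLabelValues("fail").Add(0)

        // The real code then increments only the outcome that occurred.
        attempts.WithLabelValues("ok").Inc()
        fmt.Println("ok and fail series both exist from the start")
    }

The actual change below applies the same idea inside recordEndLocked, using the probe's existing mAttempts and mSeconds vectors.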
Updates tailscale/corp#27249 Signed-off-by: Mike O'Driscoll --- prober/derp.go | 14 +++++++++++++- prober/prober.go | 4 ++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/prober/derp.go b/prober/derp.go index 01a7d3086..98e61ff54 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -596,11 +596,23 @@ func (d *derpProber) updateMap(ctx context.Context) error { } func (d *derpProber) ProbeUDP(ipaddr string, port int) ProbeClass { + initLabels := make(Labels) + ip := net.ParseIP(ipaddr) + + if ip.To4() != nil { + initLabels["address_family"] = "ipv4" + } else if ip.To16() != nil { // Will return an IPv4 as 16 byte, so ensure the check for IPv4 precedes this + initLabels["address_family"] = "ipv6" + } else { + initLabels["address_family"] = "unknown" + } + return ProbeClass{ Probe: func(ctx context.Context) error { return derpProbeUDP(ctx, ipaddr, port) }, - Class: "derp_udp", + Class: "derp_udp", + Labels: initLabels, } } diff --git a/prober/prober.go b/prober/prober.go index d80db773a..4bd522f26 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -404,10 +404,14 @@ func (p *Probe) recordEndLocked(err error) { p.mSeconds.WithLabelValues("ok").Add(latency.Seconds()) p.latencyHist.Value = latency p.latencyHist = p.latencyHist.Next() + p.mAttempts.WithLabelValues("fail").Add(0) + p.mSeconds.WithLabelValues("fail").Add(0) } else { p.latency = 0 p.mAttempts.WithLabelValues("fail").Inc() p.mSeconds.WithLabelValues("fail").Add(latency.Seconds()) + p.mAttempts.WithLabelValues("ok").Add(0) + p.mSeconds.WithLabelValues("ok").Add(0) } p.successHist.Value = p.succeeded p.successHist = p.successHist.Next() From 725c8d298a23aa12d4556c007ea3fb5a7ba40427 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Tue, 25 Mar 2025 15:05:50 -0400 Subject: [PATCH 0635/1708] ipn/ipnlocal: remove misleading [unexpected] log for auditlog (#15421) fixes tailscale/tailscale#15394 In the current iteration, usage of the memstore for the audit logger is expected on some platforms. Signed-off-by: Jonathan Nobels --- ipn/ipnlocal/local.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 10a02d3cd..11da8c734 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2404,11 +2404,9 @@ func (b *LocalBackend) Start(opts ipn.Options) error { } var auditLogShutdown func() - // Audit logging is only available if the client has set up a proper persistent - // store for the logs in sys. store, ok := b.sys.AuditLogStore.GetOK() if !ok { - b.logf("auditlog: [unexpected] no persistent audit log storage configured. using memory store.") + // Use memory store by default if no explicit store is provided. store = auditlog.NewLogStore(&memstore.Store{}) } From 5aa1c27aad94f1408c2092365c32a4a918c305e0 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 25 Mar 2025 13:32:55 -0700 Subject: [PATCH 0636/1708] control/controlhttp: quiet "forcing port 443" log spam Minimal mitigation that doesn't do the full refactor that's probably warranted. 
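For context, the mitigation follows a common log-once pattern: re-arm an atomic flag at the start of each top-level dial, and let only the first goroutine that trips the condition do the logging. A minimal sketch of that pattern (type and field names here are illustrative, not the Dialer's actual API):

    package main

    import (
        "log"
        "sync"
        "sync/atomic"
    )

    // noisyCondition logs a warning at most once per arm() call, even when
    // many goroutines hit the condition concurrently: CompareAndSwap lets
    // exactly one of them flip the flag and print.
    type noisyCondition struct {
        shouldLog atomic.Bool
    }

    func (c *noisyCondition) arm() { c.shouldLog.Store(true) }

    func (c *noisyCondition) maybeLog(msg string) {
        if c.shouldLog.CompareAndSwap(true, false) {
            log.Print(msg)
        }
    }

    func main() {
        var c noisyCondition
        c.arm() // analogous to re-arming the flag before racing dial attempts
        var wg sync.WaitGroup
        for i := 0; i < 10; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                c.maybeLog("forcing port 443 dial due to recent noise dial")
            }()
        }
        wg.Wait() // only one of the ten goroutines logged
    }

In the change below, dial() re-arms logPort80Failure and forceNoise443() performs the CompareAndSwap before logging.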
Updates #15402 Change-Id: I79fd91de0e0661d25398f7d95563982ed1d11561 Signed-off-by: Brad Fitzpatrick --- control/controlhttp/client.go | 7 ++++++- control/controlhttp/constants.go | 6 ++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index 9b1d5a1a5..3b95796d0 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -96,6 +96,9 @@ func (a *Dialer) httpsFallbackDelay() time.Duration { var _ = envknob.RegisterBool("TS_USE_CONTROL_DIAL_PLAN") // to record at init time whether it's in use func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { + + a.logPort80Failure.Store(true) + // If we don't have a dial plan, just fall back to dialing the single // host we know about. useDialPlan := envknob.BoolDefaultTrue("TS_USE_CONTROL_DIAL_PLAN") @@ -278,7 +281,9 @@ func (d *Dialer) forceNoise443() bool { // This heuristic works around networks where port 80 is MITMed and // appears to work for a bit post-Upgrade but then gets closed, // such as seen in https://github.com/tailscale/tailscale/issues/13597. - d.logf("controlhttp: forcing port 443 dial due to recent noise dial") + if d.logPort80Failure.CompareAndSwap(true, false) { + d.logf("controlhttp: forcing port 443 dial due to recent noise dial") + } return true } diff --git a/control/controlhttp/constants.go b/control/controlhttp/constants.go index 971212d63..80b3fe64c 100644 --- a/control/controlhttp/constants.go +++ b/control/controlhttp/constants.go @@ -6,6 +6,7 @@ package controlhttp import ( "net/http" "net/url" + "sync/atomic" "time" "tailscale.com/health" @@ -90,6 +91,11 @@ type Dialer struct { proxyFunc func(*http.Request) (*url.URL, error) // or nil + // logPort80Failure is whether we should log about port 80 interceptions + // and forcing a port 443 dial. We do this only once per "dial" method + // which can result in many concurrent racing dialHost calls. + logPort80Failure atomic.Bool + // For tests only drainFinished chan struct{} omitCertErrorLogging bool From 75373896c77e610936c006ccd18791ebdf29ba18 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Fri, 21 Mar 2025 10:34:16 -0400 Subject: [PATCH 0637/1708] tsnet: Default executable name on iOS When compiled into TailscaleKit.framework (via the libtailscale repository), os.Executable() returns an error instead of the name of the executable. This commit adds another branch to the switch statement that enumerates platforms which behave in this manner, and defaults to "tsnet" in the same manner as those other platforms. Fixes #15410. Signed-off-by: James Nugent --- tsnet/tsnet.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 15cf39cba..1e58b424b 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -505,6 +505,11 @@ func (s *Server) start() (reterr error) { // directory and hostname when they're not supplied. But we can fall // back to "tsnet" as well. exe = "tsnet" + case "ios": + // When compiled as a framework (via TailscaleKit in libtailscale), + // os.Executable() returns an error, so fall back to "tsnet" there + // too. 
+ exe = "tsnet" default: return err } From 4777cc2cda59b9ad5b987513c8d955c5a5977eb0 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 25 Mar 2025 23:25:29 +0000 Subject: [PATCH 0638/1708] ipn/store/kubestore: skip cache for the write replica in cert share mode (#15417) ipn/store/kubestore: skip cache for the write replica in cert share mode This is to avoid issues where stale cache after Ingress recreation causes the certs not to be re-issued. Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- ipn/store/kubestore/store_kube.go | 47 +++++++++++++++----------- ipn/store/kubestore/store_kube_test.go | 23 +++++++------ 2 files changed, 41 insertions(+), 29 deletions(-) diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index ed37f06c2..14025bbb4 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -143,15 +143,6 @@ func (s *Store) WriteTLSCertAndKey(domain string, cert, key []byte) (err error) if err := dnsname.ValidHostname(domain); err != nil { return fmt.Errorf("invalid domain name %q: %w", domain, err) } - defer func() { - // TODO(irbekrm): a read between these two separate writes would - // get a mismatched cert and key. Allow writing both cert and - // key to the memory store in a single, lock-protected operation. - if err == nil { - s.memory.WriteState(ipn.StateKey(domain+".crt"), cert) - s.memory.WriteState(ipn.StateKey(domain+".key"), key) - } - }() secretName := s.secretName data := map[string][]byte{ domain + ".crt": cert, @@ -166,19 +157,32 @@ func (s *Store) WriteTLSCertAndKey(domain string, cert, key []byte) (err error) keyTLSKey: key, } } - return s.updateSecret(data, secretName) + if err := s.updateSecret(data, secretName); err != nil { + return fmt.Errorf("error writing TLS cert and key to Secret: %w", err) + } + // TODO(irbekrm): certs for write replicas are currently not + // written to memory to avoid out of sync memory state after + // Ingress resources have been recreated. This means that TLS + // certs for write replicas are retrieved from the Secret on + // each HTTPS request. This is a temporary solution till we + // implement a Secret watch. + if s.certShareMode != "rw" { + s.memory.WriteState(ipn.StateKey(domain+".crt"), cert) + s.memory.WriteState(ipn.StateKey(domain+".key"), key) + } + return nil } // ReadTLSCertAndKey reads a TLS cert and key from memory or from a // domain-specific Secret. It first checks the in-memory store, if not found in // memory and running cert store in read-only mode, looks up a Secret. +// Note that write replicas of HA Ingress always retrieve TLS certs from Secrets. func (s *Store) ReadTLSCertAndKey(domain string) (cert, key []byte, err error) { if err := dnsname.ValidHostname(domain); err != nil { return nil, nil, fmt.Errorf("invalid domain name %q: %w", domain, err) } certKey := domain + ".crt" keyKey := domain + ".key" - cert, err = s.memory.ReadState(ipn.StateKey(certKey)) if err == nil { key, err = s.memory.ReadState(ipn.StateKey(keyKey)) @@ -186,16 +190,12 @@ func (s *Store) ReadTLSCertAndKey(domain string) (cert, key []byte, err error) { return cert, key, nil } } - if s.certShareMode != "ro" { + if s.certShareMode == "" { return nil, nil, ipn.ErrStateNotExist } - // If we are in cert share read only mode, it is possible that a write - // replica just issued the TLS cert for this DNS name and it has not - // been loaded to store yet, so check the Secret. 
ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - secret, err := s.client.GetSecret(ctx, domain) if err != nil { if kubeclient.IsNotFoundErr(err) { @@ -212,9 +212,18 @@ func (s *Store) ReadTLSCertAndKey(domain string) (cert, key []byte, err error) { } // TODO(irbekrm): a read between these two separate writes would // get a mismatched cert and key. Allow writing both cert and - // key to the memory store in a single lock-protected operation. - s.memory.WriteState(ipn.StateKey(certKey), cert) - s.memory.WriteState(ipn.StateKey(keyKey), key) + // key to the memory store in a single, lock-protected operation. + // + // TODO(irbekrm): currently certs for write replicas of HA Ingress get + // retrieved from the cluster Secret on each HTTPS request to avoid a + // situation when after Ingress recreation stale certs are read from + // memory. + // Fix this by watching Secrets to ensure that memory store gets updated + // when Secrets are deleted. + if s.certShareMode == "ro" { + s.memory.WriteState(ipn.StateKey(certKey), cert) + s.memory.WriteState(ipn.StateKey(keyKey), key) + } return cert, key, nil } diff --git a/ipn/store/kubestore/store_kube_test.go b/ipn/store/kubestore/store_kube_test.go index 2ed16e77b..0d709264e 100644 --- a/ipn/store/kubestore/store_kube_test.go +++ b/ipn/store/kubestore/store_kube_test.go @@ -201,10 +201,6 @@ func TestWriteTLSCertAndKey(t *testing.T) { "tls.crt": []byte(testCert), "tls.key": []byte(testKey), }, - wantMemoryStore: map[ipn.StateKey][]byte{ - "my-app.tailnetxyz.ts.net.crt": []byte(testCert), - "my-app.tailnetxyz.ts.net.key": []byte(testKey), - }, }, { name: "cert_share_mode_write_update_existing", @@ -219,10 +215,6 @@ func TestWriteTLSCertAndKey(t *testing.T) { "tls.crt": []byte(testCert), "tls.key": []byte(testKey), }, - wantMemoryStore: map[ipn.StateKey][]byte{ - "my-app.tailnetxyz.ts.net.crt": []byte(testCert), - "my-app.tailnetxyz.ts.net.key": []byte(testKey), - }, }, { name: "update_existing", @@ -367,7 +359,7 @@ func TestReadTLSCertAndKey(t *testing.T) { wantMemoryStore map[ipn.StateKey][]byte }{ { - name: "found", + name: "found_in_memory", memoryStore: map[ipn.StateKey][]byte{ "my-app.tailnetxyz.ts.net.crt": []byte(testCert), "my-app.tailnetxyz.ts.net.key": []byte(testKey), @@ -381,7 +373,7 @@ func TestReadTLSCertAndKey(t *testing.T) { }, }, { - name: "not_found", + name: "not_found_in_memory", domain: testDomain, wantErr: ipn.ErrStateNotExist, }, @@ -400,6 +392,17 @@ func TestReadTLSCertAndKey(t *testing.T) { "my-app.tailnetxyz.ts.net.key": []byte(testKey), }, }, + { + name: "cert_share_rw_mode_found_in_secret", + certShareMode: "rw", + domain: testDomain, + secretData: map[string][]byte{ + "tls.crt": []byte(testCert), + "tls.key": []byte(testKey), + }, + wantCert: []byte(testCert), + wantKey: []byte(testKey), + }, { name: "cert_share_ro_mode_found_in_memory", certShareMode: "ro", From a622debe9b8029d58997aff628ea92f991a83562 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 26 Mar 2025 01:32:13 +0000 Subject: [PATCH 0639/1708] cmd/{k8s-operator,containerboot}: check TLS cert before advertising VIPService (#15427) cmd/{k8s-operator,containerboot}: check TLS cert before advertising VIPService - Ensures that Ingress status does not advertise port 443 before TLS cert has been issued - Ensure that Ingress backends do not advertise a VIPService before TLS cert has been issued, unless the service also exposes port 80 Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- cmd/containerboot/certs.go | 11 ++- 
cmd/k8s-operator/ingress-for-pg.go | 102 ++++++++++++++++++++---- cmd/k8s-operator/ingress-for-pg_test.go | 51 ++++++++++-- cmd/k8s-operator/operator.go | 36 +++++++-- 4 files changed, 168 insertions(+), 32 deletions(-) diff --git a/cmd/containerboot/certs.go b/cmd/containerboot/certs.go index 7af0424a9..504ef7988 100644 --- a/cmd/containerboot/certs.go +++ b/cmd/containerboot/certs.go @@ -60,6 +60,9 @@ func (cm *certManager) ensureCertLoops(ctx context.Context, sc *ipn.ServeConfig) if _, exists := cm.certLoops[domain]; !exists { cancelCtx, cancel := context.WithCancel(ctx) mak.Set(&cm.certLoops, domain, cancel) + // Note that most of the issuance anyway happens + // serially because the cert client has a shared lock + // that's held during any issuance. cm.tracker.Go(func() { cm.runCertLoop(cancelCtx, domain) }) } } @@ -116,7 +119,13 @@ func (cm *certManager) runCertLoop(ctx context.Context, domain string) { // issuance endpoint that explicitly only triggers // issuance and stores certs in the relevant store, but // does not return certs to the caller? - _, _, err := cm.lc.CertPair(ctx, domain) + + // An issuance holds a shared lock, so we need to avoid + // a situation where other services cannot issue certs + // because a single one is holding the lock. + ctxT, cancel := context.WithTimeout(ctx, time.Second*300) + defer cancel() + _, _, err := cm.lc.CertPair(ctxT, domain) if err != nil { log.Printf("error refreshing certificate for %s: %v", domain, err) } diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 5950a3db5..3df5a07ee 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -53,6 +53,8 @@ const ( // annotationHTTPEndpoint can be used to configure the Ingress to expose an HTTP endpoint to tailnet (as // well as the default HTTPS endpoint). annotationHTTPEndpoint = "tailscale.com/http-endpoint" + + labelDomain = "tailscale.com/domain" ) var gaugePGIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressPGResourceCount) @@ -241,7 +243,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin return false, nil } // 3. Ensure that TLS Secret and RBAC exists - if err := r.ensureCertResources(ctx, pgName, dnsName); err != nil { + if err := r.ensureCertResources(ctx, pgName, dnsName, ing); err != nil { return false, fmt.Errorf("error ensuring cert resources: %w", err) } @@ -338,7 +340,11 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin // 5. Update tailscaled's AdvertiseServices config, which should add the VIPService // IPs to the ProxyGroup Pods' AllowedIPs in the next netmap update if approved. 
- if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg.Name, serviceName, true, logger); err != nil { + mode := serviceAdvertisementHTTPS + if isHTTPEndpointEnabled(ing) { + mode = serviceAdvertisementHTTPAndHTTPS + } + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg.Name, serviceName, mode, logger); err != nil { return false, fmt.Errorf("failed to update tailscaled config: %w", err) } @@ -354,11 +360,17 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin case 0: ing.Status.LoadBalancer.Ingress = nil default: - ports := []networkingv1.IngressPortStatus{ - { + var ports []networkingv1.IngressPortStatus + hasCerts, err := r.hasCerts(ctx, serviceName) + if err != nil { + return false, fmt.Errorf("error checking TLS credentials provisioned for Ingress: %w", err) + } + // If TLS certs have not been issued (yet), do not set port 443. + if hasCerts { + ports = append(ports, networkingv1.IngressPortStatus{ Protocol: "TCP", Port: 443, - }, + }) } if isHTTPEndpointEnabled(ing) { ports = append(ports, networkingv1.IngressPortStatus{ @@ -366,9 +378,14 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin Port: 80, }) } + // Set Ingress status hostname only if either port 443 or 80 is advertised. + var hostname string + if len(ports) != 0 { + hostname = dnsName + } ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{ { - Hostname: dnsName, + Hostname: hostname, Ports: ports, }, } @@ -429,7 +446,7 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG } // Make sure the VIPService is not advertised in tailscaled or serve config. - if err = r.maybeUpdateAdvertiseServicesConfig(ctx, proxyGroupName, vipServiceName, false, logger); err != nil { + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, proxyGroupName, vipServiceName, serviceAdvertisementOff, logger); err != nil { return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } _, ok := cfg.Services[vipServiceName] @@ -512,7 +529,7 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, } // 4. Unadvertise the VIPService in tailscaled config. - if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg, serviceName, false, logger); err != nil { + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg, serviceName, serviceAdvertisementOff, logger); err != nil { return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } @@ -709,8 +726,16 @@ func isHTTPEndpointEnabled(ing *networkingv1.Ingress) bool { return ing.Annotations[annotationHTTPEndpoint] == "enabled" } -func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, pgName string, serviceName tailcfg.ServiceName, shouldBeAdvertised bool, logger *zap.SugaredLogger) (err error) { - logger.Debugf("Updating ProxyGroup tailscaled configs to advertise service %q: %v", serviceName, shouldBeAdvertised) +// serviceAdvertisementMode describes the desired state of a VIPService. 
+type serviceAdvertisementMode int + +const ( + serviceAdvertisementOff serviceAdvertisementMode = iota // Should not be advertised + serviceAdvertisementHTTPS // Port 443 should be advertised + serviceAdvertisementHTTPAndHTTPS // Both ports 80 and 443 should be advertised +) + +func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, pgName string, serviceName tailcfg.ServiceName, mode serviceAdvertisementMode, logger *zap.SugaredLogger) (err error) { // Get all config Secrets for this ProxyGroup. secrets := &corev1.SecretList{} @@ -718,6 +743,21 @@ func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con return fmt.Errorf("failed to list config Secrets: %w", err) } + // Verify that TLS cert for the VIPService has been successfully issued + // before attempting to advertise the service. + // This is so that in multi-cluster setups where some Ingresses succeed + // to issue certs and some do not (rate limits), clients are not pinned + // to a backend that is not able to serve HTTPS. + // The only exception is Ingresses with an HTTP endpoint enabled - if an + // Ingress has an HTTP endpoint enabled, it will be advertised even if the + // TLS cert is not yet provisioned. + hasCert, err := a.hasCerts(ctx, serviceName) + if err != nil { + return fmt.Errorf("error checking TLS credentials provisioned for service %q: %w", serviceName, err) + } + shouldBeAdvertised := (mode == serviceAdvertisementHTTPAndHTTPS) || + (mode == serviceAdvertisementHTTPS && hasCert) // if we only expose port 443 and don't have certs (yet), do not advertise + for _, secret := range secrets.Items { var updated bool for fileName, confB := range secret.Data { @@ -870,8 +910,8 @@ func ownersAreSetAndEqual(a, b *tailscale.VIPService) bool { // (domain) is a valid Kubernetes resource name. // https://github.com/tailscale/tailscale/blob/8b1e7f646ee4730ad06c9b70c13e7861b964949b/util/dnsname/dnsname.go#L99 // https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names -func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pgName, domain string) error { - secret := certSecret(pgName, r.tsNamespace, domain) +func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pgName, domain string, ing *networkingv1.Ingress) error { + secret := certSecret(pgName, r.tsNamespace, domain, ing) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, secret, nil); err != nil { return fmt.Errorf("failed to create or update Secret %s: %w", secret.Name, err) } @@ -966,9 +1006,14 @@ func certSecretRoleBinding(pgName, namespace, domain string) *rbacv1.RoleBinding // certSecret creates a Secret that will store the TLS certificate and private // key for the given domain. Domain must be a valid Kubernetes resource name. -func certSecret(pgName, namespace, domain string) *corev1.Secret { +func certSecret(pgName, namespace, domain string, ing *networkingv1.Ingress) *corev1.Secret { labels := certResourceLabels(pgName, domain) labels[kubetypes.LabelSecretType] = "certs" + // Labels that let us identify the Ingress resource lets us reconcile + // the Ingress when the TLS Secret is updated (for example, when TLS + // certs have been provisioned). 
+ labels[LabelParentName] = ing.Name + labels[LabelParentNamespace] = ing.Namespace return &corev1.Secret{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", @@ -989,9 +1034,9 @@ func certSecret(pgName, namespace, domain string) *corev1.Secret { func certResourceLabels(pgName, domain string) map[string]string { return map[string]string{ - kubetypes.LabelManaged: "true", - "tailscale.com/proxy-group": pgName, - "tailscale.com/domain": domain, + kubetypes.LabelManaged: "true", + labelProxyGroup: pgName, + labelDomain: domain, } } @@ -1004,3 +1049,28 @@ func (r *HAIngressReconciler) dnsNameForService(ctx context.Context, svc tailcfg } return s + "." + tcd, nil } + +// hasCerts checks if the TLS Secret for the given service has non-zero cert and key data. +func (r *HAIngressReconciler) hasCerts(ctx context.Context, svc tailcfg.ServiceName) (bool, error) { + domain, err := r.dnsNameForService(ctx, svc) + if err != nil { + return false, fmt.Errorf("failed to get DNS name for service: %w", err) + } + secret := &corev1.Secret{} + err = r.Get(ctx, client.ObjectKey{ + Namespace: r.tsNamespace, + Name: domain, + }, secret) + + if err != nil { + if apierrors.IsNotFound(err) { + return false, nil + } + return false, fmt.Errorf("failed to get TLS Secret: %w", err) + } + + cert := secret.Data[corev1.TLSCertKey] + key := secret.Data[corev1.TLSPrivateKeyKey] + + return len(cert) > 0 && len(key) > 0, nil +} diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 705d157cc..0ad424bd6 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -31,6 +31,7 @@ import ( "tailscale.com/ipn/ipnstate" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" "tailscale.com/types/ptr" ) @@ -59,7 +60,7 @@ func TestIngressPGReconciler(t *testing.T) { }, }, TLS: []networkingv1.IngressTLS{ - {Hosts: []string{"my-svc.tailnetxyz.ts.net"}}, + {Hosts: []string{"my-svc"}}, }, }, } @@ -67,12 +68,14 @@ func TestIngressPGReconciler(t *testing.T) { // Verify initial reconciliation expectReconciled(t, ingPGR, "default", "test-ingress") + populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") + expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:my-svc", false) verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) - // Verify cert resources were created for the first Ingress - expectEqual(t, fc, certSecret("test-pg", "operator-ns", "my-svc.ts.net")) + // Verify that Role and RoleBinding have been created for the first Ingress. + // Do not verify the cert Secret as that was already verified implicitly above. 
expectEqual(t, fc, certSecretRole("test-pg", "operator-ns", "my-svc.ts.net")) expectEqual(t, fc, certSecretRoleBinding("test-pg", "operator-ns", "my-svc.ts.net")) @@ -127,11 +130,13 @@ func TestIngressPGReconciler(t *testing.T) { // Verify second Ingress reconciliation expectReconciled(t, ingPGR, "default", "my-other-ingress") + populateTLSSecret(context.Background(), fc, "test-pg", "my-other-svc.ts.net") + expectReconciled(t, ingPGR, "default", "my-other-ingress") verifyServeConfig(t, fc, "svc:my-other-svc", false) verifyVIPService(t, ft, "svc:my-other-svc", []string{"443"}) - // Verify cert resources were created for the second Ingress - expectEqual(t, fc, certSecret("test-pg", "operator-ns", "my-other-svc.ts.net")) + // Verify that Role and RoleBinding have been created for the first Ingress. + // Do not verify the cert Secret as that was already verified implicitly above. expectEqual(t, fc, certSecretRole("test-pg", "operator-ns", "my-other-svc.ts.net")) expectEqual(t, fc, certSecretRoleBinding("test-pg", "operator-ns", "my-other-svc.ts.net")) @@ -231,7 +236,7 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { }, }, TLS: []networkingv1.IngressTLS{ - {Hosts: []string{"my-svc.tailnetxyz.ts.net"}}, + {Hosts: []string{"my-svc"}}, }, }, } @@ -239,15 +244,19 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { // Verify initial reconciliation expectReconciled(t, ingPGR, "default", "test-ingress") + populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") + expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:my-svc", false) verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) // Update the Ingress hostname and make sure the original VIPService is deleted. 
mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { - ing.Spec.TLS[0].Hosts[0] = "updated-svc.tailnetxyz.ts.net" + ing.Spec.TLS[0].Hosts[0] = "updated-svc" }) expectReconciled(t, ingPGR, "default", "test-ingress") + populateTLSSecret(context.Background(), fc, "test-pg", "updated-svc.ts.net") + expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:updated-svc", false) verifyVIPService(t, ft, "svc:updated-svc", []string{"443"}) verifyTailscaledConfig(t, fc, []string{"svc:updated-svc"}) @@ -468,6 +477,8 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { // Verify initial reconciliation with HTTP enabled expectReconciled(t, ingPGR, "default", "test-ingress") + populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") + expectReconciled(t, ingPGR, "default", "test-ingress") verifyVIPService(t, ft, "svc:my-svc", []string{"80", "443"}) verifyServeConfig(t, fc, "svc:my-svc", true) @@ -611,6 +622,7 @@ func verifyServeConfig(t *testing.T, fc client.Client, serviceName string, wantH } func verifyTailscaledConfig(t *testing.T, fc client.Client, expectedServices []string) { + t.Helper() var expected string if expectedServices != nil { expectedServicesJSON, err := json.Marshal(expectedServices) @@ -804,3 +816,28 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) { t.Errorf("incorrect owner refs after deletion\ngot: %+v\nwant: %+v", o.OwnerRefs, wantOwnerRefs) } } + +func populateTLSSecret(ctx context.Context, c client.Client, pgName, domain string) error { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: domain, + Namespace: "operator-ns", + Labels: map[string]string{ + kubetypes.LabelManaged: "true", + labelProxyGroup: pgName, + labelDomain: domain, + kubetypes.LabelSecretType: "certs", + }, + }, + Type: corev1.SecretTypeTLS, + Data: map[string][]byte{ + corev1.TLSCertKey: []byte("fake-cert"), + corev1.TLSPrivateKeyKey: []byte("fake-key"), + }, + } + + _, err := createOrUpdate(ctx, c, "operator-ns", secret, func(s *corev1.Secret) { + s.Data = secret.Data + }) + return err +} diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index b0f0b3576..a00257186 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -347,7 +347,7 @@ func runReconcilers(opts reconcilerOpts) { For(&networkingv1.Ingress{}). Named("ingress-pg-reconciler"). Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog))). - Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(ingressesFromPGStateSecret(mgr.GetClient(), startlog))). + Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(HAIngressesFromSecret(mgr.GetClient(), startlog))). Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). Complete(&HAIngressReconciler{ recorder: eventRecorder, @@ -1039,20 +1039,40 @@ func reconcileRequestsForPG(pg string, cl client.Client, ns string) []reconcile. 
return reqs } -func ingressesFromPGStateSecret(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { +func isTLSSecret(secret *corev1.Secret) bool { + return secret.Type == corev1.SecretTypeTLS && + secret.ObjectMeta.Labels[kubetypes.LabelManaged] == "true" && + secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == "certs" && + secret.ObjectMeta.Labels[labelDomain] != "" && + secret.ObjectMeta.Labels[labelProxyGroup] != "" +} + +func isPGStateSecret(secret *corev1.Secret) bool { + return secret.ObjectMeta.Labels[kubetypes.LabelManaged] == "true" && + secret.ObjectMeta.Labels[LabelParentType] == "proxygroup" && + secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == "state" +} + +// HAIngressesFromSecret returns a handler that returns reconcile requests for +// all HA Ingresses that should be reconciled in response to a Secret event. +func HAIngressesFromSecret(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { return func(ctx context.Context, o client.Object) []reconcile.Request { secret, ok := o.(*corev1.Secret) if !ok { logger.Infof("[unexpected] ProxyGroup handler triggered for an object that is not a ProxyGroup") return nil } - if secret.ObjectMeta.Labels[kubetypes.LabelManaged] != "true" { - return nil - } - if secret.ObjectMeta.Labels[LabelParentType] != "proxygroup" { - return nil + if isTLSSecret(secret) { + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: secret.ObjectMeta.Labels[LabelParentNamespace], + Name: secret.ObjectMeta.Labels[LabelParentName], + }, + }, + } } - if secret.ObjectMeta.Labels[kubetypes.LabelSecretType] != "state" { + if !isPGStateSecret(secret) { return nil } pgName, ok := secret.ObjectMeta.Labels[LabelParentName] From 2685484f26a4479c8a9340faea684aaa7f17f375 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 26 Mar 2025 01:48:01 +0000 Subject: [PATCH 0640/1708] Bump Alpine, link iptables back to legacy (#15428) Bumps Alpine 3.18 -> 3.19. Alpine 3.19 links iptables to nftables-based implementation that can break hosts that don't support nftables. Link iptables back to the legacy implementation till we have some certainty that changing to nftables based implementation will not break existing setups. 
Updates tailscale/tailscale#15328 Signed-off-by: Irbe Krumina --- ALPINE.txt | 2 +- Dockerfile | 4 +++- Dockerfile.base | 11 +++++++++-- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/ALPINE.txt b/ALPINE.txt index 55b698c77..318956c3d 100644 --- a/ALPINE.txt +++ b/ALPINE.txt @@ -1 +1 @@ -3.18 \ No newline at end of file +3.19 \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 32cb92ab0..015022e49 100644 --- a/Dockerfile +++ b/Dockerfile @@ -62,8 +62,10 @@ RUN GOARCH=$TARGETARCH go install -ldflags="\ -X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \ -v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot -FROM alpine:3.18 +FROM alpine:3.19 RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables +RUN rm /sbin/iptables && ln -s /sbin/iptables-legacy /sbin/iptables +RUN rm /sbin/ip6tables && ln -s /sbin/ip6tables-legacy /sbin/ip6tables COPY --from=build-env /go/bin/* /usr/local/bin/ # For compat with the previous run.sh, although ideally you should be diff --git a/Dockerfile.base b/Dockerfile.base index eb4f0a02a..b7e79a43c 100644 --- a/Dockerfile.base +++ b/Dockerfile.base @@ -1,5 +1,12 @@ # Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause -FROM alpine:3.18 -RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables iputils +FROM alpine:3.19 +RUN apk add --no-cache ca-certificates iptables iptables-legacy iproute2 ip6tables iputils +# Alpine 3.19 replaces legacy iptables with nftables based implementation. We +# can't be certain that all hosts that run Tailscale containers currently +# suppport nftables, so link back to legacy for backwards compatibility reasons. +# TODO(irbekrm): add some way how to determine if we still run on nodes that +# don't support nftables, so that we can eventually remove these symlinks. +RUN rm /sbin/iptables && ln -s /sbin/iptables-legacy /sbin/iptables +RUN rm /sbin/ip6tables && ln -s /sbin/ip6tables-legacy /sbin/ip6tables From d0e7af3830747076ae9c96301ed4053bd45a9c1c Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 20 Mar 2025 12:00:37 -0700 Subject: [PATCH 0641/1708] cmd/natc: add test and fix for ip exhaustion This is a very dumb fix as it has an unbounded worst case runtime. IP allocation needs to be done in a more sane way in a follow-up. Updates #15367 Signed-off-by: James Tucker --- cmd/natc/natc.go | 46 +++++++++++++++++++------- cmd/natc/natc_test.go | 76 +++++++++++++++++++++++++++++++++++++++---- 2 files changed, 104 insertions(+), 18 deletions(-) diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index 73ba116ff..31d6a5d26 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -94,18 +94,24 @@ func main() { } ignoreDstTable.Insert(pfx, true) } - var v4Prefixes []netip.Prefix + var ( + v4Prefixes []netip.Prefix + numV4DNSAddrs int + ) for _, s := range strings.Split(*v4PfxStr, ",") { p := netip.MustParsePrefix(strings.TrimSpace(s)) if p.Masked() != p { log.Fatalf("v4 prefix %v is not a masked prefix", p) } v4Prefixes = append(v4Prefixes, p) + numIPs := 1 << (32 - p.Bits()) + numV4DNSAddrs += numIPs } if len(v4Prefixes) == 0 { log.Fatalf("no v4 prefixes specified") } dnsAddr := v4Prefixes[0].Addr() + numV4DNSAddrs -= 1 // Subtract the dnsAddr allocated above. 
ts := &tsnet.Server{ Hostname: *hostname, } @@ -153,12 +159,13 @@ func main() { } c := &connector{ - ts: ts, - lc: lc, - dnsAddr: dnsAddr, - v4Ranges: v4Prefixes, - v6ULA: ula(uint16(*siteID)), - ignoreDsts: ignoreDstTable, + ts: ts, + lc: lc, + dnsAddr: dnsAddr, + v4Ranges: v4Prefixes, + numV4DNSAddrs: numV4DNSAddrs, + v6ULA: ula(uint16(*siteID)), + ignoreDsts: ignoreDstTable, } c.run(ctx) } @@ -177,6 +184,11 @@ type connector struct { // v4Ranges is the list of IPv4 ranges to advertise and assign addresses from. // These are masked prefixes. v4Ranges []netip.Prefix + + // numV4DNSAddrs is the total size of the IPv4 ranges in addresses, minus the + // dnsAddr allocation. + numV4DNSAddrs int + // v6ULA is the ULA prefix used by the app connector to assign IPv6 addresses. v6ULA netip.Prefix @@ -502,6 +514,7 @@ type perPeerState struct { mu sync.Mutex domainToAddr map[string][]netip.Addr addrToDomain *bart.Table[string] + numV4Allocs int } // domainForIP returns the domain name assigned to the given IP address and @@ -547,17 +560,25 @@ func (ps *perPeerState) isIPUsedLocked(ip netip.Addr) bool { // unusedIPv4Locked returns an unused IPv4 address from the available ranges. func (ps *perPeerState) unusedIPv4Locked() netip.Addr { + // All addresses have been allocated. + if ps.numV4Allocs >= ps.c.numV4DNSAddrs { + return netip.Addr{} + } + // TODO: skip ranges that have been exhausted - for _, r := range ps.c.v4Ranges { - ip := randV4(r) - for r.Contains(ip) { + // TODO: implement a much more efficient algorithm for finding unused IPs, + // this is fairly crazy. + for { + for _, r := range ps.c.v4Ranges { + ip := randV4(r) + if !r.Contains(ip) { + panic("error: randV4 returned invalid address") + } if !ps.isIPUsedLocked(ip) && ip != ps.c.dnsAddr { return ip } - ip = ip.Next() } } - return netip.Addr{} } // randV4 returns a random IPv4 address within the given prefix. 
@@ -583,6 +604,7 @@ func (ps *perPeerState) assignAddrsLocked(domain string) []netip.Addr { if !v4.IsValid() { return nil } + ps.numV4Allocs++ as16 := ps.c.v6ULA.Addr().As16() as4 := v4.As4() copy(as16[12:], as4[:]) diff --git a/cmd/natc/natc_test.go b/cmd/natc/natc_test.go index 1b6d7af7c..e42fa7e89 100644 --- a/cmd/natc/natc_test.go +++ b/cmd/natc/natc_test.go @@ -4,6 +4,8 @@ package main import ( + "errors" + "fmt" "net/netip" "slices" "testing" @@ -225,9 +227,10 @@ func TestDNSResponse(t *testing.T) { func TestPerPeerState(t *testing.T) { c := &connector{ - v4Ranges: []netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")}, - v6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), - dnsAddr: netip.MustParseAddr("100.64.1.1"), + v4Ranges: []netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")}, + v6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), + dnsAddr: netip.MustParseAddr("100.64.1.0"), + numV4DNSAddrs: (1<<(32-24) - 1), } ps := &perPeerState{c: c} @@ -328,9 +331,10 @@ func TestIgnoreDestination(t *testing.T) { func TestConnectorGenerateDNSResponse(t *testing.T) { c := &connector{ - v4Ranges: []netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")}, - v6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), - dnsAddr: netip.MustParseAddr("100.64.1.1"), + v4Ranges: []netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")}, + v6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), + dnsAddr: netip.MustParseAddr("100.64.1.0"), + numV4DNSAddrs: (1<<(32-24) - 1), } req := &dnsmessage.Message{ @@ -363,3 +367,63 @@ func TestConnectorGenerateDNSResponse(t *testing.T) { t.Errorf("generateDNSResponse() responses differ between calls") } } + +func TestIPPoolExhaustion(t *testing.T) { + smallPrefix := netip.MustParsePrefix("100.64.1.0/30") // Only 4 IPs: .0, .1, .2, .3 + c := &connector{ + v6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), + v4Ranges: []netip.Prefix{smallPrefix}, + dnsAddr: netip.MustParseAddr("100.64.1.0"), + numV4DNSAddrs: 3, + } + + ps := &perPeerState{c: c} + + assignedIPs := make(map[netip.Addr]string) + + domains := []string{"a.example.com", "b.example.com", "c.example.com", "d.example.com"} + + var errs []error + + for i := 0; i < 5; i++ { + for _, domain := range domains { + addrs, err := ps.ipForDomain(domain) + if err != nil { + errs = append(errs, fmt.Errorf("failed to get IP for domain %q: %w", domain, err)) + continue + } + + for _, addr := range addrs { + if d, ok := assignedIPs[addr]; ok { + if d != domain { + t.Errorf("IP %s reused for domain %q, previously assigned to %q", addr, domain, d) + } + } else { + assignedIPs[addr] = domain + } + } + } + } + + for addr, domain := range assignedIPs { + if addr.Is4() && !smallPrefix.Contains(addr) { + t.Errorf("IP %s for domain %q not in expected range %s", addr, domain, smallPrefix) + } + if addr.Is6() && !c.v6ULA.Contains(addr) { + t.Errorf("IP %s for domain %q not in expected range %s", addr, domain, c.v6ULA) + } + if addr == c.dnsAddr { + t.Errorf("IP %s for domain %q is the reserved DNS address", addr, domain) + } + } + + // expect one error for each iteration with the 4th domain + if len(errs) != 5 { + t.Errorf("Expected 5 errors, got %d: %v", len(errs), errs) + } + for _, err := range errs { + if !errors.Is(err, ErrNoIPsAvailable) { + t.Errorf("generateDNSResponse() error = %v, want ErrNoIPsAvailable", err) + } + } +} From e3c04c5d6c53bcdad05c94156ee7676a89054899 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 26 Mar 2025 11:58:26 +0000 Subject: [PATCH 0642/1708] 
build_docker.sh: bump default base image (#15432) We now have a tailscale/alpine-base:3.19 use that as the default base image. Updates tailscale/tailscale#15328 Signed-off-by: Irbe Krumina --- build_docker.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_docker.sh b/build_docker.sh index f9632ea0a..15105c2ef 100755 --- a/build_docker.sh +++ b/build_docker.sh @@ -16,7 +16,7 @@ eval "$(./build_dist.sh shellvars)" DEFAULT_TARGET="client" DEFAULT_TAGS="v${VERSION_SHORT},v${VERSION_MINOR}" -DEFAULT_BASE="tailscale/alpine-base:3.18" +DEFAULT_BASE="tailscale/alpine-base:3.19" # Set a few pre-defined OCI annotations. The source annotation is used by tools such as Renovate that scan the linked # Github repo to find release notes for any new image tags. Note that for official Tailscale images the default # annotations defined here will be overriden by release scripts that call this script. From fea74a60d529bcccbc8ded74644256bb6f6c7727 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 26 Mar 2025 13:29:38 +0000 Subject: [PATCH 0643/1708] cmd/k8s-operator,k8s-operator: disable HA Ingress before stable release (#15433) Temporarily make sure that the HA Ingress reconciler does not run, as we do not want to release this to stable just yet. Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- .../crds/tailscale.com_proxygroups.yaml | 2 +- .../deploy/manifests/operator.yaml | 2 +- cmd/k8s-operator/ingress-for-pg.go | 1 - cmd/k8s-operator/operator.go | 181 ------------------ k8s-operator/api.md | 2 +- .../apis/v1alpha1/types_proxygroup.go | 2 +- 6 files changed, 4 insertions(+), 186 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index 86e74e441..e101c201f 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -103,7 +103,7 @@ spec: pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ type: description: |- - Type of the ProxyGroup proxies. Supported types are egress and ingress. + Type of the ProxyGroup proxies. Currently the only supported type is egress. Type is immutable once a ProxyGroup is created. type: string enum: diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index dc8d0634c..aa79fefcb 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2876,7 +2876,7 @@ spec: type: array type: description: |- - Type of the ProxyGroup proxies. Supported types are egress and ingress. + Type of the ProxyGroup proxies. Currently the only supported type is egress. Type is immutable once a ProxyGroup is created. enum: - egress diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 3df5a07ee..687f70d7b 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -49,7 +49,6 @@ const ( // FinalizerNamePG is the finalizer used by the IngressPGReconciler FinalizerNamePG = "tailscale.com/ingress-pg-finalizer" - indexIngressProxyGroup = ".metadata.annotations.ingress-proxy-group" // annotationHTTPEndpoint can be used to configure the Ingress to expose an HTTP endpoint to tailnet (as // well as the default HTTPS endpoint). 
annotationHTTPEndpoint = "tailscale.com/http-endpoint" diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index a00257186..69ee51c9b 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -9,7 +9,6 @@ package main import ( "context" - "fmt" "net/http" "os" "regexp" @@ -40,7 +39,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "tailscale.com/client/local" "tailscale.com/client/tailscale" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -333,40 +331,6 @@ func runReconcilers(opts reconcilerOpts) { if err != nil { startlog.Fatalf("could not create ingress reconciler: %v", err) } - lc, err := opts.tsServer.LocalClient() - if err != nil { - startlog.Fatalf("could not get local client: %v", err) - } - id, err := id(context.Background(), lc) - if err != nil { - startlog.Fatalf("error determining stable ID of the operator's Tailscale device: %v", err) - } - ingressProxyGroupFilter := handler.EnqueueRequestsFromMapFunc(ingressesFromIngressProxyGroup(mgr.GetClient(), opts.log)) - err = builder. - ControllerManagedBy(mgr). - For(&networkingv1.Ingress{}). - Named("ingress-pg-reconciler"). - Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog))). - Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(HAIngressesFromSecret(mgr.GetClient(), startlog))). - Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). - Complete(&HAIngressReconciler{ - recorder: eventRecorder, - tsClient: opts.tsClient, - tsnetServer: opts.tsServer, - defaultTags: strings.Split(opts.proxyTags, ","), - Client: mgr.GetClient(), - logger: opts.log.Named("ingress-pg-reconciler"), - lc: lc, - operatorID: id, - tsNamespace: opts.tailscaleNamespace, - }) - if err != nil { - startlog.Fatalf("could not create ingress-pg-reconciler: %v", err) - } - if err := mgr.GetFieldIndexer().IndexField(context.Background(), new(networkingv1.Ingress), indexIngressProxyGroup, indexPGIngresses); err != nil { - startlog.Fatalf("failed setting up indexer for HA Ingresses: %v", err) - } - connectorFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("connector")) // If a ProxyClassChanges, enqueue all Connectors that have // .spec.proxyClass set to the name of this ProxyClass. @@ -1039,65 +1003,6 @@ func reconcileRequestsForPG(pg string, cl client.Client, ns string) []reconcile. return reqs } -func isTLSSecret(secret *corev1.Secret) bool { - return secret.Type == corev1.SecretTypeTLS && - secret.ObjectMeta.Labels[kubetypes.LabelManaged] == "true" && - secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == "certs" && - secret.ObjectMeta.Labels[labelDomain] != "" && - secret.ObjectMeta.Labels[labelProxyGroup] != "" -} - -func isPGStateSecret(secret *corev1.Secret) bool { - return secret.ObjectMeta.Labels[kubetypes.LabelManaged] == "true" && - secret.ObjectMeta.Labels[LabelParentType] == "proxygroup" && - secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == "state" -} - -// HAIngressesFromSecret returns a handler that returns reconcile requests for -// all HA Ingresses that should be reconciled in response to a Secret event. 
-func HAIngressesFromSecret(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { - return func(ctx context.Context, o client.Object) []reconcile.Request { - secret, ok := o.(*corev1.Secret) - if !ok { - logger.Infof("[unexpected] ProxyGroup handler triggered for an object that is not a ProxyGroup") - return nil - } - if isTLSSecret(secret) { - return []reconcile.Request{ - { - NamespacedName: types.NamespacedName{ - Namespace: secret.ObjectMeta.Labels[LabelParentNamespace], - Name: secret.ObjectMeta.Labels[LabelParentName], - }, - }, - } - } - if !isPGStateSecret(secret) { - return nil - } - pgName, ok := secret.ObjectMeta.Labels[LabelParentName] - if !ok { - return nil - } - - ingList := &networkingv1.IngressList{} - if err := cl.List(ctx, ingList, client.MatchingFields{indexIngressProxyGroup: pgName}); err != nil { - logger.Infof("error listing Ingresses, skipping a reconcile for event on Secret %s: %v", secret.Name, err) - return nil - } - reqs := make([]reconcile.Request, 0) - for _, ing := range ingList.Items { - reqs = append(reqs, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Namespace: ing.Namespace, - Name: ing.Name, - }, - }) - } - return reqs - } -} - // egressSvcsFromEgressProxyGroup is an event handler for egress ProxyGroups. It returns reconcile requests for all // user-created ExternalName Services that should be exposed on this ProxyGroup. func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { @@ -1128,36 +1033,6 @@ func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) } } -// ingressesFromIngressProxyGroup is an event handler for ingress ProxyGroups. It returns reconcile requests for all -// user-created Ingresses that should be exposed on this ProxyGroup. -func ingressesFromIngressProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { - return func(ctx context.Context, o client.Object) []reconcile.Request { - pg, ok := o.(*tsapi.ProxyGroup) - if !ok { - logger.Infof("[unexpected] ProxyGroup handler triggered for an object that is not a ProxyGroup") - return nil - } - if pg.Spec.Type != tsapi.ProxyGroupTypeIngress { - return nil - } - ingList := &networkingv1.IngressList{} - if err := cl.List(ctx, ingList, client.MatchingFields{indexIngressProxyGroup: pg.Name}); err != nil { - logger.Infof("error listing Ingresses: %v, skipping a reconcile for event on ProxyGroup %s", err, pg.Name) - return nil - } - reqs := make([]reconcile.Request, 0) - for _, svc := range ingList.Items { - reqs = append(reqs, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Namespace: svc.Namespace, - Name: svc.Name, - }, - }) - } - return reqs - } -} - // epsFromExternalNameService is an event handler for ExternalName Services that define a Tailscale egress service that // should be exposed on a ProxyGroup. It returns reconcile requests for EndpointSlices created for this Service. func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger, ns string) handler.MapFunc { @@ -1278,63 +1153,7 @@ func indexEgressServices(o client.Object) []string { return []string{o.GetAnnotations()[AnnotationProxyGroup]} } -// indexPGIngresses adds a local index to a cached Tailscale Ingresses meant to be exposed on a ProxyGroup. The index is -// used a list filter. 
-func indexPGIngresses(o client.Object) []string { - if !hasProxyGroupAnnotation(o) { - return nil - } - return []string{o.GetAnnotations()[AnnotationProxyGroup]} -} - -// serviceHandlerForIngressPG returns a handler for Service events that ensures that if the Service -// associated with an event is a backend Service for a tailscale Ingress with ProxyGroup annotation, -// the associated Ingress gets reconciled. -func serviceHandlerForIngressPG(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { - return func(ctx context.Context, o client.Object) []reconcile.Request { - ingList := networkingv1.IngressList{} - if err := cl.List(ctx, &ingList, client.InNamespace(o.GetNamespace())); err != nil { - logger.Debugf("error listing Ingresses: %v", err) - return nil - } - reqs := make([]reconcile.Request, 0) - for _, ing := range ingList.Items { - if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != tailscaleIngressClassName { - continue - } - if !hasProxyGroupAnnotation(&ing) { - continue - } - if ing.Spec.DefaultBackend != nil && ing.Spec.DefaultBackend.Service != nil && ing.Spec.DefaultBackend.Service.Name == o.GetName() { - reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) - } - for _, rule := range ing.Spec.Rules { - if rule.HTTP == nil { - continue - } - for _, path := range rule.HTTP.Paths { - if path.Backend.Service != nil && path.Backend.Service.Name == o.GetName() { - reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) - } - } - } - } - return reqs - } -} - func hasProxyGroupAnnotation(obj client.Object) bool { ing := obj.(*networkingv1.Ingress) return ing.Annotations[AnnotationProxyGroup] != "" } - -func id(ctx context.Context, lc *local.Client) (string, error) { - st, err := lc.StatusWithoutPeers(ctx) - if err != nil { - return "", fmt.Errorf("error getting tailscale status: %w", err) - } - if st.Self == nil { - return "", fmt.Errorf("unexpected: device's status does not contain node's metadata") - } - return string(st.Self.ID), nil -} diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 190f99d24..f885ded1e 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -600,7 +600,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Supported types are egress and ingress.
        Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress]
        Type: string
        | +| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Currently the only supported type is egress.
        Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress]
        Type: string
        | | `tags` _[Tags](#tags)_ | Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s].
        If you specify custom tags here, make sure you also make the operator
        an owner of these tags.
        See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
        Tags cannot be changed once a ProxyGroup device has been created.
        Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
        Type: string
        | | `replicas` _integer_ | Replicas specifies how many replicas to create the StatefulSet with.
        Defaults to 2. | | Minimum: 0
        | | `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix is the hostname prefix to use for tailnet devices created
        by the ProxyGroup. Each device will have the integer number from its
        StatefulSet pod appended to this prefix to form the full hostname.
        HostnamePrefix can contain lower case letters, numbers and dashes, it
        must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$`
        Type: string
        | diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index f95fc58d0..cb9f678f8 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -48,7 +48,7 @@ type ProxyGroupList struct { } type ProxyGroupSpec struct { - // Type of the ProxyGroup proxies. Supported types are egress and ingress. + // Type of the ProxyGroup proxies. Currently the only supported type is egress. // Type is immutable once a ProxyGroup is created. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ProxyGroup type is immutable" Type ProxyGroupType `json:"type"` From 1ec1a60c107b919a8561a10464a635901ba51d41 Mon Sep 17 00:00:00 2001 From: kari-ts <135075563+kari-ts@users.noreply.github.com> Date: Wed, 26 Mar 2025 14:22:21 -0700 Subject: [PATCH 0644/1708] VERSION.txt: this is v1.83.0 (#15443) Signed-off-by: kari-ts --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index dbd41264a..6b4de0a42 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.81.0 +1.83.0 From a3bc0bcb0a42fec00e3f99c4bf17cae0ea8b15dd Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 24 Mar 2025 09:08:20 -0700 Subject: [PATCH 0645/1708] net/dns: add debug envknob to enable dual stack MagicDNS Updates #15404 Change-Id: Ic754cc54113b1660b7071b40babb9d3c0e25b2e1 Signed-off-by: Brad Fitzpatrick --- net/dns/config.go | 25 ++++++++++++++++++++++--- net/dns/manager.go | 4 ++-- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/net/dns/config.go b/net/dns/config.go index 67d3d753c..b2f4e6dbd 100644 --- a/net/dns/config.go +++ b/net/dns/config.go @@ -10,6 +10,8 @@ import ( "net/netip" "sort" + "tailscale.com/control/controlknobs" + "tailscale.com/envknob" "tailscale.com/net/dns/publicdns" "tailscale.com/net/dns/resolver" "tailscale.com/net/tsaddr" @@ -47,11 +49,28 @@ type Config struct { OnlyIPv6 bool } -func (c *Config) serviceIP() netip.Addr { +var magicDNSDualStack = envknob.RegisterBool("TS_DEBUG_MAGIC_DNS_DUAL_STACK") + +// serviceIPs returns the list of service IPs where MagicDNS is reachable. +// +// The provided knobs may be nil. +func (c *Config) serviceIPs(knobs *controlknobs.Knobs) []netip.Addr { if c.OnlyIPv6 { - return tsaddr.TailscaleServiceIPv6() + return []netip.Addr{tsaddr.TailscaleServiceIPv6()} } - return tsaddr.TailscaleServiceIP() + + // TODO(bradfitz,mikeodr,raggi): include IPv6 here too; tailscale/tailscale#15404 + // And add a controlknobs knob to disable dual stack. + // + // For now, opt-in for testing. + if magicDNSDualStack() { + return []netip.Addr{ + tsaddr.TailscaleServiceIP(), + tsaddr.TailscaleServiceIPv6(), + } + } + + return []netip.Addr{tsaddr.TailscaleServiceIP()} } // WriteToBufioWriter write a debug version of c for logs to w, omitting diff --git a/net/dns/manager.go b/net/dns/manager.go index 1e9eb7fe7..0bfbaa077 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -307,7 +307,7 @@ func (m *Manager) compileConfig(cfg Config) (rcfg resolver.Config, ocfg OSConfig // through quad-100. rcfg.Routes = routes rcfg.Routes["."] = cfg.DefaultResolvers - ocfg.Nameservers = []netip.Addr{cfg.serviceIP()} + ocfg.Nameservers = cfg.serviceIPs(m.knobs) return rcfg, ocfg, nil } @@ -345,7 +345,7 @@ func (m *Manager) compileConfig(cfg Config) (rcfg resolver.Config, ocfg OSConfig // or routes + MagicDNS, or just MagicDNS, or on an OS that cannot // split-DNS. Install a split config pointing at quad-100. 
rcfg.Routes = routes - ocfg.Nameservers = []netip.Addr{cfg.serviceIP()} + ocfg.Nameservers = cfg.serviceIPs(m.knobs) var baseCfg *OSConfig // base config; non-nil if/when known From 7fc9099cf84d8d9ba4e4d6856511ed607507aa79 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Fri, 14 Mar 2025 21:20:12 -0700 Subject: [PATCH 0646/1708] cmd/tailscale: fix default for `tailscale set --accept-dns` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The default values for `tailscale up` and `tailscale set` are supposed to agree on all common flags. But they don’t for `--accept-dns`: user@host:~$ tailscale up --help 2>&1 | grep -A1 accept-dns --accept-dns, --accept-dns=false accept DNS configuration from the admin panel (default true) user@host:~$ tailscale set --help 2>&1 | grep -A1 accept-dns --accept-dns, --accept-dns=false accept DNS configuration from the admin panel Luckily, `tailscale set` uses `ipn.MaskedPrefs`, so the default values don’t logically matter. But someone will get the wrong idea if they trust the `tailscale set --help` documentation. This patch makes `--accept-dns` default to true in both commands and also introduces `TestSetDefaultsMatchUpDefaults` to prevent any future drift. Fixes: #15319 Signed-off-by: Simon Law --- cmd/tailscale/cli/set.go | 2 +- cmd/tailscale/cli/set_test.go | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index e8e5f0c51..292bfef9b 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -69,7 +69,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf.StringVar(&setArgs.profileName, "nickname", "", "nickname for the current account") setf.BoolVar(&setArgs.acceptRoutes, "accept-routes", false, "accept routes advertised by other Tailscale nodes") - setf.BoolVar(&setArgs.acceptDNS, "accept-dns", false, "accept DNS configuration from the admin panel") + setf.BoolVar(&setArgs.acceptDNS, "accept-dns", true, "accept DNS configuration from the admin panel") setf.StringVar(&setArgs.exitNodeIP, "exit-node", "", "Tailscale exit node (IP or base name) for internet traffic, or empty string to not use an exit node") setf.BoolVar(&setArgs.exitNodeAllowLANAccess, "exit-node-allow-lan-access", false, "Allow direct access to the local network when routing traffic via an exit node") setf.BoolVar(&setArgs.shieldsUp, "shields-up", false, "don't allow incoming connections") diff --git a/cmd/tailscale/cli/set_test.go b/cmd/tailscale/cli/set_test.go index 15305c3ce..a2f211f8c 100644 --- a/cmd/tailscale/cli/set_test.go +++ b/cmd/tailscale/cli/set_test.go @@ -4,6 +4,7 @@ package cli import ( + "flag" "net/netip" "reflect" "testing" @@ -129,3 +130,24 @@ func TestCalcAdvertiseRoutesForSet(t *testing.T) { }) } } + +// TestSetDefaultsMatchUpDefaults is meant to ensure that the default values +// for `tailscale set` and `tailscale up` are the same. +// Since `tailscale set` only sets preferences that are explicitly mentioned, +// the default values for its flags are only used for `--help` documentation. 
+func TestSetDefaultsMatchUpDefaults(t *testing.T) { + upFlagSet.VisitAll(func(up *flag.Flag) { + if preflessFlag(up.Name) { + return + } + + set := setFlagSet.Lookup(up.Name) + if set == nil { + return + } + + if set.DefValue != up.DefValue { + t.Errorf("--%s: set defaults to %q, but up defaults to %q", up.Name, set.DefValue, up.DefValue) + } + }) +} From e9324236e83e78e8fe588e38166d466a80aff150 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Tue, 18 Mar 2025 15:46:57 -0700 Subject: [PATCH 0647/1708] cmd/tailscale: fix default for `tailscale set --accept-routes` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The default values for `tailscale up` and `tailscale set` are supposed to agree for all common flags. But they don’t for `--accept-routes` on Windows and from the Mac OS App Store, because `tailscale up` computes this value based on the operating system: user@host:~$ tailscale up --help 2>&1 | grep -A1 accept-routes --accept-dns, --accept-dns=false accept DNS configuration from the admin panel (default true) user@host:~$ tailscale set --help 2>&1 | grep -A1 accept-routes --accept-dns, --accept-dns=false accept DNS configuration from the admin panel Luckily, `tailscale set` uses `ipn.MaskedPrefs`, so the default values don’t logically matter. But someone will get the wrong idea if they trust the `tailscale set --help` documentation. In addition, `ipn.Prefs.RouteAll` defaults to true so it disagrees with both of the flags above. This patch makes `--accept-routes` use the same logic for in both commands by hoisting the logic that was buried in `cmd/tailscale/cli` to `ipn.Prefs.DefaultRouteAll`. Then, all three of defaults can agree. Fixes: #15319 Signed-off-by: Simon Law --- cmd/tailscale/cli/set.go | 2 +- cmd/tailscale/cli/up.go | 11 ++--------- ipn/prefs.go | 19 +++++++++++++++++-- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 292bfef9b..07b3fe9ce 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -68,7 +68,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf := newFlagSet("set") setf.StringVar(&setArgs.profileName, "nickname", "", "nickname for the current account") - setf.BoolVar(&setArgs.acceptRoutes, "accept-routes", false, "accept routes advertised by other Tailscale nodes") + setf.BoolVar(&setArgs.acceptRoutes, "accept-routes", acceptRouteDefault(goos), "accept routes advertised by other Tailscale nodes") setf.BoolVar(&setArgs.acceptDNS, "accept-dns", true, "accept DNS configuration from the admin panel") setf.StringVar(&setArgs.exitNodeIP, "exit-node", "", "Tailscale exit node (IP or base name) for internet traffic, or empty string to not use an exit node") setf.BoolVar(&setArgs.exitNodeAllowLANAccess, "exit-node-allow-lan-access", false, "Allow direct access to the local network when routing traffic via an exit node") diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 31f7eb956..26db85f13 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -39,7 +39,6 @@ import ( "tailscale.com/types/preftype" "tailscale.com/types/views" "tailscale.com/util/dnsname" - "tailscale.com/version" "tailscale.com/version/distro" ) @@ -79,14 +78,8 @@ func effectiveGOOS() string { // acceptRouteDefault returns the CLI's default value of --accept-routes as // a function of the platform it's running on. 
func acceptRouteDefault(goos string) bool { - switch goos { - case "windows": - return true - case "darwin": - return version.IsSandboxedMacOS() - default: - return false - } + var p *ipn.Prefs + return p.DefaultRouteAll(goos) } var upFlagSet = newUpFlagSet(effectiveGOOS(), &upArgsGlobal, "up") diff --git a/ipn/prefs.go b/ipn/prefs.go index f5406f3b7..98f04dfa9 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -29,6 +29,7 @@ import ( "tailscale.com/types/views" "tailscale.com/util/dnsname" "tailscale.com/util/syspolicy" + "tailscale.com/version" ) // DefaultControlURL is the URL base of the control plane @@ -664,7 +665,7 @@ func NewPrefs() *Prefs { // Provide default values for options which might be missing // from the json data for any reason. The json can still // override them to false. - return &Prefs{ + p := &Prefs{ // ControlURL is explicitly not set to signal that // it's not yet configured, which relaxes the CLI "up" // safety net features. It will get set to DefaultControlURL @@ -672,7 +673,6 @@ func NewPrefs() *Prefs { // later anyway. ControlURL: "", - RouteAll: true, CorpDNS: true, WantRunning: false, NetfilterMode: preftype.NetfilterOn, @@ -682,6 +682,8 @@ func NewPrefs() *Prefs { Apply: opt.Bool("unset"), }, } + p.RouteAll = p.DefaultRouteAll(runtime.GOOS) + return p } // ControlURLOrDefault returns the coordination server's URL base. @@ -711,6 +713,19 @@ func (p *Prefs) ControlURLOrDefault() string { return DefaultControlURL } +// DefaultRouteAll returns the default value of [Prefs.RouteAll] as a function +// of the platform it's running on. +func (p *Prefs) DefaultRouteAll(goos string) bool { + switch goos { + case "windows": + return true + case "darwin": + return version.IsSandboxedMacOS() + default: + return false + } +} + // AdminPageURL returns the admin web site URL for the current ControlURL. func (p PrefsView) AdminPageURL() string { return p.ж.AdminPageURL() } From a8c3490614d7cc269a664fd51759e9b62305ec1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Blanco?= Date: Fri, 28 Mar 2025 12:04:49 +0100 Subject: [PATCH 0648/1708] install.sh - fix DNF 5 detection on all locales (#15325) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Raúl Blanco --- scripts/installer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/installer.sh b/scripts/installer.sh index f3671aff8..0b360b8a1 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -521,7 +521,7 @@ main() { dnf) # DNF 5 has a different argument format; determine which one we have. DNF_VERSION="3" - if dnf --version | grep -q '^dnf5 version'; then + if LANG=C.UTF-8 dnf --version | grep -q '^dnf5 version'; then DNF_VERSION="5" fi From 272854df416793fae9b93eb0ee3695918f7ab4d9 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 14 Mar 2025 15:17:26 -0500 Subject: [PATCH 0649/1708] ipn/ipnlocal: unconfigure wgengine when switching profiles LocalBackend transitions to ipn.NoState when switching to a different (or new) profile. When this happens, we should unconfigure wgengine to clear routes, DNS configuration, firewall rules that block all traffic except to the exit node, etc. In this PR, we update (*LocalBackend).enterStateLockedOnEntry to do just that. 
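In outline, the relevant arm of the state switch now covers both states
(a sketch that mirrors the diff below, not the complete function):

	case ipn.Stopped, ipn.NoState:
		// Push zero-value configs to the engine to clear routes, DNS
		// configuration, and the firewall rules installed for the
		// previous profile.
		err := b.e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{})
		if err != nil {
			b.logf("Reconfig(down): %v", err)
		}
		if newState == ipn.Stopped && authURL == "" {
			systemd.Status("Stopped; run 'tailscale up' to log in")
		}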
Fixes #15316 Updates tailscale/corp#23967 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 8 +- ipn/ipnlocal/local_test.go | 24 +- ipn/ipnlocal/state_test.go | 474 +++++++++++++++++++++++++++++++++++++ 3 files changed, 494 insertions(+), 12 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 11da8c734..048b5f0c4 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5707,13 +5707,15 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock } b.blockEngineUpdates(true) fallthrough - case ipn.Stopped: + case ipn.Stopped, ipn.NoState: + // Unconfigure the engine if it has stopped (WantRunning is set to false) + // or if we've switched to a different profile and the state is unknown. err := b.e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) if err != nil { b.logf("Reconfig(down): %v", err) } - if authURL == "" { + if newState == ipn.Stopped && authURL == "" { systemd.Status("Stopped; run 'tailscale up' to log in") } case ipn.Starting, ipn.NeedsMachineAuth: @@ -5727,8 +5729,6 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock addrStrs = append(addrStrs, p.Addr().String()) } systemd.Status("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) - case ipn.NoState: - // Do nothing. default: b.logf("[unexpected] unknown newState %#v", newState) } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 5b74b8180..2579590a8 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4407,19 +4407,27 @@ func TestNotificationTargetMatch(t *testing.T) { type newTestControlFn func(tb testing.TB, opts controlclient.Options) controlclient.Client func newLocalBackendWithTestControl(t *testing.T, enableLogging bool, newControl newTestControlFn) *LocalBackend { + return newLocalBackendWithSysAndTestControl(t, enableLogging, new(tsd.System), newControl) +} + +func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys *tsd.System, newControl newTestControlFn) *LocalBackend { logf := logger.Discard if enableLogging { logf = tstest.WhileTestRunningLogger(t) } - sys := new(tsd.System) - store := new(mem.Store) - sys.Set(store) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) - if err != nil { - t.Fatalf("NewFakeUserspaceEngine: %v", err) + + if _, hasStore := sys.StateStore.GetOK(); !hasStore { + store := new(mem.Store) + sys.Set(store) + } + if _, hasEngine := sys.Engine.GetOK(); !hasEngine { + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) + if err != nil { + t.Fatalf("NewFakeUserspaceEngine: %v", err) + } + t.Cleanup(e.Close) + sys.Set(e) } - t.Cleanup(e.Close) - sys.Set(e) b, err := NewLocalBackend(logf, logid.PublicID{}, sys, 0) if err != nil { diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index a4180de86..3c22b66be 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -5,26 +5,46 @@ package ipnlocal import ( "context" + "errors" + "net/netip" + "strings" "sync" "sync/atomic" "testing" "time" qt "github.com/frankban/quicktest" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "tailscale.com/control/controlclient" "tailscale.com/envknob" "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" "tailscale.com/ipn/store/mem" + "tailscale.com/net/dns" + "tailscale.com/net/netmon" + "tailscale.com/net/packet" + "tailscale.com/net/tsdial" "tailscale.com/tailcfg" 
"tailscale.com/tsd" "tailscale.com/tstest" + "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/netmap" "tailscale.com/types/persist" + "tailscale.com/types/preftype" + "tailscale.com/util/dnsname" + "tailscale.com/util/mak" + "tailscale.com/util/must" "tailscale.com/wgengine" + "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/magicsock" + "tailscale.com/wgengine/router" + "tailscale.com/wgengine/wgcfg" + "tailscale.com/wgengine/wgint" ) // notifyThrottler receives notifications from an ipn.Backend, blocking @@ -170,6 +190,14 @@ func (cc *mockControl) send(err error, url string, loginFinished bool, nm *netma } } +func (cc *mockControl) authenticated(nm *netmap.NetworkMap) { + if selfUser, ok := nm.UserProfiles[nm.SelfNode.User()]; ok { + cc.persist.UserProfile = *selfUser.AsStruct() + } + cc.persist.NodeID = nm.SelfNode.StableID() + cc.send(nil, "", true, nm) +} + // called records that a particular function name was called. func (cc *mockControl) called(s string) { cc.mu.Lock() @@ -1076,3 +1104,449 @@ func TestWGEngineStatusRace(t *testing.T) { wg.Wait() wantState(ipn.Running) } + +// TestEngineReconfigOnStateChange verifies that wgengine is properly reconfigured +// when the LocalBackend's state changes, such as when the user logs in, switches +// profiles, or disconnects from Tailscale. +func TestEngineReconfigOnStateChange(t *testing.T) { + enableLogging := false + connect := &ipn.MaskedPrefs{Prefs: ipn.Prefs{WantRunning: true}, WantRunningSet: true} + disconnect := &ipn.MaskedPrefs{Prefs: ipn.Prefs{WantRunning: false}, WantRunningSet: true} + node1 := testNetmapForNode(1, "node-1", []netip.Prefix{netip.MustParsePrefix("100.64.1.1/32")}) + node2 := testNetmapForNode(2, "node-2", []netip.Prefix{netip.MustParsePrefix("100.64.1.2/32")}) + routesWithQuad100 := func(extra ...netip.Prefix) []netip.Prefix { + return append(extra, netip.MustParsePrefix("100.100.100.100/32")) + } + hostsFor := func(nm *netmap.NetworkMap) map[dnsname.FQDN][]netip.Addr { + var hosts map[dnsname.FQDN][]netip.Addr + appendNode := func(n tailcfg.NodeView) { + addrs := make([]netip.Addr, 0, n.Addresses().Len()) + for _, addr := range n.Addresses().All() { + addrs = append(addrs, addr.Addr()) + } + mak.Set(&hosts, must.Get(dnsname.ToFQDN(n.Name())), addrs) + } + if nm != nil && nm.SelfNode.Valid() { + appendNode(nm.SelfNode) + } + for _, n := range nm.Peers { + appendNode(n) + } + return hosts + } + + tests := []struct { + name string + steps func(*testing.T, *LocalBackend, func() *mockControl) + wantState ipn.State + wantCfg *wgcfg.Config + wantRouterCfg *router.Config + wantDNSCfg *dns.Config + }{ + { + name: "Initial", + // The configs are nil until the the LocalBackend is started. + wantState: ipn.NoState, + wantCfg: nil, + wantRouterCfg: nil, + wantDNSCfg: nil, + }, + { + name: "Start", + steps: func(t *testing.T, lb *LocalBackend, _ func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + }, + // Once started, all configs must be reset and have their zero values. + wantState: ipn.NeedsLogin, + wantCfg: &wgcfg.Config{}, + wantRouterCfg: &router.Config{}, + wantDNSCfg: &dns.Config{}, + }, + { + name: "Start/Connect", + steps: func(t *testing.T, lb *LocalBackend, _ func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + }, + // Same if WantRunning is true, but the auth is not completed yet. 
+ wantState: ipn.NeedsLogin, + wantCfg: &wgcfg.Config{}, + wantRouterCfg: &router.Config{}, + wantDNSCfg: &dns.Config{}, + }, + { + name: "Start/Connect/Login", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + }, + // After the auth is completed, the configs must be updated to reflect the node's netmap. + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Name: "tailscale", + NodeID: node1.SelfNode.StableID(), + Peers: []wgcfg.Peer{}, + Addresses: node1.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node1.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), + }, + }, + { + name: "Start/Connect/Login/Disconnect", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + mustDo2(t)(lb.EditPrefs(disconnect)) + }, + // After disconnecting, all configs must be reset and have their zero values. + wantState: ipn.Stopped, + wantCfg: &wgcfg.Config{}, + wantRouterCfg: &router.Config{}, + wantDNSCfg: &dns.Config{}, + }, + { + name: "Start/Connect/Login/NewProfile", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + mustDo(t)(lb.NewProfile()) + }, + // After switching to a new, empty profile, all configs should be reset + // and have their zero values until the auth is completed. + wantState: ipn.NeedsLogin, + wantCfg: &wgcfg.Config{}, + wantRouterCfg: &router.Config{}, + wantDNSCfg: &dns.Config{}, + }, + { + name: "Start/Connect/Login/NewProfile/Login", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + mustDo(t)(lb.NewProfile()) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node2) + }, + // Once the auth is completed, the configs must be updated to reflect the node's netmap. + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Name: "tailscale", + NodeID: node2.SelfNode.StableID(), + Peers: []wgcfg.Peer{}, + Addresses: node2.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node2.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node2), + }, + }, + { + name: "Start/Connect/Login/SwitchProfile", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + profileID := lb.CurrentProfile().ID() + mustDo(t)(lb.NewProfile()) + cc().authenticated(node2) + mustDo(t)(lb.SwitchProfile(profileID)) + }, + // After switching to an existing profile, all configs must be reset + // and have their zero values until the (non-interactive) login is completed. 
+ wantState: ipn.NoState, + wantCfg: &wgcfg.Config{}, + wantRouterCfg: &router.Config{}, + wantDNSCfg: &dns.Config{}, + }, + { + name: "Start/Connect/Login/SwitchProfile/NonInteractiveLogin", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + profileID := lb.CurrentProfile().ID() + mustDo(t)(lb.NewProfile()) + cc().authenticated(node2) + mustDo(t)(lb.SwitchProfile(profileID)) + cc().authenticated(node1) // complete the login + }, + // After switching profiles and completing the auth, the configs + // must be updated to reflect the node's netmap. + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Name: "tailscale", + NodeID: node1.SelfNode.StableID(), + Peers: []wgcfg.Peer{}, + Addresses: node1.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node1.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lb, engine, cc := newLocalBackendWithMockEngineAndControl(t, enableLogging) + + if tt.steps != nil { + tt.steps(t, lb, cc) + } + + if gotState := lb.State(); gotState != tt.wantState { + t.Errorf("State: got %v; want %v", gotState, tt.wantState) + } + + opts := []cmp.Option{ + cmpopts.EquateComparable(key.NodePublic{}, netip.Addr{}, netip.Prefix{}), + } + if diff := cmp.Diff(tt.wantCfg, engine.Config(), opts...); diff != "" { + t.Errorf("wgcfg.Config(+got -want): %v", diff) + } + if diff := cmp.Diff(tt.wantRouterCfg, engine.RouterConfig(), opts...); diff != "" { + t.Errorf("router.Config(+got -want): %v", diff) + } + if diff := cmp.Diff(tt.wantDNSCfg, engine.DNSConfig(), opts...); diff != "" { + t.Errorf("dns.Config(+got -want): %v", diff) + } + }) + } +} + +func testNetmapForNode(userID tailcfg.UserID, name string, addresses []netip.Prefix) *netmap.NetworkMap { + const ( + domain = "example.com" + magicDNSSuffix = ".test.ts.net" + ) + user := &tailcfg.UserProfile{ + ID: userID, + DisplayName: name, + LoginName: strings.Join([]string{name, domain}, "@"), + } + self := &tailcfg.Node{ + ID: tailcfg.NodeID(1000 + userID), + StableID: tailcfg.StableNodeID("stable-" + name), + User: user.ID, + Name: name + magicDNSSuffix, + Addresses: addresses, + MachineAuthorized: true, + } + return &netmap.NetworkMap{ + SelfNode: self.View(), + Name: self.Name, + Domain: domain, + UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ + user.ID: user.View(), + }, + } +} + +func mustDo(t *testing.T) func(error) { + t.Helper() + return func(err error) { + t.Helper() + if err != nil { + t.Fatal(err) + } + } +} + +func mustDo2(t *testing.T) func(any, error) { + t.Helper() + return func(_ any, err error) { + t.Helper() + if err != nil { + t.Fatal(err) + } + } +} + +func newLocalBackendWithMockEngineAndControl(t *testing.T, enableLogging bool) (*LocalBackend, *mockEngine, func() *mockControl) { + t.Helper() + + logf := logger.Discard + if enableLogging { + logf = tstest.WhileTestRunningLogger(t) + } + + dialer := &tsdial.Dialer{Logf: logf} + dialer.SetNetMon(netmon.NewStatic()) + + sys := &tsd.System{} + sys.Set(dialer) + sys.Set(dialer.NetMon()) + + magicConn, err := magicsock.NewConn(magicsock.Options{ + Logf: logf, + NetMon: dialer.NetMon(), + Metrics: sys.UserMetricsRegistry(), + 
HealthTracker: sys.HealthTracker(), + DisablePortMapper: true, + }) + if err != nil { + t.Fatalf("NewConn failed: %v", err) + } + magicConn.SetNetworkUp(dialer.NetMon().InterfaceState().AnyInterfaceUp()) + sys.Set(magicConn) + + engine := newMockEngine() + sys.Set(engine) + t.Cleanup(func() { + engine.Close() + <-engine.Done() + }) + + lb := newLocalBackendWithSysAndTestControl(t, enableLogging, sys, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + return newClient(tb, opts) + }) + return lb, engine, func() *mockControl { return lb.cc.(*mockControl) } +} + +var _ wgengine.Engine = (*mockEngine)(nil) + +// mockEngine implements [wgengine.Engine]. +type mockEngine struct { + done chan struct{} // closed when Close is called + + mu sync.Mutex // protects all following fields + closed bool + cfg *wgcfg.Config + routerCfg *router.Config + dnsCfg *dns.Config + + filter, jailedFilter *filter.Filter + + statusCb wgengine.StatusCallback +} + +func newMockEngine() *mockEngine { + return &mockEngine{ + done: make(chan struct{}), + } +} + +func (e *mockEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, dnsCfg *dns.Config) error { + e.mu.Lock() + defer e.mu.Unlock() + if e.closed { + return errors.New("engine closed") + } + e.cfg = cfg + e.routerCfg = routerCfg + e.dnsCfg = dnsCfg + return nil +} + +func (e *mockEngine) Config() *wgcfg.Config { + e.mu.Lock() + defer e.mu.Unlock() + return e.cfg +} + +func (e *mockEngine) RouterConfig() *router.Config { + e.mu.Lock() + defer e.mu.Unlock() + return e.routerCfg +} + +func (e *mockEngine) DNSConfig() *dns.Config { + e.mu.Lock() + defer e.mu.Unlock() + return e.dnsCfg +} + +func (e *mockEngine) PeerForIP(netip.Addr) (_ wgengine.PeerForIP, ok bool) { + return wgengine.PeerForIP{}, false +} + +func (e *mockEngine) GetFilter() *filter.Filter { + e.mu.Lock() + defer e.mu.Unlock() + return e.filter +} + +func (e *mockEngine) SetFilter(f *filter.Filter) { + e.mu.Lock() + e.filter = f + e.mu.Unlock() +} + +func (e *mockEngine) GetJailedFilter() *filter.Filter { + e.mu.Lock() + defer e.mu.Unlock() + return e.jailedFilter +} + +func (e *mockEngine) SetJailedFilter(f *filter.Filter) { + e.mu.Lock() + e.jailedFilter = f + e.mu.Unlock() +} + +func (e *mockEngine) SetStatusCallback(cb wgengine.StatusCallback) { + e.mu.Lock() + e.statusCb = cb + e.mu.Unlock() +} + +func (e *mockEngine) RequestStatus() { + e.mu.Lock() + cb := e.statusCb + e.mu.Unlock() + if cb != nil { + cb(&wgengine.Status{AsOf: time.Now()}, nil) + } +} + +func (e *mockEngine) PeerByKey(key.NodePublic) (_ wgint.Peer, ok bool) { + return wgint.Peer{}, false +} + +func (e *mockEngine) SetNetworkMap(*netmap.NetworkMap) {} + +func (e *mockEngine) UpdateStatus(*ipnstate.StatusBuilder) {} + +func (e *mockEngine) Ping(ip netip.Addr, pingType tailcfg.PingType, size int, cb func(*ipnstate.PingResult)) { + cb(&ipnstate.PingResult{IP: ip.String(), Err: "not implemented"}) +} + +func (e *mockEngine) InstallCaptureHook(packet.CaptureCallback) {} + +func (e *mockEngine) Close() { + e.mu.Lock() + defer e.mu.Unlock() + if e.closed { + return + } + e.closed = true + close(e.done) +} + +func (e *mockEngine) Done() <-chan struct{} { + return e.done +} From 6a9a7f35d96664110753e16b5ff6bcd29eca70ed Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 24 Mar 2025 15:40:12 -0500 Subject: [PATCH 0650/1708] cmd/tailscaled,ipn/{auditlog,ipnlocal},tsd: omit auditlog unless explicitly imported In this PR, we update ipnlocal.LocalBackend to allow registering callbacks for control client creation and 
profile changes. We also allow to register ipnauth.AuditLogFunc to be called when an auditable action is attempted. We then use all this to invert the dependency between the auditlog and ipnlocal packages and make the auditlog functionality optional, where it only registers its callbacks via ipnlocal-provided hooks when the auditlog package is imported. We then underscore-import it when building tailscaled for Windows, and we'll explicitly import it when building xcode/ipn-go-bridge for macOS. Since there's no default log-store location for macOS, we'll also need to call auditlog.SetStoreFilePath to specify where pending audit logs should be persisted. Fixes #15394 Updates tailscale/corp#26435 Updates tailscale/corp#27012 Signed-off-by: Nick Khyl --- cmd/k8s-operator/depaware.txt | 1 - cmd/tailscaled/depaware.txt | 2 +- cmd/tailscaled/tailscaled_windows.go | 1 + ipn/auditlog/auditlog.go | 8 +- ipn/auditlog/auditlog_test.go | 7 +- ipn/auditlog/extension.go | 189 ++++++++++++++++ ipn/auditlog/store.go | 62 +++++ ipn/ipnlocal/desktop_sessions.go | 6 +- ipn/ipnlocal/local.go | 212 +++++++++++++----- tsd/tsd.go | 4 - .../tailscaled_deps_test_windows.go | 1 + 11 files changed, 422 insertions(+), 71 deletions(-) create mode 100644 ipn/auditlog/extension.go create mode 100644 ipn/auditlog/store.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 978744947..7c87649d1 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -814,7 +814,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ - tailscale.com/ipn/auditlog from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/desktop from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 0a9c46831..1fbf7caf1 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -271,7 +271,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ - tailscale.com/ipn/auditlog from tailscale.com/ipn/ipnlocal+ + W tailscale.com/ipn/auditlog from tailscale.com/cmd/tailscaled tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ 💣 tailscale.com/ipn/desktop from tailscale.com/cmd/tailscaled+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 3574fb5f4..dfe53ef61 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -44,6 +44,7 @@ import ( "golang.zx2c4.com/wireguard/windows/tunnel/winipcfg" "tailscale.com/drive/driveimpl" "tailscale.com/envknob" + _ "tailscale.com/ipn/auditlog" "tailscale.com/ipn/desktop" "tailscale.com/logpolicy" "tailscale.com/logtail/backoff" diff --git a/ipn/auditlog/auditlog.go b/ipn/auditlog/auditlog.go index 30f39211f..0460bc4e2 100644 --- a/ipn/auditlog/auditlog.go +++ b/ipn/auditlog/auditlog.go @@ -112,7 +112,7 @@ func NewLogger(opts Opts) *Logger { al := &Logger{ retryLimit: opts.RetryLimit, - logf: 
logger.WithPrefix(opts.Logf, "auditlog: "), + logf: opts.Logf, store: opts.Store, flusher: make(chan struct{}, 1), done: make(chan struct{}), @@ -138,8 +138,10 @@ func (al *Logger) FlushAndStop(ctx context.Context) { func (al *Logger) SetProfileID(profileID ipn.ProfileID) error { al.mu.Lock() defer al.mu.Unlock() - if al.profileID != "" { - return errors.New("profileID already set") + // It's not an error to call SetProfileID more than once + // with the same [ipn.ProfileID]. + if al.profileID != "" && al.profileID != profileID { + return errors.New("profileID cannot be changed once set") } al.profileID = profileID diff --git a/ipn/auditlog/auditlog_test.go b/ipn/auditlog/auditlog_test.go index 3d3bf95cb..041cab354 100644 --- a/ipn/auditlog/auditlog_test.go +++ b/ipn/auditlog/auditlog_test.go @@ -184,8 +184,11 @@ func TestChangeProfileId(t *testing.T) { }) c.Assert(al.SetProfileID("test"), qt.IsNil) - // Changing a profile ID must fail - c.Assert(al.SetProfileID("test"), qt.IsNotNil) + // Calling SetProfileID with the same profile ID must not fail. + c.Assert(al.SetProfileID("test"), qt.IsNil) + + // Changing a profile ID must fail. + c.Assert(al.SetProfileID("test2"), qt.IsNotNil) } // TestSendOnRestore pushes a n logs to the persistent store, and ensures they diff --git a/ipn/auditlog/extension.go b/ipn/auditlog/extension.go new file mode 100644 index 000000000..8be7dfb66 --- /dev/null +++ b/ipn/auditlog/extension.go @@ -0,0 +1,189 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package auditlog + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "tailscale.com/control/controlclient" + "tailscale.com/feature" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/tailcfg" + "tailscale.com/tsd" + "tailscale.com/types/lazy" + "tailscale.com/types/logger" +) + +func init() { + feature.Register("auditlog") + ipnlocal.RegisterExtension("auditlog", newExtension) +} + +// extension is an [ipnlocal.Extension] managing audit logging +// on platforms that import this package. +// As of 2025-03-27, that's only Windows and macOS. +type extension struct { + logf logger.Logf + + // cleanup are functions to call on shutdown. + cleanup []func() + // store is the log store shared by all loggers. + // It is created when the first logger is started. + store lazy.SyncValue[LogStore] + + // mu protects all following fields. + mu sync.Mutex + // logger is the current audit logger, or nil if it is not set up, + // such as before the first control client is created, or after + // a profile change and before the new control client is created. + // + // It queues, persists, and sends audit logs to the control client. + logger *Logger +} + +// newExtension is an [ipnlocal.NewExtensionFn] that creates a new audit log extension. +// It is registered with [ipnlocal.RegisterExtension] if the package is imported. +func newExtension(logf logger.Logf, _ *tsd.System) (ipnlocal.Extension, error) { + return &extension{logf: logger.WithPrefix(logf, "auditlog: ")}, nil +} + +// Init implements [ipnlocal.Extension] by registering callbacks and providers +// for the duration of the extension's lifetime. +func (e *extension) Init(lb *ipnlocal.LocalBackend) error { + e.cleanup = []func(){ + lb.RegisterControlClientCallback(e.controlClientChanged), + lb.RegisterProfileChangeCallback(e.profileChanged, false), + lb.RegisterAuditLogProvider(e.getCurrentLogger), + } + return nil +} + +// [controlclient.Auto] implements [Transport]. 
+var _ Transport = (*controlclient.Auto)(nil) + +// startNewLogger creates and starts a new logger for the specified profile +// using the specified [controlclient.Client] as the transport. +// The profileID may be "" if the profile has not been persisted yet. +func (e *extension) startNewLogger(cc controlclient.Client, profileID ipn.ProfileID) (*Logger, error) { + transport, ok := cc.(Transport) + if !ok { + return nil, fmt.Errorf("%T cannot be used as transport", cc) + } + + // Create a new log store if this is the first logger. + // Otherwise, get the existing log store. + store, err := e.store.GetErr(func() (LogStore, error) { + return newDefaultLogStore(e.logf) + }) + if err != nil { + return nil, fmt.Errorf("failed to create audit log store: %w", err) + } + + logger := NewLogger(Opts{ + Logf: e.logf, + RetryLimit: 32, + Store: store, + }) + if err := logger.SetProfileID(profileID); err != nil { + return nil, fmt.Errorf("set profile failed: %w", err) + } + if err := logger.Start(transport); err != nil { + return nil, fmt.Errorf("start failed: %w", err) + } + return logger, nil +} + +func (e *extension) controlClientChanged(cc controlclient.Client, profile ipn.LoginProfileView, _ ipn.PrefsView) (cleanup func()) { + logger, err := e.startNewLogger(cc, profile.ID()) + e.mu.Lock() + e.logger = logger // nil on error + e.mu.Unlock() + if err != nil { + // If we fail to create or start the logger, log the error + // and return a nil cleanup function. There's nothing more + // we can do here. + // + // But [extension.getCurrentLogger] returns [noCurrentLogger] + // when the logger is nil. Since [noCurrentLogger] always + // fails with [errNoLogger], operations that must be audited + // but cannot will fail on platforms where the audit logger + // is enabled (i.e., the auditlog package is imported). + e.logf("[unexpected] %v", err) + return nil + } + return func() { + // Stop the logger when the control client shuts down. + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + logger.FlushAndStop(ctx) + } +} + +func (e *extension) profileChanged(profile ipn.LoginProfileView, _ ipn.PrefsView, sameNode bool) { + e.mu.Lock() + defer e.mu.Unlock() + switch { + case e.logger == nil: + // No-op if we don't have an audit logger. + case sameNode: + // The profile info has changed, but it represents the same node. + // This includes the case where the login has just been completed + // and the profile's [ipn.ProfileID] has been set for the first time. + if err := e.logger.SetProfileID(profile.ID()); err != nil { + e.logf("[unexpected] failed to set profile ID: %v", err) + } + default: + // The profile info has changed, and it represents a different node. + // We won't have an audit logger for the new profile until the new + // control client is created. + // + // We don't expect any auditable actions to be attempted in this state. + // But if they are, they will fail with [errNoLogger]. + e.logger = nil + } +} + +// errNoLogger is an error returned by [noCurrentLogger]. It indicates that +// the logger was unavailable when [ipnlocal.LocalBackend] requested it, +// such as when an auditable action was attempted before [LocalBackend.Start] +// was called for the first time or immediately after a profile change +// and before the new control client was created. +// +// This error is unexpected and should not occur in normal operation. 
+var errNoLogger = errors.New("[unexpected] no audit logger") + +// noCurrentLogger is an [ipnauth.AuditLogFunc] returned by [extension.getCurrentLogger] +// when the logger is not available. It fails with [errNoLogger] on every call. +func noCurrentLogger(_ tailcfg.ClientAuditAction, _ string) error { + return errNoLogger +} + +// getCurrentLogger is an [ipnlocal.AuditLogProvider] registered with [ipnlocal.LocalBackend]. +// It is called when [ipnlocal.LocalBackend] needs to audit an action. +// +// It returns a function that enqueues the audit log for the current profile, +// or [noCurrentLogger] if the logger is unavailable. +func (e *extension) getCurrentLogger() ipnauth.AuditLogFunc { + e.mu.Lock() + defer e.mu.Unlock() + if e.logger == nil { + return noCurrentLogger + } + return e.logger.Enqueue +} + +// Shutdown implements [ipnlocal.Extension]. +func (e *extension) Shutdown() error { + for _, f := range e.cleanup { + f() + } + e.cleanup = nil + return nil +} diff --git a/ipn/auditlog/store.go b/ipn/auditlog/store.go new file mode 100644 index 000000000..3b58ffa93 --- /dev/null +++ b/ipn/auditlog/store.go @@ -0,0 +1,62 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package auditlog + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + + "tailscale.com/ipn/store" + "tailscale.com/types/lazy" + "tailscale.com/types/logger" + "tailscale.com/util/must" +) + +var storeFilePath lazy.SyncValue[string] + +// SetStoreFilePath sets the audit log store file path. +// It is optional on platforms with a default store path, +// but required on platforms without one (e.g., macOS). +// It panics if called more than once or after the store has been created. +func SetStoreFilePath(path string) { + if !storeFilePath.Set(path) { + panic("store file path already set or used") + } +} + +// DefaultStoreFilePath returns the default audit log store file path +// for the current platform, or an error if the platform does not have one. +func DefaultStoreFilePath() (string, error) { + switch runtime.GOOS { + case "windows": + return filepath.Join(os.Getenv("ProgramData"), "Tailscale", "audit-log.json"), nil + default: + // The auditlog package must either be omitted from the build, + // have the platform-specific store path set with [SetStoreFilePath] (e.g., on macOS), + // or have the default store path available on the current platform. + return "", fmt.Errorf("[unexpected] no default store path available on %s", runtime.GOOS) + } +} + +// newDefaultLogStore returns a new [LogStore] for the current platform. +func newDefaultLogStore(logf logger.Logf) (LogStore, error) { + path, err := storeFilePath.GetErr(DefaultStoreFilePath) + if err != nil { + // This indicates that the auditlog package was not omitted from the build + // on a platform without a default store path and that [SetStoreFilePath] + // was not called to set a platform-specific store path. + // + // This is not expected to happen, but if it does, let's log it + // and use an in-memory store as a fallback. 
+ logf("[unexpected] failed to get audit log store path: %v", err) + return NewLogStore(must.Get(store.New(logf, "mem:auditlog"))), nil + } + fs, err := store.New(logf, path) + if err != nil { + return nil, fmt.Errorf("failed to create audit log store at %q: %w", path, err) + } + return NewLogStore(fs), nil +} diff --git a/ipn/ipnlocal/desktop_sessions.go b/ipn/ipnlocal/desktop_sessions.go index 23307f667..4e9eebf34 100644 --- a/ipn/ipnlocal/desktop_sessions.go +++ b/ipn/ipnlocal/desktop_sessions.go @@ -28,8 +28,8 @@ func init() { RegisterExtension("desktop-sessions", newDesktopSessionsExt) } -// desktopSessionsExt implements [localBackendExtension]. -var _ localBackendExtension = (*desktopSessionsExt)(nil) +// desktopSessionsExt implements [Extension]. +var _ Extension = (*desktopSessionsExt)(nil) // desktopSessionsExt extends [LocalBackend] with desktop session management. // It keeps Tailscale running in the background if Always-On mode is enabled, @@ -51,7 +51,7 @@ type desktopSessionsExt struct { // newDesktopSessionsExt returns a new [desktopSessionsExt], // or an error if [desktop.SessionManager] is not available. -func newDesktopSessionsExt(logf logger.Logf, sys *tsd.System) (localBackendExtension, error) { +func newDesktopSessionsExt(logf logger.Logf, sys *tsd.System) (Extension, error) { sm, ok := sys.SessionManager.GetOK() if !ok { return nil, errors.New("session manager is not available") diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 048b5f0c4..63b9d576a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -57,12 +57,10 @@ import ( "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" "tailscale.com/ipn" - "tailscale.com/ipn/auditlog" "tailscale.com/ipn/conffile" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnstate" "tailscale.com/ipn/policy" - memstore "tailscale.com/ipn/store/mem" "tailscale.com/log/sockstatlog" "tailscale.com/logpolicy" "tailscale.com/net/captivedetection" @@ -170,8 +168,8 @@ type watchSession struct { cancel context.CancelFunc // to shut down the session } -// localBackendExtension extends [LocalBackend] with additional functionality. -type localBackendExtension interface { +// Extension extends [LocalBackend] with additional functionality. +type Extension interface { // Init is called to initialize the extension when the [LocalBackend] is created // and before it starts running. If the extension cannot be initialized, // it must return an error, and the Shutdown method will not be called. @@ -185,17 +183,17 @@ type localBackendExtension interface { Shutdown() error } -// newLocalBackendExtension is a function that instantiates a [localBackendExtension]. -type newLocalBackendExtension func(logger.Logf, *tsd.System) (localBackendExtension, error) +// NewExtensionFn is a function that instantiates an [Extension]. +type NewExtensionFn func(logger.Logf, *tsd.System) (Extension, error) // registeredExtensions is a map of registered local backend extensions, // where the key is the name of the extension and the value is the function // that instantiates the extension. -var registeredExtensions map[string]newLocalBackendExtension +var registeredExtensions map[string]NewExtensionFn // RegisterExtension registers a function that creates a [localBackendExtension]. // It panics if newExt is nil or if an extension with the same name has already been registered. 
-func RegisterExtension(name string, newExt newLocalBackendExtension) { +func RegisterExtension(name string, newExt NewExtensionFn) { if newExt == nil { panic(fmt.Sprintf("lb: newExt is nil: %q", name)) } @@ -213,6 +211,36 @@ func RegisterExtension(name string, newExt newLocalBackendExtension) { // It is called with [LocalBackend.mu] held. type profileResolver func() (_ ipn.WindowsUserID, _ ipn.ProfileID, ok bool) +// NewControlClientCallback is a function to be called when a new [controlclient.Client] +// is created and before it is first used. The login profile and prefs represent +// the profile for which the cc is created and are always valid; however, the +// profile's [ipn.LoginProfileView.ID] returns a zero [ipn.ProfileID] if the profile +// is new and has not been persisted yet. +// +// The callback is called with [LocalBackend.mu] held and must not call +// any [LocalBackend] methods. +// +// It returns a function to be called when the cc is being shut down, +// or nil if no cleanup is needed. +type NewControlClientCallback func(controlclient.Client, ipn.LoginProfileView, ipn.PrefsView) (cleanup func()) + +// ProfileChangeCallback is a function to be called when the current login profile changes. +// The sameNode parameter indicates whether the profile represents the same node as before, +// such as when only the profile metadata is updated but the node ID remains the same, +// or when a new profile is persisted and assigned an [ipn.ProfileID] for the first time. +// The subscribers can use this information to decide whether to reset their state. +// +// The profile and prefs are always valid, but the profile's [ipn.LoginProfileView.ID] +// returns a zero [ipn.ProfileID] if the profile is new and has not been persisted yet. +// +// The callback is called with [LocalBackend.mu] held and must not call +// any [LocalBackend] methods. +type ProfileChangeCallback func(_ ipn.LoginProfileView, _ ipn.PrefsView, sameNode bool) + +// AuditLogProvider is a function that returns an [ipnauth.AuditLogFunc] for +// logging auditable actions. +type AuditLogProvider func() ipnauth.AuditLogFunc + // LocalBackend is the glue between the major pieces of the Tailscale // network software: the cloud control plane (via controlclient), the // network data plane (via wgengine), and the user-facing UIs and CLIs @@ -453,11 +481,19 @@ type LocalBackend struct { // Returned errors are logged but otherwise ignored and do not affect the shutdown process. shutdownCbs set.HandleSet[func() error] - // auditLogger, if non-nil, manages audit logging for the backend. - // - // It queues, persists, and sends audit logs - // to the control client. auditLogger has the same lifespan as b.cc. - auditLogger *auditlog.Logger + // newControlClientCbs are the functions to be called when a new control client is created. + newControlClientCbs set.HandleSet[NewControlClientCallback] + + // profileChangeCbs are the callbacks to be called when the current login profile changes, + // either because of a profile switch, or because the profile information was updated + // by [LocalBackend.SetControlClientStatus], including when the profile is first populated + // and persisted. + profileChangeCbs set.HandleSet[ProfileChangeCallback] + + // auditLoggers is a collection of registered audit log providers. + // Each [AuditLogProvider] is called to get an [ipnauth.AuditLogFunc] when an auditable action + // is about to be performed. If an audit logger returns an error, the action is denied. 
+ auditLoggers set.HandleSet[AuditLogProvider] } // HealthTracker returns the health tracker for the backend. @@ -1681,6 +1717,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control // Perform all mutations of prefs based on the netmap here. if prefsChanged { + profile := b.pm.CurrentProfile() // Prefs will be written out if stale; this is not safe unless locked or cloned. if err := b.pm.SetPrefs(prefs.View(), ipn.NetworkProfile{ MagicDNSName: curNetMap.MagicDNSSuffix(), @@ -1688,13 +1725,19 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control }); err != nil { b.logf("Failed to save new controlclient state: %v", err) } - } - - // Update the audit logger with the current profile ID. - if b.auditLogger != nil && prefsChanged { - pid := b.pm.CurrentProfile().ID() - if err := b.auditLogger.SetProfileID(pid); err != nil { - b.logf("Failed to set profile ID in audit logger: %v", err) + // Updating profile prefs may have resulted in a change to the current [ipn.LoginProfile], + // either because the user completed a login, which populated and persisted their profile + // for the first time, or because of an [ipn.NetworkProfile] or [tailcfg.UserProfile] change. + // Theoretically, a completed login could also result in a switch to a different existing + // profile representing a different node (see tailscale/tailscale#8816). + // + // Let's check if the current profile has changed, and invoke all registered [ProfileChangeCallback] + // if necessary. + if cp := b.pm.CurrentProfile(); *cp.AsStruct() != *profile.AsStruct() { + // If the profile ID was empty before SetPrefs, it's a new profile + // and the user has just completed a login for the first time. + sameNode := profile.ID() == "" || profile.ID() == cp.ID() + b.notifyProfileChangeLocked(profile, prefs.View(), sameNode) } } @@ -2403,25 +2446,12 @@ func (b *LocalBackend) Start(opts ipn.Options) error { debugFlags = append([]string{"netstack"}, debugFlags...) } - var auditLogShutdown func() - store, ok := b.sys.AuditLogStore.GetOK() - if !ok { - // Use memory store by default if no explicit store is provided. - store = auditlog.NewLogStore(&memstore.Store{}) - } - - al := auditlog.NewLogger(auditlog.Opts{ - Logf: b.logf, - RetryLimit: 32, - Store: store, - }) - b.auditLogger = al - auditLogShutdown = func() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - al.FlushAndStop(ctx) + var ccShutdownCbs []func() + ccShutdown := func() { + for _, cb := range ccShutdownCbs { + cb() + } } - // TODO(apenwarr): The only way to change the ServerURL is to // re-run b.Start, because this is the only place we create a // new controlclient. EditPrefs allows you to overwrite ServerURL, @@ -2447,7 +2477,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { C2NHandler: http.HandlerFunc(b.handleC2N), DialPlan: &b.dialPlan, // pointer because it can't be copied ControlKnobs: b.sys.ControlKnobs(), - Shutdown: auditLogShutdown, + Shutdown: ccShutdown, // Don't warn about broken Linux IP forwarding when // netstack is being used. 
@@ -2456,6 +2486,11 @@ func (b *LocalBackend) Start(opts ipn.Options) error { if err != nil { return err } + for _, cb := range b.newControlClientCbs { + if cleanup := cb(cc, b.pm.CurrentProfile(), prefs); cleanup != nil { + ccShutdownCbs = append(ccShutdownCbs, cleanup) + } + } b.setControlClientLocked(cc) endpoints := b.endpoints @@ -4302,16 +4337,42 @@ func (b *LocalBackend) MaybeClearAppConnector(mp *ipn.MaskedPrefs) error { return err } -var errNoAuditLogger = errors.New("no audit logger configured") +// RegisterAuditLogProvider registers an audit log provider, which returns a function +// to be called when an auditable action is about to be performed. +// The returned function unregisters the provider. +// It panics if the provider is nil. +func (b *LocalBackend) RegisterAuditLogProvider(provider AuditLogProvider) (unregister func()) { + if provider == nil { + panic("nil audit log provider") + } + b.mu.Lock() + defer b.mu.Unlock() + handle := b.auditLoggers.Add(provider) + return func() { + b.mu.Lock() + defer b.mu.Unlock() + delete(b.auditLoggers, handle) + } +} +// getAuditLoggerLocked returns a function that calls all currently registered +// audit loggers, failing as soon as any of them returns an error. +// +// b.mu must be held. func (b *LocalBackend) getAuditLoggerLocked() ipnauth.AuditLogFunc { - logger := b.auditLogger - return func(action tailcfg.ClientAuditAction, details string) error { - if logger == nil { - return errNoAuditLogger + var loggers []ipnauth.AuditLogFunc + if len(b.auditLoggers) != 0 { + loggers = make([]ipnauth.AuditLogFunc, 0, len(b.auditLoggers)) + for _, getLogger := range b.auditLoggers { + loggers = append(loggers, getLogger()) } - if err := logger.Enqueue(action, details); err != nil { - return fmt.Errorf("failed to enqueue audit log %v %q: %w", action, details, err) + } + return func(action tailcfg.ClientAuditAction, details string) error { + b.logf("auditlog: %v: %v", action, details) + for _, logger := range loggers { + if err := logger(action, details); err != nil { + return err + } } return nil } @@ -5921,8 +5982,22 @@ func (b *LocalBackend) requestEngineStatusAndWait() { b.logf("requestEngineStatusAndWait: got status update.") } -// [controlclient.Auto] implements [auditlog.Transport]. -var _ auditlog.Transport = (*controlclient.Auto)(nil) +// RegisterControlClientCallback registers a function to be called every time a new +// control client is created, until the returned unregister function is called. +// It panics if the cb is nil. +func (b *LocalBackend) RegisterControlClientCallback(cb NewControlClientCallback) (unregister func()) { + if cb == nil { + panic("nil control client callback") + } + b.mu.Lock() + defer b.mu.Unlock() + handle := b.newControlClientCbs.Add(cb) + return func() { + b.mu.Lock() + defer b.mu.Unlock() + delete(b.newControlClientCbs, handle) + } +} // setControlClientLocked sets the control client to cc, // which may be nil. @@ -5931,15 +6006,6 @@ var _ auditlog.Transport = (*controlclient.Auto)(nil) func (b *LocalBackend) setControlClientLocked(cc controlclient.Client) { b.cc = cc b.ccAuto, _ = cc.(*controlclient.Auto) - if t, ok := b.cc.(auditlog.Transport); ok && b.auditLogger != nil { - if err := b.auditLogger.SetProfileID(b.pm.CurrentProfile().ID()); err != nil { - b.logf("audit logger set profile ID failure: %v", err) - } - - if err := b.auditLogger.Start(t); err != nil { - b.logf("audit logger start failure: %v", err) - } - } } // resetControlClientLocked sets b.cc to nil and returns the old value. 
If the @@ -7522,6 +7588,37 @@ func (b *LocalBackend) resetDialPlan() { } } +// RegisterProfileChangeCallback registers a function to be called when the current [ipn.LoginProfile] changes. +// If includeCurrent is true, the callback is called immediately with the current profile. +// The returned function unregisters the callback. +// It panics if the cb is nil. +func (b *LocalBackend) RegisterProfileChangeCallback(cb ProfileChangeCallback, includeCurrent bool) (unregister func()) { + if cb == nil { + panic("nil profile change callback") + } + b.mu.Lock() + defer b.mu.Unlock() + handle := b.profileChangeCbs.Add(cb) + if includeCurrent { + cb(b.pm.CurrentProfile(), stripKeysFromPrefs(b.pm.CurrentPrefs()), false) + } + return func() { + b.mu.Lock() + defer b.mu.Unlock() + delete(b.profileChangeCbs, handle) + } +} + +// notifyProfileChangeLocked invokes all registered profile change callbacks. +// +// b.mu must be held. +func (b *LocalBackend) notifyProfileChangeLocked(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { + prefs = stripKeysFromPrefs(prefs) + for _, cb := range b.profileChangeCbs { + cb(profile, prefs, sameNode) + } +} + // resetForProfileChangeLockedOnEntry resets the backend for a profile change. // // b.mu must held on entry. It is released on exit. @@ -7550,6 +7647,7 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err b.lastSuggestedExitNode = "" b.keyExpired = false b.resetAlwaysOnOverrideLocked() + b.notifyProfileChangeLocked(b.pm.CurrentProfile(), b.pm.CurrentPrefs(), false) b.setAtomicValuesFromPrefsLocked(b.pm.CurrentPrefs()) b.enterStateLockedOnEntry(ipn.NoState, unlock) // Reset state; releases b.mu b.health.SetLocalLogConfigHealth(nil) diff --git a/tsd/tsd.go b/tsd/tsd.go index 9ab35af55..1d1f35017 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -25,7 +25,6 @@ import ( "tailscale.com/drive" "tailscale.com/health" "tailscale.com/ipn" - "tailscale.com/ipn/auditlog" "tailscale.com/ipn/conffile" "tailscale.com/ipn/desktop" "tailscale.com/net/dns" @@ -51,7 +50,6 @@ type System struct { Router SubSystem[router.Router] Tun SubSystem[*tstun.Wrapper] StateStore SubSystem[ipn.StateStore] - AuditLogStore SubSystem[auditlog.LogStore] Netstack SubSystem[NetstackImpl] // actually a *netstack.Impl DriveForLocal SubSystem[drive.FileSystemForLocal] DriveForRemote SubSystem[drive.FileSystemForRemote] @@ -108,8 +106,6 @@ func (s *System) Set(v any) { s.MagicSock.Set(v) case ipn.StateStore: s.StateStore.Set(v) - case auditlog.LogStore: - s.AuditLogStore.Set(v) case NetstackImpl: s.Netstack.Set(v) case drive.FileSystemForLocal: diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index a6df2f9ff..30ce0892e 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -28,6 +28,7 @@ import ( _ "tailscale.com/health" _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" + _ "tailscale.com/ipn/auditlog" _ "tailscale.com/ipn/conffile" _ "tailscale.com/ipn/desktop" _ "tailscale.com/ipn/ipnlocal" From 4c5112eba61a6776a345fbdbcdf33f3cb7cb0883 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 27 Mar 2025 19:31:47 -0700 Subject: [PATCH 0651/1708] cmd/tailscaled: make embedded CLI run earlier, support triggering via env Not all platforms have hardlinks, or not easily. This lets a "tailscale" wrapper script set an environment variable before calling tailscaled. 
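(As a purely illustrative usage note, not part of the change itself: such a wrapper could be as small as something along the lines of `TS_BE_CLI=1 exec tailscaled "$@"`, where TS_BE_CLI is the environment knob checked by the new shouldRunCLI helper below; the exact wrapper shape and install path are left to platform packaging.)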
Updates #2233 Change-Id: I9eccc18651e56c106f336fcbbd0fd97a661d312e Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/tailscaled.go | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 237cdfb55..122afe97b 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -151,10 +151,33 @@ var subCommands = map[string]*func([]string) error{ "serve-taildrive": &serveDriveFunc, } -var beCLI func() // non-nil if CLI is linked in +var beCLI func() // non-nil if CLI is linked in with the "ts_include_cli" build tag + +// shouldRunCLI reports whether we should run the Tailscale CLI (cmd/tailscale) +// instead of the daemon (cmd/tailscaled) in the case when the two are linked +// together into one binary for space savings reasons. +func shouldRunCLI() bool { + if beCLI == nil { + // Not linked in with the "ts_include_cli" build tag. + return false + } + if len(os.Args) > 0 && filepath.Base(os.Args[0]) == "tailscale" { + // The binary was named (or hardlinked) as "tailscale". + return true + } + if envknob.Bool("TS_BE_CLI") { + // The environment variable was set to force it. + return true + } + return false +} func main() { envknob.PanicIfAnyEnvCheckedInInit() + if shouldRunCLI() { + beCLI() + return + } envknob.ApplyDiskConfig() applyIntegrationTestEnvKnob() @@ -175,11 +198,6 @@ func main() { flag.BoolVar(&args.disableLogs, "no-logs-no-support", false, "disable log uploads; this also disables any technical support") flag.StringVar(&args.confFile, "config", "", "path to config file, or 'vm:user-data' to use the VM's user-data (EC2)") - if len(os.Args) > 0 && filepath.Base(os.Args[0]) == "tailscale" && beCLI != nil { - beCLI() - return - } - if len(os.Args) > 1 { sub := os.Args[1] if fp, ok := subCommands[sub]; ok { From bf8c8e9e8989d6d7e4b678e6647073dd39069ac4 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 28 Mar 2025 17:34:33 +0000 Subject: [PATCH 0652/1708] cmd/k8s-operator,k8s-operator: enable HA Ingress again. (#15453) Re-enable HA Ingress again that was disabled for 1.82 release. This reverts commit fea74a60d529bcccbc8ded74644256bb6f6c7727. Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- .../crds/tailscale.com_proxygroups.yaml | 2 +- .../deploy/manifests/operator.yaml | 2 +- cmd/k8s-operator/ingress-for-pg.go | 1 + cmd/k8s-operator/operator.go | 181 ++++++++++++++++++ k8s-operator/api.md | 2 +- .../apis/v1alpha1/types_proxygroup.go | 2 +- 6 files changed, 186 insertions(+), 4 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index e101c201f..86e74e441 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -103,7 +103,7 @@ spec: pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ type: description: |- - Type of the ProxyGroup proxies. Currently the only supported type is egress. + Type of the ProxyGroup proxies. Supported types are egress and ingress. Type is immutable once a ProxyGroup is created. type: string enum: diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index aa79fefcb..dc8d0634c 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2876,7 +2876,7 @@ spec: type: array type: description: |- - Type of the ProxyGroup proxies. 
Currently the only supported type is egress. + Type of the ProxyGroup proxies. Supported types are egress and ingress. Type is immutable once a ProxyGroup is created. enum: - egress diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 687f70d7b..3df5a07ee 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -49,6 +49,7 @@ const ( // FinalizerNamePG is the finalizer used by the IngressPGReconciler FinalizerNamePG = "tailscale.com/ingress-pg-finalizer" + indexIngressProxyGroup = ".metadata.annotations.ingress-proxy-group" // annotationHTTPEndpoint can be used to configure the Ingress to expose an HTTP endpoint to tailnet (as // well as the default HTTPS endpoint). annotationHTTPEndpoint = "tailscale.com/http-endpoint" diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 69ee51c9b..1f637927b 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -9,6 +9,7 @@ package main import ( "context" + "fmt" "net/http" "os" "regexp" @@ -39,6 +40,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/local" "tailscale.com/client/tailscale" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -331,6 +333,40 @@ func runReconcilers(opts reconcilerOpts) { if err != nil { startlog.Fatalf("could not create ingress reconciler: %v", err) } + lc, err := opts.tsServer.LocalClient() + if err != nil { + startlog.Fatalf("could not get local client: %v", err) + } + id, err := id(context.Background(), lc) + if err != nil { + startlog.Fatalf("error determining stable ID of the operator's Tailscale device: %v", err) + } + ingressProxyGroupFilter := handler.EnqueueRequestsFromMapFunc(ingressesFromIngressProxyGroup(mgr.GetClient(), opts.log)) + err = builder. + ControllerManagedBy(mgr). + For(&networkingv1.Ingress{}). + Named("ingress-pg-reconciler"). + Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog))). + Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(HAIngressesFromSecret(mgr.GetClient(), startlog))). + Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). + Complete(&HAIngressReconciler{ + recorder: eventRecorder, + tsClient: opts.tsClient, + tsnetServer: opts.tsServer, + defaultTags: strings.Split(opts.proxyTags, ","), + Client: mgr.GetClient(), + logger: opts.log.Named("ingress-pg-reconciler"), + lc: lc, + operatorID: id, + tsNamespace: opts.tailscaleNamespace, + }) + if err != nil { + startlog.Fatalf("could not create ingress-pg-reconciler: %v", err) + } + if err := mgr.GetFieldIndexer().IndexField(context.Background(), new(networkingv1.Ingress), indexIngressProxyGroup, indexPGIngresses); err != nil { + startlog.Fatalf("failed setting up indexer for HA Ingresses: %v", err) + } + connectorFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("connector")) // If a ProxyClassChanges, enqueue all Connectors that have // .spec.proxyClass set to the name of this ProxyClass. @@ -1003,6 +1039,65 @@ func reconcileRequestsForPG(pg string, cl client.Client, ns string) []reconcile. 
return reqs } +func isTLSSecret(secret *corev1.Secret) bool { + return secret.Type == corev1.SecretTypeTLS && + secret.ObjectMeta.Labels[kubetypes.LabelManaged] == "true" && + secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == "certs" && + secret.ObjectMeta.Labels[labelDomain] != "" && + secret.ObjectMeta.Labels[labelProxyGroup] != "" +} + +func isPGStateSecret(secret *corev1.Secret) bool { + return secret.ObjectMeta.Labels[kubetypes.LabelManaged] == "true" && + secret.ObjectMeta.Labels[LabelParentType] == "proxygroup" && + secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == "state" +} + +// HAIngressesFromSecret returns a handler that returns reconcile requests for +// all HA Ingresses that should be reconciled in response to a Secret event. +func HAIngressesFromSecret(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + secret, ok := o.(*corev1.Secret) + if !ok { + logger.Infof("[unexpected] Secret handler triggered for an object that is not a Secret") + return nil + } + if isTLSSecret(secret) { + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: secret.ObjectMeta.Labels[LabelParentNamespace], + Name: secret.ObjectMeta.Labels[LabelParentName], + }, + }, + } + } + if !isPGStateSecret(secret) { + return nil + } + pgName, ok := secret.ObjectMeta.Labels[LabelParentName] + if !ok { + return nil + } + + ingList := &networkingv1.IngressList{} + if err := cl.List(ctx, ingList, client.MatchingFields{indexIngressProxyGroup: pgName}); err != nil { + logger.Infof("error listing Ingresses, skipping a reconcile for event on Secret %s: %v", secret.Name, err) + return nil + } + reqs := make([]reconcile.Request, 0) + for _, ing := range ingList.Items { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: ing.Namespace, + Name: ing.Name, + }, + }) + } + return reqs + } +} + // egressSvcsFromEgressProxyGroup is an event handler for egress ProxyGroups. It returns reconcile requests for all // user-created ExternalName Services that should be exposed on this ProxyGroup. func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { @@ -1033,6 +1128,36 @@ func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) } } +// ingressesFromIngressProxyGroup is an event handler for ingress ProxyGroups. It returns reconcile requests for all +// user-created Ingresses that should be exposed on this ProxyGroup. 
+func ingressesFromIngressProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + pg, ok := o.(*tsapi.ProxyGroup) + if !ok { + logger.Infof("[unexpected] ProxyGroup handler triggered for an object that is not a ProxyGroup") + return nil + } + if pg.Spec.Type != tsapi.ProxyGroupTypeIngress { + return nil + } + ingList := &networkingv1.IngressList{} + if err := cl.List(ctx, ingList, client.MatchingFields{indexIngressProxyGroup: pg.Name}); err != nil { + logger.Infof("error listing Ingresses: %v, skipping a reconcile for event on ProxyGroup %s", err, pg.Name) + return nil + } + reqs := make([]reconcile.Request, 0) + for _, svc := range ingList.Items { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: svc.Namespace, + Name: svc.Name, + }, + }) + } + return reqs + } +} + // epsFromExternalNameService is an event handler for ExternalName Services that define a Tailscale egress service that // should be exposed on a ProxyGroup. It returns reconcile requests for EndpointSlices created for this Service. func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger, ns string) handler.MapFunc { @@ -1153,7 +1278,63 @@ func indexEgressServices(o client.Object) []string { return []string{o.GetAnnotations()[AnnotationProxyGroup]} } +// indexPGIngresses adds a local index to a cached Tailscale Ingresses meant to be exposed on a ProxyGroup. The index is +// used a list filter. +func indexPGIngresses(o client.Object) []string { + if !hasProxyGroupAnnotation(o) { + return nil + } + return []string{o.GetAnnotations()[AnnotationProxyGroup]} +} + +// serviceHandlerForIngressPG returns a handler for Service events that ensures that if the Service +// associated with an event is a backend Service for a tailscale Ingress with ProxyGroup annotation, +// the associated Ingress gets reconciled. 
+func serviceHandlerForIngressPG(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + ingList := networkingv1.IngressList{} + if err := cl.List(ctx, &ingList, client.InNamespace(o.GetNamespace())); err != nil { + logger.Debugf("error listing Ingresses: %v", err) + return nil + } + reqs := make([]reconcile.Request, 0) + for _, ing := range ingList.Items { + if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != tailscaleIngressClassName { + continue + } + if !hasProxyGroupAnnotation(&ing) { + continue + } + if ing.Spec.DefaultBackend != nil && ing.Spec.DefaultBackend.Service != nil && ing.Spec.DefaultBackend.Service.Name == o.GetName() { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) + } + for _, rule := range ing.Spec.Rules { + if rule.HTTP == nil { + continue + } + for _, path := range rule.HTTP.Paths { + if path.Backend.Service != nil && path.Backend.Service.Name == o.GetName() { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) + } + } + } + } + return reqs + } +} + func hasProxyGroupAnnotation(obj client.Object) bool { ing := obj.(*networkingv1.Ingress) return ing.Annotations[AnnotationProxyGroup] != "" } + +func id(ctx context.Context, lc *local.Client) (string, error) { + st, err := lc.StatusWithoutPeers(ctx) + if err != nil { + return "", fmt.Errorf("error getting tailscale status: %w", err) + } + if st.Self == nil { + return "", fmt.Errorf("unexpected: device's status does not contain self status") + } + return string(st.Self.ID), nil +} diff --git a/k8s-operator/api.md b/k8s-operator/api.md index f885ded1e..190f99d24 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -600,7 +600,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Currently the only supported type is egress.
Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress] <br />Type: string <br /> |
+| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Supported types are egress and ingress.<br />Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress] <br />Type: string <br /> |
 | `tags` _[Tags](#tags)_ | Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s].<br />If you specify custom tags here, make sure you also make the operator<br />an owner of these tags.<br />See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.<br />Tags cannot be changed once a ProxyGroup device has been created.<br />Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$` <br />Type: string <br /> |
 | `replicas` _integer_ | Replicas specifies how many replicas to create the StatefulSet with.<br />Defaults to 2. | | Minimum: 0 <br /> |
 | `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix is the hostname prefix to use for tailnet devices created<br />by the ProxyGroup. Each device will have the integer number from its<br />StatefulSet pod appended to this prefix to form the full hostname.<br />HostnamePrefix can contain lower case letters, numbers and dashes, it<br />must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$` <br />Type: string <br />
        | diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index cb9f678f8..f95fc58d0 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -48,7 +48,7 @@ type ProxyGroupList struct { } type ProxyGroupSpec struct { - // Type of the ProxyGroup proxies. Currently the only supported type is egress. + // Type of the ProxyGroup proxies. Supported types are egress and ingress. // Type is immutable once a ProxyGroup is created. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ProxyGroup type is immutable" Type ProxyGroupType `json:"type"` From 2a12e634bfe7fc4f89fa8f37b1bd0ff9866e776b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 28 Mar 2025 11:59:36 -0700 Subject: [PATCH 0653/1708] cmd/vnet: add wsproxy mode For hooking up websocket VM clients to natlab. Updates #13038 Change-Id: Iaf728b9146042f3d0c2d3a5e25f178646dd10951 Signed-off-by: Brad Fitzpatrick --- cmd/vnet/vnet-main.go | 179 +++++++++++++++++++++++++++++++++++++ tstest/natlab/vnet/conf.go | 2 + tstest/natlab/vnet/vnet.go | 3 + 3 files changed, 184 insertions(+) diff --git a/cmd/vnet/vnet-main.go b/cmd/vnet/vnet-main.go index 1eb4f65ef..9dd4d8cfa 100644 --- a/cmd/vnet/vnet-main.go +++ b/cmd/vnet/vnet-main.go @@ -7,15 +7,21 @@ package main import ( "context" + "encoding/binary" "flag" + "fmt" + "io" "log" "net" "net/http" "net/http/httputil" "net/url" "os" + "path/filepath" + "slices" "time" + "github.com/coder/websocket" "tailscale.com/tstest/natlab/vnet" "tailscale.com/types/logger" "tailscale.com/util/must" @@ -31,10 +37,18 @@ var ( pcapFile = flag.String("pcap", "", "if non-empty, filename to write pcap") v4 = flag.Bool("v4", true, "enable IPv4") v6 = flag.Bool("v6", true, "enable IPv6") + + wsproxyListen = flag.String("wsproxy", "", "if non-empty, TCP address to run websocket server on. See https://github.com/copy/v86/blob/master/docs/networking.md#backend-url-schemes") ) func main() { flag.Parse() + if *wsproxyListen != "" { + if err := runWSProxy(); err != nil { + log.Fatalf("runWSProxy: %v", err) + } + return + } if _, err := os.Stat(*listen); err == nil { os.Remove(*listen) @@ -137,3 +151,168 @@ func main() { go s.ServeUnixConn(c.(*net.UnixConn), vnet.ProtocolQEMU) } } + +func runWSProxy() error { + ln, err := net.Listen("tcp", *wsproxyListen) + if err != nil { + return err + } + defer ln.Close() + + log.Printf("Running wsproxy mode on %v ...", *wsproxyListen) + + var hs http.Server + hs.Handler = http.HandlerFunc(handleWebSocket) + + return hs.Serve(ln) +} + +func handleWebSocket(w http.ResponseWriter, r *http.Request) { + conn, err := websocket.Accept(w, r, &websocket.AcceptOptions{ + InsecureSkipVerify: true, + }) + if err != nil { + log.Printf("Upgrade error: %v", err) + return + } + defer conn.Close(websocket.StatusInternalError, "closing") + log.Printf("WebSocket client connected: %s", r.RemoteAddr) + + ctx, cancel := context.WithCancel(r.Context()) + defer cancel() + + messageType, firstData, err := conn.Read(ctx) + if err != nil { + log.Printf("ReadMessage first: %v", err) + return + } + if messageType != websocket.MessageBinary { + log.Printf("Ignoring non-binary message") + return + } + if len(firstData) < 12 { + log.Printf("Ignoring short message") + return + } + clientMAC := vnet.MAC(firstData[6:12]) + + // Set up a qemu-protocol Unix socket pair. We'll fake the qemu protocol here + // to avoid changing the vnet package. 
+ td, err := os.MkdirTemp("", "vnet") + if err != nil { + panic(fmt.Errorf("MkdirTemp: %v", err)) + } + defer os.RemoveAll(td) + + unixSrv := filepath.Join(td, "vnet.sock") + + srv, err := net.Listen("unix", unixSrv) + if err != nil { + panic(fmt.Errorf("Listen: %v", err)) + } + defer srv.Close() + + var c vnet.Config + c.SetBlendReality(true) + + var net1opt = []any{vnet.NAT("easy")} + net1opt = append(net1opt, "2.1.1.1", "192.168.1.1/24") + net1opt = append(net1opt, "2000:52::1/64") + + c.AddNode(c.AddNetwork(net1opt...), clientMAC) + + vs, err := vnet.New(&c) + if err != nil { + panic(fmt.Errorf("newServer: %v", err)) + } + if err := vs.PopulateDERPMapIPs(); err != nil { + log.Printf("warning: ignoring failure to populate DERP map: %v", err) + return + } + + errc := make(chan error, 1) + fail := func(err error) { + select { + case errc <- err: + log.Printf("failed: %v", err) + case <-ctx.Done(): + } + } + + go func() { + c, err := srv.Accept() + if err != nil { + fail(err) + return + } + vs.ServeUnixConn(c.(*net.UnixConn), vnet.ProtocolQEMU) + }() + + uc, err := net.Dial("unix", unixSrv) + if err != nil { + panic(fmt.Errorf("Dial: %v", err)) + } + defer uc.Close() + + var frameBuf []byte + writeDataToUnixConn := func(data []byte) error { + frameBuf = slices.Grow(frameBuf[:0], len(data)+4)[:len(data)+4] + binary.BigEndian.PutUint32(frameBuf[:4], uint32(len(data))) + copy(frameBuf[4:], data) + + _, err = uc.Write(frameBuf) + return err + } + if err := writeDataToUnixConn(firstData); err != nil { + fail(err) + return + } + + go func() { + for { + messageType, data, err := conn.Read(ctx) + if err != nil { + fail(fmt.Errorf("ReadMessage: %v", err)) + break + } + + if messageType != websocket.MessageBinary { + log.Printf("Ignoring non-binary message") + continue + } + + if err := writeDataToUnixConn(data); err != nil { + fail(err) + return + } + } + }() + + go func() { + const maxBuf = 4096 + frameBuf := make([]byte, maxBuf) + for { + _, err := io.ReadFull(uc, frameBuf[:4]) + if err != nil { + fail(err) + return + } + frameLen := binary.BigEndian.Uint32(frameBuf[:4]) + if frameLen > maxBuf { + fail(fmt.Errorf("frame too large: %d", frameLen)) + return + } + if _, err := io.ReadFull(uc, frameBuf[:frameLen]); err != nil { + fail(err) + return + } + + if err := conn.Write(ctx, websocket.MessageBinary, frameBuf[:frameLen]); err != nil { + fail(err) + return + } + } + }() + + <-ctx.Done() +} diff --git a/tstest/natlab/vnet/conf.go b/tstest/natlab/vnet/conf.go index a37c22a6c..07b181540 100644 --- a/tstest/natlab/vnet/conf.go +++ b/tstest/natlab/vnet/conf.go @@ -121,6 +121,8 @@ func (c *Config) AddNode(opts ...any) *Node { n.err = fmt.Errorf("unknown NodeOption %q", o) } } + case MAC: + n.mac = o default: if n.err == nil { n.err = fmt.Errorf("unknown AddNode option type %T", o) diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index ead2bbb8b..e3ecf0f75 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -88,6 +88,9 @@ func (s *Server) PopulateDERPMapIPs() error { if n.IPv4 != "" { s.derpIPs.Add(netip.MustParseAddr(n.IPv4)) } + if n.IPv6 != "" { + s.derpIPs.Add(netip.MustParseAddr(n.IPv6)) + } } } return nil From cdde301ca5a3431bb2965273accbdbe6032fb446 Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Tue, 11 Mar 2025 12:01:09 +0100 Subject: [PATCH 0654/1708] ipn/ipnlocal: return old hwaddrs if missing If we previously knew of macaddresses of a node, and they suddenly goes to zero, ignore them and return the previous hardware addresses. 
Updates tailscale/corp#25168 Signed-off-by: Kristoffer Dalby --- ipn/ipnlocal/c2n.go | 2 +- ipn/ipnlocal/local.go | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index e91921533..b33794751 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -360,7 +360,7 @@ func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http // and looks good in client metrics, remove this parameter and always report MAC // addresses. if r.FormValue("hwaddrs") == "true" { - res.IfaceHardwareAddrs, err = posture.GetHardwareAddrs() + res.IfaceHardwareAddrs, err = b.getHardwareAddrs() if err != nil { b.logf("c2n: GetHardwareAddrs returned error: %v", err) } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 63b9d576a..206f69968 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -78,6 +78,7 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/paths" "tailscale.com/portlist" + "tailscale.com/posture" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/taildrop" @@ -433,6 +434,12 @@ type LocalBackend struct { // notified about. lastNotifiedDriveShares *views.SliceView[*drive.Share, drive.ShareView] + // lastKnownHardwareAddrs is a list of the previous known hardware addrs. + // Previously known hwaddrs are kept to work around an issue on Windows + // where all addresses might disappear. + // http://go/corp/25168 + lastKnownHardwareAddrs syncs.AtomicValue[[]string] + // outgoingFiles keeps track of Taildrop outgoing files keyed to their OutgoingFile.ID outgoingFiles map[string]*ipn.OutgoingFile @@ -7619,6 +7626,25 @@ func (b *LocalBackend) notifyProfileChangeLocked(profile ipn.LoginProfileView, p } } +// getHardwareAddrs returns the hardware addresses for the machine. If the list +// of hardware addresses is empty, it will return the previously known hardware +// addresses. Both the current, and previously known hardware addresses might be +// empty. +func (b *LocalBackend) getHardwareAddrs() ([]string, error) { + addrs, err := posture.GetHardwareAddrs() + if err != nil { + return nil, err + } + + if len(addrs) == 0 { + b.logf("getHardwareAddrs: got empty list of hwaddrs, returning previous list") + return b.lastKnownHardwareAddrs.Load(), nil + } + + b.lastKnownHardwareAddrs.Store(addrs) + return addrs, nil +} + // resetForProfileChangeLockedOnEntry resets the backend for a profile change. // // b.mu must held on entry. It is released on exit. From a7be3a3d86fe4bcde169db1c6e4f0e398c887498 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Wed, 26 Mar 2025 10:50:20 -0500 Subject: [PATCH 0655/1708] ipn/ipnlocal: add debug logging to initPeerAPIListener initPeerAPIListener may be returning early unexpectedly. Add debug logging to see what causes it to return early when it does. 
Updates #14393 Signed-off-by: Percy Wegmann --- ipn/ipnlocal/local.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 206f69968..c44827aa4 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5370,9 +5370,11 @@ func (b *LocalBackend) closePeerAPIListenersLocked() { const peerAPIListenAsync = runtime.GOOS == "windows" || runtime.GOOS == "android" func (b *LocalBackend) initPeerAPIListener() { + b.logf("[v1] initPeerAPIListener: entered") b.mu.Lock() defer b.mu.Unlock() if b.shutdownCalled { + b.logf("[v1] initPeerAPIListener: shutting down") return } @@ -5382,6 +5384,7 @@ func (b *LocalBackend) initPeerAPIListener() { // ResetForClientDisconnect, or Start happens when its // mutex was released, the netMap could be // nil'ed out (Issue 1996). Bail out early here if so. + b.logf("[v1] initPeerAPIListener: no netmap") return } @@ -5396,6 +5399,7 @@ func (b *LocalBackend) initPeerAPIListener() { } if allSame { // Nothing to do. + b.logf("[v1] initPeerAPIListener: %d netmap addresses match existing listeners", addrs.Len()) return } } @@ -5404,6 +5408,7 @@ func (b *LocalBackend) initPeerAPIListener() { selfNode := b.netMap.SelfNode if !selfNode.Valid() || b.netMap.GetAddresses().Len() == 0 { + b.logf("[v1] initPeerAPIListener: no addresses in netmap") return } @@ -5437,7 +5442,7 @@ func (b *LocalBackend) initPeerAPIListener() { ln, err = ps.listen(a.Addr(), b.prevIfState) if err != nil { if peerAPIListenAsync { - b.logf("possibly transient peerapi listen(%q) error, will try again on linkChange: %v", a.Addr(), err) + b.logf("[v1] possibly transient peerapi listen(%q) error, will try again on linkChange: %v", a.Addr(), err) // Expected. But we fix it later in linkChange // ("peerAPIListeners too low"). continue From e720b9824e20b7b485d02498bd3bf696c1114752 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 30 Mar 2025 21:13:56 -0700 Subject: [PATCH 0656/1708] net/netcheck: use NoMeasureNoHome in another spot It only affected js/wasm and tamago. Updates tailscale/corp#24697 Change-Id: I8fd29323ed9b663fe3fd8d4a86f26ff584a3e134 Signed-off-by: Brad Fitzpatrick --- net/netcheck/netcheck.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index a33ca2209..74c866d92 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -1045,7 +1045,7 @@ func (c *Client) finishAndStoreReport(rs *reportState, dm *tailcfg.DERPMap) *Rep } // runHTTPOnlyChecks is the netcheck done by environments that can -// only do HTTP requests, such as ws/wasm. +// only do HTTP requests, such as js/wasm. func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *reportState, dm *tailcfg.DERPMap) error { var regions []*tailcfg.DERPRegion if rs.incremental && last != nil { @@ -1057,6 +1057,9 @@ func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *report } if len(regions) == 0 { for _, dr := range dm.Regions { + if dr.NoMeasureNoHome { + continue + } regions = append(regions, dr) } } From 96fe8a6db6c916c1474dbfab44b14d5fd5173988 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 30 Mar 2025 21:11:41 -0700 Subject: [PATCH 0657/1708] net/netmon: always remember ifState as old state, even on minor changes Otherwise you can get stuck finding minor ones nonstop. 
Fixes #15484 Change-Id: I7f98ac338c0b32ec1b9fdc47d053207b5fc1bf23 Signed-off-by: Brad Fitzpatrick --- net/netmon/netmon.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index bd62ab270..f2dd37f1d 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -441,7 +441,6 @@ func (m *Monitor) handlePotentialChange(newState *State, forceCallbacks bool) { delta.Major = m.IsMajorChangeFrom(oldState, newState) if delta.Major { m.gwValid = false - m.ifState = newState if s1, s2 := oldState.String(), delta.New.String(); s1 == s2 { m.logf("[unexpected] network state changed, but stringification didn't: %v", s1) @@ -449,6 +448,7 @@ func (m *Monitor) handlePotentialChange(newState *State, forceCallbacks bool) { m.logf("[unexpected] new: %s", jsonSummary(newState)) } } + m.ifState = newState // See if we have a queued or new time jump signal. if timeJumped { m.resetTimeJumpedLocked() From e8b5f0b3c46d1620d1866f9b800098b1759742ab Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 31 Mar 2025 17:37:21 -0700 Subject: [PATCH 0658/1708] client/systray: use ico image format for windows Add the golang-image-ico package, which is an incredibly small package to handle the ICO container format with PNG inside. Some profile photos look quite pixelated when displayed at this size, but it's better than nothing, and any Windows support is just a bonus anyway. Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/systray/logo.go | 10 +++++++++- client/systray/systray.go | 17 +++++++++++++++++ go.mod | 5 +++-- go.sum | 10 ++++++---- 4 files changed, 35 insertions(+), 7 deletions(-) diff --git a/client/systray/logo.go b/client/systray/logo.go index 857a8a937..3467d1b74 100644 --- a/client/systray/logo.go +++ b/client/systray/logo.go @@ -11,10 +11,12 @@ import ( "image" "image/color" "image/png" + "runtime" "sync" "time" "fyne.io/systray" + ico "github.com/Kodeworks/golang-image-ico" "github.com/fogleman/gg" ) @@ -251,7 +253,13 @@ func (logo tsLogo) renderWithBorder(borderUnits int) *bytes.Buffer { } b := bytes.NewBuffer(nil) - png.Encode(b, dc.Image()) + + // Encode as ICO format on Windows, PNG on all other platforms. 
+ if runtime.GOOS == "windows" { + _ = ico.Encode(b, dc.Image()) + } else { + _ = png.Encode(b, dc.Image()) + } return b } diff --git a/client/systray/systray.go b/client/systray/systray.go index b5bde551c..781a65bb8 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -7,9 +7,11 @@ package systray import ( + "bytes" "context" "errors" "fmt" + "image" "io" "log" "net/http" @@ -23,6 +25,7 @@ import ( "time" "fyne.io/systray" + ico "github.com/Kodeworks/golang-image-ico" "github.com/atotto/clipboard" dbus "github.com/godbus/dbus/v5" "github.com/toqueteos/webbrowser" @@ -330,6 +333,20 @@ func setRemoteIcon(menu *systray.MenuItem, urlStr string) { resp, err := http.Get(urlStr) if err == nil && resp.StatusCode == http.StatusOK { b, _ = io.ReadAll(resp.Body) + + // Convert image to ICO format on Windows + if runtime.GOOS == "windows" { + im, _, err := image.Decode(bytes.NewReader(b)) + if err != nil { + return + } + buf := bytes.NewBuffer(nil) + if err := ico.Encode(buf, im); err != nil { + return + } + b = buf.Bytes() + } + httpCache[urlStr] = b resp.Body.Close() } diff --git a/go.mod b/go.mod index a566c941f..7be824165 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,8 @@ go 1.24.0 require ( filippo.io/mkcert v1.4.4 - fyne.io/systray v1.11.0 + fyne.io/systray v1.11.1-0.20250317195939-bcf6eed85e7a + github.com/Kodeworks/golang-image-ico v0.0.0-20141118225523-73f0f4cfade9 github.com/akutz/memconn v0.1.0 github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa github.com/andybalholm/brotli v1.1.0 @@ -100,7 +101,7 @@ require ( golang.org/x/net v0.36.0 golang.org/x/oauth2 v0.26.0 golang.org/x/sync v0.11.0 - golang.org/x/sys v0.30.0 + golang.org/x/sys v0.31.0 golang.org/x/term v0.29.0 golang.org/x/time v0.10.0 golang.org/x/tools v0.30.0 diff --git a/go.sum b/go.sum index 528e48c16..fffa17209 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,8 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc= filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA= -fyne.io/systray v1.11.0 h1:D9HISlxSkx+jHSniMBR6fCFOUjk1x/OOOJLa9lJYAKg= -fyne.io/systray v1.11.0/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs= +fyne.io/systray v1.11.1-0.20250317195939-bcf6eed85e7a h1:I8mEKo5sawHu8CqYf3FSjIl9b3puXasFVn2D/hrCneY= +fyne.io/systray v1.11.1-0.20250317195939-bcf6eed85e7a/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs= github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw= github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= github.com/Abirdcfly/dupword v0.0.14 h1:3U4ulkc8EUo+CaT105/GJ1BQwtgyj6+VaBVbAX11Ba8= @@ -67,6 +67,8 @@ github.com/Djarvur/go-err113 v0.1.0 h1:uCRZZOdMQ0TZPHYTdYpoC0bLYJKPEHPUJ8MeAa51l github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 h1:sATXp1x6/axKxz2Gjxv8MALP0bXaNRfQinEwyfMcx8c= github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0/go.mod h1:Nl76DrGNJTA1KJ0LePKBw/vznBX1EHbAZX8mwjR82nI= +github.com/Kodeworks/golang-image-ico v0.0.0-20141118225523-73f0f4cfade9 h1:1ltqoej5GtaWF8jaiA49HwsZD459jqm9YFz9ZtMFpQA= +github.com/Kodeworks/golang-image-ico v0.0.0-20141118225523-73f0f4cfade9/go.mod h1:7uhhqiBaR4CpN0k9rMjOtjpcfGd6DG2m04zQxKnWQ0I= github.com/Masterminds/goutils v1.1.1 
h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= @@ -1218,8 +1220,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= From 886ab4fad4b407a60c882ec984815ade65396210 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 31 Mar 2025 19:41:57 -0700 Subject: [PATCH 0659/1708] net/udprelay: start of UDP relay server implementation (#15480) This commit implements an experimental UDP relay server. The UDP relay server leverages the Disco protocol for a 3-way handshake between client and server, along with 3 new Disco message types for said handshake. These new Disco message types are also considered experimental, and are not yet tied to a capver. The server expects, and imposes, a Geneve (Generic Network Virtualization Encapsulation) header immediately following the underlay UDP header. Geneve protocol field values have been defined for Disco and WireGuard. The Geneve control bit must be set for the handshake between client and server, and unset for messages relayed between clients through the server. 
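As a rough orientation only (this is not the net/packet implementation; the registered
protocol numbers for Disco and WireGuard and the real header type live elsewhere in the
tree), the 8-byte Geneve base header described in RFC 8926 can be assembled as in the
sketch below. The protocol value and VNI shown are placeholders, not the values the
relay actually uses; only the field layout and the meaning of the O (control) bit follow
the RFC and this commit message.

package main

import (
	"encoding/binary"
	"fmt"
)

// geneveHeader is a simplified stand-in for the relay's real header handling.
type geneveHeader struct {
	Control  bool   // O bit: set during the Disco bind handshake, clear for relayed client traffic
	Protocol uint16 // protocol type of the encapsulated payload (e.g. Disco or WireGuard)
	VNI      uint32 // 24-bit virtual network identifier
}

// encode lays out the 8-byte Geneve base header per RFC 8926:
// byte 0: Ver (2 bits) + Opt Len (6 bits), both zero here;
// byte 1: O and C bits; bytes 2-3: protocol type; bytes 4-6: VNI; byte 7: reserved.
func (h geneveHeader) encode() []byte {
	b := make([]byte, 8)
	if h.Control {
		b[1] |= 0x80 // O (control) bit
	}
	binary.BigEndian.PutUint16(b[2:4], h.Protocol)
	b[4] = byte(h.VNI >> 16)
	b[5] = byte(h.VNI >> 8)
	b[6] = byte(h.VNI)
	return b
}

func main() {
	// Placeholder protocol number and VNI, for illustration only.
	h := geneveHeader{Control: true, Protocol: 0x0101, VNI: 1}
	fmt.Printf("geneve header: % x\n", h.encode())
}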
Updates tailscale/corp#27101 Signed-off-by: Jordan Whited --- disco/disco.go | 128 ++++++++- disco/disco_test.go | 23 ++ net/udprelay/server.go | 532 ++++++++++++++++++++++++++++++++++++ net/udprelay/server_test.go | 204 ++++++++++++++ 4 files changed, 884 insertions(+), 3 deletions(-) create mode 100644 net/udprelay/server.go create mode 100644 net/udprelay/server_test.go diff --git a/disco/disco.go b/disco/disco.go index b9a90029d..c5aa4ace2 100644 --- a/disco/disco.go +++ b/disco/disco.go @@ -41,9 +41,12 @@ const NonceLen = 24 type MessageType byte const ( - TypePing = MessageType(0x01) - TypePong = MessageType(0x02) - TypeCallMeMaybe = MessageType(0x03) + TypePing = MessageType(0x01) + TypePong = MessageType(0x02) + TypeCallMeMaybe = MessageType(0x03) + TypeBindUDPRelayEndpoint = MessageType(0x04) + TypeBindUDPRelayEndpointChallenge = MessageType(0x05) + TypeBindUDPRelayEndpointAnswer = MessageType(0x06) ) const v0 = byte(0) @@ -77,12 +80,19 @@ func Parse(p []byte) (Message, error) { } t, ver, p := MessageType(p[0]), p[1], p[2:] switch t { + // TODO(jwhited): consider using a signature matching encoding.BinaryUnmarshaler case TypePing: return parsePing(ver, p) case TypePong: return parsePong(ver, p) case TypeCallMeMaybe: return parseCallMeMaybe(ver, p) + case TypeBindUDPRelayEndpoint: + return parseBindUDPRelayEndpoint(ver, p) + case TypeBindUDPRelayEndpointChallenge: + return parseBindUDPRelayEndpointChallenge(ver, p) + case TypeBindUDPRelayEndpointAnswer: + return parseBindUDPRelayEndpointAnswer(ver, p) default: return nil, fmt.Errorf("unknown message type 0x%02x", byte(t)) } @@ -91,6 +101,7 @@ func Parse(p []byte) (Message, error) { // Message a discovery message. type Message interface { // AppendMarshal appends the message's marshaled representation. + // TODO(jwhited): consider using a signature matching encoding.BinaryAppender AppendMarshal([]byte) []byte } @@ -266,7 +277,118 @@ func MessageSummary(m Message) string { return fmt.Sprintf("pong tx=%x", m.TxID[:6]) case *CallMeMaybe: return "call-me-maybe" + case *BindUDPRelayEndpoint: + return "bind-udp-relay-endpoint" + case *BindUDPRelayEndpointChallenge: + return "bind-udp-relay-endpoint-challenge" + case *BindUDPRelayEndpointAnswer: + return "bind-udp-relay-endpoint-answer" default: return fmt.Sprintf("%#v", m) } } + +// BindUDPRelayHandshakeState represents the state of the 3-way bind handshake +// between UDP relay client and UDP relay server. Its potential values include +// those for both participants, UDP relay client and UDP relay server. A UDP +// relay server implementation can be found in net/udprelay. This is currently +// considered experimental. +type BindUDPRelayHandshakeState int + +const ( + // BindUDPRelayHandshakeStateInit represents the initial state prior to any + // message being transmitted. + BindUDPRelayHandshakeStateInit BindUDPRelayHandshakeState = iota + // BindUDPRelayHandshakeStateBindSent is the first client state after + // transmitting a BindUDPRelayEndpoint message to a UDP relay server. + BindUDPRelayHandshakeStateBindSent + // BindUDPRelayHandshakeStateChallengeSent is the first server state after + // receiving a BindUDPRelayEndpoint message from a UDP relay client and + // replying with a BindUDPRelayEndpointChallenge. + BindUDPRelayHandshakeStateChallengeSent + // BindUDPRelayHandshakeStateAnswerSent is a client state that is entered + // after transmitting a BindUDPRelayEndpointAnswer message towards a UDP + // relay server in response to a BindUDPRelayEndpointChallenge message. 
+ BindUDPRelayHandshakeStateAnswerSent + // BindUDPRelayHandshakeStateAnswerReceived is a server state that is + // entered after it has received a correct BindUDPRelayEndpointAnswer + // message from a UDP relay client in response to a + // BindUDPRelayEndpointChallenge message. + BindUDPRelayHandshakeStateAnswerReceived +) + +// bindUDPRelayEndpointLen is the length of a marshalled BindUDPRelayEndpoint +// message, without the message header. +const bindUDPRelayEndpointLen = BindUDPRelayEndpointChallengeLen + +// BindUDPRelayEndpoint is the first messaged transmitted from UDP relay client +// towards UDP relay server as part of the 3-way bind handshake. It is padded to +// match the length of BindUDPRelayEndpointChallenge. This message type is +// currently considered experimental and is not yet tied to a +// tailcfg.CapabilityVersion. +type BindUDPRelayEndpoint struct { +} + +func (m *BindUDPRelayEndpoint) AppendMarshal(b []byte) []byte { + ret, _ := appendMsgHeader(b, TypeBindUDPRelayEndpoint, v0, bindUDPRelayEndpointLen) + return ret +} + +func parseBindUDPRelayEndpoint(ver uint8, p []byte) (m *BindUDPRelayEndpoint, err error) { + m = new(BindUDPRelayEndpoint) + return m, nil +} + +// BindUDPRelayEndpointChallengeLen is the length of a marshalled +// BindUDPRelayEndpointChallenge message, without the message header. +const BindUDPRelayEndpointChallengeLen = 32 + +// BindUDPRelayEndpointChallenge is transmitted from UDP relay server towards +// UDP relay client in response to a BindUDPRelayEndpoint message as part of the +// 3-way bind handshake. This message type is currently considered experimental +// and is not yet tied to a tailcfg.CapabilityVersion. +type BindUDPRelayEndpointChallenge struct { + Challenge [BindUDPRelayEndpointChallengeLen]byte +} + +func (m *BindUDPRelayEndpointChallenge) AppendMarshal(b []byte) []byte { + ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpointChallenge, v0, BindUDPRelayEndpointChallengeLen) + copy(d, m.Challenge[:]) + return ret +} + +func parseBindUDPRelayEndpointChallenge(ver uint8, p []byte) (m *BindUDPRelayEndpointChallenge, err error) { + if len(p) < BindUDPRelayEndpointChallengeLen { + return nil, errShort + } + m = new(BindUDPRelayEndpointChallenge) + copy(m.Challenge[:], p[:]) + return m, nil +} + +// bindUDPRelayEndpointAnswerLen is the length of a marshalled +// BindUDPRelayEndpointAnswer message, without the message header. +const bindUDPRelayEndpointAnswerLen = BindUDPRelayEndpointChallengeLen + +// BindUDPRelayEndpointAnswer is transmitted from UDP relay client to UDP relay +// server in response to a BindUDPRelayEndpointChallenge message. This message +// type is currently considered experimental and is not yet tied to a +// tailcfg.CapabilityVersion. 
+type BindUDPRelayEndpointAnswer struct { + Answer [bindUDPRelayEndpointAnswerLen]byte +} + +func (m *BindUDPRelayEndpointAnswer) AppendMarshal(b []byte) []byte { + ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpointAnswer, v0, bindUDPRelayEndpointAnswerLen) + copy(d, m.Answer[:]) + return ret +} + +func parseBindUDPRelayEndpointAnswer(ver uint8, p []byte) (m *BindUDPRelayEndpointAnswer, err error) { + if len(p) < bindUDPRelayEndpointAnswerLen { + return nil, errShort + } + m = new(BindUDPRelayEndpointAnswer) + copy(m.Answer[:], p[:]) + return m, nil +} diff --git a/disco/disco_test.go b/disco/disco_test.go index 1a56324a5..751190445 100644 --- a/disco/disco_test.go +++ b/disco/disco_test.go @@ -83,6 +83,29 @@ func TestMarshalAndParse(t *testing.T) { }, want: "03 00 00 00 00 00 00 00 00 00 00 00 ff ff 01 02 03 04 02 37 20 01 00 00 00 00 00 00 00 00 00 00 00 00 34 56 03 15", }, + { + name: "bind_udp_relay_endpoint", + m: &BindUDPRelayEndpoint{}, + want: "04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + }, + { + name: "bind_udp_relay_endpoint_challenge", + m: &BindUDPRelayEndpointChallenge{ + Challenge: [BindUDPRelayEndpointChallengeLen]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }, + }, + want: "05 00 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", + }, + { + name: "bind_udp_relay_endpoint_answer", + m: &BindUDPRelayEndpointAnswer{ + Answer: [bindUDPRelayEndpointAnswerLen]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }, + }, + want: "06 00 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/net/udprelay/server.go b/net/udprelay/server.go new file mode 100644 index 000000000..30fc08326 --- /dev/null +++ b/net/udprelay/server.go @@ -0,0 +1,532 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package udprelay contains constructs for relaying Disco and WireGuard packets +// between Tailscale clients over UDP. This package is currently considered +// experimental. +package udprelay + +import ( + "bytes" + "crypto/rand" + "errors" + "fmt" + "net" + "net/netip" + "slices" + "strconv" + "sync" + "time" + + "go4.org/mem" + "tailscale.com/disco" + "tailscale.com/net/packet" + "tailscale.com/types/key" +) + +const ( + // defaultBindLifetime is somewhat arbitrary. We attempt to account for + // high latency between client and Server, and high latency between + // clients over side channels, e.g. DERP, used to exchange ServerEndpoint + // details. So, a total of 3 paths with potentially high latency. Using a + // conservative 10s "high latency" bounds for each path we end up at a 30s + // total. It is worse to set an aggressive bind lifetime as this may lead + // to path discovery failure, vs dealing with a slight increase of Server + // resource utilization (VNIs, RAM, etc) while tracking endpoints that won't + // bind. + defaultBindLifetime = time.Second * 30 + defaultSteadyStateLifetime = time.Minute * 5 +) + +// Server implements an experimental UDP relay server. 
+type Server struct { + // disco keypair used as part of 3-way bind handshake + disco key.DiscoPrivate + discoPublic key.DiscoPublic + + bindLifetime time.Duration + steadyStateLifetime time.Duration + + // addrPorts contains the ip:port pairs returned as candidate server + // endpoints in response to an allocation request. + addrPorts []netip.AddrPort + + uc *net.UDPConn + + closeOnce sync.Once + wg sync.WaitGroup + closeCh chan struct{} + closed bool + + mu sync.Mutex // guards the following fields + lamportID uint64 + vniPool []uint32 // the pool of available VNIs + byVNI map[uint32]*serverEndpoint + byDisco map[pairOfDiscoPubKeys]*serverEndpoint +} + +// pairOfDiscoPubKeys is a pair of key.DiscoPublic. It must be constructed via +// newPairOfDiscoPubKeys to ensure lexicographical ordering. +type pairOfDiscoPubKeys [2]key.DiscoPublic + +func (p pairOfDiscoPubKeys) String() string { + return fmt.Sprintf("%s <=> %s", p[0].ShortString(), p[1].ShortString()) +} + +func newPairOfDiscoPubKeys(discoA, discoB key.DiscoPublic) pairOfDiscoPubKeys { + pair := pairOfDiscoPubKeys([2]key.DiscoPublic{discoA, discoB}) + slices.SortFunc(pair[:], func(a, b key.DiscoPublic) int { + return a.Compare(b) + }) + return pair +} + +// ServerEndpoint contains the Server's endpoint details. +type ServerEndpoint struct { + // ServerDisco is the Server's Disco public key used as part of the 3-way + // bind handshake. Server will use the same ServerDisco for its lifetime. + // ServerDisco value in combination with LamportID value represents a + // unique ServerEndpoint allocation. + ServerDisco key.DiscoPublic + + // LamportID is unique and monotonically non-decreasing across + // ServerEndpoint allocations for the lifetime of Server. It enables clients + // to dedup and resolve allocation event order. Clients may race to allocate + // on the same Server, and signal ServerEndpoint details via alternative + // channels, e.g. DERP. Additionally, Server.AllocateEndpoint() requests may + // not result in a new allocation depending on existing server-side endpoint + // state. Therefore, where clients have local, existing state that contains + // ServerDisco and LamportID values matching a newly learned endpoint, these + // can be considered one and the same. If ServerDisco is equal, but + // LamportID is unequal, LamportID comparison determines which + // ServerEndpoint was allocated most recently. + LamportID uint64 + + // AddrPorts are the IP:Port candidate pairs the Server may be reachable + // over. + AddrPorts []netip.AddrPort + + // VNI (Virtual Network Identifier) is the Geneve header VNI the Server + // will use for transmitted packets, and expects for received packets + // associated with this endpoint. + VNI uint32 + + // BindLifetime is amount of time post-allocation the Server will consider + // the endpoint active while it has yet to be bound via 3-way bind handshake + // from both client parties. + BindLifetime time.Duration + + // SteadyStateLifetime is the amount of time post 3-way bind handshake from + // both client parties the Server will consider the endpoint active lacking + // bidirectional data flow. + SteadyStateLifetime time.Duration +} + +// serverEndpoint contains Server-internal ServerEndpoint state. serverEndpoint +// methods are not thread-safe. +type serverEndpoint struct { + // discoPubKeys contains the key.DiscoPublic of the served clients. The + // indexing of this array aligns with the following fields, e.g. 
+ // discoSharedSecrets[0] is the shared secret to use when sealing + // Disco protocol messages for transmission towards discoPubKeys[0]. + discoPubKeys pairOfDiscoPubKeys + discoSharedSecrets [2]key.DiscoShared + handshakeState [2]disco.BindUDPRelayHandshakeState + addrPorts [2]netip.AddrPort + lastSeen [2]time.Time // TODO(jwhited): consider using mono.Time + challenge [2][disco.BindUDPRelayEndpointChallengeLen]byte + + lamportID uint64 + vni uint32 + allocatedAt time.Time +} + +func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, uw udpWriter, serverDisco key.DiscoPublic) { + if senderIndex != 0 && senderIndex != 1 { + return + } + handshakeState := e.handshakeState[senderIndex] + if handshakeState == disco.BindUDPRelayHandshakeStateAnswerReceived { + // this sender is already bound + return + } + switch discoMsg := discoMsg.(type) { + case *disco.BindUDPRelayEndpoint: + switch handshakeState { + case disco.BindUDPRelayHandshakeStateInit: + // set sender addr + e.addrPorts[senderIndex] = from + fallthrough + case disco.BindUDPRelayHandshakeStateChallengeSent: + if from != e.addrPorts[senderIndex] { + // this is a later arriving bind from a different source, or + // a retransmit and the sender's source has changed, discard + return + } + m := new(disco.BindUDPRelayEndpointChallenge) + copy(m.Challenge[:], e.challenge[senderIndex][:]) + reply := make([]byte, packet.GeneveFixedHeaderLength, 512) + gh := packet.GeneveHeader{Control: true, VNI: e.vni, Protocol: packet.GeneveProtocolDisco} + err := gh.Encode(reply) + if err != nil { + return + } + reply = append(reply, disco.Magic...) + reply = serverDisco.AppendTo(reply) + box := e.discoSharedSecrets[senderIndex].Seal(m.AppendMarshal(nil)) + reply = append(reply, box...) + uw.WriteMsgUDPAddrPort(reply, nil, from) + // set new state + e.handshakeState[senderIndex] = disco.BindUDPRelayHandshakeStateChallengeSent + return + default: + // disco.BindUDPRelayEndpoint is unexpected in all other handshake states + return + } + case *disco.BindUDPRelayEndpointAnswer: + switch handshakeState { + case disco.BindUDPRelayHandshakeStateChallengeSent: + if from != e.addrPorts[senderIndex] { + // sender source has changed + return + } + if !bytes.Equal(discoMsg.Answer[:], e.challenge[senderIndex][:]) { + // bad answer + return + } + // sender is now bound + // TODO: Consider installing a fast path via netfilter or similar to + // relay (NAT) data packets for this serverEndpoint. 
+ e.handshakeState[senderIndex] = disco.BindUDPRelayHandshakeStateAnswerReceived + // record last seen as bound time + e.lastSeen[senderIndex] = time.Now() + return + default: + // disco.BindUDPRelayEndpointAnswer is unexpected in all other handshake + // states, or we've already handled it + return + } + default: + // unexpected Disco message type + return + } +} + +func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, uw udpWriter, serverDisco key.DiscoPublic) { + senderRaw, isDiscoMsg := disco.Source(b) + if !isDiscoMsg { + // Not a Disco message + return + } + sender := key.DiscoPublicFromRaw32(mem.B(senderRaw)) + senderIndex := -1 + switch { + case sender.Compare(e.discoPubKeys[0]) == 0: + senderIndex = 0 + case sender.Compare(e.discoPubKeys[1]) == 0: + senderIndex = 1 + default: + // unknown Disco public key + return + } + + const headerLen = len(disco.Magic) + key.DiscoPublicRawLen + discoPayload, ok := e.discoSharedSecrets[senderIndex].Open(b[headerLen:]) + if !ok { + // unable to decrypt the Disco payload + return + } + + discoMsg, err := disco.Parse(discoPayload) + if err != nil { + // unable to parse the Disco payload + return + } + + e.handleDiscoControlMsg(from, senderIndex, discoMsg, uw, serverDisco) +} + +type udpWriter interface { + WriteMsgUDPAddrPort(b []byte, oob []byte, addr netip.AddrPort) (n, oobn int, err error) +} + +func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeader, b []byte, uw udpWriter, serverDisco key.DiscoPublic) { + if !gh.Control { + if !e.isBound() { + // not a control packet, but serverEndpoint isn't bound + return + } + var to netip.AddrPort + switch { + case from == e.addrPorts[0]: + e.lastSeen[0] = time.Now() + to = e.addrPorts[1] + case from == e.addrPorts[1]: + e.lastSeen[1] = time.Now() + to = e.addrPorts[0] + default: + // unrecognized source + return + } + // relay packet + uw.WriteMsgUDPAddrPort(b, nil, to) + return + } + + if e.isBound() { + // control packet, but serverEndpoint is already bound + return + } + + if gh.Protocol != packet.GeneveProtocolDisco { + // control packet, but not Disco + return + } + + msg := b[packet.GeneveFixedHeaderLength:] + e.handleSealedDiscoControlMsg(from, msg, uw, serverDisco) +} + +func (e *serverEndpoint) isExpired(now time.Time, bindLifetime, steadyStateLifetime time.Duration) bool { + if !e.isBound() { + if now.Sub(e.allocatedAt) > bindLifetime { + return true + } + return false + } + if now.Sub(e.lastSeen[0]) > steadyStateLifetime || now.Sub(e.lastSeen[1]) > steadyStateLifetime { + return true + } + return false +} + +// isBound returns true if both clients have completed their 3-way handshake, +// otherwise false. +func (e *serverEndpoint) isBound() bool { + return e.handshakeState[0] == disco.BindUDPRelayHandshakeStateAnswerReceived && + e.handshakeState[1] == disco.BindUDPRelayHandshakeStateAnswerReceived +} + +// NewServer constructs a Server listening on 0.0.0.0:'port'. IPv6 is not yet +// supported. Port may be 0, and what ultimately gets bound is returned as +// 'boundPort'. Supplied 'addrs' are joined with 'boundPort' and returned as +// ServerEndpoint.AddrPorts in response to Server.AllocateEndpoint() requests. 
+// +// TODO: IPv6 support +// TODO: dynamic addrs:port discovery +func NewServer(port int, addrs []netip.Addr) (s *Server, boundPort int, err error) { + s = &Server{ + disco: key.NewDisco(), + bindLifetime: defaultBindLifetime, + steadyStateLifetime: defaultSteadyStateLifetime, + closeCh: make(chan struct{}), + byDisco: make(map[pairOfDiscoPubKeys]*serverEndpoint), + byVNI: make(map[uint32]*serverEndpoint), + } + s.discoPublic = s.disco.Public() + // TODO: instead of allocating 10s of MBs for the full pool, allocate + // smaller chunks and increase as needed + s.vniPool = make([]uint32, 0, 1<<24-1) + for i := 1; i < 1<<24; i++ { + s.vniPool = append(s.vniPool, uint32(i)) + } + boundPort, err = s.listenOn(port) + if err != nil { + return nil, 0, err + } + addrPorts := make([]netip.AddrPort, 0, len(addrs)) + for _, addr := range addrs { + addrPort, err := netip.ParseAddrPort(net.JoinHostPort(addr.String(), strconv.Itoa(boundPort))) + if err != nil { + return nil, 0, err + } + addrPorts = append(addrPorts, addrPort) + } + s.addrPorts = addrPorts + s.wg.Add(2) + go s.packetReadLoop() + go s.endpointGCLoop() + return s, boundPort, nil +} + +func (s *Server) listenOn(port int) (int, error) { + uc, err := net.ListenUDP("udp4", &net.UDPAddr{Port: port}) + if err != nil { + return 0, err + } + // TODO: set IP_PKTINFO sockopt + _, boundPortStr, err := net.SplitHostPort(uc.LocalAddr().String()) + if err != nil { + s.uc.Close() + return 0, err + } + boundPort, err := strconv.Atoi(boundPortStr) + if err != nil { + s.uc.Close() + return 0, err + } + s.uc = uc + return boundPort, nil +} + +// Close closes the server. +func (s *Server) Close() error { + s.closeOnce.Do(func() { + s.mu.Lock() + defer s.mu.Unlock() + s.uc.Close() + close(s.closeCh) + s.wg.Wait() + clear(s.byVNI) + clear(s.byDisco) + s.vniPool = nil + s.closed = true + }) + return nil +} + +func (s *Server) endpointGCLoop() { + defer s.wg.Done() + ticker := time.NewTicker(s.bindLifetime) + defer ticker.Stop() + + gc := func() { + now := time.Now() + // TODO: consider performance implications of scanning all endpoints and + // holding s.mu for the duration. Keep it simple (and slow) for now. + s.mu.Lock() + defer s.mu.Unlock() + for k, v := range s.byDisco { + if v.isExpired(now, s.bindLifetime, s.steadyStateLifetime) { + delete(s.byDisco, k) + delete(s.byVNI, v.vni) + s.vniPool = append(s.vniPool, v.vni) + } + } + } + + for { + select { + case <-ticker.C: + gc() + case <-s.closeCh: + return + } + } +} + +func (s *Server) handlePacket(from netip.AddrPort, b []byte, uw udpWriter) { + gh := packet.GeneveHeader{} + err := gh.Decode(b) + if err != nil { + return + } + // TODO: consider performance implications of holding s.mu for the remainder + // of this method, which does a bunch of disco/crypto work depending. Keep + // it simple (and slow) for now. + s.mu.Lock() + defer s.mu.Unlock() + e, ok := s.byVNI[gh.VNI] + if !ok { + // unknown VNI + return + } + + e.handlePacket(from, gh, b, uw, s.discoPublic) +} + +func (s *Server) packetReadLoop() { + defer func() { + s.wg.Done() + s.Close() + }() + b := make([]byte, 1<<16-1) + for { + // TODO: extract laddr from IP_PKTINFO for use in reply + n, from, err := s.uc.ReadFromUDPAddrPort(b) + if err != nil { + return + } + s.handlePacket(from, b[:n], s.uc) + } +} + +var ErrServerClosed = errors.New("server closed") + +// AllocateEndpoint allocates a ServerEndpoint for the provided pair of +// key.DiscoPublic's. It returns an error (ErrServerClosed) if the server has +// been closed. 
+func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (ServerEndpoint, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.closed { + return ServerEndpoint{}, ErrServerClosed + } + + if discoA.Compare(s.discoPublic) == 0 || discoB.Compare(s.discoPublic) == 0 { + return ServerEndpoint{}, fmt.Errorf("client disco equals server disco: %s", s.discoPublic.ShortString()) + } + + pair := newPairOfDiscoPubKeys(discoA, discoB) + e, ok := s.byDisco[pair] + if ok { + if !e.isBound() { + // If the endpoint is not yet bound this is likely an allocation + // race between two clients on the same Server. Instead of + // re-allocating we return the existing allocation. We do not reset + // e.allocatedAt in case a client is "stuck" in an allocation + // loop and will not be able to complete a handshake, for whatever + // reason. Once the endpoint expires a new endpoint will be + // allocated. Clients can resolve duplicate ServerEndpoint details + // via ServerEndpoint.LamportID. + // + // TODO: consider ServerEndpoint.BindLifetime -= time.Now()-e.allocatedAt + // to give the client a more accurate picture of the bind window. + // Or, some threshold to trigger re-allocation if too much time has + // already passed since it was originally allocated. + return ServerEndpoint{ + ServerDisco: s.discoPublic, + AddrPorts: s.addrPorts, + VNI: e.vni, + LamportID: e.lamportID, + BindLifetime: s.bindLifetime, + SteadyStateLifetime: s.steadyStateLifetime, + }, nil + } + // If an endpoint exists for the pair of key.DiscoPublic's, and is + // already bound, delete it. We will re-allocate a new endpoint. Chances + // are clients cannot make use of the existing, bound allocation if + // they are requesting a new one. + delete(s.byDisco, pair) + delete(s.byVNI, e.vni) + s.vniPool = append(s.vniPool, e.vni) + } + + if len(s.vniPool) == 0 { + return ServerEndpoint{}, errors.New("VNI pool exhausted") + } + + s.lamportID++ + e = &serverEndpoint{ + discoPubKeys: pair, + lamportID: s.lamportID, + allocatedAt: time.Now(), + } + e.discoSharedSecrets[0] = s.disco.Shared(e.discoPubKeys[0]) + e.discoSharedSecrets[1] = s.disco.Shared(e.discoPubKeys[1]) + e.vni, s.vniPool = s.vniPool[0], s.vniPool[1:] + rand.Read(e.challenge[0][:]) + rand.Read(e.challenge[1][:]) + + s.byDisco[pair] = e + s.byVNI[e.vni] = e + + return ServerEndpoint{ + ServerDisco: s.discoPublic, + AddrPorts: s.addrPorts, + VNI: e.vni, + LamportID: e.lamportID, + BindLifetime: s.bindLifetime, + SteadyStateLifetime: s.steadyStateLifetime, + }, nil +} diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go new file mode 100644 index 000000000..733e50b77 --- /dev/null +++ b/net/udprelay/server_test.go @@ -0,0 +1,204 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package udprelay + +import ( + "bytes" + "net" + "net/netip" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "go4.org/mem" + "tailscale.com/disco" + "tailscale.com/net/packet" + "tailscale.com/types/key" +) + +type testClient struct { + vni uint32 + local key.DiscoPrivate + server key.DiscoPublic + uc *net.UDPConn +} + +func newTestClient(t *testing.T, vni uint32, serverEndpoint netip.AddrPort, local key.DiscoPrivate, server key.DiscoPublic) *testClient { + rAddr := &net.UDPAddr{IP: serverEndpoint.Addr().AsSlice(), Port: int(serverEndpoint.Port())} + uc, err := net.DialUDP("udp4", nil, rAddr) + if err != nil { + t.Fatal(err) + } + return &testClient{ + vni: vni, + local: local, + server: server, + 
uc: uc, + } +} + +func (c *testClient) write(t *testing.T, b []byte) { + _, err := c.uc.Write(b) + if err != nil { + t.Fatal(err) + } +} + +func (c *testClient) read(t *testing.T) []byte { + c.uc.SetReadDeadline(time.Now().Add(time.Second)) + b := make([]byte, 1<<16-1) + n, err := c.uc.Read(b) + if err != nil { + t.Fatal(err) + } + return b[:n] +} + +func (c *testClient) writeDataPkt(t *testing.T, b []byte) { + pkt := make([]byte, packet.GeneveFixedHeaderLength, packet.GeneveFixedHeaderLength+len(b)) + gh := packet.GeneveHeader{Control: false, VNI: c.vni, Protocol: packet.GeneveProtocolWireGuard} + err := gh.Encode(pkt) + if err != nil { + t.Fatal(err) + } + pkt = append(pkt, b...) + c.write(t, pkt) +} + +func (c *testClient) readDataPkt(t *testing.T) []byte { + b := c.read(t) + gh := packet.GeneveHeader{} + err := gh.Decode(b) + if err != nil { + t.Fatal(err) + } + if gh.Protocol != packet.GeneveProtocolWireGuard { + t.Fatal("unexpected geneve protocol") + } + if gh.Control { + t.Fatal("unexpected control") + } + if gh.VNI != c.vni { + t.Fatal("unexpected vni") + } + return b[packet.GeneveFixedHeaderLength:] +} + +func (c *testClient) writeControlDiscoMsg(t *testing.T, msg disco.Message) { + pkt := make([]byte, packet.GeneveFixedHeaderLength, 512) + gh := packet.GeneveHeader{Control: true, VNI: c.vni, Protocol: packet.GeneveProtocolDisco} + err := gh.Encode(pkt) + if err != nil { + t.Fatal(err) + } + pkt = append(pkt, disco.Magic...) + pkt = c.local.Public().AppendTo(pkt) + box := c.local.Shared(c.server).Seal(msg.AppendMarshal(nil)) + pkt = append(pkt, box...) + c.write(t, pkt) +} + +func (c *testClient) readControlDiscoMsg(t *testing.T) disco.Message { + b := c.read(t) + gh := packet.GeneveHeader{} + err := gh.Decode(b) + if err != nil { + t.Fatal(err) + } + if gh.Protocol != packet.GeneveProtocolDisco { + t.Fatal("unexpected geneve protocol") + } + if !gh.Control { + t.Fatal("unexpected non-control") + } + if gh.VNI != c.vni { + t.Fatal("unexpected vni") + } + b = b[packet.GeneveFixedHeaderLength:] + headerLen := len(disco.Magic) + key.DiscoPublicRawLen + if len(b) < headerLen { + t.Fatal("disco message too short") + } + sender := key.DiscoPublicFromRaw32(mem.B(b[len(disco.Magic):headerLen])) + if sender.Compare(c.server) != 0 { + t.Fatal("unknown disco public key") + } + payload, ok := c.local.Shared(c.server).Open(b[headerLen:]) + if !ok { + t.Fatal("failed to open sealed disco msg") + } + msg, err := disco.Parse(payload) + if err != nil { + t.Fatal("failed to parse disco payload") + } + return msg +} + +func (c *testClient) handshake(t *testing.T) { + c.writeControlDiscoMsg(t, &disco.BindUDPRelayEndpoint{}) + msg := c.readControlDiscoMsg(t) + challenge, ok := msg.(*disco.BindUDPRelayEndpointChallenge) + if !ok { + t.Fatal("unexepcted disco message type") + } + c.writeControlDiscoMsg(t, &disco.BindUDPRelayEndpointAnswer{Answer: challenge.Challenge}) +} + +func (c *testClient) close() { + c.uc.Close() +} + +func TestServer(t *testing.T) { + discoA := key.NewDisco() + discoB := key.NewDisco() + + ipv4LoopbackAddr := netip.MustParseAddr("127.0.0.1") + + server, _, err := NewServer(0, []netip.Addr{ipv4LoopbackAddr}) + if err != nil { + t.Fatal(err) + } + defer server.Close() + + endpoint, err := server.AllocateEndpoint(discoA.Public(), discoB.Public()) + if err != nil { + t.Fatal(err) + } + dupEndpoint, err := server.AllocateEndpoint(discoA.Public(), discoB.Public()) + if err != nil { + t.Fatal(err) + } + + // We expect the same endpoint details as the 3-way bind handshake has not + // 
yet been completed for both relay client parties. + if diff := cmp.Diff(dupEndpoint, endpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { + t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) + } + + if len(endpoint.AddrPorts) != 1 { + t.Fatalf("unexpected endpoint.AddrPorts: %v", endpoint.AddrPorts) + } + tcA := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, endpoint.ServerDisco) + defer tcA.close() + tcB := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, endpoint.ServerDisco) + defer tcB.close() + + tcA.handshake(t) + tcB.handshake(t) + + txToB := []byte{1, 2, 3} + tcA.writeDataPkt(t, txToB) + rxFromA := tcB.readDataPkt(t) + if !bytes.Equal(txToB, rxFromA) { + t.Fatal("unexpected msg A->B") + } + + txToA := []byte{4, 5, 6} + tcB.writeDataPkt(t, txToA) + rxFromB := tcA.readDataPkt(t) + if !bytes.Equal(txToA, rxFromB) { + t.Fatal("unexpected msg B->A") + } +} From fb47824d749c214ac74115a92f89256c0ee51bf9 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Tue, 1 Apr 2025 13:20:46 -0400 Subject: [PATCH 0660/1708] wgengine: return explicit lo0 for loopback addrs on sandboxed macOS (#15493) fixes tailscale/corp#27506 The source address link selection on sandboxed macOS doesn't deal with loopback addresses correctly. This adds an explicit check to ensure we return the loopback interface for loopback addresses instead of the default empty interface. Specifically, this allows the dns resolver to route queries to a loopback IP which is a common tactic for local DNS proxies. Tested on both macos, macsys and tailscaled. Forwarded requests to 127/8 all bound to lo0. Signed-off-by: Jonathan Nobels --- wgengine/userspace.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index b51b2c8ea..1200003f6 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -1580,6 +1580,12 @@ type fwdDNSLinkSelector struct { } func (ls fwdDNSLinkSelector) PickLink(ip netip.Addr) (linkName string) { + // sandboxed macOS does not automatically bind to the loopback interface so + // we must be explicit about it. + if runtime.GOOS == "darwin" && ip.IsLoopback() { + return "lo0" + } + if ls.ue.isDNSIPOverTailscale.Load()(ip) { return ls.tunName } From 95034e15a79888b1a4afe48e6812fd47ea138fd5 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Tue, 25 Mar 2025 12:59:07 -0700 Subject: [PATCH 0661/1708] cmd/natc: fix ip allocation runtime Avoid the unbounded runtime during random allocation, if random allocation fails after a first pass at random through the provided ranges, pick the next free address by walking through the allocated set. The new ipx utilities provide a bitset based allocation pool, good for small to moderate ranges of IPv4 addresses as used in natc. 
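As a worked example of the fallback scan's bit trick (mirroring leastZeroBit in the new ipx.go; the value of n below is purely illustrative, and bits is math/bits):

    n := uint(0b0111)                       // the three lowest addresses are allocated
    notN := ^n                              // ...11111000
    rightmostBit := notN & -notN            // 0b1000: isolates the lowest zero bit of n
    idx := bits.TrailingZeros(rightmostBit) // 3: index of the first free address in this word
    // If every bit of the word is set, rightmostBit is 0 and leastZeroBit reports -1,
    // so the scan moves on to the next word of the allocated set.
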
Updates #15367 Signed-off-by: James Tucker --- cmd/natc/ipx.go | 130 ++++++++++++++++++++++++++++++++++++ cmd/natc/ipx_test.go | 150 ++++++++++++++++++++++++++++++++++++++++++ cmd/natc/natc.go | 109 +++++++++++------------------- cmd/natc/natc_test.go | 33 +++------- 4 files changed, 325 insertions(+), 97 deletions(-) create mode 100644 cmd/natc/ipx.go create mode 100644 cmd/natc/ipx_test.go diff --git a/cmd/natc/ipx.go b/cmd/natc/ipx.go new file mode 100644 index 000000000..06bf7be79 --- /dev/null +++ b/cmd/natc/ipx.go @@ -0,0 +1,130 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "math/big" + "math/bits" + "math/rand/v2" + "net/netip" + + "go4.org/netipx" +) + +func addrLessOrEqual(a, b netip.Addr) bool { + if a.Less(b) { + return true + } + if a == b { + return true + } + return false +} + +// indexOfAddr returns the index of addr in ipset, or -1 if not found. +func indexOfAddr(addr netip.Addr, ipset *netipx.IPSet) int { + var base int // offset of the current range + for _, r := range ipset.Ranges() { + if addr.Less(r.From()) { + return -1 + } + numFrom := v4ToNum(r.From()) + if addrLessOrEqual(addr, r.To()) { + numInRange := int(v4ToNum(addr) - numFrom) + return base + numInRange + } + numTo := v4ToNum(r.To()) + base += int(numTo-numFrom) + 1 + } + return -1 +} + +// addrAtIndex returns the address at the given index in ipset, or an empty +// address if index is out of range. +func addrAtIndex(index int, ipset *netipx.IPSet) netip.Addr { + if index < 0 { + return netip.Addr{} + } + var base int // offset of the current range + for _, r := range ipset.Ranges() { + numFrom := v4ToNum(r.From()) + numTo := v4ToNum(r.To()) + if index <= base+int(numTo-numFrom) { + return numToV4(uint32(int(numFrom) + index - base)) + } + base += int(numTo-numFrom) + 1 + } + return netip.Addr{} +} + +// TODO(golang/go#9455): once we have uint128 we can easily implement for all addrs. + +// v4ToNum returns a uint32 representation of the IPv4 address. If addr is not +// an IPv4 address, this function will panic. +func v4ToNum(addr netip.Addr) uint32 { + addr = addr.Unmap() + if !addr.Is4() { + panic("only IPv4 addresses are supported by v4ToNum") + } + b := addr.As4() + var o uint32 + o = o<<8 | uint32(b[0]) + o = o<<8 | uint32(b[1]) + o = o<<8 | uint32(b[2]) + o = o<<8 | uint32(b[3]) + return o +} + +func numToV4(i uint32) netip.Addr { + var addr [4]byte + addr[0] = byte((i >> 24) & 0xff) + addr[1] = byte((i >> 16) & 0xff) + addr[2] = byte((i >> 8) & 0xff) + addr[3] = byte(i & 0xff) + return netip.AddrFrom4(addr) +} + +// allocAddr returns an address in ipset that is not already marked allocated in allocated. +func allocAddr(ipset *netipx.IPSet, allocated *big.Int) netip.Addr { + // first try to allocate a random IP from each range, if we land on one. 
+ var base uint32 // index offset of the current range + for _, r := range ipset.Ranges() { + numFrom := v4ToNum(r.From()) + numTo := v4ToNum(r.To()) + randInRange := rand.N(numTo - numFrom) + randIndex := base + randInRange + if allocated.Bit(int(randIndex)) == 0 { + allocated.SetBit(allocated, int(randIndex), 1) + return numToV4(numFrom + randInRange) + } + base += numTo - numFrom + 1 + } + + // fall back to seeking a free bit in the allocated set + index := -1 + for i, word := range allocated.Bits() { + zbi := leastZeroBit(uint(word)) + if zbi == -1 { + continue + } + index = i*bits.UintSize + zbi + allocated.SetBit(allocated, index, 1) + break + } + if index == -1 { + return netip.Addr{} + } + return addrAtIndex(index, ipset) +} + +// leastZeroBit returns the index of the least significant zero bit in the given uint, or -1 +// if all bits are set. +func leastZeroBit(n uint) int { + notN := ^n + rightmostBit := notN & -notN + if rightmostBit == 0 { + return -1 + } + return bits.TrailingZeros(rightmostBit) +} diff --git a/cmd/natc/ipx_test.go b/cmd/natc/ipx_test.go new file mode 100644 index 000000000..b60a5d981 --- /dev/null +++ b/cmd/natc/ipx_test.go @@ -0,0 +1,150 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "math" + "math/big" + "net/netip" + "testing" + + "go4.org/netipx" + "tailscale.com/util/must" +) + +func TestV4ToNum(t *testing.T) { + cases := []struct { + addr netip.Addr + num uint32 + }{ + {netip.MustParseAddr("0.0.0.0"), 0}, + {netip.MustParseAddr("255.255.255.255"), 0xffffffff}, + {netip.MustParseAddr("8.8.8.8"), 0x08080808}, + {netip.MustParseAddr("192.168.0.1"), 0xc0a80001}, + {netip.MustParseAddr("10.0.0.1"), 0x0a000001}, + {netip.MustParseAddr("172.16.0.1"), 0xac100001}, + {netip.MustParseAddr("100.64.0.1"), 0x64400001}, + } + + for _, tc := range cases { + num := v4ToNum(tc.addr) + if num != tc.num { + t.Errorf("addrNum(%v) = %d, want %d", tc.addr, num, tc.num) + } + if numToV4(num) != tc.addr { + t.Errorf("numToV4(%d) = %v, want %v", num, numToV4(num), tc.addr) + } + } + + func() { + defer func() { + if r := recover(); r == nil { + t.Fatal("expected panic") + } + }() + + v4ToNum(netip.MustParseAddr("::1")) + }() +} + +func TestAddrIndex(t *testing.T) { + builder := netipx.IPSetBuilder{} + builder.AddRange(netipx.MustParseIPRange("10.0.0.1-10.0.0.5")) + builder.AddRange(netipx.MustParseIPRange("192.168.0.1-192.168.0.10")) + ipset := must.Get(builder.IPSet()) + + indexCases := []struct { + addr netip.Addr + index int + }{ + {netip.MustParseAddr("10.0.0.1"), 0}, + {netip.MustParseAddr("10.0.0.2"), 1}, + {netip.MustParseAddr("10.0.0.3"), 2}, + {netip.MustParseAddr("10.0.0.4"), 3}, + {netip.MustParseAddr("10.0.0.5"), 4}, + {netip.MustParseAddr("192.168.0.1"), 5}, + {netip.MustParseAddr("192.168.0.5"), 9}, + {netip.MustParseAddr("192.168.0.10"), 14}, + {netip.MustParseAddr("172.16.0.1"), -1}, // Not in set + } + + for _, tc := range indexCases { + index := indexOfAddr(tc.addr, ipset) + if index != tc.index { + t.Errorf("indexOfAddr(%v) = %d, want %d", tc.addr, index, tc.index) + } + if tc.index == -1 { + continue + } + addr := addrAtIndex(tc.index, ipset) + if addr != tc.addr { + t.Errorf("addrAtIndex(%d) = %v, want %v", tc.index, addr, tc.addr) + } + } +} + +func TestAllocAddr(t *testing.T) { + builder := netipx.IPSetBuilder{} + builder.AddRange(netipx.MustParseIPRange("10.0.0.1-10.0.0.5")) + builder.AddRange(netipx.MustParseIPRange("192.168.0.1-192.168.0.10")) + ipset := must.Get(builder.IPSet()) + + 
allocated := new(big.Int) + for range 15 { + addr := allocAddr(ipset, allocated) + if !addr.IsValid() { + t.Errorf("allocAddr() = invalid, want valid") + } + if !ipset.Contains(addr) { + t.Errorf("allocAddr() = %v, not in set", addr) + } + } + addr := allocAddr(ipset, allocated) + if addr.IsValid() { + t.Errorf("allocAddr() = %v, want invalid", addr) + } + wantAddr := netip.MustParseAddr("10.0.0.2") + allocated.SetBit(allocated, indexOfAddr(wantAddr, ipset), 0) + addr = allocAddr(ipset, allocated) + if addr != wantAddr { + t.Errorf("allocAddr() = %v, want %v", addr, wantAddr) + } +} + +func TestLeastZeroBit(t *testing.T) { + cases := []struct { + num uint + want int + }{ + {math.MaxUint, -1}, + {0, 0}, + {0b01, 1}, + {0b11, 2}, + {0b111, 3}, + {math.MaxUint, -1}, + {math.MaxUint - 1, 0}, + } + if math.MaxUint == math.MaxUint64 { + cases = append(cases, []struct { + num uint + want int + }{ + {math.MaxUint >> 1, 63}, + }...) + } else { + cases = append(cases, []struct { + num uint + want int + }{ + {math.MaxUint >> 1, 31}, + }...) + } + + for _, tc := range cases { + got := leastZeroBit(tc.num) + if got != tc.want { + t.Errorf("leastZeroBit(%b) = %d, want %d", tc.num, got, tc.want) + } + } +} diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index 31d6a5d26..a8168ce6d 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -8,13 +8,12 @@ package main import ( "context" - "encoding/binary" "errors" "expvar" "flag" "fmt" "log" - "math/rand/v2" + "math/big" "net" "net/http" "net/netip" @@ -26,6 +25,7 @@ import ( "github.com/gaissmai/bart" "github.com/inetaf/tcpproxy" "github.com/peterbourgon/ff/v3" + "go4.org/netipx" "golang.org/x/net/dns/dnsmessage" "tailscale.com/client/local" "tailscale.com/envknob" @@ -38,6 +38,7 @@ import ( "tailscale.com/tsweb" "tailscale.com/util/dnsname" "tailscale.com/util/mak" + "tailscale.com/util/must" "tailscale.com/wgengine/netstack" ) @@ -94,24 +95,6 @@ func main() { } ignoreDstTable.Insert(pfx, true) } - var ( - v4Prefixes []netip.Prefix - numV4DNSAddrs int - ) - for _, s := range strings.Split(*v4PfxStr, ",") { - p := netip.MustParsePrefix(strings.TrimSpace(s)) - if p.Masked() != p { - log.Fatalf("v4 prefix %v is not a masked prefix", p) - } - v4Prefixes = append(v4Prefixes, p) - numIPs := 1 << (32 - p.Bits()) - numV4DNSAddrs += numIPs - } - if len(v4Prefixes) == 0 { - log.Fatalf("no v4 prefixes specified") - } - dnsAddr := v4Prefixes[0].Addr() - numV4DNSAddrs -= 1 // Subtract the dnsAddr allocated above. ts := &tsnet.Server{ Hostname: *hostname, } @@ -159,17 +142,34 @@ func main() { } c := &connector{ - ts: ts, - lc: lc, - dnsAddr: dnsAddr, - v4Ranges: v4Prefixes, - numV4DNSAddrs: numV4DNSAddrs, - v6ULA: ula(uint16(*siteID)), - ignoreDsts: ignoreDstTable, + ts: ts, + lc: lc, + v6ULA: ula(uint16(*siteID)), + ignoreDsts: ignoreDstTable, + } + var prefixes []netip.Prefix + for _, s := range strings.Split(*v4PfxStr, ",") { + p := netip.MustParsePrefix(strings.TrimSpace(s)) + if p.Masked() != p { + log.Fatalf("v4 prefix %v is not a masked prefix", p) + } + prefixes = append(prefixes, p) } + c.setPrefixes(prefixes) c.run(ctx) } +func (c *connector) setPrefixes(prefixes []netip.Prefix) { + var ipsb netipx.IPSetBuilder + for _, p := range prefixes { + ipsb.AddPrefix(p) + } + c.routes = must.Get(ipsb.IPSet()) + c.dnsAddr = c.routes.Ranges()[0].From() + ipsb.Remove(c.dnsAddr) + c.ipset = must.Get(ipsb.IPSet()) +} + type connector struct { // ts is the tsnet.Server used to host the connector. 
ts *tsnet.Server @@ -181,13 +181,13 @@ type connector struct { // prevent the app connector from assigning it to a domain. dnsAddr netip.Addr - // v4Ranges is the list of IPv4 ranges to advertise and assign addresses from. + // ipset is the set of IPv4 ranges to advertise and assign addresses from. // These are masked prefixes. - v4Ranges []netip.Prefix + ipset *netipx.IPSet - // numV4DNSAddrs is the total size of the IPv4 ranges in addresses, minus the - // dnsAddr allocation. - numV4DNSAddrs int + // routes is the set of IPv4 ranges advertised to the tailnet, or ipset with + // the dnsAddr removed. + routes *netipx.IPSet // v6ULA is the ULA prefix used by the app connector to assign IPv6 addresses. v6ULA netip.Prefix @@ -225,7 +225,7 @@ func (c *connector) run(ctx context.Context) { if _, err := c.lc.EditPrefs(ctx, &ipn.MaskedPrefs{ AdvertiseRoutesSet: true, Prefs: ipn.Prefs{ - AdvertiseRoutes: append(c.v4Ranges, c.v6ULA), + AdvertiseRoutes: append(c.routes.Prefixes(), c.v6ULA), }, }); err != nil { log.Fatalf("failed to advertise routes: %v", err) @@ -512,9 +512,9 @@ type perPeerState struct { c *connector mu sync.Mutex + addrInUse *big.Int domainToAddr map[string][]netip.Addr addrToDomain *bart.Table[string] - numV4Allocs int } // domainForIP returns the domain name assigned to the given IP address and @@ -550,46 +550,12 @@ func (ps *perPeerState) ipForDomain(domain string) ([]netip.Addr, error) { return addrs, nil } -// isIPUsedLocked reports whether the given IP address is already assigned to a -// domain. -// ps.mu must be held. -func (ps *perPeerState) isIPUsedLocked(ip netip.Addr) bool { - _, ok := ps.addrToDomain.Lookup(ip) - return ok -} - // unusedIPv4Locked returns an unused IPv4 address from the available ranges. func (ps *perPeerState) unusedIPv4Locked() netip.Addr { - // All addresses have been allocated. - if ps.numV4Allocs >= ps.c.numV4DNSAddrs { - return netip.Addr{} + if ps.addrInUse == nil { + ps.addrInUse = big.NewInt(0) } - - // TODO: skip ranges that have been exhausted - // TODO: implement a much more efficient algorithm for finding unused IPs, - // this is fairly crazy. - for { - for _, r := range ps.c.v4Ranges { - ip := randV4(r) - if !r.Contains(ip) { - panic("error: randV4 returned invalid address") - } - if !ps.isIPUsedLocked(ip) && ip != ps.c.dnsAddr { - return ip - } - } - } -} - -// randV4 returns a random IPv4 address within the given prefix. 
-func randV4(maskedPfx netip.Prefix) netip.Addr { - bits := 32 - maskedPfx.Bits() - randBits := rand.Uint32N(1 << uint(bits)) - - ip4 := maskedPfx.Addr().As4() - pn := binary.BigEndian.Uint32(ip4[:]) - binary.BigEndian.PutUint32(ip4[:], randBits|pn) - return netip.AddrFrom4(ip4) + return allocAddr(ps.c.ipset, ps.addrInUse) } // assignAddrsLocked assigns a pair of unique IP addresses for the given domain @@ -604,7 +570,6 @@ func (ps *perPeerState) assignAddrsLocked(domain string) []netip.Addr { if !v4.IsValid() { return nil } - ps.numV4Allocs++ as16 := ps.c.v6ULA.Addr().As16() as4 := v4.As4() copy(as16[12:], as4[:]) diff --git a/cmd/natc/natc_test.go b/cmd/natc/natc_test.go index e42fa7e89..ddd2d1894 100644 --- a/cmd/natc/natc_test.go +++ b/cmd/natc/natc_test.go @@ -43,17 +43,6 @@ func TestULA(t *testing.T) { } } -func TestRandV4(t *testing.T) { - pfx := netip.MustParsePrefix("100.64.1.0/24") - - for i := 0; i < 512; i++ { - ip := randV4(pfx) - if !pfx.Contains(ip) { - t.Errorf("randV4(%s) = %s; not contained in prefix", pfx, ip) - } - } -} - func TestDNSResponse(t *testing.T) { tests := []struct { name string @@ -227,11 +216,9 @@ func TestDNSResponse(t *testing.T) { func TestPerPeerState(t *testing.T) { c := &connector{ - v4Ranges: []netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")}, - v6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), - dnsAddr: netip.MustParseAddr("100.64.1.0"), - numV4DNSAddrs: (1<<(32-24) - 1), + v6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), } + c.setPrefixes([]netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")}) ps := &perPeerState{c: c} @@ -255,8 +242,8 @@ func TestPerPeerState(t *testing.T) { t.Errorf("Second address is not IPv6: %s", v6) } - if !c.v4Ranges[0].Contains(v4) { - t.Errorf("IPv4 address %s not in range %s", v4, c.v4Ranges[0]) + if !c.ipset.Contains(v4) { + t.Errorf("IPv4 address %s not in range %s", v4, c.ipset) } domain, ok := ps.domainForIP(v4) @@ -331,11 +318,9 @@ func TestIgnoreDestination(t *testing.T) { func TestConnectorGenerateDNSResponse(t *testing.T) { c := &connector{ - v4Ranges: []netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")}, - v6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), - dnsAddr: netip.MustParseAddr("100.64.1.0"), - numV4DNSAddrs: (1<<(32-24) - 1), + v6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), } + c.setPrefixes([]netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")}) req := &dnsmessage.Message{ Header: dnsmessage.Header{ID: 1234}, @@ -371,11 +356,9 @@ func TestConnectorGenerateDNSResponse(t *testing.T) { func TestIPPoolExhaustion(t *testing.T) { smallPrefix := netip.MustParsePrefix("100.64.1.0/30") // Only 4 IPs: .0, .1, .2, .3 c := &connector{ - v6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), - v4Ranges: []netip.Prefix{smallPrefix}, - dnsAddr: netip.MustParseAddr("100.64.1.0"), - numV4DNSAddrs: 3, + v6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), } + c.setPrefixes([]netip.Prefix{smallPrefix}) ps := &perPeerState{c: c} From faaa364568635e0d112cffce4431d8dfcd5db159 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 11:45:04 -0600 Subject: [PATCH 0662/1708] .github: Bump github/codeql-action from 3.28.11 to 3.28.13 (#15477) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.11 to 3.28.13. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/6bb031afdd8eb862ea3fc1848194185e076637e5...1b549b9259bda1cb5ddde3b41741a82a2d15a841) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index f20719360..4c9ee8088 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@6bb031afdd8eb862ea3fc1848194185e076637e5 # v3.28.11 + uses: github/codeql-action/init@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@6bb031afdd8eb862ea3fc1848194185e076637e5 # v3.28.11 + uses: github/codeql-action/autobuild@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@6bb031afdd8eb862ea3fc1848194185e076637e5 # v3.28.11 + uses: github/codeql-action/analyze@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13 From 13f6981694d1f9a305f9f28747038185fed3a97f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 12:10:49 -0700 Subject: [PATCH 0663/1708] go.toolchain.rev: bump for Go 1.24.2 + plan9 fixes Updates #5794 Change-Id: I696d49a3b0825ca90d3cb148b1c0dad9f7855808 Signed-off-by: Brad Fitzpatrick --- go.toolchain.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index 69aec16e4..c6dbf4fa1 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -4fdaeeb8fe43bcdb4e8cc736433b9cd9c0ddd221 +16b6e4fd15c59336156cdbc977de1745ad094f2d From e2f7750125b09df30022a45ec928d7d0778a69dc Mon Sep 17 00:00:00 2001 From: Adrian Dewhurst Date: Tue, 1 Apr 2025 19:05:45 -0400 Subject: [PATCH 0664/1708] tailcfg: add VIPServiceView Not currently used in the OSS tree, a View for tailcfg.VIPService will make implementing some server side changes easier. Updates tailscale/corp#26272 Change-Id: If1ed0bea4eff8c4425d3845b433a1c562d99eb9e Signed-off-by: Adrian Dewhurst --- tailcfg/tailcfg.go | 2 +- tailcfg/tailcfg_clone.go | 30 ++++++++++++++++++++- tailcfg/tailcfg_view.go | 58 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 87 insertions(+), 3 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 83fab9c97..30672bc6f 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -5,7 +5,7 @@ // the node and the coordination server. 
package tailcfg -//go:generate go run tailscale.com/cmd/viewer --type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile --clonefunc +//go:generate go run tailscale.com/cmd/viewer --type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService --clonefunc import ( "bytes" diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index da1f4f374..3952f5f47 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -626,9 +626,28 @@ var _UserProfileCloneNeedsRegeneration = UserProfile(struct { ProfilePicURL string }{}) +// Clone makes a deep copy of VIPService. +// The result aliases no memory with the original. +func (src *VIPService) Clone() *VIPService { + if src == nil { + return nil + } + dst := new(VIPService) + *dst = *src + dst.Ports = append(src.Ports[:0:0], src.Ports...) + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _VIPServiceCloneNeedsRegeneration = VIPService(struct { + Name ServiceName + Ports []ProtoPortRange + Active bool +}{}) + // Clone duplicates src into dst and reports whether it succeeded. // To succeed, must be of types <*T, *T> or <*T, **T>, -// where T is one of User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile. +// where T is one of User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService. func Clone(dst, src any) bool { switch src := src.(type) { case *User: @@ -802,6 +821,15 @@ func Clone(dst, src any) bool { *dst = src.Clone() return true } + case *VIPService: + switch dst := dst.(type) { + case *VIPService: + *dst = *src.Clone() + return true + case **VIPService: + *dst = src.Clone() + return true + } } return false } diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index b1aacab23..f8f9f865c 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -19,7 +19,7 @@ import ( "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile +//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService // View returns a read-only view of User. func (p *User) View() UserView { @@ -1414,3 +1414,59 @@ var _UserProfileViewNeedsRegeneration = UserProfile(struct { DisplayName string ProfilePicURL string }{}) + +// View returns a read-only view of VIPService. +func (p *VIPService) View() VIPServiceView { + return VIPServiceView{ж: p} +} + +// VIPServiceView provides a read-only view over VIPService. 
+// +// Its methods should only be called if `Valid()` returns true. +type VIPServiceView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *VIPService +} + +// Valid reports whether v's underlying value is non-nil. +func (v VIPServiceView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v VIPServiceView) AsStruct() *VIPService { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +func (v VIPServiceView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } + +func (v *VIPServiceView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x VIPService + if err := json.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +func (v VIPServiceView) Name() ServiceName { return v.ж.Name } +func (v VIPServiceView) Ports() views.Slice[ProtoPortRange] { return views.SliceOf(v.ж.Ports) } +func (v VIPServiceView) Active() bool { return v.ж.Active } + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _VIPServiceViewNeedsRegeneration = VIPService(struct { + Name ServiceName + Ports []ProtoPortRange + Active bool +}{}) From 60847128df6f30d12312f55a7a06d03c0363996e Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0665/1708] net/tstun: add Plan 9 'tun' support Updates #5794 Change-Id: I8c466cae25ae79be1097450a63e8c25c7b519331 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/tailscaled.go | 4 +++- go.mod | 2 +- go.sum | 4 ++-- net/tstun/tstun_stub.go | 2 +- net/tstun/tun.go | 38 +++++++++++++++++++++++++++++++++++- wgengine/userspace.go | 11 +++++++++++ 6 files changed, 55 insertions(+), 6 deletions(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 122afe97b..8a2a4b820 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -82,7 +82,9 @@ func defaultTunName() string { // "utun" is recognized by wireguard-go/tun/tun_darwin.go // as a magic value that uses/creates any free number. 
return "utun" - case "plan9", "aix", "solaris", "illumos": + case "plan9": + return "auto" + case "aix", "solaris", "illumos": return "userspace-networking" case "linux": switch distro.Get() { diff --git a/go.mod b/go.mod index 7be824165..eb99c5373 100644 --- a/go.mod +++ b/go.mod @@ -85,7 +85,7 @@ require ( github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 - github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 + github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251 github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e github.com/tc-hib/winres v0.2.1 github.com/tcnksm/go-httpstat v0.2.0 diff --git a/go.sum b/go.sum index fffa17209..bf5700e49 100644 --- a/go.sum +++ b/go.sum @@ -924,8 +924,8 @@ github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:U github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= -github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw= -github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251 h1:h/41LFTrwMxB9Xvvug0kRdQCU5TlV1+pAMQw0ZtDE3U= +github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= diff --git a/net/tstun/tstun_stub.go b/net/tstun/tstun_stub.go index 3119d647c..d21eda6b0 100644 --- a/net/tstun/tstun_stub.go +++ b/net/tstun/tstun_stub.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build plan9 || aix || solaris || illumos +//go:build aix || solaris || illumos package tstun diff --git a/net/tstun/tun.go b/net/tstun/tun.go index 44ccdfc99..88679daa2 100644 --- a/net/tstun/tun.go +++ b/net/tstun/tun.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !wasm && !plan9 && !tamago && !aix && !solaris && !illumos +//go:build !wasm && !tamago && !aix && !solaris && !illumos // Package tun creates a tuntap device, working around OS-specific // quirks if necessary. 
@@ -9,6 +9,9 @@ package tstun import ( "errors" + "fmt" + "log" + "os" "runtime" "strings" "time" @@ -45,6 +48,9 @@ func New(logf logger.Logf, tunName string) (tun.Device, string, error) { } dev, err = CreateTAP.Get()(logf, tapName, bridgeName) } else { + if runtime.GOOS == "plan9" { + cleanUpPlan9Interfaces() + } dev, err = tun.CreateTUN(tunName, int(DefaultTUNMTU())) } if err != nil { @@ -65,6 +71,36 @@ func New(logf logger.Logf, tunName string) (tun.Device, string, error) { return dev, name, nil } +func cleanUpPlan9Interfaces() { + maybeUnbind := func(n int) { + b, err := os.ReadFile(fmt.Sprintf("/net/ipifc/%d/status", n)) + if err != nil { + return + } + status := string(b) + if !(strings.HasPrefix(status, "device maxtu ") || + strings.Contains(status, "fd7a:115c:a1e0:")) { + return + } + f, err := os.OpenFile(fmt.Sprintf("/net/ipifc/%d/ctl", n), os.O_RDWR, 0) + if err != nil { + return + } + defer f.Close() + if _, err := fmt.Fprintf(f, "unbind\n"); err != nil { + log.Printf("unbind interface %v: %v", n, err) + return + } + log.Printf("tun: unbound stale interface %v", n) + } + + // A common case: after unclean shutdown we might leave interfaces + // behind. Look for our straggler(s) and clean them up. + for n := 2; n < 5; n++ { + maybeUnbind(n) + } +} + // tunDiagnoseFailure, if non-nil, does OS-specific diagnostics of why // TUN failed to work. var tunDiagnoseFailure func(tunName string, logf logger.Logf, err error) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 1200003f6..cca253048 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -569,6 +569,17 @@ func (e *userspaceEngine) handleLocalPackets(p *packet.Parsed, t *tstun.Wrapper) return filter.Drop } } + if runtime.GOOS == "plan9" { + isLocalAddr, ok := e.isLocalAddr.LoadOk() + if ok { + if isLocalAddr(p.Dst.Addr()) { + // On Plan9's "tun" equivalent, everything goes back in and out + // the tun, even when the kernel's replying to itself. + t.InjectInboundCopy(p.Buffer()) + return filter.Drop + } + } + } return filter.Accept } From e3282c163231ad9d0bfdf6d43fecfb8ebe154c2c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0666/1708] wgengine/magicsock: avoid some log spam on Plan 9 Updates #5794 Change-Id: I12e8417ebd553f9951690c388fbe42228f8c9097 Signed-off-by: Brad Fitzpatrick --- wgengine/magicsock/magicsock.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index e8e966582..313f9e315 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3018,6 +3018,10 @@ func (c *Conn) DebugForcePreferDERP(n int) { // portableTrySetSocketBuffer sets SO_SNDBUF and SO_RECVBUF on pconn to socketBufferSize, // logging an error if it occurs. func portableTrySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { + if runtime.GOOS == "plan9" { + // Not supported. Don't try. Avoid logspam. + return + } if c, ok := pconn.(*net.UDPConn); ok { // Attempt to increase the buffer size, and allow failures. 
if err := c.SetReadBuffer(socketBufferSize); err != nil { From bbdd3c3bdecdb9576040d1e2018f73df4ee339fc Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0667/1708] wgengine/router: add Plan 9 implementation Updates #5794 Change-Id: Ib78a3ea971a2374d405b024ab88658ec34be59a6 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 5 + wgengine/router/router_default.go | 2 +- wgengine/router/router_plan9.go | 156 ++++++++++++++++++++++++++++++ 3 files changed, 162 insertions(+), 1 deletion(-) create mode 100644 wgengine/router/router_plan9.go diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c44827aa4..7d69b884d 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5069,6 +5069,11 @@ func shouldUseOneCGNATRoute(logf logger.Logf, mon *netmon.Monitor, controlKnobs } } + if versionOS == "plan9" { + // Just temporarily during plan9 bringup to have fewer routes to debug. + return true + } + // Also prefer to do this on the Mac, so that we don't need to constantly // update the network extension configuration (which is disruptive to // Chrome, see https://github.com/tailscale/tailscale/issues/3102). Only diff --git a/wgengine/router/router_default.go b/wgengine/router/router_default.go index 1e675d1fc..8dcbd36d0 100644 --- a/wgengine/router/router_default.go +++ b/wgengine/router/router_default.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !windows && !linux && !darwin && !openbsd && !freebsd +//go:build !windows && !linux && !darwin && !openbsd && !freebsd && !plan9 package router diff --git a/wgengine/router/router_plan9.go b/wgengine/router/router_plan9.go new file mode 100644 index 000000000..7ed7686d9 --- /dev/null +++ b/wgengine/router/router_plan9.go @@ -0,0 +1,156 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package router + +import ( + "bufio" + "bytes" + "fmt" + "net/netip" + "os" + "strings" + + "github.com/tailscale/wireguard-go/tun" + "tailscale.com/health" + "tailscale.com/net/netmon" + "tailscale.com/types/logger" +) + +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { + r := &plan9Router{ + logf: logf, + tundev: tundev, + netMon: netMon, + } + cleanAllTailscaleRoutes(logf) + return r, nil +} + +type plan9Router struct { + logf logger.Logf + tundev tun.Device + netMon *netmon.Monitor + health *health.Tracker +} + +func (r *plan9Router) Up() error { + return nil +} + +func (r *plan9Router) Set(cfg *Config) error { + if cfg == nil { + cleanAllTailscaleRoutes(r.logf) + return nil + } + + var self4, self6 netip.Addr + for _, addr := range cfg.LocalAddrs { + ctl := r.tundev.File() + maskBits := addr.Bits() + if addr.Addr().Is4() { + // The mask sizes in Plan9 are in IPv6 bits, even for IPv4. + maskBits += (128 - 32) + self4 = addr.Addr() + } + if addr.Addr().Is6() { + self6 = addr.Addr() + } + _, err := fmt.Fprintf(ctl, "add %s /%d\n", addr.Addr().String(), maskBits) + r.logf("route/plan9: add %s /%d = %v", addr.Addr().String(), maskBits, err) + } + + ipr, err := os.OpenFile("/net/iproute", os.O_RDWR, 0) + if err != nil { + return fmt.Errorf("open /net/iproute: %w", err) + } + defer ipr.Close() + + // TODO(bradfitz): read existing routes, delete ones tagged "tail" + // that aren't in cfg.LocalRoutes. 
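+	// (The "tag tail" write below labels the routes we add as ours, which is
+	// what cleanAllTailscaleRoutes keys on when finding and removing them.)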
+ + if _, err := fmt.Fprintf(ipr, "tag tail\n"); err != nil { + return fmt.Errorf("tag tail: %w", err) + } + + for _, route := range cfg.Routes { + maskBits := route.Bits() + if route.Addr().Is4() { + // The mask sizes in Plan9 are in IPv6 bits, even for IPv4. + maskBits += (128 - 32) + } + var nextHop netip.Addr + if route.Addr().Is4() { + nextHop = self4 + } else if route.Addr().Is6() { + nextHop = self6 + } + if !nextHop.IsValid() { + r.logf("route/plan9: skipping route %s: no next hop (no self addr)", route.String()) + continue + } + r.logf("route/plan9: plan9.router: add %s /%d %s", route.Addr(), maskBits, nextHop) + if _, err := fmt.Fprintf(ipr, "add %s /%d %s\n", route.Addr(), maskBits, nextHop); err != nil { + return fmt.Errorf("add %s: %w", route.String(), err) + } + } + + if len(cfg.LocalRoutes) > 0 { + r.logf("route/plan9: TODO: Set LocalRoutes %v", cfg.LocalRoutes) + } + if len(cfg.SubnetRoutes) > 0 { + r.logf("route/plan9: TODO: Set SubnetRoutes %v", cfg.SubnetRoutes) + } + + return nil +} + +// UpdateMagicsockPort implements the Router interface. This implementation +// does nothing and returns nil because this router does not currently need +// to know what the magicsock UDP port is. +func (r *plan9Router) UpdateMagicsockPort(_ uint16, _ string) error { + return nil +} + +func (r *plan9Router) Close() error { + // TODO(bradfitz): unbind + return nil +} + +func cleanUp(logf logger.Logf, _ string) { + cleanAllTailscaleRoutes(logf) +} + +func cleanAllTailscaleRoutes(logf logger.Logf) { + routes, err := os.OpenFile("/net/iproute", os.O_RDWR, 0) + if err != nil { + logf("cleaning routes: %v", err) + return + } + defer routes.Close() + + // Using io.ReadAll or os.ReadFile on /net/iproute fails; it results in a + // 511 byte result when the actual /net/iproute contents are over 1k. + // So do it in one big read instead. Who knows. + routeBuf := make([]byte, 1<<20) + n, err := routes.Read(routeBuf) + if err != nil { + logf("cleaning routes: %v", err) + return + } + routeBuf = routeBuf[:n] + + bs := bufio.NewScanner(bytes.NewReader(routeBuf)) + for bs.Scan() { + f := strings.Fields(bs.Text()) + if len(f) < 6 { + continue + } + tag := f[4] + if tag != "tail" { + continue + } + _, err := fmt.Fprintf(routes, "remove %s %s\n", f[0], f[1]) + logf("router: cleaning route %s %s: %v", f[0], f[1], err) + } +} From af504fa678cbf2a34590287a770c70d92248a40c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0668/1708] safesocket: fix Plan 9 implementation This wasn't right; it was spinning up new goroutines non-stop. Revert to a boring localhost TCP implementation for now. Updates #5794 Change-Id: If93caa20a12ee4e741c0c72b0d91cc0cc5870152 Signed-off-by: Brad Fitzpatrick --- safesocket/safesocket_plan9.go | 110 +-------------------------------- 1 file changed, 2 insertions(+), 108 deletions(-) diff --git a/safesocket/safesocket_plan9.go b/safesocket/safesocket_plan9.go index 196c1df9c..c8a5e3b05 100644 --- a/safesocket/safesocket_plan9.go +++ b/safesocket/safesocket_plan9.go @@ -7,119 +7,13 @@ package safesocket import ( "context" - "fmt" "net" - "os" - "syscall" - "time" - - "golang.org/x/sys/plan9" ) -// Plan 9's devsrv srv(3) is a server registry and -// it is conventionally bound to "/srv" in the default -// namespace. It is "a one level directory for holding -// already open channels to services". Post one end of -// a pipe to "/srv/tailscale.sock" and use the other -// end for communication with a requestor. 
Plan 9 pipes -// are bidirectional. - -type plan9SrvAddr string - -func (sl plan9SrvAddr) Network() string { - return "/srv" -} - -func (sl plan9SrvAddr) String() string { - return string(sl) -} - -// There is no net.FileListener for Plan 9 at this time -type plan9SrvListener struct { - name string - srvf *os.File - file *os.File -} - -func (sl *plan9SrvListener) Accept() (net.Conn, error) { - // sl.file is the server end of the pipe that's - // connected to /srv/tailscale.sock - return plan9FileConn{name: sl.name, file: sl.file}, nil -} - -func (sl *plan9SrvListener) Close() error { - sl.file.Close() - return sl.srvf.Close() -} - -func (sl *plan9SrvListener) Addr() net.Addr { - return plan9SrvAddr(sl.name) -} - -type plan9FileConn struct { - name string - file *os.File -} - -func (fc plan9FileConn) Read(b []byte) (n int, err error) { - return fc.file.Read(b) -} -func (fc plan9FileConn) Write(b []byte) (n int, err error) { - return fc.file.Write(b) -} -func (fc plan9FileConn) Close() error { - return fc.file.Close() -} -func (fc plan9FileConn) LocalAddr() net.Addr { - return plan9SrvAddr(fc.name) -} -func (fc plan9FileConn) RemoteAddr() net.Addr { - return plan9SrvAddr(fc.name) -} -func (fc plan9FileConn) SetDeadline(t time.Time) error { - return syscall.EPLAN9 -} -func (fc plan9FileConn) SetReadDeadline(t time.Time) error { - return syscall.EPLAN9 -} -func (fc plan9FileConn) SetWriteDeadline(t time.Time) error { - return syscall.EPLAN9 -} - func connect(_ context.Context, path string) (net.Conn, error) { - f, err := os.OpenFile(path, os.O_RDWR, 0666) - if err != nil { - return nil, err - } - - return plan9FileConn{name: path, file: f}, nil + return net.Dial("tcp", "localhost:5252") } -// Create an entry in /srv, open a pipe, write the -// client end to the entry and return the server -// end of the pipe to the caller. When the server -// end of the pipe is closed, /srv name associated -// with it will be removed (controlled by ORCLOSE flag) func listen(path string) (net.Listener, error) { - const O_RCLOSE = 64 // remove on close; should be in plan9 package - var pip [2]int - - err := plan9.Pipe(pip[:]) - if err != nil { - return nil, err - } - defer plan9.Close(pip[1]) - - srvfd, err := plan9.Create(path, plan9.O_WRONLY|plan9.O_CLOEXEC|O_RCLOSE, 0600) - if err != nil { - return nil, err - } - srv := os.NewFile(uintptr(srvfd), path) - - _, err = fmt.Fprintf(srv, "%d", pip[1]) - if err != nil { - return nil, err - } - - return &plan9SrvListener{name: path, srvf: srv, file: os.NewFile(uintptr(pip[0]), path)}, nil + return net.Listen("tcp", "localhost:5252") } From 7426a36371cb3917ee811011eed9988a05838322 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0669/1708] net/netmon: disable time jump monitoring on Plan 9 Updates #5794 Change-Id: I0f96383dea2ad017988d300df723ce906debb007 Signed-off-by: Brad Fitzpatrick --- net/netmon/netmon.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index f2dd37f1d..bd3d13d66 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -596,7 +596,7 @@ func (m *Monitor) pollWallTime() { // // We don't do this on mobile platforms for battery reasons, and because these // platforms don't really sleep in the same way. 
-const shouldMonitorTimeJump = runtime.GOOS != "android" && runtime.GOOS != "ios" +const shouldMonitorTimeJump = runtime.GOOS != "android" && runtime.GOOS != "ios" && runtime.GOOS != "plan9" // checkWallTimeAdvanceLocked reports whether wall time jumped more than 150% of // pollWallTimeInterval, indicating we probably just came out of sleep. Once a From 5df06612aa4f58a84255399472152bb814243d95 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0670/1708] net/tsdial: tolerate empty default route on Plan 9 Otherwise this was repeated closing control/derp connections all the time on netmon changes. Arguably we should do this on all platforms? Updates #5794 Change-Id: If6bbeff554235f188bab2a40ab75e08dd14746b2 Signed-off-by: Brad Fitzpatrick --- net/tsdial/tsdial.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index 3606dd67f..8d287fdb0 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -242,7 +242,7 @@ func changeAffectsConn(delta *netmon.ChangeDelta, conn net.Conn) bool { // In a few cases, we don't have a new DefaultRouteInterface (e.g. on // Android; see tailscale/corp#19124); if so, pessimistically assume // that all connections are affected. - if delta.New.DefaultRouteInterface == "" { + if delta.New.DefaultRouteInterface == "" && runtime.GOOS != "plan9" { return true } From 21d12ec522fe5ad134e994323e87e36dfe4efe82 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0671/1708] cmd/tailscaled: let net/netmon know what our TUN interface is Updates #5794 Change-Id: Ia7e71c32e6c0cd79eb32b6c2c2d4e9a6d8c3e4d6 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/tailscaled.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 8a2a4b820..a4e1af972 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -751,6 +751,12 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo return false, err } + if runtime.GOOS == "plan9" { + // TODO(bradfitz): why don't we do this on all platforms? + // We should. Doing it just on plan9 for now conservatively. + sys.NetMon.Get().SetTailscaleInterfaceName(devName) + } + r, err := router.New(logf, dev, sys.NetMon.Get(), sys.HealthTracker()) if err != nil { dev.Close() From 3da17282073774439128505fe89b153ac8a9b16d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0672/1708] cmd/tailscaled: make state dir on Plan 9 Updates #5794 Change-Id: Id7bdc08263e98a1848ffce0dd25fc034747d7393 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/tailscaled.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index a4e1af972..323fcf369 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -250,7 +250,18 @@ func main() { // Only apply a default statepath when neither have been provided, so that a // user may specify only --statedir if they wish. 
if args.statepath == "" && args.statedir == "" { - args.statepath = paths.DefaultTailscaledStateFile() + if runtime.GOOS == "plan9" { + home, err := os.UserHomeDir() + if err != nil { + log.Fatalf("failed to get home directory: %v", err) + } + args.statedir = filepath.Join(home, "tailscale-state") + if err := os.MkdirAll(args.statedir, 0700); err != nil { + log.Fatalf("failed to create state directory: %v", err) + } + } else { + args.statepath = paths.DefaultTailscaledStateFile() + } } if args.disableLogs { From da8e8eb86f569169df996eae5174848cd252385d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0673/1708] types/logger, logpolicy: disable rate limiting, don't upload on Plan 9 To ease local debugging and have fewer moving pieces while bringing up Plan 9 support. Updates #5794 Change-Id: I2dc98e73bbb0d4d4730dc47203efc0550a0ac0a0 Signed-off-by: Brad Fitzpatrick --- logpolicy/logpolicy.go | 2 +- types/logger/logger.go | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 1419fff65..11c6bf14c 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -627,7 +627,7 @@ func (opts Options) New() *Policy { conf.IncludeProcSequence = true } - if envknob.NoLogsNoSupport() || testenv.InTest() { + if envknob.NoLogsNoSupport() || testenv.InTest() || runtime.GOOS == "plan9" { opts.Logf("You have disabled logging. Tailscale will not be able to provide support.") conf.HTTPC = &http.Client{Transport: noopPretendSuccessTransport{}} } else { diff --git a/types/logger/logger.go b/types/logger/logger.go index 11596b357..66b989480 100644 --- a/types/logger/logger.go +++ b/types/logger/logger.go @@ -14,6 +14,7 @@ import ( "fmt" "io" "log" + "runtime" "strings" "sync" "time" @@ -162,6 +163,10 @@ func RateLimitedFnWithClock(logf Logf, f time.Duration, burst int, maxCache int, if envknob.String("TS_DEBUG_LOG_RATE") == "all" { return logf } + if runtime.GOOS == "plan9" { + // To ease bring-up. + return logf + } var ( mu sync.Mutex msgLim = make(map[string]*limitData) // keyed by logf format From 03b9b879ee17be7c29b1f56175ec952474466fbd Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0674/1708] ipn/ipnserver: treat all plan9 safesocket connections as permitted Updates #5794 Change-Id: Ibf74d017e38e0713d19bef437f26685280d79f6f Signed-off-by: Brad Fitzpatrick --- ipn/ipnserver/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index 63f03f79e..a7ded9c00 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -331,7 +331,7 @@ func (a *actor) Permissions(operatorUID string) (read, write bool) { // checks here. Note that this permission model is being changed in // tailscale/corp#18342. 
return true, true - case "js": + case "js", "plan9": return true, true } if a.ci.IsUnixSock() { From 6f75647c0e4a85ca33cf69add4721cbbe7d9976b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0675/1708] net/netcheck: avoid ICMP unimplemented log spam on Plan 9 Updates #5794 Change-Id: Ia6b2429d57b79770e4c278f011504f726136db5b Signed-off-by: Brad Fitzpatrick --- net/netcheck/netcheck.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 74c866d92..5f4ab41c2 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -1191,6 +1191,10 @@ func (c *Client) measureAllICMPLatency(ctx context.Context, rs *reportState, nee if len(need) == 0 { return nil } + if runtime.GOOS == "plan9" { + // ICMP isn't implemented. + return nil + } ctx, done := context.WithTimeout(ctx, icmpProbeTimeout) defer done() From b3953ce0c44ddac2a86d22d4632a3874c6bf34cc Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0676/1708] ssh/tailssh: add Plan 9 support for Tailscale SSH Updates #5794 Change-Id: I7b05cd29ec02085cb503bbcd0beb61bf455002ac Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/ssh.go | 2 +- cmd/tailscaled/tailscaled.go | 4 + envknob/featureknob/featureknob.go | 2 +- go.mod | 4 +- go.sum | 16 +- ipn/ipnlocal/ssh.go | 2 +- ipn/ipnlocal/ssh_stub.go | 2 +- ssh/tailssh/incubator_plan9.go | 421 +++++++++++++++++++++++++++++ ssh/tailssh/tailssh.go | 4 +- ssh/tailssh/user.go | 8 +- util/osuser/group_ids.go | 4 + util/osuser/user.go | 23 +- 12 files changed, 476 insertions(+), 16 deletions(-) create mode 100644 ssh/tailssh/incubator_plan9.go diff --git a/cmd/tailscaled/ssh.go b/cmd/tailscaled/ssh.go index b10a3b774..59a1ddd0d 100644 --- a/cmd/tailscaled/ssh.go +++ b/cmd/tailscaled/ssh.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build (linux || darwin || freebsd || openbsd) && !ts_omit_ssh +//go:build (linux || darwin || freebsd || openbsd || plan9) && !ts_omit_ssh package main diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 323fcf369..2d4aa4358 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -200,6 +200,10 @@ func main() { flag.BoolVar(&args.disableLogs, "no-logs-no-support", false, "disable log uploads; this also disables any technical support") flag.StringVar(&args.confFile, "config", "", "path to config file, or 'vm:user-data' to use the VM's user-data (EC2)") + if runtime.GOOS == "plan9" && os.Getenv("_NETSHELL_CHILD_") != "" { + os.Args = []string{"tailscaled", "be-child", "plan9-netshell"} + } + if len(os.Args) > 1 { sub := os.Args[1] if fp, ok := subCommands[sub]; ok { diff --git a/envknob/featureknob/featureknob.go b/envknob/featureknob/featureknob.go index 210414bfe..e9b871f74 100644 --- a/envknob/featureknob/featureknob.go +++ b/envknob/featureknob/featureknob.go @@ -40,7 +40,7 @@ func CanRunTailscaleSSH() error { if version.IsSandboxedMacOS() { return errors.New("The Tailscale SSH server does not run in sandboxed Tailscale GUI builds.") } - case "freebsd", "openbsd": + case "freebsd", "openbsd", "plan9": default: return errors.New("The Tailscale SSH server is not supported on " + runtime.GOOS) } diff --git a/go.mod b/go.mod index eb99c5373..8ca56a4b9 100644 --- a/go.mod +++ b/go.mod @@ -37,6 +37,7 @@ require ( github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 github.com/go-logr/zapr v1.3.0 
github.com/go-ole/go-ole v1.3.0 + github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/golang/snappy v0.0.4 @@ -90,7 +91,7 @@ require ( github.com/tc-hib/winres v0.2.1 github.com/tcnksm/go-httpstat v0.2.0 github.com/toqueteos/webbrowser v1.2.0 - github.com/u-root/u-root v0.12.0 + github.com/u-root/u-root v0.14.0 github.com/vishvananda/netns v0.0.4 go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20240501181205-ae6ca9944745 @@ -121,6 +122,7 @@ require ( ) require ( + 9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f // indirect github.com/4meepo/tagalign v1.3.3 // indirect github.com/Antonboom/testifylint v1.2.0 // indirect github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 // indirect diff --git a/go.sum b/go.sum index bf5700e49..ca1b5e30c 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ 4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= 4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc= 4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= +9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f h1:1C7nZuxUMNz7eiQALRfiqNOm04+m3edWlRff/BYHf0Q= +9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f/go.mod h1:hHyrZRryGqVdqrknjq5OWDLGCTJ2NeEvtrpR96mjraM= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -391,6 +393,8 @@ github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsM github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 h1:cf60tHxREO3g1nroKr2osU3JWZsJzkfi7rEg+oAB0Lo= +github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737/go.mod h1:MIS0jDzbU/vuM9MC4YnBITCv+RYuTRq8dJzmCrFsK9g= github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4= github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= @@ -547,8 +551,8 @@ github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSo github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/hugelgupf/vmtest v0.0.0-20240102225328-693afabdd27f h1:ov45/OzrJG8EKbGjn7jJZQJTN7Z1t73sFYNIRd64YlI= -github.com/hugelgupf/vmtest v0.0.0-20240102225328-693afabdd27f/go.mod h1:JoDrYMZpDPYo6uH9/f6Peqms3zNNWT2XiGgioMOIGuI= +github.com/hugelgupf/vmtest v0.0.0-20240216064925-0561770280a1 h1:jWoR2Yqg8tzM0v6LAiP7i1bikZJu3gxpgvu3g1Lw+a0= +github.com/hugelgupf/vmtest v0.0.0-20240216064925-0561770280a1/go.mod h1:B63hDJMhTupLWCHwopAyEo7wRFowx9kOc8m8j1sfOqE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/illarion/gonotify/v3 v3.0.2 
h1:O7S6vcopHexutmpObkeWsnzMJt/r1hONIEogeVNmJMk= github.com/illarion/gonotify/v3 v3.0.2/go.mod h1:HWGPdPe817GfvY3w7cx6zkbzNZfi3QjcBm/wgVvEL1U= @@ -952,10 +956,10 @@ github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+ github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= -github.com/u-root/gobusybox/src v0.0.0-20231228173702-b69f654846aa h1:unMPGGK/CRzfg923allsikmvk2l7beBeFPUNC4RVX/8= -github.com/u-root/gobusybox/src v0.0.0-20231228173702-b69f654846aa/go.mod h1:Zj4Tt22fJVn/nz/y6Ergm1SahR9dio1Zm/D2/S0TmXM= -github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= -github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= +github.com/u-root/gobusybox/src v0.0.0-20240225013946-a274a8d5d83a h1:eg5FkNoQp76ZsswyGZ+TjYqA/rhKefxK8BW7XOlQsxo= +github.com/u-root/gobusybox/src v0.0.0-20240225013946-a274a8d5d83a/go.mod h1:e/8TmrdreH0sZOw2DFKBaUV7bvDWRq6SeM9PzkuVM68= +github.com/u-root/u-root v0.14.0 h1:Ka4T10EEML7dQ5XDvO9c3MBN8z4nuSnGjcd1jmU2ivg= +github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1WMluqE= github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= diff --git a/ipn/ipnlocal/ssh.go b/ipn/ipnlocal/ssh.go index 47a74e282..c1b477652 100644 --- a/ipn/ipnlocal/ssh.go +++ b/ipn/ipnlocal/ssh.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || (darwin && !ios) || freebsd || openbsd +//go:build linux || (darwin && !ios) || freebsd || openbsd || plan9 package ipnlocal diff --git a/ipn/ipnlocal/ssh_stub.go b/ipn/ipnlocal/ssh_stub.go index 7875ae311..401f42bf8 100644 --- a/ipn/ipnlocal/ssh_stub.go +++ b/ipn/ipnlocal/ssh_stub.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build ios || (!linux && !darwin && !freebsd && !openbsd) +//go:build ios || (!linux && !darwin && !freebsd && !openbsd && !plan9) package ipnlocal diff --git a/ssh/tailssh/incubator_plan9.go b/ssh/tailssh/incubator_plan9.go new file mode 100644 index 000000000..61b6a54eb --- /dev/null +++ b/ssh/tailssh/incubator_plan9.go @@ -0,0 +1,421 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// This file contains the plan9-specific version of the incubator. Tailscaled +// launches the incubator as the same user as it was launched as. The +// incubator then registers a new session with the OS, sets its UID +// and groups to the specified `--uid`, `--gid` and `--groups`, and +// then launches the requested `--cmd`. 
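+// Note: on Plan 9 the UID/GID switching described above is not implemented
+// yet (see the TODO-ed --uid/--gid flags in newIncubatorCommand); commands
+// currently run as the same user tailscaled runs as.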
+ +package tailssh + +import ( + "encoding/json" + "errors" + "flag" + "fmt" + "io" + "log" + "os" + "os/exec" + "runtime" + "strconv" + "strings" + "sync/atomic" + + "github.com/go4org/plan9netshell" + "github.com/pkg/sftp" + "tailscale.com/cmd/tailscaled/childproc" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" +) + +func init() { + childproc.Add("ssh", beIncubator) + childproc.Add("sftp", beSFTP) + childproc.Add("plan9-netshell", beNetshell) +} + +// newIncubatorCommand returns a new exec.Cmd configured with +// `tailscaled be-child ssh` as the entrypoint. +// +// If ss.srv.tailscaledPath is empty, this method is equivalent to +// exec.CommandContext. +// +// The returned Cmd.Env is guaranteed to be nil; the caller populates it. +func (ss *sshSession) newIncubatorCommand(logf logger.Logf) (cmd *exec.Cmd, err error) { + defer func() { + if cmd.Env != nil { + panic("internal error") + } + }() + + var isSFTP, isShell bool + switch ss.Subsystem() { + case "sftp": + isSFTP = true + case "": + isShell = ss.RawCommand() == "" + default: + panic(fmt.Sprintf("unexpected subsystem: %v", ss.Subsystem())) + } + + if ss.conn.srv.tailscaledPath == "" { + if isSFTP { + // SFTP relies on the embedded Go-based SFTP server in tailscaled, + // so without tailscaled, we can't serve SFTP. + return nil, errors.New("no tailscaled found on path, can't serve SFTP") + } + + loginShell := ss.conn.localUser.LoginShell() + logf("directly running /bin/rc -c %q", ss.RawCommand()) + return exec.CommandContext(ss.ctx, loginShell, "-c", ss.RawCommand()), nil + } + + lu := ss.conn.localUser + ci := ss.conn.info + remoteUser := ci.uprof.LoginName + if ci.node.IsTagged() { + remoteUser = strings.Join(ci.node.Tags().AsSlice(), ",") + } + + incubatorArgs := []string{ + "be-child", + "ssh", + // TODO: "--uid=" + lu.Uid, + // TODO: "--gid=" + lu.Gid, + "--local-user=" + lu.Username, + "--home-dir=" + lu.HomeDir, + "--remote-user=" + remoteUser, + "--remote-ip=" + ci.src.Addr().String(), + "--has-tty=false", // updated in-place by startWithPTY + "--tty-name=", // updated in-place by startWithPTY + } + + nm := ss.conn.srv.lb.NetMap() + forceV1Behavior := nm.HasCap(tailcfg.NodeAttrSSHBehaviorV1) && !nm.HasCap(tailcfg.NodeAttrSSHBehaviorV2) + if forceV1Behavior { + incubatorArgs = append(incubatorArgs, "--force-v1-behavior") + } + + if debugTest.Load() { + incubatorArgs = append(incubatorArgs, "--debug-test") + } + + switch { + case isSFTP: + // Note that we include both the `--sftp` flag and a command to launch + // tailscaled as `be-child sftp`. If login or su is available, and + // we're not running with tailcfg.NodeAttrSSHBehaviorV1, this will + // result in serving SFTP within a login shell, with full PAM + // integration. Otherwise, we'll serve SFTP in the incubator process + // with no PAM integration. 
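+		// (On Plan 9 there is no login or su, so the in-process SFTP path in
+		// handleSFTPInProcess is always the one used.)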
+ incubatorArgs = append(incubatorArgs, "--sftp", fmt.Sprintf("--cmd=%s be-child sftp", ss.conn.srv.tailscaledPath)) + case isShell: + incubatorArgs = append(incubatorArgs, "--shell") + default: + incubatorArgs = append(incubatorArgs, "--cmd="+ss.RawCommand()) + } + + allowSendEnv := nm.HasCap(tailcfg.NodeAttrSSHEnvironmentVariables) + if allowSendEnv { + env, err := filterEnv(ss.conn.acceptEnv, ss.Session.Environ()) + if err != nil { + return nil, err + } + + if len(env) > 0 { + encoded, err := json.Marshal(env) + if err != nil { + return nil, fmt.Errorf("failed to encode environment: %w", err) + } + incubatorArgs = append(incubatorArgs, fmt.Sprintf("--encoded-env=%q", encoded)) + } + } + + return exec.CommandContext(ss.ctx, ss.conn.srv.tailscaledPath, incubatorArgs...), nil +} + +var debugTest atomic.Bool + +type stdRWC struct{} + +func (stdRWC) Read(p []byte) (n int, err error) { + return os.Stdin.Read(p) +} + +func (stdRWC) Write(b []byte) (n int, err error) { + return os.Stdout.Write(b) +} + +func (stdRWC) Close() error { + os.Exit(0) + return nil +} + +type incubatorArgs struct { + localUser string + homeDir string + remoteUser string + remoteIP string + ttyName string + hasTTY bool + cmd string + isSFTP bool + isShell bool + forceV1Behavior bool + debugTest bool + isSELinuxEnforcing bool + encodedEnv string +} + +func parseIncubatorArgs(args []string) (incubatorArgs, error) { + var ia incubatorArgs + + flags := flag.NewFlagSet("", flag.ExitOnError) + flags.StringVar(&ia.localUser, "local-user", "", "the user to run as") + flags.StringVar(&ia.homeDir, "home-dir", "/", "the user's home directory") + flags.StringVar(&ia.remoteUser, "remote-user", "", "the remote user/tags") + flags.StringVar(&ia.remoteIP, "remote-ip", "", "the remote Tailscale IP") + flags.StringVar(&ia.ttyName, "tty-name", "", "the tty name (pts/3)") + flags.BoolVar(&ia.hasTTY, "has-tty", false, "is the output attached to a tty") + flags.StringVar(&ia.cmd, "cmd", "", "the cmd to launch, including all arguments (ignored in sftp mode)") + flags.BoolVar(&ia.isShell, "shell", false, "is launching a shell (with no cmds)") + flags.BoolVar(&ia.isSFTP, "sftp", false, "run sftp server (cmd is ignored)") + flags.BoolVar(&ia.forceV1Behavior, "force-v1-behavior", false, "allow falling back to the su command if login is unavailable") + flags.BoolVar(&ia.debugTest, "debug-test", false, "should debug in test mode") + flags.BoolVar(&ia.isSELinuxEnforcing, "is-selinux-enforcing", false, "whether SELinux is in enforcing mode") + flags.StringVar(&ia.encodedEnv, "encoded-env", "", "JSON encoded array of environment variables in '['key=value']' format") + flags.Parse(args) + return ia, nil +} + +func (ia incubatorArgs) forwardedEnviron() ([]string, string, error) { + environ := os.Environ() + // pass through SSH_AUTH_SOCK environment variable to support ssh agent forwarding + allowListKeys := "SSH_AUTH_SOCK" + + if ia.encodedEnv != "" { + unquoted, err := strconv.Unquote(ia.encodedEnv) + if err != nil { + return nil, "", fmt.Errorf("unable to parse encodedEnv %q: %w", ia.encodedEnv, err) + } + + var extraEnviron []string + + err = json.Unmarshal([]byte(unquoted), &extraEnviron) + if err != nil { + return nil, "", fmt.Errorf("unable to parse encodedEnv %q: %w", ia.encodedEnv, err) + } + + environ = append(environ, extraEnviron...) 
+ + for _, v := range extraEnviron { + allowListKeys = fmt.Sprintf("%s,%s", allowListKeys, strings.Split(v, "=")[0]) + } + } + + return environ, allowListKeys, nil +} + +func beNetshell(args []string) error { + plan9netshell.Main() + return nil +} + +// beIncubator is the entrypoint to the `tailscaled be-child ssh` subcommand. +// It is responsible for informing the system of a new login session for the +// user. This is sometimes necessary for mounting home directories and +// decrypting file systems. +// +// Tailscaled launches the incubator as the same user as it was launched as. +func beIncubator(args []string) error { + // To defend against issues like https://golang.org/issue/1435, + // defensively lock our current goroutine's thread to the current + // system thread before we start making any UID/GID/group changes. + // + // This shouldn't matter on Linux because syscall.AllThreadsSyscall is + // used to invoke syscalls on all OS threads, but (as of 2023-03-23) + // that function is not implemented on all platforms. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + ia, err := parseIncubatorArgs(args) + if err != nil { + return err + } + if ia.isSFTP && ia.isShell { + return fmt.Errorf("--sftp and --shell are mutually exclusive") + } + + if ia.isShell { + plan9netshell.Main() + return nil + } + + dlogf := logger.Discard + if ia.debugTest { + // In testing, we don't always have syslog, so log to a temp file. + if logFile, err := os.OpenFile("/tmp/tailscalessh.log", os.O_APPEND|os.O_WRONLY, 0666); err == nil { + lf := log.New(logFile, "", 0) + dlogf = func(msg string, args ...any) { + lf.Printf(msg, args...) + logFile.Sync() + } + defer logFile.Close() + } + } + + return handleInProcess(dlogf, ia) +} + +func handleInProcess(dlogf logger.Logf, ia incubatorArgs) error { + if ia.isSFTP { + return handleSFTPInProcess(dlogf, ia) + } + return handleSSHInProcess(dlogf, ia) +} + +func handleSFTPInProcess(dlogf logger.Logf, ia incubatorArgs) error { + dlogf("handling sftp") + + return serveSFTP() +} + +// beSFTP serves SFTP in-process. +func beSFTP(args []string) error { + return serveSFTP() +} + +func serveSFTP() error { + server, err := sftp.NewServer(stdRWC{}) + if err != nil { + return err + } + // TODO(https://github.com/pkg/sftp/pull/554): Revert the check for io.EOF, + // when sftp is patched to report clean termination. + if err := server.Serve(); err != nil && err != io.EOF { + return err + } + return nil +} + +// handleSSHInProcess is a last resort if we couldn't use login or su. It +// registers a new session with the OS, sets its UID, GID and groups to the +// specified values, and then launches the requested `--cmd` in the user's +// login shell. +func handleSSHInProcess(dlogf logger.Logf, ia incubatorArgs) error { + + environ, _, err := ia.forwardedEnviron() + if err != nil { + return err + } + + dlogf("running /bin/rc -c %q", ia.cmd) + cmd := newCommand("/bin/rc", environ, []string{"-c", ia.cmd}) + err = cmd.Run() + if ee, ok := err.(*exec.ExitError); ok { + ps := ee.ProcessState + code := ps.ExitCode() + if code < 0 { + // TODO(bradfitz): do we need to also check the syscall.WaitStatus + // and make our process look like it also died by signal/same signal + // as our child process? For now we just do the exit code. + fmt.Fprintf(os.Stderr, "[tailscale-ssh: process died: %v]\n", ps.String()) + code = 1 // for now. 
so we don't exit with negative + } + os.Exit(code) + } + return err +} + +func newCommand(cmdPath string, cmdEnviron []string, cmdArgs []string) *exec.Cmd { + cmd := exec.Command(cmdPath, cmdArgs...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Env = cmdEnviron + + return cmd +} + +// launchProcess launches an incubator process for the provided session. +// It is responsible for configuring the process execution environment. +// The caller can wait for the process to exit by calling cmd.Wait(). +// +// It sets ss.cmd, stdin, stdout, and stderr. +func (ss *sshSession) launchProcess() error { + var err error + ss.cmd, err = ss.newIncubatorCommand(ss.logf) + if err != nil { + return err + } + + cmd := ss.cmd + cmd.Dir = "/" + cmd.Env = append(os.Environ(), envForUser(ss.conn.localUser)...) + for _, kv := range ss.Environ() { + if acceptEnvPair(kv) { + cmd.Env = append(cmd.Env, kv) + } + } + + ci := ss.conn.info + cmd.Env = append(cmd.Env, + fmt.Sprintf("SSH_CLIENT=%s %d %d", ci.src.Addr(), ci.src.Port(), ci.dst.Port()), + fmt.Sprintf("SSH_CONNECTION=%s %d %s %d", ci.src.Addr(), ci.src.Port(), ci.dst.Addr(), ci.dst.Port()), + ) + + if ss.agentListener != nil { + cmd.Env = append(cmd.Env, fmt.Sprintf("SSH_AUTH_SOCK=%s", ss.agentListener.Addr())) + } + + return ss.startWithStdPipes() +} + +// startWithStdPipes starts cmd with os.Pipe for Stdin, Stdout and Stderr. +func (ss *sshSession) startWithStdPipes() (err error) { + var rdStdin, wrStdout, wrStderr io.ReadWriteCloser + defer func() { + if err != nil { + closeAll(rdStdin, ss.wrStdin, ss.rdStdout, wrStdout, ss.rdStderr, wrStderr) + } + }() + if ss.cmd == nil { + return errors.New("nil cmd") + } + if rdStdin, ss.wrStdin, err = os.Pipe(); err != nil { + return err + } + if ss.rdStdout, wrStdout, err = os.Pipe(); err != nil { + return err + } + if ss.rdStderr, wrStderr, err = os.Pipe(); err != nil { + return err + } + ss.cmd.Stdin = rdStdin + ss.cmd.Stdout = wrStdout + ss.cmd.Stderr = wrStderr + ss.childPipes = []io.Closer{rdStdin, wrStdout, wrStderr} + return ss.cmd.Start() +} + +func envForUser(u *userMeta) []string { + return []string{ + fmt.Sprintf("user=%s", u.Username), + fmt.Sprintf("home=%s", u.HomeDir), + fmt.Sprintf("path=%s", defaultPathForUser(&u.User)), + } +} + +// acceptEnvPair reports whether the environment variable key=value pair +// should be accepted from the client. It uses the same default as OpenSSH +// AcceptEnv. +func acceptEnvPair(kv string) bool { + k, _, ok := strings.Cut(kv, "=") + if !ok { + return false + } + _ = k + return true // permit anything on plan9 during bringup, for debugging at least +} diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index 9aae899c3..e42f09bdf 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || (darwin && !ios) || freebsd || openbsd +//go:build linux || (darwin && !ios) || freebsd || openbsd || plan9 // Package tailssh is an SSH server integrated into Tailscale. 
package tailssh @@ -903,7 +903,7 @@ func (ss *sshSession) run() { defer t.Stop() } - if euid := os.Geteuid(); euid != 0 { + if euid := os.Geteuid(); euid != 0 && runtime.GOOS != "plan9" { if lu.Uid != fmt.Sprint(euid) { ss.logf("can't switch to user %q from process euid %v", lu.Username, euid) fmt.Fprintf(ss, "can't switch user\r\n") diff --git a/ssh/tailssh/user.go b/ssh/tailssh/user.go index 15191813b..097f0d296 100644 --- a/ssh/tailssh/user.go +++ b/ssh/tailssh/user.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || (darwin && !ios) || freebsd || openbsd +//go:build linux || (darwin && !ios) || freebsd || openbsd || plan9 package tailssh @@ -48,6 +48,9 @@ func userLookup(username string) (*userMeta, error) { } func (u *userMeta) LoginShell() string { + if runtime.GOOS == "plan9" { + return "/bin/rc" + } if u.loginShellCached != "" { // This field should be populated on Linux, at least, because // func userLookup on Linux uses "getent" to look up the user @@ -85,6 +88,9 @@ func defaultPathForUser(u *user.User) string { if s := defaultPathTmpl(); s != "" { return expandDefaultPathTmpl(s, u) } + if runtime.GOOS == "plan9" { + return "/bin" + } isRoot := u.Uid == "0" switch distro.Get() { case distro.Debian: diff --git a/util/osuser/group_ids.go b/util/osuser/group_ids.go index f25861dbb..7c2b5b090 100644 --- a/util/osuser/group_ids.go +++ b/util/osuser/group_ids.go @@ -19,6 +19,10 @@ import ( // an error. It will first try to use the 'id' command to get the group IDs, // and if that fails, it will fall back to the user.GroupIds method. func GetGroupIds(user *user.User) ([]string, error) { + if runtime.GOOS == "plan9" { + return nil, nil + } + if runtime.GOOS != "linux" { return user.GroupIds() } diff --git a/util/osuser/user.go b/util/osuser/user.go index 2c7f2e24b..8b96194d7 100644 --- a/util/osuser/user.go +++ b/util/osuser/user.go @@ -54,9 +54,18 @@ func lookup(usernameOrUID string, std lookupStd, wantShell bool) (*user.User, st // Skip getent entirely on Non-Unix platforms that won't ever have it. // (Using HasPrefix for "wasip1", anticipating that WASI support will // move beyond "preview 1" some day.) - if runtime.GOOS == "windows" || runtime.GOOS == "js" || runtime.GOARCH == "wasm" { + if runtime.GOOS == "windows" || runtime.GOOS == "js" || runtime.GOARCH == "wasm" || runtime.GOOS == "plan9" { + var shell string + if wantShell && runtime.GOOS == "plan9" { + shell = "/bin/rc" + } + if runtime.GOOS == "plan9" { + if u, err := user.Current(); err == nil { + return u, shell, nil + } + } u, err := std(usernameOrUID) - return u, "", err + return u, shell, err } // No getent on Gokrazy. So hard-code the login shell. @@ -78,6 +87,16 @@ func lookup(usernameOrUID string, std lookupStd, wantShell bool) (*user.User, st return u, shell, nil } + if runtime.GOOS == "plan9" { + return &user.User{ + Uid: "0", + Gid: "0", + Username: "glenda", + Name: "Glenda", + HomeDir: "/", + }, "/bin/rc", nil + } + // Start with getent if caller wants to get the user shell. 
if wantShell { return userLookupGetent(usernameOrUID, std) From 5e305032a953258fc47b56dc0462ac652c9c78db Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0677/1708] portlist: add Plan 9 support Updates #5794 Change-Id: I77df1eb9bea9f079a25337cb7bbd498cf8a19135 Signed-off-by: Brad Fitzpatrick --- portlist/portlist_plan9.go | 122 +++++++++++++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) create mode 100644 portlist/portlist_plan9.go diff --git a/portlist/portlist_plan9.go b/portlist/portlist_plan9.go new file mode 100644 index 000000000..77f8619f9 --- /dev/null +++ b/portlist/portlist_plan9.go @@ -0,0 +1,122 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package portlist + +import ( + "bufio" + "bytes" + "os" + "strconv" + "strings" + "time" +) + +func init() { + newOSImpl = newPlan9Impl + + pollInterval = 5 * time.Second +} + +type plan9Impl struct { + known map[protoPort]*portMeta // inode string => metadata + + br *bufio.Reader // reused + portsBuf []Port + includeLocalhost bool +} + +type protoPort struct { + proto string + port uint16 +} + +type portMeta struct { + port Port + keep bool +} + +func newPlan9Impl(includeLocalhost bool) osImpl { + return &plan9Impl{ + known: map[protoPort]*portMeta{}, + br: bufio.NewReader(bytes.NewReader(nil)), + includeLocalhost: includeLocalhost, + } +} + +func (*plan9Impl) Close() error { return nil } + +func (im *plan9Impl) AppendListeningPorts(base []Port) ([]Port, error) { + ret := base + + des, err := os.ReadDir("/proc") + if err != nil { + return nil, err + } + for _, de := range des { + if !de.IsDir() { + continue + } + pidStr := de.Name() + pid, err := strconv.Atoi(pidStr) + if err != nil { + continue + } + st, _ := os.ReadFile("/proc/" + pidStr + "/fd") + if !bytes.Contains(st, []byte("/net/tcp/clone")) { + continue + } + args, _ := os.ReadFile("/proc/" + pidStr + "/args") + procName := string(bytes.TrimSpace(args)) + // term% cat /proc/417/fd + // /usr/glenda + // 0 r M 35 (0000000000000001 0 00) 16384 260 /dev/cons + // 1 w c 0 (000000000000000a 0 00) 0 471 /dev/null + // 2 w M 35 (0000000000000001 0 00) 16384 108 /dev/cons + // 3 rw I 0 (000000000000002c 0 00) 0 14 /net/tcp/clone + for line := range bytes.Lines(st) { + if !bytes.Contains(line, []byte("/net/tcp/clone")) { + continue + } + f := strings.Fields(string(line)) + if len(f) < 10 { + continue + } + if f[9] != "/net/tcp/clone" { + continue + } + qid, err := strconv.ParseUint(strings.TrimPrefix(f[4], "("), 16, 64) + if err != nil { + continue + } + tcpN := (qid >> 5) & (1<<12 - 1) + tcpNStr := strconv.FormatUint(tcpN, 10) + st, _ := os.ReadFile("/net/tcp/" + tcpNStr + "/status") + if !bytes.Contains(st, []byte("Listen ")) { + // Unexpected. Or a race. 
+ continue + } + bl, _ := os.ReadFile("/net/tcp/" + tcpNStr + "/local") + i := bytes.LastIndexByte(bl, '!') + if i == -1 { + continue + } + if bytes.HasPrefix(bl, []byte("127.0.0.1!")) && !im.includeLocalhost { + continue + } + portStr := strings.TrimSpace(string(bl[i+1:])) + port, _ := strconv.Atoi(portStr) + if port == 0 { + continue + } + ret = append(ret, Port{ + Proto: "tcp", + Port: uint16(port), + Process: procName, + Pid: pid, + }) + } + } + + return sortAndDedup(ret), nil +} From 84c82ac4beba892aed7274ca9d1ad65e23586af3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0678/1708] net/dns: add Plan 9 support This requires the rsc/plan9 ndb DNS changes for now: https://9fans.topicbox.com/groups/9fans/T9c9d81b5801a0820/ndb-suffix-specific-dns-changes https://github.com/rsc/plan9/commit/e8c148ff092a5780d04aa2fd4a07a5732207b698 https://github.com/rsc/plan9/commit/1d0642ae493bf5ce798a6aa64a745bc6316baa11 Updates #5794 Change-Id: I0e242c1fe7bb4404e23604e03a31f89f0d18e70d Signed-off-by: Brad Fitzpatrick --- net/dns/manager.go | 2 +- net/dns/manager_default.go | 2 +- net/dns/manager_plan9.go | 181 ++++++++++++++++++++++++++++++++++ net/dns/manager_plan9_test.go | 86 ++++++++++++++++ 4 files changed, 269 insertions(+), 2 deletions(-) create mode 100644 net/dns/manager_plan9.go create mode 100644 net/dns/manager_plan9_test.go diff --git a/net/dns/manager.go b/net/dns/manager.go index 0bfbaa077..64bf12c6b 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -284,7 +284,7 @@ func (m *Manager) compileConfig(cfg Config) (rcfg resolver.Config, ocfg OSConfig // Deal with trivial configs first. switch { - case !cfg.needsOSResolver(): + case !cfg.needsOSResolver() || runtime.GOOS == "plan9": // Set search domains, but nothing else. This also covers the // case where cfg is entirely zero, in which case these // configs clear all Tailscale DNS settings. diff --git a/net/dns/manager_default.go b/net/dns/manager_default.go index 99ff017da..e14454e76 100644 --- a/net/dns/manager_default.go +++ b/net/dns/manager_default.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux && !freebsd && !openbsd && !windows && !darwin && !illumos && !solaris +//go:build !linux && !freebsd && !openbsd && !windows && !darwin && !illumos && !solaris && !plan9 package dns diff --git a/net/dns/manager_plan9.go b/net/dns/manager_plan9.go new file mode 100644 index 000000000..ca179f27f --- /dev/null +++ b/net/dns/manager_plan9.go @@ -0,0 +1,181 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// TODO: man 6 ndb | grep -e 'suffix.*same line' +// to detect Russ's https://9fans.topicbox.com/groups/9fans/T9c9d81b5801a0820/ndb-suffix-specific-dns-changes + +package dns + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/netip" + "os" + "regexp" + "strings" + "unicode" + + "tailscale.com/control/controlknobs" + "tailscale.com/health" + "tailscale.com/types/logger" + "tailscale.com/util/set" +) + +func NewOSConfigurator(logf logger.Logf, ht *health.Tracker, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { + return &plan9DNSManager{ + logf: logf, + ht: ht, + knobs: knobs, + }, nil +} + +type plan9DNSManager struct { + logf logger.Logf + ht *health.Tracker + knobs *controlknobs.Knobs +} + +// netNDBBytesWithoutTailscale returns raw (the contents of /net/ndb) with any +// Tailscale bits removed. 
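+// Lines that tailscaled added are preceded by a "#tailscaled-added-line:"
+// marker comment naming the exact line, which is what this function keys on
+// when stripping them.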
+func netNDBBytesWithoutTailscale(raw []byte) ([]byte, error) { + var ret bytes.Buffer + bs := bufio.NewScanner(bytes.NewReader(raw)) + removeLine := set.Set[string]{} + for bs.Scan() { + t := bs.Text() + if rest, ok := strings.CutPrefix(t, "#tailscaled-added-line:"); ok { + removeLine.Add(strings.TrimSpace(rest)) + continue + } + trimmed := strings.TrimSpace(t) + if removeLine.Contains(trimmed) { + removeLine.Delete(trimmed) + continue + } + + // Also remove any DNS line referencing *.ts.net. This is + // Tailscale-specific (and won't work with, say, Headscale), but + // the Headscale case will be covered by the #tailscaled-added-line + // logic above, assuming the user didn't delete those comments. + if (strings.HasPrefix(trimmed, "dns=") || strings.Contains(trimmed, "dnsdomain=")) && + strings.HasSuffix(trimmed, ".ts.net") { + continue + } + + ret.WriteString(t) + ret.WriteByte('\n') + } + return ret.Bytes(), bs.Err() +} + +// setNDBSuffix adds lines to tsFree (the contents of /net/ndb already cleaned +// of Tailscale-added lines) to add the optional DNS search domain (e.g. +// "foo.ts.net") and DNS server to it. +func setNDBSuffix(tsFree []byte, suffix string) []byte { + suffix = strings.TrimSuffix(suffix, ".") + if suffix == "" { + return tsFree + } + var buf bytes.Buffer + bs := bufio.NewScanner(bytes.NewReader(tsFree)) + var added []string + addLine := func(s string) { + added = append(added, strings.TrimSpace(s)) + buf.WriteString(s) + } + for bs.Scan() { + buf.Write(bs.Bytes()) + buf.WriteByte('\n') + + t := bs.Text() + if suffix != "" && len(added) == 0 && strings.HasPrefix(t, "\tdns=") { + addLine(fmt.Sprintf("\tdns=100.100.100.100 suffix=%s\n", suffix)) + addLine(fmt.Sprintf("\tdnsdomain=%s\n", suffix)) + } + } + bufTrim := bytes.TrimLeftFunc(buf.Bytes(), unicode.IsSpace) + if len(added) == 0 { + return bufTrim + } + var ret bytes.Buffer + for _, s := range added { + ret.WriteString("#tailscaled-added-line: ") + ret.WriteString(s) + ret.WriteString("\n") + } + ret.WriteString("\n") + ret.Write(bufTrim) + return ret.Bytes() +} + +func (m *plan9DNSManager) SetDNS(c OSConfig) error { + ndbOnDisk, err := os.ReadFile("/net/ndb") + if err != nil { + return err + } + + tsFree, err := netNDBBytesWithoutTailscale(ndbOnDisk) + if err != nil { + return err + } + + var suffix string + if len(c.SearchDomains) > 0 { + suffix = string(c.SearchDomains[0]) + } + + newBuf := setNDBSuffix(tsFree, suffix) + if !bytes.Equal(newBuf, ndbOnDisk) { + if err := os.WriteFile("/net/ndb", newBuf, 0644); err != nil { + return fmt.Errorf("writing /net/ndb: %w", err) + } + if f, err := os.OpenFile("/net/dns", os.O_RDWR, 0); err == nil { + if _, err := io.WriteString(f, "refresh\n"); err != nil { + f.Close() + return fmt.Errorf("/net/dns refresh write: %w", err) + } + if err := f.Close(); err != nil { + return fmt.Errorf("/net/dns refresh close: %w", err) + } + } + } + + return nil +} + +func (m *plan9DNSManager) SupportsSplitDNS() bool { return false } + +func (m *plan9DNSManager) Close() error { + // TODO(bradfitz): remove the Tailscale bits from /net/ndb ideally + return nil +} + +var dnsRegex = regexp.MustCompile(`\bdns=(\d+\.\d+\.\d+\.\d+)\b`) + +func (m *plan9DNSManager) GetBaseConfig() (OSConfig, error) { + var oc OSConfig + f, err := os.Open("/net/ndb") + if err != nil { + return oc, err + } + defer f.Close() + bs := bufio.NewScanner(f) + for bs.Scan() { + m := dnsRegex.FindSubmatch(bs.Bytes()) + if m == nil { + continue + } + addr, err := netip.ParseAddr(string(m[1])) + if err != nil { + continue + } 
+ oc.Nameservers = append(oc.Nameservers, addr) + } + if err := bs.Err(); err != nil { + return oc, err + } + + return oc, nil +} diff --git a/net/dns/manager_plan9_test.go b/net/dns/manager_plan9_test.go new file mode 100644 index 000000000..806fdb68e --- /dev/null +++ b/net/dns/manager_plan9_test.go @@ -0,0 +1,86 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build plan9 + +package dns + +import "testing" + +func TestNetNDBBytesWithoutTailscale(t *testing.T) { + tests := []struct { + name string + raw string + want string + }{ + { + name: "empty", + raw: "", + want: "", + }, + { + name: "no-tailscale", + raw: "# This is a comment\nip=10.0.2.15 ipmask=255.255.255.0 ipgw=10.0.2.2\n\tsys=gnot\n", + want: "# This is a comment\nip=10.0.2.15 ipmask=255.255.255.0 ipgw=10.0.2.2\n\tsys=gnot\n", + }, + { + name: "remove-by-comments", + raw: "# This is a comment\n#tailscaled-added-line: dns=100.100.100.100\nip=10.0.2.15 ipmask=255.255.255.0 ipgw=10.0.2.2\n\tdns=100.100.100.100\n\tsys=gnot\n", + want: "# This is a comment\nip=10.0.2.15 ipmask=255.255.255.0 ipgw=10.0.2.2\n\tsys=gnot\n", + }, + { + name: "remove-by-ts.net", + raw: "Some line\n\tdns=100.100.100.100 suffix=foo.ts.net\n\tfoo=bar\n", + want: "Some line\n\tfoo=bar\n", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := netNDBBytesWithoutTailscale([]byte(tt.raw)) + if err != nil { + t.Fatal(err) + } + if string(got) != tt.want { + t.Errorf("GOT:\n%s\n\nWANT:\n%s\n", string(got), tt.want) + } + }) + } +} + +func TestSetNDBSuffix(t *testing.T) { + tests := []struct { + name string + raw string + want string + }{ + { + name: "empty", + raw: "", + want: "", + }, + { + name: "set", + raw: "ip=10.0.2.15 ipmask=255.255.255.0 ipgw=10.0.2.2\n\tsys=gnot\n\tdns=100.100.100.100\n\n# foo\n", + want: `#tailscaled-added-line: dns=100.100.100.100 suffix=foo.ts.net +#tailscaled-added-line: dnsdomain=foo.ts.net + +ip=10.0.2.15 ipmask=255.255.255.0 ipgw=10.0.2.2 + sys=gnot + dns=100.100.100.100 + dns=100.100.100.100 suffix=foo.ts.net + dnsdomain=foo.ts.net + +# foo +`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := setNDBSuffix([]byte(tt.raw), "foo.ts.net") + if string(got) != tt.want { + t.Errorf("wrong value\n GOT %q:\n%s\n\nWANT %q:\n%s\n", got, got, tt.want, tt.want) + } + }) + } + +} From 7dbb21cae8511dc9708a14627fd384fb18d3f2b2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0679/1708] cmd/tailscale: add tailscale.rc Plan 9 wrapper So we can link tailscale and tailscaled together into one. Updates #5794 Change-Id: I9a8b793c64033827e4188931546cbd64db55982e Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/tailscale.rc | 3 +++ 1 file changed, 3 insertions(+) create mode 100755 cmd/tailscale/tailscale.rc diff --git a/cmd/tailscale/tailscale.rc b/cmd/tailscale/tailscale.rc new file mode 100755 index 000000000..2cac53efb --- /dev/null +++ b/cmd/tailscale/tailscale.rc @@ -0,0 +1,3 @@ +#!/bin/rc +# Plan 9 cmd/tailscale wrapper script to run cmd/tailscaled's embedded CLI. 
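+# Setting TS_BE_CLI=1 makes the combined tailscaled binary act as the
+# "tailscale" CLI, so one binary can serve both roles on Plan 9.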
+TS_BE_CLI=1 tailscaled $* From 4c9b37fa2e629eb740cecd5cb699410e1deb3b91 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0680/1708] control/controlhttp: set forceNoise443 on Plan 9 Updates #5794 Change-Id: Idc67082f5d367e03540e1a5310db5b466ee03666 Signed-off-by: Brad Fitzpatrick --- control/controlhttp/client.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index 3b95796d0..e971f1253 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -270,6 +270,15 @@ var forceNoise443 = envknob.RegisterBool("TS_FORCE_NOISE_443") // use HTTPS connections as its underlay connection (double crypto). This can // be necessary when networks or middle boxes are messing with port 80. func (d *Dialer) forceNoise443() bool { + if runtime.GOOS == "plan9" { + // For running demos of Plan 9 in a browser with network relays, + // we want to minimize the number of connections we're making. + // The main reason to use port 80 is to avoid double crypto + // costs server-side but the costs are tiny and number of Plan 9 + // users doesn't make it worth it. Just disable this and always use + // HTTPS for Plan 9. That also reduces some log spam. + return true + } if forceNoise443() { return true } From 29c2bb1db6bc7f5d964a76ba48fde570b79abd4d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 30 Mar 2025 21:12:29 -0700 Subject: [PATCH 0681/1708] control/controlhttp: reduce some log spam on context cancel Change-Id: I3ac00ddb29c16e9791ab2be19f454dabd721e4c3 Signed-off-by: Brad Fitzpatrick --- control/controlhttp/client.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index e971f1253..44de6b0df 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -249,6 +249,11 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { results[i].conn = nil // so we don't close it in the defer return conn, nil } + if ctx.Err() != nil { + a.logf("controlhttp: context aborted dialing") + return nil, ctx.Err() + } + merr := multierr.New(errs...) // If we get here, then we didn't get anywhere with our dial plan; fall back to just using DNS. 
From 65c7a37bc66054074d4da676f751f94a0f859b5f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 1 Apr 2025 04:01:00 -0700 Subject: [PATCH 0682/1708] all: use network less when running in v86 emulator Updates #5794 Change-Id: I1d8b005a1696835c9062545f87b7bab643cfc44d Signed-off-by: Brad Fitzpatrick --- control/controlclient/direct.go | 9 +++++++- control/controlclient/map.go | 26 ++++++++++++++++++++++ hostinfo/hostinfo.go | 14 ++++++++++-- hostinfo/hostinfo_plan9.go | 39 +++++++++++++++++++++++++++++++++ net/netcheck/netcheck.go | 16 +++++++++++++- wgengine/magicsock/magicsock.go | 6 +++-- 6 files changed, 104 insertions(+), 6 deletions(-) create mode 100644 hostinfo/hostinfo_plan9.go diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 68ab9ca17..70ebe2f23 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1086,7 +1086,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap } else { vlogf("netmap: got new map") } - if resp.ControlDialPlan != nil { + if resp.ControlDialPlan != nil && !ignoreDialPlan() { if c.dialPlan != nil { c.logf("netmap: got new dial plan from control") c.dialPlan.Store(resp.ControlDialPlan) @@ -1774,6 +1774,13 @@ func makeScreenTimeDetectingDialFunc(dial dialFunc) (dialFunc, *atomic.Bool) { }, ab } +func ignoreDialPlan() bool { + // If we're running in v86 (a JavaScript-based emulation of a 32-bit x86) + // our networking is very limited. Let's ignore the dial plan since it's too + // complicated to race that many IPs anyway. + return hostinfo.IsInVM86() +} + func isTCPLoopback(a net.Addr) bool { if ta, ok := a.(*net.TCPAddr); ok { return ta.IP.IsLoopback() diff --git a/control/controlclient/map.go b/control/controlclient/map.go index 769c8f1e3..3173040fe 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -19,6 +19,7 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/hostinfo" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/key" @@ -308,6 +309,31 @@ func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) { } } + // In the copy/v86 wasm environment with limited networking, if the + // control plane didn't pick our DERP home for us, do it ourselves and + // mark all but the lowest region as NoMeasureNoHome. For prod, this + // will be Region 1, NYC, a compromise between the US and Europe. But + // really the control plane should pick this. This is only a fallback. + if hostinfo.IsInVM86() { + numCanMeasure := 0 + lowest := 0 + for rid, r := range dm.Regions { + if !r.NoMeasureNoHome { + numCanMeasure++ + if lowest == 0 || rid < lowest { + lowest = rid + } + } + } + if numCanMeasure > 1 { + for rid, r := range dm.Regions { + if rid != lowest { + r.NoMeasureNoHome = true + } + } + } + } + // Zero-valued fields in a DERPMap mean that we're not changing // anything and are using the previous value(s). if ldm := ms.lastDERPMap; ldm != nil { diff --git a/hostinfo/hostinfo.go b/hostinfo/hostinfo.go index d952ce603..afb465ece 100644 --- a/hostinfo/hostinfo.go +++ b/hostinfo/hostinfo.go @@ -21,6 +21,7 @@ import ( "go4.org/mem" "tailscale.com/envknob" "tailscale.com/tailcfg" + "tailscale.com/types/lazy" "tailscale.com/types/opt" "tailscale.com/types/ptr" "tailscale.com/util/cloudenv" @@ -497,5 +498,14 @@ func IsNATLabGuestVM() bool { return false } -// NAT Lab VMs have a unique MAC address prefix. 
-// See +const copyV86DeviceModel = "copy-v86" + +var isV86Cache lazy.SyncValue[bool] + +// IsInVM86 reports whether we're running in the copy/v86 wasm emulator, +// https://github.com/copy/v86/. +func IsInVM86() bool { + return isV86Cache.Get(func() bool { + return New().DeviceModel == copyV86DeviceModel + }) +} diff --git a/hostinfo/hostinfo_plan9.go b/hostinfo/hostinfo_plan9.go new file mode 100644 index 000000000..f9aa30e51 --- /dev/null +++ b/hostinfo/hostinfo_plan9.go @@ -0,0 +1,39 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package hostinfo + +import ( + "bytes" + "os" + "strings" + + "tailscale.com/tailcfg" + "tailscale.com/types/lazy" +) + +func init() { + RegisterHostinfoNewHook(func(hi *tailcfg.Hostinfo) { + if isPlan9V86() { + hi.DeviceModel = copyV86DeviceModel + } + }) +} + +var isPlan9V86Cache lazy.SyncValue[bool] + +// isPlan9V86 reports whether we're running in the wasm +// environment (https://github.com/copy/v86/). +func isPlan9V86() bool { + return isPlan9V86Cache.Get(func() bool { + v, _ := os.ReadFile("/dev/cputype") + s, _, _ := strings.Cut(string(v), " ") + if s != "PentiumIV/Xeon" { + return false + } + + v, _ = os.ReadFile("/dev/config") + v, _, _ = bytes.Cut(v, []byte{'\n'}) + return string(v) == "# pcvm - small kernel used to run in vm" + }) +} diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 5f4ab41c2..c9f03966b 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -25,6 +25,7 @@ import ( "tailscale.com/derp/derphttp" "tailscale.com/envknob" + "tailscale.com/hostinfo" "tailscale.com/net/captivedetection" "tailscale.com/net/dnscache" "tailscale.com/net/neterror" @@ -863,7 +864,7 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe c.curState = nil }() - if runtime.GOOS == "js" || runtime.GOOS == "tamago" { + if runtime.GOOS == "js" || runtime.GOOS == "tamago" || (runtime.GOOS == "plan9" && hostinfo.IsInVM86()) { if err := c.runHTTPOnlyChecks(ctx, last, rs, dm); err != nil { return nil, err } @@ -1063,6 +1064,19 @@ func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *report regions = append(regions, dr) } } + + if len(regions) == 1 && hostinfo.IsInVM86() { + // If we only have 1 region that's probably and we're in a + // network-limited v86 environment, don't actually probe it. Just fake + // some results. 
+ rg := regions[0] + if len(rg.Nodes) > 0 { + node := rg.Nodes[0] + rs.addNodeLatency(node, netip.AddrPort{}, 999*time.Millisecond) + return nil + } + } + c.logf("running HTTP-only netcheck against %v regions", len(regions)) var wg sync.WaitGroup diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 313f9e315..a32867f72 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -719,7 +719,7 @@ func (c *Conn) updateEndpoints(why string) { c.muCond.Broadcast() }() c.dlogf("[v1] magicsock: starting endpoint update (%s)", why) - if c.noV4Send.Load() && runtime.GOOS != "js" && !c.onlyTCP443.Load() { + if c.noV4Send.Load() && runtime.GOOS != "js" && !c.onlyTCP443.Load() && !hostinfo.IsInVM86() { c.mu.Lock() closed := c.closed c.mu.Unlock() @@ -2767,7 +2767,9 @@ func (c *Conn) Rebind() { c.logf("Rebind; defIf=%q, ips=%v", defIf, ifIPs) } - c.maybeCloseDERPsOnRebind(ifIPs) + if len(ifIPs) > 0 { + c.maybeCloseDERPsOnRebind(ifIPs) + } c.resetEndpointStates() } From 85bcc2e3bddf3e44cf671c7464ef902543ab0f27 Mon Sep 17 00:00:00 2001 From: Kot Date: Fri, 28 Mar 2025 14:17:13 -0700 Subject: [PATCH 0683/1708] cmd/tsidp: use advertised env vars for config Fixes #14491 Signed-off-by: Kot --- cmd/tsidp/Dockerfile | 4 ++-- cmd/tsidp/README.md | 6 +++--- cmd/tsidp/tsidp.go | 12 ++++++++++-- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/cmd/tsidp/Dockerfile b/cmd/tsidp/Dockerfile index 605a7ba2e..c3ae480b7 100644 --- a/cmd/tsidp/Dockerfile +++ b/cmd/tsidp/Dockerfile @@ -31,11 +31,11 @@ WORKDIR /app # Environment variables ENV TAILSCALE_USE_WIP_CODE=1 \ - TS_HOSTNAME=tsidp \ + TS_HOSTNAME=idp \ TS_STATE_DIR=/var/lib/tsidp # Expose the default port EXPOSE 443 # Run the application -ENTRYPOINT ["/app/tsidp"] \ No newline at end of file +ENTRYPOINT ["/app/tsidp"] diff --git a/cmd/tsidp/README.md b/cmd/tsidp/README.md index d51138b6d..143e448ce 100644 --- a/cmd/tsidp/README.md +++ b/cmd/tsidp/README.md @@ -38,7 +38,7 @@ --name `tsidp` \ -p 443:443 \ -e TS_AUTHKEY=YOUR_TAILSCALE_AUTHKEY \ - -e TS_HOSTNAME=tsidp \ + -e TS_HOSTNAME=idp \ -v tsidp-data:/var/lib/tsidp \ tsidp:latest ``` @@ -88,7 +88,7 @@ The `tsidp` server supports several command-line flags: - `TS_AUTHKEY`: Your Tailscale authentication key (required) - `TS_HOSTNAME`: Hostname for the `tsidp` server (default: "idp") -- `TS_STATE_DIR`: State directory (default: "/var/lib/tsidp") +- `TS_STATE_DIR`: State directory (default: "/var/lib/tsidp" in Docker, otherwise tsnet default) - `TAILSCALE_USE_WIP_CODE`: Enable work-in-progress code (default: "1") ## Support @@ -97,4 +97,4 @@ This is an [experimental](https://tailscale.com/kb/1167/release-stages#experimen ## License -BSD-3-Clause License. See [LICENSE](../../LICENSE) for details. \ No newline at end of file +BSD-3-Clause License. See [LICENSE](../../LICENSE) for details. 
diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 95ab2b2eb..17ef3729d 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -65,9 +65,17 @@ var ( flagLocalPort = flag.Int("local-port", -1, "allow requests from localhost") flagUseLocalTailscaled = flag.Bool("use-local-tailscaled", false, "use local tailscaled instead of tsnet") flagFunnel = flag.Bool("funnel", false, "use Tailscale Funnel to make tsidp available on the public internet") - flagDir = flag.String("dir", "", "tsnet state directory; a default one will be created if not provided") + flagHostname = flag.String("hostname", envOr("TS_HOSTNAME", "idp"), "tsnet hostname to use instead of idp") + flagDir = flag.String("dir", envOr("TS_STATE_DIR", ""), "tsnet state directory; a default one will be created if not provided") ) +func envOr(key, defaultVal string) string { + if result, ok := os.LookupEnv(key); ok { + return result + } + return defaultVal +} + func main() { flag.Parse() ctx := context.Background() @@ -121,7 +129,7 @@ func main() { defer cleanup() } else { ts := &tsnet.Server{ - Hostname: "idp", + Hostname: *flagHostname, Dir: *flagDir, } if *flagVerbose { From c86afacf26d9707078a9bf774afc3f9c51c8f4c1 Mon Sep 17 00:00:00 2001 From: Kot Date: Tue, 1 Apr 2025 21:53:10 -0500 Subject: [PATCH 0684/1708] Move env var flag passing to Dockerfile Updates #15465 Signed-off-by: Kot --- cmd/tsidp/Dockerfile | 2 +- cmd/tsidp/README.md | 3 ++- cmd/tsidp/tsidp.go | 11 ++--------- 3 files changed, 5 insertions(+), 11 deletions(-) diff --git a/cmd/tsidp/Dockerfile b/cmd/tsidp/Dockerfile index c3ae480b7..c4f352ed0 100644 --- a/cmd/tsidp/Dockerfile +++ b/cmd/tsidp/Dockerfile @@ -38,4 +38,4 @@ ENV TAILSCALE_USE_WIP_CODE=1 \ EXPOSE 443 # Run the application -ENTRYPOINT ["/app/tsidp"] +ENTRYPOINT ["/bin/sh", "-c", "/app/tsidp --hostname=${TS_HOSTNAME} --dir=${TS_STATE_DIR}"] diff --git a/cmd/tsidp/README.md b/cmd/tsidp/README.md index 143e448ce..a5e789cc4 100644 --- a/cmd/tsidp/README.md +++ b/cmd/tsidp/README.md @@ -82,13 +82,14 @@ The `tsidp` server supports several command-line flags: - `--port`: Port to listen on (default: 443) - `--local-port`: Allow requests from localhost - `--use-local-tailscaled`: Use local tailscaled instead of tsnet +- `--hostname`: tsnet hostname - `--dir`: tsnet state directory ## Environment Variables - `TS_AUTHKEY`: Your Tailscale authentication key (required) - `TS_HOSTNAME`: Hostname for the `tsidp` server (default: "idp") -- `TS_STATE_DIR`: State directory (default: "/var/lib/tsidp" in Docker, otherwise tsnet default) +- `TS_STATE_DIR`: State directory (default: "/var/lib/tsidp") - `TAILSCALE_USE_WIP_CODE`: Enable work-in-progress code (default: "1") ## Support diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 17ef3729d..54bb82d12 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -65,17 +65,10 @@ var ( flagLocalPort = flag.Int("local-port", -1, "allow requests from localhost") flagUseLocalTailscaled = flag.Bool("use-local-tailscaled", false, "use local tailscaled instead of tsnet") flagFunnel = flag.Bool("funnel", false, "use Tailscale Funnel to make tsidp available on the public internet") - flagHostname = flag.String("hostname", envOr("TS_HOSTNAME", "idp"), "tsnet hostname to use instead of idp") - flagDir = flag.String("dir", envOr("TS_STATE_DIR", ""), "tsnet state directory; a default one will be created if not provided") + flagHostname = flag.String("hostname", "idp", "tsnet hostname to use instead of idp") + flagDir = flag.String("dir", "", "tsnet state directory; a 
default one will be created if not provided") ) -func envOr(key, defaultVal string) string { - if result, ok := os.LookupEnv(key); ok { - return result - } - return defaultVal -} - func main() { flag.Parse() ctx := context.Background() From 1284482790fee365dce7df9445a1f465c74f10cd Mon Sep 17 00:00:00 2001 From: Kot Date: Tue, 1 Apr 2025 21:56:08 -0500 Subject: [PATCH 0685/1708] Change README to reflect configuration Updates #15465 Signed-off-by: Kot --- cmd/tsidp/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/tsidp/README.md b/cmd/tsidp/README.md index a5e789cc4..29ce089df 100644 --- a/cmd/tsidp/README.md +++ b/cmd/tsidp/README.md @@ -48,7 +48,7 @@ docker logs tsidp ``` - Visit `https://tsidp.tailnet.ts.net` to confirm the service is running. + Visit `https://idp.tailnet.ts.net` to confirm the service is running. ## Usage Example: Proxmox Integration @@ -88,8 +88,8 @@ The `tsidp` server supports several command-line flags: ## Environment Variables - `TS_AUTHKEY`: Your Tailscale authentication key (required) -- `TS_HOSTNAME`: Hostname for the `tsidp` server (default: "idp") -- `TS_STATE_DIR`: State directory (default: "/var/lib/tsidp") +- `TS_HOSTNAME`: Hostname for the `tsidp` server (default: "idp", Docker only) +- `TS_STATE_DIR`: State directory (default: "/var/lib/tsidp", Docker only) - `TAILSCALE_USE_WIP_CODE`: Enable work-in-progress code (default: "1") ## Support From d18b9945768c18271d6e2ec953cbb30743226c35 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Apr 2025 11:59:48 -0600 Subject: [PATCH 0686/1708] .github: Bump actions/upload-artifact from 4.6.1 to 4.6.2 (#15400) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.6.1 to 4.6.2. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1...ea165f8d65b6e75b540449e92b4886f43607fa02) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b52a3af36..858a14a21 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -485,7 +485,7 @@ jobs: run: | echo "artifacts_path=$(realpath .)" >> $GITHUB_ENV - name: upload crash - uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: steps.run.outcome != 'success' && steps.build.outcome == 'success' with: name: artifacts From 8c062c07c6605423be96136962f77d86e0790659 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 2 Apr 2025 15:40:44 -0700 Subject: [PATCH 0687/1708] ipn/ipnlocal: fix taildrive logf formatting verb (#15514) Updates #cleanup Signed-off-by: Jordan Whited --- ipn/ipnlocal/peerapi.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index f20ea7524..21b808fd5 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -1135,7 +1135,7 @@ func (h *peerAPIHandler) handleServeDrive(w http.ResponseWriter, r *http.Request p, err := drive.ParsePermissions(rawPerms) if err != nil { - h.logf("taildrive: error parsing permissions: %w", err.Error()) + h.logf("taildrive: error parsing permissions: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } From 66d741aa3e0984266fe671336008831df71d8c1e Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 2 Apr 2025 16:05:40 -0700 Subject: [PATCH 0688/1708] tailcfg: add relay client and server NodeAttr's (#15513) Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- tailcfg/tailcfg.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 30672bc6f..de34cc7f0 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2446,6 +2446,14 @@ const ( // native tailnet. This is currently only sent to Hello, in its // peer node list. NodeAttrNativeIPV4 NodeCapability = "native-ipv4" + + // NodeAttrRelayServer permits the node to act as an underlay UDP relay + // server. There are no expected values for this key in NodeCapMap. + NodeAttrRelayServer NodeCapability = "relay:server" + + // NodeAttrRelayClient permits the node to act as an underlay UDP relay + // client. There are no expected values for this key in NodeCapMap. + NodeAttrRelayClient NodeCapability = "relay:client" ) // SetDNSRequest is a request to add a DNS record. From 881169474551ae95cfa6ca96db72a3069f95af64 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Apr 2025 21:24:05 -0600 Subject: [PATCH 0689/1708] .github: Bump actions/setup-go from 5.3.0 to 5.4.0 (#15397) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.3.0 to 5.4.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/f111f3307d8850f501ac008e886eec1fd1932a34...0aaccfd150d50ccaeb58ebd88d36e91967a5f35b) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/golangci-lint.yml | 2 +- .github/workflows/test.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 4c9ee8088..a1c3bd3c0 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -49,7 +49,7 @@ jobs: # Install a more recent Go that understands modern go.mod content. - name: Install Go - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 with: go-version-file: go.mod diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 5318923d8..bbe67b0eb 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -25,7 +25,7 @@ jobs: steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 + - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 with: go-version-file: go.mod cache: false diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 858a14a21..7df01690b 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -151,7 +151,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Go - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 with: go-version-file: go.mod cache: false From 917bcdba796244a5a03a15ccbf542700f24e049c Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 3 Apr 2025 09:52:31 -0700 Subject: [PATCH 0690/1708] tailcfg: add UDP relay PeerCapability's (#15516) Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- tailcfg/tailcfg.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index de34cc7f0..0043c0ecd 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -1462,6 +1462,13 @@ const ( // user groups as Kubernetes user groups. This capability is read by // peers that are Tailscale Kubernetes operator instances. PeerCapabilityKubernetes PeerCapability = "tailscale.com/cap/kubernetes" + + // PeerCapabilityRelay grants the ability for a peer to allocate relay + // endpoints. + PeerCapabilityRelay PeerCapability = "tailscale.com/cap/relay" + // PeerCapabilityRelayTarget grants the current node the ability to allocate + // relay endpoints to the peer which has this capability. + PeerCapabilityRelayTarget PeerCapability = "tailscale.com/cap/relay-target" ) // NodeCapMap is a map of capabilities to their optional values. It is valid for From 66664b316761c89de62f043dbe4608d84634c113 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 19 Mar 2025 10:47:25 -0700 Subject: [PATCH 0691/1708] wgengine/router: default to a fake router on android The regular android app constructs its own wgengine with additional FFI shims, so this default codepath only affects other handcrafted buids like tsnet, which do not let the caller customize the innards of wgengine. Android >=14 forbids the use of netlink sockets, which makes the standard linux router fail to initialize. 
Fixes #9836 Signed-off-by: David Anderson --- wgengine/router/router_android.go | 29 +++++++++++++++++++++++++++++ wgengine/router/router_linux.go | 2 ++ 2 files changed, 31 insertions(+) create mode 100644 wgengine/router/router_android.go diff --git a/wgengine/router/router_android.go b/wgengine/router/router_android.go new file mode 100644 index 000000000..deeccda4a --- /dev/null +++ b/wgengine/router/router_android.go @@ -0,0 +1,29 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build android + +package router + +import ( + "github.com/tailscale/wireguard-go/tun" + "tailscale.com/health" + "tailscale.com/net/netmon" + "tailscale.com/types/logger" +) + +func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { + // Note, this codepath is _not_ used when building the android app + // from github.com/tailscale/tailscale-android. The android app + // constructs its own wgengine with a custom router implementation + // that plugs into Android networking APIs. + // + // In practice, the only place this fake router gets used is when + // you build a tsnet app for android, in which case we don't want + // to touch the OS network stack and a no-op router is correct. + return NewFake(logf), nil +} + +func cleanUp(logf logger.Logf, interfaceName string) { + // Nothing to do here. +} diff --git a/wgengine/router/router_linux.go b/wgengine/router/router_linux.go index 80191b248..adc54c88d 100644 --- a/wgengine/router/router_linux.go +++ b/wgengine/router/router_linux.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !android + package router import ( From 7a922c3f1f59f087796c767492842d4ca86893cc Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 19 Mar 2025 10:47:25 -0700 Subject: [PATCH 0692/1708] net/routetable: don't try to fetch the route table on android Android >=14 forbids the use of netlink sockets, and in some configurations can kill apps that try. Fixes #9836 Signed-off-by: David Anderson --- net/routetable/routetable_linux.go | 2 +- net/routetable/routetable_other.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/routetable/routetable_linux.go b/net/routetable/routetable_linux.go index 88dc8535a..0b2cb305d 100644 --- a/net/routetable/routetable_linux.go +++ b/net/routetable/routetable_linux.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux +//go:build linux && !android package routetable diff --git a/net/routetable/routetable_other.go b/net/routetable/routetable_other.go index 35c83e374..e547ab0ac 100644 --- a/net/routetable/routetable_other.go +++ b/net/routetable/routetable_other.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux && !darwin && !freebsd +//go:build android || (!linux && !darwin && !freebsd) package routetable From 5e4fae082844c35fcd73e9bb7628e08ffa3e97fa Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 19 Mar 2025 10:47:25 -0700 Subject: [PATCH 0693/1708] net/tstun: don't try to set link attributes on android Android >= 14 forbids the use of netlink sockets. 
Fixes #9836 Signed-off-by: David Anderson --- net/tstun/linkattrs_linux.go | 2 ++ net/tstun/linkattrs_notlinux.go | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/net/tstun/linkattrs_linux.go b/net/tstun/linkattrs_linux.go index 681e79269..320385ba6 100644 --- a/net/tstun/linkattrs_linux.go +++ b/net/tstun/linkattrs_linux.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !android + package tstun import ( diff --git a/net/tstun/linkattrs_notlinux.go b/net/tstun/linkattrs_notlinux.go index 7a7b40fc2..77d227934 100644 --- a/net/tstun/linkattrs_notlinux.go +++ b/net/tstun/linkattrs_notlinux.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux +//go:build !linux || android package tstun From 7b29d39f45f2908178dd07120153e57ac3a914e6 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Thu, 3 Apr 2025 17:26:58 -0700 Subject: [PATCH 0694/1708] client/systray: add menu item to rebuild the menu This shouldn't be necessary, but while we're continuing to figure out the root cause, this is better than having to restart the app or switch profiles on the command line. Updates #15528 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/systray/systray.go | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/client/systray/systray.go b/client/systray/systray.go index 781a65bb8..195a157fb 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -84,12 +84,13 @@ type Menu struct { bgCancel context.CancelFunc // Top-level menu items - connect *systray.MenuItem - disconnect *systray.MenuItem - self *systray.MenuItem - exitNodes *systray.MenuItem - more *systray.MenuItem - quit *systray.MenuItem + connect *systray.MenuItem + disconnect *systray.MenuItem + self *systray.MenuItem + exitNodes *systray.MenuItem + more *systray.MenuItem + rebuildMenu *systray.MenuItem + quit *systray.MenuItem rebuildCh chan struct{} // triggers a menu rebuild accountsCh chan ipn.ProfileID @@ -295,6 +296,17 @@ func (menu *Menu) rebuild() { }) } + // TODO(#15528): this menu item shouldn't be necessary at all, + // but is at least more discoverable than having users switch profiles or exit nodes. + menu.rebuildMenu = systray.AddMenuItem("Rebuild menu", "Fix missing menu items") + onClick(ctx, menu.rebuildMenu, func(ctx context.Context) { + select { + case <-ctx.Done(): + case menu.rebuildCh <- struct{}{}: + } + }) + menu.rebuildMenu.Enable() + menu.quit = systray.AddMenuItem("Quit", "Quit the app") menu.quit.Enable() From e2eb6eb87093b4821a1458946d5cff75a70ace63 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Wed, 2 Apr 2025 08:58:13 -0700 Subject: [PATCH 0695/1708] cmd/natc: separate perPeerState from connector Make the perPeerState objects able to function independently without a shared reference to the connector. We don't currently change the values from connector that perPeerState uses at runtime. Explicitly copying them at perPeerState creation allows us to, for example, put the perPeerState into a consensus algorithm in the future. 
Updates #14667 Signed-off-by: Fran Bull --- cmd/natc/natc.go | 16 ++++++++++++---- cmd/natc/natc_test.go | 4 ++-- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index a8168ce6d..bff9bce87 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -359,7 +359,7 @@ var tsMBox = dnsmessage.MustNewName("support.tailscale.com.") // generateDNSResponse generates a DNS response for the given request. The from // argument is the NodeID of the node that sent the request. func (c *connector) generateDNSResponse(req *dnsmessage.Message, from tailcfg.NodeID) ([]byte, error) { - pm, _ := c.perPeerMap.LoadOrStore(from, &perPeerState{c: c}) + pm, _ := c.perPeerMap.LoadOrStore(from, newPerPeerState(c)) var addrs []netip.Addr if len(req.Questions) > 0 { switch req.Questions[0].Type { @@ -509,7 +509,8 @@ func proxyTCPConn(c net.Conn, dest string) { // perPeerState holds the state for a single peer. type perPeerState struct { - c *connector + v6ULA netip.Prefix + ipset *netipx.IPSet mu sync.Mutex addrInUse *big.Int @@ -517,6 +518,13 @@ type perPeerState struct { addrToDomain *bart.Table[string] } +func newPerPeerState(c *connector) *perPeerState { + return &perPeerState{ + ipset: c.ipset, + v6ULA: c.v6ULA, + } +} + // domainForIP returns the domain name assigned to the given IP address and // whether it was found. func (ps *perPeerState) domainForIP(ip netip.Addr) (_ string, ok bool) { @@ -555,7 +563,7 @@ func (ps *perPeerState) unusedIPv4Locked() netip.Addr { if ps.addrInUse == nil { ps.addrInUse = big.NewInt(0) } - return allocAddr(ps.c.ipset, ps.addrInUse) + return allocAddr(ps.ipset, ps.addrInUse) } // assignAddrsLocked assigns a pair of unique IP addresses for the given domain @@ -570,7 +578,7 @@ func (ps *perPeerState) assignAddrsLocked(domain string) []netip.Addr { if !v4.IsValid() { return nil } - as16 := ps.c.v6ULA.Addr().As16() + as16 := ps.v6ULA.Addr().As16() as4 := v4.As4() copy(as16[12:], as4[:]) v6 := netip.AddrFrom16(as16) diff --git a/cmd/natc/natc_test.go b/cmd/natc/natc_test.go index ddd2d1894..66e0141b9 100644 --- a/cmd/natc/natc_test.go +++ b/cmd/natc/natc_test.go @@ -220,7 +220,7 @@ func TestPerPeerState(t *testing.T) { } c.setPrefixes([]netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")}) - ps := &perPeerState{c: c} + ps := newPerPeerState(c) addrs, err := ps.ipForDomain("example.com") if err != nil { @@ -360,7 +360,7 @@ func TestIPPoolExhaustion(t *testing.T) { } c.setPrefixes([]netip.Prefix{smallPrefix}) - ps := &perPeerState{c: c} + ps := newPerPeerState(c) assignedIPs := make(map[netip.Addr]string) From 46505ca338797ed4a4b902afd1f989c783000475 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 4 Apr 2025 16:32:12 -0700 Subject: [PATCH 0696/1708] tempfork/acme: update to latest version (#15543) Pull in https://github.com/tailscale/golang-x-crypto/pull/16 Updates #15542 Signed-off-by: Andrew Lytvynov --- go.mod | 2 +- go.sum | 4 ++-- tempfork/acme/acme.go | 13 +++++++------ tempfork/acme/acme_test.go | 5 +---- tempfork/acme/rfc8555.go | 10 ++++++++++ tempfork/acme/rfc8555_test.go | 7 +++++++ tempfork/acme/types.go | 24 ++++++++++++++++++++++++ 7 files changed, 52 insertions(+), 13 deletions(-) diff --git a/go.mod b/go.mod index 8ca56a4b9..ff736d950 100644 --- a/go.mod +++ b/go.mod @@ -77,7 +77,7 @@ require ( github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 - 
github.com/tailscale/golang-x-crypto v0.0.0-20250218230618-9a281fd8faca + github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a github.com/tailscale/mkctr v0.0.0-20250228050937-c75ea1476830 diff --git a/go.sum b/go.sum index ca1b5e30c..06fad5d6d 100644 --- a/go.sum +++ b/go.sum @@ -910,8 +910,8 @@ github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8 github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg= github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 h1:/V2rCMMWcsjYaYO2MeovLw+ClP63OtXgCF2Y1eb8+Ns= github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41/go.mod h1:/roCdA6gg6lQyw/Oz6gIIGu3ggJKYhF+WC/AQReE5XQ= -github.com/tailscale/golang-x-crypto v0.0.0-20250218230618-9a281fd8faca h1:ecjHwH73Yvqf/oIdQ2vxAX+zc6caQsYdPzsxNW1J3G8= -github.com/tailscale/golang-x-crypto v0.0.0-20250218230618-9a281fd8faca/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= +github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 h1:SRL6irQkKGQKKLzvQP/ke/2ZuB7Py5+XuqtOgSj+iMM= +github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio= github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= diff --git a/tempfork/acme/acme.go b/tempfork/acme/acme.go index 94234efe3..bbddb9551 100644 --- a/tempfork/acme/acme.go +++ b/tempfork/acme/acme.go @@ -270,10 +270,7 @@ func (c *Client) FetchRenewalInfo(ctx context.Context, leaf []byte) (*RenewalInf return nil, fmt.Errorf("parsing leaf certificate: %w", err) } - renewalURL, err := c.getRenewalURL(parsedLeaf) - if err != nil { - return nil, fmt.Errorf("generating renewal info URL: %w", err) - } + renewalURL := c.getRenewalURL(parsedLeaf) res, err := c.get(ctx, renewalURL, wantStatus(http.StatusOK)) if err != nil { @@ -288,16 +285,20 @@ func (c *Client) FetchRenewalInfo(ctx context.Context, leaf []byte) (*RenewalInf return &info, nil } -func (c *Client) getRenewalURL(cert *x509.Certificate) (string, error) { +func (c *Client) getRenewalURL(cert *x509.Certificate) string { // See https://www.ietf.org/archive/id/draft-ietf-acme-ari-04.html#name-the-renewalinfo-resource // for how the request URL is built. url := c.dir.RenewalInfoURL if !strings.HasSuffix(url, "/") { url += "/" } + return url + certRenewalIdentifier(cert) +} + +func certRenewalIdentifier(cert *x509.Certificate) string { aki := base64.RawURLEncoding.EncodeToString(cert.AuthorityKeyId) serial := base64.RawURLEncoding.EncodeToString(cert.SerialNumber.Bytes()) - return fmt.Sprintf("%s%s.%s", url, aki, serial), nil + return aki + "." 
+ serial } // AcceptTOS always returns true to indicate the acceptance of a CA's Terms of Service diff --git a/tempfork/acme/acme_test.go b/tempfork/acme/acme_test.go index 5473bbc2b..f0c45aea9 100644 --- a/tempfork/acme/acme_test.go +++ b/tempfork/acme/acme_test.go @@ -549,10 +549,7 @@ func TestGetRenewalURL(t *testing.T) { } client := newTestClientWithMockDirectory() - urlString, err := client.getRenewalURL(parsedLeaf) - if err != nil { - t.Fatal(err) - } + urlString := client.getRenewalURL(parsedLeaf) parsedURL, err := url.Parse(urlString) if err != nil { diff --git a/tempfork/acme/rfc8555.go b/tempfork/acme/rfc8555.go index 3152e531b..3eaf935fd 100644 --- a/tempfork/acme/rfc8555.go +++ b/tempfork/acme/rfc8555.go @@ -7,6 +7,7 @@ package acme import ( "context" "crypto" + "crypto/x509" "encoding/base64" "encoding/json" "encoding/pem" @@ -205,6 +206,7 @@ func (c *Client) AuthorizeOrder(ctx context.Context, id []AuthzID, opt ...OrderO Identifiers []wireAuthzID `json:"identifiers"` NotBefore string `json:"notBefore,omitempty"` NotAfter string `json:"notAfter,omitempty"` + Replaces string `json:"replaces,omitempty"` }{} for _, v := range id { req.Identifiers = append(req.Identifiers, wireAuthzID{ @@ -218,6 +220,14 @@ func (c *Client) AuthorizeOrder(ctx context.Context, id []AuthzID, opt ...OrderO req.NotBefore = time.Time(o).Format(time.RFC3339) case orderNotAfterOpt: req.NotAfter = time.Time(o).Format(time.RFC3339) + case orderReplacesCert: + req.Replaces = certRenewalIdentifier(o.cert) + case orderReplacesCertDER: + cert, err := x509.ParseCertificate(o) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate being replaced: %w", err) + } + req.Replaces = certRenewalIdentifier(cert) default: // Package's fault if we let this happen. panic(fmt.Sprintf("unsupported order option type %T", o)) diff --git a/tempfork/acme/rfc8555_test.go b/tempfork/acme/rfc8555_test.go index d65720a35..ec51a7a5e 100644 --- a/tempfork/acme/rfc8555_test.go +++ b/tempfork/acme/rfc8555_test.go @@ -766,10 +766,17 @@ func TestRFC_AuthorizeOrder(t *testing.T) { s.start() defer s.close() + prevCertDER, _ := pem.Decode([]byte(leafPEM)) + prevCert, err := x509.ParseCertificate(prevCertDER.Bytes) + if err != nil { + t.Fatal(err) + } + cl := &Client{Key: testKeyEC, DirectoryURL: s.url("/")} o, err := cl.AuthorizeOrder(context.Background(), DomainIDs("example.org"), WithOrderNotBefore(time.Date(2019, 8, 31, 0, 0, 0, 0, time.UTC)), WithOrderNotAfter(time.Date(2019, 9, 2, 0, 0, 0, 0, time.UTC)), + WithOrderReplacesCert(prevCert), ) if err != nil { t.Fatal(err) diff --git a/tempfork/acme/types.go b/tempfork/acme/types.go index 518fa2440..0142469d8 100644 --- a/tempfork/acme/types.go +++ b/tempfork/acme/types.go @@ -391,6 +391,30 @@ type orderNotAfterOpt time.Time func (orderNotAfterOpt) privateOrderOpt() {} +// WithOrderReplacesCert indicates that this Order is for a replacement of an +// existing certificate. +// See https://datatracker.ietf.org/doc/html/draft-ietf-acme-ari-03#section-5 +func WithOrderReplacesCert(cert *x509.Certificate) OrderOption { + return orderReplacesCert{cert} +} + +type orderReplacesCert struct { + cert *x509.Certificate +} + +func (orderReplacesCert) privateOrderOpt() {} + +// WithOrderReplacesCertDER indicates that this Order is for a replacement of +// an existing DER-encoded certificate. 
+// See https://datatracker.ietf.org/doc/html/draft-ietf-acme-ari-03#section-5 +func WithOrderReplacesCertDER(der []byte) OrderOption { + return orderReplacesCertDER(der) +} + +type orderReplacesCertDER []byte + +func (orderReplacesCertDER) privateOrderOpt() {} + // Authorization encodes an authorization response. type Authorization struct { // URI uniquely identifies a authorization. From 603a1d383087d20f5dad46ba466e879e0b5031eb Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Wed, 2 Apr 2025 10:17:59 -0700 Subject: [PATCH 0697/1708] cmd/natc: move address storage behind an interface Adds IPPool and moves all IP address management concerns behind that. Updates #14667 Signed-off-by: Fran Bull --- cmd/natc/ippool/ippool.go | 127 ++++++++++++++++++++++++++ cmd/natc/ippool/ippool_test.go | 129 +++++++++++++++++++++++++++ cmd/natc/{ => ippool}/ipx.go | 2 +- cmd/natc/{ => ippool}/ipx_test.go | 2 +- cmd/natc/natc.go | 142 +++++------------------------- cmd/natc/natc_test.go | 129 +++------------------------ 6 files changed, 294 insertions(+), 237 deletions(-) create mode 100644 cmd/natc/ippool/ippool.go create mode 100644 cmd/natc/ippool/ippool_test.go rename cmd/natc/{ => ippool}/ipx.go (99%) rename cmd/natc/{ => ippool}/ipx_test.go (99%) diff --git a/cmd/natc/ippool/ippool.go b/cmd/natc/ippool/ippool.go new file mode 100644 index 000000000..6f6ad1d83 --- /dev/null +++ b/cmd/natc/ippool/ippool.go @@ -0,0 +1,127 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// ippool implements IP address storage, creation, and retrieval for cmd/natc +package ippool + +import ( + "errors" + "log" + "math/big" + "net/netip" + "sync" + + "github.com/gaissmai/bart" + "go4.org/netipx" + "tailscale.com/syncs" + "tailscale.com/tailcfg" + "tailscale.com/util/dnsname" + "tailscale.com/util/mak" +) + +var ErrNoIPsAvailable = errors.New("no IPs available") + +type IPPool struct { + perPeerMap syncs.Map[tailcfg.NodeID, *perPeerState] + IPSet *netipx.IPSet + V6ULA netip.Prefix +} + +func (ipp *IPPool) DomainForIP(from tailcfg.NodeID, addr netip.Addr) (string, bool) { + ps, ok := ipp.perPeerMap.Load(from) + if !ok { + log.Printf("handleTCPFlow: no perPeerState for %v", from) + return "", false + } + domain, ok := ps.domainForIP(addr) + if !ok { + log.Printf("handleTCPFlow: no domain for IP %v\n", addr) + return "", false + } + return domain, ok +} + +func (ipp *IPPool) IPForDomain(from tailcfg.NodeID, domain string) ([]netip.Addr, error) { + npps := &perPeerState{ + ipset: ipp.IPSet, + v6ULA: ipp.V6ULA, + } + ps, _ := ipp.perPeerMap.LoadOrStore(from, npps) + return ps.ipForDomain(domain) +} + +// perPeerState holds the state for a single peer. +type perPeerState struct { + v6ULA netip.Prefix + ipset *netipx.IPSet + + mu sync.Mutex + addrInUse *big.Int + domainToAddr map[string][]netip.Addr + addrToDomain *bart.Table[string] +} + +// domainForIP returns the domain name assigned to the given IP address and +// whether it was found. +func (ps *perPeerState) domainForIP(ip netip.Addr) (_ string, ok bool) { + ps.mu.Lock() + defer ps.mu.Unlock() + if ps.addrToDomain == nil { + return "", false + } + return ps.addrToDomain.Lookup(ip) +} + +// ipForDomain assigns a pair of unique IP addresses for the given domain and +// returns them. The first address is an IPv4 address and the second is an IPv6 +// address. If the domain already has assigned addresses, it returns them. 
+func (ps *perPeerState) ipForDomain(domain string) ([]netip.Addr, error) { + fqdn, err := dnsname.ToFQDN(domain) + if err != nil { + return nil, err + } + domain = fqdn.WithoutTrailingDot() + + ps.mu.Lock() + defer ps.mu.Unlock() + if addrs, ok := ps.domainToAddr[domain]; ok { + return addrs, nil + } + addrs := ps.assignAddrsLocked(domain) + if addrs == nil { + return nil, ErrNoIPsAvailable + } + return addrs, nil +} + +// unusedIPv4Locked returns an unused IPv4 address from the available ranges. +func (ps *perPeerState) unusedIPv4Locked() netip.Addr { + if ps.addrInUse == nil { + ps.addrInUse = big.NewInt(0) + } + return allocAddr(ps.ipset, ps.addrInUse) +} + +// assignAddrsLocked assigns a pair of unique IP addresses for the given domain +// and returns them. The first address is an IPv4 address and the second is an +// IPv6 address. It does not check if the domain already has assigned addresses. +// ps.mu must be held. +func (ps *perPeerState) assignAddrsLocked(domain string) []netip.Addr { + if ps.addrToDomain == nil { + ps.addrToDomain = &bart.Table[string]{} + } + v4 := ps.unusedIPv4Locked() + if !v4.IsValid() { + return nil + } + as16 := ps.v6ULA.Addr().As16() + as4 := v4.As4() + copy(as16[12:], as4[:]) + v6 := netip.AddrFrom16(as16) + addrs := []netip.Addr{v4, v6} + mak.Set(&ps.domainToAddr, domain, addrs) + for _, a := range addrs { + ps.addrToDomain.Insert(netip.PrefixFrom(a, a.BitLen()), domain) + } + return addrs +} diff --git a/cmd/natc/ippool/ippool_test.go b/cmd/natc/ippool/ippool_test.go new file mode 100644 index 000000000..84b3b7a02 --- /dev/null +++ b/cmd/natc/ippool/ippool_test.go @@ -0,0 +1,129 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ippool + +import ( + "errors" + "fmt" + "net/netip" + "slices" + "testing" + + "go4.org/netipx" + "tailscale.com/tailcfg" + "tailscale.com/util/must" +) + +func TestIPPoolExhaustion(t *testing.T) { + smallPrefix := netip.MustParsePrefix("100.64.1.0/30") // Only 4 IPs: .0, .1, .2, .3 + var ipsb netipx.IPSetBuilder + ipsb.AddPrefix(smallPrefix) + addrPool := must.Get(ipsb.IPSet()) + v6ULA := netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80") + pool := IPPool{V6ULA: v6ULA, IPSet: addrPool} + + assignedIPs := make(map[netip.Addr]string) + + domains := []string{"a.example.com", "b.example.com", "c.example.com", "d.example.com", "e.example.com"} + + var errs []error + + from := tailcfg.NodeID(12345) + + for i := 0; i < 5; i++ { + for _, domain := range domains { + addrs, err := pool.IPForDomain(from, domain) + if err != nil { + errs = append(errs, fmt.Errorf("failed to get IP for domain %q: %w", domain, err)) + continue + } + + for _, addr := range addrs { + if d, ok := assignedIPs[addr]; ok { + if d != domain { + t.Errorf("IP %s reused for domain %q, previously assigned to %q", addr, domain, d) + } + } else { + assignedIPs[addr] = domain + } + } + } + } + + for addr, domain := range assignedIPs { + if addr.Is4() && !smallPrefix.Contains(addr) { + t.Errorf("IP %s for domain %q not in expected range %s", addr, domain, smallPrefix) + } + if addr.Is6() && !v6ULA.Contains(addr) { + t.Errorf("IP %s for domain %q not in expected range %s", addr, domain, v6ULA) + } + } + + // expect one error for each iteration with the 5th domain + if len(errs) != 5 { + t.Errorf("Expected 5 errors, got %d: %v", len(errs), errs) + } + for _, err := range errs { + if !errors.Is(err, ErrNoIPsAvailable) { + t.Errorf("generateDNSResponse() error = %v, want ErrNoIPsAvailable", err) + } + } +} + +func 
TestIPPool(t *testing.T) { + var ipsb netipx.IPSetBuilder + ipsb.AddPrefix(netip.MustParsePrefix("100.64.1.0/24")) + addrPool := must.Get(ipsb.IPSet()) + pool := IPPool{ + V6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), + IPSet: addrPool, + } + from := tailcfg.NodeID(12345) + addrs, err := pool.IPForDomain(from, "example.com") + if err != nil { + t.Fatalf("ipForDomain() error = %v", err) + } + + if len(addrs) != 2 { + t.Fatalf("ipForDomain() returned %d addresses, want 2", len(addrs)) + } + + v4 := addrs[0] + v6 := addrs[1] + + if !v4.Is4() { + t.Errorf("First address is not IPv4: %s", v4) + } + + if !v6.Is6() { + t.Errorf("Second address is not IPv6: %s", v6) + } + + if !addrPool.Contains(v4) { + t.Errorf("IPv4 address %s not in range %s", v4, addrPool) + } + + domain, ok := pool.DomainForIP(from, v4) + if !ok { + t.Errorf("domainForIP(%s) not found", v4) + } else if domain != "example.com" { + t.Errorf("domainForIP(%s) = %s, want %s", v4, domain, "example.com") + } + + domain, ok = pool.DomainForIP(from, v6) + if !ok { + t.Errorf("domainForIP(%s) not found", v6) + } else if domain != "example.com" { + t.Errorf("domainForIP(%s) = %s, want %s", v6, domain, "example.com") + } + + addrs2, err := pool.IPForDomain(from, "example.com") + if err != nil { + t.Fatalf("ipForDomain() second call error = %v", err) + } + + if !slices.Equal(addrs, addrs2) { + t.Errorf("ipForDomain() second call = %v, want %v", addrs2, addrs) + } +} diff --git a/cmd/natc/ipx.go b/cmd/natc/ippool/ipx.go similarity index 99% rename from cmd/natc/ipx.go rename to cmd/natc/ippool/ipx.go index 06bf7be79..8259a56db 100644 --- a/cmd/natc/ipx.go +++ b/cmd/natc/ippool/ipx.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package main +package ippool import ( "math/big" diff --git a/cmd/natc/ipx_test.go b/cmd/natc/ippool/ipx_test.go similarity index 99% rename from cmd/natc/ipx_test.go rename to cmd/natc/ippool/ipx_test.go index b60a5d981..2e2b9d3d4 100644 --- a/cmd/natc/ipx_test.go +++ b/cmd/natc/ippool/ipx_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package main +package ippool import ( "math" diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index bff9bce87..270524879 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -13,13 +13,11 @@ import ( "flag" "fmt" "log" - "math/big" "net" "net/http" "net/netip" "os" "strings" - "sync" "time" "github.com/gaissmai/bart" @@ -28,22 +26,18 @@ import ( "go4.org/netipx" "golang.org/x/net/dns/dnsmessage" "tailscale.com/client/local" + "tailscale.com/cmd/natc/ippool" "tailscale.com/envknob" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/net/netutil" - "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tsnet" "tailscale.com/tsweb" - "tailscale.com/util/dnsname" - "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/wgengine/netstack" ) -var ErrNoIPsAvailable = errors.New("no IPs available") - func main() { hostinfo.SetApp("natc") if !envknob.UseWIPCode() { @@ -141,12 +135,6 @@ func main() { log.Fatalf("ts.Up: %v", err) } - c := &connector{ - ts: ts, - lc: lc, - v6ULA: ula(uint16(*siteID)), - ignoreDsts: ignoreDstTable, - } var prefixes []netip.Prefix for _, s := range strings.Split(*v4PfxStr, ",") { p := netip.MustParsePrefix(strings.TrimSpace(s)) @@ -155,19 +143,31 @@ func main() { } prefixes = append(prefixes, p) } - c.setPrefixes(prefixes) + routes, dnsAddr, addrPool := calculateAddresses(prefixes) + + v6ULA := 
ula(uint16(*siteID)) + c := &connector{ + ts: ts, + lc: lc, + v6ULA: v6ULA, + ignoreDsts: ignoreDstTable, + ipPool: &ippool.IPPool{V6ULA: v6ULA, IPSet: addrPool}, + routes: routes, + dnsAddr: dnsAddr, + } c.run(ctx) } -func (c *connector) setPrefixes(prefixes []netip.Prefix) { +func calculateAddresses(prefixes []netip.Prefix) (*netipx.IPSet, netip.Addr, *netipx.IPSet) { var ipsb netipx.IPSetBuilder for _, p := range prefixes { ipsb.AddPrefix(p) } - c.routes = must.Get(ipsb.IPSet()) - c.dnsAddr = c.routes.Ranges()[0].From() - ipsb.Remove(c.dnsAddr) - c.ipset = must.Get(ipsb.IPSet()) + routesToAdvertise := must.Get(ipsb.IPSet()) + dnsAddr := routesToAdvertise.Ranges()[0].From() + ipsb.Remove(dnsAddr) + addrPool := must.Get(ipsb.IPSet()) + return routesToAdvertise, dnsAddr, addrPool } type connector struct { @@ -181,10 +181,6 @@ type connector struct { // prevent the app connector from assigning it to a domain. dnsAddr netip.Addr - // ipset is the set of IPv4 ranges to advertise and assign addresses from. - // These are masked prefixes. - ipset *netipx.IPSet - // routes is the set of IPv4 ranges advertised to the tailnet, or ipset with // the dnsAddr removed. routes *netipx.IPSet @@ -192,8 +188,6 @@ type connector struct { // v6ULA is the ULA prefix used by the app connector to assign IPv6 addresses. v6ULA netip.Prefix - perPeerMap syncs.Map[tailcfg.NodeID, *perPeerState] - // ignoreDsts is initialized at start up with the contents of --ignore-destinations (if none it is nil) // It is never mutated, only used for lookups. // Users who want to natc a DNS wildcard but not every address record in that domain can supply the @@ -202,6 +196,8 @@ type connector struct { // return a dns response that contains the ip addresses we discovered with the lookup (ie not the // natc behavior, which would return a dummy ip address pointing at natc). ignoreDsts *bart.Table[bool] + + ipPool *ippool.IPPool } // v6ULA is the ULA prefix used by the app connector to assign IPv6 addresses. @@ -359,13 +355,12 @@ var tsMBox = dnsmessage.MustNewName("support.tailscale.com.") // generateDNSResponse generates a DNS response for the given request. The from // argument is the NodeID of the node that sent the request. func (c *connector) generateDNSResponse(req *dnsmessage.Message, from tailcfg.NodeID) ([]byte, error) { - pm, _ := c.perPeerMap.LoadOrStore(from, newPerPeerState(c)) var addrs []netip.Addr if len(req.Questions) > 0 { switch req.Questions[0].Type { case dnsmessage.TypeAAAA, dnsmessage.TypeA: var err error - addrs, err = pm.ipForDomain(req.Questions[0].Name.String()) + addrs, err = c.ipPool.IPForDomain(from, req.Questions[0].Name.String()) if err != nil { return nil, err } @@ -454,16 +449,8 @@ func (c *connector) handleTCPFlow(src, dst netip.AddrPort) (handler func(net.Con log.Printf("HandleTCPFlow: WhoIs failed: %v\n", err) return nil, false } - - from := who.Node.ID - ps, ok := c.perPeerMap.Load(from) + domain, ok := c.ipPool.DomainForIP(who.Node.ID, dst.Addr()) if !ok { - log.Printf("handleTCPFlow: no perPeerState for %v", from) - return nil, false - } - domain, ok := ps.domainForIP(dst.Addr()) - if !ok { - log.Printf("handleTCPFlow: no domain for IP %v\n", dst.Addr()) return nil, false } return func(conn net.Conn) { @@ -506,86 +493,3 @@ func proxyTCPConn(c net.Conn, dest string) { }) p.Start() } - -// perPeerState holds the state for a single peer. 
-type perPeerState struct { - v6ULA netip.Prefix - ipset *netipx.IPSet - - mu sync.Mutex - addrInUse *big.Int - domainToAddr map[string][]netip.Addr - addrToDomain *bart.Table[string] -} - -func newPerPeerState(c *connector) *perPeerState { - return &perPeerState{ - ipset: c.ipset, - v6ULA: c.v6ULA, - } -} - -// domainForIP returns the domain name assigned to the given IP address and -// whether it was found. -func (ps *perPeerState) domainForIP(ip netip.Addr) (_ string, ok bool) { - ps.mu.Lock() - defer ps.mu.Unlock() - if ps.addrToDomain == nil { - return "", false - } - return ps.addrToDomain.Lookup(ip) -} - -// ipForDomain assigns a pair of unique IP addresses for the given domain and -// returns them. The first address is an IPv4 address and the second is an IPv6 -// address. If the domain already has assigned addresses, it returns them. -func (ps *perPeerState) ipForDomain(domain string) ([]netip.Addr, error) { - fqdn, err := dnsname.ToFQDN(domain) - if err != nil { - return nil, err - } - domain = fqdn.WithoutTrailingDot() - - ps.mu.Lock() - defer ps.mu.Unlock() - if addrs, ok := ps.domainToAddr[domain]; ok { - return addrs, nil - } - addrs := ps.assignAddrsLocked(domain) - if addrs == nil { - return nil, ErrNoIPsAvailable - } - return addrs, nil -} - -// unusedIPv4Locked returns an unused IPv4 address from the available ranges. -func (ps *perPeerState) unusedIPv4Locked() netip.Addr { - if ps.addrInUse == nil { - ps.addrInUse = big.NewInt(0) - } - return allocAddr(ps.ipset, ps.addrInUse) -} - -// assignAddrsLocked assigns a pair of unique IP addresses for the given domain -// and returns them. The first address is an IPv4 address and the second is an -// IPv6 address. It does not check if the domain already has assigned addresses. -// ps.mu must be held. 
-func (ps *perPeerState) assignAddrsLocked(domain string) []netip.Addr { - if ps.addrToDomain == nil { - ps.addrToDomain = &bart.Table[string]{} - } - v4 := ps.unusedIPv4Locked() - if !v4.IsValid() { - return nil - } - as16 := ps.v6ULA.Addr().As16() - as4 := v4.As4() - copy(as16[12:], as4[:]) - v6 := netip.AddrFrom16(as16) - addrs := []netip.Addr{v4, v6} - mak.Set(&ps.domainToAddr, domain, addrs) - for _, a := range addrs { - ps.addrToDomain.Insert(netip.PrefixFrom(a, a.BitLen()), domain) - } - return addrs -} diff --git a/cmd/natc/natc_test.go b/cmd/natc/natc_test.go index 66e0141b9..09ade0a98 100644 --- a/cmd/natc/natc_test.go +++ b/cmd/natc/natc_test.go @@ -4,15 +4,13 @@ package main import ( - "errors" - "fmt" "net/netip" - "slices" "testing" "github.com/gaissmai/bart" "github.com/google/go-cmp/cmp" "golang.org/x/net/dns/dnsmessage" + "tailscale.com/cmd/natc/ippool" "tailscale.com/tailcfg" ) @@ -214,62 +212,6 @@ func TestDNSResponse(t *testing.T) { } } -func TestPerPeerState(t *testing.T) { - c := &connector{ - v6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), - } - c.setPrefixes([]netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")}) - - ps := newPerPeerState(c) - - addrs, err := ps.ipForDomain("example.com") - if err != nil { - t.Fatalf("ipForDomain() error = %v", err) - } - - if len(addrs) != 2 { - t.Fatalf("ipForDomain() returned %d addresses, want 2", len(addrs)) - } - - v4 := addrs[0] - v6 := addrs[1] - - if !v4.Is4() { - t.Errorf("First address is not IPv4: %s", v4) - } - - if !v6.Is6() { - t.Errorf("Second address is not IPv6: %s", v6) - } - - if !c.ipset.Contains(v4) { - t.Errorf("IPv4 address %s not in range %s", v4, c.ipset) - } - - domain, ok := ps.domainForIP(v4) - if !ok { - t.Errorf("domainForIP(%s) not found", v4) - } else if domain != "example.com" { - t.Errorf("domainForIP(%s) = %s, want %s", v4, domain, "example.com") - } - - domain, ok = ps.domainForIP(v6) - if !ok { - t.Errorf("domainForIP(%s) not found", v6) - } else if domain != "example.com" { - t.Errorf("domainForIP(%s) = %s, want %s", v6, domain, "example.com") - } - - addrs2, err := ps.ipForDomain("example.com") - if err != nil { - t.Fatalf("ipForDomain() second call error = %v", err) - } - - if !slices.Equal(addrs, addrs2) { - t.Errorf("ipForDomain() second call = %v, want %v", addrs2, addrs) - } -} - func TestIgnoreDestination(t *testing.T) { ignoreDstTable := &bart.Table[bool]{} ignoreDstTable.Insert(netip.MustParsePrefix("192.168.1.0/24"), true) @@ -317,10 +259,14 @@ func TestIgnoreDestination(t *testing.T) { } func TestConnectorGenerateDNSResponse(t *testing.T) { + v6ULA := netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80") + routes, dnsAddr, addrPool := calculateAddresses([]netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")}) c := &connector{ - v6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), + v6ULA: v6ULA, + ipPool: &ippool.IPPool{V6ULA: v6ULA, IPSet: addrPool}, + routes: routes, + dnsAddr: dnsAddr, } - c.setPrefixes([]netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")}) req := &dnsmessage.Message{ Header: dnsmessage.Header{ID: 1234}, @@ -351,62 +297,13 @@ func TestConnectorGenerateDNSResponse(t *testing.T) { if !cmp.Equal(resp1, resp2) { t.Errorf("generateDNSResponse() responses differ between calls") } -} - -func TestIPPoolExhaustion(t *testing.T) { - smallPrefix := netip.MustParsePrefix("100.64.1.0/30") // Only 4 IPs: .0, .1, .2, .3 - c := &connector{ - v6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), - } - c.setPrefixes([]netip.Prefix{smallPrefix}) - 
- ps := newPerPeerState(c) - - assignedIPs := make(map[netip.Addr]string) - domains := []string{"a.example.com", "b.example.com", "c.example.com", "d.example.com"} - - var errs []error - - for i := 0; i < 5; i++ { - for _, domain := range domains { - addrs, err := ps.ipForDomain(domain) - if err != nil { - errs = append(errs, fmt.Errorf("failed to get IP for domain %q: %w", domain, err)) - continue - } - - for _, addr := range addrs { - if d, ok := assignedIPs[addr]; ok { - if d != domain { - t.Errorf("IP %s reused for domain %q, previously assigned to %q", addr, domain, d) - } - } else { - assignedIPs[addr] = domain - } - } - } - } - - for addr, domain := range assignedIPs { - if addr.Is4() && !smallPrefix.Contains(addr) { - t.Errorf("IP %s for domain %q not in expected range %s", addr, domain, smallPrefix) - } - if addr.Is6() && !c.v6ULA.Contains(addr) { - t.Errorf("IP %s for domain %q not in expected range %s", addr, domain, c.v6ULA) - } - if addr == c.dnsAddr { - t.Errorf("IP %s for domain %q is the reserved DNS address", addr, domain) - } - } - - // expect one error for each iteration with the 4th domain - if len(errs) != 5 { - t.Errorf("Expected 5 errors, got %d: %v", len(errs), errs) + var msg dnsmessage.Message + err = msg.Unpack(resp1) + if err != nil { + t.Fatalf("dnsmessage Unpack error = %v", err) } - for _, err := range errs { - if !errors.Is(err, ErrNoIPsAvailable) { - t.Errorf("generateDNSResponse() error = %v, want ErrNoIPsAvailable", err) - } + if len(msg.Answers) != 1 { + t.Fatalf("expected 1 answer, got: %d", len(msg.Answers)) } } From 6d117d64a256234372f2bb177392b987aa1758af Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 6 Apr 2025 13:27:10 -0700 Subject: [PATCH 0698/1708] util/testenv: add func to report whether a testing.TB is in parallel mode For future in-memory network changes (#15558) to be able to be stricter and do automatic leak detection when it's safe to do so, in non-parallel tests. Updates tailscale/corp#27636 Change-Id: I50f03b16a3f92ce61a7ed88264b49d8c6628f638 Signed-off-by: Brad Fitzpatrick --- util/testenv/testenv.go | 39 ++++++++++++++++++++++++++++++++++++ util/testenv/testenv_test.go | 13 ++++++++++++ 2 files changed, 52 insertions(+) diff --git a/util/testenv/testenv.go b/util/testenv/testenv.go index 12ada9003..3e23baef4 100644 --- a/util/testenv/testenv.go +++ b/util/testenv/testenv.go @@ -6,6 +6,7 @@ package testenv import ( + "context" "flag" "tailscale.com/types/lazy" @@ -19,3 +20,41 @@ func InTest() bool { return flag.Lookup("test.v") != nil }) } + +// TB is testing.TB, to avoid importing "testing" in non-test code. +type TB interface { + Cleanup(func()) + Error(args ...any) + Errorf(format string, args ...any) + Fail() + FailNow() + Failed() bool + Fatal(args ...any) + Fatalf(format string, args ...any) + Helper() + Log(args ...any) + Logf(format string, args ...any) + Name() string + Setenv(key, value string) + Chdir(dir string) + Skip(args ...any) + SkipNow() + Skipf(format string, args ...any) + Skipped() bool + TempDir() string + Context() context.Context +} + +// InParallelTest reports whether t is running as a parallel test. +// +// Use of this function taints t such that its Parallel method (assuming t is an +// actual *testing.T) will panic if called after this function. 
+func InParallelTest(t TB) (isParallel bool) { + defer func() { + if r := recover(); r != nil { + isParallel = true + } + }() + t.Chdir(".") // panics in a t.Parallel test + return false +} diff --git a/util/testenv/testenv_test.go b/util/testenv/testenv_test.go index 43c332b26..c647d9aec 100644 --- a/util/testenv/testenv_test.go +++ b/util/testenv/testenv_test.go @@ -16,3 +16,16 @@ func TestDeps(t *testing.T) { }, }.Check(t) } + +func TestInParallelTestTrue(t *testing.T) { + t.Parallel() + if !InParallelTest(t) { + t.Fatal("InParallelTest should return true once t.Parallel has been called") + } +} + +func TestInParallelTestFalse(t *testing.T) { + if InParallelTest(t) { + t.Fatal("InParallelTest should return false before t.Parallel has been called") + } +} From c76d0754723f10f6d7100972bbddaf6edae4e57c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 6 Apr 2025 08:10:55 -0700 Subject: [PATCH 0699/1708] nettest, *: add option to run HTTP tests with in-memory network To avoid ephemeral port / TIME_WAIT exhaustion with high --count values, and to eventually detect leaked connections in tests. (Later the memory network will register a Cleanup on the TB to verify that everything's been shut down) Updates tailscale/corp#27636 Change-Id: Id06f1ae750d8719c5a75d871654574a8226d2733 Signed-off-by: Brad Fitzpatrick --- client/local/local_test.go | 8 +- client/web/web_test.go | 3 +- control/controlclient/noise_test.go | 9 +- net/tsdial/tsdial.go | 22 ++- tstest/nettest/nettest.go | 199 ++++++++++++++++++++++++++++ util/testenv/testenv.go | 7 + 6 files changed, 237 insertions(+), 11 deletions(-) diff --git a/client/local/local_test.go b/client/local/local_test.go index 4322e4dde..0e01e74cd 100644 --- a/client/local/local_test.go +++ b/client/local/local_test.go @@ -9,10 +9,10 @@ import ( "context" "net" "net/http" - "net/http/httptest" "testing" "tailscale.com/tstest/deptest" + "tailscale.com/tstest/nettest" "tailscale.com/types/key" ) @@ -36,15 +36,15 @@ func TestGetServeConfigFromJSON(t *testing.T) { } func TestWhoIsPeerNotFound(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + nw := nettest.GetNetwork(t) + ts := nettest.NewHTTPServer(nw, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(404) })) defer ts.Close() lc := &Client{ Dial: func(ctx context.Context, network, addr string) (net.Conn, error) { - var std net.Dialer - return std.DialContext(ctx, network, ts.Listener.Addr().(*net.TCPAddr).String()) + return nw.Dial(ctx, network, ts.Listener.Addr().String()) }, } var k key.NodePublic diff --git a/client/web/web_test.go b/client/web/web_test.go index 334b403a6..2a6bc787a 100644 --- a/client/web/web_test.go +++ b/client/web/web_test.go @@ -28,6 +28,7 @@ import ( "tailscale.com/ipn/ipnstate" "tailscale.com/net/memnet" "tailscale.com/tailcfg" + "tailscale.com/tstest/nettest" "tailscale.com/types/views" "tailscale.com/util/httpm" ) @@ -1508,7 +1509,7 @@ func TestCSRFProtect(t *testing.T) { } }) h := s.withCSRF(mux) - ser := httptest.NewServer(h) + ser := nettest.NewHTTPServer(nettest.GetNetwork(t), h) defer ser.Close() jar, err := cookiejar.New(nil) diff --git a/control/controlclient/noise_test.go b/control/controlclient/noise_test.go index dadf237df..4904016f2 100644 --- a/control/controlclient/noise_test.go +++ b/control/controlclient/noise_test.go @@ -10,7 +10,6 @@ import ( "io" "math" "net/http" - "net/http/httptest" "testing" "time" @@ -20,6 +19,7 @@ import ( "tailscale.com/net/netmon" 
"tailscale.com/net/tsdial" "tailscale.com/tailcfg" + "tailscale.com/tstest/nettest" "tailscale.com/types/key" "tailscale.com/types/logger" ) @@ -178,7 +178,8 @@ func (tt noiseClientTest) run(t *testing.T) { const msg = "Hello, client" h2 := &http2.Server{} - hs := httptest.NewServer(&Upgrader{ + nw := nettest.GetNetwork(t) + hs := nettest.NewHTTPServer(nw, &Upgrader{ h2srv: h2, noiseKeyPriv: serverPrivate, sendEarlyPayload: tt.sendEarlyPayload, @@ -193,6 +194,10 @@ func (tt noiseClientTest) run(t *testing.T) { defer hs.Close() dialer := tsdial.NewDialer(netmon.NewStatic()) + if nettest.PreferMemNetwork() { + dialer.SetSystemDialerForTest(nw.Dial) + } + nc, err := NewNoiseClient(NoiseOpts{ PrivKey: clientPrivate, ServerPubKey: serverPrivate.Public(), diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index 8d287fdb0..8fddd63f2 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -71,6 +71,7 @@ type Dialer struct { netnsDialerOnce sync.Once netnsDialer netns.Dialer + sysDialForTest func(_ context.Context, network, addr string) (net.Conn, error) // or nil routes atomic.Pointer[bart.Table[bool]] // or nil if UserDial should not use routes. `true` indicates routes that point into the Tailscale interface @@ -361,6 +362,13 @@ func (d *Dialer) logf(format string, args ...any) { } } +// SetSystemDialerForTest sets an alternate function to use for SystemDial +// instead of netns.Dialer. This is intended for use with nettest.MemoryNetwork. +func (d *Dialer) SetSystemDialerForTest(fn func(ctx context.Context, network, addr string) (net.Conn, error)) { + testenv.AssertInTest() + d.sysDialForTest = fn +} + // SystemDial connects to the provided network address without going over // Tailscale. It prefers going over the default interface and closes existing // connections if the default interface changes. It is used to connect to @@ -380,10 +388,16 @@ func (d *Dialer) SystemDial(ctx context.Context, network, addr string) (net.Conn return nil, net.ErrClosed } - d.netnsDialerOnce.Do(func() { - d.netnsDialer = netns.NewDialer(d.logf, d.netMon) - }) - c, err := d.netnsDialer.DialContext(ctx, network, addr) + var c net.Conn + var err error + if d.sysDialForTest != nil { + c, err = d.sysDialForTest(ctx, network, addr) + } else { + d.netnsDialerOnce.Do(func() { + d.netnsDialer = netns.NewDialer(d.logf, d.netMon) + }) + c, err = d.netnsDialer.DialContext(ctx, network, addr) + } if err != nil { return nil, err } diff --git a/tstest/nettest/nettest.go b/tstest/nettest/nettest.go index 47c8857a5..f03d6987b 100644 --- a/tstest/nettest/nettest.go +++ b/tstest/nettest/nettest.go @@ -6,11 +6,23 @@ package nettest import ( + "context" + "flag" + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/netip" + "sync" "testing" + "tailscale.com/net/memnet" "tailscale.com/net/netmon" + "tailscale.com/util/testenv" ) +var useMemNet = flag.Bool("use-test-memnet", false, "prefer using in-memory network for tests") + // SkipIfNoNetwork skips the test if it looks like there's no network // access. func SkipIfNoNetwork(t testing.TB) { @@ -19,3 +31,190 @@ func SkipIfNoNetwork(t testing.TB) { t.Skip("skipping; test requires network but no interface is up") } } + +// Network is an interface for use in tests that describes either [RealNetwork] +// or [MemNetwork]. 
+type Network interface { + NewLocalTCPListener() net.Listener + Listen(network, address string) (net.Listener, error) + Dial(ctx context.Context, network, address string) (net.Conn, error) +} + +// PreferMemNetwork reports whether the --use-test-memnet flag is set. +func PreferMemNetwork() bool { + return *useMemNet +} + +// GetNetwork returns the appropriate Network implementation based on +// whether the --use-test-memnet flag is set. +// +// Each call generates a new network. +func GetNetwork(tb testing.TB) Network { + var n Network + if PreferMemNetwork() { + n = MemNetwork() + } else { + n = RealNetwork() + } + + detectLeaks := PreferMemNetwork() || !testenv.InParallelTest(tb) + if detectLeaks { + tb.Cleanup(func() { + // TODO: leak detection, making sure no connections + // remain at the end of the test. For real network, + // snapshot conns in pid table before & after. + }) + } + return n +} + +// RealNetwork returns a Network implementation that uses the real +// net package. +func RealNetwork() Network { return realNetwork{} } + +// realNetwork implements [Network] using the real net package. +type realNetwork struct{} + +func (realNetwork) Listen(network, address string) (net.Listener, error) { + return net.Listen(network, address) +} + +func (realNetwork) Dial(ctx context.Context, network, address string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, network, address) +} + +func (realNetwork) NewLocalTCPListener() net.Listener { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + if ln, err = net.Listen("tcp6", "[::1]:0"); err != nil { + panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err)) + } + } + return ln +} + +// MemNetwork returns a Network implementation that uses an in-memory +// network for testing. It is only suitable for tests that do not +// require real network access. +func MemNetwork() Network { return &memNetwork{} } + +// memNetwork implements [Network] using an in-memory network. 
+type memNetwork struct { + mu sync.Mutex + lns map[string]*memnet.Listener // address -> listener +} + +func (m *memNetwork) Listen(network, address string) (net.Listener, error) { + if network != "tcp" && network != "tcp4" && network != "tcp6" { + return nil, fmt.Errorf("memNetwork: Listen called with unsupported network %q", network) + } + ap, err := netip.ParseAddrPort(address) + if err != nil { + return nil, fmt.Errorf("memNetwork: Listen called with invalid address %q: %w", address, err) + } + + m.mu.Lock() + defer m.mu.Unlock() + + if m.lns == nil { + m.lns = make(map[string]*memnet.Listener) + } + port := ap.Port() + for { + if port == 0 { + port = 33000 + } + key := net.JoinHostPort(ap.Addr().String(), fmt.Sprint(port)) + _, ok := m.lns[key] + if ok { + if ap.Port() != 0 { + return nil, fmt.Errorf("memNetwork: Listen called with duplicate address %q", address) + } + port++ + continue + } + ln := memnet.Listen(key) + m.lns[key] = ln + return ln, nil + } +} + +func (m *memNetwork) NewLocalTCPListener() net.Listener { + ln, err := m.Listen("tcp", "127.0.0.1:0") + if err != nil { + panic(fmt.Sprintf("memNetwork: failed to create local TCP listener: %v", err)) + } + return ln +} + +func (m *memNetwork) Dial(ctx context.Context, network, address string) (net.Conn, error) { + if network != "tcp" && network != "tcp4" && network != "tcp6" { + return nil, fmt.Errorf("memNetwork: Dial called with unsupported network %q", network) + } + m.mu.Lock() + ln, ok := m.lns[address] + m.mu.Unlock() + if !ok { + return nil, fmt.Errorf("memNetwork: Dial called on unknown address %q", address) + } + return ln.Dial(ctx, network, address) +} + +// NewHTTPServer starts and returns a new [httptest.Server]. +// The caller should call Close when finished, to shut it down. +func NewHTTPServer(net Network, handler http.Handler) *httptest.Server { + ts := NewUnstartedHTTPServer(net, handler) + ts.Start() + return ts +} + +// NewUnstartedHTTPServer returns a new [httptest.Server] but doesn't start it. +// +// After changing its configuration, the caller should call Start or +// StartTLS. +// +// The caller should call Close when finished, to shut it down. +func NewUnstartedHTTPServer(nw Network, handler http.Handler) *httptest.Server { + s := &httptest.Server{ + Config: &http.Server{Handler: handler}, + } + ln := nw.NewLocalTCPListener() + s.Listener = &listenerOnAddrOnce{ + Listener: ln, + fn: func() { + c := s.Client() + if c == nil { + // This httptest.Server.Start initialization order has been true + // for over 10 years. Let's keep counting on it. + panic("httptest.Server: Client not initialized before Addr called") + } + if c.Transport == nil { + c.Transport = &http.Transport{} + } + tr := c.Transport.(*http.Transport) + if tr.Dial != nil || tr.DialContext != nil { + panic("unexpected non-nil Dial or DialContext in httptest.Server.Client.Transport") + } + tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { + return nw.Dial(ctx, network, addr) + } + }, + } + return s +} + +// listenerOnAddrOnce is a net.Listener that wraps another net.Listener +// and calls a function the first time its Addr is called. 
+type listenerOnAddrOnce struct { + net.Listener + once sync.Once + fn func() +} + +func (ln *listenerOnAddrOnce) Addr() net.Addr { + ln.once.Do(func() { + ln.fn() + }) + return ln.Listener.Addr() +} diff --git a/util/testenv/testenv.go b/util/testenv/testenv.go index 3e23baef4..aa6660411 100644 --- a/util/testenv/testenv.go +++ b/util/testenv/testenv.go @@ -58,3 +58,10 @@ func InParallelTest(t TB) (isParallel bool) { t.Chdir(".") // panics in a t.Parallel test return false } + +// AssertInTest panics if called outside of a test binary. +func AssertInTest() { + if !InTest() { + panic("func called outside of test binary") + } +} From ead6a72e45fd77f4d805ae3251c4f421ae0ee688 Mon Sep 17 00:00:00 2001 From: Craig Hesling Date: Wed, 2 Apr 2025 21:20:40 -0700 Subject: [PATCH 0700/1708] drive: fix minor typos in comments Signed-off-by: Craig Hesling --- drive/driveimpl/dirfs/dirfs.go | 2 +- drive/driveimpl/fileserver.go | 2 +- drive/remote_permissions.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drive/driveimpl/dirfs/dirfs.go b/drive/driveimpl/dirfs/dirfs.go index c1f28bb9d..50a3330a9 100644 --- a/drive/driveimpl/dirfs/dirfs.go +++ b/drive/driveimpl/dirfs/dirfs.go @@ -44,7 +44,7 @@ func (c *Child) isAvailable() bool { // Any attempts to perform operations on paths inside of children will result // in a panic, as these are not expected to be performed on this FS. // -// An FS an optionally have a StaticRoot, which will insert a folder with that +// An FS can optionally have a StaticRoot, which will insert a folder with that // StaticRoot into the tree, like this: // // -- diff --git a/drive/driveimpl/fileserver.go b/drive/driveimpl/fileserver.go index 0067c1cc7..ef94b0643 100644 --- a/drive/driveimpl/fileserver.go +++ b/drive/driveimpl/fileserver.go @@ -61,7 +61,7 @@ func NewFileServer() (*FileServer, error) { }, nil } -// generateSecretToken generates a hex-encoded 256 bit secet. +// generateSecretToken generates a hex-encoded 256 bit secret. func generateSecretToken() (string, error) { tokenBytes := make([]byte, 32) _, err := rand.Read(tokenBytes) diff --git a/drive/remote_permissions.go b/drive/remote_permissions.go index d3d41c6ec..420eff9a0 100644 --- a/drive/remote_permissions.go +++ b/drive/remote_permissions.go @@ -32,7 +32,7 @@ type grant struct { Access string } -// ParsePermissions builds a Permissions map from a lis of raw grants. +// ParsePermissions builds a Permissions map from a list of raw grants. func ParsePermissions(rawGrants [][]byte) (Permissions, error) { permissions := make(Permissions) for _, rawGrant := range rawGrants { From c29b6c288ab1fb538c66428765fa82d5a60d3db6 Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 7 Apr 2025 15:02:53 +0000 Subject: [PATCH 0701/1708] licenses: update license notices Signed-off-by: License Updater --- licenses/apple.md | 14 +++++++------- licenses/tailscale.md | 12 +++--------- licenses/windows.md | 15 ++++++++------- 3 files changed, 18 insertions(+), 23 deletions(-) diff --git a/licenses/apple.md b/licenses/apple.md index 814df22da..5a017076e 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -45,9 +45,9 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - [github.com/jsimonetti/rtnetlink](https://pkg.go.dev/github.com/jsimonetti/rtnetlink) ([MIT](https://github.com/jsimonetti/rtnetlink/blob/v1.4.1/LICENSE.md)) - - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE)) - - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE)) - - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) + - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.0/LICENSE)) + - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.0/internal/snapref/LICENSE)) + - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.0/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - [github.com/mdlayher/genetlink](https://pkg.go.dev/github.com/mdlayher/genetlink) ([MIT](https://github.com/mdlayher/genetlink/blob/v1.3.2/LICENSE.md)) - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) @@ -61,7 +61,7 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/0b8b35511f19/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/91a0587fb251/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/d2acac8f3701/LICENSE)) - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) @@ -71,9 +71,9 @@ See also the dependencies in the [Tailscale CLI][]. - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.12.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.31.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.30.0:LICENSE)) - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index b3095f5b4..206734fb4 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -34,7 +34,6 @@ Some packages may only be included on certain architectures or operating systems - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) 
([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/creack/pty](https://pkg.go.dev/github.com/creack/pty) ([MIT](https://github.com/creack/pty/blob/v1.1.23/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/a09d6be7affa/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) @@ -46,7 +45,6 @@ Some packages may only be included on certain architectures or operating systems - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - [github.com/gorilla/csrf](https://pkg.go.dev/github.com/gorilla/csrf) ([BSD-3-Clause](https://github.com/gorilla/csrf/blob/9dd6af1f6d30/LICENSE)) - [github.com/gorilla/securecookie](https://pkg.go.dev/github.com/gorilla/securecookie) ([BSD-3-Clause](https://github.com/gorilla/securecookie/blob/v1.1.2/LICENSE)) @@ -63,8 +61,6 @@ Some packages may only be included on certain architectures or operating systems - [github.com/kr/fs](https://pkg.go.dev/github.com/kr/fs) ([BSD-3-Clause](https://github.com/kr/fs/blob/v0.1.0/LICENSE)) - [github.com/mattn/go-colorable](https://pkg.go.dev/github.com/mattn/go-colorable) ([MIT](https://github.com/mattn/go-colorable/blob/v0.1.13/LICENSE)) - [github.com/mattn/go-isatty](https://pkg.go.dev/github.com/mattn/go-isatty) ([MIT](https://github.com/mattn/go-isatty/blob/v0.0.20/LICENSE)) - - [github.com/mdlayher/genetlink](https://pkg.go.dev/github.com/mdlayher/genetlink) ([MIT](https://github.com/mdlayher/genetlink/blob/v1.3.2/LICENSE.md)) - - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) ([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) @@ -77,16 +73,14 @@ Some packages may only be included on certain architectures or operating systems - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) 
([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/certstore](https://pkg.go.dev/github.com/tailscale/certstore) ([MIT](https://github.com/tailscale/certstore/blob/d3fa0460f47e/LICENSE.md)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - [github.com/tailscale/web-client-prebuilt](https://pkg.go.dev/github.com/tailscale/web-client-prebuilt) ([BSD-3-Clause](https://github.com/tailscale/web-client-prebuilt/blob/d4cd19a26976/LICENSE)) - [github.com/tailscale/wf](https://pkg.go.dev/github.com/tailscale/wf) ([BSD-3-Clause](https://github.com/tailscale/wf/blob/6fbb0a674ee6/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/0b8b35511f19/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/91a0587fb251/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/toqueteos/webbrowser](https://pkg.go.dev/github.com/toqueteos/webbrowser) ([MIT](https://github.com/toqueteos/webbrowser/blob/v1.2.0/LICENSE.md)) - - [github.com/u-root/u-root/pkg/termios](https://pkg.go.dev/github.com/u-root/u-root/pkg/termios) ([BSD-3-Clause](https://github.com/u-root/u-root/blob/v0.12.0/LICENSE)) + - [github.com/u-root/u-root/pkg/termios](https://pkg.go.dev/github.com/u-root/u-root/pkg/termios) ([BSD-3-Clause](https://github.com/u-root/u-root/blob/v0.14.0/LICENSE)) - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/d2acac8f3701/LICENSE)) - - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) @@ -95,7 +89,7 @@ Some packages may only be included on certain architectures or operating systems - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.26.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.31.0:LICENSE)) - 
[golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index bdf965051..e47bc3227 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -38,6 +38,7 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) + - [github.com/google/go-cmp/cmp](https://pkg.go.dev/github.com/google/go-cmp/cmp) ([BSD-3-Clause](https://github.com/google/go-cmp/blob/v0.7.0/LICENSE)) - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - [github.com/gregjones/httpcache](https://pkg.go.dev/github.com/gregjones/httpcache) ([MIT](https://github.com/gregjones/httpcache/blob/901d90724c79/LICENSE.txt)) @@ -45,9 +46,9 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - [github.com/jsimonetti/rtnetlink](https://pkg.go.dev/github.com/jsimonetti/rtnetlink) ([MIT](https://github.com/jsimonetti/rtnetlink/blob/v1.4.1/LICENSE.md)) - - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE)) - - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE)) - - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) + - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.0/LICENSE)) + - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.0/internal/snapref/LICENSE)) + - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.0/zstd/internal/xxhash/LICENSE.txt)) - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) @@ -60,7 +61,7 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/prometheus/common](https://pkg.go.dev/github.com/prometheus/common) ([Apache-2.0](https://github.com/prometheus/common/blob/v0.55.0/LICENSE)) - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE)) + - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/ec1d1c113d33/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/b2c15a420186/LICENSE)) - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/5992cb43ca35/LICENSE)) @@ -75,9 +76,9 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.24.0:LICENSE)) - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.23.0:LICENSE)) - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.12.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.31.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.30.0:LICENSE)) - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) From 161a8ea0a1aa37221e8fc61b6fcd78c8b4bc1e80 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 12:25:30 +0000 Subject: [PATCH 0702/1708] .github: Bump actions/cache from 4.2.2 to 4.2.3 Bumps [actions/cache](https://github.com/actions/cache) from 4.2.2 to 4.2.3. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/d4323d4df104b026a6aa633fdb11d772146be0bf...5a3ec84eff668545956fd18022155c47e93e2684) --- updated-dependencies: - dependency-name: actions/cache dependency-version: 4.2.3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/test.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7df01690b..666bd2962 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -79,7 +79,7 @@ jobs: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Cache - uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2 + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -157,7 +157,7 @@ jobs: cache: false - name: Restore Cache - uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2 + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -263,7 +263,7 @@ jobs: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Cache - uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2 + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -333,7 +333,7 @@ jobs: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Cache - uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2 + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -386,7 +386,7 @@ jobs: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Cache - uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2 + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache From dd07cb9b1bfd7533b62968aab0d1de101ad19f23 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 12:23:58 +0000 Subject: [PATCH 0703/1708] .github: Bump github/codeql-action from 3.28.13 to 3.28.14 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.13 to 3.28.14. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/1b549b9259bda1cb5ddde3b41741a82a2d15a841...fc7e4a0fa01c3cca5fd6a1fddec5c0740c977aa2) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.28.14 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index a1c3bd3c0..c1d0936e7 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13 + uses: github/codeql-action/init@fc7e4a0fa01c3cca5fd6a1fddec5c0740c977aa2 # v3.28.14 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13 + uses: github/codeql-action/autobuild@fc7e4a0fa01c3cca5fd6a1fddec5c0740c977aa2 # v3.28.14 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13 + uses: github/codeql-action/analyze@fc7e4a0fa01c3cca5fd6a1fddec5c0740c977aa2 # v3.28.14 From 0655dd7b3da74697e190d67e95c6dbef5ad01060 Mon Sep 17 00:00:00 2001 From: Esteban-Bermudez Date: Mon, 31 Mar 2025 12:12:45 -0700 Subject: [PATCH 0704/1708] client/local: fix path with delete profile request This fixes a bug in the local client where the DELETE request was not being sent correctly. The route was missing a slash before the url and this now matches the switch profile function. Signed-off-by: Esteban-Bermudez --- client/local/local.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/local/local.go b/client/local/local.go index 5312c1d0a..8953b8ee6 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -1504,7 +1504,7 @@ func (lc *Client) SwitchProfile(ctx context.Context, profile ipn.ProfileID) erro // If the profile is the current profile, an empty profile // will be selected as if SwitchToEmptyProfile was called. 
func (lc *Client) DeleteProfile(ctx context.Context, profile ipn.ProfileID) error { - _, err := lc.send(ctx, "DELETE", "/localapi/v0/profiles"+url.PathEscape(string(profile)), http.StatusNoContent, nil) + _, err := lc.send(ctx, "DELETE", "/localapi/v0/profiles/"+url.PathEscape(string(profile)), http.StatusNoContent, nil) return err } From ad2b075d4f412fa473bcf0ccbbf0d49081570237 Mon Sep 17 00:00:00 2001 From: phanirithvij Date: Thu, 27 Mar 2025 17:32:25 +0530 Subject: [PATCH 0705/1708] cmd/nardump: support symlinks, add basic test Signed-off-by: phanirithvij --- cmd/nardump/nardump.go | 26 +++++++++++++++---- cmd/nardump/nardump_test.go | 52 +++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 5 deletions(-) create mode 100644 cmd/nardump/nardump_test.go diff --git a/cmd/nardump/nardump.go b/cmd/nardump/nardump.go index 05be7b65a..f8947b02b 100644 --- a/cmd/nardump/nardump.go +++ b/cmd/nardump/nardump.go @@ -100,14 +100,13 @@ func (nw *narWriter) writeDir(dirPath string) error { sub := path.Join(dirPath, ent.Name()) var err error switch { - case mode.IsRegular(): - err = nw.writeRegular(sub) case mode.IsDir(): err = nw.writeDir(sub) + case mode.IsRegular(): + err = nw.writeRegular(sub) + case mode&os.ModeSymlink != 0: + err = nw.writeSymlink(sub) default: - // TODO(bradfitz): symlink, but requires fighting io/fs a bit - // to get at Readlink or the osFS via fs. But for now - // we don't need symlinks because they're not in Go's archive. return fmt.Errorf("unsupported file type %v at %q", sub, mode) } if err != nil { @@ -143,6 +142,23 @@ func (nw *narWriter) writeRegular(path string) error { return nil } +func (nw *narWriter) writeSymlink(path string) error { + nw.str("(") + nw.str("type") + nw.str("symlink") + nw.str("target") + // broken symlinks are valid in a nar + // given we do os.chdir(dir) and os.dirfs(".") above + // readlink now resolves relative links even if they are broken + link, err := os.Readlink(path) + if err != nil { + return err + } + nw.str(link) + nw.str(")") + return nil +} + func (nw *narWriter) str(s string) { if err := writeString(nw.w, s); err != nil { panic(writeNARError{err}) diff --git a/cmd/nardump/nardump_test.go b/cmd/nardump/nardump_test.go new file mode 100644 index 000000000..3b87e7962 --- /dev/null +++ b/cmd/nardump/nardump_test.go @@ -0,0 +1,52 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "crypto/sha256" + "fmt" + "os" + "runtime" + "testing" +) + +// setupTmpdir sets up a known golden layout, covering all allowed file/folder types in a nar +func setupTmpdir(t *testing.T) string { + tmpdir := t.TempDir() + pwd, _ := os.Getwd() + os.Chdir(tmpdir) + defer os.Chdir(pwd) + os.MkdirAll("sub/dir", 0755) + os.Symlink("brokenfile", "brokenlink") + os.Symlink("sub/dir", "dirl") + os.Symlink("/abs/nonexistentdir", "dirb") + os.Create("sub/dir/file1") + f, _ := os.Create("file2m") + _ = f.Truncate(2 * 1024 * 1024) + f.Close() + os.Symlink("../file2m", "sub/goodlink") + return tmpdir +} + +func TestWriteNar(t *testing.T) { + if runtime.GOOS == "windows" { + // Skip test on Windows as the Nix package manager is not supported on this platform + t.Skip("nix package manager is not available on Windows") + } + dir := setupTmpdir(t) + t.Run("nar", func(t *testing.T) { + // obtained via `nix-store --dump /tmp/... 
| sha256sum` of the above test dir + expected := "727613a36f41030e93a4abf2649c3ec64a2757ccff364e3f6f7d544eb976e442" + h := sha256.New() + os.Chdir(dir) + err := writeNAR(h, os.DirFS(".")) + if err != nil { + t.Fatal(err) + } + hash := fmt.Sprintf("%x", h.Sum(nil)) + if expected != hash { + t.Fatal("sha256sum of nar not matched", hash, expected) + } + }) +} From 03b47a55c7956d872f7e3d54ca5c868e571517ff Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 8 Apr 2025 07:39:52 -0700 Subject: [PATCH 0706/1708] tstest/nettest: pull the non-test Network abstraction out to netx package We want to be able to use the netx.Network (and RealNetwork implemementation) outside of tests, without linking "testing". So split out the non-test stuff of nettest into its own package. We tend to use "foox" as the convention for things we wish were in the standard library's foo package, so "netx" seems consistent. Updates tailscale/corp#27636 Change-Id: I1911d361f4fbdf189837bf629a20f2ebfa863c44 Signed-off-by: Brad Fitzpatrick --- net/netx/netx.go | 120 ++++++++++++++++++++++++++++++++++++++ tstest/nettest/nettest.go | 117 +++---------------------------------- 2 files changed, 127 insertions(+), 110 deletions(-) create mode 100644 net/netx/netx.go diff --git a/net/netx/netx.go b/net/netx/netx.go new file mode 100644 index 000000000..0be277a15 --- /dev/null +++ b/net/netx/netx.go @@ -0,0 +1,120 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package netx contains the Network type to abstract over either a real +// network or a virtual network for testing. +package netx + +import ( + "context" + "fmt" + "net" + "net/netip" + "sync" + + "tailscale.com/net/memnet" +) + +// Network describes a network that can listen and dial. The two common +// implementations are [RealNetwork], using the net package to use the real +// network, or [MemNetwork], using an in-memory network (typically for testing) +type Network interface { + NewLocalTCPListener() net.Listener + Listen(network, address string) (net.Listener, error) + Dial(ctx context.Context, network, address string) (net.Conn, error) +} + +// RealNetwork returns a Network implementation that uses the real +// net package. +func RealNetwork() Network { return realNetwork{} } + +// realNetwork implements [Network] using the real net package. +type realNetwork struct{} + +func (realNetwork) Listen(network, address string) (net.Listener, error) { + return net.Listen(network, address) +} + +func (realNetwork) Dial(ctx context.Context, network, address string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, network, address) +} + +func (realNetwork) NewLocalTCPListener() net.Listener { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + if ln, err = net.Listen("tcp6", "[::1]:0"); err != nil { + panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err)) + } + } + return ln +} + +// MemNetwork returns a Network implementation that uses an in-memory +// network for testing. It is only suitable for tests that do not +// require real network access. +// +// As of 2025-04-08, it only supports TCP. +func MemNetwork() Network { return &memNetwork{} } + +// memNetwork implements [Network] using an in-memory network. 
+type memNetwork struct { + mu sync.Mutex + lns map[string]*memnet.Listener // address -> listener +} + +func (m *memNetwork) Listen(network, address string) (net.Listener, error) { + if network != "tcp" && network != "tcp4" && network != "tcp6" { + return nil, fmt.Errorf("memNetwork: Listen called with unsupported network %q", network) + } + ap, err := netip.ParseAddrPort(address) + if err != nil { + return nil, fmt.Errorf("memNetwork: Listen called with invalid address %q: %w", address, err) + } + + m.mu.Lock() + defer m.mu.Unlock() + + if m.lns == nil { + m.lns = make(map[string]*memnet.Listener) + } + port := ap.Port() + for { + if port == 0 { + port = 33000 + } + key := net.JoinHostPort(ap.Addr().String(), fmt.Sprint(port)) + _, ok := m.lns[key] + if ok { + if ap.Port() != 0 { + return nil, fmt.Errorf("memNetwork: Listen called with duplicate address %q", address) + } + port++ + continue + } + ln := memnet.Listen(key) + m.lns[key] = ln + return ln, nil + } +} + +func (m *memNetwork) NewLocalTCPListener() net.Listener { + ln, err := m.Listen("tcp", "127.0.0.1:0") + if err != nil { + panic(fmt.Sprintf("memNetwork: failed to create local TCP listener: %v", err)) + } + return ln +} + +func (m *memNetwork) Dial(ctx context.Context, network, address string) (net.Conn, error) { + if network != "tcp" && network != "tcp4" && network != "tcp6" { + return nil, fmt.Errorf("memNetwork: Dial called with unsupported network %q", network) + } + m.mu.Lock() + ln, ok := m.lns[address] + m.mu.Unlock() + if !ok { + return nil, fmt.Errorf("memNetwork: Dial called on unknown address %q", address) + } + return ln.Dial(ctx, network, address) +} diff --git a/tstest/nettest/nettest.go b/tstest/nettest/nettest.go index f03d6987b..98662fe39 100644 --- a/tstest/nettest/nettest.go +++ b/tstest/nettest/nettest.go @@ -8,16 +8,14 @@ package nettest import ( "context" "flag" - "fmt" "net" "net/http" "net/http/httptest" - "net/netip" "sync" "testing" - "tailscale.com/net/memnet" "tailscale.com/net/netmon" + "tailscale.com/net/netx" "tailscale.com/util/testenv" ) @@ -32,14 +30,6 @@ func SkipIfNoNetwork(t testing.TB) { } } -// Network is an interface for use in tests that describes either [RealNetwork] -// or [MemNetwork]. -type Network interface { - NewLocalTCPListener() net.Listener - Listen(network, address string) (net.Listener, error) - Dial(ctx context.Context, network, address string) (net.Conn, error) -} - // PreferMemNetwork reports whether the --use-test-memnet flag is set. func PreferMemNetwork() bool { return *useMemNet @@ -49,12 +39,12 @@ func PreferMemNetwork() bool { // whether the --use-test-memnet flag is set. // // Each call generates a new network. -func GetNetwork(tb testing.TB) Network { - var n Network +func GetNetwork(tb testing.TB) netx.Network { + var n netx.Network if PreferMemNetwork() { - n = MemNetwork() + n = netx.MemNetwork() } else { - n = RealNetwork() + n = netx.RealNetwork() } detectLeaks := PreferMemNetwork() || !testenv.InParallelTest(tb) @@ -68,102 +58,9 @@ func GetNetwork(tb testing.TB) Network { return n } -// RealNetwork returns a Network implementation that uses the real -// net package. -func RealNetwork() Network { return realNetwork{} } - -// realNetwork implements [Network] using the real net package. 
-type realNetwork struct{} - -func (realNetwork) Listen(network, address string) (net.Listener, error) { - return net.Listen(network, address) -} - -func (realNetwork) Dial(ctx context.Context, network, address string) (net.Conn, error) { - var d net.Dialer - return d.DialContext(ctx, network, address) -} - -func (realNetwork) NewLocalTCPListener() net.Listener { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - if ln, err = net.Listen("tcp6", "[::1]:0"); err != nil { - panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err)) - } - } - return ln -} - -// MemNetwork returns a Network implementation that uses an in-memory -// network for testing. It is only suitable for tests that do not -// require real network access. -func MemNetwork() Network { return &memNetwork{} } - -// memNetwork implements [Network] using an in-memory network. -type memNetwork struct { - mu sync.Mutex - lns map[string]*memnet.Listener // address -> listener -} - -func (m *memNetwork) Listen(network, address string) (net.Listener, error) { - if network != "tcp" && network != "tcp4" && network != "tcp6" { - return nil, fmt.Errorf("memNetwork: Listen called with unsupported network %q", network) - } - ap, err := netip.ParseAddrPort(address) - if err != nil { - return nil, fmt.Errorf("memNetwork: Listen called with invalid address %q: %w", address, err) - } - - m.mu.Lock() - defer m.mu.Unlock() - - if m.lns == nil { - m.lns = make(map[string]*memnet.Listener) - } - port := ap.Port() - for { - if port == 0 { - port = 33000 - } - key := net.JoinHostPort(ap.Addr().String(), fmt.Sprint(port)) - _, ok := m.lns[key] - if ok { - if ap.Port() != 0 { - return nil, fmt.Errorf("memNetwork: Listen called with duplicate address %q", address) - } - port++ - continue - } - ln := memnet.Listen(key) - m.lns[key] = ln - return ln, nil - } -} - -func (m *memNetwork) NewLocalTCPListener() net.Listener { - ln, err := m.Listen("tcp", "127.0.0.1:0") - if err != nil { - panic(fmt.Sprintf("memNetwork: failed to create local TCP listener: %v", err)) - } - return ln -} - -func (m *memNetwork) Dial(ctx context.Context, network, address string) (net.Conn, error) { - if network != "tcp" && network != "tcp4" && network != "tcp6" { - return nil, fmt.Errorf("memNetwork: Dial called with unsupported network %q", network) - } - m.mu.Lock() - ln, ok := m.lns[address] - m.mu.Unlock() - if !ok { - return nil, fmt.Errorf("memNetwork: Dial called on unknown address %q", address) - } - return ln.Dial(ctx, network, address) -} - // NewHTTPServer starts and returns a new [httptest.Server]. // The caller should call Close when finished, to shut it down. -func NewHTTPServer(net Network, handler http.Handler) *httptest.Server { +func NewHTTPServer(net netx.Network, handler http.Handler) *httptest.Server { ts := NewUnstartedHTTPServer(net, handler) ts.Start() return ts @@ -175,7 +72,7 @@ func NewHTTPServer(net Network, handler http.Handler) *httptest.Server { // StartTLS. // // The caller should call Close when finished, to shut it down. 
-func NewUnstartedHTTPServer(nw Network, handler http.Handler) *httptest.Server {
+func NewUnstartedHTTPServer(nw netx.Network, handler http.Handler) *httptest.Server {
 	s := &httptest.Server{
 		Config: &http.Server{Handler: handler},
 	}

From 265c76dbc5469e852277ef6f1bc691c7895e6e58 Mon Sep 17 00:00:00 2001
From: Brad Fitzpatrick
Date: Tue, 8 Apr 2025 07:49:28 -0700
Subject: [PATCH 0707/1708] all: unify some redundant testing.TB interface copies

I added yet another one in 6d117d64a256234 but that new one is at
the best place in the dependency graph and has the best name, so
let's use that one for everything possible.

types/lazy can't use it for circular dependency reasons, so unexport
that copy at least.

Updates #cleanup

Change-Id: I25db6b6a0d81dbb8e89a0a9080c7f15cbf7aa770
Signed-off-by: Brad Fitzpatrick
---
 cmd/stund/depaware.txt | 3 ++-
 types/lazy/lazy.go | 8 +++++---
 types/logger/logger.go | 9 ++-------
 util/syspolicy/handler.go | 8 ++------
 util/syspolicy/internal/internal.go | 15 ++-------------
 util/syspolicy/internal/loggerx/logger.go | 4 ++--
 util/syspolicy/internal/metrics/metrics.go | 2 +-
 util/syspolicy/internal/metrics/test_handler.go | 5 +++--
 util/syspolicy/policy_keys.go | 2 +-
 util/syspolicy/rsop/resultant_policy.go | 4 ++--
 util/syspolicy/rsop/store_registration.go | 4 ++--
 util/syspolicy/setting/setting.go | 3 ++-
 util/syspolicy/source/test_store.go | 8 ++++----
 util/syspolicy/syspolicy.go | 3 ++-
 util/syspolicy/syspolicy_test.go | 3 ++-
 util/syspolicy/syspolicy_windows.go | 2 +-
 16 files changed, 35 insertions(+), 48 deletions(-)

diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt
index 2326e3a24..6168e1582 100644
--- a/cmd/stund/depaware.txt
+++ b/cmd/stund/depaware.txt
@@ -80,6 +80,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
   tailscale.com/util/nocasemaps from tailscale.com/types/ipproto
   tailscale.com/util/rands from tailscale.com/tsweb
   tailscale.com/util/slicesx from tailscale.com/tailcfg
+  tailscale.com/util/testenv from tailscale.com/types/logger
   tailscale.com/util/vizerror from tailscale.com/tailcfg+
   tailscale.com/version from tailscale.com/envknob+
   tailscale.com/version/distro from tailscale.com/envknob
@@ -186,7 +187,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
   encoding/pem from crypto/tls+
   errors from bufio+
   expvar from github.com/prometheus/client_golang/prometheus+
-  flag from tailscale.com/cmd/stund
+  flag from tailscale.com/cmd/stund+
   fmt from compress/flate+
   go/token from google.golang.org/protobuf/internal/strs
   hash from crypto+
diff --git a/types/lazy/lazy.go b/types/lazy/lazy.go
index c29a03db4..f5d7be494 100644
--- a/types/lazy/lazy.go
+++ b/types/lazy/lazy.go
@@ -120,9 +120,9 @@ func (z *SyncValue[T]) PeekErr() (v T, err error, ok bool) {
 	return zero, nil, false
 }
 
-// TB is a subset of testing.TB that we use to set up test helpers.
+// testing_TB is a subset of testing.TB that we use to set up test helpers.
 // It's defined here to avoid pulling in the testing package.
-type TB interface {
+type testing_TB interface {
 	Helper()
 	Cleanup(func())
 }
@@ -132,7 +132,9 @@ type TB interface {
 // subtests complete.
 // It is not safe for concurrent use and must not be called concurrently with
 // any SyncValue methods, including another call to itself.
-func (z *SyncValue[T]) SetForTest(tb TB, val T, err error) {
+//
+// The provided tb should be a [*testing.T] or [*testing.B].
+func (z *SyncValue[T]) SetForTest(tb testing_TB, val T, err error) { tb.Helper() oldErr, oldVal := z.err.Load(), z.v diff --git a/types/logger/logger.go b/types/logger/logger.go index 66b989480..aeced352e 100644 --- a/types/logger/logger.go +++ b/types/logger/logger.go @@ -24,6 +24,7 @@ import ( "go4.org/mem" "tailscale.com/envknob" "tailscale.com/util/ctxkey" + "tailscale.com/util/testenv" ) // Logf is the basic Tailscale logger type: a printf-like func. @@ -384,16 +385,10 @@ func (a asJSONResult) Format(s fmt.State, verb rune) { s.Write(v) } -// TBLogger is the testing.TB subset needed by TestLogger. -type TBLogger interface { - Helper() - Logf(format string, args ...any) -} - // TestLogger returns a logger that logs to tb.Logf // with a prefix to make it easier to distinguish spam // from explicit test failures. -func TestLogger(tb TBLogger) Logf { +func TestLogger(tb testenv.TB) Logf { return func(format string, args ...any) { tb.Helper() tb.Logf(" ... "+format, args...) diff --git a/util/syspolicy/handler.go b/util/syspolicy/handler.go index f511f0a56..c4bfd9de9 100644 --- a/util/syspolicy/handler.go +++ b/util/syspolicy/handler.go @@ -4,10 +4,10 @@ package syspolicy import ( - "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" + "tailscale.com/util/testenv" ) // TODO(nickkhyl): delete this file once other repos are updated. @@ -38,15 +38,11 @@ func RegisterHandler(h Handler) { rsop.RegisterStore("DeviceHandler", setting.DeviceScope, WrapHandler(h)) } -// TB is a subset of testing.TB that we use to set up test helpers. -// It's defined here to avoid pulling in the testing package. -type TB = internal.TB - // SetHandlerForTest wraps and sets the specified handler as the device's policy // [source.Store] for the duration of tb. // // Deprecated: using [MustRegisterStoreForTest] should be preferred. -func SetHandlerForTest(tb TB, h Handler) { +func SetHandlerForTest(tb testenv.TB, h Handler) { RegisterWellKnownSettingsForTest(tb) MustRegisterStoreForTest(tb, "DeviceHandler-TestOnly", setting.DefaultScope(), WrapHandler(h)) } diff --git a/util/syspolicy/internal/internal.go b/util/syspolicy/internal/internal.go index 2e1737e5b..6ab147de6 100644 --- a/util/syspolicy/internal/internal.go +++ b/util/syspolicy/internal/internal.go @@ -10,6 +10,7 @@ import ( "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/lazy" + "tailscale.com/util/testenv" "tailscale.com/version" ) @@ -25,22 +26,10 @@ func OS() string { return OSForTesting.Get(version.OS) } -// TB is a subset of testing.TB that we use to set up test helpers. -// It's defined here to avoid pulling in the testing package. -type TB interface { - Helper() - Cleanup(func()) - Logf(format string, args ...any) - Error(args ...any) - Errorf(format string, args ...any) - Fatal(args ...any) - Fatalf(format string, args ...any) -} - // EqualJSONForTest compares the JSON in j1 and j2 for semantic equality. // It returns "", "", true if j1 and j2 are equal. Otherwise, it returns // indented versions of j1 and j2 and false. 
-func EqualJSONForTest(tb TB, j1, j2 jsontext.Value) (s1, s2 string, equal bool) { +func EqualJSONForTest(tb testenv.TB, j1, j2 jsontext.Value) (s1, s2 string, equal bool) { tb.Helper() j1 = j1.Clone() j2 = j2.Clone() diff --git a/util/syspolicy/internal/loggerx/logger.go b/util/syspolicy/internal/loggerx/logger.go index c29a5f084..d1f48cbb4 100644 --- a/util/syspolicy/internal/loggerx/logger.go +++ b/util/syspolicy/internal/loggerx/logger.go @@ -10,7 +10,7 @@ import ( "tailscale.com/types/lazy" "tailscale.com/types/logger" - "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/testenv" ) const ( @@ -58,7 +58,7 @@ func verbosef(format string, args ...any) { // SetForTest sets the specified printf and verbosef functions for the duration // of tb and its subtests. -func SetForTest(tb internal.TB, printf, verbosef logger.Logf) { +func SetForTest(tb testenv.TB, printf, verbosef logger.Logf) { lazyPrintf.SetForTest(tb, printf, nil) lazyVerbosef.SetForTest(tb, verbosef, nil) } diff --git a/util/syspolicy/internal/metrics/metrics.go b/util/syspolicy/internal/metrics/metrics.go index 770a34d29..43f2a285a 100644 --- a/util/syspolicy/internal/metrics/metrics.go +++ b/util/syspolicy/internal/metrics/metrics.go @@ -259,7 +259,7 @@ var addMetricTestHook, setMetricTestHook syncs.AtomicValue[metricFn] // SetHooksForTest sets the specified addMetric and setMetric functions // as the metric functions for the duration of tb and all its subtests. -func SetHooksForTest(tb internal.TB, addMetric, setMetric metricFn) { +func SetHooksForTest(tb testenv.TB, addMetric, setMetric metricFn) { oldAddMetric := addMetricTestHook.Swap(addMetric) oldSetMetric := setMetricTestHook.Swap(setMetric) tb.Cleanup(func() { diff --git a/util/syspolicy/internal/metrics/test_handler.go b/util/syspolicy/internal/metrics/test_handler.go index f9e484609..36c3f2cad 100644 --- a/util/syspolicy/internal/metrics/test_handler.go +++ b/util/syspolicy/internal/metrics/test_handler.go @@ -9,6 +9,7 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/set" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/testenv" ) // TestState represents a metric name and its expected value. @@ -19,13 +20,13 @@ type TestState struct { // TestHandler facilitates testing of the code that uses metrics. type TestHandler struct { - t internal.TB + t testenv.TB m map[string]int64 } // NewTestHandler returns a new TestHandler. -func NewTestHandler(t internal.TB) *TestHandler { +func NewTestHandler(t testenv.TB) *TestHandler { return &TestHandler{t, make(map[string]int64)} } diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index a81c1e5d5..8da0e0cc8 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -239,7 +239,7 @@ func WellKnownSettingDefinition(k Key) (*setting.Definition, error) { // RegisterWellKnownSettingsForTest registers all implicit setting definitions // for the duration of the test. -func RegisterWellKnownSettingsForTest(tb TB) { +func RegisterWellKnownSettingsForTest(tb testenv.TB) { tb.Helper() err := setting.SetDefinitionsForTest(tb, implicitDefinitions...) 
if err != nil { diff --git a/util/syspolicy/rsop/resultant_policy.go b/util/syspolicy/rsop/resultant_policy.go index b811a00ee..297d26f9f 100644 --- a/util/syspolicy/rsop/resultant_policy.go +++ b/util/syspolicy/rsop/resultant_policy.go @@ -11,9 +11,9 @@ import ( "sync/atomic" "time" - "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/testenv" "tailscale.com/util/syspolicy/source" ) @@ -449,7 +449,7 @@ func (p *Policy) Close() { } } -func setForTest[T any](tb internal.TB, target *T, newValue T) { +func setForTest[T any](tb testenv.TB, target *T, newValue T) { oldValue := *target tb.Cleanup(func() { *target = oldValue }) *target = newValue diff --git a/util/syspolicy/rsop/store_registration.go b/util/syspolicy/rsop/store_registration.go index f9836846e..a7c354b6d 100644 --- a/util/syspolicy/rsop/store_registration.go +++ b/util/syspolicy/rsop/store_registration.go @@ -9,9 +9,9 @@ import ( "sync/atomic" "time" - "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" + "tailscale.com/util/testenv" ) // ErrAlreadyConsumed is the error returned when [StoreRegistration.ReplaceStore] @@ -33,7 +33,7 @@ func RegisterStore(name string, scope setting.PolicyScope, store source.Store) ( // RegisterStoreForTest is like [RegisterStore], but unregisters the store when // tb and all its subtests complete. -func RegisterStoreForTest(tb internal.TB, name string, scope setting.PolicyScope, store source.Store) (*StoreRegistration, error) { +func RegisterStoreForTest(tb testenv.TB, name string, scope setting.PolicyScope, store source.Store) (*StoreRegistration, error) { setForTest(tb, &policyReloadMinDelay, 10*time.Millisecond) setForTest(tb, &policyReloadMaxDelay, 500*time.Millisecond) diff --git a/util/syspolicy/setting/setting.go b/util/syspolicy/setting/setting.go index 70fb0a931..13c7a2a5f 100644 --- a/util/syspolicy/setting/setting.go +++ b/util/syspolicy/setting/setting.go @@ -16,6 +16,7 @@ import ( "tailscale.com/types/lazy" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/testenv" ) // Scope indicates the broadest scope at which a policy setting may apply, @@ -277,7 +278,7 @@ func DefinitionMapOf(settings []*Definition) (DefinitionMap, error) { // for the test duration. It is not concurrency-safe, but unlike [Register], // it does not panic and can be called anytime. // It returns an error if ds contains two different settings with the same [Key]. -func SetDefinitionsForTest(tb lazy.TB, ds ...*Definition) error { +func SetDefinitionsForTest(tb testenv.TB, ds ...*Definition) error { m, err := DefinitionMapOf(ds) if err != nil { return err diff --git a/util/syspolicy/source/test_store.go b/util/syspolicy/source/test_store.go index e6c09d6b0..4b175611f 100644 --- a/util/syspolicy/source/test_store.go +++ b/util/syspolicy/source/test_store.go @@ -12,8 +12,8 @@ import ( "tailscale.com/util/mak" "tailscale.com/util/set" "tailscale.com/util/slicesx" - "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/testenv" ) var ( @@ -79,7 +79,7 @@ func (r TestExpectedReads) operation() testReadOperation { // TestStore is a [Store] that can be used in tests. type TestStore struct { - tb internal.TB + tb testenv.TB done chan struct{} @@ -98,7 +98,7 @@ type TestStore struct { // NewTestStore returns a new [TestStore]. // The tb will be used to report coding errors detected by the [TestStore]. 
-func NewTestStore(tb internal.TB) *TestStore { +func NewTestStore(tb testenv.TB) *TestStore { m := make(map[setting.Key]any) store := &TestStore{ tb: tb, @@ -112,7 +112,7 @@ func NewTestStore(tb internal.TB) *TestStore { // NewTestStoreOf is a shorthand for [NewTestStore] followed by [TestStore.SetBooleans], // [TestStore.SetUInt64s], [TestStore.SetStrings] or [TestStore.SetStringLists]. -func NewTestStoreOf[T TestValueType](tb internal.TB, settings ...TestSetting[T]) *TestStore { +func NewTestStoreOf[T TestValueType](tb testenv.TB, settings ...TestSetting[T]) *TestStore { store := NewTestStore(tb) switch settings := any(settings).(type) { case []TestSetting[bool]: diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index d925731c3..5d5a283fb 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -20,6 +20,7 @@ import ( "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" + "tailscale.com/util/testenv" ) var ( @@ -46,7 +47,7 @@ func RegisterStore(name string, scope setting.PolicyScope, store source.Store) ( } // MustRegisterStoreForTest is like [rsop.RegisterStoreForTest], but it fails the test if the store could not be registered. -func MustRegisterStoreForTest(tb TB, name string, scope setting.PolicyScope, store source.Store) *rsop.StoreRegistration { +func MustRegisterStoreForTest(tb testenv.TB, name string, scope setting.PolicyScope, store source.Store) *rsop.StoreRegistration { tb.Helper() reg, err := rsop.RegisterStoreForTest(tb, name, scope, store) if err != nil { diff --git a/util/syspolicy/syspolicy_test.go b/util/syspolicy/syspolicy_test.go index a70a49d39..fc01f3645 100644 --- a/util/syspolicy/syspolicy_test.go +++ b/util/syspolicy/syspolicy_test.go @@ -14,6 +14,7 @@ import ( "tailscale.com/util/syspolicy/internal/metrics" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" + "tailscale.com/util/testenv" ) var someOtherError = errors.New("error other than not found") @@ -596,7 +597,7 @@ func TestGetStringArray(t *testing.T) { } } -func registerSingleSettingStoreForTest[T source.TestValueType](tb TB, s source.TestSetting[T]) { +func registerSingleSettingStoreForTest[T source.TestValueType](tb testenv.TB, s source.TestSetting[T]) { policyStore := source.NewTestStoreOf(tb, s) MustRegisterStoreForTest(tb, "TestStore", setting.DeviceScope, policyStore) } diff --git a/util/syspolicy/syspolicy_windows.go b/util/syspolicy/syspolicy_windows.go index 9d57e249e..ca0fd329a 100644 --- a/util/syspolicy/syspolicy_windows.go +++ b/util/syspolicy/syspolicy_windows.go @@ -43,7 +43,7 @@ func init() { // configureSyspolicy configures syspolicy for use on Windows, // either in test or regular builds depending on whether tb has a non-nil value. -func configureSyspolicy(tb internal.TB) error { +func configureSyspolicy(tb testenv.TB) error { const localSystemSID = "S-1-5-18" // Always create and register a machine policy store that reads // policy settings from the HKEY_LOCAL_MACHINE registry hive. From 5ed53c7e39ac4818e46f6d613374143732d36833 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 8 Apr 2025 09:07:31 -0700 Subject: [PATCH 0708/1708] words: C what I did there? 
Updates #words Change-Id: Id025ea5d1856d2ba13fda7549673c7c1712d7213 Signed-off-by: Brad Fitzpatrick --- words/scales.txt | 2 ++ words/tails.txt | 2 ++ 2 files changed, 4 insertions(+) diff --git a/words/scales.txt b/words/scales.txt index fb19cb88d..532734f6d 100644 --- a/words/scales.txt +++ b/words/scales.txt @@ -440,3 +440,5 @@ sidemirror wage salary fujita +caiman +cichlid diff --git a/words/tails.txt b/words/tails.txt index 5b93bdd96..7e35c6970 100644 --- a/words/tails.txt +++ b/words/tails.txt @@ -720,3 +720,5 @@ mining coat follow stalk +caudal +chronicle From b95df54b0667b2f1953870896dff546209240288 Mon Sep 17 00:00:00 2001 From: Chatnoir Miki Date: Wed, 9 Apr 2025 00:38:48 +0800 Subject: [PATCH 0709/1708] nix: update nix and use go 1.24 (#15578) Updates #15015 Signed-off-by: Chatnoir Miki --- flake.lock | 6 +++--- flake.nix | 8 ++++---- go.mod.sri | 2 +- shell.nix | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/flake.lock b/flake.lock index 8c4aa7dfc..05b0f303e 100644 --- a/flake.lock +++ b/flake.lock @@ -36,11 +36,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1724748588, - "narHash": "sha256-NlpGA4+AIf1dKNq76ps90rxowlFXUsV9x7vK/mN37JM=", + "lastModified": 1743938762, + "narHash": "sha256-UgFYn8sGv9B8PoFpUfCa43CjMZBl1x/ShQhRDHBFQdI=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "a6292e34000dc93d43bccf78338770c1c5ec8a99", + "rev": "74a40410369a1c35ee09b8a1abee6f4acbedc059", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 95d5c3035..2f920bfd4 100644 --- a/flake.nix +++ b/flake.nix @@ -68,14 +68,14 @@ # you're an end user you should be prepared for this flake to not # build periodically. tailscale = pkgs: - pkgs.buildGo123Module rec { + pkgs.buildGo124Module rec { name = "tailscale"; src = ./.; vendorHash = pkgs.lib.fileContents ./go.mod.sri; nativeBuildInputs = pkgs.lib.optionals pkgs.stdenv.isLinux [pkgs.makeWrapper]; ldflags = ["-X tailscale.com/version.gitCommitStamp=${tailscaleRev}"]; - CGO_ENABLED = 0; + env.CGO_ENABLED = 0; subPackages = ["cmd/tailscale" "cmd/tailscaled"]; doCheck = false; @@ -118,7 +118,7 @@ gotools graphviz perl - go_1_23 + go_1_24 yarn # qemu and e2fsprogs are needed for natlab @@ -130,4 +130,4 @@ in flake-utils.lib.eachDefaultSystem (system: flakeForSystem nixpkgs system); } -# nix-direnv cache busting line: sha256-xO1DuLWi6/lpA9ubA2ZYVJM+CkVNA5IaVGZxX9my0j0= +# nix-direnv cache busting line: sha256-av4kr09rjNRmag94ziNjJuI/cg8b8lAD3Tk24t/ezH4= diff --git a/go.mod.sri b/go.mod.sri index 4abb3c516..6c8357e04 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-xO1DuLWi6/lpA9ubA2ZYVJM+CkVNA5IaVGZxX9my0j0= +sha256-av4kr09rjNRmag94ziNjJuI/cg8b8lAD3Tk24t/ezH4= \ No newline at end of file diff --git a/shell.nix b/shell.nix index 4d2e24366..bb8eacb67 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-xO1DuLWi6/lpA9ubA2ZYVJM+CkVNA5IaVGZxX9my0j0= +# nix-direnv cache busting line: sha256-av4kr09rjNRmag94ziNjJuI/cg8b8lAD3Tk24t/ezH4= From fb96137d79628db5493603ac2fc67d2a92f6bc01 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 8 Apr 2025 08:32:27 -0700 Subject: [PATCH 0710/1708] net/{netx,memnet},all: add netx.DialFunc, move memnet Network impl This adds netx.DialFunc, unifying a type we have a bazillion other places, giving it now a nice short name that's clickable in editors, etc. 
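A minimal sketch of the kind of call site this unifies (illustrative only, not
code from this series; the newClient helper is invented for the example, while
netx.DialFunc, netx.RealNetwork, and memnet.Network are the real pieces being
introduced or moved here):

    package main

    import (
        "net/http"

        "tailscale.com/net/memnet"
        "tailscale.com/net/netx"
    )

    // newClient is a hypothetical helper: it accepts any netx.DialFunc rather
    // than spelling out func(ctx context.Context, network, address string)
    // (net.Conn, error) at every call site.
    func newClient(dial netx.DialFunc) *http.Client {
        // netx.DialFunc has the same shape as http.Transport.DialContext.
        return &http.Client{Transport: &http.Transport{DialContext: dial}}
    }

    func main() {
        prod := newClient(netx.RealNetwork().Dial) // production: dial the real network
        var mem memnet.Network                     // tests: in-memory network; zero value is usable
        test := newClient(mem.Dial)
        _, _ = prod, test
    }

Swapping the dialer is the point: nettest.GetNetwork can hand back either
implementation and the code accepting the netx.DialFunc doesn't change.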
That highlighted that my earlier move (03b47a55c7956) of stuff from nettest into netx moved too much: it also dragged along the memnet impl, meaning all users of netx.DialFunc who just wanted netx for the type definition were instead also pulling in all of memnet. So move the memnet implementation netx.Network into memnet, a package we already had. Then use netx.DialFunc in a bunch of places. I'm sure I missed some. And plenty remain in other repos, to be updated later. Updates tailscale/corp#27636 Change-Id: I7296cd4591218e8624e214f8c70dab05fb884e95 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 1 + cmd/sniproxy/handlers.go | 3 +- cmd/tailscale/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + control/controlclient/direct.go | 7 +- control/controlhttp/client.go | 3 +- control/controlhttp/constants.go | 3 +- control/controlhttp/http_test.go | 4 +- derp/derphttp/derphttp_client.go | 3 +- k8s-operator/sessionrecording/hijacker.go | 3 +- .../sessionrecording/hijacker_test.go | 4 +- logpolicy/logpolicy.go | 3 +- net/dns/resolver/forwarder.go | 3 +- net/dnscache/dnscache.go | 9 +- net/memnet/memnet.go | 79 +++++++++++++++++ net/netx/netx.go | 87 +++---------------- net/tsdial/tsdial.go | 5 +- sessionrecording/connect.go | 10 +-- tstest/natlab/vnet/vnet.go | 7 +- tstest/nettest/nettest.go | 3 +- wgengine/netstack/netstack.go | 5 +- wgengine/netstack/netstack_test.go | 3 +- 23 files changed, 135 insertions(+), 113 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 5d375a515..085a58383 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -111,6 +111,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa 💣 tailscale.com/net/netmon from tailscale.com/derp/derphttp+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp tailscale.com/net/netutil from tailscale.com/client/local + tailscale.com/net/netx from tailscale.com/net/dnscache+ tailscale.com/net/sockstats from tailscale.com/derp/derphttp tailscale.com/net/stun from tailscale.com/net/stunserver tailscale.com/net/stunserver from tailscale.com/cmd/derper diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 7c87649d1..7fd4c4b21 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -866,6 +866,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ W 💣 tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ + tailscale.com/net/netx from tailscale.com/control/controlclient+ tailscale.com/net/packet from tailscale.com/net/connstats+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ diff --git a/cmd/sniproxy/handlers.go b/cmd/sniproxy/handlers.go index 102110fe3..1973eecc0 100644 --- a/cmd/sniproxy/handlers.go +++ b/cmd/sniproxy/handlers.go @@ -14,6 +14,7 @@ import ( "github.com/inetaf/tcpproxy" "tailscale.com/net/netutil" + "tailscale.com/net/netx" ) type tcpRoundRobinHandler struct { @@ -22,7 +23,7 @@ type tcpRoundRobinHandler struct { To []string // DialContext is used to make the outgoing TCP connection. - DialContext func(ctx context.Context, network, address string) (net.Conn, error) + DialContext netx.DialFunc // ReachableIPs enumerates the IP addresses this handler is reachable on. 
ReachableIPs []netip.Addr diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 431bf7b71..9728a2ff4 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -112,6 +112,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep 💣 tailscale.com/net/netmon from tailscale.com/cmd/tailscale/cli+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ tailscale.com/net/netutil from tailscale.com/client/local+ + tailscale.com/net/netx from tailscale.com/control/controlhttp+ tailscale.com/net/ping from tailscale.com/net/netcheck tailscale.com/net/portmapper from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/sockstats from tailscale.com/control/controlhttp+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 1fbf7caf1..394056295 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -316,6 +316,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/net/netns from tailscale.com/cmd/tailscaled+ W 💣 tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ + tailscale.com/net/netx from tailscale.com/control/controlclient+ tailscale.com/net/packet from tailscale.com/net/connstats+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 70ebe2f23..c8e885799 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -37,6 +37,7 @@ import ( "tailscale.com/net/dnsfallback" "tailscale.com/net/netmon" "tailscale.com/net/netutil" + "tailscale.com/net/netx" "tailscale.com/net/tlsdial" "tailscale.com/net/tsdial" "tailscale.com/net/tshttpproxy" @@ -272,7 +273,7 @@ func NewDirect(opts Options) (*Direct, error) { tr.Proxy = tshttpproxy.ProxyFromEnvironment tshttpproxy.SetTransportGetProxyConnectHeader(tr) tr.TLSClientConfig = tlsdial.Config(serverURL.Hostname(), opts.HealthTracker, tr.TLSClientConfig) - var dialFunc dialFunc + var dialFunc netx.DialFunc dialFunc, interceptedDial = makeScreenTimeDetectingDialFunc(opts.Dialer.SystemDial) tr.DialContext = dnscache.Dialer(dialFunc, dnsCache) tr.DialTLSContext = dnscache.TLSDialer(dialFunc, dnsCache, tr.TLSClientConfig) @@ -1749,14 +1750,12 @@ func addLBHeader(req *http.Request, nodeKey key.NodePublic) { } } -type dialFunc = func(ctx context.Context, network, addr string) (net.Conn, error) - // makeScreenTimeDetectingDialFunc returns dialFunc, optionally wrapped (on // Apple systems) with a func that sets the returned atomic.Bool for whether // Screen Time seemed to intercept the connection. // // The returned *atomic.Bool is nil on non-Apple systems. -func makeScreenTimeDetectingDialFunc(dial dialFunc) (dialFunc, *atomic.Bool) { +func makeScreenTimeDetectingDialFunc(dial netx.DialFunc) (netx.DialFunc, *atomic.Bool) { switch runtime.GOOS { case "darwin", "ios": // Continue below. 
diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index 44de6b0df..869bcb599 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -44,6 +44,7 @@ import ( "tailscale.com/net/dnscache" "tailscale.com/net/dnsfallback" "tailscale.com/net/netutil" + "tailscale.com/net/netx" "tailscale.com/net/sockstats" "tailscale.com/net/tlsdial" "tailscale.com/net/tshttpproxy" @@ -494,7 +495,7 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad dns = a.resolver() } - var dialer dnscache.DialContextFunc + var dialer netx.DialFunc if a.Dialer != nil { dialer = a.Dialer } else { diff --git a/control/controlhttp/constants.go b/control/controlhttp/constants.go index 80b3fe64c..12038fae4 100644 --- a/control/controlhttp/constants.go +++ b/control/controlhttp/constants.go @@ -12,6 +12,7 @@ import ( "tailscale.com/health" "tailscale.com/net/dnscache" "tailscale.com/net/netmon" + "tailscale.com/net/netx" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/key" @@ -66,7 +67,7 @@ type Dialer struct { // Dialer is the dialer used to make outbound connections. // // If not specified, this defaults to net.Dialer.DialContext. - Dialer dnscache.DialContextFunc + Dialer netx.DialFunc // DNSCache is the caching Resolver used by this Dialer. // diff --git a/control/controlhttp/http_test.go b/control/controlhttp/http_test.go index aef916ef6..f556640f8 100644 --- a/control/controlhttp/http_test.go +++ b/control/controlhttp/http_test.go @@ -26,8 +26,8 @@ import ( "tailscale.com/control/controlhttp/controlhttpcommon" "tailscale.com/control/controlhttp/controlhttpserver" "tailscale.com/health" - "tailscale.com/net/dnscache" "tailscale.com/net/netmon" + "tailscale.com/net/netx" "tailscale.com/net/socks5" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" @@ -760,7 +760,7 @@ func TestDialPlan(t *testing.T) { type closeTrackDialer struct { t testing.TB - inner dnscache.DialContextFunc + inner netx.DialFunc mu sync.Mutex conns map[*closeTrackConn]bool } diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index 319c02429..21ee4a671 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -35,6 +35,7 @@ import ( "tailscale.com/net/dnscache" "tailscale.com/net/netmon" "tailscale.com/net/netns" + "tailscale.com/net/netx" "tailscale.com/net/sockstats" "tailscale.com/net/tlsdial" "tailscale.com/net/tshttpproxy" @@ -587,7 +588,7 @@ func (c *Client) connect(ctx context.Context, caller string) (client *derp.Clien // // The primary use for this is the derper mesh mode to connect to each // other over a VPC network. -func (c *Client) SetURLDialer(dialer func(ctx context.Context, network, addr string) (net.Conn, error)) { +func (c *Client) SetURLDialer(dialer netx.DialFunc) { c.dialer = dialer } diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index 43aa14e61..a9ed65896 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -25,6 +25,7 @@ import ( "tailscale.com/k8s-operator/sessionrecording/spdy" "tailscale.com/k8s-operator/sessionrecording/tsrecorder" "tailscale.com/k8s-operator/sessionrecording/ws" + "tailscale.com/net/netx" "tailscale.com/sessionrecording" "tailscale.com/tailcfg" "tailscale.com/tsnet" @@ -102,7 +103,7 @@ type Hijacker struct { // connection succeeds. In case of success, returns a list with a single // successful recording attempt and an error channel. 
If the connection errors // after having been established, an error is sent down the channel. -type RecorderDialFn func(context.Context, []netip.AddrPort, sessionrecording.DialFunc) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error) +type RecorderDialFn func(context.Context, []netip.AddrPort, netx.DialFunc) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error) // Hijack hijacks a 'kubectl exec' session and configures for the session // contents to be sent to a recorder. diff --git a/k8s-operator/sessionrecording/hijacker_test.go b/k8s-operator/sessionrecording/hijacker_test.go index e166ce63b..880015b22 100644 --- a/k8s-operator/sessionrecording/hijacker_test.go +++ b/k8s-operator/sessionrecording/hijacker_test.go @@ -19,7 +19,7 @@ import ( "go.uber.org/zap" "tailscale.com/client/tailscale/apitype" "tailscale.com/k8s-operator/sessionrecording/fakes" - "tailscale.com/sessionrecording" + "tailscale.com/net/netx" "tailscale.com/tailcfg" "tailscale.com/tsnet" "tailscale.com/tstest" @@ -80,7 +80,7 @@ func Test_Hijacker(t *testing.T) { h := &Hijacker{ connectToRecorder: func(context.Context, []netip.AddrPort, - sessionrecording.DialFunc, + netx.DialFunc, ) (wc io.WriteCloser, rec []*tailcfg.SSHRecordingAttempt, _ <-chan error, err error) { if tt.failRecorderConnect { err = errors.New("test") diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 11c6bf14c..b005cfff6 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -42,6 +42,7 @@ import ( "tailscale.com/net/netknob" "tailscale.com/net/netmon" "tailscale.com/net/netns" + "tailscale.com/net/netx" "tailscale.com/net/tlsdial" "tailscale.com/net/tshttpproxy" "tailscale.com/paths" @@ -769,7 +770,7 @@ func (p *Policy) Shutdown(ctx context.Context) error { // // The netMon parameter is optional. It should be specified in environments where // Tailscaled is manipulating the routing table. 
-func MakeDialFunc(netMon *netmon.Monitor, logf logger.Logf) func(ctx context.Context, netw, addr string) (net.Conn, error) { +func MakeDialFunc(netMon *netmon.Monitor, logf logger.Logf) netx.DialFunc { if netMon == nil { netMon = netmon.NewStatic() } diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index c00dea1ae..c7b9439e6 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -31,6 +31,7 @@ import ( "tailscale.com/net/dnscache" "tailscale.com/net/neterror" "tailscale.com/net/netmon" + "tailscale.com/net/netx" "tailscale.com/net/sockstats" "tailscale.com/net/tsdial" "tailscale.com/types/dnstype" @@ -739,7 +740,7 @@ func (f *forwarder) sendUDP(ctx context.Context, fq *forwardQuery, rr resolverAn return out, nil } -func (f *forwarder) getDialerType() dnscache.DialContextFunc { +func (f *forwarder) getDialerType() netx.DialFunc { if f.controlKnobs != nil && f.controlKnobs.UserDialUseRoutes.Load() { // It is safe to use UserDial as it dials external servers without going through Tailscale // and closes connections on interface change in the same way as SystemDial does, diff --git a/net/dnscache/dnscache.go b/net/dnscache/dnscache.go index 2cbea6c0f..96550cbb1 100644 --- a/net/dnscache/dnscache.go +++ b/net/dnscache/dnscache.go @@ -19,6 +19,7 @@ import ( "time" "tailscale.com/envknob" + "tailscale.com/net/netx" "tailscale.com/types/logger" "tailscale.com/util/cloudenv" "tailscale.com/util/singleflight" @@ -355,10 +356,8 @@ func (r *Resolver) addIPCache(host string, ip, ip6 netip.Addr, allIPs []netip.Ad } } -type DialContextFunc func(ctx context.Context, network, address string) (net.Conn, error) - // Dialer returns a wrapped DialContext func that uses the provided dnsCache. -func Dialer(fwd DialContextFunc, dnsCache *Resolver) DialContextFunc { +func Dialer(fwd netx.DialFunc, dnsCache *Resolver) netx.DialFunc { d := &dialer{ fwd: fwd, dnsCache: dnsCache, @@ -369,7 +368,7 @@ func Dialer(fwd DialContextFunc, dnsCache *Resolver) DialContextFunc { // dialer is the config and accumulated state for a dial func returned by Dialer. type dialer struct { - fwd DialContextFunc + fwd netx.DialFunc dnsCache *Resolver mu sync.Mutex @@ -653,7 +652,7 @@ func v6addrs(aa []netip.Addr) (ret []netip.Addr) { // TLSDialer is like Dialer but returns a func suitable for using with net/http.Transport.DialTLSContext. // It returns a *tls.Conn type on success. // On TLS cert validation failure, it can invoke a backup DNS resolution strategy. -func TLSDialer(fwd DialContextFunc, dnsCache *Resolver, tlsConfigBase *tls.Config) DialContextFunc { +func TLSDialer(fwd netx.DialFunc, dnsCache *Resolver, tlsConfigBase *tls.Config) netx.DialFunc { tcpDialer := Dialer(fwd, dnsCache) return func(ctx context.Context, network, address string) (net.Conn, error) { host, _, err := net.SplitHostPort(address) diff --git a/net/memnet/memnet.go b/net/memnet/memnet.go index c8799bc17..7c2435684 100644 --- a/net/memnet/memnet.go +++ b/net/memnet/memnet.go @@ -6,3 +6,82 @@ // in tests and other situations where you don't want to use the // network. package memnet + +import ( + "context" + "fmt" + "net" + "net/netip" + "sync" + + "tailscale.com/net/netx" +) + +var _ netx.Network = (*Network)(nil) + +// Network implements [Network] using an in-memory network, usually +// used for testing. +// +// As of 2025-04-08, it only supports TCP. +// +// Its zero value is a valid [netx.Network] implementation. 
+type Network struct { + mu sync.Mutex + lns map[string]*Listener // address -> listener +} + +func (m *Network) Listen(network, address string) (net.Listener, error) { + if network != "tcp" && network != "tcp4" && network != "tcp6" { + return nil, fmt.Errorf("memNetwork: Listen called with unsupported network %q", network) + } + ap, err := netip.ParseAddrPort(address) + if err != nil { + return nil, fmt.Errorf("memNetwork: Listen called with invalid address %q: %w", address, err) + } + + m.mu.Lock() + defer m.mu.Unlock() + + if m.lns == nil { + m.lns = make(map[string]*Listener) + } + port := ap.Port() + for { + if port == 0 { + port = 33000 + } + key := net.JoinHostPort(ap.Addr().String(), fmt.Sprint(port)) + _, ok := m.lns[key] + if ok { + if ap.Port() != 0 { + return nil, fmt.Errorf("memNetwork: Listen called with duplicate address %q", address) + } + port++ + continue + } + ln := Listen(key) + m.lns[key] = ln + return ln, nil + } +} + +func (m *Network) NewLocalTCPListener() net.Listener { + ln, err := m.Listen("tcp", "127.0.0.1:0") + if err != nil { + panic(fmt.Sprintf("memNetwork: failed to create local TCP listener: %v", err)) + } + return ln +} + +func (m *Network) Dial(ctx context.Context, network, address string) (net.Conn, error) { + if network != "tcp" && network != "tcp4" && network != "tcp6" { + return nil, fmt.Errorf("memNetwork: Dial called with unsupported network %q", network) + } + m.mu.Lock() + ln, ok := m.lns[address] + m.mu.Unlock() + if !ok { + return nil, fmt.Errorf("memNetwork: Dial called on unknown address %q", address) + } + return ln.Dial(ctx, network, address) +} diff --git a/net/netx/netx.go b/net/netx/netx.go index 0be277a15..014daa9a7 100644 --- a/net/netx/netx.go +++ b/net/netx/netx.go @@ -1,23 +1,25 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Package netx contains the Network type to abstract over either a real -// network or a virtual network for testing. +// Package netx contains types to describe and abstract over how dialing and +// listening are performed. package netx import ( "context" "fmt" "net" - "net/netip" - "sync" - - "tailscale.com/net/memnet" ) +// DialFunc is a function that dials a network address. +// +// It's the type implemented by net.Dialer.DialContext or required +// by net/http.Transport.DialContext, etc. +type DialFunc func(ctx context.Context, network, address string) (net.Conn, error) + // Network describes a network that can listen and dial. The two common // implementations are [RealNetwork], using the net package to use the real -// network, or [MemNetwork], using an in-memory network (typically for testing) +// network, or [memnet.Network], using an in-memory network (typically for testing) type Network interface { NewLocalTCPListener() net.Listener Listen(network, address string) (net.Listener, error) @@ -44,77 +46,8 @@ func (realNetwork) NewLocalTCPListener() net.Listener { ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { if ln, err = net.Listen("tcp6", "[::1]:0"); err != nil { - panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err)) + panic(fmt.Sprintf("failed to listen on either IPv4 or IPv6 localhost port: %v", err)) } } return ln } - -// MemNetwork returns a Network implementation that uses an in-memory -// network for testing. It is only suitable for tests that do not -// require real network access. -// -// As of 2025-04-08, it only supports TCP. 
-func MemNetwork() Network { return &memNetwork{} } - -// memNetwork implements [Network] using an in-memory network. -type memNetwork struct { - mu sync.Mutex - lns map[string]*memnet.Listener // address -> listener -} - -func (m *memNetwork) Listen(network, address string) (net.Listener, error) { - if network != "tcp" && network != "tcp4" && network != "tcp6" { - return nil, fmt.Errorf("memNetwork: Listen called with unsupported network %q", network) - } - ap, err := netip.ParseAddrPort(address) - if err != nil { - return nil, fmt.Errorf("memNetwork: Listen called with invalid address %q: %w", address, err) - } - - m.mu.Lock() - defer m.mu.Unlock() - - if m.lns == nil { - m.lns = make(map[string]*memnet.Listener) - } - port := ap.Port() - for { - if port == 0 { - port = 33000 - } - key := net.JoinHostPort(ap.Addr().String(), fmt.Sprint(port)) - _, ok := m.lns[key] - if ok { - if ap.Port() != 0 { - return nil, fmt.Errorf("memNetwork: Listen called with duplicate address %q", address) - } - port++ - continue - } - ln := memnet.Listen(key) - m.lns[key] = ln - return ln, nil - } -} - -func (m *memNetwork) NewLocalTCPListener() net.Listener { - ln, err := m.Listen("tcp", "127.0.0.1:0") - if err != nil { - panic(fmt.Sprintf("memNetwork: failed to create local TCP listener: %v", err)) - } - return ln -} - -func (m *memNetwork) Dial(ctx context.Context, network, address string) (net.Conn, error) { - if network != "tcp" && network != "tcp4" && network != "tcp6" { - return nil, fmt.Errorf("memNetwork: Dial called with unsupported network %q", network) - } - m.mu.Lock() - ln, ok := m.lns[address] - m.mu.Unlock() - if !ok { - return nil, fmt.Errorf("memNetwork: Dial called on unknown address %q", address) - } - return ln.Dial(ctx, network, address) -} diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index 8fddd63f2..1188a3077 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -23,6 +23,7 @@ import ( "tailscale.com/net/netknob" "tailscale.com/net/netmon" "tailscale.com/net/netns" + "tailscale.com/net/netx" "tailscale.com/net/tsaddr" "tailscale.com/types/logger" "tailscale.com/types/netmap" @@ -71,7 +72,7 @@ type Dialer struct { netnsDialerOnce sync.Once netnsDialer netns.Dialer - sysDialForTest func(_ context.Context, network, addr string) (net.Conn, error) // or nil + sysDialForTest netx.DialFunc // or nil routes atomic.Pointer[bart.Table[bool]] // or nil if UserDial should not use routes. `true` indicates routes that point into the Tailscale interface @@ -364,7 +365,7 @@ func (d *Dialer) logf(format string, args ...any) { // SetSystemDialerForTest sets an alternate function to use for SystemDial // instead of netns.Dialer. This is intended for use with nettest.MemoryNetwork. -func (d *Dialer) SetSystemDialerForTest(fn func(ctx context.Context, network, addr string) (net.Conn, error)) { +func (d *Dialer) SetSystemDialerForTest(fn netx.DialFunc) { testenv.AssertInTest() d.sysDialForTest = fn } diff --git a/sessionrecording/connect.go b/sessionrecording/connect.go index 94761393f..dc697d071 100644 --- a/sessionrecording/connect.go +++ b/sessionrecording/connect.go @@ -20,6 +20,7 @@ import ( "time" "golang.org/x/net/http2" + "tailscale.com/net/netx" "tailscale.com/tailcfg" "tailscale.com/util/httpm" "tailscale.com/util/multierr" @@ -40,9 +41,6 @@ const ( // in tests. var uploadAckWindow = 30 * time.Second -// DialFunc is a function for dialing the recorder. 
-type DialFunc func(ctx context.Context, network, host string) (net.Conn, error) - // ConnectToRecorder connects to the recorder at any of the provided addresses. // It returns the first successful response, or a multierr if all attempts fail. // @@ -55,7 +53,7 @@ type DialFunc func(ctx context.Context, network, host string) (net.Conn, error) // attempts are in order the recorder(s) was attempted. If successful a // successful connection is made, the last attempt in the slice is the // attempt for connected recorder. -func ConnectToRecorder(ctx context.Context, recs []netip.AddrPort, dial DialFunc) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error) { +func ConnectToRecorder(ctx context.Context, recs []netip.AddrPort, dial netx.DialFunc) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error) { if len(recs) == 0 { return nil, nil, nil, errors.New("no recorders configured") } @@ -293,7 +291,7 @@ func (u *readCounter) Read(buf []byte) (int, error) { // clientHTTP1 returns a claassic http.Client with a per-dial context. It uses // dialCtx and adds a 5s timeout to it. -func clientHTTP1(dialCtx context.Context, dial DialFunc) *http.Client { +func clientHTTP1(dialCtx context.Context, dial netx.DialFunc) *http.Client { tr := http.DefaultTransport.(*http.Transport).Clone() tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { perAttemptCtx, cancel := context.WithTimeout(ctx, perDialAttemptTimeout) @@ -313,7 +311,7 @@ func clientHTTP1(dialCtx context.Context, dial DialFunc) *http.Client { // clientHTTP2 is like clientHTTP1 but returns an http.Client suitable for h2c // requests (HTTP/2 over plaintext). Unfortunately the same client does not // work for HTTP/1 so we need to split these up. -func clientHTTP2(dialCtx context.Context, dial DialFunc) *http.Client { +func clientHTTP2(dialCtx context.Context, dial netx.DialFunc) *http.Client { return &http.Client{ Transport: &http2.Transport{ // Allow "http://" scheme in URLs. 
diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index e3ecf0f75..1fa170d87 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -54,6 +54,7 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/net/netutil" + "tailscale.com/net/netx" "tailscale.com/net/stun" "tailscale.com/syncs" "tailscale.com/tailcfg" @@ -649,7 +650,7 @@ type Server struct { mu sync.Mutex agentConnWaiter map[*node]chan<- struct{} // signaled after added to set agentConns set.Set[*agentConn] // not keyed by node; should be small/cheap enough to scan all - agentDialer map[*node]DialFunc + agentDialer map[*node]netx.DialFunc } func (s *Server) logf(format string, args ...any) { @@ -664,8 +665,6 @@ func (s *Server) SetLoggerForTest(logf func(format string, args ...any)) { s.optLogf = logf } -type DialFunc func(ctx context.Context, network, address string) (net.Conn, error) - var derpMap = &tailcfg.DERPMap{ Regions: map[int]*tailcfg.DERPRegion{ 1: { @@ -2130,7 +2129,7 @@ type NodeAgentClient struct { HTTPClient *http.Client } -func (s *Server) NodeAgentDialer(n *Node) DialFunc { +func (s *Server) NodeAgentDialer(n *Node) netx.DialFunc { s.mu.Lock() defer s.mu.Unlock() diff --git a/tstest/nettest/nettest.go b/tstest/nettest/nettest.go index 98662fe39..c78677dd4 100644 --- a/tstest/nettest/nettest.go +++ b/tstest/nettest/nettest.go @@ -14,6 +14,7 @@ import ( "sync" "testing" + "tailscale.com/net/memnet" "tailscale.com/net/netmon" "tailscale.com/net/netx" "tailscale.com/util/testenv" @@ -42,7 +43,7 @@ func PreferMemNetwork() bool { func GetNetwork(tb testing.TB) netx.Network { var n netx.Network if PreferMemNetwork() { - n = netx.MemNetwork() + n = &memnet.Network{} } else { n = netx.RealNetwork() } diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 591bedde4..04bab0cf9 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -38,6 +38,7 @@ import ( "tailscale.com/net/dns" "tailscale.com/net/ipset" "tailscale.com/net/netaddr" + "tailscale.com/net/netx" "tailscale.com/net/packet" "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" @@ -208,7 +209,7 @@ type Impl struct { // TCP connection to another host (e.g. in subnet router mode). // // This is currently only used in tests. - forwardDialFunc func(context.Context, string, string) (net.Conn, error) + forwardDialFunc netx.DialFunc // forwardInFlightPerClientDropped is a metric that tracks how many // in-flight TCP forward requests were dropped due to the per-client @@ -1457,7 +1458,7 @@ func (ns *Impl) forwardTCP(getClient func(...tcpip.SettableSocketOption) *gonet. }() // Attempt to dial the outbound connection before we accept the inbound one. - var dialFunc func(context.Context, string, string) (net.Conn, error) + var dialFunc netx.DialFunc if ns.forwardDialFunc != nil { dialFunc = ns.forwardDialFunc } else { diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go index 823acee91..79a380e84 100644 --- a/wgengine/netstack/netstack_test.go +++ b/wgengine/netstack/netstack_test.go @@ -22,6 +22,7 @@ import ( "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/store/mem" "tailscale.com/metrics" + "tailscale.com/net/netx" "tailscale.com/net/packet" "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" @@ -512,7 +513,7 @@ func tcp4syn(tb testing.TB, src, dst netip.Addr, sport, dport uint16) []byte { // makeHangDialer returns a dialer that notifies the returned channel when a // connection is dialed and then hangs until the test finishes. 
-func makeHangDialer(tb testing.TB) (func(context.Context, string, string) (net.Conn, error), chan struct{}) { +func makeHangDialer(tb testing.TB) (netx.DialFunc, chan struct{}) { done := make(chan struct{}) tb.Cleanup(func() { close(done) From 025fe72448b43d5bb45e673c68c84a24fce0e33e Mon Sep 17 00:00:00 2001 From: James Tucker Date: Tue, 1 Apr 2025 18:52:45 -0700 Subject: [PATCH 0711/1708] cmd/natc: fix handling of upstream and downstream nxdomain Ensure that the upstream is always queried, so that if upstream is going to NXDOMAIN natc will also return NXDOMAIN rather than returning address allocations. At this time both IPv4 and IPv6 are still returned if upstream has a result, regardless of upstream support - this is ~ok as we're proxying. Rewrite the tests to be once again slightly closer to integration tests, but they're still very rough and in need of a refactor. Further refactors are probably needed implementation side too, as this removed rather than added units. Updates #15367 Signed-off-by: James Tucker --- cmd/natc/natc.go | 253 ++++++++++++++--------------- cmd/natc/natc_test.go | 358 +++++++++++++++++++++++++++++------------- 2 files changed, 369 insertions(+), 242 deletions(-) diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index 270524879..a80e4a42a 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -26,14 +26,15 @@ import ( "go4.org/netipx" "golang.org/x/net/dns/dnsmessage" "tailscale.com/client/local" + "tailscale.com/client/tailscale/apitype" "tailscale.com/cmd/natc/ippool" "tailscale.com/envknob" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/net/netutil" - "tailscale.com/tailcfg" "tailscale.com/tsnet" "tailscale.com/tsweb" + "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/wgengine/netstack" ) @@ -148,14 +149,15 @@ func main() { v6ULA := ula(uint16(*siteID)) c := &connector{ ts: ts, - lc: lc, + whois: lc, v6ULA: v6ULA, ignoreDsts: ignoreDstTable, ipPool: &ippool.IPPool{V6ULA: v6ULA, IPSet: addrPool}, routes: routes, dnsAddr: dnsAddr, + resolver: net.DefaultResolver, } - c.run(ctx) + c.run(ctx, lc) } func calculateAddresses(prefixes []netip.Prefix) (*netipx.IPSet, netip.Addr, *netipx.IPSet) { @@ -170,12 +172,20 @@ func calculateAddresses(prefixes []netip.Prefix) (*netipx.IPSet, netip.Addr, *ne return routesToAdvertise, dnsAddr, addrPool } +type lookupNetIPer interface { + LookupNetIP(ctx context.Context, net, host string) ([]netip.Addr, error) +} + +type whoiser interface { + WhoIs(ctx context.Context, remoteAddr string) (*apitype.WhoIsResponse, error) +} + type connector struct { // ts is the tsnet.Server used to host the connector. ts *tsnet.Server - // lc is the local.Client used to interact with the tsnet.Server hosting this + // whois is the local.Client used to interact with the tsnet.Server hosting this // connector. - lc *local.Client + whois whoiser // dnsAddr is the IPv4 address to listen on for DNS requests. It is used to // prevent the app connector from assigning it to a domain. @@ -197,7 +207,11 @@ type connector struct { // natc behavior, which would return a dummy ip address pointing at natc). ignoreDsts *bart.Table[bool] + // ipPool contains the per-peer IPv4 address assignments. ipPool *ippool.IPPool + + // resolver is used to lookup IP addresses for DNS queries. + resolver lookupNetIPer } // v6ULA is the ULA prefix used by the app connector to assign IPv6 addresses. @@ -217,8 +231,8 @@ func ula(siteID uint16) netip.Prefix { // // The passed in context is only used for the initial setup. The connector runs // forever. 
-func (c *connector) run(ctx context.Context) { - if _, err := c.lc.EditPrefs(ctx, &ipn.MaskedPrefs{ +func (c *connector) run(ctx context.Context, lc *local.Client) { + if _, err := lc.EditPrefs(ctx, &ipn.MaskedPrefs{ AdvertiseRoutesSet: true, Prefs: ipn.Prefs{ AdvertiseRoutes: append(c.routes.Prefixes(), c.v6ULA), @@ -251,26 +265,6 @@ func (c *connector) serveDNS() { } } -func lookupDestinationIP(domain string) ([]netip.Addr, error) { - netIPs, err := net.LookupIP(domain) - if err != nil { - var dnsError *net.DNSError - if errors.As(err, &dnsError) && dnsError.IsNotFound { - return nil, nil - } else { - return nil, err - } - } - var addrs []netip.Addr - for _, ip := range netIPs { - a, ok := netip.AddrFromSlice(ip) - if ok { - addrs = append(addrs, a) - } - } - return addrs, nil -} - // handleDNS handles a DNS request to the app connector. // It generates a response based on the request and the node that sent it. // @@ -285,7 +279,7 @@ func lookupDestinationIP(domain string) ([]netip.Addr, error) { func (c *connector) handleDNS(pc net.PacketConn, buf []byte, remoteAddr *net.UDPAddr) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - who, err := c.lc.WhoIs(ctx, remoteAddr.String()) + who, err := c.whois.WhoIs(ctx, remoteAddr.String()) if err != nil { log.Printf("HandleDNS(remote=%s): WhoIs failed: %v\n", remoteAddr.String(), err) return @@ -298,143 +292,133 @@ func (c *connector) handleDNS(pc net.PacketConn, buf []byte, remoteAddr *net.UDP return } - // If there are destination ips that we don't want to route, we - // have to do a dns lookup here to find the destination ip. - if c.ignoreDsts != nil { - if len(msg.Questions) > 0 { - q := msg.Questions[0] - switch q.Type { - case dnsmessage.TypeAAAA, dnsmessage.TypeA: - dstAddrs, err := lookupDestinationIP(q.Name.String()) + var resolves map[string][]netip.Addr + var addrQCount int + for _, q := range msg.Questions { + if q.Type != dnsmessage.TypeA && q.Type != dnsmessage.TypeAAAA { + continue + } + addrQCount++ + if _, ok := resolves[q.Name.String()]; !ok { + addrs, err := c.resolver.LookupNetIP(ctx, "ip", q.Name.String()) + var dnsErr *net.DNSError + if errors.As(err, &dnsErr) && dnsErr.IsNotFound { + continue + } + if err != nil { + log.Printf("HandleDNS(remote=%s): lookup destination failed: %v\n", remoteAddr.String(), err) + return + } + // Note: If _any_ destination is ignored, pass through all of the resolved + // addresses as-is. + // + // This could result in some odd split-routing if there was a mix of + // ignored and non-ignored addresses, but it's currently the user + // preferred behavior. + if !c.ignoreDestination(addrs) { + addrs, err = c.ipPool.IPForDomain(who.Node.ID, q.Name.String()) if err != nil { log.Printf("HandleDNS(remote=%s): lookup destination failed: %v\n", remoteAddr.String(), err) return } - if c.ignoreDestination(dstAddrs) { - bs, err := dnsResponse(&msg, dstAddrs) - // TODO (fran): treat as SERVFAIL - if err != nil { - log.Printf("HandleDNS(remote=%s): generate ignore response failed: %v\n", remoteAddr.String(), err) - return - } - _, err = pc.WriteTo(bs, remoteAddr) - if err != nil { - log.Printf("HandleDNS(remote=%s): write failed: %v\n", remoteAddr.String(), err) - } - return - } } + mak.Set(&resolves, q.Name.String(), addrs) } } - // None of the destination IP addresses match an ignore destination prefix, do - // the natc thing. 
- - resp, err := c.generateDNSResponse(&msg, who.Node.ID) - // TODO (fran): treat as SERVFAIL - if err != nil { - log.Printf("HandleDNS(remote=%s): connector handling failed: %v\n", remoteAddr.String(), err) - return - } - // TODO (fran): treat as NXDOMAIN - if len(resp) == 0 { - return - } - // This connector handled the DNS request - _, err = pc.WriteTo(resp, remoteAddr) - if err != nil { - log.Printf("HandleDNS(remote=%s): write failed: %v\n", remoteAddr.String(), err) - } -} - -// tsMBox is the mailbox used in SOA records. -// The convention is to replace the @ symbol with a dot. -// So in this case, the mailbox is support.tailscale.com. with the trailing dot -// to indicate that it is a fully qualified domain name. -var tsMBox = dnsmessage.MustNewName("support.tailscale.com.") -// generateDNSResponse generates a DNS response for the given request. The from -// argument is the NodeID of the node that sent the request. -func (c *connector) generateDNSResponse(req *dnsmessage.Message, from tailcfg.NodeID) ([]byte, error) { - var addrs []netip.Addr - if len(req.Questions) > 0 { - switch req.Questions[0].Type { - case dnsmessage.TypeAAAA, dnsmessage.TypeA: - var err error - addrs, err = c.ipPool.IPForDomain(from, req.Questions[0].Name.String()) - if err != nil { - return nil, err - } - } + rcode := dnsmessage.RCodeSuccess + if addrQCount > 0 && len(resolves) == 0 { + rcode = dnsmessage.RCodeNameError } - return dnsResponse(req, addrs) -} -// dnsResponse makes a DNS response for the natc. If the dnsmessage is requesting TypeAAAA -// or TypeA the provided addrs of the requested type will be used. -func dnsResponse(req *dnsmessage.Message, addrs []netip.Addr) ([]byte, error) { b := dnsmessage.NewBuilder(nil, dnsmessage.Header{ - ID: req.Header.ID, + ID: msg.Header.ID, Response: true, Authoritative: true, + RCode: rcode, }) b.EnableCompression() - if len(req.Questions) == 0 { - return b.Finish() - } - q := req.Questions[0] if err := b.StartQuestions(); err != nil { - return nil, err + log.Printf("HandleDNS(remote=%s): dnsmessage start questions failed: %v\n", remoteAddr.String(), err) + return } - if err := b.Question(q); err != nil { - return nil, err + + for _, q := range msg.Questions { + b.Question(q) } + if err := b.StartAnswers(); err != nil { - return nil, err + log.Printf("HandleDNS(remote=%s): dnsmessage start answers failed: %v\n", remoteAddr.String(), err) + return } - switch q.Type { - case dnsmessage.TypeAAAA, dnsmessage.TypeA: - want6 := q.Type == dnsmessage.TypeAAAA - for _, ip := range addrs { - if want6 != ip.Is6() { - continue + + for _, q := range msg.Questions { + switch q.Type { + case dnsmessage.TypeSOA: + if err := b.SOAResource( + dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120}, + dnsmessage.SOAResource{NS: q.Name, MBox: tsMBox, Serial: 2023030600, + Refresh: 120, Retry: 120, Expire: 120, MinTTL: 60}, + ); err != nil { + log.Printf("HandleDNS(remote=%s): dnsmessage SOA resource failed: %v\n", remoteAddr.String(), err) + return } - if want6 { + case dnsmessage.TypeNS: + if err := b.NSResource( + dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120}, + dnsmessage.NSResource{NS: tsMBox}, + ); err != nil { + log.Printf("HandleDNS(remote=%s): dnsmessage NS resource failed: %v\n", remoteAddr.String(), err) + return + } + case dnsmessage.TypeAAAA: + for _, addr := range resolves[q.Name.String()] { + if !addr.Is6() { + continue + } if err := b.AAAAResource( - dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 5}, - 
dnsmessage.AAAAResource{AAAA: ip.As16()}, + dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120}, + dnsmessage.AAAAResource{AAAA: addr.As16()}, ); err != nil { - return nil, err + log.Printf("HandleDNS(remote=%s): dnsmessage AAAA resource failed: %v\n", remoteAddr.String(), err) + return + } + } + case dnsmessage.TypeA: + for _, addr := range resolves[q.Name.String()] { + if !addr.Is4() { + continue } - } else { if err := b.AResource( - dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 5}, - dnsmessage.AResource{A: ip.As4()}, + dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120}, + dnsmessage.AResource{A: addr.As4()}, ); err != nil { - return nil, err + log.Printf("HandleDNS(remote=%s): dnsmessage A resource failed: %v\n", remoteAddr.String(), err) + return } } } - case dnsmessage.TypeSOA: - if err := b.SOAResource( - dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120}, - dnsmessage.SOAResource{NS: q.Name, MBox: tsMBox, Serial: 2023030600, - Refresh: 120, Retry: 120, Expire: 120, MinTTL: 60}, - ); err != nil { - return nil, err - } - case dnsmessage.TypeNS: - if err := b.NSResource( - dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120}, - dnsmessage.NSResource{NS: tsMBox}, - ); err != nil { - return nil, err - } } - return b.Finish() + + out, err := b.Finish() + if err != nil { + log.Printf("HandleDNS(remote=%s): dnsmessage finish failed: %v\n", remoteAddr.String(), err) + return + } + _, err = pc.WriteTo(out, remoteAddr) + if err != nil { + log.Printf("HandleDNS(remote=%s): write failed: %v\n", remoteAddr.String(), err) + } } +// tsMBox is the mailbox used in SOA records. +// The convention is to replace the @ symbol with a dot. +// So in this case, the mailbox is support.tailscale.com. with the trailing dot +// to indicate that it is a fully qualified domain name. +var tsMBox = dnsmessage.MustNewName("support.tailscale.com.") + // handleTCPFlow handles a TCP flow from the given source to the given // destination. It uses the source address to determine the node that sent the // request and the destination address to determine the domain that the request @@ -443,7 +427,7 @@ func dnsResponse(req *dnsmessage.Message, addrs []netip.Addr) ([]byte, error) { func (c *connector) handleTCPFlow(src, dst netip.AddrPort) (handler func(net.Conn), intercept bool) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - who, err := c.lc.WhoIs(ctx, src.Addr().String()) + who, err := c.whois.WhoIs(ctx, src.Addr().String()) cancel() if err != nil { log.Printf("HandleTCPFlow: WhoIs failed: %v\n", err) @@ -461,6 +445,9 @@ func (c *connector) handleTCPFlow(src, dst netip.AddrPort) (handler func(net.Con // ignoreDestination reports whether any of the provided dstAddrs match the prefixes configured // in --ignore-destinations func (c *connector) ignoreDestination(dstAddrs []netip.Addr) bool { + if c.ignoreDsts == nil { + return false + } for _, a := range dstAddrs { if _, ok := c.ignoreDsts.Lookup(a); ok { return true @@ -488,6 +475,8 @@ func proxyTCPConn(c net.Conn, dest string) { return netutil.NewOneConnListener(c, nil), nil }, } + // XXX(raggi): if the connection here resolves to an ignored destination, + // the connection should be closed/failed. 
p.AddRoute(addrPortStr, &tcpproxy.DialProxy{ Addr: fmt.Sprintf("%s:%s", dest, port), }) diff --git a/cmd/natc/natc_test.go b/cmd/natc/natc_test.go index 09ade0a98..8fe38de1c 100644 --- a/cmd/natc/natc_test.go +++ b/cmd/natc/natc_test.go @@ -4,14 +4,20 @@ package main import ( + "context" + "fmt" + "io" + "net" "net/netip" "testing" + "time" "github.com/gaissmai/bart" - "github.com/google/go-cmp/cmp" "golang.org/x/net/dns/dnsmessage" + "tailscale.com/client/tailscale/apitype" "tailscale.com/cmd/natc/ippool" "tailscale.com/tailcfg" + "tailscale.com/util/must" ) func prefixEqual(a, b netip.Prefix) bool { @@ -41,22 +47,86 @@ func TestULA(t *testing.T) { } } +type recordingPacketConn struct { + writes [][]byte +} + +func (w *recordingPacketConn) WriteTo(b []byte, addr net.Addr) (int, error) { + w.writes = append(w.writes, b) + return len(b), nil +} + +func (w *recordingPacketConn) ReadFrom(b []byte) (int, net.Addr, error) { + return 0, nil, io.EOF +} + +func (w *recordingPacketConn) Close() error { + return nil +} + +func (w *recordingPacketConn) LocalAddr() net.Addr { + return nil +} + +func (w *recordingPacketConn) RemoteAddr() net.Addr { + return nil +} + +func (w *recordingPacketConn) SetDeadline(t time.Time) error { + return nil +} + +func (w *recordingPacketConn) SetReadDeadline(t time.Time) error { + return nil +} + +func (w *recordingPacketConn) SetWriteDeadline(t time.Time) error { + return nil +} + +type resolver struct { + resolves map[string][]netip.Addr + fails map[string]bool +} + +func (r *resolver) LookupNetIP(ctx context.Context, _net, host string) ([]netip.Addr, error) { + if addrs, ok := r.resolves[host]; ok { + return addrs, nil + } + if _, ok := r.fails[host]; ok { + return nil, &net.DNSError{IsTimeout: false, IsNotFound: false, Name: host, IsTemporary: true} + } + return nil, &net.DNSError{IsNotFound: true, Name: host} +} + +type whois struct { + peers map[string]*apitype.WhoIsResponse +} + +func (w *whois) WhoIs(ctx context.Context, remoteAddr string) (*apitype.WhoIsResponse, error) { + addr := netip.MustParseAddrPort(remoteAddr).Addr().String() + if peer, ok := w.peers[addr]; ok { + return peer, nil + } + return nil, fmt.Errorf("peer not found") +} + func TestDNSResponse(t *testing.T) { tests := []struct { name string questions []dnsmessage.Question - addrs []netip.Addr wantEmpty bool wantAnswers []struct { name string qType dnsmessage.Type addr netip.Addr } + wantNXDOMAIN bool + wantIgnored bool }{ { name: "empty_request", questions: []dnsmessage.Question{}, - addrs: []netip.Addr{}, wantEmpty: false, wantAnswers: nil, }, @@ -69,7 +139,6 @@ func TestDNSResponse(t *testing.T) { Class: dnsmessage.ClassINET, }, }, - addrs: []netip.Addr{netip.MustParseAddr("100.64.1.5")}, wantAnswers: []struct { name string qType dnsmessage.Type @@ -78,7 +147,7 @@ func TestDNSResponse(t *testing.T) { { name: "example.com.", qType: dnsmessage.TypeA, - addr: netip.MustParseAddr("100.64.1.5"), + addr: netip.MustParseAddr("100.64.0.0"), }, }, }, @@ -91,7 +160,6 @@ func TestDNSResponse(t *testing.T) { Class: dnsmessage.ClassINET, }, }, - addrs: []netip.Addr{netip.MustParseAddr("fd7a:115c:a1e0:a99c:0001:0505:0505:0505")}, wantAnswers: []struct { name string qType dnsmessage.Type @@ -100,7 +168,7 @@ func TestDNSResponse(t *testing.T) { { name: "example.com.", qType: dnsmessage.TypeAAAA, - addr: netip.MustParseAddr("fd7a:115c:a1e0:a99c:0001:0505:0505:0505"), + addr: netip.MustParseAddr("fd7a:115c:a1e0::"), }, }, }, @@ -113,7 +181,6 @@ func TestDNSResponse(t *testing.T) { Class: dnsmessage.ClassINET, 
}, }, - addrs: []netip.Addr{}, wantAnswers: nil, }, { @@ -125,89 +192,210 @@ func TestDNSResponse(t *testing.T) { Class: dnsmessage.ClassINET, }, }, - addrs: []netip.Addr{}, wantAnswers: nil, }, + { + name: "nxdomain", + questions: []dnsmessage.Question{ + { + Name: dnsmessage.MustNewName("noexist.example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + }, + wantNXDOMAIN: true, + }, + { + name: "servfail", + questions: []dnsmessage.Question{ + { + Name: dnsmessage.MustNewName("fail.example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + }, + wantEmpty: true, // TODO: pass through instead? + }, + { + name: "ignored", + questions: []dnsmessage.Question{ + { + Name: dnsmessage.MustNewName("ignore.example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + }, + wantAnswers: []struct { + name string + qType dnsmessage.Type + addr netip.Addr + }{ + { + name: "ignore.example.com.", + qType: dnsmessage.TypeA, + addr: netip.MustParseAddr("8.8.4.4"), + }, + }, + wantIgnored: true, + }, + } + + var rpc recordingPacketConn + remoteAddr := must.Get(net.ResolveUDPAddr("udp", "100.64.254.1:12345")) + + routes, dnsAddr, addrPool := calculateAddresses([]netip.Prefix{netip.MustParsePrefix("10.64.0.0/24")}) + v6ULA := ula(1) + c := connector{ + resolver: &resolver{ + resolves: map[string][]netip.Addr{ + "example.com.": { + netip.MustParseAddr("8.8.8.8"), + netip.MustParseAddr("2001:4860:4860::8888"), + }, + "ignore.example.com.": { + netip.MustParseAddr("8.8.4.4"), + }, + }, + fails: map[string]bool{ + "fail.example.com.": true, + }, + }, + whois: &whois{ + peers: map[string]*apitype.WhoIsResponse{ + "100.64.254.1": { + Node: &tailcfg.Node{ID: 123}, + }, + }, + }, + ignoreDsts: &bart.Table[bool]{}, + routes: routes, + v6ULA: v6ULA, + ipPool: &ippool.IPPool{V6ULA: v6ULA, IPSet: addrPool}, + dnsAddr: dnsAddr, } + c.ignoreDsts.Insert(netip.MustParsePrefix("8.8.4.4/32"), true) for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - req := &dnsmessage.Message{ - Header: dnsmessage.Header{ + rb := dnsmessage.NewBuilder(nil, + dnsmessage.Header{ ID: 1234, }, - Questions: tc.questions, + ) + must.Do(rb.StartQuestions()) + for _, q := range tc.questions { + rb.Question(q) } - resp, err := dnsResponse(req, tc.addrs) - if err != nil { - t.Fatalf("dnsResponse() error = %v", err) + c.handleDNS(&rpc, must.Get(rb.Finish()), remoteAddr) + + writes := rpc.writes + rpc.writes = rpc.writes[:0] + + if tc.wantEmpty { + if len(writes) != 0 { + t.Errorf("handleDNS() returned non-empty response when expected empty") + } + return } - if tc.wantEmpty && len(resp) != 0 { - t.Errorf("dnsResponse() returned non-empty response when expected empty") + if !tc.wantEmpty && len(writes) != 1 { + t.Fatalf("handleDNS() returned an unexpected number of responses: %d, want 1", len(writes)) } - if !tc.wantEmpty && len(resp) == 0 { - t.Errorf("dnsResponse() returned empty response when expected non-empty") + resp := writes[0] + var msg dnsmessage.Message + err := msg.Unpack(resp) + if err != nil { + t.Fatalf("Failed to unpack response: %v", err) } - if len(resp) > 0 { - var msg dnsmessage.Message - err = msg.Unpack(resp) - if err != nil { - t.Fatalf("Failed to unpack response: %v", err) - } + if !msg.Header.Response { + t.Errorf("Response header is not set") + } - if !msg.Header.Response { - t.Errorf("Response header is not set") - } + if msg.Header.ID != 1234 { + t.Errorf("Response ID = %d, want %d", msg.Header.ID, 1234) + } - if msg.Header.ID != req.Header.ID { - 
t.Errorf("Response ID = %d, want %d", msg.Header.ID, req.Header.ID) - } + if len(tc.wantAnswers) > 0 { + if len(msg.Answers) != len(tc.wantAnswers) { + t.Errorf("got %d answers, want %d:\n%s", len(msg.Answers), len(tc.wantAnswers), msg.GoString()) + } else { + for i, want := range tc.wantAnswers { + ans := msg.Answers[i] - if len(tc.wantAnswers) > 0 { - if len(msg.Answers) != len(tc.wantAnswers) { - t.Errorf("got %d answers, want %d", len(msg.Answers), len(tc.wantAnswers)) - } else { - for i, want := range tc.wantAnswers { - ans := msg.Answers[i] + gotName := ans.Header.Name.String() + if gotName != want.name { + t.Errorf("answer[%d] name = %s, want %s", i, gotName, want.name) + } - gotName := ans.Header.Name.String() - if gotName != want.name { - t.Errorf("answer[%d] name = %s, want %s", i, gotName, want.name) - } + if ans.Header.Type != want.qType { + t.Errorf("answer[%d] type = %v, want %v", i, ans.Header.Type, want.qType) + } - if ans.Header.Type != want.qType { - t.Errorf("answer[%d] type = %v, want %v", i, ans.Header.Type, want.qType) + switch want.qType { + case dnsmessage.TypeA: + if ans.Body.(*dnsmessage.AResource) == nil { + t.Errorf("answer[%d] not an A record", i) + continue } + resource := ans.Body.(*dnsmessage.AResource) + gotIP := netip.AddrFrom4([4]byte(resource.A)) - var gotIP netip.Addr - switch want.qType { - case dnsmessage.TypeA: - if ans.Body.(*dnsmessage.AResource) == nil { - t.Errorf("answer[%d] not an A record", i) - continue - } - resource := ans.Body.(*dnsmessage.AResource) - gotIP = netip.AddrFrom4([4]byte(resource.A)) - case dnsmessage.TypeAAAA: - if ans.Body.(*dnsmessage.AAAAResource) == nil { - t.Errorf("answer[%d] not an AAAA record", i) - continue + var ips []netip.Addr + if tc.wantIgnored { + ips = must.Get(c.resolver.LookupNetIP(t.Context(), "ip4", want.name)) + } else { + ips = must.Get(c.ipPool.IPForDomain(tailcfg.NodeID(123), want.name)) + } + var wantIP netip.Addr + for _, ip := range ips { + if ip.Is4() { + wantIP = ip + break } - resource := ans.Body.(*dnsmessage.AAAAResource) - gotIP = netip.AddrFrom16([16]byte(resource.AAAA)) } + if gotIP != wantIP { + t.Errorf("answer[%d] IP = %s, want %s", i, gotIP, wantIP) + } + case dnsmessage.TypeAAAA: + if ans.Body.(*dnsmessage.AAAAResource) == nil { + t.Errorf("answer[%d] not an AAAA record", i) + continue + } + resource := ans.Body.(*dnsmessage.AAAAResource) + gotIP := netip.AddrFrom16([16]byte(resource.AAAA)) - if gotIP != want.addr { - t.Errorf("answer[%d] IP = %s, want %s", i, gotIP, want.addr) + var ips []netip.Addr + if tc.wantIgnored { + ips = must.Get(c.resolver.LookupNetIP(t.Context(), "ip6", want.name)) + } else { + ips = must.Get(c.ipPool.IPForDomain(tailcfg.NodeID(123), want.name)) + } + var wantIP netip.Addr + for _, ip := range ips { + if ip.Is6() { + wantIP = ip + break + } + } + if gotIP != wantIP { + t.Errorf("answer[%d] IP = %s, want %s", i, gotIP, wantIP) } } } } } + + if tc.wantNXDOMAIN { + if msg.RCode != dnsmessage.RCodeNameError { + t.Errorf("expected NXDOMAIN, got %v", msg.RCode) + } + if len(msg.Answers) != 0 { + t.Errorf("expected no answers, got %d", len(msg.Answers)) + } + } }) } } @@ -257,53 +445,3 @@ func TestIgnoreDestination(t *testing.T) { }) } } - -func TestConnectorGenerateDNSResponse(t *testing.T) { - v6ULA := netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80") - routes, dnsAddr, addrPool := calculateAddresses([]netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")}) - c := &connector{ - v6ULA: v6ULA, - ipPool: &ippool.IPPool{V6ULA: v6ULA, IPSet: addrPool}, - routes: 
routes, - dnsAddr: dnsAddr, - } - - req := &dnsmessage.Message{ - Header: dnsmessage.Header{ID: 1234}, - Questions: []dnsmessage.Question{ - { - Name: dnsmessage.MustNewName("example.com."), - Type: dnsmessage.TypeA, - Class: dnsmessage.ClassINET, - }, - }, - } - - nodeID := tailcfg.NodeID(12345) - - resp1, err := c.generateDNSResponse(req, nodeID) - if err != nil { - t.Fatalf("generateDNSResponse() error = %v", err) - } - if len(resp1) == 0 { - t.Fatalf("generateDNSResponse() returned empty response") - } - - resp2, err := c.generateDNSResponse(req, nodeID) - if err != nil { - t.Fatalf("generateDNSResponse() second call error = %v", err) - } - - if !cmp.Equal(resp1, resp2) { - t.Errorf("generateDNSResponse() responses differ between calls") - } - - var msg dnsmessage.Message - err = msg.Unpack(resp1) - if err != nil { - t.Fatalf("dnsmessage Unpack error = %v", err) - } - if len(msg.Answers) != 1 { - t.Fatalf("expected 1 answer, got: %d", len(msg.Answers)) - } -} From 6088ee311fb67ae28f12883bf452d0698b55b529 Mon Sep 17 00:00:00 2001 From: Jason O'Donnell <2160810+jasonodonnell@users.noreply.github.com> Date: Tue, 8 Apr 2025 14:12:17 -0400 Subject: [PATCH 0712/1708] cmd/tailscale/cli: return error on duplicate multi-value flags (#15534) Some CLI flags support multiple values separated by commas. These flags are intended to be declared only once and will silently ignore subsequent instances. This will now throw an error if multiple instances of advertise-tags and advertise-routes are detected. Fixes #6813 Signed-off-by: Jason O'Donnell <2160810+jasonodonnell@users.noreply.github.com> --- cmd/tailscale/cli/cli_test.go | 75 +++++++++++++++++++++++++++++++---- cmd/tailscale/cli/set.go | 8 ++-- cmd/tailscale/cli/set_test.go | 2 +- cmd/tailscale/cli/up.go | 35 ++++++++++++---- 4 files changed, 100 insertions(+), 20 deletions(-) diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 2d02b6b7a..49d8e9c4a 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -657,6 +657,13 @@ func upArgsFromOSArgs(goos string, flagArgs ...string) (args upArgsT) { return } +func newSingleUseStringForTest(v string) singleUseStringFlag { + return singleUseStringFlag{ + set: true, + value: v, + } +} + func TestPrefsFromUpArgs(t *testing.T) { tests := []struct { name string @@ -721,14 +728,14 @@ func TestPrefsFromUpArgs(t *testing.T) { { name: "error_advertise_route_invalid_ip", args: upArgsT{ - advertiseRoutes: "foo", + advertiseRoutes: newSingleUseStringForTest("foo"), }, wantErr: `"foo" is not a valid IP address or CIDR prefix`, }, { name: "error_advertise_route_unmasked_bits", args: upArgsT{ - advertiseRoutes: "1.2.3.4/16", + advertiseRoutes: newSingleUseStringForTest("1.2.3.4/16"), }, wantErr: `1.2.3.4/16 has non-address bits set; expected 1.2.0.0/16`, }, @@ -749,7 +756,7 @@ func TestPrefsFromUpArgs(t *testing.T) { { name: "error_tag_prefix", args: upArgsT{ - advertiseTags: "foo", + advertiseTags: newSingleUseStringForTest("foo"), }, wantErr: `tag: "foo": tags must start with 'tag:'`, }, @@ -829,7 +836,7 @@ func TestPrefsFromUpArgs(t *testing.T) { name: "via_route_good", goos: "linux", args: upArgsT{ - advertiseRoutes: "fd7a:115c:a1e0:b1a::bb:10.0.0.0/112", + advertiseRoutes: newSingleUseStringForTest("fd7a:115c:a1e0:b1a::bb:10.0.0.0/112"), netfilterMode: "off", }, want: &ipn.Prefs{ @@ -848,7 +855,7 @@ func TestPrefsFromUpArgs(t *testing.T) { name: "via_route_good_16_bit", goos: "linux", args: upArgsT{ - advertiseRoutes: "fd7a:115c:a1e0:b1a::aabb:10.0.0.0/112", + 
advertiseRoutes: newSingleUseStringForTest("fd7a:115c:a1e0:b1a::aabb:10.0.0.0/112"), netfilterMode: "off", }, want: &ipn.Prefs{ @@ -867,7 +874,7 @@ func TestPrefsFromUpArgs(t *testing.T) { name: "via_route_short_prefix", goos: "linux", args: upArgsT{ - advertiseRoutes: "fd7a:115c:a1e0:b1a::/64", + advertiseRoutes: newSingleUseStringForTest("fd7a:115c:a1e0:b1a::/64"), netfilterMode: "off", }, wantErr: "fd7a:115c:a1e0:b1a::/64 4-in-6 prefix must be at least a /96", @@ -876,7 +883,7 @@ func TestPrefsFromUpArgs(t *testing.T) { name: "via_route_short_reserved_siteid", goos: "linux", args: upArgsT{ - advertiseRoutes: "fd7a:115c:a1e0:b1a:1234:5678::/112", + advertiseRoutes: newSingleUseStringForTest("fd7a:115c:a1e0:b1a:1234:5678::/112"), netfilterMode: "off", }, wantErr: "route fd7a:115c:a1e0:b1a:1234:5678::/112 contains invalid site ID 12345678; must be 0xffff or less", @@ -1106,6 +1113,7 @@ func TestUpdatePrefs(t *testing.T) { }, env: upCheckEnv{backendState: "Running"}, }, + { // Issue 3808: explicitly empty --operator= should clear value. name: "explicit_empty_operator", @@ -1598,3 +1606,56 @@ func TestDepsNoCapture(t *testing.T) { }.Check(t) } + +func TestSingleUseStringFlag(t *testing.T) { + tests := []struct { + name string + setValues []string + wantValue string + wantErr bool + }{ + { + name: "set once", + setValues: []string{"foo"}, + wantValue: "foo", + wantErr: false, + }, + { + name: "set twice", + setValues: []string{"foo", "bar"}, + wantValue: "foo", + wantErr: true, + }, + { + name: "set nothing", + setValues: []string{}, + wantValue: "", + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var flag singleUseStringFlag + var lastErr error + + for _, val := range tt.setValues { + lastErr = flag.Set(val) + } + + if tt.wantErr { + if lastErr == nil { + t.Errorf("expected error on final Set, got nil") + } + } else { + if lastErr != nil { + t.Errorf("unexpected error on final Set: %v", lastErr) + } + } + + if got := flag.String(); got != tt.wantValue { + t.Errorf("String() = %q, want %q", got, tt.wantValue) + } + }) + } +} diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 07b3fe9ce..ab113f6e0 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -49,7 +49,7 @@ type setArgsT struct { runSSH bool runWebClient bool hostname string - advertiseRoutes string + advertiseRoutes singleUseStringFlag advertiseDefaultRoute bool advertiseConnector bool opUser string @@ -75,7 +75,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf.BoolVar(&setArgs.shieldsUp, "shields-up", false, "don't allow incoming connections") setf.BoolVar(&setArgs.runSSH, "ssh", false, "run an SSH server, permitting access per tailnet admin's declared policy") setf.StringVar(&setArgs.hostname, "hostname", "", "hostname to use instead of the one provided by the OS") - setf.StringVar(&setArgs.advertiseRoutes, "advertise-routes", "", "routes to advertise to other nodes (comma-separated, e.g. \"10.0.0.0/8,192.168.0.0/24\") or empty string to not advertise routes") + setf.Var(&setArgs.advertiseRoutes, "advertise-routes", "routes to advertise to other nodes (comma-separated, e.g. 
\"10.0.0.0/8,192.168.0.0/24\") or empty string to not advertise routes") setf.BoolVar(&setArgs.advertiseDefaultRoute, "advertise-exit-node", false, "offer to be an exit node for internet traffic for the tailnet") setf.BoolVar(&setArgs.advertiseConnector, "advertise-connector", false, "offer to be an app connector for domain specific internet traffic for the tailnet") setf.BoolVar(&setArgs.updateCheck, "update-check", true, "notify about available Tailscale updates") @@ -259,11 +259,11 @@ func runSet(ctx context.Context, args []string) (retErr error) { // setArgs is the parsed command-line arguments. func calcAdvertiseRoutesForSet(advertiseExitNodeSet, advertiseRoutesSet bool, curPrefs *ipn.Prefs, setArgs setArgsT) (routes []netip.Prefix, err error) { if advertiseExitNodeSet && advertiseRoutesSet { - return netutil.CalcAdvertiseRoutes(setArgs.advertiseRoutes, setArgs.advertiseDefaultRoute) + return netutil.CalcAdvertiseRoutes(setArgs.advertiseRoutes.String(), setArgs.advertiseDefaultRoute) } if advertiseRoutesSet { - return netutil.CalcAdvertiseRoutes(setArgs.advertiseRoutes, curPrefs.AdvertisesExitNode()) + return netutil.CalcAdvertiseRoutes(setArgs.advertiseRoutes.String(), curPrefs.AdvertisesExitNode()) } if advertiseExitNodeSet { alreadyAdvertisesExitNode := curPrefs.AdvertisesExitNode() diff --git a/cmd/tailscale/cli/set_test.go b/cmd/tailscale/cli/set_test.go index a2f211f8c..8ef85be12 100644 --- a/cmd/tailscale/cli/set_test.go +++ b/cmd/tailscale/cli/set_test.go @@ -116,7 +116,7 @@ func TestCalcAdvertiseRoutesForSet(t *testing.T) { sa.advertiseDefaultRoute = *tc.setExit } if tc.setRoutes != nil { - sa.advertiseRoutes = *tc.setRoutes + sa.advertiseRoutes = newSingleUseStringForTest(*tc.setRoutes) } got, err := calcAdvertiseRoutesForSet(tc.setExit != nil, tc.setRoutes != nil, curPrefs, sa) if err != nil { diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 26db85f13..22276cd99 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -82,6 +82,25 @@ func acceptRouteDefault(goos string) bool { return p.DefaultRouteAll(goos) } +// singleUseStringFlag will throw an error if the flag is specified more than once. +type singleUseStringFlag struct { + set bool + value string +} + +func (s singleUseStringFlag) String() string { + return s.value +} + +func (s *singleUseStringFlag) Set(v string) error { + if s.set { + return errors.New("flag can only be specified once") + } + s.set = true + s.value = v + return nil +} + var upFlagSet = newUpFlagSet(effectiveGOOS(), &upArgsGlobal, "up") // newUpFlagSet returns a new flag set for the "up" and "login" commands. @@ -104,9 +123,9 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { upf.BoolVar(&upArgs.exitNodeAllowLANAccess, "exit-node-allow-lan-access", false, "Allow direct access to the local network when routing traffic via an exit node") upf.BoolVar(&upArgs.shieldsUp, "shields-up", false, "don't allow incoming connections") upf.BoolVar(&upArgs.runSSH, "ssh", false, "run an SSH server, permitting access per tailnet admin's declared policy") - upf.StringVar(&upArgs.advertiseTags, "advertise-tags", "", "comma-separated ACL tags to request; each must start with \"tag:\" (e.g. \"tag:eng,tag:montreal,tag:ssh\")") + upf.Var(&upArgs.advertiseTags, "advertise-tags", "comma-separated ACL tags to request; each must start with \"tag:\" (e.g. 
\"tag:eng,tag:montreal,tag:ssh\")") upf.StringVar(&upArgs.hostname, "hostname", "", "hostname to use instead of the one provided by the OS") - upf.StringVar(&upArgs.advertiseRoutes, "advertise-routes", "", "routes to advertise to other nodes (comma-separated, e.g. \"10.0.0.0/8,192.168.0.0/24\") or empty string to not advertise routes") + upf.Var(&upArgs.advertiseRoutes, "advertise-routes", "routes to advertise to other nodes (comma-separated, e.g. \"10.0.0.0/8,192.168.0.0/24\") or empty string to not advertise routes") upf.BoolVar(&upArgs.advertiseConnector, "advertise-connector", false, "advertise this node as an app connector") upf.BoolVar(&upArgs.advertiseDefaultRoute, "advertise-exit-node", false, "offer to be an exit node for internet traffic for the tailnet") upf.BoolVar(&upArgs.postureChecking, "posture-checking", false, hidden+"allow management plane to gather device posture information") @@ -174,9 +193,9 @@ type upArgsT struct { runWebClient bool forceReauth bool forceDaemon bool - advertiseRoutes string + advertiseRoutes singleUseStringFlag advertiseDefaultRoute bool - advertiseTags string + advertiseTags singleUseStringFlag advertiseConnector bool snat bool statefulFiltering bool @@ -244,7 +263,7 @@ func warnf(format string, args ...any) { // function exists for testing and should have no side effects or // outside interactions (e.g. no making Tailscale LocalAPI calls). func prefsFromUpArgs(upArgs upArgsT, warnf logger.Logf, st *ipnstate.Status, goos string) (*ipn.Prefs, error) { - routes, err := netutil.CalcAdvertiseRoutes(upArgs.advertiseRoutes, upArgs.advertiseDefaultRoute) + routes, err := netutil.CalcAdvertiseRoutes(upArgs.advertiseRoutes.String(), upArgs.advertiseDefaultRoute) if err != nil { return nil, err } @@ -254,8 +273,8 @@ func prefsFromUpArgs(upArgs upArgsT, warnf logger.Logf, st *ipnstate.Status, goo } var tags []string - if upArgs.advertiseTags != "" { - tags = strings.Split(upArgs.advertiseTags, ",") + if upArgs.advertiseTags.String() != "" { + tags = strings.Split(upArgs.advertiseTags.String(), ",") for _, tag := range tags { err := tailcfg.CheckTag(tag) if err != nil { @@ -555,7 +574,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if err != nil { return err } - authKey, err = resolveAuthKey(ctx, authKey, upArgs.advertiseTags) + authKey, err = resolveAuthKey(ctx, authKey, upArgs.advertiseTags.String()) if err != nil { return err } From f5a873aca44adab47be9c72a08a7e2ffc4faadde Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 8 Apr 2025 11:29:05 -0700 Subject: [PATCH 0713/1708] commit-messages.md: make our git commit message style guide public So we can link open source contributors to it. Updates #cleanup Change-Id: I02f612b38db9594f19b3be5d982f58c136120e9a Co-authored-by: James Sanderson Co-authored-by: Will Norris Co-authored-by: James Tucker Signed-off-by: Brad Fitzpatrick --- README.md | 3 +- docs/commit-messages.md | 150 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 151 insertions(+), 2 deletions(-) create mode 100644 docs/commit-messages.md diff --git a/README.md b/README.md index a20132a6a..2c9713a6f 100644 --- a/README.md +++ b/README.md @@ -71,8 +71,7 @@ We require [Developer Certificate of Origin](https://en.wikipedia.org/wiki/Developer_Certificate_of_Origin) `Signed-off-by` lines in commits. -See `git log` for our commit message style. It's basically the same as -[Go's style](https://go.dev/wiki/CommitMessage). 
+See [commit-messages.md](docs/commit-messages.md) (or skim `git log`) for our commit message style.
 
 ## About Us
 
diff --git a/docs/commit-messages.md b/docs/commit-messages.md
new file mode 100644
index 000000000..36b539689
--- /dev/null
+++ b/docs/commit-messages.md
@@ -0,0 +1,150 @@
+# Commit messages
+
+This is Tailscale's style guide for writing git commit messages.
+
+As with all style guides, many things here are subjective and exist primarily to
+codify existing conventions and promote uniformity and thus ease of reading by
+others. Others have stronger reasons, such as interop with tooling or making
+future git archaeology easier.
+
+# Commit Messages
+
+There are different styles of commit messages followed by different projects.
+
+Our commit message style is largely based on the Go language's style, which
+shares much in common with the Linux kernel's git commit message style (for
+which git was invented):
+
+* Go's high-level example: https://go.dev/doc/contribute#commit_messages
+* Go's details: https://golang.org/wiki/CommitMessage
+* Linux's style: https://www.kernel.org/doc/html/v4.10/process/submitting-patches.html#describe-your-changes
+
+(We do *not* use the [Conventional
+Commits](https://www.conventionalcommits.org/en/v1.0.0/) style or [Semantic
+Commits](https://gist.github.com/joshbuchea/6f47e86d2510bce28f8e7f42ae84c716)
+styles. They're reasonable, but we have already been using the Go and Linux
+style of commit messages and there is little justification for switching styles.
+Consistency is valuable.)
+
+In a nutshell, our commit messages should look like:
+
+```
+net/http: handle foo when bar
+
+[longer description here in the body]
+
+Fixes #nnnn
+```
+
+Notably, for the subject (the first line of description):
+
+- the primary directory(ies) from the root affected by the change goes before the colon, e.g. “derp/derphttp:” (if a lot of packages are involved, you can abbreviate to top-level names e.g. ”derp,magicsock:”, and/or remove less relevant packages)
+- the part after the colon is a verb, ideally an imperative verb (Linux style, telling the code what to do) or alternatively an infinitive verb that completes the blank in, *"this change modifies Tailscale to ___________"*. e.g. say *“fix the foobar feature”*, not *“fixing”*, *“fixed”*, or *“fixes”*. Or, as Linux guidelines say:
+  > Describe your changes in imperative mood, e.g. “make xyzzy do frotz” instead of “[This patch] makes xyzzy do frotz” or “[I] changed xyzzy to do frotz”, as if you are giving orders to the codebase to change its behaviour."
+- the verb after the colon is lowercase
+- there is no trailing period
+- it should be kept as short as possible (many git viewing tools prefer under ~76 characters, though we aren't super strict about this)
+
+  Examples:
+
+  | Good Example | notes |
+  | ------- | --- |
+  | `foo/bar: fix memory leak` | |
+  | `foo/bar: bump deps` | |
+  | `foo/bar: temporarily restrict access` | adverbs are okay |
+  | `foo/bar: implement new UI design` | |
+  | `control/{foo,bar}: optimize bar` | feel free to use {foo,bar} for common subpackages|
+
+  | Bad Example | notes |
+  | ------- | --- |
+  | `fixed memory leak` | BAD: missing package prefix |
+  | `foo/bar: fixed memory leak` | BAD: past tense |
+  | `foo/bar: fixing memory leak` | BAD: present continuous tense; no `-ing` verbs |
+  | `foo/bar: bumping deps` | BAD: present continuous tense; no `-ing` verbs |
+  | `foo/bar: new UI design` | BAD: that's a noun phrase; no verb |
+  | `foo/bar: made things larger` | BAD: that's past tense |
+  | `foo/bar: faster algorithm` | BAD: that's an adjective and a noun, not a verb |
+  | `foo/bar: Fix memory leak` | BAD: capitalized verb |
+  | `foo/bar: fix memory leak.` | BAD: trailing period |
+  | `foo/bar:fix memory leak` | BAD: no space after colon |
+  | `foo/bar : fix memory leak` | BAD: space before colon |
+  | `foo/bar: fix memory leak Fixes #123` | BAD: the “Fixes” shouldn't be part of the title |
+  | `!fixup reviewer feedback` | BAD: we don't check in fixup commits; the history should always bisect to a clean, working tree |
+
+
+For the body (the rest of the description):
+
+- blank line after the subject (first) line
+- the text should be wrapped to ~76 characters (to appease git viewing tools, mainly), unless you really need longer lines (e.g. for ASCII art, tables, or long links)
+- there must be a `Fixes` or `Updates` line for all non-trivial commits linking to a tracking bug. This goes after the body with a blank line separating the two. Trivial code clean-up commits can use `Updates #cleanup` instead of an issue.
+- `Change-Id` lines should ideally be included in commits in the `corp` repo and are more optional in `tailscale/tailscale`. You can configure Git to do this for you by running `./tool/go run misc/install-git-hooks.go` from the root of the corp repo. This was originally a Gerrit thing and we don't use Gerrit, but it lets our tooling track commits as they're cherry-picked between branches. Also, tools like [git-cleanup](https://github.com/bradfitz/gitutil) use it to clean up your old local branches once they're merged upstream.
+- we don't use Markdown in commit messages. (Accidental Markdown like bulleted lists or even headings is fine, but not links)
+- we require `Signed-off-by` lines in public repos (such as `tailscale/tailscale`). Add them using `git commit --signoff` or `git commit -s` for short. You can use them in private repos but do not have to.
+- when moving code between repos, include the repository name and git hash that it was moved from/to, so it is easier to trace history/blame.
+
+Please don't use [alternate GitHub-supported
+aliases](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue)
+like `Close` or `Resolves`. Tailscale only uses the verbs `Fixes` and `Updates`.
+
+To link a commit to an issue without marking it fixed—for example, if the commit
+is working toward a fix but not yet a complete fix—GitHub requires only that the
+issue is mentioned by number in the commit message.
By convention, our commits +mention this at the bottom of the message using `Updates`, where `Fixes` might +be expected, even if the number is also mentioned in the body of the commit +message. + +For example: + +``` +some/dir: refactor func Foo + +This will make the handling of +shorter and easier to test. + +Updates #nnnn +``` + +Please say `Updates` and not other common Github-recognized conventions (that is, don't use `For #nnnn`) + +## Public release notes + +For changes in `tailscale/tailscale` that fix a significant bug or add a new feature that should be included in the release notes for the next release, +add `RELNOTE: ` toward the end of the commit message. +This will aid the release engineer in writing the release notes for the next release. + +# Reverts + +When you use `git revert` to revert a commit, the default commit message will identify the commit SHA and message that was reverted. You must expand this message to explain **why** it is being reverted, including a link to the associated issue. + +Don't revert reverts. That gets ugly. Send the change anew but reference +the original & earlier revert. + +# Other repos + +To reference an issue in one repo from a commit in another (for example, fixing an issue in corp with a commit in `tailscale/tailscale`), you need to fully-qualify the issue number with the GitHub org/repo syntax: + +``` +cipher/rot13: add new super secure cipher + +Fixes tailscale/corp#1234 +``` + +Referencing a full URL to the issue is also acceptable, but try to prefer the shorter way. + +It's okay to reference the `corp` repo in open source repo commit messages. + +# GitHub Pull Requests + +In the future we plan to make a bot rewrite all PR bodies programmatically from +the commit messages. But for now (2023-07-25).... + +By convention, GitHub Pull Requests follow similar rules to commits, especially +the title of the PR (which should be the first line of the commit). It is less +important to follow these conventions in the PR itself, as it’s the commits that +become a permanent part of the commit history. + +It's okay (but rare) for a PR to contain multiple commits. When a PR does +contain multiple commits, call that out in the PR body for reviewers so they can +review each separately. + +You don't need to include the `Change-Id` in the description of your PR. From 8597b25840ecfeb75a6104df62198a0e20546e6c Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Mon, 13 Jan 2025 13:29:41 -0800 Subject: [PATCH 0714/1708] tsconsensus: add a tsconsensus package tsconsensus enables tsnet.Server instances to form a consensus. 
tsconsensus wraps hashicorp/raft with * the ability to do discovery via tailscale tags * inter node communication over tailscale * routing of commands to the leader Updates #14667 Signed-off-by: Fran Bull --- go.mod | 7 + go.sum | 44 +- tsconsensus/authorization.go | 134 ++++++ tsconsensus/authorization_test.go | 230 ++++++++++ tsconsensus/http.go | 182 ++++++++ tsconsensus/monitor.go | 160 +++++++ tsconsensus/tsconsensus.go | 447 ++++++++++++++++++ tsconsensus/tsconsensus_test.go | 738 ++++++++++++++++++++++++++++++ 8 files changed, 1940 insertions(+), 2 deletions(-) create mode 100644 tsconsensus/authorization.go create mode 100644 tsconsensus/authorization_test.go create mode 100644 tsconsensus/http.go create mode 100644 tsconsensus/monitor.go create mode 100644 tsconsensus/tsconsensus.go create mode 100644 tsconsensus/tsconsensus_test.go diff --git a/go.mod b/go.mod index ff736d950..e430fcb6d 100644 --- a/go.mod +++ b/go.mod @@ -48,6 +48,8 @@ require ( github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 github.com/google/uuid v1.6.0 github.com/goreleaser/nfpm/v2 v2.33.1 + github.com/hashicorp/go-hclog v1.6.2 + github.com/hashicorp/raft v1.7.2 github.com/hdevalence/ed25519consensus v0.2.0 github.com/illarion/gonotify/v3 v3.0.2 github.com/inetaf/tcpproxy v0.0.0-20250203165043-ded522cbd03f @@ -131,6 +133,7 @@ require ( github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect github.com/alecthomas/go-check-sumtype v0.1.4 // indirect github.com/alexkohler/nakedret/v2 v2.0.4 // indirect + github.com/armon/go-metrics v0.4.1 // indirect github.com/bombsimon/wsl/v4 v4.2.1 // indirect github.com/butuzov/mirror v1.1.0 // indirect github.com/catenacyber/perfsprint v0.7.1 // indirect @@ -149,6 +152,10 @@ require ( github.com/golangci/plugin-module-register v0.1.1 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/gorilla/securecookie v1.1.2 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-metrics v0.5.4 // indirect + github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect + github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/jjti/go-spancheck v0.5.3 // indirect github.com/karamaru-alpha/copyloopvar v1.0.8 // indirect github.com/macabu/inamedparam v0.1.3 // indirect diff --git a/go.sum b/go.sum index 06fad5d6d..0c3e566be 100644 --- a/go.sum +++ b/go.sum @@ -63,8 +63,9 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= -github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= +github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Djarvur/go-err113 v0.1.0 h1:uCRZZOdMQ0TZPHYTdYpoC0bLYJKPEHPUJ8MeAa51lNU= github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 
h1:sATXp1x6/axKxz2Gjxv8MALP0bXaNRfQinEwyfMcx8c= @@ -118,6 +119,8 @@ github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1 github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-proxyproto v0.0.0-20210323213023-7e956b284f0a/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= @@ -216,6 +219,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk= github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/ckaznocha/intrange v0.1.0 h1:ZiGBhvrdsKpoEfzh9CjBfDSZof6QB0ORY5tXasUtiew= github.com/ckaznocha/intrange v0.1.0/go.mod h1:Vwa9Ekex2BrEQMg6zlrWwbs/FtYw7eS5838Q7UjK7TQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -294,6 +299,7 @@ github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0 github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/evanw/esbuild v0.19.11 h1:mbPO1VJ/df//jjUd+p/nRLYCpizXxXb2w/zZMShxa2k= github.com/evanw/esbuild v0.19.11/go.mod h1:D2vIQZqV/vIf/VRHtViaUtViZmG7o+kKmlBfVQuRi48= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= @@ -537,13 +543,30 @@ github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Rep github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= +github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= 
+github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY= +github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI= +github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0= +github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= +github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/raft v1.7.2 h1:pyvxhfJ4R8VIAlHKvLoKQWElZspsCVT6YWuxVxsPAgc= +github.com/hashicorp/raft v1.7.2/go.mod h1:DfvCGFxpAUPE0L4Uc8JLlTPtc3GzSbdH0MTJCLgnmJQ= github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= @@ -588,6 +611,7 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -660,8 +684,12 @@ github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 h1:gWg6ZQ4JhDfJPqlo2 github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod 
h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= @@ -743,6 +771,8 @@ github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJ github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml/v2 v2.2.0 h1:QLgLl2yMN7N+ruc31VynXs1vhMZa7CeHHejIeBAsoHo= github.com/pelletier/go-toml/v2 v2.2.0/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= @@ -771,8 +801,10 @@ github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyf github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= @@ -783,6 +815,7 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= @@ -790,6 +823,7 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= 
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= @@ -890,6 +924,7 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= @@ -956,6 +991,7 @@ github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+ github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/u-root/gobusybox/src v0.0.0-20240225013946-a274a8d5d83a h1:eg5FkNoQp76ZsswyGZ+TjYqA/rhKefxK8BW7XOlQsxo= github.com/u-root/gobusybox/src v0.0.0-20240225013946-a274a8d5d83a/go.mod h1:e/8TmrdreH0sZOw2DFKBaUV7bvDWRq6SeM9PzkuVM68= github.com/u-root/u-root v0.14.0 h1:Ka4T10EEML7dQ5XDvO9c3MBN8z4nuSnGjcd1jmU2ivg= @@ -1184,6 +1220,7 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1209,9 +1246,12 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/tsconsensus/authorization.go b/tsconsensus/authorization.go new file mode 100644 index 000000000..1e0b70c07 --- /dev/null +++ b/tsconsensus/authorization.go @@ -0,0 +1,134 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tsconsensus + +import ( + "context" + "errors" + "net/netip" + "sync" + "time" + + "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tsnet" + "tailscale.com/types/views" + "tailscale.com/util/set" +) + +type statusGetter interface { + getStatus(context.Context) (*ipnstate.Status, error) +} + +type tailscaleStatusGetter struct { + ts *tsnet.Server + + mu sync.Mutex // protects the following + lastStatus *ipnstate.Status + lastStatusTime time.Time +} + +func (sg *tailscaleStatusGetter) fetchStatus(ctx context.Context) (*ipnstate.Status, error) { + lc, err := sg.ts.LocalClient() + if err != nil { + return nil, err + } + return lc.Status(ctx) +} + +func (sg *tailscaleStatusGetter) getStatus(ctx context.Context) (*ipnstate.Status, error) { + sg.mu.Lock() + defer sg.mu.Unlock() + if sg.lastStatus != nil && time.Since(sg.lastStatusTime) < 1*time.Second { + return sg.lastStatus, nil + } + status, err := sg.fetchStatus(ctx) + if err != nil { + return nil, err + } + sg.lastStatus = status + sg.lastStatusTime = time.Now() + return status, nil +} + +type authorization struct { + sg statusGetter + tag string + + mu sync.Mutex + peers *peers // protected by mu +} + +func newAuthorization(ts *tsnet.Server, tag string) *authorization { + return &authorization{ + sg: &tailscaleStatusGetter{ + ts: ts, + }, + tag: tag, + } +} + +func (a *authorization) Refresh(ctx context.Context) error { + tStatus, err := a.sg.getStatus(ctx) + if err != nil { + return err + } + if tStatus == nil { + return errors.New("no status") + } + if tStatus.BackendState != ipn.Running.String() { + return errors.New("ts Server is not running") + } + a.mu.Lock() + defer a.mu.Unlock() + a.peers = newPeers(tStatus, a.tag) + return nil +} + +func (a *authorization) AllowsHost(addr netip.Addr) bool { + if a.peers == nil { + return false + } + a.mu.Lock() + defer a.mu.Unlock() + return a.peers.addrs.Contains(addr) +} + +func (a *authorization) SelfAllowed() bool { + if a.peers == nil { + return false + } + a.mu.Lock() + defer a.mu.Unlock() + return a.peers.status.Self.Tags != nil && views.SliceContains(*a.peers.status.Self.Tags, a.tag) +} + +func (a *authorization) AllowedPeers() views.Slice[*ipnstate.PeerStatus] { + if a.peers == nil { + return views.Slice[*ipnstate.PeerStatus]{} + } + a.mu.Lock() + defer a.mu.Unlock() + return views.SliceOf(a.peers.statuses) +} + +type peers struct { + status *ipnstate.Status + addrs set.Set[netip.Addr] + statuses []*ipnstate.PeerStatus +} + +func newPeers(status *ipnstate.Status, tag string) *peers { + ps := &peers{ + status: status, + addrs: set.Set[netip.Addr]{}, + } + for _, p := range status.Peer { + if p.Tags != nil && views.SliceContains(*p.Tags, tag) { + ps.statuses = append(ps.statuses, p) + ps.addrs.AddSlice(p.TailscaleIPs) + } + } + return ps +} diff --git 
a/tsconsensus/authorization_test.go b/tsconsensus/authorization_test.go new file mode 100644 index 000000000..e0023f4ff --- /dev/null +++ b/tsconsensus/authorization_test.go @@ -0,0 +1,230 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tsconsensus + +import ( + "context" + "fmt" + "net/netip" + "testing" + + "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tailcfg" + "tailscale.com/types/key" + "tailscale.com/types/views" +) + +type testStatusGetter struct { + status *ipnstate.Status +} + +func (sg testStatusGetter) getStatus(ctx context.Context) (*ipnstate.Status, error) { + return sg.status, nil +} + +const testTag string = "tag:clusterTag" + +func makeAuthTestPeer(i int, tags views.Slice[string]) *ipnstate.PeerStatus { + return &ipnstate.PeerStatus{ + ID: tailcfg.StableNodeID(fmt.Sprintf("%d", i)), + Tags: &tags, + TailscaleIPs: []netip.Addr{ + netip.AddrFrom4([4]byte{100, 0, 0, byte(i)}), + netip.MustParseAddr(fmt.Sprintf("fd7a:115c:a1e0:0::%d", i)), + }, + } +} + +func makeAuthTestPeers(tags [][]string) []*ipnstate.PeerStatus { + peers := make([]*ipnstate.PeerStatus, len(tags)) + for i, ts := range tags { + peers[i] = makeAuthTestPeer(i, views.SliceOf(ts)) + } + return peers +} + +func authForStatus(s *ipnstate.Status) *authorization { + return &authorization{ + sg: testStatusGetter{ + status: s, + }, + tag: testTag, + } +} + +func authForPeers(self *ipnstate.PeerStatus, peers []*ipnstate.PeerStatus) *authorization { + s := &ipnstate.Status{ + BackendState: ipn.Running.String(), + Self: self, + Peer: map[key.NodePublic]*ipnstate.PeerStatus{}, + } + for _, p := range peers { + s.Peer[key.NewNode().Public()] = p + } + return authForStatus(s) +} + +func TestAuthRefreshErrorsNotRunning(t *testing.T) { + tests := []struct { + in *ipnstate.Status + expected string + }{ + { + in: nil, + expected: "no status", + }, + { + in: &ipnstate.Status{ + BackendState: "NeedsMachineAuth", + }, + expected: "ts Server is not running", + }, + } + + for _, tt := range tests { + t.Run(tt.expected, func(t *testing.T) { + ctx := t.Context() + a := authForStatus(tt.in) + err := a.Refresh(ctx) + if err == nil { + t.Fatalf("expected err to be non-nil") + } + if err.Error() != tt.expected { + t.Fatalf("expected: %s, got: %s", tt.expected, err.Error()) + } + }) + } +} + +func TestAuthUnrefreshed(t *testing.T) { + a := authForStatus(nil) + if a.AllowsHost(netip.MustParseAddr("100.0.0.1")) { + t.Fatalf("never refreshed authorization, allowsHost: expected false, got true") + } + gotAllowedPeers := a.AllowedPeers() + if gotAllowedPeers.Len() != 0 { + t.Fatalf("never refreshed authorization, allowedPeers: expected [], got %v", gotAllowedPeers) + } + if a.SelfAllowed() != false { + t.Fatalf("never refreshed authorization, selfAllowed: expected false got true") + } +} + +func TestAuthAllowsHost(t *testing.T) { + peerTags := [][]string{ + {"woo"}, + nil, + {"woo", testTag}, + {testTag}, + } + peers := makeAuthTestPeers(peerTags) + + tests := []struct { + name string + peerStatus *ipnstate.PeerStatus + expected bool + }{ + { + name: "tagged with different tag", + peerStatus: peers[0], + expected: false, + }, + { + name: "not tagged", + peerStatus: peers[1], + expected: false, + }, + { + name: "tags includes testTag", + peerStatus: peers[2], + expected: true, + }, + { + name: "only tag is testTag", + peerStatus: peers[3], + expected: true, + }, + } + + a := authForPeers(nil, peers) + err := a.Refresh(t.Context()) + if err != nil { + t.Fatal(err) + } + + 
for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // test we get the expected result for any of the peers TailscaleIPs + for _, addr := range tt.peerStatus.TailscaleIPs { + got := a.AllowsHost(addr) + if got != tt.expected { + t.Fatalf("allowed for peer with tags: %v, expected: %t, got %t", tt.peerStatus.Tags, tt.expected, got) + } + } + }) + } +} + +func TestAuthAllowedPeers(t *testing.T) { + ctx := t.Context() + peerTags := [][]string{ + {"woo"}, + nil, + {"woo", testTag}, + {testTag}, + } + peers := makeAuthTestPeers(peerTags) + a := authForPeers(nil, peers) + err := a.Refresh(ctx) + if err != nil { + t.Fatal(err) + } + ps := a.AllowedPeers() + if ps.Len() != 2 { + t.Fatalf("expected: 2, got: %d", ps.Len()) + } + for _, i := range []int{2, 3} { + if !ps.ContainsFunc(func(p *ipnstate.PeerStatus) bool { + return p.ID == peers[i].ID + }) { + t.Fatalf("expected peers[%d] to be in AllowedPeers because it is tagged with testTag", i) + } + } +} + +func TestAuthSelfAllowed(t *testing.T) { + tests := []struct { + name string + in []string + expected bool + }{ + { + name: "self has different tag", + in: []string{"woo"}, + expected: false, + }, + { + name: "selfs tags include testTag", + in: []string{"woo", testTag}, + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := t.Context() + self := makeAuthTestPeer(0, views.SliceOf(tt.in)) + a := authForPeers(self, nil) + err := a.Refresh(ctx) + if err != nil { + t.Fatal(err) + } + got := a.SelfAllowed() + if got != tt.expected { + t.Fatalf("expected: %t, got: %t", tt.expected, got) + } + }) + } +} diff --git a/tsconsensus/http.go b/tsconsensus/http.go new file mode 100644 index 000000000..d2a44015f --- /dev/null +++ b/tsconsensus/http.go @@ -0,0 +1,182 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tsconsensus + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net/http" + "time" + + "tailscale.com/util/httpm" +) + +type joinRequest struct { + RemoteHost string + RemoteID string +} + +type commandClient struct { + port uint16 + httpClient *http.Client +} + +func (rac *commandClient) url(host string, path string) string { + return fmt.Sprintf("http://%s:%d%s", host, rac.port, path) +} + +const maxBodyBytes = 1024 * 1024 + +func readAllMaxBytes(r io.Reader) ([]byte, error) { + return io.ReadAll(io.LimitReader(r, maxBodyBytes+1)) +} + +func (rac *commandClient) join(host string, jr joinRequest) error { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + rBs, err := json.Marshal(jr) + if err != nil { + return err + } + url := rac.url(host, "/join") + req, err := http.NewRequestWithContext(ctx, httpm.POST, url, bytes.NewReader(rBs)) + if err != nil { + return err + } + resp, err := rac.httpClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + respBs, err := readAllMaxBytes(resp.Body) + if err != nil { + return err + } + return fmt.Errorf("remote responded %d: %s", resp.StatusCode, string(respBs)) + } + return nil +} + +func (rac *commandClient) executeCommand(host string, bs []byte) (CommandResult, error) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + url := rac.url(host, "/executeCommand") + req, err := http.NewRequestWithContext(ctx, httpm.POST, url, bytes.NewReader(bs)) + if err != nil { + return CommandResult{}, err + } + resp, err := rac.httpClient.Do(req) + 
if err != nil { + return CommandResult{}, err + } + defer resp.Body.Close() + respBs, err := readAllMaxBytes(resp.Body) + if err != nil { + return CommandResult{}, err + } + if resp.StatusCode != 200 { + return CommandResult{}, fmt.Errorf("remote responded %d: %s", resp.StatusCode, string(respBs)) + } + var cr CommandResult + if err = json.Unmarshal(respBs, &cr); err != nil { + return CommandResult{}, err + } + return cr, nil +} + +type authedHandler struct { + auth *authorization + handler http.Handler +} + +func (h authedHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + err := h.auth.Refresh(r.Context()) + if err != nil { + log.Printf("error authedHandler ServeHTTP refresh auth: %v", err) + http.Error(w, "", http.StatusInternalServerError) + return + } + a, err := addrFromServerAddress(r.RemoteAddr) + if err != nil { + log.Printf("error authedHandler ServeHTTP refresh auth: %v", err) + http.Error(w, "", http.StatusInternalServerError) + return + } + allowed := h.auth.AllowsHost(a) + if !allowed { + http.Error(w, "peer not allowed", http.StatusForbidden) + return + } + h.handler.ServeHTTP(w, r) +} + +func (c *Consensus) handleJoinHTTP(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + decoder := json.NewDecoder(http.MaxBytesReader(w, r.Body, maxBodyBytes+1)) + var jr joinRequest + err := decoder.Decode(&jr) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + _, err = decoder.Token() + if !errors.Is(err, io.EOF) { + http.Error(w, "Request body must only contain a single JSON object", http.StatusBadRequest) + return + } + if jr.RemoteHost == "" { + http.Error(w, "Required: remoteAddr", http.StatusBadRequest) + return + } + if jr.RemoteID == "" { + http.Error(w, "Required: remoteID", http.StatusBadRequest) + return + } + err = c.handleJoin(jr) + if err != nil { + log.Printf("join handler error: %v", err) + http.Error(w, "", http.StatusInternalServerError) + return + } +} + +func (c *Consensus) handleExecuteCommandHTTP(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + decoder := json.NewDecoder(r.Body) + var cmd Command + err := decoder.Decode(&cmd) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + result, err := c.executeCommandLocally(cmd) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if err := json.NewEncoder(w).Encode(result); err != nil { + log.Printf("error encoding execute command result: %v", err) + return + } +} + +func (c *Consensus) makeCommandMux() *http.ServeMux { + mux := http.NewServeMux() + mux.HandleFunc("POST /join", c.handleJoinHTTP) + mux.HandleFunc("POST /executeCommand", c.handleExecuteCommandHTTP) + return mux +} + +func (c *Consensus) makeCommandHandler(auth *authorization) http.Handler { + return authedHandler{ + handler: c.makeCommandMux(), + auth: auth, + } +} diff --git a/tsconsensus/monitor.go b/tsconsensus/monitor.go new file mode 100644 index 000000000..61a5a74a0 --- /dev/null +++ b/tsconsensus/monitor.go @@ -0,0 +1,160 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tsconsensus + +import ( + "context" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "slices" + + "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tsnet" + "tailscale.com/util/dnsname" +) + +type status struct { + Status *ipnstate.Status + RaftState string +} + +type monitor struct { + ts *tsnet.Server + con *Consensus + sg statusGetter +} + +func (m 
*monitor) getStatus(ctx context.Context) (status, error) { + tStatus, err := m.sg.getStatus(ctx) + if err != nil { + return status{}, err + } + return status{Status: tStatus, RaftState: m.con.raft.State().String()}, nil +} + +func serveMonitor(c *Consensus, ts *tsnet.Server, listenAddr string) (*http.Server, error) { + ln, err := ts.Listen("tcp", listenAddr) + if err != nil { + return nil, err + } + m := &monitor{con: c, ts: ts, sg: &tailscaleStatusGetter{ + ts: ts, + }} + mux := http.NewServeMux() + mux.HandleFunc("GET /full", m.handleFullStatus) + mux.HandleFunc("GET /{$}", m.handleSummaryStatus) + mux.HandleFunc("GET /netmap", m.handleNetmap) + mux.HandleFunc("POST /dial", m.handleDial) + srv := &http.Server{Handler: mux} + go func() { + err := srv.Serve(ln) + log.Printf("MonitorHTTP stopped serving with error: %v", err) + }() + return srv, nil +} + +func (m *monitor) handleFullStatus(w http.ResponseWriter, r *http.Request) { + s, err := m.getStatus(r.Context()) + if err != nil { + log.Printf("monitor: error getStatus: %v", err) + http.Error(w, "", http.StatusInternalServerError) + return + } + if err := json.NewEncoder(w).Encode(s); err != nil { + log.Printf("monitor: error encoding full status: %v", err) + return + } +} + +func (m *monitor) handleSummaryStatus(w http.ResponseWriter, r *http.Request) { + s, err := m.getStatus(r.Context()) + if err != nil { + log.Printf("monitor: error getStatus: %v", err) + http.Error(w, "", http.StatusInternalServerError) + return + } + lines := []string{} + for _, p := range s.Status.Peer { + if p.Online { + name := dnsname.FirstLabel(p.DNSName) + lines = append(lines, fmt.Sprintf("%s\t\t%d\t%d\t%t", name, p.RxBytes, p.TxBytes, p.Active)) + } + } + _, err = w.Write([]byte(fmt.Sprintf("RaftState: %s\n", s.RaftState))) + if err != nil { + log.Printf("monitor: error writing status: %v", err) + return + } + + slices.Sort(lines) + for _, l := range lines { + _, err = w.Write([]byte(fmt.Sprintf("%s\n", l))) + if err != nil { + log.Printf("monitor: error writing status: %v", err) + return + } + } +} + +func (m *monitor) handleNetmap(w http.ResponseWriter, r *http.Request) { + var mask ipn.NotifyWatchOpt = ipn.NotifyInitialNetMap + mask |= ipn.NotifyNoPrivateKeys + lc, err := m.ts.LocalClient() + if err != nil { + log.Printf("monitor: error LocalClient: %v", err) + http.Error(w, "", http.StatusInternalServerError) + return + } + watcher, err := lc.WatchIPNBus(r.Context(), mask) + if err != nil { + log.Printf("monitor: error WatchIPNBus: %v", err) + http.Error(w, "", http.StatusInternalServerError) + return + } + defer watcher.Close() + + n, err := watcher.Next() + if err != nil { + log.Printf("monitor: error watcher.Next: %v", err) + http.Error(w, "", http.StatusInternalServerError) + return + } + encoder := json.NewEncoder(w) + encoder.SetIndent("", "\t") + if err := encoder.Encode(n); err != nil { + log.Printf("monitor: error encoding netmap: %v", err) + return + } +} + +func (m *monitor) handleDial(w http.ResponseWriter, r *http.Request) { + var dialParams struct { + Addr string + } + defer r.Body.Close() + bs, err := io.ReadAll(http.MaxBytesReader(w, r.Body, maxBodyBytes)) + if err != nil { + log.Printf("monitor: error reading body: %v", err) + http.Error(w, "", http.StatusInternalServerError) + return + } + err = json.Unmarshal(bs, &dialParams) + if err != nil { + log.Printf("monitor: error unmarshalling json: %v", err) + http.Error(w, "", http.StatusBadRequest) + return + } + c, err := m.ts.Dial(r.Context(), "tcp", dialParams.Addr) + if err != nil { + 
log.Printf("monitor: error dialing: %v", err) + http.Error(w, "", http.StatusInternalServerError) + return + } + c.Close() + w.Write([]byte("ok\n")) +} diff --git a/tsconsensus/tsconsensus.go b/tsconsensus/tsconsensus.go new file mode 100644 index 000000000..74094782f --- /dev/null +++ b/tsconsensus/tsconsensus.go @@ -0,0 +1,447 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package tsconsensus implements a consensus algorithm for a group of tsnet.Servers +// +// The Raft consensus algorithm relies on you implementing a state machine that will give the same +// result to a given command as long as the same logs have been applied in the same order. +// +// tsconsensus uses the hashicorp/raft library to implement leader elections and log application. +// +// tsconsensus provides: +// - cluster peer discovery based on tailscale tags +// - executing a command on the leader +// - communication between cluster peers over tailscale using tsnet +// +// Users implement a state machine that satisfies the raft.FSM interface, with the business logic they desire. +// When changes to state are needed any node may +// - create a Command instance with serialized Args. +// - call ExecuteCommand with the Command instance +// this will propagate the command to the leader, +// and then from the reader to every node via raft. +// - the state machine then can implement raft.Apply, and dispatch commands via the Command.Name +// returning a CommandResult with an Err or a serialized Result. +package tsconsensus + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "log" + "net" + "net/http" + "net/netip" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/raft" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tsnet" + "tailscale.com/types/views" +) + +func raftAddr(host netip.Addr, cfg Config) string { + return netip.AddrPortFrom(host, cfg.RaftPort).String() +} + +func addrFromServerAddress(sa string) (netip.Addr, error) { + addrPort, err := netip.ParseAddrPort(sa) + if err != nil { + return netip.Addr{}, err + } + return addrPort.Addr(), nil +} + +// A selfRaftNode is the info we need to talk to hashicorp/raft about our node. +// We specify the ID and Addr on Consensus Start, and then use it later for raft +// operations such as BootstrapCluster and AddVoter. +type selfRaftNode struct { + id string + hostAddr netip.Addr +} + +// A Config holds configurable values such as ports and timeouts. +// Use DefaultConfig to get a useful Config. +type Config struct { + CommandPort uint16 + RaftPort uint16 + MonitorPort uint16 + Raft *raft.Config + MaxConnPool int + ConnTimeout time.Duration + ServeDebugMonitor bool +} + +// DefaultConfig returns a Config populated with default values ready for use. +func DefaultConfig() Config { + raftConfig := raft.DefaultConfig() + // these values are 2x the raft DefaultConfig + raftConfig.HeartbeatTimeout = 2000 * time.Millisecond + raftConfig.ElectionTimeout = 2000 * time.Millisecond + raftConfig.LeaderLeaseTimeout = 1000 * time.Millisecond + + return Config{ + CommandPort: 6271, + RaftPort: 6270, + MonitorPort: 8081, + Raft: raftConfig, + MaxConnPool: 5, + ConnTimeout: 5 * time.Second, + } +} + +// StreamLayer implements an interface asked for by raft.NetworkTransport. +// It does the raft interprocess communication via tailscale. 
+type StreamLayer struct { + net.Listener + s *tsnet.Server + auth *authorization + shutdownCtx context.Context +} + +// Dial implements the raft.StreamLayer interface with the tsnet.Server's Dial. +func (sl StreamLayer) Dial(address raft.ServerAddress, timeout time.Duration) (net.Conn, error) { + ctx, cancel := context.WithTimeout(sl.shutdownCtx, timeout) + defer cancel() + authorized, err := sl.addrAuthorized(ctx, string(address)) + if err != nil { + return nil, err + } + if !authorized { + return nil, errors.New("dial: peer is not allowed") + } + return sl.s.Dial(ctx, "tcp", string(address)) +} + +func (sl StreamLayer) addrAuthorized(ctx context.Context, address string) (bool, error) { + addr, err := addrFromServerAddress(address) + if err != nil { + // bad RemoteAddr is not authorized + return false, nil + } + err = sl.auth.Refresh(ctx) + if err != nil { + // might be authorized, we couldn't tell + return false, err + } + return sl.auth.AllowsHost(addr), nil +} + +func (sl StreamLayer) Accept() (net.Conn, error) { + ctx, cancel := context.WithCancel(sl.shutdownCtx) + defer cancel() + for { + conn, err := sl.Listener.Accept() + if err != nil || conn == nil { + return conn, err + } + addr := conn.RemoteAddr() + if addr == nil { + conn.Close() + return nil, errors.New("conn has no remote addr") + } + authorized, err := sl.addrAuthorized(ctx, addr.String()) + if err != nil { + conn.Close() + return nil, err + } + if !authorized { + log.Printf("StreamLayer accept: unauthorized: %s", addr) + conn.Close() + continue + } + return conn, err + } +} + +// Start returns a pointer to a running Consensus instance. +// Calling it with a *tsnet.Server will cause that server to join or start a consensus cluster +// with other nodes on the tailnet tagged with the clusterTag. The *tsnet.Server will run the state +// machine defined by the raft.FSM also provided, and keep it in sync with the other cluster members' +// state machines using Raft. 
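+//
+// A minimal usage sketch (illustrative only: myFSM is a hypothetical
+// raft.FSM implementation whose Apply returns a CommandResult, and the
+// cluster tag shown is an example value, not something defined by this
+// package):
+//
+//	c, err := Start(ctx, ts, &myFSM{}, "tag:my-cluster", DefaultConfig())
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer c.Stop(ctx)
+//	args, _ := json.Marshal("some args")
+//	res, err := c.ExecuteCommand(Command{Name: "doSomething", Args: args})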
+func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, clusterTag string, cfg Config) (*Consensus, error) { + if clusterTag == "" { + return nil, errors.New("cluster tag must be provided") + } + + cc := commandClient{ + port: cfg.CommandPort, + httpClient: ts.HTTPClient(), + } + v4, _ := ts.TailscaleIPs() + // TODO(fran) support tailnets that have ipv4 disabled + self := selfRaftNode{ + id: v4.String(), + hostAddr: v4, + } + shutdownCtx, shutdownCtxCancel := context.WithCancel(ctx) + c := Consensus{ + commandClient: &cc, + self: self, + config: cfg, + shutdownCtxCancel: shutdownCtxCancel, + } + + auth := newAuthorization(ts, clusterTag) + err := auth.Refresh(shutdownCtx) + if err != nil { + return nil, fmt.Errorf("auth refresh: %w", err) + } + if !auth.SelfAllowed() { + return nil, errors.New("this node is not tagged with the cluster tag") + } + + srv, err := c.serveCommandHTTP(ts, auth) + if err != nil { + return nil, err + } + c.cmdHttpServer = srv + + // after startRaft it's possible some other raft node that has us in their configuration will get + // in contact, so by the time we do anything else we may already be a functioning member + // of a consensus + r, err := startRaft(shutdownCtx, ts, &fsm, c.self, auth, cfg) + if err != nil { + return nil, err + } + c.raft = r + + c.bootstrap(auth.AllowedPeers()) + + if cfg.ServeDebugMonitor { + srv, err = serveMonitor(&c, ts, netip.AddrPortFrom(c.self.hostAddr, cfg.MonitorPort).String()) + if err != nil { + return nil, err + } + c.monitorHttpServer = srv + } + + return &c, nil +} + +func startRaft(shutdownCtx context.Context, ts *tsnet.Server, fsm *raft.FSM, self selfRaftNode, auth *authorization, cfg Config) (*raft.Raft, error) { + cfg.Raft.LocalID = raft.ServerID(self.id) + + // no persistence (for now?) + logStore := raft.NewInmemStore() + stableStore := raft.NewInmemStore() + snapshots := raft.NewInmemSnapshotStore() + + // opens the listener on the raft port, raft will close it when it thinks it's appropriate + ln, err := ts.Listen("tcp", raftAddr(self.hostAddr, cfg)) + if err != nil { + return nil, err + } + + logger := hclog.New(&hclog.LoggerOptions{ + Name: "raft-net", + Output: cfg.Raft.LogOutput, + Level: hclog.LevelFromString(cfg.Raft.LogLevel), + }) + + transport := raft.NewNetworkTransportWithLogger(StreamLayer{ + s: ts, + Listener: ln, + auth: auth, + shutdownCtx: shutdownCtx, + }, + cfg.MaxConnPool, + cfg.ConnTimeout, + logger) + + return raft.NewRaft(cfg.Raft, *fsm, logStore, stableStore, snapshots, transport) +} + +// A Consensus is the consensus algorithm for a tsnet.Server +// It wraps a raft.Raft instance and performs the peer discovery +// and command execution on the leader. +type Consensus struct { + raft *raft.Raft + commandClient *commandClient + self selfRaftNode + config Config + cmdHttpServer *http.Server + monitorHttpServer *http.Server + shutdownCtxCancel context.CancelFunc +} + +// bootstrap tries to join a raft cluster, or start one. +// +// We need to do the very first raft cluster configuration, but after that raft manages it. +// bootstrap is called at start up, and we are not currently aware of what the cluster config might be, +// our node may already be in it. Try to join the raft cluster of all the other nodes we know about, and +// if unsuccessful, assume we are the first and start our own. +// +// It's possible for bootstrap to return an error, or start a errant breakaway cluster. 
+// +// We have a list of expected cluster members already from control (the members of the tailnet with the tag) +// so we could do the initial configuration with all servers specified. +// Choose to start with just this machine in the raft configuration instead, as: +// - We want to handle machines joining after start anyway. +// - Not all tagged nodes tailscale believes are active are necessarily actually responsive right now, +// so let each node opt in when able. +func (c *Consensus) bootstrap(targets views.Slice[*ipnstate.PeerStatus]) error { + log.Printf("Trying to find cluster: num targets to try: %d", targets.Len()) + for _, p := range targets.All() { + if !p.Online { + log.Printf("Trying to find cluster: tailscale reports not online: %s", p.TailscaleIPs[0]) + continue + } + log.Printf("Trying to find cluster: trying %s", p.TailscaleIPs[0]) + err := c.commandClient.join(p.TailscaleIPs[0].String(), joinRequest{ + RemoteHost: c.self.hostAddr.String(), + RemoteID: c.self.id, + }) + if err != nil { + log.Printf("Trying to find cluster: could not join %s: %v", p.TailscaleIPs[0], err) + continue + } + log.Printf("Trying to find cluster: joined %s", p.TailscaleIPs[0]) + return nil + } + + log.Printf("Trying to find cluster: unsuccessful, starting as leader: %s", c.self.hostAddr.String()) + f := c.raft.BootstrapCluster( + raft.Configuration{ + Servers: []raft.Server{ + { + ID: raft.ServerID(c.self.id), + Address: raft.ServerAddress(c.raftAddr(c.self.hostAddr)), + }, + }, + }) + return f.Error() +} + +// ExecuteCommand propagates a Command to be executed on the leader. Which +// uses raft to Apply it to the followers. +func (c *Consensus) ExecuteCommand(cmd Command) (CommandResult, error) { + b, err := json.Marshal(cmd) + if err != nil { + return CommandResult{}, err + } + result, err := c.executeCommandLocally(cmd) + var leErr lookElsewhereError + for errors.As(err, &leErr) { + result, err = c.commandClient.executeCommand(leErr.where, b) + } + return result, err +} + +// Stop attempts to gracefully shutdown various components. +func (c *Consensus) Stop(ctx context.Context) error { + fut := c.raft.Shutdown() + err := fut.Error() + if err != nil { + log.Printf("Stop: Error in Raft Shutdown: %v", err) + } + c.shutdownCtxCancel() + err = c.cmdHttpServer.Shutdown(ctx) + if err != nil { + log.Printf("Stop: Error in command HTTP Shutdown: %v", err) + } + if c.monitorHttpServer != nil { + err = c.monitorHttpServer.Shutdown(ctx) + if err != nil { + log.Printf("Stop: Error in monitor HTTP Shutdown: %v", err) + } + } + return nil +} + +// A Command is a representation of a state machine action. +type Command struct { + // The Name can be used to dispatch the command when received. + Name string + // The Args are serialized for transport. + Args json.RawMessage +} + +// A CommandResult is a representation of the result of a state +// machine action. +type CommandResult struct { + // Err is any error that occurred on the node that tried to execute the command, + // including any error from the underlying operation and deserialization problems etc. + Err error + // Result is serialized for transport. 
+ Result json.RawMessage +} + +type lookElsewhereError struct { + where string +} + +func (e lookElsewhereError) Error() string { + return fmt.Sprintf("not the leader, try: %s", e.where) +} + +var errLeaderUnknown = errors.New("leader unknown") + +func (c *Consensus) serveCommandHTTP(ts *tsnet.Server, auth *authorization) (*http.Server, error) { + ln, err := ts.Listen("tcp", c.commandAddr(c.self.hostAddr)) + if err != nil { + return nil, err + } + srv := &http.Server{Handler: c.makeCommandHandler(auth)} + go func() { + err := srv.Serve(ln) + log.Printf("CmdHttp stopped serving with err: %v", err) + }() + return srv, nil +} + +func (c *Consensus) getLeader() (string, error) { + raftLeaderAddr, _ := c.raft.LeaderWithID() + leaderAddr := (string)(raftLeaderAddr) + if leaderAddr == "" { + // Raft doesn't know who the leader is. + return "", errLeaderUnknown + } + // Raft gives us the address with the raft port, we don't always want that. + host, _, err := net.SplitHostPort(leaderAddr) + return host, err +} + +func (c *Consensus) executeCommandLocally(cmd Command) (CommandResult, error) { + b, err := json.Marshal(cmd) + if err != nil { + return CommandResult{}, err + } + f := c.raft.Apply(b, 0) + err = f.Error() + result := f.Response() + if errors.Is(err, raft.ErrNotLeader) { + leader, err := c.getLeader() + if err != nil { + // we know we're not leader but we were unable to give the address of the leader + return CommandResult{}, err + } + return CommandResult{}, lookElsewhereError{where: leader} + } + if result == nil { + result = CommandResult{} + } + return result.(CommandResult), err +} + +func (c *Consensus) handleJoin(jr joinRequest) error { + addr, err := netip.ParseAddr(jr.RemoteHost) + if err != nil { + return err + } + remoteAddr := c.raftAddr(addr) + f := c.raft.AddVoter(raft.ServerID(jr.RemoteID), raft.ServerAddress(remoteAddr), 0, 0) + if f.Error() != nil { + return f.Error() + } + return nil +} + +func (c *Consensus) raftAddr(host netip.Addr) string { + return raftAddr(host, c.config) +} + +func (c *Consensus) commandAddr(host netip.Addr) string { + return netip.AddrPortFrom(host, c.config.CommandPort).String() +} diff --git a/tsconsensus/tsconsensus_test.go b/tsconsensus/tsconsensus_test.go new file mode 100644 index 000000000..37ccdcc84 --- /dev/null +++ b/tsconsensus/tsconsensus_test.go @@ -0,0 +1,738 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tsconsensus + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httptest" + "net/netip" + "os" + "path/filepath" + "strings" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/raft" + "tailscale.com/client/tailscale" + "tailscale.com/ipn/store/mem" + "tailscale.com/net/netns" + "tailscale.com/tailcfg" + "tailscale.com/tsnet" + "tailscale.com/tstest/integration" + "tailscale.com/tstest/integration/testcontrol" + "tailscale.com/tstest/nettest" + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/types/views" + "tailscale.com/util/racebuild" +) + +type fsm struct { + mu sync.Mutex + applyEvents []string +} + +func commandWith(t *testing.T, s string) []byte { + jsonArgs, err := json.Marshal(s) + if err != nil { + t.Fatal(err) + } + bs, err := json.Marshal(Command{ + Args: jsonArgs, + }) + if err != nil { + t.Fatal(err) + } + return bs +} + +func fromCommand(bs []byte) (string, error) { + var cmd Command + err := 
json.Unmarshal(bs, &cmd) + if err != nil { + return "", err + } + var args string + err = json.Unmarshal(cmd.Args, &args) + if err != nil { + return "", err + } + return args, nil +} + +func (f *fsm) Apply(l *raft.Log) any { + f.mu.Lock() + defer f.mu.Unlock() + s, err := fromCommand(l.Data) + if err != nil { + return CommandResult{ + Err: err, + } + } + f.applyEvents = append(f.applyEvents, s) + result, err := json.Marshal(len(f.applyEvents)) + if err != nil { + panic("should be able to Marshal that?") + } + return CommandResult{ + Result: result, + } +} + +func (f *fsm) numEvents() int { + f.mu.Lock() + defer f.mu.Unlock() + return len(f.applyEvents) +} + +func (f *fsm) eventsMatch(es []string) bool { + f.mu.Lock() + defer f.mu.Unlock() + return cmp.Equal(es, f.applyEvents) +} + +func (f *fsm) Snapshot() (raft.FSMSnapshot, error) { + return nil, nil +} + +func (f *fsm) Restore(rc io.ReadCloser) error { + return nil +} + +func testConfig(t *testing.T) { + // -race AND Parallel makes things start to take too long. + if !racebuild.On { + t.Parallel() + } + nettest.SkipIfNoNetwork(t) +} + +func startControl(t testing.TB) (control *testcontrol.Server, controlURL string) { + t.Helper() + // tailscale/corp#4520: don't use netns for tests. + netns.SetEnabled(false) + t.Cleanup(func() { + netns.SetEnabled(true) + }) + + derpLogf := logger.Discard + derpMap := integration.RunDERPAndSTUN(t, derpLogf, "127.0.0.1") + control = &testcontrol.Server{ + DERPMap: derpMap, + DNSConfig: &tailcfg.DNSConfig{ + Proxied: true, + }, + MagicDNSDomain: "tail-scale.ts.net", + } + control.HTTPTestServer = httptest.NewUnstartedServer(control) + control.HTTPTestServer.Start() + t.Cleanup(control.HTTPTestServer.Close) + controlURL = control.HTTPTestServer.URL + t.Logf("testcontrol listening on %s", controlURL) + return control, controlURL +} + +func startNode(t testing.TB, ctx context.Context, controlURL, hostname string) (*tsnet.Server, key.NodePublic, netip.Addr) { + t.Helper() + + tmp := filepath.Join(t.TempDir(), hostname) + os.MkdirAll(tmp, 0755) + s := &tsnet.Server{ + Dir: tmp, + ControlURL: controlURL, + Hostname: hostname, + Store: new(mem.Store), + Ephemeral: true, + } + t.Cleanup(func() { s.Close() }) + + status, err := s.Up(ctx) + if err != nil { + t.Fatal(err) + } + return s, status.Self.PublicKey, status.TailscaleIPs[0] +} + +func waitForNodesToBeTaggedInStatus(t testing.TB, ctx context.Context, ts *tsnet.Server, nodeKeys []key.NodePublic, tag string) { + t.Helper() + waitFor(t, "nodes tagged in status", func() bool { + lc, err := ts.LocalClient() + if err != nil { + t.Fatal(err) + } + status, err := lc.Status(ctx) + if err != nil { + t.Fatalf("error getting status: %v", err) + } + for _, k := range nodeKeys { + var tags *views.Slice[string] + if k == status.Self.PublicKey { + tags = status.Self.Tags + } else { + tags = status.Peer[k].Tags + } + if tag == "" { + if tags != nil && tags.Len() != 0 { + return false + } + } else { + if tags == nil { + return false + } + if tags.Len() != 1 || tags.At(0) != tag { + return false + } + } + } + return true + }, 2*time.Second) +} + +func tagNodes(t testing.TB, control *testcontrol.Server, nodeKeys []key.NodePublic, tag string) { + t.Helper() + for _, key := range nodeKeys { + n := control.Node(key) + if tag == "" { + if len(n.Tags) != 1 { + t.Fatalf("expected tags to have one tag") + } + n.Tags = nil + } else { + if len(n.Tags) != 0 { + // if we want this to work with multiple tags we'll have to change the logic + // for checking if a tag got removed yet. 
+ t.Fatalf("expected tags to be empty") + } + n.Tags = append(n.Tags, tag) + } + b := true + n.Online = &b + control.UpdateNode(n) + } +} + +func addIDedLogger(id string, c Config) Config { + // logs that identify themselves + c.Raft.Logger = hclog.New(&hclog.LoggerOptions{ + Name: fmt.Sprintf("raft: %s", id), + Output: c.Raft.LogOutput, + Level: hclog.LevelFromString(c.Raft.LogLevel), + }) + return c +} + +func warnLogConfig() Config { + c := DefaultConfig() + // fewer logs from raft + c.Raft.LogLevel = "WARN" + // timeouts long enough that we can form a cluster under -race + c.Raft.LeaderLeaseTimeout = 2 * time.Second + c.Raft.HeartbeatTimeout = 4 * time.Second + c.Raft.ElectionTimeout = 4 * time.Second + return c +} + +func TestStart(t *testing.T) { + testConfig(t) + control, controlURL := startControl(t) + ctx := context.Background() + one, k, _ := startNode(t, ctx, controlURL, "one") + + clusterTag := "tag:whatever" + // nodes must be tagged with the cluster tag, to find each other + tagNodes(t, control, []key.NodePublic{k}, clusterTag) + waitForNodesToBeTaggedInStatus(t, ctx, one, []key.NodePublic{k}, clusterTag) + + sm := &fsm{} + r, err := Start(ctx, one, sm, clusterTag, warnLogConfig()) + if err != nil { + t.Fatal(err) + } + defer r.Stop(ctx) +} + +func waitFor(t testing.TB, msg string, condition func() bool, waitBetweenTries time.Duration) { + t.Helper() + try := 0 + for true { + try++ + done := condition() + if done { + t.Logf("waitFor success: %s: after %d tries", msg, try) + return + } + time.Sleep(waitBetweenTries) + } +} + +type participant struct { + c *Consensus + sm *fsm + ts *tsnet.Server + key key.NodePublic +} + +// starts and tags the *tsnet.Server nodes with the control, waits for the nodes to make successful +// LocalClient Status calls that show the first node as Online. +func startNodesAndWaitForPeerStatus(t testing.TB, ctx context.Context, clusterTag string, nNodes int) ([]*participant, *testcontrol.Server, string) { + t.Helper() + ps := make([]*participant, nNodes) + keysToTag := make([]key.NodePublic, nNodes) + localClients := make([]*tailscale.LocalClient, nNodes) + control, controlURL := startControl(t) + for i := 0; i < nNodes; i++ { + ts, key, _ := startNode(t, ctx, controlURL, fmt.Sprintf("node %d", i)) + ps[i] = &participant{ts: ts, key: key} + keysToTag[i] = key + lc, err := ts.LocalClient() + if err != nil { + t.Fatalf("%d: error getting local client: %v", i, err) + } + localClients[i] = lc + } + tagNodes(t, control, keysToTag, clusterTag) + waitForNodesToBeTaggedInStatus(t, ctx, ps[0].ts, keysToTag, clusterTag) + fxCameOnline := func() bool { + // all the _other_ nodes see the first as online + for i := 1; i < nNodes; i++ { + status, err := localClients[i].Status(ctx) + if err != nil { + t.Fatalf("%d: error getting status: %v", i, err) + } + if !status.Peer[ps[0].key].Online { + return false + } + } + return true + } + waitFor(t, "other nodes see node 1 online in ts status", fxCameOnline, 2*time.Second) + return ps, control, controlURL +} + +// populates participants with their consensus fields, waits for all nodes to show all nodes +// as part of the same consensus cluster. Starts the first participant first and waits for it to +// become leader before adding other nodes. 
+func createConsensusCluster(t testing.TB, ctx context.Context, clusterTag string, participants []*participant, cfg Config) { + t.Helper() + participants[0].sm = &fsm{} + myCfg := addIDedLogger("0", cfg) + first, err := Start(ctx, participants[0].ts, participants[0].sm, clusterTag, myCfg) + if err != nil { + t.Fatal(err) + } + fxFirstIsLeader := func() bool { + return first.raft.State() == raft.Leader + } + waitFor(t, "node 0 is leader", fxFirstIsLeader, 2*time.Second) + participants[0].c = first + + for i := 1; i < len(participants); i++ { + participants[i].sm = &fsm{} + myCfg := addIDedLogger(fmt.Sprintf("%d", i), cfg) + c, err := Start(ctx, participants[i].ts, participants[i].sm, clusterTag, myCfg) + if err != nil { + t.Fatal(err) + } + participants[i].c = c + } + + fxRaftConfigContainsAll := func() bool { + for i := 0; i < len(participants); i++ { + fut := participants[i].c.raft.GetConfiguration() + err = fut.Error() + if err != nil { + t.Fatalf("%d: Getting Configuration errored: %v", i, err) + } + if len(fut.Configuration().Servers) != len(participants) { + return false + } + } + return true + } + waitFor(t, "all raft machines have all servers in their config", fxRaftConfigContainsAll, time.Second*2) +} + +func TestApply(t *testing.T) { + testConfig(t) + ctx := context.Background() + clusterTag := "tag:whatever" + ps, _, _ := startNodesAndWaitForPeerStatus(t, ctx, clusterTag, 2) + cfg := warnLogConfig() + createConsensusCluster(t, ctx, clusterTag, ps, cfg) + for _, p := range ps { + defer p.c.Stop(ctx) + } + + fut := ps[0].c.raft.Apply(commandWith(t, "woo"), 2*time.Second) + err := fut.Error() + if err != nil { + t.Fatalf("Raft Apply Error: %v", err) + } + + want := []string{"woo"} + fxBothMachinesHaveTheApply := func() bool { + return ps[0].sm.eventsMatch(want) && ps[1].sm.eventsMatch(want) + } + waitFor(t, "the apply event made it into both state machines", fxBothMachinesHaveTheApply, time.Second*1) +} + +// calls ExecuteCommand on each participant and checks that all participants get all commands +func assertCommandsWorkOnAnyNode(t testing.TB, participants []*participant) { + t.Helper() + want := []string{} + for i, p := range participants { + si := fmt.Sprintf("%d", i) + want = append(want, si) + bs, err := json.Marshal(si) + if err != nil { + t.Fatal(err) + } + res, err := p.c.ExecuteCommand(Command{Args: bs}) + if err != nil { + t.Fatalf("%d: Error ExecuteCommand: %v", i, err) + } + if res.Err != nil { + t.Fatalf("%d: Result Error ExecuteCommand: %v", i, res.Err) + } + var retVal int + err = json.Unmarshal(res.Result, &retVal) + if err != nil { + t.Fatal(err) + } + // the test implementation of the fsm returns the count of events that have been received + if retVal != i+1 { + t.Fatalf("Result, want %d, got %d", i+1, retVal) + } + + fxEventsInAll := func() bool { + for _, pOther := range participants { + if !pOther.sm.eventsMatch(want) { + return false + } + } + return true + } + waitFor(t, "event makes it to all", fxEventsInAll, time.Second*1) + } +} + +func TestConfig(t *testing.T) { + testConfig(t) + ctx := context.Background() + clusterTag := "tag:whatever" + ps, _, _ := startNodesAndWaitForPeerStatus(t, ctx, clusterTag, 3) + cfg := warnLogConfig() + // test all is well with non default ports + cfg.CommandPort = 12347 + cfg.RaftPort = 11882 + mp := uint16(8798) + cfg.MonitorPort = mp + cfg.ServeDebugMonitor = true + createConsensusCluster(t, ctx, clusterTag, ps, cfg) + for _, p := range ps { + defer p.c.Stop(ctx) + } + assertCommandsWorkOnAnyNode(t, ps) + + url := 
fmt.Sprintf("http://%s:%d/", ps[0].c.self.hostAddr.String(), mp) + httpClientOnTailnet := ps[1].ts.HTTPClient() + rsp, err := httpClientOnTailnet.Get(url) + if err != nil { + t.Fatal(err) + } + if rsp.StatusCode != 200 { + t.Fatalf("monitor status want %d, got %d", 200, rsp.StatusCode) + } + defer rsp.Body.Close() + reader := bufio.NewReader(rsp.Body) + line1, err := reader.ReadString('\n') + if err != nil { + t.Fatal(err) + } + // Not a great assertion because it relies on the format of the response. + if !strings.HasPrefix(line1, "RaftState:") { + t.Fatalf("getting monitor status, first line, want something that starts with 'RaftState:', got '%s'", line1) + } +} + +func TestFollowerFailover(t *testing.T) { + testConfig(t) + ctx := context.Background() + clusterTag := "tag:whatever" + ps, _, _ := startNodesAndWaitForPeerStatus(t, ctx, clusterTag, 3) + cfg := warnLogConfig() + createConsensusCluster(t, ctx, clusterTag, ps, cfg) + for _, p := range ps { + defer p.c.Stop(ctx) + } + + smThree := ps[2].sm + + fut := ps[0].c.raft.Apply(commandWith(t, "a"), 2*time.Second) + futTwo := ps[0].c.raft.Apply(commandWith(t, "b"), 2*time.Second) + err := fut.Error() + if err != nil { + t.Fatalf("Apply Raft error %v", err) + } + err = futTwo.Error() + if err != nil { + t.Fatalf("Apply Raft error %v", err) + } + + wantFirstTwoEvents := []string{"a", "b"} + fxAllMachinesHaveTheApplies := func() bool { + return ps[0].sm.eventsMatch(wantFirstTwoEvents) && + ps[1].sm.eventsMatch(wantFirstTwoEvents) && + smThree.eventsMatch(wantFirstTwoEvents) + } + waitFor(t, "the apply events made it into all state machines", fxAllMachinesHaveTheApplies, time.Second*1) + + //a follower goes loses contact with the cluster + ps[2].c.Stop(ctx) + + // applies still make it to one and two + futThree := ps[0].c.raft.Apply(commandWith(t, "c"), 2*time.Second) + futFour := ps[0].c.raft.Apply(commandWith(t, "d"), 2*time.Second) + err = futThree.Error() + if err != nil { + t.Fatalf("Apply Raft error %v", err) + } + err = futFour.Error() + if err != nil { + t.Fatalf("Apply Raft error %v", err) + } + wantFourEvents := []string{"a", "b", "c", "d"} + fxAliveMachinesHaveTheApplies := func() bool { + return ps[0].sm.eventsMatch(wantFourEvents) && + ps[1].sm.eventsMatch(wantFourEvents) && + smThree.eventsMatch(wantFirstTwoEvents) + } + waitFor(t, "the apply events made it into eligible state machines", fxAliveMachinesHaveTheApplies, time.Second*1) + + // follower comes back + smThreeAgain := &fsm{} + cfg = addIDedLogger("2 after restarting", warnLogConfig()) + rThreeAgain, err := Start(ctx, ps[2].ts, smThreeAgain, clusterTag, cfg) + if err != nil { + t.Fatal(err) + } + defer rThreeAgain.Stop(ctx) + fxThreeGetsCaughtUp := func() bool { + return smThreeAgain.eventsMatch(wantFourEvents) + } + waitFor(t, "the apply events made it into the third node when it appeared with an empty state machine", fxThreeGetsCaughtUp, time.Second*2) + if !smThree.eventsMatch(wantFirstTwoEvents) { + t.Fatalf("Expected smThree to remain on 2 events: got %d", smThree.numEvents()) + } +} + +func TestRejoin(t *testing.T) { + testConfig(t) + ctx := context.Background() + clusterTag := "tag:whatever" + ps, control, controlURL := startNodesAndWaitForPeerStatus(t, ctx, clusterTag, 3) + cfg := warnLogConfig() + createConsensusCluster(t, ctx, clusterTag, ps, cfg) + for _, p := range ps { + defer p.c.Stop(ctx) + } + + // 1st node gets a redundant second join request from the second node + ps[0].c.handleJoin(joinRequest{ + RemoteHost: ps[1].c.self.hostAddr.String(), + RemoteID: 
ps[1].c.self.id, + }) + + tsJoiner, keyJoiner, _ := startNode(t, ctx, controlURL, "node joiner") + tagNodes(t, control, []key.NodePublic{keyJoiner}, clusterTag) + waitForNodesToBeTaggedInStatus(t, ctx, ps[0].ts, []key.NodePublic{keyJoiner}, clusterTag) + smJoiner := &fsm{} + cJoiner, err := Start(ctx, tsJoiner, smJoiner, clusterTag, cfg) + if err != nil { + t.Fatal(err) + } + ps = append(ps, &participant{ + sm: smJoiner, + c: cJoiner, + ts: tsJoiner, + key: keyJoiner, + }) + + assertCommandsWorkOnAnyNode(t, ps) +} + +func TestOnlyTaggedPeersCanDialRaftPort(t *testing.T) { + testConfig(t) + ctx := context.Background() + clusterTag := "tag:whatever" + ps, control, controlURL := startNodesAndWaitForPeerStatus(t, ctx, clusterTag, 3) + cfg := warnLogConfig() + createConsensusCluster(t, ctx, clusterTag, ps, cfg) + for _, p := range ps { + defer p.c.Stop(ctx) + } + assertCommandsWorkOnAnyNode(t, ps) + + untaggedNode, _, _ := startNode(t, ctx, controlURL, "untagged node") + + taggedNode, taggedKey, _ := startNode(t, ctx, controlURL, "untagged node") + tagNodes(t, control, []key.NodePublic{taggedKey}, clusterTag) + waitForNodesToBeTaggedInStatus(t, ctx, ps[0].ts, []key.NodePublic{taggedKey}, clusterTag) + + // surface area: command http, peer tcp + //untagged + ipv4, _ := ps[0].ts.TailscaleIPs() + sAddr := fmt.Sprintf("%s:%d", ipv4, cfg.RaftPort) + + getErrorFromTryingToSend := func(s *tsnet.Server) error { + ctx := context.Background() + conn, err := s.Dial(ctx, "tcp", sAddr) + if err != nil { + t.Fatalf("unexpected Dial err: %v", err) + } + fmt.Fprintf(conn, "hellllllloooooo") + status, err := bufio.NewReader(conn).ReadString('\n') + if status != "" { + t.Fatalf("node sending non-raft message should get empty response, got: '%s' for: %s", status, s.Hostname) + } + if err == nil { + t.Fatalf("node sending non-raft message should get an error but got nil err for: %s", s.Hostname) + } + return err + } + + isNetErr := func(err error) bool { + var netErr net.Error + return errors.As(err, &netErr) + } + + err := getErrorFromTryingToSend(untaggedNode) + if !isNetErr(err) { + t.Fatalf("untagged node trying to send should get a net.Error, got: %v", err) + } + // we still get an error trying to send but it's EOF the target node was happy to talk + // to us but couldn't understand what we said. + err = getErrorFromTryingToSend(taggedNode) + if isNetErr(err) { + t.Fatalf("tagged node trying to send should not get a net.Error, got: %v", err) + } +} + +func TestOnlyTaggedPeersCanBeDialed(t *testing.T) { + testConfig(t) + ctx := context.Background() + clusterTag := "tag:whatever" + ps, control, _ := startNodesAndWaitForPeerStatus(t, ctx, clusterTag, 3) + + // make a StreamLayer for ps[0] + ts := ps[0].ts + auth := newAuthorization(ts, clusterTag) + + port := 19841 + lns := make([]net.Listener, 3) + for i, p := range ps { + ln, err := p.ts.Listen("tcp", fmt.Sprintf(":%d", port)) + if err != nil { + t.Fatal(err) + } + lns[i] = ln + } + + sl := StreamLayer{ + s: ts, + Listener: lns[0], + auth: auth, + shutdownCtx: ctx, + } + + ip1, _ := ps[1].ts.TailscaleIPs() + a1 := raft.ServerAddress(fmt.Sprintf("%s:%d", ip1, port)) + + ip2, _ := ps[2].ts.TailscaleIPs() + a2 := raft.ServerAddress(fmt.Sprintf("%s:%d", ip2, port)) + + // both can be dialed... 
+ conn, err := sl.Dial(a1, 2*time.Second) + if err != nil { + t.Fatal(err) + } + conn.Close() + + conn, err = sl.Dial(a2, 2*time.Second) + if err != nil { + t.Fatal(err) + } + conn.Close() + + // untag ps[2] + tagNodes(t, control, []key.NodePublic{ps[2].key}, "") + waitForNodesToBeTaggedInStatus(t, ctx, ps[0].ts, []key.NodePublic{ps[2].key}, "") + + // now only ps[1] can be dialed + conn, err = sl.Dial(a1, 2*time.Second) + if err != nil { + t.Fatal(err) + } + conn.Close() + + _, err = sl.Dial(a2, 2*time.Second) + if err.Error() != "dial: peer is not allowed" { + t.Fatalf("expected dial: peer is not allowed, got: %v", err) + } + +} + +func TestOnlyTaggedPeersCanJoin(t *testing.T) { + testConfig(t) + ctx := context.Background() + clusterTag := "tag:whatever" + ps, _, controlURL := startNodesAndWaitForPeerStatus(t, ctx, clusterTag, 3) + cfg := warnLogConfig() + createConsensusCluster(t, ctx, clusterTag, ps, cfg) + for _, p := range ps { + defer p.c.Stop(ctx) + } + + tsJoiner, _, _ := startNode(t, ctx, controlURL, "joiner node") + + ipv4, _ := tsJoiner.TailscaleIPs() + url := fmt.Sprintf("http://%s/join", ps[0].c.commandAddr(ps[0].c.self.hostAddr)) + payload, err := json.Marshal(joinRequest{ + RemoteHost: ipv4.String(), + RemoteID: "node joiner", + }) + if err != nil { + t.Fatal(err) + } + body := bytes.NewBuffer(payload) + req, err := http.NewRequest("POST", url, body) + if err != nil { + t.Fatal(err) + } + resp, err := tsJoiner.HTTPClient().Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusForbidden { + t.Fatalf("join req when not tagged, expected status: %d, got: %d", http.StatusForbidden, resp.StatusCode) + } + rBody, _ := io.ReadAll(resp.Body) + sBody := strings.TrimSpace(string(rBody)) + expected := "peer not allowed" + if sBody != expected { + t.Fatalf("join req when not tagged, expected body: %s, got: %s", expected, sBody) + } +} From 79ff067db31aaa7c02a410e75800fe8929998874 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 8 Apr 2025 12:34:18 -0700 Subject: [PATCH 0715/1708] cmd/tailscale/cli: prevent all dup flags, not just strings The earlier #15534 prevent some dup string flags. This does it for all flag types. Updates #6813 Change-Id: Iec2871448394ea9a5b604310bdbf7b499434bf01 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/cli.go | 36 ++++++++++ cmd/tailscale/cli/cli_test.go | 120 +++++++++++++++------------------- cmd/tailscale/cli/set.go | 8 +-- cmd/tailscale/cli/set_test.go | 2 +- cmd/tailscale/cli/up.go | 35 +++------- 5 files changed, 101 insertions(+), 100 deletions(-) diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 2a532f9d7..d02db38f1 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -165,6 +165,41 @@ func Run(args []string) (err error) { return err } +type onceFlagValue struct { + flag.Value + set bool +} + +func (v *onceFlagValue) Set(s string) error { + if v.set { + return fmt.Errorf("flag provided multiple times") + } + v.set = true + return v.Value.Set(s) +} + +func (v *onceFlagValue) IsBoolFlag() bool { + type boolFlag interface { + IsBoolFlag() bool + } + bf, ok := v.Value.(boolFlag) + return ok && bf.IsBoolFlag() +} + +// noDupFlagify modifies c recursively to make all the +// flag values be wrappers that permit setting the value +// at most once. 
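+//
+// For example, with the wrappers in place a command line such as
+// "tailscale up --hostname=foo --hostname=bar" now fails to parse with a
+// "flag provided multiple times" error (see TestNoDups below) instead of
+// the flag package silently keeping the last value.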
+func noDupFlagify(c *ffcli.Command) { + if c.FlagSet != nil { + c.FlagSet.VisitAll(func(f *flag.Flag) { + f.Value = &onceFlagValue{Value: f.Value} + }) + } + for _, sub := range c.Subcommands { + noDupFlagify(sub) + } +} + func newRootCmd() *ffcli.Command { rootfs := newFlagSet("tailscale") rootfs.Func("socket", "path to tailscaled socket", func(s string) error { @@ -236,6 +271,7 @@ change in the future. }) ffcomplete.Inject(rootCmd, func(c *ffcli.Command) { c.LongHelp = hidden + c.LongHelp }, usageFunc) + noDupFlagify(rootCmd) return rootCmd } diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 49d8e9c4a..4f6bdab2e 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -657,13 +657,6 @@ func upArgsFromOSArgs(goos string, flagArgs ...string) (args upArgsT) { return } -func newSingleUseStringForTest(v string) singleUseStringFlag { - return singleUseStringFlag{ - set: true, - value: v, - } -} - func TestPrefsFromUpArgs(t *testing.T) { tests := []struct { name string @@ -728,14 +721,14 @@ func TestPrefsFromUpArgs(t *testing.T) { { name: "error_advertise_route_invalid_ip", args: upArgsT{ - advertiseRoutes: newSingleUseStringForTest("foo"), + advertiseRoutes: "foo", }, wantErr: `"foo" is not a valid IP address or CIDR prefix`, }, { name: "error_advertise_route_unmasked_bits", args: upArgsT{ - advertiseRoutes: newSingleUseStringForTest("1.2.3.4/16"), + advertiseRoutes: "1.2.3.4/16", }, wantErr: `1.2.3.4/16 has non-address bits set; expected 1.2.0.0/16`, }, @@ -756,7 +749,7 @@ func TestPrefsFromUpArgs(t *testing.T) { { name: "error_tag_prefix", args: upArgsT{ - advertiseTags: newSingleUseStringForTest("foo"), + advertiseTags: "foo", }, wantErr: `tag: "foo": tags must start with 'tag:'`, }, @@ -836,7 +829,7 @@ func TestPrefsFromUpArgs(t *testing.T) { name: "via_route_good", goos: "linux", args: upArgsT{ - advertiseRoutes: newSingleUseStringForTest("fd7a:115c:a1e0:b1a::bb:10.0.0.0/112"), + advertiseRoutes: "fd7a:115c:a1e0:b1a::bb:10.0.0.0/112", netfilterMode: "off", }, want: &ipn.Prefs{ @@ -855,7 +848,7 @@ func TestPrefsFromUpArgs(t *testing.T) { name: "via_route_good_16_bit", goos: "linux", args: upArgsT{ - advertiseRoutes: newSingleUseStringForTest("fd7a:115c:a1e0:b1a::aabb:10.0.0.0/112"), + advertiseRoutes: "fd7a:115c:a1e0:b1a::aabb:10.0.0.0/112", netfilterMode: "off", }, want: &ipn.Prefs{ @@ -874,7 +867,7 @@ func TestPrefsFromUpArgs(t *testing.T) { name: "via_route_short_prefix", goos: "linux", args: upArgsT{ - advertiseRoutes: newSingleUseStringForTest("fd7a:115c:a1e0:b1a::/64"), + advertiseRoutes: "fd7a:115c:a1e0:b1a::/64", netfilterMode: "off", }, wantErr: "fd7a:115c:a1e0:b1a::/64 4-in-6 prefix must be at least a /96", @@ -883,7 +876,7 @@ func TestPrefsFromUpArgs(t *testing.T) { name: "via_route_short_reserved_siteid", goos: "linux", args: upArgsT{ - advertiseRoutes: newSingleUseStringForTest("fd7a:115c:a1e0:b1a:1234:5678::/112"), + advertiseRoutes: "fd7a:115c:a1e0:b1a:1234:5678::/112", netfilterMode: "off", }, wantErr: "route fd7a:115c:a1e0:b1a:1234:5678::/112 contains invalid site ID 12345678; must be 0xffff or less", @@ -1113,7 +1106,6 @@ func TestUpdatePrefs(t *testing.T) { }, env: upCheckEnv{backendState: "Running"}, }, - { // Issue 3808: explicitly empty --operator= should clear value. name: "explicit_empty_operator", @@ -1507,6 +1499,51 @@ func TestParseNLArgs(t *testing.T) { } } +// makeQuietContinueOnError modifies c recursively to make all the +// flagsets have error mode flag.ContinueOnError and not +// spew all over stderr. 
+func makeQuietContinueOnError(c *ffcli.Command) { + if c.FlagSet != nil { + c.FlagSet.Init(c.Name, flag.ContinueOnError) + c.FlagSet.Usage = func() {} + c.FlagSet.SetOutput(io.Discard) + } + c.UsageFunc = func(*ffcli.Command) string { return "" } + for _, sub := range c.Subcommands { + makeQuietContinueOnError(sub) + } +} + +// see tailscale/tailscale#6813 +func TestNoDups(t *testing.T) { + tests := []struct { + name string + args []string + want string + }{ + { + name: "dup-boolean", + args: []string{"up", "--json", "--json"}, + want: "error parsing commandline arguments: invalid boolean flag json: flag provided multiple times", + }, + { + name: "dup-string", + args: []string{"up", "--hostname=foo", "--hostname=bar"}, + want: "error parsing commandline arguments: invalid value \"bar\" for flag -hostname: flag provided multiple times", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cmd := newRootCmd() + makeQuietContinueOnError(cmd) + err := cmd.Parse(tt.args) + if got := fmt.Sprint(err); got != tt.want { + t.Errorf("got %q, want %q", got, tt.want) + } + }) + } +} + func TestHelpAlias(t *testing.T) { var stdout, stderr bytes.Buffer tstest.Replace[io.Writer](t, &Stdout, &stdout) @@ -1606,56 +1643,3 @@ func TestDepsNoCapture(t *testing.T) { }.Check(t) } - -func TestSingleUseStringFlag(t *testing.T) { - tests := []struct { - name string - setValues []string - wantValue string - wantErr bool - }{ - { - name: "set once", - setValues: []string{"foo"}, - wantValue: "foo", - wantErr: false, - }, - { - name: "set twice", - setValues: []string{"foo", "bar"}, - wantValue: "foo", - wantErr: true, - }, - { - name: "set nothing", - setValues: []string{}, - wantValue: "", - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var flag singleUseStringFlag - var lastErr error - - for _, val := range tt.setValues { - lastErr = flag.Set(val) - } - - if tt.wantErr { - if lastErr == nil { - t.Errorf("expected error on final Set, got nil") - } - } else { - if lastErr != nil { - t.Errorf("unexpected error on final Set: %v", lastErr) - } - } - - if got := flag.String(); got != tt.wantValue { - t.Errorf("String() = %q, want %q", got, tt.wantValue) - } - }) - } -} diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index ab113f6e0..07b3fe9ce 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -49,7 +49,7 @@ type setArgsT struct { runSSH bool runWebClient bool hostname string - advertiseRoutes singleUseStringFlag + advertiseRoutes string advertiseDefaultRoute bool advertiseConnector bool opUser string @@ -75,7 +75,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf.BoolVar(&setArgs.shieldsUp, "shields-up", false, "don't allow incoming connections") setf.BoolVar(&setArgs.runSSH, "ssh", false, "run an SSH server, permitting access per tailnet admin's declared policy") setf.StringVar(&setArgs.hostname, "hostname", "", "hostname to use instead of the one provided by the OS") - setf.Var(&setArgs.advertiseRoutes, "advertise-routes", "routes to advertise to other nodes (comma-separated, e.g. \"10.0.0.0/8,192.168.0.0/24\") or empty string to not advertise routes") + setf.StringVar(&setArgs.advertiseRoutes, "advertise-routes", "", "routes to advertise to other nodes (comma-separated, e.g. 
\"10.0.0.0/8,192.168.0.0/24\") or empty string to not advertise routes") setf.BoolVar(&setArgs.advertiseDefaultRoute, "advertise-exit-node", false, "offer to be an exit node for internet traffic for the tailnet") setf.BoolVar(&setArgs.advertiseConnector, "advertise-connector", false, "offer to be an app connector for domain specific internet traffic for the tailnet") setf.BoolVar(&setArgs.updateCheck, "update-check", true, "notify about available Tailscale updates") @@ -259,11 +259,11 @@ func runSet(ctx context.Context, args []string) (retErr error) { // setArgs is the parsed command-line arguments. func calcAdvertiseRoutesForSet(advertiseExitNodeSet, advertiseRoutesSet bool, curPrefs *ipn.Prefs, setArgs setArgsT) (routes []netip.Prefix, err error) { if advertiseExitNodeSet && advertiseRoutesSet { - return netutil.CalcAdvertiseRoutes(setArgs.advertiseRoutes.String(), setArgs.advertiseDefaultRoute) + return netutil.CalcAdvertiseRoutes(setArgs.advertiseRoutes, setArgs.advertiseDefaultRoute) } if advertiseRoutesSet { - return netutil.CalcAdvertiseRoutes(setArgs.advertiseRoutes.String(), curPrefs.AdvertisesExitNode()) + return netutil.CalcAdvertiseRoutes(setArgs.advertiseRoutes, curPrefs.AdvertisesExitNode()) } if advertiseExitNodeSet { alreadyAdvertisesExitNode := curPrefs.AdvertisesExitNode() diff --git a/cmd/tailscale/cli/set_test.go b/cmd/tailscale/cli/set_test.go index 8ef85be12..a2f211f8c 100644 --- a/cmd/tailscale/cli/set_test.go +++ b/cmd/tailscale/cli/set_test.go @@ -116,7 +116,7 @@ func TestCalcAdvertiseRoutesForSet(t *testing.T) { sa.advertiseDefaultRoute = *tc.setExit } if tc.setRoutes != nil { - sa.advertiseRoutes = newSingleUseStringForTest(*tc.setRoutes) + sa.advertiseRoutes = *tc.setRoutes } got, err := calcAdvertiseRoutesForSet(tc.setExit != nil, tc.setRoutes != nil, curPrefs, sa) if err != nil { diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 22276cd99..26db85f13 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -82,25 +82,6 @@ func acceptRouteDefault(goos string) bool { return p.DefaultRouteAll(goos) } -// singleUseStringFlag will throw an error if the flag is specified more than once. -type singleUseStringFlag struct { - set bool - value string -} - -func (s singleUseStringFlag) String() string { - return s.value -} - -func (s *singleUseStringFlag) Set(v string) error { - if s.set { - return errors.New("flag can only be specified once") - } - s.set = true - s.value = v - return nil -} - var upFlagSet = newUpFlagSet(effectiveGOOS(), &upArgsGlobal, "up") // newUpFlagSet returns a new flag set for the "up" and "login" commands. @@ -123,9 +104,9 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { upf.BoolVar(&upArgs.exitNodeAllowLANAccess, "exit-node-allow-lan-access", false, "Allow direct access to the local network when routing traffic via an exit node") upf.BoolVar(&upArgs.shieldsUp, "shields-up", false, "don't allow incoming connections") upf.BoolVar(&upArgs.runSSH, "ssh", false, "run an SSH server, permitting access per tailnet admin's declared policy") - upf.Var(&upArgs.advertiseTags, "advertise-tags", "comma-separated ACL tags to request; each must start with \"tag:\" (e.g. \"tag:eng,tag:montreal,tag:ssh\")") + upf.StringVar(&upArgs.advertiseTags, "advertise-tags", "", "comma-separated ACL tags to request; each must start with \"tag:\" (e.g. 
\"tag:eng,tag:montreal,tag:ssh\")") upf.StringVar(&upArgs.hostname, "hostname", "", "hostname to use instead of the one provided by the OS") - upf.Var(&upArgs.advertiseRoutes, "advertise-routes", "routes to advertise to other nodes (comma-separated, e.g. \"10.0.0.0/8,192.168.0.0/24\") or empty string to not advertise routes") + upf.StringVar(&upArgs.advertiseRoutes, "advertise-routes", "", "routes to advertise to other nodes (comma-separated, e.g. \"10.0.0.0/8,192.168.0.0/24\") or empty string to not advertise routes") upf.BoolVar(&upArgs.advertiseConnector, "advertise-connector", false, "advertise this node as an app connector") upf.BoolVar(&upArgs.advertiseDefaultRoute, "advertise-exit-node", false, "offer to be an exit node for internet traffic for the tailnet") upf.BoolVar(&upArgs.postureChecking, "posture-checking", false, hidden+"allow management plane to gather device posture information") @@ -193,9 +174,9 @@ type upArgsT struct { runWebClient bool forceReauth bool forceDaemon bool - advertiseRoutes singleUseStringFlag + advertiseRoutes string advertiseDefaultRoute bool - advertiseTags singleUseStringFlag + advertiseTags string advertiseConnector bool snat bool statefulFiltering bool @@ -263,7 +244,7 @@ func warnf(format string, args ...any) { // function exists for testing and should have no side effects or // outside interactions (e.g. no making Tailscale LocalAPI calls). func prefsFromUpArgs(upArgs upArgsT, warnf logger.Logf, st *ipnstate.Status, goos string) (*ipn.Prefs, error) { - routes, err := netutil.CalcAdvertiseRoutes(upArgs.advertiseRoutes.String(), upArgs.advertiseDefaultRoute) + routes, err := netutil.CalcAdvertiseRoutes(upArgs.advertiseRoutes, upArgs.advertiseDefaultRoute) if err != nil { return nil, err } @@ -273,8 +254,8 @@ func prefsFromUpArgs(upArgs upArgsT, warnf logger.Logf, st *ipnstate.Status, goo } var tags []string - if upArgs.advertiseTags.String() != "" { - tags = strings.Split(upArgs.advertiseTags.String(), ",") + if upArgs.advertiseTags != "" { + tags = strings.Split(upArgs.advertiseTags, ",") for _, tag := range tags { err := tailcfg.CheckTag(tag) if err != nil { @@ -574,7 +555,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if err != nil { return err } - authKey, err = resolveAuthKey(ctx, authKey, upArgs.advertiseTags.String()) + authKey, err = resolveAuthKey(ctx, authKey, upArgs.advertiseTags) if err != nil { return err } From fd580611bdf2e77f5861cb52c09ae6f051e85dc2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 8 Apr 2025 15:11:23 -0700 Subject: [PATCH 0716/1708] ipn: use slices.Equal in another spot Updates #8632 Change-Id: I91edd800f97eb0bf9a00866a1e39effc5e4f4e94 Signed-off-by: Brad Fitzpatrick --- ipn/prefs.go | 30 +++--------------------------- 1 file changed, 3 insertions(+), 27 deletions(-) diff --git a/ipn/prefs.go b/ipn/prefs.go index 98f04dfa9..5b3e95b33 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -607,9 +607,9 @@ func (p *Prefs) Equals(p2 *Prefs) bool { p.OperatorUser == p2.OperatorUser && p.Hostname == p2.Hostname && p.ForceDaemon == p2.ForceDaemon && - compareIPNets(p.AdvertiseRoutes, p2.AdvertiseRoutes) && - compareStrings(p.AdvertiseTags, p2.AdvertiseTags) && - compareStrings(p.AdvertiseServices, p2.AdvertiseServices) && + slices.Equal(p.AdvertiseRoutes, p2.AdvertiseRoutes) && + slices.Equal(p.AdvertiseTags, p2.AdvertiseTags) && + slices.Equal(p.AdvertiseServices, p2.AdvertiseServices) && p.Persist.Equals(p2.Persist) && p.ProfileName == p2.ProfileName && 
p.AutoUpdate.Equals(p2.AutoUpdate) && @@ -636,30 +636,6 @@ func (ap AppConnectorPrefs) Pretty() string { return "" } -func compareIPNets(a, b []netip.Prefix) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false - } - } - return true -} - -func compareStrings(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false - } - } - return true -} - // NewPrefs returns the default preferences to use. func NewPrefs() *Prefs { // Provide default values for options which might be missing From de949b050e4c5b81c47e3f30762ea3feca0f4076 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 8 Apr 2025 23:34:23 +0100 Subject: [PATCH 0717/1708] cmd/containerboot: speed up tests (#14883) The test suite had grown to about 20s on my machine, but it doesn't do much taxing work so was a good candidate to parallelise. Now runs in under 2s on my machine. Updates #cleanup Change-Id: I2fcc6be9ca226c74c0cb6c906778846e959492e4 Signed-off-by: Tom Proctor --- cmd/containerboot/main_test.go | 1558 ++++++++++++++++--------------- cmd/containerboot/services.go | 12 +- cmd/containerboot/tailscaled.go | 4 +- 3 files changed, 812 insertions(+), 762 deletions(-) diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index bc158dac5..a0ccce3dd 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -41,23 +41,34 @@ import ( "tailscale.com/types/ptr" ) -func TestContainerBoot(t *testing.T) { +// testEnv represents the environment needed for a single sub-test so that tests +// can run in parallel. +type testEnv struct { + kube *kubeServer // Fake kube server. + lapi *localAPI // Local TS API server. + d string // Temp dir for the specific test. + argFile string // File with commands test_tailscale{,d}.sh were invoked with. + runningSockPath string // Path to the running tailscaled socket. + localAddrPort int // Port for the containerboot HTTP server. + healthAddrPort int // Port for the (deprecated) containerboot health server. +} + +func newTestEnv(t *testing.T) testEnv { d := t.TempDir() lapi := localAPI{FSRoot: d} if err := lapi.Start(); err != nil { t.Fatal(err) } - defer lapi.Close() + t.Cleanup(lapi.Close) kube := kubeServer{FSRoot: d} kube.Start(t) - defer kube.Close() + t.Cleanup(kube.Close) tailscaledConf := &ipn.ConfigVAlpha{AuthKey: ptr.To("foo"), Version: "alpha0"} serveConf := ipn.ServeConfig{TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}} egressCfg := egressSvcConfig("foo", "foo.tailnetxyz.ts.net") - egressStatus := egressSvcStatus("foo", "foo.tailnetxyz.ts.net") dirs := []string{ "var/lib", @@ -86,22 +97,14 @@ func TestContainerBoot(t *testing.T) { filepath.Join("etc/tailscaled/", egressservices.KeyEgressServices): mustJSON(t, egressCfg), filepath.Join("etc/tailscaled/", egressservices.KeyHEPPings): []byte("4"), } - resetFiles := func() { - for path, content := range files { - // Making everything executable is a little weird, but the - // stuff that doesn't need to be executable doesn't care if we - // do make it executable. - if err := os.WriteFile(filepath.Join(d, path), content, 0700); err != nil { - t.Fatal(err) - } + for path, content := range files { + // Making everything executable is a little weird, but the + // stuff that doesn't need to be executable doesn't care if we + // do make it executable. 
+ if err := os.WriteFile(filepath.Join(d, path), content, 0700); err != nil { + t.Fatal(err) } } - resetFiles() - - boot := filepath.Join(d, "containerboot") - if err := exec.Command("go", "build", "-o", boot, "tailscale.com/cmd/containerboot").Run(); err != nil { - t.Fatalf("Building containerboot: %v", err) - } argFile := filepath.Join(d, "args") runningSockPath := filepath.Join(d, "tmp/tailscaled.sock") @@ -117,6 +120,25 @@ func TestContainerBoot(t *testing.T) { port := ln.Addr().(*net.TCPAddr).Port *p = port } + + return testEnv{ + kube: &kube, + lapi: &lapi, + d: d, + argFile: argFile, + runningSockPath: runningSockPath, + localAddrPort: localAddrPort, + healthAddrPort: healthAddrPort, + } +} + +func TestContainerBoot(t *testing.T) { + boot := filepath.Join(t.TempDir(), "containerboot") + if err := exec.Command("go", "build", "-ldflags", "-X main.testSleepDuration=1ms", "-o", boot, "tailscale.com/cmd/containerboot").Run(); err != nil { + t.Fatalf("Building containerboot: %v", err) + } + egressStatus := egressSvcStatus("foo", "foo.tailnetxyz.ts.net") + metricsURL := func(port int) string { return fmt.Sprintf("http://127.0.0.1:%d/metrics", port) } @@ -173,869 +195,900 @@ func TestContainerBoot(t *testing.T) { }).View(), }, } - tests := []struct { - Name string + type testCase struct { Env map[string]string KubeSecret map[string]string KubeDenyPatch bool Phases []phase - }{ - { - // Out of the box default: runs in userspace mode, ephemeral storage, interactive login. - Name: "no_args", - Env: nil, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", + } + tests := map[string]func(env *testEnv) testCase{ + "no_args": func(env *testEnv) testCase { + return testCase{ + // Out of the box default: runs in userspace mode, ephemeral storage, interactive login. + Env: nil, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", + }, + // No metrics or health by default. + EndpointStatuses: map[string]int{ + metricsURL(9002): -1, + healthURL(9002): -1, + }, }, - // No metrics or health by default. - EndpointStatuses: map[string]int{ - metricsURL(9002): -1, - healthURL(9002): -1, + { + Notify: runningNotify, }, }, - { - Notify: runningNotify, - }, - }, + } }, - { - // Userspace mode, ephemeral storage, authkey provided on every run. - Name: "authkey", - Env: map[string]string{ - "TS_AUTHKEY": "tskey-key", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", - }, + "authkey": func(env *testEnv) testCase { + return testCase{ + // Userspace mode, ephemeral storage, authkey provided on every run. + Env: map[string]string{ + "TS_AUTHKEY": "tskey-key", }, - { - Notify: runningNotify, - }, - }, - }, - { - // Userspace mode, ephemeral storage, authkey provided on every run. 
- Name: "authkey-old-flag", - Env: map[string]string{ - "TS_AUTH_KEY": "tskey-key", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + }, + { + Notify: runningNotify, }, }, - { - Notify: runningNotify, - }, - }, + } }, - { - Name: "authkey_disk_state", - Env: map[string]string{ - "TS_AUTHKEY": "tskey-key", - "TS_STATE_DIR": filepath.Join(d, "tmp"), - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", - }, + "authkey_old_flag": func(env *testEnv) testCase { + return testCase{ + // Userspace mode, ephemeral storage, authkey provided on every run. + Env: map[string]string{ + "TS_AUTH_KEY": "tskey-key", }, - { - Notify: runningNotify, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + }, + { + Notify: runningNotify, + }, }, - }, + } }, - { - Name: "routes", - Env: map[string]string{ - "TS_AUTHKEY": "tskey-key", - "TS_ROUTES": "1.2.3.0/24,10.20.30.0/24", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=1.2.3.0/24,10.20.30.0/24", - }, + "authkey_disk_state": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_AUTHKEY": "tskey-key", + "TS_STATE_DIR": filepath.Join(env.d, "tmp"), }, - { - Notify: runningNotify, - WantFiles: map[string]string{ - "proc/sys/net/ipv4/ip_forward": "0", - "proc/sys/net/ipv6/conf/all/forwarding": "0", + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + }, + { + Notify: runningNotify, }, }, - }, + } }, - { - Name: "empty routes", - Env: map[string]string{ - "TS_AUTHKEY": "tskey-key", - "TS_ROUTES": "", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=", - }, + "routes": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_AUTHKEY": "tskey-key", + "TS_ROUTES": "1.2.3.0/24,10.20.30.0/24", }, - { - Notify: runningNotify, - WantFiles: map[string]string{ - "proc/sys/net/ipv4/ip_forward": "0", - "proc/sys/net/ipv6/conf/all/forwarding": "0", + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale 
--socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=1.2.3.0/24,10.20.30.0/24", + }, + }, + { + Notify: runningNotify, + WantFiles: map[string]string{ + "proc/sys/net/ipv4/ip_forward": "0", + "proc/sys/net/ipv6/conf/all/forwarding": "0", + }, }, }, - }, + } }, - { - Name: "routes_kernel_ipv4", - Env: map[string]string{ - "TS_AUTHKEY": "tskey-key", - "TS_ROUTES": "1.2.3.0/24,10.20.30.0/24", - "TS_USERSPACE": "false", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=1.2.3.0/24,10.20.30.0/24", - }, + "empty_routes": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_AUTHKEY": "tskey-key", + "TS_ROUTES": "", }, - { - Notify: runningNotify, - WantFiles: map[string]string{ - "proc/sys/net/ipv4/ip_forward": "1", - "proc/sys/net/ipv6/conf/all/forwarding": "0", + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=", + }, + }, + { + Notify: runningNotify, + WantFiles: map[string]string{ + "proc/sys/net/ipv4/ip_forward": "0", + "proc/sys/net/ipv6/conf/all/forwarding": "0", + }, }, }, - }, + } }, - { - Name: "routes_kernel_ipv6", - Env: map[string]string{ - "TS_AUTHKEY": "tskey-key", - "TS_ROUTES": "::/64,1::/64", - "TS_USERSPACE": "false", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=::/64,1::/64", - }, + "routes_kernel_ipv4": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_AUTHKEY": "tskey-key", + "TS_ROUTES": "1.2.3.0/24,10.20.30.0/24", + "TS_USERSPACE": "false", }, - { - Notify: runningNotify, - WantFiles: map[string]string{ - "proc/sys/net/ipv4/ip_forward": "0", - "proc/sys/net/ipv6/conf/all/forwarding": "1", + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=1.2.3.0/24,10.20.30.0/24", + }, + }, + { + Notify: runningNotify, + WantFiles: map[string]string{ + "proc/sys/net/ipv4/ip_forward": "1", + "proc/sys/net/ipv6/conf/all/forwarding": "0", + }, }, }, - }, + } }, - { - Name: "routes_kernel_all_families", - Env: map[string]string{ - "TS_AUTHKEY": "tskey-key", - "TS_ROUTES": "::/64,1.2.3.0/24", - "TS_USERSPACE": "false", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=::/64,1.2.3.0/24", - }, + "routes_kernel_ipv6": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_AUTHKEY": "tskey-key", + "TS_ROUTES": "::/64,1::/64", + "TS_USERSPACE": "false", }, - { - Notify: runningNotify, - WantFiles: map[string]string{ - "proc/sys/net/ipv4/ip_forward": "1", - "proc/sys/net/ipv6/conf/all/forwarding": "1", + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled 
--socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=::/64,1::/64", + }, + }, + { + Notify: runningNotify, + WantFiles: map[string]string{ + "proc/sys/net/ipv4/ip_forward": "0", + "proc/sys/net/ipv6/conf/all/forwarding": "1", + }, }, }, - }, + } }, - { - Name: "ingress proxy", - Env: map[string]string{ - "TS_AUTHKEY": "tskey-key", - "TS_DEST_IP": "1.2.3.4", - "TS_USERSPACE": "false", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", - }, + "routes_kernel_all_families": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_AUTHKEY": "tskey-key", + "TS_ROUTES": "::/64,1.2.3.0/24", + "TS_USERSPACE": "false", }, - { - Notify: runningNotify, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=::/64,1.2.3.0/24", + }, + }, + { + Notify: runningNotify, + WantFiles: map[string]string{ + "proc/sys/net/ipv4/ip_forward": "1", + "proc/sys/net/ipv6/conf/all/forwarding": "1", + }, + }, }, - }, + } }, - { - Name: "egress proxy", - Env: map[string]string{ - "TS_AUTHKEY": "tskey-key", - "TS_TAILNET_TARGET_IP": "100.99.99.99", - "TS_USERSPACE": "false", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + "ingress_proxy": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_AUTHKEY": "tskey-key", + "TS_DEST_IP": "1.2.3.4", + "TS_USERSPACE": "false", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, }, - WantFiles: map[string]string{ - "proc/sys/net/ipv4/ip_forward": "1", - "proc/sys/net/ipv6/conf/all/forwarding": "0", + { + Notify: runningNotify, }, }, - { - Notify: runningNotify, - }, - }, + } }, - { - Name: "egress_proxy_fqdn_ipv6_target_on_ipv4_host", - Env: map[string]string{ - "TS_AUTHKEY": "tskey-key", - "TS_TAILNET_TARGET_FQDN": "ipv6-node.test.ts.net", // resolves to IPv6 address - "TS_USERSPACE": "false", - "TS_TEST_FAKE_NETFILTER_6": "false", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + "egress_proxy": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_AUTHKEY": "tskey-key", + "TS_TAILNET_TARGET_IP": "100.99.99.99", + "TS_USERSPACE": "false", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + WantFiles: map[string]string{ + "proc/sys/net/ipv4/ip_forward": "1", + "proc/sys/net/ipv6/conf/all/forwarding": "0", + }, }, - WantFiles: map[string]string{ - "proc/sys/net/ipv4/ip_forward": "1", - 
"proc/sys/net/ipv6/conf/all/forwarding": "0", + { + Notify: runningNotify, }, }, - { - Notify: &ipn.Notify{ - State: ptr.To(ipn.Running), - NetMap: &netmap.NetworkMap{ - SelfNode: (&tailcfg.Node{ - StableID: tailcfg.StableNodeID("myID"), - Name: "test-node.test.ts.net", - Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")}, - }).View(), - Peers: []tailcfg.NodeView{ - (&tailcfg.Node{ - StableID: tailcfg.StableNodeID("ipv6ID"), - Name: "ipv6-node.test.ts.net", - Addresses: []netip.Prefix{netip.MustParsePrefix("::1/128")}, + } + }, + "egress_proxy_fqdn_ipv6_target_on_ipv4_host": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_AUTHKEY": "tskey-key", + "TS_TAILNET_TARGET_FQDN": "ipv6-node.test.ts.net", // resolves to IPv6 address + "TS_USERSPACE": "false", + "TS_TEST_FAKE_NETFILTER_6": "false", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + WantFiles: map[string]string{ + "proc/sys/net/ipv4/ip_forward": "1", + "proc/sys/net/ipv6/conf/all/forwarding": "0", + }, + }, + { + Notify: &ipn.Notify{ + State: ptr.To(ipn.Running), + NetMap: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + StableID: tailcfg.StableNodeID("myID"), + Name: "test-node.test.ts.net", + Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")}, }).View(), + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + StableID: tailcfg.StableNodeID("ipv6ID"), + Name: "ipv6-node.test.ts.net", + Addresses: []netip.Prefix{netip.MustParsePrefix("::1/128")}, + }).View(), + }, }, }, + WantLog: "no forwarding rules for egress addresses [::1/128], host supports IPv6: false", + WantExitCode: ptr.To(1), }, - WantLog: "no forwarding rules for egress addresses [::1/128], host supports IPv6: false", - WantExitCode: ptr.To(1), }, - }, + } }, - { - Name: "authkey_once", - Env: map[string]string{ - "TS_AUTHKEY": "tskey-key", - "TS_AUTH_ONCE": "true", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", - }, + "authkey_once": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_AUTHKEY": "tskey-key", + "TS_AUTH_ONCE": "true", }, - { - Notify: &ipn.Notify{ - State: ptr.To(ipn.NeedsLogin), + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + }, }, - WantCmds: []string{ - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + { + Notify: &ipn.Notify{ + State: ptr.To(ipn.NeedsLogin), + }, + WantCmds: []string{ + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, }, - }, - { - Notify: runningNotify, - WantCmds: []string{ - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false", + { + Notify: runningNotify, + WantCmds: []string{ + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false", + }, }, }, - }, + } }, - { - Name: "kube_storage", - Env: map[string]string{ - "KUBERNETES_SERVICE_HOST": kube.Host, - "KUBERNETES_SERVICE_PORT_HTTPS": kube.Port, - }, - KubeSecret: map[string]string{ - "authkey": "tskey-key", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp 
--tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", - }, - WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - }, + "kube_storage": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "KUBERNETES_SERVICE_HOST": env.kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, }, - { - Notify: runningNotify, - WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, - }, + KubeSecret: map[string]string{ + "authkey": "tskey-key", }, - }, - }, - { - Name: "kube_disk_storage", - Env: map[string]string{ - "KUBERNETES_SERVICE_HOST": kube.Host, - "KUBERNETES_SERVICE_PORT_HTTPS": kube.Port, - // Explicitly set to an empty value, to override the default of "tailscale". - "TS_KUBE_SECRET": "", - "TS_STATE_DIR": filepath.Join(d, "tmp"), - "TS_AUTHKEY": "tskey-key", - }, - KubeSecret: map[string]string{}, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + }, + }, + { + Notify: runningNotify, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + "tailscale_capver": capver, + }, }, - WantKubeSecret: map[string]string{}, - }, - { - Notify: runningNotify, - WantKubeSecret: map[string]string{}, }, - }, + } }, - { - Name: "kube_storage_no_patch", - Env: map[string]string{ - "KUBERNETES_SERVICE_HOST": kube.Host, - "KUBERNETES_SERVICE_PORT_HTTPS": kube.Port, - "TS_AUTHKEY": "tskey-key", - }, - KubeSecret: map[string]string{}, - KubeDenyPatch: true, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", - }, - WantKubeSecret: map[string]string{}, + "kube_disk_storage": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "KUBERNETES_SERVICE_HOST": env.kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, + // Explicitly set to an empty value, to override the default of "tailscale". + "TS_KUBE_SECRET": "", + "TS_STATE_DIR": filepath.Join(env.d, "tmp"), + "TS_AUTHKEY": "tskey-key", }, - { - Notify: runningNotify, - WantKubeSecret: map[string]string{}, + KubeSecret: map[string]string{}, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + WantKubeSecret: map[string]string{}, + }, + { + Notify: runningNotify, + WantKubeSecret: map[string]string{}, + }, }, - }, + } }, - { - // Same as previous, but deletes the authkey from the kube secret. 
- Name: "kube_storage_auth_once", - Env: map[string]string{ - "KUBERNETES_SERVICE_HOST": kube.Host, - "KUBERNETES_SERVICE_PORT_HTTPS": kube.Port, - "TS_AUTH_ONCE": "true", - }, - KubeSecret: map[string]string{ - "authkey": "tskey-key", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", + "kube_storage_no_patch": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "KUBERNETES_SERVICE_HOST": env.kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, + "TS_AUTHKEY": "tskey-key", + }, + KubeSecret: map[string]string{}, + KubeDenyPatch: true, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + WantKubeSecret: map[string]string{}, }, - WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + { + Notify: runningNotify, + WantKubeSecret: map[string]string{}, }, }, - { - Notify: &ipn.Notify{ - State: ptr.To(ipn.NeedsLogin), + } + }, + "kube_storage_auth_once": func(env *testEnv) testCase { + return testCase{ + // Same as previous, but deletes the authkey from the kube secret. + Env: map[string]string{ + "KUBERNETES_SERVICE_HOST": env.kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, + "TS_AUTH_ONCE": "true", + }, + KubeSecret: map[string]string{ + "authkey": "tskey-key", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", + }, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + }, }, - WantCmds: []string{ - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + { + Notify: &ipn.Notify{ + State: ptr.To(ipn.NeedsLogin), + }, + WantCmds: []string{ + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + }, }, - WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + { + Notify: runningNotify, + WantCmds: []string{ + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false", + }, + WantKubeSecret: map[string]string{ + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + "tailscale_capver": capver, + }, }, }, - { - Notify: runningNotify, - WantCmds: []string{ - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false", + } + }, + "kube_storage_updates": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "KUBERNETES_SERVICE_HOST": env.kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, + }, + KubeSecret: map[string]string{ + "authkey": "tskey-key", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + }, }, - WantKubeSecret: map[string]string{ - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, + { + Notify: runningNotify, + WantKubeSecret: map[string]string{ + "authkey": 
"tskey-key", + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + "tailscale_capver": capver, + }, + }, + { + Notify: &ipn.Notify{ + State: ptr.To(ipn.Running), + NetMap: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + StableID: tailcfg.StableNodeID("newID"), + Name: "new-name.test.ts.net", + Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")}, + }).View(), + }, + }, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + "device_fqdn": "new-name.test.ts.net", + "device_id": "newID", + "device_ips": `["100.64.0.1"]`, + "tailscale_capver": capver, + }, }, }, - }, + } }, - { - Name: "kube_storage_updates", - Env: map[string]string{ - "KUBERNETES_SERVICE_HOST": kube.Host, - "KUBERNETES_SERVICE_PORT_HTTPS": kube.Port, - }, - KubeSecret: map[string]string{ - "authkey": "tskey-key", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + "proxies": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_SOCKS5_SERVER": "localhost:1080", + "TS_OUTBOUND_HTTP_PROXY_LISTEN": "localhost:8080", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking --socks5-server=localhost:1080 --outbound-http-proxy-listen=localhost:8080", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", + }, }, - WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + { + Notify: runningNotify, }, }, - { - Notify: runningNotify, - WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, - }, + } + }, + "dns": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_ACCEPT_DNS": "true", }, - { - Notify: &ipn.Notify{ - State: ptr.To(ipn.Running), - NetMap: &netmap.NetworkMap{ - SelfNode: (&tailcfg.Node{ - StableID: tailcfg.StableNodeID("newID"), - Name: "new-name.test.ts.net", - Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")}, - }).View(), + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=true", }, }, - WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "new-name.test.ts.net", - "device_id": "newID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, + { + Notify: runningNotify, }, }, - }, + } }, - { - Name: "proxies", - Env: map[string]string{ - "TS_SOCKS5_SERVER": "localhost:1080", - "TS_OUTBOUND_HTTP_PROXY_LISTEN": "localhost:8080", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking --socks5-server=localhost:1080 --outbound-http-proxy-listen=localhost:8080", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", - }, + "extra_args": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_EXTRA_ARGS": "--widget=rotated", + "TS_TAILSCALED_EXTRA_ARGS": "--experiments=widgets", }, - { - Notify: runningNotify, + Phases: []phase{ + { + WantCmds: []string{ + 
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking --experiments=widgets", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --widget=rotated", + }, + }, { + Notify: runningNotify, + }, }, - }, + } }, - { - Name: "dns", - Env: map[string]string{ - "TS_ACCEPT_DNS": "true", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=true", - }, + "extra_args_accept_routes": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_EXTRA_ARGS": "--accept-routes", }, - { - Notify: runningNotify, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --accept-routes", + }, + }, { + Notify: runningNotify, + }, }, - }, + } }, - { - Name: "extra_args", - Env: map[string]string{ - "TS_EXTRA_ARGS": "--widget=rotated", - "TS_TAILSCALED_EXTRA_ARGS": "--experiments=widgets", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking --experiments=widgets", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --widget=rotated", + "hostname": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_HOSTNAME": "my-server", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --hostname=my-server", + }, + }, { + Notify: runningNotify, }, - }, { - Notify: runningNotify, }, - }, + } }, - { - Name: "extra_args_accept_routes", - Env: map[string]string{ - "TS_EXTRA_ARGS": "--accept-routes", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --accept-routes", + "experimental_tailscaled_config_path": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR": filepath.Join(env.d, "etc/tailscaled/"), + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking --config=/etc/tailscaled/cap-95.hujson", + }, + }, { + Notify: runningNotify, }, - }, { - Notify: runningNotify, }, - }, + } }, - { - Name: "hostname", - Env: map[string]string{ - "TS_HOSTNAME": "my-server", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --hostname=my-server", + "metrics_enabled": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", env.localAddrPort), + "TS_ENABLE_METRICS": "true", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up 
--accept-dns=false", + }, + EndpointStatuses: map[string]int{ + metricsURL(env.localAddrPort): 200, + healthURL(env.localAddrPort): -1, + }, + }, { + Notify: runningNotify, }, - }, { - Notify: runningNotify, }, - }, + } }, - { - Name: "experimental tailscaled config path", - Env: map[string]string{ - "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR": filepath.Join(d, "etc/tailscaled/"), - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking --config=/etc/tailscaled/cap-95.hujson", + "health_enabled": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", env.localAddrPort), + "TS_ENABLE_HEALTH_CHECK": "true", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", + }, + EndpointStatuses: map[string]int{ + metricsURL(env.localAddrPort): -1, + healthURL(env.localAddrPort): 503, // Doesn't start passing until the next phase. + }, + }, { + Notify: runningNotify, + EndpointStatuses: map[string]int{ + metricsURL(env.localAddrPort): -1, + healthURL(env.localAddrPort): 200, + }, }, - }, { - Notify: runningNotify, }, - }, + } }, - { - Name: "metrics_enabled", - Env: map[string]string{ - "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort), - "TS_ENABLE_METRICS": "true", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", - }, - EndpointStatuses: map[string]int{ - metricsURL(localAddrPort): 200, - healthURL(localAddrPort): -1, + "metrics_and_health_on_same_port": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", env.localAddrPort), + "TS_ENABLE_METRICS": "true", + "TS_ENABLE_HEALTH_CHECK": "true", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", + }, + EndpointStatuses: map[string]int{ + metricsURL(env.localAddrPort): 200, + healthURL(env.localAddrPort): 503, // Doesn't start passing until the next phase. + }, + }, { + Notify: runningNotify, + EndpointStatuses: map[string]int{ + metricsURL(env.localAddrPort): 200, + healthURL(env.localAddrPort): 200, + }, }, - }, { - Notify: runningNotify, }, - }, + } }, - { - Name: "health_enabled", - Env: map[string]string{ - "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort), - "TS_ENABLE_HEALTH_CHECK": "true", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", - }, - EndpointStatuses: map[string]int{ - metricsURL(localAddrPort): -1, - healthURL(localAddrPort): 503, // Doesn't start passing until the next phase. 
- }, - }, { - Notify: runningNotify, - EndpointStatuses: map[string]int{ - metricsURL(localAddrPort): -1, - healthURL(localAddrPort): 200, + "local_metrics_and_deprecated_health": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", env.localAddrPort), + "TS_ENABLE_METRICS": "true", + "TS_HEALTHCHECK_ADDR_PORT": fmt.Sprintf("[::]:%d", env.healthAddrPort), + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", + }, + EndpointStatuses: map[string]int{ + metricsURL(env.localAddrPort): 200, + healthURL(env.healthAddrPort): 503, // Doesn't start passing until the next phase. + }, + }, { + Notify: runningNotify, + EndpointStatuses: map[string]int{ + metricsURL(env.localAddrPort): 200, + healthURL(env.healthAddrPort): 200, + }, }, }, - }, + } }, - { - Name: "metrics_and_health_on_same_port", - Env: map[string]string{ - "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort), - "TS_ENABLE_METRICS": "true", - "TS_ENABLE_HEALTH_CHECK": "true", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", - }, - EndpointStatuses: map[string]int{ - metricsURL(localAddrPort): 200, - healthURL(localAddrPort): 503, // Doesn't start passing until the next phase. + "serve_config_no_kube": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_SERVE_CONFIG": filepath.Join(env.d, "etc/tailscaled/serve-config.json"), + "TS_AUTHKEY": "tskey-key", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, }, - }, { - Notify: runningNotify, - EndpointStatuses: map[string]int{ - metricsURL(localAddrPort): 200, - healthURL(localAddrPort): 200, + { + Notify: runningNotify, }, }, - }, + } }, - { - Name: "local_metrics_and_deprecated_health", - Env: map[string]string{ - "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort), - "TS_ENABLE_METRICS": "true", - "TS_HEALTHCHECK_ADDR_PORT": fmt.Sprintf("[::]:%d", healthAddrPort), - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", - }, - EndpointStatuses: map[string]int{ - metricsURL(localAddrPort): 200, - healthURL(healthAddrPort): 503, // Doesn't start passing until the next phase. 
+ "serve_config_kube": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "KUBERNETES_SERVICE_HOST": env.kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, + "TS_SERVE_CONFIG": filepath.Join(env.d, "etc/tailscaled/serve-config.json"), + }, + KubeSecret: map[string]string{ + "authkey": "tskey-key", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + }, }, - }, { - Notify: runningNotify, - EndpointStatuses: map[string]int{ - metricsURL(localAddrPort): 200, - healthURL(healthAddrPort): 200, + { + Notify: runningNotify, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + "https_endpoint": "no-https", + "tailscale_capver": capver, + }, }, }, - }, + } }, - { - Name: "serve_config_no_kube", - Env: map[string]string{ - "TS_SERVE_CONFIG": filepath.Join(d, "etc/tailscaled/serve-config.json"), - "TS_AUTHKEY": "tskey-key", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", - }, + "egress_svcs_config_kube": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "KUBERNETES_SERVICE_HOST": env.kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, + "TS_EGRESS_PROXIES_CONFIG_PATH": filepath.Join(env.d, "etc/tailscaled"), + "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", env.localAddrPort), }, - { - Notify: runningNotify, + KubeSecret: map[string]string{ + "authkey": "tskey-key", }, - }, - }, - { - Name: "serve_config_kube", - Env: map[string]string{ - "KUBERNETES_SERVICE_HOST": kube.Host, - "KUBERNETES_SERVICE_PORT_HTTPS": kube.Port, - "TS_SERVE_CONFIG": filepath.Join(d, "etc/tailscaled/serve-config.json"), - }, - KubeSecret: map[string]string{ - "authkey": "tskey-key", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", - }, - WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + }, + EndpointStatuses: map[string]int{ + egressSvcTerminateURL(env.localAddrPort): 200, + }, }, - }, - { - Notify: runningNotify, - WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "https_endpoint": "no-https", - "tailscale_capver": capver, + { + Notify: runningNotify, + WantKubeSecret: map[string]string{ + "egress-services": mustBase64(t, egressStatus), + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + "tailscale_capver": capver, + }, + 
EndpointStatuses: map[string]int{ + egressSvcTerminateURL(env.localAddrPort): 200, + }, }, }, - }, + } }, - { - Name: "egress_svcs_config_kube", - Env: map[string]string{ - "KUBERNETES_SERVICE_HOST": kube.Host, - "KUBERNETES_SERVICE_PORT_HTTPS": kube.Port, - "TS_EGRESS_PROXIES_CONFIG_PATH": filepath.Join(d, "etc/tailscaled"), - "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort), - }, - KubeSecret: map[string]string{ - "authkey": "tskey-key", - }, - Phases: []phase{ - { - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", - }, - WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - }, - EndpointStatuses: map[string]int{ - egressSvcTerminateURL(localAddrPort): 200, - }, + "egress_svcs_config_no_kube": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_EGRESS_PROXIES_CONFIG_PATH": filepath.Join(env.d, "etc/tailscaled"), + "TS_AUTHKEY": "tskey-key", }, - { - Notify: runningNotify, - WantKubeSecret: map[string]string{ - "egress-services": mustBase64(t, egressStatus), - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, - }, - EndpointStatuses: map[string]int{ - egressSvcTerminateURL(localAddrPort): 200, + Phases: []phase{ + { + WantLog: "TS_EGRESS_PROXIES_CONFIG_PATH is only supported for Tailscale running on Kubernetes", + WantExitCode: ptr.To(1), }, }, - }, + } }, - { - Name: "egress_svcs_config_no_kube", - Env: map[string]string{ - "TS_EGRESS_PROXIES_CONFIG_PATH": filepath.Join(d, "etc/tailscaled"), - "TS_AUTHKEY": "tskey-key", - }, - Phases: []phase{ - { - WantLog: "TS_EGRESS_PROXIES_CONFIG_PATH is only supported for Tailscale running on Kubernetes", - WantExitCode: ptr.To(1), + "kube_shutdown_during_state_write": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "KUBERNETES_SERVICE_HOST": env.kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, + "TS_ENABLE_HEALTH_CHECK": "true", }, - }, - }, - { - Name: "kube_shutdown_during_state_write", - Env: map[string]string{ - "KUBERNETES_SERVICE_HOST": kube.Host, - "KUBERNETES_SERVICE_PORT_HTTPS": kube.Port, - "TS_ENABLE_HEALTH_CHECK": "true", - }, - KubeSecret: map[string]string{ - "authkey": "tskey-key", - }, - Phases: []phase{ - { - // Normal startup. - WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", - "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", - }, - WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - }, + KubeSecret: map[string]string{ + "authkey": "tskey-key", }, - { - // SIGTERM before state is finished writing, should wait for - // consistent state before propagating SIGTERM to tailscaled. - Signal: ptr.To(unix.SIGTERM), - UpdateKubeSecret: map[string]string{ - "_machinekey": "foo", - "_profiles": "foo", - "profile-baff": "foo", - // Missing "_current-profile" key. - }, - WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "_machinekey": "foo", - "_profiles": "foo", - "profile-baff": "foo", + Phases: []phase{ + { + // Normal startup. 
+ WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + }, }, - WantLog: "Waiting for tailscaled to finish writing state to Secret \"tailscale\"", - }, - { - // tailscaled has finished writing state, should propagate SIGTERM. - UpdateKubeSecret: map[string]string{ - "_current-profile": "foo", + { + // SIGTERM before state is finished writing, should wait for + // consistent state before propagating SIGTERM to tailscaled. + Signal: ptr.To(unix.SIGTERM), + UpdateKubeSecret: map[string]string{ + "_machinekey": "foo", + "_profiles": "foo", + "profile-baff": "foo", + // Missing "_current-profile" key. + }, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + "_machinekey": "foo", + "_profiles": "foo", + "profile-baff": "foo", + }, + WantLog: "Waiting for tailscaled to finish writing state to Secret \"tailscale\"", }, - WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "_machinekey": "foo", - "_profiles": "foo", - "profile-baff": "foo", - "_current-profile": "foo", + { + // tailscaled has finished writing state, should propagate SIGTERM. + UpdateKubeSecret: map[string]string{ + "_current-profile": "foo", + }, + WantKubeSecret: map[string]string{ + "authkey": "tskey-key", + "_machinekey": "foo", + "_profiles": "foo", + "profile-baff": "foo", + "_current-profile": "foo", + }, + WantLog: "HTTP server at [::]:9002 closed", + WantExitCode: ptr.To(0), }, - WantLog: "HTTP server at [::]:9002 closed", - WantExitCode: ptr.To(0), }, - }, + } }, } - for _, test := range tests { - t.Run(test.Name, func(t *testing.T) { - lapi.Reset() - kube.Reset() - os.Remove(argFile) - os.Remove(runningSockPath) - resetFiles() + for name, test := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + env := newTestEnv(t) + tc := test(&env) - for k, v := range test.KubeSecret { - kube.SetSecret(k, v) + for k, v := range tc.KubeSecret { + env.kube.SetSecret(k, v) } - kube.SetPatching(!test.KubeDenyPatch) + env.kube.SetPatching(!tc.KubeDenyPatch) cmd := exec.Command(boot) cmd.Env = []string{ - fmt.Sprintf("PATH=%s/usr/bin:%s", d, os.Getenv("PATH")), - fmt.Sprintf("TS_TEST_RECORD_ARGS=%s", argFile), - fmt.Sprintf("TS_TEST_SOCKET=%s", lapi.Path), - fmt.Sprintf("TS_SOCKET=%s", runningSockPath), - fmt.Sprintf("TS_TEST_ONLY_ROOT=%s", d), + fmt.Sprintf("PATH=%s/usr/bin:%s", env.d, os.Getenv("PATH")), + fmt.Sprintf("TS_TEST_RECORD_ARGS=%s", env.argFile), + fmt.Sprintf("TS_TEST_SOCKET=%s", env.lapi.Path), + fmt.Sprintf("TS_SOCKET=%s", env.runningSockPath), + fmt.Sprintf("TS_TEST_ONLY_ROOT=%s", env.d), fmt.Sprint("TS_TEST_FAKE_NETFILTER=true"), } - for k, v := range test.Env { + for k, v := range tc.Env { cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v)) } cbOut := &lockingBuffer{} @@ -1045,6 +1098,7 @@ func TestContainerBoot(t *testing.T) { } }() cmd.Stderr = cbOut + cmd.Stdout = cbOut if err := cmd.Start(); err != nil { t.Fatalf("starting containerboot: %v", err) } @@ -1054,11 +1108,11 @@ func TestContainerBoot(t *testing.T) { }() var wantCmds []string - for i, p := range test.Phases { + for i, p := range tc.Phases { for k, v := range p.UpdateKubeSecret { - kube.SetSecret(k, v) + env.kube.SetSecret(k, v) } - lapi.Notify(p.Notify) + env.lapi.Notify(p.Notify) if p.Signal != nil { cmd.Process.Signal(*p.Signal) } @@ -1086,15 +1140,15 @@ func 
TestContainerBoot(t *testing.T) { } wantCmds = append(wantCmds, p.WantCmds...) - waitArgs(t, 2*time.Second, d, argFile, strings.Join(wantCmds, "\n")) + waitArgs(t, 2*time.Second, env.d, env.argFile, strings.Join(wantCmds, "\n")) err := tstest.WaitFor(2*time.Second, func() error { if p.WantKubeSecret != nil { - got := kube.Secret() + got := env.kube.Secret() if diff := cmp.Diff(got, p.WantKubeSecret); diff != "" { return fmt.Errorf("unexpected kube secret data (-got+want):\n%s", diff) } } else { - got := kube.Secret() + got := env.kube.Secret() if len(got) > 0 { return fmt.Errorf("kube secret unexpectedly not empty, got %#v", got) } @@ -1106,7 +1160,7 @@ func TestContainerBoot(t *testing.T) { } err = tstest.WaitFor(2*time.Second, func() error { for path, want := range p.WantFiles { - gotBs, err := os.ReadFile(filepath.Join(d, path)) + gotBs, err := os.ReadFile(filepath.Join(env.d, path)) if err != nil { return fmt.Errorf("reading wanted file %q: %v", path, err) } @@ -1270,13 +1324,6 @@ func (l *localAPI) Close() { l.srv.Close() } -func (l *localAPI) Reset() { - l.Lock() - defer l.Unlock() - l.notify = nil - l.cond.Broadcast() -} - func (l *localAPI) Notify(n *ipn.Notify) { if n == nil { return @@ -1368,13 +1415,8 @@ func (k *kubeServer) SetPatching(canPatch bool) { k.canPatch = canPatch } -func (k *kubeServer) Reset() { - k.Lock() - defer k.Unlock() - k.secret = map[string]string{} -} - func (k *kubeServer) Start(t *testing.T) { + k.secret = map[string]string{} root := filepath.Join(k.FSRoot, "var/run/secrets/kubernetes.io/serviceaccount") if err := os.MkdirAll(root, 0700); err != nil { diff --git a/cmd/containerboot/services.go b/cmd/containerboot/services.go index 21ae0f4e0..ea56a6236 100644 --- a/cmd/containerboot/services.go +++ b/cmd/containerboot/services.go @@ -35,6 +35,9 @@ import ( const tailscaleTunInterface = "tailscale0" +// Modified using a build flag to speed up tests. +var testSleepDuration string + // This file contains functionality to run containerboot as a proxy that can // route cluster traffic to one or more tailnet targets, based on portmapping // rules read from a configfile. Currently (9/2024) this is only used for the @@ -149,8 +152,13 @@ func (ep *egressProxy) configure(opts egressProxyRunOpts) { ep.podIPv4 = opts.podIPv4 ep.tailnetAddrs = opts.tailnetAddrs ep.client = &http.Client{} // default HTTP client - ep.shortSleep = time.Second - ep.longSleep = time.Second * 10 + sleepDuration := time.Second + if d, err := time.ParseDuration(testSleepDuration); err == nil && d > 0 { + log.Printf("using test sleep duration %v", d) + sleepDuration = d + } + ep.shortSleep = sleepDuration + ep.longSleep = sleepDuration * 10 } // sync triggers an egress proxy config resync. The resync calculates the diff between config and status to determine if diff --git a/cmd/containerboot/tailscaled.go b/cmd/containerboot/tailscaled.go index 654b34757..f828c5257 100644 --- a/cmd/containerboot/tailscaled.go +++ b/cmd/containerboot/tailscaled.go @@ -38,11 +38,11 @@ func startTailscaled(ctx context.Context, cfg *settings) (*local.Client, *os.Pro } log.Printf("Starting tailscaled") if err := cmd.Start(); err != nil { - return nil, nil, fmt.Errorf("starting tailscaled failed: %v", err) + return nil, nil, fmt.Errorf("starting tailscaled failed: %w", err) } // Wait for the socket file to appear, otherwise API ops will racily fail. 
- log.Printf("Waiting for tailscaled socket") + log.Printf("Waiting for tailscaled socket at %s", cfg.Socket) for { if ctx.Err() != nil { return nil, nil, errors.New("timed out waiting for tailscaled socket") From 463b47a0abc36fb52514a9fefed17c6af387fdb8 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Tue, 8 Apr 2025 15:37:00 -0700 Subject: [PATCH 0718/1708] ipn/ipnlocal: include previous cert in new ACME orders (#15595) When we have an old cert that is being rotated, include it in the order. If we're in the ARI-recommended rotation window, LE should exclude us from rate limits. If we're not within that window, the order still succeeds, so there's no risk in including the old cert. Fixes #15542 Signed-off-by: Andrew Lytvynov --- ipn/ipnlocal/cert.go | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index 111dc5a2d..86052eb8d 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -484,14 +484,15 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l // In case this method was triggered multiple times in parallel (when // serving incoming requests), check whether one of the other goroutines // already renewed the cert before us. - if p, err := getCertPEMCached(cs, domain, now); err == nil { + previous, err := getCertPEMCached(cs, domain, now) + if err == nil { // shouldStartDomainRenewal caches its result so it's OK to call this // frequently. - shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, p, minValidity) + shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, previous, minValidity) if err != nil { logf("error checking for certificate renewal: %v", err) } else if !shouldRenew { - return p, nil + return previous, nil } } else if !errors.Is(err, ipn.ErrStateNotExist) && !errors.Is(err, errCertExpired) { return nil, err @@ -536,7 +537,17 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l return nil, err } - order, err := ac.AuthorizeOrder(ctx, []acme.AuthzID{{Type: "dns", Value: domain}}) + // If we have a previous cert, include it in the order. Assuming we're + // within the ARI renewal window this should exclude us from LE rate + // limits. + var opts []acme.OrderOption + if previous != nil { + prevCrt, err := previous.parseCertificate() + if err == nil { + opts = append(opts, acme.WithOrderReplacesCert(prevCrt)) + } + } + order, err := ac.AuthorizeOrder(ctx, []acme.AuthzID{{Type: "dns", Value: domain}}, opts...) if err != nil { return nil, err } From 7f5932e8f44d5a77f5faf5dc69dada32bcca92ba Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 8 Apr 2025 15:28:16 -0700 Subject: [PATCH 0719/1708] .github: add CONTRIBUTING.md Per suggestion from @sfllaw at https://github.com/tailscale/tailscale/pull/15576#issuecomment-2787386082 Updates #engdocs Change-Id: I67f915db7965ae69dab8925999e7f20208a4269a Signed-off-by: Brad Fitzpatrick --- .github/CONTRIBUTING.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 .github/CONTRIBUTING.md diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 000000000..4847406f9 --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,17 @@ +PRs welcome! But please file bugs first and explain the problem or +motivation. For new or changed functionality, strike up a discussion +and get agreement on the design/solution before spending too much time writing +code. 
+ +Commit messages should [reference +bugs](https://docs.github.com/en/github/writing-on-github/autolinked-references-and-urls). + +We require [Developer Certificate of +Origin](https://en.wikipedia.org/wiki/Developer_Certificate_of_Origin) (DCO) +`Signed-off-by` lines in commits. (`git commit -s`) + +Please squash your code review edits & force push. Multiple commits in +a PR are fine, but only if they're each logically separate and all tests pass +at each stage. No fixup commits. + +See [commit-messages.md](docs/commit-messages.md) (or skim `git log`) for our commit message style. From 8e1aa86bdbbad983e3e1ed03d40217ba38b25e37 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Mon, 7 Apr 2025 15:03:24 -0700 Subject: [PATCH 0720/1708] cmd/natc: attempt to match IP version between upstream and downstream As IPv4 and IPv6 end up with different MSS and different congestion control strategies, proxying between them can really amplify TCP meltdown style conditions in many real world network conditions, such as with higher latency, some loss, etc. Attempt to match up the protocols, otherwise pick a destination address arbitrarily. Also shuffle the target address to spread load across upstream load balancers. Updates #15367 Signed-off-by: James Tucker --- cmd/natc/natc.go | 64 +++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 55 insertions(+), 9 deletions(-) diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index a80e4a42a..585a0bb45 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -13,6 +13,7 @@ import ( "flag" "fmt" "log" + "math/rand/v2" "net" "net/http" "net/netip" @@ -438,7 +439,7 @@ func (c *connector) handleTCPFlow(src, dst netip.AddrPort) (handler func(net.Con return nil, false } return func(conn net.Conn) { - proxyTCPConn(conn, domain) + proxyTCPConn(conn, domain, c) }, true } @@ -456,16 +457,34 @@ func (c *connector) ignoreDestination(dstAddrs []netip.Addr) bool { return false } -func proxyTCPConn(c net.Conn, dest string) { +func proxyTCPConn(c net.Conn, dest string, ctor *connector) { if c.RemoteAddr() == nil { log.Printf("proxyTCPConn: nil RemoteAddr") c.Close() return } - addrPortStr := c.LocalAddr().String() - _, port, err := net.SplitHostPort(addrPortStr) + laddr, err := netip.ParseAddrPort(c.LocalAddr().String()) if err != nil { - log.Printf("tcpRoundRobinHandler.Handle: bogus addrPort %q", addrPortStr) + log.Printf("proxyTCPConn: ParseAddrPort failed: %v", err) + c.Close() + return + } + + daddrs, err := ctor.resolver.LookupNetIP(context.TODO(), "ip", dest) + if err != nil { + log.Printf("proxyTCPConn: LookupNetIP failed: %v", err) + c.Close() + return + } + + if len(daddrs) == 0 { + log.Printf("proxyTCPConn: no IP addresses found for %s", dest) + c.Close() + return + } + + if ctor.ignoreDestination(daddrs) { + log.Printf("proxyTCPConn: closing connection to ignored destination %s (%v)", dest, daddrs) c.Close() return } @@ -475,10 +494,37 @@ func proxyTCPConn(c net.Conn, dest string) { return netutil.NewOneConnListener(c, nil), nil }, } - // XXX(raggi): if the connection here resolves to an ignored destination, - // the connection should be closed/failed. - p.AddRoute(addrPortStr, &tcpproxy.DialProxy{ - Addr: fmt.Sprintf("%s:%s", dest, port), + + // TODO(raggi): more code could avoid this shuffle, but avoiding allocations + // for now most of the time daddrs will be short. 
+ rand.Shuffle(len(daddrs), func(i, j int) { + daddrs[i], daddrs[j] = daddrs[j], daddrs[i] }) + daddr := daddrs[0] + + // Try to match the upstream and downstream protocols (v4/v6) + if laddr.Addr().Is6() { + for _, addr := range daddrs { + if addr.Is6() { + daddr = addr + break + } + } + } else { + for _, addr := range daddrs { + if addr.Is4() { + daddr = addr + break + } + } + } + + // TODO(raggi): drop this library, it ends up being allocation and + // indirection heavy and really doesn't help us here. + dsockaddrs := netip.AddrPortFrom(daddr, laddr.Port()).String() + p.AddRoute(dsockaddrs, &tcpproxy.DialProxy{ + Addr: dsockaddrs, + }) + p.Start() } From dd95a83a65cee423fd05615753dd633563f4e540 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 9 Apr 2025 10:11:15 +0100 Subject: [PATCH 0721/1708] cmd/{containerboot,k8s-operator},kube/kubetypes: unadvertise ingress services on shutdown (#15451) Ensure no services are advertised as part of shutting down tailscaled. Prefs are only edited if services are currently advertised, and they're edited we wait for control's ~15s (+ buffer) delay to failover. Note that editing prefs will trigger a synchronous write to the state Secret, so it may fail to persist state if the ProxyGroup is getting scaled down and therefore has its RBAC deleted at the same time, but that failure doesn't stop prefs being updated within the local backend, doesn't affect connectivity to control, and the state Secret is about to get deleted anyway, so the only negative side effect is a harmless error log during shutdown. Control still learns that the node is no longer advertising the service and triggers the failover. Note that the first version of this used a PreStop lifecycle hook, but that only supports GET methods and we need the shutdown to trigger side effects (updating prefs) so it didn't seem appropriate to expose that functionality on a GET endpoint that's accessible on the k8s network. Updates tailscale/corp#24795 Change-Id: I0a9a4fe7a5395ca76135ceead05cbc3ee32b3d3c Signed-off-by: Tom Proctor --- cmd/containerboot/healthz.go | 4 +-- cmd/containerboot/main.go | 30 +++++++++++-------- cmd/containerboot/metrics.go | 4 +-- cmd/containerboot/serve.go | 44 ++++++++++++++++++++++++++++ cmd/k8s-operator/proxygroup_specs.go | 10 +++++++ 5 files changed, 75 insertions(+), 17 deletions(-) diff --git a/cmd/containerboot/healthz.go b/cmd/containerboot/healthz.go index 6d03bd6d3..d6a64a37c 100644 --- a/cmd/containerboot/healthz.go +++ b/cmd/containerboot/healthz.go @@ -47,10 +47,10 @@ func (h *healthz) update(healthy bool) { h.hasAddrs = healthy } -// healthHandlers registers a simple health handler at /healthz. +// registerHealthHandlers registers a simple health handler at /healthz. // A containerized tailscale instance is considered healthy if // it has at least one tailnet IP address. -func healthHandlers(mux *http.ServeMux, podIPv4 string) *healthz { +func registerHealthHandlers(mux *http.ServeMux, podIPv4 string) *healthz { h := &healthz{podIPv4: podIPv4} mux.Handle("GET /healthz", h) return h diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 5f8052bb9..9425571e6 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -195,18 +195,21 @@ func run() error { return fmt.Errorf("failed to bring up tailscale: %w", err) } killTailscaled := func() { + // The default termination grace period for a Pod is 30s. 
We wait 25s at + // most so that we still reserve some of that budget for tailscaled + // to receive and react to a SIGTERM before the SIGKILL that k8s + // will send at the end of the grace period. + ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second) + defer cancel() + + if err := ensureServicesNotAdvertised(ctx, client); err != nil { + log.Printf("Error ensuring services are not advertised: %v", err) + } + if hasKubeStateStore(cfg) { // Check we're not shutting tailscaled down while it's still writing // state. If we authenticate and fail to write all the state, we'll // never recover automatically. - // - // The default termination grace period for a Pod is 30s. We wait 25s at - // most so that we still reserve some of that budget for tailscaled - // to receive and react to a SIGTERM before the SIGKILL that k8s - // will send at the end of the grace period. - ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second) - defer cancel() - log.Printf("Checking for consistent state") err := kc.waitForConsistentState(ctx) if err != nil { @@ -226,7 +229,7 @@ func run() error { mux := http.NewServeMux() log.Printf("Running healthcheck endpoint at %s/healthz", cfg.HealthCheckAddrPort) - healthCheck = healthHandlers(mux, cfg.PodIPv4) + healthCheck = registerHealthHandlers(mux, cfg.PodIPv4) close := runHTTPServer(mux, cfg.HealthCheckAddrPort) defer close() @@ -237,15 +240,16 @@ func run() error { if cfg.localMetricsEnabled() { log.Printf("Running metrics endpoint at %s/metrics", cfg.LocalAddrPort) - metricsHandlers(mux, client, cfg.DebugAddrPort) + registerMetricsHandlers(mux, client, cfg.DebugAddrPort) } if cfg.localHealthEnabled() { log.Printf("Running healthcheck endpoint at %s/healthz", cfg.LocalAddrPort) - healthCheck = healthHandlers(mux, cfg.PodIPv4) + healthCheck = registerHealthHandlers(mux, cfg.PodIPv4) } - if cfg.EgressProxiesCfgPath != "" { - log.Printf("Running preshutdown hook at %s%s", cfg.LocalAddrPort, kubetypes.EgessServicesPreshutdownEP) + + if cfg.egressSvcsTerminateEPEnabled() { + log.Printf("Running egress preshutdown hook at %s%s", cfg.LocalAddrPort, kubetypes.EgessServicesPreshutdownEP) ep.registerHandlers(mux) } diff --git a/cmd/containerboot/metrics.go b/cmd/containerboot/metrics.go index 0bcd231ab..bbd050de6 100644 --- a/cmd/containerboot/metrics.go +++ b/cmd/containerboot/metrics.go @@ -62,13 +62,13 @@ func (m *metrics) handleDebug(w http.ResponseWriter, r *http.Request) { proxy(w, r, debugURL, http.DefaultClient.Do) } -// metricsHandlers registers a simple HTTP metrics handler at /metrics, forwarding +// registerMetricsHandlers registers a simple HTTP metrics handler at /metrics, forwarding // requests to tailscaled's /localapi/v0/usermetrics API. // // In 1.78.x and 1.80.x, it also proxies debug paths to tailscaled's debug // endpoint if configured to ease migration for a breaking change serving user // metrics instead of debug metrics on the "metrics" port. 
-func metricsHandlers(mux *http.ServeMux, lc *local.Client, debugAddrPort string) { +func registerMetricsHandlers(mux *http.ServeMux, lc *local.Client, debugAddrPort string) { m := &metrics{ lc: lc, debugEndpoint: debugAddrPort, diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index 37fd49777..bdf9432b5 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -9,6 +9,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "log" "os" "path/filepath" @@ -169,3 +170,46 @@ func readServeConfig(path, certDomain string) (*ipn.ServeConfig, error) { } return &sc, nil } + +func ensureServicesNotAdvertised(ctx context.Context, lc *local.Client) error { + prefs, err := lc.GetPrefs(ctx) + if err != nil { + return fmt.Errorf("error getting prefs: %w", err) + } + if len(prefs.AdvertiseServices) == 0 { + return nil + } + + log.Printf("serve proxy: unadvertising services: %v", prefs.AdvertiseServices) + if _, err := lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: nil, + }, + }); err != nil { + // EditPrefs only returns an error if it fails _set_ its local prefs. + // If it fails to _persist_ the prefs in state, we don't get an error + // and we continue waiting below, as control will failover as usual. + return fmt.Errorf("error setting prefs AdvertiseServices: %w", err) + } + + // Services use the same (failover XOR regional routing) mechanism that + // HA subnet routers use. Unfortunately we don't yet get a reliable signal + // from control that it's responded to our unadvertisement, so the best we + // can do is wait for 20 seconds, where 15s is the approximate maximum time + // it should take for control to choose a new primary, and 5s is for buffer. + // + // Note: There is no guarantee that clients have been _informed_ of the new + // primary no matter how long we wait. We would need a mechanism to await + // netmap updates for peers to know for sure. + // + // See https://tailscale.com/kb/1115/high-availability for more details. + // TODO(tomhjp): Wait for a netmap update instead of sleeping when control + // supports that. + select { + case <-ctx.Done(): + return nil + case <-time.After(20 * time.Second): + return nil + } +} diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 16deea278..0cf88b738 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -197,6 +197,16 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string // This mechanism currently (2025-01-26) rely on the local health check being accessible on the Pod's // IP, so they are not supported for ProxyGroups where users have configured TS_LOCAL_ADDR_PORT to a custom // value. + // + // NB: For _Ingress_ ProxyGroups, we run shutdown logic within containerboot + // in reaction to a SIGTERM signal instead of using a pre-stop hook. This is + // because Ingress pods need to unadvertise services, and it's preferable to + // avoid triggering those side-effects from a GET request that would be + // accessible to the whole cluster network (in the absence of NetworkPolicy + // rules). + // + // TODO(tomhjp): add a readiness probe or gate to Ingress Pods. There is a + // small window where the Pod is marked ready but routing can still fail. 
if pg.Spec.Type == tsapi.ProxyGroupTypeEgress && !hasLocalAddrPortSet(proxyClass) { c.Lifecycle = &corev1.Lifecycle{ PreStop: &corev1.LifecycleHandler{ From 7e296923ab57736a13602645d6eb3f793d798778 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Mon, 7 Apr 2025 23:28:49 -0700 Subject: [PATCH 0722/1708] cmd/tailscale: test for new flags in tailscale up MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `tailscale set` was created to set preferences, which used to be overloaded into `tailscale up`. To move people over to the new command, `up` was supposed to be frozen and no new preference flags would be added. But people forgot, there was no test to warn them, and so new flags were added anyway. TestUpFlagSetIsFrozen complains when new flags are added to `tailscale up`. It doesn’t try all combinations of GOOS, but since the CI builds in every OS, the pull-request tests should cover this. Updates #15460 Signed-off-by: Simon Law --- cmd/tailscale/cli/up_test.go | 55 ++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 cmd/tailscale/cli/up_test.go diff --git a/cmd/tailscale/cli/up_test.go b/cmd/tailscale/cli/up_test.go new file mode 100644 index 000000000..2c80ae94d --- /dev/null +++ b/cmd/tailscale/cli/up_test.go @@ -0,0 +1,55 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "flag" + "testing" + + "tailscale.com/util/set" +) + +// validUpFlags are the only flags that are valid for tailscale up. The up +// command is frozen: no new preferences can be added. Instead, add them to +// tailscale set. +// See tailscale/tailscale#15460. +var validUpFlags = set.Of( + "accept-dns", + "accept-risk", + "accept-routes", + "advertise-connector", + "advertise-exit-node", + "advertise-routes", + "advertise-tags", + "auth-key", + "exit-node", + "exit-node-allow-lan-access", + "force-reauth", + "host-routes", + "hostname", + "json", + "login-server", + "netfilter-mode", + "nickname", + "operator", + "posture-checking", + "qr", + "reset", + "shields-up", + "snat-subnet-routes", + "ssh", + "stateful-filtering", + "timeout", + "unattended", +) + +// TestUpFlagSetIsFrozen complains when new flags are added to tailscale up. +func TestUpFlagSetIsFrozen(t *testing.T) { + upFlagSet.VisitAll(func(f *flag.Flag) { + name := f.Name + if !validUpFlags.Contains(name) { + t.Errorf("--%s flag added to tailscale up, new prefs go in tailscale set: see tailscale/tailscale#15460", name) + } + }) +} From e17abbf461c0c014da0b38ec4bf44b00f2bc7ee4 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 9 Apr 2025 10:25:57 -0700 Subject: [PATCH 0723/1708] cmd/tailscale,ipn: add relay-server-port "tailscale set" flag and Prefs field (#15594) This flag is currently no-op and hidden. The flag does round trip through the related pref. Subsequent commits will tie them to net/udprelay.Server. There is no corresponding "tailscale up" flag, enabling/disabling of the relay server will only be supported via "tailscale set". This is a string flag in order to support disablement via empty string as a port value of 0 means "enable the server and listen on a random unused port". Disablement via empty string also follows existing flag convention, e.g. advertise-routes. Early internal discussions settled on "tailscale set --relay="", but the author felt this was too ambiguous around client vs server, and may cause confusion in the future if we add related flags. 
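As a rough sketch of the intended mapping between the flag and the pref
(the port 40000 below is only an illustrative value, not something from
this change):

    tailscale set --relay-server-port=40000  # Prefs.RelayServerPort = ptr.To(40000)
    tailscale set --relay-server-port=0      # Prefs.RelayServerPort = ptr.To(0): random unused port
    tailscale set --relay-server-port=""     # Prefs.RelayServerPort = nil: relay server disabled
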
Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- cmd/tailscale/cli/set.go | 13 +++++++++++++ cmd/tailscale/cli/up.go | 1 + ipn/ipn_clone.go | 4 ++++ ipn/ipn_view.go | 5 +++++ ipn/prefs.go | 25 ++++++++++++++++++++++++- ipn/prefs_test.go | 14 ++++++++++++++ 6 files changed, 61 insertions(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 07b3fe9ce..37db252ad 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -11,6 +11,7 @@ import ( "net/netip" "os/exec" "runtime" + "strconv" "strings" "github.com/peterbourgon/ff/v3/ffcli" @@ -22,6 +23,7 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/safesocket" "tailscale.com/types/opt" + "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/version" ) @@ -62,6 +64,7 @@ type setArgsT struct { snat bool statefulFiltering bool netfilterMode string + relayServerPort string } func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { @@ -82,6 +85,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf.BoolVar(&setArgs.updateApply, "auto-update", false, "automatically update to the latest available version") setf.BoolVar(&setArgs.postureChecking, "posture-checking", false, hidden+"allow management plane to gather device posture information") setf.BoolVar(&setArgs.runWebClient, "webclient", false, "expose the web interface for managing this node over Tailscale at port 5252") + setf.StringVar(&setArgs.relayServerPort, "relay-server-port", "", hidden+"UDP port number (0 will pick a random unused port) for the relay server to bind to, on all interfaces, or empty string to disable relay server functionality") ffcomplete.Flag(setf, "exit-node", func(args []string) ([]string, ffcomplete.ShellCompDirective, error) { st, err := localClient.Status(context.Background()) @@ -233,6 +237,15 @@ func runSet(ctx context.Context, args []string) (retErr error) { } } } + + if setArgs.relayServerPort != "" { + uport, err := strconv.ParseUint(setArgs.relayServerPort, 10, 16) + if err != nil { + return fmt.Errorf("failed to set relay server port: %v", err) + } + maskedPrefs.Prefs.RelayServerPort = ptr.To(int(uport)) + } + checkPrefs := curPrefs.Clone() checkPrefs.ApplyEdits(maskedPrefs) if err := localClient.CheckPrefs(ctx, checkPrefs); err != nil { diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 26db85f13..d1e813b95 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -773,6 +773,7 @@ func init() { addPrefFlagMapping("auto-update", "AutoUpdate.Apply") addPrefFlagMapping("advertise-connector", "AppConnector") addPrefFlagMapping("posture-checking", "PostureChecking") + addPrefFlagMapping("relay-server-port", "RelayServerPort") } func addPrefFlagMapping(flagName string, prefNames ...string) { diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 4050fec46..65438444e 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -61,6 +61,9 @@ func (src *Prefs) Clone() *Prefs { } } } + if dst.RelayServerPort != nil { + dst.RelayServerPort = ptr.To(*src.RelayServerPort) + } dst.Persist = src.Persist.Clone() return dst } @@ -96,6 +99,7 @@ var _PrefsCloneNeedsRegeneration = Prefs(struct { PostureChecking bool NetfilterKind string DriveShares []*drive.Share + RelayServerPort *int AllowSingleHosts marshalAsTrueInJSON Persist *persist.Persist }{}) diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index e633a2633..871270b85 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -166,6 +166,10 @@ func (v PrefsView) NetfilterKind() string { return 
v.ж.Netfilte func (v PrefsView) DriveShares() views.SliceView[*drive.Share, drive.ShareView] { return views.SliceOfViews[*drive.Share, drive.ShareView](v.ж.DriveShares) } +func (v PrefsView) RelayServerPort() views.ValuePointer[int] { + return views.ValuePointerOf(v.ж.RelayServerPort) +} + func (v PrefsView) AllowSingleHosts() marshalAsTrueInJSON { return v.ж.AllowSingleHosts } func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() } @@ -200,6 +204,7 @@ var _PrefsViewNeedsRegeneration = Prefs(struct { PostureChecking bool NetfilterKind string DriveShares []*drive.Share + RelayServerPort *int AllowSingleHosts marshalAsTrueInJSON Persist *persist.Persist }{}) diff --git a/ipn/prefs.go b/ipn/prefs.go index 5b3e95b33..9d6008de1 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -246,6 +246,14 @@ type Prefs struct { // by name. DriveShares []*drive.Share + // RelayServerPort is the UDP port number for the relay server to bind to, + // on all interfaces. A non-nil zero value signifies a random unused port + // should be used. A nil value signifies relay server functionality + // should be disabled. This field is currently experimental, and therefore + // no guarantees are made about its current naming and functionality when + // non-nil/enabled. + RelayServerPort *int `json:",omitempty"` + // AllowSingleHosts was a legacy field that was always true // for the past 4.5 years. It controlled whether Tailscale // peers got /32 or /127 routes for each other. @@ -337,6 +345,7 @@ type MaskedPrefs struct { PostureCheckingSet bool `json:",omitempty"` NetfilterKindSet bool `json:",omitempty"` DriveSharesSet bool `json:",omitempty"` + RelayServerPortSet bool `json:",omitempty"` } // SetsInternal reports whether mp has any of the Internal*Set field bools set @@ -555,6 +564,9 @@ func (p *Prefs) pretty(goos string) string { } sb.WriteString(p.AutoUpdate.Pretty()) sb.WriteString(p.AppConnector.Pretty()) + if p.RelayServerPort != nil { + fmt.Fprintf(&sb, "relayServerPort=%d ", *p.RelayServerPort) + } if p.Persist != nil { sb.WriteString(p.Persist.Pretty()) } else { @@ -616,7 +628,8 @@ func (p *Prefs) Equals(p2 *Prefs) bool { p.AppConnector == p2.AppConnector && p.PostureChecking == p2.PostureChecking && slices.EqualFunc(p.DriveShares, p2.DriveShares, drive.SharesEqual) && - p.NetfilterKind == p2.NetfilterKind + p.NetfilterKind == p2.NetfilterKind && + compareIntPtrs(p.RelayServerPort, p2.RelayServerPort) } func (au AutoUpdatePrefs) Pretty() string { @@ -636,6 +649,16 @@ func (ap AppConnectorPrefs) Pretty() string { return "" } +func compareIntPtrs(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + // NewPrefs returns the default preferences to use. 
func NewPrefs() *Prefs { // Provide default values for options which might be missing diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 91b835e3e..d28d161db 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -65,6 +65,7 @@ func TestPrefsEqual(t *testing.T) { "PostureChecking", "NetfilterKind", "DriveShares", + "RelayServerPort", "AllowSingleHosts", "Persist", } @@ -73,6 +74,9 @@ func TestPrefsEqual(t *testing.T) { have, prefsHandles) } + relayServerPort := func(port int) *int { + return &port + } nets := func(strs ...string) (ns []netip.Prefix) { for _, s := range strs { n, err := netip.ParsePrefix(s) @@ -341,6 +345,16 @@ func TestPrefsEqual(t *testing.T) { &Prefs{AdvertiseServices: []string{"svc:tux", "svc:amelie"}}, false, }, + { + &Prefs{RelayServerPort: relayServerPort(0)}, + &Prefs{RelayServerPort: nil}, + false, + }, + { + &Prefs{RelayServerPort: relayServerPort(0)}, + &Prefs{RelayServerPort: relayServerPort(1)}, + false, + }, } for i, tt := range tests { got := tt.a.Equals(tt.b) From 9ff9c5af048e4053b9f678bd6e3e335d8df8a14e Mon Sep 17 00:00:00 2001 From: Jason O'Donnell <2160810+jasonodonnell@users.noreply.github.com> Date: Wed, 9 Apr 2025 14:35:32 -0400 Subject: [PATCH 0724/1708] .github: add cron schedule to installer tests (#15603) Installer tests only run when changes are made to pkgserve. This PR schedules these tests to be run daily and report any failures to Slack. Fixes tailscale/corp#19103 Signed-off-by: Jason O'Donnell <2160810+jasonodonnell@users.noreply.github.com> --- .github/workflows/installer.yml | 47 ++++++++++++++++++++++++++------- 1 file changed, 38 insertions(+), 9 deletions(-) diff --git a/.github/workflows/installer.yml b/.github/workflows/installer.yml index adc4a0a60..7888d9ba5 100644 --- a/.github/workflows/installer.yml +++ b/.github/workflows/installer.yml @@ -1,6 +1,8 @@ name: test installer.sh on: + schedule: + - cron: '0 15 * * *' # 10am EST (UTC-4/5) push: branches: - "main" @@ -72,10 +74,10 @@ jobs: # tar and gzip are needed by the actions/checkout below. run: yum install -y --allowerasing tar gzip ${{ matrix.deps }} if: | - contains(matrix.image, 'centos') - || contains(matrix.image, 'oraclelinux') - || contains(matrix.image, 'fedora') - || contains(matrix.image, 'amazonlinux') + contains(matrix.image, 'centos') || + contains(matrix.image, 'oraclelinux') || + contains(matrix.image, 'fedora') || + contains(matrix.image, 'amazonlinux') - name: install dependencies (zypper) # tar and gzip are needed by the actions/checkout below. 
run: zypper --non-interactive install tar gzip ${{ matrix.deps }} @@ -85,11 +87,11 @@ jobs: apt-get update apt-get install -y ${{ matrix.deps }} if: | - contains(matrix.image, 'debian') - || contains(matrix.image, 'ubuntu') - || contains(matrix.image, 'elementary') - || contains(matrix.image, 'parrotsec') - || contains(matrix.image, 'kalilinux') + contains(matrix.image, 'debian') || + contains(matrix.image, 'ubuntu') || + contains(matrix.image, 'elementary') || + contains(matrix.image, 'parrotsec') || + contains(matrix.image, 'kalilinux') - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: run installer @@ -100,3 +102,30 @@ jobs: continue-on-error: true - name: check tailscale version run: tailscale --version + notify-slack: + needs: test + runs-on: ubuntu-latest + steps: + - name: Notify Slack of failure on scheduled runs + if: failure() && github.event_name == 'schedule' + uses: slackapi/slack-github-action@485a9d42d3a73031f12ec201c457e2162c45d02d # v2.0.0 + with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook + payload: | + { + "attachments": [{ + "title": "Tailscale installer test failed", + "title_link": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}", + "text": "One or more OSes in the test matrix failed. See the run for details.", + "fields": [ + { + "title": "Ref", + "value": "${{ github.ref_name }}", + "short": true + } + ], + "footer": "${{ github.workflow }} on schedule", + "color": "danger" + }] + } From e7325213a7aba40b4ace3a97aa8b46f9c29b287a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 9 Apr 2025 13:33:41 -0500 Subject: [PATCH 0725/1708] clientupdate: fix MSI exit code handling, preserve MSI and updater logs on Windows In this PR, we update the Windows client updater to: - Run msiexec with logging enabled and preserve the log file in %ProgramData%\Tailscale\Logs; - Preserve the updater's own log file in the same location; - Properly handle ERROR_SUCCESS_REBOOT_REQUIRED, ERROR_SUCCESS_REBOOT_INITIATED, and ERROR_INSTALL_ALREADY_RUNNING exit codes. The first two values indicate that installation completed successfully and no further retries are needed. The last one means the Windows Installer service is busy. Retrying immediately is likely to fail and may be risky; it could uninstall the current version without successfully installing the new one, potentially leaving the user without Tailscale. Updates tailscale/corp#27496 Updates tailscale#15554 Signed-off-by: Nick Khyl --- clientupdate/clientupdate_windows.go | 85 ++++++++++++++++++++++++---- 1 file changed, 75 insertions(+), 10 deletions(-) diff --git a/clientupdate/clientupdate_windows.go b/clientupdate/clientupdate_windows.go index 973722974..b79d447ad 100644 --- a/clientupdate/clientupdate_windows.go +++ b/clientupdate/clientupdate_windows.go @@ -16,6 +16,7 @@ import ( "path/filepath" "runtime" "strings" + "time" "github.com/google/uuid" "golang.org/x/sys/windows" @@ -34,6 +35,12 @@ const ( // It is used to re-launch the GUI process (tailscale-ipn.exe) after // install is complete. winExePathEnv = "TS_UPDATE_WIN_EXE_PATH" + // winVersionEnv is the environment variable that is set along with + // winMSIEnv and carries the version of tailscale that is being installed. + // It is used for logging purposes. + winVersionEnv = "TS_UPDATE_WIN_VERSION" + // updaterPrefix is the prefix for the temporary executable created by [makeSelfCopy]. 
+ updaterPrefix = "tailscale-updater" ) func makeSelfCopy() (origPathExe, tmpPathExe string, err error) { @@ -46,7 +53,7 @@ func makeSelfCopy() (origPathExe, tmpPathExe string, err error) { return "", "", err } defer f.Close() - f2, err := os.CreateTemp("", "tailscale-updater-*.exe") + f2, err := os.CreateTemp("", updaterPrefix+"-*.exe") if err != nil { return "", "", err } @@ -137,7 +144,7 @@ you can run the command prompt as Administrator one of these ways: up.Logf("authenticode verification succeeded") up.Logf("making tailscale.exe copy to switch to...") - up.cleanupOldDownloads(filepath.Join(os.TempDir(), "tailscale-updater-*.exe")) + up.cleanupOldDownloads(filepath.Join(os.TempDir(), updaterPrefix+"-*.exe")) selfOrig, selfCopy, err := makeSelfCopy() if err != nil { return err @@ -146,7 +153,7 @@ you can run the command prompt as Administrator one of these ways: up.Logf("running tailscale.exe copy for final install...") cmd := exec.Command(selfCopy, "update") - cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winExePathEnv+"="+selfOrig) + cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winExePathEnv+"="+selfOrig, winVersionEnv+"="+ver) cmd.Stdout = up.Stderr cmd.Stderr = up.Stderr cmd.Stdin = os.Stdin @@ -162,23 +169,62 @@ you can run the command prompt as Administrator one of these ways: func (up *Updater) installMSI(msi string) error { var err error for tries := 0; tries < 2; tries++ { - cmd := exec.Command("msiexec.exe", "/i", filepath.Base(msi), "/quiet", "/norestart", "/qn") + // msiexec.exe requires exclusive access to the log file, so create a dedicated one for each run. + installLogPath := up.startNewLogFile("tailscale-installer", os.Getenv(winVersionEnv)) + up.Logf("Install log: %s", installLogPath) + cmd := exec.Command("msiexec.exe", "/i", filepath.Base(msi), "/quiet", "/norestart", "/qn", "/L*v", installLogPath) cmd.Dir = filepath.Dir(msi) cmd.Stdout = up.Stdout cmd.Stderr = up.Stderr cmd.Stdin = os.Stdin err = cmd.Run() - if err == nil { - break + switch err := err.(type) { + case nil: + // Success. + return nil + case *exec.ExitError: + // For possible error codes returned by Windows Installer, see + // https://web.archive.org/web/20250409144914/https://learn.microsoft.com/en-us/windows/win32/msi/error-codes + switch windows.Errno(err.ExitCode()) { + case windows.ERROR_SUCCESS_REBOOT_REQUIRED: + // In most cases, updating Tailscale should not require a reboot. + // If it does, it might be because we failed to close the GUI + // and the installer couldn't replace tailscale-ipn.exe. + // The old GUI will continue to run until the next reboot. + // Not ideal, but also not a retryable error. + up.Logf("[unexpected] reboot required") + return nil + case windows.ERROR_SUCCESS_REBOOT_INITIATED: + // Same as above, but perhaps the device is configured to prompt + // the user to reboot and the user has chosen to reboot now. + up.Logf("[unexpected] reboot initiated") + return nil + case windows.ERROR_INSTALL_ALREADY_RUNNING: + // The Windows Installer service is currently busy. + // It could be our own install initiated by user/MDM/GP, another MSI install or perhaps a Windows Update install. + // Anyway, we can't do anything about it right now. The user (or tailscaled) can retry later. + // Retrying now will likely fail, and is risky since we might uninstall the current version + // and then fail to install the new one, leaving the user with no Tailscale at all. 
+ // + // TODO(nickkhyl,awly): should we check if this is actually a downgrade before uninstalling the current version? + // Also, maybe keep retrying the install longer if we uninstalled the current version due to a failed install attempt? + up.Logf("another installation is already in progress") + return err + } + default: + // Everything else is a retryable error. } + up.Logf("Install attempt failed: %v", err) uninstallVersion := up.currentVersion if v := os.Getenv("TS_DEBUG_UNINSTALL_VERSION"); v != "" { uninstallVersion = v } + uninstallLogPath := up.startNewLogFile("tailscale-uninstaller", uninstallVersion) // Assume it's a downgrade, which msiexec won't permit. Uninstall our current version first. up.Logf("Uninstalling current version %q for downgrade...", uninstallVersion) - cmd = exec.Command("msiexec.exe", "/x", msiUUIDForVersion(uninstallVersion), "/norestart", "/qn") + up.Logf("Uninstall log: %s", uninstallLogPath) + cmd = exec.Command("msiexec.exe", "/x", msiUUIDForVersion(uninstallVersion), "/norestart", "/qn", "/L*v", uninstallLogPath) cmd.Stdout = up.Stdout cmd.Stderr = up.Stderr cmd.Stdin = os.Stdin @@ -205,12 +251,14 @@ func (up *Updater) switchOutputToFile() (io.Closer, error) { var logFilePath string exePath, err := os.Executable() if err != nil { - logFilePath = filepath.Join(os.TempDir(), "tailscale-updater.log") + logFilePath = up.startNewLogFile(updaterPrefix, os.Getenv(winVersionEnv)) } else { - logFilePath = strings.TrimSuffix(exePath, ".exe") + ".log" + // Use the same suffix as the self-copy executable. + suffix := strings.TrimSuffix(strings.TrimPrefix(filepath.Base(exePath), updaterPrefix), ".exe") + logFilePath = up.startNewLogFile(updaterPrefix, os.Getenv(winVersionEnv)+suffix) } - up.Logf("writing update output to %q", logFilePath) + up.Logf("writing update output to: %s", logFilePath) logFile, err := os.Create(logFilePath) if err != nil { return nil, err @@ -223,3 +271,20 @@ func (up *Updater) switchOutputToFile() (io.Closer, error) { up.Stderr = logFile return logFile, nil } + +// startNewLogFile returns a name for a new log file. +// It cleans up any old log files with the same baseNamePrefix. +func (up *Updater) startNewLogFile(baseNamePrefix, baseNameSuffix string) string { + baseName := fmt.Sprintf("%s-%s-%s.log", baseNamePrefix, + time.Now().Format("20060102T150405"), baseNameSuffix) + + dir := filepath.Join(os.Getenv("ProgramData"), "Tailscale", "Logs") + if err := os.MkdirAll(dir, 0700); err != nil { + up.Logf("failed to create log directory: %v", err) + return filepath.Join(os.TempDir(), baseName) + } + + // TODO(nickkhyl): preserve up to N old log files? + up.cleanupOldDownloads(filepath.Join(dir, baseNamePrefix+"-*.log")) + return filepath.Join(dir, baseName) +} From 6133f44344fa244bf88c6d76c961e53a0db9aa2f Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 9 Apr 2025 13:43:29 -0700 Subject: [PATCH 0726/1708] ipn/ipnlocal: fix peerapi ingress endpoint (#15611) The http.StatusMethodNotAllowed status code was being erroneously set instead of http.StatusBadRequest in multiple places. 
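Concretely, the logAndError helper in handleServeIngress ignored its code
argument, so callers such as bad() that pass http.StatusBadRequest still
answered with a 405. Paraphrasing the code in question:

    logAndError := func(code int, publicMsg string) {
        h.logf("ingress: bad request from %v: %s", h.remoteAddr, publicMsg)
        http.Error(w, publicMsg, http.StatusMethodNotAllowed) // bug: should use code
    }
    bad := func(publicMsg string) { logAndError(http.StatusBadRequest, publicMsg) } // still produced 405
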
Updates #cleanup Signed-off-by: Jordan Whited --- ipn/ipnlocal/peerapi.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 21b808fd5..888b876d6 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -417,7 +417,7 @@ func (h *peerAPIHandler) handleServeIngress(w http.ResponseWriter, r *http.Reque } logAndError := func(code int, publicMsg string) { h.logf("ingress: bad request from %v: %s", h.remoteAddr, publicMsg) - http.Error(w, publicMsg, http.StatusMethodNotAllowed) + http.Error(w, publicMsg, code) } bad := func(publicMsg string) { logAndError(http.StatusBadRequest, publicMsg) From d486ea388d9d572d02ed6ac03a9207fa10b7f7d2 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Wed, 9 Apr 2025 15:36:40 +0000 Subject: [PATCH 0727/1708] logpolicy: fix log target override with a custom HTTP client This makes sure that the log target override is respected even if a custom HTTP client is passed to logpolicy. Updates tailscale/maple#29 Signed-off-by: Anton Tolchanov --- logpolicy/logpolicy.go | 31 ++++++++++++------- logpolicy/logpolicy_test.go | 60 +++++++++++++++++++++++++++++++++---- 2 files changed, 74 insertions(+), 17 deletions(-) diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index b005cfff6..fc259a417 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -518,8 +518,9 @@ type Options struct { MaxUploadSize int } -// New returns a new log policy (a logger and its instance ID). -func (opts Options) New() *Policy { +// init initializes the log policy and returns a logtail.Config and the +// Policy. +func (opts Options) init(disableLogging bool) (*logtail.Config, *Policy) { if hostinfo.IsNATLabGuestVM() { // In NATLab Gokrazy instances, tailscaled comes up concurently with // DHCP and the doesn't have DNS for a while. Wait for DHCP first. @@ -628,7 +629,7 @@ func (opts Options) New() *Policy { conf.IncludeProcSequence = true } - if envknob.NoLogsNoSupport() || testenv.InTest() || runtime.GOOS == "plan9" { + if disableLogging { opts.Logf("You have disabled logging. Tailscale will not be able to provide support.") conf.HTTPC = &http.Client{Transport: noopPretendSuccessTransport{}} } else { @@ -637,14 +638,15 @@ func (opts Options) New() *Policy { attachFilchBuffer(&conf, opts.Dir, opts.CmdName, opts.MaxBufferSize, opts.Logf) conf.HTTPC = opts.HTTPC + logHost := logtail.DefaultHost + if val := getLogTarget(); val != "" { + opts.Logf("You have enabled a non-default log target. Doing without being told to by Tailscale staff or your network administrator will make getting support difficult.") + conf.BaseURL = val + u, _ := url.Parse(val) + logHost = u.Host + } + if conf.HTTPC == nil { - logHost := logtail.DefaultHost - if val := getLogTarget(); val != "" { - opts.Logf("You have enabled a non-default log target. Doing without being told to by Tailscale staff or your network administrator will make getting support difficult.") - conf.BaseURL = val - u, _ := url.Parse(val) - logHost = u.Host - } conf.HTTPC = &http.Client{Transport: TransportOptions{ Host: logHost, NetMon: opts.NetMon, @@ -680,13 +682,20 @@ func (opts Options) New() *Policy { opts.Logf("%s", earlyErrBuf.Bytes()) } - return &Policy{ + return &conf, &Policy{ Logtail: lw, PublicID: newc.PublicID, Logf: opts.Logf, } } +// New returns a new log policy (a logger and its instance ID). 
+func (opts Options) New() *Policy { + disableLogging := envknob.NoLogsNoSupport() || testenv.InTest() || runtime.GOOS == "plan9" + _, policy := opts.init(disableLogging) + return policy +} + // attachFilchBuffer creates an on-disk ring buffer using filch and attaches // it to the logtail config. Note that this is optional; if no buffer is set, // logtail will use an in-memory buffer. diff --git a/logpolicy/logpolicy_test.go b/logpolicy/logpolicy_test.go index fb5666f86..28f03448a 100644 --- a/logpolicy/logpolicy_test.go +++ b/logpolicy/logpolicy_test.go @@ -4,6 +4,7 @@ package logpolicy import ( + "net/http" "os" "reflect" "testing" @@ -11,12 +12,14 @@ import ( "tailscale.com/logtail" ) -func TestLogHost(t *testing.T) { +func resetLogTarget() { + os.Unsetenv("TS_LOG_TARGET") v := reflect.ValueOf(&getLogTargetOnce).Elem() - reset := func() { - v.Set(reflect.Zero(v.Type())) - } - defer reset() + v.Set(reflect.Zero(v.Type())) +} + +func TestLogHost(t *testing.T) { + defer resetLogTarget() tests := []struct { env string @@ -29,10 +32,55 @@ func TestLogHost(t *testing.T) { {"https://foo.com:123/", "foo.com"}, } for _, tt := range tests { - reset() + resetLogTarget() os.Setenv("TS_LOG_TARGET", tt.env) if got := LogHost(); got != tt.want { t.Errorf("for env %q, got %q, want %q", tt.env, got, tt.want) } } } +func TestOptions(t *testing.T) { + defer resetLogTarget() + + tests := []struct { + name string + opts func() Options + wantBaseURL string + }{ + { + name: "default", + opts: func() Options { return Options{} }, + wantBaseURL: "", + }, + { + name: "custom_baseurl", + opts: func() Options { + os.Setenv("TS_LOG_TARGET", "http://localhost:1234") + return Options{} + }, + wantBaseURL: "http://localhost:1234", + }, + { + name: "custom_httpc_and_baseurl", + opts: func() Options { + os.Setenv("TS_LOG_TARGET", "http://localhost:12345") + return Options{HTTPC: &http.Client{Transport: noopPretendSuccessTransport{}}} + }, + wantBaseURL: "http://localhost:12345", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resetLogTarget() + config, policy := tt.opts().init(false) + if policy == nil { + t.Fatal("unexpected nil policy") + } + if config.BaseURL != tt.wantBaseURL { + t.Errorf("got %q, want %q", config.BaseURL, tt.wantBaseURL) + } + policy.Close() + }) + } +} From b9277ade1fc5a78e325abc7b15e32f8a9f41ebcd Mon Sep 17 00:00:00 2001 From: Craig Hesling Date: Wed, 9 Apr 2025 14:12:23 -0700 Subject: [PATCH 0728/1708] drive: fix index out of bounds when parsing request local paths (#15517) Fix the index out of bound panic when a request is made to the local fileserver mux with a valid secret-token, but missing share name. Example error: http: panic serving 127.0.0.1:40974: runtime error: slice bounds out of range [2:1] Additionally, we document the edge case behavior of utilities that this fileserver mux depends on. Signed-off-by: Craig Hesling --- drive/driveimpl/drive_test.go | 73 +++++++++++++++++++++++-- drive/driveimpl/fileserver.go | 4 ++ drive/driveimpl/shared/pathutil.go | 5 ++ drive/driveimpl/shared/pathutil_test.go | 1 + 4 files changed, 79 insertions(+), 4 deletions(-) diff --git a/drive/driveimpl/drive_test.go b/drive/driveimpl/drive_test.go index 20b179511..e7dd83291 100644 --- a/drive/driveimpl/drive_test.go +++ b/drive/driveimpl/drive_test.go @@ -133,6 +133,71 @@ func TestPermissions(t *testing.T) { } } +// TestMissingPaths verifies that the fileserver running at localhost +// correctly handles paths with missing required components. 
+// +// Expected path format: +// http://localhost:[PORT]//[/] +func TestMissingPaths(t *testing.T) { + s := newSystem(t) + + fileserverAddr := s.addRemote(remote1) + s.addShare(remote1, share11, drive.PermissionReadWrite) + + client := &http.Client{ + Transport: &http.Transport{DisableKeepAlives: true}, + } + addr := strings.Split(fileserverAddr, "|")[1] + secretToken := strings.Split(fileserverAddr, "|")[0] + + testCases := []struct { + name string + path string + wantStatus int + }{ + { + name: "empty path", + path: "", + wantStatus: http.StatusForbidden, + }, + { + name: "single slash", + path: "/", + wantStatus: http.StatusForbidden, + }, + { + name: "only token", + path: "/" + secretToken, + wantStatus: http.StatusBadRequest, + }, + { + name: "token with trailing slash", + path: "/" + secretToken + "/", + wantStatus: http.StatusBadRequest, + }, + { + name: "token and invalid share", + path: "/" + secretToken + "/nonexistentshare", + wantStatus: http.StatusNotFound, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + u := fmt.Sprintf("http://%s%s", addr, tc.path) + resp, err := client.Get(u) + if err != nil { + t.Fatalf("unexpected error making request: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != tc.wantStatus { + t.Errorf("got status code %d, want %d", resp.StatusCode, tc.wantStatus) + } + }) + } +} + // TestSecretTokenAuth verifies that the fileserver running at localhost cannot // be accessed directly without the correct secret token. This matters because // if a victim can be induced to visit the localhost URL and access a malicious @@ -704,8 +769,8 @@ func (a *noopAuthenticator) Close() error { return nil } -const lockBody = ` - - - +const lockBody = ` + + + ` diff --git a/drive/driveimpl/fileserver.go b/drive/driveimpl/fileserver.go index ef94b0643..113cb3b44 100644 --- a/drive/driveimpl/fileserver.go +++ b/drive/driveimpl/fileserver.go @@ -142,6 +142,10 @@ func (s *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } + if len(parts) < 2 { + w.WriteHeader(http.StatusBadRequest) + return + } r.URL.Path = shared.Join(parts[2:]...) share := parts[1] s.sharesMu.RLock() diff --git a/drive/driveimpl/shared/pathutil.go b/drive/driveimpl/shared/pathutil.go index efa9f5f32..fcadcdd5a 100644 --- a/drive/driveimpl/shared/pathutil.go +++ b/drive/driveimpl/shared/pathutil.go @@ -22,6 +22,9 @@ const ( // CleanAndSplit cleans the provided path p and splits it into its constituent // parts. This is different from path.Split which just splits a path into prefix // and suffix. +// +// If p is empty or contains only path separators, CleanAndSplit returns a slice +// of length 1 whose only element is "". func CleanAndSplit(p string) []string { return strings.Split(strings.Trim(path.Clean(p), sepStringAndDot), sepString) } @@ -38,6 +41,8 @@ func Parent(p string) string { } // Join behaves like path.Join() but also includes a leading slash. +// +// When parts are missing, the result is "/". 
func Join(parts ...string) string { fullParts := make([]string, 0, len(parts)) fullParts = append(fullParts, sepString) diff --git a/drive/driveimpl/shared/pathutil_test.go b/drive/driveimpl/shared/pathutil_test.go index 662adbd8b..daee69563 100644 --- a/drive/driveimpl/shared/pathutil_test.go +++ b/drive/driveimpl/shared/pathutil_test.go @@ -40,6 +40,7 @@ func TestJoin(t *testing.T) { parts []string want string }{ + {[]string{}, "/"}, {[]string{""}, "/"}, {[]string{"a"}, "/a"}, {[]string{"/a"}, "/a"}, From 1da78d871816387d21003f621b81ef507ae0c673 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 9 Apr 2025 14:33:52 -0700 Subject: [PATCH 0729/1708] build_dist.sh: allow settings custom build tags (#15589) Default tags to `$TAGS` if set, so that people can choose arbitrary subsets of features. Updates #12614 Signed-off-by: Andrew Lytvynov --- build_dist.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/build_dist.sh b/build_dist.sh index ccd4ac8b1..c01670398 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -28,18 +28,26 @@ EOF exit 0 fi -tags="" +tags="${TAGS:-}" ldflags="-X tailscale.com/version.longStamp=${VERSION_LONG} -X tailscale.com/version.shortStamp=${VERSION_SHORT}" # build_dist.sh arguments must precede go build arguments. while [ "$#" -gt 1 ]; do case "$1" in --extra-small) + if [ ! -z "${TAGS:-}" ]; then + echo "set either --extra-small or \$TAGS, but not both" + exit 1 + fi shift ldflags="$ldflags -w -s" tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture" ;; --box) + if [ ! -z "${TAGS:-}" ]; then + echo "set either --box or \$TAGS, but not both" + exit 1 + fi shift tags="${tags:+$tags,}ts_include_cli" ;; From 5c562116fc49ae8d88268a5a0e6b297cd855b12b Mon Sep 17 00:00:00 2001 From: kari-ts <135075563+kari-ts@users.noreply.github.com> Date: Wed, 9 Apr 2025 16:49:33 -0700 Subject: [PATCH 0730/1708] ipnlocal: log when client reports new peerAPI ports (#15463) Updates tailscale/tailscale#14393 Signed-off-by: kari-ts --- ipn/ipnlocal/local.go | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 7d69b884d..cf71b80fa 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4844,9 +4844,34 @@ func (b *LocalBackend) doSetHostinfoFilterServices() { c := len(hi.Services) hi.Services = append(hi.Services[:c:c], peerAPIServices...) hi.PushDeviceToken = b.pushDeviceToken.Load() + + // Compare the expected ports from peerAPIServices to the actual ports in hi.Services. + expectedPorts := extractPeerAPIPorts(peerAPIServices) + actualPorts := extractPeerAPIPorts(hi.Services) + if expectedPorts != actualPorts { + b.logf("Hostinfo peerAPI ports changed: expected %v, got %v", expectedPorts, actualPorts) + } + cc.SetHostinfo(&hi) } +type portPair struct { + v4, v6 uint16 +} + +func extractPeerAPIPorts(services []tailcfg.Service) portPair { + var p portPair + for _, s := range services { + switch s.Proto { + case "peerapi4": + p.v4 = s.Port + case "peerapi6": + p.v6 = s.Port + } + } + return p +} + // NetMap returns the latest cached network map received from // controlclient, or nil if no network map was received yet. 
func (b *LocalBackend) NetMap() *netmap.NetworkMap { From ed052eac62bbe0853f7251216dcba28d77ed2519 Mon Sep 17 00:00:00 2001 From: Paul Scott <408401+icio@users.noreply.github.com> Date: Thu, 10 Apr 2025 16:01:39 +0100 Subject: [PATCH 0731/1708] tstest: parse goroutines for diff in ResourceCheck (#15619) ResourceCheck was previously using cmp.Diff on multiline goroutine stacks The produced output was difficult to read for a number of reasons: - the goroutines were sorted by count, and a changing count caused them to jump around - diffs would be in the middle of stacks Instead, we now parse the pprof/goroutines?debug=1 format goroutines and only diff whole stacks. Updates #1253 Signed-off-by: Paul Scott --- tstest/resource.go | 224 ++++++++++++++++++++++++++++++++++- tstest/resource_test.go | 256 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 477 insertions(+), 3 deletions(-) create mode 100644 tstest/resource_test.go diff --git a/tstest/resource.go b/tstest/resource.go index b094c7911..f50bb3330 100644 --- a/tstest/resource.go +++ b/tstest/resource.go @@ -7,10 +7,10 @@ import ( "bytes" "runtime" "runtime/pprof" + "slices" + "strings" "testing" "time" - - "github.com/google/go-cmp/cmp" ) // ResourceCheck takes a snapshot of the current goroutines and registers a @@ -44,7 +44,20 @@ func ResourceCheck(tb testing.TB) { if endN <= startN { return } - tb.Logf("goroutine diff:\n%v\n", cmp.Diff(startStacks, endStacks)) + + // Parse and print goroutines. + start := parseGoroutines(startStacks) + end := parseGoroutines(endStacks) + if testing.Verbose() { + tb.Logf("goroutines start:\n%s", printGoroutines(start)) + tb.Logf("goroutines end:\n%s", printGoroutines(end)) + } + + // Print goroutine diff, omitting tstest.ResourceCheck goroutines. + self := func(g goroutine) bool { return bytes.Contains(g.stack, []byte("\ttailscale.com/tstest.goroutines+")) } + start.goroutines = slices.DeleteFunc(start.goroutines, self) + end.goroutines = slices.DeleteFunc(end.goroutines, self) + tb.Logf("goroutine diff (-start +end):\n%s", diffGoroutines(start, end)) // tb.Failed() above won't report on panics, so we shouldn't call Fatal // here or we risk suppressing reporting of the panic. @@ -58,3 +71,208 @@ func goroutines() (int, []byte) { p.WriteTo(b, 1) return p.Count(), b.Bytes() } + +// parseGoroutines takes pprof/goroutines?debug=1 -formatted output sorted by +// count, and splits it into a separate list of goroutines with count and stack +// separated. 
+// +// Example input: +// +// goroutine profile: total 408 +// 48 @ 0x47bc0e 0x136c6b9 0x136c69e 0x136c7ab 0x1379809 0x13797fa 0x483da1 +// # 0x136c6b8 gvisor.dev/gvisor/pkg/sync.Gopark+0x78 gvisor.dev/gvisor@v0.0.0-20250205023644-9414b50a5633/pkg/sync/runtime_unsafe.go:33 +// # 0x136c69d gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker+0x5d gvisor.dev/gvisor@v0.0.0-20250205023644-9414b50a5633/pkg/sleep/sleep_unsafe.go:210 +// # 0x136c7aa gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch+0x2a gvisor.dev/gvisor@v0.0.0-20250205023644-9414b50a5633/pkg/sleep/sleep_unsafe.go:257 +// # 0x1379808 gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch+0xa8 gvisor.dev/gvisor@v0.0.0-20250205023644-9414b50a5633/pkg/sleep/sleep_unsafe.go:280 +// # 0x13797f9 gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start+0x99 gvisor.dev/gvisor@v0.0.0-20250205023644-9414b50a5633/pkg/tcpip/transport/tcp/dispatcher.go:291 +// +// 48 @ 0x47bc0e 0x413705 0x4132b2 0x10fc905 0x483da1 +// # 0x10fc904 github.com/tailscale/wireguard-go/device.(*Device).RoutineDecryption+0x184 github.com/tailscale/wireguard-go@v0.0.0-20250107165329-0b8b35511f19/device/receive.go:245 +// +// 48 @ 0x47bc0e 0x413705 0x4132b2 0x10fcd2a 0x483da1 +// # 0x10fcd29 github.com/tailscale/wireguard-go/device.(*Device).RoutineHandshake+0x169 github.com/tailscale/wireguard-go@v0.0.0-20250107165329-0b8b35511f19/device/receive.go:279 +// +// 48 @ 0x47bc0e 0x413705 0x4132b2 0x1100ba7 0x483da1 +// # 0x1100ba6 github.com/tailscale/wireguard-go/device.(*Device).RoutineEncryption+0x186 github.com/tailscale/wireguard-go@v0.0.0-20250107165329-0b8b35511f19/device/send.go:451 +// +// 26 @ 0x47bc0e 0x458e57 0x847587 0x483da1 +// # 0x847586 database/sql.(*DB).connectionOpener+0x86 database/sql/sql.go:1261 +// +// 13 @ 0x47bc0e 0x458e57 0x754927 0x483da1 +// # 0x754926 net/http.(*persistConn).writeLoop+0xe6 net/http/transport.go:2596 +// +// 7 @ 0x47bc0e 0x413705 0x4132b2 0x10fda4d 0x483da1 +// # 0x10fda4c github.com/tailscale/wireguard-go/device.(*Peer).RoutineSequentialReceiver+0x16c github.com/tailscale/wireguard-go@v0.0.0-20250107165329-0b8b35511f19/device/receive.go:443 +func parseGoroutines(g []byte) goroutineDump { + head, tail, ok := bytes.Cut(g, []byte("\n")) + if !ok { + return goroutineDump{head: head} + } + + raw := bytes.Split(tail, []byte("\n\n")) + parsed := make([]goroutine, 0, len(raw)) + for _, s := range raw { + count, rem, ok := bytes.Cut(s, []byte(" @ ")) + if !ok { + continue + } + header, stack, _ := bytes.Cut(rem, []byte("\n")) + sort := slices.Clone(header) + reverseWords(sort) + parsed = append(parsed, goroutine{count, header, stack, sort}) + } + + return goroutineDump{head, parsed} +} + +type goroutineDump struct { + head []byte + goroutines []goroutine +} + +// goroutine is a parsed stack trace in pprof goroutine output, e.g. +// "10 @ 0x100 0x001\n# 0x100 test() test.go\n# 0x001 main() test.go". +type goroutine struct { + count []byte // e.g. "10" + header []byte // e.g. "0x100 0x001" + stack []byte // e.g. "# 0x100 test() test.go\n# 0x001 main() test.go" + + // sort is the same pointers as in header, but in reverse order so that we + // can place related goroutines near each other by sorting on this field. + // E.g. "0x001 0x100". + sort []byte +} + +func (g goroutine) Compare(h goroutine) int { + return bytes.Compare(g.sort, h.sort) +} + +// reverseWords repositions the words in b such that they are reversed. +// Words are separated by spaces. New lines are not considered. 
+// https://sketch.dev/sk/a4ef +func reverseWords(b []byte) { + if len(b) == 0 { + return + } + + // First, reverse the entire slice. + reverse(b) + + // Then reverse each word individually. + start := 0 + for i := 0; i <= len(b); i++ { + if i == len(b) || b[i] == ' ' { + reverse(b[start:i]) + start = i + 1 + } + } +} + +// reverse reverses bytes in place +func reverse(b []byte) { + for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 { + b[i], b[j] = b[j], b[i] + } +} + +// printGoroutines returns a text representation of h, gs equivalent to the +// pprof ?debug=1 input parsed by parseGoroutines, except the goroutines are +// sorted in an order easier for diffing. +func printGoroutines(g goroutineDump) []byte { + var b bytes.Buffer + b.Write(g.head) + + slices.SortFunc(g.goroutines, goroutine.Compare) + for _, g := range g.goroutines { + b.WriteString("\n\n") + b.Write(g.count) + b.WriteString(" @ ") + b.Write(g.header) + b.WriteString("\n") + if len(g.stack) > 0 { + b.Write(g.stack) + } + } + + return b.Bytes() +} + +// diffGoroutines returns a diff between goroutines of gx and gy. +// Goroutines present in gx and absent from gy are prefixed with "-". +// Goroutines absent from gx and present in gy are prefixed with "+". +// Goroutines present in both but with different counts only show a prefix on the count line. +func diffGoroutines(x, y goroutineDump) string { + hx, hy := x.head, y.head + gx, gy := x.goroutines, y.goroutines + var b strings.Builder + if !bytes.Equal(hx, hy) { + b.WriteString("- ") + b.Write(hx) + b.WriteString("\n+ ") + b.Write(hy) + b.WriteString("\n") + } + + slices.SortFunc(gx, goroutine.Compare) + slices.SortFunc(gy, goroutine.Compare) + + writeHeader := func(prefix string, g goroutine) { + b.WriteString(prefix) + b.Write(g.count) + b.WriteString(" @ ") + b.Write(g.header) + b.WriteString("\n") + } + writeStack := func(prefix string, g goroutine) { + s := g.stack + for { + var h []byte + h, s, _ = bytes.Cut(s, []byte("\n")) + if len(h) == 0 && len(s) == 0 { + break + } + b.WriteString(prefix) + b.Write(h) + b.WriteString("\n") + } + } + + i, j := 0, 0 + for { + var d int + switch { + case i < len(gx) && j < len(gy): + d = gx[i].Compare(gy[j]) + case i < len(gx): + d = -1 + case j < len(gy): + d = 1 + default: + return b.String() + } + + switch d { + case -1: + b.WriteString("\n") + writeHeader("- ", gx[i]) + writeStack("- ", gx[i]) + i++ + + case +1: + b.WriteString("\n") + writeHeader("+ ", gy[j]) + writeStack("+ ", gy[j]) + j++ + + case 0: + if !bytes.Equal(gx[i].count, gy[j].count) { + b.WriteString("\n") + writeHeader("- ", gx[i]) + writeHeader("+ ", gy[j]) + writeStack(" ", gy[j]) + } + i++ + j++ + } + } +} diff --git a/tstest/resource_test.go b/tstest/resource_test.go new file mode 100644 index 000000000..7199ac5d1 --- /dev/null +++ b/tstest/resource_test.go @@ -0,0 +1,256 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tstest + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestPrintGoroutines(t *testing.T) { + tests := []struct { + name string + in string + want string + }{ + { + name: "empty", + in: "goroutine profile: total 0\n", + want: "goroutine profile: total 0", + }, + { + name: "single goroutine", + in: `goroutine profile: total 1 +1 @ 0x47bc0e 0x458e57 0x847587 0x483da1 +# 0x847586 database/sql.(*DB).connectionOpener+0x86 database/sql/sql.go:1261 +`, + want: `goroutine profile: total 1 + +1 @ 0x47bc0e 0x458e57 0x847587 0x483da1 +# 0x847586 
database/sql.(*DB).connectionOpener+0x86 database/sql/sql.go:1261 +`, + }, + { + name: "multiple goroutines sorted", + in: `goroutine profile: total 14 +7 @ 0x47bc0e 0x413705 0x4132b2 0x10fda4d 0x483da1 +# 0x10fda4c github.com/user/pkg.RoutineA+0x16c pkg/a.go:443 + +7 @ 0x47bc0e 0x458e57 0x754927 0x483da1 +# 0x754926 net/http.(*persistConn).writeLoop+0xe6 net/http/transport.go:2596 +`, + want: `goroutine profile: total 14 + +7 @ 0x47bc0e 0x413705 0x4132b2 0x10fda4d 0x483da1 +# 0x10fda4c github.com/user/pkg.RoutineA+0x16c pkg/a.go:443 + +7 @ 0x47bc0e 0x458e57 0x754927 0x483da1 +# 0x754926 net/http.(*persistConn).writeLoop+0xe6 net/http/transport.go:2596 +`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := string(printGoroutines(parseGoroutines([]byte(tt.in)))) + if got != tt.want { + t.Errorf("printGoroutines() = %q, want %q, diff:\n%s", got, tt.want, cmp.Diff(tt.want, got)) + } + }) + } +} + +func TestDiffPprofGoroutines(t *testing.T) { + tests := []struct { + name string + x, y string + want string + }{ + { + name: "no difference", + x: `goroutine profile: total 1 +1 @ 0x47bc0e 0x458e57 0x847587 0x483da1 +# 0x847586 database/sql.(*DB).connectionOpener+0x86 database/sql/sql.go:1261`, + y: `goroutine profile: total 1 +1 @ 0x47bc0e 0x458e57 0x847587 0x483da1 +# 0x847586 database/sql.(*DB).connectionOpener+0x86 database/sql/sql.go:1261 +`, + want: "", + }, + { + name: "different counts", + x: `goroutine profile: total 1 +1 @ 0x47bc0e 0x458e57 0x847587 0x483da1 +# 0x847586 database/sql.(*DB).connectionOpener+0x86 database/sql/sql.go:1261 +`, + y: `goroutine profile: total 2 +2 @ 0x47bc0e 0x458e57 0x847587 0x483da1 +# 0x847586 database/sql.(*DB).connectionOpener+0x86 database/sql/sql.go:1261 +`, + want: `- goroutine profile: total 1 ++ goroutine profile: total 2 + +- 1 @ 0x47bc0e 0x458e57 0x847587 0x483da1 ++ 2 @ 0x47bc0e 0x458e57 0x847587 0x483da1 + # 0x847586 database/sql.(*DB).connectionOpener+0x86 database/sql/sql.go:1261 +`, + }, + { + name: "new goroutine", + x: `goroutine profile: total 1 +1 @ 0x47bc0e 0x458e57 0x847587 0x483da1 +# 0x847586 database/sql.(*DB).connectionOpener+0x86 database/sql/sql.go:1261 +`, + y: `goroutine profile: total 2 +1 @ 0x47bc0e 0x458e57 0x847587 0x483da1 +# 0x847586 database/sql.(*DB).connectionOpener+0x86 database/sql/sql.go:1261 + +1 @ 0x47bc0e 0x458e57 0x754927 0x483da1 +# 0x754926 net/http.(*persistConn).writeLoop+0xe6 net/http/transport.go:2596 +`, + want: `- goroutine profile: total 1 ++ goroutine profile: total 2 + ++ 1 @ 0x47bc0e 0x458e57 0x754927 0x483da1 ++ # 0x754926 net/http.(*persistConn).writeLoop+0xe6 net/http/transport.go:2596 +`, + }, + { + name: "removed goroutine", + x: `goroutine profile: total 2 +1 @ 0x47bc0e 0x458e57 0x847587 0x483da1 +# 0x847586 database/sql.(*DB).connectionOpener+0x86 database/sql/sql.go:1261 + +1 @ 0x47bc0e 0x458e57 0x754927 0x483da1 +# 0x754926 net/http.(*persistConn).writeLoop+0xe6 net/http/transport.go:2596 +`, + y: `goroutine profile: total 1 +1 @ 0x47bc0e 0x458e57 0x847587 0x483da1 +# 0x847586 database/sql.(*DB).connectionOpener+0x86 database/sql/sql.go:1261 +`, + want: `- goroutine profile: total 2 ++ goroutine profile: total 1 + +- 1 @ 0x47bc0e 0x458e57 0x754927 0x483da1 +- # 0x754926 net/http.(*persistConn).writeLoop+0xe6 net/http/transport.go:2596 +`, + }, + { + name: "removed many goroutine", + x: `goroutine profile: total 2 +1 @ 0x47bc0e 0x458e57 0x847587 0x483da1 +# 0x847586 database/sql.(*DB).connectionOpener+0x86 database/sql/sql.go:1261 + +1 @ 0x47bc0e 0x458e57 
0x754927 0x483da1 +# 0x754926 net/http.(*persistConn).writeLoop+0xe6 net/http/transport.go:2596 +`, + y: `goroutine profile: total 0`, + want: `- goroutine profile: total 2 ++ goroutine profile: total 0 + +- 1 @ 0x47bc0e 0x458e57 0x754927 0x483da1 +- # 0x754926 net/http.(*persistConn).writeLoop+0xe6 net/http/transport.go:2596 + +- 1 @ 0x47bc0e 0x458e57 0x847587 0x483da1 +- # 0x847586 database/sql.(*DB).connectionOpener+0x86 database/sql/sql.go:1261 +`, + }, + { + name: "invalid input x", + x: "invalid", + y: "goroutine profile: total 0\n", + want: "- invalid\n+ goroutine profile: total 0\n", + }, + { + name: "invalid input y", + x: "goroutine profile: total 0\n", + y: "invalid", + want: "- goroutine profile: total 0\n+ invalid\n", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := diffGoroutines( + parseGoroutines([]byte(tt.x)), + parseGoroutines([]byte(tt.y)), + ) + if got != tt.want { + t.Errorf("diffPprofGoroutines() diff:\ngot:\n%s\nwant:\n%s\ndiff (-want +got):\n%s", got, tt.want, cmp.Diff(tt.want, got)) + } + }) + } +} + +func TestParseGoroutines(t *testing.T) { + tests := []struct { + name string + in string + wantHeader string + wantCount int + }{ + { + name: "empty profile", + in: "goroutine profile: total 0\n", + wantHeader: "goroutine profile: total 0", + wantCount: 0, + }, + { + name: "single goroutine", + in: `goroutine profile: total 1 +1 @ 0x47bc0e 0x458e57 0x847587 0x483da1 +# 0x847586 database/sql.(*DB).connectionOpener+0x86 database/sql/sql.go:1261 +`, + wantHeader: "goroutine profile: total 1", + wantCount: 1, + }, + { + name: "multiple goroutines", + in: `goroutine profile: total 14 +7 @ 0x47bc0e 0x413705 0x4132b2 0x10fda4d 0x483da1 +# 0x10fda4c github.com/user/pkg.RoutineA+0x16c pkg/a.go:443 + +7 @ 0x47bc0e 0x458e57 0x754927 0x483da1 +# 0x754926 net/http.(*persistConn).writeLoop+0xe6 net/http/transport.go:2596 +`, + wantHeader: "goroutine profile: total 14", + wantCount: 2, + }, + { + name: "invalid format", + in: "invalid", + wantHeader: "invalid", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := parseGoroutines([]byte(tt.in)) + + if got := string(g.head); got != tt.wantHeader { + t.Errorf("parseGoroutines() header = %q, want %q", got, tt.wantHeader) + } + if got := len(g.goroutines); got != tt.wantCount { + t.Errorf("parseGoroutines() goroutine count = %d, want %d", got, tt.wantCount) + } + + // Verify that the sort field is correctly reversed + for _, g := range g.goroutines { + original := strings.Fields(string(g.header)) + sorted := strings.Fields(string(g.sort)) + if len(original) != len(sorted) { + t.Errorf("sort field has different number of words: got %d, want %d", len(sorted), len(original)) + continue + } + for i := 0; i < len(original); i++ { + if original[i] != sorted[len(sorted)-1-i] { + t.Errorf("sort field word mismatch at position %d: got %q, want %q", i, sorted[len(sorted)-1-i], original[i]) + } + } + } + }) + } +} From 6fb5e3b0cb5c4540992b54af1e7ee92542ea6630 Mon Sep 17 00:00:00 2001 From: kari-ts <135075563+kari-ts@users.noreply.github.com> Date: Thu, 10 Apr 2025 09:14:26 -0700 Subject: [PATCH 0732/1708] go.toolchain.rev: bump go 1.24 for Android pidfd changes (#15613) Updates tailscale/tailscale#13452 Signed-off-by: kari-ts --- go.toolchain.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index c6dbf4fa1..e8ede337c 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -16b6e4fd15c59336156cdbc977de1745ad094f2d 
+982da8f24fa0504f2214f24b0d68b2febd5983f8 From 476a4c6ff174d46ce3b125c018c07c43713e1c10 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 10 Apr 2025 09:49:46 -0700 Subject: [PATCH 0733/1708] ipn/ipnstate: add home DERP to tailscale status JSON Fixes #15625 Change-Id: Ic20dad2dab4ac52c666057845bdc3cf5c0ffcd8f Signed-off-by: Brad Fitzpatrick --- ipn/ipnstate/ipnstate.go | 3 +++ wgengine/magicsock/magicsock.go | 12 ++++++++---- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/ipn/ipnstate/ipnstate.go b/ipn/ipnstate/ipnstate.go index 89c6d7e24..4494afb67 100644 --- a/ipn/ipnstate/ipnstate.go +++ b/ipn/ipnstate/ipnstate.go @@ -53,6 +53,9 @@ type Status struct { // If nil, an exit node is not in use. ExitNodeStatus *ExitNodeStatus `json:"ExitNodeStatus,omitempty"` + // DERPHomeRegionID is the current home DERP region ID. + DERPHomeRegionID int + // Health contains health check problems. // Empty means everything is good. (or at least that no known // problems are detected) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a32867f72..e7e65b6ee 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2892,11 +2892,15 @@ func (c *Conn) UpdateStatus(sb *ipnstate.StatusBuilder) { }) } - c.foreachActiveDerpSortedLocked(func(node int, ad activeDerp) { - // TODO(bradfitz): add a method to ipnstate.StatusBuilder - // to include all the DERP connections we have open - // and add it here. See the other caller of foreachActiveDerpSortedLocked. + sb.MutateStatus(func(s *ipnstate.Status) { + s.DERPHomeRegionID = c.myDerp + c.foreachActiveDerpSortedLocked(func(node int, ad activeDerp) { + // TODO(bradfitz): add a method to ipnstate.StatusBuilder + // to include all the DERP connections we have open + // and add it here. See the other caller of foreachActiveDerpSortedLocked. + }) }) + } // SetStatistics specifies a per-connection statistics aggregator. From 94f4f8373115f3b76e5f89a9ec8a41af761d3fd2 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Sat, 5 Apr 2025 22:15:26 -0500 Subject: [PATCH 0734/1708] ipn, ipn/ipnlocal: reduce coupling between LocalBackend/profileManager and the Windows-specific "current user" model Ultimately, we'd like to get rid of the concept of the "current user". It is only used on Windows, but even then it doesn't work well in multi-user and enterprise/managed Windows environments. In this PR, we update LocalBackend and profileManager to decouple them a bit more from this obsolete concept. This is done in a preparation for extracting ipnlocal.Extension-related interfaces and types, and using them to implement optional features like tailscale/corp#27645, instead of continuing growing the core ipnlocal logic. Notably, we rename (*profileManager).SetCurrentUserAndProfile() to SwitchToProfile() and change its signature to accept an ipn.LoginProfileView instead of an ipn.ProfileID and ipn.WindowsUserID. Since we're not removing the "current user" completely just yet, the method sets the current user to the owner of the target profile. We also update the profileResolver callback type, which is typically implemented by LocalBackend extensions, to return an ipn.LoginProfileView instead of ipn.ProfileID and ipn.WindowsUserID. 
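For illustration only, the resolver callback shape changes roughly as sketched below. The two function types mirror the diff further down; the noProfile resolver is hypothetical.

```go
package example

import "tailscale.com/ipn"

// Old shape: separate user and profile IDs plus an ok flag,
// where ("", "", false) meant no profile.
type profileResolverOld func() (ipn.WindowsUserID, ipn.ProfileID, bool)

// New shape: a single read-only view. An invalid (zero) view means no
// profile; a valid view with an empty ProfileID means a new profile that
// has not been persisted yet.
type profileResolverNew func() ipn.LoginProfileView

// noProfile is a hypothetical resolver that declines to pick a profile.
func noProfile() ipn.LoginProfileView {
	return ipn.LoginProfileView{}
}
```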
Updates tailscale/corp#27645 Updates tailscale/corp#18342 Signed-off-by: Nick Khyl --- ipn/ipnlocal/desktop_sessions.go | 41 ++++--- ipn/ipnlocal/local.go | 87 +++++++------- ipn/ipnlocal/local_test.go | 6 +- ipn/ipnlocal/profiles.go | 200 ++++++++++++++++--------------- ipn/ipnlocal/profiles_test.go | 12 +- ipn/prefs.go | 25 +++- 6 files changed, 206 insertions(+), 165 deletions(-) diff --git a/ipn/ipnlocal/desktop_sessions.go b/ipn/ipnlocal/desktop_sessions.go index 4e9eebf34..29cb196c7 100644 --- a/ipn/ipnlocal/desktop_sessions.go +++ b/ipn/ipnlocal/desktop_sessions.go @@ -109,37 +109,39 @@ func (e *desktopSessionsExt) updateDesktopSessionState(session *desktop.Session) // getBackgroundProfile is a [profileResolver] that works as follows: // -// If Always-On mode is disabled, it returns no profile ("","",false). +// If Always-On mode is disabled, it returns no profile. // // If AlwaysOn mode is enabled, it returns the current profile unless: -// - The current user has signed out. +// - The current profile's owner has signed out. // - Another user has a foreground (i.e. active/unlocked) session. // -// If the current user's session runs in the background and no other user +// If the current profile owner's session runs in the background and no other user // has a foreground session, it returns the current profile. This applies // when a locally signed-in user locks their screen or when a remote user // disconnects without signing out. // -// In all other cases, it returns no profile ("","",false). +// In all other cases, it returns no profile. // // It is called with [LocalBackend.mu] locked. -func (e *desktopSessionsExt) getBackgroundProfile() (_ ipn.WindowsUserID, _ ipn.ProfileID, ok bool) { +func (e *desktopSessionsExt) getBackgroundProfile() ipn.LoginProfileView { e.mu.Lock() defer e.mu.Unlock() if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); !alwaysOn { - return "", "", false + // If the Always-On mode is disabled, there's no background profile + // as far as the desktop session extension is concerned. + return ipn.LoginProfileView{} } - isCurrentUserSingedIn := false + isCurrentProfileOwnerSignedIn := false var foregroundUIDs []ipn.WindowsUserID for _, s := range e.id2sess { switch uid := s.User.UserID(); uid { - case e.pm.CurrentUserID(): - isCurrentUserSingedIn = true + case e.pm.CurrentProfile().LocalUserID(): + isCurrentProfileOwnerSignedIn = true if s.Status == desktop.ForegroundSession { // Keep the current profile if the user has a foreground session. - return e.pm.CurrentUserID(), e.pm.CurrentProfile().ID(), true + return e.pm.CurrentProfile() } default: if s.Status == desktop.ForegroundSession { @@ -148,23 +150,24 @@ func (e *desktopSessionsExt) getBackgroundProfile() (_ ipn.WindowsUserID, _ ipn. } } - // If there's no current user (e.g., tailscaled just started), or if the current - // user has no foreground session, switch to the default profile of the first user - // with a foreground session, if any. + // If the current profile is empty and not owned by anyone (e.g., tailscaled just started), + // or if the current profile's owner has no foreground session, switch to the default profile + // of the first user with a foreground session, if any. 
for _, uid := range foregroundUIDs { - if profileID := e.pm.DefaultUserProfileID(uid); profileID != "" { - return uid, profileID, true + if profile := e.pm.DefaultUserProfile(uid); profile.ID() != "" { + return profile } } - // If no user has a foreground session but the current user is still signed in, + // If no user has a foreground session but the current profile's owner is still signed in, // keep the current profile even if the session is not in the foreground, // such as when the screen is locked or a remote session is disconnected. - if len(foregroundUIDs) == 0 && isCurrentUserSingedIn { - return e.pm.CurrentUserID(), e.pm.CurrentProfile().ID(), true + if len(foregroundUIDs) == 0 && isCurrentProfileOwnerSignedIn { + return e.pm.CurrentProfile() } - return "", "", false + // Otherwise, there's no background profile. + return ipn.LoginProfileView{} } // Shutdown implements [localBackendExtension]. diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index cf71b80fa..a99d67cda 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -204,13 +204,12 @@ func RegisterExtension(name string, newExt NewExtensionFn) { mak.Set(®isteredExtensions, name, newExt) } -// profileResolver is any function that returns user and profile IDs -// along with a flag indicating whether it succeeded. Since an empty -// profile ID ("") represents an empty profile, the ok return parameter -// distinguishes between an empty profile and no profile. +// profileResolver is any function that returns a read-only view of a login profile. +// An invalid view indicates no profile. A valid profile view with an empty [ipn.ProfileID] +// indicates that the profile is new and has not been persisted yet. // // It is called with [LocalBackend.mu] held. -type profileResolver func() (_ ipn.WindowsUserID, _ ipn.ProfileID, ok bool) +type profileResolver func() ipn.LoginProfileView // NewControlClientCallback is a function to be called when a new [controlclient.Client] // is created and before it is first used. The login profile and prefs represent @@ -4006,13 +4005,21 @@ func (b *LocalBackend) SwitchToBestProfile(reason string) { func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock unlockOnce) { defer unlock() oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() - uid, profileID, background := b.resolveBestProfileLocked() - cp, switched := b.pm.SetCurrentUserAndProfile(uid, profileID) + profile, background := b.resolveBestProfileLocked() + cp, switched, err := b.pm.SwitchToProfile(profile) switch { case !switched && cp.ID() == "": - b.logf("%s: staying on empty profile", reason) + if err != nil { + b.logf("%s: an error occurred; staying on empty profile: %v", reason, err) + } else { + b.logf("%s: staying on empty profile", reason) + } case !switched: - b.logf("%s: staying on profile %q (%s)", reason, cp.UserProfile().LoginName, cp.ID()) + if err != nil { + b.logf("%s: an error occurred; staying on profile %q (%s): %v", reason, cp.UserProfile().LoginName, cp.ID(), err) + } else { + b.logf("%s: staying on profile %q (%s)", reason, cp.UserProfile().LoginName, cp.ID()) + } case cp.ID() == "": b.logf("%s: disconnecting Tailscale", reason) case background: @@ -4032,7 +4039,7 @@ func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock un // the TKA initialization or [LocalBackend.Start] can fail. // These errors are not critical as far as we're concerned. // But maybe we should post a notification to the API watchers? 
- b.logf("failed switching profile to %q: %v", profileID, err) + b.logf("failed switching profile to %q: %v", profile.ID(), err) } } @@ -4041,30 +4048,29 @@ func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock un // the unattended mode is enabled, the current state of the desktop sessions, // and other factors. // -// It returns the user ID, profile ID, and whether the returned profile is -// considered a background profile. A background profile is used when no OS user -// is actively using Tailscale, such as when no GUI/CLI client is connected -// and Unattended Mode is enabled (see also [LocalBackend.getBackgroundProfileLocked]). -// An empty profile ID indicates that Tailscale should switch to an empty profile. +// It returns a read-only view of the profile and whether it is considered +// a background profile. A background profile is used when no OS user is actively +// using Tailscale, such as when no GUI/CLI client is connected and Unattended Mode +// is enabled (see also [LocalBackend.getBackgroundProfileLocked]). +// +// An invalid view indicates no profile, meaning Tailscale should disconnect +// and remain idle until a GUI or CLI client connects. +// A valid profile view with an empty [ipn.ProfileID] indicates a new profile that +// has not been persisted yet. // // b.mu must be held. -func (b *LocalBackend) resolveBestProfileLocked() (userID ipn.WindowsUserID, profileID ipn.ProfileID, isBackground bool) { +func (b *LocalBackend) resolveBestProfileLocked() (_ ipn.LoginProfileView, isBackground bool) { // If a GUI/CLI client is connected, use the connected user's profile, which means // either the current profile if owned by the user, or their default profile. if b.currentUser != nil { - cp := b.pm.CurrentProfile() - uid := b.currentUser.UserID() - - var profileID ipn.ProfileID + profile := b.pm.CurrentProfile() // TODO(nickkhyl): check if the current profile is allowed on the device, // such as when [syspolicy.Tailnet] policy setting requires a specific Tailnet. // See tailscale/corp#26249. - if cp.LocalUserID() == uid { - profileID = cp.ID() - } else { - profileID = b.pm.DefaultUserProfileID(uid) + if uid := b.currentUser.UserID(); profile.LocalUserID() != uid { + profile = b.pm.DefaultUserProfile(uid) } - return uid, profileID, false + return profile, false } // Otherwise, if on Windows, use the background profile if one is set. @@ -4073,8 +4079,8 @@ func (b *LocalBackend) resolveBestProfileLocked() (userID ipn.WindowsUserID, pro // If the returned background profileID is "", Tailscale will disconnect // and remain idle until a GUI or CLI client connects. if goos := envknob.GOOS(); goos == "windows" { - uid, profileID := b.getBackgroundProfileLocked() - return uid, profileID, true + profile := b.getBackgroundProfileLocked() + return profile, true } // On other platforms, however, Tailscale continues to run in the background @@ -4083,7 +4089,7 @@ func (b *LocalBackend) resolveBestProfileLocked() (userID ipn.WindowsUserID, pro // TODO(nickkhyl): check if the current profile is allowed on the device, // such as when [syspolicy.Tailnet] policy setting requires a specific Tailnet. // See tailscale/corp#26249. 
- return b.pm.CurrentUserID(), b.pm.CurrentProfile().ID(), false + return b.pm.CurrentProfile(), false } // RegisterBackgroundProfileResolver registers a function to be used when @@ -4100,30 +4106,31 @@ func (b *LocalBackend) RegisterBackgroundProfileResolver(resolver profileResolve } } -// getBackgroundProfileLocked returns the user and profile ID to use when no GUI/CLI -// client is connected, or "","" if Tailscale should not run in the background. +// getBackgroundProfileLocked returns a read-only view of the profile to use +// when no GUI/CLI client is connected. If Tailscale should not run in the background +// and should disconnect until a GUI/CLI client connects, the returned view is not valid. // As of 2025-02-07, it is only used on Windows. -func (b *LocalBackend) getBackgroundProfileLocked() (ipn.WindowsUserID, ipn.ProfileID) { +func (b *LocalBackend) getBackgroundProfileLocked() ipn.LoginProfileView { // TODO(nickkhyl): check if the returned profile is allowed on the device, // such as when [syspolicy.Tailnet] policy setting requires a specific Tailnet. // See tailscale/corp#26249. // If Unattended Mode is enabled for the current profile, keep using it. if b.pm.CurrentPrefs().ForceDaemon() { - return b.pm.CurrentProfile().LocalUserID(), b.pm.CurrentProfile().ID() + return b.pm.CurrentProfile() } // Otherwise, attempt to resolve the background profile using the background // profile resolvers available on the current platform. for _, resolver := range b.backgroundProfileResolvers { - if uid, profileID, ok := resolver(); ok { - return uid, profileID + if profile := resolver(); profile.Valid() { + return profile } } // Otherwise, switch to an empty profile and disconnect Tailscale // until a GUI or CLI client connects. - return "", "" + return ipn.LoginProfileView{} } // CurrentUserForTest returns the current user and the associated WindowsUserID. @@ -7555,13 +7562,9 @@ func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { unlock := b.lockAndGetUnlock() defer unlock() - if b.pm.CurrentProfile().ID() == profile { - return nil - } - oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() - if err := b.pm.SwitchProfile(profile); err != nil { - return err + if _, changed, err := b.pm.SwitchToProfileByID(profile); !changed || err != nil { + return err // nil if we're already on the target profile } // As an optimization, only reset the dialPlan if the control URL changed. @@ -7750,7 +7753,7 @@ func (b *LocalBackend) NewProfile() error { unlock := b.lockAndGetUnlock() defer unlock() - b.pm.NewProfile() + b.pm.SwitchToNewProfile() // The new profile doesn't yet have a ControlURL because it hasn't been // set. Conservatively reset the dialPlan. 
diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 2579590a8..d29c2d4bb 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4124,7 +4124,7 @@ func TestReadWriteRouteInfo(t *testing.T) { } // write the other routeInfo as the other profile - if err := b.pm.SwitchProfile("id2"); err != nil { + if _, _, err := b.pm.SwitchToProfileByID("id2"); err != nil { t.Fatal(err) } if err := b.storeRouteInfo(ri2); err != nil { @@ -4132,7 +4132,7 @@ func TestReadWriteRouteInfo(t *testing.T) { } // read the routeInfo of the first profile - if err := b.pm.SwitchProfile("id1"); err != nil { + if _, _, err := b.pm.SwitchToProfileByID("id1"); err != nil { t.Fatal(err) } readRi, err = b.readRouteInfoLocked() @@ -4144,7 +4144,7 @@ func TestReadWriteRouteInfo(t *testing.T) { } // read the routeInfo of the second profile - if err := b.pm.SwitchProfile("id2"); err != nil { + if _, _, err := b.pm.SwitchToProfileByID("id2"); err != nil { t.Fatal(err) } readRi, err = b.readRouteInfoLocked() diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 10a110e61..901a4a899 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -64,8 +64,7 @@ func (pm *profileManager) SetCurrentUserID(uid ipn.WindowsUserID) { if pm.currentUserID == uid { return } - pm.currentUserID = uid - if err := pm.SwitchToDefaultProfile(); err != nil { + if _, _, err := pm.SwitchToDefaultProfileForUser(uid); err != nil { // SetCurrentUserID should never fail and must always switch to the // user's default profile or create a new profile for the current user. // Until we implement multi-user support and the new permission model, @@ -73,79 +72,109 @@ func (pm *profileManager) SetCurrentUserID(uid ipn.WindowsUserID) { // that when SetCurrentUserID exits, the profile in pm.currentProfile // is either an existing profile owned by the user, or a new, empty profile. pm.logf("%q's default profile cannot be used; creating a new one: %v", uid, err) - pm.NewProfileForUser(uid) + pm.SwitchToNewProfileForUser(uid) } } -// SetCurrentUserAndProfile sets the current user ID and switches the specified -// profile, if it is accessible to the user. If the profile does not exist, -// or is not accessible, it switches to the user's default profile, -// creating a new one if necessary. +// SwitchToProfile switches to the specified profile and (temporarily, +// while the "current user" is still a thing on Windows; see tailscale/corp#18342) +// sets its owner as the current user. The profile must be a valid profile +// returned by the [profileManager], such as by [profileManager.Profiles], +// [profileManager.ProfileByID], or [profileManager.NewProfileForUser]. // // It is a shorthand for [profileManager.SetCurrentUserID] followed by -// [profileManager.SwitchProfile], but it is more efficient as it switches +// [profileManager.SwitchProfileByID], but it is more efficient as it switches // directly to the specified profile rather than switching to the user's -// default profile first. +// default profile first. It is a no-op if the specified profile is already +// the current profile. // -// As a special case, if the specified profile ID "", it creates a new -// profile for the user and switches to it, unless the current profile -// is already a new, empty profile owned by the user. +// As a special case, if the specified profile view is not valid, it resets +// both the current user and the profile to a new, empty profile not owned +// by any user. 
// -// It returns the current profile and whether the call resulted -// in a profile switch. -func (pm *profileManager) SetCurrentUserAndProfile(uid ipn.WindowsUserID, profileID ipn.ProfileID) (cp ipn.LoginProfileView, changed bool) { - pm.currentUserID = uid - - if profileID == "" { - if pm.currentProfile.ID() == "" && pm.currentProfile.LocalUserID() == uid { - return pm.currentProfile, false +// It returns the current profile and whether the call resulted in a profile change, +// or an error if the specified profile does not exist or its prefs could not be loaded. +func (pm *profileManager) SwitchToProfile(profile ipn.LoginProfileView) (cp ipn.LoginProfileView, changed bool, err error) { + prefs := defaultPrefs + switch { + case !profile.Valid(): + // Create a new profile that is not associated with any user. + profile = pm.NewProfileForUser("") + case profile == pm.currentProfile, + profile.ID() != "" && profile.ID() == pm.currentProfile.ID(), + profile.ID() == "" && profile.Equals(pm.currentProfile) && prefs.Equals(pm.prefs): + // The profile is already the current profile; no need to switch. + // + // It includes three cases: + // 1. The target profile and the current profile are aliases referencing the [ipn.LoginProfile]. + // The profile may be either a new (non-persisted) profile or an existing well-known profile. + // 2. The target profile is a well-known, persisted profile with the same ID as the current profile. + // 3. The target and the current profiles are both new (non-persisted) profiles and they are equal. + // At minimum, equality means that the profiles are owned by the same user on platforms that support it + // and the prefs are the same as well. + return pm.currentProfile, false, nil + case profile.ID() == "": + // Copy the specified profile to prevent accidental mutation. + profile = profile.AsStruct().View() + default: + // Find an existing profile by ID and load its prefs. + kp, ok := pm.knownProfiles[profile.ID()] + if !ok { + // The profile ID is not valid; it may have been deleted or never existed. + // As the target profile should have been returned by the [profileManager], + // this is unexpected and might indicate a bug in the code. + return pm.currentProfile, false, fmt.Errorf("[unexpected] %w: %s (%s)", errProfileNotFound, profile.Name(), profile.ID()) + } + profile = kp + if prefs, err = pm.loadSavedPrefs(profile.Key()); err != nil { + return pm.currentProfile, false, fmt.Errorf("failed to load profile prefs for %s (%s): %w", profile.Name(), profile.ID(), err) } - pm.NewProfileForUser(uid) - return pm.currentProfile, true } - if profile, err := pm.ProfileByID(profileID); err == nil { - if pm.CurrentProfile().ID() == profileID { - return pm.currentProfile, false - } - if err := pm.SwitchProfile(profile.ID()); err == nil { - return pm.currentProfile, true - } + if profile.ID() == "" { // new profile that has never been persisted + metricNewProfile.Add(1) + } else { + metricSwitchProfile.Add(1) } - if err := pm.SwitchToDefaultProfile(); err != nil { - pm.logf("%q's default profile cannot be used; creating a new one: %v", uid, err) - pm.NewProfile() + pm.prefs = prefs + pm.updateHealth() + pm.currentProfile = profile + pm.currentUserID = profile.LocalUserID() + if err := pm.setProfileAsUserDefault(profile); err != nil { + // This is not a fatal error; we've already switched to the profile. + // But if updating the default profile fails, we should log it. 
+ pm.logf("failed to set %s (%s) as the default profile: %v", profile.Name(), profile.ID(), err) } - return pm.currentProfile, true + return profile, true, nil } -// DefaultUserProfileID returns [ipn.ProfileID] of the default (last used) profile for the specified user, -// or an empty string if the specified user does not have a default profile. -func (pm *profileManager) DefaultUserProfileID(uid ipn.WindowsUserID) ipn.ProfileID { +// DefaultUserProfile returns a read-only view of the default (last used) profile for the specified user. +// It returns a read-only view of a new, non-persisted profile if the specified user does not have a default profile. +func (pm *profileManager) DefaultUserProfile(uid ipn.WindowsUserID) ipn.LoginProfileView { // Read the CurrentProfileKey from the store which stores // the selected profile for the specified user. b, err := pm.store.ReadState(ipn.CurrentProfileKey(string(uid))) - pm.dlogf("DefaultUserProfileID: ReadState(%q) = %v, %v", string(uid), len(b), err) + pm.dlogf("DefaultUserProfile: ReadState(%q) = %v, %v", string(uid), len(b), err) if err == ipn.ErrStateNotExist || len(b) == 0 { if runtime.GOOS == "windows" { - pm.dlogf("DefaultUserProfileID: windows: migrating from legacy preferences") + pm.dlogf("DefaultUserProfile: windows: migrating from legacy preferences") profile, err := pm.migrateFromLegacyPrefs(uid, false) if err == nil { - return profile.ID() + return profile } pm.logf("failed to migrate from legacy preferences: %v", err) } - return "" + return pm.NewProfileForUser(uid) } pk := ipn.StateKey(string(b)) prof := pm.findProfileByKey(uid, pk) if !prof.Valid() { - pm.dlogf("DefaultUserProfileID: no profile found for key: %q", pk) - return "" + pm.dlogf("DefaultUserProfile: no profile found for key: %q", pk) + return pm.NewProfileForUser(uid) } - return prof.ID() + return prof } // checkProfileAccess returns an [errProfileAccessDenied] if the current user @@ -251,12 +280,6 @@ func (pm *profileManager) setUnattendedModeAsConfigured() error { } } -// Reset unloads the current profile, if any. -func (pm *profileManager) Reset() { - pm.currentUserID = "" - pm.NewProfile() -} - // SetPrefs sets the current profile's prefs to the provided value. // It also saves the prefs to the [ipn.StateStore]. It stores a copy of the // provided prefs, which may be accessed via [profileManager.CurrentPrefs]. @@ -477,42 +500,32 @@ func (pm *profileManager) profilePrefs(p ipn.LoginProfileView) (ipn.PrefsView, e return pm.loadSavedPrefs(p.Key()) } -// SwitchProfile switches to the profile with the given id. +// SwitchToProfileByID switches to the profile with the given id. +// It returns the current profile and whether the call resulted in a profile change. // If the profile exists but is not accessible to the current user, it returns an [errProfileAccessDenied]. // If the profile does not exist, it returns an [errProfileNotFound]. 
-func (pm *profileManager) SwitchProfile(id ipn.ProfileID) error { - metricSwitchProfile.Add(1) - - kp, ok := pm.knownProfiles[id] - if !ok { - return errProfileNotFound - } - if pm.currentProfile.Valid() && kp.ID() == pm.currentProfile.ID() && pm.prefs.Valid() { - return nil - } - - if err := pm.checkProfileAccess(kp); err != nil { - return fmt.Errorf("%w: profile %q is not accessible to the current user", err, id) +func (pm *profileManager) SwitchToProfileByID(id ipn.ProfileID) (_ ipn.LoginProfileView, changed bool, err error) { + if id == pm.currentProfile.ID() { + return pm.currentProfile, false, nil } - prefs, err := pm.loadSavedPrefs(kp.Key()) + profile, err := pm.ProfileByID(id) if err != nil { - return err + return pm.currentProfile, false, err } - pm.prefs = prefs - pm.updateHealth() - pm.currentProfile = kp - return pm.setProfileAsUserDefault(kp) + return pm.SwitchToProfile(profile) } -// SwitchToDefaultProfile switches to the default (last used) profile for the current user. -// It creates a new one and switches to it if the current user does not have a default profile, +// SwitchToDefaultProfileForUser switches to the default (last used) profile for the specified user. +// It creates a new one and switches to it if the specified user does not have a default profile, // or returns an error if the default profile is inaccessible or could not be loaded. -func (pm *profileManager) SwitchToDefaultProfile() error { - if id := pm.DefaultUserProfileID(pm.currentUserID); id != "" { - return pm.SwitchProfile(id) - } - pm.NewProfileForUser(pm.currentUserID) - return nil +func (pm *profileManager) SwitchToDefaultProfileForUser(uid ipn.WindowsUserID) (_ ipn.LoginProfileView, changed bool, err error) { + return pm.SwitchToProfile(pm.DefaultUserProfile(uid)) +} + +// SwitchToDefaultProfile is like [profileManager.SwitchToDefaultProfileForUser], but switches +// to the default profile for the current user. +func (pm *profileManager) SwitchToDefaultProfile() (_ ipn.LoginProfileView, changed bool, err error) { + return pm.SwitchToDefaultProfileForUser(pm.currentUserID) } // setProfileAsUserDefault sets the specified profile as the default for the current user. @@ -610,7 +623,7 @@ func (pm *profileManager) deleteCurrentProfile() error { } if pm.currentProfile.ID() == "" { // Deleting the in-memory only new profile, just create a new one. - pm.NewProfile() + pm.SwitchToNewProfile() return nil } return pm.deleteProfileNoPermCheck(pm.currentProfile) @@ -620,7 +633,7 @@ func (pm *profileManager) deleteCurrentProfile() error { // but it doesn't check user's access rights to the profile. func (pm *profileManager) deleteProfileNoPermCheck(profile ipn.LoginProfileView) error { if profile.ID() == pm.currentProfile.ID() { - pm.NewProfile() + pm.SwitchToNewProfile() } if err := pm.WriteState(profile.Key(), nil); err != nil { return err @@ -637,7 +650,7 @@ func (pm *profileManager) DeleteAllProfilesForUser() error { currentProfileDeleted := false writeKnownProfiles := func() error { if currentProfileDeleted || pm.currentProfile.ID() == "" { - pm.NewProfile() + pm.SwitchToNewProfile() } return pm.writeKnownProfiles() } @@ -676,23 +689,22 @@ func (pm *profileManager) updateHealth() { pm.health.SetAutoUpdatePrefs(pm.prefs.AutoUpdate().Check, pm.prefs.AutoUpdate().Apply) } -// NewProfile creates and switches to a new unnamed profile. The new profile is +// SwitchToNewProfile creates and switches to a new unnamed profile. 
The new profile is // not persisted until [profileManager.SetPrefs] is called with a logged-in user. -func (pm *profileManager) NewProfile() { - pm.NewProfileForUser(pm.currentUserID) +func (pm *profileManager) SwitchToNewProfile() { + pm.SwitchToNewProfileForUser(pm.currentUserID) } -// NewProfileForUser is like [profileManager.NewProfile], but it switches to the +// SwitchToNewProfileForUser is like [profileManager.SwitchToNewProfile], but it switches to the // specified user and sets that user as the profile owner for the new profile. -func (pm *profileManager) NewProfileForUser(uid ipn.WindowsUserID) { - pm.currentUserID = uid - - metricNewProfile.Add(1) +func (pm *profileManager) SwitchToNewProfileForUser(uid ipn.WindowsUserID) { + pm.SwitchToProfile(pm.NewProfileForUser(uid)) +} - pm.prefs = defaultPrefs - pm.updateHealth() - newProfile := &ipn.LoginProfile{LocalUserID: uid} - pm.currentProfile = newProfile.View() +// NewProfileForUser creates a new profile for the specified user and returns a read-only view of it. +// It neither switches to the new profile nor persists it to the store. +func (pm *profileManager) NewProfileForUser(uid ipn.WindowsUserID) ipn.LoginProfileView { + return (&ipn.LoginProfile{LocalUserID: uid}).View() } // newProfileWithPrefs creates a new profile with the specified prefs and assigns @@ -816,7 +828,7 @@ func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *healt if suf, ok := strings.CutPrefix(string(stateKey), "user-"); ok { pm.currentUserID = ipn.WindowsUserID(suf) } - pm.NewProfile() + pm.SwitchToNewProfile() } else { pm.currentUserID = pm.currentProfile.LocalUserID() } @@ -841,7 +853,7 @@ func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *healt return nil, err } } else { - pm.NewProfile() + pm.SwitchToNewProfile() } return pm, nil diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 33209d24c..534951fb1 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -33,7 +33,7 @@ func TestProfileCurrentUserSwitch(t *testing.T) { newProfile := func(t *testing.T, loginName string) ipn.PrefsView { id++ t.Helper() - pm.NewProfile() + pm.SwitchToNewProfile() p := pm.CurrentPrefs().AsStruct() p.Persist = &persist.Persist{ NodeID: tailcfg.StableNodeID(fmt.Sprint(id)), @@ -88,7 +88,7 @@ func TestProfileList(t *testing.T) { newProfile := func(t *testing.T, loginName string) ipn.PrefsView { id++ t.Helper() - pm.NewProfile() + pm.SwitchToNewProfile() p := pm.CurrentPrefs().AsStruct() p.Persist = &persist.Persist{ NodeID: tailcfg.StableNodeID(fmt.Sprint(id)), @@ -162,7 +162,7 @@ func TestProfileDupe(t *testing.T) { must.Do(pm.SetPrefs(prefs.View(), ipn.NetworkProfile{})) } login := func(pm *profileManager, p *persist.Persist) { - pm.NewProfile() + pm.SwitchToNewProfile() reauth(pm, p) } @@ -399,7 +399,7 @@ func TestProfileManagement(t *testing.T) { checkProfiles(t) t.Logf("Create new profile") - pm.NewProfile() + pm.SwitchToNewProfile() wantCurProfile = "" wantProfiles[""] = defaultPrefs checkProfiles(t) @@ -438,7 +438,7 @@ func TestProfileManagement(t *testing.T) { checkProfiles(t) t.Logf("Create new profile - 2") - pm.NewProfile() + pm.SwitchToNewProfile() wantCurProfile = "" wantProfiles[""] = defaultPrefs checkProfiles(t) @@ -550,7 +550,7 @@ func TestProfileManagementWindows(t *testing.T) { { t.Logf("Create new profile") - pm.NewProfile() + pm.SwitchToNewProfile() wantCurProfile = "" wantProfiles[""] = defaultPrefs checkProfiles(t) diff --git a/ipn/prefs.go 
b/ipn/prefs.go index 9d6008de1..1c9d71d73 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -593,7 +593,7 @@ func (p PrefsView) Equals(p2 PrefsView) bool { } func (p *Prefs) Equals(p2 *Prefs) bool { - if p == nil && p2 == nil { + if p == p2 { return true } if p == nil || p2 == nil { @@ -1014,3 +1014,26 @@ type LoginProfile struct { // into. ControlURL string } + +// Equals reports whether p and p2 are equal. +func (p LoginProfileView) Equals(p2 LoginProfileView) bool { + return p.ж.Equals(p2.ж) +} + +// Equals reports whether p and p2 are equal. +func (p *LoginProfile) Equals(p2 *LoginProfile) bool { + if p == p2 { + return true + } + if p == nil || p2 == nil { + return false + } + return p.ID == p2.ID && + p.Name == p2.Name && + p.NetworkProfile == p2.NetworkProfile && + p.Key == p2.Key && + p.UserProfile.Equal(&p2.UserProfile) && + p.NodeID == p2.NodeID && + p.LocalUserID == p2.LocalUserID && + p.ControlURL == p2.ControlURL +} From d446e046356106190a3aa0be2acb1aba8cef1c8b Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 10 Apr 2025 18:42:00 +0100 Subject: [PATCH 0735/1708] docs/k8s: add architecture diagram for ProxyGroup Ingress (#15593) Adds a new diagram for ProxyGroups running in Ingress mode. Documentation is currently not publicly available, but a link needs adding once it is. Updates tailscale/corp#24795 Change-Id: I0d5dd6bf6f0e1b8b0becae848dc97d8b4bfb9ccb Signed-off-by: Tom Proctor --- docs/k8s/operator-architecture.md | 99 ++++++++++++++++++++++++++++--- 1 file changed, 92 insertions(+), 7 deletions(-) diff --git a/docs/k8s/operator-architecture.md b/docs/k8s/operator-architecture.md index 26bfa8542..29672f6a3 100644 --- a/docs/k8s/operator-architecture.md +++ b/docs/k8s/operator-architecture.md @@ -131,11 +131,11 @@ flowchart TD [Documentation][kb-operator-l7-ingress] -L7 ingress is relatively similar to L3 ingress. It is configured via an -`Ingress` object instead of a `Service`, and uses `tailscale serve` to accept -traffic instead of configuring `iptables` or `nftables` rules. Note that we use -tailscaled's local API (`SetServeConfig`) to set serve config, not the -`tailscale serve` command. +The L7 ingress architecture diagram is relatively similar to L3 ingress. It is +configured via an `Ingress` object instead of a `Service`, and uses +`tailscale serve` to accept traffic instead of configuring `iptables` or +`nftables` rules. Note that we use tailscaled's local API (`SetServeConfig`) to +set serve config, not the `tailscale serve` command. ```mermaid %%{ init: { 'theme':'neutral' } }%% @@ -159,6 +159,10 @@ flowchart TD state-secret["state Secret"] end + subgraph cluster-scope[Cluster scoped resources] + ingress-class[Tailscale IngressClass] + end + subgraph defaultns[namespace=default] ingress[tailscale Ingress] svc["Service"] @@ -260,6 +264,8 @@ flowchart TD ## `ProxyGroup` +### Egress + [Documentation][kb-operator-l3-egress-proxygroup] The `ProxyGroup` custom resource manages a collection of proxy Pods that @@ -278,8 +284,6 @@ ports via the ClusterIP Service and its EndpointSlice. The operator then generates the egress ConfigMap that tells the `ProxyGroup` Pods which incoming ports map to which egress targets. -`ProxyGroups` currently only support egress. - ```mermaid %%{ init: { 'theme':'neutral' } }%% @@ -364,6 +368,86 @@ flowchart LR ``` +### Ingress + +A ProxyGroup can also serve as a highly available set of proxies for an +Ingress resource. The `-0` Pod is always the replica that will issue a certificate +from Let's Encrypt. 
+ +If the same Ingress config is applied in multiple clusters, ProxyGroup proxies +from each cluster will be valid targets for the ts.net DNS name, and the proxy +each client is routed to will depend on the same rules as for [high availability][kb-ha] +subnet routers, and is encoded in the client's netmap. + +```mermaid +%%{ init: { 'theme':'neutral' } }%% +flowchart LR + classDef tsnode color:#fff,fill:#000; + classDef pod fill:#fff; + + subgraph Key + ts[Tailscale device]:::tsnode + pod((Pod)):::pod + blank[" "]-->|WireGuard traffic| blank2[" "] + blank3[" "]-->|Other network traffic| blank4[" "] + end + + subgraph k8s[Kubernetes cluster] + subgraph tailscale-ns[namespace=tailscale] + operator((operator)):::tsnode + ingress-sts["StatefulSet"] + serve-cm[serve config ConfigMap] + ingress-0(("pg-0 (dst)")):::tsnode + ingress-1(("pg-1 (dst)")):::tsnode + tls-secret[myapp.tails.ts.net Secret] + end + + subgraph defaultns[namespace=default] + ingress[myapp.tails.ts.net Ingress] + svc["myapp Service"] + svc --> pod1((pod1)) + svc --> pod2((pod2)) + end + + subgraph cluster[Cluster scoped resources] + ingress-class[Tailscale IngressClass] + pg[ProxyGroup 'pg'] + end + end + + control["Tailscale control plane"] + ts-svc["myapp Tailscale Service"] + + client["client (src)"]:::tsnode -->|dials https\://myapp.tails.ts.net/api| ingress-1 + ingress-0 -->|forwards traffic| svc + ingress-1 -->|forwards traffic| svc + control -.->|creates| ts-svc + operator -.->|creates myapp Tailscale Service| control + control -.->|netmap points myapp Tailscale Service to pg-1| client + operator -.->|creates| ingress-sts + ingress-sts -.->|manages| ingress-0 + ingress-sts -.->|manages| ingress-1 + ingress-0 -.->|issues myapp.tails.ts.net cert| le[Let's Encrypt] + ingress-0 -.->|stores cert| tls-secret + ingress-1 -.->|reads cert| tls-secret + operator -.->|watches| ingress + operator -.->|watches| pg + operator -.->|creates| serve-cm + serve-cm -.->|mounted| ingress-0 + serve-cm -.->|mounted| ingress-1 + ingress -.->|/api prefix| svc + + linkStyle 0 stroke:red; + linkStyle 4 stroke:red; + + linkStyle 1 stroke:blue; + linkStyle 2 stroke:blue; + linkStyle 3 stroke:blue; + linkStyle 5 stroke:blue; + linkStyle 6 stroke:blue; + +``` + ## Connector [Subnet router and exit node documentation][kb-operator-connector] @@ -514,4 +598,5 @@ flowchart TD [kb-operator-connector]: https://tailscale.com/kb/1441/kubernetes-operator-connector [kb-operator-app-connector]: https://tailscale.com/kb/1517/kubernetes-operator-app-connector [kb-operator-recorder]: https://tailscale.com/kb/1484/kubernetes-operator-deploying-tsrecorder +[kb-ha]: https://tailscale.com/kb/1115/high-availability [k8s-impersonation]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation From 6c914409cd0300e04506d6f825f7e53e7ff7f3a2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 10 Apr 2025 11:26:57 -0700 Subject: [PATCH 0736/1708] Revert "ipn/ipnstate: add home DERP to tailscale status JSON" This reverts commit 476a4c6ff174d46ce3b125c018c07c43713e1c10. Reason: redundant with `tailscale status --json | jq '.Self.Relay'` which we all forgot about. Whoops. 
Updates #15625 --- ipn/ipnstate/ipnstate.go | 3 --- wgengine/magicsock/magicsock.go | 12 ++++-------- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/ipn/ipnstate/ipnstate.go b/ipn/ipnstate/ipnstate.go index 4494afb67..89c6d7e24 100644 --- a/ipn/ipnstate/ipnstate.go +++ b/ipn/ipnstate/ipnstate.go @@ -53,9 +53,6 @@ type Status struct { // If nil, an exit node is not in use. ExitNodeStatus *ExitNodeStatus `json:"ExitNodeStatus,omitempty"` - // DERPHomeRegionID is the current home DERP region ID. - DERPHomeRegionID int - // Health contains health check problems. // Empty means everything is good. (or at least that no known // problems are detected) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index e7e65b6ee..a32867f72 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2892,15 +2892,11 @@ func (c *Conn) UpdateStatus(sb *ipnstate.StatusBuilder) { }) } - sb.MutateStatus(func(s *ipnstate.Status) { - s.DERPHomeRegionID = c.myDerp - c.foreachActiveDerpSortedLocked(func(node int, ad activeDerp) { - // TODO(bradfitz): add a method to ipnstate.StatusBuilder - // to include all the DERP connections we have open - // and add it here. See the other caller of foreachActiveDerpSortedLocked. - }) + c.foreachActiveDerpSortedLocked(func(node int, ad activeDerp) { + // TODO(bradfitz): add a method to ipnstate.StatusBuilder + // to include all the DERP connections we have open + // and add it here. See the other caller of foreachActiveDerpSortedLocked. }) - } // SetStatistics specifies a per-connection statistics aggregator. From 11d1dd2aed2a34e151bfd598d4944f778c7af149 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Thu, 10 Apr 2025 11:10:48 -0700 Subject: [PATCH 0737/1708] tsconsensus: mark 2 tests that were flaky in CI Updates #15627 Signed-off-by: Fran Bull --- tsconsensus/tsconsensus_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tsconsensus/tsconsensus_test.go b/tsconsensus/tsconsensus_test.go index 37ccdcc84..d1b92f8a4 100644 --- a/tsconsensus/tsconsensus_test.go +++ b/tsconsensus/tsconsensus_test.go @@ -26,6 +26,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/raft" "tailscale.com/client/tailscale" + "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/ipn/store/mem" "tailscale.com/net/netns" "tailscale.com/tailcfg" @@ -574,6 +575,7 @@ func TestRejoin(t *testing.T) { } func TestOnlyTaggedPeersCanDialRaftPort(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" @@ -631,6 +633,7 @@ func TestOnlyTaggedPeersCanDialRaftPort(t *testing.T) { } func TestOnlyTaggedPeersCanBeDialed(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" From 4941cd7c73a69a867521c42b58b095a883ebbc2e Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 10 Apr 2025 20:24:58 -0500 Subject: [PATCH 0738/1708] cmd/tailscaled,ipn/{auditlog,desktop,ipnext,ipnlocal},tsd: extract LocalBackend extension interfaces and implementation In this PR, we refactor the LocalBackend extension system, moving from direct callbacks to a more organized extension host model. Specifically, we: - Extract interface and callback types used by packages extending LocalBackend functionality into a new ipn/ipnext package. - Define ipnext.Host as a new interface that bridges extensions with LocalBackend. 
It enables extensions to register callbacks and interact with LocalBackend in a concurrency-safe, well-defined, and controlled way. - Move existing callback registration and invocation code from ipnlocal.LocalBackend into a new type called ipnlocal.ExtensionHost, implementing ipnext.Host. - Improve docs for existing types and methods while adding docs for the new interfaces. - Add test coverage for both the extracted and the new code. - Remove ipn/desktop.SessionManager from tsd.System since ipn/desktop is now self-contained. - Update existing extensions (e.g., ipn/auditlog and ipn/desktop) to use the new interfaces where appropriate. We're not introducing new callback and hook types (e.g., for ipn.Prefs changes) just yet, nor are we enhancing current callbacks, such as by improving conflict resolution when more than one extension tries to influence profile selection via a background profile resolver. These further improvements will be submitted separately. Updates #12614 Updates tailscale/corp#27645 Updates tailscale/corp#26435 Updates tailscale/corp#18342 Signed-off-by: Nick Khyl --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 3 +- cmd/tailscaled/tailscaled_windows.go | 9 +- ipn/auditlog/extension.go | 39 +- .../extension.go} | 106 +- ipn/ipnext/ipnext.go | 284 ++++ ipn/ipnlocal/extension_host.go | 537 ++++++++ ipn/ipnlocal/extension_host_test.go | 1139 +++++++++++++++++ ipn/ipnlocal/local.go | 283 +--- ipn/ipnlocal/profiles.go | 4 + tsd/tsd.go | 4 - 11 files changed, 2079 insertions(+), 331 deletions(-) rename ipn/{ipnlocal/desktop_sessions.go => desktop/extension.go} (62%) create mode 100644 ipn/ipnext/ipnext.go create mode 100644 ipn/ipnlocal/extension_host.go create mode 100644 ipn/ipnlocal/extension_host_test.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 7fd4c4b21..416265188 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -815,8 +815,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ - 💣 tailscale.com/ipn/desktop from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 394056295..9cdebbae1 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -273,8 +273,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn from tailscale.com/client/local+ W tailscale.com/ipn/auditlog from tailscale.com/cmd/tailscaled tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ - 💣 tailscale.com/ipn/desktop from tailscale.com/cmd/tailscaled+ + W 💣 tailscale.com/ipn/desktop from tailscale.com/cmd/tailscaled 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ + tailscale.com/ipn/ipnext from tailscale.com/ipn/auditlog+ tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled tailscale.com/ipn/ipnstate from tailscale.com/client/local+ diff --git a/cmd/tailscaled/tailscaled_windows.go 
b/cmd/tailscaled/tailscaled_windows.go index dfe53ef61..54ff2af14 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -45,7 +45,7 @@ import ( "tailscale.com/drive/driveimpl" "tailscale.com/envknob" _ "tailscale.com/ipn/auditlog" - "tailscale.com/ipn/desktop" + _ "tailscale.com/ipn/desktop" "tailscale.com/logpolicy" "tailscale.com/logtail/backoff" "tailscale.com/net/dns" @@ -337,13 +337,6 @@ func beWindowsSubprocess() bool { sys.Set(driveimpl.NewFileSystemForRemote(log.Printf)) - if sessionManager, err := desktop.NewSessionManager(log.Printf); err == nil { - sys.Set(sessionManager) - } else { - // Errors creating the session manager are unexpected, but not fatal. - log.Printf("[unexpected]: error creating a desktop session manager: %v", err) - } - publicLogID, _ := logid.ParsePublicID(logID) err = startIPNServer(ctx, log.Printf, publicLogID, sys) if err != nil { diff --git a/ipn/auditlog/extension.go b/ipn/auditlog/extension.go index 8be7dfb66..6bbe37398 100644 --- a/ipn/auditlog/extension.go +++ b/ipn/auditlog/extension.go @@ -14,19 +14,23 @@ import ( "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" - "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/ipnext" "tailscale.com/tailcfg" "tailscale.com/tsd" "tailscale.com/types/lazy" "tailscale.com/types/logger" ) +// featureName is the name of the feature implemented by this package. +// It is also the the [extension] name and the log prefix. +const featureName = "auditlog" + func init() { - feature.Register("auditlog") - ipnlocal.RegisterExtension("auditlog", newExtension) + feature.Register(featureName) + ipnext.RegisterExtension(featureName, newExtension) } -// extension is an [ipnlocal.Extension] managing audit logging +// extension is an [ipnext.Extension] managing audit logging // on platforms that import this package. // As of 2025-03-27, that's only Windows and macOS. type extension struct { @@ -48,19 +52,24 @@ type extension struct { logger *Logger } -// newExtension is an [ipnlocal.NewExtensionFn] that creates a new audit log extension. -// It is registered with [ipnlocal.RegisterExtension] if the package is imported. -func newExtension(logf logger.Logf, _ *tsd.System) (ipnlocal.Extension, error) { - return &extension{logf: logger.WithPrefix(logf, "auditlog: ")}, nil +// newExtension is an [ipnext.NewExtensionFn] that creates a new audit log extension. +// It is registered with [ipnext.RegisterExtension] if the package is imported. +func newExtension(logf logger.Logf, _ *tsd.System) (ipnext.Extension, error) { + return &extension{logf: logger.WithPrefix(logf, featureName+": ")}, nil +} + +// Name implements [ipnext.Extension]. +func (e *extension) Name() string { + return featureName } -// Init implements [ipnlocal.Extension] by registering callbacks and providers +// Init implements [ipnext.Extension] by registering callbacks and providers // for the duration of the extension's lifetime. 
-func (e *extension) Init(lb *ipnlocal.LocalBackend) error { +func (e *extension) Init(h ipnext.Host) error { e.cleanup = []func(){ - lb.RegisterControlClientCallback(e.controlClientChanged), - lb.RegisterProfileChangeCallback(e.profileChanged, false), - lb.RegisterAuditLogProvider(e.getCurrentLogger), + h.RegisterControlClientCallback(e.controlClientChanged), + h.Profiles().RegisterProfileChangeCallback(e.profileChanged), + h.RegisterAuditLogProvider(e.getCurrentLogger), } return nil } @@ -165,8 +174,8 @@ func noCurrentLogger(_ tailcfg.ClientAuditAction, _ string) error { return errNoLogger } -// getCurrentLogger is an [ipnlocal.AuditLogProvider] registered with [ipnlocal.LocalBackend]. -// It is called when [ipnlocal.LocalBackend] needs to audit an action. +// getCurrentLogger is an [ipnext.AuditLogProvider] registered with [ipnext.Host]. +// It is called when [ipnlocal.LocalBackend] or an extension needs to audit an action. // // It returns a function that enqueues the audit log for the current profile, // or [noCurrentLogger] if the logger is unavailable. diff --git a/ipn/ipnlocal/desktop_sessions.go b/ipn/desktop/extension.go similarity index 62% rename from ipn/ipnlocal/desktop_sessions.go rename to ipn/desktop/extension.go index 29cb196c7..86ae96f5b 100644 --- a/ipn/ipnlocal/desktop_sessions.go +++ b/ipn/desktop/extension.go @@ -7,29 +7,32 @@ //go:build windows && !ts_omit_desktop_sessions -package ipnlocal +package desktop import ( "cmp" - "errors" "fmt" "sync" "tailscale.com/feature" "tailscale.com/ipn" - "tailscale.com/ipn/desktop" + "tailscale.com/ipn/ipnext" "tailscale.com/tsd" "tailscale.com/types/logger" "tailscale.com/util/syspolicy" ) +// featureName is the name of the feature implemented by this package. +// It is also the the [desktopSessionsExt] name and the log prefix. +const featureName = "desktop-sessions" + func init() { - feature.Register("desktop-sessions") - RegisterExtension("desktop-sessions", newDesktopSessionsExt) + feature.Register(featureName) + ipnext.RegisterExtension(featureName, newDesktopSessionsExt) } -// desktopSessionsExt implements [Extension]. -var _ Extension = (*desktopSessionsExt)(nil) +// [desktopSessionsExt] implements [ipnext.Extension]. +var _ ipnext.Extension = (*desktopSessionsExt)(nil) // desktopSessionsExt extends [LocalBackend] with desktop session management. // It keeps Tailscale running in the background if Always-On mode is enabled, @@ -37,32 +40,41 @@ var _ Extension = (*desktopSessionsExt)(nil) // locks their screen, or disconnects a remote session. type desktopSessionsExt struct { logf logger.Logf - sm desktop.SessionManager + sm SessionManager - *LocalBackend // or nil, until Init is called - cleanup []func() // cleanup functions to call on shutdown + host ipnext.Host // or nil, until Init is called + cleanup []func() // cleanup functions to call on shutdown // mu protects all following fields. - // When both mu and [LocalBackend.mu] need to be taken, - // [LocalBackend.mu] must be taken before mu. - mu sync.Mutex - id2sess map[desktop.SessionID]*desktop.Session + mu sync.Mutex + sessByID map[SessionID]*Session } // newDesktopSessionsExt returns a new [desktopSessionsExt], -// or an error if [desktop.SessionManager] is not available. -func newDesktopSessionsExt(logf logger.Logf, sys *tsd.System) (Extension, error) { - sm, ok := sys.SessionManager.GetOK() - if !ok { - return nil, errors.New("session manager is not available") +// or an error if a [SessionManager] cannot be created. 
+// It is registered with [ipnext.RegisterExtension] if the package is imported. +func newDesktopSessionsExt(logf logger.Logf, sys *tsd.System) (ipnext.Extension, error) { + logf = logger.WithPrefix(logf, featureName+": ") + sm, err := NewSessionManager(logf) + if err != nil { + return nil, fmt.Errorf("%w: session manager is not available: %w", ipnext.SkipExtension, err) } - return &desktopSessionsExt{logf: logf, sm: sm, id2sess: make(map[desktop.SessionID]*desktop.Session)}, nil + return &desktopSessionsExt{ + logf: logf, + sm: sm, + sessByID: make(map[SessionID]*Session), + }, nil +} + +// Name implements [ipnext.Extension]. +func (e *desktopSessionsExt) Name() string { + return featureName } -// Init implements [localBackendExtension]. -func (e *desktopSessionsExt) Init(lb *LocalBackend) (err error) { - e.LocalBackend = lb - unregisterResolver := lb.RegisterBackgroundProfileResolver(e.getBackgroundProfile) +// Init implements [ipnext.Extension]. +func (e *desktopSessionsExt) Init(host ipnext.Host) (err error) { + e.host = host + unregisterResolver := host.Profiles().RegisterBackgroundProfileResolver(e.getBackgroundProfile) unregisterSessionCb, err := e.sm.RegisterStateCallback(e.updateDesktopSessionState) if err != nil { unregisterResolver() @@ -72,30 +84,30 @@ func (e *desktopSessionsExt) Init(lb *LocalBackend) (err error) { return nil } -// updateDesktopSessionState is a [desktop.SessionStateCallback] -// invoked by [desktop.SessionManager] once for each existing session +// updateDesktopSessionState is a [SessionStateCallback] +// invoked by [SessionManager] once for each existing session // and whenever the session state changes. It updates the session map // and switches to the best profile if necessary. -func (e *desktopSessionsExt) updateDesktopSessionState(session *desktop.Session) { +func (e *desktopSessionsExt) updateDesktopSessionState(session *Session) { e.mu.Lock() - if session.Status != desktop.ClosedSession { - e.id2sess[session.ID] = session + if session.Status != ClosedSession { + e.sessByID[session.ID] = session } else { - delete(e.id2sess, session.ID) + delete(e.sessByID, session.ID) } e.mu.Unlock() var action string switch session.Status { - case desktop.ForegroundSession: + case ForegroundSession: // The user has either signed in or unlocked their session. // For remote sessions, this may also mean the user has connected. // The distinction isn't important for our purposes, // so let's always say "signed in". action = "signed in to" - case desktop.BackgroundSession: + case BackgroundSession: action = "locked" - case desktop.ClosedSession: + case ClosedSession: action = "signed out from" default: panic("unreachable") @@ -104,10 +116,10 @@ func (e *desktopSessionsExt) updateDesktopSessionState(session *desktop.Session) userIdentifier := cmp.Or(maybeUsername, string(session.User.UserID()), "user") reason := fmt.Sprintf("%s %s session %v", userIdentifier, action, session.ID) - e.SwitchToBestProfile(reason) + e.host.Profiles().SwitchToBestProfileAsync(reason) } -// getBackgroundProfile is a [profileResolver] that works as follows: +// getBackgroundProfile is a [ipnext.ProfileResolver] that works as follows: // // If Always-On mode is disabled, it returns no profile. // @@ -121,9 +133,7 @@ func (e *desktopSessionsExt) updateDesktopSessionState(session *desktop.Session) // disconnects without signing out. // // In all other cases, it returns no profile. -// -// It is called with [LocalBackend.mu] locked. 
-func (e *desktopSessionsExt) getBackgroundProfile() ipn.LoginProfileView { +func (e *desktopSessionsExt) getBackgroundProfile(profiles ipnext.ProfileStore) ipn.LoginProfileView { e.mu.Lock() defer e.mu.Unlock() @@ -135,16 +145,16 @@ func (e *desktopSessionsExt) getBackgroundProfile() ipn.LoginProfileView { isCurrentProfileOwnerSignedIn := false var foregroundUIDs []ipn.WindowsUserID - for _, s := range e.id2sess { + for _, s := range e.sessByID { switch uid := s.User.UserID(); uid { - case e.pm.CurrentProfile().LocalUserID(): + case profiles.CurrentProfile().LocalUserID(): isCurrentProfileOwnerSignedIn = true - if s.Status == desktop.ForegroundSession { + if s.Status == ForegroundSession { // Keep the current profile if the user has a foreground session. - return e.pm.CurrentProfile() + return profiles.CurrentProfile() } default: - if s.Status == desktop.ForegroundSession { + if s.Status == ForegroundSession { foregroundUIDs = append(foregroundUIDs, uid) } } @@ -154,7 +164,7 @@ func (e *desktopSessionsExt) getBackgroundProfile() ipn.LoginProfileView { // or if the current profile's owner has no foreground session, switch to the default profile // of the first user with a foreground session, if any. for _, uid := range foregroundUIDs { - if profile := e.pm.DefaultUserProfile(uid); profile.ID() != "" { + if profile := profiles.DefaultUserProfile(uid); profile.ID() != "" { return profile } } @@ -163,19 +173,19 @@ func (e *desktopSessionsExt) getBackgroundProfile() ipn.LoginProfileView { // keep the current profile even if the session is not in the foreground, // such as when the screen is locked or a remote session is disconnected. if len(foregroundUIDs) == 0 && isCurrentProfileOwnerSignedIn { - return e.pm.CurrentProfile() + return profiles.CurrentProfile() } // Otherwise, there's no background profile. return ipn.LoginProfileView{} } -// Shutdown implements [localBackendExtension]. +// Shutdown implements [ipnext.Extension]. func (e *desktopSessionsExt) Shutdown() error { for _, f := range e.cleanup { f() } e.cleanup = nil - e.LocalBackend = nil - return nil + e.host = nil + return e.sm.Close() } diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go new file mode 100644 index 000000000..af870b53a --- /dev/null +++ b/ipn/ipnext/ipnext.go @@ -0,0 +1,284 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package ipnext defines types and interfaces used for extending the core LocalBackend +// functionality with additional features and services. +package ipnext + +import ( + "errors" + "fmt" + + "tailscale.com/control/controlclient" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" + "tailscale.com/tsd" + "tailscale.com/types/logger" + "tailscale.com/types/views" + "tailscale.com/util/mak" +) + +// Extension augments LocalBackend with additional functionality. +// +// An extension uses the provided [Host] to register callbacks +// and interact with the backend in a controlled, well-defined +// and thread-safe manner. +// +// Extensions are registered using [RegisterExtension]. +// +// They must be safe for concurrent use. +type Extension interface { + // Name is a unique name of the extension. + // It must be the same as the name used to register the extension. + Name() string + + // Init is called to initialize the extension when LocalBackend is initialized. + // If the extension cannot be initialized, it must return an error, + // and its Shutdown method will not be called on the host's shutdown. 
+ // Returned errors are not fatal; they are used for logging. + // A [SkipExtension] error indicates an intentional decision rather than a failure. + Init(Host) error + + // Shutdown is called when LocalBackend is shutting down, + // provided the extension was initialized. For multiple extensions, + // Shutdown is called in the reverse order of Init. + // Returned errors are not fatal; they are used for logging. + Shutdown() error +} + +// NewExtensionFn is a function that instantiates an [Extension]. +// If a registered extension cannot be instantiated, the function must return an error. +// If the extension should be skipped at runtime, it must return either [SkipExtension] +// or a wrapped [SkipExtension]. Any other error returned is fatal and will prevent +// the LocalBackend from starting. +type NewExtensionFn func(logger.Logf, *tsd.System) (Extension, error) + +// SkipExtension is an error returned by [NewExtensionFn] to indicate that the extension +// should be skipped rather than prevent the LocalBackend from starting. +// +// Skipping an extension should be reserved for cases where the extension is not supported +// on the current platform or configuration, or depends on a feature that is not available, +// or otherwise should be disabled permanently rather than temporarily. +// +// Specifically, it must not be returned if the extension is not required right now +// based on user preferences, policy settings, the current tailnet, or other factors +// that may change throughout the LocalBackend's lifetime. +var SkipExtension = errors.New("skipping extension") + +// Definition describes a registered [Extension]. +type Definition struct { + name string // name under which the extension is registered + newFn NewExtensionFn // function that creates a new instance of the extension +} + +// Name returns the name of the extension. +func (d *Definition) Name() string { + return d.name +} + +// MakeExtension instantiates the extension. +func (d *Definition) MakeExtension(logf logger.Logf, sys *tsd.System) (Extension, error) { + ext, err := d.newFn(logf, sys) + if err != nil { + return nil, err + } + if ext.Name() != d.name { + return nil, fmt.Errorf("extension name mismatch: registered %q; actual %q", d.name, ext.Name()) + } + return ext, nil +} + +// extensionsByName is a map of registered extensions, +// where the key is the name of the extension. +var extensionsByName map[string]*Definition + +// extensionsByOrder is a slice of registered extensions, +// in the order they were registered. +var extensionsByOrder []*Definition + +// RegisterExtension registers a function that instantiates an [Extension]. +// The name must be the same as returned by the extension's [Extension.Name]. +// +// It must be called on the main goroutine before LocalBackend is created, +// such as from an init function of the package implementing the extension. +// +// It panics if newExt is nil or if an extension with the same name +// has already been registered. +func RegisterExtension(name string, newExt NewExtensionFn) { + if newExt == nil { + panic(fmt.Sprintf("ipnext: newExt is nil: %q", name)) + } + if _, ok := extensionsByName[name]; ok { + panic(fmt.Sprintf("ipnext: duplicate extensions: %q", name)) + } + ext := &Definition{name, newExt} + mak.Set(&extensionsByName, name, ext) + extensionsByOrder = append(extensionsByOrder, ext) +} + +// Extensions returns a read-only view of the extensions +// registered via [RegisterExtension]. It preserves the order +// in which the extensions were registered. 
+func Extensions() views.Slice[*Definition] { + return views.SliceOf(extensionsByOrder) +} + +// DefinitionForTest returns a [Definition] for the specified [Extension]. +// It is primarily used for testing where the test code needs to instantiate +// and use an extension without registering it. +func DefinitionForTest(ext Extension) *Definition { + return &Definition{ + name: ext.Name(), + newFn: func(logger.Logf, *tsd.System) (Extension, error) { return ext, nil }, + } +} + +// DefinitionWithErrForTest returns a [Definition] with the specified extension name +// whose [Definition.MakeExtension] method returns the specified error. +// It is used for testing. +func DefinitionWithErrForTest(name string, err error) *Definition { + return &Definition{ + name: name, + newFn: func(logger.Logf, *tsd.System) (Extension, error) { return nil, err }, + } +} + +// Host is the API surface used by [Extension]s to interact with LocalBackend +// in a controlled manner. +// +// Extensions can register callbacks, request information, or perform actions +// via the [Host] interface. +// +// Typically, the host invokes registered callbacks when one of the following occurs: +// - LocalBackend notifies it of an event or state change that may be +// of interest to extensions, such as when switching [ipn.LoginProfile]. +// - LocalBackend needs to consult extensions for information, for example, +// determining the most appropriate profile for the current state of the system. +// - LocalBackend performs an extensible action, such as logging an auditable event, +// and delegates its execution to the extension. +// +// The callbacks are invoked synchronously, and the LocalBackend's state +// remains unchanged while callbacks execute. +// +// In contrast, actions initiated by extensions are generally asynchronous, +// as indicated by the "Async" suffix in their names. +// Performing actions may result in callbacks being invoked as described above. +// +// To prevent conflicts between extensions competing for shared state, +// such as the current profile or prefs, the host must not expose methods +// that directly modify that state. For example, instead of allowing extensions +// to switch profiles at-will, the host's [ProfileServices] provides a method +// to switch to the "best" profile. The host can then consult extensions +// to determine the appropriate profile to use and resolve any conflicts +// in a controlled manner. +// +// A host must be safe for concurrent use. +type Host interface { + // Profiles returns the host's [ProfileServices]. + Profiles() ProfileServices + + // RegisterAuditLogProvider registers an audit log provider, + // which returns a function to be called when an auditable action + // is about to be performed. The returned function unregisters the provider. + // It is a runtime error to register a nil provider. + RegisterAuditLogProvider(AuditLogProvider) (unregister func()) + + // AuditLogger returns a function that calls all currently registered audit loggers. + // The function fails if any logger returns an error, indicating that the action + // cannot be logged and must not be performed. + // + // The returned function captures the current state (e.g., the current profile) at + // the time of the call and must not be persisted. + AuditLogger() ipnauth.AuditLogFunc + + // RegisterControlClientCallback registers a function to be called every time a new + // control client is created. The returned function unregisters the callback. + // It is a runtime error to register a nil callback. 
+ RegisterControlClientCallback(NewControlClientCallback) (unregister func()) +} + +// ProfileServices provides access to the [Host]'s profile management services, +// such as switching profiles and registering profile change callbacks. +type ProfileServices interface { + // SwitchToBestProfileAsync asynchronously selects the best profile to use + // and switches to it, unless it is already the current profile. + // + // If an extension needs to know when a profile switch occurs, + // it must use [ProfileServices.RegisterProfileChangeCallback] + // to register a [ProfileChangeCallback]. + // + // The reason indicates why the profile is being switched, such as due + // to a client connecting or disconnecting or a change in the desktop + // session state. It is used for logging. + SwitchToBestProfileAsync(reason string) + + // RegisterBackgroundProfileResolver registers a function to be used when + // resolving the background profile. The returned function unregisters the resolver. + // It is a runtime error to register a nil resolver. + // + // TODO(nickkhyl): allow specifying some kind of priority/altitude for the resolver. + // TODO(nickkhyl): make it a "profile resolver" instead of a "background profile resolver". + // The concepts of the "current user", "foreground profile" and "background profile" + // only exist on Windows, and we're moving away from them anyway. + RegisterBackgroundProfileResolver(ProfileResolver) (unregister func()) + + // RegisterProfileChangeCallback registers a function to be called when the current + // [ipn.LoginProfile] changes. The returned function unregisters the callback. + // It is a runtime error to register a nil callback. + RegisterProfileChangeCallback(ProfileChangeCallback) (unregister func()) +} + +// ProfileStore provides read-only access to available login profiles and their preferences. +// It is not safe for concurrent use and can only be used from the callback it is passed to. +type ProfileStore interface { + // CurrentUserID returns the current user ID. It is only non-empty on + // Windows where we have a multi-user system. + // + // Deprecated: this method exists for compatibility with the current (as of 2024-08-27) + // permission model and will be removed as we progress on tailscale/corp#18342. + CurrentUserID() ipn.WindowsUserID + + // CurrentProfile returns a read-only [ipn.LoginProfileView] of the current profile. + // The returned view is always valid, but the profile's [ipn.LoginProfileView.ID] + // returns "" if the profile is new and has not been persisted yet. + CurrentProfile() ipn.LoginProfileView + + // CurrentPrefs returns a read-only view of the current prefs. + // The returned view is always valid. + CurrentPrefs() ipn.PrefsView + + // DefaultUserProfile returns a read-only view of the default (last used) profile for the specified user. + // It returns a read-only view of a new, non-persisted profile if the specified user does not have a default profile. + DefaultUserProfile(uid ipn.WindowsUserID) ipn.LoginProfileView +} + +// AuditLogProvider is a function that returns an [ipnauth.AuditLogFunc] for +// logging auditable actions. +type AuditLogProvider func() ipnauth.AuditLogFunc + +// ProfileResolver is a function that returns a read-only view of a login profile. +// An invalid view indicates no profile. A valid profile view with an empty [ipn.ProfileID] +// indicates that the profile is new and has not been persisted yet. +// The provided [ProfileStore] can only be used for the duration of the callback. 
+type ProfileResolver func(ProfileStore) ipn.LoginProfileView + +// ProfileChangeCallback is a function to be called when the current login profile changes. +// The sameNode parameter indicates whether the profile represents the same node as before, +// such as when only the profile metadata is updated but the node ID remains the same, +// or when a new profile is persisted and assigned an [ipn.ProfileID] for the first time. +// The subscribers can use this information to decide whether to reset their state. +// +// The profile and prefs are always valid, but the profile's [ipn.LoginProfileView.ID] +// returns "" if the profile is new and has not been persisted yet. +type ProfileChangeCallback func(_ ipn.LoginProfileView, _ ipn.PrefsView, sameNode bool) + +// NewControlClientCallback is a function to be called when a new [controlclient.Client] +// is created and before it is first used. The login profile and prefs represent +// the profile for which the cc is created and are always valid; however, the +// profile's [ipn.LoginProfileView.ID] returns "" if the profile is new +// and has not been persisted yet. If the [controlclient.Client] is created +// due to a profile switch, any registered [ProfileChangeCallback]s are called first. +// +// It returns a function to be called when the cc is being shut down, +// or nil if no cleanup is needed. +type NewControlClientCallback func(controlclient.Client, ipn.LoginProfileView, ipn.PrefsView) (cleanup func()) diff --git a/ipn/ipnlocal/extension_host.go b/ipn/ipnlocal/extension_host.go new file mode 100644 index 000000000..4a617ed72 --- /dev/null +++ b/ipn/ipnlocal/extension_host.go @@ -0,0 +1,537 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnlocal + +import ( + "context" + "errors" + "fmt" + "iter" + "maps" + "slices" + "strings" + "sync" + "sync/atomic" + "time" + + "tailscale.com/control/controlclient" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" + "tailscale.com/ipn/ipnext" + "tailscale.com/tailcfg" + "tailscale.com/tsd" + "tailscale.com/types/logger" + "tailscale.com/util/execqueue" + "tailscale.com/util/set" + "tailscale.com/util/testenv" +) + +// ExtensionHost is a bridge between the [LocalBackend] and the registered [ipnext.Extension]s. +// It implements [ipnext.Host] and is safe for concurrent use. +// +// A nil pointer to [ExtensionHost] is a valid, no-op extension host which is primarily used in tests +// that instantiate [LocalBackend] directly without using [NewExtensionHost]. +// +// The [LocalBackend] is not required to hold its mutex when calling the host's methods, +// but it typically does so either to prevent changes to its state (for example, the current profile) +// while callbacks are executing, or because it calls the host's methods as part of a larger operation +// that requires the mutex to be held. +// +// Extensions might invoke the host's methods either from callbacks triggered by the [LocalBackend], +// or in a response to external events. Some methods can be called by both the extensions and the backend. +// +// As a general rule, the host cannot assume anything about the current state of the [LocalBackend]'s +// internal mutex on entry to its methods, and therefore cannot safely call [LocalBackend] methods directly. +// +// The following are typical and supported patterns: +// - LocalBackend notifies the host about an event, such as a change in the current profile. 
+// The host invokes callbacks registered by Extensions, forwarding the event arguments to them. +// If necessary, the host can also update its own state for future use. +// - LocalBackend requests information from the host, such as the effective [ipnauth.AuditLogFunc] +// or the [ipn.LoginProfile] to use when no GUI/CLI client is connected. Typically, [LocalBackend] +// provides the required context to the host, and the host returns the result to [LocalBackend] +// after forwarding the request to the extensions. +// - Extension invokes the host's method to perform an action, such as switching to the "best" profile +// in response to a change in the device's state. Since the host does not know whether the [LocalBackend]'s +// internal mutex is held, it cannot invoke any methods on the [LocalBackend] directly and must instead +// do so asynchronously, such as by using [ExtensionHost.enqueueBackendOperation]. +// - Extension requests information from the host, such as the effective [ipnauth.AuditLogFunc] +// or the current [ipn.LoginProfile]. Since the host cannot invoke any methods on the [LocalBackend] directly, +// it should maintain its own view of the current state, updating it when the [LocalBackend] notifies it +// about a change or event. +// +// To safeguard against adopting incorrect or risky patterns, the host does not store [LocalBackend] in its fields +// and instead provides [ExtensionHost.enqueueBackendOperation]. Additionally, to make it easier to test extensions +// and to further reduce the risk of accessing unexported methods or fields of [LocalBackend], the host interacts +// with it via the [Backend] interface. +type ExtensionHost struct { + logf logger.Logf // prefixed with "ipnext:" + + // allExtensions holds the extensions in the order they were registered, + // including those that have not yet attempted initialization or have failed to initialize. + allExtensions []ipnext.Extension + + // initOnce is used to ensure that the extensions are initialized only once, + // even if [extensionHost.Init] is called multiple times. + initOnce sync.Once + // shutdownOnce is like initOnce, but for [ExtensionHost.Shutdown]. + shutdownOnce sync.Once + + // workQueue maintains execution order for asynchronous operations requested by extensions. + // It is always an [execqueue.ExecQueue] except in some tests. + workQueue execQueue + // doEnqueueBackendOperation adds an asynchronous [LocalBackend] operation to the workQueue. + doEnqueueBackendOperation func(func(Backend)) + + // mu protects the following fields. + // It must not be held when calling [LocalBackend] methods + // or when invoking callbacks registered by extensions. + mu sync.Mutex + // initialized is whether the host and extensions have been fully initialized. + initialized atomic.Bool + // activeExtensions is a subset of allExtensions that have been initialized and are ready to use. + activeExtensions []ipnext.Extension + // extensionsByName are the activeExtensions indexed by their names. + extensionsByName map[string]ipnext.Extension + // postInitWorkQueue is a queue of functions to be executed + // by the workQueue after all extensions have been initialized. + postInitWorkQueue []func(Backend) + + // auditLoggers are registered [AuditLogProvider]s. + // Each provider is called to get an [ipnauth.AuditLogFunc] when an auditable action + // is about to be performed. If an audit logger returns an error, the action is denied. 
+ auditLoggers set.HandleSet[ipnext.AuditLogProvider] + // backgroundProfileResolvers are registered background profile resolvers. + // They're used to determine the profile to use when no GUI/CLI client is connected. + backgroundProfileResolvers set.HandleSet[ipnext.ProfileResolver] + // newControlClientCbs are the functions to be called when a new control client is created. + newControlClientCbs set.HandleSet[ipnext.NewControlClientCallback] + // profileChangeCbs are the callbacks to be invoked when the current login profile changes, + // either because of a profile switch, or because the profile information was updated + // by [LocalBackend.SetControlClientStatus], including when the profile is first populated + // and persisted. + profileChangeCbs set.HandleSet[ipnext.ProfileChangeCallback] +} + +// Backend is a subset of [LocalBackend] methods that are used by [ExtensionHost]. +// It is primarily used for testing. +type Backend interface { + // SwitchToBestProfile switches to the best profile for the current state of the system. + // The reason indicates why the profile is being switched. + SwitchToBestProfile(reason string) +} + +// NewExtensionHost returns a new [ExtensionHost] which manages registered extensions for the given backend. +// The extensions are instantiated, but are not initialized until [ExtensionHost.Init] is called. +// It returns an error if instantiating any extension fails. +// +// If overrideExts is non-nil, the registered extensions are ignored and the provided extensions are used instead. +// Overriding extensions is primarily used for testing. +func NewExtensionHost(logf logger.Logf, sys *tsd.System, b Backend, overrideExts ...*ipnext.Definition) (_ *ExtensionHost, err error) { + host := &ExtensionHost{ + logf: logger.WithPrefix(logf, "ipnext: "), + workQueue: &execqueue.ExecQueue{}, + } + + // All operations on the backend must be executed asynchronously by the work queue. + // DO NOT retain a direct reference to the backend in the host. + // See the docstring for [ExtensionHost] for more details. + host.doEnqueueBackendOperation = func(f func(Backend)) { + if f == nil { + panic("nil backend operation") + } + host.workQueue.Add(func() { f(b) }) + } + + var numExts int + var exts iter.Seq2[int, *ipnext.Definition] + if overrideExts == nil { + // Use registered extensions. + exts = ipnext.Extensions().All() + numExts = ipnext.Extensions().Len() + } else { + // Use the provided, potentially empty, overrideExts + // instead of the registered ones. + exts = slices.All(overrideExts) + numExts = len(overrideExts) + } + + host.allExtensions = make([]ipnext.Extension, 0, numExts) + for _, d := range exts { + ext, err := d.MakeExtension(logf, sys) + if errors.Is(err, ipnext.SkipExtension) { + // The extension wants to be skipped. + host.logf("%q: %v", d.Name(), err) + continue + } else if err != nil { + return nil, fmt.Errorf("failed to create %q extension: %v", d.Name(), err) + } + host.allExtensions = append(host.allExtensions, ext) + } + return host, nil +} + +// Init initializes the host and the extensions it manages. +func (h *ExtensionHost) Init() { + if h != nil { + h.initOnce.Do(h.init) + } +} + +func (h *ExtensionHost) init() { + // Initialize the extensions in the order they were registered. 
+ h.mu.Lock() + h.activeExtensions = make([]ipnext.Extension, 0, len(h.allExtensions)) + h.extensionsByName = make(map[string]ipnext.Extension, len(h.allExtensions)) + h.mu.Unlock() + for _, ext := range h.allExtensions { + // Do not hold the lock while calling [ipnext.Extension.Init]. + // Extensions call back into the host to register their callbacks, + // and that would cause a deadlock if the h.mu is already held. + if err := ext.Init(h); err != nil { + // As per the [ipnext.Extension] interface, failures to initialize + // an extension are never fatal. The extension is simply skipped. + // + // But we handle [ipnext.SkipExtension] differently for nicer logging + // if the extension wants to be skipped and not actually failing. + if errors.Is(err, ipnext.SkipExtension) { + h.logf("%q: %v", ext.Name(), err) + } else { + h.logf("%q init failed: %v", ext.Name(), err) + } + continue + } + // Update the initialized extensions lists as soon as the extension is initialized. + // We'd like to make them visible to other extensions that are initialized later. + h.mu.Lock() + h.activeExtensions = append(h.activeExtensions, ext) + h.extensionsByName[ext.Name()] = ext + h.mu.Unlock() + } + + // Report active extensions to the log. + // TODO(nickkhyl): update client metrics to include the active/failed/skipped extensions. + h.mu.Lock() + extensionNames := slices.Collect(maps.Keys(h.extensionsByName)) + h.mu.Unlock() + h.logf("active extensions: %v", strings.Join(extensionNames, ", ")) + + // Additional init steps that need to be performed after all extensions have been initialized. + h.mu.Lock() + wq := h.postInitWorkQueue + h.postInitWorkQueue = nil + h.initialized.Store(true) + h.mu.Unlock() + + // Enqueue work that was requested and deferred during initialization. + h.doEnqueueBackendOperation(func(b Backend) { + for _, f := range wq { + f(b) + } + }) + +} + +// Profiles implements [ipnext.Host]. +func (h *ExtensionHost) Profiles() ipnext.ProfileServices { + // Currently, [ExtensionHost] implements [ipnext.ProfileServices] directly. + // We might want to extract it to a separate type in the future. + return h +} + +// SwitchToBestProfileAsync implements [ipnext.ProfileServices]. +func (h *ExtensionHost) SwitchToBestProfileAsync(reason string) { + if h == nil { + return + } + h.enqueueBackendOperation(func(b Backend) { + b.SwitchToBestProfile(reason) + }) +} + +// RegisterProfileChangeCallback implements [ipnext.ProfileServices]. +func (h *ExtensionHost) RegisterProfileChangeCallback(cb ipnext.ProfileChangeCallback) (unregister func()) { + if h == nil { + return func() {} + } + if cb == nil { + panic("nil profile change callback") + } + h.mu.Lock() + defer h.mu.Unlock() + handle := h.profileChangeCbs.Add(cb) + return func() { + h.mu.Lock() + defer h.mu.Unlock() + delete(h.profileChangeCbs, handle) + } +} + +// NotifyProfileChange invokes registered profile change callbacks. +// It strips private keys from the [ipn.Prefs] before passing it to the callbacks. +func (h *ExtensionHost) NotifyProfileChange(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { + if h == nil { + return + } + h.mu.Lock() + cbs := collectValues(h.profileChangeCbs) + h.mu.Unlock() + if cbs != nil { + // Strip private keys from the prefs before passing it to the callbacks. + // Extensions should not need it (unless proven otherwise in the future), + // and this is a good way to ensure that they won't accidentally leak them. 
+ prefs = stripKeysFromPrefs(prefs) + for _, cb := range cbs { + cb(profile, prefs, sameNode) + } + } +} + +// RegisterBackgroundProfileResolver implements [ipnext.ProfileServices]. +func (h *ExtensionHost) RegisterBackgroundProfileResolver(resolver ipnext.ProfileResolver) (unregister func()) { + if h == nil { + return func() {} + } + h.mu.Lock() + defer h.mu.Unlock() + handle := h.backgroundProfileResolvers.Add(resolver) + return func() { + h.mu.Lock() + defer h.mu.Unlock() + delete(h.backgroundProfileResolvers, handle) + } +} + +// DetermineBackgroundProfile returns a read-only view of the profile +// used when no GUI/CLI client is connected, using background profile +// resolvers registered by extensions. +// +// It returns an invalid view if Tailscale should not run in the background +// and instead disconnect until a GUI/CLI client connects. +// +// As of 2025-02-07, this is only used on Windows. +func (h *ExtensionHost) DetermineBackgroundProfile(profiles ipnext.ProfileStore) ipn.LoginProfileView { + if h == nil { + return ipn.LoginProfileView{} + } + // TODO(nickkhyl): check if the returned profile is allowed on the device, + // such as when [syspolicy.Tailnet] policy setting requires a specific Tailnet. + // See tailscale/corp#26249. + + // Attempt to resolve the background profile using the registered + // background profile resolvers (e.g., [ipn/desktop.desktopSessionsExt] on Windows). + h.mu.Lock() + resolvers := collectValues(h.backgroundProfileResolvers) + h.mu.Unlock() + for _, resolver := range resolvers { + if profile := resolver(profiles); profile.Valid() { + return profile + } + } + + // Otherwise, switch to an empty profile and disconnect Tailscale + // until a GUI or CLI client connects. + return ipn.LoginProfileView{} +} + +// RegisterControlClientCallback implements [ipnext.Host]. +func (h *ExtensionHost) RegisterControlClientCallback(cb ipnext.NewControlClientCallback) (unregister func()) { + if h == nil { + return func() {} + } + if cb == nil { + panic("nil control client callback") + } + h.mu.Lock() + defer h.mu.Unlock() + handle := h.newControlClientCbs.Add(cb) + return func() { + h.mu.Lock() + defer h.mu.Unlock() + delete(h.newControlClientCbs, handle) + } +} + +// NotifyNewControlClient invokes all registered control client callbacks. +// It returns callbacks to be executed when the control client shuts down. +func (h *ExtensionHost) NotifyNewControlClient(cc controlclient.Client, profile ipn.LoginProfileView, prefs ipn.PrefsView) (ccShutdownCbs []func()) { + if h == nil { + return nil + } + h.mu.Lock() + cbs := collectValues(h.newControlClientCbs) + h.mu.Unlock() + if len(cbs) > 0 { + ccShutdownCbs = make([]func(), 0, len(cbs)) + for _, cb := range cbs { + if shutdown := cb(cc, profile, prefs); shutdown != nil { + ccShutdownCbs = append(ccShutdownCbs, shutdown) + } + } + } + return ccShutdownCbs +} + +// RegisterAuditLogProvider implements [ipnext.Host]. +func (h *ExtensionHost) RegisterAuditLogProvider(provider ipnext.AuditLogProvider) (unregister func()) { + if h == nil { + return func() {} + } + if provider == nil { + panic("nil audit log provider") + } + h.mu.Lock() + defer h.mu.Unlock() + handle := h.auditLoggers.Add(provider) + return func() { + h.mu.Lock() + defer h.mu.Unlock() + delete(h.auditLoggers, handle) + } +} + +// AuditLogger returns a function that reports an auditable action +// to all registered audit loggers. It fails if any of them returns an error, +// indicating that the action cannot be logged and must not be performed. 
+// +// It implements [ipnext.Host], but is also used by the [LocalBackend]. +// +// The returned function closes over the current state of the host and extensions, +// which typically includes the current profile and the audit loggers registered by extensions. +// It must not be persisted outside of the auditable action context. +func (h *ExtensionHost) AuditLogger() ipnauth.AuditLogFunc { + if h == nil { + return func(tailcfg.ClientAuditAction, string) error { return nil } + } + + h.mu.Lock() + providers := collectValues(h.auditLoggers) + h.mu.Unlock() + + var loggers []ipnauth.AuditLogFunc + if len(providers) > 0 { + loggers = make([]ipnauth.AuditLogFunc, len(providers)) + for i, provider := range providers { + loggers[i] = provider() + } + } + return func(action tailcfg.ClientAuditAction, details string) error { + // Log auditable actions to the host's log regardless of whether + // the audit loggers are available or not. + h.logf("auditlog: %v: %v", action, details) + + // Invoke all registered audit loggers and collect errors. + // If any of them returns an error, the action is denied. + var errs []error + for _, logger := range loggers { + if err := logger(action, details); err != nil { + errs = append(errs, err) + } + } + return errors.Join(errs...) + } +} + +// Shutdown shuts down the extension host and all initialized extensions. +func (h *ExtensionHost) Shutdown() { + if h == nil { + return + } + // Ensure that the init function has completed before shutting down, + // or prevent any further init calls from happening. + h.initOnce.Do(func() {}) + h.shutdownOnce.Do(h.shutdown) +} + +func (h *ExtensionHost) shutdown() { + // Prevent any queued but not yet started operations from running, + // block new operations from being enqueued, and wait for the + // currently executing operation (if any) to finish. + h.shutdownWorkQueue() + // Invoke shutdown callbacks registered by extensions. + h.shutdownExtensions() +} + +func (h *ExtensionHost) shutdownWorkQueue() { + h.workQueue.Shutdown() + var ctx context.Context + if testenv.InTest() { + // In tests, we'd like to wait indefinitely for the current operation to finish, + // mostly to help avoid flaky tests. Test runners can be pretty slow. + ctx = context.Background() + } else { + // In prod, however, we want to avoid blocking indefinitely. + // The 5s timeout is somewhat arbitrary; LocalBackend operations + // should not take that long. + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + } + // Since callbacks are invoked synchronously, this will also wait + // for in-flight callbacks associated with those operations to finish. + if err := h.workQueue.Wait(ctx); err != nil { + h.logf("work queue shutdown failed: %v", err) + } +} + +func (h *ExtensionHost) shutdownExtensions() { + h.mu.Lock() + extensions := h.activeExtensions + h.mu.Unlock() + + // h.mu must not be held while shutting down extensions. + // Extensions might call back into the host and that would cause + // a deadlock if the h.mu is already held. + // + // Shutdown is called in the reverse order of Init. + for _, ext := range slices.Backward(extensions) { + if err := ext.Shutdown(); err != nil { + // Extension shutdown errors are never fatal, but we log them for debugging purposes. + h.logf("%q: shutdown callback failed: %v", ext.Name(), err) + } + } +} + +// enqueueBackendOperation enqueues a function to perform an operation on the [Backend]. 
+// If the host has not yet been initialized (e.g., when called from an extension's Init method), +// the operation is deferred until after the host and all extensions have completed initialization. +// It panics if the f is nil. +func (h *ExtensionHost) enqueueBackendOperation(f func(Backend)) { + if h == nil { + return + } + if f == nil { + panic("nil backend operation") + } + h.mu.Lock() // protects h.initialized and h.postInitWorkQueue + defer h.mu.Unlock() + if h.initialized.Load() { + h.doEnqueueBackendOperation(f) + } else { + h.postInitWorkQueue = append(h.postInitWorkQueue, f) + } +} + +// execQueue is an ordered asynchronous queue for executing functions. +// It is implemented by [execqueue.ExecQueue]. The interface is used +// to allow testing with a mock implementation. +type execQueue interface { + Add(func()) + Shutdown() + Wait(context.Context) error +} + +// collectValues is like [slices.Collect] of [maps.Values], +// but pre-allocates the slice to avoid reallocations. +// It returns nil if the map is empty. +func collectValues[K comparable, V any](m map[K]V) []V { + if len(m) == 0 { + return nil + } + s := make([]V, 0, len(m)) + for _, v := range m { + s = append(s, v) + } + return s +} diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go new file mode 100644 index 000000000..1e03abaa1 --- /dev/null +++ b/ipn/ipnlocal/extension_host_test.go @@ -0,0 +1,1139 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnlocal + +import ( + "cmp" + "context" + "errors" + "net/netip" + "reflect" + "slices" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + + deepcmp "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + + "tailscale.com/health" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" + "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/store/mem" + "tailscale.com/tailcfg" + "tailscale.com/tsd" + "tailscale.com/tstest" + "tailscale.com/types/key" + "tailscale.com/types/persist" + "tailscale.com/util/must" +) + +// TestExtensionInitShutdown tests that [ExtensionHost] correctly initializes +// and shuts down extensions. +func TestExtensionInitShutdown(t *testing.T) { + t.Parallel() + + // As of 2025-04-08, [ipn.Host.Init] and [ipn.Host.Shutdown] do not return errors + // as extension initialization and shutdown errors are not fatal. + // If these methods are updated to return errors, this test should also be updated. + // The conversions below will fail to compile if their signatures change, reminding us to update the test. 
+ _ = (func(*ExtensionHost))((*ExtensionHost).Init) + _ = (func(*ExtensionHost))((*ExtensionHost).Shutdown) + + tests := []struct { + name string + nilHost bool + exts []*testExtension + wantInit []string + wantShutdown []string + skipInit bool + }{ + { + name: "nil-host", + nilHost: true, + exts: []*testExtension{}, + wantInit: []string{}, + wantShutdown: []string{}, + }, + { + name: "empty-extensions", + exts: []*testExtension{}, + wantInit: []string{}, + wantShutdown: []string{}, + }, + { + name: "single-extension", + exts: []*testExtension{{name: "A"}}, + wantInit: []string{"A"}, + wantShutdown: []string{"A"}, + }, + { + name: "multiple-extensions/all-ok", + exts: []*testExtension{{name: "A"}, {name: "B"}, {name: "C"}}, + wantInit: []string{"A", "B", "C"}, + wantShutdown: []string{"C", "B", "A"}, + }, + { + name: "multiple-extensions/no-init-no-shutdown", + exts: []*testExtension{{name: "A"}, {name: "B"}, {name: "C"}}, + wantInit: []string{}, + wantShutdown: []string{}, + skipInit: true, + }, + { + name: "multiple-extensions/init-failed/first", + exts: []*testExtension{{ + name: "A", + InitHook: func(*testExtension) error { return errors.New("init failed") }, + }, { + name: "B", + InitHook: func(*testExtension) error { return nil }, + }, { + name: "C", + InitHook: func(*testExtension) error { return nil }, + }}, + wantInit: []string{"A", "B", "C"}, + wantShutdown: []string{"C", "B"}, + }, + { + name: "multiple-extensions/init-failed/second", + exts: []*testExtension{{ + name: "A", + InitHook: func(*testExtension) error { return nil }, + }, { + name: "B", + InitHook: func(*testExtension) error { return errors.New("init failed") }, + }, { + name: "C", + InitHook: func(*testExtension) error { return nil }, + }}, + wantInit: []string{"A", "B", "C"}, + wantShutdown: []string{"C", "A"}, + }, + { + name: "multiple-extensions/init-failed/third", + exts: []*testExtension{{ + name: "A", + InitHook: func(*testExtension) error { return nil }, + }, { + name: "B", + InitHook: func(*testExtension) error { return nil }, + }, { + name: "C", + InitHook: func(*testExtension) error { return errors.New("init failed") }, + }}, + wantInit: []string{"A", "B", "C"}, + wantShutdown: []string{"B", "A"}, + }, + { + name: "multiple-extensions/init-failed/all", + exts: []*testExtension{{ + name: "A", + InitHook: func(*testExtension) error { return errors.New("init failed") }, + }, { + name: "B", + InitHook: func(*testExtension) error { return errors.New("init failed") }, + }, { + name: "C", + InitHook: func(*testExtension) error { return errors.New("init failed") }, + }}, + wantInit: []string{"A", "B", "C"}, + wantShutdown: []string{}, + }, + { + name: "multiple-extensions/init-skipped", + exts: []*testExtension{{ + name: "A", + InitHook: func(*testExtension) error { return nil }, + }, { + name: "B", + InitHook: func(*testExtension) error { return ipnext.SkipExtension }, + }, { + name: "C", + InitHook: func(*testExtension) error { return nil }, + }}, + wantInit: []string{"A", "B", "C"}, + wantShutdown: []string{"C", "A"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + // Configure all extensions to append their names + // to the gotInit and gotShutdown slices + // during initialization and shutdown, + // so we can check that they are called in the right order + // and that shutdown is not unless init succeeded. 
+ var gotInit, gotShutdown []string + for _, ext := range tt.exts { + oldInitHook := ext.InitHook + ext.InitHook = func(e *testExtension) error { + gotInit = append(gotInit, e.name) + if oldInitHook == nil { + return nil + } + return oldInitHook(e) + } + ext.ShutdownHook = func(e *testExtension) error { + gotShutdown = append(gotShutdown, e.name) + return nil + } + } + + var h *ExtensionHost + if !tt.nilHost { + h = newExtensionHostForTest(t, &testBackend{}, false, tt.exts...) + } + + if !tt.skipInit { + h.Init() + } + + // Check that the extensions were initialized in the right order. + if !slices.Equal(gotInit, tt.wantInit) { + t.Errorf("Init extensions: got %v; want %v", gotInit, tt.wantInit) + } + + // Calling Init again on the host should be a no-op. + // The [testExtension.Init] method fails the test if called more than once, + // regardless of which test is running, so we don't need to check it here. + // Similarly, calling Shutdown again on the host should be a no-op as well. + // It is verified by the [testExtension.Shutdown] method itself. + if !tt.skipInit { + h.Init() + } + + // Extensions should not be shut down before the host is shut down, + // even if they are not initialized successfully. + for _, ext := range tt.exts { + if gotShutdown := ext.ShutdownCalled(); gotShutdown { + t.Errorf("%q: Extension shutdown called before host shutdown", ext.name) + } + } + + h.Shutdown() + // Check that the extensions were shut down in the right order, + // and that they were not shut down if they were not initialized successfully. + if !slices.Equal(gotShutdown, tt.wantShutdown) { + t.Errorf("Shutdown extensions: got %v; want %v", gotShutdown, tt.wantShutdown) + } + + }) + } +} + +// TestNewExtensionHost tests that [NewExtensionHost] correctly creates +// an [ExtensionHost], instantiates the extensions and handles errors +// if an extension cannot be created. +func TestNewExtensionHost(t *testing.T) { + t.Parallel() + tests := []struct { + name string + defs []*ipnext.Definition + wantErr bool + wantExts []string + }{ + { + name: "no-exts", + defs: []*ipnext.Definition{}, + wantErr: false, + wantExts: []string{}, + }, + { + name: "exts-ok", + defs: []*ipnext.Definition{ + ipnext.DefinitionForTest(&testExtension{name: "A"}), + ipnext.DefinitionForTest(&testExtension{name: "B"}), + ipnext.DefinitionForTest(&testExtension{name: "C"}), + }, + wantErr: false, + wantExts: []string{"A", "B", "C"}, + }, + { + name: "exts-skipped", + defs: []*ipnext.Definition{ + ipnext.DefinitionForTest(&testExtension{name: "A"}), + ipnext.DefinitionWithErrForTest("B", ipnext.SkipExtension), + ipnext.DefinitionForTest(&testExtension{name: "C"}), + }, + wantErr: false, // extension B is skipped, that's ok + wantExts: []string{"A", "C"}, + }, + { + name: "exts-fail", + defs: []*ipnext.Definition{ + ipnext.DefinitionForTest(&testExtension{name: "A"}), + ipnext.DefinitionWithErrForTest("B", errors.New("failed creating Ext-2")), + ipnext.DefinitionForTest(&testExtension{name: "C"}), + }, + wantErr: true, // extension B failed to create, that's not ok + wantExts: []string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + logf := tstest.WhileTestRunningLogger(t) + h, err := NewExtensionHost(logf, &tsd.System{}, &testBackend{}, tt.defs...) 
+ if gotErr := err != nil; gotErr != tt.wantErr { + t.Errorf("NewExtensionHost: gotErr %v(%v); wantErr %v", gotErr, err, tt.wantErr) + } + if err != nil { + return + } + + var gotExts []string + for _, ext := range h.allExtensions { + gotExts = append(gotExts, ext.Name()) + } + + if !slices.Equal(gotExts, tt.wantExts) { + t.Errorf("Shutdown extensions: got %v; want %v", gotExts, tt.wantExts) + } + }) + } +} + +// TestExtensionHostEnqueueBackendOperation verifies that [ExtensionHost] enqueues +// backend operations and executes them asynchronously in the order they were received. +// It also checks that operations requested before the host and all extensions are initialized +// are not executed immediately but rather after the host and extensions are initialized. +func TestExtensionHostEnqueueBackendOperation(t *testing.T) { + t.Parallel() + tests := []struct { + name string + preInitCalls []string // before host init + extInitCalls []string // from [Extension.Init]; "" means no call + wantInitCalls []string // what we expect to be called after host init + postInitCalls []string // after host init + }{ + { + name: "no-calls", + preInitCalls: []string{}, + extInitCalls: []string{}, + wantInitCalls: []string{}, + postInitCalls: []string{}, + }, + { + name: "pre-init-calls", + preInitCalls: []string{"pre-init-1", "pre-init-2"}, + extInitCalls: []string{}, + wantInitCalls: []string{"pre-init-1", "pre-init-2"}, + postInitCalls: []string{}, + }, + { + name: "init-calls", + preInitCalls: []string{}, + extInitCalls: []string{"init-1", "init-2"}, + wantInitCalls: []string{"init-1", "init-2"}, + postInitCalls: []string{}, + }, + { + name: "post-init-calls", + preInitCalls: []string{}, + extInitCalls: []string{}, + wantInitCalls: []string{}, + postInitCalls: []string{"post-init-1", "post-init-2"}, + }, + { + name: "mixed-calls", + preInitCalls: []string{"pre-init-1", "pre-init-2"}, + extInitCalls: []string{"init-1", "", "init-2"}, + wantInitCalls: []string{"pre-init-1", "pre-init-2", "init-1", "init-2"}, + postInitCalls: []string{"post-init-1", "post-init-2"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + var gotCalls []string + var h *ExtensionHost + b := &testBackend{ + switchToBestProfileHook: func(reason string) { + gotCalls = append(gotCalls, reason) + }, + } + + exts := make([]*testExtension, len(tt.extInitCalls)) + for i, reason := range tt.extInitCalls { + exts[i] = &testExtension{} + if reason != "" { + exts[i].InitHook = func(e *testExtension) error { + e.host.Profiles().SwitchToBestProfileAsync(reason) + return nil + } + } + } + + h = newExtensionHostForTest(t, b, false, exts...) + wq := h.SetWorkQueueForTest(t) // use a test queue instead of [execqueue.ExecQueue]. + + // Issue some pre-init calls. They should be deferred and not + // added to the queue until the host is initialized. + for _, call := range tt.preInitCalls { + h.Profiles().SwitchToBestProfileAsync(call) + } + + // The queue should be empty before the host is initialized. + wq.Drain() + if len(gotCalls) != 0 { + t.Errorf("Pre-init calls: got %v; want (none)", gotCalls) + } + gotCalls = nil + + // Initialize the host and all extensions. + // The extensions will make their calls during initialization. + h.Init() + + // Calls made before or during initialization should now be enqueued and running. 
+			wq.Drain()
+			if diff := deepcmp.Diff(tt.wantInitCalls, gotCalls, cmpopts.EquateEmpty()); diff != "" {
+				t.Errorf("Init calls (-want +got): %v", diff)
+			}
+			gotCalls = nil
+
+			// Let's make some more calls, as if extensions were making them in response
+			// to external events.
+			for _, call := range tt.postInitCalls {
+				h.Profiles().SwitchToBestProfileAsync(call)
+			}
+
+			// Any calls made after initialization should be enqueued and running.
+			wq.Drain()
+			if diff := deepcmp.Diff(tt.postInitCalls, gotCalls, cmpopts.EquateEmpty()); diff != "" {
+				t.Errorf("Post-init calls (-want +got): %v", diff)
+			}
+			gotCalls = nil
+		})
+	}
+}
+
+// TestExtensionHostProfileChangeCallback verifies that [ExtensionHost] correctly handles the registration,
+// invocation, and unregistration of profile change callbacks. It also checks that the callbacks are called
+// with the correct arguments and that any private keys are stripped from [ipn.Prefs] before being passed to the callback.
+func TestExtensionHostProfileChangeCallback(t *testing.T) {
+	t.Parallel()
+
+	type profileChange struct {
+		Profile  *ipn.LoginProfile
+		Prefs    *ipn.Prefs
+		SameNode bool
+	}
+	// newProfileChange creates a new profile change with deep copies of the profile and prefs.
+	newProfileChange := func(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) profileChange {
+		return profileChange{
+			Profile:  profile.AsStruct(),
+			Prefs:    prefs.AsStruct(),
+			SameNode: sameNode,
+		}
+	}
+	// makeProfileChangeAppender returns a callback that appends profile changes to the extension's state.
+	makeProfileChangeAppender := func(e *testExtension) ipnext.ProfileChangeCallback {
+		return func(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) {
+			UpdateExtState(e, "changes", func(changes []profileChange) []profileChange {
+				return append(changes, newProfileChange(profile, prefs, sameNode))
+			})
+		}
+	}
+	// getProfileChanges returns the profile changes stored in the extension's state.
+	getProfileChanges := func(e *testExtension) []profileChange {
+		changes, _ := GetExtStateOk[[]profileChange](e, "changes")
+		return changes
+	}
+
+	tests := []struct {
+		name      string
+		ext       *testExtension
+		calls     []profileChange
+		wantCalls []profileChange
+	}{
+		{
+			// Register the callback for the lifetime of the extension.
+			name: "Register/Lifetime",
+			ext:  &testExtension{},
+			calls: []profileChange{
+				{Profile: &ipn.LoginProfile{ID: "profile-1"}},
+				{Profile: &ipn.LoginProfile{ID: "profile-2"}},
+				{Profile: &ipn.LoginProfile{ID: "profile-3"}},
+				{Profile: &ipn.LoginProfile{ID: "profile-3"}, SameNode: true},
+			},
+			wantCalls: []profileChange{ // all calls are received by the callback
+				{Profile: &ipn.LoginProfile{ID: "profile-1"}},
+				{Profile: &ipn.LoginProfile{ID: "profile-2"}},
+				{Profile: &ipn.LoginProfile{ID: "profile-3"}},
+				{Profile: &ipn.LoginProfile{ID: "profile-3"}, SameNode: true},
+			},
+		},
+		{
+			// Override the default InitHook used in the test to unregister the callback
+			// after the first call.
+ name: "Register/Once", + ext: &testExtension{ + InitHook: func(e *testExtension) error { + var unregister func() + handler := func(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { + makeProfileChangeAppender(e)(profile, prefs, sameNode) + unregister() + } + unregister = e.host.Profiles().RegisterProfileChangeCallback(handler) + return nil + }, + }, + calls: []profileChange{ + {Profile: &ipn.LoginProfile{ID: "profile-1"}}, + {Profile: &ipn.LoginProfile{ID: "profile-2"}}, + {Profile: &ipn.LoginProfile{ID: "profile-3"}}, + }, + wantCalls: []profileChange{ // only the first call is received by the callback + {Profile: &ipn.LoginProfile{ID: "profile-1"}}, + }, + }, + { + // Ensure that ipn.Prefs are passed to the callback. + name: "CheckPrefs", + ext: &testExtension{}, + calls: []profileChange{{ + Profile: &ipn.LoginProfile{ID: "profile-1"}, + Prefs: &ipn.Prefs{ + WantRunning: true, + LoggedOut: false, + AdvertiseRoutes: []netip.Prefix{ + netip.MustParsePrefix("192.168.1.0/24"), + netip.MustParsePrefix("192.168.2.0/24"), + }, + }, + }}, + wantCalls: []profileChange{{ + Profile: &ipn.LoginProfile{ID: "profile-1"}, + Prefs: &ipn.Prefs{ + WantRunning: true, + LoggedOut: false, + AdvertiseRoutes: []netip.Prefix{ + netip.MustParsePrefix("192.168.1.0/24"), + netip.MustParsePrefix("192.168.2.0/24"), + }, + }, + }}, + }, + { + // Ensure that private keys are stripped from persist.Persist shared with extensions. + name: "StripPrivateKeys", + ext: &testExtension{}, + calls: []profileChange{{ + Profile: &ipn.LoginProfile{ID: "profile-1"}, + Prefs: &ipn.Prefs{ + Persist: &persist.Persist{ + NodeID: "12345", + PrivateNodeKey: key.NewNode(), + OldPrivateNodeKey: key.NewNode(), + NetworkLockKey: key.NewNLPrivate(), + UserProfile: tailcfg.UserProfile{ + ID: 12345, + LoginName: "test@example.com", + DisplayName: "Test User", + ProfilePicURL: "https://example.com/profile.png", + }, + }, + }, + }}, + wantCalls: []profileChange{{ + Profile: &ipn.LoginProfile{ID: "profile-1"}, + Prefs: &ipn.Prefs{ + Persist: &persist.Persist{ + NodeID: "12345", + PrivateNodeKey: key.NodePrivate{}, // stripped + OldPrivateNodeKey: key.NodePrivate{}, // stripped + NetworkLockKey: key.NLPrivate{}, // stripped + UserProfile: tailcfg.UserProfile{ + ID: 12345, + LoginName: "test@example.com", + DisplayName: "Test User", + ProfilePicURL: "https://example.com/profile.png", + }, + }, + }, + }}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + // Use the default InitHook if not provided by the test. + if tt.ext.InitHook == nil { + tt.ext.InitHook = func(e *testExtension) error { + // Create and register the callback on init. + handler := makeProfileChangeAppender(e) + e.Cleanup(e.host.Profiles().RegisterProfileChangeCallback(handler)) + return nil + } + } + + h := newExtensionHostForTest(t, &testBackend{}, true, tt.ext) + for _, call := range tt.calls { + h.NotifyProfileChange(call.Profile.View(), call.Prefs.View(), call.SameNode) + } + opts := []deepcmp.Option{ + cmpopts.EquateComparable(key.NodePublic{}, netip.Addr{}, netip.Prefix{}), + } + if diff := deepcmp.Diff(tt.wantCalls, getProfileChanges(tt.ext), opts...); diff != "" { + t.Errorf("ProfileChange callbacks: (-want +got): %v", diff) + } + }) + } +} + +// TestBackgroundProfileResolver tests that the background profile resolvers +// are correctly registered, unregistered and invoked by the [ExtensionHost]. 
+func TestBackgroundProfileResolver(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + profiles []ipn.LoginProfile // the first one is the current profile + resolvers []ipnext.ProfileResolver + wantProfile *ipn.LoginProfile + }{ + { + name: "No-Profiles/No-Resolvers", + profiles: nil, + resolvers: nil, + wantProfile: nil, + }, + { + // TODO(nickkhyl): update this test as we change "background profile resolvers" + // to just "profile resolvers". The wantProfile should be the current profile by default. + name: "Has-Profiles/No-Resolvers", + profiles: []ipn.LoginProfile{{ID: "profile-1"}}, + resolvers: nil, + wantProfile: nil, + }, + { + name: "Has-Profiles/Single-Resolver", + profiles: []ipn.LoginProfile{{ID: "profile-1"}}, + resolvers: []ipnext.ProfileResolver{ + func(ps ipnext.ProfileStore) ipn.LoginProfileView { + return ps.CurrentProfile() + }, + }, + wantProfile: &ipn.LoginProfile{ID: "profile-1"}, + }, + // TODO(nickkhyl): add more tests for multiple resolvers and different profiles + // once we change "background profile resolvers" to just "profile resolvers" + // and add proper conflict resolution logic. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + // Create a new profile manager and add the profiles to it. + // We expose the profile manager to the extensions via the read-only [ipnext.ProfileStore] interface. + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + for i, p := range tt.profiles { + // Generate a unique ID and key for each profile, + // unless the profile already has them set + // or is an empty, unnamed profile. + if p.Name != "" { + if p.ID == "" { + p.ID = ipn.ProfileID("profile-" + strconv.Itoa(i)) + } + if p.Key == "" { + p.Key = "key-" + ipn.StateKey(p.ID) + } + } + pv := p.View() + pm.knownProfiles[p.ID] = pv + if i == 0 { + // Set the first profile as the current one. + // A profileManager starts with an empty profile, + // so it's okay if the list of profiles is empty. + pm.SwitchToProfile(pv) + } + } + + h := newExtensionHostForTest[ipnext.Extension](t, &testBackend{}, true) + + // Register the resolvers with the host. + // This is typically done by the extensions themselves, + // but we do it here for testing purposes. + for _, r := range tt.resolvers { + t.Cleanup(h.Profiles().RegisterBackgroundProfileResolver(r)) + } + + // Call the resolver to get the profile. + gotProfile := h.DetermineBackgroundProfile(pm) + if !gotProfile.Equals(tt.wantProfile.View()) { + t.Errorf("Resolved profile: got %v; want %v", gotProfile, tt.wantProfile) + } + }) + } +} + +// TestAuditLogProviders tests that the [ExtensionHost] correctly handles +// the registration and invocation of audit log providers. It verifies that +// the audit loggers are called with the correct actions and details, +// and that any errors returned by the providers are properly propagated. 
+func TestAuditLogProviders(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + auditLoggers []ipnauth.AuditLogFunc // each represents an extension + actions []tailcfg.ClientAuditAction + wantErr bool + }{ + { + name: "No-Providers", + auditLoggers: nil, + actions: []tailcfg.ClientAuditAction{"TestAction-1", "TestAction-2"}, + wantErr: false, + }, + { + name: "Single-Provider/Ok", + auditLoggers: []ipnauth.AuditLogFunc{ + func(tailcfg.ClientAuditAction, string) error { return nil }, + }, + actions: []tailcfg.ClientAuditAction{"TestAction-1", "TestAction-2"}, + wantErr: false, + }, + { + name: "Single-Provider/Err", + auditLoggers: []ipnauth.AuditLogFunc{ + func(tailcfg.ClientAuditAction, string) error { + return errors.New("failed to log") + }, + }, + actions: []tailcfg.ClientAuditAction{"TestAction-1", "TestAction-2"}, + wantErr: true, + }, + { + name: "Many-Providers/Ok", + auditLoggers: []ipnauth.AuditLogFunc{ + func(tailcfg.ClientAuditAction, string) error { return nil }, + func(tailcfg.ClientAuditAction, string) error { return nil }, + }, + actions: []tailcfg.ClientAuditAction{"TestAction-1", "TestAction-2"}, + wantErr: false, + }, + { + name: "Many-Providers/Err", + auditLoggers: []ipnauth.AuditLogFunc{ + func(tailcfg.ClientAuditAction, string) error { + return errors.New("failed to log") + }, + func(tailcfg.ClientAuditAction, string) error { + return nil // all good + }, + func(tailcfg.ClientAuditAction, string) error { + return errors.New("also failed to log") + }, + }, + actions: []tailcfg.ClientAuditAction{"TestAction-1", "TestAction-2"}, + wantErr: true, // some providers failed to log, so that's an error + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create extensions that register the audit log providers. + // Each extension/provider will append auditable actions to its state, + // then call the test's auditLogger function. + var exts []*testExtension + for _, auditLogger := range tt.auditLoggers { + ext := &testExtension{} + provider := func() ipnauth.AuditLogFunc { + return func(action tailcfg.ClientAuditAction, details string) error { + UpdateExtState(ext, "actions", func(actions []tailcfg.ClientAuditAction) []tailcfg.ClientAuditAction { + return append(actions, action) + }) + return auditLogger(action, details) + } + } + ext.InitHook = func(e *testExtension) error { + e.Cleanup(e.host.RegisterAuditLogProvider(provider)) + return nil + } + exts = append(exts, ext) + } + + // Initialize the host and the extensions. + h := newExtensionHostForTest(t, &testBackend{}, true, exts...) + + // Use [ExtensionHost.AuditLogger] to log actions. + for _, action := range tt.actions { + err := h.AuditLogger()(action, "Test details") + if gotErr := err != nil; gotErr != tt.wantErr { + t.Errorf("AuditLogger: gotErr %v (%v); wantErr %v", gotErr, err, tt.wantErr) + } + } + + // Check that the actions were logged correctly by each provider. + for _, ext := range exts { + gotActions := GetExtState[[]tailcfg.ClientAuditAction](ext, "actions") + if !slices.Equal(gotActions, tt.actions) { + t.Errorf("Actions: got %v; want %v", gotActions, tt.actions) + } + } + }) + } +} + +// TestNilExtensionHostMethodCall tests that calling exported methods +// on a nil [ExtensionHost] does not panic. We should treat it as a valid +// value since it's used in various tests that instantiate [LocalBackend] +// manually without calling [NewLocalBackend]. 
It also verifies that if
+// a method returns a single func value (e.g., a cleanup function),
+// it should not be nil. This is a basic sanity check to ensure that
+// typical method calls on a nil receiver work as expected.
+// It does not replace the need for more thorough testing of specific methods.
+func TestNilExtensionHostMethodCall(t *testing.T) {
+	t.Parallel()
+
+	var h *ExtensionHost
+	typ := reflect.TypeOf(h)
+	for i := range typ.NumMethod() {
+		m := typ.Method(i)
+		if strings.HasSuffix(m.Name, "ForTest") {
+			// Skip methods that are only for testing.
+			continue
+		}
+
+		t.Run(m.Name, func(t *testing.T) {
+			t.Parallel()
+			// Calling the method on the nil receiver should not panic.
+			ret := checkMethodCallWithZeroArgs(t, m, h)
+			if len(ret) == 1 && ret[0].Kind() == reflect.Func {
+				// If the method returns a single func, such as a cleanup function,
+				// it should not be nil.
+				fn := ret[0]
+				if fn.IsNil() {
+					t.Fatalf("(%T).%s returned a nil func", h, m.Name)
+				}
+				// We expect it to be a no-op and calling it should not panic.
+				args := makeZeroArgsFor(fn)
+				func() {
+					defer func() {
+						if e := recover(); e != nil {
+							t.Fatalf("panic calling the func returned by (%T).%s: %v", h, m.Name, e)
+						}
+					}()
+					fn.Call(args)
+				}()
+			}
+		})
+	}
+}
+
+// checkMethodCallWithZeroArgs calls the method m on the receiver r
+// with zero values for all its arguments, except the receiver itself.
+// It returns the result of the method call, or fails the test if the call panics.
+func checkMethodCallWithZeroArgs[T any](t *testing.T, m reflect.Method, r T) []reflect.Value {
+	t.Helper()
+	args := makeZeroArgsFor(m.Func)
+	// The first arg is the receiver.
+	args[0] = reflect.ValueOf(r)
+	// Calling the method should not panic.
+	defer func() {
+		if e := recover(); e != nil {
+			t.Fatalf("panic calling (%T).%s: %v", r, m.Name, e)
+		}
+	}()
+	return m.Func.Call(args)
+}
+
+func makeZeroArgsFor(fn reflect.Value) []reflect.Value {
+	args := make([]reflect.Value, fn.Type().NumIn())
+	for i := range args {
+		args[i] = reflect.Zero(fn.Type().In(i))
+	}
+	return args
+}
+
+// newExtensionHostForTest creates an [ExtensionHost] with the given backend and extensions.
+// It associates each extension that either is or embeds a [testExtension] with the test
+// and assigns a name if one isn’t already set.
+//
+// If the host cannot be created, it fails the test.
+//
+// The host is initialized if the initialize parameter is true.
+// It is shut down automatically when the test ends.
+func newExtensionHostForTest[T ipnext.Extension](t *testing.T, b Backend, initialize bool, exts ...T) *ExtensionHost {
+	t.Helper()
+
+	// testExtensionIface is a subset of the methods implemented by [testExtension] that are used here.
+	// We use testExtensionIface in type assertions instead of using the [testExtension] type directly,
+	// which supports scenarios where an extension type embeds a [testExtension].
+	type testExtensionIface interface {
+		Name() string
+		setName(string)
+		setT(*testing.T)
+		checkShutdown()
+	}
+
+	logf := tstest.WhileTestRunningLogger(t)
+	defs := make([]*ipnext.Definition, len(exts))
+	for i, ext := range exts {
+		if ext, ok := any(ext).(testExtensionIface); ok {
+			ext.setName(cmp.Or(ext.Name(), "Ext-"+strconv.Itoa(i)))
+			ext.setT(t)
+		}
+		defs[i] = ipnext.DefinitionForTest(ext)
+	}
+	h, err := NewExtensionHost(logf, &tsd.System{}, b, defs...)
+ if err != nil { + t.Fatalf("NewExtensionHost: %v", err) + } + // Replace doEnqueueBackendOperation with the one that's marked as a helper, + // so that we'll have better output if [testExecQueue.Add] fails a test. + h.doEnqueueBackendOperation = func(f func(Backend)) { + t.Helper() + h.workQueue.Add(func() { f(b) }) + } + for _, ext := range exts { + if ext, ok := any(ext).(testExtensionIface); ok { + t.Cleanup(ext.checkShutdown) + } + } + t.Cleanup(h.Shutdown) + if initialize { + h.Init() + } + return h +} + +// testExtension is an [ipnext.Extension] that: +// - Calls the provided init and shutdown callbacks +// when [Init] and [Shutdown] are called. +// - Ensures that [Init] and [Shutdown] are called at most once, +// that [Shutdown] is called after [Init], but is not called if [Init] fails +// and is called before the test ends if [Init] succeeds. +// +// Typically, [testExtension]s are created and passed to [newExtensionHostForTest] +// when creating an [ExtensionHost] for testing. +type testExtension struct { + t *testing.T // test that created the extension + name string // name of the extension, used for logging + + host ipnext.Host // or nil if not initialized + + // InitHook and ShutdownHook are optional hooks that can be set by tests. + InitHook, ShutdownHook func(*testExtension) error + + // initCnt, initOkCnt and shutdownCnt are used to verify that Init and Shutdown + // are called at most once and in the correct order. + initCnt, initOkCnt, shutdownCnt atomic.Int32 + + // mu protects the following fields. + mu sync.Mutex + // state is the optional state used by tests. + // It can be accessed by tests using [setTestExtensionState], + // [getTestExtensionStateOk] and [getTestExtensionState]. + state map[string]any + // cleanup are functions to be called on shutdown. + cleanup []func() +} + +var _ ipnext.Extension = (*testExtension)(nil) + +func (e *testExtension) setT(t *testing.T) { + e.t = t +} + +func (e *testExtension) setName(name string) { + e.name = name +} + +// Name implements [ipnext.Extension]. +func (e *testExtension) Name() string { + return e.name +} + +// Init implements [ipnext.Extension]. +func (e *testExtension) Init(host ipnext.Host) (err error) { + e.t.Helper() + e.host = host + if e.initCnt.Add(1) == 1 { + e.mu.Lock() + e.state = make(map[string]any) + e.mu.Unlock() + } else { + e.t.Errorf("%q: Init called more than once", e.name) + } + if e.InitHook != nil { + err = e.InitHook(e) + } + if err == nil { + e.initOkCnt.Add(1) + } + return err // may be nil or non-nil +} + +// InitCalled reports whether the Init method was called on the receiver. +func (e *testExtension) InitCalled() bool { + return e.initCnt.Load() != 0 +} + +func (e *testExtension) Cleanup(f func()) { + e.mu.Lock() + e.cleanup = append(e.cleanup, f) + e.mu.Unlock() +} + +// Shutdown implements [ipnext.Extension]. 
+func (e *testExtension) Shutdown() (err error) {
+	e.t.Helper()
+	e.mu.Lock()
+	cleanup := e.cleanup
+	e.cleanup = nil
+	e.mu.Unlock()
+	for _, f := range cleanup {
+		f()
+	}
+	if e.ShutdownHook != nil {
+		err = e.ShutdownHook(e)
+	}
+	if e.shutdownCnt.Add(1) != 1 {
+		e.t.Errorf("%q: Shutdown called more than once", e.name)
+	}
+	if e.initCnt.Load() == 0 {
+		e.t.Errorf("%q: Shutdown called without Init", e.name)
+	} else if e.initOkCnt.Load() == 0 {
+		e.t.Errorf("%q: Shutdown called despite failed Init", e.name)
+	}
+	e.host = nil
+	return err // may be nil or non-nil
+}
+
+func (e *testExtension) checkShutdown() {
+	e.t.Helper()
+	if e.initOkCnt.Load() != 0 && e.shutdownCnt.Load() == 0 {
+		e.t.Errorf("%q: Shutdown has not been called before test end", e.name)
+	}
+}
+
+// ShutdownCalled reports whether the Shutdown method was called on the receiver.
+func (e *testExtension) ShutdownCalled() bool {
+	return e.shutdownCnt.Load() != 0
+}
+
+// SetExtState sets a keyed state on [testExtension] to the given value.
+// Tests use it to propagate test-specific state throughout the extension lifecycle
+// (e.g., between [testExtension.Init], [testExtension.Shutdown], and registered callbacks).
+func SetExtState[T any](e *testExtension, key string, value T) {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	e.state[key] = value
+}
+
+// UpdateExtState updates a keyed state of the extension using the provided update function.
+func UpdateExtState[T any](e *testExtension, key string, update func(T) T) {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	old, _ := e.state[key].(T)
+	e.state[key] = update(old)
+}
+
+// GetExtState returns the value of the keyed state of the extension.
+// It returns a zero value of T if the state is not set or is of a different type.
+func GetExtState[T any](e *testExtension, key string) T {
+	v, _ := GetExtStateOk[T](e, key)
+	return v
+}
+
+// GetExtStateOk is like [GetExtState], but also reports whether the state
+// with the given key exists and is of the expected type.
+func GetExtStateOk[T any](e *testExtension, key string) (_ T, ok bool) {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	v, ok := e.state[key].(T)
+	return v, ok
+}
+
+// testExecQueue is a test implementation of [execQueue]
+// that defers execution of the enqueued funcs until
+// [testExecQueue.Drain] is called, and fails the test
+// if [execQueue.Add] is called before the host is initialized.
+//
+// It is typically used by calling [ExtensionHost.SetWorkQueueForTest].
+type testExecQueue struct {
+	t *testing.T     // test that created the queue
+	h *ExtensionHost // host to own the queue
+
+	mu    sync.Mutex
+	queue []func()
+}
+
+var _ execQueue = (*testExecQueue)(nil)
+
+// SetWorkQueueForTest is a helper function that creates a new [testExecQueue]
+// and sets it as the work queue for the specified [ExtensionHost],
+// returning the new queue.
+//
+// It fails the test if the host is already initialized.
+func (h *ExtensionHost) SetWorkQueueForTest(t *testing.T) *testExecQueue {
+	t.Helper()
+	if h.initialized.Load() {
+		t.Fatalf("SetWorkQueueForTest: host is already initialized")
+		return nil
+	}
+	q := &testExecQueue{t: t, h: h}
+	h.workQueue = q
+	return q
+}
+
+// Add implements [execQueue].
+func (q *testExecQueue) Add(f func()) {
+	q.t.Helper()
+
+	if !q.h.initialized.Load() {
+		q.t.Fatal("ExecQueue.Add must not be called until the host is initialized")
+		return
+	}
+
+	q.mu.Lock()
+	q.queue = append(q.queue, f)
+	q.mu.Unlock()
+}
+
+// Drain executes all queued functions in the order they were added.
+func (q *testExecQueue) Drain() { + q.mu.Lock() + queue := q.queue + q.queue = nil + q.mu.Unlock() + + for _, f := range queue { + f() + } +} + +// Shutdown implements [execQueue]. +func (q *testExecQueue) Shutdown() {} + +// Wait implements [execQueue]. +func (q *testExecQueue) Wait(context.Context) error { return nil } + +// testBackend implements [ipnext.Backend] for testing purposes +// by calling the provided hooks when its methods are called. +type testBackend struct { + switchToBestProfileHook func(reason string) + + // mu protects the backend state. + // It is acquired on entry to the exported methods of the backend + // and released on exit, mimicking the behavior of the [LocalBackend]. + mu sync.Mutex +} + +func (b *testBackend) SwitchToBestProfile(reason string) { + b.mu.Lock() + defer b.mu.Unlock() + if b.switchToBestProfileHook != nil { + b.switchToBestProfileHook(reason) + } +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index a99d67cda..0f3ea1fbb 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -169,78 +169,6 @@ type watchSession struct { cancel context.CancelFunc // to shut down the session } -// Extension extends [LocalBackend] with additional functionality. -type Extension interface { - // Init is called to initialize the extension when the [LocalBackend] is created - // and before it starts running. If the extension cannot be initialized, - // it must return an error, and the Shutdown method will not be called. - // Any returned errors are not fatal; they are used for logging. - // TODO(nickkhyl): should we allow returning a fatal error? - Init(*LocalBackend) error - - // Shutdown is called when the [LocalBackend] is shutting down, - // if the extension was initialized. Any returned errors are not fatal; - // they are used for logging. - Shutdown() error -} - -// NewExtensionFn is a function that instantiates an [Extension]. -type NewExtensionFn func(logger.Logf, *tsd.System) (Extension, error) - -// registeredExtensions is a map of registered local backend extensions, -// where the key is the name of the extension and the value is the function -// that instantiates the extension. -var registeredExtensions map[string]NewExtensionFn - -// RegisterExtension registers a function that creates a [localBackendExtension]. -// It panics if newExt is nil or if an extension with the same name has already been registered. -func RegisterExtension(name string, newExt NewExtensionFn) { - if newExt == nil { - panic(fmt.Sprintf("lb: newExt is nil: %q", name)) - } - if _, ok := registeredExtensions[name]; ok { - panic(fmt.Sprintf("lb: duplicate extensions: %q", name)) - } - mak.Set(®isteredExtensions, name, newExt) -} - -// profileResolver is any function that returns a read-only view of a login profile. -// An invalid view indicates no profile. A valid profile view with an empty [ipn.ProfileID] -// indicates that the profile is new and has not been persisted yet. -// -// It is called with [LocalBackend.mu] held. -type profileResolver func() ipn.LoginProfileView - -// NewControlClientCallback is a function to be called when a new [controlclient.Client] -// is created and before it is first used. The login profile and prefs represent -// the profile for which the cc is created and are always valid; however, the -// profile's [ipn.LoginProfileView.ID] returns a zero [ipn.ProfileID] if the profile -// is new and has not been persisted yet. -// -// The callback is called with [LocalBackend.mu] held and must not call -// any [LocalBackend] methods. 
-// -// It returns a function to be called when the cc is being shut down, -// or nil if no cleanup is needed. -type NewControlClientCallback func(controlclient.Client, ipn.LoginProfileView, ipn.PrefsView) (cleanup func()) - -// ProfileChangeCallback is a function to be called when the current login profile changes. -// The sameNode parameter indicates whether the profile represents the same node as before, -// such as when only the profile metadata is updated but the node ID remains the same, -// or when a new profile is persisted and assigned an [ipn.ProfileID] for the first time. -// The subscribers can use this information to decide whether to reset their state. -// -// The profile and prefs are always valid, but the profile's [ipn.LoginProfileView.ID] -// returns a zero [ipn.ProfileID] if the profile is new and has not been persisted yet. -// -// The callback is called with [LocalBackend.mu] held and must not call -// any [LocalBackend] methods. -type ProfileChangeCallback func(_ ipn.LoginProfileView, _ ipn.PrefsView, sameNode bool) - -// AuditLogProvider is a function that returns an [ipnauth.AuditLogFunc] for -// logging auditable actions. -type AuditLogProvider func() ipnauth.AuditLogFunc - // LocalBackend is the glue between the major pieces of the Tailscale // network software: the cloud control plane (via controlclient), the // network data plane (via wgengine), and the user-facing UIs and CLIs @@ -311,6 +239,13 @@ type LocalBackend struct { // for testing and graceful shutdown purposes. goTracker goroutines.Tracker + // extHost is the bridge between [LocalBackend] and the registered [ipnext.Extension]s. + // It may be nil in tests that use direct composite literal initialization of [LocalBackend] + // instead of calling [NewLocalBackend]. A nil pointer is a valid, no-op host. + // It can be used with or without b.mu held, but is typically used with it held + // to prevent state changes while invoking callbacks. + extHost *ExtensionHost + // The mutex protects the following elements. mu sync.Mutex conf *conffile.Config // latest parsed config, or nil if not in declarative mode @@ -378,9 +313,6 @@ type LocalBackend struct { c2nUpdateStatus updateStatus currentUser ipnauth.Actor - // backgroundProfileResolvers are optional background profile resolvers. - backgroundProfileResolvers set.HandleSet[profileResolver] - selfUpdateProgress []ipnstate.UpdateProgress lastSelfUpdateState ipnstate.SelfUpdateStatus // capForcedNetfilter is the netfilter that control instructs Linux clients @@ -481,25 +413,6 @@ type LocalBackend struct { // reconnectTimer is used to schedule a reconnect by setting [ipn.Prefs.WantRunning] // to true after a delay, or nil if no reconnect is scheduled. reconnectTimer tstime.TimerController - - // shutdownCbs are the callbacks to be called when the backend is shutting down. - // Each callback is called exactly once in unspecified order and without b.mu held. - // Returned errors are logged but otherwise ignored and do not affect the shutdown process. - shutdownCbs set.HandleSet[func() error] - - // newControlClientCbs are the functions to be called when a new control client is created. - newControlClientCbs set.HandleSet[NewControlClientCallback] - - // profileChangeCbs are the callbacks to be called when the current login profile changes, - // either because of a profile switch, or because the profile information was updated - // by [LocalBackend.SetControlClientStatus], including when the profile is first populated - // and persisted. 
- profileChangeCbs set.HandleSet[ProfileChangeCallback] - - // auditLoggers is a collection of registered audit log providers. - // Each [AuditLogProvider] is called to get an [ipnauth.AuditLogFunc] when an auditable action - // is about to be performed. If an audit logger returns an error, the action is denied. - auditLoggers set.HandleSet[AuditLogProvider] } // HealthTracker returns the health tracker for the backend. @@ -614,6 +527,10 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } } + if b.extHost, err = NewExtensionHost(logf, sys, b); err != nil { + return nil, fmt.Errorf("failed to create extension host: %w", err) + } + if b.unregisterSysPolicyWatch, err = b.registerSysPolicyWatch(); err != nil { return nil, err } @@ -668,19 +585,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } } - for name, newFn := range registeredExtensions { - ext, err := newFn(logf, sys) - if err != nil { - b.logf("lb: failed to create %q extension: %v", name, err) - continue - } - if err := ext.Init(b); err != nil { - b.logf("lb: failed to initialize %q extension: %v", name, err) - continue - } - b.shutdownCbs.Add(ext.Shutdown) - } - + b.extHost.Init() return b, nil } @@ -1143,17 +1048,11 @@ func (b *LocalBackend) Shutdown() { if b.notifyCancel != nil { b.notifyCancel() } - shutdownCbs := slices.Collect(maps.Values(b.shutdownCbs)) - b.shutdownCbs = nil + extHost := b.extHost + b.extHost = nil b.mu.Unlock() b.webClientShutdown() - for _, cb := range shutdownCbs { - if err := cb(); err != nil { - b.logf("shutdown callback failed: %v", err) - } - } - if b.sockstatLogger != nil { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -1170,6 +1069,7 @@ func (b *LocalBackend) Shutdown() { if cc != nil { cc.Shutdown() } + extHost.Shutdown() b.ctxCancel() b.e.Close() <-b.e.Done() @@ -1743,7 +1643,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control // If the profile ID was empty before SetPrefs, it's a new profile // and the user has just completed a login for the first time. sameNode := profile.ID() == "" || profile.ID() == cp.ID() - b.notifyProfileChangeLocked(profile, prefs.View(), sameNode) + b.extHost.NotifyProfileChange(profile, prefs.View(), sameNode) } } @@ -2492,11 +2392,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { if err != nil { return err } - for _, cb := range b.newControlClientCbs { - if cleanup := cb(cc, b.pm.CurrentProfile(), prefs); cleanup != nil { - ccShutdownCbs = append(ccShutdownCbs, cleanup) - } - } + ccShutdownCbs = b.extHost.NotifyNewControlClient(cc, b.pm.CurrentProfile(), prefs) b.setControlClientLocked(cc) endpoints := b.endpoints @@ -4060,6 +3956,10 @@ func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock un // // b.mu must be held. func (b *LocalBackend) resolveBestProfileLocked() (_ ipn.LoginProfileView, isBackground bool) { + // TODO(nickkhyl): delegate all of this to the extensions and remove the distinction + // between "foreground" and "background" profiles as we migrate away from the concept + // of a single "current user" on Windows. See tailscale/corp#18342. + // // If a GUI/CLI client is connected, use the connected user's profile, which means // either the current profile if owned by the user, or their default profile. 
if b.currentUser != nil { @@ -4079,7 +3979,12 @@ func (b *LocalBackend) resolveBestProfileLocked() (_ ipn.LoginProfileView, isBac // If the returned background profileID is "", Tailscale will disconnect // and remain idle until a GUI or CLI client connects. if goos := envknob.GOOS(); goos == "windows" { - profile := b.getBackgroundProfileLocked() + // If Unattended Mode is enabled for the current profile, keep using it. + if b.pm.CurrentPrefs().ForceDaemon() { + return b.pm.CurrentProfile(), true + } + // Otherwise, use the profile returned by the extension. + profile := b.extHost.DetermineBackgroundProfile(b.pm) return profile, true } @@ -4092,47 +3997,6 @@ func (b *LocalBackend) resolveBestProfileLocked() (_ ipn.LoginProfileView, isBac return b.pm.CurrentProfile(), false } -// RegisterBackgroundProfileResolver registers a function to be used when -// resolving the background profile, until the returned unregister function is called. -func (b *LocalBackend) RegisterBackgroundProfileResolver(resolver profileResolver) (unregister func()) { - // TODO(nickkhyl): should we allow specifying some kind of priority/altitude for the resolver? - b.mu.Lock() - defer b.mu.Unlock() - handle := b.backgroundProfileResolvers.Add(resolver) - return func() { - b.mu.Lock() - defer b.mu.Unlock() - delete(b.backgroundProfileResolvers, handle) - } -} - -// getBackgroundProfileLocked returns a read-only view of the profile to use -// when no GUI/CLI client is connected. If Tailscale should not run in the background -// and should disconnect until a GUI/CLI client connects, the returned view is not valid. -// As of 2025-02-07, it is only used on Windows. -func (b *LocalBackend) getBackgroundProfileLocked() ipn.LoginProfileView { - // TODO(nickkhyl): check if the returned profile is allowed on the device, - // such as when [syspolicy.Tailnet] policy setting requires a specific Tailnet. - // See tailscale/corp#26249. - - // If Unattended Mode is enabled for the current profile, keep using it. - if b.pm.CurrentPrefs().ForceDaemon() { - return b.pm.CurrentProfile() - } - - // Otherwise, attempt to resolve the background profile using the background - // profile resolvers available on the current platform. - for _, resolver := range b.backgroundProfileResolvers { - if profile := resolver(); profile.Valid() { - return profile - } - } - - // Otherwise, switch to an empty profile and disconnect Tailscale - // until a GUI or CLI client connects. - return ipn.LoginProfileView{} -} - // CurrentUserForTest returns the current user and the associated WindowsUserID. // It is used for testing only, and will be removed along with the rest of the // "current user" functionality as we progress on the multi-user improvements (tailscale/corp#18342). @@ -4351,47 +4215,6 @@ func (b *LocalBackend) MaybeClearAppConnector(mp *ipn.MaskedPrefs) error { return err } -// RegisterAuditLogProvider registers an audit log provider, which returns a function -// to be called when an auditable action is about to be performed. -// The returned function unregisters the provider. -// It panics if the provider is nil. 
-func (b *LocalBackend) RegisterAuditLogProvider(provider AuditLogProvider) (unregister func()) { - if provider == nil { - panic("nil audit log provider") - } - b.mu.Lock() - defer b.mu.Unlock() - handle := b.auditLoggers.Add(provider) - return func() { - b.mu.Lock() - defer b.mu.Unlock() - delete(b.auditLoggers, handle) - } -} - -// getAuditLoggerLocked returns a function that calls all currently registered -// audit loggers, failing as soon as any of them returns an error. -// -// b.mu must be held. -func (b *LocalBackend) getAuditLoggerLocked() ipnauth.AuditLogFunc { - var loggers []ipnauth.AuditLogFunc - if len(b.auditLoggers) != 0 { - loggers = make([]ipnauth.AuditLogFunc, 0, len(b.auditLoggers)) - for _, getLogger := range b.auditLoggers { - loggers = append(loggers, getLogger()) - } - } - return func(action tailcfg.ClientAuditAction, details string) error { - b.logf("auditlog: %v: %v", action, details) - for _, logger := range loggers { - if err := logger(action, details); err != nil { - return err - } - } - return nil - } -} - // EditPrefs applies the changes in mp to the current prefs, // acting as the tailscaled itself rather than a specific user. func (b *LocalBackend) EditPrefs(mp *ipn.MaskedPrefs) (ipn.PrefsView, error) { @@ -4417,7 +4240,7 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip unlock := b.lockAndGetUnlock() defer unlock() if mp.WantRunningSet && !mp.WantRunning && b.pm.CurrentPrefs().WantRunning() { - if err := actor.CheckProfileAccess(b.pm.CurrentProfile(), ipnauth.Disconnect, b.getAuditLoggerLocked()); err != nil { + if err := actor.CheckProfileAccess(b.pm.CurrentProfile(), ipnauth.Disconnect, b.extHost.AuditLogger()); err != nil { b.logf("check profile access failed: %v", err) return ipn.PrefsView{}, err } @@ -6031,23 +5854,6 @@ func (b *LocalBackend) requestEngineStatusAndWait() { b.logf("requestEngineStatusAndWait: got status update.") } -// RegisterControlClientCallback registers a function to be called every time a new -// control client is created, until the returned unregister function is called. -// It panics if the cb is nil. -func (b *LocalBackend) RegisterControlClientCallback(cb NewControlClientCallback) (unregister func()) { - if cb == nil { - panic("nil control client callback") - } - b.mu.Lock() - defer b.mu.Unlock() - handle := b.newControlClientCbs.Add(cb) - return func() { - b.mu.Lock() - defer b.mu.Unlock() - delete(b.newControlClientCbs, handle) - } -} - // setControlClientLocked sets the control client to cc, // which may be nil. // @@ -7633,37 +7439,6 @@ func (b *LocalBackend) resetDialPlan() { } } -// RegisterProfileChangeCallback registers a function to be called when the current [ipn.LoginProfile] changes. -// If includeCurrent is true, the callback is called immediately with the current profile. -// The returned function unregisters the callback. -// It panics if the cb is nil. -func (b *LocalBackend) RegisterProfileChangeCallback(cb ProfileChangeCallback, includeCurrent bool) (unregister func()) { - if cb == nil { - panic("nil profile change callback") - } - b.mu.Lock() - defer b.mu.Unlock() - handle := b.profileChangeCbs.Add(cb) - if includeCurrent { - cb(b.pm.CurrentProfile(), stripKeysFromPrefs(b.pm.CurrentPrefs()), false) - } - return func() { - b.mu.Lock() - defer b.mu.Unlock() - delete(b.profileChangeCbs, handle) - } -} - -// notifyProfileChangeLocked invokes all registered profile change callbacks. -// -// b.mu must be held. 
-func (b *LocalBackend) notifyProfileChangeLocked(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { - prefs = stripKeysFromPrefs(prefs) - for _, cb := range b.profileChangeCbs { - cb(profile, prefs, sameNode) - } -} - // getHardwareAddrs returns the hardware addresses for the machine. If the list // of hardware addresses is empty, it will return the previously known hardware // addresses. Both the current, and previously known hardware addresses might be @@ -7711,7 +7486,7 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err b.lastSuggestedExitNode = "" b.keyExpired = false b.resetAlwaysOnOverrideLocked() - b.notifyProfileChangeLocked(b.pm.CurrentProfile(), b.pm.CurrentPrefs(), false) + b.extHost.NotifyProfileChange(b.pm.CurrentProfile(), b.pm.CurrentPrefs(), false) b.setAtomicValuesFromPrefsLocked(b.pm.CurrentPrefs()) b.enterStateLockedOnEntry(ipn.NoState, unlock) // Reset state; releases b.mu b.health.SetLocalLogConfigHealth(nil) diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 901a4a899..057fe2aae 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -17,6 +17,7 @@ import ( "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/ipn" + "tailscale.com/ipn/ipnext" "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" @@ -24,6 +25,9 @@ import ( var debug = envknob.RegisterBool("TS_DEBUG_PROFILES") +// [profileManager] implements [ipnext.ProfileStore]. +var _ ipnext.ProfileStore = (*profileManager)(nil) + // profileManager is a wrapper around an [ipn.StateStore] that manages // multiple profiles and the current profile. // diff --git a/tsd/tsd.go b/tsd/tsd.go index 1d1f35017..acd09560c 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -26,7 +26,6 @@ import ( "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/conffile" - "tailscale.com/ipn/desktop" "tailscale.com/net/dns" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" @@ -53,7 +52,6 @@ type System struct { Netstack SubSystem[NetstackImpl] // actually a *netstack.Impl DriveForLocal SubSystem[drive.FileSystemForLocal] DriveForRemote SubSystem[drive.FileSystemForRemote] - SessionManager SubSystem[desktop.SessionManager] // InitialConfig is initial server config, if any. // It is nil if the node is not in declarative mode. @@ -112,8 +110,6 @@ func (s *System) Set(v any) { s.DriveForLocal.Set(v) case drive.FileSystemForRemote: s.DriveForRemote.Set(v) - case desktop.SessionManager: - s.SessionManager.Set(v) default: panic(fmt.Sprintf("unknown type %T", v)) } From 1e290867bd76ce13d4a86f232a91b9659d7e604a Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Wed, 9 Apr 2025 09:25:19 -0700 Subject: [PATCH 0739/1708] cmd/natc: only store v4 addresses Because we derive v6 addresses from v4 addresses we only need to store the v4 address, not both. 
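
A sketch of the mapping this change relies on (it mirrors the v6ForV4 and
v4ForV6 helpers added to cmd/natc/natc.go below; ula is the connector's
v6ULA prefix address):

	// v6ForV4 embeds the 4 bytes of the v4 address into the last 4 bytes
	// of the 16-byte ULA address; v4ForV6 copies them back out, so the v4
	// address is always recoverable from the derived v6 address.
	func v6ForV4(ula netip.Addr, v4 netip.Addr) netip.Addr {
		as16 := ula.As16()
		as4 := v4.As4()
		copy(as16[12:], as4[:])
		return netip.AddrFrom16(as16)
	}
	// e.g. with ula(1): 100.64.0.0 <-> fd7a:115c:a1e0:a99c:1:0:6440:0 (see TestV6V4 below)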
Updates #14667 Signed-off-by: Fran Bull --- cmd/natc/ippool/ippool.go | 38 ++++++-------- cmd/natc/ippool/ippool_test.go | 55 +++++++------------- cmd/natc/natc.go | 23 ++++++++- cmd/natc/natc_test.go | 93 +++++++++++++++++++++++----------- 4 files changed, 120 insertions(+), 89 deletions(-) diff --git a/cmd/natc/ippool/ippool.go b/cmd/natc/ippool/ippool.go index 6f6ad1d83..dbb56d5a4 100644 --- a/cmd/natc/ippool/ippool.go +++ b/cmd/natc/ippool/ippool.go @@ -41,7 +41,7 @@ func (ipp *IPPool) DomainForIP(from tailcfg.NodeID, addr netip.Addr) (string, bo return domain, ok } -func (ipp *IPPool) IPForDomain(from tailcfg.NodeID, domain string) ([]netip.Addr, error) { +func (ipp *IPPool) IPForDomain(from tailcfg.NodeID, domain string) (netip.Addr, error) { npps := &perPeerState{ ipset: ipp.IPSet, v6ULA: ipp.V6ULA, @@ -57,7 +57,7 @@ type perPeerState struct { mu sync.Mutex addrInUse *big.Int - domainToAddr map[string][]netip.Addr + domainToAddr map[string]netip.Addr addrToDomain *bart.Table[string] } @@ -75,23 +75,23 @@ func (ps *perPeerState) domainForIP(ip netip.Addr) (_ string, ok bool) { // ipForDomain assigns a pair of unique IP addresses for the given domain and // returns them. The first address is an IPv4 address and the second is an IPv6 // address. If the domain already has assigned addresses, it returns them. -func (ps *perPeerState) ipForDomain(domain string) ([]netip.Addr, error) { +func (ps *perPeerState) ipForDomain(domain string) (netip.Addr, error) { fqdn, err := dnsname.ToFQDN(domain) if err != nil { - return nil, err + return netip.Addr{}, err } domain = fqdn.WithoutTrailingDot() ps.mu.Lock() defer ps.mu.Unlock() - if addrs, ok := ps.domainToAddr[domain]; ok { - return addrs, nil + if addr, ok := ps.domainToAddr[domain]; ok { + return addr, nil } - addrs := ps.assignAddrsLocked(domain) - if addrs == nil { - return nil, ErrNoIPsAvailable + addr := ps.assignAddrsLocked(domain) + if !addr.IsValid() { + return netip.Addr{}, ErrNoIPsAvailable } - return addrs, nil + return addr, nil } // unusedIPv4Locked returns an unused IPv4 address from the available ranges. @@ -106,22 +106,16 @@ func (ps *perPeerState) unusedIPv4Locked() netip.Addr { // and returns them. The first address is an IPv4 address and the second is an // IPv6 address. It does not check if the domain already has assigned addresses. // ps.mu must be held. 
-func (ps *perPeerState) assignAddrsLocked(domain string) []netip.Addr { +func (ps *perPeerState) assignAddrsLocked(domain string) netip.Addr { if ps.addrToDomain == nil { ps.addrToDomain = &bart.Table[string]{} } v4 := ps.unusedIPv4Locked() if !v4.IsValid() { - return nil + return netip.Addr{} } - as16 := ps.v6ULA.Addr().As16() - as4 := v4.As4() - copy(as16[12:], as4[:]) - v6 := netip.AddrFrom16(as16) - addrs := []netip.Addr{v4, v6} - mak.Set(&ps.domainToAddr, domain, addrs) - for _, a := range addrs { - ps.addrToDomain.Insert(netip.PrefixFrom(a, a.BitLen()), domain) - } - return addrs + addr := v4 + mak.Set(&ps.domainToAddr, domain, addr) + ps.addrToDomain.Insert(netip.PrefixFrom(addr, addr.BitLen()), domain) + return addr } diff --git a/cmd/natc/ippool/ippool_test.go b/cmd/natc/ippool/ippool_test.go index 84b3b7a02..19bfc856f 100644 --- a/cmd/natc/ippool/ippool_test.go +++ b/cmd/natc/ippool/ippool_test.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "net/netip" - "slices" "testing" "go4.org/netipx" @@ -33,20 +32,18 @@ func TestIPPoolExhaustion(t *testing.T) { for i := 0; i < 5; i++ { for _, domain := range domains { - addrs, err := pool.IPForDomain(from, domain) + addr, err := pool.IPForDomain(from, domain) if err != nil { errs = append(errs, fmt.Errorf("failed to get IP for domain %q: %w", domain, err)) continue } - for _, addr := range addrs { - if d, ok := assignedIPs[addr]; ok { - if d != domain { - t.Errorf("IP %s reused for domain %q, previously assigned to %q", addr, domain, d) - } - } else { - assignedIPs[addr] = domain + if d, ok := assignedIPs[addr]; ok { + if d != domain { + t.Errorf("IP %s reused for domain %q, previously assigned to %q", addr, domain, d) } + } else { + assignedIPs[addr] = domain } } } @@ -80,50 +77,36 @@ func TestIPPool(t *testing.T) { IPSet: addrPool, } from := tailcfg.NodeID(12345) - addrs, err := pool.IPForDomain(from, "example.com") + addr, err := pool.IPForDomain(from, "example.com") if err != nil { t.Fatalf("ipForDomain() error = %v", err) } - if len(addrs) != 2 { - t.Fatalf("ipForDomain() returned %d addresses, want 2", len(addrs)) + if !addr.IsValid() { + t.Fatal("ipForDomain() returned an invalid address") } - v4 := addrs[0] - v6 := addrs[1] - - if !v4.Is4() { - t.Errorf("First address is not IPv4: %s", v4) - } - - if !v6.Is6() { - t.Errorf("Second address is not IPv6: %s", v6) + if !addr.Is4() { + t.Errorf("Address is not IPv4: %s", addr) } - if !addrPool.Contains(v4) { - t.Errorf("IPv4 address %s not in range %s", v4, addrPool) - } - - domain, ok := pool.DomainForIP(from, v4) - if !ok { - t.Errorf("domainForIP(%s) not found", v4) - } else if domain != "example.com" { - t.Errorf("domainForIP(%s) = %s, want %s", v4, domain, "example.com") + if !addrPool.Contains(addr) { + t.Errorf("IPv4 address %s not in range %s", addr, addrPool) } - domain, ok = pool.DomainForIP(from, v6) + domain, ok := pool.DomainForIP(from, addr) if !ok { - t.Errorf("domainForIP(%s) not found", v6) + t.Errorf("domainForIP(%s) not found", addr) } else if domain != "example.com" { - t.Errorf("domainForIP(%s) = %s, want %s", v6, domain, "example.com") + t.Errorf("domainForIP(%s) = %s, want %s", addr, domain, "example.com") } - addrs2, err := pool.IPForDomain(from, "example.com") + addr2, err := pool.IPForDomain(from, "example.com") if err != nil { t.Fatalf("ipForDomain() second call error = %v", err) } - if !slices.Equal(addrs, addrs2) { - t.Errorf("ipForDomain() second call = %v, want %v", addrs2, addrs) + if addr.Compare(addr2) != 0 { + t.Errorf("ipForDomain() second call = %v, want 
%v", addr2, addr) } } diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index 585a0bb45..024f906c5 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -317,11 +317,12 @@ func (c *connector) handleDNS(pc net.PacketConn, buf []byte, remoteAddr *net.UDP // ignored and non-ignored addresses, but it's currently the user // preferred behavior. if !c.ignoreDestination(addrs) { - addrs, err = c.ipPool.IPForDomain(who.Node.ID, q.Name.String()) + addr, err := c.ipPool.IPForDomain(who.Node.ID, q.Name.String()) if err != nil { log.Printf("HandleDNS(remote=%s): lookup destination failed: %v\n", remoteAddr.String(), err) return } + addrs = []netip.Addr{addr, v6ForV4(c.v6ULA.Addr(), addr)} } mak.Set(&resolves, q.Name.String(), addrs) } @@ -414,6 +415,20 @@ func (c *connector) handleDNS(pc net.PacketConn, buf []byte, remoteAddr *net.UDP } } +func v6ForV4(ula netip.Addr, v4 netip.Addr) netip.Addr { + as16 := ula.As16() + as4 := v4.As4() + copy(as16[12:], as4[:]) + return netip.AddrFrom16(as16) +} + +func v4ForV6(v6 netip.Addr) netip.Addr { + as16 := v6.As16() + var as4 [4]byte + copy(as4[:], as16[12:]) + return netip.AddrFrom4(as4) +} + // tsMBox is the mailbox used in SOA records. // The convention is to replace the @ symbol with a dot. // So in this case, the mailbox is support.tailscale.com. with the trailing dot @@ -434,7 +449,11 @@ func (c *connector) handleTCPFlow(src, dst netip.AddrPort) (handler func(net.Con log.Printf("HandleTCPFlow: WhoIs failed: %v\n", err) return nil, false } - domain, ok := c.ipPool.DomainForIP(who.Node.ID, dst.Addr()) + dstAddr := dst.Addr() + if dstAddr.Is6() { + dstAddr = v4ForV6(dstAddr) + } + domain, ok := c.ipPool.DomainForIP(who.Node.ID, dstAddr) if !ok { return nil, false } diff --git a/cmd/natc/natc_test.go b/cmd/natc/natc_test.go index 8fe38de1c..fa005e457 100644 --- a/cmd/natc/natc_test.go +++ b/cmd/natc/natc_test.go @@ -340,50 +340,59 @@ func TestDNSResponse(t *testing.T) { t.Errorf("answer[%d] not an A record", i) continue } - resource := ans.Body.(*dnsmessage.AResource) - gotIP := netip.AddrFrom4([4]byte(resource.A)) - - var ips []netip.Addr - if tc.wantIgnored { - ips = must.Get(c.resolver.LookupNetIP(t.Context(), "ip4", want.name)) - } else { - ips = must.Get(c.ipPool.IPForDomain(tailcfg.NodeID(123), want.name)) - } - var wantIP netip.Addr - for _, ip := range ips { - if ip.Is4() { - wantIP = ip - break - } - } - if gotIP != wantIP { - t.Errorf("answer[%d] IP = %s, want %s", i, gotIP, wantIP) - } case dnsmessage.TypeAAAA: if ans.Body.(*dnsmessage.AAAAResource) == nil { t.Errorf("answer[%d] not an AAAA record", i) continue } + } + + var gotIP netip.Addr + switch want.qType { + case dnsmessage.TypeA: + resource := ans.Body.(*dnsmessage.AResource) + gotIP = netip.AddrFrom4([4]byte(resource.A)) + case dnsmessage.TypeAAAA: resource := ans.Body.(*dnsmessage.AAAAResource) - gotIP := netip.AddrFrom16([16]byte(resource.AAAA)) + gotIP = netip.AddrFrom16([16]byte(resource.AAAA)) + } - var ips []netip.Addr - if tc.wantIgnored { - ips = must.Get(c.resolver.LookupNetIP(t.Context(), "ip6", want.name)) - } else { - ips = must.Get(c.ipPool.IPForDomain(tailcfg.NodeID(123), want.name)) + var wantIP netip.Addr + if tc.wantIgnored { + var net string + var fxSelectIP func(netip.Addr) bool + switch want.qType { + case dnsmessage.TypeA: + net = "ip4" + fxSelectIP = func(a netip.Addr) bool { + return a.Is4() + } + case dnsmessage.TypeAAAA: + //TODO(fran) is this branch exercised? 
+ net = "ip6" + fxSelectIP = func(a netip.Addr) bool { + return a.Is6() + } } - var wantIP netip.Addr + ips := must.Get(c.resolver.LookupNetIP(t.Context(), net, want.name)) for _, ip := range ips { - if ip.Is6() { + if fxSelectIP(ip) { wantIP = ip break } } - if gotIP != wantIP { - t.Errorf("answer[%d] IP = %s, want %s", i, gotIP, wantIP) + } else { + addr := must.Get(c.ipPool.IPForDomain(tailcfg.NodeID(123), want.name)) + switch want.qType { + case dnsmessage.TypeA: + wantIP = addr + case dnsmessage.TypeAAAA: + wantIP = v6ForV4(v6ULA.Addr(), addr) } } + if gotIP != wantIP { + t.Errorf("answer[%d] IP = %s, want %s", i, gotIP, wantIP) + } } } } @@ -445,3 +454,29 @@ func TestIgnoreDestination(t *testing.T) { }) } } + +func TestV6V4(t *testing.T) { + v6ULA := ula(1) + + tests := [][]string{ + {"100.64.0.0", "fd7a:115c:a1e0:a99c:1:0:6440:0"}, + {"0.0.0.0", "fd7a:115c:a1e0:a99c:1::"}, + {"255.255.255.255", "fd7a:115c:a1e0:a99c:1:0:ffff:ffff"}, + } + + for i, test := range tests { + // to v6 + v6 := v6ForV4(v6ULA.Addr(), netip.MustParseAddr(test[0])) + want := netip.MustParseAddr(test[1]) + if v6 != want { + t.Fatalf("test %d: want: %v, got: %v", i, want, v6) + } + + // to v4 + v4 := v4ForV6(netip.MustParseAddr(test[1])) + want = netip.MustParseAddr(test[0]) + if v4 != want { + t.Fatalf("test %d: want: %v, got: %v", i, want, v4) + } + } +} From f28c8d0ec0b4dbdccd87ee43aa13ce13485dc2b1 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 11 Apr 2025 10:09:03 -0500 Subject: [PATCH 0740/1708] ipn/ipn{ext,local}: allow extension lookup by name or type In this PR, we add two methods to facilitate extension lookup by both extensions, and non-extensions (e.g., PeerAPI or LocalAPI handlers): - FindExtensionByName returns an extension with the specified name. It can then be type asserted to a given type. - FindMatchingExtension is like errors.As, but for extensions. It returns the first extension that matches the target type (either a specific extension or an interface). Updates tailscale/corp#27645 Updates tailscale/corp#27502 Signed-off-by: Nick Khyl --- ipn/ipnext/ipnext.go | 19 ++++++ ipn/ipnlocal/extension_host.go | 55 +++++++++++++++++ ipn/ipnlocal/extension_host_test.go | 94 +++++++++++++++++++++++++++++ ipn/ipnlocal/local.go | 16 +++++ 4 files changed, 184 insertions(+) diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index af870b53a..f8fd500ce 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -174,6 +174,9 @@ func DefinitionWithErrForTest(name string, err error) *Definition { // // A host must be safe for concurrent use. type Host interface { + // Extensions returns the host's [ExtensionServices]. + Extensions() ExtensionServices + // Profiles returns the host's [ProfileServices]. Profiles() ProfileServices @@ -197,6 +200,22 @@ type Host interface { RegisterControlClientCallback(NewControlClientCallback) (unregister func()) } +// ExtensionServices provides access to the [Host]'s extension management services, +// such as fetching active extensions. +type ExtensionServices interface { + // FindExtensionByName returns an active extension with the given name, + // or nil if no such extension exists. + FindExtensionByName(name string) any + + // FindMatchingExtension finds the first active extension that matches target, + // and if one is found, sets target to that extension and returns true. + // Otherwise, it returns false. + // + // It panics if target is not a non-nil pointer to either a type + // that implements [ipnext.Extension], or to any interface type. 
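+	//
+	// A hypothetical usage sketch (fooExtension stands in for any registered
+	// extension type and is illustrative only, not a type in this repo):
+	//
+	//	var ext *fooExtension
+	//	if host.Extensions().FindMatchingExtension(&ext) {
+	//		// ext is now the first active extension assignable to *fooExtension.
+	//	}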
+ FindMatchingExtension(target any) bool +} + // ProfileServices provides access to the [Host]'s profile management services, // such as switching profiles and registering profile change callbacks. type ProfileServices interface { diff --git a/ipn/ipnlocal/extension_host.go b/ipn/ipnlocal/extension_host.go index 4a617ed72..9c6b6d44c 100644 --- a/ipn/ipnlocal/extension_host.go +++ b/ipn/ipnlocal/extension_host.go @@ -9,6 +9,7 @@ import ( "fmt" "iter" "maps" + "reflect" "slices" "strings" "sync" @@ -233,6 +234,60 @@ func (h *ExtensionHost) init() { } +// Extensions implements [ipnext.Host]. +func (h *ExtensionHost) Extensions() ipnext.ExtensionServices { + // Currently, [ExtensionHost] implements [ExtensionServices] directly. + // We might want to extract it to a separate type in the future. + return h +} + +// FindExtensionByName implements [ipnext.ExtensionServices] +// and is also used by the [LocalBackend]. +// It returns nil if the extension is not found. +func (h *ExtensionHost) FindExtensionByName(name string) any { + if h == nil { + return nil + } + h.mu.Lock() + defer h.mu.Unlock() + return h.extensionsByName[name] +} + +// extensionIfaceType is the runtime type of the [ipnext.Extension] interface. +var extensionIfaceType = reflect.TypeFor[ipnext.Extension]() + +// FindMatchingExtension implements [ipnext.ExtensionServices] +// and is also used by the [LocalBackend]. +func (h *ExtensionHost) FindMatchingExtension(target any) bool { + if h == nil { + return false + } + + if target == nil { + panic("ipnext: target cannot be nil") + } + + val := reflect.ValueOf(target) + typ := val.Type() + if typ.Kind() != reflect.Ptr || val.IsNil() { + panic("ipnext: target must be a non-nil pointer") + } + targetType := typ.Elem() + if targetType.Kind() != reflect.Interface && !targetType.Implements(extensionIfaceType) { + panic("ipnext: *target must be interface or implement ipnext.Extension") + } + + h.mu.Lock() + defer h.mu.Unlock() + for _, ext := range h.activeExtensions { + if reflect.TypeOf(ext).AssignableTo(targetType) { + val.Elem().Set(reflect.ValueOf(ext)) + return true + } + } + return false +} + // Profiles implements [ipnext.Host]. func (h *ExtensionHost) Profiles() ipnext.ProfileServices { // Currently, [ExtensionHost] implements [ipnext.ProfileServices] directly. diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go index 1e03abaa1..cefe9339d 100644 --- a/ipn/ipnlocal/extension_host_test.go +++ b/ipn/ipnlocal/extension_host_test.go @@ -299,6 +299,100 @@ func TestNewExtensionHost(t *testing.T) { } } +// TestFindMatchingExtension tests that [ExtensionHost.FindMatchingExtension] correctly +// finds extensions by their type or interface. +func TestFindMatchingExtension(t *testing.T) { + t.Parallel() + + // Define test extension types and a couple of interfaces + type ( + extensionA struct { + testExtension + } + extensionB struct { + testExtension + } + extensionC struct { + testExtension + } + supportedIface interface { + Name() string + } + unsupportedIface interface { + Unsupported() + } + ) + + // Register extensions A and B, but not C. 
+ extA := &extensionA{testExtension: testExtension{name: "A"}} + extB := &extensionB{testExtension: testExtension{name: "B"}} + h := newExtensionHostForTest[ipnext.Extension](t, &testBackend{}, true, extA, extB) + + var gotA *extensionA + if !h.FindMatchingExtension(&gotA) { + t.Errorf("LookupExtension(%T): not found", gotA) + } else if gotA != extA { + t.Errorf("LookupExtension(%T): got %v; want %v", gotA, gotA, extA) + } + + var gotB *extensionB + if !h.FindMatchingExtension(&gotB) { + t.Errorf("LookupExtension(%T): extension B not found", gotB) + } else if gotB != extB { + t.Errorf("LookupExtension(%T): got %v; want %v", gotB, gotB, extB) + } + + var gotC *extensionC + if h.FindMatchingExtension(&gotC) { + t.Errorf("LookupExtension(%T): found, but it should not exist", gotC) + } + + // All extensions implement the supportedIface interface, + // but LookupExtension should only return the first one found, + // which is extA. + var gotSupportedIface supportedIface + if !h.FindMatchingExtension(&gotSupportedIface) { + t.Errorf("LookupExtension(%T): not found", gotSupportedIface) + } else if gotName, wantName := gotSupportedIface.Name(), extA.Name(); gotName != wantName { + t.Errorf("LookupExtension(%T): name: got %v; want %v", gotSupportedIface, gotName, wantName) + } else if gotSupportedIface != extA { + t.Errorf("LookupExtension(%T): got %v; want %v", gotSupportedIface, gotSupportedIface, extA) + } + + var gotUnsupportedIface unsupportedIface + if h.FindMatchingExtension(&gotUnsupportedIface) { + t.Errorf("LookupExtension(%T): found, but it should not exist", gotUnsupportedIface) + } +} + +// TestFindExtensionByName tests that [ExtensionHost.FindExtensionByName] correctly +// finds extensions by their name. +func TestFindExtensionByName(t *testing.T) { + // Register extensions A and B, but not C. + extA := &testExtension{name: "A"} + extB := &testExtension{name: "B"} + h := newExtensionHostForTest(t, &testBackend{}, true, extA, extB) + + gotA, ok := h.FindExtensionByName(extA.Name()).(*testExtension) + if !ok { + t.Errorf("FindExtensionByName(%q): not found", extA.Name()) + } else if gotA != extA { + t.Errorf(`FindExtensionByName(%q): got %v; want %v`, extA.Name(), gotA, extA) + } + + gotB, ok := h.FindExtensionByName(extB.Name()).(*testExtension) + if !ok { + t.Errorf("FindExtensionByName(%q): not found", extB.Name()) + } else if gotB != extB { + t.Errorf(`FindExtensionByName(%q): got %v; want %v`, extB.Name(), gotB, extB) + } + + gotC, ok := h.FindExtensionByName("C").(*testExtension) + if ok { + t.Errorf(`FindExtensionByName("C"): found, but it should not exist: %v`, gotC) + } +} + // TestExtensionHostEnqueueBackendOperation verifies that [ExtensionHost] enqueues // backend operations and executes them asynchronously in the order they were received. // It also checks that operations requested before the host and all extensions are initialized diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 0f3ea1fbb..9ec4b4767 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -589,6 +589,22 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo return b, nil } +// FindExtensionByName returns an active extension with the given name, +// or nil if no such extension exists. 
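+//
+// Callers typically type-assert the result to a concrete extension type,
+// for example (somefeature is an illustrative name, not part of this change):
+//
+//	ext, ok := b.FindExtensionByName("somefeature").(*somefeature.Extension)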
+func (b *LocalBackend) FindExtensionByName(name string) any { + return b.extHost.Extensions().FindExtensionByName(name) +} + +// FindMatchingExtension finds the first active extension that matches target, +// and if one is found, sets target to that extension and returns true. +// Otherwise, it returns false. +// +// It panics if target is not a non-nil pointer to either a type +// that implements [ipnext.Extension], or to any interface type. +func (b *LocalBackend) FindMatchingExtension(target any) bool { + return b.extHost.Extensions().FindMatchingExtension(target) +} + type componentLogState struct { until time.Time timer tstime.TimerController // if non-nil, the AfterFunc to disable it From e84522e3e3a4164f477be75629cfd528e75b5e80 Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Mon, 14 Apr 2025 13:47:52 +0200 Subject: [PATCH 0741/1708] release/dist/cli: add option to override out path Allow builds to be outputted to a specific directory. By default, or if unset, artifacts are written to PWD/dist. Updates tailscale/corp#27638 Signed-off-by: Kristoffer Dalby --- release/dist/cli/cli.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/release/dist/cli/cli.go b/release/dist/cli/cli.go index 9b861ddd7..f4480cbdb 100644 --- a/release/dist/cli/cli.go +++ b/release/dist/cli/cli.go @@ -65,6 +65,7 @@ func CLI(getTargets func() ([]dist.Target, error)) *ffcli.Command { fs.StringVar(&buildArgs.manifest, "manifest", "", "manifest file to write") fs.BoolVar(&buildArgs.verbose, "verbose", false, "verbose logging") fs.StringVar(&buildArgs.webClientRoot, "web-client-root", "", "path to root of web client source to build") + fs.StringVar(&buildArgs.outPath, "out", "", "path to write output artifacts (defaults to '$PWD/dist' if not set)") return fs })(), LongHelp: strings.TrimSpace(` @@ -156,6 +157,7 @@ var buildArgs struct { manifest string verbose bool webClientRoot string + outPath string } func runBuild(ctx context.Context, filters []string, targets []dist.Target) error { @@ -172,7 +174,11 @@ func runBuild(ctx context.Context, filters []string, targets []dist.Target) erro if err != nil { return fmt.Errorf("getting working directory: %w", err) } - b, err := dist.NewBuild(wd, filepath.Join(wd, "dist")) + outPath := filepath.Join(wd, "dist") + if buildArgs.outPath != "" { + outPath = buildArgs.outPath + } + b, err := dist.NewBuild(wd, outPath) if err != nil { return fmt.Errorf("creating build context: %w", err) } From 624c25bd4936f5388393487065d427d73169904e Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Mon, 14 Apr 2025 15:20:50 +0100 Subject: [PATCH 0742/1708] docs/commit-messages.md: merge two 'commit messages' sections (#15668) Updates#cleanup Signed-off-by: Irbe Krumina --- docs/commit-messages.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/docs/commit-messages.md b/docs/commit-messages.md index 36b539689..22a6e67ce 100644 --- a/docs/commit-messages.md +++ b/docs/commit-messages.md @@ -1,16 +1,12 @@ # Commit messages +There are different styles of commit messages followed by different projects. This is Tailscale's style guide for writing git commit messages. - As with all style guides, many things here are subjective and exist primarily to codify existing conventions and promote uniformity and thus ease of reading by others. Others have stronger reasons, such as interop with tooling or making future git archaeology easier. -# Commit Messages - -There are different styles of commit messages followed by different projects. 
- Our commit message style is largely based on the Go language's style, which shares much in common with the Linux kernel's git commit message style (for which git was invented): From 6502b7d667b3325e8f456103a718d0780a9e4d98 Mon Sep 17 00:00:00 2001 From: Erisa A Date: Mon, 14 Apr 2025 16:54:51 +0100 Subject: [PATCH 0743/1708] scripts/installer.sh: add Miracle Linux as a RHEL derivative (#15671) Fixes #15669 Signed-off-by: Erisa A --- scripts/installer.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/installer.sh b/scripts/installer.sh index 0b360b8a1..f81ae5292 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -208,8 +208,11 @@ main() { PACKAGETYPE="yum" fi ;; - rhel) + rhel|miraclelinux) OS="$ID" + if [ "$ID" = "miraclelinux" ]; then + OS="rhel" + fi VERSION="$(echo "$VERSION_ID" | cut -f1 -d.)" PACKAGETYPE="dnf" if [ "$VERSION" = "7" ]; then From 40e0c349a77389022df224319dc7e3b194ee59cf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 11:05:33 -0600 Subject: [PATCH 0744/1708] .github: Bump github/codeql-action from 3.28.14 to 3.28.15 (#15665) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.14 to 3.28.15. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/fc7e4a0fa01c3cca5fd6a1fddec5c0740c977aa2...45775bd8235c68ba998cffa5171334d58593da47) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.28.15 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index c1d0936e7..311f539e1 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@fc7e4a0fa01c3cca5fd6a1fddec5c0740c977aa2 # v3.28.14 + uses: github/codeql-action/init@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@fc7e4a0fa01c3cca5fd6a1fddec5c0740c977aa2 # v3.28.14 + uses: github/codeql-action/autobuild@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@fc7e4a0fa01c3cca5fd6a1fddec5c0740c977aa2 # v3.28.14 + uses: github/codeql-action/analyze@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15 From 62182fc37d44c0a8185b7d96f30465710dd68b66 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 14 Apr 2025 10:09:56 -0700 Subject: [PATCH 0745/1708] wgengine/netstack: revert cubic cc to reno cc (#15677) Updates google/gvisor#11632 Updates tailscale/corp#27717 Signed-off-by: Jordan Whited --- wgengine/netstack/netstack.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 04bab0cf9..dab692ead 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -327,10 +327,15 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi if tcpipErr != nil { return nil, fmt.Errorf("could not disable TCP RACK: %v", tcpipErr) } - cubicOpt := tcpip.CongestionControlOption("cubic") - tcpipErr = ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &cubicOpt) + // gVisor defaults to reno at the time of writing. We explicitly set reno + // congestion control in order to prevent unexpected changes. Netstack + // has an int overflow in sender congestion window arithmetic that is more + // prone to trigger with cubic congestion control. + // See https://github.com/google/gvisor/issues/11632 + renoOpt := tcpip.CongestionControlOption("reno") + tcpipErr = ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &renoOpt) if tcpipErr != nil { - return nil, fmt.Errorf("could not set cubic congestion control: %v", tcpipErr) + return nil, fmt.Errorf("could not set reno congestion control: %v", tcpipErr) } err := setTCPBufSizes(ipstack) if err != nil { From d6fd865d41905e6d55e66613c36186847b2469f6 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Mon, 14 Apr 2025 15:02:32 -0400 Subject: [PATCH 0746/1708] hostinfo, ipnlocal: add optional os-specific callback for querying the hostname (#15647) updates tailscale/tailscale#13476 On darwin, os.Hostname is no longer reliable when called from a sandboxed process. To fix this, we will allow clients to set an optional callback to query the hostname via an alternative native API. We will leave the default implementation as os.Hostname since this works perfectly well for almost everything besides sandboxed darwin clients. Signed-off-by: Jonathan Nobels --- hostinfo/hostinfo.go | 20 +++++++++++++++++++- hostinfo/hostinfo_test.go | 29 +++++++++++++++++++++++++++++ ipn/ipnlocal/local.go | 2 +- 3 files changed, 49 insertions(+), 2 deletions(-) diff --git a/hostinfo/hostinfo.go b/hostinfo/hostinfo.go index afb465ece..3e8f2f994 100644 --- a/hostinfo/hostinfo.go +++ b/hostinfo/hostinfo.go @@ -43,7 +43,7 @@ func RegisterHostinfoNewHook(f func(*tailcfg.Hostinfo)) { // New returns a partially populated Hostinfo for the current host. func New() *tailcfg.Hostinfo { - hostname, _ := os.Hostname() + hostname, _ := Hostname() hostname = dnsname.FirstLabel(hostname) hi := &tailcfg.Hostinfo{ IPNVersion: version.Long(), @@ -509,3 +509,21 @@ func IsInVM86() bool { return New().DeviceModel == copyV86DeviceModel }) } + +type hostnameQuery func() (string, error) + +var hostnameFn atomic.Value // of func() (string, error) + +// SetHostNameFn sets a custom function for querying the system hostname. 
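+//
+// For example, a sandboxed macOS client could register a callback that asks
+// a native API for the hostname (sketch; queryNativeHostname is a
+// hypothetical platform-specific helper):
+//
+//	hostinfo.SetHostnameFn(func() (string, error) {
+//		return queryNativeHostname()
+//	})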
+func SetHostnameFn(fn hostnameQuery) { + hostnameFn.Store(fn) +} + +// Hostname returns the system hostname using the function +// set by SetHostNameFn. We will fallback to os.Hostname. +func Hostname() (string, error) { + if fn, ok := hostnameFn.Load().(hostnameQuery); ok && fn != nil { + return fn() + } + return os.Hostname() +} diff --git a/hostinfo/hostinfo_test.go b/hostinfo/hostinfo_test.go index 9fe32e044..15b6971b6 100644 --- a/hostinfo/hostinfo_test.go +++ b/hostinfo/hostinfo_test.go @@ -5,6 +5,7 @@ package hostinfo import ( "encoding/json" + "os" "strings" "testing" ) @@ -49,3 +50,31 @@ func TestEtcAptSourceFileIsDisabled(t *testing.T) { }) } } + +func TestCustomHostnameFunc(t *testing.T) { + want := "custom-hostname" + SetHostnameFn(func() (string, error) { + return want, nil + }) + + got, err := Hostname() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if got != want { + t.Errorf("got %q, want %q", got, want) + } + + SetHostnameFn(os.Hostname) + got, err = Hostname() + want, _ = os.Hostname() + + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != want { + t.Errorf("got %q, want %q", got, want) + } + +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9ec4b4767..e21403fbe 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1245,7 +1245,7 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { } } else { - ss.HostName, _ = os.Hostname() + ss.HostName, _ = hostinfo.Hostname() } for _, pln := range b.peerAPIListeners { ss.PeerAPIURL = append(ss.PeerAPIURL, pln.urlStr) From 10fd61f1bb6b068ced3cffe059133e51e17ffb92 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Mon, 14 Apr 2025 12:50:12 -0700 Subject: [PATCH 0747/1708] go.mod: bump golang.org/x/crypto and related Updates #15680 Signed-off-by: James Tucker --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index e430fcb6d..df8922de3 100644 --- a/go.mod +++ b/go.mod @@ -98,14 +98,14 @@ require ( go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.35.0 + golang.org/x/crypto v0.37.0 golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac golang.org/x/mod v0.23.0 golang.org/x/net v0.36.0 golang.org/x/oauth2 v0.26.0 - golang.org/x/sync v0.11.0 - golang.org/x/sys v0.31.0 - golang.org/x/term v0.29.0 + golang.org/x/sync v0.13.0 + golang.org/x/sys v0.32.0 + golang.org/x/term v0.31.0 golang.org/x/time v0.10.0 golang.org/x/tools v0.30.0 golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 @@ -392,7 +392,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect golang.org/x/image v0.24.0 // indirect - golang.org/x/text v0.22.0 // indirect + golang.org/x/text v0.24.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 0c3e566be..860b4aa7c 100644 --- a/go.sum +++ b/go.sum @@ -1087,8 +1087,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.35.0 
h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1200,8 +1200,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1264,16 +1264,16 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= -golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1284,8 +1284,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 4cb9d5c18326b8454feb3c4cd0b4d835380984b9 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Mon, 14 Apr 2025 13:51:59 -0700 Subject: [PATCH 0748/1708] cmd/natc: cleanup unused state perPeerState no longer needs to know the v6ULA. Updates #14667 Signed-off-by: Fran Bull --- cmd/natc/ippool/ippool.go | 3 --- cmd/natc/ippool/ippool_test.go | 7 +------ cmd/natc/natc.go | 2 +- cmd/natc/natc_test.go | 2 +- 4 files changed, 3 insertions(+), 11 deletions(-) diff --git a/cmd/natc/ippool/ippool.go b/cmd/natc/ippool/ippool.go index dbb56d5a4..3a46a6e7a 100644 --- a/cmd/natc/ippool/ippool.go +++ b/cmd/natc/ippool/ippool.go @@ -24,7 +24,6 @@ var ErrNoIPsAvailable = errors.New("no IPs available") type IPPool struct { perPeerMap syncs.Map[tailcfg.NodeID, *perPeerState] IPSet *netipx.IPSet - V6ULA netip.Prefix } func (ipp *IPPool) DomainForIP(from tailcfg.NodeID, addr netip.Addr) (string, bool) { @@ -44,7 +43,6 @@ func (ipp *IPPool) DomainForIP(from tailcfg.NodeID, addr netip.Addr) (string, bo func (ipp *IPPool) IPForDomain(from tailcfg.NodeID, domain string) (netip.Addr, error) { npps := &perPeerState{ ipset: ipp.IPSet, - v6ULA: ipp.V6ULA, } ps, _ := ipp.perPeerMap.LoadOrStore(from, npps) return ps.ipForDomain(domain) @@ -52,7 +50,6 @@ func (ipp *IPPool) IPForDomain(from tailcfg.NodeID, domain string) (netip.Addr, // perPeerState holds the state for a single peer. 
type perPeerState struct { - v6ULA netip.Prefix ipset *netipx.IPSet mu sync.Mutex diff --git a/cmd/natc/ippool/ippool_test.go b/cmd/natc/ippool/ippool_test.go index 19bfc856f..2919d7757 100644 --- a/cmd/natc/ippool/ippool_test.go +++ b/cmd/natc/ippool/ippool_test.go @@ -19,8 +19,7 @@ func TestIPPoolExhaustion(t *testing.T) { var ipsb netipx.IPSetBuilder ipsb.AddPrefix(smallPrefix) addrPool := must.Get(ipsb.IPSet()) - v6ULA := netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80") - pool := IPPool{V6ULA: v6ULA, IPSet: addrPool} + pool := IPPool{IPSet: addrPool} assignedIPs := make(map[netip.Addr]string) @@ -52,9 +51,6 @@ func TestIPPoolExhaustion(t *testing.T) { if addr.Is4() && !smallPrefix.Contains(addr) { t.Errorf("IP %s for domain %q not in expected range %s", addr, domain, smallPrefix) } - if addr.Is6() && !v6ULA.Contains(addr) { - t.Errorf("IP %s for domain %q not in expected range %s", addr, domain, v6ULA) - } } // expect one error for each iteration with the 5th domain @@ -73,7 +69,6 @@ func TestIPPool(t *testing.T) { ipsb.AddPrefix(netip.MustParsePrefix("100.64.1.0/24")) addrPool := must.Get(ipsb.IPSet()) pool := IPPool{ - V6ULA: netip.MustParsePrefix("fd7a:115c:a1e0:a99c:0001::/80"), IPSet: addrPool, } from := tailcfg.NodeID(12345) diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index 024f906c5..b327f55bd 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -153,7 +153,7 @@ func main() { whois: lc, v6ULA: v6ULA, ignoreDsts: ignoreDstTable, - ipPool: &ippool.IPPool{V6ULA: v6ULA, IPSet: addrPool}, + ipPool: &ippool.IPPool{IPSet: addrPool}, routes: routes, dnsAddr: dnsAddr, resolver: net.DefaultResolver, diff --git a/cmd/natc/natc_test.go b/cmd/natc/natc_test.go index fa005e457..0320db8a4 100644 --- a/cmd/natc/natc_test.go +++ b/cmd/natc/natc_test.go @@ -270,7 +270,7 @@ func TestDNSResponse(t *testing.T) { ignoreDsts: &bart.Table[bool]{}, routes: routes, v6ULA: v6ULA, - ipPool: &ippool.IPPool{V6ULA: v6ULA, IPSet: addrPool}, + ipPool: &ippool.IPPool{IPSet: addrPool}, dnsAddr: dnsAddr, } c.ignoreDsts.Insert(netip.MustParsePrefix("8.8.4.4/32"), true) From d8e3bce0b47b69a76cb7c5f69df608a77f23ae5f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 16:04:36 -0600 Subject: [PATCH 0749/1708] .github: Bump golangci/golangci-lint-action from 6.5.0 to 7.0.0 (#15476) Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.5.0 to 7.0.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/2226d7cb06a077cd73e56eedd38eecad18e5d837...1481404843c368bc19ca9406f87d6e0fc97bdcfd) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/golangci-lint.yml | 4 +- .golangci.yml | 187 +++++++++++++++------------- 2 files changed, 102 insertions(+), 89 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index bbe67b0eb..04a2e042d 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -31,9 +31,9 @@ jobs: cache: false - name: golangci-lint - uses: golangci/golangci-lint-action@2226d7cb06a077cd73e56eedd38eecad18e5d837 # v6.5.0 + uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7.0.0 with: - version: v1.64 + version: v2.0.2 # Show only new issues if it's a pull request. only-new-issues: true diff --git a/.golangci.yml b/.golangci.yml index 15f8b5d83..eb34f9d9e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,97 +1,110 @@ +version: "2" +# Configuration for how we run golangci-lint +# Timeout of 5m was the default in v1. +run: + timeout: 5m linters: # Don't enable any linters by default; just the ones that we explicitly # enable in the list below. - disable-all: true + default: none enable: - bidichk - - gofmt - - goimports - govet - misspell - revive - -# Configuration for how we run golangci-lint -run: - timeout: 5m - -issues: - # Excluding configuration per-path, per-linter, per-text and per-source - exclude-rules: - # These are forks of an upstream package and thus are exempt from stylistic - # changes that would make pulling in upstream changes harder. - - path: tempfork/.*\.go - text: "File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'`" - - path: util/singleflight/.*\.go - text: "File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'`" - -# Per-linter settings are contained in this top-level key -linters-settings: - gofmt: - rewrite-rules: - - pattern: 'interface{}' - replacement: 'any' - - govet: + settings: # Matches what we use in corp as of 2023-12-07 - enable: - - asmdecl - - assign - - atomic - - bools - - buildtag - - cgocall - - copylocks - - deepequalerrors - - errorsas - - framepointer - - httpresponse - - ifaceassert - - loopclosure - - lostcancel - - nilfunc - - nilness - - printf - - reflectvaluecompare - - shift - - sigchanyzer - - sortslice - - stdmethods - - stringintconv - - structtag - - testinggoroutine - - tests - - unmarshal - - unreachable - - unsafeptr - - unusedresult - settings: - printf: - # List of print function names to check (in addition to default) - funcs: - - github.com/tailscale/tailscale/types/logger.Discard - # NOTE(andrew-d): this doesn't currently work because the printf - # analyzer doesn't support type declarations - #- github.com/tailscale/tailscale/types/logger.Logf - - revive: - enable-all-rules: false - ignore-generated-header: true + govet: + enable: + - asmdecl + - assign + - atomic + - bools + - buildtag + - cgocall + - copylocks + - deepequalerrors + - errorsas + - framepointer + - httpresponse + - ifaceassert + - loopclosure + - lostcancel + - nilfunc + - nilness + - printf + - reflectvaluecompare + - shift + - sigchanyzer + - sortslice + - stdmethods + - stringintconv + - structtag + - testinggoroutine + - tests + - unmarshal + - unreachable + - unsafeptr + - unusedresult + settings: + printf: + # List of print function names to check (in addition to default) + funcs: + - github.com/tailscale/tailscale/types/logger.Discard + # NOTE(andrew-d): this doesn't currently work because the printf + # analyzer doesn't 
support type declarations + #- github.com/tailscale/tailscale/types/logger.Logf + revive: + enable-all-rules: false + rules: + - name: atomic + - name: context-keys-type + - name: defer + arguments: [[ + # Calling 'recover' at the time a defer is registered (i.e. "defer recover()") has no effect. + "immediate-recover", + # Calling 'recover' outside of a deferred function has no effect + "recover", + # Returning values from a deferred function has no effect + "return", + ]] + - name: duplicated-imports + - name: errorf + - name: string-of-int + - name: time-equal + - name: unconditional-recursion + - name: useless-break + - name: waitgroup-by-value + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling rules: - - name: atomic - - name: context-keys-type - - name: defer - arguments: [[ - # Calling 'recover' at the time a defer is registered (i.e. "defer recover()") has no effect. - "immediate-recover", - # Calling 'recover' outside of a deferred function has no effect - "recover", - # Returning values from a deferred function has no effect - "return", - ]] - - name: duplicated-imports - - name: errorf - - name: string-of-int - - name: time-equal - - name: unconditional-recursion - - name: useless-break - - name: waitgroup-by-value + # These are forks of an upstream package and thus are exempt from stylistic + # changes that would make pulling in upstream changes harder. + - path: tempfork/.*\.go + text: File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'` + - path: util/singleflight/.*\.go + text: File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'` + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + settings: + gofmt: + rewrite-rules: + - pattern: interface{} + replacement: any + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ From 21400756a0a6d3439ba637ef4cd71acaf616d5d5 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 14 Apr 2025 17:13:59 -0700 Subject: [PATCH 0750/1708] tstest/integration: simplify TestDNSOverTCPIntervalResolver (#15686) Query for the const quad-100 reverse DNS name, for which a forward record will also be served. This test was previously dependent on search domain behavior, and now it is not. Updates #15607 Signed-off-by: Jordan Whited --- tstest/integration/integration_test.go | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 81a1cd9dc..20d8908da 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -51,7 +51,6 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/opt" "tailscale.com/types/ptr" - "tailscale.com/util/dnsname" "tailscale.com/util/must" "tailscale.com/util/rands" "tailscale.com/version" @@ -1140,18 +1139,9 @@ func TestDNSOverTCPIntervalResolver(t *testing.T) { n1.AwaitResponding() n1.MustUp() - - wantIP4 := n1.AwaitIP4() n1.AwaitRunning() - status, err := n1.Status() - if err != nil { - t.Fatalf("failed to get node status: %v", err) - } - selfDNSName, err := dnsname.ToFQDN(status.Self.DNSName) - if err != nil { - t.Fatalf("error converting self dns name to fqdn: %v", err) - } + const dnsSymbolicFQDN = "magicdns.localhost-tailscale-daemon." 
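+	// dnsSymbolicFQDN is a name quad-100 itself serves an A record for
+	// (resolving to 100.100.100.100), so the queries below no longer
+	// depend on search domains or this node's own MagicDNS name.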
cases := []struct { network string @@ -1167,9 +1157,9 @@ func TestDNSOverTCPIntervalResolver(t *testing.T) { }, } for _, c := range cases { - err = tstest.WaitFor(time.Second*5, func() error { + err := tstest.WaitFor(time.Second*5, func() error { m := new(dns.Msg) - m.SetQuestion(selfDNSName.WithTrailingDot(), dns.TypeA) + m.SetQuestion(dnsSymbolicFQDN, dns.TypeA) conn, err := net.DialTimeout(c.network, net.JoinHostPort(c.serviceAddr.String(), "53"), time.Second*1) if err != nil { return err @@ -1194,8 +1184,8 @@ func TestDNSOverTCPIntervalResolver(t *testing.T) { return fmt.Errorf("unexpected answer type: %s", resp.Answer[0]) } gotAddr = answer.A - if !bytes.Equal(gotAddr, wantIP4.AsSlice()) { - return fmt.Errorf("got (%s) != want (%s)", gotAddr, wantIP4) + if !bytes.Equal(gotAddr, tsaddr.TailscaleServiceIP().AsSlice()) { + return fmt.Errorf("got (%s) != want (%s)", gotAddr, tsaddr.TailscaleServiceIP()) } return nil }) From 60614fa4e54c0ec8aa834e22e0708ccf882406e8 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 14 Apr 2025 17:02:47 -0500 Subject: [PATCH 0751/1708] ipn/desktop: fix panics on Windows 10, x86 [G,S]etWindowLongPtrW are not available on 32-bit Windows, where [G,S]etWindowLongW should be used instead. The initial revision of #14945 imported the win package for calling and other Win32 API functions, which exported the correct API depending on the platform. However, the same logic wasn't implemented when we removed the win package dependency in a later revision, resulting in panics on Windows 10 x86 (there's no 32-bit Windows 11). In this PR, we update the ipn/desktop package to use either [G,S]etWindowLongPtrW or [G,S]etWindowLongW depending on the platform. Fixes #15684 Signed-off-by: Nick Khyl --- ipn/desktop/mksyscall.go | 2 -- ipn/desktop/sessions_windows.go | 35 +++++++++++++++++++++++++++++++++ ipn/desktop/zsyscall_windows.go | 20 ------------------- 3 files changed, 35 insertions(+), 22 deletions(-) diff --git a/ipn/desktop/mksyscall.go b/ipn/desktop/mksyscall.go index 305138468..b7af12366 100644 --- a/ipn/desktop/mksyscall.go +++ b/ipn/desktop/mksyscall.go @@ -11,8 +11,6 @@ package desktop //sys registerClassEx(windowClass *_WNDCLASSEX) (atom uint16, err error) [atom==0] = user32.RegisterClassExW //sys createWindowEx(dwExStyle uint32, lpClassName *uint16, lpWindowName *uint16, dwStyle uint32, x int32, y int32, nWidth int32, nHeight int32, hWndParent windows.HWND, hMenu windows.Handle, hInstance windows.Handle, lpParam unsafe.Pointer) (hWnd windows.HWND, err error) [hWnd==0] = user32.CreateWindowExW //sys defWindowProc(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) (res uintptr) = user32.DefWindowProcW -//sys setWindowLongPtr(hwnd windows.HWND, index int32, newLong uintptr) (res uintptr, err error) [res==0 && e1!=0] = user32.SetWindowLongPtrW -//sys getWindowLongPtr(hwnd windows.HWND, index int32) (res uintptr, err error) [res==0 && e1!=0] = user32.GetWindowLongPtrW //sys sendMessage(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) (res uintptr) = user32.SendMessageW //sys getMessage(lpMsg *_MSG, hwnd windows.HWND, msgMin uint32, msgMax uint32) (ret int32) = user32.GetMessageW //sys translateMessage(lpMsg *_MSG) (res bool) = user32.TranslateMessage diff --git a/ipn/desktop/sessions_windows.go b/ipn/desktop/sessions_windows.go index b26172d77..83b884228 100644 --- a/ipn/desktop/sessions_windows.go +++ b/ipn/desktop/sessions_windows.go @@ -670,3 +670,38 @@ func (cs _WTS_CONNECTSTATE_CLASS) ToSessionStatus() SessionStatus { return 
ClosedSession } } + +var ( + procGetWindowLongPtrW *windows.LazyProc + procSetWindowLongPtrW *windows.LazyProc +) + +func init() { + // GetWindowLongPtrW and SetWindowLongPtrW are only available on 64-bit platforms. + // https://web.archive.org/web/20250414195520/https://learn.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-getwindowlongptrw + if runtime.GOARCH == "386" || runtime.GOARCH == "arm" { + procGetWindowLongPtrW = moduser32.NewProc("GetWindowLongW") + procSetWindowLongPtrW = moduser32.NewProc("SetWindowLongW") + } else { + procGetWindowLongPtrW = moduser32.NewProc("GetWindowLongPtrW") + procSetWindowLongPtrW = moduser32.NewProc("SetWindowLongPtrW") + } +} + +func getWindowLongPtr(hwnd windows.HWND, index int32) (res uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetWindowLongPtrW.Addr(), 2, uintptr(hwnd), uintptr(index), 0) + res = uintptr(r0) + if res == 0 && e1 != 0 { + err = errnoErr(e1) + } + return +} + +func setWindowLongPtr(hwnd windows.HWND, index int32, newLong uintptr) (res uintptr, err error) { + r0, _, e1 := syscall.Syscall(procSetWindowLongPtrW.Addr(), 3, uintptr(hwnd), uintptr(index), uintptr(newLong)) + res = uintptr(r0) + if res == 0 && e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/ipn/desktop/zsyscall_windows.go b/ipn/desktop/zsyscall_windows.go index 222ab49e5..535274016 100644 --- a/ipn/desktop/zsyscall_windows.go +++ b/ipn/desktop/zsyscall_windows.go @@ -48,11 +48,9 @@ var ( procDestroyWindow = moduser32.NewProc("DestroyWindow") procDispatchMessageW = moduser32.NewProc("DispatchMessageW") procGetMessageW = moduser32.NewProc("GetMessageW") - procGetWindowLongPtrW = moduser32.NewProc("GetWindowLongPtrW") procPostQuitMessage = moduser32.NewProc("PostQuitMessage") procRegisterClassExW = moduser32.NewProc("RegisterClassExW") procSendMessageW = moduser32.NewProc("SendMessageW") - procSetWindowLongPtrW = moduser32.NewProc("SetWindowLongPtrW") procTranslateMessage = moduser32.NewProc("TranslateMessage") procWTSRegisterSessionNotificationEx = modwtsapi32.NewProc("WTSRegisterSessionNotificationEx") procWTSUnRegisterSessionNotificationEx = modwtsapi32.NewProc("WTSUnRegisterSessionNotificationEx") @@ -98,15 +96,6 @@ func getMessage(lpMsg *_MSG, hwnd windows.HWND, msgMin uint32, msgMax uint32) (r return } -func getWindowLongPtr(hwnd windows.HWND, index int32) (res uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowLongPtrW.Addr(), 2, uintptr(hwnd), uintptr(index), 0) - res = uintptr(r0) - if res == 0 && e1 != 0 { - err = errnoErr(e1) - } - return -} - func postQuitMessage(exitCode int32) { syscall.Syscall(procPostQuitMessage.Addr(), 1, uintptr(exitCode), 0, 0) return @@ -127,15 +116,6 @@ func sendMessage(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) return } -func setWindowLongPtr(hwnd windows.HWND, index int32, newLong uintptr) (res uintptr, err error) { - r0, _, e1 := syscall.Syscall(procSetWindowLongPtrW.Addr(), 3, uintptr(hwnd), uintptr(index), uintptr(newLong)) - res = uintptr(r0) - if res == 0 && e1 != 0 { - err = errnoErr(e1) - } - return -} - func translateMessage(lpMsg *_MSG) (res bool) { r0, _, _ := syscall.Syscall(procTranslateMessage.Addr(), 1, uintptr(unsafe.Pointer(lpMsg)), 0, 0) res = r0 != 0 From 41070566122f1132d10d8c6ab3c984bfa9511f0d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 15 Apr 2025 09:18:55 -0700 Subject: [PATCH 0752/1708] ipn/ipnlocal: skip broken TestOnTailnetDefaultAutoUpdate on macOS Updates #15691 Change-Id: I131aed8bcd83be8e97399c905683e046381c9106 Signed-off-by: 
Brad Fitzpatrick --- ipn/ipnlocal/local_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index d29c2d4bb..84e7cc209 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -15,6 +15,7 @@ import ( "os" "path/filepath" "reflect" + "runtime" "slices" "strings" "sync" @@ -2596,6 +2597,9 @@ func TestPreferencePolicyInfo(t *testing.T) { } func TestOnTailnetDefaultAutoUpdate(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("test known broken on macOS; see https://github.com/tailscale/tailscale/issues/15691") + } tests := []struct { before, after opt.Bool container opt.Bool From 7833145289aa58e10d85b9f2de4d8faefc9bab13 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 15 Apr 2025 11:18:04 -0700 Subject: [PATCH 0753/1708] ipn/auditlog: fix featureName doc typo (#15696) Updates #cleanup Signed-off-by: Jordan Whited --- ipn/auditlog/extension.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipn/auditlog/extension.go b/ipn/auditlog/extension.go index 6bbe37398..036d8fd36 100644 --- a/ipn/auditlog/extension.go +++ b/ipn/auditlog/extension.go @@ -22,7 +22,7 @@ import ( ) // featureName is the name of the feature implemented by this package. -// It is also the the [extension] name and the log prefix. +// It is also the [extension] name and the log prefix. const featureName = "auditlog" func init() { From b926cd7fc624c391d4e3595aaa4bfc80d3e6823e Mon Sep 17 00:00:00 2001 From: Satyam Soni <94950988+satyampsoni@users.noreply.github.com> Date: Tue, 15 Apr 2025 20:13:56 +0100 Subject: [PATCH 0754/1708] k8s-operator: add age column to all custom resources (#15663) This change introduces an Age column in the output for all custom resources to enhance visibility into their lifecycle status. 
Fixes #15499 Signed-off-by: satyampsoni --- .../deploy/crds/tailscale.com_connectors.yaml | 3 +++ .../deploy/crds/tailscale.com_dnsconfigs.yaml | 3 +++ .../deploy/crds/tailscale.com_proxyclasses.yaml | 3 +++ .../deploy/crds/tailscale.com_proxygroups.yaml | 3 +++ .../deploy/crds/tailscale.com_recorders.yaml | 3 +++ cmd/k8s-operator/deploy/manifests/operator.yaml | 15 +++++++++++++++ k8s-operator/apis/v1alpha1/types_connector.go | 1 + k8s-operator/apis/v1alpha1/types_proxyclass.go | 1 + k8s-operator/apis/v1alpha1/types_proxygroup.go | 1 + k8s-operator/apis/v1alpha1/types_recorder.go | 1 + k8s-operator/apis/v1alpha1/types_tsdnsconfig.go | 1 + 11 files changed, 35 insertions(+) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml index 1917e31de..d645e3922 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml @@ -32,6 +32,9 @@ spec: jsonPath: .status.conditions[?(@.type == "ConnectorReady")].reason name: Status type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date name: v1alpha1 schema: openAPIV3Schema: diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml index 242debd27..268d978c1 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml @@ -20,6 +20,9 @@ spec: jsonPath: .status.nameserver.ip name: NameserverIP type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date name: v1alpha1 schema: openAPIV3Schema: diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index f89e38453..154123475 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -18,6 +18,9 @@ spec: jsonPath: .status.conditions[?(@.type == "ProxyClassReady")].reason name: Status type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date name: v1alpha1 schema: openAPIV3Schema: diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index 86e74e441..4b9149e23 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -24,6 +24,9 @@ spec: jsonPath: .spec.type name: Type type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date name: v1alpha1 schema: openAPIV3Schema: diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml index 22bbed810..b07e9f692 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml @@ -24,6 +24,9 @@ spec: jsonPath: .status.devices[?(@.url != "")].url name: URL type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date name: v1alpha1 schema: openAPIV3Schema: diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index dc8d0634c..9bfbd533f 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -61,6 +61,9 @@ spec: jsonPath: .status.conditions[?(@.type == "ConnectorReady")].reason name: Status 
type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date name: v1alpha1 schema: openAPIV3Schema: @@ -312,6 +315,9 @@ spec: jsonPath: .status.nameserver.ip name: NameserverIP type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date name: v1alpha1 schema: openAPIV3Schema: @@ -492,6 +498,9 @@ spec: jsonPath: .status.conditions[?(@.type == "ProxyClassReady")].reason name: Status type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date name: v1alpha1 schema: openAPIV3Schema: @@ -2803,6 +2812,9 @@ spec: jsonPath: .spec.type name: Type type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date name: v1alpha1 schema: openAPIV3Schema: @@ -3013,6 +3025,9 @@ spec: jsonPath: .status.devices[?(@.url != "")].url name: URL type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date name: v1alpha1 schema: openAPIV3Schema: diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index 022258485..a26c9b542 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -24,6 +24,7 @@ var ConnectorKind = "Connector" // +kubebuilder:printcolumn:name="IsExitNode",type="string",JSONPath=`.status.isExitNode`,description="Whether this Connector instance defines an exit node." // +kubebuilder:printcolumn:name="IsAppConnector",type="string",JSONPath=`.status.isAppConnector`,description="Whether this Connector instance is an app connector." // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "ConnectorReady")].reason`,description="Status of the deployed Connector resources." +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // Connector defines a Tailscale node that will be deployed in the cluster. The // node can be configured to act as a Tailscale subnet router and/or a Tailscale diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index 3fde0b37a..899abf096 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -16,6 +16,7 @@ var ProxyClassKind = "ProxyClass" // +kubebuilder:subresource:status // +kubebuilder:resource:scope=Cluster // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "ProxyClassReady")].reason`,description="Status of the ProxyClass." +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // ProxyClass describes a set of configuration parameters that can be applied to // proxy resources created by the Tailscale Kubernetes operator. diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index f95fc58d0..ac87cc6ca 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -14,6 +14,7 @@ import ( // +kubebuilder:resource:scope=Cluster,shortName=pg // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "ProxyGroupReady")].reason`,description="Status of the deployed ProxyGroup resources." // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=`.spec.type`,description="ProxyGroup type." +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // ProxyGroup defines a set of Tailscale devices that will act as proxies. 
// Currently only egress ProxyGroups are supported. diff --git a/k8s-operator/apis/v1alpha1/types_recorder.go b/k8s-operator/apis/v1alpha1/types_recorder.go index a32b8eb93..6e5416ea5 100644 --- a/k8s-operator/apis/v1alpha1/types_recorder.go +++ b/k8s-operator/apis/v1alpha1/types_recorder.go @@ -15,6 +15,7 @@ import ( // +kubebuilder:resource:scope=Cluster,shortName=rec // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "RecorderReady")].reason`,description="Status of the deployed Recorder resources." // +kubebuilder:printcolumn:name="URL",type="string",JSONPath=`.status.devices[?(@.url != "")].url`,description="URL on which the UI is exposed if enabled." +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // Recorder defines a tsrecorder device for recording SSH sessions. By default, // it will store recordings in a local ephemeral volume. If you want to persist diff --git a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go index 60d212279..0178d60ea 100644 --- a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go +++ b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go @@ -18,6 +18,7 @@ var DNSConfigKind = "DNSConfig" // +kubebuilder:subresource:status // +kubebuilder:resource:scope=Cluster,shortName=dc // +kubebuilder:printcolumn:name="NameserverIP",type="string",JSONPath=`.status.nameserver.ip`,description="Service IP address of the nameserver" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // DNSConfig can be deployed to cluster to make a subset of Tailscale MagicDNS // names resolvable by cluster workloads. Use this if: A) you need to refer to From e6eba4efeef40658e8708d8912c290fcb1dcf372 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 14 Apr 2025 10:45:08 -0500 Subject: [PATCH 0755/1708] ipn/{auditlog,ipnext,ipnlocal}: convert the profile-change callback to a profile-state-change callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In this PR, we enable extensions to track changes in the current prefs. These changes can result from a profile switch or from the user or system modifying the current profile’s prefs. Since some extensions may want to distinguish between the two events, while others may treat them similarly, we rename the existing profile-change callback to become a profile-state-change callback and invoke it whenever the current profile or its preferences change. Extensions can still use the sameNode parameter to distinguish between situations where the profile information, including its preferences, has been updated but still represents the same tailnet node, and situations where a switch to a different profile has been made. Having dedicated prefs-change callbacks is being considered, but currently seems redundant. A single profile-state-change callback is easier to maintain. We’ll revisit the idea of adding a separate callback as we progress on extracting existing features from LocalBackend, but the conversion to a profile-state-change callback is intended to be permanent. Finally, we let extensions retrieve the current prefs or profile state (profile info + prefs) at any time using the new CurrentProfileState and CurrentPrefs methods. We also simplify the NewControlClientCallback signature to exclude profile prefs. It’s optional, and extensions can retrieve the current prefs themselves if needed. 
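For example (a sketch; the extension type and its fields are illustrative), an
extension can seed its state from the current profile in Init and keep it
updated via the new callback:

	func (e *extension) Init(h ipnext.Host) error {
		profiles := h.Profiles()
		e.profile, e.prefs = profiles.CurrentProfileState()
		e.unregister = profiles.RegisterProfileStateChangeCallback(
			func(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) {
				e.profile, e.prefs = profile, prefs
				if !sameNode {
					// Switched to a different node; reset per-node state here.
				}
			})
		return nil
	}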
Updates #12614 Updates tailscale/corp#27645 Updates tailscale/corp#26435 Updates tailscale/corp#27502 Signed-off-by: Nick Khyl --- ipn/auditlog/extension.go | 4 +- ipn/ipnext/ipnext.go | 63 ++++++-- ipn/ipnlocal/extension_host.go | 102 +++++++++--- ipn/ipnlocal/extension_host_test.go | 238 +++++++++++++++++++++++----- ipn/ipnlocal/local.go | 7 +- ipn/ipnlocal/profiles.go | 37 ++++- 6 files changed, 370 insertions(+), 81 deletions(-) diff --git a/ipn/auditlog/extension.go b/ipn/auditlog/extension.go index 036d8fd36..3b561b2e5 100644 --- a/ipn/auditlog/extension.go +++ b/ipn/auditlog/extension.go @@ -68,7 +68,7 @@ func (e *extension) Name() string { func (e *extension) Init(h ipnext.Host) error { e.cleanup = []func(){ h.RegisterControlClientCallback(e.controlClientChanged), - h.Profiles().RegisterProfileChangeCallback(e.profileChanged), + h.Profiles().RegisterProfileStateChangeCallback(e.profileChanged), h.RegisterAuditLogProvider(e.getCurrentLogger), } return nil @@ -109,7 +109,7 @@ func (e *extension) startNewLogger(cc controlclient.Client, profileID ipn.Profil return logger, nil } -func (e *extension) controlClientChanged(cc controlclient.Client, profile ipn.LoginProfileView, _ ipn.PrefsView) (cleanup func()) { +func (e *extension) controlClientChanged(cc controlclient.Client, profile ipn.LoginProfileView) (cleanup func()) { logger, err := e.startNewLogger(cc, profile.ID()) e.mu.Lock() e.logger = logger // nil on error diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index f8fd500ce..4c7e978e5 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -219,12 +219,32 @@ type ExtensionServices interface { // ProfileServices provides access to the [Host]'s profile management services, // such as switching profiles and registering profile change callbacks. type ProfileServices interface { + // CurrentProfileState returns read-only views of the current profile + // and its preferences. The returned views are always valid, + // but the profile's [ipn.LoginProfileView.ID] returns "" + // if the profile is new and has not been persisted yet. + // + // The returned views are immutable snapshots of the current profile + // and prefs at the time of the call. The actual state is only guaranteed + // to remain unchanged and match these views for the duration + // of a callback invoked by the host, if used within that callback. + // + // Extensions that need the current profile or prefs at other times + // should typically subscribe to [ProfileStateChangeCallback] + // to be notified if the profile or prefs change after retrieval. + // CurrentProfileState returns both the profile and prefs + // to guarantee that they are consistent with each other. + CurrentProfileState() (ipn.LoginProfileView, ipn.PrefsView) + + // CurrentPrefs is like [CurrentProfileState] but only returns prefs. + CurrentPrefs() ipn.PrefsView + // SwitchToBestProfileAsync asynchronously selects the best profile to use // and switches to it, unless it is already the current profile. // // If an extension needs to know when a profile switch occurs, - // it must use [ProfileServices.RegisterProfileChangeCallback] - // to register a [ProfileChangeCallback]. + // it must use [ProfileServices.RegisterProfileStateChangeCallback] + // to register a [ProfileStateChangeCallback]. 
// // The reason indicates why the profile is being switched, such as due // to a client connecting or disconnecting or a change in the desktop @@ -241,10 +261,14 @@ type ProfileServices interface { // only exist on Windows, and we're moving away from them anyway. RegisterBackgroundProfileResolver(ProfileResolver) (unregister func()) - // RegisterProfileChangeCallback registers a function to be called when the current - // [ipn.LoginProfile] changes. The returned function unregisters the callback. + // RegisterProfileStateChangeCallback registers a function to be called when the current + // [ipn.LoginProfile] or its [ipn.Prefs] change. The returned function unregisters the callback. + // + // To get the initial profile or prefs, use [ProfileServices.CurrentProfileState] + // or [ProfileServices.CurrentPrefs] from the extension's [Extension.Init]. + // // It is a runtime error to register a nil callback. - RegisterProfileChangeCallback(ProfileChangeCallback) (unregister func()) + RegisterProfileStateChangeCallback(ProfileStateChangeCallback) (unregister func()) } // ProfileStore provides read-only access to available login profiles and their preferences. @@ -281,23 +305,30 @@ type AuditLogProvider func() ipnauth.AuditLogFunc // The provided [ProfileStore] can only be used for the duration of the callback. type ProfileResolver func(ProfileStore) ipn.LoginProfileView -// ProfileChangeCallback is a function to be called when the current login profile changes. +// ProfileStateChangeCallback is a function to be called when the current login profile +// or its preferences change. +// // The sameNode parameter indicates whether the profile represents the same node as before, -// such as when only the profile metadata is updated but the node ID remains the same, -// or when a new profile is persisted and assigned an [ipn.ProfileID] for the first time. -// The subscribers can use this information to decide whether to reset their state. +// which is true when: +// - Only the profile's [ipn.Prefs] or metadata (e.g., [tailcfg.UserProfile]) have changed, +// but the node ID and [ipn.ProfileID] remain the same. +// - The profile has been persisted and assigned an [ipn.ProfileID] for the first time, +// so while its node ID and [ipn.ProfileID] have changed, it is still the same profile. +// +// It can be used to decide whether to reset state bound to the current profile or node identity. // // The profile and prefs are always valid, but the profile's [ipn.LoginProfileView.ID] // returns "" if the profile is new and has not been persisted yet. -type ProfileChangeCallback func(_ ipn.LoginProfileView, _ ipn.PrefsView, sameNode bool) +type ProfileStateChangeCallback func(_ ipn.LoginProfileView, _ ipn.PrefsView, sameNode bool) // NewControlClientCallback is a function to be called when a new [controlclient.Client] -// is created and before it is first used. The login profile and prefs represent -// the profile for which the cc is created and are always valid; however, the -// profile's [ipn.LoginProfileView.ID] returns "" if the profile is new -// and has not been persisted yet. If the [controlclient.Client] is created -// due to a profile switch, any registered [ProfileChangeCallback]s are called first. +// is created and before it is first used. The specified profile represents the node +// for which the cc is created and is always valid. Its [ipn.LoginProfileView.ID] +// returns "" if it is a new node whose profile has never been persisted. 
+// +// If the [controlclient.Client] is created due to a profile switch, any registered +// [ProfileStateChangeCallback]s are called first. // // It returns a function to be called when the cc is being shut down, // or nil if no cleanup is needed. -type NewControlClientCallback func(controlclient.Client, ipn.LoginProfileView, ipn.PrefsView) (cleanup func()) +type NewControlClientCallback func(controlclient.Client, ipn.LoginProfileView) (cleanup func()) diff --git a/ipn/ipnlocal/extension_host.go b/ipn/ipnlocal/extension_host.go index 9c6b6d44c..2a8a6a085 100644 --- a/ipn/ipnlocal/extension_host.go +++ b/ipn/ipnlocal/extension_host.go @@ -99,6 +99,13 @@ type ExtensionHost struct { // by the workQueue after all extensions have been initialized. postInitWorkQueue []func(Backend) + // currentProfile is a read-only view of the currently used profile. + // The view is always Valid, but might be of an empty, non-persisted profile. + currentProfile ipn.LoginProfileView + // currentPrefs is a read-only view of the current profile's [ipn.Prefs] + // with any private keys stripped. It is always Valid. + currentPrefs ipn.PrefsView + // auditLoggers are registered [AuditLogProvider]s. // Each provider is called to get an [ipnauth.AuditLogFunc] when an auditable action // is about to be performed. If an audit logger returns an error, the action is denied. @@ -108,11 +115,12 @@ type ExtensionHost struct { backgroundProfileResolvers set.HandleSet[ipnext.ProfileResolver] // newControlClientCbs are the functions to be called when a new control client is created. newControlClientCbs set.HandleSet[ipnext.NewControlClientCallback] - // profileChangeCbs are the callbacks to be invoked when the current login profile changes, - // either because of a profile switch, or because the profile information was updated - // by [LocalBackend.SetControlClientStatus], including when the profile is first populated - // and persisted. - profileChangeCbs set.HandleSet[ipnext.ProfileChangeCallback] + // profileStateChangeCbs are callbacks that are invoked when the current login profile + // or its [ipn.Prefs] change, after those changes have been made. The current login profile + // may be changed either because of a profile switch, or because the profile information + // was updated by [LocalBackend.SetControlClientStatus], including when the profile + // is first populated and persisted. + profileStateChangeCbs set.HandleSet[ipnext.ProfileStateChangeCallback] } // Backend is a subset of [LocalBackend] methods that are used by [ExtensionHost]. @@ -133,6 +141,10 @@ func NewExtensionHost(logf logger.Logf, sys *tsd.System, b Backend, overrideExts host := &ExtensionHost{ logf: logger.WithPrefix(logf, "ipnext: "), workQueue: &execqueue.ExecQueue{}, + // The host starts with an empty profile and default prefs. + // We'll update them once [profileManager] notifies us of the initial profile. + currentProfile: zeroProfile, + currentPrefs: defaultPrefs, } // All operations on the backend must be executed asynchronously by the work queue. @@ -231,7 +243,6 @@ func (h *ExtensionHost) init() { f(b) } }) - } // Extensions implements [ipnext.Host]. @@ -295,6 +306,22 @@ func (h *ExtensionHost) Profiles() ipnext.ProfileServices { return h } +// CurrentProfileState implements [ipnext.ProfileServices]. 
+func (h *ExtensionHost) CurrentProfileState() (ipn.LoginProfileView, ipn.PrefsView) { + if h == nil { + return zeroProfile, defaultPrefs + } + h.mu.Lock() + defer h.mu.Unlock() + return h.currentProfile, h.currentPrefs +} + +// CurrentPrefs implements [ipnext.ProfileServices]. +func (h *ExtensionHost) CurrentPrefs() ipn.PrefsView { + _, prefs := h.CurrentProfileState() + return prefs +} + // SwitchToBestProfileAsync implements [ipnext.ProfileServices]. func (h *ExtensionHost) SwitchToBestProfileAsync(reason string) { if h == nil { @@ -305,8 +332,8 @@ func (h *ExtensionHost) SwitchToBestProfileAsync(reason string) { }) } -// RegisterProfileChangeCallback implements [ipnext.ProfileServices]. -func (h *ExtensionHost) RegisterProfileChangeCallback(cb ipnext.ProfileChangeCallback) (unregister func()) { +// RegisterProfileStateChangeCallback implements [ipnext.ProfileServices]. +func (h *ExtensionHost) RegisterProfileStateChangeCallback(cb ipnext.ProfileStateChangeCallback) (unregister func()) { if h == nil { return func() {} } @@ -315,31 +342,60 @@ func (h *ExtensionHost) RegisterProfileChangeCallback(cb ipnext.ProfileChangeCal } h.mu.Lock() defer h.mu.Unlock() - handle := h.profileChangeCbs.Add(cb) + handle := h.profileStateChangeCbs.Add(cb) return func() { h.mu.Lock() defer h.mu.Unlock() - delete(h.profileChangeCbs, handle) + delete(h.profileStateChangeCbs, handle) } } -// NotifyProfileChange invokes registered profile change callbacks. -// It strips private keys from the [ipn.Prefs] before passing it to the callbacks. +// NotifyProfileChange invokes registered profile state change callbacks +// and updates the current profile and prefs in the host. +// It strips private keys from the [ipn.Prefs] before preserving +// or passing them to the callbacks. func (h *ExtensionHost) NotifyProfileChange(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { if h == nil { return } h.mu.Lock() - cbs := collectValues(h.profileChangeCbs) + // Strip private keys from the prefs before preserving or passing them to the callbacks. + // Extensions should not need them (unless proven otherwise in the future), + // and this is a good way to ensure that they won't accidentally leak them. + prefs = stripKeysFromPrefs(prefs) + // Update the current profile and prefs in the host, + // so we can provide them to the extensions later if they ask. + h.currentPrefs = prefs + h.currentProfile = profile + // Get the callbacks to be invoked. + cbs := collectValues(h.profileStateChangeCbs) h.mu.Unlock() - if cbs != nil { - // Strip private keys from the prefs before passing it to the callbacks. - // Extensions should not need it (unless proven otherwise in the future), - // and this is a good way to ensure that they won't accidentally leak them. - prefs = stripKeysFromPrefs(prefs) - for _, cb := range cbs { - cb(profile, prefs, sameNode) - } + for _, cb := range cbs { + cb(profile, prefs, sameNode) + } +} + +// NotifyProfilePrefsChanged invokes registered profile state change callbacks, +// and updates the current profile and prefs in the host. +// It strips private keys from the [ipn.Prefs] before preserving or using them. +func (h *ExtensionHost) NotifyProfilePrefsChanged(profile ipn.LoginProfileView, oldPrefs, newPrefs ipn.PrefsView) { + if h == nil { + return + } + h.mu.Lock() + // Strip private keys from the prefs before preserving or passing them to the callbacks. 
+ // Extensions should not need them (unless proven otherwise in the future), + // and this is a good way to ensure that they won't accidentally leak them. + newPrefs = stripKeysFromPrefs(newPrefs) + // Update the current profile and prefs in the host, + // so we can provide them to the extensions later if they ask. + h.currentPrefs = newPrefs + h.currentProfile = profile + // Get the callbacks to be invoked. + stateCbs := collectValues(h.profileStateChangeCbs) + h.mu.Unlock() + for _, cb := range stateCbs { + cb(profile, newPrefs, true) } } @@ -410,7 +466,7 @@ func (h *ExtensionHost) RegisterControlClientCallback(cb ipnext.NewControlClient // NotifyNewControlClient invokes all registered control client callbacks. // It returns callbacks to be executed when the control client shuts down. -func (h *ExtensionHost) NotifyNewControlClient(cc controlclient.Client, profile ipn.LoginProfileView, prefs ipn.PrefsView) (ccShutdownCbs []func()) { +func (h *ExtensionHost) NotifyNewControlClient(cc controlclient.Client, profile ipn.LoginProfileView) (ccShutdownCbs []func()) { if h == nil { return nil } @@ -420,7 +476,7 @@ func (h *ExtensionHost) NotifyNewControlClient(cc controlclient.Client, profile if len(cbs) > 0 { ccShutdownCbs = make([]func(), 0, len(cbs)) for _, cb := range cbs { - if shutdown := cb(cc, profile, prefs); shutdown != nil { + if shutdown := cb(cc, profile); shutdown != nil { ccShutdownCbs = append(ccShutdownCbs, shutdown) } } diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go index cefe9339d..ced5867e7 100644 --- a/ipn/ipnlocal/extension_host_test.go +++ b/ipn/ipnlocal/extension_host_test.go @@ -32,6 +32,11 @@ import ( "tailscale.com/util/must" ) +// defaultCmpOpts are the default options used for deepcmp comparisons in tests. +var defaultCmpOpts = []deepcmp.Option{ + cmpopts.EquateComparable(key.NodePublic{}, netip.Addr{}, netip.Prefix{}), +} + // TestExtensionInitShutdown tests that [ExtensionHost] correctly initializes // and shuts down extensions. func TestExtensionInitShutdown(t *testing.T) { @@ -508,56 +513,63 @@ func TestExtensionHostEnqueueBackendOperation(t *testing.T) { } } -// TestExtensionHostProfileChangeCallback verifies that [ExtensionHost] correctly handles the registration, -// invocation, and unregistration of profile change callbacks. It also checks that the callbacks are called -// with the correct arguments and that any private keys are stripped from [ipn.Prefs] before being passed to the callback. -func TestExtensionHostProfileChangeCallback(t *testing.T) { +// TestExtensionHostProfileStateChangeCallback verifies that [ExtensionHost] correctly handles the registration, +// invocation, and unregistration of profile state change callbacks. This includes callbacks triggered by profile changes +// and by changes to the profile's [ipn.Prefs]. It also checks that the callbacks are called with the correct arguments +// and that any private keys are stripped from [ipn.Prefs] before being passed to the callback. +func TestExtensionHostProfileStateChangeCallback(t *testing.T) { t.Parallel() - type profileChange struct { + type stateChange struct { Profile *ipn.LoginProfile Prefs *ipn.Prefs SameNode bool } - // newProfileChange creates a new profile change with deep copies of the profile and prefs. 
- newProfileChange := func(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) profileChange { - return profileChange{ + type prefsChange struct { + Profile *ipn.LoginProfile + Old, New *ipn.Prefs + } + + // newStateChange creates a new [stateChange] with deep copies of the profile and prefs. + newStateChange := func(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) stateChange { + return stateChange{ Profile: profile.AsStruct(), Prefs: prefs.AsStruct(), SameNode: sameNode, } } - // makeProfileChangeAppender returns a callback that appends profile changes to the extension's state. - makeProfileChangeAppender := func(e *testExtension) ipnext.ProfileChangeCallback { + // makeStateChangeAppender returns a callback that appends profile state changes to the extension's state. + makeStateChangeAppender := func(e *testExtension) ipnext.ProfileStateChangeCallback { return func(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { - UpdateExtState(e, "changes", func(changes []profileChange) []profileChange { - return append(changes, newProfileChange(profile, prefs, sameNode)) + UpdateExtState(e, "changes", func(changes []stateChange) []stateChange { + return append(changes, newStateChange(profile, prefs, sameNode)) }) } } - // getProfileChanges returns the profile changes stored in the extension's state. - getProfileChanges := func(e *testExtension) []profileChange { - changes, _ := GetExtStateOk[[]profileChange](e, "changes") + // getStateChanges returns the profile state changes stored in the extension's state. + getStateChanges := func(e *testExtension) []stateChange { + changes, _ := GetExtStateOk[[]stateChange](e, "changes") return changes } tests := []struct { - name string - ext *testExtension - calls []profileChange - wantCalls []profileChange + name string + ext *testExtension + stateCalls []stateChange + prefsCalls []prefsChange + wantChanges []stateChange }{ { // Register the callback for the lifetime of the extension. 
name: "Register/Lifetime", ext: &testExtension{}, - calls: []profileChange{ + stateCalls: []stateChange{ {Profile: &ipn.LoginProfile{ID: "profile-1"}}, {Profile: &ipn.LoginProfile{ID: "profile-2"}}, {Profile: &ipn.LoginProfile{ID: "profile-3"}}, {Profile: &ipn.LoginProfile{ID: "profile-3"}, SameNode: true}, }, - wantCalls: []profileChange{ // all calls are received by the callback + wantChanges: []stateChange{ // all calls are received by the callback {Profile: &ipn.LoginProfile{ID: "profile-1"}}, {Profile: &ipn.LoginProfile{ID: "profile-2"}}, {Profile: &ipn.LoginProfile{ID: "profile-3"}}, @@ -572,19 +584,19 @@ func TestExtensionHostProfileChangeCallback(t *testing.T) { InitHook: func(e *testExtension) error { var unregister func() handler := func(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { - makeProfileChangeAppender(e)(profile, prefs, sameNode) + makeStateChangeAppender(e)(profile, prefs, sameNode) unregister() } - unregister = e.host.Profiles().RegisterProfileChangeCallback(handler) + unregister = e.host.Profiles().RegisterProfileStateChangeCallback(handler) return nil }, }, - calls: []profileChange{ + stateCalls: []stateChange{ {Profile: &ipn.LoginProfile{ID: "profile-1"}}, {Profile: &ipn.LoginProfile{ID: "profile-2"}}, {Profile: &ipn.LoginProfile{ID: "profile-3"}}, }, - wantCalls: []profileChange{ // only the first call is received by the callback + wantChanges: []stateChange{ // only the first call is received by the callback {Profile: &ipn.LoginProfile{ID: "profile-1"}}, }, }, @@ -592,7 +604,7 @@ func TestExtensionHostProfileChangeCallback(t *testing.T) { // Ensure that ipn.Prefs are passed to the callback. name: "CheckPrefs", ext: &testExtension{}, - calls: []profileChange{{ + stateCalls: []stateChange{{ Profile: &ipn.LoginProfile{ID: "profile-1"}, Prefs: &ipn.Prefs{ WantRunning: true, @@ -603,7 +615,7 @@ func TestExtensionHostProfileChangeCallback(t *testing.T) { }, }, }}, - wantCalls: []profileChange{{ + wantChanges: []stateChange{{ Profile: &ipn.LoginProfile{ID: "profile-1"}, Prefs: &ipn.Prefs{ WantRunning: true, @@ -619,7 +631,7 @@ func TestExtensionHostProfileChangeCallback(t *testing.T) { // Ensure that private keys are stripped from persist.Persist shared with extensions. name: "StripPrivateKeys", ext: &testExtension{}, - calls: []profileChange{{ + stateCalls: []stateChange{{ Profile: &ipn.LoginProfile{ID: "profile-1"}, Prefs: &ipn.Prefs{ Persist: &persist.Persist{ @@ -636,7 +648,7 @@ func TestExtensionHostProfileChangeCallback(t *testing.T) { }, }, }}, - wantCalls: []profileChange{{ + wantChanges: []stateChange{{ Profile: &ipn.LoginProfile{ID: "profile-1"}, Prefs: &ipn.Prefs{ Persist: &persist.Persist{ @@ -654,6 +666,100 @@ func TestExtensionHostProfileChangeCallback(t *testing.T) { }, }}, }, + { + // Ensure that profile state callbacks are also invoked when prefs (rather than profile) change. 
+ name: "PrefsChange", + ext: &testExtension{}, + prefsCalls: []prefsChange{ + { + Profile: &ipn.LoginProfile{ID: "profile-1"}, + Old: &ipn.Prefs{WantRunning: false, LoggedOut: true}, + New: &ipn.Prefs{WantRunning: true, LoggedOut: false}, + }, + { + Profile: &ipn.LoginProfile{ID: "profile-1"}, + Old: &ipn.Prefs{AdvertiseRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.1.0/24")}}, + New: &ipn.Prefs{AdvertiseRoutes: []netip.Prefix{netip.MustParsePrefix("10.10.10.0/24")}}, + }, + }, + wantChanges: []stateChange{ + { + Profile: &ipn.LoginProfile{ID: "profile-1"}, + Prefs: &ipn.Prefs{WantRunning: true, LoggedOut: false}, + SameNode: true, // must be true for prefs changes + }, + { + Profile: &ipn.LoginProfile{ID: "profile-1"}, + Prefs: &ipn.Prefs{AdvertiseRoutes: []netip.Prefix{netip.MustParsePrefix("10.10.10.0/24")}}, + SameNode: true, // must be true for prefs changes + }, + }, + }, + { + // Ensure that private keys are stripped from prefs when state change callback + // is invoked by prefs change. + name: "PrefsChange/StripPrivateKeys", + ext: &testExtension{}, + prefsCalls: []prefsChange{ + { + Profile: &ipn.LoginProfile{ID: "profile-1"}, + Old: &ipn.Prefs{ + WantRunning: false, + LoggedOut: true, + Persist: &persist.Persist{ + NodeID: "12345", + PrivateNodeKey: key.NewNode(), + OldPrivateNodeKey: key.NewNode(), + NetworkLockKey: key.NewNLPrivate(), + UserProfile: tailcfg.UserProfile{ + ID: 12345, + LoginName: "test@example.com", + DisplayName: "Test User", + ProfilePicURL: "https://example.com/profile.png", + }, + }, + }, + New: &ipn.Prefs{ + WantRunning: true, + LoggedOut: false, + Persist: &persist.Persist{ + NodeID: "12345", + PrivateNodeKey: key.NewNode(), + OldPrivateNodeKey: key.NewNode(), + NetworkLockKey: key.NewNLPrivate(), + UserProfile: tailcfg.UserProfile{ + ID: 12345, + LoginName: "test@example.com", + DisplayName: "Test User", + ProfilePicURL: "https://example.com/profile.png", + }, + }, + }, + }, + }, + wantChanges: []stateChange{ + { + Profile: &ipn.LoginProfile{ID: "profile-1"}, + Prefs: &ipn.Prefs{ + WantRunning: true, + LoggedOut: false, + Persist: &persist.Persist{ + NodeID: "12345", + PrivateNodeKey: key.NodePrivate{}, // stripped + OldPrivateNodeKey: key.NodePrivate{}, // stripped + NetworkLockKey: key.NLPrivate{}, // stripped + UserProfile: tailcfg.UserProfile{ + ID: 12345, + LoginName: "test@example.com", + DisplayName: "Test User", + ProfilePicURL: "https://example.com/profile.png", + }, + }, + }, + SameNode: true, // must be true for prefs changes + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -663,26 +769,60 @@ func TestExtensionHostProfileChangeCallback(t *testing.T) { if tt.ext.InitHook == nil { tt.ext.InitHook = func(e *testExtension) error { // Create and register the callback on init. 
- handler := makeProfileChangeAppender(e) - e.Cleanup(e.host.Profiles().RegisterProfileChangeCallback(handler)) + handler := makeStateChangeAppender(e) + e.Cleanup(e.host.Profiles().RegisterProfileStateChangeCallback(handler)) return nil } } h := newExtensionHostForTest(t, &testBackend{}, true, tt.ext) - for _, call := range tt.calls { + for _, call := range tt.stateCalls { h.NotifyProfileChange(call.Profile.View(), call.Prefs.View(), call.SameNode) } - opts := []deepcmp.Option{ - cmpopts.EquateComparable(key.NodePublic{}, netip.Addr{}, netip.Prefix{}), + for _, call := range tt.prefsCalls { + h.NotifyProfilePrefsChanged(call.Profile.View(), call.Old.View(), call.New.View()) } - if diff := deepcmp.Diff(tt.wantCalls, getProfileChanges(tt.ext), opts...); diff != "" { - t.Errorf("ProfileChange callbacks: (-want +got): %v", diff) + if diff := deepcmp.Diff(tt.wantChanges, getStateChanges(tt.ext), defaultCmpOpts...); diff != "" { + t.Errorf("StateChange callbacks: (-want +got): %v", diff) } }) } } +// TestCurrentProfileState tests that the current profile and prefs are correctly +// initialized and updated when the host is notified of changes. +func TestCurrentProfileState(t *testing.T) { + h := newExtensionHostForTest[ipnext.Extension](t, &testBackend{}, false) + + // The initial profile and prefs should be valid and set to the default values. + gotProfile, gotPrefs := h.Profiles().CurrentProfileState() + checkViewsEqual(t, "Initial profile (from state)", gotProfile, zeroProfile) + checkViewsEqual(t, "Initial prefs (from state)", gotPrefs, defaultPrefs) + gotPrefs = h.Profiles().CurrentPrefs() // same when we only ask for prefs + checkViewsEqual(t, "Initial prefs (direct)", gotPrefs, defaultPrefs) + + // Create a new profile and prefs, and notify the host of the change. + profile := &ipn.LoginProfile{ID: "profile-A"} + prefsV1 := &ipn.Prefs{ProfileName: "Prefs V1", WantRunning: true} + h.NotifyProfileChange(profile.View(), prefsV1.View(), false) + // The current profile and prefs should be updated. + gotProfile, gotPrefs = h.Profiles().CurrentProfileState() + checkViewsEqual(t, "Changed profile (from state)", gotProfile, profile.View()) + checkViewsEqual(t, "New prefs (from state)", gotPrefs, prefsV1.View()) + gotPrefs = h.Profiles().CurrentPrefs() + checkViewsEqual(t, "New prefs (direct)", gotPrefs, prefsV1.View()) + + // Notify the host of a change to the profile's prefs. + prefsV2 := &ipn.Prefs{ProfileName: "Prefs V2", WantRunning: false} + h.NotifyProfilePrefsChanged(profile.View(), prefsV1.View(), prefsV2.View()) + // The current prefs should be updated. + gotProfile, gotPrefs = h.Profiles().CurrentProfileState() + checkViewsEqual(t, "Unchanged profile (from state)", gotProfile, profile.View()) + checkViewsEqual(t, "Changed (from state)", gotPrefs, prefsV2.View()) + gotPrefs = h.Profiles().CurrentPrefs() + checkViewsEqual(t, "Changed prefs (direct)", gotPrefs, prefsV2.View()) +} + // TestBackgroundProfileResolver tests that the background profile resolvers // are correctly registered, unregistered and invoked by the [ExtensionHost]. func TestBackgroundProfileResolver(t *testing.T) { @@ -1231,3 +1371,29 @@ func (b *testBackend) SwitchToBestProfile(reason string) { b.switchToBestProfileHook(reason) } } + +// equatableView is an interface implemented by views +// that can be compared for equality. +type equatableView[T any] interface { + Valid() bool + Equals(other T) bool +} + +// checkViewsEqual checks that the two views are equal +// and fails the test if they are not. 
The prefix is used +// to format the error message. +func checkViewsEqual[T equatableView[T]](t *testing.T, prefix string, got, want T) { + t.Helper() + switch { + case got.Equals(want): + return + case got.Valid() && want.Valid(): + t.Errorf("%s: got %v; want %v", prefix, got, want) + case got.Valid() && !want.Valid(): + t.Errorf("%s: got %v; want invalid", prefix, got) + case !got.Valid() && want.Valid(): + t.Errorf("%s: got invalid; want %v", prefix, want) + default: + panic("unreachable") + } +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index e21403fbe..45daefda8 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -530,6 +530,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo if b.extHost, err = NewExtensionHost(logf, sys, b); err != nil { return nil, fmt.Errorf("failed to create extension host: %w", err) } + b.pm.SetExtensionHost(b.extHost) if b.unregisterSysPolicyWatch, err = b.registerSysPolicyWatch(); err != nil { return nil, err @@ -1653,8 +1654,8 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control // Theoretically, a completed login could also result in a switch to a different existing // profile representing a different node (see tailscale/tailscale#8816). // - // Let's check if the current profile has changed, and invoke all registered [ProfileChangeCallback] - // if necessary. + // Let's check if the current profile has changed, and invoke all registered + // [ipnext.ProfileStateChangeCallback] if necessary. if cp := b.pm.CurrentProfile(); *cp.AsStruct() != *profile.AsStruct() { // If the profile ID was empty before SetPrefs, it's a new profile // and the user has just completed a login for the first time. @@ -2408,7 +2409,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { if err != nil { return err } - ccShutdownCbs = b.extHost.NotifyNewControlClient(cc, b.pm.CurrentProfile(), prefs) + ccShutdownCbs = b.extHost.NotifyNewControlClient(cc, b.pm.CurrentProfile()) b.setControlClientLocked(cc) endpoints := b.endpoints diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 057fe2aae..eb01da705 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -42,6 +42,19 @@ type profileManager struct { knownProfiles map[ipn.ProfileID]ipn.LoginProfileView // always non-nil currentProfile ipn.LoginProfileView // always Valid. prefs ipn.PrefsView // always Valid. + + // extHost is the bridge between [profileManager] and the registered [ipnext.Extension]s. + // It may be nil in tests. A nil pointer is a valid, no-op host. + extHost *ExtensionHost +} + +// SetExtensionHost sets the [ExtensionHost] for the [profileManager]. +// The specified host will be notified about profile and prefs changes +// and will immediately be notified about the current profile and prefs. +// A nil host is a valid, no-op host. 
+func (pm *profileManager) SetExtensionHost(host *ExtensionHost) { + pm.extHost = host + host.NotifyProfileChange(pm.currentProfile, pm.prefs, false) } func (pm *profileManager) dlogf(format string, args ...any) { @@ -321,7 +334,6 @@ func (pm *profileManager) SetPrefs(prefsIn ipn.PrefsView, np ipn.NetworkProfile) return err } return pm.setProfileAsUserDefault(cp) - } // setProfilePrefs is like [profileManager.SetPrefs], but sets prefs for the specified [ipn.LoginProfile], @@ -419,7 +431,27 @@ func newUnusedID(knownProfiles map[ipn.ProfileID]ipn.LoginProfileView) (ipn.Prof func (pm *profileManager) setProfilePrefsNoPermCheck(profile ipn.LoginProfileView, clonedPrefs ipn.PrefsView) error { isCurrentProfile := pm.currentProfile == profile if isCurrentProfile { + oldPrefs := pm.prefs pm.prefs = clonedPrefs + + // Sadly, profile prefs can be changed in multiple ways. + // It's pretty chaotic, and in many cases callers use + // unexported methods of the profile manager instead of + // going through [LocalBackend.setPrefsLockedOnEntry] + // or at least using [profileManager.SetPrefs]. + // + // While we should definitely clean this up to improve + // the overall structure of how prefs are set, which would + // also address current and future conflicts, such as + // competing features changing the same prefs, this method + // is currently the central place where we can detect all + // changes to the current profile's prefs. + // + // That said, regardless of the cleanup, we might want + // to keep the profileManager responsible for invoking + // profile- and prefs-related callbacks. + pm.extHost.NotifyProfilePrefsChanged(pm.currentProfile, oldPrefs, clonedPrefs) + pm.updateHealth() } if profile.Key() != "" { @@ -705,6 +737,9 @@ func (pm *profileManager) SwitchToNewProfileForUser(uid ipn.WindowsUserID) { pm.SwitchToProfile(pm.NewProfileForUser(uid)) } +// zeroProfile is a read-only view of a new, empty profile that is not persisted to the store. +var zeroProfile = (&ipn.LoginProfile{}).View() + // NewProfileForUser creates a new profile for the specified user and returns a read-only view of it. // It neither switches to the new profile nor persists it to the store. func (pm *profileManager) NewProfileForUser(uid ipn.WindowsUserID) ipn.LoginProfileView { From 34b97a3c75bde9342395e5d17395b2ab8382ac17 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Tue, 15 Apr 2025 14:01:53 -0700 Subject: [PATCH 0756/1708] ipn/ipnlocal: fix TestOnTailnetDefaultAutoUpdate on macOS (#15697) https://github.com/tailscale/tailscale/pull/15395 changed the logic to skip `EditPrefs` when the platform doesn't support auto-updates. But the old logic would only fail `EditPrefs` if the auto-update value was `true`. If it was `false`, `EditPrefs` would succeed and store `false` in prefs. The new logic will keep the value `unset` even if the tailnet default is `false`. 
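In the test below, this means the expected value can no longer depend on which
boolean the tailnet default carries. Roughly (a fragment of the existing
table-driven test, using its tt.before field and the clientupdate helper):

    if !clientupdate.CanAutoUpdate() {
    	// Previously this adjustment only applied when want.EqualBool(true),
    	// i.e. an attempted "false" write was still expected to stick. Now
    	// EditPrefs is skipped for both values, so the stored pref always
    	// stays at whatever it was before.
    	want = tt.before
    }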
Fixes #15691 Signed-off-by: Andrew Lytvynov --- ipn/ipnlocal/local_test.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 84e7cc209..e2a03dcd0 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -15,7 +15,6 @@ import ( "os" "path/filepath" "reflect" - "runtime" "slices" "strings" "sync" @@ -2597,9 +2596,6 @@ func TestPreferencePolicyInfo(t *testing.T) { } func TestOnTailnetDefaultAutoUpdate(t *testing.T) { - if runtime.GOOS == "darwin" { - t.Skip("test known broken on macOS; see https://github.com/tailscale/tailscale/issues/15691") - } tests := []struct { before, after opt.Bool container opt.Bool @@ -2669,7 +2665,7 @@ func TestOnTailnetDefaultAutoUpdate(t *testing.T) { // On platforms that don't support auto-update we can never // transition to auto-updates being enabled. The value should // remain unchanged after onTailnetDefaultAutoUpdate. - if !clientupdate.CanAutoUpdate() && want.EqualBool(true) { + if !clientupdate.CanAutoUpdate() { want = tt.before } if got := b.pm.CurrentPrefs().AutoUpdate().Apply; got != want { From 450bcbcb08db6b1b319623dda6b165082ec26a97 Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Tue, 15 Apr 2025 16:51:21 -0600 Subject: [PATCH 0757/1708] node.rev: bump to latest 22.x LTS release (#15652) Bump to latest 22.x LTS release for node as the 18.x line is going EOL this month. Updates https://github.com/tailscale/corp/issues/27737 Signed-off-by: Mario Minardi --- client/web/package.json | 4 ++-- client/web/yarn.lock | 18 +++++++++--------- tool/node.rev | 2 +- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/client/web/package.json b/client/web/package.json index 4b3afb1df..c45f7d6a8 100644 --- a/client/web/package.json +++ b/client/web/package.json @@ -3,7 +3,7 @@ "version": "0.0.1", "license": "BSD-3-Clause", "engines": { - "node": "18.20.4", + "node": "22.14.0", "yarn": "1.22.19" }, "type": "module", @@ -20,7 +20,7 @@ "zustand": "^4.4.7" }, "devDependencies": { - "@types/node": "^18.16.1", + "@types/node": "^22.14.0", "@types/react": "^18.0.20", "@types/react-dom": "^18.0.6", "@vitejs/plugin-react-swc": "^3.6.0", diff --git a/client/web/yarn.lock b/client/web/yarn.lock index 2c8fca5e5..a9b2ae876 100644 --- a/client/web/yarn.lock +++ b/client/web/yarn.lock @@ -1880,12 +1880,12 @@ resolved "https://registry.yarnpkg.com/@types/json5/-/json5-0.0.29.tgz#ee28707ae94e11d2b827bcbe5270bcea7f3e71ee" integrity sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ== -"@types/node@^18.16.1": - version "18.19.18" - resolved "https://registry.yarnpkg.com/@types/node/-/node-18.19.18.tgz#7526471b28828d1fef1f7e4960fb9477e6e4369c" - integrity sha512-80CP7B8y4PzZF0GWx15/gVWRrB5y/bIjNI84NK3cmQJu0WZwvmj2WMA5LcofQFVfLqqCSp545+U2LsrVzX36Zg== +"@types/node@^22.14.0": + version "22.14.0" + resolved "https://registry.yarnpkg.com/@types/node/-/node-22.14.0.tgz#d3bfa3936fef0dbacd79ea3eb17d521c628bb47e" + integrity sha512-Kmpl+z84ILoG+3T/zQFyAJsU6EPTmOCj8/2+83fSN6djd6I4o7uOuGIH6vq3PrjY5BGitSbFuMN18j3iknubbA== dependencies: - undici-types "~5.26.4" + undici-types "~6.21.0" "@types/parse-json@^4.0.0": version "4.0.2" @@ -5124,10 +5124,10 @@ unbox-primitive@^1.0.2: has-symbols "^1.0.3" which-boxed-primitive "^1.0.2" -undici-types@~5.26.4: - version "5.26.5" - resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617" - integrity 
sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA== +undici-types@~6.21.0: + version "6.21.0" + resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.21.0.tgz#691d00af3909be93a7faa13be61b3a5b50ef12cb" + integrity sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ== unicode-canonical-property-names-ecmascript@^2.0.0: version "2.0.0" diff --git a/tool/node.rev b/tool/node.rev index 17719ce25..7d41c735d 100644 --- a/tool/node.rev +++ b/tool/node.rev @@ -1 +1 @@ -18.20.4 +22.14.0 From 37f5fd2ec14b1c526b0575285d8451f58f1ccc21 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 16 Apr 2025 09:50:48 -0700 Subject: [PATCH 0758/1708] feature/{condregister,relayserver}: implement the skeleton for the relayserver feature (#15699) This feature is "registered" as an ipnlocal.Extension, and conditionally linked depending on GOOS and ts_omit_relayserver build tag. The feature is not linked on iOS in attempt to limit the impact to binary size and resulting effect of pushing up against NetworkExtension limits. Eventually we will want to support the relay server on iOS, specifically on the Apple TV. Apple TVs are well-fitted to act as underlay relay servers as they are effectively always-on servers. This skeleton begins to tie a PeerAPI endpoint to a net/udprelay.Server. The PeerAPI endpoint is currently no-op as extension.shouldRunRelayServer() always returns false. Follow-up commits will implement extension.shouldRunRelayServer(). Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- build_dist.sh | 2 +- cmd/k8s-operator/depaware.txt | 4 +- cmd/tailscaled/depaware.txt | 2 + feature/condregister/maybe_relayserver.go | 8 ++ feature/relayserver/relayserver.go | 154 ++++++++++++++++++++++ 5 files changed, 168 insertions(+), 2 deletions(-) create mode 100644 feature/condregister/maybe_relayserver.go create mode 100644 feature/relayserver/relayserver.go diff --git a/build_dist.sh b/build_dist.sh index c01670398..5b1ca75b2 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,7 +41,7 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" - tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture" + tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver" ;; --box) if [ ! 
-z "${TAGS:-}" ]; then diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 416265188..7af6a7c10 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -806,6 +806,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/feature from tailscale.com/feature/wakeonlan+ tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/tsnet + tailscale.com/feature/relayserver from tailscale.com/feature/condregister L tailscale.com/feature/tap from tailscale.com/feature/condregister tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ @@ -816,7 +817,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ - tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet+ @@ -883,6 +884,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/tsd+ + tailscale.com/net/udprelay from tailscale.com/feature/relayserver tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 9cdebbae1..28a597065 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -264,6 +264,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature from tailscale.com/feature/wakeonlan+ tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/relayserver from tailscale.com/feature/condregister L tailscale.com/feature/tap from tailscale.com/feature/condregister tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ @@ -334,6 +335,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ + tailscale.com/net/udprelay from tailscale.com/feature/relayserver tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/feature/condregister/maybe_relayserver.go b/feature/condregister/maybe_relayserver.go new file mode 100644 index 000000000..3360dd062 --- /dev/null +++ b/feature/condregister/maybe_relayserver.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios && !ts_omit_relayserver + +package condregister + +import _ 
"tailscale.com/feature/relayserver" diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go new file mode 100644 index 000000000..9cf776661 --- /dev/null +++ b/feature/relayserver/relayserver.go @@ -0,0 +1,154 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package relayserver registers the relay server feature and implements its +// associated ipnext.Extension. +package relayserver + +import ( + "encoding/json" + "errors" + "io" + "net/http" + "net/netip" + "sync" + + "tailscale.com/feature" + "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/net/udprelay" + "tailscale.com/tailcfg" + "tailscale.com/tsd" + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/util/httpm" +) + +// featureName is the name of the feature implemented by this package. +// It is also the [extension] name and the log prefix. +const featureName = "relayserver" + +func init() { + feature.Register(featureName) + ipnext.RegisterExtension(featureName, newExtension) + ipnlocal.RegisterPeerAPIHandler("/v0/relay/endpoint", handlePeerAPIRelayAllocateEndpoint) +} + +// newExtension is an [ipnext.NewExtensionFn] that creates a new relay server +// extension. It is registered with [ipnext.RegisterExtension] if the package is +// imported. +func newExtension(logf logger.Logf, _ *tsd.System) (ipnext.Extension, error) { + return &extension{logf: logger.WithPrefix(logf, featureName+": ")}, nil +} + +// extension is an [ipnext.Extension] managing the relay server on platforms +// that import this package. +type extension struct { + logf logger.Logf + + mu sync.Mutex // guards the following fields + shutdown bool + port int + server *udprelay.Server // lazily initialized +} + +// Name implements [ipnext.Extension]. +func (e *extension) Name() string { + return featureName +} + +// Init implements [ipnext.Extension] by registering callbacks and providers +// for the duration of the extension's lifetime. +func (e *extension) Init(_ ipnext.Host) error { + return nil +} + +// Shutdown implements [ipnlocal.Extension]. +func (e *extension) Shutdown() error { + e.mu.Lock() + defer e.mu.Unlock() + e.shutdown = true + if e.server != nil { + e.server.Close() + e.server = nil + } + return nil +} + +func (e *extension) shouldRunRelayServer() bool { + // TODO(jwhited): consider: + // 1. tailcfg.NodeAttrRelayServer + // 2. ipn.Prefs.RelayServerPort + // 3. envknob.UseWIPCode() + // 4. 
e.shutdown + return false +} + +func (e *extension) relayServerOrInit() (*udprelay.Server, error) { + e.mu.Lock() + defer e.mu.Unlock() + if e.shutdown { + return nil, errors.New("relay server is shutdown") + } + if e.server != nil { + return e.server, nil + } + var err error + e.server, _, err = udprelay.NewServer(e.port, []netip.Addr{netip.MustParseAddr("127.0.0.1")}) + if err != nil { + return nil, err + } + return e.server, nil +} + +func handlePeerAPIRelayAllocateEndpoint(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + // TODO(jwhited): log errors + e, ok := h.LocalBackend().FindExtensionByName(featureName).(*extension) + if !ok { + http.Error(w, "relay failed to initialize", http.StatusServiceUnavailable) + return + } + + if !e.shouldRunRelayServer() { + http.Error(w, "relay not enabled", http.StatusNotFound) + return + } + + if !h.PeerCaps().HasCapability(tailcfg.PeerCapabilityRelay) { + http.Error(w, "relay not permitted", http.StatusForbidden) + return + } + + if r.Method != httpm.POST { + http.Error(w, "only POST method is allowed", http.StatusMethodNotAllowed) + return + } + + var allocateEndpointReq struct { + DiscoKeys []key.DiscoPublic + } + err := json.NewDecoder(io.LimitReader(r.Body, 512)).Decode(&allocateEndpointReq) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if len(allocateEndpointReq.DiscoKeys) != 2 { + http.Error(w, "2 disco public keys must be supplied", http.StatusBadRequest) + return + } + + rs, err := e.relayServerOrInit() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + ep, err := rs.AllocateEndpoint(allocateEndpointReq.DiscoKeys[0], allocateEndpointReq.DiscoKeys[1]) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + err = json.NewEncoder(w).Encode(&ep) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} From a3cc7123ff04a8d9dad438280a2287dfeaedf27d Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 19 Mar 2025 08:34:27 -0700 Subject: [PATCH 0759/1708] tsweb: don't hook up pprof handlers in javascript builds Updates #15160 Signed-off-by: David Anderson --- tsweb/debug.go | 12 +----------- tsweb/pprof_default.go | 24 ++++++++++++++++++++++++ tsweb/pprof_js.go | 8 ++++++++ tsweb/tsweb.go | 1 - 4 files changed, 33 insertions(+), 12 deletions(-) create mode 100644 tsweb/pprof_default.go create mode 100644 tsweb/pprof_js.go diff --git a/tsweb/debug.go b/tsweb/debug.go index ac1981999..4c0fabaff 100644 --- a/tsweb/debug.go +++ b/tsweb/debug.go @@ -9,7 +9,6 @@ import ( "html" "io" "net/http" - "net/http/pprof" "net/url" "os" "runtime" @@ -64,16 +63,7 @@ func Debugger(mux *http.ServeMux) *DebugHandler { ret.Handle("varz", "Metrics (Prometheus)", http.HandlerFunc(varz.Handler)) } - // pprof.Index serves everything that runtime/pprof.Lookup finds: - // goroutine, threadcreate, heap, allocs, block, mutex - ret.Handle("pprof/", "pprof (index)", http.HandlerFunc(pprof.Index)) - // But register the other ones from net/http/pprof directly: - ret.HandleSilent("pprof/cmdline", http.HandlerFunc(pprof.Cmdline)) - ret.HandleSilent("pprof/profile", http.HandlerFunc(pprof.Profile)) - ret.HandleSilent("pprof/symbol", http.HandlerFunc(pprof.Symbol)) - ret.HandleSilent("pprof/trace", http.HandlerFunc(pprof.Trace)) - ret.URL("/debug/pprof/goroutine?debug=1", "Goroutines (collapsed)") - ret.URL("/debug/pprof/goroutine?debug=2", "Goroutines (full)") + addProfilingHandlers(ret) ret.Handle("gc", "force 
GC", http.HandlerFunc(gcHandler)) hostname, err := os.Hostname() if err == nil { diff --git a/tsweb/pprof_default.go b/tsweb/pprof_default.go new file mode 100644 index 000000000..4fb417c0e --- /dev/null +++ b/tsweb/pprof_default.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !js + +package tsweb + +import ( + "net/http" + "net/http/pprof" +) + +func addProfilingHandlers(d *DebugHandler) { + // pprof.Index serves everything that runtime/pprof.Lookup finds: + // goroutine, threadcreate, heap, allocs, block, mutex + d.Handle("pprof/", "pprof (index)", http.HandlerFunc(pprof.Index)) + // But register the other ones from net/http/pprof directly: + d.HandleSilent("pprof/cmdline", http.HandlerFunc(pprof.Cmdline)) + d.HandleSilent("pprof/profile", http.HandlerFunc(pprof.Profile)) + d.HandleSilent("pprof/symbol", http.HandlerFunc(pprof.Symbol)) + d.HandleSilent("pprof/trace", http.HandlerFunc(pprof.Trace)) + d.URL("/debug/pprof/goroutine?debug=1", "Goroutines (collapsed)") + d.URL("/debug/pprof/goroutine?debug=2", "Goroutines (full)") +} diff --git a/tsweb/pprof_js.go b/tsweb/pprof_js.go new file mode 100644 index 000000000..dedb5b9b4 --- /dev/null +++ b/tsweb/pprof_js.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tsweb + +func addProfilingHandlers(d *DebugHandler) { + // No pprof in js builds, pprof doesn't work and bloats the build. +} diff --git a/tsweb/tsweb.go b/tsweb/tsweb.go index 9ddb3fad5..119fed2e6 100644 --- a/tsweb/tsweb.go +++ b/tsweb/tsweb.go @@ -15,7 +15,6 @@ import ( "io" "net" "net/http" - _ "net/http/pprof" "net/netip" "net/url" "os" From 6b8bbb4c3756493bd64325637ab8e4099869dd57 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Fri, 7 Mar 2025 09:49:09 -0800 Subject: [PATCH 0760/1708] tsd: wire up the event bus to tailscaled Updates #15160 Signed-off-by: David Anderson --- cmd/k8s-operator/depaware.txt | 10 ++++++++-- cmd/tailscaled/depaware.txt | 8 +++++++- cmd/tailscaled/tailscaled.go | 5 +++++ tsd/tsd.go | 4 ++++ tstest/integration/tailscaled_deps_test_darwin.go | 1 + tstest/integration/tailscaled_deps_test_freebsd.go | 1 + tstest/integration/tailscaled_deps_test_linux.go | 1 + tstest/integration/tailscaled_deps_test_openbsd.go | 1 + tstest/integration/tailscaled_deps_test_windows.go | 1 + tsweb/pprof_default.go | 2 +- tsweb/pprof_js.go | 2 ++ 11 files changed, 32 insertions(+), 4 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 7af6a7c10..0f3d33214 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -82,6 +82,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus + github.com/coder/websocket from tailscale.com/util/eventbus + github.com/coder/websocket/internal/errd from github.com/coder/websocket + github.com/coder/websocket/internal/util from github.com/coder/websocket + github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw 💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ @@ -905,7 +909,8 @@ 
tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/tstime from tailscale.com/cmd/k8s-operator+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/derp+ - tailscale.com/tsweb/varz from tailscale.com/util/usermetric + tailscale.com/tsweb from tailscale.com/util/eventbus + tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal tailscale.com/types/bools from tailscale.com/tsnet tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ @@ -934,6 +939,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ tailscale.com/util/dnsname from tailscale.com/appc+ + tailscale.com/util/eventbus from tailscale.com/tsd tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ @@ -1151,7 +1157,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ hash/fnv from google.golang.org/protobuf/internal/detrand hash/maphash from go4.org/mem html from html/template+ - html/template from github.com/gorilla/csrf + html/template from github.com/gorilla/csrf+ internal/abi from crypto/x509/internal/macos+ internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 28a597065..5a77bad72 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -81,6 +81,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm + github.com/coder/websocket from tailscale.com/util/eventbus + github.com/coder/websocket/internal/errd from github.com/coder/websocket + github.com/coder/websocket/internal/util from github.com/coder/websocket + github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw LD 💣 github.com/creack/pty from tailscale.com/ssh/tailssh W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ @@ -357,6 +361,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ @@ -386,6 +391,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ tailscale.com/util/dnsname from tailscale.com/appc+ + tailscale.com/util/eventbus from tailscale.com/cmd/tailscaled+ tailscale.com/util/execqueue from tailscale.com/control/controlclient+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal 
tailscale.com/util/groupmember from tailscale.com/client/web+ @@ -591,7 +597,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem html from html/template+ - html/template from github.com/gorilla/csrf + html/template from github.com/gorilla/csrf+ internal/abi from crypto/x509/internal/macos+ internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 2d4aa4358..480acb9ca 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -62,6 +62,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/multierr" "tailscale.com/util/osshare" "tailscale.com/version" @@ -376,6 +377,10 @@ func run() (err error) { sys := new(tsd.System) + // Install an event bus as early as possible, so that it's + // available universally when setting up everything else. + sys.Set(eventbus.New()) + // Parse config, if specified, to fail early if it's invalid. var conf *conffile.Config if args.confFile != "" { diff --git a/tsd/tsd.go b/tsd/tsd.go index acd09560c..0d1f49809 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -32,6 +32,7 @@ import ( "tailscale.com/net/tstun" "tailscale.com/proxymap" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus" "tailscale.com/util/usermetric" "tailscale.com/wgengine" "tailscale.com/wgengine/magicsock" @@ -40,6 +41,7 @@ import ( // System contains all the subsystems of a Tailscale node (tailscaled, etc.) type System struct { + Bus SubSystem[*eventbus.Bus] Dialer SubSystem[*tsdial.Dialer] DNSManager SubSystem[*dns.Manager] // can get its *resolver.Resolver from DNSManager.Resolver Engine SubSystem[wgengine.Engine] @@ -82,6 +84,8 @@ type NetstackImpl interface { // has already been set. 
func (s *System) Set(v any) { switch v := v.(type) { + case *eventbus.Bus: + s.Bus.Set(v) case *netmon.Monitor: s.NetMon.Set(v) case *dns.Manager: diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index 470085f5e..321ba2566 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -48,6 +48,7 @@ import ( _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" + _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/version" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index 470085f5e..321ba2566 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -48,6 +48,7 @@ import ( _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" + _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/version" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index 470085f5e..321ba2566 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -48,6 +48,7 @@ import ( _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" + _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/version" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index 470085f5e..321ba2566 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -48,6 +48,7 @@ import ( _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" + _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/version" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index 30ce0892e..79e2e05a7 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -57,6 +57,7 @@ import ( _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" + _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osdiag" _ "tailscale.com/util/osshare" diff --git a/tsweb/pprof_default.go b/tsweb/pprof_default.go index 4fb417c0e..7d22a6161 100644 --- a/tsweb/pprof_default.go +++ b/tsweb/pprof_default.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !js +//go:build !js && !wasm package tsweb diff --git a/tsweb/pprof_js.go b/tsweb/pprof_js.go index dedb5b9b4..1212b37e8 100644 --- a/tsweb/pprof_js.go +++ b/tsweb/pprof_js.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build js && wasm + package tsweb func addProfilingHandlers(d *DebugHandler) { From ffb22ee353d425bb7ba4a6b12739f469d23ad52c Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Wed, 19 Mar 2025 09:47:06 -0700 Subject: [PATCH 0761/1708] all: construct new System values with an event bus pre-populated Although, at the moment, we do not yet require an event bus to be present, as we start to add more pieces we will want to ensure it is always available. Add a new constructor and replace existing uses of new(tsd.System) throughout. Update generated files for import changes. Updates #15160 Change-Id: Ie5460985571ade87b8eac8b416948c7f49f0f64b Signed-off-by: M. J. Fromberger --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tailscaled/tailscaled.go | 6 ++---- cmd/tailscaled/tailscaled_windows.go | 2 +- cmd/tsconnect/wasm/wasm_js.go | 2 +- ipn/ipnlocal/local_test.go | 7 +++---- ipn/ipnlocal/loglines_test.go | 2 +- ipn/ipnlocal/serve_test.go | 2 +- ipn/ipnlocal/state_test.go | 6 +++--- ipn/ipnserver/server_test.go | 2 +- ipn/localapi/localapi_test.go | 2 +- ssh/tailssh/tailssh_test.go | 2 +- tsd/tsd.go | 8 ++++++++ tsnet/tsnet.go | 3 ++- tstest/integration/tailscaled_deps_test_darwin.go | 1 - tstest/integration/tailscaled_deps_test_freebsd.go | 1 - tstest/integration/tailscaled_deps_test_linux.go | 1 - tstest/integration/tailscaled_deps_test_openbsd.go | 1 - tstest/integration/tailscaled_deps_test_windows.go | 1 - wgengine/netstack/netstack_test.go | 2 +- wgengine/userspace_ext_test.go | 2 +- 21 files changed, 29 insertions(+), 28 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 0f3d33214..cfdb08c20 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -939,7 +939,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ tailscale.com/util/dnsname from tailscale.com/appc+ - tailscale.com/util/eventbus from tailscale.com/tsd + tailscale.com/util/eventbus from tailscale.com/tsd+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 5a77bad72..4e6502b72 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -391,7 +391,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ tailscale.com/util/dnsname from tailscale.com/appc+ - tailscale.com/util/eventbus from tailscale.com/cmd/tailscaled+ + tailscale.com/util/eventbus from tailscale.com/tsd+ tailscale.com/util/execqueue from tailscale.com/control/controlclient+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 480acb9ca..5c483ab1f 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -62,7 +62,6 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/util/clientmetric" - "tailscale.com/util/eventbus" "tailscale.com/util/multierr" "tailscale.com/util/osshare" "tailscale.com/version" @@ -375,11 +374,9 @@ var debugMux *http.ServeMux func run() (err error) { var logf logger.Logf = log.Printf - sys := new(tsd.System) - // Install an event bus as early as possible, so that it's // available 
universally when setting up everything else. - sys.Set(eventbus.New()) + sys := tsd.NewSystemWithEventBus() // Parse config, if specified, to fail early if it's invalid. var conf *conffile.Config @@ -718,6 +715,7 @@ var tstunNew = tstun.New func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack bool, err error) { conf := wgengine.Config{ + EventBus: sys.Bus.Get(), ListenPort: args.port, NetMon: sys.NetMon.Get(), HealthTracker: sys.HealthTracker(), diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 54ff2af14..4ba921e53 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -328,7 +328,7 @@ func beWindowsSubprocess() bool { log.Printf("Error pre-loading \"%s\": %v", fqWintunPath, err) } - sys := new(tsd.System) + sys := tsd.NewSystemWithEventBus() netMon, err := netmon.New(log.Printf) if err != nil { log.Fatalf("Could not create netMon: %v", err) diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index a7e3e506b..3d423d308 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -100,7 +100,7 @@ func newIPN(jsConfig js.Value) map[string]any { logtail := logtail.NewLogger(c, log.Printf) logf := logtail.Logf - sys := new(tsd.System) + sys := tsd.NewSystemWithEventBus() sys.Set(store) dialer := &tsdial.Dialer{Logf: logf} eng, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index e2a03dcd0..596952a0d 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -436,7 +436,7 @@ func (panicOnUseTransport) RoundTrip(*http.Request) (*http.Response, error) { } func newTestLocalBackend(t testing.TB) *LocalBackend { - return newTestLocalBackendWithSys(t, new(tsd.System)) + return newTestLocalBackendWithSys(t, tsd.NewSystemWithEventBus()) } // newTestLocalBackendWithSys creates a new LocalBackend with the given tsd.System. @@ -4867,9 +4867,8 @@ func TestConfigFileReload(t *testing.T) { // Create backend with initial config tc.initial.Path = path tc.initial.Raw = initialJSON - sys := &tsd.System{ - InitialConfig: tc.initial, - } + sys := tsd.NewSystemWithEventBus() + sys.InitialConfig = tc.initial b := newTestLocalBackendWithSys(t, sys) // Update config file diff --git a/ipn/ipnlocal/loglines_test.go b/ipn/ipnlocal/loglines_test.go index f70987c0e..cfcd54c64 100644 --- a/ipn/ipnlocal/loglines_test.go +++ b/ipn/ipnlocal/loglines_test.go @@ -47,7 +47,7 @@ func TestLocalLogLines(t *testing.T) { idA := logid(0xaa) // set up a LocalBackend, super bare bones. No functional data. - sys := new(tsd.System) + sys := tsd.NewSystemWithEventBus() store := new(mem.Store) sys.Set(store) e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index 3c028c65e..78f1da42c 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -877,7 +877,7 @@ func newTestBackend(t *testing.T) *LocalBackend { logf = logger.WithPrefix(tstest.WhileTestRunningLogger(t), "... 
") } - sys := &tsd.System{} + sys := tsd.NewSystemWithEventBus() e, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ SetSubsystem: sys.Set, HealthTracker: sys.HealthTracker(), diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 3c22b66be..a27ef9efe 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -323,7 +323,7 @@ func TestStateMachine(t *testing.T) { c := qt.New(t) logf := tstest.WhileTestRunningLogger(t) - sys := new(tsd.System) + sys := tsd.NewSystemWithEventBus() store := new(testStateStorage) sys.Set(store) e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) @@ -962,7 +962,7 @@ func TestStateMachine(t *testing.T) { func TestEditPrefsHasNoKeys(t *testing.T) { logf := tstest.WhileTestRunningLogger(t) - sys := new(tsd.System) + sys := tsd.NewSystemWithEventBus() sys.Set(new(mem.Store)) e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) if err != nil { @@ -1042,7 +1042,7 @@ func TestWGEngineStatusRace(t *testing.T) { t.Skip("test fails") c := qt.New(t) logf := tstest.WhileTestRunningLogger(t) - sys := new(tsd.System) + sys := tsd.NewSystemWithEventBus() sys.Set(new(mem.Store)) eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set) diff --git a/ipn/ipnserver/server_test.go b/ipn/ipnserver/server_test.go index c51c2d4d1..fd2e53f3e 100644 --- a/ipn/ipnserver/server_test.go +++ b/ipn/ipnserver/server_test.go @@ -517,7 +517,7 @@ type newControlClientFn func(tb testing.TB, opts controlclient.Options) controlc func newLocalBackendWithTestControl(tb testing.TB, newControl newControlClientFn, enableLogging bool) *ipnlocal.LocalBackend { tb.Helper() - sys := &tsd.System{} + sys := tsd.NewSystemWithEventBus() store := &mem.Store{} sys.Set(store) diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index b7f0c416c..38394739e 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -336,7 +336,7 @@ func TestServeWatchIPNBus(t *testing.T) { func newTestLocalBackend(t testing.TB) *ipnlocal.LocalBackend { var logf logger.Logf = logger.Discard - sys := new(tsd.System) + sys := tsd.NewSystemWithEventBus() store := new(mem.Store) sys.Set(store) eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 24f0e12a2..ec442a83b 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -1037,7 +1037,7 @@ func TestSSHAuthFlow(t *testing.T) { func TestSSH(t *testing.T) { var logf logger.Logf = t.Logf - sys := &tsd.System{} + sys := tsd.NewSystemWithEventBus() eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) if err != nil { t.Fatal(err) diff --git a/tsd/tsd.go b/tsd/tsd.go index 0d1f49809..029c64fbb 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -72,6 +72,14 @@ type System struct { userMetricsRegistry usermetric.Registry } +// NewSystemWithEventBus constructs a new otherwise-empty system with a +// freshly-constructed event bus populated. +func NewSystemWithEventBus() *System { + sys := new(System) + sys.Set(eventbus.New()) + return sys +} + // NetstackImpl is the interface that *netstack.Impl implements. // It's an interface for circular dependency reasons: netstack.Impl // references LocalBackend, and LocalBackend has a tsd.System. 
diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 1e58b424b..5968defff 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -558,7 +558,7 @@ func (s *Server) start() (reterr error) { s.Logf(format, a...) } - sys := new(tsd.System) + sys := tsd.NewSystemWithEventBus() s.sys = sys if err := s.startLogger(&closePool, sys.HealthTracker(), tsLogf); err != nil { return err @@ -572,6 +572,7 @@ func (s *Server) start() (reterr error) { s.dialer = &tsdial.Dialer{Logf: tsLogf} // mutated below (before used) eng, err := wgengine.NewUserspaceEngine(tsLogf, wgengine.Config{ + EventBus: sys.Bus.Get(), ListenPort: s.Port, NetMon: s.netMon, Dialer: s.dialer, diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index 321ba2566..470085f5e 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -48,7 +48,6 @@ import ( _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" - _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/version" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index 321ba2566..470085f5e 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -48,7 +48,6 @@ import ( _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" - _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/version" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index 321ba2566..470085f5e 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -48,7 +48,6 @@ import ( _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" - _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/version" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index 321ba2566..470085f5e 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -48,7 +48,6 @@ import ( _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" - _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/version" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index 79e2e05a7..30ce0892e 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -57,7 +57,6 @@ import ( _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" - _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osdiag" _ "tailscale.com/util/osshare" diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go index 79a380e84..95c518b01 100644 --- a/wgengine/netstack/netstack_test.go +++ b/wgengine/netstack/netstack_test.go @@ -101,7 +101,7 @@ func getMemStats() (ms runtime.MemStats) { func makeNetstack(tb testing.TB, config func(*Impl)) *Impl { tunDev := tstun.NewFake() 
- sys := &tsd.System{} + sys := tsd.NewSystemWithEventBus() sys.Set(new(mem.Store)) dialer := new(tsdial.Dialer) logf := tstest.WhileTestRunningLogger(tb) diff --git a/wgengine/userspace_ext_test.go b/wgengine/userspace_ext_test.go index cc29be234..6f5583a3a 100644 --- a/wgengine/userspace_ext_test.go +++ b/wgengine/userspace_ext_test.go @@ -66,7 +66,7 @@ func TestIsNetstackRouter(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - sys := &tsd.System{} + sys := tsd.NewSystemWithEventBus() if tt.setNetstackRouter { sys.NetstackRouter.Set(true) } From bcd6a0d0ac32a45e344f87bae1af323f52ac4b50 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 19 Mar 2025 12:13:35 -0700 Subject: [PATCH 0762/1708] tsnet: shut down the event bus on Close Updates #15160 Change-Id: I29c8194b4b41e95848e5f160e9970db352588449 Signed-off-by: M. J. Fromberger --- tsnet/tsnet.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 5968defff..6d52b3062 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -435,8 +435,11 @@ func (s *Server) Close() error { for _, ln := range s.listeners { ln.closeLocked() } - wg.Wait() + + if bus := s.sys.Bus.Get(); bus != nil { + bus.Close() + } s.closed = true return nil } From 2ac6e1edfd9504c43134aef788cf873fdc5c2e1c Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 19 Mar 2025 09:48:47 -0700 Subject: [PATCH 0763/1708] wgengine: plumb an event bus into the userspace engine Updates #15160 Change-Id: Ia695ccdddd09cd950de22abd000d4c531d6bf3c8 Signed-off-by: M. J. Fromberger --- wgengine/bench/wg.go | 4 ++-- wgengine/netstack/netstack_test.go | 2 +- wgengine/userspace.go | 17 ++++++++++++++++- wgengine/userspace_ext_test.go | 2 +- 4 files changed, 20 insertions(+), 5 deletions(-) diff --git a/wgengine/bench/wg.go b/wgengine/bench/wg.go index 45823dd56..2474f832e 100644 --- a/wgengine/bench/wg.go +++ b/wgengine/bench/wg.go @@ -46,7 +46,7 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. logf: logger.WithPrefix(logf, "tun1: "), traf: traf, } - s1 := new(tsd.System) + s1 := tsd.NewSystemWithEventBus() e1, err := wgengine.NewUserspaceEngine(l1, wgengine.Config{ Router: router.NewFake(l1), NetMon: nil, @@ -73,7 +73,7 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. logf: logger.WithPrefix(logf, "tun2: "), traf: traf, } - s2 := new(tsd.System) + s2 := tsd.NewSystemWithEventBus() e2, err := wgengine.NewUserspaceEngine(l2, wgengine.Config{ Router: router.NewFake(l2), NetMon: nil, diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go index 95c518b01..f30b12c96 100644 --- a/wgengine/netstack/netstack_test.go +++ b/wgengine/netstack/netstack_test.go @@ -45,7 +45,7 @@ func TestInjectInboundLeak(t *testing.T) { t.Logf(format, args...) 
} } - sys := new(tsd.System) + sys := tsd.NewSystemWithEventBus() eng, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ Tun: tunDev, Dialer: dialer, diff --git a/wgengine/userspace.go b/wgengine/userspace.go index cca253048..d54b5ec38 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -46,6 +46,7 @@ import ( "tailscale.com/types/views" "tailscale.com/util/clientmetric" "tailscale.com/util/deephash" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/set" "tailscale.com/util/testenv" @@ -89,8 +90,12 @@ const statusPollInterval = 1 * time.Minute const networkLoggerUploadTimeout = 5 * time.Second type userspaceEngine struct { + // eventBus will eventually become required, but for now may be nil. + // TODO(creachadair): Enforce that this is non-nil at construction. + eventBus *eventbus.Bus + logf logger.Logf - wgLogger *wglog.Logger //a wireguard-go logging wrapper + wgLogger *wglog.Logger // a wireguard-go logging wrapper reqCh chan struct{} waitCh chan struct{} // chan is closed when first Close call completes; contrast with closing bool timeNow func() mono.Time @@ -227,6 +232,13 @@ type Config struct { // DriveForLocal, if populated, will cause the engine to expose a Taildrive // listener at 100.100.100.100:8080. DriveForLocal drive.FileSystemForLocal + + // EventBus, if non-nil, is used for event publication and subscription by + // the Engine and its subsystems. + // + // TODO(creachadair): As of 2025-03-19 this is optional, but is intended to + // become required non-nil. + EventBus *eventbus.Bus } // NewFakeUserspaceEngine returns a new userspace engine for testing. @@ -255,6 +267,8 @@ func NewFakeUserspaceEngine(logf logger.Logf, opts ...any) (Engine, error) { conf.HealthTracker = v case *usermetric.Registry: conf.Metrics = v + case *eventbus.Bus: + conf.EventBus = v default: return nil, fmt.Errorf("unknown option type %T", v) } @@ -323,6 +337,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } e := &userspaceEngine{ + eventBus: conf.EventBus, timeNow: mono.Now, logf: logf, reqCh: make(chan struct{}, 1), diff --git a/wgengine/userspace_ext_test.go b/wgengine/userspace_ext_test.go index 6f5583a3a..b76a2b4b7 100644 --- a/wgengine/userspace_ext_test.go +++ b/wgengine/userspace_ext_test.go @@ -16,7 +16,7 @@ import ( ) func TestIsNetstack(t *testing.T) { - sys := new(tsd.System) + sys := tsd.NewSystemWithEventBus() e, err := wgengine.NewUserspaceEngine( tstest.WhileTestRunningLogger(t), wgengine.Config{ From 418e19fb5eb34e5552c9b5fc39dd5ca12ce7a51d Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 19 Mar 2025 15:50:00 -0700 Subject: [PATCH 0764/1708] portmapper: update NewClient to use a Config argument In preparation for adding more parameters (and later, moving some away), rework the portmapper constructor to accept its arguments on a Config struct rather than positionally. This is a breaking change to the function signature, but one that is very easy to update, and a search of GitHub reveals only six instances of usage outside clones and forks of Tailscale itself, that are not direct copies of the code fixed up here. While we could stub in another constructor, I think it is safe to let those folks do the update in-place, since their usage is already affected by other changes we can't test for anyway. Updates #15160 Change-Id: I9f8a5e12b38885074c98894b7376039261b43f43 Signed-off-by: M. J. 
Fromberger --- cmd/tailscale/cli/netcheck.go | 5 ++- ipn/localapi/localapi.go | 30 ++++++++++------- net/portmapper/igd_test.go | 11 +++++-- net/portmapper/portmapper.go | 55 +++++++++++++++++++------------ net/portmapper/portmapper_test.go | 6 ++-- wgengine/magicsock/magicsock.go | 8 ++++- 6 files changed, 74 insertions(+), 41 deletions(-) diff --git a/cmd/tailscale/cli/netcheck.go b/cmd/tailscale/cli/netcheck.go index 312475ece..14e337b89 100644 --- a/cmd/tailscale/cli/netcheck.go +++ b/cmd/tailscale/cli/netcheck.go @@ -55,7 +55,10 @@ func runNetcheck(ctx context.Context, args []string) error { // Ensure that we close the portmapper after running a netcheck; this // will release any port mappings created. - pm := portmapper.NewClient(logf, netMon, nil, nil, nil) + pm := portmapper.NewClient(portmapper.Config{ + Logf: logf, + NetMon: netMon, + }) defer pm.Close() c := &netcheck.Client{ diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index d1f07ea4e..5901855e3 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -818,19 +818,25 @@ func (h *Handler) serveDebugPortmap(w http.ResponseWriter, r *http.Request) { done := make(chan bool, 1) var c *portmapper.Client - c = portmapper.NewClient(logger.WithPrefix(logf, "portmapper: "), h.b.NetMon(), debugKnobs, h.b.ControlKnobs(), func() { - logf("portmapping changed.") - logf("have mapping: %v", c.HaveMapping()) - - if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { - logf("cb: mapping: %v", ext) - select { - case done <- true: - default: + c = portmapper.NewClient(portmapper.Config{ + Logf: logger.WithPrefix(logf, "portmapper: "), + NetMon: h.b.NetMon(), + DebugKnobs: debugKnobs, + ControlKnobs: h.b.ControlKnobs(), + OnChange: func() { + logf("portmapping changed.") + logf("have mapping: %v", c.HaveMapping()) + + if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { + logf("cb: mapping: %v", ext) + select { + case done <- true: + default: + } + return } - return - } - logf("cb: no mapping") + logf("cb: no mapping") + }, }) defer c.Close() diff --git a/net/portmapper/igd_test.go b/net/portmapper/igd_test.go index 5c24d03aa..67d873c35 100644 --- a/net/portmapper/igd_test.go +++ b/net/portmapper/igd_test.go @@ -260,9 +260,14 @@ func (d *TestIGD) handlePCPQuery(pkt []byte, src netip.AddrPort) { func newTestClient(t *testing.T, igd *TestIGD) *Client { var c *Client - c = NewClient(t.Logf, netmon.NewStatic(), nil, new(controlknobs.Knobs), func() { - t.Logf("port map changed") - t.Logf("have mapping: %v", c.HaveMapping()) + c = NewClient(Config{ + Logf: t.Logf, + NetMon: netmon.NewStatic(), + ControlKnobs: new(controlknobs.Knobs), + OnChange: func() { + t.Logf("port map changed") + t.Logf("have mapping: %v", c.HaveMapping()) + }, }) c.testPxPPort = igd.TestPxPPort() c.testUPnPPort = igd.TestUPnPPort() diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index 71b55b8a7..b49a8f7bb 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -201,32 +201,45 @@ func (m *pmpMapping) Release(ctx context.Context) { uc.WriteToUDPAddrPort(pkt, m.gw) } -// NewClient returns a new portmapping client. -// -// The netMon parameter is required. -// -// The debug argument allows configuring the behaviour of the portmapper for -// debugging; if nil, a sensible set of defaults will be used. -// -// The controlKnobs, if non-nil, specifies the control knobs from the control -// plane that might disable portmapping. 
-// -// The optional onChange argument specifies a func to run in a new goroutine -// whenever the port mapping status has changed. If nil, it doesn't make a -// callback. -func NewClient(logf logger.Logf, netMon *netmon.Monitor, debug *DebugKnobs, controlKnobs *controlknobs.Knobs, onChange func()) *Client { - if netMon == nil { +// Config carries the settings for a [Client]. +type Config struct { + // Logf is called to generate text logs for the client. If nil, logger.Discard is used. + Logf logger.Logf + + // NetMon is the network monitor used by the client. It must be non-nil. + NetMon *netmon.Monitor + + // DebugKnobs, if non-nil, configure the behaviour of the portmapper for + // debugging. If nil, a sensible set of defaults will be used. + DebugKnobs *DebugKnobs + + // ControlKnobs, if non-nil, specifies knobs from the control plane that + // might disable port mapping. + ControlKnobs *controlknobs.Knobs + + // OnChange is called to run in a new goroutine whenever the port mapping + // status has changed. If nil, no callback is issued. + OnChange func() +} + +// NewClient constructs a new portmapping [Client] from c. It will panic if any +// required parameters are omitted. +func NewClient(c Config) *Client { + if c.NetMon == nil { panic("nil netMon") } ret := &Client{ - logf: logf, - netMon: netMon, + logf: c.Logf, + netMon: c.NetMon, ipAndGateway: netmon.LikelyHomeRouterIP, // TODO(bradfitz): move this to method on netMon - onChange: onChange, - controlKnobs: controlKnobs, + onChange: c.OnChange, + controlKnobs: c.ControlKnobs, + } + if ret.logf == nil { + ret.logf = logger.Discard } - if debug != nil { - ret.debug = *debug + if c.DebugKnobs != nil { + ret.debug = *c.DebugKnobs } return ret } diff --git a/net/portmapper/portmapper_test.go b/net/portmapper/portmapper_test.go index d321b720a..c815f21d1 100644 --- a/net/portmapper/portmapper_test.go +++ b/net/portmapper/portmapper_test.go @@ -18,7 +18,7 @@ func TestCreateOrGetMapping(t *testing.T) { if v, _ := strconv.ParseBool(os.Getenv("HIT_NETWORK")); !v { t.Skip("skipping test without HIT_NETWORK=1") } - c := NewClient(t.Logf, nil, nil, new(controlknobs.Knobs), nil) + c := NewClient(Config{Logf: t.Logf, ControlKnobs: new(controlknobs.Knobs)}) defer c.Close() c.SetLocalPort(1234) for i := range 2 { @@ -34,7 +34,7 @@ func TestClientProbe(t *testing.T) { if v, _ := strconv.ParseBool(os.Getenv("HIT_NETWORK")); !v { t.Skip("skipping test without HIT_NETWORK=1") } - c := NewClient(t.Logf, nil, nil, new(controlknobs.Knobs), nil) + c := NewClient(Config{Logf: t.Logf, ControlKnobs: new(controlknobs.Knobs)}) defer c.Close() for i := range 3 { if i > 0 { @@ -49,7 +49,7 @@ func TestClientProbeThenMap(t *testing.T) { if v, _ := strconv.ParseBool(os.Getenv("HIT_NETWORK")); !v { t.Skip("skipping test without HIT_NETWORK=1") } - c := NewClient(t.Logf, nil, nil, new(controlknobs.Knobs), nil) + c := NewClient(Config{Logf: t.Logf, ControlKnobs: new(controlknobs.Knobs)}) defer c.Close() c.debug.VerboseLogs = true c.SetLocalPort(1234) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a32867f72..860176470 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -544,7 +544,13 @@ func NewConn(opts Options) (*Conn, error) { portMapOpts := &portmapper.DebugKnobs{ DisableAll: func() bool { return opts.DisablePortMapper || c.onlyTCP443.Load() }, } - c.portMapper = portmapper.NewClient(portmapperLogf, opts.NetMon, portMapOpts, opts.ControlKnobs, c.onPortMapChanged) + c.portMapper = 
portmapper.NewClient(portmapper.Config{ + Logf: portmapperLogf, + NetMon: opts.NetMon, + DebugKnobs: portMapOpts, + ControlKnobs: opts.ControlKnobs, + OnChange: c.onPortMapChanged, + }) c.portMapper.SetGatewayLookupFunc(opts.NetMon.GatewayAndSelfIP) c.netMon = opts.NetMon c.health = opts.HealthTracker From baead61e44075b92c18d09059c416730ed32de73 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 19 Mar 2025 20:28:19 -0700 Subject: [PATCH 0765/1708] {wgengine,util/portmapper}: add and plumb an event bus (#15359) Updates #15160 Change-Id: I2510fb4a8905fb0abe8a8e0c5b81adb15d50a6f8 Signed-off-by: M. J. Fromberger --- cmd/tailscale/depaware.txt | 18 +++++++++++++++--- net/portmapper/portmapper.go | 10 ++++++++++ wgengine/magicsock/magicsock.go | 15 +++++++++++++-- wgengine/userspace.go | 1 + 4 files changed, 39 insertions(+), 5 deletions(-) diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 9728a2ff4..1671b71a8 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -5,6 +5,10 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy + github.com/coder/websocket from tailscale.com/util/eventbus + github.com/coder/websocket/internal/errd from github.com/coder/websocket + github.com/coder/websocket/internal/util from github.com/coder/websocket + github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/pe+ W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/winutil/authenticode @@ -89,6 +93,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web + tailscale.com/feature from tailscale.com/tsweb tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli @@ -132,7 +137,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/tstime from tailscale.com/control/controlhttp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate tailscale.com/tstime/rate from tailscale.com/cmd/tailscale/cli+ - tailscale.com/tsweb/varz from tailscale.com/util/usermetric + tailscale.com/tsweb from tailscale.com/util/eventbus + tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ tailscale.com/types/dnstype from tailscale.com/tailcfg+ tailscale.com/types/empty from tailscale.com/ipn tailscale.com/types/ipproto from tailscale.com/ipn+ @@ -157,6 +163,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/cmd/tailscale/cli+ + tailscale.com/util/eventbus from tailscale.com/net/portmapper tailscale.com/util/groupmember from tailscale.com/client/web 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from 
tailscale.com/client/tailscale+ @@ -167,6 +174,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/must from tailscale.com/clientupdate/distsign+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/quarantine from tailscale.com/cmd/tailscale/cli + tailscale.com/util/rands from tailscale.com/tsweb tailscale.com/util/set from tailscale.com/derp+ tailscale.com/util/singleflight from tailscale.com/net/dnscache+ tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ @@ -329,7 +337,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem html from html/template+ - html/template from github.com/gorilla/csrf + html/template from github.com/gorilla/csrf+ image from github.com/skip2/go-qrcode+ image/color from github.com/skip2/go-qrcode+ image/png from github.com/skip2/go-qrcode @@ -353,7 +361,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ - internal/profilerecord from runtime + internal/profile from net/http/pprof + internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ internal/runtime/atomic from internal/runtime/exithook+ @@ -395,6 +404,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ + net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ @@ -409,6 +419,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep regexp/syntax from regexp runtime from archive/tar+ runtime/debug from tailscale.com+ + runtime/pprof from net/http/pprof + runtime/trace from net/http/pprof slices from tailscale.com/client/web+ sort from compress/flate+ strconv from archive/tar+ diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index b49a8f7bb..8fe9ba493 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -31,6 +31,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/nettype" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" ) var disablePortMapperEnv = envknob.RegisterBool("TS_DISABLE_PORTMAPPER") @@ -84,6 +85,7 @@ const trustServiceStillAvailableDuration = 10 * time.Minute // Client is a port mapping client. type Client struct { + eventBus *eventbus.Bus logf logger.Logf netMon *netmon.Monitor // optional; nil means interfaces will be looked up on-demand controlKnobs *controlknobs.Knobs @@ -203,6 +205,13 @@ func (m *pmpMapping) Release(ctx context.Context) { // Config carries the settings for a [Client]. type Config struct { + // EventBus, if non-nil, is used for event publication and subscription by + // portmapper clients created from this config. + // + // TODO(creachadair): As of 2025-03-19 this is optional, but is intended to + // become required non-nil. + EventBus *eventbus.Bus + // Logf is called to generate text logs for the client. If nil, logger.Discard is used. 
Logf logger.Logf @@ -229,6 +238,7 @@ func NewClient(c Config) *Client { panic("nil netMon") } ret := &Client{ + eventBus: c.EventBus, logf: c.Logf, netMon: c.NetMon, ipAndGateway: netmon.LikelyHomeRouterIP, // TODO(bradfitz): move this to method on netMon diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 860176470..bd7f8c04f 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -56,6 +56,7 @@ import ( "tailscale.com/types/nettype" "tailscale.com/types/views" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/ringbuffer" "tailscale.com/util/set" @@ -136,6 +137,7 @@ type Conn struct { // This block mirrors the contents and field order of the Options // struct. Initialized once at construction, then constant. + eventBus *eventbus.Bus logf logger.Logf epFunc func([]tailcfg.Endpoint) derpActiveFunc func() @@ -401,8 +403,15 @@ func (c *Conn) dlogf(format string, a ...any) { // Options contains options for Listen. type Options struct { - // Logf optionally provides a log function to use. - // Must not be nil. + // EventBus, if non-nil, is used for event publication and subscription by + // each Conn created from these Options. + // + // TODO(creachadair): As of 2025-03-19 this is optional, but is intended to + // become required non-nil. + EventBus *eventbus.Bus + + // Logf provides a log function to use. It must not be nil. + // Use [logger.Discard] to disrcard logs. Logf logger.Logf // Port is the port to listen on. @@ -529,6 +538,7 @@ func NewConn(opts Options) (*Conn, error) { } c := newConn(opts.logf()) + c.eventBus = opts.EventBus c.port.Store(uint32(opts.Port)) c.controlKnobs = opts.ControlKnobs c.epFunc = opts.endpointsFunc() @@ -545,6 +555,7 @@ func NewConn(opts Options) (*Conn, error) { DisableAll: func() bool { return opts.DisablePortMapper || c.onlyTCP443.Load() }, } c.portMapper = portmapper.NewClient(portmapper.Config{ + EventBus: c.eventBus, Logf: portmapperLogf, NetMon: opts.NetMon, DebugKnobs: portMapOpts, diff --git a/wgengine/userspace.go b/wgengine/userspace.go index d54b5ec38..385f31d65 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -404,6 +404,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } } magicsockOpts := magicsock.Options{ + EventBus: e.eventBus, Logf: logf, Port: conf.ListenPort, EndpointsFunc: endpointsFn, From deb0b255ff1431ffbd0a4b61bb319f7852ab0e24 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 20 Mar 2025 15:18:29 -0700 Subject: [PATCH 0766/1708] all: update the tsd.System constructor name (#15372) Replace NewSystemWithEventBus with plain NewSystem, and update all usage. See https://github.com/tailscale/tailscale/pull/15355#discussion_r2003910766 Updates #15160 Change-Id: I64d337f09576b41d9ad78eba301a74b9a9d6ebf4 Signed-off-by: M. J. 
Fromberger --- cmd/tailscaled/tailscaled.go | 2 +- cmd/tailscaled/tailscaled_windows.go | 2 +- cmd/tsconnect/wasm/wasm_js.go | 2 +- ipn/ipnlocal/local_test.go | 6 +++--- ipn/ipnlocal/loglines_test.go | 2 +- ipn/ipnlocal/serve_test.go | 2 +- ipn/ipnlocal/state_test.go | 6 +++--- ipn/ipnserver/server_test.go | 2 +- ipn/localapi/localapi_test.go | 2 +- ssh/tailssh/tailssh_test.go | 2 +- tsd/tsd.go | 8 ++++++-- tsnet/tsnet.go | 2 +- wgengine/bench/wg.go | 4 ++-- wgengine/netstack/netstack_test.go | 4 ++-- wgengine/userspace_ext_test.go | 4 ++-- 15 files changed, 27 insertions(+), 23 deletions(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 5c483ab1f..191ed64c9 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -376,7 +376,7 @@ func run() (err error) { // Install an event bus as early as possible, so that it's // available universally when setting up everything else. - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() // Parse config, if specified, to fail early if it's invalid. var conf *conffile.Config diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 4ba921e53..681cb3318 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -328,7 +328,7 @@ func beWindowsSubprocess() bool { log.Printf("Error pre-loading \"%s\": %v", fqWintunPath, err) } - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() netMon, err := netmon.New(log.Printf) if err != nil { log.Fatalf("Could not create netMon: %v", err) diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index 3d423d308..779a87e49 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -100,7 +100,7 @@ func newIPN(jsConfig js.Value) map[string]any { logtail := logtail.NewLogger(c, log.Printf) logf := logtail.Logf - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() sys.Set(store) dialer := &tsdial.Dialer{Logf: logf} eng, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 596952a0d..5263584b6 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -436,7 +436,7 @@ func (panicOnUseTransport) RoundTrip(*http.Request) (*http.Response, error) { } func newTestLocalBackend(t testing.TB) *LocalBackend { - return newTestLocalBackendWithSys(t, tsd.NewSystemWithEventBus()) + return newTestLocalBackendWithSys(t, tsd.NewSystem()) } // newTestLocalBackendWithSys creates a new LocalBackend with the given tsd.System. 
@@ -4407,7 +4407,7 @@ func TestNotificationTargetMatch(t *testing.T) { type newTestControlFn func(tb testing.TB, opts controlclient.Options) controlclient.Client func newLocalBackendWithTestControl(t *testing.T, enableLogging bool, newControl newTestControlFn) *LocalBackend { - return newLocalBackendWithSysAndTestControl(t, enableLogging, new(tsd.System), newControl) + return newLocalBackendWithSysAndTestControl(t, enableLogging, tsd.NewSystem(), newControl) } func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys *tsd.System, newControl newTestControlFn) *LocalBackend { @@ -4867,7 +4867,7 @@ func TestConfigFileReload(t *testing.T) { // Create backend with initial config tc.initial.Path = path tc.initial.Raw = initialJSON - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() sys.InitialConfig = tc.initial b := newTestLocalBackendWithSys(t, sys) diff --git a/ipn/ipnlocal/loglines_test.go b/ipn/ipnlocal/loglines_test.go index cfcd54c64..f4a77824e 100644 --- a/ipn/ipnlocal/loglines_test.go +++ b/ipn/ipnlocal/loglines_test.go @@ -47,7 +47,7 @@ func TestLocalLogLines(t *testing.T) { idA := logid(0xaa) // set up a LocalBackend, super bare bones. No functional data. - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() store := new(mem.Store) sys.Set(store) e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index 78f1da42c..5e148a8a4 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -877,7 +877,7 @@ func newTestBackend(t *testing.T) *LocalBackend { logf = logger.WithPrefix(tstest.WhileTestRunningLogger(t), "... ") } - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() e, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ SetSubsystem: sys.Set, HealthTracker: sys.HealthTracker(), diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index a27ef9efe..b1bab2a99 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -323,7 +323,7 @@ func TestStateMachine(t *testing.T) { c := qt.New(t) logf := tstest.WhileTestRunningLogger(t) - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() store := new(testStateStorage) sys.Set(store) e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) @@ -962,7 +962,7 @@ func TestStateMachine(t *testing.T) { func TestEditPrefsHasNoKeys(t *testing.T) { logf := tstest.WhileTestRunningLogger(t) - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() sys.Set(new(mem.Store)) e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) if err != nil { @@ -1042,7 +1042,7 @@ func TestWGEngineStatusRace(t *testing.T) { t.Skip("test fails") c := qt.New(t) logf := tstest.WhileTestRunningLogger(t) - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() sys.Set(new(mem.Store)) eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set) diff --git a/ipn/ipnserver/server_test.go b/ipn/ipnserver/server_test.go index fd2e53f3e..e34172ff9 100644 --- a/ipn/ipnserver/server_test.go +++ b/ipn/ipnserver/server_test.go @@ -517,7 +517,7 @@ type newControlClientFn func(tb testing.TB, opts controlclient.Options) controlc func newLocalBackendWithTestControl(tb testing.TB, newControl newControlClientFn, enableLogging bool) *ipnlocal.LocalBackend { tb.Helper() - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() store := &mem.Store{} 
sys.Set(store) diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index 38394739e..4f304bb1b 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -336,7 +336,7 @@ func TestServeWatchIPNBus(t *testing.T) { func newTestLocalBackend(t testing.TB) *ipnlocal.LocalBackend { var logf logger.Logf = logger.Discard - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() store := new(mem.Store) sys.Set(store) eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index ec442a83b..3dbd16047 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -1037,7 +1037,7 @@ func TestSSHAuthFlow(t *testing.T) { func TestSSH(t *testing.T) { var logf logger.Logf = t.Logf - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) if err != nil { t.Fatal(err) diff --git a/tsd/tsd.go b/tsd/tsd.go index 029c64fbb..ccd804f81 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -40,6 +40,10 @@ import ( ) // System contains all the subsystems of a Tailscale node (tailscaled, etc.) +// +// A valid System value must always have a non-nil Bus populated. Callers must +// ensure this before using the value further. Call [NewSystem] to obtain a +// value ready to use. type System struct { Bus SubSystem[*eventbus.Bus] Dialer SubSystem[*tsdial.Dialer] @@ -72,9 +76,9 @@ type System struct { userMetricsRegistry usermetric.Registry } -// NewSystemWithEventBus constructs a new otherwise-empty system with a +// NewSystem constructs a new otherwise-empty [System] with a // freshly-constructed event bus populated. -func NewSystemWithEventBus() *System { +func NewSystem() *System { sys := new(System) sys.Set(eventbus.New()) return sys diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 6d52b3062..67afd674a 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -561,7 +561,7 @@ func (s *Server) start() (reterr error) { s.Logf(format, a...) } - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() s.sys = sys if err := s.startLogger(&closePool, sys.HealthTracker(), tsLogf); err != nil { return err diff --git a/wgengine/bench/wg.go b/wgengine/bench/wg.go index 2474f832e..9b195bdb7 100644 --- a/wgengine/bench/wg.go +++ b/wgengine/bench/wg.go @@ -46,7 +46,7 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. logf: logger.WithPrefix(logf, "tun1: "), traf: traf, } - s1 := tsd.NewSystemWithEventBus() + s1 := tsd.NewSystem() e1, err := wgengine.NewUserspaceEngine(l1, wgengine.Config{ Router: router.NewFake(l1), NetMon: nil, @@ -73,7 +73,7 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. logf: logger.WithPrefix(logf, "tun2: "), traf: traf, } - s2 := tsd.NewSystemWithEventBus() + s2 := tsd.NewSystem() e2, err := wgengine.NewUserspaceEngine(l2, wgengine.Config{ Router: router.NewFake(l2), NetMon: nil, diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go index f30b12c96..c34ec7a25 100644 --- a/wgengine/netstack/netstack_test.go +++ b/wgengine/netstack/netstack_test.go @@ -45,7 +45,7 @@ func TestInjectInboundLeak(t *testing.T) { t.Logf(format, args...) 
} } - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() eng, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ Tun: tunDev, Dialer: dialer, @@ -101,7 +101,7 @@ func getMemStats() (ms runtime.MemStats) { func makeNetstack(tb testing.TB, config func(*Impl)) *Impl { tunDev := tstun.NewFake() - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() sys.Set(new(mem.Store)) dialer := new(tsdial.Dialer) logf := tstest.WhileTestRunningLogger(tb) diff --git a/wgengine/userspace_ext_test.go b/wgengine/userspace_ext_test.go index b76a2b4b7..b0caffd1e 100644 --- a/wgengine/userspace_ext_test.go +++ b/wgengine/userspace_ext_test.go @@ -16,7 +16,7 @@ import ( ) func TestIsNetstack(t *testing.T) { - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() e, err := wgengine.NewUserspaceEngine( tstest.WhileTestRunningLogger(t), wgengine.Config{ @@ -66,7 +66,7 @@ func TestIsNetstackRouter(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - sys := tsd.NewSystemWithEventBus() + sys := tsd.NewSystem() if tt.setNetstackRouter { sys.NetstackRouter.Set(true) } From 2731171c5e47ada8603c904cbcd9410234614a09 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 20 Mar 2025 15:19:26 -0700 Subject: [PATCH 0767/1708] net/portmapper: fire an event when a port mapping is updated (#15371) When an event bus is configured publish an event each time a new port mapping is updated. Publication is unconditional and occurs prior to calling any callback that is registered. For now, the callback is still fired in a separate goroutine as before -- later, those callbacks should become subscriptions to the published event. For now, the event type is defined as a new type here in the package. We will want to move it to a more central package when there are subscribers. The event wrapper is effectively a subset of the data exported by the internal mapping interface, but on a concrete struct so the bus plumbing can inspect it. Updates #15160 Change-Id: I951f212429ac791223af8d75b6eb39a0d2a0053a Signed-off-by: M. J. Fromberger --- net/portmapper/igd_test.go | 8 ++- net/portmapper/portmapper.go | 95 ++++++++++++++++++++++--------- net/portmapper/portmapper_test.go | 39 ++++++++++--- net/portmapper/select_test.go | 3 +- net/portmapper/upnp_test.go | 18 ++---- 5 files changed, 114 insertions(+), 49 deletions(-) diff --git a/net/portmapper/igd_test.go b/net/portmapper/igd_test.go index 67d873c35..319115896 100644 --- a/net/portmapper/igd_test.go +++ b/net/portmapper/igd_test.go @@ -19,6 +19,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/syncs" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) // TestIGD is an IGD (Internet Gateway Device) for testing. It supports fake @@ -258,12 +259,16 @@ func (d *TestIGD) handlePCPQuery(pkt []byte, src netip.AddrPort) { } } -func newTestClient(t *testing.T, igd *TestIGD) *Client { +// newTestClient configures a new test client connected to igd for mapping updates. +// If bus != nil, update events are published to it. +// A cleanup for the resulting client is added to t. 
+func newTestClient(t *testing.T, igd *TestIGD, bus *eventbus.Bus) *Client { var c *Client c = NewClient(Config{ Logf: t.Logf, NetMon: netmon.NewStatic(), ControlKnobs: new(controlknobs.Knobs), + EventBus: bus, OnChange: func() { t.Logf("port map changed") t.Logf("have mapping: %v", c.HaveMapping()) @@ -273,5 +278,6 @@ func newTestClient(t *testing.T, igd *TestIGD) *Client { c.testUPnPPort = igd.TestUPnPPort() c.netMon = netmon.NewStatic() c.SetGatewayLookupFunc(testIPAndGateway) + t.Cleanup(func() { c.Close() }) return c } diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index 8fe9ba493..f95d6503a 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -85,7 +85,11 @@ const trustServiceStillAvailableDuration = 10 * time.Minute // Client is a port mapping client. type Client struct { - eventBus *eventbus.Bus + // The following two fields must either both be nil, or both non-nil. + // Both are immutable after construction. + pubClient *eventbus.Client + updates *eventbus.Publisher[Mapping] + logf logger.Logf netMon *netmon.Monitor // optional; nil means interfaces will be looked up on-demand controlKnobs *controlknobs.Knobs @@ -238,13 +242,16 @@ func NewClient(c Config) *Client { panic("nil netMon") } ret := &Client{ - eventBus: c.EventBus, logf: c.Logf, netMon: c.NetMon, ipAndGateway: netmon.LikelyHomeRouterIP, // TODO(bradfitz): move this to method on netMon onChange: c.OnChange, controlKnobs: c.ControlKnobs, } + if c.EventBus != nil { + ret.pubClient = c.EventBus.Client("portmapper") + ret.updates = eventbus.Publish[Mapping](ret.pubClient) + } if ret.logf == nil { ret.logf = logger.Discard } @@ -279,6 +286,10 @@ func (c *Client) Close() error { } c.closed = true c.invalidateMappingsLocked(true) + if c.updates != nil { + c.updates.Close() + c.pubClient.Close() + } // TODO: close some future ever-listening UDP socket(s), // waiting for multicast announcements from router. return nil @@ -490,13 +501,32 @@ func (c *Client) createMapping() { c.runningCreate = false }() - if _, err := c.createOrGetMapping(ctx); err == nil && c.onChange != nil { + mapping, _, err := c.createOrGetMapping(ctx) + if err != nil { + if !IsNoMappingError(err) { + c.logf("createOrGetMapping: %v", err) + } + return + } + c.updates.Publish(Mapping{ + External: mapping.External(), + Type: mapping.MappingType(), + GoodUntil: mapping.GoodUntil(), + }) + if c.onChange != nil { go c.onChange() - } else if err != nil && !IsNoMappingError(err) { - c.logf("createOrGetMapping: %v", err) } } +// Mapping is an event recording the allocation of a port mapping. +type Mapping struct { + External netip.AddrPort + Type string + GoodUntil time.Time + + // TODO(creachadair): Record whether we reused an existing mapping? +} + // wildcardIP is used when the previous external IP is not known for PCP port mapping. var wildcardIP = netip.MustParseAddr("0.0.0.0") @@ -505,19 +535,19 @@ var wildcardIP = netip.MustParseAddr("0.0.0.0") // // If no mapping is available, the error will be of type // NoMappingError; see IsNoMappingError. 
-func (c *Client) createOrGetMapping(ctx context.Context) (external netip.AddrPort, err error) { +func (c *Client) createOrGetMapping(ctx context.Context) (mapping mapping, external netip.AddrPort, err error) { if c.debug.disableAll() { - return netip.AddrPort{}, NoMappingError{ErrPortMappingDisabled} + return nil, netip.AddrPort{}, NoMappingError{ErrPortMappingDisabled} } if c.debug.DisableUPnP && c.debug.DisablePCP && c.debug.DisablePMP { - return netip.AddrPort{}, NoMappingError{ErrNoPortMappingServices} + return nil, netip.AddrPort{}, NoMappingError{ErrNoPortMappingServices} } gw, myIP, ok := c.gatewayAndSelfIP() if !ok { - return netip.AddrPort{}, NoMappingError{ErrGatewayRange} + return nil, netip.AddrPort{}, NoMappingError{ErrGatewayRange} } if gw.Is6() { - return netip.AddrPort{}, NoMappingError{ErrGatewayIPv6} + return nil, netip.AddrPort{}, NoMappingError{ErrGatewayIPv6} } now := time.Now() @@ -546,6 +576,17 @@ func (c *Client) createOrGetMapping(ctx context.Context) (external netip.AddrPor return } + // TODO(creachadair): This is more subtle than it should be. Ideally we + // would just return the mapping directly, but there are many different + // paths through the function with carefully-balanced locks, and not all + // the paths have a mapping to return. As a workaround, while we're here + // doing cleanup under the lock, grab the final mapping value and return + // it, so the caller does not need to grab the lock again and potentially + // race with a later update. The mapping itself is concurrency-safe. + // + // We should restructure this code so the locks are properly scoped. + mapping = c.mapping + // Print the internal details of each mapping if we're being verbose. if c.debug.VerboseLogs { c.logf("successfully obtained mapping: now=%d external=%v type=%s mapping=%s", @@ -571,7 +612,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (external netip.AddrPor if now.Before(m.RenewAfter()) { defer c.mu.Unlock() reusedExisting = true - return m.External(), nil + return nil, m.External(), nil } // The mapping might still be valid, so just try to renew it. prevPort = m.External().Port() @@ -580,10 +621,10 @@ func (c *Client) createOrGetMapping(ctx context.Context) (external netip.AddrPor if c.debug.DisablePCP && c.debug.DisablePMP { c.mu.Unlock() if external, ok := c.getUPnPPortMapping(ctx, gw, internalAddr, prevPort); ok { - return external, nil + return nil, external, nil } c.vlogf("fallback to UPnP due to PCP and PMP being disabled failed") - return netip.AddrPort{}, NoMappingError{ErrNoPortMappingServices} + return nil, netip.AddrPort{}, NoMappingError{ErrNoPortMappingServices} } // If we just did a Probe (e.g. 
via netchecker) but didn't @@ -610,16 +651,16 @@ func (c *Client) createOrGetMapping(ctx context.Context) (external netip.AddrPor c.mu.Unlock() // fallback to UPnP portmapping if external, ok := c.getUPnPPortMapping(ctx, gw, internalAddr, prevPort); ok { - return external, nil + return nil, external, nil } c.vlogf("fallback to UPnP due to no PCP and PMP failed") - return netip.AddrPort{}, NoMappingError{ErrNoPortMappingServices} + return nil, netip.AddrPort{}, NoMappingError{ErrNoPortMappingServices} } c.mu.Unlock() uc, err := c.listenPacket(ctx, "udp4", ":0") if err != nil { - return netip.AddrPort{}, err + return nil, netip.AddrPort{}, err } defer uc.Close() @@ -639,7 +680,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (external netip.AddrPor if neterror.TreatAsLostUDP(err) { err = NoMappingError{ErrNoPortMappingServices} } - return netip.AddrPort{}, err + return nil, netip.AddrPort{}, err } } else { // Ask for our external address if needed. @@ -648,7 +689,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (external netip.AddrPor if neterror.TreatAsLostUDP(err) { err = NoMappingError{ErrNoPortMappingServices} } - return netip.AddrPort{}, err + return nil, netip.AddrPort{}, err } } @@ -657,7 +698,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (external netip.AddrPor if neterror.TreatAsLostUDP(err) { err = NoMappingError{ErrNoPortMappingServices} } - return netip.AddrPort{}, err + return nil, netip.AddrPort{}, err } } @@ -666,13 +707,13 @@ func (c *Client) createOrGetMapping(ctx context.Context) (external netip.AddrPor n, src, err := uc.ReadFromUDPAddrPort(res) if err != nil { if ctx.Err() == context.Canceled { - return netip.AddrPort{}, err + return nil, netip.AddrPort{}, err } // fallback to UPnP portmapping if mapping, ok := c.getUPnPPortMapping(ctx, gw, internalAddr, prevPort); ok { - return mapping, nil + return nil, mapping, nil } - return netip.AddrPort{}, NoMappingError{ErrNoPortMappingServices} + return nil, netip.AddrPort{}, NoMappingError{ErrNoPortMappingServices} } src = netaddr.Unmap(src) if !src.IsValid() { @@ -688,7 +729,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (external netip.AddrPor continue } if pres.ResultCode != 0 { - return netip.AddrPort{}, NoMappingError{fmt.Errorf("PMP response Op=0x%x,Res=0x%x", pres.OpCode, pres.ResultCode)} + return nil, netip.AddrPort{}, NoMappingError{fmt.Errorf("PMP response Op=0x%x,Res=0x%x", pres.OpCode, pres.ResultCode)} } if pres.OpCode == pmpOpReply|pmpOpMapPublicAddr { m.external = netip.AddrPortFrom(pres.PublicAddr, m.external.Port()) @@ -706,7 +747,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (external netip.AddrPor if err != nil { c.logf("failed to get PCP mapping: %v", err) // PCP should only have a single packet response - return netip.AddrPort{}, NoMappingError{ErrNoPortMappingServices} + return nil, netip.AddrPort{}, NoMappingError{ErrNoPortMappingServices} } pcpMapping.c = c pcpMapping.internal = m.internal @@ -714,10 +755,10 @@ func (c *Client) createOrGetMapping(ctx context.Context) (external netip.AddrPor c.mu.Lock() defer c.mu.Unlock() c.mapping = pcpMapping - return pcpMapping.external, nil + return pcpMapping, pcpMapping.external, nil default: c.logf("unknown PMP/PCP version number: %d %v", version, res[:n]) - return netip.AddrPort{}, NoMappingError{ErrNoPortMappingServices} + return nil, netip.AddrPort{}, NoMappingError{ErrNoPortMappingServices} } } @@ -725,7 +766,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (external 
netip.AddrPor c.mu.Lock() defer c.mu.Unlock() c.mapping = m - return m.external, nil + return nil, m.external, nil } } } diff --git a/net/portmapper/portmapper_test.go b/net/portmapper/portmapper_test.go index c815f21d1..32302e461 100644 --- a/net/portmapper/portmapper_test.go +++ b/net/portmapper/portmapper_test.go @@ -12,6 +12,7 @@ import ( "time" "tailscale.com/control/controlknobs" + "tailscale.com/util/eventbus" ) func TestCreateOrGetMapping(t *testing.T) { @@ -25,7 +26,7 @@ func TestCreateOrGetMapping(t *testing.T) { if i > 0 { time.Sleep(100 * time.Millisecond) } - ext, err := c.createOrGetMapping(context.Background()) + _, ext, err := c.createOrGetMapping(context.Background()) t.Logf("Got: %v, %v", ext, err) } } @@ -55,7 +56,7 @@ func TestClientProbeThenMap(t *testing.T) { c.SetLocalPort(1234) res, err := c.Probe(context.Background()) t.Logf("Probe: %+v, %v", res, err) - ext, err := c.createOrGetMapping(context.Background()) + _, ext, err := c.createOrGetMapping(context.Background()) t.Logf("createOrGetMapping: %v, %v", ext, err) } @@ -66,9 +67,8 @@ func TestProbeIntegration(t *testing.T) { } defer igd.Close() - c := newTestClient(t, igd) + c := newTestClient(t, igd, nil) t.Logf("Listening on pxp=%v, upnp=%v", c.testPxPPort, c.testUPnPPort) - defer c.Close() res, err := c.Probe(context.Background()) if err != nil { @@ -101,8 +101,7 @@ func TestPCPIntegration(t *testing.T) { } defer igd.Close() - c := newTestClient(t, igd) - defer c.Close() + c := newTestClient(t, igd, nil) res, err := c.Probe(context.Background()) if err != nil { t.Fatalf("probe failed: %v", err) @@ -114,7 +113,7 @@ func TestPCPIntegration(t *testing.T) { t.Fatalf("probe did not see pcp: %+v", res) } - external, err := c.createOrGetMapping(context.Background()) + _, external, err := c.createOrGetMapping(context.Background()) if err != nil { t.Fatalf("failed to get mapping: %v", err) } @@ -136,3 +135,29 @@ func TestGetUPnPErrorsMetric(t *testing.T) { getUPnPErrorsMetric(0) getUPnPErrorsMetric(-100) } + +func TestUpdateEvent(t *testing.T) { + igd, err := NewTestIGD(t.Logf, TestIGDOptions{PCP: true}) + if err != nil { + t.Fatalf("Create test gateway: %v", err) + } + + bus := eventbus.New() + defer bus.Close() + + sub := eventbus.Subscribe[Mapping](bus.Client("TestUpdateEvent")) + c := newTestClient(t, igd, bus) + if _, err := c.Probe(t.Context()); err != nil { + t.Fatalf("Probe failed: %v", err) + } + c.GetCachedMappingOrStartCreatingOne() + + select { + case evt := <-sub.Events(): + t.Logf("Received portmap update: %+v", evt) + case <-sub.Done(): + t.Error("Subscriber closed prematurely") + case <-time.After(5 * time.Second): + t.Error("Timed out waiting for an update event") + } +} diff --git a/net/portmapper/select_test.go b/net/portmapper/select_test.go index 9e99c9a9d..6c210d70a 100644 --- a/net/portmapper/select_test.go +++ b/net/portmapper/select_test.go @@ -163,9 +163,8 @@ func TestSelectBestService(t *testing.T) { Desc: rootDesc, Control: tt.control, }) - c := newTestClient(t, igd) + c := newTestClient(t, igd, nil) t.Logf("Listening on upnp=%v", c.testUPnPPort) - defer c.Close() // Ensure that we're using the HTTP client that talks to our test IGD server ctx := context.Background() diff --git a/net/portmapper/upnp_test.go b/net/portmapper/upnp_test.go index 0c296813f..1e1278abc 100644 --- a/net/portmapper/upnp_test.go +++ b/net/portmapper/upnp_test.go @@ -586,9 +586,8 @@ func TestGetUPnPPortMapping(t *testing.T) { }, }) - c := newTestClient(t, igd) + c := newTestClient(t, igd, nil) t.Logf("Listening on 
upnp=%v", c.testUPnPPort) - defer c.Close() c.debug.VerboseLogs = true @@ -689,10 +688,9 @@ func TestGetUPnPPortMapping_LeaseDuration(t *testing.T) { }) ctx := context.Background() - c := newTestClient(t, igd) + c := newTestClient(t, igd, nil) c.debug.VerboseLogs = true t.Logf("Listening on upnp=%v", c.testUPnPPort) - defer c.Close() // Actually test the UPnP port mapping. mustProbeUPnP(t, ctx, c) @@ -735,8 +733,7 @@ func TestGetUPnPPortMapping_NoValidServices(t *testing.T) { Desc: noSupportedServicesRootDesc, }) - c := newTestClient(t, igd) - defer c.Close() + c := newTestClient(t, igd, nil) c.debug.VerboseLogs = true ctx := context.Background() @@ -778,8 +775,7 @@ func TestGetUPnPPortMapping_Legacy(t *testing.T) { }, }) - c := newTestClient(t, igd) - defer c.Close() + c := newTestClient(t, igd, nil) c.debug.VerboseLogs = true ctx := context.Background() @@ -806,9 +802,8 @@ func TestGetUPnPPortMappingNoResponses(t *testing.T) { } defer igd.Close() - c := newTestClient(t, igd) + c := newTestClient(t, igd, nil) t.Logf("Listening on upnp=%v", c.testUPnPPort) - defer c.Close() c.debug.VerboseLogs = true @@ -939,8 +934,7 @@ func TestGetUPnPPortMapping_Invalid(t *testing.T) { }, }) - c := newTestClient(t, igd) - defer c.Close() + c := newTestClient(t, igd, nil) c.debug.VerboseLogs = true ctx := context.Background() From e8cacd2a32d9bf4126c1f3bf5b4125352692b4be Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 19 Mar 2025 10:17:13 -0700 Subject: [PATCH 0768/1708] cmd/tailscaled: clean up unnecessary logf indirection #cleanup Signed-off-by: David Anderson --- cmd/tailscaled/tailscaled.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 191ed64c9..d7de2a772 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -391,9 +391,7 @@ func run() (err error) { var netMon *netmon.Monitor isWinSvc := isWindowsService() if !isWinSvc { - netMon, err = netmon.New(func(format string, args ...any) { - logf(format, args...) - }) + netMon, err = netmon.New(logf) if err != nil { return fmt.Errorf("netmon.New: %w", err) } From 6d6f69e7358f52b56ad8365f465aefaa95a7de0c Mon Sep 17 00:00:00 2001 From: David Anderson Date: Thu, 20 Mar 2025 09:19:47 -0700 Subject: [PATCH 0769/1708] derp/derphttp: remove ban on websockets dependency The event bus's debug page uses websockets. 
Updates #15160 Signed-off-by: David Anderson --- cmd/tailscaled/tailscaled.go | 1 - control/controlhttp/http_test.go | 12 ------------ derp/derphttp/derphttp_test.go | 22 ---------------------- 3 files changed, 35 deletions(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index d7de2a772..c508a9aa3 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -713,7 +713,6 @@ var tstunNew = tstun.New func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack bool, err error) { conf := wgengine.Config{ - EventBus: sys.Bus.Get(), ListenPort: args.port, NetMon: sys.NetMon.Get(), HealthTracker: sys.HealthTracker(), diff --git a/control/controlhttp/http_test.go b/control/controlhttp/http_test.go index f556640f8..daf262023 100644 --- a/control/controlhttp/http_test.go +++ b/control/controlhttp/http_test.go @@ -32,7 +32,6 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/tailcfg" "tailscale.com/tstest" - "tailscale.com/tstest/deptest" "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" @@ -822,14 +821,3 @@ func (c *closeTrackConn) Close() error { c.d.noteClose(c) return c.Conn.Close() } - -func TestDeps(t *testing.T) { - deptest.DepChecker{ - GOOS: "darwin", - GOARCH: "arm64", - BadDeps: map[string]string{ - // Only the controlhttpserver needs WebSockets... - "github.com/coder/websocket": "controlhttp client shouldn't need websockets", - }, - }.Check(t) -} diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index cf6032a5e..cfb3676cd 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -17,9 +17,7 @@ import ( "tailscale.com/derp" "tailscale.com/net/netmon" - "tailscale.com/tstest/deptest" "tailscale.com/types/key" - "tailscale.com/util/set" ) func TestSendRecv(t *testing.T) { @@ -487,23 +485,3 @@ func TestProbe(t *testing.T) { } } } - -func TestDeps(t *testing.T) { - deptest.DepChecker{ - GOOS: "darwin", - GOARCH: "arm64", - BadDeps: map[string]string{ - "github.com/coder/websocket": "shouldn't link websockets except on js/wasm", - }, - }.Check(t) - - deptest.DepChecker{ - GOOS: "darwin", - GOARCH: "arm64", - Tags: "ts_debug_websockets", - WantDeps: set.Of( - "github.com/coder/websocket", - ), - }.Check(t) - -} From 5399fa159af99380a70347864a4d4acb13efef6b Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 19 Mar 2025 10:47:25 -0700 Subject: [PATCH 0770/1708] net/netmon: publish events to event bus Updates #15160 Signed-off-by: David Anderson --- cmd/derper/depaware.txt | 3 +- cmd/tailscale/cli/debug.go | 6 ++- cmd/tailscale/cli/netcheck.go | 5 ++- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/debug.go | 6 ++- cmd/tailscaled/tailscaled.go | 2 +- cmd/tailscaled/tailscaled_windows.go | 2 +- ipn/ipnlocal/local_test.go | 4 +- ipn/ipnlocal/loglines_test.go | 2 +- ipn/ipnlocal/peerapi_test.go | 19 ++++++++-- ipn/ipnlocal/serve_test.go | 1 + ipn/ipnlocal/state_test.go | 6 +-- ipn/ipnserver/server_test.go | 2 +- ipn/localapi/localapi.go | 5 ++- ipn/localapi/localapi_test.go | 2 +- net/dns/resolver/forwarder_test.go | 5 ++- net/dns/resolver/tsdns_test.go | 6 ++- net/dnsfallback/dnsfallback_test.go | 6 ++- net/netmon/loghelper_test.go | 6 ++- net/netmon/netmon.go | 13 +++++-- net/netmon/netmon_darwin.go | 3 +- net/netmon/netmon_freebsd.go | 3 +- net/netmon/netmon_linux.go | 38 ++++++++++++++++--- net/netmon/netmon_polling.go | 3 +- net/netmon/netmon_test.go | 22 +++++++++-- net/netmon/netmon_windows.go | 3 +- net/netutil/netutil_test.go | 6 ++- 
ssh/tailssh/tailssh_test.go | 2 +- tsnet/tsnet.go | 2 +- .../tailscaled_deps_test_darwin.go | 1 + .../tailscaled_deps_test_freebsd.go | 1 + .../integration/tailscaled_deps_test_linux.go | 1 + .../tailscaled_deps_test_openbsd.go | 1 + .../tailscaled_deps_test_windows.go | 1 + wgengine/magicsock/magicsock_test.go | 26 ++++++++++--- wgengine/netstack/netstack_test.go | 2 + wgengine/router/router_linux_test.go | 10 ++++- wgengine/userspace.go | 2 +- wgengine/userspace_ext_test.go | 2 + wgengine/userspace_test.go | 15 ++++++-- wgengine/watchdog_test.go | 5 ++- 41 files changed, 196 insertions(+), 56 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 085a58383..98965c6ef 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -156,6 +156,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/hostinfo+ + tailscale.com/util/eventbus from tailscale.com/net/netmon 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale tailscale.com/util/lineiter from tailscale.com/hostinfo+ @@ -309,7 +310,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa hash/fnv from google.golang.org/protobuf/internal/detrand hash/maphash from go4.org/mem html from net/http/pprof+ - html/template from tailscale.com/cmd/derper + html/template from tailscale.com/cmd/derper+ internal/abi from crypto/x509/internal/macos+ internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 9c77570d5..213a0166e 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -43,6 +43,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" "tailscale.com/util/must" ) @@ -956,7 +957,10 @@ func runTS2021(ctx context.Context, args []string) error { logf = log.Printf } - netMon, err := netmon.New(logger.WithPrefix(logf, "netmon: ")) + bus := eventbus.New() + defer bus.Close() + + netMon, err := netmon.New(bus, logger.WithPrefix(logf, "netmon: ")) if err != nil { return fmt.Errorf("creating netmon: %w", err) } diff --git a/cmd/tailscale/cli/netcheck.go b/cmd/tailscale/cli/netcheck.go index 14e337b89..3cf05a3b7 100644 --- a/cmd/tailscale/cli/netcheck.go +++ b/cmd/tailscale/cli/netcheck.go @@ -24,6 +24,7 @@ import ( "tailscale.com/net/tlsdial" "tailscale.com/tailcfg" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) var netcheckCmd = &ffcli.Command{ @@ -48,7 +49,9 @@ var netcheckArgs struct { func runNetcheck(ctx context.Context, args []string) error { logf := logger.WithPrefix(log.Printf, "portmap: ") - netMon, err := netmon.New(logf) + bus := eventbus.New() + defer bus.Close() + netMon, err := netmon.New(bus, logf) if err != nil { return err } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 1671b71a8..7f66e7700 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -163,7 +163,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/cmd/tailscale/cli+ - 
tailscale.com/util/eventbus from tailscale.com/net/portmapper + tailscale.com/util/eventbus from tailscale.com/net/portmapper+ tailscale.com/util/groupmember from tailscale.com/client/web 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ diff --git a/cmd/tailscaled/debug.go b/cmd/tailscaled/debug.go index b41604d29..2f469a0d1 100644 --- a/cmd/tailscaled/debug.go +++ b/cmd/tailscaled/debug.go @@ -27,6 +27,7 @@ import ( "tailscale.com/net/tshttpproxy" "tailscale.com/tailcfg" "tailscale.com/types/key" + "tailscale.com/util/eventbus" ) var debugArgs struct { @@ -72,11 +73,14 @@ func debugMode(args []string) error { } func runMonitor(ctx context.Context, loop bool) error { + b := eventbus.New() + defer b.Close() + dump := func(st *netmon.State) { j, _ := json.MarshalIndent(st, "", " ") os.Stderr.Write(j) } - mon, err := netmon.New(log.Printf) + mon, err := netmon.New(b, log.Printf) if err != nil { return err } diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index c508a9aa3..1c5236123 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -391,7 +391,7 @@ func run() (err error) { var netMon *netmon.Monitor isWinSvc := isWindowsService() if !isWinSvc { - netMon, err = netmon.New(logf) + netMon, err = netmon.New(sys.Bus.Get(), logf) if err != nil { return fmt.Errorf("netmon.New: %w", err) } diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 681cb3318..1b5068892 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -329,7 +329,7 @@ func beWindowsSubprocess() bool { } sys := tsd.NewSystem() - netMon, err := netmon.New(log.Printf) + netMon, err := netmon.New(sys.Bus.Get(), log.Printf) if err != nil { log.Fatalf("Could not create netMon: %v", err) } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 5263584b6..3b384fd96 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -448,7 +448,7 @@ func newTestLocalBackendWithSys(t testing.TB, sys *tsd.System) *LocalBackend { sys.Set(new(mem.Store)) } if _, ok := sys.Engine.GetOK(); !ok { - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -4421,7 +4421,7 @@ func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys sys.Set(store) } if _, hasEngine := sys.Engine.GetOK(); !hasEngine { - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } diff --git a/ipn/ipnlocal/loglines_test.go b/ipn/ipnlocal/loglines_test.go index f4a77824e..5bea6cabc 100644 --- a/ipn/ipnlocal/loglines_test.go +++ b/ipn/ipnlocal/loglines_test.go @@ -50,7 +50,7 @@ func TestLocalLogLines(t *testing.T) { sys := tsd.NewSystem() store := new(mem.Store) sys.Set(store) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatal(err) } diff --git 
a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index ff9b62769..7a3f05a9c 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -34,6 +34,7 @@ import ( "tailscale.com/tstest" "tailscale.com/types/logger" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus" "tailscale.com/util/must" "tailscale.com/util/usermetric" "tailscale.com/wgengine" @@ -643,9 +644,12 @@ func TestPeerAPIReplyToDNSQueries(t *testing.T) { h.isSelf = false h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") + bus := eventbus.New() + defer bus.Close() + ht := new(health.Tracker) reg := new(usermetric.Registry) - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, bus) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) h.ps = &peerAPIServer{ b: &LocalBackend{ @@ -695,9 +699,12 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") + bus := eventbus.New() + defer bus.Close() + ht := new(health.Tracker) reg := new(usermetric.Registry) - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, bus) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) var a *appc.AppConnector if shouldStore { @@ -768,10 +775,12 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") + bus := eventbus.New() + defer bus.Close() rc := &appctest.RouteCollector{} ht := new(health.Tracker) reg := new(usermetric.Registry) - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, bus) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) var a *appc.AppConnector if shouldStore { @@ -833,10 +842,12 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") + bus := eventbus.New() + defer bus.Close() ht := new(health.Tracker) reg := new(usermetric.Registry) rc := &appctest.RouteCollector{} - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, bus) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) var a *appc.AppConnector if shouldStore { diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index 5e148a8a4..0279ea9be 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -882,6 +882,7 @@ func newTestBackend(t *testing.T) *LocalBackend { SetSubsystem: sys.Set, HealthTracker: sys.HealthTracker(), Metrics: sys.UserMetricsRegistry(), + EventBus: sys.Bus.Get(), }) if err != nil { t.Fatal(err) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index b1bab2a99..a91ec84cb 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -326,7 +326,7 @@ func TestStateMachine(t *testing.T) { sys := tsd.NewSystem() store := new(testStateStorage) sys.Set(store) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -964,7 +964,7 @@ func 
TestEditPrefsHasNoKeys(t *testing.T) { logf := tstest.WhileTestRunningLogger(t) sys := tsd.NewSystem() sys.Set(new(mem.Store)) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -1045,7 +1045,7 @@ func TestWGEngineStatusRace(t *testing.T) { sys := tsd.NewSystem() sys.Set(new(mem.Store)) - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.Bus.Get()) c.Assert(err, qt.IsNil) t.Cleanup(eng.Close) sys.Set(eng) diff --git a/ipn/ipnserver/server_test.go b/ipn/ipnserver/server_test.go index e34172ff9..9340fd1c6 100644 --- a/ipn/ipnserver/server_test.go +++ b/ipn/ipnserver/server_test.go @@ -522,7 +522,7 @@ func newLocalBackendWithTestControl(tb testing.TB, newControl newControlClientFn sys.Set(store) logf := testLogger(tb, enableLogging) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { tb.Fatalf("NewFakeUserspaceEngine: %v", err) } diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 5901855e3..40e3b7586 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -56,6 +56,7 @@ import ( "tailscale.com/types/ptr" "tailscale.com/types/tkatype" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/httphdr" "tailscale.com/util/httpm" "tailscale.com/util/mak" @@ -840,7 +841,9 @@ func (h *Handler) serveDebugPortmap(w http.ResponseWriter, r *http.Request) { }) defer c.Close() - netMon, err := netmon.New(logger.WithPrefix(logf, "monitor: ")) + bus := eventbus.New() + defer bus.Close() + netMon, err := netmon.New(bus, logger.WithPrefix(logf, "monitor: ")) if err != nil { logf("error creating monitor: %v", err) return diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index 4f304bb1b..970f798d0 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -339,7 +339,7 @@ func newTestLocalBackend(t testing.TB) *ipnlocal.LocalBackend { sys := tsd.NewSystem() store := new(mem.Store) sys.Set(store) - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index f3e592d4f..f7cda15f6 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -29,6 +29,7 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/tstest" "tailscale.com/types/dnstype" + "tailscale.com/util/eventbus" ) func (rr resolverAndDelay) String() string { @@ -454,7 +455,9 @@ func makeLargeResponse(tb testing.TB, domain string) (request, response []byte) func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports ...uint16) ([]byte, error) { logf := tstest.WhileTestRunningLogger(tb) - netMon, err := netmon.New(logf) + bus := eventbus.New() + defer bus.Close() + netMon, err := netmon.New(bus, logf) if err != nil { tb.Fatal(err) } diff --git a/net/dns/resolver/tsdns_test.go 
b/net/dns/resolver/tsdns_test.go index d7b9fb360..de08450d2 100644 --- a/net/dns/resolver/tsdns_test.go +++ b/net/dns/resolver/tsdns_test.go @@ -31,6 +31,7 @@ import ( "tailscale.com/types/dnstype" "tailscale.com/types/logger" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" ) var ( @@ -1059,7 +1060,10 @@ func TestForwardLinkSelection(t *testing.T) { // routes differently. specialIP := netaddr.IPv4(1, 2, 3, 4) - netMon, err := netmon.New(logger.WithPrefix(t.Logf, ".... netmon: ")) + bus := eventbus.New() + defer bus.Close() + + netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, ".... netmon: ")) if err != nil { t.Fatal(err) } diff --git a/net/dnsfallback/dnsfallback_test.go b/net/dnsfallback/dnsfallback_test.go index 16f5027d4..7f8810574 100644 --- a/net/dnsfallback/dnsfallback_test.go +++ b/net/dnsfallback/dnsfallback_test.go @@ -15,6 +15,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/tailcfg" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) func TestGetDERPMap(t *testing.T) { @@ -185,7 +186,10 @@ func TestLookup(t *testing.T) { logf, closeLogf := logger.LogfCloser(t.Logf) defer closeLogf() - netMon, err := netmon.New(logf) + bus := eventbus.New() + defer bus.Close() + + netMon, err := netmon.New(bus, logf) if err != nil { t.Fatal(err) } diff --git a/net/netmon/loghelper_test.go b/net/netmon/loghelper_test.go index 31777f4bc..44aa46783 100644 --- a/net/netmon/loghelper_test.go +++ b/net/netmon/loghelper_test.go @@ -7,10 +7,14 @@ import ( "bytes" "fmt" "testing" + + "tailscale.com/util/eventbus" ) func TestLinkChangeLogLimiter(t *testing.T) { - mon, err := New(t.Logf) + bus := eventbus.New() + defer bus.Close() + mon, err := New(bus, t.Logf) if err != nil { t.Fatal(err) } diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index bd3d13d66..3f825bc97 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -16,6 +16,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/set" ) @@ -50,7 +51,10 @@ type osMon interface { // Monitor represents a monitoring instance. type Monitor struct { - logf logger.Logf + logf logger.Logf + b *eventbus.Client + changed *eventbus.Publisher[*ChangeDelta] + om osMon // nil means not supported on this platform change chan bool // send false to wake poller, true to also force ChangeDeltas be sent stop chan struct{} // closed on Stop @@ -114,21 +118,23 @@ type ChangeDelta struct { // New instantiates and starts a monitoring instance. // The returned monitor is inactive until it's started by the Start method. // Use RegisterChangeCallback to get notified of network changes. 
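[Editor's note: a minimal, hedged sketch of the new call pattern this hunk introduces — it is not part of the patch. It assumes the bus-first `netmon.New(bus, logf)` signature added below and the `*ChangeDelta` values that `handlePotentialChange` now publishes; the client name "example" is illustrative.]

```go
package main

import (
	"log"

	"tailscale.com/net/netmon"
	"tailscale.com/util/eventbus"
)

func main() {
	bus := eventbus.New()
	defer bus.Close()

	// New now takes the event bus first, then the logger.
	mon, err := netmon.New(bus, log.Printf)
	if err != nil {
		log.Fatal(err)
	}
	defer mon.Close()
	mon.Start()

	// Instead of (or in addition to) RegisterChangeCallback, a consumer can
	// subscribe to the *ChangeDelta values published on the bus.
	sub := eventbus.Subscribe[*netmon.ChangeDelta](bus.Client("example"))
	defer sub.Close()
	for {
		select {
		case delta := <-sub.Events():
			log.Printf("network changed: %+v", delta)
		case <-sub.Done():
			return
		}
	}
}
```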
-func New(logf logger.Logf) (*Monitor, error) { +func New(bus *eventbus.Bus, logf logger.Logf) (*Monitor, error) { logf = logger.WithPrefix(logf, "monitor: ") m := &Monitor{ logf: logf, + b: bus.Client("netmon"), change: make(chan bool, 1), stop: make(chan struct{}), lastWall: wallTime(), } + m.changed = eventbus.Publish[*ChangeDelta](m.b) st, err := m.interfaceStateUncached() if err != nil { return nil, err } m.ifState = st - m.om, err = newOSMon(logf, m) + m.om, err = newOSMon(bus, logf, m) if err != nil { return nil, err } @@ -465,6 +471,7 @@ func (m *Monitor) handlePotentialChange(newState *State, forceCallbacks bool) { if delta.TimeJumped { metricChangeTimeJump.Add(1) } + m.changed.Publish(delta) for _, cb := range m.cbs { go cb(delta) } diff --git a/net/netmon/netmon_darwin.go b/net/netmon/netmon_darwin.go index 8a521919b..9c5e76475 100644 --- a/net/netmon/netmon_darwin.go +++ b/net/netmon/netmon_darwin.go @@ -13,6 +13,7 @@ import ( "golang.org/x/sys/unix" "tailscale.com/net/netaddr" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) const debugRouteMessages = false @@ -24,7 +25,7 @@ type unspecifiedMessage struct{} func (unspecifiedMessage) ignore() bool { return false } -func newOSMon(logf logger.Logf, _ *Monitor) (osMon, error) { +func newOSMon(_ *eventbus.Bus, logf logger.Logf, _ *Monitor) (osMon, error) { fd, err := unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, 0) if err != nil { return nil, err diff --git a/net/netmon/netmon_freebsd.go b/net/netmon/netmon_freebsd.go index 30480a1d3..842cbdb0d 100644 --- a/net/netmon/netmon_freebsd.go +++ b/net/netmon/netmon_freebsd.go @@ -10,6 +10,7 @@ import ( "strings" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) // unspecifiedMessage is a minimal message implementation that should not @@ -24,7 +25,7 @@ type devdConn struct { conn net.Conn } -func newOSMon(logf logger.Logf, m *Monitor) (osMon, error) { +func newOSMon(_ *eventbus.Bus, logf logger.Logf, m *Monitor) (osMon, error) { conn, err := net.Dial("unixpacket", "/var/run/devd.seqpacket.pipe") if err != nil { logf("devd dial error: %v, falling back to polling method", err) diff --git a/net/netmon/netmon_linux.go b/net/netmon/netmon_linux.go index dd23dd342..659fcc74b 100644 --- a/net/netmon/netmon_linux.go +++ b/net/netmon/netmon_linux.go @@ -16,6 +16,7 @@ import ( "tailscale.com/envknob" "tailscale.com/net/tsaddr" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) var debugNetlinkMessages = envknob.RegisterBool("TS_DEBUG_NETLINK") @@ -27,15 +28,26 @@ type unspecifiedMessage struct{} func (unspecifiedMessage) ignore() bool { return false } +// RuleDeleted reports that one of Tailscale's policy routing rules +// was deleted. +type RuleDeleted struct { + // Table is the table number that the deleted rule referenced. + Table uint8 + // Priority is the lookup priority of the deleted rule. + Priority uint32 +} + // nlConn wraps a *netlink.Conn and returns a monitor.Message // instead of a netlink.Message. Currently, messages are discarded, // but down the line, when messages trigger different logic depending // on the type of event, this provides the capability of handling // each architecture-specific message in a generic fashion. 
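[Editor's note: a hedged, Linux-only sketch of consuming the new RuleDeleted events published by nlConn above — not part of the patch. The client name "iprule-watcher" and the helper name are illustrative; a real consumer would re-install the Tailscale policy rule when it sees the event.]

```go
package example

import (
	"tailscale.com/net/netmon"
	"tailscale.com/types/logger"
	"tailscale.com/util/eventbus"
)

// watchDeletedRules logs every policy-rule deletion reported over the bus.
func watchDeletedRules(bus *eventbus.Bus, logf logger.Logf) {
	sub := eventbus.Subscribe[netmon.RuleDeleted](bus.Client("iprule-watcher"))
	go func() {
		defer sub.Close()
		for {
			select {
			case rd := <-sub.Events():
				logf("ip rule deleted: table=%d priority=%d", rd.Table, rd.Priority)
				// Re-add the rule here if it was one of ours.
			case <-sub.Done():
				return
			}
		}
	}()
}
```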
type nlConn struct { - logf logger.Logf - conn *netlink.Conn - buffered []netlink.Message + busClient *eventbus.Client + rulesDeleted *eventbus.Publisher[RuleDeleted] + logf logger.Logf + conn *netlink.Conn + buffered []netlink.Message // addrCache maps interface indices to a set of addresses, and is // used to suppress duplicate RTM_NEWADDR messages. It is populated @@ -44,7 +56,7 @@ type nlConn struct { addrCache map[uint32]map[netip.Addr]bool } -func newOSMon(logf logger.Logf, m *Monitor) (osMon, error) { +func newOSMon(bus *eventbus.Bus, logf logger.Logf, m *Monitor) (osMon, error) { conn, err := netlink.Dial(unix.NETLINK_ROUTE, &netlink.Config{ // Routes get us most of the events of interest, but we need // address as well to cover things like DHCP deciding to give @@ -59,12 +71,22 @@ func newOSMon(logf logger.Logf, m *Monitor) (osMon, error) { logf("monitor_linux: AF_NETLINK RTMGRP failed, falling back to polling") return newPollingMon(logf, m) } - return &nlConn{logf: logf, conn: conn, addrCache: make(map[uint32]map[netip.Addr]bool)}, nil + client := bus.Client("netmon-iprules") + return &nlConn{ + busClient: client, + rulesDeleted: eventbus.Publish[RuleDeleted](client), + logf: logf, + conn: conn, + addrCache: make(map[uint32]map[netip.Addr]bool), + }, nil } func (c *nlConn) IsInterestingInterface(iface string) bool { return true } -func (c *nlConn) Close() error { return c.conn.Close() } +func (c *nlConn) Close() error { + c.busClient.Close() + return c.conn.Close() +} func (c *nlConn) Receive() (message, error) { if len(c.buffered) == 0 { @@ -219,6 +241,10 @@ func (c *nlConn) Receive() (message, error) { // On `ip -4 rule del pref 5210 table main`, logs: // monitor: ip rule deleted: {Family:2 DstLength:0 SrcLength:0 Tos:0 Table:254 Protocol:0 Scope:0 Type:1 Flags:0 Attributes:{Dst: Src: Gateway: OutIface:0 Priority:5210 Table:254 Mark:4294967295 Expires: Metrics: Multipath:[]}} } + c.rulesDeleted.Publish(RuleDeleted{ + Table: rmsg.Table, + Priority: rmsg.Attributes.Priority, + }) rdm := ipRuleDeletedMessage{ table: rmsg.Table, priority: rmsg.Attributes.Priority, diff --git a/net/netmon/netmon_polling.go b/net/netmon/netmon_polling.go index 3d6f94731..3b5ef6fe9 100644 --- a/net/netmon/netmon_polling.go +++ b/net/netmon/netmon_polling.go @@ -7,9 +7,10 @@ package netmon import ( "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) -func newOSMon(logf logger.Logf, m *Monitor) (osMon, error) { +func newOSMon(_ *eventbus.Bus, logf logger.Logf, m *Monitor) (osMon, error) { return newPollingMon(logf, m) } diff --git a/net/netmon/netmon_test.go b/net/netmon/netmon_test.go index ce55d1946..a9af8fb00 100644 --- a/net/netmon/netmon_test.go +++ b/net/netmon/netmon_test.go @@ -11,11 +11,15 @@ import ( "testing" "time" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" ) func TestMonitorStartClose(t *testing.T) { - mon, err := New(t.Logf) + bus := eventbus.New() + defer bus.Close() + + mon, err := New(bus, t.Logf) if err != nil { t.Fatal(err) } @@ -26,7 +30,10 @@ func TestMonitorStartClose(t *testing.T) { } func TestMonitorJustClose(t *testing.T) { - mon, err := New(t.Logf) + bus := eventbus.New() + defer bus.Close() + + mon, err := New(bus, t.Logf) if err != nil { t.Fatal(err) } @@ -36,7 +43,10 @@ func TestMonitorJustClose(t *testing.T) { } func TestMonitorInjectEvent(t *testing.T) { - mon, err := New(t.Logf) + bus := eventbus.New() + defer bus.Close() + + mon, err := New(bus, t.Logf) if err != nil { t.Fatal(err) } @@ -71,7 +81,11 @@ func TestMonitorMode(t *testing.T) { 
default: t.Skipf(`invalid --monitor value: must be "raw" or "callback"`) } - mon, err := New(t.Logf) + + bus := eventbus.New() + defer bus.Close() + + mon, err := New(bus, t.Logf) if err != nil { t.Fatal(err) } diff --git a/net/netmon/netmon_windows.go b/net/netmon/netmon_windows.go index ddf13a2e4..718724b6d 100644 --- a/net/netmon/netmon_windows.go +++ b/net/netmon/netmon_windows.go @@ -13,6 +13,7 @@ import ( "golang.zx2c4.com/wireguard/windows/tunnel/winipcfg" "tailscale.com/net/tsaddr" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) var ( @@ -45,7 +46,7 @@ type winMon struct { noDeadlockTicker *time.Ticker } -func newOSMon(logf logger.Logf, pm *Monitor) (osMon, error) { +func newOSMon(_ *eventbus.Bus, logf logger.Logf, pm *Monitor) (osMon, error) { m := &winMon{ logf: logf, isActive: pm.isActive, diff --git a/net/netutil/netutil_test.go b/net/netutil/netutil_test.go index fdc26b02f..0523946e6 100644 --- a/net/netutil/netutil_test.go +++ b/net/netutil/netutil_test.go @@ -10,6 +10,7 @@ import ( "testing" "tailscale.com/net/netmon" + "tailscale.com/util/eventbus" ) type conn struct { @@ -72,7 +73,10 @@ func TestCheckReversePathFiltering(t *testing.T) { if runtime.GOOS != "linux" { t.Skipf("skipping on %s", runtime.GOOS) } - netMon, err := netmon.New(t.Logf) + bus := eventbus.New() + defer bus.Close() + + netMon, err := netmon.New(bus, t.Logf) if err != nil { t.Fatal(err) } diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 3dbd16047..980c77414 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -1038,7 +1038,7 @@ func TestSSHAuthFlow(t *testing.T) { func TestSSH(t *testing.T) { var logf logger.Logf = t.Logf sys := tsd.NewSystem() - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatal(err) } diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 67afd674a..f97598075 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -567,7 +567,7 @@ func (s *Server) start() (reterr error) { return err } - s.netMon, err = netmon.New(tsLogf) + s.netMon, err = netmon.New(sys.Bus.Get(), tsLogf) if err != nil { return err } diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index 470085f5e..321ba2566 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -48,6 +48,7 @@ import ( _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" + _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/version" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index 470085f5e..321ba2566 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -48,6 +48,7 @@ import ( _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" + _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/version" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index 470085f5e..321ba2566 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ 
b/tstest/integration/tailscaled_deps_test_linux.go @@ -48,6 +48,7 @@ import ( _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" + _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/version" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index 470085f5e..321ba2566 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -48,6 +48,7 @@ import ( _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" + _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/version" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index 30ce0892e..79e2e05a7 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -57,6 +57,7 @@ import ( _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" + _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osdiag" _ "tailscale.com/util/osshare" diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 090c1218f..f50f21f56 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -62,6 +62,7 @@ import ( "tailscale.com/types/nettype" "tailscale.com/types/ptr" "tailscale.com/util/cibuild" + "tailscale.com/util/eventbus" "tailscale.com/util/must" "tailscale.com/util/racebuild" "tailscale.com/util/set" @@ -173,7 +174,10 @@ func newMagicStack(t testing.TB, logf logger.Logf, l nettype.PacketListener, der func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap, privateKey key.NodePrivate) *magicStack { t.Helper() - netMon, err := netmon.New(logf) + bus := eventbus.New() + defer bus.Close() + + netMon, err := netmon.New(bus, logf) if err != nil { t.Fatalf("netmon.New: %v", err) } @@ -390,7 +394,10 @@ func TestNewConn(t *testing.T) { } } - netMon, err := netmon.New(logger.WithPrefix(t.Logf, "... netmon: ")) + bus := eventbus.New() + defer bus.Close() + + netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, "... netmon: ")) if err != nil { t.Fatalf("netmon.New: %v", err) } @@ -523,7 +530,10 @@ func TestDeviceStartStop(t *testing.T) { tstest.PanicOnLog() tstest.ResourceCheck(t) - netMon, err := netmon.New(logger.WithPrefix(t.Logf, "... netmon: ")) + bus := eventbus.New() + defer bus.Close() + + netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, "... netmon: ")) if err != nil { t.Fatalf("netmon.New: %v", err) } @@ -1362,7 +1372,10 @@ func newTestConn(t testing.TB) *Conn { t.Helper() port := pickPort(t) - netMon, err := netmon.New(logger.WithPrefix(t.Logf, "... netmon: ")) + bus := eventbus.New() + defer bus.Close() + + netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, "... 
netmon: ")) if err != nil { t.Fatalf("netmon.New: %v", err) } @@ -3117,7 +3130,10 @@ func TestMaybeRebindOnError(t *testing.T) { } func TestNetworkDownSendErrors(t *testing.T) { - netMon := must.Get(netmon.New(t.Logf)) + bus := eventbus.New() + defer bus.Close() + + netMon := must.Get(netmon.New(bus, t.Logf)) defer netMon.Close() reg := new(usermetric.Registry) diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go index c34ec7a25..584b3babc 100644 --- a/wgengine/netstack/netstack_test.go +++ b/wgengine/netstack/netstack_test.go @@ -52,6 +52,7 @@ func TestInjectInboundLeak(t *testing.T) { SetSubsystem: sys.Set, HealthTracker: sys.HealthTracker(), Metrics: sys.UserMetricsRegistry(), + EventBus: sys.Bus.Get(), }) if err != nil { t.Fatal(err) @@ -111,6 +112,7 @@ func makeNetstack(tb testing.TB, config func(*Impl)) *Impl { SetSubsystem: sys.Set, HealthTracker: sys.HealthTracker(), Metrics: sys.UserMetricsRegistry(), + EventBus: sys.Bus.Get(), }) if err != nil { tb.Fatal(err) diff --git a/wgengine/router/router_linux_test.go b/wgengine/router/router_linux_test.go index 9a159aea8..7ddd7385d 100644 --- a/wgengine/router/router_linux_test.go +++ b/wgengine/router/router_linux_test.go @@ -27,6 +27,7 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/tstest" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" "tailscale.com/util/linuxfw" "tailscale.com/version/distro" ) @@ -363,7 +364,9 @@ ip route add throw 192.168.0.0/24 table 52` + basic, }, } - mon, err := netmon.New(logger.Discard) + bus := eventbus.New() + defer bus.Close() + mon, err := netmon.New(bus, logger.Discard) if err != nil { t.Fatal(err) } @@ -973,7 +976,10 @@ func newLinuxRootTest(t *testing.T) *linuxTest { logf := lt.logOutput.Logf - mon, err := netmon.New(logger.Discard) + bus := eventbus.New() + defer bus.Close() + + mon, err := netmon.New(bus, logger.Discard) if err != nil { lt.Close() t.Fatal(err) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 385f31d65..e34eae667 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -363,7 +363,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) if conf.NetMon != nil { e.netMon = conf.NetMon } else { - mon, err := netmon.New(logf) + mon, err := netmon.New(conf.EventBus, logf) if err != nil { return nil, err } diff --git a/wgengine/userspace_ext_test.go b/wgengine/userspace_ext_test.go index b0caffd1e..5e7d1ce6a 100644 --- a/wgengine/userspace_ext_test.go +++ b/wgengine/userspace_ext_test.go @@ -23,6 +23,7 @@ func TestIsNetstack(t *testing.T) { SetSubsystem: sys.Set, HealthTracker: sys.HealthTracker(), Metrics: sys.UserMetricsRegistry(), + EventBus: sys.Bus.Get(), }, ) if err != nil { @@ -74,6 +75,7 @@ func TestIsNetstackRouter(t *testing.T) { conf.SetSubsystem = sys.Set conf.HealthTracker = sys.HealthTracker() conf.Metrics = sys.UserMetricsRegistry() + conf.EventBus = sys.Bus.Get() e, err := wgengine.NewUserspaceEngine(logger.Discard, conf) if err != nil { t.Fatal(err) diff --git a/wgengine/userspace_test.go b/wgengine/userspace_test.go index 051421862..87a36c673 100644 --- a/wgengine/userspace_test.go +++ b/wgengine/userspace_test.go @@ -25,6 +25,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/netmap" "tailscale.com/types/opt" + "tailscale.com/util/eventbus" "tailscale.com/util/usermetric" "tailscale.com/wgengine/router" "tailscale.com/wgengine/wgcfg" @@ -100,9 +101,12 @@ func nodeViews(v []*tailcfg.Node) []tailcfg.NodeView { } func TestUserspaceEngineReconfig(t *testing.T) { 
+ bus := eventbus.New() + defer bus.Close() + ht := new(health.Tracker) reg := new(usermetric.Registry) - e, err := NewFakeUserspaceEngine(t.Logf, 0, ht, reg) + e, err := NewFakeUserspaceEngine(t.Logf, 0, ht, reg, bus) if err != nil { t.Fatal(err) } @@ -166,13 +170,16 @@ func TestUserspaceEnginePortReconfig(t *testing.T) { var knobs controlknobs.Knobs + bus := eventbus.New() + defer bus.Close() + // Keep making a wgengine until we find an unused port var ue *userspaceEngine ht := new(health.Tracker) reg := new(usermetric.Registry) for i := range 100 { attempt := uint16(defaultPort + i) - e, err := NewFakeUserspaceEngine(t.Logf, attempt, &knobs, ht, reg) + e, err := NewFakeUserspaceEngine(t.Logf, attempt, &knobs, ht, reg, bus) if err != nil { t.Fatal(err) } @@ -251,9 +258,11 @@ func TestUserspaceEnginePeerMTUReconfig(t *testing.T) { var knobs controlknobs.Knobs + bus := eventbus.New() + defer bus.Close() ht := new(health.Tracker) reg := new(usermetric.Registry) - e, err := NewFakeUserspaceEngine(t.Logf, 0, &knobs, ht, reg) + e, err := NewFakeUserspaceEngine(t.Logf, 0, &knobs, ht, reg, bus) if err != nil { t.Fatal(err) } diff --git a/wgengine/watchdog_test.go b/wgengine/watchdog_test.go index b05cd421f..a54a0d3fa 100644 --- a/wgengine/watchdog_test.go +++ b/wgengine/watchdog_test.go @@ -9,6 +9,7 @@ import ( "time" "tailscale.com/health" + "tailscale.com/util/eventbus" "tailscale.com/util/usermetric" ) @@ -24,9 +25,11 @@ func TestWatchdog(t *testing.T) { t.Run("default watchdog does not fire", func(t *testing.T) { t.Parallel() + bus := eventbus.New() + defer bus.Close() ht := new(health.Tracker) reg := new(usermetric.Registry) - e, err := NewFakeUserspaceEngine(t.Logf, 0, ht, reg) + e, err := NewFakeUserspaceEngine(t.Logf, 0, ht, reg, bus) if err != nil { t.Fatal(err) } From dda2c0d2c299c5fff0156ec47c6d629b86b2925c Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 24 Mar 2025 09:47:29 -0700 Subject: [PATCH 0771/1708] wgengine/magicsock: subscribe to portmapper updates When an event bus is plumbed in, use it to subscribe and react to port mapping updates instead of using the client's callback mechanism. For now, the callback remains available as a fallback when an event bus is not provided. Updates #15160 Change-Id: I026adca44bf6187692ee87ae8ec02641c12f7774 Signed-off-by: M. J. Fromberger --- wgengine/magicsock/magicsock.go | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index bd7f8c04f..c2404dd0b 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -138,6 +138,7 @@ type Conn struct { // struct. Initialized once at construction, then constant. eventBus *eventbus.Bus + eventClient *eventbus.Client logf logger.Logf epFunc func([]tailcfg.Endpoint) derpActiveFunc func() @@ -547,6 +548,31 @@ func NewConn(opts Options) (*Conn, error) { c.testOnlyPacketListener = opts.TestOnlyPacketListener c.noteRecvActivity = opts.NoteRecvActivity + // If an event bus is enabled, subscribe to portmapping changes; otherwise + // use the callback mechanism of portmapper.Client. + // + // TODO(creachadair): Remove the switch once the event bus is mandatory. 
+ onPortMapChanged := c.onPortMapChanged + if c.eventBus != nil { + c.eventClient = c.eventBus.Client("magicsock.Conn") + + pmSub := eventbus.Subscribe[portmapper.Mapping](c.eventClient) + go func() { + defer pmSub.Close() + for { + select { + case <-pmSub.Events(): + c.onPortMapChanged() + case <-pmSub.Done(): + return + } + } + }() + + // Disable the explicit callback from the portmapper, the subscriber handles it. + onPortMapChanged = nil + } + // Don't log the same log messages possibly every few seconds in our // portmapper. portmapperLogf := logger.WithPrefix(c.logf, "portmapper: ") @@ -560,7 +586,7 @@ func NewConn(opts Options) (*Conn, error) { NetMon: opts.NetMon, DebugKnobs: portMapOpts, ControlKnobs: opts.ControlKnobs, - OnChange: c.onPortMapChanged, + OnChange: onPortMapChanged, }) c.portMapper.SetGatewayLookupFunc(opts.NetMon.GatewayAndSelfIP) c.netMon = opts.NetMon @@ -2478,6 +2504,9 @@ func (c *connBind) Close() error { if c.closeDisco6 != nil { c.closeDisco6.Close() } + if c.eventClient != nil { + c.eventClient.Close() + } // Send an empty read result to unblock receiveDERP, // which will then check connBind.Closed. // connBind.Closed takes c.mu, but c.derpRecvCh is buffered. From 0c78f081a49cdff572edc75849927c29129f0593 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 15 Apr 2025 08:28:48 -0700 Subject: [PATCH 0772/1708] feature/taildrop: start moving Taildrop out of LocalBackend This adds a feature/taildrop package, a ts_omit_taildrop build tag, and starts moving code to feature/taildrop. In some cases, code remains where it was but is now behind a build tag. Future changes will move code to an extension and out of LocalBackend, etc. Updates #12614 Change-Id: Idf96c61144d1a5f707039ceb2ff59c99f5c1642f Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 2 +- cmd/k8s-operator/depaware.txt | 5 +- cmd/tailscaled/depaware.txt | 5 +- cmd/tailscaled/taildrop.go | 2 +- cmd/tailscaled/taildrop_omit.go | 15 + feature/condregister/maybe_taildrop.go | 8 + feature/taildrop/doc.go | 5 + feature/taildrop/ext.go | 54 +++ feature/taildrop/localapi.go | 429 ++++++++++++++++++ feature/taildrop/peerapi.go | 166 +++++++ feature/taildrop/peerapi_test.go | 574 +++++++++++++++++++++++++ ipn/ipnlocal/extension_host.go | 10 + ipn/ipnlocal/local.go | 243 +---------- ipn/ipnlocal/local_test.go | 48 --- ipn/ipnlocal/peerapi.go | 149 ++----- ipn/ipnlocal/peerapi_test.go | 445 ------------------- ipn/ipnlocal/taildrop.go | 254 +++++++++++ ipn/ipnlocal/taildrop_omit.go | 12 + ipn/ipnlocal/taildrop_test.go | 77 ++++ ipn/localapi/localapi.go | 413 +----------------- taildrop/taildrop.go | 6 + 21 files changed, 1676 insertions(+), 1246 deletions(-) create mode 100644 cmd/tailscaled/taildrop_omit.go create mode 100644 feature/condregister/maybe_taildrop.go create mode 100644 feature/taildrop/doc.go create mode 100644 feature/taildrop/ext.go create mode 100644 feature/taildrop/localapi.go create mode 100644 feature/taildrop/peerapi.go create mode 100644 feature/taildrop/peerapi_test.go create mode 100644 ipn/ipnlocal/taildrop_omit.go create mode 100644 ipn/ipnlocal/taildrop_test.go diff --git a/build_dist.sh b/build_dist.sh index 5b1ca75b2..f11d4aae2 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,7 +41,7 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" - tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver" + 
tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_taildrop" ;; --box) if [ ! -z "${TAGS:-}" ]; then diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index cfdb08c20..37a1be6e3 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -811,6 +811,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/tsnet tailscale.com/feature/relayserver from tailscale.com/feature/condregister + tailscale.com/feature/taildrop from tailscale.com/feature/condregister L tailscale.com/feature/tap from tailscale.com/feature/condregister tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ @@ -944,7 +945,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash - tailscale.com/util/httphdr from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/httphdr from tailscale.com/feature/taildrop tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns+ @@ -956,7 +957,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag tailscale.com/util/osshare from tailscale.com/ipn/ipnlocal tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal - tailscale.com/util/progresstracking from tailscale.com/ipn/localapi + tailscale.com/util/progresstracking from tailscale.com/feature/taildrop tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 4e6502b72..31881822f 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -269,6 +269,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/relayserver from tailscale.com/feature/condregister + tailscale.com/feature/taildrop from tailscale.com/feature/condregister L tailscale.com/feature/tap from tailscale.com/feature/condregister tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ @@ -396,7 +397,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash - tailscale.com/util/httphdr from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/httphdr from tailscale.com/feature/taildrop tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns+ @@ -408,7 +409,7 @@ 
tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag tailscale.com/util/osshare from tailscale.com/cmd/tailscaled+ tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/progresstracking from tailscale.com/ipn/localapi + tailscale.com/util/progresstracking from tailscale.com/feature/taildrop tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/taildrop.go b/cmd/tailscaled/taildrop.go index 39fe54373..3eda9bebf 100644 --- a/cmd/tailscaled/taildrop.go +++ b/cmd/tailscaled/taildrop.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build go1.19 +//go:build !ts_omit_taildrop package main diff --git a/cmd/tailscaled/taildrop_omit.go b/cmd/tailscaled/taildrop_omit.go new file mode 100644 index 000000000..3b7669391 --- /dev/null +++ b/cmd/tailscaled/taildrop_omit.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_taildrop + +package main + +import ( + "tailscale.com/ipn/ipnlocal" + "tailscale.com/types/logger" +) + +func configureTaildrop(logf logger.Logf, lb *ipnlocal.LocalBackend) { + // Nothing. +} diff --git a/feature/condregister/maybe_taildrop.go b/feature/condregister/maybe_taildrop.go new file mode 100644 index 000000000..5fd7b5f8c --- /dev/null +++ b/feature/condregister/maybe_taildrop.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_taildrop + +package condregister + +import _ "tailscale.com/feature/taildrop" diff --git a/feature/taildrop/doc.go b/feature/taildrop/doc.go new file mode 100644 index 000000000..8980a2170 --- /dev/null +++ b/feature/taildrop/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package taildrop registers the taildrop (file sending) feature. 
+package taildrop diff --git a/feature/taildrop/ext.go b/feature/taildrop/ext.go new file mode 100644 index 000000000..5d22cfb9b --- /dev/null +++ b/feature/taildrop/ext.go @@ -0,0 +1,54 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package taildrop + +import ( + "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/taildrop" + "tailscale.com/tsd" + "tailscale.com/types/logger" +) + +func init() { + ipnext.RegisterExtension("taildrop", newExtension) +} + +func newExtension(logf logger.Logf, _ *tsd.System) (ipnext.Extension, error) { + return &extension{ + logf: logger.WithPrefix(logf, "taildrop: "), + }, nil +} + +type extension struct { + logf logger.Logf + lb *ipnlocal.LocalBackend + mgr *taildrop.Manager +} + +func (e *extension) Name() string { + return "taildrop" +} + +func (e *extension) Init(h ipnext.Host) error { + type I interface { + Backend() ipnlocal.Backend + } + e.lb = h.(I).Backend().(*ipnlocal.LocalBackend) + + // TODO(bradfitz): move init of taildrop.Manager from ipnlocal/peerapi.go to + // here + e.mgr = nil + + return nil +} + +func (e *extension) Shutdown() error { + if mgr, err := e.lb.TaildropManager(); err == nil { + mgr.Shutdown() + } else { + e.logf("taildrop: failed to shutdown taildrop manager: %v", err) + } + return nil +} diff --git a/feature/taildrop/localapi.go b/feature/taildrop/localapi.go new file mode 100644 index 000000000..ce812514e --- /dev/null +++ b/feature/taildrop/localapi.go @@ -0,0 +1,429 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package taildrop + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "maps" + "mime" + "mime/multipart" + "net/http" + "net/http/httputil" + "net/url" + "strconv" + "strings" + "time" + + "tailscale.com/client/tailscale/apitype" + "tailscale.com/ipn" + "tailscale.com/ipn/localapi" + "tailscale.com/tailcfg" + "tailscale.com/taildrop" + "tailscale.com/util/clientmetric" + "tailscale.com/util/httphdr" + "tailscale.com/util/mak" + "tailscale.com/util/progresstracking" + "tailscale.com/util/rands" +) + +func init() { + localapi.Register("file-put/", serveFilePut) + localapi.Register("files/", serveFiles) + localapi.Register("file-targets", serveFileTargets) +} + +var ( + metricFilePutCalls = clientmetric.NewCounter("localapi_file_put") +) + +// serveFilePut sends a file to another node. +// +// It's sometimes possible for clients to do this themselves, without +// tailscaled, except in the case of tailscaled running in +// userspace-networking ("netstack") mode, in which case tailscaled +// needs to a do a netstack dial out. +// +// Instead, the CLI also goes through tailscaled so it doesn't need to be +// aware of the network mode in use. +// +// macOS/iOS have always used this localapi method to simplify the GUI +// clients. +// +// The Windows client currently (2021-11-30) uses the peerapi (/v0/put/) +// directly, as the Windows GUI always runs in tun mode anyway. +// +// In addition to single file PUTs, this endpoint accepts multipart file +// POSTS encoded as multipart/form-data.The first part should be an +// application/json file that contains a manifest consisting of a JSON array of +// OutgoingFiles which we can use for tracking progress even before reading the +// file parts. 
+// +// URL format: +// +// - PUT /localapi/v0/file-put/:stableID/:escaped-filename +// - POST /localapi/v0/file-put/:stableID +func serveFilePut(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + metricFilePutCalls.Add(1) + + if !h.PermitWrite { + http.Error(w, "file access denied", http.StatusForbidden) + return + } + + if r.Method != "PUT" && r.Method != "POST" { + http.Error(w, "want PUT to put file", http.StatusBadRequest) + return + } + + lb := h.LocalBackend() + + fts, err := lb.FileTargets() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + upath, ok := strings.CutPrefix(r.URL.EscapedPath(), "/localapi/v0/file-put/") + if !ok { + http.Error(w, "misconfigured", http.StatusInternalServerError) + return + } + var peerIDStr, filenameEscaped string + if r.Method == "PUT" { + ok := false + peerIDStr, filenameEscaped, ok = strings.Cut(upath, "/") + if !ok { + http.Error(w, "bogus URL", http.StatusBadRequest) + return + } + } else { + peerIDStr = upath + } + peerID := tailcfg.StableNodeID(peerIDStr) + + var ft *apitype.FileTarget + for _, x := range fts { + if x.Node.StableID == peerID { + ft = x + break + } + } + if ft == nil { + http.Error(w, "node not found", http.StatusNotFound) + return + } + dstURL, err := url.Parse(ft.PeerAPIURL) + if err != nil { + http.Error(w, "bogus peer URL", http.StatusInternalServerError) + return + } + + // Periodically report progress of outgoing files. + outgoingFiles := make(map[string]*ipn.OutgoingFile) + t := time.NewTicker(1 * time.Second) + progressUpdates := make(chan ipn.OutgoingFile) + defer close(progressUpdates) + + go func() { + defer t.Stop() + defer lb.UpdateOutgoingFiles(outgoingFiles) + for { + select { + case u, ok := <-progressUpdates: + if !ok { + return + } + outgoingFiles[u.ID] = &u + case <-t.C: + lb.UpdateOutgoingFiles(outgoingFiles) + } + } + }() + + switch r.Method { + case "PUT": + file := ipn.OutgoingFile{ + ID: rands.HexString(30), + PeerID: peerID, + Name: filenameEscaped, + DeclaredSize: r.ContentLength, + } + singleFilePut(h, r.Context(), progressUpdates, w, r.Body, dstURL, file) + case "POST": + multiFilePost(h, progressUpdates, w, r, peerID, dstURL) + default: + http.Error(w, "want PUT to put file", http.StatusBadRequest) + return + } +} + +func multiFilePost(h *localapi.Handler, progressUpdates chan (ipn.OutgoingFile), w http.ResponseWriter, r *http.Request, peerID tailcfg.StableNodeID, dstURL *url.URL) { + _, params, err := mime.ParseMediaType(r.Header.Get("Content-Type")) + if err != nil { + http.Error(w, fmt.Sprintf("invalid Content-Type for multipart POST: %s", err), http.StatusBadRequest) + return + } + + ww := &multiFilePostResponseWriter{} + defer func() { + if err := ww.Flush(w); err != nil { + h.Logf("error: multiFilePostResponseWriter.Flush(): %s", err) + } + }() + + outgoingFilesByName := make(map[string]ipn.OutgoingFile) + first := true + mr := multipart.NewReader(r.Body, params["boundary"]) + for { + part, err := mr.NextPart() + if err == io.EOF { + // No more parts. 
+ return + } else if err != nil { + http.Error(ww, fmt.Sprintf("failed to decode multipart/form-data: %s", err), http.StatusBadRequest) + return + } + + if first { + first = false + if part.Header.Get("Content-Type") != "application/json" { + http.Error(ww, "first MIME part must be a JSON map of filename -> size", http.StatusBadRequest) + return + } + + var manifest []ipn.OutgoingFile + err := json.NewDecoder(part).Decode(&manifest) + if err != nil { + http.Error(ww, fmt.Sprintf("invalid manifest: %s", err), http.StatusBadRequest) + return + } + + for _, file := range manifest { + outgoingFilesByName[file.Name] = file + progressUpdates <- file + } + + continue + } + + if !singleFilePut(h, r.Context(), progressUpdates, ww, part, dstURL, outgoingFilesByName[part.FileName()]) { + return + } + + if ww.statusCode >= 400 { + // put failed, stop immediately + h.Logf("error: singleFilePut: failed with status %d", ww.statusCode) + return + } + } +} + +// multiFilePostResponseWriter is a buffering http.ResponseWriter that can be +// reused across multiple singleFilePut calls and then flushed to the client +// when all files have been PUT. +type multiFilePostResponseWriter struct { + header http.Header + statusCode int + body *bytes.Buffer +} + +func (ww *multiFilePostResponseWriter) Header() http.Header { + if ww.header == nil { + ww.header = make(http.Header) + } + return ww.header +} + +func (ww *multiFilePostResponseWriter) WriteHeader(statusCode int) { + ww.statusCode = statusCode +} + +func (ww *multiFilePostResponseWriter) Write(p []byte) (int, error) { + if ww.body == nil { + ww.body = bytes.NewBuffer(nil) + } + return ww.body.Write(p) +} + +func (ww *multiFilePostResponseWriter) Flush(w http.ResponseWriter) error { + if ww.header != nil { + maps.Copy(w.Header(), ww.header) + } + if ww.statusCode > 0 { + w.WriteHeader(ww.statusCode) + } + if ww.body != nil { + _, err := io.Copy(w, ww.body) + return err + } + return nil +} + +func singleFilePut( + h *localapi.Handler, + ctx context.Context, + progressUpdates chan (ipn.OutgoingFile), + w http.ResponseWriter, + body io.Reader, + dstURL *url.URL, + outgoingFile ipn.OutgoingFile, +) bool { + outgoingFile.Started = time.Now() + body = progresstracking.NewReader(body, 1*time.Second, func(n int, err error) { + outgoingFile.Sent = int64(n) + progressUpdates <- outgoingFile + }) + + fail := func() { + outgoingFile.Finished = true + outgoingFile.Succeeded = false + progressUpdates <- outgoingFile + } + + // Before we PUT a file we check to see if there are any existing partial file and if so, + // we resume the upload from where we left off by sending the remaining file instead of + // the full file. 
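	// In outline, the resume path below works like this: a GET to the peer's
	// /v0/put/<name> endpoint streams the taildrop.BlockChecksum values the
	// peer already holds for a partial copy of the file (404 or 405 from an
	// older peer simply disables resume); taildrop.ResumeReader compares those
	// checksums against the local body and reports how many leading bytes the
	// peer already has; the PUT that follows then sends only the remaining
	// bytes, with a Range header carrying the start offset and the declared
	// ContentLength reduced by that offset.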
+ var offset int64 + var resumeDuration time.Duration + remainingBody := io.Reader(body) + client := &http.Client{ + Transport: h.LocalBackend().Dialer().PeerAPITransport(), + Timeout: 10 * time.Second, + } + req, err := http.NewRequestWithContext(ctx, "GET", dstURL.String()+"/v0/put/"+outgoingFile.Name, nil) + if err != nil { + http.Error(w, "bogus peer URL", http.StatusInternalServerError) + fail() + return false + } + switch resp, err := client.Do(req); { + case err != nil: + h.Logf("could not fetch remote hashes: %v", err) + case resp.StatusCode == http.StatusMethodNotAllowed || resp.StatusCode == http.StatusNotFound: + // noop; implies older peerapi without resume support + case resp.StatusCode != http.StatusOK: + h.Logf("fetch remote hashes status code: %d", resp.StatusCode) + default: + resumeStart := time.Now() + dec := json.NewDecoder(resp.Body) + offset, remainingBody, err = taildrop.ResumeReader(body, func() (out taildrop.BlockChecksum, err error) { + err = dec.Decode(&out) + return out, err + }) + if err != nil { + h.Logf("reader could not be fully resumed: %v", err) + } + resumeDuration = time.Since(resumeStart).Round(time.Millisecond) + } + + outReq, err := http.NewRequestWithContext(ctx, "PUT", "http://peer/v0/put/"+outgoingFile.Name, remainingBody) + if err != nil { + http.Error(w, "bogus outreq", http.StatusInternalServerError) + fail() + return false + } + outReq.ContentLength = outgoingFile.DeclaredSize + if offset > 0 { + h.Logf("resuming put at offset %d after %v", offset, resumeDuration) + rangeHdr, _ := httphdr.FormatRange([]httphdr.Range{{Start: offset, Length: 0}}) + outReq.Header.Set("Range", rangeHdr) + if outReq.ContentLength >= 0 { + outReq.ContentLength -= offset + } + } + + rp := httputil.NewSingleHostReverseProxy(dstURL) + rp.Transport = h.LocalBackend().Dialer().PeerAPITransport() + rp.ServeHTTP(w, outReq) + + outgoingFile.Finished = true + outgoingFile.Succeeded = true + progressUpdates <- outgoingFile + + return true +} + +func serveFiles(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "file access denied", http.StatusForbidden) + return + } + lb := h.LocalBackend() + suffix, ok := strings.CutPrefix(r.URL.EscapedPath(), "/localapi/v0/files/") + if !ok { + http.Error(w, "misconfigured", http.StatusInternalServerError) + return + } + if suffix == "" { + if r.Method != "GET" { + http.Error(w, "want GET to list files", http.StatusBadRequest) + return + } + ctx := r.Context() + if s := r.FormValue("waitsec"); s != "" && s != "0" { + d, err := strconv.Atoi(s) + if err != nil { + http.Error(w, "invalid waitsec", http.StatusBadRequest) + return + } + deadline := time.Now().Add(time.Duration(d) * time.Second) + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, deadline) + defer cancel() + } + wfs, err := lb.AwaitWaitingFiles(ctx) + if err != nil && ctx.Err() == nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(wfs) + return + } + name, err := url.PathUnescape(suffix) + if err != nil { + http.Error(w, "bad filename", http.StatusBadRequest) + return + } + if r.Method == "DELETE" { + if err := lb.DeleteFile(name); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusNoContent) + return + } + rc, size, err := lb.OpenFile(name) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + defer 
rc.Close() + w.Header().Set("Content-Length", fmt.Sprint(size)) + w.Header().Set("Content-Type", "application/octet-stream") + io.Copy(w, rc) +} + +func serveFileTargets(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "access denied", http.StatusForbidden) + return + } + if r.Method != "GET" { + http.Error(w, "want GET to list targets", http.StatusBadRequest) + return + } + fts, err := h.LocalBackend().FileTargets() + if err != nil { + localapi.WriteErrorJSON(w, err) + return + } + mak.NonNilSliceForJSON(&fts) + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(fts) +} diff --git a/feature/taildrop/peerapi.go b/feature/taildrop/peerapi.go new file mode 100644 index 000000000..f90dca9dc --- /dev/null +++ b/feature/taildrop/peerapi.go @@ -0,0 +1,166 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package taildrop + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "tailscale.com/ipn/ipnlocal" + "tailscale.com/tailcfg" + "tailscale.com/taildrop" + "tailscale.com/tstime" + "tailscale.com/util/clientmetric" + "tailscale.com/util/httphdr" +) + +func init() { + ipnlocal.RegisterPeerAPIHandler("/v0/put/", handlePeerPut) +} + +var ( + metricPutCalls = clientmetric.NewCounter("peerapi_put") +) + +// canPutFile reports whether h can put a file ("Taildrop") to this node. +func canPutFile(h ipnlocal.PeerAPIHandler) bool { + if h.Peer().UnsignedPeerAPIOnly() { + // Unsigned peers can't send files. + return false + } + return h.IsSelfUntagged() || h.PeerCaps().HasCapability(tailcfg.PeerCapabilityFileSharingSend) +} + +func handlePeerPut(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + lb := h.LocalBackend() + handlePeerPutWithBackend(h, lb, w, r) +} + +// localBackend is the subset of ipnlocal.Backend that taildrop +// file put needs. This is pulled out for testability. +type localBackend interface { + TaildropManager() (*taildrop.Manager, error) + HasCapFileSharing() bool + Clock() tstime.Clock +} + +func handlePeerPutWithBackend(h ipnlocal.PeerAPIHandler, lb localBackend, w http.ResponseWriter, r *http.Request) { + if r.Method == "PUT" { + metricPutCalls.Add(1) + } + + taildropMgr, err := lb.TaildropManager() + if err != nil { + h.Logf("taildropManager: %v", err) + http.Error(w, "failed to get taildrop manager", http.StatusInternalServerError) + return + } + + if !canPutFile(h) { + http.Error(w, taildrop.ErrNoTaildrop.Error(), http.StatusForbidden) + return + } + if !lb.HasCapFileSharing() { + http.Error(w, taildrop.ErrNoTaildrop.Error(), http.StatusForbidden) + return + } + rawPath := r.URL.EscapedPath() + prefix, ok := strings.CutPrefix(rawPath, "/v0/put/") + if !ok { + http.Error(w, "misconfigured internals", http.StatusForbidden) + return + } + baseName, err := url.PathUnescape(prefix) + if err != nil { + http.Error(w, taildrop.ErrInvalidFileName.Error(), http.StatusBadRequest) + return + } + enc := json.NewEncoder(w) + switch r.Method { + case "GET": + id := taildrop.ClientID(h.Peer().StableID()) + if prefix == "" { + // List all the partial files. + files, err := taildropMgr.PartialFiles(id) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if err := enc.Encode(files); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + h.Logf("json.Encoder.Encode error: %v", err) + return + } + } else { + // Stream all the block hashes for the specified file. 
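			// HashPartialFile returns an iterator-style pair: next() yields one
			// taildrop.BlockChecksum per call until io.EOF, and close() releases
			// the underlying resources. Each checksum is JSON-encoded onto the
			// response as it is produced; this is the stream that the LocalAPI
			// file-put resume logic consumes on the sending side.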
+ next, close, err := taildropMgr.HashPartialFile(id, baseName) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + defer close() + for { + switch cs, err := next(); { + case err == io.EOF: + return + case err != nil: + http.Error(w, err.Error(), http.StatusInternalServerError) + h.Logf("HashPartialFile.next error: %v", err) + return + default: + if err := enc.Encode(cs); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + h.Logf("json.Encoder.Encode error: %v", err) + return + } + } + } + } + case "PUT": + t0 := lb.Clock().Now() + id := taildrop.ClientID(h.Peer().StableID()) + + var offset int64 + if rangeHdr := r.Header.Get("Range"); rangeHdr != "" { + ranges, ok := httphdr.ParseRange(rangeHdr) + if !ok || len(ranges) != 1 || ranges[0].Length != 0 { + http.Error(w, "invalid Range header", http.StatusBadRequest) + return + } + offset = ranges[0].Start + } + n, err := taildropMgr.PutFile(taildrop.ClientID(fmt.Sprint(id)), baseName, r.Body, offset, r.ContentLength) + switch err { + case nil: + d := lb.Clock().Since(t0).Round(time.Second / 10) + h.Logf("got put of %s in %v from %v/%v", approxSize(n), d, h.RemoteAddr().Addr(), h.Peer().ComputedName) + io.WriteString(w, "{}\n") + case taildrop.ErrNoTaildrop: + http.Error(w, err.Error(), http.StatusForbidden) + case taildrop.ErrInvalidFileName: + http.Error(w, err.Error(), http.StatusBadRequest) + case taildrop.ErrFileExists: + http.Error(w, err.Error(), http.StatusConflict) + default: + http.Error(w, err.Error(), http.StatusInternalServerError) + } + default: + http.Error(w, "expected method GET or PUT", http.StatusMethodNotAllowed) + } +} + +func approxSize(n int64) string { + if n <= 1<<10 { + return "<=1KB" + } + if n <= 1<<20 { + return "<=1MB" + } + return fmt.Sprintf("~%dMB", n>>20) +} diff --git a/feature/taildrop/peerapi_test.go b/feature/taildrop/peerapi_test.go new file mode 100644 index 000000000..46a61f547 --- /dev/null +++ b/feature/taildrop/peerapi_test.go @@ -0,0 +1,574 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package taildrop + +import ( + "bytes" + "fmt" + "io" + "io/fs" + "math/rand" + "net/http" + "net/http/httptest" + "net/netip" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "tailscale.com/client/tailscale/apitype" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/tailcfg" + "tailscale.com/taildrop" + "tailscale.com/tstest" + "tailscale.com/tstime" + "tailscale.com/types/logger" +) + +// peerAPIHandler serves the PeerAPI for a source specific client. +type peerAPIHandler struct { + remoteAddr netip.AddrPort + isSelf bool // whether peerNode is owned by same user as this node + selfNode tailcfg.NodeView // this node; always non-nil + peerNode tailcfg.NodeView // peerNode is who's making the request +} + +func (h *peerAPIHandler) IsSelfUntagged() bool { + return !h.selfNode.IsTagged() && !h.peerNode.IsTagged() && h.isSelf +} +func (h *peerAPIHandler) Peer() tailcfg.NodeView { return h.peerNode } +func (h *peerAPIHandler) Self() tailcfg.NodeView { return h.selfNode } +func (h *peerAPIHandler) RemoteAddr() netip.AddrPort { return h.remoteAddr } +func (h *peerAPIHandler) LocalBackend() *ipnlocal.LocalBackend { panic("unexpected") } +func (h *peerAPIHandler) Logf(format string, a ...any) { + //h.logf(format, a...) 
+} + +func (h *peerAPIHandler) PeerCaps() tailcfg.PeerCapMap { + return nil +} + +type fakeLocalBackend struct { + logf logger.Logf + capFileSharing bool + clock tstime.Clock + taildrop *taildrop.Manager +} + +func (lb *fakeLocalBackend) Clock() tstime.Clock { return lb.clock } +func (lb *fakeLocalBackend) HasCapFileSharing() bool { + return lb.capFileSharing +} +func (lb *fakeLocalBackend) TaildropManager() (*taildrop.Manager, error) { + return lb.taildrop, nil +} + +type peerAPITestEnv struct { + taildrop *taildrop.Manager + ph *peerAPIHandler + rr *httptest.ResponseRecorder + logBuf tstest.MemLogger +} + +type check func(*testing.T, *peerAPITestEnv) + +func checks(vv ...check) []check { return vv } + +func httpStatus(wantStatus int) check { + return func(t *testing.T, e *peerAPITestEnv) { + if res := e.rr.Result(); res.StatusCode != wantStatus { + t.Errorf("HTTP response code = %v; want %v", res.Status, wantStatus) + } + } +} + +func bodyContains(sub string) check { + return func(t *testing.T, e *peerAPITestEnv) { + if body := e.rr.Body.String(); !strings.Contains(body, sub) { + t.Errorf("HTTP response body does not contain %q; got: %s", sub, body) + } + } +} + +func fileHasSize(name string, size int) check { + return func(t *testing.T, e *peerAPITestEnv) { + root := e.taildrop.Dir() + if root == "" { + t.Errorf("no rootdir; can't check whether %q has size %v", name, size) + return + } + path := filepath.Join(root, name) + if fi, err := os.Stat(path); err != nil { + t.Errorf("fileHasSize(%q, %v): %v", name, size, err) + } else if fi.Size() != int64(size) { + t.Errorf("file %q has size %v; want %v", name, fi.Size(), size) + } + } +} + +func fileHasContents(name string, want string) check { + return func(t *testing.T, e *peerAPITestEnv) { + root := e.taildrop.Dir() + if root == "" { + t.Errorf("no rootdir; can't check contents of %q", name) + return + } + path := filepath.Join(root, name) + got, err := os.ReadFile(path) + if err != nil { + t.Errorf("fileHasContents: %v", err) + return + } + if string(got) != want { + t.Errorf("file contents = %q; want %q", got, want) + } + } +} + +func hexAll(v string) string { + var sb strings.Builder + for i := range len(v) { + fmt.Fprintf(&sb, "%%%02x", v[i]) + } + return sb.String() +} + +func TestHandlePeerAPI(t *testing.T) { + tests := []struct { + name string + isSelf bool // the peer sending the request is owned by us + capSharing bool // self node has file sharing capability + debugCap bool // self node has debug capability + omitRoot bool // don't configure + reqs []*http.Request + checks []check + }{ + { + name: "reject_non_owner_put", + isSelf: false, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo", nil)}, + checks: checks( + httpStatus(http.StatusForbidden), + bodyContains("Taildrop disabled"), + ), + }, + { + name: "owner_without_cap", + isSelf: true, + capSharing: false, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo", nil)}, + checks: checks( + httpStatus(http.StatusForbidden), + bodyContains("Taildrop disabled"), + ), + }, + { + name: "owner_with_cap_no_rootdir", + omitRoot: true, + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo", nil)}, + checks: checks( + httpStatus(http.StatusForbidden), + bodyContains("Taildrop disabled; no storage directory"), + ), + }, + { + name: "bad_method", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("POST", "/v0/put/foo", nil)}, + checks: checks( + httpStatus(405), + 
bodyContains("expected method GET or PUT"), + ), + }, + { + name: "put_zero_length", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo", nil)}, + checks: checks( + httpStatus(200), + bodyContains("{}"), + fileHasSize("foo", 0), + fileHasContents("foo", ""), + ), + }, + { + name: "put_non_zero_length_content_length", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo", strings.NewReader("contents"))}, + checks: checks( + httpStatus(200), + bodyContains("{}"), + fileHasSize("foo", len("contents")), + fileHasContents("foo", "contents"), + ), + }, + { + name: "put_non_zero_length_chunked", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo", struct{ io.Reader }{strings.NewReader("contents")})}, + checks: checks( + httpStatus(200), + bodyContains("{}"), + fileHasSize("foo", len("contents")), + fileHasContents("foo", "contents"), + ), + }, + { + name: "bad_filename_partial", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo.partial", nil)}, + checks: checks( + httpStatus(400), + bodyContains("invalid filename"), + ), + }, + { + name: "bad_filename_deleted", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo.deleted", nil)}, + checks: checks( + httpStatus(400), + bodyContains("invalid filename"), + ), + }, + { + name: "bad_filename_dot", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/.", nil)}, + checks: checks( + httpStatus(400), + bodyContains("invalid filename"), + ), + }, + { + name: "bad_filename_empty", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/", nil)}, + checks: checks( + httpStatus(400), + bodyContains("invalid filename"), + ), + }, + { + name: "bad_filename_slash", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo/bar", nil)}, + checks: checks( + httpStatus(400), + bodyContains("invalid filename"), + ), + }, + { + name: "bad_filename_encoded_dot", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll("."), nil)}, + checks: checks( + httpStatus(400), + bodyContains("invalid filename"), + ), + }, + { + name: "bad_filename_encoded_slash", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll("/"), nil)}, + checks: checks( + httpStatus(400), + bodyContains("invalid filename"), + ), + }, + { + name: "bad_filename_encoded_backslash", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll("\\"), nil)}, + checks: checks( + httpStatus(400), + bodyContains("invalid filename"), + ), + }, + { + name: "bad_filename_encoded_dotdot", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll(".."), nil)}, + checks: checks( + httpStatus(400), + bodyContains("invalid filename"), + ), + }, + { + name: "bad_filename_encoded_dotdot_out", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll("foo/../../../../../etc/passwd"), nil)}, + checks: checks( + httpStatus(400), + bodyContains("invalid filename"), + ), + }, + { + name: "put_spaces_and_caps", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll("Foo Bar.dat"), 
strings.NewReader("baz"))}, + checks: checks( + httpStatus(200), + bodyContains("{}"), + fileHasContents("Foo Bar.dat", "baz"), + ), + }, + { + name: "put_unicode", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll("Томас и его друзья.mp3"), strings.NewReader("главный озорник"))}, + checks: checks( + httpStatus(200), + bodyContains("{}"), + fileHasContents("Томас и его друзья.mp3", "главный озорник"), + ), + }, + { + name: "put_invalid_utf8", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+(hexAll("😜")[:3]), nil)}, + checks: checks( + httpStatus(400), + bodyContains("invalid filename"), + ), + }, + { + name: "put_invalid_null", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/%00", nil)}, + checks: checks( + httpStatus(400), + bodyContains("invalid filename"), + ), + }, + { + name: "put_invalid_non_printable", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/%01", nil)}, + checks: checks( + httpStatus(400), + bodyContains("invalid filename"), + ), + }, + { + name: "put_invalid_colon", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll("nul:"), nil)}, + checks: checks( + httpStatus(400), + bodyContains("invalid filename"), + ), + }, + { + name: "put_invalid_surrounding_whitespace", + isSelf: true, + capSharing: true, + reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll(" foo "), nil)}, + checks: checks( + httpStatus(400), + bodyContains("invalid filename"), + ), + }, + { + name: "duplicate_zero_length", + isSelf: true, + capSharing: true, + reqs: []*http.Request{ + httptest.NewRequest("PUT", "/v0/put/foo", nil), + httptest.NewRequest("PUT", "/v0/put/foo", nil), + }, + checks: checks( + httpStatus(200), + func(t *testing.T, env *peerAPITestEnv) { + got, err := env.taildrop.WaitingFiles() + if err != nil { + t.Fatalf("WaitingFiles error: %v", err) + } + want := []apitype.WaitingFile{{Name: "foo", Size: 0}} + if diff := cmp.Diff(got, want); diff != "" { + t.Fatalf("WaitingFile mismatch (-got +want):\n%s", diff) + } + }, + ), + }, + { + name: "duplicate_non_zero_length_content_length", + isSelf: true, + capSharing: true, + reqs: []*http.Request{ + httptest.NewRequest("PUT", "/v0/put/foo", strings.NewReader("contents")), + httptest.NewRequest("PUT", "/v0/put/foo", strings.NewReader("contents")), + }, + checks: checks( + httpStatus(200), + func(t *testing.T, env *peerAPITestEnv) { + got, err := env.taildrop.WaitingFiles() + if err != nil { + t.Fatalf("WaitingFiles error: %v", err) + } + want := []apitype.WaitingFile{{Name: "foo", Size: 8}} + if diff := cmp.Diff(got, want); diff != "" { + t.Fatalf("WaitingFile mismatch (-got +want):\n%s", diff) + } + }, + ), + }, + { + name: "duplicate_different_files", + isSelf: true, + capSharing: true, + reqs: []*http.Request{ + httptest.NewRequest("PUT", "/v0/put/foo", strings.NewReader("fizz")), + httptest.NewRequest("PUT", "/v0/put/foo", strings.NewReader("buzz")), + }, + checks: checks( + httpStatus(200), + func(t *testing.T, env *peerAPITestEnv) { + got, err := env.taildrop.WaitingFiles() + if err != nil { + t.Fatalf("WaitingFiles error: %v", err) + } + want := []apitype.WaitingFile{{Name: "foo", Size: 4}, {Name: "foo (1)", Size: 4}} + if diff := cmp.Diff(got, want); diff != "" { + t.Fatalf("WaitingFile mismatch (-got +want):\n%s", diff) + } + }, + ), + }, + } + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + selfNode := &tailcfg.Node{ + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.100.100.101/32"), + }, + } + if tt.debugCap { + selfNode.CapMap = tailcfg.NodeCapMap{tailcfg.CapabilityDebug: nil} + } + var rootDir string + var e peerAPITestEnv + if !tt.omitRoot { + rootDir = t.TempDir() + e.taildrop = taildrop.ManagerOptions{ + Logf: e.logBuf.Logf, + Dir: rootDir, + }.New() + } + + lb := &fakeLocalBackend{ + logf: e.logBuf.Logf, + capFileSharing: tt.capSharing, + clock: &tstest.Clock{}, + taildrop: e.taildrop, + } + e.ph = &peerAPIHandler{ + isSelf: tt.isSelf, + selfNode: selfNode.View(), + peerNode: (&tailcfg.Node{ + ComputedName: "some-peer-name", + }).View(), + } + for _, req := range tt.reqs { + e.rr = httptest.NewRecorder() + if req.Host == "example.com" { + req.Host = "100.100.100.101:12345" + } + handlePeerPutWithBackend(e.ph, lb, e.rr, req) + } + for _, f := range tt.checks { + f(t, &e) + } + if t.Failed() && rootDir != "" { + t.Logf("Contents of %s:", rootDir) + des, _ := fs.ReadDir(os.DirFS(rootDir), ".") + for _, de := range des { + fi, err := de.Info() + if err != nil { + t.Log(err) + } else { + t.Logf(" %v %5d %s", fi.Mode(), fi.Size(), de.Name()) + } + } + } + }) + } +} + +// Windows likes to hold on to file descriptors for some indeterminate +// amount of time after you close them and not let you delete them for +// a bit. So test that we work around that sufficiently. +func TestFileDeleteRace(t *testing.T) { + dir := t.TempDir() + taildropMgr := taildrop.ManagerOptions{ + Logf: t.Logf, + Dir: dir, + }.New() + + ph := &peerAPIHandler{ + isSelf: true, + peerNode: (&tailcfg.Node{ + ComputedName: "some-peer-name", + }).View(), + selfNode: (&tailcfg.Node{ + Addresses: []netip.Prefix{netip.MustParsePrefix("100.100.100.101/32")}, + }).View(), + } + fakeLB := &fakeLocalBackend{ + logf: t.Logf, + capFileSharing: true, + clock: &tstest.Clock{}, + taildrop: taildropMgr, + } + buf := make([]byte, 2<<20) + for range 30 { + rr := httptest.NewRecorder() + handlePeerPutWithBackend(ph, fakeLB, rr, httptest.NewRequest("PUT", "http://100.100.100.101:123/v0/put/foo.txt", bytes.NewReader(buf[:rand.Intn(len(buf))]))) + if res := rr.Result(); res.StatusCode != 200 { + t.Fatal(res.Status) + } + wfs, err := taildropMgr.WaitingFiles() + if err != nil { + t.Fatal(err) + } + if len(wfs) != 1 { + t.Fatalf("waiting files = %d; want 1", len(wfs)) + } + + if err := taildropMgr.DeleteFile("foo.txt"); err != nil { + t.Fatal(err) + } + wfs, err = taildropMgr.WaitingFiles() + if err != nil { + t.Fatal(err) + } + if len(wfs) != 0 { + t.Fatalf("waiting files = %d; want 0", len(wfs)) + } + } +} diff --git a/ipn/ipnlocal/extension_host.go b/ipn/ipnlocal/extension_host.go index 2a8a6a085..79f741e55 100644 --- a/ipn/ipnlocal/extension_host.go +++ b/ipn/ipnlocal/extension_host.go @@ -67,6 +67,7 @@ import ( // and to further reduce the risk of accessing unexported methods or fields of [LocalBackend], the host interacts // with it via the [Backend] interface. type ExtensionHost struct { + b Backend logf logger.Logf // prefixed with "ipnext:" // allExtensions holds the extensions in the order they were registered, @@ -139,6 +140,7 @@ type Backend interface { // Overriding extensions is primarily used for testing. 
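// The Backend passed in here is retained on the host so that extensions can
// reach it through the Backend accessor added below in this diff; the taildrop
// extension's Init, for example, uses that accessor to obtain the concrete
// *LocalBackend it needs.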
func NewExtensionHost(logf logger.Logf, sys *tsd.System, b Backend, overrideExts ...*ipnext.Definition) (_ *ExtensionHost, err error) { host := &ExtensionHost{ + b: b, logf: logger.WithPrefix(logf, "ipnext: "), workQueue: &execqueue.ExecQueue{}, // The host starts with an empty profile and default prefs. @@ -332,6 +334,14 @@ func (h *ExtensionHost) SwitchToBestProfileAsync(reason string) { }) } +// Backend returns the [Backend] used by the extension host. +func (h *ExtensionHost) Backend() Backend { + if h == nil { + return nil + } + return h.b +} + // RegisterProfileStateChangeCallback implements [ipnext.ProfileServices]. func (h *ExtensionHost) RegisterProfileStateChangeCallback(cb ipnext.ProfileStateChangeCallback) (unregister func()) { if h == nil { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 45daefda8..ef5ec267f 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -30,7 +30,6 @@ import ( "reflect" "runtime" "slices" - "sort" "strconv" "strings" "sync" @@ -81,7 +80,6 @@ import ( "tailscale.com/posture" "tailscale.com/syncs" "tailscale.com/tailcfg" - "tailscale.com/taildrop" "tailscale.com/tka" "tailscale.com/tsd" "tailscale.com/tstime" @@ -590,6 +588,8 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo return b, nil } +func (b *LocalBackend) Clock() tstime.Clock { return b.clock } + // FindExtensionByName returns an active extension with the given name, // or nil if no such extension exists. func (b *LocalBackend) FindExtensionByName(name string) any { @@ -1075,9 +1075,6 @@ func (b *LocalBackend) Shutdown() { defer cancel() b.sockstatLogger.Shutdown(ctx) } - if b.peerAPIServer != nil { - b.peerAPIServer.taildrop.Shutdown() - } b.stopOfflineAutoUpdate() b.unregisterNetMon() @@ -1291,7 +1288,9 @@ func (b *LocalBackend) populatePeerStatusLocked(sb *ipnstate.StatusBuilder) { SSH_HostKeys: p.Hostinfo().SSH_HostKeys().AsSlice(), Location: p.Hostinfo().Location().AsStruct(), Capabilities: p.Capabilities().AsSlice(), - TaildropTarget: b.taildropTargetStatus(p), + } + if f := hookSetPeerStatusTaildropTargetLocked; f != nil { + f(b, ps, p) } if cm := p.CapMap(); cm.Len() > 0 { ps.CapMap = make(tailcfg.NodeCapMap, cm.Len()) @@ -3248,6 +3247,17 @@ func (b *LocalBackend) sendTo(n ipn.Notify, recipient notificationTarget) { b.sendToLocked(n, recipient) } +var ( + // hookSetNotifyFilesWaitingLocked, if non-nil, is called in sendToLocked to + // populate ipn.Notify.FilesWaiting when taildrop is linked in to the binary + // and enabled on a LocalBackend. + hookSetNotifyFilesWaitingLocked func(*LocalBackend, *ipn.Notify) + + // hookSetPeerStatusTaildropTargetLocked, if non-nil, is called to populate PeerStatus + // if taildrop is linked in to the binary and enabled on the LocalBackend. + hookSetPeerStatusTaildropTargetLocked func(*LocalBackend, *ipnstate.PeerStatus, tailcfg.NodeView) +) + // sendToLocked is like [LocalBackend.sendTo], but assumes b.mu is already held. 
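The hooks declared just above are the seam that lets the taildrop code, now guarded by a build tag, attach behavior to LocalBackend without ipnlocal importing the feature: ipn/ipnlocal/taildrop.go (further down in this patch) assigns them in an init function that exists only when taildrop is compiled in, and call sites such as sendToLocked, which follows, skip the work when the hook is nil. A reduced model of the pattern, under illustrative names (Backend, Notify, and hookSetFilesWaiting are not the real identifiers):

	package sketch

	type Notify struct{ FilesWaiting bool }

	type Backend struct{ filesWaiting bool }

	// hookSetFilesWaiting, if non-nil, is installed at init time by an
	// optional, build-tag-gated file elsewhere in the package.
	var hookSetFilesWaiting func(*Backend, *Notify)

	func (b *Backend) send(n Notify) Notify {
		if f := hookSetFilesWaiting; f != nil {
			f(b, &n) // feature compiled in: let it decorate the notification
		}
		return n
	}

	// In the feature's own file, compiled only when its build tag allows:
	//
	//	func init() { hookSetFilesWaiting = (*Backend).setFilesWaiting }
	//
	//	func (b *Backend) setFilesWaiting(n *Notify) { n.FilesWaiting = b.filesWaiting }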
func (b *LocalBackend) sendToLocked(n ipn.Notify, recipient notificationTarget) { if n.Prefs != nil { @@ -3257,9 +3267,8 @@ func (b *LocalBackend) sendToLocked(n ipn.Notify, recipient notificationTarget) n.Version = version.Long() } - apiSrv := b.peerAPIServer - if mayDeref(apiSrv).taildrop.HasFilesWaiting() { - n.FilesWaiting = &empty.Message{} + if f := hookSetNotifyFilesWaitingLocked; f != nil { + f(b, &n) } for _, sess := range b.notifyWatchers { @@ -3273,32 +3282,6 @@ func (b *LocalBackend) sendToLocked(n ipn.Notify, recipient notificationTarget) } } -func (b *LocalBackend) sendFileNotify() { - var n ipn.Notify - - b.mu.Lock() - for _, wakeWaiter := range b.fileWaiters { - wakeWaiter() - } - apiSrv := b.peerAPIServer - if apiSrv == nil { - b.mu.Unlock() - return - } - - // Make sure we always set n.IncomingFiles non-nil so it gets encoded - // in JSON to clients. They distinguish between empty and non-nil - // to know whether a Notify should be able about files. - n.IncomingFiles = apiSrv.taildrop.IncomingFiles() - b.mu.Unlock() - - sort.Slice(n.IncomingFiles, func(i, j int) bool { - return n.IncomingFiles[i].Started.Before(n.IncomingFiles[j].Started) - }) - - b.send(n) -} - // setAuthURL sets the authURL and triggers [LocalBackend.popBrowserAuthNow] if the URL has changed. // This method is called when a new authURL is received from the control plane, meaning that either a user // has started a new interactive login (e.g., by running `tailscale login` or clicking Login in the GUI), @@ -5289,21 +5272,9 @@ func (b *LocalBackend) initPeerAPIListener() { return } - fileRoot := b.fileRootLocked(selfNode.User()) - if fileRoot == "" { - b.logf("peerapi starting without Taildrop directory configured") - } - ps := &peerAPIServer{ - b: b, - taildrop: taildrop.ManagerOptions{ - Logf: b.logf, - Clock: tstime.DefaultClock{Clock: b.clock}, - State: b.store, - Dir: fileRoot, - DirectFileMode: b.directFileRoot != "", - SendFileNotify: b.sendFileNotify, - }.New(), + b: b, + taildrop: b.newTaildropManager(b.fileRootLocked(selfNode.User())), } if dm, ok := b.sys.DNSManager.GetOK(); ok { ps.resolver = dm.Resolver() @@ -6598,172 +6569,6 @@ func (b *LocalBackend) TestOnlyPublicKeys() (machineKey key.MachinePublic, nodeK return mk, nk } -func (b *LocalBackend) removeFileWaiter(handle set.Handle) { - b.mu.Lock() - defer b.mu.Unlock() - delete(b.fileWaiters, handle) -} - -func (b *LocalBackend) addFileWaiter(wakeWaiter context.CancelFunc) set.Handle { - b.mu.Lock() - defer b.mu.Unlock() - return b.fileWaiters.Add(wakeWaiter) -} - -func (b *LocalBackend) WaitingFiles() ([]apitype.WaitingFile, error) { - b.mu.Lock() - apiSrv := b.peerAPIServer - b.mu.Unlock() - return mayDeref(apiSrv).taildrop.WaitingFiles() -} - -// AwaitWaitingFiles is like WaitingFiles but blocks while ctx is not done, -// waiting for any files to be available. -// -// On return, exactly one of the results will be non-empty or non-nil, -// respectively. -func (b *LocalBackend) AwaitWaitingFiles(ctx context.Context) ([]apitype.WaitingFile, error) { - if ff, err := b.WaitingFiles(); err != nil || len(ff) > 0 { - return ff, err - } - - for { - gotFile, gotFileCancel := context.WithCancel(context.Background()) - defer gotFileCancel() - - handle := b.addFileWaiter(gotFileCancel) - defer b.removeFileWaiter(handle) - - // Now that we've registered ourselves, check again, in case - // of race. Otherwise there's a small window where we could - // miss a file arrival and wait forever. 
- if ff, err := b.WaitingFiles(); err != nil || len(ff) > 0 { - return ff, err - } - - select { - case <-gotFile.Done(): - if ff, err := b.WaitingFiles(); err != nil || len(ff) > 0 { - return ff, err - } - case <-ctx.Done(): - return nil, ctx.Err() - } - } -} - -func (b *LocalBackend) DeleteFile(name string) error { - b.mu.Lock() - apiSrv := b.peerAPIServer - b.mu.Unlock() - return mayDeref(apiSrv).taildrop.DeleteFile(name) -} - -func (b *LocalBackend) OpenFile(name string) (rc io.ReadCloser, size int64, err error) { - b.mu.Lock() - apiSrv := b.peerAPIServer - b.mu.Unlock() - return mayDeref(apiSrv).taildrop.OpenFile(name) -} - -// hasCapFileSharing reports whether the current node has the file -// sharing capability enabled. -func (b *LocalBackend) hasCapFileSharing() bool { - b.mu.Lock() - defer b.mu.Unlock() - return b.capFileSharing -} - -// FileTargets lists nodes that the current node can send files to. -func (b *LocalBackend) FileTargets() ([]*apitype.FileTarget, error) { - var ret []*apitype.FileTarget - - b.mu.Lock() - defer b.mu.Unlock() - nm := b.netMap - if b.state != ipn.Running || nm == nil { - return nil, errors.New("not connected to the tailnet") - } - if !b.capFileSharing { - return nil, errors.New("file sharing not enabled by Tailscale admin") - } - for _, p := range b.peers { - if !b.peerIsTaildropTargetLocked(p) { - continue - } - if p.Hostinfo().OS() == "tvOS" { - continue - } - peerAPI := peerAPIBase(b.netMap, p) - if peerAPI == "" { - continue - } - ret = append(ret, &apitype.FileTarget{ - Node: p.AsStruct(), - PeerAPIURL: peerAPI, - }) - } - slices.SortFunc(ret, func(a, b *apitype.FileTarget) int { - return cmp.Compare(a.Node.Name, b.Node.Name) - }) - return ret, nil -} - -func (b *LocalBackend) taildropTargetStatus(p tailcfg.NodeView) ipnstate.TaildropTargetStatus { - if b.state != ipn.Running { - return ipnstate.TaildropTargetIpnStateNotRunning - } - if b.netMap == nil { - return ipnstate.TaildropTargetNoNetmapAvailable - } - if !b.capFileSharing { - return ipnstate.TaildropTargetMissingCap - } - - if !p.Online().Get() { - return ipnstate.TaildropTargetOffline - } - - if !p.Valid() { - return ipnstate.TaildropTargetNoPeerInfo - } - if b.netMap.User() != p.User() { - // Different user must have the explicit file sharing target capability - if p.Addresses().Len() == 0 || - !b.peerHasCapLocked(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) { - return ipnstate.TaildropTargetOwnedByOtherUser - } - } - - if p.Hostinfo().OS() == "tvOS" { - return ipnstate.TaildropTargetUnsupportedOS - } - if peerAPIBase(b.netMap, p) == "" { - return ipnstate.TaildropTargetNoPeerAPI - } - return ipnstate.TaildropTargetAvailable -} - -// peerIsTaildropTargetLocked reports whether p is a valid Taildrop file -// recipient from this node according to its ownership and the capabilities in -// the netmap. -// -// b.mu must be locked. -func (b *LocalBackend) peerIsTaildropTargetLocked(p tailcfg.NodeView) bool { - if b.netMap == nil || !p.Valid() { - return false - } - if b.netMap.User() == p.User() { - return true - } - if p.Addresses().Len() > 0 && - b.peerHasCapLocked(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) { - // Explicitly noted in the netmap ACL caps as a target. 
- return true - } - return false -} - func (b *LocalBackend) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { return b.peerCapsLocked(addr).HasCapability(wantCap) } @@ -7834,14 +7639,6 @@ func allowedAutoRoute(ipp netip.Prefix) bool { return true } -// mayDeref dereferences p if non-nil, otherwise it returns the zero value. -func mayDeref[T any](p *T) (v T) { - if p == nil { - return v - } - return *p -} - var ErrNoPreferredDERP = errors.New("no preferred DERP, try again later") // suggestExitNodeLocked computes a suggestion based on the current netmap and last netcheck report. If diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 3b384fd96..3b9e08638 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -575,54 +575,6 @@ func TestSetUseExitNodeEnabled(t *testing.T) { } } -func TestFileTargets(t *testing.T) { - b := new(LocalBackend) - _, err := b.FileTargets() - if got, want := fmt.Sprint(err), "not connected to the tailnet"; got != want { - t.Errorf("before connect: got %q; want %q", got, want) - } - - b.netMap = new(netmap.NetworkMap) - _, err = b.FileTargets() - if got, want := fmt.Sprint(err), "not connected to the tailnet"; got != want { - t.Errorf("non-running netmap: got %q; want %q", got, want) - } - - b.state = ipn.Running - _, err = b.FileTargets() - if got, want := fmt.Sprint(err), "file sharing not enabled by Tailscale admin"; got != want { - t.Errorf("without cap: got %q; want %q", got, want) - } - - b.capFileSharing = true - got, err := b.FileTargets() - if err != nil { - t.Fatal(err) - } - if len(got) != 0 { - t.Fatalf("unexpected %d peers", len(got)) - } - - var peerMap map[tailcfg.NodeID]tailcfg.NodeView - mak.NonNil(&peerMap) - var nodeID tailcfg.NodeID - nodeID = 1234 - peer := &tailcfg.Node{ - ID: 1234, - Hostinfo: (&tailcfg.Hostinfo{OS: "tvOS"}).View(), - } - peerMap[nodeID] = peer.View() - b.peers = peerMap - got, err = b.FileTargets() - if err != nil { - t.Fatal(err) - } - if len(got) != 0 { - t.Fatalf("unexpected %d peers", len(got)) - } - // (other cases handled by TestPeerAPIBase above) -} - func TestInternalAndExternalInterfaces(t *testing.T) { type interfacePrefix struct { i netmon.Interface diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 888b876d6..87437daf8 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -15,7 +15,6 @@ import ( "net" "net/http" "net/netip" - "net/url" "os" "path/filepath" "runtime" @@ -37,10 +36,8 @@ import ( "tailscale.com/net/netutil" "tailscale.com/net/sockstats" "tailscale.com/tailcfg" - "tailscale.com/taildrop" "tailscale.com/types/views" "tailscale.com/util/clientmetric" - "tailscale.com/util/httphdr" "tailscale.com/util/httpm" "tailscale.com/wgengine/filter" ) @@ -64,7 +61,7 @@ type peerAPIServer struct { b *LocalBackend resolver peerDNSQueryHandler - taildrop *taildrop.Manager + taildrop *taildrop_Manager } func (s *peerAPIServer) listen(ip netip.Addr, ifState *netmon.State) (ln net.Listener, err error) { @@ -232,6 +229,8 @@ type PeerAPIHandler interface { Self() tailcfg.NodeView LocalBackend() *LocalBackend IsSelfUntagged() bool // whether the peer is untagged and the same as this user + RemoteAddr() netip.AddrPort + Logf(format string, a ...any) } func (h *peerAPIHandler) IsSelfUntagged() bool { @@ -239,7 +238,11 @@ func (h *peerAPIHandler) IsSelfUntagged() bool { } func (h *peerAPIHandler) Peer() tailcfg.NodeView { return h.peerNode } func (h *peerAPIHandler) Self() tailcfg.NodeView { return h.selfNode } +func (h 
*peerAPIHandler) RemoteAddr() netip.AddrPort { return h.remoteAddr } func (h *peerAPIHandler) LocalBackend() *LocalBackend { return h.ps.b } +func (h *peerAPIHandler) Logf(format string, a ...any) { + h.logf(format, a...) +} func (h *peerAPIHandler) logf(format string, a ...any) { h.ps.b.logf("peerapi: "+format, a...) @@ -327,9 +330,18 @@ func RegisterPeerAPIHandler(path string, f func(PeerAPIHandler, http.ResponseWri panic(fmt.Sprintf("duplicate PeerAPI handler %q", path)) } peerAPIHandlers[path] = f + if strings.HasSuffix(path, "/") { + peerAPIHandlerPrefixes[path] = f + } } -var peerAPIHandlers = map[string]func(PeerAPIHandler, http.ResponseWriter, *http.Request){} // by URL.Path +var ( + peerAPIHandlers = map[string]func(PeerAPIHandler, http.ResponseWriter, *http.Request){} // by URL.Path + + // peerAPIHandlerPrefixes are the subset of peerAPIHandlers where + // the map key ends with a slash, indicating a prefix match. + peerAPIHandlerPrefixes = map[string]func(PeerAPIHandler, http.ResponseWriter, *http.Request){} +) func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err := h.validatePeerAPIRequest(r); err != nil { @@ -343,12 +355,11 @@ func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.Header().Set("X-Frame-Options", "DENY") w.Header().Set("X-Content-Type-Options", "nosniff") } - if strings.HasPrefix(r.URL.Path, "/v0/put/") { - if r.Method == "PUT" { - metricPutCalls.Add(1) + for pfx, ph := range peerAPIHandlerPrefixes { + if strings.HasPrefix(r.URL.Path, pfx) { + ph(h, w, r) + return } - h.handlePeerPut(w, r) - return } if strings.HasPrefix(r.URL.Path, "/dns-query") { metricDNSCalls.Add(1) @@ -393,6 +404,10 @@ func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ph(h, w, r) return } + if r.URL.Path != "/" { + http.Error(w, "unsupported peerapi path", http.StatusNotFound) + return + } who := h.peerUser.DisplayName fmt.Fprintf(w, ` @@ -630,15 +645,6 @@ func (h *peerAPIHandler) handleServeSockStats(w http.ResponseWriter, r *http.Req fmt.Fprintln(w, "") } -// canPutFile reports whether h can put a file ("Taildrop") to this node. -func (h *peerAPIHandler) canPutFile() bool { - if h.peerNode.UnsignedPeerAPIOnly() { - // Unsigned peers can't send files. - return false - } - return h.isSelf || h.peerHasCap(tailcfg.PeerCapabilityFileSharingSend) -} - // canDebug reports whether h can debug this node (goroutines, metrics, // magicsock internal state, etc). func (h *peerAPIHandler) canDebug() bool { @@ -668,110 +674,6 @@ func (h *peerAPIHandler) PeerCaps() tailcfg.PeerCapMap { return h.ps.b.PeerCaps(h.remoteAddr.Addr()) } -func (h *peerAPIHandler) handlePeerPut(w http.ResponseWriter, r *http.Request) { - if !h.canPutFile() { - http.Error(w, taildrop.ErrNoTaildrop.Error(), http.StatusForbidden) - return - } - if !h.ps.b.hasCapFileSharing() { - http.Error(w, taildrop.ErrNoTaildrop.Error(), http.StatusForbidden) - return - } - rawPath := r.URL.EscapedPath() - prefix, ok := strings.CutPrefix(rawPath, "/v0/put/") - if !ok { - http.Error(w, "misconfigured internals", http.StatusForbidden) - return - } - baseName, err := url.PathUnescape(prefix) - if err != nil { - http.Error(w, taildrop.ErrInvalidFileName.Error(), http.StatusBadRequest) - return - } - enc := json.NewEncoder(w) - switch r.Method { - case "GET": - id := taildrop.ClientID(h.peerNode.StableID()) - if prefix == "" { - // List all the partial files. 
- files, err := h.ps.taildrop.PartialFiles(id) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - if err := enc.Encode(files); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - h.logf("json.Encoder.Encode error: %v", err) - return - } - } else { - // Stream all the block hashes for the specified file. - next, close, err := h.ps.taildrop.HashPartialFile(id, baseName) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - defer close() - for { - switch cs, err := next(); { - case err == io.EOF: - return - case err != nil: - http.Error(w, err.Error(), http.StatusInternalServerError) - h.logf("HashPartialFile.next error: %v", err) - return - default: - if err := enc.Encode(cs); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - h.logf("json.Encoder.Encode error: %v", err) - return - } - } - } - } - case "PUT": - t0 := h.ps.b.clock.Now() - id := taildrop.ClientID(h.peerNode.StableID()) - - var offset int64 - if rangeHdr := r.Header.Get("Range"); rangeHdr != "" { - ranges, ok := httphdr.ParseRange(rangeHdr) - if !ok || len(ranges) != 1 || ranges[0].Length != 0 { - http.Error(w, "invalid Range header", http.StatusBadRequest) - return - } - offset = ranges[0].Start - } - n, err := h.ps.taildrop.PutFile(taildrop.ClientID(fmt.Sprint(id)), baseName, r.Body, offset, r.ContentLength) - switch err { - case nil: - d := h.ps.b.clock.Since(t0).Round(time.Second / 10) - h.logf("got put of %s in %v from %v/%v", approxSize(n), d, h.remoteAddr.Addr(), h.peerNode.ComputedName) - io.WriteString(w, "{}\n") - case taildrop.ErrNoTaildrop: - http.Error(w, err.Error(), http.StatusForbidden) - case taildrop.ErrInvalidFileName: - http.Error(w, err.Error(), http.StatusBadRequest) - case taildrop.ErrFileExists: - http.Error(w, err.Error(), http.StatusConflict) - default: - http.Error(w, err.Error(), http.StatusInternalServerError) - } - default: - http.Error(w, "expected method GET or PUT", http.StatusMethodNotAllowed) - } -} - -func approxSize(n int64) string { - if n <= 1<<10 { - return "<=1KB" - } - if n <= 1<<20 { - return "<=1MB" - } - return fmt.Sprintf("~%dMB", n>>20) -} - func (h *peerAPIHandler) handleServeGoroutines(w http.ResponseWriter, r *http.Request) { if !h.canDebug() { http.Error(w, "denied; no debug access", http.StatusForbidden) @@ -1244,7 +1146,6 @@ var ( metricInvalidRequests = clientmetric.NewCounter("peerapi_invalid_requests") // Non-debug PeerAPI endpoints. 
- metricPutCalls = clientmetric.NewCounter("peerapi_put") metricDNSCalls = clientmetric.NewCounter("peerapi_dns") metricIngressCalls = clientmetric.NewCounter("peerapi_ingress") ) diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index 7a3f05a9c..77c442060 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -4,33 +4,23 @@ package ipnlocal import ( - "bytes" "context" "encoding/json" - "fmt" - "io" - "io/fs" - "math/rand" "net/http" "net/http/httptest" "net/netip" - "os" - "path/filepath" "slices" "strings" "testing" - "github.com/google/go-cmp/cmp" "go4.org/netipx" "golang.org/x/net/dns/dnsmessage" "tailscale.com/appc" "tailscale.com/appc/appctest" - "tailscale.com/client/tailscale/apitype" "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" - "tailscale.com/taildrop" "tailscale.com/tstest" "tailscale.com/types/logger" "tailscale.com/types/netmap" @@ -75,56 +65,12 @@ func bodyNotContains(sub string) check { } } -func fileHasSize(name string, size int) check { - return func(t *testing.T, e *peerAPITestEnv) { - root := e.ph.ps.taildrop.Dir() - if root == "" { - t.Errorf("no rootdir; can't check whether %q has size %v", name, size) - return - } - path := filepath.Join(root, name) - if fi, err := os.Stat(path); err != nil { - t.Errorf("fileHasSize(%q, %v): %v", name, size, err) - } else if fi.Size() != int64(size) { - t.Errorf("file %q has size %v; want %v", name, fi.Size(), size) - } - } -} - -func fileHasContents(name string, want string) check { - return func(t *testing.T, e *peerAPITestEnv) { - root := e.ph.ps.taildrop.Dir() - if root == "" { - t.Errorf("no rootdir; can't check contents of %q", name) - return - } - path := filepath.Join(root, name) - got, err := os.ReadFile(path) - if err != nil { - t.Errorf("fileHasContents: %v", err) - return - } - if string(got) != want { - t.Errorf("file contents = %q; want %q", got, want) - } - } -} - -func hexAll(v string) string { - var sb strings.Builder - for i := range len(v) { - fmt.Fprintf(&sb, "%%%02x", v[i]) - } - return sb.String() -} - func TestHandlePeerAPI(t *testing.T) { tests := []struct { name string isSelf bool // the peer sending the request is owned by us capSharing bool // self node has file sharing capability debugCap bool // self node has debug capability - omitRoot bool // don't configure reqs []*http.Request checks []check }{ @@ -174,255 +120,6 @@ func TestHandlePeerAPI(t *testing.T) { bodyContains("ServeHTTP"), ), }, - { - name: "reject_non_owner_put", - isSelf: false, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo", nil)}, - checks: checks( - httpStatus(http.StatusForbidden), - bodyContains("Taildrop disabled"), - ), - }, - { - name: "owner_without_cap", - isSelf: true, - capSharing: false, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo", nil)}, - checks: checks( - httpStatus(http.StatusForbidden), - bodyContains("Taildrop disabled"), - ), - }, - { - name: "owner_with_cap_no_rootdir", - omitRoot: true, - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo", nil)}, - checks: checks( - httpStatus(http.StatusForbidden), - bodyContains("Taildrop disabled; no storage directory"), - ), - }, - { - name: "bad_method", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("POST", "/v0/put/foo", nil)}, - checks: checks( - httpStatus(405), - bodyContains("expected method GET or PUT"), - ), - }, - { - name: 
"put_zero_length", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo", nil)}, - checks: checks( - httpStatus(200), - bodyContains("{}"), - fileHasSize("foo", 0), - fileHasContents("foo", ""), - ), - }, - { - name: "put_non_zero_length_content_length", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo", strings.NewReader("contents"))}, - checks: checks( - httpStatus(200), - bodyContains("{}"), - fileHasSize("foo", len("contents")), - fileHasContents("foo", "contents"), - ), - }, - { - name: "put_non_zero_length_chunked", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo", struct{ io.Reader }{strings.NewReader("contents")})}, - checks: checks( - httpStatus(200), - bodyContains("{}"), - fileHasSize("foo", len("contents")), - fileHasContents("foo", "contents"), - ), - }, - { - name: "bad_filename_partial", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo.partial", nil)}, - checks: checks( - httpStatus(400), - bodyContains("invalid filename"), - ), - }, - { - name: "bad_filename_deleted", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo.deleted", nil)}, - checks: checks( - httpStatus(400), - bodyContains("invalid filename"), - ), - }, - { - name: "bad_filename_dot", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/.", nil)}, - checks: checks( - httpStatus(400), - bodyContains("invalid filename"), - ), - }, - { - name: "bad_filename_empty", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/", nil)}, - checks: checks( - httpStatus(400), - bodyContains("invalid filename"), - ), - }, - { - name: "bad_filename_slash", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo/bar", nil)}, - checks: checks( - httpStatus(400), - bodyContains("invalid filename"), - ), - }, - { - name: "bad_filename_encoded_dot", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll("."), nil)}, - checks: checks( - httpStatus(400), - bodyContains("invalid filename"), - ), - }, - { - name: "bad_filename_encoded_slash", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll("/"), nil)}, - checks: checks( - httpStatus(400), - bodyContains("invalid filename"), - ), - }, - { - name: "bad_filename_encoded_backslash", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll("\\"), nil)}, - checks: checks( - httpStatus(400), - bodyContains("invalid filename"), - ), - }, - { - name: "bad_filename_encoded_dotdot", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll(".."), nil)}, - checks: checks( - httpStatus(400), - bodyContains("invalid filename"), - ), - }, - { - name: "bad_filename_encoded_dotdot_out", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll("foo/../../../../../etc/passwd"), nil)}, - checks: checks( - httpStatus(400), - bodyContains("invalid filename"), - ), - }, - { - name: "put_spaces_and_caps", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll("Foo Bar.dat"), strings.NewReader("baz"))}, - checks: checks( - httpStatus(200), - 
bodyContains("{}"), - fileHasContents("Foo Bar.dat", "baz"), - ), - }, - { - name: "put_unicode", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll("Томас и его друзья.mp3"), strings.NewReader("главный озорник"))}, - checks: checks( - httpStatus(200), - bodyContains("{}"), - fileHasContents("Томас и его друзья.mp3", "главный озорник"), - ), - }, - { - name: "put_invalid_utf8", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+(hexAll("😜")[:3]), nil)}, - checks: checks( - httpStatus(400), - bodyContains("invalid filename"), - ), - }, - { - name: "put_invalid_null", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/%00", nil)}, - checks: checks( - httpStatus(400), - bodyContains("invalid filename"), - ), - }, - { - name: "put_invalid_non_printable", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/%01", nil)}, - checks: checks( - httpStatus(400), - bodyContains("invalid filename"), - ), - }, - { - name: "put_invalid_colon", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll("nul:"), nil)}, - checks: checks( - httpStatus(400), - bodyContains("invalid filename"), - ), - }, - { - name: "put_invalid_surrounding_whitespace", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/"+hexAll(" foo "), nil)}, - checks: checks( - httpStatus(400), - bodyContains("invalid filename"), - ), - }, { name: "host-val/bad-ip", isSelf: true, @@ -450,72 +147,6 @@ func TestHandlePeerAPI(t *testing.T) { httpStatus(200), ), }, - { - name: "duplicate_zero_length", - isSelf: true, - capSharing: true, - reqs: []*http.Request{ - httptest.NewRequest("PUT", "/v0/put/foo", nil), - httptest.NewRequest("PUT", "/v0/put/foo", nil), - }, - checks: checks( - httpStatus(200), - func(t *testing.T, env *peerAPITestEnv) { - got, err := env.ph.ps.taildrop.WaitingFiles() - if err != nil { - t.Fatalf("WaitingFiles error: %v", err) - } - want := []apitype.WaitingFile{{Name: "foo", Size: 0}} - if diff := cmp.Diff(got, want); diff != "" { - t.Fatalf("WaitingFile mismatch (-got +want):\n%s", diff) - } - }, - ), - }, - { - name: "duplicate_non_zero_length_content_length", - isSelf: true, - capSharing: true, - reqs: []*http.Request{ - httptest.NewRequest("PUT", "/v0/put/foo", strings.NewReader("contents")), - httptest.NewRequest("PUT", "/v0/put/foo", strings.NewReader("contents")), - }, - checks: checks( - httpStatus(200), - func(t *testing.T, env *peerAPITestEnv) { - got, err := env.ph.ps.taildrop.WaitingFiles() - if err != nil { - t.Fatalf("WaitingFiles error: %v", err) - } - want := []apitype.WaitingFile{{Name: "foo", Size: 8}} - if diff := cmp.Diff(got, want); diff != "" { - t.Fatalf("WaitingFile mismatch (-got +want):\n%s", diff) - } - }, - ), - }, - { - name: "duplicate_different_files", - isSelf: true, - capSharing: true, - reqs: []*http.Request{ - httptest.NewRequest("PUT", "/v0/put/foo", strings.NewReader("fizz")), - httptest.NewRequest("PUT", "/v0/put/foo", strings.NewReader("buzz")), - }, - checks: checks( - httpStatus(200), - func(t *testing.T, env *peerAPITestEnv) { - got, err := env.ph.ps.taildrop.WaitingFiles() - if err != nil { - t.Fatalf("WaitingFiles error: %v", err) - } - want := []apitype.WaitingFile{{Name: "foo", Size: 4}, {Name: "foo (1)", Size: 4}} - if diff := cmp.Diff(got, want); diff != "" { - t.Fatalf("WaitingFile mismatch (-got 
+want):\n%s", diff) - } - }, - ), - }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -544,16 +175,6 @@ func TestHandlePeerAPI(t *testing.T) { b: lb, }, } - var rootDir string - if !tt.omitRoot { - rootDir = t.TempDir() - if e.ph.ps.taildrop == nil { - e.ph.ps.taildrop = taildrop.ManagerOptions{ - Logf: e.logBuf.Logf, - Dir: rootDir, - }.New() - } - } for _, req := range tt.reqs { e.rr = httptest.NewRecorder() if req.Host == "example.com" { @@ -564,76 +185,10 @@ func TestHandlePeerAPI(t *testing.T) { for _, f := range tt.checks { f(t, &e) } - if t.Failed() && rootDir != "" { - t.Logf("Contents of %s:", rootDir) - des, _ := fs.ReadDir(os.DirFS(rootDir), ".") - for _, de := range des { - fi, err := de.Info() - if err != nil { - t.Log(err) - } else { - t.Logf(" %v %5d %s", fi.Mode(), fi.Size(), de.Name()) - } - } - } }) } } -// Windows likes to hold on to file descriptors for some indeterminate -// amount of time after you close them and not let you delete them for -// a bit. So test that we work around that sufficiently. -func TestFileDeleteRace(t *testing.T) { - dir := t.TempDir() - ps := &peerAPIServer{ - b: &LocalBackend{ - logf: t.Logf, - capFileSharing: true, - clock: &tstest.Clock{}, - }, - taildrop: taildrop.ManagerOptions{ - Logf: t.Logf, - Dir: dir, - }.New(), - } - ph := &peerAPIHandler{ - isSelf: true, - peerNode: (&tailcfg.Node{ - ComputedName: "some-peer-name", - }).View(), - selfNode: (&tailcfg.Node{ - Addresses: []netip.Prefix{netip.MustParsePrefix("100.100.100.101/32")}, - }).View(), - ps: ps, - } - buf := make([]byte, 2<<20) - for range 30 { - rr := httptest.NewRecorder() - ph.ServeHTTP(rr, httptest.NewRequest("PUT", "http://100.100.100.101:123/v0/put/foo.txt", bytes.NewReader(buf[:rand.Intn(len(buf))]))) - if res := rr.Result(); res.StatusCode != 200 { - t.Fatal(res.Status) - } - wfs, err := ps.taildrop.WaitingFiles() - if err != nil { - t.Fatal(err) - } - if len(wfs) != 1 { - t.Fatalf("waiting files = %d; want 1", len(wfs)) - } - - if err := ps.taildrop.DeleteFile("foo.txt"); err != nil { - t.Fatal(err) - } - wfs, err = ps.taildrop.WaitingFiles() - if err != nil { - t.Fatal(err) - } - if len(wfs) != 0 { - t.Fatalf("waiting files = %d; want 0", len(wfs)) - } - } -} - func TestPeerAPIReplyToDNSQueries(t *testing.T) { var h peerAPIHandler diff --git a/ipn/ipnlocal/taildrop.go b/ipn/ipnlocal/taildrop.go index db7d8e12a..807304f30 100644 --- a/ipn/ipnlocal/taildrop.go +++ b/ipn/ipnlocal/taildrop.go @@ -1,16 +1,270 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_taildrop + package ipnlocal import ( + "cmp" + "context" + "errors" + "io" "maps" "slices" "strings" + "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tailcfg" + "tailscale.com/taildrop" + "tailscale.com/tstime" + "tailscale.com/types/empty" + "tailscale.com/util/set" ) +func init() { + hookSetNotifyFilesWaitingLocked = (*LocalBackend).setNotifyFilesWaitingLocked + hookSetPeerStatusTaildropTargetLocked = (*LocalBackend).setPeerStatusTaildropTargetLocked +} + +type taildrop_Manager = taildrop.Manager + +func (b *LocalBackend) newTaildropManager(fileRoot string) *taildrop.Manager { + // TODO(bradfitz): move all this to an ipnext so ipnlocal doesn't need to depend + // on taildrop at all. 
+ if fileRoot == "" { + b.logf("no Taildrop directory configured") + } + return taildrop.ManagerOptions{ + Logf: b.logf, + Clock: tstime.DefaultClock{Clock: b.clock}, + State: b.store, + Dir: fileRoot, + DirectFileMode: b.directFileRoot != "", + SendFileNotify: b.sendFileNotify, + }.New() +} + +func (b *LocalBackend) sendFileNotify() { + var n ipn.Notify + + b.mu.Lock() + for _, wakeWaiter := range b.fileWaiters { + wakeWaiter() + } + apiSrv := b.peerAPIServer + if apiSrv == nil { + b.mu.Unlock() + return + } + + n.IncomingFiles = apiSrv.taildrop.IncomingFiles() + b.mu.Unlock() + + b.send(n) +} + +// TaildropManager returns the taildrop manager for this backend. +// +// TODO(bradfitz): as of 2025-04-15, this is a temporary method during +// refactoring; the plan is for all taildrop code to leave the ipnlocal package +// and move to an extension. Baby steps. +func (b *LocalBackend) TaildropManager() (*taildrop.Manager, error) { + b.mu.Lock() + ps := b.peerAPIServer + b.mu.Unlock() + if ps == nil { + return nil, errors.New("no peer API server initialized") + } + if ps.taildrop == nil { + return nil, errors.New("no taildrop manager initialized") + } + return ps.taildrop, nil +} + +func (b *LocalBackend) taildropOrNil() *taildrop.Manager { + b.mu.Lock() + ps := b.peerAPIServer + b.mu.Unlock() + if ps == nil { + return nil + } + return ps.taildrop +} + +func (b *LocalBackend) setNotifyFilesWaitingLocked(n *ipn.Notify) { + if ps := b.peerAPIServer; ps != nil { + if ps.taildrop.HasFilesWaiting() { + n.FilesWaiting = &empty.Message{} + } + } +} + +func (b *LocalBackend) setPeerStatusTaildropTargetLocked(ps *ipnstate.PeerStatus, p tailcfg.NodeView) { + ps.TaildropTarget = b.taildropTargetStatus(p) +} + +func (b *LocalBackend) removeFileWaiter(handle set.Handle) { + b.mu.Lock() + defer b.mu.Unlock() + delete(b.fileWaiters, handle) +} + +func (b *LocalBackend) addFileWaiter(wakeWaiter context.CancelFunc) set.Handle { + b.mu.Lock() + defer b.mu.Unlock() + return b.fileWaiters.Add(wakeWaiter) +} + +func (b *LocalBackend) WaitingFiles() ([]apitype.WaitingFile, error) { + return b.taildropOrNil().WaitingFiles() +} + +// AwaitWaitingFiles is like WaitingFiles but blocks while ctx is not done, +// waiting for any files to be available. +// +// On return, exactly one of the results will be non-empty or non-nil, +// respectively. +func (b *LocalBackend) AwaitWaitingFiles(ctx context.Context) ([]apitype.WaitingFile, error) { + if ff, err := b.WaitingFiles(); err != nil || len(ff) > 0 { + return ff, err + } + + for { + gotFile, gotFileCancel := context.WithCancel(context.Background()) + defer gotFileCancel() + + handle := b.addFileWaiter(gotFileCancel) + defer b.removeFileWaiter(handle) + + // Now that we've registered ourselves, check again, in case + // of race. Otherwise there's a small window where we could + // miss a file arrival and wait forever. + if ff, err := b.WaitingFiles(); err != nil || len(ff) > 0 { + return ff, err + } + + select { + case <-gotFile.Done(): + if ff, err := b.WaitingFiles(); err != nil || len(ff) > 0 { + return ff, err + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (b *LocalBackend) DeleteFile(name string) error { + return b.taildropOrNil().DeleteFile(name) +} + +func (b *LocalBackend) OpenFile(name string) (rc io.ReadCloser, size int64, err error) { + return b.taildropOrNil().OpenFile(name) +} + +// HasCapFileSharing reports whether the current node has the file +// sharing capability enabled. 
+func (b *LocalBackend) HasCapFileSharing() bool { + // TODO(bradfitz): remove this method and all Taildrop/Taildrive + // references from LocalBackend as part of tailscale/tailscale#12614. + b.mu.Lock() + defer b.mu.Unlock() + return b.capFileSharing +} + +// FileTargets lists nodes that the current node can send files to. +func (b *LocalBackend) FileTargets() ([]*apitype.FileTarget, error) { + var ret []*apitype.FileTarget + + b.mu.Lock() + defer b.mu.Unlock() + nm := b.netMap + if b.state != ipn.Running || nm == nil { + return nil, errors.New("not connected to the tailnet") + } + if !b.capFileSharing { + return nil, errors.New("file sharing not enabled by Tailscale admin") + } + for _, p := range b.peers { + if !b.peerIsTaildropTargetLocked(p) { + continue + } + if p.Hostinfo().OS() == "tvOS" { + continue + } + peerAPI := peerAPIBase(b.netMap, p) + if peerAPI == "" { + continue + } + ret = append(ret, &apitype.FileTarget{ + Node: p.AsStruct(), + PeerAPIURL: peerAPI, + }) + } + slices.SortFunc(ret, func(a, b *apitype.FileTarget) int { + return cmp.Compare(a.Node.Name, b.Node.Name) + }) + return ret, nil +} + +func (b *LocalBackend) taildropTargetStatus(p tailcfg.NodeView) ipnstate.TaildropTargetStatus { + if b.state != ipn.Running { + return ipnstate.TaildropTargetIpnStateNotRunning + } + if b.netMap == nil { + return ipnstate.TaildropTargetNoNetmapAvailable + } + if !b.capFileSharing { + return ipnstate.TaildropTargetMissingCap + } + + if !p.Online().Get() { + return ipnstate.TaildropTargetOffline + } + + if !p.Valid() { + return ipnstate.TaildropTargetNoPeerInfo + } + if b.netMap.User() != p.User() { + // Different user must have the explicit file sharing target capability + if p.Addresses().Len() == 0 || + !b.peerHasCapLocked(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) { + return ipnstate.TaildropTargetOwnedByOtherUser + } + } + + if p.Hostinfo().OS() == "tvOS" { + return ipnstate.TaildropTargetUnsupportedOS + } + if peerAPIBase(b.netMap, p) == "" { + return ipnstate.TaildropTargetNoPeerAPI + } + return ipnstate.TaildropTargetAvailable +} + +// peerIsTaildropTargetLocked reports whether p is a valid Taildrop file +// recipient from this node according to its ownership and the capabilities in +// the netmap. +// +// b.mu must be locked. +func (b *LocalBackend) peerIsTaildropTargetLocked(p tailcfg.NodeView) bool { + if b.netMap == nil || !p.Valid() { + return false + } + if b.netMap.User() == p.User() { + return true + } + if p.Addresses().Len() > 0 && + b.peerHasCapLocked(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) { + // Explicitly noted in the netmap ACL caps as a target. + return true + } + return false +} + // UpdateOutgoingFiles updates b.outgoingFiles to reflect the given updates and // sends an ipn.Notify with the full list of outgoingFiles. 
func (b *LocalBackend) UpdateOutgoingFiles(updates map[string]*ipn.OutgoingFile) { diff --git a/ipn/ipnlocal/taildrop_omit.go b/ipn/ipnlocal/taildrop_omit.go new file mode 100644 index 000000000..07d2d5cc0 --- /dev/null +++ b/ipn/ipnlocal/taildrop_omit.go @@ -0,0 +1,12 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_taildrop + +package ipnlocal + +type taildrop_Manager = struct{} + +func (b *LocalBackend) newTaildropManager(fileRoot string) *taildrop_Manager { + return nil +} diff --git a/ipn/ipnlocal/taildrop_test.go b/ipn/ipnlocal/taildrop_test.go new file mode 100644 index 000000000..9871d5e33 --- /dev/null +++ b/ipn/ipnlocal/taildrop_test.go @@ -0,0 +1,77 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_taildrop + +package ipnlocal + +import ( + "fmt" + "testing" + + "tailscale.com/ipn" + "tailscale.com/tailcfg" + "tailscale.com/tstest/deptest" + "tailscale.com/types/netmap" + "tailscale.com/util/mak" +) + +func TestFileTargets(t *testing.T) { + b := new(LocalBackend) + _, err := b.FileTargets() + if got, want := fmt.Sprint(err), "not connected to the tailnet"; got != want { + t.Errorf("before connect: got %q; want %q", got, want) + } + + b.netMap = new(netmap.NetworkMap) + _, err = b.FileTargets() + if got, want := fmt.Sprint(err), "not connected to the tailnet"; got != want { + t.Errorf("non-running netmap: got %q; want %q", got, want) + } + + b.state = ipn.Running + _, err = b.FileTargets() + if got, want := fmt.Sprint(err), "file sharing not enabled by Tailscale admin"; got != want { + t.Errorf("without cap: got %q; want %q", got, want) + } + + b.capFileSharing = true + got, err := b.FileTargets() + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Fatalf("unexpected %d peers", len(got)) + } + + var peerMap map[tailcfg.NodeID]tailcfg.NodeView + mak.NonNil(&peerMap) + var nodeID tailcfg.NodeID + nodeID = 1234 + peer := &tailcfg.Node{ + ID: 1234, + Hostinfo: (&tailcfg.Hostinfo{OS: "tvOS"}).View(), + } + peerMap[nodeID] = peer.View() + b.peers = peerMap + got, err = b.FileTargets() + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Fatalf("unexpected %d peers", len(got)) + } + // (other cases handled by TestPeerAPIBase above) +} + +func TestOmitTaildropDeps(t *testing.T) { + deptest.DepChecker{ + Tags: "ts_omit_taildrop", + GOOS: "linux", + GOARCH: "amd64", + BadDeps: map[string]string{ + "tailscale.com/taildrop": "should be omitted", + "tailscale.com/feature/taildrop": "should be omitted", + }, + }.Check(t) +} diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 40e3b7586..94f51d4f2 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -14,12 +14,8 @@ import ( "errors" "fmt" "io" - "maps" - "mime" - "mime/multipart" "net" "net/http" - "net/http/httputil" "net/netip" "net/url" "os" @@ -46,7 +42,6 @@ import ( "tailscale.com/net/netutil" "tailscale.com/net/portmapper" "tailscale.com/tailcfg" - "tailscale.com/taildrop" "tailscale.com/tka" "tailscale.com/tstime" "tailscale.com/types/dnstype" @@ -57,11 +52,9 @@ import ( "tailscale.com/types/tkatype" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" - "tailscale.com/util/httphdr" "tailscale.com/util/httpm" "tailscale.com/util/mak" "tailscale.com/util/osdiag" - "tailscale.com/util/progresstracking" "tailscale.com/util/rands" "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" @@ -77,8 +70,6 @@ type LocalAPIHandler func(*Handler, 
http.ResponseWriter, *http.Request) var handler = map[string]LocalAPIHandler{ // The prefix match handlers end with a slash: "cert/": (*Handler).serveCert, - "file-put/": (*Handler).serveFilePut, - "files/": (*Handler).serveFiles, "policy/": (*Handler).servePolicy, "profiles/": (*Handler).serveProfiles, @@ -106,7 +97,6 @@ var handler = map[string]LocalAPIHandler{ "dns-query": (*Handler).serveDNSQuery, "drive/fileserver-address": (*Handler).serveDriveServerAddr, "drive/shares": (*Handler).serveShares, - "file-targets": (*Handler).serveFileTargets, "goroutines": (*Handler).serveGoroutines, "handle-push-message": (*Handler).serveHandlePushMessage, "id-token": (*Handler).serveIDToken, @@ -203,6 +193,10 @@ type Handler struct { clock tstime.Clock } +func (h *Handler) Logf(format string, args ...any) { + h.logf(format, args...) +} + func (h *Handler) LocalBackend() *ipnlocal.LocalBackend { return h.b } @@ -1087,7 +1081,7 @@ func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) { } configIn := new(ipn.ServeConfig) if err := json.NewDecoder(r.Body).Decode(configIn); err != nil { - writeErrorJSON(w, fmt.Errorf("decoding config: %w", err)) + WriteErrorJSON(w, fmt.Errorf("decoding config: %w", err)) return } @@ -1105,7 +1099,7 @@ func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusPreconditionFailed) return } - writeErrorJSON(w, fmt.Errorf("updating config: %w", err)) + WriteErrorJSON(w, fmt.Errorf("updating config: %w", err)) return } w.WriteHeader(http.StatusOK) @@ -1482,67 +1476,10 @@ func (h *Handler) serveCheckPrefs(w http.ResponseWriter, r *http.Request) { json.NewEncoder(w).Encode(res) } -func (h *Handler) serveFiles(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "file access denied", http.StatusForbidden) - return - } - suffix, ok := strings.CutPrefix(r.URL.EscapedPath(), "/localapi/v0/files/") - if !ok { - http.Error(w, "misconfigured", http.StatusInternalServerError) - return - } - if suffix == "" { - if r.Method != "GET" { - http.Error(w, "want GET to list files", http.StatusBadRequest) - return - } - ctx := r.Context() - if s := r.FormValue("waitsec"); s != "" && s != "0" { - d, err := strconv.Atoi(s) - if err != nil { - http.Error(w, "invalid waitsec", http.StatusBadRequest) - return - } - deadline := time.Now().Add(time.Duration(d) * time.Second) - var cancel context.CancelFunc - ctx, cancel = context.WithDeadline(ctx, deadline) - defer cancel() - } - wfs, err := h.b.AwaitWaitingFiles(ctx) - if err != nil && ctx.Err() == nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(wfs) - return - } - name, err := url.PathUnescape(suffix) - if err != nil { - http.Error(w, "bad filename", http.StatusBadRequest) - return - } - if r.Method == "DELETE" { - if err := h.b.DeleteFile(name); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusNoContent) - return - } - rc, size, err := h.b.OpenFile(name) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - defer rc.Close() - w.Header().Set("Content-Length", fmt.Sprint(size)) - w.Header().Set("Content-Type", "application/octet-stream") - io.Copy(w, rc) -} - -func writeErrorJSON(w http.ResponseWriter, err error) { +// WriteErrorJSON writes a JSON object (with a single "error" string field) to w +// with the given error. 
If err is nil, "unexpected nil error" is used for the +// stringification instead. +func WriteErrorJSON(w http.ResponseWriter, err error) { if err == nil { err = errors.New("unexpected nil error") } @@ -1554,329 +1491,6 @@ func writeErrorJSON(w http.ResponseWriter, err error) { json.NewEncoder(w).Encode(E{err.Error()}) } -func (h *Handler) serveFileTargets(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "access denied", http.StatusForbidden) - return - } - if r.Method != "GET" { - http.Error(w, "want GET to list targets", http.StatusBadRequest) - return - } - fts, err := h.b.FileTargets() - if err != nil { - writeErrorJSON(w, err) - return - } - mak.NonNilSliceForJSON(&fts) - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(fts) -} - -// serveFilePut sends a file to another node. -// -// It's sometimes possible for clients to do this themselves, without -// tailscaled, except in the case of tailscaled running in -// userspace-networking ("netstack") mode, in which case tailscaled -// needs to a do a netstack dial out. -// -// Instead, the CLI also goes through tailscaled so it doesn't need to be -// aware of the network mode in use. -// -// macOS/iOS have always used this localapi method to simplify the GUI -// clients. -// -// The Windows client currently (2021-11-30) uses the peerapi (/v0/put/) -// directly, as the Windows GUI always runs in tun mode anyway. -// -// In addition to single file PUTs, this endpoint accepts multipart file -// POSTS encoded as multipart/form-data.The first part should be an -// application/json file that contains a manifest consisting of a JSON array of -// OutgoingFiles which wecan use for tracking progress even before reading the -// file parts. -// -// URL format: -// -// - PUT /localapi/v0/file-put/:stableID/:escaped-filename -// - POST /localapi/v0/file-put/:stableID -func (h *Handler) serveFilePut(w http.ResponseWriter, r *http.Request) { - metricFilePutCalls.Add(1) - - if !h.PermitWrite { - http.Error(w, "file access denied", http.StatusForbidden) - return - } - - if r.Method != "PUT" && r.Method != "POST" { - http.Error(w, "want PUT to put file", http.StatusBadRequest) - return - } - - fts, err := h.b.FileTargets() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - upath, ok := strings.CutPrefix(r.URL.EscapedPath(), "/localapi/v0/file-put/") - if !ok { - http.Error(w, "misconfigured", http.StatusInternalServerError) - return - } - var peerIDStr, filenameEscaped string - if r.Method == "PUT" { - ok := false - peerIDStr, filenameEscaped, ok = strings.Cut(upath, "/") - if !ok { - http.Error(w, "bogus URL", http.StatusBadRequest) - return - } - } else { - peerIDStr = upath - } - peerID := tailcfg.StableNodeID(peerIDStr) - - var ft *apitype.FileTarget - for _, x := range fts { - if x.Node.StableID == peerID { - ft = x - break - } - } - if ft == nil { - http.Error(w, "node not found", http.StatusNotFound) - return - } - dstURL, err := url.Parse(ft.PeerAPIURL) - if err != nil { - http.Error(w, "bogus peer URL", http.StatusInternalServerError) - return - } - - // Periodically report progress of outgoing files. 
- outgoingFiles := make(map[string]*ipn.OutgoingFile) - t := time.NewTicker(1 * time.Second) - progressUpdates := make(chan ipn.OutgoingFile) - defer close(progressUpdates) - - go func() { - defer t.Stop() - defer h.b.UpdateOutgoingFiles(outgoingFiles) - for { - select { - case u, ok := <-progressUpdates: - if !ok { - return - } - outgoingFiles[u.ID] = &u - case <-t.C: - h.b.UpdateOutgoingFiles(outgoingFiles) - } - } - }() - - switch r.Method { - case "PUT": - file := ipn.OutgoingFile{ - ID: rands.HexString(30), - PeerID: peerID, - Name: filenameEscaped, - DeclaredSize: r.ContentLength, - } - h.singleFilePut(r.Context(), progressUpdates, w, r.Body, dstURL, file) - case "POST": - h.multiFilePost(progressUpdates, w, r, peerID, dstURL) - default: - http.Error(w, "want PUT to put file", http.StatusBadRequest) - return - } -} - -func (h *Handler) multiFilePost(progressUpdates chan (ipn.OutgoingFile), w http.ResponseWriter, r *http.Request, peerID tailcfg.StableNodeID, dstURL *url.URL) { - _, params, err := mime.ParseMediaType(r.Header.Get("Content-Type")) - if err != nil { - http.Error(w, fmt.Sprintf("invalid Content-Type for multipart POST: %s", err), http.StatusBadRequest) - return - } - - ww := &multiFilePostResponseWriter{} - defer func() { - if err := ww.Flush(w); err != nil { - h.logf("error: multiFilePostResponseWriter.Flush(): %s", err) - } - }() - - outgoingFilesByName := make(map[string]ipn.OutgoingFile) - first := true - mr := multipart.NewReader(r.Body, params["boundary"]) - for { - part, err := mr.NextPart() - if err == io.EOF { - // No more parts. - return - } else if err != nil { - http.Error(ww, fmt.Sprintf("failed to decode multipart/form-data: %s", err), http.StatusBadRequest) - return - } - - if first { - first = false - if part.Header.Get("Content-Type") != "application/json" { - http.Error(ww, "first MIME part must be a JSON map of filename -> size", http.StatusBadRequest) - return - } - - var manifest []ipn.OutgoingFile - err := json.NewDecoder(part).Decode(&manifest) - if err != nil { - http.Error(ww, fmt.Sprintf("invalid manifest: %s", err), http.StatusBadRequest) - return - } - - for _, file := range manifest { - outgoingFilesByName[file.Name] = file - progressUpdates <- file - } - - continue - } - - if !h.singleFilePut(r.Context(), progressUpdates, ww, part, dstURL, outgoingFilesByName[part.FileName()]) { - return - } - - if ww.statusCode >= 400 { - // put failed, stop immediately - h.logf("error: singleFilePut: failed with status %d", ww.statusCode) - return - } - } -} - -// multiFilePostResponseWriter is a buffering http.ResponseWriter that can be -// reused across multiple singleFilePut calls and then flushed to the client -// when all files have been PUT. 
-type multiFilePostResponseWriter struct { - header http.Header - statusCode int - body *bytes.Buffer -} - -func (ww *multiFilePostResponseWriter) Header() http.Header { - if ww.header == nil { - ww.header = make(http.Header) - } - return ww.header -} - -func (ww *multiFilePostResponseWriter) WriteHeader(statusCode int) { - ww.statusCode = statusCode -} - -func (ww *multiFilePostResponseWriter) Write(p []byte) (int, error) { - if ww.body == nil { - ww.body = bytes.NewBuffer(nil) - } - return ww.body.Write(p) -} - -func (ww *multiFilePostResponseWriter) Flush(w http.ResponseWriter) error { - if ww.header != nil { - maps.Copy(w.Header(), ww.header) - } - if ww.statusCode > 0 { - w.WriteHeader(ww.statusCode) - } - if ww.body != nil { - _, err := io.Copy(w, ww.body) - return err - } - return nil -} - -func (h *Handler) singleFilePut( - ctx context.Context, - progressUpdates chan (ipn.OutgoingFile), - w http.ResponseWriter, - body io.Reader, - dstURL *url.URL, - outgoingFile ipn.OutgoingFile, -) bool { - outgoingFile.Started = time.Now() - body = progresstracking.NewReader(body, 1*time.Second, func(n int, err error) { - outgoingFile.Sent = int64(n) - progressUpdates <- outgoingFile - }) - - fail := func() { - outgoingFile.Finished = true - outgoingFile.Succeeded = false - progressUpdates <- outgoingFile - } - - // Before we PUT a file we check to see if there are any existing partial file and if so, - // we resume the upload from where we left off by sending the remaining file instead of - // the full file. - var offset int64 - var resumeDuration time.Duration - remainingBody := io.Reader(body) - client := &http.Client{ - Transport: h.b.Dialer().PeerAPITransport(), - Timeout: 10 * time.Second, - } - req, err := http.NewRequestWithContext(ctx, "GET", dstURL.String()+"/v0/put/"+outgoingFile.Name, nil) - if err != nil { - http.Error(w, "bogus peer URL", http.StatusInternalServerError) - fail() - return false - } - switch resp, err := client.Do(req); { - case err != nil: - h.logf("could not fetch remote hashes: %v", err) - case resp.StatusCode == http.StatusMethodNotAllowed || resp.StatusCode == http.StatusNotFound: - // noop; implies older peerapi without resume support - case resp.StatusCode != http.StatusOK: - h.logf("fetch remote hashes status code: %d", resp.StatusCode) - default: - resumeStart := time.Now() - dec := json.NewDecoder(resp.Body) - offset, remainingBody, err = taildrop.ResumeReader(body, func() (out taildrop.BlockChecksum, err error) { - err = dec.Decode(&out) - return out, err - }) - if err != nil { - h.logf("reader could not be fully resumed: %v", err) - } - resumeDuration = time.Since(resumeStart).Round(time.Millisecond) - } - - outReq, err := http.NewRequestWithContext(ctx, "PUT", "http://peer/v0/put/"+outgoingFile.Name, remainingBody) - if err != nil { - http.Error(w, "bogus outreq", http.StatusInternalServerError) - fail() - return false - } - outReq.ContentLength = outgoingFile.DeclaredSize - if offset > 0 { - h.logf("resuming put at offset %d after %v", offset, resumeDuration) - rangeHdr, _ := httphdr.FormatRange([]httphdr.Range{{Start: offset, Length: 0}}) - outReq.Header.Set("Range", rangeHdr) - if outReq.ContentLength >= 0 { - outReq.ContentLength -= offset - } - } - - rp := httputil.NewSingleHostReverseProxy(dstURL) - rp.Transport = h.b.Dialer().PeerAPITransport() - rp.ServeHTTP(w, outReq) - - outgoingFile.Finished = true - outgoingFile.Succeeded = true - progressUpdates <- outgoingFile - - return true -} - func (h *Handler) serveSetDNS(w http.ResponseWriter, r 
*http.Request) { if !h.PermitWrite { http.Error(w, "access denied", http.StatusForbidden) @@ -1889,7 +1503,7 @@ func (h *Handler) serveSetDNS(w http.ResponseWriter, r *http.Request) { ctx := r.Context() err := h.b.SetDNS(ctx, r.FormValue("name"), r.FormValue("value")) if err != nil { - writeErrorJSON(w, err) + WriteErrorJSON(w, err) return } w.Header().Set("Content-Type", "application/json") @@ -1980,7 +1594,7 @@ func (h *Handler) servePing(w http.ResponseWriter, r *http.Request) { } res, err := h.b.Ping(ctx, ip, tailcfg.PingType(pingTypeStr), size) if err != nil { - writeErrorJSON(w, err) + WriteErrorJSON(w, err) return } w.Header().Set("Content-Type", "application/json") @@ -3013,7 +2627,6 @@ var ( metricInvalidRequests = clientmetric.NewCounter("localapi_invalid_requests") // User-visible LocalAPI endpoints. - metricFilePutCalls = clientmetric.NewCounter("localapi_file_put") metricDebugMetricsCalls = clientmetric.NewCounter("localapi_debugmetric_requests") metricUserMetricsCalls = clientmetric.NewCounter("localapi_usermetric_requests") ) @@ -3026,7 +2639,7 @@ func (h *Handler) serveSuggestExitNode(w http.ResponseWriter, r *http.Request) { } res, err := h.b.SuggestExitNode() if err != nil { - writeErrorJSON(w, err) + WriteErrorJSON(w, err) return } w.Header().Set("Content-Type", "application/json") diff --git a/taildrop/taildrop.go b/taildrop/taildrop.go index 4d14787af..6996dbc4d 100644 --- a/taildrop/taildrop.go +++ b/taildrop/taildrop.go @@ -18,6 +18,7 @@ import ( "path" "path/filepath" "regexp" + "sort" "strconv" "strings" "sync" @@ -239,6 +240,11 @@ func (m *Manager) IncomingFiles() []ipn.PartialFile { }) f.mu.Unlock() } + + sort.Slice(files, func(i, j int) bool { + return files[i].Started.Before(files[j].Started) + }) + return files } From 26f31f73f4919b77e72cbe9dd62a91b5f6e36e43 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Tue, 15 Apr 2025 11:50:39 -0500 Subject: [PATCH 0773/1708] cmd/dist,release/dist: sign QNAP builds with a Google Cloud hosted key QNAP now requires builds to be signed with an HSM. This removes support for signing with a local keypair. This adds support for signing with a Google Cloud hosted key. The key should be an RSA key with protection level `HSM` and that uses PSS padding and a SHA256 digest. The GCloud project, keyring and key name are passed in as command-line arguments. The GCloud credentials and the PEM signing certificate are passed in as Base64-encoded command-line arguments. 
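For illustration only, a signing-enabled build might be invoked along the lines of the existing synology example, with all five flags supplied together; the values below are placeholders that reuse the environment variable names the build later passes to the signing container, and the trailing target filter is likewise illustrative:

./tool/go run ./cmd/dist build --gcloud-credentials "$GCLOUD_CREDENTIALS_BASE64" --gcloud-project "$GCLOUD_PROJECT" --gcloud-keyring "$GCLOUD_KEYRING" --qnap-key-name "$QNAP_SIGNING_KEY_NAME" --qnap-certificate "$QNAP_SIGNING_CERT_BASE64" qnap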
Updates tailscale/corp#23528 Signed-off-by: Percy Wegmann --- cmd/dist/dist.go | 25 ++++++--- .../dist/qnap/files/scripts/Dockerfile.qpkg | 20 +++++-- release/dist/qnap/files/scripts/sign-qpkg.sh | 40 ++++++++++++++ release/dist/qnap/pkgs.go | 54 +++++++++---------- release/dist/qnap/targets.go | 27 +++++++--- 5 files changed, 119 insertions(+), 47 deletions(-) create mode 100755 release/dist/qnap/files/scripts/sign-qpkg.sh diff --git a/cmd/dist/dist.go b/cmd/dist/dist.go index 05f5bbfb2..038ced708 100644 --- a/cmd/dist/dist.go +++ b/cmd/dist/dist.go @@ -5,11 +5,13 @@ package main import ( + "cmp" "context" "errors" "flag" "log" "os" + "slices" "tailscale.com/release/dist" "tailscale.com/release/dist/cli" @@ -19,9 +21,12 @@ import ( ) var ( - synologyPackageCenter bool - qnapPrivateKeyPath string - qnapCertificatePath string + synologyPackageCenter bool + gcloudCredentialsBase64 string + gcloudProject string + gcloudKeyring string + qnapKeyName string + qnapCertificateBase64 string ) func getTargets() ([]dist.Target, error) { @@ -42,10 +47,11 @@ func getTargets() ([]dist.Target, error) { // To build for package center, run // ./tool/go run ./cmd/dist build --synology-package-center synology ret = append(ret, synology.Targets(synologyPackageCenter, nil)...) - if (qnapPrivateKeyPath == "") != (qnapCertificatePath == "") { - return nil, errors.New("both --qnap-private-key-path and --qnap-certificate-path must be set") + qnapSigningArgs := []string{gcloudCredentialsBase64, gcloudProject, gcloudKeyring, qnapKeyName, qnapCertificateBase64} + if cmp.Or(qnapSigningArgs...) != "" && slices.Contains(qnapSigningArgs, "") { + return nil, errors.New("all of --gcloud-credentials, --gcloud-project, --gcloud-keyring, --qnap-key-name and --qnap-certificate must be set") } - ret = append(ret, qnap.Targets(qnapPrivateKeyPath, qnapCertificatePath)...) + ret = append(ret, qnap.Targets(gcloudCredentialsBase64, gcloudProject, gcloudKeyring, qnapKeyName, qnapCertificateBase64)...) 
return ret, nil } @@ -54,8 +60,11 @@ func main() { for _, subcmd := range cmd.Subcommands { if subcmd.Name == "build" { subcmd.FlagSet.BoolVar(&synologyPackageCenter, "synology-package-center", false, "build synology packages with extra metadata for the official package center") - subcmd.FlagSet.StringVar(&qnapPrivateKeyPath, "qnap-private-key-path", "", "sign qnap packages with given key (must also provide --qnap-certificate-path)") - subcmd.FlagSet.StringVar(&qnapCertificatePath, "qnap-certificate-path", "", "sign qnap packages with given certificate (must also provide --qnap-private-key-path)") + subcmd.FlagSet.StringVar(&gcloudCredentialsBase64, "gcloud-credentials", "", "base64 encoded GCP credentials (used when signing QNAP builds)") + subcmd.FlagSet.StringVar(&gcloudProject, "gcloud-project", "", "name of project in GCP KMS (used when signing QNAP builds)") + subcmd.FlagSet.StringVar(&gcloudKeyring, "gcloud-keyring", "", "path to keyring in GCP KMS (used when signing QNAP builds)") + subcmd.FlagSet.StringVar(&qnapKeyName, "qnap-key-name", "", "name of GCP key to use when signing QNAP builds") + subcmd.FlagSet.StringVar(&qnapCertificateBase64, "qnap-certificate", "", "base64 encoded certificate to use when signing QNAP builds") } } diff --git a/release/dist/qnap/files/scripts/Dockerfile.qpkg b/release/dist/qnap/files/scripts/Dockerfile.qpkg index 135d5d20f..1f4c2406d 100644 --- a/release/dist/qnap/files/scripts/Dockerfile.qpkg +++ b/release/dist/qnap/files/scripts/Dockerfile.qpkg @@ -1,9 +1,21 @@ -FROM ubuntu:20.04 +FROM ubuntu:24.04 RUN apt-get update -y && \ apt-get install -y --no-install-recommends \ git-core \ - ca-certificates -RUN git clone https://github.com/qnap-dev/QDK.git + ca-certificates \ + apt-transport-https \ + gnupg \ + curl \ + patch + +# Install QNAP QDK (force a specific version to pick up updates) +RUN git clone https://github.com/tailscale/QDK.git && cd /QDK && git reset --hard 9a31a67387c583d19a81a378dcf7c25e2abe231d RUN cd /QDK && ./InstallToUbuntu.sh install -ENV PATH="/usr/share/QDK/bin:${PATH}" \ No newline at end of file +ENV PATH="/usr/share/QDK/bin:${PATH}" + +# Install Google Cloud PKCS11 module +RUN curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg +RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list +RUN apt-get update -y && apt-get install -y --no-install-recommends google-cloud-cli libengine-pkcs11-openssl +RUN curl -L https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/pkcs11-v1.6/libkmsp11-1.6-linux-amd64.tar.gz | tar xz diff --git a/release/dist/qnap/files/scripts/sign-qpkg.sh b/release/dist/qnap/files/scripts/sign-qpkg.sh new file mode 100755 index 000000000..5629672f8 --- /dev/null +++ b/release/dist/qnap/files/scripts/sign-qpkg.sh @@ -0,0 +1,40 @@ +#! 
/usr/bin/env bash +set -xeu + +mkdir -p "$HOME/.config/gcloud" +echo "$GCLOUD_CREDENTIALS_BASE64" | base64 --decode > /root/.config/gcloud/application_default_credentials.json +gcloud config set project "$GCLOUD_PROJECT" + +echo "--- +tokens: + - key_ring: \"$GCLOUD_KEYRING\" +log_directory: "/tmp/kmsp11" +" > pkcs11-config.yaml +chmod 0600 pkcs11-config.yaml + +export KMS_PKCS11_CONFIG=`readlink -f pkcs11-config.yaml` +export PKCS11_MODULE_PATH=/libkmsp11-1.6-linux-amd64/libkmsp11.so + +# Verify signature of pkcs11 module +# See https://github.com/GoogleCloudPlatform/kms-integrations/blob/master/kmsp11/docs/user_guide.md#downloading-and-verifying-the-library +echo "-----BEGIN PUBLIC KEY----- +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEtfLbXkHUVc9oUPTNyaEK3hIwmuGRoTtd +6zDhwqjJuYaMwNd1aaFQLMawTwZgR0Xn27ymVWtqJHBe0FU9BPIQ+SFmKw+9jSwu +/FuqbJnLmTnWMJ1jRCtyHNZawvv2wbiB +-----END PUBLIC KEY-----" > pkcs11-release-signing-key.pem +openssl dgst -sha384 -verify pkcs11-release-signing-key.pem -signature "$PKCS11_MODULE_PATH.sig" "$PKCS11_MODULE_PATH" + +echo "$QNAP_SIGNING_CERT_BASE64" | base64 --decode > cert.crt + +openssl cms \ + -sign \ + -binary \ + -nodetach \ + -engine pkcs11 \ + -keyform engine \ + -inkey "pkcs11:object=$QNAP_SIGNING_KEY_NAME" \ + -keyopt rsa_padding_mode:pss \ + -keyopt rsa_pss_saltlen:digest \ + -signer cert.crt \ + -in "$1" \ + -out - diff --git a/release/dist/qnap/pkgs.go b/release/dist/qnap/pkgs.go index 9df649ddb..7dc3b9495 100644 --- a/release/dist/qnap/pkgs.go +++ b/release/dist/qnap/pkgs.go @@ -27,8 +27,11 @@ type target struct { } type signer struct { - privateKeyPath string - certificatePath string + gcloudCredentialsBase64 string + gcloudProject string + gcloudKeyring string + keyName string + certificateBase64 string } func (t *target) String() string { @@ -66,7 +69,8 @@ func (t *target) buildQPKG(b *dist.Build, qnapBuilds *qnapBuilds, inner *innerPk filename := fmt.Sprintf("Tailscale_%s-%s_%s.qpkg", b.Version.Short, qnapTag, t.arch) filePath := filepath.Join(b.Out, filename) - cmd := b.Command(b.Repo, "docker", "run", "--rm", + args := []string{"run", "--rm", + "--network=host", "-e", fmt.Sprintf("ARCH=%s", t.arch), "-e", fmt.Sprintf("TSTAG=%s", b.Version.Short), "-e", fmt.Sprintf("QNAPTAG=%s", qnapTag), @@ -76,10 +80,28 @@ func (t *target) buildQPKG(b *dist.Build, qnapBuilds *qnapBuilds, inner *innerPk "-v", fmt.Sprintf("%s:/Tailscale", filepath.Join(qnapBuilds.tmpDir, "files/Tailscale")), "-v", fmt.Sprintf("%s:/build-qpkg.sh", filepath.Join(qnapBuilds.tmpDir, "files/scripts/build-qpkg.sh")), "-v", fmt.Sprintf("%s:/out", b.Out), + } + + if t.signer != nil { + log.Println("Will sign with Google Cloud HSM") + args = append(args, + "-e", fmt.Sprintf("GCLOUD_CREDENTIALS_BASE64=%s", t.signer.gcloudCredentialsBase64), + "-e", fmt.Sprintf("GCLOUD_PROJECT=%s", t.signer.gcloudProject), + "-e", fmt.Sprintf("GCLOUD_KEYRING=%s", t.signer.gcloudKeyring), + "-e", fmt.Sprintf("QNAP_SIGNING_KEY_NAME=%s", t.signer.keyName), + "-e", fmt.Sprintf("QNAP_SIGNING_CERT_BASE64=%s", t.signer.certificateBase64), + "-e", fmt.Sprintf("QNAP_SIGNING_SCRIPT=%s", "/sign-qpkg.sh"), + "-v", fmt.Sprintf("%s:/sign-qpkg.sh", filepath.Join(qnapBuilds.tmpDir, "files/scripts/sign-qpkg.sh")), + ) + } + + args = append(args, "build.tailscale.io/qdk:latest", "/build-qpkg.sh", ) + cmd := b.Command(b.Repo, "docker", args...) + // dist.Build runs target builds in parallel goroutines by default. 
// For QNAP, this is an issue because the underlaying qbuild builder will // create tmp directories in the shared docker image that end up conflicting @@ -176,32 +198,6 @@ func newQNAPBuilds(b *dist.Build, signer *signer) (*qnapBuilds, error) { return nil, err } - if signer != nil { - log.Print("Setting up qnap signing files") - - key, err := os.ReadFile(signer.privateKeyPath) - if err != nil { - return nil, err - } - cert, err := os.ReadFile(signer.certificatePath) - if err != nil { - return nil, err - } - - // QNAP's qbuild command expects key and cert files to be in the root - // of the project directory (in our case release/dist/qnap/Tailscale). - // So here, we copy the key and cert over to the project folder for the - // duration of qnap package building and then delete them on close. - - keyPath := filepath.Join(m.tmpDir, "files/Tailscale/private_key") - if err := os.WriteFile(keyPath, key, 0400); err != nil { - return nil, err - } - certPath := filepath.Join(m.tmpDir, "files/Tailscale/certificate") - if err := os.WriteFile(certPath, cert, 0400); err != nil { - return nil, err - } - } return m, nil } diff --git a/release/dist/qnap/targets.go b/release/dist/qnap/targets.go index a069dd623..1c1818a70 100644 --- a/release/dist/qnap/targets.go +++ b/release/dist/qnap/targets.go @@ -3,16 +3,31 @@ package qnap -import "tailscale.com/release/dist" +import ( + "slices" + + "tailscale.com/release/dist" +) // Targets defines the dist.Targets for QNAP devices. // -// If privateKeyPath and certificatePath are both provided non-empty, -// these targets will be signed for QNAP app store release with built. -func Targets(privateKeyPath, certificatePath string) []dist.Target { +// If all parameters are provided non-empty, then the build will be signed using +// a Google Cloud hosted key. +// +// gcloudCredentialsBase64 is the JSON credential for connecting to Google Cloud, base64 encoded. +// gcloudKeyring is the full path to the Google Cloud keyring containing the signing key. +// keyName is the name of the key. +// certificateBase64 is the PEM certificate to use in the signature, base64 encoded. 
+func Targets(gcloudCredentialsBase64, gcloudProject, gcloudKeyring, keyName, certificateBase64 string) []dist.Target { var signerInfo *signer - if privateKeyPath != "" && certificatePath != "" { - signerInfo = &signer{privateKeyPath, certificatePath} + if !slices.Contains([]string{gcloudCredentialsBase64, gcloudProject, gcloudKeyring, keyName, certificateBase64}, "") { + signerInfo = &signer{ + gcloudCredentialsBase64: gcloudCredentialsBase64, + gcloudProject: gcloudProject, + gcloudKeyring: gcloudKeyring, + keyName: keyName, + certificateBase64: certificateBase64, + } } return []dist.Target{ &target{ From 9666c2e7002160cba936b181fb4ee7f5590f6cce Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Thu, 17 Apr 2025 16:14:34 +0100 Subject: [PATCH 0774/1708] cmd/k8s-operator: default ingress paths to '/' if not specified by user (#15706) in resource Fixes #14908 Signed-off-by: chaosinthecrd --- cmd/k8s-operator/ingress.go | 6 ++ cmd/k8s-operator/ingress_test.go | 176 +++++++++++++++++++++++++++++-- 2 files changed, 176 insertions(+), 6 deletions(-) diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index 8c19a5e05..6c50e10b2 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -292,9 +292,15 @@ func validateIngressClass(ctx context.Context, cl client.Client) error { func handlersForIngress(ctx context.Context, ing *networkingv1.Ingress, cl client.Client, rec record.EventRecorder, tlsHost string, logger *zap.SugaredLogger) (handlers map[string]*ipn.HTTPHandler, err error) { addIngressBackend := func(b *networkingv1.IngressBackend, path string) { + if path == "" { + path = "/" + rec.Eventf(ing, corev1.EventTypeNormal, "PathUndefined", "configured backend is missing a path, defaulting to '/'") + } + if b == nil { return } + if b.Service == nil { rec.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "backend for path %q is missing service", path) return diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index f9623850c..a975fec7a 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -16,6 +16,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "tailscale.com/ipn" @@ -487,6 +488,138 @@ func TestIngressLetsEncryptStaging(t *testing.T) { } } +func TestEmptyPath(t *testing.T) { + testCases := []struct { + name string + paths []networkingv1.HTTPIngressPath + expectedEvents []string + }{ + { + name: "empty_path_with_prefix_type", + paths: []networkingv1.HTTPIngressPath{ + { + PathType: ptrPathType(networkingv1.PathTypePrefix), + Path: "", + Backend: *backend(), + }, + }, + expectedEvents: []string{ + "Normal PathUndefined configured backend is missing a path, defaulting to '/'", + }, + }, + { + name: "empty_path_with_implementation_specific_type", + paths: []networkingv1.HTTPIngressPath{ + { + PathType: ptrPathType(networkingv1.PathTypeImplementationSpecific), + Path: "", + Backend: *backend(), + }, + }, + expectedEvents: []string{ + "Normal PathUndefined configured backend is missing a path, defaulting to '/'", + }, + }, + { + name: "empty_path_with_exact_type", + paths: []networkingv1.HTTPIngressPath{ + { + PathType: ptrPathType(networkingv1.PathTypeExact), + Path: "", + Backend: *backend(), + }, + }, + expectedEvents: []string{ + "Warning 
UnsupportedPathTypeExact Exact path type strict matching is currently not supported and requests will be routed as for Prefix path type. This behaviour might change in the future.", + "Normal PathUndefined configured backend is missing a path, defaulting to '/'", + }, + }, + { + name: "two_competing_but_not_identical_paths_including_one_empty", + paths: []networkingv1.HTTPIngressPath{ + { + PathType: ptrPathType(networkingv1.PathTypeImplementationSpecific), + Path: "", + Backend: *backend(), + }, + { + PathType: ptrPathType(networkingv1.PathTypeImplementationSpecific), + Path: "/", + Backend: *backend(), + }, + }, + expectedEvents: []string{ + "Normal PathUndefined configured backend is missing a path, defaulting to '/'", + }, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + fc := fake.NewFakeClient(ingressClass()) + ft := &fakeTSClient{} + fr := record.NewFakeRecorder(3) // bump this if you expect a test case to throw more events + fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + ingR := &IngressReconciler{ + recorder: fr, + Client: fc, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + tsnetServer: fakeTsnetServer, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + logger: zl.Sugar(), + } + + // 1. Resources get created for regular Ingress + mustCreate(t, fc, ingressWithPaths(tt.paths)) + mustCreate(t, fc, service()) + + expectReconciled(t, ingR, "default", "test") + + fullName, shortName := findGenName(t, fc, "default", "test", "ingress") + mustCreate(t, fc, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fullName, + Namespace: "operator-ns", + UID: "test-uid", + }, + }) + opts := configOpts{ + stsName: shortName, + secretName: fullName, + namespace: "default", + parentType: "ingress", + hostname: "foo", + app: kubetypes.AppIngressResource, + } + serveConfig := &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}}, + } + opts.serveConfig = serveConfig + + expectEqual(t, fc, expectedSecret(t, fc, opts)) + expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + + expectEvents(t, fr, tt.expectedEvents) + }) + } +} + +// ptrPathType is a helper function to return a pointer to the pathtype string (required for TestEmptyPath) +func ptrPathType(p networkingv1.PathType) *networkingv1.PathType { + return &p +} + func ingressClass() *networkingv1.IngressClass { return &networkingv1.IngressClass{ ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, @@ -520,16 +653,47 @@ func ingress() *networkingv1.Ingress { }, Spec: networkingv1.IngressSpec{ IngressClassName: ptr.To("tailscale"), - DefaultBackend: &networkingv1.IngressBackend{ - Service: &networkingv1.IngressServiceBackend{ - Name: "test", - Port: networkingv1.ServiceBackendPort{ - Number: 8080, + DefaultBackend: backend(), + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"default-test"}}, + }, + }, + } +} + +func ingressWithPaths(paths []networkingv1.HTTPIngressPath) *networkingv1.Ingress { + return &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + 
Namespace: "default", + UID: types.UID("1234-UID"), + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + Rules: []networkingv1.IngressRule{ + { + Host: "foo.tailnetxyz.ts.net", + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: paths, + }, }, }, }, TLS: []networkingv1.IngressTLS{ - {Hosts: []string{"default-test"}}, + {Hosts: []string{"foo.tailnetxyz.ts.net"}}, + }, + }, + } +} + +func backend() *networkingv1.IngressBackend { + return &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 8080, }, }, } From 92027d7ae0d7479d5784688d8b1ae2a6184cefa1 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 17 Apr 2025 09:24:11 -0700 Subject: [PATCH 0775/1708] feature/relayserver: wire up profile/prefs changes (#15714) The relay server is still permanently disabled until node attribute changes are wired up in a future commit. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 85 +++++++++++----- feature/relayserver/relayserver_test.go | 126 ++++++++++++++++++++++++ 2 files changed, 184 insertions(+), 27 deletions(-) create mode 100644 feature/relayserver/relayserver_test.go diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 9cf776661..3d851780d 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -13,7 +13,9 @@ import ( "net/netip" "sync" + "tailscale.com/envknob" "tailscale.com/feature" + "tailscale.com/ipn" "tailscale.com/ipn/ipnext" "tailscale.com/ipn/ipnlocal" "tailscale.com/net/udprelay" @@ -21,6 +23,7 @@ import ( "tailscale.com/tsd" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/ptr" "tailscale.com/util/httpm" ) @@ -46,10 +49,17 @@ func newExtension(logf logger.Logf, _ *tsd.System) (ipnext.Extension, error) { type extension struct { logf logger.Logf - mu sync.Mutex // guards the following fields - shutdown bool - port int - server *udprelay.Server // lazily initialized + mu sync.Mutex // guards the following fields + shutdown bool + port *int // ipn.Prefs.RelayServerPort, nil if disabled + hasNodeAttrRelayServer bool // tailcfg.NodeAttrRelayServer + server relayServer // lazily initialized +} + +// relayServer is the interface of [udprelay.Server]. +type relayServer interface { + AllocateEndpoint(discoA key.DiscoPublic, discoB key.DiscoPublic) (udprelay.ServerEndpoint, error) + Close() error } // Name implements [ipnext.Extension]. @@ -59,10 +69,32 @@ func (e *extension) Name() string { // Init implements [ipnext.Extension] by registering callbacks and providers // for the duration of the extension's lifetime. 
-func (e *extension) Init(_ ipnext.Host) error { +func (e *extension) Init(host ipnext.Host) error { + profile, prefs := host.Profiles().CurrentProfileState() + e.profileStateChanged(profile, prefs, false) + host.Profiles().RegisterProfileStateChangeCallback(e.profileStateChanged) + // TODO(jwhited): callback for netmap/nodeattr changes (e.hasNodeAttrRelayServer) return nil } +func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { + e.mu.Lock() + defer e.mu.Unlock() + newPort, ok := prefs.RelayServerPort().GetOk() + enableOrDisableServer := ok != (e.port != nil) + portChanged := ok && e.port != nil && newPort != *e.port + if enableOrDisableServer || portChanged || !sameNode { + if e.server != nil { + e.server.Close() + e.server = nil + } + e.port = nil + if ok { + e.port = ptr.To(newPort) + } + } +} + // Shutdown implements [ipnlocal.Extension]. func (e *extension) Shutdown() error { e.mu.Lock() @@ -75,16 +107,7 @@ func (e *extension) Shutdown() error { return nil } -func (e *extension) shouldRunRelayServer() bool { - // TODO(jwhited): consider: - // 1. tailcfg.NodeAttrRelayServer - // 2. ipn.Prefs.RelayServerPort - // 3. envknob.UseWIPCode() - // 4. e.shutdown - return false -} - -func (e *extension) relayServerOrInit() (*udprelay.Server, error) { +func (e *extension) relayServerOrInit() (relayServer, error) { e.mu.Lock() defer e.mu.Unlock() if e.shutdown { @@ -93,8 +116,17 @@ func (e *extension) relayServerOrInit() (*udprelay.Server, error) { if e.server != nil { return e.server, nil } + if e.port == nil { + return nil, errors.New("relay server is not configured") + } + if !e.hasNodeAttrRelayServer { + return nil, errors.New("no relay:server node attribute") + } + if !envknob.UseWIPCode() { + return nil, errors.New("TAILSCALE_USE_WIP_CODE envvar is not set") + } var err error - e.server, _, err = udprelay.NewServer(e.port, []netip.Addr{netip.MustParseAddr("127.0.0.1")}) + e.server, _, err = udprelay.NewServer(*e.port, []netip.Addr{netip.MustParseAddr("127.0.0.1")}) if err != nil { return nil, err } @@ -102,25 +134,24 @@ func (e *extension) relayServerOrInit() (*udprelay.Server, error) { } func handlePeerAPIRelayAllocateEndpoint(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { - // TODO(jwhited): log errors e, ok := h.LocalBackend().FindExtensionByName(featureName).(*extension) if !ok { http.Error(w, "relay failed to initialize", http.StatusServiceUnavailable) return } - if !e.shouldRunRelayServer() { - http.Error(w, "relay not enabled", http.StatusNotFound) - return + httpErrAndLog := func(message string, code int) { + http.Error(w, message, code) + e.logf("peerapi: request from %v returned code %d: %s", h.RemoteAddr(), code, message) } if !h.PeerCaps().HasCapability(tailcfg.PeerCapabilityRelay) { - http.Error(w, "relay not permitted", http.StatusForbidden) + httpErrAndLog("relay not permitted", http.StatusForbidden) return } if r.Method != httpm.POST { - http.Error(w, "only POST method is allowed", http.StatusMethodNotAllowed) + httpErrAndLog("only POST method is allowed", http.StatusMethodNotAllowed) return } @@ -129,26 +160,26 @@ func handlePeerAPIRelayAllocateEndpoint(h ipnlocal.PeerAPIHandler, w http.Respon } err := json.NewDecoder(io.LimitReader(r.Body, 512)).Decode(&allocateEndpointReq) if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) + httpErrAndLog(err.Error(), http.StatusBadRequest) return } if len(allocateEndpointReq.DiscoKeys) != 2 { - http.Error(w, "2 disco public keys must be supplied", 
http.StatusBadRequest) + httpErrAndLog("2 disco public keys must be supplied", http.StatusBadRequest) return } rs, err := e.relayServerOrInit() if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + httpErrAndLog(err.Error(), http.StatusServiceUnavailable) return } ep, err := rs.AllocateEndpoint(allocateEndpointReq.DiscoKeys[0], allocateEndpointReq.DiscoKeys[1]) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + httpErrAndLog(err.Error(), http.StatusInternalServerError) return } err = json.NewEncoder(w).Encode(&ep) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + httpErrAndLog(err.Error(), http.StatusInternalServerError) } } diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go new file mode 100644 index 000000000..af4d11df0 --- /dev/null +++ b/feature/relayserver/relayserver_test.go @@ -0,0 +1,126 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package relayserver + +import ( + "errors" + "testing" + + "tailscale.com/ipn" + "tailscale.com/net/udprelay" + "tailscale.com/types/key" + "tailscale.com/types/ptr" +) + +type fakeRelayServer struct{} + +func (f *fakeRelayServer) Close() error { return nil } + +func (f *fakeRelayServer) AllocateEndpoint(_, _ key.DiscoPublic) (udprelay.ServerEndpoint, error) { + return udprelay.ServerEndpoint{}, errors.New("fake relay server") +} + +func Test_extension_profileStateChanged(t *testing.T) { + prefsWithPortOne := ipn.Prefs{RelayServerPort: ptr.To(1)} + prefsWithNilPort := ipn.Prefs{RelayServerPort: nil} + + type fields struct { + server relayServer + port *int + } + type args struct { + prefs ipn.PrefsView + sameNode bool + } + tests := []struct { + name string + fields fields + args args + wantPort *int + wantNilServer bool + }{ + { + name: "no changes non-nil server", + fields: fields{ + server: &fakeRelayServer{}, + port: ptr.To(1), + }, + args: args{ + prefs: prefsWithPortOne.View(), + sameNode: true, + }, + wantPort: ptr.To(1), + wantNilServer: false, + }, + { + name: "prefs port nil", + fields: fields{ + server: &fakeRelayServer{}, + port: ptr.To(1), + }, + args: args{ + prefs: prefsWithNilPort.View(), + sameNode: true, + }, + wantPort: nil, + wantNilServer: true, + }, + { + name: "prefs port changed", + fields: fields{ + server: &fakeRelayServer{}, + port: ptr.To(2), + }, + args: args{ + prefs: prefsWithPortOne.View(), + sameNode: true, + }, + wantPort: ptr.To(1), + wantNilServer: true, + }, + { + name: "sameNode false", + fields: fields{ + server: &fakeRelayServer{}, + port: ptr.To(1), + }, + args: args{ + prefs: prefsWithPortOne.View(), + sameNode: false, + }, + wantPort: ptr.To(1), + wantNilServer: true, + }, + { + name: "prefs port non-nil extension port nil", + fields: fields{ + server: nil, + port: nil, + }, + args: args{ + prefs: prefsWithPortOne.View(), + sameNode: false, + }, + wantPort: ptr.To(1), + wantNilServer: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &extension{ + port: tt.fields.port, + server: tt.fields.server, + } + e.profileStateChanged(ipn.LoginProfileView{}, tt.args.prefs, tt.args.sameNode) + if tt.wantNilServer != (e.server == nil) { + t.Errorf("wantNilServer: %v != (e.server == nil): %v", tt.wantNilServer, e.server == nil) + } + if (tt.wantPort == nil) != (e.port == nil) { + t.Errorf("(tt.wantPort == nil): %v != (e.port == nil): %v", tt.wantPort == nil, e.port == nil) + } else if tt.wantPort != nil && *tt.wantPort 
!= *e.port { + t.Errorf("wantPort: %d != *e.port: %d", *tt.wantPort, *e.port) + } + }) + } +} From 898cf068985f04db51093d04cdd9fbb215f376c7 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 17 Apr 2025 10:47:54 -0700 Subject: [PATCH 0776/1708] ipn/ipnlocal: remove another copy of slicesx.MapValues We added this helper in 1e2e319e7d26. Remove this copy. Updates #cleanup Change-Id: I5b0681acc23692beed35951c9902ac9ceca0a8b9 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/extension_host.go | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/ipn/ipnlocal/extension_host.go b/ipn/ipnlocal/extension_host.go index 79f741e55..aa56ad8ef 100644 --- a/ipn/ipnlocal/extension_host.go +++ b/ipn/ipnlocal/extension_host.go @@ -25,6 +25,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/execqueue" "tailscale.com/util/set" + "tailscale.com/util/slicesx" "tailscale.com/util/testenv" ) @@ -378,7 +379,7 @@ func (h *ExtensionHost) NotifyProfileChange(profile ipn.LoginProfileView, prefs h.currentPrefs = prefs h.currentProfile = profile // Get the callbacks to be invoked. - cbs := collectValues(h.profileStateChangeCbs) + cbs := slicesx.MapValues(h.profileStateChangeCbs) h.mu.Unlock() for _, cb := range cbs { cb(profile, prefs, sameNode) @@ -402,7 +403,7 @@ func (h *ExtensionHost) NotifyProfilePrefsChanged(profile ipn.LoginProfileView, h.currentPrefs = newPrefs h.currentProfile = profile // Get the callbacks to be invoked. - stateCbs := collectValues(h.profileStateChangeCbs) + stateCbs := slicesx.MapValues(h.profileStateChangeCbs) h.mu.Unlock() for _, cb := range stateCbs { cb(profile, newPrefs, true) @@ -443,7 +444,7 @@ func (h *ExtensionHost) DetermineBackgroundProfile(profiles ipnext.ProfileStore) // Attempt to resolve the background profile using the registered // background profile resolvers (e.g., [ipn/desktop.desktopSessionsExt] on Windows). h.mu.Lock() - resolvers := collectValues(h.backgroundProfileResolvers) + resolvers := slicesx.MapValues(h.backgroundProfileResolvers) h.mu.Unlock() for _, resolver := range resolvers { if profile := resolver(profiles); profile.Valid() { @@ -481,7 +482,7 @@ func (h *ExtensionHost) NotifyNewControlClient(cc controlclient.Client, profile return nil } h.mu.Lock() - cbs := collectValues(h.newControlClientCbs) + cbs := slicesx.MapValues(h.newControlClientCbs) h.mu.Unlock() if len(cbs) > 0 { ccShutdownCbs = make([]func(), 0, len(cbs)) @@ -527,7 +528,7 @@ func (h *ExtensionHost) AuditLogger() ipnauth.AuditLogFunc { } h.mu.Lock() - providers := collectValues(h.auditLoggers) + providers := slicesx.MapValues(h.auditLoggers) h.mu.Unlock() var loggers []ipnauth.AuditLogFunc @@ -642,17 +643,3 @@ type execQueue interface { Shutdown() Wait(context.Context) error } - -// collectValues is like [slices.Collect] of [maps.Values], -// but pre-allocates the slice to avoid reallocations. -// It returns nil if the map is empty. -func collectValues[K comparable, V any](m map[K]V) []V { - if len(m) == 0 { - return nil - } - s := make([]V, 0, len(m)) - for _, v := range m { - s = append(s, v) - } - return s -} From aff8f1b3587b4e2b166f506fba34d8da8ea0e99a Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Thu, 17 Apr 2025 15:51:41 -0700 Subject: [PATCH 0777/1708] tstime: add GoDuration which JSON serializes with time.Duration.String (#15726) The encoding/json/v2 effort may end up changing the default represention of time.Duration in JSON. 
See https://go.dev/issue/71631 The GoDuration type allows us to explicitly use the time.Duration.String representation regardless of whether we serialize with v1 or v2 of encoding/json. Updates tailscale/corp#27502 Signed-off-by: Joe Tsai --- tstime/tstime.go | 38 ++++++++++++++++++++++++++++++++++++++ tstime/tstime_test.go | 17 +++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/tstime/tstime.go b/tstime/tstime.go index 1c006355f..6e5b7f9f4 100644 --- a/tstime/tstime.go +++ b/tstime/tstime.go @@ -6,6 +6,7 @@ package tstime import ( "context" + "encoding" "strconv" "strings" "time" @@ -183,3 +184,40 @@ func (StdClock) AfterFunc(d time.Duration, f func()) TimerController { func (StdClock) Since(t time.Time) time.Duration { return time.Since(t) } + +// GoDuration is a [time.Duration] but JSON serializes with [time.Duration.String]. +// +// Note that this format is specific to Go and non-standard, +// but excels in being most humanly readable compared to alternatives. +// The wider industry still lacks consensus for the representation +// of a time duration in humanly-readable text. +// See https://go.dev/issue/71631 for more discussion. +// +// Regardless of how the industry evolves into the future, +// this type explicitly uses the Go format. +type GoDuration struct{ time.Duration } + +var ( + _ encoding.TextAppender = (*GoDuration)(nil) + _ encoding.TextMarshaler = (*GoDuration)(nil) + _ encoding.TextUnmarshaler = (*GoDuration)(nil) +) + +func (d GoDuration) AppendText(b []byte) ([]byte, error) { + // The String method is inlineable (see https://go.dev/cl/520602), + // so this may not allocate since the string does not escape. + return append(b, d.String()...), nil +} + +func (d GoDuration) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +func (d *GoDuration) UnmarshalText(b []byte) error { + d2, err := time.ParseDuration(string(b)) + if err != nil { + return err + } + d.Duration = d2 + return nil +} diff --git a/tstime/tstime_test.go b/tstime/tstime_test.go index 3ffeaf0ff..556ad4e8b 100644 --- a/tstime/tstime_test.go +++ b/tstime/tstime_test.go @@ -4,8 +4,11 @@ package tstime import ( + "encoding/json" "testing" "time" + + "tailscale.com/util/must" ) func TestParseDuration(t *testing.T) { @@ -34,3 +37,17 @@ func TestParseDuration(t *testing.T) { } } } + +func TestGoDuration(t *testing.T) { + wantDur := GoDuration{time.Hour + time.Minute + time.Second + time.Millisecond + time.Microsecond + time.Nanosecond} + gotJSON := string(must.Get(json.Marshal(wantDur))) + wantJSON := `"1h1m1.001001001s"` + if gotJSON != wantJSON { + t.Errorf("json.Marshal(%v) = %s, want %s", wantDur, gotJSON, wantJSON) + } + var gotDur GoDuration + must.Do(json.Unmarshal([]byte(wantJSON), &gotDur)) + if gotDur != wantDur { + t.Errorf("json.Unmarshal(%s) = %v, want %v", wantJSON, gotDur, wantDur) + } +} From 3a8a174308d857b15b262f5904ae2ec1d8b9c867 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 17 Apr 2025 16:21:32 -0700 Subject: [PATCH 0778/1708] net/udprelay: change ServerEndpoint time.Duration fields to tstime.GoDuration (#15725) tstime.GoDuration JSON serializes with time.Duration.String(), which is more human-friendly than nanoseconds. ServerEndpoint is currently experimental, therefore breaking changes are tolerable. 
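For illustration only (this sketch is not part of the patch; the single-field struct and the 30-second value are made up): the change in wire format follows from the GoDuration TextMarshaler added in the previous patch.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"tailscale.com/tstime"
)

func main() {
	// Before: time.Duration is an int64, so encoding/json emits raw nanoseconds.
	before := struct {
		BindLifetime time.Duration
	}{BindLifetime: 30 * time.Second}
	b, _ := json.Marshal(before)
	fmt.Println(string(b)) // {"BindLifetime":30000000000}

	// After: tstime.GoDuration implements encoding.TextMarshaler via
	// time.Duration.String, so the same value serializes as a readable string.
	after := struct {
		BindLifetime tstime.GoDuration
	}{BindLifetime: tstime.GoDuration{Duration: 30 * time.Second}}
	b, _ = json.Marshal(after)
	fmt.Println(string(b)) // {"BindLifetime":"30s"}
}
```

Clients that already parse these fields must switch from reading a number to parsing a Go duration string, which is acceptable while ServerEndpoint remains experimental.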
Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 13 ++--- net/udprelay/server_test.go | 96 +++++++++++++++++++++++++++++++++++++ 2 files changed, 103 insertions(+), 6 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 30fc08326..373165777 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -21,6 +21,7 @@ import ( "go4.org/mem" "tailscale.com/disco" "tailscale.com/net/packet" + "tailscale.com/tstime" "tailscale.com/types/key" ) @@ -114,12 +115,12 @@ type ServerEndpoint struct { // BindLifetime is amount of time post-allocation the Server will consider // the endpoint active while it has yet to be bound via 3-way bind handshake // from both client parties. - BindLifetime time.Duration + BindLifetime tstime.GoDuration // SteadyStateLifetime is the amount of time post 3-way bind handshake from // both client parties the Server will consider the endpoint active lacking // bidirectional data flow. - SteadyStateLifetime time.Duration + SteadyStateLifetime tstime.GoDuration } // serverEndpoint contains Server-internal ServerEndpoint state. serverEndpoint @@ -489,8 +490,8 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (ServerEndpoin AddrPorts: s.addrPorts, VNI: e.vni, LamportID: e.lamportID, - BindLifetime: s.bindLifetime, - SteadyStateLifetime: s.steadyStateLifetime, + BindLifetime: tstime.GoDuration{Duration: s.bindLifetime}, + SteadyStateLifetime: tstime.GoDuration{Duration: s.steadyStateLifetime}, }, nil } // If an endpoint exists for the pair of key.DiscoPublic's, and is @@ -526,7 +527,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (ServerEndpoin AddrPorts: s.addrPorts, VNI: e.vni, LamportID: e.lamportID, - BindLifetime: s.bindLifetime, - SteadyStateLifetime: s.steadyStateLifetime, + BindLifetime: tstime.GoDuration{Duration: s.bindLifetime}, + SteadyStateLifetime: tstime.GoDuration{Duration: s.steadyStateLifetime}, }, nil } diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index 733e50b77..fad35ec03 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -5,6 +5,8 @@ package udprelay import ( "bytes" + "encoding/json" + "math" "net" "net/netip" "testing" @@ -15,6 +17,7 @@ import ( "go4.org/mem" "tailscale.com/disco" "tailscale.com/net/packet" + "tailscale.com/tstime" "tailscale.com/types/key" ) @@ -202,3 +205,96 @@ func TestServer(t *testing.T) { t.Fatal("unexpected msg B->A") } } + +func TestServerEndpointJSONUnmarshal(t *testing.T) { + tests := []struct { + name string + json []byte + wantErr bool + }{ + { + name: "valid", + json: []byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"30s","SteadyStateLifetime":"5m0s"}`), + wantErr: false, + }, + { + name: "invalid ServerDisco", + json: []byte(`{"ServerDisco":"1","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"30s","SteadyStateLifetime":"5m0s"}`), + wantErr: true, + }, + { + name: "invalid LamportID", + json: []byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":1.1,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"30s","SteadyStateLifetime":"5m0s"}`), + wantErr: true, + }, + { + name: "invalid AddrPorts", + json: 
[]byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"30s","SteadyStateLifetime":"5m0s"}`), + wantErr: true, + }, + { + name: "invalid VNI", + json: []byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":18446744073709551615,"BindLifetime":"30s","SteadyStateLifetime":"5m0s"}`), + wantErr: true, + }, + { + name: "invalid BindLifetime", + json: []byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"5","SteadyStateLifetime":"5m0s"}`), + wantErr: true, + }, + { + name: "invalid SteadyStateLifetime", + json: []byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"30s","SteadyStateLifetime":"5"}`), + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var out ServerEndpoint + err := json.Unmarshal(tt.json, &out) + if tt.wantErr != (err != nil) { + t.Fatalf("wantErr: %v (err == nil): %v", tt.wantErr, err == nil) + } + if tt.wantErr { + return + } + }) + } +} + +func TestServerEndpointJSONMarshal(t *testing.T) { + tests := []struct { + name string + serverEndpoint ServerEndpoint + }{ + { + name: "valid roundtrip", + serverEndpoint: ServerEndpoint{ + ServerDisco: key.NewDisco().Public(), + LamportID: uint64(math.MaxUint64), + AddrPorts: []netip.AddrPort{netip.MustParseAddrPort("127.0.0.1:1"), netip.MustParseAddrPort("127.0.0.2:2")}, + VNI: 1<<24 - 1, + BindLifetime: tstime.GoDuration{Duration: defaultBindLifetime}, + SteadyStateLifetime: tstime.GoDuration{Duration: defaultSteadyStateLifetime}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b, err := json.Marshal(&tt.serverEndpoint) + if err != nil { + t.Fatal(err) + } + var got ServerEndpoint + err = json.Unmarshal(b, &got) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(got, tt.serverEndpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { + t.Fatalf("ServerEndpoint unequal (-got +want)\n%s", diff) + } + }) + } +} From b34a2bdb229f6e5aebf5022d842bc6ba3a224082 Mon Sep 17 00:00:00 2001 From: Cedric Kienzler Date: Fri, 18 Apr 2025 02:31:40 +0200 Subject: [PATCH 0779/1708] cmd/tsidp: add groups claim to tsidp (#15127) * cmd/tsidp: add groups claim to tsidp This feature adds support for a `groups` claim in tsidp using the grants syntax: ```json { "grants": [ { "src": ["group:admins"], "dst": ["*"], "ip": ["*"], "app": { "tailscale.com/cap/tsidp": [ { "groups": ["admin"] } ] } }, { "src": ["group:reader"], "dst": ["*"], "ip": ["*"], "app": { "tailscale.com/cap/tsidp": [ { "groups": ["reader"] } ] } } ] } ``` For #10263 Signed-off-by: Cedric Kienzler * cmd/tsidp: refactor cap/tsidp to allow extraClaims This commit refactors the `capRule` struct to allow specifying arbitrary extra claims: ```json { "src": ["group:reader"], "dst": ["*"], "ip": ["*"], "app": { "tailscale.com/cap/tsidp": [ { "extraClaims": { "groups": ["reader"], "entitlements": ["read-stuff"], }, } ] } } ``` Overwriting pre-existing claims cannot be modified/overwritten. 
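Illustrative only (not part of the patch; the user, node, and tailnet values below are hypothetical): a peer matched by the `group:reader` rule above would receive ID token claims (and, with `includeInUserInfo` set, userinfo claims) that merge the standard Tailscale claims with the granted extras, roughly:

```json
{
  "username": "alice",
  "email": "alice@example.com",
  "node": "alices-laptop",
  "tailnet": "example.com",
  "groups": ["reader"],
  "entitlements": ["read-stuff"]
}
```

Extra claims that collide with protected OIDC claims (those listed in openIDSupportedClaims, such as `sub`) are rejected with an error rather than silently overwriting the existing value.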
Also adding more unit-testing Signed-off-by: Cedric Kienzler * Update cmd/tsidp/tsidp.go Signed-off-by: cedi * Update cmd/tsidp/tsidp_test.go Co-authored-by: Patrick O'Doherty Signed-off-by: Cedric Kienzler * Update cmd/tsidp/tsidp_test.go Co-authored-by: Patrick O'Doherty Signed-off-by: Cedric Kienzler * Fix logical error in test case Signed-off-by: Cedric Kienzler * fix error printing for failed to unmarshal capability in tsidp Signed-off-by: Cedric Kienzler * clarify doc string for withExtraClaims Signed-off-by: Cedric Kienzler --------- Signed-off-by: Cedric Kienzler Signed-off-by: cedi Signed-off-by: Cedric Kienzler Co-authored-by: Patrick O'Doherty --- cmd/tsidp/tsidp.go | 174 ++++++++- cmd/tsidp/tsidp_test.go | 825 ++++++++++++++++++++++++++++++++++++++++ tailcfg/tailcfg.go | 5 + 3 files changed, 1002 insertions(+), 2 deletions(-) create mode 100644 cmd/tsidp/tsidp_test.go diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 54bb82d12..15f7e6e9c 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -498,6 +498,7 @@ func (s *idpServer) serveUserInfo(w http.ResponseWriter, r *http.Request) { http.Error(w, "tsidp: tagged nodes not supported", http.StatusBadRequest) return } + ui.Sub = ar.remoteUser.Node.User.String() ui.Name = ar.remoteUser.UserProfile.DisplayName ui.Email = ar.remoteUser.UserProfile.LoginName @@ -506,8 +507,29 @@ func (s *idpServer) serveUserInfo(w http.ResponseWriter, r *http.Request) { // TODO(maisem): not sure if this is the right thing to do ui.UserName, _, _ = strings.Cut(ar.remoteUser.UserProfile.LoginName, "@") + rules, err := tailcfg.UnmarshalCapJSON[capRule](ar.remoteUser.CapMap, tailcfg.PeerCapabilityTsIDP) + if err != nil { + http.Error(w, "tsidp: failed to unmarshal capability: %v", http.StatusBadRequest) + return + } + + // Only keep rules where IncludeInUserInfo is true + var filtered []capRule + for _, r := range rules { + if r.IncludeInUserInfo { + filtered = append(filtered, r) + } + } + + userInfo, err := withExtraClaims(ui, filtered) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + // Write the final result w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(ui); err != nil { + if err := json.NewEncoder(w).Encode(userInfo); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } } @@ -520,6 +542,140 @@ type userInfo struct { UserName string `json:"username"` } +type capRule struct { + IncludeInUserInfo bool `json:"includeInUserInfo"` + ExtraClaims map[string]interface{} `json:"extraClaims,omitempty"` // list of features peer is allowed to edit +} + +// flattenExtraClaims merges all ExtraClaims from a slice of capRule into a single map. +// It deduplicates values for each claim and preserves the original input type: +// scalar values remain scalars, and slices are returned as deduplicated []interface{} slices. +func flattenExtraClaims(rules []capRule) map[string]interface{} { + // sets stores deduplicated stringified values for each claim key. + sets := make(map[string]map[string]struct{}) + + // isSlice tracks whether each claim was originally provided as a slice. 
+ isSlice := make(map[string]bool) + + for _, rule := range rules { + for claim, raw := range rule.ExtraClaims { + // Track whether the claim was provided as a slice + switch raw.(type) { + case []string, []interface{}: + isSlice[claim] = true + default: + // Only mark as scalar if this is the first time we've seen this claim + if _, seen := isSlice[claim]; !seen { + isSlice[claim] = false + } + } + + // Add the claim value(s) into the deduplication set + addClaimValue(sets, claim, raw) + } + } + + // Build final result: either scalar or slice depending on original type + result := make(map[string]interface{}) + for claim, valSet := range sets { + if isSlice[claim] { + // Claim was provided as a slice: output as []interface{} + var vals []interface{} + for val := range valSet { + vals = append(vals, val) + } + result[claim] = vals + } else { + // Claim was a scalar: return a single value + for val := range valSet { + result[claim] = val + break // only one value is expected + } + } + } + + return result +} + +// addClaimValue adds a claim value to the deduplication set for a given claim key. +// It accepts scalars (string, int, float64), slices of strings or interfaces, +// and recursively handles nested slices. Unsupported types are ignored with a log message. +func addClaimValue(sets map[string]map[string]struct{}, claim string, val interface{}) { + switch v := val.(type) { + case string, float64, int, int64: + // Ensure the claim set is initialized + if sets[claim] == nil { + sets[claim] = make(map[string]struct{}) + } + // Add the stringified scalar to the set + sets[claim][fmt.Sprintf("%v", v)] = struct{}{} + + case []string: + // Ensure the claim set is initialized + if sets[claim] == nil { + sets[claim] = make(map[string]struct{}) + } + // Add each string value to the set + for _, s := range v { + sets[claim][s] = struct{}{} + } + + case []interface{}: + // Recursively handle each item in the slice + for _, item := range v { + addClaimValue(sets, claim, item) + } + + default: + // Log unsupported types for visibility and debugging + log.Printf("Unsupported claim type for %q: %#v (type %T)", claim, val, val) + } +} + +// withExtraClaims merges flattened extra claims from a list of capRule into the provided struct v, +// returning a map[string]interface{} that combines both sources. +// +// v is any struct whose fields represent static claims; it is first marshaled to JSON, then unmarshalled into a generic map. +// rules is a slice of capRule objects that may define additional (extra) claims to merge. +// +// These extra claims are flattened and merged into the base map unless they conflict with protected claims. +// Claims defined in openIDSupportedClaims are considered protected and cannot be overwritten. +// If an extra claim attempts to overwrite a protected claim, an error is returned. +// +// Returns the merged claims map or an error if any protected claim is violated or JSON (un)marshaling fails. 
+func withExtraClaims(v any, rules []capRule) (map[string]interface{}, error) { + // Marshal the static struct + data, err := json.Marshal(v) + if err != nil { + return nil, err + } + + // Unmarshal into a generic map + var claimMap map[string]interface{} + if err := json.Unmarshal(data, &claimMap); err != nil { + return nil, err + } + + // Convert views.Slice to a map[string]struct{} for efficient lookup + protected := make(map[string]struct{}, len(openIDSupportedClaims.AsSlice())) + for _, claim := range openIDSupportedClaims.AsSlice() { + protected[claim] = struct{}{} + } + + // Merge extra claims + extra := flattenExtraClaims(rules) + for k, v := range extra { + if _, isProtected := protected[k]; isProtected { + log.Printf("Skip overwriting of existing claim %q", k) + return nil, fmt.Errorf("extra claim %q overwriting existing claim", k) + } + + claimMap[k] = v + } + + return claimMap, nil +} + func (s *idpServer) serveToken(w http.ResponseWriter, r *http.Request) { if r.Method != "POST" { http.Error(w, "tsidp: method not allowed", http.StatusMethodNotAllowed) @@ -596,8 +752,22 @@ func (s *idpServer) serveToken(w http.ResponseWriter, r *http.Request) { tsClaims.Issuer = s.loopbackURL } + rules, err := tailcfg.UnmarshalCapJSON[capRule](who.CapMap, tailcfg.PeerCapabilityTsIDP) + if err != nil { + log.Printf("tsidp: failed to unmarshal capability: %v", err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + tsClaimsWithExtra, err := withExtraClaims(tsClaims, rules) + if err != nil { + log.Printf("tsidp: failed to merge extra claims: %v", err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + // Create an OIDC token using this issuer's signer. - token, err := jwt.Signed(signer).Claims(tsClaims).CompactSerialize() + token, err := jwt.Signed(signer).Claims(tsClaimsWithExtra).CompactSerialize() if err != nil { log.Printf("Error getting token: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) diff --git a/cmd/tsidp/tsidp_test.go b/cmd/tsidp/tsidp_test.go new file mode 100644 index 000000000..f6122708a --- /dev/null +++ b/cmd/tsidp/tsidp_test.go @@ -0,0 +1,825 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause +package main + +import ( + "crypto/rand" + "crypto/rsa" + "encoding/json" + "fmt" + "gopkg.in/square/go-jose.v2" + "gopkg.in/square/go-jose.v2/jwt" + "io" + "log" + "net/http" + "net/http/httptest" + "net/netip" + "net/url" + "reflect" + "sort" + "strings" + "tailscale.com/client/tailscale/apitype" + "tailscale.com/tailcfg" + "tailscale.com/types/key" + "tailscale.com/types/views" + "testing" + "time" +) + +// normalizeMap recursively sorts []interface{} values in a map[string]interface{} +func normalizeMap(t *testing.T, m map[string]interface{}) map[string]interface{} { + t.Helper() + normalized := make(map[string]interface{}, len(m)) + for k, v := range m { + switch val := v.(type) { + case []interface{}: + sorted := make([]string, len(val)) + for i, item := range val { + sorted[i] = fmt.Sprintf("%v", item) // convert everything to string for sorting + } + sort.Strings(sorted) + + // convert back to []interface{} + sortedIface := make([]interface{}, len(sorted)) + for i, s := range sorted { + sortedIface[i] = s + } + normalized[k] = sortedIface + + default: + normalized[k] = v + } + } + return normalized +} + +func mustMarshalJSON(t *testing.T, v any) tailcfg.RawMessage { + t.Helper() + b, err := json.Marshal(v) + if err != nil { + panic(err) + } + return tailcfg.RawMessage(b) +} + +var privateKey 
*rsa.PrivateKey = nil + +func oidcTestingSigner(t *testing.T) jose.Signer { + t.Helper() + privKey := mustGeneratePrivateKey(t) + sig, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: privKey}, nil) + if err != nil { + t.Fatalf("failed to create signer: %v", err) + } + return sig +} + +func oidcTestingPublicKey(t *testing.T) *rsa.PublicKey { + t.Helper() + privKey := mustGeneratePrivateKey(t) + return &privKey.PublicKey +} + +func mustGeneratePrivateKey(t *testing.T) *rsa.PrivateKey { + t.Helper() + if privateKey != nil { + return privateKey + } + + var err error + privateKey, err = rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatalf("failed to generate key: %v", err) + } + + return privateKey +} + +func TestFlattenExtraClaims(t *testing.T) { + log.SetOutput(io.Discard) // suppress log output during tests + + tests := []struct { + name string + input []capRule + expected map[string]interface{} + }{ + { + name: "empty extra claims", + input: []capRule{ + {ExtraClaims: map[string]interface{}{}}, + }, + expected: map[string]interface{}{}, + }, + { + name: "string and number values", + input: []capRule{ + { + ExtraClaims: map[string]interface{}{ + "featureA": "read", + "featureB": 42, + }, + }, + }, + expected: map[string]interface{}{ + "featureA": "read", + "featureB": "42", + }, + }, + { + name: "slice of strings and ints", + input: []capRule{ + { + ExtraClaims: map[string]interface{}{ + "roles": []interface{}{"admin", "user", 1}, + }, + }, + }, + expected: map[string]interface{}{ + "roles": []interface{}{"admin", "user", "1"}, + }, + }, + { + name: "duplicate values deduplicated (slice input)", + input: []capRule{ + { + ExtraClaims: map[string]interface{}{ + "foo": []string{"bar", "baz"}, + }, + }, + { + ExtraClaims: map[string]interface{}{ + "foo": []interface{}{"bar", "qux"}, + }, + }, + }, + expected: map[string]interface{}{ + "foo": []interface{}{"bar", "baz", "qux"}, + }, + }, + { + name: "ignore unsupported map type, keep valid scalar", + input: []capRule{ + { + ExtraClaims: map[string]interface{}{ + "invalid": map[string]interface{}{"bad": "yes"}, + "valid": "ok", + }, + }, + }, + expected: map[string]interface{}{ + "valid": "ok", + }, + }, + { + name: "scalar first, slice second", + input: []capRule{ + {ExtraClaims: map[string]interface{}{"foo": "bar"}}, + {ExtraClaims: map[string]interface{}{"foo": []interface{}{"baz"}}}, + }, + expected: map[string]interface{}{ + "foo": []interface{}{"bar", "baz"}, // since first was scalar, second being a slice forces slice output + }, + }, + { + name: "conflicting scalar and unsupported map", + input: []capRule{ + {ExtraClaims: map[string]interface{}{"foo": "bar"}}, + {ExtraClaims: map[string]interface{}{"foo": map[string]interface{}{"bad": "entry"}}}, + }, + expected: map[string]interface{}{ + "foo": "bar", // map should be ignored + }, + }, + { + name: "multiple slices with overlap", + input: []capRule{ + {ExtraClaims: map[string]interface{}{"roles": []interface{}{"admin", "user"}}}, + {ExtraClaims: map[string]interface{}{"roles": []interface{}{"admin", "guest"}}}, + }, + expected: map[string]interface{}{ + "roles": []interface{}{"admin", "user", "guest"}, + }, + }, + { + name: "slice with unsupported values", + input: []capRule{ + {ExtraClaims: map[string]interface{}{ + "mixed": []interface{}{"ok", 42, map[string]string{"oops": "fail"}}, + }}, + }, + expected: map[string]interface{}{ + "mixed": []interface{}{"ok", "42"}, // map is ignored + }, + }, + { + name: "duplicate scalar value", + input: []capRule{ + 
{ExtraClaims: map[string]interface{}{"env": "prod"}}, + {ExtraClaims: map[string]interface{}{"env": "prod"}}, + }, + expected: map[string]interface{}{ + "env": "prod", // not converted to slice + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := flattenExtraClaims(tt.input) + + gotNormalized := normalizeMap(t, got) + expectedNormalized := normalizeMap(t, tt.expected) + + if !reflect.DeepEqual(gotNormalized, expectedNormalized) { + t.Errorf("mismatch\nGot:\n%s\nWant:\n%s", gotNormalized, expectedNormalized) + } + }) + } +} + +func TestExtraClaims(t *testing.T) { + tests := []struct { + name string + claim tailscaleClaims + extraClaims []capRule + expected map[string]interface{} + expectError bool + }{ + { + name: "extra claim", + claim: tailscaleClaims{ + Claims: jwt.Claims{}, + Nonce: "foobar", + Key: key.NodePublic{}, + Addresses: views.Slice[netip.Prefix]{}, + NodeID: 0, + NodeName: "test-node", + Tailnet: "test.ts.net", + Email: "test@example.com", + UserID: 0, + UserName: "test", + }, + extraClaims: []capRule{ + { + ExtraClaims: map[string]interface{}{ + "foo": []string{"bar"}, + }, + }, + }, + expected: map[string]interface{}{ + "nonce": "foobar", + "key": "nodekey:0000000000000000000000000000000000000000000000000000000000000000", + "addresses": nil, + "nid": float64(0), + "node": "test-node", + "tailnet": "test.ts.net", + "email": "test@example.com", + "username": "test", + "foo": []interface{}{"bar"}, + }, + }, + { + name: "duplicate claim distinct values", + claim: tailscaleClaims{ + Claims: jwt.Claims{}, + Nonce: "foobar", + Key: key.NodePublic{}, + Addresses: views.Slice[netip.Prefix]{}, + NodeID: 0, + NodeName: "test-node", + Tailnet: "test.ts.net", + Email: "test@example.com", + UserID: 0, + UserName: "test", + }, + extraClaims: []capRule{ + { + ExtraClaims: map[string]interface{}{ + "foo": []string{"bar"}, + }, + }, + { + ExtraClaims: map[string]interface{}{ + "foo": []string{"foobar"}, + }, + }, + }, + expected: map[string]interface{}{ + "nonce": "foobar", + "key": "nodekey:0000000000000000000000000000000000000000000000000000000000000000", + "addresses": nil, + "nid": float64(0), + "node": "test-node", + "tailnet": "test.ts.net", + "email": "test@example.com", + "username": "test", + "foo": []interface{}{"foobar", "bar"}, + }, + }, + { + name: "multiple extra claims", + claim: tailscaleClaims{ + Claims: jwt.Claims{}, + Nonce: "foobar", + Key: key.NodePublic{}, + Addresses: views.Slice[netip.Prefix]{}, + NodeID: 0, + NodeName: "test-node", + Tailnet: "test.ts.net", + Email: "test@example.com", + UserID: 0, + UserName: "test", + }, + extraClaims: []capRule{ + { + ExtraClaims: map[string]interface{}{ + "foo": []string{"bar"}, + }, + }, + { + ExtraClaims: map[string]interface{}{ + "bar": []string{"foo"}, + }, + }, + }, + expected: map[string]interface{}{ + "nonce": "foobar", + "key": "nodekey:0000000000000000000000000000000000000000000000000000000000000000", + "addresses": nil, + "nid": float64(0), + "node": "test-node", + "tailnet": "test.ts.net", + "email": "test@example.com", + "username": "test", + "foo": []interface{}{"bar"}, + "bar": []interface{}{"foo"}, + }, + }, + { + name: "overwrite claim", + claim: tailscaleClaims{ + Claims: jwt.Claims{}, + Nonce: "foobar", + Key: key.NodePublic{}, + Addresses: views.Slice[netip.Prefix]{}, + NodeID: 0, + NodeName: "test-node", + Tailnet: "test.ts.net", + Email: "test@example.com", + UserID: 0, + UserName: "test", + }, + extraClaims: []capRule{ + { + ExtraClaims: map[string]interface{}{ + 
"username": "foobar", + }, + }, + }, + expected: map[string]interface{}{ + "nonce": "foobar", + "key": "nodekey:0000000000000000000000000000000000000000000000000000000000000000", + "addresses": nil, + "nid": float64(0), + "node": "test-node", + "tailnet": "test.ts.net", + "email": "test@example.com", + "username": "foobar", + }, + expectError: true, + }, + { + name: "empty extra claims", + claim: tailscaleClaims{ + Claims: jwt.Claims{}, + Nonce: "foobar", + Key: key.NodePublic{}, + Addresses: views.Slice[netip.Prefix]{}, + NodeID: 0, + NodeName: "test-node", + Tailnet: "test.ts.net", + Email: "test@example.com", + UserID: 0, + UserName: "test", + }, + extraClaims: []capRule{{ExtraClaims: map[string]interface{}{}}}, + expected: map[string]interface{}{ + "nonce": "foobar", + "key": "nodekey:0000000000000000000000000000000000000000000000000000000000000000", + "addresses": nil, + "nid": float64(0), + "node": "test-node", + "tailnet": "test.ts.net", + "email": "test@example.com", + "username": "test", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + claims, err := withExtraClaims(tt.claim, tt.extraClaims) + if err != nil && !tt.expectError { + t.Fatalf("claim.withExtraClaims() unexpected error = %v", err) + } else if err == nil && tt.expectError { + t.Fatalf("expected error, got nil") + } else if err != nil && tt.expectError { + return // just as expected + } + + // Marshal to JSON then unmarshal back to map[string]interface{} + gotClaims, err := json.Marshal(claims) + if err != nil { + t.Errorf("json.Marshal(claims) error = %v", err) + } + + var gotClaimsMap map[string]interface{} + if err := json.Unmarshal(gotClaims, &gotClaimsMap); err != nil { + t.Fatalf("json.Unmarshal(gotClaims) error = %v", err) + } + + gotNormalized := normalizeMap(t, gotClaimsMap) + expectedNormalized := normalizeMap(t, tt.expected) + + if !reflect.DeepEqual(gotNormalized, expectedNormalized) { + t.Errorf("claims mismatch:\n got: %#v\nwant: %#v", gotNormalized, expectedNormalized) + } + }) + } +} + +func TestServeToken(t *testing.T) { + tests := []struct { + name string + caps tailcfg.PeerCapMap + method string + grantType string + code string + omitCode bool + redirectURI string + remoteAddr string + expectError bool + expected map[string]interface{} + }{ + { + name: "GET not allowed", + method: "GET", + grantType: "authorization_code", + expectError: true, + }, + { + name: "unsupported grant type", + method: "POST", + grantType: "pkcs", + expectError: true, + }, + { + name: "invalid code", + method: "POST", + grantType: "authorization_code", + code: "invalid-code", + expectError: true, + }, + { + name: "omit code from form", + method: "POST", + grantType: "authorization_code", + omitCode: true, + expectError: true, + }, + { + name: "invalid redirect uri", + method: "POST", + grantType: "authorization_code", + code: "valid-code", + redirectURI: "https://invalid.example.com/callback", + remoteAddr: "127.0.0.1:12345", + expectError: true, + }, + { + name: "invalid remoteAddr", + method: "POST", + grantType: "authorization_code", + redirectURI: "https://rp.example.com/callback", + code: "valid-code", + remoteAddr: "192.168.0.1:12345", + expectError: true, + }, + { + name: "extra claim included", + method: "POST", + grantType: "authorization_code", + redirectURI: "https://rp.example.com/callback", + code: "valid-code", + remoteAddr: "127.0.0.1:12345", + caps: tailcfg.PeerCapMap{ + tailcfg.PeerCapabilityTsIDP: { + mustMarshalJSON(t, capRule{ + IncludeInUserInfo: true, + ExtraClaims: 
map[string]interface{}{ + "foo": "bar", + }, + }), + }, + }, + expected: map[string]interface{}{ + "foo": "bar", + }, + }, + { + name: "attempt to overwrite protected claim", + method: "POST", + grantType: "authorization_code", + redirectURI: "https://rp.example.com/callback", + code: "valid-code", + caps: tailcfg.PeerCapMap{ + tailcfg.PeerCapabilityTsIDP: { + mustMarshalJSON(t, capRule{ + IncludeInUserInfo: true, + ExtraClaims: map[string]interface{}{ + "sub": "should-not-overwrite", + }, + }), + }, + }, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + now := time.Now() + + // Fake user/node + profile := &tailcfg.UserProfile{ + LoginName: "alice@example.com", + DisplayName: "Alice Example", + ProfilePicURL: "https://example.com/alice.jpg", + } + node := &tailcfg.Node{ + ID: 123, + Name: "test-node.test.ts.net.", + User: 456, + Key: key.NodePublic{}, + Cap: 1, + DiscoKey: key.DiscoPublic{}, + } + + remoteUser := &apitype.WhoIsResponse{ + Node: node, + UserProfile: profile, + CapMap: tt.caps, + } + + s := &idpServer{ + code: map[string]*authRequest{ + "valid-code": { + clientID: "client-id", + nonce: "nonce123", + redirectURI: "https://rp.example.com/callback", + validTill: now.Add(5 * time.Minute), + remoteUser: remoteUser, + localRP: true, + }, + }, + } + // Inject a working signer + s.lazySigner.Set(oidcTestingSigner(t)) + + form := url.Values{} + form.Set("grant_type", tt.grantType) + form.Set("redirect_uri", tt.redirectURI) + if !tt.omitCode { + form.Set("code", tt.code) + } + + req := httptest.NewRequest(tt.method, "/token", strings.NewReader(form.Encode())) + req.RemoteAddr = tt.remoteAddr + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + rr := httptest.NewRecorder() + + s.serveToken(rr, req) + + if tt.expectError { + if rr.Code == http.StatusOK { + t.Fatalf("expected error, got 200 OK: %s", rr.Body.String()) + } + return + } + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200 OK, got %d: %s", rr.Code, rr.Body.String()) + } + + var resp struct { + IDToken string `json:"id_token"` + } + if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil { + t.Fatalf("failed to unmarshal response: %v", err) + } + + tok, err := jwt.ParseSigned(resp.IDToken) + if err != nil { + t.Fatalf("failed to parse ID token: %v", err) + } + + out := make(map[string]interface{}) + if err := tok.Claims(oidcTestingPublicKey(t), &out); err != nil { + t.Fatalf("failed to extract claims: %v", err) + } + + for k, want := range tt.expected { + got, ok := out[k] + if !ok { + t.Errorf("missing expected claim %q", k) + continue + } + if !reflect.DeepEqual(got, want) { + t.Errorf("claim %q: got %v, want %v", k, got, want) + } + } + }) + } +} + +func TestExtraUserInfo(t *testing.T) { + tests := []struct { + name string + caps tailcfg.PeerCapMap + tokenValidTill time.Time + expected map[string]interface{} + expectError bool + }{ + { + name: "extra claim", + tokenValidTill: time.Now().Add(1 * time.Minute), + caps: tailcfg.PeerCapMap{ + tailcfg.PeerCapabilityTsIDP: { + mustMarshalJSON(t, capRule{ + IncludeInUserInfo: true, + ExtraClaims: map[string]interface{}{ + "foo": []string{"bar"}, + }, + }), + }, + }, + expected: map[string]interface{}{ + "foo": []interface{}{"bar"}, + }, + }, + { + name: "duplicate claim distinct values", + tokenValidTill: time.Now().Add(1 * time.Minute), + caps: tailcfg.PeerCapMap{ + tailcfg.PeerCapabilityTsIDP: { + mustMarshalJSON(t, capRule{ + IncludeInUserInfo: true, + ExtraClaims: map[string]interface{}{ + "foo": 
[]string{"bar", "foobar"}, + }, + }), + }, + }, + expected: map[string]interface{}{ + "foo": []interface{}{"bar", "foobar"}, + }, + }, + { + name: "multiple extra claims", + tokenValidTill: time.Now().Add(1 * time.Minute), + caps: tailcfg.PeerCapMap{ + tailcfg.PeerCapabilityTsIDP: { + mustMarshalJSON(t, capRule{ + IncludeInUserInfo: true, + ExtraClaims: map[string]interface{}{ + "foo": "bar", + "bar": "foo", + }, + }), + }, + }, + expected: map[string]interface{}{ + "foo": "bar", + "bar": "foo", + }, + }, + { + name: "empty extra claims", + caps: tailcfg.PeerCapMap{}, + tokenValidTill: time.Now().Add(1 * time.Minute), + expected: map[string]interface{}{}, + }, + { + name: "attempt to overwrite protected claim", + tokenValidTill: time.Now().Add(1 * time.Minute), + caps: tailcfg.PeerCapMap{ + tailcfg.PeerCapabilityTsIDP: { + mustMarshalJSON(t, capRule{ + IncludeInUserInfo: true, + ExtraClaims: map[string]interface{}{ + "sub": "should-not-overwrite", + "foo": "ok", + }, + }), + }, + }, + expectError: true, + }, + { + name: "extra claim omitted", + tokenValidTill: time.Now().Add(1 * time.Minute), + caps: tailcfg.PeerCapMap{ + tailcfg.PeerCapabilityTsIDP: { + mustMarshalJSON(t, capRule{ + IncludeInUserInfo: false, + ExtraClaims: map[string]interface{}{ + "foo": "ok", + }, + }), + }, + }, + expected: map[string]interface{}{}, + }, + { + name: "expired token", + caps: tailcfg.PeerCapMap{}, + tokenValidTill: time.Now().Add(-1 * time.Minute), + expected: map[string]interface{}{}, + expectError: true, + }, + } + token := "valid-token" + + // Create a fake tailscale Node + node := &tailcfg.Node{ + ID: 123, + Name: "test-node.test.ts.net.", + User: 456, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + // Construct the remote user + profile := tailcfg.UserProfile{ + LoginName: "alice@example.com", + DisplayName: "Alice Example", + ProfilePicURL: "https://example.com/alice.jpg", + } + + remoteUser := &apitype.WhoIsResponse{ + Node: node, + UserProfile: &profile, + CapMap: tt.caps, + } + + // Insert a valid token into the idpServer + s := &idpServer{ + accessToken: map[string]*authRequest{ + token: { + validTill: tt.tokenValidTill, + remoteUser: remoteUser, + }, + }, + } + + // Construct request + req := httptest.NewRequest("GET", "/userinfo", nil) + req.Header.Set("Authorization", "Bearer "+token) + rr := httptest.NewRecorder() + + // Call the method under test + s.serveUserInfo(rr, req) + + if tt.expectError { + if rr.Code == http.StatusOK { + t.Fatalf("expected error, got %d: %s", rr.Code, rr.Body.String()) + } + return + } + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200 OK, got %d: %s", rr.Code, rr.Body.String()) + } + + var resp map[string]interface{} + if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil { + t.Fatalf("failed to parse JSON response: %v", err) + } + + // Construct expected + tt.expected["sub"] = remoteUser.Node.User.String() + tt.expected["name"] = profile.DisplayName + tt.expected["email"] = profile.LoginName + tt.expected["picture"] = profile.ProfilePicURL + tt.expected["username"], _, _ = strings.Cut(profile.LoginName, "@") + + gotNormalized := normalizeMap(t, resp) + expectedNormalized := normalizeMap(t, tt.expected) + + if !reflect.DeepEqual(gotNormalized, expectedNormalized) { + t.Errorf("UserInfo mismatch:\n got: %#v\nwant: %#v", gotNormalized, expectedNormalized) + } + }) + } +} diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 0043c0ecd..ada0df8fc 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -1469,6 
+1469,11 @@ const ( // PeerCapabilityRelayTarget grants the current node the ability to allocate // relay endpoints to the peer which has this capability. PeerCapabilityRelayTarget PeerCapability = "tailscale.com/cap/relay-target" + + // PeerCapabilityTsIDP grants a peer tsidp-specific + // capabilities, such as the ability to add user groups to the OIDC + // claim + PeerCapabilityTsIDP PeerCapability = "tailscale.com/cap/tsidp" ) // NodeCapMap is a map of capabilities to their optional values. It is valid for From e649227ef2f81163a16c93ff0b42fc175e00ea43 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Thu, 17 Apr 2025 18:05:07 -0700 Subject: [PATCH 0780/1708] cmd/tsidp: fix interface{} linter warnings (#15729) Replace all instances of interface{} with any to resolve the golangci-lint errors that appeared in the previous tsidp PR. Updates #cleanup Signed-off-by: Patrick O'Doherty --- cmd/tsidp/tsidp.go | 26 +++---- cmd/tsidp/tsidp_test.go | 169 ++++++++++++++++++++-------------------- 2 files changed, 98 insertions(+), 97 deletions(-) diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 15f7e6e9c..e2b777fa1 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -543,14 +543,14 @@ type userInfo struct { } type capRule struct { - IncludeInUserInfo bool `json:"includeInUserInfo"` - ExtraClaims map[string]interface{} `json:"extraClaims,omitempty"` // list of features peer is allowed to edit + IncludeInUserInfo bool `json:"includeInUserInfo"` + ExtraClaims map[string]any `json:"extraClaims,omitempty"` // list of features peer is allowed to edit } // flattenExtraClaims merges all ExtraClaims from a slice of capRule into a single map. // It deduplicates values for each claim and preserves the original input type: -// scalar values remain scalars, and slices are returned as deduplicated []interface{} slices. -func flattenExtraClaims(rules []capRule) map[string]interface{} { +// scalar values remain scalars, and slices are returned as deduplicated []any slices. +func flattenExtraClaims(rules []capRule) map[string]any { // sets stores deduplicated stringified values for each claim key. sets := make(map[string]map[string]struct{}) @@ -561,7 +561,7 @@ func flattenExtraClaims(rules []capRule) map[string]interface{} { for claim, raw := range rule.ExtraClaims { // Track whether the claim was provided as a slice switch raw.(type) { - case []string, []interface{}: + case []string, []any: isSlice[claim] = true default: // Only mark as scalar if this is the first time we've seen this claim @@ -576,11 +576,11 @@ func flattenExtraClaims(rules []capRule) map[string]interface{} { } // Build final result: either scalar or slice depending on original type - result := make(map[string]interface{}) + result := make(map[string]any) for claim, valSet := range sets { if isSlice[claim] { - // Claim was provided as a slice: output as []interface{} - var vals []interface{} + // Claim was provided as a slice: output as []any + var vals []any for val := range valSet { vals = append(vals, val) } @@ -600,7 +600,7 @@ func flattenExtraClaims(rules []capRule) map[string]interface{} { // addClaimValue adds a claim value to the deduplication set for a given claim key. // It accepts scalars (string, int, float64), slices of strings or interfaces, // and recursively handles nested slices. Unsupported types are ignored with a log message. 
-func addClaimValue(sets map[string]map[string]struct{}, claim string, val interface{}) { +func addClaimValue(sets map[string]map[string]struct{}, claim string, val any) { switch v := val.(type) { case string, float64, int, int64: // Ensure the claim set is initialized @@ -620,7 +620,7 @@ func addClaimValue(sets map[string]map[string]struct{}, claim string, val interf sets[claim][s] = struct{}{} } - case []interface{}: + case []any: // Recursively handle each item in the slice for _, item := range v { addClaimValue(sets, claim, item) @@ -633,7 +633,7 @@ func addClaimValue(sets map[string]map[string]struct{}, claim string, val interf } // withExtraClaims merges flattened extra claims from a list of capRule into the provided struct v, -// returning a map[string]interface{} that combines both sources. +// returning a map[string]any that combines both sources. // // v is any struct whose fields represent static claims; it is first marshaled to JSON, then unmarshalled into a generic map. // rules is a slice of capRule objects that may define additional (extra) claims to merge. @@ -643,7 +643,7 @@ func addClaimValue(sets map[string]map[string]struct{}, claim string, val interf // If an extra claim attempts to overwrite a protected claim, an error is returned. // // Returns the merged claims map or an error if any protected claim is violated or JSON (un)marshaling fails. -func withExtraClaims(v any, rules []capRule) (map[string]interface{}, error) { +func withExtraClaims(v any, rules []capRule) (map[string]any, error) { // Marshal the static struct data, err := json.Marshal(v) if err != nil { @@ -651,7 +651,7 @@ func withExtraClaims(v any, rules []capRule) (map[string]interface{}, error) { } // Unmarshal into a generic map - var claimMap map[string]interface{} + var claimMap map[string]any if err := json.Unmarshal(data, &claimMap); err != nil { return nil, err } diff --git a/cmd/tsidp/tsidp_test.go b/cmd/tsidp/tsidp_test.go index f6122708a..76a118991 100644 --- a/cmd/tsidp/tsidp_test.go +++ b/cmd/tsidp/tsidp_test.go @@ -7,8 +7,6 @@ import ( "crypto/rsa" "encoding/json" "fmt" - "gopkg.in/square/go-jose.v2" - "gopkg.in/square/go-jose.v2/jwt" "io" "log" "net/http" @@ -18,29 +16,32 @@ import ( "reflect" "sort" "strings" + "testing" + "time" + + "gopkg.in/square/go-jose.v2" + "gopkg.in/square/go-jose.v2/jwt" "tailscale.com/client/tailscale/apitype" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/views" - "testing" - "time" ) -// normalizeMap recursively sorts []interface{} values in a map[string]interface{} -func normalizeMap(t *testing.T, m map[string]interface{}) map[string]interface{} { +// normalizeMap recursively sorts []any values in a map[string]any +func normalizeMap(t *testing.T, m map[string]any) map[string]any { t.Helper() - normalized := make(map[string]interface{}, len(m)) + normalized := make(map[string]any, len(m)) for k, v := range m { switch val := v.(type) { - case []interface{}: + case []any: sorted := make([]string, len(val)) for i, item := range val { sorted[i] = fmt.Sprintf("%v", item) // convert everything to string for sorting } sort.Strings(sorted) - // convert back to []interface{} - sortedIface := make([]interface{}, len(sorted)) + // convert back to []any + sortedIface := make([]any, len(sorted)) for i, s := range sorted { sortedIface[i] = s } @@ -101,26 +102,26 @@ func TestFlattenExtraClaims(t *testing.T) { tests := []struct { name string input []capRule - expected map[string]interface{} + expected map[string]any }{ { name: "empty extra claims", 
input: []capRule{ - {ExtraClaims: map[string]interface{}{}}, + {ExtraClaims: map[string]any{}}, }, - expected: map[string]interface{}{}, + expected: map[string]any{}, }, { name: "string and number values", input: []capRule{ { - ExtraClaims: map[string]interface{}{ + ExtraClaims: map[string]any{ "featureA": "read", "featureB": 42, }, }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "featureA": "read", "featureB": "42", }, @@ -129,95 +130,95 @@ func TestFlattenExtraClaims(t *testing.T) { name: "slice of strings and ints", input: []capRule{ { - ExtraClaims: map[string]interface{}{ - "roles": []interface{}{"admin", "user", 1}, + ExtraClaims: map[string]any{ + "roles": []any{"admin", "user", 1}, }, }, }, - expected: map[string]interface{}{ - "roles": []interface{}{"admin", "user", "1"}, + expected: map[string]any{ + "roles": []any{"admin", "user", "1"}, }, }, { name: "duplicate values deduplicated (slice input)", input: []capRule{ { - ExtraClaims: map[string]interface{}{ + ExtraClaims: map[string]any{ "foo": []string{"bar", "baz"}, }, }, { - ExtraClaims: map[string]interface{}{ - "foo": []interface{}{"bar", "qux"}, + ExtraClaims: map[string]any{ + "foo": []any{"bar", "qux"}, }, }, }, - expected: map[string]interface{}{ - "foo": []interface{}{"bar", "baz", "qux"}, + expected: map[string]any{ + "foo": []any{"bar", "baz", "qux"}, }, }, { name: "ignore unsupported map type, keep valid scalar", input: []capRule{ { - ExtraClaims: map[string]interface{}{ - "invalid": map[string]interface{}{"bad": "yes"}, + ExtraClaims: map[string]any{ + "invalid": map[string]any{"bad": "yes"}, "valid": "ok", }, }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "valid": "ok", }, }, { name: "scalar first, slice second", input: []capRule{ - {ExtraClaims: map[string]interface{}{"foo": "bar"}}, - {ExtraClaims: map[string]interface{}{"foo": []interface{}{"baz"}}}, + {ExtraClaims: map[string]any{"foo": "bar"}}, + {ExtraClaims: map[string]any{"foo": []any{"baz"}}}, }, - expected: map[string]interface{}{ - "foo": []interface{}{"bar", "baz"}, // since first was scalar, second being a slice forces slice output + expected: map[string]any{ + "foo": []any{"bar", "baz"}, // since first was scalar, second being a slice forces slice output }, }, { name: "conflicting scalar and unsupported map", input: []capRule{ - {ExtraClaims: map[string]interface{}{"foo": "bar"}}, - {ExtraClaims: map[string]interface{}{"foo": map[string]interface{}{"bad": "entry"}}}, + {ExtraClaims: map[string]any{"foo": "bar"}}, + {ExtraClaims: map[string]any{"foo": map[string]any{"bad": "entry"}}}, }, - expected: map[string]interface{}{ + expected: map[string]any{ "foo": "bar", // map should be ignored }, }, { name: "multiple slices with overlap", input: []capRule{ - {ExtraClaims: map[string]interface{}{"roles": []interface{}{"admin", "user"}}}, - {ExtraClaims: map[string]interface{}{"roles": []interface{}{"admin", "guest"}}}, + {ExtraClaims: map[string]any{"roles": []any{"admin", "user"}}}, + {ExtraClaims: map[string]any{"roles": []any{"admin", "guest"}}}, }, - expected: map[string]interface{}{ - "roles": []interface{}{"admin", "user", "guest"}, + expected: map[string]any{ + "roles": []any{"admin", "user", "guest"}, }, }, { name: "slice with unsupported values", input: []capRule{ - {ExtraClaims: map[string]interface{}{ - "mixed": []interface{}{"ok", 42, map[string]string{"oops": "fail"}}, + {ExtraClaims: map[string]any{ + "mixed": []any{"ok", 42, map[string]string{"oops": "fail"}}, }}, }, - expected: 
map[string]interface{}{ - "mixed": []interface{}{"ok", "42"}, // map is ignored + expected: map[string]any{ + "mixed": []any{"ok", "42"}, // map is ignored }, }, { name: "duplicate scalar value", input: []capRule{ - {ExtraClaims: map[string]interface{}{"env": "prod"}}, - {ExtraClaims: map[string]interface{}{"env": "prod"}}, + {ExtraClaims: map[string]any{"env": "prod"}}, + {ExtraClaims: map[string]any{"env": "prod"}}, }, - expected: map[string]interface{}{ + expected: map[string]any{ "env": "prod", // not converted to slice }, }, @@ -242,7 +243,7 @@ func TestExtraClaims(t *testing.T) { name string claim tailscaleClaims extraClaims []capRule - expected map[string]interface{} + expected map[string]any expectError bool }{ { @@ -261,12 +262,12 @@ func TestExtraClaims(t *testing.T) { }, extraClaims: []capRule{ { - ExtraClaims: map[string]interface{}{ + ExtraClaims: map[string]any{ "foo": []string{"bar"}, }, }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "nonce": "foobar", "key": "nodekey:0000000000000000000000000000000000000000000000000000000000000000", "addresses": nil, @@ -275,7 +276,7 @@ func TestExtraClaims(t *testing.T) { "tailnet": "test.ts.net", "email": "test@example.com", "username": "test", - "foo": []interface{}{"bar"}, + "foo": []any{"bar"}, }, }, { @@ -294,17 +295,17 @@ func TestExtraClaims(t *testing.T) { }, extraClaims: []capRule{ { - ExtraClaims: map[string]interface{}{ + ExtraClaims: map[string]any{ "foo": []string{"bar"}, }, }, { - ExtraClaims: map[string]interface{}{ + ExtraClaims: map[string]any{ "foo": []string{"foobar"}, }, }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "nonce": "foobar", "key": "nodekey:0000000000000000000000000000000000000000000000000000000000000000", "addresses": nil, @@ -313,7 +314,7 @@ func TestExtraClaims(t *testing.T) { "tailnet": "test.ts.net", "email": "test@example.com", "username": "test", - "foo": []interface{}{"foobar", "bar"}, + "foo": []any{"foobar", "bar"}, }, }, { @@ -332,17 +333,17 @@ func TestExtraClaims(t *testing.T) { }, extraClaims: []capRule{ { - ExtraClaims: map[string]interface{}{ + ExtraClaims: map[string]any{ "foo": []string{"bar"}, }, }, { - ExtraClaims: map[string]interface{}{ + ExtraClaims: map[string]any{ "bar": []string{"foo"}, }, }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "nonce": "foobar", "key": "nodekey:0000000000000000000000000000000000000000000000000000000000000000", "addresses": nil, @@ -351,8 +352,8 @@ func TestExtraClaims(t *testing.T) { "tailnet": "test.ts.net", "email": "test@example.com", "username": "test", - "foo": []interface{}{"bar"}, - "bar": []interface{}{"foo"}, + "foo": []any{"bar"}, + "bar": []any{"foo"}, }, }, { @@ -371,12 +372,12 @@ func TestExtraClaims(t *testing.T) { }, extraClaims: []capRule{ { - ExtraClaims: map[string]interface{}{ + ExtraClaims: map[string]any{ "username": "foobar", }, }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "nonce": "foobar", "key": "nodekey:0000000000000000000000000000000000000000000000000000000000000000", "addresses": nil, @@ -402,8 +403,8 @@ func TestExtraClaims(t *testing.T) { UserID: 0, UserName: "test", }, - extraClaims: []capRule{{ExtraClaims: map[string]interface{}{}}}, - expected: map[string]interface{}{ + extraClaims: []capRule{{ExtraClaims: map[string]any{}}}, + expected: map[string]any{ "nonce": "foobar", "key": "nodekey:0000000000000000000000000000000000000000000000000000000000000000", "addresses": nil, @@ -427,13 +428,13 @@ func TestExtraClaims(t *testing.T) { 
return // just as expected } - // Marshal to JSON then unmarshal back to map[string]interface{} + // Marshal to JSON then unmarshal back to map[string]any gotClaims, err := json.Marshal(claims) if err != nil { t.Errorf("json.Marshal(claims) error = %v", err) } - var gotClaimsMap map[string]interface{} + var gotClaimsMap map[string]any if err := json.Unmarshal(gotClaims, &gotClaimsMap); err != nil { t.Fatalf("json.Unmarshal(gotClaims) error = %v", err) } @@ -459,7 +460,7 @@ func TestServeToken(t *testing.T) { redirectURI string remoteAddr string expectError bool - expected map[string]interface{} + expected map[string]any }{ { name: "GET not allowed", @@ -516,13 +517,13 @@ func TestServeToken(t *testing.T) { tailcfg.PeerCapabilityTsIDP: { mustMarshalJSON(t, capRule{ IncludeInUserInfo: true, - ExtraClaims: map[string]interface{}{ + ExtraClaims: map[string]any{ "foo": "bar", }, }), }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "foo": "bar", }, }, @@ -536,7 +537,7 @@ func TestServeToken(t *testing.T) { tailcfg.PeerCapabilityTsIDP: { mustMarshalJSON(t, capRule{ IncludeInUserInfo: true, - ExtraClaims: map[string]interface{}{ + ExtraClaims: map[string]any{ "sub": "should-not-overwrite", }, }), @@ -623,7 +624,7 @@ func TestServeToken(t *testing.T) { t.Fatalf("failed to parse ID token: %v", err) } - out := make(map[string]interface{}) + out := make(map[string]any) if err := tok.Claims(oidcTestingPublicKey(t), &out); err != nil { t.Fatalf("failed to extract claims: %v", err) } @@ -647,7 +648,7 @@ func TestExtraUserInfo(t *testing.T) { name string caps tailcfg.PeerCapMap tokenValidTill time.Time - expected map[string]interface{} + expected map[string]any expectError bool }{ { @@ -657,14 +658,14 @@ func TestExtraUserInfo(t *testing.T) { tailcfg.PeerCapabilityTsIDP: { mustMarshalJSON(t, capRule{ IncludeInUserInfo: true, - ExtraClaims: map[string]interface{}{ + ExtraClaims: map[string]any{ "foo": []string{"bar"}, }, }), }, }, - expected: map[string]interface{}{ - "foo": []interface{}{"bar"}, + expected: map[string]any{ + "foo": []any{"bar"}, }, }, { @@ -674,14 +675,14 @@ func TestExtraUserInfo(t *testing.T) { tailcfg.PeerCapabilityTsIDP: { mustMarshalJSON(t, capRule{ IncludeInUserInfo: true, - ExtraClaims: map[string]interface{}{ + ExtraClaims: map[string]any{ "foo": []string{"bar", "foobar"}, }, }), }, }, - expected: map[string]interface{}{ - "foo": []interface{}{"bar", "foobar"}, + expected: map[string]any{ + "foo": []any{"bar", "foobar"}, }, }, { @@ -691,14 +692,14 @@ func TestExtraUserInfo(t *testing.T) { tailcfg.PeerCapabilityTsIDP: { mustMarshalJSON(t, capRule{ IncludeInUserInfo: true, - ExtraClaims: map[string]interface{}{ + ExtraClaims: map[string]any{ "foo": "bar", "bar": "foo", }, }), }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "foo": "bar", "bar": "foo", }, @@ -707,7 +708,7 @@ func TestExtraUserInfo(t *testing.T) { name: "empty extra claims", caps: tailcfg.PeerCapMap{}, tokenValidTill: time.Now().Add(1 * time.Minute), - expected: map[string]interface{}{}, + expected: map[string]any{}, }, { name: "attempt to overwrite protected claim", @@ -716,7 +717,7 @@ func TestExtraUserInfo(t *testing.T) { tailcfg.PeerCapabilityTsIDP: { mustMarshalJSON(t, capRule{ IncludeInUserInfo: true, - ExtraClaims: map[string]interface{}{ + ExtraClaims: map[string]any{ "sub": "should-not-overwrite", "foo": "ok", }, @@ -732,19 +733,19 @@ func TestExtraUserInfo(t *testing.T) { tailcfg.PeerCapabilityTsIDP: { mustMarshalJSON(t, capRule{ IncludeInUserInfo: false, - 
ExtraClaims: map[string]interface{}{ + ExtraClaims: map[string]any{ "foo": "ok", }, }), }, }, - expected: map[string]interface{}{}, + expected: map[string]any{}, }, { name: "expired token", caps: tailcfg.PeerCapMap{}, tokenValidTill: time.Now().Add(-1 * time.Minute), - expected: map[string]interface{}{}, + expected: map[string]any{}, expectError: true, }, } @@ -802,7 +803,7 @@ func TestExtraUserInfo(t *testing.T) { t.Fatalf("expected 200 OK, got %d: %s", rr.Code, rr.Body.String()) } - var resp map[string]interface{} + var resp map[string]any if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil { t.Fatalf("failed to parse JSON response: %v", err) } From 7090f7fffc2c6ea67d0ff9e1adb582a6e87db468 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 18 Apr 2025 16:52:55 -0500 Subject: [PATCH 0781/1708] ipn/ipnlocal: use MagicDNSName of the current profile instead of generating a full ipnstate.Status Both are populated from the current netmap's MagicDNSSuffix. But building a full ipnstate.Status (with peers!) is expensive and unnecessary. Updates #cleanup Signed-off-by: Nick Khyl --- ipn/ipnlocal/serve.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 638b26a36..cc0d219d8 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -632,7 +632,7 @@ func (b *LocalBackend) getServeHandler(r *http.Request) (_ ipn.HTTPHandlerView, hostname := r.Host if r.TLS == nil { - tcd := "." + b.Status().CurrentTailnet.MagicDNSSuffix + tcd := "." + b.CurrentProfile().NetworkProfile().MagicDNSName if host, _, err := net.SplitHostPort(hostname); err == nil { hostname = host } From a3fc5150e3a42c1659bb8130bcf4f09e2128f00f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olivier=20Mengu=C3=A9?= Date: Tue, 1 Apr 2025 17:22:20 +0200 Subject: [PATCH 0782/1708] client/tailscale: add godoc links in Deprecated comments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Olivier Mengué --- client/tailscale/localclient_aliases.go | 60 ++++++++++++------------- client/tailscale/tailscale.go | 22 ++++----- 2 files changed, 41 insertions(+), 41 deletions(-) diff --git a/client/tailscale/localclient_aliases.go b/client/tailscale/localclient_aliases.go index 28d597232..2b53906b7 100644 --- a/client/tailscale/localclient_aliases.go +++ b/client/tailscale/localclient_aliases.go @@ -12,95 +12,95 @@ import ( "tailscale.com/ipn/ipnstate" ) -// ErrPeerNotFound is an alias for tailscale.com/client/local. +// ErrPeerNotFound is an alias for [tailscale.com/client/local.ErrPeerNotFound]. // -// Deprecated: import tailscale.com/client/local instead. +// Deprecated: import [tailscale.com/client/local] instead. var ErrPeerNotFound = local.ErrPeerNotFound -// LocalClient is an alias for tailscale.com/client/local. +// LocalClient is an alias for [tailscale.com/client/local.Client]. // -// Deprecated: import tailscale.com/client/local instead. +// Deprecated: import [tailscale.com/client/local] instead. type LocalClient = local.Client -// IPNBusWatcher is an alias for tailscale.com/client/local. +// IPNBusWatcher is an alias for [tailscale.com/client/local.IPNBusWatcher]. // -// Deprecated: import tailscale.com/client/local instead. +// Deprecated: import [tailscale.com/client/local] instead. type IPNBusWatcher = local.IPNBusWatcher -// BugReportOpts is an alias for tailscale.com/client/local. +// BugReportOpts is an alias for [tailscale.com/client/local.BugReportOpts]. 
// -// Deprecated: import tailscale.com/client/local instead. +// Deprecated: import [tailscale.com/client/local] instead. type BugReportOpts = local.BugReportOpts -// DebugPortMapOpts is an alias for tailscale.com/client/local. +// DebugPortmapOpts is an alias for [tailscale.com/client/local.DebugPortmapOpts]. // -// Deprecated: import tailscale.com/client/local instead. +// Deprecated: import [tailscale.com/client/local] instead. type DebugPortmapOpts = local.DebugPortmapOpts -// PingOpts is an alias for tailscale.com/client/local. +// PingOpts is an alias for [tailscale.com/client/local.PingOpts]. // -// Deprecated: import tailscale.com/client/local instead. +// Deprecated: import [tailscale.com/client/local] instead. type PingOpts = local.PingOpts -// GetCertificate is an alias for tailscale.com/client/local. +// GetCertificate is an alias for [tailscale.com/client/local.GetCertificate]. // -// Deprecated: import tailscale.com/client/local instead. +// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.GetCertificate]. func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { return local.GetCertificate(hi) } -// SetVersionMismatchHandler is an alias for tailscale.com/client/local. +// SetVersionMismatchHandler is an alias for [tailscale.com/client/local.SetVersionMismatchHandler]. // -// Deprecated: import tailscale.com/client/local instead. +// Deprecated: import [tailscale.com/client/local] instead. func SetVersionMismatchHandler(f func(clientVer, serverVer string)) { local.SetVersionMismatchHandler(f) } -// IsAccessDeniedError is an alias for tailscale.com/client/local. +// IsAccessDeniedError is an alias for [tailscale.com/client/local.IsAccessDeniedError]. // -// Deprecated: import tailscale.com/client/local instead. +// Deprecated: import [tailscale.com/client/local] instead. func IsAccessDeniedError(err error) bool { return local.IsAccessDeniedError(err) } -// IsPreconditionsFailedError is an alias for tailscale.com/client/local. +// IsPreconditionsFailedError is an alias for [tailscale.com/client/local.IsPreconditionsFailedError]. // -// Deprecated: import tailscale.com/client/local instead. +// Deprecated: import [tailscale.com/client/local] instead. func IsPreconditionsFailedError(err error) bool { return local.IsPreconditionsFailedError(err) } -// WhoIs is an alias for tailscale.com/client/local. +// WhoIs is an alias for [tailscale.com/client/local.WhoIs]. // -// Deprecated: import tailscale.com/client/local instead. +// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.WhoIs]. func WhoIs(ctx context.Context, remoteAddr string) (*apitype.WhoIsResponse, error) { return local.WhoIs(ctx, remoteAddr) } -// Status is an alias for tailscale.com/client/local. +// Status is an alias for [tailscale.com/client/local.Status]. // -// Deprecated: import tailscale.com/client/local instead. +// Deprecated: import [tailscale.com/client/local] instead. func Status(ctx context.Context) (*ipnstate.Status, error) { return local.Status(ctx) } -// StatusWithoutPeers is an alias for tailscale.com/client/local. +// StatusWithoutPeers is an alias for [tailscale.com/client/local.StatusWithoutPeers]. // -// Deprecated: import tailscale.com/client/local instead. +// Deprecated: import [tailscale.com/client/local] instead. func StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { return local.StatusWithoutPeers(ctx) } -// CertPair is an alias for tailscale.com/client/local. 
+// CertPair is an alias for [tailscale.com/client/local.CertPair]. // -// Deprecated: import tailscale.com/client/local instead. +// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.CertPair]. func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { return local.CertPair(ctx, domain) } -// ExpandSNIName is an alias for tailscale.com/client/local. +// ExpandSNIName is an alias for [tailscale.com/client/local.ExpandSNIName]. // -// Deprecated: import tailscale.com/client/local instead. +// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.ExpandSNIName]. func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { return local.ExpandSNIName(ctx, name) } diff --git a/client/tailscale/tailscale.go b/client/tailscale/tailscale.go index 4c6273c89..76e44454b 100644 --- a/client/tailscale/tailscale.go +++ b/client/tailscale/tailscale.go @@ -8,7 +8,7 @@ // This package is only intended for internal and transitional use. // // Deprecated: the official control plane client is available at -// tailscale.com/client/tailscale/v2. +// [tailscale.com/client/tailscale/v2]. package tailscale import ( @@ -22,7 +22,7 @@ import ( ) // I_Acknowledge_This_API_Is_Unstable must be set true to use this package -// for now. This package is being replaced by tailscale.com/client/tailscale/v2. +// for now. This package is being replaced by [tailscale.com/client/tailscale/v2]. var I_Acknowledge_This_API_Is_Unstable = false // TODO: use url.PathEscape() for deviceID and tailnets when constructing requests. @@ -34,10 +34,10 @@ const maxReadSize = 10 << 20 // Client makes API calls to the Tailscale control plane API server. // -// Use NewClient to instantiate one. Exported fields should be set before +// Use [NewClient] to instantiate one. Exported fields should be set before // the client is used and not changed thereafter. // -// Deprecated: use tailscale.com/client/tailscale/v2 instead. +// Deprecated: use [tailscale.com/client/tailscale/v2] instead. type Client struct { // tailnet is the globally unique identifier for a Tailscale network, such // as "example.com" or "user@gmail.com". @@ -51,7 +51,7 @@ type Client struct { BaseURL string // HTTPClient optionally specifies an alternate HTTP client to use. - // If nil, http.DefaultClient is used. + // If nil, [http.DefaultClient] is used. HTTPClient *http.Client // UserAgent optionally specifies an alternate User-Agent header @@ -119,7 +119,7 @@ type AuthMethod interface { modifyRequest(req *http.Request) } -// APIKey is an AuthMethod for NewClient that authenticates requests +// APIKey is an [AuthMethod] for [NewClient] that authenticates requests // using an authkey. type APIKey string @@ -133,15 +133,15 @@ func (c *Client) setAuth(r *http.Request) { } } -// NewClient is a convenience method for instantiating a new Client. +// NewClient is a convenience method for instantiating a new [Client]. // // tailnet is the globally unique identifier for a Tailscale network, such // as "example.com" or "user@gmail.com". -// If httpClient is nil, then http.DefaultClient is used. +// If httpClient is nil, then [http.DefaultClient] is used. // "api.tailscale.com" is set as the BaseURL for the returned client // and can be changed manually by the user. // -// Deprecated: use tailscale.com/client/tailscale/v2 instead. +// Deprecated: use [tailscale.com/client/tailscale/v2] instead. 
func NewClient(tailnet string, auth AuthMethod) *Client { return &Client{ tailnet: tailnet, @@ -193,9 +193,9 @@ func (e ErrResponse) Error() string { } // HandleErrorResponse decodes the error message from the server and returns -// an ErrResponse from it. +// an [ErrResponse] from it. // -// Deprecated: use tailscale.com/client/tailscale/v2 instead. +// Deprecated: use [tailscale.com/client/tailscale/v2] instead. func HandleErrorResponse(b []byte, resp *http.Response) error { var errResp ErrResponse if err := json.Unmarshal(b, &errResp); err != nil { From c28fda864a3a6f9f563f34d6ae90f6dd09784d05 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 22 Apr 2025 10:53:58 -0700 Subject: [PATCH 0783/1708] feature/relayserver: use PeerAPIHandler.Logf() (#15765) This was recently added, use it to be consistent. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 3d851780d..8e734bec9 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -142,7 +142,7 @@ func handlePeerAPIRelayAllocateEndpoint(h ipnlocal.PeerAPIHandler, w http.Respon httpErrAndLog := func(message string, code int) { http.Error(w, message, code) - e.logf("peerapi: request from %v returned code %d: %s", h.RemoteAddr(), code, message) + h.Logf("relayserver: request from %v returned code %d: %s", h.RemoteAddr(), code, message) } if !h.PeerCaps().HasCapability(tailcfg.PeerCapabilityRelay) { From c41a2d5c8372cea8bf5dc64ed8fcbec577fc00fd Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 23 Apr 2025 09:35:14 -0500 Subject: [PATCH 0784/1708] net/portmapper: fix nil pointer dereference in Client.createMapping The EventBus in net/portmapper.Config is still optional and Client.updates can be nil. Updates #15772 Signed-off-by: Nick Khyl --- net/portmapper/portmapper.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index f95d6503a..59f88e966 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -508,11 +508,13 @@ func (c *Client) createMapping() { } return } - c.updates.Publish(Mapping{ - External: mapping.External(), - Type: mapping.MappingType(), - GoodUntil: mapping.GoodUntil(), - }) + if c.updates != nil { + c.updates.Publish(Mapping{ + External: mapping.External(), + Type: mapping.MappingType(), + GoodUntil: mapping.GoodUntil(), + }) + } if c.onChange != nil { go c.onChange() } From bd33eb7bd7636199c3acecbc40a23033240de6f7 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 23 Apr 2025 09:57:59 -0500 Subject: [PATCH 0785/1708] ipn/ipnlocal: use tsd.NewSystem instead of &tsd.System in a few more tests These were likely added after everything else was updated to use tsd.NewSystem, in a feature branch, and before it was merged back into main. 
Updates #15160 Signed-off-by: Nick Khyl --- ipn/ipnlocal/extension_host_test.go | 4 ++-- ipn/ipnlocal/state_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go index ced5867e7..4c497dd99 100644 --- a/ipn/ipnlocal/extension_host_test.go +++ b/ipn/ipnlocal/extension_host_test.go @@ -284,7 +284,7 @@ func TestNewExtensionHost(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() logf := tstest.WhileTestRunningLogger(t) - h, err := NewExtensionHost(logf, &tsd.System{}, &testBackend{}, tt.defs...) + h, err := NewExtensionHost(logf, tsd.NewSystem(), &testBackend{}, tt.defs...) if gotErr := err != nil; gotErr != tt.wantErr { t.Errorf("NewExtensionHost: gotErr %v(%v); wantErr %v", gotErr, err, tt.wantErr) } @@ -1118,7 +1118,7 @@ func newExtensionHostForTest[T ipnext.Extension](t *testing.T, b Backend, initia } defs[i] = ipnext.DefinitionForTest(ext) } - h, err := NewExtensionHost(logf, &tsd.System{}, b, defs...) + h, err := NewExtensionHost(logf, tsd.NewSystem(), b, defs...) if err != nil { t.Fatalf("NewExtensionHost: %v", err) } diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index a91ec84cb..5d9e8b169 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1397,7 +1397,7 @@ func newLocalBackendWithMockEngineAndControl(t *testing.T, enableLogging bool) ( dialer := &tsdial.Dialer{Logf: logf} dialer.SetNetMon(netmon.NewStatic()) - sys := &tsd.System{} + sys := tsd.NewSystem() sys.Set(dialer) sys.Set(dialer.NetMon()) From 1f029180c74c63b922858026b17bc0a3b8c2ee70 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 23 Apr 2025 11:08:45 -0700 Subject: [PATCH 0786/1708] types/jsonx: add package for json/v2 helpers (#15756) The typical way to implement union types in Go is to use an interface where the set of types is limited. However, there historically has been poor support in v1 "encoding/json" with interface types where you can marshal such values, but fail to unmarshal them since type information about the concrete type is lost. The MakeInterfaceCoders function constructs custom marshal/unmarshal functions such that the type name is encoded in the JSON representation. The set of valid concrete types for an interface must be statically specified for this to function. Updates tailscale/corp#22024 Signed-off-by: Joe Tsai --- types/jsonx/json.go | 171 +++++++++++++++++++++++++++++++++++++++ types/jsonx/json_test.go | 140 ++++++++++++++++++++++++++++++++ 2 files changed, 311 insertions(+) create mode 100644 types/jsonx/json.go create mode 100644 types/jsonx/json_test.go diff --git a/types/jsonx/json.go b/types/jsonx/json.go new file mode 100644 index 000000000..3f01ea358 --- /dev/null +++ b/types/jsonx/json.go @@ -0,0 +1,171 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package jsonx contains helper types and functionality to use with +// [github.com/go-json-experiment/json], which is positioned to be +// merged into the Go standard library as [encoding/json/v2]. 
+// +// See https://go.dev/issues/71497 +package jsonx + +import ( + "errors" + "fmt" + "reflect" + + "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" +) + +var ( + errUnknownTypeName = errors.New("unknown type name") + errNonSingularValue = errors.New("dynamic value must only have exactly one member") +) + +// MakeInterfaceCoders constructs a pair of marshal and unmarshal functions +// to serialize a Go interface type T. A bijective mapping for the set +// of concrete types that implement T is provided, +// where the key is a stable type name to use in the JSON representation, +// while the value is any value of a concrete type that implements T. +// By convention, only the zero value of concrete types is passed. +// +// The JSON representation for a dynamic value is a JSON object +// with a single member, where the member name is the type name, +// and the value is the JSON representation for the Go value. +// For example, the JSON serialization for a concrete type named Foo +// would be {"Foo": ...}, where ... is the JSON representation +// of the concrete value of the Foo type. +// +// Example instantiation: +// +// // Interface is a union type implemented by [FooType] and [BarType]. +// type Interface interface { ... } +// +// var interfaceCoders = MakeInterfaceCoders(map[string]Interface{ +// "FooType": FooType{}, +// "BarType": (*BarType)(nil), +// }) +// +// The pair of Marshal and Unmarshal functions can be used with the [json] +// package with either type-specified or caller-specified serialization. +// The result of this constructor is usually stored into a global variable. +// +// Example usage with type-specified serialization: +// +// // InterfaceWrapper is a concrete type that wraps [Interface]. +// // It extends [Interface] to implement +// // [json.MarshalerTo] and [json.UnmarshalerFrom]. +// type InterfaceWrapper struct{ Interface } +// +// func (w InterfaceWrapper) MarshalJSONTo(enc *jsontext.Encoder) error { +// return interfaceCoders.Marshal(enc, &w.Interface) +// } +// +// func (w *InterfaceWrapper) UnmarshalJSONFrom(dec *jsontext.Decoder) error { +// return interfaceCoders.Unmarshal(dec, &w.Interface) +// } +// +// Example usage with caller-specified serialization: +// +// var opts json.Options = json.JoinOptions( +// json.WithMarshalers(json.MarshalToFunc(interfaceCoders.Marshal)), +// json.WithUnmarshalers(json.UnmarshalFromFunc(interfaceCoders.Unmarshal)), +// ) +// +// var v Interface +// ... := json.Marshal(v, opts) +// ... := json.Unmarshal(&v, opts) +// +// The function panics if T is not a named interface kind, +// or if valuesByName contains distinct entries with the same concrete type. +func MakeInterfaceCoders[T any](valuesByName map[string]T) (c struct { + Marshal func(*jsontext.Encoder, *T) error + Unmarshal func(*jsontext.Decoder, *T) error +}) { + // Verify that T is a named interface. + switch t := reflect.TypeFor[T](); { + case t.Kind() != reflect.Interface: + panic(fmt.Sprintf("%v must be an interface kind", t)) + case t.Name() == "": + panic(fmt.Sprintf("%v must be a named type", t)) + } + + // Construct a bijective mapping of names to types. 
+ typesByName := make(map[string]reflect.Type) + namesByType := make(map[reflect.Type]string) + for name, value := range valuesByName { + t := reflect.TypeOf(value) + if t == nil { + panic(fmt.Sprintf("nil value for %s", name)) + } + if name2, ok := namesByType[t]; ok { + panic(fmt.Sprintf("type %v cannot have multiple names %s and %v", t, name, name2)) + } + typesByName[name] = t + namesByType[t] = name + } + + // Construct the marshal and unmarshal functions. + c.Marshal = func(enc *jsontext.Encoder, val *T) error { + t := reflect.TypeOf(*val) + if t == nil { + return enc.WriteToken(jsontext.Null) + } + name := namesByType[t] + if name == "" { + return fmt.Errorf("Go type %v: %w", t, errUnknownTypeName) + } + + if err := enc.WriteToken(jsontext.BeginObject); err != nil { + return err + } + if err := enc.WriteToken(jsontext.String(name)); err != nil { + return err + } + if err := json.MarshalEncode(enc, *val); err != nil { + return err + } + if err := enc.WriteToken(jsontext.EndObject); err != nil { + return err + } + return nil + } + c.Unmarshal = func(dec *jsontext.Decoder, val *T) error { + switch tok, err := dec.ReadToken(); { + case err != nil: + return err + case tok.Kind() == 'n': + var zero T + *val = zero // store nil interface value for JSON null + return nil + case tok.Kind() != '{': + return &json.SemanticError{JSONKind: tok.Kind(), GoType: reflect.TypeFor[T]()} + } + var v reflect.Value + switch tok, err := dec.ReadToken(); { + case err != nil: + return err + case tok.Kind() != '"': + return errNonSingularValue + default: + t := typesByName[tok.String()] + if t == nil { + return errUnknownTypeName + } + v = reflect.New(t) + } + if err := json.UnmarshalDecode(dec, v.Interface()); err != nil { + return err + } + *val = v.Elem().Interface().(T) + switch tok, err := dec.ReadToken(); { + case err != nil: + return err + case tok.Kind() != '}': + return errNonSingularValue + } + return nil + } + + return c +} diff --git a/types/jsonx/json_test.go b/types/jsonx/json_test.go new file mode 100644 index 000000000..0f2a646c4 --- /dev/null +++ b/types/jsonx/json_test.go @@ -0,0 +1,140 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package jsonx + +import ( + "errors" + "testing" + + "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "github.com/google/go-cmp/cmp" + "tailscale.com/types/ptr" +) + +type Interface interface { + implementsInterface() +} + +type Foo string + +func (Foo) implementsInterface() {} + +type Bar int + +func (Bar) implementsInterface() {} + +type Baz struct{ Fizz, Buzz string } + +func (*Baz) implementsInterface() {} + +var interfaceCoders = MakeInterfaceCoders(map[string]Interface{ + "Foo": Foo(""), + "Bar": (*Bar)(nil), + "Baz": (*Baz)(nil), +}) + +type InterfaceWrapper struct{ Interface } + +func (w InterfaceWrapper) MarshalJSONTo(enc *jsontext.Encoder) error { + return interfaceCoders.Marshal(enc, &w.Interface) +} + +func (w *InterfaceWrapper) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + return interfaceCoders.Unmarshal(dec, &w.Interface) +} + +func TestInterfaceCoders(t *testing.T) { + var opts json.Options = json.JoinOptions( + json.WithMarshalers(json.MarshalToFunc(interfaceCoders.Marshal)), + json.WithUnmarshalers(json.UnmarshalFromFunc(interfaceCoders.Unmarshal)), + ) + + errSkipMarshal := errors.New("skip marshal") + makeFiller := func() InterfaceWrapper { + return InterfaceWrapper{&Baz{"fizz", "buzz"}} + } + + for _, tt := range []struct { + label string + wantVal 
InterfaceWrapper + wantJSON string + wantMarshalError error + wantUnmarshalError error + }{{ + label: "Null", + wantVal: InterfaceWrapper{}, + wantJSON: `null`, + }, { + label: "Foo", + wantVal: InterfaceWrapper{Foo("hello")}, + wantJSON: `{"Foo":"hello"}`, + }, { + label: "BarPointer", + wantVal: InterfaceWrapper{ptr.To(Bar(5))}, + wantJSON: `{"Bar":5}`, + }, { + label: "BarValue", + wantVal: InterfaceWrapper{Bar(5)}, + // NOTE: We could handle BarValue just like BarPointer, + // but round-trip marshal/unmarshal would not be identical. + wantMarshalError: errUnknownTypeName, + }, { + label: "Baz", + wantVal: InterfaceWrapper{&Baz{"alpha", "omega"}}, + wantJSON: `{"Baz":{"Fizz":"alpha","Buzz":"omega"}}`, + }, { + label: "Unknown", + wantVal: makeFiller(), + wantJSON: `{"Unknown":[1,2,3]}`, + wantMarshalError: errSkipMarshal, + wantUnmarshalError: errUnknownTypeName, + }, { + label: "Empty", + wantVal: makeFiller(), + wantJSON: `{}`, + wantMarshalError: errSkipMarshal, + wantUnmarshalError: errNonSingularValue, + }, { + label: "Duplicate", + wantVal: InterfaceWrapper{Foo("hello")}, // first entry wins + wantJSON: `{"Foo":"hello","Bar":5}`, + wantMarshalError: errSkipMarshal, + wantUnmarshalError: errNonSingularValue, + }} { + t.Run(tt.label, func(t *testing.T) { + if tt.wantMarshalError != errSkipMarshal { + switch gotJSON, err := json.Marshal(&tt.wantVal); { + case !errors.Is(err, tt.wantMarshalError): + t.Fatalf("json.Marshal(%v) error = %v, want %v", tt.wantVal, err, tt.wantMarshalError) + case string(gotJSON) != tt.wantJSON: + t.Fatalf("json.Marshal(%v) = %s, want %s", tt.wantVal, gotJSON, tt.wantJSON) + } + switch gotJSON, err := json.Marshal(&tt.wantVal.Interface, opts); { + case !errors.Is(err, tt.wantMarshalError): + t.Fatalf("json.Marshal(%v) error = %v, want %v", tt.wantVal, err, tt.wantMarshalError) + case string(gotJSON) != tt.wantJSON: + t.Fatalf("json.Marshal(%v) = %s, want %s", tt.wantVal, gotJSON, tt.wantJSON) + } + } + + if tt.wantJSON != "" { + gotVal := makeFiller() + if err := json.Unmarshal([]byte(tt.wantJSON), &gotVal); !errors.Is(err, tt.wantUnmarshalError) { + t.Fatalf("json.Unmarshal(%v) error = %v, want %v", tt.wantJSON, err, tt.wantUnmarshalError) + } + if d := cmp.Diff(gotVal, tt.wantVal); d != "" { + t.Fatalf("json.Unmarshal(%v):\n%s", tt.wantJSON, d) + } + gotVal = makeFiller() + if err := json.Unmarshal([]byte(tt.wantJSON), &gotVal.Interface, opts); !errors.Is(err, tt.wantUnmarshalError) { + t.Fatalf("json.Unmarshal(%v) error = %v, want %v", tt.wantJSON, err, tt.wantUnmarshalError) + } + if d := cmp.Diff(gotVal, tt.wantVal); d != "" { + t.Fatalf("json.Unmarshal(%v):\n%s", tt.wantJSON, d) + } + } + }) + } +} From cb7bf929aa39f1156716932474cbc016e90be230 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 23 Apr 2025 13:59:03 -0700 Subject: [PATCH 0787/1708] go.mod: bump gorilla/csrf@v1.7.3 (#15775) This is the same version as before, but the old one confuses govulncheck. 
Updates #cleanup Signed-off-by: Andrew Lytvynov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index df8922de3..0c1224cf1 100644 --- a/go.mod +++ b/go.mod @@ -272,7 +272,7 @@ require ( github.com/gordonklaus/ineffassign v0.1.0 // indirect github.com/goreleaser/chglog v0.5.0 // indirect github.com/goreleaser/fileglob v1.3.0 // indirect - github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 + github.com/gorilla/csrf v1.7.3 github.com/gostaticanalysis/analysisutil v0.7.1 // indirect github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect diff --git a/go.sum b/go.sum index 860b4aa7c..8c8da8d14 100644 --- a/go.sum +++ b/go.sum @@ -524,8 +524,8 @@ github.com/goreleaser/fileglob v1.3.0 h1:/X6J7U8lbDpQtBvGcwwPS6OpzkNVlVEsFUVRx9+ github.com/goreleaser/fileglob v1.3.0/go.mod h1:Jx6BoXv3mbYkEzwm9THo7xbr5egkAraxkGorbJb4RxU= github.com/goreleaser/nfpm/v2 v2.33.1 h1:EkdAzZyVhAI9JC1vjmjjbmnNzyH1J6Cu4JCsA7YcQuc= github.com/goreleaser/nfpm/v2 v2.33.1/go.mod h1:8wwWWvJWmn84xo/Sqiv0aMvEGTHlHZTXTEuVSgQpkIM= -github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 h1:fiJdrgVBkjZ5B1HJ2WQwNOaXB+QyYcNXTA3t1XYLz0M= -github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= +github.com/gorilla/csrf v1.7.3 h1:BHWt6FTLZAb2HtWT5KDBf6qgpZzvtbp9QWDRKZMXJC0= +github.com/gorilla/csrf v1.7.3/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= From 25c4dc5fd7092b2df27b0f9a453a4663c5e85df2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 24 Apr 2025 10:49:33 -0700 Subject: [PATCH 0788/1708] ipn/ipnext: remove support for unregistering extension Updates #12614 Change-Id: I893e3ea74831deaa6f88e31bba2d95dc017e0470 Co-authored-by: Nick Khyl Signed-off-by: Brad Fitzpatrick --- ipn/auditlog/extension.go | 14 +-- ipn/desktop/extension.go | 5 +- ipn/ipnext/ipnext.go | 32 +++-- ipn/ipnlocal/extension_host.go | 177 +++++++++++----------------- ipn/ipnlocal/extension_host_test.go | 46 +------- 5 files changed, 98 insertions(+), 176 deletions(-) diff --git a/ipn/auditlog/extension.go b/ipn/auditlog/extension.go index 3b561b2e5..90014b72e 100644 --- a/ipn/auditlog/extension.go +++ b/ipn/auditlog/extension.go @@ -36,8 +36,6 @@ func init() { type extension struct { logf logger.Logf - // cleanup are functions to call on shutdown. - cleanup []func() // store is the log store shared by all loggers. // It is created when the first logger is started. store lazy.SyncValue[LogStore] @@ -66,11 +64,9 @@ func (e *extension) Name() string { // Init implements [ipnext.Extension] by registering callbacks and providers // for the duration of the extension's lifetime. 
func (e *extension) Init(h ipnext.Host) error { - e.cleanup = []func(){ - h.RegisterControlClientCallback(e.controlClientChanged), - h.Profiles().RegisterProfileStateChangeCallback(e.profileChanged), - h.RegisterAuditLogProvider(e.getCurrentLogger), - } + h.RegisterControlClientCallback(e.controlClientChanged) + h.Profiles().RegisterProfileStateChangeCallback(e.profileChanged) + h.RegisterAuditLogProvider(e.getCurrentLogger) return nil } @@ -190,9 +186,5 @@ func (e *extension) getCurrentLogger() ipnauth.AuditLogFunc { // Shutdown implements [ipnlocal.Extension]. func (e *extension) Shutdown() error { - for _, f := range e.cleanup { - f() - } - e.cleanup = nil return nil } diff --git a/ipn/desktop/extension.go b/ipn/desktop/extension.go index 86ae96f5b..057b4cfe6 100644 --- a/ipn/desktop/extension.go +++ b/ipn/desktop/extension.go @@ -74,13 +74,12 @@ func (e *desktopSessionsExt) Name() string { // Init implements [ipnext.Extension]. func (e *desktopSessionsExt) Init(host ipnext.Host) (err error) { e.host = host - unregisterResolver := host.Profiles().RegisterBackgroundProfileResolver(e.getBackgroundProfile) unregisterSessionCb, err := e.sm.RegisterStateCallback(e.updateDesktopSessionState) if err != nil { - unregisterResolver() return fmt.Errorf("session callback registration failed: %w", err) } - e.cleanup = []func(){unregisterResolver, unregisterSessionCb} + host.Profiles().RegisterBackgroundProfileResolver(e.getBackgroundProfile) + e.cleanup = []func(){unregisterSessionCb} return nil } diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index 4c7e978e5..5c35192e4 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -43,6 +43,7 @@ type Extension interface { // provided the extension was initialized. For multiple extensions, // Shutdown is called in the reverse order of Init. // Returned errors are not fatal; they are used for logging. + // After a call to Shutdown, the extension will not be called again. Shutdown() error } @@ -182,9 +183,11 @@ type Host interface { // RegisterAuditLogProvider registers an audit log provider, // which returns a function to be called when an auditable action - // is about to be performed. The returned function unregisters the provider. - // It is a runtime error to register a nil provider. - RegisterAuditLogProvider(AuditLogProvider) (unregister func()) + // is about to be performed. + // + // It is a runtime error to register a nil provider or call after the host + // has been initialized. + RegisterAuditLogProvider(AuditLogProvider) // AuditLogger returns a function that calls all currently registered audit loggers. // The function fails if any logger returns an error, indicating that the action @@ -195,9 +198,11 @@ type Host interface { AuditLogger() ipnauth.AuditLogFunc // RegisterControlClientCallback registers a function to be called every time a new - // control client is created. The returned function unregisters the callback. - // It is a runtime error to register a nil callback. - RegisterControlClientCallback(NewControlClientCallback) (unregister func()) + // control client is created. + // + // It is a runtime error to register a nil provider or call after the host + // has been initialized. 
+ RegisterControlClientCallback(NewControlClientCallback) } // ExtensionServices provides access to the [Host]'s extension management services, @@ -252,23 +257,26 @@ type ProfileServices interface { SwitchToBestProfileAsync(reason string) // RegisterBackgroundProfileResolver registers a function to be used when - // resolving the background profile. The returned function unregisters the resolver. - // It is a runtime error to register a nil resolver. + // resolving the background profile. + // + // It is a runtime error to register a nil provider or call after the host + // has been initialized. // // TODO(nickkhyl): allow specifying some kind of priority/altitude for the resolver. // TODO(nickkhyl): make it a "profile resolver" instead of a "background profile resolver". // The concepts of the "current user", "foreground profile" and "background profile" // only exist on Windows, and we're moving away from them anyway. - RegisterBackgroundProfileResolver(ProfileResolver) (unregister func()) + RegisterBackgroundProfileResolver(ProfileResolver) // RegisterProfileStateChangeCallback registers a function to be called when the current - // [ipn.LoginProfile] or its [ipn.Prefs] change. The returned function unregisters the callback. + // [ipn.LoginProfile] or its [ipn.Prefs] change. // // To get the initial profile or prefs, use [ProfileServices.CurrentProfileState] // or [ProfileServices.CurrentPrefs] from the extension's [Extension.Init]. // - // It is a runtime error to register a nil callback. - RegisterProfileStateChangeCallback(ProfileStateChangeCallback) (unregister func()) + // It is a runtime error to register a nil provider or call after the host + // has been initialized. + RegisterProfileStateChangeCallback(ProfileStateChangeCallback) } // ProfileStore provides read-only access to available login profiles and their preferences. diff --git a/ipn/ipnlocal/extension_host.go b/ipn/ipnlocal/extension_host.go index aa56ad8ef..a7a764ebc 100644 --- a/ipn/ipnlocal/extension_host.go +++ b/ipn/ipnlocal/extension_host.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "iter" "maps" "reflect" "slices" @@ -24,8 +23,6 @@ import ( "tailscale.com/tsd" "tailscale.com/types/logger" "tailscale.com/util/execqueue" - "tailscale.com/util/set" - "tailscale.com/util/slicesx" "tailscale.com/util/testenv" ) @@ -78,6 +75,7 @@ type ExtensionHost struct { // initOnce is used to ensure that the extensions are initialized only once, // even if [extensionHost.Init] is called multiple times. initOnce sync.Once + initDone atomic.Bool // shutdownOnce is like initOnce, but for [ExtensionHost.Shutdown]. shutdownOnce sync.Once @@ -87,6 +85,24 @@ type ExtensionHost struct { // doEnqueueBackendOperation adds an asynchronous [LocalBackend] operation to the workQueue. doEnqueueBackendOperation func(func(Backend)) + // profileStateChangeCbs are callbacks that are invoked when the current login profile + // or its [ipn.Prefs] change, after those changes have been made. The current login profile + // may be changed either because of a profile switch, or because the profile information + // was updated by [LocalBackend.SetControlClientStatus], including when the profile + // is first populated and persisted. + profileStateChangeCbs []ipnext.ProfileStateChangeCallback + // backgroundProfileResolvers are registered background profile resolvers. + // They're used to determine the profile to use when no GUI/CLI client is connected. 
+ backgroundProfileResolvers []ipnext.ProfileResolver + // auditLoggers are registered [AuditLogProvider]s. + // Each provider is called to get an [ipnauth.AuditLogFunc] when an auditable action + // is about to be performed. If an audit logger returns an error, the action is denied. + auditLoggers []ipnext.AuditLogProvider + // newControlClientCbs are the functions to be called when a new control client is created. + newControlClientCbs []ipnext.NewControlClientCallback + + shuttingDown atomic.Bool + // mu protects the following fields. // It must not be held when calling [LocalBackend] methods // or when invoking callbacks registered by extensions. @@ -107,22 +123,6 @@ type ExtensionHost struct { // currentPrefs is a read-only view of the current profile's [ipn.Prefs] // with any private keys stripped. It is always Valid. currentPrefs ipn.PrefsView - - // auditLoggers are registered [AuditLogProvider]s. - // Each provider is called to get an [ipnauth.AuditLogFunc] when an auditable action - // is about to be performed. If an audit logger returns an error, the action is denied. - auditLoggers set.HandleSet[ipnext.AuditLogProvider] - // backgroundProfileResolvers are registered background profile resolvers. - // They're used to determine the profile to use when no GUI/CLI client is connected. - backgroundProfileResolvers set.HandleSet[ipnext.ProfileResolver] - // newControlClientCbs are the functions to be called when a new control client is created. - newControlClientCbs set.HandleSet[ipnext.NewControlClientCallback] - // profileStateChangeCbs are callbacks that are invoked when the current login profile - // or its [ipn.Prefs] change, after those changes have been made. The current login profile - // may be changed either because of a profile switch, or because the profile information - // was updated by [LocalBackend.SetControlClientStatus], including when the profile - // is first populated and persisted. - profileStateChangeCbs set.HandleSet[ipnext.ProfileStateChangeCallback] } // Backend is a subset of [LocalBackend] methods that are used by [ExtensionHost]. @@ -160,13 +160,10 @@ func NewExtensionHost(logf logger.Logf, sys *tsd.System, b Backend, overrideExts host.workQueue.Add(func() { f(b) }) } - var numExts int - var exts iter.Seq2[int, *ipnext.Definition] - if overrideExts == nil { - // Use registered extensions. - exts = ipnext.Extensions().All() - numExts = ipnext.Extensions().Len() - } else { + // Use registered extensions. + exts := ipnext.Extensions().All() + numExts := ipnext.Extensions().Len() + if overrideExts != nil { // Use the provided, potentially empty, overrideExts // instead of the registered ones. exts = slices.All(overrideExts) @@ -196,6 +193,8 @@ func (h *ExtensionHost) Init() { } func (h *ExtensionHost) init() { + defer h.initDone.Store(true) + // Initialize the extensions in the order they were registered. h.mu.Lock() h.activeExtensions = make([]ipnext.Extension, 0, len(h.allExtensions)) @@ -343,21 +342,21 @@ func (h *ExtensionHost) Backend() Backend { return h.b } -// RegisterProfileStateChangeCallback implements [ipnext.ProfileServices]. -func (h *ExtensionHost) RegisterProfileStateChangeCallback(cb ipnext.ProfileStateChangeCallback) (unregister func()) { - if h == nil { - return func() {} +// addFuncHook appends non-nil fn to hooks. 
+func addFuncHook[F any](h *ExtensionHost, hooks *[]F, fn F) { + if h.initDone.Load() { + panic("invalid callback register after init") } - if cb == nil { - panic("nil profile change callback") + if reflect.ValueOf(fn).IsZero() { + panic("nil function hook") } - h.mu.Lock() - defer h.mu.Unlock() - handle := h.profileStateChangeCbs.Add(cb) - return func() { - h.mu.Lock() - defer h.mu.Unlock() - delete(h.profileStateChangeCbs, handle) + *hooks = append(*hooks, fn) +} + +// RegisterProfileStateChangeCallback implements [ipnext.ProfileServices]. +func (h *ExtensionHost) RegisterProfileStateChangeCallback(cb ipnext.ProfileStateChangeCallback) { + if h != nil { + addFuncHook(h, &h.profileStateChangeCbs, cb) } } @@ -366,7 +365,7 @@ func (h *ExtensionHost) RegisterProfileStateChangeCallback(cb ipnext.ProfileStat // It strips private keys from the [ipn.Prefs] before preserving // or passing them to the callbacks. func (h *ExtensionHost) NotifyProfileChange(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { - if h == nil { + if !h.active() { return } h.mu.Lock() @@ -378,10 +377,9 @@ func (h *ExtensionHost) NotifyProfileChange(profile ipn.LoginProfileView, prefs // so we can provide them to the extensions later if they ask. h.currentPrefs = prefs h.currentProfile = profile - // Get the callbacks to be invoked. - cbs := slicesx.MapValues(h.profileStateChangeCbs) h.mu.Unlock() - for _, cb := range cbs { + + for _, cb := range h.profileStateChangeCbs { cb(profile, prefs, sameNode) } } @@ -390,7 +388,7 @@ func (h *ExtensionHost) NotifyProfileChange(profile ipn.LoginProfileView, prefs // and updates the current profile and prefs in the host. // It strips private keys from the [ipn.Prefs] before preserving or using them. func (h *ExtensionHost) NotifyProfilePrefsChanged(profile ipn.LoginProfileView, oldPrefs, newPrefs ipn.PrefsView) { - if h == nil { + if !h.active() { return } h.mu.Lock() @@ -403,28 +401,24 @@ func (h *ExtensionHost) NotifyProfilePrefsChanged(profile ipn.LoginProfileView, h.currentPrefs = newPrefs h.currentProfile = profile // Get the callbacks to be invoked. - stateCbs := slicesx.MapValues(h.profileStateChangeCbs) h.mu.Unlock() - for _, cb := range stateCbs { + + for _, cb := range h.profileStateChangeCbs { cb(profile, newPrefs, true) } } // RegisterBackgroundProfileResolver implements [ipnext.ProfileServices]. -func (h *ExtensionHost) RegisterBackgroundProfileResolver(resolver ipnext.ProfileResolver) (unregister func()) { - if h == nil { - return func() {} - } - h.mu.Lock() - defer h.mu.Unlock() - handle := h.backgroundProfileResolvers.Add(resolver) - return func() { - h.mu.Lock() - defer h.mu.Unlock() - delete(h.backgroundProfileResolvers, handle) +func (h *ExtensionHost) RegisterBackgroundProfileResolver(resolver ipnext.ProfileResolver) { + if h != nil { + addFuncHook(h, &h.backgroundProfileResolvers, resolver) } } +func (h *ExtensionHost) active() bool { + return h != nil && !h.shuttingDown.Load() +} + // DetermineBackgroundProfile returns a read-only view of the profile // used when no GUI/CLI client is connected, using background profile // resolvers registered by extensions. @@ -434,7 +428,7 @@ func (h *ExtensionHost) RegisterBackgroundProfileResolver(resolver ipnext.Profil // // As of 2025-02-07, this is only used on Windows. 
func (h *ExtensionHost) DetermineBackgroundProfile(profiles ipnext.ProfileStore) ipn.LoginProfileView { - if h == nil { + if !h.active() { return ipn.LoginProfileView{} } // TODO(nickkhyl): check if the returned profile is allowed on the device, @@ -443,10 +437,7 @@ func (h *ExtensionHost) DetermineBackgroundProfile(profiles ipnext.ProfileStore) // Attempt to resolve the background profile using the registered // background profile resolvers (e.g., [ipn/desktop.desktopSessionsExt] on Windows). - h.mu.Lock() - resolvers := slicesx.MapValues(h.backgroundProfileResolvers) - h.mu.Unlock() - for _, resolver := range resolvers { + for _, resolver := range h.backgroundProfileResolvers { if profile := resolver(profiles); profile.Valid() { return profile } @@ -458,35 +449,21 @@ func (h *ExtensionHost) DetermineBackgroundProfile(profiles ipnext.ProfileStore) } // RegisterControlClientCallback implements [ipnext.Host]. -func (h *ExtensionHost) RegisterControlClientCallback(cb ipnext.NewControlClientCallback) (unregister func()) { - if h == nil { - return func() {} - } - if cb == nil { - panic("nil control client callback") - } - h.mu.Lock() - defer h.mu.Unlock() - handle := h.newControlClientCbs.Add(cb) - return func() { - h.mu.Lock() - defer h.mu.Unlock() - delete(h.newControlClientCbs, handle) +func (h *ExtensionHost) RegisterControlClientCallback(cb ipnext.NewControlClientCallback) { + if h != nil { + addFuncHook(h, &h.newControlClientCbs, cb) } } // NotifyNewControlClient invokes all registered control client callbacks. // It returns callbacks to be executed when the control client shuts down. func (h *ExtensionHost) NotifyNewControlClient(cc controlclient.Client, profile ipn.LoginProfileView) (ccShutdownCbs []func()) { - if h == nil { + if !h.active() { return nil } - h.mu.Lock() - cbs := slicesx.MapValues(h.newControlClientCbs) - h.mu.Unlock() - if len(cbs) > 0 { - ccShutdownCbs = make([]func(), 0, len(cbs)) - for _, cb := range cbs { + if len(h.newControlClientCbs) > 0 { + ccShutdownCbs = make([]func(), 0, len(h.newControlClientCbs)) + for _, cb := range h.newControlClientCbs { if shutdown := cb(cc, profile); shutdown != nil { ccShutdownCbs = append(ccShutdownCbs, shutdown) } @@ -496,20 +473,9 @@ func (h *ExtensionHost) NotifyNewControlClient(cc controlclient.Client, profile } // RegisterAuditLogProvider implements [ipnext.Host]. -func (h *ExtensionHost) RegisterAuditLogProvider(provider ipnext.AuditLogProvider) (unregister func()) { - if h == nil { - return func() {} - } - if provider == nil { - panic("nil audit log provider") - } - h.mu.Lock() - defer h.mu.Unlock() - handle := h.auditLoggers.Add(provider) - return func() { - h.mu.Lock() - defer h.mu.Unlock() - delete(h.auditLoggers, handle) +func (h *ExtensionHost) RegisterAuditLogProvider(provider ipnext.AuditLogProvider) { + if h != nil { + addFuncHook(h, &h.auditLoggers, provider) } } @@ -523,20 +489,12 @@ func (h *ExtensionHost) RegisterAuditLogProvider(provider ipnext.AuditLogProvide // which typically includes the current profile and the audit loggers registered by extensions. // It must not be persisted outside of the auditable action context. 
func (h *ExtensionHost) AuditLogger() ipnauth.AuditLogFunc { - if h == nil { + if !h.active() { return func(tailcfg.ClientAuditAction, string) error { return nil } } - - h.mu.Lock() - providers := slicesx.MapValues(h.auditLoggers) - h.mu.Unlock() - - var loggers []ipnauth.AuditLogFunc - if len(providers) > 0 { - loggers = make([]ipnauth.AuditLogFunc, len(providers)) - for i, provider := range providers { - loggers[i] = provider() - } + loggers := make([]ipnauth.AuditLogFunc, 0, len(h.auditLoggers)) + for _, provider := range h.auditLoggers { + loggers = append(loggers, provider()) } return func(action tailcfg.ClientAuditAction, details string) error { // Log auditable actions to the host's log regardless of whether @@ -567,6 +525,7 @@ func (h *ExtensionHost) Shutdown() { } func (h *ExtensionHost) shutdown() { + h.shuttingDown.Store(true) // Prevent any queued but not yet started operations from running, // block new operations from being enqueued, and wait for the // currently executing operation (if any) to finish. diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go index 4c497dd99..01122073a 100644 --- a/ipn/ipnlocal/extension_host_test.go +++ b/ipn/ipnlocal/extension_host_test.go @@ -576,30 +576,6 @@ func TestExtensionHostProfileStateChangeCallback(t *testing.T) { {Profile: &ipn.LoginProfile{ID: "profile-3"}, SameNode: true}, }, }, - { - // Override the default InitHook used in the test to unregister the callback - // after the first call. - name: "Register/Once", - ext: &testExtension{ - InitHook: func(e *testExtension) error { - var unregister func() - handler := func(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { - makeStateChangeAppender(e)(profile, prefs, sameNode) - unregister() - } - unregister = e.host.Profiles().RegisterProfileStateChangeCallback(handler) - return nil - }, - }, - stateCalls: []stateChange{ - {Profile: &ipn.LoginProfile{ID: "profile-1"}}, - {Profile: &ipn.LoginProfile{ID: "profile-2"}}, - {Profile: &ipn.LoginProfile{ID: "profile-3"}}, - }, - wantChanges: []stateChange{ // only the first call is received by the callback - {Profile: &ipn.LoginProfile{ID: "profile-1"}}, - }, - }, { // Ensure that ipn.Prefs are passed to the callback. name: "CheckPrefs", @@ -770,7 +746,7 @@ func TestExtensionHostProfileStateChangeCallback(t *testing.T) { tt.ext.InitHook = func(e *testExtension) error { // Create and register the callback on init. handler := makeStateChangeAppender(e) - e.Cleanup(e.host.Profiles().RegisterProfileStateChangeCallback(handler)) + e.host.Profiles().RegisterProfileStateChangeCallback(handler) return nil } } @@ -891,14 +867,15 @@ func TestBackgroundProfileResolver(t *testing.T) { } } - h := newExtensionHostForTest[ipnext.Extension](t, &testBackend{}, true) + h := newExtensionHostForTest[ipnext.Extension](t, &testBackend{}, false) // Register the resolvers with the host. // This is typically done by the extensions themselves, // but we do it here for testing purposes. for _, r := range tt.resolvers { - t.Cleanup(h.Profiles().RegisterBackgroundProfileResolver(r)) + h.Profiles().RegisterBackgroundProfileResolver(r) } + h.Init() // Call the resolver to get the profile. 
gotProfile := h.DetermineBackgroundProfile(pm) @@ -989,7 +966,7 @@ func TestAuditLogProviders(t *testing.T) { } } ext.InitHook = func(e *testExtension) error { - e.Cleanup(e.host.RegisterAuditLogProvider(provider)) + e.host.RegisterAuditLogProvider(provider) return nil } exts = append(exts, ext) @@ -1168,8 +1145,6 @@ type testExtension struct { // It can be accessed by tests using [setTestExtensionState], // [getTestExtensionStateOk] and [getTestExtensionState]. state map[string]any - // cleanup are functions to be called on shutdown. - cleanup []func() } var _ ipnext.Extension = (*testExtension)(nil) @@ -1212,22 +1187,11 @@ func (e *testExtension) InitCalled() bool { return e.initCnt.Load() != 0 } -func (e *testExtension) Cleanup(f func()) { - e.mu.Lock() - e.cleanup = append(e.cleanup, f) - e.mu.Unlock() -} - // Shutdown implements [ipnext.Extension]. func (e *testExtension) Shutdown() (err error) { e.t.Helper() e.mu.Lock() - cleanup := e.cleanup - e.cleanup = nil e.mu.Unlock() - for _, f := range cleanup { - f() - } if e.ShutdownHook != nil { err = e.ShutdownHook(e) } From 3d8533b5d07faca90b9d3edcdb1555449bcd89db Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 24 Apr 2025 13:55:39 -0700 Subject: [PATCH 0789/1708] ipn/{ipnext,ipnlocal}: add a SafeBackend interface Updates #12614 Change-Id: I197e673666e86ea74c19e3935ed71aec269b6c94 Co-authored-by: Nick Khyl Signed-off-by: Brad Fitzpatrick --- feature/relayserver/relayserver.go | 3 +-- feature/taildrop/ext.go | 16 ++++++------- ipn/auditlog/extension.go | 3 +-- ipn/desktop/extension.go | 3 +-- ipn/ipnext/ipnext.go | 24 +++++++++++++++---- ipn/ipnlocal/extension_host.go | 36 +++++++++++++++++++++-------- ipn/ipnlocal/extension_host_test.go | 14 +++++++++-- ipn/ipnlocal/local.go | 9 +++++++- 8 files changed, 76 insertions(+), 32 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 8e734bec9..f73689245 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -20,7 +20,6 @@ import ( "tailscale.com/ipn/ipnlocal" "tailscale.com/net/udprelay" "tailscale.com/tailcfg" - "tailscale.com/tsd" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/ptr" @@ -40,7 +39,7 @@ func init() { // newExtension is an [ipnext.NewExtensionFn] that creates a new relay server // extension. It is registered with [ipnext.RegisterExtension] if the package is // imported. 
-func newExtension(logf logger.Logf, _ *tsd.System) (ipnext.Extension, error) { +func newExtension(logf logger.Logf, _ ipnext.SafeBackend) (ipnext.Extension, error) { return &extension{logf: logger.WithPrefix(logf, featureName+": ")}, nil } diff --git a/feature/taildrop/ext.go b/feature/taildrop/ext.go index 5d22cfb9b..b7cfdec72 100644 --- a/feature/taildrop/ext.go +++ b/feature/taildrop/ext.go @@ -7,7 +7,6 @@ import ( "tailscale.com/ipn/ipnext" "tailscale.com/ipn/ipnlocal" "tailscale.com/taildrop" - "tailscale.com/tsd" "tailscale.com/types/logger" ) @@ -15,7 +14,7 @@ func init() { ipnext.RegisterExtension("taildrop", newExtension) } -func newExtension(logf logger.Logf, _ *tsd.System) (ipnext.Extension, error) { +func newExtension(logf logger.Logf, b ipnext.SafeBackend) (ipnext.Extension, error) { return &extension{ logf: logger.WithPrefix(logf, "taildrop: "), }, nil @@ -23,7 +22,7 @@ func newExtension(logf logger.Logf, _ *tsd.System) (ipnext.Extension, error) { type extension struct { logf logger.Logf - lb *ipnlocal.LocalBackend + sb ipnext.SafeBackend mgr *taildrop.Manager } @@ -32,11 +31,6 @@ func (e *extension) Name() string { } func (e *extension) Init(h ipnext.Host) error { - type I interface { - Backend() ipnlocal.Backend - } - e.lb = h.(I).Backend().(*ipnlocal.LocalBackend) - // TODO(bradfitz): move init of taildrop.Manager from ipnlocal/peerapi.go to // here e.mgr = nil @@ -45,7 +39,11 @@ func (e *extension) Init(h ipnext.Host) error { } func (e *extension) Shutdown() error { - if mgr, err := e.lb.TaildropManager(); err == nil { + lb, ok := e.sb.(*ipnlocal.LocalBackend) + if !ok { + return nil + } + if mgr, err := lb.TaildropManager(); err == nil { mgr.Shutdown() } else { e.logf("taildrop: failed to shutdown taildrop manager: %v", err) diff --git a/ipn/auditlog/extension.go b/ipn/auditlog/extension.go index 90014b72e..509ab61a8 100644 --- a/ipn/auditlog/extension.go +++ b/ipn/auditlog/extension.go @@ -16,7 +16,6 @@ import ( "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnext" "tailscale.com/tailcfg" - "tailscale.com/tsd" "tailscale.com/types/lazy" "tailscale.com/types/logger" ) @@ -52,7 +51,7 @@ type extension struct { // newExtension is an [ipnext.NewExtensionFn] that creates a new audit log extension. // It is registered with [ipnext.RegisterExtension] if the package is imported. -func newExtension(logf logger.Logf, _ *tsd.System) (ipnext.Extension, error) { +func newExtension(logf logger.Logf, _ ipnext.SafeBackend) (ipnext.Extension, error) { return &extension{logf: logger.WithPrefix(logf, featureName+": ")}, nil } diff --git a/ipn/desktop/extension.go b/ipn/desktop/extension.go index 057b4cfe6..6c59b1e5a 100644 --- a/ipn/desktop/extension.go +++ b/ipn/desktop/extension.go @@ -17,7 +17,6 @@ import ( "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" - "tailscale.com/tsd" "tailscale.com/types/logger" "tailscale.com/util/syspolicy" ) @@ -53,7 +52,7 @@ type desktopSessionsExt struct { // newDesktopSessionsExt returns a new [desktopSessionsExt], // or an error if a [SessionManager] cannot be created. // It is registered with [ipnext.RegisterExtension] if the package is imported. 
-func newDesktopSessionsExt(logf logger.Logf, sys *tsd.System) (ipnext.Extension, error) { +func newDesktopSessionsExt(logf logger.Logf, _ ipnext.SafeBackend) (ipnext.Extension, error) { logf = logger.WithPrefix(logf, featureName+": ") sm, err := NewSessionManager(logf) if err != nil { diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index 5c35192e4..b926ee23a 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -13,6 +13,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/tsd" + "tailscale.com/tstime" "tailscale.com/types/logger" "tailscale.com/types/views" "tailscale.com/util/mak" @@ -52,7 +53,7 @@ type Extension interface { // If the extension should be skipped at runtime, it must return either [SkipExtension] // or a wrapped [SkipExtension]. Any other error returned is fatal and will prevent // the LocalBackend from starting. -type NewExtensionFn func(logger.Logf, *tsd.System) (Extension, error) +type NewExtensionFn func(logger.Logf, SafeBackend) (Extension, error) // SkipExtension is an error returned by [NewExtensionFn] to indicate that the extension // should be skipped rather than prevent the LocalBackend from starting. @@ -78,8 +79,8 @@ func (d *Definition) Name() string { } // MakeExtension instantiates the extension. -func (d *Definition) MakeExtension(logf logger.Logf, sys *tsd.System) (Extension, error) { - ext, err := d.newFn(logf, sys) +func (d *Definition) MakeExtension(logf logger.Logf, sb SafeBackend) (Extension, error) { + ext, err := d.newFn(logf, sb) if err != nil { return nil, err } @@ -130,7 +131,7 @@ func Extensions() views.Slice[*Definition] { func DefinitionForTest(ext Extension) *Definition { return &Definition{ name: ext.Name(), - newFn: func(logger.Logf, *tsd.System) (Extension, error) { return ext, nil }, + newFn: func(logger.Logf, SafeBackend) (Extension, error) { return ext, nil }, } } @@ -140,7 +141,7 @@ func DefinitionForTest(ext Extension) *Definition { func DefinitionWithErrForTest(name string, err error) *Definition { return &Definition{ name: name, - newFn: func(logger.Logf, *tsd.System) (Extension, error) { return nil, err }, + newFn: func(logger.Logf, SafeBackend) (Extension, error) { return nil, err }, } } @@ -203,6 +204,19 @@ type Host interface { // It is a runtime error to register a nil provider or call after the host // has been initialized. RegisterControlClientCallback(NewControlClientCallback) + + // SendNotifyAsync sends a notification to the IPN bus, + // typically to the GUI client. + SendNotifyAsync(ipn.Notify) +} + +// SafeBackend is a subset of the [ipnlocal.LocalBackend] type's methods that +// are safe to call from extension hooks at any time (even hooks called while +// LocalBackend's internal mutex is held). +type SafeBackend interface { + Sys() *tsd.System + Clock() tstime.Clock + TailscaleVarRoot() string } // ExtensionServices provides access to the [Host]'s extension management services, diff --git a/ipn/ipnlocal/extension_host.go b/ipn/ipnlocal/extension_host.go index a7a764ebc..85da27ab0 100644 --- a/ipn/ipnlocal/extension_host.go +++ b/ipn/ipnlocal/extension_host.go @@ -20,7 +20,6 @@ import ( "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnext" "tailscale.com/tailcfg" - "tailscale.com/tsd" "tailscale.com/types/logger" "tailscale.com/util/execqueue" "tailscale.com/util/testenv" @@ -131,15 +130,32 @@ type Backend interface { // SwitchToBestProfile switches to the best profile for the current state of the system. // The reason indicates why the profile is being switched. 
SwitchToBestProfile(reason string) + + SendNotify(ipn.Notify) + ipnext.SafeBackend } // NewExtensionHost returns a new [ExtensionHost] which manages registered extensions for the given backend. // The extensions are instantiated, but are not initialized until [ExtensionHost.Init] is called. // It returns an error if instantiating any extension fails. +func NewExtensionHost(logf logger.Logf, b Backend) (*ExtensionHost, error) { + return newExtensionHost(logf, b) +} + +func NewExtensionHostForTest(logf logger.Logf, b Backend, overrideExts ...*ipnext.Definition) (*ExtensionHost, error) { + if !testenv.InTest() { + panic("use outside of test") + } + return newExtensionHost(logf, b, overrideExts...) +} + +// newExtensionHost is the shared implementation of [NewExtensionHost] and +// [NewExtensionHostForTest]. // -// If overrideExts is non-nil, the registered extensions are ignored and the provided extensions are used instead. -// Overriding extensions is primarily used for testing. -func NewExtensionHost(logf logger.Logf, sys *tsd.System, b Backend, overrideExts ...*ipnext.Definition) (_ *ExtensionHost, err error) { +// If overrideExts is non-nil, the registered extensions are ignored and the +// provided extensions are used instead. Overriding extensions is primarily used +// for testing. +func newExtensionHost(logf logger.Logf, b Backend, overrideExts ...*ipnext.Definition) (_ *ExtensionHost, err error) { host := &ExtensionHost{ b: b, logf: logger.WithPrefix(logf, "ipnext: "), @@ -172,7 +188,7 @@ func NewExtensionHost(logf logger.Logf, sys *tsd.System, b Backend, overrideExts host.allExtensions = make([]ipnext.Extension, 0, numExts) for _, d := range exts { - ext, err := d.MakeExtension(logf, sys) + ext, err := d.MakeExtension(logf, b) if errors.Is(err, ipnext.SkipExtension) { // The extension wants to be skipped. host.logf("%q: %v", d.Name(), err) @@ -334,12 +350,14 @@ func (h *ExtensionHost) SwitchToBestProfileAsync(reason string) { }) } -// Backend returns the [Backend] used by the extension host. -func (h *ExtensionHost) Backend() Backend { +// SendNotifyAsync implements [ipnext.Host]. +func (h *ExtensionHost) SendNotifyAsync(n ipn.Notify) { if h == nil { - return nil + return } - return h.b + h.enqueueBackendOperation(func(b Backend) { + b.SendNotify(n) + }) } // addFuncHook appends non-nil fn to hooks. diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go index 01122073a..31b38196a 100644 --- a/ipn/ipnlocal/extension_host_test.go +++ b/ipn/ipnlocal/extension_host_test.go @@ -27,7 +27,9 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tsd" "tailscale.com/tstest" + "tailscale.com/tstime" "tailscale.com/types/key" + "tailscale.com/types/lazy" "tailscale.com/types/persist" "tailscale.com/util/must" ) @@ -284,7 +286,7 @@ func TestNewExtensionHost(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() logf := tstest.WhileTestRunningLogger(t) - h, err := NewExtensionHost(logf, tsd.NewSystem(), &testBackend{}, tt.defs...) + h, err := NewExtensionHostForTest(logf, &testBackend{}, tt.defs...) if gotErr := err != nil; gotErr != tt.wantErr { t.Errorf("NewExtensionHost: gotErr %v(%v); wantErr %v", gotErr, err, tt.wantErr) } @@ -1095,7 +1097,7 @@ func newExtensionHostForTest[T ipnext.Extension](t *testing.T, b Backend, initia } defs[i] = ipnext.DefinitionForTest(ext) } - h, err := NewExtensionHost(logf, tsd.NewSystem(), b, defs...) + h, err := NewExtensionHostForTest(logf, b, defs...) 
if err != nil { t.Fatalf("NewExtensionHost: %v", err) } @@ -1320,6 +1322,7 @@ func (q *testExecQueue) Wait(context.Context) error { return nil } // testBackend implements [ipnext.Backend] for testing purposes // by calling the provided hooks when its methods are called. type testBackend struct { + lazySys lazy.SyncValue[*tsd.System] switchToBestProfileHook func(reason string) // mu protects the backend state. @@ -1328,6 +1331,13 @@ type testBackend struct { mu sync.Mutex } +func (b *testBackend) Clock() tstime.Clock { return tstime.StdClock{} } +func (b *testBackend) Sys() *tsd.System { + return b.lazySys.Get(tsd.NewSystem) +} +func (b *testBackend) SendNotify(ipn.Notify) { panic("not implemented") } +func (b *testBackend) TailscaleVarRoot() string { panic("not implemented") } + func (b *testBackend) SwitchToBestProfile(reason string) { b.mu.Lock() defer b.mu.Unlock() diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ef5ec267f..d60f05b11 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -525,7 +525,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } } - if b.extHost, err = NewExtensionHost(logf, sys, b); err != nil { + if b.extHost, err = NewExtensionHost(logf, b); err != nil { return nil, fmt.Errorf("failed to create extension host: %w", err) } b.pm.SetExtensionHost(b.extHost) @@ -589,6 +589,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } func (b *LocalBackend) Clock() tstime.Clock { return b.clock } +func (b *LocalBackend) Sys() *tsd.System { return b.sys } // FindExtensionByName returns an active extension with the given name, // or nil if no such extension exists. @@ -3187,6 +3188,12 @@ func (b *LocalBackend) send(n ipn.Notify) { b.sendTo(n, allClients) } +// SendNotify sends a notification to the IPN bus, +// typically to the GUI client. +func (b *LocalBackend) SendNotify(n ipn.Notify) { + b.send(n) +} + // notificationTarget describes a notification recipient. // A zero value is valid and indicate that the notification // should be broadcast to all active [watchSession]s. From 3bc10ea58598164625dff5927bbe33fc810712e9 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 25 Apr 2025 07:37:15 -0700 Subject: [PATCH 0790/1708] ipn/ipnext: remove some interface indirection to add hooks Now that 25c4dc5fd70 removed unregistering hooks and made them into slices, just expose the slices and remove the setter funcs. This removes boilerplate ceremony around adding new hooks. This does export the hooks and make them mutable at runtime in theory, but that'd be a data race. If we really wanted to lock it down in the future we could make the feature.Hooks slice type be an opaque struct with an All() iterator and a "frozen" bool and we could freeze all the hooks after init. But that doesn't seem worth it. This means that hook registration is also now all in one place, rather than being mixed into ProfilesService vs ipnext.Host vs FooService vs BarService. I view that as a feature. When we have a ton of hooks and the list is long, then we can rearrange the fields in the Hooks struct as needed, or make sub-structs, or big comments. 
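For illustration, a minimal sketch of what hook registration looks like from an
extension's Init after this change (the extension type and method names here are
hypothetical; the Hooks fields and the Add method are the ones introduced in this
patch):

	func (e *myExtension) Init(host ipnext.Host) error {
		// Before: host.Profiles().RegisterProfileStateChangeCallback(e.profileStateChanged)
		host.Hooks().ProfileStateChange.Add(e.profileStateChanged)
		// Before: host.RegisterAuditLogProvider(e.getCurrentLogger)
		host.Hooks().AuditLoggers.Add(e.getCurrentLogger)
		return nil
	}

Callers such as ExtensionHost then simply range over the exported slice, e.g.
for _, cb := range h.hooks.ProfileStateChange { cb(...) }.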
Updates #12614 Change-Id: I05ce5baa45a61e79c04591c2043c05f3288d8587 Signed-off-by: Brad Fitzpatrick --- feature/feature.go | 18 ++++++ feature/relayserver/relayserver.go | 2 +- ipn/auditlog/extension.go | 6 +- ipn/desktop/extension.go | 2 +- ipn/ipnext/ipnext.go | 73 ++++++++++++------------ ipn/ipnlocal/extension_host.go | 88 +++++++---------------------- ipn/ipnlocal/extension_host_test.go | 6 +- 7 files changed, 83 insertions(+), 112 deletions(-) diff --git a/feature/feature.go b/feature/feature.go index 6415cfc4a..6c8cd7eae 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -52,3 +52,21 @@ func (h *Hook[Func]) Get() Func { } return h.f } + +// Hooks is a slice of funcs. +// +// As opposed to a single Hook, this is meant to be used when +// multiple parties are able to install the same hook. +type Hooks[Func any] []Func + +// Add adds a hook to the list of hooks. +// +// Add should only be called during early program +// startup before Tailscale has started. +// It is not safe for concurrent use. +func (h *Hooks[Func]) Add(f Func) { + if reflect.ValueOf(f).IsZero() { + panic("Add with zero value") + } + *h = append(*h, f) +} diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index f73689245..e5c2afc17 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -71,7 +71,7 @@ func (e *extension) Name() string { func (e *extension) Init(host ipnext.Host) error { profile, prefs := host.Profiles().CurrentProfileState() e.profileStateChanged(profile, prefs, false) - host.Profiles().RegisterProfileStateChangeCallback(e.profileStateChanged) + host.Hooks().ProfileStateChange.Add(e.profileStateChanged) // TODO(jwhited): callback for netmap/nodeattr changes (e.hasNodeAttrRelayServer) return nil } diff --git a/ipn/auditlog/extension.go b/ipn/auditlog/extension.go index 509ab61a8..f73681db0 100644 --- a/ipn/auditlog/extension.go +++ b/ipn/auditlog/extension.go @@ -63,9 +63,9 @@ func (e *extension) Name() string { // Init implements [ipnext.Extension] by registering callbacks and providers // for the duration of the extension's lifetime. func (e *extension) Init(h ipnext.Host) error { - h.RegisterControlClientCallback(e.controlClientChanged) - h.Profiles().RegisterProfileStateChangeCallback(e.profileChanged) - h.RegisterAuditLogProvider(e.getCurrentLogger) + h.Hooks().NewControlClient.Add(e.controlClientChanged) + h.Hooks().ProfileStateChange.Add(e.profileChanged) + h.Hooks().AuditLoggers.Add(e.getCurrentLogger) return nil } diff --git a/ipn/desktop/extension.go b/ipn/desktop/extension.go index 6c59b1e5a..f204a90de 100644 --- a/ipn/desktop/extension.go +++ b/ipn/desktop/extension.go @@ -77,7 +77,7 @@ func (e *desktopSessionsExt) Init(host ipnext.Host) (err error) { if err != nil { return fmt.Errorf("session callback registration failed: %w", err) } - host.Profiles().RegisterBackgroundProfileResolver(e.getBackgroundProfile) + host.Hooks().BackgroundProfileResolvers.Add(e.getBackgroundProfile) e.cleanup = []func(){unregisterSessionCb} return nil } diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index b926ee23a..a671874d1 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -10,6 +10,7 @@ import ( "fmt" "tailscale.com/control/controlclient" + "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/tsd" @@ -182,14 +183,6 @@ type Host interface { // Profiles returns the host's [ProfileServices]. 
Profiles() ProfileServices - // RegisterAuditLogProvider registers an audit log provider, - // which returns a function to be called when an auditable action - // is about to be performed. - // - // It is a runtime error to register a nil provider or call after the host - // has been initialized. - RegisterAuditLogProvider(AuditLogProvider) - // AuditLogger returns a function that calls all currently registered audit loggers. // The function fails if any logger returns an error, indicating that the action // cannot be logged and must not be performed. @@ -198,12 +191,9 @@ type Host interface { // the time of the call and must not be persisted. AuditLogger() ipnauth.AuditLogFunc - // RegisterControlClientCallback registers a function to be called every time a new - // control client is created. - // - // It is a runtime error to register a nil provider or call after the host - // has been initialized. - RegisterControlClientCallback(NewControlClientCallback) + // Hooks returns a non-nil pointer to a [Hooks] struct. + // Hooks must not be modified concurrently or after Tailscale has started. + Hooks() *Hooks // SendNotifyAsync sends a notification to the IPN bus, // typically to the GUI client. @@ -269,28 +259,6 @@ type ProfileServices interface { // to a client connecting or disconnecting or a change in the desktop // session state. It is used for logging. SwitchToBestProfileAsync(reason string) - - // RegisterBackgroundProfileResolver registers a function to be used when - // resolving the background profile. - // - // It is a runtime error to register a nil provider or call after the host - // has been initialized. - // - // TODO(nickkhyl): allow specifying some kind of priority/altitude for the resolver. - // TODO(nickkhyl): make it a "profile resolver" instead of a "background profile resolver". - // The concepts of the "current user", "foreground profile" and "background profile" - // only exist on Windows, and we're moving away from them anyway. - RegisterBackgroundProfileResolver(ProfileResolver) - - // RegisterProfileStateChangeCallback registers a function to be called when the current - // [ipn.LoginProfile] or its [ipn.Prefs] change. - // - // To get the initial profile or prefs, use [ProfileServices.CurrentProfileState] - // or [ProfileServices.CurrentPrefs] from the extension's [Extension.Init]. - // - // It is a runtime error to register a nil provider or call after the host - // has been initialized. - RegisterProfileStateChangeCallback(ProfileStateChangeCallback) } // ProfileStore provides read-only access to available login profiles and their preferences. @@ -354,3 +322,36 @@ type ProfileStateChangeCallback func(_ ipn.LoginProfileView, _ ipn.PrefsView, sa // It returns a function to be called when the cc is being shut down, // or nil if no cleanup is needed. type NewControlClientCallback func(controlclient.Client, ipn.LoginProfileView) (cleanup func()) + +// Hooks is a collection of hooks that extensions can add to (non-concurrently) +// during program initialization and can be called by LocalBackend and others at +// runtime. +// +// Each hook has its own rules about when it's called and what environment it +// has access to and what it's allowed to do. +type Hooks struct { + // ProfileStateChange are callbacks that are invoked when the current login profile + // or its [ipn.Prefs] change, after those changes have been made. 
The current login profile + // may be changed either because of a profile switch, or because the profile information + // was updated by [LocalBackend.SetControlClientStatus], including when the profile + // is first populated and persisted. + ProfileStateChange feature.Hooks[ProfileStateChangeCallback] + + // BackgroundProfileResolvers are registered background profile resolvers. + // They're used to determine the profile to use when no GUI/CLI client is connected. + // + // TODO(nickkhyl): allow specifying some kind of priority/altitude for the resolver. + // TODO(nickkhyl): make it a "profile resolver" instead of a "background profile resolver". + // The concepts of the "current user", "foreground profile" and "background profile" + // only exist on Windows, and we're moving away from them anyway. + BackgroundProfileResolvers feature.Hooks[ProfileResolver] + + // AuditLoggers are registered [AuditLogProvider]s. + // Each provider is called to get an [ipnauth.AuditLogFunc] when an auditable action + // is about to be performed. If an audit logger returns an error, the action is denied. + AuditLoggers feature.Hooks[AuditLogProvider] + + // NewControlClient are the functions to be called when a new control client + // is created. It is called with the LocalBackend locked. + NewControlClient feature.Hooks[NewControlClientCallback] +} diff --git a/ipn/ipnlocal/extension_host.go b/ipn/ipnlocal/extension_host.go index 85da27ab0..6aa42ba12 100644 --- a/ipn/ipnlocal/extension_host.go +++ b/ipn/ipnlocal/extension_host.go @@ -64,8 +64,9 @@ import ( // and to further reduce the risk of accessing unexported methods or fields of [LocalBackend], the host interacts // with it via the [Backend] interface. type ExtensionHost struct { - b Backend - logf logger.Logf // prefixed with "ipnext:" + b Backend + hooks ipnext.Hooks + logf logger.Logf // prefixed with "ipnext:" // allExtensions holds the extensions in the order they were registered, // including those that have not yet attempted initialization or have failed to initialize. @@ -84,22 +85,6 @@ type ExtensionHost struct { // doEnqueueBackendOperation adds an asynchronous [LocalBackend] operation to the workQueue. doEnqueueBackendOperation func(func(Backend)) - // profileStateChangeCbs are callbacks that are invoked when the current login profile - // or its [ipn.Prefs] change, after those changes have been made. The current login profile - // may be changed either because of a profile switch, or because the profile information - // was updated by [LocalBackend.SetControlClientStatus], including when the profile - // is first populated and persisted. - profileStateChangeCbs []ipnext.ProfileStateChangeCallback - // backgroundProfileResolvers are registered background profile resolvers. - // They're used to determine the profile to use when no GUI/CLI client is connected. - backgroundProfileResolvers []ipnext.ProfileResolver - // auditLoggers are registered [AuditLogProvider]s. - // Each provider is called to get an [ipnauth.AuditLogFunc] when an auditable action - // is about to be performed. If an audit logger returns an error, the action is denied. - auditLoggers []ipnext.AuditLogProvider - // newControlClientCbs are the functions to be called when a new control client is created. - newControlClientCbs []ipnext.NewControlClientCallback - shuttingDown atomic.Bool // mu protects the following fields. 
@@ -208,6 +193,15 @@ func (h *ExtensionHost) Init() { } } +var zeroHooks ipnext.Hooks + +func (h *ExtensionHost) Hooks() *ipnext.Hooks { + if h == nil { + return &zeroHooks + } + return &h.hooks +} + func (h *ExtensionHost) init() { defer h.initDone.Store(true) @@ -360,24 +354,6 @@ func (h *ExtensionHost) SendNotifyAsync(n ipn.Notify) { }) } -// addFuncHook appends non-nil fn to hooks. -func addFuncHook[F any](h *ExtensionHost, hooks *[]F, fn F) { - if h.initDone.Load() { - panic("invalid callback register after init") - } - if reflect.ValueOf(fn).IsZero() { - panic("nil function hook") - } - *hooks = append(*hooks, fn) -} - -// RegisterProfileStateChangeCallback implements [ipnext.ProfileServices]. -func (h *ExtensionHost) RegisterProfileStateChangeCallback(cb ipnext.ProfileStateChangeCallback) { - if h != nil { - addFuncHook(h, &h.profileStateChangeCbs, cb) - } -} - // NotifyProfileChange invokes registered profile state change callbacks // and updates the current profile and prefs in the host. // It strips private keys from the [ipn.Prefs] before preserving @@ -397,7 +373,7 @@ func (h *ExtensionHost) NotifyProfileChange(profile ipn.LoginProfileView, prefs h.currentProfile = profile h.mu.Unlock() - for _, cb := range h.profileStateChangeCbs { + for _, cb := range h.hooks.ProfileStateChange { cb(profile, prefs, sameNode) } } @@ -421,18 +397,11 @@ func (h *ExtensionHost) NotifyProfilePrefsChanged(profile ipn.LoginProfileView, // Get the callbacks to be invoked. h.mu.Unlock() - for _, cb := range h.profileStateChangeCbs { + for _, cb := range h.hooks.ProfileStateChange { cb(profile, newPrefs, true) } } -// RegisterBackgroundProfileResolver implements [ipnext.ProfileServices]. -func (h *ExtensionHost) RegisterBackgroundProfileResolver(resolver ipnext.ProfileResolver) { - if h != nil { - addFuncHook(h, &h.backgroundProfileResolvers, resolver) - } -} - func (h *ExtensionHost) active() bool { return h != nil && !h.shuttingDown.Load() } @@ -455,7 +424,7 @@ func (h *ExtensionHost) DetermineBackgroundProfile(profiles ipnext.ProfileStore) // Attempt to resolve the background profile using the registered // background profile resolvers (e.g., [ipn/desktop.desktopSessionsExt] on Windows). - for _, resolver := range h.backgroundProfileResolvers { + for _, resolver := range h.hooks.BackgroundProfileResolvers { if profile := resolver(profiles); profile.Valid() { return profile } @@ -466,37 +435,20 @@ func (h *ExtensionHost) DetermineBackgroundProfile(profiles ipnext.ProfileStore) return ipn.LoginProfileView{} } -// RegisterControlClientCallback implements [ipnext.Host]. -func (h *ExtensionHost) RegisterControlClientCallback(cb ipnext.NewControlClientCallback) { - if h != nil { - addFuncHook(h, &h.newControlClientCbs, cb) - } -} - // NotifyNewControlClient invokes all registered control client callbacks. // It returns callbacks to be executed when the control client shuts down. 
func (h *ExtensionHost) NotifyNewControlClient(cc controlclient.Client, profile ipn.LoginProfileView) (ccShutdownCbs []func()) { if !h.active() { return nil } - if len(h.newControlClientCbs) > 0 { - ccShutdownCbs = make([]func(), 0, len(h.newControlClientCbs)) - for _, cb := range h.newControlClientCbs { - if shutdown := cb(cc, profile); shutdown != nil { - ccShutdownCbs = append(ccShutdownCbs, shutdown) - } + for _, cb := range h.hooks.NewControlClient { + if shutdown := cb(cc, profile); shutdown != nil { + ccShutdownCbs = append(ccShutdownCbs, shutdown) } } return ccShutdownCbs } -// RegisterAuditLogProvider implements [ipnext.Host]. -func (h *ExtensionHost) RegisterAuditLogProvider(provider ipnext.AuditLogProvider) { - if h != nil { - addFuncHook(h, &h.auditLoggers, provider) - } -} - // AuditLogger returns a function that reports an auditable action // to all registered audit loggers. It fails if any of them returns an error, // indicating that the action cannot be logged and must not be performed. @@ -510,8 +462,8 @@ func (h *ExtensionHost) AuditLogger() ipnauth.AuditLogFunc { if !h.active() { return func(tailcfg.ClientAuditAction, string) error { return nil } } - loggers := make([]ipnauth.AuditLogFunc, 0, len(h.auditLoggers)) - for _, provider := range h.auditLoggers { + loggers := make([]ipnauth.AuditLogFunc, 0, len(h.hooks.AuditLoggers)) + for _, provider := range h.hooks.AuditLoggers { loggers = append(loggers, provider()) } return func(action tailcfg.ClientAuditAction, details string) error { diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go index 31b38196a..aa4a27d45 100644 --- a/ipn/ipnlocal/extension_host_test.go +++ b/ipn/ipnlocal/extension_host_test.go @@ -748,7 +748,7 @@ func TestExtensionHostProfileStateChangeCallback(t *testing.T) { tt.ext.InitHook = func(e *testExtension) error { // Create and register the callback on init. handler := makeStateChangeAppender(e) - e.host.Profiles().RegisterProfileStateChangeCallback(handler) + e.host.Hooks().ProfileStateChange.Add(handler) return nil } } @@ -875,7 +875,7 @@ func TestBackgroundProfileResolver(t *testing.T) { // This is typically done by the extensions themselves, // but we do it here for testing purposes. for _, r := range tt.resolvers { - h.Profiles().RegisterBackgroundProfileResolver(r) + h.Hooks().BackgroundProfileResolvers.Add(r) } h.Init() @@ -968,7 +968,7 @@ func TestAuditLogProviders(t *testing.T) { } } ext.InitHook = func(e *testExtension) error { - e.host.RegisterAuditLogProvider(provider) + e.host.Hooks().AuditLoggers.Add(provider) return nil } exts = append(exts, ext) From dbf13976d3cd1fd7968b18965d01919f75e88612 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 25 Apr 2025 09:42:52 -0700 Subject: [PATCH 0791/1708] types/mapx, ipn/ipnext: add ordered map, akin to set.Slice We had an ordered set type (set.Slice) already but we occasionally want to do the same thing with a map, preserving the order things were added, so add that too, as mapsx.OrderedMap[K, V], and then use in ipnext. 
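A self-contained usage sketch of the new mapx.OrderedMap type (keys and values are
arbitrary):

	package main

	import (
		"fmt"

		"tailscale.com/types/mapx"
	)

	func main() {
		var m mapx.OrderedMap[string, int] // zero value is ready to use
		m.Set("b", 2)
		m.Set("a", 1)
		m.Set("b", 20) // updates the value; "b" keeps its original position
		for k, v := range m.All() {
			fmt.Println(k, v) // prints "b 20", then "a 1"
		}
	}

Iteration via All, Keys, and Values always follows insertion order; Delete is O(n)
in the number of keys, so the type is intended for small or grow-only maps such as
the extension registry.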
Updates #12614 Change-Id: I85e6f5e11035571a28316441075e952aef9a0863 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + ipn/ipnext/ipnext.go | 27 +++----- ipn/ipnlocal/extension_host.go | 9 +-- types/mapx/ordered.go | 111 +++++++++++++++++++++++++++++++++ types/mapx/ordered_test.go | 56 +++++++++++++++++ 6 files changed, 182 insertions(+), 23 deletions(-) create mode 100644 types/mapx/ordered.go create mode 100644 types/mapx/ordered_test.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 37a1be6e3..4cc4a8d46 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -921,6 +921,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/lazy from tailscale.com/ipn/ipnlocal+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ + tailscale.com/types/mapx from tailscale.com/ipn/ipnext tailscale.com/types/netlogtype from tailscale.com/net/connstats+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 31881822f..329c00e93 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -373,6 +373,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/lazy from tailscale.com/ipn/ipnlocal+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ + tailscale.com/types/mapx from tailscale.com/ipn/ipnext tailscale.com/types/netlogtype from tailscale.com/net/connstats+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index a671874d1..bd8d3d79c 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -8,6 +8,7 @@ package ipnext import ( "errors" "fmt" + "iter" "tailscale.com/control/controlclient" "tailscale.com/feature" @@ -16,8 +17,7 @@ import ( "tailscale.com/tsd" "tailscale.com/tstime" "tailscale.com/types/logger" - "tailscale.com/types/views" - "tailscale.com/util/mak" + "tailscale.com/types/mapx" ) // Extension augments LocalBackend with additional functionality. @@ -91,13 +91,9 @@ func (d *Definition) MakeExtension(logf logger.Logf, sb SafeBackend) (Extension, return ext, nil } -// extensionsByName is a map of registered extensions, +// extensions is a map of registered extensions, // where the key is the name of the extension. -var extensionsByName map[string]*Definition - -// extensionsByOrder is a slice of registered extensions, -// in the order they were registered. -var extensionsByOrder []*Definition +var extensions mapx.OrderedMap[string, *Definition] // RegisterExtension registers a function that instantiates an [Extension]. // The name must be the same as returned by the extension's [Extension.Name]. 
@@ -111,19 +107,16 @@ func RegisterExtension(name string, newExt NewExtensionFn) { if newExt == nil { panic(fmt.Sprintf("ipnext: newExt is nil: %q", name)) } - if _, ok := extensionsByName[name]; ok { + if extensions.Contains(name) { panic(fmt.Sprintf("ipnext: duplicate extensions: %q", name)) } - ext := &Definition{name, newExt} - mak.Set(&extensionsByName, name, ext) - extensionsByOrder = append(extensionsByOrder, ext) + extensions.Set(name, &Definition{name, newExt}) } -// Extensions returns a read-only view of the extensions -// registered via [RegisterExtension]. It preserves the order -// in which the extensions were registered. -func Extensions() views.Slice[*Definition] { - return views.SliceOf(extensionsByOrder) +// Extensions iterates over the extensions in the order they were registered +// via [RegisterExtension]. +func Extensions() iter.Seq[*Definition] { + return extensions.Values() } // DefinitionForTest returns a [Definition] for the specified [Extension]. diff --git a/ipn/ipnlocal/extension_host.go b/ipn/ipnlocal/extension_host.go index 6aa42ba12..bf0e6091c 100644 --- a/ipn/ipnlocal/extension_host.go +++ b/ipn/ipnlocal/extension_host.go @@ -162,17 +162,14 @@ func newExtensionHost(logf logger.Logf, b Backend, overrideExts ...*ipnext.Defin } // Use registered extensions. - exts := ipnext.Extensions().All() - numExts := ipnext.Extensions().Len() + extDef := ipnext.Extensions() if overrideExts != nil { // Use the provided, potentially empty, overrideExts // instead of the registered ones. - exts = slices.All(overrideExts) - numExts = len(overrideExts) + extDef = slices.Values(overrideExts) } - host.allExtensions = make([]ipnext.Extension, 0, numExts) - for _, d := range exts { + for d := range extDef { ext, err := d.MakeExtension(logf, b) if errors.Is(err, ipnext.SkipExtension) { // The extension wants to be skipped. diff --git a/types/mapx/ordered.go b/types/mapx/ordered.go new file mode 100644 index 000000000..1991f039d --- /dev/null +++ b/types/mapx/ordered.go @@ -0,0 +1,111 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package mapx contains extra map types and functions. +package mapx + +import ( + "iter" + "slices" +) + +// OrderedMap is a map that maintains the order of its keys. +// +// It is meant for maps that only grow or that are small; +// is it not optimized for deleting keys. +// +// The zero value is ready to use. +// +// Locking-wise, it has the same rules as a regular Go map: +// concurrent reads are safe, but not writes. +type OrderedMap[K comparable, V any] struct { + // m is the underlying map. + m map[K]V + + // keys is the order of keys in the map. + keys []K +} + +func (m *OrderedMap[K, V]) init() { + if m.m == nil { + m.m = make(map[K]V) + } +} + +// Set sets the value for the given key in the map. +// +// If the key already exists, it updates the value and keeps the order. +func (m *OrderedMap[K, V]) Set(key K, value V) { + m.init() + len0 := len(m.keys) + m.m[key] = value + if len(m.m) > len0 { + // New key (not an update) + m.keys = append(m.keys, key) + } +} + +// Get returns the value for the given key in the map. +// If the key does not exist, it returns the zero value for V. +func (m *OrderedMap[K, V]) Get(key K) V { + return m.m[key] +} + +// GetOk returns the value for the given key in the map +// and whether it was present in the map. +func (m *OrderedMap[K, V]) GetOk(key K) (_ V, ok bool) { + v, ok := m.m[key] + return v, ok +} + +// Contains reports whether the map contains the given key. 
+func (m *OrderedMap[K, V]) Contains(key K) bool { + _, ok := m.m[key] + return ok +} + +// Delete removes the key from the map. +// +// The cost is O(n) in the number of keys in the map. +func (m *OrderedMap[K, V]) Delete(key K) { + len0 := len(m.m) + delete(m.m, key) + if len(m.m) == len0 { + // Wasn't present; no need to adjust keys. + return + } + was := m.keys + m.keys = m.keys[:0] + for _, k := range was { + if k != key { + m.keys = append(m.keys, k) + } + } +} + +// All yields all the keys and values, in the order they were inserted. +func (m *OrderedMap[K, V]) All() iter.Seq2[K, V] { + return func(yield func(K, V) bool) { + for _, k := range m.keys { + if !yield(k, m.m[k]) { + return + } + } + } +} + +// Keys yields the map keys, in the order they were inserted. +func (m *OrderedMap[K, V]) Keys() iter.Seq[K] { + return slices.Values(m.keys) +} + +// Values yields the map values, in the order they were inserted. +func (m *OrderedMap[K, V]) Values() iter.Seq[V] { + return func(yield func(V) bool) { + for _, k := range m.keys { + if !yield(m.m[k]) { + return + } + } + } +} diff --git a/types/mapx/ordered_test.go b/types/mapx/ordered_test.go new file mode 100644 index 000000000..7dcb7e405 --- /dev/null +++ b/types/mapx/ordered_test.go @@ -0,0 +1,56 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package mapx + +import ( + "fmt" + "slices" + "testing" +) + +func TestOrderedMap(t *testing.T) { + // Test the OrderedMap type and its methods. + var m OrderedMap[string, int] + m.Set("d", 4) + m.Set("a", 1) + m.Set("b", 1) + m.Set("b", 2) + m.Set("c", 3) + m.Delete("d") + m.Delete("e") + + want := map[string]int{ + "a": 1, + "b": 2, + "c": 3, + "d": 0, + } + for k, v := range want { + if m.Get(k) != v { + t.Errorf("Get(%q) = %d, want %d", k, m.Get(k), v) + continue + } + got, ok := m.GetOk(k) + if got != v { + t.Errorf("GetOk(%q) = %d, want %d", k, got, v) + } + if ok != m.Contains(k) { + t.Errorf("GetOk and Contains don't agree for %q", k) + } + } + + if got, want := slices.Collect(m.Keys()), []string{"a", "b", "c"}; !slices.Equal(got, want) { + t.Errorf("Keys() = %q, want %q", got, want) + } + if got, want := slices.Collect(m.Values()), []int{1, 2, 3}; !slices.Equal(got, want) { + t.Errorf("Values() = %v, want %v", got, want) + } + var allGot []string + for k, v := range m.All() { + allGot = append(allGot, fmt.Sprintf("%s:%d", k, v)) + } + if got, want := allGot, []string{"a:1", "b:2", "c:3"}; !slices.Equal(got, want) { + t.Errorf("All() = %q, want %q", got, want) + } +} From dae2319e119cfe55b7c76888ee4be7f750c5150b Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 25 Apr 2025 13:00:00 -0700 Subject: [PATCH 0792/1708] disco: implement CallMeMaybeVia serialization (#15779) This message type is currently unused and considered experimental. 
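For illustration, a marshal/parse round-trip sketch of the new message (the field
values are arbitrary):

	via := &disco.CallMeMaybeVia{
		ServerDisco:         key.NewDisco().Public(),
		LamportID:           123,
		VNI:                 456,
		BindLifetime:        30 * time.Second,
		SteadyStateLifetime: 5 * time.Minute,
		AddrPorts:           []netip.AddrPort{netip.MustParseAddrPort("192.0.2.1:41641")},
	}
	pkt := via.AppendMarshal(nil) // type byte 0x07, version 0, then the fields in order
	msg, err := disco.Parse(pkt)  // msg is a disco.Message; here a *disco.CallMeMaybeVia

Each AddrPort is encoded as a 16-byte IP (IPv4 mapped into IPv6 form) followed by a
2-byte big-endian port, matching the existing CallMeMaybe endpoint encoding.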
Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- disco/disco.go | 95 +++++++++++++++++++++++++++++++++++++++++++++ disco/disco_test.go | 16 ++++++++ 2 files changed, 111 insertions(+) diff --git a/disco/disco.go b/disco/disco.go index c5aa4ace2..1219a604d 100644 --- a/disco/disco.go +++ b/disco/disco.go @@ -25,6 +25,7 @@ import ( "fmt" "net" "net/netip" + "time" "go4.org/mem" "tailscale.com/types/key" @@ -47,6 +48,7 @@ const ( TypeBindUDPRelayEndpoint = MessageType(0x04) TypeBindUDPRelayEndpointChallenge = MessageType(0x05) TypeBindUDPRelayEndpointAnswer = MessageType(0x06) + TypeCallMeMaybeVia = MessageType(0x07) ) const v0 = byte(0) @@ -93,6 +95,8 @@ func Parse(p []byte) (Message, error) { return parseBindUDPRelayEndpointChallenge(ver, p) case TypeBindUDPRelayEndpointAnswer: return parseBindUDPRelayEndpointAnswer(ver, p) + case TypeCallMeMaybeVia: + return parseCallMeMaybeVia(ver, p) default: return nil, fmt.Errorf("unknown message type 0x%02x", byte(t)) } @@ -392,3 +396,94 @@ func parseBindUDPRelayEndpointAnswer(ver uint8, p []byte) (m *BindUDPRelayEndpoi copy(m.Answer[:], p[:]) return m, nil } + +// CallMeMaybeVia is a message sent only over DERP to request that the recipient +// try to open up a magicsock path back to the sender. The 'Via' in +// CallMeMaybeVia highlights that candidate paths are served through an +// intermediate relay, likely a [tailscale.com/net/udprelay.Server]. +// +// Usage of the candidate paths in magicsock requires a 3-way handshake +// involving [BindUDPRelayEndpoint], [BindUDPRelayEndpointChallenge], and +// [BindUDPRelayEndpointAnswer]. +// +// CallMeMaybeVia mirrors [tailscale.com/net/udprelay.ServerEndpoint], which +// contains field documentation. +// +// The recipient may choose to not open a path back if it's already happy with +// its path. Direct connections, e.g. [CallMeMaybe]-signaled, take priority over +// CallMeMaybeVia paths. +// +// This message type is currently considered experimental and is not yet tied to +// a [tailscale.com/tailcfg.CapabilityVersion]. 
+type CallMeMaybeVia struct { + // ServerDisco is [tailscale.com/net/udprelay.ServerEndpoint.ServerDisco] + ServerDisco key.DiscoPublic + // LamportID is [tailscale.com/net/udprelay.ServerEndpoint.LamportID] + LamportID uint64 + // VNI is [tailscale.com/net/udprelay.ServerEndpoint.VNI] + VNI uint32 + // BindLifetime is [tailscale.com/net/udprelay.ServerEndpoint.BindLifetime] + BindLifetime time.Duration + // SteadyStateLifetime is [tailscale.com/net/udprelay.ServerEndpoint.SteadyStateLifetime] + SteadyStateLifetime time.Duration + // AddrPorts is [tailscale.com/net/udprelay.ServerEndpoint.AddrPorts] + AddrPorts []netip.AddrPort +} + +const cmmvDataLenMinusEndpoints = key.DiscoPublicRawLen + // ServerDisco + 8 + // LamportID + 4 + // VNI + 8 + // BindLifetime + 8 // SteadyStateLifetime + +func (m *CallMeMaybeVia) AppendMarshal(b []byte) []byte { + endpointsLen := epLength * len(m.AddrPorts) + ret, p := appendMsgHeader(b, TypeCallMeMaybeVia, v0, cmmvDataLenMinusEndpoints+endpointsLen) + disco := m.ServerDisco.AppendTo(nil) + copy(p, disco) + p = p[key.DiscoPublicRawLen:] + binary.BigEndian.PutUint64(p[:8], m.LamportID) + p = p[8:] + binary.BigEndian.PutUint32(p[:4], m.VNI) + p = p[4:] + binary.BigEndian.PutUint64(p[:8], uint64(m.BindLifetime)) + p = p[8:] + binary.BigEndian.PutUint64(p[:8], uint64(m.SteadyStateLifetime)) + p = p[8:] + for _, ipp := range m.AddrPorts { + a := ipp.Addr().As16() + copy(p, a[:]) + binary.BigEndian.PutUint16(p[16:18], ipp.Port()) + p = p[epLength:] + } + return ret +} + +func parseCallMeMaybeVia(ver uint8, p []byte) (m *CallMeMaybeVia, err error) { + m = new(CallMeMaybeVia) + if len(p) < cmmvDataLenMinusEndpoints+epLength || + (len(p)-cmmvDataLenMinusEndpoints)%epLength != 0 || + ver != 0 { + return m, nil + } + m.ServerDisco = key.DiscoPublicFromRaw32(mem.B(p[:key.DiscoPublicRawLen])) + p = p[key.DiscoPublicRawLen:] + m.LamportID = binary.BigEndian.Uint64(p[:8]) + p = p[8:] + m.VNI = binary.BigEndian.Uint32(p[:4]) + p = p[4:] + m.BindLifetime = time.Duration(binary.BigEndian.Uint64(p[:8])) + p = p[8:] + m.SteadyStateLifetime = time.Duration(binary.BigEndian.Uint64(p[:8])) + p = p[8:] + m.AddrPorts = make([]netip.AddrPort, 0, len(p)-cmmvDataLenMinusEndpoints/epLength) + for len(p) > 0 { + var a [16]byte + copy(a[:], p) + m.AddrPorts = append(m.AddrPorts, netip.AddrPortFrom( + netip.AddrFrom16(a).Unmap(), + binary.BigEndian.Uint16(p[16:18]))) + p = p[epLength:] + } + return m, nil +} diff --git a/disco/disco_test.go b/disco/disco_test.go index 751190445..f2a29a744 100644 --- a/disco/disco_test.go +++ b/disco/disco_test.go @@ -9,6 +9,7 @@ import ( "reflect" "strings" "testing" + "time" "go4.org/mem" "tailscale.com/types/key" @@ -106,6 +107,21 @@ func TestMarshalAndParse(t *testing.T) { }, want: "06 00 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", }, + { + name: "call_me_maybe_via", + m: &CallMeMaybeVia{ + ServerDisco: key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 30: 30, 31: 31})), + LamportID: 123, + VNI: 456, + BindLifetime: time.Second, + SteadyStateLifetime: time.Minute, + AddrPorts: []netip.AddrPort{ + netip.MustParseAddrPort("1.2.3.4:567"), + netip.MustParseAddrPort("[2001::3456]:789"), + }, + }, + want: "07 00 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 00 00 00 00 00 00 7b 00 00 01 c8 00 00 00 00 3b 9a ca 00 00 00 00 0d f8 47 58 00 00 00 00 00 00 00 00 00 00 00 ff ff 01 02 03 04 02 37 20 01 00 00 00 00 00 00 00 00 00 00 00 00 34 56 03 15", + }, } 
for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From f701d39ba42ffc9329fc13fd541440b3c644dce5 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 25 Apr 2025 13:09:09 -0700 Subject: [PATCH 0793/1708] net/udprelay: change Server.AllocateEndpoint existing alloc strategy (#15792) The previous strategy assumed clients maintained adequate state to understand the relationship between endpoint allocation and the server it was allocated on. magicsock will not have awareness of the server's disco key pre-allocation, it only understands peerAPI address at this point. The second client to allocate on the same server could trigger re-allocation, breaking a functional relay server endpoint. If magicsock needs to force reallocation we can add opt-in behaviors for this later. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 50 +++++++++++++------------------------ net/udprelay/server_test.go | 12 +++++++-- 2 files changed, 27 insertions(+), 35 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 373165777..5580b6e65 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -454,9 +454,10 @@ func (s *Server) packetReadLoop() { var ErrServerClosed = errors.New("server closed") -// AllocateEndpoint allocates a ServerEndpoint for the provided pair of -// key.DiscoPublic's. It returns an error (ErrServerClosed) if the server has -// been closed. +// AllocateEndpoint allocates a [ServerEndpoint] for the provided pair of +// [key.DiscoPublic]'s. If an allocation already exists for discoA and discoB it +// is returned without modification/reallocation. AllocateEndpoint returns +// [ErrServerClosed] if the server has been closed. func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (ServerEndpoint, error) { s.mu.Lock() defer s.mu.Unlock() @@ -471,36 +472,19 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (ServerEndpoin pair := newPairOfDiscoPubKeys(discoA, discoB) e, ok := s.byDisco[pair] if ok { - if !e.isBound() { - // If the endpoint is not yet bound this is likely an allocation - // race between two clients on the same Server. Instead of - // re-allocating we return the existing allocation. We do not reset - // e.allocatedAt in case a client is "stuck" in an allocation - // loop and will not be able to complete a handshake, for whatever - // reason. Once the endpoint expires a new endpoint will be - // allocated. Clients can resolve duplicate ServerEndpoint details - // via ServerEndpoint.LamportID. - // - // TODO: consider ServerEndpoint.BindLifetime -= time.Now()-e.allocatedAt - // to give the client a more accurate picture of the bind window. - // Or, some threshold to trigger re-allocation if too much time has - // already passed since it was originally allocated. - return ServerEndpoint{ - ServerDisco: s.discoPublic, - AddrPorts: s.addrPorts, - VNI: e.vni, - LamportID: e.lamportID, - BindLifetime: tstime.GoDuration{Duration: s.bindLifetime}, - SteadyStateLifetime: tstime.GoDuration{Duration: s.steadyStateLifetime}, - }, nil - } - // If an endpoint exists for the pair of key.DiscoPublic's, and is - // already bound, delete it. We will re-allocate a new endpoint. Chances - // are clients cannot make use of the existing, bound allocation if - // they are requesting a new one. - delete(s.byDisco, pair) - delete(s.byVNI, e.vni) - s.vniPool = append(s.vniPool, e.vni) + // Return the existing allocation. 
Clients can resolve duplicate + // [ServerEndpoint]'s via [ServerEndpoint.LamportID]. + // + // TODO: consider ServerEndpoint.BindLifetime -= time.Now()-e.allocatedAt + // to give the client a more accurate picture of the bind window. + return ServerEndpoint{ + ServerDisco: s.discoPublic, + AddrPorts: s.addrPorts, + VNI: e.vni, + LamportID: e.lamportID, + BindLifetime: tstime.GoDuration{Duration: s.bindLifetime}, + SteadyStateLifetime: tstime.GoDuration{Duration: s.steadyStateLifetime}, + }, nil } if len(s.vniPool) == 0 { diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index fad35ec03..c699e5d15 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -174,8 +174,7 @@ func TestServer(t *testing.T) { t.Fatal(err) } - // We expect the same endpoint details as the 3-way bind handshake has not - // yet been completed for both relay client parties. + // We expect the same endpoint details pre-handshake. if diff := cmp.Diff(dupEndpoint, endpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) } @@ -191,6 +190,15 @@ func TestServer(t *testing.T) { tcA.handshake(t) tcB.handshake(t) + dupEndpoint, err = server.AllocateEndpoint(discoA.Public(), discoB.Public()) + if err != nil { + t.Fatal(err) + } + // We expect the same endpoint details post-handshake. + if diff := cmp.Diff(dupEndpoint, endpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { + t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) + } + txToB := []byte{1, 2, 3} tcA.writeDataPkt(t, txToB) rxFromA := tcB.readDataPkt(t) From 8b72dd7873201b0944c48449b1facd08f4135dea Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 24 Apr 2025 21:54:48 -0500 Subject: [PATCH 0794/1708] ipn/ipnlocal: add localNodeContext with netmap-related fields and methods Updates #12614 Signed-off-by: Nick Khyl --- ipn/ipnlocal/dnsconfig_test.go | 4 +- ipn/ipnlocal/drive.go | 43 +-- ipn/ipnlocal/local.go | 529 +++++++++++++++++------------ ipn/ipnlocal/local_node_context.go | 207 +++++++++++ ipn/ipnlocal/local_test.go | 63 ++-- ipn/ipnlocal/network-lock.go | 19 +- ipn/ipnlocal/peerapi.go | 2 +- ipn/ipnlocal/peerapi_test.go | 2 +- ipn/ipnlocal/serve.go | 4 +- ipn/ipnlocal/serve_test.go | 44 +-- ipn/ipnlocal/taildrop.go | 57 ++-- ipn/ipnlocal/taildrop_test.go | 20 +- ipn/ipnlocal/web_client.go | 5 +- 13 files changed, 648 insertions(+), 351 deletions(-) create mode 100644 ipn/ipnlocal/local_node_context.go diff --git a/ipn/ipnlocal/dnsconfig_test.go b/ipn/ipnlocal/dnsconfig_test.go index 19d8e8b86..c0f5b25f3 100644 --- a/ipn/ipnlocal/dnsconfig_test.go +++ b/ipn/ipnlocal/dnsconfig_test.go @@ -382,14 +382,14 @@ func TestAllowExitNodeDNSProxyToServeName(t *testing.T) { t.Fatal("unexpected true on backend with nil NetMap") } - b.netMap = &netmap.NetworkMap{ + b.currentNode().SetNetMap(&netmap.NetworkMap{ DNS: tailcfg.DNSConfig{ ExitNodeFilteredSet: []string{ ".ts.net", "some.exact.bad", }, }, - } + }) tests := []struct { name string want bool diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index 8ae813ff2..f13c9de48 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -4,7 +4,6 @@ package ipnlocal import ( - "cmp" "fmt" "os" "slices" @@ -26,26 +25,14 @@ const ( // enabled. This is currently based on checking for the drive:share node // attribute. 
func (b *LocalBackend) DriveSharingEnabled() bool { - b.mu.Lock() - defer b.mu.Unlock() - return b.driveSharingEnabledLocked() -} - -func (b *LocalBackend) driveSharingEnabledLocked() bool { - return b.netMap != nil && b.netMap.SelfNode.HasCap(tailcfg.NodeAttrsTaildriveShare) + return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveShare) } // DriveAccessEnabled reports whether accessing Taildrive shares on remote nodes // is enabled. This is currently based on checking for the drive:access node // attribute. func (b *LocalBackend) DriveAccessEnabled() bool { - b.mu.Lock() - defer b.mu.Unlock() - return b.driveAccessEnabledLocked() -} - -func (b *LocalBackend) driveAccessEnabledLocked() bool { - return b.netMap != nil && b.netMap.SelfNode.HasCap(tailcfg.NodeAttrsTaildriveAccess) + return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveAccess) } // DriveSetServerAddr tells Taildrive to use the given address for connecting @@ -266,7 +253,7 @@ func (b *LocalBackend) driveNotifyShares(shares views.SliceView[*drive.Share, dr // shares has changed since the last notification. func (b *LocalBackend) driveNotifyCurrentSharesLocked() { var shares views.SliceView[*drive.Share, drive.ShareView] - if b.driveSharingEnabledLocked() { + if b.DriveSharingEnabled() { // Only populate shares if sharing is enabled. shares = b.pm.prefs.DriveShares() } @@ -310,12 +297,12 @@ func (b *LocalBackend) updateDrivePeersLocked(nm *netmap.NetworkMap) { } var driveRemotes []*drive.Remote - if b.driveAccessEnabledLocked() { + if b.DriveAccessEnabled() { // Only populate peers if access is enabled, otherwise leave blank. driveRemotes = b.driveRemotesFromPeers(nm) } - fs.SetRemotes(b.netMap.Domain, driveRemotes, b.newDriveTransport()) + fs.SetRemotes(nm.Domain, driveRemotes, b.newDriveTransport()) } func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Remote { @@ -330,23 +317,20 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem // Peers are available to Taildrive if: // - They are online // - They are allowed to share at least one folder with us - b.mu.Lock() - latestNetMap := b.netMap - b.mu.Unlock() - - idx, found := slices.BinarySearchFunc(latestNetMap.Peers, peerID, func(candidate tailcfg.NodeView, id tailcfg.NodeID) int { - return cmp.Compare(candidate.ID(), id) - }) - if !found { + cn := b.currentNode() + peer, ok := cn.PeerByID(peerID) + if !ok { return false } - peer := latestNetMap.Peers[idx] - // Exclude offline peers. // TODO(oxtoacart): for some reason, this correctly // catches when a node goes from offline to online, // but not the other way around... + // TODO(oxtoacart,nickkhyl): the reason was probably + // that we were using netmap.Peers instead of b.peers. + // The netmap.Peers slice is not updated in all cases. + // It should be fixed now that we use PeerByIDOk. if !peer.Online().Get() { return false } @@ -354,8 +338,7 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem // Check that the peer is allowed to share with us. 
addresses := peer.Addresses() for _, p := range addresses.All() { - capsMap := b.PeerCaps(p.Addr()) - if capsMap.HasCapability(tailcfg.PeerCapabilityTaildriveSharer) { + if cn.PeerHasCap(p.Addr(), tailcfg.PeerCapabilityTaildriveSharer) { return true } } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index d60f05b11..308d03197 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -199,15 +199,15 @@ type LocalBackend struct { portpollOnce sync.Once // guards starting readPoller varRoot string // or empty if SetVarRoot never called logFlushFunc func() // or nil if SetLogFlusher wasn't called - em *expiryManager // non-nil - sshAtomicBool atomic.Bool + em *expiryManager // non-nil; TODO(nickkhyl): move to nodeContext + sshAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeContext // webClientAtomicBool controls whether the web client is running. This should // be true unless the disable-web-client node attribute has been set. - webClientAtomicBool atomic.Bool + webClientAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeContext // exposeRemoteWebClientAtomicBool controls whether the web client is exposed over // Tailscale on port 5252. - exposeRemoteWebClientAtomicBool atomic.Bool - shutdownCalled bool // if Shutdown has been called + exposeRemoteWebClientAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeContext + shutdownCalled bool // if Shutdown has been called debugSink packet.CaptureSink sockstatLogger *sockstatlog.Logger @@ -227,11 +227,10 @@ type LocalBackend struct { // is never called. getTCPHandlerForFunnelFlow func(srcAddr netip.AddrPort, dstPort uint16) (handler func(net.Conn)) - filterAtomic atomic.Pointer[filter.Filter] - containsViaIPFuncAtomic syncs.AtomicValue[func(netip.Addr) bool] - shouldInterceptTCPPortAtomic syncs.AtomicValue[func(uint16) bool] - shouldInterceptVIPServicesTCPPortAtomic syncs.AtomicValue[func(netip.AddrPort) bool] - numClientStatusCalls atomic.Uint32 + containsViaIPFuncAtomic syncs.AtomicValue[func(netip.Addr) bool] // TODO(nickkhyl): move to nodeContext + shouldInterceptTCPPortAtomic syncs.AtomicValue[func(uint16) bool] // TODO(nickkhyl): move to nodeContext + shouldInterceptVIPServicesTCPPortAtomic syncs.AtomicValue[func(netip.AddrPort) bool] // TODO(nickkhyl): move to nodeContext + numClientStatusCalls atomic.Uint32 // TODO(nickkhyl): move to nodeContext // goTracker accounts for all goroutines started by LocalBacked, primarily // for testing and graceful shutdown purposes. @@ -245,46 +244,49 @@ type LocalBackend struct { extHost *ExtensionHost // The mutex protects the following elements. - mu sync.Mutex - conf *conffile.Config // latest parsed config, or nil if not in declarative mode - pm *profileManager // mu guards access - filterHash deephash.Sum + mu sync.Mutex + + // currentNodeAtomic is the current node context. It is always non-nil. + // It must be re-created when [LocalBackend] switches to a different profile/node + // (see tailscale/corp#28014 for a bug), but can be mutated in place (via its methods) + // while [LocalBackend] represents the same node. + // + // It is safe for reading with or without holding b.mu, but mutating it in place + // or creating a new one must be done with b.mu held. If both mutexes must be held, + // the LocalBackend's mutex must be acquired first before acquiring the nodeContext's mutex. + // + // We intend to relax this in the future and only require holding b.mu when replacing it, + // but that requires a better (strictly ordered?) 
state machine and better management + // of [LocalBackend]'s own state that is not tied to the node context. + currentNodeAtomic atomic.Pointer[localNodeContext] + + conf *conffile.Config // latest parsed config, or nil if not in declarative mode + pm *profileManager // mu guards access + filterHash deephash.Sum // TODO(nickkhyl): move to nodeContext httpTestClient *http.Client // for controlclient. nil by default, used by tests. ccGen clientGen // function for producing controlclient; lazily populated sshServer SSHServer // or nil, initialized lazily. appConnector *appc.AppConnector // or nil, initialized when configured. // notifyCancel cancels notifications to the current SetNotifyCallback. notifyCancel context.CancelFunc - cc controlclient.Client - ccAuto *controlclient.Auto // if cc is of type *controlclient.Auto + cc controlclient.Client // TODO(nickkhyl): move to nodeContext + ccAuto *controlclient.Auto // if cc is of type *controlclient.Auto; TODO(nickkhyl): move to nodeContext machinePrivKey key.MachinePrivate - tka *tkaState - state ipn.State - capFileSharing bool // whether netMap contains the file sharing capability - capTailnetLock bool // whether netMap contains the tailnet lock capability + tka *tkaState // TODO(nickkhyl): move to nodeContext + state ipn.State // TODO(nickkhyl): move to nodeContext + capFileSharing bool // whether netMap contains the file sharing capability + capTailnetLock bool // whether netMap contains the tailnet lock capability // hostinfo is mutated in-place while mu is held. - hostinfo *tailcfg.Hostinfo - // netMap is the most recently set full netmap from the controlclient. - // It can't be mutated in place once set. Because it can't be mutated in place, - // delta updates from the control server don't apply to it. Instead, use - // the peers map to get up-to-date information on the state of peers. - // In general, avoid using the netMap.Peers slice. We'd like it to go away - // as of 2023-09-17. - netMap *netmap.NetworkMap - // peers is the set of current peers and their current values after applying - // delta node mutations as they come in (with mu held). The map values can - // be given out to callers, but the map itself must not escape the LocalBackend. - peers map[tailcfg.NodeID]tailcfg.NodeView - nodeByAddr map[netip.Addr]tailcfg.NodeID // by Node.Addresses only (not subnet routes) - nmExpiryTimer tstime.TimerController // for updating netMap on node expiry; can be nil - activeLogin string // last logged LoginName from netMap + hostinfo *tailcfg.Hostinfo // TODO(nickkhyl): move to nodeContext + nmExpiryTimer tstime.TimerController // for updating netMap on node expiry; can be nil; TODO(nickkhyl): move to nodeContext + activeLogin string // last logged LoginName from netMap; TODO(nickkhyl): move to nodeContext (or remove? it's in [ipn.LoginProfile]). 
engineStatus ipn.EngineStatus endpoints []tailcfg.Endpoint blocked bool - keyExpired bool - authURL string // non-empty if not Running - authURLTime time.Time // when the authURL was received from the control server - authActor ipnauth.Actor // an actor who called [LocalBackend.StartLoginInteractive] last, or nil + keyExpired bool // TODO(nickkhyl): move to nodeContext + authURL string // non-empty if not Running; TODO(nickkhyl): move to nodeContext + authURLTime time.Time // when the authURL was received from the control server; TODO(nickkhyl): move to nodeContext + authActor ipnauth.Actor // an actor who called [LocalBackend.StartLoginInteractive] last, or nil; TODO(nickkhyl): move to nodeContext egg bool prevIfState *netmon.State peerAPIServer *peerAPIServer // or nil @@ -315,7 +317,7 @@ type LocalBackend struct { lastSelfUpdateState ipnstate.SelfUpdateStatus // capForcedNetfilter is the netfilter that control instructs Linux clients // to use, unless overridden locally. - capForcedNetfilter string + capForcedNetfilter string // TODO(nickkhyl): move to nodeContext // offlineAutoUpdateCancel stops offline auto-updates when called. It // should be used via stopOfflineAutoUpdate and // maybeStartOfflineAutoUpdate. It is nil when offline auto-updates are @@ -327,7 +329,7 @@ type LocalBackend struct { // ServeConfig fields. (also guarded by mu) lastServeConfJSON mem.RO // last JSON that was parsed into serveConfig serveConfig ipn.ServeConfigView // or !Valid if none - ipVIPServiceMap netmap.IPServiceMappings // map of VIPService IPs to their corresponding service names + ipVIPServiceMap netmap.IPServiceMappings // map of VIPService IPs to their corresponding service names; TODO(nickkhyl): move to nodeContext webClient webClient webClientListeners map[netip.AddrPort]*localListener // listeners for local web client traffic @@ -342,7 +344,7 @@ type LocalBackend struct { // dialPlan is any dial plan that we've received from the control // server during a previous connection; it is cleared on logout. - dialPlan atomic.Pointer[tailcfg.ControlDialPlan] + dialPlan atomic.Pointer[tailcfg.ControlDialPlan] // TODO(nickkhyl): maybe move to nodeContext? // tkaSyncLock is used to make tkaSyncIfNeeded an exclusive // section. This is needed to stop two map-responses in quick succession @@ -517,6 +519,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo captiveCancel: nil, // so that we start checkCaptivePortalLoop when Running needsCaptiveDetection: make(chan bool), } + b.currentNodeAtomic.Store(newLocalNodeContext()) mConn.SetNetInfoCallback(b.setNetInfo) if sys.InitialConfig != nil { @@ -591,6 +594,16 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo func (b *LocalBackend) Clock() tstime.Clock { return b.clock } func (b *LocalBackend) Sys() *tsd.System { return b.sys } +func (b *LocalBackend) currentNode() *localNodeContext { + if v := b.currentNodeAtomic.Load(); v != nil || !testenv.InTest() { + return v + } + // Auto-init one in tests for LocalBackend created without the NewLocalBackend constructor... + v := newLocalNodeContext() + b.currentNodeAtomic.CompareAndSwap(nil, v) + return b.currentNodeAtomic.Load() +} + // FindExtensionByName returns an active extension with the given name, // or nil if no such extension exists. 
func (b *LocalBackend) FindExtensionByName(name string) any { @@ -860,7 +873,7 @@ func (b *LocalBackend) pauseOrResumeControlClientLocked() { return } networkUp := b.prevIfState.AnyInterfaceUp() - b.cc.SetPaused((b.state == ipn.Stopped && b.netMap != nil) || (!networkUp && !testenv.InTest() && !assumeNetworkUpdateForTest())) + b.cc.SetPaused((b.state == ipn.Stopped && b.NetMap() != nil) || (!networkUp && !testenv.InTest() && !assumeNetworkUpdateForTest())) } // DisconnectControl shuts down control client. This can be run before node shutdown to force control to consider this ndoe @@ -918,11 +931,13 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { // If the local network configuration has changed, our filter may // need updating to tweak default routes. - b.updateFilterLocked(b.netMap, b.pm.CurrentPrefs()) + b.updateFilterLocked(b.pm.CurrentPrefs()) updateExitNodeUsageWarning(b.pm.CurrentPrefs(), delta.New, b.health) - if peerAPIListenAsync && b.netMap != nil && b.state == ipn.Running { - want := b.netMap.GetAddresses().Len() + cn := b.currentNode() + nm := cn.NetMap() + if peerAPIListenAsync && nm != nil && b.state == ipn.Running { + want := nm.GetAddresses().Len() have := len(b.peerAPIListeners) b.logf("[v1] linkChange: have %d peerAPIListeners, want %d", have, want) if have < want { @@ -1163,6 +1178,8 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { b.mu.Lock() defer b.mu.Unlock() + cn := b.currentNode() + nm := cn.NetMap() sb.MutateStatus(func(s *ipnstate.Status) { s.Version = version.Long() s.TUN = !b.sys.IsNetstack() @@ -1179,21 +1196,21 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { if m := b.sshOnButUnusableHealthCheckMessageLocked(); m != "" { s.Health = append(s.Health, m) } - if b.netMap != nil { - s.CertDomains = append([]string(nil), b.netMap.DNS.CertDomains...) - s.MagicDNSSuffix = b.netMap.MagicDNSSuffix() + if nm != nil { + s.CertDomains = append([]string(nil), nm.DNS.CertDomains...) 
+ s.MagicDNSSuffix = nm.MagicDNSSuffix() if s.CurrentTailnet == nil { s.CurrentTailnet = &ipnstate.TailnetStatus{} } - s.CurrentTailnet.MagicDNSSuffix = b.netMap.MagicDNSSuffix() - s.CurrentTailnet.MagicDNSEnabled = b.netMap.DNS.Proxied - s.CurrentTailnet.Name = b.netMap.Domain + s.CurrentTailnet.MagicDNSSuffix = nm.MagicDNSSuffix() + s.CurrentTailnet.MagicDNSEnabled = nm.DNS.Proxied + s.CurrentTailnet.Name = nm.Domain if prefs := b.pm.CurrentPrefs(); prefs.Valid() { - if !prefs.RouteAll() && b.netMap.AnyPeersAdvertiseRoutes() { + if !prefs.RouteAll() && nm.AnyPeersAdvertiseRoutes() { s.Health = append(s.Health, healthmsg.WarnAcceptRoutesOff) } if !prefs.ExitNodeID().IsZero() { - if exitPeer, ok := b.netMap.PeerWithStableID(prefs.ExitNodeID()); ok { + if exitPeer, ok := nm.PeerWithStableID(prefs.ExitNodeID()); ok { s.ExitNodeStatus = &ipnstate.ExitNodeStatus{ ID: prefs.ExitNodeID(), Online: exitPeer.Online().Get(), @@ -1206,8 +1223,8 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { }) var tailscaleIPs []netip.Addr - if b.netMap != nil { - addrs := b.netMap.GetAddresses() + if nm != nil { + addrs := nm.GetAddresses() for i := range addrs.Len() { if addr := addrs.At(i); addr.IsSingleIP() { sb.AddTailscaleIP(addr.Addr()) @@ -1219,14 +1236,14 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { sb.MutateSelfStatus(func(ss *ipnstate.PeerStatus) { ss.OS = version.OS() ss.Online = b.health.GetInPollNetMap() - if b.netMap != nil { + if nm != nil { ss.InNetworkMap = true - if hi := b.netMap.SelfNode.Hostinfo(); hi.Valid() { + if hi := nm.SelfNode.Hostinfo(); hi.Valid() { ss.HostName = hi.Hostname() } - ss.DNSName = b.netMap.Name - ss.UserID = b.netMap.User() - if sn := b.netMap.SelfNode; sn.Valid() { + ss.DNSName = nm.Name + ss.UserID = nm.User() + if sn := nm.SelfNode; sn.Valid() { peerStatusFromNode(ss, sn) if cm := sn.CapMap(); cm.Len() > 0 { ss.Capabilities = make([]tailcfg.NodeCapability, 1, cm.Len()+1) @@ -1259,14 +1276,16 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { } func (b *LocalBackend) populatePeerStatusLocked(sb *ipnstate.StatusBuilder) { - if b.netMap == nil { + cn := b.currentNode() + nm := cn.NetMap() + if nm == nil { return } - for id, up := range b.netMap.UserProfiles { + for id, up := range nm.UserProfiles { sb.AddUser(id, up) } exitNodeID := b.pm.CurrentPrefs().ExitNodeID() - for _, p := range b.peers { + for _, p := range cn.Peers() { tailscaleIPs := make([]netip.Addr, 0, p.Addresses().Len()) for i := range p.Addresses().Len() { addr := p.Addresses().At(i) @@ -1355,18 +1374,10 @@ func profileFromView(v tailcfg.UserProfileView) tailcfg.UserProfile { // WhoIsNodeKey returns the peer info of given public key, if it exists. func (b *LocalBackend) WhoIsNodeKey(k key.NodePublic) (n tailcfg.NodeView, u tailcfg.UserProfile, ok bool) { - b.mu.Lock() - defer b.mu.Unlock() - // TODO(bradfitz): add nodeByKey like nodeByAddr instead of walking peers. 
- if b.netMap == nil { - return n, u, false - } - if self := b.netMap.SelfNode; self.Valid() && self.Key() == k { - return self, profileFromView(b.netMap.UserProfiles[self.User()]), true - } - for _, n := range b.peers { - if n.Key() == k { - up, ok := b.netMap.UserProfiles[n.User()] + cn := b.currentNode() + if nid, ok := cn.NodeByKey(k); ok { + if n, ok := cn.PeerByID(nid); ok { + up, ok := cn.NetMap().UserProfiles[n.User()] u = profileFromView(up) return n, u, ok } @@ -1399,7 +1410,8 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi return zero, u, false } - nid, ok := b.nodeByAddr[ipp.Addr()] + cn := b.currentNode() + nid, ok := cn.NodeByAddr(ipp.Addr()) if !ok { var ip netip.Addr if ipp.Port() != 0 { @@ -1421,23 +1433,24 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi if !ok { return failf("no IP found in ProxyMapper for %v", ipp) } - nid, ok = b.nodeByAddr[ip] + nid, ok = cn.NodeByAddr(ip) if !ok { return failf("no node for proxymapped IP %v", ip) } } - if b.netMap == nil { + nm := cn.NetMap() + if nm == nil { return failf("no netmap") } - n, ok = b.peers[nid] + n, ok = cn.PeerByID(nid) if !ok { // Check if this the self-node, which would not appear in peers. - if !b.netMap.SelfNode.Valid() || nid != b.netMap.SelfNode.ID() { + if !nm.SelfNode.Valid() || nid != nm.SelfNode.ID() { return zero, u, false } - n = b.netMap.SelfNode + n = nm.SelfNode } - up, ok := b.netMap.UserProfiles[n.User()] + up, ok := cn.UserByID(n.User()) if !ok { return failf("no userprofile for node %v", n.Key()) } @@ -1447,12 +1460,33 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi // PeerCaps returns the capabilities that remote src IP has to // ths current node. func (b *LocalBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { + return b.currentNode().PeerCaps(src) +} + +func (b *localNodeContext) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView { + b.mu.Lock() + defer b.mu.Unlock() + ret := base + if b.netMap == nil { + return ret + } + for _, peer := range b.netMap.Peers { + if pred(peer) { + ret = append(ret, peer) + } + } + return ret +} + +// PeerCaps returns the capabilities that remote src IP has to +// ths current node. 
+func (b *localNodeContext) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { b.mu.Lock() defer b.mu.Unlock() return b.peerCapsLocked(src) } -func (b *LocalBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap { +func (b *localNodeContext) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap { if b.netMap == nil { return nil } @@ -1474,7 +1508,7 @@ func (b *LocalBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap { return nil } -func (b *LocalBackend) GetFilterForTest() *filter.Filter { +func (b *localNodeContext) GetFilterForTest() *filter.Filter { return b.filterAtomic.Load() } @@ -1578,8 +1612,9 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.mu.Lock() prefsChanged := false + cn := b.currentNode() prefs := b.pm.CurrentPrefs().AsStruct() - oldNetMap := b.netMap + oldNetMap := cn.NetMap() curNetMap := st.NetMap if curNetMap == nil { // The status didn't include a netmap update, so the old one is still @@ -1699,7 +1734,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.tkaFilterNetmapLocked(st.NetMap) } b.setNetMapLocked(st.NetMap) - b.updateFilterLocked(st.NetMap, prefs.View()) + b.updateFilterLocked(prefs.View()) } b.mu.Unlock() @@ -1965,19 +2000,30 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo b.mu.Lock() defer b.mu.Unlock() - if !b.updateNetmapDeltaLocked(muts) { - return false - } + cn := b.currentNode() + cn.UpdateNetmapDelta(muts) - if b.netMap != nil && mutationsAreWorthyOfTellingIPNBus(muts) { - nm := ptr.To(*b.netMap) // shallow clone - nm.Peers = make([]tailcfg.NodeView, 0, len(b.peers)) - for _, p := range b.peers { - nm.Peers = append(nm.Peers, p) + // If auto exit nodes are enabled and our exit node went offline, + // we need to schedule picking a new one. + // TODO(nickkhyl): move the auto exit node logic to a feature package. + if shouldAutoExitNode() { + exitNodeID := b.pm.prefs.ExitNodeID() + for _, m := range muts { + mo, ok := m.(netmap.NodeMutationOnline) + if !ok || mo.Online { + continue + } + n, ok := cn.PeerByID(m.NodeIDBeingMutated()) + if !ok || n.StableID() != exitNodeID { + continue + } + b.goTracker.Go(b.pickNewAutoExitNode) + break } - slices.SortFunc(nm.Peers, func(a, b tailcfg.NodeView) int { - return cmp.Compare(a.ID(), b.ID()) - }) + } + + if cn.NetMap() != nil && mutationsAreWorthyOfTellingIPNBus(muts) { + nm := cn.netMapWithPeers() notify = &ipn.Notify{NetMap: nm} } else if testenv.InTest() { // In tests, send an empty Notify as a wake-up so end-to-end @@ -1988,6 +2034,20 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo return true } +func (c *localNodeContext) netMapWithPeers() *netmap.NetworkMap { + c.mu.Lock() + defer c.mu.Unlock() + if c.netMap == nil { + return nil + } + nm := ptr.To(*c.netMap) // shallow clone + nm.Peers = slicesx.MapValues(c.peers) + slices.SortFunc(nm.Peers, func(a, b tailcfg.NodeView) int { + return cmp.Compare(a.ID(), b.ID()) + }) + return nm +} + // mutationsAreWorthyOfTellingIPNBus reports whether any mutation type in muts is // worthy of spamming the IPN bus (the Windows & Mac GUIs, basically) to tell them // about the update. 
@@ -2018,8 +2078,10 @@ func (b *LocalBackend) pickNewAutoExitNode() { b.send(ipn.Notify{Prefs: &newPrefs}) } -func (b *LocalBackend) updateNetmapDeltaLocked(muts []netmap.NodeMutation) (handled bool) { - if b.netMap == nil || len(b.peers) == 0 { +func (c *localNodeContext) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) { + c.mu.Lock() + defer c.mu.Unlock() + if c.netMap == nil || len(c.peers) == 0 { return false } @@ -2031,7 +2093,7 @@ func (b *LocalBackend) updateNetmapDeltaLocked(muts []netmap.NodeMutation) (hand for _, m := range muts { n, ok := mutableNodes[m.NodeIDBeingMutated()] if !ok { - nv, ok := b.peers[m.NodeIDBeingMutated()] + nv, ok := c.peers[m.NodeIDBeingMutated()] if !ok { // TODO(bradfitz): unexpected metric? return false @@ -2040,15 +2102,9 @@ func (b *LocalBackend) updateNetmapDeltaLocked(muts []netmap.NodeMutation) (hand mak.Set(&mutableNodes, nv.ID(), n) } m.Apply(n) - - // If our exit node went offline, we need to schedule picking - // a new one. - if mo, ok := m.(netmap.NodeMutationOnline); ok && !mo.Online && n.StableID == b.pm.prefs.ExitNodeID() && shouldAutoExitNode() { - b.goTracker.Go(b.pickNewAutoExitNode) - } } for nid, n := range mutableNodes { - b.peers[nid] = n.View() + c.peers[nid] = n.View() } return true } @@ -2195,15 +2251,6 @@ func (b *LocalBackend) SetControlClientGetterForTesting(newControlClient func(co b.ccGen = newControlClient } -// NodeViewByIDForTest returns the state of the node with the given ID -// for integration tests in another repo. -func (b *LocalBackend) NodeViewByIDForTest(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) { - b.mu.Lock() - defer b.mu.Unlock() - n, ok := b.peers[id] - return n, ok -} - // DisablePortMapperForTest disables the portmapper for tests. // It must be called before Start. func (b *LocalBackend) DisablePortMapperForTest() { @@ -2215,6 +2262,10 @@ func (b *LocalBackend) DisablePortMapperForTest() { // PeersForTest returns all the current peers, sorted by Node.ID, // for integration tests in another repo. func (b *LocalBackend) PeersForTest() []tailcfg.NodeView { + return b.currentNode().PeersForTest() +} + +func (b *localNodeContext) PeersForTest() []tailcfg.NodeView { b.mu.Lock() defer b.mu.Unlock() ret := slicesx.MapValues(b.peers) @@ -2308,15 +2359,13 @@ func (b *LocalBackend) Start(opts ipn.Options) error { b.hostinfo = hostinfo b.state = ipn.NoState + cn := b.currentNode() if opts.UpdatePrefs != nil { oldPrefs := b.pm.CurrentPrefs() newPrefs := opts.UpdatePrefs.Clone() newPrefs.Persist = oldPrefs.Persist().AsStruct() pv := newPrefs.View() - if err := b.pm.SetPrefs(pv, ipn.NetworkProfile{ - MagicDNSName: b.netMap.MagicDNSSuffix(), - DomainName: b.netMap.DomainName(), - }); err != nil { + if err := b.pm.SetPrefs(pv, cn.NetworkProfile()); err != nil { b.logf("failed to save UpdatePrefs state: %v", err) } } @@ -2327,7 +2376,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { // This is important in two cases: when opts.UpdatePrefs is not nil, // and when Always Mode is enabled and we need to set WantRunning to true. if newp := b.pm.CurrentPrefs().AsStruct(); applySysPolicy(newp, b.lastSuggestedExitNode, b.overrideAlwaysOn) { - setExitNodeID(newp, b.netMap) + setExitNodeID(newp, cn.NetMap()) b.pm.setPrefsNoPermCheck(newp.View()) } prefs := b.pm.CurrentPrefs() @@ -2496,13 +2545,24 @@ var invalidPacketFilterWarnable = health.Register(&health.Warnable{ // given netMap and user preferences. // // b.mu must be held. 
-func (b *LocalBackend) updateFilterLocked(netMap *netmap.NetworkMap, prefs ipn.PrefsView) { +func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { + // TODO(nickkhyl) split this into two functions: + // - (*localNodeContext).RebuildFilters() (normalFilter, jailedFilter *filter.Filter, changed bool), + // which would return packet filters for the current state and whether they changed since the last call. + // - (*LocalBackend).updateFilters(), which would use the above to update the engine with the new filters, + // notify b.sshServer, etc. + // + // For this, we would need to plumb a few more things into the [localNodeContext]. Most importantly, + // the current [ipn.PrefsView]), but also maybe also a b.logf and a b.health? + // // NOTE(danderson): keep change detection as the first thing in // this function. Don't try to optimize by returning early, more // likely than not you'll just end up breaking the change // detection and end up with the wrong filter installed. This is // quite hard to debug, so save yourself the trouble. var ( + cn = b.currentNode() + netMap = cn.NetMap() haveNetmap = netMap != nil addrs views.Slice[netip.Prefix] packetFilter []filter.Match @@ -2521,7 +2581,7 @@ func (b *LocalBackend) updateFilterLocked(netMap *netmap.NetworkMap, prefs ipn.P } packetFilter = netMap.PacketFilter - if packetFilterPermitsUnlockedNodes(b.peers, packetFilter) { + if cn.unlockedNodesPermitted(packetFilter) { b.health.SetUnhealthy(invalidPacketFilterWarnable, nil) packetFilter = nil } else { @@ -2702,11 +2762,9 @@ func (b *LocalBackend) performCaptiveDetection() { } d := captivedetection.NewDetector(b.logf) - var dm *tailcfg.DERPMap - b.mu.Lock() - if b.netMap != nil { - dm = b.netMap.DERPMap - } + b.mu.Lock() // for b.hostinfo + cn := b.currentNode() + dm := cn.DERPMap() preferredDERP := 0 if b.hostinfo != nil { if b.hostinfo.NetInfo != nil { @@ -2773,11 +2831,17 @@ func packetFilterPermitsUnlockedNodes(peers map[tailcfg.NodeID]tailcfg.NodeView, return false } +// TODO(nickkhyl): this should be non-existent with a proper [LocalBackend.updateFilterLocked]. +// See the comment in that function for more details. func (b *LocalBackend) setFilter(f *filter.Filter) { - b.filterAtomic.Store(f) + b.currentNode().setFilter(f) b.e.SetFilter(f) } +func (c *localNodeContext) setFilter(f *filter.Filter) { + c.filterAtomic.Store(f) +} + var removeFromDefaultRoute = []netip.Prefix{ // RFC1918 LAN ranges netip.MustParsePrefix("192.168.0.0/16"), @@ -3029,6 +3093,7 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A const initialBits = ipn.NotifyInitialState | ipn.NotifyInitialPrefs | ipn.NotifyInitialNetMap | ipn.NotifyInitialDriveShares if mask&initialBits != 0 { + cn := b.currentNode() ini = &ipn.Notify{Version: version.Long()} if mask&ipn.NotifyInitialState != 0 { ini.SessionID = sessionID @@ -3041,9 +3106,9 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A ini.Prefs = ptr.To(b.sanitizedPrefsLocked()) } if mask&ipn.NotifyInitialNetMap != 0 { - ini.NetMap = b.netMap + ini.NetMap = cn.NetMap() } - if mask&ipn.NotifyInitialDriveShares != 0 && b.driveSharingEnabledLocked() { + if mask&ipn.NotifyInitialDriveShares != 0 && b.DriveSharingEnabled() { ini.DriveShares = b.pm.prefs.DriveShares() } if mask&ipn.NotifyInitialHealthState != 0 { @@ -3137,11 +3202,7 @@ func (b *LocalBackend) DebugNotify(n ipn.Notify) { // // It should only be used via the LocalAPI's debug handler. 
func (b *LocalBackend) DebugNotifyLastNetMap() { - b.mu.Lock() - nm := b.netMap - b.mu.Unlock() - - if nm != nil { + if nm := b.currentNode().NetMap(); nm != nil { b.send(ipn.Notify{NetMap: nm}) } } @@ -3155,7 +3216,8 @@ func (b *LocalBackend) DebugNotifyLastNetMap() { func (b *LocalBackend) DebugForceNetmapUpdate() { b.mu.Lock() defer b.mu.Unlock() - nm := b.netMap + // TODO(nickkhyl): this all should be done in [LocalBackend.setNetMapLocked]. + nm := b.currentNode().NetMap() b.e.SetNetworkMap(nm) if nm != nil { b.MagicConn().SetDERPMap(nm.DERPMap) @@ -3583,7 +3645,7 @@ func (b *LocalBackend) setVIPServicesTCPPortsInterceptedLocked(svcPorts map[tail b.shouldInterceptVIPServicesTCPPortAtomic.Store(func(netip.AddrPort) bool { return false }) return } - nm := b.netMap + nm := b.currentNode().NetMap() if nm == nil { b.logf("can't set intercept function for Service TCP Ports, netMap is nil") return @@ -3839,15 +3901,17 @@ func (b *LocalBackend) parseWgStatusLocked(s *wgengine.Status) (ret ipn.EngineSt // in Hostinfo. When the user preferences currently request "shields up" // mode, all inbound connections are refused, so services are not reported. // Otherwise, shouldUploadServices respects NetMap.CollectServices. +// TODO(nickkhyl): move this into [localNodeContext]? func (b *LocalBackend) shouldUploadServices() bool { b.mu.Lock() defer b.mu.Unlock() p := b.pm.CurrentPrefs() - if !p.Valid() || b.netMap == nil { + nm := b.currentNode().NetMap() + if !p.Valid() || nm == nil { return false // default to safest setting } - return !p.ShieldsUp() && b.netMap.CollectServices + return !p.ShieldsUp() && nm.CollectServices } // SetCurrentUser is used to implement support for multi-user systems (only @@ -4068,13 +4132,12 @@ func (b *LocalBackend) checkSSHPrefsLocked(p *ipn.Prefs) error { if envknob.SSHIgnoreTailnetPolicy() || envknob.SSHPolicyFile() != "" { return nil } - if b.netMap != nil { - if !b.netMap.HasCap(tailcfg.CapabilitySSH) { - if b.isDefaultServerLocked() { - return errors.New("Unable to enable local Tailscale SSH server; not enabled on Tailnet. See https://tailscale.com/s/ssh") - } - return errors.New("Unable to enable local Tailscale SSH server; not enabled on Tailnet.") + // Assume that we do have the SSH capability if don't have a netmap yet. + if !b.currentNode().SelfHasCapOr(tailcfg.CapabilitySSH, true) { + if b.isDefaultServerLocked() { + return errors.New("Unable to enable local Tailscale SSH server; not enabled on Tailnet. 
See https://tailscale.com/s/ssh") } + return errors.New("Unable to enable local Tailscale SSH server; not enabled on Tailnet.") } return nil } @@ -4086,7 +4149,7 @@ func (b *LocalBackend) sshOnButUnusableHealthCheckMessageLocked() (healthMessage if envknob.SSHIgnoreTailnetPolicy() || envknob.SSHPolicyFile() != "" { return "development SSH policy in use" } - nm := b.netMap + nm := b.currentNode().NetMap() if nm == nil { return "" } @@ -4413,7 +4476,8 @@ func (b *LocalBackend) shouldWireInactiveIngressLocked() bool { func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) ipn.PrefsView { defer unlock() - netMap := b.netMap + cn := b.currentNode() + netMap := cn.NetMap() b.setAtomicValuesFromPrefsLocked(newp.View()) oldp := b.pm.CurrentPrefs() @@ -4438,7 +4502,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) hostInfoChanged := !oldHi.Equal(newHi) cc := b.cc - b.updateFilterLocked(netMap, newp.View()) + b.updateFilterLocked(newp.View()) if oldp.ShouldSSHBeRunning() && !newp.ShouldSSHBeRunning() { if b.sshServer != nil { @@ -4462,13 +4526,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) } prefs := newp.View() - np := b.pm.CurrentProfile().NetworkProfile() - if netMap != nil { - np = ipn.NetworkProfile{ - MagicDNSName: b.netMap.MagicDNSSuffix(), - DomainName: b.netMap.DomainName(), - } - } + np := cmp.Or(cn.NetworkProfile(), b.pm.CurrentProfile().NetworkProfile()) if err := b.pm.SetPrefs(prefs, np); err != nil { b.logf("failed to save new controlclient state: %v", err) } else if prefs.WantRunning() { @@ -4712,9 +4770,13 @@ func extractPeerAPIPorts(services []tailcfg.Service) portPair { // NetMap returns the latest cached network map received from // controlclient, or nil if no network map was received yet. 
func (b *LocalBackend) NetMap() *netmap.NetworkMap { - b.mu.Lock() - defer b.mu.Unlock() - return b.netMap + return b.currentNode().NetMap() +} + +func (c *localNodeContext) NetMap() *netmap.NetworkMap { + c.mu.Lock() + defer c.mu.Unlock() + return c.netMap } func (b *LocalBackend) isEngineBlocked() bool { @@ -4843,12 +4905,13 @@ func (b *LocalBackend) authReconfig() { b.mu.Lock() blocked := b.blocked prefs := b.pm.CurrentPrefs() - nm := b.netMap + cn := b.currentNode() + nm := cn.NetMap() hasPAC := b.prevIfState.HasPAC() - disableSubnetsIfPAC := nm.HasCap(tailcfg.NodeAttrDisableSubnetsIfPAC) - userDialUseRoutes := nm.HasCap(tailcfg.NodeAttrUserDialUseRoutes) - dohURL, dohURLOK := exitNodeCanProxyDNS(nm, b.peers, prefs.ExitNodeID()) - dcfg := dnsConfigForNetmap(nm, b.peers, prefs, b.keyExpired, b.logf, version.OS()) + disableSubnetsIfPAC := cn.SelfHasCap(tailcfg.NodeAttrDisableSubnetsIfPAC) + userDialUseRoutes := cn.SelfHasCap(tailcfg.NodeAttrUserDialUseRoutes) + dohURL, dohURLOK := cn.exitNodeCanProxyDNS(prefs.ExitNodeID()) + dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, b.logf, version.OS()) // If the current node is an app connector, ensure the app connector machine is started b.reconfigAppConnectorLocked(nm, prefs) closing := b.shutdownCalled @@ -4955,6 +5018,12 @@ func shouldUseOneCGNATRoute(logf logger.Logf, mon *netmon.Monitor, controlKnobs return false } +func (c *localNodeContext) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { + c.mu.Lock() + defer c.mu.Unlock() + return dnsConfigForNetmap(c.netMap, c.peers, prefs, selfExpired, logf, versionOS) +} + // dnsConfigForNetmap returns a *dns.Config for the given netmap, // prefs, client OS version, and cloud hosting environment. // @@ -5245,7 +5314,9 @@ func (b *LocalBackend) initPeerAPIListener() { return } - if b.netMap == nil { + cn := b.currentNode() + nm := cn.NetMap() + if nm == nil { // We're called from authReconfig which checks that // netMap is non-nil, but if a concurrent Logout, // ResetForClientDisconnect, or Start happens when its @@ -5255,7 +5326,7 @@ func (b *LocalBackend) initPeerAPIListener() { return } - addrs := b.netMap.GetAddresses() + addrs := nm.GetAddresses() if addrs.Len() == len(b.peerAPIListeners) { allSame := true for i, pln := range b.peerAPIListeners { @@ -5273,8 +5344,8 @@ func (b *LocalBackend) initPeerAPIListener() { b.closePeerAPIListenersLocked() - selfNode := b.netMap.SelfNode - if !selfNode.Valid() || b.netMap.GetAddresses().Len() == 0 { + selfNode := nm.SelfNode + if !selfNode.Valid() || nm.GetAddresses().Len() == 0 { b.logf("[v1] initPeerAPIListener: no addresses in netmap") return } @@ -5568,6 +5639,7 @@ func (b *LocalBackend) enterState(newState ipn.State) { // enterStateLockedOnEntry is like enterState but requires b.mu be held to call // it, but it unlocks b.mu when done (via unlock, a once func). 
func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlockOnce) { + cn := b.currentNode() oldState := b.state b.state = newState prefs := b.pm.CurrentPrefs() @@ -5580,7 +5652,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock panic("[unexpected] use of main control server in integration test") } - netMap := b.netMap + netMap := cn.NetMap() activeLogin := b.activeLogin authURL := b.authURL if newState == ipn.Running { @@ -5685,7 +5757,8 @@ func (b *LocalBackend) NodeKey() key.NodePublic { func (b *LocalBackend) nextStateLocked() ipn.State { var ( cc = b.cc - netMap = b.netMap + cn = b.currentNode() + netMap = cn.NetMap() state = b.state blocked = b.blocked st = b.engineStatus @@ -6071,6 +6144,14 @@ func (b *LocalBackend) setAutoExitNodeIDLockedOnEntry(unlock unlockOnce) (newPre return newPrefs } +func (c *localNodeContext) SetNetMap(nm *netmap.NetworkMap) { + c.mu.Lock() + defer c.mu.Unlock() + c.netMap = nm + c.updateNodeByAddrLocked() + c.updatePeersLocked() +} + // setNetMapLocked updates the LocalBackend state to reflect the newly // received nm. If nm is nil, it resets all configuration as though // Tailscale is turned off. @@ -6083,8 +6164,7 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { if nm != nil { login = cmp.Or(profileFromView(nm.UserProfiles[nm.User()]).LoginName, "") } - b.netMap = nm - b.updatePeersFromNetmapLocked(nm) + b.currentNode().SetNetMap(nm) if login != b.activeLogin { b.logf("active login: %v", login) b.activeLogin = login @@ -6124,14 +6204,33 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(b.pm.CurrentPrefs()) b.ipVIPServiceMap = nm.GetIPVIPServiceMap() if nm == nil { - b.nodeByAddr = nil - // If there is no netmap, the client is going into a "turned off" // state so reset the metrics. b.metrics.approvedRoutes.Set(0) return } + if nm.SelfNode.Valid() { + var approved float64 + for _, route := range nm.SelfNode.AllowedIPs().All() { + if !views.SliceContains(nm.SelfNode.Addresses(), route) && !tsaddr.IsExitRoute(route) { + approved++ + } + } + b.metrics.approvedRoutes.Set(approved) + } + + b.updateDrivePeersLocked(nm) + b.driveNotifyCurrentSharesLocked() +} + +func (b *localNodeContext) updateNodeByAddrLocked() { + nm := b.netMap + if nm == nil { + b.nodeByAddr = nil + return + } + // Update the nodeByAddr index. 
if b.nodeByAddr == nil { b.nodeByAddr = map[netip.Addr]tailcfg.NodeID{} @@ -6149,14 +6248,6 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { } if nm.SelfNode.Valid() { addNode(nm.SelfNode) - - var approved float64 - for _, route := range nm.SelfNode.AllowedIPs().All() { - if !views.SliceContains(nm.SelfNode.Addresses(), route) && !tsaddr.IsExitRoute(route) { - approved++ - } - } - b.metrics.approvedRoutes.Set(approved) } for _, p := range nm.Peers { addNode(p) @@ -6167,12 +6258,10 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { delete(b.nodeByAddr, k) } } - - b.updateDrivePeersLocked(nm) - b.driveNotifyCurrentSharesLocked() } -func (b *LocalBackend) updatePeersFromNetmapLocked(nm *netmap.NetworkMap) { +func (b *localNodeContext) updatePeersLocked() { + nm := b.netMap if nm == nil { b.peers = nil return @@ -6291,7 +6380,7 @@ func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err } dt.b.mu.Lock() - selfNodeKey := dt.b.netMap.SelfNode.Key().ShortString() + selfNodeKey := dt.b.currentNode().Self().Key().ShortString() dt.b.mu.Unlock() n, _, ok := dt.b.WhoIs("tcp", netip.MustParseAddrPort(req.URL.Host)) shareNodeKey := "unknown" @@ -6366,7 +6455,7 @@ func (b *LocalBackend) setDebugLogsByCapabilityLocked(nm *netmap.NetworkMap) { // the method to only run the reset-logic and not reload the store from memory to ensure // foreground sessions are not removed if they are not saved on disk. func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) { - if b.netMap == nil || !b.netMap.SelfNode.Valid() || !prefs.Valid() || b.pm.CurrentProfile().ID() == "" { + if !b.currentNode().Self().Valid() || !prefs.Valid() || b.pm.CurrentProfile().ID() == "" { // We're not logged in, so we don't have a profile. // Don't try to load the serve config. b.lastServeConfJSON = mem.B(nil) @@ -6576,7 +6665,15 @@ func (b *LocalBackend) TestOnlyPublicKeys() (machineKey key.MachinePublic, nodeK return mk, nk } -func (b *LocalBackend) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { +// PeerHasCap reports whether the peer with the given Tailscale IP addresses +// contains the given capability string, with any value(s). +func (b *localNodeContext) PeerHasCap(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { + b.mu.Lock() + defer b.mu.Unlock() + return b.peerHasCapLocked(addr, wantCap) +} + +func (b *localNodeContext) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { return b.peerCapsLocked(addr).HasCapability(wantCap) } @@ -6640,6 +6737,19 @@ func peerAPIURL(ip netip.Addr, port uint16) string { return fmt.Sprintf("http://%v", netip.AddrPortFrom(ip, port)) } +func (c *localNodeContext) PeerHasPeerAPI(p tailcfg.NodeView) bool { + return c.PeerAPIBase(p) != "" +} + +// PeerAPIBase returns the "http://ip:port" URL base to reach peer's PeerAPI, +// or the empty string if the peer is invalid or doesn't support PeerAPI. +func (c *localNodeContext) PeerAPIBase(p tailcfg.NodeView) string { + c.mu.Lock() + nm := c.netMap + c.mu.Unlock() + return peerAPIBase(nm, p) +} + // peerAPIBase returns the "http://ip:port" URL base to reach peer's peerAPI. // It returns the empty string if the peer doesn't support the peerapi // or there's no matching address family based on the netmap's own addresses. @@ -6766,12 +6876,7 @@ func (b *LocalBackend) SetUDPGROForwarding() error { // DERPMap returns the current DERPMap in use, or nil if not connected. 
func (b *LocalBackend) DERPMap() *tailcfg.DERPMap { - b.mu.Lock() - defer b.mu.Unlock() - if b.netMap == nil { - return nil - } - return b.netMap.DERPMap + return b.currentNode().DERPMap() } // OfferingExitNode reports whether b is currently offering exit node @@ -6811,7 +6916,7 @@ func (b *LocalBackend) OfferingAppConnector() bool { func (b *LocalBackend) allowExitNodeDNSProxyToServeName(name string) bool { b.mu.Lock() defer b.mu.Unlock() - nm := b.netMap + nm := b.NetMap() if nm == nil { return false } @@ -6882,6 +6987,12 @@ func exitNodeCanProxyDNS(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg return "", false } +func (c *localNodeContext) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + return exitNodeCanProxyDNS(c.netMap, c.peers, exitNodeID) +} + // wireguardExitNodeDNSResolvers returns the DNS resolvers to use for a // WireGuard-only exit node, if it has resolver addresses. func wireguardExitNodeDNSResolvers(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg.NodeView, exitNodeID tailcfg.StableNodeID) ([]*dnstype.Resolver, bool) { @@ -6957,7 +7068,7 @@ func (n keyProvingNoiseRoundTripper) RoundTrip(req *http.Request) (*http.Respons b.mu.Lock() cc := b.ccAuto - if nm := b.netMap; nm != nil { + if nm := b.NetMap(); nm != nil { priv = nm.PrivateKey } b.mu.Unlock() @@ -7089,11 +7200,12 @@ func (b *LocalBackend) handleQuad100Port80Conn(w http.ResponseWriter, r *http.Re defer b.mu.Unlock() io.WriteString(w, "

        Tailscale

        \n") - if b.netMap == nil { + nm := b.currentNode().NetMap() + if nm == nil { io.WriteString(w, "No netmap.\n") return } - addrs := b.netMap.GetAddresses() + addrs := nm.GetAddresses() if addrs.Len() == 0 { io.WriteString(w, "No local addresses.\n") return @@ -7124,7 +7236,7 @@ func (b *LocalBackend) Doctor(ctx context.Context, logf logger.Logf) { // controlplane. checks = append(checks, doctor.CheckFunc("dns-resolvers", func(_ context.Context, logf logger.Logf) error { b.mu.Lock() - nm := b.netMap + nm := b.NetMap() b.mu.Unlock() if nm == nil { return nil @@ -7299,8 +7411,9 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err // down, so no need to do any work. return nil } + b.currentNodeAtomic.Store(newLocalNodeContext()) b.setNetMapLocked(nil) // Reset netmap. - b.updateFilterLocked(nil, ipn.PrefsView{}) + b.updateFilterLocked(ipn.PrefsView{}) // Reset the NetworkMap in the engine b.e.SetNetworkMap(new(netmap.NetworkMap)) if prevCC := b.resetControlClientLocked(); prevCC != nil { @@ -7663,7 +7776,7 @@ var ErrNoPreferredDERP = errors.New("no preferred DERP, try again later") func (b *LocalBackend) suggestExitNodeLocked(netMap *netmap.NetworkMap) (response apitype.ExitNodeSuggestionResponse, err error) { // netMap is an optional netmap to use that overrides b.netMap (needed for SetControlClientStatus before b.netMap is updated). If netMap is nil, then b.netMap is used. if netMap == nil { - netMap = b.netMap + netMap = b.NetMap() } lastReport := b.MagicConn().GetLastNetcheckReport(b.ctx) prevSuggestion := b.lastSuggestedExitNode @@ -7988,21 +8101,19 @@ func (b *LocalBackend) startAutoUpdate(logPrefix string) (retErr error) { // rules that require a source IP to have a certain node capability. // // TODO(bradfitz): optimize this later if/when it matters. +// TODO(nickkhyl): move this into [localNodeContext] along with [LocalBackend.updateFilterLocked]. func (b *LocalBackend) srcIPHasCapForFilter(srcIP netip.Addr, cap tailcfg.NodeCapability) bool { if cap == "" { // Shouldn't happen, but just in case. // But the empty cap also shouldn't be found in Node.CapMap. return false } - - b.mu.Lock() - defer b.mu.Unlock() - - nodeID, ok := b.nodeByAddr[srcIP] + cn := b.currentNode() + nodeID, ok := cn.NodeByAddr(srcIP) if !ok { return false } - n, ok := b.peers[nodeID] + n, ok := cn.PeerByID(nodeID) if !ok { return false } diff --git a/ipn/ipnlocal/local_node_context.go b/ipn/ipnlocal/local_node_context.go new file mode 100644 index 000000000..871880893 --- /dev/null +++ b/ipn/ipnlocal/local_node_context.go @@ -0,0 +1,207 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnlocal + +import ( + "net/netip" + "sync" + "sync/atomic" + + "go4.org/netipx" + "tailscale.com/ipn" + "tailscale.com/tailcfg" + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/types/netmap" + "tailscale.com/util/slicesx" + "tailscale.com/wgengine/filter" +) + +// localNodeContext holds the [LocalBackend]'s context tied to a local node (usually the current one). +// +// Its exported methods are safe for concurrent use, but the struct is not a snapshot of state at a given moment; +// its state can change between calls. For example, asking for the same value (e.g., netmap or prefs) twice +// may return different results. Returned values are immutable and safe for concurrent use. 
+// +// If both the [LocalBackend]'s internal mutex and the [localNodeContext] mutex must be held at the same time, +// the [LocalBackend] mutex must be acquired first. See the comment on the [LocalBackend] field for more details. +// +// Two pointers to different [localNodeContext] instances represent different local nodes. +// However, there's currently a bug where a new [localNodeContext] might not be created +// during an implicit node switch (see tailscale/corp#28014). + +// In the future, we might want to include at least the following in this struct (in addition to the current fields). +// However, not everything should be exported or otherwise made available to the outside world (e.g. [ipnext] extensions, +// peer API handlers, etc.). +// - [ipn.State]: when the LocalBackend switches to a different [localNodeContext], it can update the state of the old one. +// - [ipn.LoginProfileView] and [ipn.Prefs]: we should update them when the [profileManager] reports changes to them. +// In the future, [profileManager] (and the corresponding methods of the [LocalBackend]) can be made optional, +// and something else could be used to set them once or update them as needed. +// - [tailcfg.HostinfoView]: it includes certain fields that are tied to the current profile/node/prefs. We should also +// update to build it once instead of mutating it in twelvety different places. +// - [filter.Filter] (normal and jailed, along with the filterHash): the localNodeContext could have a method to (re-)build +// the filter for the current netmap/prefs (see [LocalBackend.updateFilterLocked]), and it needs to track the current +// filters and their hash. +// - Fields related to a requested or required (re-)auth: authURL, authURLTime, authActor, keyExpired, etc. +// - [controlclient.Client]/[*controlclient.Auto]: the current control client. It is ties to a node identity. +// - [tkaState]: it is tied to the current profile / node. +// - Fields related to scheduled node expiration: nmExpiryTimer, numClientStatusCalls, [expiryManager]. +// +// It should not include any fields used by specific features that don't belong in [LocalBackend]. +// Even if they're tied to the local node, instead of moving them here, we should extract the entire feature +// into a separate package and have it install proper hooks. +type localNodeContext struct { + // filterAtomic is a stateful packet filter. Immutable once created, but can be + // replaced with a new one. + filterAtomic atomic.Pointer[filter.Filter] + + // TODO(nickkhyl): maybe use sync.RWMutex? + mu sync.Mutex // protects the following fields + + // NetMap is the most recently set full netmap from the controlclient. + // It can't be mutated in place once set. Because it can't be mutated in place, + // delta updates from the control server don't apply to it. Instead, use + // the peers map to get up-to-date information on the state of peers. + // In general, avoid using the netMap.Peers slice. We'd like it to go away + // as of 2023-09-17. + // TODO(nickkhyl): make it an atomic pointer to avoid the need for a mutex? + netMap *netmap.NetworkMap + + // peers is the set of current peers and their current values after applying + // delta node mutations as they come in (with mu held). The map values can be + // given out to callers, but the map itself can be mutated in place (with mu held) + // and must not escape the [localNodeContext]. + peers map[tailcfg.NodeID]tailcfg.NodeView + + // nodeByAddr maps nodes' own addresses (excluding subnet routes) to node IDs. 
+ // It is mutated in place (with mu held) and must not escape the [localNodeContext]. + nodeByAddr map[netip.Addr]tailcfg.NodeID +} + +func newLocalNodeContext() *localNodeContext { + cn := &localNodeContext{} + // Default filter blocks everything and logs nothing. + noneFilter := filter.NewAllowNone(logger.Discard, &netipx.IPSet{}) + cn.filterAtomic.Store(noneFilter) + return cn +} + +func (c *localNodeContext) Self() tailcfg.NodeView { + c.mu.Lock() + defer c.mu.Unlock() + if c.netMap == nil { + return tailcfg.NodeView{} + } + return c.netMap.SelfNode +} + +func (c *localNodeContext) SelfUserID() tailcfg.UserID { + self := c.Self() + if !self.Valid() { + return 0 + } + return self.User() +} + +// SelfHasCap reports whether the specified capability was granted to the self node in the most recent netmap. +func (c *localNodeContext) SelfHasCap(wantCap tailcfg.NodeCapability) bool { + return c.SelfHasCapOr(wantCap, false) +} + +// SelfHasCapOr is like [localNodeContext.SelfHasCap], but returns the specified default value +// if the netmap is not available yet. +func (c *localNodeContext) SelfHasCapOr(wantCap tailcfg.NodeCapability, def bool) bool { + c.mu.Lock() + defer c.mu.Unlock() + if c.netMap == nil { + return def + } + return c.netMap.AllCaps.Contains(wantCap) +} + +func (c *localNodeContext) NetworkProfile() ipn.NetworkProfile { + c.mu.Lock() + defer c.mu.Unlock() + return ipn.NetworkProfile{ + // These are ok to call with nil netMap. + MagicDNSName: c.netMap.MagicDNSSuffix(), + DomainName: c.netMap.DomainName(), + } +} + +// TODO(nickkhyl): update it to return a [tailcfg.DERPMapView]? +func (c *localNodeContext) DERPMap() *tailcfg.DERPMap { + c.mu.Lock() + defer c.mu.Unlock() + if c.netMap == nil { + return nil + } + return c.netMap.DERPMap +} + +func (c *localNodeContext) NodeByAddr(ip netip.Addr) (_ tailcfg.NodeID, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + nid, ok := c.nodeByAddr[ip] + return nid, ok +} + +func (c *localNodeContext) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + if c.netMap == nil { + return 0, false + } + if self := c.netMap.SelfNode; self.Valid() && self.Key() == k { + return self.ID(), true + } + // TODO(bradfitz,nickkhyl): add nodeByKey like nodeByAddr instead of walking peers. + for _, n := range c.peers { + if n.Key() == k { + return n.ID(), true + } + } + return 0, false +} + +func (c *localNodeContext) PeerByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + n, ok := c.peers[id] + return n, ok +} + +func (c *localNodeContext) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok bool) { + c.mu.Lock() + nm := c.netMap + c.mu.Unlock() + if nm == nil { + return tailcfg.UserProfileView{}, false + } + u, ok := nm.UserProfiles[id] + return u, ok +} + +// Peers returns all the current peers in an undefined order. +func (c *localNodeContext) Peers() []tailcfg.NodeView { + c.mu.Lock() + defer c.mu.Unlock() + return slicesx.MapValues(c.peers) +} + +// unlockedNodesPermitted reports whether any peer with theUnsignedPeerAPIOnly bool set true has any of its allowed IPs +// in the specified packet filter. +// +// TODO(nickkhyl): It is here temporarily until we can move the whole [LocalBackend.updateFilterLocked] here, +// but change it so it builds and returns a filter for the current netmap/prefs instead of re-configuring the engine filter. +// Something like (*localNodeContext).RebuildFilters() (filter, jailedFilter *filter.Filter, changed bool) perhaps? 
+func (c *localNodeContext) unlockedNodesPermitted(packetFilter []filter.Match) bool { + c.mu.Lock() + defer c.mu.Unlock() + return packetFilterPermitsUnlockedNodes(c.peers, packetFilter) +} + +func (c *localNodeContext) filter() *filter.Filter { + return c.filterAtomic.Load() +} diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 3b9e08638..94b5d9522 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -920,15 +920,15 @@ func TestWatchNotificationsCallbacks(t *testing.T) { // tests LocalBackend.updateNetmapDeltaLocked func TestUpdateNetmapDelta(t *testing.T) { b := newTestLocalBackend(t) - if b.updateNetmapDeltaLocked(nil) { + if b.currentNode().UpdateNetmapDelta(nil) { t.Errorf("updateNetmapDeltaLocked() = true, want false with nil netmap") } - b.netMap = &netmap.NetworkMap{} + nm := &netmap.NetworkMap{} for i := range 5 { - b.netMap.Peers = append(b.netMap.Peers, (&tailcfg.Node{ID: (tailcfg.NodeID(i) + 1)}).View()) + nm.Peers = append(nm.Peers, (&tailcfg.Node{ID: (tailcfg.NodeID(i) + 1)}).View()) } - b.updatePeersFromNetmapLocked(b.netMap) + b.currentNode().SetNetMap(nm) someTime := time.Unix(123, 0) muts, ok := netmap.MutationsFromMapResponse(&tailcfg.MapResponse{ @@ -955,7 +955,7 @@ func TestUpdateNetmapDelta(t *testing.T) { t.Fatal("netmap.MutationsFromMapResponse failed") } - if !b.updateNetmapDeltaLocked(muts) { + if !b.currentNode().UpdateNetmapDelta(muts) { t.Fatalf("updateNetmapDeltaLocked() = false, want true with new netmap") } @@ -978,9 +978,9 @@ func TestUpdateNetmapDelta(t *testing.T) { }, } for _, want := range wants { - gotv, ok := b.peers[want.ID] + gotv, ok := b.currentNode().PeerByID(want.ID) if !ok { - t.Errorf("netmap.Peer %v missing from b.peers", want.ID) + t.Errorf("netmap.Peer %v missing from b.profile.Peers", want.ID) continue } got := gotv.AsStruct() @@ -1398,7 +1398,7 @@ func TestCoveredRouteRangeNoDefault(t *testing.T) { func TestReconfigureAppConnector(t *testing.T) { b := newTestBackend(t) - b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs) + b.reconfigAppConnectorLocked(b.NetMap(), b.pm.prefs) if b.appConnector != nil { t.Fatal("unexpected app connector") } @@ -1411,7 +1411,7 @@ func TestReconfigureAppConnector(t *testing.T) { }, AppConnectorSet: true, }) - b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs) + b.reconfigAppConnectorLocked(b.NetMap(), b.pm.prefs) if b.appConnector == nil { t.Fatal("expected app connector") } @@ -1422,15 +1422,19 @@ func TestReconfigureAppConnector(t *testing.T) { "connectors": ["tag:example"] }` - b.netMap.SelfNode = (&tailcfg.Node{ - Name: "example.ts.net", - Tags: []string{"tag:example"}, - CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ - "tailscale.com/app-connectors": {tailcfg.RawMessage(appCfg)}, - }), - }).View() + nm := &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Name: "example.ts.net", + Tags: []string{"tag:example"}, + CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ + "tailscale.com/app-connectors": {tailcfg.RawMessage(appCfg)}, + }), + }).View(), + } - b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs) + b.currentNode().SetNetMap(nm) + + b.reconfigAppConnectorLocked(b.NetMap(), b.pm.prefs) b.appConnector.Wait(context.Background()) want := []string{"example.com"} @@ -1450,7 +1454,7 @@ func TestReconfigureAppConnector(t *testing.T) { }, AppConnectorSet: true, }) - b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs) + b.reconfigAppConnectorLocked(b.NetMap(), b.pm.prefs) if b.appConnector != nil { 
t.Fatal("expected no app connector") } @@ -1482,7 +1486,7 @@ func TestBackfillAppConnectorRoutes(t *testing.T) { }); err != nil { t.Fatal(err) } - b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs) + b.reconfigAppConnectorLocked(b.NetMap(), b.pm.prefs) // Smoke check that AdvertiseRoutes doesn't have the test IP. ip := netip.MustParseAddr("1.2.3.4") @@ -1503,7 +1507,7 @@ func TestBackfillAppConnectorRoutes(t *testing.T) { // Mimic b.authReconfigure for the app connector bits. b.mu.Lock() - b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs) + b.reconfigAppConnectorLocked(b.NetMap(), b.pm.prefs) b.mu.Unlock() b.readvertiseAppConnectorRoutes() @@ -1819,7 +1823,7 @@ func TestSetExitNodeIDPolicy(t *testing.T) { } pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) pm.prefs = test.prefs.View() - b.netMap = test.nm + b.currentNode().SetNetMap(test.nm) b.pm = pm b.lastSuggestedExitNode = test.lastSuggestedExitNode @@ -1946,8 +1950,7 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { b := newTestLocalBackend(t) - b.netMap = tt.netmap - b.updatePeersFromNetmapLocked(b.netMap) + b.currentNode().SetNetMap(tt.netmap) b.lastSuggestedExitNode = tt.lastSuggestedExitNode b.sys.MagicSock.Get().SetLastNetcheckReportForTest(b.ctx, tt.report) b.SetPrefsForTest(b.pm.CurrentPrefs().AsStruct()) @@ -2065,14 +2068,14 @@ func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { }, }, } - b.netMap = &netmap.NetworkMap{ + b.currentNode().SetNetMap(&netmap.NetworkMap{ SelfNode: selfNode.View(), Peers: []tailcfg.NodeView{ peer1, peer2, }, DERPMap: defaultDERPMap, - } + }) b.lastSuggestedExitNode = peer1.StableID() b.SetPrefsForTest(b.pm.CurrentPrefs().AsStruct()) if eid := b.Prefs().ExitNodeID(); eid != peer1.StableID() { @@ -2137,7 +2140,7 @@ func TestSetControlClientStatusAutoExitNode(t *testing.T) { syspolicy.ExitNodeID, "auto:any", )) syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) - b.netMap = nm + b.currentNode().SetNetMap(nm) b.lastSuggestedExitNode = peer1.StableID() b.sys.MagicSock.Get().SetLastNetcheckReportForTest(b.ctx, report) b.SetPrefsForTest(b.pm.CurrentPrefs().AsStruct()) @@ -3068,9 +3071,11 @@ func TestDriveManageShares(t *testing.T) { b.driveSetSharesLocked(tt.existing) } if !tt.disabled { - self := b.netMap.SelfNode.AsStruct() + nm := ptr.To(*b.currentNode().NetMap()) + self := nm.SelfNode.AsStruct() self.CapMap = tailcfg.NodeCapMap{tailcfg.NodeAttrsTaildriveShare: nil} - b.netMap.SelfNode = self.View() + nm.SelfNode = self.View() + b.currentNode().SetNetMap(nm) b.sys.Set(driveimpl.NewFileSystemForRemote(b.logf)) } b.mu.Unlock() @@ -5323,7 +5328,7 @@ func TestSrcCapPacketFilter(t *testing.T) { }}, }) - f := lb.GetFilterForTest() + f := lb.currentNode().GetFilterForTest() res := f.Check(netip.MustParseAddr("2.2.2.2"), netip.MustParseAddr("1.1.1.1"), 22, ipproto.TCP) if res != filter.Accept { t.Errorf("Check(2.2.2.2, ...) 
= %s, want %s", res, filter.Accept) diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index e1583dab7..36d39a465 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -516,9 +516,10 @@ func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus { var selfAuthorized bool nodeKeySignature := &tka.NodeKeySignature{} - if b.netMap != nil { - selfAuthorized = b.tka.authority.NodeKeyAuthorized(b.netMap.SelfNode.Key(), b.netMap.SelfNode.KeySignature().AsSlice()) == nil - if err := nodeKeySignature.Unserialize(b.netMap.SelfNode.KeySignature().AsSlice()); err != nil { + nm := b.currentNode().NetMap() + if nm != nil { + selfAuthorized = b.tka.authority.NodeKeyAuthorized(nm.SelfNode.Key(), nm.SelfNode.KeySignature().AsSlice()) == nil + if err := nodeKeySignature.Unserialize(nm.SelfNode.KeySignature().AsSlice()); err != nil { b.logf("failed to decode self node key signature: %v", err) } } @@ -539,9 +540,9 @@ func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus { } var visible []*ipnstate.TKAPeer - if b.netMap != nil { - visible = make([]*ipnstate.TKAPeer, len(b.netMap.Peers)) - for i, p := range b.netMap.Peers { + if nm != nil { + visible = make([]*ipnstate.TKAPeer, len(nm.Peers)) + for i, p := range nm.Peers { s := tkaStateFromPeer(p) visible[i] = &s } @@ -702,12 +703,10 @@ func (b *LocalBackend) NetworkLockForceLocalDisable() error { id1, id2 := b.tka.authority.StateIDs() stateID := fmt.Sprintf("%d:%d", id1, id2) + cn := b.currentNode() newPrefs := b.pm.CurrentPrefs().AsStruct().Clone() // .Persist should always be initialized here. newPrefs.Persist.DisallowedTKAStateIDs = append(newPrefs.Persist.DisallowedTKAStateIDs, stateID) - if err := b.pm.SetPrefs(newPrefs.View(), ipn.NetworkProfile{ - MagicDNSName: b.netMap.MagicDNSSuffix(), - DomainName: b.netMap.DomainName(), - }); err != nil { + if err := b.pm.SetPrefs(newPrefs.View(), cn.NetworkProfile()); err != nil { return fmt.Errorf("saving prefs: %w", err) } diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 87437daf8..2b4c07749 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -770,7 +770,7 @@ func (h *peerAPIHandler) replyToDNSQueries() bool { // but an app connector explicitly adds 0.0.0.0/32 (and the // IPv6 equivalent) to make this work (see updateFilterLocked // in LocalBackend). 
- f := b.filterAtomic.Load() + f := b.currentNode().filter() if f == nil { return false } diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index 77c442060..975ed38bb 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -162,9 +162,9 @@ func TestHandlePeerAPI(t *testing.T) { lb := &LocalBackend{ logf: e.logBuf.Logf, capFileSharing: tt.capSharing, - netMap: &netmap.NetworkMap{SelfNode: selfNode.View()}, clock: &tstest.Clock{}, } + lb.currentNode().SetNetMap(&netmap.NetworkMap{SelfNode: selfNode.View()}) e.ph = &peerAPIHandler{ isSelf: tt.isSelf, selfNode: selfNode.View(), diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index cc0d219d8..44d63fe54 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -232,7 +232,7 @@ func (b *LocalBackend) updateServeTCPPortNetMapAddrListenersLocked(ports []uint1 } } - nm := b.netMap + nm := b.NetMap() if nm == nil { b.logf("netMap is nil") return @@ -282,7 +282,7 @@ func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string } } - nm := b.netMap + nm := b.NetMap() if nm == nil { return errors.New("netMap is nil") } diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index 0279ea9be..b9370f877 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -320,7 +320,7 @@ func TestServeConfigServices(t *testing.T) { t.Fatal(err) } - b.netMap = &netmap.NetworkMap{ + b.currentNode().SetNetMap(&netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ Name: "example.ts.net", CapMap: tailcfg.NodeCapMap{ @@ -334,7 +334,7 @@ func TestServeConfigServices(t *testing.T) { ProfilePicURL: "https://example.com/photo.jpg", }).View(), }, - } + }) tests := []struct { name string @@ -902,7 +902,7 @@ func newTestBackend(t *testing.T) *LocalBackend { pm.currentProfile = (&ipn.LoginProfile{ID: "id0"}).View() b.pm = pm - b.netMap = &netmap.NetworkMap{ + b.currentNode().SetNetMap(&netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ Name: "example.ts.net", }).View(), @@ -913,24 +913,26 @@ func newTestBackend(t *testing.T) *LocalBackend { ProfilePicURL: "https://example.com/photo.jpg", }).View(), }, - } - b.peers = map[tailcfg.NodeID]tailcfg.NodeView{ - 152: (&tailcfg.Node{ - ID: 152, - ComputedName: "some-peer", - User: tailcfg.UserID(1), - }).View(), - 153: (&tailcfg.Node{ - ID: 153, - ComputedName: "some-tagged-peer", - Tags: []string{"tag:server", "tag:test"}, - User: tailcfg.UserID(1), - }).View(), - } - b.nodeByAddr = map[netip.Addr]tailcfg.NodeID{ - netip.MustParseAddr("100.150.151.152"): 152, - netip.MustParseAddr("100.150.151.153"): 153, - } + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + ID: 152, + ComputedName: "some-peer", + User: tailcfg.UserID(1), + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.150.151.152/32"), + }, + }).View(), + (&tailcfg.Node{ + ID: 153, + ComputedName: "some-tagged-peer", + Tags: []string{"tag:server", "tag:test"}, + User: tailcfg.UserID(1), + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.150.151.153/32"), + }, + }).View(), + }, + }) return b } diff --git a/ipn/ipnlocal/taildrop.go b/ipn/ipnlocal/taildrop.go index 807304f30..17ca40926 100644 --- a/ipn/ipnlocal/taildrop.go +++ b/ipn/ipnlocal/taildrop.go @@ -179,23 +179,32 @@ func (b *LocalBackend) HasCapFileSharing() bool { func (b *LocalBackend) FileTargets() ([]*apitype.FileTarget, error) { var ret []*apitype.FileTarget - b.mu.Lock() + b.mu.Lock() // for b.{state,capFileSharing} defer b.mu.Unlock() - nm := b.netMap + cn := b.currentNode() + nm := cn.NetMap() + self := 
cn.SelfUserID() if b.state != ipn.Running || nm == nil { return nil, errors.New("not connected to the tailnet") } if !b.capFileSharing { return nil, errors.New("file sharing not enabled by Tailscale admin") } - for _, p := range b.peers { - if !b.peerIsTaildropTargetLocked(p) { - continue + peers := cn.AppendMatchingPeers(nil, func(p tailcfg.NodeView) bool { + if !p.Valid() || p.Hostinfo().OS() == "tvOS" { + return false } - if p.Hostinfo().OS() == "tvOS" { - continue + if self != p.User() { + return false + } + if p.Addresses().Len() != 0 && cn.PeerHasCap(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) { + // Explicitly noted in the netmap ACL caps as a target. + return true } - peerAPI := peerAPIBase(b.netMap, p) + return false + }) + for _, p := range peers { + peerAPI := cn.PeerAPIBase(p) if peerAPI == "" { continue } @@ -214,7 +223,9 @@ func (b *LocalBackend) taildropTargetStatus(p tailcfg.NodeView) ipnstate.Taildro if b.state != ipn.Running { return ipnstate.TaildropTargetIpnStateNotRunning } - if b.netMap == nil { + cn := b.currentNode() + nm := cn.NetMap() + if nm == nil { return ipnstate.TaildropTargetNoNetmapAvailable } if !b.capFileSharing { @@ -228,10 +239,10 @@ func (b *LocalBackend) taildropTargetStatus(p tailcfg.NodeView) ipnstate.Taildro if !p.Valid() { return ipnstate.TaildropTargetNoPeerInfo } - if b.netMap.User() != p.User() { + if nm.User() != p.User() { // Different user must have the explicit file sharing target capability - if p.Addresses().Len() == 0 || - !b.peerHasCapLocked(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) { + if p.Addresses().Len() == 0 || !cn.PeerHasCap(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) { + // Explicitly noted in the netmap ACL caps as a target. return ipnstate.TaildropTargetOwnedByOtherUser } } @@ -239,32 +250,12 @@ func (b *LocalBackend) taildropTargetStatus(p tailcfg.NodeView) ipnstate.Taildro if p.Hostinfo().OS() == "tvOS" { return ipnstate.TaildropTargetUnsupportedOS } - if peerAPIBase(b.netMap, p) == "" { + if !cn.PeerHasPeerAPI(p) { return ipnstate.TaildropTargetNoPeerAPI } return ipnstate.TaildropTargetAvailable } -// peerIsTaildropTargetLocked reports whether p is a valid Taildrop file -// recipient from this node according to its ownership and the capabilities in -// the netmap. -// -// b.mu must be locked. -func (b *LocalBackend) peerIsTaildropTargetLocked(p tailcfg.NodeView) bool { - if b.netMap == nil || !p.Valid() { - return false - } - if b.netMap.User() == p.User() { - return true - } - if p.Addresses().Len() > 0 && - b.peerHasCapLocked(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) { - // Explicitly noted in the netmap ACL caps as a target. - return true - } - return false -} - // UpdateOutgoingFiles updates b.outgoingFiles to reflect the given updates and // sends an ipn.Notify with the full list of outgoingFiles. 
func (b *LocalBackend) UpdateOutgoingFiles(updates map[string]*ipn.OutgoingFile) { diff --git a/ipn/ipnlocal/taildrop_test.go b/ipn/ipnlocal/taildrop_test.go index 9871d5e33..a5166e8a3 100644 --- a/ipn/ipnlocal/taildrop_test.go +++ b/ipn/ipnlocal/taildrop_test.go @@ -13,7 +13,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tstest/deptest" "tailscale.com/types/netmap" - "tailscale.com/util/mak" ) func TestFileTargets(t *testing.T) { @@ -23,7 +22,7 @@ func TestFileTargets(t *testing.T) { t.Errorf("before connect: got %q; want %q", got, want) } - b.netMap = new(netmap.NetworkMap) + b.currentNode().SetNetMap(new(netmap.NetworkMap)) _, err = b.FileTargets() if got, want := fmt.Sprint(err), "not connected to the tailnet"; got != want { t.Errorf("non-running netmap: got %q; want %q", got, want) @@ -44,16 +43,15 @@ func TestFileTargets(t *testing.T) { t.Fatalf("unexpected %d peers", len(got)) } - var peerMap map[tailcfg.NodeID]tailcfg.NodeView - mak.NonNil(&peerMap) - var nodeID tailcfg.NodeID - nodeID = 1234 - peer := &tailcfg.Node{ - ID: 1234, - Hostinfo: (&tailcfg.Hostinfo{OS: "tvOS"}).View(), + nm := &netmap.NetworkMap{ + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + ID: 1234, + Hostinfo: (&tailcfg.Hostinfo{OS: "tvOS"}).View(), + }).View(), + }, } - peerMap[nodeID] = peer.View() - b.peers = peerMap + b.currentNode().SetNetMap(nm) got, err = b.FileTargets() if err != nil { t.Fatal(err) diff --git a/ipn/ipnlocal/web_client.go b/ipn/ipnlocal/web_client.go index 219a4c535..18145d1bb 100644 --- a/ipn/ipnlocal/web_client.go +++ b/ipn/ipnlocal/web_client.go @@ -116,11 +116,12 @@ func (b *LocalBackend) handleWebClientConn(c net.Conn) error { // for each of the local device's Tailscale IP addresses. This is needed to properly // route local traffic when using kernel networking mode. func (b *LocalBackend) updateWebClientListenersLocked() { - if b.netMap == nil { + nm := b.currentNode().NetMap() + if nm == nil { return } - addrs := b.netMap.GetAddresses() + addrs := nm.GetAddresses() for _, pfx := range addrs.All() { addrPort := netip.AddrPortFrom(pfx.Addr(), webClientPort) if _, ok := b.webClientListeners[addrPort]; ok { From f468919f95717870aecfe836e00c0569b67d5015 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 25 Apr 2025 13:02:15 -0700 Subject: [PATCH 0795/1708] util/mak: delete long-deprecated, unused, pre-generics NonNil func Updates #5590 (which deprecated it, 2.5 years ago) Change-Id: I137e82855ee33d91e5639b909f7ca64e237ed6ba Signed-off-by: Brad Fitzpatrick --- util/mak/mak.go | 34 ---------------------------------- util/mak/mak_test.go | 29 ----------------------------- 2 files changed, 63 deletions(-) diff --git a/util/mak/mak.go b/util/mak/mak.go index b421fb0ed..fbdb40b0a 100644 --- a/util/mak/mak.go +++ b/util/mak/mak.go @@ -5,11 +5,6 @@ // things, notably to maps, but also slices. package mak -import ( - "fmt" - "reflect" -) - // Set populates an entry in a map, making the map if necessary. // // That is, it assigns (*m)[k] = v, making *m if it was nil. @@ -20,35 +15,6 @@ func Set[K comparable, V any, T ~map[K]V](m *T, k K, v V) { (*m)[k] = v } -// NonNil takes a pointer to a Go data structure -// (currently only a slice or a map) and makes sure it's non-nil for -// JSON serialization. (In particular, JavaScript clients usually want -// the field to be defined after they decode the JSON.) -// -// Deprecated: use NonNilSliceForJSON or NonNilMapForJSON instead. 
-func NonNil(ptr any) { - if ptr == nil { - panic("nil interface") - } - rv := reflect.ValueOf(ptr) - if rv.Kind() != reflect.Ptr { - panic(fmt.Sprintf("kind %v, not Ptr", rv.Kind())) - } - if rv.Pointer() == 0 { - panic("nil pointer") - } - rv = rv.Elem() - if rv.Pointer() != 0 { - return - } - switch rv.Type().Kind() { - case reflect.Slice: - rv.Set(reflect.MakeSlice(rv.Type(), 0, 0)) - case reflect.Map: - rv.Set(reflect.MakeMap(rv.Type())) - } -} - // NonNilSliceForJSON makes sure that *slicePtr is non-nil so it will // won't be omitted from JSON serialization and possibly confuse JavaScript // clients expecting it to be present. diff --git a/util/mak/mak_test.go b/util/mak/mak_test.go index 4de499a9d..e47839a3c 100644 --- a/util/mak/mak_test.go +++ b/util/mak/mak_test.go @@ -40,35 +40,6 @@ func TestSet(t *testing.T) { }) } -func TestNonNil(t *testing.T) { - var s []string - NonNil(&s) - if len(s) != 0 { - t.Errorf("slice len = %d; want 0", len(s)) - } - if s == nil { - t.Error("slice still nil") - } - - s = append(s, "foo") - NonNil(&s) - if len(s) != 1 { - t.Errorf("len = %d; want 1", len(s)) - } - if s[0] != "foo" { - t.Errorf("value = %q; want foo", s) - } - - var m map[string]string - NonNil(&m) - if len(m) != 0 { - t.Errorf("map len = %d; want 0", len(s)) - } - if m == nil { - t.Error("map still nil") - } -} - func TestNonNilMapForJSON(t *testing.T) { type M map[string]int var m M From 0cfd643d9515fcf52f3e47ca3f4559e39943495c Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 25 Apr 2025 14:53:48 -0500 Subject: [PATCH 0796/1708] ipn/ipnlocal: update profileManager to use SwitchToProfile when switching to the initial profile This further minimizes the number of places where the profile manager updates the current profile and prefs. We also document a scenario where an implicit profile switch can occur. We should be able to address it after (partially?) inverting the dependency between LocalBackend and profileManager, so that profileManager notifies LocalBackend of profile changes instead of the other way around. Updates tailscale/corp#28014 Updates #12614 Signed-off-by: Nick Khyl --- ipn/ipnlocal/profiles.go | 93 +++++++++++++++++----------------------- 1 file changed, 40 insertions(+), 53 deletions(-) diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index eb01da705..b75e3aeb5 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -40,8 +40,8 @@ type profileManager struct { currentUserID ipn.WindowsUserID knownProfiles map[ipn.ProfileID]ipn.LoginProfileView // always non-nil - currentProfile ipn.LoginProfileView // always Valid. - prefs ipn.PrefsView // always Valid. + currentProfile ipn.LoginProfileView // always Valid (once [newProfileManager] returns). + prefs ipn.PrefsView // always Valid (once [newProfileManager] returns). // extHost is the bridge between [profileManager] and the registered [ipnext.Extension]s. // It may be nil in tests. A nil pointer is a valid, no-op host. @@ -111,6 +111,9 @@ func (pm *profileManager) SetCurrentUserID(uid ipn.WindowsUserID) { // // It returns the current profile and whether the call resulted in a profile change, // or an error if the specified profile does not exist or its prefs could not be loaded. +// +// It may be called during [profileManager] initialization before [newProfileManager] returns +// and must check whether pm.currentProfile is Valid before using it. 
func (pm *profileManager) SwitchToProfile(profile ipn.LoginProfileView) (cp ipn.LoginProfileView, changed bool, err error) { prefs := defaultPrefs switch { @@ -118,7 +121,7 @@ func (pm *profileManager) SwitchToProfile(profile ipn.LoginProfileView) (cp ipn. // Create a new profile that is not associated with any user. profile = pm.NewProfileForUser("") case profile == pm.currentProfile, - profile.ID() != "" && profile.ID() == pm.currentProfile.ID(), + profile.ID() != "" && pm.currentProfile.Valid() && profile.ID() == pm.currentProfile.ID(), profile.ID() == "" && profile.Equals(pm.currentProfile) && prefs.Equals(pm.prefs): // The profile is already the current profile; no need to switch. // @@ -176,7 +179,7 @@ func (pm *profileManager) DefaultUserProfile(uid ipn.WindowsUserID) ipn.LoginPro if err == ipn.ErrStateNotExist || len(b) == 0 { if runtime.GOOS == "windows" { pm.dlogf("DefaultUserProfile: windows: migrating from legacy preferences") - profile, err := pm.migrateFromLegacyPrefs(uid, false) + profile, err := pm.migrateFromLegacyPrefs(uid) if err == nil { return profile } @@ -328,6 +331,23 @@ func (pm *profileManager) SetPrefs(prefsIn ipn.PrefsView, np ipn.NetworkProfile) delete(pm.knownProfiles, p.ID()) } } + // TODO(nickkhyl): Revisit how we handle implicit switching to a different profile, + // which occurs when prefsIn represents a node/user different from that of the + // currentProfile. It happens when a login (either reauth or user-initiated login) + // is completed with a different node/user identity than the one currently in use. + // + // Currently, we overwrite the existing profile prefs with the ones from prefsIn, + // where prefsIn is the previous profile's prefs with an updated Persist, LoggedOut, + // WantRunning and possibly other fields. This may not be the desired behavior. + // + // Additionally, LocalBackend doesn't treat it as a proper profile switch, meaning that + // [LocalBackend.resetForProfileChangeLockedOnEntry] is not called and certain + // node/profile-specific state may not be reset as expected. + // + // However, LocalBackend notifies [ipnext.Extension]s about the profile change, + // so features migrated from LocalBackend to external packages should not be affected. + // + // See tailscale/corp#28014. pm.currentProfile = cp cp, err := pm.setProfilePrefs(nil, prefsIn, np) if err != nil { @@ -746,28 +766,6 @@ func (pm *profileManager) NewProfileForUser(uid ipn.WindowsUserID) ipn.LoginProf return (&ipn.LoginProfile{LocalUserID: uid}).View() } -// newProfileWithPrefs creates a new profile with the specified prefs and assigns -// the specified uid as the profile owner. If switchNow is true, it switches to the -// newly created profile immediately. It returns the newly created profile on success, -// or an error on failure. -func (pm *profileManager) newProfileWithPrefs(uid ipn.WindowsUserID, prefs ipn.PrefsView, switchNow bool) (ipn.LoginProfileView, error) { - metricNewProfile.Add(1) - - profile, err := pm.setProfilePrefs(&ipn.LoginProfile{LocalUserID: uid}, prefs, ipn.NetworkProfile{}) - if err != nil { - return ipn.LoginProfileView{}, err - } - if switchNow { - pm.currentProfile = profile - pm.prefs = prefs.AsStruct().View() - pm.updateHealth() - if err := pm.setProfileAsUserDefault(profile); err != nil { - return ipn.LoginProfileView{}, err - } - } - return profile, nil -} - // defaultPrefs is the default prefs for a new profile. 
This initializes before // even this package's init() so do not rely on other parts of the system being // fully initialized here (for example, syspolicy will not be available on @@ -857,27 +855,9 @@ func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *healt health: ht, } + var initialProfile ipn.LoginProfileView if stateKey != "" { - for _, v := range knownProfiles { - if v.Key() == stateKey { - pm.currentProfile = v - } - } - if !pm.currentProfile.Valid() { - if suf, ok := strings.CutPrefix(string(stateKey), "user-"); ok { - pm.currentUserID = ipn.WindowsUserID(suf) - } - pm.SwitchToNewProfile() - } else { - pm.currentUserID = pm.currentProfile.LocalUserID() - } - prefs, err := pm.loadSavedPrefs(stateKey) - if err != nil { - return nil, err - } - if err := pm.setProfilePrefsNoPermCheck(pm.currentProfile, prefs); err != nil { - return nil, err - } + initialProfile = pm.findProfileByKey("", stateKey) // Most platform behavior is controlled by the goos parameter, however // some behavior is implied by build tag and fails when run on Windows, // so we explicitly avoid that behavior when running on Windows. @@ -888,17 +868,24 @@ func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *healt } else if len(knownProfiles) == 0 && goos != "windows" && runtime.GOOS != "windows" { // No known profiles, try a migration. pm.dlogf("no known profiles; trying to migrate from legacy prefs") - if _, err := pm.migrateFromLegacyPrefs(pm.currentUserID, true); err != nil { - return nil, err + if initialProfile, err = pm.migrateFromLegacyPrefs(pm.currentUserID); err != nil { + } - } else { - pm.SwitchToNewProfile() } - + if !initialProfile.Valid() { + var initialUserID ipn.WindowsUserID + if suf, ok := strings.CutPrefix(string(stateKey), "user-"); ok { + initialUserID = ipn.WindowsUserID(suf) + } + initialProfile = pm.NewProfileForUser(initialUserID) + } + if _, _, err := pm.SwitchToProfile(initialProfile); err != nil { + return nil, err + } return pm, nil } -func (pm *profileManager) migrateFromLegacyPrefs(uid ipn.WindowsUserID, switchNow bool) (ipn.LoginProfileView, error) { +func (pm *profileManager) migrateFromLegacyPrefs(uid ipn.WindowsUserID) (ipn.LoginProfileView, error) { metricMigration.Add(1) sentinel, prefs, err := pm.loadLegacyPrefs(uid) if err != nil { @@ -906,7 +893,7 @@ func (pm *profileManager) migrateFromLegacyPrefs(uid ipn.WindowsUserID, switchNo return ipn.LoginProfileView{}, fmt.Errorf("load legacy prefs: %w", err) } pm.dlogf("loaded legacy preferences; sentinel=%q", sentinel) - profile, err := pm.newProfileWithPrefs(uid, prefs, switchNow) + profile, err := pm.setProfilePrefs(&ipn.LoginProfile{LocalUserID: uid}, prefs, ipn.NetworkProfile{}) if err != nil { metricMigrationError.Add(1) return ipn.LoginProfileView{}, fmt.Errorf("migrating _daemon profile: %w", err) From 66371f392a785173ff02a27e92d8d9d6590acdd2 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 25 Apr 2025 14:53:55 -0500 Subject: [PATCH 0797/1708] feature,ipn/ipnlocal: add profileManager.StateChangeHook We update profileManager to allow registering a single state (profile+prefs) change hook. This is to invert the dependency between the profileManager and the LocalBackend, so that instead of LocalBackend asking profileManager for the state, we can have profileManager call LocalBackend when the state changes. We also update feature.Hook with a new (*feature.Hook).GetOk method to avoid calling both IsSet and Get. 
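As an aside, here is a minimal, self-contained sketch of the accessor pattern this change adds. The hook type, the stateChanged variable, and the profile strings below are invented for illustration and are not the real tailscale.com/feature.Hook implementation; the point is only that GetOk returns the function and its set/unset signal in a single call instead of pairing IsSet with Get.

    package main

    import "fmt"

    // hook is a simplified stand-in for a settable function slot; it mirrors
    // the shape described above but is not the real feature.Hook type.
    type hook[F any] struct {
        f  F
        ok bool
    }

    func (h *hook[F]) Set(f F)          { h.f, h.ok = f, true }
    func (h *hook[F]) IsSet() bool      { return h.ok }
    func (h *hook[F]) Get() F           { return h.f }
    func (h *hook[F]) GetOk() (F, bool) { return h.f, h.ok }

    func main() {
        var stateChanged hook[func(profile string)]
        stateChanged.Set(func(profile string) { fmt.Println("state changed:", profile) })

        // Before: two calls (IsSet, then Get) to safely invoke the hook.
        if stateChanged.IsSet() {
            stateChanged.Get()("profile-A")
        }

        // After: GetOk returns the function and whether it was set in one call.
        if f, ok := stateChanged.GetOk(); ok {
            f("profile-B")
        }
    }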
Updates tailscale/corp#28014 Updates #12614 Signed-off-by: Nick Khyl --- feature/feature.go | 6 + ipn/ipnlocal/local.go | 15 - ipn/ipnlocal/profiles.go | 54 +++- ipn/ipnlocal/profiles_test.go | 533 ++++++++++++++++++++++++++++++++++ 4 files changed, 589 insertions(+), 19 deletions(-) diff --git a/feature/feature.go b/feature/feature.go index 6c8cd7eae..5976d7f5a 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -53,6 +53,12 @@ func (h *Hook[Func]) Get() Func { return h.f } +// GetOk returns the hook function and true if it has been set, +// otherwise its zero value and false. +func (h *Hook[Func]) GetOk() (f Func, ok bool) { + return h.f, h.ok +} + // Hooks is a slice of funcs. // // As opposed to a single Hook, this is meant to be used when diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 308d03197..95fe22641 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1675,7 +1675,6 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control // Perform all mutations of prefs based on the netmap here. if prefsChanged { - profile := b.pm.CurrentProfile() // Prefs will be written out if stale; this is not safe unless locked or cloned. if err := b.pm.SetPrefs(prefs.View(), ipn.NetworkProfile{ MagicDNSName: curNetMap.MagicDNSSuffix(), @@ -1683,20 +1682,6 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control }); err != nil { b.logf("Failed to save new controlclient state: %v", err) } - // Updating profile prefs may have resulted in a change to the current [ipn.LoginProfile], - // either because the user completed a login, which populated and persisted their profile - // for the first time, or because of an [ipn.NetworkProfile] or [tailcfg.UserProfile] change. - // Theoretically, a completed login could also result in a switch to a different existing - // profile representing a different node (see tailscale/tailscale#8816). - // - // Let's check if the current profile has changed, and invoke all registered - // [ipnext.ProfileStateChangeCallback] if necessary. - if cp := b.pm.CurrentProfile(); *cp.AsStruct() != *profile.AsStruct() { - // If the profile ID was empty before SetPrefs, it's a new profile - // and the user has just completed a login for the first time. - sameNode := profile.ID() == "" || profile.ID() == cp.ID() - b.extHost.NotifyProfileChange(profile, prefs.View(), sameNode) - } } // initTKALocked is dependent on CurrentProfile.ID, which is initialized diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index b75e3aeb5..5c1b17038 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -43,6 +43,15 @@ type profileManager struct { currentProfile ipn.LoginProfileView // always Valid (once [newProfileManager] returns). prefs ipn.PrefsView // always Valid (once [newProfileManager] returns). + // StateChangeHook is an optional hook that is called when the current profile or prefs change, + // such as due to a profile switch or a change in the profile's preferences. + // It is typically set by the [LocalBackend] to invert the dependency between + // the [profileManager] and the [LocalBackend], so that instead of [LocalBackend] + // asking [profileManager] for the state, we can have [profileManager] call + // [LocalBackend] when the state changes. See also: + // https://github.com/tailscale/tailscale/pull/15791#discussion_r2060838160 + StateChangeHook ipnext.ProfileStateChangeCallback + // extHost is the bridge between [profileManager] and the registered [ipnext.Extension]s. 
// It may be nil in tests. A nil pointer is a valid, no-op host. extHost *ExtensionHost @@ -166,6 +175,16 @@ func (pm *profileManager) SwitchToProfile(profile ipn.LoginProfileView) (cp ipn. // But if updating the default profile fails, we should log it. pm.logf("failed to set %s (%s) as the default profile: %v", profile.Name(), profile.ID(), err) } + + if f := pm.StateChangeHook; f != nil { + f(pm.currentProfile, pm.prefs, false) + } + // Do not call pm.extHost.NotifyProfileChange here; it is invoked in + // [LocalBackend.resetForProfileChangeLockedOnEntry] after the netmap reset. + // TODO(nickkhyl): Consider moving it here (or into the stateChangeCb handler + // in [LocalBackend]) once the profile/node state, including the netmap, + // is actually tied to the current profile. + return profile, true, nil } @@ -344,11 +363,19 @@ func (pm *profileManager) SetPrefs(prefsIn ipn.PrefsView, np ipn.NetworkProfile) // [LocalBackend.resetForProfileChangeLockedOnEntry] is not called and certain // node/profile-specific state may not be reset as expected. // - // However, LocalBackend notifies [ipnext.Extension]s about the profile change, + // However, [profileManager] notifies [ipnext.Extension]s about the profile change, // so features migrated from LocalBackend to external packages should not be affected. // // See tailscale/corp#28014. - pm.currentProfile = cp + if !cp.Equals(pm.currentProfile) { + const sameNode = false // implicit profile switch + pm.currentProfile = cp + pm.prefs = prefsIn.AsStruct().View() + if f := pm.StateChangeHook; f != nil { + f(cp, prefsIn, sameNode) + } + pm.extHost.NotifyProfileChange(cp, prefsIn, sameNode) + } cp, err := pm.setProfilePrefs(nil, prefsIn, np) if err != nil { return err @@ -410,7 +437,20 @@ func (pm *profileManager) setProfilePrefs(lp *ipn.LoginProfile, prefsIn ipn.Pref // Update the current profile view to reflect the changes // if the specified profile is the current profile. if isCurrentProfile { - pm.currentProfile = lp.View() + // Always set pm.currentProfile to the new profile view for pointer equality. + // We check it further down the call stack. + lp := lp.View() + sameProfileInfo := lp.Equals(pm.currentProfile) + pm.currentProfile = lp + if !sameProfileInfo { + // But only invoke the callbacks if the profile info has actually changed. + const sameNode = true // just an info update; still the same node + pm.prefs = prefsIn.AsStruct().View() // suppress further callbacks for this change + if f := pm.StateChangeHook; f != nil { + f(lp, prefsIn, sameNode) + } + pm.extHost.NotifyProfileChange(lp, prefsIn, sameNode) + } } // An empty profile.ID indicates that the node info is not available yet, @@ -470,7 +510,13 @@ func (pm *profileManager) setProfilePrefsNoPermCheck(profile ipn.LoginProfileVie // That said, regardless of the cleanup, we might want // to keep the profileManager responsible for invoking // profile- and prefs-related callbacks. 
- pm.extHost.NotifyProfilePrefsChanged(pm.currentProfile, oldPrefs, clonedPrefs) + + if !clonedPrefs.Equals(oldPrefs) { + if f := pm.StateChangeHook; f != nil { + f(pm.currentProfile, clonedPrefs, true) + } + pm.extHost.NotifyProfilePrefsChanged(pm.currentProfile, oldPrefs, clonedPrefs) + } pm.updateHealth() } diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 534951fb1..52b095be1 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -7,6 +7,7 @@ import ( "fmt" "os/user" "strconv" + "strings" "testing" "github.com/google/go-cmp/cmp" @@ -609,3 +610,535 @@ func TestDefaultPrefs(t *testing.T) { t.Errorf("defaultPrefs is %s, want %s; defaultPrefs should only modify WantRunning and LoggedOut, all other defaults should be in ipn.NewPrefs.", p2.Pretty(), p1.Pretty()) } } + +// mutPrefsFn is a function that mutates the prefs. +// Deserialization pre‑populates prefs with default (non‑zero) values. +// After saving prefs and reading them back, we may not get exactly what we set. +// For this reason, tests apply changes through a helper that mutates +// [ipn.NewPrefs] instead of hard‑coding expected values in each case. +type mutPrefsFn func(*ipn.Prefs) + +type profileState struct { + *ipn.LoginProfile + mutPrefs mutPrefsFn +} + +func (s *profileState) prefs() ipn.PrefsView { + prefs := ipn.NewPrefs() // apply changes to the default prefs + s.mutPrefs(prefs) + return prefs.View() +} + +type profileStateChange struct { + *ipn.LoginProfile + mutPrefs mutPrefsFn + sameNode bool +} + +func wantProfileChange(state profileState) profileStateChange { + return profileStateChange{ + LoginProfile: state.LoginProfile, + mutPrefs: state.mutPrefs, + sameNode: false, + } +} + +func wantPrefsChange(state profileState) profileStateChange { + return profileStateChange{ + LoginProfile: state.LoginProfile, + mutPrefs: state.mutPrefs, + sameNode: true, + } +} + +func makeDefaultPrefs(p *ipn.Prefs) { *p = *defaultPrefs.AsStruct() } + +func makeKnownProfileState(id int, nameSuffix string, uid ipn.WindowsUserID, mutPrefs mutPrefsFn) profileState { + lowerNameSuffix := strings.ToLower(nameSuffix) + nid := "node-" + tailcfg.StableNodeID(lowerNameSuffix) + up := tailcfg.UserProfile{ + ID: tailcfg.UserID(id), + LoginName: fmt.Sprintf("user-%s@example.com", lowerNameSuffix), + DisplayName: "User " + nameSuffix, + } + return profileState{ + LoginProfile: &ipn.LoginProfile{ + LocalUserID: uid, + Name: up.LoginName, + ID: ipn.ProfileID(fmt.Sprintf("%04X", id)), + Key: "profile-" + ipn.StateKey(nameSuffix), + NodeID: nid, + UserProfile: up, + }, + mutPrefs: func(p *ipn.Prefs) { + p.Hostname = "Hostname-" + nameSuffix + if mutPrefs != nil { + mutPrefs(p) // apply any additional changes + } + p.Persist = &persist.Persist{NodeID: nid, UserProfile: up} + }, + } +} + +func TestProfileStateChangeCallback(t *testing.T) { + t.Parallel() + + // A few well-known profiles to use in tests. 
+ emptyProfile := profileState{ + LoginProfile: &ipn.LoginProfile{}, + mutPrefs: makeDefaultPrefs, + } + profile0000 := profileState{ + LoginProfile: &ipn.LoginProfile{ID: "0000", Key: "profile-0000"}, + mutPrefs: makeDefaultPrefs, + } + profileA := makeKnownProfileState(0xA, "A", "", nil) + profileB := makeKnownProfileState(0xB, "B", "", nil) + profileC := makeKnownProfileState(0xC, "C", "", nil) + + aliceUserID := ipn.WindowsUserID("S-1-5-21-1-2-3-4") + aliceEmptyProfile := profileState{ + LoginProfile: &ipn.LoginProfile{LocalUserID: aliceUserID}, + mutPrefs: makeDefaultPrefs, + } + bobUserID := ipn.WindowsUserID("S-1-5-21-3-4-5-6") + bobEmptyProfile := profileState{ + LoginProfile: &ipn.LoginProfile{LocalUserID: bobUserID}, + mutPrefs: makeDefaultPrefs, + } + bobKnownProfile := makeKnownProfileState(0xB0B, "Bob", bobUserID, nil) + + tests := []struct { + name string + initial *profileState // if non-nil, this is the initial profile and prefs to start wit + knownProfiles []profileState // known profiles we can switch to + action func(*profileManager) // action to take on the profile manager + wantChanges []profileStateChange // expected state changes + }{ + { + name: "no-changes", + action: func(*profileManager) { + // do nothing + }, + wantChanges: nil, + }, + { + name: "no-initial/new-profile", + action: func(pm *profileManager) { + // The profile manager is new and started with a new empty profile. + // This should not trigger a state change callback. + pm.SwitchToNewProfile() + }, + wantChanges: nil, + }, + { + name: "no-initial/new-profile-for-user", + action: func(pm *profileManager) { + // But switching to a new profile for a specific user should trigger + // a state change callback. + pm.SwitchToNewProfileForUser(aliceUserID) + }, + wantChanges: []profileStateChange{ + // We want a new empty profile (owned by the specified user) + // and the default prefs. + wantProfileChange(aliceEmptyProfile), + }, + }, + { + name: "with-initial/new-profile", + initial: &profile0000, + action: func(pm *profileManager) { + // And so does switching to a new profile when the initial profile + // is non-empty. + pm.SwitchToNewProfile() + }, + wantChanges: []profileStateChange{ + // We want a new empty profile and the default prefs. + wantProfileChange(emptyProfile), + }, + }, + { + name: "with-initial/new-profile/twice", + initial: &profile0000, + action: func(pm *profileManager) { + // If we switch to a new profile twice, we should only get one state change. + pm.SwitchToNewProfile() + pm.SwitchToNewProfile() + }, + wantChanges: []profileStateChange{ + // We want a new empty profile and the default prefs. + wantProfileChange(emptyProfile), + }, + }, + { + name: "with-initial/new-profile-for-user/twice", + initial: &profile0000, + action: func(pm *profileManager) { + // Unless we switch to a new profile for a specific user, + // in which case we should get a state change twice. + pm.SwitchToNewProfileForUser(aliceUserID) + pm.SwitchToNewProfileForUser(aliceUserID) // no change here + pm.SwitchToNewProfileForUser(bobUserID) + }, + wantChanges: []profileStateChange{ + // Both profiles are empty, but they are owned by different users. + wantProfileChange(aliceEmptyProfile), + wantProfileChange(bobEmptyProfile), + }, + }, + { + name: "with-initial/new-profile/twice/with-prefs-change", + initial: &profile0000, + action: func(pm *profileManager) { + // Or unless we switch to a new profile, change the prefs, + // then switch to a new profile again. 
Since the current + // profile is not empty after the prefs change, we should + // get state changes for all three actions. + pm.SwitchToNewProfile() + p := pm.CurrentPrefs().AsStruct() + p.WantRunning = true + pm.SetPrefs(p.View(), ipn.NetworkProfile{}) + pm.SwitchToNewProfile() + }, + wantChanges: []profileStateChange{ + wantProfileChange(emptyProfile), // new empty profile + wantPrefsChange(profileState{ // prefs change, same profile + LoginProfile: &ipn.LoginProfile{}, + mutPrefs: func(p *ipn.Prefs) { + *p = *defaultPrefs.AsStruct() + p.WantRunning = true + }, + }), + wantProfileChange(emptyProfile), // new empty profile again + }, + }, + { + name: "switch-to-profile/by-id", + knownProfiles: []profileState{profileA, profileB, profileC}, + action: func(pm *profileManager) { + // Switching to a known profile by ID should trigger a state change callback. + pm.SwitchToProfileByID(profileB.ID) + }, + wantChanges: []profileStateChange{ + wantProfileChange(profileB), + }, + }, + { + name: "switch-to-profile/by-id/non-existent", + knownProfiles: []profileState{profileA, profileC}, // no profileB + action: func(pm *profileManager) { + // Switching to a non-existent profile should fail and not trigger a state change callback. + pm.SwitchToProfileByID(profileB.ID) + }, + wantChanges: []profileStateChange{}, + }, + { + name: "switch-to-profile/by-id/twice-same", + knownProfiles: []profileState{profileA, profileB, profileC}, + action: func(pm *profileManager) { + // But only for the first switch. + // The second switch to the same profile should not trigger a state change callback. + pm.SwitchToProfileByID(profileB.ID) + pm.SwitchToProfileByID(profileB.ID) + }, + wantChanges: []profileStateChange{ + wantProfileChange(profileB), + }, + }, + { + name: "switch-to-profile/by-id/many", + knownProfiles: []profileState{profileA, profileB, profileC}, + action: func(pm *profileManager) { + // Same idea, but with multiple switches. + pm.SwitchToProfileByID(profileB.ID) // switch to Profile-B + pm.SwitchToProfileByID(profileB.ID) // then to Profile-B again (no change) + pm.SwitchToProfileByID(profileC.ID) // then to Profile-C (change) + pm.SwitchToProfileByID(profileA.ID) // then to Profile-A (change) + pm.SwitchToProfileByID(profileB.ID) // then to Profile-B (change) + }, + wantChanges: []profileStateChange{ + wantProfileChange(profileB), + wantProfileChange(profileC), + wantProfileChange(profileA), + wantProfileChange(profileB), + }, + }, + { + name: "switch-to-profile/by-view", + knownProfiles: []profileState{profileA, profileB, profileC}, + action: func(pm *profileManager) { + // Switching to a known profile by an [ipn.LoginProfileView] + // should also trigger a state change callback. + pm.SwitchToProfile(profileB.View()) + }, + wantChanges: []profileStateChange{ + wantProfileChange(profileB), + }, + }, + { + name: "switch-to-profile/by-view/empty", + initial: &profile0000, + action: func(pm *profileManager) { + // SwitchToProfile supports switching to an empty profile. + emptyProfile := &ipn.LoginProfile{} + pm.SwitchToProfile(emptyProfile.View()) + }, + wantChanges: []profileStateChange{ + wantProfileChange(emptyProfile), + }, + }, + { + name: "switch-to-profile/by-view/non-existent", + knownProfiles: []profileState{profileA, profileC}, + action: func(pm *profileManager) { + // Switching to a an unknown profile by an [ipn.LoginProfileView] + // should fail and not trigger a state change callback. 
+ pm.SwitchToProfile(profileB.View()) + }, + wantChanges: []profileStateChange{}, + }, + { + name: "switch-to-profile/by-view/empty-for-user", + initial: &profile0000, + action: func(pm *profileManager) { + // And switching to an empty profile for a specific user also works. + pm.SwitchToProfile(bobEmptyProfile.View()) + }, + wantChanges: []profileStateChange{ + wantProfileChange(bobEmptyProfile), + }, + }, + { + name: "switch-to-profile/by-view/invalid", + initial: &profile0000, + action: func(pm *profileManager) { + // Switching to an invalid profile should create and switch + // to a new empty profile. + pm.SwitchToProfile(ipn.LoginProfileView{}) + }, + wantChanges: []profileStateChange{ + wantProfileChange(emptyProfile), + }, + }, + { + name: "delete-profile/current", + initial: &profileA, // profileA is the current profile + knownProfiles: []profileState{profileA, profileB, profileC}, + action: func(pm *profileManager) { + // Deleting the current profile should switch to a new empty profile. + pm.DeleteProfile(profileA.ID) + }, + wantChanges: []profileStateChange{ + wantProfileChange(emptyProfile), + }, + }, + { + name: "delete-profile/current-with-user", + initial: &bobKnownProfile, + knownProfiles: []profileState{profileA, profileB, profileC, bobKnownProfile}, + action: func(pm *profileManager) { + // Similarly, deleting the current profile for a specific user should switch + // to a new empty profile for that user (at least while the "current user" + // is still a thing on Windows). + pm.DeleteProfile(bobKnownProfile.ID) + }, + wantChanges: []profileStateChange{ + wantProfileChange(bobEmptyProfile), + }, + }, + { + name: "delete-profile/non-current", + initial: &profileA, // profileA is the current profile + knownProfiles: []profileState{profileA, profileB, profileC}, + action: func(pm *profileManager) { + // But deleting a non-current profile should not trigger a state change callback. + pm.DeleteProfile(profileB.ID) + }, + wantChanges: []profileStateChange{}, + }, + { + name: "set-prefs/new-profile", + initial: &emptyProfile, // the current profile is empty + action: func(pm *profileManager) { + // The current profile is new and empty, but we can still set p. + // This should trigger a state change callback. + p := pm.CurrentPrefs().AsStruct() + p.WantRunning = true + p.Hostname = "New-Hostname" + pm.SetPrefs(p.View(), ipn.NetworkProfile{}) + }, + wantChanges: []profileStateChange{ + // Still an empty profile, but with new prefs. 
+ wantPrefsChange(profileState{ + LoginProfile: emptyProfile.LoginProfile, + mutPrefs: func(p *ipn.Prefs) { + *p = *emptyProfile.prefs().AsStruct() + p.WantRunning = true + p.Hostname = "New-Hostname" + }, + }), + }, + }, + { + name: "set-prefs/current-profile", + initial: &profileA, // profileA is the current profile + knownProfiles: []profileState{profileA, profileB, profileC}, + action: func(pm *profileManager) { + p := pm.CurrentPrefs().AsStruct() + p.WantRunning = true + p.Hostname = "New-Hostname" + pm.SetPrefs(p.View(), ipn.NetworkProfile{}) + }, + wantChanges: []profileStateChange{ + wantPrefsChange(profileState{ + LoginProfile: profileA.LoginProfile, // same profile + mutPrefs: func(p *ipn.Prefs) { // but with new prefs + *p = *profileA.prefs().AsStruct() + p.WantRunning = true + p.Hostname = "New-Hostname" + }, + }), + }, + }, + { + name: "set-prefs/current-profile/profile-name", + initial: &profileA, // profileA is the current profile + knownProfiles: []profileState{profileA, profileB, profileC}, + action: func(pm *profileManager) { + p := pm.CurrentPrefs().AsStruct() + p.ProfileName = "This is User A" + pm.SetPrefs(p.View(), ipn.NetworkProfile{}) + }, + wantChanges: []profileStateChange{ + // Still the same profile, but with a new profile name + // populated from the prefs. The prefs are also updated. + wantPrefsChange(profileState{ + LoginProfile: func() *ipn.LoginProfile { + p := profileA.Clone() + p.Name = "This is User A" + return p + }(), + mutPrefs: func(p *ipn.Prefs) { + *p = *profileA.prefs().AsStruct() + p.ProfileName = "This is User A" + }, + }), + }, + }, + { + name: "set-prefs/implicit-switch/from-new", + initial: &emptyProfile, // a new, empty profile + knownProfiles: []profileState{profileA, profileB, profileC}, + action: func(pm *profileManager) { + // The user attempted to add a new profile but actually logged in as the same + // node/user as profileB. When [LocalBackend.SetControlClientStatus] calls + // [profileManager.SetPrefs] with the [persist.Persist] for profileB, we + // implicitly switch to that profile instead of creating a duplicate for the + // same node/user. + // + // TODO(nickkhyl): currently, [LocalBackend.SetControlClientStatus] uses the p + // of the current profile, not those of the profile we switch to. This is all wrong + // and should be fixed. But for now, we just test that the state change callback + // is called with the new profile and p. + p := pm.CurrentPrefs().AsStruct() + p.Persist = profileB.prefs().Persist().AsStruct() + p.WantRunning = true + p.LoggedOut = false + pm.SetPrefs(p.View(), ipn.NetworkProfile{}) + }, + wantChanges: []profileStateChange{ + // Calling [profileManager.SetPrefs] like this is effectively a profile switch + // rather than a prefs change. + wantProfileChange(profileState{ + LoginProfile: profileB.LoginProfile, + mutPrefs: func(p *ipn.Prefs) { + *p = *emptyProfile.prefs().AsStruct() + p.Persist = profileB.prefs().Persist().AsStruct() + p.WantRunning = true + p.LoggedOut = false + }, + }), + }, + }, + { + name: "set-prefs/implicit-switch/from-other", + initial: &profileA, // profileA is the current profile + knownProfiles: []profileState{profileA, profileB, profileC}, + action: func(pm *profileManager) { + // Same idea, but the current profile is profileA rather than a new empty profile. + // Note: this is all wrong. See the comment above and [profileManager.SetPrefs]. 
+ p := pm.CurrentPrefs().AsStruct() + p.Persist = profileB.prefs().Persist().AsStruct() + p.WantRunning = true + p.LoggedOut = false + pm.SetPrefs(p.View(), ipn.NetworkProfile{}) + }, + wantChanges: []profileStateChange{ + wantProfileChange(profileState{ + LoginProfile: profileB.LoginProfile, + mutPrefs: func(p *ipn.Prefs) { + *p = *profileA.prefs().AsStruct() + p.Persist = profileB.prefs().Persist().AsStruct() + p.WantRunning = true + p.LoggedOut = false + }, + }), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + store := new(mem.Store) + pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + if err != nil { + t.Fatalf("newProfileManagerWithGOOS: %v", err) + } + for _, p := range tt.knownProfiles { + pm.writePrefsToStore(p.Key, p.prefs()) + pm.knownProfiles[p.ID] = p.View() + } + if err := pm.writeKnownProfiles(); err != nil { + t.Fatalf("writeKnownProfiles: %v", err) + } + + if tt.initial != nil { + pm.currentUserID = tt.initial.LocalUserID + pm.currentProfile = tt.initial.View() + pm.prefs = tt.initial.prefs() + } + + type stateChange struct { + Profile *ipn.LoginProfile + Prefs *ipn.Prefs + SameNode bool + } + wantChanges := make([]stateChange, 0, len(tt.wantChanges)) + for _, w := range tt.wantChanges { + wantPrefs := ipn.NewPrefs() + w.mutPrefs(wantPrefs) // apply changes to the default prefs + wantChanges = append(wantChanges, stateChange{ + Profile: w.LoginProfile, + Prefs: wantPrefs, + SameNode: w.sameNode, + }) + } + + gotChanges := make([]stateChange, 0, len(tt.wantChanges)) + pm.StateChangeHook = func(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { + gotChanges = append(gotChanges, stateChange{ + Profile: profile.AsStruct(), + Prefs: prefs.AsStruct(), + SameNode: sameNode, + }) + } + + tt.action(pm) + + if diff := cmp.Diff(wantChanges, gotChanges, defaultCmpOpts...); diff != "" { + t.Errorf("StateChange callbacks: (-want +got): %v", diff) + } + }) + } +} From 189e03e741acfcd3476343bf01a9fd8c02f3760d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 25 Apr 2025 14:56:18 -0700 Subject: [PATCH 0798/1708] net/portmapper: fix test flakes from logging after test done Fixes #15794 Change-Id: Ic22aa99acb10fdb6dc5f0b6482e722e48237703c Signed-off-by: Brad Fitzpatrick --- net/portmapper/igd_test.go | 7 +++++-- net/portmapper/portmapper_test.go | 6 +++--- net/portmapper/select_test.go | 2 +- net/portmapper/upnp_test.go | 12 ++++++------ tstest/log.go | 3 ++- 5 files changed, 17 insertions(+), 13 deletions(-) diff --git a/net/portmapper/igd_test.go b/net/portmapper/igd_test.go index 319115896..3ef7989a3 100644 --- a/net/portmapper/igd_test.go +++ b/net/portmapper/igd_test.go @@ -18,8 +18,10 @@ import ( "tailscale.com/net/netaddr" "tailscale.com/net/netmon" "tailscale.com/syncs" + "tailscale.com/tstest" "tailscale.com/types/logger" "tailscale.com/util/eventbus" + "tailscale.com/util/testenv" ) // TestIGD is an IGD (Internet Gateway Device) for testing. 
It supports fake @@ -64,7 +66,8 @@ type igdCounters struct { invalidPCPMapPkt int32 } -func NewTestIGD(logf logger.Logf, t TestIGDOptions) (*TestIGD, error) { +func NewTestIGD(tb testenv.TB, t TestIGDOptions) (*TestIGD, error) { + logf := tstest.WhileTestRunningLogger(tb) d := &TestIGD{ doPMP: t.PMP, doPCP: t.PCP, @@ -265,7 +268,7 @@ func (d *TestIGD) handlePCPQuery(pkt []byte, src netip.AddrPort) { func newTestClient(t *testing.T, igd *TestIGD, bus *eventbus.Bus) *Client { var c *Client c = NewClient(Config{ - Logf: t.Logf, + Logf: tstest.WhileTestRunningLogger(t), NetMon: netmon.NewStatic(), ControlKnobs: new(controlknobs.Knobs), EventBus: bus, diff --git a/net/portmapper/portmapper_test.go b/net/portmapper/portmapper_test.go index 32302e461..515a0c28c 100644 --- a/net/portmapper/portmapper_test.go +++ b/net/portmapper/portmapper_test.go @@ -61,7 +61,7 @@ func TestClientProbeThenMap(t *testing.T) { } func TestProbeIntegration(t *testing.T) { - igd, err := NewTestIGD(t.Logf, TestIGDOptions{PMP: true, PCP: true, UPnP: true}) + igd, err := NewTestIGD(t, TestIGDOptions{PMP: true, PCP: true, UPnP: true}) if err != nil { t.Fatal(err) } @@ -95,7 +95,7 @@ func TestProbeIntegration(t *testing.T) { } func TestPCPIntegration(t *testing.T) { - igd, err := NewTestIGD(t.Logf, TestIGDOptions{PMP: false, PCP: true, UPnP: false}) + igd, err := NewTestIGD(t, TestIGDOptions{PMP: false, PCP: true, UPnP: false}) if err != nil { t.Fatal(err) } @@ -137,7 +137,7 @@ func TestGetUPnPErrorsMetric(t *testing.T) { } func TestUpdateEvent(t *testing.T) { - igd, err := NewTestIGD(t.Logf, TestIGDOptions{PCP: true}) + igd, err := NewTestIGD(t, TestIGDOptions{PCP: true}) if err != nil { t.Fatalf("Create test gateway: %v", err) } diff --git a/net/portmapper/select_test.go b/net/portmapper/select_test.go index 6c210d70a..af2e35cbf 100644 --- a/net/portmapper/select_test.go +++ b/net/portmapper/select_test.go @@ -28,7 +28,7 @@ func TestSelectBestService(t *testing.T) { } // Run a fake IGD server to respond to UPnP requests. - igd, err := NewTestIGD(t.Logf, TestIGDOptions{UPnP: true}) + igd, err := NewTestIGD(t, TestIGDOptions{UPnP: true}) if err != nil { t.Fatal(err) } diff --git a/net/portmapper/upnp_test.go b/net/portmapper/upnp_test.go index 1e1278abc..c07ec0208 100644 --- a/net/portmapper/upnp_test.go +++ b/net/portmapper/upnp_test.go @@ -533,7 +533,7 @@ func TestGetUPnPClient(t *testing.T) { } func TestGetUPnPPortMapping(t *testing.T) { - igd, err := NewTestIGD(t.Logf, TestIGDOptions{UPnP: true}) + igd, err := NewTestIGD(t, TestIGDOptions{UPnP: true}) if err != nil { t.Fatal(err) } @@ -672,7 +672,7 @@ func TestGetUPnPPortMapping_LeaseDuration(t *testing.T) { "DeletePortMapping": "", // Do nothing for test } - igd, err := NewTestIGD(t.Logf, TestIGDOptions{UPnP: true}) + igd, err := NewTestIGD(t, TestIGDOptions{UPnP: true}) if err != nil { t.Fatal(err) } @@ -722,7 +722,7 @@ func TestGetUPnPPortMapping_LeaseDuration(t *testing.T) { // // See https://github.com/tailscale/tailscale/issues/10911 func TestGetUPnPPortMapping_NoValidServices(t *testing.T) { - igd, err := NewTestIGD(t.Logf, TestIGDOptions{UPnP: true}) + igd, err := NewTestIGD(t, TestIGDOptions{UPnP: true}) if err != nil { t.Fatal(err) } @@ -753,7 +753,7 @@ func TestGetUPnPPortMapping_NoValidServices(t *testing.T) { // Tests the legacy behaviour with the pre-UPnP standard portmapping service. 
func TestGetUPnPPortMapping_Legacy(t *testing.T) { - igd, err := NewTestIGD(t.Logf, TestIGDOptions{UPnP: true}) + igd, err := NewTestIGD(t, TestIGDOptions{UPnP: true}) if err != nil { t.Fatal(err) } @@ -796,7 +796,7 @@ func TestGetUPnPPortMapping_Legacy(t *testing.T) { } func TestGetUPnPPortMappingNoResponses(t *testing.T) { - igd, err := NewTestIGD(t.Logf, TestIGDOptions{UPnP: true}) + igd, err := NewTestIGD(t, TestIGDOptions{UPnP: true}) if err != nil { t.Fatal(err) } @@ -912,7 +912,7 @@ func TestGetUPnPPortMapping_Invalid(t *testing.T) { "127.0.0.1", } { t.Run(responseAddr, func(t *testing.T) { - igd, err := NewTestIGD(t.Logf, TestIGDOptions{UPnP: true}) + igd, err := NewTestIGD(t, TestIGDOptions{UPnP: true}) if err != nil { t.Fatal(err) } diff --git a/tstest/log.go b/tstest/log.go index cb67c609a..d081c819d 100644 --- a/tstest/log.go +++ b/tstest/log.go @@ -13,6 +13,7 @@ import ( "go4.org/mem" "tailscale.com/types/logger" + "tailscale.com/util/testenv" ) type testLogWriter struct { @@ -149,7 +150,7 @@ func (ml *MemLogger) String() string { // WhileTestRunningLogger returns a logger.Logf that logs to t.Logf until the // test finishes, at which point it no longer logs anything. -func WhileTestRunningLogger(t testing.TB) logger.Logf { +func WhileTestRunningLogger(t testenv.TB) logger.Logf { var ( mu sync.RWMutex done bool From b95e8bf4a156016c5ac5b619575e65e4f6d52c18 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Mon, 28 Apr 2025 09:04:02 -0700 Subject: [PATCH 0799/1708] tsweb/varz: export GC CPU fraction gauge We were missing this metric, but it can be important for some workloads. Varz memstats output allocation cost reduced from 30 allocs per invocation to 1 alloc per invocation. Updates tailscale/corp#28033 Signed-off-by: James Tucker Co-authored-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 2 +- cmd/stund/depaware.txt | 3 +- cmd/tailscale/depaware.txt | 2 +- tsweb/varz/varz.go | 63 +++++++++++++++++++++++++------- tsweb/varz/varz_test.go | 75 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 128 insertions(+), 17 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 98965c6ef..f22b4873f 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -199,7 +199,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - W golang.org/x/exp/constraints from tailscale.com/util/winutil + golang.org/x/exp/constraints from tailscale.com/util/winutil+ golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting+ L golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from net+ diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 6168e1582..da7680394 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -65,7 +65,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/ipproto from tailscale.com/tailcfg tailscale.com/types/key from tailscale.com/tailcfg tailscale.com/types/lazy from tailscale.com/version+ - tailscale.com/types/logger from tailscale.com/tsweb + tailscale.com/types/logger from tailscale.com/tsweb+ tailscale.com/types/opt from tailscale.com/envknob+ tailscale.com/types/ptr from tailscale.com/tailcfg+ tailscale.com/types/result from tailscale.com/util/lineiter @@ -95,6 +95,7 @@ tailscale.com/cmd/stund 
dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from net/http golang.org/x/net/http/httpproxy from net/http diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 7f66e7700..85bf64e4a 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -211,7 +211,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/pbkdf2 from software.sslmate.com/src/go-pkcs12 golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - W golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ + golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from tailscale.com/util/syspolicy/internal/metrics+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from net+ diff --git a/tsweb/varz/varz.go b/tsweb/varz/varz.go index 952ebc231..c6d66fbe2 100644 --- a/tsweb/varz/varz.go +++ b/tsweb/varz/varz.go @@ -5,6 +5,7 @@ package varz import ( + "bufio" "cmp" "expvar" "fmt" @@ -13,13 +14,16 @@ import ( "reflect" "runtime" "sort" + "strconv" "strings" "sync" "time" "unicode" "unicode/utf8" + "golang.org/x/exp/constraints" "tailscale.com/metrics" + "tailscale.com/types/logger" "tailscale.com/version" ) @@ -316,21 +320,52 @@ type PrometheusMetricsReflectRooter interface { var expvarDo = expvar.Do // pulled out for tests -func writeMemstats(w io.Writer, ms *runtime.MemStats) { - out := func(name, typ string, v uint64, help string) { - if help != "" { - fmt.Fprintf(w, "# HELP memstats_%s %s\n", name, help) - } - fmt.Fprintf(w, "# TYPE memstats_%s %s\nmemstats_%s %v\n", name, typ, name, v) +func writeMemstat[V constraints.Integer | constraints.Float](bw *bufio.Writer, typ, name string, v V, help string) { + if help != "" { + bw.WriteString("# HELP memstats_") + bw.WriteString(name) + bw.WriteString(" ") + bw.WriteString(help) + bw.WriteByte('\n') + } + bw.WriteString("# TYPE memstats_") + bw.WriteString(name) + bw.WriteString(" ") + bw.WriteString(typ) + bw.WriteByte('\n') + bw.WriteString("memstats_") + bw.WriteString(name) + bw.WriteByte(' ') + rt := reflect.TypeOf(v) + switch { + case rt == reflect.TypeFor[int]() || + rt == reflect.TypeFor[uint]() || + rt == reflect.TypeFor[int8]() || + rt == reflect.TypeFor[uint8]() || + rt == reflect.TypeFor[int16]() || + rt == reflect.TypeFor[uint16]() || + rt == reflect.TypeFor[int32]() || + rt == reflect.TypeFor[uint32]() || + rt == reflect.TypeFor[int64]() || + rt == reflect.TypeFor[uint64]() || + rt == reflect.TypeFor[uintptr](): + bw.Write(strconv.AppendInt(bw.AvailableBuffer(), int64(v), 10)) + case rt == reflect.TypeFor[float32]() || rt == reflect.TypeFor[float64](): + bw.Write(strconv.AppendFloat(bw.AvailableBuffer(), float64(v), 'f', -1, 64)) } - g := func(name string, v uint64, help string) { out(name, "gauge", v, help) } - c := func(name string, v uint64, help string) { out(name, "counter", v, help) } - g("heap_alloc", ms.HeapAlloc, "current bytes of allocated heap objects (up/down smoothly)") - c("total_alloc", ms.TotalAlloc, "cumulative bytes allocated for heap objects") - g("sys", ms.Sys, "total bytes of 
memory obtained from the OS") - c("mallocs", ms.Mallocs, "cumulative count of heap objects allocated") - c("frees", ms.Frees, "cumulative count of heap objects freed") - c("num_gc", uint64(ms.NumGC), "number of completed GC cycles") + bw.WriteByte('\n') +} + +func writeMemstats(w io.Writer, ms *runtime.MemStats) { + fmt.Fprintf(w, "%v", logger.ArgWriter(func(bw *bufio.Writer) { + writeMemstat(bw, "gauge", "heap_alloc", ms.HeapAlloc, "current bytes of allocated heap objects (up/down smoothly)") + writeMemstat(bw, "counter", "total_alloc", ms.TotalAlloc, "cumulative bytes allocated for heap objects") + writeMemstat(bw, "gauge", "sys", ms.Sys, "total bytes of memory obtained from the OS") + writeMemstat(bw, "counter", "mallocs", ms.Mallocs, "cumulative count of heap objects allocated") + writeMemstat(bw, "counter", "frees", ms.Frees, "cumulative count of heap objects freed") + writeMemstat(bw, "counter", "num_gc", ms.NumGC, "number of completed GC cycles") + writeMemstat(bw, "gauge", "gc_cpu_fraction", ms.GCCPUFraction, "fraction of CPU time used by GC") + })) } // sortedStructField is metadata about a struct field used both for sorting once diff --git a/tsweb/varz/varz_test.go b/tsweb/varz/varz_test.go index 7e094b0e7..f7a9d8801 100644 --- a/tsweb/varz/varz_test.go +++ b/tsweb/varz/varz_test.go @@ -4,14 +4,17 @@ package varz import ( + "bytes" "expvar" "net/http/httptest" "reflect" + "runtime" "strings" "testing" "tailscale.com/metrics" "tailscale.com/tstest" + "tailscale.com/util/racebuild" "tailscale.com/version" ) @@ -418,3 +421,75 @@ func TestVarzHandlerSorting(t *testing.T) { } } } + +func TestWriteMemestats(t *testing.T) { + memstats := &runtime.MemStats{ + Alloc: 1, + TotalAlloc: 2, + Sys: 3, + Lookups: 4, + Mallocs: 5, + Frees: 6, + HeapAlloc: 7, + HeapSys: 8, + HeapIdle: 9, + HeapInuse: 10, + HeapReleased: 11, + HeapObjects: 12, + StackInuse: 13, + StackSys: 14, + MSpanInuse: 15, + MSpanSys: 16, + MCacheInuse: 17, + MCacheSys: 18, + BuckHashSys: 19, + GCSys: 20, + OtherSys: 21, + NextGC: 22, + LastGC: 23, + PauseTotalNs: 24, + // PauseNs: [256]int64{}, + NumGC: 26, + NumForcedGC: 27, + GCCPUFraction: 0.28, + } + + var buf bytes.Buffer + writeMemstats(&buf, memstats) + lines := strings.Split(buf.String(), "\n") + + checkFor := func(name, typ, value string) { + var foundType, foundValue bool + for _, line := range lines { + if line == "memstats_"+name+" "+value { + foundValue = true + } + if line == "# TYPE memstats_"+name+" "+typ { + foundType = true + } + if foundValue && foundType { + return + } + } + t.Errorf("memstats_%s foundType=%v foundValue=%v", name, foundType, foundValue) + } + + t.Logf("memstats:\n %s", buf.String()) + + checkFor("heap_alloc", "gauge", "7") + checkFor("total_alloc", "counter", "2") + checkFor("sys", "gauge", "3") + checkFor("mallocs", "counter", "5") + checkFor("frees", "counter", "6") + checkFor("num_gc", "counter", "26") + checkFor("gc_cpu_fraction", "gauge", "0.28") + + if !racebuild.On { + if allocs := testing.AllocsPerRun(1000, func() { + buf.Reset() + writeMemstats(&buf, memstats) + }); allocs != 1 { + t.Errorf("allocs = %v; want max %v", allocs, 1) + } + } +} From 51b17483ffde13855caaa286726539b3975eb149 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Mon, 28 Apr 2025 11:36:37 -0700 Subject: [PATCH 0800/1708] types/logger: release ArgWriter destination after use Spotted after Brad showed me this utility in #15806. 
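For illustration, a minimal self-contained sketch of why a pooled bufio.Writer should be pointed back at io.Discard before being returned to the pool; the pool and the writeVia helper below are invented for this example and are not the actual ArgWriter code, but they show the same retention problem the one-line change fixes.

    package main

    import (
        "bufio"
        "fmt"
        "io"
        "os"
        "sync"
    )

    // pool holds reusable bufio.Writers, parked against io.Discard while idle.
    var pool = sync.Pool{New: func() any { return bufio.NewWriter(io.Discard) }}

    // writeVia borrows a pooled writer, aims it at dst, runs fn, and returns it.
    func writeVia(dst io.Writer, fn func(*bufio.Writer)) {
        bw := pool.Get().(*bufio.Writer)
        bw.Reset(dst) // point the pooled writer at the caller's destination
        fn(bw)
        bw.Flush()
        // Without this Reset, the pooled writer would keep a reference to dst
        // for as long as it sits in the pool, pinning that destination in memory.
        bw.Reset(io.Discard)
        pool.Put(bw)
    }

    func main() {
        writeVia(os.Stdout, func(bw *bufio.Writer) { fmt.Fprintln(bw, "hello") })
    }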
Updates #cleanup Signed-off-by: James Tucker --- types/logger/logger.go | 1 + 1 file changed, 1 insertion(+) diff --git a/types/logger/logger.go b/types/logger/logger.go index aeced352e..6c4edf633 100644 --- a/types/logger/logger.go +++ b/types/logger/logger.go @@ -323,6 +323,7 @@ func (fn ArgWriter) Format(f fmt.State, _ rune) { bw.Reset(f) fn(bw) bw.Flush() + bw.Reset(io.Discard) argBufioPool.Put(bw) } From 61635f8670df36a0f8205cda920db168b2b11ab6 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 28 Apr 2025 15:55:49 -0700 Subject: [PATCH 0801/1708] wgengine/magicsock: support Geneve-encap'd Disco transmission (#15811) Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 2 +- wgengine/magicsock/magicsock.go | 47 +++++++++++++++++++++++++++++---- 2 files changed, 43 insertions(+), 6 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 0c48acddf..5f4f0bd8c 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1111,7 +1111,7 @@ func (de *endpoint) sendDiscoPing(ep netip.AddrPort, discoKey key.DiscoPublic, t size = min(size, MaxDiscoPingSize) padding := max(size-discoPingSize, 0) - sent, _ := de.c.sendDiscoMessage(ep, de.publicKey, discoKey, &disco.Ping{ + sent, _ := de.c.sendDiscoMessage(ep, nil, de.publicKey, discoKey, &disco.Ping{ TxID: [12]byte(txid), NodeKey: de.c.publicKeyAtomic.Load(), Padding: padding, diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index c2404dd0b..31bf66b2b 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1602,23 +1602,60 @@ var debugIPv4DiscoPingPenalty = envknob.RegisterDuration("TS_DISCO_PONG_IPV4_DEL // // If dst is a DERP IP:port, then dstKey must be non-zero. // +// If geneveVNI is non-nil, then the [disco.Message] will be preceded by a +// Geneve header with the supplied VNI set. +// // The dstKey should only be non-zero if the dstDisco key // unambiguously maps to exactly one peer. -func (c *Conn) sendDiscoMessage(dst netip.AddrPort, dstKey key.NodePublic, dstDisco key.DiscoPublic, m disco.Message, logLevel discoLogLevel) (sent bool, err error) { +func (c *Conn) sendDiscoMessage(dst netip.AddrPort, geneveVNI *uint32, dstKey key.NodePublic, dstDisco key.DiscoPublic, m disco.Message, logLevel discoLogLevel) (sent bool, err error) { isDERP := dst.Addr() == tailcfg.DerpMagicIPAddr if _, isPong := m.(*disco.Pong); isPong && !isDERP && dst.Addr().Is4() { time.Sleep(debugIPv4DiscoPingPenalty()) } + isRelayHandshakeMsg := false + switch m.(type) { + case *disco.BindUDPRelayEndpoint, *disco.BindUDPRelayEndpointAnswer: + isRelayHandshakeMsg = true + } + c.mu.Lock() if c.closed { c.mu.Unlock() return false, errConnClosed } pkt := make([]byte, 0, 512) // TODO: size it correctly? pool? if it matters. + if geneveVNI != nil { + gh := packet.GeneveHeader{ + Version: 0, + Protocol: packet.GeneveProtocolDisco, + VNI: *geneveVNI, + Control: isRelayHandshakeMsg, + } + pkt = append(pkt, make([]byte, packet.GeneveFixedHeaderLength)...) + err := gh.Encode(pkt) + if err != nil { + return false, err + } + } pkt = append(pkt, disco.Magic...) pkt = c.discoPublic.AppendTo(pkt) - di := c.discoInfoLocked(dstDisco) + var di *discoInfo + if !isRelayHandshakeMsg { + di = c.discoInfoLocked(dstDisco) + } else { + // c.discoInfoLocked() caches [*discoInfo] for dstDisco. It assumes that + // dstDisco is a known Tailscale peer, and will be cleaned around + // network map changes. 
In the case of a relay handshake message, + // dstDisco belongs to a relay server with a disco key that is + // discovered at endpoint allocation time or [disco.CallMeMaybeVia] + // reception time. There is no clear ending to its lifetime, so we + // can't cache with the same strategy. Instead, generate the shared + // key on the fly for now. + di = &discoInfo{ + sharedKey: c.discoPrivate.Shared(dstDisco), + } + } c.mu.Unlock() if isDERP { @@ -1943,7 +1980,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src netip.AddrPort, di *discoInf ipDst := src discoDest := di.discoKey - go c.sendDiscoMessage(ipDst, dstKey, discoDest, &disco.Pong{ + go c.sendDiscoMessage(ipDst, nil, dstKey, discoDest, &disco.Pong{ TxID: dm.TxID, Src: src, }, discoVerboseLog) @@ -1988,12 +2025,12 @@ func (c *Conn) enqueueCallMeMaybe(derpAddr netip.AddrPort, de *endpoint) { for _, ep := range c.lastEndpoints { eps = append(eps, ep.Addr) } - go de.c.sendDiscoMessage(derpAddr, de.publicKey, epDisco.key, &disco.CallMeMaybe{MyNumber: eps}, discoLog) + go de.c.sendDiscoMessage(derpAddr, nil, de.publicKey, epDisco.key, &disco.CallMeMaybe{MyNumber: eps}, discoLog) if debugSendCallMeUnknownPeer() { // Send a callMeMaybe packet to a non-existent peer unknownKey := key.NewNode().Public() c.logf("magicsock: sending CallMeMaybe to unknown peer per TS_DEBUG_SEND_CALLME_UNKNOWN_PEER") - go de.c.sendDiscoMessage(derpAddr, unknownKey, epDisco.key, &disco.CallMeMaybe{MyNumber: eps}, discoLog) + go de.c.sendDiscoMessage(derpAddr, nil, unknownKey, epDisco.key, &disco.CallMeMaybe{MyNumber: eps}, discoLog) } } From 81420f8944641eb316777e40eccff6d7fcd4ca66 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 28 Apr 2025 15:29:31 -0700 Subject: [PATCH 0802/1708] tstest/integration: move code from integration_test.go to integration.go So it can be exported & used by other packages in future changes. 
Updates #15812 Change-Id: I319000989ebc294e29c92be7f44a0e11ae6f7761 Signed-off-by: Brad Fitzpatrick --- tstest/integration/integration.go | 604 +++++++++++++++++++++++++ tstest/integration/integration_test.go | 601 +----------------------- 2 files changed, 605 insertions(+), 600 deletions(-) diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index 36a92759f..2761f807d 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -9,19 +9,24 @@ package integration import ( "bytes" + "context" "crypto/tls" "encoding/json" + "flag" "fmt" "io" "log" "net" "net/http" "net/http/httptest" + "net/netip" "os" "os/exec" "path" "path/filepath" + "regexp" "runtime" + "strconv" "strings" "sync" "testing" @@ -30,16 +35,35 @@ import ( "go4.org/mem" "tailscale.com/derp" "tailscale.com/derp/derphttp" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/ipnstate" + "tailscale.com/ipn/store" "tailscale.com/net/stun/stuntest" + "tailscale.com/safesocket" + "tailscale.com/syncs" "tailscale.com/tailcfg" + "tailscale.com/tstest" + "tailscale.com/tstest/integration/testcontrol" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/nettype" + "tailscale.com/util/rands" "tailscale.com/util/zstdframe" "tailscale.com/version" ) +var ( + verboseTailscaled = flag.Bool("verbose-tailscaled", false, "verbose tailscaled logging") + verboseTailscale = flag.Bool("verbose-tailscale", false, "verbose tailscale CLI logging") +) + +// MainError is an error that's set if an error conditions happens outside of a +// context where a testing.TB is available. The caller can check it in its TestMain +// as a last ditch place to report errors. +var MainError syncs.AtomicValue[error] + // CleanupBinaries cleans up any resources created by calls to BinaryDir, TailscaleBinary, or TailscaledBinary. // It should be called from TestMain after all tests have completed. func CleanupBinaries() { @@ -361,3 +385,583 @@ func (lc *LogCatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) { } w.WriteHeader(200) // must have no content, but not a 204 } + +// testEnv contains the test environment (set of servers) used by one +// or more nodes. +type testEnv struct { + t testing.TB + tunMode bool + cli string + daemon string + loopbackPort *int + + LogCatcher *LogCatcher + LogCatcherServer *httptest.Server + + Control *testcontrol.Server + ControlServer *httptest.Server + + TrafficTrap *trafficTrap + TrafficTrapServer *httptest.Server +} + +// controlURL returns e.ControlServer.URL, panicking if it's the empty string, +// which it should never be in tests. +func (e *testEnv) controlURL() string { + s := e.ControlServer.URL + if s == "" { + panic("control server not set") + } + return s +} + +type testEnvOpt interface { + modifyTestEnv(*testEnv) +} + +type configureControl func(*testcontrol.Server) + +func (f configureControl) modifyTestEnv(te *testEnv) { + f(te.Control) +} + +// newTestEnv starts a bunch of services and returns a new test environment. +// newTestEnv arranges for the environment's resources to be cleaned up on exit. 
+func newTestEnv(t testing.TB, opts ...testEnvOpt) *testEnv { + if runtime.GOOS == "windows" { + t.Skip("not tested/working on Windows yet") + } + derpMap := RunDERPAndSTUN(t, logger.Discard, "127.0.0.1") + logc := new(LogCatcher) + control := &testcontrol.Server{ + DERPMap: derpMap, + } + control.HTTPTestServer = httptest.NewUnstartedServer(control) + trafficTrap := new(trafficTrap) + e := &testEnv{ + t: t, + cli: TailscaleBinary(t), + daemon: TailscaledBinary(t), + LogCatcher: logc, + LogCatcherServer: httptest.NewServer(logc), + Control: control, + ControlServer: control.HTTPTestServer, + TrafficTrap: trafficTrap, + TrafficTrapServer: httptest.NewServer(trafficTrap), + } + for _, o := range opts { + o.modifyTestEnv(e) + } + control.HTTPTestServer.Start() + t.Cleanup(func() { + // Shut down e. + if err := e.TrafficTrap.Err(); err != nil { + e.t.Errorf("traffic trap: %v", err) + e.t.Logf("logs: %s", e.LogCatcher.logsString()) + } + e.LogCatcherServer.Close() + e.TrafficTrapServer.Close() + e.ControlServer.Close() + }) + t.Logf("control URL: %v", e.controlURL()) + return e +} + +// testNode is a machine with a tailscale & tailscaled. +// Currently, the test is simplistic and user==node==machine. +// That may grow complexity later to test more. +type testNode struct { + env *testEnv + tailscaledParser *nodeOutputParser + + dir string // temp dir for sock & state + configFile string // or empty for none + sockFile string + stateFile string + upFlagGOOS string // if non-empty, sets TS_DEBUG_UP_FLAG_GOOS for cmd/tailscale CLI + + mu sync.Mutex + onLogLine []func([]byte) +} + +// newTestNode allocates a temp directory for a new test node. +// The node is not started automatically. +func newTestNode(t *testing.T, env *testEnv) *testNode { + dir := t.TempDir() + sockFile := filepath.Join(dir, "tailscale.sock") + if len(sockFile) >= 104 { + // Maximum length for a unix socket on darwin. Try something else. + sockFile = filepath.Join(os.TempDir(), rands.HexString(8)+".sock") + t.Cleanup(func() { os.Remove(sockFile) }) + } + n := &testNode{ + env: env, + dir: dir, + sockFile: sockFile, + stateFile: filepath.Join(dir, "tailscale.state"), + } + + // Look for a data race. Once we see the start marker, start logging the rest. + var sawRace bool + var sawPanic bool + n.addLogLineHook(func(line []byte) { + lineB := mem.B(line) + if mem.Contains(lineB, mem.S("WARNING: DATA RACE")) { + sawRace = true + } + if mem.HasPrefix(lineB, mem.S("panic: ")) { + sawPanic = true + } + if sawRace || sawPanic { + t.Logf("%s", line) + } + }) + + return n +} + +func (n *testNode) diskPrefs() *ipn.Prefs { + t := n.env.t + t.Helper() + if _, err := os.ReadFile(n.stateFile); err != nil { + t.Fatalf("reading prefs: %v", err) + } + fs, err := store.NewFileStore(nil, n.stateFile) + if err != nil { + t.Fatalf("reading prefs, NewFileStore: %v", err) + } + p, err := ipnlocal.ReadStartupPrefsForTest(t.Logf, fs) + if err != nil { + t.Fatalf("reading prefs, ReadDiskPrefsForTest: %v", err) + } + return p.AsStruct() +} + +// AwaitResponding waits for n's tailscaled to be up enough to be +// responding, but doesn't wait for any particular state. 
+func (n *testNode) AwaitResponding() { + t := n.env.t + t.Helper() + n.AwaitListening() + + st := n.MustStatus() + t.Logf("Status: %s", st.BackendState) + + if err := tstest.WaitFor(20*time.Second, func() error { + const sub = `Program starting: ` + if !n.env.LogCatcher.logsContains(mem.S(sub)) { + return fmt.Errorf("log catcher didn't see %#q; got %s", sub, n.env.LogCatcher.logsString()) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// addLogLineHook registers a hook f to be called on each tailscaled +// log line output. +func (n *testNode) addLogLineHook(f func([]byte)) { + n.mu.Lock() + defer n.mu.Unlock() + n.onLogLine = append(n.onLogLine, f) +} + +// socks5AddrChan returns a channel that receives the address (e.g. "localhost:23874") +// of the node's SOCKS5 listener, once started. +func (n *testNode) socks5AddrChan() <-chan string { + ch := make(chan string, 1) + n.addLogLineHook(func(line []byte) { + const sub = "SOCKS5 listening on " + i := mem.Index(mem.B(line), mem.S(sub)) + if i == -1 { + return + } + addr := strings.TrimSpace(string(line)[i+len(sub):]) + select { + case ch <- addr: + default: + } + }) + return ch +} + +func (n *testNode) AwaitSocksAddr(ch <-chan string) string { + t := n.env.t + t.Helper() + timer := time.NewTimer(10 * time.Second) + defer timer.Stop() + select { + case v := <-ch: + return v + case <-timer.C: + t.Fatal("timeout waiting for node to log its SOCK5 listening address") + panic("unreachable") + } +} + +// nodeOutputParser parses stderr of tailscaled processes, calling the +// per-line callbacks previously registered via +// testNode.addLogLineHook. +type nodeOutputParser struct { + allBuf bytes.Buffer + pendLineBuf bytes.Buffer + n *testNode +} + +func (op *nodeOutputParser) Write(p []byte) (n int, err error) { + tn := op.n + tn.mu.Lock() + defer tn.mu.Unlock() + + op.allBuf.Write(p) + n, err = op.pendLineBuf.Write(p) + op.parseLinesLocked() + return +} + +func (op *nodeOutputParser) parseLinesLocked() { + n := op.n + buf := op.pendLineBuf.Bytes() + for len(buf) > 0 { + nl := bytes.IndexByte(buf, '\n') + if nl == -1 { + break + } + line := buf[:nl+1] + buf = buf[nl+1:] + + for _, f := range n.onLogLine { + f(line) + } + } + if len(buf) == 0 { + op.pendLineBuf.Reset() + } else { + io.CopyN(io.Discard, &op.pendLineBuf, int64(op.pendLineBuf.Len()-len(buf))) + } +} + +type Daemon struct { + Process *os.Process +} + +func (d *Daemon) MustCleanShutdown(t testing.TB) { + d.Process.Signal(os.Interrupt) + ps, err := d.Process.Wait() + if err != nil { + t.Fatalf("tailscaled Wait: %v", err) + } + if ps.ExitCode() != 0 { + t.Errorf("tailscaled ExitCode = %d; want 0", ps.ExitCode()) + } +} + +// StartDaemon starts the node's tailscaled, failing if it fails to start. +// StartDaemon ensures that the process will exit when the test completes. 
+func (n *testNode) StartDaemon() *Daemon { + return n.StartDaemonAsIPNGOOS(runtime.GOOS) +} + +func (n *testNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { + t := n.env.t + cmd := exec.Command(n.env.daemon) + cmd.Args = append(cmd.Args, + "--state="+n.stateFile, + "--socket="+n.sockFile, + "--socks5-server=localhost:0", + ) + if *verboseTailscaled { + cmd.Args = append(cmd.Args, "-verbose=2") + } + if !n.env.tunMode { + cmd.Args = append(cmd.Args, + "--tun=userspace-networking", + ) + } + if n.configFile != "" { + cmd.Args = append(cmd.Args, "--config="+n.configFile) + } + cmd.Env = append(os.Environ(), + "TS_CONTROL_IS_PLAINTEXT_HTTP=1", + "TS_DEBUG_PERMIT_HTTP_C2N=1", + "TS_LOG_TARGET="+n.env.LogCatcherServer.URL, + "HTTP_PROXY="+n.env.TrafficTrapServer.URL, + "HTTPS_PROXY="+n.env.TrafficTrapServer.URL, + "TS_DEBUG_FAKE_GOOS="+ipnGOOS, + "TS_LOGS_DIR="+t.TempDir(), + "TS_NETCHECK_GENERATE_204_URL="+n.env.ControlServer.URL+"/generate_204", + "TS_ASSUME_NETWORK_UP_FOR_TEST=1", // don't pause control client in airplane mode (no wifi, etc) + "TS_PANIC_IF_HIT_MAIN_CONTROL=1", + "TS_DISABLE_PORTMAPPER=1", // shouldn't be needed; test is all localhost + "TS_DEBUG_LOG_RATE=all", + ) + if n.env.loopbackPort != nil { + cmd.Env = append(cmd.Env, "TS_DEBUG_NETSTACK_LOOPBACK_PORT="+strconv.Itoa(*n.env.loopbackPort)) + } + if version.IsRace() { + cmd.Env = append(cmd.Env, "GORACE=halt_on_error=1") + } + n.tailscaledParser = &nodeOutputParser{n: n} + cmd.Stderr = n.tailscaledParser + if *verboseTailscaled { + cmd.Stdout = os.Stdout + cmd.Stderr = io.MultiWriter(cmd.Stderr, os.Stderr) + } + if runtime.GOOS != "windows" { + pr, pw, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { pw.Close() }) + cmd.ExtraFiles = append(cmd.ExtraFiles, pr) + cmd.Env = append(cmd.Env, "TS_PARENT_DEATH_FD=3") + } + if err := cmd.Start(); err != nil { + t.Fatalf("starting tailscaled: %v", err) + } + t.Cleanup(func() { cmd.Process.Kill() }) + return &Daemon{ + Process: cmd.Process, + } +} + +func (n *testNode) MustUp(extraArgs ...string) { + t := n.env.t + t.Helper() + args := []string{ + "up", + "--login-server=" + n.env.controlURL(), + "--reset", + } + args = append(args, extraArgs...) + cmd := n.Tailscale(args...) + t.Logf("Running %v ...", cmd) + cmd.Stdout = nil // in case --verbose-tailscale was set + cmd.Stderr = nil // in case --verbose-tailscale was set + if b, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("up: %v, %v", string(b), err) + } +} + +func (n *testNode) MustDown() { + t := n.env.t + t.Logf("Running down ...") + if err := n.Tailscale("down", "--accept-risk=all").Run(); err != nil { + t.Fatalf("down: %v", err) + } +} + +func (n *testNode) MustLogOut() { + t := n.env.t + t.Logf("Running logout ...") + if err := n.Tailscale("logout").Run(); err != nil { + t.Fatalf("logout: %v", err) + } +} + +func (n *testNode) Ping(otherNode *testNode) error { + t := n.env.t + ip := otherNode.AwaitIP4().String() + t.Logf("Running ping %v (from %v)...", ip, n.AwaitIP4()) + return n.Tailscale("ping", ip).Run() +} + +// AwaitListening waits for the tailscaled to be serving local clients +// over its localhost IPC mechanism. 
(Unix socket, etc) +func (n *testNode) AwaitListening() { + t := n.env.t + if err := tstest.WaitFor(20*time.Second, func() (err error) { + c, err := safesocket.ConnectContext(context.Background(), n.sockFile) + if err == nil { + c.Close() + } + return err + }); err != nil { + t.Fatal(err) + } +} + +func (n *testNode) AwaitIPs() []netip.Addr { + t := n.env.t + t.Helper() + var addrs []netip.Addr + if err := tstest.WaitFor(20*time.Second, func() error { + cmd := n.Tailscale("ip") + cmd.Stdout = nil // in case --verbose-tailscale was set + cmd.Stderr = nil // in case --verbose-tailscale was set + out, err := cmd.Output() + if err != nil { + return err + } + ips := string(out) + ipslice := strings.Fields(ips) + addrs = make([]netip.Addr, len(ipslice)) + + for i, ip := range ipslice { + netIP, err := netip.ParseAddr(ip) + if err != nil { + t.Fatal(err) + } + addrs[i] = netIP + } + return nil + }); err != nil { + t.Fatalf("awaiting an IP address: %v", err) + } + if len(addrs) == 0 { + t.Fatalf("returned IP address was blank") + } + return addrs +} + +// AwaitIP4 returns the IPv4 address of n. +func (n *testNode) AwaitIP4() netip.Addr { + t := n.env.t + t.Helper() + ips := n.AwaitIPs() + return ips[0] +} + +// AwaitIP6 returns the IPv6 address of n. +func (n *testNode) AwaitIP6() netip.Addr { + t := n.env.t + t.Helper() + ips := n.AwaitIPs() + return ips[1] +} + +// AwaitRunning waits for n to reach the IPN state "Running". +func (n *testNode) AwaitRunning() { + t := n.env.t + t.Helper() + n.AwaitBackendState("Running") +} + +func (n *testNode) AwaitBackendState(state string) { + t := n.env.t + t.Helper() + if err := tstest.WaitFor(20*time.Second, func() error { + st, err := n.Status() + if err != nil { + return err + } + if st.BackendState != state { + return fmt.Errorf("in state %q; want %q", st.BackendState, state) + } + return nil + }); err != nil { + t.Fatalf("failure/timeout waiting for transition to Running status: %v", err) + } +} + +// AwaitNeedsLogin waits for n to reach the IPN state "NeedsLogin". +func (n *testNode) AwaitNeedsLogin() { + t := n.env.t + t.Helper() + if err := tstest.WaitFor(20*time.Second, func() error { + st, err := n.Status() + if err != nil { + return err + } + if st.BackendState != "NeedsLogin" { + return fmt.Errorf("in state %q", st.BackendState) + } + return nil + }); err != nil { + t.Fatalf("failure/timeout waiting for transition to NeedsLogin status: %v", err) + } +} + +func (n *testNode) TailscaleForOutput(arg ...string) *exec.Cmd { + cmd := n.Tailscale(arg...) + cmd.Stdout = nil + cmd.Stderr = nil + return cmd +} + +// Tailscale returns a command that runs the tailscale CLI with the provided arguments. +// It does not start the process. +func (n *testNode) Tailscale(arg ...string) *exec.Cmd { + cmd := exec.Command(n.env.cli) + cmd.Args = append(cmd.Args, "--socket="+n.sockFile) + cmd.Args = append(cmd.Args, arg...) 
+ cmd.Dir = n.dir + cmd.Env = append(os.Environ(), + "TS_DEBUG_UP_FLAG_GOOS="+n.upFlagGOOS, + "TS_LOGS_DIR="+n.env.t.TempDir(), + ) + if *verboseTailscale { + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + } + return cmd +} + +func (n *testNode) Status() (*ipnstate.Status, error) { + cmd := n.Tailscale("status", "--json") + cmd.Stdout = nil // in case --verbose-tailscale was set + cmd.Stderr = nil // in case --verbose-tailscale was set + out, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("running tailscale status: %v, %s", err, out) + } + st := new(ipnstate.Status) + if err := json.Unmarshal(out, st); err != nil { + return nil, fmt.Errorf("decoding tailscale status JSON: %w\njson:\n%s", err, out) + } + return st, nil +} + +func (n *testNode) MustStatus() *ipnstate.Status { + tb := n.env.t + tb.Helper() + st, err := n.Status() + if err != nil { + tb.Fatal(err) + } + return st +} + +// trafficTrap is an HTTP proxy handler to note whether any +// HTTP traffic tries to leave localhost from tailscaled. We don't +// expect any, so any request triggers a failure. +type trafficTrap struct { + atomicErr syncs.AtomicValue[error] +} + +func (tt *trafficTrap) Err() error { + return tt.atomicErr.Load() +} + +func (tt *trafficTrap) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var got bytes.Buffer + r.Write(&got) + err := fmt.Errorf("unexpected HTTP request via proxy: %s", got.Bytes()) + MainError.Store(err) + if tt.Err() == nil { + // Best effort at remembering the first request. + tt.atomicErr.Store(err) + } + log.Printf("Error: %v", err) + w.WriteHeader(403) +} + +type authURLParserWriter struct { + buf bytes.Buffer + fn func(urlStr string) error +} + +var authURLRx = regexp.MustCompile(`(https?://\S+/auth/\S+)`) + +func (w *authURLParserWriter) Write(p []byte) (n int, err error) { + n, err = w.buf.Write(p) + m := authURLRx.FindSubmatch(w.buf.Bytes()) + if m != nil { + urlStr := string(m[1]) + w.buf.Reset() // so it's not matched again + if err := w.fn(urlStr); err != nil { + return 0, err + } + } + return n, err +} diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 20d8908da..d2f054361 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -13,7 +13,6 @@ import ( "flag" "fmt" "io" - "log" "net" "net/http" "net/http/httptest" @@ -22,10 +21,7 @@ import ( "os/exec" "path/filepath" "regexp" - "runtime" "strconv" - "strings" - "sync" "sync/atomic" "testing" "time" @@ -37,32 +33,17 @@ import ( "tailscale.com/clientupdate" "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/ipn" - "tailscale.com/ipn/ipnlocal" - "tailscale.com/ipn/ipnstate" - "tailscale.com/ipn/store" "tailscale.com/net/tsaddr" "tailscale.com/net/tstun" - "tailscale.com/safesocket" - "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/tstest/integration/testcontrol" "tailscale.com/types/key" - "tailscale.com/types/logger" "tailscale.com/types/opt" "tailscale.com/types/ptr" "tailscale.com/util/must" - "tailscale.com/util/rands" - "tailscale.com/version" ) -var ( - verboseTailscaled = flag.Bool("verbose-tailscaled", false, "verbose tailscaled logging") - verboseTailscale = flag.Bool("verbose-tailscale", false, "verbose tailscale CLI logging") -) - -var mainError syncs.AtomicValue[error] - func TestMain(m *testing.M) { // Have to disable UPnP which hits the network, otherwise it fails due to HTTP proxy. 
os.Setenv("TS_DISABLE_UPNP", "true") @@ -72,7 +53,7 @@ func TestMain(m *testing.M) { if v != 0 { os.Exit(v) } - if err := mainError.Load(); err != nil { + if err := MainError.Load(); err != nil { fmt.Fprintf(os.Stderr, "FAIL: %v\n", err) os.Exit(1) } @@ -1485,583 +1466,3 @@ func TestNetstackUDPLoopback(t *testing.T) { d1.MustCleanShutdown(t) } - -// testEnv contains the test environment (set of servers) used by one -// or more nodes. -type testEnv struct { - t testing.TB - tunMode bool - cli string - daemon string - loopbackPort *int - - LogCatcher *LogCatcher - LogCatcherServer *httptest.Server - - Control *testcontrol.Server - ControlServer *httptest.Server - - TrafficTrap *trafficTrap - TrafficTrapServer *httptest.Server -} - -// controlURL returns e.ControlServer.URL, panicking if it's the empty string, -// which it should never be in tests. -func (e *testEnv) controlURL() string { - s := e.ControlServer.URL - if s == "" { - panic("control server not set") - } - return s -} - -type testEnvOpt interface { - modifyTestEnv(*testEnv) -} - -type configureControl func(*testcontrol.Server) - -func (f configureControl) modifyTestEnv(te *testEnv) { - f(te.Control) -} - -// newTestEnv starts a bunch of services and returns a new test environment. -// newTestEnv arranges for the environment's resources to be cleaned up on exit. -func newTestEnv(t testing.TB, opts ...testEnvOpt) *testEnv { - if runtime.GOOS == "windows" { - t.Skip("not tested/working on Windows yet") - } - derpMap := RunDERPAndSTUN(t, logger.Discard, "127.0.0.1") - logc := new(LogCatcher) - control := &testcontrol.Server{ - DERPMap: derpMap, - } - control.HTTPTestServer = httptest.NewUnstartedServer(control) - trafficTrap := new(trafficTrap) - e := &testEnv{ - t: t, - cli: TailscaleBinary(t), - daemon: TailscaledBinary(t), - LogCatcher: logc, - LogCatcherServer: httptest.NewServer(logc), - Control: control, - ControlServer: control.HTTPTestServer, - TrafficTrap: trafficTrap, - TrafficTrapServer: httptest.NewServer(trafficTrap), - } - for _, o := range opts { - o.modifyTestEnv(e) - } - control.HTTPTestServer.Start() - t.Cleanup(func() { - // Shut down e. - if err := e.TrafficTrap.Err(); err != nil { - e.t.Errorf("traffic trap: %v", err) - e.t.Logf("logs: %s", e.LogCatcher.logsString()) - } - e.LogCatcherServer.Close() - e.TrafficTrapServer.Close() - e.ControlServer.Close() - }) - t.Logf("control URL: %v", e.controlURL()) - return e -} - -// testNode is a machine with a tailscale & tailscaled. -// Currently, the test is simplistic and user==node==machine. -// That may grow complexity later to test more. -type testNode struct { - env *testEnv - tailscaledParser *nodeOutputParser - - dir string // temp dir for sock & state - configFile string // or empty for none - sockFile string - stateFile string - upFlagGOOS string // if non-empty, sets TS_DEBUG_UP_FLAG_GOOS for cmd/tailscale CLI - - mu sync.Mutex - onLogLine []func([]byte) -} - -// newTestNode allocates a temp directory for a new test node. -// The node is not started automatically. -func newTestNode(t *testing.T, env *testEnv) *testNode { - dir := t.TempDir() - sockFile := filepath.Join(dir, "tailscale.sock") - if len(sockFile) >= 104 { - // Maximum length for a unix socket on darwin. Try something else. - sockFile = filepath.Join(os.TempDir(), rands.HexString(8)+".sock") - t.Cleanup(func() { os.Remove(sockFile) }) - } - n := &testNode{ - env: env, - dir: dir, - sockFile: sockFile, - stateFile: filepath.Join(dir, "tailscale.state"), - } - - // Look for a data race. 
Once we see the start marker, start logging the rest. - var sawRace bool - var sawPanic bool - n.addLogLineHook(func(line []byte) { - lineB := mem.B(line) - if mem.Contains(lineB, mem.S("WARNING: DATA RACE")) { - sawRace = true - } - if mem.HasPrefix(lineB, mem.S("panic: ")) { - sawPanic = true - } - if sawRace || sawPanic { - t.Logf("%s", line) - } - }) - - return n -} - -func (n *testNode) diskPrefs() *ipn.Prefs { - t := n.env.t - t.Helper() - if _, err := os.ReadFile(n.stateFile); err != nil { - t.Fatalf("reading prefs: %v", err) - } - fs, err := store.NewFileStore(nil, n.stateFile) - if err != nil { - t.Fatalf("reading prefs, NewFileStore: %v", err) - } - p, err := ipnlocal.ReadStartupPrefsForTest(t.Logf, fs) - if err != nil { - t.Fatalf("reading prefs, ReadDiskPrefsForTest: %v", err) - } - return p.AsStruct() -} - -// AwaitResponding waits for n's tailscaled to be up enough to be -// responding, but doesn't wait for any particular state. -func (n *testNode) AwaitResponding() { - t := n.env.t - t.Helper() - n.AwaitListening() - - st := n.MustStatus() - t.Logf("Status: %s", st.BackendState) - - if err := tstest.WaitFor(20*time.Second, func() error { - const sub = `Program starting: ` - if !n.env.LogCatcher.logsContains(mem.S(sub)) { - return fmt.Errorf("log catcher didn't see %#q; got %s", sub, n.env.LogCatcher.logsString()) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// addLogLineHook registers a hook f to be called on each tailscaled -// log line output. -func (n *testNode) addLogLineHook(f func([]byte)) { - n.mu.Lock() - defer n.mu.Unlock() - n.onLogLine = append(n.onLogLine, f) -} - -// socks5AddrChan returns a channel that receives the address (e.g. "localhost:23874") -// of the node's SOCKS5 listener, once started. -func (n *testNode) socks5AddrChan() <-chan string { - ch := make(chan string, 1) - n.addLogLineHook(func(line []byte) { - const sub = "SOCKS5 listening on " - i := mem.Index(mem.B(line), mem.S(sub)) - if i == -1 { - return - } - addr := strings.TrimSpace(string(line)[i+len(sub):]) - select { - case ch <- addr: - default: - } - }) - return ch -} - -func (n *testNode) AwaitSocksAddr(ch <-chan string) string { - t := n.env.t - t.Helper() - timer := time.NewTimer(10 * time.Second) - defer timer.Stop() - select { - case v := <-ch: - return v - case <-timer.C: - t.Fatal("timeout waiting for node to log its SOCK5 listening address") - panic("unreachable") - } -} - -// nodeOutputParser parses stderr of tailscaled processes, calling the -// per-line callbacks previously registered via -// testNode.addLogLineHook. 
-type nodeOutputParser struct { - allBuf bytes.Buffer - pendLineBuf bytes.Buffer - n *testNode -} - -func (op *nodeOutputParser) Write(p []byte) (n int, err error) { - tn := op.n - tn.mu.Lock() - defer tn.mu.Unlock() - - op.allBuf.Write(p) - n, err = op.pendLineBuf.Write(p) - op.parseLinesLocked() - return -} - -func (op *nodeOutputParser) parseLinesLocked() { - n := op.n - buf := op.pendLineBuf.Bytes() - for len(buf) > 0 { - nl := bytes.IndexByte(buf, '\n') - if nl == -1 { - break - } - line := buf[:nl+1] - buf = buf[nl+1:] - - for _, f := range n.onLogLine { - f(line) - } - } - if len(buf) == 0 { - op.pendLineBuf.Reset() - } else { - io.CopyN(io.Discard, &op.pendLineBuf, int64(op.pendLineBuf.Len()-len(buf))) - } -} - -type Daemon struct { - Process *os.Process -} - -func (d *Daemon) MustCleanShutdown(t testing.TB) { - d.Process.Signal(os.Interrupt) - ps, err := d.Process.Wait() - if err != nil { - t.Fatalf("tailscaled Wait: %v", err) - } - if ps.ExitCode() != 0 { - t.Errorf("tailscaled ExitCode = %d; want 0", ps.ExitCode()) - } -} - -// StartDaemon starts the node's tailscaled, failing if it fails to start. -// StartDaemon ensures that the process will exit when the test completes. -func (n *testNode) StartDaemon() *Daemon { - return n.StartDaemonAsIPNGOOS(runtime.GOOS) -} - -func (n *testNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { - t := n.env.t - cmd := exec.Command(n.env.daemon) - cmd.Args = append(cmd.Args, - "--state="+n.stateFile, - "--socket="+n.sockFile, - "--socks5-server=localhost:0", - ) - if *verboseTailscaled { - cmd.Args = append(cmd.Args, "-verbose=2") - } - if !n.env.tunMode { - cmd.Args = append(cmd.Args, - "--tun=userspace-networking", - ) - } - if n.configFile != "" { - cmd.Args = append(cmd.Args, "--config="+n.configFile) - } - cmd.Env = append(os.Environ(), - "TS_CONTROL_IS_PLAINTEXT_HTTP=1", - "TS_DEBUG_PERMIT_HTTP_C2N=1", - "TS_LOG_TARGET="+n.env.LogCatcherServer.URL, - "HTTP_PROXY="+n.env.TrafficTrapServer.URL, - "HTTPS_PROXY="+n.env.TrafficTrapServer.URL, - "TS_DEBUG_FAKE_GOOS="+ipnGOOS, - "TS_LOGS_DIR="+t.TempDir(), - "TS_NETCHECK_GENERATE_204_URL="+n.env.ControlServer.URL+"/generate_204", - "TS_ASSUME_NETWORK_UP_FOR_TEST=1", // don't pause control client in airplane mode (no wifi, etc) - "TS_PANIC_IF_HIT_MAIN_CONTROL=1", - "TS_DISABLE_PORTMAPPER=1", // shouldn't be needed; test is all localhost - "TS_DEBUG_LOG_RATE=all", - ) - if n.env.loopbackPort != nil { - cmd.Env = append(cmd.Env, "TS_DEBUG_NETSTACK_LOOPBACK_PORT="+strconv.Itoa(*n.env.loopbackPort)) - } - if version.IsRace() { - cmd.Env = append(cmd.Env, "GORACE=halt_on_error=1") - } - n.tailscaledParser = &nodeOutputParser{n: n} - cmd.Stderr = n.tailscaledParser - if *verboseTailscaled { - cmd.Stdout = os.Stdout - cmd.Stderr = io.MultiWriter(cmd.Stderr, os.Stderr) - } - if runtime.GOOS != "windows" { - pr, pw, err := os.Pipe() - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { pw.Close() }) - cmd.ExtraFiles = append(cmd.ExtraFiles, pr) - cmd.Env = append(cmd.Env, "TS_PARENT_DEATH_FD=3") - } - if err := cmd.Start(); err != nil { - t.Fatalf("starting tailscaled: %v", err) - } - t.Cleanup(func() { cmd.Process.Kill() }) - return &Daemon{ - Process: cmd.Process, - } -} - -func (n *testNode) MustUp(extraArgs ...string) { - t := n.env.t - t.Helper() - args := []string{ - "up", - "--login-server=" + n.env.controlURL(), - "--reset", - } - args = append(args, extraArgs...) - cmd := n.Tailscale(args...) 
- t.Logf("Running %v ...", cmd) - cmd.Stdout = nil // in case --verbose-tailscale was set - cmd.Stderr = nil // in case --verbose-tailscale was set - if b, err := cmd.CombinedOutput(); err != nil { - t.Fatalf("up: %v, %v", string(b), err) - } -} - -func (n *testNode) MustDown() { - t := n.env.t - t.Logf("Running down ...") - if err := n.Tailscale("down", "--accept-risk=all").Run(); err != nil { - t.Fatalf("down: %v", err) - } -} - -func (n *testNode) MustLogOut() { - t := n.env.t - t.Logf("Running logout ...") - if err := n.Tailscale("logout").Run(); err != nil { - t.Fatalf("logout: %v", err) - } -} - -func (n *testNode) Ping(otherNode *testNode) error { - t := n.env.t - ip := otherNode.AwaitIP4().String() - t.Logf("Running ping %v (from %v)...", ip, n.AwaitIP4()) - return n.Tailscale("ping", ip).Run() -} - -// AwaitListening waits for the tailscaled to be serving local clients -// over its localhost IPC mechanism. (Unix socket, etc) -func (n *testNode) AwaitListening() { - t := n.env.t - if err := tstest.WaitFor(20*time.Second, func() (err error) { - c, err := safesocket.ConnectContext(context.Background(), n.sockFile) - if err == nil { - c.Close() - } - return err - }); err != nil { - t.Fatal(err) - } -} - -func (n *testNode) AwaitIPs() []netip.Addr { - t := n.env.t - t.Helper() - var addrs []netip.Addr - if err := tstest.WaitFor(20*time.Second, func() error { - cmd := n.Tailscale("ip") - cmd.Stdout = nil // in case --verbose-tailscale was set - cmd.Stderr = nil // in case --verbose-tailscale was set - out, err := cmd.Output() - if err != nil { - return err - } - ips := string(out) - ipslice := strings.Fields(ips) - addrs = make([]netip.Addr, len(ipslice)) - - for i, ip := range ipslice { - netIP, err := netip.ParseAddr(ip) - if err != nil { - t.Fatal(err) - } - addrs[i] = netIP - } - return nil - }); err != nil { - t.Fatalf("awaiting an IP address: %v", err) - } - if len(addrs) == 0 { - t.Fatalf("returned IP address was blank") - } - return addrs -} - -// AwaitIP4 returns the IPv4 address of n. -func (n *testNode) AwaitIP4() netip.Addr { - t := n.env.t - t.Helper() - ips := n.AwaitIPs() - return ips[0] -} - -// AwaitIP6 returns the IPv6 address of n. -func (n *testNode) AwaitIP6() netip.Addr { - t := n.env.t - t.Helper() - ips := n.AwaitIPs() - return ips[1] -} - -// AwaitRunning waits for n to reach the IPN state "Running". -func (n *testNode) AwaitRunning() { - t := n.env.t - t.Helper() - n.AwaitBackendState("Running") -} - -func (n *testNode) AwaitBackendState(state string) { - t := n.env.t - t.Helper() - if err := tstest.WaitFor(20*time.Second, func() error { - st, err := n.Status() - if err != nil { - return err - } - if st.BackendState != state { - return fmt.Errorf("in state %q; want %q", st.BackendState, state) - } - return nil - }); err != nil { - t.Fatalf("failure/timeout waiting for transition to Running status: %v", err) - } -} - -// AwaitNeedsLogin waits for n to reach the IPN state "NeedsLogin". -func (n *testNode) AwaitNeedsLogin() { - t := n.env.t - t.Helper() - if err := tstest.WaitFor(20*time.Second, func() error { - st, err := n.Status() - if err != nil { - return err - } - if st.BackendState != "NeedsLogin" { - return fmt.Errorf("in state %q", st.BackendState) - } - return nil - }); err != nil { - t.Fatalf("failure/timeout waiting for transition to NeedsLogin status: %v", err) - } -} - -func (n *testNode) TailscaleForOutput(arg ...string) *exec.Cmd { - cmd := n.Tailscale(arg...) 
- cmd.Stdout = nil - cmd.Stderr = nil - return cmd -} - -// Tailscale returns a command that runs the tailscale CLI with the provided arguments. -// It does not start the process. -func (n *testNode) Tailscale(arg ...string) *exec.Cmd { - cmd := exec.Command(n.env.cli) - cmd.Args = append(cmd.Args, "--socket="+n.sockFile) - cmd.Args = append(cmd.Args, arg...) - cmd.Dir = n.dir - cmd.Env = append(os.Environ(), - "TS_DEBUG_UP_FLAG_GOOS="+n.upFlagGOOS, - "TS_LOGS_DIR="+n.env.t.TempDir(), - ) - if *verboseTailscale { - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - } - return cmd -} - -func (n *testNode) Status() (*ipnstate.Status, error) { - cmd := n.Tailscale("status", "--json") - cmd.Stdout = nil // in case --verbose-tailscale was set - cmd.Stderr = nil // in case --verbose-tailscale was set - out, err := cmd.CombinedOutput() - if err != nil { - return nil, fmt.Errorf("running tailscale status: %v, %s", err, out) - } - st := new(ipnstate.Status) - if err := json.Unmarshal(out, st); err != nil { - return nil, fmt.Errorf("decoding tailscale status JSON: %w\njson:\n%s", err, out) - } - return st, nil -} - -func (n *testNode) MustStatus() *ipnstate.Status { - tb := n.env.t - tb.Helper() - st, err := n.Status() - if err != nil { - tb.Fatal(err) - } - return st -} - -// trafficTrap is an HTTP proxy handler to note whether any -// HTTP traffic tries to leave localhost from tailscaled. We don't -// expect any, so any request triggers a failure. -type trafficTrap struct { - atomicErr syncs.AtomicValue[error] -} - -func (tt *trafficTrap) Err() error { - return tt.atomicErr.Load() -} - -func (tt *trafficTrap) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var got bytes.Buffer - r.Write(&got) - err := fmt.Errorf("unexpected HTTP request via proxy: %s", got.Bytes()) - mainError.Store(err) - if tt.Err() == nil { - // Best effort at remembering the first request. - tt.atomicErr.Store(err) - } - log.Printf("Error: %v", err) - w.WriteHeader(403) -} - -type authURLParserWriter struct { - buf bytes.Buffer - fn func(urlStr string) error -} - -var authURLRx = regexp.MustCompile(`(https?://\S+/auth/\S+)`) - -func (w *authURLParserWriter) Write(p []byte) (n int, err error) { - n, err = w.buf.Write(p) - m := authURLRx.FindSubmatch(w.buf.Bytes()) - if m != nil { - urlStr := string(m[1]) - w.buf.Reset() // so it's not matched again - if err := w.fn(urlStr); err != nil { - return 0, err - } - } - return n, err -} From ac1215c7e0e982064c58ddfc7dc5c087dc791a87 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 28 Apr 2025 15:35:51 -0700 Subject: [PATCH 0803/1708] tstest/integration: export test helpers In prep for Taildrop integration tests using them from another package. Updates #15812 Change-Id: I6a995de4e7400658229d99c90349ad5bd1f503ae Signed-off-by: Brad Fitzpatrick --- tstest/integration/integration.go | 90 +++++++++--------- tstest/integration/integration_test.go | 122 ++++++++++++------------- 2 files changed, 107 insertions(+), 105 deletions(-) diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index 2761f807d..9df536971 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -386,9 +386,9 @@ func (lc *LogCatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) // must have no content, but not a 204 } -// testEnv contains the test environment (set of servers) used by one +// TestEnv contains the test environment (set of servers) used by one // or more nodes. 
-type testEnv struct { +type TestEnv struct { t testing.TB tunMode bool cli string @@ -405,9 +405,9 @@ type testEnv struct { TrafficTrapServer *httptest.Server } -// controlURL returns e.ControlServer.URL, panicking if it's the empty string, +// ControlURL returns e.ControlServer.URL, panicking if it's the empty string, // which it should never be in tests. -func (e *testEnv) controlURL() string { +func (e *TestEnv) ControlURL() string { s := e.ControlServer.URL if s == "" { panic("control server not set") @@ -415,19 +415,21 @@ func (e *testEnv) controlURL() string { return s } -type testEnvOpt interface { - modifyTestEnv(*testEnv) +// TestEnvOpt represents an option that can be passed to NewTestEnv. +type TestEnvOpt interface { + ModifyTestEnv(*TestEnv) } -type configureControl func(*testcontrol.Server) +// ConfigureControl is a test option that configures the test control server. +type ConfigureControl func(*testcontrol.Server) -func (f configureControl) modifyTestEnv(te *testEnv) { +func (f ConfigureControl) ModifyTestEnv(te *TestEnv) { f(te.Control) } -// newTestEnv starts a bunch of services and returns a new test environment. -// newTestEnv arranges for the environment's resources to be cleaned up on exit. -func newTestEnv(t testing.TB, opts ...testEnvOpt) *testEnv { +// NewTestEnv starts a bunch of services and returns a new test environment. +// NewTestEnv arranges for the environment's resources to be cleaned up on exit. +func NewTestEnv(t testing.TB, opts ...TestEnvOpt) *TestEnv { if runtime.GOOS == "windows" { t.Skip("not tested/working on Windows yet") } @@ -438,7 +440,7 @@ func newTestEnv(t testing.TB, opts ...testEnvOpt) *testEnv { } control.HTTPTestServer = httptest.NewUnstartedServer(control) trafficTrap := new(trafficTrap) - e := &testEnv{ + e := &TestEnv{ t: t, cli: TailscaleBinary(t), daemon: TailscaledBinary(t), @@ -450,7 +452,7 @@ func newTestEnv(t testing.TB, opts ...testEnvOpt) *testEnv { TrafficTrapServer: httptest.NewServer(trafficTrap), } for _, o := range opts { - o.modifyTestEnv(e) + o.ModifyTestEnv(e) } control.HTTPTestServer.Start() t.Cleanup(func() { @@ -463,15 +465,15 @@ func newTestEnv(t testing.TB, opts ...testEnvOpt) *testEnv { e.TrafficTrapServer.Close() e.ControlServer.Close() }) - t.Logf("control URL: %v", e.controlURL()) + t.Logf("control URL: %v", e.ControlURL()) return e } -// testNode is a machine with a tailscale & tailscaled. +// TestNode is a machine with a tailscale & tailscaled. // Currently, the test is simplistic and user==node==machine. // That may grow complexity later to test more. -type testNode struct { - env *testEnv +type TestNode struct { + env *TestEnv tailscaledParser *nodeOutputParser dir string // temp dir for sock & state @@ -484,9 +486,9 @@ type testNode struct { onLogLine []func([]byte) } -// newTestNode allocates a temp directory for a new test node. +// NewTestNode allocates a temp directory for a new test node. // The node is not started automatically. 
-func newTestNode(t *testing.T, env *testEnv) *testNode { +func NewTestNode(t *testing.T, env *TestEnv) *TestNode { dir := t.TempDir() sockFile := filepath.Join(dir, "tailscale.sock") if len(sockFile) >= 104 { @@ -494,7 +496,7 @@ func newTestNode(t *testing.T, env *testEnv) *testNode { sockFile = filepath.Join(os.TempDir(), rands.HexString(8)+".sock") t.Cleanup(func() { os.Remove(sockFile) }) } - n := &testNode{ + n := &TestNode{ env: env, dir: dir, sockFile: sockFile, @@ -520,7 +522,7 @@ func newTestNode(t *testing.T, env *testEnv) *testNode { return n } -func (n *testNode) diskPrefs() *ipn.Prefs { +func (n *TestNode) diskPrefs() *ipn.Prefs { t := n.env.t t.Helper() if _, err := os.ReadFile(n.stateFile); err != nil { @@ -539,7 +541,7 @@ func (n *testNode) diskPrefs() *ipn.Prefs { // AwaitResponding waits for n's tailscaled to be up enough to be // responding, but doesn't wait for any particular state. -func (n *testNode) AwaitResponding() { +func (n *TestNode) AwaitResponding() { t := n.env.t t.Helper() n.AwaitListening() @@ -560,7 +562,7 @@ func (n *testNode) AwaitResponding() { // addLogLineHook registers a hook f to be called on each tailscaled // log line output. -func (n *testNode) addLogLineHook(f func([]byte)) { +func (n *TestNode) addLogLineHook(f func([]byte)) { n.mu.Lock() defer n.mu.Unlock() n.onLogLine = append(n.onLogLine, f) @@ -568,7 +570,7 @@ func (n *testNode) addLogLineHook(f func([]byte)) { // socks5AddrChan returns a channel that receives the address (e.g. "localhost:23874") // of the node's SOCKS5 listener, once started. -func (n *testNode) socks5AddrChan() <-chan string { +func (n *TestNode) socks5AddrChan() <-chan string { ch := make(chan string, 1) n.addLogLineHook(func(line []byte) { const sub = "SOCKS5 listening on " @@ -585,7 +587,7 @@ func (n *testNode) socks5AddrChan() <-chan string { return ch } -func (n *testNode) AwaitSocksAddr(ch <-chan string) string { +func (n *TestNode) AwaitSocksAddr(ch <-chan string) string { t := n.env.t t.Helper() timer := time.NewTimer(10 * time.Second) @@ -605,7 +607,7 @@ func (n *testNode) AwaitSocksAddr(ch <-chan string) string { type nodeOutputParser struct { allBuf bytes.Buffer pendLineBuf bytes.Buffer - n *testNode + n *TestNode } func (op *nodeOutputParser) Write(p []byte) (n int, err error) { @@ -658,11 +660,11 @@ func (d *Daemon) MustCleanShutdown(t testing.TB) { // StartDaemon starts the node's tailscaled, failing if it fails to start. // StartDaemon ensures that the process will exit when the test completes. -func (n *testNode) StartDaemon() *Daemon { +func (n *TestNode) StartDaemon() *Daemon { return n.StartDaemonAsIPNGOOS(runtime.GOOS) } -func (n *testNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { +func (n *TestNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { t := n.env.t cmd := exec.Command(n.env.daemon) cmd.Args = append(cmd.Args, @@ -725,12 +727,12 @@ func (n *testNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { } } -func (n *testNode) MustUp(extraArgs ...string) { +func (n *TestNode) MustUp(extraArgs ...string) { t := n.env.t t.Helper() args := []string{ "up", - "--login-server=" + n.env.controlURL(), + "--login-server=" + n.env.ControlURL(), "--reset", } args = append(args, extraArgs...) 
@@ -743,7 +745,7 @@ func (n *testNode) MustUp(extraArgs ...string) { } } -func (n *testNode) MustDown() { +func (n *TestNode) MustDown() { t := n.env.t t.Logf("Running down ...") if err := n.Tailscale("down", "--accept-risk=all").Run(); err != nil { @@ -751,7 +753,7 @@ func (n *testNode) MustDown() { } } -func (n *testNode) MustLogOut() { +func (n *TestNode) MustLogOut() { t := n.env.t t.Logf("Running logout ...") if err := n.Tailscale("logout").Run(); err != nil { @@ -759,7 +761,7 @@ func (n *testNode) MustLogOut() { } } -func (n *testNode) Ping(otherNode *testNode) error { +func (n *TestNode) Ping(otherNode *TestNode) error { t := n.env.t ip := otherNode.AwaitIP4().String() t.Logf("Running ping %v (from %v)...", ip, n.AwaitIP4()) @@ -768,7 +770,7 @@ func (n *testNode) Ping(otherNode *testNode) error { // AwaitListening waits for the tailscaled to be serving local clients // over its localhost IPC mechanism. (Unix socket, etc) -func (n *testNode) AwaitListening() { +func (n *TestNode) AwaitListening() { t := n.env.t if err := tstest.WaitFor(20*time.Second, func() (err error) { c, err := safesocket.ConnectContext(context.Background(), n.sockFile) @@ -781,7 +783,7 @@ func (n *testNode) AwaitListening() { } } -func (n *testNode) AwaitIPs() []netip.Addr { +func (n *TestNode) AwaitIPs() []netip.Addr { t := n.env.t t.Helper() var addrs []netip.Addr @@ -815,7 +817,7 @@ func (n *testNode) AwaitIPs() []netip.Addr { } // AwaitIP4 returns the IPv4 address of n. -func (n *testNode) AwaitIP4() netip.Addr { +func (n *TestNode) AwaitIP4() netip.Addr { t := n.env.t t.Helper() ips := n.AwaitIPs() @@ -823,7 +825,7 @@ func (n *testNode) AwaitIP4() netip.Addr { } // AwaitIP6 returns the IPv6 address of n. -func (n *testNode) AwaitIP6() netip.Addr { +func (n *TestNode) AwaitIP6() netip.Addr { t := n.env.t t.Helper() ips := n.AwaitIPs() @@ -831,13 +833,13 @@ func (n *testNode) AwaitIP6() netip.Addr { } // AwaitRunning waits for n to reach the IPN state "Running". -func (n *testNode) AwaitRunning() { +func (n *TestNode) AwaitRunning() { t := n.env.t t.Helper() n.AwaitBackendState("Running") } -func (n *testNode) AwaitBackendState(state string) { +func (n *TestNode) AwaitBackendState(state string) { t := n.env.t t.Helper() if err := tstest.WaitFor(20*time.Second, func() error { @@ -855,7 +857,7 @@ func (n *testNode) AwaitBackendState(state string) { } // AwaitNeedsLogin waits for n to reach the IPN state "NeedsLogin". -func (n *testNode) AwaitNeedsLogin() { +func (n *TestNode) AwaitNeedsLogin() { t := n.env.t t.Helper() if err := tstest.WaitFor(20*time.Second, func() error { @@ -872,7 +874,7 @@ func (n *testNode) AwaitNeedsLogin() { } } -func (n *testNode) TailscaleForOutput(arg ...string) *exec.Cmd { +func (n *TestNode) TailscaleForOutput(arg ...string) *exec.Cmd { cmd := n.Tailscale(arg...) cmd.Stdout = nil cmd.Stderr = nil @@ -881,7 +883,7 @@ func (n *testNode) TailscaleForOutput(arg ...string) *exec.Cmd { // Tailscale returns a command that runs the tailscale CLI with the provided arguments. // It does not start the process. -func (n *testNode) Tailscale(arg ...string) *exec.Cmd { +func (n *TestNode) Tailscale(arg ...string) *exec.Cmd { cmd := exec.Command(n.env.cli) cmd.Args = append(cmd.Args, "--socket="+n.sockFile) cmd.Args = append(cmd.Args, arg...) 
@@ -897,7 +899,7 @@ func (n *testNode) Tailscale(arg ...string) *exec.Cmd { return cmd } -func (n *testNode) Status() (*ipnstate.Status, error) { +func (n *TestNode) Status() (*ipnstate.Status, error) { cmd := n.Tailscale("status", "--json") cmd.Stdout = nil // in case --verbose-tailscale was set cmd.Stderr = nil // in case --verbose-tailscale was set @@ -912,7 +914,7 @@ func (n *testNode) Status() (*ipnstate.Status, error) { return st, nil } -func (n *testNode) MustStatus() *ipnstate.Status { +func (n *TestNode) MustStatus() *ipnstate.Status { tb := n.env.t tb.Helper() st, err := n.Status() diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index d2f054361..0da2e6086 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -68,9 +68,9 @@ func TestTUNMode(t *testing.T) { t.Skip("skipping when not root") } tstest.Parallel(t) - env := newTestEnv(t) + env := NewTestEnv(t) env.tunMode = true - n1 := newTestNode(t, env) + n1 := NewTestNode(t, env) d1 := n1.StartDaemon() n1.AwaitResponding() @@ -85,8 +85,8 @@ func TestTUNMode(t *testing.T) { func TestOneNodeUpNoAuth(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := newTestEnv(t) - n1 := newTestNode(t, env) + env := NewTestEnv(t) + n1 := NewTestNode(t, env) d1 := n1.StartDaemon() n1.AwaitResponding() @@ -103,8 +103,8 @@ func TestOneNodeUpNoAuth(t *testing.T) { func TestOneNodeExpiredKey(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := newTestEnv(t) - n1 := newTestNode(t, env) + env := NewTestEnv(t) + n1 := NewTestNode(t, env) d1 := n1.StartDaemon() n1.AwaitResponding() @@ -140,8 +140,8 @@ func TestOneNodeExpiredKey(t *testing.T) { func TestControlKnobs(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := newTestEnv(t) - n1 := newTestNode(t, env) + env := NewTestEnv(t) + n1 := NewTestNode(t, env) d1 := n1.StartDaemon() defer d1.MustCleanShutdown(t) @@ -171,8 +171,8 @@ func TestControlKnobs(t *testing.T) { func TestCollectPanic(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := newTestEnv(t) - n := newTestNode(t, env) + env := NewTestEnv(t) + n := NewTestNode(t, env) cmd := exec.Command(env.daemon, "--cleanup") cmd.Env = append(os.Environ(), @@ -202,9 +202,9 @@ func TestCollectPanic(t *testing.T) { func TestControlTimeLogLine(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := newTestEnv(t) + env := NewTestEnv(t) env.LogCatcher.StoreRawJSON() - n := newTestNode(t, env) + n := NewTestNode(t, env) n.StartDaemon() n.AwaitResponding() @@ -226,8 +226,8 @@ func TestControlTimeLogLine(t *testing.T) { func TestStateSavedOnStart(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := newTestEnv(t) - n1 := newTestNode(t, env) + env := NewTestEnv(t) + n1 := NewTestNode(t, env) d1 := n1.StartDaemon() n1.AwaitResponding() @@ -245,7 +245,7 @@ func TestStateSavedOnStart(t *testing.T) { n1.MustDown() // And change the hostname to something: - if err := n1.Tailscale("up", "--login-server="+n1.env.controlURL(), "--hostname=foo").Run(); err != nil { + if err := n1.Tailscale("up", "--login-server="+n1.env.ControlURL(), "--hostname=foo").Run(); err != nil { t.Fatalf("up: %v", err) } @@ -263,11 +263,11 @@ func TestStateSavedOnStart(t *testing.T) { func TestOneNodeUpAuth(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := newTestEnv(t, configureControl(func(control *testcontrol.Server) { + env := NewTestEnv(t, ConfigureControl(func(control *testcontrol.Server) { control.RequireAuth = true })) - n1 := newTestNode(t, env) + n1 := 
NewTestNode(t, env) d1 := n1.StartDaemon() n1.AwaitListening() @@ -275,9 +275,9 @@ func TestOneNodeUpAuth(t *testing.T) { st := n1.MustStatus() t.Logf("Status: %s", st.BackendState) - t.Logf("Running up --login-server=%s ...", env.controlURL()) + t.Logf("Running up --login-server=%s ...", env.ControlURL()) - cmd := n1.Tailscale("up", "--login-server="+env.controlURL()) + cmd := n1.Tailscale("up", "--login-server="+env.ControlURL()) var authCountAtomic int32 cmd.Stdout = &authURLParserWriter{fn: func(urlStr string) error { if env.Control.CompleteAuth(urlStr) { @@ -309,11 +309,11 @@ func TestConfigFileAuthKey(t *testing.T) { tstest.Shard(t) t.Parallel() const authKey = "opensesame" - env := newTestEnv(t, configureControl(func(control *testcontrol.Server) { + env := NewTestEnv(t, ConfigureControl(func(control *testcontrol.Server) { control.RequireAuthKey = authKey })) - n1 := newTestNode(t, env) + n1 := NewTestNode(t, env) n1.configFile = filepath.Join(n1.dir, "config.json") authKeyFile := filepath.Join(n1.dir, "my-auth-key") must.Do(os.WriteFile(authKeyFile, fmt.Appendf(nil, "%s\n", authKey), 0666)) @@ -334,14 +334,14 @@ func TestConfigFileAuthKey(t *testing.T) { func TestTwoNodes(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := newTestEnv(t) + env := NewTestEnv(t) // Create two nodes: - n1 := newTestNode(t, env) + n1 := NewTestNode(t, env) n1SocksAddrCh := n1.socks5AddrChan() d1 := n1.StartDaemon() - n2 := newTestNode(t, env) + n2 := NewTestNode(t, env) n2SocksAddrCh := n2.socks5AddrChan() d2 := n2.StartDaemon() @@ -360,7 +360,7 @@ func TestTwoNodes(t *testing.T) { defer n2.mu.Unlock() rxNoDates := regexp.MustCompile(`(?m)^\d{4}.\d{2}.\d{2}.\d{2}:\d{2}:\d{2}`) - cleanLog := func(n *testNode) []byte { + cleanLog := func(n *TestNode) []byte { b := n.tailscaledParser.allBuf.Bytes() b = rxNoDates.ReplaceAll(b, nil) return b @@ -420,10 +420,10 @@ func TestTwoNodes(t *testing.T) { func TestIncrementalMapUpdatePeersRemoved(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := newTestEnv(t) + env := NewTestEnv(t) // Create one node: - n1 := newTestNode(t, env) + n1 := NewTestNode(t, env) d1 := n1.StartDaemon() n1.AwaitListening() n1.MustUp() @@ -435,7 +435,7 @@ func TestIncrementalMapUpdatePeersRemoved(t *testing.T) { } tnode1 := all[0] - n2 := newTestNode(t, env) + n2 := NewTestNode(t, env) d2 := n2.StartDaemon() n2.AwaitListening() n2.MustUp() @@ -505,8 +505,8 @@ func TestNodeAddressIPFields(t *testing.T) { tstest.Shard(t) flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/7008") tstest.Parallel(t) - env := newTestEnv(t) - n1 := newTestNode(t, env) + env := NewTestEnv(t) + n1 := NewTestNode(t, env) d1 := n1.StartDaemon() n1.AwaitListening() @@ -532,8 +532,8 @@ func TestNodeAddressIPFields(t *testing.T) { func TestAddPingRequest(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := newTestEnv(t) - n1 := newTestNode(t, env) + env := NewTestEnv(t) + n1 := NewTestNode(t, env) n1.StartDaemon() n1.AwaitListening() @@ -586,7 +586,7 @@ func TestC2NPingRequest(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := newTestEnv(t) + env := NewTestEnv(t) gotPing := make(chan bool, 1) env.Control.HandleC2N = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -604,7 +604,7 @@ func TestC2NPingRequest(t *testing.T) { gotPing <- true }) - n1 := newTestNode(t, env) + n1 := NewTestNode(t, env) n1.StartDaemon() n1.AwaitListening() @@ -657,8 +657,8 @@ func TestC2NPingRequest(t *testing.T) { func TestNoControlConnWhenDown(t *testing.T) { tstest.Shard(t) 
tstest.Parallel(t) - env := newTestEnv(t) - n1 := newTestNode(t, env) + env := NewTestEnv(t) + n1 := NewTestNode(t, env) d1 := n1.StartDaemon() n1.AwaitResponding() @@ -696,8 +696,8 @@ func TestNoControlConnWhenDown(t *testing.T) { func TestOneNodeUpWindowsStyle(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := newTestEnv(t) - n1 := newTestNode(t, env) + env := NewTestEnv(t) + n1 := NewTestNode(t, env) n1.upFlagGOOS = "windows" d1 := n1.StartDaemonAsIPNGOOS("windows") @@ -716,9 +716,9 @@ func TestOneNodeUpWindowsStyle(t *testing.T) { func TestClientSideJailing(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := newTestEnv(t) - registerNode := func() (*testNode, key.NodePublic) { - n := newTestNode(t, env) + env := NewTestEnv(t) + registerNode := func() (*TestNode, key.NodePublic) { + n := NewTestNode(t, env) n.StartDaemon() n.AwaitListening() n.MustUp() @@ -832,9 +832,9 @@ func TestNATPing(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) for _, v6 := range []bool{false, true} { - env := newTestEnv(t) - registerNode := func() (*testNode, key.NodePublic) { - n := newTestNode(t, env) + env := NewTestEnv(t) + registerNode := func() (*TestNode, key.NodePublic) { + n := NewTestNode(t, env) n.StartDaemon() n.AwaitListening() n.MustUp() @@ -959,11 +959,11 @@ func TestNATPing(t *testing.T) { func TestLogoutRemovesAllPeers(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := newTestEnv(t) + env := NewTestEnv(t) // Spin up some nodes. - nodes := make([]*testNode, 2) + nodes := make([]*TestNode, 2) for i := range nodes { - nodes[i] = newTestNode(t, env) + nodes[i] = NewTestNode(t, env) nodes[i].StartDaemon() nodes[i].AwaitResponding() nodes[i].MustUp() @@ -1017,9 +1017,9 @@ func TestAutoUpdateDefaults(t *testing.T) { } tstest.Shard(t) tstest.Parallel(t) - env := newTestEnv(t) + env := NewTestEnv(t) - checkDefault := func(n *testNode, want bool) error { + checkDefault := func(n *TestNode, want bool) error { enabled, ok := n.diskPrefs().AutoUpdate.Apply.Get() if !ok { return fmt.Errorf("auto-update for node is unset, should be set as %v", want) @@ -1030,7 +1030,7 @@ func TestAutoUpdateDefaults(t *testing.T) { return nil } - sendAndCheckDefault := func(t *testing.T, n *testNode, send, want bool) { + sendAndCheckDefault := func(t *testing.T, n *TestNode, send, want bool) { t.Helper() if !env.Control.AddRawMapResponse(n.MustStatus().Self.PublicKey, &tailcfg.MapResponse{ DefaultAutoUpdate: opt.NewBool(send), @@ -1046,11 +1046,11 @@ func TestAutoUpdateDefaults(t *testing.T) { tests := []struct { desc string - run func(t *testing.T, n *testNode) + run func(t *testing.T, n *TestNode) }{ { desc: "tailnet-default-false", - run: func(t *testing.T, n *testNode) { + run: func(t *testing.T, n *TestNode) { // First received default "false". sendAndCheckDefault(t, n, false, false) // Should not be changed even if sent "true" later. @@ -1064,7 +1064,7 @@ func TestAutoUpdateDefaults(t *testing.T) { }, { desc: "tailnet-default-true", - run: func(t *testing.T, n *testNode) { + run: func(t *testing.T, n *TestNode) { // First received default "true". sendAndCheckDefault(t, n, true, true) // Should not be changed even if sent "false" later. @@ -1078,7 +1078,7 @@ func TestAutoUpdateDefaults(t *testing.T) { }, { desc: "user-sets-first", - run: func(t *testing.T, n *testNode) { + run: func(t *testing.T, n *TestNode) { // User sets auto-update first, before receiving defaults. 
if out, err := n.TailscaleForOutput("set", "--auto-update=false").CombinedOutput(); err != nil { t.Fatalf("failed to disable auto-update on node: %v\noutput: %s", err, out) @@ -1091,7 +1091,7 @@ func TestAutoUpdateDefaults(t *testing.T) { } for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - n := newTestNode(t, env) + n := NewTestNode(t, env) d := n.StartDaemon() defer d.MustCleanShutdown(t) @@ -1113,9 +1113,9 @@ func TestDNSOverTCPIntervalResolver(t *testing.T) { if os.Getuid() != 0 { t.Skip("skipping when not root") } - env := newTestEnv(t) + env := NewTestEnv(t) env.tunMode = true - n1 := newTestNode(t, env) + n1 := NewTestNode(t, env) d1 := n1.StartDaemon() n1.AwaitResponding() @@ -1186,12 +1186,12 @@ func TestNetstackTCPLoopback(t *testing.T) { t.Skip("skipping when not root") } - env := newTestEnv(t) + env := NewTestEnv(t) env.tunMode = true loopbackPort := 5201 env.loopbackPort = &loopbackPort loopbackPortStr := strconv.Itoa(loopbackPort) - n1 := newTestNode(t, env) + n1 := NewTestNode(t, env) d1 := n1.StartDaemon() n1.AwaitResponding() @@ -1328,11 +1328,11 @@ func TestNetstackUDPLoopback(t *testing.T) { t.Skip("skipping when not root") } - env := newTestEnv(t) + env := NewTestEnv(t) env.tunMode = true loopbackPort := 5201 env.loopbackPort = &loopbackPort - n1 := newTestNode(t, env) + n1 := NewTestNode(t, env) d1 := n1.StartDaemon() n1.AwaitResponding() From 1f1c323eebb02dca1153c56582ee01ce400e6b56 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Tue, 29 Apr 2025 11:37:12 +0100 Subject: [PATCH 0804/1708] control/controlclient,health: add tests for control health tracking Updates tailscale/corp#27759 Signed-off-by: James Sanderson --- control/controlclient/map_test.go | 32 ++++++++++++ health/health.go | 16 ++++-- health/health_test.go | 85 +++++++++++++++++++++++++++++++ 3 files changed, 130 insertions(+), 3 deletions(-) diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index 09441d066..ccc57ae2b 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -17,6 +17,7 @@ import ( "github.com/google/go-cmp/cmp" "go4.org/mem" "tailscale.com/control/controlknobs" + "tailscale.com/health" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/tstime" @@ -1136,3 +1137,34 @@ func BenchmarkMapSessionDelta(b *testing.B) { }) } } + +// TestNetmapHealthIntegration checks that we get the expected health warnings +// from processing a map response and passing the NetworkMap to a health tracker +func TestNetmapHealthIntegration(t *testing.T) { + ms := newTestMapSession(t, nil) + ht := health.Tracker{} + + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + + nm := ms.netmapForResponse(&tailcfg.MapResponse{ + Health: []string{"Test message"}, + }) + ht.SetControlHealth(nm.ControlHealth) + + state := ht.CurrentState() + warning, ok := state.Warnings["control-health"] + + if !ok { + t.Fatal("no warning found in current state with code 'control-health'") + } + if got, want := warning.Title, "Coordination server reports an issue"; got != want { + t.Errorf("warning.Title = %q, want %q", got, want) + } + if got, want := warning.Severity, health.SeverityMedium; got != want { + t.Errorf("warning.Severity = %s, want %s", got, want) + } + if got, want := warning.Text, "The coordination server is reporting an health issue: Test message"; got != want { + t.Errorf("warning.Text = %q, want %q", got, want) + } +} diff --git a/health/health.go b/health/health.go index b0733f353..65d4402ae 100644 --- 
a/health/health.go +++ b/health/health.go @@ -402,7 +402,7 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { // executed immediately. Otherwise, the callback should be enqueued to run once the Warnable // becomes visible. if w.IsVisible(ws, t.now) { - go cb(w, w.unhealthyState(ws)) + cb(w, w.unhealthyState(ws)) continue } @@ -415,7 +415,7 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { // Check if the Warnable is still unhealthy, as it could have become healthy between the time // the timer was set for and the time it was executed. if t.warnableVal[w] != nil { - go cb(w, w.unhealthyState(ws)) + cb(w, w.unhealthyState(ws)) delete(t.pendingVisibleTimers, w) } }) @@ -449,7 +449,7 @@ func (t *Tracker) setHealthyLocked(w *Warnable) { } for _, cb := range t.watchers { - go cb(w, nil) + cb(w, nil) } } @@ -483,6 +483,16 @@ func (t *Tracker) AppendWarnableDebugFlags(base []string) []string { // The provided callback function will be executed in its own goroutine. The returned function can be used // to unregister the callback. func (t *Tracker) RegisterWatcher(cb func(w *Warnable, r *UnhealthyState)) (unregister func()) { + return t.registerSyncWatcher(func(w *Warnable, r *UnhealthyState) { + go cb(w, r) + }) +} + +// registerSyncWatcher adds a function that will be called whenever the health +// state of any Warnable changes. The provided callback function will be +// executed synchronously. Call RegisterWatcher to register any callbacks that +// won't return from execution immediately. +func (t *Tracker) registerSyncWatcher(cb func(w *Warnable, r *UnhealthyState)) (unregister func()) { if t.nil() { return func() {} } diff --git a/health/health_test.go b/health/health_test.go index abc0ec07e..aa3904581 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -451,3 +451,88 @@ func TestNoDERPHomeWarnableManual(t *testing.T) { t.Fatalf("got unexpected noDERPHomeWarnable warnable: %v", ws) } } + +func TestControlHealth(t *testing.T) { + ht := Tracker{} + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + + ht.SetControlHealth([]string{"Test message"}) + state := ht.CurrentState() + warning, ok := state.Warnings["control-health"] + + if !ok { + t.Fatal("no warning found in current state with code 'control-health'") + } + if got, want := warning.Title, "Coordination server reports an issue"; got != want { + t.Errorf("warning.Title = %q, want %q", got, want) + } + if got, want := warning.Severity, SeverityMedium; got != want { + t.Errorf("warning.Severity = %s, want %s", got, want) + } + if got, want := warning.Text, "The coordination server is reporting an health issue: Test message"; got != want { + t.Errorf("warning.Text = %q, want %q", got, want) + } +} + +func TestControlHealthNotifiesOnChange(t *testing.T) { + ht := Tracker{} + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + + gotNotified := false + ht.registerSyncWatcher(func(_ *Warnable, _ *UnhealthyState) { + gotNotified = true + }) + + ht.SetControlHealth([]string{"Test message"}) + + if !gotNotified { + t.Errorf("watcher did not get called, want it to be called") + } +} + +func TestControlHealthNoNotifyOnUnchanged(t *testing.T) { + ht := Tracker{} + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + + // Set up an existing control health issue + ht.SetControlHealth([]string{"Test message"}) + + // Now register our watcher + gotNotified := false + ht.registerSyncWatcher(func(_ *Warnable, _ *UnhealthyState) { + gotNotified = true + }) + + // Send the 
same control health message again - should not notify + ht.SetControlHealth([]string{"Test message"}) + + if gotNotified { + t.Errorf("watcher got called, want it to not be called") + } +} + +func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { + ht := Tracker{} + ht.SetIPNState("NeedsLogin", true) + + gotNotified := false + ht.registerSyncWatcher(func(_ *Warnable, _ *UnhealthyState) { + gotNotified = true + }) + + ht.SetControlHealth([]string{"Test message"}) + + state := ht.CurrentState() + _, ok := state.Warnings["control-health"] + + if ok { + t.Error("got a warning with code 'control-health', want none") + } + + if gotNotified { + t.Error("watcher got called, want it to not be called") + } +} From cc6f36752004108ed8da0b058c98223848388108 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 28 Apr 2025 19:57:30 -0700 Subject: [PATCH 0805/1708] tstest/integration: remove vestigial env var set in tests TS_CONTROL_IS_PLAINTEXT_HTTP no longer does anything as of 8fd471ce5748d2129dba584b4fa14b0d29229299 Updates #13597 Change-Id: I32ae7f8c5f2a2632e80323b1302a36295ee00736 Signed-off-by: Brad Fitzpatrick --- tstest/integration/integration.go | 1 - 1 file changed, 1 deletion(-) diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index 9df536971..743a0382c 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -684,7 +684,6 @@ func (n *TestNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { cmd.Args = append(cmd.Args, "--config="+n.configFile) } cmd.Env = append(os.Environ(), - "TS_CONTROL_IS_PLAINTEXT_HTTP=1", "TS_DEBUG_PERMIT_HTTP_C2N=1", "TS_LOG_TARGET="+n.env.LogCatcherServer.URL, "HTTP_PROXY="+n.env.TrafficTrapServer.URL, From a9b3e09a1f519c890f74160dab7f75625a0dc6cd Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Tue, 29 Apr 2025 17:35:47 -0700 Subject: [PATCH 0806/1708] tool/gocross: break circular dependency on tailcfg (#15829) Instead of using the version package (which depends on tailcfg.CurrentCapabilityVersion) to get the git commit hash, do it directly using debug.BuildInfo. This way, when changing struct fields in tailcfg, we can successfully `go generate` it without compiler errors. Updates #9634 Updates https://github.com/tailscale/corp/issues/26717 Signed-off-by: Andrew Lytvynov --- tool/gocross/gocross.go | 25 ++++++++++++++++++------- tool/gocross/gocross_test.go | 19 +++++++++++++++++++ 2 files changed, 37 insertions(+), 7 deletions(-) create mode 100644 tool/gocross/gocross_test.go diff --git a/tool/gocross/gocross.go b/tool/gocross/gocross.go index 8011c1095..d14ea0388 100644 --- a/tool/gocross/gocross.go +++ b/tool/gocross/gocross.go @@ -15,9 +15,9 @@ import ( "fmt" "os" "path/filepath" + "runtime/debug" "tailscale.com/atomicfile" - "tailscale.com/version" ) func main() { @@ -28,8 +28,19 @@ func main() { // any time. 
switch os.Args[1] { case "gocross-version": - fmt.Println(version.GetMeta().GitCommit) - os.Exit(0) + bi, ok := debug.ReadBuildInfo() + if !ok { + fmt.Fprintln(os.Stderr, "failed getting build info") + os.Exit(1) + } + for _, s := range bi.Settings { + if s.Key == "vcs.revision" { + fmt.Println(s.Value) + os.Exit(0) + } + } + fmt.Fprintln(os.Stderr, "did not find vcs.revision in build info") + os.Exit(1) case "is-gocross": // This subcommand exits with an error code when called on a // regular go binary, so it can be used to detect when `go` is @@ -85,9 +96,9 @@ func main() { path := filepath.Join(toolchain, "bin") + string(os.PathListSeparator) + os.Getenv("PATH") env.Set("PATH", path) - debug("Input: %s\n", formatArgv(os.Args)) - debug("Command: %s\n", formatArgv(newArgv)) - debug("Set the following flags/envvars:\n%s\n", env.Diff()) + debugf("Input: %s\n", formatArgv(os.Args)) + debugf("Command: %s\n", formatArgv(newArgv)) + debugf("Set the following flags/envvars:\n%s\n", env.Diff()) args = newArgv if err := env.Apply(); err != nil { @@ -103,7 +114,7 @@ func main() { //go:embed gocross-wrapper.sh var wrapperScript []byte -func debug(format string, args ...any) { +func debugf(format string, args ...any) { debug := os.Getenv("GOCROSS_DEBUG") var ( out *os.File diff --git a/tool/gocross/gocross_test.go b/tool/gocross/gocross_test.go new file mode 100644 index 000000000..82afd268c --- /dev/null +++ b/tool/gocross/gocross_test.go @@ -0,0 +1,19 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "testing" + + "tailscale.com/tstest/deptest" +) + +func TestDeps(t *testing.T) { + deptest.DepChecker{ + BadDeps: map[string]string{ + "tailscale.com/tailcfg": "circular dependency via go generate", + "tailscale.com/version": "circular dependency via go generate", + }, + }.Check(t) +} From ab2deda4b7b93eb400934e8b3a63df77b0536a0c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 29 Apr 2025 20:05:28 -0700 Subject: [PATCH 0807/1708] tsnet: add FunnelTLSConfig FunnelOption type And also validate opts for unknown types, before other side effects. Fixes #15833 Change-Id: I4cabe16c49c5b7566dcafbec59f2cd1e0c8b4b3c Signed-off-by: Brad Fitzpatrick --- tsnet/tsnet.go | 54 ++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 43 insertions(+), 11 deletions(-) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index f97598075..1880b62b1 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -1101,13 +1101,33 @@ type FunnelOption interface { funnelOption() } -type funnelOnly int +type funnelOnly struct{} func (funnelOnly) funnelOption() {} // FunnelOnly configures the listener to only respond to connections from Tailscale Funnel. // The local tailnet will not be able to connect to the listener. -func FunnelOnly() FunnelOption { return funnelOnly(1) } +func FunnelOnly() FunnelOption { return funnelOnly{} } + +type funnelTLSConfig struct{ conf *tls.Config } + +func (f funnelTLSConfig) funnelOption() {} + +// FunnelTLSConfig configures the TLS configuration for [Server.ListenFunnel] +// +// This is rarely needed but can permit requiring client certificates, specific +// ciphers suites, etc. +// +// The provided conf should at least be able to get a certificate, setting +// GetCertificate, Certificates or GetConfigForClient appropriately. +// The most common configuration is to set GetCertificate to +// Server.LocalClient's GetCertificate method. 
+// +// Unless [FunnelOnly] is also used, the configuration is also used for +// in-tailnet connections that don't arrive over Funnel. +func FunnelTLSConfig(conf *tls.Config) FunnelOption { + return funnelTLSConfig{conf: conf} +} // ListenFunnel announces on the public internet using Tailscale Funnel. // @@ -1140,6 +1160,26 @@ func (s *Server) ListenFunnel(network, addr string, opts ...FunnelOption) (net.L return nil, err } + // Process, validate opts. + lnOn := listenOnBoth + var tlsConfig *tls.Config + for _, opt := range opts { + switch v := opt.(type) { + case funnelTLSConfig: + if v.conf == nil { + return nil, errors.New("invalid nil FunnelTLSConfig") + } + tlsConfig = v.conf + case funnelOnly: + lnOn = listenOnFunnel + default: + return nil, fmt.Errorf("unknown opts FunnelOption type %T", v) + } + } + if tlsConfig == nil { + tlsConfig = &tls.Config{GetCertificate: s.getCert} + } + ctx := context.Background() st, err := s.Up(ctx) if err != nil { @@ -1177,19 +1217,11 @@ func (s *Server) ListenFunnel(network, addr string, opts ...FunnelOption) (net.L } // Start a funnel listener. - lnOn := listenOnBoth - for _, opt := range opts { - if _, ok := opt.(funnelOnly); ok { - lnOn = listenOnFunnel - } - } ln, err := s.listen(network, addr, lnOn) if err != nil { return nil, err } - return tls.NewListener(ln, &tls.Config{ - GetCertificate: s.getCert, - }), nil + return tls.NewListener(ln, tlsConfig), nil } type listenOn string From 080387558c6d7654ac6d7a694edc73c32b10b2cb Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 30 Apr 2025 13:31:35 -0700 Subject: [PATCH 0808/1708] wgengine/magicsock: start to make disco reception Geneve aware (#15832) Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 64 +++++++++-- wgengine/magicsock/magicsock_test.go | 162 +++++++++++++++++++++++++++ 2 files changed, 215 insertions(+), 11 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 31bf66b2b..28ad06d2a 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -9,6 +9,7 @@ import ( "bufio" "bytes" "context" + "encoding/binary" "errors" "expvar" "fmt" @@ -1707,6 +1708,45 @@ const ( discoRXPathRawSocket discoRXPath = "raw socket" ) +const discoHeaderLen = len(disco.Magic) + key.DiscoPublicRawLen + +// isDiscoMaybeGeneve reports whether msg is a Tailscale Disco protocol +// message, and if true, whether it is encapsulated by a Geneve header. +// +// isGeneveEncap is only relevant when isDiscoMsg is true. +// +// Naked Disco, Geneve followed by Disco, and naked WireGuard can be confidently +// distinguished based on the following: +// 1. [disco.Magic] is sufficiently non-overlapping with a Geneve protocol +// field value of [packet.GeneveProtocolDisco]. +// 2. [disco.Magic] is sufficiently non-overlapping with the first 4 bytes of +// a WireGuard packet. +// 3. [packet.GeneveHeader] with a Geneve protocol field value of +// [packet.GeneveProtocolDisco] is sufficiently non-overlapping with the +// first 4 bytes of a WireGuard packet. 
+func isDiscoMaybeGeneve(msg []byte) (isDiscoMsg bool, isGeneveEncap bool) { + if len(msg) < discoHeaderLen { + return false, false + } + if string(msg[:len(disco.Magic)]) == disco.Magic { + return true, false + } + if len(msg) < packet.GeneveFixedHeaderLength+discoHeaderLen { + return false, false + } + if msg[0]&0xC0 != 0 || // version bits that we always transmit as 0s + msg[1]&0x3F != 0 || // reserved bits that we always transmit as 0s + binary.BigEndian.Uint16(msg[2:4]) != packet.GeneveProtocolDisco || + msg[7] != 0 { // reserved byte that we always transmit as 0 + return false, false + } + msg = msg[packet.GeneveFixedHeaderLength:] + if string(msg[:len(disco.Magic)]) == disco.Magic { + return true, true + } + return false, false +} + // handleDiscoMessage handles a discovery message and reports whether // msg was a Tailscale inter-node discovery message. // @@ -1722,18 +1762,16 @@ const ( // it was received from at the DERP layer. derpNodeSrc is zero when received // over UDP. func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc key.NodePublic, via discoRXPath) (isDiscoMsg bool) { - const headerLen = len(disco.Magic) + key.DiscoPublicRawLen - if len(msg) < headerLen || string(msg[:len(disco.Magic)]) != disco.Magic { - return false + isDiscoMsg, isGeneveEncap := isDiscoMaybeGeneve(msg) + if !isDiscoMsg { + return + } + if isGeneveEncap { + // TODO(jwhited): decode Geneve header + msg = msg[packet.GeneveFixedHeaderLength:] } - // If the first four parts are the prefix of disco.Magic - // (0x5453f09f) then it's definitely not a valid WireGuard - // packet (which starts with little-endian uint32 1, 2, 3, 4). - // Use naked returns for all following paths. - isDiscoMsg = true - - sender := key.DiscoPublicFromRaw32(mem.B(msg[len(disco.Magic):headerLen])) + sender := key.DiscoPublicFromRaw32(mem.B(msg[len(disco.Magic):discoHeaderLen])) c.mu.Lock() defer c.mu.Unlock() @@ -1751,6 +1789,10 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke } if !c.peerMap.knownPeerDiscoKey(sender) { + // Geneve encapsulated disco used for udp relay handshakes are not known + // "peer" keys as they are dynamically discovered by UDP relay endpoint + // allocation or [disco.CallMeMaybeVia] reception. + // TODO(jwhited): handle relay handshake messsages instead of early return metricRecvDiscoBadPeer.Add(1) if debugDisco() { c.logf("magicsock: disco: ignoring disco-looking frame, don't know of key %v", sender.ShortString()) @@ -1774,7 +1816,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke di := c.discoInfoLocked(sender) - sealedBox := msg[headerLen:] + sealedBox := msg[discoHeaderLen:] payload, ok := di.sharedKey.Open(sealedBox) if !ok { // This might be have been intended for a previous diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index f50f21f56..1a899ea22 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -3155,3 +3155,165 @@ func TestNetworkDownSendErrors(t *testing.T) { t.Errorf("expected NetworkDown to increment packet dropped metric; got %q", resp.Body.String()) } } + +func Test_isDiscoMaybeGeneve(t *testing.T) { + discoPub := key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 30: 30, 31: 31})) + nakedDisco := make([]byte, 0, 512) + nakedDisco = append(nakedDisco, disco.Magic...) 
+ nakedDisco = discoPub.AppendTo(nakedDisco) + + geneveEncapDisco := make([]byte, packet.GeneveFixedHeaderLength+len(nakedDisco)) + gh := packet.GeneveHeader{ + Version: 0, + Protocol: packet.GeneveProtocolDisco, + VNI: 1, + Control: true, + } + err := gh.Encode(geneveEncapDisco) + if err != nil { + t.Fatal(err) + } + copy(geneveEncapDisco[packet.GeneveFixedHeaderLength:], nakedDisco) + + nakedWireGuardInitiation := make([]byte, len(geneveEncapDisco)) + binary.LittleEndian.PutUint32(nakedWireGuardInitiation, device.MessageInitiationType) + nakedWireGuardResponse := make([]byte, len(geneveEncapDisco)) + binary.LittleEndian.PutUint32(nakedWireGuardResponse, device.MessageResponseType) + nakedWireGuardCookieReply := make([]byte, len(geneveEncapDisco)) + binary.LittleEndian.PutUint32(nakedWireGuardCookieReply, device.MessageCookieReplyType) + nakedWireGuardTransport := make([]byte, len(geneveEncapDisco)) + binary.LittleEndian.PutUint32(nakedWireGuardTransport, device.MessageTransportType) + + geneveEncapWireGuard := make([]byte, packet.GeneveFixedHeaderLength+len(nakedWireGuardInitiation)) + gh = packet.GeneveHeader{ + Version: 0, + Protocol: packet.GeneveProtocolWireGuard, + VNI: 1, + Control: true, + } + err = gh.Encode(geneveEncapWireGuard) + if err != nil { + t.Fatal(err) + } + copy(geneveEncapWireGuard[packet.GeneveFixedHeaderLength:], nakedWireGuardInitiation) + + geneveEncapDiscoNonZeroGeneveVersion := make([]byte, packet.GeneveFixedHeaderLength+len(nakedDisco)) + gh = packet.GeneveHeader{ + Version: 1, + Protocol: packet.GeneveProtocolDisco, + VNI: 1, + Control: true, + } + err = gh.Encode(geneveEncapDiscoNonZeroGeneveVersion) + if err != nil { + t.Fatal(err) + } + copy(geneveEncapDiscoNonZeroGeneveVersion[packet.GeneveFixedHeaderLength:], nakedDisco) + + geneveEncapDiscoNonZeroGeneveReservedBits := make([]byte, packet.GeneveFixedHeaderLength+len(nakedDisco)) + gh = packet.GeneveHeader{ + Version: 0, + Protocol: packet.GeneveProtocolDisco, + VNI: 1, + Control: true, + } + err = gh.Encode(geneveEncapDiscoNonZeroGeneveReservedBits) + if err != nil { + t.Fatal(err) + } + geneveEncapDiscoNonZeroGeneveReservedBits[1] |= 0x3F + copy(geneveEncapDiscoNonZeroGeneveReservedBits[packet.GeneveFixedHeaderLength:], nakedDisco) + + geneveEncapDiscoNonZeroGeneveVNILSB := make([]byte, packet.GeneveFixedHeaderLength+len(nakedDisco)) + gh = packet.GeneveHeader{ + Version: 0, + Protocol: packet.GeneveProtocolDisco, + VNI: 1, + Control: true, + } + err = gh.Encode(geneveEncapDiscoNonZeroGeneveVNILSB) + if err != nil { + t.Fatal(err) + } + geneveEncapDiscoNonZeroGeneveVNILSB[7] |= 0xFF + copy(geneveEncapDiscoNonZeroGeneveVNILSB[packet.GeneveFixedHeaderLength:], nakedDisco) + + tests := []struct { + name string + msg []byte + wantIsDiscoMsg bool + wantIsGeneveEncap bool + }{ + { + name: "naked disco", + msg: nakedDisco, + wantIsDiscoMsg: true, + wantIsGeneveEncap: false, + }, + { + name: "geneve encap disco", + msg: geneveEncapDisco, + wantIsDiscoMsg: true, + wantIsGeneveEncap: true, + }, + { + name: "geneve encap disco nonzero geneve version", + msg: geneveEncapDiscoNonZeroGeneveVersion, + wantIsDiscoMsg: false, + wantIsGeneveEncap: false, + }, + { + name: "geneve encap disco nonzero geneve reserved bits", + msg: geneveEncapDiscoNonZeroGeneveReservedBits, + wantIsDiscoMsg: false, + wantIsGeneveEncap: false, + }, + { + name: "geneve encap disco nonzero geneve vni lsb", + msg: geneveEncapDiscoNonZeroGeneveVNILSB, + wantIsDiscoMsg: false, + wantIsGeneveEncap: false, + }, + { + name: "geneve encap wireguard", + 
msg: geneveEncapWireGuard, + wantIsDiscoMsg: false, + wantIsGeneveEncap: false, + }, + { + name: "naked WireGuard Initiation type", + msg: nakedWireGuardInitiation, + wantIsDiscoMsg: false, + wantIsGeneveEncap: false, + }, + { + name: "naked WireGuard Response type", + msg: nakedWireGuardResponse, + wantIsDiscoMsg: false, + wantIsGeneveEncap: false, + }, + { + name: "naked WireGuard Cookie Reply type", + msg: nakedWireGuardCookieReply, + wantIsDiscoMsg: false, + wantIsGeneveEncap: false, + }, + { + name: "naked WireGuard Transport type", + msg: nakedWireGuardTransport, + wantIsDiscoMsg: false, + wantIsGeneveEncap: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotIsDiscoMsg, gotIsGeneveEncap := isDiscoMaybeGeneve(tt.msg) + if gotIsDiscoMsg != tt.wantIsDiscoMsg { + t.Errorf("isDiscoMaybeGeneve() gotIsDiscoMsg = %v, want %v", gotIsDiscoMsg, tt.wantIsDiscoMsg) + } + if gotIsGeneveEncap != tt.wantIsGeneveEncap { + t.Errorf("isDiscoMaybeGeneve() gotIsGeneveEncap = %v, want %v", gotIsGeneveEncap, tt.wantIsGeneveEncap) + } + }) + } +} From ac04338a0d7f6e70f1a7be9578a3857ecfe75266 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 30 Apr 2025 19:07:31 -0700 Subject: [PATCH 0809/1708] wgengine/magicsock: fix discoInfo leak (#15845) Conn.sendDiscoMessage() now verifies if the destination disco key is associated with any known peer(s) in a thread-safe manner. Updates #15844 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 47 +++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 20 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 28ad06d2a..471d04e98 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1625,6 +1625,25 @@ func (c *Conn) sendDiscoMessage(dst netip.AddrPort, geneveVNI *uint32, dstKey ke c.mu.Unlock() return false, errConnClosed } + var di *discoInfo + switch { + case c.peerMap.knownPeerDiscoKey(dstDisco): + di = c.discoInfoForKnownPeerLocked(dstDisco) + case isRelayHandshakeMsg: + // TODO(jwhited): consider caching relay server disco shared keys + di = &discoInfo{ + sharedKey: c.discoPrivate.Shared(dstDisco), + } + default: + // This is an attempt to send to an unknown peer that is not a relay + // server. This can happen when a call to the current function, which is + // often via a new goroutine, races with applying a change in the + // netmap, e.g. the associated peer(s) for dstDisco goes away. + c.mu.Unlock() + return false, errors.New("unknown peer") + } + c.mu.Unlock() + pkt := make([]byte, 0, 512) // TODO: size it correctly? pool? if it matters. if geneveVNI != nil { gh := packet.GeneveHeader{ @@ -1641,23 +1660,6 @@ func (c *Conn) sendDiscoMessage(dst netip.AddrPort, geneveVNI *uint32, dstKey ke } pkt = append(pkt, disco.Magic...) pkt = c.discoPublic.AppendTo(pkt) - var di *discoInfo - if !isRelayHandshakeMsg { - di = c.discoInfoLocked(dstDisco) - } else { - // c.discoInfoLocked() caches [*discoInfo] for dstDisco. It assumes that - // dstDisco is a known Tailscale peer, and will be cleaned around - // network map changes. In the case of a relay handshake message, - // dstDisco belongs to a relay server with a disco key that is - // discovered at endpoint allocation time or [disco.CallMeMaybeVia] - // reception time. There is no clear ending to its lifetime, so we - // can't cache with the same strategy. Instead, generate the shared - // key on the fly for now. 
- di = &discoInfo{ - sharedKey: c.discoPrivate.Shared(dstDisco), - } - } - c.mu.Unlock() if isDERP { metricSendDiscoDERP.Add(1) @@ -1814,7 +1816,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke // // From here on, peerNode and de are non-nil. - di := c.discoInfoLocked(sender) + di := c.discoInfoForKnownPeerLocked(sender) sealedBox := msg[discoHeaderLen:] payload, ok := di.sharedKey.Open(sealedBox) @@ -2076,10 +2078,15 @@ func (c *Conn) enqueueCallMeMaybe(derpAddr netip.AddrPort, de *endpoint) { } } -// discoInfoLocked returns the previous or new discoInfo for k. +// discoInfoForKnownPeerLocked returns the previous or new discoInfo for k. +// +// Callers must only pass key.DiscoPublic's that are present in and +// lifetime-managed via [Conn].peerMap. UDP relay server disco keys are discovered +// at relay endpoint allocation time or [disco.CallMeMaybeVia] reception time +// and therefore must never pass through this method. // // c.mu must be held. -func (c *Conn) discoInfoLocked(k key.DiscoPublic) *discoInfo { +func (c *Conn) discoInfoForKnownPeerLocked(k key.DiscoPublic) *discoInfo { di, ok := c.discoInfo[k] if !ok { di = &discoInfo{ From c09cd34f59ba58127b1e841b863d70da4d552246 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 30 Apr 2025 14:42:11 -0700 Subject: [PATCH 0810/1708] ipn/ipnlocal: fix Taildrop deadlock This fixes the Taildrop deadlock from 8b72dd787320. Fixes #15824 Change-Id: I5ca583de20dd0d0b513ce546439dc632408ca1f1 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 95fe22641..b16906e71 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1463,15 +1463,30 @@ func (b *LocalBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { return b.currentNode().PeerCaps(src) } +// AppendMatchingPeers returns base with all peers that match pred appended. +// +// It acquires b.mu to read the netmap but releases it before calling pred. func (b *localNodeContext) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView { + var peers []tailcfg.NodeView + b.mu.Lock() - defer b.mu.Unlock() - ret := base - if b.netMap == nil { - return ret + if b.netMap != nil { + // All fields on b.netMap are immutable, so this is + // safe to copy and use outside the lock. + peers = b.netMap.Peers } - for _, peer := range b.netMap.Peers { - if pred(peer) { + b.mu.Unlock() + + ret := base + for _, peer := range peers { + // The peers in b.netMap don't contain updates made via + // UpdateNetmapDelta. So only use PeerView in b.netMap for its NodeID, + // and then look up the latest copy in b.peers which is updated in + // response to UpdateNetmapDelta edits. 
+ b.mu.Lock() + peer, ok := b.peers[peer.ID()] + b.mu.Unlock() + if ok && pred(peer) { ret = append(ret, peer) } } From fe0090909b3ab7aa583093073445fde47fd1a818 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Tue, 29 Apr 2025 14:57:27 +0100 Subject: [PATCH 0811/1708] cmd/tailscale/cli: unhide `--posture-checking` flag to `set` Updates #5902 Signed-off-by: Anton Tolchanov --- cmd/tailscale/cli/set.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 37db252ad..f4ea674ec 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -83,7 +83,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf.BoolVar(&setArgs.advertiseConnector, "advertise-connector", false, "offer to be an app connector for domain specific internet traffic for the tailnet") setf.BoolVar(&setArgs.updateCheck, "update-check", true, "notify about available Tailscale updates") setf.BoolVar(&setArgs.updateApply, "auto-update", false, "automatically update to the latest available version") - setf.BoolVar(&setArgs.postureChecking, "posture-checking", false, hidden+"allow management plane to gather device posture information") + setf.BoolVar(&setArgs.postureChecking, "posture-checking", false, "allow management plane to gather device posture information") setf.BoolVar(&setArgs.runWebClient, "webclient", false, "expose the web interface for managing this node over Tailscale at port 5252") setf.StringVar(&setArgs.relayServerPort, "relay-server-port", "", hidden+"UDP port number (0 will pick a random unused port) for the relay server to bind to, on all interfaces, or empty string to disable relay server functionality") From e05e620096adec4e866dd05d869d887d47c8bdf2 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Thu, 1 May 2025 12:12:36 -0400 Subject: [PATCH 0812/1708] util/linuxfw: fix delete snat rule (#15763) * util/linuxfw: fix delete snat rule This pr is fixing the bug that in nftables mode setting snat-subnet-routes=false doesn't delete the masq rule in nat table. Updates #15661 Signed-off-by: Kevin Liang * change index arithmetic in test to chunk Signed-off-by: Kevin Liang * reuse rule creation function in rule delete Signed-off-by: Kevin Liang * add test for deleting the masq rule Signed-off-by: Kevin Liang --------- Signed-off-by: Kevin Liang --- util/linuxfw/nftables_runner.go | 64 ++++++++---------- util/linuxfw/nftables_runner_test.go | 98 ++++++++++++++++++++-------- 2 files changed, 98 insertions(+), 64 deletions(-) diff --git a/util/linuxfw/nftables_runner.go b/util/linuxfw/nftables_runner.go index 0f411521b..b87298c61 100644 --- a/util/linuxfw/nftables_runner.go +++ b/util/linuxfw/nftables_runner.go @@ -1710,55 +1710,43 @@ func (n *nftablesRunner) AddSNATRule() error { return nil } +func delMatchSubnetRouteMarkMasqRule(conn *nftables.Conn, table *nftables.Table, chain *nftables.Chain) error { + + rule, err := createMatchSubnetRouteMarkRule(table, chain, Masq) + if err != nil { + return fmt.Errorf("create match subnet route mark rule: %w", err) + } + + SNATRule, err := findRule(conn, rule) + if err != nil { + return fmt.Errorf("find SNAT rule v4: %w", err) + } + + if SNATRule != nil { + _ = conn.DelRule(SNATRule) + } + + if err := conn.Flush(); err != nil { + return fmt.Errorf("flush del SNAT rule: %w", err) + } + + return nil +} + // DelSNATRule removes the netfilter rule to SNAT traffic destined for // local subnets. 
An error is returned if the rule does not exist. func (n *nftablesRunner) DelSNATRule() error { conn := n.conn - hexTSFwmarkMask := getTailscaleFwmarkMask() - hexTSSubnetRouteMark := getTailscaleSubnetRouteMark() - - exprs := []expr.Any{ - &expr.Meta{Key: expr.MetaKeyMARK, Register: 1}, - &expr.Bitwise{ - SourceRegister: 1, - DestRegister: 1, - Len: 4, - Mask: hexTSFwmarkMask, - }, - &expr.Cmp{ - Op: expr.CmpOpEq, - Register: 1, - Data: hexTSSubnetRouteMark, - }, - &expr.Counter{}, - &expr.Masq{}, - } - for _, table := range n.getTables() { chain, err := getChainFromTable(conn, table.Nat, chainNamePostrouting) if err != nil { - return fmt.Errorf("get postrouting chain v4: %w", err) - } - - rule := &nftables.Rule{ - Table: table.Nat, - Chain: chain, - Exprs: exprs, + return fmt.Errorf("get postrouting chain: %w", err) } - - SNATRule, err := findRule(conn, rule) + err = delMatchSubnetRouteMarkMasqRule(conn, table.Nat, chain) if err != nil { - return fmt.Errorf("find SNAT rule v4: %w", err) + return err } - - if SNATRule != nil { - _ = conn.DelRule(SNATRule) - } - } - - if err := conn.Flush(); err != nil { - return fmt.Errorf("flush del SNAT rule: %w", err) } return nil diff --git a/util/linuxfw/nftables_runner_test.go b/util/linuxfw/nftables_runner_test.go index 712a7b939..6fb180ed6 100644 --- a/util/linuxfw/nftables_runner_test.go +++ b/util/linuxfw/nftables_runner_test.go @@ -12,6 +12,7 @@ import ( "net/netip" "os" "runtime" + "slices" "strings" "testing" @@ -24,21 +25,21 @@ import ( "tailscale.com/types/logger" ) +func toAnySlice[T any](s []T) []any { + out := make([]any, len(s)) + for i, v := range s { + out[i] = v + } + return out +} + // nfdump returns a hexdump of 4 bytes per line (like nft --debug=all), allowing // users to make sense of large byte literals more easily. func nfdump(b []byte) string { var buf bytes.Buffer - i := 0 - for ; i < len(b); i += 4 { - // TODO: show printable characters as ASCII - fmt.Fprintf(&buf, "%02x %02x %02x %02x\n", - b[i], - b[i+1], - b[i+2], - b[i+3]) - } - for ; i < len(b); i++ { - fmt.Fprintf(&buf, "%02x ", b[i]) + for c := range slices.Chunk(b, 4) { + format := strings.Repeat("%02x ", len(c)) + fmt.Fprintf(&buf, format+"\n", toAnySlice(c)...) 
} return buf.String() } @@ -75,7 +76,7 @@ func linediff(a, b string) string { return buf.String() } -func newTestConn(t *testing.T, want [][]byte) *nftables.Conn { +func newTestConn(t *testing.T, want [][]byte, reply [][]netlink.Message) *nftables.Conn { conn, err := nftables.New(nftables.WithTestDial( func(req []netlink.Message) ([]netlink.Message, error) { for idx, msg := range req { @@ -96,7 +97,13 @@ func newTestConn(t *testing.T, want [][]byte) *nftables.Conn { } want = want[1:] } - return req, nil + // no reply for batch end message + if len(want) == 0 { + return nil, nil + } + rep := reply[0] + reply = reply[1:] + return rep, nil })) if err != nil { t.Fatal(err) @@ -120,7 +127,7 @@ func TestInsertHookRule(t *testing.T) { // batch end []byte("\x00\x00\x00\x0a"), } - testConn := newTestConn(t, want) + testConn := newTestConn(t, want, nil) table := testConn.AddTable(&nftables.Table{ Family: proto, Name: "ts-filter-test", @@ -160,7 +167,7 @@ func TestInsertLoopbackRule(t *testing.T) { // batch end []byte("\x00\x00\x00\x0a"), } - testConn := newTestConn(t, want) + testConn := newTestConn(t, want, nil) table := testConn.AddTable(&nftables.Table{ Family: proto, Name: "ts-filter-test", @@ -196,7 +203,7 @@ func TestInsertLoopbackRuleV6(t *testing.T) { // batch end []byte("\x00\x00\x00\x0a"), } - testConn := newTestConn(t, want) + testConn := newTestConn(t, want, nil) tableV6 := testConn.AddTable(&nftables.Table{ Family: protoV6, Name: "ts-filter-test", @@ -232,7 +239,7 @@ func TestAddReturnChromeOSVMRangeRule(t *testing.T) { // batch end []byte("\x00\x00\x00\x0a"), } - testConn := newTestConn(t, want) + testConn := newTestConn(t, want, nil) table := testConn.AddTable(&nftables.Table{ Family: proto, Name: "ts-filter-test", @@ -264,7 +271,7 @@ func TestAddDropCGNATRangeRule(t *testing.T) { // batch end []byte("\x00\x00\x00\x0a"), } - testConn := newTestConn(t, want) + testConn := newTestConn(t, want, nil) table := testConn.AddTable(&nftables.Table{ Family: proto, Name: "ts-filter-test", @@ -296,7 +303,7 @@ func TestAddSetSubnetRouteMarkRule(t *testing.T) { // batch end []byte("\x00\x00\x00\x0a"), } - testConn := newTestConn(t, want) + testConn := newTestConn(t, want, nil) table := testConn.AddTable(&nftables.Table{ Family: proto, Name: "ts-filter-test", @@ -328,7 +335,7 @@ func TestAddDropOutgoingPacketFromCGNATRangeRuleWithTunname(t *testing.T) { // batch end []byte("\x00\x00\x00\x0a"), } - testConn := newTestConn(t, want) + testConn := newTestConn(t, want, nil) table := testConn.AddTable(&nftables.Table{ Family: proto, Name: "ts-filter-test", @@ -360,7 +367,7 @@ func TestAddAcceptOutgoingPacketRule(t *testing.T) { // batch end []byte("\x00\x00\x00\x0a"), } - testConn := newTestConn(t, want) + testConn := newTestConn(t, want, nil) table := testConn.AddTable(&nftables.Table{ Family: proto, Name: "ts-filter-test", @@ -392,7 +399,7 @@ func TestAddAcceptIncomingPacketRule(t *testing.T) { // batch end []byte("\x00\x00\x00\x0a"), } - testConn := newTestConn(t, want) + testConn := newTestConn(t, want, nil) table := testConn.AddTable(&nftables.Table{ Family: proto, Name: "ts-filter-test", @@ -420,11 +427,11 @@ func TestAddMatchSubnetRouteMarkRuleMasq(t *testing.T) { // nft add chain ip ts-nat-test ts-postrouting-test { type nat hook postrouting priority 100; } 
[]byte("\x02\x00\x00\x00\x10\x00\x01\x00\x74\x73\x2d\x6e\x61\x74\x2d\x74\x65\x73\x74\x00\x18\x00\x03\x00\x74\x73\x2d\x70\x6f\x73\x74\x72\x6f\x75\x74\x69\x6e\x67\x2d\x74\x65\x73\x74\x00\x14\x00\x04\x80\x08\x00\x01\x00\x00\x00\x00\x04\x08\x00\x02\x00\x00\x00\x00\x64\x08\x00\x07\x00\x6e\x61\x74\x00"), // nft add rule ip ts-nat-test ts-postrouting-test meta mark & 0x00ff0000 == 0x00040000 counter masquerade - []byte("\x02\x00\x00\x00\x10\x00\x01\x00\x74\x73\x2d\x6e\x61\x74\x2d\x74\x65\x73\x74\x00\x18\x00\x02\x00\x74\x73\x2d\x70\x6f\x73\x74\x72\x6f\x75\x74\x69\x6e\x67\x2d\x74\x65\x73\x74\x00\xf4\x00\x04\x80\x24\x00\x01\x80\x09\x00\x01\x00\x6d\x65\x74\x61\x00\x00\x00\x00\x14\x00\x02\x80\x08\x00\x02\x00\x00\x00\x00\x03\x08\x00\x01\x00\x00\x00\x00\x01\x44\x00\x01\x80\x0c\x00\x01\x00\x62\x69\x74\x77\x69\x73\x65\x00\x34\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x08\x00\x03\x00\x00\x00\x00\x04\x0c\x00\x04\x80\x08\x00\x01\x00\x00\xff\x00\x00\x0c\x00\x05\x80\x08\x00\x01\x00\x00\x00\x00\x00\x2c\x00\x01\x80\x08\x00\x01\x00\x63\x6d\x70\x00\x20\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x00\x0c\x00\x03\x80\x08\x00\x01\x00\x00\x04\x00\x00\x2c\x00\x01\x80\x0c\x00\x01\x00\x63\x6f\x75\x6e\x74\x65\x72\x00\x1c\x00\x02\x80\x0c\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x30\x00\x01\x80\x0e\x00\x01\x00\x69\x6d\x6d\x65\x64\x69\x61\x74\x65\x00\x00\x00\x1c\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x00\x10\x00\x02\x80\x0c\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01"), + []byte("\x02\x00\x00\x00\x10\x00\x01\x00\x74\x73\x2d\x6e\x61\x74\x2d\x74\x65\x73\x74\x00\x18\x00\x02\x00\x74\x73\x2d\x70\x6f\x73\x74\x72\x6f\x75\x74\x69\x6e\x67\x2d\x74\x65\x73\x74\x00\xd8\x00\x04\x80\x24\x00\x01\x80\x09\x00\x01\x00\x6d\x65\x74\x61\x00\x00\x00\x00\x14\x00\x02\x80\x08\x00\x02\x00\x00\x00\x00\x03\x08\x00\x01\x00\x00\x00\x00\x01\x44\x00\x01\x80\x0c\x00\x01\x00\x62\x69\x74\x77\x69\x73\x65\x00\x34\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x08\x00\x03\x00\x00\x00\x00\x04\x0c\x00\x04\x80\x08\x00\x01\x00\x00\xff\x00\x00\x0c\x00\x05\x80\x08\x00\x01\x00\x00\x00\x00\x00\x2c\x00\x01\x80\x08\x00\x01\x00\x63\x6d\x70\x00\x20\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x00\x0c\x00\x03\x80\x08\x00\x01\x00\x00\x04\x00\x00\x2c\x00\x01\x80\x0c\x00\x01\x00\x63\x6f\x75\x6e\x74\x65\x72\x00\x1c\x00\x02\x80\x0c\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x14\x00\x01\x80\x09\x00\x01\x00\x6d\x61\x73\x71\x00\x00\x00\x00\x04\x00\x02\x80"), // batch end []byte("\x00\x00\x00\x0a"), } - testConn := newTestConn(t, want) + testConn := newTestConn(t, want, nil) table := testConn.AddTable(&nftables.Table{ Family: proto, Name: "ts-nat-test", @@ -436,7 +443,46 @@ func TestAddMatchSubnetRouteMarkRuleMasq(t *testing.T) { Hooknum: nftables.ChainHookPostrouting, Priority: nftables.ChainPriorityNATSource, }) - err := addMatchSubnetRouteMarkRule(testConn, table, chain, Accept) + err := addMatchSubnetRouteMarkRule(testConn, table, chain, Masq) + if err != nil { + t.Fatal(err) + } +} + +func TestDelMatchSubnetRouteMarkMasqRule(t *testing.T) { + proto := nftables.TableFamilyIPv4 + reply := [][]netlink.Message{ + nil, + {{Header: netlink.Header{Length: 0x128, Type: 0xa06, Flags: 0x802, Sequence: 0xa213d55d, PID: 0x11e79}, Data: []uint8{0x2, 0x0, 0x0, 0x8c, 0xd, 0x0, 0x1, 0x0, 0x6e, 0x61, 0x74, 0x2d, 0x74, 0x65, 0x73, 0x74, 0x0, 0x0, 0x0, 0x0, 0x18, 0x0, 0x2, 0x0, 
0x74, 0x73, 0x2d, 0x70, 0x6f, 0x73, 0x74, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2d, 0x74, 0x65, 0x73, 0x74, 0x0, 0xc, 0x0, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4, 0xe0, 0x0, 0x4, 0x0, 0x24, 0x0, 0x1, 0x0, 0x9, 0x0, 0x1, 0x0, 0x6d, 0x65, 0x74, 0x61, 0x0, 0x0, 0x0, 0x0, 0x14, 0x0, 0x2, 0x0, 0x8, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x3, 0x8, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x1, 0x4c, 0x0, 0x1, 0x0, 0xc, 0x0, 0x1, 0x0, 0x62, 0x69, 0x74, 0x77, 0x69, 0x73, 0x65, 0x0, 0x3c, 0x0, 0x2, 0x0, 0x8, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x1, 0x8, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x1, 0x8, 0x0, 0x3, 0x0, 0x0, 0x0, 0x0, 0x4, 0x8, 0x0, 0x6, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x0, 0x4, 0x0, 0x8, 0x0, 0x1, 0x0, 0x0, 0xff, 0x0, 0x0, 0xc, 0x0, 0x5, 0x0, 0x8, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2c, 0x0, 0x1, 0x0, 0x8, 0x0, 0x1, 0x0, 0x63, 0x6d, 0x70, 0x0, 0x20, 0x0, 0x2, 0x0, 0x8, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x1, 0x8, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x0, 0x3, 0x0, 0x8, 0x0, 0x1, 0x0, 0x0, 0x4, 0x0, 0x0, 0x2c, 0x0, 0x1, 0x0, 0xc, 0x0, 0x1, 0x0, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x0, 0x1c, 0x0, 0x2, 0x0, 0xc, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14, 0x0, 0x1, 0x0, 0x9, 0x0, 0x1, 0x0, 0x6d, 0x61, 0x73, 0x71, 0x0, 0x0, 0x0, 0x0, 0x4, 0x0, 0x2, 0x0}}}, + {{Header: netlink.Header{Length: 0x14, Type: 0x3, Flags: 0x2, Sequence: 0x311fdccb, PID: 0x11e79}, Data: []uint8{0x0, 0x0, 0x0, 0x0}}}, + {{Header: netlink.Header{Length: 0x24, Type: 0x2, Flags: 0x100, Sequence: 0x311fdccb, PID: 0x11e79}, Data: []uint8{0x0, 0x0, 0x0, 0x0, 0x48, 0x0, 0x0, 0x0, 0x8, 0xa, 0x5, 0x0, 0xcb, 0xdc, 0x1f, 0x31, 0x79, 0x1e, 0x1, 0x0}}}, + } + want := [][]byte{ + // get rules in nat-test table ts-postrouting-test chain + []byte("\x02\x00\x00\x00\x0d\x00\x01\x00\x6e\x61\x74\x2d\x74\x65\x73\x74\x00\x00\x00\x00\x18\x00\x02\x00\x74\x73\x2d\x70\x6f\x73\x74\x72\x6f\x75\x74\x69\x6e\x67\x2d\x74\x65\x73\x74\x00"), + // batch begin + []byte("\x00\x00\x00\x0a"), + // nft delete rule ip nat-test ts-postrouting-test handle 4 + []byte("\x02\x00\x00\x00\x0d\x00\x01\x00\x6e\x61\x74\x2d\x74\x65\x73\x74\x00\x00\x00\x00\x18\x00\x02\x00\x74\x73\x2d\x70\x6f\x73\x74\x72\x6f\x75\x74\x69\x6e\x67\x2d\x74\x65\x73\x74\x00\x0c\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x04"), + // batch end + []byte("\x00\x00\x00\x0a"), + } + + conn := newTestConn(t, want, reply) + + table := &nftables.Table{ + Family: proto, + Name: "nat-test", + } + chain := &nftables.Chain{ + Name: "ts-postrouting-test", + Table: table, + Type: nftables.ChainTypeNAT, + Hooknum: nftables.ChainHookPostrouting, + Priority: nftables.ChainPriorityNATSource, + } + + err := delMatchSubnetRouteMarkMasqRule(conn, table, chain) if err != nil { t.Fatal(err) } @@ -456,7 +502,7 @@ func TestAddMatchSubnetRouteMarkRuleAccept(t *testing.T) { // batch end []byte("\x00\x00\x00\x0a"), } - testConn := newTestConn(t, want) + testConn := newTestConn(t, want, nil) table := testConn.AddTable(&nftables.Table{ Family: proto, Name: "ts-filter-test", From a0d7c81a27ae037ab5bd0c6441f4677cf9e75e3a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 30 Apr 2025 21:23:43 -0700 Subject: [PATCH 0813/1708] ipn/ipnlocal: fix Taildrop regression from refactoring This fixes a refactoring bug introduced in 8b72dd7873201 Tests (that failed on this) are coming in a separate change. 
Updates #15812 Change-Id: Ibbf461b4eaefe22ad3005fc243d0a918e8af8981 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/taildrop.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/taildrop.go b/ipn/ipnlocal/taildrop.go index 17ca40926..d8113d219 100644 --- a/ipn/ipnlocal/taildrop.go +++ b/ipn/ipnlocal/taildrop.go @@ -194,8 +194,8 @@ func (b *LocalBackend) FileTargets() ([]*apitype.FileTarget, error) { if !p.Valid() || p.Hostinfo().OS() == "tvOS" { return false } - if self != p.User() { - return false + if self == p.User() { + return true } if p.Addresses().Len() != 0 && cn.PeerHasCap(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) { // Explicitly noted in the netmap ACL caps as a target. From e415f51351ac7cbc33a5aef78967017dc952d258 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 28 Apr 2025 19:57:01 -0700 Subject: [PATCH 0814/1708] feature/taildrop: add integration test Taildrop has never had an end-to-end test since it was introduced. This adds a basic one. It caught two recent refactoring bugs & one from 2022 (0f7da5c7dc0). This is prep for moving the rest of Taildrop out of LocalBackend, so we can do more refactorings with some confidence. Updates #15812 Change-Id: I6182e49c5641238af0bfdd9fea1ef0420c112738 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/tailscaled.go | 16 +- feature/taildrop/integration_test.go | 170 ++++++++++++++++++ feature/taildrop/localapi.go | 18 +- tstest/integration/integration.go | 28 ++- tstest/integration/integration_test.go | 13 +- tstest/integration/testcontrol/testcontrol.go | 77 ++++---- 6 files changed, 272 insertions(+), 50 deletions(-) create mode 100644 feature/taildrop/integration_test.go diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 1c5236123..4b0dc95f9 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -573,7 +573,7 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID if ms, ok := sys.MagicSock.GetOK(); ok { debugMux.HandleFunc("/debug/magicsock", ms.ServeHTTPDebug) } - go runDebugServer(debugMux, args.debug) + go runDebugServer(logf, debugMux, args.debug) } ns, err := newNetstack(logf, sys) @@ -819,12 +819,20 @@ func servePrometheusMetrics(w http.ResponseWriter, r *http.Request) { clientmetric.WritePrometheusExpositionFormat(w) } -func runDebugServer(mux *http.ServeMux, addr string) { +func runDebugServer(logf logger.Logf, mux *http.ServeMux, addr string) { + ln, err := net.Listen("tcp", addr) + if err != nil { + log.Fatalf("debug server: %v", err) + } + if strings.HasSuffix(addr, ":0") { + // Log kernel-selected port number so integration tests + // can find it portably. 
+ logf("DEBUG-ADDR=%v", ln.Addr()) + } srv := &http.Server{ - Addr: addr, Handler: mux, } - if err := srv.ListenAndServe(); err != nil { + if err := srv.Serve(ln); err != nil { log.Fatal(err) } } diff --git a/feature/taildrop/integration_test.go b/feature/taildrop/integration_test.go new file mode 100644 index 000000000..46768bb31 --- /dev/null +++ b/feature/taildrop/integration_test.go @@ -0,0 +1,170 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package taildrop_test + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "testing" + "time" + + "tailscale.com/client/local" + "tailscale.com/client/tailscale/apitype" + "tailscale.com/tailcfg" + "tailscale.com/tstest" + "tailscale.com/tstest/integration" + "tailscale.com/tstest/integration/testcontrol" +) + +// TODO(bradfitz): add test where control doesn't send tailcfg.CapabilityFileSharing +// and verify that we get the "file sharing not enabled by Tailscale admin" error. + +// TODO(bradfitz): add test between different users with the peercap to permit that? + +func TestTaildropIntegration(t *testing.T) { + tstest.Parallel(t) + controlOpt := integration.ConfigureControl(func(s *testcontrol.Server) { + s.AllNodesSameUser = true // required for Taildrop + }) + env := integration.NewTestEnv(t, controlOpt) + + // Create two nodes: + n1 := integration.NewTestNode(t, env) + d1 := n1.StartDaemon() + + n2 := integration.NewTestNode(t, env) + d2 := n2.StartDaemon() + + n1.AwaitListening() + t.Logf("n1 is listening") + n2.AwaitListening() + t.Logf("n2 is listening") + n1.MustUp() + t.Logf("n1 is up") + n2.MustUp() + t.Logf("n2 is up") + n1.AwaitRunning() + t.Logf("n1 is running") + n2.AwaitRunning() + t.Logf("n2 is running") + + var peerStableID tailcfg.StableNodeID + + if err := tstest.WaitFor(5*time.Second, func() error { + st := n1.MustStatus() + if len(st.Peer) == 0 { + return errors.New("no peers") + } + if len(st.Peer) > 1 { + return fmt.Errorf("got %d peers; want 1", len(st.Peer)) + } + peer := st.Peer[st.Peers()[0]] + peerStableID = peer.ID + if peer.ID == st.Self.ID { + return errors.New("peer is self") + } + + if len(st.TailscaleIPs) == 0 { + return errors.New("no Tailscale IPs") + } + + return nil + }); err != nil { + t.Fatal(err) + } + + const timeout = 30 * time.Second + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + c1 := n1.LocalClient() + c2 := n2.LocalClient() + + wantNoWaitingFiles := func(c *local.Client) { + t.Helper() + files, err := c.WaitingFiles(ctx) + if err != nil { + t.Fatalf("WaitingFiles: %v", err) + } + if len(files) != 0 { + t.Fatalf("WaitingFiles: got %d files; want 0", len(files)) + } + } + + // Verify c2 has no files. 
+ wantNoWaitingFiles(c2) + + gotFile := make(chan bool, 1) + go func() { + v, err := c2.AwaitWaitingFiles(t.Context(), timeout) + if err != nil { + return + } + if len(v) != 0 { + gotFile <- true + } + }() + + fileContents := []byte("hello world this is a file") + + n2ID := n2.MustStatus().Self.ID + t.Logf("n2 self.ID = %q; n1's peer[0].ID = %q", n2ID, peerStableID) + t.Logf("Doing PushFile ...") + err := c1.PushFile(ctx, n2.MustStatus().Self.ID, int64(len(fileContents)), "test.txt", bytes.NewReader(fileContents)) + if err != nil { + t.Fatalf("PushFile from n1->n2: %v", err) + } + t.Logf("PushFile done") + + select { + case <-gotFile: + t.Logf("n2 saw AwaitWaitingFiles wake up") + case <-ctx.Done(): + t.Fatalf("n2 timeout waiting for AwaitWaitingFiles") + } + + files, err := c2.WaitingFiles(ctx) + if err != nil { + t.Fatalf("c2.WaitingFiles: %v", err) + } + if len(files) != 1 { + t.Fatalf("c2.WaitingFiles: got %d files; want 1", len(files)) + } + got := files[0] + want := apitype.WaitingFile{ + Name: "test.txt", + Size: int64(len(fileContents)), + } + if got != want { + t.Fatalf("c2.WaitingFiles: got %+v; want %+v", got, want) + } + + // Download the file. + rc, size, err := c2.GetWaitingFile(ctx, got.Name) + if err != nil { + t.Fatalf("c2.GetWaitingFile: %v", err) + } + if size != int64(len(fileContents)) { + t.Fatalf("c2.GetWaitingFile: got size %d; want %d", size, len(fileContents)) + } + gotBytes, err := io.ReadAll(rc) + if err != nil { + t.Fatalf("c2.GetWaitingFile: %v", err) + } + if !bytes.Equal(gotBytes, fileContents) { + t.Fatalf("c2.GetWaitingFile: got %q; want %q", gotBytes, fileContents) + } + + // Now delete it. + if err := c2.DeleteWaitingFile(ctx, got.Name); err != nil { + t.Fatalf("c2.DeleteWaitingFile: %v", err) + } + wantNoWaitingFiles(c2) + + d1.MustCleanShutdown(t) + d2.MustCleanShutdown(t) +} diff --git a/feature/taildrop/localapi.go b/feature/taildrop/localapi.go index ce812514e..067a51f91 100644 --- a/feature/taildrop/localapi.go +++ b/feature/taildrop/localapi.go @@ -365,6 +365,7 @@ func serveFiles(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { return } ctx := r.Context() + var wfs []apitype.WaitingFile if s := r.FormValue("waitsec"); s != "" && s != "0" { d, err := strconv.Atoi(s) if err != nil { @@ -375,11 +376,18 @@ func serveFiles(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { var cancel context.CancelFunc ctx, cancel = context.WithDeadline(ctx, deadline) defer cancel() - } - wfs, err := lb.AwaitWaitingFiles(ctx) - if err != nil && ctx.Err() == nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + wfs, err = lb.AwaitWaitingFiles(ctx) + if err != nil && ctx.Err() == nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } else { + var err error + wfs, err = lb.WaitingFiles() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } } w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(wfs) diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index 743a0382c..2cde76b65 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -33,6 +33,7 @@ import ( "time" "go4.org/mem" + "tailscale.com/client/local" "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/ipn" @@ -436,6 +437,7 @@ func NewTestEnv(t testing.TB, opts ...TestEnvOpt) *TestEnv { derpMap := RunDERPAndSTUN(t, logger.Discard, "127.0.0.1") logc := new(LogCatcher) control := &testcontrol.Server{ + 
Logf: logger.WithPrefix(t.Logf, "testcontrol: "), DERPMap: derpMap, } control.HTTPTestServer = httptest.NewUnstartedServer(control) @@ -484,6 +486,7 @@ type TestNode struct { mu sync.Mutex onLogLine []func([]byte) + lc *local.Client } // NewTestNode allocates a temp directory for a new test node. @@ -500,14 +503,18 @@ func NewTestNode(t *testing.T, env *TestEnv) *TestNode { env: env, dir: dir, sockFile: sockFile, - stateFile: filepath.Join(dir, "tailscale.state"), + stateFile: filepath.Join(dir, "tailscaled.state"), // matches what cmd/tailscaled uses } - // Look for a data race. Once we see the start marker, start logging the rest. + // Look for a data race or panic. + // Once we see the start marker, start logging the rest. var sawRace bool var sawPanic bool n.addLogLineHook(func(line []byte) { lineB := mem.B(line) + if mem.Contains(lineB, mem.S("DEBUG-ADDR=")) { + t.Log(strings.TrimSpace(string(line))) + } if mem.Contains(lineB, mem.S("WARNING: DATA RACE")) { sawRace = true } @@ -522,6 +529,20 @@ func NewTestNode(t *testing.T, env *TestEnv) *TestNode { return n } +func (n *TestNode) LocalClient() *local.Client { + n.mu.Lock() + defer n.mu.Unlock() + if n.lc == nil { + tr := &http.Transport{} + n.lc = &local.Client{ + Socket: n.sockFile, + UseSocketOnly: true, + } + n.env.t.Cleanup(tr.CloseIdleConnections) + } + return n.lc +} + func (n *TestNode) diskPrefs() *ipn.Prefs { t := n.env.t t.Helper() @@ -668,9 +689,10 @@ func (n *TestNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { t := n.env.t cmd := exec.Command(n.env.daemon) cmd.Args = append(cmd.Args, - "--state="+n.stateFile, + "--statedir="+n.dir, "--socket="+n.sockFile, "--socks5-server=localhost:0", + "--debug=localhost:0", ) if *verboseTailscaled { cmd.Args = append(cmd.Args, "-verbose=2") diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 0da2e6086..7e0d1332f 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -278,15 +278,20 @@ func TestOneNodeUpAuth(t *testing.T) { t.Logf("Running up --login-server=%s ...", env.ControlURL()) cmd := n1.Tailscale("up", "--login-server="+env.ControlURL()) - var authCountAtomic int32 + var authCountAtomic atomic.Int32 cmd.Stdout = &authURLParserWriter{fn: func(urlStr string) error { + t.Logf("saw auth URL %q", urlStr) if env.Control.CompleteAuth(urlStr) { - atomic.AddInt32(&authCountAtomic, 1) + if authCountAtomic.Add(1) > 1 { + err := errors.New("completed multple auth URLs") + t.Error(err) + return err + } t.Logf("completed auth path %s", urlStr) return nil } err := fmt.Errorf("Failed to complete auth path to %q", urlStr) - t.Log(err) + t.Error(err) return err }} cmd.Stderr = cmd.Stdout @@ -297,7 +302,7 @@ func TestOneNodeUpAuth(t *testing.T) { n1.AwaitRunning() - if n := atomic.LoadInt32(&authCountAtomic); n != 1 { + if n := authCountAtomic.Load(); n != 1 { t.Errorf("Auth URLs completed = %d; want 1", n) } diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 52b96fe4d..71205f897 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -55,6 +55,10 @@ type Server struct { MagicDNSDomain string HandleC2N http.Handler // if non-nil, used for /some-c2n-path/ in tests + // AllNodesSameUser, if true, makes all created nodes + // belong to the same user. + AllNodesSameUser bool + // ExplicitBaseURL or HTTPTestServer must be set. ExplicitBaseURL string // e.g. 
"http://127.0.0.1:1234" with no trailing URL HTTPTestServer *httptest.Server // if non-nil, used to get BaseURL @@ -96,9 +100,9 @@ type Server struct { logins map[key.NodePublic]*tailcfg.Login updates map[tailcfg.NodeID]chan updateType authPath map[string]*AuthPath - nodeKeyAuthed map[key.NodePublic]bool // key => true once authenticated - msgToSend map[key.NodePublic]any // value is *tailcfg.PingRequest or entire *tailcfg.MapResponse - allExpired bool // All nodes will be told their node key is expired. + nodeKeyAuthed set.Set[key.NodePublic] + msgToSend map[key.NodePublic]any // value is *tailcfg.PingRequest or entire *tailcfg.MapResponse + allExpired bool // All nodes will be told their node key is expired. } // BaseURL returns the server's base URL, without trailing slash. @@ -522,6 +526,10 @@ func (s *Server) getUser(nodeKey key.NodePublic) (*tailcfg.User, *tailcfg.Login) return u, s.logins[nodeKey] } id := tailcfg.UserID(len(s.users) + 1) + if s.AllNodesSameUser { + id = 123 + } + s.logf("Created user %v for node %s", id, nodeKey) loginName := fmt.Sprintf("user-%d@%s", id, domain) displayName := fmt.Sprintf("User %d", id) login := &tailcfg.Login{ @@ -582,10 +590,8 @@ func (s *Server) CompleteAuth(authPathOrURL string) bool { if ap.nodeKey.IsZero() { panic("zero AuthPath.NodeKey") } - if s.nodeKeyAuthed == nil { - s.nodeKeyAuthed = map[key.NodePublic]bool{} - } - s.nodeKeyAuthed[ap.nodeKey] = true + s.nodeKeyAuthed.Make() + s.nodeKeyAuthed.Add(ap.nodeKey) ap.CompleteSuccessfully() return true } @@ -645,36 +651,40 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. if s.nodes == nil { s.nodes = map[key.NodePublic]*tailcfg.Node{} } - + _, ok := s.nodes[nk] machineAuthorized := true // TODO: add Server.RequireMachineAuth + if !ok { - v4Prefix := netip.PrefixFrom(netaddr.IPv4(100, 64, uint8(tailcfg.NodeID(user.ID)>>8), uint8(tailcfg.NodeID(user.ID))), 32) - v6Prefix := netip.PrefixFrom(tsaddr.Tailscale4To6(v4Prefix.Addr()), 128) - - allowedIPs := []netip.Prefix{ - v4Prefix, - v6Prefix, - } + nodeID := len(s.nodes) + 1 + v4Prefix := netip.PrefixFrom(netaddr.IPv4(100, 64, uint8(nodeID>>8), uint8(nodeID)), 32) + v6Prefix := netip.PrefixFrom(tsaddr.Tailscale4To6(v4Prefix.Addr()), 128) - s.nodes[nk] = &tailcfg.Node{ - ID: tailcfg.NodeID(user.ID), - StableID: tailcfg.StableNodeID(fmt.Sprintf("TESTCTRL%08x", int(user.ID))), - User: user.ID, - Machine: mkey, - Key: req.NodeKey, - MachineAuthorized: machineAuthorized, - Addresses: allowedIPs, - AllowedIPs: allowedIPs, - Hostinfo: req.Hostinfo.View(), - Name: req.Hostinfo.Hostname, - Capabilities: []tailcfg.NodeCapability{ - tailcfg.CapabilityHTTPS, - tailcfg.NodeAttrFunnel, - tailcfg.CapabilityFunnelPorts + "?ports=8080,443", - }, + allowedIPs := []netip.Prefix{ + v4Prefix, + v6Prefix, + } + node := &tailcfg.Node{ + ID: tailcfg.NodeID(nodeID), + StableID: tailcfg.StableNodeID(fmt.Sprintf("TESTCTRL%08x", int(nodeID))), + User: user.ID, + Machine: mkey, + Key: req.NodeKey, + MachineAuthorized: machineAuthorized, + Addresses: allowedIPs, + AllowedIPs: allowedIPs, + Hostinfo: req.Hostinfo.View(), + Name: req.Hostinfo.Hostname, + Capabilities: []tailcfg.NodeCapability{ + tailcfg.CapabilityHTTPS, + tailcfg.NodeAttrFunnel, + tailcfg.CapabilityFileSharing, + tailcfg.CapabilityFunnelPorts + "?ports=8080,443", + }, + } + s.nodes[nk] = node } requireAuth := s.RequireAuth - if requireAuth && s.nodeKeyAuthed[nk] { + if requireAuth && s.nodeKeyAuthed.Contains(nk) { requireAuth = false } allExpired := s.allExpired @@ -951,7 +961,6 
@@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, node.CapMap = nodeCapMap node.Capabilities = append(node.Capabilities, tailcfg.NodeAttrDisableUPnP) - user, _ := s.getUser(nk) t := time.Date(2020, 8, 3, 0, 0, 0, 1, time.UTC) dns := s.DNSConfig if dns != nil && s.MagicDNSDomain != "" { @@ -1013,7 +1022,7 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, }) res.UserProfiles = s.allUserProfiles() - v4Prefix := netip.PrefixFrom(netaddr.IPv4(100, 64, uint8(tailcfg.NodeID(user.ID)>>8), uint8(tailcfg.NodeID(user.ID))), 32) + v4Prefix := netip.PrefixFrom(netaddr.IPv4(100, 64, uint8(node.ID>>8), uint8(node.ID)), 32) v6Prefix := netip.PrefixFrom(tsaddr.Tailscale4To6(v4Prefix.Addr()), 128) res.Node.Addresses = []netip.Prefix{ From 383664b2f7b4dd70d2dadc7ff5de3bdca080ffed Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 1 May 2025 11:54:43 -0700 Subject: [PATCH 0815/1708] cmd/tsidp: remove backticks in README in shell example Fixes #15818 Change-Id: I7a6f4c7368fed74b865a63acdea4559c3d0a0d09 Signed-off-by: Brad Fitzpatrick --- cmd/tsidp/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tsidp/README.md b/cmd/tsidp/README.md index 29ce089df..61a81e8ae 100644 --- a/cmd/tsidp/README.md +++ b/cmd/tsidp/README.md @@ -35,7 +35,7 @@ ```bash docker run -d \ - --name `tsidp` \ + --name tsidp \ -p 443:443 \ -e TS_AUTHKEY=YOUR_TAILSCALE_AUTHKEY \ -e TS_HOSTNAME=idp \ From f05347a5bfa9ea544d70c49c8e7f5e82dc5ced5c Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 2 May 2025 09:04:18 -0700 Subject: [PATCH 0816/1708] wgengine/magicsock: implement more relay handshake disco handling (#15856) Conn.handleDiscoMessage() now makes a distinction between relay handshake disco messages and peer disco messages. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 71 +++++++++++++++++++++++++-------- 1 file changed, 55 insertions(+), 16 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 471d04e98..fadef40bc 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1627,13 +1627,13 @@ func (c *Conn) sendDiscoMessage(dst netip.AddrPort, geneveVNI *uint32, dstKey ke } var di *discoInfo switch { - case c.peerMap.knownPeerDiscoKey(dstDisco): - di = c.discoInfoForKnownPeerLocked(dstDisco) case isRelayHandshakeMsg: // TODO(jwhited): consider caching relay server disco shared keys di = &discoInfo{ sharedKey: c.discoPrivate.Shared(dstDisco), } + case c.peerMap.knownPeerDiscoKey(dstDisco): + di = c.discoInfoForKnownPeerLocked(dstDisco) default: // This is an attempt to send to an unknown peer that is not a relay // server. This can happen when a call to the current function, which is @@ -1768,10 +1768,22 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke if !isDiscoMsg { return } + var geneve packet.GeneveHeader if isGeneveEncap { - // TODO(jwhited): decode Geneve header + err := geneve.Decode(msg) + if err != nil { + // Decode only returns an error when 'msg' is too short, and + // 'isGeneveEncap' indicates it's a sufficient length. + c.logf("[unexpected] geneve header decoding error: %v", err) + return + } msg = msg[packet.GeneveFixedHeaderLength:] } + // The control bit should only be set for relay handshake messages + // terminating on or originating from a UDP relay server. 
We have yet to + // open the encrypted payload to determine the [disco.MessageType], but + // we assert it should be handshake-related. + shouldBeRelayHandshakeMsg := isGeneveEncap && geneve.Control sender := key.DiscoPublicFromRaw32(mem.B(msg[len(disco.Magic):discoHeaderLen])) @@ -1790,11 +1802,20 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke return } - if !c.peerMap.knownPeerDiscoKey(sender) { - // Geneve encapsulated disco used for udp relay handshakes are not known - // "peer" keys as they are dynamically discovered by UDP relay endpoint - // allocation or [disco.CallMeMaybeVia] reception. - // TODO(jwhited): handle relay handshake messsages instead of early return + var di *discoInfo + switch { + case shouldBeRelayHandshakeMsg: + var ok bool + di, ok = c.discoInfoForRelayHandshakeLocked(sender, geneve.VNI) + if !ok { + if debugDisco() { + c.logf("magicsock: disco: ignoring disco-looking relay handshake frame, no active handshakes with key %v over VNI %d", sender.ShortString(), geneve.VNI) + } + return + } + case c.peerMap.knownPeerDiscoKey(sender): + di = c.discoInfoForKnownPeerLocked(sender) + default: metricRecvDiscoBadPeer.Add(1) if debugDisco() { c.logf("magicsock: disco: ignoring disco-looking frame, don't know of key %v", sender.ShortString()) @@ -1803,7 +1824,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke } isDERP := src.Addr() == tailcfg.DerpMagicIPAddr - if !isDERP { + if !isDERP && !shouldBeRelayHandshakeMsg { // Record receive time for UDP transport packets. pi, ok := c.peerMap.byIPPort[src] if ok { @@ -1811,17 +1832,13 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke } } - // We're now reasonably sure we're expecting communication from - // this peer, do the heavy crypto lifting to see what they want. - // - // From here on, peerNode and de are non-nil. - - di := c.discoInfoForKnownPeerLocked(sender) + // We're now reasonably sure we're expecting communication from 'sender', + // do the heavy crypto lifting to see what they want. sealedBox := msg[discoHeaderLen:] payload, ok := di.sharedKey.Open(sealedBox) if !ok { - // This might be have been intended for a previous + // This might have been intended for a previous // disco key. When we restart we get a new disco key // and old packets might've still been in flight (or // scheduled). This is particularly the case for LANs @@ -1864,6 +1881,19 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke metricRecvDiscoUDP.Add(1) } + if shouldBeRelayHandshakeMsg { + _, ok := dm.(*disco.BindUDPRelayEndpointChallenge) + if !ok { + // We successfully parsed the disco message, but it wasn't a + // challenge. We should never receive other message types + // from a relay server with the Geneve header control bit set. + c.logf("[unexpected] %T packets should not come from a relay server with Geneve control bit set", dm) + return + } + // TODO(jwhited): handle the challenge on the associated [*endpoint] + return + } + switch dm := dm.(type) { case *disco.Ping: metricRecvDiscoPing.Add(1) @@ -2078,6 +2108,15 @@ func (c *Conn) enqueueCallMeMaybe(derpAddr netip.AddrPort, de *endpoint) { } } +// discoInfoForRelayHandshakeLocked returns a [*discoInfo] for k and vni if one +// is known, i.e. an [endpoint] has an in-progress handshake with k over vni. 
+// +// c.mu must be held +func (c *Conn) discoInfoForRelayHandshakeLocked(k key.DiscoPublic, vni uint32) (*discoInfo, bool) { + // TODO(jwhited): implement + return nil, false +} + // discoInfoForKnownPeerLocked returns the previous or new discoInfo for k. // // Callers must only pass key.DiscoPublic's that are present in and From 3105ecd958ebdc7c1383d6d2251031b968155668 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 2 May 2025 11:01:13 -0700 Subject: [PATCH 0817/1708] hostinfo,tailcfg: report TPM availability on windows/linux (#15831) Start collecting fleet data on TPM availability via hostinfo. Updates #15830 Signed-off-by: Andrew Lytvynov --- build_dist.sh | 2 +- cmd/k8s-operator/depaware.txt | 8 +++ cmd/tailscaled/depaware.txt | 8 +++ feature/condregister/maybe_tpm.go | 8 +++ feature/tpm/tpm.go | 83 +++++++++++++++++++++++++++++++ feature/tpm/tpm_linux.go | 18 +++++++ feature/tpm/tpm_other.go | 12 +++++ feature/tpm/tpm_test.go | 19 +++++++ feature/tpm/tpm_windows.go | 18 +++++++ go.mod | 1 + go.sum | 4 ++ tailcfg/tailcfg.go | 27 ++++++++++ tailcfg/tailcfg_clone.go | 4 ++ tailcfg/tailcfg_test.go | 1 + tailcfg/tailcfg_view.go | 5 +- 15 files changed, 216 insertions(+), 2 deletions(-) create mode 100644 feature/condregister/maybe_tpm.go create mode 100644 feature/tpm/tpm.go create mode 100644 feature/tpm/tpm_linux.go create mode 100644 feature/tpm/tpm_other.go create mode 100644 feature/tpm/tpm_test.go create mode 100644 feature/tpm/tpm_windows.go diff --git a/build_dist.sh b/build_dist.sh index f11d4aae2..fed37c264 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,7 +41,7 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" - tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_taildrop" + tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_taildrop,ts_omit_tpm" ;; --box) if [ ! 
-z "${TAGS:-}" ]; then diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 4cc4a8d46..186c5a0c0 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -135,6 +135,13 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/google/go-cmp/cmp/internal/flags from github.com/google/go-cmp/cmp+ github.com/google/go-cmp/cmp/internal/function from github.com/google/go-cmp/cmp 💣 github.com/google/go-cmp/cmp/internal/value from github.com/google/go-cmp/cmp + github.com/google/go-tpm/legacy/tpm2 from github.com/google/go-tpm/tpm2/transport+ + github.com/google/go-tpm/tpm2 from tailscale.com/feature/tpm + github.com/google/go-tpm/tpm2/transport from github.com/google/go-tpm/tpm2/transport/linuxtpm+ + L github.com/google/go-tpm/tpm2/transport/linuxtpm from tailscale.com/feature/tpm + W github.com/google/go-tpm/tpm2/transport/windowstpm from tailscale.com/feature/tpm + github.com/google/go-tpm/tpmutil from github.com/google/go-tpm/legacy/tpm2+ + W 💣 github.com/google/go-tpm/tpmutil/tbs from github.com/google/go-tpm/legacy/tpm2+ github.com/google/gofuzz from k8s.io/apimachinery/pkg/apis/meta/v1+ github.com/google/gofuzz/bytesource from github.com/google/gofuzz L github.com/google/nftables from tailscale.com/util/linuxfw @@ -813,6 +820,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/feature/relayserver from tailscale.com/feature/condregister tailscale.com/feature/taildrop from tailscale.com/feature/condregister L tailscale.com/feature/tap from tailscale.com/feature/condregister + tailscale.com/feature/tpm from tailscale.com/feature/condregister tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 329c00e93..c5d5a7b2d 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -109,6 +109,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ + github.com/google/go-tpm/legacy/tpm2 from github.com/google/go-tpm/tpm2/transport+ + github.com/google/go-tpm/tpm2 from tailscale.com/feature/tpm + github.com/google/go-tpm/tpm2/transport from github.com/google/go-tpm/tpm2/transport/linuxtpm+ + L github.com/google/go-tpm/tpm2/transport/linuxtpm from tailscale.com/feature/tpm + W github.com/google/go-tpm/tpm2/transport/windowstpm from tailscale.com/feature/tpm + github.com/google/go-tpm/tpmutil from github.com/google/go-tpm/legacy/tpm2+ + W 💣 github.com/google/go-tpm/tpmutil/tbs from github.com/google/go-tpm/legacy/tpm2+ L github.com/google/nftables from tailscale.com/util/linuxfw L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ @@ -271,6 +278,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/relayserver from tailscale.com/feature/condregister tailscale.com/feature/taildrop from tailscale.com/feature/condregister L tailscale.com/feature/tap from tailscale.com/feature/condregister + tailscale.com/feature/tpm from tailscale.com/feature/condregister 
tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal diff --git a/feature/condregister/maybe_tpm.go b/feature/condregister/maybe_tpm.go new file mode 100644 index 000000000..caa57fef1 --- /dev/null +++ b/feature/condregister/maybe_tpm.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios && !ts_omit_tpm + +package condregister + +import _ "tailscale.com/feature/tpm" diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go new file mode 100644 index 000000000..18e56ae89 --- /dev/null +++ b/feature/tpm/tpm.go @@ -0,0 +1,83 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package tpm implements support for TPM 2.0 devices. +package tpm + +import ( + "slices" + "sync" + + "github.com/google/go-tpm/tpm2" + "github.com/google/go-tpm/tpm2/transport" + "tailscale.com/feature" + "tailscale.com/hostinfo" + "tailscale.com/tailcfg" +) + +var infoOnce = sync.OnceValue(info) + +func init() { + feature.Register("tpm") + hostinfo.RegisterHostinfoNewHook(func(hi *tailcfg.Hostinfo) { + hi.TPM = infoOnce() + }) +} + +//lint:ignore U1000 used in Linux and Windows builds only +func infoFromCapabilities(tpm transport.TPM) *tailcfg.TPMInfo { + info := new(tailcfg.TPMInfo) + toStr := func(s *string) func(*tailcfg.TPMInfo, uint32) { + return func(info *tailcfg.TPMInfo, value uint32) { + *s += propToString(value) + } + } + for _, cap := range []struct { + prop tpm2.TPMPT + apply func(info *tailcfg.TPMInfo, value uint32) + }{ + {tpm2.TPMPTManufacturer, toStr(&info.Manufacturer)}, + {tpm2.TPMPTVendorString1, toStr(&info.Vendor)}, + {tpm2.TPMPTVendorString2, toStr(&info.Vendor)}, + {tpm2.TPMPTVendorString3, toStr(&info.Vendor)}, + {tpm2.TPMPTVendorString4, toStr(&info.Vendor)}, + {tpm2.TPMPTRevision, func(info *tailcfg.TPMInfo, value uint32) { info.SpecRevision = int(value) }}, + {tpm2.TPMPTVendorTPMType, func(info *tailcfg.TPMInfo, value uint32) { info.Model = int(value) }}, + {tpm2.TPMPTFirmwareVersion1, func(info *tailcfg.TPMInfo, value uint32) { info.FirmwareVersion += uint64(value) << 32 }}, + {tpm2.TPMPTFirmwareVersion2, func(info *tailcfg.TPMInfo, value uint32) { info.FirmwareVersion += uint64(value) }}, + } { + resp, err := tpm2.GetCapability{ + Capability: tpm2.TPMCapTPMProperties, + Property: uint32(cap.prop), + PropertyCount: 1, + }.Execute(tpm) + if err != nil { + continue + } + props, err := resp.CapabilityData.Data.TPMProperties() + if err != nil { + continue + } + if len(props.TPMProperty) == 0 { + continue + } + cap.apply(info, props.TPMProperty[0].Value) + } + return info +} + +// propToString converts TPM_PT property value, which is a uint32, into a +// string of up to 4 ASCII characters. This encoding applies only to some +// properties, see +// https://trustedcomputinggroup.org/resource/tpm-library-specification/ Part +// 2, section 6.13. +func propToString(v uint32) string { + chars := []byte{ + byte(v >> 24), + byte(v >> 16), + byte(v >> 8), + byte(v), + } + // Delete any non-printable ASCII characters. 
+ return string(slices.DeleteFunc(chars, func(b byte) bool { return b < ' ' || b > '~' })) +} diff --git a/feature/tpm/tpm_linux.go b/feature/tpm/tpm_linux.go new file mode 100644 index 000000000..a90c0e153 --- /dev/null +++ b/feature/tpm/tpm_linux.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tpm + +import ( + "github.com/google/go-tpm/tpm2/transport/linuxtpm" + "tailscale.com/tailcfg" +) + +func info() *tailcfg.TPMInfo { + t, err := linuxtpm.Open("/dev/tpm0") + if err != nil { + return nil + } + defer t.Close() + return infoFromCapabilities(t) +} diff --git a/feature/tpm/tpm_other.go b/feature/tpm/tpm_other.go new file mode 100644 index 000000000..ba7c67621 --- /dev/null +++ b/feature/tpm/tpm_other.go @@ -0,0 +1,12 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux && !windows + +package tpm + +import "tailscale.com/tailcfg" + +func info() *tailcfg.TPMInfo { + return nil +} diff --git a/feature/tpm/tpm_test.go b/feature/tpm/tpm_test.go new file mode 100644 index 000000000..fc0fc178c --- /dev/null +++ b/feature/tpm/tpm_test.go @@ -0,0 +1,19 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tpm + +import "testing" + +func TestPropToString(t *testing.T) { + for prop, want := range map[uint32]string{ + 0: "", + 0x4D534654: "MSFT", + 0x414D4400: "AMD", + 0x414D440D: "AMD", + } { + if got := propToString(prop); got != want { + t.Errorf("propToString(0x%x): got %q, want %q", prop, got, want) + } + } +} diff --git a/feature/tpm/tpm_windows.go b/feature/tpm/tpm_windows.go new file mode 100644 index 000000000..578d687af --- /dev/null +++ b/feature/tpm/tpm_windows.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tpm + +import ( + "github.com/google/go-tpm/tpm2/transport/windowstpm" + "tailscale.com/tailcfg" +) + +func info() *tailcfg.TPMInfo { + t, err := windowstpm.Open() + if err != nil { + return nil + } + defer t.Close() + return infoFromCapabilities(t) +} diff --git a/go.mod b/go.mod index 0c1224cf1..f346b1e40 100644 --- a/go.mod +++ b/go.mod @@ -44,6 +44,7 @@ require ( github.com/golangci/golangci-lint v1.57.1 github.com/google/go-cmp v0.6.0 github.com/google/go-containerregistry v0.20.2 + github.com/google/go-tpm v0.9.4 github.com/google/gopacket v1.1.19 github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 github.com/google/uuid v1.6.0 diff --git a/go.sum b/go.sum index 8c8da8d14..bdbae11bb 100644 --- a/go.sum +++ b/go.sum @@ -486,6 +486,10 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo= github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8= +github.com/google/go-tpm v0.9.4 h1:awZRf9FwOeTunQmHoDYSHJps3ie6f1UlhS1fOdPEt1I= +github.com/google/go-tpm v0.9.4/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= +github.com/google/go-tpm-tools v0.3.13-0.20230620182252-4639ecce2aba h1:qJEJcuLzH5KDR0gKc0zcktin6KSAwL7+jWKBYceddTc= +github.com/google/go-tpm-tools v0.3.13-0.20230620182252-4639ecce2aba/go.mod h1:EFYHy8/1y2KfgTAsx7Luu7NGhoxtuVHnNo8jE7FikKc= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 
h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index ada0df8fc..79ec72d2e 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -875,10 +875,37 @@ type Hostinfo struct { // explicitly declared by a node. Location *Location `json:",omitempty"` + TPM *TPMInfo `json:",omitempty"` // TPM device metadata, if available + // NOTE: any new fields containing pointers in this type // require changes to Hostinfo.Equal. } +// TPMInfo contains information about a TPM 2.0 device present on a node. +// All fields are read from TPM_CAP_TPM_PROPERTIES, see Part 2, section 6.13 of +// https://trustedcomputinggroup.org/resource/tpm-library-specification/. +type TPMInfo struct { + // Manufacturer is a 4-letter code from section 4.1 of + // https://trustedcomputinggroup.org/resource/vendor-id-registry/, + // for example "MSFT" for Microsoft. + // Read from TPM_PT_MANUFACTURER. + Manufacturer string `json:",omitempty"` + // Vendor is a vendor ID string, up to 16 characters. + // Read from TPM_PT_VENDOR_STRING_*. + Vendor string `json:",omitempty"` + // Model is a vendor-defined TPM model. + // Read from TPM_PT_VENDOR_TPM_TYPE. + Model int `json:",omitempty"` + // FirmwareVersion is the version number of the firmware. + // Read from TPM_PT_FIRMWARE_VERSION_*. + FirmwareVersion uint64 `json:",omitempty"` + // SpecRevision is the TPM 2.0 spec revision encoded as a single number. All + // revisions can be found at + // https://trustedcomputinggroup.org/resource/tpm-library-specification/. + // Before revision 184, TCG used the "01.83" format for revision 183. + SpecRevision int `json:",omitempty"` +} + // ServiceName is the name of a service, of the form `svc:dns-label`. Services // represent some kind of application provided for users of the tailnet with a // MagicDNS name and possibly dedicated IP addresses. Currently (2024-01-21), diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 3952f5f47..2c7941d51 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -141,6 +141,9 @@ func (src *Hostinfo) Clone() *Hostinfo { if dst.Location != nil { dst.Location = ptr.To(*src.Location) } + if dst.TPM != nil { + dst.TPM = ptr.To(*src.TPM) + } return dst } @@ -184,6 +187,7 @@ var _HostinfoCloneNeedsRegeneration = Hostinfo(struct { AppConnector opt.Bool ServicesHash string Location *Location + TPM *TPMInfo }{}) // Clone makes a deep copy of NetInfo. 
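As an aside, to make the TPM property decoding above concrete: feature/tpm/tpm.go reads raw uint32 values out of TPM_CAP_TPM_PROPERTIES and turns them into the TPMInfo fields added to Hostinfo. The following minimal standalone sketch is not part of this patch; propToASCII re-implements the patch's unexported propToString, and the firmware-version readings are hypothetical sample values used only to show how the two properties combine.

package main

import (
	"fmt"
	"slices"
)

// propToASCII mirrors propToString in feature/tpm/tpm.go: a TPM_PT property
// value packs up to four ASCII characters, big-endian; non-printable bytes
// (such as trailing NULs) are dropped.
func propToASCII(v uint32) string {
	chars := []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}
	return string(slices.DeleteFunc(chars, func(b byte) bool { return b < ' ' || b > '~' }))
}

func main() {
	fmt.Println(propToASCII(0x4D534654)) // "MSFT" (TPM_PT_MANUFACTURER)
	fmt.Println(propToASCII(0x414D4400)) // "AMD"  (trailing NUL removed)

	// FirmwareVersion combines two 32-bit properties the same way the patch
	// does: TPM_PT_FIRMWARE_VERSION_1 is the high word, _2 the low word.
	var v1, v2 uint32 = 0x00020000, 0x0001000A // hypothetical readings
	fw := uint64(v1)<<32 + uint64(v2)
	fmt.Printf("FirmwareVersion: 0x%016x\n", fw)
}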
diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index dd81af5d6..079162a15 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -68,6 +68,7 @@ func TestHostinfoEqual(t *testing.T) { "AppConnector", "ServicesHash", "Location", + "TPM", } if have := fieldsOf(reflect.TypeFor[Hostinfo]()); !reflect.DeepEqual(have, hiHandles) { t.Errorf("Hostinfo.Equal check might be out of sync\nfields: %q\nhandled: %q\n", diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index f8f9f865c..c76654887 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -301,7 +301,9 @@ func (v HostinfoView) UserspaceRouter() opt.Bool { return v.ж.User func (v HostinfoView) AppConnector() opt.Bool { return v.ж.AppConnector } func (v HostinfoView) ServicesHash() string { return v.ж.ServicesHash } func (v HostinfoView) Location() LocationView { return v.ж.Location.View() } -func (v HostinfoView) Equal(v2 HostinfoView) bool { return v.ж.Equal(v2.ж) } +func (v HostinfoView) TPM() views.ValuePointer[TPMInfo] { return views.ValuePointerOf(v.ж.TPM) } + +func (v HostinfoView) Equal(v2 HostinfoView) bool { return v.ж.Equal(v2.ж) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _HostinfoViewNeedsRegeneration = Hostinfo(struct { @@ -343,6 +345,7 @@ var _HostinfoViewNeedsRegeneration = Hostinfo(struct { AppConnector opt.Bool ServicesHash string Location *Location + TPM *TPMInfo }{}) // View returns a read-only view of NetInfo. From 761aea3036d9ac35fafbd871df8b52c6e2b80cca Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 1 May 2025 13:12:51 -0700 Subject: [PATCH 0818/1708] tstest/integration: don't require TestMake, stop leaking binaries in /tmp Previously all tests shared their tailscale+tailscaled binaries in system /tmp directories, which often leaked, and required TestMain to clean up (which feature/taildrop didn't use). This makes it use testing.T.TempDir for the binaries, but still only builds them once and efficiently as possible depending on the OS copies them around between each test's temp dir. Updates #15812 Change-Id: I0e2585613f272c3d798a423b8ad1737f8916f527 Signed-off-by: Brad Fitzpatrick --- tstest/integration/integration.go | 169 +++++++++++++++++++------ tstest/integration/integration_test.go | 1 - tstest/integration/vms/harness_test.go | 7 +- tstest/integration/vms/vms_test.go | 8 -- 4 files changed, 134 insertions(+), 51 deletions(-) diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index 2cde76b65..29d7c07fe 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -65,61 +65,151 @@ var ( // as a last ditch place to report errors. var MainError syncs.AtomicValue[error] -// CleanupBinaries cleans up any resources created by calls to BinaryDir, TailscaleBinary, or TailscaledBinary. -// It should be called from TestMain after all tests have completed. -func CleanupBinaries() { - buildOnce.Do(func() {}) - if binDir != "" { - os.RemoveAll(binDir) +// Binaries contains the paths to the tailscale and tailscaled binaries. +type Binaries struct { + Dir string + Tailscale BinaryInfo + Tailscaled BinaryInfo +} + +// BinaryInfo describes a tailscale or tailscaled binary. +type BinaryInfo struct { + Path string // abs path to tailscale or tailscaled binary + Size int64 + + // FD and FDmu are set on Unix to efficiently copy the binary to a new + // test's automatically-cleaned-up temp directory. 
+ FD *os.File // for Unix (macOS, Linux, ...) + FDMu sync.Locker + + // Contents is used on Windows instead of FD to copy the binary between + // test directories. (On Windows you can't keep an FD open while an earlier + // test's temp directories are deleted.) + // This burns some memory and costs more in I/O, but oh well. + Contents []byte +} + +func (b BinaryInfo) CopyTo(dir string) (BinaryInfo, error) { + ret := b + ret.Path = filepath.Join(dir, path.Base(b.Path)) + + switch runtime.GOOS { + case "linux": + // TODO(bradfitz): be fancy and use linkat with AT_EMPTY_PATH to avoid + // copying? I couldn't get it to work, though. + // For now, just do the same thing as every other Unix and copy + // the binary. + fallthrough + case "darwin", "freebsd", "openbsd", "netbsd": + f, err := os.OpenFile(ret.Path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o755) + if err != nil { + return BinaryInfo{}, err + } + b.FDMu.Lock() + b.FD.Seek(0, 0) + size, err := io.Copy(f, b.FD) + b.FDMu.Unlock() + if err != nil { + f.Close() + return BinaryInfo{}, fmt.Errorf("copying %q: %w", b.Path, err) + } + if size != b.Size { + f.Close() + return BinaryInfo{}, fmt.Errorf("copy %q: size mismatch: %d != %d", b.Path, size, b.Size) + } + if err := f.Close(); err != nil { + return BinaryInfo{}, err + } + return ret, nil + case "windows": + return ret, os.WriteFile(ret.Path, b.Contents, 0o755) + default: + return BinaryInfo{}, fmt.Errorf("unsupported OS %q", runtime.GOOS) } } -// BinaryDir returns a directory containing test tailscale and tailscaled binaries. -// If any test calls BinaryDir, there must be a TestMain function that calls -// CleanupBinaries after all tests are complete. -func BinaryDir(tb testing.TB) string { +// GetBinaries create a temp directory using tb and builds (or copies previously +// built) cmd/tailscale and cmd/tailscaled binaries into that directory. +// +// It fails tb if the build or binary copies fail. +func GetBinaries(tb testing.TB) *Binaries { + dir := tb.TempDir() buildOnce.Do(func() { - binDir, buildErr = buildTestBinaries() + buildErr = buildTestBinaries(dir) }) if buildErr != nil { tb.Fatal(buildErr) } - return binDir -} - -// TailscaleBinary returns the path to the test tailscale binary. -// If any test calls TailscaleBinary, there must be a TestMain function that calls -// CleanupBinaries after all tests are complete. -func TailscaleBinary(tb testing.TB) string { - return filepath.Join(BinaryDir(tb), "tailscale"+exe()) -} - -// TailscaledBinary returns the path to the test tailscaled binary. -// If any test calls TailscaleBinary, there must be a TestMain function that calls -// CleanupBinaries after all tests are complete. -func TailscaledBinary(tb testing.TB) string { - return filepath.Join(BinaryDir(tb), "tailscaled"+exe()) + if binariesCache.Dir == dir { + return binariesCache + } + ts, err := binariesCache.Tailscale.CopyTo(dir) + if err != nil { + tb.Fatalf("copying tailscale binary: %v", err) + } + tsd, err := binariesCache.Tailscaled.CopyTo(dir) + if err != nil { + tb.Fatalf("copying tailscaled binary: %v", err) + } + return &Binaries{ + Dir: dir, + Tailscale: ts, + Tailscaled: tsd, + } } var ( - buildOnce sync.Once - buildErr error - binDir string + buildOnce sync.Once + buildErr error + binariesCache *Binaries ) // buildTestBinaries builds tailscale and tailscaled. -// It returns the dir containing the binaries. -func buildTestBinaries() (string, error) { - bindir, err := os.MkdirTemp("", "") +// On success, it initializes [binariesCache]. 
+func buildTestBinaries(dir string) error { + getBinaryInfo := func(name string) (BinaryInfo, error) { + bi := BinaryInfo{Path: filepath.Join(dir, name+exe())} + fi, err := os.Stat(bi.Path) + if err != nil { + return BinaryInfo{}, fmt.Errorf("stat %q: %v", bi.Path, err) + } + bi.Size = fi.Size() + + switch runtime.GOOS { + case "windows": + bi.Contents, err = os.ReadFile(bi.Path) + if err != nil { + return BinaryInfo{}, fmt.Errorf("read %q: %v", bi.Path, err) + } + default: + bi.FD, err = os.OpenFile(bi.Path, os.O_RDONLY, 0) + if err != nil { + return BinaryInfo{}, fmt.Errorf("open %q: %v", bi.Path, err) + } + bi.FDMu = new(sync.Mutex) + // Note: bi.FD is copied around between tests but never closed, by + // design. It will be closed when the process exits, and that will + // close the inode that we're copying the bytes from for each test. + } + return bi, nil + } + err := build(dir, "tailscale.com/cmd/tailscaled", "tailscale.com/cmd/tailscale") if err != nil { - return "", err + return err + } + b := &Binaries{ + Dir: dir, } - err = build(bindir, "tailscale.com/cmd/tailscaled", "tailscale.com/cmd/tailscale") + b.Tailscale, err = getBinaryInfo("tailscale") if err != nil { - os.RemoveAll(bindir) - return "", err + return err + } + b.Tailscaled, err = getBinaryInfo("tailscaled") + if err != nil { + return err } - return bindir, nil + binariesCache = b + return nil } func build(outDir string, targets ...string) error { @@ -442,10 +532,11 @@ func NewTestEnv(t testing.TB, opts ...TestEnvOpt) *TestEnv { } control.HTTPTestServer = httptest.NewUnstartedServer(control) trafficTrap := new(trafficTrap) + binaries := GetBinaries(t) e := &TestEnv{ t: t, - cli: TailscaleBinary(t), - daemon: TailscaledBinary(t), + cli: binaries.Tailscale.Path, + daemon: binaries.Tailscaled.Path, LogCatcher: logc, LogCatcherServer: httptest.NewServer(logc), Control: control, diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 7e0d1332f..90cc7e443 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -49,7 +49,6 @@ func TestMain(m *testing.M) { os.Setenv("TS_DISABLE_UPNP", "true") flag.Parse() v := m.Run() - CleanupBinaries() if v != 0 { os.Exit(v) } diff --git a/tstest/integration/vms/harness_test.go b/tstest/integration/vms/harness_test.go index 1e080414d..256227d6c 100644 --- a/tstest/integration/vms/harness_test.go +++ b/tstest/integration/vms/harness_test.go @@ -134,11 +134,12 @@ func newHarness(t *testing.T) *Harness { loginServer := fmt.Sprintf("http://%s", ln.Addr()) t.Logf("loginServer: %s", loginServer) + binaries := integration.GetBinaries(t) h := &Harness{ pubKey: string(pubkey), - binaryDir: integration.BinaryDir(t), - cli: integration.TailscaleBinary(t), - daemon: integration.TailscaledBinary(t), + binaryDir: binaries.Dir, + cli: binaries.Tailscale.Path, + daemon: binaries.Tailscaled.Path, signer: signer, loginServerURL: loginServer, cs: cs, diff --git a/tstest/integration/vms/vms_test.go b/tstest/integration/vms/vms_test.go index 6d73a3f78..f71f2bdbf 100644 --- a/tstest/integration/vms/vms_test.go +++ b/tstest/integration/vms/vms_test.go @@ -28,7 +28,6 @@ import ( "golang.org/x/crypto/ssh" "golang.org/x/sync/semaphore" "tailscale.com/tstest" - "tailscale.com/tstest/integration" "tailscale.com/types/logger" ) @@ -51,13 +50,6 @@ var ( }() ) -func TestMain(m *testing.M) { - flag.Parse() - v := m.Run() - integration.CleanupBinaries() - os.Exit(v) -} - func TestDownloadImages(t *testing.T) { if !*runVMTests { t.Skip("not 
running integration tests (need --run-vm-tests)") From fd631238498a0a2ca583b9ab2287585611729c28 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 2 May 2025 13:08:17 -0700 Subject: [PATCH 0819/1708] wgengine/magicsock: shape relayManager and CallMeMaybeVia handling (#15864) relayManager will eventually be responsible for handling the allocation and handshaking of UDP relay server endpoints. relay servers are endpoint-independent, and Conn must already maintain handshake state for all endpoints. This justifies a new data structure to fill these roles. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 68 ++++++++++++++++++------------ wgengine/magicsock/relaymanager.go | 51 ++++++++++++++++++++++ 2 files changed, 93 insertions(+), 26 deletions(-) create mode 100644 wgengine/magicsock/relaymanager.go diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index fadef40bc..7df46f76c 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -317,7 +317,11 @@ type Conn struct { // by node key, node ID, and discovery key. peerMap peerMap - // discoInfo is the state for an active DiscoKey. + // relayManager manages allocation and handshaking of + // [tailscale.com/net/udprelay.Server] endpoints. + relayManager relayManager + + // discoInfo is the state for an active peer DiscoKey. discoInfo map[key.DiscoPublic]*discoInfo // netInfoFunc is a callback that provides a tailcfg.NetInfo when @@ -1628,9 +1632,11 @@ func (c *Conn) sendDiscoMessage(dst netip.AddrPort, geneveVNI *uint32, dstKey ke var di *discoInfo switch { case isRelayHandshakeMsg: - // TODO(jwhited): consider caching relay server disco shared keys - di = &discoInfo{ - sharedKey: c.discoPrivate.Shared(dstDisco), + var ok bool + di, ok = c.relayManager.discoInfo(dstDisco) + if !ok { + c.mu.Unlock() + return false, errors.New("unknown relay server") } case c.peerMap.knownPeerDiscoKey(dstDisco): di = c.discoInfoForKnownPeerLocked(dstDisco) @@ -1806,7 +1812,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke switch { case shouldBeRelayHandshakeMsg: var ok bool - di, ok = c.discoInfoForRelayHandshakeLocked(sender, geneve.VNI) + di, ok = c.relayManager.discoInfo(sender) if !ok { if debugDisco() { c.logf("magicsock: disco: ignoring disco-looking relay handshake frame, no active handshakes with key %v over VNI %d", sender.ShortString(), geneve.VNI) @@ -1882,7 +1888,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke } if shouldBeRelayHandshakeMsg { - _, ok := dm.(*disco.BindUDPRelayEndpointChallenge) + challenge, ok := dm.(*disco.BindUDPRelayEndpointChallenge) if !ok { // We successfully parsed the disco message, but it wasn't a // challenge. 
We should never receive other message types @@ -1890,7 +1896,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke c.logf("[unexpected] %T packets should not come from a relay server with Geneve control bit set", dm) return } - // TODO(jwhited): handle the challenge on the associated [*endpoint] + c.relayManager.handleBindUDPRelayEndpointChallenge(challenge, di, src, geneve.VNI) return } @@ -1909,18 +1915,28 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke } return true }) - case *disco.CallMeMaybe: + case *disco.CallMeMaybe, *disco.CallMeMaybeVia: + var via *disco.CallMeMaybeVia + isVia := false + msgType := "CallMeMaybe" + cmm, ok := dm.(*disco.CallMeMaybe) + if !ok { + via = dm.(*disco.CallMeMaybeVia) + msgType = "CallMeMaybeVia" + isVia = true + } + metricRecvDiscoCallMeMaybe.Add(1) if !isDERP || derpNodeSrc.IsZero() { - // CallMeMaybe messages should only come via DERP. - c.logf("[unexpected] CallMeMaybe packets should only come via DERP") + // CallMeMaybe{Via} messages should only come via DERP. + c.logf("[unexpected] %s packets should only come via DERP", msgType) return } nodeKey := derpNodeSrc ep, ok := c.peerMap.endpointForNodeKey(nodeKey) if !ok { metricRecvDiscoCallMeMaybeBadNode.Add(1) - c.logf("magicsock: disco: ignoring CallMeMaybe from %v; %v is unknown", sender.ShortString(), derpNodeSrc.ShortString()) + c.logf("magicsock: disco: ignoring %s from %v; %v is unknown", msgType, sender.ShortString(), derpNodeSrc.ShortString()) return } epDisco := ep.disco.Load() @@ -1929,14 +1945,23 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke } if epDisco.key != di.discoKey { metricRecvDiscoCallMeMaybeBadDisco.Add(1) - c.logf("[unexpected] CallMeMaybe from peer via DERP whose netmap discokey != disco source") + c.logf("[unexpected] %s from peer via DERP whose netmap discokey != disco source", msgType) return } - c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got call-me-maybe, %d endpoints", - c.discoShort, epDisco.short, - ep.publicKey.ShortString(), derpStr(src.String()), - len(dm.MyNumber)) - go ep.handleCallMeMaybe(dm) + if isVia { + c.dlogf("[v1] magicsock: disco: %v<-%v via %v (%v, %v) got call-me-maybe-via, %d endpoints", + c.discoShort, epDisco.short, via.ServerDisco.ShortString(), + ep.publicKey.ShortString(), derpStr(src.String()), + len(via.AddrPorts)) + c.relayManager.handleCallMeMaybeVia(via) + } else { + c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got call-me-maybe, %d endpoints", + c.discoShort, epDisco.short, + ep.publicKey.ShortString(), derpStr(src.String()), + len(cmm.MyNumber)) + go ep.handleCallMeMaybe(cmm) + } + } return } @@ -2108,15 +2133,6 @@ func (c *Conn) enqueueCallMeMaybe(derpAddr netip.AddrPort, de *endpoint) { } } -// discoInfoForRelayHandshakeLocked returns a [*discoInfo] for k and vni if one -// is known, i.e. an [endpoint] has an in-progress handshake with k over vni. -// -// c.mu must be held -func (c *Conn) discoInfoForRelayHandshakeLocked(k key.DiscoPublic, vni uint32) (*discoInfo, bool) { - // TODO(jwhited): implement - return nil, false -} - // discoInfoForKnownPeerLocked returns the previous or new discoInfo for k. 
// // Callers must only pass key.DiscoPublic's that are present in and diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go new file mode 100644 index 000000000..bf737b078 --- /dev/null +++ b/wgengine/magicsock/relaymanager.go @@ -0,0 +1,51 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package magicsock + +import ( + "net/netip" + "sync" + + "tailscale.com/disco" + "tailscale.com/types/key" +) + +// relayManager manages allocation and handshaking of +// [tailscale.com/net/udprelay.Server] endpoints. The zero value is ready for +// use. +type relayManager struct { + mu sync.Mutex // guards the following fields + discoInfoByServerDisco map[key.DiscoPublic]*discoInfo +} + +func (h *relayManager) initLocked() { + if h.discoInfoByServerDisco != nil { + return + } + h.discoInfoByServerDisco = make(map[key.DiscoPublic]*discoInfo) +} + +// discoInfo returns a [*discoInfo] for 'serverDisco' if there is an +// active/ongoing handshake with it, otherwise it returns nil, false. +func (h *relayManager) discoInfo(serverDisco key.DiscoPublic) (_ *discoInfo, ok bool) { + h.mu.Lock() + defer h.mu.Unlock() + h.initLocked() + di, ok := h.discoInfoByServerDisco[serverDisco] + return di, ok +} + +func (h *relayManager) handleCallMeMaybeVia(dm *disco.CallMeMaybeVia) { + h.mu.Lock() + defer h.mu.Unlock() + h.initLocked() + // TODO(jwhited): implement +} + +func (h *relayManager) handleBindUDPRelayEndpointChallenge(dm *disco.BindUDPRelayEndpointChallenge, di *discoInfo, src netip.AddrPort, vni uint32) { + h.mu.Lock() + defer h.mu.Unlock() + h.initLocked() + // TODO(jwhited): implement +} From 4fa9411e3fe351c4336e5db40b28d68bf193ff3e Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 28 Apr 2025 11:17:19 -0700 Subject: [PATCH 0820/1708] logtail: remove unneeded IP redaction code Updates tailscale/corp#15664 Change-Id: I9523a43860685048548890cf1931ee6cbd60452c Signed-off-by: Brad Fitzpatrick --- logtail/logtail.go | 43 ---------------------- logtail/logtail_test.go | 80 ----------------------------------------- 2 files changed, 123 deletions(-) diff --git a/logtail/logtail.go b/logtail/logtail.go index a617397f9..b355addd2 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -15,9 +15,7 @@ import ( "log" mrand "math/rand/v2" "net/http" - "net/netip" "os" - "regexp" "runtime" "slices" "strconv" @@ -29,7 +27,6 @@ import ( "tailscale.com/envknob" "tailscale.com/net/netmon" "tailscale.com/net/sockstats" - "tailscale.com/net/tsaddr" "tailscale.com/tstime" tslogger "tailscale.com/types/logger" "tailscale.com/types/logid" @@ -833,8 +830,6 @@ func (l *Logger) Logf(format string, args ...any) { fmt.Fprintf(l, format, args...) } -var obscureIPs = envknob.RegisterBool("TS_OBSCURE_LOGGED_IPS") - // Write logs an encoded JSON blob. // // If the []byte passed to Write is not an encoded JSON blob, @@ -859,10 +854,6 @@ func (l *Logger) Write(buf []byte) (int, error) { } } - if obscureIPs() { - buf = redactIPs(buf) - } - l.writeLock.Lock() defer l.writeLock.Unlock() @@ -871,40 +862,6 @@ func (l *Logger) Write(buf []byte) (int, error) { return inLen, err } -var ( - regexMatchesIPv6 = regexp.MustCompile(`([0-9a-fA-F]{1,4}):([0-9a-fA-F]{1,4}):([0-9a-fA-F:]{1,4})*`) - regexMatchesIPv4 = regexp.MustCompile(`(\d{1,3})\.(\d{1,3})\.\d{1,3}\.\d{1,3}`) -) - -// redactIPs is a helper function used in Write() to redact IPs (other than tailscale IPs). 
-// This function takes a log line as a byte slice and -// uses regex matching to parse and find IP addresses. Based on if the IP address is IPv4 or -// IPv6, it parses and replaces the end of the addresses with an "x". This function returns the -// log line with the IPs redacted. -func redactIPs(buf []byte) []byte { - out := regexMatchesIPv6.ReplaceAllFunc(buf, func(b []byte) []byte { - ip, err := netip.ParseAddr(string(b)) - if err != nil || tsaddr.IsTailscaleIP(ip) { - return b // don't change this one - } - - prefix := bytes.Split(b, []byte(":")) - return bytes.Join(append(prefix[:2], []byte("x")), []byte(":")) - }) - - out = regexMatchesIPv4.ReplaceAllFunc(out, func(b []byte) []byte { - ip, err := netip.ParseAddr(string(b)) - if err != nil || tsaddr.IsTailscaleIP(ip) { - return b // don't change this one - } - - prefix := bytes.Split(b, []byte(".")) - return bytes.Join(append(prefix[:2], []byte("x.x")), []byte(".")) - }) - - return []byte(out) -} - var ( openBracketV = []byte("[v") v1 = []byte("[v1] ") diff --git a/logtail/logtail_test.go b/logtail/logtail_test.go index 3ea630406..b8c46c448 100644 --- a/logtail/logtail_test.go +++ b/logtail/logtail_test.go @@ -15,7 +15,6 @@ import ( "time" "github.com/go-json-experiment/json/jsontext" - "tailscale.com/envknob" "tailscale.com/tstest" "tailscale.com/tstime" "tailscale.com/util/must" @@ -316,85 +315,6 @@ func TestLoggerWriteResult(t *testing.T) { t.Errorf("mismatch.\n got: %#q\nwant: %#q", back, want) } } -func TestRedact(t *testing.T) { - envknob.Setenv("TS_OBSCURE_LOGGED_IPS", "true") - tests := []struct { - in string - want string - }{ - // tests for ipv4 addresses - { - "120.100.30.47", - "120.100.x.x", - }, - { - "192.167.0.1/65", - "192.167.x.x/65", - }, - { - "node [5Btdd] d:e89a3384f526d251 now using 10.0.0.222:41641 mtu=1360 tx=d81a8a35a0ce", - "node [5Btdd] d:e89a3384f526d251 now using 10.0.x.x:41641 mtu=1360 tx=d81a8a35a0ce", - }, - //tests for ipv6 addresses - { - "2001:0db8:85a3:0000:0000:8a2e:0370:7334", - "2001:0db8:x", - }, - { - "2345:0425:2CA1:0000:0000:0567:5673:23b5", - "2345:0425:x", - }, - { - "2601:645:8200:edf0::c9de/64", - "2601:645:x/64", - }, - { - "node [5Btdd] d:e89a3384f526d251 now using 2051:0000:140F::875B:131C mtu=1360 tx=d81a8a35a0ce", - "node [5Btdd] d:e89a3384f526d251 now using 2051:0000:x mtu=1360 tx=d81a8a35a0ce", - }, - { - "2601:645:8200:edf0::c9de/64 2601:645:8200:edf0:1ce9:b17d:71f5:f6a3/64", - "2601:645:x/64 2601:645:x/64", - }, - //tests for tailscale ip addresses - { - "100.64.5.6", - "100.64.5.6", - }, - { - "fd7a:115c:a1e0::/96", - "fd7a:115c:a1e0::/96", - }, - //tests for ipv6 and ipv4 together - { - "192.167.0.1 2001:0db8:85a3:0000:0000:8a2e:0370:7334", - "192.167.x.x 2001:0db8:x", - }, - { - "node [5Btdd] d:e89a3384f526d251 now using 10.0.0.222:41641 mtu=1360 tx=d81a8a35a0ce 2345:0425:2CA1::0567:5673:23b5", - "node [5Btdd] d:e89a3384f526d251 now using 10.0.x.x:41641 mtu=1360 tx=d81a8a35a0ce 2345:0425:x", - }, - { - "100.64.5.6 2091:0db8:85a3:0000:0000:8a2e:0370:7334", - "100.64.5.6 2091:0db8:x", - }, - { - "192.167.0.1 120.100.30.47 2041:0000:140F::875B:131B", - "192.167.x.x 120.100.x.x 2041:0000:x", - }, - { - "fd7a:115c:a1e0::/96 192.167.0.1 2001:0db8:85a3:0000:0000:8a2e:0370:7334", - "fd7a:115c:a1e0::/96 192.167.x.x 2001:0db8:x", - }, - } - - for _, tt := range tests { - gotBuf := redactIPs([]byte(tt.in)) - if string(gotBuf) != tt.want { - t.Errorf("for %q,\n got: %#q\nwant: %#q\n", tt.in, gotBuf, tt.want) - } - } -} func TestAppendMetadata(t *testing.T) { var l Logger From 
653c45585e22e5c3d3b57f79539366dd98892650 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 2 May 2025 17:18:41 -0700 Subject: [PATCH 0821/1708] ipn/ipnlocal: rename localNodeContext to nodeBackend As just discussed on Slack with @nickkhyl. Updates #12614 Change-Id: I138dd7eaffb274494297567375d969b4122f3f50 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 54 +++++++++---------- ...{local_node_context.go => node_backend.go} | 52 +++++++++--------- 2 files changed, 53 insertions(+), 53 deletions(-) rename ipn/ipnlocal/{local_node_context.go => node_backend.go} (76%) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index b16906e71..e147b2240 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -258,7 +258,7 @@ type LocalBackend struct { // We intend to relax this in the future and only require holding b.mu when replacing it, // but that requires a better (strictly ordered?) state machine and better management // of [LocalBackend]'s own state that is not tied to the node context. - currentNodeAtomic atomic.Pointer[localNodeContext] + currentNodeAtomic atomic.Pointer[nodeBackend] conf *conffile.Config // latest parsed config, or nil if not in declarative mode pm *profileManager // mu guards access @@ -519,7 +519,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo captiveCancel: nil, // so that we start checkCaptivePortalLoop when Running needsCaptiveDetection: make(chan bool), } - b.currentNodeAtomic.Store(newLocalNodeContext()) + b.currentNodeAtomic.Store(newNodeBackend()) mConn.SetNetInfoCallback(b.setNetInfo) if sys.InitialConfig != nil { @@ -594,12 +594,12 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo func (b *LocalBackend) Clock() tstime.Clock { return b.clock } func (b *LocalBackend) Sys() *tsd.System { return b.sys } -func (b *LocalBackend) currentNode() *localNodeContext { +func (b *LocalBackend) currentNode() *nodeBackend { if v := b.currentNodeAtomic.Load(); v != nil || !testenv.InTest() { return v } // Auto-init one in tests for LocalBackend created without the NewLocalBackend constructor... - v := newLocalNodeContext() + v := newNodeBackend() b.currentNodeAtomic.CompareAndSwap(nil, v) return b.currentNodeAtomic.Load() } @@ -1466,7 +1466,7 @@ func (b *LocalBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { // AppendMatchingPeers returns base with all peers that match pred appended. // // It acquires b.mu to read the netmap but releases it before calling pred. -func (b *localNodeContext) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView { +func (b *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView { var peers []tailcfg.NodeView b.mu.Lock() @@ -1495,13 +1495,13 @@ func (b *localNodeContext) AppendMatchingPeers(base []tailcfg.NodeView, pred fun // PeerCaps returns the capabilities that remote src IP has to // ths current node. 
-func (b *localNodeContext) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { +func (b *nodeBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { b.mu.Lock() defer b.mu.Unlock() return b.peerCapsLocked(src) } -func (b *localNodeContext) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap { +func (b *nodeBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap { if b.netMap == nil { return nil } @@ -1523,7 +1523,7 @@ func (b *localNodeContext) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap { return nil } -func (b *localNodeContext) GetFilterForTest() *filter.Filter { +func (b *nodeBackend) GetFilterForTest() *filter.Filter { return b.filterAtomic.Load() } @@ -2034,7 +2034,7 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo return true } -func (c *localNodeContext) netMapWithPeers() *netmap.NetworkMap { +func (c *nodeBackend) netMapWithPeers() *netmap.NetworkMap { c.mu.Lock() defer c.mu.Unlock() if c.netMap == nil { @@ -2078,7 +2078,7 @@ func (b *LocalBackend) pickNewAutoExitNode() { b.send(ipn.Notify{Prefs: &newPrefs}) } -func (c *localNodeContext) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) { +func (c *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) { c.mu.Lock() defer c.mu.Unlock() if c.netMap == nil || len(c.peers) == 0 { @@ -2265,7 +2265,7 @@ func (b *LocalBackend) PeersForTest() []tailcfg.NodeView { return b.currentNode().PeersForTest() } -func (b *localNodeContext) PeersForTest() []tailcfg.NodeView { +func (b *nodeBackend) PeersForTest() []tailcfg.NodeView { b.mu.Lock() defer b.mu.Unlock() ret := slicesx.MapValues(b.peers) @@ -2547,12 +2547,12 @@ var invalidPacketFilterWarnable = health.Register(&health.Warnable{ // b.mu must be held. func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { // TODO(nickkhyl) split this into two functions: - // - (*localNodeContext).RebuildFilters() (normalFilter, jailedFilter *filter.Filter, changed bool), + // - (*nodeBackend).RebuildFilters() (normalFilter, jailedFilter *filter.Filter, changed bool), // which would return packet filters for the current state and whether they changed since the last call. // - (*LocalBackend).updateFilters(), which would use the above to update the engine with the new filters, // notify b.sshServer, etc. // - // For this, we would need to plumb a few more things into the [localNodeContext]. Most importantly, + // For this, we would need to plumb a few more things into the [nodeBackend]. Most importantly, // the current [ipn.PrefsView]), but also maybe also a b.logf and a b.health? // // NOTE(danderson): keep change detection as the first thing in @@ -2838,7 +2838,7 @@ func (b *LocalBackend) setFilter(f *filter.Filter) { b.e.SetFilter(f) } -func (c *localNodeContext) setFilter(f *filter.Filter) { +func (c *nodeBackend) setFilter(f *filter.Filter) { c.filterAtomic.Store(f) } @@ -3901,7 +3901,7 @@ func (b *LocalBackend) parseWgStatusLocked(s *wgengine.Status) (ret ipn.EngineSt // in Hostinfo. When the user preferences currently request "shields up" // mode, all inbound connections are refused, so services are not reported. // Otherwise, shouldUploadServices respects NetMap.CollectServices. -// TODO(nickkhyl): move this into [localNodeContext]? +// TODO(nickkhyl): move this into [nodeBackend]? 
func (b *LocalBackend) shouldUploadServices() bool { b.mu.Lock() defer b.mu.Unlock() @@ -4773,7 +4773,7 @@ func (b *LocalBackend) NetMap() *netmap.NetworkMap { return b.currentNode().NetMap() } -func (c *localNodeContext) NetMap() *netmap.NetworkMap { +func (c *nodeBackend) NetMap() *netmap.NetworkMap { c.mu.Lock() defer c.mu.Unlock() return c.netMap @@ -5018,7 +5018,7 @@ func shouldUseOneCGNATRoute(logf logger.Logf, mon *netmon.Monitor, controlKnobs return false } -func (c *localNodeContext) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { +func (c *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { c.mu.Lock() defer c.mu.Unlock() return dnsConfigForNetmap(c.netMap, c.peers, prefs, selfExpired, logf, versionOS) @@ -6144,7 +6144,7 @@ func (b *LocalBackend) setAutoExitNodeIDLockedOnEntry(unlock unlockOnce) (newPre return newPrefs } -func (c *localNodeContext) SetNetMap(nm *netmap.NetworkMap) { +func (c *nodeBackend) SetNetMap(nm *netmap.NetworkMap) { c.mu.Lock() defer c.mu.Unlock() c.netMap = nm @@ -6224,7 +6224,7 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.driveNotifyCurrentSharesLocked() } -func (b *localNodeContext) updateNodeByAddrLocked() { +func (b *nodeBackend) updateNodeByAddrLocked() { nm := b.netMap if nm == nil { b.nodeByAddr = nil @@ -6260,7 +6260,7 @@ func (b *localNodeContext) updateNodeByAddrLocked() { } } -func (b *localNodeContext) updatePeersLocked() { +func (b *nodeBackend) updatePeersLocked() { nm := b.netMap if nm == nil { b.peers = nil @@ -6667,13 +6667,13 @@ func (b *LocalBackend) TestOnlyPublicKeys() (machineKey key.MachinePublic, nodeK // PeerHasCap reports whether the peer with the given Tailscale IP addresses // contains the given capability string, with any value(s). -func (b *localNodeContext) PeerHasCap(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { +func (b *nodeBackend) PeerHasCap(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { b.mu.Lock() defer b.mu.Unlock() return b.peerHasCapLocked(addr, wantCap) } -func (b *localNodeContext) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { +func (b *nodeBackend) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { return b.peerCapsLocked(addr).HasCapability(wantCap) } @@ -6737,13 +6737,13 @@ func peerAPIURL(ip netip.Addr, port uint16) string { return fmt.Sprintf("http://%v", netip.AddrPortFrom(ip, port)) } -func (c *localNodeContext) PeerHasPeerAPI(p tailcfg.NodeView) bool { +func (c *nodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool { return c.PeerAPIBase(p) != "" } // PeerAPIBase returns the "http://ip:port" URL base to reach peer's PeerAPI, // or the empty string if the peer is invalid or doesn't support PeerAPI. 
-func (c *localNodeContext) PeerAPIBase(p tailcfg.NodeView) string { +func (c *nodeBackend) PeerAPIBase(p tailcfg.NodeView) string { c.mu.Lock() nm := c.netMap c.mu.Unlock() @@ -6987,7 +6987,7 @@ func exitNodeCanProxyDNS(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg return "", false } -func (c *localNodeContext) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { +func (c *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { c.mu.Lock() defer c.mu.Unlock() return exitNodeCanProxyDNS(c.netMap, c.peers, exitNodeID) @@ -7411,7 +7411,7 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err // down, so no need to do any work. return nil } - b.currentNodeAtomic.Store(newLocalNodeContext()) + b.currentNodeAtomic.Store(newNodeBackend()) b.setNetMapLocked(nil) // Reset netmap. b.updateFilterLocked(ipn.PrefsView{}) // Reset the NetworkMap in the engine @@ -8101,7 +8101,7 @@ func (b *LocalBackend) startAutoUpdate(logPrefix string) (retErr error) { // rules that require a source IP to have a certain node capability. // // TODO(bradfitz): optimize this later if/when it matters. -// TODO(nickkhyl): move this into [localNodeContext] along with [LocalBackend.updateFilterLocked]. +// TODO(nickkhyl): move this into [nodeBackend] along with [LocalBackend.updateFilterLocked]. func (b *LocalBackend) srcIPHasCapForFilter(srcIP netip.Addr, cap tailcfg.NodeCapability) bool { if cap == "" { // Shouldn't happen, but just in case. diff --git a/ipn/ipnlocal/local_node_context.go b/ipn/ipnlocal/node_backend.go similarity index 76% rename from ipn/ipnlocal/local_node_context.go rename to ipn/ipnlocal/node_backend.go index 871880893..e4d6d25bf 100644 --- a/ipn/ipnlocal/local_node_context.go +++ b/ipn/ipnlocal/node_backend.go @@ -18,29 +18,29 @@ import ( "tailscale.com/wgengine/filter" ) -// localNodeContext holds the [LocalBackend]'s context tied to a local node (usually the current one). +// nodeBackend is node-specific [LocalBackend] state. It is usually the current node. // // Its exported methods are safe for concurrent use, but the struct is not a snapshot of state at a given moment; // its state can change between calls. For example, asking for the same value (e.g., netmap or prefs) twice // may return different results. Returned values are immutable and safe for concurrent use. // -// If both the [LocalBackend]'s internal mutex and the [localNodeContext] mutex must be held at the same time, +// If both the [LocalBackend]'s internal mutex and the [nodeBackend] mutex must be held at the same time, // the [LocalBackend] mutex must be acquired first. See the comment on the [LocalBackend] field for more details. // -// Two pointers to different [localNodeContext] instances represent different local nodes. -// However, there's currently a bug where a new [localNodeContext] might not be created +// Two pointers to different [nodeBackend] instances represent different local nodes. +// However, there's currently a bug where a new [nodeBackend] might not be created // during an implicit node switch (see tailscale/corp#28014). // In the future, we might want to include at least the following in this struct (in addition to the current fields). // However, not everything should be exported or otherwise made available to the outside world (e.g. [ipnext] extensions, // peer API handlers, etc.). -// - [ipn.State]: when the LocalBackend switches to a different [localNodeContext], it can update the state of the old one. 
+// - [ipn.State]: when the LocalBackend switches to a different [nodeBackend], it can update the state of the old one. // - [ipn.LoginProfileView] and [ipn.Prefs]: we should update them when the [profileManager] reports changes to them. // In the future, [profileManager] (and the corresponding methods of the [LocalBackend]) can be made optional, // and something else could be used to set them once or update them as needed. // - [tailcfg.HostinfoView]: it includes certain fields that are tied to the current profile/node/prefs. We should also // update to build it once instead of mutating it in twelvety different places. -// - [filter.Filter] (normal and jailed, along with the filterHash): the localNodeContext could have a method to (re-)build +// - [filter.Filter] (normal and jailed, along with the filterHash): the nodeBackend could have a method to (re-)build // the filter for the current netmap/prefs (see [LocalBackend.updateFilterLocked]), and it needs to track the current // filters and their hash. // - Fields related to a requested or required (re-)auth: authURL, authURLTime, authActor, keyExpired, etc. @@ -51,7 +51,7 @@ import ( // It should not include any fields used by specific features that don't belong in [LocalBackend]. // Even if they're tied to the local node, instead of moving them here, we should extract the entire feature // into a separate package and have it install proper hooks. -type localNodeContext struct { +type nodeBackend struct { // filterAtomic is a stateful packet filter. Immutable once created, but can be // replaced with a new one. filterAtomic atomic.Pointer[filter.Filter] @@ -71,23 +71,23 @@ type localNodeContext struct { // peers is the set of current peers and their current values after applying // delta node mutations as they come in (with mu held). The map values can be // given out to callers, but the map itself can be mutated in place (with mu held) - // and must not escape the [localNodeContext]. + // and must not escape the [nodeBackend]. peers map[tailcfg.NodeID]tailcfg.NodeView // nodeByAddr maps nodes' own addresses (excluding subnet routes) to node IDs. - // It is mutated in place (with mu held) and must not escape the [localNodeContext]. + // It is mutated in place (with mu held) and must not escape the [nodeBackend]. nodeByAddr map[netip.Addr]tailcfg.NodeID } -func newLocalNodeContext() *localNodeContext { - cn := &localNodeContext{} +func newNodeBackend() *nodeBackend { + cn := &nodeBackend{} // Default filter blocks everything and logs nothing. noneFilter := filter.NewAllowNone(logger.Discard, &netipx.IPSet{}) cn.filterAtomic.Store(noneFilter) return cn } -func (c *localNodeContext) Self() tailcfg.NodeView { +func (c *nodeBackend) Self() tailcfg.NodeView { c.mu.Lock() defer c.mu.Unlock() if c.netMap == nil { @@ -96,7 +96,7 @@ func (c *localNodeContext) Self() tailcfg.NodeView { return c.netMap.SelfNode } -func (c *localNodeContext) SelfUserID() tailcfg.UserID { +func (c *nodeBackend) SelfUserID() tailcfg.UserID { self := c.Self() if !self.Valid() { return 0 @@ -105,13 +105,13 @@ func (c *localNodeContext) SelfUserID() tailcfg.UserID { } // SelfHasCap reports whether the specified capability was granted to the self node in the most recent netmap. 
-func (c *localNodeContext) SelfHasCap(wantCap tailcfg.NodeCapability) bool { +func (c *nodeBackend) SelfHasCap(wantCap tailcfg.NodeCapability) bool { return c.SelfHasCapOr(wantCap, false) } -// SelfHasCapOr is like [localNodeContext.SelfHasCap], but returns the specified default value +// SelfHasCapOr is like [nodeBackend.SelfHasCap], but returns the specified default value // if the netmap is not available yet. -func (c *localNodeContext) SelfHasCapOr(wantCap tailcfg.NodeCapability, def bool) bool { +func (c *nodeBackend) SelfHasCapOr(wantCap tailcfg.NodeCapability, def bool) bool { c.mu.Lock() defer c.mu.Unlock() if c.netMap == nil { @@ -120,7 +120,7 @@ func (c *localNodeContext) SelfHasCapOr(wantCap tailcfg.NodeCapability, def bool return c.netMap.AllCaps.Contains(wantCap) } -func (c *localNodeContext) NetworkProfile() ipn.NetworkProfile { +func (c *nodeBackend) NetworkProfile() ipn.NetworkProfile { c.mu.Lock() defer c.mu.Unlock() return ipn.NetworkProfile{ @@ -131,7 +131,7 @@ func (c *localNodeContext) NetworkProfile() ipn.NetworkProfile { } // TODO(nickkhyl): update it to return a [tailcfg.DERPMapView]? -func (c *localNodeContext) DERPMap() *tailcfg.DERPMap { +func (c *nodeBackend) DERPMap() *tailcfg.DERPMap { c.mu.Lock() defer c.mu.Unlock() if c.netMap == nil { @@ -140,14 +140,14 @@ func (c *localNodeContext) DERPMap() *tailcfg.DERPMap { return c.netMap.DERPMap } -func (c *localNodeContext) NodeByAddr(ip netip.Addr) (_ tailcfg.NodeID, ok bool) { +func (c *nodeBackend) NodeByAddr(ip netip.Addr) (_ tailcfg.NodeID, ok bool) { c.mu.Lock() defer c.mu.Unlock() nid, ok := c.nodeByAddr[ip] return nid, ok } -func (c *localNodeContext) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) { +func (c *nodeBackend) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) { c.mu.Lock() defer c.mu.Unlock() if c.netMap == nil { @@ -165,14 +165,14 @@ func (c *localNodeContext) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok boo return 0, false } -func (c *localNodeContext) PeerByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) { +func (c *nodeBackend) PeerByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) { c.mu.Lock() defer c.mu.Unlock() n, ok := c.peers[id] return n, ok } -func (c *localNodeContext) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok bool) { +func (c *nodeBackend) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok bool) { c.mu.Lock() nm := c.netMap c.mu.Unlock() @@ -184,7 +184,7 @@ func (c *localNodeContext) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileVie } // Peers returns all the current peers in an undefined order. -func (c *localNodeContext) Peers() []tailcfg.NodeView { +func (c *nodeBackend) Peers() []tailcfg.NodeView { c.mu.Lock() defer c.mu.Unlock() return slicesx.MapValues(c.peers) @@ -195,13 +195,13 @@ func (c *localNodeContext) Peers() []tailcfg.NodeView { // // TODO(nickkhyl): It is here temporarily until we can move the whole [LocalBackend.updateFilterLocked] here, // but change it so it builds and returns a filter for the current netmap/prefs instead of re-configuring the engine filter. -// Something like (*localNodeContext).RebuildFilters() (filter, jailedFilter *filter.Filter, changed bool) perhaps? -func (c *localNodeContext) unlockedNodesPermitted(packetFilter []filter.Match) bool { +// Something like (*nodeBackend).RebuildFilters() (filter, jailedFilter *filter.Filter, changed bool) perhaps? 
+func (c *nodeBackend) unlockedNodesPermitted(packetFilter []filter.Match) bool { c.mu.Lock() defer c.mu.Unlock() return packetFilterPermitsUnlockedNodes(c.peers, packetFilter) } -func (c *localNodeContext) filter() *filter.Filter { +func (c *nodeBackend) filter() *filter.Filter { return c.filterAtomic.Load() } From 32ce1bdb48078ec4cedaeeb5b1b2ff9c0ef61a49 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 2 May 2025 17:28:41 -0700 Subject: [PATCH 0822/1708] ipn/ipnlocal: use "nb" consistently as receiver for nodeBackend Cleanup after #15866. It was using a mix of "b" and "c" before. But "b" is ambiguous with LocalBackend's usual "b". Updates #12614 Change-Id: I8c2e84597555ec3db0d783a00ac1c12549ce6706 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 168 +++++++++++++++++------------------ ipn/ipnlocal/node_backend.go | 104 +++++++++++----------- 2 files changed, 136 insertions(+), 136 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index e147b2240..9dfa62d6e 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1466,16 +1466,16 @@ func (b *LocalBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { // AppendMatchingPeers returns base with all peers that match pred appended. // // It acquires b.mu to read the netmap but releases it before calling pred. -func (b *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView { +func (nb *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView { var peers []tailcfg.NodeView - b.mu.Lock() - if b.netMap != nil { + nb.mu.Lock() + if nb.netMap != nil { // All fields on b.netMap are immutable, so this is // safe to copy and use outside the lock. - peers = b.netMap.Peers + peers = nb.netMap.Peers } - b.mu.Unlock() + nb.mu.Unlock() ret := base for _, peer := range peers { @@ -1483,9 +1483,9 @@ func (b *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tai // UpdateNetmapDelta. So only use PeerView in b.netMap for its NodeID, // and then look up the latest copy in b.peers which is updated in // response to UpdateNetmapDelta edits. - b.mu.Lock() - peer, ok := b.peers[peer.ID()] - b.mu.Unlock() + nb.mu.Lock() + peer, ok := nb.peers[peer.ID()] + nb.mu.Unlock() if ok && pred(peer) { ret = append(ret, peer) } @@ -1495,21 +1495,21 @@ func (b *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tai // PeerCaps returns the capabilities that remote src IP has to // ths current node. 
-func (b *nodeBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { - b.mu.Lock() - defer b.mu.Unlock() - return b.peerCapsLocked(src) +func (nb *nodeBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { + nb.mu.Lock() + defer nb.mu.Unlock() + return nb.peerCapsLocked(src) } -func (b *nodeBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap { - if b.netMap == nil { +func (nb *nodeBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap { + if nb.netMap == nil { return nil } - filt := b.filterAtomic.Load() + filt := nb.filterAtomic.Load() if filt == nil { return nil } - addrs := b.netMap.GetAddresses() + addrs := nb.netMap.GetAddresses() for i := range addrs.Len() { a := addrs.At(i) if !a.IsSingleIP() { @@ -1523,8 +1523,8 @@ func (b *nodeBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap { return nil } -func (b *nodeBackend) GetFilterForTest() *filter.Filter { - return b.filterAtomic.Load() +func (nb *nodeBackend) GetFilterForTest() *filter.Filter { + return nb.filterAtomic.Load() } // SetControlClientStatus is the callback invoked by the control client whenever it posts a new status. @@ -2034,14 +2034,14 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo return true } -func (c *nodeBackend) netMapWithPeers() *netmap.NetworkMap { - c.mu.Lock() - defer c.mu.Unlock() - if c.netMap == nil { +func (nb *nodeBackend) netMapWithPeers() *netmap.NetworkMap { + nb.mu.Lock() + defer nb.mu.Unlock() + if nb.netMap == nil { return nil } - nm := ptr.To(*c.netMap) // shallow clone - nm.Peers = slicesx.MapValues(c.peers) + nm := ptr.To(*nb.netMap) // shallow clone + nm.Peers = slicesx.MapValues(nb.peers) slices.SortFunc(nm.Peers, func(a, b tailcfg.NodeView) int { return cmp.Compare(a.ID(), b.ID()) }) @@ -2078,10 +2078,10 @@ func (b *LocalBackend) pickNewAutoExitNode() { b.send(ipn.Notify{Prefs: &newPrefs}) } -func (c *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) { - c.mu.Lock() - defer c.mu.Unlock() - if c.netMap == nil || len(c.peers) == 0 { +func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) { + nb.mu.Lock() + defer nb.mu.Unlock() + if nb.netMap == nil || len(nb.peers) == 0 { return false } @@ -2093,7 +2093,7 @@ func (c *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled boo for _, m := range muts { n, ok := mutableNodes[m.NodeIDBeingMutated()] if !ok { - nv, ok := c.peers[m.NodeIDBeingMutated()] + nv, ok := nb.peers[m.NodeIDBeingMutated()] if !ok { // TODO(bradfitz): unexpected metric? 
return false @@ -2104,7 +2104,7 @@ func (c *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled boo m.Apply(n) } for nid, n := range mutableNodes { - c.peers[nid] = n.View() + nb.peers[nid] = n.View() } return true } @@ -2265,10 +2265,10 @@ func (b *LocalBackend) PeersForTest() []tailcfg.NodeView { return b.currentNode().PeersForTest() } -func (b *nodeBackend) PeersForTest() []tailcfg.NodeView { - b.mu.Lock() - defer b.mu.Unlock() - ret := slicesx.MapValues(b.peers) +func (nb *nodeBackend) PeersForTest() []tailcfg.NodeView { + nb.mu.Lock() + defer nb.mu.Unlock() + ret := slicesx.MapValues(nb.peers) slices.SortFunc(ret, func(a, b tailcfg.NodeView) int { return cmp.Compare(a.ID(), b.ID()) }) @@ -2838,8 +2838,8 @@ func (b *LocalBackend) setFilter(f *filter.Filter) { b.e.SetFilter(f) } -func (c *nodeBackend) setFilter(f *filter.Filter) { - c.filterAtomic.Store(f) +func (nb *nodeBackend) setFilter(f *filter.Filter) { + nb.filterAtomic.Store(f) } var removeFromDefaultRoute = []netip.Prefix{ @@ -4773,10 +4773,10 @@ func (b *LocalBackend) NetMap() *netmap.NetworkMap { return b.currentNode().NetMap() } -func (c *nodeBackend) NetMap() *netmap.NetworkMap { - c.mu.Lock() - defer c.mu.Unlock() - return c.netMap +func (nb *nodeBackend) NetMap() *netmap.NetworkMap { + nb.mu.Lock() + defer nb.mu.Unlock() + return nb.netMap } func (b *LocalBackend) isEngineBlocked() bool { @@ -5018,10 +5018,10 @@ func shouldUseOneCGNATRoute(logf logger.Logf, mon *netmon.Monitor, controlKnobs return false } -func (c *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { - c.mu.Lock() - defer c.mu.Unlock() - return dnsConfigForNetmap(c.netMap, c.peers, prefs, selfExpired, logf, versionOS) +func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { + nb.mu.Lock() + defer nb.mu.Unlock() + return dnsConfigForNetmap(nb.netMap, nb.peers, prefs, selfExpired, logf, versionOS) } // dnsConfigForNetmap returns a *dns.Config for the given netmap, @@ -6144,12 +6144,12 @@ func (b *LocalBackend) setAutoExitNodeIDLockedOnEntry(unlock unlockOnce) (newPre return newPrefs } -func (c *nodeBackend) SetNetMap(nm *netmap.NetworkMap) { - c.mu.Lock() - defer c.mu.Unlock() - c.netMap = nm - c.updateNodeByAddrLocked() - c.updatePeersLocked() +func (nb *nodeBackend) SetNetMap(nm *netmap.NetworkMap) { + nb.mu.Lock() + defer nb.mu.Unlock() + nb.netMap = nm + nb.updateNodeByAddrLocked() + nb.updatePeersLocked() } // setNetMapLocked updates the LocalBackend state to reflect the newly @@ -6224,25 +6224,25 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.driveNotifyCurrentSharesLocked() } -func (b *nodeBackend) updateNodeByAddrLocked() { - nm := b.netMap +func (nb *nodeBackend) updateNodeByAddrLocked() { + nm := nb.netMap if nm == nil { - b.nodeByAddr = nil + nb.nodeByAddr = nil return } // Update the nodeByAddr index. - if b.nodeByAddr == nil { - b.nodeByAddr = map[netip.Addr]tailcfg.NodeID{} + if nb.nodeByAddr == nil { + nb.nodeByAddr = map[netip.Addr]tailcfg.NodeID{} } // First pass, mark everything unwanted. 
- for k := range b.nodeByAddr { - b.nodeByAddr[k] = 0 + for k := range nb.nodeByAddr { + nb.nodeByAddr[k] = 0 } addNode := func(n tailcfg.NodeView) { for _, ipp := range n.Addresses().All() { if ipp.IsSingleIP() { - b.nodeByAddr[ipp.Addr()] = n.ID() + nb.nodeByAddr[ipp.Addr()] = n.ID() } } } @@ -6253,34 +6253,34 @@ func (b *nodeBackend) updateNodeByAddrLocked() { addNode(p) } // Third pass, actually delete the unwanted items. - for k, v := range b.nodeByAddr { + for k, v := range nb.nodeByAddr { if v == 0 { - delete(b.nodeByAddr, k) + delete(nb.nodeByAddr, k) } } } -func (b *nodeBackend) updatePeersLocked() { - nm := b.netMap +func (nb *nodeBackend) updatePeersLocked() { + nm := nb.netMap if nm == nil { - b.peers = nil + nb.peers = nil return } // First pass, mark everything unwanted. - for k := range b.peers { - b.peers[k] = tailcfg.NodeView{} + for k := range nb.peers { + nb.peers[k] = tailcfg.NodeView{} } // Second pass, add everything wanted. for _, p := range nm.Peers { - mak.Set(&b.peers, p.ID(), p) + mak.Set(&nb.peers, p.ID(), p) } // Third pass, remove deleted things. - for k, v := range b.peers { + for k, v := range nb.peers { if !v.Valid() { - delete(b.peers, k) + delete(nb.peers, k) } } } @@ -6667,14 +6667,14 @@ func (b *LocalBackend) TestOnlyPublicKeys() (machineKey key.MachinePublic, nodeK // PeerHasCap reports whether the peer with the given Tailscale IP addresses // contains the given capability string, with any value(s). -func (b *nodeBackend) PeerHasCap(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { - b.mu.Lock() - defer b.mu.Unlock() - return b.peerHasCapLocked(addr, wantCap) +func (nb *nodeBackend) PeerHasCap(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { + nb.mu.Lock() + defer nb.mu.Unlock() + return nb.peerHasCapLocked(addr, wantCap) } -func (b *nodeBackend) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { - return b.peerCapsLocked(addr).HasCapability(wantCap) +func (nb *nodeBackend) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { + return nb.peerCapsLocked(addr).HasCapability(wantCap) } // SetDNS adds a DNS record for the given domain name & TXT record @@ -6737,16 +6737,16 @@ func peerAPIURL(ip netip.Addr, port uint16) string { return fmt.Sprintf("http://%v", netip.AddrPortFrom(ip, port)) } -func (c *nodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool { - return c.PeerAPIBase(p) != "" +func (nb *nodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool { + return nb.PeerAPIBase(p) != "" } // PeerAPIBase returns the "http://ip:port" URL base to reach peer's PeerAPI, // or the empty string if the peer is invalid or doesn't support PeerAPI. 
-func (c *nodeBackend) PeerAPIBase(p tailcfg.NodeView) string { - c.mu.Lock() - nm := c.netMap - c.mu.Unlock() +func (nb *nodeBackend) PeerAPIBase(p tailcfg.NodeView) string { + nb.mu.Lock() + nm := nb.netMap + nb.mu.Unlock() return peerAPIBase(nm, p) } @@ -6987,10 +6987,10 @@ func exitNodeCanProxyDNS(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg return "", false } -func (c *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { - c.mu.Lock() - defer c.mu.Unlock() - return exitNodeCanProxyDNS(c.netMap, c.peers, exitNodeID) +func (nb *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { + nb.mu.Lock() + defer nb.mu.Unlock() + return exitNodeCanProxyDNS(nb.netMap, nb.peers, exitNodeID) } // wireguardExitNodeDNSResolvers returns the DNS resolvers to use for a diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index e4d6d25bf..415c32ccf 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -87,17 +87,17 @@ func newNodeBackend() *nodeBackend { return cn } -func (c *nodeBackend) Self() tailcfg.NodeView { - c.mu.Lock() - defer c.mu.Unlock() - if c.netMap == nil { +func (nb *nodeBackend) Self() tailcfg.NodeView { + nb.mu.Lock() + defer nb.mu.Unlock() + if nb.netMap == nil { return tailcfg.NodeView{} } - return c.netMap.SelfNode + return nb.netMap.SelfNode } -func (c *nodeBackend) SelfUserID() tailcfg.UserID { - self := c.Self() +func (nb *nodeBackend) SelfUserID() tailcfg.UserID { + self := nb.Self() if !self.Valid() { return 0 } @@ -105,59 +105,59 @@ func (c *nodeBackend) SelfUserID() tailcfg.UserID { } // SelfHasCap reports whether the specified capability was granted to the self node in the most recent netmap. -func (c *nodeBackend) SelfHasCap(wantCap tailcfg.NodeCapability) bool { - return c.SelfHasCapOr(wantCap, false) +func (nb *nodeBackend) SelfHasCap(wantCap tailcfg.NodeCapability) bool { + return nb.SelfHasCapOr(wantCap, false) } // SelfHasCapOr is like [nodeBackend.SelfHasCap], but returns the specified default value // if the netmap is not available yet. -func (c *nodeBackend) SelfHasCapOr(wantCap tailcfg.NodeCapability, def bool) bool { - c.mu.Lock() - defer c.mu.Unlock() - if c.netMap == nil { +func (nb *nodeBackend) SelfHasCapOr(wantCap tailcfg.NodeCapability, def bool) bool { + nb.mu.Lock() + defer nb.mu.Unlock() + if nb.netMap == nil { return def } - return c.netMap.AllCaps.Contains(wantCap) + return nb.netMap.AllCaps.Contains(wantCap) } -func (c *nodeBackend) NetworkProfile() ipn.NetworkProfile { - c.mu.Lock() - defer c.mu.Unlock() +func (nb *nodeBackend) NetworkProfile() ipn.NetworkProfile { + nb.mu.Lock() + defer nb.mu.Unlock() return ipn.NetworkProfile{ // These are ok to call with nil netMap. - MagicDNSName: c.netMap.MagicDNSSuffix(), - DomainName: c.netMap.DomainName(), + MagicDNSName: nb.netMap.MagicDNSSuffix(), + DomainName: nb.netMap.DomainName(), } } // TODO(nickkhyl): update it to return a [tailcfg.DERPMapView]? 
-func (c *nodeBackend) DERPMap() *tailcfg.DERPMap { - c.mu.Lock() - defer c.mu.Unlock() - if c.netMap == nil { +func (nb *nodeBackend) DERPMap() *tailcfg.DERPMap { + nb.mu.Lock() + defer nb.mu.Unlock() + if nb.netMap == nil { return nil } - return c.netMap.DERPMap + return nb.netMap.DERPMap } -func (c *nodeBackend) NodeByAddr(ip netip.Addr) (_ tailcfg.NodeID, ok bool) { - c.mu.Lock() - defer c.mu.Unlock() - nid, ok := c.nodeByAddr[ip] +func (nb *nodeBackend) NodeByAddr(ip netip.Addr) (_ tailcfg.NodeID, ok bool) { + nb.mu.Lock() + defer nb.mu.Unlock() + nid, ok := nb.nodeByAddr[ip] return nid, ok } -func (c *nodeBackend) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) { - c.mu.Lock() - defer c.mu.Unlock() - if c.netMap == nil { +func (nb *nodeBackend) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) { + nb.mu.Lock() + defer nb.mu.Unlock() + if nb.netMap == nil { return 0, false } - if self := c.netMap.SelfNode; self.Valid() && self.Key() == k { + if self := nb.netMap.SelfNode; self.Valid() && self.Key() == k { return self.ID(), true } // TODO(bradfitz,nickkhyl): add nodeByKey like nodeByAddr instead of walking peers. - for _, n := range c.peers { + for _, n := range nb.peers { if n.Key() == k { return n.ID(), true } @@ -165,17 +165,17 @@ func (c *nodeBackend) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) { return 0, false } -func (c *nodeBackend) PeerByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) { - c.mu.Lock() - defer c.mu.Unlock() - n, ok := c.peers[id] +func (nb *nodeBackend) PeerByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) { + nb.mu.Lock() + defer nb.mu.Unlock() + n, ok := nb.peers[id] return n, ok } -func (c *nodeBackend) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok bool) { - c.mu.Lock() - nm := c.netMap - c.mu.Unlock() +func (nb *nodeBackend) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok bool) { + nb.mu.Lock() + nm := nb.netMap + nb.mu.Unlock() if nm == nil { return tailcfg.UserProfileView{}, false } @@ -184,10 +184,10 @@ func (c *nodeBackend) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok } // Peers returns all the current peers in an undefined order. -func (c *nodeBackend) Peers() []tailcfg.NodeView { - c.mu.Lock() - defer c.mu.Unlock() - return slicesx.MapValues(c.peers) +func (nb *nodeBackend) Peers() []tailcfg.NodeView { + nb.mu.Lock() + defer nb.mu.Unlock() + return slicesx.MapValues(nb.peers) } // unlockedNodesPermitted reports whether any peer with theUnsignedPeerAPIOnly bool set true has any of its allowed IPs @@ -196,12 +196,12 @@ func (c *nodeBackend) Peers() []tailcfg.NodeView { // TODO(nickkhyl): It is here temporarily until we can move the whole [LocalBackend.updateFilterLocked] here, // but change it so it builds and returns a filter for the current netmap/prefs instead of re-configuring the engine filter. // Something like (*nodeBackend).RebuildFilters() (filter, jailedFilter *filter.Filter, changed bool) perhaps? 
-func (c *nodeBackend) unlockedNodesPermitted(packetFilter []filter.Match) bool { - c.mu.Lock() - defer c.mu.Unlock() - return packetFilterPermitsUnlockedNodes(c.peers, packetFilter) +func (nb *nodeBackend) unlockedNodesPermitted(packetFilter []filter.Match) bool { + nb.mu.Lock() + defer nb.mu.Unlock() + return packetFilterPermitsUnlockedNodes(nb.peers, packetFilter) } -func (c *nodeBackend) filter() *filter.Filter { - return c.filterAtomic.Load() +func (nb *nodeBackend) filter() *filter.Filter { + return nb.filterAtomic.Load() } From b03a2a323b03519c7451c93b37f021255c54aee0 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 2 May 2025 21:05:09 -0700 Subject: [PATCH 0823/1708] tstest/integration: work around ETXTBSY flake This is a hack, but should suffice and be fast enough. I really want to figure out what's keeping that writable fd open. Fixes #15868 Change-Id: I285d836029355b11b7467841d31432cc5890a67e Signed-off-by: Brad Fitzpatrick --- tstest/integration/integration.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index 29d7c07fe..d64bfbbd9 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -770,6 +770,27 @@ func (d *Daemon) MustCleanShutdown(t testing.TB) { } } +// awaitTailscaledRunnable tries to run `tailscaled --version` until it +// works. This is an unsatisfying workaround for ETXTBSY we were seeing +// on GitHub Actions that aren't understood. It's not clear what's holding +// a writable fd to tailscaled after `go install` completes. +// See https://github.com/tailscale/tailscale/issues/15868. +func (n *TestNode) awaitTailscaledRunnable() error { + t := n.env.t + t.Helper() + if err := tstest.WaitFor(10*time.Second, func() error { + out, err := exec.Command(n.env.daemon, "--version").CombinedOutput() + if err == nil { + return nil + } + t.Logf("error running tailscaled --version: %v, %s", err, out) + return err + }); err != nil { + return fmt.Errorf("gave up trying to run tailscaled: %v", err) + } + return nil +} + // StartDaemon starts the node's tailscaled, failing if it fails to start. // StartDaemon ensures that the process will exit when the test completes. func (n *TestNode) StartDaemon() *Daemon { @@ -778,6 +799,11 @@ func (n *TestNode) StartDaemon() *Daemon { func (n *TestNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { t := n.env.t + + if err := n.awaitTailscaledRunnable(); err != nil { + t.Fatalf("awaitTailscaledRunnable: %v", err) + } + cmd := exec.Command(n.env.daemon) cmd.Args = append(cmd.Args, "--statedir="+n.dir, From 597d0e8fd5954965cdcd42b326ef460180406187 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 5 May 2025 07:55:39 -0700 Subject: [PATCH 0824/1708] ipn/ipnlocal, tailcfg: add MagicDNS opt-in attr for IPv6 AAAA records Until we turn on AAAA by default (which might make some people rely on Happy Eyeballs for targets without IPv6), this lets people turn it on explicitly if they want. We still should add a peer cap as well in the future to let a peer explicitly say that it's cool with IPv6. 
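For illustration only (not part of this change), here is roughly how the new
attribute is expected to flow end to end. The sketch assumes the attribute is
delivered presence-only via the self node's CapMap, like other node attrs;
only the identifiers added in the diff below are taken from this patch.

    // Control-plane side (assumed): grant the attribute on the self node.
    // No value is needed; the presence of the key is enough.
    self.CapMap = tailcfg.NodeCapMap{
        tailcfg.NodeAttrMagicDNSPeerAAAA: nil,
    }

    // Client side (mirrors the dnsConfigForNetmap change below): the
    // attribute surfaces in the netmap's AllCaps, which gates whether
    // AAAA records are emitted for peers.
    wantAAAA := nm.AllCaps.Contains(tailcfg.NodeAttrMagicDNSPeerAAAA)
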
Related: #9574 Updates #1813 Updates #1152 Change-Id: Iec6ec9b4b5db7a4dc700ecdf4a11146cc5303989 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 4 +++- tailcfg/tailcfg.go | 7 ++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9dfa62d6e..b2998d11c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5056,6 +5056,8 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. !nm.GetAddresses().ContainsFunc(tsaddr.PrefixIs4) dcfg.OnlyIPv6 = selfV6Only + wantAAAA := nm.AllCaps.Contains(tailcfg.NodeAttrMagicDNSPeerAAAA) + // Populate MagicDNS records. We do this unconditionally so that // quad-100 can always respond to MagicDNS queries, even if the OS // isn't configured to make MagicDNS resolution truly @@ -5092,7 +5094,7 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. // https://github.com/tailscale/tailscale/issues/1152 // tracks adding the right capability reporting to // enable AAAA in MagicDNS. - if addr.Addr().Is6() && have4 { + if addr.Addr().Is6() && have4 && !wantAAAA { continue } ips = append(ips, addr.Addr()) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 79ec72d2e..11a0d0830 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -160,7 +160,8 @@ type CapabilityVersion int // - 113: 2025-01-20: Client communicates to control whether funnel is enabled by sending Hostinfo.IngressEnabled (#14688) // - 114: 2025-01-30: NodeAttrMaxKeyDuration CapMap defined, clients might use it (no tailscaled code change) (#14829) // - 115: 2025-03-07: Client understands DERPRegion.NoMeasureNoHome. -const CurrentCapabilityVersion CapabilityVersion = 115 +// - 116: 2025-05-05: Client serves MagicDNS "AAAA" if NodeAttrMagicDNSPeerAAAA set on self node +const CurrentCapabilityVersion CapabilityVersion = 116 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -2493,6 +2494,10 @@ const ( // NodeAttrRelayClient permits the node to act as an underlay UDP relay // client. There are no expected values for this key in NodeCapMap. NodeAttrRelayClient NodeCapability = "relay:client" + + // NodeAttrMagicDNSPeerAAAA is a capability that tells the node's MagicDNS + // server to answer AAAA queries about its peers. See tailscale/tailscale#1152. + NodeAttrMagicDNSPeerAAAA NodeCapability = "magicdns-aaaa" ) // SetDNSRequest is a request to add a DNS record. From 62182f3bcf15504a20d2a8c146be10f83954ae39 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 6 May 2025 14:52:16 +0100 Subject: [PATCH 0825/1708] cmd/k8s-operator,k8s-operator/api-proxy: move k8s proxy code to library (#15857) The defaultEnv and defaultBool functions are copied over temporarily to minimise diff. 
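For illustration only, the extracted package can now be wired up from a binary
other than the operator. A minimal sketch of what a future cmd/k8s-proxy main
might do; only the apiproxy identifiers are real (introduced below), the
surrounding setup is assumed:

    import apiproxy "tailscale.com/k8s-operator/api-proxy"

    // maybeStartProxy launches the API server proxy when it is enabled
    // via the AUTH_PROXY/APISERVER_PROXY environment variables.
    func maybeStartProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsnet.Server) {
        mode := apiproxy.ParseAPIProxyMode()
        if mode == apiproxy.APIServerProxyModeDisabled {
            return
        }
        apiproxy.MaybeLaunchAPIServerProxy(zlog, restConfig, ts, mode)
    }
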
This lays the ground work for having both the operator and the new k8s-proxy binary implement the API proxy Updates #13358 Change-Id: Ieacc79af64df2f13b27a18135517bb31c80a5a02 Signed-off-by: Tom Proctor --- cmd/k8s-operator/depaware.txt | 5 +- cmd/k8s-operator/operator.go | 7 +-- k8s-operator/api-proxy/doc.go | 8 ++++ k8s-operator/api-proxy/env.go | 29 ++++++++++++ .../api-proxy}/proxy.go | 46 +++++++++---------- .../api-proxy}/proxy_test.go | 2 +- 6 files changed, 68 insertions(+), 29 deletions(-) create mode 100644 k8s-operator/api-proxy/doc.go create mode 100644 k8s-operator/api-proxy/env.go rename {cmd/k8s-operator => k8s-operator/api-proxy}/proxy.go (93%) rename {cmd/k8s-operator => k8s-operator/api-proxy}/proxy_test.go (99%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 186c5a0c0..544fe9089 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -840,9 +840,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/ipn/store/kubestore from tailscale.com/cmd/k8s-operator+ tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ tailscale.com/k8s-operator from tailscale.com/cmd/k8s-operator + tailscale.com/k8s-operator/api-proxy from tailscale.com/cmd/k8s-operator tailscale.com/k8s-operator/apis from tailscale.com/k8s-operator/apis/v1alpha1 tailscale.com/k8s-operator/apis/v1alpha1 from tailscale.com/cmd/k8s-operator+ - tailscale.com/k8s-operator/sessionrecording from tailscale.com/cmd/k8s-operator + tailscale.com/k8s-operator/sessionrecording from tailscale.com/k8s-operator/api-proxy tailscale.com/k8s-operator/sessionrecording/spdy from tailscale.com/k8s-operator/sessionrecording tailscale.com/k8s-operator/sessionrecording/tsrecorder from tailscale.com/k8s-operator/sessionrecording+ tailscale.com/k8s-operator/sessionrecording/ws from tailscale.com/k8s-operator/sessionrecording @@ -945,7 +946,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/clientmetric from tailscale.com/cmd/k8s-operator+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/cmpver from tailscale.com/clientupdate+ - tailscale.com/util/ctxkey from tailscale.com/cmd/k8s-operator+ + tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ tailscale.com/util/dnsname from tailscale.com/appc+ diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 1f637927b..9c35a7cec 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -45,6 +45,7 @@ import ( "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/store/kubestore" + apiproxy "tailscale.com/k8s-operator/api-proxy" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tsnet" @@ -102,8 +103,8 @@ func main() { // The operator can run either as a plain operator or it can // additionally act as api-server proxy // https://tailscale.com/kb/1236/kubernetes-operator/?q=kubernetes#accessing-the-kubernetes-control-plane-using-an-api-server-proxy. 
- mode := parseAPIProxyMode() - if mode == apiserverProxyModeDisabled { + mode := apiproxy.ParseAPIProxyMode() + if mode == apiproxy.APIServerProxyModeDisabled { hostinfo.SetApp(kubetypes.AppOperator) } else { hostinfo.SetApp(kubetypes.AppAPIServerProxy) @@ -112,7 +113,7 @@ func main() { s, tsc := initTSNet(zlog) defer s.Close() restConfig := config.GetConfigOrDie() - maybeLaunchAPIServerProxy(zlog, restConfig, s, mode) + apiproxy.MaybeLaunchAPIServerProxy(zlog, restConfig, s, mode) rOpts := reconcilerOpts{ log: zlog, tsServer: s, diff --git a/k8s-operator/api-proxy/doc.go b/k8s-operator/api-proxy/doc.go new file mode 100644 index 000000000..89d890959 --- /dev/null +++ b/k8s-operator/api-proxy/doc.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package apiproxy contains the Kubernetes API Proxy implementation used by +// k8s-operator and k8s-proxy. +package apiproxy diff --git a/k8s-operator/api-proxy/env.go b/k8s-operator/api-proxy/env.go new file mode 100644 index 000000000..c0640ab1e --- /dev/null +++ b/k8s-operator/api-proxy/env.go @@ -0,0 +1,29 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package apiproxy + +import ( + "os" + + "tailscale.com/types/opt" +) + +func defaultBool(envName string, defVal bool) bool { + vs := os.Getenv(envName) + if vs == "" { + return defVal + } + v, _ := opt.Bool(vs).Get() + return v +} + +func defaultEnv(envName, defVal string) string { + v := os.Getenv(envName) + if v == "" { + return defVal + } + return v +} diff --git a/cmd/k8s-operator/proxy.go b/k8s-operator/api-proxy/proxy.go similarity index 93% rename from cmd/k8s-operator/proxy.go rename to k8s-operator/api-proxy/proxy.go index 01383a53d..7c7260b94 100644 --- a/cmd/k8s-operator/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -3,7 +3,7 @@ //go:build !plan9 -package main +package apiproxy import ( "crypto/tls" @@ -37,15 +37,15 @@ var ( whoIsKey = ctxkey.New("", (*apitype.WhoIsResponse)(nil)) ) -type apiServerProxyMode int +type APIServerProxyMode int -func (a apiServerProxyMode) String() string { +func (a APIServerProxyMode) String() string { switch a { - case apiserverProxyModeDisabled: + case APIServerProxyModeDisabled: return "disabled" - case apiserverProxyModeEnabled: + case APIServerProxyModeEnabled: return "auth" - case apiserverProxyModeNoAuth: + case APIServerProxyModeNoAuth: return "noauth" default: return "unknown" @@ -53,12 +53,12 @@ func (a apiServerProxyMode) String() string { } const ( - apiserverProxyModeDisabled apiServerProxyMode = iota - apiserverProxyModeEnabled - apiserverProxyModeNoAuth + APIServerProxyModeDisabled APIServerProxyMode = iota + APIServerProxyModeEnabled + APIServerProxyModeNoAuth ) -func parseAPIProxyMode() apiServerProxyMode { +func ParseAPIProxyMode() APIServerProxyMode { haveAuthProxyEnv := os.Getenv("AUTH_PROXY") != "" haveAPIProxyEnv := os.Getenv("APISERVER_PROXY") != "" switch { @@ -67,34 +67,34 @@ func parseAPIProxyMode() apiServerProxyMode { case haveAuthProxyEnv: var authProxyEnv = defaultBool("AUTH_PROXY", false) // deprecated if authProxyEnv { - return apiserverProxyModeEnabled + return APIServerProxyModeEnabled } - return apiserverProxyModeDisabled + return APIServerProxyModeDisabled case haveAPIProxyEnv: var apiProxyEnv = defaultEnv("APISERVER_PROXY", "") // true, false or "noauth" switch apiProxyEnv { case "true": - return apiserverProxyModeEnabled + return APIServerProxyModeEnabled case "false", "": 
- return apiserverProxyModeDisabled + return APIServerProxyModeDisabled case "noauth": - return apiserverProxyModeNoAuth + return APIServerProxyModeNoAuth default: panic(fmt.Sprintf("unknown APISERVER_PROXY value %q", apiProxyEnv)) } } - return apiserverProxyModeDisabled + return APIServerProxyModeDisabled } // maybeLaunchAPIServerProxy launches the auth proxy, which is a small HTTP server // that authenticates requests using the Tailscale LocalAPI and then proxies // them to the kube-apiserver. -func maybeLaunchAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, s *tsnet.Server, mode apiServerProxyMode) { - if mode == apiserverProxyModeDisabled { +func MaybeLaunchAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, s *tsnet.Server, mode APIServerProxyMode) { + if mode == APIServerProxyModeDisabled { return } startlog := zlog.Named("launchAPIProxy") - if mode == apiserverProxyModeNoAuth { + if mode == APIServerProxyModeNoAuth { restConfig = rest.AnonymousClientConfig(restConfig) } cfg, err := restConfig.TransportConfig() @@ -132,8 +132,8 @@ func maybeLaunchAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, // are passed through to the Kubernetes API. // // It never returns. -func runAPIServerProxy(ts *tsnet.Server, rt http.RoundTripper, log *zap.SugaredLogger, mode apiServerProxyMode, host string) { - if mode == apiserverProxyModeDisabled { +func runAPIServerProxy(ts *tsnet.Server, rt http.RoundTripper, log *zap.SugaredLogger, mode APIServerProxyMode, host string) { + if mode == APIServerProxyModeDisabled { return } ln, err := ts.Listen("tcp", ":443") @@ -192,7 +192,7 @@ type apiserverProxy struct { lc *local.Client rp *httputil.ReverseProxy - mode apiServerProxyMode + mode APIServerProxyMode ts *tsnet.Server upstreamURL *url.URL } @@ -285,7 +285,7 @@ func (ap *apiserverProxy) execForProto(w http.ResponseWriter, r *http.Request, p func (h *apiserverProxy) addImpersonationHeadersAsRequired(r *http.Request) { r.URL.Scheme = h.upstreamURL.Scheme r.URL.Host = h.upstreamURL.Host - if h.mode == apiserverProxyModeNoAuth { + if h.mode == APIServerProxyModeNoAuth { // If we are not providing authentication, then we are just // proxying to the Kubernetes API, so we don't need to do // anything else. diff --git a/cmd/k8s-operator/proxy_test.go b/k8s-operator/api-proxy/proxy_test.go similarity index 99% rename from cmd/k8s-operator/proxy_test.go rename to k8s-operator/api-proxy/proxy_test.go index d1d5733e7..71bf65648 100644 --- a/cmd/k8s-operator/proxy_test.go +++ b/k8s-operator/api-proxy/proxy_test.go @@ -3,7 +3,7 @@ //go:build !plan9 -package main +package apiproxy import ( "net/http" From cf6a593196ae0173ccccdd0c51c14cee2d8bcf93 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 5 May 2025 13:24:41 -0700 Subject: [PATCH 0826/1708] cmd/tailscale/cli: rename "--posture-checking" to "--report-posture" For consistency with other flags, per Slack chat. 
Updates #5902 Change-Id: I7ae1e4c97b37185573926f5fafda82cf8b46f071 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/cli.go | 15 +++++++++++---- cmd/tailscale/cli/cli_test.go | 33 +++++++++++++++++++-------------- cmd/tailscale/cli/set.go | 6 +++--- cmd/tailscale/cli/up.go | 6 +++--- cmd/tailscale/cli/up_test.go | 2 +- ipn/prefs.go | 5 +++++ 6 files changed, 42 insertions(+), 25 deletions(-) diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index d02db38f1..2fbee516a 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -65,15 +65,22 @@ func newFlagSet(name string) *flag.FlagSet { func CleanUpArgs(args []string) []string { out := make([]string, 0, len(args)) for _, arg := range args { + switch { // Rewrite --authkey to --auth-key, and --authkey=x to --auth-key=x, // and the same for the -authkey variant. - switch { case arg == "--authkey", arg == "-authkey": arg = "--auth-key" case strings.HasPrefix(arg, "--authkey="), strings.HasPrefix(arg, "-authkey="): - arg = strings.TrimLeft(arg, "-") - arg = strings.TrimPrefix(arg, "authkey=") - arg = "--auth-key=" + arg + _, val, _ := strings.Cut(arg, "=") + arg = "--auth-key=" + val + + // And the same, for posture-checking => report-posture + case arg == "--posture-checking", arg == "-posture-checking": + arg = "--report-posture" + case strings.HasPrefix(arg, "--posture-checking="), strings.HasPrefix(arg, "-posture-checking="): + _, val, _ := strings.Cut(arg, "=") + arg = "--report-posture=" + val + } out = append(out, arg) } diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 4f6bdab2e..9aa3693fd 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -605,7 +605,7 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) { want: "", }, { - name: "losing_posture_checking", + name: "losing_report_posture", flags: []string{"--accept-dns"}, curPrefs: &ipn.Prefs{ ControlURL: ipn.DefaultControlURL, @@ -615,7 +615,7 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) { NetfilterMode: preftype.NetfilterOn, NoStatefulFiltering: opt.NewBool(true), }, - want: accidentalUpPrefix + " --accept-dns --posture-checking", + want: accidentalUpPrefix + " --accept-dns --report-posture", }, } for _, tt := range tests { @@ -1394,23 +1394,28 @@ var cmpIP = cmp.Comparer(func(a, b netip.Addr) bool { }) func TestCleanUpArgs(t *testing.T) { + type S = []string c := qt.New(t) tests := []struct { in []string want []string }{ - {in: []string{"something"}, want: []string{"something"}}, - {in: []string{}, want: []string{}}, - {in: []string{"--authkey=0"}, want: []string{"--auth-key=0"}}, - {in: []string{"a", "--authkey=1", "b"}, want: []string{"a", "--auth-key=1", "b"}}, - {in: []string{"a", "--auth-key=2", "b"}, want: []string{"a", "--auth-key=2", "b"}}, - {in: []string{"a", "-authkey=3", "b"}, want: []string{"a", "--auth-key=3", "b"}}, - {in: []string{"a", "-auth-key=4", "b"}, want: []string{"a", "-auth-key=4", "b"}}, - {in: []string{"a", "--authkey", "5", "b"}, want: []string{"a", "--auth-key", "5", "b"}}, - {in: []string{"a", "-authkey", "6", "b"}, want: []string{"a", "--auth-key", "6", "b"}}, - {in: []string{"a", "authkey", "7", "b"}, want: []string{"a", "authkey", "7", "b"}}, - {in: []string{"--authkeyexpiry", "8"}, want: []string{"--authkeyexpiry", "8"}}, - {in: []string{"--auth-key-expiry", "9"}, want: []string{"--auth-key-expiry", "9"}}, + {in: S{"something"}, want: S{"something"}}, + {in: S{}, want: S{}}, + {in: S{"--authkey=0"}, want: S{"--auth-key=0"}}, + {in: S{"a", 
"--authkey=1", "b"}, want: S{"a", "--auth-key=1", "b"}}, + {in: S{"a", "--auth-key=2", "b"}, want: S{"a", "--auth-key=2", "b"}}, + {in: S{"a", "-authkey=3", "b"}, want: S{"a", "--auth-key=3", "b"}}, + {in: S{"a", "-auth-key=4", "b"}, want: S{"a", "-auth-key=4", "b"}}, + {in: S{"a", "--authkey", "5", "b"}, want: S{"a", "--auth-key", "5", "b"}}, + {in: S{"a", "-authkey", "6", "b"}, want: S{"a", "--auth-key", "6", "b"}}, + {in: S{"a", "authkey", "7", "b"}, want: S{"a", "authkey", "7", "b"}}, + {in: S{"--authkeyexpiry", "8"}, want: S{"--authkeyexpiry", "8"}}, + {in: S{"--auth-key-expiry", "9"}, want: S{"--auth-key-expiry", "9"}}, + + {in: S{"--posture-checking"}, want: S{"--report-posture"}}, + {in: S{"-posture-checking"}, want: S{"--report-posture"}}, + {in: S{"--posture-checking=nein"}, want: S{"--report-posture=nein"}}, } for _, tt := range tests { diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index f4ea674ec..aa5966698 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -60,7 +60,7 @@ type setArgsT struct { forceDaemon bool updateCheck bool updateApply bool - postureChecking bool + reportPosture bool snat bool statefulFiltering bool netfilterMode string @@ -83,7 +83,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf.BoolVar(&setArgs.advertiseConnector, "advertise-connector", false, "offer to be an app connector for domain specific internet traffic for the tailnet") setf.BoolVar(&setArgs.updateCheck, "update-check", true, "notify about available Tailscale updates") setf.BoolVar(&setArgs.updateApply, "auto-update", false, "automatically update to the latest available version") - setf.BoolVar(&setArgs.postureChecking, "posture-checking", false, "allow management plane to gather device posture information") + setf.BoolVar(&setArgs.reportPosture, "report-posture", false, "allow management plane to gather device posture information") setf.BoolVar(&setArgs.runWebClient, "webclient", false, "expose the web interface for managing this node over Tailscale at port 5252") setf.StringVar(&setArgs.relayServerPort, "relay-server-port", "", hidden+"UDP port number (0 will pick a random unused port) for the relay server to bind to, on all interfaces, or empty string to disable relay server functionality") @@ -156,7 +156,7 @@ func runSet(ctx context.Context, args []string) (retErr error) { AppConnector: ipn.AppConnectorPrefs{ Advertise: setArgs.advertiseConnector, }, - PostureChecking: setArgs.postureChecking, + PostureChecking: setArgs.reportPosture, NoStatefulFiltering: opt.NewBool(!setArgs.statefulFiltering), }, } diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index d1e813b95..e4bb6f576 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -109,7 +109,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { upf.StringVar(&upArgs.advertiseRoutes, "advertise-routes", "", "routes to advertise to other nodes (comma-separated, e.g. 
\"10.0.0.0/8,192.168.0.0/24\") or empty string to not advertise routes") upf.BoolVar(&upArgs.advertiseConnector, "advertise-connector", false, "advertise this node as an app connector") upf.BoolVar(&upArgs.advertiseDefaultRoute, "advertise-exit-node", false, "offer to be an exit node for internet traffic for the tailnet") - upf.BoolVar(&upArgs.postureChecking, "posture-checking", false, hidden+"allow management plane to gather device posture information") + upf.BoolVar(&upArgs.postureChecking, "report-posture", false, hidden+"allow management plane to gather device posture information") if safesocket.GOOSUsesPeerCreds(goos) { upf.StringVar(&upArgs.opUser, "operator", "", "Unix username to allow to operate on tailscaled without sudo") @@ -772,7 +772,7 @@ func init() { addPrefFlagMapping("update-check", "AutoUpdate.Check") addPrefFlagMapping("auto-update", "AutoUpdate.Apply") addPrefFlagMapping("advertise-connector", "AppConnector") - addPrefFlagMapping("posture-checking", "PostureChecking") + addPrefFlagMapping("report-posture", "PostureChecking") addPrefFlagMapping("relay-server-port", "RelayServerPort") } @@ -1050,7 +1050,7 @@ func prefsToFlags(env upCheckEnv, prefs *ipn.Prefs) (flagVal map[string]any) { set(prefs.NetfilterMode.String()) case "unattended": set(prefs.ForceDaemon) - case "posture-checking": + case "report-posture": set(prefs.PostureChecking) } }) diff --git a/cmd/tailscale/cli/up_test.go b/cmd/tailscale/cli/up_test.go index 2c80ae94d..eb06f84dc 100644 --- a/cmd/tailscale/cli/up_test.go +++ b/cmd/tailscale/cli/up_test.go @@ -33,7 +33,7 @@ var validUpFlags = set.Of( "netfilter-mode", "nickname", "operator", - "posture-checking", + "report-posture", "qr", "reset", "shields-up", diff --git a/ipn/prefs.go b/ipn/prefs.go index 1c9d71d73..caf9ccfc3 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -235,6 +235,11 @@ type Prefs struct { // PostureChecking enables the collection of information used for device // posture checks. + // + // Note: this should be named ReportPosture, but it was shipped as + // PostureChecking in some early releases and this JSON field is written to + // disk, so we just keep its old name. (akin to CorpDNS which is an internal + // pref name that doesn't match the public interface) PostureChecking bool // NetfilterKind specifies what netfilter implementation to use. 
From 068d5ab6558f8ed7f6e6ecfb9dadb1ad5696966c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 2 May 2025 17:49:23 -0700 Subject: [PATCH 0827/1708] feature/taildrop: move rest of Taildrop out of LocalBackend Updates #12614 Change-Id: If451dec1d796f6a4216fe485975c87f0c62a53e5 Signed-off-by: Brad Fitzpatrick Co-authored-by: Nick Khyl --- cmd/k8s-operator/depaware.txt | 4 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tailscaled/taildrop_omit.go | 15 - cmd/tailscaled/tailscaled.go | 1 - feature/relayserver/relayserver.go | 2 +- feature/taildrop/ext.go | 398 +++++++++++++++++- feature/taildrop/localapi.go | 44 +- .../taildrop.go => feature/taildrop/paths.go | 23 +- feature/taildrop/peerapi.go | 30 +- feature/taildrop/peerapi_test.go | 29 +- feature/taildrop/target_test.go | 73 ++++ ipn/ipnext/ipnext.go | 49 ++- ipn/ipnlocal/drive.go | 7 +- ipn/ipnlocal/extension_host.go | 36 ++ ipn/ipnlocal/extension_host_test.go | 5 +- ipn/ipnlocal/local.go | 151 +++---- ipn/ipnlocal/peerapi.go | 2 - ipn/ipnlocal/taildrop.go | 280 ------------ ipn/ipnlocal/taildrop_omit.go | 12 - ipn/ipnlocal/taildrop_test.go | 75 ---- types/netmap/netmap.go | 8 + 21 files changed, 691 insertions(+), 555 deletions(-) delete mode 100644 cmd/tailscaled/taildrop_omit.go rename cmd/tailscaled/taildrop.go => feature/taildrop/paths.go (89%) create mode 100644 feature/taildrop/target_test.go delete mode 100644 ipn/ipnlocal/taildrop.go delete mode 100644 ipn/ipnlocal/taildrop_omit.go delete mode 100644 ipn/ipnlocal/taildrop_test.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 544fe9089..53a37fe01 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -908,7 +908,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/sessionrecording from tailscale.com/k8s-operator/sessionrecording+ tailscale.com/syncs from tailscale.com/control/controlknobs+ tailscale.com/tailcfg from tailscale.com/client/local+ - tailscale.com/taildrop from tailscale.com/ipn/ipnlocal+ + tailscale.com/taildrop from tailscale.com/feature/taildrop tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tempfork/httprec from tailscale.com/control/controlclient @@ -965,7 +965,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag - tailscale.com/util/osshare from tailscale.com/ipn/ipnlocal + tailscale.com/util/osshare from tailscale.com/ipn/ipnlocal+ tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal tailscale.com/util/progresstracking from tailscale.com/feature/taildrop tailscale.com/util/race from tailscale.com/net/dns/resolver diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index c5d5a7b2d..aa11fb9f3 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -359,7 +359,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de LD 💣 tailscale.com/ssh/tailssh from tailscale.com/cmd/tailscaled tailscale.com/syncs from tailscale.com/cmd/tailscaled+ tailscale.com/tailcfg from tailscale.com/client/local+ - tailscale.com/taildrop from tailscale.com/ipn/ipnlocal+ + tailscale.com/taildrop from tailscale.com/feature/taildrop tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal 
LD tailscale.com/tempfork/gliderlabs/ssh from tailscale.com/ssh/tailssh tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock diff --git a/cmd/tailscaled/taildrop_omit.go b/cmd/tailscaled/taildrop_omit.go deleted file mode 100644 index 3b7669391..000000000 --- a/cmd/tailscaled/taildrop_omit.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build ts_omit_taildrop - -package main - -import ( - "tailscale.com/ipn/ipnlocal" - "tailscale.com/types/logger" -) - -func configureTaildrop(logf logger.Logf, lb *ipnlocal.LocalBackend) { - // Nothing. -} diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 4b0dc95f9..87750bc5d 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -660,7 +660,6 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID Socket: args.socketpath, UseSocketOnly: args.socketpath != paths.DefaultTailscaledSocket(), }) - configureTaildrop(logf, lb) if err := ns.Start(lb); err != nil { log.Fatalf("failed to start netstack: %v", err) } diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index e5c2afc17..87aba4228 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -133,7 +133,7 @@ func (e *extension) relayServerOrInit() (relayServer, error) { } func handlePeerAPIRelayAllocateEndpoint(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { - e, ok := h.LocalBackend().FindExtensionByName(featureName).(*extension) + e, ok := ipnlocal.GetExt[*extension](h.LocalBackend()) if !ok { http.Error(w, "relay failed to initialize", http.StatusServiceUnavailable) return diff --git a/feature/taildrop/ext.go b/feature/taildrop/ext.go index b7cfdec72..b86c0f926 100644 --- a/feature/taildrop/ext.go +++ b/feature/taildrop/ext.go @@ -4,10 +4,30 @@ package taildrop import ( + "cmp" + "context" + "errors" + "fmt" + "io" + "maps" + "os" + "path/filepath" + "slices" + "strings" + "sync" + "sync/atomic" + + "tailscale.com/client/tailscale/apitype" + "tailscale.com/ipn" "tailscale.com/ipn/ipnext" - "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tailcfg" "tailscale.com/taildrop" + "tailscale.com/tstime" + "tailscale.com/types/empty" "tailscale.com/types/logger" + "tailscale.com/util/osshare" + "tailscale.com/util/set" ) func init() { @@ -15,38 +35,374 @@ func init() { } func newExtension(logf logger.Logf, b ipnext.SafeBackend) (ipnext.Extension, error) { - return &extension{ - logf: logger.WithPrefix(logf, "taildrop: "), - }, nil + e := &Extension{ + sb: b, + stateStore: b.Sys().StateStore.Get(), + logf: logger.WithPrefix(logf, "taildrop: "), + } + e.setPlatformDefaultDirectFileRoot() + return e, nil } -type extension struct { - logf logger.Logf - sb ipnext.SafeBackend - mgr *taildrop.Manager +// Extension implements Taildrop. +type Extension struct { + logf logger.Logf + sb ipnext.SafeBackend + stateStore ipn.StateStore + host ipnext.Host // from Init + + // directFileRoot, if non-empty, means to write received files + // directly to this directory, without staging them in an + // intermediate buffered directory for "pick-up" later. If + // empty, the files are received in a daemon-owned location + // and the localapi is used to enumerate, download, and delete + // them. 
This is used on macOS where the GUI lifetime is the + // same as the Network Extension lifetime and we can thus avoid + // double-copying files by writing them to the right location + // immediately. + // It's also used on several NAS platforms (Synology, TrueNAS, etc) + // but in that case DoFinalRename is also set true, which moves the + // *.partial file to its final name on completion. + directFileRoot string + + nodeBackendForTest ipnext.NodeBackend // if non-nil, pretend we're this node state for tests + + mu sync.Mutex // Lock order: lb.mu > e.mu + backendState ipn.State + selfUID tailcfg.UserID + capFileSharing bool + fileWaiters set.HandleSet[context.CancelFunc] // of wake-up funcs + mgr atomic.Pointer[taildrop.Manager] // mutex held to write; safe to read without lock; + // outgoingFiles keeps track of Taildrop outgoing files keyed to their OutgoingFile.ID + outgoingFiles map[string]*ipn.OutgoingFile } -func (e *extension) Name() string { +func (e *Extension) Name() string { return "taildrop" } -func (e *extension) Init(h ipnext.Host) error { - // TODO(bradfitz): move init of taildrop.Manager from ipnlocal/peerapi.go to - // here - e.mgr = nil +func (e *Extension) Init(h ipnext.Host) error { + e.host = h + + osshare.SetFileSharingEnabled(false, e.logf) + + h.Hooks().ProfileStateChange.Add(e.onChangeProfile) + h.Hooks().OnSelfChange.Add(e.onSelfChange) + h.Hooks().MutateNotifyLocked.Add(e.setNotifyFilesWaiting) + h.Hooks().SetPeerStatus.Add(e.setPeerStatus) + h.Hooks().BackendStateChange.Add(e.onBackendStateChange) return nil } -func (e *extension) Shutdown() error { - lb, ok := e.sb.(*ipnlocal.LocalBackend) - if !ok { - return nil +func (e *Extension) onBackendStateChange(st ipn.State) { + e.mu.Lock() + defer e.mu.Unlock() + e.backendState = st +} + +func (e *Extension) onSelfChange(self tailcfg.NodeView) { + e.mu.Lock() + defer e.mu.Unlock() + + e.selfUID = 0 + if self.Valid() { + e.selfUID = self.User() + } + e.capFileSharing = self.Valid() && self.CapMap().Contains(tailcfg.CapabilityFileSharing) + osshare.SetFileSharingEnabled(e.capFileSharing, e.logf) +} + +func (e *Extension) setMgrLocked(mgr *taildrop.Manager) { + if old := e.mgr.Swap(mgr); old != nil { + old.Shutdown() + } +} + +func (e *Extension) onChangeProfile(profile ipn.LoginProfileView, _ ipn.PrefsView, sameNode bool) { + e.mu.Lock() + defer e.mu.Unlock() + + uid := profile.UserProfile().ID + activeLogin := profile.UserProfile().LoginName + + if uid == 0 { + e.setMgrLocked(nil) + e.outgoingFiles = nil + return } - if mgr, err := lb.TaildropManager(); err == nil { - mgr.Shutdown() - } else { - e.logf("taildrop: failed to shutdown taildrop manager: %v", err) + + if sameNode && e.manager() != nil { + return + } + + // If we have a netmap, create a taildrop manager. + fileRoot, isDirectFileMode := e.fileRoot(uid, activeLogin) + if fileRoot == "" { + e.logf("no Taildrop directory configured") + } + e.setMgrLocked(taildrop.ManagerOptions{ + Logf: e.logf, + Clock: tstime.DefaultClock{Clock: e.sb.Clock()}, + State: e.stateStore, + Dir: fileRoot, + DirectFileMode: isDirectFileMode, + SendFileNotify: e.sendFileNotify, + }.New()) +} + +// fileRoot returns where to store Taildrop files for the given user and whether +// to write received files directly to this directory, without staging them in +// an intermediate buffered directory for "pick-up" later. +// +// It is safe to call this with b.mu held but it does not require it or acquire +// it itself. 
+func (e *Extension) fileRoot(uid tailcfg.UserID, activeLogin string) (root string, isDirect bool) { + if v := e.directFileRoot; v != "" { + return v, true } + varRoot := e.sb.TailscaleVarRoot() + if varRoot == "" { + e.logf("Taildrop disabled; no state directory") + return "", false + } + + if activeLogin == "" { + e.logf("taildrop: no active login; can't select a target directory") + return "", false + } + + baseDir := fmt.Sprintf("%s-uid-%d", + strings.ReplaceAll(activeLogin, "@", "-"), + uid) + dir := filepath.Join(varRoot, "files", baseDir) + if err := os.MkdirAll(dir, 0700); err != nil { + e.logf("Taildrop disabled; error making directory: %v", err) + return "", false + } + return dir, false +} + +// hasCapFileSharing reports whether the current node has the file sharing +// capability. +func (e *Extension) hasCapFileSharing() bool { + e.mu.Lock() + defer e.mu.Unlock() + return e.capFileSharing +} + +// manager returns the active taildrop.Manager, or nil. +// +// Methods on a nil Manager are safe to call. +func (e *Extension) manager() *taildrop.Manager { + return e.mgr.Load() +} + +func (e *Extension) Clock() tstime.Clock { + return e.sb.Clock() +} + +func (e *Extension) Shutdown() error { + e.manager().Shutdown() // no-op on nil receiver return nil } + +func (e *Extension) sendFileNotify() { + mgr := e.manager() + if mgr == nil { + return + } + + var n ipn.Notify + + e.mu.Lock() + for _, wakeWaiter := range e.fileWaiters { + wakeWaiter() + } + n.IncomingFiles = mgr.IncomingFiles() + e.mu.Unlock() + + e.host.SendNotifyAsync(n) +} + +func (e *Extension) setNotifyFilesWaiting(n *ipn.Notify) { + if e.manager().HasFilesWaiting() { + n.FilesWaiting = &empty.Message{} + } +} + +func (e *Extension) setPeerStatus(ps *ipnstate.PeerStatus, p tailcfg.NodeView, nb ipnext.NodeBackend) { + ps.TaildropTarget = e.taildropTargetStatus(p, nb) +} + +func (e *Extension) removeFileWaiter(handle set.Handle) { + e.mu.Lock() + defer e.mu.Unlock() + delete(e.fileWaiters, handle) +} + +func (e *Extension) addFileWaiter(wakeWaiter context.CancelFunc) set.Handle { + e.mu.Lock() + defer e.mu.Unlock() + return e.fileWaiters.Add(wakeWaiter) +} + +func (e *Extension) WaitingFiles() ([]apitype.WaitingFile, error) { + return e.manager().WaitingFiles() +} + +// AwaitWaitingFiles is like WaitingFiles but blocks while ctx is not done, +// waiting for any files to be available. +// +// On return, exactly one of the results will be non-empty or non-nil, +// respectively. +func (e *Extension) AwaitWaitingFiles(ctx context.Context) ([]apitype.WaitingFile, error) { + if ff, err := e.WaitingFiles(); err != nil || len(ff) > 0 { + return ff, err + } + if err := ctx.Err(); err != nil { + return nil, err + } + for { + gotFile, gotFileCancel := context.WithCancel(context.Background()) + defer gotFileCancel() + + handle := e.addFileWaiter(gotFileCancel) + defer e.removeFileWaiter(handle) + + // Now that we've registered ourselves, check again, in case + // of race. Otherwise there's a small window where we could + // miss a file arrival and wait forever. 
+ if ff, err := e.WaitingFiles(); err != nil || len(ff) > 0 { + return ff, err + } + + select { + case <-gotFile.Done(): + if ff, err := e.WaitingFiles(); err != nil || len(ff) > 0 { + return ff, err + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (e *Extension) DeleteFile(name string) error { + return e.manager().DeleteFile(name) +} + +func (e *Extension) OpenFile(name string) (rc io.ReadCloser, size int64, err error) { + return e.manager().OpenFile(name) +} + +func (e *Extension) nodeBackend() ipnext.NodeBackend { + if e.nodeBackendForTest != nil { + return e.nodeBackendForTest + } + return e.host.NodeBackend() +} + +// FileTargets lists nodes that the current node can send files to. +func (e *Extension) FileTargets() ([]*apitype.FileTarget, error) { + var ret []*apitype.FileTarget + + e.mu.Lock() + st := e.backendState + self := e.selfUID + e.mu.Unlock() + + if st != ipn.Running { + return nil, errors.New("not connected to the tailnet") + } + if !e.hasCapFileSharing() { + return nil, errors.New("file sharing not enabled by Tailscale admin") + } + nb := e.nodeBackend() + peers := nb.AppendMatchingPeers(nil, func(p tailcfg.NodeView) bool { + if !p.Valid() || p.Hostinfo().OS() == "tvOS" { + return false + } + if self == p.User() { + return true + } + if nb.PeerHasCap(p, tailcfg.PeerCapabilityFileSharingTarget) { + // Explicitly noted in the netmap ACL caps as a target. + return true + } + return false + }) + for _, p := range peers { + peerAPI := nb.PeerAPIBase(p) + if peerAPI == "" { + continue + } + ret = append(ret, &apitype.FileTarget{ + Node: p.AsStruct(), + PeerAPIURL: peerAPI, + }) + } + slices.SortFunc(ret, func(a, b *apitype.FileTarget) int { + return cmp.Compare(a.Node.Name, b.Node.Name) + }) + return ret, nil +} + +func (e *Extension) taildropTargetStatus(p tailcfg.NodeView, nb ipnext.NodeBackend) ipnstate.TaildropTargetStatus { + e.mu.Lock() + st := e.backendState + selfUID := e.selfUID + capFileSharing := e.capFileSharing + e.mu.Unlock() + + if st != ipn.Running { + return ipnstate.TaildropTargetIpnStateNotRunning + } + + if !capFileSharing { + return ipnstate.TaildropTargetMissingCap + } + if !p.Valid() { + return ipnstate.TaildropTargetNoPeerInfo + } + if !p.Online().Get() { + return ipnstate.TaildropTargetOffline + } + if p.Hostinfo().OS() == "tvOS" { + return ipnstate.TaildropTargetUnsupportedOS + } + if selfUID != p.User() { + // Different user must have the explicit file sharing target capability + if !nb.PeerHasCap(p, tailcfg.PeerCapabilityFileSharingTarget) { + return ipnstate.TaildropTargetOwnedByOtherUser + } + } + if !nb.PeerHasPeerAPI(p) { + return ipnstate.TaildropTargetNoPeerAPI + } + return ipnstate.TaildropTargetAvailable +} + +// updateOutgoingFiles updates b.outgoingFiles to reflect the given updates and +// sends an ipn.Notify with the full list of outgoingFiles. 
+func (e *Extension) updateOutgoingFiles(updates map[string]*ipn.OutgoingFile) { + e.mu.Lock() + if e.outgoingFiles == nil { + e.outgoingFiles = make(map[string]*ipn.OutgoingFile, len(updates)) + } + maps.Copy(e.outgoingFiles, updates) + outgoingFiles := make([]*ipn.OutgoingFile, 0, len(e.outgoingFiles)) + for _, file := range e.outgoingFiles { + outgoingFiles = append(outgoingFiles, file) + } + e.mu.Unlock() + slices.SortFunc(outgoingFiles, func(a, b *ipn.OutgoingFile) int { + t := a.Started.Compare(b.Started) + if t != 0 { + return t + } + return strings.Compare(a.Name, b.Name) + }) + + e.host.SendNotifyAsync(ipn.Notify{OutgoingFiles: outgoingFiles}) +} diff --git a/feature/taildrop/localapi.go b/feature/taildrop/localapi.go index 067a51f91..02e6b0b52 100644 --- a/feature/taildrop/localapi.go +++ b/feature/taildrop/localapi.go @@ -21,6 +21,7 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn" + "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/localapi" "tailscale.com/tailcfg" "tailscale.com/taildrop" @@ -80,9 +81,13 @@ func serveFilePut(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { return } - lb := h.LocalBackend() + ext, ok := ipnlocal.GetExt[*Extension](h.LocalBackend()) + if !ok { + http.Error(w, "misconfigured taildrop extension", http.StatusInternalServerError) + return + } - fts, err := lb.FileTargets() + fts, err := ext.FileTargets() if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -131,7 +136,7 @@ func serveFilePut(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { go func() { defer t.Stop() - defer lb.UpdateOutgoingFiles(outgoingFiles) + defer ext.updateOutgoingFiles(outgoingFiles) for { select { case u, ok := <-progressUpdates: @@ -140,7 +145,7 @@ func serveFilePut(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { } outgoingFiles[u.ID] = &u case <-t.C: - lb.UpdateOutgoingFiles(outgoingFiles) + ext.updateOutgoingFiles(outgoingFiles) } } }() @@ -301,7 +306,11 @@ func singleFilePut( fail() return false } - switch resp, err := client.Do(req); { + resp, err := client.Do(req) + if resp != nil { + defer resp.Body.Close() + } + switch { case err != nil: h.Logf("could not fetch remote hashes: %v", err) case resp.StatusCode == http.StatusMethodNotAllowed || resp.StatusCode == http.StatusNotFound: @@ -353,7 +362,13 @@ func serveFiles(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { http.Error(w, "file access denied", http.StatusForbidden) return } - lb := h.LocalBackend() + + ext, ok := ipnlocal.GetExt[*Extension](h.LocalBackend()) + if !ok { + http.Error(w, "misconfigured taildrop extension", http.StatusInternalServerError) + return + } + suffix, ok := strings.CutPrefix(r.URL.EscapedPath(), "/localapi/v0/files/") if !ok { http.Error(w, "misconfigured", http.StatusInternalServerError) @@ -376,14 +391,14 @@ func serveFiles(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { var cancel context.CancelFunc ctx, cancel = context.WithDeadline(ctx, deadline) defer cancel() - wfs, err = lb.AwaitWaitingFiles(ctx) + wfs, err = ext.AwaitWaitingFiles(ctx) if err != nil && ctx.Err() == nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } } else { var err error - wfs, err = lb.WaitingFiles() + wfs, err = ext.WaitingFiles() if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -399,14 +414,14 @@ func serveFiles(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { return } if r.Method == "DELETE" { - if err := 
lb.DeleteFile(name); err != nil { + if err := ext.DeleteFile(name); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } w.WriteHeader(http.StatusNoContent) return } - rc, size, err := lb.OpenFile(name) + rc, size, err := ext.OpenFile(name) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -426,7 +441,14 @@ func serveFileTargets(h *localapi.Handler, w http.ResponseWriter, r *http.Reques http.Error(w, "want GET to list targets", http.StatusBadRequest) return } - fts, err := h.LocalBackend().FileTargets() + + ext, ok := ipnlocal.GetExt[*Extension](h.LocalBackend()) + if !ok { + http.Error(w, "misconfigured taildrop extension", http.StatusInternalServerError) + return + } + + fts, err := ext.FileTargets() if err != nil { localapi.WriteErrorJSON(w, err) return diff --git a/cmd/tailscaled/taildrop.go b/feature/taildrop/paths.go similarity index 89% rename from cmd/tailscaled/taildrop.go rename to feature/taildrop/paths.go index 3eda9bebf..1129fbcfa 100644 --- a/cmd/tailscaled/taildrop.go +++ b/feature/taildrop/paths.go @@ -1,35 +1,38 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ts_omit_taildrop - -package main +package taildrop import ( "fmt" "os" "path/filepath" - "tailscale.com/ipn/ipnlocal" - "tailscale.com/types/logger" "tailscale.com/version/distro" ) -func configureTaildrop(logf logger.Logf, lb *ipnlocal.LocalBackend) { +// SetDirectFileRoot sets the directory where received files are written. +// +// This must be called before Tailscale is started. +func (e *Extension) SetDirectFileRoot(root string) { + e.directFileRoot = root +} + +func (e *Extension) setPlatformDefaultDirectFileRoot() { dg := distro.Get() + switch dg { case distro.Synology, distro.TrueNAS, distro.QNAP, distro.Unraid: // See if they have a "Taildrop" share. // See https://github.com/tailscale/tailscale/issues/2179#issuecomment-982821319 path, err := findTaildropDir(dg) if err != nil { - logf("%s Taildrop support: %v", dg, err) + e.logf("%s Taildrop support: %v", dg, err) } else { - logf("%s Taildrop: using %v", dg, path) - lb.SetDirectFileRoot(path) + e.logf("%s Taildrop: using %v", dg, path) + e.directFileRoot = path } } - } func findTaildropDir(dg distro.Distro) (string, error) { diff --git a/feature/taildrop/peerapi.go b/feature/taildrop/peerapi.go index f90dca9dc..a81ce9c3a 100644 --- a/feature/taildrop/peerapi.go +++ b/feature/taildrop/peerapi.go @@ -38,26 +38,30 @@ func canPutFile(h ipnlocal.PeerAPIHandler) bool { } func handlePeerPut(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { - lb := h.LocalBackend() - handlePeerPutWithBackend(h, lb, w, r) + ext, ok := ipnlocal.GetExt[*Extension](h.LocalBackend()) + if !ok { + http.Error(w, "miswired", http.StatusInternalServerError) + return + } + handlePeerPutWithBackend(h, ext, w, r) } -// localBackend is the subset of ipnlocal.Backend that taildrop +// extensionForPut is the subset of taildrop extension that taildrop // file put needs. This is pulled out for testability. 
-type localBackend interface { - TaildropManager() (*taildrop.Manager, error) - HasCapFileSharing() bool +type extensionForPut interface { + manager() *taildrop.Manager + hasCapFileSharing() bool Clock() tstime.Clock } -func handlePeerPutWithBackend(h ipnlocal.PeerAPIHandler, lb localBackend, w http.ResponseWriter, r *http.Request) { +func handlePeerPutWithBackend(h ipnlocal.PeerAPIHandler, ext extensionForPut, w http.ResponseWriter, r *http.Request) { if r.Method == "PUT" { metricPutCalls.Add(1) } - taildropMgr, err := lb.TaildropManager() - if err != nil { - h.Logf("taildropManager: %v", err) + taildropMgr := ext.manager() + if taildropMgr == nil { + h.Logf("taildrop: no taildrop manager") http.Error(w, "failed to get taildrop manager", http.StatusInternalServerError) return } @@ -66,7 +70,7 @@ func handlePeerPutWithBackend(h ipnlocal.PeerAPIHandler, lb localBackend, w http http.Error(w, taildrop.ErrNoTaildrop.Error(), http.StatusForbidden) return } - if !lb.HasCapFileSharing() { + if !ext.hasCapFileSharing() { http.Error(w, taildrop.ErrNoTaildrop.Error(), http.StatusForbidden) return } @@ -123,7 +127,7 @@ func handlePeerPutWithBackend(h ipnlocal.PeerAPIHandler, lb localBackend, w http } } case "PUT": - t0 := lb.Clock().Now() + t0 := ext.Clock().Now() id := taildrop.ClientID(h.Peer().StableID()) var offset int64 @@ -138,7 +142,7 @@ func handlePeerPutWithBackend(h ipnlocal.PeerAPIHandler, lb localBackend, w http n, err := taildropMgr.PutFile(taildrop.ClientID(fmt.Sprint(id)), baseName, r.Body, offset, r.ContentLength) switch err { case nil: - d := lb.Clock().Since(t0).Round(time.Second / 10) + d := ext.Clock().Since(t0).Round(time.Second / 10) h.Logf("got put of %s in %v from %v/%v", approxSize(n), d, h.RemoteAddr().Addr(), h.Peer().ComputedName) io.WriteString(w, "{}\n") case taildrop.ErrNoTaildrop: diff --git a/feature/taildrop/peerapi_test.go b/feature/taildrop/peerapi_test.go index 46a61f547..a647add37 100644 --- a/feature/taildrop/peerapi_test.go +++ b/feature/taildrop/peerapi_test.go @@ -50,19 +50,19 @@ func (h *peerAPIHandler) PeerCaps() tailcfg.PeerCapMap { return nil } -type fakeLocalBackend struct { +type fakeExtension struct { logf logger.Logf capFileSharing bool clock tstime.Clock taildrop *taildrop.Manager } -func (lb *fakeLocalBackend) Clock() tstime.Clock { return lb.clock } -func (lb *fakeLocalBackend) HasCapFileSharing() bool { - return lb.capFileSharing +func (lb *fakeExtension) manager() *taildrop.Manager { + return lb.taildrop } -func (lb *fakeLocalBackend) TaildropManager() (*taildrop.Manager, error) { - return lb.taildrop, nil +func (lb *fakeExtension) Clock() tstime.Clock { return lb.clock } +func (lb *fakeExtension) hasCapFileSharing() bool { + return lb.capFileSharing } type peerAPITestEnv struct { @@ -472,16 +472,17 @@ func TestHandlePeerAPI(t *testing.T) { selfNode.CapMap = tailcfg.NodeCapMap{tailcfg.CapabilityDebug: nil} } var rootDir string - var e peerAPITestEnv if !tt.omitRoot { rootDir = t.TempDir() - e.taildrop = taildrop.ManagerOptions{ - Logf: e.logBuf.Logf, - Dir: rootDir, - }.New() } - lb := &fakeLocalBackend{ + var e peerAPITestEnv + e.taildrop = taildrop.ManagerOptions{ + Logf: e.logBuf.Logf, + Dir: rootDir, + }.New() + + ext := &fakeExtension{ logf: e.logBuf.Logf, capFileSharing: tt.capSharing, clock: &tstest.Clock{}, @@ -499,7 +500,7 @@ func TestHandlePeerAPI(t *testing.T) { if req.Host == "example.com" { req.Host = "100.100.100.101:12345" } - handlePeerPutWithBackend(e.ph, lb, e.rr, req) + handlePeerPutWithBackend(e.ph, ext, e.rr, req) } for _, f 
:= range tt.checks { f(t, &e) @@ -539,7 +540,7 @@ func TestFileDeleteRace(t *testing.T) { Addresses: []netip.Prefix{netip.MustParsePrefix("100.100.100.101/32")}, }).View(), } - fakeLB := &fakeLocalBackend{ + fakeLB := &fakeExtension{ logf: t.Logf, capFileSharing: true, clock: &tstest.Clock{}, diff --git a/feature/taildrop/target_test.go b/feature/taildrop/target_test.go new file mode 100644 index 000000000..57c96a77a --- /dev/null +++ b/feature/taildrop/target_test.go @@ -0,0 +1,73 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package taildrop + +import ( + "fmt" + "testing" + + "tailscale.com/ipn" + "tailscale.com/ipn/ipnext" + "tailscale.com/tailcfg" +) + +func TestFileTargets(t *testing.T) { + e := new(Extension) + + _, err := e.FileTargets() + if got, want := fmt.Sprint(err), "not connected to the tailnet"; got != want { + t.Errorf("before connect: got %q; want %q", got, want) + } + + e.nodeBackendForTest = testNodeBackend{peers: nil} + + _, err = e.FileTargets() + if got, want := fmt.Sprint(err), "not connected to the tailnet"; got != want { + t.Errorf("non-running netmap: got %q; want %q", got, want) + } + + e.backendState = ipn.Running + _, err = e.FileTargets() + if got, want := fmt.Sprint(err), "file sharing not enabled by Tailscale admin"; got != want { + t.Errorf("without cap: got %q; want %q", got, want) + } + + e.capFileSharing = true + got, err := e.FileTargets() + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Fatalf("unexpected %d peers", len(got)) + } + + var nodeID tailcfg.NodeID = 1234 + peer := &tailcfg.Node{ + ID: nodeID, + Hostinfo: (&tailcfg.Hostinfo{OS: "tvOS"}).View(), + } + e.nodeBackendForTest = testNodeBackend{peers: []tailcfg.NodeView{peer.View()}} + + got, err = e.FileTargets() + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Fatalf("unexpected %d peers", len(got)) + } +} + +type testNodeBackend struct { + ipnext.NodeBackend + peers []tailcfg.NodeView +} + +func (t testNodeBackend) AppendMatchingPeers(peers []tailcfg.NodeView, f func(tailcfg.NodeView) bool) []tailcfg.NodeView { + for _, p := range t.peers { + if f(p) { + peers = append(peers, p) + } + } + return peers +} diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index bd8d3d79c..895fadc1c 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -9,11 +9,14 @@ import ( "errors" "fmt" "iter" + "net/netip" "tailscale.com/control/controlclient" "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tailcfg" "tailscale.com/tsd" "tailscale.com/tstime" "tailscale.com/types/logger" @@ -191,6 +194,10 @@ type Host interface { // SendNotifyAsync sends a notification to the IPN bus, // typically to the GUI client. SendNotifyAsync(ipn.Notify) + + // NodeBackend returns the [NodeBackend] for the currently active node + // (which is approximately the same as the current profile). + NodeBackend() NodeBackend } // SafeBackend is a subset of the [ipnlocal.LocalBackend] type's methods that @@ -323,7 +330,9 @@ type NewControlClientCallback func(controlclient.Client, ipn.LoginProfileView) ( // Each hook has its own rules about when it's called and what environment it // has access to and what it's allowed to do. type Hooks struct { - // ProfileStateChange are callbacks that are invoked when the current login profile + // BackendStateChange is called when the backend state changes. 
+ BackendStateChange feature.Hooks[func(ipn.State)] + // or its [ipn.Prefs] change, after those changes have been made. The current login profile // may be changed either because of a profile switch, or because the profile information // was updated by [LocalBackend.SetControlClientStatus], including when the profile @@ -347,4 +356,42 @@ type Hooks struct { // NewControlClient are the functions to be called when a new control client // is created. It is called with the LocalBackend locked. NewControlClient feature.Hooks[NewControlClientCallback] + + // OnSelfChange is called (with LocalBackend.mu held) when the self node + // changes, including changing to nothing (an invalid view). + OnSelfChange feature.Hooks[func(tailcfg.NodeView)] + + // MutateNotifyLocked is called to optionally mutate the provided Notify + // before sending it to the IPN bus. It is called with LocalBackend.mu held. + MutateNotifyLocked feature.Hooks[func(*ipn.Notify)] + + // SetPeerStatus is called to mutate PeerStatus. + // Callers must only use NodeBackend to read data. + SetPeerStatus feature.Hooks[func(*ipnstate.PeerStatus, tailcfg.NodeView, NodeBackend)] +} + +// NodeBackend is an interface to query the current node and its peers. +// +// It is not a snapshot in time but is locked to a particular node. +type NodeBackend interface { + // AppendMatchingPeers appends all peers that match the predicate + // to the base slice and returns it. + AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView + + // PeerCaps returns the capabilities that src has to this node. + PeerCaps(src netip.Addr) tailcfg.PeerCapMap + + // PeerHasCap reports whether the peer has the specified peer capability. + PeerHasCap(peer tailcfg.NodeView, cap tailcfg.PeerCapability) bool + + // PeerAPIBase returns the "http://ip:port" URL base to reach peer's + // PeerAPI, or the empty string if the peer is invalid or doesn't support + // PeerAPI. + PeerAPIBase(tailcfg.NodeView) string + + // PeerHasPeerAPI whether the provided peer supports PeerAPI. + // + // It effectively just reports whether PeerAPIBase(node) is non-empty, but + // potentially more efficiently. + PeerHasPeerAPI(tailcfg.NodeView) bool } diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index f13c9de48..a06ea5e8c 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -336,11 +336,8 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem } // Check that the peer is allowed to share with us. - addresses := peer.Addresses() - for _, p := range addresses.All() { - if cn.PeerHasCap(p.Addr(), tailcfg.PeerCapabilityTaildriveSharer) { - return true - } + if cn.PeerHasCap(peer, tailcfg.PeerCapabilityTaildriveSharer) { + return true } return false diff --git a/ipn/ipnlocal/extension_host.go b/ipn/ipnlocal/extension_host.go index bf0e6091c..faf9d2be9 100644 --- a/ipn/ipnlocal/extension_host.go +++ b/ipn/ipnlocal/extension_host.go @@ -87,6 +87,8 @@ type ExtensionHost struct { shuttingDown atomic.Bool + extByType sync.Map // reflect.Type -> ipnext.Extension + // mu protects the following fields. // It must not be held when calling [LocalBackend] methods // or when invoking callbacks registered by extensions. 
@@ -117,6 +119,9 @@ type Backend interface { SwitchToBestProfile(reason string) SendNotify(ipn.Notify) + + NodeBackend() ipnext.NodeBackend + ipnext.SafeBackend } @@ -183,6 +188,13 @@ func newExtensionHost(logf logger.Logf, b Backend, overrideExts ...*ipnext.Defin return host, nil } +func (h *ExtensionHost) NodeBackend() ipnext.NodeBackend { + if h == nil { + return nil + } + return h.b.NodeBackend() +} + // Init initializes the host and the extensions it manages. func (h *ExtensionHost) Init() { if h != nil { @@ -229,6 +241,7 @@ func (h *ExtensionHost) init() { h.mu.Lock() h.activeExtensions = append(h.activeExtensions, ext) h.extensionsByName[ext.Name()] = ext + h.extByType.Store(reflect.TypeOf(ext), ext) h.mu.Unlock() } @@ -276,6 +289,29 @@ func (h *ExtensionHost) FindExtensionByName(name string) any { // extensionIfaceType is the runtime type of the [ipnext.Extension] interface. var extensionIfaceType = reflect.TypeFor[ipnext.Extension]() +// GetExt returns the extension of type T registered with lb. +// If lb is nil or the extension is not found, it returns zero, false. +func GetExt[T ipnext.Extension](lb *LocalBackend) (_ T, ok bool) { + var zero T + if lb == nil { + return zero, false + } + if ext, ok := lb.extHost.extensionOfType(reflect.TypeFor[T]()); ok { + return ext.(T), true + } + return zero, false +} + +func (h *ExtensionHost) extensionOfType(t reflect.Type) (_ ipnext.Extension, ok bool) { + if h == nil { + return nil, false + } + if v, ok := h.extByType.Load(t); ok { + return v.(ipnext.Extension), true + } + return nil, false +} + // FindMatchingExtension implements [ipnext.ExtensionServices] // and is also used by the [LocalBackend]. func (h *ExtensionHost) FindMatchingExtension(target any) bool { diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go index aa4a27d45..8816e659f 100644 --- a/ipn/ipnlocal/extension_host_test.go +++ b/ipn/ipnlocal/extension_host_test.go @@ -1335,8 +1335,9 @@ func (b *testBackend) Clock() tstime.Clock { return tstime.StdClock{} } func (b *testBackend) Sys() *tsd.System { return b.lazySys.Get(tsd.NewSystem) } -func (b *testBackend) SendNotify(ipn.Notify) { panic("not implemented") } -func (b *testBackend) TailscaleVarRoot() string { panic("not implemented") } +func (b *testBackend) SendNotify(ipn.Notify) { panic("not implemented") } +func (b *testBackend) NodeBackend() ipnext.NodeBackend { panic("not implemented") } +func (b *testBackend) TailscaleVarRoot() string { panic("not implemented") } func (b *testBackend) SwitchToBestProfile(reason string) { b.mu.Lock() diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index b2998d11c..a7935c6cd 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -26,7 +26,6 @@ import ( "net/url" "os" "os/exec" - "path/filepath" "reflect" "runtime" "slices" @@ -58,6 +57,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/conffile" "tailscale.com/ipn/ipnauth" + "tailscale.com/ipn/ipnext" "tailscale.com/ipn/ipnstate" "tailscale.com/ipn/policy" "tailscale.com/log/sockstatlog" @@ -277,37 +277,23 @@ type LocalBackend struct { capFileSharing bool // whether netMap contains the file sharing capability capTailnetLock bool // whether netMap contains the tailnet lock capability // hostinfo is mutated in-place while mu is held. 
- hostinfo *tailcfg.Hostinfo // TODO(nickkhyl): move to nodeContext - nmExpiryTimer tstime.TimerController // for updating netMap on node expiry; can be nil; TODO(nickkhyl): move to nodeContext - activeLogin string // last logged LoginName from netMap; TODO(nickkhyl): move to nodeContext (or remove? it's in [ipn.LoginProfile]). - engineStatus ipn.EngineStatus - endpoints []tailcfg.Endpoint - blocked bool - keyExpired bool // TODO(nickkhyl): move to nodeContext - authURL string // non-empty if not Running; TODO(nickkhyl): move to nodeContext - authURLTime time.Time // when the authURL was received from the control server; TODO(nickkhyl): move to nodeContext - authActor ipnauth.Actor // an actor who called [LocalBackend.StartLoginInteractive] last, or nil; TODO(nickkhyl): move to nodeContext - egg bool - prevIfState *netmon.State - peerAPIServer *peerAPIServer // or nil - peerAPIListeners []*peerAPIListener - loginFlags controlclient.LoginFlags - fileWaiters set.HandleSet[context.CancelFunc] // of wake-up funcs - notifyWatchers map[string]*watchSession // by session ID - lastStatusTime time.Time // status.AsOf value of the last processed status update - // directFileRoot, if non-empty, means to write received files - // directly to this directory, without staging them in an - // intermediate buffered directory for "pick-up" later. If - // empty, the files are received in a daemon-owned location - // and the localapi is used to enumerate, download, and delete - // them. This is used on macOS where the GUI lifetime is the - // same as the Network Extension lifetime and we can thus avoid - // double-copying files by writing them to the right location - // immediately. - // It's also used on several NAS platforms (Synology, TrueNAS, etc) - // but in that case DoFinalRename is also set true, which moves the - // *.partial file to its final name on completion. - directFileRoot string + hostinfo *tailcfg.Hostinfo // TODO(nickkhyl): move to nodeContext + nmExpiryTimer tstime.TimerController // for updating netMap on node expiry; can be nil; TODO(nickkhyl): move to nodeContext + activeLogin string // last logged LoginName from netMap; TODO(nickkhyl): move to nodeContext (or remove? it's in [ipn.LoginProfile]). + engineStatus ipn.EngineStatus + endpoints []tailcfg.Endpoint + blocked bool + keyExpired bool // TODO(nickkhyl): move to nodeContext + authURL string // non-empty if not Running; TODO(nickkhyl): move to nodeContext + authURLTime time.Time // when the authURL was received from the control server; TODO(nickkhyl): move to nodeContext + authActor ipnauth.Actor // an actor who called [LocalBackend.StartLoginInteractive] last, or nil; TODO(nickkhyl): move to nodeContext + egg bool + prevIfState *netmon.State + peerAPIServer *peerAPIServer // or nil + peerAPIListeners []*peerAPIListener + loginFlags controlclient.LoginFlags + notifyWatchers map[string]*watchSession // by session ID + lastStatusTime time.Time // status.AsOf value of the last processed status update componentLogUntil map[string]componentLogState // c2nUpdateStatus is the status of c2n-triggered client update. c2nUpdateStatus updateStatus @@ -371,9 +357,6 @@ type LocalBackend struct { // http://go/corp/25168 lastKnownHardwareAddrs syncs.AtomicValue[[]string] - // outgoingFiles keeps track of Taildrop outgoing files keyed to their OutgoingFile.ID - outgoingFiles map[string]*ipn.OutgoingFile - // lastSuggestedExitNode stores the last suggested exit node suggestion to // avoid unnecessary churn between multiple equally-good options. 
lastSuggestedExitNode tailcfg.StableNodeID @@ -594,6 +577,11 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo func (b *LocalBackend) Clock() tstime.Clock { return b.clock } func (b *LocalBackend) Sys() *tsd.System { return b.sys } +// NodeBackend returns the current node's NodeBackend interface. +func (b *LocalBackend) NodeBackend() ipnext.NodeBackend { + return b.currentNode() +} + func (b *LocalBackend) currentNode() *nodeBackend { if v := b.currentNodeAtomic.Load(); v != nil || !testenv.InTest() { return v @@ -772,17 +760,6 @@ func (b *LocalBackend) Dialer() *tsdial.Dialer { return b.dialer } -// SetDirectFileRoot sets the directory to download files to directly, -// without buffering them through an intermediate daemon-owned -// tailcfg.UserID-specific directory. -// -// This must be called before the LocalBackend starts being used. -func (b *LocalBackend) SetDirectFileRoot(dir string) { - b.mu.Lock() - defer b.mu.Unlock() - b.directFileRoot = dir -} - // ReloadConfig reloads the backend's config from disk. // // It returns (false, nil) if not running in declarative mode, (true, nil) on @@ -844,6 +821,16 @@ func (b *LocalBackend) setStaticEndpointsFromConfigLocked(conf *conffile.Config) } } +func (b *LocalBackend) setStateLocked(state ipn.State) { + if b.state == state { + return + } + b.state = state + for _, f := range b.extHost.Hooks().BackendStateChange { + f(state) + } +} + // setConfigLockedOnEntry uses the provided config to update the backend's prefs // and other state. func (b *LocalBackend) setConfigLockedOnEntry(conf *conffile.Config, unlock unlockOnce) error { @@ -1309,8 +1296,8 @@ func (b *LocalBackend) populatePeerStatusLocked(sb *ipnstate.StatusBuilder) { Location: p.Hostinfo().Location().AsStruct(), Capabilities: p.Capabilities().AsSlice(), } - if f := hookSetPeerStatusTaildropTargetLocked; f != nil { - f(b, ps, p) + for _, f := range b.extHost.Hooks().SetPeerStatus { + f(ps, p, cn) } if cm := p.CapMap(); cm.Len() > 0 { ps.CapMap = make(tailcfg.NodeCapMap, cm.Len()) @@ -2357,7 +2344,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { hostinfo.Services = b.hostinfo.Services // keep any previous services } b.hostinfo = hostinfo - b.state = ipn.NoState + b.setStateLocked(ipn.NoState) cn := b.currentNode() if opts.UpdatePrefs != nil { @@ -3316,17 +3303,6 @@ func (b *LocalBackend) sendTo(n ipn.Notify, recipient notificationTarget) { b.sendToLocked(n, recipient) } -var ( - // hookSetNotifyFilesWaitingLocked, if non-nil, is called in sendToLocked to - // populate ipn.Notify.FilesWaiting when taildrop is linked in to the binary - // and enabled on a LocalBackend. - hookSetNotifyFilesWaitingLocked func(*LocalBackend, *ipn.Notify) - - // hookSetPeerStatusTaildropTargetLocked, if non-nil, is called to populate PeerStatus - // if taildrop is linked in to the binary and enabled on the LocalBackend. - hookSetPeerStatusTaildropTargetLocked func(*LocalBackend, *ipnstate.PeerStatus, tailcfg.NodeView) -) - // sendToLocked is like [LocalBackend.sendTo], but assumes b.mu is already held. 
func (b *LocalBackend) sendToLocked(n ipn.Notify, recipient notificationTarget) { if n.Prefs != nil { @@ -3336,8 +3312,8 @@ func (b *LocalBackend) sendToLocked(n ipn.Notify, recipient notificationTarget) n.Version = version.Long() } - if f := hookSetNotifyFilesWaitingLocked; f != nil { - f(b, &n) + for _, f := range b.extHost.Hooks().MutateNotifyLocked { + f(&n) } for _, sess := range b.notifyWatchers { @@ -5266,26 +5242,6 @@ func (b *LocalBackend) TailscaleVarRoot() string { return "" } -func (b *LocalBackend) fileRootLocked(uid tailcfg.UserID) string { - if v := b.directFileRoot; v != "" { - return v - } - varRoot := b.TailscaleVarRoot() - if varRoot == "" { - b.logf("Taildrop disabled; no state directory") - return "" - } - baseDir := fmt.Sprintf("%s-uid-%d", - strings.ReplaceAll(b.activeLogin, "@", "-"), - uid) - dir := filepath.Join(varRoot, "files", baseDir) - if err := os.MkdirAll(dir, 0700); err != nil { - b.logf("Taildrop disabled; error making directory: %v", err) - return "" - } - return dir -} - // closePeerAPIListenersLocked closes any existing PeerAPI listeners // and clears out the PeerAPI server state. // @@ -5353,8 +5309,7 @@ func (b *LocalBackend) initPeerAPIListener() { } ps := &peerAPIServer{ - b: b, - taildrop: b.newTaildropManager(b.fileRootLocked(selfNode.User())), + b: b, } if dm, ok := b.sys.DNSManager.GetOK(); ok { ps.resolver = dm.Resolver() @@ -5643,7 +5598,7 @@ func (b *LocalBackend) enterState(newState ipn.State) { func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlockOnce) { cn := b.currentNode() oldState := b.state - b.state = newState + b.setStateLocked(newState) prefs := b.pm.CurrentPrefs() // Some temporary (2024-05-05) debugging code to help us catch @@ -6158,6 +6113,8 @@ func (nb *nodeBackend) SetNetMap(nm *netmap.NetworkMap) { // received nm. If nm is nil, it resets all configuration as though // Tailscale is turned off. func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { + oldSelf := b.currentNode().NetMap().SelfNodeOrZero() + b.dialer.SetNetMap(nm) if ns, ok := b.sys.Netstack.GetOK(); ok { ns.UpdateNetstackIPs(nm) @@ -6205,6 +6162,13 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(b.pm.CurrentPrefs()) b.ipVIPServiceMap = nm.GetIPVIPServiceMap() + + if !oldSelf.Equal(nm.SelfNodeOrZero()) { + for _, f := range b.extHost.Hooks().OnSelfChange { + f(nm.SelfNode) + } + } + if nm == nil { // If there is no netmap, the client is going into a "turned off" // state so reset the metrics. @@ -6667,12 +6631,21 @@ func (b *LocalBackend) TestOnlyPublicKeys() (machineKey key.MachinePublic, nodeK return mk, nk } -// PeerHasCap reports whether the peer with the given Tailscale IP addresses -// contains the given capability string, with any value(s). -func (nb *nodeBackend) PeerHasCap(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { +// PeerHasCap reports whether the peer contains the given capability string, +// with any value(s). 
+func (nb *nodeBackend) PeerHasCap(peer tailcfg.NodeView, wantCap tailcfg.PeerCapability) bool { + if !peer.Valid() { + return false + } + nb.mu.Lock() defer nb.mu.Unlock() - return nb.peerHasCapLocked(addr, wantCap) + for _, ap := range peer.Addresses().All() { + if nb.peerHasCapLocked(ap.Addr(), wantCap) { + return true + } + } + return false } func (nb *nodeBackend) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 2b4c07749..675623f33 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -60,8 +60,6 @@ type peerDNSQueryHandler interface { type peerAPIServer struct { b *LocalBackend resolver peerDNSQueryHandler - - taildrop *taildrop_Manager } func (s *peerAPIServer) listen(ip netip.Addr, ifState *netmon.State) (ln net.Listener, err error) { diff --git a/ipn/ipnlocal/taildrop.go b/ipn/ipnlocal/taildrop.go deleted file mode 100644 index d8113d219..000000000 --- a/ipn/ipnlocal/taildrop.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !ts_omit_taildrop - -package ipnlocal - -import ( - "cmp" - "context" - "errors" - "io" - "maps" - "slices" - "strings" - - "tailscale.com/client/tailscale/apitype" - "tailscale.com/ipn" - "tailscale.com/ipn/ipnstate" - "tailscale.com/tailcfg" - "tailscale.com/taildrop" - "tailscale.com/tstime" - "tailscale.com/types/empty" - "tailscale.com/util/set" -) - -func init() { - hookSetNotifyFilesWaitingLocked = (*LocalBackend).setNotifyFilesWaitingLocked - hookSetPeerStatusTaildropTargetLocked = (*LocalBackend).setPeerStatusTaildropTargetLocked -} - -type taildrop_Manager = taildrop.Manager - -func (b *LocalBackend) newTaildropManager(fileRoot string) *taildrop.Manager { - // TODO(bradfitz): move all this to an ipnext so ipnlocal doesn't need to depend - // on taildrop at all. - if fileRoot == "" { - b.logf("no Taildrop directory configured") - } - return taildrop.ManagerOptions{ - Logf: b.logf, - Clock: tstime.DefaultClock{Clock: b.clock}, - State: b.store, - Dir: fileRoot, - DirectFileMode: b.directFileRoot != "", - SendFileNotify: b.sendFileNotify, - }.New() -} - -func (b *LocalBackend) sendFileNotify() { - var n ipn.Notify - - b.mu.Lock() - for _, wakeWaiter := range b.fileWaiters { - wakeWaiter() - } - apiSrv := b.peerAPIServer - if apiSrv == nil { - b.mu.Unlock() - return - } - - n.IncomingFiles = apiSrv.taildrop.IncomingFiles() - b.mu.Unlock() - - b.send(n) -} - -// TaildropManager returns the taildrop manager for this backend. -// -// TODO(bradfitz): as of 2025-04-15, this is a temporary method during -// refactoring; the plan is for all taildrop code to leave the ipnlocal package -// and move to an extension. Baby steps. 
-func (b *LocalBackend) TaildropManager() (*taildrop.Manager, error) { - b.mu.Lock() - ps := b.peerAPIServer - b.mu.Unlock() - if ps == nil { - return nil, errors.New("no peer API server initialized") - } - if ps.taildrop == nil { - return nil, errors.New("no taildrop manager initialized") - } - return ps.taildrop, nil -} - -func (b *LocalBackend) taildropOrNil() *taildrop.Manager { - b.mu.Lock() - ps := b.peerAPIServer - b.mu.Unlock() - if ps == nil { - return nil - } - return ps.taildrop -} - -func (b *LocalBackend) setNotifyFilesWaitingLocked(n *ipn.Notify) { - if ps := b.peerAPIServer; ps != nil { - if ps.taildrop.HasFilesWaiting() { - n.FilesWaiting = &empty.Message{} - } - } -} - -func (b *LocalBackend) setPeerStatusTaildropTargetLocked(ps *ipnstate.PeerStatus, p tailcfg.NodeView) { - ps.TaildropTarget = b.taildropTargetStatus(p) -} - -func (b *LocalBackend) removeFileWaiter(handle set.Handle) { - b.mu.Lock() - defer b.mu.Unlock() - delete(b.fileWaiters, handle) -} - -func (b *LocalBackend) addFileWaiter(wakeWaiter context.CancelFunc) set.Handle { - b.mu.Lock() - defer b.mu.Unlock() - return b.fileWaiters.Add(wakeWaiter) -} - -func (b *LocalBackend) WaitingFiles() ([]apitype.WaitingFile, error) { - return b.taildropOrNil().WaitingFiles() -} - -// AwaitWaitingFiles is like WaitingFiles but blocks while ctx is not done, -// waiting for any files to be available. -// -// On return, exactly one of the results will be non-empty or non-nil, -// respectively. -func (b *LocalBackend) AwaitWaitingFiles(ctx context.Context) ([]apitype.WaitingFile, error) { - if ff, err := b.WaitingFiles(); err != nil || len(ff) > 0 { - return ff, err - } - - for { - gotFile, gotFileCancel := context.WithCancel(context.Background()) - defer gotFileCancel() - - handle := b.addFileWaiter(gotFileCancel) - defer b.removeFileWaiter(handle) - - // Now that we've registered ourselves, check again, in case - // of race. Otherwise there's a small window where we could - // miss a file arrival and wait forever. - if ff, err := b.WaitingFiles(); err != nil || len(ff) > 0 { - return ff, err - } - - select { - case <-gotFile.Done(): - if ff, err := b.WaitingFiles(); err != nil || len(ff) > 0 { - return ff, err - } - case <-ctx.Done(): - return nil, ctx.Err() - } - } -} - -func (b *LocalBackend) DeleteFile(name string) error { - return b.taildropOrNil().DeleteFile(name) -} - -func (b *LocalBackend) OpenFile(name string) (rc io.ReadCloser, size int64, err error) { - return b.taildropOrNil().OpenFile(name) -} - -// HasCapFileSharing reports whether the current node has the file -// sharing capability enabled. -func (b *LocalBackend) HasCapFileSharing() bool { - // TODO(bradfitz): remove this method and all Taildrop/Taildrive - // references from LocalBackend as part of tailscale/tailscale#12614. - b.mu.Lock() - defer b.mu.Unlock() - return b.capFileSharing -} - -// FileTargets lists nodes that the current node can send files to. 
-func (b *LocalBackend) FileTargets() ([]*apitype.FileTarget, error) { - var ret []*apitype.FileTarget - - b.mu.Lock() // for b.{state,capFileSharing} - defer b.mu.Unlock() - cn := b.currentNode() - nm := cn.NetMap() - self := cn.SelfUserID() - if b.state != ipn.Running || nm == nil { - return nil, errors.New("not connected to the tailnet") - } - if !b.capFileSharing { - return nil, errors.New("file sharing not enabled by Tailscale admin") - } - peers := cn.AppendMatchingPeers(nil, func(p tailcfg.NodeView) bool { - if !p.Valid() || p.Hostinfo().OS() == "tvOS" { - return false - } - if self == p.User() { - return true - } - if p.Addresses().Len() != 0 && cn.PeerHasCap(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) { - // Explicitly noted in the netmap ACL caps as a target. - return true - } - return false - }) - for _, p := range peers { - peerAPI := cn.PeerAPIBase(p) - if peerAPI == "" { - continue - } - ret = append(ret, &apitype.FileTarget{ - Node: p.AsStruct(), - PeerAPIURL: peerAPI, - }) - } - slices.SortFunc(ret, func(a, b *apitype.FileTarget) int { - return cmp.Compare(a.Node.Name, b.Node.Name) - }) - return ret, nil -} - -func (b *LocalBackend) taildropTargetStatus(p tailcfg.NodeView) ipnstate.TaildropTargetStatus { - if b.state != ipn.Running { - return ipnstate.TaildropTargetIpnStateNotRunning - } - cn := b.currentNode() - nm := cn.NetMap() - if nm == nil { - return ipnstate.TaildropTargetNoNetmapAvailable - } - if !b.capFileSharing { - return ipnstate.TaildropTargetMissingCap - } - - if !p.Online().Get() { - return ipnstate.TaildropTargetOffline - } - - if !p.Valid() { - return ipnstate.TaildropTargetNoPeerInfo - } - if nm.User() != p.User() { - // Different user must have the explicit file sharing target capability - if p.Addresses().Len() == 0 || !cn.PeerHasCap(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) { - // Explicitly noted in the netmap ACL caps as a target. - return ipnstate.TaildropTargetOwnedByOtherUser - } - } - - if p.Hostinfo().OS() == "tvOS" { - return ipnstate.TaildropTargetUnsupportedOS - } - if !cn.PeerHasPeerAPI(p) { - return ipnstate.TaildropTargetNoPeerAPI - } - return ipnstate.TaildropTargetAvailable -} - -// UpdateOutgoingFiles updates b.outgoingFiles to reflect the given updates and -// sends an ipn.Notify with the full list of outgoingFiles. 
-func (b *LocalBackend) UpdateOutgoingFiles(updates map[string]*ipn.OutgoingFile) { - b.mu.Lock() - if b.outgoingFiles == nil { - b.outgoingFiles = make(map[string]*ipn.OutgoingFile, len(updates)) - } - maps.Copy(b.outgoingFiles, updates) - outgoingFiles := make([]*ipn.OutgoingFile, 0, len(b.outgoingFiles)) - for _, file := range b.outgoingFiles { - outgoingFiles = append(outgoingFiles, file) - } - b.mu.Unlock() - slices.SortFunc(outgoingFiles, func(a, b *ipn.OutgoingFile) int { - t := a.Started.Compare(b.Started) - if t != 0 { - return t - } - return strings.Compare(a.Name, b.Name) - }) - b.send(ipn.Notify{OutgoingFiles: outgoingFiles}) -} diff --git a/ipn/ipnlocal/taildrop_omit.go b/ipn/ipnlocal/taildrop_omit.go deleted file mode 100644 index 07d2d5cc0..000000000 --- a/ipn/ipnlocal/taildrop_omit.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build ts_omit_taildrop - -package ipnlocal - -type taildrop_Manager = struct{} - -func (b *LocalBackend) newTaildropManager(fileRoot string) *taildrop_Manager { - return nil -} diff --git a/ipn/ipnlocal/taildrop_test.go b/ipn/ipnlocal/taildrop_test.go deleted file mode 100644 index a5166e8a3..000000000 --- a/ipn/ipnlocal/taildrop_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !ts_omit_taildrop - -package ipnlocal - -import ( - "fmt" - "testing" - - "tailscale.com/ipn" - "tailscale.com/tailcfg" - "tailscale.com/tstest/deptest" - "tailscale.com/types/netmap" -) - -func TestFileTargets(t *testing.T) { - b := new(LocalBackend) - _, err := b.FileTargets() - if got, want := fmt.Sprint(err), "not connected to the tailnet"; got != want { - t.Errorf("before connect: got %q; want %q", got, want) - } - - b.currentNode().SetNetMap(new(netmap.NetworkMap)) - _, err = b.FileTargets() - if got, want := fmt.Sprint(err), "not connected to the tailnet"; got != want { - t.Errorf("non-running netmap: got %q; want %q", got, want) - } - - b.state = ipn.Running - _, err = b.FileTargets() - if got, want := fmt.Sprint(err), "file sharing not enabled by Tailscale admin"; got != want { - t.Errorf("without cap: got %q; want %q", got, want) - } - - b.capFileSharing = true - got, err := b.FileTargets() - if err != nil { - t.Fatal(err) - } - if len(got) != 0 { - t.Fatalf("unexpected %d peers", len(got)) - } - - nm := &netmap.NetworkMap{ - Peers: []tailcfg.NodeView{ - (&tailcfg.Node{ - ID: 1234, - Hostinfo: (&tailcfg.Hostinfo{OS: "tvOS"}).View(), - }).View(), - }, - } - b.currentNode().SetNetMap(nm) - got, err = b.FileTargets() - if err != nil { - t.Fatal(err) - } - if len(got) != 0 { - t.Fatalf("unexpected %d peers", len(got)) - } - // (other cases handled by TestPeerAPIBase above) -} - -func TestOmitTaildropDeps(t *testing.T) { - deptest.DepChecker{ - Tags: "ts_omit_taildrop", - GOOS: "linux", - GOARCH: "amd64", - BadDeps: map[string]string{ - "tailscale.com/taildrop": "should be omitted", - "tailscale.com/feature/taildrop": "should be omitted", - }, - }.Check(t) -} diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index b1eecaa8f..c6250c49c 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -148,6 +148,14 @@ func (nm *NetworkMap) GetIPVIPServiceMap() IPServiceMappings { return res } +// SelfNodeOrZero returns the self node, or a zero value if nm is nil. 
+func (nm *NetworkMap) SelfNodeOrZero() tailcfg.NodeView { + if nm == nil { + return tailcfg.NodeView{} + } + return nm.SelfNode +} + // AnyPeersAdvertiseRoutes reports whether any peer is advertising non-exit node routes. func (nm *NetworkMap) AnyPeersAdvertiseRoutes() bool { for _, p := range nm.Peers { From 5b597489bc6abddcc5b260076ff854f298d7dc78 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 6 May 2025 20:45:28 -0700 Subject: [PATCH 0828/1708] taildrop: merge taildrop and feature/taildrop packages together Fixes #15812 Change-Id: I3bf0666bf9e7a9caea5f0f99fdb0eb2812157608 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 1 - cmd/tailscaled/depaware.txt | 1 - {taildrop => feature/taildrop}/delete.go | 4 +- {taildrop => feature/taildrop}/delete_test.go | 4 +- feature/taildrop/ext.go | 11 +++-- feature/taildrop/localapi.go | 3 +- feature/taildrop/peerapi.go | 21 +++++----- feature/taildrop/peerapi_test.go | 11 +++-- {taildrop => feature/taildrop}/resume.go | 40 +++++++++---------- {taildrop => feature/taildrop}/resume_test.go | 6 +-- {taildrop => feature/taildrop}/retrieve.go | 8 ++-- {taildrop => feature/taildrop}/send.go | 10 ++--- {taildrop => feature/taildrop}/taildrop.go | 30 +++++++------- .../taildrop}/taildrop_test.go | 4 +- 14 files changed, 74 insertions(+), 80 deletions(-) rename {taildrop => feature/taildrop}/delete.go (97%) rename {taildrop => feature/taildrop}/delete_test.go (99%) rename {taildrop => feature/taildrop}/resume.go (76%) rename {taildrop => feature/taildrop}/resume_test.go (92%) rename {taildrop => feature/taildrop}/retrieve.go (95%) rename {taildrop => feature/taildrop}/send.go (96%) rename {taildrop => feature/taildrop}/taildrop.go (93%) rename {taildrop => feature/taildrop}/taildrop_test.go (94%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 53a37fe01..28fe4be8b 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -908,7 +908,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/sessionrecording from tailscale.com/k8s-operator/sessionrecording+ tailscale.com/syncs from tailscale.com/control/controlknobs+ tailscale.com/tailcfg from tailscale.com/client/local+ - tailscale.com/taildrop from tailscale.com/feature/taildrop tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tempfork/httprec from tailscale.com/control/controlclient diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index aa11fb9f3..1af828f75 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -359,7 +359,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de LD 💣 tailscale.com/ssh/tailssh from tailscale.com/cmd/tailscaled tailscale.com/syncs from tailscale.com/cmd/tailscaled+ tailscale.com/tailcfg from tailscale.com/client/local+ - tailscale.com/taildrop from tailscale.com/feature/taildrop tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal LD tailscale.com/tempfork/gliderlabs/ssh from tailscale.com/ssh/tailssh tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock diff --git a/taildrop/delete.go b/feature/taildrop/delete.go similarity index 97% rename from taildrop/delete.go rename to feature/taildrop/delete.go index aaef34df1..e9c8d7f1c 100644 --- a/taildrop/delete.go +++ b/feature/taildrop/delete.go @@ -47,7 +47,7 @@ type deleteFile struct { inserted time.Time } -func (d 
*fileDeleter) Init(m *Manager, eventHook func(string)) { +func (d *fileDeleter) Init(m *manager, eventHook func(string)) { d.logf = m.opts.Logf d.clock = m.opts.Clock d.dir = m.opts.Dir @@ -81,7 +81,7 @@ func (d *fileDeleter) Init(m *Manager, eventHook func(string)) { // Only enqueue the file for deletion if there is no active put. nameID := strings.TrimSuffix(de.Name(), partialSuffix) if i := strings.LastIndexByte(nameID, '.'); i > 0 { - key := incomingFileKey{ClientID(nameID[i+len("."):]), nameID[:i]} + key := incomingFileKey{clientID(nameID[i+len("."):]), nameID[:i]} m.incomingFiles.LoadFunc(key, func(_ *incomingFile, loaded bool) { if !loaded { d.Insert(de.Name()) diff --git a/taildrop/delete_test.go b/feature/taildrop/delete_test.go similarity index 99% rename from taildrop/delete_test.go rename to feature/taildrop/delete_test.go index 5fa4b9c37..7a58de55c 100644 --- a/taildrop/delete_test.go +++ b/feature/taildrop/delete_test.go @@ -69,7 +69,7 @@ func TestDeleter(t *testing.T) { } eventHook := func(event string) { eventsChan <- event } - var m Manager + var m manager var fd fileDeleter m.opts.Logf = t.Logf m.opts.Clock = tstime.DefaultClock{Clock: clock} @@ -142,7 +142,7 @@ func TestDeleter(t *testing.T) { // Test that the asynchronous full scan of the taildrop directory does not occur // on a cold start if taildrop has never received any files. func TestDeleterInitWithoutTaildrop(t *testing.T) { - var m Manager + var m manager var fd fileDeleter m.opts.Logf = t.Logf m.opts.Dir = t.TempDir() diff --git a/feature/taildrop/ext.go b/feature/taildrop/ext.go index b86c0f926..058418cde 100644 --- a/feature/taildrop/ext.go +++ b/feature/taildrop/ext.go @@ -22,7 +22,6 @@ import ( "tailscale.com/ipn/ipnext" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" - "tailscale.com/taildrop" "tailscale.com/tstime" "tailscale.com/types/empty" "tailscale.com/types/logger" @@ -72,7 +71,7 @@ type Extension struct { selfUID tailcfg.UserID capFileSharing bool fileWaiters set.HandleSet[context.CancelFunc] // of wake-up funcs - mgr atomic.Pointer[taildrop.Manager] // mutex held to write; safe to read without lock; + mgr atomic.Pointer[manager] // mutex held to write; safe to read without lock; // outgoingFiles keeps track of Taildrop outgoing files keyed to their OutgoingFile.ID outgoingFiles map[string]*ipn.OutgoingFile } @@ -113,7 +112,7 @@ func (e *Extension) onSelfChange(self tailcfg.NodeView) { osshare.SetFileSharingEnabled(e.capFileSharing, e.logf) } -func (e *Extension) setMgrLocked(mgr *taildrop.Manager) { +func (e *Extension) setMgrLocked(mgr *manager) { if old := e.mgr.Swap(mgr); old != nil { old.Shutdown() } @@ -141,7 +140,7 @@ func (e *Extension) onChangeProfile(profile ipn.LoginProfileView, _ ipn.PrefsVie if fileRoot == "" { e.logf("no Taildrop directory configured") } - e.setMgrLocked(taildrop.ManagerOptions{ + e.setMgrLocked(managerOptions{ Logf: e.logf, Clock: tstime.DefaultClock{Clock: e.sb.Clock()}, State: e.stateStore, @@ -191,10 +190,10 @@ func (e *Extension) hasCapFileSharing() bool { return e.capFileSharing } -// manager returns the active taildrop.Manager, or nil. +// manager returns the active Manager, or nil. // // Methods on a nil Manager are safe to call. 
-func (e *Extension) manager() *taildrop.Manager { +func (e *Extension) manager() *manager { return e.mgr.Load() } diff --git a/feature/taildrop/localapi.go b/feature/taildrop/localapi.go index 02e6b0b52..8a3904f9f 100644 --- a/feature/taildrop/localapi.go +++ b/feature/taildrop/localapi.go @@ -24,7 +24,6 @@ import ( "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/localapi" "tailscale.com/tailcfg" - "tailscale.com/taildrop" "tailscale.com/util/clientmetric" "tailscale.com/util/httphdr" "tailscale.com/util/mak" @@ -320,7 +319,7 @@ func singleFilePut( default: resumeStart := time.Now() dec := json.NewDecoder(resp.Body) - offset, remainingBody, err = taildrop.ResumeReader(body, func() (out taildrop.BlockChecksum, err error) { + offset, remainingBody, err = resumeReader(body, func() (out blockChecksum, err error) { err = dec.Decode(&out) return out, err }) diff --git a/feature/taildrop/peerapi.go b/feature/taildrop/peerapi.go index a81ce9c3a..b75ce33b8 100644 --- a/feature/taildrop/peerapi.go +++ b/feature/taildrop/peerapi.go @@ -14,7 +14,6 @@ import ( "tailscale.com/ipn/ipnlocal" "tailscale.com/tailcfg" - "tailscale.com/taildrop" "tailscale.com/tstime" "tailscale.com/util/clientmetric" "tailscale.com/util/httphdr" @@ -49,7 +48,7 @@ func handlePeerPut(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Req // extensionForPut is the subset of taildrop extension that taildrop // file put needs. This is pulled out for testability. type extensionForPut interface { - manager() *taildrop.Manager + manager() *manager hasCapFileSharing() bool Clock() tstime.Clock } @@ -67,11 +66,11 @@ func handlePeerPutWithBackend(h ipnlocal.PeerAPIHandler, ext extensionForPut, w } if !canPutFile(h) { - http.Error(w, taildrop.ErrNoTaildrop.Error(), http.StatusForbidden) + http.Error(w, ErrNoTaildrop.Error(), http.StatusForbidden) return } if !ext.hasCapFileSharing() { - http.Error(w, taildrop.ErrNoTaildrop.Error(), http.StatusForbidden) + http.Error(w, ErrNoTaildrop.Error(), http.StatusForbidden) return } rawPath := r.URL.EscapedPath() @@ -82,13 +81,13 @@ func handlePeerPutWithBackend(h ipnlocal.PeerAPIHandler, ext extensionForPut, w } baseName, err := url.PathUnescape(prefix) if err != nil { - http.Error(w, taildrop.ErrInvalidFileName.Error(), http.StatusBadRequest) + http.Error(w, ErrInvalidFileName.Error(), http.StatusBadRequest) return } enc := json.NewEncoder(w) switch r.Method { case "GET": - id := taildrop.ClientID(h.Peer().StableID()) + id := clientID(h.Peer().StableID()) if prefix == "" { // List all the partial files. 
files, err := taildropMgr.PartialFiles(id) @@ -128,7 +127,7 @@ func handlePeerPutWithBackend(h ipnlocal.PeerAPIHandler, ext extensionForPut, w } case "PUT": t0 := ext.Clock().Now() - id := taildrop.ClientID(h.Peer().StableID()) + id := clientID(h.Peer().StableID()) var offset int64 if rangeHdr := r.Header.Get("Range"); rangeHdr != "" { @@ -139,17 +138,17 @@ func handlePeerPutWithBackend(h ipnlocal.PeerAPIHandler, ext extensionForPut, w } offset = ranges[0].Start } - n, err := taildropMgr.PutFile(taildrop.ClientID(fmt.Sprint(id)), baseName, r.Body, offset, r.ContentLength) + n, err := taildropMgr.PutFile(clientID(fmt.Sprint(id)), baseName, r.Body, offset, r.ContentLength) switch err { case nil: d := ext.Clock().Since(t0).Round(time.Second / 10) h.Logf("got put of %s in %v from %v/%v", approxSize(n), d, h.RemoteAddr().Addr(), h.Peer().ComputedName) io.WriteString(w, "{}\n") - case taildrop.ErrNoTaildrop: + case ErrNoTaildrop: http.Error(w, err.Error(), http.StatusForbidden) - case taildrop.ErrInvalidFileName: + case ErrInvalidFileName: http.Error(w, err.Error(), http.StatusBadRequest) - case taildrop.ErrFileExists: + case ErrFileExists: http.Error(w, err.Error(), http.StatusConflict) default: http.Error(w, err.Error(), http.StatusInternalServerError) diff --git a/feature/taildrop/peerapi_test.go b/feature/taildrop/peerapi_test.go index a647add37..1a003b6ed 100644 --- a/feature/taildrop/peerapi_test.go +++ b/feature/taildrop/peerapi_test.go @@ -21,7 +21,6 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn/ipnlocal" "tailscale.com/tailcfg" - "tailscale.com/taildrop" "tailscale.com/tstest" "tailscale.com/tstime" "tailscale.com/types/logger" @@ -54,10 +53,10 @@ type fakeExtension struct { logf logger.Logf capFileSharing bool clock tstime.Clock - taildrop *taildrop.Manager + taildrop *manager } -func (lb *fakeExtension) manager() *taildrop.Manager { +func (lb *fakeExtension) manager() *manager { return lb.taildrop } func (lb *fakeExtension) Clock() tstime.Clock { return lb.clock } @@ -66,7 +65,7 @@ func (lb *fakeExtension) hasCapFileSharing() bool { } type peerAPITestEnv struct { - taildrop *taildrop.Manager + taildrop *manager ph *peerAPIHandler rr *httptest.ResponseRecorder logBuf tstest.MemLogger @@ -477,7 +476,7 @@ func TestHandlePeerAPI(t *testing.T) { } var e peerAPITestEnv - e.taildrop = taildrop.ManagerOptions{ + e.taildrop = managerOptions{ Logf: e.logBuf.Logf, Dir: rootDir, }.New() @@ -526,7 +525,7 @@ func TestHandlePeerAPI(t *testing.T) { // a bit. So test that we work around that sufficiently. func TestFileDeleteRace(t *testing.T) { dir := t.TempDir() - taildropMgr := taildrop.ManagerOptions{ + taildropMgr := managerOptions{ Logf: t.Logf, Dir: dir, }.New() diff --git a/taildrop/resume.go b/feature/taildrop/resume.go similarity index 76% rename from taildrop/resume.go rename to feature/taildrop/resume.go index f7bee3d95..211a1ff6b 100644 --- a/taildrop/resume.go +++ b/feature/taildrop/resume.go @@ -19,29 +19,29 @@ var ( hashAlgorithm = "sha256" ) -// BlockChecksum represents the checksum for a single block. -type BlockChecksum struct { - Checksum Checksum `json:"checksum"` +// blockChecksum represents the checksum for a single block. +type blockChecksum struct { + Checksum checksum `json:"checksum"` Algorithm string `json:"algo"` // always "sha256" for now Size int64 `json:"size"` // always (64<<10) for now } -// Checksum is an opaque checksum that is comparable. -type Checksum struct{ cs [sha256.Size]byte } +// checksum is an opaque checksum that is comparable. 
+type checksum struct{ cs [sha256.Size]byte } -func hash(b []byte) Checksum { - return Checksum{sha256.Sum256(b)} +func hash(b []byte) checksum { + return checksum{sha256.Sum256(b)} } -func (cs Checksum) String() string { +func (cs checksum) String() string { return hex.EncodeToString(cs.cs[:]) } -func (cs Checksum) AppendText(b []byte) ([]byte, error) { +func (cs checksum) AppendText(b []byte) ([]byte, error) { return hex.AppendEncode(b, cs.cs[:]), nil } -func (cs Checksum) MarshalText() ([]byte, error) { +func (cs checksum) MarshalText() ([]byte, error) { return hex.AppendEncode(nil, cs.cs[:]), nil } -func (cs *Checksum) UnmarshalText(b []byte) error { +func (cs *checksum) UnmarshalText(b []byte) error { if len(b) != 2*len(cs.cs) { return fmt.Errorf("invalid hex length: %d", len(b)) } @@ -51,7 +51,7 @@ func (cs *Checksum) UnmarshalText(b []byte) error { // PartialFiles returns a list of partial files in [Handler.Dir] // that were sent (or is actively being sent) by the provided id. -func (m *Manager) PartialFiles(id ClientID) (ret []string, err error) { +func (m *manager) PartialFiles(id clientID) (ret []string, err error) { if m == nil || m.opts.Dir == "" { return nil, ErrNoTaildrop } @@ -72,11 +72,11 @@ func (m *Manager) PartialFiles(id ClientID) (ret []string, err error) { // starting from the beginning of the file. // It returns (BlockChecksum{}, io.EOF) when the stream is complete. // It is the caller's responsibility to call close. -func (m *Manager) HashPartialFile(id ClientID, baseName string) (next func() (BlockChecksum, error), close func() error, err error) { +func (m *manager) HashPartialFile(id clientID, baseName string) (next func() (blockChecksum, error), close func() error, err error) { if m == nil || m.opts.Dir == "" { return nil, nil, ErrNoTaildrop } - noopNext := func() (BlockChecksum, error) { return BlockChecksum{}, io.EOF } + noopNext := func() (blockChecksum, error) { return blockChecksum{}, io.EOF } noopClose := func() error { return nil } dstFile, err := joinDir(m.opts.Dir, baseName) @@ -92,25 +92,25 @@ func (m *Manager) HashPartialFile(id ClientID, baseName string) (next func() (Bl } b := make([]byte, blockSize) // TODO: Pool this? - next = func() (BlockChecksum, error) { + next = func() (blockChecksum, error) { switch n, err := io.ReadFull(f, b); { case err != nil && err != io.EOF && err != io.ErrUnexpectedEOF: - return BlockChecksum{}, redactError(err) + return blockChecksum{}, redactError(err) case n == 0: - return BlockChecksum{}, io.EOF + return blockChecksum{}, io.EOF default: - return BlockChecksum{hash(b[:n]), hashAlgorithm, int64(n)}, nil + return blockChecksum{hash(b[:n]), hashAlgorithm, int64(n)}, nil } } close = f.Close return next, close, nil } -// ResumeReader reads and discards the leading content of r +// resumeReader reads and discards the leading content of r // that matches the content based on the checksums that exist. // It returns the number of bytes consumed, // and returns an [io.Reader] representing the remaining content. 
-func ResumeReader(r io.Reader, hashNext func() (BlockChecksum, error)) (int64, io.Reader, error) { +func resumeReader(r io.Reader, hashNext func() (blockChecksum, error)) (int64, io.Reader, error) { if hashNext == nil { return 0, r, nil } diff --git a/taildrop/resume_test.go b/feature/taildrop/resume_test.go similarity index 92% rename from taildrop/resume_test.go rename to feature/taildrop/resume_test.go index d366340eb..dac3c657b 100644 --- a/taildrop/resume_test.go +++ b/feature/taildrop/resume_test.go @@ -19,7 +19,7 @@ func TestResume(t *testing.T) { defer func() { blockSize = oldBlockSize }() blockSize = 256 - m := ManagerOptions{Logf: t.Logf, Dir: t.TempDir()}.New() + m := managerOptions{Logf: t.Logf, Dir: t.TempDir()}.New() defer m.Shutdown() rn := rand.New(rand.NewSource(0)) @@ -32,7 +32,7 @@ func TestResume(t *testing.T) { next, close, err := m.HashPartialFile("", "foo") must.Do(err) defer close() - offset, r, err := ResumeReader(r, next) + offset, r, err := resumeReader(r, next) must.Do(err) must.Do(close()) // Windows wants the file handle to be closed to rename it. @@ -51,7 +51,7 @@ func TestResume(t *testing.T) { next, close, err := m.HashPartialFile("", "bar") must.Do(err) defer close() - offset, r, err := ResumeReader(r, next) + offset, r, err := resumeReader(r, next) must.Do(err) must.Do(close()) // Windows wants the file handle to be closed to rename it. diff --git a/taildrop/retrieve.go b/feature/taildrop/retrieve.go similarity index 95% rename from taildrop/retrieve.go rename to feature/taildrop/retrieve.go index 3e37b492a..6fb975193 100644 --- a/taildrop/retrieve.go +++ b/feature/taildrop/retrieve.go @@ -20,7 +20,7 @@ import ( // HasFilesWaiting reports whether any files are buffered in [Handler.Dir]. // This always returns false when [Handler.DirectFileMode] is false. -func (m *Manager) HasFilesWaiting() (has bool) { +func (m *manager) HasFilesWaiting() (has bool) { if m == nil || m.opts.Dir == "" || m.opts.DirectFileMode { return false } @@ -61,7 +61,7 @@ func (m *Manager) HasFilesWaiting() (has bool) { // WaitingFiles returns the list of files that have been sent by a // peer that are waiting in [Handler.Dir]. // This always returns nil when [Handler.DirectFileMode] is false. -func (m *Manager) WaitingFiles() (ret []apitype.WaitingFile, err error) { +func (m *manager) WaitingFiles() (ret []apitype.WaitingFile, err error) { if m == nil || m.opts.Dir == "" { return nil, ErrNoTaildrop } @@ -94,7 +94,7 @@ func (m *Manager) WaitingFiles() (ret []apitype.WaitingFile, err error) { // DeleteFile deletes a file of the given baseName from [Handler.Dir]. // This method is only allowed when [Handler.DirectFileMode] is false. -func (m *Manager) DeleteFile(baseName string) error { +func (m *manager) DeleteFile(baseName string) error { if m == nil || m.opts.Dir == "" { return ErrNoTaildrop } @@ -151,7 +151,7 @@ func touchFile(path string) error { // OpenFile opens a file of the given baseName from [Handler.Dir]. // This method is only allowed when [Handler.DirectFileMode] is false. 
-func (m *Manager) OpenFile(baseName string) (rc io.ReadCloser, size int64, err error) { +func (m *manager) OpenFile(baseName string) (rc io.ReadCloser, size int64, err error) { if m == nil || m.opts.Dir == "" { return nil, 0, ErrNoTaildrop } diff --git a/taildrop/send.go b/feature/taildrop/send.go similarity index 96% rename from taildrop/send.go rename to feature/taildrop/send.go index 0dff71b24..98c3934bb 100644 --- a/taildrop/send.go +++ b/feature/taildrop/send.go @@ -19,7 +19,7 @@ import ( ) type incomingFileKey struct { - id ClientID + id clientID name string // e.g., "foo.jpeg" } @@ -61,19 +61,19 @@ func (f *incomingFile) Write(p []byte) (n int, err error) { return n, err } -// PutFile stores a file into [Manager.Dir] from a given client id. +// PutFile stores a file into [manager.Dir] from a given client id. // The baseName must be a base filename without any slashes. // The length is the expected length of content to read from r, // it may be negative to indicate that it is unknown. // It returns the length of the entire file. // // If there is a failure reading from r, then the partial file is not deleted -// for some period of time. The [Manager.PartialFiles] and [Manager.HashPartialFile] +// for some period of time. The [manager.PartialFiles] and [manager.HashPartialFile] // methods may be used to list all partial files and to compute the hash for a // specific partial file. This allows the client to determine whether to resume // a partial file. While resuming, PutFile may be called again with a non-zero // offset to specify where to resume receiving data at. -func (m *Manager) PutFile(id ClientID, baseName string, r io.Reader, offset, length int64) (int64, error) { +func (m *manager) PutFile(id clientID, baseName string, r io.Reader, offset, length int64) (int64, error) { switch { case m == nil || m.opts.Dir == "": return 0, ErrNoTaildrop @@ -227,7 +227,7 @@ func (m *Manager) PutFile(id ClientID, baseName string, r io.Reader, offset, len } // Choose a new destination filename and try again. - dstPath = NextFilename(dstPath) + dstPath = nextFilename(dstPath) inFile.finalPath = dstPath } if maxRetries <= 0 { diff --git a/taildrop/taildrop.go b/feature/taildrop/taildrop.go similarity index 93% rename from taildrop/taildrop.go rename to feature/taildrop/taildrop.go index 6996dbc4d..2e5c94861 100644 --- a/taildrop/taildrop.go +++ b/feature/taildrop/taildrop.go @@ -54,20 +54,20 @@ const ( deletedSuffix = ".deleted" ) -// ClientID is an opaque identifier for file resumption. +// clientID is an opaque identifier for file resumption. // A client can only list and resume partial files for its own ID. // It must contain any filesystem specific characters (e.g., slashes). -type ClientID string // e.g., "n12345CNTRL" +type clientID string // e.g., "n12345CNTRL" -func (id ClientID) partialSuffix() string { +func (id clientID) partialSuffix() string { if id == "" { return partialSuffix } return "." + string(id) + partialSuffix // e.g., ".n12345CNTRL.partial" } -// ManagerOptions are options to configure the [Manager]. -type ManagerOptions struct { +// managerOptions are options to configure the [manager]. +type managerOptions struct { Logf logger.Logf // may be nil Clock tstime.DefaultClock // may be nil State ipn.StateStore // may be nil @@ -98,9 +98,9 @@ type ManagerOptions struct { SendFileNotify func() } -// Manager manages the state for receiving and managing taildropped files. 
-type Manager struct { - opts ManagerOptions +// manager manages the state for receiving and managing taildropped files. +type manager struct { + opts managerOptions // incomingFiles is a map of files actively being received. incomingFiles syncs.Map[incomingFileKey, *incomingFile] @@ -120,27 +120,27 @@ type Manager struct { // New initializes a new taildrop manager. // It may spawn asynchronous goroutines to delete files, // so the Shutdown method must be called for resource cleanup. -func (opts ManagerOptions) New() *Manager { +func (opts managerOptions) New() *manager { if opts.Logf == nil { opts.Logf = logger.Discard } if opts.SendFileNotify == nil { opts.SendFileNotify = func() {} } - m := &Manager{opts: opts} + m := &manager{opts: opts} m.deleter.Init(m, func(string) {}) m.emptySince.Store(-1) // invalidate this cache return m } // Dir returns the directory. -func (m *Manager) Dir() string { +func (m *manager) Dir() string { return m.opts.Dir } // Shutdown shuts down the Manager. // It blocks until all spawned goroutines have stopped running. -func (m *Manager) Shutdown() { +func (m *manager) Shutdown() { if m != nil { m.deleter.shutdown() m.deleter.group.Wait() @@ -222,7 +222,7 @@ func rangeDir(dir string, fn func(fs.DirEntry) bool) error { } // IncomingFiles returns a list of active incoming files. -func (m *Manager) IncomingFiles() []ipn.PartialFile { +func (m *manager) IncomingFiles() []ipn.PartialFile { // Make sure we always set n.IncomingFiles non-nil so it gets encoded // in JSON to clients. They distinguish between empty and non-nil // to know whether a Notify should be able about files. @@ -318,12 +318,12 @@ var ( rxNumberSuffix = regexp.MustCompile(` \([0-9]+\)`) ) -// NextFilename returns the next filename in a sequence. +// nextFilename returns the next filename in a sequence. // It is used for construction a new filename if there is a conflict. // // For example, "Foo.jpg" becomes "Foo (1).jpg" and // "Foo (1).jpg" becomes "Foo (2).jpg". 
-func NextFilename(name string) string { +func nextFilename(name string) string { ext := rxExtensionSuffix.FindString(strings.TrimPrefix(name, ".")) name = strings.TrimSuffix(name, ext) var n uint64 diff --git a/taildrop/taildrop_test.go b/feature/taildrop/taildrop_test.go similarity index 94% rename from taildrop/taildrop_test.go rename to feature/taildrop/taildrop_test.go index df4783c30..da0bd2f43 100644 --- a/taildrop/taildrop_test.go +++ b/feature/taildrop/taildrop_test.go @@ -59,10 +59,10 @@ func TestNextFilename(t *testing.T) { } for _, tt := range tests { - if got := NextFilename(tt.in); got != tt.want { + if got := nextFilename(tt.in); got != tt.want { t.Errorf("NextFilename(%q) = %q, want %q", tt.in, got, tt.want) } - if got2 := NextFilename(tt.want); got2 != tt.want2 { + if got2 := nextFilename(tt.want); got2 != tt.want2 { t.Errorf("NextFilename(%q) = %q, want %q", tt.want, got2, tt.want2) } } From fee78de1ee105fcf7c964d5d6f01bda9e013b058 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 6 May 2025 21:07:09 -0700 Subject: [PATCH 0829/1708] Makefile: add tsnet to depaware Updates #12614 Change-Id: Iff30bc457efcc96f60b563195b213cbc4dccc349 Signed-off-by: Brad Fitzpatrick --- Makefile | 6 +- tsnet/depaware.txt | 680 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 684 insertions(+), 2 deletions(-) create mode 100644 tsnet/depaware.txt diff --git a/Makefile b/Makefile index 30ac5327a..f4e5abc87 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,8 @@ updatedeps: ## Update depaware deps tailscale.com/cmd/tailscale \ tailscale.com/cmd/derper \ tailscale.com/cmd/k8s-operator \ - tailscale.com/cmd/stund + tailscale.com/cmd/stund \ + tailscale.com/tsnet depaware: ## Run depaware checks # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" @@ -32,7 +33,8 @@ depaware: ## Run depaware checks tailscale.com/cmd/tailscale \ tailscale.com/cmd/derper \ tailscale.com/cmd/k8s-operator \ - tailscale.com/cmd/stund + tailscale.com/cmd/stund \ + tailscale.com/tsnet buildwindows: ## Build tailscale CLI for windows/amd64 GOOS=windows GOARCH=amd64 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt new file mode 100644 index 000000000..b60babb7c --- /dev/null +++ b/tsnet/depaware.txt @@ -0,0 +1,680 @@ +tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) + + filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus + filippo.io/edwards25519/field from filippo.io/edwards25519 + W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ + W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate + W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy + L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ + L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore + L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ + L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/aws-sdk-go-v2/aws/ratelimit from 
github.com/aws/aws-sdk-go-v2/aws/retry + L github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ + L github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 + L github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ + L github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore + L github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds + L github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds + L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ + L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ + L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ + L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds + L github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 + L github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws + L github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry + L github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore + L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm + L github.com/aws/aws-sdk-go-v2/service/ssm/types from 
github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso + L github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso + L github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc + L github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc + L github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ + L github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ + L github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ + L github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer + L github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ + L github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer + L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ + L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ + L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ + L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ + L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ + L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http + L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm + github.com/coder/websocket from tailscale.com/util/eventbus + github.com/coder/websocket/internal/errd from github.com/coder/websocket + github.com/coder/websocket/internal/util from github.com/coder/websocket + github.com/coder/websocket/internal/xsync from github.com/coder/websocket + L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw + W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ + W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ + W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc + W 
github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com + W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/osdiag+ + LW 💣 github.com/digitalocean/go-smbios/smbios from tailscale.com/posture + github.com/fxamacker/cbor/v2 from tailscale.com/tka + github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/go-json-experiment/json from tailscale.com/types/opt+ + github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ + W 💣 github.com/go-ole/go-ole from github.com/go-ole/go-ole/oleutil+ + W 💣 github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet + L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns + github.com/golang/groupcache/lru from tailscale.com/net/dnscache + github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ + github.com/google/go-tpm/legacy/tpm2 from github.com/google/go-tpm/tpm2+ + github.com/google/go-tpm/tpm2 from tailscale.com/feature/tpm + github.com/google/go-tpm/tpm2/transport from github.com/google/go-tpm/tpm2+ + L github.com/google/go-tpm/tpm2/transport/linuxtpm from tailscale.com/feature/tpm + W github.com/google/go-tpm/tpm2/transport/windowstpm from tailscale.com/feature/tpm + github.com/google/go-tpm/tpmutil from github.com/google/go-tpm/legacy/tpm2+ + W 💣 github.com/google/go-tpm/tpmutil/tbs from github.com/google/go-tpm/legacy/tpm2+ + L github.com/google/nftables from tailscale.com/util/linuxfw + L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt + L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ + L github.com/google/nftables/expr from github.com/google/nftables+ + L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ + L github.com/google/nftables/xt from github.com/google/nftables/expr+ + DW github.com/google/uuid from github.com/prometheus-community/pro-bing+ + github.com/gorilla/csrf from tailscale.com/client/web + github.com/gorilla/securecookie from github.com/gorilla/csrf + github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ + L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns + L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 + L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/feature/tap + L github.com/insomniacslk/dhcp/iana from github.com/insomniacslk/dhcp/dhcpv4 + L github.com/insomniacslk/dhcp/interfaces from github.com/insomniacslk/dhcp/dhcpv4 + L github.com/insomniacslk/dhcp/rfc1035label from github.com/insomniacslk/dhcp/dhcpv4 + L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm + L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon + L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink + github.com/klauspost/compress from github.com/klauspost/compress/zstd + github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 + github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd + 
github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd + github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe + github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd + github.com/kortschak/wol from tailscale.com/feature/wakeonlan + L github.com/mdlayher/genetlink from tailscale.com/net/tstun + L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ + L github.com/mdlayher/netlink/nltest from github.com/google/nftables + L github.com/mdlayher/sdnotify from tailscale.com/util/systemd + L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ + github.com/miekg/dns from tailscale.com/net/dns/recursive + 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket + L github.com/pierrec/lz4/v4 from github.com/u-root/uio/uio + L github.com/pierrec/lz4/v4/internal/lz4block from github.com/pierrec/lz4/v4+ + L github.com/pierrec/lz4/v4/internal/lz4errors from github.com/pierrec/lz4/v4+ + L github.com/pierrec/lz4/v4/internal/lz4stream from github.com/pierrec/lz4/v4 + L github.com/pierrec/lz4/v4/internal/xxh32 from github.com/pierrec/lz4/v4/internal/lz4stream + D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack + L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient + W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket + W 💣 github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio + W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio + W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs + W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ + github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ + github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper + github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ + github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp + github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ + github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp + github.com/tailscale/hujson from tailscale.com/ipn/conffile + L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ + L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink + github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + github.com/tailscale/web-client-prebuilt from tailscale.com/client/web + 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ + W 💣 github.com/tailscale/wireguard-go/conn/winrio from github.com/tailscale/wireguard-go/conn + 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ + 💣 github.com/tailscale/wireguard-go/ipc from github.com/tailscale/wireguard-go/device + W 💣 github.com/tailscale/wireguard-go/ipc/namedpipe from github.com/tailscale/wireguard-go/ipc + github.com/tailscale/wireguard-go/ratelimiter from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/replay from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ + github.com/tailscale/wireguard-go/tai64n from 
github.com/tailscale/wireguard-go/device + 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ + L github.com/u-root/uio/rand from github.com/insomniacslk/dhcp/dhcpv4 + L github.com/u-root/uio/uio from github.com/insomniacslk/dhcp/dhcpv4+ + L github.com/vishvananda/netns from github.com/tailscale/netlink+ + github.com/x448/float16 from github.com/fxamacker/cbor/v2 + 💣 go4.org/mem from tailscale.com/client/local+ + go4.org/netipx from tailscale.com/ipn/ipnlocal+ + W 💣 golang.zx2c4.com/wintun from github.com/tailscale/wireguard-go/tun + W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/dns+ + gvisor.dev/gvisor/pkg/atomicbitops from gvisor.dev/gvisor/pkg/buffer+ + gvisor.dev/gvisor/pkg/bits from gvisor.dev/gvisor/pkg/buffer + 💣 gvisor.dev/gvisor/pkg/buffer from gvisor.dev/gvisor/pkg/tcpip+ + gvisor.dev/gvisor/pkg/context from gvisor.dev/gvisor/pkg/refs + 💣 gvisor.dev/gvisor/pkg/gohacks from gvisor.dev/gvisor/pkg/state/wire+ + gvisor.dev/gvisor/pkg/linewriter from gvisor.dev/gvisor/pkg/log + gvisor.dev/gvisor/pkg/log from gvisor.dev/gvisor/pkg/context+ + gvisor.dev/gvisor/pkg/rand from gvisor.dev/gvisor/pkg/tcpip+ + gvisor.dev/gvisor/pkg/refs from gvisor.dev/gvisor/pkg/buffer+ + 💣 gvisor.dev/gvisor/pkg/sleep from gvisor.dev/gvisor/pkg/tcpip/transport/tcp + 💣 gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ + gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state + 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ + 💣 gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack + gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/tcpip/adapters/gonet from tailscale.com/wgengine/netstack + 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ + gvisor.dev/gvisor/pkg/tcpip/hash/jenkins from gvisor.dev/gvisor/pkg/tcpip/stack+ + gvisor.dev/gvisor/pkg/tcpip/header from gvisor.dev/gvisor/pkg/tcpip/header/parse+ + gvisor.dev/gvisor/pkg/tcpip/header/parse from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ + gvisor.dev/gvisor/pkg/tcpip/internal/tcp from gvisor.dev/gvisor/pkg/tcpip/transport/tcp + gvisor.dev/gvisor/pkg/tcpip/network/hash from gvisor.dev/gvisor/pkg/tcpip/network/ipv4 + gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ + gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ + gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ + gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/feature/tap+ + gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/feature/tap+ + gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ + gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ + 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/tcpip/stack/gro from tailscale.com/wgengine/netstack/gro + gvisor.dev/gvisor/pkg/tcpip/transport from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ + gvisor.dev/gvisor/pkg/tcpip/transport/icmp from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip/transport/internal/network from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ + gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop from gvisor.dev/gvisor/pkg/tcpip/transport/raw + gvisor.dev/gvisor/pkg/tcpip/transport/packet from gvisor.dev/gvisor/pkg/tcpip/transport/raw + 
gvisor.dev/gvisor/pkg/tcpip/transport/raw from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ + 💣 gvisor.dev/gvisor/pkg/tcpip/transport/tcp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack from gvisor.dev/gvisor/pkg/tcpip/stack + gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ + tailscale.com from tailscale.com/version + tailscale.com/appc from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/atomicfile from tailscale.com/ipn+ + tailscale.com/client/local from tailscale.com/client/tailscale+ + tailscale.com/client/tailscale from tailscale.com/derp+ + tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ + tailscale.com/client/web from tailscale.com/ipn/ipnlocal + tailscale.com/clientupdate from tailscale.com/client/web+ + LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate + tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ + tailscale.com/control/controlclient from tailscale.com/ipn/ipnext+ + tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp + tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/derp from tailscale.com/derp/derphttp+ + tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ + tailscale.com/disco from tailscale.com/derp+ + tailscale.com/doctor from tailscale.com/ipn/ipnlocal + tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal + tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal + tailscale.com/drive from tailscale.com/client/local+ + tailscale.com/envknob from tailscale.com/client/local+ + tailscale.com/envknob/featureknob from tailscale.com/client/web+ + tailscale.com/feature from tailscale.com/feature/capture+ + tailscale.com/feature/capture from tailscale.com/feature/condregister + tailscale.com/feature/condregister from tailscale.com/tsnet + tailscale.com/feature/relayserver from tailscale.com/feature/condregister + tailscale.com/feature/taildrop from tailscale.com/feature/condregister + L tailscale.com/feature/tap from tailscale.com/feature/condregister + tailscale.com/feature/tpm from tailscale.com/feature/condregister + tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister + tailscale.com/health from tailscale.com/control/controlclient+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal + tailscale.com/hostinfo from tailscale.com/client/web+ + tailscale.com/internal/noiseconn from tailscale.com/control/controlclient + tailscale.com/ipn from tailscale.com/client/local+ + tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ + tailscale.com/ipn/ipnext from tailscale.com/feature/relayserver+ + tailscale.com/ipn/ipnlocal from tailscale.com/feature/relayserver+ + tailscale.com/ipn/ipnstate from tailscale.com/client/local+ + tailscale.com/ipn/localapi from tailscale.com/feature/capture+ + tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ + L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store + L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store + tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ + L 
tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ + L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore + tailscale.com/kube/kubetypes from tailscale.com/envknob+ + tailscale.com/licenses from tailscale.com/client/web + tailscale.com/log/filelogger from tailscale.com/logpolicy + tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal + tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ + tailscale.com/logtail from tailscale.com/control/controlclient+ + tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ + tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ + tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/connstats from tailscale.com/net/tstun+ + tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ + tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback + tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ + tailscale.com/net/dns/resolver from tailscale.com/net/dns + tailscale.com/net/dnscache from tailscale.com/control/controlclient+ + tailscale.com/net/dnsfallback from tailscale.com/control/controlclient+ + tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/memnet from tailscale.com/tsnet + tailscale.com/net/netaddr from tailscale.com/feature/tap+ + tailscale.com/net/netcheck from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/neterror from tailscale.com/net/dns/resolver+ + tailscale.com/net/netkernelconf from tailscale.com/ipn/ipnlocal + tailscale.com/net/netknob from tailscale.com/logpolicy+ + 💣 tailscale.com/net/netmon from tailscale.com/control/controlclient+ + 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ + W 💣 tailscale.com/net/netstat from tailscale.com/portlist + tailscale.com/net/netutil from tailscale.com/client/local+ + tailscale.com/net/netx from tailscale.com/control/controlclient+ + tailscale.com/net/packet from tailscale.com/feature/capture+ + tailscale.com/net/packet/checksum from tailscale.com/net/tstun + tailscale.com/net/ping from tailscale.com/net/netcheck+ + tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/proxymux from tailscale.com/tsnet + tailscale.com/net/routetable from tailscale.com/doctor/routetable + tailscale.com/net/socks5 from tailscale.com/tsnet + tailscale.com/net/sockstats from tailscale.com/control/controlclient+ + tailscale.com/net/stun from tailscale.com/ipn/localapi+ + L tailscale.com/net/tcpinfo from tailscale.com/derp + tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ + tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial + tailscale.com/net/tsaddr from tailscale.com/client/web+ + tailscale.com/net/tsdial from tailscale.com/control/controlclient+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + tailscale.com/net/tstun from tailscale.com/feature/tap+ + tailscale.com/net/udprelay from tailscale.com/feature/relayserver + tailscale.com/omit from tailscale.com/ipn/conffile + tailscale.com/paths from tailscale.com/client/local+ + 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal + tailscale.com/posture from tailscale.com/ipn/ipnlocal + tailscale.com/proxymap from tailscale.com/tsd+ + 💣 tailscale.com/safesocket from 
tailscale.com/client/local+ + tailscale.com/syncs from tailscale.com/control/controlhttp+ + tailscale.com/tailcfg from tailscale.com/client/local+ + tailscale.com/taildrop from tailscale.com/feature/taildrop + tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal + tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock + tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tka from tailscale.com/client/local+ + tailscale.com/tsconst from tailscale.com/ipn/ipnlocal+ + tailscale.com/tsd from tailscale.com/ipn/ipnext+ + tailscale.com/tstime from tailscale.com/control/controlclient+ + tailscale.com/tstime/mono from tailscale.com/net/tstun+ + tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tsweb from tailscale.com/util/eventbus + tailscale.com/tsweb/varz from tailscale.com/tsweb+ + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/bools from tailscale.com/tsnet + tailscale.com/types/dnstype from tailscale.com/client/local+ + tailscale.com/types/empty from tailscale.com/feature/taildrop+ + tailscale.com/types/ipproto from tailscale.com/feature/tap+ + tailscale.com/types/key from tailscale.com/client/local+ + tailscale.com/types/lazy from tailscale.com/clientupdate+ + tailscale.com/types/logger from tailscale.com/appc+ + tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ + tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netmap from tailscale.com/control/controlclient+ + tailscale.com/types/nettype from tailscale.com/ipn/localapi+ + tailscale.com/types/opt from tailscale.com/client/tailscale+ + tailscale.com/types/persist from tailscale.com/control/controlclient+ + tailscale.com/types/preftype from tailscale.com/ipn+ + tailscale.com/types/ptr from tailscale.com/control/controlclient+ + tailscale.com/types/result from tailscale.com/util/lineiter + tailscale.com/types/structs from tailscale.com/control/controlclient+ + tailscale.com/types/tkatype from tailscale.com/client/local+ + tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/clientmetric from tailscale.com/appc+ + tailscale.com/util/cloudenv from tailscale.com/hostinfo+ + tailscale.com/util/cmpver from tailscale.com/clientupdate+ + tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ + 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ + L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ + tailscale.com/util/dnsname from tailscale.com/appc+ + tailscale.com/util/eventbus from tailscale.com/ipn/localapi+ + tailscale.com/util/execqueue from tailscale.com/appc+ + tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal + tailscale.com/util/groupmember from tailscale.com/client/web+ + 💣 tailscale.com/util/hashx from tailscale.com/util/deephash + tailscale.com/util/httphdr from tailscale.com/feature/taildrop + tailscale.com/util/httpm from tailscale.com/client/tailscale+ + tailscale.com/util/lineiter from tailscale.com/hostinfo+ + L tailscale.com/util/linuxfw from tailscale.com/net/netns+ + tailscale.com/util/mak from tailscale.com/appc+ + tailscale.com/util/multierr from tailscale.com/control/controlclient+ + tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/nocasemaps from tailscale.com/types/ipproto + 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi + W 💣 
tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag + tailscale.com/util/osshare from tailscale.com/feature/taildrop+ + tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal + tailscale.com/util/progresstracking from tailscale.com/feature/taildrop + tailscale.com/util/race from tailscale.com/net/dns/resolver + tailscale.com/util/racebuild from tailscale.com/logpolicy + tailscale.com/util/rands from tailscale.com/feature/taildrop+ + tailscale.com/util/ringbuffer from tailscale.com/wgengine/magicsock + tailscale.com/util/set from tailscale.com/control/controlclient+ + tailscale.com/util/singleflight from tailscale.com/control/controlclient+ + tailscale.com/util/slicesx from tailscale.com/appc+ + tailscale.com/util/syspolicy from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ + tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ + tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock + tailscale.com/util/systemd from tailscale.com/control/controlclient+ + tailscale.com/util/testenv from tailscale.com/control/controlclient+ + tailscale.com/util/truncate from tailscale.com/logtail + tailscale.com/util/usermetric from tailscale.com/health+ + tailscale.com/util/vizerror from tailscale.com/tailcfg+ + 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ + W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ + W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns+ + W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal + W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ + tailscale.com/util/zstdframe from tailscale.com/control/controlclient+ + tailscale.com/version from tailscale.com/client/web+ + tailscale.com/version/distro from tailscale.com/client/web+ + tailscale.com/wgengine from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/filter from tailscale.com/control/controlclient+ + tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+ + 💣 tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/netlog from tailscale.com/wgengine + tailscale.com/wgengine/netstack from tailscale.com/tsnet + tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ + tailscale.com/wgengine/router from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ + tailscale.com/wgengine/wglog from tailscale.com/wgengine + W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router + golang.org/x/crypto/argon2 from tailscale.com/tka + golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ + golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ + LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf + golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + golang.org/x/crypto/cryptobyte/asn1 from 
crypto/ecdsa+ + golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/hkdf from tailscale.com/control/controlbase + golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/nacl/box from tailscale.com/types/key + golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box + golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device + golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal + LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh + golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ + golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ + golang.org/x/net/bpf from github.com/mdlayher/genetlink+ + golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/http/httpguts from golang.org/x/net/http2+ + golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ + golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2/hpack from golang.org/x/net/http2+ + golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ + golang.org/x/net/idna from golang.org/x/net/http/httpguts+ + golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 + golang.org/x/net/internal/iana from golang.org/x/net/icmp+ + golang.org/x/net/internal/socket from golang.org/x/net/icmp+ + golang.org/x/net/internal/socks from golang.org/x/net/proxy + golang.org/x/net/ipv4 from github.com/miekg/dns+ + golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/proxy from tailscale.com/net/netns + D golang.org/x/net/route from net+ + golang.org/x/sync/errgroup from github.com/mdlayher/socket+ + golang.org/x/sys/cpu from github.com/tailscale/certstore+ + LD golang.org/x/sys/unix from github.com/google/go-tpm/tpmutil+ + W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ + W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ + W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ + W golang.org/x/sys/windows/svc/mgr from tailscale.com/util/winutil + golang.org/x/term from tailscale.com/logpolicy + golang.org/x/text/secure/bidirule from golang.org/x/net/idna + golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ + golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ + golang.org/x/text/unicode/norm from golang.org/x/net/idna + golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ + archive/tar from tailscale.com/clientupdate + bufio from compress/flate+ + bytes from archive/tar+ + cmp from encoding/json+ + compress/flate from compress/gzip+ + compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ + W compress/zlib from debug/pe + container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp + container/list from crypto/tls+ + context from crypto/tls+ + crypto from crypto/ecdh+ + crypto/aes from crypto/internal/hpke+ + crypto/cipher from crypto/aes+ + crypto/des from crypto/tls+ + crypto/dsa from crypto/x509+ + crypto/ecdh from crypto/ecdsa+ + crypto/ecdsa from crypto/tls+ + crypto/ed25519 from crypto/tls+ + crypto/elliptic from crypto/ecdsa+ + crypto/hmac from crypto/tls+ + crypto/internal/boring from crypto/aes+ + crypto/internal/boring/bbig from crypto/ecdsa+ + crypto/internal/boring/sig from crypto/internal/boring + 
crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/fips140 from crypto/internal/fips140/aes+ + crypto/internal/fips140/aes from crypto/aes+ + crypto/internal/fips140/aes/gcm from crypto/cipher+ + crypto/internal/fips140/alias from crypto/cipher+ + crypto/internal/fips140/bigmod from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/check from crypto/internal/fips140/aes+ + crypto/internal/fips140/drbg from crypto/internal/fips140/aes/gcm+ + crypto/internal/fips140/ecdh from crypto/ecdh + crypto/internal/fips140/ecdsa from crypto/ecdsa + crypto/internal/fips140/ed25519 from crypto/ed25519 + crypto/internal/fips140/edwards25519 from crypto/internal/fips140/ed25519 + crypto/internal/fips140/edwards25519/field from crypto/ecdh+ + crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ + crypto/internal/fips140/hmac from crypto/hmac+ + crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/nistec from crypto/elliptic+ + crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec + crypto/internal/fips140/rsa from crypto/rsa + crypto/internal/fips140/sha256 from crypto/internal/fips140/check+ + crypto/internal/fips140/sha3 from crypto/internal/fips140/hmac+ + crypto/internal/fips140/sha512 from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ + crypto/internal/fips140/tls12 from crypto/tls + crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140hash from crypto/ecdsa+ + crypto/internal/fips140only from crypto/cipher+ + crypto/internal/hpke from crypto/tls + crypto/internal/impl from crypto/internal/fips140/aes+ + crypto/internal/randutil from crypto/dsa+ + crypto/internal/sysrand from crypto/internal/entropy+ + crypto/md5 from crypto/tls+ + crypto/rand from crypto/ed25519+ + crypto/rc4 from crypto/tls+ + crypto/rsa from crypto/tls+ + crypto/sha1 from crypto/tls+ + crypto/sha256 from crypto/tls+ + crypto/sha3 from crypto/internal/fips140hash + crypto/sha512 from crypto/ecdsa+ + crypto/subtle from crypto/cipher+ + crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ + crypto/tls/internal/fips140tls from crypto/tls + crypto/x509 from crypto/tls+ + D crypto/x509/internal/macos from crypto/x509 + crypto/x509/pkix from crypto/x509+ + DW database/sql/driver from github.com/google/uuid + W debug/dwarf from debug/pe + W debug/pe from github.com/dblohm7/wingoes/pe + embed from github.com/tailscale/web-client-prebuilt+ + encoding from encoding/gob+ + encoding/asn1 from crypto/x509+ + encoding/base32 from github.com/fxamacker/cbor/v2+ + encoding/base64 from encoding/json+ + encoding/binary from compress/gzip+ + encoding/gob from github.com/gorilla/securecookie + encoding/hex from crypto/x509+ + encoding/json from expvar+ + encoding/pem from crypto/tls+ + encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ + errors from archive/tar+ + expvar from tailscale.com/derp+ + flag from tailscale.com/util/testenv + fmt from archive/tar+ + hash from compress/zlib+ + hash/adler32 from compress/zlib+ + hash/crc32 from compress/gzip+ + hash/maphash from go4.org/mem + html from html/template+ + html/template from github.com/gorilla/csrf+ + internal/abi from crypto/x509/internal/macos+ + internal/asan from internal/runtime/maps+ + internal/bisect from 
internal/godebug + internal/bytealg from bytes+ + internal/byteorder from crypto/cipher+ + internal/chacha8rand from math/rand/v2+ + internal/coverage/rtcov from runtime + internal/cpu from crypto/internal/fips140deps/cpu+ + internal/filepathlite from os+ + internal/fmtsort from fmt+ + internal/goarch from crypto/internal/fips140deps/cpu+ + internal/godebug from archive/tar+ + internal/godebugs from internal/godebug+ + internal/goexperiment from hash/maphash+ + internal/goos from crypto/x509+ + internal/itoa from internal/poll+ + internal/msan from internal/runtime/maps+ + internal/nettrace from net+ + internal/oserror from io/fs+ + internal/poll from net+ + internal/profile from net/http/pprof + internal/profilerecord from runtime+ + internal/race from internal/poll+ + internal/reflectlite from context+ + internal/runtime/atomic from internal/runtime/exithook+ + internal/runtime/exithook from runtime + internal/runtime/maps from reflect+ + internal/runtime/math from internal/runtime/maps+ + internal/runtime/sys from crypto/subtle+ + L internal/runtime/syscall from runtime+ + internal/saferio from debug/pe+ + internal/singleflight from net + internal/stringslite from embed+ + internal/sync from sync+ + internal/syscall/execenv from os+ + LD internal/syscall/unix from crypto/internal/sysrand+ + W internal/syscall/windows from crypto/internal/sysrand+ + W internal/syscall/windows/registry from mime+ + W internal/syscall/windows/sysdll from internal/syscall/windows+ + internal/testlog from os + internal/unsafeheader from internal/reflectlite+ + io from archive/tar+ + io/fs from archive/tar+ + io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + iter from bytes+ + log from expvar+ + log/internal from log + maps from archive/tar+ + math from archive/tar+ + math/big from crypto/dsa+ + math/bits from bytes+ + math/rand from github.com/fxamacker/cbor/v2+ + math/rand/v2 from crypto/ecdsa+ + mime from mime/multipart+ + mime/multipart from net/http+ + mime/quotedprintable from mime/multipart + net from crypto/tls+ + net/http from expvar+ + net/http/httptrace from github.com/aws/smithy-go/transport/http+ + net/http/httputil from github.com/aws/smithy-go/transport/http+ + net/http/internal from net/http+ + net/http/internal/ascii from net/http+ + net/http/pprof from tailscale.com/ipn/localapi+ + net/netip from crypto/x509+ + net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + net/url from crypto/x509+ + os from crypto/internal/sysrand+ + os/exec from github.com/aws/aws-sdk-go-v2/credentials/processcreds+ + os/user from archive/tar+ + path from archive/tar+ + path/filepath from archive/tar+ + reflect from archive/tar+ + regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints+ + regexp/syntax from regexp + runtime from archive/tar+ + runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ + runtime/pprof from net/http/pprof+ + runtime/trace from net/http/pprof + slices from archive/tar+ + sort from compress/flate+ + strconv from archive/tar+ + strings from archive/tar+ + sync from archive/tar+ + sync/atomic from context+ + syscall from archive/tar+ + text/tabwriter from runtime/pprof + text/template from html/template + text/template/parse from html/template+ + time from archive/tar+ + unicode from bytes+ + unicode/utf16 from crypto/x509+ + unicode/utf8 from bufio+ + unique from net/netip + unsafe from bytes+ + weak from unique From 7cc2837594729dd1c78c2aa54587793519c9cb1a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 6 May 2025 21:08:22 
-0700 Subject: [PATCH 0830/1708] tsnet: don't depend on condregister & its default tailscaled features None of them are applicable to the common tsnet use cases. If somebody wants one of them, they can empty import it. Updates #12614 Change-Id: I3d7f74b555eed22e05a09ad667e4572a5bc452d8 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 43 ++++-------------------- tsnet/depaware.txt | 62 +++++++++-------------------------- tsnet/tsnet.go | 1 - 3 files changed, 23 insertions(+), 83 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 28fe4be8b..700085b39 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -135,13 +135,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/google/go-cmp/cmp/internal/flags from github.com/google/go-cmp/cmp+ github.com/google/go-cmp/cmp/internal/function from github.com/google/go-cmp/cmp 💣 github.com/google/go-cmp/cmp/internal/value from github.com/google/go-cmp/cmp - github.com/google/go-tpm/legacy/tpm2 from github.com/google/go-tpm/tpm2/transport+ - github.com/google/go-tpm/tpm2 from tailscale.com/feature/tpm - github.com/google/go-tpm/tpm2/transport from github.com/google/go-tpm/tpm2/transport/linuxtpm+ - L github.com/google/go-tpm/tpm2/transport/linuxtpm from tailscale.com/feature/tpm - W github.com/google/go-tpm/tpm2/transport/windowstpm from tailscale.com/feature/tpm - github.com/google/go-tpm/tpmutil from github.com/google/go-tpm/legacy/tpm2+ - W 💣 github.com/google/go-tpm/tpmutil/tbs from github.com/google/go-tpm/legacy/tpm2+ github.com/google/gofuzz from k8s.io/apimachinery/pkg/apis/meta/v1+ github.com/google/gofuzz/bytesource from github.com/google/gofuzz L github.com/google/nftables from tailscale.com/util/linuxfw @@ -156,10 +149,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 - L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/feature/tap - L github.com/insomniacslk/dhcp/iana from github.com/insomniacslk/dhcp/dhcpv4 - L github.com/insomniacslk/dhcp/interfaces from github.com/insomniacslk/dhcp/dhcpv4 - L github.com/insomniacslk/dhcp/rfc1035label from github.com/insomniacslk/dhcp/dhcpv4 L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm github.com/josharian/intern from github.com/mailru/easyjson/jlexer L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon @@ -172,7 +161,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd - github.com/kortschak/wol from tailscale.com/feature/wakeonlan github.com/mailru/easyjson/buffer from github.com/mailru/easyjson/jwriter 💣 github.com/mailru/easyjson/jlexer from github.com/go-openapi/swag github.com/mailru/easyjson/jwriter from github.com/go-openapi/swag @@ -188,11 +176,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 github.com/modern-go/reflect2 from github.com/json-iterator/go github.com/munnerz/goautoneg from k8s.io/kube-openapi/pkg/handler3+ 
github.com/opencontainers/go-digest from github.com/distribution/reference - L github.com/pierrec/lz4/v4 from github.com/u-root/uio/uio - L github.com/pierrec/lz4/v4/internal/lz4block from github.com/pierrec/lz4/v4+ - L github.com/pierrec/lz4/v4/internal/lz4errors from github.com/pierrec/lz4/v4+ - L github.com/pierrec/lz4/v4/internal/lz4stream from github.com/pierrec/lz4/v4 - L github.com/pierrec/lz4/v4/internal/xxh32 from github.com/pierrec/lz4/v4/internal/lz4stream github.com/pkg/errors from github.com/evanphx/json-patch/v5+ D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack 💣 github.com/prometheus/client_golang/prometheus from github.com/prometheus/client_golang/prometheus/collectors+ @@ -234,8 +217,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - L github.com/u-root/uio/rand from github.com/insomniacslk/dhcp/dhcpv4 - L github.com/u-root/uio/uio from github.com/insomniacslk/dhcp/dhcpv4+ L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 go.uber.org/multierr from go.uber.org/zap+ @@ -314,8 +295,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ - gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/feature/tap+ - gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack+ + gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ @@ -814,14 +795,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ - tailscale.com/feature from tailscale.com/feature/wakeonlan+ - tailscale.com/feature/capture from tailscale.com/feature/condregister - tailscale.com/feature/condregister from tailscale.com/tsnet - tailscale.com/feature/relayserver from tailscale.com/feature/condregister - tailscale.com/feature/taildrop from tailscale.com/feature/condregister - L tailscale.com/feature/tap from tailscale.com/feature/condregister - tailscale.com/feature/tpm from tailscale.com/feature/condregister - tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister + tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ @@ -830,10 +804,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/ipn from 
tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ - tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+ + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ - tailscale.com/ipn/localapi from tailscale.com/tsnet+ + tailscale.com/ipn/localapi from tailscale.com/tsnet tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store @@ -898,7 +872,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/tsd+ - tailscale.com/net/udprelay from tailscale.com/feature/relayserver tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal @@ -954,7 +927,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash - tailscale.com/util/httphdr from tailscale.com/feature/taildrop tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns+ @@ -964,9 +936,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag - tailscale.com/util/osshare from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/osshare from tailscale.com/ipn/ipnlocal tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal - tailscale.com/util/progresstracking from tailscale.com/feature/taildrop tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ @@ -1162,7 +1133,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ go/scanner from go/ast+ go/token from go/ast+ hash from compress/zlib+ - hash/adler32 from compress/zlib+ + hash/adler32 from compress/zlib hash/crc32 from compress/gzip+ hash/fnv from google.golang.org/protobuf/internal/detrand hash/maphash from go4.org/mem diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index b60babb7c..8bc93cd2f 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -106,13 +106,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ - github.com/google/go-tpm/legacy/tpm2 from github.com/google/go-tpm/tpm2+ - github.com/google/go-tpm/tpm2 from tailscale.com/feature/tpm - github.com/google/go-tpm/tpm2/transport from github.com/google/go-tpm/tpm2+ - L github.com/google/go-tpm/tpm2/transport/linuxtpm from tailscale.com/feature/tpm - W 
github.com/google/go-tpm/tpm2/transport/windowstpm from tailscale.com/feature/tpm - github.com/google/go-tpm/tpmutil from github.com/google/go-tpm/legacy/tpm2+ - W 💣 github.com/google/go-tpm/tpmutil/tbs from github.com/google/go-tpm/legacy/tpm2+ L github.com/google/nftables from tailscale.com/util/linuxfw L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ @@ -125,10 +118,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 - L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/feature/tap - L github.com/insomniacslk/dhcp/iana from github.com/insomniacslk/dhcp/dhcpv4 - L github.com/insomniacslk/dhcp/interfaces from github.com/insomniacslk/dhcp/dhcpv4 - L github.com/insomniacslk/dhcp/rfc1035label from github.com/insomniacslk/dhcp/dhcpv4 L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink @@ -139,7 +128,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd - github.com/kortschak/wol from tailscale.com/feature/wakeonlan L github.com/mdlayher/genetlink from tailscale.com/net/tstun L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ @@ -148,11 +136,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ github.com/miekg/dns from tailscale.com/net/dns/recursive 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket - L github.com/pierrec/lz4/v4 from github.com/u-root/uio/uio - L github.com/pierrec/lz4/v4/internal/lz4block from github.com/pierrec/lz4/v4+ - L github.com/pierrec/lz4/v4/internal/lz4errors from github.com/pierrec/lz4/v4+ - L github.com/pierrec/lz4/v4/internal/lz4stream from github.com/pierrec/lz4/v4 - L github.com/pierrec/lz4/v4/internal/xxh32 from github.com/pierrec/lz4/v4/internal/lz4stream D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient @@ -182,8 +165,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - L github.com/u-root/uio/rand from github.com/insomniacslk/dhcp/dhcpv4 - L github.com/u-root/uio/uio from github.com/insomniacslk/dhcp/dhcpv4+ L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/local+ @@ -215,8 +196,8 @@ 
tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ - gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/feature/tap+ - gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/feature/tap+ + gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ @@ -255,14 +236,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ - tailscale.com/feature from tailscale.com/feature/capture+ - tailscale.com/feature/capture from tailscale.com/feature/condregister - tailscale.com/feature/condregister from tailscale.com/tsnet - tailscale.com/feature/relayserver from tailscale.com/feature/condregister - tailscale.com/feature/taildrop from tailscale.com/feature/condregister - L tailscale.com/feature/tap from tailscale.com/feature/condregister - tailscale.com/feature/tpm from tailscale.com/feature/condregister - tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister + tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ @@ -270,10 +244,10 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ - tailscale.com/ipn/ipnext from tailscale.com/feature/relayserver+ - tailscale.com/ipn/ipnlocal from tailscale.com/feature/relayserver+ + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ - tailscale.com/ipn/localapi from tailscale.com/feature/capture+ + tailscale.com/ipn/localapi from tailscale.com/tsnet tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store @@ -303,7 +277,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/flowtrack from tailscale.com/net/packet+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/memnet from tailscale.com/tsnet - tailscale.com/net/netaddr from tailscale.com/feature/tap+ + tailscale.com/net/netaddr from tailscale.com/ipn+ tailscale.com/net/netcheck from tailscale.com/ipn/ipnlocal+ tailscale.com/net/neterror from tailscale.com/net/dns/resolver+ tailscale.com/net/netkernelconf from tailscale.com/ipn/ipnlocal @@ -313,7 +287,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) W 💣 
tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ - tailscale.com/net/packet from tailscale.com/feature/capture+ + tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ @@ -328,8 +302,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ - tailscale.com/net/tstun from tailscale.com/feature/tap+ - tailscale.com/net/udprelay from tailscale.com/feature/relayserver + tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal @@ -338,7 +311,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) 💣 tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/syncs from tailscale.com/control/controlhttp+ tailscale.com/tailcfg from tailscale.com/client/local+ - tailscale.com/taildrop from tailscale.com/feature/taildrop tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tempfork/httprec from tailscale.com/control/controlclient @@ -353,8 +325,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal tailscale.com/types/bools from tailscale.com/tsnet tailscale.com/types/dnstype from tailscale.com/client/local+ - tailscale.com/types/empty from tailscale.com/feature/taildrop+ - tailscale.com/types/ipproto from tailscale.com/feature/tap+ + tailscale.com/types/empty from tailscale.com/ipn+ + tailscale.com/types/ipproto from tailscale.com/ipn+ tailscale.com/types/key from tailscale.com/client/local+ tailscale.com/types/lazy from tailscale.com/clientupdate+ tailscale.com/types/logger from tailscale.com/appc+ @@ -384,7 +356,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash - tailscale.com/util/httphdr from tailscale.com/feature/taildrop tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns+ @@ -394,12 +365,11 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag - tailscale.com/util/osshare from tailscale.com/feature/taildrop+ + tailscale.com/util/osshare from tailscale.com/ipn/ipnlocal tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal - tailscale.com/util/progresstracking from tailscale.com/feature/taildrop tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy - tailscale.com/util/rands from 
tailscale.com/feature/taildrop+ + tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ tailscale.com/util/ringbuffer from tailscale.com/wgengine/magicsock tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ @@ -477,7 +447,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) D golang.org/x/net/route from net+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ - LD golang.org/x/sys/unix from github.com/google/go-tpm/tpmutil+ + LD golang.org/x/sys/unix from github.com/google/nftables+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ @@ -578,7 +548,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) flag from tailscale.com/util/testenv fmt from archive/tar+ hash from compress/zlib+ - hash/adler32 from compress/zlib+ + W hash/adler32 from compress/zlib hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem html from html/template+ @@ -637,7 +607,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) math/rand from github.com/fxamacker/cbor/v2+ math/rand/v2 from crypto/ecdsa+ mime from mime/multipart+ - mime/multipart from net/http+ + mime/multipart from net/http mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 1880b62b1..4664a66a7 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -30,7 +30,6 @@ import ( "tailscale.com/client/tailscale" "tailscale.com/control/controlclient" "tailscale.com/envknob" - _ "tailscale.com/feature/condregister" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" From 48dacf1bf71da01bfbf76670108a6b4b26fbf70b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 6 May 2025 21:18:46 -0700 Subject: [PATCH 0831/1708] cmd/tailscale/cli: omit "file" subcommand if taildrop is omitted from build Updates #15812 Updates #12614 Change-Id: Ic945b26a127ba15399abdaab8fe43b1cfa64d874 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/cli.go | 11 ++++++++++- cmd/tailscale/cli/file.go | 24 ++++++++++++++++-------- 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 2fbee516a..b1a910295 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -207,6 +207,8 @@ func noDupFlagify(c *ffcli.Command) { } } +var fileCmd func() *ffcli.Command + func newRootCmd() *ffcli.Command { rootfs := newFlagSet("tailscale") rootfs.Func("socket", "path to tailscaled socket", func(s string) error { @@ -247,7 +249,7 @@ change in the future. serveCmd(), versionCmd, webCmd, - fileCmd, + nilOrCall(fileCmd), bugReportCmd, certCmd, netlockCmd, @@ -286,6 +288,13 @@ func nonNilCmds(cmds ...*ffcli.Command) []*ffcli.Command { return slicesx.AppendNonzero(cmds[:0], cmds) } +func nilOrCall(f func() *ffcli.Command) *ffcli.Command { + if f == nil { + return nil + } + return f() +} + func fatalf(format string, a ...any) { if Fatalf != nil { Fatalf(format, a...) 
diff --git a/cmd/tailscale/cli/file.go b/cmd/tailscale/cli/file.go index 3de5f9766..6f3aa40b5 100644 --- a/cmd/tailscale/cli/file.go +++ b/cmd/tailscale/cli/file.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_taildrop + package cli import ( @@ -38,14 +40,20 @@ import ( "tailscale.com/version" ) -var fileCmd = &ffcli.Command{ - Name: "file", - ShortUsage: "tailscale file ...", - ShortHelp: "Send or receive files", - Subcommands: []*ffcli.Command{ - fileCpCmd, - fileGetCmd, - }, +func init() { + fileCmd = getFileCmd +} + +func getFileCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "file", + ShortUsage: "tailscale file ...", + ShortHelp: "Send or receive files", + Subcommands: []*ffcli.Command{ + fileCpCmd, + fileGetCmd, + }, + } } type countingReader struct { From 7e2630235fd7393c9c9fcbfcaca02b6675dc028f Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 7 May 2025 09:15:33 -0700 Subject: [PATCH 0832/1708] feature/relayserver: consider relay:server node attribute for enablement (#15901) Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 87aba4228..846e21a7d 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -72,10 +72,20 @@ func (e *extension) Init(host ipnext.Host) error { profile, prefs := host.Profiles().CurrentProfileState() e.profileStateChanged(profile, prefs, false) host.Hooks().ProfileStateChange.Add(e.profileStateChanged) - // TODO(jwhited): callback for netmap/nodeattr changes (e.hasNodeAttrRelayServer) + host.Hooks().OnSelfChange.Add(e.selfNodeViewChanged) return nil } +func (e *extension) selfNodeViewChanged(nodeView tailcfg.NodeView) { + e.mu.Lock() + defer e.mu.Unlock() + e.hasNodeAttrRelayServer = nodeView.HasCap(tailcfg.NodeAttrRelayServer) + if !e.hasNodeAttrRelayServer && e.server != nil { + e.server.Close() + e.server = nil + } +} + func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { e.mu.Lock() defer e.mu.Unlock() From 04936d6c0573c8f9d83ce5b1c10c9cafe1451330 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 7 May 2025 07:29:06 -0700 Subject: [PATCH 0833/1708] tsnet: add android & iOS results to depaware Updates #12614 Change-Id: Icd21deb754e7073871eeb34edadd41c167ec5984 Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 3 +- Makefile | 6 +- tsnet/depaware.txt | 238 ++++++++++++++++++------------------- 3 files changed, 124 insertions(+), 123 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 666bd2962..fcd39e391 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -498,8 +498,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: check depaware run: | - export PATH=$(./tool/go env GOROOT)/bin:$PATH - find . 
-name 'depaware.txt' | xargs -n1 dirname | xargs ./tool/go run github.com/tailscale/depaware --check --internal + make depaware go_generate: runs-on: ubuntu-22.04 diff --git a/Makefile b/Makefile index f4e5abc87..c30818c96 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,8 @@ updatedeps: ## Update depaware deps tailscale.com/cmd/tailscale \ tailscale.com/cmd/derper \ tailscale.com/cmd/k8s-operator \ - tailscale.com/cmd/stund \ + tailscale.com/cmd/stund + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update -goos=linux,darwin,windows,android,ios --internal \ tailscale.com/tsnet depaware: ## Run depaware checks @@ -33,7 +34,8 @@ depaware: ## Run depaware checks tailscale.com/cmd/tailscale \ tailscale.com/cmd/derper \ tailscale.com/cmd/k8s-operator \ - tailscale.com/cmd/stund \ + tailscale.com/cmd/stund + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --goos=linux,darwin,windows,android,ios --internal \ tailscale.com/tsnet buildwindows: ## Build tailscale CLI for windows/amd64 diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 8bc93cd2f..e035e111a 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -5,85 +5,85 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ - L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ - L github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 - L github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ - L github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds - L github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config - L 
github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds - L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ - L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ - L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ - L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds - L github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 - L github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws - L github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm - L github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ - L github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ - L github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ - L github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer - L 
github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ - L github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ - L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ - L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http - L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm - github.com/coder/websocket from tailscale.com/util/eventbus - github.com/coder/websocket/internal/errd from github.com/coder/websocket - github.com/coder/websocket/internal/util from github.com/coder/websocket - github.com/coder/websocket/internal/xsync from github.com/coder/websocket + LA github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ + LA github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore + LA github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ + LA github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ + LA github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts + LA github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ + LA github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts + LA github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry + LA github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ + LA github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 + LA github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ + LA github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ + LA github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore + LA github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config + LA github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config + LA github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config + LA github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from 
github.com/aws/aws-sdk-go-v2/credentials/endpointcreds + LA github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config + LA github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config + LA github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config + LA github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ + LA github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds + LA github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + LA github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ + LA github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ + LA github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ + LA github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ + LA github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ + LA github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ + LA github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config + LA github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ + LA github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ + LA github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ + LA github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds + LA github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ + LA github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 + LA github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws + LA github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry + LA github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts + LA github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts + LA github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore + LA github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm + LA github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ + LA github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ + LA github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso + LA github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso + LA github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ + LA github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc + LA github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc + LA github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ + LA github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts + LA github.com/aws/aws-sdk-go-v2/service/sts/types from 
github.com/aws/aws-sdk-go-v2/credentials/stscreds+ + LA github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ + LA github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ + LA github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ + LA github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer + LA github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ + LA github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ + LA github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + LA github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ + LA github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts + LA github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ + LA github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer + LA github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ + LA github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ + LA github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ + LA github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ + LA github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config + LA github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ + LA github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ + LA github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ + LA github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ + LA github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ + LA github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http + LA github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm + LDWA github.com/coder/websocket from tailscale.com/util/eventbus + LDWA github.com/coder/websocket/internal/errd from github.com/coder/websocket + LDWA github.com/coder/websocket/internal/util from github.com/coder/websocket + LDWA github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ @@ -103,7 +103,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ W 💣 github.com/go-ole/go-ole from github.com/go-ole/go-ole/oleutil+ W 💣 github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet - L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns + LA 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ L github.com/google/nftables from tailscale.com/util/linuxfw @@ -112,13 +112,13 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L github.com/google/nftables/expr from github.com/google/nftables+ L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ L github.com/google/nftables/xt from github.com/google/nftables/expr+ - DW 
github.com/google/uuid from github.com/prometheus-community/pro-bing+ - github.com/gorilla/csrf from tailscale.com/client/web - github.com/gorilla/securecookie from github.com/gorilla/csrf + DWI github.com/google/uuid from github.com/prometheus-community/pro-bing+ + LDW github.com/gorilla/csrf from tailscale.com/client/web + LDW github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns - L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 - L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm + LA 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns + LA github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 + LA github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -132,12 +132,12 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables - L github.com/mdlayher/sdnotify from tailscale.com/util/systemd - L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ + LA github.com/mdlayher/sdnotify from tailscale.com/util/systemd + LA 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ github.com/miekg/dns from tailscale.com/net/dns/recursive - 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket - D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack - L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + LDWA 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket + DI github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack + LA 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket W 💣 github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio @@ -150,11 +150,11 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp - github.com/tailscale/hujson from tailscale.com/ipn/conffile + LDW github.com/tailscale/hujson from tailscale.com/ipn/conffile L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth - github.com/tailscale/web-client-prebuilt from tailscale.com/client/web + LDW github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ W 💣 github.com/tailscale/wireguard-go/conn/winrio from github.com/tailscale/wireguard-go/conn 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ @@ -201,7 +201,7 @@ tailscale.com/tsnet dependencies: (generated by 
github.com/tailscale/depaware) gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ - gvisor.dev/gvisor/pkg/tcpip/stack/gro from tailscale.com/wgengine/netstack/gro + LDWA gvisor.dev/gvisor/pkg/tcpip/stack/gro from tailscale.com/wgengine/netstack/gro gvisor.dev/gvisor/pkg/tcpip/transport from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ gvisor.dev/gvisor/pkg/tcpip/transport/icmp from tailscale.com/wgengine/netstack gvisor.dev/gvisor/pkg/tcpip/transport/internal/network from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ @@ -218,7 +218,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/client/local from tailscale.com/client/tailscale+ tailscale.com/client/tailscale from tailscale.com/derp+ tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ - tailscale.com/client/web from tailscale.com/ipn/ipnlocal + LDW tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ @@ -250,13 +250,13 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/ipn/localapi from tailscale.com/tsnet tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store + LA tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store + LA tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ - L tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ - L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore + LA tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ + LA tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore tailscale.com/kube/kubetypes from tailscale.com/envknob+ - tailscale.com/licenses from tailscale.com/client/web + LDW tailscale.com/licenses from tailscale.com/client/web tailscale.com/log/filelogger from tailscale.com/logpolicy tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ @@ -296,7 +296,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp + LA tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ @@ -349,7 +349,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/cmpver from tailscale.com/clientupdate+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ + LA 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ 
tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/ipn/localapi+ tailscale.com/util/execqueue from tailscale.com/appc+ @@ -411,7 +411,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ - LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf + LDA golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ golang.org/x/crypto/cryptobyte from crypto/ecdsa+ @@ -424,8 +424,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal - LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh + LDA golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal + LDA golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ @@ -433,21 +433,21 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from net/http+ golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + LDW golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ - golang.org/x/net/internal/socks from golang.org/x/net/proxy + LDWA golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/ipv4 from github.com/miekg/dns+ golang.org/x/net/ipv6 from github.com/miekg/dns+ - golang.org/x/net/proxy from tailscale.com/net/netns - D golang.org/x/net/route from net+ + LDWA golang.org/x/net/proxy from tailscale.com/net/netns + DI golang.org/x/net/route from net+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ - LD golang.org/x/sys/unix from github.com/google/nftables+ + LDAI golang.org/x/sys/unix from github.com/google/nftables+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ @@ -527,9 +527,9 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ - D crypto/x509/internal/macos from crypto/x509 + DI crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix 
from crypto/x509+ - DW database/sql/driver from github.com/google/uuid + DWI database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from github.com/tailscale/web-client-prebuilt+ @@ -538,7 +538,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) encoding/base32 from github.com/fxamacker/cbor/v2+ encoding/base64 from encoding/json+ encoding/binary from compress/gzip+ - encoding/gob from github.com/gorilla/securecookie + LDW encoding/gob from github.com/gorilla/securecookie encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ @@ -552,7 +552,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem html from html/template+ - html/template from github.com/gorilla/csrf+ + LDWA html/template from github.com/gorilla/csrf+ internal/abi from crypto/x509/internal/macos+ internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug @@ -582,13 +582,13 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ internal/runtime/sys from crypto/subtle+ - L internal/runtime/syscall from runtime+ - internal/saferio from debug/pe+ + LA internal/runtime/syscall from runtime+ + LDW internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ internal/syscall/execenv from os+ - LD internal/syscall/unix from crypto/internal/sysrand+ + LDAI internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ @@ -639,8 +639,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) sync/atomic from context+ syscall from archive/tar+ text/tabwriter from runtime/pprof - text/template from html/template - text/template/parse from html/template+ + LDWA text/template from html/template + LDWA text/template/parse from html/template+ time from archive/tar+ unicode from bytes+ unicode/utf16 from crypto/x509+ From fd263adc1b5b75ad78205a439cbbe5a505a90014 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 7 May 2025 09:26:28 -0700 Subject: [PATCH 0834/1708] ipn/store: don't link in AWS & Kubernetes stuff on Android Android is Linux, but not that much Linux.
Updates #12614 Change-Id: Ice80bd3e3d173511c30d05a43d25a31e18928db7 Signed-off-by: Brad Fitzpatrick --- ipn/store/store_aws.go | 2 +- ipn/store/store_kube.go | 2 +- tsnet/depaware.txt | 160 ++++++++++++++++++++-------------------- 3 files changed, 82 insertions(+), 82 deletions(-) diff --git a/ipn/store/store_aws.go b/ipn/store/store_aws.go index d39e84319..4f6c5a6e7 100644 --- a/ipn/store/store_aws.go +++ b/ipn/store/store_aws.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build (ts_aws || (linux && (arm64 || amd64))) && !ts_omit_aws +//go:build (ts_aws || (linux && (arm64 || amd64) && !android)) && !ts_omit_aws package store diff --git a/ipn/store/store_kube.go b/ipn/store/store_kube.go index 8941620f6..01ee2870f 100644 --- a/ipn/store/store_kube.go +++ b/ipn/store/store_kube.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build (ts_kube || (linux && (arm64 || amd64))) && !ts_omit_kube +//go:build (ts_kube || (linux && (arm64 || amd64) && !android)) && !ts_omit_kube package store diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index e035e111a..dd90f66e3 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -5,81 +5,81 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - LA github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ - LA github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore - LA github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ - LA github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ - LA github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts - LA github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ - LA github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts - LA github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry - LA github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ - LA github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 - LA github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ - LA github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ - LA github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore - LA github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config - LA github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config - LA github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config - LA github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds - LA github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config - LA github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config - LA github.com/aws/aws-sdk-go-v2/credentials/stscreds from 
github.com/aws/aws-sdk-go-v2/config - LA github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ - LA github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds - LA github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ - LA github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ - LA github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ - LA github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ - LA github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - LA github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ - LA github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ - LA github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config - LA github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ - LA github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ - LA github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ - LA github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds - LA github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ - LA github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 - LA github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws - LA github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry - LA github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts - LA github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts - LA github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore - LA github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm - LA github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ - LA github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ - LA github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso - LA github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso - LA github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ - LA github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc - LA github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc - LA github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ - LA github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts - LA github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ - LA github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ - LA github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ - LA github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ - LA 
github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer - LA github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ - LA github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ - LA github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ - LA github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ - LA github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts - LA github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - LA github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer - LA github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ - LA github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ - LA github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ - LA github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ - LA github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config - LA github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ - LA github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ - LA github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ - LA github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ - LA github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ - LA github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http - LA github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm + L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ + L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore + L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ + L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry + L github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ + L github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 + L github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ + L github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore + L github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds + L github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config + L 
github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds + L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ + L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ + L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ + L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds + L github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 + L github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws + L github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry + L github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore + L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm + L github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso + L github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso + L github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc + L github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc + L github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ + L github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ + L github.com/aws/smithy-go/auth from 
github.com/aws/aws-sdk-go-v2/internal/auth+ + L github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer + L github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ + L github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer + L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ + L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ + L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ + L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ + L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ + L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http + L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm LDWA github.com/coder/websocket from tailscale.com/util/eventbus LDWA github.com/coder/websocket/internal/errd from github.com/coder/websocket LDWA github.com/coder/websocket/internal/util from github.com/coder/websocket @@ -118,7 +118,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ LA 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns LA github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 - LA github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm + L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -250,11 +250,11 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/ipn/localapi from tailscale.com/tsnet tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ - LA tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - LA tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store + L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store + L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ - LA tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ - LA tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore + L 
tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ + L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore tailscale.com/kube/kubetypes from tailscale.com/envknob+ LDW tailscale.com/licenses from tailscale.com/client/web tailscale.com/log/filelogger from tailscale.com/logpolicy From f5cc657e13ba322f44df617b731bc5d609c05ae2 Mon Sep 17 00:00:00 2001 From: Brian Palmer Date: Wed, 7 May 2025 12:57:56 -0600 Subject: [PATCH 0835/1708] control/controlclient: send optional ConnectionHandleForTest with map requests (#15904) This handle can be used in tests and debugging to identify the specific client connection. Updates tailscale/corp#28368 Change-Id: I48cc573fc0bcf018c66a18e67ad6c4f248fb760c Signed-off-by: Brian Palmer --- control/controlclient/direct.go | 51 ++++++++++++++++++++------------- tailcfg/tailcfg.go | 6 ++++ 2 files changed, 37 insertions(+), 20 deletions(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index c8e885799..ac799e2d9 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -95,15 +95,16 @@ type Direct struct { sfGroup singleflight.Group[struct{}, *NoiseClient] // protects noiseClient creation. noiseClient *NoiseClient - persist persist.PersistView - authKey string - tryingNewKey key.NodePrivate - expiry time.Time // or zero value if none/unknown - hostinfo *tailcfg.Hostinfo // always non-nil - netinfo *tailcfg.NetInfo - endpoints []tailcfg.Endpoint - tkaHead string - lastPingURL string // last PingRequest.URL received, for dup suppression + persist persist.PersistView + authKey string + tryingNewKey key.NodePrivate + expiry time.Time // or zero value if none/unknown + hostinfo *tailcfg.Hostinfo // always non-nil + netinfo *tailcfg.NetInfo + endpoints []tailcfg.Endpoint + tkaHead string + lastPingURL string // last PingRequest.URL received, for dup suppression + connectionHandleForTest string // sent in MapRequest.ConnectionHandleForTest } // Observer is implemented by users of the control client (such as LocalBackend) @@ -403,6 +404,14 @@ func (c *Direct) SetTKAHead(tkaHead string) bool { return true } +// SetConnectionHandleForTest stores a new MapRequest.ConnectionHandleForTest +// value for the next update. 
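+// The handle is opaque: the server may use it to identify this particular
+// connection for debugging or tests, and it has no other semantic meaning.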
+func (c *Direct) SetConnectionHandleForTest(handle string) { + c.mu.Lock() + defer c.mu.Unlock() + c.connectionHandleForTest = handle +} + func (c *Direct) GetPersist() persist.PersistView { c.mu.Lock() defer c.mu.Unlock() @@ -851,6 +860,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap serverNoiseKey := c.serverNoiseKey hi := c.hostInfoLocked() backendLogID := hi.BackendLogID + connectionHandleForTest := c.connectionHandleForTest var epStrs []string var eps []netip.AddrPort var epTypes []tailcfg.EndpointType @@ -891,17 +901,18 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap nodeKey := persist.PublicNodeKey() request := &tailcfg.MapRequest{ - Version: tailcfg.CurrentCapabilityVersion, - KeepAlive: true, - NodeKey: nodeKey, - DiscoKey: c.discoPubKey, - Endpoints: eps, - EndpointTypes: epTypes, - Stream: isStreaming, - Hostinfo: hi, - DebugFlags: c.debugFlags, - OmitPeers: nu == nil, - TKAHead: c.tkaHead, + Version: tailcfg.CurrentCapabilityVersion, + KeepAlive: true, + NodeKey: nodeKey, + DiscoKey: c.discoPubKey, + Endpoints: eps, + EndpointTypes: epTypes, + Stream: isStreaming, + Hostinfo: hi, + DebugFlags: c.debugFlags, + OmitPeers: nu == nil, + TKAHead: c.tkaHead, + ConnectionHandleForTest: connectionHandleForTest, } var extraDebugFlags []string if hi != nil && c.netMon != nil && !c.skipIPForwardingCheck && diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 11a0d0830..0a58d8f0c 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -1413,6 +1413,12 @@ type MapRequest struct { // * "warn-router-unhealthy": client's Router implementation is // having problems. DebugFlags []string `json:",omitempty"` + + // ConnectionHandleForTest, if non-empty, is an opaque string sent by the client that + // identifies this specific connection to the server. The server may choose to + // use this handle to identify the connection for debugging or testing + // purposes. It has no semantic meaning. + ConnectionHandleForTest string `json:",omitempty"` } // PortRange represents a range of UDP or TCP port numbers. From 02f68e5d9ff092617ad432943c68564344f72500 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 7 May 2025 15:56:57 -0700 Subject: [PATCH 0836/1708] net/dns: don't link dbus, gonotify on Android Android is Linux, but doesn't use Linux DNS managers (or D-Bus). 
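The Linux-only DNS manager files below follow the usual pairing for this kind of exclusion: the real implementations are constrained to linux && !android, while the fallback manager widens its constraint with || android so Android falls through to the no-op path. A compressed sketch of that pairing, shown as two separate files with illustrative names rather than the package's actual identifiers:

	// manager_real_linux.go (sketch)
	//go:build linux && !android

	package dns

	// newOSConfigurator would talk to systemd-resolved or NetworkManager over
	// D-Bus, or shell out to resolvconf, none of which exist on Android.
	func newOSConfigurator() error { return nil }

	// manager_stub.go (sketch)
	//go:build (!linux || android) && !windows && !darwin

	package dns

	// newOSConfigurator is a no-op here; on Android the platform owns DNS
	// configuration, not tailscaled.
	func newOSConfigurator() error { return nil }

Once the Linux-only files stop building for Android, their imports (github.com/godbus/dbus/v5 and github.com/illarion/gonotify/v3) drop out of the Android dependency graph, which is the change recorded in tsnet/depaware.txt further down.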
Updates #12614 Change-Id: I487802ac74a259cd5d2480ac26f7faa17ca8d1c3 Signed-off-by: Brad Fitzpatrick --- net/dns/debian_resolvconf.go | 2 +- net/dns/direct.go | 2 ++ net/dns/direct_linux.go | 2 ++ net/dns/direct_notlinux.go | 2 +- net/dns/manager_default.go | 2 +- net/dns/manager_linux.go | 2 ++ net/dns/nm.go | 2 +- net/dns/openresolv.go | 2 +- net/dns/resolved.go | 2 +- tsnet/depaware.txt | 6 +++--- 10 files changed, 15 insertions(+), 9 deletions(-) diff --git a/net/dns/debian_resolvconf.go b/net/dns/debian_resolvconf.go index 3ffc796e0..63fd80c12 100644 --- a/net/dns/debian_resolvconf.go +++ b/net/dns/debian_resolvconf.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || freebsd || openbsd +//go:build (linux && !android) || freebsd || openbsd package dns diff --git a/net/dns/direct.go b/net/dns/direct.go index aaff18fcb..f23723d9a 100644 --- a/net/dns/direct.go +++ b/net/dns/direct.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !android && !ios + package dns import ( diff --git a/net/dns/direct_linux.go b/net/dns/direct_linux.go index 8dccc5bfb..0558f0f51 100644 --- a/net/dns/direct_linux.go +++ b/net/dns/direct_linux.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !android + package dns import ( diff --git a/net/dns/direct_notlinux.go b/net/dns/direct_notlinux.go index c221ca1be..a73a35e5e 100644 --- a/net/dns/direct_notlinux.go +++ b/net/dns/direct_notlinux.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux +//go:build !linux && !android && !ios package dns diff --git a/net/dns/manager_default.go b/net/dns/manager_default.go index e14454e76..dbe985cac 100644 --- a/net/dns/manager_default.go +++ b/net/dns/manager_default.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux && !freebsd && !openbsd && !windows && !darwin && !illumos && !solaris && !plan9 +//go:build (!linux || android) && !freebsd && !openbsd && !windows && !darwin && !illumos && !solaris && !plan9 package dns diff --git a/net/dns/manager_linux.go b/net/dns/manager_linux.go index 3ba3022b6..6bd368f50 100644 --- a/net/dns/manager_linux.go +++ b/net/dns/manager_linux.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !android + package dns import ( diff --git a/net/dns/nm.go b/net/dns/nm.go index ef07a90d8..97557e33a 100644 --- a/net/dns/nm.go +++ b/net/dns/nm.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux +//go:build linux && !android package dns diff --git a/net/dns/openresolv.go b/net/dns/openresolv.go index 0b5c87a3b..c9562b6a9 100644 --- a/net/dns/openresolv.go +++ b/net/dns/openresolv.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || freebsd || openbsd +//go:build (linux && !android) || freebsd || openbsd package dns diff --git a/net/dns/resolved.go b/net/dns/resolved.go index 1a7c86041..4f58f3f9c 100644 --- a/net/dns/resolved.go +++ b/net/dns/resolved.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux +//go:build linux && !android package dns diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 
dd90f66e3..9f0956ea0 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -103,7 +103,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ W 💣 github.com/go-ole/go-ole from github.com/go-ole/go-ole/oleutil+ W 💣 github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet - LA 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns + L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ L github.com/google/nftables from tailscale.com/util/linuxfw @@ -116,8 +116,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) LDW github.com/gorilla/csrf from tailscale.com/client/web LDW github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - LA 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns - LA github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 + L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns + L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink From 9d623cf5eb6043be0aff2e794e12f7d43563b694 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 7 May 2025 16:28:24 -0700 Subject: [PATCH 0837/1708] util/systemd: don't link systemd-notification package on Android Updates #12614 Change-Id: Ie5f0bb072571249f08aca09132c8491c31d01605 Signed-off-by: Brad Fitzpatrick --- tsnet/depaware.txt | 2 +- util/systemd/systemd_linux.go | 2 +- util/systemd/systemd_nonlinux.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 9f0956ea0..639c76e44 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -132,7 +132,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables - LA github.com/mdlayher/sdnotify from tailscale.com/util/systemd + L github.com/mdlayher/sdnotify from tailscale.com/util/systemd LA 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ github.com/miekg/dns from tailscale.com/net/dns/recursive LDWA 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket diff --git a/util/systemd/systemd_linux.go b/util/systemd/systemd_linux.go index 909cfcb20..fdfd1bba0 100644 --- a/util/systemd/systemd_linux.go +++ b/util/systemd/systemd_linux.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux +//go:build linux && !android package systemd diff --git a/util/systemd/systemd_nonlinux.go b/util/systemd/systemd_nonlinux.go index 36214020c..5d7772bb3 100644 --- a/util/systemd/systemd_nonlinux.go +++ b/util/systemd/systemd_nonlinux.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux +//go:build !linux || android package systemd From 5be6ff9b62ecd981b3a9f81af8567875c5e6e0ee Mon Sep 17 00:00:00 
2001 From: Brad Fitzpatrick Date: Wed, 7 May 2025 20:27:30 -0700 Subject: [PATCH 0838/1708] all: remove non-applicable "linux" deps on Android Updates #12614 Change-Id: I0e2a18eca3515d3d6206c059110556d2bbbb0c5c Signed-off-by: Brad Fitzpatrick --- derp/derp_server_default.go | 2 +- derp/derp_server_linux.go | 2 ++ doctor/ethtool/ethtool_linux.go | 2 ++ doctor/ethtool/ethtool_other.go | 2 +- ipn/ipnlocal/ssh.go | 2 +- ipn/ipnlocal/ssh_stub.go | 2 +- net/netkernelconf/netkernelconf_default.go | 2 +- net/netkernelconf/netkernelconf_linux.go | 2 ++ net/netns/socks.go | 2 +- safesocket/safesocket_ps.go | 2 +- tsnet/depaware.txt | 16 ++++++++-------- 11 files changed, 21 insertions(+), 15 deletions(-) diff --git a/derp/derp_server_default.go b/derp/derp_server_default.go index 3e0b5b5e9..014cfffd6 100644 --- a/derp/derp_server_default.go +++ b/derp/derp_server_default.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux +//go:build !linux || android package derp diff --git a/derp/derp_server_linux.go b/derp/derp_server_linux.go index bfc2aade6..5a40e114e 100644 --- a/derp/derp_server_linux.go +++ b/derp/derp_server_linux.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !android + package derp import ( diff --git a/doctor/ethtool/ethtool_linux.go b/doctor/ethtool/ethtool_linux.go index b8cc08002..f6eaac1df 100644 --- a/doctor/ethtool/ethtool_linux.go +++ b/doctor/ethtool/ethtool_linux.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !android + package ethtool import ( diff --git a/doctor/ethtool/ethtool_other.go b/doctor/ethtool/ethtool_other.go index 9aaa9dda8..7af74eec8 100644 --- a/doctor/ethtool/ethtool_other.go +++ b/doctor/ethtool/ethtool_other.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux +//go:build !linux || android package ethtool diff --git a/ipn/ipnlocal/ssh.go b/ipn/ipnlocal/ssh.go index c1b477652..e48b1f2f1 100644 --- a/ipn/ipnlocal/ssh.go +++ b/ipn/ipnlocal/ssh.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || (darwin && !ios) || freebsd || openbsd || plan9 +//go:build (linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9 package ipnlocal diff --git a/ipn/ipnlocal/ssh_stub.go b/ipn/ipnlocal/ssh_stub.go index 401f42bf8..d129084e4 100644 --- a/ipn/ipnlocal/ssh_stub.go +++ b/ipn/ipnlocal/ssh_stub.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build ios || (!linux && !darwin && !freebsd && !openbsd && !plan9) +//go:build ios || android || (!linux && !darwin && !freebsd && !openbsd && !plan9) package ipnlocal diff --git a/net/netkernelconf/netkernelconf_default.go b/net/netkernelconf/netkernelconf_default.go index ec1b2e619..3e160e5ed 100644 --- a/net/netkernelconf/netkernelconf_default.go +++ b/net/netkernelconf/netkernelconf_default.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux +//go:build !linux || android package netkernelconf diff --git a/net/netkernelconf/netkernelconf_linux.go b/net/netkernelconf/netkernelconf_linux.go index 51ed8ea99..2a4f0a049 100644 --- a/net/netkernelconf/netkernelconf_linux.go +++ b/net/netkernelconf/netkernelconf_linux.go @@ -1,6 +1,8 @@ // Copyright (c) 
Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !android + package netkernelconf import ( diff --git a/net/netns/socks.go b/net/netns/socks.go index eea69d865..ee8dfa20e 100644 --- a/net/netns/socks.go +++ b/net/netns/socks.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !js +//go:build !ios && !js && !android package netns diff --git a/safesocket/safesocket_ps.go b/safesocket/safesocket_ps.go index 18197846d..48a8dd483 100644 --- a/safesocket/safesocket_ps.go +++ b/safesocket/safesocket_ps.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || windows || (darwin && !ios) || freebsd +//go:build (linux && !android) || windows || (darwin && !ios) || freebsd package safesocket diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 639c76e44..2895a36a7 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -135,9 +135,9 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L github.com/mdlayher/sdnotify from tailscale.com/util/systemd LA 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ github.com/miekg/dns from tailscale.com/net/dns/recursive - LDWA 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket + LDW 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket DI github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack - LA 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket W 💣 github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio @@ -296,7 +296,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - LA tailscale.com/net/tcpinfo from tailscale.com/derp + L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ @@ -411,7 +411,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ - LDA golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf + LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ golang.org/x/crypto/cryptobyte from crypto/ecdsa+ @@ -424,8 +424,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - LDA golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal - LDA golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh + LD 
golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal + LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ @@ -440,10 +440,10 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ - LDWA golang.org/x/net/internal/socks from golang.org/x/net/proxy + LDW golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/ipv4 from github.com/miekg/dns+ golang.org/x/net/ipv6 from github.com/miekg/dns+ - LDWA golang.org/x/net/proxy from tailscale.com/net/netns + LDW golang.org/x/net/proxy from tailscale.com/net/netns DI golang.org/x/net/route from net+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ From 85a7abef0c7216cf4e15a349f00670530658093b Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Fri, 21 Mar 2025 09:30:42 +0000 Subject: [PATCH 0839/1708] tsnet: add test for packet filter generation from netmap This is an integration test that covers all the code in Direct, Auto, and LocalBackend that processes NetMaps and creates a Filter. The test uses tsnet as a convenient proxy for setting up all the client pieces correctly, but is not actually a test specific to tsnet. Updates tailscale/corp#20514 Signed-off-by: James Sanderson --- ipn/ipnlocal/local.go | 6 +- ipn/ipnlocal/local_test.go | 2 +- tsnet/packet_filter_test.go | 248 ++++++++++++++++++++++++++++++++++++ 3 files changed, 254 insertions(+), 2 deletions(-) create mode 100644 tsnet/packet_filter_test.go diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index a7935c6cd..4810dabeb 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1510,7 +1510,11 @@ func (nb *nodeBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap { return nil } -func (nb *nodeBackend) GetFilterForTest() *filter.Filter { +func (b *LocalBackend) GetFilterForTest() *filter.Filter { + if !testenv.InTest() { + panic("GetFilterForTest called outside of test") + } + nb := b.currentNode() return nb.filterAtomic.Load() } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 94b5d9522..19cfd9195 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -5328,7 +5328,7 @@ func TestSrcCapPacketFilter(t *testing.T) { }}, }) - f := lb.currentNode().GetFilterForTest() + f := lb.GetFilterForTest() res := f.Check(netip.MustParseAddr("2.2.2.2"), netip.MustParseAddr("1.1.1.1"), 22, ipproto.TCP) if res != filter.Accept { t.Errorf("Check(2.2.2.2, ...) 
= %s, want %s", res, filter.Accept) diff --git a/tsnet/packet_filter_test.go b/tsnet/packet_filter_test.go new file mode 100644 index 000000000..462234222 --- /dev/null +++ b/tsnet/packet_filter_test.go @@ -0,0 +1,248 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tsnet + +import ( + "context" + "fmt" + "net/netip" + "testing" + "time" + + "tailscale.com/ipn" + "tailscale.com/tailcfg" + "tailscale.com/types/ipproto" + "tailscale.com/types/key" + "tailscale.com/types/netmap" + "tailscale.com/util/must" + "tailscale.com/wgengine/filter" +) + +// waitFor blocks until a NetMap is seen on the IPN bus that satisfies the given +// function f. Note: has no timeout, should be called with a ctx that has an +// appropriate timeout set. +func waitFor(t testing.TB, ctx context.Context, s *Server, f func(*netmap.NetworkMap) bool) error { + t.Helper() + watcher, err := s.localClient.WatchIPNBus(ctx, ipn.NotifyInitialNetMap) + if err != nil { + t.Fatalf("error watching IPN bus: %s", err) + } + defer watcher.Close() + + for { + n, err := watcher.Next() + if err != nil { + return fmt.Errorf("getting next ipn.Notify from IPN bus: %w", err) + } + if n.NetMap != nil { + if f(n.NetMap) { + return nil + } + } + } +} + +// TestPacketFilterFromNetmap tests all of the client code for processing +// netmaps and turning them into packet filters together. Only the control-plane +// side is mocked out. +func TestPacketFilterFromNetmap(t *testing.T) { + t.Parallel() + + var key key.NodePublic + must.Do(key.UnmarshalText([]byte("nodekey:5c8f86d5fc70d924e55f02446165a5dae8f822994ad26bcf4b08fd841f9bf261"))) + + type check struct { + src string + dst string + port uint16 + want filter.Response + } + + tests := []struct { + name string + mapResponse *tailcfg.MapResponse + waitTest func(*netmap.NetworkMap) bool + + incrementalMapResponse *tailcfg.MapResponse // optional + incrementalWaitTest func(*netmap.NetworkMap) bool // optional + + checks []check + }{ + { + name: "IP_based_peers", + mapResponse: &tailcfg.MapResponse{ + Node: &tailcfg.Node{ + Addresses: []netip.Prefix{netip.MustParsePrefix("1.1.1.1/32")}, + }, + Peers: []*tailcfg.Node{{ + ID: 2, + Name: "foo", + Key: key, + Addresses: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + CapMap: nil, + }}, + PacketFilter: []tailcfg.FilterRule{{ + SrcIPs: []string{"2.2.2.2/32"}, + DstPorts: []tailcfg.NetPortRange{{ + IP: "1.1.1.1/32", + Ports: tailcfg.PortRange{ + First: 22, + Last: 22, + }, + }}, + IPProto: []int{int(ipproto.TCP)}, + }}, + }, + waitTest: func(nm *netmap.NetworkMap) bool { + return len(nm.Peers) > 0 + }, + checks: []check{ + {src: "2.2.2.2", dst: "1.1.1.1", port: 22, want: filter.Accept}, + {src: "2.2.2.2", dst: "1.1.1.1", port: 23, want: filter.Drop}, // different port + {src: "3.3.3.3", dst: "1.1.1.1", port: 22, want: filter.Drop}, // different src + {src: "2.2.2.2", dst: "1.1.1.2", port: 22, want: filter.Drop}, // different dst + }, + }, + { + name: "capmap_based_peers", + mapResponse: &tailcfg.MapResponse{ + Node: &tailcfg.Node{ + Addresses: []netip.Prefix{netip.MustParsePrefix("1.1.1.1/32")}, + }, + Peers: []*tailcfg.Node{{ + ID: 2, + Name: "foo", + Key: key, + Addresses: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + CapMap: tailcfg.NodeCapMap{"X": nil}, + }}, + PacketFilter: []tailcfg.FilterRule{{ + SrcIPs: []string{"cap:X"}, + DstPorts: []tailcfg.NetPortRange{{ + IP: "1.1.1.1/32", + Ports: tailcfg.PortRange{ + First: 22, + Last: 22, + }, + }}, + IPProto: []int{int(ipproto.TCP)}, + }}, + }, 
+ waitTest: func(nm *netmap.NetworkMap) bool { + return len(nm.Peers) > 0 + }, + checks: []check{ + {src: "2.2.2.2", dst: "1.1.1.1", port: 22, want: filter.Accept}, + {src: "2.2.2.2", dst: "1.1.1.1", port: 23, want: filter.Drop}, // different port + {src: "3.3.3.3", dst: "1.1.1.1", port: 22, want: filter.Drop}, // different src + {src: "2.2.2.2", dst: "1.1.1.2", port: 22, want: filter.Drop}, // different dst + }, + }, + { + name: "capmap_based_peers_changed", + mapResponse: &tailcfg.MapResponse{ + Node: &tailcfg.Node{ + Addresses: []netip.Prefix{netip.MustParsePrefix("1.1.1.1/32")}, + CapMap: tailcfg.NodeCapMap{"X-sigil": nil}, + }, + PacketFilter: []tailcfg.FilterRule{{ + SrcIPs: []string{"cap:label-1"}, + DstPorts: []tailcfg.NetPortRange{{ + IP: "1.1.1.1/32", + Ports: tailcfg.PortRange{ + First: 22, + Last: 22, + }, + }}, + IPProto: []int{int(ipproto.TCP)}, + }}, + }, + waitTest: func(nm *netmap.NetworkMap) bool { + return nm.SelfNode.HasCap("X-sigil") + }, + incrementalMapResponse: &tailcfg.MapResponse{ + PeersChanged: []*tailcfg.Node{{ + ID: 2, + Name: "foo", + Key: key, + Addresses: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + CapMap: tailcfg.NodeCapMap{"label-1": nil}, + }}, + }, + incrementalWaitTest: func(nm *netmap.NetworkMap) bool { + return len(nm.Peers) > 0 + }, + checks: []check{ + {src: "2.2.2.2", dst: "1.1.1.1", port: 22, want: filter.Accept}, + {src: "2.2.2.2", dst: "1.1.1.1", port: 23, want: filter.Drop}, // different port + {src: "3.3.3.3", dst: "1.1.1.1", port: 22, want: filter.Drop}, // different src + {src: "2.2.2.2", dst: "1.1.1.2", port: 22, want: filter.Drop}, // different dst + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second) + defer cancel() + + controlURL, c := startControl(t) + s, _, pubKey := startServer(t, ctx, controlURL, "node") + + if test.waitTest(s.lb.NetMap()) { + t.Fatal("waitTest already passes before sending initial netmap: this will be flaky") + } + + if !c.AddRawMapResponse(pubKey, test.mapResponse) { + t.Fatalf("could not send map response to %s", pubKey) + } + + if err := waitFor(t, ctx, s, test.waitTest); err != nil { + t.Fatalf("waitFor: %s", err) + } + + pf := s.lb.GetFilterForTest() + + for _, check := range test.checks { + got := pf.Check(netip.MustParseAddr(check.src), netip.MustParseAddr(check.dst), check.port, ipproto.TCP) + + want := check.want + if test.incrementalMapResponse != nil { + want = filter.Drop + } + if got != want { + t.Errorf("check %s -> %s:%d, got: %s, want: %s", check.src, check.dst, check.port, got, want) + } + } + + if test.incrementalMapResponse != nil { + if test.incrementalWaitTest == nil { + t.Fatal("incrementalWaitTest must be set if incrementalMapResponse is set") + } + + if test.incrementalWaitTest(s.lb.NetMap()) { + t.Fatal("incrementalWaitTest already passes before sending incremental netmap: this will be flaky") + } + + if !c.AddRawMapResponse(pubKey, test.incrementalMapResponse) { + t.Fatalf("could not send map response to %s", pubKey) + } + + if err := waitFor(t, ctx, s, test.incrementalWaitTest); err != nil { + t.Fatalf("waitFor: %s", err) + } + + pf := s.lb.GetFilterForTest() + + for _, check := range test.checks { + got := pf.Check(netip.MustParseAddr(check.src), netip.MustParseAddr(check.dst), check.port, ipproto.TCP) + if got != check.want { + t.Errorf("check %s -> %s:%d, got: %s, want: %s", check.src, check.dst, check.port, got, check.want) + } + } + } + + }) + } +} From 
e2814871a72d3139d083cbdf7eaebe57057f78f2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 8 May 2025 09:47:52 -0700 Subject: [PATCH 0840/1708] util/eventbus: also disable websocket debug on Android So tsnet-on-Android is smaller, like iOS. Updates #12614 Updates #15297 Change-Id: I97ae997f5d17576024470fe5fea93d9f5f134bde Signed-off-by: Brad Fitzpatrick --- tsnet/depaware.txt | 14 +++++++------- util/eventbus/debughttp.go | 2 +- .../{debughttp_ios.go => debughttp_off.go} | 4 +++- 3 files changed, 11 insertions(+), 9 deletions(-) rename util/eventbus/{debughttp_ios.go => debughttp_off.go} (80%) diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 2895a36a7..97046b73d 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -80,10 +80,10 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm - LDWA github.com/coder/websocket from tailscale.com/util/eventbus - LDWA github.com/coder/websocket/internal/errd from github.com/coder/websocket - LDWA github.com/coder/websocket/internal/util from github.com/coder/websocket - LDWA github.com/coder/websocket/internal/xsync from github.com/coder/websocket + LDW github.com/coder/websocket from tailscale.com/util/eventbus + LDW github.com/coder/websocket/internal/errd from github.com/coder/websocket + LDW github.com/coder/websocket/internal/util from github.com/coder/websocket + LDW github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ @@ -552,7 +552,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem html from html/template+ - LDWA html/template from github.com/gorilla/csrf+ + LDW html/template from github.com/gorilla/csrf+ internal/abi from crypto/x509/internal/macos+ internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug @@ -639,8 +639,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) sync/atomic from context+ syscall from archive/tar+ text/tabwriter from runtime/pprof - LDWA text/template from html/template - LDWA text/template/parse from html/template+ + LDW text/template from html/template + LDW text/template/parse from html/template+ time from archive/tar+ unicode from bytes+ unicode/utf16 from crypto/x509+ diff --git a/util/eventbus/debughttp.go b/util/eventbus/debughttp.go index 18888cc56..a94eaa9cf 100644 --- a/util/eventbus/debughttp.go +++ b/util/eventbus/debughttp.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios +//go:build !ios && !android package eventbus diff --git a/util/eventbus/debughttp_ios.go b/util/eventbus/debughttp_off.go similarity index 80% rename from util/eventbus/debughttp_ios.go rename to util/eventbus/debughttp_off.go index a898898b7..85330579c 100644 --- a/util/eventbus/debughttp_ios.go +++ b/util/eventbus/debughttp_off.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build ios +//go:build ios || android package 
eventbus @@ -12,6 +12,8 @@ func registerHTTPDebugger(d *Debugger, td *tsweb.DebugHandler) { // reflection for method lookups. This forces the compiler to // retain a lot more code and information to make dynamic method // dispatch work, which is unacceptable bloat for the iOS build. + // We also disable it on Android while we're at it, as nobody + // is debugging Tailscale internals on Android. // // TODO: https://github.com/tailscale/tailscale/issues/15297 to // bring the debug UI back to iOS somehow. From 165b99278b9cd4bf7fe8146d9521ce6d80dbf001 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 8 May 2025 09:29:50 -0700 Subject: [PATCH 0841/1708] feature/taildrop, ipn/ipnlocal: remove leftover dup calls to osshare I'd moved the osshare calls to feature/taildrop hooks, but forgot to remove them from ipnlocal, or lost them during a rebase. But then I noticed cmd/tailscaled also had some, so turn those into a hook. Updates #12614 Change-Id: I024fb1d27fbcc49c013158882ee5982c2737037d Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 1 - cmd/tailscaled/depaware.txt | 1 + cmd/tailscaled/install_windows.go | 7 +++-- .../tailscaledhooks/tailscaledhooks.go | 12 ++++++++ feature/taildrop/ext.go | 9 ++++++ ipn/ipnlocal/local.go | 10 ------- ipn/ipnlocal/peerapi_test.go | 30 ++++++++----------- tsnet/depaware.txt | 1 - .../tailscaled_deps_test_windows.go | 1 + 9 files changed, 40 insertions(+), 32 deletions(-) create mode 100644 cmd/tailscaled/tailscaledhooks/tailscaledhooks.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 700085b39..2ed36c3dc 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -936,7 +936,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag - tailscale.com/util/osshare from tailscale.com/ipn/ipnlocal tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 1af828f75..faa1b5bd8 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -253,6 +253,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/clientupdate from tailscale.com/client/web+ LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled+ + tailscale.com/cmd/tailscaled/tailscaledhooks from tailscale.com/cmd/tailscaled+ tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ tailscale.com/control/controlhttp from tailscale.com/control/controlclient diff --git a/cmd/tailscaled/install_windows.go b/cmd/tailscaled/install_windows.go index c36418642..c667539b0 100644 --- a/cmd/tailscaled/install_windows.go +++ b/cmd/tailscaled/install_windows.go @@ -15,9 +15,9 @@ import ( "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc" "golang.org/x/sys/windows/svc/mgr" + "tailscale.com/cmd/tailscaled/tailscaledhooks" "tailscale.com/logtail/backoff" "tailscale.com/types/logger" - "tailscale.com/util/osshare" ) func init() { @@ -81,8 +81,9 @@ func installSystemDaemonWindows(args []string) 
(err error) { } func uninstallSystemDaemonWindows(args []string) (ret error) { - // Remove file sharing from Windows shell (noop in non-windows) - osshare.SetFileSharingEnabled(false, logger.Discard) + for _, f := range tailscaledhooks.UninstallSystemDaemonWindows { + f() + } m, err := mgr.Connect() if err != nil { diff --git a/cmd/tailscaled/tailscaledhooks/tailscaledhooks.go b/cmd/tailscaled/tailscaledhooks/tailscaledhooks.go new file mode 100644 index 000000000..6ea662d39 --- /dev/null +++ b/cmd/tailscaled/tailscaledhooks/tailscaledhooks.go @@ -0,0 +1,12 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package tailscaledhooks provides hooks for optional features +// to add to during init that tailscaled calls at runtime. +package tailscaledhooks + +import "tailscale.com/feature" + +// UninstallSystemDaemonWindows is called when the Windows +// system daemon is uninstalled. +var UninstallSystemDaemonWindows feature.Hooks[func()] diff --git a/feature/taildrop/ext.go b/feature/taildrop/ext.go index 058418cde..aee825ee7 100644 --- a/feature/taildrop/ext.go +++ b/feature/taildrop/ext.go @@ -12,12 +12,14 @@ import ( "maps" "os" "path/filepath" + "runtime" "slices" "strings" "sync" "sync/atomic" "tailscale.com/client/tailscale/apitype" + "tailscale.com/cmd/tailscaled/tailscaledhooks" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" "tailscale.com/ipn/ipnstate" @@ -31,6 +33,13 @@ import ( func init() { ipnext.RegisterExtension("taildrop", newExtension) + + if runtime.GOOS == "windows" { + tailscaledhooks.UninstallSystemDaemonWindows.Add(func() { + // Remove file sharing from Windows shell. + osshare.SetFileSharingEnabled(false, logger.Discard) + }) + } } func newExtension(logf logger.Logf, b ipnext.SafeBackend) (ipnext.Extension, error) { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 4810dabeb..15f8f1c6f 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -102,7 +102,6 @@ import ( "tailscale.com/util/httpm" "tailscale.com/util/mak" "tailscale.com/util/multierr" - "tailscale.com/util/osshare" "tailscale.com/util/osuser" "tailscale.com/util/rands" "tailscale.com/util/set" @@ -274,7 +273,6 @@ type LocalBackend struct { machinePrivKey key.MachinePrivate tka *tkaState // TODO(nickkhyl): move to nodeContext state ipn.State // TODO(nickkhyl): move to nodeContext - capFileSharing bool // whether netMap contains the file sharing capability capTailnetLock bool // whether netMap contains the tailnet lock capability // hostinfo is mutated in-place while mu is held. 
hostinfo *tailcfg.Hostinfo // TODO(nickkhyl): move to nodeContext @@ -460,7 +458,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } envknob.LogCurrent(logf) - osshare.SetFileSharingEnabled(false, logf) ctx, cancel := context.WithCancel(context.Background()) clock := tstime.StdClock{} @@ -6140,13 +6137,6 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.health.SetControlHealth(nil) } - // Determine if file sharing is enabled - fs := nm.HasCap(tailcfg.CapabilityFileSharing) - if fs != b.capFileSharing { - osshare.SetFileSharingEnabled(fs, b.logf) - } - b.capFileSharing = fs - if nm.HasCap(tailcfg.NodeAttrLinuxMustUseIPTables) { b.capForcedNetfilter = "iptables" } else if nm.HasCap(tailcfg.NodeAttrLinuxMustUseNfTables) { diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index 975ed38bb..d8655afa0 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -67,18 +67,16 @@ func bodyNotContains(sub string) check { func TestHandlePeerAPI(t *testing.T) { tests := []struct { - name string - isSelf bool // the peer sending the request is owned by us - capSharing bool // self node has file sharing capability - debugCap bool // self node has debug capability - reqs []*http.Request - checks []check + name string + isSelf bool // the peer sending the request is owned by us + debugCap bool // self node has debug capability + reqs []*http.Request + checks []check }{ { - name: "not_peer_api", - isSelf: true, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("GET", "/", nil)}, + name: "not_peer_api", + isSelf: true, + reqs: []*http.Request{httptest.NewRequest("GET", "/", nil)}, checks: checks( httpStatus(200), bodyContains("This is my Tailscale device."), @@ -86,10 +84,9 @@ func TestHandlePeerAPI(t *testing.T) { ), }, { - name: "not_peer_api_not_owner", - isSelf: false, - capSharing: true, - reqs: []*http.Request{httptest.NewRequest("GET", "/", nil)}, + name: "not_peer_api_not_owner", + isSelf: false, + reqs: []*http.Request{httptest.NewRequest("GET", "/", nil)}, checks: checks( httpStatus(200), bodyContains("This is my Tailscale device."), @@ -160,9 +157,8 @@ func TestHandlePeerAPI(t *testing.T) { } var e peerAPITestEnv lb := &LocalBackend{ - logf: e.logBuf.Logf, - capFileSharing: tt.capSharing, - clock: &tstest.Clock{}, + logf: e.logBuf.Logf, + clock: &tstest.Clock{}, } lb.currentNode().SetNetMap(&netmap.NetworkMap{SelfNode: selfNode.View()}) e.ph = &peerAPIHandler{ diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 97046b73d..f9e58a71c 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -365,7 +365,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag - tailscale.com/util/osshare from tailscale.com/ipn/ipnlocal tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index 79e2e05a7..b5919b962 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -20,6 +20,7 @@ import ( _ "golang.zx2c4.com/wireguard/windows/tunnel/winipcfg" _ "tailscale.com/client/local" 
_ "tailscale.com/cmd/tailscaled/childproc" + _ "tailscale.com/cmd/tailscaled/tailscaledhooks" _ "tailscale.com/control/controlclient" _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" From cb6fc37d660f4fc392e5900ca154bba6dcefd650 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 8 May 2025 19:14:50 -0500 Subject: [PATCH 0842/1708] util/deephash: move tests that depend on other tailscale packages to deephash_test This is done to prevent import cycles in tests. Fixes #15923 Signed-off-by: Nick Khyl --- util/deephash/deephash_test.go | 156 ----------------------- util/deephash/tailscale_types_test.go | 177 ++++++++++++++++++++++++++ 2 files changed, 177 insertions(+), 156 deletions(-) create mode 100644 util/deephash/tailscale_types_test.go diff --git a/util/deephash/deephash_test.go b/util/deephash/deephash_test.go index d5584def3..413893ff9 100644 --- a/util/deephash/deephash_test.go +++ b/util/deephash/deephash_test.go @@ -23,18 +23,11 @@ import ( "go4.org/mem" "go4.org/netipx" "tailscale.com/tailcfg" - "tailscale.com/types/dnstype" - "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/ptr" - "tailscale.com/types/views" "tailscale.com/util/deephash/testtype" - "tailscale.com/util/dnsname" "tailscale.com/util/hashx" "tailscale.com/version" - "tailscale.com/wgengine/filter" - "tailscale.com/wgengine/router" - "tailscale.com/wgengine/wgcfg" ) type appendBytes []byte @@ -197,21 +190,6 @@ func TestHash(t *testing.T) { } } -func TestDeepHash(t *testing.T) { - // v contains the types of values we care about for our current callers. - // Mostly we're just testing that we don't panic on handled types. - v := getVal() - hash1 := Hash(v) - t.Logf("hash: %v", hash1) - for range 20 { - v := getVal() - hash2 := Hash(v) - if hash1 != hash2 { - t.Error("second hash didn't match") - } - } -} - // Tests that we actually hash map elements. Whoops. 
func TestIssue4868(t *testing.T) { m1 := map[int]string{1: "foo"} @@ -255,110 +233,6 @@ func TestQuick(t *testing.T) { } } -type tailscaleTypes struct { - WGConfig *wgcfg.Config - RouterConfig *router.Config - MapFQDNAddrs map[dnsname.FQDN][]netip.Addr - MapFQDNAddrPorts map[dnsname.FQDN][]netip.AddrPort - MapDiscoPublics map[key.DiscoPublic]bool - MapResponse *tailcfg.MapResponse - FilterMatch filter.Match -} - -func getVal() *tailscaleTypes { - return &tailscaleTypes{ - &wgcfg.Config{ - Name: "foo", - Addresses: []netip.Prefix{netip.PrefixFrom(netip.AddrFrom16([16]byte{3: 3}).Unmap(), 5)}, - Peers: []wgcfg.Peer{ - { - PublicKey: key.NodePublic{}, - }, - }, - }, - &router.Config{ - Routes: []netip.Prefix{ - netip.MustParsePrefix("1.2.3.0/24"), - netip.MustParsePrefix("1234::/64"), - }, - }, - map[dnsname.FQDN][]netip.Addr{ - dnsname.FQDN("a."): {netip.MustParseAddr("1.2.3.4"), netip.MustParseAddr("4.3.2.1")}, - dnsname.FQDN("b."): {netip.MustParseAddr("8.8.8.8"), netip.MustParseAddr("9.9.9.9")}, - dnsname.FQDN("c."): {netip.MustParseAddr("6.6.6.6"), netip.MustParseAddr("7.7.7.7")}, - dnsname.FQDN("d."): {netip.MustParseAddr("6.7.6.6"), netip.MustParseAddr("7.7.7.8")}, - dnsname.FQDN("e."): {netip.MustParseAddr("6.8.6.6"), netip.MustParseAddr("7.7.7.9")}, - dnsname.FQDN("f."): {netip.MustParseAddr("6.9.6.6"), netip.MustParseAddr("7.7.7.0")}, - }, - map[dnsname.FQDN][]netip.AddrPort{ - dnsname.FQDN("a."): {netip.MustParseAddrPort("1.2.3.4:11"), netip.MustParseAddrPort("4.3.2.1:22")}, - dnsname.FQDN("b."): {netip.MustParseAddrPort("8.8.8.8:11"), netip.MustParseAddrPort("9.9.9.9:22")}, - dnsname.FQDN("c."): {netip.MustParseAddrPort("8.8.8.8:12"), netip.MustParseAddrPort("9.9.9.9:23")}, - dnsname.FQDN("d."): {netip.MustParseAddrPort("8.8.8.8:13"), netip.MustParseAddrPort("9.9.9.9:24")}, - dnsname.FQDN("e."): {netip.MustParseAddrPort("8.8.8.8:14"), netip.MustParseAddrPort("9.9.9.9:25")}, - }, - map[key.DiscoPublic]bool{ - key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 31: 0})): true, - key.DiscoPublicFromRaw32(mem.B([]byte{1: 2, 31: 0})): false, - key.DiscoPublicFromRaw32(mem.B([]byte{1: 3, 31: 0})): true, - key.DiscoPublicFromRaw32(mem.B([]byte{1: 4, 31: 0})): false, - }, - &tailcfg.MapResponse{ - DERPMap: &tailcfg.DERPMap{ - Regions: map[int]*tailcfg.DERPRegion{ - 1: { - RegionID: 1, - RegionCode: "foo", - Nodes: []*tailcfg.DERPNode{ - { - Name: "n1", - RegionID: 1, - HostName: "foo.com", - }, - { - Name: "n2", - RegionID: 1, - HostName: "bar.com", - }, - }, - }, - }, - }, - DNSConfig: &tailcfg.DNSConfig{ - Resolvers: []*dnstype.Resolver{ - {Addr: "10.0.0.1"}, - }, - }, - PacketFilter: []tailcfg.FilterRule{ - { - SrcIPs: []string{"1.2.3.4"}, - DstPorts: []tailcfg.NetPortRange{ - { - IP: "1.2.3.4/32", - Ports: tailcfg.PortRange{First: 1, Last: 2}, - }, - }, - }, - }, - Peers: []*tailcfg.Node{ - { - ID: 1, - }, - { - ID: 2, - }, - }, - UserProfiles: []tailcfg.UserProfile{ - {ID: 1, LoginName: "foo@bar.com"}, - {ID: 2, LoginName: "bar@foo.com"}, - }, - }, - filter.Match{ - IPProto: views.SliceOf([]ipproto.Proto{1, 2, 3}), - }, - } -} - type IntThenByte struct { _ int _ byte @@ -758,14 +632,6 @@ func TestInterfaceCycle(t *testing.T) { var sink Sum -func BenchmarkHash(b *testing.B) { - b.ReportAllocs() - v := getVal() - for range b.N { - sink = Hash(v) - } -} - // filterRules is a packet filter that has both everything populated (in its // first element) and also a few entries that are the typical shape for regular // packet filters as sent to clients. 
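The mechanism this commit relies on: tests declared in an external "_test" package may import packages that themselves import the package under test, because the import cycle exists only inside the external test binary, which Go permits. A minimal sketch of the layout, using hypothetical module and package names rather than the real deephash dependencies:

// file a/a.go: the package under test.
package a

func Double(x int) int { return x * 2 }

// file b/b.go: imports a, so a's in-package tests could never import b.
package b

import "example.com/m/a"

func Quadruple(x int) int { return a.Double(a.Double(x)) }

// file a/external_test.go: declared as package a_test, not a, so it may
// import b without creating a compile-time import cycle.
package a_test

import (
	"testing"

	"example.com/m/a"
	"example.com/m/b"
)

func TestQuadruple(t *testing.T) {
	if got, want := b.Quadruple(3), a.Double(a.Double(3)); got != want {
		t.Fatalf("b.Quadruple(3) = %d, want %d", got, want)
	}
}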
@@ -1072,16 +938,6 @@ func FuzzAddr(f *testing.F) { }) } -func TestAppendTo(t *testing.T) { - v := getVal() - h := Hash(v) - sum := h.AppendTo(nil) - - if s := h.String(); s != string(sum) { - t.Errorf("hash sum mismatch; h.String()=%q h.AppendTo()=%q", s, string(sum)) - } -} - func TestFilterFields(t *testing.T) { type T struct { A int @@ -1126,15 +982,3 @@ func TestFilterFields(t *testing.T) { } } } - -func BenchmarkAppendTo(b *testing.B) { - b.ReportAllocs() - v := getVal() - h := Hash(v) - - hashBuf := make([]byte, 0, 100) - b.ResetTimer() - for range b.N { - hashBuf = h.AppendTo(hashBuf[:0]) - } -} diff --git a/util/deephash/tailscale_types_test.go b/util/deephash/tailscale_types_test.go new file mode 100644 index 000000000..d76025399 --- /dev/null +++ b/util/deephash/tailscale_types_test.go @@ -0,0 +1,177 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// This file contains tests and benchmarks that use types from other packages +// in the Tailscale codebase. Unlike other deephash tests, these are in the _test +// package to avoid circular dependencies. + +package deephash_test + +import ( + "net/netip" + "testing" + + "go4.org/mem" + "tailscale.com/tailcfg" + "tailscale.com/types/dnstype" + "tailscale.com/types/ipproto" + "tailscale.com/types/key" + "tailscale.com/types/views" + "tailscale.com/util/dnsname" + "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/router" + "tailscale.com/wgengine/wgcfg" + + . "tailscale.com/util/deephash" +) + +var sink Sum + +func BenchmarkHash(b *testing.B) { + b.ReportAllocs() + v := getVal() + for range b.N { + sink = Hash(v) + } +} + +func BenchmarkAppendTo(b *testing.B) { + b.ReportAllocs() + v := getVal() + h := Hash(v) + + hashBuf := make([]byte, 0, 100) + b.ResetTimer() + for range b.N { + hashBuf = h.AppendTo(hashBuf[:0]) + } +} + +func TestDeepHash(t *testing.T) { + // v contains the types of values we care about for our current callers. + // Mostly we're just testing that we don't panic on handled types. 
+ v := getVal() + hash1 := Hash(v) + t.Logf("hash: %v", hash1) + for range 20 { + v := getVal() + hash2 := Hash(v) + if hash1 != hash2 { + t.Error("second hash didn't match") + } + } +} + +func TestAppendTo(t *testing.T) { + v := getVal() + h := Hash(v) + sum := h.AppendTo(nil) + + if s := h.String(); s != string(sum) { + t.Errorf("hash sum mismatch; h.String()=%q h.AppendTo()=%q", s, string(sum)) + } +} + +type tailscaleTypes struct { + WGConfig *wgcfg.Config + RouterConfig *router.Config + MapFQDNAddrs map[dnsname.FQDN][]netip.Addr + MapFQDNAddrPorts map[dnsname.FQDN][]netip.AddrPort + MapDiscoPublics map[key.DiscoPublic]bool + MapResponse *tailcfg.MapResponse + FilterMatch filter.Match +} + +func getVal() *tailscaleTypes { + return &tailscaleTypes{ + &wgcfg.Config{ + Name: "foo", + Addresses: []netip.Prefix{netip.PrefixFrom(netip.AddrFrom16([16]byte{3: 3}).Unmap(), 5)}, + Peers: []wgcfg.Peer{ + { + PublicKey: key.NodePublic{}, + }, + }, + }, + &router.Config{ + Routes: []netip.Prefix{ + netip.MustParsePrefix("1.2.3.0/24"), + netip.MustParsePrefix("1234::/64"), + }, + }, + map[dnsname.FQDN][]netip.Addr{ + dnsname.FQDN("a."): {netip.MustParseAddr("1.2.3.4"), netip.MustParseAddr("4.3.2.1")}, + dnsname.FQDN("b."): {netip.MustParseAddr("8.8.8.8"), netip.MustParseAddr("9.9.9.9")}, + dnsname.FQDN("c."): {netip.MustParseAddr("6.6.6.6"), netip.MustParseAddr("7.7.7.7")}, + dnsname.FQDN("d."): {netip.MustParseAddr("6.7.6.6"), netip.MustParseAddr("7.7.7.8")}, + dnsname.FQDN("e."): {netip.MustParseAddr("6.8.6.6"), netip.MustParseAddr("7.7.7.9")}, + dnsname.FQDN("f."): {netip.MustParseAddr("6.9.6.6"), netip.MustParseAddr("7.7.7.0")}, + }, + map[dnsname.FQDN][]netip.AddrPort{ + dnsname.FQDN("a."): {netip.MustParseAddrPort("1.2.3.4:11"), netip.MustParseAddrPort("4.3.2.1:22")}, + dnsname.FQDN("b."): {netip.MustParseAddrPort("8.8.8.8:11"), netip.MustParseAddrPort("9.9.9.9:22")}, + dnsname.FQDN("c."): {netip.MustParseAddrPort("8.8.8.8:12"), netip.MustParseAddrPort("9.9.9.9:23")}, + dnsname.FQDN("d."): {netip.MustParseAddrPort("8.8.8.8:13"), netip.MustParseAddrPort("9.9.9.9:24")}, + dnsname.FQDN("e."): {netip.MustParseAddrPort("8.8.8.8:14"), netip.MustParseAddrPort("9.9.9.9:25")}, + }, + map[key.DiscoPublic]bool{ + key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 31: 0})): true, + key.DiscoPublicFromRaw32(mem.B([]byte{1: 2, 31: 0})): false, + key.DiscoPublicFromRaw32(mem.B([]byte{1: 3, 31: 0})): true, + key.DiscoPublicFromRaw32(mem.B([]byte{1: 4, 31: 0})): false, + }, + &tailcfg.MapResponse{ + DERPMap: &tailcfg.DERPMap{ + Regions: map[int]*tailcfg.DERPRegion{ + 1: { + RegionID: 1, + RegionCode: "foo", + Nodes: []*tailcfg.DERPNode{ + { + Name: "n1", + RegionID: 1, + HostName: "foo.com", + }, + { + Name: "n2", + RegionID: 1, + HostName: "bar.com", + }, + }, + }, + }, + }, + DNSConfig: &tailcfg.DNSConfig{ + Resolvers: []*dnstype.Resolver{ + {Addr: "10.0.0.1"}, + }, + }, + PacketFilter: []tailcfg.FilterRule{ + { + SrcIPs: []string{"1.2.3.4"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "1.2.3.4/32", + Ports: tailcfg.PortRange{First: 1, Last: 2}, + }, + }, + }, + }, + Peers: []*tailcfg.Node{ + { + ID: 1, + }, + { + ID: 2, + }, + }, + UserProfiles: []tailcfg.UserProfile{ + {ID: 1, LoginName: "foo@bar.com"}, + {ID: 2, LoginName: "bar@foo.com"}, + }, + }, + filter.Match{ + IPProto: views.SliceOf([]ipproto.Proto{1, 2, 3}), + }, + } +} From b1c2860485ae3f3d6d23bb26ad4ab71ca49948f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olivier=20Mengu=C3=A9?= Date: Tue, 1 Apr 2025 18:00:06 +0200 Subject: [PATCH 0843/1708] 
client/local: add godoc links MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Olivier Mengué --- client/local/local.go | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/client/local/local.go b/client/local/local.go index 8953b8ee6..0e4d495d3 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -294,7 +294,7 @@ func (lc *Client) get200(ctx context.Context, path string) ([]byte, error) { // WhoIs returns the owner of the remoteAddr, which must be an IP or IP:port. // -// Deprecated: use Client.WhoIs. +// Deprecated: use [Client.WhoIs]. func WhoIs(ctx context.Context, remoteAddr string) (*apitype.WhoIsResponse, error) { return defaultClient.WhoIs(ctx, remoteAddr) } @@ -309,7 +309,7 @@ func decodeJSON[T any](b []byte) (ret T, err error) { // WhoIs returns the owner of the remoteAddr, which must be an IP or IP:port. // -// If not found, the error is ErrPeerNotFound. +// If not found, the error is [ErrPeerNotFound]. // // For connections proxied by tailscaled, this looks up the owner of the given // address as TCP first, falling back to UDP; if you want to only check a @@ -325,7 +325,8 @@ func (lc *Client) WhoIs(ctx context.Context, remoteAddr string) (*apitype.WhoIsR return decodeJSON[*apitype.WhoIsResponse](body) } -// ErrPeerNotFound is returned by WhoIs and WhoIsNodeKey when a peer is not found. +// ErrPeerNotFound is returned by [Client.WhoIs], [Client.WhoIsNodeKey] and +// [Client.WhoIsProto] when a peer is not found. var ErrPeerNotFound = errors.New("peer not found") // WhoIsNodeKey returns the owner of the given wireguard public key. @@ -345,7 +346,7 @@ func (lc *Client) WhoIsNodeKey(ctx context.Context, key key.NodePublic) (*apityp // WhoIsProto returns the owner of the remoteAddr, which must be an IP or // IP:port, for the given protocol (tcp or udp). // -// If not found, the error is ErrPeerNotFound. +// If not found, the error is [ErrPeerNotFound]. func (lc *Client) WhoIsProto(ctx context.Context, proto, remoteAddr string) (*apitype.WhoIsResponse, error) { body, err := lc.get200(ctx, "/localapi/v0/whois?proto="+url.QueryEscape(proto)+"&addr="+url.QueryEscape(remoteAddr)) if err != nil { @@ -490,7 +491,7 @@ func (lc *Client) BugReportWithOpts(ctx context.Context, opts BugReportOpts) (st // BugReport logs and returns a log marker that can be shared by the user with support. // -// This is the same as calling BugReportWithOpts and only specifying the Note +// This is the same as calling [Client.BugReportWithOpts] and only specifying the Note // field. func (lc *Client) BugReport(ctx context.Context, note string) (string, error) { return lc.BugReportWithOpts(ctx, BugReportOpts{Note: note}) @@ -531,7 +532,7 @@ func (lc *Client) DebugResultJSON(ctx context.Context, action string) (any, erro return x, nil } -// DebugPortmapOpts contains options for the DebugPortmap command. +// DebugPortmapOpts contains options for the [Client.DebugPortmap] command. type DebugPortmapOpts struct { // Duration is how long the mapping should be created for. It defaults // to 5 seconds if not set. @@ -677,7 +678,7 @@ func (lc *Client) WaitingFiles(ctx context.Context) ([]apitype.WaitingFile, erro return lc.AwaitWaitingFiles(ctx, 0) } -// AwaitWaitingFiles is like WaitingFiles but takes a duration to await for an answer. +// AwaitWaitingFiles is like [Client.WaitingFiles] but takes a duration to await for an answer. // If the duration is 0, it will return immediately. 
The duration is respected at second // granularity only. If no files are available, it returns (nil, nil). func (lc *Client) AwaitWaitingFiles(ctx context.Context, d time.Duration) ([]apitype.WaitingFile, error) { @@ -946,7 +947,7 @@ func (lc *Client) SetDNS(ctx context.Context, name, value string) error { // The host may be a base DNS name (resolved from the netmap inside // tailscaled), a FQDN, or an IP address. // -// The ctx is only used for the duration of the call, not the lifetime of the net.Conn. +// The ctx is only used for the duration of the call, not the lifetime of the [net.Conn]. func (lc *Client) DialTCP(ctx context.Context, host string, port uint16) (net.Conn, error) { return lc.UserDial(ctx, "tcp", host, port) } @@ -957,7 +958,7 @@ func (lc *Client) DialTCP(ctx context.Context, host string, port uint16) (net.Co // a FQDN, or an IP address. // // The ctx is only used for the duration of the call, not the lifetime of the -// net.Conn. +// [net.Conn]. func (lc *Client) UserDial(ctx context.Context, network, host string, port uint16) (net.Conn, error) { connCh := make(chan net.Conn, 1) trace := httptrace.ClientTrace{ @@ -1025,7 +1026,7 @@ func (lc *Client) CurrentDERPMap(ctx context.Context) (*tailcfg.DERPMap, error) // // It returns a cached certificate from disk if it's still valid. // -// Deprecated: use Client.CertPair. +// Deprecated: use [Client.CertPair]. func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { return defaultClient.CertPair(ctx, domain) } @@ -1072,9 +1073,9 @@ func (lc *Client) CertPairWithValidity(ctx context.Context, domain string, minVa // It returns a cached certificate from disk if it's still valid. // // It's the right signature to use as the value of -// tls.Config.GetCertificate. +// [tls.Config.GetCertificate]. // -// Deprecated: use Client.GetCertificate. +// Deprecated: use [Client.GetCertificate]. func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { return defaultClient.GetCertificate(hi) } @@ -1084,7 +1085,7 @@ func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { // It returns a cached certificate from disk if it's still valid. // // It's the right signature to use as the value of -// tls.Config.GetCertificate. +// [tls.Config.GetCertificate]. // // API maturity: this is considered a stable API. func (lc *Client) GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { @@ -1113,7 +1114,7 @@ func (lc *Client) GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, err // ExpandSNIName expands bare label name into the most likely actual TLS cert name. // -// Deprecated: use Client.ExpandSNIName. +// Deprecated: use [Client.ExpandSNIName]. func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { return defaultClient.ExpandSNIName(ctx, name) } @@ -1502,7 +1503,7 @@ func (lc *Client) SwitchProfile(ctx context.Context, profile ipn.ProfileID) erro // DeleteProfile removes the profile with the given ID. // If the profile is the current profile, an empty profile -// will be selected as if SwitchToEmptyProfile was called. +// will be selected as if [Client.SwitchToEmptyProfile] was called. 
func (lc *Client) DeleteProfile(ctx context.Context, profile ipn.ProfileID) error { _, err := lc.send(ctx, "DELETE", "/localapi/v0/profiles/"+url.PathEscape(string(profile)), http.StatusNoContent, nil) return err @@ -1559,7 +1560,7 @@ func (lc *Client) DebugSetExpireIn(ctx context.Context, d time.Duration) error { // StreamDebugCapture streams a pcap-formatted packet capture. // // The provided context does not determine the lifetime of the -// returned io.ReadCloser. +// returned [io.ReadCloser]. func (lc *Client) StreamDebugCapture(ctx context.Context) (io.ReadCloser, error) { req, err := http.NewRequestWithContext(ctx, "POST", "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-capture", nil) if err != nil { @@ -1582,7 +1583,7 @@ func (lc *Client) StreamDebugCapture(ctx context.Context) (io.ReadCloser, error) // The context is used for the life of the watch, not just the call to // WatchIPNBus. // -// The returned IPNBusWatcher's Close method must be called when done to release +// The returned [IPNBusWatcher]'s Close method must be called when done to release // resources. // // A default set of ipn.Notify messages are returned but the set can be modified by mask. @@ -1609,7 +1610,7 @@ func (lc *Client) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*IP }, nil } -// CheckUpdate returns a tailcfg.ClientVersion indicating whether or not an update is available +// CheckUpdate returns a [*tailcfg.ClientVersion] indicating whether or not an update is available // to be installed via the LocalAPI. In case the LocalAPI can't install updates, it returns a // ClientVersion that says that we are up to date. func (lc *Client) CheckUpdate(ctx context.Context) (*tailcfg.ClientVersion, error) { @@ -1685,7 +1686,7 @@ func (lc *Client) DriveShareList(ctx context.Context) ([]*drive.Share, error) { } // IPNBusWatcher is an active subscription (watch) of the local tailscaled IPN bus. -// It's returned by Client.WatchIPNBus. +// It's returned by [Client.WatchIPNBus]. // // It must be closed when done. type IPNBusWatcher struct { From 7d6d2b4c50667ee62da4ea7b4268c1faae8b95ac Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Fri, 9 May 2025 12:03:22 -0400 Subject: [PATCH 0844/1708] health, ipn/ipnlocal: add metrics for various client events (#15828) updates tailscale/corp#28092 Adds metrics for various client events: * Enabling an exit node * Enabling a mullvad exit node * Enabling a preferred exit node * Setting WantRunning to true/false * Requesting a bug report ID * Profile counts * Profile deletions * Captive portal detection Signed-off-by: Jonathan Nobels --- health/health.go | 12 +++++ ipn/ipnlocal/local.go | 22 ++++++++ ipn/ipnlocal/node_backend.go | 11 ++++ ipn/ipnlocal/prefs_metrics.go | 99 +++++++++++++++++++++++++++++++++++ ipn/ipnlocal/profiles.go | 6 ++- ipn/localapi/localapi.go | 17 +++--- 6 files changed, 158 insertions(+), 9 deletions(-) create mode 100644 ipn/ipnlocal/prefs_metrics.go diff --git a/health/health.go b/health/health.go index 65d4402ae..1ec2bcc9b 100644 --- a/health/health.go +++ b/health/health.go @@ -362,6 +362,18 @@ func (t *Tracker) SetMetricsRegistry(reg *usermetric.Registry) { })) } +// IsUnhealthy reports whether the current state is unhealthy because the given +// warnable is set. 
+func (t *Tracker) IsUnhealthy(w *Warnable) bool { + if t.nil() { + return false + } + t.mu.Lock() + defer t.mu.Unlock() + _, exists := t.warnableVal[w] + return exists +} + // SetUnhealthy sets a warningState for the given Warnable with the provided Args, and should be // called when a Warnable becomes unhealthy, or its unhealthy status needs to be updated. // SetUnhealthy takes ownership of args. The args can be nil if no additional information is diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 15f8f1c6f..91a46bbcc 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -166,6 +166,8 @@ type watchSession struct { cancel context.CancelFunc // to shut down the session } +var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detected") + // LocalBackend is the glue between the major pieces of the Tailscale // network software: the cloud control plane (via controlclient), the // network data plane (via wgengine), and the user-facing UIs and CLIs @@ -2764,6 +2766,9 @@ func (b *LocalBackend) performCaptiveDetection() { b.mu.Unlock() found := d.Detect(ctx, netMon, dm, preferredDERP) if found { + if !b.health.IsUnhealthy(captivePortalWarnable) { + metricCaptivePortalDetected.Add(1) + } b.health.SetUnhealthy(captivePortalWarnable, health.Args{}) } else { b.health.SetHealthy(captivePortalWarnable) @@ -4379,9 +4384,11 @@ func (b *LocalBackend) editPrefsLockedOnEntry(mp *ipn.MaskedPrefs, unlock unlock b.egg = true b.goTracker.Go(b.doSetHostinfoFilterServices) } + p0 := b.pm.CurrentPrefs() p1 := b.pm.CurrentPrefs().AsStruct() p1.ApplyEdits(mp) + if err := b.checkPrefsLocked(p1); err != nil { b.logf("EditPrefs check error: %v", err) return ipn.PrefsView{}, err @@ -4393,9 +4400,23 @@ func (b *LocalBackend) editPrefsLockedOnEntry(mp *ipn.MaskedPrefs, unlock unlock if p1.View().Equals(p0) { return stripKeysFromPrefs(p0), nil } + b.logf("EditPrefs: %v", mp.Pretty()) newPrefs := b.setPrefsLockedOnEntry(p1, unlock) + // This is recorded here in the EditPrefs path, not the setPrefs path on purpose. + // recordForEdit records metrics related to edits and changes, not the final state. + // If, in the future, we want to record gauge-metrics related to the state of prefs, + // that should be done in the setPrefs path. + e := prefsMetricsEditEvent{ + change: mp, + pNew: p1.View(), + pOld: p0, + node: b.currentNode(), + lastSuggestedExitNode: b.lastSuggestedExitNode, + } + e.record() + // Note: don't perform any actions for the new prefs here. Not // every prefs change goes through EditPrefs. Put your actions // in setPrefsLocksOnEntry instead. @@ -4467,6 +4488,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) applySysPolicy(newp, b.lastSuggestedExitNode, b.overrideAlwaysOn) // setExitNodeID does likewise. No-op if no exit node resolution is needed. setExitNodeID(newp, netMap) + // We do this to avoid holding the lock while doing everything else. 
oldHi := b.hostinfo diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 415c32ccf..fe4973723 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -172,6 +172,17 @@ func (nb *nodeBackend) PeerByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) return n, ok } +func (nb *nodeBackend) PeerByStableID(id tailcfg.StableNodeID) (_ tailcfg.NodeView, ok bool) { + nb.mu.Lock() + defer nb.mu.Unlock() + for _, n := range nb.peers { + if n.StableID() == id { + return n, true + } + } + return tailcfg.NodeView{}, false +} + func (nb *nodeBackend) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok bool) { nb.mu.Lock() nm := nb.netMap diff --git a/ipn/ipnlocal/prefs_metrics.go b/ipn/ipnlocal/prefs_metrics.go new file mode 100644 index 000000000..fa768ba3c --- /dev/null +++ b/ipn/ipnlocal/prefs_metrics.go @@ -0,0 +1,99 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnlocal + +import ( + "errors" + + "tailscale.com/ipn" + "tailscale.com/tailcfg" + "tailscale.com/util/clientmetric" +) + +// Counter metrics for edit/change events +var ( + // metricExitNodeEnabled is incremented when the user enables an exit node independent of the node's characteristics. + metricExitNodeEnabled = clientmetric.NewCounter("prefs_exit_node_enabled") + // metricExitNodeEnabledSuggested is incremented when the user enables the suggested exit node. + metricExitNodeEnabledSuggested = clientmetric.NewCounter("prefs_exit_node_enabled_suggested") + // metricExitNodeEnabledMullvad is incremented when the user enables a Mullvad exit node. + metricExitNodeEnabledMullvad = clientmetric.NewCounter("prefs_exit_node_enabled_mullvad") + // metricWantRunningEnabled is incremented when WantRunning transitions from false to true. + metricWantRunningEnabled = clientmetric.NewCounter("prefs_want_running_enabled") + // metricWantRunningDisabled is incremented when WantRunning transitions from true to false. + metricWantRunningDisabled = clientmetric.NewCounter("prefs_want_running_disabled") +) + +type exitNodeProperty string + +const ( + exitNodeTypePreferred exitNodeProperty = "suggested" // The exit node is the last suggested exit node + exitNodeTypeMullvad exitNodeProperty = "mullvad" // The exit node is a Mullvad exit node +) + +// prefsMetricsEditEvent encapsulates information needed to record metrics related +// to any changes to preferences. +type prefsMetricsEditEvent struct { + change *ipn.MaskedPrefs // the preference mask used to update the preferences + pNew ipn.PrefsView // new preferences (after ApplyUpdates) + pOld ipn.PrefsView // old preferences (before ApplyUpdates) + node *nodeBackend // the node the event is associated with + lastSuggestedExitNode tailcfg.StableNodeID // the last suggested exit node +} + +// record records changes to preferences as clientmetrics. +func (e *prefsMetricsEditEvent) record() error { + if e.change == nil || e.node == nil { + return errors.New("prefsMetricsEditEvent: missing required fields") + } + + // Record up/down events. + if e.change.WantRunningSet && (e.pNew.WantRunning() != e.pOld.WantRunning()) { + if e.pNew.WantRunning() { + metricWantRunningEnabled.Add(1) + } else { + metricWantRunningDisabled.Add(1) + } + } + + // Record any changes to exit node settings. + if e.change.ExitNodeIDSet || e.change.ExitNodeIPSet { + if exitNodeTypes, ok := e.exitNodeType(e.pNew.ExitNodeID()); ok { + // We have switched to a valid exit node if ok is true. 
+ metricExitNodeEnabled.Add(1) + + // We may have some additional characteristics we should also record. + for _, t := range exitNodeTypes { + switch t { + case exitNodeTypePreferred: + metricExitNodeEnabledSuggested.Add(1) + case exitNodeTypeMullvad: + metricExitNodeEnabledMullvad.Add(1) + } + } + } + } + return nil +} + +// exitNodeTypesLocked returns type of exit node for the given stable ID. +// An exit node may have multiple type (can be both mullvad and preferred +// simultaneously for example). +// +// This will return ok as true if the supplied stable ID resolves to a known peer, +// false otherwise. The caller is responsible for ensuring that the id belongs to +// an exit node. +func (e *prefsMetricsEditEvent) exitNodeType(id tailcfg.StableNodeID) (props []exitNodeProperty, isNode bool) { + var peer tailcfg.NodeView + + if peer, isNode = e.node.PeerByStableID(id); isNode { + if tailcfg.StableNodeID(id) == e.lastSuggestedExitNode { + props = append(props, exitNodeTypePreferred) + } + if peer.IsWireGuardOnly() { + props = append(props, exitNodeTypeMullvad) + } + } + return props, isNode +} diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 5c1b17038..1d312cfa6 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -705,7 +705,6 @@ var errProfileAccessDenied = errors.New("profile access denied") // This is useful for deleting the last profile. In other cases, it is // recommended to call [profileManager.SwitchProfile] first. func (pm *profileManager) DeleteProfile(id ipn.ProfileID) error { - metricDeleteProfile.Add(1) if id == pm.currentProfile.ID() { return pm.deleteCurrentProfile() } @@ -741,6 +740,7 @@ func (pm *profileManager) deleteProfileNoPermCheck(profile ipn.LoginProfileView) return err } delete(pm.knownProfiles, profile.ID()) + metricDeleteProfile.Add(1) return pm.writeKnownProfiles() } @@ -781,6 +781,7 @@ func (pm *profileManager) writeKnownProfiles() error { if err != nil { return err } + metricProfileCount.Set(int64(len(pm.knownProfiles))) return pm.WriteState(ipn.KnownProfilesStateKey, b) } @@ -893,6 +894,8 @@ func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *healt return nil, err } + metricProfileCount.Set(int64(len(knownProfiles))) + pm := &profileManager{ goos: goos, store: store, @@ -961,6 +964,7 @@ var ( metricSwitchProfile = clientmetric.NewCounter("profiles_switch") metricDeleteProfile = clientmetric.NewCounter("profiles_delete") metricDeleteAllProfile = clientmetric.NewCounter("profiles_delete_all") + metricProfileCount = clientmetric.NewGauge("profiles_count") metricMigration = clientmetric.NewCounter("profiles_migration") metricMigrationError = clientmetric.NewCounter("profiles_migration_error") diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 94f51d4f2..9c6c0a528 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -62,6 +62,13 @@ import ( "tailscale.com/wgengine/magicsock" ) +var ( + metricInvalidRequests = clientmetric.NewCounter("localapi_invalid_requests") + metricDebugMetricsCalls = clientmetric.NewCounter("localapi_debugmetric_requests") + metricUserMetricsCalls = clientmetric.NewCounter("localapi_usermetric_requests") + metricBugReportRequests = clientmetric.NewCounter("localapi_bugreport_requests") +) + type LocalAPIHandler func(*Handler, http.ResponseWriter, *http.Request) // handler is the set of LocalAPI handlers, keyed by the part of the @@ -424,6 +431,8 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) { // 
NOTE(andrew): if we have anything else we want to do while recording // a bugreport, we can add it here. + metricBugReportRequests.Add(1) + // Read from the client; this will also return when the client closes // the connection. var buf [1]byte @@ -2623,14 +2632,6 @@ func (h *Handler) serveShares(w http.ResponseWriter, r *http.Request) { } } -var ( - metricInvalidRequests = clientmetric.NewCounter("localapi_invalid_requests") - - // User-visible LocalAPI endpoints. - metricDebugMetricsCalls = clientmetric.NewCounter("localapi_debugmetric_requests") - metricUserMetricsCalls = clientmetric.NewCounter("localapi_usermetric_requests") -) - // serveSuggestExitNode serves a POST endpoint for returning a suggested exit node. func (h *Handler) serveSuggestExitNode(w http.ResponseWriter, r *http.Request) { if r.Method != "GET" { From 0841477743109113b6fc1ba1052c097d2fcefee2 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 9 May 2025 11:29:36 -0700 Subject: [PATCH 0845/1708] net/udprelay{/endpoint}, all: move ServerEndpoint to independent pkg (#15934) ServerEndpoint will be used within magicsock and potentially elsewhere, which should be possible without needing to import the server implementation itself. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- cmd/tailscaled/depaware.txt | 1 + disco/disco.go | 16 ++--- feature/relayserver/relayserver.go | 3 +- feature/relayserver/relayserver_test.go | 6 +- net/udprelay/endpoint/endpoint.go | 55 ++++++++++++++++ net/udprelay/server.go | 87 +++++++------------------ net/udprelay/server_test.go | 9 +-- 7 files changed, 98 insertions(+), 79 deletions(-) create mode 100644 net/udprelay/endpoint/endpoint.go diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index faa1b5bd8..7e937165b 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -350,6 +350,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay from tailscale.com/feature/relayserver + tailscale.com/net/udprelay/endpoint from tailscale.com/feature/relayserver+ tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/disco/disco.go b/disco/disco.go index 1219a604d..0854eb4c0 100644 --- a/disco/disco.go +++ b/disco/disco.go @@ -406,8 +406,8 @@ func parseBindUDPRelayEndpointAnswer(ver uint8, p []byte) (m *BindUDPRelayEndpoi // involving [BindUDPRelayEndpoint], [BindUDPRelayEndpointChallenge], and // [BindUDPRelayEndpointAnswer]. // -// CallMeMaybeVia mirrors [tailscale.com/net/udprelay.ServerEndpoint], which -// contains field documentation. +// CallMeMaybeVia mirrors [tailscale.com/net/udprelay/endpoint.ServerEndpoint], +// which contains field documentation. // // The recipient may choose to not open a path back if it's already happy with // its path. Direct connections, e.g. [CallMeMaybe]-signaled, take priority over @@ -416,17 +416,17 @@ func parseBindUDPRelayEndpointAnswer(ver uint8, p []byte) (m *BindUDPRelayEndpoi // This message type is currently considered experimental and is not yet tied to // a [tailscale.com/tailcfg.CapabilityVersion]. 
type CallMeMaybeVia struct { - // ServerDisco is [tailscale.com/net/udprelay.ServerEndpoint.ServerDisco] + // ServerDisco is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.ServerDisco] ServerDisco key.DiscoPublic - // LamportID is [tailscale.com/net/udprelay.ServerEndpoint.LamportID] + // LamportID is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.LamportID] LamportID uint64 - // VNI is [tailscale.com/net/udprelay.ServerEndpoint.VNI] + // VNI is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.VNI] VNI uint32 - // BindLifetime is [tailscale.com/net/udprelay.ServerEndpoint.BindLifetime] + // BindLifetime is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.BindLifetime] BindLifetime time.Duration - // SteadyStateLifetime is [tailscale.com/net/udprelay.ServerEndpoint.SteadyStateLifetime] + // SteadyStateLifetime is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.SteadyStateLifetime] SteadyStateLifetime time.Duration - // AddrPorts is [tailscale.com/net/udprelay.ServerEndpoint.AddrPorts] + // AddrPorts is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.AddrPorts] AddrPorts []netip.AddrPort } diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 846e21a7d..96d21138e 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -19,6 +19,7 @@ import ( "tailscale.com/ipn/ipnext" "tailscale.com/ipn/ipnlocal" "tailscale.com/net/udprelay" + "tailscale.com/net/udprelay/endpoint" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" @@ -57,7 +58,7 @@ type extension struct { // relayServer is the interface of [udprelay.Server]. type relayServer interface { - AllocateEndpoint(discoA key.DiscoPublic, discoB key.DiscoPublic) (udprelay.ServerEndpoint, error) + AllocateEndpoint(discoA key.DiscoPublic, discoB key.DiscoPublic) (endpoint.ServerEndpoint, error) Close() error } diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index af4d11df0..cc7f05f67 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -8,7 +8,7 @@ import ( "testing" "tailscale.com/ipn" - "tailscale.com/net/udprelay" + "tailscale.com/net/udprelay/endpoint" "tailscale.com/types/key" "tailscale.com/types/ptr" ) @@ -17,8 +17,8 @@ type fakeRelayServer struct{} func (f *fakeRelayServer) Close() error { return nil } -func (f *fakeRelayServer) AllocateEndpoint(_, _ key.DiscoPublic) (udprelay.ServerEndpoint, error) { - return udprelay.ServerEndpoint{}, errors.New("fake relay server") +func (f *fakeRelayServer) AllocateEndpoint(_, _ key.DiscoPublic) (endpoint.ServerEndpoint, error) { + return endpoint.ServerEndpoint{}, errors.New("fake relay server") } func Test_extension_profileStateChanged(t *testing.T) { diff --git a/net/udprelay/endpoint/endpoint.go b/net/udprelay/endpoint/endpoint.go new file mode 100644 index 000000000..2672a856b --- /dev/null +++ b/net/udprelay/endpoint/endpoint.go @@ -0,0 +1,55 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package endpoint contains types relating to UDP relay server endpoints. It +// does not import tailscale.com/net/udprelay. +package endpoint + +import ( + "net/netip" + + "tailscale.com/tstime" + "tailscale.com/types/key" +) + +// ServerEndpoint contains details for an endpoint served by a +// [tailscale.com/net/udprelay.Server]. +type ServerEndpoint struct { + // ServerDisco is the Server's Disco public key used as part of the 3-way + // bind handshake. 
Server will use the same ServerDisco for its lifetime. + // ServerDisco value in combination with LamportID value represents a + // unique ServerEndpoint allocation. + ServerDisco key.DiscoPublic + + // LamportID is unique and monotonically non-decreasing across + // ServerEndpoint allocations for the lifetime of Server. It enables clients + // to dedup and resolve allocation event order. Clients may race to allocate + // on the same Server, and signal ServerEndpoint details via alternative + // channels, e.g. DERP. Additionally, Server.AllocateEndpoint() requests may + // not result in a new allocation depending on existing server-side endpoint + // state. Therefore, where clients have local, existing state that contains + // ServerDisco and LamportID values matching a newly learned endpoint, these + // can be considered one and the same. If ServerDisco is equal, but + // LamportID is unequal, LamportID comparison determines which + // ServerEndpoint was allocated most recently. + LamportID uint64 + + // AddrPorts are the IP:Port candidate pairs the Server may be reachable + // over. + AddrPorts []netip.AddrPort + + // VNI (Virtual Network Identifier) is the Geneve header VNI the Server + // will use for transmitted packets, and expects for received packets + // associated with this endpoint. + VNI uint32 + + // BindLifetime is amount of time post-allocation the Server will consider + // the endpoint active while it has yet to be bound via 3-way bind handshake + // from both client parties. + BindLifetime tstime.GoDuration + + // SteadyStateLifetime is the amount of time post 3-way bind handshake from + // both client parties the Server will consider the endpoint active lacking + // bidirectional data flow. + SteadyStateLifetime tstime.GoDuration +} diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 5580b6e65..7b63ec95e 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -21,20 +21,21 @@ import ( "go4.org/mem" "tailscale.com/disco" "tailscale.com/net/packet" + "tailscale.com/net/udprelay/endpoint" "tailscale.com/tstime" "tailscale.com/types/key" ) const ( // defaultBindLifetime is somewhat arbitrary. We attempt to account for - // high latency between client and Server, and high latency between - // clients over side channels, e.g. DERP, used to exchange ServerEndpoint - // details. So, a total of 3 paths with potentially high latency. Using a - // conservative 10s "high latency" bounds for each path we end up at a 30s - // total. It is worse to set an aggressive bind lifetime as this may lead - // to path discovery failure, vs dealing with a slight increase of Server - // resource utilization (VNIs, RAM, etc) while tracking endpoints that won't - // bind. + // high latency between client and [Server], and high latency between + // clients over side channels, e.g. DERP, used to exchange + // [endpoint.ServerEndpoint] details. So, a total of 3 paths with + // potentially high latency. Using a conservative 10s "high latency" bounds + // for each path we end up at a 30s total. It is worse to set an aggressive + // bind lifetime as this may lead to path discovery failure, vs dealing with + // a slight increase of [Server] resource utilization (VNIs, RAM, etc) while + // tracking endpoints that won't bind. defaultBindLifetime = time.Second * 30 defaultSteadyStateLifetime = time.Minute * 5 ) @@ -82,49 +83,8 @@ func newPairOfDiscoPubKeys(discoA, discoB key.DiscoPublic) pairOfDiscoPubKeys { return pair } -// ServerEndpoint contains the Server's endpoint details. 
-type ServerEndpoint struct { - // ServerDisco is the Server's Disco public key used as part of the 3-way - // bind handshake. Server will use the same ServerDisco for its lifetime. - // ServerDisco value in combination with LamportID value represents a - // unique ServerEndpoint allocation. - ServerDisco key.DiscoPublic - - // LamportID is unique and monotonically non-decreasing across - // ServerEndpoint allocations for the lifetime of Server. It enables clients - // to dedup and resolve allocation event order. Clients may race to allocate - // on the same Server, and signal ServerEndpoint details via alternative - // channels, e.g. DERP. Additionally, Server.AllocateEndpoint() requests may - // not result in a new allocation depending on existing server-side endpoint - // state. Therefore, where clients have local, existing state that contains - // ServerDisco and LamportID values matching a newly learned endpoint, these - // can be considered one and the same. If ServerDisco is equal, but - // LamportID is unequal, LamportID comparison determines which - // ServerEndpoint was allocated most recently. - LamportID uint64 - - // AddrPorts are the IP:Port candidate pairs the Server may be reachable - // over. - AddrPorts []netip.AddrPort - - // VNI (Virtual Network Identifier) is the Geneve header VNI the Server - // will use for transmitted packets, and expects for received packets - // associated with this endpoint. - VNI uint32 - - // BindLifetime is amount of time post-allocation the Server will consider - // the endpoint active while it has yet to be bound via 3-way bind handshake - // from both client parties. - BindLifetime tstime.GoDuration - - // SteadyStateLifetime is the amount of time post 3-way bind handshake from - // both client parties the Server will consider the endpoint active lacking - // bidirectional data flow. - SteadyStateLifetime tstime.GoDuration -} - -// serverEndpoint contains Server-internal ServerEndpoint state. serverEndpoint -// methods are not thread-safe. +// serverEndpoint contains Server-internal [endpoint.ServerEndpoint] state. +// serverEndpoint methods are not thread-safe. type serverEndpoint struct { // discoPubKeys contains the key.DiscoPublic of the served clients. The // indexing of this array aligns with the following fields, e.g. @@ -308,10 +268,11 @@ func (e *serverEndpoint) isBound() bool { e.handshakeState[1] == disco.BindUDPRelayHandshakeStateAnswerReceived } -// NewServer constructs a Server listening on 0.0.0.0:'port'. IPv6 is not yet +// NewServer constructs a [Server] listening on 0.0.0.0:'port'. IPv6 is not yet // supported. Port may be 0, and what ultimately gets bound is returned as // 'boundPort'. Supplied 'addrs' are joined with 'boundPort' and returned as -// ServerEndpoint.AddrPorts in response to Server.AllocateEndpoint() requests. +// [endpoint.ServerEndpoint.AddrPorts] in response to Server.AllocateEndpoint() +// requests. // // TODO: IPv6 support // TODO: dynamic addrs:port discovery @@ -454,30 +415,30 @@ func (s *Server) packetReadLoop() { var ErrServerClosed = errors.New("server closed") -// AllocateEndpoint allocates a [ServerEndpoint] for the provided pair of -// [key.DiscoPublic]'s. If an allocation already exists for discoA and discoB it -// is returned without modification/reallocation. AllocateEndpoint returns +// AllocateEndpoint allocates an [endpoint.ServerEndpoint] for the provided pair +// of [key.DiscoPublic]'s. 
If an allocation already exists for discoA and discoB +// it is returned without modification/reallocation. AllocateEndpoint returns // [ErrServerClosed] if the server has been closed. -func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (ServerEndpoint, error) { +func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.ServerEndpoint, error) { s.mu.Lock() defer s.mu.Unlock() if s.closed { - return ServerEndpoint{}, ErrServerClosed + return endpoint.ServerEndpoint{}, ErrServerClosed } if discoA.Compare(s.discoPublic) == 0 || discoB.Compare(s.discoPublic) == 0 { - return ServerEndpoint{}, fmt.Errorf("client disco equals server disco: %s", s.discoPublic.ShortString()) + return endpoint.ServerEndpoint{}, fmt.Errorf("client disco equals server disco: %s", s.discoPublic.ShortString()) } pair := newPairOfDiscoPubKeys(discoA, discoB) e, ok := s.byDisco[pair] if ok { // Return the existing allocation. Clients can resolve duplicate - // [ServerEndpoint]'s via [ServerEndpoint.LamportID]. + // [endpoint.ServerEndpoint]'s via [endpoint.ServerEndpoint.LamportID]. // // TODO: consider ServerEndpoint.BindLifetime -= time.Now()-e.allocatedAt // to give the client a more accurate picture of the bind window. - return ServerEndpoint{ + return endpoint.ServerEndpoint{ ServerDisco: s.discoPublic, AddrPorts: s.addrPorts, VNI: e.vni, @@ -488,7 +449,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (ServerEndpoin } if len(s.vniPool) == 0 { - return ServerEndpoint{}, errors.New("VNI pool exhausted") + return endpoint.ServerEndpoint{}, errors.New("VNI pool exhausted") } s.lamportID++ @@ -506,7 +467,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (ServerEndpoin s.byDisco[pair] = e s.byVNI[e.vni] = e - return ServerEndpoint{ + return endpoint.ServerEndpoint{ ServerDisco: s.discoPublic, AddrPorts: s.addrPorts, VNI: e.vni, diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index c699e5d15..9d1e77fcc 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -17,6 +17,7 @@ import ( "go4.org/mem" "tailscale.com/disco" "tailscale.com/net/packet" + "tailscale.com/net/udprelay/endpoint" "tailscale.com/tstime" "tailscale.com/types/key" ) @@ -259,7 +260,7 @@ func TestServerEndpointJSONUnmarshal(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var out ServerEndpoint + var out endpoint.ServerEndpoint err := json.Unmarshal(tt.json, &out) if tt.wantErr != (err != nil) { t.Fatalf("wantErr: %v (err == nil): %v", tt.wantErr, err == nil) @@ -274,11 +275,11 @@ func TestServerEndpointJSONUnmarshal(t *testing.T) { func TestServerEndpointJSONMarshal(t *testing.T) { tests := []struct { name string - serverEndpoint ServerEndpoint + serverEndpoint endpoint.ServerEndpoint }{ { name: "valid roundtrip", - serverEndpoint: ServerEndpoint{ + serverEndpoint: endpoint.ServerEndpoint{ ServerDisco: key.NewDisco().Public(), LamportID: uint64(math.MaxUint64), AddrPorts: []netip.AddrPort{netip.MustParseAddrPort("127.0.0.1:1"), netip.MustParseAddrPort("127.0.0.2:2")}, @@ -295,7 +296,7 @@ func TestServerEndpointJSONMarshal(t *testing.T) { if err != nil { t.Fatal(err) } - var got ServerEndpoint + var got endpoint.ServerEndpoint err = json.Unmarshal(b, &got) if err != nil { t.Fatal(err) From 3c98964065c8079382cd0803a889fcce76063b24 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Fri, 9 May 2025 12:55:57 -0700 Subject: [PATCH 0846/1708] ssh/tailssh: chdir to user's homedir when directly running a command (#15351) 
Commit 4b525fdda (ssh/tailssh: only chdir incubator process to user's homedir when necessary and possible, 2024-08-16) defers changing the working directory until the incubator process drops its privileges. However, it didn't account for the case where there is no incubator process, because no tailscaled was found on the PATH. In that case, it only intended to run `tailscaled be-child` in the root directory but accidentally ran everything there. Fixes: #15350 Signed-off-by: Simon Law --- ssh/tailssh/incubator.go | 68 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 63 insertions(+), 5 deletions(-) diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go index 4f630186d..442fedcf2 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -12,11 +12,13 @@ package tailssh import ( + "context" "encoding/json" "errors" "flag" "fmt" "io" + "io/fs" "log" "log/syslog" "os" @@ -29,6 +31,7 @@ import ( "strings" "sync/atomic" "syscall" + "time" "github.com/creack/pty" "github.com/pkg/sftp" @@ -70,11 +73,36 @@ var maybeStartLoginSession = func(dlogf logger.Logf, ia incubatorArgs) (close fu return nil } +// tryExecInDir tries to run a command in dir and returns nil if it succeeds. +// Otherwise, it returns a filesystem error or a timeout error if the command +// took too long. +func tryExecInDir(ctx context.Context, dir string) error { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + // Assume that the following executables exist, are executable, and + // immediately return. + var name string + switch runtime.GOOS { + case "windows": + windir := os.Getenv("windir") + name = filepath.Join(windir, "system32", "doskey.exe") + default: + name = "/bin/true" + } + + cmd := exec.CommandContext(ctx, name) + cmd.Dir = dir + return cmd.Run() +} + // newIncubatorCommand returns a new exec.Cmd configured with // `tailscaled be-child ssh` as the entrypoint. // -// If ss.srv.tailscaledPath is empty, this method is equivalent to -// exec.CommandContext. +// If ss.srv.tailscaledPath is empty, this method is almost equivalent to +// exec.CommandContext. It will refuse to run in SFTP-mode. It will simulate the +// behavior of SSHD when by falling back to the root directory if it cannot run +// a command in the user’s home directory. // // The returned Cmd.Env is guaranteed to be nil; the caller populates it. func (ss *sshSession) newIncubatorCommand(logf logger.Logf) (cmd *exec.Cmd, err error) { @@ -104,7 +132,35 @@ func (ss *sshSession) newIncubatorCommand(logf logger.Logf) (cmd *exec.Cmd, err loginShell := ss.conn.localUser.LoginShell() args := shellArgs(isShell, ss.RawCommand()) logf("directly running %s %q", loginShell, args) - return exec.CommandContext(ss.ctx, loginShell, args...), nil + cmd = exec.CommandContext(ss.ctx, loginShell, args...) + + // While running directly instead of using `tailscaled be-child`, + // do what sshd does by running inside the home directory, + // falling back to the root directory it doesn't have permissions. + // This can happen if the system has networked home directories, + // i.e. NFS or SMB, which enable root-squashing by default. + cmd.Dir = ss.conn.localUser.HomeDir + err := tryExecInDir(ss.ctx, cmd.Dir) + switch { + case errors.Is(err, exec.ErrNotFound): + // /bin/true might not be installed on a barebones system, + // so we assume that the home directory does not exist. + cmd.Dir = "/" + case errors.Is(err, fs.ErrPermission) || errors.Is(err, fs.ErrNotExist): + // Ensure that cmd.Dir is the source of the error. 
+ var pathErr *fs.PathError + if errors.As(err, &pathErr) && pathErr.Path == cmd.Dir { + // If we cannot run loginShell in localUser.HomeDir, + // we will try to run this command in the root directory. + cmd.Dir = "/" + } else { + return nil, err + } + case err != nil: + return nil, err + } + + return cmd, nil } lu := ss.conn.localUser @@ -178,7 +234,10 @@ func (ss *sshSession) newIncubatorCommand(logf logger.Logf) (cmd *exec.Cmd, err } } - return exec.CommandContext(ss.ctx, ss.conn.srv.tailscaledPath, incubatorArgs...), nil + cmd = exec.CommandContext(ss.ctx, ss.conn.srv.tailscaledPath, incubatorArgs...) + // The incubator will chdir into the home directory after it drops privileges. + cmd.Dir = "/" + return cmd, nil } var debugIncubator bool @@ -777,7 +836,6 @@ func (ss *sshSession) launchProcess() error { } cmd := ss.cmd - cmd.Dir = "/" cmd.Env = envForUser(ss.conn.localUser) for _, kv := range ss.Environ() { if acceptEnvPair(kv) { From 3177e50b1402052bca4fd2cfb69279bd82380f73 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Fri, 9 May 2025 13:44:36 -0700 Subject: [PATCH 0847/1708] safeweb: Set Cross-Origin-Opener-Policy for browser requests (#15936) Set Cross-Origin-Opener-Policy: same-origin for all browser requests to prevent window.location manipulation by malicious origins. Updates tailscale/corp#28480 Thank you to Triet H.M. Pham for the report. Signed-off-by: Patrick O'Doherty --- safeweb/http.go | 1 + 1 file changed, 1 insertion(+) diff --git a/safeweb/http.go b/safeweb/http.go index 143c4dcee..d085fcb88 100644 --- a/safeweb/http.go +++ b/safeweb/http.go @@ -376,6 +376,7 @@ func (s *Server) serveBrowser(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Security-Policy", s.csp) w.Header().Set("X-Content-Type-Options", "nosniff") w.Header().Set("Referer-Policy", "same-origin") + w.Header().Set("Cross-Origin-Opener-Policy", "same-origin") if s.SecureContext { w.Header().Set("Strict-Transport-Security", cmp.Or(s.StrictTransportSecurityOptions, DefaultStrictTransportSecurityOptions)) } From a9be049c19bf1d91eeea418a695af71552f37a1c Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 9 May 2025 10:25:28 -0500 Subject: [PATCH 0848/1708] ipn/ipnlocal,net/dns/resolver: use the user dialer and routes for DNS forwarding by default, except on iOS and Android In this PR, we make the "user-dial-routes" behavior default on all platforms except for iOS and Android. It can be disabled by setting the TS_DNS_FORWARD_USE_ROUTES envknob to 0 or false. 
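The default-on-with-env-override behavior described above can be sketched as follows. This is a simplification, not the code in the diff below: the real implementation goes through the envknob package rather than os.Getenv, and on Android/iOS the behavior is additionally gated by the user-dial-routes control knob.

package main

import (
	"fmt"
	"os"
	"runtime"
)

// shouldUseRoutes sketches the described policy: route-aware DNS forwarding
// is the default everywhere except Android and iOS, and setting
// TS_DNS_FORWARD_USE_ROUTES to "0" or "false" opts out.
func shouldUseRoutes() bool {
	switch runtime.GOOS {
	case "android", "ios":
		return false // still requires the user-dial-routes control knob
	default:
		switch os.Getenv("TS_DNS_FORWARD_USE_ROUTES") {
		case "0", "false":
			return false
		}
		return true
	}
}

func main() {
	fmt.Println("use routes for DNS forwarding:", shouldUseRoutes())
}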
Updates #12027 Updates #13837 Signed-off-by: Nick Khyl --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- ipn/ipnlocal/local.go | 4 ++-- net/dns/resolver/forwarder.go | 42 ++++++++++++++++++++++++++--------- tsnet/depaware.txt | 2 +- 5 files changed, 36 insertions(+), 16 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 2ed36c3dc..9e6f24419 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -840,7 +840,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/cmd/k8s-operator+ - tailscale.com/net/dns/resolver from tailscale.com/net/dns + tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/control/controlclient+ tailscale.com/net/flowtrack from tailscale.com/net/packet+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 7e937165b..823d639c9 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -318,7 +318,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ - tailscale.com/net/dns/resolver from tailscale.com/net/dns + tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ tailscale.com/net/flowtrack from tailscale.com/net/packet+ diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 91a46bbcc..e8ff05b37 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -64,6 +64,7 @@ import ( "tailscale.com/logpolicy" "tailscale.com/net/captivedetection" "tailscale.com/net/dns" + "tailscale.com/net/dns/resolver" "tailscale.com/net/dnscache" "tailscale.com/net/dnsfallback" "tailscale.com/net/ipset" @@ -4908,7 +4909,6 @@ func (b *LocalBackend) authReconfig() { nm := cn.NetMap() hasPAC := b.prevIfState.HasPAC() disableSubnetsIfPAC := cn.SelfHasCap(tailcfg.NodeAttrDisableSubnetsIfPAC) - userDialUseRoutes := cn.SelfHasCap(tailcfg.NodeAttrUserDialUseRoutes) dohURL, dohURLOK := cn.exitNodeCanProxyDNS(prefs.ExitNodeID()) dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, b.logf, version.OS()) // If the current node is an app connector, ensure the app connector machine is started @@ -4969,7 +4969,7 @@ func (b *LocalBackend) authReconfig() { } b.logf("[v1] authReconfig: ra=%v dns=%v 0x%02x: %v", prefs.RouteAll(), prefs.CorpDNS(), flags, err) - if userDialUseRoutes { + if resolver.ShouldUseRoutes(b.ControlKnobs()) { b.dialer.SetRoutes(rcfg.Routes, rcfg.LocalRoutes) } else { b.dialer.SetRoutes(nil, nil) diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index c7b9439e6..f12876905 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -17,6 +17,7 @@ import ( "net/http" "net/netip" "net/url" + "runtime" "sort" "strings" "sync" @@ -740,18 +741,37 @@ func (f *forwarder) sendUDP(ctx context.Context, fq *forwardQuery, rr resolverAn return out, nil } +var optDNSForwardUseRoutes = 
envknob.RegisterOptBool("TS_DNS_FORWARD_USE_ROUTES") + +// ShouldUseRoutes reports true if the DNS resolver should use the peer or system dialer +// for forwarding DNS queries to upstream nameservers via TCP, based on the destination +// address and configured routes. Currently, this requires maintaining a [bart.Table], +// resulting in a slightly higher memory usage. +// +// It reports false if the system dialer should always be used, regardless of the +// destination address. +// +// TODO(nickkhyl): Update [tsdial.Dialer] to reuse the bart.Table we create in net/tstun.Wrapper +// to avoid having two bart tables in memory, especially on iOS. Once that's done, +// we can get rid of the nodeAttr/control knob and always use UserDial for DNS. +// +// See https://github.com/tailscale/tailscale/issues/12027. +func ShouldUseRoutes(knobs *controlknobs.Knobs) bool { + switch runtime.GOOS { + case "android", "ios": + // On mobile platforms with lower memory limits (e.g., 50MB on iOS), + // this behavior is still gated by the "user-dial-routes" nodeAttr. + return knobs != nil && knobs.UserDialUseRoutes.Load() + default: + // On all other platforms, it is the default behavior, + // but it can be overridden with the "TS_DNS_FORWARD_USE_ROUTES" env var. + doNotUseRoutes := optDNSForwardUseRoutes().EqualBool(false) + return !doNotUseRoutes + } +} + func (f *forwarder) getDialerType() netx.DialFunc { - if f.controlKnobs != nil && f.controlKnobs.UserDialUseRoutes.Load() { - // It is safe to use UserDial as it dials external servers without going through Tailscale - // and closes connections on interface change in the same way as SystemDial does, - // thus preventing DNS resolution issues when switching between WiFi and cellular, - // but can also dial an internal DNS server on the Tailnet or via a subnet router. - // - // TODO(nickkhyl): Update tsdial.Dialer to reuse the bart.Table we create in net/tstun.Wrapper - // to avoid having two bart tables in memory, especially on iOS. Once that's done, - // we can get rid of the nodeAttr/control knob and always use UserDial for DNS. - // - // See https://github.com/tailscale/tailscale/issues/12027. 
+ if ShouldUseRoutes(f.controlKnobs) { return f.dialer.UserDial } return f.dialer.SystemDial diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index f9e58a71c..4c9c6831e 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -271,7 +271,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ - tailscale.com/net/dns/resolver from tailscale.com/net/dns + tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/control/controlclient+ tailscale.com/net/flowtrack from tailscale.com/net/packet+ From cffb80956909ba1ca2f3a37bd75f3f4452b6c6db Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 9 May 2025 10:29:02 -0500 Subject: [PATCH 0849/1708] net/tsdial: update (*Dialer).SetRoutes() to log the size of the resulting bart.Table Updates #12027 Signed-off-by: Nick Khyl --- net/dns/resolver/forwarder.go | 16 +++++++--------- net/tsdial/tsdial.go | 1 + 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index f12876905..321401a84 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -741,21 +741,19 @@ func (f *forwarder) sendUDP(ctx context.Context, fq *forwardQuery, rr resolverAn return out, nil } -var optDNSForwardUseRoutes = envknob.RegisterOptBool("TS_DNS_FORWARD_USE_ROUTES") +var optDNSForwardUseRoutes = envknob.RegisterOptBool("TS_DEBUG_DNS_FORWARD_USE_ROUTES") -// ShouldUseRoutes reports true if the DNS resolver should use the peer or system dialer -// for forwarding DNS queries to upstream nameservers via TCP, based on the destination -// address and configured routes. Currently, this requires maintaining a [bart.Table], -// resulting in a slightly higher memory usage. +// ShouldUseRoutes reports whether the DNS resolver should consider routes when dialing +// upstream nameservers via TCP. // -// It reports false if the system dialer should always be used, regardless of the -// destination address. +// If true, routes should be considered ([tsdial.Dialer.UserDial]), otherwise defer +// to the system routes ([tsdial.Dialer.SystemDial]). // // TODO(nickkhyl): Update [tsdial.Dialer] to reuse the bart.Table we create in net/tstun.Wrapper // to avoid having two bart tables in memory, especially on iOS. Once that's done, // we can get rid of the nodeAttr/control knob and always use UserDial for DNS. // -// See https://github.com/tailscale/tailscale/issues/12027. +// See tailscale/tailscale#12027. func ShouldUseRoutes(knobs *controlknobs.Knobs) bool { switch runtime.GOOS { case "android", "ios": @@ -764,7 +762,7 @@ func ShouldUseRoutes(knobs *controlknobs.Knobs) bool { return knobs != nil && knobs.UserDialUseRoutes.Load() default: // On all other platforms, it is the default behavior, - // but it can be overridden with the "TS_DNS_FORWARD_USE_ROUTES" env var. + // but it can be overridden with the "TS_DEBUG_DNS_FORWARD_USE_ROUTES" env var. 
doNotUseRoutes := optDNSForwardUseRoutes().EqualBool(false) return !doNotUseRoutes } diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index 1188a3077..2492f666c 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -151,6 +151,7 @@ func (d *Dialer) SetRoutes(routes, localRoutes []netip.Prefix) { for _, r := range localRoutes { rt.Insert(r, false) } + d.logf("tsdial: bart table size: %d", rt.Size()) } d.routes.Store(rt) From 0f4f808e70c6bbc6bf6b6d0d00b3bd14c036974d Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 9 May 2025 14:50:01 -0700 Subject: [PATCH 0850/1708] wgengine/magicsock: re-shape relayManager to use an event loop (#15935) The event loop removes the need for growing locking complexities and synchronization. Now we simply use channels. The event loop only runs while there is active work to do. relayManager remains no-op inside magicsock for the time being. endpoints are never 'relayCapable' and therefore endpoint & Conn will not feed CallMeMaybeVia or allocation events into it. A number of relayManager events remain unimplemented, e.g. CallMeMaybeVia reception and relay handshaking. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- cmd/k8s-operator/depaware.txt | 1 + tsnet/depaware.txt | 1 + wgengine/magicsock/endpoint.go | 9 + wgengine/magicsock/magicsock.go | 7 + wgengine/magicsock/relaymanager.go | 275 ++++++++++++++++++++++-- wgengine/magicsock/relaymanager_test.go | 29 +++ 6 files changed, 302 insertions(+), 20 deletions(-) create mode 100644 wgengine/magicsock/relaymanager_test.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 9e6f24419..4e2215aec 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -872,6 +872,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/tsd+ + tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 4c9c6831e..f5cd1232d 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -303,6 +303,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/tsdial from tailscale.com/control/controlclient+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/tsd+ + tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 5f4f0bd8c..f88dab29d 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -95,6 +95,7 @@ type endpoint struct { expired bool // whether the node has expired isWireguardOnly bool // whether the endpoint is WireGuard only + relayCapable bool // whether the node is capable of speaking via a [tailscale.com/net/udprelay.Server] } func (de *endpoint) setBestAddrLocked(v addrQuality) { @@ -1249,6 +1250,13 @@ func (de *endpoint) sendDiscoPingsLocked(now mono.Time, sendCallMeMaybe bool) { // sent so our firewall 
ports are probably open and now // would be a good time for them to connect. go de.c.enqueueCallMeMaybe(derpAddr, de) + + // Schedule allocation of relay endpoints. We make no considerations for + // current relay endpoints or best UDP path state for now, keep it + // simple. + if de.relayCapable { + go de.c.relayManager.allocateAndHandshakeAllServers(de) + } } } @@ -1863,6 +1871,7 @@ func (de *endpoint) resetLocked() { } } de.probeUDPLifetime.resetCycleEndpointLocked() + de.c.relayManager.cancelOutstandingWork(de) } func (de *endpoint) numStopAndReset() int64 { diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 7df46f76c..cf3ef2352 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1939,6 +1939,13 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke c.logf("magicsock: disco: ignoring %s from %v; %v is unknown", msgType, sender.ShortString(), derpNodeSrc.ShortString()) return } + ep.mu.Lock() + relayCapable := ep.relayCapable + ep.mu.Unlock() + if isVia && !relayCapable { + c.logf("magicsock: disco: ignoring %s from %v; %v is not known to be relay capable", msgType, sender.ShortString(), sender.ShortString()) + return + } epDisco := ep.disco.Load() if epDisco == nil { return diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index bf737b078..b1732ff41 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -4,48 +4,283 @@ package magicsock import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" "net/netip" "sync" + "time" "tailscale.com/disco" + udprelay "tailscale.com/net/udprelay/endpoint" "tailscale.com/types/key" + "tailscale.com/util/httpm" + "tailscale.com/util/set" ) // relayManager manages allocation and handshaking of // [tailscale.com/net/udprelay.Server] endpoints. The zero value is ready for // use. type relayManager struct { - mu sync.Mutex // guards the following fields + initOnce sync.Once + + // =================================================================== + // The following fields are owned by a single goroutine, runLoop(). + serversByAddrPort set.Set[netip.AddrPort] + allocWorkByEndpoint map[*endpoint]*relayEndpointAllocWork + + // =================================================================== + // The following chan fields serve event inputs to a single goroutine, + // runLoop(). + allocateHandshakeCh chan *endpoint + allocateWorkDoneCh chan relayEndpointAllocWorkDoneEvent + cancelWorkCh chan *endpoint + newServerEndpointCh chan newRelayServerEndpointEvent + rxChallengeCh chan relayHandshakeChallengeEvent + rxCallMeMaybeViaCh chan *disco.CallMeMaybeVia + + discoInfoMu sync.Mutex // guards the following field discoInfoByServerDisco map[key.DiscoPublic]*discoInfo + + // runLoopStoppedCh is written to by runLoop() upon return, enabling event + // writers to restart it when they are blocked (see + // relayManagerInputEvent()). + runLoopStoppedCh chan struct{} } -func (h *relayManager) initLocked() { - if h.discoInfoByServerDisco != nil { - return +type newRelayServerEndpointEvent struct { + ep *endpoint + se udprelay.ServerEndpoint +} + +type relayEndpointAllocWorkDoneEvent struct { + ep *endpoint + work *relayEndpointAllocWork +} + +// activeWork returns true if there is outstanding allocation or handshaking +// work, otherwise it returns false. 
+func (r *relayManager) activeWork() bool { + return len(r.allocWorkByEndpoint) > 0 + // TODO(jwhited): consider handshaking work +} + +// runLoop is a form of event loop. It ensures exclusive access to most of +// [relayManager] state. +func (r *relayManager) runLoop() { + defer func() { + r.runLoopStoppedCh <- struct{}{} + }() + + for { + select { + case ep := <-r.allocateHandshakeCh: + r.cancelAndClearWork(ep) + r.allocateAllServersForEndpoint(ep) + if !r.activeWork() { + return + } + case msg := <-r.allocateWorkDoneCh: + work, ok := r.allocWorkByEndpoint[msg.ep] + if ok && work == msg.work { + // Verify the work in the map is the same as the one that we're + // cleaning up. New events on r.allocateHandshakeCh can + // overwrite pre-existing keys. + delete(r.allocWorkByEndpoint, msg.ep) + } + if !r.activeWork() { + return + } + case ep := <-r.cancelWorkCh: + r.cancelAndClearWork(ep) + if !r.activeWork() { + return + } + case newEndpoint := <-r.newServerEndpointCh: + _ = newEndpoint + // TODO(jwhited): implement + if !r.activeWork() { + return + } + case challenge := <-r.rxChallengeCh: + _ = challenge + // TODO(jwhited): implement + if !r.activeWork() { + return + } + case via := <-r.rxCallMeMaybeViaCh: + _ = via + // TODO(jwhited): implement + if !r.activeWork() { + return + } + } } - h.discoInfoByServerDisco = make(map[key.DiscoPublic]*discoInfo) +} + +type relayHandshakeChallengeEvent struct { + challenge [32]byte + disco key.DiscoPublic + from netip.AddrPort + vni uint32 + at time.Time +} + +// relayEndpointAllocWork serves to track in-progress relay endpoint allocation +// for an [*endpoint]. This structure is immutable once initialized. +type relayEndpointAllocWork struct { + // ep is the [*endpoint] associated with the work + ep *endpoint + // cancel() will signal all associated goroutines to return + cancel context.CancelFunc + // wg.Wait() will return once all associated goroutines have returned + wg *sync.WaitGroup +} + +// init initializes [relayManager] if it is not already initialized. +func (r *relayManager) init() { + r.initOnce.Do(func() { + r.discoInfoByServerDisco = make(map[key.DiscoPublic]*discoInfo) + r.allocWorkByEndpoint = make(map[*endpoint]*relayEndpointAllocWork) + r.allocateHandshakeCh = make(chan *endpoint) + r.allocateWorkDoneCh = make(chan relayEndpointAllocWorkDoneEvent) + r.cancelWorkCh = make(chan *endpoint) + r.newServerEndpointCh = make(chan newRelayServerEndpointEvent) + r.rxChallengeCh = make(chan relayHandshakeChallengeEvent) + r.rxCallMeMaybeViaCh = make(chan *disco.CallMeMaybeVia) + r.runLoopStoppedCh = make(chan struct{}, 1) + go r.runLoop() + }) } // discoInfo returns a [*discoInfo] for 'serverDisco' if there is an // active/ongoing handshake with it, otherwise it returns nil, false. 
-func (h *relayManager) discoInfo(serverDisco key.DiscoPublic) (_ *discoInfo, ok bool) { - h.mu.Lock() - defer h.mu.Unlock() - h.initLocked() - di, ok := h.discoInfoByServerDisco[serverDisco] +func (r *relayManager) discoInfo(serverDisco key.DiscoPublic) (_ *discoInfo, ok bool) { + r.discoInfoMu.Lock() + defer r.discoInfoMu.Unlock() + di, ok := r.discoInfoByServerDisco[serverDisco] return di, ok } -func (h *relayManager) handleCallMeMaybeVia(dm *disco.CallMeMaybeVia) { - h.mu.Lock() - defer h.mu.Unlock() - h.initLocked() - // TODO(jwhited): implement +func (r *relayManager) handleCallMeMaybeVia(dm *disco.CallMeMaybeVia) { + relayManagerInputEvent(r, nil, &r.rxCallMeMaybeViaCh, dm) +} + +func (r *relayManager) handleBindUDPRelayEndpointChallenge(dm *disco.BindUDPRelayEndpointChallenge, di *discoInfo, src netip.AddrPort, vni uint32) { + relayManagerInputEvent(r, nil, &r.rxChallengeCh, relayHandshakeChallengeEvent{challenge: dm.Challenge, disco: di.discoKey, from: src, vni: vni, at: time.Now()}) +} + +// relayManagerInputEvent initializes [relayManager] if necessary, starts +// relayManager.runLoop() if it is not running, and writes 'event' on 'eventCh'. +// +// [relayManager] initialization will make `*eventCh`, so it must be passed as +// a pointer to a channel. +// +// 'ctx' can be used for returning when runLoop is waiting for the caller to +// return, i.e. the calling goroutine was birthed by runLoop and is cancelable +// via 'ctx'. 'ctx' may be nil. +func relayManagerInputEvent[T any](r *relayManager, ctx context.Context, eventCh *chan T, event T) { + r.init() + var ctxDoneCh <-chan struct{} + if ctx != nil { + ctxDoneCh = ctx.Done() + } + for { + select { + case <-ctxDoneCh: + return + case *eventCh <- event: + return + case <-r.runLoopStoppedCh: + go r.runLoop() + } + } +} + +// allocateAndHandshakeAllServers kicks off allocation and handshaking of relay +// endpoints for 'ep' on all known relay servers, canceling any existing +// in-progress work. +func (r *relayManager) allocateAndHandshakeAllServers(ep *endpoint) { + relayManagerInputEvent(r, nil, &r.allocateHandshakeCh, ep) +} + +// cancelOutstandingWork cancels all outstanding allocation & handshaking work +// for 'ep'. +func (r *relayManager) cancelOutstandingWork(ep *endpoint) { + relayManagerInputEvent(r, nil, &r.cancelWorkCh, ep) } -func (h *relayManager) handleBindUDPRelayEndpointChallenge(dm *disco.BindUDPRelayEndpointChallenge, di *discoInfo, src netip.AddrPort, vni uint32) { - h.mu.Lock() - defer h.mu.Unlock() - h.initLocked() - // TODO(jwhited): implement +// cancelAndClearWork cancels & clears any outstanding work for 'ep'. 
+func (r *relayManager) cancelAndClearWork(ep *endpoint) { + allocWork, ok := r.allocWorkByEndpoint[ep] + if ok { + allocWork.cancel() + allocWork.wg.Wait() + delete(r.allocWorkByEndpoint, ep) + } + // TODO(jwhited): cancel & clear handshake work +} + +func (r *relayManager) allocateAllServersForEndpoint(ep *endpoint) { + if len(r.serversByAddrPort) == 0 { + return + } + ctx, cancel := context.WithCancel(context.Background()) + started := &relayEndpointAllocWork{ep: ep, cancel: cancel, wg: &sync.WaitGroup{}} + for k := range r.serversByAddrPort { + started.wg.Add(1) + go r.allocateEndpoint(ctx, started.wg, k, ep) + } + r.allocWorkByEndpoint[ep] = started + go func() { + started.wg.Wait() + started.cancel() + relayManagerInputEvent(r, ctx, &r.allocateWorkDoneCh, relayEndpointAllocWorkDoneEvent{ep: ep, work: started}) + }() +} + +func (r *relayManager) allocateEndpoint(ctx context.Context, wg *sync.WaitGroup, server netip.AddrPort, ep *endpoint) { + // TODO(jwhited): introduce client metrics counters for notable failures + defer wg.Done() + var b bytes.Buffer + remoteDisco := ep.disco.Load() + if remoteDisco == nil { + return + } + type allocateRelayEndpointReq struct { + DiscoKeys []key.DiscoPublic + } + a := &allocateRelayEndpointReq{ + DiscoKeys: []key.DiscoPublic{ep.c.discoPublic, remoteDisco.key}, + } + err := json.NewEncoder(&b).Encode(a) + if err != nil { + return + } + const reqTimeout = time.Second * 10 + reqCtx, cancel := context.WithTimeout(ctx, reqTimeout) + defer cancel() + req, err := http.NewRequestWithContext(reqCtx, httpm.POST, "http://"+server.String()+"/relay/endpoint", &b) + if err != nil { + return + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return + } + var se udprelay.ServerEndpoint + err = json.NewDecoder(io.LimitReader(resp.Body, 4096)).Decode(&se) + if err != nil { + return + } + relayManagerInputEvent(r, ctx, &r.newServerEndpointCh, newRelayServerEndpointEvent{ + ep: ep, + se: se, + }) } diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go new file mode 100644 index 000000000..579dceb53 --- /dev/null +++ b/wgengine/magicsock/relaymanager_test.go @@ -0,0 +1,29 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package magicsock + +import ( + "net/netip" + "testing" + + "tailscale.com/disco" +) + +func TestRelayManagerInitAndIdle(t *testing.T) { + rm := relayManager{} + rm.allocateAndHandshakeAllServers(&endpoint{}) + <-rm.runLoopStoppedCh + + rm = relayManager{} + rm.cancelOutstandingWork(&endpoint{}) + <-rm.runLoopStoppedCh + + rm = relayManager{} + rm.handleCallMeMaybeVia(&disco.CallMeMaybeVia{}) + <-rm.runLoopStoppedCh + + rm = relayManager{} + rm.handleBindUDPRelayEndpointChallenge(&disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, netip.AddrPort{}, 0) + <-rm.runLoopStoppedCh +} From f0a27066c46d50bca3a385d9b70368c4939484ce Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 16 Apr 2025 16:32:10 -0500 Subject: [PATCH 0851/1708] ipn/ipn{server,test}: extract the LocalAPI test client and server into ipntest MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In this PR, we extract the in-process LocalAPI client/server implementation from ipn/ipnserver/server_test.go into a new ipntest package to be used in high‑level black‑box tests, such as those for the tailscale CLI. 
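As a rough illustration, a high-level test can now stand up an in-process LocalAPI server and drive it through a regular client. The sketch below mirrors the example test added in this patch; names such as NewServer, WithLogging, ClientWithName, and Backend come from the new package, while the test name and assertions are illustrative only:

    package lapitest_test

    import (
        "context"
        "testing"

        "tailscale.com/ipn"
        "tailscale.com/ipn/lapitest"
    )

    func TestWantRunningViaLocalAPI(t *testing.T) {
        // Start an in-process LocalAPI server backed by a fake userspace engine.
        s := lapitest.NewServer(t, lapitest.WithLogging(false))

        // Issue requests as a named user, the way the CLI would on its owner's behalf.
        c := s.ClientWithName("UserA")
        prefs, err := c.EditPrefs(context.Background(), &ipn.MaskedPrefs{
            Prefs:          ipn.Prefs{WantRunning: true},
            WantRunningSet: true,
        })
        if err != nil {
            t.Fatalf("EditPrefs: %v", err)
        }
        if !prefs.WantRunning {
            t.Fatalf("EditPrefs: WantRunning = false; want true")
        }

        // The backend remains available for white-box assertions.
        if !s.Backend().Prefs().WantRunning() {
            t.Fatalf("backend WantRunning = false; want true")
        }
    }
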
Updates #15575 Signed-off-by: Nick Khyl --- ipn/ipnserver/actor.go | 6 + ipn/ipnserver/server_fortest.go | 42 ++++ ipn/ipnserver/server_test.go | 379 ++++---------------------------- ipn/ipnserver/waiterset_test.go | 46 ++++ ipn/lapitest/backend.go | 63 ++++++ ipn/lapitest/client.go | 71 ++++++ ipn/lapitest/example_test.go | 80 +++++++ ipn/lapitest/opts.go | 170 ++++++++++++++ ipn/lapitest/server.go | 324 +++++++++++++++++++++++++++ 9 files changed, 846 insertions(+), 335 deletions(-) create mode 100644 ipn/ipnserver/server_fortest.go create mode 100644 ipn/ipnserver/waiterset_test.go create mode 100644 ipn/lapitest/backend.go create mode 100644 ipn/lapitest/client.go create mode 100644 ipn/lapitest/example_test.go create mode 100644 ipn/lapitest/opts.go create mode 100644 ipn/lapitest/server.go diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 9c203fc5f..dd40924bb 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -179,6 +179,12 @@ func contextWithActor(ctx context.Context, logf logger.Logf, c net.Conn) context return actorKey.WithValue(ctx, actorOrError{actor: actor, err: err}) } +// NewContextWithActorForTest returns a new context that carries the identity +// of the specified actor. It is used in tests only. +func NewContextWithActorForTest(ctx context.Context, actor ipnauth.Actor) context.Context { + return actorKey.WithValue(ctx, actorOrError{actor: actor}) +} + // actorFromContext returns an [ipnauth.Actor] associated with ctx, // or an error if the context does not carry an actor's identity. func actorFromContext(ctx context.Context) (ipnauth.Actor, error) { diff --git a/ipn/ipnserver/server_fortest.go b/ipn/ipnserver/server_fortest.go new file mode 100644 index 000000000..9aab3b276 --- /dev/null +++ b/ipn/ipnserver/server_fortest.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnserver + +import ( + "context" + "net/http" + + "tailscale.com/ipn/ipnauth" +) + +// BlockWhileInUseByOtherForTest blocks while the actor can't connect to the server because +// the server is in use by a different actor. It is used in tests only. +func (s *Server) BlockWhileInUseByOtherForTest(ctx context.Context, actor ipnauth.Actor) error { + return s.blockWhileIdentityInUse(ctx, actor) +} + +// BlockWhileInUseForTest blocks until the server becomes idle (no active requests), +// or the specified context is done. It returns the context's error if it is done. +// It is used in tests only. +func (s *Server) BlockWhileInUseForTest(ctx context.Context) error { + ready, cleanup := s.zeroReqWaiter.add(&s.mu, ctx) + + s.mu.Lock() + busy := len(s.activeReqs) != 0 + s.mu.Unlock() + + if busy { + <-ready + } + cleanup() + return ctx.Err() +} + +// ServeHTTPForTest responds to a single LocalAPI HTTP request. +// The request's context carries the actor that made the request +// and can be created with [NewContextWithActorForTest]. +// It is used in tests only. 
+func (s *Server) ServeHTTPForTest(w http.ResponseWriter, r *http.Request) { + s.serveHTTP(w, r) +} diff --git a/ipn/ipnserver/server_test.go b/ipn/ipnserver/server_test.go index 9340fd1c6..903cb6b73 100644 --- a/ipn/ipnserver/server_test.go +++ b/ipn/ipnserver/server_test.go @@ -1,76 +1,22 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package ipnserver +package ipnserver_test import ( "context" - "encoding/json" - "errors" - "fmt" - "net" - "net/http" - "net/http/httptest" "runtime" "strconv" "sync" - "sync/atomic" "testing" "tailscale.com/client/local" - "tailscale.com/client/tailscale" - "tailscale.com/client/tailscale/apitype" - "tailscale.com/control/controlclient" "tailscale.com/envknob" "tailscale.com/ipn" - "tailscale.com/ipn/ipnauth" - "tailscale.com/ipn/ipnlocal" - "tailscale.com/ipn/store/mem" - "tailscale.com/tsd" - "tailscale.com/tstest" - "tailscale.com/types/logger" - "tailscale.com/types/logid" + "tailscale.com/ipn/lapitest" "tailscale.com/types/ptr" - "tailscale.com/util/mak" - "tailscale.com/wgengine" ) -func TestWaiterSet(t *testing.T) { - var s waiterSet - - wantLen := func(want int, when string) { - t.Helper() - if got := len(s); got != want { - t.Errorf("%s: len = %v; want %v", when, got, want) - } - } - wantLen(0, "initial") - var mu sync.Mutex - ctx, cancel := context.WithCancel(context.Background()) - - ready, cleanup := s.add(&mu, ctx) - wantLen(1, "after add") - - select { - case <-ready: - t.Fatal("should not be ready") - default: - } - s.wakeAll() - <-ready - - wantLen(1, "after fire") - cleanup() - wantLen(0, "after cleanup") - - // And again but on an already-expired ctx. - cancel() - ready, cleanup = s.add(&mu, ctx) - <-ready // shouldn't block - cleanup() - wantLen(0, "at end") -} - func TestUserConnectDisconnectNonWindows(t *testing.T) { enableLogging := false if runtime.GOOS == "windows" { @@ -78,20 +24,20 @@ func TestUserConnectDisconnectNonWindows(t *testing.T) { } ctx := context.Background() - server := startDefaultTestIPNServer(t, ctx, enableLogging) + server := lapitest.NewServer(t, lapitest.WithLogging(enableLogging)) // UserA connects and starts watching the IPN bus. - clientA := server.getClientAs("UserA") + clientA := server.ClientWithName("UserA") watcherA, _ := clientA.WatchIPNBus(ctx, 0) // The concept of "current user" is only relevant on Windows // and it should not be set on non-Windows platforms. - server.checkCurrentUser(nil) + server.CheckCurrentUser(nil) // Additionally, a different user should be able to connect and use the LocalAPI. - clientB := server.getClientAs("UserB") + clientB := server.ClientWithName("UserB") if _, gotErr := clientB.Status(ctx); gotErr != nil { - t.Fatalf("Status(%q): want nil; got %v", clientB.User.Name, gotErr) + t.Fatalf("Status(%q): want nil; got %v", clientB.Username(), gotErr) } // Watching the IPN bus should also work for UserB. @@ -100,18 +46,18 @@ func TestUserConnectDisconnectNonWindows(t *testing.T) { // And if we send a notification, both users should receive it. 
wantErrMessage := "test error" testNotify := ipn.Notify{ErrMessage: ptr.To(wantErrMessage)} - server.mustBackend().DebugNotify(testNotify) + server.Backend().DebugNotify(testNotify) if n, err := watcherA.Next(); err != nil { - t.Fatalf("IPNBusWatcher.Next(%q): %v", clientA.User.Name, err) + t.Fatalf("IPNBusWatcher.Next(%q): %v", clientA.Username(), err) } else if gotErrMessage := n.ErrMessage; gotErrMessage == nil || *gotErrMessage != wantErrMessage { - t.Fatalf("IPNBusWatcher.Next(%q): want %v; got %v", clientA.User.Name, wantErrMessage, gotErrMessage) + t.Fatalf("IPNBusWatcher.Next(%q): want %v; got %v", clientA.Username(), wantErrMessage, gotErrMessage) } if n, err := watcherB.Next(); err != nil { - t.Fatalf("IPNBusWatcher.Next(%q): %v", clientB.User.Name, err) + t.Fatalf("IPNBusWatcher.Next(%q): %v", clientB.Username(), err) } else if gotErrMessage := n.ErrMessage; gotErrMessage == nil || *gotErrMessage != wantErrMessage { - t.Fatalf("IPNBusWatcher.Next(%q): want %v; got %v", clientB.User.Name, wantErrMessage, gotErrMessage) + t.Fatalf("IPNBusWatcher.Next(%q): want %v; got %v", clientB.Username(), wantErrMessage, gotErrMessage) } } @@ -120,21 +66,21 @@ func TestUserConnectDisconnectOnWindows(t *testing.T) { setGOOSForTest(t, "windows") ctx := context.Background() - server := startDefaultTestIPNServer(t, ctx, enableLogging) + server := lapitest.NewServer(t, lapitest.WithLogging(enableLogging)) - client := server.getClientAs("User") + client := server.ClientWithName("User") _, cancelWatcher := client.WatchIPNBus(ctx, 0) // On Windows, however, the current user should be set to the user that connected. - server.checkCurrentUser(client.User) + server.CheckCurrentUser(client.Actor) // Cancel the IPN bus watcher request and wait for the server to unblock. cancelWatcher() - server.blockWhileInUse(ctx) + server.BlockWhileInUse(ctx) // The current user should not be set after a disconnect, as no one is // currently using the server. - server.checkCurrentUser(nil) + server.CheckCurrentUser(nil) } func TestIPNAlreadyInUseOnWindows(t *testing.T) { @@ -142,22 +88,22 @@ func TestIPNAlreadyInUseOnWindows(t *testing.T) { setGOOSForTest(t, "windows") ctx := context.Background() - server := startDefaultTestIPNServer(t, ctx, enableLogging) + server := lapitest.NewServer(t, lapitest.WithLogging(enableLogging)) // UserA connects and starts watching the IPN bus. - clientA := server.getClientAs("UserA") + clientA := server.ClientWithName("UserA") clientA.WatchIPNBus(ctx, 0) // While UserA is connected, UserB should not be able to connect. - clientB := server.getClientAs("UserB") + clientB := server.ClientWithName("UserB") if _, gotErr := clientB.Status(ctx); gotErr == nil { - t.Fatalf("Status(%q): want error; got nil", clientB.User.Name) + t.Fatalf("Status(%q): want error; got nil", clientB.Username()) } else if wantError := "401 Unauthorized: Tailscale already in use by UserA"; gotErr.Error() != wantError { - t.Fatalf("Status(%q): want %q; got %q", clientB.User.Name, wantError, gotErr.Error()) + t.Fatalf("Status(%q): want %q; got %q", clientB.Username(), wantError, gotErr.Error()) } // Current user should still be UserA. 
- server.checkCurrentUser(clientA.User) + server.CheckCurrentUser(clientA.Actor) } func TestSequentialOSUserSwitchingOnWindows(t *testing.T) { @@ -165,22 +111,22 @@ func TestSequentialOSUserSwitchingOnWindows(t *testing.T) { setGOOSForTest(t, "windows") ctx := context.Background() - server := startDefaultTestIPNServer(t, ctx, enableLogging) + server := lapitest.NewServer(t, lapitest.WithLogging(enableLogging)) connectDisconnectAsUser := func(name string) { // User connects and starts watching the IPN bus. - client := server.getClientAs(name) + client := server.ClientWithName(name) watcher, cancelWatcher := client.WatchIPNBus(ctx, 0) defer cancelWatcher() go pumpIPNBus(watcher) // It should be the current user from the LocalBackend's perspective... - server.checkCurrentUser(client.User) + server.CheckCurrentUser(client.Actor) // until it disconnects. cancelWatcher() - server.blockWhileInUse(ctx) + server.BlockWhileInUse(ctx) // Now, the current user should be unset. - server.checkCurrentUser(nil) + server.CheckCurrentUser(nil) } // UserA logs in, uses Tailscale for a bit, then logs out. @@ -194,11 +140,11 @@ func TestConcurrentOSUserSwitchingOnWindows(t *testing.T) { setGOOSForTest(t, "windows") ctx := context.Background() - server := startDefaultTestIPNServer(t, ctx, enableLogging) + server := lapitest.NewServer(t, lapitest.WithLogging(enableLogging)) connectDisconnectAsUser := func(name string) { // User connects and starts watching the IPN bus. - client := server.getClientAs(name) + client := server.ClientWithName(name) watcher, cancelWatcher := client.WatchIPNBus(ctx, ipn.NotifyInitialState) defer cancelWatcher() @@ -206,7 +152,7 @@ func TestConcurrentOSUserSwitchingOnWindows(t *testing.T) { // Get the current user from the LocalBackend's perspective // as soon as we're connected. - gotUID, gotActor := server.mustBackend().CurrentUserForTest() + gotUID, gotActor := server.Backend().CurrentUserForTest() // Wait for the first notification to arrive. // It will either be the initial state we've requested via [ipn.NotifyInitialState], @@ -225,17 +171,17 @@ func TestConcurrentOSUserSwitchingOnWindows(t *testing.T) { } // Otherwise, our user should have been the current user since the time we connected. - if gotUID != client.User.UID { - t.Errorf("CurrentUser(Initial): got UID %q; want %q", gotUID, client.User.UID) + if gotUID != client.Actor.UserID() { + t.Errorf("CurrentUser(Initial): got UID %q; want %q", gotUID, client.Actor.UserID()) return } - if gotActor, ok := gotActor.(*ipnauth.TestActor); !ok || *gotActor != *client.User { - t.Errorf("CurrentUser(Initial): got %v; want %v", gotActor, client.User) + if hasActor := gotActor != nil; !hasActor || gotActor != client.Actor { + t.Errorf("CurrentUser(Initial): got %v; want %v", gotActor, client.Actor) return } // And should still be the current user (as they're still connected)... 
- server.checkCurrentUser(client.User) + server.CheckCurrentUser(client.Actor) } numIterations := 10 @@ -253,11 +199,11 @@ func TestConcurrentOSUserSwitchingOnWindows(t *testing.T) { } wg.Wait() - if err := server.blockWhileInUse(ctx); err != nil { - t.Fatalf("blockWhileInUse: %v", err) + if err := server.BlockWhileInUse(ctx); err != nil { + t.Fatalf("BlockUntilIdle: %v", err) } - server.checkCurrentUser(nil) + server.CheckCurrentUser(nil) } } @@ -266,13 +212,13 @@ func TestBlockWhileIdentityInUse(t *testing.T) { setGOOSForTest(t, "windows") ctx := context.Background() - server := startDefaultTestIPNServer(t, ctx, enableLogging) + server := lapitest.NewServer(t, lapitest.WithLogging(enableLogging)) // connectWaitDisconnectAsUser connects as a user with the specified name // and keeps the IPN bus watcher alive until the context is canceled. // It returns a channel that is closed when done. connectWaitDisconnectAsUser := func(ctx context.Context, name string) <-chan struct{} { - client := server.getClientAs(name) + client := server.ClientWithName(name) watcher, cancelWatcher := client.WatchIPNBus(ctx, 0) done := make(chan struct{}) @@ -301,8 +247,8 @@ func TestBlockWhileIdentityInUse(t *testing.T) { // in blockWhileIdentityInUse. But the issue also occurs during // the normal execution path when UserB connects to the IPN server // while UserA is disconnecting. - userB := server.makeTestUser("UserB", "ClientB") - server.blockWhileIdentityInUse(ctx, userB) + userB := server.MakeTestActor("UserB", "ClientB") + server.BlockWhileInUseByOther(ctx, userB) <-userADone } } @@ -313,41 +259,7 @@ func setGOOSForTest(tb testing.TB, goos string) { tb.Cleanup(func() { envknob.Setenv("TS_DEBUG_FAKE_GOOS", "") }) } -func testLogger(tb testing.TB, enableLogging bool) logger.Logf { - tb.Helper() - if enableLogging { - return tstest.WhileTestRunningLogger(tb) - } - return logger.Discard -} - -// newTestIPNServer creates a new IPN server for testing, using the specified local backend. 
-func newTestIPNServer(tb testing.TB, lb *ipnlocal.LocalBackend, enableLogging bool) *Server { - tb.Helper() - server := New(testLogger(tb, enableLogging), logid.PublicID{}, lb.NetMon()) - server.lb.Store(lb) - return server -} - -type testIPNClient struct { - tb testing.TB - *local.Client - User *ipnauth.TestActor -} - -func (c *testIPNClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*tailscale.IPNBusWatcher, context.CancelFunc) { - c.tb.Helper() - ctx, cancelWatcher := context.WithCancel(ctx) - c.tb.Cleanup(cancelWatcher) - watcher, err := c.Client.WatchIPNBus(ctx, mask) - if err != nil { - c.tb.Fatalf("WatchIPNBus(%q): %v", c.User.Name, err) - } - c.tb.Cleanup(func() { watcher.Close() }) - return watcher, cancelWatcher -} - -func pumpIPNBus(watcher *tailscale.IPNBusWatcher) { +func pumpIPNBus(watcher *local.IPNBusWatcher) { for { _, err := watcher.Next() if err != nil { @@ -355,206 +267,3 @@ func pumpIPNBus(watcher *tailscale.IPNBusWatcher) { } } } - -type testIPNServer struct { - tb testing.TB - *Server - clientID atomic.Int64 - getClient func(*ipnauth.TestActor) *local.Client - - actorsMu sync.Mutex - actors map[string]*ipnauth.TestActor -} - -func (s *testIPNServer) getClientAs(name string) *testIPNClient { - clientID := fmt.Sprintf("Client-%d", 1+s.clientID.Add(1)) - user := s.makeTestUser(name, clientID) - return &testIPNClient{ - tb: s.tb, - Client: s.getClient(user), - User: user, - } -} - -func (s *testIPNServer) makeTestUser(name string, clientID string) *ipnauth.TestActor { - s.actorsMu.Lock() - defer s.actorsMu.Unlock() - actor := s.actors[name] - if actor == nil { - actor = &ipnauth.TestActor{Name: name} - if envknob.GOOS() == "windows" { - // Historically, as of 2025-01-13, IPN does not distinguish between - // different users on non-Windows devices. Therefore, the UID, which is - // an [ipn.WindowsUserID], should only be populated when the actual or - // fake GOOS is Windows. - actor.UID = ipn.WindowsUserID(fmt.Sprintf("S-1-5-21-1-0-0-%d", 1001+len(s.actors))) - } - mak.Set(&s.actors, name, actor) - s.tb.Cleanup(func() { delete(s.actors, name) }) - } - actor = ptr.To(*actor) - actor.CID = ipnauth.ClientIDFrom(clientID) - return actor -} - -func (s *testIPNServer) blockWhileInUse(ctx context.Context) error { - ready, cleanup := s.zeroReqWaiter.add(&s.mu, ctx) - - s.mu.Lock() - busy := len(s.activeReqs) != 0 - s.mu.Unlock() - - if busy { - <-ready - } - cleanup() - return ctx.Err() -} - -func (s *testIPNServer) checkCurrentUser(want *ipnauth.TestActor) { - s.tb.Helper() - var wantUID ipn.WindowsUserID - if want != nil { - wantUID = want.UID - } - gotUID, gotActor := s.mustBackend().CurrentUserForTest() - if gotUID != wantUID { - s.tb.Errorf("CurrentUser: got UID %q; want %q", gotUID, wantUID) - } - if gotActor, ok := gotActor.(*ipnauth.TestActor); ok != (want != nil) || (want != nil && *gotActor != *want) { - s.tb.Errorf("CurrentUser: got %v; want %v", gotActor, want) - } -} - -// startTestIPNServer starts a [httptest.Server] that hosts the specified IPN server for the -// duration of the test, using the specified base context for incoming requests. -// It returns a function that creates a [local.Client] as a given [ipnauth.TestActor]. 
-func startTestIPNServer(tb testing.TB, baseContext context.Context, server *Server) *testIPNServer { - tb.Helper() - ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - actor, err := extractActorFromHeader(r.Header) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - tb.Errorf("extractActorFromHeader: %v", err) - return - } - ctx := newTestContextWithActor(r.Context(), actor) - server.serveHTTP(w, r.Clone(ctx)) - })) - ts.Config.Addr = "http://" + apitype.LocalAPIHost - ts.Config.BaseContext = func(_ net.Listener) context.Context { return baseContext } - ts.Config.ErrorLog = logger.StdLogger(logger.WithPrefix(server.logf, "ipnserver: ")) - ts.Start() - tb.Cleanup(ts.Close) - return &testIPNServer{ - tb: tb, - Server: server, - getClient: func(actor *ipnauth.TestActor) *local.Client { - return &local.Client{Transport: newTestRoundTripper(ts, actor)} - }, - } -} - -func startDefaultTestIPNServer(tb testing.TB, ctx context.Context, enableLogging bool) *testIPNServer { - tb.Helper() - lb := newLocalBackendWithTestControl(tb, newUnreachableControlClient, enableLogging) - ctx, stopServer := context.WithCancel(ctx) - tb.Cleanup(stopServer) - return startTestIPNServer(tb, ctx, newTestIPNServer(tb, lb, enableLogging)) -} - -type testRoundTripper struct { - transport http.RoundTripper - actor *ipnauth.TestActor -} - -// newTestRoundTripper creates a new [http.RoundTripper] that sends requests -// to the specified test server as the specified actor. -func newTestRoundTripper(ts *httptest.Server, actor *ipnauth.TestActor) *testRoundTripper { - return &testRoundTripper{ - transport: &http.Transport{DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { - var std net.Dialer - return std.DialContext(ctx, network, ts.Listener.Addr().(*net.TCPAddr).String()) - }}, - actor: actor, - } -} - -const testActorHeaderName = "TS-Test-Actor" - -// RoundTrip implements [http.RoundTripper] by forwarding the request to the underlying transport -// and including the test actor's identity in the request headers. -func (rt *testRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { - actorJSON, err := json.Marshal(&rt.actor) - if err != nil { - // An [http.RoundTripper] must always close the request body, including on error. - if r.Body != nil { - r.Body.Close() - } - return nil, err - } - - r = r.Clone(r.Context()) - r.Header.Set(testActorHeaderName, string(actorJSON)) - return rt.transport.RoundTrip(r) -} - -// extractActorFromHeader extracts a test actor from the specified request headers. 
-func extractActorFromHeader(h http.Header) (*ipnauth.TestActor, error) { - actorJSON := h.Get(testActorHeaderName) - if actorJSON == "" { - return nil, errors.New("missing Test-Actor header") - } - actor := &ipnauth.TestActor{} - if err := json.Unmarshal([]byte(actorJSON), &actor); err != nil { - return nil, fmt.Errorf("invalid Test-Actor header: %v", err) - } - return actor, nil -} - -type newControlClientFn func(tb testing.TB, opts controlclient.Options) controlclient.Client - -func newLocalBackendWithTestControl(tb testing.TB, newControl newControlClientFn, enableLogging bool) *ipnlocal.LocalBackend { - tb.Helper() - - sys := tsd.NewSystem() - store := &mem.Store{} - sys.Set(store) - - logf := testLogger(tb, enableLogging) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) - if err != nil { - tb.Fatalf("NewFakeUserspaceEngine: %v", err) - } - tb.Cleanup(e.Close) - sys.Set(e) - - b, err := ipnlocal.NewLocalBackend(logf, logid.PublicID{}, sys, 0) - if err != nil { - tb.Fatalf("NewLocalBackend: %v", err) - } - tb.Cleanup(b.Shutdown) - b.DisablePortMapperForTest() - - b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { - return newControl(tb, opts), nil - }) - return b -} - -func newUnreachableControlClient(tb testing.TB, opts controlclient.Options) controlclient.Client { - tb.Helper() - opts.ServerURL = "https://127.0.0.1:1" - cc, err := controlclient.New(opts) - if err != nil { - tb.Fatal(err) - } - return cc -} - -// newTestContextWithActor returns a new context that carries the identity -// of the specified actor and can be used for testing. -// It can be retrieved with [actorFromContext]. -func newTestContextWithActor(ctx context.Context, actor ipnauth.Actor) context.Context { - return actorKey.WithValue(ctx, actorOrError{actor: actor}) -} diff --git a/ipn/ipnserver/waiterset_test.go b/ipn/ipnserver/waiterset_test.go new file mode 100644 index 000000000..b7d5ea144 --- /dev/null +++ b/ipn/ipnserver/waiterset_test.go @@ -0,0 +1,46 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnserver + +import ( + "context" + "sync" + "testing" +) + +func TestWaiterSet(t *testing.T) { + var s waiterSet + + wantLen := func(want int, when string) { + t.Helper() + if got := len(s); got != want { + t.Errorf("%s: len = %v; want %v", when, got, want) + } + } + wantLen(0, "initial") + var mu sync.Mutex + ctx, cancel := context.WithCancel(context.Background()) + + ready, cleanup := s.add(&mu, ctx) + wantLen(1, "after add") + + select { + case <-ready: + t.Fatal("should not be ready") + default: + } + s.wakeAll() + <-ready + + wantLen(1, "after fire") + cleanup() + wantLen(0, "after cleanup") + + // And again but on an already-expired ctx. + cancel() + ready, cleanup = s.add(&mu, ctx) + <-ready // shouldn't block + cleanup() + wantLen(0, "at end") +} diff --git a/ipn/lapitest/backend.go b/ipn/lapitest/backend.go new file mode 100644 index 000000000..ddf48fb28 --- /dev/null +++ b/ipn/lapitest/backend.go @@ -0,0 +1,63 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package lapitest + +import ( + "testing" + + "tailscale.com/control/controlclient" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/store/mem" + "tailscale.com/types/logid" + "tailscale.com/wgengine" +) + +// NewBackend returns a new [ipnlocal.LocalBackend] for testing purposes. 
+// It fails the test if the specified options are invalid or if the backend cannot be created. +func NewBackend(tb testing.TB, opts ...Option) *ipnlocal.LocalBackend { + tb.Helper() + options, err := newOptions(tb, opts...) + if err != nil { + tb.Fatalf("NewBackend: %v", err) + } + return newBackend(options) +} + +func newBackend(opts *options) *ipnlocal.LocalBackend { + tb := opts.TB() + tb.Helper() + + sys := opts.Sys() + if _, ok := sys.StateStore.GetOK(); !ok { + sys.Set(&mem.Store{}) + } + + e, err := wgengine.NewFakeUserspaceEngine(opts.Logf(), sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + if err != nil { + opts.tb.Fatalf("NewFakeUserspaceEngine: %v", err) + } + tb.Cleanup(e.Close) + sys.Set(e) + + b, err := ipnlocal.NewLocalBackend(opts.Logf(), logid.PublicID{}, sys, 0) + if err != nil { + tb.Fatalf("NewLocalBackend: %v", err) + } + tb.Cleanup(b.Shutdown) + b.DisablePortMapperForTest() + b.SetControlClientGetterForTesting(opts.MakeControlClient) + return b +} + +// NewUnreachableControlClient is a [NewControlFn] that creates +// a new [controlclient.Client] for an unreachable control server. +func NewUnreachableControlClient(tb testing.TB, opts controlclient.Options) (controlclient.Client, error) { + tb.Helper() + opts.ServerURL = "https://127.0.0.1:1" + cc, err := controlclient.New(opts) + if err != nil { + tb.Fatal(err) + } + return cc, nil +} diff --git a/ipn/lapitest/client.go b/ipn/lapitest/client.go new file mode 100644 index 000000000..6d22e938b --- /dev/null +++ b/ipn/lapitest/client.go @@ -0,0 +1,71 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package lapitest + +import ( + "context" + "testing" + + "tailscale.com/client/local" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" +) + +// Client wraps a [local.Client] for testing purposes. +// It can be created using [Server.Client], [Server.ClientWithName], +// or [Server.ClientFor] and sends requests as the specified actor +// to the associated [Server]. +type Client struct { + tb testing.TB + // Client is the underlying [local.Client] wrapped by the test client. + // It is configured to send requests to the test server on behalf of the actor. + *local.Client + // Actor represents the user on whose behalf this client is making requests. + // The server uses it to determine the client's identity and permissions. + // The test can mutate the user to alter the actor's identity or permissions + // before making a new request. It is typically an [ipnauth.TestActor], + // unless the [Client] was created with s specific actor using [Server.ClientFor]. + Actor ipnauth.Actor +} + +// Username returns username of the client's owner. +func (c *Client) Username() string { + c.tb.Helper() + name, err := c.Actor.Username() + if err != nil { + c.tb.Fatalf("Client.Username: %v", err) + } + return name +} + +// WatchIPNBus is like [local.Client.WatchIPNBus] but returns a [local.IPNBusWatcher] +// that is closed when the test ends and a cancel function that stops the watcher. +// It fails the test if the underlying WatchIPNBus returns an error. 
+func (c *Client) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*local.IPNBusWatcher, context.CancelFunc) { + c.tb.Helper() + ctx, cancelWatcher := context.WithCancel(ctx) + c.tb.Cleanup(cancelWatcher) + watcher, err := c.Client.WatchIPNBus(ctx, mask) + name, _ := c.Actor.Username() + if err != nil { + c.tb.Fatalf("Client.WatchIPNBus(%q): %v", name, err) + } + c.tb.Cleanup(func() { watcher.Close() }) + return watcher, cancelWatcher +} + +// generateSequentialName generates a unique sequential name based on the given prefix and number n. +// It uses a base-26 encoding to create names like "User-A", "User-B", ..., "User-Z", "User-AA", etc. +func generateSequentialName(prefix string, n int) string { + n++ + name := "" + const numLetters = 'Z' - 'A' + 1 + for n > 0 { + n-- + remainder := byte(n % numLetters) + name = string([]byte{'A' + remainder}) + name + n = n / numLetters + } + return prefix + "-" + name +} diff --git a/ipn/lapitest/example_test.go b/ipn/lapitest/example_test.go new file mode 100644 index 000000000..57479199a --- /dev/null +++ b/ipn/lapitest/example_test.go @@ -0,0 +1,80 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package lapitest + +import ( + "context" + "testing" + + "tailscale.com/ipn" +) + +func TestClientServer(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + // Create a server and two clients. + // Both clients represent the same user to make this work across platforms. + // On Windows we've been restricting the API usage to a single user at a time. + // While we're planning on changing this once a better permission model is in place, + // this test is currently limited to a single user (but more than one client is fine). + // Alternatively, we could override GOOS via envknobs to test as if we're + // on a different platform, but that would make the test depend on global state, etc. + s := NewServer(t, WithLogging(false)) + c1 := s.ClientWithName("User-A") + c2 := s.ClientWithName("User-A") + + // Start watching the IPN bus as the second client. + w2, _ := c2.WatchIPNBus(context.Background(), ipn.NotifyInitialPrefs) + + // We're supposed to get a notification about the initial prefs, + // and WantRunning should be false. + n, err := w2.Next() + for ; err == nil; n, err = w2.Next() { + if n.Prefs == nil { + // Ignore non-prefs notifications. + continue + } + if n.Prefs.WantRunning() { + t.Errorf("WantRunning(initial): got %v, want false", n.Prefs.WantRunning()) + } + break + } + if err != nil { + t.Fatalf("IPNBusWatcher.Next failed: %v", err) + } + + // Now send an EditPrefs request from the first client to set WantRunning to true. + change := &ipn.MaskedPrefs{Prefs: ipn.Prefs{WantRunning: true}, WantRunningSet: true} + gotPrefs, err := c1.EditPrefs(ctx, change) + if err != nil { + t.Fatalf("EditPrefs failed: %v", err) + } + if !gotPrefs.WantRunning { + t.Fatalf("EditPrefs.WantRunning: got %v, want true", gotPrefs.WantRunning) + } + + // We can check the backend directly to see if the prefs were set correctly. + if gotWantRunning := s.Backend().Prefs().WantRunning(); !gotWantRunning { + t.Fatalf("Backend.Prefs.WantRunning: got %v, want true", gotWantRunning) + } + + // And can also wait for the second client with an IPN bus watcher to receive the notification + // about the prefs change. + n, err = w2.Next() + for ; err == nil; n, err = w2.Next() { + if n.Prefs == nil { + // Ignore non-prefs notifications. 
+ continue + } + if !n.Prefs.WantRunning() { + t.Fatalf("WantRunning(changed): got %v, want true", n.Prefs.WantRunning()) + } + break + } + if err != nil { + t.Fatalf("IPNBusWatcher.Next failed: %v", err) + } +} diff --git a/ipn/lapitest/opts.go b/ipn/lapitest/opts.go new file mode 100644 index 000000000..6eb1594da --- /dev/null +++ b/ipn/lapitest/opts.go @@ -0,0 +1,170 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package lapitest + +import ( + "context" + "errors" + "fmt" + "testing" + + "tailscale.com/control/controlclient" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/tsd" + "tailscale.com/tstest" + "tailscale.com/types/lazy" + "tailscale.com/types/logger" +) + +// Option is any optional configuration that can be passed to [NewServer] or [NewBackend]. +type Option interface { + apply(*options) error +} + +// options is the merged result of all applied [Option]s. +type options struct { + tb testing.TB + ctx lazy.SyncValue[context.Context] + logf lazy.SyncValue[logger.Logf] + sys lazy.SyncValue[*tsd.System] + newCC lazy.SyncValue[NewControlFn] + backend lazy.SyncValue[*ipnlocal.LocalBackend] +} + +// newOptions returns a new [options] struct with the specified [Option]s applied. +func newOptions(tb testing.TB, opts ...Option) (*options, error) { + options := &options{tb: tb} + for _, opt := range opts { + if err := opt.apply(options); err != nil { + return nil, fmt.Errorf("lapitest: %w", err) + } + } + return options, nil +} + +// TB returns the owning [*testing.T] or [*testing.B]. +func (o *options) TB() testing.TB { + return o.tb +} + +// Context returns the base context to be used by the server. +func (o *options) Context() context.Context { + return o.ctx.Get(context.Background) +} + +// Logf returns the [logger.Logf] to be used for logging. +func (o *options) Logf() logger.Logf { + return o.logf.Get(func() logger.Logf { return logger.Discard }) +} + +// Sys returns the [tsd.System] that contains subsystems to be used +// when creating a new [ipnlocal.LocalBackend]. +func (o *options) Sys() *tsd.System { + return o.sys.Get(func() *tsd.System { return tsd.NewSystem() }) +} + +// Backend returns the [ipnlocal.LocalBackend] to be used by the server. +// If a backend is provided via [WithBackend], it is used as-is. +// Otherwise, a new backend is created with the the [options] in o. +func (o *options) Backend() *ipnlocal.LocalBackend { + return o.backend.Get(func() *ipnlocal.LocalBackend { return newBackend(o) }) +} + +// MakeControlClient returns a new [controlclient.Client] to be used by newly +// created [ipnlocal.LocalBackend]s. It is only used if no backend is provided +// via [WithBackend]. +func (o *options) MakeControlClient(opts controlclient.Options) (controlclient.Client, error) { + newCC := o.newCC.Get(func() NewControlFn { return NewUnreachableControlClient }) + return newCC(o.tb, opts) +} + +type loggingOption struct{ enableLogging bool } + +// WithLogging returns an [Option] that enables or disables logging. 
+func WithLogging(enableLogging bool) Option { + return loggingOption{enableLogging: enableLogging} +} + +func (o loggingOption) apply(opts *options) error { + var logf logger.Logf + if o.enableLogging { + logf = tstest.WhileTestRunningLogger(opts.tb) + } else { + logf = logger.Discard + } + if !opts.logf.Set(logf) { + return errors.New("logging already configured") + } + return nil +} + +type contextOption struct{ ctx context.Context } + +// WithContext returns an [Option] that sets the base context to be used by the [Server]. +func WithContext(ctx context.Context) Option { + return contextOption{ctx: ctx} +} + +func (o contextOption) apply(opts *options) error { + if !opts.ctx.Set(o.ctx) { + return errors.New("context already configured") + } + return nil +} + +type sysOption struct{ sys *tsd.System } + +// WithSys returns an [Option] that sets the [tsd.System] to be used +// when creating a new [ipnlocal.LocalBackend]. +func WithSys(sys *tsd.System) Option { + return sysOption{sys: sys} +} + +func (o sysOption) apply(opts *options) error { + if !opts.sys.Set(o.sys) { + return errors.New("tsd.System already configured") + } + return nil +} + +type backendOption struct{ backend *ipnlocal.LocalBackend } + +// WithBackend returns an [Option] that configures the server to use the specified +// [ipnlocal.LocalBackend] instead of creating a new one. +// It is mutually exclusive with [WithControlClient]. +func WithBackend(backend *ipnlocal.LocalBackend) Option { + return backendOption{backend: backend} +} + +func (o backendOption) apply(opts *options) error { + if _, ok := opts.backend.Peek(); ok { + return errors.New("backend cannot be set when control client is already set") + } + if !opts.backend.Set(o.backend) { + return errors.New("backend already set") + } + return nil +} + +// NewControlFn is any function that creates a new [controlclient.Client] +// with the specified options. +type NewControlFn func(tb testing.TB, opts controlclient.Options) (controlclient.Client, error) + +// WithControlClient returns an option that specifies a function to be used +// by the [ipnlocal.LocalBackend] when creating a new [controlclient.Client]. +// It is mutually exclusive with [WithBackend] and is only used if no backend +// has been provided. +func WithControlClient(newControl NewControlFn) Option { + return newControl +} + +func (fn NewControlFn) apply(opts *options) error { + if _, ok := opts.backend.Peek(); ok { + return errors.New("control client cannot be set when backend is already set") + } + if !opts.newCC.Set(fn) { + return errors.New("control client already set") + } + return nil +} diff --git a/ipn/lapitest/server.go b/ipn/lapitest/server.go new file mode 100644 index 000000000..d477dc182 --- /dev/null +++ b/ipn/lapitest/server.go @@ -0,0 +1,324 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package lapitest provides utilities for black-box testing of LocalAPI ([ipnserver]). +package lapitest + +import ( + "context" + "fmt" + "net" + "net/http" + "net/http/httptest" + "sync" + "testing" + + "tailscale.com/client/local" + "tailscale.com/client/tailscale/apitype" + "tailscale.com/envknob" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/ipnserver" + "tailscale.com/types/logger" + "tailscale.com/types/logid" + "tailscale.com/types/ptr" + "tailscale.com/util/mak" + "tailscale.com/util/rands" +) + +// A Server is an in-process LocalAPI server that can be used in end-to-end tests. 
+type Server struct { + tb testing.TB + + ctx context.Context + cancelCtx context.CancelFunc + + lb *ipnlocal.LocalBackend + ipnServer *ipnserver.Server + + // mu protects the following fields. + mu sync.Mutex + started bool + httpServer *httptest.Server + actorsByName map[string]*ipnauth.TestActor + lastClientID int +} + +// NewUnstartedServer returns a new [Server] with the specified options without starting it. +func NewUnstartedServer(tb testing.TB, opts ...Option) *Server { + tb.Helper() + options, err := newOptions(tb, opts...) + if err != nil { + tb.Fatalf("invalid options: %v", err) + } + + s := &Server{tb: tb, lb: options.Backend()} + s.ctx, s.cancelCtx = context.WithCancel(options.Context()) + s.ipnServer = newUnstartedIPNServer(options) + s.httpServer = httptest.NewUnstartedServer(http.HandlerFunc(s.serveHTTP)) + s.httpServer.Config.Addr = "http://" + apitype.LocalAPIHost + s.httpServer.Config.BaseContext = func(_ net.Listener) context.Context { return s.ctx } + s.httpServer.Config.ErrorLog = logger.StdLogger(logger.WithPrefix(options.Logf(), "lapitest: ")) + tb.Cleanup(s.Close) + return s +} + +// NewServer starts and returns a new [Server] with the specified options. +func NewServer(tb testing.TB, opts ...Option) *Server { + tb.Helper() + server := NewUnstartedServer(tb, opts...) + server.Start() + return server +} + +// Start starts the server from [NewUnstartedServer]. +func (s *Server) Start() { + s.tb.Helper() + s.mu.Lock() + defer s.mu.Unlock() + if !s.started && s.httpServer != nil { + s.httpServer.Start() + s.started = true + } +} + +// Backend returns the underlying [ipnlocal.LocalBackend]. +func (s *Server) Backend() *ipnlocal.LocalBackend { + s.tb.Helper() + return s.lb +} + +// Client returns a new [Client] configured for making requests to the server +// as a new [ipnauth.TestActor] with a unique username and [ipnauth.ClientID]. +func (s *Server) Client() *Client { + s.tb.Helper() + user := s.MakeTestActor("", "") // generate a unique username and client ID + return s.ClientFor(user) +} + +// ClientWithName returns a new [Client] configured for making requests to the server +// as a new [ipnauth.TestActor] with the specified name and a unique [ipnauth.ClientID]. +func (s *Server) ClientWithName(name string) *Client { + s.tb.Helper() + user := s.MakeTestActor(name, "") // generate a unique client ID + return s.ClientFor(user) +} + +// ClientFor returns a new [Client] configured for making requests to the server +// as the specified actor. +func (s *Server) ClientFor(actor ipnauth.Actor) *Client { + s.tb.Helper() + client := &Client{ + tb: s.tb, + Actor: actor, + } + client.Client = &local.Client{Transport: newRoundTripper(client, s.httpServer)} + return client +} + +// MakeTestActor returns a new [ipnauth.TestActor] with the specified name and client ID. +// If the name is empty, a unique sequential name is generated. Likewise, +// if clientID is empty, a unique sequential client ID is generated. +func (s *Server) MakeTestActor(name string, clientID string) *ipnauth.TestActor { + s.tb.Helper() + + s.mu.Lock() + defer s.mu.Unlock() + + // Generate a unique sequential name if the provided name is empty. + if name == "" { + n := len(s.actorsByName) + name = generateSequentialName("User", n) + } + + if clientID == "" { + s.lastClientID += 1 + clientID = fmt.Sprintf("Client-%d", s.lastClientID) + } + + // Create a new base actor if one doesn't already exist for the given name. 
+ baseActor := s.actorsByName[name] + if baseActor == nil { + baseActor = &ipnauth.TestActor{Name: name} + if envknob.GOOS() == "windows" { + // Historically, as of 2025-04-15, IPN does not distinguish between + // different users on non-Windows devices. Therefore, the UID, which is + // an [ipn.WindowsUserID], should only be populated when the actual or + // fake GOOS is Windows. + baseActor.UID = ipn.WindowsUserID(fmt.Sprintf("S-1-5-21-1-0-0-%d", 1001+len(s.actorsByName))) + } + mak.Set(&s.actorsByName, name, baseActor) + s.tb.Cleanup(func() { delete(s.actorsByName, name) }) + } + + // Create a shallow copy of the base actor and assign it the new client ID. + actor := ptr.To(*baseActor) + actor.CID = ipnauth.ClientIDFrom(clientID) + return actor +} + +// BlockWhileInUse blocks until the server becomes idle (no active requests), +// or the context is done. It returns the context's error if it is done. +// It is used in tests only. +func (s *Server) BlockWhileInUse(ctx context.Context) error { + s.tb.Helper() + s.mu.Lock() + defer s.mu.Unlock() + if s.httpServer == nil { + return nil + } + return s.ipnServer.BlockWhileInUseForTest(ctx) +} + +// BlockWhileInUseByOther blocks while the specified actor can't connect to the server +// due to another actor being connected. +// It is used in tests only. +func (s *Server) BlockWhileInUseByOther(ctx context.Context, actor ipnauth.Actor) error { + s.tb.Helper() + s.mu.Lock() + defer s.mu.Unlock() + if s.httpServer == nil { + return nil + } + return s.ipnServer.BlockWhileInUseByOtherForTest(ctx, actor) +} + +// CheckCurrentUser fails the test if the current user does not match the expected user. +// It is only used on Windows and will be removed as we progress on tailscale/corp#18342. +func (s *Server) CheckCurrentUser(want ipnauth.Actor) { + s.tb.Helper() + var wantUID ipn.WindowsUserID + if want != nil { + wantUID = want.UserID() + } + lb := s.Backend() + if lb == nil { + s.tb.Fatalf("Backend: nil") + } + gotUID, gotActor := lb.CurrentUserForTest() + if gotUID != wantUID { + s.tb.Errorf("CurrentUser: got UID %q; want %q", gotUID, wantUID) + } + if hasActor := gotActor != nil; hasActor != (want != nil) || (want != nil && gotActor != want) { + s.tb.Errorf("CurrentUser: got %v; want %v", gotActor, want) + } +} + +func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) { + actor, err := getActorForRequest(r) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + s.tb.Errorf("getActorForRequest: %v", err) + return + } + ctx := ipnserver.NewContextWithActorForTest(r.Context(), actor) + s.ipnServer.ServeHTTPForTest(w, r.Clone(ctx)) +} + +// Close shuts down the server and blocks until all outstanding requests on this server have completed. +func (s *Server) Close() { + s.tb.Helper() + s.mu.Lock() + server := s.httpServer + s.httpServer = nil + s.mu.Unlock() + + if server != nil { + server.Close() + } + s.cancelCtx() +} + +// newUnstartedIPNServer returns a new [ipnserver.Server] that exposes +// the specified [ipnlocal.LocalBackend] via LocalAPI, but does not start it. +// The opts carry additional configuration options. +func newUnstartedIPNServer(opts *options) *ipnserver.Server { + opts.TB().Helper() + lb := opts.Backend() + server := ipnserver.New(opts.Logf(), logid.PublicID{}, lb.NetMon()) + server.SetLocalBackend(lb) + return server +} + +// roundTripper is a [http.RoundTripper] that sends requests to a [Server] +// on behalf of the [Client] who owns it. 
+type roundTripper struct { + client *Client + transport http.RoundTripper +} + +// newRoundTripper returns a new [http.RoundTripper] that sends requests +// to the specified server as the specified client. +func newRoundTripper(client *Client, server *httptest.Server) http.RoundTripper { + return &roundTripper{ + client: client, + transport: &http.Transport{DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + var std net.Dialer + return std.DialContext(ctx, network, server.Listener.Addr().(*net.TCPAddr).String()) + }}, + } +} + +// requestIDHeaderName is the name of the header used to pass request IDs +// between the client and server. It is used to associate requests with their actors. +const requestIDHeaderName = "TS-Request-ID" + +// RoundTrip implements [http.RoundTripper] by sending the request to the [ipnserver.Server] +// on behalf of the owning [Client]. It registers each request for the duration +// of the call and associates it with the actor sending the request. +func (rt *roundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + reqID, unregister := registerRequest(rt.client.Actor) + defer unregister() + r = r.Clone(r.Context()) + r.Header.Set(requestIDHeaderName, reqID) + return rt.transport.RoundTrip(r) +} + +// getActorForRequest returns the actor for a given request. +// It returns an error if the request is not associated with an actor, +// such as when it wasn't sent by a [roundTripper]. +func getActorForRequest(r *http.Request) (ipnauth.Actor, error) { + reqID := r.Header.Get(requestIDHeaderName) + if reqID == "" { + return nil, fmt.Errorf("missing %s header", requestIDHeaderName) + } + actor, ok := getActorByRequestID(reqID) + if !ok { + return nil, fmt.Errorf("unknown request: %s", reqID) + } + return actor, nil +} + +var ( + inFlightRequestsMu sync.Mutex + inFlightRequests map[string]ipnauth.Actor +) + +// registerRequest associates a request with the specified actor and returns a unique request ID +// which can be used to retrieve the actor later. The returned function unregisters the request. +func registerRequest(actor ipnauth.Actor) (requestID string, unregister func()) { + inFlightRequestsMu.Lock() + defer inFlightRequestsMu.Unlock() + for { + requestID = rands.HexString(16) + if _, ok := inFlightRequests[requestID]; !ok { + break + } + } + mak.Set(&inFlightRequests, requestID, actor) + return requestID, func() { + inFlightRequestsMu.Lock() + defer inFlightRequestsMu.Unlock() + delete(inFlightRequests, requestID) + } +} + +// getActorByRequestID returns the actor associated with the specified request ID. +// It returns the actor and true if found, or nil and false if not. +func getActorByRequestID(requestID string) (ipnauth.Actor, bool) { + inFlightRequestsMu.Lock() + defer inFlightRequestsMu.Unlock() + actor, ok := inFlightRequests[requestID] + return actor, ok +} From 565ebbdeb8c1140fb17a21f521d45f7fbd97fdd8 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 9 May 2025 17:50:33 -0500 Subject: [PATCH 0852/1708] ipn/ipnlocal: move nodeBackend methods from local.go to node_backend.go We previously kept these methods in local.go when we started moving node-specific state from LocalBackend to nodeBackend, to make those changes easier to review. But it's time to move them to node_backend.go. 
Updates #cleanup Updates #12614 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 480 ----------------------------------- ipn/ipnlocal/node_backend.go | 447 ++++++++++++++++++++++++++++++++ ipn/ipnlocal/peerapi.go | 43 ++++ 3 files changed, 490 insertions(+), 480 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index e8ff05b37..5d6433002 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1450,66 +1450,6 @@ func (b *LocalBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { return b.currentNode().PeerCaps(src) } -// AppendMatchingPeers returns base with all peers that match pred appended. -// -// It acquires b.mu to read the netmap but releases it before calling pred. -func (nb *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView { - var peers []tailcfg.NodeView - - nb.mu.Lock() - if nb.netMap != nil { - // All fields on b.netMap are immutable, so this is - // safe to copy and use outside the lock. - peers = nb.netMap.Peers - } - nb.mu.Unlock() - - ret := base - for _, peer := range peers { - // The peers in b.netMap don't contain updates made via - // UpdateNetmapDelta. So only use PeerView in b.netMap for its NodeID, - // and then look up the latest copy in b.peers which is updated in - // response to UpdateNetmapDelta edits. - nb.mu.Lock() - peer, ok := nb.peers[peer.ID()] - nb.mu.Unlock() - if ok && pred(peer) { - ret = append(ret, peer) - } - } - return ret -} - -// PeerCaps returns the capabilities that remote src IP has to -// ths current node. -func (nb *nodeBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { - nb.mu.Lock() - defer nb.mu.Unlock() - return nb.peerCapsLocked(src) -} - -func (nb *nodeBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap { - if nb.netMap == nil { - return nil - } - filt := nb.filterAtomic.Load() - if filt == nil { - return nil - } - addrs := nb.netMap.GetAddresses() - for i := range addrs.Len() { - a := addrs.At(i) - if !a.IsSingleIP() { - continue - } - dst := a.Addr() - if dst.BitLen() == src.BitLen() { // match on family - return filt.CapsWithValues(src, dst) - } - } - return nil -} - func (b *LocalBackend) GetFilterForTest() *filter.Filter { if !testenv.InTest() { panic("GetFilterForTest called outside of test") @@ -2025,20 +1965,6 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo return true } -func (nb *nodeBackend) netMapWithPeers() *netmap.NetworkMap { - nb.mu.Lock() - defer nb.mu.Unlock() - if nb.netMap == nil { - return nil - } - nm := ptr.To(*nb.netMap) // shallow clone - nm.Peers = slicesx.MapValues(nb.peers) - slices.SortFunc(nm.Peers, func(a, b tailcfg.NodeView) int { - return cmp.Compare(a.ID(), b.ID()) - }) - return nm -} - // mutationsAreWorthyOfTellingIPNBus reports whether any mutation type in muts is // worthy of spamming the IPN bus (the Windows & Mac GUIs, basically) to tell them // about the update. @@ -2069,37 +1995,6 @@ func (b *LocalBackend) pickNewAutoExitNode() { b.send(ipn.Notify{Prefs: &newPrefs}) } -func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) { - nb.mu.Lock() - defer nb.mu.Unlock() - if nb.netMap == nil || len(nb.peers) == 0 { - return false - } - - // Locally cloned mutable nodes, to avoid calling AsStruct (clone) - // multiple times on a node if it's mutated multiple times in this - // call (e.g. 
its endpoints + online status both change) - var mutableNodes map[tailcfg.NodeID]*tailcfg.Node - - for _, m := range muts { - n, ok := mutableNodes[m.NodeIDBeingMutated()] - if !ok { - nv, ok := nb.peers[m.NodeIDBeingMutated()] - if !ok { - // TODO(bradfitz): unexpected metric? - return false - } - n = nv.AsStruct() - mak.Set(&mutableNodes, nv.ID(), n) - } - m.Apply(n) - } - for nid, n := range mutableNodes { - nb.peers[nid] = n.View() - } - return true -} - // setExitNodeID updates prefs to reference an exit node by ID, rather // than by IP. It returns whether prefs was mutated. func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bool) { @@ -2256,16 +2151,6 @@ func (b *LocalBackend) PeersForTest() []tailcfg.NodeView { return b.currentNode().PeersForTest() } -func (nb *nodeBackend) PeersForTest() []tailcfg.NodeView { - nb.mu.Lock() - defer nb.mu.Unlock() - ret := slicesx.MapValues(nb.peers) - slices.SortFunc(ret, func(a, b tailcfg.NodeView) int { - return cmp.Compare(a.ID(), b.ID()) - }) - return ret -} - func (b *LocalBackend) getNewControlClientFuncLocked() clientGen { if b.ccGen == nil { // Initialize it rather than just returning the @@ -2832,10 +2717,6 @@ func (b *LocalBackend) setFilter(f *filter.Filter) { b.e.SetFilter(f) } -func (nb *nodeBackend) setFilter(f *filter.Filter) { - nb.filterAtomic.Store(f) -} - var removeFromDefaultRoute = []netip.Prefix{ // RFC1918 LAN ranges netip.MustParsePrefix("192.168.0.0/16"), @@ -4773,12 +4654,6 @@ func (b *LocalBackend) NetMap() *netmap.NetworkMap { return b.currentNode().NetMap() } -func (nb *nodeBackend) NetMap() *netmap.NetworkMap { - nb.mu.Lock() - defer nb.mu.Unlock() - return nb.netMap -} - func (b *LocalBackend) isEngineBlocked() bool { b.mu.Lock() defer b.mu.Unlock() @@ -5017,201 +4892,6 @@ func shouldUseOneCGNATRoute(logf logger.Logf, mon *netmon.Monitor, controlKnobs return false } -func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { - nb.mu.Lock() - defer nb.mu.Unlock() - return dnsConfigForNetmap(nb.netMap, nb.peers, prefs, selfExpired, logf, versionOS) -} - -// dnsConfigForNetmap returns a *dns.Config for the given netmap, -// prefs, client OS version, and cloud hosting environment. -// -// The versionOS is a Tailscale-style version ("iOS", "macOS") and not -// a runtime.GOOS. -func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg.NodeView, prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { - if nm == nil { - return nil - } - - // If the current node's key is expired, then we don't program any DNS - // configuration into the operating system. This ensures that if the - // DNS configuration specifies a DNS server that is only reachable over - // Tailscale, we don't break connectivity for the user. - // - // TODO(andrew-d): this also stops returning anything from quad-100; we - // could do the same thing as having "CorpDNS: false" and keep that but - // not program the OS? - if selfExpired { - return &dns.Config{} - } - - dcfg := &dns.Config{ - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, - Hosts: map[dnsname.FQDN][]netip.Addr{}, - } - - // selfV6Only is whether we only have IPv6 addresses ourselves. - selfV6Only := nm.GetAddresses().ContainsFunc(tsaddr.PrefixIs6) && - !nm.GetAddresses().ContainsFunc(tsaddr.PrefixIs4) - dcfg.OnlyIPv6 = selfV6Only - - wantAAAA := nm.AllCaps.Contains(tailcfg.NodeAttrMagicDNSPeerAAAA) - - // Populate MagicDNS records. 
We do this unconditionally so that - // quad-100 can always respond to MagicDNS queries, even if the OS - // isn't configured to make MagicDNS resolution truly - // magic. Details in - // https://github.com/tailscale/tailscale/issues/1886. - set := func(name string, addrs views.Slice[netip.Prefix]) { - if addrs.Len() == 0 || name == "" { - return - } - fqdn, err := dnsname.ToFQDN(name) - if err != nil { - return // TODO: propagate error? - } - var have4 bool - for _, addr := range addrs.All() { - if addr.Addr().Is4() { - have4 = true - break - } - } - var ips []netip.Addr - for _, addr := range addrs.All() { - if selfV6Only { - if addr.Addr().Is6() { - ips = append(ips, addr.Addr()) - } - continue - } - // If this node has an IPv4 address, then - // remove peers' IPv6 addresses for now, as we - // don't guarantee that the peer node actually - // can speak IPv6 correctly. - // - // https://github.com/tailscale/tailscale/issues/1152 - // tracks adding the right capability reporting to - // enable AAAA in MagicDNS. - if addr.Addr().Is6() && have4 && !wantAAAA { - continue - } - ips = append(ips, addr.Addr()) - } - dcfg.Hosts[fqdn] = ips - } - set(nm.Name, nm.GetAddresses()) - for _, peer := range peers { - set(peer.Name(), peer.Addresses()) - } - for _, rec := range nm.DNS.ExtraRecords { - switch rec.Type { - case "", "A", "AAAA": - // Treat these all the same for now: infer from the value - default: - // TODO: more - continue - } - ip, err := netip.ParseAddr(rec.Value) - if err != nil { - // Ignore. - continue - } - fqdn, err := dnsname.ToFQDN(rec.Name) - if err != nil { - continue - } - dcfg.Hosts[fqdn] = append(dcfg.Hosts[fqdn], ip) - } - - if !prefs.CorpDNS() { - return dcfg - } - - for _, dom := range nm.DNS.Domains { - fqdn, err := dnsname.ToFQDN(dom) - if err != nil { - logf("[unexpected] non-FQDN search domain %q", dom) - } - dcfg.SearchDomains = append(dcfg.SearchDomains, fqdn) - } - if nm.DNS.Proxied { // actually means "enable MagicDNS" - for _, dom := range magicDNSRootDomains(nm) { - dcfg.Routes[dom] = nil // resolve internally with dcfg.Hosts - } - } - - addDefault := func(resolvers []*dnstype.Resolver) { - dcfg.DefaultResolvers = append(dcfg.DefaultResolvers, resolvers...) - } - - // If we're using an exit node and that exit node is new enough (1.19.x+) - // to run a DoH DNS proxy, then send all our DNS traffic through it. - if dohURL, ok := exitNodeCanProxyDNS(nm, peers, prefs.ExitNodeID()); ok { - addDefault([]*dnstype.Resolver{{Addr: dohURL}}) - return dcfg - } - - // If the user has set default resolvers ("override local DNS"), prefer to - // use those resolvers as the default, otherwise if there are WireGuard exit - // node resolvers, use those as the default. - if len(nm.DNS.Resolvers) > 0 { - addDefault(nm.DNS.Resolvers) - } else { - if resolvers, ok := wireguardExitNodeDNSResolvers(nm, peers, prefs.ExitNodeID()); ok { - addDefault(resolvers) - } - } - - for suffix, resolvers := range nm.DNS.Routes { - fqdn, err := dnsname.ToFQDN(suffix) - if err != nil { - logf("[unexpected] non-FQDN route suffix %q", suffix) - } - - // Create map entry even if len(resolvers) == 0; Issue 2706. - // This lets the control plane send ExtraRecords for which we - // can authoritatively answer "name not exists" for when the - // control plane also sends this explicit but empty route - // making it as something we handle. - // - // While we're already populating it, might as well size the - // slice appropriately. 
- // Per #9498 the exact requirements of nil vs empty slice remain - // unclear, this is a haunted graveyard to be resolved. - dcfg.Routes[fqdn] = make([]*dnstype.Resolver, 0, len(resolvers)) - dcfg.Routes[fqdn] = append(dcfg.Routes[fqdn], resolvers...) - } - - // Set FallbackResolvers as the default resolvers in the - // scenarios that can't handle a purely split-DNS config. See - // https://github.com/tailscale/tailscale/issues/1743 for - // details. - switch { - case len(dcfg.DefaultResolvers) != 0: - // Default resolvers already set. - case !prefs.ExitNodeID().IsZero(): - // When using an exit node, we send all DNS traffic to the exit node, so - // we don't need a fallback resolver. - // - // However, if the exit node is too old to run a DoH DNS proxy, then we - // need to use a fallback resolver as it's very likely the LAN resolvers - // will become unreachable. - // - // This is especially important on Apple OSes, where - // adding the default route to the tunnel interface makes - // it "primary", and we MUST provide VPN-sourced DNS - // settings or we break all DNS resolution. - // - // https://github.com/tailscale/tailscale/issues/1713 - addDefault(nm.DNS.FallbackResolvers) - case len(dcfg.Routes) == 0: - // No settings requiring split DNS, no problem. - } - - return dcfg -} - // SetTCPHandlerForFunnelFlow sets the TCP handler for Funnel flows. // It should only be called before the LocalBackend is used. func (b *LocalBackend) SetTCPHandlerForFunnelFlow(h func(src netip.AddrPort, dstPort uint16) (handler func(net.Conn))) { @@ -6124,14 +5804,6 @@ func (b *LocalBackend) setAutoExitNodeIDLockedOnEntry(unlock unlockOnce) (newPre return newPrefs } -func (nb *nodeBackend) SetNetMap(nm *netmap.NetworkMap) { - nb.mu.Lock() - defer nb.mu.Unlock() - nb.netMap = nm - nb.updateNodeByAddrLocked() - nb.updatePeersLocked() -} - // setNetMapLocked updates the LocalBackend state to reflect the newly // received nm. If nm is nil, it resets all configuration as though // Tailscale is turned off. @@ -6206,67 +5878,6 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.driveNotifyCurrentSharesLocked() } -func (nb *nodeBackend) updateNodeByAddrLocked() { - nm := nb.netMap - if nm == nil { - nb.nodeByAddr = nil - return - } - - // Update the nodeByAddr index. - if nb.nodeByAddr == nil { - nb.nodeByAddr = map[netip.Addr]tailcfg.NodeID{} - } - // First pass, mark everything unwanted. - for k := range nb.nodeByAddr { - nb.nodeByAddr[k] = 0 - } - addNode := func(n tailcfg.NodeView) { - for _, ipp := range n.Addresses().All() { - if ipp.IsSingleIP() { - nb.nodeByAddr[ipp.Addr()] = n.ID() - } - } - } - if nm.SelfNode.Valid() { - addNode(nm.SelfNode) - } - for _, p := range nm.Peers { - addNode(p) - } - // Third pass, actually delete the unwanted items. - for k, v := range nb.nodeByAddr { - if v == 0 { - delete(nb.nodeByAddr, k) - } - } -} - -func (nb *nodeBackend) updatePeersLocked() { - nm := nb.netMap - if nm == nil { - nb.peers = nil - return - } - - // First pass, mark everything unwanted. - for k := range nb.peers { - nb.peers[k] = tailcfg.NodeView{} - } - - // Second pass, add everything wanted. - for _, p := range nm.Peers { - mak.Set(&nb.peers, p.ID(), p) - } - - // Third pass, remove deleted things. - for k, v := range nb.peers { - if !v.Valid() { - delete(nb.peers, k) - } - } -} - // responseBodyWrapper wraps an io.ReadCloser and stores // the number of bytesRead. 
type responseBodyWrapper struct { @@ -6647,27 +6258,6 @@ func (b *LocalBackend) TestOnlyPublicKeys() (machineKey key.MachinePublic, nodeK return mk, nk } -// PeerHasCap reports whether the peer contains the given capability string, -// with any value(s). -func (nb *nodeBackend) PeerHasCap(peer tailcfg.NodeView, wantCap tailcfg.PeerCapability) bool { - if !peer.Valid() { - return false - } - - nb.mu.Lock() - defer nb.mu.Unlock() - for _, ap := range peer.Addresses().All() { - if nb.peerHasCapLocked(ap.Addr(), wantCap) { - return true - } - } - return false -} - -func (nb *nodeBackend) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { - return nb.peerCapsLocked(addr).HasCapability(wantCap) -} - // SetDNS adds a DNS record for the given domain name & TXT record // value. // @@ -6717,70 +6307,6 @@ func peerAPIPorts(peer tailcfg.NodeView) (p4, p6 uint16) { return } -// peerAPIURL returns an HTTP URL for the peer's peerapi service, -// without a trailing slash. -// -// If ip or port is the zero value then it returns the empty string. -func peerAPIURL(ip netip.Addr, port uint16) string { - if port == 0 || !ip.IsValid() { - return "" - } - return fmt.Sprintf("http://%v", netip.AddrPortFrom(ip, port)) -} - -func (nb *nodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool { - return nb.PeerAPIBase(p) != "" -} - -// PeerAPIBase returns the "http://ip:port" URL base to reach peer's PeerAPI, -// or the empty string if the peer is invalid or doesn't support PeerAPI. -func (nb *nodeBackend) PeerAPIBase(p tailcfg.NodeView) string { - nb.mu.Lock() - nm := nb.netMap - nb.mu.Unlock() - return peerAPIBase(nm, p) -} - -// peerAPIBase returns the "http://ip:port" URL base to reach peer's peerAPI. -// It returns the empty string if the peer doesn't support the peerapi -// or there's no matching address family based on the netmap's own addresses. -func peerAPIBase(nm *netmap.NetworkMap, peer tailcfg.NodeView) string { - if nm == nil || !peer.Valid() || !peer.Hostinfo().Valid() { - return "" - } - - var have4, have6 bool - addrs := nm.GetAddresses() - for _, a := range addrs.All() { - if !a.IsSingleIP() { - continue - } - switch { - case a.Addr().Is4(): - have4 = true - case a.Addr().Is6(): - have6 = true - } - } - p4, p6 := peerAPIPorts(peer) - switch { - case have4 && p4 != 0: - return peerAPIURL(nodeIP(peer, netip.Addr.Is4), p4) - case have6 && p6 != 0: - return peerAPIURL(nodeIP(peer, netip.Addr.Is6), p6) - } - return "" -} - -func nodeIP(n tailcfg.NodeView, pred func(netip.Addr) bool) netip.Addr { - for _, pfx := range n.Addresses().All() { - if pfx.IsSingleIP() && pred(pfx.Addr()) { - return pfx.Addr() - } - } - return netip.Addr{} -} - func (b *LocalBackend) CheckIPForwarding() error { if b.sys.IsNetstackRouter() { return nil @@ -6978,12 +6504,6 @@ func exitNodeCanProxyDNS(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg return "", false } -func (nb *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { - nb.mu.Lock() - defer nb.mu.Unlock() - return exitNodeCanProxyDNS(nb.netMap, nb.peers, exitNodeID) -} - // wireguardExitNodeDNSResolvers returns the DNS resolvers to use for a // WireGuard-only exit node, if it has resolver addresses. 
func wireguardExitNodeDNSResolvers(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg.NodeView, exitNodeID tailcfg.StableNodeID) ([]*dnstype.Resolver, bool) { diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index fe4973723..fb77f38eb 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -4,16 +4,25 @@ package ipnlocal import ( + "cmp" "net/netip" + "slices" "sync" "sync/atomic" "go4.org/netipx" "tailscale.com/ipn" + "tailscale.com/net/dns" + "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" + "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" + "tailscale.com/types/ptr" + "tailscale.com/types/views" + "tailscale.com/util/dnsname" + "tailscale.com/util/mak" "tailscale.com/util/slicesx" "tailscale.com/wgengine/filter" ) @@ -201,6 +210,239 @@ func (nb *nodeBackend) Peers() []tailcfg.NodeView { return slicesx.MapValues(nb.peers) } +func (nb *nodeBackend) PeersForTest() []tailcfg.NodeView { + nb.mu.Lock() + defer nb.mu.Unlock() + ret := slicesx.MapValues(nb.peers) + slices.SortFunc(ret, func(a, b tailcfg.NodeView) int { + return cmp.Compare(a.ID(), b.ID()) + }) + return ret +} + +// AppendMatchingPeers returns base with all peers that match pred appended. +// +// It acquires b.mu to read the netmap but releases it before calling pred. +func (nb *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView { + var peers []tailcfg.NodeView + + nb.mu.Lock() + if nb.netMap != nil { + // All fields on b.netMap are immutable, so this is + // safe to copy and use outside the lock. + peers = nb.netMap.Peers + } + nb.mu.Unlock() + + ret := base + for _, peer := range peers { + // The peers in b.netMap don't contain updates made via + // UpdateNetmapDelta. So only use PeerView in b.netMap for its NodeID, + // and then look up the latest copy in b.peers which is updated in + // response to UpdateNetmapDelta edits. + nb.mu.Lock() + peer, ok := nb.peers[peer.ID()] + nb.mu.Unlock() + if ok && pred(peer) { + ret = append(ret, peer) + } + } + return ret +} + +// PeerCaps returns the capabilities that remote src IP has to +// ths current node. +func (nb *nodeBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { + nb.mu.Lock() + defer nb.mu.Unlock() + return nb.peerCapsLocked(src) +} + +func (nb *nodeBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap { + if nb.netMap == nil { + return nil + } + filt := nb.filterAtomic.Load() + if filt == nil { + return nil + } + addrs := nb.netMap.GetAddresses() + for i := range addrs.Len() { + a := addrs.At(i) + if !a.IsSingleIP() { + continue + } + dst := a.Addr() + if dst.BitLen() == src.BitLen() { // match on family + return filt.CapsWithValues(src, dst) + } + } + return nil +} + +// PeerHasCap reports whether the peer contains the given capability string, +// with any value(s). 
+func (nb *nodeBackend) PeerHasCap(peer tailcfg.NodeView, wantCap tailcfg.PeerCapability) bool { + if !peer.Valid() { + return false + } + + nb.mu.Lock() + defer nb.mu.Unlock() + for _, ap := range peer.Addresses().All() { + if nb.peerHasCapLocked(ap.Addr(), wantCap) { + return true + } + } + return false +} + +func (nb *nodeBackend) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { + return nb.peerCapsLocked(addr).HasCapability(wantCap) +} + +func (nb *nodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool { + return nb.PeerAPIBase(p) != "" +} + +// PeerAPIBase returns the "http://ip:port" URL base to reach peer's PeerAPI, +// or the empty string if the peer is invalid or doesn't support PeerAPI. +func (nb *nodeBackend) PeerAPIBase(p tailcfg.NodeView) string { + nb.mu.Lock() + nm := nb.netMap + nb.mu.Unlock() + return peerAPIBase(nm, p) +} + +func nodeIP(n tailcfg.NodeView, pred func(netip.Addr) bool) netip.Addr { + for _, pfx := range n.Addresses().All() { + if pfx.IsSingleIP() && pred(pfx.Addr()) { + return pfx.Addr() + } + } + return netip.Addr{} +} + +func (nb *nodeBackend) NetMap() *netmap.NetworkMap { + nb.mu.Lock() + defer nb.mu.Unlock() + return nb.netMap +} + +func (nb *nodeBackend) netMapWithPeers() *netmap.NetworkMap { + nb.mu.Lock() + defer nb.mu.Unlock() + if nb.netMap == nil { + return nil + } + nm := ptr.To(*nb.netMap) // shallow clone + nm.Peers = slicesx.MapValues(nb.peers) + slices.SortFunc(nm.Peers, func(a, b tailcfg.NodeView) int { + return cmp.Compare(a.ID(), b.ID()) + }) + return nm +} + +func (nb *nodeBackend) SetNetMap(nm *netmap.NetworkMap) { + nb.mu.Lock() + defer nb.mu.Unlock() + nb.netMap = nm + nb.updateNodeByAddrLocked() + nb.updatePeersLocked() +} + +func (nb *nodeBackend) updateNodeByAddrLocked() { + nm := nb.netMap + if nm == nil { + nb.nodeByAddr = nil + return + } + + // Update the nodeByAddr index. + if nb.nodeByAddr == nil { + nb.nodeByAddr = map[netip.Addr]tailcfg.NodeID{} + } + // First pass, mark everything unwanted. + for k := range nb.nodeByAddr { + nb.nodeByAddr[k] = 0 + } + addNode := func(n tailcfg.NodeView) { + for _, ipp := range n.Addresses().All() { + if ipp.IsSingleIP() { + nb.nodeByAddr[ipp.Addr()] = n.ID() + } + } + } + if nm.SelfNode.Valid() { + addNode(nm.SelfNode) + } + for _, p := range nm.Peers { + addNode(p) + } + // Third pass, actually delete the unwanted items. + for k, v := range nb.nodeByAddr { + if v == 0 { + delete(nb.nodeByAddr, k) + } + } +} + +func (nb *nodeBackend) updatePeersLocked() { + nm := nb.netMap + if nm == nil { + nb.peers = nil + return + } + + // First pass, mark everything unwanted. + for k := range nb.peers { + nb.peers[k] = tailcfg.NodeView{} + } + + // Second pass, add everything wanted. + for _, p := range nm.Peers { + mak.Set(&nb.peers, p.ID(), p) + } + + // Third pass, remove deleted things. + for k, v := range nb.peers { + if !v.Valid() { + delete(nb.peers, k) + } + } +} + +func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) { + nb.mu.Lock() + defer nb.mu.Unlock() + if nb.netMap == nil || len(nb.peers) == 0 { + return false + } + + // Locally cloned mutable nodes, to avoid calling AsStruct (clone) + // multiple times on a node if it's mutated multiple times in this + // call (e.g. 
its endpoints + online status both change) + var mutableNodes map[tailcfg.NodeID]*tailcfg.Node + + for _, m := range muts { + n, ok := mutableNodes[m.NodeIDBeingMutated()] + if !ok { + nv, ok := nb.peers[m.NodeIDBeingMutated()] + if !ok { + // TODO(bradfitz): unexpected metric? + return false + } + n = nv.AsStruct() + mak.Set(&mutableNodes, nv.ID(), n) + } + m.Apply(n) + } + for nid, n := range mutableNodes { + nb.peers[nid] = n.View() + } + return true +} + // unlockedNodesPermitted reports whether any peer with theUnsignedPeerAPIOnly bool set true has any of its allowed IPs // in the specified packet filter. // @@ -216,3 +458,208 @@ func (nb *nodeBackend) unlockedNodesPermitted(packetFilter []filter.Match) bool func (nb *nodeBackend) filter() *filter.Filter { return nb.filterAtomic.Load() } + +func (nb *nodeBackend) setFilter(f *filter.Filter) { + nb.filterAtomic.Store(f) +} + +func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { + nb.mu.Lock() + defer nb.mu.Unlock() + return dnsConfigForNetmap(nb.netMap, nb.peers, prefs, selfExpired, logf, versionOS) +} + +func (nb *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { + nb.mu.Lock() + defer nb.mu.Unlock() + return exitNodeCanProxyDNS(nb.netMap, nb.peers, exitNodeID) +} + +// dnsConfigForNetmap returns a *dns.Config for the given netmap, +// prefs, client OS version, and cloud hosting environment. +// +// The versionOS is a Tailscale-style version ("iOS", "macOS") and not +// a runtime.GOOS. +func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg.NodeView, prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { + if nm == nil { + return nil + } + + // If the current node's key is expired, then we don't program any DNS + // configuration into the operating system. This ensures that if the + // DNS configuration specifies a DNS server that is only reachable over + // Tailscale, we don't break connectivity for the user. + // + // TODO(andrew-d): this also stops returning anything from quad-100; we + // could do the same thing as having "CorpDNS: false" and keep that but + // not program the OS? + if selfExpired { + return &dns.Config{} + } + + dcfg := &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: map[dnsname.FQDN][]netip.Addr{}, + } + + // selfV6Only is whether we only have IPv6 addresses ourselves. + selfV6Only := nm.GetAddresses().ContainsFunc(tsaddr.PrefixIs6) && + !nm.GetAddresses().ContainsFunc(tsaddr.PrefixIs4) + dcfg.OnlyIPv6 = selfV6Only + + wantAAAA := nm.AllCaps.Contains(tailcfg.NodeAttrMagicDNSPeerAAAA) + + // Populate MagicDNS records. We do this unconditionally so that + // quad-100 can always respond to MagicDNS queries, even if the OS + // isn't configured to make MagicDNS resolution truly + // magic. Details in + // https://github.com/tailscale/tailscale/issues/1886. + set := func(name string, addrs views.Slice[netip.Prefix]) { + if addrs.Len() == 0 || name == "" { + return + } + fqdn, err := dnsname.ToFQDN(name) + if err != nil { + return // TODO: propagate error? 
+ } + var have4 bool + for _, addr := range addrs.All() { + if addr.Addr().Is4() { + have4 = true + break + } + } + var ips []netip.Addr + for _, addr := range addrs.All() { + if selfV6Only { + if addr.Addr().Is6() { + ips = append(ips, addr.Addr()) + } + continue + } + // If this node has an IPv4 address, then + // remove peers' IPv6 addresses for now, as we + // don't guarantee that the peer node actually + // can speak IPv6 correctly. + // + // https://github.com/tailscale/tailscale/issues/1152 + // tracks adding the right capability reporting to + // enable AAAA in MagicDNS. + if addr.Addr().Is6() && have4 && !wantAAAA { + continue + } + ips = append(ips, addr.Addr()) + } + dcfg.Hosts[fqdn] = ips + } + set(nm.Name, nm.GetAddresses()) + for _, peer := range peers { + set(peer.Name(), peer.Addresses()) + } + for _, rec := range nm.DNS.ExtraRecords { + switch rec.Type { + case "", "A", "AAAA": + // Treat these all the same for now: infer from the value + default: + // TODO: more + continue + } + ip, err := netip.ParseAddr(rec.Value) + if err != nil { + // Ignore. + continue + } + fqdn, err := dnsname.ToFQDN(rec.Name) + if err != nil { + continue + } + dcfg.Hosts[fqdn] = append(dcfg.Hosts[fqdn], ip) + } + + if !prefs.CorpDNS() { + return dcfg + } + + for _, dom := range nm.DNS.Domains { + fqdn, err := dnsname.ToFQDN(dom) + if err != nil { + logf("[unexpected] non-FQDN search domain %q", dom) + } + dcfg.SearchDomains = append(dcfg.SearchDomains, fqdn) + } + if nm.DNS.Proxied { // actually means "enable MagicDNS" + for _, dom := range magicDNSRootDomains(nm) { + dcfg.Routes[dom] = nil // resolve internally with dcfg.Hosts + } + } + + addDefault := func(resolvers []*dnstype.Resolver) { + dcfg.DefaultResolvers = append(dcfg.DefaultResolvers, resolvers...) + } + + // If we're using an exit node and that exit node is new enough (1.19.x+) + // to run a DoH DNS proxy, then send all our DNS traffic through it. + if dohURL, ok := exitNodeCanProxyDNS(nm, peers, prefs.ExitNodeID()); ok { + addDefault([]*dnstype.Resolver{{Addr: dohURL}}) + return dcfg + } + + // If the user has set default resolvers ("override local DNS"), prefer to + // use those resolvers as the default, otherwise if there are WireGuard exit + // node resolvers, use those as the default. + if len(nm.DNS.Resolvers) > 0 { + addDefault(nm.DNS.Resolvers) + } else { + if resolvers, ok := wireguardExitNodeDNSResolvers(nm, peers, prefs.ExitNodeID()); ok { + addDefault(resolvers) + } + } + + for suffix, resolvers := range nm.DNS.Routes { + fqdn, err := dnsname.ToFQDN(suffix) + if err != nil { + logf("[unexpected] non-FQDN route suffix %q", suffix) + } + + // Create map entry even if len(resolvers) == 0; Issue 2706. + // This lets the control plane send ExtraRecords for which we + // can authoritatively answer "name not exists" for when the + // control plane also sends this explicit but empty route + // making it as something we handle. + // + // While we're already populating it, might as well size the + // slice appropriately. + // Per #9498 the exact requirements of nil vs empty slice remain + // unclear, this is a haunted graveyard to be resolved. + dcfg.Routes[fqdn] = make([]*dnstype.Resolver, 0, len(resolvers)) + dcfg.Routes[fqdn] = append(dcfg.Routes[fqdn], resolvers...) + } + + // Set FallbackResolvers as the default resolvers in the + // scenarios that can't handle a purely split-DNS config. See + // https://github.com/tailscale/tailscale/issues/1743 for + // details. 
+ switch { + case len(dcfg.DefaultResolvers) != 0: + // Default resolvers already set. + case !prefs.ExitNodeID().IsZero(): + // When using an exit node, we send all DNS traffic to the exit node, so + // we don't need a fallback resolver. + // + // However, if the exit node is too old to run a DoH DNS proxy, then we + // need to use a fallback resolver as it's very likely the LAN resolvers + // will become unreachable. + // + // This is especially important on Apple OSes, where + // adding the default route to the tunnel interface makes + // it "primary", and we MUST provide VPN-sourced DNS + // settings or we break all DNS resolution. + // + // https://github.com/tailscale/tailscale/issues/1713 + addDefault(nm.DNS.FallbackResolvers) + case len(dcfg.Routes) == 0: + // No settings requiring split DNS, no problem. + } + + return dcfg +} diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 675623f33..84aaecf7e 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -36,6 +36,7 @@ import ( "tailscale.com/net/netutil" "tailscale.com/net/sockstats" "tailscale.com/tailcfg" + "tailscale.com/types/netmap" "tailscale.com/types/views" "tailscale.com/util/clientmetric" "tailscale.com/util/httpm" @@ -1094,6 +1095,48 @@ func parseDriveFileExtensionForLog(path string) string { return fileExt } +// peerAPIURL returns an HTTP URL for the peer's peerapi service, +// without a trailing slash. +// +// If ip or port is the zero value then it returns the empty string. +func peerAPIURL(ip netip.Addr, port uint16) string { + if port == 0 || !ip.IsValid() { + return "" + } + return fmt.Sprintf("http://%v", netip.AddrPortFrom(ip, port)) +} + +// peerAPIBase returns the "http://ip:port" URL base to reach peer's peerAPI. +// It returns the empty string if the peer doesn't support the peerapi +// or there's no matching address family based on the netmap's own addresses. +func peerAPIBase(nm *netmap.NetworkMap, peer tailcfg.NodeView) string { + if nm == nil || !peer.Valid() || !peer.Hostinfo().Valid() { + return "" + } + + var have4, have6 bool + addrs := nm.GetAddresses() + for _, a := range addrs.All() { + if !a.IsSingleIP() { + continue + } + switch { + case a.Addr().Is4(): + have4 = true + case a.Addr().Is6(): + have6 = true + } + } + p4, p6 := peerAPIPorts(peer) + switch { + case have4 && p4 != 0: + return peerAPIURL(nodeIP(peer, netip.Addr.Is4), p4) + case have6 && p6 != 0: + return peerAPIURL(nodeIP(peer, netip.Addr.Is6), p6) + } + return "" +} + // newFakePeerAPIListener creates a new net.Listener that acts like // it's listening on the provided IP address and on TCP port 1. // From b70c0c50fd73f134b8618792d89018cb444d8987 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 9 May 2025 23:12:00 -0500 Subject: [PATCH 0853/1708] ssh/tailssh: fix data race during execution of test In tailssh.go:1284, (*sshSession).startNewRecording starts a fire-and-forget goroutine that can outlive the test that triggered its creation. Among other things, it uses ss.logf, and may call it after the test has already returned. Since we typically use (*testing.T).Logf as the logger, this results in a data race and causes flaky tests. Ideally, we should fix the root cause and/or use a goroutines.Tracker to wait for the goroutine to complete. But with the release approaching, it's too risky to make such changes now. As a workaround, we update the tests to use tstest.WhileTestRunningLogger, which logs to t.Logf while the test is running and disables logging once the test finishes, avoiding the race. 
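For reference, the safe pattern the updated tests follow is roughly the
following (the test name and the server literal here are illustrative only,
not part of this change):

	func TestSomething(t *testing.T) {
		logf := tstest.WhileTestRunningLogger(t)
		s := &server{logf: logf}
		// s may spawn fire-and-forget goroutines (e.g. the session
		// recording uploader) that keep calling logf after the test
		// returns; once the test finishes, logf stops forwarding to
		// t.Logf instead of racing with the testing framework.
		_ = s
	}
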
While there, we also fix TestSSHAuthFlow not to use log.Printf. Updates #15568 Updates #7707 (probably related) Signed-off-by: Nick Khyl --- ssh/tailssh/tailssh_test.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 980c77414..79479d7fb 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -16,7 +16,6 @@ import ( "errors" "fmt" "io" - "log" "net" "net/http" "net/http/httptest" @@ -48,7 +47,6 @@ import ( "tailscale.com/tsd" "tailscale.com/tstest" "tailscale.com/types/key" - "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/netmap" "tailscale.com/types/ptr" @@ -230,7 +228,7 @@ func TestMatchRule(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c := &conn{ info: tt.ci, - srv: &server{logf: t.Logf}, + srv: &server{logf: tstest.WhileTestRunningLogger(t)}, } got, gotUser, gotAcceptEnv, err := c.matchRule(tt.rule) if err != tt.wantErr { @@ -349,7 +347,7 @@ func TestEvalSSHPolicy(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c := &conn{ info: tt.ci, - srv: &server{logf: t.Logf}, + srv: &server{logf: tstest.WhileTestRunningLogger(t)}, } got, gotUser, gotAcceptEnv, match := c.evalSSHPolicy(tt.policy) if match != tt.wantMatch { @@ -491,7 +489,7 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { }) s := &server{ - logf: t.Logf, + logf: tstest.WhileTestRunningLogger(t), lb: &localState{ sshEnabled: true, matchingRule: newSSHRule( @@ -553,7 +551,7 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - s.logf = t.Logf + s.logf = tstest.WhileTestRunningLogger(t) tstest.Replace(t, &handler, tt.handler) sc, dc := memnet.NewTCPConn(src, dst, 1024) var wg sync.WaitGroup @@ -621,7 +619,7 @@ func TestMultipleRecorders(t *testing.T) { }) s := &server{ - logf: t.Logf, + logf: tstest.WhileTestRunningLogger(t), lb: &localState{ sshEnabled: true, matchingRule: newSSHRule( @@ -714,7 +712,7 @@ func TestSSHRecordingNonInteractive(t *testing.T) { }) s := &server{ - logf: t.Logf, + logf: tstest.WhileTestRunningLogger(t), lb: &localState{ sshEnabled: true, matchingRule: newSSHRule( @@ -887,13 +885,15 @@ func TestSSHAuthFlow(t *testing.T) { }, } s := &server{ - logf: log.Printf, + logf: tstest.WhileTestRunningLogger(t), } defer s.Shutdown() src, dst := must.Get(netip.ParseAddrPort("100.100.100.101:2231")), must.Get(netip.ParseAddrPort("100.100.100.102:22")) for _, tc := range tests { for _, authMethods := range [][]string{nil, {"publickey", "password"}, {"password", "publickey"}} { t.Run(fmt.Sprintf("%s-skip-none-auth-%v", tc.name, strings.Join(authMethods, "-then-")), func(t *testing.T) { + s.logf = tstest.WhileTestRunningLogger(t) + sc, dc := memnet.NewTCPConn(src, dst, 1024) s.lb = tc.state sshUser := "alice" @@ -1036,7 +1036,7 @@ func TestSSHAuthFlow(t *testing.T) { } func TestSSH(t *testing.T) { - var logf logger.Logf = t.Logf + logf := tstest.WhileTestRunningLogger(t) sys := tsd.NewSystem() eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { From b02de31563fe42fcfd94226c5407e94b7fb858a4 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Mon, 12 May 2025 10:25:31 -0400 Subject: [PATCH 0854/1708] prober: update cert check for prober (#15919) OCSP has been removed from the LE certs. Use CRL verification instead. 
If a cert provides a CRL, check its revocation status, if no CRL is provided and otherwise is valid, pass the check. Fixes #15912 Signed-off-by: Mike O'Driscoll Co-authored-by: Simon Law --- prober/tls.go | 66 ++++++++++--------- prober/tls_test.go | 153 ++++++++++++++++++++++++++++++--------------- 2 files changed, 137 insertions(+), 82 deletions(-) diff --git a/prober/tls.go b/prober/tls.go index 787df05c2..4fb4aa9c6 100644 --- a/prober/tls.go +++ b/prober/tls.go @@ -4,7 +4,6 @@ package prober import ( - "bytes" "context" "crypto/tls" "crypto/x509" @@ -15,12 +14,14 @@ import ( "net/netip" "time" - "github.com/pkg/errors" - "golang.org/x/crypto/ocsp" "tailscale.com/util/multierr" ) const expiresSoon = 7 * 24 * time.Hour // 7 days from now +// Let’s Encrypt promises to issue certificates with CRL servers after 2025-05-07: +// https://letsencrypt.org/2024/12/05/ending-ocsp/ +// https://github.com/tailscale/tailscale/issues/15912 +const letsEncryptStartedStaplingCRL int64 = 1746576000 // 2025-05-07 00:00:00 UTC // TLS returns a Probe that healthchecks a TLS endpoint. // @@ -106,50 +107,55 @@ func validateConnState(ctx context.Context, cs *tls.ConnectionState) (returnerr } } - if len(leafCert.OCSPServer) == 0 { - errs = append(errs, fmt.Errorf("no OCSP server presented in leaf cert for %v", leafCert.Subject)) + if len(leafCert.CRLDistributionPoints) == 0 { + if leafCert.NotBefore.Before(time.Unix(letsEncryptStartedStaplingCRL, 0)) { + // Certificate might not have a CRL. + return + } + errs = append(errs, fmt.Errorf("no CRL server presented in leaf cert for %v", leafCert.Subject)) return } - ocspResp, err := getOCSPResponse(ctx, leafCert.OCSPServer[0], leafCert, issuerCert) + err := checkCertCRL(ctx, leafCert.CRLDistributionPoints[0], leafCert, issuerCert) if err != nil { - errs = append(errs, errors.Wrapf(err, "OCSP verification failed for %v", leafCert.Subject)) - return - } - - if ocspResp.Status == ocsp.Unknown { - errs = append(errs, fmt.Errorf("unknown OCSP verification status for %v", leafCert.Subject)) - } - - if ocspResp.Status == ocsp.Revoked { - errs = append(errs, fmt.Errorf("cert for %v has been revoked on %v, reason: %v", leafCert.Subject, ocspResp.RevokedAt, ocspResp.RevocationReason)) + errs = append(errs, fmt.Errorf("CRL verification failed for %v: %w", leafCert.Subject, err)) } return } -func getOCSPResponse(ctx context.Context, ocspServer string, leafCert, issuerCert *x509.Certificate) (*ocsp.Response, error) { - reqb, err := ocsp.CreateRequest(leafCert, issuerCert, nil) - if err != nil { - return nil, errors.Wrap(err, "could not create OCSP request") - } - hreq, err := http.NewRequestWithContext(ctx, "POST", ocspServer, bytes.NewReader(reqb)) +func checkCertCRL(ctx context.Context, crlURL string, leafCert, issuerCert *x509.Certificate) error { + hreq, err := http.NewRequestWithContext(ctx, "GET", crlURL, nil) if err != nil { - return nil, errors.Wrap(err, "could not create OCSP POST request") + return fmt.Errorf("could not create CRL GET request: %w", err) } - hreq.Header.Add("Content-Type", "application/ocsp-request") - hreq.Header.Add("Accept", "application/ocsp-response") hresp, err := http.DefaultClient.Do(hreq) if err != nil { - return nil, errors.Wrap(err, "OCSP request failed") + return fmt.Errorf("CRL request failed: %w", err) } defer hresp.Body.Close() if hresp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("ocsp: non-200 status code from OCSP server: %s", hresp.Status) + return fmt.Errorf("crl: non-200 status code from CRL server: %s", hresp.Status) } lr 
:= io.LimitReader(hresp.Body, 10<<20) // 10MB - ocspB, err := io.ReadAll(lr) + crlB, err := io.ReadAll(lr) if err != nil { - return nil, err + return err } - return ocsp.ParseResponse(ocspB, issuerCert) + + crl, err := x509.ParseRevocationList(crlB) + if err != nil { + return fmt.Errorf("could not parse CRL: %w", err) + } + + if err := crl.CheckSignatureFrom(issuerCert); err != nil { + return fmt.Errorf("could not verify CRL signature: %w", err) + } + + for _, revoked := range crl.RevokedCertificateEntries { + if revoked.SerialNumber.Cmp(leafCert.SerialNumber) == 0 { + return fmt.Errorf("cert for %v has been revoked on %v, reason: %v", leafCert.Subject, revoked.RevocationTime, revoked.ReasonCode) + } + } + + return nil } diff --git a/prober/tls_test.go b/prober/tls_test.go index 5bfb739db..cf5b60cb8 100644 --- a/prober/tls_test.go +++ b/prober/tls_test.go @@ -6,7 +6,6 @@ package prober import ( "bytes" "context" - "crypto" "crypto/rand" "crypto/rsa" "crypto/tls" @@ -20,8 +19,6 @@ import ( "strings" "testing" "time" - - "golang.org/x/crypto/ocsp" ) var leafCert = x509.Certificate{ @@ -118,11 +115,6 @@ func TestCertExpiration(t *testing.T) { }, "one of the certs expires in", }, - { - "valid duration but no OCSP", - func() *x509.Certificate { return &leafCert }, - "no OCSP server presented in leaf cert for CN=tlsprobe.test", - }, } { t.Run(tt.name, func(t *testing.T) { cs := &tls.ConnectionState{PeerCertificates: []*x509.Certificate{tt.cert()}} @@ -134,93 +126,150 @@ func TestCertExpiration(t *testing.T) { } } -type ocspServer struct { - issuer *x509.Certificate - responderCert *x509.Certificate - template *ocsp.Response - priv crypto.Signer +type CRLServer struct { + crlBytes []byte } -func (s *ocspServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if s.template == nil { +func (s *CRLServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if s.crlBytes == nil { w.WriteHeader(http.StatusInternalServerError) return } - resp, err := ocsp.CreateResponse(s.issuer, s.responderCert, *s.template, s.priv) - if err != nil { - panic(err) - } - w.Write(resp) + w.Header().Set("Content-Type", "application/pkix-crl") + w.WriteHeader(http.StatusOK) + w.Write(s.crlBytes) } -func TestOCSP(t *testing.T) { - issuerKey, err := rsa.GenerateKey(rand.Reader, 4096) +func TestCRL(t *testing.T) { + // Generate CA key and self-signed CA cert + caKey, err := rsa.GenerateKey(rand.Reader, 4096) if err != nil { t.Fatal(err) } - issuerBytes, err := x509.CreateCertificate(rand.Reader, &issuerCertTpl, &issuerCertTpl, &issuerKey.PublicKey, issuerKey) + caTpl := issuerCertTpl + caTpl.BasicConstraintsValid = true + caTpl.IsCA = true + caTpl.KeyUsage = x509.KeyUsageCertSign | x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature + caBytes, err := x509.CreateCertificate(rand.Reader, &caTpl, &caTpl, &caKey.PublicKey, caKey) if err != nil { t.Fatal(err) } - issuerCert, err := x509.ParseCertificate(issuerBytes) + caCert, err := x509.ParseCertificate(caBytes) if err != nil { t.Fatal(err) } - responderKey, err := rsa.GenerateKey(rand.Reader, 4096) + // Issue a leaf cert signed by the CA + leaf := leafCert + leaf.SerialNumber = big.NewInt(20001) + leaf.Issuer = caCert.Subject + leafKey, err := rsa.GenerateKey(rand.Reader, 4096) if err != nil { t.Fatal(err) } - // issuer cert template re-used here, but with a different key - responderBytes, err := x509.CreateCertificate(rand.Reader, &issuerCertTpl, &issuerCertTpl, &responderKey.PublicKey, responderKey) + leafBytes, err := x509.CreateCertificate(rand.Reader, &leaf, caCert, 
&leafKey.PublicKey, caKey) if err != nil { t.Fatal(err) } - responderCert, err := x509.ParseCertificate(responderBytes) + leafCertParsed, err := x509.ParseCertificate(leafBytes) if err != nil { t.Fatal(err) } - handler := &ocspServer{ - issuer: issuerCert, - responderCert: responderCert, - priv: issuerKey, + // Catch no CRL set by Let's Encrypt date. + noCRLCert := leafCert + noCRLCert.SerialNumber = big.NewInt(20002) + noCRLCert.CRLDistributionPoints = []string{} + noCRLCert.NotBefore = time.Unix(letsEncryptStartedStaplingCRL, 0).Add(-48 * time.Hour) + noCRLCert.Issuer = caCert.Subject + noCRLCertKey, err := rsa.GenerateKey(rand.Reader, 4096) + if err != nil { + t.Fatal(err) } - srv := httptest.NewUnstartedServer(handler) - srv.Start() - defer srv.Close() - - cert := leafCert - cert.OCSPServer = append(cert.OCSPServer, srv.URL) - key, err := rsa.GenerateKey(rand.Reader, 4096) + noCRLStapledBytes, err := x509.CreateCertificate(rand.Reader, &noCRLCert, caCert, &noCRLCertKey.PublicKey, caKey) if err != nil { t.Fatal(err) } - certBytes, err := x509.CreateCertificate(rand.Reader, &cert, issuerCert, &key.PublicKey, issuerKey) + noCRLStapledParsed, err := x509.ParseCertificate(noCRLStapledBytes) if err != nil { t.Fatal(err) } - parsed, err := x509.ParseCertificate(certBytes) + + crlServer := &CRLServer{crlBytes: nil} + srv := httptest.NewServer(crlServer) + defer srv.Close() + + // Create a CRL that revokes the leaf cert using x509.CreateRevocationList + now := time.Now() + revoked := []x509.RevocationListEntry{{ + SerialNumber: leaf.SerialNumber, + RevocationTime: now, + ReasonCode: 1, // Key compromise + }} + rl := x509.RevocationList{ + SignatureAlgorithm: caCert.SignatureAlgorithm, + Issuer: caCert.Subject, + ThisUpdate: now, + NextUpdate: now.Add(24 * time.Hour), + RevokedCertificateEntries: revoked, + Number: big.NewInt(1), + } + rlBytes, err := x509.CreateRevocationList(rand.Reader, &rl, caCert, caKey) + if err != nil { + t.Fatal(err) + } + + emptyRlBytes, err := x509.CreateRevocationList(rand.Reader, &x509.RevocationList{Number: big.NewInt(2)}, caCert, caKey) if err != nil { t.Fatal(err) } for _, tt := range []struct { - name string - resp *ocsp.Response - wantErr string + name string + cert *x509.Certificate + crlBytes []byte + wantErr string }{ - {"good response", &ocsp.Response{Status: ocsp.Good}, ""}, - {"unknown response", &ocsp.Response{Status: ocsp.Unknown}, "unknown OCSP verification status for CN=tlsprobe.test"}, - {"revoked response", &ocsp.Response{Status: ocsp.Revoked}, "cert for CN=tlsprobe.test has been revoked"}, - {"error 500 from ocsp", nil, "non-200 status code from OCSP"}, + { + "ValidCert", + leafCertParsed, + emptyRlBytes, + "", + }, + { + "RevokedCert", + leafCertParsed, + rlBytes, + "has been revoked on", + }, + { + "EmptyCRL", + leafCertParsed, + emptyRlBytes, + "", + }, + { + "NoCRL", + leafCertParsed, + nil, + "", + }, + { + "NotBeforeCRLStaplingDate", + noCRLStapledParsed, + nil, + "", + }, } { t.Run(tt.name, func(t *testing.T) { - handler.template = tt.resp - if handler.template != nil { - handler.template.SerialNumber = big.NewInt(1337) + cs := &tls.ConnectionState{PeerCertificates: []*x509.Certificate{tt.cert, caCert}} + if tt.crlBytes != nil { + crlServer.crlBytes = tt.crlBytes + tt.cert.CRLDistributionPoints = []string{srv.URL} + } else { + crlServer.crlBytes = nil + tt.cert.CRLDistributionPoints = []string{} } - cs := &tls.ConnectionState{PeerCertificates: []*x509.Certificate{parsed, issuerCert}} err := validateConnState(context.Background(), cs) if err 
== nil && tt.wantErr == "" { From d6dd74fe0e5301ddec51c427e144ea1b20291c80 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 12 May 2025 09:25:59 -0700 Subject: [PATCH 0855/1708] net/udprelay{/endpoint}: move ServerEndpoint tests (#15949) Commit 0841477 moved ServerEndpoint to an independent package. Move its tests over as well. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- net/udprelay/endpoint/endpoint_test.go | 110 +++++++++++++++++++++++++ net/udprelay/server_test.go | 97 ---------------------- 2 files changed, 110 insertions(+), 97 deletions(-) create mode 100644 net/udprelay/endpoint/endpoint_test.go diff --git a/net/udprelay/endpoint/endpoint_test.go b/net/udprelay/endpoint/endpoint_test.go new file mode 100644 index 000000000..f12a6e2f6 --- /dev/null +++ b/net/udprelay/endpoint/endpoint_test.go @@ -0,0 +1,110 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package endpoint + +import ( + "encoding/json" + "math" + "net/netip" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "tailscale.com/tstime" + "tailscale.com/types/key" +) + +func TestServerEndpointJSONUnmarshal(t *testing.T) { + tests := []struct { + name string + json []byte + wantErr bool + }{ + { + name: "valid", + json: []byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"30s","SteadyStateLifetime":"5m0s"}`), + wantErr: false, + }, + { + name: "invalid ServerDisco", + json: []byte(`{"ServerDisco":"1","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"30s","SteadyStateLifetime":"5m0s"}`), + wantErr: true, + }, + { + name: "invalid LamportID", + json: []byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":1.1,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"30s","SteadyStateLifetime":"5m0s"}`), + wantErr: true, + }, + { + name: "invalid AddrPorts", + json: []byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"30s","SteadyStateLifetime":"5m0s"}`), + wantErr: true, + }, + { + name: "invalid VNI", + json: []byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":18446744073709551615,"BindLifetime":"30s","SteadyStateLifetime":"5m0s"}`), + wantErr: true, + }, + { + name: "invalid BindLifetime", + json: []byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"5","SteadyStateLifetime":"5m0s"}`), + wantErr: true, + }, + { + name: "invalid SteadyStateLifetime", + json: []byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"30s","SteadyStateLifetime":"5"}`), + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var out ServerEndpoint + err := json.Unmarshal(tt.json, &out) + if tt.wantErr != (err != nil) { + t.Fatalf("wantErr: %v (err == 
nil): %v", tt.wantErr, err == nil) + } + if tt.wantErr { + return + } + }) + } +} + +func TestServerEndpointJSONMarshal(t *testing.T) { + tests := []struct { + name string + serverEndpoint ServerEndpoint + }{ + { + name: "valid roundtrip", + serverEndpoint: ServerEndpoint{ + ServerDisco: key.NewDisco().Public(), + LamportID: uint64(math.MaxUint64), + AddrPorts: []netip.AddrPort{netip.MustParseAddrPort("127.0.0.1:1"), netip.MustParseAddrPort("127.0.0.2:2")}, + VNI: 1<<24 - 1, + BindLifetime: tstime.GoDuration{Duration: time.Second * 30}, + SteadyStateLifetime: tstime.GoDuration{Duration: time.Minute * 5}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b, err := json.Marshal(&tt.serverEndpoint) + if err != nil { + t.Fatal(err) + } + var got ServerEndpoint + err = json.Unmarshal(b, &got) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(got, tt.serverEndpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { + t.Fatalf("ServerEndpoint unequal (-got +want)\n%s", diff) + } + }) + } +} diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index 9d1e77fcc..38c7ae5d9 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -5,8 +5,6 @@ package udprelay import ( "bytes" - "encoding/json" - "math" "net" "net/netip" "testing" @@ -17,8 +15,6 @@ import ( "go4.org/mem" "tailscale.com/disco" "tailscale.com/net/packet" - "tailscale.com/net/udprelay/endpoint" - "tailscale.com/tstime" "tailscale.com/types/key" ) @@ -214,96 +210,3 @@ func TestServer(t *testing.T) { t.Fatal("unexpected msg B->A") } } - -func TestServerEndpointJSONUnmarshal(t *testing.T) { - tests := []struct { - name string - json []byte - wantErr bool - }{ - { - name: "valid", - json: []byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"30s","SteadyStateLifetime":"5m0s"}`), - wantErr: false, - }, - { - name: "invalid ServerDisco", - json: []byte(`{"ServerDisco":"1","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"30s","SteadyStateLifetime":"5m0s"}`), - wantErr: true, - }, - { - name: "invalid LamportID", - json: []byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":1.1,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"30s","SteadyStateLifetime":"5m0s"}`), - wantErr: true, - }, - { - name: "invalid AddrPorts", - json: []byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"30s","SteadyStateLifetime":"5m0s"}`), - wantErr: true, - }, - { - name: "invalid VNI", - json: []byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":18446744073709551615,"BindLifetime":"30s","SteadyStateLifetime":"5m0s"}`), - wantErr: true, - }, - { - name: "invalid BindLifetime", - json: []byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"5","SteadyStateLifetime":"5m0s"}`), - wantErr: true, - }, - { - name: "invalid SteadyStateLifetime", - json: 
[]byte(`{"ServerDisco":"discokey:003cd7453e04a653eb0e7a18f206fc353180efadb2facfd05ebd6982a1392c7f","LamportID":18446744073709551615,"AddrPorts":["127.0.0.1:1","127.0.0.2:2"],"VNI":16777215,"BindLifetime":"30s","SteadyStateLifetime":"5"}`), - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var out endpoint.ServerEndpoint - err := json.Unmarshal(tt.json, &out) - if tt.wantErr != (err != nil) { - t.Fatalf("wantErr: %v (err == nil): %v", tt.wantErr, err == nil) - } - if tt.wantErr { - return - } - }) - } -} - -func TestServerEndpointJSONMarshal(t *testing.T) { - tests := []struct { - name string - serverEndpoint endpoint.ServerEndpoint - }{ - { - name: "valid roundtrip", - serverEndpoint: endpoint.ServerEndpoint{ - ServerDisco: key.NewDisco().Public(), - LamportID: uint64(math.MaxUint64), - AddrPorts: []netip.AddrPort{netip.MustParseAddrPort("127.0.0.1:1"), netip.MustParseAddrPort("127.0.0.2:2")}, - VNI: 1<<24 - 1, - BindLifetime: tstime.GoDuration{Duration: defaultBindLifetime}, - SteadyStateLifetime: tstime.GoDuration{Duration: defaultSteadyStateLifetime}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - b, err := json.Marshal(&tt.serverEndpoint) - if err != nil { - t.Fatal(err) - } - var got endpoint.ServerEndpoint - err = json.Unmarshal(b, &got) - if err != nil { - t.Fatal(err) - } - if diff := cmp.Diff(got, tt.serverEndpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { - t.Fatalf("ServerEndpoint unequal (-got +want)\n%s", diff) - } - }) - } -} From 2c16fcaa06d3b8ae7e4acb967ed072495ee08bc5 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Mon, 12 May 2025 17:26:23 +0100 Subject: [PATCH 0856/1708] util/linuxfw,wgengine/router: add new netfilter rules for HA ingresses (#15896) Add new rules to update DNAT rules for Kubernetes operator's HA ingress where it's expected that rules will be added/removed frequently (so we don't want to keep old rules around or rewrite existing rules unnecessarily): - allow deleting DNAT rules using metadata lookup - allow inserting DNAT rules if they don't already exist (using metadata lookup) Updates tailscale/tailscale#15895 Signed-off-by: Irbe Krumina Co-authored-by: chaosinthecrd --- util/linuxfw/fake_netfilter.go | 95 +++++++++++ util/linuxfw/iptables_for_svcs.go | 58 ++++++- util/linuxfw/iptables_for_svcs_test.go | 129 ++++++++++++++ util/linuxfw/nftables_for_svcs.go | 64 +++++++ util/linuxfw/nftables_for_svcs_test.go | 223 +++++++++++++++++++++---- util/linuxfw/nftables_runner.go | 20 ++- wgengine/router/router_linux_test.go | 8 + 7 files changed, 558 insertions(+), 39 deletions(-) create mode 100644 util/linuxfw/fake_netfilter.go diff --git a/util/linuxfw/fake_netfilter.go b/util/linuxfw/fake_netfilter.go new file mode 100644 index 000000000..329c3a213 --- /dev/null +++ b/util/linuxfw/fake_netfilter.go @@ -0,0 +1,95 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package linuxfw + +import ( + "net/netip" + + "tailscale.com/types/logger" +) + +// FakeNetfilterRunner is a fake netfilter runner for tests. +type FakeNetfilterRunner struct { + // services is a map that tracks the firewall rules added/deleted via + // EnsureDNATRuleForSvc/DeleteDNATRuleForSvc. + services map[string]struct { + VIPServiceIP netip.Addr + ClusterIP netip.Addr + } +} + +// NewFakeNetfilterRunner creates a new FakeNetfilterRunner. 
+func NewFakeNetfilterRunner() *FakeNetfilterRunner { + return &FakeNetfilterRunner{ + services: make(map[string]struct { + VIPServiceIP netip.Addr + ClusterIP netip.Addr + }), + } +} + +func (f *FakeNetfilterRunner) EnsureDNATRuleForSvc(svcName string, origDst, dst netip.Addr) error { + f.services[svcName] = struct { + VIPServiceIP netip.Addr + ClusterIP netip.Addr + }{origDst, dst} + return nil +} + +func (f *FakeNetfilterRunner) DeleteDNATRuleForSvc(svcName string, origDst, dst netip.Addr) error { + delete(f.services, svcName) + return nil +} + +func (f *FakeNetfilterRunner) GetServiceState() map[string]struct { + VIPServiceIP netip.Addr + ClusterIP netip.Addr +} { + return f.services +} + +func (f *FakeNetfilterRunner) HasIPV6() bool { + return true +} + +func (f *FakeNetfilterRunner) HasIPV6Filter() bool { + return true +} + +func (f *FakeNetfilterRunner) HasIPV6NAT() bool { + return true +} + +func (f *FakeNetfilterRunner) AddBase(tunname string) error { return nil } +func (f *FakeNetfilterRunner) DelBase() error { return nil } +func (f *FakeNetfilterRunner) AddChains() error { return nil } +func (f *FakeNetfilterRunner) DelChains() error { return nil } +func (f *FakeNetfilterRunner) AddHooks() error { return nil } +func (f *FakeNetfilterRunner) DelHooks(logf logger.Logf) error { return nil } +func (f *FakeNetfilterRunner) AddSNATRule() error { return nil } +func (f *FakeNetfilterRunner) DelSNATRule() error { return nil } +func (f *FakeNetfilterRunner) AddStatefulRule(tunname string) error { return nil } +func (f *FakeNetfilterRunner) DelStatefulRule(tunname string) error { return nil } +func (f *FakeNetfilterRunner) AddLoopbackRule(addr netip.Addr) error { return nil } +func (f *FakeNetfilterRunner) DelLoopbackRule(addr netip.Addr) error { return nil } +func (f *FakeNetfilterRunner) AddDNATRule(origDst, dst netip.Addr) error { return nil } +func (f *FakeNetfilterRunner) DNATWithLoadBalancer(origDst netip.Addr, dsts []netip.Addr) error { + return nil +} +func (f *FakeNetfilterRunner) EnsureSNATForDst(src, dst netip.Addr) error { return nil } +func (f *FakeNetfilterRunner) DNATNonTailscaleTraffic(tun string, dst netip.Addr) error { return nil } +func (f *FakeNetfilterRunner) ClampMSSToPMTU(tun string, addr netip.Addr) error { return nil } +func (f *FakeNetfilterRunner) AddMagicsockPortRule(port uint16, network string) error { return nil } +func (f *FakeNetfilterRunner) DelMagicsockPortRule(port uint16, network string) error { return nil } +func (f *FakeNetfilterRunner) DeletePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm PortMap) error { + return nil +} +func (f *FakeNetfilterRunner) DeleteSvc(svc, tun string, targetIPs []netip.Addr, pms []PortMap) error { + return nil +} +func (f *FakeNetfilterRunner) EnsurePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm PortMap) error { + return nil +} diff --git a/util/linuxfw/iptables_for_svcs.go b/util/linuxfw/iptables_for_svcs.go index 8e0f5d48d..2cd8716e4 100644 --- a/util/linuxfw/iptables_for_svcs.go +++ b/util/linuxfw/iptables_for_svcs.go @@ -13,6 +13,7 @@ import ( // This file contains functionality to insert portmapping rules for a 'service'. // These are currently only used by the Kubernetes operator proxies. // An iptables rule for such a service contains a comment with the service name. +// A 'service' corresponds to a VIPService as used by the Kubernetes operator. 
// EnsurePortMapRuleForSvc adds a prerouting rule that forwards traffic received // on match port and NOT on the provided interface to target IP and target port. @@ -24,10 +25,10 @@ func (i *iptablesRunner) EnsurePortMapRuleForSvc(svc, tun string, targetIP netip if err != nil { return fmt.Errorf("error checking if rule exists: %w", err) } - if !exists { - return table.Append("nat", "PREROUTING", args...) + if exists { + return nil } - return nil + return table.Append("nat", "PREROUTING", args...) } // DeleteMapRuleForSvc constructs a prerouting rule as would be created by @@ -40,10 +41,41 @@ func (i *iptablesRunner) DeletePortMapRuleForSvc(svc, excludeI string, targetIP if err != nil { return fmt.Errorf("error checking if rule exists: %w", err) } + if !exists { + return nil + } + return table.Delete("nat", "PREROUTING", args...) +} + +// EnsureDNATRuleForSvc adds a DNAT rule that forwards traffic from the +// VIPService IP address to a local address. This is used by the Kubernetes +// operator's network layer proxies to forward tailnet traffic for VIPServices +// to Kubernetes Services. +func (i *iptablesRunner) EnsureDNATRuleForSvc(svcName string, origDst, dst netip.Addr) error { + table := i.getIPTByAddr(dst) + args := argsForIngressRule(svcName, origDst, dst) + exists, err := table.Exists("nat", "PREROUTING", args...) + if err != nil { + return fmt.Errorf("error checking if rule exists: %w", err) + } if exists { - return table.Delete("nat", "PREROUTING", args...) + return nil } - return nil + return table.Append("nat", "PREROUTING", args...) +} + +// DeleteDNATRuleForSvc deletes a DNAT rule created by EnsureDNATRuleForSvc. +func (i *iptablesRunner) DeleteDNATRuleForSvc(svcName string, origDst, dst netip.Addr) error { + table := i.getIPTByAddr(dst) + args := argsForIngressRule(svcName, origDst, dst) + exists, err := table.Exists("nat", "PREROUTING", args...) + if err != nil { + return fmt.Errorf("error checking if rule exists: %w", err) + } + if !exists { + return nil + } + return table.Delete("nat", "PREROUTING", args...) } // DeleteSvc constructs all possible rules that would have been created by @@ -72,8 +104,24 @@ func argsForPortMapRule(svc, excludeI string, targetIP netip.Addr, pm PortMap) [ } } +func argsForIngressRule(svcName string, origDst, targetIP netip.Addr) []string { + c := commentForIngressSvc(svcName, origDst, targetIP) + return []string{ + "--destination", origDst.String(), + "-m", "comment", "--comment", c, + "-j", "DNAT", + "--to-destination", targetIP.String(), + } +} + // commentForSvc generates a comment to be added to an iptables DNAT rule for a // service. This is for iptables debugging/readability purposes only. func commentForSvc(svc string, pm PortMap) string { return fmt.Sprintf("%s:%s:%d -> %s:%d", svc, pm.Protocol, pm.MatchPort, pm.Protocol, pm.TargetPort) } + +// commentForIngressSvc generates a comment to be added to an iptables DNAT rule for a +// service. This is for iptables debugging/readability purposes only. 
+func commentForIngressSvc(svc string, vip, clusterIP netip.Addr) string { + return fmt.Sprintf("svc: %s, %s -> %s", svc, vip.String(), clusterIP.String()) +} diff --git a/util/linuxfw/iptables_for_svcs_test.go b/util/linuxfw/iptables_for_svcs_test.go index 99b2f517f..c3c1b1f65 100644 --- a/util/linuxfw/iptables_for_svcs_test.go +++ b/util/linuxfw/iptables_for_svcs_test.go @@ -153,6 +153,135 @@ func Test_iptablesRunner_DeleteSvc(t *testing.T) { svcMustExist(t, "svc2", map[string][]string{v4Addr.String(): s2R1, v6Addr.String(): s2R2}, iptr) } +func Test_iptablesRunner_EnsureDNATRuleForSvc(t *testing.T) { + v4OrigDst := netip.MustParseAddr("10.0.0.1") + v4Target := netip.MustParseAddr("10.0.0.2") + v6OrigDst := netip.MustParseAddr("fd7a:115c:a1e0::1") + v6Target := netip.MustParseAddr("fd7a:115c:a1e0::2") + v4Rule := argsForIngressRule("svc:test", v4OrigDst, v4Target) + + tests := []struct { + name string + svcName string + origDst netip.Addr + targetIP netip.Addr + precreateSvcRules [][]string + }{ + { + name: "dnat_for_ipv4", + svcName: "svc:test", + origDst: v4OrigDst, + targetIP: v4Target, + }, + { + name: "dnat_for_ipv6", + svcName: "svc:test-2", + origDst: v6OrigDst, + targetIP: v6Target, + }, + { + name: "add_existing_rule", + svcName: "svc:test", + origDst: v4OrigDst, + targetIP: v4Target, + precreateSvcRules: [][]string{v4Rule}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + iptr := NewFakeIPTablesRunner() + table := iptr.getIPTByAddr(tt.targetIP) + for _, ruleset := range tt.precreateSvcRules { + mustPrecreateDNATRule(t, ruleset, table) + } + if err := iptr.EnsureDNATRuleForSvc(tt.svcName, tt.origDst, tt.targetIP); err != nil { + t.Errorf("[unexpected error] iptablesRunner.EnsureDNATRuleForSvc() = %v", err) + } + args := argsForIngressRule(tt.svcName, tt.origDst, tt.targetIP) + exists, err := table.Exists("nat", "PREROUTING", args...) 
+ if err != nil { + t.Fatalf("error checking if rule exists: %v", err) + } + if !exists { + t.Errorf("expected rule was not created") + } + }) + } +} + +func Test_iptablesRunner_DeleteDNATRuleForSvc(t *testing.T) { + v4OrigDst := netip.MustParseAddr("10.0.0.1") + v4Target := netip.MustParseAddr("10.0.0.2") + v6OrigDst := netip.MustParseAddr("fd7a:115c:a1e0::1") + v6Target := netip.MustParseAddr("fd7a:115c:a1e0::2") + v4Rule := argsForIngressRule("svc:test", v4OrigDst, v4Target) + v6Rule := argsForIngressRule("svc:test", v6OrigDst, v6Target) + + tests := []struct { + name string + svcName string + origDst netip.Addr + targetIP netip.Addr + precreateSvcRules [][]string + }{ + { + name: "multiple_rules_ipv4_deleted", + svcName: "svc:test", + origDst: v4OrigDst, + targetIP: v4Target, + precreateSvcRules: [][]string{v4Rule, v6Rule}, + }, + { + name: "multiple_rules_ipv6_deleted", + svcName: "svc:test", + origDst: v6OrigDst, + targetIP: v6Target, + precreateSvcRules: [][]string{v4Rule, v6Rule}, + }, + { + name: "non-existent_rule_deleted", + svcName: "svc:test", + origDst: v4OrigDst, + targetIP: v4Target, + precreateSvcRules: [][]string{v6Rule}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + iptr := NewFakeIPTablesRunner() + table := iptr.getIPTByAddr(tt.targetIP) + for _, ruleset := range tt.precreateSvcRules { + mustPrecreateDNATRule(t, ruleset, table) + } + if err := iptr.DeleteDNATRuleForSvc(tt.svcName, tt.origDst, tt.targetIP); err != nil { + t.Errorf("iptablesRunner.DeleteDNATRuleForSvc() errored: %v ", err) + } + deletedRule := argsForIngressRule(tt.svcName, tt.origDst, tt.targetIP) + exists, err := table.Exists("nat", "PREROUTING", deletedRule...) + if err != nil { + t.Fatalf("error verifying that rule does not exist after deletion: %v", err) + } + if exists { + t.Errorf("DNAT rule exists after deletion") + } + }) + } +} + +func mustPrecreateDNATRule(t *testing.T, rules []string, table iptablesInterface) { + t.Helper() + exists, err := table.Exists("nat", "PREROUTING", rules...) + if err != nil { + t.Fatalf("error ensuring that nat PREROUTING table exists: %v", err) + } + if exists { + return + } + if err := table.Append("nat", "PREROUTING", rules...); err != nil { + t.Fatalf("error precreating DNAT rule: %v", err) + } +} + func svcMustExist(t *testing.T, svcName string, rules map[string][]string, iptr *iptablesRunner) { t.Helper() for dst, ruleset := range rules { diff --git a/util/linuxfw/nftables_for_svcs.go b/util/linuxfw/nftables_for_svcs.go index 130585b22..474b98086 100644 --- a/util/linuxfw/nftables_for_svcs.go +++ b/util/linuxfw/nftables_for_svcs.go @@ -119,6 +119,63 @@ func (n *nftablesRunner) DeleteSvc(svc, tun string, targetIPs []netip.Addr, pm [ return n.conn.Flush() } +// EnsureDNATRuleForSvc adds a DNAT rule that forwards traffic from the +// VIPService IP address to a local address. This is used by the Kubernetes +// operator's network layer proxies to forward tailnet traffic for VIPServices +// to Kubernetes Services. 
+func (n *nftablesRunner) EnsureDNATRuleForSvc(svc string, origDst, dst netip.Addr) error { + t, ch, err := n.ensurePreroutingChain(origDst) + if err != nil { + return fmt.Errorf("error ensuring chain for %s: %w", svc, err) + } + meta := svcRuleMeta(svc, origDst, dst) + rule, err := n.findRuleByMetadata(t, ch, meta) + if err != nil { + return fmt.Errorf("error looking up rule: %w", err) + } + if rule != nil { + return nil + } + rule = dnatRuleForChain(t, ch, origDst, dst, meta) + n.conn.InsertRule(rule) + return n.conn.Flush() +} + +// DeleteDNATRuleForSvc deletes a DNAT rule created by EnsureDNATRuleForSvc. +// We use the metadata attached to the rule to look it up. +func (n *nftablesRunner) DeleteDNATRuleForSvc(svcName string, origDst, dst netip.Addr) error { + table, err := n.getNFTByAddr(origDst) + if err != nil { + return fmt.Errorf("error setting up nftables for IP family of %s: %w", origDst, err) + } + t, err := getTableIfExists(n.conn, table.Proto, "nat") + if err != nil { + return fmt.Errorf("error checking if nat table exists: %w", err) + } + if t == nil { + return nil + } + ch, err := getChainFromTable(n.conn, t, "PREROUTING") + if errors.Is(err, errorChainNotFound{tableName: "nat", chainName: "PREROUTING"}) { + return nil + } + if err != nil { + return fmt.Errorf("error checking if chain PREROUTING exists: %w", err) + } + meta := svcRuleMeta(svcName, origDst, dst) + rule, err := n.findRuleByMetadata(t, ch, meta) + if err != nil { + return fmt.Errorf("error checking if rule exists: %w", err) + } + if rule == nil { + return nil + } + if err := n.conn.DelRule(rule); err != nil { + return fmt.Errorf("error deleting rule: %w", err) + } + return n.conn.Flush() +} + func portMapRule(t *nftables.Table, ch *nftables.Chain, tun string, targetIP netip.Addr, matchPort, targetPort uint16, proto uint8, meta []byte) *nftables.Rule { var fam uint32 if targetIP.Is4() { @@ -243,3 +300,10 @@ func protoFromString(s string) (uint8, error) { return 0, fmt.Errorf("unrecognized protocol: %q", s) } } + +// svcRuleMeta generates metadata for a rule. +// This metadata can then be used to find the rule. +// https://github.com/google/nftables/issues/48 +func svcRuleMeta(svcName string, origDst, dst netip.Addr) []byte { + return []byte(fmt.Sprintf("svc:%s,VIP:%s,ClusterIP:%s", svcName, origDst.String(), dst.String())) +} diff --git a/util/linuxfw/nftables_for_svcs_test.go b/util/linuxfw/nftables_for_svcs_test.go index d2df6e4bd..73472ce20 100644 --- a/util/linuxfw/nftables_for_svcs_test.go +++ b/util/linuxfw/nftables_for_svcs_test.go @@ -14,8 +14,9 @@ import ( // This test creates a temporary network namespace for the nftables rules being // set up, so it needs to run in a privileged mode. Locally it needs to be run -// by root, else it will be silently skipped. In CI it runs in a privileged -// container. +// by root, else it will be silently skipped. +// sudo go test -v -run Test_nftablesRunner_EnsurePortMapRuleForSvc ./util/linuxfw/... +// In CI it runs in a privileged container. 
func Test_nftablesRunner_EnsurePortMapRuleForSvc(t *testing.T) { conn := newSysConn(t) runner := newFakeNftablesRunnerWithConn(t, conn, true) @@ -23,51 +24,215 @@ func Test_nftablesRunner_EnsurePortMapRuleForSvc(t *testing.T) { pmTCP := PortMap{MatchPort: 4003, TargetPort: 80, Protocol: "TCP"} pmTCP1 := PortMap{MatchPort: 4004, TargetPort: 443, Protocol: "TCP"} - // Create a rule for service 'foo' to forward TCP traffic to IPv4 endpoint - runner.EnsurePortMapRuleForSvc("foo", "tailscale0", ipv4, pmTCP) + // Create a rule for service 'svc:foo' to forward TCP traffic to IPv4 endpoint + runner.EnsurePortMapRuleForSvc("svc:foo", "tailscale0", ipv4, pmTCP) svcChains(t, 1, conn) - chainRuleCount(t, "foo", 1, conn, nftables.TableFamilyIPv4) - checkPortMapRule(t, "foo", ipv4, pmTCP, runner, nftables.TableFamilyIPv4) + chainRuleCount(t, "svc:foo", 1, conn, nftables.TableFamilyIPv4) + checkPortMapRule(t, "svc:foo", ipv4, pmTCP, runner, nftables.TableFamilyIPv4) - // Create another rule for service 'foo' to forward TCP traffic to the + // Create another rule for service 'svc:foo' to forward TCP traffic to the // same IPv4 endpoint, but to a different port. - runner.EnsurePortMapRuleForSvc("foo", "tailscale0", ipv4, pmTCP1) + runner.EnsurePortMapRuleForSvc("svc:foo", "tailscale0", ipv4, pmTCP1) svcChains(t, 1, conn) - chainRuleCount(t, "foo", 2, conn, nftables.TableFamilyIPv4) - checkPortMapRule(t, "foo", ipv4, pmTCP1, runner, nftables.TableFamilyIPv4) + chainRuleCount(t, "svc:foo", 2, conn, nftables.TableFamilyIPv4) + checkPortMapRule(t, "svc:foo", ipv4, pmTCP1, runner, nftables.TableFamilyIPv4) - // Create a rule for service 'foo' to forward TCP traffic to an IPv6 endpoint - runner.EnsurePortMapRuleForSvc("foo", "tailscale0", ipv6, pmTCP) + // Create a rule for service 'svc:foo' to forward TCP traffic to an IPv6 endpoint + runner.EnsurePortMapRuleForSvc("svc:foo", "tailscale0", ipv6, pmTCP) svcChains(t, 2, conn) - chainRuleCount(t, "foo", 1, conn, nftables.TableFamilyIPv6) - checkPortMapRule(t, "foo", ipv6, pmTCP, runner, nftables.TableFamilyIPv6) + chainRuleCount(t, "svc:foo", 1, conn, nftables.TableFamilyIPv6) + checkPortMapRule(t, "svc:foo", ipv6, pmTCP, runner, nftables.TableFamilyIPv6) - // Create a rule for service 'bar' to forward TCP traffic to IPv4 endpoint - runner.EnsurePortMapRuleForSvc("bar", "tailscale0", ipv4, pmTCP) + // Create a rule for service 'svc:bar' to forward TCP traffic to IPv4 endpoint + runner.EnsurePortMapRuleForSvc("svc:bar", "tailscale0", ipv4, pmTCP) svcChains(t, 3, conn) - chainRuleCount(t, "bar", 1, conn, nftables.TableFamilyIPv4) - checkPortMapRule(t, "bar", ipv4, pmTCP, runner, nftables.TableFamilyIPv4) + chainRuleCount(t, "svc:bar", 1, conn, nftables.TableFamilyIPv4) + checkPortMapRule(t, "svc:bar", ipv4, pmTCP, runner, nftables.TableFamilyIPv4) - // Create a rule for service 'bar' to forward TCP traffic to an IPv6 endpoint - runner.EnsurePortMapRuleForSvc("bar", "tailscale0", ipv6, pmTCP) + // Create a rule for service 'svc:bar' to forward TCP traffic to an IPv6 endpoint + runner.EnsurePortMapRuleForSvc("svc:bar", "tailscale0", ipv6, pmTCP) svcChains(t, 4, conn) - chainRuleCount(t, "bar", 1, conn, nftables.TableFamilyIPv6) - checkPortMapRule(t, "bar", ipv6, pmTCP, runner, nftables.TableFamilyIPv6) + chainRuleCount(t, "svc:bar", 1, conn, nftables.TableFamilyIPv6) + checkPortMapRule(t, "svc:bar", ipv6, pmTCP, runner, nftables.TableFamilyIPv6) - // Delete service bar - runner.DeleteSvc("bar", "tailscale0", []netip.Addr{ipv4, ipv6}, []PortMap{pmTCP}) + // Delete 
service svc:bar + runner.DeleteSvc("svc:bar", "tailscale0", []netip.Addr{ipv4, ipv6}, []PortMap{pmTCP}) svcChains(t, 2, conn) - // Delete a rule from service foo - runner.DeletePortMapRuleForSvc("foo", "tailscale0", ipv4, pmTCP) + // Delete a rule from service svc:foo + runner.DeletePortMapRuleForSvc("svc:foo", "tailscale0", ipv4, pmTCP) svcChains(t, 2, conn) - chainRuleCount(t, "foo", 1, conn, nftables.TableFamilyIPv4) + chainRuleCount(t, "svc:foo", 1, conn, nftables.TableFamilyIPv4) - // Delete service foo - runner.DeleteSvc("foo", "tailscale0", []netip.Addr{ipv4, ipv6}, []PortMap{pmTCP, pmTCP1}) + // Delete service svc:foo + runner.DeleteSvc("svc:foo", "tailscale0", []netip.Addr{ipv4, ipv6}, []PortMap{pmTCP, pmTCP1}) svcChains(t, 0, conn) } +func Test_nftablesRunner_EnsureDNATRuleForSvc(t *testing.T) { + conn := newSysConn(t) + runner := newFakeNftablesRunnerWithConn(t, conn, true) + + // Test IPv4 DNAT rule + ipv4OrigDst := netip.MustParseAddr("10.0.0.1") + ipv4Target := netip.MustParseAddr("10.0.0.2") + + // Create DNAT rule for service 'svc:foo' to forward IPv4 traffic + err := runner.EnsureDNATRuleForSvc("svc:foo", ipv4OrigDst, ipv4Target) + if err != nil { + t.Fatalf("error creating IPv4 DNAT rule: %v", err) + } + checkDNATRule(t, "svc:foo", ipv4OrigDst, ipv4Target, runner, nftables.TableFamilyIPv4) + + // Test IPv6 DNAT rule + ipv6OrigDst := netip.MustParseAddr("fd7a:115c:a1e0::1") + ipv6Target := netip.MustParseAddr("fd7a:115c:a1e0::2") + + // Create DNAT rule for service 'svc:foo' to forward IPv6 traffic + err = runner.EnsureDNATRuleForSvc("svc:foo", ipv6OrigDst, ipv6Target) + if err != nil { + t.Fatalf("error creating IPv6 DNAT rule: %v", err) + } + checkDNATRule(t, "svc:foo", ipv6OrigDst, ipv6Target, runner, nftables.TableFamilyIPv6) + + // Test creating rule for another service + err = runner.EnsureDNATRuleForSvc("svc:bar", ipv4OrigDst, ipv4Target) + if err != nil { + t.Fatalf("error creating DNAT rule for service 'svc:bar': %v", err) + } + checkDNATRule(t, "svc:bar", ipv4OrigDst, ipv4Target, runner, nftables.TableFamilyIPv4) +} + +func Test_nftablesRunner_DeleteDNATRuleForSvc(t *testing.T) { + conn := newSysConn(t) + runner := newFakeNftablesRunnerWithConn(t, conn, true) + + // Test IPv4 DNAT rule deletion + ipv4OrigDst := netip.MustParseAddr("10.0.0.1") + ipv4Target := netip.MustParseAddr("10.0.0.2") + + // Create and then delete IPv4 DNAT rule + err := runner.EnsureDNATRuleForSvc("svc:foo", ipv4OrigDst, ipv4Target) + if err != nil { + t.Fatalf("error creating IPv4 DNAT rule: %v", err) + } + + // Verify rule exists before deletion + table, err := runner.getNFTByAddr(ipv4OrigDst) + if err != nil { + t.Fatalf("error getting table: %v", err) + } + nftTable, err := getTableIfExists(runner.conn, table.Proto, "nat") + if err != nil { + t.Fatalf("error getting nat table: %v", err) + } + ch, err := getChainFromTable(runner.conn, nftTable, "PREROUTING") + if err != nil { + t.Fatalf("error getting PREROUTING chain: %v", err) + } + meta := svcRuleMeta("svc:foo", ipv4OrigDst, ipv4Target) + rule, err := runner.findRuleByMetadata(nftTable, ch, meta) + if err != nil { + t.Fatalf("error checking if rule exists: %v", err) + } + if rule == nil { + t.Fatal("rule does not exist before deletion") + } + + err = runner.DeleteDNATRuleForSvc("svc:foo", ipv4OrigDst, ipv4Target) + if err != nil { + t.Fatalf("error deleting IPv4 DNAT rule: %v", err) + } + + // Verify rule is deleted + rule, err = runner.findRuleByMetadata(nftTable, ch, meta) + if err != nil { + t.Fatalf("error checking if rule exists: 
%v", err) + } + if rule != nil { + t.Fatal("rule still exists after deletion") + } + + // Test IPv6 DNAT rule deletion + ipv6OrigDst := netip.MustParseAddr("fd7a:115c:a1e0::1") + ipv6Target := netip.MustParseAddr("fd7a:115c:a1e0::2") + + // Create and then delete IPv6 DNAT rule + err = runner.EnsureDNATRuleForSvc("svc:foo", ipv6OrigDst, ipv6Target) + if err != nil { + t.Fatalf("error creating IPv6 DNAT rule: %v", err) + } + + // Verify rule exists before deletion + table, err = runner.getNFTByAddr(ipv6OrigDst) + if err != nil { + t.Fatalf("error getting table: %v", err) + } + nftTable, err = getTableIfExists(runner.conn, table.Proto, "nat") + if err != nil { + t.Fatalf("error getting nat table: %v", err) + } + ch, err = getChainFromTable(runner.conn, nftTable, "PREROUTING") + if err != nil { + t.Fatalf("error getting PREROUTING chain: %v", err) + } + meta = svcRuleMeta("svc:foo", ipv6OrigDst, ipv6Target) + rule, err = runner.findRuleByMetadata(nftTable, ch, meta) + if err != nil { + t.Fatalf("error checking if rule exists: %v", err) + } + if rule == nil { + t.Fatal("rule does not exist before deletion") + } + + err = runner.DeleteDNATRuleForSvc("svc:foo", ipv6OrigDst, ipv6Target) + if err != nil { + t.Fatalf("error deleting IPv6 DNAT rule: %v", err) + } + + // Verify rule is deleted + rule, err = runner.findRuleByMetadata(nftTable, ch, meta) + if err != nil { + t.Fatalf("error checking if rule exists: %v", err) + } + if rule != nil { + t.Fatal("rule still exists after deletion") + } +} + +// checkDNATRule verifies that a DNAT rule exists for the given service, original destination, and target IP. +func checkDNATRule(t *testing.T, svc string, origDst, targetIP netip.Addr, runner *nftablesRunner, fam nftables.TableFamily) { + t.Helper() + table, err := runner.getNFTByAddr(origDst) + if err != nil { + t.Fatalf("error getting table: %v", err) + } + nftTable, err := getTableIfExists(runner.conn, table.Proto, "nat") + if err != nil { + t.Fatalf("error getting nat table: %v", err) + } + if nftTable == nil { + t.Fatal("nat table not found") + } + + ch, err := getChainFromTable(runner.conn, nftTable, "PREROUTING") + if err != nil { + t.Fatalf("error getting PREROUTING chain: %v", err) + } + if ch == nil { + t.Fatal("PREROUTING chain not found") + } + + meta := svcRuleMeta(svc, origDst, targetIP) + rule, err := runner.findRuleByMetadata(nftTable, ch, meta) + if err != nil { + t.Fatalf("error checking if rule exists: %v", err) + } + if rule == nil { + t.Fatal("DNAT rule not found") + } +} + // svcChains verifies that the expected number of chains exist (for either IP // family) and that each of them is configured as NAT prerouting chain. 
func svcChains(t *testing.T, wantCount int, conn *nftables.Conn) { diff --git a/util/linuxfw/nftables_runner.go b/util/linuxfw/nftables_runner.go index b87298c61..faa02f7c7 100644 --- a/util/linuxfw/nftables_runner.go +++ b/util/linuxfw/nftables_runner.go @@ -107,6 +107,12 @@ func (n *nftablesRunner) AddDNATRule(origDst netip.Addr, dst netip.Addr) error { if err != nil { return err } + rule := dnatRuleForChain(nat, preroutingCh, origDst, dst, nil) + n.conn.InsertRule(rule) + return n.conn.Flush() +} + +func dnatRuleForChain(t *nftables.Table, ch *nftables.Chain, origDst, dst netip.Addr, meta []byte) *nftables.Rule { var daddrOffset, fam, dadderLen uint32 if origDst.Is4() { daddrOffset = 16 @@ -117,9 +123,9 @@ func (n *nftablesRunner) AddDNATRule(origDst netip.Addr, dst netip.Addr) error { dadderLen = 16 fam = unix.NFPROTO_IPV6 } - dnatRule := &nftables.Rule{ - Table: nat, - Chain: preroutingCh, + rule := &nftables.Rule{ + Table: t, + Chain: ch, Exprs: []expr.Any{ &expr.Payload{ DestRegister: 1, @@ -143,8 +149,10 @@ func (n *nftablesRunner) AddDNATRule(origDst netip.Addr, dst netip.Addr) error { }, }, } - n.conn.InsertRule(dnatRule) - return n.conn.Flush() + if len(meta) > 0 { + rule.UserData = meta + } + return rule } // DNATWithLoadBalancer currently just forwards all traffic destined for origDst @@ -555,6 +563,8 @@ type NetfilterRunner interface { EnsurePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm PortMap) error DeletePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm PortMap) error + EnsureDNATRuleForSvc(svcName string, origDst, dst netip.Addr) error + DeleteDNATRuleForSvc(svcName string, origDst, dst netip.Addr) error DeleteSvc(svc, tun string, targetIPs []netip.Addr, pm []PortMap) error diff --git a/wgengine/router/router_linux_test.go b/wgengine/router/router_linux_test.go index 7ddd7385d..a289fb0ac 100644 --- a/wgengine/router/router_linux_test.go +++ b/wgengine/router/router_linux_test.go @@ -557,6 +557,14 @@ func (n *fakeIPTablesRunner) ClampMSSToPMTU(tun string, addr netip.Addr) error { return errors.New("not implemented") } +func (n *fakeIPTablesRunner) EnsureDNATRuleForSvc(svcName string, origDst, dst netip.Addr) error { + return errors.New("not implemented") +} + +func (n *fakeIPTablesRunner) DeleteDNATRuleForSvc(svcName string, origDst, dst netip.Addr) error { + return errors.New("not implemented") +} + func (n *fakeIPTablesRunner) addBase4(tunname string) error { curIPT := n.ipt4 newRules := []struct{ chain, rule string }{ From fb188c5b5398a6996abc2b1e1500c78d12ba150a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 8 May 2025 18:38:48 -0500 Subject: [PATCH 0857/1708] net/dns,docs/windows/policy,util/syspolicy: register Tailscale IP addresses in AD DNS if required by policy In this PR, we make DNS registration behavior configurable via the EnableDNSRegistration policy setting. We keep the default behavior unchanged, but allow admins to either enforce DNS registration and dynamic DNS updates for the Tailscale interface, or prevent Tailscale from modifying the settings configured in the network adapter's properties or by other means. 
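For illustration only, the three policy states map onto the per-interface registry
values written by this change (taken from the configureDNSRegistration diff below):

  always       -> RegistrationEnabled=1, DisableDynamicUpdate=0, MaxNumberOfAddressesToRegister=1
  never        -> RegistrationEnabled=0, DisableDynamicUpdate=1, MaxNumberOfAddressesToRegister=0 (the previous hard-coded behavior)
  user-decides -> the adapter's existing settings are left untouched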
Updates #14917 Signed-off-by: Nick Khyl --- docs/windows/policy/en-US/tailscale.adml | 12 ++++ docs/windows/policy/tailscale.admx | 22 +++++++ net/dns/manager_windows.go | 83 ++++++++++++++++++++---- util/syspolicy/policy_keys.go | 9 +++ util/syspolicy/syspolicy.go | 7 ++ 5 files changed, 121 insertions(+), 12 deletions(-) diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index eb6a520d1..fb71e521e 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -17,6 +17,7 @@ Tailscale version 1.74.0 and later Tailscale version 1.78.0 and later Tailscale version 1.82.0 and later + Tailscale version 1.84.0 and later Tailscale UI customization Settings @@ -147,6 +148,14 @@ If you disable this policy, then Use Tailscale Subnets is always disabled and th If you do not configure this policy, then Use Tailscale Subnets depends on what is selected in the Preferences submenu. See https://tailscale.com/kb/1315/mdm-keys#set-whether-the-device-accepts-tailscale-subnets or https://tailscale.com/kb/1019/subnets for more details.]]> + Always register + Use adapter properties + Register Tailscale IP addresses in DNS + Automatically install updates Exit Node: + + Registration mode: + Target IDs: diff --git a/docs/windows/policy/tailscale.admx b/docs/windows/policy/tailscale.admx index 0ff311b40..3db2108b4 100644 --- a/docs/windows/policy/tailscale.admx +++ b/docs/windows/policy/tailscale.admx @@ -58,6 +58,10 @@ displayName="$(string.SINCE_V1_82)"> + + + @@ -193,6 +197,24 @@ never + + + + + + + + always + + + + + user-decides + + + + + diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index effdf23ca..6ed5d3ba6 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -29,6 +29,9 @@ import ( "tailscale.com/health" "tailscale.com/types/logger" "tailscale.com/util/dnsname" + "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/rsop" + "tailscale.com/util/syspolicy/setting" "tailscale.com/util/winutil" ) @@ -45,6 +48,8 @@ type windowsManager struct { nrptDB *nrptRuleDatabase wslManager *wslManager + unregisterPolicyChangeCb func() // called when the manager is closing + mu sync.Mutex closing bool } @@ -64,6 +69,11 @@ func NewOSConfigurator(logf logger.Logf, health *health.Tracker, knobs *controlk ret.nrptDB = newNRPTRuleDatabase(logf) } + var err error + if ret.unregisterPolicyChangeCb, err = syspolicy.RegisterChangeCallback(ret.sysPolicyChanged); err != nil { + logf("error registering policy change callback: %v", err) // non-fatal + } + go func() { // Log WSL status once at startup. if distros, err := wslDistros(); err != nil { @@ -362,11 +372,9 @@ func (m *windowsManager) SetDNS(cfg OSConfig) error { // configuration only, routing one set of things to the "split" // resolver and the rest to the primary. - // Unconditionally disable dynamic DNS updates and NetBIOS on our - // interfaces. - if err := m.disableDynamicUpdates(); err != nil { - m.logf("disableDynamicUpdates error: %v\n", err) - } + // Reconfigure DNS registration according to the [syspolicy.DNSRegistration] + // policy setting, and unconditionally disable NetBIOS on our interfaces. 
+ m.reconfigureDNSRegistration() if err := m.disableNetBIOS(); err != nil { m.logf("disableNetBIOS error: %v\n", err) } @@ -485,6 +493,10 @@ func (m *windowsManager) Close() error { m.closing = true m.mu.Unlock() + if m.unregisterPolicyChangeCb != nil { + m.unregisterPolicyChangeCb() + } + err := m.SetDNS(OSConfig{}) if m.nrptDB != nil { m.nrptDB.Close() @@ -493,15 +505,62 @@ func (m *windowsManager) Close() error { return err } -// disableDynamicUpdates sets the appropriate registry values to prevent the -// Windows DHCP client from sending dynamic DNS updates for our interface to -// AD domain controllers. -func (m *windowsManager) disableDynamicUpdates() error { +// sysPolicyChanged is a callback triggered by [syspolicy] when it detects +// a change in one or more syspolicy settings. +func (m *windowsManager) sysPolicyChanged(policy *rsop.PolicyChange) { + if policy.HasChanged(syspolicy.EnableDNSRegistration) { + m.reconfigureDNSRegistration() + } +} + +// reconfigureDNSRegistration configures the DNS registration settings +// using the [syspolicy.DNSRegistration] policy setting, if it is set. +// If the policy is not configured, it disables DNS registration. +func (m *windowsManager) reconfigureDNSRegistration() { + // Disable DNS registration by default (if the policy setting is not configured). + // This is primarily for historical reasons and to avoid breaking existing + // setups that rely on this behavior. + enableDNSRegistration, err := syspolicy.GetPreferenceOptionOrDefault(syspolicy.EnableDNSRegistration, setting.NeverByPolicy) + if err != nil { + m.logf("error getting DNSRegistration policy setting: %v", err) // non-fatal; we'll use the default + } + + if enableDNSRegistration.Show() { + // "Show" reports whether the policy setting is configured as "user-decides". + // The name is a bit unfortunate in this context, as we don't actually "show" anything. + // Still, if the admin configured the policy as "user-decides", we shouldn't modify + // the adapter's settings and should leave them up to the user (admin rights required) + // or the system defaults. + return + } + + // Otherwise, if the policy setting is configured as "always" or "never", + // we should configure the adapter accordingly. + if err := m.configureDNSRegistration(enableDNSRegistration.IsAlways()); err != nil { + m.logf("error configuring DNS registration: %v", err) + } +} + +// configureDNSRegistration sets the appropriate registry values to allow or prevent +// the Windows DHCP client from registering Tailscale IP addresses with DNS +// and sending dynamic updates for our interface to AD domain controllers. 
+func (m *windowsManager) configureDNSRegistration(enabled bool) error { prefixen := []winutil.RegistryPathPrefix{ winutil.IPv4TCPIPInterfacePrefix, winutil.IPv6TCPIPInterfacePrefix, } + var ( + registrationEnabled = uint32(0) + disableDynamicUpdate = uint32(1) + maxNumberOfAddressesToRegister = uint32(0) + ) + if enabled { + registrationEnabled = 1 + disableDynamicUpdate = 0 + maxNumberOfAddressesToRegister = 1 + } + for _, prefix := range prefixen { k, err := m.openInterfaceKey(prefix) if err != nil { @@ -509,13 +568,13 @@ func (m *windowsManager) disableDynamicUpdates() error { } defer k.Close() - if err := k.SetDWordValue("RegistrationEnabled", 0); err != nil { + if err := k.SetDWordValue("RegistrationEnabled", registrationEnabled); err != nil { return err } - if err := k.SetDWordValue("DisableDynamicUpdate", 1); err != nil { + if err := k.SetDWordValue("DisableDynamicUpdate", disableDynamicUpdate); err != nil { return err } - if err := k.SetDWordValue("MaxNumberOfAddressesToRegister", 0); err != nil { + if err := k.SetDWordValue("MaxNumberOfAddressesToRegister", maxNumberOfAddressesToRegister); err != nil { return err } } diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index 8da0e0cc8..29b2dfd28 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -63,6 +63,14 @@ const ( ExitNodeAllowLANAccess Key = "ExitNodeAllowLANAccess" EnableTailscaleDNS Key = "UseTailscaleDNSSettings" EnableTailscaleSubnets Key = "UseTailscaleSubnets" + + // EnableDNSRegistration is a string value that can be set to "always", "never" + // or "user-decides". It controls whether DNS registration and dynamic DNS + // updates are enabled for the Tailscale interface. For historical reasons + // and to maintain compatibility with existing setups, the default is "never". + // It is only used on Windows. + EnableDNSRegistration Key = "EnableDNSRegistration" + // CheckUpdates is the key to signal if the updater should periodically // check for updates. CheckUpdates Key = "CheckUpdates" @@ -168,6 +176,7 @@ var implicitDefinitions = []*setting.Definition{ setting.NewDefinition(CheckUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), setting.NewDefinition(ControlURL, setting.DeviceSetting, setting.StringValue), setting.NewDefinition(DeviceSerialNumber, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(EnableDNSRegistration, setting.DeviceSetting, setting.PreferenceOptionValue), setting.NewDefinition(EnableIncomingConnections, setting.DeviceSetting, setting.PreferenceOptionValue), setting.NewDefinition(EnableRunExitNode, setting.DeviceSetting, setting.PreferenceOptionValue), setting.NewDefinition(EnableServerMode, setting.DeviceSetting, setting.PreferenceOptionValue), diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index 5d5a283fb..afcc28ff1 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -90,6 +90,13 @@ func GetPreferenceOption(name Key) (setting.PreferenceOption, error) { return getCurrentPolicySettingValue(name, setting.ShowChoiceByPolicy) } +// GetPreferenceOptionOrDefault is like [GetPreferenceOption], but allows +// specifying a default value to return if the policy setting is not configured. +// It can be used in situations where "user-decides" is not the default. 
+func GetPreferenceOptionOrDefault(name Key, defaultValue setting.PreferenceOption) (setting.PreferenceOption, error) { + return getCurrentPolicySettingValue(name, defaultValue) +} + // GetVisibility loads a policy from the registry that can be managed // by an enterprise policy management system and describes show/hide decisions // for UI elements. The registry value should be a string set to "show" (return From 13e91f4a2fc7237e6de877dfb95c76a63dff81ee Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Mon, 18 Nov 2024 15:44:06 -0700 Subject: [PATCH 0858/1708] docs/windows/policy: add OnboardingFlow policy to ADMX file Fixes #15907 Signed-off-by: Aaron Klotz --- docs/windows/policy/en-US/tailscale.adml | 7 +++++++ docs/windows/policy/tailscale.admx | 14 ++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index fb71e521e..62ff94da7 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -16,6 +16,7 @@ Tailscale version 1.62.0 and later Tailscale version 1.74.0 and later Tailscale version 1.78.0 and later + Tailscale version 1.80.0 and later Tailscale version 1.82.0 and later Tailscale version 1.84.0 and later Tailscale @@ -271,6 +272,12 @@ If you enable this policy, the menu item will be displayed indicating the organi If you disable this policy or do not configure it, the corresponding menu item will be hidden. See https://tailscale.com/kb/1315/mdm-keys#set-your-organization-name for more details.]]> + Show the onboarding flow + diff --git a/docs/windows/policy/tailscale.admx b/docs/windows/policy/tailscale.admx index 3db2108b4..d97b24c36 100644 --- a/docs/windows/policy/tailscale.admx +++ b/docs/windows/policy/tailscale.admx @@ -54,6 +54,10 @@ displayName="$(string.SINCE_V1_78)"> + + + @@ -335,6 +339,16 @@ hide + + + + + show + + + hide + + From 7f4aaed1d59b7b8d4e61bc7f2640b126ba6a9166 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Mon, 12 May 2025 12:53:55 -0700 Subject: [PATCH 0859/1708] cmd/derpprobe: exit with non-zero status if --once fails (#15926) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `cmd/derpprobe --once` didn’t respect the convention of non-zero exit status for a failed run. It would always exit zero (i.e. success), even. This patch fixes that, but only for `--once` mode. Fixes: #15925 Signed-off-by: Simon Law --- cmd/derpprobe/derpprobe.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/derpprobe/derpprobe.go b/cmd/derpprobe/derpprobe.go index 899838462..2723a31ae 100644 --- a/cmd/derpprobe/derpprobe.go +++ b/cmd/derpprobe/derpprobe.go @@ -9,6 +9,7 @@ import ( "fmt" "log" "net/http" + "os" "sort" "time" @@ -75,6 +76,9 @@ func main() { for _, s := range st.bad { log.Printf("bad: %s", s) } + if len(st.bad) > 0 { + os.Exit(1) + } return } From d303570ab7911ce864953455731c3f6a83c5998a Mon Sep 17 00:00:00 2001 From: Simon Law Date: Mon, 12 May 2025 16:23:36 -0700 Subject: [PATCH 0860/1708] docs/commit-messages.md: explain #cleanup commits (#15933) Adapted from http://go/cleanup. 
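To make the guidance concrete, a hypothetical commit that qualifies as a cleanup
might look like this (package and author are made up for the example):

    envknob: fix typos in code comments

    Updates #cleanup

    Signed-off-by: Example Contributor <example@tailscale.com>

The Updates #cleanup trailer in place of an issue number, plus the usual
Signed-off-by line, is the shape the updated document describes.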
Fixes: #15932 Signed-off-by: Simon Law --- docs/commit-messages.md | 50 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/docs/commit-messages.md b/docs/commit-messages.md index 22a6e67ce..b3881eaeb 100644 --- a/docs/commit-messages.md +++ b/docs/commit-messages.md @@ -72,7 +72,7 @@ For the body (the rest of the description): - blank line after the subject (first) line - the text should be wrapped to ~76 characters (to appease git viewing tools, mainly), unless you really need longer lines (e.g. for ASCII art, tables, or long links) -- there must be a `Fixes` or `Updates` line for all non-trivial commits linking to a tracking bug. This goes after the body with a blank newline separating the two. Trivial code clean-up commits can use `Updates #cleanup` instead of an issue. +- there must be a `Fixes` or `Updates` line for all non-cleanup commits linking to a tracking bug. This goes after the body with a blank newline separating the two. [Cleanup commits](#is-it-a-cleanup) can use `Updates #cleanup` instead of an issue. - `Change-Id` lines should ideally be included in commits in the `corp` repo and are more optional in `tailscale/tailscale`. You can configure Git to do this for you by running `./tool/go run misc/install-git-hooks.go` from the root of the corp repo. This was originally a Gerrit thing and we don't use Gerrit, but it lets us tooling track commits as they're cherry-picked between branches. Also, tools like [git-cleanup](https://github.com/bradfitz/gitutil) use it to clean up your old local branches once they're merged upstream. - we don't use Markdown in commit messages. (Accidental Markdown like bulleted lists or even headings is fine, but not links) - we require `Signed-off-by` lines in public repos (such as `tailscale/tailscale`). Add them using `git commit --signoff` or `git commit -s` for short. You can use them in private repos but do not have to. @@ -108,6 +108,54 @@ For changes in `tailscale/tailscale` that fix a significant bug or add a new fea add `RELNOTE: ` toward the end of the commit message. This will aid the release engineer in writing the release notes for the next release. +## Is it a #cleanup? + +Our issuebot permits writing `Updates #cleanup` instead of an actual GitHub issue number. + +But only do that if it’s actually a cleanup. Don’t use that as an excuse to avoid filing an issue. + +Shortcuts[^1] to file issues: +- [go/bugc](http://go/bugc) (corp, safe choice) +- [go/bugo](http://go/bugo) (open source, if you want it public to the world). + +[^1]: These shortcuts point to our Tailscale’s internal URL shortener service, which you too [can run in your own Tailnet](https://tailscale.com/blog/golink). + +The following guide can help you decide whether a tracking issue is warranted. + +| | | +| --- | --- | +| Was there a crash/panic? | Not a cleanup. Put the panic in a bug. Talk about when it was introduced, why, why a test didn’t catch it, note what followup work might need to be done. | +| Did a customer report it? | Not a cleanup. Make a corp bug with links to the customer ticket. | +| Is it from an incident, get paged? | Not a cleanup. Let’s track why we got paged. | +| Does it change behavior? | Not a cleanup. File a bug to track why. | +| Adding a test for a recently fixed bug? | Not a cleanup. Use the recently fixed bug’s bug number. | +| Does it tweak a constant/parameter? | Not a cleanup. File a bug to track the debugging/tuning effort and record past results and goals for the future state. 
| +| Fixing a regression from an earlier change? | Not a cleanup. At minimum, reference the PR that caused the regression, but if users noticed, it might warrant its own bug. | +| Is it part of an overall effort that’ll take a hundred small steps? | Not a cleanup. The overall effort should have a tracking bug to collect all the minor efforts. | +| Is it a security fix? Is it a security hardening? | Not a cleanup. There should be a bug about security incidents or security hardening efforts and backporting to previous releases, etc. | +| Is it a feature flag being removed? | Not a cleanup. File a task to coordinate with other teams and to track the work. | + +### Actual cleanup examples + +- Fixing typos in internal comments that users would’ve never seen +- Simple, mechanical replacement of a deprecated API to its equivalently behaving replacement + - [`errors.Wrapf`](https://pkg.go.dev/github.com/pkg/errors#Wrapf) → [`fmt.Errorf("%w")`](https://pkg.go.dev/fmt#Errorf) + - [math/rand](https://pkg.go.dev/math/rand) → [math/rand/v2](https://pkg.go.dev/math/rand/v2) +- Code movement +- Removing dead code that doesn’t change behavior (API changes, feature flags, etc) +- Refactoring in prep for another change (but maybe mention the upcoming change’s bug as motivation) +- Adding a test that you just noticed was missing, not as a result of any bug or report or new feature coming +- Formatting (gofmt / prettifier) that was missed earlier + +### What’s the point of an issue? + +- Let us capture information that is inappropriate for a commit message +- Let us have conversations on a change after the fact +- Let us track metadata on issues and decide what to backport +- Let us associate related changes to each other, including after the fact +- Lets you write the backstory once on an overall bug/effort and re-use that issue number for N future commits, without having to repeat yourself on each commit message +- Provides archaeological breadcrumbs to future debuggers, providing context on why things were changed + # Reverts When you use `git revert` to revert a commit, the default commit message will identify the commit SHA and message that was reverted. You must expand this message to explain **why** it is being reverted, including a link to the associated issue. From ffb1dda2456ba0b35e85ba7aba5407acfc2e2ce7 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 12 May 2025 18:35:12 -0500 Subject: [PATCH 0861/1708] ipn/ipnlocal,wgengine: move (*tsdial.Dialer).SetRoutes() calls from LocalBackend to userspaceEngine This avoids reconfiguring the dialer unless the router config has changed. 
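Roughly, the gating now lives in (*userspaceEngine).Reconfig rather than in
LocalBackend.authReconfig. The following is only a stand-alone sketch of that
control flow, with stand-in types rather than the real tsdial/wgengine ones:

package main

import (
	"fmt"
	"net/netip"
)

// dialer stands in for tsdial.Dialer; it only records the routes it was given.
type dialer struct {
	routes, localRoutes []netip.Prefix
}

func (d *dialer) SetRoutes(routes, localRoutes []netip.Prefix) {
	d.routes, d.localRoutes = routes, localRoutes
}

// reconfig mirrors the new gating: push router routes to the dialer only when
// the control knob allows it, otherwise clear any previously set routes.
func reconfig(d *dialer, useRoutes bool, routes, localRoutes []netip.Prefix) {
	if useRoutes {
		d.SetRoutes(routes, localRoutes)
		return
	}
	d.SetRoutes(nil, nil)
}

func main() {
	d := &dialer{}
	routes := []netip.Prefix{netip.MustParsePrefix("100.64.0.0/10")}
	reconfig(d, true, routes, nil)
	fmt.Println("knob on:", d.routes)
	reconfig(d, false, routes, nil)
	fmt.Println("knob off:", d.routes) // cleared
}

The sketch only illustrates the flow; the point of the move is that the dialer
is now reconfigured in the same place as, and only alongside, router config
changes.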
Updates #12027 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 7 ------- wgengine/userspace.go | 11 +++++++++++ 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5d6433002..79383aa37 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -64,7 +64,6 @@ import ( "tailscale.com/logpolicy" "tailscale.com/net/captivedetection" "tailscale.com/net/dns" - "tailscale.com/net/dns/resolver" "tailscale.com/net/dnscache" "tailscale.com/net/dnsfallback" "tailscale.com/net/ipset" @@ -4844,12 +4843,6 @@ func (b *LocalBackend) authReconfig() { } b.logf("[v1] authReconfig: ra=%v dns=%v 0x%02x: %v", prefs.RouteAll(), prefs.CorpDNS(), flags, err) - if resolver.ShouldUseRoutes(b.ControlKnobs()) { - b.dialer.SetRoutes(rcfg.Routes, rcfg.LocalRoutes) - } else { - b.dialer.SetRoutes(nil, nil) - } - b.initPeerAPIListener() b.readvertiseAppConnectorRoutes() } diff --git a/wgengine/userspace.go b/wgengine/userspace.go index e34eae667..b1b82032b 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -26,6 +26,7 @@ import ( "tailscale.com/health" "tailscale.com/ipn/ipnstate" "tailscale.com/net/dns" + "tailscale.com/net/dns/resolver" "tailscale.com/net/flowtrack" "tailscale.com/net/ipset" "tailscale.com/net/netmon" @@ -102,6 +103,7 @@ type userspaceEngine struct { tundev *tstun.Wrapper wgdev *device.Device router router.Router + dialer *tsdial.Dialer confListenPort uint16 // original conf.ListenPort dns *dns.Manager magicConn *magicsock.Conn @@ -344,6 +346,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) waitCh: make(chan struct{}), tundev: tsTUNDev, router: rtr, + dialer: conf.Dialer, confListenPort: conf.ListenPort, birdClient: conf.BIRDClient, controlKnobs: conf.ControlKnobs, @@ -1028,6 +1031,14 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, if err != nil { return err } + + if resolver.ShouldUseRoutes(e.controlKnobs) { + e.logf("wgengine: Reconfig: user dialer") + e.dialer.SetRoutes(routerCfg.Routes, routerCfg.LocalRoutes) + } else { + e.dialer.SetRoutes(nil, nil) + } + // Keep DNS configuration after router configuration, as some // DNS managers refuse to apply settings if the device has no // assigned address. 
From 65e005ccaa4c4aea85e628853f02c4362eb6d029 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Mon, 12 May 2025 19:43:25 +0100 Subject: [PATCH 0862/1708] ipn/ipnlocal: attach Tailnet Lock status to bugreports Fixes tailscale/corp#28524 Signed-off-by: Anton Tolchanov --- ipn/localapi/localapi.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 9c6c0a528..99cb7c95b 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -397,6 +397,15 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) { // OS-specific details h.logf.JSON(1, "UserBugReportOS", osdiag.SupportInfo(osdiag.LogSupportInfoReasonBugReport)) + // Tailnet lock details + st := h.b.NetworkLockStatus() + if st.Enabled { + h.logf.JSON(1, "UserBugReportTailnetLockStatus", st) + if st.NodeKeySignature != nil { + h.logf("user bugreport tailnet lock signature: %s", st.NodeKeySignature.String()) + } + } + if defBool(r.URL.Query().Get("diagnose"), false) { h.b.Doctor(r.Context(), logger.WithPrefix(h.logf, "diag: ")) } From fccba5a2f1a5e5dbde9e2fa57e33651b8fd047eb Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Tue, 13 May 2025 09:19:18 -0400 Subject: [PATCH 0863/1708] prober: fix test logic (#15952) Catch failing tests that have no expected error string. Updates #15912 Signed-off-by: Mike O'Driscoll --- prober/tls_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/prober/tls_test.go b/prober/tls_test.go index cf5b60cb8..9ba17f79d 100644 --- a/prober/tls_test.go +++ b/prober/tls_test.go @@ -252,7 +252,7 @@ func TestCRL(t *testing.T) { "NoCRL", leafCertParsed, nil, - "", + "no CRL server presented in leaf cert for", }, { "NotBeforeCRLStaplingDate", @@ -276,7 +276,7 @@ func TestCRL(t *testing.T) { return } - if err == nil || !strings.Contains(err.Error(), tt.wantErr) { + if err == nil || tt.wantErr == "" || !strings.Contains(err.Error(), tt.wantErr) { t.Errorf("unexpected error %q; want %q", err, tt.wantErr) } }) From abe04bfa78f43b532cee3c0ff67f7593d21f45d9 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 14 May 2025 18:25:08 +0100 Subject: [PATCH 0864/1708] cmd/k8s-operator: warn if Tailscale Services use attempted for tailnet without the feature enabled (#15931) Also renames VIPService -> Tailscale Service (including user facing messages) Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/ingress-for-pg.go | 328 ++++++++++++++---------- cmd/k8s-operator/ingress-for-pg_test.go | 5 +- cmd/k8s-operator/tsclient.go | 3 + 3 files changed, 191 insertions(+), 145 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 3df5a07ee..fd6b71225 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -44,8 +44,8 @@ import ( ) const ( - serveConfigKey = "serve-config.json" - VIPSvcOwnerRef = "tailscale.com/k8s-operator:owned-by:%s" + serveConfigKey = "serve-config.json" + TailscaleSvcOwnerRef = "tailscale.com/k8s-operator:owned-by:%s" // FinalizerNamePG is the finalizer used by the IngressPGReconciler FinalizerNamePG = "tailscale.com/ingress-pg-finalizer" @@ -54,7 +54,11 @@ const ( // well as the default HTTPS endpoint). annotationHTTPEndpoint = "tailscale.com/http-endpoint" - labelDomain = "tailscale.com/domain" + labelDomain = "tailscale.com/domain" + msgFeatureFlagNotEnabled = "Tailscale Service feature flag is not enabled for this tailnet, skipping provisioning. 
" + + "Please contact Tailscale support through https://tailscale.com/contact/support to enable the feature flag, then recreate the operator's Pod." + + warningTailscaleServiceFeatureFlagNotEnabled = "TailscaleServiceFeatureFlagNotEnabled" ) var gaugePGIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressPGResourceCount) @@ -82,16 +86,16 @@ type HAIngressReconciler struct { // Reconcile reconciles Ingresses that should be exposed over Tailscale in HA // mode (on a ProxyGroup). It looks at all Ingresses with // tailscale.com/proxy-group annotation. For each such Ingress, it ensures that -// a VIPService named after the hostname of the Ingress exists and is up to +// a TailscaleService named after the hostname of the Ingress exists and is up to // date. It also ensures that the serve config for the ingress ProxyGroup is -// updated to route traffic for the VIPService to the Ingress's backend -// Services. Ingress hostname change also results in the VIPService for the -// previous hostname being cleaned up and a new VIPService being created for the +// updated to route traffic for the Tailscale Service to the Ingress's backend +// Services. Ingress hostname change also results in the Tailscale Service for the +// previous hostname being cleaned up and a new Tailscale Service being created for the // new hostname. // HA Ingresses support multi-cluster Ingress setup. -// Each VIPService contains a list of owner references that uniquely identify +// Each Tailscale Service contains a list of owner references that uniquely identify // the Ingress resource and the operator. When an Ingress that acts as a -// backend is being deleted, the corresponding VIPService is only deleted if the +// backend is being deleted, the corresponding Tailscale Service is only deleted if the // only owner reference that it contains is for this Ingress. If other owner // references are found, then cleanup operation only removes this Ingress' owner // reference. @@ -110,14 +114,17 @@ func (r *HAIngressReconciler) Reconcile(ctx context.Context, req reconcile.Reque return res, fmt.Errorf("failed to get Ingress: %w", err) } - // hostname is the name of the VIPService that will be created for this Ingress as well as the first label in - // the MagicDNS name of the Ingress. + // hostname is the name of the Tailscale Service that will be created + // for this Ingress as well as the first label in the MagicDNS name of + // the Ingress. hostname := hostnameForIngress(ing) logger = logger.With("hostname", hostname) - // needsRequeue is set to true if the underlying VIPService has changed as a result of this reconcile. If that - // is the case, we reconcile the Ingress one more time to ensure that concurrent updates to the VIPService in a - // multi-cluster Ingress setup have not resulted in another actor overwriting our VIPService update. + // needsRequeue is set to true if the underlying Tailscale Service has + // changed as a result of this reconcile. If that is the case, we + // reconcile the Ingress one more time to ensure that concurrent updates + // to the Tailscale Service in a multi-cluster Ingress setup have not + // resulted in another actor overwriting our Tailscale Service update. 
needsRequeue := false if !ing.DeletionTimestamp.IsZero() || !r.shouldExpose(ing) { needsRequeue, err = r.maybeCleanup(ctx, hostname, ing, logger) @@ -133,15 +140,28 @@ func (r *HAIngressReconciler) Reconcile(ctx context.Context, req reconcile.Reque return res, nil } -// maybeProvision ensures that a VIPService for this Ingress exists and is up to date and that the serve config for the +// maybeProvision ensures that a Tailscale Service for this Ingress exists and is up to date and that the serve config for the // corresponding ProxyGroup contains the Ingress backend's definition. -// If a VIPService does not exist, it will be created. -// If a VIPService exists, but only with owner references from other operator instances, an owner reference for this +// If a Tailscale Service does not exist, it will be created. +// If a Tailscale Service exists, but only with owner references from other operator instances, an owner reference for this // operator instance is added. -// If a VIPService exists, but does not have an owner reference from any operator, we error +// If a Tailscale Service exists, but does not have an owner reference from any operator, we error // out assuming that this is an owner reference created by an unknown actor. -// Returns true if the operation resulted in a VIPService update. +// Returns true if the operation resulted in a Tailscale Service update. func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) (svcsChanged bool, err error) { + // Currently (2025-05) Tailscale Services are behind an alpha feature flag that + // needs to be explicitly enabled for a tailnet to be able to use them. + serviceName := tailcfg.ServiceName("svc:" + hostname) + existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) + if isErrorFeatureFlagNotEnabled(err) { + logger.Warn(msgFeatureFlagNotEnabled) + r.recorder.Event(ing, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled) + return false, nil + } + if err != nil && !isErrorTailscaleServiceNotFound(err) { + return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) + } + if err := validateIngressClass(ctx, r.Client); err != nil { logger.Infof("error validating tailscale IngressClass: %v.", err) return false, nil @@ -149,7 +169,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin // Get and validate ProxyGroup readiness pgName := ing.Annotations[AnnotationProxyGroup] if pgName == "" { - logger.Infof("[unexpected] no ProxyGroup annotation, skipping VIPService provisioning") + logger.Infof("[unexpected] no ProxyGroup annotation, skipping Tailscale Service provisioning") return false, nil } logger = logger.With("ProxyGroup", pgName) @@ -194,60 +214,49 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin r.mu.Unlock() } - // 1. Ensure that if Ingress' hostname has changed, any VIPService + // 1. Ensure that if Ingress' hostname has changed, any Tailscale Service // resources corresponding to the old hostname are cleaned up. - // In practice, this function will ensure that any VIPServices that are + // In practice, this function will ensure that any Tailscale Services that are // associated with the provided ProxyGroup and no longer owned by an // Ingress are cleaned up. 
This is fine- it is not expensive and ensures // that in edge cases (a single update changed both hostname and removed - // ProxyGroup annotation) the VIPService is more likely to be + // ProxyGroup annotation) the Tailscale Service is more likely to be // (eventually) removed. svcsChanged, err = r.maybeCleanupProxyGroup(ctx, pgName, logger) if err != nil { - return false, fmt.Errorf("failed to cleanup VIPService resources for ProxyGroup: %w", err) + return false, fmt.Errorf("failed to cleanup Tailscale Service resources for ProxyGroup: %w", err) } - // 2. Ensure that there isn't a VIPService with the same hostname + // 2. Ensure that there isn't a Tailscale Service with the same hostname // already created and not owned by this Ingress. // TODO(irbekrm): perhaps in future we could have record names being - // stored on VIPServices. I am not certain if there might not be edge + // stored on Tailscale Services. I am not certain if there might not be edge // cases (custom domains, etc?) where attempting to determine the DNS - // name of the VIPService in this way won't be incorrect. - tcd, err := r.tailnetCertDomain(ctx) - if err != nil { - return false, fmt.Errorf("error determining DNS name base: %w", err) - } - dnsName := hostname + "." + tcd - serviceName := tailcfg.ServiceName("svc:" + hostname) - existingVIPSvc, err := r.tsClient.GetVIPService(ctx, serviceName) - // TODO(irbekrm): here and when creating the VIPService, verify if the - // error is not terminal (and therefore should not be reconciled). For - // example, if the hostname is already a hostname of a Tailscale node, - // the GET here will fail. - if err != nil { - errResp := &tailscale.ErrResponse{} - if ok := errors.As(err, errResp); ok && errResp.Status != http.StatusNotFound { - return false, fmt.Errorf("error getting VIPService %q: %w", hostname, err) - } - } - // Generate the VIPService owner annotation for new or existing VIPService. - // This checks and ensures that VIPService's owner references are updated + // name of the Tailscale Service in this way won't be incorrect. + + // Generate the Tailscale Service owner annotation for a new or existing Tailscale Service. + // This checks and ensures that Tailscale Service's owner references are updated // for this Ingress and errors if that is not possible (i.e. because it - // appears that the VIPService has been created by a non-operator actor). - updatedAnnotations, err := r.ownerAnnotations(existingVIPSvc) + // appears that the Tailscale Service has been created by a non-operator actor). + updatedAnnotations, err := r.ownerAnnotations(existingTSSvc) if err != nil { - const instr = "To proceed, you can either manually delete the existing VIPService or choose a different MagicDNS name at `.spec.tls.hosts[0] in the Ingress definition" - msg := fmt.Sprintf("error ensuring ownership of VIPService %s: %v. %s", hostname, err, instr) + const instr = "To proceed, you can either manually delete the existing Tailscale Service or choose a different MagicDNS name at `.spec.tls.hosts[0] in the Ingress definition" + msg := fmt.Sprintf("error ensuring ownership of Tailscale Service %s: %v. %s", hostname, err, instr) logger.Warn(msg) - r.recorder.Event(ing, corev1.EventTypeWarning, "InvalidVIPService", msg) + r.recorder.Event(ing, corev1.EventTypeWarning, "InvalidTailscaleService", msg) return false, nil } // 3. 
Ensure that TLS Secret and RBAC exists + tcd, err := r.tailnetCertDomain(ctx) + if err != nil { + return false, fmt.Errorf("error determining DNS name base: %w", err) + } + dnsName := hostname + "." + tcd if err := r.ensureCertResources(ctx, pgName, dnsName, ing); err != nil { return false, fmt.Errorf("error ensuring cert resources: %w", err) } - // 4. Ensure that the serve config for the ProxyGroup contains the VIPService. + // 4. Ensure that the serve config for the ProxyGroup contains the Tailscale Service. cm, cfg, err := r.proxyGroupServeConfig(ctx, pgName) if err != nil { return false, fmt.Errorf("error getting Ingress serve config: %w", err) @@ -303,42 +312,42 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin } } - // 4. Ensure that the VIPService exists and is up to date. + // 4. Ensure that the Tailscale Service exists and is up to date. tags := r.defaultTags if tstr, ok := ing.Annotations[AnnotationTags]; ok { tags = strings.Split(tstr, ",") } - vipPorts := []string{"443"} // always 443 for Ingress + tsSvcPorts := []string{"443"} // always 443 for Ingress if isHTTPEndpointEnabled(ing) { - vipPorts = append(vipPorts, "80") + tsSvcPorts = append(tsSvcPorts, "80") } - const managedVIPServiceComment = "This VIPService is managed by the Tailscale Kubernetes Operator, do not modify" - vipSvc := &tailscale.VIPService{ + const managedTSServiceComment = "This Tailscale Service is managed by the Tailscale Kubernetes Operator, do not modify" + tsSvc := &tailscale.VIPService{ Name: serviceName, Tags: tags, - Ports: vipPorts, - Comment: managedVIPServiceComment, + Ports: tsSvcPorts, + Comment: managedTSServiceComment, Annotations: updatedAnnotations, } - if existingVIPSvc != nil { - vipSvc.Addrs = existingVIPSvc.Addrs + if existingTSSvc != nil { + tsSvc.Addrs = existingTSSvc.Addrs } - // TODO(irbekrm): right now if two Ingress resources attempt to apply different VIPService configs (different + // TODO(irbekrm): right now if two Ingress resources attempt to apply different Tailscale Service configs (different // tags, or HTTP endpoint settings) we can end up reconciling those in a loop. We should detect when an Ingress // with the same generation number has been reconciled ~more than N times and stop attempting to apply updates. - if existingVIPSvc == nil || - !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) || - !reflect.DeepEqual(vipSvc.Ports, existingVIPSvc.Ports) || - !ownersAreSetAndEqual(vipSvc, existingVIPSvc) { - logger.Infof("Ensuring VIPService exists and is up to date") - if err := r.tsClient.CreateOrUpdateVIPService(ctx, vipSvc); err != nil { - return false, fmt.Errorf("error creating VIPService: %w", err) + if existingTSSvc == nil || + !reflect.DeepEqual(tsSvc.Tags, existingTSSvc.Tags) || + !reflect.DeepEqual(tsSvc.Ports, existingTSSvc.Ports) || + !ownersAreSetAndEqual(tsSvc, existingTSSvc) { + logger.Infof("Ensuring Tailscale Service exists and is up to date") + if err := r.tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { + return false, fmt.Errorf("error creating Tailscale Service: %w", err) } } - // 5. Update tailscaled's AdvertiseServices config, which should add the VIPService + // 5. Update tailscaled's AdvertiseServices config, which should add the Tailscale Service // IPs to the ProxyGroup Pods' AllowedIPs in the next netmap update if approved. 
mode := serviceAdvertisementHTTPS if isHTTPEndpointEnabled(ing) { @@ -396,9 +405,9 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin const prefix = "Updating Ingress status" if count == 0 { - logger.Infof("%s. No Pods are advertising VIPService yet", prefix) + logger.Infof("%s. No Pods are advertising Tailscale Service yet", prefix) } else { - logger.Infof("%s. %d Pod(s) advertising VIPService", prefix, count) + logger.Infof("%s. %d Pod(s) advertising Tailscale Service", prefix, count) } if err := r.Status().Update(ctx, ing); err != nil { @@ -407,8 +416,12 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin return svcsChanged, nil } -// VIPServices that are associated with the provided ProxyGroup and no longer managed this operator's instance are deleted, if not owned by other operator instances, else the owner reference is cleaned up. -// Returns true if the operation resulted in existing VIPService updates (owner reference removal). +// maybeCleanupProxyGroup ensures that any Tailscale Services that are +// associated with the provided ProxyGroup and no longer needed for any +// Ingresses exposed on this ProxyGroup are deleted, if not owned by other +// operator instances, else the owner reference is cleaned up. Returns true if +// the operation resulted in an existing Tailscale Service updates (owner +// reference removal). func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger) (svcsChanged bool, err error) { // Get serve config for the ProxyGroup cm, cfg, err := r.proxyGroupServeConfig(ctx, proxyGroupName) @@ -416,7 +429,8 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG return false, fmt.Errorf("getting serve config: %w", err) } if cfg == nil { - return false, nil // ProxyGroup does not have any VIPServices + // ProxyGroup does not have any Tailscale Services associated with it. + return false, nil } ingList := &networkingv1.IngressList{} @@ -424,38 +438,50 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG return false, fmt.Errorf("listing Ingresses: %w", err) } serveConfigChanged := false - // For each VIPService in serve config... - for vipServiceName := range cfg.Services { + // For each Tailscale Service in serve config... + for tsSvcName := range cfg.Services { // ...check if there is currently an Ingress with this hostname found := false for _, i := range ingList.Items { ingressHostname := hostnameForIngress(&i) - if ingressHostname == vipServiceName.WithoutPrefix() { + if ingressHostname == tsSvcName.WithoutPrefix() { found = true break } } if !found { - logger.Infof("VIPService %q is not owned by any Ingress, cleaning up", vipServiceName) + logger.Infof("Tailscale Service %q is not owned by any Ingress, cleaning up", tsSvcName) + tsService, err := r.tsClient.GetVIPService(ctx, tsSvcName) + if isErrorFeatureFlagNotEnabled(err) { + msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled) + logger.Warn(msg) + return false, nil + } + if isErrorTailscaleServiceNotFound(err) { + return false, nil + } + if err != nil { + return false, fmt.Errorf("getting Tailscale Service %q: %w", tsSvcName, err) + } - // Delete the VIPService from control if necessary. - svcsChanged, err = r.cleanupVIPService(ctx, vipServiceName, logger) + // Delete the Tailscale Service from control if necessary. 
+ svcsChanged, err = r.cleanupTailscaleService(ctx, tsService, logger) if err != nil { - return false, fmt.Errorf("deleting VIPService %q: %w", vipServiceName, err) + return false, fmt.Errorf("deleting Tailscale Service %q: %w", tsSvcName, err) } - // Make sure the VIPService is not advertised in tailscaled or serve config. - if err = r.maybeUpdateAdvertiseServicesConfig(ctx, proxyGroupName, vipServiceName, serviceAdvertisementOff, logger); err != nil { + // Make sure the Tailscale Service is not advertised in tailscaled or serve config. + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, proxyGroupName, tsSvcName, serviceAdvertisementOff, logger); err != nil { return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } - _, ok := cfg.Services[vipServiceName] + _, ok := cfg.Services[tsSvcName] if ok { - logger.Infof("Removing VIPService %q from serve config", vipServiceName) - delete(cfg.Services, vipServiceName) + logger.Infof("Removing Tailscale Service %q from serve config", tsSvcName) + delete(cfg.Services, tsSvcName) serveConfigChanged = true } - if err := r.cleanupCertResources(ctx, proxyGroupName, vipServiceName); err != nil { + if err := r.cleanupCertResources(ctx, proxyGroupName, tsSvcName); err != nil { return false, fmt.Errorf("failed to clean up cert resources: %w", err) } } @@ -474,8 +500,8 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG return svcsChanged, nil } -// maybeCleanup ensures that any resources, such as a VIPService created for this Ingress, are cleaned up when the -// Ingress is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the VIPService is only +// maybeCleanup ensures that any resources, such as a Tailscale Service created for this Ingress, are cleaned up when the +// Ingress is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the Tailscale Service is only // deleted if it does not contain any other owner references. If it does the cleanup only removes the owner reference // corresponding to this Ingress. func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) (svcChanged bool, err error) { @@ -485,7 +511,21 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, logger.Debugf("no finalizer, nothing to do") return false, nil } - logger.Infof("Ensuring that VIPService %q configuration is cleaned up", hostname) + logger.Infof("Ensuring that Tailscale Service %q configuration is cleaned up", hostname) + serviceName := tailcfg.ServiceName("svc:" + hostname) + svc, err := r.tsClient.GetVIPService(ctx, serviceName) + if err != nil { + if isErrorFeatureFlagNotEnabled(err) { + msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled) + logger.Warn(msg) + r.recorder.Event(ing, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msg) + return false, nil + } + if isErrorTailscaleServiceNotFound(err) { + return false, nil + } + return false, fmt.Errorf("error getting Tailscale Service: %w", err) + } // Ensure that if cleanup succeeded Ingress finalizers are removed. defer func() { @@ -497,26 +537,25 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, } }() - // 1. Check if there is a VIPService associated with this Ingress. + // 1. Check if there is a Tailscale Service associated with this Ingress. 
pg := ing.Annotations[AnnotationProxyGroup] cm, cfg, err := r.proxyGroupServeConfig(ctx, pg) if err != nil { return false, fmt.Errorf("error getting ProxyGroup serve config: %w", err) } - serviceName := tailcfg.ServiceName("svc:" + hostname) - // VIPService is always first added to serve config and only then created in the Tailscale API, so if it is not - // found in the serve config, we can assume that there is no VIPService. (If the serve config does not exist at + // Tailscale Service is always first added to serve config and only then created in the Tailscale API, so if it is not + // found in the serve config, we can assume that there is no Tailscale Service. (If the serve config does not exist at // all, it is possible that the ProxyGroup has been deleted before cleaning up the Ingress, so carry on with // cleanup). if cfg != nil && cfg.Services != nil && cfg.Services[serviceName] == nil { return false, nil } - // 2. Clean up the VIPService resources. - svcChanged, err = r.cleanupVIPService(ctx, serviceName, logger) + // 2. Clean up the Tailscale Service resources. + svcChanged, err = r.cleanupTailscaleService(ctx, svc, logger) if err != nil { - return false, fmt.Errorf("error deleting VIPService: %w", err) + return false, fmt.Errorf("error deleting Tailscale Service: %w", err) } // 3. Clean up any cluster resources @@ -528,13 +567,13 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, return svcChanged, nil } - // 4. Unadvertise the VIPService in tailscaled config. + // 4. Unadvertise the Tailscale Service in tailscaled config. if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg, serviceName, serviceAdvertisementOff, logger); err != nil { return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } - // 5. Remove the VIPService from the serve config for the ProxyGroup. - logger.Infof("Removing VIPService %q from serve config for ProxyGroup %q", hostname, pg) + // 5. Remove the Tailscale Service from the serve config for the ProxyGroup. + logger.Infof("Removing TailscaleService %q from serve config for ProxyGroup %q", hostname, pg) delete(cfg.Services, serviceName) cfgBytes, err := json.Marshal(cfg) if err != nil { @@ -656,7 +695,7 @@ func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networki errs = append(errs, fmt.Errorf("ProxyGroup %q is not ready", pg.Name)) } - // It is invalid to have multiple Ingress resources for the same VIPService in one cluster. + // It is invalid to have multiple Ingress resources for the same Tailscale Service in one cluster. ingList := &networkingv1.IngressList{} if err := r.List(ctx, ingList); err != nil { errs = append(errs, fmt.Errorf("[unexpected] error listing Ingresses: %w", err)) @@ -670,32 +709,23 @@ func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networki return errors.Join(errs...) } -// cleanupVIPService deletes any VIPService by the provided name if it is not owned by operator instances other than this one. -// If a VIPService is found, but contains other owner references, only removes this operator's owner reference. -// If a VIPService by the given name is not found or does not contain this operator's owner reference, do nothing. -// It returns true if an existing VIPService was updated to remove owner reference, as well as any error that occurred. 
-func (r *HAIngressReconciler) cleanupVIPService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (updated bool, _ error) { - svc, err := r.tsClient.GetVIPService(ctx, name) - if err != nil { - errResp := &tailscale.ErrResponse{} - if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { - return false, nil - } - - return false, fmt.Errorf("error getting VIPService: %w", err) - } +// cleanupTailscaleService deletes any Tailscale Service by the provided name if it is not owned by operator instances other than this one. +// If a Tailscale Service is found, but contains other owner references, only removes this operator's owner reference. +// If a Tailscale Service by the given name is not found or does not contain this operator's owner reference, do nothing. +// It returns true if an existing Tailscale Service was updated to remove owner reference, as well as any error that occurred. +func (r *HAIngressReconciler) cleanupTailscaleService(ctx context.Context, svc *tailscale.VIPService, logger *zap.SugaredLogger) (updated bool, _ error) { if svc == nil { return false, nil } o, err := parseOwnerAnnotation(svc) if err != nil { - return false, fmt.Errorf("error parsing VIPService owner annotation") + return false, fmt.Errorf("error parsing Tailscale Service's owner annotation") } if o == nil || len(o.OwnerRefs) == 0 { return false, nil } // Comparing with the operatorID only means that we will not be able to - // clean up VIPServices in cases where the operator was deleted from the + // clean up Tailscale Service in cases where the operator was deleted from the // cluster before deleting the Ingress. Perhaps the comparison could be // 'if or.OperatorID === r.operatorID || or.ingressUID == r.ingressUID'. ix := slices.IndexFunc(o.OwnerRefs, func(or OwnerRef) bool { @@ -705,14 +735,14 @@ func (r *HAIngressReconciler) cleanupVIPService(ctx context.Context, name tailcf return false, nil } if len(o.OwnerRefs) == 1 { - logger.Infof("Deleting VIPService %q", name) - return false, r.tsClient.DeleteVIPService(ctx, name) + logger.Infof("Deleting Tailscale Service %q", svc.Name) + return false, r.tsClient.DeleteVIPService(ctx, svc.Name) } o.OwnerRefs = slices.Delete(o.OwnerRefs, ix, ix+1) - logger.Infof("Deleting VIPService %q", name) + logger.Infof("Deleting Tailscale Service %q", svc.Name) json, err := json.Marshal(o) if err != nil { - return false, fmt.Errorf("error marshalling updated VIPService owner reference: %w", err) + return false, fmt.Errorf("error marshalling updated Tailscale Service owner reference: %w", err) } svc.Annotations[ownerAnnotation] = string(json) return true, r.tsClient.CreateOrUpdateVIPService(ctx, svc) @@ -726,7 +756,7 @@ func isHTTPEndpointEnabled(ing *networkingv1.Ingress) bool { return ing.Annotations[annotationHTTPEndpoint] == "enabled" } -// serviceAdvertisementMode describes the desired state of a VIPService. +// serviceAdvertisementMode describes the desired state of a Tailscale Service. type serviceAdvertisementMode int const ( @@ -743,7 +773,7 @@ func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con return fmt.Errorf("failed to list config Secrets: %w", err) } - // Verify that TLS cert for the VIPService has been successfully issued + // Verify that TLS cert for the Tailscale Service has been successfully issued // before attempting to advertise the service. 
// This is so that in multi-cluster setups where some Ingresses succeed // to issue certs and some do not (rate limits), clients are not pinned @@ -826,10 +856,10 @@ func (a *HAIngressReconciler) numberPodsAdvertising(ctx context.Context, pgName const ownerAnnotation = "tailscale.com/owner-references" -// ownerAnnotationValue is the content of the VIPService.Annotation[ownerAnnotation] field. +// ownerAnnotationValue is the content of the TailscaleService.Annotation[ownerAnnotation] field. type ownerAnnotationValue struct { // OwnerRefs is a list of owner references that identify all operator - // instances that manage this VIPService. + // instances that manage this Tailscale Services. OwnerRefs []OwnerRef `json:"ownerRefs,omitempty"` } @@ -841,9 +871,9 @@ type OwnerRef struct { } // ownerAnnotations returns the updated annotations required to ensure this -// instance of the operator is included as an owner. If the VIPService is not -// nil, but does not contain an owner we return an error as this likely means -// that the VIPService was created by somthing other than a Tailscale +// instance of the operator is included as an owner. If the Tailscale Service is not +// nil, but does not contain an owner reference we return an error as this likely means +// that the Service was created by somthing other than a Tailscale // Kubernetes operator. func (r *HAIngressReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[string]string, error) { ref := OwnerRef{ @@ -853,7 +883,7 @@ func (r *HAIngressReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[s c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}} json, err := json.Marshal(c) if err != nil { - return nil, fmt.Errorf("[unexpected] unable to marshal VIPService owner annotation contents: %w, please report this", err) + return nil, fmt.Errorf("[unexpected] unable to marshal Tailscale Service's owner annotation contents: %w, please report this", err) } return map[string]string{ ownerAnnotation: string(json), @@ -864,7 +894,7 @@ func (r *HAIngressReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[s return nil, err } if o == nil || len(o.OwnerRefs) == 0 { - return nil, fmt.Errorf("VIPService %s exists, but does not contain owner annotation with owner references; not proceeding as this is likely a resource created by something other than the Tailscale Kubernetes operator", svc.Name) + return nil, fmt.Errorf("Tailscale Service %s exists, but does not contain owner annotation with owner references; not proceeding as this is likely a resource created by something other than the Tailscale Kubernetes operator", svc.Name) } if slices.Contains(o.OwnerRefs, ref) { // up to date return svc.Annotations, nil @@ -884,13 +914,13 @@ func (r *HAIngressReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[s } // parseOwnerAnnotation returns nil if no valid owner found. 
-func parseOwnerAnnotation(vipSvc *tailscale.VIPService) (*ownerAnnotationValue, error) { - if vipSvc.Annotations == nil || vipSvc.Annotations[ownerAnnotation] == "" { +func parseOwnerAnnotation(tsSvc *tailscale.VIPService) (*ownerAnnotationValue, error) { + if tsSvc.Annotations == nil || tsSvc.Annotations[ownerAnnotation] == "" { return nil, nil } o := &ownerAnnotationValue{} - if err := json.Unmarshal([]byte(vipSvc.Annotations[ownerAnnotation]), o); err != nil { - return nil, fmt.Errorf("error parsing VIPService %s annotation %q: %w", ownerAnnotation, vipSvc.Annotations[ownerAnnotation], err) + if err := json.Unmarshal([]byte(tsSvc.Annotations[ownerAnnotation]), o); err != nil { + return nil, fmt.Errorf("error parsing Tailscale Service's %s annotation %q: %w", ownerAnnotation, tsSvc.Annotations[ownerAnnotation], err) } return o, nil } @@ -905,8 +935,8 @@ func ownersAreSetAndEqual(a, b *tailscale.VIPService) bool { // ensureCertResources ensures that the TLS Secret for an HA Ingress and RBAC // resources that allow proxies to manage the Secret are created. -// Note that Tailscale VIPService name validation matches Kubernetes -// resource name validation, so we can be certain that the VIPService name +// Note that Tailscale Service's name validation matches Kubernetes +// resource name validation, so we can be certain that the Tailscale Service name // (domain) is a valid Kubernetes resource name. // https://github.com/tailscale/tailscale/blob/8b1e7f646ee4730ad06c9b70c13e7861b964949b/util/dnsname/dnsname.go#L99 // https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names @@ -931,7 +961,7 @@ func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pgName, d func (r *HAIngressReconciler) cleanupCertResources(ctx context.Context, pgName string, name tailcfg.ServiceName) error { domainName, err := r.dnsNameForService(ctx, tailcfg.ServiceName(name)) if err != nil { - return fmt.Errorf("error getting DNS name for VIPService %s: %w", name, err) + return fmt.Errorf("error getting DNS name for Tailscale Service %s: %w", name, err) } labels := certResourceLabels(pgName, domainName) if err := r.DeleteAllOf(ctx, &rbacv1.RoleBinding{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { @@ -947,9 +977,9 @@ func (r *HAIngressReconciler) cleanupCertResources(ctx context.Context, pgName s } // requeueInterval returns a time duration between 5 and 10 minutes, which is -// the period of time after which an HA Ingress, whose VIPService has been newly +// the period of time after which an HA Ingress, whose Tailscale Service has been newly // created or changed, needs to be requeued. This is to protect against -// VIPService owner references being overwritten as a result of concurrent +// Tailscale Service's owner references being overwritten as a result of concurrent // updates during multi-clutster Ingress create/update operations. func requeueInterval() time.Duration { return time.Duration(rand.N(5)+5) * time.Minute @@ -1040,7 +1070,7 @@ func certResourceLabels(pgName, domain string) map[string]string { } } -// dnsNameForService returns the DNS name for the given VIPService name. +// dnsNameForService returns the DNS name for the given Tailscale Service's name. 
func (r *HAIngressReconciler) dnsNameForService(ctx context.Context, svc tailcfg.ServiceName) (string, error) { s := svc.WithoutPrefix() tcd, err := r.tailnetCertDomain(ctx) @@ -1074,3 +1104,19 @@ func (r *HAIngressReconciler) hasCerts(ctx context.Context, svc tailcfg.ServiceN return len(cert) > 0 && len(key) > 0, nil } + +func isErrorFeatureFlagNotEnabled(err error) bool { + // messageFFNotEnabled is the error message returned by + // Tailscale control plane when a Tailscale Service API call is made for a + // tailnet that does not have the Tailscale Services feature flag enabled. + const messageFFNotEnabled = "feature unavailable for tailnet" + var errResp *tailscale.ErrResponse + ok := errors.As(err, &errResp) + return ok && strings.Contains(errResp.Message, messageFFNotEnabled) +} + +func isErrorTailscaleServiceNotFound(err error) bool { + var errResp *tailscale.ErrResponse + ok := errors.As(err, &errResp) + return ok && errResp.Status == http.StatusNotFound +} diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 0ad424bd6..989330862 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -8,10 +8,8 @@ package main import ( "context" "encoding/json" - "errors" "fmt" "maps" - "net/http" "reflect" "testing" @@ -265,8 +263,7 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { if err == nil { t.Fatalf("svc:my-svc not cleaned up") } - var errResp *tailscale.ErrResponse - if !errors.As(err, &errResp) || errResp.Status != http.StatusNotFound { + if !isErrorTailscaleServiceNotFound(err) { t.Fatalf("unexpected error: %v", err) } } diff --git a/cmd/k8s-operator/tsclient.go b/cmd/k8s-operator/tsclient.go index 3101da75d..f49f84af9 100644 --- a/cmd/k8s-operator/tsclient.go +++ b/cmd/k8s-operator/tsclient.go @@ -46,7 +46,10 @@ type tsClient interface { CreateKey(ctx context.Context, caps tailscale.KeyCapabilities) (string, *tailscale.Key, error) Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error) DeleteDevice(ctx context.Context, nodeStableID string) error + // GetVIPService is a method for getting a Tailscale Service. VIPService is the original name for Tailscale Service. GetVIPService(ctx context.Context, name tailcfg.ServiceName) (*tailscale.VIPService, error) + // CreateOrUpdateVIPService is a method for creating or updating a Tailscale Service. CreateOrUpdateVIPService(ctx context.Context, svc *tailscale.VIPService) error + // DeleteVIPService is a method for deleting a Tailscale Service. DeleteVIPService(ctx context.Context, name tailcfg.ServiceName) error } From 49a7685af95a6105f9818d69fcc8ce2440ece67f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 14 May 2025 11:17:15 -0700 Subject: [PATCH 0865/1708] feature/taildrop: add integration test variant with profiles that exist Updates #15970 Updates #15812 Updates tailscale/corp#28449 Change-Id: I52cf25f98636b0beac16275f46e58d0816963895 Signed-off-by: Brad Fitzpatrick --- feature/taildrop/integration_test.go | 51 +++++++++++++++++++++------- 1 file changed, 39 insertions(+), 12 deletions(-) diff --git a/feature/taildrop/integration_test.go b/feature/taildrop/integration_test.go index 46768bb31..6e60b7cba 100644 --- a/feature/taildrop/integration_test.go +++ b/feature/taildrop/integration_test.go @@ -26,6 +26,21 @@ import ( // TODO(bradfitz): add test between different users with the peercap to permit that? 
func TestTaildropIntegration(t *testing.T) { + t.Skip("known failing test; see https://github.com/tailscale/tailscale/issues/15970") + testTaildropIntegration(t, false) +} + +func TestTaildropIntegration_Fresh(t *testing.T) { + testTaildropIntegration(t, true) +} + +// freshProfiles is whether to start the test right away +// with a fresh profile. If false, tailscaled is started, stopped, +// and restarted again to simulate a real-world scenario where +// the first profile already existed. +// +// This exercises an ipnext hook ordering issue we hit earlier. +func testTaildropIntegration(t *testing.T, freshProfiles bool) { tstest.Parallel(t) controlOpt := integration.ConfigureControl(func(s *testcontrol.Server) { s.AllNodesSameUser = true // required for Taildrop @@ -39,18 +54,30 @@ func TestTaildropIntegration(t *testing.T) { n2 := integration.NewTestNode(t, env) d2 := n2.StartDaemon() - n1.AwaitListening() - t.Logf("n1 is listening") - n2.AwaitListening() - t.Logf("n2 is listening") - n1.MustUp() - t.Logf("n1 is up") - n2.MustUp() - t.Logf("n2 is up") - n1.AwaitRunning() - t.Logf("n1 is running") - n2.AwaitRunning() - t.Logf("n2 is running") + awaitUp := func() { + t.Helper() + n1.AwaitListening() + t.Logf("n1 is listening") + n2.AwaitListening() + t.Logf("n2 is listening") + n1.MustUp() + t.Logf("n1 is up") + n2.MustUp() + t.Logf("n2 is up") + n1.AwaitRunning() + t.Logf("n1 is running") + n2.AwaitRunning() + t.Logf("n2 is running") + } + awaitUp() + + if !freshProfiles { + d1.MustCleanShutdown(t) + d2.MustCleanShutdown(t) + d1 = n1.StartDaemon() + d2 = n2.StartDaemon() + awaitUp() + } var peerStableID tailcfg.StableNodeID From 824985afe1767c49a1337974cfeceaa07ad8fdbd Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 14 May 2025 11:57:01 -0500 Subject: [PATCH 0866/1708] feature/taildrop,ipn/ipn{ext,local}: initialize taildrop for initial profile Currently, LocalBackend/ExtensionHost doesn't invoke the profile change callback for the initial profile. Since the initial profile may vary depending on loaded extensions and applied policy settings, it can't be reliably determined until all extensions are initialized. Additionally, some extensions may asynchronously trigger a switch to the "best" profile (based on system state and policy settings) during initialization. We intended to address these issues as part of the ongoing profileManager/LocalBackend refactoring, but the changes didn't land in time for the v1.84 release and the Taildrop refactoring. In this PR, we update the Taildrop extension to retrieve the current profile at initialization time and handle it as a profile change. We also defer extension initialization until LocalBackend has started, since the Taildrop extension already relies on this behavior (e.g., it requires clients to call SetDirectFileRoot before Init). Fixes #15970 Updates #15812 Updates tailscale/corp#28449 Signed-off-by: Nick Khyl --- feature/taildrop/ext.go | 4 ++++ feature/taildrop/integration_test.go | 1 - ipn/ipnext/ipnext.go | 6 +++++- ipn/ipnlocal/local.go | 11 +++++++++-- 4 files changed, 18 insertions(+), 4 deletions(-) diff --git a/feature/taildrop/ext.go b/feature/taildrop/ext.go index aee825ee7..ed26996fe 100644 --- a/feature/taildrop/ext.go +++ b/feature/taildrop/ext.go @@ -100,6 +100,10 @@ func (e *Extension) Init(h ipnext.Host) error { h.Hooks().SetPeerStatus.Add(e.setPeerStatus) h.Hooks().BackendStateChange.Add(e.onBackendStateChange) + // TODO(nickkhyl): remove this after the profileManager refactoring. + // See tailscale/tailscale#15974. 
+ profile, prefs := h.Profiles().CurrentProfileState() + e.onChangeProfile(profile, prefs, false) return nil } diff --git a/feature/taildrop/integration_test.go b/feature/taildrop/integration_test.go index 6e60b7cba..75896a95b 100644 --- a/feature/taildrop/integration_test.go +++ b/feature/taildrop/integration_test.go @@ -26,7 +26,6 @@ import ( // TODO(bradfitz): add test between different users with the peercap to permit that? func TestTaildropIntegration(t *testing.T) { - t.Skip("known failing test; see https://github.com/tailscale/tailscale/issues/15970") testTaildropIntegration(t, false) } diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index 895fadc1c..7a9c39dbb 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -37,7 +37,10 @@ type Extension interface { // It must be the same as the name used to register the extension. Name() string - // Init is called to initialize the extension when LocalBackend is initialized. + // Init is called to initialize the extension when LocalBackend's + // Start method is called. Extensions are created but not initialized + // unless LocalBackend is started. + // // If the extension cannot be initialized, it must return an error, // and its Shutdown method will not be called on the host's shutdown. // Returned errors are not fatal; they are used for logging. @@ -333,6 +336,7 @@ type Hooks struct { // BackendStateChange is called when the backend state changes. BackendStateChange feature.Hooks[func(ipn.State)] + // ProfileStateChange contains callbacks that are invoked when the current login profile // or its [ipn.Prefs] change, after those changes have been made. The current login profile // may be changed either because of a profile switch, or because the profile information // was updated by [LocalBackend.SetControlClientStatus], including when the profile diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 79383aa37..468fd72eb 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -237,6 +237,8 @@ type LocalBackend struct { // for testing and graceful shutdown purposes. goTracker goroutines.Tracker + startOnce sync.Once // protects the one‑time initialization in [LocalBackend.Start] + // extHost is the bridge between [LocalBackend] and the registered [ipnext.Extension]s. // It may be nil in tests that use direct composite literal initialization of [LocalBackend] // instead of calling [NewLocalBackend]. A nil pointer is a valid, no-op host. @@ -568,8 +570,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } } } - - b.extHost.Init() return b, nil } @@ -2162,6 +2162,11 @@ func (b *LocalBackend) getNewControlClientFuncLocked() clientGen { return b.ccGen } +// initOnce is called on the first call to [LocalBackend.Start]. +func (b *LocalBackend) initOnce() { + b.extHost.Init() +} + // Start applies the configuration specified in opts, and starts the // state machine. 
// @@ -2175,6 +2180,8 @@ func (b *LocalBackend) getNewControlClientFuncLocked() clientGen { func (b *LocalBackend) Start(opts ipn.Options) error { b.logf("Start") + b.startOnce.Do(b.initOnce) + var clientToShutdown controlclient.Client defer func() { if clientToShutdown != nil { From 336b3b7df0ab8f9877dcf0b62a09851e4c5178a3 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Thu, 15 May 2025 14:26:19 -0700 Subject: [PATCH 0867/1708] cmd/proxy-to-grafana: strip X-Webauth* headers from all requests (#15985) Update proxy-to-grafana to strip any X-Webauth prefixed headers passed by the client in *every* request, not just those to /login. /api/ routes will also accept these headers to authenticate users, necessitating their removal to prevent forgery. Updates tailscale/corp#28687 Signed-off-by: Patrick O'Doherty --- cmd/proxy-to-grafana/proxy-to-grafana.go | 23 +++--- cmd/proxy-to-grafana/proxy-to-grafana_test.go | 77 +++++++++++++++++++ 2 files changed, 91 insertions(+), 9 deletions(-) create mode 100644 cmd/proxy-to-grafana/proxy-to-grafana_test.go diff --git a/cmd/proxy-to-grafana/proxy-to-grafana.go b/cmd/proxy-to-grafana/proxy-to-grafana.go index bdabd650f..27f5e338c 100644 --- a/cmd/proxy-to-grafana/proxy-to-grafana.go +++ b/cmd/proxy-to-grafana/proxy-to-grafana.go @@ -53,7 +53,7 @@ import ( "strings" "time" - "tailscale.com/client/local" + "tailscale.com/client/tailscale/apitype" "tailscale.com/tailcfg" "tailscale.com/tsnet" ) @@ -195,13 +195,7 @@ func main() { log.Fatal(http.Serve(ln, proxy)) } -func modifyRequest(req *http.Request, localClient *local.Client) { - // with enable_login_token set to true, we get a cookie that handles - // auth for paths that are not /login - if req.URL.Path != "/login" { - return - } - +func modifyRequest(req *http.Request, localClient whoisIdentitySource) { // Delete any existing X-Webauth-* headers to prevent possible spoofing // if getting Tailnet identity fails. 
for h := range req.Header { @@ -210,6 +204,13 @@ func modifyRequest(req *http.Request, localClient *local.Client) { } } + // Set the X-Webauth-* headers only for the /login path + // With enable_login_token set to true, we get a cookie that handles + // auth for paths that are not /login + if req.URL.Path != "/login" { + return + } + user, role, err := getTailscaleIdentity(req.Context(), localClient, req.RemoteAddr) if err != nil { log.Printf("error getting Tailscale user: %v", err) @@ -221,7 +222,7 @@ func modifyRequest(req *http.Request, localClient *local.Client) { req.Header.Set("X-Webauth-Role", role.String()) } -func getTailscaleIdentity(ctx context.Context, localClient *local.Client, ipPort string) (*tailcfg.UserProfile, grafanaRole, error) { +func getTailscaleIdentity(ctx context.Context, localClient whoisIdentitySource, ipPort string) (*tailcfg.UserProfile, grafanaRole, error) { whois, err := localClient.WhoIs(ctx, ipPort) if err != nil { return nil, ViewerRole, fmt.Errorf("failed to identify remote host: %w", err) @@ -248,3 +249,7 @@ func getTailscaleIdentity(ctx context.Context, localClient *local.Client, ipPort return whois.UserProfile, role, nil } + +type whoisIdentitySource interface { + WhoIs(ctx context.Context, ipPort string) (*apitype.WhoIsResponse, error) +} diff --git a/cmd/proxy-to-grafana/proxy-to-grafana_test.go b/cmd/proxy-to-grafana/proxy-to-grafana_test.go new file mode 100644 index 000000000..083c4bc49 --- /dev/null +++ b/cmd/proxy-to-grafana/proxy-to-grafana_test.go @@ -0,0 +1,77 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause +package main + +import ( + "context" + "fmt" + "net/http/httptest" + "testing" + + "tailscale.com/client/tailscale/apitype" + "tailscale.com/tailcfg" +) + +type mockWhoisSource struct { + id *apitype.WhoIsResponse +} + +func (m *mockWhoisSource) WhoIs(ctx context.Context, remoteAddr string) (*apitype.WhoIsResponse, error) { + if m.id == nil { + return nil, fmt.Errorf("missing mock identity") + } + return m.id, nil +} + +var whois = &apitype.WhoIsResponse{ + UserProfile: &tailcfg.UserProfile{ + LoginName: "foobar@example.com", + DisplayName: "Foobar", + }, + Node: &tailcfg.Node{ + ID: 1, + }, +} + +func TestModifyRequest_Login(t *testing.T) { + req := httptest.NewRequest("GET", "/login", nil) + modifyRequest(req, &mockWhoisSource{id: whois}) + + if got := req.Header.Get("X-Webauth-User"); got != "foobar@example.com" { + t.Errorf("X-Webauth-User = %q; want %q", got, "foobar@example.com") + } + + if got := req.Header.Get("X-Webauth-Role"); got != "Viewer" { + t.Errorf("X-Webauth-Role = %q; want %q", got, "Viewer") + } +} + +func TestModifyRequest_RemoveHeaders_Login(t *testing.T) { + req := httptest.NewRequest("GET", "/login", nil) + req.Header.Set("X-Webauth-User", "malicious@example.com") + req.Header.Set("X-Webauth-Role", "Admin") + + modifyRequest(req, &mockWhoisSource{id: whois}) + + if got := req.Header.Get("X-Webauth-User"); got != "foobar@example.com" { + t.Errorf("X-Webauth-User = %q; want %q", got, "foobar@example.com") + } + if got := req.Header.Get("X-Webauth-Role"); got != "Viewer" { + t.Errorf("X-Webauth-Role = %q; want %q", got, "Viewer") + } +} + +func TestModifyRequest_RemoveHeaders_API(t *testing.T) { + req := httptest.NewRequest("DELETE", "/api/org/users/1", nil) + req.Header.Set("X-Webauth-User", "malicious@example.com") + req.Header.Set("X-Webauth-Role", "Admin") + + modifyRequest(req, &mockWhoisSource{id: whois}) + + if got := req.Header.Get("X-Webauth-User"); got != "" { + 
t.Errorf("X-Webauth-User = %q; want %q", got, "") + } + if got := req.Header.Get("X-Webauth-Role"); got != "" { + t.Errorf("X-Webauth-Role = %q; want %q", got, "") + } +} From 9c52856af62074e51f125a90b4153fab61a13b00 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Fri, 16 May 2025 12:51:07 -0400 Subject: [PATCH 0868/1708] prober: correct content-type response (#15989) Content-type was responding as test/plain for probes accepting application/json. Set content type header before setting the response code to correct this. Updates tailscale/corp#27370 Signed-off-by: Mike O'Driscoll --- prober/prober.go | 2 +- prober/prober_test.go | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/prober/prober.go b/prober/prober.go index 4bd522f26..1237611f4 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -559,8 +559,8 @@ func (p *Prober) RunHandler(w http.ResponseWriter, r *http.Request) error { PreviousSuccessRatio: prevInfo.RecentSuccessRatio(), PreviousMedianLatency: prevInfo.RecentMedianLatency(), } - w.WriteHeader(respStatus) w.Header().Set("Content-Type", "application/json") + w.WriteHeader(respStatus) if err := json.NewEncoder(w).Encode(resp); err != nil { return tsweb.Error(http.StatusInternalServerError, "error encoding JSON response", err) } diff --git a/prober/prober_test.go b/prober/prober_test.go index 109953b65..c90557eff 100644 --- a/prober/prober_test.go +++ b/prober/prober_test.go @@ -598,6 +598,9 @@ func TestProberRunHandler(t *testing.T) { } if reqJSON { + if w.Header().Get("Content-Type") != "application/json" { + t.Errorf("unexpected content type: got %q, want application/json", w.Header().Get("Content-Type")) + } var gotJSON RunHandlerResponse if err := json.Unmarshal(w.Body.Bytes(), &gotJSON); err != nil { t.Fatalf("failed to unmarshal JSON response: %v; body: %s", err, w.Body.String()) From 6de4a021bb45e24aece07c3bf64dda15da49cfb0 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 16 May 2025 13:51:40 -0700 Subject: [PATCH 0869/1708] wgengine/magicsock: implement relayManager handshaking (#15977) CallMeMaybeVia reception and endpoint allocation have been collapsed to a single event channel. discoInfo caching for active relay handshakes is now implemented. 
Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 2 +- wgengine/magicsock/magicsock.go | 2 +- wgengine/magicsock/relaymanager.go | 439 +++++++++++++++++++++--- wgengine/magicsock/relaymanager_test.go | 5 +- 4 files changed, 393 insertions(+), 55 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index f88dab29d..e834c277c 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1871,7 +1871,7 @@ func (de *endpoint) resetLocked() { } } de.probeUDPLifetime.resetCycleEndpointLocked() - de.c.relayManager.cancelOutstandingWork(de) + de.c.relayManager.stopWork(de) } func (de *endpoint) numStopAndReset() int64 { diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index cf3ef2352..05f4cf56d 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1960,7 +1960,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke c.discoShort, epDisco.short, via.ServerDisco.ShortString(), ep.publicKey.ShortString(), derpStr(src.String()), len(via.AddrPorts)) - c.relayManager.handleCallMeMaybeVia(via) + c.relayManager.handleCallMeMaybeVia(ep, via) } else { c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got call-me-maybe, %d endpoints", c.discoShort, epDisco.short, diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index b1732ff41..a63754371 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -16,6 +16,7 @@ import ( "tailscale.com/disco" udprelay "tailscale.com/net/udprelay/endpoint" "tailscale.com/types/key" + "tailscale.com/types/ptr" "tailscale.com/util/httpm" "tailscale.com/util/set" ) @@ -28,21 +29,24 @@ type relayManager struct { // =================================================================== // The following fields are owned by a single goroutine, runLoop(). - serversByAddrPort set.Set[netip.AddrPort] - allocWorkByEndpoint map[*endpoint]*relayEndpointAllocWork + serversByAddrPort map[netip.AddrPort]key.DiscoPublic + serversByDisco map[key.DiscoPublic]netip.AddrPort + allocWorkByEndpoint map[*endpoint]*relayEndpointAllocWork + handshakeWorkByEndpointByServerDisco map[*endpoint]map[key.DiscoPublic]*relayHandshakeWork + handshakeWorkByServerDiscoVNI map[serverDiscoVNI]*relayHandshakeWork // =================================================================== // The following chan fields serve event inputs to a single goroutine, // runLoop(). allocateHandshakeCh chan *endpoint allocateWorkDoneCh chan relayEndpointAllocWorkDoneEvent + handshakeWorkDoneCh chan relayEndpointHandshakeWorkDoneEvent cancelWorkCh chan *endpoint newServerEndpointCh chan newRelayServerEndpointEvent rxChallengeCh chan relayHandshakeChallengeEvent - rxCallMeMaybeViaCh chan *disco.CallMeMaybeVia discoInfoMu sync.Mutex // guards the following field - discoInfoByServerDisco map[key.DiscoPublic]*discoInfo + discoInfoByServerDisco map[key.DiscoPublic]*relayHandshakeDiscoInfo // runLoopStoppedCh is written to by runLoop() upon return, enabling event // writers to restart it when they are blocked (see @@ -50,21 +54,60 @@ type relayManager struct { runLoopStoppedCh chan struct{} } -type newRelayServerEndpointEvent struct { +// serverDiscoVNI represents a [tailscale.com/net/udprelay.Server] disco key +// and Geneve header VNI value for a given [udprelay.ServerEndpoint]. 
+type serverDiscoVNI struct { + serverDisco key.DiscoPublic + vni uint32 +} + +// relayHandshakeWork serves to track in-progress relay handshake work for a +// [udprelay.ServerEndpoint]. This structure is immutable once initialized. +type relayHandshakeWork struct { ep *endpoint se udprelay.ServerEndpoint + + // In order to not deadlock, runLoop() must select{} read doneCh when + // attempting to write into rxChallengeCh, and the handshake work goroutine + // must close(doneCh) before attempting to write to + // relayManager.handshakeWorkDoneCh. + rxChallengeCh chan relayHandshakeChallengeEvent + doneCh chan struct{} + + ctx context.Context + cancel context.CancelFunc + wg *sync.WaitGroup } +// newRelayServerEndpointEvent indicates a new [udprelay.ServerEndpoint] has +// become known either via allocation with a relay server, or via +// [disco.CallMeMaybeVia] reception. This structure is immutable once +// initialized. +type newRelayServerEndpointEvent struct { + ep *endpoint + se udprelay.ServerEndpoint + server netip.AddrPort // zero value if learned via [disco.CallMeMaybeVia] +} + +// relayEndpointAllocWorkDoneEvent indicates relay server endpoint allocation +// work for an [*endpoint] has completed. This structure is immutable once +// initialized. type relayEndpointAllocWorkDoneEvent struct { - ep *endpoint work *relayEndpointAllocWork } -// activeWork returns true if there is outstanding allocation or handshaking -// work, otherwise it returns false. -func (r *relayManager) activeWork() bool { - return len(r.allocWorkByEndpoint) > 0 - // TODO(jwhited): consider handshaking work +// relayEndpointHandshakeWorkDoneEvent indicates relay server endpoint handshake +// work for an [*endpoint] has completed. This structure is immutable once +// initialized. +type relayEndpointHandshakeWorkDoneEvent struct { + work *relayHandshakeWork + answerSentTo netip.AddrPort // zero value if answer was not transmitted +} + +// activeWorkRunLoop returns true if there is outstanding allocation or +// handshaking work, otherwise it returns false. +func (r *relayManager) activeWorkRunLoop() bool { + return len(r.allocWorkByEndpoint) > 0 || len(r.handshakeWorkByEndpointByServerDisco) > 0 } // runLoop is a form of event loop. It ensures exclusive access to most of @@ -77,43 +120,40 @@ func (r *relayManager) runLoop() { for { select { case ep := <-r.allocateHandshakeCh: - r.cancelAndClearWork(ep) - r.allocateAllServersForEndpoint(ep) - if !r.activeWork() { + r.stopWorkRunLoop(ep, stopHandshakeWorkOnlyKnownServers) + r.allocateAllServersRunLoop(ep) + if !r.activeWorkRunLoop() { return } - case msg := <-r.allocateWorkDoneCh: - work, ok := r.allocWorkByEndpoint[msg.ep] - if ok && work == msg.work { + case done := <-r.allocateWorkDoneCh: + work, ok := r.allocWorkByEndpoint[done.work.ep] + if ok && work == done.work { // Verify the work in the map is the same as the one that we're // cleaning up. New events on r.allocateHandshakeCh can // overwrite pre-existing keys. 
- delete(r.allocWorkByEndpoint, msg.ep) + delete(r.allocWorkByEndpoint, done.work.ep) } - if !r.activeWork() { + if !r.activeWorkRunLoop() { return } case ep := <-r.cancelWorkCh: - r.cancelAndClearWork(ep) - if !r.activeWork() { + r.stopWorkRunLoop(ep, stopHandshakeWorkAllServers) + if !r.activeWorkRunLoop() { return } - case newEndpoint := <-r.newServerEndpointCh: - _ = newEndpoint - // TODO(jwhited): implement - if !r.activeWork() { + case newServerEndpoint := <-r.newServerEndpointCh: + r.handleNewServerEndpointRunLoop(newServerEndpoint) + if !r.activeWorkRunLoop() { return } - case challenge := <-r.rxChallengeCh: - _ = challenge - // TODO(jwhited): implement - if !r.activeWork() { + case done := <-r.handshakeWorkDoneCh: + r.handleHandshakeWorkDoneRunLoop(done) + if !r.activeWorkRunLoop() { return } - case via := <-r.rxCallMeMaybeViaCh: - _ = via - // TODO(jwhited): implement - if !r.activeWork() { + case challenge := <-r.rxChallengeCh: + r.handleRxChallengeRunLoop(challenge) + if !r.activeWorkRunLoop() { return } } @@ -142,30 +182,93 @@ type relayEndpointAllocWork struct { // init initializes [relayManager] if it is not already initialized. func (r *relayManager) init() { r.initOnce.Do(func() { - r.discoInfoByServerDisco = make(map[key.DiscoPublic]*discoInfo) + r.discoInfoByServerDisco = make(map[key.DiscoPublic]*relayHandshakeDiscoInfo) + r.serversByDisco = make(map[key.DiscoPublic]netip.AddrPort) + r.serversByAddrPort = make(map[netip.AddrPort]key.DiscoPublic) r.allocWorkByEndpoint = make(map[*endpoint]*relayEndpointAllocWork) + r.handshakeWorkByEndpointByServerDisco = make(map[*endpoint]map[key.DiscoPublic]*relayHandshakeWork) + r.handshakeWorkByServerDiscoVNI = make(map[serverDiscoVNI]*relayHandshakeWork) r.allocateHandshakeCh = make(chan *endpoint) r.allocateWorkDoneCh = make(chan relayEndpointAllocWorkDoneEvent) + r.handshakeWorkDoneCh = make(chan relayEndpointHandshakeWorkDoneEvent) r.cancelWorkCh = make(chan *endpoint) r.newServerEndpointCh = make(chan newRelayServerEndpointEvent) r.rxChallengeCh = make(chan relayHandshakeChallengeEvent) - r.rxCallMeMaybeViaCh = make(chan *disco.CallMeMaybeVia) r.runLoopStoppedCh = make(chan struct{}, 1) go r.runLoop() }) } +// relayHandshakeDiscoInfo serves to cache a [*discoInfo] for outstanding +// [*relayHandshakeWork] against a given relay server. +type relayHandshakeDiscoInfo struct { + work set.Set[*relayHandshakeWork] // guarded by relayManager.discoInfoMu + di *discoInfo // immutable once initialized +} + +// ensureDiscoInfoFor ensures a [*discoInfo] will be returned by discoInfo() for +// the server disco key associated with 'work'. Callers must also call +// derefDiscoInfoFor() when 'work' is complete. +func (r *relayManager) ensureDiscoInfoFor(work *relayHandshakeWork) { + r.discoInfoMu.Lock() + defer r.discoInfoMu.Unlock() + di, ok := r.discoInfoByServerDisco[work.se.ServerDisco] + if !ok { + di = &relayHandshakeDiscoInfo{} + di.work.Make() + r.discoInfoByServerDisco[work.se.ServerDisco] = di + } + di.work.Add(work) + if di.di == nil { + di.di = &discoInfo{ + discoKey: work.se.ServerDisco, + discoShort: work.se.ServerDisco.ShortString(), + sharedKey: work.ep.c.discoPrivate.Shared(work.se.ServerDisco), + } + } +} + +// derefDiscoInfoFor decrements the reference count of the [*discoInfo] +// associated with 'work'. 
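+//
+// It pairs with ensureDiscoInfoFor; a minimal usage sketch mirroring
+// handshakeServerEndpoint:
+//
+//	r.ensureDiscoInfoFor(work)
+//	defer r.derefDiscoInfoFor(work)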
+func (r *relayManager) derefDiscoInfoFor(work *relayHandshakeWork) { + r.discoInfoMu.Lock() + defer r.discoInfoMu.Unlock() + di, ok := r.discoInfoByServerDisco[work.se.ServerDisco] + if !ok { + // TODO(jwhited): unexpected + return + } + di.work.Delete(work) + if di.work.Len() == 0 { + delete(r.discoInfoByServerDisco, work.se.ServerDisco) + } +} + // discoInfo returns a [*discoInfo] for 'serverDisco' if there is an // active/ongoing handshake with it, otherwise it returns nil, false. func (r *relayManager) discoInfo(serverDisco key.DiscoPublic) (_ *discoInfo, ok bool) { r.discoInfoMu.Lock() defer r.discoInfoMu.Unlock() di, ok := r.discoInfoByServerDisco[serverDisco] - return di, ok + if ok { + return di.di, ok + } + return nil, false } -func (r *relayManager) handleCallMeMaybeVia(dm *disco.CallMeMaybeVia) { - relayManagerInputEvent(r, nil, &r.rxCallMeMaybeViaCh, dm) +func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, dm *disco.CallMeMaybeVia) { + se := udprelay.ServerEndpoint{ + ServerDisco: dm.ServerDisco, + LamportID: dm.LamportID, + AddrPorts: dm.AddrPorts, + VNI: dm.VNI, + } + se.BindLifetime.Duration = dm.BindLifetime + se.SteadyStateLifetime.Duration = dm.SteadyStateLifetime + relayManagerInputEvent(r, nil, &r.newServerEndpointCh, newRelayServerEndpointEvent{ + ep: ep, + se: se, + }) } func (r *relayManager) handleBindUDPRelayEndpointChallenge(dm *disco.BindUDPRelayEndpointChallenge, di *discoInfo, src netip.AddrPort, vni uint32) { @@ -178,9 +281,9 @@ func (r *relayManager) handleBindUDPRelayEndpointChallenge(dm *disco.BindUDPRela // [relayManager] initialization will make `*eventCh`, so it must be passed as // a pointer to a channel. // -// 'ctx' can be used for returning when runLoop is waiting for the caller to -// return, i.e. the calling goroutine was birthed by runLoop and is cancelable -// via 'ctx'. 'ctx' may be nil. +// 'ctx' can be used for returning when runLoop is waiting for the calling +// goroutine to return, i.e. the calling goroutine was birthed by runLoop and is +// cancelable via 'ctx'. 'ctx' may be nil. func relayManagerInputEvent[T any](r *relayManager, ctx context.Context, eventCh *chan T, event T) { r.init() var ctxDoneCh <-chan struct{} @@ -206,24 +309,258 @@ func (r *relayManager) allocateAndHandshakeAllServers(ep *endpoint) { relayManagerInputEvent(r, nil, &r.allocateHandshakeCh, ep) } -// cancelOutstandingWork cancels all outstanding allocation & handshaking work -// for 'ep'. -func (r *relayManager) cancelOutstandingWork(ep *endpoint) { +// stopWork stops all outstanding allocation & handshaking work for 'ep'. +func (r *relayManager) stopWork(ep *endpoint) { relayManagerInputEvent(r, nil, &r.cancelWorkCh, ep) } -// cancelAndClearWork cancels & clears any outstanding work for 'ep'. -func (r *relayManager) cancelAndClearWork(ep *endpoint) { +// stopHandshakeWorkFilter represents filters for handshake work cancellation +type stopHandshakeWorkFilter bool + +const ( + stopHandshakeWorkAllServers stopHandshakeWorkFilter = false + stopHandshakeWorkOnlyKnownServers = true +) + +// stopWorkRunLoop cancels & clears outstanding allocation and handshaking +// work for 'ep'. Handshake work cancellation is subject to the filter supplied +// in 'f'. 
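+//
+// With stopHandshakeWorkOnlyKnownServers, handshakes against servers that are
+// absent from serversByDisco are left running; these are typically endpoints
+// learned via [disco.CallMeMaybeVia] rather than allocated by this node, so a
+// subsequent allocation pass would not replace them.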
+func (r *relayManager) stopWorkRunLoop(ep *endpoint, f stopHandshakeWorkFilter) { allocWork, ok := r.allocWorkByEndpoint[ep] if ok { allocWork.cancel() allocWork.wg.Wait() delete(r.allocWorkByEndpoint, ep) } - // TODO(jwhited): cancel & clear handshake work + byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[ep] + if ok { + for disco, handshakeWork := range byServerDisco { + _, knownServer := r.serversByDisco[disco] + if knownServer || f == stopHandshakeWorkAllServers { + handshakeWork.cancel() + handshakeWork.wg.Wait() + delete(byServerDisco, disco) + delete(r.handshakeWorkByServerDiscoVNI, serverDiscoVNI{handshakeWork.se.ServerDisco, handshakeWork.se.VNI}) + } + } + if len(byServerDisco) == 0 { + delete(r.handshakeWorkByEndpointByServerDisco, ep) + } + } +} + +func (r *relayManager) handleRxChallengeRunLoop(challenge relayHandshakeChallengeEvent) { + work, ok := r.handshakeWorkByServerDiscoVNI[serverDiscoVNI{challenge.disco, challenge.vni}] + if !ok { + return + } + select { + case <-work.doneCh: + return + case work.rxChallengeCh <- challenge: + return + } +} + +func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshakeWorkDoneEvent) { + byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[done.work.ep] + if !ok { + return + } + work, ok := byServerDisco[done.work.se.ServerDisco] + if !ok || work != done.work { + return + } + delete(byServerDisco, done.work.se.ServerDisco) + if len(byServerDisco) == 0 { + delete(r.handshakeWorkByEndpointByServerDisco, done.work.ep) + } + delete(r.handshakeWorkByServerDiscoVNI, serverDiscoVNI{done.work.se.ServerDisco, done.work.se.VNI}) + if !done.answerSentTo.IsValid() { + // The handshake timed out. + return + } + // We received a challenge from and transmitted an answer towards the relay + // server. + // TODO(jwhited): Make the associated [*endpoint] aware of this + // [tailscale.com/net/udprelay.ServerEndpoint]. +} + +func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelayServerEndpointEvent) { + // Check for duplicate work by server disco + VNI. + sdv := serverDiscoVNI{newServerEndpoint.se.ServerDisco, newServerEndpoint.se.VNI} + existingWork, ok := r.handshakeWorkByServerDiscoVNI[sdv] + if ok { + // There's in-progress handshake work for the server disco + VNI, which + // uniquely identify a [udprelay.ServerEndpoint]. Compare Lamport + // IDs to determine which is newer. + if existingWork.se.LamportID >= newServerEndpoint.se.LamportID { + // The existing work is a duplicate or newer. Return early. + return + } + + // The existing work is no longer valid, clean it up. Be sure to lookup + // by the existing work's [*endpoint], not the incoming "new" work as + // they are not necessarily matching. + existingWork.cancel() + existingWork.wg.Wait() + delete(r.handshakeWorkByServerDiscoVNI, sdv) + byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[existingWork.ep] + if ok { + delete(byServerDisco, sdv.serverDisco) + if len(byServerDisco) == 0 { + delete(r.handshakeWorkByEndpointByServerDisco, existingWork.ep) + } + } + } + + // Check for duplicate work by [*endpoint] + server disco. + byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[newServerEndpoint.ep] + if ok { + existingWork, ok := byServerDisco[newServerEndpoint.se.ServerDisco] + if ok { + if newServerEndpoint.se.LamportID <= existingWork.se.LamportID { + // The "new" server endpoint is outdated or duplicate in + // consideration against existing handshake work. Return early. 
+ return + } + // Cancel existing handshake that has a lower lamport ID. + existingWork.cancel() + existingWork.wg.Wait() + delete(r.handshakeWorkByServerDiscoVNI, sdv) + delete(byServerDisco, sdv.serverDisco) + if len(byServerDisco) == 0 { + delete(r.handshakeWorkByEndpointByServerDisco, existingWork.ep) + } + } + } + + // We're now reasonably sure we're dealing with the latest + // [udprelay.ServerEndpoint] from a server event order perspective + // (LamportID). Update server disco key tracking if appropriate. + if newServerEndpoint.server.IsValid() { + serverDisco, ok := r.serversByAddrPort[newServerEndpoint.server] + if !ok { + // Allocation raced with an update to our known servers set. This + // server is no longer known. Return early. + return + } + if serverDisco.Compare(newServerEndpoint.se.ServerDisco) != 0 { + // The server's disco key has either changed, or simply become + // known for the first time. In the former case we end up detaching + // any in-progress handshake work from a "known" relay server. + // Practically speaking we expect the detached work to fail + // if the server key did in fact change (server restart) while we + // were attempting to handshake with it. It is possible, though + // unlikely, for a server addr:port to effectively move between + // nodes. Either way, there is no harm in detaching existing work, + // and we explicitly let that happen for the rare case the detached + // handshake would complete and remain functional. + delete(r.serversByDisco, serverDisco) + delete(r.serversByAddrPort, newServerEndpoint.server) + r.serversByDisco[serverDisco] = newServerEndpoint.server + r.serversByAddrPort[newServerEndpoint.server] = serverDisco + } + } + + // We're ready to start a new handshake. + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + work := &relayHandshakeWork{ + ep: newServerEndpoint.ep, + se: newServerEndpoint.se, + doneCh: make(chan struct{}), + ctx: ctx, + cancel: cancel, + wg: wg, + } + if byServerDisco == nil { + byServerDisco = make(map[key.DiscoPublic]*relayHandshakeWork) + r.handshakeWorkByEndpointByServerDisco[newServerEndpoint.ep] = byServerDisco + } + byServerDisco[newServerEndpoint.se.ServerDisco] = work + r.handshakeWorkByServerDiscoVNI[sdv] = work + + wg.Add(1) + go r.handshakeServerEndpoint(work) +} + +func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { + defer work.wg.Done() + + done := relayEndpointHandshakeWorkDoneEvent{work: work} + r.ensureDiscoInfoFor(work) + + defer func() { + r.derefDiscoInfoFor(work) + close(work.doneCh) + relayManagerInputEvent(r, work.ctx, &r.handshakeWorkDoneCh, done) + work.cancel() + }() + + sentBindAny := false + bind := &disco.BindUDPRelayEndpoint{} + for _, addrPort := range work.se.AddrPorts { + if addrPort.IsValid() { + sentBindAny = true + go work.ep.c.sendDiscoMessage(addrPort, ptr.To(work.se.VNI), key.NodePublic{}, work.se.ServerDisco, bind, discoLog) + } + } + if !sentBindAny { + return + } + + // Limit goroutine lifetime to a reasonable duration. This is intentionally + // detached and independent of 'BindLifetime' to prevent relay server + // (mis)configuration from negatively impacting client resource usage. + const maxHandshakeLifetime = time.Second * 30 + timer := time.NewTimer(min(work.se.BindLifetime.Duration, maxHandshakeLifetime)) + defer timer.Stop() + + // Wait for cancellation, a challenge to be rx'd, or handshake lifetime to + // expire. Our initial implementation values simplicity over other aspects, + // e.g. 
it is not resilient to any packet loss.
+	//
+	// We may want to eventually consider [disco.BindUDPRelayEndpoint]
+	// retransmission lacking challenge rx, and
+	// [disco.BindUDPRelayEndpointAnswer] duplication in front of
+	// [disco.Ping] until [disco.Ping] or [disco.Pong] is received.
+	select {
+	case <-work.ctx.Done():
+		return
+	case challenge := <-work.rxChallengeCh:
+		answer := &disco.BindUDPRelayEndpointAnswer{Answer: challenge.challenge}
+		done.answerSentTo = challenge.from
+		// Send answer back to relay server. Typically sendDiscoMessage() calls
+		// are invoked via a new goroutine in an attempt to limit crypto+syscall
+		// time contributing to system backpressure, and to fire roundtrip
+		// latency-relevant messages as closely together as possible. We
+		// intentionally don't do that here, because:
+		// 1. The primary backpressure concern is around the work.rxChallengeCh
+		//    writer on the [Conn] packet rx path, who is already unblocked
+		//    since we read from the channel. Relay servers only ever tx one
+		//    challenge per rx'd bind message for a given (the first seen) src.
+		// 2. runLoop() may be waiting for this 'work' to complete if
+		//    explicitly canceled for some reason elsewhere, but this is
+		//    typically only around [*endpoint] and/or [Conn] shutdown.
+		// 3. It complicates the defer()'d [*discoInfo] deref and 'work'
+		//    completion event order. sendDiscoMessage() assumes the related
+		//    [*discoInfo] is still available. We also don't want the
+		//    [*endpoint] to send a [disco.Ping] before the
+		//    [disco.BindUDPRelayEndpointAnswer] has gone out, otherwise the
+		//    remote side will never see the ping, delaying/preventing the
+		//    [udprelay.ServerEndpoint] from becoming fully operational.
+		// 4. This is a singular tx with no roundtrip latency measurements
+		//    involved.
+		work.ep.c.sendDiscoMessage(challenge.from, ptr.To(work.se.VNI), key.NodePublic{}, work.se.ServerDisco, answer, discoLog)
+		return
+	case <-timer.C:
+		// The handshake timed out.
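+		// done.answerSentTo stays the zero value, which
+		// handleHandshakeWorkDoneRunLoop treats as a timed-out handshake.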
+ return + } } -func (r *relayManager) allocateAllServersForEndpoint(ep *endpoint) { +func (r *relayManager) allocateAllServersRunLoop(ep *endpoint) { if len(r.serversByAddrPort) == 0 { return } @@ -231,17 +568,17 @@ func (r *relayManager) allocateAllServersForEndpoint(ep *endpoint) { started := &relayEndpointAllocWork{ep: ep, cancel: cancel, wg: &sync.WaitGroup{}} for k := range r.serversByAddrPort { started.wg.Add(1) - go r.allocateEndpoint(ctx, started.wg, k, ep) + go r.allocateSingleServer(ctx, started.wg, k, ep) } r.allocWorkByEndpoint[ep] = started go func() { started.wg.Wait() started.cancel() - relayManagerInputEvent(r, ctx, &r.allocateWorkDoneCh, relayEndpointAllocWorkDoneEvent{ep: ep, work: started}) + relayManagerInputEvent(r, ctx, &r.allocateWorkDoneCh, relayEndpointAllocWorkDoneEvent{work: started}) }() } -func (r *relayManager) allocateEndpoint(ctx context.Context, wg *sync.WaitGroup, server netip.AddrPort, ep *endpoint) { +func (r *relayManager) allocateSingleServer(ctx context.Context, wg *sync.WaitGroup, server netip.AddrPort, ep *endpoint) { // TODO(jwhited): introduce client metrics counters for notable failures defer wg.Done() var b bytes.Buffer diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index 579dceb53..3b75db9f6 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -8,6 +8,7 @@ import ( "testing" "tailscale.com/disco" + "tailscale.com/types/key" ) func TestRelayManagerInitAndIdle(t *testing.T) { @@ -16,11 +17,11 @@ func TestRelayManagerInitAndIdle(t *testing.T) { <-rm.runLoopStoppedCh rm = relayManager{} - rm.cancelOutstandingWork(&endpoint{}) + rm.stopWork(&endpoint{}) <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleCallMeMaybeVia(&disco.CallMeMaybeVia{}) + rm.handleCallMeMaybeVia(&endpoint{c: &Conn{discoPrivate: key.NewDisco()}}, &disco.CallMeMaybeVia{ServerDisco: key.NewDisco().Public()}) <-rm.runLoopStoppedCh rm = relayManager{} From 469fabd8de1894f3cfea9218e51526916dae8f03 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 16 May 2025 16:12:59 -0700 Subject: [PATCH 0870/1708] wgengine/magicsock: add missing logf arg (#15995) Also, add the short version of the node key in parens to match existing patterns. Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index e834c277c..020420f55 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1480,7 +1480,7 @@ func (de *endpoint) addCandidateEndpoint(ep netip.AddrPort, forRxPingTxID stun.T } } size2 := len(de.endpointState) - de.c.dlogf("[v1] magicsock: disco: addCandidateEndpoint pruned %v candidate set from %v to %v entries", size, size2) + de.c.dlogf("[v1] magicsock: disco: addCandidateEndpoint pruned %v (%s) candidate set from %v to %v entries", de.discoShort(), de.publicKey.ShortString(), size, size2) } return false } From 6b97e615d680b24a221c80d5a4c9542f4bfc5649 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Mon, 19 May 2025 10:42:03 +0100 Subject: [PATCH 0871/1708] cmd/containerboot,kube/ingressservices: proxy VIPService TCP/UDP traffic to cluster Services (#15897) cmd/containerboot,kube/ingressservices: proxy VIPService TCP/UDP traffic to cluster Services This PR is part of the work to implement HA for Kubernetes Operator's network layer proxy. 
Adds logic to containerboot to monitor mounted ingress firewall configuration rules and update iptables/nftables rules as the config changes. Also adds new shared types for the ingress configuration. The implementation is intentionally similar to that for HA for egress proxy. Updates tailscale/tailscale#15895 Signed-off-by: chaosinthecrd Signed-off-by: Irbe Krumina --- cmd/containerboot/egressservices.go | 766 +++++++++++++++++ ...ervices_test.go => egressservices_test.go} | 0 cmd/containerboot/ingressservices.go | 331 ++++++++ cmd/containerboot/ingressservices_test.go | 223 +++++ cmd/containerboot/main.go | 20 + cmd/containerboot/serve.go | 44 - cmd/containerboot/services.go | 778 +----------------- cmd/containerboot/settings.go | 27 +- kube/ingressservices/ingressservices.go | 53 ++ util/linuxfw/fake_netfilter.go | 16 +- 10 files changed, 1455 insertions(+), 803 deletions(-) create mode 100644 cmd/containerboot/egressservices.go rename cmd/containerboot/{services_test.go => egressservices_test.go} (100%) create mode 100644 cmd/containerboot/ingressservices.go create mode 100644 cmd/containerboot/ingressservices_test.go create mode 100644 kube/ingressservices/ingressservices.go diff --git a/cmd/containerboot/egressservices.go b/cmd/containerboot/egressservices.go new file mode 100644 index 000000000..71141f17a --- /dev/null +++ b/cmd/containerboot/egressservices.go @@ -0,0 +1,766 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "log" + "net/http" + "net/netip" + "os" + "path/filepath" + "reflect" + "strconv" + "strings" + "time" + + "github.com/fsnotify/fsnotify" + "tailscale.com/client/local" + "tailscale.com/ipn" + "tailscale.com/kube/egressservices" + "tailscale.com/kube/kubeclient" + "tailscale.com/kube/kubetypes" + "tailscale.com/syncs" + "tailscale.com/tailcfg" + "tailscale.com/util/httpm" + "tailscale.com/util/linuxfw" + "tailscale.com/util/mak" +) + +const tailscaleTunInterface = "tailscale0" + +// Modified using a build flag to speed up tests. +var testSleepDuration string + +// This file contains functionality to run containerboot as a proxy that can +// route cluster traffic to one or more tailnet targets, based on portmapping +// rules read from a configfile. Currently (9/2024) this is only used for the +// Kubernetes operator egress proxies. + +// egressProxy knows how to configure firewall rules to route cluster traffic to +// one or more tailnet services. +type egressProxy struct { + cfgPath string // path to a directory with egress services config files + + nfr linuxfw.NetfilterRunner // never nil + + kc kubeclient.Client // never nil + stateSecret string // name of the kube state Secret + + tsClient *local.Client // never nil + + netmapChan chan ipn.Notify // chan to receive netmap updates on + + podIPv4 string // never empty string, currently only IPv4 is supported + + // tailnetFQDNs is the egress service FQDN to tailnet IP mappings that + // were last used to configure firewall rules for this proxy. + // TODO(irbekrm): target addresses are also stored in the state Secret. + // Evaluate whether we should retrieve them from there and not store in + // memory at all. + targetFQDNs map[string][]netip.Prefix + + tailnetAddrs []netip.Prefix // tailnet IPs of this tailnet device + + // shortSleep is the backoff sleep between healthcheck endpoint calls - can be overridden in tests. 
+ shortSleep time.Duration + // longSleep is the time to sleep after the routing rules are updated to increase the chance that kube + // proxies on all nodes have updated their routing configuration. It can be configured to 0 in + // tests. + longSleep time.Duration + // client is a client that can send HTTP requests. + client httpClient +} + +// httpClient is a client that can send HTTP requests and can be mocked in tests. +type httpClient interface { + Do(*http.Request) (*http.Response, error) +} + +// run configures egress proxy firewall rules and ensures that the firewall rules are reconfigured when: +// - the mounted egress config has changed +// - the proxy's tailnet IP addresses have changed +// - tailnet IPs have changed for any backend targets specified by tailnet FQDN +func (ep *egressProxy) run(ctx context.Context, n ipn.Notify, opts egressProxyRunOpts) error { + ep.configure(opts) + var tickChan <-chan time.Time + var eventChan <-chan fsnotify.Event + // TODO (irbekrm): take a look if this can be pulled into a single func + // shared with serve config loader. + if w, err := fsnotify.NewWatcher(); err != nil { + log.Printf("failed to create fsnotify watcher, timer-only mode: %v", err) + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + tickChan = ticker.C + } else { + defer w.Close() + if err := w.Add(ep.cfgPath); err != nil { + return fmt.Errorf("failed to add fsnotify watch: %w", err) + } + eventChan = w.Events + } + + if err := ep.sync(ctx, n); err != nil { + return err + } + for { + select { + case <-ctx.Done(): + return nil + case <-tickChan: + log.Printf("periodic sync, ensuring firewall config is up to date...") + case <-eventChan: + log.Printf("config file change detected, ensuring firewall config is up to date...") + case n = <-ep.netmapChan: + shouldResync := ep.shouldResync(n) + if !shouldResync { + continue + } + log.Printf("netmap change detected, ensuring firewall config is up to date...") + } + if err := ep.sync(ctx, n); err != nil { + return fmt.Errorf("error syncing egress service config: %w", err) + } + } +} + +type egressProxyRunOpts struct { + cfgPath string + nfr linuxfw.NetfilterRunner + kc kubeclient.Client + tsClient *local.Client + stateSecret string + netmapChan chan ipn.Notify + podIPv4 string + tailnetAddrs []netip.Prefix +} + +// applyOpts configures egress proxy using the provided options. +func (ep *egressProxy) configure(opts egressProxyRunOpts) { + ep.cfgPath = opts.cfgPath + ep.nfr = opts.nfr + ep.kc = opts.kc + ep.tsClient = opts.tsClient + ep.stateSecret = opts.stateSecret + ep.netmapChan = opts.netmapChan + ep.podIPv4 = opts.podIPv4 + ep.tailnetAddrs = opts.tailnetAddrs + ep.client = &http.Client{} // default HTTP client + sleepDuration := time.Second + if d, err := time.ParseDuration(testSleepDuration); err == nil && d > 0 { + log.Printf("using test sleep duration %v", d) + sleepDuration = d + } + ep.shortSleep = sleepDuration + ep.longSleep = sleepDuration * 10 +} + +// sync triggers an egress proxy config resync. The resync calculates the diff between config and status to determine if +// any firewall rules need to be updated. 
Currently using status in state Secret as a reference for what is the current +// firewall configuration is good enough because - the status is keyed by the Pod IP - we crash the Pod on errors such +// as failed firewall update +func (ep *egressProxy) sync(ctx context.Context, n ipn.Notify) error { + cfgs, err := ep.getConfigs() + if err != nil { + return fmt.Errorf("error retrieving egress service configs: %w", err) + } + status, err := ep.getStatus(ctx) + if err != nil { + return fmt.Errorf("error retrieving current egress proxy status: %w", err) + } + newStatus, err := ep.syncEgressConfigs(cfgs, status, n) + if err != nil { + return fmt.Errorf("error syncing egress service configs: %w", err) + } + if !servicesStatusIsEqual(newStatus, status) { + if err := ep.setStatus(ctx, newStatus, n); err != nil { + return fmt.Errorf("error setting egress proxy status: %w", err) + } + } + return nil +} + +// addrsHaveChanged returns true if the provided netmap update contains tailnet address change for this proxy node. +// Netmap must not be nil. +func (ep *egressProxy) addrsHaveChanged(n ipn.Notify) bool { + return !reflect.DeepEqual(ep.tailnetAddrs, n.NetMap.SelfNode.Addresses()) +} + +// syncEgressConfigs adds and deletes firewall rules to match the desired +// configuration. It uses the provided status to determine what is currently +// applied and updates the status after a successful sync. +func (ep *egressProxy) syncEgressConfigs(cfgs *egressservices.Configs, status *egressservices.Status, n ipn.Notify) (*egressservices.Status, error) { + if !(wantsServicesConfigured(cfgs) || hasServicesConfigured(status)) { + return nil, nil + } + + // Delete unnecessary services. + if err := ep.deleteUnnecessaryServices(cfgs, status); err != nil { + return nil, fmt.Errorf("error deleting services: %w", err) + + } + newStatus := &egressservices.Status{} + if !wantsServicesConfigured(cfgs) { + return newStatus, nil + } + + // Add new services, update rules for any that have changed. + rulesPerSvcToAdd := make(map[string][]rule, 0) + rulesPerSvcToDelete := make(map[string][]rule, 0) + for svcName, cfg := range *cfgs { + tailnetTargetIPs, err := ep.tailnetTargetIPsForSvc(cfg, n) + if err != nil { + return nil, fmt.Errorf("error determining tailnet target IPs: %w", err) + } + rulesToAdd, rulesToDelete, err := updatesForCfg(svcName, cfg, status, tailnetTargetIPs) + if err != nil { + return nil, fmt.Errorf("error validating service changes: %v", err) + } + log.Printf("syncegressservices: looking at svc %s rulesToAdd %d rulesToDelete %d", svcName, len(rulesToAdd), len(rulesToDelete)) + if len(rulesToAdd) != 0 { + mak.Set(&rulesPerSvcToAdd, svcName, rulesToAdd) + } + if len(rulesToDelete) != 0 { + mak.Set(&rulesPerSvcToDelete, svcName, rulesToDelete) + } + if len(rulesToAdd) != 0 || ep.addrsHaveChanged(n) { + // For each tailnet target, set up SNAT from the local tailnet device address of the matching + // family. + for _, t := range tailnetTargetIPs { + var local netip.Addr + for _, pfx := range n.NetMap.SelfNode.Addresses().All() { + if !pfx.IsSingleIP() { + continue + } + if pfx.Addr().Is4() != t.Is4() { + continue + } + local = pfx.Addr() + break + } + if !local.IsValid() { + return nil, fmt.Errorf("no valid local IP: %v", local) + } + if err := ep.nfr.EnsureSNATForDst(local, t); err != nil { + return nil, fmt.Errorf("error setting up SNAT rule: %w", err) + } + } + } + // Update the status. Status will be written back to the state Secret by the caller. 
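+		// Each entry records the resolved tailnet target IPs alongside the
+		// configured portmappings, so that a later sync can diff against it.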
+		mak.Set(&newStatus.Services, svcName, &egressservices.ServiceStatus{TailnetTargetIPs: tailnetTargetIPs, TailnetTarget: cfg.TailnetTarget, Ports: cfg.Ports})
+	}
+
+	// Actually apply the firewall rules.
+	if err := ensureRulesAdded(rulesPerSvcToAdd, ep.nfr); err != nil {
+		return nil, fmt.Errorf("error adding rules: %w", err)
+	}
+	if err := ensureRulesDeleted(rulesPerSvcToDelete, ep.nfr); err != nil {
+		return nil, fmt.Errorf("error deleting rules: %w", err)
+	}
+
+	return newStatus, nil
+}
+
+// updatesForCfg calculates any rules that need to be added or deleted for an individual egress service config.
+func updatesForCfg(svcName string, cfg egressservices.Config, status *egressservices.Status, tailnetTargetIPs []netip.Addr) ([]rule, []rule, error) {
+	rulesToAdd := make([]rule, 0)
+	rulesToDelete := make([]rule, 0)
+	currentConfig, ok := lookupCurrentConfig(svcName, status)
+
+	// If no rules for service are present yet, add them all.
+	if !ok {
+		for _, t := range tailnetTargetIPs {
+			for ports := range cfg.Ports {
+				log.Printf("syncegressservices: svc %s adding port %v", svcName, ports)
+				rulesToAdd = append(rulesToAdd, rule{tailnetPort: ports.TargetPort, containerPort: ports.MatchPort, protocol: ports.Protocol, tailnetIP: t})
+			}
+		}
+		return rulesToAdd, rulesToDelete, nil
+	}
+
+	// If there are no backend targets available, delete any currently configured rules.
+	if len(tailnetTargetIPs) == 0 {
+		log.Printf("tailnet target for egress service %s does not have any backend addresses, deleting all rules", svcName)
+		for _, ip := range currentConfig.TailnetTargetIPs {
+			for ports := range currentConfig.Ports {
+				rulesToDelete = append(rulesToDelete, rule{tailnetPort: ports.TargetPort, containerPort: ports.MatchPort, protocol: ports.Protocol, tailnetIP: ip})
+			}
+		}
+		return rulesToAdd, rulesToDelete, nil
+	}
+
+	// If there are rules present for backend targets that no longer match, delete them.
+	for _, ip := range currentConfig.TailnetTargetIPs {
+		var found bool
+		for _, wantsIP := range tailnetTargetIPs {
+			if reflect.DeepEqual(ip, wantsIP) {
+				found = true
+				break
+			}
+		}
+		if !found {
+			for ports := range currentConfig.Ports {
+				rulesToDelete = append(rulesToDelete, rule{tailnetPort: ports.TargetPort, containerPort: ports.MatchPort, protocol: ports.Protocol, tailnetIP: ip})
+			}
+		}
+	}
+
+	// Sync rules for the currently wanted backend targets.
+	for _, ip := range tailnetTargetIPs {
+
+		// If the backend target is not yet present in status, add all rules.
+		var found bool
+		for _, gotIP := range currentConfig.TailnetTargetIPs {
+			if reflect.DeepEqual(ip, gotIP) {
+				found = true
+				break
+			}
+		}
+		if !found {
+			for ports := range cfg.Ports {
+				rulesToAdd = append(rulesToAdd, rule{tailnetPort: ports.TargetPort, containerPort: ports.MatchPort, protocol: ports.Protocol, tailnetIP: ip})
+			}
+			continue
+		}
+
+		// If the backend target is present in status, check that the
+		// currently applied rules are up to date.
+
+		// Delete any current portmappings that are no longer present in config.
+		for port := range currentConfig.Ports {
+			if _, ok := cfg.Ports[port]; ok {
+				continue
+			}
+			rulesToDelete = append(rulesToDelete, rule{tailnetPort: port.TargetPort, containerPort: port.MatchPort, protocol: port.Protocol, tailnetIP: ip})
+		}
+
+		// Add any new portmappings.
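+		// For example (illustrative): changing a mapping from 80->8080 to
+		// 443->8443 yields one delete for the old portmap and one add for the
+		// new one, per backend IP.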
+		for port := range cfg.Ports {
+			if _, ok := currentConfig.Ports[port]; ok {
+				continue
+			}
+			rulesToAdd = append(rulesToAdd, rule{tailnetPort: port.TargetPort, containerPort: port.MatchPort, protocol: port.Protocol, tailnetIP: ip})
+		}
+	}
+	return rulesToAdd, rulesToDelete, nil
+}
+
+// deleteUnnecessaryServices ensures that any services found on status, but not
+// present in config are deleted.
+func (ep *egressProxy) deleteUnnecessaryServices(cfgs *egressservices.Configs, status *egressservices.Status) error {
+	if !hasServicesConfigured(status) {
+		return nil
+	}
+	if !wantsServicesConfigured(cfgs) {
+		for svcName, svc := range status.Services {
+			log.Printf("service %s is no longer required, deleting", svcName)
+			if err := ensureServiceDeleted(svcName, svc, ep.nfr); err != nil {
+				return fmt.Errorf("error deleting service %s: %w", svcName, err)
+			}
+		}
+		return nil
+	}
+
+	for svcName, svc := range status.Services {
+		if _, ok := (*cfgs)[svcName]; !ok {
+			log.Printf("service %s is no longer required, deleting", svcName)
+			if err := ensureServiceDeleted(svcName, svc, ep.nfr); err != nil {
+				return fmt.Errorf("error deleting service %s: %w", svcName, err)
+			}
+			// TODO (irbekrm): also delete the SNAT rule here
+		}
+	}
+	return nil
+}
+
+// getConfigs gets the mounted egress service configuration.
+func (ep *egressProxy) getConfigs() (*egressservices.Configs, error) {
+	svcsCfg := filepath.Join(ep.cfgPath, egressservices.KeyEgressServices)
+	j, err := os.ReadFile(svcsCfg)
+	if os.IsNotExist(err) {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	if len(j) == 0 || string(j) == "" {
+		return nil, nil
+	}
+	cfg := &egressservices.Configs{}
+	if err := json.Unmarshal(j, &cfg); err != nil {
+		return nil, err
+	}
+	return cfg, nil
+}
+
+// getStatus gets the current status of the configured firewall. The current
+// status is stored in state Secret. Returns nil status if no status that
+// applies to the current proxy Pod was found. Uses the Pod IP to determine if a
+// status found in the state Secret applies to this proxy Pod.
+func (ep *egressProxy) getStatus(ctx context.Context) (*egressservices.Status, error) {
+	secret, err := ep.kc.GetSecret(ctx, ep.stateSecret)
+	if err != nil {
+		return nil, fmt.Errorf("error retrieving state secret: %w", err)
+	}
+	status := &egressservices.Status{}
+	raw, ok := secret.Data[egressservices.KeyEgressServices]
+	if !ok {
+		return nil, nil
+	}
+	if err := json.Unmarshal([]byte(raw), status); err != nil {
+		return nil, fmt.Errorf("error unmarshalling previous config: %w", err)
+	}
+	if reflect.DeepEqual(status.PodIPv4, ep.podIPv4) {
+		return status, nil
+	}
+	return nil, nil
+}
+
+// setStatus writes egress proxy's currently configured firewall to the state
+// Secret and updates proxy's tailnet addresses.
+func (ep *egressProxy) setStatus(ctx context.Context, status *egressservices.Status, n ipn.Notify) error {
+	// Pod IP is used to determine if a stored status applies to THIS proxy Pod.
+ if status == nil { + status = &egressservices.Status{} + } + status.PodIPv4 = ep.podIPv4 + secret, err := ep.kc.GetSecret(ctx, ep.stateSecret) + if err != nil { + return fmt.Errorf("error retrieving state Secret: %w", err) + } + bs, err := json.Marshal(status) + if err != nil { + return fmt.Errorf("error marshalling service config: %w", err) + } + secret.Data[egressservices.KeyEgressServices] = bs + patch := kubeclient.JSONPatch{ + Op: "replace", + Path: fmt.Sprintf("/data/%s", egressservices.KeyEgressServices), + Value: bs, + } + if err := ep.kc.JSONPatchResource(ctx, ep.stateSecret, kubeclient.TypeSecrets, []kubeclient.JSONPatch{patch}); err != nil { + return fmt.Errorf("error patching state Secret: %w", err) + } + ep.tailnetAddrs = n.NetMap.SelfNode.Addresses().AsSlice() + return nil +} + +// tailnetTargetIPsForSvc returns the tailnet IPs to which traffic for this +// egress service should be proxied. The egress service can be configured by IP +// or by FQDN. If it's configured by IP, just return that. If it's configured by +// FQDN, resolve the FQDN and return the resolved IPs. It checks if the +// netfilter runner supports IPv6 NAT and skips any IPv6 addresses if it +// doesn't. +func (ep *egressProxy) tailnetTargetIPsForSvc(svc egressservices.Config, n ipn.Notify) (addrs []netip.Addr, err error) { + if svc.TailnetTarget.IP != "" { + addr, err := netip.ParseAddr(svc.TailnetTarget.IP) + if err != nil { + return nil, fmt.Errorf("error parsing tailnet target IP: %w", err) + } + if addr.Is6() && !ep.nfr.HasIPV6NAT() { + log.Printf("tailnet target is an IPv6 address, but this host does not support IPv6 in the chosen firewall mode. This will probably not work.") + return addrs, nil + } + return []netip.Addr{addr}, nil + } + + if svc.TailnetTarget.FQDN == "" { + return nil, errors.New("unexpected egress service config- neither tailnet target IP nor FQDN is set") + } + if n.NetMap == nil { + log.Printf("netmap is not available, unable to determine backend addresses for %s", svc.TailnetTarget.FQDN) + return addrs, nil + } + var ( + node tailcfg.NodeView + nodeFound bool + ) + for _, nn := range n.NetMap.Peers { + if equalFQDNs(nn.Name(), svc.TailnetTarget.FQDN) { + node = nn + nodeFound = true + break + } + } + if nodeFound { + for _, addr := range node.Addresses().AsSlice() { + if addr.Addr().Is6() && !ep.nfr.HasIPV6NAT() { + log.Printf("tailnet target %v is an IPv6 address, but this host does not support IPv6 in the chosen firewall mode, skipping.", addr.Addr().String()) + continue + } + addrs = append(addrs, addr.Addr()) + } + // Egress target endpoints configured via FQDN are stored, so + // that we can determine if a netmap update should trigger a + // resync. + mak.Set(&ep.targetFQDNs, svc.TailnetTarget.FQDN, node.Addresses().AsSlice()) + } + return addrs, nil +} + +// shouldResync parses netmap update and returns true if the update contains +// changes for which the egress proxy's firewall should be reconfigured. +func (ep *egressProxy) shouldResync(n ipn.Notify) bool { + if n.NetMap == nil { + return false + } + + // If proxy's tailnet addresses have changed, resync. + if !reflect.DeepEqual(n.NetMap.SelfNode.Addresses().AsSlice(), ep.tailnetAddrs) { + log.Printf("node addresses have changed, trigger egress config resync") + ep.tailnetAddrs = n.NetMap.SelfNode.Addresses().AsSlice() + return true + } + + // If the IPs for any of the egress services configured via FQDN have + // changed, resync. 
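+	// targetFQDNs holds the backend addresses captured at the last successful
+	// sync, so comparing them against the peers in this netmap update is
+	// enough to detect backend changes.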
+	for fqdn, ips := range ep.targetFQDNs {
+		for _, nn := range n.NetMap.Peers {
+			if equalFQDNs(nn.Name(), fqdn) {
+				if !reflect.DeepEqual(ips, nn.Addresses().AsSlice()) {
+					log.Printf("backend addresses for egress target %q have changed old IPs %v, new IPs %v trigger egress config resync", nn.Name(), ips, nn.Addresses().AsSlice())
+					return true
+				}
+				break
+			}
+		}
+	}
+	return false
+}
+
+// ensureServiceDeleted ensures that any rules for an egress service are removed
+// from the firewall configuration.
+func ensureServiceDeleted(svcName string, svc *egressservices.ServiceStatus, nfr linuxfw.NetfilterRunner) error {
+
+	// Note that the portmap is needed for iptables based firewall only.
+	// Nftables group rules for a service in a chain, so there is no need to
+	// specify individual portmapping based rules.
+	pms := make([]linuxfw.PortMap, 0)
+	for pm := range svc.Ports {
+		pms = append(pms, linuxfw.PortMap{MatchPort: pm.MatchPort, TargetPort: pm.TargetPort, Protocol: pm.Protocol})
+	}
+
+	if err := nfr.DeleteSvc(svcName, tailscaleTunInterface, svc.TailnetTargetIPs, pms); err != nil {
+		return fmt.Errorf("error deleting service %s: %w", svcName, err)
+	}
+	return nil
+}
+
+// ensureRulesAdded ensures that all portmapping rules are added to the firewall
+// configuration. For any rules that already exist, calling this function is a
+// no-op. In case of nftables, a service consists of one or two (one per IP
+// family) chains that contain the portmapping rules for the service, and the
+// chains are created as needed when this function is called.
+func ensureRulesAdded(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner) error {
+	for svc, rules := range rulesPerSvc {
+		for _, rule := range rules {
+			log.Printf("ensureRulesAdded svc %s tailnetTarget %s container port %d tailnet port %d protocol %s", svc, rule.tailnetIP, rule.containerPort, rule.tailnetPort, rule.protocol)
+			if err := nfr.EnsurePortMapRuleForSvc(svc, tailscaleTunInterface, rule.tailnetIP, linuxfw.PortMap{MatchPort: rule.containerPort, TargetPort: rule.tailnetPort, Protocol: rule.protocol}); err != nil {
+				return fmt.Errorf("error ensuring rule: %w", err)
+			}
+		}
+	}
+	return nil
+}
+
+// ensureRulesDeleted ensures that the given rules are deleted from the firewall
+// configuration. For any rules that do not exist, calling this function is a
+// no-op.
+func ensureRulesDeleted(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner) error {
+	for svc, rules := range rulesPerSvc {
+		for _, rule := range rules {
+			log.Printf("ensureRulesDeleted svc %s tailnetTarget %s container port %d tailnet port %d protocol %s", svc, rule.tailnetIP, rule.containerPort, rule.tailnetPort, rule.protocol)
+			if err := nfr.DeletePortMapRuleForSvc(svc, tailscaleTunInterface, rule.tailnetIP, linuxfw.PortMap{MatchPort: rule.containerPort, TargetPort: rule.tailnetPort, Protocol: rule.protocol}); err != nil {
+				return fmt.Errorf("error deleting rule: %w", err)
+			}
+		}
+	}
+	return nil
+}
+
+func lookupCurrentConfig(svcName string, status *egressservices.Status) (*egressservices.ServiceStatus, bool) {
+	if status == nil || len(status.Services) == 0 {
+		return nil, false
+	}
+	c, ok := status.Services[svcName]
+	return c, ok
+}
+
+func equalFQDNs(s, s1 string) bool {
+	s, _ = strings.CutSuffix(s, ".")
+	s1, _ = strings.CutSuffix(s1, ".")
+	return strings.EqualFold(s, s1)
+}
+
+// rule contains configuration for an egress proxy firewall rule.
+type rule struct {
+	containerPort uint16     // port to match incoming traffic
+	tailnetPort   uint16     // tailnet service port
+	tailnetIP     netip.Addr // tailnet service IP
+	protocol      string
+}
+
+func wantsServicesConfigured(cfgs *egressservices.Configs) bool {
+	return cfgs != nil && len(*cfgs) != 0
+}
+
+func hasServicesConfigured(status *egressservices.Status) bool {
+	return status != nil && len(status.Services) != 0
+}
+
+func servicesStatusIsEqual(st, st1 *egressservices.Status) bool {
+	if st == nil && st1 == nil {
+		return true
+	}
+	if st == nil || st1 == nil {
+		return false
+	}
+	st.PodIPv4 = ""
+	st1.PodIPv4 = ""
+	return reflect.DeepEqual(*st, *st1)
+}
+
+// registerHandlers adds a new handler to the provided ServeMux that can be called as a Kubernetes prestop hook to
+// delay shutdown till it's safe to do so.
+func (ep *egressProxy) registerHandlers(mux *http.ServeMux) {
+	mux.Handle(fmt.Sprintf("GET %s", kubetypes.EgessServicesPreshutdownEP), ep)
+}
+
+// ServeHTTP serves the /internal-egress-services-preshutdown endpoint. When it receives a request, it periodically polls
+// the configured health check endpoint for each egress service till the health check endpoint no longer hits this
+// proxy Pod. It uses the Pod-IPv4 header to verify whether a health check response was received from this Pod.
+func (ep *egressProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	cfgs, err := ep.getConfigs()
+	if err != nil {
+		http.Error(w, fmt.Sprintf("error retrieving egress services configs: %v", err), http.StatusInternalServerError)
+		return
+	}
+	if cfgs == nil {
+		if _, err := w.Write([]byte("safe to terminate")); err != nil {
+			http.Error(w, fmt.Sprintf("error writing termination status: %v", err), http.StatusInternalServerError)
+		}
+		return
+	}
+	hp, err := ep.getHEPPings()
+	if err != nil {
+		http.Error(w, fmt.Sprintf("error determining the number of times health check endpoint should be pinged: %v", err), http.StatusInternalServerError)
+		return
+	}
+	ep.waitTillSafeToShutdown(r.Context(), cfgs, hp)
+}
+
+// waitTillSafeToShutdown looks up all egress targets configured to be proxied via this instance and, for each target
+// whose configuration includes a healthcheck endpoint, pings the endpoint till none of the responses
+// are returned by this instance or till the HTTP request times out. In practice, the endpoint will be a Kubernetes Service for which one of the backends
+// would normally be this Pod. When this Pod is being deleted, the operator should have removed it from the Service
+// backends and eventually kube proxy routing rules should be updated to no longer route traffic for the Service to this
+// Pod.
+func (ep *egressProxy) waitTillSafeToShutdown(ctx context.Context, cfgs *egressservices.Configs, hp int) { + if cfgs == nil || len(*cfgs) == 0 { // avoid sleeping if no services are configured + return + } + log.Printf("Ensuring that cluster traffic for egress targets is no longer routed via this Pod...") + wg := syncs.WaitGroup{} + + for s, cfg := range *cfgs { + hep := cfg.HealthCheckEndpoint + if hep == "" { + log.Printf("Tailnet target %q does not have a cluster healthcheck specified, unable to verify if cluster traffic for the target is still routed via this Pod", s) + continue + } + svc := s + wg.Go(func() { + log.Printf("Ensuring that cluster traffic is no longer routed to %q via this Pod...", svc) + for { + if ctx.Err() != nil { // kubelet's HTTP request timeout + log.Printf("Cluster traffic for %s did not stop being routed to this Pod.", svc) + return + } + found, err := lookupPodRoute(ctx, hep, ep.podIPv4, hp, ep.client) + if err != nil { + log.Printf("unable to reach endpoint %q, assuming the routing rules for this Pod have been deleted: %v", hep, err) + break + } + if !found { + log.Printf("service %q is no longer routed through this Pod", svc) + break + } + log.Printf("service %q is still routed through this Pod, waiting...", svc) + time.Sleep(ep.shortSleep) + } + }) + } + wg.Wait() + // The check above really only checked that the routing rules are updated on this node. Sleep for a bit to + // ensure that the routing rules are updated on other nodes. TODO(irbekrm): this may or may not be good enough. + // If it's not good enough, we'd probably want to do something more complex, where the proxies check each other. + log.Printf("Sleeping for %s before shutdown to ensure that kube proxies on all nodes have updated routing configuration", ep.longSleep) + time.Sleep(ep.longSleep) +} + +// lookupPodRoute calls the healthcheck endpoint repeat times and returns true if the endpoint returns with the podIP +// header at least once. +func lookupPodRoute(ctx context.Context, hep, podIP string, repeat int, client httpClient) (bool, error) { + for range repeat { + f, err := lookup(ctx, hep, podIP, client) + if err != nil { + return false, err + } + if f { + return true, nil + } + } + return false, nil +} + +// lookup calls the healthcheck endpoint and returns true if the response contains the podIP header. +func lookup(ctx context.Context, hep, podIP string, client httpClient) (bool, error) { + req, err := http.NewRequestWithContext(ctx, httpm.GET, hep, nil) + if err != nil { + return false, fmt.Errorf("error creating new HTTP request: %v", err) + } + + // Close the TCP connection to ensure that the next request is routed to a different backend. + req.Close = true + + resp, err := client.Do(req) + if err != nil { + log.Printf("Endpoint %q can not be reached: %v, likely because there are no (more) healthy backends", hep, err) + return true, nil + } + defer resp.Body.Close() + gotIP := resp.Header.Get(kubetypes.PodIPv4Header) + return strings.EqualFold(podIP, gotIP), nil +} + +// getHEPPings gets the number of pings that should be sent to a health check endpoint to ensure that each configured +// backend is hit. This assumes that a health check endpoint is a Kubernetes Service and traffic to backend Pods is +// round robin load balanced. 
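+//
+// For example (illustrative): a value of 3 with three ready backends behind
+// the health check Service means lookupPodRoute issues three requests, so
+// under round robin each backend, including this Pod while it is still
+// routable, is expected to be hit once.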
+func (ep *egressProxy) getHEPPings() (int, error) { + hepPingsPath := filepath.Join(ep.cfgPath, egressservices.KeyHEPPings) + j, err := os.ReadFile(hepPingsPath) + if os.IsNotExist(err) { + return 0, nil + } + if err != nil { + return -1, err + } + if len(j) == 0 || string(j) == "" { + return 0, nil + } + hp, err := strconv.Atoi(string(j)) + if err != nil { + return -1, fmt.Errorf("error parsing hep pings as int: %v", err) + } + if hp < 0 { + log.Printf("[unexpected] hep pings is negative: %d", hp) + return 0, nil + } + return hp, nil +} diff --git a/cmd/containerboot/services_test.go b/cmd/containerboot/egressservices_test.go similarity index 100% rename from cmd/containerboot/services_test.go rename to cmd/containerboot/egressservices_test.go diff --git a/cmd/containerboot/ingressservices.go b/cmd/containerboot/ingressservices.go new file mode 100644 index 000000000..1a2da9567 --- /dev/null +++ b/cmd/containerboot/ingressservices.go @@ -0,0 +1,331 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import ( + "context" + "encoding/json" + "fmt" + "log" + "net/netip" + "os" + "path/filepath" + "reflect" + "time" + + "github.com/fsnotify/fsnotify" + "tailscale.com/kube/ingressservices" + "tailscale.com/kube/kubeclient" + "tailscale.com/util/linuxfw" + "tailscale.com/util/mak" +) + +// ingressProxy corresponds to a Kubernetes Operator's network layer ingress +// proxy. It configures firewall rules (iptables or nftables) to proxy tailnet +// traffic to Kubernetes Services. Currently this is only used for network +// layer proxies in HA mode. +type ingressProxy struct { + cfgPath string // path to ingress configfile. + + // nfr is the netfilter runner used to configure firewall rules. + // This is going to be either iptables or nftables based runner. + // Never nil. + nfr linuxfw.NetfilterRunner + + kc kubeclient.Client // never nil + stateSecret string // Secret that holds Tailscale state + + // Pod's IP addresses are used as an identifier of this particular Pod. + podIPv4 string // empty if Pod does not have IPv4 address + podIPv6 string // empty if Pod does not have IPv6 address +} + +// run starts the ingress proxy and ensures that firewall rules are set on start +// and refreshed as ingress config changes. 
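+//
+// Rules are re-synced whenever fsnotify reports a change in the config file's
+// directory, or on a five second ticker if a watcher could not be created.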
+func (p *ingressProxy) run(ctx context.Context, opts ingressProxyOpts) error { + log.Printf("starting ingress proxy...") + p.configure(opts) + var tickChan <-chan time.Time + var eventChan <-chan fsnotify.Event + if w, err := fsnotify.NewWatcher(); err != nil { + log.Printf("failed to create fsnotify watcher, timer-only mode: %v", err) + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + tickChan = ticker.C + } else { + defer w.Close() + dir := filepath.Dir(p.cfgPath) + if err := w.Add(dir); err != nil { + return fmt.Errorf("failed to add fsnotify watch for %v: %w", dir, err) + } + eventChan = w.Events + } + + if err := p.sync(ctx); err != nil { + return err + } + for { + select { + case <-ctx.Done(): + return nil + case <-tickChan: + log.Printf("periodic sync, ensuring firewall config is up to date...") + case <-eventChan: + log.Printf("config file change detected, ensuring firewall config is up to date...") + } + if err := p.sync(ctx); err != nil { + return fmt.Errorf("error syncing ingress service config: %w", err) + } + } +} + +// sync reconciles proxy's firewall rules (iptables or nftables) on ingress config changes: +// - ensures that new firewall rules are added +// - ensures that old firewall rules are deleted +// - updates ingress proxy's status in the state Secret +func (p *ingressProxy) sync(ctx context.Context) error { + // 1. Get the desired firewall configuration + cfgs, err := p.getConfigs() + if err != nil { + return fmt.Errorf("ingress proxy: error retrieving configs: %w", err) + } + + // 2. Get the recorded firewall status + status, err := p.getStatus(ctx) + if err != nil { + return fmt.Errorf("ingress proxy: error retrieving current status: %w", err) + } + + // 3. Ensure that firewall configuration is up to date + if err := p.syncIngressConfigs(cfgs, status); err != nil { + return fmt.Errorf("ingress proxy: error syncing configs: %w", err) + } + var existingConfigs *ingressservices.Configs + if status != nil { + existingConfigs = &status.Configs + } + + // 4. Update the recorded firewall status + if !(ingressServicesStatusIsEqual(cfgs, existingConfigs) && p.isCurrentStatus(status)) { + if err := p.recordStatus(ctx, cfgs); err != nil { + return fmt.Errorf("ingress proxy: error setting status: %w", err) + } + } + return nil +} + +// getConfigs returns the desired ingress service configuration from the mounted +// configfile. +func (p *ingressProxy) getConfigs() (*ingressservices.Configs, error) { + j, err := os.ReadFile(p.cfgPath) + if os.IsNotExist(err) { + return nil, nil + } + if err != nil { + return nil, err + } + if len(j) == 0 || string(j) == "" { + return nil, nil + } + cfg := &ingressservices.Configs{} + if err := json.Unmarshal(j, &cfg); err != nil { + return nil, err + } + return cfg, nil +} + +// getStatus gets the recorded status of the configured firewall. The status is +// stored in the proxy's state Secret. Note that the recorded status might not +// be the current status of the firewall if it belongs to a previous Pod- we +// take that into account further down the line when determining if the desired +// rules are actually present. 
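+// That check is isCurrentStatus, which compares the recorded Pod IPs with this
+// Pod's own IPs.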
+func (p *ingressProxy) getStatus(ctx context.Context) (*ingressservices.Status, error) { + secret, err := p.kc.GetSecret(ctx, p.stateSecret) + if err != nil { + return nil, fmt.Errorf("error retrieving state Secret: %w", err) + } + status := &ingressservices.Status{} + raw, ok := secret.Data[ingressservices.IngressConfigKey] + if !ok { + return nil, nil + } + if err := json.Unmarshal([]byte(raw), status); err != nil { + return nil, fmt.Errorf("error unmarshalling previous config: %w", err) + } + return status, nil +} + +// syncIngressConfigs takes the desired firewall configuration and the recorded +// status and ensures that any missing rules are added and no longer needed +// rules are deleted. +func (p *ingressProxy) syncIngressConfigs(cfgs *ingressservices.Configs, status *ingressservices.Status) error { + rulesToAdd := p.getRulesToAdd(cfgs, status) + rulesToDelete := p.getRulesToDelete(cfgs, status) + + if err := ensureIngressRulesDeleted(rulesToDelete, p.nfr); err != nil { + return fmt.Errorf("error deleting ingress rules: %w", err) + } + if err := ensureIngressRulesAdded(rulesToAdd, p.nfr); err != nil { + return fmt.Errorf("error adding ingress rules: %w", err) + } + return nil +} + +// recordStatus writes the configured firewall status to the proxy's state +// Secret. This allows the Kubernetes Operator to determine whether this proxy +// Pod has setup firewall rules to route traffic for an ingress service. +func (p *ingressProxy) recordStatus(ctx context.Context, newCfg *ingressservices.Configs) error { + status := &ingressservices.Status{} + if newCfg != nil { + status.Configs = *newCfg + } + // Pod IPs are used to determine if recorded status applies to THIS proxy Pod. + status.PodIPv4 = p.podIPv4 + status.PodIPv6 = p.podIPv6 + secret, err := p.kc.GetSecret(ctx, p.stateSecret) + if err != nil { + return fmt.Errorf("error retrieving state Secret: %w", err) + } + bs, err := json.Marshal(status) + if err != nil { + return fmt.Errorf("error marshalling status: %w", err) + } + secret.Data[ingressservices.IngressConfigKey] = bs + patch := kubeclient.JSONPatch{ + Op: "replace", + Path: fmt.Sprintf("/data/%s", ingressservices.IngressConfigKey), + Value: bs, + } + if err := p.kc.JSONPatchResource(ctx, p.stateSecret, kubeclient.TypeSecrets, []kubeclient.JSONPatch{patch}); err != nil { + return fmt.Errorf("error patching state Secret: %w", err) + } + return nil +} + +// getRulesToAdd takes the desired firewall configuration and the recorded +// firewall status and returns a map of missing Tailscale Services and rules. +func (p *ingressProxy) getRulesToAdd(cfgs *ingressservices.Configs, status *ingressservices.Status) map[string]ingressservices.Config { + if cfgs == nil { + return nil + } + var rulesToAdd map[string]ingressservices.Config + for tsSvc, wantsCfg := range *cfgs { + if status == nil || !p.isCurrentStatus(status) { + mak.Set(&rulesToAdd, tsSvc, wantsCfg) + continue + } + gotCfg := status.Configs.GetConfig(tsSvc) + if gotCfg == nil || !reflect.DeepEqual(wantsCfg, *gotCfg) { + mak.Set(&rulesToAdd, tsSvc, wantsCfg) + } + } + return rulesToAdd +} + +// getRulesToDelete takes the desired firewall configuration and the recorded +// status and returns a map of Tailscale Services and rules that need to be deleted. 
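+//
+// For example (illustrative): if the recorded status holds rules for "svc:a"
+// and "svc:b" but the desired config only contains "svc:a" with an unchanged
+// mapping, only the "svc:b" rules are returned for deletion.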
+func (p *ingressProxy) getRulesToDelete(cfgs *ingressservices.Configs, status *ingressservices.Status) map[string]ingressservices.Config { + if status == nil || !p.isCurrentStatus(status) { + return nil + } + var rulesToDelete map[string]ingressservices.Config + for tsSvc, gotCfg := range status.Configs { + if cfgs == nil { + mak.Set(&rulesToDelete, tsSvc, gotCfg) + continue + } + wantsCfg := cfgs.GetConfig(tsSvc) + if wantsCfg != nil && reflect.DeepEqual(*wantsCfg, gotCfg) { + continue + } + mak.Set(&rulesToDelete, tsSvc, gotCfg) + } + return rulesToDelete +} + +// ensureIngressRulesAdded takes a map of Tailscale Services and rules and ensures that the firewall rules are added. +func ensureIngressRulesAdded(cfgs map[string]ingressservices.Config, nfr linuxfw.NetfilterRunner) error { + for serviceName, cfg := range cfgs { + if cfg.IPv4Mapping != nil { + if err := addDNATRuleForSvc(nfr, serviceName, cfg.IPv4Mapping.TailscaleServiceIP, cfg.IPv4Mapping.ClusterIP); err != nil { + return fmt.Errorf("error adding ingress rule for %s: %w", serviceName, err) + } + } + if cfg.IPv6Mapping != nil { + if err := addDNATRuleForSvc(nfr, serviceName, cfg.IPv6Mapping.TailscaleServiceIP, cfg.IPv6Mapping.ClusterIP); err != nil { + return fmt.Errorf("error adding ingress rule for %s: %w", serviceName, err) + } + } + } + return nil +} + +func addDNATRuleForSvc(nfr linuxfw.NetfilterRunner, serviceName string, tsIP, clusterIP netip.Addr) error { + log.Printf("adding DNAT rule for Tailscale Service %s with IP %s to Kubernetes Service IP %s", serviceName, tsIP, clusterIP) + return nfr.EnsureDNATRuleForSvc(serviceName, tsIP, clusterIP) +} + +// ensureIngressRulesDeleted takes a map of Tailscale Services and rules and ensures that the firewall rules are deleted. +func ensureIngressRulesDeleted(cfgs map[string]ingressservices.Config, nfr linuxfw.NetfilterRunner) error { + for serviceName, cfg := range cfgs { + if cfg.IPv4Mapping != nil { + if err := deleteDNATRuleForSvc(nfr, serviceName, cfg.IPv4Mapping.TailscaleServiceIP, cfg.IPv4Mapping.ClusterIP); err != nil { + return fmt.Errorf("error deleting ingress rule for %s: %w", serviceName, err) + } + } + if cfg.IPv6Mapping != nil { + if err := deleteDNATRuleForSvc(nfr, serviceName, cfg.IPv6Mapping.TailscaleServiceIP, cfg.IPv6Mapping.ClusterIP); err != nil { + return fmt.Errorf("error deleting ingress rule for %s: %w", serviceName, err) + } + } + } + return nil +} + +func deleteDNATRuleForSvc(nfr linuxfw.NetfilterRunner, serviceName string, tsIP, clusterIP netip.Addr) error { + log.Printf("deleting DNAT rule for Tailscale Service %s with IP %s to Kubernetes Service IP %s", serviceName, tsIP, clusterIP) + return nfr.DeleteDNATRuleForSvc(serviceName, tsIP, clusterIP) +} + +// isCurrentStatus returns true if the status of an ingress proxy as read from +// the proxy's state Secret is the status of the current proxy Pod. We use +// Pod's IP addresses to determine that the status is for this Pod. +func (p *ingressProxy) isCurrentStatus(status *ingressservices.Status) bool { + if status == nil { + return true + } + return status.PodIPv4 == p.podIPv4 && status.PodIPv6 == p.podIPv6 +} + +type ingressProxyOpts struct { + cfgPath string + nfr linuxfw.NetfilterRunner // never nil + kc kubeclient.Client // never nil + stateSecret string + podIPv4 string + podIPv6 string +} + +// configure sets the ingress proxy's configuration. It is called once on start +// so we don't care about concurrent access to fields. 
+func (p *ingressProxy) configure(opts ingressProxyOpts) { + p.cfgPath = opts.cfgPath + p.nfr = opts.nfr + p.kc = opts.kc + p.stateSecret = opts.stateSecret + p.podIPv4 = opts.podIPv4 + p.podIPv6 = opts.podIPv6 +} + +func ingressServicesStatusIsEqual(st, st1 *ingressservices.Configs) bool { + if st == nil && st1 == nil { + return true + } + if st == nil || st1 == nil { + return false + } + return reflect.DeepEqual(*st, *st1) +} diff --git a/cmd/containerboot/ingressservices_test.go b/cmd/containerboot/ingressservices_test.go new file mode 100644 index 000000000..228bbb159 --- /dev/null +++ b/cmd/containerboot/ingressservices_test.go @@ -0,0 +1,223 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import ( + "net/netip" + "testing" + + "tailscale.com/kube/ingressservices" + "tailscale.com/util/linuxfw" +) + +func TestSyncIngressConfigs(t *testing.T) { + tests := []struct { + name string + currentConfigs *ingressservices.Configs + currentStatus *ingressservices.Status + wantServices map[string]struct { + TailscaleServiceIP netip.Addr + ClusterIP netip.Addr + } + }{ + { + name: "add_new_rules_when_no_existing_config", + currentConfigs: &ingressservices.Configs{ + "svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.1", "", ""), + }, + currentStatus: nil, + wantServices: map[string]struct { + TailscaleServiceIP netip.Addr + ClusterIP netip.Addr + }{ + "svc:foo": makeWantService("100.64.0.1", "10.0.0.1"), + }, + }, + { + name: "add_multiple_services", + currentConfigs: &ingressservices.Configs{ + "svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.1", "", ""), + "svc:bar": makeServiceConfig("100.64.0.2", "10.0.0.2", "", ""), + "svc:baz": makeServiceConfig("100.64.0.3", "10.0.0.3", "", ""), + }, + currentStatus: nil, + wantServices: map[string]struct { + TailscaleServiceIP netip.Addr + ClusterIP netip.Addr + }{ + "svc:foo": makeWantService("100.64.0.1", "10.0.0.1"), + "svc:bar": makeWantService("100.64.0.2", "10.0.0.2"), + "svc:baz": makeWantService("100.64.0.3", "10.0.0.3"), + }, + }, + { + name: "add_both_ipv4_and_ipv6_rules", + currentConfigs: &ingressservices.Configs{ + "svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.1", "2001:db8::1", "2001:db8::2"), + }, + currentStatus: nil, + wantServices: map[string]struct { + TailscaleServiceIP netip.Addr + ClusterIP netip.Addr + }{ + "svc:foo": makeWantService("2001:db8::1", "2001:db8::2"), + }, + }, + { + name: "add_ipv6_only_rules", + currentConfigs: &ingressservices.Configs{ + "svc:ipv6": makeServiceConfig("", "", "2001:db8::10", "2001:db8::20"), + }, + currentStatus: nil, + wantServices: map[string]struct { + TailscaleServiceIP netip.Addr + ClusterIP netip.Addr + }{ + "svc:ipv6": makeWantService("2001:db8::10", "2001:db8::20"), + }, + }, + { + name: "delete_all_rules_when_config_removed", + currentConfigs: nil, + currentStatus: &ingressservices.Status{ + Configs: ingressservices.Configs{ + "svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.1", "", ""), + "svc:bar": makeServiceConfig("100.64.0.2", "10.0.0.2", "", ""), + }, + PodIPv4: "10.0.0.2", // Current pod IPv4 + PodIPv6: "2001:db8::2", // Current pod IPv6 + }, + wantServices: map[string]struct { + TailscaleServiceIP netip.Addr + ClusterIP netip.Addr + }{}, + }, + { + name: "add_remove_modify", + currentConfigs: &ingressservices.Configs{ + "svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.2", "", ""), // Changed cluster IP + "svc:new": makeServiceConfig("100.64.0.4", "10.0.0.4", "", ""), + }, + currentStatus: 
&ingressservices.Status{ + Configs: ingressservices.Configs{ + "svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.1", "", ""), + "svc:bar": makeServiceConfig("100.64.0.2", "10.0.0.2", "", ""), + "svc:baz": makeServiceConfig("100.64.0.3", "10.0.0.3", "", ""), + }, + PodIPv4: "10.0.0.2", // Current pod IPv4 + PodIPv6: "2001:db8::2", // Current pod IPv6 + }, + wantServices: map[string]struct { + TailscaleServiceIP netip.Addr + ClusterIP netip.Addr + }{ + "svc:foo": makeWantService("100.64.0.1", "10.0.0.2"), + "svc:new": makeWantService("100.64.0.4", "10.0.0.4"), + }, + }, + { + name: "update_with_outdated_status", + currentConfigs: &ingressservices.Configs{ + "svc:web": makeServiceConfig("100.64.0.10", "10.0.0.10", "", ""), + "svc:web-ipv6": { + IPv6Mapping: &ingressservices.Mapping{ + TailscaleServiceIP: netip.MustParseAddr("2001:db8::10"), + ClusterIP: netip.MustParseAddr("2001:db8::20"), + }, + }, + "svc:api": makeServiceConfig("100.64.0.20", "10.0.0.20", "", ""), + }, + currentStatus: &ingressservices.Status{ + Configs: ingressservices.Configs{ + "svc:web": makeServiceConfig("100.64.0.10", "10.0.0.10", "", ""), + "svc:web-ipv6": { + IPv6Mapping: &ingressservices.Mapping{ + TailscaleServiceIP: netip.MustParseAddr("2001:db8::10"), + ClusterIP: netip.MustParseAddr("2001:db8::20"), + }, + }, + "svc:old": makeServiceConfig("100.64.0.30", "10.0.0.30", "", ""), + }, + PodIPv4: "10.0.0.1", // Outdated pod IP + PodIPv6: "2001:db8::1", // Outdated pod IP + }, + wantServices: map[string]struct { + TailscaleServiceIP netip.Addr + ClusterIP netip.Addr + }{ + "svc:web": makeWantService("100.64.0.10", "10.0.0.10"), + "svc:web-ipv6": makeWantService("2001:db8::10", "2001:db8::20"), + "svc:api": makeWantService("100.64.0.20", "10.0.0.20"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var nfr linuxfw.NetfilterRunner = linuxfw.NewFakeNetfilterRunner() + + ep := &ingressProxy{ + nfr: nfr, + podIPv4: "10.0.0.2", // Current pod IPv4 + podIPv6: "2001:db8::2", // Current pod IPv6 + } + + err := ep.syncIngressConfigs(tt.currentConfigs, tt.currentStatus) + if err != nil { + t.Fatalf("syncIngressConfigs failed: %v", err) + } + + fake := nfr.(*linuxfw.FakeNetfilterRunner) + gotServices := fake.GetServiceState() + if len(gotServices) != len(tt.wantServices) { + t.Errorf("got %d services, want %d", len(gotServices), len(tt.wantServices)) + } + for svc, want := range tt.wantServices { + got, ok := gotServices[svc] + if !ok { + t.Errorf("service %s not found", svc) + continue + } + if got.TailscaleServiceIP != want.TailscaleServiceIP { + t.Errorf("service %s: got TailscaleServiceIP %v, want %v", svc, got.TailscaleServiceIP, want.TailscaleServiceIP) + } + if got.ClusterIP != want.ClusterIP { + t.Errorf("service %s: got ClusterIP %v, want %v", svc, got.ClusterIP, want.ClusterIP) + } + } + }) + } +} + +func makeServiceConfig(tsIP, clusterIP string, tsIP6, clusterIP6 string) ingressservices.Config { + cfg := ingressservices.Config{} + if tsIP != "" && clusterIP != "" { + cfg.IPv4Mapping = &ingressservices.Mapping{ + TailscaleServiceIP: netip.MustParseAddr(tsIP), + ClusterIP: netip.MustParseAddr(clusterIP), + } + } + if tsIP6 != "" && clusterIP6 != "" { + cfg.IPv6Mapping = &ingressservices.Mapping{ + TailscaleServiceIP: netip.MustParseAddr(tsIP6), + ClusterIP: netip.MustParseAddr(clusterIP6), + } + } + return cfg +} + +func makeWantService(tsIP, clusterIP string) struct { + TailscaleServiceIP netip.Addr + ClusterIP netip.Addr +} { + return struct { + TailscaleServiceIP netip.Addr + 
ClusterIP netip.Addr + }{ + TailscaleServiceIP: netip.MustParseAddr(tsIP), + ClusterIP: netip.MustParseAddr(clusterIP), + } +} diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 9425571e6..954330897 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -441,6 +441,7 @@ authLoop: // egressSvcsErrorChan will get an error sent to it if this containerboot instance is configured to expose 1+ // egress services in HA mode and errored. var egressSvcsErrorChan = make(chan error) + var ingressSvcsErrorChan = make(chan error) defer t.Stop() // resetTimer resets timer for when to next attempt to resolve the DNS // name for the proxy configured with TS_EXPERIMENTAL_DEST_DNS_NAME. The @@ -694,6 +695,23 @@ runLoop: } }() } + ip := ingressProxy{} + if cfg.IngressProxiesCfgPath != "" { + log.Printf("configuring ingress proxy using configuration file at %s", cfg.IngressProxiesCfgPath) + opts := ingressProxyOpts{ + cfgPath: cfg.IngressProxiesCfgPath, + nfr: nfr, + kc: kc, + stateSecret: cfg.KubeSecret, + podIPv4: cfg.PodIPv4, + podIPv6: cfg.PodIPv6, + } + go func() { + if err := ip.run(ctx, opts); err != nil { + ingressSvcsErrorChan <- err + } + }() + } // Wait on tailscaled process. It won't be cleaned up by default when the // container exits as it is not PID1. TODO (irbekrm): perhaps we can replace the @@ -738,6 +756,8 @@ runLoop: resetTimer(false) case e := <-egressSvcsErrorChan: return fmt.Errorf("egress proxy failed: %v", e) + case e := <-ingressSvcsErrorChan: + return fmt.Errorf("ingress proxy failed: %v", e) } } wg.Wait() diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index bdf9432b5..37fd49777 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -9,7 +9,6 @@ import ( "bytes" "context" "encoding/json" - "fmt" "log" "os" "path/filepath" @@ -170,46 +169,3 @@ func readServeConfig(path, certDomain string) (*ipn.ServeConfig, error) { } return &sc, nil } - -func ensureServicesNotAdvertised(ctx context.Context, lc *local.Client) error { - prefs, err := lc.GetPrefs(ctx) - if err != nil { - return fmt.Errorf("error getting prefs: %w", err) - } - if len(prefs.AdvertiseServices) == 0 { - return nil - } - - log.Printf("serve proxy: unadvertising services: %v", prefs.AdvertiseServices) - if _, err := lc.EditPrefs(ctx, &ipn.MaskedPrefs{ - AdvertiseServicesSet: true, - Prefs: ipn.Prefs{ - AdvertiseServices: nil, - }, - }); err != nil { - // EditPrefs only returns an error if it fails _set_ its local prefs. - // If it fails to _persist_ the prefs in state, we don't get an error - // and we continue waiting below, as control will failover as usual. - return fmt.Errorf("error setting prefs AdvertiseServices: %w", err) - } - - // Services use the same (failover XOR regional routing) mechanism that - // HA subnet routers use. Unfortunately we don't yet get a reliable signal - // from control that it's responded to our unadvertisement, so the best we - // can do is wait for 20 seconds, where 15s is the approximate maximum time - // it should take for control to choose a new primary, and 5s is for buffer. - // - // Note: There is no guarantee that clients have been _informed_ of the new - // primary no matter how long we wait. We would need a mechanism to await - // netmap updates for peers to know for sure. - // - // See https://tailscale.com/kb/1115/high-availability for more details. - // TODO(tomhjp): Wait for a netmap update instead of sleeping when control - // supports that. 
- select { - case <-ctx.Done(): - return nil - case <-time.After(20 * time.Second): - return nil - } -} diff --git a/cmd/containerboot/services.go b/cmd/containerboot/services.go index ea56a6236..6079128c0 100644 --- a/cmd/containerboot/services.go +++ b/cmd/containerboot/services.go @@ -7,759 +7,57 @@ package main import ( "context" - "encoding/json" - "errors" "fmt" "log" - "net/http" - "net/netip" - "os" - "path/filepath" - "reflect" - "strconv" - "strings" "time" - "github.com/fsnotify/fsnotify" "tailscale.com/client/local" "tailscale.com/ipn" - "tailscale.com/kube/egressservices" - "tailscale.com/kube/kubeclient" - "tailscale.com/kube/kubetypes" - "tailscale.com/syncs" - "tailscale.com/tailcfg" - "tailscale.com/util/httpm" - "tailscale.com/util/linuxfw" - "tailscale.com/util/mak" ) -const tailscaleTunInterface = "tailscale0" - -// Modified using a build flag to speed up tests. -var testSleepDuration string - -// This file contains functionality to run containerboot as a proxy that can -// route cluster traffic to one or more tailnet targets, based on portmapping -// rules read from a configfile. Currently (9/2024) this is only used for the -// Kubernetes operator egress proxies. - -// egressProxy knows how to configure firewall rules to route cluster traffic to -// one or more tailnet services. -type egressProxy struct { - cfgPath string // path to a directory with egress services config files - - nfr linuxfw.NetfilterRunner // never nil - - kc kubeclient.Client // never nil - stateSecret string // name of the kube state Secret - - tsClient *local.Client // never nil - - netmapChan chan ipn.Notify // chan to receive netmap updates on - - podIPv4 string // never empty string, currently only IPv4 is supported - - // tailnetFQDNs is the egress service FQDN to tailnet IP mappings that - // were last used to configure firewall rules for this proxy. - // TODO(irbekrm): target addresses are also stored in the state Secret. - // Evaluate whether we should retrieve them from there and not store in - // memory at all. - targetFQDNs map[string][]netip.Prefix - - tailnetAddrs []netip.Prefix // tailnet IPs of this tailnet device - - // shortSleep is the backoff sleep between healthcheck endpoint calls - can be overridden in tests. - shortSleep time.Duration - // longSleep is the time to sleep after the routing rules are updated to increase the chance that kube - // proxies on all nodes have updated their routing configuration. It can be configured to 0 in - // tests. - longSleep time.Duration - // client is a client that can send HTTP requests. - client httpClient -} - -// httpClient is a client that can send HTTP requests and can be mocked in tests. -type httpClient interface { - Do(*http.Request) (*http.Response, error) -} - -// run configures egress proxy firewall rules and ensures that the firewall rules are reconfigured when: -// - the mounted egress config has changed -// - the proxy's tailnet IP addresses have changed -// - tailnet IPs have changed for any backend targets specified by tailnet FQDN -func (ep *egressProxy) run(ctx context.Context, n ipn.Notify, opts egressProxyRunOpts) error { - ep.configure(opts) - var tickChan <-chan time.Time - var eventChan <-chan fsnotify.Event - // TODO (irbekrm): take a look if this can be pulled into a single func - // shared with serve config loader. 
- if w, err := fsnotify.NewWatcher(); err != nil { - log.Printf("failed to create fsnotify watcher, timer-only mode: %v", err) - ticker := time.NewTicker(5 * time.Second) - defer ticker.Stop() - tickChan = ticker.C - } else { - defer w.Close() - if err := w.Add(ep.cfgPath); err != nil { - return fmt.Errorf("failed to add fsnotify watch: %w", err) - } - eventChan = w.Events - } - - if err := ep.sync(ctx, n); err != nil { - return err - } - for { - select { - case <-ctx.Done(): - return nil - case <-tickChan: - log.Printf("periodic sync, ensuring firewall config is up to date...") - case <-eventChan: - log.Printf("config file change detected, ensuring firewall config is up to date...") - case n = <-ep.netmapChan: - shouldResync := ep.shouldResync(n) - if !shouldResync { - continue - } - log.Printf("netmap change detected, ensuring firewall config is up to date...") - } - if err := ep.sync(ctx, n); err != nil { - return fmt.Errorf("error syncing egress service config: %w", err) - } - } -} - -type egressProxyRunOpts struct { - cfgPath string - nfr linuxfw.NetfilterRunner - kc kubeclient.Client - tsClient *local.Client - stateSecret string - netmapChan chan ipn.Notify - podIPv4 string - tailnetAddrs []netip.Prefix -} - -// applyOpts configures egress proxy using the provided options. -func (ep *egressProxy) configure(opts egressProxyRunOpts) { - ep.cfgPath = opts.cfgPath - ep.nfr = opts.nfr - ep.kc = opts.kc - ep.tsClient = opts.tsClient - ep.stateSecret = opts.stateSecret - ep.netmapChan = opts.netmapChan - ep.podIPv4 = opts.podIPv4 - ep.tailnetAddrs = opts.tailnetAddrs - ep.client = &http.Client{} // default HTTP client - sleepDuration := time.Second - if d, err := time.ParseDuration(testSleepDuration); err == nil && d > 0 { - log.Printf("using test sleep duration %v", d) - sleepDuration = d - } - ep.shortSleep = sleepDuration - ep.longSleep = sleepDuration * 10 -} - -// sync triggers an egress proxy config resync. The resync calculates the diff between config and status to determine if -// any firewall rules need to be updated. Currently using status in state Secret as a reference for what is the current -// firewall configuration is good enough because - the status is keyed by the Pod IP - we crash the Pod on errors such -// as failed firewall update -func (ep *egressProxy) sync(ctx context.Context, n ipn.Notify) error { - cfgs, err := ep.getConfigs() +// ensureServicesNotAdvertised is a function that gets called on containerboot +// termination and ensures that any currently advertised VIPServices get +// unadvertised to give clients time to switch to another node before this one +// is shut down. +func ensureServicesNotAdvertised(ctx context.Context, lc *local.Client) error { + prefs, err := lc.GetPrefs(ctx) if err != nil { - return fmt.Errorf("error retrieving egress service configs: %w", err) - } - status, err := ep.getStatus(ctx) - if err != nil { - return fmt.Errorf("error retrieving current egress proxy status: %w", err) - } - newStatus, err := ep.syncEgressConfigs(cfgs, status, n) - if err != nil { - return fmt.Errorf("error syncing egress service configs: %w", err) - } - if !servicesStatusIsEqual(newStatus, status) { - if err := ep.setStatus(ctx, newStatus, n); err != nil { - return fmt.Errorf("error setting egress proxy status: %w", err) - } - } - return nil -} - -// addrsHaveChanged returns true if the provided netmap update contains tailnet address change for this proxy node. -// Netmap must not be nil. 
-func (ep *egressProxy) addrsHaveChanged(n ipn.Notify) bool { - return !reflect.DeepEqual(ep.tailnetAddrs, n.NetMap.SelfNode.Addresses()) -} - -// syncEgressConfigs adds and deletes firewall rules to match the desired -// configuration. It uses the provided status to determine what is currently -// applied and updates the status after a successful sync. -func (ep *egressProxy) syncEgressConfigs(cfgs *egressservices.Configs, status *egressservices.Status, n ipn.Notify) (*egressservices.Status, error) { - if !(wantsServicesConfigured(cfgs) || hasServicesConfigured(status)) { - return nil, nil - } - - // Delete unnecessary services. - if err := ep.deleteUnnecessaryServices(cfgs, status); err != nil { - return nil, fmt.Errorf("error deleting services: %w", err) - - } - newStatus := &egressservices.Status{} - if !wantsServicesConfigured(cfgs) { - return newStatus, nil - } - - // Add new services, update rules for any that have changed. - rulesPerSvcToAdd := make(map[string][]rule, 0) - rulesPerSvcToDelete := make(map[string][]rule, 0) - for svcName, cfg := range *cfgs { - tailnetTargetIPs, err := ep.tailnetTargetIPsForSvc(cfg, n) - if err != nil { - return nil, fmt.Errorf("error determining tailnet target IPs: %w", err) - } - rulesToAdd, rulesToDelete, err := updatesForCfg(svcName, cfg, status, tailnetTargetIPs) - if err != nil { - return nil, fmt.Errorf("error validating service changes: %v", err) - } - log.Printf("syncegressservices: looking at svc %s rulesToAdd %d rulesToDelete %d", svcName, len(rulesToAdd), len(rulesToDelete)) - if len(rulesToAdd) != 0 { - mak.Set(&rulesPerSvcToAdd, svcName, rulesToAdd) - } - if len(rulesToDelete) != 0 { - mak.Set(&rulesPerSvcToDelete, svcName, rulesToDelete) - } - if len(rulesToAdd) != 0 || ep.addrsHaveChanged(n) { - // For each tailnet target, set up SNAT from the local tailnet device address of the matching - // family. - for _, t := range tailnetTargetIPs { - var local netip.Addr - for _, pfx := range n.NetMap.SelfNode.Addresses().All() { - if !pfx.IsSingleIP() { - continue - } - if pfx.Addr().Is4() != t.Is4() { - continue - } - local = pfx.Addr() - break - } - if !local.IsValid() { - return nil, fmt.Errorf("no valid local IP: %v", local) - } - if err := ep.nfr.EnsureSNATForDst(local, t); err != nil { - return nil, fmt.Errorf("error setting up SNAT rule: %w", err) - } - } - } - // Update the status. Status will be written back to the state Secret by the caller. - mak.Set(&newStatus.Services, svcName, &egressservices.ServiceStatus{TailnetTargetIPs: tailnetTargetIPs, TailnetTarget: cfg.TailnetTarget, Ports: cfg.Ports}) - } - - // Actually apply the firewall rules. - if err := ensureRulesAdded(rulesPerSvcToAdd, ep.nfr); err != nil { - return nil, fmt.Errorf("error adding rules: %w", err) - } - if err := ensureRulesDeleted(rulesPerSvcToDelete, ep.nfr); err != nil { - return nil, fmt.Errorf("error deleting rules: %w", err) - } - - return newStatus, nil -} - -// updatesForCfg calculates any rules that need to be added or deleted for an individucal egress service config. -func updatesForCfg(svcName string, cfg egressservices.Config, status *egressservices.Status, tailnetTargetIPs []netip.Addr) ([]rule, []rule, error) { - rulesToAdd := make([]rule, 0) - rulesToDelete := make([]rule, 0) - currentConfig, ok := lookupCurrentConfig(svcName, status) - - // If no rules for service are present yet, add them all. 
- if !ok { - for _, t := range tailnetTargetIPs { - for ports := range cfg.Ports { - log.Printf("syncegressservices: svc %s adding port %v", svcName, ports) - rulesToAdd = append(rulesToAdd, rule{tailnetPort: ports.TargetPort, containerPort: ports.MatchPort, protocol: ports.Protocol, tailnetIP: t}) - } - } - return rulesToAdd, rulesToDelete, nil - } - - // If there are no backend targets available, delete any currently configured rules. - if len(tailnetTargetIPs) == 0 { - log.Printf("tailnet target for egress service %s does not have any backend addresses, deleting all rules", svcName) - for _, ip := range currentConfig.TailnetTargetIPs { - for ports := range currentConfig.Ports { - rulesToDelete = append(rulesToAdd, rule{tailnetPort: ports.TargetPort, containerPort: ports.MatchPort, protocol: ports.Protocol, tailnetIP: ip}) - } - } - return rulesToAdd, rulesToDelete, nil + return fmt.Errorf("error getting prefs: %w", err) } - - // If there are rules present for backend targets that no longer match, delete them. - for _, ip := range currentConfig.TailnetTargetIPs { - var found bool - for _, wantsIP := range tailnetTargetIPs { - if reflect.DeepEqual(ip, wantsIP) { - found = true - break - } - } - if !found { - for ports := range currentConfig.Ports { - rulesToDelete = append(rulesToDelete, rule{tailnetPort: ports.TargetPort, containerPort: ports.MatchPort, protocol: ports.Protocol, tailnetIP: ip}) - } - } - } - - // Sync rules for the currently wanted backend targets. - for _, ip := range tailnetTargetIPs { - - // If the backend target is not yet present in status, add all rules. - var found bool - for _, gotIP := range currentConfig.TailnetTargetIPs { - if reflect.DeepEqual(ip, gotIP) { - found = true - break - } - } - if !found { - for ports := range cfg.Ports { - rulesToAdd = append(rulesToAdd, rule{tailnetPort: ports.TargetPort, containerPort: ports.MatchPort, protocol: ports.Protocol, tailnetIP: ip}) - } - continue - } - - // If the backend target is present in status, check that the - // currently applied rules are up to date. - - // Delete any current portmappings that are no longer present in config. - for port := range currentConfig.Ports { - if _, ok := cfg.Ports[port]; ok { - continue - } - rulesToDelete = append(rulesToDelete, rule{tailnetPort: port.TargetPort, containerPort: port.MatchPort, protocol: port.Protocol, tailnetIP: ip}) - } - - // Add any new portmappings. - for port := range cfg.Ports { - if _, ok := currentConfig.Ports[port]; ok { - continue - } - rulesToAdd = append(rulesToAdd, rule{tailnetPort: port.TargetPort, containerPort: port.MatchPort, protocol: port.Protocol, tailnetIP: ip}) - } + if len(prefs.AdvertiseServices) == 0 { + return nil } - return rulesToAdd, rulesToDelete, nil -} -// deleteUnneccessaryServices ensure that any services found on status, but not -// present in config are deleted. -func (ep *egressProxy) deleteUnnecessaryServices(cfgs *egressservices.Configs, status *egressservices.Status) error { - if !hasServicesConfigured(status) { + log.Printf("unadvertising services: %v", prefs.AdvertiseServices) + if _, err := lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: nil, + }, + }); err != nil { + // EditPrefs only returns an error if it fails _set_ its local prefs. + // If it fails to _persist_ the prefs in state, we don't get an error + // and we continue waiting below, as control will failover as usual. 
+ return fmt.Errorf("error setting prefs AdvertiseServices: %w", err) + } + + // Services use the same (failover XOR regional routing) mechanism that + // HA subnet routers use. Unfortunately we don't yet get a reliable signal + // from control that it's responded to our unadvertisement, so the best we + // can do is wait for 20 seconds, where 15s is the approximate maximum time + // it should take for control to choose a new primary, and 5s is for buffer. + // + // Note: There is no guarantee that clients have been _informed_ of the new + // primary no matter how long we wait. We would need a mechanism to await + // netmap updates for peers to know for sure. + // + // See https://tailscale.com/kb/1115/high-availability for more details. + // TODO(tomhjp): Wait for a netmap update instead of sleeping when control + // supports that. + select { + case <-ctx.Done(): return nil - } - if !wantsServicesConfigured(cfgs) { - for svcName, svc := range status.Services { - log.Printf("service %s is no longer required, deleting", svcName) - if err := ensureServiceDeleted(svcName, svc, ep.nfr); err != nil { - return fmt.Errorf("error deleting service %s: %w", svcName, err) - } - } + case <-time.After(20 * time.Second): return nil } - - for svcName, svc := range status.Services { - if _, ok := (*cfgs)[svcName]; !ok { - log.Printf("service %s is no longer required, deleting", svcName) - if err := ensureServiceDeleted(svcName, svc, ep.nfr); err != nil { - return fmt.Errorf("error deleting service %s: %w", svcName, err) - } - // TODO (irbekrm): also delete the SNAT rule here - } - } - return nil -} - -// getConfigs gets the mounted egress service configuration. -func (ep *egressProxy) getConfigs() (*egressservices.Configs, error) { - svcsCfg := filepath.Join(ep.cfgPath, egressservices.KeyEgressServices) - j, err := os.ReadFile(svcsCfg) - if os.IsNotExist(err) { - return nil, nil - } - if err != nil { - return nil, err - } - if len(j) == 0 || string(j) == "" { - return nil, nil - } - cfg := &egressservices.Configs{} - if err := json.Unmarshal(j, &cfg); err != nil { - return nil, err - } - return cfg, nil -} - -// getStatus gets the current status of the configured firewall. The current -// status is stored in state Secret. Returns nil status if no status that -// applies to the current proxy Pod was found. Uses the Pod IP to determine if a -// status found in the state Secret applies to this proxy Pod. -func (ep *egressProxy) getStatus(ctx context.Context) (*egressservices.Status, error) { - secret, err := ep.kc.GetSecret(ctx, ep.stateSecret) - if err != nil { - return nil, fmt.Errorf("error retrieving state secret: %w", err) - } - status := &egressservices.Status{} - raw, ok := secret.Data[egressservices.KeyEgressServices] - if !ok { - return nil, nil - } - if err := json.Unmarshal([]byte(raw), status); err != nil { - return nil, fmt.Errorf("error unmarshalling previous config: %w", err) - } - if reflect.DeepEqual(status.PodIPv4, ep.podIPv4) { - return status, nil - } - return nil, nil -} - -// setStatus writes egress proxy's currently configured firewall to the state -// Secret and updates proxy's tailnet addresses. -func (ep *egressProxy) setStatus(ctx context.Context, status *egressservices.Status, n ipn.Notify) error { - // Pod IP is used to determine if a stored status applies to THIS proxy Pod. 
- if status == nil { - status = &egressservices.Status{} - } - status.PodIPv4 = ep.podIPv4 - secret, err := ep.kc.GetSecret(ctx, ep.stateSecret) - if err != nil { - return fmt.Errorf("error retrieving state Secret: %w", err) - } - bs, err := json.Marshal(status) - if err != nil { - return fmt.Errorf("error marshalling service config: %w", err) - } - secret.Data[egressservices.KeyEgressServices] = bs - patch := kubeclient.JSONPatch{ - Op: "replace", - Path: fmt.Sprintf("/data/%s", egressservices.KeyEgressServices), - Value: bs, - } - if err := ep.kc.JSONPatchResource(ctx, ep.stateSecret, kubeclient.TypeSecrets, []kubeclient.JSONPatch{patch}); err != nil { - return fmt.Errorf("error patching state Secret: %w", err) - } - ep.tailnetAddrs = n.NetMap.SelfNode.Addresses().AsSlice() - return nil -} - -// tailnetTargetIPsForSvc returns the tailnet IPs to which traffic for this -// egress service should be proxied. The egress service can be configured by IP -// or by FQDN. If it's configured by IP, just return that. If it's configured by -// FQDN, resolve the FQDN and return the resolved IPs. It checks if the -// netfilter runner supports IPv6 NAT and skips any IPv6 addresses if it -// doesn't. -func (ep *egressProxy) tailnetTargetIPsForSvc(svc egressservices.Config, n ipn.Notify) (addrs []netip.Addr, err error) { - if svc.TailnetTarget.IP != "" { - addr, err := netip.ParseAddr(svc.TailnetTarget.IP) - if err != nil { - return nil, fmt.Errorf("error parsing tailnet target IP: %w", err) - } - if addr.Is6() && !ep.nfr.HasIPV6NAT() { - log.Printf("tailnet target is an IPv6 address, but this host does not support IPv6 in the chosen firewall mode. This will probably not work.") - return addrs, nil - } - return []netip.Addr{addr}, nil - } - - if svc.TailnetTarget.FQDN == "" { - return nil, errors.New("unexpected egress service config- neither tailnet target IP nor FQDN is set") - } - if n.NetMap == nil { - log.Printf("netmap is not available, unable to determine backend addresses for %s", svc.TailnetTarget.FQDN) - return addrs, nil - } - var ( - node tailcfg.NodeView - nodeFound bool - ) - for _, nn := range n.NetMap.Peers { - if equalFQDNs(nn.Name(), svc.TailnetTarget.FQDN) { - node = nn - nodeFound = true - break - } - } - if nodeFound { - for _, addr := range node.Addresses().AsSlice() { - if addr.Addr().Is6() && !ep.nfr.HasIPV6NAT() { - log.Printf("tailnet target %v is an IPv6 address, but this host does not support IPv6 in the chosen firewall mode, skipping.", addr.Addr().String()) - continue - } - addrs = append(addrs, addr.Addr()) - } - // Egress target endpoints configured via FQDN are stored, so - // that we can determine if a netmap update should trigger a - // resync. - mak.Set(&ep.targetFQDNs, svc.TailnetTarget.FQDN, node.Addresses().AsSlice()) - } - return addrs, nil -} - -// shouldResync parses netmap update and returns true if the update contains -// changes for which the egress proxy's firewall should be reconfigured. -func (ep *egressProxy) shouldResync(n ipn.Notify) bool { - if n.NetMap == nil { - return false - } - - // If proxy's tailnet addresses have changed, resync. - if !reflect.DeepEqual(n.NetMap.SelfNode.Addresses().AsSlice(), ep.tailnetAddrs) { - log.Printf("node addresses have changed, trigger egress config resync") - ep.tailnetAddrs = n.NetMap.SelfNode.Addresses().AsSlice() - return true - } - - // If the IPs for any of the egress services configured via FQDN have - // changed, resync. 
- for fqdn, ips := range ep.targetFQDNs { - for _, nn := range n.NetMap.Peers { - if equalFQDNs(nn.Name(), fqdn) { - if !reflect.DeepEqual(ips, nn.Addresses().AsSlice()) { - log.Printf("backend addresses for egress target %q have changed old IPs %v, new IPs %v trigger egress config resync", nn.Name(), ips, nn.Addresses().AsSlice()) - } - return true - } - } - } - return false -} - -// ensureServiceDeleted ensures that any rules for an egress service are removed -// from the firewall configuration. -func ensureServiceDeleted(svcName string, svc *egressservices.ServiceStatus, nfr linuxfw.NetfilterRunner) error { - - // Note that the portmap is needed for iptables based firewall only. - // Nftables group rules for a service in a chain, so there is no need to - // specify individual portmapping based rules. - pms := make([]linuxfw.PortMap, 0) - for pm := range svc.Ports { - pms = append(pms, linuxfw.PortMap{MatchPort: pm.MatchPort, TargetPort: pm.TargetPort, Protocol: pm.Protocol}) - } - - if err := nfr.DeleteSvc(svcName, tailscaleTunInterface, svc.TailnetTargetIPs, pms); err != nil { - return fmt.Errorf("error deleting service %s: %w", svcName, err) - } - return nil -} - -// ensureRulesAdded ensures that all portmapping rules are added to the firewall -// configuration. For any rules that already exist, calling this function is a -// no-op. In case of nftables, a service consists of one or two (one per IP -// family) chains that conain the portmapping rules for the service and the -// chains as needed when this function is called. -func ensureRulesAdded(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner) error { - for svc, rules := range rulesPerSvc { - for _, rule := range rules { - log.Printf("ensureRulesAdded svc %s tailnetTarget %s container port %d tailnet port %d protocol %s", svc, rule.tailnetIP, rule.containerPort, rule.tailnetPort, rule.protocol) - if err := nfr.EnsurePortMapRuleForSvc(svc, tailscaleTunInterface, rule.tailnetIP, linuxfw.PortMap{MatchPort: rule.containerPort, TargetPort: rule.tailnetPort, Protocol: rule.protocol}); err != nil { - return fmt.Errorf("error ensuring rule: %w", err) - } - } - } - return nil -} - -// ensureRulesDeleted ensures that the given rules are deleted from the firewall -// configuration. For any rules that do not exist, calling this funcion is a -// no-op. -func ensureRulesDeleted(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner) error { - for svc, rules := range rulesPerSvc { - for _, rule := range rules { - log.Printf("ensureRulesDeleted svc %s tailnetTarget %s container port %d tailnet port %d protocol %s", svc, rule.tailnetIP, rule.containerPort, rule.tailnetPort, rule.protocol) - if err := nfr.DeletePortMapRuleForSvc(svc, tailscaleTunInterface, rule.tailnetIP, linuxfw.PortMap{MatchPort: rule.containerPort, TargetPort: rule.tailnetPort, Protocol: rule.protocol}); err != nil { - return fmt.Errorf("error deleting rule: %w", err) - } - } - } - return nil -} - -func lookupCurrentConfig(svcName string, status *egressservices.Status) (*egressservices.ServiceStatus, bool) { - if status == nil || len(status.Services) == 0 { - return nil, false - } - c, ok := status.Services[svcName] - return c, ok -} - -func equalFQDNs(s, s1 string) bool { - s, _ = strings.CutSuffix(s, ".") - s1, _ = strings.CutSuffix(s1, ".") - return strings.EqualFold(s, s1) -} - -// rule contains configuration for an egress proxy firewall rule. 
-type rule struct { - containerPort uint16 // port to match incoming traffic - tailnetPort uint16 // tailnet service port - tailnetIP netip.Addr // tailnet service IP - protocol string -} - -func wantsServicesConfigured(cfgs *egressservices.Configs) bool { - return cfgs != nil && len(*cfgs) != 0 -} - -func hasServicesConfigured(status *egressservices.Status) bool { - return status != nil && len(status.Services) != 0 -} - -func servicesStatusIsEqual(st, st1 *egressservices.Status) bool { - if st == nil && st1 == nil { - return true - } - if st == nil || st1 == nil { - return false - } - st.PodIPv4 = "" - st1.PodIPv4 = "" - return reflect.DeepEqual(*st, *st1) -} - -// registerHandlers adds a new handler to the provided ServeMux that can be called as a Kubernetes prestop hook to -// delay shutdown till it's safe to do so. -func (ep *egressProxy) registerHandlers(mux *http.ServeMux) { - mux.Handle(fmt.Sprintf("GET %s", kubetypes.EgessServicesPreshutdownEP), ep) -} - -// ServeHTTP serves /internal-egress-services-preshutdown endpoint, when it receives a request, it periodically polls -// the configured health check endpoint for each egress service till it the health check endpoint no longer hits this -// proxy Pod. It uses the Pod-IPv4 header to verify if health check response is received from this Pod. -func (ep *egressProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { - cfgs, err := ep.getConfigs() - if err != nil { - http.Error(w, fmt.Sprintf("error retrieving egress services configs: %v", err), http.StatusInternalServerError) - return - } - if cfgs == nil { - if _, err := w.Write([]byte("safe to terminate")); err != nil { - http.Error(w, fmt.Sprintf("error writing termination status: %v", err), http.StatusInternalServerError) - return - } - } - hp, err := ep.getHEPPings() - if err != nil { - http.Error(w, fmt.Sprintf("error determining the number of times health check endpoint should be pinged: %v", err), http.StatusInternalServerError) - return - } - ep.waitTillSafeToShutdown(r.Context(), cfgs, hp) -} - -// waitTillSafeToShutdown looks up all egress targets configured to be proxied via this instance and, for each target -// whose configuration includes a healthcheck endpoint, pings the endpoint till none of the responses -// are returned by this instance or till the HTTP request times out. In practice, the endpoint will be a Kubernetes Service for whom one of the backends -// would normally be this Pod. When this Pod is being deleted, the operator should have removed it from the Service -// backends and eventually kube proxy routing rules should be updated to no longer route traffic for the Service to this -// Pod. 
-func (ep *egressProxy) waitTillSafeToShutdown(ctx context.Context, cfgs *egressservices.Configs, hp int) { - if cfgs == nil || len(*cfgs) == 0 { // avoid sleeping if no services are configured - return - } - log.Printf("Ensuring that cluster traffic for egress targets is no longer routed via this Pod...") - wg := syncs.WaitGroup{} - - for s, cfg := range *cfgs { - hep := cfg.HealthCheckEndpoint - if hep == "" { - log.Printf("Tailnet target %q does not have a cluster healthcheck specified, unable to verify if cluster traffic for the target is still routed via this Pod", s) - continue - } - svc := s - wg.Go(func() { - log.Printf("Ensuring that cluster traffic is no longer routed to %q via this Pod...", svc) - for { - if ctx.Err() != nil { // kubelet's HTTP request timeout - log.Printf("Cluster traffic for %s did not stop being routed to this Pod.", svc) - return - } - found, err := lookupPodRoute(ctx, hep, ep.podIPv4, hp, ep.client) - if err != nil { - log.Printf("unable to reach endpoint %q, assuming the routing rules for this Pod have been deleted: %v", hep, err) - break - } - if !found { - log.Printf("service %q is no longer routed through this Pod", svc) - break - } - log.Printf("service %q is still routed through this Pod, waiting...", svc) - time.Sleep(ep.shortSleep) - } - }) - } - wg.Wait() - // The check above really only checked that the routing rules are updated on this node. Sleep for a bit to - // ensure that the routing rules are updated on other nodes. TODO(irbekrm): this may or may not be good enough. - // If it's not good enough, we'd probably want to do something more complex, where the proxies check each other. - log.Printf("Sleeping for %s before shutdown to ensure that kube proxies on all nodes have updated routing configuration", ep.longSleep) - time.Sleep(ep.longSleep) -} - -// lookupPodRoute calls the healthcheck endpoint repeat times and returns true if the endpoint returns with the podIP -// header at least once. -func lookupPodRoute(ctx context.Context, hep, podIP string, repeat int, client httpClient) (bool, error) { - for range repeat { - f, err := lookup(ctx, hep, podIP, client) - if err != nil { - return false, err - } - if f { - return true, nil - } - } - return false, nil -} - -// lookup calls the healthcheck endpoint and returns true if the response contains the podIP header. -func lookup(ctx context.Context, hep, podIP string, client httpClient) (bool, error) { - req, err := http.NewRequestWithContext(ctx, httpm.GET, hep, nil) - if err != nil { - return false, fmt.Errorf("error creating new HTTP request: %v", err) - } - - // Close the TCP connection to ensure that the next request is routed to a different backend. - req.Close = true - - resp, err := client.Do(req) - if err != nil { - log.Printf("Endpoint %q can not be reached: %v, likely because there are no (more) healthy backends", hep, err) - return true, nil - } - defer resp.Body.Close() - gotIP := resp.Header.Get(kubetypes.PodIPv4Header) - return strings.EqualFold(podIP, gotIP), nil -} - -// getHEPPings gets the number of pings that should be sent to a health check endpoint to ensure that each configured -// backend is hit. This assumes that a health check endpoint is a Kubernetes Service and traffic to backend Pods is -// round robin load balanced. 
-func (ep *egressProxy) getHEPPings() (int, error) { - hepPingsPath := filepath.Join(ep.cfgPath, egressservices.KeyHEPPings) - j, err := os.ReadFile(hepPingsPath) - if os.IsNotExist(err) { - return 0, nil - } - if err != nil { - return -1, err - } - if len(j) == 0 || string(j) == "" { - return 0, nil - } - hp, err := strconv.Atoi(string(j)) - if err != nil { - return -1, fmt.Errorf("error parsing hep pings as int: %v", err) - } - if hp < 0 { - log.Printf("[unexpected] hep pings is negative: %d", hp) - return 0, nil - } - return hp, nil } diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index c62db5340..0ac9c828e 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -64,16 +64,17 @@ type settings struct { // when setting up rules to proxy cluster traffic to cluster ingress // target. // Deprecated: use PodIPv4, PodIPv6 instead to support dual stack clusters - PodIP string - PodIPv4 string - PodIPv6 string - PodUID string - HealthCheckAddrPort string - LocalAddrPort string - MetricsEnabled bool - HealthCheckEnabled bool - DebugAddrPort string - EgressProxiesCfgPath string + PodIP string + PodIPv4 string + PodIPv6 string + PodUID string + HealthCheckAddrPort string + LocalAddrPort string + MetricsEnabled bool + HealthCheckEnabled bool + DebugAddrPort string + EgressProxiesCfgPath string + IngressProxiesCfgPath string // CertShareMode is set for Kubernetes Pods running cert share mode. // Possible values are empty (containerboot doesn't run any certs // logic), 'ro' (for Pods that shold never attempt to issue/renew @@ -114,6 +115,7 @@ func configFromEnv() (*settings, error) { HealthCheckEnabled: defaultBool("TS_ENABLE_HEALTH_CHECK", false), DebugAddrPort: defaultEnv("TS_DEBUG_ADDR_PORT", ""), EgressProxiesCfgPath: defaultEnv("TS_EGRESS_PROXIES_CONFIG_PATH", ""), + IngressProxiesCfgPath: defaultEnv("TS_INGRESS_PROXIES_CONFIG_PATH", ""), PodUID: defaultEnv("POD_UID", ""), } podIPs, ok := os.LookupEnv("POD_IPS") @@ -219,6 +221,9 @@ func (s *settings) validate() error { if s.EgressProxiesCfgPath != "" && !(s.InKubernetes && s.KubeSecret != "") { return errors.New("TS_EGRESS_PROXIES_CONFIG_PATH is only supported for Tailscale running on Kubernetes") } + if s.IngressProxiesCfgPath != "" && !(s.InKubernetes && s.KubeSecret != "") { + return errors.New("TS_INGRESS_PROXIES_CONFIG_PATH is only supported for Tailscale running on Kubernetes") + } return nil } @@ -308,7 +313,7 @@ func isOneStepConfig(cfg *settings) bool { // as an L3 proxy, proxying to an endpoint provided via one of the config env // vars. func isL3Proxy(cfg *settings) bool { - return cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" || cfg.AllowProxyingClusterTrafficViaIngress || cfg.EgressProxiesCfgPath != "" + return cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" || cfg.AllowProxyingClusterTrafficViaIngress || cfg.EgressProxiesCfgPath != "" || cfg.IngressProxiesCfgPath != "" } // hasKubeStateStore returns true if the state must be stored in a Kubernetes diff --git a/kube/ingressservices/ingressservices.go b/kube/ingressservices/ingressservices.go new file mode 100644 index 000000000..f79410761 --- /dev/null +++ b/kube/ingressservices/ingressservices.go @@ -0,0 +1,53 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package ingressservices contains shared types for exposing Kubernetes Services to tailnet. 
+// These are split into a separate package for consumption of +// non-Kubernetes shared libraries and binaries. Be mindful of not increasing +// dependency size for those consumers when adding anything new here. +package ingressservices + +import "net/netip" + +// IngressConfigKey is the key at which both the desired ingress firewall +// configuration is stored in the ingress proxies' ConfigMap and at which the +// recorded firewall configuration status is stored in the proxies' state +// Secrets. +const IngressConfigKey = "ingress-config.json" + +// Configs contains the desired configuration for ingress proxies firewall. Map +// keys are Tailscale Service names. +type Configs map[string]Config + +// GetConfig returns the desired configuration for the given Tailscale Service name. +func (cfgs *Configs) GetConfig(name string) *Config { + if cfgs == nil { + return nil + } + if cfg, ok := (*cfgs)[name]; ok { + return &cfg + } + return nil +} + +// Status contains the recorded firewall configuration status for a specific +// ingress proxy Pod. +// Pod IPs are used to identify the ingress proxy Pod. +type Status struct { + Configs Configs `json:"configs,omitempty"` + PodIPv4 string `json:"podIPv4,omitempty"` + PodIPv6 string `json:"podIPv6,omitempty"` +} + +// Config is an ingress service configuration. +type Config struct { + IPv4Mapping *Mapping `json:"IPv4Mapping,omitempty"` + IPv6Mapping *Mapping `json:"IPv6Mapping,omitempty"` +} + +// Mapping describes a rule that forwards traffic from Tailscale Service IP to a +// Kubernetes Service IP. +type Mapping struct { + TailscaleServiceIP netip.Addr `json:"TailscaleServiceIP"` + ClusterIP netip.Addr `json:"ClusterIP"` +} diff --git a/util/linuxfw/fake_netfilter.go b/util/linuxfw/fake_netfilter.go index 329c3a213..a998ed765 100644 --- a/util/linuxfw/fake_netfilter.go +++ b/util/linuxfw/fake_netfilter.go @@ -16,8 +16,8 @@ type FakeNetfilterRunner struct { // services is a map that tracks the firewall rules added/deleted via // EnsureDNATRuleForSvc/DeleteDNATRuleForSvc. services map[string]struct { - VIPServiceIP netip.Addr - ClusterIP netip.Addr + TailscaleServiceIP netip.Addr + ClusterIP netip.Addr } } @@ -25,16 +25,16 @@ type FakeNetfilterRunner struct { func NewFakeNetfilterRunner() *FakeNetfilterRunner { return &FakeNetfilterRunner{ services: make(map[string]struct { - VIPServiceIP netip.Addr - ClusterIP netip.Addr + TailscaleServiceIP netip.Addr + ClusterIP netip.Addr }), } } func (f *FakeNetfilterRunner) EnsureDNATRuleForSvc(svcName string, origDst, dst netip.Addr) error { f.services[svcName] = struct { - VIPServiceIP netip.Addr - ClusterIP netip.Addr + TailscaleServiceIP netip.Addr + ClusterIP netip.Addr }{origDst, dst} return nil } @@ -45,8 +45,8 @@ func (f *FakeNetfilterRunner) DeleteDNATRuleForSvc(svcName string, origDst, dst } func (f *FakeNetfilterRunner) GetServiceState() map[string]struct { - VIPServiceIP netip.Addr - ClusterIP netip.Addr + TailscaleServiceIP netip.Addr + ClusterIP netip.Addr } { return f.services } From d89aa2908186d104d22d471f8c6561f61aa8f458 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Mon, 19 May 2025 11:35:05 +0100 Subject: [PATCH 0872/1708] {cmd,}/k8s-operator: support IRSA for Recorder resources (#15913) Adds Recorder fields to configure the name and annotations of the ServiceAccount created for and used by its associated StatefulSet. 
This allows the created Pod to authenticate with AWS without requiring a Secret with static credentials, using AWS' IAM Roles for Service Accounts feature, documented here: https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html Fixes #15875 Change-Id: Ib0e15c0dbc357efa4be260e9ae5077bacdcb264f Signed-off-by: Tom Proctor --- .../deploy/crds/tailscale.com_recorders.yaml | 30 +++++ .../deploy/manifests/operator.yaml | 30 +++++ cmd/k8s-operator/sts.go | 27 ++++- cmd/k8s-operator/tsrecorder.go | 105 ++++++++++++++++-- cmd/k8s-operator/tsrecorder_specs.go | 17 ++- cmd/k8s-operator/tsrecorder_test.go | 104 ++++++++++++++++- k8s-operator/api.md | 18 +++ k8s-operator/apis/v1alpha1/types_recorder.go | 30 +++++ .../apis/v1alpha1/zz_generated.deepcopy.go | 23 ++++ 9 files changed, 359 insertions(+), 25 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml index b07e9f692..0f3dcfcca 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml @@ -1557,6 +1557,36 @@ spec: May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. type: string + serviceAccount: + description: |- + Config for the ServiceAccount to create for the Recorder's StatefulSet. + By default, the operator will create a ServiceAccount with the same + name as the Recorder resource. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#service-account + type: object + properties: + annotations: + description: |- + Annotations to add to the ServiceAccount. + https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set + + You can use this to add IAM roles to the ServiceAccount (IRSA) instead of + providing static S3 credentials in a Secret. + https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html + + For example: + eks.amazonaws.com/role-arn: arn:aws:iam:::role/ + type: object + additionalProperties: + type: string + name: + description: |- + Name of the ServiceAccount to create. Defaults to the name of the + Recorder resource. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#service-account + type: string + maxLength: 253 + pattern: ^[a-z0-9]([a-z0-9-.]{0,61}[a-z0-9])?$ tolerations: description: |- Tolerations for Recorder Pods. By default, the operator does not apply diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 9bfbd533f..e9a790d98 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -4552,6 +4552,36 @@ spec: type: string type: object type: object + serviceAccount: + description: |- + Config for the ServiceAccount to create for the Recorder's StatefulSet. + By default, the operator will create a ServiceAccount with the same + name as the Recorder resource. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#service-account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the ServiceAccount. 
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set + + You can use this to add IAM roles to the ServiceAccount (IRSA) instead of + providing static S3 credentials in a Secret. + https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html + + For example: + eks.amazonaws.com/role-arn: arn:aws:iam:::role/ + type: object + name: + description: |- + Name of the ServiceAccount to create. Defaults to the name of the + Recorder resource. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#service-account + maxLength: 253 + pattern: ^[a-z0-9]([a-z0-9-.]{0,61}[a-z0-9])?$ + type: string + type: object tolerations: description: |- Tolerations for Recorder Pods. By default, the operator does not apply diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 7434ea79d..70b25f2d2 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -1052,13 +1052,13 @@ func tailscaledConfigHash(c tailscaledConfigs) (string, error) { return fmt.Sprintf("%x", h.Sum(nil)), nil } -// createOrUpdate adds obj to the k8s cluster, unless the object already exists, -// in which case update is called to make changes to it. If update is nil, the -// existing object is returned unmodified. +// createOrMaybeUpdate adds obj to the k8s cluster, unless the object already exists, +// in which case update is called to make changes to it. If update is nil or returns +// an error, the object is returned unmodified. // // obj is looked up by its Name and Namespace if Name is set, otherwise it's // looked up by labels. -func createOrUpdate[T any, O ptrObject[T]](ctx context.Context, c client.Client, ns string, obj O, update func(O)) (O, error) { +func createOrMaybeUpdate[T any, O ptrObject[T]](ctx context.Context, c client.Client, ns string, obj O, update func(O) error) (O, error) { var ( existing O err error @@ -1073,7 +1073,9 @@ func createOrUpdate[T any, O ptrObject[T]](ctx context.Context, c client.Client, } if err == nil && existing != nil { if update != nil { - update(existing) + if err := update(existing); err != nil { + return nil, err + } if err := c.Update(ctx, existing); err != nil { return nil, err } @@ -1089,6 +1091,21 @@ func createOrUpdate[T any, O ptrObject[T]](ctx context.Context, c client.Client, return obj, nil } +// createOrUpdate adds obj to the k8s cluster, unless the object already exists, +// in which case update is called to make changes to it. If update is nil, the +// existing object is returned unmodified. +// +// obj is looked up by its Name and Namespace if Name is set, otherwise it's +// looked up by labels. +func createOrUpdate[T any, O ptrObject[T]](ctx context.Context, c client.Client, ns string, obj O, update func(O)) (O, error) { + return createOrMaybeUpdate(ctx, c, ns, obj, func(o O) error { + if update != nil { + update(o) + } + return nil + }) +} + // getSingleObject searches for k8s objects of type T // (e.g. corev1.Service) with the given labels, and returns // it. 
Returns nil if no objects match the labels, and an error if diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index e9e6b2c6c..081543cd3 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -8,13 +8,13 @@ package main import ( "context" "encoding/json" + "errors" "fmt" "net/http" "slices" "strings" "sync" - "github.com/pkg/errors" "go.uber.org/zap" xslices "golang.org/x/exp/slices" appsv1 "k8s.io/api/apps/v1" @@ -22,8 +22,10 @@ import ( rbacv1 "k8s.io/api/rbac/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" + apivalidation "k8s.io/apimachinery/pkg/api/validation" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -107,7 +109,7 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques if !apiequality.Semantic.DeepEqual(oldTSRStatus, &tsr.Status) { // An error encountered here should get returned by the Reconcile function. if updateErr := r.Client.Status().Update(ctx, tsr); updateErr != nil { - err = errors.Wrap(err, updateErr.Error()) + err = errors.Join(err, updateErr) } } return reconcile.Result{}, err @@ -125,7 +127,7 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques } } - if err := r.validate(tsr); err != nil { + if err := r.validate(ctx, tsr); err != nil { message := fmt.Sprintf("Recorder is invalid: %s", err) r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderInvalid, message) return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderInvalid, message) @@ -160,20 +162,26 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco if err := r.ensureAuthSecretCreated(ctx, tsr); err != nil { return fmt.Errorf("error creating secrets: %w", err) } - // State secret is precreated so we can use the Recorder CR as its owner ref. + // State Secret is precreated so we can use the Recorder CR as its owner ref. sec := tsrStateSecret(tsr, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sec, func(s *corev1.Secret) { s.ObjectMeta.Labels = sec.ObjectMeta.Labels s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations - s.ObjectMeta.OwnerReferences = sec.ObjectMeta.OwnerReferences }); err != nil { return fmt.Errorf("error creating state Secret: %w", err) } sa := tsrServiceAccount(tsr, r.tsNamespace) - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) { + if _, err := createOrMaybeUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) error { + // Perform this check within the update function to make sure we don't + // have a race condition between the previous check and the update. 
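+ // saOwnedByRecorder rejects the update if a ServiceAccount with the custom name already exists but is not owned by this Recorder, so an unrelated ServiceAccount is never clobbered.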
+ if err := saOwnedByRecorder(s, tsr); err != nil { + return err + } + s.ObjectMeta.Labels = sa.ObjectMeta.Labels s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations - s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences + + return nil }); err != nil { return fmt.Errorf("error creating ServiceAccount: %w", err) } @@ -181,7 +189,6 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { r.ObjectMeta.Labels = role.ObjectMeta.Labels r.ObjectMeta.Annotations = role.ObjectMeta.Annotations - r.ObjectMeta.OwnerReferences = role.ObjectMeta.OwnerReferences r.Rules = role.Rules }); err != nil { return fmt.Errorf("error creating Role: %w", err) @@ -190,7 +197,6 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) { r.ObjectMeta.Labels = roleBinding.ObjectMeta.Labels r.ObjectMeta.Annotations = roleBinding.ObjectMeta.Annotations - r.ObjectMeta.OwnerReferences = roleBinding.ObjectMeta.OwnerReferences r.RoleRef = roleBinding.RoleRef r.Subjects = roleBinding.Subjects }); err != nil { @@ -200,12 +206,18 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { s.ObjectMeta.Labels = ss.ObjectMeta.Labels s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations - s.ObjectMeta.OwnerReferences = ss.ObjectMeta.OwnerReferences s.Spec = ss.Spec }); err != nil { return fmt.Errorf("error creating StatefulSet: %w", err) } + // ServiceAccount name may have changed, in which case we need to clean up + // the previous ServiceAccount. RoleBinding will already be updated to point + // to the new ServiceAccount. + if err := r.maybeCleanupServiceAccounts(ctx, tsr, sa.Name); err != nil { + return fmt.Errorf("error cleaning up ServiceAccounts: %w", err) + } + var devices []tsapi.RecorderTailnetDevice device, ok, err := r.getDeviceInfo(ctx, tsr.Name) @@ -224,6 +236,47 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco return nil } +func saOwnedByRecorder(sa *corev1.ServiceAccount, tsr *tsapi.Recorder) error { + // If ServiceAccount name has been configured, check that we don't clobber + // a pre-existing SA not owned by this Recorder. + if sa.Name != tsr.Name && !apiequality.Semantic.DeepEqual(sa.OwnerReferences, tsrOwnerReference(tsr)) { + return fmt.Errorf("custom ServiceAccount name %q specified but conflicts with a pre-existing ServiceAccount in the %s namespace", sa.Name, sa.Namespace) + } + + return nil +} + +// maybeCleanupServiceAccounts deletes any dangling ServiceAccounts +// owned by the Recorder if the ServiceAccount name has been changed. +// They would eventually be cleaned up by owner reference deletion, but +// this avoids a long-lived Recorder with many ServiceAccount name changes +// accumulating a large amount of garbage. +// +// This is a no-op if the ServiceAccount name has not changed. +func (r *RecorderReconciler) maybeCleanupServiceAccounts(ctx context.Context, tsr *tsapi.Recorder, currentName string) error { + logger := r.logger(tsr.Name) + + // List all ServiceAccounts owned by this Recorder. 
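+ // Any ServiceAccount carrying this Recorder's labels but a name other than currentName is left over from a previous name change and can be deleted straight away.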
+ sas := &corev1.ServiceAccountList{} + if err := r.List(ctx, sas, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels("recorder", tsr.Name, nil))); err != nil { + return fmt.Errorf("error listing ServiceAccounts for cleanup: %w", err) + } + for _, sa := range sas.Items { + if sa.Name == currentName { + continue + } + if err := r.Delete(ctx, &sa); err != nil { + if apierrors.IsNotFound(err) { + logger.Debugf("ServiceAccount %s not found, likely already deleted", sa.Name) + } else { + return fmt.Errorf("error deleting ServiceAccount %s: %w", sa.Name, err) + } + } + } + + return nil +} + // maybeCleanup just deletes the device from the tailnet. All the kubernetes // resources linked to a Recorder will get cleaned up via owner references // (which we can use because they are all in the same namespace). @@ -302,11 +355,41 @@ func (r *RecorderReconciler) ensureAuthSecretCreated(ctx context.Context, tsr *t return nil } -func (r *RecorderReconciler) validate(tsr *tsapi.Recorder) error { +func (r *RecorderReconciler) validate(ctx context.Context, tsr *tsapi.Recorder) error { if !tsr.Spec.EnableUI && tsr.Spec.Storage.S3 == nil { return errors.New("must either enable UI or use S3 storage to ensure recordings are accessible") } + // Check any custom ServiceAccount config doesn't conflict with pre-existing + // ServiceAccounts. This check is performed once during validation to ensure + // errors are raised early, but also again during any Updates to prevent a race. + specSA := tsr.Spec.StatefulSet.Pod.ServiceAccount + if specSA.Name != "" && specSA.Name != tsr.Name { + sa := &corev1.ServiceAccount{} + key := client.ObjectKey{ + Name: specSA.Name, + Namespace: r.tsNamespace, + } + + err := r.Get(ctx, key, sa) + switch { + case apierrors.IsNotFound(err): + // ServiceAccount doesn't exist, so no conflict. + case err != nil: + return fmt.Errorf("error getting ServiceAccount %q for validation: %w", tsr.Spec.StatefulSet.Pod.ServiceAccount.Name, err) + default: + // ServiceAccount exists, check if it's owned by the Recorder. 
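+ // A pre-existing ServiceAccount not owned by this Recorder is a conflict; surface it as a validation error rather than overwriting it later.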
+ if err := saOwnedByRecorder(sa, tsr); err != nil { + return err + } + } + } + if len(specSA.Annotations) > 0 { + if violations := apivalidation.ValidateAnnotations(specSA.Annotations, field.NewPath(".spec.statefulSet.pod.serviceAccount.annotations")); len(violations) > 0 { + return violations.ToAggregate() + } + } + return nil } diff --git a/cmd/k8s-operator/tsrecorder_specs.go b/cmd/k8s-operator/tsrecorder_specs.go index 4a7bf9887..7c6e80aed 100644 --- a/cmd/k8s-operator/tsrecorder_specs.go +++ b/cmd/k8s-operator/tsrecorder_specs.go @@ -39,7 +39,7 @@ func tsrStatefulSet(tsr *tsapi.Recorder, namespace string) *appsv1.StatefulSet { Annotations: tsr.Spec.StatefulSet.Pod.Annotations, }, Spec: corev1.PodSpec{ - ServiceAccountName: tsr.Name, + ServiceAccountName: tsrServiceAccountName(tsr), Affinity: tsr.Spec.StatefulSet.Pod.Affinity, SecurityContext: tsr.Spec.StatefulSet.Pod.SecurityContext, ImagePullSecrets: tsr.Spec.StatefulSet.Pod.ImagePullSecrets, @@ -100,14 +100,25 @@ func tsrStatefulSet(tsr *tsapi.Recorder, namespace string) *appsv1.StatefulSet { func tsrServiceAccount(tsr *tsapi.Recorder, namespace string) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ - Name: tsr.Name, + Name: tsrServiceAccountName(tsr), Namespace: namespace, Labels: labels("recorder", tsr.Name, nil), OwnerReferences: tsrOwnerReference(tsr), + Annotations: tsr.Spec.StatefulSet.Pod.ServiceAccount.Annotations, }, } } +func tsrServiceAccountName(tsr *tsapi.Recorder) string { + sa := tsr.Spec.StatefulSet.Pod.ServiceAccount + name := tsr.Name + if sa.Name != "" { + name = sa.Name + } + + return name +} + func tsrRole(tsr *tsapi.Recorder, namespace string) *rbacv1.Role { return &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ @@ -154,7 +165,7 @@ func tsrRoleBinding(tsr *tsapi.Recorder, namespace string) *rbacv1.RoleBinding { Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Name: tsr.Name, + Name: tsrServiceAccountName(tsr), Namespace: namespace, }, }, diff --git a/cmd/k8s-operator/tsrecorder_test.go b/cmd/k8s-operator/tsrecorder_test.go index 4de1089a9..e6d56ef2f 100644 --- a/cmd/k8s-operator/tsrecorder_test.go +++ b/cmd/k8s-operator/tsrecorder_test.go @@ -8,6 +8,7 @@ package main import ( "context" "encoding/json" + "strings" "testing" "github.com/google/go-cmp/cmp" @@ -41,7 +42,7 @@ func TestRecorder(t *testing.T) { Build() tsClient := &fakeTSClient{} zl, _ := zap.NewDevelopment() - fr := record.NewFakeRecorder(1) + fr := record.NewFakeRecorder(2) cl := tstest.NewClock(tstest.ClockOpts{}) reconciler := &RecorderReconciler{ tsNamespace: tsNamespace, @@ -52,7 +53,7 @@ func TestRecorder(t *testing.T) { clock: cl, } - t.Run("invalid spec gives an error condition", func(t *testing.T) { + t.Run("invalid_spec_gives_an_error_condition", func(t *testing.T) { expectReconciled(t, reconciler, "", tsr.Name) msg := "Recorder is invalid: must either enable UI or use S3 storage to ensure recordings are accessible" @@ -65,10 +66,66 @@ func TestRecorder(t *testing.T) { expectedEvent := "Warning RecorderInvalid Recorder is invalid: must either enable UI or use S3 storage to ensure recordings are accessible" expectEvents(t, fr, []string{expectedEvent}) - }) - t.Run("observe Ready=true status condition for a valid spec", func(t *testing.T) { tsr.Spec.EnableUI = true + tsr.Spec.StatefulSet.Pod.ServiceAccount.Annotations = map[string]string{ + "invalid space characters": "test", + } + mustUpdate(t, fc, "", "test", func(t *tsapi.Recorder) { + t.Spec = tsr.Spec + }) + expectReconciled(t, reconciler, "", 
tsr.Name) + + // Only check part of this error message, because it's defined in an + // external package and may change. + if err := fc.Get(context.Background(), client.ObjectKey{ + Name: tsr.Name, + }, tsr); err != nil { + t.Fatal(err) + } + if len(tsr.Status.Conditions) != 1 { + t.Fatalf("expected 1 condition, got %d", len(tsr.Status.Conditions)) + } + cond := tsr.Status.Conditions[0] + if cond.Type != string(tsapi.RecorderReady) || cond.Status != metav1.ConditionFalse || cond.Reason != reasonRecorderInvalid { + t.Fatalf("expected condition RecorderReady false due to RecorderInvalid, got %v", cond) + } + for _, msg := range []string{cond.Message, <-fr.Events} { + if !strings.Contains(msg, `"invalid space characters"`) { + t.Fatalf("expected invalid annotation key in error message, got %q", cond.Message) + } + } + }) + + t.Run("conflicting_service_account_config_marked_as_invalid", func(t *testing.T) { + mustCreate(t, fc, &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pre-existing-sa", + Namespace: tsNamespace, + }, + }) + + tsr.Spec.StatefulSet.Pod.ServiceAccount.Annotations = nil + tsr.Spec.StatefulSet.Pod.ServiceAccount.Name = "pre-existing-sa" + mustUpdate(t, fc, "", "test", func(t *tsapi.Recorder) { + t.Spec = tsr.Spec + }) + + expectReconciled(t, reconciler, "", tsr.Name) + + msg := `Recorder is invalid: custom ServiceAccount name "pre-existing-sa" specified but conflicts with a pre-existing ServiceAccount in the tailscale namespace` + tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, metav1.ConditionFalse, reasonRecorderInvalid, msg, 0, cl, zl.Sugar()) + expectEqual(t, fc, tsr) + if expected := 0; reconciler.recorders.Len() != expected { + t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len()) + } + + expectedEvent := "Warning RecorderInvalid " + msg + expectEvents(t, fr, []string{expectedEvent}) + }) + + t.Run("observe_Ready_true_status_condition_for_a_valid_spec", func(t *testing.T) { + tsr.Spec.StatefulSet.Pod.ServiceAccount.Name = "" mustUpdate(t, fc, "", "test", func(t *tsapi.Recorder) { t.Spec = tsr.Spec }) @@ -83,7 +140,42 @@ func TestRecorder(t *testing.T) { expectRecorderResources(t, fc, tsr, true) }) - t.Run("populate node info in state secret, and see it appear in status", func(t *testing.T) { + t.Run("valid_service_account_config", func(t *testing.T) { + tsr.Spec.StatefulSet.Pod.ServiceAccount.Name = "test-sa" + tsr.Spec.StatefulSet.Pod.ServiceAccount.Annotations = map[string]string{ + "test": "test", + } + mustUpdate(t, fc, "", "test", func(t *tsapi.Recorder) { + t.Spec = tsr.Spec + }) + + expectReconciled(t, reconciler, "", tsr.Name) + + expectEqual(t, fc, tsr) + if expected := 1; reconciler.recorders.Len() != expected { + t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len()) + } + expectRecorderResources(t, fc, tsr, true) + + // Get the service account and check the annotations. 
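+ // The ServiceAccount should exist under the custom name with the configured annotations, and the default-named ServiceAccount should have been cleaned up.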
+ sa := &corev1.ServiceAccount{} + if err := fc.Get(context.Background(), client.ObjectKey{ + Name: tsr.Spec.StatefulSet.Pod.ServiceAccount.Name, + Namespace: tsNamespace, + }, sa); err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(sa.Annotations, tsr.Spec.StatefulSet.Pod.ServiceAccount.Annotations); diff != "" { + t.Fatalf("unexpected service account annotations (-got +want):\n%s", diff) + } + if sa.Name != tsr.Spec.StatefulSet.Pod.ServiceAccount.Name { + t.Fatalf("unexpected service account name: got %q, want %q", sa.Name, tsr.Spec.StatefulSet.Pod.ServiceAccount.Name) + } + + expectMissing[corev1.ServiceAccount](t, fc, tsNamespace, tsr.Name) + }) + + t.Run("populate_node_info_in_state_secret_and_see_it_appear_in_status", func(t *testing.T) { bytes, err := json.Marshal(map[string]any{ "Config": map[string]any{ "NodeID": "nodeid-123", @@ -115,7 +207,7 @@ func TestRecorder(t *testing.T) { expectEqual(t, fc, tsr) }) - t.Run("delete the Recorder and observe cleanup", func(t *testing.T) { + t.Run("delete_the_Recorder_and_observe_cleanup", func(t *testing.T) { if err := fc.Delete(context.Background(), tsr); err != nil { t.Fatal(err) } diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 190f99d24..03bb8989b 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -726,6 +726,24 @@ _Appears in:_ | `imagePullSecrets` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#localobjectreference-v1-core) array_ | Image pull Secrets for Recorder Pods.
        https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec | | | | `nodeSelector` _object (keys:string, values:string)_ | Node selector rules for Recorder Pods. By default, the operator does
        not apply any node selector rules.
        https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | | `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | Tolerations for Recorder Pods. By default, the operator does not apply
        any tolerations.
        https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | +| `serviceAccount` _[RecorderServiceAccount](#recorderserviceaccount)_ | Config for the ServiceAccount to create for the Recorder's StatefulSet.
        By default, the operator will create a ServiceAccount with the same
        name as the Recorder resource.
        https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#service-account | | | + + +#### RecorderServiceAccount + + + + + + + +_Appears in:_ +- [RecorderPod](#recorderpod) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `name` _string_ | Name of the ServiceAccount to create. Defaults to the name of the
        Recorder resource.
        https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#service-account | | MaxLength: 253
        Pattern: `^[a-z0-9]([a-z0-9-.]{0,61}[a-z0-9])?$`
        Type: string
        | +| `annotations` _object (keys:string, values:string)_ | Annotations to add to the ServiceAccount.
        https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set
        You can use this to add IAM roles to the ServiceAccount (IRSA) instead of
        providing static S3 credentials in a Secret.
        https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html
        For example:
        eks.amazonaws.com/role-arn: arn:aws:iam:::role/ | | | #### RecorderSpec diff --git a/k8s-operator/apis/v1alpha1/types_recorder.go b/k8s-operator/apis/v1alpha1/types_recorder.go index 6e5416ea5..16a610b26 100644 --- a/k8s-operator/apis/v1alpha1/types_recorder.go +++ b/k8s-operator/apis/v1alpha1/types_recorder.go @@ -142,6 +142,36 @@ type RecorderPod struct { // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling // +optional Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // Config for the ServiceAccount to create for the Recorder's StatefulSet. + // By default, the operator will create a ServiceAccount with the same + // name as the Recorder resource. + // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#service-account + // +optional + ServiceAccount RecorderServiceAccount `json:"serviceAccount,omitempty"` +} + +type RecorderServiceAccount struct { + // Name of the ServiceAccount to create. Defaults to the name of the + // Recorder resource. + // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#service-account + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern=`^[a-z0-9]([a-z0-9-.]{0,61}[a-z0-9])?$` + // +kubebuilder:validation:MaxLength=253 + // +optional + Name string `json:"name,omitempty"` + + // Annotations to add to the ServiceAccount. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set + // + // You can use this to add IAM roles to the ServiceAccount (IRSA) instead of + // providing static S3 credentials in a Secret. + // https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html + // + // For example: + // eks.amazonaws.com/role-arn: arn:aws:iam:::role/ + // +optional + Annotations map[string]string `json:"annotations,omitempty"` } type RecorderContainer struct { diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index 5e7e7455c..e09127207 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -838,6 +838,7 @@ func (in *RecorderPod) DeepCopyInto(out *RecorderPod) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + in.ServiceAccount.DeepCopyInto(&out.ServiceAccount) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderPod. @@ -850,6 +851,28 @@ func (in *RecorderPod) DeepCopy() *RecorderPod { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecorderServiceAccount) DeepCopyInto(out *RecorderServiceAccount) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderServiceAccount. +func (in *RecorderServiceAccount) DeepCopy() *RecorderServiceAccount { + if in == nil { + return nil + } + out := new(RecorderServiceAccount) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RecorderSpec) DeepCopyInto(out *RecorderSpec) { *out = *in From df8d51023e119a64e659981b9bf2dca1585b535f Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Mon, 19 May 2025 12:58:32 +0100 Subject: [PATCH 0873/1708] cmd/k8s-operator,kube/kubetypes,k8s-operator/apis: reconcile L3 HA Services (#15961) This reconciler allows users to make applications highly available at L3 by leveraging Tailscale Virtual Services. Many Kubernetes Service's (irrespective of the cluster they reside in) can be mapped to a Tailscale Virtual Service, allowing access to these Services at L3. Updates #15895 Signed-off-by: chaosinthecrd --- cmd/k8s-operator/depaware.txt | 1 + .../deploy/chart/templates/operator-rbac.yaml | 3 + .../deploy/manifests/operator.yaml | 8 + cmd/k8s-operator/ingress-for-pg.go | 11 +- cmd/k8s-operator/ingress-for-pg_test.go | 6 +- cmd/k8s-operator/operator.go | 111 ++- cmd/k8s-operator/operator_test.go | 48 + cmd/k8s-operator/proxygroup_specs.go | 5 + cmd/k8s-operator/svc-for-pg.go | 849 ++++++++++++++++++ cmd/k8s-operator/svc-for-pg_test.go | 371 ++++++++ cmd/k8s-operator/testutils_test.go | 35 + k8s-operator/apis/v1alpha1/types_connector.go | 3 + kube/kubetypes/types.go | 1 + 13 files changed, 1431 insertions(+), 21 deletions(-) create mode 100644 cmd/k8s-operator/svc-for-pg.go create mode 100644 cmd/k8s-operator/svc-for-pg_test.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 4e2215aec..bbbaebc19 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -822,6 +822,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/k8s-operator/sessionrecording/tsrecorder from tailscale.com/k8s-operator/sessionrecording+ tailscale.com/k8s-operator/sessionrecording/ws from tailscale.com/k8s-operator/sessionrecording tailscale.com/kube/egressservices from tailscale.com/cmd/k8s-operator + tailscale.com/kube/ingressservices from tailscale.com/cmd/k8s-operator tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore tailscale.com/kube/kubetypes from tailscale.com/cmd/k8s-operator+ diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml index 5bf50617e..00d8318ac 100644 --- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml @@ -25,6 +25,9 @@ rules: - apiGroups: ["networking.k8s.io"] resources: ["ingressclasses"] verbs: ["get", "list", "watch"] +- apiGroups: ["discovery.k8s.io"] + resources: ["endpointslices"] + verbs: ["get", "list", "watch"] - apiGroups: ["tailscale.com"] resources: ["connectors", "connectors/status", "proxyclasses", "proxyclasses/status", "proxygroups", "proxygroups/status"] verbs: ["get", "list", "watch", "update"] diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index e9a790d98..1d910cf92 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -4828,6 +4828,14 @@ rules: - get - list - watch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch - apiGroups: - tailscale.com resources: diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index fd6b71225..729fb2a3b 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go 
@@ -10,6 +10,7 @@ import ( "encoding/json" "errors" "fmt" + "math/rand/v2" "net/http" "reflect" "slices" @@ -17,8 +18,6 @@ import ( "sync" "time" - "math/rand/v2" - "go.uber.org/zap" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" @@ -59,6 +58,7 @@ const ( "Please contact Tailscale support through https://tailscale.com/contact/support to enable the feature flag, then recreate the operator's Pod." warningTailscaleServiceFeatureFlagNotEnabled = "TailscaleServiceFeatureFlagNotEnabled" + managedTSServiceComment = "This Tailscale Service is managed by the Tailscale Kubernetes Operator, do not modify" ) var gaugePGIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressPGResourceCount) @@ -323,7 +323,6 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin tsSvcPorts = append(tsSvcPorts, "80") } - const managedTSServiceComment = "This Tailscale Service is managed by the Tailscale Kubernetes Operator, do not modify" tsSvc := &tailscale.VIPService{ Name: serviceName, Tags: tags, @@ -532,9 +531,7 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, if err != nil { return } - if e := r.deleteFinalizer(ctx, ing, logger); err != nil { - err = errors.Join(err, e) - } + err = r.deleteFinalizer(ctx, ing, logger) }() // 1. Check if there is a Tailscale Service associated with this Ingress. @@ -766,7 +763,6 @@ const ( ) func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, pgName string, serviceName tailcfg.ServiceName, mode serviceAdvertisementMode, logger *zap.SugaredLogger) (err error) { - // Get all config Secrets for this ProxyGroup. secrets := &corev1.SecretList{} if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "config"))); err != nil { @@ -1091,7 +1087,6 @@ func (r *HAIngressReconciler) hasCerts(ctx context.Context, svc tailcfg.ServiceN Namespace: r.tsNamespace, Name: domain, }, secret) - if err != nil { if apierrors.IsNotFound(err) { return false, nil diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 989330862..b03664a76 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -11,9 +11,8 @@ import ( "fmt" "maps" "reflect" - "testing" - "slices" + "testing" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" @@ -621,7 +620,7 @@ func verifyServeConfig(t *testing.T, fc client.Client, serviceName string, wantH func verifyTailscaledConfig(t *testing.T, fc client.Client, expectedServices []string) { t.Helper() var expected string - if expectedServices != nil { + if expectedServices != nil && len(expectedServices) > 0 { expectedServicesJSON, err := json.Marshal(expectedServices) if err != nil { t.Fatalf("marshaling expected services: %v", err) @@ -641,7 +640,6 @@ func verifyTailscaledConfig(t *testing.T, fc client.Client, expectedServices []s } func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeTSClient) { - tsIngressClass := &networkingv1.IngressClass{ ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}, diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 9c35a7cec..a08dd4da8 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -241,6 +241,7 @@ func runReconcilers(opts reconcilerOpts) { nsFilter := cache.ByObject{ Field: client.InNamespace(opts.tailscaleNamespace).AsSelector(), } + 
// We watch the ServiceMonitor CRD to ensure that reconcilers are re-triggered if user's workflows result in the // ServiceMonitor CRD applied after some of our resources that define ServiceMonitor creation. This selector // ensures that we only watch the ServiceMonitor CRD and that we don't cache full contents of it. @@ -248,10 +249,13 @@ func runReconcilers(opts reconcilerOpts) { Field: fields.SelectorFromSet(fields.Set{"metadata.name": serviceMonitorCRD}), Transform: crdTransformer(startlog), } + + // TODO (irbekrm): stricter filtering what we watch/cache/call + // reconcilers on. c/r by default starts a watch on any + // resources that we GET via the controller manager's client. mgrOpts := manager.Options{ - // TODO (irbekrm): stricter filtering what we watch/cache/call - // reconcilers on. c/r by default starts a watch on any - // resources that we GET via the controller manager's client. + // The cache will apply the specified filters only to the object types listed below via ByObject. + // Other object types (e.g., EndpointSlices) can still be fetched or watched using the cached client, but they will not have any filtering applied. Cache: cache.Options{ ByObject: map[client.Object]cache.ByObject{ &corev1.Secret{}: nsFilter, @@ -260,7 +264,6 @@ func runReconcilers(opts reconcilerOpts) { &corev1.ConfigMap{}: nsFilter, &appsv1.StatefulSet{}: nsFilter, &appsv1.Deployment{}: nsFilter, - &discoveryv1.EndpointSlice{}: nsFilter, &rbacv1.Role{}: nsFilter, &rbacv1.RoleBinding{}: nsFilter, &apiextensionsv1.CustomResourceDefinition{}: serviceMonitorSelector, @@ -368,6 +371,33 @@ func runReconcilers(opts reconcilerOpts) { startlog.Fatalf("failed setting up indexer for HA Ingresses: %v", err) } + ingressSvcFromEpsFilter := handler.EnqueueRequestsFromMapFunc(ingressSvcFromEps(mgr.GetClient(), opts.log.Named("service-pg-reconciler"))) + err = builder. + ControllerManagedBy(mgr). + For(&corev1.Service{}). + Named("service-pg-reconciler"). + Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(HAServicesFromSecret(mgr.GetClient(), startlog))). + Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). + Watches(&discoveryv1.EndpointSlice{}, ingressSvcFromEpsFilter). + Complete(&HAServiceReconciler{ + recorder: eventRecorder, + tsClient: opts.tsClient, + tsnetServer: opts.tsServer, + defaultTags: strings.Split(opts.proxyTags, ","), + Client: mgr.GetClient(), + logger: opts.log.Named("service-pg-reconciler"), + lc: lc, + clock: tstime.DefaultClock{}, + operatorID: id, + tsNamespace: opts.tailscaleNamespace, + }) + if err != nil { + startlog.Fatalf("could not create service-pg-reconciler: %v", err) + } + if err := mgr.GetFieldIndexer().IndexField(context.Background(), new(corev1.Service), indexIngressProxyGroup, indexPGIngresses); err != nil { + startlog.Fatalf("failed setting up indexer for HA Services: %v", err) + } + connectorFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("connector")) // If a ProxyClassChanges, enqueue all Connectors that have // .spec.proxyClass set to the name of this ProxyClass. 
@@ -994,6 +1024,36 @@ func egressEpsFromPGStateSecrets(cl client.Client, ns string) handler.MapFunc { } } +func ingressSvcFromEps(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + svcName := o.GetLabels()[discoveryv1.LabelServiceName] + if svcName == "" { + return nil + } + + svc := &corev1.Service{} + ns := o.GetNamespace() + if err := cl.Get(ctx, types.NamespacedName{Name: svcName, Namespace: ns}, svc); err != nil { + logger.Errorf("failed to get service: %v", err) + return nil + } + + pgName := svc.Annotations[AnnotationProxyGroup] + if pgName == "" { + return nil + } + + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: ns, + Name: svcName, + }, + }, + } + } +} + // egressSvcFromEps is an event handler for EndpointSlices. If an EndpointSlice is for an egress ExternalName Service // meant to be exposed on a ProxyGroup, returns a reconcile request for the Service. func egressSvcFromEps(_ context.Context, o client.Object) []reconcile.Request { @@ -1099,6 +1159,40 @@ func HAIngressesFromSecret(cl client.Client, logger *zap.SugaredLogger) handler. } } +// HAServiceFromSecret returns a handler that returns reconcile requests for +// all HA Services that should be reconciled in response to a Secret event. +func HAServicesFromSecret(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + secret, ok := o.(*corev1.Secret) + if !ok { + logger.Infof("[unexpected] Secret handler triggered for an object that is not a Secret") + return nil + } + if !isPGStateSecret(secret) { + return nil + } + pgName, ok := secret.ObjectMeta.Labels[LabelParentName] + if !ok { + return nil + } + svcList := &corev1.ServiceList{} + if err := cl.List(ctx, svcList, client.MatchingFields{indexIngressProxyGroup: pgName}); err != nil { + logger.Infof("error listing Services, skipping a reconcile for event on Secret %s: %v", secret.Name, err) + return nil + } + reqs := make([]reconcile.Request, 0) + for _, svc := range svcList.Items { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: svc.Namespace, + Name: svc.Name, + }, + }) + } + return reqs + } +} + // egressSvcsFromEgressProxyGroup is an event handler for egress ProxyGroups. It returns reconcile requests for all // user-created ExternalName Services that should be exposed on this ProxyGroup. func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { @@ -1270,7 +1364,7 @@ func crdTransformer(log *zap.SugaredLogger) toolscache.TransformFunc { } } -// indexEgressServices adds a local index to a cached Tailscale egress Services meant to be exposed on a ProxyGroup. The +// indexEgressServices adds a local index to cached Tailscale egress Services meant to be exposed on a ProxyGroup. The // index is used a list filter. func indexEgressServices(o client.Object) []string { if !isEgressSvcForProxyGroup(o) { @@ -1279,8 +1373,8 @@ func indexEgressServices(o client.Object) []string { return []string{o.GetAnnotations()[AnnotationProxyGroup]} } -// indexPGIngresses adds a local index to a cached Tailscale Ingresses meant to be exposed on a ProxyGroup. The index is -// used a list filter. +// indexPGIngresses is used to select ProxyGroup-backed Services which are +// locally indexed in the cache for efficient listing without requiring labels. 
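+// The index value is the object's ProxyGroup annotation, so reconcilers can list all Ingresses or Services for a given ProxyGroup with one field-indexed List call.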
func indexPGIngresses(o client.Object) []string { if !hasProxyGroupAnnotation(o) { return nil @@ -1325,8 +1419,7 @@ func serviceHandlerForIngressPG(cl client.Client, logger *zap.SugaredLogger) han } func hasProxyGroupAnnotation(obj client.Object) bool { - ing := obj.(*networkingv1.Ingress) - return ing.Annotations[AnnotationProxyGroup] != "" + return obj.GetAnnotations()[AnnotationProxyGroup] != "" } func id(ctx context.Context, lc *local.Client) (string, error) { diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 175003ac7..f4b0db01c 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1802,6 +1802,54 @@ func Test_metricsResourceCreation(t *testing.T) { // object). We cannot test this using the fake client. } +func TestIgnorePGService(t *testing.T) { + // NOTE: creating proxygroup stuff just to be sure that it's all ignored + _, _, fc, _ := setupServiceTest(t) + + ft := &fakeTSClient{} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + clock := tstest.NewClock(tstest.ClockOpts{}) + sr := &ServiceReconciler{ + Client: fc, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + logger: zl.Sugar(), + clock: clock, + } + + // Create a service that we should manage, and check that the initial round + // of objects looks right. + mustCreate(t, fc, &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + // The apiserver is supposed to set the UID, but the fake client + // doesn't. So, set it explicitly because other code later depends + // on it being set. + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + "tailscale.com/proxygroup": "test-pg", + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.20.30.40", + Type: corev1.ServiceTypeClusterIP, + }, + }) + + expectReconciled(t, sr, "default", "test") + + findNoGenName(t, fc, "default", "test", "svc") +} + func toFQDN(t *testing.T, s string) dnsname.FQDN { t.Helper() fqdn, err := dnsname.ToFQDN(s) diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 0cf88b738..1d12c39e0 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -18,6 +18,7 @@ import ( "sigs.k8s.io/yaml" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/egressservices" + "tailscale.com/kube/ingressservices" "tailscale.com/kube/kubetypes" "tailscale.com/types/ptr" ) @@ -175,6 +176,10 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string Name: "TS_INTERNAL_APP", Value: kubetypes.AppProxyGroupIngress, }, + corev1.EnvVar{ + Name: "TS_INGRESS_PROXIES_CONFIG_PATH", + Value: fmt.Sprintf("/etc/proxies/%s", ingressservices.IngressConfigKey), + }, corev1.EnvVar{ Name: "TS_SERVE_CONFIG", Value: fmt.Sprintf("/etc/proxies/%s", serveConfigKey), diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go new file mode 100644 index 000000000..1200d70a4 --- /dev/null +++ b/cmd/k8s-operator/svc-for-pg.go @@ -0,0 +1,849 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/netip" + "reflect" + "slices" + "strings" + "sync" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + apiequality 
"k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/ingressservices" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" + "tailscale.com/tstime" + "tailscale.com/util/clientmetric" + "tailscale.com/util/mak" + "tailscale.com/util/set" +) + +const ( + finalizerName = "tailscale.com/service-pg-finalizer" + + reasonIngressSvcInvalid = "IngressSvcInvalid" + reasonIngressSvcValid = "IngressSvcValid" + reasonIngressSvcConfigured = "IngressSvcConfigured" + reasonIngressSvcNoBackendsConfigured = "IngressSvcNoBackendsConfigured" + reasonIngressSvcCreationFailed = "IngressSvcCreationFailed" +) + +var gaugePGServiceResources = clientmetric.NewGauge(kubetypes.MetricServicePGResourceCount) + +// HAServiceReconciler is a controller that reconciles Tailscale Kubernetes +// Services that should be exposed on an ingress ProxyGroup (in HA mode). +type HAServiceReconciler struct { + client.Client + isDefaultLoadBalancer bool + recorder record.EventRecorder + logger *zap.SugaredLogger + tsClient tsClient + tsnetServer tsnetServer + tsNamespace string + lc localClient + defaultTags []string + operatorID string // stableID of the operator's Tailscale device + + clock tstime.Clock + + mu sync.Mutex // protects following + // managedServices is a set of all Service resources that we're currently + // managing. This is only used for metrics. + managedServices set.Slice[types.UID] +} + +// Reconcile reconciles Services that should be exposed over Tailscale in HA +// mode (on a ProxyGroup). It looks at all Services with +// tailscale.com/proxy-group annotation. For each such Service, it ensures that +// a VIPService named after the hostname of the Service exists and is up to +// date. +// HA Servicees support multi-cluster Service setup. +// Each VIPService contains a list of owner references that uniquely identify +// the operator. When an Service that acts as a +// backend is being deleted, the corresponding VIPService is only deleted if the +// only owner reference that it contains is for this operator. If other owner +// references are found, then cleanup operation only removes this operator's owner +// reference. +func (r *HAServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { + logger := r.logger.With("Service", req.NamespacedName) + logger.Debugf("starting reconcile") + defer logger.Debugf("reconcile finished") + + svc := new(corev1.Service) + err = r.Get(ctx, req.NamespacedName, svc) + if apierrors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. 
+ logger.Debugf("Service not found, assuming it was deleted") + return res, nil + } else if err != nil { + return res, fmt.Errorf("failed to get Service: %w", err) + } + + hostname := nameForService(svc) + logger = logger.With("hostname", hostname) + + if !svc.DeletionTimestamp.IsZero() || !r.isTailscaleService(svc) { + logger.Debugf("Service is being deleted or is (no longer) referring to Tailscale ingress/egress, ensuring any created resources are cleaned up") + _, err = r.maybeCleanup(ctx, hostname, svc, logger) + return res, err + } + + // needsRequeue is set to true if the underlying VIPService has changed as a result of this reconcile. If that + // is the case, we reconcile the Ingress one more time to ensure that concurrent updates to the VIPService in a + // multi-cluster Ingress setup have not resulted in another actor overwriting our VIPService update. + needsRequeue := false + needsRequeue, err = r.maybeProvision(ctx, hostname, svc, logger) + if err != nil { + if strings.Contains(err.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error, retrying: %s", err) + } else { + return reconcile.Result{}, err + } + } + if needsRequeue { + res = reconcile.Result{RequeueAfter: requeueInterval()} + } + + return reconcile.Result{}, nil +} + +// maybeProvision ensures that a VIPService for this Ingress exists and is up to date and that the serve config for the +// corresponding ProxyGroup contains the Ingress backend's definition. +// If a VIPService does not exist, it will be created. +// If a VIPService exists, but only with owner references from other operator instances, an owner reference for this +// operator instance is added. +// If a VIPService exists, but does not have an owner reference from any operator, we error +// out assuming that this is an owner reference created by an unknown actor. +// Returns true if the operation resulted in a VIPService update. +func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname string, svc *corev1.Service, logger *zap.SugaredLogger) (svcsChanged bool, err error) { + oldSvcStatus := svc.Status.DeepCopy() + defer func() { + if !apiequality.Semantic.DeepEqual(oldSvcStatus, &svc.Status) { + // An error encountered here should get returned by the Reconcile function. 
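+ // errors.Join keeps both the original reconcile error and the status update error visible.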
+ err = errors.Join(err, r.Client.Status().Update(ctx, svc)) + } + }() + + pgName := svc.Annotations[AnnotationProxyGroup] + if pgName == "" { + logger.Infof("[unexpected] no ProxyGroup annotation, skipping VIPService provisioning") + return false, nil + } + + logger = logger.With("ProxyGroup", pgName) + + pg := &tsapi.ProxyGroup{} + if err := r.Get(ctx, client.ObjectKey{Name: pgName}, pg); err != nil { + if apierrors.IsNotFound(err) { + msg := fmt.Sprintf("ProxyGroup %q does not exist", pgName) + logger.Warnf(msg) + r.recorder.Event(svc, corev1.EventTypeWarning, "ProxyGroupNotFound", msg) + return false, nil + } + return false, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) + } + if !tsoperator.ProxyGroupIsReady(pg) { + logger.Infof("ProxyGroup is not (yet) ready") + return false, nil + } + + // Validate Service configuration + if violations := validateService(svc); len(violations) > 0 { + msg := fmt.Sprintf("unable to provision proxy resources: invalid Service: %s", strings.Join(violations, ", ")) + r.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVICE", msg) + r.logger.Error(msg) + tsoperator.SetServiceCondition(svc, tsapi.IngressSvcValid, metav1.ConditionFalse, reasonIngressSvcInvalid, msg, r.clock, logger) + return false, nil + } + + if !slices.Contains(svc.Finalizers, finalizerName) { + // This log line is printed exactly once during initial provisioning, + // because once the finalizer is in place this block gets skipped. So, + // this is a nice place to tell the operator that the high level, + // multi-reconcile operation is underway. + logger.Infof("exposing Service over tailscale") + svc.Finalizers = append(svc.Finalizers, finalizerName) + if err := r.Update(ctx, svc); err != nil { + return false, fmt.Errorf("failed to add finalizer: %w", err) + } + r.mu.Lock() + r.managedServices.Add(svc.UID) + gaugePGServiceResources.Set(int64(r.managedServices.Len())) + r.mu.Unlock() + } + + // 1. Ensure that if Service's hostname/name has changed, any VIPService + // resources corresponding to the old hostname are cleaned up. + // In practice, this function will ensure that any VIPServices that are + // associated with the provided ProxyGroup and no longer owned by a + // Service are cleaned up. This is fine- it is not expensive and ensures + // that in edge cases (a single update changed both hostname and removed + // ProxyGroup annotation) the VIPService is more likely to be + // (eventually) removed. + svcsChanged, err = r.maybeCleanupProxyGroup(ctx, pgName, logger) + if err != nil { + return false, fmt.Errorf("failed to cleanup VIPService resources for ProxyGroup: %w", err) + } + + // 2. Ensure that there isn't a VIPService with the same hostname + // already created and not owned by this Service. + serviceName := tailcfg.ServiceName("svc:" + hostname) + existingVIPSvc, err := r.tsClient.GetVIPService(ctx, serviceName) + if err != nil && !isErrorTailscaleServiceNotFound(err) { + return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) + } + + // 3. Generate the VIPService owner annotation for new or existing Tailscale Service. + // This checks and ensures that VIPService's owner references are updated + // for this Service and errors if that is not possible (i.e. because it + // appears that the VIPService has been created by a non-operator actor). 
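+ // ownerAnnotations returns the owner annotation with this operator's reference included, and errors if the existing Tailscale Service carries no operator owner references at all.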
+ updatedAnnotations, err := r.ownerAnnotations(existingVIPSvc) + if err != nil { + instr := fmt.Sprintf("To proceed, you can either manually delete the existing Tailscale Service or choose a different hostname with the '%s' annotaion", AnnotationHostname) + msg := fmt.Sprintf("error ensuring ownership of VIPService %s: %v. %s", hostname, err, instr) + logger.Warn(msg) + r.recorder.Event(svc, corev1.EventTypeWarning, "InvalidVIPService", msg) + tsoperator.SetServiceCondition(svc, tsapi.IngressSvcValid, metav1.ConditionFalse, reasonIngressSvcInvalid, msg, r.clock, logger) + return false, nil + } + + tags := r.defaultTags + if tstr, ok := svc.Annotations[AnnotationTags]; ok && tstr != "" { + tags = strings.Split(tstr, ",") + } + + vipSvc := &tailscale.VIPService{ + Name: serviceName, + Tags: tags, + Ports: []string{"do-not-validate"}, // we don't want to validate ports + Comment: managedTSServiceComment, + Annotations: updatedAnnotations, + } + if existingVIPSvc != nil { + vipSvc.Addrs = existingVIPSvc.Addrs + } + + // TODO(irbekrm): right now if two Service resources attempt to apply different VIPService configs (different + // tags) we can end up reconciling those in a loop. We should detect when a Service + // with the same generation number has been reconciled ~more than N times and stop attempting to apply updates. + if existingVIPSvc == nil || + !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) || + !ownersAreSetAndEqual(vipSvc, existingVIPSvc) { + logger.Infof("Ensuring VIPService exists and is up to date") + if err := r.tsClient.CreateOrUpdateVIPService(ctx, vipSvc); err != nil { + return false, fmt.Errorf("error creating VIPService: %w", err) + } + existingVIPSvc = vipSvc + } + + cm, cfgs, err := ingressSvcsConfigs(ctx, r.Client, pgName, r.tsNamespace) + if err != nil { + return false, fmt.Errorf("error retrieving ingress services configuration: %w", err) + } + if cm == nil { + logger.Info("ConfigMap not yet created, waiting..") + return false, nil + } + + if existingVIPSvc.Addrs == nil { + existingVIPSvc, err = r.tsClient.GetVIPService(ctx, vipSvc.Name) + if err != nil { + return false, fmt.Errorf("error getting VIPService: %w", err) + } + if existingVIPSvc.Addrs == nil { + // TODO(irbekrm): this should be a retry + return false, fmt.Errorf("unexpected: VIPService addresses not populated") + } + } + + var vipv4 netip.Addr + var vipv6 netip.Addr + for _, vip := range existingVIPSvc.Addrs { + ip, err := netip.ParseAddr(vip) + if err != nil { + return false, fmt.Errorf("error parsing Tailscale Service address: %w", err) + } + + if ip.Is4() { + vipv4 = ip + } else if ip.Is6() { + vipv6 = ip + } + } + + cfg := ingressservices.Config{} + for _, cip := range svc.Spec.ClusterIPs { + ip, err := netip.ParseAddr(cip) + if err != nil { + return false, fmt.Errorf("error parsing Kubernetes Service address: %w", err) + } + + if ip.Is4() { + cfg.IPv4Mapping = &ingressservices.Mapping{ + ClusterIP: ip, + TailscaleServiceIP: vipv4, + } + } else if ip.Is6() { + cfg.IPv6Mapping = &ingressservices.Mapping{ + ClusterIP: ip, + TailscaleServiceIP: vipv6, + } + } + } + + existingCfg := cfgs[serviceName.String()] + if !reflect.DeepEqual(existingCfg, cfg) { + mak.Set(&cfgs, serviceName.String(), cfg) + cfgBytes, err := json.Marshal(cfgs) + if err != nil { + return false, fmt.Errorf("error marshaling ingress config: %w", err) + } + mak.Set(&cm.BinaryData, ingressservices.IngressConfigKey, cfgBytes) + if err := r.Update(ctx, cm); err != nil { + return false, fmt.Errorf("error updating ingress config: %w", 
err) + } + } + + logger.Infof("updating AdvertiseServices config") + // 4. Update tailscaled's AdvertiseServices config, which should add the VIPService + // IPs to the ProxyGroup Pods' AllowedIPs in the next netmap update if approved. + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, svc, pg.Name, serviceName, &cfg, true, logger); err != nil { + return false, fmt.Errorf("failed to update tailscaled config: %w", err) + } + + count, err := r.numberPodsAdvertising(ctx, pgName, serviceName) + if err != nil { + return false, fmt.Errorf("failed to get number of advertised Pods: %w", err) + } + + // TODO(irbekrm): here and when creating the VIPService, verify if the + // error is not terminal (and therefore should not be reconciled). For + // example, if the hostname is already a hostname of a Tailscale node, + // the GET here will fail. + // If there are no Pods advertising the Tailscale Service (yet), we want to set 'svc.Status.LoadBalancer.Ingress' to nil" + var lbs []corev1.LoadBalancerIngress + conditionStatus := metav1.ConditionFalse + conditionType := tsapi.IngressSvcConfigured + conditionReason := reasonIngressSvcNoBackendsConfigured + conditionMessage := fmt.Sprintf("%d/%d proxy backends ready and advertising", count, pgReplicas(pg)) + if count != 0 { + dnsName, err := r.dnsNameForService(ctx, serviceName) + if err != nil { + return false, fmt.Errorf("error getting DNS name for Service: %w", err) + } + + lbs = []corev1.LoadBalancerIngress{ + { + Hostname: dnsName, + IP: vipv4.String(), + }, + } + + conditionStatus = metav1.ConditionTrue + conditionReason = reasonIngressSvcConfigured + } + + tsoperator.SetServiceCondition(svc, conditionType, conditionStatus, conditionReason, conditionMessage, r.clock, logger) + svc.Status.LoadBalancer.Ingress = lbs + + return svcsChanged, nil +} + +// maybeCleanup ensures that any resources, such as a VIPService created for this Service, are cleaned up when the +// Service is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the VIPService is only +// deleted if it does not contain any other owner references. If it does the cleanup only removes the owner reference +// corresponding to this Service. +func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, svc *corev1.Service, logger *zap.SugaredLogger) (svcChanged bool, err error) { + logger.Debugf("Ensuring any resources for Service are cleaned up") + ix := slices.Index(svc.Finalizers, finalizerName) + if ix < 0 { + logger.Debugf("no finalizer, nothing to do") + return false, nil + } + logger.Infof("Ensuring that VIPService %q configuration is cleaned up", hostname) + + defer func() { + if err != nil { + return + } + err = r.deleteFinalizer(ctx, svc, logger) + }() + + serviceName := tailcfg.ServiceName("svc:" + hostname) + // 1. Clean up the VIPService. + svcChanged, err = r.cleanupVIPService(ctx, serviceName, logger) + if err != nil { + return false, fmt.Errorf("error deleting VIPService: %w", err) + } + + // 2. Unadvertise the VIPService. + pgName := svc.Annotations[AnnotationProxyGroup] + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, svc, pgName, serviceName, nil, false, logger); err != nil { + return false, fmt.Errorf("failed to update tailscaled config services: %w", err) + } + + // TODO: maybe wait for the service to be unadvertised, only then remove the backend routing + + // 3. Clean up ingress config (routing rules). 
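+ // Dropping the entry removes this Tailscale Service's ClusterIP-to-VIP mapping from the ProxyGroup's ingress ConfigMap, so the proxies stop routing for it.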
+ cm, cfgs, err := ingressSvcsConfigs(ctx, r.Client, pgName, r.tsNamespace) + if err != nil { + return false, fmt.Errorf("error retrieving ingress services configuration: %w", err) + } + if cm == nil || cfgs == nil { + return true, nil + } + logger.Infof("Removing VIPService %q from ingress config for ProxyGroup %q", hostname, pgName) + delete(cfgs, serviceName.String()) + cfgBytes, err := json.Marshal(cfgs) + if err != nil { + return false, fmt.Errorf("error marshaling ingress config: %w", err) + } + mak.Set(&cm.BinaryData, ingressservices.IngressConfigKey, cfgBytes) + return true, r.Update(ctx, cm) +} + +// VIPServices that are associated with the provided ProxyGroup and no longer managed this operator's instance are deleted, if not owned by other operator instances, else the owner reference is cleaned up. +// Returns true if the operation resulted in existing VIPService updates (owner reference removal). +func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger) (svcsChanged bool, err error) { + cm, config, err := ingressSvcsConfigs(ctx, r.Client, proxyGroupName, r.tsNamespace) + if err != nil { + return false, fmt.Errorf("failed to get ingress service config: %s", err) + } + + svcList := &corev1.ServiceList{} + if err := r.Client.List(ctx, svcList, client.MatchingFields{indexIngressProxyGroup: proxyGroupName}); err != nil { + return false, fmt.Errorf("failed to find Services for ProxyGroup %q: %w", proxyGroupName, err) + } + + ingressConfigChanged := false + for vipSvcName, cfg := range config { + found := false + for _, svc := range svcList.Items { + if strings.EqualFold(fmt.Sprintf("svc:%s", nameForService(&svc)), vipSvcName) { + found = true + break + } + } + if !found { + logger.Infof("VIPService %q is not owned by any Service, cleaning up", vipSvcName) + + // Make sure the VIPService is not advertised in tailscaled or serve config. 
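+ // Unadvertise first so the proxies stop accepting traffic for the Tailscale Service before it is deleted or disowned.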
+ if err = r.maybeUpdateAdvertiseServicesConfig(ctx, nil, proxyGroupName, tailcfg.ServiceName(vipSvcName), &cfg, false, logger); err != nil { + return false, fmt.Errorf("failed to update tailscaled config services: %w", err) + } + + svcsChanged, err = r.cleanupVIPService(ctx, tailcfg.ServiceName(vipSvcName), logger) + if err != nil { + return false, fmt.Errorf("deleting VIPService %q: %w", vipSvcName, err) + } + + _, ok := config[vipSvcName] + if ok { + logger.Infof("Removing VIPService %q from serve config", vipSvcName) + delete(config, vipSvcName) + ingressConfigChanged = true + } + } + } + + if ingressConfigChanged { + configBytes, err := json.Marshal(config) + if err != nil { + return false, fmt.Errorf("marshaling serve config: %w", err) + } + mak.Set(&cm.BinaryData, ingressservices.IngressConfigKey, configBytes) + if err := r.Update(ctx, cm); err != nil { + return false, fmt.Errorf("updating serve config: %w", err) + } + } + + return svcsChanged, nil +} + +func (r *HAServiceReconciler) deleteFinalizer(ctx context.Context, svc *corev1.Service, logger *zap.SugaredLogger) error { + svc.Finalizers = slices.DeleteFunc(svc.Finalizers, func(f string) bool { + return f == finalizerName + }) + logger.Debugf("ensure %q finalizer is removed", finalizerName) + + if err := r.Update(ctx, svc); err != nil { + return fmt.Errorf("failed to remove finalizer %q: %w", finalizerName, err) + } + r.mu.Lock() + defer r.mu.Unlock() + r.managedServices.Remove(svc.UID) + gaugePGServiceResources.Set(int64(r.managedServices.Len())) + return nil +} + +func (r *HAServiceReconciler) isTailscaleService(svc *corev1.Service) bool { + proxyGroup := svc.Annotations[AnnotationProxyGroup] + return r.shouldExpose(svc) && proxyGroup != "" +} + +func (r *HAServiceReconciler) shouldExpose(svc *corev1.Service) bool { + return r.shouldExposeClusterIP(svc) +} + +func (r *HAServiceReconciler) shouldExposeClusterIP(svc *corev1.Service) bool { + if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" { + return false + } + return isTailscaleLoadBalancerService(svc, r.isDefaultLoadBalancer) || hasExposeAnnotation(svc) +} + +// tailnetCertDomain returns the base domain (TCD) of the current tailnet. +func (r *HAServiceReconciler) tailnetCertDomain(ctx context.Context) (string, error) { + st, err := r.lc.StatusWithoutPeers(ctx) + if err != nil { + return "", fmt.Errorf("error getting tailscale status: %w", err) + } + return st.CurrentTailnet.MagicDNSSuffix, nil +} + +// cleanupVIPService deletes any VIPService by the provided name if it is not owned by operator instances other than this one. +// If a VIPService is found, but contains other owner references, only removes this operator's owner reference. +// If a VIPService by the given name is not found or does not contain this operator's owner reference, do nothing. +// It returns true if an existing VIPService was updated to remove owner reference, as well as any error that occurred. 
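+// Owner references live in the Tailscale Service's owner annotation as JSON; only the entry matching this operator's ID is ever removed.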
+func (r *HAServiceReconciler) cleanupVIPService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (updated bool, err error) { + svc, err := r.tsClient.GetVIPService(ctx, name) + if err != nil { + errResp := &tailscale.ErrResponse{} + ok := errors.As(err, errResp) + if ok && errResp.Status == http.StatusNotFound { + return false, nil + } + if !ok { + return false, fmt.Errorf("unexpected error getting VIPService %q: %w", name.String(), err) + } + + return false, fmt.Errorf("error getting VIPService: %w", err) + } + if svc == nil { + return false, nil + } + o, err := parseOwnerAnnotation(svc) + if err != nil { + return false, fmt.Errorf("error parsing VIPService owner annotation: %w", err) + } + if o == nil || len(o.OwnerRefs) == 0 { + return false, nil + } + // Comparing with the operatorID only means that we will not be able to + // clean up VIPServices in cases where the operator was deleted from the + // cluster before deleting the Ingress. Perhaps the comparison could be + // 'if or.OperatorID == r.operatorID || or.ingressUID == r.ingressUID'. + ix := slices.IndexFunc(o.OwnerRefs, func(or OwnerRef) bool { + return or.OperatorID == r.operatorID + }) + if ix == -1 { + return false, nil + } + if len(o.OwnerRefs) == 1 { + logger.Infof("Deleting VIPService %q", name) + return false, r.tsClient.DeleteVIPService(ctx, name) + } + o.OwnerRefs = slices.Delete(o.OwnerRefs, ix, ix+1) + logger.Infof("Updating VIPService %q", name) + json, err := json.Marshal(o) + if err != nil { + return false, fmt.Errorf("error marshalling updated VIPService owner reference: %w", err) + } + svc.Annotations[ownerAnnotation] = string(json) + return true, r.tsClient.CreateOrUpdateVIPService(ctx, svc) +} + +func (a *HAServiceReconciler) backendRoutesSetup(ctx context.Context, serviceName, replicaName, pgName string, wantsCfg *ingressservices.Config, logger *zap.SugaredLogger) (bool, error) { + logger.Debugf("checking backend routes for service '%s'", serviceName) + pod := &corev1.Pod{} + err := a.Get(ctx, client.ObjectKey{Namespace: a.tsNamespace, Name: replicaName}, pod) + if apierrors.IsNotFound(err) { + logger.Debugf("Pod %q not found", replicaName) + return false, nil + } + if err != nil { + return false, fmt.Errorf("failed to get Pod: %w", err) + } + secret := &corev1.Secret{} + err = a.Get(ctx, client.ObjectKey{Namespace: a.tsNamespace, Name: replicaName}, secret) + if apierrors.IsNotFound(err) { + logger.Debugf("Secret %q not found", replicaName) + return false, nil + } + if err != nil { + return false, fmt.Errorf("failed to get Secret: %w", err) + } + if len(secret.Data) == 0 || secret.Data[ingressservices.IngressConfigKey] == nil { + return false, nil + } + gotCfgB := secret.Data[ingressservices.IngressConfigKey] + var gotCfgs ingressservices.Status + if err := json.Unmarshal(gotCfgB, &gotCfgs); err != nil { + return false, fmt.Errorf("error unmarshalling ingress config: %w", err) + } + statusUpToDate, err := isCurrentStatus(gotCfgs, pod, logger) + if err != nil { + return false, fmt.Errorf("error checking ingress config status: %w", err) + } + if !statusUpToDate || !reflect.DeepEqual(gotCfgs.Configs.GetConfig(serviceName), wantsCfg) { + logger.Debugf("Pod %q is not ready to advertise VIPService", pod.Name) + return false, nil + } + return true, nil +} + +func isCurrentStatus(gotCfgs ingressservices.Status, pod *corev1.Pod, logger *zap.SugaredLogger) (bool, error) { + ips := pod.Status.PodIPs + if len(ips) == 0 { + logger.Debugf("Pod %q does not yet have IPs, unable to determine if 
status is up to date", pod.Name) + return false, nil + } + + if len(ips) > 2 { + return false, fmt.Errorf("pod 'status.PodIPs' can contain at most 2 IPs, got %d (%v)", len(ips), ips) + } + var podIPv4, podIPv6 string + for _, ip := range ips { + parsed, err := netip.ParseAddr(ip.IP) + if err != nil { + return false, fmt.Errorf("error parsing IP address %s: %w", ip.IP, err) + } + if parsed.Is4() { + podIPv4 = parsed.String() + continue + } + podIPv6 = parsed.String() + } + if podIPv4 != gotCfgs.PodIPv4 || podIPv6 != gotCfgs.PodIPv6 { + return false, nil + } + return true, nil +} + +func (a *HAServiceReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, svc *corev1.Service, pgName string, serviceName tailcfg.ServiceName, cfg *ingressservices.Config, shouldBeAdvertised bool, logger *zap.SugaredLogger) (err error) { + logger.Debugf("checking advertisement for service '%s'", serviceName) + // Get all config Secrets for this ProxyGroup. + // Get all Pods + secrets := &corev1.SecretList{} + if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "config"))); err != nil { + return fmt.Errorf("failed to list config Secrets: %w", err) + } + + if svc != nil && shouldBeAdvertised { + shouldBeAdvertised, err = a.checkEndpointsReady(ctx, svc, logger) + if err != nil { + return fmt.Errorf("failed to check readiness of Service '%s' endpoints: %w", svc.Name, err) + } + } + + for _, secret := range secrets.Items { + var updated bool + for fileName, confB := range secret.Data { + var conf ipn.ConfigVAlpha + if err := json.Unmarshal(confB, &conf); err != nil { + return fmt.Errorf("error unmarshalling ProxyGroup config: %w", err) + } + + idx := slices.Index(conf.AdvertiseServices, serviceName.String()) + isAdvertised := idx >= 0 + switch { + case !isAdvertised && !shouldBeAdvertised: + logger.Debugf("service %q shouldn't be advertised", serviceName) + continue + case isAdvertised && shouldBeAdvertised: + logger.Debugf("service %q is already advertised", serviceName) + continue + case isAdvertised && !shouldBeAdvertised: + logger.Debugf("deleting advertisement for service %q", serviceName) + conf.AdvertiseServices = slices.Delete(conf.AdvertiseServices, idx, idx+1) + case shouldBeAdvertised: + replicaName, ok := strings.CutSuffix(secret.Name, "-config") + if !ok { + logger.Infof("[unexpected] unable to determine replica name from config Secret name %q, unable to determine if backend routing has been configured", secret.Name) + return nil + } + ready, err := a.backendRoutesSetup(ctx, serviceName.String(), replicaName, pgName, cfg, logger) + if err != nil { + return fmt.Errorf("error checking backend routes: %w", err) + } + if !ready { + logger.Debugf("service %q is not ready to be advertised", serviceName) + continue + } + + conf.AdvertiseServices = append(conf.AdvertiseServices, serviceName.String()) + } + confB, err := json.Marshal(conf) + if err != nil { + return fmt.Errorf("error marshalling ProxyGroup config: %w", err) + } + mak.Set(&secret.Data, fileName, confB) + updated = true + } + if updated { + if err := a.Update(ctx, &secret); err != nil { + return fmt.Errorf("error updating ProxyGroup config Secret: %w", err) + } + } + } + return nil +} + +func (a *HAServiceReconciler) numberPodsAdvertising(ctx context.Context, pgName string, serviceName tailcfg.ServiceName) (int, error) { + // Get all state Secrets for this ProxyGroup. 
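+	// Each replica's state Secret holds that node's preferences; the
+	// AdvertiseServices list in those prefs reflects what the replica is
+	// currently advertising, so the count below approximates how many Pods
+	// are serving this VIPService.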
+ secrets := &corev1.SecretList{} + if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "state"))); err != nil { + return 0, fmt.Errorf("failed to list ProxyGroup %q state Secrets: %w", pgName, err) + } + + var count int + for _, secret := range secrets.Items { + prefs, ok, err := getDevicePrefs(&secret) + if err != nil { + return 0, fmt.Errorf("error getting node metadata: %w", err) + } + if !ok { + continue + } + if slices.Contains(prefs.AdvertiseServices, serviceName.String()) { + count++ + } + } + + return count, nil +} + +// ownerAnnotations returns the updated annotations required to ensure this +// instance of the operator is included as an owner. If the VIPService is not +// nil, but does not contain an owner we return an error as this likely means +// that the VIPService was created by something other than a Tailscale +// Kubernetes operator. +func (r *HAServiceReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[string]string, error) { + ref := OwnerRef{ + OperatorID: r.operatorID, + } + if svc == nil { + c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}} + json, err := json.Marshal(c) + if err != nil { + return nil, fmt.Errorf("[unexpected] unable to marshal VIPService owner annotation contents: %w, please report this", err) + } + return map[string]string{ + ownerAnnotation: string(json), + }, nil + } + o, err := parseOwnerAnnotation(svc) + if err != nil { + return nil, err + } + if o == nil || len(o.OwnerRefs) == 0 { + return nil, fmt.Errorf("VIPService %s exists, but does not contain owner annotation with owner references; not proceeding as this is likely a resource created by something other than the Tailscale Kubernetes operator", svc.Name) + } + if slices.Contains(o.OwnerRefs, ref) { // up to date + return svc.Annotations, nil + } + o.OwnerRefs = append(o.OwnerRefs, ref) + json, err := json.Marshal(o) + if err != nil { + return nil, fmt.Errorf("error marshalling updated owner references: %w", err) + } + + newAnnots := make(map[string]string, len(svc.Annotations)+1) + for k, v := range svc.Annotations { + newAnnots[k] = v + } + newAnnots[ownerAnnotation] = string(json) + return newAnnots, nil +} + +// dnsNameForService returns the DNS name for the given VIPService name. +func (r *HAServiceReconciler) dnsNameForService(ctx context.Context, svc tailcfg.ServiceName) (string, error) { + s := svc.WithoutPrefix() + tcd, err := r.tailnetCertDomain(ctx) + if err != nil { + return "", fmt.Errorf("error determining DNS name base: %w", err) + } + return s + "." + tcd, nil +} + +// ingressSvcsConfig returns a ConfigMap that contains ingress services configuration for the provided ProxyGroup as well +// as unmarshalled configuration from the ConfigMap. 
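+// For illustration, the BinaryData value stored under the ingress config key
+// unmarshals into ingressservices.Configs, a map keyed by VIPService name.
+// The JSON field names shown here are indicative only:
+//
+//	{"svc:my-svc": {"IPv4Mapping": {"TailscaleServiceIP": "5.6.7.8", "ClusterIP": "10.20.30.40"}}}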
+func ingressSvcsConfigs(ctx context.Context, cl client.Client, proxyGroupName, tsNamespace string) (cm *corev1.ConfigMap, cfgs ingressservices.Configs, err error) { + name := pgIngressCMName(proxyGroupName) + cm = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: tsNamespace, + }, + } + err = cl.Get(ctx, client.ObjectKeyFromObject(cm), cm) + if apierrors.IsNotFound(err) { // ProxyGroup resources have not been created (yet) + return nil, nil, nil + } + if err != nil { + return nil, nil, fmt.Errorf("error retrieving ingress services ConfigMap %s: %v", name, err) + } + cfgs = ingressservices.Configs{} + if len(cm.BinaryData[ingressservices.IngressConfigKey]) != 0 { + if err := json.Unmarshal(cm.BinaryData[ingressservices.IngressConfigKey], &cfgs); err != nil { + return nil, nil, fmt.Errorf("error unmarshaling ingress services config %v: %w", cm.BinaryData[ingressservices.IngressConfigKey], err) + } + } + return cm, cfgs, nil +} + +func (r *HAServiceReconciler) getEndpointSlicesForService(ctx context.Context, svc *corev1.Service, logger *zap.SugaredLogger) ([]discoveryv1.EndpointSlice, error) { + logger.Debugf("looking for endpoint slices for svc with name '%s' in namespace '%s' matching label '%s=%s'", svc.Name, svc.Namespace, discoveryv1.LabelServiceName, svc.Name) + // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership + labels := map[string]string{discoveryv1.LabelServiceName: svc.Name} + eps := new(discoveryv1.EndpointSliceList) + if err := r.List(ctx, eps, client.InNamespace(svc.Namespace), client.MatchingLabels(labels)); err != nil { + return nil, fmt.Errorf("error listing EndpointSlices: %w", err) + } + + if len(eps.Items) == 0 { + logger.Debugf("Service '%s' EndpointSlice does not yet exist. 
We will reconcile again once it's created", svc.Name) + return nil, nil + } + + return eps.Items, nil +} + +func (r *HAServiceReconciler) checkEndpointsReady(ctx context.Context, svc *corev1.Service, logger *zap.SugaredLogger) (bool, error) { + epss, err := r.getEndpointSlicesForService(ctx, svc, logger) + if err != nil { + return false, fmt.Errorf("failed to list EndpointSlices for Service %q: %w", svc.Name, err) + } + for _, eps := range epss { + for _, ep := range eps.Endpoints { + if *ep.Conditions.Ready { + return true, nil + } + } + } + + logger.Debugf("could not find any ready Endpoints in EndpointSlice") + return false, nil +} diff --git a/cmd/k8s-operator/svc-for-pg_test.go b/cmd/k8s-operator/svc-for-pg_test.go new file mode 100644 index 000000000..66923ce7d --- /dev/null +++ b/cmd/k8s-operator/svc-for-pg_test.go @@ -0,0 +1,371 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math/rand/v2" + "net/http" + "net/netip" + "testing" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn/ipnstate" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/ingressservices" + "tailscale.com/tstest" + "tailscale.com/types/ptr" + "tailscale.com/util/mak" + + "tailscale.com/tailcfg" +) + +func TestServicePGReconciler(t *testing.T) { + svcPGR, stateSecret, fc, ft := setupServiceTest(t) + svcs := []*corev1.Service{} + config := []string{} + for i := range 4 { + svc, _ := setupTestService(t, fmt.Sprintf("test-svc-%d", i), "", fmt.Sprintf("1.2.3.%d", i), fc, stateSecret) + svcs = append(svcs, svc) + + // Verify initial reconciliation + expectReconciled(t, svcPGR, "default", svc.Name) + + config = append(config, fmt.Sprintf("svc:default-%s", svc.Name)) + verifyVIPService(t, ft, fmt.Sprintf("svc:default-%s", svc.Name), []string{"do-not-validate"}) + verifyTailscaledConfig(t, fc, config) + } + + for i, svc := range svcs { + if err := fc.Delete(context.Background(), svc); err != nil { + t.Fatalf("deleting Service: %v", err) + } + + expectReconciled(t, svcPGR, "default", svc.Name) + + // Verify the ConfigMap was cleaned up + cm := &corev1.ConfigMap{} + if err := fc.Get(context.Background(), types.NamespacedName{ + Name: "test-pg-ingress-config", + Namespace: "operator-ns", + }, cm); err != nil { + t.Fatalf("getting ConfigMap: %v", err) + } + + cfgs := ingressservices.Configs{} + if err := json.Unmarshal(cm.BinaryData[ingressservices.IngressConfigKey], &cfgs); err != nil { + t.Fatalf("unmarshaling serve config: %v", err) + } + + if len(cfgs) > len(svcs)-(i+1) { + t.Error("serve config not cleaned up") + } + + config = removeEl(config, fmt.Sprintf("svc:default-%s", svc.Name)) + verifyTailscaledConfig(t, fc, config) + } +} + +func TestServicePGReconciler_UpdateHostname(t *testing.T) { + svcPGR, stateSecret, fc, ft := setupServiceTest(t) + + cip := "4.1.6.7" + svc, _ := setupTestService(t, "test-service", "", cip, fc, stateSecret) + + expectReconciled(t, svcPGR, "default", svc.Name) + + verifyVIPService(t, ft, fmt.Sprintf("svc:default-%s", svc.Name), []string{"do-not-validate"}) + 
verifyTailscaledConfig(t, fc, []string{fmt.Sprintf("svc:default-%s", svc.Name)}) + + hostname := "foobarbaz" + mustUpdate(t, fc, svc.Namespace, svc.Name, func(s *corev1.Service) { + mak.Set(&s.Annotations, AnnotationHostname, hostname) + }) + + // NOTE: we need to update the ingress config Secret because there is no containerboot in the fake proxy Pod + updateIngressConfigSecret(t, fc, stateSecret, hostname, cip) + expectReconciled(t, svcPGR, "default", svc.Name) + + verifyVIPService(t, ft, fmt.Sprintf("svc:%s", hostname), []string{"do-not-validate"}) + verifyTailscaledConfig(t, fc, []string{fmt.Sprintf("svc:%s", hostname)}) + + _, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName(fmt.Sprintf("svc:default-%s", svc.Name))) + if err == nil { + t.Fatalf("svc:default-%s not cleaned up", svc.Name) + } + var errResp *tailscale.ErrResponse + if !errors.As(err, &errResp) || errResp.Status != http.StatusNotFound { + t.Fatalf("unexpected error: %v", err) + } +} + +func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, client.Client, *fakeTSClient) { + // Pre-create the ProxyGroup + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg", + Generation: 1, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeIngress, + }, + } + + // Pre-create the ConfigMap for the ProxyGroup + pgConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg-ingress-config", + Namespace: "operator-ns", + }, + BinaryData: map[string][]byte{ + "serve-config.json": []byte(`{"Services":{}}`), + }, + } + + // Pre-create a config Secret for the ProxyGroup + pgCfgSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName("test-pg", 0), + Namespace: "operator-ns", + Labels: pgSecretLabels("test-pg", "config"), + }, + Data: map[string][]byte{ + tsoperator.TailscaledConfigFileName(106): []byte(`{"Version":""}`), + }, + } + + pgStateSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg-0", + Namespace: "operator-ns", + }, + Data: map[string][]byte{}, + } + + pgPod := &corev1.Pod{ + TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg-0", + Namespace: "operator-ns", + }, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + { + IP: "4.3.2.1", + }, + }, + }, + } + + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(pg, pgCfgSecret, pgConfigMap, pgPod, pgStateSecret). + WithStatusSubresource(pg). + WithIndex(new(corev1.Service), indexIngressProxyGroup, indexPGIngresses). 
+ Build() + + // Set ProxyGroup status to ready + pg.Status.Conditions = []metav1.Condition{ + { + Type: string(tsapi.ProxyGroupReady), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + } + if err := fc.Status().Update(context.Background(), pg); err != nil { + t.Fatal(err) + } + fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} + + ft := &fakeTSClient{} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + lc := &fakeLocalClient{ + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{ + MagicDNSSuffix: "ts.net", + }, + }, + } + + cl := tstest.NewClock(tstest.ClockOpts{}) + svcPGR := &HAServiceReconciler{ + Client: fc, + tsClient: ft, + clock: cl, + defaultTags: []string{"tag:k8s"}, + tsNamespace: "operator-ns", + tsnetServer: fakeTsnetServer, + logger: zl.Sugar(), + recorder: record.NewFakeRecorder(10), + lc: lc, + } + + return svcPGR, pgStateSecret, fc, ft +} + +func TestServicePGReconciler_MultiCluster(t *testing.T) { + var ft *fakeTSClient + var lc localClient + for i := 0; i <= 10; i++ { + pgr, stateSecret, fc, fti := setupServiceTest(t) + if i == 0 { + ft = fti + lc = pgr.lc + } else { + pgr.tsClient = ft + pgr.lc = lc + } + + svc, _ := setupTestService(t, "test-multi-cluster", "", "4.3.2.1", fc, stateSecret) + expectReconciled(t, pgr, "default", svc.Name) + + vipSvcs, err := ft.ListVIPServices(context.Background()) + if err != nil { + t.Fatalf("getting VIPService: %v", err) + } + + if len(vipSvcs) != 1 { + t.Fatalf("unexpected number of VIPServices (%d)", len(vipSvcs)) + } + + for name := range vipSvcs { + t.Logf("found vip service with name %q", name.String()) + } + } +} + +func TestIgnoreRegularService(t *testing.T) { + pgr, _, fc, ft := setupServiceTest(t) + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + // The apiserver is supposed to set the UID, but the fake client + // doesn't. So, set it explicitly because other code later depends + // on it being set. 
+ UID: types.UID("1234-UID"), + Annotations: map[string]string{ + "tailscale.com/expose": "true", + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.20.30.40", + Type: corev1.ServiceTypeClusterIP, + }, + } + + mustCreate(t, fc, svc) + expectReconciled(t, pgr, "default", "test") + + verifyTailscaledConfig(t, fc, nil) + + vipSvcs, err := ft.ListVIPServices(context.Background()) + if err == nil { + if len(vipSvcs) > 0 { + t.Fatal("unexpected vip services found") + } + } +} + +func removeEl(s []string, value string) []string { + result := s[:0] + for _, v := range s { + if v != value { + result = append(result, v) + } + } + return result +} + +func updateIngressConfigSecret(t *testing.T, fc client.Client, stateSecret *corev1.Secret, serviceName string, clusterIP string) { + ingressConfig := ingressservices.Configs{ + fmt.Sprintf("svc:%s", serviceName): ingressservices.Config{ + IPv4Mapping: &ingressservices.Mapping{ + TailscaleServiceIP: netip.MustParseAddr(vipTestIP), + ClusterIP: netip.MustParseAddr(clusterIP), + }, + }, + } + + ingressStatus := ingressservices.Status{ + Configs: ingressConfig, + PodIPv4: "4.3.2.1", + } + + icJson, err := json.Marshal(ingressStatus) + if err != nil { + t.Fatalf("failed to json marshal ingress config: %s", err.Error()) + } + + mustUpdate(t, fc, stateSecret.Namespace, stateSecret.Name, func(sec *corev1.Secret) { + mak.Set(&sec.Data, ingressservices.IngressConfigKey, icJson) + }) +} + +func setupTestService(t *testing.T, svcName string, hostname string, clusterIP string, fc client.Client, stateSecret *corev1.Secret) (svc *corev1.Service, eps *discoveryv1.EndpointSlice) { + uid := rand.IntN(100) + svc = &corev1.Service{ + TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: svcName, + Namespace: "default", + UID: types.UID(fmt.Sprintf("%d-UID", uid)), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + LoadBalancerClass: ptr.To("tailscale"), + ClusterIP: clusterIP, + ClusterIPs: []string{clusterIP}, + }, + } + + eps = &discoveryv1.EndpointSlice{ + TypeMeta: metav1.TypeMeta{Kind: "EndpointSlice", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: svcName, + Namespace: "default", + Labels: map[string]string{ + discoveryv1.LabelServiceName: svcName, + }, + }, + AddressType: discoveryv1.AddressTypeIPv4, + Endpoints: []discoveryv1.Endpoint{ + { + Addresses: []string{"4.3.2.1"}, + Conditions: discoveryv1.EndpointConditions{ + Ready: ptr.To(true), + }, + }, + }, + } + + updateIngressConfigSecret(t, fc, stateSecret, fmt.Sprintf("default-%s", svcName), clusterIP) + + mustCreate(t, fc, svc) + mustCreate(t, fc, eps) + + return svc, eps +} diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index f47f96e44..3d9bdbf9a 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -38,6 +38,10 @@ import ( "tailscale.com/util/mak" ) +const ( + vipTestIP = "5.6.7.8" +) + // confgOpts contains configuration options for creating cluster resources for // Tailscale proxies. 
type configOpts struct { @@ -561,6 +565,23 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec return s } +func findNoGenName(t *testing.T, client client.Client, ns, name, typ string) { + t.Helper() + labels := map[string]string{ + kubetypes.LabelManaged: "true", + LabelParentName: name, + LabelParentNamespace: ns, + LabelParentType: typ, + } + s, err := getSingleObject[corev1.Secret](context.Background(), client, "operator-ns", labels) + if err != nil { + t.Fatalf("finding secrets for %q: %v", name, err) + } + if s != nil { + t.Fatalf("found unexpected secret with name %q", s.GetName()) + } +} + func findGenName(t *testing.T, client client.Client, ns, name, typ string) (full, noSuffix string) { t.Helper() labels := map[string]string{ @@ -889,12 +910,26 @@ func (c *fakeTSClient) GetVIPService(ctx context.Context, name tailcfg.ServiceNa return svc, nil } +func (c *fakeTSClient) ListVIPServices(ctx context.Context) (map[tailcfg.ServiceName]*tailscale.VIPService, error) { + c.Lock() + defer c.Unlock() + if c.vipServices == nil { + return nil, &tailscale.ErrResponse{Status: http.StatusNotFound} + } + return c.vipServices, nil +} + func (c *fakeTSClient) CreateOrUpdateVIPService(ctx context.Context, svc *tailscale.VIPService) error { c.Lock() defer c.Unlock() if c.vipServices == nil { c.vipServices = make(map[tailcfg.ServiceName]*tailscale.VIPService) } + + if svc.Addrs == nil { + svc.Addrs = []string{vipTestIP} + } + c.vipServices[svc.Name] = svc return nil } diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index a26c9b542..b8b7a935e 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -222,4 +222,7 @@ const ( // on a ProxyGroup. // Set to true if the service is ready to route cluster traffic. EgressSvcReady ConditionType = `TailscaleEgressSvcReady` + + IngressSvcValid ConditionType = `TailscaleIngressSvcValid` + IngressSvcConfigured ConditionType = `TailscaleIngressSvcConfigured` ) diff --git a/kube/kubetypes/types.go b/kube/kubetypes/types.go index e54e1c99f..6f96875dd 100644 --- a/kube/kubetypes/types.go +++ b/kube/kubetypes/types.go @@ -18,6 +18,7 @@ const ( MetricIngressProxyCount = "k8s_ingress_proxies" // L3 MetricIngressResourceCount = "k8s_ingress_resources" // L7 MetricIngressPGResourceCount = "k8s_ingress_pg_resources" // L7 on ProxyGroup + MetricServicePGResourceCount = "k8s_service_pg_resources" // L3 on ProxyGroup MetricEgressProxyCount = "k8s_egress_proxies" MetricConnectorResourceCount = "k8s_connector_resources" MetricConnectorWithSubnetRouterCount = "k8s_connector_subnetrouter_resources" From 7fe27496c81c683b474d09c3474a4fcff55efd12 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Mon, 19 May 2025 14:34:44 +0100 Subject: [PATCH 0874/1708] cmd/k8s-operator: warn if HA Service is applied, but VIPService feature flag is not enabled (#16013) Updates #15895 Signed-off-by: chaosinthecrd --- cmd/k8s-operator/svc-for-pg.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go index 1200d70a4..ccd6a0fe8 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -211,6 +211,11 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // already created and not owned by this Service. 
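+	// If the tailnet does not have the Tailscale Services feature flag enabled,
+	// the check below records a warning event on the Service and returns without
+	// an error rather than failing the reconcile.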
serviceName := tailcfg.ServiceName("svc:" + hostname) existingVIPSvc, err := r.tsClient.GetVIPService(ctx, serviceName) + if isErrorFeatureFlagNotEnabled(err) { + logger.Warn(msgFeatureFlagNotEnabled) + r.recorder.Event(svc, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled) + return false, nil + } if err != nil && !isErrorTailscaleServiceNotFound(err) { return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) } @@ -529,6 +534,11 @@ func (r *HAServiceReconciler) tailnetCertDomain(ctx context.Context) (string, er // It returns true if an existing VIPService was updated to remove owner reference, as well as any error that occurred. func (r *HAServiceReconciler) cleanupVIPService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (updated bool, err error) { svc, err := r.tsClient.GetVIPService(ctx, name) + if isErrorFeatureFlagNotEnabled(err) { + msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled) + logger.Warn(msg) + return false, nil + } if err != nil { errResp := &tailscale.ErrResponse{} ok := errors.As(err, errResp) From b5770c81c9a8bbb2a56f6c6c1b87e0013ced978a Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Mon, 19 May 2025 16:33:34 +0100 Subject: [PATCH 0875/1708] cmd/k8s-operator: rename VIPService -> Tailscale Service in L3 HA Service Reconciler (#16014) Also changes wording tests for L7 HA Reconciler Updates #15895 Signed-off-by: chaosinthecrd --- cmd/k8s-operator/ingress-for-pg_test.go | 72 +++++----- cmd/k8s-operator/svc-for-pg.go | 178 ++++++++++++------------ cmd/k8s-operator/svc-for-pg_test.go | 24 ++-- 3 files changed, 137 insertions(+), 137 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index b03664a76..3330da8d0 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -68,7 +68,7 @@ func TestIngressPGReconciler(t *testing.T) { populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:my-svc", false) - verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) + verifyTailscaleService(t, ft, "svc:my-svc", []string{"443"}) verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) // Verify that Role and RoleBinding have been created for the first Ingress. 
@@ -81,20 +81,20 @@ func TestIngressPGReconciler(t *testing.T) { }) expectReconciled(t, ingPGR, "default", "test-ingress") - // Verify VIPService uses custom tags - vipSvc, err := ft.GetVIPService(context.Background(), "svc:my-svc") + // Verify Tailscale Service uses custom tags + tsSvc, err := ft.GetVIPService(context.Background(), "svc:my-svc") if err != nil { - t.Fatalf("getting VIPService: %v", err) + t.Fatalf("getting Tailscale Service: %v", err) } - if vipSvc == nil { - t.Fatal("VIPService not created") + if tsSvc == nil { + t.Fatal("Tailscale Service not created") } wantTags := []string{"tag:custom", "tag:test"} // custom tags only - gotTags := slices.Clone(vipSvc.Tags) + gotTags := slices.Clone(tsSvc.Tags) slices.Sort(gotTags) slices.Sort(wantTags) if !slices.Equal(gotTags, wantTags) { - t.Errorf("incorrect VIPService tags: got %v, want %v", gotTags, wantTags) + t.Errorf("incorrect Tailscale Service tags: got %v, want %v", gotTags, wantTags) } // Create second Ingress @@ -130,7 +130,7 @@ func TestIngressPGReconciler(t *testing.T) { populateTLSSecret(context.Background(), fc, "test-pg", "my-other-svc.ts.net") expectReconciled(t, ingPGR, "default", "my-other-ingress") verifyServeConfig(t, fc, "svc:my-other-svc", false) - verifyVIPService(t, ft, "svc:my-other-svc", []string{"443"}) + verifyTailscaleService(t, ft, "svc:my-other-svc", []string{"443"}) // Verify that Role and RoleBinding have been created for the first Ingress. // Do not verify the cert Secret as that was already verified implicitly above. @@ -139,7 +139,7 @@ func TestIngressPGReconciler(t *testing.T) { // Verify first Ingress is still working verifyServeConfig(t, fc, "svc:my-svc", false) - verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) + verifyTailscaleService(t, ft, "svc:my-svc", []string{"443"}) verifyTailscaledConfig(t, fc, []string{"svc:my-svc", "svc:my-other-svc"}) @@ -244,10 +244,10 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:my-svc", false) - verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) + verifyTailscaleService(t, ft, "svc:my-svc", []string{"443"}) verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) - // Update the Ingress hostname and make sure the original VIPService is deleted. + // Update the Ingress hostname and make sure the original Tailscale Service is deleted. 
mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { ing.Spec.TLS[0].Hosts[0] = "updated-svc" }) @@ -255,7 +255,7 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { populateTLSSecret(context.Background(), fc, "test-pg", "updated-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:updated-svc", false) - verifyVIPService(t, ft, "svc:updated-svc", []string{"443"}) + verifyTailscaleService(t, ft, "svc:updated-svc", []string{"443"}) verifyTailscaledConfig(t, fc, []string{"svc:updated-svc"}) _, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName("svc:my-svc")) @@ -475,7 +475,7 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { expectReconciled(t, ingPGR, "default", "test-ingress") populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") - verifyVIPService(t, ft, "svc:my-svc", []string{"80", "443"}) + verifyTailscaleService(t, ft, "svc:my-svc", []string{"80", "443"}) verifyServeConfig(t, fc, "svc:my-svc", true) // Verify Ingress status @@ -487,13 +487,13 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { t.Fatal(err) } - // Status will be empty until the VIPService shows up in prefs. + // Status will be empty until the Tailscale Service shows up in prefs. if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress, []networkingv1.IngressLoadBalancerIngress(nil)) { t.Errorf("incorrect Ingress status: got %v, want empty", ing.Status.LoadBalancer.Ingress) } - // Add the VIPService to prefs to have the Ingress recognised as ready. + // Add the Tailscale Service to prefs to have the Ingress recognised as ready. mustCreate(t, fc, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "test-pg-0", @@ -528,7 +528,7 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { // Verify reconciliation after removing HTTP expectReconciled(t, ingPGR, "default", "test-ingress") - verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) + verifyTailscaleService(t, ft, "svc:my-svc", []string{"443"}) verifyServeConfig(t, fc, "svc:my-svc", false) // Verify Ingress status @@ -549,20 +549,20 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { } } -func verifyVIPService(t *testing.T, ft *fakeTSClient, serviceName string, wantPorts []string) { +func verifyTailscaleService(t *testing.T, ft *fakeTSClient, serviceName string, wantPorts []string) { t.Helper() - vipSvc, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName(serviceName)) + tsSvc, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName(serviceName)) if err != nil { - t.Fatalf("getting VIPService %q: %v", serviceName, err) + t.Fatalf("getting Tailscale Service %q: %v", serviceName, err) } - if vipSvc == nil { - t.Fatalf("VIPService %q not created", serviceName) + if tsSvc == nil { + t.Fatalf("Tailscale Service %q not created", serviceName) } - gotPorts := slices.Clone(vipSvc.Ports) + gotPorts := slices.Clone(tsSvc.Ports) slices.Sort(gotPorts) slices.Sort(wantPorts) if !slices.Equal(gotPorts, wantPorts) { - t.Errorf("incorrect ports for VIPService %q: got %v, want %v", serviceName, gotPorts, wantPorts) + t.Errorf("incorrect ports for Tailscale Service %q: got %v, want %v", serviceName, gotPorts, wantPorts) } } @@ -750,7 +750,7 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) { } mustCreate(t, fc, ing) - // Simulate existing VIPService from another cluster + // Simulate existing Tailscale Service from another cluster existingVIPSvc := 
&tailscale.VIPService{ Name: "svc:my-svc", Annotations: map[string]string{ @@ -764,15 +764,15 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) { // Verify reconciliation adds our operator reference expectReconciled(t, ingPGR, "default", "test-ingress") - vipSvc, err := ft.GetVIPService(context.Background(), "svc:my-svc") + tsSvc, err := ft.GetVIPService(context.Background(), "svc:my-svc") if err != nil { - t.Fatalf("getting VIPService: %v", err) + t.Fatalf("getting Tailscale Service: %v", err) } - if vipSvc == nil { - t.Fatal("VIPService not found") + if tsSvc == nil { + t.Fatal("Tailscale Service not found") } - o, err := parseOwnerAnnotation(vipSvc) + o, err := parseOwnerAnnotation(tsSvc) if err != nil { t.Fatalf("parsing owner annotation: %v", err) } @@ -785,21 +785,21 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) { t.Errorf("incorrect owner refs\ngot: %+v\nwant: %+v", o.OwnerRefs, wantOwnerRefs) } - // Delete the Ingress and verify VIPService still exists with one owner ref + // Delete the Ingress and verify Tailscale Service still exists with one owner ref if err := fc.Delete(context.Background(), ing); err != nil { t.Fatalf("deleting Ingress: %v", err) } expectRequeue(t, ingPGR, "default", "test-ingress") - vipSvc, err = ft.GetVIPService(context.Background(), "svc:my-svc") + tsSvc, err = ft.GetVIPService(context.Background(), "svc:my-svc") if err != nil { - t.Fatalf("getting VIPService after deletion: %v", err) + t.Fatalf("getting Tailscale Service after deletion: %v", err) } - if vipSvc == nil { - t.Fatal("VIPService was incorrectly deleted") + if tsSvc == nil { + t.Fatal("Tailscale Service was incorrectly deleted") } - o, err = parseOwnerAnnotation(vipSvc) + o, err = parseOwnerAnnotation(tsSvc) if err != nil { t.Fatalf("parsing owner annotation: %v", err) } diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go index ccd6a0fe8..779f2714e 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -77,12 +77,12 @@ type HAServiceReconciler struct { // Reconcile reconciles Services that should be exposed over Tailscale in HA // mode (on a ProxyGroup). It looks at all Services with // tailscale.com/proxy-group annotation. For each such Service, it ensures that -// a VIPService named after the hostname of the Service exists and is up to +// a Tailscale Service named after the hostname of the Service exists and is up to // date. // HA Servicees support multi-cluster Service setup. -// Each VIPService contains a list of owner references that uniquely identify +// Each Tailscale Service contains a list of owner references that uniquely identify // the operator. When an Service that acts as a -// backend is being deleted, the corresponding VIPService is only deleted if the +// backend is being deleted, the corresponding Tailscale Service is only deleted if the // only owner reference that it contains is for this operator. If other owner // references are found, then cleanup operation only removes this operator's owner // reference. @@ -110,9 +110,9 @@ func (r *HAServiceReconciler) Reconcile(ctx context.Context, req reconcile.Reque return res, err } - // needsRequeue is set to true if the underlying VIPService has changed as a result of this reconcile. If that - // is the case, we reconcile the Ingress one more time to ensure that concurrent updates to the VIPService in a - // multi-cluster Ingress setup have not resulted in another actor overwriting our VIPService update. 
+ // needsRequeue is set to true if the underlying Tailscale Service has changed as a result of this reconcile. If that + // is the case, we reconcile the Ingress one more time to ensure that concurrent updates to the Tailscale Service in a + // multi-cluster Ingress setup have not resulted in another actor overwriting our Tailscale Service update. needsRequeue := false needsRequeue, err = r.maybeProvision(ctx, hostname, svc, logger) if err != nil { @@ -129,14 +129,14 @@ func (r *HAServiceReconciler) Reconcile(ctx context.Context, req reconcile.Reque return reconcile.Result{}, nil } -// maybeProvision ensures that a VIPService for this Ingress exists and is up to date and that the serve config for the +// maybeProvision ensures that a Tailscale Service for this Ingress exists and is up to date and that the serve config for the // corresponding ProxyGroup contains the Ingress backend's definition. -// If a VIPService does not exist, it will be created. -// If a VIPService exists, but only with owner references from other operator instances, an owner reference for this +// If a Tailscale Service does not exist, it will be created. +// If a Tailscale Service exists, but only with owner references from other operator instances, an owner reference for this // operator instance is added. -// If a VIPService exists, but does not have an owner reference from any operator, we error +// If a Tailscale Service exists, but does not have an owner reference from any operator, we error // out assuming that this is an owner reference created by an unknown actor. -// Returns true if the operation resulted in a VIPService update. +// Returns true if the operation resulted in a Tailscale Service update. func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname string, svc *corev1.Service, logger *zap.SugaredLogger) (svcsChanged bool, err error) { oldSvcStatus := svc.Status.DeepCopy() defer func() { @@ -148,7 +148,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin pgName := svc.Annotations[AnnotationProxyGroup] if pgName == "" { - logger.Infof("[unexpected] no ProxyGroup annotation, skipping VIPService provisioning") + logger.Infof("[unexpected] no ProxyGroup annotation, skipping Tailscale Service provisioning") return false, nil } @@ -194,23 +194,23 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin r.mu.Unlock() } - // 1. Ensure that if Service's hostname/name has changed, any VIPService + // 1. Ensure that if Service's hostname/name has changed, any Tailscale Service // resources corresponding to the old hostname are cleaned up. - // In practice, this function will ensure that any VIPServices that are + // In practice, this function will ensure that any Tailscale Services that are // associated with the provided ProxyGroup and no longer owned by a // Service are cleaned up. This is fine- it is not expensive and ensures // that in edge cases (a single update changed both hostname and removed - // ProxyGroup annotation) the VIPService is more likely to be + // ProxyGroup annotation) the Tailscale Service is more likely to be // (eventually) removed. svcsChanged, err = r.maybeCleanupProxyGroup(ctx, pgName, logger) if err != nil { - return false, fmt.Errorf("failed to cleanup VIPService resources for ProxyGroup: %w", err) + return false, fmt.Errorf("failed to cleanup Tailscale Service resources for ProxyGroup: %w", err) } - // 2. Ensure that there isn't a VIPService with the same hostname + // 2. 
Ensure that there isn't a Tailscale Service with the same hostname // already created and not owned by this Service. serviceName := tailcfg.ServiceName("svc:" + hostname) - existingVIPSvc, err := r.tsClient.GetVIPService(ctx, serviceName) + existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) if isErrorFeatureFlagNotEnabled(err) { logger.Warn(msgFeatureFlagNotEnabled) r.recorder.Event(svc, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled) @@ -220,16 +220,16 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) } - // 3. Generate the VIPService owner annotation for new or existing Tailscale Service. - // This checks and ensures that VIPService's owner references are updated + // 3. Generate the Tailscale Service owner annotation for new or existing Tailscale Service. + // This checks and ensures that Tailscale Service's owner references are updated // for this Service and errors if that is not possible (i.e. because it - // appears that the VIPService has been created by a non-operator actor). - updatedAnnotations, err := r.ownerAnnotations(existingVIPSvc) + // appears that the Tailscale Service has been created by a non-operator actor). + updatedAnnotations, err := r.ownerAnnotations(existingTSSvc) if err != nil { instr := fmt.Sprintf("To proceed, you can either manually delete the existing Tailscale Service or choose a different hostname with the '%s' annotaion", AnnotationHostname) - msg := fmt.Sprintf("error ensuring ownership of VIPService %s: %v. %s", hostname, err, instr) + msg := fmt.Sprintf("error ensuring ownership of Tailscale Service %s: %v. %s", hostname, err, instr) logger.Warn(msg) - r.recorder.Event(svc, corev1.EventTypeWarning, "InvalidVIPService", msg) + r.recorder.Event(svc, corev1.EventTypeWarning, "InvalidTailscaleService", msg) tsoperator.SetServiceCondition(svc, tsapi.IngressSvcValid, metav1.ConditionFalse, reasonIngressSvcInvalid, msg, r.clock, logger) return false, nil } @@ -239,28 +239,28 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin tags = strings.Split(tstr, ",") } - vipSvc := &tailscale.VIPService{ + tsSvc := &tailscale.VIPService{ Name: serviceName, Tags: tags, Ports: []string{"do-not-validate"}, // we don't want to validate ports Comment: managedTSServiceComment, Annotations: updatedAnnotations, } - if existingVIPSvc != nil { - vipSvc.Addrs = existingVIPSvc.Addrs + if existingTSSvc != nil { + tsSvc.Addrs = existingTSSvc.Addrs } - // TODO(irbekrm): right now if two Service resources attempt to apply different VIPService configs (different + // TODO(irbekrm): right now if two Service resources attempt to apply different Tailscale Service configs (different // tags) we can end up reconciling those in a loop. We should detect when a Service // with the same generation number has been reconciled ~more than N times and stop attempting to apply updates. 
- if existingVIPSvc == nil || - !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) || - !ownersAreSetAndEqual(vipSvc, existingVIPSvc) { - logger.Infof("Ensuring VIPService exists and is up to date") - if err := r.tsClient.CreateOrUpdateVIPService(ctx, vipSvc); err != nil { - return false, fmt.Errorf("error creating VIPService: %w", err) + if existingTSSvc == nil || + !reflect.DeepEqual(tsSvc.Tags, existingTSSvc.Tags) || + !ownersAreSetAndEqual(tsSvc, existingTSSvc) { + logger.Infof("Ensuring Tailscale Service exists and is up to date") + if err := r.tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { + return false, fmt.Errorf("error creating Tailscale Service: %w", err) } - existingVIPSvc = vipSvc + existingTSSvc = tsSvc } cm, cfgs, err := ingressSvcsConfigs(ctx, r.Client, pgName, r.tsNamespace) @@ -272,29 +272,29 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin return false, nil } - if existingVIPSvc.Addrs == nil { - existingVIPSvc, err = r.tsClient.GetVIPService(ctx, vipSvc.Name) + if existingTSSvc.Addrs == nil { + existingTSSvc, err = r.tsClient.GetVIPService(ctx, tsSvc.Name) if err != nil { - return false, fmt.Errorf("error getting VIPService: %w", err) + return false, fmt.Errorf("error getting Tailscale Service: %w", err) } - if existingVIPSvc.Addrs == nil { + if existingTSSvc.Addrs == nil { // TODO(irbekrm): this should be a retry - return false, fmt.Errorf("unexpected: VIPService addresses not populated") + return false, fmt.Errorf("unexpected: Tailscale Service addresses not populated") } } - var vipv4 netip.Addr - var vipv6 netip.Addr - for _, vip := range existingVIPSvc.Addrs { - ip, err := netip.ParseAddr(vip) + var tsSvcIPv4 netip.Addr + var tsSvcIPv6 netip.Addr + for _, tsip := range existingTSSvc.Addrs { + ip, err := netip.ParseAddr(tsip) if err != nil { return false, fmt.Errorf("error parsing Tailscale Service address: %w", err) } if ip.Is4() { - vipv4 = ip + tsSvcIPv4 = ip } else if ip.Is6() { - vipv6 = ip + tsSvcIPv6 = ip } } @@ -308,12 +308,12 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin if ip.Is4() { cfg.IPv4Mapping = &ingressservices.Mapping{ ClusterIP: ip, - TailscaleServiceIP: vipv4, + TailscaleServiceIP: tsSvcIPv4, } } else if ip.Is6() { cfg.IPv6Mapping = &ingressservices.Mapping{ ClusterIP: ip, - TailscaleServiceIP: vipv6, + TailscaleServiceIP: tsSvcIPv6, } } } @@ -332,7 +332,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin } logger.Infof("updating AdvertiseServices config") - // 4. Update tailscaled's AdvertiseServices config, which should add the VIPService + // 4. Update tailscaled's AdvertiseServices config, which should add the Tailscale Service // IPs to the ProxyGroup Pods' AllowedIPs in the next netmap update if approved. if err = r.maybeUpdateAdvertiseServicesConfig(ctx, svc, pg.Name, serviceName, &cfg, true, logger); err != nil { return false, fmt.Errorf("failed to update tailscaled config: %w", err) @@ -343,7 +343,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin return false, fmt.Errorf("failed to get number of advertised Pods: %w", err) } - // TODO(irbekrm): here and when creating the VIPService, verify if the + // TODO(irbekrm): here and when creating the Tailscale Service, verify if the // error is not terminal (and therefore should not be reconciled). For // example, if the hostname is already a hostname of a Tailscale node, // the GET here will fail. 
@@ -362,7 +362,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin lbs = []corev1.LoadBalancerIngress{ { Hostname: dnsName, - IP: vipv4.String(), + IP: tsSvcIPv4.String(), }, } @@ -376,8 +376,8 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin return svcsChanged, nil } -// maybeCleanup ensures that any resources, such as a VIPService created for this Service, are cleaned up when the -// Service is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the VIPService is only +// maybeCleanup ensures that any resources, such as a Tailscale Service created for this Service, are cleaned up when the +// Service is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the Tailscale Service is only // deleted if it does not contain any other owner references. If it does the cleanup only removes the owner reference // corresponding to this Service. func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, svc *corev1.Service, logger *zap.SugaredLogger) (svcChanged bool, err error) { @@ -387,7 +387,7 @@ func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, logger.Debugf("no finalizer, nothing to do") return false, nil } - logger.Infof("Ensuring that VIPService %q configuration is cleaned up", hostname) + logger.Infof("Ensuring that Tailscale Service %q configuration is cleaned up", hostname) defer func() { if err != nil { @@ -397,13 +397,13 @@ func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, }() serviceName := tailcfg.ServiceName("svc:" + hostname) - // 1. Clean up the VIPService. - svcChanged, err = r.cleanupVIPService(ctx, serviceName, logger) + // 1. Clean up the Tailscale Service. + svcChanged, err = r.cleanupTailscaleService(ctx, serviceName, logger) if err != nil { - return false, fmt.Errorf("error deleting VIPService: %w", err) + return false, fmt.Errorf("error deleting Tailscale Service: %w", err) } - // 2. Unadvertise the VIPService. + // 2. Unadvertise the Tailscale Service. pgName := svc.Annotations[AnnotationProxyGroup] if err = r.maybeUpdateAdvertiseServicesConfig(ctx, svc, pgName, serviceName, nil, false, logger); err != nil { return false, fmt.Errorf("failed to update tailscaled config services: %w", err) @@ -419,7 +419,7 @@ func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, if cm == nil || cfgs == nil { return true, nil } - logger.Infof("Removing VIPService %q from ingress config for ProxyGroup %q", hostname, pgName) + logger.Infof("Removing Tailscale Service %q from ingress config for ProxyGroup %q", hostname, pgName) delete(cfgs, serviceName.String()) cfgBytes, err := json.Marshal(cfgs) if err != nil { @@ -429,8 +429,8 @@ func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, return true, r.Update(ctx, cm) } -// VIPServices that are associated with the provided ProxyGroup and no longer managed this operator's instance are deleted, if not owned by other operator instances, else the owner reference is cleaned up. -// Returns true if the operation resulted in existing VIPService updates (owner reference removal). +// Tailscale Services that are associated with the provided ProxyGroup and no longer managed this operator's instance are deleted, if not owned by other operator instances, else the owner reference is cleaned up. +// Returns true if the operation resulted in existing Tailscale Service updates (owner reference removal). 
func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger) (svcsChanged bool, err error) { cm, config, err := ingressSvcsConfigs(ctx, r.Client, proxyGroupName, r.tsNamespace) if err != nil { @@ -443,31 +443,31 @@ func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG } ingressConfigChanged := false - for vipSvcName, cfg := range config { + for tsSvcName, cfg := range config { found := false for _, svc := range svcList.Items { - if strings.EqualFold(fmt.Sprintf("svc:%s", nameForService(&svc)), vipSvcName) { + if strings.EqualFold(fmt.Sprintf("svc:%s", nameForService(&svc)), tsSvcName) { found = true break } } if !found { - logger.Infof("VIPService %q is not owned by any Service, cleaning up", vipSvcName) + logger.Infof("Tailscale Service %q is not owned by any Service, cleaning up", tsSvcName) - // Make sure the VIPService is not advertised in tailscaled or serve config. - if err = r.maybeUpdateAdvertiseServicesConfig(ctx, nil, proxyGroupName, tailcfg.ServiceName(vipSvcName), &cfg, false, logger); err != nil { + // Make sure the Tailscale Service is not advertised in tailscaled or serve config. + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, nil, proxyGroupName, tailcfg.ServiceName(tsSvcName), &cfg, false, logger); err != nil { return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } - svcsChanged, err = r.cleanupVIPService(ctx, tailcfg.ServiceName(vipSvcName), logger) + svcsChanged, err = r.cleanupTailscaleService(ctx, tailcfg.ServiceName(tsSvcName), logger) if err != nil { - return false, fmt.Errorf("deleting VIPService %q: %w", vipSvcName, err) + return false, fmt.Errorf("deleting Tailscale Service %q: %w", tsSvcName, err) } - _, ok := config[vipSvcName] + _, ok := config[tsSvcName] if ok { - logger.Infof("Removing VIPService %q from serve config", vipSvcName) - delete(config, vipSvcName) + logger.Infof("Removing Tailscale Service %q from serve config", tsSvcName) + delete(config, tsSvcName) ingressConfigChanged = true } } @@ -528,11 +528,11 @@ func (r *HAServiceReconciler) tailnetCertDomain(ctx context.Context) (string, er return st.CurrentTailnet.MagicDNSSuffix, nil } -// cleanupVIPService deletes any VIPService by the provided name if it is not owned by operator instances other than this one. -// If a VIPService is found, but contains other owner references, only removes this operator's owner reference. -// If a VIPService by the given name is not found or does not contain this operator's owner reference, do nothing. -// It returns true if an existing VIPService was updated to remove owner reference, as well as any error that occurred. -func (r *HAServiceReconciler) cleanupVIPService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (updated bool, err error) { +// cleanupTailscaleService deletes any Tailscale Service by the provided name if it is not owned by operator instances other than this one. +// If a Tailscale Service is found, but contains other owner references, only removes this operator's owner reference. +// If a Tailscale Service by the given name is not found or does not contain this operator's owner reference, do nothing. +// It returns true if an existing Tailscale Service was updated to remove owner reference, as well as any error that occurred. 
+func (r *HAServiceReconciler) cleanupTailscaleService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (updated bool, err error) { svc, err := r.tsClient.GetVIPService(ctx, name) if isErrorFeatureFlagNotEnabled(err) { msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled) @@ -546,23 +546,23 @@ func (r *HAServiceReconciler) cleanupVIPService(ctx context.Context, name tailcf return false, nil } if !ok { - return false, fmt.Errorf("unexpected error getting VIPService %q: %w", name.String(), err) + return false, fmt.Errorf("unexpected error getting Tailscale Service %q: %w", name.String(), err) } - return false, fmt.Errorf("error getting VIPService: %w", err) + return false, fmt.Errorf("error getting Tailscale Service: %w", err) } if svc == nil { return false, nil } o, err := parseOwnerAnnotation(svc) if err != nil { - return false, fmt.Errorf("error parsing VIPService owner annotation: %w", err) + return false, fmt.Errorf("error parsing Tailscale Service owner annotation: %w", err) } if o == nil || len(o.OwnerRefs) == 0 { return false, nil } // Comparing with the operatorID only means that we will not be able to - // clean up VIPServices in cases where the operator was deleted from the + // clean up Tailscale Services in cases where the operator was deleted from the // cluster before deleting the Ingress. Perhaps the comparison could be // 'if or.OperatorID == r.operatorID || or.ingressUID == r.ingressUID'. ix := slices.IndexFunc(o.OwnerRefs, func(or OwnerRef) bool { @@ -572,14 +572,14 @@ func (r *HAServiceReconciler) cleanupVIPService(ctx context.Context, name tailcf return false, nil } if len(o.OwnerRefs) == 1 { - logger.Infof("Deleting VIPService %q", name) + logger.Infof("Deleting Tailscale Service %q", name) return false, r.tsClient.DeleteVIPService(ctx, name) } o.OwnerRefs = slices.Delete(o.OwnerRefs, ix, ix+1) - logger.Infof("Updating VIPService %q", name) + logger.Infof("Updating Tailscale Service %q", name) json, err := json.Marshal(o) if err != nil { - return false, fmt.Errorf("error marshalling updated VIPService owner reference: %w", err) + return false, fmt.Errorf("error marshalling updated Tailscale Service owner reference: %w", err) } svc.Annotations[ownerAnnotation] = string(json) return true, r.tsClient.CreateOrUpdateVIPService(ctx, svc) @@ -618,7 +618,7 @@ func (a *HAServiceReconciler) backendRoutesSetup(ctx context.Context, serviceNam return false, fmt.Errorf("error checking ingress config status: %w", err) } if !statusUpToDate || !reflect.DeepEqual(gotCfgs.Configs.GetConfig(serviceName), wantsCfg) { - logger.Debugf("Pod %q is not ready to advertise VIPService", pod.Name) + logger.Debugf("Pod %q is not ready to advertise Tailscale Service", pod.Name) return false, nil } return true, nil @@ -746,9 +746,9 @@ func (a *HAServiceReconciler) numberPodsAdvertising(ctx context.Context, pgName } // ownerAnnotations returns the updated annotations required to ensure this -// instance of the operator is included as an owner. If the VIPService is not +// instance of the operator is included as an owner. If the Tailscale Service is not // nil, but does not contain an owner we return an error as this likely means -// that the VIPService was created by something other than a Tailscale +// that the Tailscale Service was created by something other than a Tailscale // Kubernetes operator. 
func (r *HAServiceReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[string]string, error) { ref := OwnerRef{ @@ -758,7 +758,7 @@ func (r *HAServiceReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[s c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}} json, err := json.Marshal(c) if err != nil { - return nil, fmt.Errorf("[unexpected] unable to marshal VIPService owner annotation contents: %w, please report this", err) + return nil, fmt.Errorf("[unexpected] unable to marshal Tailscale Service owner annotation contents: %w, please report this", err) } return map[string]string{ ownerAnnotation: string(json), @@ -769,7 +769,7 @@ func (r *HAServiceReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[s return nil, err } if o == nil || len(o.OwnerRefs) == 0 { - return nil, fmt.Errorf("VIPService %s exists, but does not contain owner annotation with owner references; not proceeding as this is likely a resource created by something other than the Tailscale Kubernetes operator", svc.Name) + return nil, fmt.Errorf("Tailscale Service %s exists, but does not contain owner annotation with owner references; not proceeding as this is likely a resource created by something other than the Tailscale Kubernetes operator", svc.Name) } if slices.Contains(o.OwnerRefs, ref) { // up to date return svc.Annotations, nil @@ -788,7 +788,7 @@ func (r *HAServiceReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[s return newAnnots, nil } -// dnsNameForService returns the DNS name for the given VIPService name. +// dnsNameForService returns the DNS name for the given Tailscale Service name. func (r *HAServiceReconciler) dnsNameForService(ctx context.Context, svc tailcfg.ServiceName) (string, error) { s := svc.WithoutPrefix() tcd, err := r.tailnetCertDomain(ctx) diff --git a/cmd/k8s-operator/svc-for-pg_test.go b/cmd/k8s-operator/svc-for-pg_test.go index 66923ce7d..ec94d536f 100644 --- a/cmd/k8s-operator/svc-for-pg_test.go +++ b/cmd/k8s-operator/svc-for-pg_test.go @@ -47,7 +47,7 @@ func TestServicePGReconciler(t *testing.T) { expectReconciled(t, svcPGR, "default", svc.Name) config = append(config, fmt.Sprintf("svc:default-%s", svc.Name)) - verifyVIPService(t, ft, fmt.Sprintf("svc:default-%s", svc.Name), []string{"do-not-validate"}) + verifyTailscaleService(t, ft, fmt.Sprintf("svc:default-%s", svc.Name), []string{"do-not-validate"}) verifyTailscaledConfig(t, fc, config) } @@ -89,7 +89,7 @@ func TestServicePGReconciler_UpdateHostname(t *testing.T) { expectReconciled(t, svcPGR, "default", svc.Name) - verifyVIPService(t, ft, fmt.Sprintf("svc:default-%s", svc.Name), []string{"do-not-validate"}) + verifyTailscaleService(t, ft, fmt.Sprintf("svc:default-%s", svc.Name), []string{"do-not-validate"}) verifyTailscaledConfig(t, fc, []string{fmt.Sprintf("svc:default-%s", svc.Name)}) hostname := "foobarbaz" @@ -101,7 +101,7 @@ func TestServicePGReconciler_UpdateHostname(t *testing.T) { updateIngressConfigSecret(t, fc, stateSecret, hostname, cip) expectReconciled(t, svcPGR, "default", svc.Name) - verifyVIPService(t, ft, fmt.Sprintf("svc:%s", hostname), []string{"do-not-validate"}) + verifyTailscaleService(t, ft, fmt.Sprintf("svc:%s", hostname), []string{"do-not-validate"}) verifyTailscaledConfig(t, fc, []string{fmt.Sprintf("svc:%s", hostname)}) _, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName(fmt.Sprintf("svc:default-%s", svc.Name))) @@ -238,17 +238,17 @@ func TestServicePGReconciler_MultiCluster(t *testing.T) { svc, _ := setupTestService(t, "test-multi-cluster", "", "4.3.2.1", 
fc, stateSecret) expectReconciled(t, pgr, "default", svc.Name) - vipSvcs, err := ft.ListVIPServices(context.Background()) + tsSvcs, err := ft.ListVIPServices(context.Background()) if err != nil { - t.Fatalf("getting VIPService: %v", err) + t.Fatalf("getting Tailscale Service: %v", err) } - if len(vipSvcs) != 1 { - t.Fatalf("unexpected number of VIPServices (%d)", len(vipSvcs)) + if len(tsSvcs) != 1 { + t.Fatalf("unexpected number of Tailscale Services (%d)", len(tsSvcs)) } - for name := range vipSvcs { - t.Logf("found vip service with name %q", name.String()) + for name := range tsSvcs { + t.Logf("found Tailscale Service with name %q", name.String()) } } } @@ -279,10 +279,10 @@ func TestIgnoreRegularService(t *testing.T) { verifyTailscaledConfig(t, fc, nil) - vipSvcs, err := ft.ListVIPServices(context.Background()) + tsSvcs, err := ft.ListVIPServices(context.Background()) if err == nil { - if len(vipSvcs) > 0 { - t.Fatal("unexpected vip services found") + if len(tsSvcs) > 0 { + t.Fatal("unexpected Tailscale Services found") } } } From 8009ad74a3b1e0c532a21375266f42336cd22aa3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 19 May 2025 08:39:55 -0700 Subject: [PATCH 0876/1708] cmd/derper, net/tlsdial: fix client's self-signed cert validation This fixes the implementation and test from #15208 which apparently never worked. Ignore the metacert when counting the number of expected certs presented. And fix the test, pulling out the TLSConfig setup code into something shared between the real cmd/derper and the test. Fixes #15579 Change-Id: I90526e38e59f89b480629b415f00587b107de10a Signed-off-by: Brad Fitzpatrick --- cmd/derper/cert_test.go | 1 + cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + derp/derp_server.go | 23 ++++++++++++- derp/derp_test.go | 3 +- derp/derpconst/derpconst.go | 11 ++++++ derp/derphttp/derphttp_client.go | 3 +- net/tlsdial/tlsdial.go | 58 +++++++++++++++++++------------- tsnet/depaware.txt | 1 + 11 files changed, 77 insertions(+), 27 deletions(-) create mode 100644 derp/derpconst/derpconst.go diff --git a/cmd/derper/cert_test.go b/cmd/derper/cert_test.go index 2ec7b756e..31fd4ea44 100644 --- a/cmd/derper/cert_test.go +++ b/cmd/derper/cert_test.go @@ -140,6 +140,7 @@ func TestPinnedCertRawIP(t *testing.T) { var hs http.Server hs.Handler = mux hs.TLSConfig = cp.TLSConfig() + ds.ModifyTLSConfigToAddMetaCert(hs.TLSConfig) go hs.ServeTLS(ln, "", "") lnPort := ln.Addr().(*net.TCPAddr).Port diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index f22b4873f..ca7723530 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -92,6 +92,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/client/tailscale from tailscale.com/derp tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/derp from tailscale.com/cmd/derper+ + tailscale.com/derp/derpconst from tailscale.com/derp+ tailscale.com/derp/derphttp from tailscale.com/cmd/derper tailscale.com/disco from tailscale.com/derp tailscale.com/drive from tailscale.com/client/local+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index bbbaebc19..12fb5cf2e 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -786,6 +786,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/control/controlhttp/controlhttpcommon from 
tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ + tailscale.com/derp/derpconst from tailscale.com/derp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ tailscale.com/disco from tailscale.com/derp+ tailscale.com/doctor from tailscale.com/ipn/ipnlocal diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 85bf64e4a..03bf2f94c 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -88,6 +88,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/net/portmapper tailscale.com/derp from tailscale.com/derp/derphttp + tailscale.com/derp/derpconst from tailscale.com/derp+ tailscale.com/derp/derphttp from tailscale.com/net/netcheck tailscale.com/disco from tailscale.com/derp tailscale.com/drive from tailscale.com/client/local+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 823d639c9..6de0ddc39 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -260,6 +260,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ + tailscale.com/derp/derpconst from tailscale.com/derp+ tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ tailscale.com/disco from tailscale.com/derp+ tailscale.com/doctor from tailscale.com/ipn/ipnlocal diff --git a/derp/derp_server.go b/derp/derp_server.go index c330572d2..abda9da73 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -11,6 +11,7 @@ import ( "context" "crypto/ed25519" crand "crypto/rand" + "crypto/tls" "crypto/x509" "crypto/x509/pkix" "encoding/binary" @@ -38,6 +39,7 @@ import ( "golang.org/x/sync/errgroup" "tailscale.com/client/local" "tailscale.com/client/tailscale" + "tailscale.com/derp/derpconst" "tailscale.com/disco" "tailscale.com/envknob" "tailscale.com/metrics" @@ -616,7 +618,7 @@ func (s *Server) initMetacert() { tmpl := &x509.Certificate{ SerialNumber: big.NewInt(ProtocolVersion), Subject: pkix.Name{ - CommonName: fmt.Sprintf("derpkey%s", s.publicKey.UntypedHexString()), + CommonName: derpconst.MetaCertCommonNamePrefix + s.publicKey.UntypedHexString(), }, // Windows requires NotAfter and NotBefore set: NotAfter: s.clock.Now().Add(30 * 24 * time.Hour), @@ -636,6 +638,25 @@ func (s *Server) initMetacert() { // TLS server to let the client skip a round trip during start-up. func (s *Server) MetaCert() []byte { return s.metaCert } +// ModifyTLSConfigToAddMetaCert modifies c.GetCertificate to make +// it append s.MetaCert to the returned certificates. +// +// It panics if c or c.GetCertificate is nil. +func (s *Server) ModifyTLSConfigToAddMetaCert(c *tls.Config) { + getCert := c.GetCertificate + if getCert == nil { + panic("c.GetCertificate is nil") + } + c.GetCertificate = func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { + cert, err := getCert(hi) + if err != nil { + return nil, err + } + cert.Certificate = append(cert.Certificate, s.MetaCert()) + return cert, nil + } +} + // registerClient notes that client c is now authenticated and ready for packets. 
// // If c.key is connected more than once, the earlier connection(s) are diff --git a/derp/derp_test.go b/derp/derp_test.go index f0fc52fe7..c5a92bafa 100644 --- a/derp/derp_test.go +++ b/derp/derp_test.go @@ -27,6 +27,7 @@ import ( qt "github.com/frankban/quicktest" "go4.org/mem" "golang.org/x/time/rate" + "tailscale.com/derp/derpconst" "tailscale.com/disco" "tailscale.com/net/memnet" "tailscale.com/tstest" @@ -930,7 +931,7 @@ func TestMetaCert(t *testing.T) { if fmt.Sprint(cert.SerialNumber) != fmt.Sprint(ProtocolVersion) { t.Errorf("serial = %v; want %v", cert.SerialNumber, ProtocolVersion) } - if g, w := cert.Subject.CommonName, fmt.Sprintf("derpkey%s", pub.UntypedHexString()); g != w { + if g, w := cert.Subject.CommonName, derpconst.MetaCertCommonNamePrefix+pub.UntypedHexString(); g != w { t.Errorf("CommonName = %q; want %q", g, w) } if n := len(cert.Extensions); n != 1 { diff --git a/derp/derpconst/derpconst.go b/derp/derpconst/derpconst.go new file mode 100644 index 000000000..74ca09ccb --- /dev/null +++ b/derp/derpconst/derpconst.go @@ -0,0 +1,11 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package derpconst contains constants used by the DERP client and server. +package derpconst + +// MetaCertCommonNamePrefix is the prefix that the DERP server +// puts on for the common name of its "metacert". The suffix of +// the common name after "derpkey" is the hex key.NodePublic +// of the DERP server. +const MetaCertCommonNamePrefix = "derpkey" diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index 21ee4a671..faa218ca2 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -30,6 +30,7 @@ import ( "go4.org/mem" "tailscale.com/derp" + "tailscale.com/derp/derpconst" "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/net/dnscache" @@ -1152,7 +1153,7 @@ var ErrClientClosed = errors.New("derphttp.Client closed") func parseMetaCert(certs []*x509.Certificate) (serverPub key.NodePublic, serverProtoVersion int) { for _, cert := range certs { // Look for derpkey prefix added by initMetacert() on the server side. - if pubHex, ok := strings.CutPrefix(cert.Subject.CommonName, "derpkey"); ok { + if pubHex, ok := strings.CutPrefix(cert.Subject.CommonName, derpconst.MetaCertCommonNamePrefix); ok { var err error serverPub, err = key.ParseNodePublicUntyped(mem.S(pubHex)) if err == nil && cert.SerialNumber.BitLen() <= 8 { // supports up to version 255 diff --git a/net/tlsdial/tlsdial.go b/net/tlsdial/tlsdial.go index 4d22383ef..1bd2450aa 100644 --- a/net/tlsdial/tlsdial.go +++ b/net/tlsdial/tlsdial.go @@ -21,10 +21,12 @@ import ( "net" "net/http" "os" + "strings" "sync" "sync/atomic" "time" + "tailscale.com/derp/derpconst" "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/hostinfo" @@ -247,9 +249,10 @@ func SetConfigExpectedCert(c *tls.Config, certDNSName string) { } } -// SetConfigExpectedCertHash configures c's VerifyPeerCertificate function -// to require that exactly 1 cert is presented, and that the hex of its SHA256 hash -// is equal to wantFullCertSHA256Hex and that it's a valid cert for c.ServerName. +// SetConfigExpectedCertHash configures c's VerifyPeerCertificate function to +// require that exactly 1 cert is presented (not counting any present MetaCert), +// and that the hex of its SHA256 hash is equal to wantFullCertSHA256Hex and +// that it's a valid cert for c.ServerName. 
func SetConfigExpectedCertHash(c *tls.Config, wantFullCertSHA256Hex string) { if c.VerifyPeerCertificate != nil { panic("refusing to override tls.Config.VerifyPeerCertificate") @@ -260,28 +263,35 @@ func SetConfigExpectedCertHash(c *tls.Config, wantFullCertSHA256Hex string) { c.InsecureSkipVerify = true c.VerifyConnection = nil c.VerifyPeerCertificate = func(rawCerts [][]byte, _ [][]*x509.Certificate) error { - if len(rawCerts) == 0 { - return errors.New("no certs presented") - } - if len(rawCerts) > 1 { - return errors.New("unexpected multiple certs presented") - } - if fmt.Sprintf("%02x", sha256.Sum256(rawCerts[0])) != wantFullCertSHA256Hex { - return fmt.Errorf("cert hash does not match expected cert hash") - } - cert, err := x509.ParseCertificate(rawCerts[0]) - if err != nil { - return fmt.Errorf("ParseCertificate: %w", err) - } - if err := cert.VerifyHostname(c.ServerName); err != nil { - return fmt.Errorf("cert does not match server name %q: %w", c.ServerName, err) - } - now := time.Now() - if now.After(cert.NotAfter) { - return fmt.Errorf("cert expired %v", cert.NotAfter) + var sawGoodCert bool + for _, rawCert := range rawCerts { + cert, err := x509.ParseCertificate(rawCert) + if err != nil { + return fmt.Errorf("ParseCertificate: %w", err) + } + if strings.HasPrefix(cert.Subject.CommonName, derpconst.MetaCertCommonNamePrefix) { + continue + } + if sawGoodCert { + return errors.New("unexpected multiple certs presented") + } + if fmt.Sprintf("%02x", sha256.Sum256(rawCert)) != wantFullCertSHA256Hex { + return fmt.Errorf("cert hash does not match expected cert hash") + } + if err := cert.VerifyHostname(c.ServerName); err != nil { + return fmt.Errorf("cert does not match server name %q: %w", c.ServerName, err) + } + now := time.Now() + if now.After(cert.NotAfter) { + return fmt.Errorf("cert expired %v", cert.NotAfter) + } + if now.Before(cert.NotBefore) { + return fmt.Errorf("cert not yet valid until %v; is your clock correct?", cert.NotBefore) + } + sawGoodCert = true } - if now.Before(cert.NotBefore) { - return fmt.Errorf("cert not yet valid until %v; is your clock correct?", cert.NotBefore) + if !sawGoodCert { + return errors.New("expected cert not presented") } return nil } diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index f5cd1232d..662752554 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -227,6 +227,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ + tailscale.com/derp/derpconst from tailscale.com/derp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ tailscale.com/disco from tailscale.com/derp+ tailscale.com/doctor from tailscale.com/ipn/ipnlocal From 87a4f17883cf0f9a7e19355e45eda525fec7dd89 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 19 May 2025 11:42:13 -0700 Subject: [PATCH 0877/1708] wgengine/magicsock: fix pong handling 'EndpointChange' reporting (#16018) Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 020420f55..97cbe8753 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -552,7 +552,7 @@ func (de *endpoint) addrForSendLocked(now mono.Time) (udpAddr, derpAddr netip.Ad // 
addrForWireGuardSendLocked returns the address that should be used for // sending the next packet. If a packet has never or not recently been sent to // the endpoint, then a randomly selected address for the endpoint is returned, -// as well as a bool indiciating that WireGuard discovery pings should be started. +// as well as a bool indicating that WireGuard discovery pings should be started. // If the addresses have latency information available, then the address with the // best latency is used. // @@ -1261,7 +1261,7 @@ func (de *endpoint) sendDiscoPingsLocked(now mono.Time, sendCallMeMaybe bool) { } // sendWireGuardOnlyPingsLocked evaluates all available addresses for -// a WireGuard only endpoint and initates an ICMP ping for useable +// a WireGuard only endpoint and initiates an ICMP ping for useable // addresses. func (de *endpoint) sendWireGuardOnlyPingsLocked(now mono.Time) { if runtime.GOOS == "js" { @@ -1629,7 +1629,7 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src netip de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v tx=%x", de.publicKey.ShortString(), de.discoShort(), sp.to, thisPong.wireMTU, m.TxID[:6]) de.debugUpdates.Add(EndpointChange{ When: time.Now(), - What: "handlePingLocked-bestAddr-update", + What: "handlePongConnLocked-bestAddr-update", From: de.bestAddr, To: thisPong, }) @@ -1638,7 +1638,7 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src netip if de.bestAddr.AddrPort == thisPong.AddrPort { de.debugUpdates.Add(EndpointChange{ When: time.Now(), - What: "handlePingLocked-bestAddr-latency", + What: "handlePongConnLocked-bestAddr-latency", From: de.bestAddr, To: thisPong, }) From 54970054a657fadebb98dd0e17edf531d48c2349 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 19 May 2025 07:56:03 -0700 Subject: [PATCH 0878/1708] cmd/tailscale/cli: suggest using "tailscale set", not "up", to set operator The same message was used for "up" and "down" permission failures, but "set" works better for both. Suggesting "up --operator" for a "down" permission failure was confusing. It's not like the latter command works in one shot anyway. Fixes #16008 Change-Id: I6e4225ef06ce2d8e19c40bece8104e254c2aa525 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/cli.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index b1a910295..d7e8e5ca2 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -164,7 +164,7 @@ func Run(args []string) (err error) { err = rootCmd.Run(context.Background()) if tailscale.IsAccessDeniedError(err) && os.Getuid() != 0 && runtime.GOOS != "windows" { - return fmt.Errorf("%v\n\nUse 'sudo tailscale %s' or 'tailscale up --operator=$USER' to not require root.", err, strings.Join(args, " ")) + return fmt.Errorf("%v\n\nUse 'sudo tailscale %s'.\nTo not require root, use 'sudo tailscale set --operator=$USER' once.", err, strings.Join(args, " ")) } if errors.Is(err, flag.ErrHelp) { return nil From 30a89ad3781aa99ee8b92e1ecfdab0f90034a02c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 19 May 2025 13:02:20 -0700 Subject: [PATCH 0879/1708] ipn/ipnlocal: make GetExt work earlier, before extension init Taildrop wasn't working on iOS since #15971 because GetExt didn't work until after init, but that PR moved Init until after Start. This makes GetExt work before LocalBackend.Start (ExtensionHost.Init). 
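For illustration, a minimal Go sketch of the idea, assuming simplified stand-in Extension and Definition types rather than the real ipnext ones: the host builds its by-name and by-type indexes while it constructs the extensions, so a GetExt-style lookup can succeed before any extension's Init (and before Start) has run. This is a sketch of the pattern, not the actual ExtensionHost code.

package extsketch

import (
	"fmt"
	"reflect"
)

// Extension is a simplified stand-in for ipnext.Extension.
type Extension interface{ Name() string }

// Definition is a simplified stand-in for a registered extension factory.
type Definition struct {
	Name string
	New  func() (Extension, error)
}

type host struct {
	all    []Extension
	byName map[string]Extension
	byType map[reflect.Type]Extension
}

// newHost constructs every registered extension and indexes it immediately,
// instead of deferring the indexing to a later init/Start phase.
func newHost(defs []Definition) (*host, error) {
	h := &host{
		byName: make(map[string]Extension),
		byType: make(map[reflect.Type]Extension),
	}
	for _, d := range defs {
		ext, err := d.New()
		if err != nil {
			return nil, fmt.Errorf("creating extension %q: %w", d.Name, err)
		}
		if d.Name != ext.Name() {
			return nil, fmt.Errorf("extension name %q does not match registered name %q", ext.Name(), d.Name)
		}
		if _, dup := h.byName[ext.Name()]; dup {
			return nil, fmt.Errorf("duplicate extension name %q", ext.Name())
		}
		h.all = append(h.all, ext)
		h.byName[ext.Name()] = ext
		h.byType[reflect.TypeOf(ext)] = ext // typed lookups work before Init runs
	}
	return h, nil
}

A GetExt-style helper then only needs to consult byType, and it no longer matters whether the init/Start phase has happened yet.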
Updates #15812 Change-Id: I6e87257cd97a20f86083a746d39df223e5b6791b Signed-off-by: Brad Fitzpatrick --- ipn/ipnext/ipnext.go | 2 +- ipn/ipnlocal/extension_host.go | 28 ++++++++++++++++------ ipn/ipnlocal/extension_host_test.go | 37 +++++++++++++++++++++++++++++ 3 files changed, 59 insertions(+), 8 deletions(-) diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index 7a9c39dbb..066763ba4 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -114,7 +114,7 @@ func RegisterExtension(name string, newExt NewExtensionFn) { panic(fmt.Sprintf("ipnext: newExt is nil: %q", name)) } if extensions.Contains(name) { - panic(fmt.Sprintf("ipnext: duplicate extensions: %q", name)) + panic(fmt.Sprintf("ipnext: duplicate extension name %q", name)) } extensions.Set(name, &Definition{name, newExt}) } diff --git a/ipn/ipnlocal/extension_host.go b/ipn/ipnlocal/extension_host.go index faf9d2be9..ca802ab89 100644 --- a/ipn/ipnlocal/extension_host.go +++ b/ipn/ipnlocal/extension_host.go @@ -22,6 +22,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/util/execqueue" + "tailscale.com/util/mak" "tailscale.com/util/testenv" ) @@ -97,7 +98,8 @@ type ExtensionHost struct { initialized atomic.Bool // activeExtensions is a subset of allExtensions that have been initialized and are ready to use. activeExtensions []ipnext.Extension - // extensionsByName are the activeExtensions indexed by their names. + // extensionsByName are the extensions indexed by their names. + // They are not necessarily initialized (in activeExtensions) yet. extensionsByName map[string]ipnext.Extension // postInitWorkQueue is a queue of functions to be executed // by the workQueue after all extensions have been initialized. @@ -184,6 +186,24 @@ func newExtensionHost(logf logger.Logf, b Backend, overrideExts ...*ipnext.Defin return nil, fmt.Errorf("failed to create %q extension: %v", d.Name(), err) } host.allExtensions = append(host.allExtensions, ext) + + if d.Name() != ext.Name() { + return nil, fmt.Errorf("extension name %q does not match the registered name %q", ext.Name(), d.Name()) + } + + if _, ok := host.extensionsByName[ext.Name()]; ok { + return nil, fmt.Errorf("duplicate extension name %q", ext.Name()) + } else { + mak.Set(&host.extensionsByName, ext.Name(), ext) + } + + typ := reflect.TypeOf(ext) + if _, ok := host.extByType.Load(typ); ok { + if _, ok := ext.(interface{ PermitDoubleRegister() }); !ok { + return nil, fmt.Errorf("duplicate extension type %T", ext) + } + } + host.extByType.Store(typ, ext) } return host, nil } @@ -215,10 +235,6 @@ func (h *ExtensionHost) init() { defer h.initDone.Store(true) // Initialize the extensions in the order they were registered. - h.mu.Lock() - h.activeExtensions = make([]ipnext.Extension, 0, len(h.allExtensions)) - h.extensionsByName = make(map[string]ipnext.Extension, len(h.allExtensions)) - h.mu.Unlock() for _, ext := range h.allExtensions { // Do not hold the lock while calling [ipnext.Extension.Init]. // Extensions call back into the host to register their callbacks, @@ -240,8 +256,6 @@ func (h *ExtensionHost) init() { // We'd like to make them visible to other extensions that are initialized later. 
h.mu.Lock() h.activeExtensions = append(h.activeExtensions, ext) - h.extensionsByName[ext.Name()] = ext - h.extByType.Store(reflect.TypeOf(ext), ext) h.mu.Unlock() } diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go index 8816e659f..f655c477f 100644 --- a/ipn/ipnlocal/extension_host_test.go +++ b/ipn/ipnlocal/extension_host_test.go @@ -30,6 +30,7 @@ import ( "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/lazy" + "tailscale.com/types/logger" "tailscale.com/types/persist" "tailscale.com/util/must" ) @@ -1042,6 +1043,38 @@ func TestNilExtensionHostMethodCall(t *testing.T) { } } +// extBeforeStartExtension is a test extension used by TestGetExtBeforeStart. +// It is registered with the [ipnext.RegisterExtension]. +type extBeforeStartExtension struct{} + +func init() { + ipnext.RegisterExtension("ext-before-start", mkExtBeforeStartExtension) +} + +func mkExtBeforeStartExtension(logger.Logf, ipnext.SafeBackend) (ipnext.Extension, error) { + return extBeforeStartExtension{}, nil +} + +func (extBeforeStartExtension) Name() string { return "ext-before-start" } +func (extBeforeStartExtension) Init(ipnext.Host) error { + return nil +} +func (extBeforeStartExtension) Shutdown() error { + return nil +} + +// TestGetExtBeforeStart verifies that an extension registered via +// RegisterExtension can be retrieved with GetExt before the host is started +// (via LocalBackend.Start) +func TestGetExtBeforeStart(t *testing.T) { + lb := newTestBackend(t) + // Now call GetExt without calling Start on the LocalBackend. + _, ok := GetExt[extBeforeStartExtension](lb) + if !ok { + t.Fatal("didn't find extension") + } +} + // checkMethodCallWithZeroArgs calls the method m on the receiver r // with zero values for all its arguments, except the receiver itself. // It returns the result of the method call, or fails the test if the call panics. @@ -1151,6 +1184,10 @@ type testExtension struct { var _ ipnext.Extension = (*testExtension)(nil) +// PermitDoubleRegister is a sentinel method whose existence tells the +// ExtensionHost to permit it to be registered multiple times. +func (*testExtension) PermitDoubleRegister() {} + func (e *testExtension) setT(t *testing.T) { e.t = t } From 3cc80cce6ac045c64a410ae19d86d8100b567a26 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 19 May 2025 19:14:08 -0700 Subject: [PATCH 0880/1708] wgengine/magicsock: introduce virtualNetworkID type (#16021) This type improves code clarity and reduces the chance of heap alloc as we pass it as a non-pointer. VNI being a 3-byte value enables us to track set vs unset via the reserved/unused byte. 
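As a rough, self-contained illustration of that trick, the following sketch mirrors the virtualNetworkID type added in the diff below: the unused top byte of the uint32 acts as a "set" marker, and values wider than 3 bytes are masked off on read.

package main

import (
	"fmt"
	"math"
)

const (
	vniSetMask uint32 = 0xFF000000 // top (unused) byte marks "set"
	vniGetMask uint32 = ^vniSetMask // low 3 bytes hold the VNI value
)

type virtualNetworkID struct{ v uint32 }

func (n *virtualNetworkID) set(vni uint32) { n.v = vni | vniSetMask }
func (n *virtualNetworkID) isSet() bool    { return n.v&vniSetMask != 0 }
func (n *virtualNetworkID) get() uint32    { return n.v & vniGetMask }

func main() {
	var n virtualNetworkID
	fmt.Println(n.isSet()) // false: the zero value means "no VNI"
	n.set(math.MaxUint32)  // values wider than 3 bytes are clamped on read
	fmt.Println(n.isSet(), n.get()) // true 16777215 (1<<24 - 1)
}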
Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 2 +- wgengine/magicsock/magicsock.go | 55 +++++++++++++++++++++++----- wgengine/magicsock/magicsock_test.go | 50 +++++++++++++++++++++++++ wgengine/magicsock/relaymanager.go | 7 ++-- 4 files changed, 100 insertions(+), 14 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 97cbe8753..867eebda6 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1112,7 +1112,7 @@ func (de *endpoint) sendDiscoPing(ep netip.AddrPort, discoKey key.DiscoPublic, t size = min(size, MaxDiscoPingSize) padding := max(size-discoPingSize, 0) - sent, _ := de.c.sendDiscoMessage(ep, nil, de.publicKey, discoKey, &disco.Ping{ + sent, _ := de.c.sendDiscoMessage(ep, virtualNetworkID{}, de.publicKey, discoKey, &disco.Ping{ TxID: [12]byte(txid), NodeKey: de.c.publicKeyAtomic.Load(), Padding: padding, diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 05f4cf56d..61cdf4954 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1603,16 +1603,43 @@ const ( // speeds. var debugIPv4DiscoPingPenalty = envknob.RegisterDuration("TS_DISCO_PONG_IPV4_DELAY") +// virtualNetworkID is a Geneve header (RFC8926) 3-byte virtual network +// identifier. Its field must only ever be accessed via its methods. +type virtualNetworkID struct { + _vni uint32 +} + +const ( + vniSetMask uint32 = 0xFF000000 + vniGetMask uint32 = ^vniSetMask +) + +// isSet returns true if set() had been called previously, otherwise false. +func (v *virtualNetworkID) isSet() bool { + return v._vni&vniSetMask != 0 +} + +// set sets the provided VNI. If VNI exceeds the 3-byte storage it will be +// clamped. +func (v *virtualNetworkID) set(vni uint32) { + v._vni = vni | vniSetMask +} + +// get returns the VNI value. +func (v *virtualNetworkID) get() uint32 { + return v._vni & vniGetMask +} + // sendDiscoMessage sends discovery message m to dstDisco at dst. // // If dst is a DERP IP:port, then dstKey must be non-zero. // -// If geneveVNI is non-nil, then the [disco.Message] will be preceded by a -// Geneve header with the supplied VNI set. +// If vni.isSet(), the [disco.Message] will be preceded by a Geneve header with +// the VNI field set to the value returned by vni.get(). // // The dstKey should only be non-zero if the dstDisco key // unambiguously maps to exactly one peer. -func (c *Conn) sendDiscoMessage(dst netip.AddrPort, geneveVNI *uint32, dstKey key.NodePublic, dstDisco key.DiscoPublic, m disco.Message, logLevel discoLogLevel) (sent bool, err error) { +func (c *Conn) sendDiscoMessage(dst netip.AddrPort, vni virtualNetworkID, dstKey key.NodePublic, dstDisco key.DiscoPublic, m disco.Message, logLevel discoLogLevel) (sent bool, err error) { isDERP := dst.Addr() == tailcfg.DerpMagicIPAddr if _, isPong := m.(*disco.Pong); isPong && !isDERP && dst.Addr().Is4() { time.Sleep(debugIPv4DiscoPingPenalty()) @@ -1651,11 +1678,11 @@ func (c *Conn) sendDiscoMessage(dst netip.AddrPort, geneveVNI *uint32, dstKey ke c.mu.Unlock() pkt := make([]byte, 0, 512) // TODO: size it correctly? pool? if it matters. - if geneveVNI != nil { + if vni.isSet() { gh := packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolDisco, - VNI: *geneveVNI, + VNI: vni.get(), Control: isRelayHandshakeMsg, } pkt = append(pkt, make([]byte, packet.GeneveFixedHeaderLength)...) 
@@ -1903,9 +1930,17 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke switch dm := dm.(type) { case *disco.Ping: metricRecvDiscoPing.Add(1) + if isGeneveEncap { + // TODO(jwhited): handle Geneve-encapsulated disco ping. + return + } c.handlePingLocked(dm, src, di, derpNodeSrc) case *disco.Pong: metricRecvDiscoPong.Add(1) + if isGeneveEncap { + // TODO(jwhited): handle Geneve-encapsulated disco pong. + return + } // There might be multiple nodes for the sender's DiscoKey. // Ask each to handle it, stopping once one reports that // the Pong's TxID was theirs. @@ -2020,12 +2055,12 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src netip.AddrPort, di *discoInf isDerp := src.Addr() == tailcfg.DerpMagicIPAddr // If we can figure out with certainty which node key this disco - // message is for, eagerly update our IP<>node and disco<>node + // message is for, eagerly update our IP:port<>node and disco<>node // mappings to make p2p path discovery faster in simple // cases. Without this, disco would still work, but would be // reliant on DERP call-me-maybe to establish the disco<>node // mapping, and on subsequent disco handlePongConnLocked to establish - // the IP<>disco mapping. + // the IP:port<>disco mapping. if nk, ok := c.unambiguousNodeKeyOfPingLocked(dm, di.discoKey, derpNodeSrc); ok { if !isDerp { c.peerMap.setNodeKeyForIPPort(src, nk) @@ -2086,7 +2121,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src netip.AddrPort, di *discoInf ipDst := src discoDest := di.discoKey - go c.sendDiscoMessage(ipDst, nil, dstKey, discoDest, &disco.Pong{ + go c.sendDiscoMessage(ipDst, virtualNetworkID{}, dstKey, discoDest, &disco.Pong{ TxID: dm.TxID, Src: src, }, discoVerboseLog) @@ -2131,12 +2166,12 @@ func (c *Conn) enqueueCallMeMaybe(derpAddr netip.AddrPort, de *endpoint) { for _, ep := range c.lastEndpoints { eps = append(eps, ep.Addr) } - go de.c.sendDiscoMessage(derpAddr, nil, de.publicKey, epDisco.key, &disco.CallMeMaybe{MyNumber: eps}, discoLog) + go de.c.sendDiscoMessage(derpAddr, virtualNetworkID{}, de.publicKey, epDisco.key, &disco.CallMeMaybe{MyNumber: eps}, discoLog) if debugSendCallMeUnknownPeer() { // Send a callMeMaybe packet to a non-existent peer unknownKey := key.NewNode().Public() c.logf("magicsock: sending CallMeMaybe to unknown peer per TS_DEBUG_SEND_CALLME_UNKNOWN_PEER") - go de.c.sendDiscoMessage(derpAddr, nil, unknownKey, epDisco.key, &disco.CallMeMaybe{MyNumber: eps}, discoLog) + go de.c.sendDiscoMessage(derpAddr, virtualNetworkID{}, unknownKey, epDisco.key, &disco.CallMeMaybe{MyNumber: eps}, discoLog) } } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 1a899ea22..ddbf3e394 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -12,6 +12,7 @@ import ( "errors" "fmt" "io" + "math" "math/rand" "net" "net/http" @@ -3317,3 +3318,52 @@ func Test_isDiscoMaybeGeneve(t *testing.T) { }) } } + +func Test_virtualNetworkID(t *testing.T) { + tests := []struct { + name string + set *uint32 + want uint32 + }{ + { + "don't set", + nil, + 0, + }, + { + "set 0", + ptr.To(uint32(0)), + 0, + }, + { + "set 1", + ptr.To(uint32(1)), + 1, + }, + { + "set math.MaxUint32", + ptr.To(uint32(math.MaxUint32)), + 1<<24 - 1, + }, + { + "set max 3-byte value", + ptr.To(uint32(1<<24 - 1)), + 1<<24 - 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := virtualNetworkID{} + if tt.set != nil { + v.set(*tt.set) + } + if v.isSet() != (tt.set != nil) { + 
t.Fatalf("isSet: %v != wantIsSet: %v", v.isSet(), tt.set != nil) + } + if v.get() != tt.want { + t.Fatalf("get(): %v != want: %v", v.get(), tt.want) + } + }) + } +} diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index a63754371..0b19bb83f 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -16,7 +16,6 @@ import ( "tailscale.com/disco" udprelay "tailscale.com/net/udprelay/endpoint" "tailscale.com/types/key" - "tailscale.com/types/ptr" "tailscale.com/util/httpm" "tailscale.com/util/set" ) @@ -500,10 +499,12 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { sentBindAny := false bind := &disco.BindUDPRelayEndpoint{} + vni := virtualNetworkID{} + vni.set(work.se.VNI) for _, addrPort := range work.se.AddrPorts { if addrPort.IsValid() { sentBindAny = true - go work.ep.c.sendDiscoMessage(addrPort, ptr.To(work.se.VNI), key.NodePublic{}, work.se.ServerDisco, bind, discoLog) + go work.ep.c.sendDiscoMessage(addrPort, vni, key.NodePublic{}, work.se.ServerDisco, bind, discoLog) } } if !sentBindAny { @@ -552,7 +553,7 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { // [udprelay.ServerEndpoint] from becoming fully operational. // 4. This is a singular tx with no roundtrip latency measurements // involved. - work.ep.c.sendDiscoMessage(challenge.from, ptr.To(work.se.VNI), key.NodePublic{}, work.se.ServerDisco, answer, discoLog) + work.ep.c.sendDiscoMessage(challenge.from, vni, key.NodePublic{}, work.se.ServerDisco, answer, discoLog) return case <-timer.C: // The handshake timed out. From c4fb380f3c5f672ed11753c4771365ac3d43b14d Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 20 May 2025 11:30:45 +0100 Subject: [PATCH 0881/1708] cmd/k8s-operator: fix Tailscale Service API errors check (#16020) Updates tailscale/tailscale#15895 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/ingress-for-pg.go | 6 ++---- cmd/k8s-operator/svc-for-pg_test.go | 6 +----- cmd/k8s-operator/testutils_test.go | 4 ++-- 3 files changed, 5 insertions(+), 11 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 729fb2a3b..9cdd9cba9 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -1105,13 +1105,11 @@ func isErrorFeatureFlagNotEnabled(err error) bool { // Tailscale control plane when a Tailscale Service API call is made for a // tailnet that does not have the Tailscale Services feature flag enabled. 
const messageFFNotEnabled = "feature unavailable for tailnet" - var errResp *tailscale.ErrResponse - ok := errors.As(err, &errResp) - return ok && strings.Contains(errResp.Message, messageFFNotEnabled) + return err != nil && strings.Contains(err.Error(), messageFFNotEnabled) } func isErrorTailscaleServiceNotFound(err error) bool { - var errResp *tailscale.ErrResponse + var errResp tailscale.ErrResponse ok := errors.As(err, &errResp) return ok && errResp.Status == http.StatusNotFound } diff --git a/cmd/k8s-operator/svc-for-pg_test.go b/cmd/k8s-operator/svc-for-pg_test.go index ec94d536f..4bb633cb8 100644 --- a/cmd/k8s-operator/svc-for-pg_test.go +++ b/cmd/k8s-operator/svc-for-pg_test.go @@ -8,10 +8,8 @@ package main import ( "context" "encoding/json" - "errors" "fmt" "math/rand/v2" - "net/http" "net/netip" "testing" @@ -23,7 +21,6 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - "tailscale.com/internal/client/tailscale" "tailscale.com/ipn/ipnstate" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" @@ -108,8 +105,7 @@ func TestServicePGReconciler_UpdateHostname(t *testing.T) { if err == nil { t.Fatalf("svc:default-%s not cleaned up", svc.Name) } - var errResp *tailscale.ErrResponse - if !errors.As(err, &errResp) || errResp.Status != http.StatusNotFound { + if !isErrorTailscaleServiceNotFound(err) { t.Fatalf("unexpected error: %v", err) } } diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 3d9bdbf9a..619aecc56 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -901,11 +901,11 @@ func (c *fakeTSClient) GetVIPService(ctx context.Context, name tailcfg.ServiceNa c.Lock() defer c.Unlock() if c.vipServices == nil { - return nil, &tailscale.ErrResponse{Status: http.StatusNotFound} + return nil, tailscale.ErrResponse{Status: http.StatusNotFound} } svc, ok := c.vipServices[name] if !ok { - return nil, &tailscale.ErrResponse{Status: http.StatusNotFound} + return nil, tailscale.ErrResponse{Status: http.StatusNotFound} } return svc, nil } From 118206ab7909f0d716b9e2067ffbd987872f979f Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Tue, 20 May 2025 09:45:12 -0400 Subject: [PATCH 0882/1708] prober: update header check test (#15993) Use of the httptest client doesn't render header ordering as expected. Use http.DefaultClient for the test to ensure that the header ordering test is valid. Updates tailscale/corp#27370 Signed-off-by: Mike O'Driscoll --- prober/prober_test.go | 39 +++++++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/prober/prober_test.go b/prober/prober_test.go index c90557eff..21c975a73 100644 --- a/prober/prober_test.go +++ b/prober/prober_test.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "net/http" "net/http/httptest" "strings" "sync" @@ -586,30 +587,48 @@ func TestProberRunHandler(t *testing.T) { defer probe.Close() <-probe.stopped // wait for the first run. 
- w := httptest.NewRecorder() + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + mux.Handle("/prober/run/", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunHandler), tsweb.HandlerOptions{})) + + req, err := http.NewRequest("GET", server.URL+"/prober/run/?name="+tt.name, nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } - req := httptest.NewRequest("GET", "/prober/run/?name="+tt.name, nil) if reqJSON { req.Header.Set("Accept", "application/json") } - tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunHandler), tsweb.HandlerOptions{}).ServeHTTP(w, req) - if w.Result().StatusCode != tt.wantResponseCode { - t.Errorf("unexpected response code: got %d, want %d", w.Code, tt.wantResponseCode) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("failed to make request: %v", err) + } + + if resp.StatusCode != tt.wantResponseCode { + t.Errorf("unexpected response code: got %d, want %d", resp.StatusCode, tt.wantResponseCode) } if reqJSON { - if w.Header().Get("Content-Type") != "application/json" { - t.Errorf("unexpected content type: got %q, want application/json", w.Header().Get("Content-Type")) + if resp.Header.Get("Content-Type") != "application/json" { + t.Errorf("unexpected content type: got %q, want application/json", resp.Header.Get("Content-Type")) } var gotJSON RunHandlerResponse - if err := json.Unmarshal(w.Body.Bytes(), &gotJSON); err != nil { - t.Fatalf("failed to unmarshal JSON response: %v; body: %s", err, w.Body.String()) + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } + + if err := json.Unmarshal(body, &gotJSON); err != nil { + t.Fatalf("failed to unmarshal JSON response: %v; body: %s", err, body) } if diff := cmp.Diff(tt.wantJSONResponse, gotJSON, cmpopts.IgnoreFields(ProbeInfo{}, "Start", "End", "Labels", "RecentLatencies")); diff != "" { t.Errorf("unexpected JSON response (-want +got):\n%s", diff) } } else { - body, _ := io.ReadAll(w.Result().Body) + body, _ := io.ReadAll(resp.Body) if !strings.Contains(string(body), tt.wantPlaintextResponse) { t.Errorf("unexpected response body: got %q, want to contain %q", body, tt.wantPlaintextResponse) } From 70b6e8ca98e4b468c7dcecfe101e722d51342b58 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 20 May 2025 08:46:37 -0700 Subject: [PATCH 0883/1708] wgengine/magicsock: fix outdated heartbeat comment (#16023) heartbeatInterval is currently 3s. Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 867eebda6..3788708a8 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -820,7 +820,7 @@ func (de *endpoint) heartbeat() { udpAddr, _, _ := de.addrForSendLocked(now) if udpAddr.IsValid() { - // We have a preferred path. Ping that every 2 seconds. + // We have a preferred path. Ping that every 'heartbeatInterval'. 
de.startDiscoPingLocked(udpAddr, now, pingHeartbeat, 0, nil) } From 5a8b99e977e32eeb115bdcf7efc882e69c5633b2 Mon Sep 17 00:00:00 2001 From: kari-ts <135075563+kari-ts@users.noreply.github.com> Date: Tue, 20 May 2025 15:30:19 -0700 Subject: [PATCH 0884/1708] ipn,ipnlocal,taildrop: use SAF for Android files (#15976) Create FileOps for calling platform-specific file operations such as SAF APIs in Taildrop Update taildrop.PutFile to support both traditional and SAF modes Updates tailscale/tailscale#15263 Signed-off-by: kari-ts --- feature/taildrop/ext.go | 34 ++++ feature/taildrop/paths.go | 6 + feature/taildrop/send.go | 283 ++++++++++++++++++++++++---------- feature/taildrop/send_test.go | 128 +++++++++++++++ feature/taildrop/taildrop.go | 4 + 5 files changed, 371 insertions(+), 84 deletions(-) create mode 100644 feature/taildrop/send_test.go diff --git a/feature/taildrop/ext.go b/feature/taildrop/ext.go index ed26996fe..c11fe3af4 100644 --- a/feature/taildrop/ext.go +++ b/feature/taildrop/ext.go @@ -73,6 +73,10 @@ type Extension struct { // *.partial file to its final name on completion. directFileRoot string + // FileOps abstracts platform-specific file operations needed for file transfers. + // This is currently being used for Android to use the Storage Access Framework. + FileOps FileOps + nodeBackendForTest ipnext.NodeBackend // if non-nil, pretend we're this node state for tests mu sync.Mutex // Lock order: lb.mu > e.mu @@ -85,6 +89,30 @@ type Extension struct { outgoingFiles map[string]*ipn.OutgoingFile } +// safDirectoryPrefix is used to determine if the directory is managed via SAF. +const SafDirectoryPrefix = "content://" + +// PutMode controls how Manager.PutFile writes files to storage. +// +// PutModeDirect – write files directly to a filesystem path (default). +// PutModeAndroidSAF – use Android’s Storage Access Framework (SAF), where +// the OS manages the underlying directory permissions. +type PutMode int + +const ( + PutModeDirect PutMode = iota + PutModeAndroidSAF +) + +// FileOps defines platform-specific file operations. +type FileOps interface { + OpenFileWriter(filename string) (io.WriteCloser, string, error) + + // RenamePartialFile finalizes a partial file. + // It returns the new SAF URI as a string and an error. + RenamePartialFile(partialUri, targetDirUri, targetName string) (string, error) +} + func (e *Extension) Name() string { return "taildrop" } @@ -153,12 +181,18 @@ func (e *Extension) onChangeProfile(profile ipn.LoginProfileView, _ ipn.PrefsVie if fileRoot == "" { e.logf("no Taildrop directory configured") } + mode := PutModeDirect + if e.directFileRoot != "" && strings.HasPrefix(e.directFileRoot, SafDirectoryPrefix) { + mode = PutModeAndroidSAF + } e.setMgrLocked(managerOptions{ Logf: e.logf, Clock: tstime.DefaultClock{Clock: e.sb.Clock()}, State: e.stateStore, Dir: fileRoot, DirectFileMode: isDirectFileMode, + FileOps: e.FileOps, + Mode: mode, SendFileNotify: e.sendFileNotify, }.New()) } diff --git a/feature/taildrop/paths.go b/feature/taildrop/paths.go index 1129fbcfa..22d01160c 100644 --- a/feature/taildrop/paths.go +++ b/feature/taildrop/paths.go @@ -18,6 +18,12 @@ func (e *Extension) SetDirectFileRoot(root string) { e.directFileRoot = root } +// SetFileOps sets the platform specific file operations. This is used +// to call Android's Storage Access Framework APIs. 
+func (e *Extension) SetFileOps(fileOps FileOps) { + e.FileOps = fileOps +} + func (e *Extension) setPlatformDefaultDirectFileRoot() { dg := distro.Get() diff --git a/feature/taildrop/send.go b/feature/taildrop/send.go index 98c3934bb..59a1701da 100644 --- a/feature/taildrop/send.go +++ b/feature/taildrop/send.go @@ -5,7 +5,7 @@ package taildrop import ( "crypto/sha256" - "errors" + "fmt" "io" "os" "path/filepath" @@ -82,126 +82,215 @@ func (m *manager) PutFile(id clientID, baseName string, r io.Reader, offset, len case distro.Get() == distro.Unraid && !m.opts.DirectFileMode: return 0, ErrNotAccessible } - dstPath, err := joinDir(m.opts.Dir, baseName) - if err != nil { - return 0, err - } - redactAndLogError := func(action string, err error) error { - err = redactError(err) - m.opts.Logf("put %v error: %v", action, err) - return err + //Compute dstPath & avoid mid‑upload deletion + var dstPath string + if m.opts.Mode == PutModeDirect { + var err error + dstPath, err = joinDir(m.opts.Dir, baseName) + if err != nil { + return 0, err + } + } else { + // In SAF mode, we simply use the baseName as the destination "path" + // (the actual directory is managed by SAF). + dstPath = baseName } + m.deleter.Remove(filepath.Base(dstPath)) // avoid deleting the partial file while receiving // Check whether there is an in-progress transfer for the file. - partialPath := dstPath + id.partialSuffix() - inFileKey := incomingFileKey{id, baseName} - inFile, loaded := m.incomingFiles.LoadOrInit(inFileKey, func() *incomingFile { - inFile := &incomingFile{ + partialFileKey := incomingFileKey{id, baseName} + inFile, loaded := m.incomingFiles.LoadOrInit(partialFileKey, func() *incomingFile { + return &incomingFile{ clock: m.opts.Clock, started: m.opts.Clock.Now(), size: length, sendFileNotify: m.opts.SendFileNotify, } - if m.opts.DirectFileMode { - inFile.partialPath = partialPath - inFile.finalPath = dstPath - } - return inFile }) if loaded { return 0, ErrFileExists } - defer m.incomingFiles.Delete(inFileKey) - m.deleter.Remove(filepath.Base(partialPath)) // avoid deleting the partial file while receiving + defer m.incomingFiles.Delete(partialFileKey) - // Create (if not already) the partial file with read-write permissions. - f, err := os.OpenFile(partialPath, os.O_CREATE|os.O_RDWR, 0666) + // Open writer & populate inFile paths + wc, partialPath, err := m.openWriterAndPaths(id, m.opts.Mode, inFile, baseName, dstPath, offset) if err != nil { - return 0, redactAndLogError("Create", err) + return 0, m.redactAndLogError("Create", err) } defer func() { - f.Close() // best-effort to cleanup dangling file handles + wc.Close() if err != nil { m.deleter.Insert(filepath.Base(partialPath)) // mark partial file for eventual deletion } }() - inFile.w = f // Record that we have started to receive at least one file. // This is used by the deleter upon a cold-start to scan the directory // for any files that need to be deleted. - if m.opts.State != nil { - if b, _ := m.opts.State.ReadState(ipn.TaildropReceivedKey); len(b) == 0 { - if err := m.opts.State.WriteState(ipn.TaildropReceivedKey, []byte{1}); err != nil { - m.opts.Logf("WriteState error: %v", err) // non-fatal error + if st := m.opts.State; st != nil { + if b, _ := st.ReadState(ipn.TaildropReceivedKey); len(b) == 0 { + if werr := st.WriteState(ipn.TaildropReceivedKey, []byte{1}); werr != nil { + m.opts.Logf("WriteState error: %v", werr) // non-fatal error } } } - // A positive offset implies that we are resuming an existing file. 
- // Seek to the appropriate offset and truncate the file. - if offset != 0 { - currLength, err := f.Seek(0, io.SeekEnd) - if err != nil { - return 0, redactAndLogError("Seek", err) - } - if offset < 0 || offset > currLength { - return 0, redactAndLogError("Seek", err) - } - if _, err := f.Seek(offset, io.SeekStart); err != nil { - return 0, redactAndLogError("Seek", err) - } - if err := f.Truncate(offset); err != nil { - return 0, redactAndLogError("Truncate", err) - } - } - - // Copy the contents of the file. - copyLength, err := io.Copy(inFile, r) + // Copy the contents of the file to the writer. + copyLength, err := io.Copy(wc, r) if err != nil { - return 0, redactAndLogError("Copy", err) + return 0, m.redactAndLogError("Copy", err) } if length >= 0 && copyLength != length { - return 0, redactAndLogError("Copy", errors.New("copied an unexpected number of bytes")) + return 0, m.redactAndLogError("Copy", fmt.Errorf("copied %d bytes; expected %d", copyLength, length)) } - if err := f.Close(); err != nil { - return 0, redactAndLogError("Close", err) + if err := wc.Close(); err != nil { + return 0, m.redactAndLogError("Close", err) } + fileLength := offset + copyLength inFile.mu.Lock() inFile.done = true inFile.mu.Unlock() - // File has been successfully received, rename the partial file - // to the final destination filename. If a file of that name already exists, - // then try multiple times with variations of the filename. - computePartialSum := sync.OnceValues(func() ([sha256.Size]byte, error) { - return sha256File(partialPath) - }) - maxRetries := 10 - for ; maxRetries > 0; maxRetries-- { + // Finalize rename + switch m.opts.Mode { + case PutModeDirect: + var finalDst string + finalDst, err = m.finalizeDirect(inFile, partialPath, dstPath, fileLength) + if err != nil { + return 0, m.redactAndLogError("Rename", err) + } + inFile.finalPath = finalDst + + case PutModeAndroidSAF: + if err = m.finalizeSAF(partialPath, baseName); err != nil { + return 0, m.redactAndLogError("Rename", err) + } + } + + m.totalReceived.Add(1) + m.opts.SendFileNotify() + return fileLength, nil +} + +// openWriterAndPaths opens the correct writer, seeks/truncates if needed, +// and sets inFile.partialPath & inFile.finalPath for later cleanup/rename. +// The caller is responsible for closing the file on completion. 
+func (m *manager) openWriterAndPaths( + id clientID, + mode PutMode, + inFile *incomingFile, + baseName string, + dstPath string, + offset int64, +) (wc io.WriteCloser, partialPath string, err error) { + switch mode { + + case PutModeDirect: + partialPath = dstPath + id.partialSuffix() + f, err := os.OpenFile(partialPath, os.O_CREATE|os.O_RDWR, 0o666) + if err != nil { + return nil, "", m.redactAndLogError("Create", err) + } + if offset != 0 { + curr, err := f.Seek(0, io.SeekEnd) + if err != nil { + f.Close() + return nil, "", m.redactAndLogError("Seek", err) + } + if offset < 0 || offset > curr { + f.Close() + return nil, "", m.redactAndLogError("Seek", fmt.Errorf("offset %d out of range", offset)) + } + if _, err := f.Seek(offset, io.SeekStart); err != nil { + f.Close() + return nil, "", m.redactAndLogError("Seek", err) + } + if err := f.Truncate(offset); err != nil { + f.Close() + return nil, "", m.redactAndLogError("Truncate", err) + } + } + inFile.w = f + wc = f + inFile.partialPath = partialPath + inFile.finalPath = dstPath + return wc, partialPath, nil + + case PutModeAndroidSAF: + if m.opts.FileOps == nil { + return nil, "", m.redactAndLogError("Create (SAF)", fmt.Errorf("missing FileOps")) + } + writer, uri, err := m.opts.FileOps.OpenFileWriter(baseName) + if err != nil { + return nil, "", m.redactAndLogError("Create (SAF)", fmt.Errorf("failed to open file for writing via SAF")) + } + if writer == nil || uri == "" { + return nil, "", fmt.Errorf("invalid SAF writer or URI") + } + // SAF mode does not support resuming, so enforce offset == 0. + if offset != 0 { + writer.Close() + return nil, "", m.redactAndLogError("Seek", fmt.Errorf("resuming is not supported in SAF mode")) + } + inFile.w = writer + wc = writer + partialPath = uri + inFile.partialPath = uri + inFile.finalPath = baseName + return wc, partialPath, nil + + default: + return nil, "", fmt.Errorf("unsupported PutMode: %v", mode) + } +} + +// finalizeDirect atomically renames or dedups the partial file, retrying +// under new names up to 10 times. It returns the final path that succeeded. +func (m *manager) finalizeDirect( + inFile *incomingFile, + partialPath string, + initialDst string, + fileLength int64, +) (string, error) { + var ( + once sync.Once + cachedSum [sha256.Size]byte + cacheErr error + computeSum = func() ([sha256.Size]byte, error) { + once.Do(func() { cachedSum, cacheErr = sha256File(partialPath) }) + return cachedSum, cacheErr + } + ) + + dstPath := initialDst + const maxRetries = 10 + for i := 0; i < maxRetries; i++ { // Atomically rename the partial file as the destination file if it doesn't exist. // Otherwise, it returns the length of the current destination file. // The operation is atomic. - dstLength, err := func() (int64, error) { + lengthOnDisk, err := func() (int64, error) { m.renameMu.Lock() defer m.renameMu.Unlock() - switch fi, err := os.Stat(dstPath); { - case os.IsNotExist(err): + fi, statErr := os.Stat(dstPath) + if os.IsNotExist(statErr) { + // dst missing → rename partial into place return -1, os.Rename(partialPath, dstPath) - case err != nil: - return -1, err - default: - return fi.Size(), nil } + if statErr != nil { + return -1, statErr + } + return fi.Size(), nil }() if err != nil { - return 0, redactAndLogError("Rename", err) + return "", err } - if dstLength < 0 { - break // we successfully renamed; so stop + if lengthOnDisk < 0 { + // successfully moved + inFile.finalPath = dstPath + return dstPath, nil } // Avoid the final rename if a destination file has the same contents. 
@@ -209,33 +298,59 @@ func (m *manager) PutFile(id clientID, baseName string, r io.Reader, offset, len // Note: this is best effort and copying files from iOS from the Media Library // results in processing on the iOS side which means the size and shas of the // same file can be different. - if dstLength == fileLength { - partialSum, err := computePartialSum() + if lengthOnDisk == fileLength { + partSum, err := computeSum() if err != nil { - return 0, redactAndLogError("Rename", err) + return "", err } dstSum, err := sha256File(dstPath) if err != nil { - return 0, redactAndLogError("Rename", err) + return "", err } - if dstSum == partialSum { + if partSum == dstSum { + // same content → drop the partial if err := os.Remove(partialPath); err != nil { - return 0, redactAndLogError("Remove", err) + return "", err } - break // we successfully found a content match; so stop + inFile.finalPath = dstPath + return dstPath, nil } } // Choose a new destination filename and try again. dstPath = nextFilename(dstPath) - inFile.finalPath = dstPath } - if maxRetries <= 0 { - return 0, errors.New("too many retries trying to rename partial file") + + return "", fmt.Errorf("too many retries trying to rename a partial file %q", initialDst) +} + +// finalizeSAF retries RenamePartialFile up to 10 times, generating a new +// name on each failure until the SAF URI changes. +func (m *manager) finalizeSAF( + partialPath, finalName string, +) error { + if m.opts.FileOps == nil { + return fmt.Errorf("missing FileOps for SAF finalize") } - m.totalReceived.Add(1) - m.opts.SendFileNotify() - return fileLength, nil + const maxTries = 10 + name := finalName + for i := 0; i < maxTries; i++ { + newURI, err := m.opts.FileOps.RenamePartialFile(partialPath, m.opts.Dir, name) + if err != nil { + return err + } + if newURI != "" && newURI != name { + return nil + } + name = nextFilename(name) + } + return fmt.Errorf("failed to finalize SAF file after %d retries", maxTries) +} + +func (m *manager) redactAndLogError(stage string, err error) error { + err = redactError(err) + m.opts.Logf("put %s error: %v", stage, err) + return err } func sha256File(file string) (out [sha256.Size]byte, err error) { diff --git a/feature/taildrop/send_test.go b/feature/taildrop/send_test.go new file mode 100644 index 000000000..8edb70417 --- /dev/null +++ b/feature/taildrop/send_test.go @@ -0,0 +1,128 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package taildrop + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "testing" + + "tailscale.com/tstime" +) + +// nopWriteCloser is a no-op io.WriteCloser wrapping a bytes.Buffer. +type nopWriteCloser struct{ *bytes.Buffer } + +func (nwc nopWriteCloser) Close() error { return nil } + +// mockFileOps implements just enough of the FileOps interface for SAF tests. 
+type mockFileOps struct { + writes *bytes.Buffer + renameOK bool +} + +func (m *mockFileOps) OpenFileWriter(name string) (io.WriteCloser, string, error) { + m.writes = new(bytes.Buffer) + return nopWriteCloser{m.writes}, "uri://" + name + ".partial", nil +} + +func (m *mockFileOps) RenamePartialFile(partialPath, dir, finalName string) (string, error) { + if !m.renameOK { + m.renameOK = true + return "uri://" + finalName, nil + } + return "", io.ErrUnexpectedEOF +} + +func TestPutFile(t *testing.T) { + const content = "hello, world" + + tests := []struct { + name string + mode PutMode + setup func(t *testing.T) (*manager, string, *mockFileOps) + wantFile string + }{ + { + name: "PutModeDirect", + mode: PutModeDirect, + setup: func(t *testing.T) (*manager, string, *mockFileOps) { + dir := t.TempDir() + opts := managerOptions{ + Logf: t.Logf, + Clock: tstime.DefaultClock{}, + State: nil, + Dir: dir, + Mode: PutModeDirect, + DirectFileMode: true, + SendFileNotify: func() {}, + } + mgr := opts.New() + return mgr, dir, nil + }, + wantFile: "file.txt", + }, + { + name: "PutModeAndroidSAF", + mode: PutModeAndroidSAF, + setup: func(t *testing.T) (*manager, string, *mockFileOps) { + // SAF still needs a non-empty Dir to pass the guard. + dir := t.TempDir() + mops := &mockFileOps{} + opts := managerOptions{ + Logf: t.Logf, + Clock: tstime.DefaultClock{}, + State: nil, + Dir: dir, + Mode: PutModeAndroidSAF, + FileOps: mops, + DirectFileMode: true, + SendFileNotify: func() {}, + } + mgr := opts.New() + return mgr, dir, mops + }, + wantFile: "file.txt", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mgr, dir, mops := tc.setup(t) + id := clientID(fmt.Sprint(0)) + reader := bytes.NewReader([]byte(content)) + + n, err := mgr.PutFile(id, "file.txt", reader, 0, int64(len(content))) + if err != nil { + t.Fatalf("PutFile(%s) error: %v", tc.name, err) + } + if n != int64(len(content)) { + t.Errorf("wrote %d bytes; want %d", n, len(content)) + } + + switch tc.mode { + case PutModeDirect: + path := filepath.Join(dir, tc.wantFile) + data, err := os.ReadFile(path) + if err != nil { + t.Fatalf("ReadFile error: %v", err) + } + if got := string(data); got != content { + t.Errorf("file contents = %q; want %q", got, content) + } + + case PutModeAndroidSAF: + if mops.writes == nil { + t.Fatal("SAF writer was never created") + } + if got := mops.writes.String(); got != content { + t.Errorf("SAF writes = %q; want %q", got, content) + } + } + }) + } +} diff --git a/feature/taildrop/taildrop.go b/feature/taildrop/taildrop.go index 2e5c94861..2dfa415bb 100644 --- a/feature/taildrop/taildrop.go +++ b/feature/taildrop/taildrop.go @@ -91,6 +91,10 @@ type managerOptions struct { // copy them out, and then delete them. DirectFileMode bool + FileOps FileOps + + Mode PutMode + // SendFileNotify is called periodically while a file is actively // receiving the contents for the file. There is a final call // to the function when reception completes. From 0bab16448e0f23d61a975da16952a57184af5695 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 21 May 2025 08:47:23 -0700 Subject: [PATCH 0885/1708] ipn/store: remove a layer of indirection for registering stores (#15986) Registering a new store is cheap, it just adds a map entry. No need to lazy-init it with sync.Once and an intermediate slice holding init functions. 
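As a rough sketch of the resulting pattern (assumed shape; the actual Register implementation in ipn/store/stores.go is not part of this diff), each store package now registers its Provider directly from init, and Register only has to record a map entry:

	// Register records a Provider for a state-store prefix such as "mem:",
	// "kube:", or "arn:". All callers run from init, before main starts,
	// so New always sees a fully populated knownStores map.
	func Register(prefix string, p Provider) {
		if knownStores == nil {
			knownStores = make(map[string]Provider)
		}
		knownStores[prefix] = p
	}

	func init() {
		Register("mem:", mem.New)
	}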
Updates #cleanup Signed-off-by: Andrew Lytvynov --- ipn/store/store_aws.go | 4 ---- ipn/store/store_kube.go | 4 ---- ipn/store/stores.go | 11 +---------- ipn/store/stores_test.go | 6 +++--- 4 files changed, 4 insertions(+), 21 deletions(-) diff --git a/ipn/store/store_aws.go b/ipn/store/store_aws.go index 4f6c5a6e7..834b657d3 100644 --- a/ipn/store/store_aws.go +++ b/ipn/store/store_aws.go @@ -12,10 +12,6 @@ import ( ) func init() { - registerAvailableExternalStores = append(registerAvailableExternalStores, registerAWSStore) -} - -func registerAWSStore() { Register("arn:", func(logf logger.Logf, arg string) (ipn.StateStore, error) { ssmARN, opts, err := awsstore.ParseARNAndOpts(arg) if err != nil { diff --git a/ipn/store/store_kube.go b/ipn/store/store_kube.go index 01ee2870f..7eac75c19 100644 --- a/ipn/store/store_kube.go +++ b/ipn/store/store_kube.go @@ -14,10 +14,6 @@ import ( ) func init() { - registerAvailableExternalStores = append(registerAvailableExternalStores, registerKubeStore) -} - -func registerKubeStore() { Register("kube:", func(logf logger.Logf, path string) (ipn.StateStore, error) { secretName := strings.TrimPrefix(path, "kube:") return kubestore.New(logf, secretName) diff --git a/ipn/store/stores.go b/ipn/store/stores.go index 1a87fc548..1f98891bf 100644 --- a/ipn/store/stores.go +++ b/ipn/store/stores.go @@ -26,16 +26,8 @@ import ( // The arg is of the form "prefix:rest", where prefix was previously registered with Register. type Provider func(logf logger.Logf, arg string) (ipn.StateStore, error) -var regOnce sync.Once - -var registerAvailableExternalStores []func() - -func registerDefaultStores() { +func init() { Register("mem:", mem.New) - - for _, f := range registerAvailableExternalStores { - f() - } } var knownStores map[string]Provider @@ -55,7 +47,6 @@ var knownStores map[string]Provider // the suffix is a Kubernetes secret name // - In all other cases, the path is treated as a filepath. 
func New(logf logger.Logf, path string) (ipn.StateStore, error) { - regOnce.Do(registerDefaultStores) for prefix, sf := range knownStores { if strings.HasPrefix(path, prefix) { // We can't strip the prefix here as some NewStoreFunc (like arn:) diff --git a/ipn/store/stores_test.go b/ipn/store/stores_test.go index ea09e6ea6..1f0fc0fef 100644 --- a/ipn/store/stores_test.go +++ b/ipn/store/stores_test.go @@ -4,6 +4,7 @@ package store import ( + "maps" "path/filepath" "testing" @@ -14,10 +15,9 @@ import ( ) func TestNewStore(t *testing.T) { - regOnce.Do(registerDefaultStores) + oldKnownStores := maps.Clone(knownStores) t.Cleanup(func() { - knownStores = map[string]Provider{} - registerDefaultStores() + knownStores = oldKnownStores }) knownStores = map[string]Provider{} From 980ab4244dd7eab399fcda8f258621b324b5924d Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 21 May 2025 15:27:32 -0400 Subject: [PATCH 0886/1708] VERSION.txt: this is v1.85.0 (#16042) Signed-off-by: Jonathan Nobels --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index 6b4de0a42..f288d1114 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.83.0 +1.85.0 From aa8bc23c496821dfa00771c9604fc4a71ead7d4c Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Thu, 22 May 2025 13:40:32 +0100 Subject: [PATCH 0887/1708] control/controlclient,health,tailcfg: refactor control health messages (#15839) * control/controlclient,health,tailcfg: refactor control health messages Updates tailscale/corp#27759 Signed-off-by: James Sanderson Signed-off-by: Paul Scott <408401+icio@users.noreply.github.com> Co-authored-by: Paul Scott <408401+icio@users.noreply.github.com> --- control/controlclient/auto.go | 7 +- control/controlclient/direct.go | 4 +- control/controlclient/map.go | 22 +++- control/controlclient/map_test.go | 42 ++++--- health/health.go | 191 ++++++++++++++++++++---------- health/health_test.go | 165 ++++++++++++++++++++++---- health/state.go | 28 ++++- health/warnings.go | 10 -- ipn/ipnlocal/local.go | 16 ++- tailcfg/tailcfg.go | 52 +++++++- tailcfg/tailcfg_test.go | 76 ++++++++++++ types/netmap/netmap.go | 4 +- 12 files changed, 495 insertions(+), 122 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index e0168c19d..e6335e54d 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -12,6 +12,7 @@ import ( "sync/atomic" "time" + "tailscale.com/health" "tailscale.com/logtail/backoff" "tailscale.com/net/sockstats" "tailscale.com/tailcfg" @@ -198,7 +199,11 @@ func NewNoStart(opts Options) (_ *Auto, err error) { c.mapCtx, c.mapCancel = context.WithCancel(context.Background()) c.mapCtx = sockstats.WithSockStats(c.mapCtx, sockstats.LabelControlClientAuto, opts.Logf) - c.unregisterHealthWatch = opts.HealthTracker.RegisterWatcher(direct.ReportHealthChange) + c.unregisterHealthWatch = opts.HealthTracker.RegisterWatcher(func(c health.Change) { + if c.WarnableChanged { + direct.ReportWarnableChange(c.Warnable, c.UnhealthyState) + } + }) return c, nil } diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index ac799e2d9..2d6dc6e36 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1623,9 +1623,9 @@ func postPingResult(start time.Time, logf logger.Logf, c *http.Client, pr *tailc return nil } -// ReportHealthChange reports to the control plane a change to this node's +// ReportWarnableChange reports to the control plane a change to this node's 
// health. w must be non-nil. us can be nil to indicate a healthy state for w. -func (c *Direct) ReportHealthChange(w *health.Warnable, us *health.UnhealthyState) { +func (c *Direct) ReportWarnableChange(w *health.Warnable, us *health.UnhealthyState) { if w == health.NetworkStatusWarnable || w == health.IPNStateWarnable || w == health.LoginStateWarnable { // We don't report these. These include things like the network is down // (in which case we can't report anyway) or the user wanted things diff --git a/control/controlclient/map.go b/control/controlclient/map.go index 3173040fe..abfc5eb17 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -6,7 +6,10 @@ package controlclient import ( "cmp" "context" + "crypto/sha256" + "encoding/hex" "encoding/json" + "io" "maps" "net" "reflect" @@ -828,6 +831,16 @@ func (ms *mapSession) sortedPeers() []tailcfg.NodeView { func (ms *mapSession) netmap() *netmap.NetworkMap { peerViews := ms.sortedPeers() + // Convert all ms.lastHealth to the new [netmap.NetworkMap.DisplayMessages]. + var msgs map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage + for _, h := range ms.lastHealth { + mak.Set(&msgs, tailcfg.DisplayMessageID("control-health-"+strhash(h)), tailcfg.DisplayMessage{ + Title: "Coordination server reports an issue", + Severity: tailcfg.SeverityMedium, + Text: "The coordination server is reporting a health issue: " + h, + }) + } + nm := &netmap.NetworkMap{ NodeKey: ms.publicNodeKey, PrivateKey: ms.privateNodeKey, @@ -842,7 +855,7 @@ func (ms *mapSession) netmap() *netmap.NetworkMap { SSHPolicy: ms.lastSSHPolicy, CollectServices: ms.collectServices, DERPMap: ms.lastDERPMap, - ControlHealth: ms.lastHealth, + DisplayMessages: msgs, TKAEnabled: ms.lastTKAInfo != nil && !ms.lastTKAInfo.Disabled, } @@ -868,5 +881,12 @@ func (ms *mapSession) netmap() *netmap.NetworkMap { if DevKnob.ForceProxyDNS() { nm.DNS.Proxied = true } + return nm } + +func strhash(h string) string { + s := sha256.New() + io.WriteString(s, h) + return hex.EncodeToString(s.Sum(nil)) +} diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index ccc57ae2b..9abaae923 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -7,6 +7,7 @@ import ( "context" "encoding/json" "fmt" + "maps" "net/netip" "reflect" "strings" @@ -1148,23 +1149,36 @@ func TestNetmapHealthIntegration(t *testing.T) { ht.GotStreamedMapResponse() nm := ms.netmapForResponse(&tailcfg.MapResponse{ - Health: []string{"Test message"}, + Health: []string{ + "Test message", + "Another message", + }, }) - ht.SetControlHealth(nm.ControlHealth) - - state := ht.CurrentState() - warning, ok := state.Warnings["control-health"] + ht.SetControlHealth(nm.DisplayMessages) - if !ok { - t.Fatal("no warning found in current state with code 'control-health'") - } - if got, want := warning.Title, "Coordination server reports an issue"; got != want { - t.Errorf("warning.Title = %q, want %q", got, want) + want := map[health.WarnableCode]health.UnhealthyState{ + "control-health-c0719e9a8d5d838d861dc6f675c899d2b309a3a65bb9fe6b11e5afcbf9a2c0b1": { + WarnableCode: "control-health-c0719e9a8d5d838d861dc6f675c899d2b309a3a65bb9fe6b11e5afcbf9a2c0b1", + Title: "Coordination server reports an issue", + Severity: health.SeverityMedium, + Text: "The coordination server is reporting a health issue: Test message", + }, + "control-health-1dc7017a73a3c55c0d6a8423e3813c7ab6562d9d3064c2ec6ac7822f61b1db9c": { + WarnableCode: 
"control-health-1dc7017a73a3c55c0d6a8423e3813c7ab6562d9d3064c2ec6ac7822f61b1db9c", + Title: "Coordination server reports an issue", + Severity: health.SeverityMedium, + Text: "The coordination server is reporting a health issue: Another message", + }, } - if got, want := warning.Severity, health.SeverityMedium; got != want { - t.Errorf("warning.Severity = %s, want %s", got, want) + + got := maps.Clone(ht.CurrentState().Warnings) + for k := range got { + if !strings.HasPrefix(string(k), "control-health") { + delete(got, k) + } } - if got, want := warning.Text, "The coordination server is reporting an health issue: Test message"; got != want { - t.Errorf("warning.Text = %q, want %q", got, want) + + if d := cmp.Diff(want, got); d != "" { + t.Fatalf("CurrentStatus().Warnings[\"control-health*\"] different than expected (-want +got)\n%s", d) } } diff --git a/health/health.go b/health/health.go index 1ec2bcc9b..6dbbf782c 100644 --- a/health/health.go +++ b/health/health.go @@ -88,34 +88,35 @@ type Tracker struct { // sysErr maps subsystems to their current error (or nil if the subsystem is healthy) // Deprecated: using Warnables should be preferred sysErr map[Subsystem]error - watchers set.HandleSet[func(*Warnable, *UnhealthyState)] // opt func to run if error state changes + watchers set.HandleSet[func(Change)] // opt func to run if error state changes timer tstime.TimerController latestVersion *tailcfg.ClientVersion // or nil checkForUpdates bool applyUpdates opt.Bool - inMapPoll bool - inMapPollSince time.Time - lastMapPollEndedAt time.Time - lastStreamedMapResponse time.Time - lastNoiseDial time.Time - derpHomeRegion int - derpHomeless bool - derpRegionConnected map[int]bool - derpRegionHealthProblem map[int]string - derpRegionLastFrame map[int]time.Time - derpMap *tailcfg.DERPMap // last DERP map from control, could be nil if never received one - lastMapRequestHeard time.Time // time we got a 200 from control for a MapRequest - ipnState string - ipnWantRunning bool - ipnWantRunningLastTrue time.Time // when ipnWantRunning last changed false -> true - anyInterfaceUp opt.Bool // empty means unknown (assume true) - controlHealth []string - lastLoginErr error - localLogConfigErr error - tlsConnectionErrors map[string]error // map[ServerName]error - metricHealthMessage *metrics.MultiLabelMap[metricHealthMessageLabel] + inMapPoll bool + inMapPollSince time.Time + lastMapPollEndedAt time.Time + lastStreamedMapResponse time.Time + lastNoiseDial time.Time + derpHomeRegion int + derpHomeless bool + derpRegionConnected map[int]bool + derpRegionHealthProblem map[int]string + derpRegionLastFrame map[int]time.Time + derpMap *tailcfg.DERPMap // last DERP map from control, could be nil if never received one + lastMapRequestHeard time.Time // time we got a 200 from control for a MapRequest + ipnState string + ipnWantRunning bool + ipnWantRunningLastTrue time.Time // when ipnWantRunning last changed false -> true + anyInterfaceUp opt.Bool // empty means unknown (assume true) + lastNotifiedControlMessages map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage // latest control messages processed, kept for change detection + controlMessages map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage // latest control messages received + lastLoginErr error + localLogConfigErr error + tlsConnectionErrors map[string]error // map[ServerName]error + metricHealthMessage *metrics.MultiLabelMap[metricHealthMessageLabel] } func (t *Tracker) now() time.Time { @@ -207,13 +208,15 @@ func unregister(w *Warnable) { // the program. 
type WarnableCode string -// A Warnable is something that we might want to warn the user about, or not. A Warnable is either -// in an healthy or unhealth state. A Warnable is unhealthy if the Tracker knows about a WarningState -// affecting the Warnable. -// In most cases, Warnables are components of the backend (for instance, "DNS" or "Magicsock"). -// Warnables are similar to the Subsystem type previously used in this package, but they provide -// a unique identifying code for each Warnable, along with more metadata that makes it easier for -// a GUI to display the Warnable in a user-friendly way. +// A Warnable is something that we might want to warn the user about, or not. A +// Warnable is either in a healthy or unhealthy state. A Warnable is unhealthy if +// the Tracker knows about a WarningState affecting the Warnable. +// +// In most cases, Warnables are components of the backend (for instance, "DNS" +// or "Magicsock"). Warnables are similar to the Subsystem type previously used +// in this package, but they provide a unique identifying code for each +// Warnable, along with more metadata that makes it easier for a GUI to display +// the Warnable in a user-friendly way. type Warnable struct { // Code is a string that uniquely identifies this Warnable across the entire Tailscale backend, // and can be mapped to a user-displayable localized string. @@ -409,12 +412,18 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { prevWs := t.warnableVal[w] mak.Set(&t.warnableVal, w, ws) if !ws.Equal(prevWs) { + + change := Change{ + WarnableChanged: true, + Warnable: w, + UnhealthyState: w.unhealthyState(ws), + } for _, cb := range t.watchers { // If the Warnable has been unhealthy for more than its TimeToVisible, the callback should be // executed immediately. Otherwise, the callback should be enqueued to run once the Warnable // becomes visible. if w.IsVisible(ws, t.now) { - cb(w, w.unhealthyState(ws)) + cb(change) continue } @@ -427,7 +436,7 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { // Check if the Warnable is still unhealthy, as it could have become healthy between the time // the timer was set for and the time it was executed. if t.warnableVal[w] != nil { - cb(w, w.unhealthyState(ws)) + cb(change) delete(t.pendingVisibleTimers, w) } }) @@ -460,8 +469,23 @@ func (t *Tracker) setHealthyLocked(w *Warnable) { delete(t.pendingVisibleTimers, w) } + change := Change{ + WarnableChanged: true, + Warnable: w, + } for _, cb := range t.watchers { - cb(w, nil) + cb(change) + } +} + +// notifyWatchersControlChangedLocked calls each watcher to signal that control +// health messages have changed (and should be fetched via CurrentState). +func (t *Tracker) notifyWatchersControlChangedLocked() { + change := Change{ + ControlHealthChanged: true, + } + for _, cb := range t.watchers { + cb(change) } } @@ -488,23 +512,57 @@ func (t *Tracker) AppendWarnableDebugFlags(base []string) []string { return ret } -// RegisterWatcher adds a function that will be called whenever the health state of any Warnable changes. -// If a Warnable becomes unhealthy or its unhealthy state is updated, the callback will be called with its -// current Representation. -// If a Warnable becomes healthy, the callback will be called with ws set to nil. -// The provided callback function will be executed in its own goroutine. The returned function can be used -// to unregister the callback. 
-func (t *Tracker) RegisterWatcher(cb func(w *Warnable, r *UnhealthyState)) (unregister func()) {
-	return t.registerSyncWatcher(func(w *Warnable, r *UnhealthyState) {
-		go cb(w, r)
+// Change is used to communicate a change to health. This could either be due to
+// a Warnable changing from healthy to unhealthy (or vice-versa), or because the
+// health messages received from the control-plane have changed.
+//
+// Exactly one *Changed field will be true.
+type Change struct {
+	// ControlHealthChanged indicates it was health messages from the
+	// control-plane server that changed.
+	ControlHealthChanged bool
+
+	// WarnableChanged indicates it was a client Warnable which changed state.
+	WarnableChanged bool
+	// Warnable is whose health changed, as indicated in UnhealthyState.
+	Warnable *Warnable
+	// UnhealthyState is set if the changed Warnable is now unhealthy, or nil
+	// if Warnable is now healthy.
+	UnhealthyState *UnhealthyState
+}
+
+// RegisterWatcher adds a function that will be called in its own goroutine
+// whenever the health state of any client [Warnable] or control-plane health
+// messages changes. The returned function can be used to unregister the
+// callback.
+//
+// If a client [Warnable] becomes unhealthy or its unhealthy state is updated,
+// the callback will be called with WarnableChanged set to true and the Warnable
+// and its UnhealthyState:
+//
+//	go cb(Change{WarnableChanged: true, Warnable: w, UnhealthyState: us})
+//
+// If a Warnable becomes healthy, the callback will be called with
+// WarnableChanged set to true, the Warnable set, and UnhealthyState set to nil:
+//
+//	go cb(Change{WarnableChanged: true, Warnable: w, UnhealthyState: nil})
+//
+// If the health messages from the control-plane change, the callback will be
+// called with ControlHealthChanged set to true. Recipients can fetch the set of
+// control-plane health messages by calling [Tracker.CurrentState]:
+//
+//	go cb(Change{ControlHealthChanged: true})
+func (t *Tracker) RegisterWatcher(cb func(Change)) (unregister func()) {
+	return t.registerSyncWatcher(func(c Change) {
+		go cb(c)
 	})
 }
 
 // registerSyncWatcher adds a function that will be called whenever the health
-// state of any Warnable changes. The provided callback function will be
-// executed synchronously. Call RegisterWatcher to register any callbacks that
-// won't return from execution immediately.
-func (t *Tracker) registerSyncWatcher(cb func(w *Warnable, r *UnhealthyState)) (unregister func()) {
+// state changes. The provided callback function will be executed synchronously.
+// Call RegisterWatcher to register any callbacks that won't return from
+// execution immediately.
+func (t *Tracker) registerSyncWatcher(cb func(c Change)) (unregister func()) { if t.nil() { return func() {} } @@ -512,7 +570,7 @@ func (t *Tracker) registerSyncWatcher(cb func(w *Warnable, r *UnhealthyState)) ( t.mu.Lock() defer t.mu.Unlock() if t.watchers == nil { - t.watchers = set.HandleSet[func(*Warnable, *UnhealthyState)]{} + t.watchers = set.HandleSet[func(Change)]{} } handle := t.watchers.Add(cb) if t.timer == nil { @@ -659,13 +717,15 @@ func (t *Tracker) updateLegacyErrorWarnableLocked(key Subsystem, err error) { } } -func (t *Tracker) SetControlHealth(problems []string) { +func (t *Tracker) SetControlHealth(problems map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage) { if t.nil() { return } t.mu.Lock() defer t.mu.Unlock() - t.controlHealth = problems + + t.controlMessages = problems + t.selfCheckLocked() } @@ -961,11 +1021,11 @@ func (t *Tracker) OverallError() error { return t.multiErrLocked() } -// Strings() returns a string array containing the Text of all Warnings -// currently known to the Tracker. These strings can be presented to the -// user, although ideally you would use the Code property on each Warning -// to show a localized version of them instead. -// This function is here for legacy compatibility purposes and is deprecated. +// Strings() returns a string array containing the Text of all Warnings and +// ControlHealth messages currently known to the Tracker. These strings can be +// presented to the user, although ideally you would use the Code property on +// each Warning to show a localized version of them instead. This function is +// here for legacy compatibility purposes and is deprecated. func (t *Tracker) Strings() []string { if t.nil() { return nil @@ -991,6 +1051,19 @@ func (t *Tracker) stringsLocked() []string { result = append(result, w.Text(ws.Args)) } } + + warnLen := len(result) + for _, c := range t.controlMessages { + if c.Title != "" && c.Text != "" { + result = append(result, c.Title+": "+c.Text) + } else if c.Title != "" { + result = append(result, c.Title) + } else if c.Text != "" { + result = append(result, c.Text) + } + } + sort.Strings(result[warnLen:]) + return result } @@ -1171,14 +1244,10 @@ func (t *Tracker) updateBuiltinWarnablesLocked() { t.setHealthyLocked(derpRegionErrorWarnable) } - if len(t.controlHealth) > 0 { - for _, s := range t.controlHealth { - t.setUnhealthyLocked(controlHealthWarnable, Args{ - ArgError: s, - }) - } - } else { - t.setHealthyLocked(controlHealthWarnable) + // Check if control health messages have changed + if !maps.EqualFunc(t.lastNotifiedControlMessages, t.controlMessages, tailcfg.DisplayMessage.Equal) { + t.lastNotifiedControlMessages = t.controlMessages + t.notifyWatchersControlChangedLocked() } if err := envknob.ApplyDiskConfigError(); err != nil { diff --git a/health/health_test.go b/health/health_test.go index aa3904581..f609cfb16 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -5,12 +5,14 @@ package health import ( "fmt" + "maps" "reflect" "slices" "strconv" "testing" "time" + "github.com/google/go-cmp/cmp" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/types/opt" @@ -25,6 +27,7 @@ func TestAppendWarnableDebugFlags(t *testing.T) { w := Register(&Warnable{ Code: WarnableCode(fmt.Sprintf("warnable-code-%d", i)), MapDebugFlag: fmt.Sprint(i), + Text: StaticMessage(""), }) defer unregister(w) if i%2 == 0 { @@ -114,7 +117,9 @@ func TestWatcher(t *testing.T) { becameUnhealthy := make(chan struct{}) becameHealthy := make(chan struct{}) - watcherFunc := func(w *Warnable, us 
*UnhealthyState) { + watcherFunc := func(c Change) { + w := c.Warnable + us := c.UnhealthyState if w != testWarnable { t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, testWarnable) } @@ -184,7 +189,9 @@ func TestSetUnhealthyWithTimeToVisible(t *testing.T) { becameUnhealthy := make(chan struct{}) becameHealthy := make(chan struct{}) - watchFunc := func(w *Warnable, us *UnhealthyState) { + watchFunc := func(c Change) { + w := c.Warnable + us := c.UnhealthyState if w != mw { t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, w) } @@ -457,21 +464,94 @@ func TestControlHealth(t *testing.T) { ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() - ht.SetControlHealth([]string{"Test message"}) - state := ht.CurrentState() - warning, ok := state.Warnings["control-health"] + baseWarns := ht.CurrentState().Warnings + baseStrs := ht.Strings() - if !ok { - t.Fatal("no warning found in current state with code 'control-health'") - } - if got, want := warning.Title, "Coordination server reports an issue"; got != want { - t.Errorf("warning.Title = %q, want %q", got, want) - } - if got, want := warning.Severity, SeverityMedium; got != want { - t.Errorf("warning.Severity = %s, want %s", got, want) - } - if got, want := warning.Text, "The coordination server is reporting an health issue: Test message"; got != want { - t.Errorf("warning.Text = %q, want %q", got, want) + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "control-health-test": { + Title: "Control health message", + Text: "Extra help", + }, + "control-health-title": { + Title: "Control health title only", + }, + }) + + t.Run("Warnings", func(t *testing.T) { + wantWarns := map[WarnableCode]UnhealthyState{ + "control-health-test": { + WarnableCode: "control-health-test", + Severity: SeverityMedium, + Title: "Control health message", + Text: "Extra help", + }, + "control-health-title": { + WarnableCode: "control-health-title", + Severity: SeverityMedium, + Title: "Control health title only", + }, + } + state := ht.CurrentState() + gotWarns := maps.Clone(state.Warnings) + for k := range gotWarns { + if _, inBase := baseWarns[k]; inBase { + delete(gotWarns, k) + } + } + if diff := cmp.Diff(wantWarns, gotWarns); diff != "" { + t.Fatalf(`CurrentState().Warnings["control-health-*"] wrong (-want +got):\n%s`, diff) + } + }) + + t.Run("Strings()", func(t *testing.T) { + wantStrs := []string{ + "Control health message: Extra help", + "Control health title only", + } + var gotStrs []string + for _, s := range ht.Strings() { + if !slices.Contains(baseStrs, s) { + gotStrs = append(gotStrs, s) + } + } + if diff := cmp.Diff(wantStrs, gotStrs); diff != "" { + t.Fatalf(`Strings() wrong (-want +got):\n%s`, diff) + } + }) + + t.Run("tailscaled_health_messages", func(t *testing.T) { + var r usermetric.Registry + ht.SetMetricsRegistry(&r) + + got := ht.metricHealthMessage.Get(metricHealthMessageLabel{ + Type: MetricLabelWarning, + }).String() + want := strconv.Itoa( + 2 + // from SetControlHealth + len(baseStrs), + ) + if got != want { + t.Errorf("metricsHealthMessage.Get(warning) = %q, want %q", got, want) + } + }) +} + +func TestControlHealthNotifiesOnSet(t *testing.T) { + ht := Tracker{} + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + + gotNotified := false + ht.registerSyncWatcher(func(_ Change) { + gotNotified = true + }) + + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": {}, + }) + + if !gotNotified { + 
t.Errorf("watcher did not get called, want it to be called") } } @@ -480,12 +560,45 @@ func TestControlHealthNotifiesOnChange(t *testing.T) { ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test-1": {}, + }) + gotNotified := false - ht.registerSyncWatcher(func(_ *Warnable, _ *UnhealthyState) { + ht.registerSyncWatcher(func(_ Change) { gotNotified = true }) - ht.SetControlHealth([]string{"Test message"}) + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test-2": {}, + }) + + if !gotNotified { + t.Errorf("watcher did not get called, want it to be called") + } +} + +func TestControlHealthNotifiesOnDetailsChange(t *testing.T) { + ht := Tracker{} + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test-1": { + Title: "Title", + }, + }) + + gotNotified := false + ht.registerSyncWatcher(func(_ Change) { + gotNotified = true + }) + + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test-1": { + Title: "Updated title", + }, + }) if !gotNotified { t.Errorf("watcher did not get called, want it to be called") @@ -498,16 +611,20 @@ func TestControlHealthNoNotifyOnUnchanged(t *testing.T) { ht.GotStreamedMapResponse() // Set up an existing control health issue - ht.SetControlHealth([]string{"Test message"}) + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": {}, + }) // Now register our watcher gotNotified := false - ht.registerSyncWatcher(func(_ *Warnable, _ *UnhealthyState) { + ht.registerSyncWatcher(func(_ Change) { gotNotified = true }) // Send the same control health message again - should not notify - ht.SetControlHealth([]string{"Test message"}) + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": {}, + }) if gotNotified { t.Errorf("watcher got called, want it to not be called") @@ -519,11 +636,13 @@ func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { ht.SetIPNState("NeedsLogin", true) gotNotified := false - ht.registerSyncWatcher(func(_ *Warnable, _ *UnhealthyState) { + ht.registerSyncWatcher(func(_ Change) { gotNotified = true }) - ht.SetControlHealth([]string{"Test message"}) + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "control-health": {}, + }) state := ht.CurrentState() _, ok := state.Warnings["control-health"] diff --git a/health/state.go b/health/state.go index c06f6ef59..cf4f922d7 100644 --- a/health/state.go +++ b/health/state.go @@ -5,6 +5,8 @@ package health import ( "time" + + "tailscale.com/tailcfg" ) // State contains the health status of the backend, and is @@ -21,7 +23,8 @@ type State struct { } // UnhealthyState contains information to be shown to the user to inform them -// that a Warnable is currently unhealthy. +// that a [Warnable] is currently unhealthy or [tailcfg.DisplayMessage] is being +// sent from the control-plane. type UnhealthyState struct { WarnableCode WarnableCode Severity Severity @@ -98,11 +101,34 @@ func (t *Tracker) CurrentState() *State { wm[w.Code] = *w.unhealthyState(ws) } + for id, msg := range t.lastNotifiedControlMessages { + code := WarnableCode(id) + wm[code] = UnhealthyState{ + WarnableCode: code, + Severity: severityFromTailcfg(msg.Severity), + Title: msg.Title, + Text: msg.Text, + ImpactsConnectivity: msg.ImpactsConnectivity, + // TODO(tailscale/corp#27759): DependsOn? 
+ } + } + return &State{ Warnings: wm, } } +func severityFromTailcfg(s tailcfg.DisplayMessageSeverity) Severity { + switch s { + case tailcfg.SeverityHigh: + return SeverityHigh + case tailcfg.SeverityLow: + return SeverityLow + default: + return SeverityMedium + } +} + // isEffectivelyHealthyLocked reports whether w is effectively healthy. // That means it's either actually healthy or it has a dependency that // that's unhealthy, so we should treat w as healthy to not spam users diff --git a/health/warnings.go b/health/warnings.go index 7a21f9695..3997e66b3 100644 --- a/health/warnings.go +++ b/health/warnings.go @@ -238,16 +238,6 @@ var applyDiskConfigWarnable = Register(&Warnable{ }, }) -// controlHealthWarnable is a Warnable that warns the user that the coordination server is reporting an health issue. -var controlHealthWarnable = Register(&Warnable{ - Code: "control-health", - Title: "Coordination server reports an issue", - Severity: SeverityMedium, - Text: func(args Args) string { - return fmt.Sprintf("The coordination server is reporting an health issue: %v", args[ArgError]) - }, -}) - // warmingUpWarnableDuration is the duration for which the warmingUpWarnable is reported by the backend after the user // has changed ipnWantRunning to true from false. const warmingUpWarnableDuration = 5 * time.Second diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 468fd72eb..d2f6c86f7 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -933,11 +933,15 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { } } -func (b *LocalBackend) onHealthChange(w *health.Warnable, us *health.UnhealthyState) { - if us == nil { - b.logf("health(warnable=%s): ok", w.Code) - } else { - b.logf("health(warnable=%s): error: %s", w.Code, us.Text) +func (b *LocalBackend) onHealthChange(change health.Change) { + if change.WarnableChanged { + w := change.Warnable + us := change.UnhealthyState + if us == nil { + b.logf("health(warnable=%s): ok", w.Code) + } else { + b.logf("health(warnable=%s): error: %s", w.Code, us.Text) + } } // Whenever health changes, send the current health state to the frontend. @@ -5826,7 +5830,7 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.pauseOrResumeControlClientLocked() if nm != nil { - b.health.SetControlHealth(nm.ControlHealth) + b.health.SetControlHealth(nm.DisplayMessages) } else { b.health.SetControlHealth(nil) } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 0a58d8f0c..7e2fa3ffc 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2028,7 +2028,7 @@ type MapResponse struct { // plane's perspective. A nil value means no change from the previous // MapResponse. A non-nil 0-length slice restores the health to good (no // known problems). A non-zero length slice are the list of problems that - // the control place sees. + // the control plane sees. // // Note that this package's type, due its use of a slice and omitempty, is // unable to marshal a zero-length non-nil slice. The control server needs @@ -2078,6 +2078,56 @@ type MapResponse struct { DefaultAutoUpdate opt.Bool `json:",omitempty"` } +// DisplayMessage represents a health state of the node from the control plane's +// perspective. It is deliberately similar to health.Warnable as both get +// converted into health.UnhealthyState to be sent to the GUI. +type DisplayMessage struct { + // Title is a string that the GUI uses as title for this message. The title + // should be short and fit in a single line. 
+ Title string + + // Text is an extended string that the GUI will display to the user. + Text string + + // Severity is the severity of the DisplayMessage, which the GUI can use to + // determine how to display it. Maps to health.Severity. + Severity DisplayMessageSeverity + + // ImpactsConnectivity is whether the health problem will impact the user's + // ability to connect to the Internet or other nodes on the tailnet, which + // the GUI can use to determine how to display it. + ImpactsConnectivity bool `json:",omitempty"` +} + +// DisplayMessageID is a string that uniquely identifies the kind of health +// issue (e.g. "session-expired"). +type DisplayMessageID string + +// Equal returns true iff all fields are equal. +func (m DisplayMessage) Equal(o DisplayMessage) bool { + return m.Title == o.Title && + m.Text == o.Text && + m.Severity == o.Severity && + m.ImpactsConnectivity == o.ImpactsConnectivity +} + +// DisplayMessageSeverity represents how serious a [DisplayMessage] is. Analogous +// to health.Severity. +type DisplayMessageSeverity string + +const ( + // SeverityHigh is the highest severity level, used for critical errors that need immediate attention. + // On platforms where the client GUI can deliver notifications, a SeverityHigh message will trigger + // a modal notification. + SeverityHigh DisplayMessageSeverity = "high" + // SeverityMedium is used for errors that are important but not critical. This won't trigger a modal + // notification, however it will be displayed in a more visible way than a SeverityLow message. + SeverityMedium DisplayMessageSeverity = "medium" + // SeverityLow is used for less important notices that don't need immediate attention. The user will + // have to go to a Settings window, or another "hidden" GUI location to see these messages. + SeverityLow DisplayMessageSeverity = "low" +) + // ClientVersion is information about the latest client version that's available // for the client (and whether they're already running it). 
// diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index 079162a15..60e86794a 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -878,3 +878,79 @@ func TestCheckTag(t *testing.T) { }) } } + +func TestDisplayMessageEqual(t *testing.T) { + base := DisplayMessage{ + Title: "title", + Text: "text", + Severity: SeverityHigh, + ImpactsConnectivity: false, + } + + type test struct { + name string + value DisplayMessage + wantEqual bool + } + + for _, test := range []test{ + { + name: "same", + value: DisplayMessage{ + Title: "title", + Text: "text", + Severity: SeverityHigh, + ImpactsConnectivity: false, + }, + wantEqual: true, + }, + { + name: "different-title", + value: DisplayMessage{ + Title: "different title", + Text: "text", + Severity: SeverityHigh, + ImpactsConnectivity: false, + }, + wantEqual: false, + }, + { + name: "different-text", + value: DisplayMessage{ + Title: "title", + Text: "different text", + Severity: SeverityHigh, + ImpactsConnectivity: false, + }, + wantEqual: false, + }, + { + name: "different-severity", + value: DisplayMessage{ + Title: "title", + Text: "text", + Severity: SeverityMedium, + ImpactsConnectivity: false, + }, + wantEqual: false, + }, + { + name: "different-impactsConnectivity", + value: DisplayMessage{ + Title: "title", + Text: "text", + Severity: SeverityHigh, + ImpactsConnectivity: true, + }, + wantEqual: false, + }, + } { + t.Run(test.name, func(t *testing.T) { + got := base.Equal(test.value) + + if got != test.wantEqual { + t.Errorf("Equal: got %t, want %t", got, test.wantEqual) + } + }) + } +} diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index c6250c49c..963f80a44 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -54,12 +54,12 @@ type NetworkMap struct { // between updates and should not be modified. DERPMap *tailcfg.DERPMap - // ControlHealth are the list of health check problems for this + // DisplayMessages are the list of health check problems for this // node from the perspective of the control plane. // If empty, there are no known problems from the control plane's // point of view, but the node might know about its own health // check problems. - ControlHealth []string + DisplayMessages map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage // TKAEnabled indicates whether the tailnet key authority should be // enabled, from the perspective of the control plane. From 3ee4c60ff0257d11842523c1c59492345030dce2 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Thu, 22 May 2025 12:14:16 -0700 Subject: [PATCH 0888/1708] cmd/derper: fix mesh auth for DERP servers (#16061) To authenticate mesh keys, the DERP servers used a simple == comparison, which is susceptible to a side channel timing attack. By extracting the mesh key for a DERP server, an attacker could DoS it by forcing disconnects using derp.Client.ClosePeer. They could also enumerate the public Wireguard keys, IP addresses and ports for nodes connected to that DERP server. DERP servers configured without mesh keys deny all such requests. This patch also extracts the mesh key logic into key.DERPMesh, to prevent this from happening again. 
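As a minimal sketch of the comparison change, assuming a fixed 32-byte key representation like the key.DERPMesh type added below (meshKeysEqual is a hypothetical helper, not the exported API):

	package key

	import "crypto/subtle"

	// meshKeysEqual compares two fixed-length mesh keys in constant time,
	// so the comparison's duration does not leak how many leading bytes
	// match the way a plain == comparison can.
	func meshKeysEqual(a, b [32]byte) bool {
		return subtle.ConstantTimeCompare(a[:], b[:]) == 1
	}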
Security bulletin: https://tailscale.com/security-bulletins#ts-2025-003 Fixes tailscale/corp#28720 Signed-off-by: Simon Law --- cmd/derper/derper.go | 14 +--- cmd/derper/derper_test.go | 43 ---------- derp/derp_client.go | 8 +- derp/derp_server.go | 28 +++++-- derp/derp_test.go | 103 +++++++++++++++++++++++- derp/derphttp/derphttp_client.go | 2 +- derp/derphttp/derphttp_test.go | 10 ++- types/key/derp.go | 68 ++++++++++++++++ types/key/derp_test.go | 133 +++++++++++++++++++++++++++++++ 9 files changed, 338 insertions(+), 71 deletions(-) create mode 100644 types/key/derp.go create mode 100644 types/key/derp_test.go diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 3c6fda68c..840de3fba 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -96,9 +96,6 @@ var ( var ( tlsRequestVersion = &metrics.LabelMap{Label: "version"} tlsActiveVersion = &metrics.LabelMap{Label: "version"} - - // Exactly 64 hexadecimal lowercase digits. - validMeshKey = regexp.MustCompile(`^[0-9a-f]{64}$`) ) const setecMeshKeyName = "meshkey" @@ -159,14 +156,6 @@ func writeNewConfig() config { return cfg } -func checkMeshKey(key string) (string, error) { - key = strings.TrimSpace(key) - if !validMeshKey.MatchString(key) { - return "", errors.New("key must contain exactly 64 hex digits") - } - return key, nil -} - func main() { flag.Parse() if *versionFlag { @@ -246,10 +235,9 @@ func main() { log.Printf("No mesh key configured for --dev mode") } else if meshKey == "" { log.Printf("No mesh key configured") - } else if key, err := checkMeshKey(meshKey); err != nil { + } else if err := s.SetMeshKey(meshKey); err != nil { log.Fatalf("invalid mesh key: %v", err) } else { - s.SetMeshKey(key) log.Println("DERP mesh key configured") } diff --git a/cmd/derper/derper_test.go b/cmd/derper/derper_test.go index 12686ce4e..6dce1fcdf 100644 --- a/cmd/derper/derper_test.go +++ b/cmd/derper/derper_test.go @@ -138,46 +138,3 @@ func TestTemplate(t *testing.T) { t.Error("Output is missing debug info") } } - -func TestCheckMeshKey(t *testing.T) { - testCases := []struct { - name string - input string - want string - wantErr bool - }{ - { - name: "KeyOkay", - input: "f1ffafffffffffffffffffffffffffffffffffffffffffffffffff2ffffcfff6", - want: "f1ffafffffffffffffffffffffffffffffffffffffffffffffffff2ffffcfff6", - wantErr: false, - }, - { - name: "TrimKeyOkay", - input: " f1ffafffffffffffffffffffffffffffffffffffffffffffffffff2ffffcfff6 ", - want: "f1ffafffffffffffffffffffffffffffffffffffffffffffffffff2ffffcfff6", - wantErr: false, - }, - { - name: "NotAKey", - input: "zzthisisnotakey", - want: "", - wantErr: true, - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - k, err := checkMeshKey(tt.input) - if err != nil && !tt.wantErr { - t.Errorf("unexpected error: %v", err) - } - if k != tt.want && err == nil { - t.Errorf("want: %s doesn't match expected: %s", tt.want, k) - } - - }) - } - -} diff --git a/derp/derp_client.go b/derp/derp_client.go index 7a646fa51..a9b92299c 100644 --- a/derp/derp_client.go +++ b/derp/derp_client.go @@ -30,7 +30,7 @@ type Client struct { logf logger.Logf nc Conn br *bufio.Reader - meshKey string + meshKey key.DERPMesh canAckPings bool isProber bool @@ -56,7 +56,7 @@ func (f clientOptFunc) update(o *clientOpt) { f(o) } // clientOpt are the options passed to newClient. type clientOpt struct { - MeshKey string + MeshKey key.DERPMesh ServerPub key.NodePublic CanAckPings bool IsProber bool @@ -66,7 +66,7 @@ type clientOpt struct { // access to join the mesh. 
// // An empty key means to not use a mesh key. -func MeshKey(key string) ClientOpt { return clientOptFunc(func(o *clientOpt) { o.MeshKey = key }) } +func MeshKey(k key.DERPMesh) ClientOpt { return clientOptFunc(func(o *clientOpt) { o.MeshKey = k }) } // IsProber returns a ClientOpt to pass to the DERP server during connect to // declare that this client is a a prober. @@ -182,7 +182,7 @@ type clientInfo struct { func (c *Client) sendClientKey() error { msg, err := json.Marshal(clientInfo{ Version: ProtocolVersion, - MeshKey: c.meshKey, + MeshKey: c.meshKey.String(), CanAckPings: c.canAckPings, IsProber: c.isProber, }) diff --git a/derp/derp_server.go b/derp/derp_server.go index abda9da73..6f86c3ea4 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -134,7 +134,7 @@ type Server struct { publicKey key.NodePublic logf logger.Logf memSys0 uint64 // runtime.MemStats.Sys at start (or early-ish) - meshKey string + meshKey key.DERPMesh limitedLogf logger.Logf metaCert []byte // the encoded x509 cert to send after LetsEncrypt cert+intermediate dupPolicy dupPolicy @@ -464,8 +464,13 @@ func genDroppedCounters() { // amongst themselves. // // It must be called before serving begins. -func (s *Server) SetMeshKey(v string) { - s.meshKey = v +func (s *Server) SetMeshKey(v string) error { + k, err := key.ParseDERPMesh(v) + if err != nil { + return err + } + s.meshKey = k + return nil } // SetVerifyClients sets whether this DERP server verifies clients through tailscaled. @@ -506,10 +511,10 @@ func (s *Server) SetTCPWriteTimeout(d time.Duration) { } // HasMeshKey reports whether the server is configured with a mesh key. -func (s *Server) HasMeshKey() bool { return s.meshKey != "" } +func (s *Server) HasMeshKey() bool { return !s.meshKey.IsZero() } // MeshKey returns the configured mesh key, if any. -func (s *Server) MeshKey() string { return s.meshKey } +func (s *Server) MeshKey() key.DERPMesh { return s.meshKey } // PrivateKey returns the server's private key. func (s *Server) PrivateKey() key.NodePrivate { return s.privateKey } @@ -1355,7 +1360,18 @@ func (c *sclient) requestMeshUpdate() { // isMeshPeer reports whether the client is a trusted mesh peer // node in the DERP region. func (s *Server) isMeshPeer(info *clientInfo) bool { - return info != nil && info.MeshKey != "" && info.MeshKey == s.meshKey + // Compare mesh keys in constant time to prevent timing attacks. + // Since mesh keys are a fixed length, we don’t need to be concerned + // about timing attacks on client mesh keys that are the wrong length. 
+ // See https://github.com/tailscale/corp/issues/28720 + if info == nil || info.MeshKey == "" { + return false + } + k, err := key.ParseDERPMesh(info.MeshKey) + if err != nil { + return false + } + return s.meshKey.Equal(k) } // verifyClient checks whether the client is allowed to connect to the derper, diff --git a/derp/derp_test.go b/derp/derp_test.go index c5a92bafa..0093ee2b1 100644 --- a/derp/derp_test.go +++ b/derp/derp_test.go @@ -511,11 +511,13 @@ func (ts *testServer) close(t *testing.T) error { return nil } +const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" + func newTestServer(t *testing.T, ctx context.Context) *testServer { t.Helper() logf := logger.WithPrefix(t.Logf, "derp-server: ") s := NewServer(key.NewNode(), logf) - s.SetMeshKey("mesh-key") + s.SetMeshKey(testMeshKey) ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatal(err) @@ -591,8 +593,12 @@ func newRegularClient(t *testing.T, ts *testServer, name string) *testClient { func newTestWatcher(t *testing.T, ts *testServer, name string) *testClient { return newTestClient(t, ts, name, func(nc net.Conn, priv key.NodePrivate, logf logger.Logf) (*Client, error) { + mk, err := key.ParseDERPMesh(testMeshKey) + if err != nil { + return nil, err + } brw := bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)) - c, err := NewClient(priv, nc, brw, logf, MeshKey("mesh-key")) + c, err := NewClient(priv, nc, brw, logf, MeshKey(mk)) if err != nil { return nil, err } @@ -1627,3 +1633,96 @@ func TestGetPerClientSendQueueDepth(t *testing.T) { }) } } + +func TestSetMeshKey(t *testing.T) { + for name, tt := range map[string]struct { + key string + want key.DERPMesh + wantErr bool + }{ + "clobber": { + key: testMeshKey, + wantErr: false, + }, + "invalid": { + key: "badf00d", + wantErr: true, + }, + } { + t.Run(name, func(t *testing.T) { + s := &Server{} + + err := s.SetMeshKey(tt.key) + if tt.wantErr { + if err == nil { + t.Fatalf("expected err") + } + return + } + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + + want, err := key.ParseDERPMesh(tt.key) + if err != nil { + t.Fatal(err) + } + if !s.meshKey.Equal(want) { + t.Fatalf("got %v, want %v", s.meshKey, want) + } + }) + } +} + +func TestIsMeshPeer(t *testing.T) { + s := &Server{} + err := s.SetMeshKey(testMeshKey) + if err != nil { + t.Fatal(err) + } + for name, tt := range map[string]struct { + info *clientInfo + want bool + wantAllocs float64 + }{ + "nil": { + info: nil, + want: false, + wantAllocs: 0, + }, + "empty": { + info: &clientInfo{MeshKey: ""}, + want: false, + wantAllocs: 0, + }, + "invalid": { + info: &clientInfo{MeshKey: "invalid"}, + want: false, + wantAllocs: 2, // error message + }, + "mismatch": { + info: &clientInfo{MeshKey: "0badf00d00000000000000000000000000000000000000000000000000000000"}, + want: false, + wantAllocs: 1, + }, + "match": { + info: &clientInfo{MeshKey: testMeshKey}, + want: true, + wantAllocs: 1, + }, + } { + t.Run(name, func(t *testing.T) { + var got bool + allocs := testing.AllocsPerRun(1, func() { + got = s.isMeshPeer(tt.info) + }) + if got != tt.want { + t.Fatalf("got %t, want %t: info = %#v", got, tt.want, tt.info) + } + + if allocs != tt.wantAllocs && tt.want { + t.Errorf("%f allocations, want %f", allocs, tt.wantAllocs) + } + }) + } +} diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index faa218ca2..8c42e9070 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -57,7 +57,7 @@ type Client struct { TLSConfig 
*tls.Config // optional; nil means default HealthTracker *health.Tracker // optional; used if non-nil only DNSCache *dnscache.Resolver // optional; nil means no caching - MeshKey string // optional; for trusted clients + MeshKey key.DERPMesh // optional; for trusted clients IsProber bool // optional; for probers to optional declare themselves as such // WatchConnectionChanges is whether the client wishes to subscribe to diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index cfb3676cd..8d02db922 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -212,6 +212,8 @@ func TestPing(t *testing.T) { } } +const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" + func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derp.Server) { s = derp.NewServer(k, t.Logf) httpsrv := &http.Server{ @@ -224,7 +226,7 @@ func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derp.S t.Fatal(err) } serverURL = "http://" + ln.Addr().String() - s.SetMeshKey("1234") + s.SetMeshKey(testMeshKey) go func() { if err := httpsrv.Serve(ln); err != nil { @@ -243,7 +245,11 @@ func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToW if err != nil { t.Fatal(err) } - c.MeshKey = "1234" + k, err := key.ParseDERPMesh(testMeshKey) + if err != nil { + t.Fatal(err) + } + c.MeshKey = k return } diff --git a/types/key/derp.go b/types/key/derp.go new file mode 100644 index 000000000..1fe690189 --- /dev/null +++ b/types/key/derp.go @@ -0,0 +1,68 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package key + +import ( + "crypto/subtle" + "encoding/hex" + "errors" + "fmt" + "strings" + + "go4.org/mem" + "tailscale.com/types/structs" +) + +var ErrInvalidMeshKey = errors.New("invalid mesh key") + +// DERPMesh is a mesh key, used for inter-DERP-node communication and for +// privileged DERP clients. +type DERPMesh struct { + _ structs.Incomparable // == isn't constant-time + k [32]byte // 64-digit hexadecimal numbers fit in 32 bytes +} + +// DERPMeshFromRaw32 parses a 32-byte raw value as a DERP mesh key. +func DERPMeshFromRaw32(raw mem.RO) DERPMesh { + if raw.Len() != 32 { + panic("input has wrong size") + } + var ret DERPMesh + raw.Copy(ret.k[:]) + return ret +} + +// ParseDERPMesh parses a DERP mesh key from a string. +// This function trims whitespace around the string. +// If the key is not a 64-digit hexadecimal number, ErrInvalidMeshKey is returned. +func ParseDERPMesh(key string) (DERPMesh, error) { + key = strings.TrimSpace(key) + if len(key) != 64 { + return DERPMesh{}, fmt.Errorf("%w: must be 64-digit hexadecimal number", ErrInvalidMeshKey) + } + decoded, err := hex.DecodeString(key) + if err != nil { + return DERPMesh{}, fmt.Errorf("%w: %v", ErrInvalidMeshKey, err) + } + return DERPMeshFromRaw32(mem.B(decoded)), nil +} + +// IsZero reports whether k is the zero value. +func (k DERPMesh) IsZero() bool { + return k.Equal(DERPMesh{}) +} + +// Equal reports whether k and other are the same key. +func (k DERPMesh) Equal(other DERPMesh) bool { + // Compare mesh keys in constant time to prevent timing attacks. + // Since mesh keys are a fixed length, we don’t need to be concerned + // about timing attacks on client mesh keys that are the wrong length. + // See https://github.com/tailscale/corp/issues/28720 + return subtle.ConstantTimeCompare(k.k[:], other.k[:]) == 1 +} + +// String returns k as a hex-encoded 64-digit number. 
+func (k DERPMesh) String() string { + return hex.EncodeToString(k.k[:]) +} diff --git a/types/key/derp_test.go b/types/key/derp_test.go new file mode 100644 index 000000000..b91cbbf8c --- /dev/null +++ b/types/key/derp_test.go @@ -0,0 +1,133 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package key + +import ( + "errors" + "testing" + + "go4.org/mem" +) + +func TestDERPMeshIsValid(t *testing.T) { + for name, tt := range map[string]struct { + input string + want string + wantErr error + }{ + "good": { + input: "0123456789012345678901234567890123456789012345678901234567890123", + want: "0123456789012345678901234567890123456789012345678901234567890123", + wantErr: nil, + }, + "hex": { + input: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", + want: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", + wantErr: nil, + }, + "uppercase": { + input: "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF", + want: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", + wantErr: nil, + }, + "whitespace": { + input: " 0123456789012345678901234567890123456789012345678901234567890123 ", + want: "0123456789012345678901234567890123456789012345678901234567890123", + wantErr: nil, + }, + "short": { + input: "0123456789abcdef", + wantErr: ErrInvalidMeshKey, + }, + "long": { + input: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0", + wantErr: ErrInvalidMeshKey, + }, + } { + t.Run(name, func(t *testing.T) { + k, err := ParseDERPMesh(tt.input) + if !errors.Is(err, tt.wantErr) { + t.Errorf("err %v, want %v", err, tt.wantErr) + } + + got := k.String() + if got != tt.want && tt.wantErr == nil { + t.Errorf("got %q, want %q", got, tt.want) + } + + }) + } + +} + +func TestDERPMesh(t *testing.T) { + t.Parallel() + + for name, tt := range map[string]struct { + str string + hex []byte + equal bool // are str and hex equal? 
+ }{ + "zero": { + str: "0000000000000000000000000000000000000000000000000000000000000000", + hex: []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + equal: true, + }, + "equal": { + str: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", + hex: []byte{ + 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, + 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, + 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, + 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, + }, + equal: true, + }, + "unequal": { + str: "0badc0de00000000000000000000000000000000000000000000000000000000", + hex: []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + equal: false, + }, + } { + t.Run(name, func(t *testing.T) { + t.Parallel() + + k, err := ParseDERPMesh(tt.str) + if err != nil { + t.Fatal(err) + } + + // string representation should round-trip + s := k.String() + if s != tt.str { + t.Fatalf("string %s, want %s", s, tt.str) + } + + // if tt.equal, then tt.hex is intended to be equal + if k.k != [32]byte(tt.hex) && tt.equal { + t.Fatalf("decoded %x, want %x", k.k, tt.hex) + } + + h := DERPMeshFromRaw32(mem.B(tt.hex)) + if k.Equal(h) != tt.equal { + if tt.equal { + t.Fatalf("%v != %v", k, h) + } else { + t.Fatalf("%v == %v", k, h) + } + } + + }) + } +} From a05924a9e5018da6f64fd92eb9ba37e599cab567 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Thu, 22 May 2025 12:26:02 -0700 Subject: [PATCH 0889/1708] client/web: add Sec-Fetch-Site CSRF protection (#16046) RELNOTE=Fix CSRF errors in the client Web UI Replace gorilla/csrf with a Sec-Fetch-Site based CSRF protection middleware that falls back to comparing the Host & Origin headers if no SFS value is passed by the client. Add an -origin override to the web CLI that allows callers to specify the origin at which the web UI will be available if it is hosted behind a reverse proxy or within another application via CGI. 
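A rough sketch of the request check described above, assuming it only needs to run for state-changing (non-GET/HEAD/OPTIONS) requests; isSameOrigin is a hypothetical name, the real middleware is csrfProtect in client/web/web.go below:

	import (
		"net/http"
		"net/url"
	)

	// isSameOrigin trusts the browser-set Sec-Fetch-Site header when present,
	// and otherwise falls back to comparing the Origin header's host against
	// the request's Host (or a configured origin override behind a proxy/CGI).
	func isSameOrigin(r *http.Request) bool {
		switch r.Header.Get("Sec-Fetch-Site") {
		case "same-origin":
			return true
		case "":
			o, err := url.Parse(r.Header.Get("Origin"))
			return err == nil && o.Host == r.Host
		default:
			return false
		}
	}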
Updates #14872 Updates #15065 Signed-off-by: Patrick O'Doherty --- client/web/src/api.ts | 10 --- client/web/web.go | 137 +++++++++++++++------------- client/web/web_test.go | 163 +++++++++++++++++++--------------- cmd/k8s-operator/depaware.txt | 9 +- cmd/tailscale/cli/web.go | 5 ++ cmd/tailscale/depaware.txt | 9 +- cmd/tailscaled/depaware.txt | 9 +- tsnet/depaware.txt | 9 +- 8 files changed, 183 insertions(+), 168 deletions(-) diff --git a/client/web/src/api.ts b/client/web/src/api.ts index 9414e2d5d..e780c7645 100644 --- a/client/web/src/api.ts +++ b/client/web/src/api.ts @@ -249,7 +249,6 @@ export function useAPI() { return api } -let csrfToken: string let synoToken: string | undefined // required for synology API requests let unraidCsrfToken: string | undefined // required for unraid POST requests (#8062) @@ -298,12 +297,10 @@ export function apiFetch( headers: { Accept: "application/json", "Content-Type": contentType, - "X-CSRF-Token": csrfToken, }, body: body, }) .then((r) => { - updateCsrfToken(r) if (!r.ok) { return r.text().then((err) => { throw new Error(err) @@ -322,13 +319,6 @@ export function apiFetch( }) } -function updateCsrfToken(r: Response) { - const tok = r.headers.get("X-CSRF-Token") - if (tok) { - csrfToken = tok - } -} - export function setSynoToken(token?: string) { synoToken = token } diff --git a/client/web/web.go b/client/web/web.go index 6eccdadcf..f3158cd1f 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -6,7 +6,6 @@ package web import ( "context" - "crypto/rand" "encoding/json" "errors" "fmt" @@ -14,14 +13,14 @@ import ( "log" "net/http" "net/netip" + "net/url" "os" "path" - "path/filepath" + "slices" "strings" "sync" "time" - "github.com/gorilla/csrf" "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/clientupdate" @@ -60,6 +59,12 @@ type Server struct { cgiMode bool pathPrefix string + // originOverride is the origin that the web UI is accessible from. + // This value is used in the fallback CSRF checks when Sec-Fetch-Site is not + // available. In this case the application will compare Host and Origin + // header values to determine if the request is from the same origin. + originOverride string + apiHandler http.Handler // serves api endpoints; csrf-protected assetsHandler http.Handler // serves frontend assets assetsCleanup func() // called from Server.Shutdown @@ -150,6 +155,9 @@ type ServerOpts struct { // as completed. // This field is required for ManageServerMode mode. WaitAuthURL func(ctx context.Context, id string, src tailcfg.NodeID) (*tailcfg.WebClientAuthResponse, error) + + // OriginOverride specifies the origin that the web UI will be accessible from if hosted behind a reverse proxy or CGI. + OriginOverride string } // NewServer constructs a new Tailscale web client server. 
@@ -169,15 +177,16 @@ func NewServer(opts ServerOpts) (s *Server, err error) { opts.LocalClient = &local.Client{} } s = &Server{ - mode: opts.Mode, - logf: opts.Logf, - devMode: envknob.Bool("TS_DEBUG_WEB_CLIENT_DEV"), - lc: opts.LocalClient, - cgiMode: opts.CGIMode, - pathPrefix: opts.PathPrefix, - timeNow: opts.TimeNow, - newAuthURL: opts.NewAuthURL, - waitAuthURL: opts.WaitAuthURL, + mode: opts.Mode, + logf: opts.Logf, + devMode: envknob.Bool("TS_DEBUG_WEB_CLIENT_DEV"), + lc: opts.LocalClient, + cgiMode: opts.CGIMode, + pathPrefix: opts.PathPrefix, + timeNow: opts.TimeNow, + newAuthURL: opts.NewAuthURL, + waitAuthURL: opts.WaitAuthURL, + originOverride: opts.OriginOverride, } if opts.PathPrefix != "" { // Enforce that path prefix always has a single leading '/' @@ -205,7 +214,7 @@ func NewServer(opts ServerOpts) (s *Server, err error) { var metric string s.apiHandler, metric = s.modeAPIHandler(s.mode) - s.apiHandler = s.withCSRF(s.apiHandler) + s.apiHandler = s.csrfProtect(s.apiHandler) // Don't block startup on reporting metric. // Report in separate go routine with 5 second timeout. @@ -218,23 +227,64 @@ func NewServer(opts ServerOpts) (s *Server, err error) { return s, nil } -func (s *Server) withCSRF(h http.Handler) http.Handler { - csrfProtect := csrf.Protect(s.csrfKey(), csrf.Secure(false)) +func (s *Server) csrfProtect(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // CSRF is not required for GET, HEAD, or OPTIONS requests. + if slices.Contains([]string{"GET", "HEAD", "OPTIONS"}, r.Method) { + h.ServeHTTP(w, r) + return + } - // ref https://github.com/tailscale/tailscale/pull/14822 - // signal to the CSRF middleware that the request is being served over - // plaintext HTTP to skip TLS-only header checks. - withSetPlaintext := func(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - r = csrf.PlaintextHTTPRequest(r) + // first attempt to use Sec-Fetch-Site header (sent by all modern + // browsers to "potentially trustworthy" origins i.e. localhost or those + // served over HTTPS) + secFetchSite := r.Header.Get("Sec-Fetch-Site") + if secFetchSite == "same-origin" { h.ServeHTTP(w, r) - }) - } + return + } else if secFetchSite != "" { + http.Error(w, fmt.Sprintf("CSRF request denied with Sec-Fetch-Site %q", secFetchSite), http.StatusForbidden) + return + } + + // if Sec-Fetch-Site is not available we presume we are operating over HTTP. + // We fall back to comparing the Origin & Host headers. 
+ + // use the Host header to determine the expected origin + // (use the override if set to allow for reverse proxying) + host := r.Host + if host == "" { + http.Error(w, "CSRF request denied with no Host header", http.StatusForbidden) + return + } + if s.originOverride != "" { + host = s.originOverride + } + + originHeader := r.Header.Get("Origin") + if originHeader == "" { + http.Error(w, "CSRF request denied with no Origin header", http.StatusForbidden) + return + } + parsedOrigin, err := url.Parse(originHeader) + if err != nil { + http.Error(w, fmt.Sprintf("CSRF request denied with invalid Origin %q", r.Header.Get("Origin")), http.StatusForbidden) + return + } + origin := parsedOrigin.Host + if origin == "" { + http.Error(w, "CSRF request denied with no host in the Origin header", http.StatusForbidden) + return + } + + if origin != host { + http.Error(w, fmt.Sprintf("CSRF request denied with mismatched Origin %q and Host %q", origin, host), http.StatusForbidden) + return + } + + h.ServeHTTP(w, r) - // NB: the order of the withSetPlaintext and csrfProtect calls is important - // to ensure that we signal to the CSRF middleware that the request is being - // served over plaintext HTTP and not over TLS as it presumes by default. - return withSetPlaintext(csrfProtect(h)) + }) } func (s *Server) modeAPIHandler(mode ServerMode) (http.Handler, string) { @@ -452,7 +502,6 @@ func (s *Server) authorizeRequest(w http.ResponseWriter, r *http.Request) (ok bo // It should only be called by Server.ServeHTTP, via Server.apiHandler, // which protects the handler using gorilla csrf. func (s *Server) serveLoginAPI(w http.ResponseWriter, r *http.Request) { - w.Header().Set("X-CSRF-Token", csrf.Token(r)) switch { case r.URL.Path == "/api/data" && r.Method == httpm.GET: s.serveGetNodeData(w, r) @@ -575,7 +624,6 @@ func (s *Server) serveAPI(w http.ResponseWriter, r *http.Request) { } } - w.Header().Set("X-CSRF-Token", csrf.Token(r)) path := strings.TrimPrefix(r.URL.Path, "/api") switch { case path == "/data" && r.Method == httpm.GET: @@ -1276,37 +1324,6 @@ func (s *Server) proxyRequestToLocalAPI(w http.ResponseWriter, r *http.Request) } } -// csrfKey returns a key that can be used for CSRF protection. -// If an error occurs during key creation, the error is logged and the active process terminated. -// If the server is running in CGI mode, the key is cached to disk and reused between requests. -// If an error occurs during key storage, the error is logged and the active process terminated. -func (s *Server) csrfKey() []byte { - csrfFile := filepath.Join(os.TempDir(), "tailscale-web-csrf.key") - - // if running in CGI mode, try to read from disk, but ignore errors - if s.cgiMode { - key, _ := os.ReadFile(csrfFile) - if len(key) == 32 { - return key - } - } - - // create a new key - key := make([]byte, 32) - if _, err := rand.Read(key); err != nil { - log.Fatalf("error generating CSRF key: %v", err) - } - - // if running in CGI mode, try to write the newly created key to disk, and exit if it fails. - if s.cgiMode { - if err := os.WriteFile(csrfFile, key, 0600); err != nil { - log.Fatalf("unable to store CSRF key: %v", err) - } - } - - return key -} - // enforcePrefix returns a HandlerFunc that enforces a given path prefix is used in requests, // then strips it before invoking h. // Unlike http.StripPrefix, it does not return a 404 if the prefix is not present. 
diff --git a/client/web/web_test.go b/client/web/web_test.go index 2a6bc787a..12dbb5c79 100644 --- a/client/web/web_test.go +++ b/client/web/web_test.go @@ -11,7 +11,6 @@ import ( "fmt" "io" "net/http" - "net/http/cookiejar" "net/http/httptest" "net/netip" "net/url" @@ -21,14 +20,12 @@ import ( "time" "github.com/google/go-cmp/cmp" - "github.com/gorilla/csrf" "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/net/memnet" "tailscale.com/tailcfg" - "tailscale.com/tstest/nettest" "tailscale.com/types/views" "tailscale.com/util/httpm" ) @@ -1492,81 +1489,99 @@ func mockWaitAuthURL(_ context.Context, id string, src tailcfg.NodeID) (*tailcfg } func TestCSRFProtect(t *testing.T) { - s := &Server{} - - mux := http.NewServeMux() - mux.HandleFunc("GET /test/csrf-token", func(w http.ResponseWriter, r *http.Request) { - token := csrf.Token(r) - _, err := io.WriteString(w, token) - if err != nil { - t.Fatal(err) - } - }) - mux.HandleFunc("POST /test/csrf-protected", func(w http.ResponseWriter, r *http.Request) { - _, err := io.WriteString(w, "ok") - if err != nil { - t.Fatal(err) - } - }) - h := s.withCSRF(mux) - ser := nettest.NewHTTPServer(nettest.GetNetwork(t), h) - defer ser.Close() - - jar, err := cookiejar.New(nil) - if err != nil { - t.Fatalf("unable to construct cookie jar: %v", err) + tests := []struct { + name string + method string + secFetchSite string + host string + origin string + originOverride string + wantError bool + }{ + { + name: "GET requests with no header are allowed", + method: "GET", + }, + { + name: "POST requests with same-origin are allowed", + method: "POST", + secFetchSite: "same-origin", + }, + { + name: "POST requests with cross-site are not allowed", + method: "POST", + secFetchSite: "cross-site", + wantError: true, + }, + { + name: "POST requests with unknown sec-fetch-site values are not allowed", + method: "POST", + secFetchSite: "new-unknown-value", + wantError: true, + }, + { + name: "POST requests with none are not allowed", + method: "POST", + secFetchSite: "none", + wantError: true, + }, + { + name: "POST requests with no sec-fetch-site header but matching host and origin are allowed", + method: "POST", + host: "example.com", + origin: "https://example.com", + }, + { + name: "POST requests with no sec-fetch-site and non-matching host and origin are not allowed", + method: "POST", + host: "example.com", + origin: "https://example.net", + wantError: true, + }, + { + name: "POST requests with no sec-fetch-site and and origin that matches the override are allowed", + method: "POST", + originOverride: "example.net", + host: "internal.example.foo", // Host can be changed by reverse proxies + origin: "http://example.net", + }, } - client := ser.Client() - client.Jar = jar - - // make GET request to populate cookie jar - resp, err := client.Get(ser.URL + "/test/csrf-token") - if err != nil { - t.Fatalf("unable to make request: %v", err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("unexpected status: %v", resp.Status) - } - tokenBytes, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("unable to read body: %v", err) - } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "OK") + }) - csrfToken := strings.TrimSpace(string(tokenBytes)) - if csrfToken == "" { - t.Fatal("empty csrf token") - } + s := &Server{ + originOverride: 
tt.originOverride, + } + withCSRF := s.csrfProtect(handler) - // make a POST request without the CSRF header; ensure it fails - resp, err = client.Post(ser.URL+"/test/csrf-protected", "text/plain", nil) - if err != nil { - t.Fatalf("unable to make request: %v", err) - } - if resp.StatusCode != http.StatusForbidden { - t.Fatalf("unexpected status: %v", resp.Status) - } + r := httptest.NewRequest(tt.method, "http://example.com/", nil) + if tt.secFetchSite != "" { + r.Header.Set("Sec-Fetch-Site", tt.secFetchSite) + } + if tt.host != "" { + r.Host = tt.host + } + if tt.origin != "" { + r.Header.Set("Origin", tt.origin) + } - // make a POST request with the CSRF header; ensure it succeeds - req, err := http.NewRequest("POST", ser.URL+"/test/csrf-protected", nil) - if err != nil { - t.Fatalf("error building request: %v", err) - } - req.Header.Set("X-CSRF-Token", csrfToken) - resp, err = client.Do(req) - if err != nil { - t.Fatalf("unable to make request: %v", err) - } - if resp.StatusCode != http.StatusOK { - t.Fatalf("unexpected status: %v", resp.Status) - } - defer resp.Body.Close() - out, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("unable to read body: %v", err) - } - if string(out) != "ok" { - t.Fatalf("unexpected body: %q", out) + w := httptest.NewRecorder() + withCSRF.ServeHTTP(w, r) + res := w.Result() + defer res.Body.Close() + if tt.wantError { + if res.StatusCode != http.StatusForbidden { + t.Errorf("expected status forbidden, got %v", res.StatusCode) + } + return + } + if res.StatusCode != http.StatusOK { + t.Errorf("expected status ok, got %v", res.StatusCode) + } + }) } } diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 12fb5cf2e..782603df0 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -144,8 +144,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ L github.com/google/nftables/xt from github.com/google/nftables/expr+ github.com/google/uuid from github.com/prometheus-community/pro-bing+ - github.com/gorilla/csrf from tailscale.com/client/web - github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 @@ -1112,13 +1110,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from github.com/tailscale/web-client-prebuilt+ - encoding from encoding/gob+ + encoding from encoding/json+ encoding/asn1 from crypto/x509+ encoding/base32 from github.com/fxamacker/cbor/v2+ encoding/base64 from encoding/json+ encoding/binary from compress/gzip+ encoding/csv from github.com/spf13/pflag - encoding/gob from github.com/gorilla/securecookie encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ @@ -1140,7 +1137,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ hash/fnv from google.golang.org/protobuf/internal/detrand hash/maphash from go4.org/mem html from html/template+ - html/template from github.com/gorilla/csrf+ + html/template from tailscale.com/util/eventbus internal/abi from crypto/x509/internal/macos+ internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug @@ -1172,7 
+1169,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/runtime/math from internal/runtime/maps+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ - internal/saferio from debug/pe+ + W internal/saferio from debug/pe internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/tailscale/cli/web.go b/cmd/tailscale/cli/web.go index e209d388e..5e1821dd0 100644 --- a/cmd/tailscale/cli/web.go +++ b/cmd/tailscale/cli/web.go @@ -43,6 +43,7 @@ Tailscale, as opposed to a CLI or a native app. webf.BoolVar(&webArgs.cgi, "cgi", false, "run as CGI script") webf.StringVar(&webArgs.prefix, "prefix", "", "URL prefix added to requests (for cgi or reverse proxies)") webf.BoolVar(&webArgs.readonly, "readonly", false, "run web UI in read-only mode") + webf.StringVar(&webArgs.origin, "origin", "", "origin at which the web UI is served (if behind a reverse proxy or used with cgi)") return webf })(), Exec: runWeb, @@ -53,6 +54,7 @@ var webArgs struct { cgi bool prefix string readonly bool + origin string } func tlsConfigFromEnvironment() *tls.Config { @@ -115,6 +117,9 @@ func runWeb(ctx context.Context, args []string) error { if webArgs.readonly { opts.Mode = web.ReadOnlyServerMode } + if webArgs.origin != "" { + opts.OriginOverride = webArgs.origin + } webServer, err := web.NewServer(opts) if err != nil { log.Printf("tailscale.web: %v", err) diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 03bf2f94c..8c3b404b1 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -27,8 +27,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ L github.com/google/nftables/xt from github.com/google/nftables/expr+ DW github.com/google/uuid from tailscale.com/clientupdate+ - github.com/gorilla/csrf from tailscale.com/client/web - github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink @@ -319,12 +317,11 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from github.com/peterbourgon/ff/v3+ - encoding from encoding/gob+ + encoding from encoding/json+ encoding/asn1 from crypto/x509+ encoding/base32 from github.com/fxamacker/cbor/v2+ encoding/base64 from encoding/json+ encoding/binary from compress/gzip+ - encoding/gob from github.com/gorilla/securecookie encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ @@ -338,7 +335,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem html from html/template+ - html/template from github.com/gorilla/csrf+ + html/template from tailscale.com/util/eventbus image from github.com/skip2/go-qrcode+ image/color from github.com/skip2/go-qrcode+ image/png from github.com/skip2/go-qrcode @@ -372,7 +369,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep internal/runtime/math from internal/runtime/maps+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ - internal/saferio from debug/pe+ + W 
internal/saferio from debug/pe internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 6de0ddc39..d9a9cac65 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -123,8 +123,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ L github.com/google/nftables/xt from github.com/google/nftables/expr+ DW github.com/google/uuid from tailscale.com/clientupdate+ - github.com/gorilla/csrf from tailscale.com/client/web - github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 @@ -590,12 +588,11 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from github.com/tailscale/web-client-prebuilt+ - encoding from encoding/gob+ + encoding from encoding/json+ encoding/asn1 from crypto/x509+ encoding/base32 from github.com/fxamacker/cbor/v2+ encoding/base64 from encoding/json+ encoding/binary from compress/gzip+ - encoding/gob from github.com/gorilla/securecookie encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ @@ -609,7 +606,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem html from html/template+ - html/template from github.com/gorilla/csrf+ + html/template from tailscale.com/util/eventbus internal/abi from crypto/x509/internal/macos+ internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug @@ -640,7 +637,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/runtime/math from internal/runtime/maps+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ - internal/saferio from debug/pe+ + W internal/saferio from debug/pe internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 662752554..3b705f680 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -113,8 +113,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ L github.com/google/nftables/xt from github.com/google/nftables/expr+ DWI github.com/google/uuid from github.com/prometheus-community/pro-bing+ - LDW github.com/gorilla/csrf from tailscale.com/client/web - LDW github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 @@ -534,12 +532,11 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from github.com/tailscale/web-client-prebuilt+ - encoding from encoding/gob+ + encoding from encoding/json+ encoding/asn1 from crypto/x509+ encoding/base32 from github.com/fxamacker/cbor/v2+ encoding/base64 from 
encoding/json+ encoding/binary from compress/gzip+ - LDW encoding/gob from github.com/gorilla/securecookie encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ @@ -553,7 +550,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem html from html/template+ - LDW html/template from github.com/gorilla/csrf+ + LDW html/template from tailscale.com/util/eventbus internal/abi from crypto/x509/internal/macos+ internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug @@ -584,7 +581,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/runtime/math from internal/runtime/maps+ internal/runtime/sys from crypto/subtle+ LA internal/runtime/syscall from runtime+ - LDW internal/saferio from debug/pe+ + W internal/saferio from debug/pe internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ From 7a5af6e6e7d4938923378fd93418615934bad8d8 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 21 May 2025 20:30:55 -0700 Subject: [PATCH 0890/1708] ssh/tailssh: exclude Android from Linux build tags As noted in #16048, the ./ssh/tailssh package failed to build on Android, because GOOS=android also matches the "linux" build tag. Exclude Android like iOS is excluded from macOS (darwin). This now works: $ GOOS=android go install ./ipn/ipnlocal ./ssh/tailssh The original PR at #16048 is also fine, but this stops the problem earlier. Updates #16048 Change-Id: Ie4a6f6966a012e510c9cb11dd0d1fa88c48fac37 Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 4 ++-- ssh/tailssh/incubator.go | 2 +- ssh/tailssh/incubator_linux.go | 2 +- ssh/tailssh/tailssh.go | 2 +- ssh/tailssh/user.go | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index fcd39e391..8cbb6f351 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -305,7 +305,7 @@ jobs: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: build some - run: ./tool/go build ./ipn/... ./wgengine/ ./types/... ./control/controlclient + run: ./tool/go build ./ipn/... ./ssh/tailssh ./wgengine/ ./types/... ./control/controlclient env: GOOS: ios GOARCH: arm64 @@ -375,7 +375,7 @@ jobs: # some Android breakages early. # TODO(bradfitz): better; see https://github.com/tailscale/tailscale/issues/4482 - name: build some - run: ./tool/go install ./net/netns ./ipn/ipnlocal ./wgengine/magicsock/ ./wgengine/ ./wgengine/router/ ./wgengine/netstack ./util/dnsname/ ./ipn/ ./net/netmon ./wgengine/router/ ./tailcfg/ ./types/logger/ ./net/dns ./hostinfo ./version + run: ./tool/go install ./net/netns ./ipn/ipnlocal ./wgengine/magicsock/ ./wgengine/ ./wgengine/router/ ./wgengine/netstack ./util/dnsname/ ./ipn/ ./net/netmon ./wgengine/router/ ./tailcfg/ ./types/logger/ ./net/dns ./hostinfo ./version ./ssh/tailssh env: GOOS: android GOARCH: arm64 diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go index 442fedcf2..9e1a9ea94 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -7,7 +7,7 @@ // and groups to the specified `--uid`, `--gid` and `--groups`, and // then launches the requested `--cmd`. 
-//go:build linux || (darwin && !ios) || freebsd || openbsd +//go:build (linux && !android) || (darwin && !ios) || freebsd || openbsd package tailssh diff --git a/ssh/tailssh/incubator_linux.go b/ssh/tailssh/incubator_linux.go index bcbe0e240..4dfb9f27c 100644 --- a/ssh/tailssh/incubator_linux.go +++ b/ssh/tailssh/incubator_linux.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux +//go:build linux && !android package tailssh diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index e42f09bdf..19a2b11fd 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || (darwin && !ios) || freebsd || openbsd || plan9 +//go:build (linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9 // Package tailssh is an SSH server integrated into Tailscale. package tailssh diff --git a/ssh/tailssh/user.go b/ssh/tailssh/user.go index 097f0d296..ac92c762a 100644 --- a/ssh/tailssh/user.go +++ b/ssh/tailssh/user.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || (darwin && !ios) || freebsd || openbsd || plan9 +//go:build (linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9 package tailssh From 00a7dd180a7582502773c71a7ea52e051dbc67cd Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 23 May 2025 12:23:58 +0100 Subject: [PATCH 0891/1708] cmd/k8s-operator: validate Service tags, catch duplicate Tailscale Services (#16058) Validate that any tags that users have specified via tailscale.com/tags annotation are valid Tailscale ACL tags. Validate that no more than one HA Tailscale Kubernetes Services in a single cluster refer to the same Tailscale Service. Updates tailscale/tailscale#16054 Updates tailscale/tailscale#16035 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/ingress-for-pg.go | 34 ++++++++---- cmd/k8s-operator/ingress-for-pg_test.go | 5 +- cmd/k8s-operator/operator_test.go | 2 +- cmd/k8s-operator/svc-for-pg.go | 32 +++++++++-- cmd/k8s-operator/svc-for-pg_test.go | 73 +++++++++++++++++++++++-- cmd/k8s-operator/svc.go | 1 + 6 files changed, 122 insertions(+), 25 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 9cdd9cba9..4779014f3 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -660,14 +660,9 @@ func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networki var errs []error // Validate tags if present - if tstr, ok := ing.Annotations[AnnotationTags]; ok { - tags := strings.Split(tstr, ",") - for _, tag := range tags { - tag = strings.TrimSpace(tag) - if err := tailcfg.CheckTag(tag); err != nil { - errs = append(errs, fmt.Errorf("tailscale.com/tags annotation contains invalid tag %q: %w", tag, err)) - } - } + violations := tagViolations(ing) + if len(violations) > 0 { + errs = append(errs, fmt.Errorf("Ingress contains invalid tags: %v", strings.Join(violations, ","))) } // Validate TLS configuration @@ -699,8 +694,8 @@ func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networki return errors.Join(errs...) 
} for _, i := range ingList.Items { - if r.shouldExpose(&i) && hostnameForIngress(&i) == hostname && i.Name != ing.Name { - errs = append(errs, fmt.Errorf("found duplicate Ingress %q for hostname %q - multiple Ingresses for the same hostname in the same cluster are not allowed", i.Name, hostname)) + if r.shouldExpose(&i) && hostnameForIngress(&i) == hostname && i.UID != ing.UID { + errs = append(errs, fmt.Errorf("found duplicate Ingress %q for hostname %q - multiple Ingresses for the same hostname in the same cluster are not allowed", client.ObjectKeyFromObject(&i), hostname)) } } return errors.Join(errs...) @@ -1113,3 +1108,22 @@ func isErrorTailscaleServiceNotFound(err error) bool { ok := errors.As(err, &errResp) return ok && errResp.Status == http.StatusNotFound } + +func tagViolations(obj client.Object) []string { + var violations []string + if obj == nil { + return nil + } + tags, ok := obj.GetAnnotations()[AnnotationTags] + if !ok { + return nil + } + + for _, tag := range strings.Split(tags, ",") { + tag = strings.TrimSpace(tag) + if err := tailcfg.CheckTag(tag); err != nil { + violations = append(violations, fmt.Sprintf("invalid tag %q: %v", tag, err)) + } + } + return violations +} diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 3330da8d0..9ce90f771 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -272,6 +272,7 @@ func TestValidateIngress(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-ingress", Namespace: "default", + UID: types.UID("1234-UID"), Annotations: map[string]string{ AnnotationProxyGroup: "test-pg", }, @@ -339,7 +340,7 @@ func TestValidateIngress(t *testing.T) { }, }, pg: readyProxyGroup, - wantErr: "tailscale.com/tags annotation contains invalid tag \"tag:invalid!\": tag names can only contain numbers, letters, or dashes", + wantErr: "Ingress contains invalid tags: invalid tag \"tag:invalid!\": tag names can only contain numbers, letters, or dashes", }, { name: "multiple_TLS_entries", @@ -417,7 +418,7 @@ func TestValidateIngress(t *testing.T) { }, }, }}, - wantErr: `found duplicate Ingress "existing-ingress" for hostname "test" - multiple Ingresses for the same hostname in the same cluster are not allowed`, + wantErr: `found duplicate Ingress "default/existing-ingress" for hostname "test" - multiple Ingresses for the same hostname in the same cluster are not allowed`, }, } diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index f4b0db01c..33bf23e84 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1804,7 +1804,7 @@ func Test_metricsResourceCreation(t *testing.T) { func TestIgnorePGService(t *testing.T) { // NOTE: creating proxygroup stuff just to be sure that it's all ignored - _, _, fc, _ := setupServiceTest(t) + _, _, fc, _, _ := setupServiceTest(t) ft := &fakeTSClient{} zl, err := zap.NewDevelopment() diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go index 779f2714e..c9b5b8ae6 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -169,12 +169,9 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin return false, nil } - // Validate Service configuration - if violations := validateService(svc); len(violations) > 0 { - msg := fmt.Sprintf("unable to provision proxy resources: invalid Service: %s", strings.Join(violations, ", ")) - r.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVICE", 
msg) - r.logger.Error(msg) - tsoperator.SetServiceCondition(svc, tsapi.IngressSvcValid, metav1.ConditionFalse, reasonIngressSvcInvalid, msg, r.clock, logger) + if err := r.validateService(ctx, svc, pg); err != nil { + r.recorder.Event(svc, corev1.EventTypeWarning, reasonIngressSvcInvalid, err.Error()) + tsoperator.SetServiceCondition(svc, tsapi.IngressSvcValid, metav1.ConditionFalse, reasonIngressSvcInvalid, err.Error(), r.clock, logger) return false, nil } @@ -857,3 +854,26 @@ func (r *HAServiceReconciler) checkEndpointsReady(ctx context.Context, svc *core logger.Debugf("could not find any ready Endpoints in EndpointSlice") return false, nil } + +func (r *HAServiceReconciler) validateService(ctx context.Context, svc *corev1.Service, pg *tsapi.ProxyGroup) error { + var errs []error + if pg.Spec.Type != tsapi.ProxyGroupTypeIngress { + errs = append(errs, fmt.Errorf("ProxyGroup %q is of type %q but must be of type %q", + pg.Name, pg.Spec.Type, tsapi.ProxyGroupTypeIngress)) + } + if violations := validateService(svc); len(violations) > 0 { + errs = append(errs, fmt.Errorf("invalid Service: %s", strings.Join(violations, ", "))) + } + svcList := &corev1.ServiceList{} + if err := r.List(ctx, svcList); err != nil { + errs = append(errs, fmt.Errorf("[unexpected] error listing Services: %w", err)) + return errors.Join(errs...) + } + svcName := nameForService(svc) + for _, s := range svcList.Items { + if r.shouldExpose(&s) && nameForService(&s) == svcName && s.UID != svc.UID { + errs = append(errs, fmt.Errorf("found duplicate Service %q for hostname %q - multiple HA Services for the same hostname in the same cluster are not allowed", client.ObjectKeyFromObject(&s), svcName)) + } + } + return errors.Join(errs...) +} diff --git a/cmd/k8s-operator/svc-for-pg_test.go b/cmd/k8s-operator/svc-for-pg_test.go index 4bb633cb8..ecd60af50 100644 --- a/cmd/k8s-operator/svc-for-pg_test.go +++ b/cmd/k8s-operator/svc-for-pg_test.go @@ -12,6 +12,7 @@ import ( "math/rand/v2" "net/netip" "testing" + "time" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" @@ -33,7 +34,7 @@ import ( ) func TestServicePGReconciler(t *testing.T) { - svcPGR, stateSecret, fc, ft := setupServiceTest(t) + svcPGR, stateSecret, fc, ft, _ := setupServiceTest(t) svcs := []*corev1.Service{} config := []string{} for i := range 4 { @@ -79,7 +80,7 @@ func TestServicePGReconciler(t *testing.T) { } func TestServicePGReconciler_UpdateHostname(t *testing.T) { - svcPGR, stateSecret, fc, ft := setupServiceTest(t) + svcPGR, stateSecret, fc, ft, _ := setupServiceTest(t) cip := "4.1.6.7" svc, _ := setupTestService(t, "test-service", "", cip, fc, stateSecret) @@ -110,7 +111,7 @@ func TestServicePGReconciler_UpdateHostname(t *testing.T) { } } -func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, client.Client, *fakeTSClient) { +func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, client.Client, *fakeTSClient, *tstest.Clock) { // Pre-create the ProxyGroup pg := &tsapi.ProxyGroup{ ObjectMeta: metav1.ObjectMeta{ @@ -215,14 +216,74 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien lc: lc, } - return svcPGR, pgStateSecret, fc, ft + return svcPGR, pgStateSecret, fc, ft, cl +} + +func TestValidateService(t *testing.T) { + // Test that no more than one Kubernetes Service in a cluster refers to the same Tailscale Service. 
+ pgr, _, lc, _, cl := setupServiceTest(t) + svc := &corev1.Service{ + TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-app", + Namespace: "ns-1", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + "tailscale.com/hostname": "my-app", + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "1.2.3.4", + Type: corev1.ServiceTypeLoadBalancer, + LoadBalancerClass: ptr.To("tailscale"), + }, + } + svc2 := &corev1.Service{ + TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-app2", + Namespace: "ns-2", + UID: types.UID("1235-UID"), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + "tailscale.com/hostname": "my-app", + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "1.2.3.5", + Type: corev1.ServiceTypeLoadBalancer, + LoadBalancerClass: ptr.To("tailscale"), + }, + } + wantSvc := &corev1.Service{ + ObjectMeta: svc.ObjectMeta, + TypeMeta: svc.TypeMeta, + Spec: svc.Spec, + Status: corev1.ServiceStatus{ + Conditions: []metav1.Condition{ + { + Type: string(tsapi.IngressSvcValid), + Status: metav1.ConditionFalse, + Reason: reasonIngressSvcInvalid, + LastTransitionTime: metav1.NewTime(cl.Now().Truncate(time.Second)), + Message: `found duplicate Service "ns-2/my-app2" for hostname "my-app" - multiple HA Services for the same hostname in the same cluster are not allowed`, + }, + }, + }, + } + + mustCreate(t, lc, svc) + mustCreate(t, lc, svc2) + expectReconciled(t, pgr, svc.Namespace, svc.Name) + expectEqual(t, lc, wantSvc) } func TestServicePGReconciler_MultiCluster(t *testing.T) { var ft *fakeTSClient var lc localClient for i := 0; i <= 10; i++ { - pgr, stateSecret, fc, fti := setupServiceTest(t) + pgr, stateSecret, fc, fti, _ := setupServiceTest(t) if i == 0 { ft = fti lc = pgr.lc @@ -250,7 +311,7 @@ func TestServicePGReconciler_MultiCluster(t *testing.T) { } func TestIgnoreRegularService(t *testing.T) { - pgr, _, fc, ft := setupServiceTest(t) + pgr, _, fc, ft, _ := setupServiceTest(t) svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index d6a6f440f..c880f59f5 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -392,6 +392,7 @@ func validateService(svc *corev1.Service) []string { violations = append(violations, fmt.Sprintf("invalid Tailscale hostname %q, use %q annotation to override: %s", svcName, AnnotationHostname, err)) } } + violations = append(violations, tagViolations(svc)...) return violations } From 4a11514db5fc4fe77225cd032f9f76cc8610b9ce Mon Sep 17 00:00:00 2001 From: Zach Buchheit Date: Fri, 23 May 2025 14:17:28 -0700 Subject: [PATCH 0892/1708] ipn/ipnlocal: improve dohQuery error to suggest `?dns=` and `?q=` (#16056) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, a missing or invalid `dns` parameter on GET `/dns-query` returned only “missing ‘dns’ parameter”. Now the error message guides users to use `?dns=` or `?q=`. 
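As an illustration (not part of this change), the value carried by the standard `?dns=` form is a packed DNS query encoded with unpadded base64url, which a client could build along these lines:

    package main

    import (
        "encoding/base64"
        "fmt"

        "golang.org/x/net/dns/dnsmessage"
    )

    func main() {
        // Build a simple A query and encode it the way the DoH GET form expects.
        q := dnsmessage.Message{
            Header: dnsmessage.Header{RecursionDesired: true},
            Questions: []dnsmessage.Question{{
                Name:  dnsmessage.MustNewName("example.com."),
                Type:  dnsmessage.TypeA,
                Class: dnsmessage.ClassINET,
            }},
        }
        packed, err := q.Pack()
        if err != nil {
            panic(err)
        }
        fmt.Printf("GET /dns-query?dns=%s\n", base64.RawURLEncoding.EncodeToString(packed))
    }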
Updates: #16055 Signed-off-by: Zach Buchheit --- ipn/ipnlocal/peerapi.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 84aaecf7e..60dd41024 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -859,7 +859,7 @@ func dohQuery(r *http.Request) (dnsQuery []byte, publicErr string) { case "GET": q64 := r.FormValue("dns") if q64 == "" { - return nil, "missing 'dns' parameter" + return nil, "missing ‘dns’ parameter; try '?dns=' (DoH standard) or use '?q=' for JSON debug mode" } if base64.RawURLEncoding.DecodedLen(len(q64)) > maxQueryLen { return nil, "query too large" From 4980869977302612c77518adbd6351f568c264a4 Mon Sep 17 00:00:00 2001 From: Tim Klocke Date: Sat, 24 May 2025 18:05:57 +0200 Subject: [PATCH 0893/1708] cmd/tsidp: Fix sending string for refresh_token In accordance with the OIDC/OAuth 2.0 protocol, do not send an empty refresh_token and instead omit the field when empty. Fixes https://github.com/tailscale/tailscale/issues/16073 Signed-off-by: Tim Klocke --- cmd/tsidp/tsidp.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index e2b777fa1..2d9450e96 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -795,7 +795,7 @@ type oidcTokenResponse struct { IDToken string `json:"id_token"` TokenType string `json:"token_type"` AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` + RefreshToken string `json:"refresh_token,omitempty"` ExpiresIn int `json:"expires_in"` } From 09582bdc009fc6faeb5a17b657570fd2d7b9dd3c Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Sat, 24 May 2025 18:16:29 -0400 Subject: [PATCH 0894/1708] cmd/tsidp: add web UI for managing OIDC clients (#16068) Add comprehensive web interface at ui for managing OIDC clients, similar to tsrecorder's design. Features include list view, create/edit forms with validation, client secret management, delete functionality with confirmation dialogs, responsive design, and restricted tailnet access only. Fixes #16067 Signed-off-by: Raj Singh --- cmd/tsidp/tsidp.go | 8 +- cmd/tsidp/ui-edit.html | 199 +++++++++++++++++ cmd/tsidp/ui-header.html | 53 +++++ cmd/tsidp/ui-list.html | 73 +++++++ cmd/tsidp/ui-style.css | 446 +++++++++++++++++++++++++++++++++++++++ cmd/tsidp/ui.go | 325 ++++++++++++++++++++++++++++ 6 files changed, 1097 insertions(+), 7 deletions(-) create mode 100644 cmd/tsidp/ui-edit.html create mode 100644 cmd/tsidp/ui-header.html create mode 100644 cmd/tsidp/ui-list.html create mode 100644 cmd/tsidp/ui-style.css create mode 100644 cmd/tsidp/ui.go diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 2d9450e96..5df99e1b8 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -452,13 +452,7 @@ func (s *idpServer) newMux() *http.ServeMux { mux.HandleFunc("/userinfo", s.serveUserInfo) mux.HandleFunc("/token", s.serveToken) mux.HandleFunc("/clients/", s.serveClients) - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/" { - io.WriteString(w, "

        Tailscale OIDC IdP

        ") - return - } - http.Error(w, "tsidp: not found", http.StatusNotFound) - }) + mux.HandleFunc("/", s.handleUI) return mux } diff --git a/cmd/tsidp/ui-edit.html b/cmd/tsidp/ui-edit.html new file mode 100644 index 000000000..d463981aa --- /dev/null +++ b/cmd/tsidp/ui-edit.html @@ -0,0 +1,199 @@ + + + + + {{if .IsNew}}Add New Client{{else}}Edit Client{{end}} - Tailscale OIDC Identity Provider + + + + + + + {{template "header"}} + +
        +
        +
        +

        + {{if .IsNew}}Add New OIDC Client{{else}}Edit OIDC Client{{end}} +

        + ← Back to Clients +
        + + {{if .Success}} +
        + {{.Success}} +
        + {{end}} + + {{if .Error}} +
        + {{.Error}} +
        + {{end}} + + {{if and .Secret .IsNew}} +
        +

        Client Created Successfully!

        +

        ⚠️ Save both the Client ID and Secret now! The secret will not be shown again.

        + +
        + +
        + + +
        +
        + +
        + +
        + + +
        +
        +
        + {{end}} + + {{if and .Secret .IsEdit}} +
        +

        New Client Secret

        +

        ⚠️ Save this secret now! It will not be shown again.

        +
        + + +
        +
        + {{end}} + +
        +
        + + +
        + A descriptive name for this OIDC client (optional). +
        +
        + +
        + + +
        + The URL where users will be redirected after authentication. +
        +
        + + {{if .IsEdit}} +
        + + +
        + The client ID cannot be changed. +
        +
        + {{end}} + +
        + + + {{if .IsEdit}} + + + + {{end}} +
        +
        + + {{if .IsEdit}} +
        +

        Client Information

        +
        +
        Client ID
        +
        {{.ID}}
        +
        Secret Status
        +
        + {{if .HasSecret}} + Secret configured + {{else}} + No secret + {{end}} +
        +
        +
        + {{end}} +
        +
        + + + + \ No newline at end of file diff --git a/cmd/tsidp/ui-header.html b/cmd/tsidp/ui-header.html new file mode 100644 index 000000000..68e9bc0df --- /dev/null +++ b/cmd/tsidp/ui-header.html @@ -0,0 +1,53 @@ +
        + +
        \ No newline at end of file diff --git a/cmd/tsidp/ui-list.html b/cmd/tsidp/ui-list.html new file mode 100644 index 000000000..d45b88349 --- /dev/null +++ b/cmd/tsidp/ui-list.html @@ -0,0 +1,73 @@ + + + + Tailscale OIDC Identity Provider + + + + + + {{template "header"}} + +
        +
        +
        +

        OIDC Clients

        + {{if .}} +

        {{len .}} client{{if ne (len .) 1}}s{{end}} configured

        + {{end}} +
        + Add New Client +
        + + {{if .}} + + + + + + + + + + + + {{range .}} + + + + + + + + {{end}} + +
        NameClient IDRedirect URIStatusActions
        + {{if .Name}} + {{.Name}} + {{else}} + Unnamed Client + {{end}} + + {{.ID}} + + {{.RedirectURI}} + + {{if .HasSecret}} + Active + {{else}} + No Secret + {{end}} + + Edit +
        + {{else}} +
        +

        No OIDC clients configured

        +

        Create your first OIDC client to get started with authentication.

        + Add New Client +
        + {{end}} +
        + + \ No newline at end of file diff --git a/cmd/tsidp/ui-style.css b/cmd/tsidp/ui-style.css new file mode 100644 index 000000000..148ec3030 --- /dev/null +++ b/cmd/tsidp/ui-style.css @@ -0,0 +1,446 @@ +:root { + --tw-text-opacity: 1; + --color-gray-100: 247 245 244; + --color-gray-200: 238 235 234; + --color-gray-500: 112 110 109; + --color-gray-700: 46 45 45; + --color-gray-800: 35 34 34; + --color-gray-900: 31 30 30; + --color-bg-app: rgb(var(--color-gray-900) / 1); + --color-border-base: rgb(var(--color-gray-200) / 1); + --color-primary: 59 130 246; + --color-primary-hover: 37 99 235; + --color-secondary: 107 114 128; + --color-secondary-hover: 75 85 99; + --color-success: 34 197 94; + --color-warning: 245 158 11; + --color-danger: 239 68 68; + --color-danger-hover: 220 38 38; +} + +* { + box-sizing: border-box; + padding: 0; + margin: 0; +} + +body { + font-family: Inter, -apple-system, BlinkMacSystemFont, Helvetica, Arial, + sans-serif; + text-rendering: optimizeLegibility; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + font-size: 16px; + line-height: 1.4; + margin: 0; + background-color: var(--color-bg-app); + color: rgb(var(--color-gray-200)); +} + +a { + text-decoration: none; + color: inherit; +} + +header { + margin-top: 40px; +} +header nav { + margin: 0 auto; + max-width: 1120px; + display: flex; + align-items: center; + justify-content: center; +} +header nav h1 { + display: inline; + font-weight: 600; + font-size: 1.125rem; + line-height: 1.75rem; + margin-left: 0.75rem; +} + +main { + margin: 40px auto 60px auto; + max-width: 1120px; + padding: 0 20px; +} + +/* Header actions */ +.header-actions { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 2rem; +} + +.header-actions h2 { + font-size: 1.5rem; + font-weight: 600; + margin: 0 0 0.25rem 0; +} + +.client-count { + font-size: 0.875rem; + color: rgb(var(--color-gray-500)); + margin: 0; +} + +/* Buttons */ +.btn { + display: inline-flex; + align-items: center; + padding: 8px 16px; + border-radius: 6px; + font-size: 14px; + font-weight: 500; + text-decoration: none; + border: none; + cursor: pointer; + transition: all 0.2s ease; +} + +.btn-small { + padding: 4px 8px; + font-size: 12px; +} + +.btn-primary { + background-color: rgb(var(--color-primary)); + color: white; +} + +.btn-primary:hover { + background-color: rgb(var(--color-primary-hover)); +} + +.btn-secondary { + background-color: rgb(var(--color-secondary)); + color: white; +} + +.btn-secondary:hover { + background-color: rgb(var(--color-secondary-hover)); +} + +.btn-success { + background-color: rgb(var(--color-success)); + color: white; +} + +.btn-warning { + background-color: rgb(var(--color-warning)); + color: white; +} + +.btn-danger { + background-color: rgb(var(--color-danger)); + color: white; +} + +.btn-danger:hover { + background-color: rgb(var(--color-danger-hover)); +} + +/* Tables */ +table { + width: 100%; + border-spacing: 0; + border: 1px solid rgb(var(--color-gray-700)); + border-bottom-width: 0; + border-radius: 8px; + overflow: hidden; +} + +td { + border: 0 solid rgb(var(--color-gray-700)); + border-bottom-width: 1px; + padding: 12px 16px; +} + +thead td { + text-transform: uppercase; + color: rgb(var(--color-gray-500) / var(--tw-text-opacity)); + font-size: 12px; + letter-spacing: 0.08em; + font-weight: 600; + background-color: rgb(var(--color-gray-800)); +} + +tbody tr:hover { + background-color: rgb(var(--color-gray-800)); +} + +/* Client display elements */ 
+.client-id { + font-family: "SF Mono", SFMono-Regular, ui-monospace, "DejaVu Sans Mono", + Menlo, Consolas, monospace; + font-size: 12px; + background-color: rgb(var(--color-gray-800)); + padding: 2px 6px; + border-radius: 4px; + color: rgb(var(--color-gray-200)); +} + +.redirect-uri { + font-size: 14px; + color: rgb(var(--color-gray-200)); + word-break: break-all; +} + +.status-active { + color: rgb(var(--color-success)); + font-weight: 500; +} + +.status-inactive { + color: rgb(var(--color-gray-500)); + font-weight: 500; +} + +.text-muted { + color: rgb(var(--color-gray-500)); +} + +/* Empty state */ +.empty-state { + text-align: center; + padding: 60px 20px; + border: 1px solid rgb(var(--color-gray-700)); + border-radius: 8px; + background-color: rgb(var(--color-gray-800) / 0.5); +} + +.empty-state h3 { + font-size: 1.25rem; + font-weight: 600; + margin-bottom: 0.5rem; + color: rgb(var(--color-gray-200)); +} + +.empty-state p { + color: rgb(var(--color-gray-500)); + margin-bottom: 1.5rem; +} + +/* Forms */ +.form-container { + max-width: 600px; + margin: 0 auto; +} + +.form-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 2rem; +} + +.form-header h2 { + font-size: 1.5rem; + font-weight: 600; + margin: 0; +} + +.client-form { + background-color: rgb(var(--color-gray-800) / 0.5); + border: 1px solid rgb(var(--color-gray-700)); + border-radius: 8px; + padding: 24px; + margin-bottom: 2rem; +} + +.form-group { + margin-bottom: 1.5rem; +} + +.form-group:last-child { + margin-bottom: 0; +} + +.form-group label { + display: block; + font-weight: 500; + margin-bottom: 0.5rem; + color: rgb(var(--color-gray-200)); +} + +.required { + color: rgb(var(--color-danger)); +} + +.form-input { + width: 100%; + padding: 10px 12px; + border: 1px solid rgb(var(--color-gray-700)); + border-radius: 6px; + background-color: rgb(var(--color-gray-900)); + color: rgb(var(--color-gray-200)); + font-size: 14px; +} + +.form-input:focus { + outline: none; + border-color: rgb(var(--color-primary)); + box-shadow: 0 0 0 3px rgb(var(--color-primary) / 0.1); +} + +.form-input-readonly { + background-color: rgb(var(--color-gray-800)); + color: rgb(var(--color-gray-500)); +} + +.form-help { + font-size: 12px; + color: rgb(var(--color-gray-500)); + margin-top: 0.25rem; +} + +.form-actions { + display: flex; + gap: 1rem; + margin-top: 2rem; + padding-top: 1.5rem; + border-top: 1px solid rgb(var(--color-gray-700)); +} + +/* Alerts */ +.alert { + padding: 12px 16px; + border-radius: 6px; + margin-bottom: 1.5rem; + font-size: 14px; +} + +.alert-success { + background-color: rgb(var(--color-success) / 0.1); + border: 1px solid rgb(var(--color-success) / 0.3); + color: rgb(var(--color-success)); +} + +.alert-error { + background-color: rgb(var(--color-danger) / 0.1); + border: 1px solid rgb(var(--color-danger) / 0.3); + color: rgb(var(--color-danger)); +} + +/* Secret display */ +.secret-display { + background-color: rgb(var(--color-gray-800) / 0.5); + border: 1px solid rgb(var(--color-gray-700)); + border-radius: 8px; + padding: 20px; + margin-bottom: 2rem; +} + +.secret-display h3 { + font-size: 1.125rem; + font-weight: 600; + margin-bottom: 0.5rem; + color: rgb(var(--color-gray-200)); +} + +.warning { + color: rgb(var(--color-warning)); + font-weight: 500; + margin-bottom: 1rem; +} + +.secret-field { + display: flex; + gap: 0.5rem; +} + +.secret-input { + flex: 1; + padding: 10px 12px; + border: 1px solid rgb(var(--color-gray-700)); + border-radius: 6px; + background-color: 
rgb(var(--color-gray-900)); + color: rgb(var(--color-gray-200)); + font-family: "SF Mono", SFMono-Regular, ui-monospace, "DejaVu Sans Mono", + Menlo, Consolas, monospace; + font-size: 12px; +} + +/* Client info */ +.client-info { + background-color: rgb(var(--color-gray-800) / 0.5); + border: 1px solid rgb(var(--color-gray-700)); + border-radius: 8px; + padding: 20px; +} + +.client-info h3 { + font-size: 1.125rem; + font-weight: 600; + margin-bottom: 1rem; + color: rgb(var(--color-gray-200)); +} + +.client-info dl { + display: grid; + grid-template-columns: auto 1fr; + gap: 0.5rem 1rem; + border: none; + border-radius: 0; + padding: 0; +} + +.client-info dt { + font-weight: 600; + color: rgb(var(--color-gray-400)); + border: none; + padding: 0; +} + +.client-info dd { + color: rgb(var(--color-gray-200)); + border: none; + padding: 0; +} + +.client-info code { + font-family: "SF Mono", SFMono-Regular, ui-monospace, "DejaVu Sans Mono", + Menlo, Consolas, monospace; + font-size: 12px; + background-color: rgb(var(--color-gray-800)); + padding: 2px 6px; + border-radius: 4px; + color: rgb(var(--color-gray-200)); +} + +/* Responsive design */ +@media (max-width: 768px) { + .header-actions { + flex-direction: column; + align-items: stretch; + gap: 1rem; + } + + .form-header { + flex-direction: column; + align-items: stretch; + gap: 1rem; + } + + .form-actions { + flex-direction: column; + } + + .secret-field { + flex-direction: column; + } + + table { + font-size: 14px; + } + + td { + padding: 8px 12px; + } + + .client-id { + font-size: 10px; + } +} \ No newline at end of file diff --git a/cmd/tsidp/ui.go b/cmd/tsidp/ui.go new file mode 100644 index 000000000..d37b64990 --- /dev/null +++ b/cmd/tsidp/ui.go @@ -0,0 +1,325 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "bytes" + _ "embed" + "html/template" + "log" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "tailscale.com/util/rands" +) + +//go:embed ui-header.html +var headerHTML string + +//go:embed ui-list.html +var listHTML string + +//go:embed ui-edit.html +var editHTML string + +//go:embed ui-style.css +var styleCSS string + +var headerTmpl = template.Must(template.New("header").Parse(headerHTML)) +var listTmpl = template.Must(headerTmpl.New("list").Parse(listHTML)) +var editTmpl = template.Must(headerTmpl.New("edit").Parse(editHTML)) + +var processStart = time.Now() + +func (s *idpServer) handleUI(w http.ResponseWriter, r *http.Request) { + if isFunnelRequest(r) { + http.Error(w, "tsidp: UI not available over Funnel", http.StatusNotFound) + return + } + + switch r.URL.Path { + case "/": + s.handleClientsList(w, r) + return + case "/new": + s.handleNewClient(w, r) + return + case "/style.css": + http.ServeContent(w, r, "ui-style.css", processStart, strings.NewReader(styleCSS)) + return + } + + if strings.HasPrefix(r.URL.Path, "/edit/") { + s.handleEditClient(w, r) + return + } + + http.Error(w, "tsidp: not found", http.StatusNotFound) +} + +func (s *idpServer) handleClientsList(w http.ResponseWriter, r *http.Request) { + s.mu.Lock() + clients := make([]clientDisplayData, 0, len(s.funnelClients)) + for _, c := range s.funnelClients { + clients = append(clients, clientDisplayData{ + ID: c.ID, + Name: c.Name, + RedirectURI: c.RedirectURI, + HasSecret: c.Secret != "", + }) + } + s.mu.Unlock() + + sort.Slice(clients, func(i, j int) bool { + if clients[i].Name != clients[j].Name { + return clients[i].Name < clients[j].Name + } + return clients[i].ID < clients[j].ID + 
}) + + var buf bytes.Buffer + if err := listTmpl.Execute(&buf, clients); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + buf.WriteTo(w) +} + +func (s *idpServer) handleNewClient(w http.ResponseWriter, r *http.Request) { + if r.Method == "GET" { + if err := s.renderClientForm(w, clientDisplayData{IsNew: true}); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + return + } + + if r.Method == "POST" { + if err := r.ParseForm(); err != nil { + http.Error(w, "Failed to parse form", http.StatusBadRequest) + return + } + + name := strings.TrimSpace(r.FormValue("name")) + redirectURI := strings.TrimSpace(r.FormValue("redirect_uri")) + + baseData := clientDisplayData{ + IsNew: true, + Name: name, + RedirectURI: redirectURI, + } + + if errMsg := validateRedirectURI(redirectURI); errMsg != "" { + s.renderFormError(w, baseData, errMsg) + return + } + + clientID := rands.HexString(32) + clientSecret := rands.HexString(64) + newClient := funnelClient{ + ID: clientID, + Secret: clientSecret, + Name: name, + RedirectURI: redirectURI, + } + + s.mu.Lock() + if s.funnelClients == nil { + s.funnelClients = make(map[string]*funnelClient) + } + s.funnelClients[clientID] = &newClient + err := s.storeFunnelClientsLocked() + s.mu.Unlock() + + if err != nil { + log.Printf("could not write funnel clients db: %v", err) + s.renderFormError(w, baseData, "Failed to save client") + return + } + + successData := clientDisplayData{ + ID: clientID, + Name: name, + RedirectURI: redirectURI, + Secret: clientSecret, + IsNew: true, + } + s.renderFormSuccess(w, successData, "Client created successfully! Save the client secret - it won't be shown again.") + return + } + + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) +} + +func (s *idpServer) handleEditClient(w http.ResponseWriter, r *http.Request) { + clientID := strings.TrimPrefix(r.URL.Path, "/edit/") + if clientID == "" { + http.Error(w, "Client ID required", http.StatusBadRequest) + return + } + + s.mu.Lock() + client, exists := s.funnelClients[clientID] + s.mu.Unlock() + + if !exists { + http.Error(w, "Client not found", http.StatusNotFound) + return + } + + if r.Method == "GET" { + data := createEditBaseData(client, client.Name, client.RedirectURI) + if err := s.renderClientForm(w, data); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + return + } + + if r.Method == "POST" { + action := r.FormValue("action") + + if action == "delete" { + s.mu.Lock() + delete(s.funnelClients, clientID) + err := s.storeFunnelClientsLocked() + s.mu.Unlock() + + if err != nil { + log.Printf("could not write funnel clients db: %v", err) + s.mu.Lock() + s.funnelClients[clientID] = client + s.mu.Unlock() + + baseData := createEditBaseData(client, client.Name, client.RedirectURI) + s.renderFormError(w, baseData, "Failed to delete client. Please try again.") + return + } + + http.Redirect(w, r, "/", http.StatusSeeOther) + return + } + + if action == "regenerate_secret" { + newSecret := rands.HexString(64) + s.mu.Lock() + s.funnelClients[clientID].Secret = newSecret + err := s.storeFunnelClientsLocked() + s.mu.Unlock() + + baseData := createEditBaseData(client, client.Name, client.RedirectURI) + baseData.HasSecret = true + + if err != nil { + log.Printf("could not write funnel clients db: %v", err) + s.renderFormError(w, baseData, "Failed to regenerate secret") + return + } + + baseData.Secret = newSecret + s.renderFormSuccess(w, baseData, "New client secret generated! 
Save it - it won't be shown again.") + return + } + + if err := r.ParseForm(); err != nil { + http.Error(w, "Failed to parse form", http.StatusBadRequest) + return + } + + name := strings.TrimSpace(r.FormValue("name")) + redirectURI := strings.TrimSpace(r.FormValue("redirect_uri")) + baseData := createEditBaseData(client, name, redirectURI) + + if errMsg := validateRedirectURI(redirectURI); errMsg != "" { + s.renderFormError(w, baseData, errMsg) + return + } + + s.mu.Lock() + s.funnelClients[clientID].Name = name + s.funnelClients[clientID].RedirectURI = redirectURI + err := s.storeFunnelClientsLocked() + s.mu.Unlock() + + if err != nil { + log.Printf("could not write funnel clients db: %v", err) + s.renderFormError(w, baseData, "Failed to update client") + return + } + + s.renderFormSuccess(w, baseData, "Client updated successfully!") + return + } + + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) +} + +type clientDisplayData struct { + ID string + Name string + RedirectURI string + Secret string + HasSecret bool + IsNew bool + IsEdit bool + Success string + Error string +} + +func (s *idpServer) renderClientForm(w http.ResponseWriter, data clientDisplayData) error { + var buf bytes.Buffer + if err := editTmpl.Execute(&buf, data); err != nil { + return err + } + if _, err := buf.WriteTo(w); err != nil { + return err + } + return nil +} + +func (s *idpServer) renderFormError(w http.ResponseWriter, data clientDisplayData, errorMsg string) { + data.Error = errorMsg + if err := s.renderClientForm(w, data); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +func (s *idpServer) renderFormSuccess(w http.ResponseWriter, data clientDisplayData, successMsg string) { + data.Success = successMsg + if err := s.renderClientForm(w, data); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +func createEditBaseData(client *funnelClient, name, redirectURI string) clientDisplayData { + return clientDisplayData{ + ID: client.ID, + Name: name, + RedirectURI: redirectURI, + HasSecret: client.Secret != "", + IsEdit: true, + } +} + +func validateRedirectURI(redirectURI string) string { + if redirectURI == "" { + return "Redirect URI is required" + } + + u, err := url.Parse(redirectURI) + if err != nil { + return "Invalid URL format" + } + + if u.Scheme != "http" && u.Scheme != "https" { + return "Redirect URI must be a valid HTTP or HTTPS URL" + } + + if u.Host == "" { + return "Redirect URI must include a valid host" + } + + return "" +} From cd49faa123131414cf990e4ad6ff7e2157f2e82a Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Mon, 26 May 2025 10:23:30 -0400 Subject: [PATCH 0895/1708] feature/capture: fix wireshark decoding and add new disco frame types (#16089) Fix the wireshark lua dissector to support 0 bit position and not throw modulo div by 0 errors. Add new disco frame types to the decoder. 
Updates tailscale/corp#29036 Signed-off-by: Mike O'Driscoll --- feature/capture/dissector/ts-dissector.lua | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/feature/capture/dissector/ts-dissector.lua b/feature/capture/dissector/ts-dissector.lua index ad553d767..c2ee2b755 100644 --- a/feature/capture/dissector/ts-dissector.lua +++ b/feature/capture/dissector/ts-dissector.lua @@ -1,5 +1,5 @@ function hasbit(x, p) - return x % (p + p) >= p + return bit.band(x, p) ~= 0 end tsdebug_ll = Proto("tsdebug", "Tailscale debug") @@ -128,6 +128,10 @@ function tsdisco_frame.dissector(buffer, pinfo, tree) if message_type == 1 then subtree:add(DISCO_TYPE, "Ping") elseif message_type == 2 then subtree:add(DISCO_TYPE, "Pong") elseif message_type == 3 then subtree:add(DISCO_TYPE, "Call me maybe") + elseif message_type == 4 then subtree:add(DISCO_TYPE, "Bind UDP Relay Endpoint") + elseif message_type == 5 then subtree:add(DISCO_TYPE, "Bind UDP Relay Endpoint Challenge") + elseif message_type == 6 then subtree:add(DISCO_TYPE, "Bind UDP Relay Endpoint Answer") + elseif message_type == 7 then subtree:add(DISCO_TYPE, "Call me maybe via") end -- Message version From 4b59f1dfe6f0e1566f21573565204d07b49a3013 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 27 May 2025 16:03:45 +0100 Subject: [PATCH 0896/1708] .github/workflows: use Ubuntu 24.04 images (#16097) Bumps Ubuntu version for test container images 22.04 -> 24.04. Updates#cleanup Signed-off-by: Irbe Krumina --- .github/workflows/test.yml | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 8cbb6f351..2aad005ae 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -39,7 +39,7 @@ concurrency: jobs: race-root-integration: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 strategy: fail-fast: false # don't abort the entire matrix if one element fails matrix: @@ -74,7 +74,7 @@ jobs: buildflags: "-race" shard: '3/3' - goarch: "386" # thanks yaml - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -187,7 +187,7 @@ jobs: find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete privileged: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 container: image: golang:latest options: --privileged @@ -214,7 +214,7 @@ jobs: XDG_CACHE_HOME: "/var/lib/ghrunner/cache" race-build: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -258,7 +258,7 @@ jobs: - goos: openbsd goarch: amd64 - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -300,7 +300,7 @@ jobs: ios: # similar to cross above, but iOS can't build most of the repo. So, just #make it build a few smoke packages. - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -328,7 +328,7 @@ jobs: - goos: illumos goarch: amd64 - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -366,7 +366,7 @@ jobs: # similar to cross above, but android fails to build a few pieces of the # repo. 
We should fix those pieces, they're small, but as a stepping stone, # only test the subset of android that our past smoke test checked. - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -381,7 +381,7 @@ jobs: GOARCH: arm64 wasm: # builds tsconnect, which is the only wasm build we support - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -420,7 +420,7 @@ jobs: find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete tailscale_go: # Subset of tests that depend on our custom Go toolchain. - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -440,7 +440,7 @@ jobs: # explicit 'if' condition, because the default condition for steps is # 'success()', meaning "only run this if no previous steps failed". if: github.event_name == 'pull_request' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: build fuzzers id: build @@ -492,7 +492,7 @@ jobs: path: ${{ env.artifacts_path }}/out/artifacts depaware: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -501,7 +501,7 @@ jobs: make depaware go_generate: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -514,7 +514,7 @@ jobs: git diff --name-only --exit-code || (echo "The files above need updating. Please run 'go generate'."; exit 1) go_mod_tidy: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -526,7 +526,7 @@ jobs: git diff --name-only --exit-code || (echo "Please run 'go mod tidy'."; exit 1) licenses: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -534,7 +534,7 @@ jobs: run: ./scripts/check_license_headers.sh . staticcheck: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 strategy: fail-fast: false # don't abort the entire matrix if one element fails matrix: @@ -575,7 +575,7 @@ jobs: - go_mod_tidy - licenses - staticcheck - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: notify # Only notify slack for merged commits, not PR failures. @@ -604,7 +604,7 @@ jobs: check_mergeability: if: always() - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 needs: - android - test From 842df378037579f35f996c9f3bb89dc53ba8e720 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 28 May 2025 10:08:06 -0400 Subject: [PATCH 0897/1708] ipn: set RouteAll=true by default for new accounts on iOS and Android (#16110) fixes tailscale/tailscale#16082 RouteAll should be true by default on iOS and Android. Signed-off-by: Jonathan Nobels --- ipn/prefs.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ipn/prefs.go b/ipn/prefs.go index caf9ccfc3..01275a7e2 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -721,9 +721,10 @@ func (p *Prefs) ControlURLOrDefault() string { // of the platform it's running on. func (p *Prefs) DefaultRouteAll(goos string) bool { switch goos { - case "windows": + case "windows", "android", "ios": return true case "darwin": + // Only true for macAppStore and macsys, false for darwin tailscaled. 
return version.IsSandboxedMacOS() default: return false From ffc8ec289b7ebd001963d45ce11efc140030deb7 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 28 May 2025 10:45:59 -0700 Subject: [PATCH 0898/1708] wgengine/magicsock: implement relayManager endpoint probing (#16029) relayManager is responsible for disco ping/pong probing of relay endpoints once a handshake is complete. Future work will enable relayManager to set a relay endpoint as the best UDP path on an endpoint if appropriate. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 10 +- wgengine/magicsock/magicsock.go | 32 ++- wgengine/magicsock/relaymanager.go | 297 +++++++++++++++--------- wgengine/magicsock/relaymanager_test.go | 2 +- 4 files changed, 215 insertions(+), 126 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 3788708a8..c2d18d707 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1562,10 +1562,18 @@ func pktLenToPingSize(mtu tstun.WireMTU, is6 bool) int { // It should be called with the Conn.mu held. // // It reports whether m.TxID corresponds to a ping that this endpoint sent. -func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src netip.AddrPort) (knownTxID bool) { +func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src netip.AddrPort, vni virtualNetworkID) (knownTxID bool) { de.mu.Lock() defer de.mu.Unlock() + if vni.isSet() { + // TODO(jwhited): check for matching [endpoint.bestAddr] once that data + // structure is VNI-aware and [relayManager] can mutate it. We do not + // need to reference any [endpointState] for Geneve-encapsulated disco, + // we store nothing about them there. + return false + } + isDerp := src.Addr() == tailcfg.DerpMagicIPAddr sp, ok := de.sentPing[m.TxID] diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 61cdf4954..5b0f28a33 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1802,6 +1802,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke return } var geneve packet.GeneveHeader + var vni virtualNetworkID if isGeneveEncap { err := geneve.Decode(msg) if err != nil { @@ -1810,6 +1811,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke c.logf("[unexpected] geneve header decoding error: %v", err) return } + vni.set(geneve.VNI) msg = msg[packet.GeneveFixedHeaderLength:] } // The control bit should only be set for relay handshake messages @@ -1923,33 +1925,30 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke c.logf("[unexpected] %T packets should not come from a relay server with Geneve control bit set", dm) return } - c.relayManager.handleBindUDPRelayEndpointChallenge(challenge, di, src, geneve.VNI) + c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(challenge, di, src, geneve.VNI) return } switch dm := dm.(type) { case *disco.Ping: metricRecvDiscoPing.Add(1) - if isGeneveEncap { - // TODO(jwhited): handle Geneve-encapsulated disco ping. - return - } - c.handlePingLocked(dm, src, di, derpNodeSrc) + c.handlePingLocked(dm, src, vni, di, derpNodeSrc) case *disco.Pong: metricRecvDiscoPong.Add(1) - if isGeneveEncap { - // TODO(jwhited): handle Geneve-encapsulated disco pong. - return - } // There might be multiple nodes for the sender's DiscoKey. // Ask each to handle it, stopping once one reports that // the Pong's TxID was theirs. 
+ knownTxID := false c.peerMap.forEachEndpointWithDiscoKey(sender, func(ep *endpoint) (keepGoing bool) { - if ep.handlePongConnLocked(dm, di, src) { + if ep.handlePongConnLocked(dm, di, src, vni) { + knownTxID = true return false } return true }) + if !knownTxID && vni.isSet() { + c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(dm, di, src, vni.get()) + } case *disco.CallMeMaybe, *disco.CallMeMaybeVia: var via *disco.CallMeMaybeVia isVia := false @@ -2048,12 +2047,21 @@ func (c *Conn) unambiguousNodeKeyOfPingLocked(dm *disco.Ping, dk key.DiscoPublic // di is the discoInfo of the source of the ping. // derpNodeSrc is non-zero if the ping arrived via DERP. -func (c *Conn) handlePingLocked(dm *disco.Ping, src netip.AddrPort, di *discoInfo, derpNodeSrc key.NodePublic) { +func (c *Conn) handlePingLocked(dm *disco.Ping, src netip.AddrPort, vni virtualNetworkID, di *discoInfo, derpNodeSrc key.NodePublic) { likelyHeartBeat := src == di.lastPingFrom && time.Since(di.lastPingTime) < 5*time.Second di.lastPingFrom = src di.lastPingTime = time.Now() isDerp := src.Addr() == tailcfg.DerpMagicIPAddr + if vni.isSet() { + // TODO(jwhited): check for matching [endpoint.bestAddr] once that data + // structure is VNI-aware and [relayManager] can mutate it. We do not + // need to reference any [endpointState] for Geneve-encapsulated disco, + // we store nothing about them there. + c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(dm, di, src, vni.get()) + return + } + // If we can figure out with certainty which node key this disco // message is for, eagerly update our IP:port<>node and disco<>node // mappings to make p2p path discovery faster in simple diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 0b19bb83f..d9fd1fa24 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -14,15 +14,16 @@ import ( "time" "tailscale.com/disco" + "tailscale.com/net/stun" udprelay "tailscale.com/net/udprelay/endpoint" "tailscale.com/types/key" "tailscale.com/util/httpm" "tailscale.com/util/set" ) -// relayManager manages allocation and handshaking of -// [tailscale.com/net/udprelay.Server] endpoints. The zero value is ready for -// use. +// relayManager manages allocation, handshaking, and initial probing (disco +// ping/pong) of [tailscale.com/net/udprelay.Server] endpoints. The zero value +// is ready for use. type relayManager struct { initOnce sync.Once @@ -33,16 +34,18 @@ type relayManager struct { allocWorkByEndpoint map[*endpoint]*relayEndpointAllocWork handshakeWorkByEndpointByServerDisco map[*endpoint]map[key.DiscoPublic]*relayHandshakeWork handshakeWorkByServerDiscoVNI map[serverDiscoVNI]*relayHandshakeWork + handshakeWorkAwaitingPong map[*relayHandshakeWork]addrPortVNI + addrPortVNIToHandshakeWork map[addrPortVNI]*relayHandshakeWork // =================================================================== // The following chan fields serve event inputs to a single goroutine, // runLoop(). 
- allocateHandshakeCh chan *endpoint - allocateWorkDoneCh chan relayEndpointAllocWorkDoneEvent - handshakeWorkDoneCh chan relayEndpointHandshakeWorkDoneEvent - cancelWorkCh chan *endpoint - newServerEndpointCh chan newRelayServerEndpointEvent - rxChallengeCh chan relayHandshakeChallengeEvent + allocateHandshakeCh chan *endpoint + allocateWorkDoneCh chan relayEndpointAllocWorkDoneEvent + handshakeWorkDoneCh chan relayEndpointHandshakeWorkDoneEvent + cancelWorkCh chan *endpoint + newServerEndpointCh chan newRelayServerEndpointEvent + rxHandshakeDiscoMsgCh chan relayHandshakeDiscoMsgEvent discoInfoMu sync.Mutex // guards the following field discoInfoByServerDisco map[key.DiscoPublic]*relayHandshakeDiscoInfo @@ -66,16 +69,16 @@ type relayHandshakeWork struct { ep *endpoint se udprelay.ServerEndpoint - // In order to not deadlock, runLoop() must select{} read doneCh when - // attempting to write into rxChallengeCh, and the handshake work goroutine - // must close(doneCh) before attempting to write to - // relayManager.handshakeWorkDoneCh. - rxChallengeCh chan relayHandshakeChallengeEvent - doneCh chan struct{} + // handshakeServerEndpoint() always writes to doneCh (len 1) when it + // returns. It may end up writing the same event afterward to + // relayManager.handshakeWorkDoneCh if runLoop() can receive it. runLoop() + // must select{} read on doneCh to prevent deadlock when attempting to write + // to rxDiscoMsgCh. + rxDiscoMsgCh chan relayHandshakeDiscoMsgEvent + doneCh chan relayEndpointHandshakeWorkDoneEvent ctx context.Context cancel context.CancelFunc - wg *sync.WaitGroup } // newRelayServerEndpointEvent indicates a new [udprelay.ServerEndpoint] has @@ -99,8 +102,9 @@ type relayEndpointAllocWorkDoneEvent struct { // work for an [*endpoint] has completed. This structure is immutable once // initialized. 
type relayEndpointHandshakeWorkDoneEvent struct { - work *relayHandshakeWork - answerSentTo netip.AddrPort // zero value if answer was not transmitted + work *relayHandshakeWork + pongReceivedFrom netip.AddrPort // or zero value if handshake or ping/pong did not complete + latency time.Duration // only relevant if pongReceivedFrom.IsValid() } // activeWorkRunLoop returns true if there is outstanding allocation or @@ -150,8 +154,8 @@ func (r *relayManager) runLoop() { if !r.activeWorkRunLoop() { return } - case challenge := <-r.rxChallengeCh: - r.handleRxChallengeRunLoop(challenge) + case discoMsgEvent := <-r.rxHandshakeDiscoMsgCh: + r.handleRxHandshakeDiscoMsgRunLoop(discoMsgEvent) if !r.activeWorkRunLoop() { return } @@ -159,12 +163,12 @@ func (r *relayManager) runLoop() { } } -type relayHandshakeChallengeEvent struct { - challenge [32]byte - disco key.DiscoPublic - from netip.AddrPort - vni uint32 - at time.Time +type relayHandshakeDiscoMsgEvent struct { + msg disco.Message + disco key.DiscoPublic + from netip.AddrPort + vni uint32 + at time.Time } // relayEndpointAllocWork serves to track in-progress relay endpoint allocation @@ -187,12 +191,14 @@ func (r *relayManager) init() { r.allocWorkByEndpoint = make(map[*endpoint]*relayEndpointAllocWork) r.handshakeWorkByEndpointByServerDisco = make(map[*endpoint]map[key.DiscoPublic]*relayHandshakeWork) r.handshakeWorkByServerDiscoVNI = make(map[serverDiscoVNI]*relayHandshakeWork) + r.handshakeWorkAwaitingPong = make(map[*relayHandshakeWork]addrPortVNI) + r.addrPortVNIToHandshakeWork = make(map[addrPortVNI]*relayHandshakeWork) r.allocateHandshakeCh = make(chan *endpoint) r.allocateWorkDoneCh = make(chan relayEndpointAllocWorkDoneEvent) r.handshakeWorkDoneCh = make(chan relayEndpointHandshakeWorkDoneEvent) r.cancelWorkCh = make(chan *endpoint) r.newServerEndpointCh = make(chan newRelayServerEndpointEvent) - r.rxChallengeCh = make(chan relayHandshakeChallengeEvent) + r.rxHandshakeDiscoMsgCh = make(chan relayHandshakeDiscoMsgEvent) r.runLoopStoppedCh = make(chan struct{}, 1) go r.runLoop() }) @@ -270,8 +276,11 @@ func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, dm *disco.CallMeMaybeV }) } -func (r *relayManager) handleBindUDPRelayEndpointChallenge(dm *disco.BindUDPRelayEndpointChallenge, di *discoInfo, src netip.AddrPort, vni uint32) { - relayManagerInputEvent(r, nil, &r.rxChallengeCh, relayHandshakeChallengeEvent{challenge: dm.Challenge, disco: di.discoKey, from: src, vni: vni, at: time.Now()}) +// handleGeneveEncapDiscoMsgNotBestAddr handles reception of Geneve-encapsulated +// disco messages if they are not associated with any known +// [*endpoint.bestAddr]. 
+func (r *relayManager) handleGeneveEncapDiscoMsgNotBestAddr(dm disco.Message, di *discoInfo, src netip.AddrPort, vni uint32) { + relayManagerInputEvent(r, nil, &r.rxHandshakeDiscoMsgCh, relayHandshakeDiscoMsgEvent{msg: dm, disco: di.discoKey, from: src, vni: vni, at: time.Now()}) } // relayManagerInputEvent initializes [relayManager] if necessary, starts @@ -337,26 +346,68 @@ func (r *relayManager) stopWorkRunLoop(ep *endpoint, f stopHandshakeWorkFilter) _, knownServer := r.serversByDisco[disco] if knownServer || f == stopHandshakeWorkAllServers { handshakeWork.cancel() - handshakeWork.wg.Wait() - delete(byServerDisco, disco) - delete(r.handshakeWorkByServerDiscoVNI, serverDiscoVNI{handshakeWork.se.ServerDisco, handshakeWork.se.VNI}) + done := <-handshakeWork.doneCh + r.handleHandshakeWorkDoneRunLoop(done) } } - if len(byServerDisco) == 0 { - delete(r.handshakeWorkByEndpointByServerDisco, ep) - } } } -func (r *relayManager) handleRxChallengeRunLoop(challenge relayHandshakeChallengeEvent) { - work, ok := r.handshakeWorkByServerDiscoVNI[serverDiscoVNI{challenge.disco, challenge.vni}] - if !ok { +// addrPortVNI represents a combined netip.AddrPort and Geneve header virtual +// network identifier. +type addrPortVNI struct { + addrPort netip.AddrPort + vni uint32 +} + +func (r *relayManager) handleRxHandshakeDiscoMsgRunLoop(event relayHandshakeDiscoMsgEvent) { + var ( + work *relayHandshakeWork + ok bool + ) + apv := addrPortVNI{event.from, event.vni} + switch event.msg.(type) { + case *disco.BindUDPRelayEndpointChallenge: + work, ok = r.handshakeWorkByServerDiscoVNI[serverDiscoVNI{event.disco, event.vni}] + if !ok { + // No outstanding work tied to this challenge, discard. + return + } + _, ok = r.handshakeWorkAwaitingPong[work] + if ok { + // We've seen a challenge for this relay endpoint previously, + // discard. Servers only respond to the first src ip:port they see + // binds from. + return + } + _, ok = r.addrPortVNIToHandshakeWork[apv] + if ok { + // There is existing work for the same [addrPortVNI] that is not + // 'work'. If both instances happen to be on the same server we + // could attempt to resolve event order using LamportID. For now + // just leave both work instances alone and take no action other + // than to discard this challenge msg. + return + } + // Update state so that future ping/pong will route to 'work'. + r.handshakeWorkAwaitingPong[work] = apv + r.addrPortVNIToHandshakeWork[apv] = work + case *disco.Ping, *disco.Pong: + work, ok = r.addrPortVNIToHandshakeWork[apv] + if !ok { + // No outstanding work tied to this [addrPortVNI], discard. + return + } + default: + // Unexpected message type, discard. return } select { - case <-work.doneCh: + case done := <-work.doneCh: + // handshakeServerEndpoint() returned, clean up its state. + r.handleHandshakeWorkDoneRunLoop(done) return - case work.rxChallengeCh <- challenge: + case work.rxDiscoMsgCh <- event: return } } @@ -375,14 +426,17 @@ func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshak delete(r.handshakeWorkByEndpointByServerDisco, done.work.ep) } delete(r.handshakeWorkByServerDiscoVNI, serverDiscoVNI{done.work.se.ServerDisco, done.work.se.VNI}) - if !done.answerSentTo.IsValid() { - // The handshake timed out. + apv, ok := r.handshakeWorkAwaitingPong[work] + if ok { + delete(r.handshakeWorkAwaitingPong, work) + delete(r.addrPortVNIToHandshakeWork, apv) + } + if !done.pongReceivedFrom.IsValid() { + // The handshake or ping/pong probing timed out. 
return } - // We received a challenge from and transmitted an answer towards the relay - // server. - // TODO(jwhited): Make the associated [*endpoint] aware of this - // [tailscale.com/net/udprelay.ServerEndpoint]. + // This relay endpoint is functional. + // TODO(jwhited): Set it on done.work.ep.bestAddr if it is a betterAddr(). } func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelayServerEndpointEvent) { @@ -398,19 +452,10 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay return } - // The existing work is no longer valid, clean it up. Be sure to lookup - // by the existing work's [*endpoint], not the incoming "new" work as - // they are not necessarily matching. + // The existing work is no longer valid, clean it up. existingWork.cancel() - existingWork.wg.Wait() - delete(r.handshakeWorkByServerDiscoVNI, sdv) - byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[existingWork.ep] - if ok { - delete(byServerDisco, sdv.serverDisco) - if len(byServerDisco) == 0 { - delete(r.handshakeWorkByEndpointByServerDisco, existingWork.ep) - } - } + done := <-existingWork.doneCh + r.handleHandshakeWorkDoneRunLoop(done) } // Check for duplicate work by [*endpoint] + server disco. @@ -425,12 +470,8 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay } // Cancel existing handshake that has a lower lamport ID. existingWork.cancel() - existingWork.wg.Wait() - delete(r.handshakeWorkByServerDiscoVNI, sdv) - delete(byServerDisco, sdv.serverDisco) - if len(byServerDisco) == 0 { - delete(r.handshakeWorkByEndpointByServerDisco, existingWork.ep) - } + done := <-existingWork.doneCh + r.handleHandshakeWorkDoneRunLoop(done) } } @@ -464,14 +505,12 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay // We're ready to start a new handshake. 
ctx, cancel := context.WithCancel(context.Background()) - wg := &sync.WaitGroup{} work := &relayHandshakeWork{ ep: newServerEndpoint.ep, se: newServerEndpoint.se, - doneCh: make(chan struct{}), + doneCh: make(chan relayEndpointHandshakeWorkDoneEvent, 1), ctx: ctx, cancel: cancel, - wg: wg, } if byServerDisco == nil { byServerDisco = make(map[key.DiscoPublic]*relayHandshakeWork) @@ -480,19 +519,16 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay byServerDisco[newServerEndpoint.se.ServerDisco] = work r.handshakeWorkByServerDiscoVNI[sdv] = work - wg.Add(1) go r.handshakeServerEndpoint(work) } func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { - defer work.wg.Done() - done := relayEndpointHandshakeWorkDoneEvent{work: work} r.ensureDiscoInfoFor(work) defer func() { r.derefDiscoInfoFor(work) - close(work.doneCh) + work.doneCh <- done relayManagerInputEvent(r, work.ctx, &r.handshakeWorkDoneCh, done) work.cancel() }() @@ -504,7 +540,7 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { for _, addrPort := range work.se.AddrPorts { if addrPort.IsValid() { sentBindAny = true - go work.ep.c.sendDiscoMessage(addrPort, vni, key.NodePublic{}, work.se.ServerDisco, bind, discoLog) + go work.ep.c.sendDiscoMessage(addrPort, vni, key.NodePublic{}, work.se.ServerDisco, bind, discoVerboseLog) } } if !sentBindAny { @@ -518,46 +554,83 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { timer := time.NewTimer(min(work.se.BindLifetime.Duration, maxHandshakeLifetime)) defer timer.Stop() - // Wait for cancellation, a challenge to be rx'd, or handshake lifetime to - // expire. Our initial implementation values simplicity over other aspects, - // e.g. it is not resilient to any packet loss. - // - // We may want to eventually consider [disc.BindUDPRelayEndpoint] - // retransmission lacking challenge rx, and - // [disco.BindUDPRelayEndpointAnswer] duplication in front of - // [disco.Ping] until [disco.Ping] or [disco.Pong] is received. - select { - case <-work.ctx.Done(): - return - case challenge := <-work.rxChallengeCh: - answer := &disco.BindUDPRelayEndpointAnswer{Answer: challenge.challenge} - done.answerSentTo = challenge.from - // Send answer back to relay server. Typically sendDiscoMessage() calls - // are invoked via a new goroutine in attempt to limit crypto+syscall - // time contributing to system backpressure, and to fire roundtrip - // latency-relevant messages as closely together as possible. We - // intentionally don't do that here, because: - // 1. The primary backpressure concern is around the work.rxChallengeCh - // writer on the [Conn] packet rx path, who is already unblocked - // since we read from the channel. Relay servers only ever tx one - // challenge per rx'd bind message for a given (the first seen) src. - // 2. runLoop() may be waiting for this 'work' to complete if - // explicitly canceled for some reason elsewhere, but this is - // typically only around [*endpoint] and/or [Conn] shutdown. - // 3. It complicates the defer()'d [*discoInfo] deref and 'work' - // completion event order. sendDiscoMessage() assumes the related - // [*discoInfo] is still available. We also don't want the - // [*endpoint] to send a [disco.Ping] before the - // [disco.BindUDPRelayEndpointAnswer] has gone out, otherwise the - // remote side will never see the ping, delaying/preventing the - // [udprelay.ServerEndpoint] from becoming fully operational. - // 4. 
This is a singular tx with no roundtrip latency measurements - // involved. - work.ep.c.sendDiscoMessage(challenge.from, vni, key.NodePublic{}, work.se.ServerDisco, answer, discoLog) - return - case <-timer.C: - // The handshake timed out. - return + // Limit the number of pings we will transmit. Inbound pings trigger + // outbound pings, so we want to be a little defensive. + const limitPings = 10 + + var ( + handshakeState disco.BindUDPRelayHandshakeState = disco.BindUDPRelayHandshakeStateBindSent + sentPingAt = make(map[stun.TxID]time.Time) + ) + + txPing := func(to netip.AddrPort, withAnswer *[32]byte) { + if len(sentPingAt) == limitPings { + return + } + epDisco := work.ep.disco.Load() + if epDisco == nil { + return + } + txid := stun.NewTxID() + sentPingAt[txid] = time.Now() + ping := &disco.Ping{ + TxID: txid, + NodeKey: work.ep.c.publicKeyAtomic.Load(), + } + go func() { + if withAnswer != nil { + answer := &disco.BindUDPRelayEndpointAnswer{Answer: *withAnswer} + work.ep.c.sendDiscoMessage(to, vni, key.NodePublic{}, work.se.ServerDisco, answer, discoVerboseLog) + } + work.ep.c.sendDiscoMessage(to, vni, key.NodePublic{}, epDisco.key, ping, discoVerboseLog) + }() + } + + // This for{select{}} is responsible for handshaking and tx'ing ping/pong + // when the handshake is complete. + for { + select { + case <-work.ctx.Done(): + return + case msgEvent := <-work.rxDiscoMsgCh: + switch msg := msgEvent.msg.(type) { + case *disco.BindUDPRelayEndpointChallenge: + if handshakeState >= disco.BindUDPRelayHandshakeStateAnswerSent { + continue + } + txPing(msgEvent.from, &msg.Challenge) + handshakeState = disco.BindUDPRelayHandshakeStateAnswerSent + case *disco.Ping: + if handshakeState < disco.BindUDPRelayHandshakeStateAnswerSent { + continue + } + // An inbound ping from the remote peer indicates we completed a + // handshake with the relay server (our answer msg was + // received). Chances are our ping was dropped before the remote + // handshake was complete. We need to rx a pong to determine + // latency, so send another ping. Since the handshake is + // complete we do not need to send an answer in front of this + // one. + txPing(msgEvent.from, nil) + case *disco.Pong: + at, ok := sentPingAt[msg.TxID] + if !ok { + continue + } + // The relay server endpoint is functional! Record the + // round-trip latency and return. + done.pongReceivedFrom = msgEvent.from + done.latency = time.Since(at) + return + default: + // unexpected message type, silently discard + continue + } + return + case <-timer.C: + // The handshake timed out. + return + } } } diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index 3b75db9f6..8276849aa 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -25,6 +25,6 @@ func TestRelayManagerInitAndIdle(t *testing.T) { <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleBindUDPRelayEndpointChallenge(&disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, netip.AddrPort{}, 0) + rm.handleGeneveEncapDiscoMsgNotBestAddr(&disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, netip.AddrPort{}, 0) <-rm.runLoopStoppedCh } From 5e54819ceecd789bce87c42b09b632932931d794 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 28 May 2025 15:43:12 -0400 Subject: [PATCH 0899/1708] net/dns: cache dns.Config for reuse when compileConfig fails (#16059) fixes tailscale/corp#25612 We now keep track of any dns configurations which we could not compile. 
This gives RecompileDNSConfig a configuration to attempt to recompile and apply when the OS pokes us to indicate that the interface dns servers have changed/updated. The manager config will remain unset until we have the required information to compile it correctly which should eliminate the problematic SERVFAIL responses (especially on macOS 15). This also removes the missingUpstreamRecovery func in the forwarder which is no longer required now that we have proper error handling and recovery manager and the client. Signed-off-by: Jonathan Nobels --- net/dns/manager.go | 46 ++++++++++------------------ net/dns/manager_test.go | 56 +++++++++++++++++++++++++++++++++-- net/dns/resolver/forwarder.go | 26 ++++------------ net/dns/resolver/tsdns.go | 9 ------ 4 files changed, 76 insertions(+), 61 deletions(-) diff --git a/net/dns/manager.go b/net/dns/manager.go index 64bf12c6b..5d6f225ce 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -25,7 +25,6 @@ import ( "tailscale.com/net/netmon" "tailscale.com/net/tsdial" "tailscale.com/syncs" - "tailscale.com/tstime/rate" "tailscale.com/types/dnstype" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" @@ -63,10 +62,8 @@ type Manager struct { knobs *controlknobs.Knobs // or nil goos string // if empty, gets set to runtime.GOOS - mu sync.Mutex // guards following - // config is the last configuration we successfully compiled or nil if there - // was any failure applying the last configuration. - config *Config + mu sync.Mutex // guards following + config *Config // Tracks the last viable DNS configuration set by Set. nil on failures other than compilation failures or if set has never been called. } // NewManagers created a new manager from the given config. @@ -93,22 +90,6 @@ func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, goos: goos, } - // Rate limit our attempts to correct our DNS configuration. - // This is done on incoming queries, we don't want to spam it. - limiter := rate.NewLimiter(1.0/5.0, 1) - - // This will recompile the DNS config, which in turn will requery the system - // DNS settings. The recovery func should triggered only when we are missing - // upstream nameservers and require them to forward a query. - m.resolver.SetMissingUpstreamRecovery(func() { - if limiter.Allow() { - m.logf("resolution failed due to missing upstream nameservers. Recompiling DNS configuration.") - if err := m.RecompileDNSConfig(); err != nil { - m.logf("config recompilation failed: %v", err) - } - } - }) - m.ctx, m.ctxCancel = context.WithCancel(context.Background()) m.logf("using %T", m.os) return m @@ -117,7 +98,7 @@ func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, // Resolver returns the Manager's DNS Resolver. func (m *Manager) Resolver() *resolver.Resolver { return m.resolver } -// RecompileDNSConfig sets the DNS config to the current value, which has +// RecompileDNSConfig recompiles the last attempted DNS configuration, which has // the side effect of re-querying the OS's interface nameservers. This should be used // on platforms where the interface nameservers can change. Darwin, for example, // where the nameservers aren't always available when we process a major interface @@ -127,14 +108,14 @@ func (m *Manager) Resolver() *resolver.Resolver { return m.resolver } // give a better or different result than when [Manager.Set] was last called. The // logic for making that determination is up to the caller. 
// -// It returns [ErrNoDNSConfig] if the [Manager] has no existing DNS configuration. +// It returns [ErrNoDNSConfig] if [Manager.Set] has never been called. func (m *Manager) RecompileDNSConfig() error { m.mu.Lock() defer m.mu.Unlock() - if m.config == nil { - return ErrNoDNSConfig + if m.config != nil { + return m.setLocked(*m.config) } - return m.setLocked(*m.config) + return ErrNoDNSConfig } func (m *Manager) Set(cfg Config) error { @@ -154,15 +135,15 @@ func (m *Manager) GetBaseConfig() (OSConfig, error) { func (m *Manager) setLocked(cfg Config) error { syncs.AssertLocked(&m.mu) - // On errors, the 'set' config is cleared. - m.config = nil - m.logf("Set: %v", logger.ArgWriter(func(w *bufio.Writer) { cfg.WriteToBufioWriter(w) })) rcfg, ocfg, err := m.compileConfig(cfg) if err != nil { + // On a compilation failure, set m.config set for later reuse by + // [Manager.RecompileDNSConfig] and return the error. + m.config = &cfg return err } @@ -174,9 +155,11 @@ func (m *Manager) setLocked(cfg Config) error { })) if err := m.resolver.SetConfig(rcfg); err != nil { + m.config = nil return err } if err := m.os.SetDNS(ocfg); err != nil { + m.config = nil m.health.SetUnhealthy(osConfigurationSetWarnable, health.Args{health.ArgError: err.Error()}) return err } @@ -355,7 +338,10 @@ func (m *Manager) compileConfig(cfg Config) (rcfg resolver.Config, ocfg OSConfig // that as the forwarder for all DNS traffic that quad-100 doesn't handle. if isApple || !m.os.SupportsSplitDNS() { // If the OS can't do native split-dns, read out the underlying - // resolver config and blend it into our config. + // resolver config and blend it into our config. On apple platforms, [OSConfigurator.GetBaseConfig] + // has a tendency to temporarily fail if called immediately following + // an interface change. These failures should be retried if/when the OS + // indicates that the DNS configuration has changed via [RecompileDNSConfig]. 
cfg, err := m.os.GetBaseConfig() if err == nil { baseCfg = &cfg diff --git a/net/dns/manager_test.go b/net/dns/manager_test.go index 2bdbc72e2..522f9636a 100644 --- a/net/dns/manager_test.go +++ b/net/dns/manager_test.go @@ -4,6 +4,7 @@ package dns import ( + "errors" "net/netip" "runtime" "strings" @@ -24,8 +25,9 @@ type fakeOSConfigurator struct { SplitDNS bool BaseConfig OSConfig - OSConfig OSConfig - ResolverConfig resolver.Config + OSConfig OSConfig + ResolverConfig resolver.Config + GetBaseConfigErr *error } func (c *fakeOSConfigurator) SetDNS(cfg OSConfig) error { @@ -45,6 +47,9 @@ func (c *fakeOSConfigurator) SupportsSplitDNS() bool { } func (c *fakeOSConfigurator) GetBaseConfig() (OSConfig, error) { + if c.GetBaseConfigErr != nil { + return OSConfig{}, *c.GetBaseConfigErr + } return c.BaseConfig, nil } @@ -1019,3 +1024,50 @@ func upstreams(strs ...string) (ret map[dnsname.FQDN][]*dnstype.Resolver) { } return ret } + +func TestConfigRecompilation(t *testing.T) { + fakeErr := errors.New("fake os configurator error") + f := &fakeOSConfigurator{} + f.GetBaseConfigErr = &fakeErr + f.BaseConfig = OSConfig{ + Nameservers: mustIPs("1.1.1.1"), + } + + config := Config{ + Routes: upstreams("ts.net", "69.4.2.0", "foo.ts.net", ""), + SearchDomains: fqdns("foo.ts.net"), + } + + m := NewManager(t.Logf, f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "darwin") + + var managerConfig *resolver.Config + m.resolver.TestOnlySetHook(func(cfg resolver.Config) { + managerConfig = &cfg + }) + + // Initial set should error out and store the config + if err := m.Set(config); err == nil { + t.Fatalf("Want non-nil error. Got nil") + } + if m.config == nil { + t.Fatalf("Want persisted config. Got nil.") + } + if managerConfig != nil { + t.Fatalf("Want nil managerConfig. Got %v", managerConfig) + } + + // Clear the error. We should take the happy path now and + // set m.manager's Config. + f.GetBaseConfigErr = nil + + // Recompilation without an error should succeed and set m.config and m.manager's [resolver.Config] + if err := m.RecompileDNSConfig(); err != nil { + t.Fatalf("Want nil error. Got err %v", err) + } + if m.config == nil { + t.Fatalf("Want non-nil config. Got nil") + } + if managerConfig == nil { + t.Fatalf("Want non nil managerConfig. Got nil") + } +} diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index 321401a84..c87fbd504 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -245,12 +245,6 @@ type forwarder struct { // /etc/resolv.conf is missing/corrupt, and the peerapi ExitDNS stub // resolver lookup. cloudHostFallback []resolverAndDelay - - // missingUpstreamRecovery, if non-nil, is set called when a SERVFAIL is - // returned due to missing upstream resolvers. - // - // This should attempt to properly (re)set the upstream resolvers. 
- missingUpstreamRecovery func() } func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, health *health.Tracker, knobs *controlknobs.Knobs) *forwarder { @@ -258,13 +252,12 @@ func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkS panic("nil netMon") } f := &forwarder{ - logf: logger.WithPrefix(logf, "forward: "), - netMon: netMon, - linkSel: linkSel, - dialer: dialer, - health: health, - controlKnobs: knobs, - missingUpstreamRecovery: func() {}, + logf: logger.WithPrefix(logf, "forward: "), + netMon: netMon, + linkSel: linkSel, + dialer: dialer, + health: health, + controlKnobs: knobs, } f.ctx, f.ctxCancel = context.WithCancel(context.Background()) return f @@ -962,13 +955,6 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: ""}) f.logf("no upstream resolvers set, returning SERVFAIL") - // Attempt to recompile the DNS configuration - // If we are being asked to forward queries and we have no - // nameservers, the network is in a bad state. - if f.missingUpstreamRecovery != nil { - f.missingUpstreamRecovery() - } - res, err := servfailResponse(query) if err != nil { return err diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go index 107740b13..33fa9c3c0 100644 --- a/net/dns/resolver/tsdns.go +++ b/net/dns/resolver/tsdns.go @@ -251,15 +251,6 @@ func New(logf logger.Logf, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, h return r } -// SetMissingUpstreamRecovery sets a callback to be called upon encountering -// a SERVFAIL due to missing upstream resolvers. -// -// This call should only happen before the resolver is used. It is not safe -// for concurrent use. -func (r *Resolver) SetMissingUpstreamRecovery(f func()) { - r.forwarder.missingUpstreamRecovery = f -} - func (r *Resolver) TestOnlySetHook(hook func(Config)) { r.saveConfigForTests = hook } func (r *Resolver) SetConfig(cfg Config) error { From 36df320e6a66546f4921d359c555b64059a0aded Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 28 May 2025 14:12:24 -0700 Subject: [PATCH 0900/1708] tsnet: remove an expired configuration-path migration step (#16120) As note in the comment, it now being more than six months since this was deprecated and there being no (further) uses of the old pattern in our internal services, let's drop the migrator. Updates #cleanup Change-Id: Ie4fb9518b2ca04a9b361e09c51cbbacf1e2633a8 Signed-off-by: M. J. Fromberger --- tsnet/tsnet.go | 48 +----------------------------------------------- 1 file changed, 1 insertion(+), 47 deletions(-) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 4664a66a7..65367f235 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -536,10 +536,7 @@ func (s *Server) start() (reterr error) { if err != nil { return err } - s.rootPath, err = getTSNetDir(s.logf, confDir, prog) - if err != nil { - return err - } + s.rootPath = filepath.Join(confDir, "tsnet-"+prog) } if err := os.MkdirAll(s.rootPath, 0700); err != nil { return err @@ -897,49 +894,6 @@ func (s *Server) getUDPHandlerForFlow(src, dst netip.AddrPort) (handler func(net return func(c nettype.ConnPacketConn) { ln.handle(c) }, true } -// getTSNetDir usually just returns filepath.Join(confDir, "tsnet-"+prog) -// with no error. -// -// One special case is that it renames old "tslib-" directories to -// "tsnet-", and that rename might return an error. 
-// -// TODO(bradfitz): remove this maybe 6 months after 2022-03-17, -// once people (notably Tailscale corp services) have updated. -func getTSNetDir(logf logger.Logf, confDir, prog string) (string, error) { - oldPath := filepath.Join(confDir, "tslib-"+prog) - newPath := filepath.Join(confDir, "tsnet-"+prog) - - fi, err := os.Lstat(oldPath) - if os.IsNotExist(err) { - // Common path. - return newPath, nil - } - if err != nil { - return "", err - } - if !fi.IsDir() { - return "", fmt.Errorf("expected old tslib path %q to be a directory; got %v", oldPath, fi.Mode()) - } - - // At this point, oldPath exists and is a directory. But does - // the new path exist? - - fi, err = os.Lstat(newPath) - if err == nil && fi.IsDir() { - // New path already exists somehow. Ignore the old one and - // don't try to migrate it. - return newPath, nil - } - if err != nil && !os.IsNotExist(err) { - return "", err - } - if err := os.Rename(oldPath, newPath); err != nil { - return "", err - } - logf("renamed old tsnet state storage directory %q to %q", oldPath, newPath) - return newPath, nil -} - // APIClient returns a tailscale.Client that can be used to make authenticated // requests to the Tailscale control server. // It requires the user to set tailscale.I_Acknowledge_This_API_Is_Unstable. From b0d35975c0462a8499667ac7b52d685b5e90465a Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 28 May 2025 17:54:04 -0700 Subject: [PATCH 0901/1708] go.toolchain.rev: bump to 1.24.3 (#16060) Updates https://github.com/tailscale/corp/issues/28916 Signed-off-by: Andrew Lytvynov --- go.toolchain.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index e8ede337c..a5d73929c 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -982da8f24fa0504f2214f24b0d68b2febd5983f8 +98e8c99c256a5aeaa13725d2e43fdd7f465ba200 From dca4036a207b5f7edeb1d54cce30c7dfe1914499 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 27 May 2025 13:31:39 -0700 Subject: [PATCH 0902/1708] util/set: add SmallSet Updates tailscale/corp#29093 Change-Id: I0e07e83dee51b4915597a913b0583c99756d90e2 Signed-off-by: Brad Fitzpatrick --- util/set/smallset.go | 134 ++++++++++++++++++++++++++++++++++++++ util/set/smallset_test.go | 91 ++++++++++++++++++++++++++ 2 files changed, 225 insertions(+) create mode 100644 util/set/smallset.go create mode 100644 util/set/smallset_test.go diff --git a/util/set/smallset.go b/util/set/smallset.go new file mode 100644 index 000000000..51cad6a25 --- /dev/null +++ b/util/set/smallset.go @@ -0,0 +1,134 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package set + +import ( + "iter" + "maps" + + "tailscale.com/types/structs" +) + +// SmallSet is a set that is optimized for reducing memory overhead when the +// expected size of the set is 0 or 1 elements. +// +// The zero value of SmallSet is a usable empty set. +// +// When storing a SmallSet in a map as a value type, it is important to re-assign +// the map entry after calling Add or Delete, as the SmallSet's representation +// may change. +// +// Copying a SmallSet by value may alias the previous value. Use the Clone method +// to create a new SmallSet with the same contents. 
+type SmallSet[T comparable] struct { + _ structs.Incomparable // to prevent == mistakes + one T // if non-zero, then single item in set + m Set[T] // if non-nil, the set of items, which might be size 1 if it's the zero value of T +} + +// Values returns an iterator over the elements of the set. +// The iterator will yield the elements in no particular order. +func (s SmallSet[T]) Values() iter.Seq[T] { + if s.m != nil { + return maps.Keys(s.m) + } + var zero T + return func(yield func(T) bool) { + if s.one != zero { + yield(s.one) + } + } +} + +// Contains reports whether e is in the set. +func (s SmallSet[T]) Contains(e T) bool { + if s.m != nil { + return s.m.Contains(e) + } + var zero T + return e != zero && s.one == e +} + +// Add adds e to the set. +// +// When storing a SmallSet in a map as a value type, it is important to +// re-assign the map entry after calling Add or Delete, as the SmallSet's +// representation may change. +func (s *SmallSet[T]) Add(e T) { + var zero T + if s.m != nil { + s.m.Add(e) + return + } + // Size zero to one non-zero element. + if s.one == zero && e != zero { + s.one = e + return + } + // Need to make a multi map, either + // because we now have two items, or + // because e is the zero value. + s.m = Set[T]{} + if s.one != zero { + s.m.Add(s.one) // move single item to multi + } + s.m.Add(e) // add new item + s.one = zero +} + +// Len reports the number of elements in the set. +func (s SmallSet[T]) Len() int { + var zero T + if s.m != nil { + return s.m.Len() + } + if s.one != zero { + return 1 + } + return 0 +} + +// Delete removes e from the set. +// +// When storing a SmallSet in a map as a value type, it is important to +// re-assign the map entry after calling Add or Delete, as the SmallSet's +// representation may change. +func (s *SmallSet[T]) Delete(e T) { + var zero T + if s.m == nil { + if s.one == e { + s.one = zero + } + return + } + s.m.Delete(e) + + // If the map size drops to zero, that means + // it only contained the zero value of T. + if s.m.Len() == 0 { + s.m = nil + return + } + + // If the map size drops to one element and doesn't + // contain the zero value, we can switch back to the + // single-item representation. + if s.m.Len() == 1 { + for v := range s.m { + if v != zero { + s.one = v + s.m = nil + } + } + } + return +} + +// Clone returns a copy of s that doesn't alias the original. 
+func (s SmallSet[T]) Clone() SmallSet[T] { + return SmallSet[T]{ + one: s.one, + m: maps.Clone(s.m), // preserves nilness + } +} diff --git a/util/set/smallset_test.go b/util/set/smallset_test.go new file mode 100644 index 000000000..2635bc893 --- /dev/null +++ b/util/set/smallset_test.go @@ -0,0 +1,91 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package set + +import ( + "fmt" + "iter" + "maps" + "reflect" + "slices" + "testing" +) + +func TestSmallSet(t *testing.T) { + t.Parallel() + + wantSize := reflect.TypeFor[int64]().Size() + reflect.TypeFor[map[int]struct{}]().Size() + if wantSize > 16 { + t.Errorf("wantSize should be no more than 16") // it might be smaller on 32-bit systems + } + if size := reflect.TypeFor[SmallSet[int64]]().Size(); size != wantSize { + t.Errorf("SmallSet[int64] size is %d, want %v", size, wantSize) + } + + type op struct { + add bool + v int + } + ops := iter.Seq[op](func(yield func(op) bool) { + for _, add := range []bool{false, true} { + for v := range 4 { + if !yield(op{add: add, v: v}) { + return + } + } + } + }) + type setLike interface { + Add(int) + Delete(int) + } + apply := func(s setLike, o op) { + if o.add { + s.Add(o.v) + } else { + s.Delete(o.v) + } + } + + // For all combinations of 4 operations, + // apply them to both a regular map and SmallSet + // and make sure all the invariants hold. + + for op1 := range ops { + for op2 := range ops { + for op3 := range ops { + for op4 := range ops { + + normal := Set[int]{} + small := &SmallSet[int]{} + for _, op := range []op{op1, op2, op3, op4} { + apply(normal, op) + apply(small, op) + } + + name := func() string { + return fmt.Sprintf("op1=%v, op2=%v, op3=%v, op4=%v", op1, op2, op3, op4) + } + if normal.Len() != small.Len() { + t.Errorf("len mismatch after ops %s: normal=%d, small=%d", name(), normal.Len(), small.Len()) + } + if got := small.Clone().Len(); normal.Len() != got { + t.Errorf("len mismatch after ops %s: normal=%d, clone=%d", name(), normal.Len(), got) + } + + normalEle := slices.Sorted(maps.Keys(normal)) + smallEle := slices.Sorted(small.Values()) + if !slices.Equal(normalEle, smallEle) { + t.Errorf("elements mismatch after ops %s: normal=%v, small=%v", name(), normalEle, smallEle) + } + for e := range 5 { + if normal.Contains(e) != small.Contains(e) { + t.Errorf("contains(%v) mismatch after ops %s: normal=%v, small=%v", e, name(), normal.Contains(e), small.Contains(e)) + } + } + } + } + } + } +} From 4cccd15eeb13d5d7f8f831e5406a567b8f18378b Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 29 May 2025 13:51:46 -0500 Subject: [PATCH 0903/1708] ipn/ipnlocal: fix data race when accessing b.appConnector The field must only be accessed while holding LocalBackend's mutex, but there are two places where it's accessed without the mutex: - (LocalBackend).MaybeClearAppConnector() - handleC2NAppConnectorDomainRoutesGet() Fixes #16123 Signed-off-by: Nick Khyl --- ipn/ipnlocal/c2n.go | 5 +++-- ipn/ipnlocal/local.go | 17 ++++++++++++----- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index b33794751..876c13064 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -240,13 +240,14 @@ func handleC2NAppConnectorDomainRoutesGet(b *LocalBackend, w http.ResponseWriter b.logf("c2n: GET /appconnector/routes received") var res tailcfg.C2NAppConnectorDomainRoutesResponse - if b.appConnector == nil { + appConnector := b.AppConnector() + if appConnector == nil { w.Header().Set("Content-Type", 
"application/json") json.NewEncoder(w).Encode(res) return } - res.Domains = b.appConnector.DomainRoutes() + res.Domains = appConnector.DomainRoutes() w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(res) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index d2f6c86f7..d69c07a9f 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4150,8 +4150,8 @@ func (b *LocalBackend) SetUseExitNodeEnabled(v bool) (ipn.PrefsView, error) { // AdvertiseRoutes has been set in the MaskedPrefs. func (b *LocalBackend) MaybeClearAppConnector(mp *ipn.MaskedPrefs) error { var err error - if b.appConnector != nil && mp.AdvertiseRoutesSet { - err = b.appConnector.ClearRoutes() + if ac := b.AppConnector(); ac != nil && mp.AdvertiseRoutesSet { + err = ac.ClearRoutes() if err != nil { b.logf("appc: clear routes error: %v", err) } @@ -4755,9 +4755,7 @@ func (b *LocalBackend) readvertiseAppConnectorRoutes() { // // Grab a copy of the field, since b.mu only guards access to the // b.appConnector field itself. - b.mu.Lock() - appConnector := b.appConnector - b.mu.Unlock() + appConnector := b.AppConnector() if appConnector == nil { return @@ -6432,6 +6430,15 @@ func (b *LocalBackend) OfferingAppConnector() bool { return b.appConnector != nil } +// AppConnector returns the current AppConnector, or nil if not configured. +// +// TODO(nickkhyl): move app connectors to [nodeBackend], or perhaps a feature package? +func (b *LocalBackend) AppConnector() *appc.AppConnector { + b.mu.Lock() + defer b.mu.Unlock() + return b.appConnector +} + // allowExitNodeDNSProxyToServeName reports whether the Exit Node DNS // proxy is allowed to serve responses for the provided DNS name. func (b *LocalBackend) allowExitNodeDNSProxyToServeName(name string) bool { From 191afd3390f08354515af9d6b0c3f6b919b5a0fb Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 29 May 2025 10:41:23 -0500 Subject: [PATCH 0904/1708] net/tshttpproxy: fix WDAP/PAC proxy detection on Win10 1607 and earlier Using WINHTTP_AUTOPROXY_ALLOW_AUTOCONFIG on Windows versions older than Windows 10 1703 (build 15063) is not supported and causes WinHttpGetProxyForUrl to fail with ERROR_INVALID_PARAMETER. This results in failures reaching the control on environments where a proxy is required. We use wingoes version detection to conditionally set the WINHTTP_AUTOPROXY_ALLOW_AUTOCONFIG flag on Windows builds greater than 15063. While there, we also update proxy detection to use WINHTTP_AUTO_DETECT_TYPE_DNS_A, as DNS-based proxy discovery might be required with Active Directory and in certain other environments. 
Updates tailscale/corp#29168 Fixes #879 Signed-off-by: Nick Khyl --- cmd/derper/depaware.txt | 2 +- net/tshttpproxy/tshttpproxy_windows.go | 28 ++++++++++++++++++-------- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index ca7723530..640e64d6c 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -12,7 +12,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw - W 💣 github.com/dblohm7/wingoes from tailscale.com/util/winutil + W 💣 github.com/dblohm7/wingoes from tailscale.com/util/winutil+ github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ diff --git a/net/tshttpproxy/tshttpproxy_windows.go b/net/tshttpproxy/tshttpproxy_windows.go index 06a1f5ae4..7163c7863 100644 --- a/net/tshttpproxy/tshttpproxy_windows.go +++ b/net/tshttpproxy/tshttpproxy_windows.go @@ -18,6 +18,7 @@ import ( "unsafe" "github.com/alexbrainman/sspi/negotiate" + "github.com/dblohm7/wingoes" "golang.org/x/sys/windows" "tailscale.com/hostinfo" "tailscale.com/syncs" @@ -97,9 +98,7 @@ func proxyFromWinHTTPOrCache(req *http.Request) (*url.URL, error) { } if err == windows.ERROR_INVALID_PARAMETER { metricErrInvalidParameters.Add(1) - // Seen on Windows 8.1. (https://github.com/tailscale/tailscale/issues/879) - // TODO(bradfitz): figure this out. - setNoProxyUntil(time.Hour) + setNoProxyUntil(10 * time.Second) proxyErrorf("tshttpproxy: winhttp: GetProxyForURL(%q): ERROR_INVALID_PARAMETER [unexpected]", urlStr) return nil, nil } @@ -238,17 +237,30 @@ func (pi *winHTTPProxyInfo) free() { } } -var proxyForURLOpts = &winHTTPAutoProxyOptions{ - DwFlags: winHTTP_AUTOPROXY_ALLOW_AUTOCONFIG | winHTTP_AUTOPROXY_AUTO_DETECT, - DwAutoDetectFlags: winHTTP_AUTO_DETECT_TYPE_DHCP, // | winHTTP_AUTO_DETECT_TYPE_DNS_A, -} +var getProxyForURLOpts = sync.OnceValue(func() *winHTTPAutoProxyOptions { + opts := &winHTTPAutoProxyOptions{ + DwFlags: winHTTP_AUTOPROXY_AUTO_DETECT, + DwAutoDetectFlags: winHTTP_AUTO_DETECT_TYPE_DHCP | winHTTP_AUTO_DETECT_TYPE_DNS_A, + } + // Support for the WINHTTP_AUTOPROXY_ALLOW_AUTOCONFIG flag was added in Windows 10, version 1703. + // + // Using it on earlier versions causes GetProxyForURL to fail with ERROR_INVALID_PARAMETER, + // which prevents proxy detection and can lead to failures reaching the control server + // on environments where a proxy is required. 
+ // + // https://web.archive.org/web/20250529044903/https://learn.microsoft.com/en-us/windows/win32/api/winhttp/ns-winhttp-winhttp_autoproxy_options + if wingoes.IsWin10BuildOrGreater(wingoes.Win10Build1703) { + opts.DwFlags |= winHTTP_AUTOPROXY_ALLOW_AUTOCONFIG + } + return opts +}) func (hi winHTTPInternet) GetProxyForURL(urlStr string) (string, error) { var out winHTTPProxyInfo err := winHTTPGetProxyForURL( hi, windows.StringToUTF16Ptr(urlStr), - proxyForURLOpts, + getProxyForURLOpts(), &out, ) if err != nil { From 401d6c0cfaae0a2caf50640b79287505838704a9 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 29 May 2025 12:05:41 -0700 Subject: [PATCH 0905/1708] go.mod: bump golang.org/x deps Updates #8043 Change-Id: I8702a17130559353ccdecbe8b64eeee461ff09c3 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 3 ++- cmd/tailscaled/depaware.txt | 3 ++- go.mod | 22 +++++++++--------- go.sum | 44 +++++++++++++++++------------------ tsnet/depaware.txt | 3 ++- 5 files changed, 39 insertions(+), 36 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 782603df0..2e467843a 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -1072,7 +1072,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/mlkem from crypto/tls+ crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -1092,6 +1092,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/internal/randutil from crypto/dsa+ crypto/internal/sysrand from crypto/internal/entropy+ crypto/md5 from crypto/tls+ + LD crypto/mlkem from golang.org/x/crypto/ssh crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls+ crypto/rsa from crypto/tls+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index d9a9cac65..c6011a12c 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -551,7 +551,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/mlkem from crypto/tls+ crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -571,6 +571,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/internal/randutil from crypto/dsa+ crypto/internal/sysrand from crypto/internal/entropy+ crypto/md5 from crypto/tls+ + LD crypto/mlkem from golang.org/x/crypto/ssh crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls+ crypto/rsa from crypto/tls+ diff --git a/go.mod b/go.mod index f346b1e40..d44a14aef 100644 --- a/go.mod +++ b/go.mod @@ -99,16 +99,16 @@ require ( go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.37.0 + golang.org/x/crypto v0.38.0 golang.org/x/exp 
v0.0.0-20250210185358-939b2ce775ac - golang.org/x/mod v0.23.0 - golang.org/x/net v0.36.0 - golang.org/x/oauth2 v0.26.0 - golang.org/x/sync v0.13.0 - golang.org/x/sys v0.32.0 - golang.org/x/term v0.31.0 - golang.org/x/time v0.10.0 - golang.org/x/tools v0.30.0 + golang.org/x/mod v0.24.0 + golang.org/x/net v0.40.0 + golang.org/x/oauth2 v0.30.0 + golang.org/x/sync v0.14.0 + golang.org/x/sys v0.33.0 + golang.org/x/term v0.32.0 + golang.org/x/time v0.11.0 + golang.org/x/tools v0.33.0 golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 golang.zx2c4.com/wireguard/windows v0.5.3 gopkg.in/square/go-jose.v2 v2.6.0 @@ -392,8 +392,8 @@ require ( gitlab.com/digitalxero/go-conventional-commit v1.0.7 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect - golang.org/x/image v0.24.0 // indirect - golang.org/x/text v0.24.0 // indirect + golang.org/x/image v0.27.0 // indirect + golang.org/x/text v0.25.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index bdbae11bb..73d87fd66 100644 --- a/go.sum +++ b/go.sum @@ -1091,8 +1091,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= -golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1111,8 +1111,8 @@ golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9 golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.24.0 h1:AN7zRgVsbvmTfNyqIbbOraYL8mSwcKncEj8ofjgzcMQ= -golang.org/x/image v0.24.0/go.mod h1:4b/ITuLfqYq1hqZcjofwctIhi7sZh2WaCjvsBNjjya8= +golang.org/x/image v0.27.0 h1:C8gA4oWU/tKkdCfYT6T2u4faJu3MeNS5O8UPWlPF61w= +golang.org/x/image v0.27.0/go.mod h1:xbdrClrAUway1MUTEZDq9mz/UpRwYAkFFNUslZtcB+g= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1140,8 +1140,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= -golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1181,16 +1181,16 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= -golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= +golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= +golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE= -golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1204,8 +1204,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= -golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1268,16 +1268,16 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1288,13 +1288,13 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4= -golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -1359,8 +1359,8 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= -golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 3b705f680..242cd8f1b 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -495,7 +495,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/mlkem from crypto/tls+ crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -515,6 +515,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/internal/randutil from crypto/dsa+ crypto/internal/sysrand from crypto/internal/entropy+ crypto/md5 from crypto/tls+ + LD crypto/mlkem from golang.org/x/crypto/ssh crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls+ crypto/rsa from crypto/tls+ From ef49e75b10a30b32c0c4e79c7e78392b95435eed Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 29 May 2025 12:40:29 -0700 Subject: [PATCH 0906/1708] util/set: add SmallSet.SoleElement, fix bug, add more tests This adds SmallSet.SoleElement, which I need in another repo for efficiency. I added tests, but those tests failed because Add(1) + Add(1) was promoting the first Add's sole element to a map of one item. So fix that, and add more tests. Updates tailscale/corp#29093 Change-Id: Iadd5ad08afe39721ee5449343095e389214d8389 Signed-off-by: Brad Fitzpatrick --- util/set/smallset.go | 24 +++++++++++++++++++----- util/set/smallset_test.go | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+), 5 deletions(-) diff --git a/util/set/smallset.go b/util/set/smallset.go index 51cad6a25..1b77419d2 100644 --- a/util/set/smallset.go +++ b/util/set/smallset.go @@ -50,6 +50,15 @@ func (s SmallSet[T]) Contains(e T) bool { return e != zero && s.one == e } +// SoleElement returns the single value in the set, if the set has exactly one +// element. +// +// If the set is empty or has more than one element, ok will be false and e will +// be the zero value of T. +func (s SmallSet[T]) SoleElement() (e T, ok bool) { + return s.one, s.Len() == 1 +} + // Add adds e to the set. 
// // When storing a SmallSet in a map as a value type, it is important to @@ -61,10 +70,15 @@ func (s *SmallSet[T]) Add(e T) { s.m.Add(e) return } - // Size zero to one non-zero element. - if s.one == zero && e != zero { - s.one = e - return + // Non-zero elements can go into s.one. + if e != zero { + if s.one == zero { + s.one = e // Len 0 to Len 1 + return + } + if s.one == e { + return // dup + } } // Need to make a multi map, either // because we now have two items, or @@ -73,7 +87,7 @@ func (s *SmallSet[T]) Add(e T) { if s.one != zero { s.m.Add(s.one) // move single item to multi } - s.m.Add(e) // add new item + s.m.Add(e) // add new item, possibly zero s.one = zero } diff --git a/util/set/smallset_test.go b/util/set/smallset_test.go index 2635bc893..d6f446df0 100644 --- a/util/set/smallset_test.go +++ b/util/set/smallset_test.go @@ -84,8 +84,43 @@ func TestSmallSet(t *testing.T) { t.Errorf("contains(%v) mismatch after ops %s: normal=%v, small=%v", e, name(), normal.Contains(e), small.Contains(e)) } } + + if err := small.checkInvariants(); err != nil { + t.Errorf("checkInvariants failed after ops %s: %v", name(), err) + } + + if !t.Failed() { + sole, ok := small.SoleElement() + if ok != (small.Len() == 1) { + t.Errorf("SoleElement ok mismatch after ops %s: SoleElement ok=%v, want=%v", name(), ok, !ok) + } + if ok && sole != smallEle[0] { + t.Errorf("SoleElement value mismatch after ops %s: SoleElement=%v, want=%v", name(), sole, smallEle[0]) + t.Errorf("Internals: %+v", small) + } + } + } + } + } + } +} + +func (s *SmallSet[T]) checkInvariants() error { + var zero T + if s.m != nil && s.one != zero { + return fmt.Errorf("both m and one are non-zero") + } + if s.m != nil { + switch len(s.m) { + case 0: + return fmt.Errorf("m is non-nil but empty") + case 1: + for k := range s.m { + if k != zero { + return fmt.Errorf("m contains exactly 1 non-zero element, %v", k) } } } } + return nil } From 5b670eb3a5f1749a655692d97a2e7086c78d1580 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 30 May 2025 11:30:03 +0100 Subject: [PATCH 0907/1708] cmd/containerboot: allow setting --accept-dns via TS_EXTRA_ARGS again (#16129) In 1.84 we made 'tailscale set'/'tailscale up' error out if duplicate command line flags are passed. This broke some container configurations as we have two env vars that can be used to set --accept-dns flag: - TS_ACCEPT_DNS- specifically for --accept-dns - TS_EXTRA_ARGS- accepts any arbitrary 'tailscale up'/'tailscale set' flag. We default TS_ACCEPT_DNS to false (to make the container behaviour more declarative), which with the new restrictive CLI behaviour resulted in failure for users who had set --accept-dns via TS_EXTRA_ARGS as the flag would be provided twice. This PR re-instates the previous behaviour by checking if TS_EXTRA_ARGS contains --accept-dns flag and if so using its value to override TS_ACCEPT_DNS. 
Updates tailscale/tailscale#16108 Signed-off-by: Irbe Krumina --- cmd/containerboot/main_test.go | 248 ++++++++++++++++++----------- cmd/containerboot/settings.go | 57 +++++++ cmd/containerboot/settings_test.go | 108 +++++++++++++ 3 files changed, 322 insertions(+), 91 deletions(-) create mode 100644 cmd/containerboot/settings_test.go diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index a0ccce3dd..c7293c77a 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -41,97 +41,6 @@ import ( "tailscale.com/types/ptr" ) -// testEnv represents the environment needed for a single sub-test so that tests -// can run in parallel. -type testEnv struct { - kube *kubeServer // Fake kube server. - lapi *localAPI // Local TS API server. - d string // Temp dir for the specific test. - argFile string // File with commands test_tailscale{,d}.sh were invoked with. - runningSockPath string // Path to the running tailscaled socket. - localAddrPort int // Port for the containerboot HTTP server. - healthAddrPort int // Port for the (deprecated) containerboot health server. -} - -func newTestEnv(t *testing.T) testEnv { - d := t.TempDir() - - lapi := localAPI{FSRoot: d} - if err := lapi.Start(); err != nil { - t.Fatal(err) - } - t.Cleanup(lapi.Close) - - kube := kubeServer{FSRoot: d} - kube.Start(t) - t.Cleanup(kube.Close) - - tailscaledConf := &ipn.ConfigVAlpha{AuthKey: ptr.To("foo"), Version: "alpha0"} - serveConf := ipn.ServeConfig{TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}} - egressCfg := egressSvcConfig("foo", "foo.tailnetxyz.ts.net") - - dirs := []string{ - "var/lib", - "usr/bin", - "tmp", - "dev/net", - "proc/sys/net/ipv4", - "proc/sys/net/ipv6/conf/all", - "etc/tailscaled", - } - for _, path := range dirs { - if err := os.MkdirAll(filepath.Join(d, path), 0700); err != nil { - t.Fatal(err) - } - } - files := map[string][]byte{ - "usr/bin/tailscaled": fakeTailscaled, - "usr/bin/tailscale": fakeTailscale, - "usr/bin/iptables": fakeTailscale, - "usr/bin/ip6tables": fakeTailscale, - "dev/net/tun": []byte(""), - "proc/sys/net/ipv4/ip_forward": []byte("0"), - "proc/sys/net/ipv6/conf/all/forwarding": []byte("0"), - "etc/tailscaled/cap-95.hujson": mustJSON(t, tailscaledConf), - "etc/tailscaled/serve-config.json": mustJSON(t, serveConf), - filepath.Join("etc/tailscaled/", egressservices.KeyEgressServices): mustJSON(t, egressCfg), - filepath.Join("etc/tailscaled/", egressservices.KeyHEPPings): []byte("4"), - } - for path, content := range files { - // Making everything executable is a little weird, but the - // stuff that doesn't need to be executable doesn't care if we - // do make it executable. 
- if err := os.WriteFile(filepath.Join(d, path), content, 0700); err != nil { - t.Fatal(err) - } - } - - argFile := filepath.Join(d, "args") - runningSockPath := filepath.Join(d, "tmp/tailscaled.sock") - var localAddrPort, healthAddrPort int - for _, p := range []*int{&localAddrPort, &healthAddrPort} { - ln, err := net.Listen("tcp", ":0") - if err != nil { - t.Fatalf("Failed to open listener: %v", err) - } - if err := ln.Close(); err != nil { - t.Fatalf("Failed to close listener: %v", err) - } - port := ln.Addr().(*net.TCPAddr).Port - *p = port - } - - return testEnv{ - kube: &kube, - lapi: &lapi, - d: d, - argFile: argFile, - runningSockPath: runningSockPath, - localAddrPort: localAddrPort, - healthAddrPort: healthAddrPort, - } -} - func TestContainerBoot(t *testing.T) { boot := filepath.Join(t.TempDir(), "containerboot") if err := exec.Command("go", "build", "-ldflags", "-X main.testSleepDuration=1ms", "-o", boot, "tailscale.com/cmd/containerboot").Run(); err != nil { @@ -515,6 +424,37 @@ func TestContainerBoot(t *testing.T) { }, } }, + "auth_key_once_extra_args_override_dns": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_AUTHKEY": "tskey-key", + "TS_AUTH_ONCE": "true", + "TS_ACCEPT_DNS": "false", + "TS_EXTRA_ARGS": "--accept-dns", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + }, + }, + { + Notify: &ipn.Notify{ + State: ptr.To(ipn.NeedsLogin), + }, + WantCmds: []string{ + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=true --authkey=tskey-key", + }, + }, + { + Notify: runningNotify, + WantCmds: []string{ + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=true", + }, + }, + }, + } + }, "kube_storage": func(env *testEnv) testCase { return testCase{ Env: map[string]string{ @@ -766,6 +706,41 @@ func TestContainerBoot(t *testing.T) { }, } }, + "extra_args_accept_dns": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_EXTRA_ARGS": "--accept-dns", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=true", + }, + }, { + Notify: runningNotify, + }, + }, + } + }, + "extra_args_accept_dns_overrides_env_var": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_ACCEPT_DNS": "true", // Overridden by TS_EXTRA_ARGS. + "TS_EXTRA_ARGS": "--accept-dns=false", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", + }, + }, { + Notify: runningNotify, + }, + }, + } + }, "hostname": func(env *testEnv) testCase { return testCase{ Env: map[string]string{ @@ -1604,3 +1579,94 @@ func egressSvcConfig(name, fqdn string) egressservices.Configs { }, } } + +// testEnv represents the environment needed for a single sub-test so that tests +// can run in parallel. +type testEnv struct { + kube *kubeServer // Fake kube server. + lapi *localAPI // Local TS API server. + d string // Temp dir for the specific test. + argFile string // File with commands test_tailscale{,d}.sh were invoked with. + runningSockPath string // Path to the running tailscaled socket. + localAddrPort int // Port for the containerboot HTTP server. 
+ healthAddrPort int // Port for the (deprecated) containerboot health server. +} + +func newTestEnv(t *testing.T) testEnv { + d := t.TempDir() + + lapi := localAPI{FSRoot: d} + if err := lapi.Start(); err != nil { + t.Fatal(err) + } + t.Cleanup(lapi.Close) + + kube := kubeServer{FSRoot: d} + kube.Start(t) + t.Cleanup(kube.Close) + + tailscaledConf := &ipn.ConfigVAlpha{AuthKey: ptr.To("foo"), Version: "alpha0"} + serveConf := ipn.ServeConfig{TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}} + egressCfg := egressSvcConfig("foo", "foo.tailnetxyz.ts.net") + + dirs := []string{ + "var/lib", + "usr/bin", + "tmp", + "dev/net", + "proc/sys/net/ipv4", + "proc/sys/net/ipv6/conf/all", + "etc/tailscaled", + } + for _, path := range dirs { + if err := os.MkdirAll(filepath.Join(d, path), 0700); err != nil { + t.Fatal(err) + } + } + files := map[string][]byte{ + "usr/bin/tailscaled": fakeTailscaled, + "usr/bin/tailscale": fakeTailscale, + "usr/bin/iptables": fakeTailscale, + "usr/bin/ip6tables": fakeTailscale, + "dev/net/tun": []byte(""), + "proc/sys/net/ipv4/ip_forward": []byte("0"), + "proc/sys/net/ipv6/conf/all/forwarding": []byte("0"), + "etc/tailscaled/cap-95.hujson": mustJSON(t, tailscaledConf), + "etc/tailscaled/serve-config.json": mustJSON(t, serveConf), + filepath.Join("etc/tailscaled/", egressservices.KeyEgressServices): mustJSON(t, egressCfg), + filepath.Join("etc/tailscaled/", egressservices.KeyHEPPings): []byte("4"), + } + for path, content := range files { + // Making everything executable is a little weird, but the + // stuff that doesn't need to be executable doesn't care if we + // do make it executable. + if err := os.WriteFile(filepath.Join(d, path), content, 0700); err != nil { + t.Fatal(err) + } + } + + argFile := filepath.Join(d, "args") + runningSockPath := filepath.Join(d, "tmp/tailscaled.sock") + var localAddrPort, healthAddrPort int + for _, p := range []*int{&localAddrPort, &healthAddrPort} { + ln, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatalf("Failed to open listener: %v", err) + } + if err := ln.Close(); err != nil { + t.Fatalf("Failed to close listener: %v", err) + } + port := ln.Addr().(*net.TCPAddr).Port + *p = port + } + + return testEnv{ + kube: &kube, + lapi: &lapi, + d: d, + argFile: argFile, + runningSockPath: runningSockPath, + localAddrPort: localAddrPort, + healthAddrPort: healthAddrPort, + } +} diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index 0ac9c828e..5a8be9036 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -147,12 +147,69 @@ func configFromEnv() (*settings, error) { } } + // See https://github.com/tailscale/tailscale/issues/16108 for context- we + // do this to preserve the previous behaviour where --accept-dns could be + // set either via TS_ACCEPT_DNS or TS_EXTRA_ARGS. + acceptDNS := cfg.AcceptDNS != nil && *cfg.AcceptDNS + tsExtraArgs, acceptDNSNew := parseAcceptDNS(cfg.ExtraArgs, acceptDNS) + cfg.ExtraArgs = tsExtraArgs + if acceptDNS != acceptDNSNew { + cfg.AcceptDNS = &acceptDNSNew + } + if err := cfg.validate(); err != nil { return nil, fmt.Errorf("invalid configuration: %v", err) } return cfg, nil } +// parseAcceptDNS parses any values for Tailscale --accept-dns flag set via +// TS_ACCEPT_DNS and TS_EXTRA_ARGS env vars. If TS_EXTRA_ARGS contains +// --accept-dns flag, override the acceptDNS value with the one from +// TS_EXTRA_ARGS. +// The value of extraArgs can be empty string or one or more whitespace-separate +// key value pairs for 'tailscale up' command. 
The value for boolean flags can +// be omitted (default to true). +func parseAcceptDNS(extraArgs string, acceptDNS bool) (string, bool) { + if !strings.Contains(extraArgs, "--accept-dns") { + return extraArgs, acceptDNS + } + // TODO(irbekrm): we should validate that TS_EXTRA_ARGS contains legit + // 'tailscale up' flag values separated by whitespace. + argsArr := strings.Fields(extraArgs) + i := -1 + for key, val := range argsArr { + if strings.HasPrefix(val, "--accept-dns") { + i = key + break + } + } + if i == -1 { + return extraArgs, acceptDNS + } + a := strings.TrimSpace(argsArr[i]) + var acceptDNSFromExtraArgsS string + keyval := strings.Split(a, "=") + if len(keyval) == 2 { + acceptDNSFromExtraArgsS = keyval[1] + } else if len(keyval) == 1 && keyval[0] == "--accept-dns" { + // If the arg is just --accept-dns, we assume it means true. + acceptDNSFromExtraArgsS = "true" + } else { + log.Printf("TS_EXTRA_ARGS contains --accept-dns, but it is not in the expected format --accept-dns=, ignoring it") + return extraArgs, acceptDNS + } + acceptDNSFromExtraArgs, err := strconv.ParseBool(acceptDNSFromExtraArgsS) + if err != nil { + log.Printf("TS_EXTRA_ARGS contains --accept-dns=%q, which is not a valid boolean value, ignoring it", acceptDNSFromExtraArgsS) + return extraArgs, acceptDNS + } + if acceptDNSFromExtraArgs != acceptDNS { + log.Printf("TS_EXTRA_ARGS contains --accept-dns=%v, which overrides TS_ACCEPT_DNS=%v", acceptDNSFromExtraArgs, acceptDNS) + } + return strings.Join(append(argsArr[:i], argsArr[i+1:]...), " "), acceptDNSFromExtraArgs +} + func (s *settings) validate() error { if s.TailscaledConfigFilePath != "" { dir, file := path.Split(s.TailscaledConfigFilePath) diff --git a/cmd/containerboot/settings_test.go b/cmd/containerboot/settings_test.go new file mode 100644 index 000000000..dbec066c9 --- /dev/null +++ b/cmd/containerboot/settings_test.go @@ -0,0 +1,108 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import "testing" + +func Test_parseAcceptDNS(t *testing.T) { + tests := []struct { + name string + extraArgs string + acceptDNS bool + wantExtraArgs string + wantAcceptDNS bool + }{ + { + name: "false_extra_args_unset", + extraArgs: "", + wantExtraArgs: "", + wantAcceptDNS: false, + }, + { + name: "false_unrelated_args_set", + extraArgs: "--accept-routes=true --advertise-routes=10.0.0.1/32", + wantExtraArgs: "--accept-routes=true --advertise-routes=10.0.0.1/32", + wantAcceptDNS: false, + }, + { + name: "true_extra_args_unset", + extraArgs: "", + acceptDNS: true, + wantExtraArgs: "", + wantAcceptDNS: true, + }, + { + name: "true_unrelated_args_set", + acceptDNS: true, + extraArgs: "--accept-routes=true --advertise-routes=10.0.0.1/32", + wantExtraArgs: "--accept-routes=true --advertise-routes=10.0.0.1/32", + wantAcceptDNS: true, + }, + { + name: "false_extra_args_set_to_false", + extraArgs: "--accept-dns=false", + wantExtraArgs: "", + wantAcceptDNS: false, + }, + { + name: "false_extra_args_set_to_true", + extraArgs: "--accept-dns=true", + wantExtraArgs: "", + wantAcceptDNS: true, + }, + { + name: "true_extra_args_set_to_false", + extraArgs: "--accept-dns=false", + acceptDNS: true, + wantExtraArgs: "", + wantAcceptDNS: false, + }, + { + name: "true_extra_args_set_to_true", + extraArgs: "--accept-dns=true", + acceptDNS: true, + wantExtraArgs: "", + wantAcceptDNS: true, + }, + { + name: "false_extra_args_set_to_true_implicitly", + extraArgs: "--accept-dns", + wantExtraArgs: "", + wantAcceptDNS: true, + 
}, + { + name: "false_extra_args_set_to_true_implicitly_with_unrelated_args", + extraArgs: "--accept-dns --accept-routes --advertise-routes=10.0.0.1/32", + wantExtraArgs: "--accept-routes --advertise-routes=10.0.0.1/32", + wantAcceptDNS: true, + }, + { + name: "false_extra_args_set_to_true_implicitly_surrounded_with_unrelated_args", + extraArgs: "--accept-routes --accept-dns --advertise-routes=10.0.0.1/32", + wantExtraArgs: "--accept-routes --advertise-routes=10.0.0.1/32", + wantAcceptDNS: true, + }, + { + name: "true_extra_args_set_to_false_with_unrelated_args", + extraArgs: "--accept-routes --accept-dns=false", + acceptDNS: true, + wantExtraArgs: "--accept-routes", + wantAcceptDNS: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotExtraArgs, gotAcceptDNS := parseAcceptDNS(tt.extraArgs, tt.acceptDNS) + if gotExtraArgs != tt.wantExtraArgs { + t.Errorf("parseAcceptDNS() gotExtraArgs = %v, want %v", gotExtraArgs, tt.wantExtraArgs) + } + if gotAcceptDNS != tt.wantAcceptDNS { + t.Errorf("parseAcceptDNS() gotAcceptDNS = %v, want %v", gotAcceptDNS, tt.wantAcceptDNS) + } + }) + } +} From 11e83f9da5eb4e11d50464ac6ab01bb663218b22 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Wed, 7 May 2025 17:01:40 +0100 Subject: [PATCH 0908/1708] controlclient,health,ipnlocal,tailcfg: add DisplayMessage support Updates tailscale/corp#27759 Signed-off-by: James Sanderson --- control/controlclient/map.go | 35 ++++- control/controlclient/map_test.go | 238 +++++++++++++++++++++++++++++- health/state.go | 31 +++- ipn/ipnlocal/local.go | 9 +- ipn/ipnlocal/local_test.go | 65 ++++++++ tailcfg/tailcfg.go | 60 +++++++- types/netmap/nodemut.go | 1 + 7 files changed, 417 insertions(+), 22 deletions(-) diff --git a/control/controlclient/map.go b/control/controlclient/map.go index abfc5eb17..f346e19d4 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -90,6 +90,7 @@ type mapSession struct { lastDomain string lastDomainAuditLogID string lastHealth []string + lastDisplayMessages map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage lastPopBrowserURL string lastTKAInfo *tailcfg.TKAInfo lastNetmapSummary string // from NetworkMap.VeryConcise @@ -412,6 +413,21 @@ func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) { if resp.Health != nil { ms.lastHealth = resp.Health } + if resp.DisplayMessages != nil { + if v, ok := resp.DisplayMessages["*"]; ok && v == nil { + ms.lastDisplayMessages = nil + } + for k, v := range resp.DisplayMessages { + if k == "*" { + continue + } + if v != nil { + mak.Set(&ms.lastDisplayMessages, k, *v) + } else { + delete(ms.lastDisplayMessages, k) + } + } + } if resp.TKAInfo != nil { ms.lastTKAInfo = resp.TKAInfo } @@ -831,14 +847,19 @@ func (ms *mapSession) sortedPeers() []tailcfg.NodeView { func (ms *mapSession) netmap() *netmap.NetworkMap { peerViews := ms.sortedPeers() - // Convert all ms.lastHealth to the new [netmap.NetworkMap.DisplayMessages]. 
var msgs map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage - for _, h := range ms.lastHealth { - mak.Set(&msgs, tailcfg.DisplayMessageID("control-health-"+strhash(h)), tailcfg.DisplayMessage{ - Title: "Coordination server reports an issue", - Severity: tailcfg.SeverityMedium, - Text: "The coordination server is reporting a health issue: " + h, - }) + if len(ms.lastDisplayMessages) != 0 { + msgs = ms.lastDisplayMessages + } else if len(ms.lastHealth) > 0 { + // Convert all ms.lastHealth to the new [netmap.NetworkMap.DisplayMessages] + for _, h := range ms.lastHealth { + id := "control-health-" + strhash(h) // Unique ID in case there is more than one health message + mak.Set(&msgs, tailcfg.DisplayMessageID(id), tailcfg.DisplayMessage{ + Title: "Coordination server reports an issue", + Severity: tailcfg.SeverityMedium, + Text: "The coordination server is reporting a health issue: " + h, + }) + } } nm := &netmap.NetworkMap{ diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index 9abaae923..013640f47 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -16,6 +16,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "go4.org/mem" "tailscale.com/control/controlknobs" "tailscale.com/health" @@ -1139,8 +1140,190 @@ func BenchmarkMapSessionDelta(b *testing.B) { } } +// TestNetmapDisplayMessage checks that the various diff operations +// (add/update/delete/clear) for [tailcfg.DisplayMessage] in a +// [tailcfg.MapResponse] work as expected. +func TestNetmapDisplayMessage(t *testing.T) { + type test struct { + name string + initialState *tailcfg.MapResponse + mapResponse tailcfg.MapResponse + wantMessages map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage + } + + tests := []test{ + { + name: "basic-set", + mapResponse: tailcfg.MapResponse{ + DisplayMessages: map[tailcfg.DisplayMessageID]*tailcfg.DisplayMessage{ + "test-message": { + Title: "Testing", + Text: "This is a test message", + Severity: tailcfg.SeverityHigh, + ImpactsConnectivity: true, + PrimaryAction: &tailcfg.DisplayMessageAction{ + URL: "https://www.example.com", + Label: "Learn more", + }, + }, + }, + }, + wantMessages: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test-message": { + Title: "Testing", + Text: "This is a test message", + Severity: tailcfg.SeverityHigh, + ImpactsConnectivity: true, + PrimaryAction: &tailcfg.DisplayMessageAction{ + URL: "https://www.example.com", + Label: "Learn more", + }, + }, + }, + }, + { + name: "delete-one", + initialState: &tailcfg.MapResponse{ + DisplayMessages: map[tailcfg.DisplayMessageID]*tailcfg.DisplayMessage{ + "message-a": { + Title: "Message A", + }, + "message-b": { + Title: "Message B", + }, + }, + }, + mapResponse: tailcfg.MapResponse{ + DisplayMessages: map[tailcfg.DisplayMessageID]*tailcfg.DisplayMessage{ + "message-a": nil, + }, + }, + wantMessages: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "message-b": { + Title: "Message B", + }, + }, + }, + { + name: "update-one", + initialState: &tailcfg.MapResponse{ + DisplayMessages: map[tailcfg.DisplayMessageID]*tailcfg.DisplayMessage{ + "message-a": { + Title: "Message A", + }, + "message-b": { + Title: "Message B", + }, + }, + }, + mapResponse: tailcfg.MapResponse{ + DisplayMessages: map[tailcfg.DisplayMessageID]*tailcfg.DisplayMessage{ + "message-a": { + Title: "Message A updated", + }, + }, + }, + wantMessages: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "message-a": { + Title: "Message A updated", 
+ }, + "message-b": { + Title: "Message B", + }, + }, + }, + { + name: "add-one", + initialState: &tailcfg.MapResponse{ + DisplayMessages: map[tailcfg.DisplayMessageID]*tailcfg.DisplayMessage{ + "message-a": { + Title: "Message A", + }, + }, + }, + mapResponse: tailcfg.MapResponse{ + DisplayMessages: map[tailcfg.DisplayMessageID]*tailcfg.DisplayMessage{ + "message-b": { + Title: "Message B", + }, + }, + }, + wantMessages: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "message-a": { + Title: "Message A", + }, + "message-b": { + Title: "Message B", + }, + }, + }, + { + name: "delete-all", + initialState: &tailcfg.MapResponse{ + DisplayMessages: map[tailcfg.DisplayMessageID]*tailcfg.DisplayMessage{ + "message-a": { + Title: "Message A", + }, + "message-b": { + Title: "Message B", + }, + }, + }, + mapResponse: tailcfg.MapResponse{ + DisplayMessages: map[tailcfg.DisplayMessageID]*tailcfg.DisplayMessage{ + "*": nil, + }, + }, + wantMessages: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{}, + }, + { + name: "delete-all-and-add", + initialState: &tailcfg.MapResponse{ + DisplayMessages: map[tailcfg.DisplayMessageID]*tailcfg.DisplayMessage{ + "message-a": { + Title: "Message A", + }, + "message-b": { + Title: "Message B", + }, + }, + }, + mapResponse: tailcfg.MapResponse{ + DisplayMessages: map[tailcfg.DisplayMessageID]*tailcfg.DisplayMessage{ + "*": nil, + "message-c": { + Title: "Message C", + }, + }, + }, + wantMessages: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "message-c": { + Title: "Message C", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ms := newTestMapSession(t, nil) + + if test.initialState != nil { + ms.netmapForResponse(test.initialState) + } + + nm := ms.netmapForResponse(&test.mapResponse) + + if diff := cmp.Diff(test.wantMessages, nm.DisplayMessages, cmpopts.EquateEmpty()); diff != "" { + t.Errorf("unexpected warnings (-want +got):\n%s", diff) + } + }) + } +} + // TestNetmapHealthIntegration checks that we get the expected health warnings -// from processing a map response and passing the NetworkMap to a health tracker +// from processing a [tailcfg.MapResponse] containing health messages and passing the +// [netmap.NetworkMap] to a [health.Tracker]. func TestNetmapHealthIntegration(t *testing.T) { ms := newTestMapSession(t, nil) ht := health.Tracker{} @@ -1182,3 +1365,56 @@ func TestNetmapHealthIntegration(t *testing.T) { t.Fatalf("CurrentStatus().Warnings[\"control-health*\"] different than expected (-want +got)\n%s", d) } } + +// TestNetmapDisplayMessageIntegration checks that we get the expected health +// warnings from processing a [tailcfg.MapResponse] that contains DisplayMessages and +// passing the [netmap.NetworkMap] to a [health.Tracker]. 
+func TestNetmapDisplayMessageIntegration(t *testing.T) { + ms := newTestMapSession(t, nil) + ht := health.Tracker{} + + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + baseWarnings := ht.CurrentState().Warnings + + nm := ms.netmapForResponse(&tailcfg.MapResponse{ + DisplayMessages: map[tailcfg.DisplayMessageID]*tailcfg.DisplayMessage{ + "test-message": { + Title: "Testing", + Text: "This is a test message", + Severity: tailcfg.SeverityHigh, + ImpactsConnectivity: true, + PrimaryAction: &tailcfg.DisplayMessageAction{ + URL: "https://www.example.com", + Label: "Learn more", + }, + }, + }, + }) + ht.SetControlHealth(nm.DisplayMessages) + + state := ht.CurrentState() + + // Ignore warnings that aren't from the netmap + for k := range baseWarnings { + delete(state.Warnings, k) + } + + want := map[health.WarnableCode]health.UnhealthyState{ + "test-message": { + WarnableCode: "test-message", + Title: "Testing", + Text: "This is a test message", + Severity: health.SeverityHigh, + ImpactsConnectivity: true, + PrimaryAction: &health.UnhealthyStateAction{ + URL: "https://www.example.com", + Label: "Learn more", + }, + }, + } + + if diff := cmp.Diff(want, state.Warnings); diff != "" { + t.Errorf("unexpected message contents (-want +got):\n%s", diff) + } +} diff --git a/health/state.go b/health/state.go index cf4f922d7..cec967931 100644 --- a/health/state.go +++ b/health/state.go @@ -30,10 +30,19 @@ type UnhealthyState struct { Severity Severity Title string Text string - BrokenSince *time.Time `json:",omitempty"` - Args Args `json:",omitempty"` - DependsOn []WarnableCode `json:",omitempty"` - ImpactsConnectivity bool `json:",omitempty"` + BrokenSince *time.Time `json:",omitempty"` + Args Args `json:",omitempty"` + DependsOn []WarnableCode `json:",omitempty"` + ImpactsConnectivity bool `json:",omitempty"` + PrimaryAction *UnhealthyStateAction `json:",omitempty"` +} + +// UnhealthyStateAction represents an action (URL and link) to be presented to +// the user associated with an [UnhealthyState]. Analogous to +// [tailcfg.DisplayMessageAction]. +type UnhealthyStateAction struct { + URL string + Label string } // unhealthyState returns a unhealthyState of the Warnable given its current warningState. @@ -102,15 +111,23 @@ func (t *Tracker) CurrentState() *State { } for id, msg := range t.lastNotifiedControlMessages { - code := WarnableCode(id) - wm[code] = UnhealthyState{ - WarnableCode: code, + state := UnhealthyState{ + WarnableCode: WarnableCode(id), Severity: severityFromTailcfg(msg.Severity), Title: msg.Title, Text: msg.Text, ImpactsConnectivity: msg.ImpactsConnectivity, // TODO(tailscale/corp#27759): DependsOn? 
} + + if msg.PrimaryAction != nil { + state.PrimaryAction = &UnhealthyStateAction{ + URL: msg.PrimaryAction.URL, + Label: msg.PrimaryAction.Label, + } + } + + wm[state.WarnableCode] = state } return &State{ diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index d69c07a9f..05f026631 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5828,7 +5828,14 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.pauseOrResumeControlClientLocked() if nm != nil { - b.health.SetControlHealth(nm.DisplayMessages) + messages := make(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage) + for id, msg := range nm.DisplayMessages { + if msg.PrimaryAction != nil && !b.validPopBrowserURL(msg.PrimaryAction.URL) { + msg.PrimaryAction = nil + } + messages[id] = msg + } + b.health.SetControlHealth(messages) } else { b.health.SetControlHealth(nil) } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 19cfd9195..1ad3225a5 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -5339,3 +5339,68 @@ func TestSrcCapPacketFilter(t *testing.T) { t.Error("IsDrop() for node without cap = false, want true") } } + +func TestDisplayMessages(t *testing.T) { + b := newTestLocalBackend(t) + + // Pretend we're in a map poll so health updates get processed + ht := b.HealthTracker() + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + + b.setNetMapLocked(&netmap.NetworkMap{ + DisplayMessages: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test-message": { + Title: "Testing", + }, + }, + }) + + state := ht.CurrentState() + _, ok := state.Warnings["test-message"] + + if !ok { + t.Error("no warning found with id 'test-message'") + } +} + +// TestDisplayMessagesURLFilter tests that we filter out any URLs that are not +// valid as a pop browser URL (see [LocalBackend.validPopBrowserURL]). +func TestDisplayMessagesURLFilter(t *testing.T) { + b := newTestLocalBackend(t) + + // Pretend we're in a map poll so health updates get processed + ht := b.HealthTracker() + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + + b.setNetMapLocked(&netmap.NetworkMap{ + DisplayMessages: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test-message": { + Title: "Testing", + Severity: tailcfg.SeverityHigh, + PrimaryAction: &tailcfg.DisplayMessageAction{ + URL: "https://www.evil.com", + Label: "Phishing Link", + }, + }, + }, + }) + + state := ht.CurrentState() + got, ok := state.Warnings["test-message"] + + if !ok { + t.Fatal("no warning found with id 'test-message'") + } + + want := health.UnhealthyState{ + WarnableCode: "test-message", + Title: "Testing", + Severity: health.SeverityHigh, + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected message content (-want/+got):\n%s", diff) + } +} diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 7e2fa3ffc..4679609f3 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -161,7 +161,8 @@ type CapabilityVersion int // - 114: 2025-01-30: NodeAttrMaxKeyDuration CapMap defined, clients might use it (no tailscaled code change) (#14829) // - 115: 2025-03-07: Client understands DERPRegion.NoMeasureNoHome. // - 116: 2025-05-05: Client serves MagicDNS "AAAA" if NodeAttrMagicDNSPeerAAAA set on self node -const CurrentCapabilityVersion CapabilityVersion = 116 +// - 117: 2025-05-28: Client understands DisplayMessages (structured health messages), but not necessarily PrimaryAction. 
+const CurrentCapabilityVersion CapabilityVersion = 117 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -2030,11 +2031,29 @@ type MapResponse struct { // known problems). A non-zero length slice are the list of problems that // the control plane sees. // + // Either this will be set, or DisplayMessages will be set, but not both. + // // Note that this package's type, due its use of a slice and omitempty, is // unable to marshal a zero-length non-nil slice. The control server needs // to marshal this type using a separate type. See MapResponse docs. Health []string `json:",omitempty"` + // DisplayMessages sets the health state of the node from the control + // plane's perspective. + // + // Either this will be set, or Health will be set, but not both. + // + // The map keys are IDs that uniquely identify the type of health issue. The + // map values are the messages. If the server sends down a map with entries, + // the client treats it as a patch: new entries are added, keys with a value + // of nil are deleted, existing entries with new values are updated. A nil + // map and an empty map both mean no change has occurred since the last + // update. + // + // As a special case, the map key "*" with a value of nil means to clear all + // prior display messages before processing the other map entries. + DisplayMessages map[DisplayMessageID]*DisplayMessage `json:",omitempty"` + // SSHPolicy, if non-nil, updates the SSH policy for how incoming // SSH connections should be handled. SSHPolicy *SSHPolicy `json:",omitempty"` @@ -2079,24 +2098,53 @@ type MapResponse struct { } // DisplayMessage represents a health state of the node from the control plane's -// perspective. It is deliberately similar to health.Warnable as both get -// converted into health.UnhealthyState to be sent to the GUI. +// perspective. It is deliberately similar to [health.Warnable] as both get +// converted into [health.UnhealthyState] to be sent to the GUI. type DisplayMessage struct { // Title is a string that the GUI uses as title for this message. The title - // should be short and fit in a single line. + // should be short and fit in a single line. It should not end in a period. + // + // Example: "Network may be blocking Tailscale". + // + // See the various instantiations of [health.Warnable] for more examples. Title string - // Text is an extended string that the GUI will display to the user. + // Text is an extended string that the GUI will display to the user. This + // could be multiple sentences explaining the issue in more detail. + // + // Example: "macOS Screen Time seems to be blocking Tailscale. Try disabling + // Screen Time in System Settings > Screen Time > Content & Privacy > Access + // to Web Content." + // + // See the various instantiations of [health.Warnable] for more examples. Text string // Severity is the severity of the DisplayMessage, which the GUI can use to - // determine how to display it. Maps to health.Severity. + // determine how to display it. Maps to [health.Severity]. Severity DisplayMessageSeverity // ImpactsConnectivity is whether the health problem will impact the user's // ability to connect to the Internet or other nodes on the tailnet, which // the GUI can use to determine how to display it. ImpactsConnectivity bool `json:",omitempty"` + + // Primary action, if present, represents the action to allow the user to + // take when interacting with this message. 
For example, if the + // DisplayMessage is shown via a notification, the action label might be a + // button on that notification and clicking the button would open the URL. + PrimaryAction *DisplayMessageAction `json:",omitempty"` +} + +// DisplayMessageAction represents an action (URL and link) to be presented to +// the user associated with a [DisplayMessage]. +type DisplayMessageAction struct { + // URL is the URL to navigate to when the user interacts with this action + URL string + + // Label is the call to action for the UI to display on the UI element that + // will open the URL (such as a button or link). For example, "Sign in" or + // "Learn more". + Label string } // DisplayMessageID is a string that uniquely identifies the kind of health diff --git a/types/netmap/nodemut.go b/types/netmap/nodemut.go index e31c731be..ccbdeae3f 100644 --- a/types/netmap/nodemut.go +++ b/types/netmap/nodemut.go @@ -163,6 +163,7 @@ func mapResponseContainsNonPatchFields(res *tailcfg.MapResponse) bool { res.PacketFilters != nil || res.UserProfiles != nil || res.Health != nil || + res.DisplayMessages != nil || res.SSHPolicy != nil || res.TKAInfo != nil || res.DomainDataPlaneAuditLogID != "" || From 84aa7ff3bbcabd1bbc272c99de0c898b799cb144 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Fri, 30 May 2025 08:06:16 -1000 Subject: [PATCH 0909/1708] syncs: fix AtomicValue.CompareAndSwap (#16137) Fix CompareAndSwap in the edge-case where the underlying sync.AtomicValue is uninitialized (i.e., Store was never called) and the oldV is the zero value, then perform CompareAndSwap with any(nil). Also, document that T must be comparable. This is a pre-existing restriction. Fixes #16135 Signed-off-by: Joe Tsai --- syncs/syncs.go | 10 ++++++++-- syncs/syncs_test.go | 17 +++++++++++++++++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/syncs/syncs.go b/syncs/syncs.go index 337fca755..cf0be919b 100644 --- a/syncs/syncs.go +++ b/syncs/syncs.go @@ -67,12 +67,18 @@ func (v *AtomicValue[T]) Swap(x T) (old T) { if oldV != nil { return oldV.(wrappedValue[T]).v } - return old + return old // zero value of T } // CompareAndSwap executes the compare-and-swap operation for the Value. +// It panics if T is not comparable. func (v *AtomicValue[T]) CompareAndSwap(oldV, newV T) (swapped bool) { - return v.v.CompareAndSwap(wrappedValue[T]{oldV}, wrappedValue[T]{newV}) + var zero T + return v.v.CompareAndSwap(wrappedValue[T]{oldV}, wrappedValue[T]{newV}) || + // In the edge-case where [atomic.Value.Store] is uninitialized + // and trying to compare with the zero value of T, + // then compare-and-swap with the nil any value. + (any(oldV) == any(zero) && v.v.CompareAndSwap(any(nil), wrappedValue[T]{newV})) } // MutexValue is a value protected by a mutex. 
diff --git a/syncs/syncs_test.go b/syncs/syncs_test.go index 901d42948..2439b6068 100644 --- a/syncs/syncs_test.go +++ b/syncs/syncs_test.go @@ -64,6 +64,23 @@ func TestAtomicValue(t *testing.T) { t.Fatalf("LoadOk = (%v, %v), want (nil, true)", got, gotOk) } } + + { + c1, c2, c3 := make(chan struct{}), make(chan struct{}), make(chan struct{}) + var v AtomicValue[chan struct{}] + if v.CompareAndSwap(c1, c2) != false { + t.Fatalf("CompareAndSwap = true, want false") + } + if v.CompareAndSwap(nil, c1) != true { + t.Fatalf("CompareAndSwap = false, want true") + } + if v.CompareAndSwap(c2, c3) != false { + t.Fatalf("CompareAndSwap = true, want false") + } + if v.CompareAndSwap(c1, c2) != true { + t.Fatalf("CompareAndSwap = false, want true") + } + } } func TestMutexValue(t *testing.T) { From c9a5d638e9d8895eda0cb175afcb8dc738e75a09 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Tue, 27 May 2025 08:06:45 -0700 Subject: [PATCH 0910/1708] tsconsensus: enable writing state to disk The comments in the raft code say to only use the InMemStore for tests. Updates #16027 Signed-off-by: Fran Bull --- go.mod | 3 +++ go.sum | 10 +++++++++ tsconsensus/bolt_store.go | 19 ++++++++++++++++ tsconsensus/bolt_store_no_bolt.go | 18 +++++++++++++++ tsconsensus/tsconsensus.go | 37 +++++++++++++++++++++++++------ 5 files changed, 80 insertions(+), 7 deletions(-) create mode 100644 tsconsensus/bolt_store.go create mode 100644 tsconsensus/bolt_store_no_bolt.go diff --git a/go.mod b/go.mod index d44a14aef..9ea25446b 100644 --- a/go.mod +++ b/go.mod @@ -51,6 +51,7 @@ require ( github.com/goreleaser/nfpm/v2 v2.33.1 github.com/hashicorp/go-hclog v1.6.2 github.com/hashicorp/raft v1.7.2 + github.com/hashicorp/raft-boltdb/v2 v2.3.1 github.com/hdevalence/ed25519consensus v0.2.0 github.com/illarion/gonotify/v3 v3.0.2 github.com/inetaf/tcpproxy v0.0.0-20250203165043-ded522cbd03f @@ -135,6 +136,7 @@ require ( github.com/alecthomas/go-check-sumtype v0.1.4 // indirect github.com/alexkohler/nakedret/v2 v2.0.4 // indirect github.com/armon/go-metrics v0.4.1 // indirect + github.com/boltdb/bolt v1.3.1 // indirect github.com/bombsimon/wsl/v4 v4.2.1 // indirect github.com/butuzov/mirror v1.1.0 // indirect github.com/catenacyber/perfsprint v0.7.1 // indirect @@ -166,6 +168,7 @@ require ( github.com/ykadowak/zerologlint v0.1.5 // indirect go-simpler.org/musttag v0.9.0 // indirect go-simpler.org/sloglint v0.5.0 // indirect + go.etcd.io/bbolt v1.3.11 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect go.opentelemetry.io/otel v1.33.0 // indirect diff --git a/go.sum b/go.sum index 73d87fd66..318eae1ea 100644 --- a/go.sum +++ b/go.sum @@ -180,6 +180,8 @@ github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb h1:m935MPodAbYS46DG4 github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= +github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bombsimon/wsl/v4 v4.2.1 h1:Cxg6u+XDWff75SIFFmNsqnIOgob+Q9hG6y/ioKbRFiM= github.com/bombsimon/wsl/v4 v4.2.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo= github.com/bramvdbogaerde/go-scp v1.4.0 h1:jKMwpwCbcX1KyvDbm/PDJuXcMuNVlLGi0Q0reuzjyKY= @@ -555,6 
+557,8 @@ github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJ github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY= github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0= github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= @@ -571,6 +575,10 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/raft v1.7.2 h1:pyvxhfJ4R8VIAlHKvLoKQWElZspsCVT6YWuxVxsPAgc= github.com/hashicorp/raft v1.7.2/go.mod h1:DfvCGFxpAUPE0L4Uc8JLlTPtc3GzSbdH0MTJCLgnmJQ= +github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702 h1:RLKEcCuKcZ+qp2VlaaZsYZfLOmIiuJNpEi48Rl8u9cQ= +github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702/go.mod h1:nTakvJ4XYq45UXtn0DbwR4aU9ZdjlnIenpbs6Cd+FM0= +github.com/hashicorp/raft-boltdb/v2 v2.3.1 h1:ackhdCNPKblmOhjEU9+4lHSJYFkJd6Jqyvj6eW9pwkc= +github.com/hashicorp/raft-boltdb/v2 v2.3.1/go.mod h1:n4S+g43dXF1tqDT+yzcXHhXM6y7MrlUd3TTwGRcUvQE= github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= @@ -1046,6 +1054,8 @@ go-simpler.org/musttag v0.9.0 h1:Dzt6/tyP9ONr5g9h9P3cnYWCxeBFRkd0uJL/w+1Mxos= go-simpler.org/musttag v0.9.0/go.mod h1:gA9nThnalvNSKpEoyp3Ko4/vCX2xTpqKoUtNqXOnVR4= go-simpler.org/sloglint v0.5.0 h1:2YCcd+YMuYpuqthCgubcF5lBSjb6berc5VMOYUHKrpY= go-simpler.org/sloglint v0.5.0/go.mod h1:EUknX5s8iXqf18KQxKnaBHUPVriiPnOrPjjJcsaTcSQ= +go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= +go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= diff --git a/tsconsensus/bolt_store.go b/tsconsensus/bolt_store.go new file mode 100644 index 000000000..ca347cfc0 --- /dev/null +++ b/tsconsensus/bolt_store.go @@ -0,0 +1,19 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !loong64 + +package tsconsensus + +import ( + "github.com/hashicorp/raft" + raftboltdb "github.com/hashicorp/raft-boltdb/v2" +) + +func boltStore(path string) (raft.StableStore, raft.LogStore, error) { + store, err := raftboltdb.NewBoltStore(path) + if err != nil { + return nil, nil, err + } + return store, store, nil +} diff --git a/tsconsensus/bolt_store_no_bolt.go b/tsconsensus/bolt_store_no_bolt.go new file mode 100644 index 000000000..33b3bd6c7 --- /dev/null +++ b/tsconsensus/bolt_store_no_bolt.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + 
+//go:build loong64 + +package tsconsensus + +import ( + "errors" + + "github.com/hashicorp/raft" +) + +func boltStore(path string) (raft.StableStore, raft.LogStore, error) { + // "github.com/hashicorp/raft-boltdb/v2" doesn't build on loong64 + // see https://github.com/hashicorp/raft-boltdb/issues/27 + return nil, nil, errors.New("not implemented") +} diff --git a/tsconsensus/tsconsensus.go b/tsconsensus/tsconsensus.go index 74094782f..b6bf37310 100644 --- a/tsconsensus/tsconsensus.go +++ b/tsconsensus/tsconsensus.go @@ -32,6 +32,7 @@ import ( "net" "net/http" "net/netip" + "path/filepath" "time" "github.com/hashicorp/go-hclog" @@ -71,6 +72,7 @@ type Config struct { MaxConnPool int ConnTimeout time.Duration ServeDebugMonitor bool + StateDirPath string } // DefaultConfig returns a Config populated with default values ready for use. @@ -223,10 +225,31 @@ func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, clusterTag strin func startRaft(shutdownCtx context.Context, ts *tsnet.Server, fsm *raft.FSM, self selfRaftNode, auth *authorization, cfg Config) (*raft.Raft, error) { cfg.Raft.LocalID = raft.ServerID(self.id) - // no persistence (for now?) - logStore := raft.NewInmemStore() - stableStore := raft.NewInmemStore() - snapshots := raft.NewInmemSnapshotStore() + var logStore raft.LogStore + var stableStore raft.StableStore + var snapStore raft.SnapshotStore + + if cfg.StateDirPath == "" { + // comments in raft code say to only use for tests + logStore = raft.NewInmemStore() + stableStore = raft.NewInmemStore() + snapStore = raft.NewInmemSnapshotStore() + } else { + var err error + stableStore, logStore, err = boltStore(filepath.Join(cfg.StateDirPath, "store")) + if err != nil { + return nil, err + } + snaplogger := hclog.New(&hclog.LoggerOptions{ + Name: "raft-snap", + Output: cfg.Raft.LogOutput, + Level: hclog.LevelFromString(cfg.Raft.LogLevel), + }) + snapStore, err = raft.NewFileSnapshotStoreWithLogger(filepath.Join(cfg.StateDirPath, "snapstore"), 2, snaplogger) + if err != nil { + return nil, err + } + } // opens the listener on the raft port, raft will close it when it thinks it's appropriate ln, err := ts.Listen("tcp", raftAddr(self.hostAddr, cfg)) @@ -234,7 +257,7 @@ func startRaft(shutdownCtx context.Context, ts *tsnet.Server, fsm *raft.FSM, sel return nil, err } - logger := hclog.New(&hclog.LoggerOptions{ + transportLogger := hclog.New(&hclog.LoggerOptions{ Name: "raft-net", Output: cfg.Raft.LogOutput, Level: hclog.LevelFromString(cfg.Raft.LogLevel), @@ -248,9 +271,9 @@ func startRaft(shutdownCtx context.Context, ts *tsnet.Server, fsm *raft.FSM, sel }, cfg.MaxConnPool, cfg.ConnTimeout, - logger) + transportLogger) - return raft.NewRaft(cfg.Raft, *fsm, logStore, stableStore, snapshots, transport) + return raft.NewRaft(cfg.Raft, *fsm, logStore, stableStore, snapStore, transport) } // A Consensus is the consensus algorithm for a tsnet.Server From 5f35143d83016520b6870fedd116ada0a84e856a Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 2 Jun 2025 13:22:28 -0700 Subject: [PATCH 0911/1708] go.mod,wgengine/magicsock: update wireguard-go (#16148) Our conn.Bind implementation is updated to make Send() offset-aware for future VXLAN/Geneve encapsulation support. 
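As a rough illustration of what the new offset parameter means for a transport (an assumption about intent, not code from wireguard-go or this patch): each outgoing buffer reserves offset bytes of headroom at the front for a future Geneve/VXLAN header, and only buf[offset:] is put on the wire.

```go
// sendWithOffset is a hypothetical helper: it skips the reserved headroom
// in every buffer before handing the payload to the underlying writer.
func sendWithOffset(buffs [][]byte, offset int, writeTo func(payload []byte) error) error {
	for _, buf := range buffs {
		if err := writeTo(buf[offset:]); err != nil {
			return err
		}
	}
	return nil
}
```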
Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- go.mod | 2 +- go.sum | 4 +-- wgengine/magicsock/batching_conn.go | 2 +- wgengine/magicsock/batching_conn_linux.go | 9 +++--- .../magicsock/batching_conn_linux_test.go | 29 +++++++++++-------- wgengine/magicsock/endpoint.go | 7 +++-- wgengine/magicsock/magicsock.go | 12 ++++---- wgengine/magicsock/magicsock_test.go | 2 +- wgengine/magicsock/rebinding_conn.go | 5 ++-- wgengine/wgcfg/device_test.go | 6 ++-- 10 files changed, 43 insertions(+), 35 deletions(-) diff --git a/go.mod b/go.mod index 9ea25446b..ec98275e5 100644 --- a/go.mod +++ b/go.mod @@ -90,7 +90,7 @@ require ( github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 - github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251 + github.com/tailscale/wireguard-go v0.0.0-20250530210235-65cd6eed7d7f github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e github.com/tc-hib/winres v0.2.1 github.com/tcnksm/go-httpstat v0.2.0 diff --git a/go.sum b/go.sum index 318eae1ea..0b521da8c 100644 --- a/go.sum +++ b/go.sum @@ -975,8 +975,8 @@ github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:U github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= -github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251 h1:h/41LFTrwMxB9Xvvug0kRdQCU5TlV1+pAMQw0ZtDE3U= -github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250530210235-65cd6eed7d7f h1:vg3PmQdq1BbB2V81iC1VBICQtfwbVGZ/4A/p7QKXTK0= +github.com/tailscale/wireguard-go v0.0.0-20250530210235-65cd6eed7d7f/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= diff --git a/wgengine/magicsock/batching_conn.go b/wgengine/magicsock/batching_conn.go index 5320d1caf..58cfe28aa 100644 --- a/wgengine/magicsock/batching_conn.go +++ b/wgengine/magicsock/batching_conn.go @@ -21,5 +21,5 @@ var ( type batchingConn interface { nettype.PacketConn ReadBatch(msgs []ipv6.Message, flags int) (n int, err error) - WriteBatchTo(buffs [][]byte, addr netip.AddrPort) error + WriteBatchTo(buffs [][]byte, addr netip.AddrPort, offset int) error } diff --git a/wgengine/magicsock/batching_conn_linux.go b/wgengine/magicsock/batching_conn_linux.go index 25bf974b0..9ad5e4474 100644 --- a/wgengine/magicsock/batching_conn_linux.go +++ b/wgengine/magicsock/batching_conn_linux.go @@ -94,7 +94,7 @@ const ( // coalesceMessages iterates msgs, coalescing them where possible while // maintaining datagram order. All msgs have their Addr field set to addr. 
-func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, buffs [][]byte, msgs []ipv6.Message) int { +func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, buffs [][]byte, msgs []ipv6.Message, offset int) int { var ( base = -1 // index of msg we are currently coalescing into gsoSize int // segmentation size of msgs[base] @@ -106,6 +106,7 @@ func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, buffs [][]byte, maxPayloadLen = maxIPv6PayloadLen } for i, buff := range buffs { + buff = buff[offset:] if i > 0 { msgLen := len(buff) baseLenBefore := len(msgs[base].Buffers[0]) @@ -162,7 +163,7 @@ func (c *linuxBatchingConn) putSendBatch(batch *sendBatch) { c.sendBatchPool.Put(batch) } -func (c *linuxBatchingConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort) error { +func (c *linuxBatchingConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort, offset int) error { batch := c.getSendBatch() defer c.putSendBatch(batch) if addr.Addr().Is6() { @@ -181,10 +182,10 @@ func (c *linuxBatchingConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort) er ) retry: if c.txOffload.Load() { - n = c.coalesceMessages(batch.ua, buffs, batch.msgs) + n = c.coalesceMessages(batch.ua, buffs, batch.msgs, offset) } else { for i := range buffs { - batch.msgs[i].Buffers[0] = buffs[i] + batch.msgs[i].Buffers[0] = buffs[i][offset:] batch.msgs[i].Addr = batch.ua batch.msgs[i].OOB = batch.msgs[i].OOB[:0] } diff --git a/wgengine/magicsock/batching_conn_linux_test.go b/wgengine/magicsock/batching_conn_linux_test.go index 5c22bf1c7..effd5a2cc 100644 --- a/wgengine/magicsock/batching_conn_linux_test.go +++ b/wgengine/magicsock/batching_conn_linux_test.go @@ -9,6 +9,7 @@ import ( "testing" "golang.org/x/net/ipv6" + "tailscale.com/net/packet" ) func setGSOSize(control *[]byte, gsoSize uint16) { @@ -154,6 +155,10 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { getGSOSizeFromControl: getGSOSize, } + withGeneveSpace := func(len, cap int) []byte { + return make([]byte, len+packet.GeneveFixedHeaderLength, cap+packet.GeneveFixedHeaderLength) + } + cases := []struct { name string buffs [][]byte @@ -163,7 +168,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { { name: "one message no coalesce", buffs: [][]byte{ - make([]byte, 1, 1), + withGeneveSpace(1, 1), }, wantLens: []int{1}, wantGSO: []int{0}, @@ -171,8 +176,8 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { { name: "two messages equal len coalesce", buffs: [][]byte{ - make([]byte, 1, 2), - make([]byte, 1, 1), + withGeneveSpace(1, 2), + withGeneveSpace(1, 1), }, wantLens: []int{2}, wantGSO: []int{1}, @@ -180,8 +185,8 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { { name: "two messages unequal len coalesce", buffs: [][]byte{ - make([]byte, 2, 3), - make([]byte, 1, 1), + withGeneveSpace(2, 3), + withGeneveSpace(1, 1), }, wantLens: []int{3}, wantGSO: []int{2}, @@ -189,9 +194,9 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { { name: "three messages second unequal len coalesce", buffs: [][]byte{ - make([]byte, 2, 3), - make([]byte, 1, 1), - make([]byte, 2, 2), + withGeneveSpace(2, 3), + withGeneveSpace(1, 1), + withGeneveSpace(2, 2), }, wantLens: []int{3, 2}, wantGSO: []int{2, 0}, @@ -199,9 +204,9 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { { name: "three messages limited cap coalesce", buffs: [][]byte{ - make([]byte, 2, 4), - make([]byte, 2, 2), - make([]byte, 2, 2), + withGeneveSpace(2, 4), + withGeneveSpace(2, 2), + withGeneveSpace(2, 2), }, wantLens: []int{4, 2}, 
wantGSO: []int{2, 0}, @@ -219,7 +224,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { msgs[i].Buffers = make([][]byte, 1) msgs[i].OOB = make([]byte, 0, 2) } - got := c.coalesceMessages(addr, tt.buffs, msgs) + got := c.coalesceMessages(addr, tt.buffs, msgs, packet.GeneveFixedHeaderLength) if got != len(tt.wantLens) { t.Fatalf("got len %d want: %d", got, len(tt.wantLens)) } diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index c2d18d707..243d0f4de 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -927,7 +927,7 @@ var ( errPingTooBig = errors.New("ping size too big") ) -func (de *endpoint) send(buffs [][]byte) error { +func (de *endpoint) send(buffs [][]byte, offset int) error { de.mu.Lock() if de.expired { de.mu.Unlock() @@ -961,7 +961,7 @@ func (de *endpoint) send(buffs [][]byte) error { } var err error if udpAddr.IsValid() { - _, err = de.c.sendUDPBatch(udpAddr, buffs) + _, err = de.c.sendUDPBatch(udpAddr, buffs, offset) // If the error is known to indicate that the endpoint is no longer // usable, clear the endpoint statistics so that the next send will @@ -972,7 +972,7 @@ func (de *endpoint) send(buffs [][]byte) error { var txBytes int for _, b := range buffs { - txBytes += len(b) + txBytes += len(b[offset:]) } switch { @@ -993,6 +993,7 @@ func (de *endpoint) send(buffs [][]byte) error { allOk := true var txBytes int for _, buff := range buffs { + buff = buff[offset:] const isDisco = false ok, _ := de.c.sendAddr(derpAddr, de.publicKey, buff, isDisco) txBytes += len(buff) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 5b0f28a33..3a4fdf8a2 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1264,8 +1264,8 @@ func (c *Conn) networkDown() bool { return !c.networkUp.Load() } // Send implements conn.Bind. 
// -// See https://pkg.go.dev/golang.zx2c4.com/wireguard/conn#Bind.Send -func (c *Conn) Send(buffs [][]byte, ep conn.Endpoint) (err error) { +// See https://pkg.go.dev/github.com/tailscale/wireguard-go/conn#Bind.Send +func (c *Conn) Send(buffs [][]byte, ep conn.Endpoint, offset int) (err error) { n := int64(len(buffs)) defer func() { if err != nil { @@ -1278,7 +1278,7 @@ func (c *Conn) Send(buffs [][]byte, ep conn.Endpoint) (err error) { return errNetworkDown } if ep, ok := ep.(*endpoint); ok { - return ep.send(buffs) + return ep.send(buffs, offset) } // If it's not of type *endpoint, it's probably *lazyEndpoint, which means // we don't actually know who the peer is and we're waiting for wireguard-go @@ -1294,7 +1294,7 @@ var errNoUDP = errors.New("no UDP available on platform") var errUnsupportedConnType = errors.New("unsupported connection type") -func (c *Conn) sendUDPBatch(addr netip.AddrPort, buffs [][]byte) (sent bool, err error) { +func (c *Conn) sendUDPBatch(addr netip.AddrPort, buffs [][]byte, offset int) (sent bool, err error) { isIPv6 := false switch { case addr.Addr().Is4(): @@ -1304,9 +1304,9 @@ func (c *Conn) sendUDPBatch(addr netip.AddrPort, buffs [][]byte) (sent bool, err panic("bogus sendUDPBatch addr type") } if isIPv6 { - err = c.pconn6.WriteBatchTo(buffs, addr) + err = c.pconn6.WriteBatchTo(buffs, addr, offset) } else { - err = c.pconn4.WriteBatchTo(buffs, addr) + err = c.pconn4.WriteBatchTo(buffs, addr, offset) } if err != nil { var errGSO neterror.ErrUDPGSODisabled diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index ddbf3e394..e18011873 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -3147,7 +3147,7 @@ func TestNetworkDownSendErrors(t *testing.T) { defer conn.Close() conn.SetNetworkUp(false) - if err := conn.Send([][]byte{{00}}, &lazyEndpoint{}); err == nil { + if err := conn.Send([][]byte{{00}}, &lazyEndpoint{}, 0); err == nil { t.Error("expected error, got nil") } resp := httptest.NewRecorder() diff --git a/wgengine/magicsock/rebinding_conn.go b/wgengine/magicsock/rebinding_conn.go index c27abbadc..7a9dd1821 100644 --- a/wgengine/magicsock/rebinding_conn.go +++ b/wgengine/magicsock/rebinding_conn.go @@ -71,12 +71,13 @@ func (c *RebindingUDPConn) ReadFromUDPAddrPort(b []byte) (int, netip.AddrPort, e } // WriteBatchTo writes buffs to addr. 
-func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort) error { +func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort, offset int) error { for { pconn := *c.pconnAtomic.Load() b, ok := pconn.(batchingConn) if !ok { for _, buf := range buffs { + buf = buf[offset:] _, err := c.writeToUDPAddrPortWithInitPconn(pconn, buf, addr) if err != nil { return err @@ -84,7 +85,7 @@ func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort) err } return nil } - err := b.WriteBatchTo(buffs, addr) + err := b.WriteBatchTo(buffs, addr, offset) if err != nil { if pconn != c.currentConn() { continue diff --git a/wgengine/wgcfg/device_test.go b/wgengine/wgcfg/device_test.go index d54282e4b..9138d6e5a 100644 --- a/wgengine/wgcfg/device_test.go +++ b/wgengine/wgcfg/device_test.go @@ -242,9 +242,9 @@ type noopBind struct{} func (noopBind) Open(port uint16) (fns []conn.ReceiveFunc, actualPort uint16, err error) { return nil, 1, nil } -func (noopBind) Close() error { return nil } -func (noopBind) SetMark(mark uint32) error { return nil } -func (noopBind) Send(b [][]byte, ep conn.Endpoint) error { return nil } +func (noopBind) Close() error { return nil } +func (noopBind) SetMark(mark uint32) error { return nil } +func (noopBind) Send(b [][]byte, ep conn.Endpoint, offset int) error { return nil } func (noopBind) ParseEndpoint(s string) (conn.Endpoint, error) { return dummyEndpoint(s), nil } From 8a3afa5963f425e42a02016b7059434c3962f2c3 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Mon, 2 Jun 2025 15:52:16 +0100 Subject: [PATCH 0912/1708] ipn/ipnlocal: fix deadlock when filtering DisplayMessage URLs Updates tailscale/corp#27759 Signed-off-by: James Sanderson --- ipn/ipnlocal/local.go | 14 ++++++++++++-- ipn/ipnlocal/local_test.go | 1 + 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 05f026631..e494920b1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3289,6 +3289,16 @@ func (b *LocalBackend) popBrowserAuthNow(url string, keyExpired bool, recipient // // b.mu must *not* be held. func (b *LocalBackend) validPopBrowserURL(urlStr string) bool { + b.mu.Lock() + defer b.mu.Unlock() + return b.validPopBrowserURLLocked(urlStr) +} + +// validPopBrowserURLLocked reports whether urlStr is a valid value for a +// control server to send in a *URL field. +// +// b.mu must be held. +func (b *LocalBackend) validPopBrowserURLLocked(urlStr string) bool { if urlStr == "" { return false } @@ -3296,7 +3306,7 @@ func (b *LocalBackend) validPopBrowserURL(urlStr string) bool { if err != nil { return false } - serverURL := b.Prefs().ControlURLOrDefault() + serverURL := b.sanitizedPrefsLocked().ControlURLOrDefault() if ipn.IsLoginServerSynonym(serverURL) { // When connected to the official Tailscale control plane, only allow // URLs from tailscale.com or its subdomains. 
@@ -5830,7 +5840,7 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { if nm != nil { messages := make(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage) for id, msg := range nm.DisplayMessages { - if msg.PrimaryAction != nil && !b.validPopBrowserURL(msg.PrimaryAction.URL) { + if msg.PrimaryAction != nil && !b.validPopBrowserURLLocked(msg.PrimaryAction.URL) { msg.PrimaryAction = nil } messages[id] = msg diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 1ad3225a5..d23bd1e26 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -5374,6 +5374,7 @@ func TestDisplayMessagesURLFilter(t *testing.T) { ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() + defer b.lockAndGetUnlock()() b.setNetMapLocked(&netmap.NetworkMap{ DisplayMessages: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ "test-message": { From cc988596a214e7bb429dc85e4413586c13ea99d3 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Fri, 30 May 2025 13:03:46 +0100 Subject: [PATCH 0913/1708] posture: propagate serial number from MDM on Android Updates #16010 Signed-off-by: Anton Tolchanov --- posture/serialnumber_stub.go | 3 +-- posture/{serialnumber_ios.go => serialnumber_syspolicy.go} | 6 ++++-- util/syspolicy/policy_keys.go | 4 ++-- 3 files changed, 7 insertions(+), 6 deletions(-) rename posture/{serialnumber_ios.go => serialnumber_syspolicy.go} (75%) diff --git a/posture/serialnumber_stub.go b/posture/serialnumber_stub.go index cdabf03e5..4cc84fa13 100644 --- a/posture/serialnumber_stub.go +++ b/posture/serialnumber_stub.go @@ -1,13 +1,12 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// android: not implemented // js: not implemented // plan9: not implemented // solaris: currently unsupported by go-smbios: // https://github.com/digitalocean/go-smbios/pull/21 -//go:build android || solaris || plan9 || js || wasm || tamago || aix || (darwin && !cgo && !ios) +//go:build solaris || plan9 || js || wasm || tamago || aix || (darwin && !cgo && !ios) package posture diff --git a/posture/serialnumber_ios.go b/posture/serialnumber_syspolicy.go similarity index 75% rename from posture/serialnumber_ios.go rename to posture/serialnumber_syspolicy.go index 55d0e438b..d6491ff21 100644 --- a/posture/serialnumber_ios.go +++ b/posture/serialnumber_syspolicy.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build android || ios + package posture import ( @@ -10,9 +12,9 @@ import ( "tailscale.com/util/syspolicy" ) -// GetSerialNumbers returns the serial number of the iOS/tvOS device as reported by an +// GetSerialNumbers returns the serial number of the device as reported by an // MDM solution. It requires configuration via the DeviceSerialNumber system policy. -// This is the only way to gather serial numbers on iOS and tvOS. +// This is the only way to gather serial numbers on iOS, tvOS and Android. func GetSerialNumbers(_ logger.Logf) ([]string, error) { s, err := syspolicy.GetString(syspolicy.DeviceSerialNumber, "") if err != nil { diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index 29b2dfd28..ed00d0004 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -126,8 +126,8 @@ const ( // The default is "user-decides" unless otherwise stated. PostureChecking Key = "PostureChecking" // DeviceSerialNumber is the serial number of the device that is running Tailscale. 
- // This is used on iOS/tvOS to allow IT administrators to manually give us a serial number via MDM. - // We are unable to programmatically get the serial number from IOKit due to sandboxing restrictions. + // This is used on Android, iOS and tvOS to allow IT administrators to manually give us a serial number via MDM. + // We are unable to programmatically get the serial number on mobile due to sandboxing restrictions. DeviceSerialNumber Key = "DeviceSerialNumber" // ManagedByOrganizationName indicates the name of the organization managing the Tailscale From 5f0e1390123db8231244107706145a48a9d60938 Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Tue, 3 Jun 2025 12:52:00 -0400 Subject: [PATCH 0914/1708] cmd/tsidp: add Docker image building support (#16078) - Add tsidp target to build_docker.sh for standard Tailscale image builds - Add publishdevtsidp Makefile target for development image publishing - Remove Dockerfile, using standard build process - Include tsidp in depaware dependency tracking - Update README with comprehensive Docker usage examples This enables tsidp to be built and published like other Tailscale components (tailscale/tailscale, tailscale/k8s-operator, tailscale/k8s-nameserver). Fixes #16077 Signed-off-by: Raj Singh --- Makefile | 14 +- build_docker.sh | 18 ++ cmd/tsidp/Dockerfile | 41 --- cmd/tsidp/README.md | 69 ++--- cmd/tsidp/depaware.txt | 657 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 719 insertions(+), 80 deletions(-) delete mode 100644 cmd/tsidp/Dockerfile create mode 100644 cmd/tsidp/depaware.txt diff --git a/Makefile b/Makefile index c30818c96..1978af90d 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,8 @@ updatedeps: ## Update depaware deps tailscale.com/cmd/tailscale \ tailscale.com/cmd/derper \ tailscale.com/cmd/k8s-operator \ - tailscale.com/cmd/stund + tailscale.com/cmd/stund \ + tailscale.com/cmd/tsidp PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update -goos=linux,darwin,windows,android,ios --internal \ tailscale.com/tsnet @@ -34,7 +35,8 @@ depaware: ## Run depaware checks tailscale.com/cmd/tailscale \ tailscale.com/cmd/derper \ tailscale.com/cmd/k8s-operator \ - tailscale.com/cmd/stund + tailscale.com/cmd/stund \ + tailscale.com/cmd/tsidp PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --goos=linux,darwin,windows,android,ios --internal \ tailscale.com/tsnet @@ -114,6 +116,14 @@ publishdevnameserver: ## Build and publish k8s-nameserver image to location spec @test "${REPO}" != "ghcr.io/tailscale/k8s-nameserver" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-nameserver" && exit 1) TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-nameserver ./build_docker.sh +publishdevtsidp: ## Build and publish tsidp image to location specified by ${REPO} + @test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1) + @test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1) + @test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) + @test "${REPO}" != "tailscale/tsidp" || (echo "REPO=... must not be tailscale/tsidp" && exit 1) + @test "${REPO}" != "ghcr.io/tailscale/tsidp" || (echo "REPO=... 
must not be ghcr.io/tailscale/tsidp" && exit 1) + TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=tsidp ./build_docker.sh + .PHONY: sshintegrationtest sshintegrationtest: ## Run the SSH integration tests in various Docker containers @GOOS=linux GOARCH=amd64 ./tool/go test -tags integrationtest -c ./ssh/tailssh -o ssh/tailssh/testcontainers/tailssh.test && \ diff --git a/build_docker.sh b/build_docker.sh index 15105c2ef..bdc9dc086 100755 --- a/build_docker.sh +++ b/build_docker.sh @@ -90,6 +90,24 @@ case "$TARGET" in --annotations="${ANNOTATIONS}" \ /usr/local/bin/k8s-nameserver ;; + tsidp) + DEFAULT_REPOS="tailscale/tsidp" + REPOS="${REPOS:-${DEFAULT_REPOS}}" + go run github.com/tailscale/mkctr \ + --gopaths="tailscale.com/cmd/tsidp:/usr/local/bin/tsidp" \ + --ldflags=" \ + -X tailscale.com/version.longStamp=${VERSION_LONG} \ + -X tailscale.com/version.shortStamp=${VERSION_SHORT} \ + -X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \ + --base="${BASE}" \ + --tags="${TAGS}" \ + --gotags="ts_package_container" \ + --repos="${REPOS}" \ + --push="${PUSH}" \ + --target="${PLATFORM}" \ + --annotations="${ANNOTATIONS}" \ + /usr/local/bin/tsidp + ;; *) echo "unknown target: $TARGET" exit 1 diff --git a/cmd/tsidp/Dockerfile b/cmd/tsidp/Dockerfile deleted file mode 100644 index c4f352ed0..000000000 --- a/cmd/tsidp/Dockerfile +++ /dev/null @@ -1,41 +0,0 @@ -# Build stage -FROM golang:alpine AS builder - -# Install build dependencies -RUN apk add --no-cache git - -# Set working directory -WORKDIR /src - -# Copy only go.mod and go.sum first to leverage Docker caching -COPY go.mod go.sum ./ -RUN go mod download - -# Copy the entire repository -COPY . . - -# Build the tsidp binary -RUN go build -o /bin/tsidp ./cmd/tsidp - -# Final stage -FROM alpine:latest - -# Create necessary directories -RUN mkdir -p /var/lib/tsidp - -# Copy binary from builder stage -COPY --from=builder /bin/tsidp /app/tsidp - -# Set working directory -WORKDIR /app - -# Environment variables -ENV TAILSCALE_USE_WIP_CODE=1 \ - TS_HOSTNAME=idp \ - TS_STATE_DIR=/var/lib/tsidp - -# Expose the default port -EXPOSE 443 - -# Run the application -ENTRYPOINT ["/bin/sh", "-c", "/app/tsidp --hostname=${TS_HOSTNAME} --dir=${TS_STATE_DIR}"] diff --git a/cmd/tsidp/README.md b/cmd/tsidp/README.md index 61a81e8ae..fce844e0b 100644 --- a/cmd/tsidp/README.md +++ b/cmd/tsidp/README.md @@ -12,43 +12,38 @@ ## Installation using Docker -1. **Build the Docker Image** - - The Dockerfile uses a multi-stage build process to: - - Build the `tsidp` binary from source - - Create a minimal Alpine-based image with just the necessary components - - ```bash - # Clone the Tailscale repository - git clone https://github.com/tailscale/tailscale.git - cd tailscale - ``` - - ```bash - # Build the Docker image - docker build -t tsidp:latest -f cmd/tsidp/Dockerfile . - ``` - -2. **Run the Container** - - Replace `YOUR_TAILSCALE_AUTHKEY` with your Tailscale authentication key. - - ```bash - docker run -d \ - --name tsidp \ - -p 443:443 \ - -e TS_AUTHKEY=YOUR_TAILSCALE_AUTHKEY \ - -e TS_HOSTNAME=idp \ - -v tsidp-data:/var/lib/tsidp \ - tsidp:latest - ``` - -3. **Verify Installation** - ```bash - docker logs tsidp - ``` - - Visit `https://idp.tailnet.ts.net` to confirm the service is running. 
+### Building from Source + +```bash +# Clone the Tailscale repository +git clone https://github.com/tailscale/tailscale.git +cd tailscale + +# Build and publish to your own registry +make publishdevtsidp REPO=ghcr.io/yourusername/tsidp TAGS=v0.0.1 PUSH=true +``` + +### Running the Container + +Replace `YOUR_TAILSCALE_AUTHKEY` with your Tailscale authentication key: + +```bash +docker run -d \ + --name tsidp \ + -p 443:443 \ + -e TS_AUTHKEY=YOUR_TAILSCALE_AUTHKEY \ + -e TAILSCALE_USE_WIP_CODE=1 \ + -v tsidp-data:/var/lib/tsidp \ + ghcr.io/yourusername/tsidp:v0.0.1 \ + tsidp --hostname=idp --dir=/var/lib/tsidp +``` + +### Verify Installation +```bash +docker logs tsidp +``` + +Visit `https://idp.tailnet.ts.net` to confirm the service is running. ## Usage Example: Proxmox Integration diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt new file mode 100644 index 000000000..1ea4b3d88 --- /dev/null +++ b/cmd/tsidp/depaware.txt @@ -0,0 +1,657 @@ +tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depaware) + + filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus + filippo.io/edwards25519/field from filippo.io/edwards25519 + W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ + W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate + W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy + L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ + L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore + L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ + L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry + L github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ + L github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 + L github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ + L github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore + L github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds + L github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from 
github.com/aws/aws-sdk-go-v2/feature/ec2/imds + L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ + L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ + L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ + L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds + L github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 + L github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws + L github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry + L github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore + L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm + L github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso + L github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso + L github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc + L github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc + L github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ + L github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ + L github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ + L github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ + L github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer + L github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/smithy-go/encoding from 
github.com/aws/smithy-go/encoding/json+ + L github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer + L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ + L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ + L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config + L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ + L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ + L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ + L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ + L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http + L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm + github.com/coder/websocket from tailscale.com/util/eventbus + github.com/coder/websocket/internal/errd from github.com/coder/websocket + github.com/coder/websocket/internal/util from github.com/coder/websocket + github.com/coder/websocket/internal/xsync from github.com/coder/websocket + L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw + W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ + W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ + W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc + W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com + W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/osdiag+ + LW 💣 github.com/digitalocean/go-smbios/smbios from tailscale.com/posture + github.com/fxamacker/cbor/v2 from tailscale.com/tka + github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/go-json-experiment/json from tailscale.com/types/opt+ + github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ + W 💣 github.com/go-ole/go-ole from github.com/go-ole/go-ole/oleutil+ + W 💣 github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet + L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns + github.com/golang/groupcache/lru from tailscale.com/net/dnscache + github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ + L github.com/google/nftables from tailscale.com/util/linuxfw + L 💣 
github.com/google/nftables/alignedbuff from github.com/google/nftables/xt + L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ + L github.com/google/nftables/expr from github.com/google/nftables+ + L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ + L github.com/google/nftables/xt from github.com/google/nftables/expr+ + DW github.com/google/uuid from github.com/prometheus-community/pro-bing+ + github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ + L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns + L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 + L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm + L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon + L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink + github.com/klauspost/compress from github.com/klauspost/compress/zstd + github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 + github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd + github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd + github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe + github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd + L github.com/mdlayher/genetlink from tailscale.com/net/tstun + L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ + L github.com/mdlayher/netlink/nltest from github.com/google/nftables + L github.com/mdlayher/sdnotify from tailscale.com/util/systemd + L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ + github.com/miekg/dns from tailscale.com/net/dns/recursive + 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket + D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack + L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient + W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket + W 💣 github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio + W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio + W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs + W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ + github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ + github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper + github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ + github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp + github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ + github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp + github.com/tailscale/hujson from tailscale.com/ipn/conffile + L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ + L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink + github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + github.com/tailscale/web-client-prebuilt from tailscale.com/client/web + 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ + W 💣 
github.com/tailscale/wireguard-go/conn/winrio from github.com/tailscale/wireguard-go/conn + 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ + 💣 github.com/tailscale/wireguard-go/ipc from github.com/tailscale/wireguard-go/device + W 💣 github.com/tailscale/wireguard-go/ipc/namedpipe from github.com/tailscale/wireguard-go/ipc + github.com/tailscale/wireguard-go/ratelimiter from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/replay from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ + github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device + 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ + L github.com/vishvananda/netns from github.com/tailscale/netlink+ + github.com/x448/float16 from github.com/fxamacker/cbor/v2 + 💣 go4.org/mem from tailscale.com/client/local+ + go4.org/netipx from tailscale.com/ipn/ipnlocal+ + W 💣 golang.zx2c4.com/wintun from github.com/tailscale/wireguard-go/tun + W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/dns+ + gopkg.in/square/go-jose.v2 from gopkg.in/square/go-jose.v2/jwt+ + gopkg.in/square/go-jose.v2/cipher from gopkg.in/square/go-jose.v2 + gopkg.in/square/go-jose.v2/json from gopkg.in/square/go-jose.v2+ + gopkg.in/square/go-jose.v2/jwt from tailscale.com/cmd/tsidp + gvisor.dev/gvisor/pkg/atomicbitops from gvisor.dev/gvisor/pkg/buffer+ + gvisor.dev/gvisor/pkg/bits from gvisor.dev/gvisor/pkg/buffer + 💣 gvisor.dev/gvisor/pkg/buffer from gvisor.dev/gvisor/pkg/tcpip+ + gvisor.dev/gvisor/pkg/context from gvisor.dev/gvisor/pkg/refs + 💣 gvisor.dev/gvisor/pkg/gohacks from gvisor.dev/gvisor/pkg/state/wire+ + gvisor.dev/gvisor/pkg/linewriter from gvisor.dev/gvisor/pkg/log + gvisor.dev/gvisor/pkg/log from gvisor.dev/gvisor/pkg/context+ + gvisor.dev/gvisor/pkg/rand from gvisor.dev/gvisor/pkg/tcpip+ + gvisor.dev/gvisor/pkg/refs from gvisor.dev/gvisor/pkg/buffer+ + 💣 gvisor.dev/gvisor/pkg/sleep from gvisor.dev/gvisor/pkg/tcpip/transport/tcp + 💣 gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ + gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state + 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ + 💣 gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack + gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/tcpip/adapters/gonet from tailscale.com/wgengine/netstack + 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ + gvisor.dev/gvisor/pkg/tcpip/hash/jenkins from gvisor.dev/gvisor/pkg/tcpip/stack+ + gvisor.dev/gvisor/pkg/tcpip/header from gvisor.dev/gvisor/pkg/tcpip/header/parse+ + gvisor.dev/gvisor/pkg/tcpip/header/parse from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ + gvisor.dev/gvisor/pkg/tcpip/internal/tcp from gvisor.dev/gvisor/pkg/tcpip/transport/tcp + gvisor.dev/gvisor/pkg/tcpip/network/hash from gvisor.dev/gvisor/pkg/tcpip/network/ipv4 + gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ + gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ + gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ + gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from 
tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ + gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ + 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/tcpip/stack/gro from tailscale.com/wgengine/netstack/gro + gvisor.dev/gvisor/pkg/tcpip/transport from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ + gvisor.dev/gvisor/pkg/tcpip/transport/icmp from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip/transport/internal/network from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ + gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop from gvisor.dev/gvisor/pkg/tcpip/transport/raw + gvisor.dev/gvisor/pkg/tcpip/transport/packet from gvisor.dev/gvisor/pkg/tcpip/transport/raw + gvisor.dev/gvisor/pkg/tcpip/transport/raw from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ + 💣 gvisor.dev/gvisor/pkg/tcpip/transport/tcp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack from gvisor.dev/gvisor/pkg/tcpip/stack + gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ + tailscale.com from tailscale.com/version + tailscale.com/appc from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/atomicfile from tailscale.com/ipn+ + tailscale.com/client/local from tailscale.com/client/tailscale+ + tailscale.com/client/tailscale from tailscale.com/derp+ + tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ + tailscale.com/client/web from tailscale.com/ipn/ipnlocal + tailscale.com/clientupdate from tailscale.com/client/web+ + LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate + tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ + tailscale.com/control/controlclient from tailscale.com/ipn/ipnext+ + tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp + tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/derp from tailscale.com/derp/derphttp+ + tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ + tailscale.com/disco from tailscale.com/derp+ + tailscale.com/doctor from tailscale.com/ipn/ipnlocal + tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal + tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal + tailscale.com/drive from tailscale.com/client/local+ + tailscale.com/envknob from tailscale.com/client/local+ + tailscale.com/envknob/featureknob from tailscale.com/client/web+ + tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/health from tailscale.com/control/controlclient+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal + tailscale.com/hostinfo from tailscale.com/client/web+ + tailscale.com/internal/noiseconn from tailscale.com/control/controlclient + tailscale.com/ipn from tailscale.com/client/local+ + tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ + tailscale.com/ipn/ipnstate from tailscale.com/client/local+ + 
tailscale.com/ipn/localapi from tailscale.com/tsnet + tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ + L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store + L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store + tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ + L tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ + L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore + tailscale.com/kube/kubetypes from tailscale.com/envknob+ + tailscale.com/licenses from tailscale.com/client/web + tailscale.com/log/filelogger from tailscale.com/logpolicy + tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal + tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ + tailscale.com/logtail from tailscale.com/control/controlclient+ + tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ + tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ + tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/connstats from tailscale.com/net/tstun+ + tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ + tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback + tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ + tailscale.com/net/dns/resolver from tailscale.com/net/dns+ + tailscale.com/net/dnscache from tailscale.com/control/controlclient+ + tailscale.com/net/dnsfallback from tailscale.com/control/controlclient+ + tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/memnet from tailscale.com/tsnet + tailscale.com/net/netaddr from tailscale.com/ipn+ + tailscale.com/net/netcheck from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/neterror from tailscale.com/net/dns/resolver+ + tailscale.com/net/netkernelconf from tailscale.com/ipn/ipnlocal + tailscale.com/net/netknob from tailscale.com/logpolicy+ + 💣 tailscale.com/net/netmon from tailscale.com/control/controlclient+ + 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ + W 💣 tailscale.com/net/netstat from tailscale.com/portlist + tailscale.com/net/netutil from tailscale.com/client/local+ + tailscale.com/net/netx from tailscale.com/control/controlclient+ + tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/packet/checksum from tailscale.com/net/tstun + tailscale.com/net/ping from tailscale.com/net/netcheck+ + tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/proxymux from tailscale.com/tsnet + tailscale.com/net/routetable from tailscale.com/doctor/routetable + tailscale.com/net/socks5 from tailscale.com/tsnet + tailscale.com/net/sockstats from tailscale.com/control/controlclient+ + tailscale.com/net/stun from tailscale.com/ipn/localapi+ + L tailscale.com/net/tcpinfo from tailscale.com/derp + tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ + tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial + tailscale.com/net/tsaddr from tailscale.com/client/web+ + tailscale.com/net/tsdial from tailscale.com/control/controlclient+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + tailscale.com/net/tstun from tailscale.com/tsd+ + tailscale.com/net/udprelay/endpoint from 
tailscale.com/wgengine/magicsock + tailscale.com/omit from tailscale.com/ipn/conffile + tailscale.com/paths from tailscale.com/client/local+ + 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal + tailscale.com/posture from tailscale.com/ipn/ipnlocal + tailscale.com/proxymap from tailscale.com/tsd+ + 💣 tailscale.com/safesocket from tailscale.com/client/local+ + tailscale.com/syncs from tailscale.com/control/controlhttp+ + tailscale.com/tailcfg from tailscale.com/client/local+ + tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal + tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock + tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tka from tailscale.com/client/local+ + tailscale.com/tsconst from tailscale.com/ipn/ipnlocal+ + tailscale.com/tsd from tailscale.com/ipn/ipnext+ + tailscale.com/tsnet from tailscale.com/cmd/tsidp + tailscale.com/tstime from tailscale.com/control/controlclient+ + tailscale.com/tstime/mono from tailscale.com/net/tstun+ + tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tsweb from tailscale.com/util/eventbus + tailscale.com/tsweb/varz from tailscale.com/tsweb+ + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/bools from tailscale.com/tsnet + tailscale.com/types/dnstype from tailscale.com/client/local+ + tailscale.com/types/empty from tailscale.com/ipn+ + tailscale.com/types/ipproto from tailscale.com/ipn+ + tailscale.com/types/key from tailscale.com/client/local+ + tailscale.com/types/lazy from tailscale.com/clientupdate+ + tailscale.com/types/logger from tailscale.com/appc+ + tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ + tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netmap from tailscale.com/control/controlclient+ + tailscale.com/types/nettype from tailscale.com/ipn/localapi+ + tailscale.com/types/opt from tailscale.com/client/tailscale+ + tailscale.com/types/persist from tailscale.com/control/controlclient+ + tailscale.com/types/preftype from tailscale.com/ipn+ + tailscale.com/types/ptr from tailscale.com/control/controlclient+ + tailscale.com/types/result from tailscale.com/util/lineiter + tailscale.com/types/structs from tailscale.com/control/controlclient+ + tailscale.com/types/tkatype from tailscale.com/client/local+ + tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/clientmetric from tailscale.com/appc+ + tailscale.com/util/cloudenv from tailscale.com/hostinfo+ + tailscale.com/util/cmpver from tailscale.com/clientupdate+ + tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ + 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ + L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ + tailscale.com/util/dnsname from tailscale.com/appc+ + tailscale.com/util/eventbus from tailscale.com/ipn/localapi+ + tailscale.com/util/execqueue from tailscale.com/appc+ + tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal + tailscale.com/util/groupmember from tailscale.com/client/web+ + 💣 tailscale.com/util/hashx from tailscale.com/util/deephash + tailscale.com/util/httpm from tailscale.com/client/tailscale+ + tailscale.com/util/lineiter from tailscale.com/hostinfo+ + L tailscale.com/util/linuxfw from tailscale.com/net/netns+ + tailscale.com/util/mak from tailscale.com/appc+ + tailscale.com/util/multierr from 
tailscale.com/control/controlclient+ + tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/nocasemaps from tailscale.com/types/ipproto + 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi + W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag + tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal + tailscale.com/util/race from tailscale.com/net/dns/resolver + tailscale.com/util/racebuild from tailscale.com/logpolicy + tailscale.com/util/rands from tailscale.com/cmd/tsidp+ + tailscale.com/util/ringbuffer from tailscale.com/wgengine/magicsock + tailscale.com/util/set from tailscale.com/control/controlclient+ + tailscale.com/util/singleflight from tailscale.com/control/controlclient+ + tailscale.com/util/slicesx from tailscale.com/appc+ + tailscale.com/util/syspolicy from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ + tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ + tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock + tailscale.com/util/systemd from tailscale.com/control/controlclient+ + tailscale.com/util/testenv from tailscale.com/control/controlclient+ + tailscale.com/util/truncate from tailscale.com/logtail + tailscale.com/util/usermetric from tailscale.com/health+ + tailscale.com/util/vizerror from tailscale.com/tailcfg+ + 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ + W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ + W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns+ + W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal + W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ + tailscale.com/util/zstdframe from tailscale.com/control/controlclient+ + tailscale.com/version from tailscale.com/client/web+ + tailscale.com/version/distro from tailscale.com/client/web+ + tailscale.com/wgengine from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/filter from tailscale.com/control/controlclient+ + tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+ + 💣 tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/netlog from tailscale.com/wgengine + tailscale.com/wgengine/netstack from tailscale.com/tsnet + tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ + tailscale.com/wgengine/router from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ + tailscale.com/wgengine/wglog from tailscale.com/wgengine + W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router + golang.org/x/crypto/argon2 from tailscale.com/tka + golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ + golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ + LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf + golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + 
golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/ed25519 from gopkg.in/square/go-jose.v2 + golang.org/x/crypto/hkdf from tailscale.com/control/controlbase + golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/nacl/box from tailscale.com/types/key + golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box + golang.org/x/crypto/pbkdf2 from gopkg.in/square/go-jose.v2 + golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device + golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal + LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh + golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ + golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ + golang.org/x/net/bpf from github.com/mdlayher/genetlink+ + golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/http/httpguts from golang.org/x/net/http2+ + golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ + golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2/hpack from golang.org/x/net/http2+ + golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ + golang.org/x/net/idna from golang.org/x/net/http/httpguts+ + golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 + golang.org/x/net/internal/iana from golang.org/x/net/icmp+ + golang.org/x/net/internal/socket from golang.org/x/net/icmp+ + golang.org/x/net/internal/socks from golang.org/x/net/proxy + golang.org/x/net/ipv4 from github.com/miekg/dns+ + golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/proxy from tailscale.com/net/netns + D golang.org/x/net/route from net+ + golang.org/x/sync/errgroup from github.com/mdlayher/socket+ + golang.org/x/sys/cpu from github.com/tailscale/certstore+ + LD golang.org/x/sys/unix from github.com/google/nftables+ + W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ + W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ + W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ + W golang.org/x/sys/windows/svc/mgr from tailscale.com/util/winutil + golang.org/x/term from tailscale.com/logpolicy + golang.org/x/text/secure/bidirule from golang.org/x/net/idna + golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ + golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ + golang.org/x/text/unicode/norm from golang.org/x/net/idna + golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ + archive/tar from tailscale.com/clientupdate + bufio from compress/flate+ + bytes from archive/tar+ + cmp from encoding/json+ + compress/flate from compress/gzip+ + compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ + W compress/zlib from debug/pe + container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp + container/list from crypto/tls+ + context from crypto/tls+ + crypto from crypto/ecdh+ + crypto/aes from crypto/internal/hpke+ + crypto/cipher from crypto/aes+ + crypto/des from crypto/tls+ + crypto/dsa from crypto/x509+ + crypto/ecdh from crypto/ecdsa+ + crypto/ecdsa from crypto/tls+ + crypto/ed25519 from crypto/tls+ + crypto/elliptic from 
crypto/ecdsa+ + crypto/hmac from crypto/tls+ + crypto/internal/boring from crypto/aes+ + crypto/internal/boring/bbig from crypto/ecdsa+ + crypto/internal/boring/sig from crypto/internal/boring + crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/fips140 from crypto/internal/fips140/aes+ + crypto/internal/fips140/aes from crypto/aes+ + crypto/internal/fips140/aes/gcm from crypto/cipher+ + crypto/internal/fips140/alias from crypto/cipher+ + crypto/internal/fips140/bigmod from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/check from crypto/internal/fips140/aes+ + crypto/internal/fips140/drbg from crypto/internal/fips140/aes/gcm+ + crypto/internal/fips140/ecdh from crypto/ecdh + crypto/internal/fips140/ecdsa from crypto/ecdsa + crypto/internal/fips140/ed25519 from crypto/ed25519 + crypto/internal/fips140/edwards25519 from crypto/internal/fips140/ed25519 + crypto/internal/fips140/edwards25519/field from crypto/ecdh+ + crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ + crypto/internal/fips140/hmac from crypto/hmac+ + crypto/internal/fips140/mlkem from crypto/tls+ + crypto/internal/fips140/nistec from crypto/elliptic+ + crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec + crypto/internal/fips140/rsa from crypto/rsa + crypto/internal/fips140/sha256 from crypto/internal/fips140/check+ + crypto/internal/fips140/sha3 from crypto/internal/fips140/hmac+ + crypto/internal/fips140/sha512 from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ + crypto/internal/fips140/tls12 from crypto/tls + crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140hash from crypto/ecdsa+ + crypto/internal/fips140only from crypto/cipher+ + crypto/internal/hpke from crypto/tls + crypto/internal/impl from crypto/internal/fips140/aes+ + crypto/internal/randutil from crypto/dsa+ + crypto/internal/sysrand from crypto/internal/entropy+ + crypto/md5 from crypto/tls+ + LD crypto/mlkem from golang.org/x/crypto/ssh + crypto/rand from crypto/ed25519+ + crypto/rc4 from crypto/tls+ + crypto/rsa from crypto/tls+ + crypto/sha1 from crypto/tls+ + crypto/sha256 from crypto/tls+ + crypto/sha3 from crypto/internal/fips140hash + crypto/sha512 from crypto/ecdsa+ + crypto/subtle from crypto/cipher+ + crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ + crypto/tls/internal/fips140tls from crypto/tls + crypto/x509 from crypto/tls+ + D crypto/x509/internal/macos from crypto/x509 + crypto/x509/pkix from crypto/x509+ + DW database/sql/driver from github.com/google/uuid + W debug/dwarf from debug/pe + W debug/pe from github.com/dblohm7/wingoes/pe + embed from github.com/tailscale/web-client-prebuilt+ + encoding from encoding/json+ + encoding/asn1 from crypto/x509+ + encoding/base32 from github.com/fxamacker/cbor/v2+ + encoding/base64 from encoding/json+ + encoding/binary from compress/gzip+ + encoding/hex from crypto/x509+ + encoding/json from expvar+ + encoding/pem from crypto/tls+ + encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ + errors from archive/tar+ + expvar from tailscale.com/derp+ + flag from tailscale.com/cmd/tsidp+ + fmt from archive/tar+ + hash from compress/zlib+ + W hash/adler32 from compress/zlib + hash/crc32 from compress/gzip+ + hash/maphash from go4.org/mem + 
html from html/template+ + html/template from tailscale.com/util/eventbus+ + internal/abi from crypto/x509/internal/macos+ + internal/asan from internal/runtime/maps+ + internal/bisect from internal/godebug + internal/bytealg from bytes+ + internal/byteorder from crypto/cipher+ + internal/chacha8rand from math/rand/v2+ + internal/coverage/rtcov from runtime + internal/cpu from crypto/internal/fips140deps/cpu+ + internal/filepathlite from os+ + internal/fmtsort from fmt+ + internal/goarch from crypto/internal/fips140deps/cpu+ + internal/godebug from archive/tar+ + internal/godebugs from internal/godebug+ + internal/goexperiment from hash/maphash+ + internal/goos from crypto/x509+ + internal/itoa from internal/poll+ + internal/msan from internal/runtime/maps+ + internal/nettrace from net+ + internal/oserror from io/fs+ + internal/poll from net+ + internal/profile from net/http/pprof + internal/profilerecord from runtime+ + internal/race from internal/poll+ + internal/reflectlite from context+ + internal/runtime/atomic from internal/runtime/exithook+ + internal/runtime/exithook from runtime + internal/runtime/maps from reflect+ + internal/runtime/math from internal/runtime/maps+ + internal/runtime/sys from crypto/subtle+ + L internal/runtime/syscall from runtime+ + W internal/saferio from debug/pe + internal/singleflight from net + internal/stringslite from embed+ + internal/sync from sync+ + internal/syscall/execenv from os+ + LD internal/syscall/unix from crypto/internal/sysrand+ + W internal/syscall/windows from crypto/internal/sysrand+ + W internal/syscall/windows/registry from mime+ + W internal/syscall/windows/sysdll from internal/syscall/windows+ + internal/testlog from os + internal/unsafeheader from internal/reflectlite+ + io from archive/tar+ + io/fs from archive/tar+ + io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + iter from bytes+ + log from expvar+ + log/internal from log + maps from archive/tar+ + math from archive/tar+ + math/big from crypto/dsa+ + math/bits from bytes+ + math/rand from github.com/fxamacker/cbor/v2+ + math/rand/v2 from crypto/ecdsa+ + mime from mime/multipart+ + mime/multipart from net/http + mime/quotedprintable from mime/multipart + net from crypto/tls+ + net/http from expvar+ + net/http/httptrace from github.com/aws/smithy-go/transport/http+ + net/http/httputil from github.com/aws/smithy-go/transport/http+ + net/http/internal from net/http+ + net/http/internal/ascii from net/http+ + net/http/pprof from tailscale.com/ipn/localapi+ + net/netip from crypto/x509+ + net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + net/url from crypto/x509+ + os from crypto/internal/sysrand+ + os/exec from github.com/aws/aws-sdk-go-v2/credentials/processcreds+ + os/signal from tailscale.com/cmd/tsidp + os/user from archive/tar+ + path from archive/tar+ + path/filepath from archive/tar+ + reflect from archive/tar+ + regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints+ + regexp/syntax from regexp + runtime from archive/tar+ + runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ + runtime/pprof from net/http/pprof+ + runtime/trace from net/http/pprof + slices from archive/tar+ + sort from compress/flate+ + strconv from archive/tar+ + strings from archive/tar+ + sync from archive/tar+ + sync/atomic from context+ + syscall from archive/tar+ + text/tabwriter from runtime/pprof + text/template from html/template + text/template/parse from html/template+ + time from archive/tar+ + unicode from bytes+ + unicode/utf16 from 
crypto/x509+ + unicode/utf8 from bufio+ + unique from net/netip + unsafe from bytes+ + weak from unique From 1635ccca275fe3223f96f35f9ec5393f5613685e Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Thu, 29 May 2025 09:11:31 -0500 Subject: [PATCH 0915/1708] ssh/tailssh: display more useful error messages when authentication fails Also add a trailing newline to error banners so that SSH client messages don't print on the same line. Updates tailscale/corp#29138 Signed-off-by: Percy Wegmann --- ssh/tailssh/tailssh.go | 59 ++++++++++++++++++++++++------------- ssh/tailssh/tailssh_test.go | 44 ++++++++++++++++++++------- 2 files changed, 73 insertions(+), 30 deletions(-) diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index 19a2b11fd..b249a1063 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -281,7 +281,7 @@ func (c *conn) errBanner(message string, err error) error { if err != nil { c.logf("%s: %s", message, err) } - if err := c.spac.SendAuthBanner("tailscale: " + message); err != nil { + if err := c.spac.SendAuthBanner("tailscale: " + message + "\n"); err != nil { c.logf("failed to send auth banner: %s", err) } return errTerminal @@ -324,9 +324,16 @@ func (c *conn) clientAuth(cm gossh.ConnMetadata) (perms *gossh.Permissions, retE return nil, c.errBanner("failed to get connection info", err) } - action, localUser, acceptEnv, err := c.evaluatePolicy() - if err != nil { - return nil, c.errBanner("failed to evaluate SSH policy", err) + action, localUser, acceptEnv, result := c.evaluatePolicy() + switch result { + case accepted: + // do nothing + case rejectedUser: + return nil, c.errBanner(fmt.Sprintf("tailnet policy does not permit you to SSH as user %q", c.info.sshUser), nil) + case rejected, noPolicy: + return nil, c.errBanner("tailnet policy does not permit you to SSH to this node", fmt.Errorf("failed to evaluate policy, result: %s", result)) + default: + return nil, c.errBanner("failed to evaluate tailnet policy", fmt.Errorf("failed to evaluate policy, result: %s", result)) } c.action0 = action @@ -597,18 +604,23 @@ func (c *conn) setInfo(cm gossh.ConnMetadata) error { return nil } +type evalResult string + +const ( + noPolicy evalResult = "no policy" + rejected evalResult = "rejected" + rejectedUser evalResult = "rejected user" + accepted evalResult = "accept" +) + // evaluatePolicy returns the SSHAction and localUser after evaluating // the SSHPolicy for this conn. -func (c *conn) evaluatePolicy() (_ *tailcfg.SSHAction, localUser string, acceptEnv []string, _ error) { +func (c *conn) evaluatePolicy() (_ *tailcfg.SSHAction, localUser string, acceptEnv []string, result evalResult) { pol, ok := c.sshPolicy() if !ok { - return nil, "", nil, fmt.Errorf("tailssh: rejecting connection; no SSH policy") - } - a, localUser, acceptEnv, ok := c.evalSSHPolicy(pol) - if !ok { - return nil, "", nil, fmt.Errorf("tailssh: rejecting connection; no matching policy") + return nil, "", nil, noPolicy } - return a, localUser, acceptEnv, nil + return c.evalSSHPolicy(pol) } // handleSessionPostSSHAuth runs an SSH session after the SSH-level authentication, @@ -706,9 +718,9 @@ func (c *conn) newSSHSession(s ssh.Session) *sshSession { // isStillValid reports whether the conn is still valid. 
func (c *conn) isStillValid() bool { - a, localUser, _, err := c.evaluatePolicy() - c.vlogf("stillValid: %+v %v %v", a, localUser, err) - if err != nil { + a, localUser, _, result := c.evaluatePolicy() + c.vlogf("stillValid: %+v %v %v", a, localUser, result) + if result != accepted { return false } if !a.Accept && a.HoldAndDelegate == "" { @@ -1089,13 +1101,20 @@ func (c *conn) ruleExpired(r *tailcfg.SSHRule) bool { return r.RuleExpires.Before(c.srv.now()) } -func (c *conn) evalSSHPolicy(pol *tailcfg.SSHPolicy) (a *tailcfg.SSHAction, localUser string, acceptEnv []string, ok bool) { +func (c *conn) evalSSHPolicy(pol *tailcfg.SSHPolicy) (a *tailcfg.SSHAction, localUser string, acceptEnv []string, result evalResult) { + failedOnUser := false for _, r := range pol.Rules { if a, localUser, acceptEnv, err := c.matchRule(r); err == nil { - return a, localUser, acceptEnv, true + return a, localUser, acceptEnv, accepted + } else if errors.Is(err, errUserMatch) { + failedOnUser = true } } - return nil, "", nil, false + result = rejected + if failedOnUser { + result = rejectedUser + } + return nil, "", nil, result } // internal errors for testing; they don't escape to callers or logs. @@ -1129,6 +1148,9 @@ func (c *conn) matchRule(r *tailcfg.SSHRule) (a *tailcfg.SSHAction, localUser st if c.ruleExpired(r) { return nil, "", nil, errRuleExpired } + if !c.anyPrincipalMatches(r.Principals) { + return nil, "", nil, errPrincipalMatch + } if !r.Action.Reject { // For all but Reject rules, SSHUsers is required. // If SSHUsers is nil or empty, mapLocalUser will return an @@ -1138,9 +1160,6 @@ func (c *conn) matchRule(r *tailcfg.SSHRule) (a *tailcfg.SSHAction, localUser st return nil, "", nil, errUserMatch } } - if !c.anyPrincipalMatches(r.Principals) { - return nil, "", nil, errPrincipalMatch - } return r.Action, localUser, r.AcceptEnv, nil } diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 79479d7fb..96fb87f49 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -253,7 +253,7 @@ func TestEvalSSHPolicy(t *testing.T) { name string policy *tailcfg.SSHPolicy ci *sshConnInfo - wantMatch bool + wantResult evalResult wantUser string wantAcceptEnv []string }{ @@ -299,10 +299,20 @@ func TestEvalSSHPolicy(t *testing.T) { ci: &sshConnInfo{sshUser: "alice"}, wantUser: "thealice", wantAcceptEnv: []string{"EXAMPLE", "?_?", "TEST_*"}, - wantMatch: true, + wantResult: accepted, }, { - name: "no-matches-returns-failure", + name: "no-matches-returns-rejected", + policy: &tailcfg.SSHPolicy{ + Rules: []*tailcfg.SSHRule{}, + }, + ci: &sshConnInfo{sshUser: "alice"}, + wantUser: "", + wantAcceptEnv: nil, + wantResult: rejected, + }, + { + name: "no-user-matches-returns-rejected-user", policy: &tailcfg.SSHPolicy{ Rules: []*tailcfg.SSHRule{ { @@ -340,7 +350,7 @@ func TestEvalSSHPolicy(t *testing.T) { ci: &sshConnInfo{sshUser: "alice"}, wantUser: "", wantAcceptEnv: nil, - wantMatch: false, + wantResult: rejectedUser, }, } for _, tt := range tests { @@ -349,14 +359,14 @@ func TestEvalSSHPolicy(t *testing.T) { info: tt.ci, srv: &server{logf: tstest.WhileTestRunningLogger(t)}, } - got, gotUser, gotAcceptEnv, match := c.evalSSHPolicy(tt.policy) - if match != tt.wantMatch { - t.Errorf("match = %v; want %v", match, tt.wantMatch) + got, gotUser, gotAcceptEnv, result := c.evalSSHPolicy(tt.policy) + if result != tt.wantResult { + t.Errorf("result = %v; want %v", result, tt.wantResult) } if gotUser != tt.wantUser { t.Errorf("user = %q; want %q", gotUser, tt.wantUser) } - if tt.wantMatch == 
true && got == nil { + if tt.wantResult == accepted && got == nil { t.Errorf("expected non-nil action on success") } if !slices.Equal(gotAcceptEnv, tt.wantAcceptEnv) { @@ -467,7 +477,7 @@ func (ts *localState) NodeKey() key.NodePublic { func newSSHRule(action *tailcfg.SSHAction) *tailcfg.SSHRule { return &tailcfg.SSHRule{ SSHUsers: map[string]string{ - "*": currentUser, + "alice": currentUser, }, Action: action, Principals: []*tailcfg.SSHPrincipal{ @@ -789,6 +799,11 @@ func TestSSHAuthFlow(t *testing.T) { Accept: true, Message: "Welcome to Tailscale SSH!", }) + bobRule := newSSHRule(&tailcfg.SSHAction{ + Accept: true, + Message: "Welcome to Tailscale SSH!", + }) + bobRule.SSHUsers = map[string]string{"bob": "bob"} rejectRule := newSSHRule(&tailcfg.SSHAction{ Reject: true, Message: "Go Away!", @@ -808,7 +823,16 @@ func TestSSHAuthFlow(t *testing.T) { sshEnabled: true, }, authErr: true, - wantBanners: []string{"tailscale: failed to evaluate SSH policy"}, + wantBanners: []string{"tailscale: tailnet policy does not permit you to SSH to this node\n"}, + }, + { + name: "user-mismatch", + state: &localState{ + sshEnabled: true, + matchingRule: bobRule, + }, + authErr: true, + wantBanners: []string{`tailscale: tailnet policy does not permit you to SSH as user "alice"` + "\n"}, }, { name: "accept", From 5fde183754c565c7a4f9c8cf956218d31ee30ba4 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Tue, 3 Jun 2025 15:09:34 +0100 Subject: [PATCH 0916/1708] ipn: add watch opt to include actions in health messages Updates tailscale/corp#27759 Signed-off-by: James Sanderson --- ipn/backend.go | 2 + ipn/ipnlocal/local.go | 70 ++++++++++++++++++------ ipn/ipnlocal/local_test.go | 106 ++++++++++++++++++++++++++++++++++++- 3 files changed, 161 insertions(+), 17 deletions(-) diff --git a/ipn/backend.go b/ipn/backend.go index 3e956f473..ab01d2fde 100644 --- a/ipn/backend.go +++ b/ipn/backend.go @@ -81,6 +81,8 @@ const ( NotifyInitialHealthState NotifyWatchOpt = 1 << 7 // if set, the first Notify message (sent immediately) will contain the current health.State of the client NotifyRateLimit NotifyWatchOpt = 1 << 8 // if set, rate limit spammy netmap updates to every few seconds + + NotifyHealthActions NotifyWatchOpt = 1 << 9 // if set, include PrimaryActions in health.State. Otherwise append the action URL to the text ) // Notify is a communication from a backend (e.g. tailscaled) to a frontend diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index e494920b1..0efec6b9f 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2948,28 +2948,19 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa b.WatchNotificationsAs(ctx, nil, mask, onWatchAdded, fn) } -// WatchNotificationsAs is like WatchNotifications but takes an [ipnauth.Actor] +// WatchNotificationsAs is like [LocalBackend.WatchNotifications] but takes an [ipnauth.Actor] // as an additional parameter. If non-nil, the specified callback is invoked // only for notifications relevant to this actor. 
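+// Callers that understand health warning actions should set
+// [ipn.NotifyHealthActions] in mask; otherwise any PrimaryAction on a health
+// warning is folded into the warning text before delivery. A rough
+// caller-side sketch, for illustration only (the callback body is a
+// placeholder):
+//
+//	b.WatchNotificationsAs(ctx, actor, ipn.NotifyInitialHealthState|ipn.NotifyHealthActions, nil,
+//		func(n *ipn.Notify) bool {
+//			// n.Health.Warnings[...].PrimaryAction is preserved here.
+//			return true
+//		})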
func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.Actor, mask ipn.NotifyWatchOpt, onWatchAdded func(), fn func(roNotify *ipn.Notify) (keepGoing bool)) { ch := make(chan *ipn.Notify, 128) sessionID := rands.HexString(16) - origFn := fn if mask&ipn.NotifyNoPrivateKeys != 0 { - fn = func(n *ipn.Notify) bool { - if n.NetMap == nil || n.NetMap.PrivateKey.IsZero() { - return origFn(n) - } - - // The netmap in n is shared across all watchers, so to mutate it for a - // single watcher we have to clone the notify and the netmap. We can - // make shallow clones, at least. - nm2 := *n.NetMap - n2 := *n - n2.NetMap = &nm2 - n2.NetMap.PrivateKey = key.NodePrivate{} - return origFn(&n2) - } + fn = filterPrivateKeys(fn) + } + if mask&ipn.NotifyHealthActions == 0 { + // if UI does not support PrimaryAction in health warnings, append + // action URLs to the warning text instead. + fn = appendHealthActions(fn) } var ini *ipn.Notify @@ -3060,6 +3051,53 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A sender.Run(ctx, ch) } +// filterPrivateKeys returns an IPN listener func that wraps the supplied IPN +// listener and zeroes out the PrivateKey in the NetMap passed to the wrapped +// listener. +func filterPrivateKeys(fn func(roNotify *ipn.Notify) (keepGoing bool)) func(*ipn.Notify) bool { + return func(n *ipn.Notify) bool { + if n.NetMap == nil || n.NetMap.PrivateKey.IsZero() { + return fn(n) + } + + // The netmap in n is shared across all watchers, so to mutate it for a + // single watcher we have to clone the notify and the netmap. We can + // make shallow clones, at least. + nm2 := *n.NetMap + n2 := *n + n2.NetMap = &nm2 + n2.NetMap.PrivateKey = key.NodePrivate{} + return fn(&n2) + } +} + +// appendHealthActions returns an IPN listener func that wraps the supplied IPN +// listener func and transforms health messages passed to the wrapped listener. +// If health messages with PrimaryActions are present, it appends the label & +// url in the PrimaryAction to the text of the message. For use for clients that +// do not process the PrimaryAction. +func appendHealthActions(fn func(roNotify *ipn.Notify) (keepGoing bool)) func(*ipn.Notify) bool { + return func(n *ipn.Notify) bool { + if n.Health == nil || len(n.Health.Warnings) == 0 { + return fn(n) + } + + // Shallow clone the notify and health so we can mutate them + h2 := *n.Health + n2 := *n + n2.Health = &h2 + n2.Health.Warnings = make(map[health.WarnableCode]health.UnhealthyState, len(n.Health.Warnings)) + for k, v := range n.Health.Warnings { + if v.PrimaryAction != nil { + v.Text = fmt.Sprintf("%s %s: %s", v.Text, v.PrimaryAction.Label, v.PrimaryAction.URL) + v.PrimaryAction = nil + } + n2.Health.Warnings[k] = v + } + return fn(&n2) + } +} + // pollRequestEngineStatus calls b.e.RequestStatus every 2 seconds until ctx // is done. 
func (b *LocalBackend) pollRequestEngineStatus(ctx context.Context) { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index d23bd1e26..8f9b6ee68 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -5348,6 +5348,8 @@ func TestDisplayMessages(t *testing.T) { ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() + b.mu.Lock() + defer b.mu.Unlock() b.setNetMapLocked(&netmap.NetworkMap{ DisplayMessages: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ "test-message": { @@ -5374,7 +5376,8 @@ func TestDisplayMessagesURLFilter(t *testing.T) { ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() - defer b.lockAndGetUnlock()() + b.mu.Lock() + defer b.mu.Unlock() b.setNetMapLocked(&netmap.NetworkMap{ DisplayMessages: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ "test-message": { @@ -5405,3 +5408,104 @@ func TestDisplayMessagesURLFilter(t *testing.T) { t.Errorf("Unexpected message content (-want/+got):\n%s", diff) } } + +// TestDisplayMessageIPNBus checks that we send health messages appropriately +// based on whether the watcher has sent the [ipn.NotifyHealthActions] watch +// option or not. +func TestDisplayMessageIPNBus(t *testing.T) { + type test struct { + name string + mask ipn.NotifyWatchOpt + wantWarning health.UnhealthyState + } + + msgs := map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test-message": { + Title: "Message title", + Text: "Message text.", + Severity: tailcfg.SeverityMedium, + PrimaryAction: &tailcfg.DisplayMessageAction{ + URL: "https://example.com", + Label: "Learn more", + }, + }, + } + + for _, tt := range []test{ + { + name: "older-client-no-actions", + mask: 0, + wantWarning: health.UnhealthyState{ + WarnableCode: "test-message", + Severity: health.SeverityMedium, + Title: "Message title", + Text: "Message text. 
Learn more: https://example.com", // PrimaryAction appended to text + PrimaryAction: nil, // PrimaryAction not included + }, + }, + { + name: "new-client-with-actions", + mask: ipn.NotifyHealthActions, + wantWarning: health.UnhealthyState{ + WarnableCode: "test-message", + Severity: health.SeverityMedium, + Title: "Message title", + Text: "Message text.", + PrimaryAction: &health.UnhealthyStateAction{ + URL: "https://example.com", + Label: "Learn more", + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + lb := newLocalBackendWithTestControl(t, false, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + return newClient(tb, opts) + }) + + ipnWatcher := newNotificationWatcher(t, lb, nil) + ipnWatcher.watch(tt.mask, []wantedNotification{{ + name: "test", + cond: func(_ testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool { + if n.Health == nil { + return false + } + got, ok := n.Health.Warnings["test-message"] + if ok { + if diff := cmp.Diff(tt.wantWarning, got); diff != "" { + t.Errorf("unexpected warning details (-want/+got):\n%s", diff) + return true // we failed the test so tell the watcher we've seen what we need to to stop it waiting + } + } + return ok + }, + }}) + + lb.SetPrefsForTest(&ipn.Prefs{ + ControlURL: "https://localhost:1/", + WantRunning: true, + LoggedOut: false, + }) + if err := lb.Start(ipn.Options{}); err != nil { + t.Fatalf("(*LocalBackend).Start(): %v", err) + } + + cc := lb.cc.(*mockControl) + + // Assert that we are logged in and authorized, and also send our DisplayMessages + cc.send(nil, "", true, &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), + DisplayMessages: msgs, + }) + + // Tell the health tracker that we are in a map poll because + // mockControl doesn't tell it + lb.HealthTracker().GotStreamedMapResponse() + + // Assert that we got the expected notification + ipnWatcher.check() + }) + } +} From 13ee2856752525a0ec4462f04eeb97f68b4f3830 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Wed, 4 Jun 2025 12:10:15 +0100 Subject: [PATCH 0917/1708] health: show DisplayMessage actions in 'tailscale status' Updates tailscale/corp#27759 Signed-off-by: James Sanderson --- health/health.go | 11 ++++++++--- health/health_test.go | 35 +++++++++++++++++++++++++++-------- 2 files changed, 35 insertions(+), 11 deletions(-) diff --git a/health/health.go b/health/health.go index 6dbbf782c..058870438 100644 --- a/health/health.go +++ b/health/health.go @@ -1054,13 +1054,18 @@ func (t *Tracker) stringsLocked() []string { warnLen := len(result) for _, c := range t.controlMessages { + var msg string if c.Title != "" && c.Text != "" { - result = append(result, c.Title+": "+c.Text) + msg = c.Title + ": " + c.Text } else if c.Title != "" { - result = append(result, c.Title) + msg = c.Title + "." 
} else if c.Text != "" { - result = append(result, c.Text) + msg = c.Text } + if c.PrimaryAction != nil { + msg = msg + " " + c.PrimaryAction.Label + ": " + c.PrimaryAction.URL + } + result = append(result, msg) } sort.Strings(result[warnLen:]) diff --git a/health/health_test.go b/health/health_test.go index f609cfb16..aa519e92c 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -467,15 +467,24 @@ func TestControlHealth(t *testing.T) { baseWarns := ht.CurrentState().Warnings baseStrs := ht.Strings() - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + msgs := map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ "control-health-test": { Title: "Control health message", - Text: "Extra help", + Text: "Extra help.", }, "control-health-title": { Title: "Control health title only", }, - }) + "control-health-with-action": { + Title: "Control health message", + Text: "Extra help.", + PrimaryAction: &tailcfg.DisplayMessageAction{ + URL: "http://www.example.com", + Label: "Learn more", + }, + }, + } + ht.SetControlHealth(msgs) t.Run("Warnings", func(t *testing.T) { wantWarns := map[WarnableCode]UnhealthyState{ @@ -483,13 +492,23 @@ func TestControlHealth(t *testing.T) { WarnableCode: "control-health-test", Severity: SeverityMedium, Title: "Control health message", - Text: "Extra help", + Text: "Extra help.", }, "control-health-title": { WarnableCode: "control-health-title", Severity: SeverityMedium, Title: "Control health title only", }, + "control-health-with-action": { + WarnableCode: "control-health-with-action", + Severity: SeverityMedium, + Title: "Control health message", + Text: "Extra help.", + PrimaryAction: &UnhealthyStateAction{ + URL: "http://www.example.com", + Label: "Learn more", + }, + }, } state := ht.CurrentState() gotWarns := maps.Clone(state.Warnings) @@ -505,8 +524,9 @@ func TestControlHealth(t *testing.T) { t.Run("Strings()", func(t *testing.T) { wantStrs := []string{ - "Control health message: Extra help", - "Control health title only", + "Control health message: Extra help.", + "Control health message: Extra help. Learn more: http://www.example.com", + "Control health title only.", } var gotStrs []string for _, s := range ht.Strings() { @@ -527,8 +547,7 @@ func TestControlHealth(t *testing.T) { Type: MetricLabelWarning, }).String() want := strconv.Itoa( - 2 + // from SetControlHealth - len(baseStrs), + len(msgs) + len(baseStrs), ) if got != want { t.Errorf("metricsHealthMessage.Get(warning) = %q, want %q", got, want) From 486a55f0a9bffc45eb34350f24fea5a76be5169a Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Wed, 16 Apr 2025 10:21:50 -0700 Subject: [PATCH 0918/1708] cmd/natc: add optional consensus backend Enable nat connector to be run on a cluster of machines for high availability. 
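A rough usage sketch of the new pool, for illustration only; the real wiring
lives in cmd/natc/natc.go, and the prefix, cluster tag, and tsnet server below
are placeholders:

    // startConsensusPool is an illustrative sketch, not the natc wiring.
    // It assumes imports of context, net/netip, go4.org/netipx,
    // tailscale.com/tsnet, and tailscale.com/cmd/natc/ippool.
    func startConsensusPool(ctx context.Context, ts *tsnet.Server) (*ippool.ConsensusIPPool, error) {
        var b netipx.IPSetBuilder
        b.AddPrefix(netip.MustParsePrefix("100.64.0.0/16")) // placeholder range
        ipset, err := b.IPSet()
        if err != nil {
            return nil, err
        }
        pool := ippool.NewConsensusIPPool(ipset)
        // Join (or form) the raft cluster of nodes carrying the placeholder tag.
        if err := pool.StartConsensus(ctx, ts, "tag:natc-cluster"); err != nil {
            return nil, err
        }
        // Writes (address checkouts) then go through the leader; reads are
        // served from locally replicated state.
        return pool, nil
    }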
Updates #14667 Signed-off-by: Fran Bull --- cmd/natc/ippool/consensusippool.go | 434 ++++++++++++++++++++ cmd/natc/ippool/consensusippool_test.go | 383 +++++++++++++++++ cmd/natc/ippool/consensusippoolserialize.go | 164 ++++++++ cmd/natc/ippool/ippool.go | 21 +- cmd/natc/ippool/ippool_test.go | 7 +- cmd/natc/natc.go | 28 +- cmd/natc/natc_test.go | 2 +- 7 files changed, 1029 insertions(+), 10 deletions(-) create mode 100644 cmd/natc/ippool/consensusippool.go create mode 100644 cmd/natc/ippool/consensusippool_test.go create mode 100644 cmd/natc/ippool/consensusippoolserialize.go diff --git a/cmd/natc/ippool/consensusippool.go b/cmd/natc/ippool/consensusippool.go new file mode 100644 index 000000000..4783209b2 --- /dev/null +++ b/cmd/natc/ippool/consensusippool.go @@ -0,0 +1,434 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ippool + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "log" + "net/netip" + "time" + + "github.com/hashicorp/raft" + "go4.org/netipx" + "tailscale.com/syncs" + "tailscale.com/tailcfg" + "tailscale.com/tsconsensus" + "tailscale.com/tsnet" + "tailscale.com/util/mak" +) + +// ConsensusIPPool implements an [IPPool] that is distributed among members of a cluster for high availability. +// Writes are directed to a leader among the cluster and are slower than reads, reads are performed locally +// using information replicated from the leader. +// The cluster maintains consistency, reads can be stale and writes can be unavailable if sufficient cluster +// peers are unavailable. +type ConsensusIPPool struct { + IPSet *netipx.IPSet + perPeerMap *syncs.Map[tailcfg.NodeID, *consensusPerPeerState] + consensus commandExecutor + unusedAddressLifetime time.Duration +} + +func NewConsensusIPPool(ipSet *netipx.IPSet) *ConsensusIPPool { + return &ConsensusIPPool{ + unusedAddressLifetime: 48 * time.Hour, // TODO (fran) is this appropriate? should it be configurable? + IPSet: ipSet, + perPeerMap: &syncs.Map[tailcfg.NodeID, *consensusPerPeerState]{}, + } +} + +// IPForDomain looks up or creates an IP address allocation for the tailcfg.NodeID and domain pair. +// If no address association is found, one is allocated from the range of free addresses for this tailcfg.NodeID. +// If no more address are available, an error is returned. +func (ipp *ConsensusIPPool) IPForDomain(nid tailcfg.NodeID, domain string) (netip.Addr, error) { + now := time.Now() + // Check local state; local state may be stale. If we have an IP for this domain, and we are not + // close to the expiry time for the domain, it's safe to return what we have. 
+ ps, psFound := ipp.perPeerMap.Load(nid) + if psFound { + if addr, addrFound := ps.domainToAddr[domain]; addrFound { + if ww, wwFound := ps.addrToDomain.Load(addr); wwFound { + if !isCloseToExpiry(ww.LastUsed, now, ipp.unusedAddressLifetime) { + ipp.fireAndForgetMarkLastUsed(nid, addr, ww, now) + return addr, nil + } + } + } + } + + // go via consensus + args := checkoutAddrArgs{ + NodeID: nid, + Domain: domain, + ReuseDeadline: now.Add(-1 * ipp.unusedAddressLifetime), + UpdatedAt: now, + } + bs, err := json.Marshal(args) + if err != nil { + return netip.Addr{}, err + } + c := tsconsensus.Command{ + Name: "checkoutAddr", + Args: bs, + } + result, err := ipp.consensus.ExecuteCommand(c) + if err != nil { + log.Printf("IPForDomain: raft error executing command: %v", err) + return netip.Addr{}, err + } + if result.Err != nil { + log.Printf("IPForDomain: error returned from state machine: %v", err) + return netip.Addr{}, result.Err + } + var addr netip.Addr + err = json.Unmarshal(result.Result, &addr) + return addr, err +} + +// DomainForIP looks up the domain associated with a tailcfg.NodeID and netip.Addr pair. +// If there is no association, the result is empty and ok is false. +func (ipp *ConsensusIPPool) DomainForIP(from tailcfg.NodeID, addr netip.Addr, updatedAt time.Time) (string, bool) { + // Look in local state, to save a consensus round trip; local state may be stale. + // + // The only time we expect ordering of commands to matter to clients is on first + // connection to a domain. In that case it may be that although we don't find the + // domain in our local state, it is in fact in the state of the state machine (ie + // the client did a DNS lookup, and we responded with an IP and _should_ know that + // domain when the TCP connection for that IP arrives.) + // + // So it's ok to return local state, unless local state doesn't recognize the domain, + // in which case we should check the consensus state machine to know for sure. + var domain string + ww, ok := ipp.domainLookup(from, addr) + if ok { + domain = ww.Domain + } else { + d, err := ipp.readDomainForIP(from, addr) + if err != nil { + log.Printf("error reading domain from consensus: %v", err) + return "", false + } + domain = d + } + if domain == "" { + log.Printf("did not find domain for node: %v, addr: %s", from, addr) + return "", false + } + ipp.fireAndForgetMarkLastUsed(from, addr, ww, updatedAt) + return domain, true +} + +func (ipp *ConsensusIPPool) fireAndForgetMarkLastUsed(from tailcfg.NodeID, addr netip.Addr, ww whereWhen, updatedAt time.Time) { + window := 5 * time.Minute + if updatedAt.Sub(ww.LastUsed).Abs() < window { + return + } + go func() { + err := ipp.markLastUsed(from, addr, ww.Domain, updatedAt) + if err != nil { + log.Printf("error marking last used: %v", err) + } + }() +} + +func (ipp *ConsensusIPPool) domainLookup(from tailcfg.NodeID, addr netip.Addr) (whereWhen, bool) { + ps, ok := ipp.perPeerMap.Load(from) + if !ok { + log.Printf("domainLookup: peer state absent for: %d", from) + return whereWhen{}, false + } + ww, ok := ps.addrToDomain.Load(addr) + if !ok { + log.Printf("domainLookup: peer state doesn't recognize addr: %s", addr) + return whereWhen{}, false + } + return ww, true +} + +// StartConsensus is part of the IPPool interface. It starts the raft background routines that handle consensus. 
+func (ipp *ConsensusIPPool) StartConsensus(ctx context.Context, ts *tsnet.Server, clusterTag string) error { + cfg := tsconsensus.DefaultConfig() + cfg.ServeDebugMonitor = true + cns, err := tsconsensus.Start(ctx, ts, ipp, clusterTag, cfg) + if err != nil { + return err + } + ipp.consensus = cns + return nil +} + +type whereWhen struct { + Domain string + LastUsed time.Time +} + +type consensusPerPeerState struct { + domainToAddr map[string]netip.Addr + addrToDomain *syncs.Map[netip.Addr, whereWhen] +} + +// StopConsensus is part of the IPPool interface. It stops the raft background routines that handle consensus. +func (ipp *ConsensusIPPool) StopConsensus(ctx context.Context) error { + return (ipp.consensus).(*tsconsensus.Consensus).Stop(ctx) +} + +// unusedIPV4 finds the next unused or expired IP address in the pool. +// IP addresses in the pool should be reused if they haven't been used for some period of time. +// reuseDeadline is the time before which addresses are considered to be expired. +// So if addresses are being reused after they haven't been used for 24 hours say, reuseDeadline +// would be 24 hours ago. +func (ps *consensusPerPeerState) unusedIPV4(ipset *netipx.IPSet, reuseDeadline time.Time) (netip.Addr, bool, string, error) { + // If we want to have a random IP choice behavior we could make that work with the state machine by doing something like + // passing the randomly chosen IP into the state machine call (so replaying logs would still be deterministic). + for _, r := range ipset.Ranges() { + ip := r.From() + toIP := r.To() + if !ip.IsValid() || !toIP.IsValid() { + continue + } + for toIP.Compare(ip) != -1 { + ww, ok := ps.addrToDomain.Load(ip) + if !ok { + return ip, false, "", nil + } + if ww.LastUsed.Before(reuseDeadline) { + return ip, true, ww.Domain, nil + } + ip = ip.Next() + } + } + return netip.Addr{}, false, "", errors.New("ip pool exhausted") +} + +// isCloseToExpiry returns true if the lastUsed and now times are more than +// half the lifetime apart +func isCloseToExpiry(lastUsed, now time.Time, lifetime time.Duration) bool { + return now.Sub(lastUsed).Abs() > (lifetime / 2) +} + +type readDomainForIPArgs struct { + NodeID tailcfg.NodeID + Addr netip.Addr +} + +// executeReadDomainForIP parses a readDomainForIP log entry and applies it. +func (ipp *ConsensusIPPool) executeReadDomainForIP(bs []byte) tsconsensus.CommandResult { + var args readDomainForIPArgs + err := json.Unmarshal(bs, &args) + if err != nil { + return tsconsensus.CommandResult{Err: err} + } + return ipp.applyReadDomainForIP(args.NodeID, args.Addr) +} + +func (ipp *ConsensusIPPool) applyReadDomainForIP(from tailcfg.NodeID, addr netip.Addr) tsconsensus.CommandResult { + domain := func() string { + ps, ok := ipp.perPeerMap.Load(from) + if !ok { + return "" + } + ww, ok := ps.addrToDomain.Load(addr) + if !ok { + return "" + } + return ww.Domain + }() + resultBs, err := json.Marshal(domain) + return tsconsensus.CommandResult{Result: resultBs, Err: err} +} + +// readDomainForIP executes a readDomainForIP command on the leader with raft. 
+func (ipp *ConsensusIPPool) readDomainForIP(nid tailcfg.NodeID, addr netip.Addr) (string, error) { + args := readDomainForIPArgs{ + NodeID: nid, + Addr: addr, + } + bs, err := json.Marshal(args) + if err != nil { + return "", err + } + c := tsconsensus.Command{ + Name: "readDomainForIP", + Args: bs, + } + result, err := ipp.consensus.ExecuteCommand(c) + if err != nil { + log.Printf("readDomainForIP: raft error executing command: %v", err) + return "", err + } + if result.Err != nil { + log.Printf("readDomainForIP: error returned from state machine: %v", err) + return "", result.Err + } + var domain string + err = json.Unmarshal(result.Result, &domain) + return domain, err +} + +type markLastUsedArgs struct { + NodeID tailcfg.NodeID + Addr netip.Addr + Domain string + UpdatedAt time.Time +} + +// executeMarkLastUsed parses a markLastUsed log entry and applies it. +func (ipp *ConsensusIPPool) executeMarkLastUsed(bs []byte) tsconsensus.CommandResult { + var args markLastUsedArgs + err := json.Unmarshal(bs, &args) + if err != nil { + return tsconsensus.CommandResult{Err: err} + } + err = ipp.applyMarkLastUsed(args.NodeID, args.Addr, args.Domain, args.UpdatedAt) + if err != nil { + return tsconsensus.CommandResult{Err: err} + } + return tsconsensus.CommandResult{} +} + +// applyMarkLastUsed applies the arguments from the log entry to the state. It updates an entry in the AddrToDomain +// map with a new LastUsed timestamp. +// applyMarkLastUsed is not safe for concurrent access. It's only called from raft which will +// not call it concurrently. +func (ipp *ConsensusIPPool) applyMarkLastUsed(from tailcfg.NodeID, addr netip.Addr, domain string, updatedAt time.Time) error { + ps, ok := ipp.perPeerMap.Load(from) + if !ok { + // There's nothing to mark. But this is unexpected, because we mark last used after we do things with peer state. + log.Printf("applyMarkLastUsed: could not find peer state, nodeID: %s", from) + return nil + } + ww, ok := ps.addrToDomain.Load(addr) + if !ok { + // The peer state didn't have an entry for the IP address (possibly it expired), so there's nothing to mark. + return nil + } + if ww.Domain != domain { + // The IP address expired and was reused for a new domain. Don't mark. + return nil + } + if ww.LastUsed.After(updatedAt) { + // This has been marked more recently. Don't mark. + return nil + } + ww.LastUsed = updatedAt + ps.addrToDomain.Store(addr, ww) + return nil +} + +// markLastUsed executes a markLastUsed command on the leader with raft. +func (ipp *ConsensusIPPool) markLastUsed(nid tailcfg.NodeID, addr netip.Addr, domain string, lastUsed time.Time) error { + args := markLastUsedArgs{ + NodeID: nid, + Addr: addr, + Domain: domain, + UpdatedAt: lastUsed, + } + bs, err := json.Marshal(args) + if err != nil { + return err + } + c := tsconsensus.Command{ + Name: "markLastUsed", + Args: bs, + } + result, err := ipp.consensus.ExecuteCommand(c) + if err != nil { + log.Printf("markLastUsed: raft error executing command: %v", err) + return err + } + if result.Err != nil { + log.Printf("markLastUsed: error returned from state machine: %v", err) + return result.Err + } + return nil +} + +type checkoutAddrArgs struct { + NodeID tailcfg.NodeID + Domain string + ReuseDeadline time.Time + UpdatedAt time.Time +} + +// executeCheckoutAddr parses a checkoutAddr raft log entry and applies it. 
+func (ipp *ConsensusIPPool) executeCheckoutAddr(bs []byte) tsconsensus.CommandResult { + var args checkoutAddrArgs + err := json.Unmarshal(bs, &args) + if err != nil { + return tsconsensus.CommandResult{Err: err} + } + addr, err := ipp.applyCheckoutAddr(args.NodeID, args.Domain, args.ReuseDeadline, args.UpdatedAt) + if err != nil { + return tsconsensus.CommandResult{Err: err} + } + resultBs, err := json.Marshal(addr) + if err != nil { + return tsconsensus.CommandResult{Err: err} + } + return tsconsensus.CommandResult{Result: resultBs} +} + +// applyCheckoutAddr finds the IP address for a nid+domain +// Each nid can use all of the addresses in the pool. +// updatedAt is the current time, the time at which we are wanting to get a new IP address. +// reuseDeadline is the time before which addresses are considered to be expired. +// So if addresses are being reused after they haven't been used for 24 hours say updatedAt would be now +// and reuseDeadline would be 24 hours ago. +// It is not safe for concurrent access (it's only called from raft, which will not call concurrently +// so that's fine). +func (ipp *ConsensusIPPool) applyCheckoutAddr(nid tailcfg.NodeID, domain string, reuseDeadline, updatedAt time.Time) (netip.Addr, error) { + ps, ok := ipp.perPeerMap.Load(nid) + if !ok { + ps = &consensusPerPeerState{ + addrToDomain: &syncs.Map[netip.Addr, whereWhen]{}, + } + ipp.perPeerMap.Store(nid, ps) + } + if existing, ok := ps.domainToAddr[domain]; ok { + ww, ok := ps.addrToDomain.Load(existing) + if ok { + ww.LastUsed = updatedAt + ps.addrToDomain.Store(existing, ww) + return existing, nil + } + log.Printf("applyCheckoutAddr: data out of sync, allocating new IP") + } + addr, wasInUse, previousDomain, err := ps.unusedIPV4(ipp.IPSet, reuseDeadline) + if err != nil { + return netip.Addr{}, err + } + mak.Set(&ps.domainToAddr, domain, addr) + if wasInUse { + delete(ps.domainToAddr, previousDomain) + } + ps.addrToDomain.Store(addr, whereWhen{Domain: domain, LastUsed: updatedAt}) + return addr, nil +} + +// Apply is part of the raft.FSM interface. It takes an incoming log entry and applies it to the state. 
+func (ipp *ConsensusIPPool) Apply(l *raft.Log) any { + var c tsconsensus.Command + if err := json.Unmarshal(l.Data, &c); err != nil { + panic(fmt.Sprintf("failed to unmarshal command: %s", err.Error())) + } + switch c.Name { + case "checkoutAddr": + return ipp.executeCheckoutAddr(c.Args) + case "markLastUsed": + return ipp.executeMarkLastUsed(c.Args) + case "readDomainForIP": + return ipp.executeReadDomainForIP(c.Args) + default: + panic(fmt.Sprintf("unrecognized command: %s", c.Name)) + } +} + +// commandExecutor is an interface covering the routing parts of consensus +// used to allow a fake in the tests +type commandExecutor interface { + ExecuteCommand(tsconsensus.Command) (tsconsensus.CommandResult, error) +} diff --git a/cmd/natc/ippool/consensusippool_test.go b/cmd/natc/ippool/consensusippool_test.go new file mode 100644 index 000000000..242cdffaf --- /dev/null +++ b/cmd/natc/ippool/consensusippool_test.go @@ -0,0 +1,383 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ippool + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/netip" + "testing" + "time" + + "github.com/hashicorp/raft" + "go4.org/netipx" + "tailscale.com/tailcfg" + "tailscale.com/tsconsensus" + "tailscale.com/util/must" +) + +func makeSetFromPrefix(pfx netip.Prefix) *netipx.IPSet { + var ipsb netipx.IPSetBuilder + ipsb.AddPrefix(pfx) + return must.Get(ipsb.IPSet()) +} + +type FakeConsensus struct { + ipp *ConsensusIPPool +} + +func (c *FakeConsensus) ExecuteCommand(cmd tsconsensus.Command) (tsconsensus.CommandResult, error) { + b, err := json.Marshal(cmd) + if err != nil { + return tsconsensus.CommandResult{}, err + } + result := c.ipp.Apply(&raft.Log{Data: b}) + return result.(tsconsensus.CommandResult), nil +} + +func makePool(pfx netip.Prefix) *ConsensusIPPool { + ipp := NewConsensusIPPool(makeSetFromPrefix(pfx)) + ipp.consensus = &FakeConsensus{ipp: ipp} + return ipp +} + +func TestConsensusIPForDomain(t *testing.T) { + pfx := netip.MustParsePrefix("100.64.0.0/16") + ipp := makePool(pfx) + from := tailcfg.NodeID(1) + + a, err := ipp.IPForDomain(from, "example.com") + if err != nil { + t.Fatal(err) + } + if !pfx.Contains(a) { + t.Fatalf("expected %v to be in the prefix %v", a, pfx) + } + + b, err := ipp.IPForDomain(from, "a.example.com") + if err != nil { + t.Fatal(err) + } + if !pfx.Contains(b) { + t.Fatalf("expected %v to be in the prefix %v", b, pfx) + } + if b == a { + t.Fatalf("same address issued twice %v, %v", a, b) + } + + c, err := ipp.IPForDomain(from, "example.com") + if err != nil { + t.Fatal(err) + } + if c != a { + t.Fatalf("expected %v to be remembered as the addr for example.com, but got %v", a, c) + } +} + +func TestConsensusPoolExhaustion(t *testing.T) { + ipp := makePool(netip.MustParsePrefix("100.64.0.0/31")) + from := tailcfg.NodeID(1) + + subdomains := []string{"a", "b", "c"} + for i, sd := range subdomains { + _, err := ipp.IPForDomain(from, fmt.Sprintf("%s.example.com", sd)) + if i < 2 && err != nil { + t.Fatal(err) + } + expected := "ip pool exhausted" + if i == 2 && err.Error() != expected { + t.Fatalf("expected error to be '%s', got '%s'", expected, err.Error()) + } + } +} + +func TestConsensusPoolExpiry(t *testing.T) { + ipp := makePool(netip.MustParsePrefix("100.64.0.0/31")) + firstIP := netip.MustParseAddr("100.64.0.0") + secondIP := netip.MustParseAddr("100.64.0.1") + timeOfUse := time.Now() + beforeTimeOfUse := timeOfUse.Add(-1 * time.Hour) + afterTimeOfUse := timeOfUse.Add(1 * time.Hour) + from := tailcfg.NodeID(1) + + // 
the pool is unused, we get an address, and it's marked as being used at timeOfUse + aAddr, err := ipp.applyCheckoutAddr(from, "a.example.com", time.Time{}, timeOfUse) + if err != nil { + t.Fatal(err) + } + if aAddr.Compare(firstIP) != 0 { + t.Fatalf("expected %s, got %s", firstIP, aAddr) + } + ww, ok := ipp.domainLookup(from, firstIP) + if !ok { + t.Fatal("expected wherewhen to be found") + } + if ww.Domain != "a.example.com" { + t.Fatalf("expected aAddr to look up to a.example.com, got: %s", ww.Domain) + } + + // the time before which we will reuse addresses is prior to timeOfUse, so no reuse + bAddr, err := ipp.applyCheckoutAddr(from, "b.example.com", beforeTimeOfUse, timeOfUse) + if err != nil { + t.Fatal(err) + } + if bAddr.Compare(secondIP) != 0 { + t.Fatalf("expected %s, got %s", secondIP, bAddr) + } + + // the time before which we will reuse addresses is after timeOfUse, so reuse addresses that were marked as used at timeOfUse. + cAddr, err := ipp.applyCheckoutAddr(from, "c.example.com", afterTimeOfUse, timeOfUse) + if err != nil { + t.Fatal(err) + } + if cAddr.Compare(firstIP) != 0 { + t.Fatalf("expected %s, got %s", firstIP, cAddr) + } + ww, ok = ipp.domainLookup(from, firstIP) + if !ok { + t.Fatal("expected wherewhen to be found") + } + if ww.Domain != "c.example.com" { + t.Fatalf("expected firstIP to look up to c.example.com, got: %s", ww.Domain) + } + + // the addr remains associated with c.example.com + cAddrAgain, err := ipp.applyCheckoutAddr(from, "c.example.com", afterTimeOfUse, timeOfUse) + if err != nil { + t.Fatal(err) + } + if cAddrAgain.Compare(cAddr) != 0 { + t.Fatalf("expected cAddrAgain to be cAddr, but they are different. cAddrAgain=%s cAddr=%s", cAddrAgain, cAddr) + } + ww, ok = ipp.domainLookup(from, firstIP) + if !ok { + t.Fatal("expected wherewhen to be found") + } + if ww.Domain != "c.example.com" { + t.Fatalf("expected firstIP to look up to c.example.com, got: %s", ww.Domain) + } +} + +func TestConsensusPoolApplyMarkLastUsed(t *testing.T) { + ipp := makePool(netip.MustParsePrefix("100.64.0.0/31")) + firstIP := netip.MustParseAddr("100.64.0.0") + time1 := time.Now() + time2 := time1.Add(1 * time.Hour) + from := tailcfg.NodeID(1) + domain := "example.com" + + aAddr, err := ipp.applyCheckoutAddr(from, domain, time.Time{}, time1) + if err != nil { + t.Fatal(err) + } + if aAddr.Compare(firstIP) != 0 { + t.Fatalf("expected %s, got %s", firstIP, aAddr) + } + // example.com LastUsed is now time1 + ww, ok := ipp.domainLookup(from, firstIP) + if !ok { + t.Fatal("expected wherewhen to be found") + } + if ww.LastUsed != time1 { + t.Fatalf("expected %s, got %s", time1, ww.LastUsed) + } + if ww.Domain != domain { + t.Fatalf("expected %s, got %s", domain, ww.Domain) + } + + err = ipp.applyMarkLastUsed(from, firstIP, domain, time2) + if err != nil { + t.Fatal(err) + } + + // example.com LastUsed is now time2 + ww, ok = ipp.domainLookup(from, firstIP) + if !ok { + t.Fatal("expected wherewhen to be found") + } + if ww.LastUsed != time2 { + t.Fatalf("expected %s, got %s", time2, ww.LastUsed) + } + if ww.Domain != domain { + t.Fatalf("expected %s, got %s", domain, ww.Domain) + } +} + +func TestConsensusDomainForIP(t *testing.T) { + ipp := makePool(netip.MustParsePrefix("100.64.0.0/16")) + from := tailcfg.NodeID(1) + domain := "example.com" + now := time.Now() + + d, ok := ipp.DomainForIP(from, netip.MustParseAddr("100.64.0.1"), now) + if d != "" { + t.Fatalf("expected an empty string if the addr is not found but got %s", d) + } + if ok { + t.Fatalf("expected domain to not be 
found for IP, as it has never been looked up") + } + a, err := ipp.IPForDomain(from, domain) + if err != nil { + t.Fatal(err) + } + d2, ok := ipp.DomainForIP(from, a, now) + if d2 != domain { + t.Fatalf("expected %s but got %s", domain, d2) + } + if !ok { + t.Fatalf("expected domain to be found for IP that was handed out for it") + } +} + +func TestConsensusReadDomainForIP(t *testing.T) { + ipp := makePool(netip.MustParsePrefix("100.64.0.0/16")) + from := tailcfg.NodeID(1) + domain := "example.com" + + d, err := ipp.readDomainForIP(from, netip.MustParseAddr("100.64.0.1")) + if err != nil { + t.Fatal(err) + } + if d != "" { + t.Fatalf("expected an empty string if the addr is not found but got %s", d) + } + a, err := ipp.IPForDomain(from, domain) + if err != nil { + t.Fatal(err) + } + d2, err := ipp.readDomainForIP(from, a) + if err != nil { + t.Fatal(err) + } + if d2 != domain { + t.Fatalf("expected %s but got %s", domain, d2) + } +} + +func TestConsensusSnapshot(t *testing.T) { + pfx := netip.MustParsePrefix("100.64.0.0/16") + ipp := makePool(pfx) + domain := "example.com" + expectedAddr := netip.MustParseAddr("100.64.0.0") + expectedFrom := expectedAddr + expectedTo := netip.MustParseAddr("100.64.255.255") + from := tailcfg.NodeID(1) + + // pool allocates first addr for from + if _, err := ipp.IPForDomain(from, domain); err != nil { + t.Fatal(err) + } + // take a snapshot + fsmSnap, err := ipp.Snapshot() + if err != nil { + t.Fatal(err) + } + snap := fsmSnap.(fsmSnapshot) + + // verify snapshot state matches the state we know ipp will have + // ipset matches ipp.IPSet + if len(snap.IPSet.Ranges) != 1 { + t.Fatalf("expected 1, got %d", len(snap.IPSet.Ranges)) + } + if snap.IPSet.Ranges[0].From != expectedFrom { + t.Fatalf("want %s, got %s", expectedFrom, snap.IPSet.Ranges[0].From) + } + if snap.IPSet.Ranges[0].To != expectedTo { + t.Fatalf("want %s, got %s", expectedTo, snap.IPSet.Ranges[0].To) + } + + // perPeerMap has one entry, for from + if len(snap.PerPeerMap) != 1 { + t.Fatalf("expected 1, got %d", len(snap.PerPeerMap)) + } + ps := snap.PerPeerMap[from] + + // the one peer state has allocated one address, the first in the prefix + if len(ps.DomainToAddr) != 1 { + t.Fatalf("expected 1, got %d", len(ps.DomainToAddr)) + } + addr := ps.DomainToAddr[domain] + if addr != expectedAddr { + t.Fatalf("want %s, got %s", expectedAddr.String(), addr.String()) + } + if len(ps.AddrToDomain) != 1 { + t.Fatalf("expected 1, got %d", len(ps.AddrToDomain)) + } + ww := ps.AddrToDomain[addr] + if ww.Domain != domain { + t.Fatalf("want %s, got %s", domain, ww.Domain) + } +} + +func TestConsensusRestore(t *testing.T) { + pfx := netip.MustParsePrefix("100.64.0.0/16") + ipp := makePool(pfx) + domain := "example.com" + expectedAddr := netip.MustParseAddr("100.64.0.0") + from := tailcfg.NodeID(1) + + if _, err := ipp.IPForDomain(from, domain); err != nil { + t.Fatal(err) + } + // take the snapshot after only 1 addr allocated + fsmSnap, err := ipp.Snapshot() + if err != nil { + t.Fatal(err) + } + snap := fsmSnap.(fsmSnapshot) + + if _, err := ipp.IPForDomain(from, "b.example.com"); err != nil { + t.Fatal(err) + } + if _, err := ipp.IPForDomain(from, "c.example.com"); err != nil { + t.Fatal(err) + } + if _, err := ipp.IPForDomain(from, "d.example.com"); err != nil { + t.Fatal(err) + } + // ipp now has 4 entries in domainToAddr + ps, _ := ipp.perPeerMap.Load(from) + if len(ps.domainToAddr) != 4 { + t.Fatalf("want 4, got %d", len(ps.domainToAddr)) + } + + // restore the snapshot + bs, err := json.Marshal(snap) + if 
err != nil { + t.Fatal(err) + } + err = ipp.Restore(io.NopCloser(bytes.NewBuffer(bs))) + if err != nil { + t.Fatal(err) + } + + // everything should be as it was when the snapshot was taken + if ipp.perPeerMap.Len() != 1 { + t.Fatalf("want 1, got %d", ipp.perPeerMap.Len()) + } + psAfter, _ := ipp.perPeerMap.Load(from) + if len(psAfter.domainToAddr) != 1 { + t.Fatalf("want 1, got %d", len(psAfter.domainToAddr)) + } + if psAfter.domainToAddr[domain] != expectedAddr { + t.Fatalf("want %s, got %s", expectedAddr, psAfter.domainToAddr[domain]) + } + ww, _ := psAfter.addrToDomain.Load(expectedAddr) + if ww.Domain != domain { + t.Fatalf("want %s, got %s", domain, ww.Domain) + } +} + +func TestConsensusIsCloseToExpiry(t *testing.T) { + a := time.Now() + b := a.Add(5 * time.Second) + if !isCloseToExpiry(a, b, 8*time.Second) { + t.Fatal("times are not within half the lifetime, expected true") + } + if isCloseToExpiry(a, b, 12*time.Second) { + t.Fatal("times are within half the lifetime, expected false") + } +} diff --git a/cmd/natc/ippool/consensusippoolserialize.go b/cmd/natc/ippool/consensusippoolserialize.go new file mode 100644 index 000000000..97dc02f2c --- /dev/null +++ b/cmd/natc/ippool/consensusippoolserialize.go @@ -0,0 +1,164 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ippool + +import ( + "encoding/json" + "io" + "log" + "maps" + "net/netip" + + "github.com/hashicorp/raft" + "go4.org/netipx" + "tailscale.com/syncs" + "tailscale.com/tailcfg" +) + +// Snapshot and Restore enable the raft lib to do log compaction. +// https://pkg.go.dev/github.com/hashicorp/raft#FSM + +// Snapshot is part of the raft.FSM interface. +// According to the docs it: +// - should return quickly +// - will not be called concurrently with Apply +// - the snapshot returned will have Persist called on it concurrently with Apply +// (so it should not contain pointers to the original data that's being mutated) +func (ipp *ConsensusIPPool) Snapshot() (raft.FSMSnapshot, error) { + // everything is safe for concurrent reads and this is not called concurrently with Apply which is + // the only thing that writes, so we do not need to lock + return ipp.getPersistable(), nil +} + +type persistableIPSet struct { + Ranges []persistableIPRange +} + +func getPersistableIPSet(i *netipx.IPSet) persistableIPSet { + rs := []persistableIPRange{} + for _, r := range i.Ranges() { + rs = append(rs, getPersistableIPRange(r)) + } + return persistableIPSet{Ranges: rs} +} + +func (mips *persistableIPSet) toIPSet() (*netipx.IPSet, error) { + b := netipx.IPSetBuilder{} + for _, r := range mips.Ranges { + b.AddRange(r.toIPRange()) + } + return b.IPSet() +} + +type persistableIPRange struct { + From netip.Addr + To netip.Addr +} + +func getPersistableIPRange(r netipx.IPRange) persistableIPRange { + return persistableIPRange{ + From: r.From(), + To: r.To(), + } +} + +func (mipr *persistableIPRange) toIPRange() netipx.IPRange { + return netipx.IPRangeFrom(mipr.From, mipr.To) +} + +// Restore is part of the raft.FSM interface. 
+// According to the docs it: +// - will not be called concurrently with any other command +// - the FSM must discard all previous state before restoring +func (ipp *ConsensusIPPool) Restore(rc io.ReadCloser) error { + var snap fsmSnapshot + if err := json.NewDecoder(rc).Decode(&snap); err != nil { + return err + } + ipset, ppm, err := snap.getData() + if err != nil { + return err + } + ipp.IPSet = ipset + ipp.perPeerMap = ppm + return nil +} + +type fsmSnapshot struct { + IPSet persistableIPSet + PerPeerMap map[tailcfg.NodeID]persistablePPS +} + +// Persist is part of the raft.FSMSnapshot interface +// According to the docs Persist may be called concurrently with Apply +func (f fsmSnapshot) Persist(sink raft.SnapshotSink) error { + if err := json.NewEncoder(sink).Encode(f); err != nil { + log.Printf("Error encoding snapshot as JSON: %v", err) + return sink.Cancel() + } + return sink.Close() +} + +// Release is part of the raft.FSMSnapshot interface +func (f fsmSnapshot) Release() {} + +// getPersistable returns an object that: +// - contains all the data in ConsensusIPPool +// - doesn't share any pointers with it +// - can be marshalled to JSON +// +// part of the raft snapshotting, getPersistable will be called during Snapshot +// and the results used during persist (concurrently with Apply) +func (ipp *ConsensusIPPool) getPersistable() fsmSnapshot { + ppm := map[tailcfg.NodeID]persistablePPS{} + for k, v := range ipp.perPeerMap.All() { + ppm[k] = v.getPersistable() + } + return fsmSnapshot{ + IPSet: getPersistableIPSet(ipp.IPSet), + PerPeerMap: ppm, + } +} + +func (f fsmSnapshot) getData() (*netipx.IPSet, *syncs.Map[tailcfg.NodeID, *consensusPerPeerState], error) { + ppm := syncs.Map[tailcfg.NodeID, *consensusPerPeerState]{} + for k, v := range f.PerPeerMap { + ppm.Store(k, v.toPerPeerState()) + } + ipset, err := f.IPSet.toIPSet() + if err != nil { + return nil, nil, err + } + return ipset, &ppm, nil +} + +// getPersistable returns an object that: +// - contains all the data in consensusPerPeerState +// - doesn't share any pointers with it +// - can be marshalled to JSON +// +// part of the raft snapshotting, getPersistable will be called during Snapshot +// and the results used during persist (concurrently with Apply) +func (ps *consensusPerPeerState) getPersistable() persistablePPS { + return persistablePPS{ + AddrToDomain: maps.Collect(ps.addrToDomain.All()), + DomainToAddr: maps.Clone(ps.domainToAddr), + } +} + +type persistablePPS struct { + DomainToAddr map[string]netip.Addr + AddrToDomain map[netip.Addr]whereWhen +} + +func (p persistablePPS) toPerPeerState() *consensusPerPeerState { + atd := &syncs.Map[netip.Addr, whereWhen]{} + for k, v := range p.AddrToDomain { + atd.Store(k, v) + } + return &consensusPerPeerState{ + domainToAddr: p.DomainToAddr, + addrToDomain: atd, + } +} diff --git a/cmd/natc/ippool/ippool.go b/cmd/natc/ippool/ippool.go index 3a46a6e7a..5a2dcbec9 100644 --- a/cmd/natc/ippool/ippool.go +++ b/cmd/natc/ippool/ippool.go @@ -10,6 +10,7 @@ import ( "math/big" "net/netip" "sync" + "time" "github.com/gaissmai/bart" "go4.org/netipx" @@ -21,12 +22,26 @@ import ( var ErrNoIPsAvailable = errors.New("no IPs available") -type IPPool struct { +// IPPool allocates IPv4 addresses from a pool to DNS domains, on a per tailcfg.NodeID basis. +// For each tailcfg.NodeID, IPv4 addresses are associated with at most one DNS domain. +// Addresses may be reused across other tailcfg.NodeID's for the same or other domains. 
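// Illustrative usage sketch (hypothetical caller, not part of this change): how
// the IPPool interface below might be exercised, assuming an IPPool value, a
// tailcfg.NodeID, and the netip/time packages are in scope.
//
//	func resolveExample(pool IPPool, node tailcfg.NodeID) (netip.Addr, error) {
//		// Allocate (or look up) the address mapped to "example.com" for this node.
//		addr, err := pool.IPForDomain(node, "example.com")
//		if err != nil {
//			return netip.Addr{}, err // e.g. ErrNoIPsAvailable when the pool is exhausted
//		}
//		// Later, reverse-map the address back to the domain for the same node.
//		if domain, ok := pool.DomainForIP(node, addr, time.Now()); ok {
//			_ = domain // "example.com"
//		}
//		return addr, nil
//	}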
+type IPPool interface { + // DomainForIP looks up the domain associated with a tailcfg.NodeID and netip.Addr pair. + // If there is no association, the result is empty and ok is false. + DomainForIP(tailcfg.NodeID, netip.Addr, time.Time) (string, bool) + + // IPForDomain looks up or creates an IP address allocation for the tailcfg.NodeID and domain pair. + // If no address association is found, one is allocated from the range of free addresses for this tailcfg.NodeID. + // If no more address are available, an error is returned. + IPForDomain(tailcfg.NodeID, string) (netip.Addr, error) +} + +type SingleMachineIPPool struct { perPeerMap syncs.Map[tailcfg.NodeID, *perPeerState] IPSet *netipx.IPSet } -func (ipp *IPPool) DomainForIP(from tailcfg.NodeID, addr netip.Addr) (string, bool) { +func (ipp *SingleMachineIPPool) DomainForIP(from tailcfg.NodeID, addr netip.Addr, _ time.Time) (string, bool) { ps, ok := ipp.perPeerMap.Load(from) if !ok { log.Printf("handleTCPFlow: no perPeerState for %v", from) @@ -40,7 +55,7 @@ func (ipp *IPPool) DomainForIP(from tailcfg.NodeID, addr netip.Addr) (string, bo return domain, ok } -func (ipp *IPPool) IPForDomain(from tailcfg.NodeID, domain string) (netip.Addr, error) { +func (ipp *SingleMachineIPPool) IPForDomain(from tailcfg.NodeID, domain string) (netip.Addr, error) { npps := &perPeerState{ ipset: ipp.IPSet, } diff --git a/cmd/natc/ippool/ippool_test.go b/cmd/natc/ippool/ippool_test.go index 2919d7757..8d474f86a 100644 --- a/cmd/natc/ippool/ippool_test.go +++ b/cmd/natc/ippool/ippool_test.go @@ -8,6 +8,7 @@ import ( "fmt" "net/netip" "testing" + "time" "go4.org/netipx" "tailscale.com/tailcfg" @@ -19,7 +20,7 @@ func TestIPPoolExhaustion(t *testing.T) { var ipsb netipx.IPSetBuilder ipsb.AddPrefix(smallPrefix) addrPool := must.Get(ipsb.IPSet()) - pool := IPPool{IPSet: addrPool} + pool := SingleMachineIPPool{IPSet: addrPool} assignedIPs := make(map[netip.Addr]string) @@ -68,7 +69,7 @@ func TestIPPool(t *testing.T) { var ipsb netipx.IPSetBuilder ipsb.AddPrefix(netip.MustParsePrefix("100.64.1.0/24")) addrPool := must.Get(ipsb.IPSet()) - pool := IPPool{ + pool := SingleMachineIPPool{ IPSet: addrPool, } from := tailcfg.NodeID(12345) @@ -89,7 +90,7 @@ func TestIPPool(t *testing.T) { t.Errorf("IPv4 address %s not in range %s", addr, addrPool) } - domain, ok := pool.DomainForIP(from, addr) + domain, ok := pool.DomainForIP(from, addr, time.Now()) if !ok { t.Errorf("domainForIP(%s) not found", addr) } else if domain != "example.com" { diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index b327f55bd..2dcdc551f 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -57,6 +57,8 @@ func main() { printULA = fs.Bool("print-ula", false, "print the ULA prefix and exit") ignoreDstPfxStr = fs.String("ignore-destinations", "", "comma-separated list of prefixes to ignore") wgPort = fs.Uint("wg-port", 0, "udp port for wireguard and peer to peer traffic") + clusterTag = fs.String("cluster-tag", "", "optionally run in a consensus cluster with other nodes with this tag") + server = fs.String("login-server", ipn.DefaultControlURL, "the base URL of control server") ) ff.Parse(fs, os.Args[1:], ff.WithEnvVarPrefix("TS_NATC")) @@ -94,6 +96,7 @@ func main() { ts := &tsnet.Server{ Hostname: *hostname, } + ts.ControlURL = *server if *wgPort != 0 { if *wgPort >= 1<<16 { log.Fatalf("wg-port must be in the range [0, 65535]") @@ -148,12 +151,31 @@ func main() { routes, dnsAddr, addrPool := calculateAddresses(prefixes) v6ULA := ula(uint16(*siteID)) + + var ipp ippool.IPPool + if *clusterTag != "" 
{ + cipp := ippool.NewConsensusIPPool(addrPool) + err = cipp.StartConsensus(ctx, ts, *clusterTag) + if err != nil { + log.Fatalf("StartConsensus: %v", err) + } + defer func() { + err := cipp.StopConsensus(ctx) + if err != nil { + log.Printf("Error stopping consensus: %v", err) + } + }() + ipp = cipp + } else { + ipp = &ippool.SingleMachineIPPool{IPSet: addrPool} + } + c := &connector{ ts: ts, whois: lc, v6ULA: v6ULA, ignoreDsts: ignoreDstTable, - ipPool: &ippool.IPPool{IPSet: addrPool}, + ipPool: ipp, routes: routes, dnsAddr: dnsAddr, resolver: net.DefaultResolver, @@ -209,7 +231,7 @@ type connector struct { ignoreDsts *bart.Table[bool] // ipPool contains the per-peer IPv4 address assignments. - ipPool *ippool.IPPool + ipPool ippool.IPPool // resolver is used to lookup IP addresses for DNS queries. resolver lookupNetIPer @@ -453,7 +475,7 @@ func (c *connector) handleTCPFlow(src, dst netip.AddrPort) (handler func(net.Con if dstAddr.Is6() { dstAddr = v4ForV6(dstAddr) } - domain, ok := c.ipPool.DomainForIP(who.Node.ID, dstAddr) + domain, ok := c.ipPool.DomainForIP(who.Node.ID, dstAddr, time.Now()) if !ok { return nil, false } diff --git a/cmd/natc/natc_test.go b/cmd/natc/natc_test.go index 0320db8a4..78dec86fd 100644 --- a/cmd/natc/natc_test.go +++ b/cmd/natc/natc_test.go @@ -270,7 +270,7 @@ func TestDNSResponse(t *testing.T) { ignoreDsts: &bart.Table[bool]{}, routes: routes, v6ULA: v6ULA, - ipPool: &ippool.IPPool{IPSet: addrPool}, + ipPool: &ippool.SingleMachineIPPool{IPSet: addrPool}, dnsAddr: dnsAddr, } c.ignoreDsts.Insert(netip.MustParsePrefix("8.8.4.4/32"), true) From 75a7d28b079d1b16448e75c014eb689583a4d175 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 5 Jun 2025 10:33:16 -0700 Subject: [PATCH 0919/1708] net/packet: fix Parsed docs (#16200) Updates #cleanup Signed-off-by: Jordan Whited --- net/packet/packet.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/net/packet/packet.go b/net/packet/packet.go index b683b2212..876a653ed 100644 --- a/net/packet/packet.go +++ b/net/packet/packet.go @@ -51,10 +51,11 @@ type Parsed struct { IPVersion uint8 // IPProto is the IP subprotocol (UDP, TCP, etc.). Valid iff IPVersion != 0. IPProto ipproto.Proto - // SrcIP4 is the source address. Family matches IPVersion. Port is - // valid iff IPProto == TCP || IPProto == UDP. + // Src is the source address. Family matches IPVersion. Port is + // valid iff IPProto == TCP || IPProto == UDP || IPProto == SCTP. Src netip.AddrPort - // DstIP4 is the destination address. Family matches IPVersion. + // Dst is the destination address. Family matches IPVersion. Port is + // valid iff IPProto == TCP || IPProto == UDP || IPProto == SCTP. Dst netip.AddrPort // TCPFlags is the packet's TCP flag bits. Valid iff IPProto == TCP. 
TCPFlags TCPFlag From 3e08eab21e204bc3568762c2b49e0e1ab9ebf4b4 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Thu, 5 Jun 2025 08:51:10 -0700 Subject: [PATCH 0920/1708] cmd/natc: use new on disk state store for consensus Fixes #16027 Signed-off-by: Fran Bull --- cmd/natc/ippool/consensusippool.go | 33 +++++++++++++++++++++++++++++- cmd/natc/natc.go | 3 ++- 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/cmd/natc/ippool/consensusippool.go b/cmd/natc/ippool/consensusippool.go index 4783209b2..adf2090d1 100644 --- a/cmd/natc/ippool/consensusippool.go +++ b/cmd/natc/ippool/consensusippool.go @@ -10,6 +10,8 @@ import ( "fmt" "log" "net/netip" + "os" + "path/filepath" "time" "github.com/hashicorp/raft" @@ -150,9 +152,14 @@ func (ipp *ConsensusIPPool) domainLookup(from tailcfg.NodeID, addr netip.Addr) ( } // StartConsensus is part of the IPPool interface. It starts the raft background routines that handle consensus. -func (ipp *ConsensusIPPool) StartConsensus(ctx context.Context, ts *tsnet.Server, clusterTag string) error { +func (ipp *ConsensusIPPool) StartConsensus(ctx context.Context, ts *tsnet.Server, clusterTag string, clusterStateDir string) error { cfg := tsconsensus.DefaultConfig() cfg.ServeDebugMonitor = true + var err error + cfg.StateDirPath, err = getStatePath(clusterStateDir) + if err != nil { + return err + } cns, err := tsconsensus.Start(ctx, ts, ipp, clusterTag, cfg) if err != nil { return err @@ -204,6 +211,30 @@ func (ps *consensusPerPeerState) unusedIPV4(ipset *netipx.IPSet, reuseDeadline t return netip.Addr{}, false, "", errors.New("ip pool exhausted") } +func getStatePath(pathFromFlag string) (string, error) { + var dirPath string + if pathFromFlag != "" { + dirPath = pathFromFlag + } else { + confDir, err := os.UserConfigDir() + if err != nil { + return "", err + } + dirPath = filepath.Join(confDir, "nat-connector-cluster-state") + } + + if err := os.MkdirAll(dirPath, 0700); err != nil { + return "", err + } + if fi, err := os.Stat(dirPath); err != nil { + return "", err + } else if !fi.IsDir() { + return "", fmt.Errorf("%v is not a directory", dirPath) + } + + return dirPath, nil +} + // isCloseToExpiry returns true if the lastUsed and now times are more than // half the lifetime apart func isCloseToExpiry(lastUsed, now time.Time, lifetime time.Duration) bool { diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index 2dcdc551f..719d5d20d 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -59,6 +59,7 @@ func main() { wgPort = fs.Uint("wg-port", 0, "udp port for wireguard and peer to peer traffic") clusterTag = fs.String("cluster-tag", "", "optionally run in a consensus cluster with other nodes with this tag") server = fs.String("login-server", ipn.DefaultControlURL, "the base URL of control server") + clusterStateDir = fs.String("cluster-state-dir", "", "path to directory in which to store raft state") ) ff.Parse(fs, os.Args[1:], ff.WithEnvVarPrefix("TS_NATC")) @@ -155,7 +156,7 @@ func main() { var ipp ippool.IPPool if *clusterTag != "" { cipp := ippool.NewConsensusIPPool(addrPool) - err = cipp.StartConsensus(ctx, ts, *clusterTag) + err = cipp.StartConsensus(ctx, ts, *clusterTag, *clusterStateDir) if err != nil { log.Fatalf("StartConsensus: %v", err) } From 3f7a9f82e39813ca42701aca18cf23a97b5652dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 6 Jun 2025 11:42:33 -0400 Subject: [PATCH 0921/1708] wgengine/magicsock: fix bpf fragmentation jump offsets (#16204) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit Fragmented datagrams would be processed instead of being dumped right away. In reality, thse datagrams would be dropped anyway later so there should functionally not be any change. Additionally, the feature is off by default. Closes #16203 Signed-off-by: Claus Lensbøl --- wgengine/magicsock/magicsock_linux.go | 4 +- wgengine/magicsock/magicsock_linux_test.go | 76 ++++++++++++++++++++++ 2 files changed, 78 insertions(+), 2 deletions(-) diff --git a/wgengine/magicsock/magicsock_linux.go b/wgengine/magicsock/magicsock_linux.go index c5df555cd..34c39fe62 100644 --- a/wgengine/magicsock/magicsock_linux.go +++ b/wgengine/magicsock/magicsock_linux.go @@ -66,10 +66,10 @@ var ( // fragmented, and we don't want to handle reassembly. bpf.LoadAbsolute{Off: 6, Size: 2}, // More Fragments bit set means this is part of a fragmented packet. - bpf.JumpIf{Cond: bpf.JumpBitsSet, Val: 0x2000, SkipTrue: 7, SkipFalse: 0}, + bpf.JumpIf{Cond: bpf.JumpBitsSet, Val: 0x2000, SkipTrue: 8, SkipFalse: 0}, // Non-zero fragment offset with MF=0 means this is the last // fragment of packet. - bpf.JumpIf{Cond: bpf.JumpBitsSet, Val: 0x1fff, SkipTrue: 6, SkipFalse: 0}, + bpf.JumpIf{Cond: bpf.JumpBitsSet, Val: 0x1fff, SkipTrue: 7, SkipFalse: 0}, // Load IP header length into X register. bpf.LoadMemShift{Off: 0}, diff --git a/wgengine/magicsock/magicsock_linux_test.go b/wgengine/magicsock/magicsock_linux_test.go index 6b86b04f2..28ccd220e 100644 --- a/wgengine/magicsock/magicsock_linux_test.go +++ b/wgengine/magicsock/magicsock_linux_test.go @@ -9,6 +9,7 @@ import ( "net/netip" "testing" + "golang.org/x/net/bpf" "golang.org/x/sys/cpu" "golang.org/x/sys/unix" "tailscale.com/disco" @@ -146,3 +147,78 @@ func TestEthernetProto(t *testing.T) { } } } + +func TestBpfDiscardV4(t *testing.T) { + // Good packet as a reference for what should not be rejected + udp4Packet := []byte{ + // IPv4 header + 0x45, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x00, + 0x40, 0x11, 0x00, 0x00, + 0x7f, 0x00, 0x00, 0x01, // source ip + 0x7f, 0x00, 0x00, 0x02, // dest ip + + // UDP header + 0x30, 0x39, // src port + 0xd4, 0x31, // dest port + 0x00, 0x12, // length; 8 bytes header + 10 bytes payload = 18 bytes + 0x00, 0x00, // checksum; unused + + // Payload: disco magic plus 32 bytes for key and 24 bytes for nonce + 0x54, 0x53, 0xf0, 0x9f, 0x92, 0xac, 0x00, 0x01, 0x02, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + } + + vm, err := bpf.NewVM(magicsockFilterV4) + if err != nil { + t.Fatalf("failed creating BPF VM: %v", err) + } + + tests := []struct { + name string + replace map[int]byte + accept bool + }{ + { + name: "base accepted datagram", + replace: map[int]byte{}, + accept: true, + }, + { + name: "more fragments", + replace: map[int]byte{ + 6: 0x20, + }, + accept: false, + }, + { + name: "some fragment", + replace: map[int]byte{ + 7: 0x01, + }, + accept: false, + }, + } + + udp4PacketChanged := make([]byte, len(udp4Packet)) + copy(udp4PacketChanged, udp4Packet) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for k, v := range tt.replace { + udp4PacketChanged[k] = v + } + ret, err := vm.Run(udp4PacketChanged) + if err != nil { + t.Fatalf("BPF VM error: %v", err) + } + + if (ret != 0) != tt.accept { + 
t.Errorf("expected accept=%v, got ret=%v", tt.accept, ret) + } + }) + } +} From 66ae8737f40bf5aebcff96824bf0d4f8439db9c7 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 6 Jun 2025 09:46:29 -0700 Subject: [PATCH 0922/1708] wgengine/magicsock: make endpoint.bestAddr Geneve-aware (#16195) This commit adds a new type to magicsock, epAddr, which largely ends up replacing netip.AddrPort in packet I/O paths throughout, enabling Geneve encapsulation over UDP awareness. The conn.ReceiveFunc for UDP has been revamped to fix and more clearly distinguish the different classes of packets we expect to receive: naked STUN binding messages, naked disco, naked WireGuard, Geneve-encapsulated disco, and Geneve-encapsulated WireGuard. Prior to this commit, STUN matching logic in the RX path could swallow a naked WireGuard packet if the keypair index, which is randomly generated, happened to overlap with a subset of the STUN magic cookie. Updates tailscale/corp#27502 Updates tailscale/corp#29326 Signed-off-by: Jordan Whited --- wgengine/magicsock/batching_conn.go | 4 +- wgengine/magicsock/batching_conn_linux.go | 44 ++- .../magicsock/batching_conn_linux_test.go | 57 ++- wgengine/magicsock/debughttp.go | 28 +- wgengine/magicsock/derp.go | 11 +- wgengine/magicsock/endpoint.go | 206 ++++++----- wgengine/magicsock/endpoint_test.go | 3 +- wgengine/magicsock/magicsock.go | 333 +++++++++++------- wgengine/magicsock/magicsock_linux.go | 8 +- wgengine/magicsock/magicsock_test.go | 221 ++++++------ wgengine/magicsock/peermap.go | 50 ++- wgengine/magicsock/rebinding_conn.go | 22 +- wgengine/magicsock/relaymanager.go | 12 +- wgengine/magicsock/relaymanager_test.go | 3 +- 14 files changed, 610 insertions(+), 392 deletions(-) diff --git a/wgengine/magicsock/batching_conn.go b/wgengine/magicsock/batching_conn.go index 58cfe28aa..b769907db 100644 --- a/wgengine/magicsock/batching_conn.go +++ b/wgengine/magicsock/batching_conn.go @@ -4,8 +4,6 @@ package magicsock import ( - "net/netip" - "golang.org/x/net/ipv4" "golang.org/x/net/ipv6" "tailscale.com/types/nettype" @@ -21,5 +19,5 @@ var ( type batchingConn interface { nettype.PacketConn ReadBatch(msgs []ipv6.Message, flags int) (n int, err error) - WriteBatchTo(buffs [][]byte, addr netip.AddrPort, offset int) error + WriteBatchTo(buffs [][]byte, addr epAddr, offset int) error } diff --git a/wgengine/magicsock/batching_conn_linux.go b/wgengine/magicsock/batching_conn_linux.go index 9ad5e4474..c9aaff168 100644 --- a/wgengine/magicsock/batching_conn_linux.go +++ b/wgengine/magicsock/batching_conn_linux.go @@ -22,6 +22,7 @@ import ( "golang.org/x/sys/unix" "tailscale.com/hostinfo" "tailscale.com/net/neterror" + "tailscale.com/net/packet" "tailscale.com/types/nettype" ) @@ -92,9 +93,14 @@ const ( maxIPv6PayloadLen = 1<<16 - 1 - 8 ) -// coalesceMessages iterates msgs, coalescing them where possible while -// maintaining datagram order. All msgs have their Addr field set to addr. -func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, buffs [][]byte, msgs []ipv6.Message, offset int) int { +// coalesceMessages iterates 'buffs', setting and coalescing them in 'msgs' +// where possible while maintaining datagram order. +// +// All msgs have their Addr field set to addr. +// +// All msgs[i].Buffers[0] are preceded by a Geneve header with vni.get() if +// vni.isSet(). 
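// Illustrative sizing sketch (hypothetical numbers, not part of this change):
// when vni.isSet(), each datagram carries a packet.GeneveFixedHeaderLength-byte
// Geneve header in front of its payload, so coalescing two 1200-byte payloads
// for the same destination yields one buffer of 2*(1200+packet.GeneveFixedHeaderLength)
// bytes with a GSO segment size of 1200+packet.GeneveFixedHeaderLength, mirroring
// the wantLens/wantGSO arithmetic in the tests below.
//
//	var vni virtualNetworkID
//	vni.set(7) // hypothetical VNI
//	dst := epAddr{ap: netip.MustParseAddrPort("203.0.113.10:41641"), vni: vni}
//	// WriteBatchTo(buffs, dst, offset) then writes the Geneve header in front
//	// of each datagram before handing the batch to the kernel.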
+func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, vni virtualNetworkID, buffs [][]byte, msgs []ipv6.Message, offset int) int { var ( base = -1 // index of msg we are currently coalescing into gsoSize int // segmentation size of msgs[base] @@ -105,8 +111,17 @@ func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, buffs [][]byte, if addr.IP.To4() == nil { maxPayloadLen = maxIPv6PayloadLen } + vniIsSet := vni.isSet() + var gh packet.GeneveHeader + if vniIsSet { + gh.VNI = vni.get() + } for i, buff := range buffs { - buff = buff[offset:] + if vniIsSet { + gh.Encode(buffs[i]) + } else { + buff = buff[offset:] + } if i > 0 { msgLen := len(buff) baseLenBefore := len(msgs[base].Buffers[0]) @@ -163,28 +178,37 @@ func (c *linuxBatchingConn) putSendBatch(batch *sendBatch) { c.sendBatchPool.Put(batch) } -func (c *linuxBatchingConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort, offset int) error { +func (c *linuxBatchingConn) WriteBatchTo(buffs [][]byte, addr epAddr, offset int) error { batch := c.getSendBatch() defer c.putSendBatch(batch) - if addr.Addr().Is6() { - as16 := addr.Addr().As16() + if addr.ap.Addr().Is6() { + as16 := addr.ap.Addr().As16() copy(batch.ua.IP, as16[:]) batch.ua.IP = batch.ua.IP[:16] } else { - as4 := addr.Addr().As4() + as4 := addr.ap.Addr().As4() copy(batch.ua.IP, as4[:]) batch.ua.IP = batch.ua.IP[:4] } - batch.ua.Port = int(addr.Port()) + batch.ua.Port = int(addr.ap.Port()) var ( n int retried bool ) retry: if c.txOffload.Load() { - n = c.coalesceMessages(batch.ua, buffs, batch.msgs, offset) + n = c.coalesceMessages(batch.ua, addr.vni, buffs, batch.msgs, offset) } else { + vniIsSet := addr.vni.isSet() + var gh packet.GeneveHeader + if vniIsSet { + gh.VNI = addr.vni.get() + offset -= packet.GeneveFixedHeaderLength + } for i := range buffs { + if vniIsSet { + gh.Encode(buffs[i]) + } batch.msgs[i].Buffers[0] = buffs[i][offset:] batch.msgs[i].Addr = batch.ua batch.msgs[i].OOB = batch.msgs[i].OOB[:0] diff --git a/wgengine/magicsock/batching_conn_linux_test.go b/wgengine/magicsock/batching_conn_linux_test.go index effd5a2cc..7e0ab8fc4 100644 --- a/wgengine/magicsock/batching_conn_linux_test.go +++ b/wgengine/magicsock/batching_conn_linux_test.go @@ -159,9 +159,13 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { return make([]byte, len+packet.GeneveFixedHeaderLength, cap+packet.GeneveFixedHeaderLength) } + vni1 := virtualNetworkID{} + vni1.set(1) + cases := []struct { name string buffs [][]byte + vni virtualNetworkID wantLens []int wantGSO []int }{ @@ -173,6 +177,15 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { wantLens: []int{1}, wantGSO: []int{0}, }, + { + name: "one message no coalesce vni.isSet", + buffs: [][]byte{ + withGeneveSpace(1, 1), + }, + vni: vni1, + wantLens: []int{1 + packet.GeneveFixedHeaderLength}, + wantGSO: []int{0}, + }, { name: "two messages equal len coalesce", buffs: [][]byte{ @@ -182,6 +195,16 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { wantLens: []int{2}, wantGSO: []int{1}, }, + { + name: "two messages equal len coalesce vni.isSet", + buffs: [][]byte{ + withGeneveSpace(1, 2+packet.GeneveFixedHeaderLength), + withGeneveSpace(1, 1), + }, + vni: vni1, + wantLens: []int{2 + (2 * packet.GeneveFixedHeaderLength)}, + wantGSO: []int{1 + packet.GeneveFixedHeaderLength}, + }, { name: "two messages unequal len coalesce", buffs: [][]byte{ @@ -191,6 +214,16 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { wantLens: []int{3}, wantGSO: []int{2}, }, + { + name: "two messages 
unequal len coalesce vni.isSet", + buffs: [][]byte{ + withGeneveSpace(2, 3+packet.GeneveFixedHeaderLength), + withGeneveSpace(1, 1), + }, + vni: vni1, + wantLens: []int{3 + (2 * packet.GeneveFixedHeaderLength)}, + wantGSO: []int{2 + packet.GeneveFixedHeaderLength}, + }, { name: "three messages second unequal len coalesce", buffs: [][]byte{ @@ -201,6 +234,17 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { wantLens: []int{3, 2}, wantGSO: []int{2, 0}, }, + { + name: "three messages second unequal len coalesce vni.isSet", + buffs: [][]byte{ + withGeneveSpace(2, 3+(2*packet.GeneveFixedHeaderLength)), + withGeneveSpace(1, 1), + withGeneveSpace(2, 2), + }, + vni: vni1, + wantLens: []int{3 + (2 * packet.GeneveFixedHeaderLength), 2 + packet.GeneveFixedHeaderLength}, + wantGSO: []int{2 + packet.GeneveFixedHeaderLength, 0}, + }, { name: "three messages limited cap coalesce", buffs: [][]byte{ @@ -211,6 +255,17 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { wantLens: []int{4, 2}, wantGSO: []int{2, 0}, }, + { + name: "three messages limited cap coalesce vni.isSet", + buffs: [][]byte{ + withGeneveSpace(2, 4+packet.GeneveFixedHeaderLength), + withGeneveSpace(2, 2), + withGeneveSpace(2, 2), + }, + vni: vni1, + wantLens: []int{4 + (2 * packet.GeneveFixedHeaderLength), 2 + packet.GeneveFixedHeaderLength}, + wantGSO: []int{2 + packet.GeneveFixedHeaderLength, 0}, + }, } for _, tt := range cases { @@ -224,7 +279,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { msgs[i].Buffers = make([][]byte, 1) msgs[i].OOB = make([]byte, 0, 2) } - got := c.coalesceMessages(addr, tt.buffs, msgs, packet.GeneveFixedHeaderLength) + got := c.coalesceMessages(addr, tt.vni, tt.buffs, msgs, packet.GeneveFixedHeaderLength) if got != len(tt.wantLens) { t.Fatalf("got len %d want: %d", got, len(tt.wantLens)) } diff --git a/wgengine/magicsock/debughttp.go b/wgengine/magicsock/debughttp.go index aa109c242..cfdf8c1e1 100644 --- a/wgengine/magicsock/debughttp.go +++ b/wgengine/magicsock/debughttp.go @@ -72,18 +72,18 @@ func (c *Conn) ServeHTTPDebug(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "

<h2>ip:port to endpoint</h2><ul>
          ") { type kv struct { - ipp netip.AddrPort - pi *peerInfo + addr epAddr + pi *peerInfo } - ent := make([]kv, 0, len(c.peerMap.byIPPort)) - for k, v := range c.peerMap.byIPPort { + ent := make([]kv, 0, len(c.peerMap.byEpAddr)) + for k, v := range c.peerMap.byEpAddr { ent = append(ent, kv{k, v}) } - sort.Slice(ent, func(i, j int) bool { return ipPortLess(ent[i].ipp, ent[j].ipp) }) + sort.Slice(ent, func(i, j int) bool { return epAddrLess(ent[i].addr, ent[j].addr) }) for _, e := range ent { ep := e.pi.ep shortStr := ep.publicKey.ShortString() - fmt.Fprintf(w, "
<li>%v: <a href='#%v'>%v</a></li>\n", e.ipp, strings.Trim(shortStr, "[]"), shortStr)
+ fmt.Fprintf(w, "<li>%v: <a href='#%v'>%v</a></li>\n", e.addr, strings.Trim(shortStr, "[]"), shortStr) } } @@ -148,11 +148,11 @@ func printEndpointHTML(w io.Writer, ep *endpoint) { for ipp := range ep.endpointState { eps = append(eps, ipp) } - sort.Slice(eps, func(i, j int) bool { return ipPortLess(eps[i], eps[j]) }) + sort.Slice(eps, func(i, j int) bool { return addrPortLess(eps[i], eps[j]) }) io.WriteString(w, "<p>Endpoints:</p><ul>")
for _, ipp := range eps { s := ep.endpointState[ipp] - if ipp == ep.bestAddr.AddrPort { + if ipp == ep.bestAddr.ap && !ep.bestAddr.vni.isSet() { fmt.Fprintf(w, "<li>%s: (best)", ipp) } else { fmt.Fprintf(w, "<li>%s: ...
                ", ipp) @@ -196,9 +196,19 @@ func peerDebugName(p tailcfg.NodeView) string { return p.Hostinfo().Hostname() } -func ipPortLess(a, b netip.AddrPort) bool { +func addrPortLess(a, b netip.AddrPort) bool { if v := a.Addr().Compare(b.Addr()); v != 0 { return v < 0 } return a.Port() < b.Port() } + +func epAddrLess(a, b epAddr) bool { + if v := a.ap.Addr().Compare(b.ap.Addr()); v != 0 { + return v < 0 + } + if a.ap.Port() == b.ap.Port() { + return a.vni.get() < b.vni.get() + } + return a.ap.Port() < b.ap.Port() +} diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index ffdff14a1..5afdbc6d8 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -740,8 +740,11 @@ func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *en return 0, nil } - ipp := netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, uint16(regionID)) - if c.handleDiscoMessage(b[:n], ipp, dm.src, discoRXPathDERP) { + srcAddr := epAddr{ap: netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, uint16(regionID))} + pt, isGeneveEncap := packetLooksLike(b[:n]) + if pt == packetLooksLikeDisco && + !isGeneveEncap { // We should never receive Geneve-encapsulated disco over DERP. + c.handleDiscoMessage(b[:n], srcAddr, false, dm.src, discoRXPathDERP) return 0, nil } @@ -755,9 +758,9 @@ func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *en return 0, nil } - ep.noteRecvActivity(ipp, mono.Now()) + ep.noteRecvActivity(srcAddr, mono.Now()) if stats := c.stats.Load(); stats != nil { - stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, dm.n) + stats.UpdateRxPhysical(ep.nodeAddr, srcAddr.ap, 1, dm.n) } c.metrics.inboundPacketsDERPTotal.Add(1) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 243d0f4de..faae49a97 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -25,6 +25,7 @@ import ( "golang.org/x/net/ipv6" "tailscale.com/disco" "tailscale.com/ipn/ipnstate" + "tailscale.com/net/packet" "tailscale.com/net/stun" "tailscale.com/net/tstun" "tailscale.com/tailcfg" @@ -84,7 +85,7 @@ type endpoint struct { bestAddrAt mono.Time // time best address re-confirmed trustBestAddrUntil mono.Time // time when bestAddr expires sentPing map[stun.TxID]sentPing - endpointState map[netip.AddrPort]*endpointState + endpointState map[netip.AddrPort]*endpointState // netip.AddrPort type for key (instead of [epAddr]) as [endpointState] is irrelevant for Geneve-encapsulated paths isCallMeMaybeEP map[netip.AddrPort]bool // The following fields are related to the new "silent disco" @@ -99,7 +100,7 @@ type endpoint struct { } func (de *endpoint) setBestAddrLocked(v addrQuality) { - if v.AddrPort != de.bestAddr.AddrPort { + if v.epAddr != de.bestAddr.epAddr { de.probeUDPLifetime.resetCycleEndpointLocked() } de.bestAddr = v @@ -135,11 +136,11 @@ type probeUDPLifetime struct { // timeout cliff in the future. timer *time.Timer - // bestAddr contains the endpoint.bestAddr.AddrPort at the time a cycle was + // bestAddr contains the endpoint.bestAddr.epAddr at the time a cycle was // scheduled to start. A probing cycle is 1:1 with the current - // endpoint.bestAddr.AddrPort in the interest of simplicity. When - // endpoint.bestAddr.AddrPort changes, any active probing cycle will reset. - bestAddr netip.AddrPort + // endpoint.bestAddr.epAddr in the interest of simplicity. When + // endpoint.bestAddr.epAddr changes, any active probing cycle will reset. 
+ bestAddr epAddr // cycleStartedAt contains the time at which the first cliff // (ProbeUDPLifetimeConfig.Cliffs[0]) was pinged for the current/last cycle. cycleStartedAt time.Time @@ -191,7 +192,7 @@ func (p *probeUDPLifetime) resetCycleEndpointLocked() { } p.cycleActive = false p.currentCliff = 0 - p.bestAddr = netip.AddrPort{} + p.bestAddr = epAddr{} } // ProbeUDPLifetimeConfig represents the configuration for probing UDP path @@ -334,7 +335,7 @@ type endpointDisco struct { } type sentPing struct { - to netip.AddrPort + to epAddr at mono.Time timer *time.Timer // timeout timer purpose discoPingPurpose @@ -446,7 +447,8 @@ func (de *endpoint) deleteEndpointLocked(why string, ep netip.AddrPort) { From: ep, }) delete(de.endpointState, ep) - if de.bestAddr.AddrPort == ep { + asEpAddr := epAddr{ap: ep} + if de.bestAddr.epAddr == asEpAddr { de.debugUpdates.Add(EndpointChange{ When: time.Now(), What: "deleteEndpointLocked-bestAddr-" + why, @@ -469,10 +471,10 @@ func (de *endpoint) initFakeUDPAddr() { // noteRecvActivity records receive activity on de, and invokes // Conn.noteRecvActivity no more than once every 10s. -func (de *endpoint) noteRecvActivity(ipp netip.AddrPort, now mono.Time) { +func (de *endpoint) noteRecvActivity(src epAddr, now mono.Time) { if de.isWireguardOnly { de.mu.Lock() - de.bestAddr.AddrPort = ipp + de.bestAddr.ap = src.ap de.bestAddrAt = now de.trustBestAddrUntil = now.Add(5 * time.Second) de.mu.Unlock() @@ -482,7 +484,7 @@ func (de *endpoint) noteRecvActivity(ipp netip.AddrPort, now mono.Time) { // kick off discovery disco pings every trustUDPAddrDuration and mirror // to DERP. de.mu.Lock() - if de.heartbeatDisabled && de.bestAddr.AddrPort == ipp { + if de.heartbeatDisabled && de.bestAddr.epAddr == src { de.trustBestAddrUntil = now.Add(trustUDPAddrDuration) } de.mu.Unlock() @@ -530,10 +532,10 @@ func (de *endpoint) DstToBytes() []byte { return packIPPort(de.fakeWGAddr) } // de.mu must be held. // // TODO(val): Rewrite the addrFor*Locked() variations to share code. -func (de *endpoint) addrForSendLocked(now mono.Time) (udpAddr, derpAddr netip.AddrPort, sendWGPing bool) { - udpAddr = de.bestAddr.AddrPort +func (de *endpoint) addrForSendLocked(now mono.Time) (udpAddr epAddr, derpAddr netip.AddrPort, sendWGPing bool) { + udpAddr = de.bestAddr.epAddr - if udpAddr.IsValid() && !now.After(de.trustBestAddrUntil) { + if udpAddr.ap.IsValid() && !now.After(de.trustBestAddrUntil) { return udpAddr, netip.AddrPort{}, false } @@ -557,7 +559,7 @@ func (de *endpoint) addrForSendLocked(now mono.Time) (udpAddr, derpAddr netip.Ad // best latency is used. // // de.mu must be held. -func (de *endpoint) addrForWireGuardSendLocked(now mono.Time) (udpAddr netip.AddrPort, shouldPing bool) { +func (de *endpoint) addrForWireGuardSendLocked(now mono.Time) (udpAddr epAddr, shouldPing bool) { if len(de.endpointState) == 0 { de.c.logf("magicsock: addrForSendWireguardLocked: [unexpected] no candidates available for endpoint") return udpAddr, false @@ -581,22 +583,22 @@ func (de *endpoint) addrForWireGuardSendLocked(now mono.Time) (udpAddr netip.Add // TODO(catzkorn): Consider a small increase in latency to use // IPv6 in comparison to IPv4, when possible. 
lowestLatency = latency - udpAddr = ipp + udpAddr.ap = ipp } } } needPing := len(de.endpointState) > 1 && now.Sub(oldestPing) > wireguardPingInterval - if !udpAddr.IsValid() { + if !udpAddr.ap.IsValid() { candidates := slicesx.MapKeys(de.endpointState) // Randomly select an address to use until we retrieve latency information // and give it a short trustBestAddrUntil time so we avoid flapping between // addresses while waiting on latency information to be populated. - udpAddr = candidates[rand.IntN(len(candidates))] + udpAddr.ap = candidates[rand.IntN(len(candidates))] } - de.bestAddr.AddrPort = udpAddr + de.bestAddr.epAddr = epAddr{ap: udpAddr.ap} // Only extend trustBestAddrUntil by one second to avoid packet // reordering and/or CPU usage from random selection during the first // second. We should receive a response due to a WireGuard handshake in @@ -614,18 +616,18 @@ func (de *endpoint) addrForWireGuardSendLocked(now mono.Time) (udpAddr netip.Add // both of the returned UDP address and DERP address may be non-zero. // // de.mu must be held. -func (de *endpoint) addrForPingSizeLocked(now mono.Time, size int) (udpAddr, derpAddr netip.AddrPort) { +func (de *endpoint) addrForPingSizeLocked(now mono.Time, size int) (udpAddr epAddr, derpAddr netip.AddrPort) { if size == 0 { udpAddr, derpAddr, _ = de.addrForSendLocked(now) return } - udpAddr = de.bestAddr.AddrPort + udpAddr = de.bestAddr.epAddr pathMTU := de.bestAddr.wireMTU - requestedMTU := pingSizeToPktLen(size, udpAddr.Addr().Is6()) + requestedMTU := pingSizeToPktLen(size, udpAddr) mtuOk := requestedMTU <= pathMTU - if udpAddr.IsValid() && mtuOk { + if udpAddr.ap.IsValid() && mtuOk { if !now.After(de.trustBestAddrUntil) { return udpAddr, netip.AddrPort{} } @@ -638,7 +640,7 @@ func (de *endpoint) addrForPingSizeLocked(now mono.Time, size int) (udpAddr, der // for the packet. Return a zero-value udpAddr to signal that we should // keep probing the path MTU to all addresses for this endpoint, and a // valid DERP addr to signal that we should also send via DERP. 
- return netip.AddrPort{}, de.derpAddr + return epAddr{}, de.derpAddr } // maybeProbeUDPLifetimeLocked returns an afterInactivityFor duration and true @@ -649,7 +651,7 @@ func (de *endpoint) maybeProbeUDPLifetimeLocked() (afterInactivityFor time.Durat if p == nil { return afterInactivityFor, false } - if !de.bestAddr.IsValid() { + if !de.bestAddr.ap.IsValid() { return afterInactivityFor, false } epDisco := de.disco.Load() @@ -701,7 +703,7 @@ func (de *endpoint) scheduleHeartbeatForLifetimeLocked(after time.Duration, via } de.c.dlogf("[v1] magicsock: disco: scheduling UDP lifetime probe for cliff=%v via=%v to %v (%v)", p.currentCliffDurationEndpointLocked(), via, de.publicKey.ShortString(), de.discoShort()) - p.bestAddr = de.bestAddr.AddrPort + p.bestAddr = de.bestAddr.epAddr p.timer = time.AfterFunc(after, de.heartbeatForLifetime) if via == heartbeatForLifetimeViaSelf { metricUDPLifetimeCliffsRescheduled.Add(1) @@ -729,7 +731,7 @@ func (de *endpoint) heartbeatForLifetime() { return } p.timer = nil - if !p.bestAddr.IsValid() || de.bestAddr.AddrPort != p.bestAddr { + if !p.bestAddr.ap.IsValid() || de.bestAddr.epAddr != p.bestAddr { // best path changed p.resetCycleEndpointLocked() return @@ -761,7 +763,7 @@ func (de *endpoint) heartbeatForLifetime() { } de.c.dlogf("[v1] magicsock: disco: sending disco ping for UDP lifetime probe cliff=%v to %v (%v)", p.currentCliffDurationEndpointLocked(), de.publicKey.ShortString(), de.discoShort()) - de.startDiscoPingLocked(de.bestAddr.AddrPort, mono.Now(), pingHeartbeatForUDPLifetime, 0, nil) + de.startDiscoPingLocked(de.bestAddr.epAddr, mono.Now(), pingHeartbeatForUDPLifetime, 0, nil) } // heartbeat is called every heartbeatInterval to keep the best UDP path alive, @@ -819,7 +821,7 @@ func (de *endpoint) heartbeat() { } udpAddr, _, _ := de.addrForSendLocked(now) - if udpAddr.IsValid() { + if udpAddr.ap.IsValid() { // We have a preferred path. Ping that every 'heartbeatInterval'. de.startDiscoPingLocked(udpAddr, now, pingHeartbeat, 0, nil) } @@ -846,7 +848,7 @@ func (de *endpoint) wantFullPingLocked(now mono.Time) bool { if runtime.GOOS == "js" { return false } - if !de.bestAddr.IsValid() || de.lastFullPing.IsZero() { + if !de.bestAddr.ap.IsValid() || de.lastFullPing.IsZero() { return true } if now.After(de.trustBestAddrUntil) { @@ -906,9 +908,9 @@ func (de *endpoint) discoPing(res *ipnstate.PingResult, size int, cb func(*ipnst udpAddr, derpAddr := de.addrForPingSizeLocked(now, size) if derpAddr.IsValid() { - de.startDiscoPingLocked(derpAddr, now, pingCLI, size, resCB) + de.startDiscoPingLocked(epAddr{ap: derpAddr}, now, pingCLI, size, resCB) } - if udpAddr.IsValid() && now.Before(de.trustBestAddrUntil) { + if udpAddr.ap.IsValid() && now.Before(de.trustBestAddrUntil) { // Already have an active session, so just ping the address we're using. 
// Otherwise "tailscale ping" results to a node on the local network // can look like they're bouncing between, say 10.0.0.0/9 and the peer's @@ -916,7 +918,7 @@ func (de *endpoint) discoPing(res *ipnstate.PingResult, size int, cb func(*ipnst de.startDiscoPingLocked(udpAddr, now, pingCLI, size, resCB) } else { for ep := range de.endpointState { - de.startDiscoPingLocked(ep, now, pingCLI, size, resCB) + de.startDiscoPingLocked(epAddr{ap: ep}, now, pingCLI, size, resCB) } } } @@ -941,14 +943,14 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { if startWGPing { de.sendWireGuardOnlyPingsLocked(now) } - } else if !udpAddr.IsValid() || now.After(de.trustBestAddrUntil) { + } else if !udpAddr.ap.IsValid() || now.After(de.trustBestAddrUntil) { de.sendDiscoPingsLocked(now, true) } de.noteTxActivityExtTriggerLocked(now) de.lastSendAny = now de.mu.Unlock() - if !udpAddr.IsValid() && !derpAddr.IsValid() { + if !udpAddr.ap.IsValid() && !derpAddr.IsValid() { // Make a last ditch effort to see if we have a DERP route for them. If // they contacted us over DERP and we don't know their UDP endpoints or // their DERP home, we can at least assume they're reachable over the @@ -960,7 +962,7 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { } } var err error - if udpAddr.IsValid() { + if udpAddr.ap.IsValid() { _, err = de.c.sendUDPBatch(udpAddr, buffs, offset) // If the error is known to indicate that the endpoint is no longer @@ -976,17 +978,17 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { } switch { - case udpAddr.Addr().Is4(): + case udpAddr.ap.Addr().Is4(): de.c.metrics.outboundPacketsIPv4Total.Add(int64(len(buffs))) de.c.metrics.outboundBytesIPv4Total.Add(int64(txBytes)) - case udpAddr.Addr().Is6(): + case udpAddr.ap.Addr().Is6(): de.c.metrics.outboundPacketsIPv6Total.Add(int64(len(buffs))) de.c.metrics.outboundBytesIPv6Total.Add(int64(txBytes)) } // TODO(raggi): needs updating for accuracy, as in error conditions we may have partial sends. if stats := de.c.stats.Load(); err == nil && stats != nil { - stats.UpdateTxPhysical(de.nodeAddr, udpAddr, len(buffs), txBytes) + stats.UpdateTxPhysical(de.nodeAddr, udpAddr.ap, len(buffs), txBytes) } } if derpAddr.IsValid() { @@ -1055,7 +1057,7 @@ func (de *endpoint) discoPingTimeout(txid stun.TxID) { if !ok { return } - if debugDisco() || !de.bestAddr.IsValid() || mono.Now().After(de.trustBestAddrUntil) { + if debugDisco() || !de.bestAddr.ap.IsValid() || mono.Now().After(de.trustBestAddrUntil) { de.c.dlogf("[v1] magicsock: disco: timeout waiting for pong %x from %v (%v, %v)", txid[:6], sp.to, de.publicKey.ShortString(), de.discoShort()) } de.removeSentDiscoPingLocked(txid, sp, discoPingTimedOut) @@ -1109,11 +1111,11 @@ const discoPingSize = len(disco.Magic) + key.DiscoPublicRawLen + disco.NonceLen // // The caller should use de.discoKey as the discoKey argument. // It is passed in so that sendDiscoPing doesn't need to lock de.mu. 
-func (de *endpoint) sendDiscoPing(ep netip.AddrPort, discoKey key.DiscoPublic, txid stun.TxID, size int, logLevel discoLogLevel) { +func (de *endpoint) sendDiscoPing(ep epAddr, discoKey key.DiscoPublic, txid stun.TxID, size int, logLevel discoLogLevel) { size = min(size, MaxDiscoPingSize) padding := max(size-discoPingSize, 0) - sent, _ := de.c.sendDiscoMessage(ep, virtualNetworkID{}, de.publicKey, discoKey, &disco.Ping{ + sent, _ := de.c.sendDiscoMessage(ep, de.publicKey, discoKey, &disco.Ping{ TxID: [12]byte(txid), NodeKey: de.c.publicKeyAtomic.Load(), Padding: padding, @@ -1125,7 +1127,7 @@ func (de *endpoint) sendDiscoPing(ep netip.AddrPort, discoKey key.DiscoPublic, t if size != 0 { metricSentDiscoPeerMTUProbes.Add(1) - metricSentDiscoPeerMTUProbeBytes.Add(int64(pingSizeToPktLen(size, ep.Addr().Is6()))) + metricSentDiscoPeerMTUProbeBytes.Add(int64(pingSizeToPktLen(size, ep))) } } @@ -1156,7 +1158,7 @@ const ( // if non-nil, means that a caller external to the magicsock package internals // is interested in the result (such as a CLI "tailscale ping" or a c2n ping // request, etc) -func (de *endpoint) startDiscoPingLocked(ep netip.AddrPort, now mono.Time, purpose discoPingPurpose, size int, resCB *pingResultAndCallback) { +func (de *endpoint) startDiscoPingLocked(ep epAddr, now mono.Time, purpose discoPingPurpose, size int, resCB *pingResultAndCallback) { if runtime.GOOS == "js" { return } @@ -1164,8 +1166,9 @@ func (de *endpoint) startDiscoPingLocked(ep netip.AddrPort, now mono.Time, purpo if epDisco == nil { return } - if purpose != pingCLI { - st, ok := de.endpointState[ep] + if purpose != pingCLI && + !ep.vni.isSet() { // de.endpointState is only relevant for direct/non-vni epAddr's + st, ok := de.endpointState[ep.ap] if !ok { // Shouldn't happen. But don't ping an endpoint that's // not active for us. @@ -1182,11 +1185,11 @@ func (de *endpoint) startDiscoPingLocked(ep netip.AddrPort, now mono.Time, purpo // Default to sending a single ping of the specified size sizes := []int{size} if de.c.PeerMTUEnabled() { - isDerp := ep.Addr() == tailcfg.DerpMagicIPAddr + isDerp := ep.ap.Addr() == tailcfg.DerpMagicIPAddr if !isDerp && ((purpose == pingDiscovery) || (purpose == pingCLI && size == 0)) { de.c.dlogf("[v1] magicsock: starting MTU probe") sizes = mtuProbePingSizesV4 - if ep.Addr().Is6() { + if ep.ap.Addr().Is6() { sizes = mtuProbePingSizesV6 } } @@ -1241,7 +1244,7 @@ func (de *endpoint) sendDiscoPingsLocked(now mono.Time, sendCallMeMaybe bool) { de.c.dlogf("[v1] magicsock: disco: send, starting discovery for %v (%v)", de.publicKey.ShortString(), de.discoShort()) } - de.startDiscoPingLocked(ep, now, pingDiscovery, 0, nil) + de.startDiscoPingLocked(epAddr{ap: ep}, now, pingDiscovery, 0, nil) } derpAddr := de.derpAddr if sentAny && sendCallMeMaybe && derpAddr.IsValid() { @@ -1496,17 +1499,19 @@ func (de *endpoint) clearBestAddrLocked() { de.trustBestAddrUntil = 0 } -// noteBadEndpoint marks ipp as a bad endpoint that would need to be +// noteBadEndpoint marks udpAddr as a bad endpoint that would need to be // re-evaluated before future use, this should be called for example if a send -// to ipp fails due to a host unreachable error or similar. -func (de *endpoint) noteBadEndpoint(ipp netip.AddrPort) { +// to udpAddr fails due to a host unreachable error or similar. 
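// Worked example (hypothetical sizes, not part of this change) for the
// pingSizeToPktLen change below: a disco ping of size 1000 sent to an IPv4
// epAddr with a VNI set is accounted as 1000 + 20 (IPv4 header) + 8 (UDP
// header) + packet.GeneveFixedHeaderLength (8) = 1036 bytes of wire MTU,
// while the same ping over IPv6 without Geneve is 1000 + 40 + 8 = 1048.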
+func (de *endpoint) noteBadEndpoint(udpAddr epAddr) { de.mu.Lock() defer de.mu.Unlock() de.clearBestAddrLocked() - if st, ok := de.endpointState[ipp]; ok { - st.clear() + if !udpAddr.vni.isSet() { + if st, ok := de.endpointState[udpAddr.ap]; ok { + st.clear() + } } } @@ -1526,17 +1531,20 @@ func (de *endpoint) noteConnectivityChange() { // pingSizeToPktLen calculates the minimum path MTU that would permit // a disco ping message of length size to reach its target at -// addr. size is the length of the entire disco message including +// udpAddr. size is the length of the entire disco message including // disco headers. If size is zero, assume it is the safe wire MTU. -func pingSizeToPktLen(size int, is6 bool) tstun.WireMTU { +func pingSizeToPktLen(size int, udpAddr epAddr) tstun.WireMTU { if size == 0 { return tstun.SafeWireMTU() } headerLen := ipv4.HeaderLen - if is6 { + if udpAddr.ap.Addr().Is6() { headerLen = ipv6.HeaderLen } headerLen += 8 // UDP header length + if udpAddr.vni.isSet() { + headerLen += packet.GeneveFixedHeaderLength + } return tstun.WireMTU(size + headerLen) } @@ -1563,19 +1571,19 @@ func pktLenToPingSize(mtu tstun.WireMTU, is6 bool) int { // It should be called with the Conn.mu held. // // It reports whether m.TxID corresponds to a ping that this endpoint sent. -func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src netip.AddrPort, vni virtualNetworkID) (knownTxID bool) { +func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAddr) (knownTxID bool) { de.mu.Lock() defer de.mu.Unlock() - if vni.isSet() { - // TODO(jwhited): check for matching [endpoint.bestAddr] once that data - // structure is VNI-aware and [relayManager] can mutate it. We do not - // need to reference any [endpointState] for Geneve-encapsulated disco, - // we store nothing about them there. + if src.vni.isSet() { + // TODO(jwhited): fall through once [relayManager] is able to set an + // [epAddr] as de.bestAddr. We do not need to reference any + // [endpointState] for Geneve-encapsulated disco, we store nothing + // about them there. return false } - isDerp := src.Addr() == tailcfg.DerpMagicIPAddr + isDerp := src.ap.Addr() == tailcfg.DerpMagicIPAddr sp, ok := de.sentPing[m.TxID] if !ok { @@ -1585,7 +1593,7 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src netip knownTxID = true // for naked returns below de.removeSentDiscoPingLocked(m.TxID, sp, discoPongReceived) - pktLen := int(pingSizeToPktLen(sp.size, sp.to.Addr().Is6())) + pktLen := int(pingSizeToPktLen(sp.size, src)) if sp.size != 0 { m := getPeerMTUsProbedMetric(tstun.WireMTU(pktLen)) m.Add(1) @@ -1598,18 +1606,18 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src netip latency := now.Sub(sp.at) if !isDerp { - st, ok := de.endpointState[sp.to] + st, ok := de.endpointState[sp.to.ap] if !ok { // This is no longer an endpoint we care about. return } - de.c.peerMap.setNodeKeyForIPPort(src, de.publicKey) + de.c.peerMap.setNodeKeyForEpAddr(src, de.publicKey) st.addPongReplyLocked(pongReply{ latency: latency, pongAt: now, - from: src, + from: src.ap, pongSrc: m.Src, }) } @@ -1633,7 +1641,7 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src netip // Promote this pong response to our current best address if it's lower latency. // TODO(bradfitz): decide how latency vs. 
preference order affects decision if !isDerp { - thisPong := addrQuality{sp.to, latency, tstun.WireMTU(pingSizeToPktLen(sp.size, sp.to.Addr().Is6()))} + thisPong := addrQuality{sp.to, latency, tstun.WireMTU(pingSizeToPktLen(sp.size, sp.to))} if betterAddr(thisPong, de.bestAddr) { de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v tx=%x", de.publicKey.ShortString(), de.discoShort(), sp.to, thisPong.wireMTU, m.TxID[:6]) de.debugUpdates.Add(EndpointChange{ @@ -1644,7 +1652,7 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src netip }) de.setBestAddrLocked(thisPong) } - if de.bestAddr.AddrPort == thisPong.AddrPort { + if de.bestAddr.epAddr == thisPong.epAddr { de.debugUpdates.Add(EndpointChange{ When: time.Now(), What: "handlePongConnLocked-bestAddr-latency", @@ -1659,20 +1667,34 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src netip return } -// addrQuality is an IPPort with an associated latency and path mtu. +// epAddr is a [netip.AddrPort] with an optional Geneve header (RFC8926) +// [virtualNetworkID]. +type epAddr struct { + ap netip.AddrPort // if ap == tailcfg.DerpMagicIPAddr then vni is never set + vni virtualNetworkID // vni.isSet() indicates if this [epAddr] involves a Geneve header +} + +func (e epAddr) String() string { + if !e.vni.isSet() { + return e.ap.String() + } + return fmt.Sprintf("%v:vni:%d", e.ap.String(), e.vni.get()) +} + +// addrQuality is an [epAddr] with an associated latency and path mtu. type addrQuality struct { - netip.AddrPort + epAddr latency time.Duration wireMTU tstun.WireMTU } func (a addrQuality) String() string { - return fmt.Sprintf("%v@%v+%v", a.AddrPort, a.latency, a.wireMTU) + return fmt.Sprintf("%v@%v+%v", a.epAddr, a.latency, a.wireMTU) } // betterAddr reports whether a is a better addr to use than b. func betterAddr(a, b addrQuality) bool { - if a.AddrPort == b.AddrPort { + if a.epAddr == b.epAddr { if a.wireMTU > b.wireMTU { // TODO(val): Think harder about the case of lower // latency and smaller or unknown MTU, and higher @@ -1683,10 +1705,19 @@ func betterAddr(a, b addrQuality) bool { } return false } - if !b.IsValid() { + if !b.ap.IsValid() { + return true + } + if !a.ap.IsValid() { + return false + } + + // Geneve-encapsulated paths (UDP relay servers) are lower preference in + // relation to non. + if !a.vni.isSet() && b.vni.isSet() { return true } - if !a.IsValid() { + if a.vni.isSet() && !b.vni.isSet() { return false } @@ -1710,27 +1741,27 @@ func betterAddr(a, b addrQuality) bool { // addresses, and prefer link-local unicast addresses over other types // of private IP addresses since it's definitionally more likely that // they'll be on the same network segment than a general private IP. - if a.Addr().IsLoopback() { + if a.ap.Addr().IsLoopback() { aPoints += 50 - } else if a.Addr().IsLinkLocalUnicast() { + } else if a.ap.Addr().IsLinkLocalUnicast() { aPoints += 30 - } else if a.Addr().IsPrivate() { + } else if a.ap.Addr().IsPrivate() { aPoints += 20 } - if b.Addr().IsLoopback() { + if b.ap.Addr().IsLoopback() { bPoints += 50 - } else if b.Addr().IsLinkLocalUnicast() { + } else if b.ap.Addr().IsLinkLocalUnicast() { bPoints += 30 - } else if b.Addr().IsPrivate() { + } else if b.ap.Addr().IsPrivate() { bPoints += 20 } // Prefer IPv6 for being a bit more robust, as long as // the latencies are roughly equivalent. 
- if a.Addr().Is6() { + if a.ap.Addr().Is6() { aPoints += 10 } - if b.Addr().Is6() { + if b.ap.Addr().Is6() { bPoints += 10 } @@ -1831,7 +1862,10 @@ func (de *endpoint) populatePeerStatus(ps *ipnstate.PeerStatus) { ps.LastWrite = de.lastSendExt.WallTime() ps.Active = now.Sub(de.lastSendExt) < sessionActiveTimeout - if udpAddr, derpAddr, _ := de.addrForSendLocked(now); udpAddr.IsValid() && !derpAddr.IsValid() { + if udpAddr, derpAddr, _ := de.addrForSendLocked(now); udpAddr.ap.IsValid() && !derpAddr.IsValid() { + // TODO(jwhited): if udpAddr.vni.isSet() we are using a Tailscale client + // as a UDP relay; update PeerStatus and its interpretation by + // "tailscale status" to make this clear. ps.CurAddr = udpAddr.String() } } diff --git a/wgengine/magicsock/endpoint_test.go b/wgengine/magicsock/endpoint_test.go index 1e2de8967..b1e8cab91 100644 --- a/wgengine/magicsock/endpoint_test.go +++ b/wgengine/magicsock/endpoint_test.go @@ -8,7 +8,6 @@ import ( "testing" "time" - "github.com/dsnet/try" "tailscale.com/types/key" ) @@ -154,7 +153,7 @@ func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { lower = b higher = a } - addr := addrQuality{AddrPort: try.E1[netip.AddrPort](netip.ParseAddrPort("1.1.1.1:1"))} + addr := addrQuality{epAddr: epAddr{ap: netip.MustParseAddrPort("1.1.1.1:1")}} newProbeUDPLifetime := func() *probeUDPLifetime { return &probeUDPLifetime{ config: *defaultProbeUDPLifetimeConfig, diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 3a4fdf8a2..c446cff2c 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -950,7 +950,7 @@ func (c *Conn) callNetInfoCallbackLocked(ni *tailcfg.NetInfo) { func (c *Conn) addValidDiscoPathForTest(nodeKey key.NodePublic, addr netip.AddrPort) { c.mu.Lock() defer c.mu.Unlock() - c.peerMap.setNodeKeyForIPPort(addr, nodeKey) + c.peerMap.setNodeKeyForEpAddr(epAddr{ap: addr}, nodeKey) } // SetNetInfoCallback sets the func to be called whenever the network conditions @@ -1019,13 +1019,16 @@ func (c *Conn) Ping(peer tailcfg.NodeView, res *ipnstate.PingResult, size int, c } // c.mu must be held -func (c *Conn) populateCLIPingResponseLocked(res *ipnstate.PingResult, latency time.Duration, ep netip.AddrPort) { +func (c *Conn) populateCLIPingResponseLocked(res *ipnstate.PingResult, latency time.Duration, ep epAddr) { res.LatencySeconds = latency.Seconds() - if ep.Addr() != tailcfg.DerpMagicIPAddr { + if ep.ap.Addr() != tailcfg.DerpMagicIPAddr { + // TODO(jwhited): if ep.vni.isSet() we are using a Tailscale client + // as a UDP relay; update PingResult and its interpretation by + // "tailscale ping" to make this clear. res.Endpoint = ep.String() return } - regionID := int(ep.Port()) + regionID := int(ep.ap.Port()) res.DERPRegionID = regionID res.DERPRegionCode = c.derpRegionCodeLocked(regionID) } @@ -1294,11 +1297,11 @@ var errNoUDP = errors.New("no UDP available on platform") var errUnsupportedConnType = errors.New("unsupported connection type") -func (c *Conn) sendUDPBatch(addr netip.AddrPort, buffs [][]byte, offset int) (sent bool, err error) { +func (c *Conn) sendUDPBatch(addr epAddr, buffs [][]byte, offset int) (sent bool, err error) { isIPv6 := false switch { - case addr.Addr().Is4(): - case addr.Addr().Is6(): + case addr.ap.Addr().Is4(): + case addr.ap.Addr().Is6(): isIPv6 = true default: panic("bogus sendUDPBatch addr type") @@ -1484,8 +1487,8 @@ func (c *Conn) receiveIPv6() conn.ReceiveFunc { // mkReceiveFunc creates a ReceiveFunc reading from ruc. 
// The provided healthItem and metrics are updated if non-nil. func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFuncStats, packetMetric, bytesMetric *expvar.Int) conn.ReceiveFunc { - // epCache caches an IPPort->endpoint for hot flows. - var epCache ippEndpointCache + // epCache caches an epAddr->endpoint for hot flows. + var epCache epAddrEndpointCache return func(buffs [][]byte, sizes []int, eps []conn.Endpoint) (_ int, retErr error) { if healthItem != nil { @@ -1519,7 +1522,7 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu continue } ipp := msg.Addr.(*net.UDPAddr).AddrPort() - if ep, ok := c.receiveIP(msg.Buffers[0][:msg.N], ipp, &epCache); ok { + if ep, size, ok := c.receiveIP(msg.Buffers[0][:msg.N], ipp, &epCache); ok { if packetMetric != nil { packetMetric.Add(1) } @@ -1527,7 +1530,7 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu bytesMetric.Add(int64(msg.N)) } eps[i] = ep - sizes[i] = msg.N + sizes[i] = size reportToCaller = true } else { sizes[i] = 0 @@ -1542,47 +1545,89 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu // receiveIP is the shared bits of ReceiveIPv4 and ReceiveIPv6. // +// size is the length of 'b' to report up to wireguard-go (only relevant if +// 'ok' is true) +// // ok is whether this read should be reported up to wireguard-go (our // caller). -func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *ippEndpointCache) (_ conn.Endpoint, ok bool) { +func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCache) (_ conn.Endpoint, size int, ok bool) { var ep *endpoint - if stun.Is(b) { + size = len(b) + + var geneve packet.GeneveHeader + pt, isGeneveEncap := packetLooksLike(b) + src := epAddr{ap: ipp} + if isGeneveEncap { + err := geneve.Decode(b) + if err != nil { + // Decode only returns an error when 'b' is too short, and + // 'isGeneveEncap' indicates it's a sufficient length. + c.logf("[unexpected] geneve header decoding error: %v", err) + return nil, 0, false + } + src.vni.set(geneve.VNI) + } + switch pt { + case packetLooksLikeDisco: + if isGeneveEncap { + b = b[packet.GeneveFixedHeaderLength:] + } + // The Geneve header control bit should only be set for relay handshake + // messages terminating on or originating from a UDP relay server. We + // have yet to open the encrypted disco payload to determine the + // [disco.MessageType], but we assert it should be handshake-related. + shouldByRelayHandshakeMsg := geneve.Control == true + c.handleDiscoMessage(b, src, shouldByRelayHandshakeMsg, key.NodePublic{}, discoRXPathUDP) + return nil, 0, false + case packetLooksLikeSTUNBinding: c.netChecker.ReceiveSTUNPacket(b, ipp) - return nil, false - } - if c.handleDiscoMessage(b, ipp, key.NodePublic{}, discoRXPathUDP) { - return nil, false + return nil, 0, false + default: + // Fall through for all other packet types as they are assumed to + // be potentially WireGuard. } + if !c.havePrivateKey.Load() { // If we have no private key, we're logged out or // stopped. Don't try to pass these wireguard packets // up to wireguard-go; it'll just complain (issue 1167). - return nil, false + return nil, 0, false } - if cache.ipp == ipp && cache.de != nil && cache.gen == cache.de.numStopAndReset() { + + if src.vni.isSet() { + // Strip away the Geneve header before returning the packet to + // wireguard-go. 
+ // + // TODO(jwhited): update [github.com/tailscale/wireguard-go/conn.ReceiveFunc] + // to support returning start offset in order to get rid of this memmove perf + // penalty. + size = copy(b, b[packet.GeneveFixedHeaderLength:]) + } + + if cache.epAddr == src && cache.de != nil && cache.gen == cache.de.numStopAndReset() { ep = cache.de } else { c.mu.Lock() - de, ok := c.peerMap.endpointForIPPort(ipp) + de, ok := c.peerMap.endpointForEpAddr(src) c.mu.Unlock() if !ok { if c.controlKnobs != nil && c.controlKnobs.DisableCryptorouting.Load() { - return nil, false + return nil, 0, false } - return &lazyEndpoint{c: c, src: ipp}, true + return &lazyEndpoint{c: c, src: src}, size, true } - cache.ipp = ipp + cache.epAddr = src cache.de = de cache.gen = de.numStopAndReset() ep = de } now := mono.Now() ep.lastRecvUDPAny.StoreAtomic(now) - ep.noteRecvActivity(ipp, now) + ep.noteRecvActivity(src, now) if stats := c.stats.Load(); stats != nil { stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, len(b)) } - return ep, true + return ep, size, true } // discoLogLevel controls the verbosity of discovery log messages. @@ -1632,16 +1677,16 @@ func (v *virtualNetworkID) get() uint32 { // sendDiscoMessage sends discovery message m to dstDisco at dst. // -// If dst is a DERP IP:port, then dstKey must be non-zero. +// If dst.ap is a DERP IP:port, then dstKey must be non-zero. // -// If vni.isSet(), the [disco.Message] will be preceded by a Geneve header with -// the VNI field set to the value returned by vni.get(). +// If dst.vni.isSet(), the [disco.Message] will be preceded by a Geneve header +// with the VNI field set to the value returned by vni.get(). // // The dstKey should only be non-zero if the dstDisco key // unambiguously maps to exactly one peer. -func (c *Conn) sendDiscoMessage(dst netip.AddrPort, vni virtualNetworkID, dstKey key.NodePublic, dstDisco key.DiscoPublic, m disco.Message, logLevel discoLogLevel) (sent bool, err error) { - isDERP := dst.Addr() == tailcfg.DerpMagicIPAddr - if _, isPong := m.(*disco.Pong); isPong && !isDERP && dst.Addr().Is4() { +func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key.DiscoPublic, m disco.Message, logLevel discoLogLevel) (sent bool, err error) { + isDERP := dst.ap.Addr() == tailcfg.DerpMagicIPAddr + if _, isPong := m.(*disco.Pong); isPong && !isDERP && dst.ap.Addr().Is4() { time.Sleep(debugIPv4DiscoPingPenalty()) } @@ -1678,11 +1723,11 @@ func (c *Conn) sendDiscoMessage(dst netip.AddrPort, vni virtualNetworkID, dstKey c.mu.Unlock() pkt := make([]byte, 0, 512) // TODO: size it correctly? pool? if it matters. - if vni.isSet() { + if dst.vni.isSet() { gh := packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolDisco, - VNI: vni.get(), + VNI: dst.vni.get(), Control: isRelayHandshakeMsg, } pkt = append(pkt, make([]byte, packet.GeneveFixedHeaderLength)...) @@ -1703,7 +1748,7 @@ func (c *Conn) sendDiscoMessage(dst netip.AddrPort, vni virtualNetworkID, dstKey box := di.sharedKey.Seal(m.AppendMarshal(nil)) pkt = append(pkt, box...) const isDisco = true - sent, err = c.sendAddr(dst, dstKey, pkt, isDisco) + sent, err = c.sendAddr(dst.ap, dstKey, pkt, isDisco) if sent { if logLevel == discoLog || (logLevel == discoVerboseLog && debugDisco()) { node := "?" @@ -1745,45 +1790,96 @@ const ( const discoHeaderLen = len(disco.Magic) + key.DiscoPublicRawLen -// isDiscoMaybeGeneve reports whether msg is a Tailscale Disco protocol -// message, and if true, whether it is encapsulated by a Geneve header. 
+type packetLooksLikeType int + +const ( + packetLooksLikeWireGuard packetLooksLikeType = iota + packetLooksLikeSTUNBinding + packetLooksLikeDisco +) + +// packetLooksLike reports a [packetsLooksLikeType] for 'msg', and whether +// 'msg' is encapsulated by a Geneve header (or naked). +// +// [packetLooksLikeSTUNBinding] is never Geneve-encapsulated. // -// isGeneveEncap is only relevant when isDiscoMsg is true. +// Naked STUN binding, Naked Disco, Geneve followed by Disco, naked WireGuard, +// and Geneve followed by WireGuard can be confidently distinguished based on +// the following: // -// Naked Disco, Geneve followed by Disco, and naked WireGuard can be confidently -// distinguished based on the following: -// 1. [disco.Magic] is sufficiently non-overlapping with a Geneve protocol -// field value of [packet.GeneveProtocolDisco]. -// 2. [disco.Magic] is sufficiently non-overlapping with the first 4 bytes of -// a WireGuard packet. -// 3. [packet.GeneveHeader] with a Geneve protocol field value of -// [packet.GeneveProtocolDisco] is sufficiently non-overlapping with the -// first 4 bytes of a WireGuard packet. -func isDiscoMaybeGeneve(msg []byte) (isDiscoMsg bool, isGeneveEncap bool) { - if len(msg) < discoHeaderLen { - return false, false - } - if string(msg[:len(disco.Magic)]) == disco.Magic { - return true, false - } - if len(msg) < packet.GeneveFixedHeaderLength+discoHeaderLen { - return false, false - } - if msg[0]&0xC0 != 0 || // version bits that we always transmit as 0s - msg[1]&0x3F != 0 || // reserved bits that we always transmit as 0s - binary.BigEndian.Uint16(msg[2:4]) != packet.GeneveProtocolDisco || - msg[7] != 0 { // reserved byte that we always transmit as 0 - return false, false - } - msg = msg[packet.GeneveFixedHeaderLength:] - if string(msg[:len(disco.Magic)]) == disco.Magic { - return true, true - } - return false, false -} - -// handleDiscoMessage handles a discovery message and reports whether -// msg was a Tailscale inter-node discovery message. +// 1. STUN binding @ msg[1] (0x01) is sufficiently non-overlapping with the +// Geneve header where the LSB is always 0 (part of 6 "reserved" bits). +// +// 2. STUN binding @ msg[1] (0x01) is sufficiently non-overlapping with naked +// WireGuard, which is always a 0 byte value (WireGuard message type +// occupies msg[0:4], and msg[1:4] are always 0). +// +// 3. STUN binding @ msg[1] (0x01) is sufficiently non-overlapping with the +// second byte of [disco.Magic] (0x53). +// +// 4. [disco.Magic] @ msg[2:4] (0xf09f) is sufficiently non-overlapping with a +// Geneve protocol field value of [packet.GeneveProtocolDisco] or +// [packet.GeneveProtocolWireGuard] . +// +// 5. [disco.Magic] @ msg[0] (0x54) is sufficiently non-overlapping with the +// first byte of a WireGuard packet (0x01-0x04). +// +// 6. [packet.GeneveHeader] with a Geneve protocol field value of +// [packet.GeneveProtocolDisco] or [packet.GeneveProtocolWireGuard] +// (msg[2:4]) is sufficiently non-overlapping with the second 2 bytes of a +// WireGuard packet which are always 0x0000. +func packetLooksLike(msg []byte) (t packetLooksLikeType, isGeneveEncap bool) { + if stun.Is(msg) && + msg[1] == 0x01 { // method binding + return packetLooksLikeSTUNBinding, false + } + + // TODO(jwhited): potentially collapse into disco.LooksLikeDiscoWrapper() + // if safe to do so. + looksLikeDisco := func(msg []byte) bool { + if len(msg) >= discoHeaderLen && string(msg[:len(disco.Magic)]) == disco.Magic { + return true + } + return false + } + + // Do we have a Geneve header? 
+ if len(msg) >= packet.GeneveFixedHeaderLength && + msg[0]&0xC0 == 0 && // version bits that we always transmit as 0s + msg[1]&0x3F == 0 && // reserved bits that we always transmit as 0s + msg[7] == 0 { // reserved byte that we always transmit as 0 + switch binary.BigEndian.Uint16(msg[2:4]) { + case packet.GeneveProtocolDisco: + if looksLikeDisco(msg[packet.GeneveFixedHeaderLength:]) { + return packetLooksLikeDisco, true + } else { + // The Geneve header is well-formed, and it indicated this + // was disco, but it's not. The evaluated bytes at this point + // are always distinct from naked WireGuard (msg[2:4] are always + // 0x0000) and naked Disco (msg[2:4] are always 0xf09f), but + // maintain pre-Geneve behavior and fall back to assuming it's + // naked WireGuard. + return packetLooksLikeWireGuard, false + } + case packet.GeneveProtocolWireGuard: + return packetLooksLikeWireGuard, true + default: + // The Geneve header is well-formed, but the protocol field value is + // unknown to us. The evaluated bytes at this point are not + // necessarily distinct from naked WireGuard or naked Disco, fall + // through. + } + } + + if looksLikeDisco(msg) { + return packetLooksLikeDisco, false + } else { + return packetLooksLikeWireGuard, false + } +} + +// handleDiscoMessage handles a discovery message. The caller is assumed to have +// verified 'msg' returns [packetLooksLikeDisco] from packetLooksLike(). // // A discovery message has the form: // @@ -1792,34 +1888,17 @@ func isDiscoMaybeGeneve(msg []byte) (isDiscoMsg bool, isGeneveEncap bool) { // - nonce [24]byte // - naclbox of payload (see tailscale.com/disco package for inner payload format) // -// For messages received over DERP, the src.Addr() will be derpMagicIP (with -// src.Port() being the region ID) and the derpNodeSrc will be the node key +// For messages received over DERP, the src.ap.Addr() will be derpMagicIP (with +// src.ap.Port() being the region ID) and the derpNodeSrc will be the node key // it was received from at the DERP layer. derpNodeSrc is zero when received // over UDP. -func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc key.NodePublic, via discoRXPath) (isDiscoMsg bool) { - isDiscoMsg, isGeneveEncap := isDiscoMaybeGeneve(msg) - if !isDiscoMsg { - return - } - var geneve packet.GeneveHeader - var vni virtualNetworkID - if isGeneveEncap { - err := geneve.Decode(msg) - if err != nil { - // Decode only returns an error when 'msg' is too short, and - // 'isGeneveEncap' indicates it's a sufficient length. - c.logf("[unexpected] geneve header decoding error: %v", err) - return - } - vni.set(geneve.VNI) - msg = msg[packet.GeneveFixedHeaderLength:] - } - // The control bit should only be set for relay handshake messages - // terminating on or originating from a UDP relay server. We have yet to - // open the encrypted payload to determine the [disco.MessageType], but - // we assert it should be handshake-related. - shouldBeRelayHandshakeMsg := isGeneveEncap && geneve.Control - +// +// If 'msg' was encapsulated by a Geneve header it is assumed to have already +// been stripped. +// +// 'shouldBeRelayHandshakeMsg' will be true if 'msg' was encapsulated +// by a Geneve header with the control bit set. 
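[Editor's sketch, not part of the patch: what a Geneve-encapsulated disco frame looks like on the wire and how packetLooksLike classifies it. Assumes the magicsock package context; the VNI and the zero disco key are placeholders, and the sealed payload is omitted.]

	pkt := make([]byte, 0, 512)
	gh := packet.GeneveHeader{
		Version:  0,
		Protocol: packet.GeneveProtocolDisco,
		VNI:      7,    // placeholder VNI
		Control:  true, // set only on relay handshake messages
	}
	pkt = append(pkt, make([]byte, packet.GeneveFixedHeaderLength)...)
	gh.Encode(pkt)
	pkt = append(pkt, disco.Magic...)
	var senderDisco key.DiscoPublic // placeholder for the sender's disco key
	pkt = senderDisco.AppendTo(pkt)
	// The nonce and sealed payload would follow in a real frame.

	if pt, encap := packetLooksLike(pkt); pt == packetLooksLikeDisco && encap {
		// receiveIP strips the Geneve header and calls handleDiscoMessage
		// with shouldBeRelayHandshakeMsg derived from the Control bit.
	}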
+func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshakeMsg bool, derpNodeSrc key.NodePublic, via discoRXPath) { sender := key.DiscoPublicFromRaw32(mem.B(msg[len(disco.Magic):discoHeaderLen])) c.mu.Lock() @@ -1833,7 +1912,6 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke } if c.privateKey.IsZero() { // Ignore disco messages when we're stopped. - // Still return true, to not pass it down to wireguard. return } @@ -1844,7 +1922,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke di, ok = c.relayManager.discoInfo(sender) if !ok { if debugDisco() { - c.logf("magicsock: disco: ignoring disco-looking relay handshake frame, no active handshakes with key %v over VNI %d", sender.ShortString(), geneve.VNI) + c.logf("magicsock: disco: ignoring disco-looking relay handshake frame, no active handshakes with key %v over %v", sender.ShortString(), src) } return } @@ -1858,10 +1936,10 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke return } - isDERP := src.Addr() == tailcfg.DerpMagicIPAddr + isDERP := src.ap.Addr() == tailcfg.DerpMagicIPAddr if !isDERP && !shouldBeRelayHandshakeMsg { // Record receive time for UDP transport packets. - pi, ok := c.peerMap.byIPPort[src] + pi, ok := c.peerMap.byEpAddr[src] if ok { pi.ep.lastRecvUDPAny.StoreAtomic(mono.Now()) } @@ -1893,7 +1971,8 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke // Emit information about the disco frame into the pcap stream // if a capture hook is installed. if cb := c.captureHook.Load(); cb != nil { - cb(packet.PathDisco, time.Now(), disco.ToPCAPFrame(src, derpNodeSrc, payload), packet.CaptureMeta{}) + // TODO(jwhited): include VNI context? + cb(packet.PathDisco, time.Now(), disco.ToPCAPFrame(src.ap, derpNodeSrc, payload), packet.CaptureMeta{}) } dm, err := disco.Parse(payload) @@ -1925,14 +2004,14 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke c.logf("[unexpected] %T packets should not come from a relay server with Geneve control bit set", dm) return } - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(challenge, di, src, geneve.VNI) + c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(challenge, di, src) return } switch dm := dm.(type) { case *disco.Ping: metricRecvDiscoPing.Add(1) - c.handlePingLocked(dm, src, vni, di, derpNodeSrc) + c.handlePingLocked(dm, src, di, derpNodeSrc) case *disco.Pong: metricRecvDiscoPong.Add(1) // There might be multiple nodes for the sender's DiscoKey. @@ -1940,14 +2019,14 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke // the Pong's TxID was theirs. knownTxID := false c.peerMap.forEachEndpointWithDiscoKey(sender, func(ep *endpoint) (keepGoing bool) { - if ep.handlePongConnLocked(dm, di, src, vni) { + if ep.handlePongConnLocked(dm, di, src) { knownTxID = true return false } return true }) - if !knownTxID && vni.isSet() { - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(dm, di, src, vni.get()) + if !knownTxID && src.vni.isSet() { + c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(dm, di, src) } case *disco.CallMeMaybe, *disco.CallMeMaybeVia: var via *disco.CallMeMaybeVia @@ -2047,18 +2126,18 @@ func (c *Conn) unambiguousNodeKeyOfPingLocked(dm *disco.Ping, dk key.DiscoPublic // di is the discoInfo of the source of the ping. // derpNodeSrc is non-zero if the ping arrived via DERP. 
-func (c *Conn) handlePingLocked(dm *disco.Ping, src netip.AddrPort, vni virtualNetworkID, di *discoInfo, derpNodeSrc key.NodePublic) { +func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpNodeSrc key.NodePublic) { likelyHeartBeat := src == di.lastPingFrom && time.Since(di.lastPingTime) < 5*time.Second di.lastPingFrom = src di.lastPingTime = time.Now() - isDerp := src.Addr() == tailcfg.DerpMagicIPAddr + isDerp := src.ap.Addr() == tailcfg.DerpMagicIPAddr - if vni.isSet() { + if src.vni.isSet() { // TODO(jwhited): check for matching [endpoint.bestAddr] once that data // structure is VNI-aware and [relayManager] can mutate it. We do not // need to reference any [endpointState] for Geneve-encapsulated disco, // we store nothing about them there. - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(dm, di, src, vni.get()) + c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(dm, di, src) return } @@ -2071,7 +2150,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src netip.AddrPort, vni virtualN // the IP:port<>disco mapping. if nk, ok := c.unambiguousNodeKeyOfPingLocked(dm, di.discoKey, derpNodeSrc); ok { if !isDerp { - c.peerMap.setNodeKeyForIPPort(src, nk) + c.peerMap.setNodeKeyForEpAddr(src, nk) } } @@ -2087,14 +2166,14 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src netip.AddrPort, vni virtualN var dup bool if isDerp { if ep, ok := c.peerMap.endpointForNodeKey(derpNodeSrc); ok { - if ep.addCandidateEndpoint(src, dm.TxID) { + if ep.addCandidateEndpoint(src.ap, dm.TxID) { return } numNodes = 1 } } else { c.peerMap.forEachEndpointWithDiscoKey(di.discoKey, func(ep *endpoint) (keepGoing bool) { - if ep.addCandidateEndpoint(src, dm.TxID) { + if ep.addCandidateEndpoint(src.ap, dm.TxID) { dup = true return false } @@ -2129,9 +2208,9 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src netip.AddrPort, vni virtualN ipDst := src discoDest := di.discoKey - go c.sendDiscoMessage(ipDst, virtualNetworkID{}, dstKey, discoDest, &disco.Pong{ + go c.sendDiscoMessage(ipDst, dstKey, discoDest, &disco.Pong{ TxID: dm.TxID, - Src: src, + Src: src.ap, }, discoVerboseLog) } @@ -2174,12 +2253,12 @@ func (c *Conn) enqueueCallMeMaybe(derpAddr netip.AddrPort, de *endpoint) { for _, ep := range c.lastEndpoints { eps = append(eps, ep.Addr) } - go de.c.sendDiscoMessage(derpAddr, virtualNetworkID{}, de.publicKey, epDisco.key, &disco.CallMeMaybe{MyNumber: eps}, discoLog) + go de.c.sendDiscoMessage(epAddr{ap: derpAddr}, de.publicKey, epDisco.key, &disco.CallMeMaybe{MyNumber: eps}, discoLog) if debugSendCallMeUnknownPeer() { // Send a callMeMaybe packet to a non-existent peer unknownKey := key.NewNode().Public() c.logf("magicsock: sending CallMeMaybe to unknown peer per TS_DEBUG_SEND_CALLME_UNKNOWN_PEER") - go de.c.sendDiscoMessage(derpAddr, virtualNetworkID{}, unknownKey, epDisco.key, &disco.CallMeMaybe{MyNumber: eps}, discoLog) + go de.c.sendDiscoMessage(epAddr{ap: derpAddr}, unknownKey, epDisco.key, &disco.CallMeMaybe{MyNumber: eps}, discoLog) } } @@ -3275,12 +3354,12 @@ func portableTrySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { // derpStr replaces DERP IPs in s with "derp-". func derpStr(s string) string { return strings.ReplaceAll(s, "127.3.3.40:", "derp-") } -// ippEndpointCache is a mutex-free single-element cache, mapping from -// a single netip.AddrPort to a single endpoint. 
-type ippEndpointCache struct { - ipp netip.AddrPort - gen int64 - de *endpoint +// epAddrEndpointCache is a mutex-free single-element cache, mapping from +// a single [epAddr] to a single [*endpoint]. +type epAddrEndpointCache struct { + epAddr epAddr + gen int64 + de *endpoint } // discoInfo is the info and state for the DiscoKey @@ -3309,7 +3388,7 @@ type discoInfo struct { // Mutable fields follow, owned by Conn.mu: // lastPingFrom is the src of a ping for discoKey. - lastPingFrom netip.AddrPort + lastPingFrom epAddr // lastPingTime is the last time of a ping for discoKey. lastPingTime time.Time @@ -3444,14 +3523,14 @@ func (c *Conn) SetLastNetcheckReportForTest(ctx context.Context, report *netchec // to tell us who it is later and get the correct conn.Endpoint. type lazyEndpoint struct { c *Conn - src netip.AddrPort + src epAddr } var _ conn.PeerAwareEndpoint = (*lazyEndpoint)(nil) var _ conn.Endpoint = (*lazyEndpoint)(nil) func (le *lazyEndpoint) ClearSrc() {} -func (le *lazyEndpoint) SrcIP() netip.Addr { return le.src.Addr() } +func (le *lazyEndpoint) SrcIP() netip.Addr { return le.src.ap.Addr() } func (le *lazyEndpoint) DstIP() netip.Addr { return netip.Addr{} } func (le *lazyEndpoint) SrcToString() string { return le.src.String() } func (le *lazyEndpoint) DstToString() string { return "dst" } diff --git a/wgengine/magicsock/magicsock_linux.go b/wgengine/magicsock/magicsock_linux.go index 34c39fe62..070380029 100644 --- a/wgengine/magicsock/magicsock_linux.go +++ b/wgengine/magicsock/magicsock_linux.go @@ -453,7 +453,13 @@ func (c *Conn) receiveDisco(pc *socket.Conn, isIPV6 bool) { metricRecvDiscoPacketIPv4.Add(1) } - c.handleDiscoMessage(payload, srcAddr, key.NodePublic{}, discoRXPathRawSocket) + pt, isGeneveEncap := packetLooksLike(payload) + if pt == packetLooksLikeDisco && !isGeneveEncap { + // The BPF program matching on disco does not currently support + // Geneve encapsulation. isGeneveEncap should not return true if + // payload is disco. + c.handleDiscoMessage(payload, epAddr{ap: srcAddr}, false, key.NodePublic{}, discoRXPathRawSocket) + } } } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index e18011873..5e71a40c9 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -50,6 +50,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/net/packet" "tailscale.com/net/ping" + "tailscale.com/net/stun" "tailscale.com/net/stun/stuntest" "tailscale.com/net/tstun" "tailscale.com/tailcfg" @@ -1290,41 +1291,6 @@ func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { c.Assert(metricRecvDataPacketsDERP.Value(), qt.Equals, metricDERPRxPackets*2) } -func TestDiscoMessage(t *testing.T) { - c := newConn(t.Logf) - c.privateKey = key.NewNode() - - peer1Pub := c.DiscoPublicKey() - peer1Priv := c.discoPrivate - n := &tailcfg.Node{ - Key: key.NewNode().Public(), - DiscoKey: peer1Pub, - } - ep := &endpoint{ - nodeID: 1, - publicKey: n.Key, - } - ep.disco.Store(&endpointDisco{ - key: n.DiscoKey, - short: n.DiscoKey.ShortString(), - }) - c.peerMap.upsertEndpoint(ep, key.DiscoPublic{}) - - const payload = "why hello" - - var nonce [24]byte - crand.Read(nonce[:]) - - pkt := peer1Pub.AppendTo([]byte("TS💬")) - - box := peer1Priv.Shared(c.discoPrivate.Public()).Seal([]byte(payload)) - pkt = append(pkt, box...) 
- got := c.handleDiscoMessage(pkt, netip.AddrPort{}, key.NodePublic{}, discoRXPathUDP) - if !got { - t.Error("failed to open it") - } -} - // tests that having a endpoint.String prevents wireguard-go's // log.Printf("%v") of its conn.Endpoint values from using reflect to // walk into read mutex while they're being used and then causing data @@ -1358,11 +1324,11 @@ func Test32bitAlignment(t *testing.T) { t.Fatalf("endpoint.lastRecvWG is not 8-byte aligned") } - de.noteRecvActivity(netip.AddrPort{}, mono.Now()) // verify this doesn't panic on 32-bit + de.noteRecvActivity(epAddr{}, mono.Now()) // verify this doesn't panic on 32-bit if called != 1 { t.Fatal("expected call to noteRecvActivity") } - de.noteRecvActivity(netip.AddrPort{}, mono.Now()) + de.noteRecvActivity(epAddr{}, mono.Now()) if called != 1 { t.Error("expected no second call to noteRecvActivity") } @@ -1799,10 +1765,15 @@ func TestEndpointSetsEqual(t *testing.T) { func TestBetterAddr(t *testing.T) { const ms = time.Millisecond al := func(ipps string, d time.Duration) addrQuality { - return addrQuality{AddrPort: netip.MustParseAddrPort(ipps), latency: d} + return addrQuality{epAddr: epAddr{ap: netip.MustParseAddrPort(ipps)}, latency: d} } almtu := func(ipps string, d time.Duration, mtu tstun.WireMTU) addrQuality { - return addrQuality{AddrPort: netip.MustParseAddrPort(ipps), latency: d, wireMTU: mtu} + return addrQuality{epAddr: epAddr{ap: netip.MustParseAddrPort(ipps)}, latency: d, wireMTU: mtu} + } + avl := func(ipps string, vni uint32, d time.Duration) addrQuality { + q := al(ipps, d) + q.vni.set(vni) + return q } zero := addrQuality{} @@ -1908,6 +1879,18 @@ func TestBetterAddr(t *testing.T) { b: al("[::1]:555", 100*ms), want: false, }, + + // Prefer non-Geneve over Geneve-encapsulated + { + a: al(publicV4, 100*ms), + b: avl(publicV4, 1, 100*ms), + want: true, + }, + { + a: avl(publicV4, 1, 100*ms), + b: al(publicV4, 100*ms), + want: false, + }, } for i, tt := range tests { got := betterAddr(tt.a, tt.b) @@ -2019,9 +2002,9 @@ func (m *peerMap) validate() error { return fmt.Errorf("duplicate endpoint present: %v", pi.ep.publicKey) } seenEps[pi.ep] = true - for ipp := range pi.ipPorts { - if got := m.byIPPort[ipp]; got != pi { - return fmt.Errorf("m.byIPPort[%v] = %v, want %v", ipp, got, pi) + for addr := range pi.epAddrs { + if got := m.byEpAddr[addr]; got != pi { + return fmt.Errorf("m.byEpAddr[%v] = %v, want %v", addr, got, pi) } } } @@ -2037,13 +2020,13 @@ func (m *peerMap) validate() error { } } - for ipp, pi := range m.byIPPort { - if !pi.ipPorts.Contains(ipp) { - return fmt.Errorf("ipPorts[%v] for %v is false", ipp, pi.ep.publicKey) + for addr, pi := range m.byEpAddr { + if !pi.epAddrs.Contains(addr) { + return fmt.Errorf("epAddrs[%v] for %v is false", addr, pi.ep.publicKey) } pi2 := m.byNodeKey[pi.ep.publicKey] if pi != pi2 { - return fmt.Errorf("byNodeKey[%v]=%p doesn't match byIPPort[%v]=%p", pi, pi, pi.ep.publicKey, pi2) + return fmt.Errorf("byNodeKey[%v]=%p doesn't match byEpAddr[%v]=%p", pi, pi, pi.ep.publicKey, pi2) } } @@ -2444,7 +2427,7 @@ func TestIsWireGuardOnlyPickEndpointByPing(t *testing.T) { // Check that we got a valid address set on the first send - this // will be randomly selected, but because we have noV6 set to true, // it will be the IPv4 address. 
- if !pi.ep.bestAddr.Addr().IsValid() { + if !pi.ep.bestAddr.ap.Addr().IsValid() { t.Fatal("bestaddr was nil") } @@ -2504,12 +2487,12 @@ func TestIsWireGuardOnlyPickEndpointByPing(t *testing.T) { t.Fatal("wgkey doesn't exist in peer map") } - if !pi.ep.bestAddr.Addr().IsValid() { + if !pi.ep.bestAddr.ap.Addr().IsValid() { t.Error("no bestAddr address was set") } - if pi.ep.bestAddr.Addr() != wgEp.Addr() { - t.Errorf("bestAddr was not set to the expected IPv4 address: got %v, want %v", pi.ep.bestAddr.Addr().String(), wgEp.Addr()) + if pi.ep.bestAddr.ap.Addr() != wgEp.Addr() { + t.Errorf("bestAddr was not set to the expected IPv4 address: got %v, want %v", pi.ep.bestAddr.ap.Addr().String(), wgEp.Addr()) } if pi.ep.trustBestAddrUntil.IsZero() { @@ -2670,7 +2653,7 @@ func TestAddrForSendLockedForWireGuardOnly(t *testing.T) { sendFollowUpPing bool pingTime mono.Time ep []endpointDetails - want netip.AddrPort + want epAddr }{ { name: "no endpoints", @@ -2679,7 +2662,7 @@ func TestAddrForSendLockedForWireGuardOnly(t *testing.T) { sendFollowUpPing: false, pingTime: testTime, ep: []endpointDetails{}, - want: netip.AddrPort{}, + want: epAddr{}, }, { name: "singular endpoint does not request ping", @@ -2693,7 +2676,7 @@ func TestAddrForSendLockedForWireGuardOnly(t *testing.T) { latency: 100 * time.Millisecond, }, }, - want: netip.MustParseAddrPort("1.1.1.1:111"), + want: epAddr{ap: netip.MustParseAddrPort("1.1.1.1:111")}, }, { name: "ping sent within wireguardPingInterval should not request ping", @@ -2711,7 +2694,7 @@ func TestAddrForSendLockedForWireGuardOnly(t *testing.T) { latency: 2000 * time.Millisecond, }, }, - want: netip.MustParseAddrPort("1.1.1.1:111"), + want: epAddr{ap: netip.MustParseAddrPort("1.1.1.1:111")}, }, { name: "ping sent outside of wireguardPingInterval should request ping", @@ -2729,7 +2712,7 @@ func TestAddrForSendLockedForWireGuardOnly(t *testing.T) { latency: 150 * time.Millisecond, }, }, - want: netip.MustParseAddrPort("1.1.1.1:111"), + want: epAddr{ap: netip.MustParseAddrPort("1.1.1.1:111")}, }, { name: "choose lowest latency for useable IPv4 and IPv6", @@ -2747,7 +2730,7 @@ func TestAddrForSendLockedForWireGuardOnly(t *testing.T) { latency: 10 * time.Millisecond, }, }, - want: netip.MustParseAddrPort("[2345:0425:2CA1:0000:0000:0567:5673:23b5]:222"), + want: epAddr{ap: netip.MustParseAddrPort("[2345:0425:2CA1:0000:0000:0567:5673:23b5]:222")}, }, { name: "choose IPv6 address when latency is the same for v4 and v6", @@ -2765,7 +2748,7 @@ func TestAddrForSendLockedForWireGuardOnly(t *testing.T) { latency: 100 * time.Millisecond, }, }, - want: netip.MustParseAddrPort("[1::1]:567"), + want: epAddr{ap: netip.MustParseAddrPort("[1::1]:567")}, }, } @@ -2785,8 +2768,8 @@ func TestAddrForSendLockedForWireGuardOnly(t *testing.T) { endpoint.endpointState[epd.addrPort] = &endpointState{} } udpAddr, _, shouldPing := endpoint.addrForSendLocked(testTime) - if udpAddr.IsValid() != test.validAddr { - t.Errorf("udpAddr validity is incorrect; got %v, want %v", udpAddr.IsValid(), test.validAddr) + if udpAddr.ap.IsValid() != test.validAddr { + t.Errorf("udpAddr validity is incorrect; got %v, want %v", udpAddr.ap.IsValid(), test.validAddr) } if shouldPing != test.sendInitialPing { t.Errorf("addrForSendLocked did not indiciate correct ping state; got %v, want %v", shouldPing, test.sendInitialPing) @@ -2818,8 +2801,8 @@ func TestAddrForSendLockedForWireGuardOnly(t *testing.T) { if shouldPing != test.sendFollowUpPing { t.Errorf("addrForSendLocked did not indiciate correct ping state; got %v, want 
%v", shouldPing, test.sendFollowUpPing) } - if endpoint.bestAddr.AddrPort != test.want { - t.Errorf("bestAddr.AddrPort is not as expected: got %v, want %v", endpoint.bestAddr.AddrPort, test.want) + if endpoint.bestAddr.epAddr != test.want { + t.Errorf("bestAddr.epAddr is not as expected: got %v, want %v", endpoint.bestAddr.epAddr, test.want) } }) } @@ -2906,7 +2889,7 @@ func TestAddrForPingSizeLocked(t *testing.T) { t.Run(test.desc, func(t *testing.T) { bestAddr := addrQuality{wireMTU: test.mtu} if test.bestAddr { - bestAddr.AddrPort = validUdpAddr + bestAddr.epAddr.ap = validUdpAddr } ep := &endpoint{ derpAddr: validDerpAddr, @@ -2918,10 +2901,10 @@ func TestAddrForPingSizeLocked(t *testing.T) { udpAddr, derpAddr := ep.addrForPingSizeLocked(testTime, test.size) - if test.wantUDP && !udpAddr.IsValid() { + if test.wantUDP && !udpAddr.ap.IsValid() { t.Errorf("%s: udpAddr returned is not valid, won't be sent to UDP address", test.desc) } - if !test.wantUDP && udpAddr.IsValid() { + if !test.wantUDP && udpAddr.ap.IsValid() { t.Errorf("%s: udpAddr returned is valid, discovery will not start", test.desc) } if test.wantDERP && !derpAddr.IsValid() { @@ -3157,7 +3140,7 @@ func TestNetworkDownSendErrors(t *testing.T) { } } -func Test_isDiscoMaybeGeneve(t *testing.T) { +func Test_packetLooksLike(t *testing.T) { discoPub := key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 30: 30, 31: 31})) nakedDisco := make([]byte, 0, 512) nakedDisco = append(nakedDisco, disco.Magic...) @@ -3240,80 +3223,92 @@ func Test_isDiscoMaybeGeneve(t *testing.T) { copy(geneveEncapDiscoNonZeroGeneveVNILSB[packet.GeneveFixedHeaderLength:], nakedDisco) tests := []struct { - name string - msg []byte - wantIsDiscoMsg bool - wantIsGeneveEncap bool + name string + msg []byte + wantPacketLooksLikeType packetLooksLikeType + wantIsGeneveEncap bool }{ { - name: "naked disco", - msg: nakedDisco, - wantIsDiscoMsg: true, - wantIsGeneveEncap: false, + name: "STUN binding success response", + msg: stun.Response(stun.NewTxID(), netip.MustParseAddrPort("127.0.0.1:1")), + wantPacketLooksLikeType: packetLooksLikeSTUNBinding, + wantIsGeneveEncap: false, + }, + { + name: "naked disco", + msg: nakedDisco, + wantPacketLooksLikeType: packetLooksLikeDisco, + wantIsGeneveEncap: false, + }, + { + name: "geneve encap disco", + msg: geneveEncapDisco, + wantPacketLooksLikeType: packetLooksLikeDisco, + wantIsGeneveEncap: true, }, { - name: "geneve encap disco", - msg: geneveEncapDisco, - wantIsDiscoMsg: true, - wantIsGeneveEncap: true, + name: "geneve encap too short disco", + msg: geneveEncapDisco[:len(geneveEncapDisco)-key.DiscoPublicRawLen], + wantPacketLooksLikeType: packetLooksLikeWireGuard, + wantIsGeneveEncap: false, }, { - name: "geneve encap disco nonzero geneve version", - msg: geneveEncapDiscoNonZeroGeneveVersion, - wantIsDiscoMsg: false, - wantIsGeneveEncap: false, + name: "geneve encap disco nonzero geneve version", + msg: geneveEncapDiscoNonZeroGeneveVersion, + wantPacketLooksLikeType: packetLooksLikeWireGuard, + wantIsGeneveEncap: false, }, { - name: "geneve encap disco nonzero geneve reserved bits", - msg: geneveEncapDiscoNonZeroGeneveReservedBits, - wantIsDiscoMsg: false, - wantIsGeneveEncap: false, + name: "geneve encap disco nonzero geneve reserved bits", + msg: geneveEncapDiscoNonZeroGeneveReservedBits, + wantPacketLooksLikeType: packetLooksLikeWireGuard, + wantIsGeneveEncap: false, }, { - name: "geneve encap disco nonzero geneve vni lsb", - msg: geneveEncapDiscoNonZeroGeneveVNILSB, - wantIsDiscoMsg: false, - wantIsGeneveEncap: false, + name: 
"geneve encap disco nonzero geneve vni lsb", + msg: geneveEncapDiscoNonZeroGeneveVNILSB, + wantPacketLooksLikeType: packetLooksLikeWireGuard, + wantIsGeneveEncap: false, }, { - name: "geneve encap wireguard", - msg: geneveEncapWireGuard, - wantIsDiscoMsg: false, - wantIsGeneveEncap: false, + name: "geneve encap wireguard", + msg: geneveEncapWireGuard, + wantPacketLooksLikeType: packetLooksLikeWireGuard, + wantIsGeneveEncap: true, }, { - name: "naked WireGuard Initiation type", - msg: nakedWireGuardInitiation, - wantIsDiscoMsg: false, - wantIsGeneveEncap: false, + name: "naked WireGuard Initiation type", + msg: nakedWireGuardInitiation, + wantPacketLooksLikeType: packetLooksLikeWireGuard, + wantIsGeneveEncap: false, }, { - name: "naked WireGuard Response type", - msg: nakedWireGuardResponse, - wantIsDiscoMsg: false, - wantIsGeneveEncap: false, + name: "naked WireGuard Response type", + msg: nakedWireGuardResponse, + wantPacketLooksLikeType: packetLooksLikeWireGuard, + wantIsGeneveEncap: false, }, { - name: "naked WireGuard Cookie Reply type", - msg: nakedWireGuardCookieReply, - wantIsDiscoMsg: false, - wantIsGeneveEncap: false, + name: "naked WireGuard Cookie Reply type", + msg: nakedWireGuardCookieReply, + wantPacketLooksLikeType: packetLooksLikeWireGuard, + wantIsGeneveEncap: false, }, { - name: "naked WireGuard Transport type", - msg: nakedWireGuardTransport, - wantIsDiscoMsg: false, - wantIsGeneveEncap: false, + name: "naked WireGuard Transport type", + msg: nakedWireGuardTransport, + wantPacketLooksLikeType: packetLooksLikeWireGuard, + wantIsGeneveEncap: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - gotIsDiscoMsg, gotIsGeneveEncap := isDiscoMaybeGeneve(tt.msg) - if gotIsDiscoMsg != tt.wantIsDiscoMsg { - t.Errorf("isDiscoMaybeGeneve() gotIsDiscoMsg = %v, want %v", gotIsDiscoMsg, tt.wantIsDiscoMsg) + gotPacketLooksLikeType, gotIsGeneveEncap := packetLooksLike(tt.msg) + if gotPacketLooksLikeType != tt.wantPacketLooksLikeType { + t.Errorf("packetLooksLike() gotPacketLooksLikeType = %v, want %v", gotPacketLooksLikeType, tt.wantPacketLooksLikeType) } if gotIsGeneveEncap != tt.wantIsGeneveEncap { - t.Errorf("isDiscoMaybeGeneve() gotIsGeneveEncap = %v, want %v", gotIsGeneveEncap, tt.wantIsGeneveEncap) + t.Errorf("packetLooksLike() gotIsGeneveEncap = %v, want %v", gotIsGeneveEncap, tt.wantIsGeneveEncap) } }) } diff --git a/wgengine/magicsock/peermap.go b/wgengine/magicsock/peermap.go index e1c7db1f6..04d5de8c9 100644 --- a/wgengine/magicsock/peermap.go +++ b/wgengine/magicsock/peermap.go @@ -4,8 +4,6 @@ package magicsock import ( - "net/netip" - "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/util/set" @@ -15,17 +13,17 @@ import ( // peer. type peerInfo struct { ep *endpoint // always non-nil. - // ipPorts is an inverted version of peerMap.byIPPort (below), so + // epAddrs is an inverted version of peerMap.byEpAddr (below), so // that when we're deleting this node, we can rapidly find out the - // keys that need deleting from peerMap.byIPPort without having to - // iterate over every IPPort known for any peer. - ipPorts set.Set[netip.AddrPort] + // keys that need deleting from peerMap.byEpAddr without having to + // iterate over every epAddr known for any peer. 
+ epAddrs set.Set[epAddr] } func newPeerInfo(ep *endpoint) *peerInfo { return &peerInfo{ ep: ep, - ipPorts: set.Set[netip.AddrPort]{}, + epAddrs: set.Set[epAddr]{}, } } @@ -35,7 +33,7 @@ func newPeerInfo(ep *endpoint) *peerInfo { // It doesn't do any locking; all access must be done with Conn.mu held. type peerMap struct { byNodeKey map[key.NodePublic]*peerInfo - byIPPort map[netip.AddrPort]*peerInfo + byEpAddr map[epAddr]*peerInfo byNodeID map[tailcfg.NodeID]*peerInfo // nodesOfDisco contains the set of nodes that are using a @@ -46,7 +44,7 @@ type peerMap struct { func newPeerMap() peerMap { return peerMap{ byNodeKey: map[key.NodePublic]*peerInfo{}, - byIPPort: map[netip.AddrPort]*peerInfo{}, + byEpAddr: map[epAddr]*peerInfo{}, byNodeID: map[tailcfg.NodeID]*peerInfo{}, nodesOfDisco: map[key.DiscoPublic]set.Set[key.NodePublic]{}, } @@ -88,10 +86,10 @@ func (m *peerMap) endpointForNodeID(nodeID tailcfg.NodeID) (ep *endpoint, ok boo return nil, false } -// endpointForIPPort returns the endpoint for the peer we -// believe to be at ipp, or nil if we don't know of any such peer. -func (m *peerMap) endpointForIPPort(ipp netip.AddrPort) (ep *endpoint, ok bool) { - if info, ok := m.byIPPort[ipp]; ok { +// endpointForEpAddr returns the endpoint for the peer we +// believe to be at addr, or nil if we don't know of any such peer. +func (m *peerMap) endpointForEpAddr(addr epAddr) (ep *endpoint, ok bool) { + if info, ok := m.byEpAddr[addr]; ok { return info.ep, true } return nil, false @@ -148,10 +146,10 @@ func (m *peerMap) upsertEndpoint(ep *endpoint, oldDiscoKey key.DiscoPublic) { // TODO(raggi,catzkorn): this could mean that if a "isWireguardOnly" // peer has, say, 192.168.0.2 and so does a tailscale peer, the // wireguard one will win. That may not be the outcome that we want - - // perhaps we should prefer bestAddr.AddrPort if it is set? + // perhaps we should prefer bestAddr.epAddr.ap if it is set? // see tailscale/tailscale#7994 for ipp := range ep.endpointState { - m.setNodeKeyForIPPort(ipp, ep.publicKey) + m.setNodeKeyForEpAddr(epAddr{ap: ipp}, ep.publicKey) } return } @@ -163,20 +161,20 @@ func (m *peerMap) upsertEndpoint(ep *endpoint, oldDiscoKey key.DiscoPublic) { discoSet.Add(ep.publicKey) } -// setNodeKeyForIPPort makes future peer lookups by ipp return the +// setNodeKeyForEpAddr makes future peer lookups by addr return the // same endpoint as a lookup by nk. // -// This should only be called with a fully verified mapping of ipp to +// This should only be called with a fully verified mapping of addr to // nk, because calling this function defines the endpoint we hand to -// WireGuard for packets received from ipp. -func (m *peerMap) setNodeKeyForIPPort(ipp netip.AddrPort, nk key.NodePublic) { - if pi := m.byIPPort[ipp]; pi != nil { - delete(pi.ipPorts, ipp) - delete(m.byIPPort, ipp) +// WireGuard for packets received from addr. +func (m *peerMap) setNodeKeyForEpAddr(addr epAddr, nk key.NodePublic) { + if pi := m.byEpAddr[addr]; pi != nil { + delete(pi.epAddrs, addr) + delete(m.byEpAddr, addr) } if pi, ok := m.byNodeKey[nk]; ok { - pi.ipPorts.Add(ipp) - m.byIPPort[ipp] = pi + pi.epAddrs.Add(addr) + m.byEpAddr[addr] = pi } } @@ -203,7 +201,7 @@ func (m *peerMap) deleteEndpoint(ep *endpoint) { // Unexpected. But no logger plumbed here to log so. 
return } - for ip := range pi.ipPorts { - delete(m.byIPPort, ip) + for ip := range pi.epAddrs { + delete(m.byEpAddr, ip) } } diff --git a/wgengine/magicsock/rebinding_conn.go b/wgengine/magicsock/rebinding_conn.go index 7a9dd1821..51e97c8cc 100644 --- a/wgengine/magicsock/rebinding_conn.go +++ b/wgengine/magicsock/rebinding_conn.go @@ -5,6 +5,7 @@ package magicsock import ( "errors" + "fmt" "net" "net/netip" "sync" @@ -13,6 +14,7 @@ import ( "golang.org/x/net/ipv6" "tailscale.com/net/netaddr" + "tailscale.com/net/packet" "tailscale.com/types/nettype" ) @@ -71,14 +73,28 @@ func (c *RebindingUDPConn) ReadFromUDPAddrPort(b []byte) (int, netip.AddrPort, e } // WriteBatchTo writes buffs to addr. -func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort, offset int) error { +func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr epAddr, offset int) error { + if offset != packet.GeneveFixedHeaderLength { + return fmt.Errorf("RebindingUDPConn.WriteBatchTo: [unexpected] offset (%d) != Geneve header length (%d)", offset, packet.GeneveFixedHeaderLength) + } for { pconn := *c.pconnAtomic.Load() b, ok := pconn.(batchingConn) if !ok { + vniIsSet := addr.vni.isSet() + var gh packet.GeneveHeader + if vniIsSet { + gh = packet.GeneveHeader{ + VNI: addr.vni.get(), + } + } for _, buf := range buffs { - buf = buf[offset:] - _, err := c.writeToUDPAddrPortWithInitPconn(pconn, buf, addr) + if vniIsSet { + gh.Encode(buf) + } else { + buf = buf[offset:] + } + _, err := c.writeToUDPAddrPortWithInitPconn(pconn, buf, addr.ap) if err != nil { return err } diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index d9fd1fa24..177eed355 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -279,8 +279,8 @@ func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, dm *disco.CallMeMaybeV // handleGeneveEncapDiscoMsgNotBestAddr handles reception of Geneve-encapsulated // disco messages if they are not associated with any known // [*endpoint.bestAddr]. -func (r *relayManager) handleGeneveEncapDiscoMsgNotBestAddr(dm disco.Message, di *discoInfo, src netip.AddrPort, vni uint32) { - relayManagerInputEvent(r, nil, &r.rxHandshakeDiscoMsgCh, relayHandshakeDiscoMsgEvent{msg: dm, disco: di.discoKey, from: src, vni: vni, at: time.Now()}) +func (r *relayManager) handleGeneveEncapDiscoMsgNotBestAddr(dm disco.Message, di *discoInfo, src epAddr) { + relayManagerInputEvent(r, nil, &r.rxHandshakeDiscoMsgCh, relayHandshakeDiscoMsgEvent{msg: dm, disco: di.discoKey, from: src.ap, vni: src.vni.get(), at: time.Now()}) } // relayManagerInputEvent initializes [relayManager] if necessary, starts @@ -437,6 +437,8 @@ func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshak } // This relay endpoint is functional. // TODO(jwhited): Set it on done.work.ep.bestAddr if it is a betterAddr(). + // We also need to conn.peerMap.setNodeKeyForEpAddr(), and ensure we clean + // it up when bestAddr changes, too. 
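[Editor's note, not part of the patch: the TODO above is what patch 0926 later in this series starts to address, by letting relayManager hand a functional relay address to the endpoint via relayEndpointReady (defined in wgengine/magicsock/endpoint.go below). A rough sketch of that hand-off, with serverAddr, assignedVNI, and rtt as made-up placeholders:]

	// Once a pong confirms the relay path works, offer it to the endpoint;
	// the endpoint installs it only if betterAddr says it beats bestAddr.
	addr := epAddr{ap: serverAddr, vni: assignedVNI}
	done.work.ep.relayEndpointReady(addr, rtt)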
} func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelayServerEndpointEvent) { @@ -540,7 +542,7 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { for _, addrPort := range work.se.AddrPorts { if addrPort.IsValid() { sentBindAny = true - go work.ep.c.sendDiscoMessage(addrPort, vni, key.NodePublic{}, work.se.ServerDisco, bind, discoVerboseLog) + go work.ep.c.sendDiscoMessage(epAddr{ap: addrPort, vni: vni}, key.NodePublic{}, work.se.ServerDisco, bind, discoVerboseLog) } } if !sentBindAny { @@ -580,9 +582,9 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { go func() { if withAnswer != nil { answer := &disco.BindUDPRelayEndpointAnswer{Answer: *withAnswer} - work.ep.c.sendDiscoMessage(to, vni, key.NodePublic{}, work.se.ServerDisco, answer, discoVerboseLog) + work.ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, work.se.ServerDisco, answer, discoVerboseLog) } - work.ep.c.sendDiscoMessage(to, vni, key.NodePublic{}, epDisco.key, ping, discoVerboseLog) + work.ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, epDisco.key, ping, discoVerboseLog) }() } diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index 8276849aa..be0582669 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -4,7 +4,6 @@ package magicsock import ( - "net/netip" "testing" "tailscale.com/disco" @@ -25,6 +24,6 @@ func TestRelayManagerInitAndIdle(t *testing.T) { <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleGeneveEncapDiscoMsgNotBestAddr(&disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, netip.AddrPort{}, 0) + rm.handleGeneveEncapDiscoMsgNotBestAddr(&disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, epAddr{}) <-rm.runLoopStoppedCh } From 7b06532ea108bd7d12785125c2423a1a4e9987b3 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Fri, 6 Jun 2025 15:20:23 -0400 Subject: [PATCH 0923/1708] ipn/ipnlocal: Update hostinfo to control on service config change (#16146) This commit fixes the bug that c2n requests are skiped when updating vipServices in serveConfig. This then resulted netmap update being skipped which caused inaccuracy of Capmap info on client side. After this fix, client always inform control about it's vipServices config changes. Fixes tailscale/corp#29219 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- ipn/ipnlocal/local.go | 15 ++++++--- ipn/ipnlocal/local_test.go | 69 ++++++++++++++++++++++++++++++++++++-- 2 files changed, 76 insertions(+), 8 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 0efec6b9f..4b5accd85 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6194,17 +6194,17 @@ func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn. } } - // Update funnel info in hostinfo and kick off control update if needed. - b.updateIngressLocked() + // Update funnel and service hash info in hostinfo and kick off control update if needed. + b.updateIngressAndServiceHashLocked(prefs) b.setTCPPortsIntercepted(handlePorts) b.setVIPServicesTCPPortsInterceptedLocked(vipServicesPorts) } -// updateIngressLocked updates the hostinfo.WireIngress and hostinfo.IngressEnabled fields and kicks off a Hostinfo -// update if the values have changed. 
+// updateIngressAndServiceHashLocked updates the hostinfo.ServicesHash, hostinfo.WireIngress and +// hostinfo.IngressEnabled fields and kicks off a Hostinfo update if the values have changed. // // b.mu must be held. -func (b *LocalBackend) updateIngressLocked() { +func (b *LocalBackend) updateIngressAndServiceHashLocked(prefs ipn.PrefsView) { if b.hostinfo == nil { return } @@ -6219,6 +6219,11 @@ func (b *LocalBackend) updateIngressLocked() { b.hostinfo.WireIngress = wire hostInfoChanged = true } + latestHash := b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) + if b.hostinfo.ServicesHash != latestHash { + b.hostinfo.ServicesHash = latestHash + hostInfoChanged = true + } // Kick off a Hostinfo update to control if ingress status has changed. if hostInfoChanged { b.goTracker.Go(b.doSetHostinfoFilterServices) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 8f9b6ee68..f14ac037c 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -5134,10 +5134,17 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { } } -func TestUpdateIngressLocked(t *testing.T) { +func TestUpdateIngressAndServiceHashLocked(t *testing.T) { + prefs := ipn.NewPrefs().View() + previousSC := &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:abc": {Tun: true}, + }, + } tests := []struct { name string hi *tailcfg.Hostinfo + hasPreviousSC bool // whether to overwrite the ServeConfig hash in the Hostinfo using previousSC sc *ipn.ServeConfig wantIngress bool wantWireIngress bool @@ -5163,6 +5170,16 @@ func TestUpdateIngressLocked(t *testing.T) { wantWireIngress: false, // implied by wantIngress wantControlUpdate: true, }, + { + name: "empty_hostinfo_service_configured", + hi: &tailcfg.Hostinfo{}, + sc: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:abc": {Tun: true}, + }, + }, + wantControlUpdate: true, + }, { name: "empty_hostinfo_funnel_disabled", hi: &tailcfg.Hostinfo{}, @@ -5175,7 +5192,7 @@ func TestUpdateIngressLocked(t *testing.T) { wantControlUpdate: true, }, { - name: "empty_hostinfo_no_funnel", + name: "empty_hostinfo_no_funnel_no_service", hi: &tailcfg.Hostinfo{}, sc: &ipn.ServeConfig{ TCP: map[uint16]*ipn.TCPPortHandler{ @@ -5196,6 +5213,16 @@ func TestUpdateIngressLocked(t *testing.T) { wantIngress: true, wantWireIngress: false, // implied by wantIngress }, + { + name: "service_hash_no_change", + hi: &tailcfg.Hostinfo{}, + hasPreviousSC: true, + sc: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:abc": {Tun: true}, + }, + }, + }, { name: "funnel_disabled_no_change", hi: &tailcfg.Hostinfo{ @@ -5208,6 +5235,13 @@ func TestUpdateIngressLocked(t *testing.T) { }, wantWireIngress: true, // true if there is any AllowFunnel block }, + { + name: "service_got_removed", + hi: &tailcfg.Hostinfo{}, + hasPreviousSC: true, + sc: &ipn.ServeConfig{}, + wantControlUpdate: true, + }, { name: "funnel_changes_to_disabled", hi: &tailcfg.Hostinfo{ @@ -5235,12 +5269,35 @@ func TestUpdateIngressLocked(t *testing.T) { wantWireIngress: false, // implied by wantIngress wantControlUpdate: true, }, + { + name: "both_funnel_and_service_changes", + hi: &tailcfg.Hostinfo{ + IngressEnabled: true, + }, + sc: &ipn.ServeConfig{ + AllowFunnel: map[ipn.HostPort]bool{ + "tailnet.xyz:443": false, + }, + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:abc": {Tun: true}, + }, + }, + wantWireIngress: true, // true if there is any AllowFunnel block + wantControlUpdate: true, + }, } for _, tt := 
range tests { t.Run(tt.name, func(t *testing.T) { + t.Parallel() b := newTestLocalBackend(t) b.hostinfo = tt.hi + if tt.hasPreviousSC { + b.mu.Lock() + b.serveConfig = previousSC.View() + b.hostinfo.ServicesHash = b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) + b.mu.Unlock() + } b.serveConfig = tt.sc.View() allDone := make(chan bool, 1) defer b.goTracker.AddDoneCallback(func() { @@ -5256,7 +5313,7 @@ func TestUpdateIngressLocked(t *testing.T) { })() was := b.goTracker.StartedGoroutines() - b.updateIngressLocked() + b.updateIngressAndServiceHashLocked(prefs) if tt.hi != nil { if tt.hi.IngressEnabled != tt.wantIngress { @@ -5265,6 +5322,12 @@ func TestUpdateIngressLocked(t *testing.T) { if tt.hi.WireIngress != tt.wantWireIngress { t.Errorf("WireIngress = %v, want %v", tt.hi.WireIngress, tt.wantWireIngress) } + b.mu.Lock() + svcHash := b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) + b.mu.Unlock() + if tt.hi.ServicesHash != svcHash { + t.Errorf("ServicesHash = %v, want %v", tt.hi.ServicesHash, svcHash) + } } startedGoroutine := b.goTracker.StartedGoroutines() != was From 5716d0977d9ae0e3a5e4bc2071c01f8926c87912 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Fri, 6 Jun 2025 15:53:30 +0100 Subject: [PATCH 0924/1708] health: prefix Warnables received from the control plane Updates tailscale/corp#27759 Signed-off-by: James Sanderson --- control/controlclient/map.go | 2 +- control/controlclient/map_test.go | 12 ++++++------ health/health_test.go | 18 +++++++++--------- health/state.go | 2 +- ipn/ipnlocal/local_test.go | 26 +++++++++++++++++--------- 5 files changed, 34 insertions(+), 26 deletions(-) diff --git a/control/controlclient/map.go b/control/controlclient/map.go index f346e19d4..22cea5aca 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -853,7 +853,7 @@ func (ms *mapSession) netmap() *netmap.NetworkMap { } else if len(ms.lastHealth) > 0 { // Convert all ms.lastHealth to the new [netmap.NetworkMap.DisplayMessages] for _, h := range ms.lastHealth { - id := "control-health-" + strhash(h) // Unique ID in case there is more than one health message + id := "health-" + strhash(h) // Unique ID in case there is more than one health message mak.Set(&msgs, tailcfg.DisplayMessageID(id), tailcfg.DisplayMessage{ Title: "Coordination server reports an issue", Severity: tailcfg.SeverityMedium, diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index 013640f47..7e42f6f6a 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -1340,14 +1340,14 @@ func TestNetmapHealthIntegration(t *testing.T) { ht.SetControlHealth(nm.DisplayMessages) want := map[health.WarnableCode]health.UnhealthyState{ - "control-health-c0719e9a8d5d838d861dc6f675c899d2b309a3a65bb9fe6b11e5afcbf9a2c0b1": { - WarnableCode: "control-health-c0719e9a8d5d838d861dc6f675c899d2b309a3a65bb9fe6b11e5afcbf9a2c0b1", + "control-health.health-c0719e9a8d5d838d861dc6f675c899d2b309a3a65bb9fe6b11e5afcbf9a2c0b1": { + WarnableCode: "control-health.health-c0719e9a8d5d838d861dc6f675c899d2b309a3a65bb9fe6b11e5afcbf9a2c0b1", Title: "Coordination server reports an issue", Severity: health.SeverityMedium, Text: "The coordination server is reporting a health issue: Test message", }, - "control-health-1dc7017a73a3c55c0d6a8423e3813c7ab6562d9d3064c2ec6ac7822f61b1db9c": { - WarnableCode: "control-health-1dc7017a73a3c55c0d6a8423e3813c7ab6562d9d3064c2ec6ac7822f61b1db9c", + 
"control-health.health-1dc7017a73a3c55c0d6a8423e3813c7ab6562d9d3064c2ec6ac7822f61b1db9c": { + WarnableCode: "control-health.health-1dc7017a73a3c55c0d6a8423e3813c7ab6562d9d3064c2ec6ac7822f61b1db9c", Title: "Coordination server reports an issue", Severity: health.SeverityMedium, Text: "The coordination server is reporting a health issue: Another message", @@ -1401,8 +1401,8 @@ func TestNetmapDisplayMessageIntegration(t *testing.T) { } want := map[health.WarnableCode]health.UnhealthyState{ - "test-message": { - WarnableCode: "test-message", + "control-health.test-message": { + WarnableCode: "control-health.test-message", Title: "Testing", Text: "This is a test message", Severity: health.SeverityHigh, diff --git a/health/health_test.go b/health/health_test.go index aa519e92c..0f1140f62 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -468,14 +468,14 @@ func TestControlHealth(t *testing.T) { baseStrs := ht.Strings() msgs := map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "control-health-test": { + "test": { Title: "Control health message", Text: "Extra help.", }, - "control-health-title": { + "title": { Title: "Control health title only", }, - "control-health-with-action": { + "with-action": { Title: "Control health message", Text: "Extra help.", PrimaryAction: &tailcfg.DisplayMessageAction{ @@ -488,19 +488,19 @@ func TestControlHealth(t *testing.T) { t.Run("Warnings", func(t *testing.T) { wantWarns := map[WarnableCode]UnhealthyState{ - "control-health-test": { - WarnableCode: "control-health-test", + "control-health.test": { + WarnableCode: "control-health.test", Severity: SeverityMedium, Title: "Control health message", Text: "Extra help.", }, - "control-health-title": { - WarnableCode: "control-health-title", + "control-health.title": { + WarnableCode: "control-health.title", Severity: SeverityMedium, Title: "Control health title only", }, - "control-health-with-action": { - WarnableCode: "control-health-with-action", + "control-health.with-action": { + WarnableCode: "control-health.with-action", Severity: SeverityMedium, Title: "Control health message", Text: "Extra help.", diff --git a/health/state.go b/health/state.go index cec967931..b5e6a8a38 100644 --- a/health/state.go +++ b/health/state.go @@ -112,7 +112,7 @@ func (t *Tracker) CurrentState() *State { for id, msg := range t.lastNotifiedControlMessages { state := UnhealthyState{ - WarnableCode: WarnableCode(id), + WarnableCode: WarnableCode("control-health." 
+ id), Severity: severityFromTailcfg(msg.Severity), Title: msg.Title, Text: msg.Text, diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index f14ac037c..281d0e9c4 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -8,6 +8,7 @@ import ( "encoding/json" "errors" "fmt" + "maps" "math" "net" "net/http" @@ -5422,10 +5423,11 @@ func TestDisplayMessages(t *testing.T) { }) state := ht.CurrentState() - _, ok := state.Warnings["test-message"] + wantID := health.WarnableCode("control-health.test-message") + _, ok := state.Warnings[wantID] if !ok { - t.Error("no warning found with id 'test-message'") + t.Errorf("no warning found with id %q", wantID) } } @@ -5455,14 +5457,15 @@ func TestDisplayMessagesURLFilter(t *testing.T) { }) state := ht.CurrentState() - got, ok := state.Warnings["test-message"] + wantID := health.WarnableCode("control-health.test-message") + got, ok := state.Warnings[wantID] if !ok { - t.Fatal("no warning found with id 'test-message'") + t.Fatalf("no warning found with id %q", wantID) } want := health.UnhealthyState{ - WarnableCode: "test-message", + WarnableCode: wantID, Title: "Testing", Severity: health.SeverityHigh, } @@ -5494,12 +5497,14 @@ func TestDisplayMessageIPNBus(t *testing.T) { }, } + wantID := health.WarnableCode("control-health.test-message") + for _, tt := range []test{ { name: "older-client-no-actions", mask: 0, wantWarning: health.UnhealthyState{ - WarnableCode: "test-message", + WarnableCode: wantID, Severity: health.SeverityMedium, Title: "Message title", Text: "Message text. Learn more: https://example.com", // PrimaryAction appended to text @@ -5510,7 +5515,7 @@ func TestDisplayMessageIPNBus(t *testing.T) { name: "new-client-with-actions", mask: ipn.NotifyHealthActions, wantWarning: health.UnhealthyState{ - WarnableCode: "test-message", + WarnableCode: wantID, Severity: health.SeverityMedium, Title: "Message title", Text: "Message text.", @@ -5530,17 +5535,20 @@ func TestDisplayMessageIPNBus(t *testing.T) { ipnWatcher := newNotificationWatcher(t, lb, nil) ipnWatcher.watch(tt.mask, []wantedNotification{{ - name: "test", + name: fmt.Sprintf("warning with ID %q", wantID), cond: func(_ testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool { if n.Health == nil { return false } - got, ok := n.Health.Warnings["test-message"] + got, ok := n.Health.Warnings[wantID] if ok { if diff := cmp.Diff(tt.wantWarning, got); diff != "" { t.Errorf("unexpected warning details (-want/+got):\n%s", diff) return true // we failed the test so tell the watcher we've seen what we need to to stop it waiting } + } else { + got := slices.Collect(maps.Keys(n.Health.Warnings)) + t.Logf("saw warnings: %v", got) } return ok }, From 4456f77af71367e52565a76dd58a796fa108e3f8 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Mon, 9 Jun 2025 11:13:03 +0100 Subject: [PATCH 0925/1708] cmd/k8s-operator: explicitly set tcp on VIPService port configuration for Ingress with ProxyGroup (#16199) Updates tailscale/corp#24795 Signed-off-by: chaosinthecrd --- cmd/k8s-operator/ingress-for-pg.go | 4 ++-- cmd/k8s-operator/ingress-for-pg_test.go | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 4779014f3..66d74292b 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -318,9 +318,9 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin tags = strings.Split(tstr, ",") } - tsSvcPorts := []string{"443"} // 
always 443 for Ingress + tsSvcPorts := []string{"tcp:443"} // always 443 for Ingress if isHTTPEndpointEnabled(ing) { - tsSvcPorts = append(tsSvcPorts, "80") + tsSvcPorts = append(tsSvcPorts, "tcp:80") } tsSvc := &tailscale.VIPService{ diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 9ce90f771..b487d660c 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -68,7 +68,7 @@ func TestIngressPGReconciler(t *testing.T) { populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:my-svc", false) - verifyTailscaleService(t, ft, "svc:my-svc", []string{"443"}) + verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"}) verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) // Verify that Role and RoleBinding have been created for the first Ingress. @@ -130,7 +130,7 @@ func TestIngressPGReconciler(t *testing.T) { populateTLSSecret(context.Background(), fc, "test-pg", "my-other-svc.ts.net") expectReconciled(t, ingPGR, "default", "my-other-ingress") verifyServeConfig(t, fc, "svc:my-other-svc", false) - verifyTailscaleService(t, ft, "svc:my-other-svc", []string{"443"}) + verifyTailscaleService(t, ft, "svc:my-other-svc", []string{"tcp:443"}) // Verify that Role and RoleBinding have been created for the first Ingress. // Do not verify the cert Secret as that was already verified implicitly above. @@ -139,7 +139,7 @@ func TestIngressPGReconciler(t *testing.T) { // Verify first Ingress is still working verifyServeConfig(t, fc, "svc:my-svc", false) - verifyTailscaleService(t, ft, "svc:my-svc", []string{"443"}) + verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"}) verifyTailscaledConfig(t, fc, []string{"svc:my-svc", "svc:my-other-svc"}) @@ -244,7 +244,7 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:my-svc", false) - verifyTailscaleService(t, ft, "svc:my-svc", []string{"443"}) + verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"}) verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) // Update the Ingress hostname and make sure the original Tailscale Service is deleted. 
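As a minimal standalone sketch (not part of the diff above), the rule these hunks encode is that VIPService port specs now carry an explicit protocol prefix: "tcp:443" is always present, and "tcp:80" is appended only when the HTTP endpoint is enabled. vipServicePorts below is an illustrative helper, not the operator's code, which inlines this logic in maybeProvision using isHTTPEndpointEnabled(ing).

package main

import "fmt"

// vipServicePorts sketches how the reconciler now names VIPService ports with
// an explicit protocol: HTTPS is always exposed, HTTP only when enabled.
func vipServicePorts(httpEnabled bool) []string {
	ports := []string{"tcp:443"}
	if httpEnabled {
		ports = append(ports, "tcp:80")
	}
	return ports
}

func main() {
	fmt.Println(vipServicePorts(false)) // [tcp:443]
	fmt.Println(vipServicePorts(true))  // [tcp:443 tcp:80]
}
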
@@ -255,7 +255,7 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { populateTLSSecret(context.Background(), fc, "test-pg", "updated-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:updated-svc", false) - verifyTailscaleService(t, ft, "svc:updated-svc", []string{"443"}) + verifyTailscaleService(t, ft, "svc:updated-svc", []string{"tcp:443"}) verifyTailscaledConfig(t, fc, []string{"svc:updated-svc"}) _, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName("svc:my-svc")) @@ -476,7 +476,7 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { expectReconciled(t, ingPGR, "default", "test-ingress") populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") - verifyTailscaleService(t, ft, "svc:my-svc", []string{"80", "443"}) + verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:80", "tcp:443"}) verifyServeConfig(t, fc, "svc:my-svc", true) // Verify Ingress status @@ -529,7 +529,7 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { // Verify reconciliation after removing HTTP expectReconciled(t, ingPGR, "default", "test-ingress") - verifyTailscaleService(t, ft, "svc:my-svc", []string{"443"}) + verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"}) verifyServeConfig(t, fc, "svc:my-svc", false) // Verify Ingress status From 67b1693c131e9cef21d7e6a0491905a179d46eb2 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 9 Jun 2025 13:17:14 -0700 Subject: [PATCH 0926/1708] wgengine/magicsock: enable setting relay epAddr's as bestAddr (#16229) relayManager can now hand endpoint a relay epAddr for it to consider as bestAddr. endpoint and Conn disco ping/pong handling are now VNI-aware. Updates tailscale/corp#27502 Updates tailscale/corp#29422 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 39 ++++++++-- wgengine/magicsock/magicsock.go | 120 ++++++++++++++++++----------- wgengine/magicsock/peermap.go | 33 +++++++- wgengine/magicsock/peermap_test.go | 36 +++++++++ wgengine/magicsock/relaymanager.go | 45 +++++++++-- 5 files changed, 212 insertions(+), 61 deletions(-) create mode 100644 wgengine/magicsock/peermap_test.go diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index faae49a97..bf7758fb8 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -99,6 +99,27 @@ type endpoint struct { relayCapable bool // whether the node is capable of speaking via a [tailscale.com/net/udprelay.Server] } +// relayEndpointReady determines whether the given relay addr should be +// installed as de.bestAddr. It is only called by [relayManager] once it has +// determined addr is functional via [disco.Pong] reception. +func (de *endpoint) relayEndpointReady(addr epAddr, latency time.Duration) { + de.c.mu.Lock() + defer de.c.mu.Unlock() + de.mu.Lock() + defer de.mu.Unlock() + + maybeBetter := addrQuality{addr, latency, pingSizeToPktLen(0, addr)} + if !betterAddr(maybeBetter, de.bestAddr) { + return + } + + // Promote maybeBetter to bestAddr. 
+ // TODO(jwhited): collapse path change logging with endpoint.handlePongConnLocked() + de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v", de.publicKey.ShortString(), de.discoShort(), maybeBetter.epAddr, maybeBetter.wireMTU) + de.setBestAddrLocked(maybeBetter) + de.c.peerMap.setNodeKeyForEpAddr(addr, de.publicKey) +} + func (de *endpoint) setBestAddrLocked(v addrQuality) { if v.epAddr != de.bestAddr.epAddr { de.probeUDPLifetime.resetCycleEndpointLocked() @@ -1575,11 +1596,10 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd de.mu.Lock() defer de.mu.Unlock() - if src.vni.isSet() { - // TODO(jwhited): fall through once [relayManager] is able to set an - // [epAddr] as de.bestAddr. We do not need to reference any - // [endpointState] for Geneve-encapsulated disco, we store nothing - // about them there. + if src.vni.isSet() && src != de.bestAddr.epAddr { + // "src" is not our bestAddr, but [relayManager] might be in the + // middle of probing it, awaiting pong reception. Make it aware. + de.c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(m, di, src) return false } @@ -1605,7 +1625,9 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd now := mono.Now() latency := now.Sub(sp.at) - if !isDerp { + if !isDerp && !src.vni.isSet() { + // Note: we check vni.isSet() as relay [epAddr]'s are not stored in + // endpointState, they are either de.bestAddr or not. st, ok := de.endpointState[sp.to.ap] if !ok { // This is no longer an endpoint we care about. @@ -1643,6 +1665,11 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd if !isDerp { thisPong := addrQuality{sp.to, latency, tstun.WireMTU(pingSizeToPktLen(sp.size, sp.to))} if betterAddr(thisPong, de.bestAddr) { + if src.vni.isSet() { + // This would be unexpected. Switching to a Geneve-encapsulated + // path should only happen in de.relayEndpointReady(). + de.c.logf("[unexpected] switching to Geneve-encapsulated path %v from %v", thisPong, de.bestAddr) + } de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v tx=%x", de.publicKey.ShortString(), de.discoShort(), sp.to, thisPong.wireMTU, m.TxID[:6]) de.debugUpdates.Add(EndpointChange{ When: time.Now(), diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index c446cff2c..2e2882110 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2132,28 +2132,10 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN di.lastPingTime = time.Now() isDerp := src.ap.Addr() == tailcfg.DerpMagicIPAddr - if src.vni.isSet() { - // TODO(jwhited): check for matching [endpoint.bestAddr] once that data - // structure is VNI-aware and [relayManager] can mutate it. We do not - // need to reference any [endpointState] for Geneve-encapsulated disco, - // we store nothing about them there. - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(dm, di, src) - return - } - - // If we can figure out with certainty which node key this disco - // message is for, eagerly update our IP:port<>node and disco<>node - // mappings to make p2p path discovery faster in simple - // cases. Without this, disco would still work, but would be - // reliant on DERP call-me-maybe to establish the disco<>node - // mapping, and on subsequent disco handlePongConnLocked to establish - // the IP:port<>disco mapping. 
- if nk, ok := c.unambiguousNodeKeyOfPingLocked(dm, di.discoKey, derpNodeSrc); ok { - if !isDerp { - c.peerMap.setNodeKeyForEpAddr(src, nk) - } - } - + // numNodes tracks how many nodes (node keys) are associated with the disco + // key tied to this inbound ping. Multiple nodes may share the same disco + // key in the case of node sharing and users switching accounts. + var numNodes int // If we got a ping over DERP, then derpNodeSrc is non-zero and we reply // over DERP (in which case ipDst is also a DERP address). // But if the ping was over UDP (ipDst is not a DERP address), then dstKey @@ -2161,35 +2143,81 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN // a dstKey if the dst ip:port is DERP. dstKey := derpNodeSrc - // Remember this route if not present. - var numNodes int - var dup bool - if isDerp { - if ep, ok := c.peerMap.endpointForNodeKey(derpNodeSrc); ok { - if ep.addCandidateEndpoint(src.ap, dm.TxID) { - return + switch { + case src.vni.isSet(): + if isDerp { + c.logf("[unexpected] got Geneve-encapsulated disco ping from %v/%v over DERP", src, derpNodeSrc) + return + } + + var bestEpAddr epAddr + var discoKey key.DiscoPublic + ep, ok := c.peerMap.endpointForEpAddr(src) + if ok { + ep.mu.Lock() + bestEpAddr = ep.bestAddr.epAddr + ep.mu.Unlock() + disco := ep.disco.Load() + if disco != nil { + discoKey = disco.key } + } + + if src == bestEpAddr && discoKey == di.discoKey { + // We have an associated endpoint with src as its bestAddr. Set + // numNodes so we TX a pong further down. numNodes = 1 + } else { + // We have no [endpoint] in the [peerMap] for this relay [epAddr] + // using it as a bestAddr. [relayManager] might be in the middle of + // probing it or attempting to set it as best via + // [endpoint.relayEndpointReady()]. Make [relayManager] aware. + c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(dm, di, src) + return } - } else { - c.peerMap.forEachEndpointWithDiscoKey(di.discoKey, func(ep *endpoint) (keepGoing bool) { - if ep.addCandidateEndpoint(src.ap, dm.TxID) { - dup = true - return false - } - numNodes++ - if numNodes == 1 && dstKey.IsZero() { - dstKey = ep.publicKey + default: // no VNI + // If we can figure out with certainty which node key this disco + // message is for, eagerly update our [epAddr]<>node and disco<>node + // mappings to make p2p path discovery faster in simple + // cases. Without this, disco would still work, but would be + // reliant on DERP call-me-maybe to establish the disco<>node + // mapping, and on subsequent disco handlePongConnLocked to establish + // the IP:port<>disco mapping. + if nk, ok := c.unambiguousNodeKeyOfPingLocked(dm, di.discoKey, derpNodeSrc); ok { + if !isDerp { + c.peerMap.setNodeKeyForEpAddr(src, nk) } - return true - }) - if dup { - return } - if numNodes > 1 { - // Zero it out if it's ambiguous, so sendDiscoMessage logging - // isn't confusing. - dstKey = key.NodePublic{} + + // Remember this route if not present. 
+ var dup bool + if isDerp { + if ep, ok := c.peerMap.endpointForNodeKey(derpNodeSrc); ok { + if ep.addCandidateEndpoint(src.ap, dm.TxID) { + return + } + numNodes = 1 + } + } else { + c.peerMap.forEachEndpointWithDiscoKey(di.discoKey, func(ep *endpoint) (keepGoing bool) { + if ep.addCandidateEndpoint(src.ap, dm.TxID) { + dup = true + return false + } + numNodes++ + if numNodes == 1 && dstKey.IsZero() { + dstKey = ep.publicKey + } + return true + }) + if dup { + return + } + if numNodes > 1 { + // Zero it out if it's ambiguous, so sendDiscoMessage logging + // isn't confusing. + dstKey = key.NodePublic{} + } } } diff --git a/wgengine/magicsock/peermap.go b/wgengine/magicsock/peermap.go index 04d5de8c9..838905396 100644 --- a/wgengine/magicsock/peermap.go +++ b/wgengine/magicsock/peermap.go @@ -36,6 +36,18 @@ type peerMap struct { byEpAddr map[epAddr]*peerInfo byNodeID map[tailcfg.NodeID]*peerInfo + // relayEpAddrByNodeKey ensures we only hold a single relay + // [epAddr] (vni.isSet()) for a given node key in byEpAddr, vs letting them + // grow unbounded. Relay [epAddr]'s are dynamically created by + // [relayManager] during path discovery, and are only useful to track in + // peerMap so long as they are the endpoint.bestAddr. [relayManager] handles + // all creation and initial probing responsibilities otherwise, and it does + // not depend on [peerMap]. + // + // Note: This doesn't address unbounded growth of non-relay epAddr's in + // byEpAddr. That issue is being tracked in http://go/corp/29422. + relayEpAddrByNodeKey map[key.NodePublic]epAddr + // nodesOfDisco contains the set of nodes that are using a // DiscoKey. Usually those sets will be just one node. nodesOfDisco map[key.DiscoPublic]set.Set[key.NodePublic] @@ -43,10 +55,11 @@ type peerMap struct { func newPeerMap() peerMap { return peerMap{ - byNodeKey: map[key.NodePublic]*peerInfo{}, - byEpAddr: map[epAddr]*peerInfo{}, - byNodeID: map[tailcfg.NodeID]*peerInfo{}, - nodesOfDisco: map[key.DiscoPublic]set.Set[key.NodePublic]{}, + byNodeKey: map[key.NodePublic]*peerInfo{}, + byEpAddr: map[epAddr]*peerInfo{}, + byNodeID: map[tailcfg.NodeID]*peerInfo{}, + relayEpAddrByNodeKey: map[key.NodePublic]epAddr{}, + nodesOfDisco: map[key.DiscoPublic]set.Set[key.NodePublic]{}, } } @@ -171,8 +184,19 @@ func (m *peerMap) setNodeKeyForEpAddr(addr epAddr, nk key.NodePublic) { if pi := m.byEpAddr[addr]; pi != nil { delete(pi.epAddrs, addr) delete(m.byEpAddr, addr) + if addr.vni.isSet() { + delete(m.relayEpAddrByNodeKey, pi.ep.publicKey) + } } if pi, ok := m.byNodeKey[nk]; ok { + if addr.vni.isSet() { + relay, ok := m.relayEpAddrByNodeKey[nk] + if ok { + delete(pi.epAddrs, relay) + delete(m.byEpAddr, relay) + } + m.relayEpAddrByNodeKey[nk] = addr + } pi.epAddrs.Add(addr) m.byEpAddr[addr] = pi } @@ -204,4 +228,5 @@ func (m *peerMap) deleteEndpoint(ep *endpoint) { for ip := range pi.epAddrs { delete(m.byEpAddr, ip) } + delete(m.relayEpAddrByNodeKey, ep.publicKey) } diff --git a/wgengine/magicsock/peermap_test.go b/wgengine/magicsock/peermap_test.go new file mode 100644 index 000000000..52504272f --- /dev/null +++ b/wgengine/magicsock/peermap_test.go @@ -0,0 +1,36 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package magicsock + +import ( + "net/netip" + "testing" + + "tailscale.com/types/key" +) + +func Test_peerMap_oneRelayEpAddrPerNK(t *testing.T) { + pm := newPeerMap() + nk := key.NewNode().Public() + ep := &endpoint{ + nodeID: 1, + publicKey: nk, + } + ed := &endpointDisco{key: key.NewDisco().Public()} + 
ep.disco.Store(ed) + pm.upsertEndpoint(ep, key.DiscoPublic{}) + vni := virtualNetworkID{} + vni.set(1) + relayEpAddrA := epAddr{ap: netip.MustParseAddrPort("127.0.0.1:1"), vni: vni} + relayEpAddrB := epAddr{ap: netip.MustParseAddrPort("127.0.0.1:2"), vni: vni} + pm.setNodeKeyForEpAddr(relayEpAddrA, nk) + pm.setNodeKeyForEpAddr(relayEpAddrB, nk) + if len(pm.byEpAddr) != 1 { + t.Fatalf("expected 1 epAddr in byEpAddr, got: %d", len(pm.byEpAddr)) + } + got := pm.relayEpAddrByNodeKey[nk] + if got != relayEpAddrB { + t.Fatalf("expected relay epAddr %v, got: %v", relayEpAddrB, got) + } +} diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 177eed355..5cb43cd85 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -24,6 +24,11 @@ import ( // relayManager manages allocation, handshaking, and initial probing (disco // ping/pong) of [tailscale.com/net/udprelay.Server] endpoints. The zero value // is ready for use. +// +// [relayManager] methods can be called by [Conn] and [endpoint] while their .mu +// mutexes are held. Therefore, in order to avoid deadlocks, [relayManager] must +// never attempt to acquire those mutexes, including synchronous calls back +// towards [Conn] or [endpoint] methods that acquire them. type relayManager struct { initOnce sync.Once @@ -164,6 +169,7 @@ func (r *relayManager) runLoop() { } type relayHandshakeDiscoMsgEvent struct { + conn *Conn // for access to [Conn] if there is no associated [relayHandshakeWork] msg disco.Message disco key.DiscoPublic from netip.AddrPort @@ -366,7 +372,7 @@ func (r *relayManager) handleRxHandshakeDiscoMsgRunLoop(event relayHandshakeDisc ok bool ) apv := addrPortVNI{event.from, event.vni} - switch event.msg.(type) { + switch msg := event.msg.(type) { case *disco.BindUDPRelayEndpointChallenge: work, ok = r.handshakeWorkByServerDiscoVNI[serverDiscoVNI{event.disco, event.vni}] if !ok { @@ -392,7 +398,29 @@ func (r *relayManager) handleRxHandshakeDiscoMsgRunLoop(event relayHandshakeDisc // Update state so that future ping/pong will route to 'work'. r.handshakeWorkAwaitingPong[work] = apv r.addrPortVNIToHandshakeWork[apv] = work - case *disco.Ping, *disco.Pong: + case *disco.Ping: + // Always TX a pong. We might not have any associated work if ping + // reception raced with our call to [endpoint.relayEndpointReady()], so + // err on the side of enabling the remote side to use this path. + // + // Conn.handlePingLocked() makes efforts to suppress duplicate pongs + // where the same ping can be received both via raw socket and UDP + // socket on Linux. We make no such efforts here as the raw socket BPF + // program does not support Geneve-encapsulated disco, and is also + // disabled by default. + vni := virtualNetworkID{} + vni.set(event.vni) + go event.conn.sendDiscoMessage(epAddr{ap: event.from, vni: vni}, key.NodePublic{}, event.disco, &disco.Pong{ + TxID: msg.TxID, + Src: event.from, + }, discoVerboseLog) + + work, ok = r.addrPortVNIToHandshakeWork[apv] + if !ok { + // No outstanding work tied to this [addrPortVNI], return early. + return + } + case *disco.Pong: work, ok = r.addrPortVNIToHandshakeWork[apv] if !ok { // No outstanding work tied to this [addrPortVNI], discard. @@ -436,9 +464,13 @@ func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshak return } // This relay endpoint is functional. - // TODO(jwhited): Set it on done.work.ep.bestAddr if it is a betterAddr(). 
- // We also need to conn.peerMap.setNodeKeyForEpAddr(), and ensure we clean - // it up when bestAddr changes, too. + vni := virtualNetworkID{} + vni.set(done.work.se.VNI) + addr := epAddr{ap: done.pongReceivedFrom, vni: vni} + // ep.relayEndpointReady() must be called in a new goroutine to prevent + // deadlocks as it acquires [endpoint] & [Conn] mutexes. See [relayManager] + // docs for details. + go done.work.ep.relayEndpointReady(addr, done.latency) } func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelayServerEndpointEvent) { @@ -613,6 +645,9 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { // latency, so send another ping. Since the handshake is // complete we do not need to send an answer in front of this // one. + // + // We don't need to TX a pong, that was already handled for us + // in handleRxHandshakeDiscoMsgRunLoop(). txPing(msgEvent.from, nil) case *disco.Pong: at, ok := sentPingAt[msg.TxID] From c343bffa72cacc583d2c39dafa9f51ded8a106f7 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 9 Jun 2025 14:49:00 -0700 Subject: [PATCH 0927/1708] wgengine/relaymanager: don't start runLoop() on init() (#16231) This is simply for consistency with relayManagerInputEvent(), which should be the sole launcher of runLoop(). Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/relaymanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 5cb43cd85..fd3f19dfb 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -206,7 +206,7 @@ func (r *relayManager) init() { r.newServerEndpointCh = make(chan newRelayServerEndpointEvent) r.rxHandshakeDiscoMsgCh = make(chan relayHandshakeDiscoMsgEvent) r.runLoopStoppedCh = make(chan struct{}, 1) - go r.runLoop() + r.runLoopStoppedCh <- struct{}{} }) } From 9501f66985e9ac375cba7cf427453cc66a55dae8 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 9 Jun 2025 15:37:58 -0700 Subject: [PATCH 0928/1708] wgengine/magicsock: don't cancel in-progress relayManager work (#16233) It might complete, interrupting it reduces the chances of establishing a relay path. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/relaymanager.go | 62 +++++++++++++++--------------- 1 file changed, 30 insertions(+), 32 deletions(-) diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index fd3f19dfb..2b636dc57 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -112,12 +112,21 @@ type relayEndpointHandshakeWorkDoneEvent struct { latency time.Duration // only relevant if pongReceivedFrom.IsValid() } -// activeWorkRunLoop returns true if there is outstanding allocation or -// handshaking work, otherwise it returns false. -func (r *relayManager) activeWorkRunLoop() bool { +// hasActiveWorkRunLoop returns true if there is outstanding allocation or +// handshaking work for any endpoint, otherwise it returns false. +func (r *relayManager) hasActiveWorkRunLoop() bool { return len(r.allocWorkByEndpoint) > 0 || len(r.handshakeWorkByEndpointByServerDisco) > 0 } +// hasActiveWorkForEndpointRunLoop returns true if there is outstanding +// allocation or handshaking work for the provided endpoint, otherwise it +// returns false. 
+func (r *relayManager) hasActiveWorkForEndpointRunLoop(ep *endpoint) bool { + _, handshakeWork := r.handshakeWorkByEndpointByServerDisco[ep] + _, allocWork := r.allocWorkByEndpoint[ep] + return handshakeWork || allocWork +} + // runLoop is a form of event loop. It ensures exclusive access to most of // [relayManager] state. func (r *relayManager) runLoop() { @@ -128,9 +137,10 @@ func (r *relayManager) runLoop() { for { select { case ep := <-r.allocateHandshakeCh: - r.stopWorkRunLoop(ep, stopHandshakeWorkOnlyKnownServers) - r.allocateAllServersRunLoop(ep) - if !r.activeWorkRunLoop() { + if !r.hasActiveWorkForEndpointRunLoop(ep) { + r.allocateAllServersRunLoop(ep) + } + if !r.hasActiveWorkRunLoop() { return } case done := <-r.allocateWorkDoneCh: @@ -141,27 +151,27 @@ func (r *relayManager) runLoop() { // overwrite pre-existing keys. delete(r.allocWorkByEndpoint, done.work.ep) } - if !r.activeWorkRunLoop() { + if !r.hasActiveWorkRunLoop() { return } case ep := <-r.cancelWorkCh: - r.stopWorkRunLoop(ep, stopHandshakeWorkAllServers) - if !r.activeWorkRunLoop() { + r.stopWorkRunLoop(ep) + if !r.hasActiveWorkRunLoop() { return } case newServerEndpoint := <-r.newServerEndpointCh: r.handleNewServerEndpointRunLoop(newServerEndpoint) - if !r.activeWorkRunLoop() { + if !r.hasActiveWorkRunLoop() { return } case done := <-r.handshakeWorkDoneCh: r.handleHandshakeWorkDoneRunLoop(done) - if !r.activeWorkRunLoop() { + if !r.hasActiveWorkRunLoop() { return } case discoMsgEvent := <-r.rxHandshakeDiscoMsgCh: r.handleRxHandshakeDiscoMsgRunLoop(discoMsgEvent) - if !r.activeWorkRunLoop() { + if !r.hasActiveWorkRunLoop() { return } } @@ -317,8 +327,8 @@ func relayManagerInputEvent[T any](r *relayManager, ctx context.Context, eventCh } // allocateAndHandshakeAllServers kicks off allocation and handshaking of relay -// endpoints for 'ep' on all known relay servers, canceling any existing -// in-progress work. +// endpoints for 'ep' on all known relay servers if there is no outstanding +// work. func (r *relayManager) allocateAndHandshakeAllServers(ep *endpoint) { relayManagerInputEvent(r, nil, &r.allocateHandshakeCh, ep) } @@ -328,18 +338,9 @@ func (r *relayManager) stopWork(ep *endpoint) { relayManagerInputEvent(r, nil, &r.cancelWorkCh, ep) } -// stopHandshakeWorkFilter represents filters for handshake work cancellation -type stopHandshakeWorkFilter bool - -const ( - stopHandshakeWorkAllServers stopHandshakeWorkFilter = false - stopHandshakeWorkOnlyKnownServers = true -) - // stopWorkRunLoop cancels & clears outstanding allocation and handshaking -// work for 'ep'. Handshake work cancellation is subject to the filter supplied -// in 'f'. -func (r *relayManager) stopWorkRunLoop(ep *endpoint, f stopHandshakeWorkFilter) { +// work for 'ep'. 
+func (r *relayManager) stopWorkRunLoop(ep *endpoint) { allocWork, ok := r.allocWorkByEndpoint[ep] if ok { allocWork.cancel() @@ -348,13 +349,10 @@ func (r *relayManager) stopWorkRunLoop(ep *endpoint, f stopHandshakeWorkFilter) } byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[ep] if ok { - for disco, handshakeWork := range byServerDisco { - _, knownServer := r.serversByDisco[disco] - if knownServer || f == stopHandshakeWorkAllServers { - handshakeWork.cancel() - done := <-handshakeWork.doneCh - r.handleHandshakeWorkDoneRunLoop(done) - } + for _, handshakeWork := range byServerDisco { + handshakeWork.cancel() + done := <-handshakeWork.doneCh + r.handleHandshakeWorkDoneRunLoop(done) } } } From cc8dc9e4dcc83fced123d0268e62d4530c515ac6 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 9 Jun 2025 16:12:12 -0700 Subject: [PATCH 0929/1708] types/netmap: fix NodeMutationEndpoints docs typo (#16234) Updates #cleanup Signed-off-by: Jordan Whited --- types/netmap/nodemut.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/netmap/nodemut.go b/types/netmap/nodemut.go index ccbdeae3f..f4de1bf0b 100644 --- a/types/netmap/nodemut.go +++ b/types/netmap/nodemut.go @@ -37,7 +37,7 @@ func (m NodeMutationDERPHome) Apply(n *tailcfg.Node) { n.HomeDERP = m.DERPRegion } -// NodeMutation is a NodeMutation that says a node's endpoints have changed. +// NodeMutationEndpoints is a NodeMutation that says a node's endpoints have changed. type NodeMutationEndpoints struct { mutatingNodeID Endpoints []netip.AddrPort From db34cdcfe7d4825ed8a7edec3f6c0164b3c85b5a Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Thu, 22 May 2025 20:12:59 +0100 Subject: [PATCH 0930/1708] cmd/tailscale/cli: add a risk message about rp_filter We already present a health warning about this, but it is easy to miss on a server when blackholing traffic makes it unreachable. In addition to a health warning, present a risk message when exit node is enabled. Example: ``` $ tailscale up --exit-node=lizard The following issues on your machine will likely make usage of exit nodes impossible: - interface "ens4" has strict reverse-path filtering enabled - interface "tailscale0" has strict reverse-path filtering enabled Please set rp_filter=2 instead of rp_filter=1; see https://github.com/tailscale/tailscale/issues/3310 To skip this warning, use --accept-risk=linux-strict-rp-filter $ ``` Updates #3310 Signed-off-by: Anton Tolchanov --- client/local/local.go | 19 ++++ cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/cli/risks.go | 21 ++++- cmd/tailscale/cli/set.go | 3 + cmd/tailscale/cli/up.go | 3 + cmd/tailscaled/depaware.txt | 2 +- health/healthmsg/healthmsg.go | 1 + ipn/ipnlocal/local.go | 3 +- ipn/localapi/localapi.go | 158 ++++++++++++++++++++-------------- tsnet/depaware.txt | 2 +- 10 files changed, 143 insertions(+), 71 deletions(-) diff --git a/client/local/local.go b/client/local/local.go index 0e4d495d3..7a3a4b703 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -788,6 +788,25 @@ func (lc *Client) CheckUDPGROForwarding(ctx context.Context) error { return nil } +// CheckReversePathFiltering asks the local Tailscale daemon whether strict +// reverse path filtering is enabled, which would break exit node usage on Linux. 
+func (lc *Client) CheckReversePathFiltering(ctx context.Context) error { + body, err := lc.get200(ctx, "/localapi/v0/check-reverse-path-filtering") + if err != nil { + return err + } + var jres struct { + Warning string + } + if err := json.Unmarshal(body, &jres); err != nil { + return fmt.Errorf("invalid JSON from check-reverse-path-filtering: %w", err) + } + if jres.Warning != "" { + return errors.New(jres.Warning) + } + return nil +} + // SetUDPGROForwarding enables UDP GRO forwarding for the main interface of this // node. This can be done to improve performance of tailnet nodes acting as exit // nodes or subnet routers. diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 2e467843a..36c5184c3 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -796,7 +796,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/health from tailscale.com/control/controlclient+ - tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator tailscale.com/internal/noiseconn from tailscale.com/control/controlclient diff --git a/cmd/tailscale/cli/risks.go b/cmd/tailscale/cli/risks.go index c36ffafae..9b03025a8 100644 --- a/cmd/tailscale/cli/risks.go +++ b/cmd/tailscale/cli/risks.go @@ -4,15 +4,18 @@ package cli import ( + "context" "errors" "flag" "fmt" "os" "os/signal" + "runtime" "strings" "syscall" "time" + "tailscale.com/ipn" "tailscale.com/util/testenv" ) @@ -20,11 +23,12 @@ var ( riskTypes []string riskLoseSSH = registerRiskType("lose-ssh") riskMacAppConnector = registerRiskType("mac-app-connector") + riskStrictRPFilter = registerRiskType("linux-strict-rp-filter") riskAll = registerRiskType("all") ) const riskMacAppConnectorMessage = ` -You are trying to configure an app connector on macOS, which is not officially supported due to system limitations. This may result in performance and reliability issues. +You are trying to configure an app connector on macOS, which is not officially supported due to system limitations. This may result in performance and reliability issues. Do not use a macOS app connector for any mission-critical purposes. For the best experience, Linux is the only recommended platform for app connectors. ` @@ -89,3 +93,18 @@ func presentRiskToUser(riskType, riskMessage, acceptedRisks string) error { printf("\r%s\r", strings.Repeat(" ", msgLen)) return errAborted } + +// checkExitNodeRisk checks if the user is using an exit node on Linux and +// whether reverse path filtering is enabled. If so, it presents a risk message. 
+func checkExitNodeRisk(ctx context.Context, prefs *ipn.Prefs, acceptedRisks string) error { + if runtime.GOOS != "linux" { + return nil + } + if !prefs.ExitNodeIP.IsValid() && prefs.ExitNodeID == "" { + return nil + } + if err := localClient.CheckReversePathFiltering(ctx); err != nil { + return presentRiskToUser(riskStrictRPFilter, err.Error(), acceptedRisks) + } + return nil +} diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index aa5966698..66e74d77f 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -183,6 +183,9 @@ func runSet(ctx context.Context, args []string) (retErr error) { } warnOnAdvertiseRouts(ctx, &maskedPrefs.Prefs) + if err := checkExitNodeRisk(ctx, &maskedPrefs.Prefs, setArgs.acceptedRisks); err != nil { + return err + } var advertiseExitNodeSet, advertiseRoutesSet bool setFlagSet.Visit(func(f *flag.Flag) { updateMaskedPrefsFromUpOrSetFlag(maskedPrefs, f.Name) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index e4bb6f576..37cdab754 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -481,6 +481,9 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } warnOnAdvertiseRouts(ctx, prefs) + if err := checkExitNodeRisk(ctx, prefs, upArgs.acceptedRisks); err != nil { + return err + } curPrefs, err := localClient.GetPrefs(ctx) if err != nil { diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index c6011a12c..387b944c1 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -281,7 +281,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/tpm from tailscale.com/feature/condregister tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ - tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ diff --git a/health/healthmsg/healthmsg.go b/health/healthmsg/healthmsg.go index 6c237678e..238410373 100644 --- a/health/healthmsg/healthmsg.go +++ b/health/healthmsg/healthmsg.go @@ -12,4 +12,5 @@ const ( TailscaleSSHOnBut = "Tailscale SSH enabled, but " // + ... something from caller LockedOut = "this node is locked out; it will not have connectivity until it is signed. 
For more info, see https://tailscale.com/s/locked-out" WarnExitNodeUsage = "The following issues on your machine will likely make usage of exit nodes impossible" + DisableRPFilter = "Please set rp_filter=2 instead of rp_filter=1; see https://github.com/tailscale/tailscale/issues/3310" ) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 4b5accd85..88adb3973 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4112,9 +4112,8 @@ func updateExitNodeUsageWarning(p ipn.PrefsView, state *netmon.State, healthTrac var msg string if p.ExitNodeIP().IsValid() || p.ExitNodeID() != "" { warn, _ := netutil.CheckReversePathFiltering(state) - const comment = "please set rp_filter=2 instead of rp_filter=1; see https://github.com/tailscale/tailscale/issues/3310" if len(warn) > 0 { - msg = fmt.Sprintf("%s: %v, %s", healthmsg.WarnExitNodeUsage, warn, comment) + msg = fmt.Sprintf("%s: %v, %s", healthmsg.WarnExitNodeUsage, warn, healthmsg.DisableRPFilter) } } if len(msg) > 0 { diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 99cb7c95b..78f95b2b1 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -32,6 +32,7 @@ import ( "tailscale.com/clientupdate" "tailscale.com/drive" "tailscale.com/envknob" + "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" @@ -82,71 +83,72 @@ var handler = map[string]LocalAPIHandler{ // The other /localapi/v0/NAME handlers are exact matches and contain only NAME // without a trailing slash: - "alpha-set-device-attrs": (*Handler).serveSetDeviceAttrs, // see tailscale/corp#24690 - "bugreport": (*Handler).serveBugReport, - "check-ip-forwarding": (*Handler).serveCheckIPForwarding, - "check-prefs": (*Handler).serveCheckPrefs, - "check-udp-gro-forwarding": (*Handler).serveCheckUDPGROForwarding, - "component-debug-logging": (*Handler).serveComponentDebugLogging, - "debug": (*Handler).serveDebug, - "debug-derp-region": (*Handler).serveDebugDERPRegion, - "debug-dial-types": (*Handler).serveDebugDialTypes, - "debug-log": (*Handler).serveDebugLog, - "debug-packet-filter-matches": (*Handler).serveDebugPacketFilterMatches, - "debug-packet-filter-rules": (*Handler).serveDebugPacketFilterRules, - "debug-peer-endpoint-changes": (*Handler).serveDebugPeerEndpointChanges, - "debug-portmap": (*Handler).serveDebugPortmap, - "derpmap": (*Handler).serveDERPMap, - "dev-set-state-store": (*Handler).serveDevSetStateStore, - "dial": (*Handler).serveDial, - "disconnect-control": (*Handler).disconnectControl, - "dns-osconfig": (*Handler).serveDNSOSConfig, - "dns-query": (*Handler).serveDNSQuery, - "drive/fileserver-address": (*Handler).serveDriveServerAddr, - "drive/shares": (*Handler).serveShares, - "goroutines": (*Handler).serveGoroutines, - "handle-push-message": (*Handler).serveHandlePushMessage, - "id-token": (*Handler).serveIDToken, - "login-interactive": (*Handler).serveLoginInteractive, - "logout": (*Handler).serveLogout, - "logtap": (*Handler).serveLogTap, - "metrics": (*Handler).serveMetrics, - "ping": (*Handler).servePing, - "pprof": (*Handler).servePprof, - "prefs": (*Handler).servePrefs, - "query-feature": (*Handler).serveQueryFeature, - "reload-config": (*Handler).reloadConfig, - "reset-auth": (*Handler).serveResetAuth, - "serve-config": (*Handler).serveServeConfig, - "set-dns": (*Handler).serveSetDNS, - "set-expiry-sooner": (*Handler).serveSetExpirySooner, - "set-gui-visible": (*Handler).serveSetGUIVisible, - "set-push-device-token": (*Handler).serveSetPushDeviceToken, - 
"set-udp-gro-forwarding": (*Handler).serveSetUDPGROForwarding, - "set-use-exit-node-enabled": (*Handler).serveSetUseExitNodeEnabled, - "start": (*Handler).serveStart, - "status": (*Handler).serveStatus, - "suggest-exit-node": (*Handler).serveSuggestExitNode, - "tka/affected-sigs": (*Handler).serveTKAAffectedSigs, - "tka/cosign-recovery-aum": (*Handler).serveTKACosignRecoveryAUM, - "tka/disable": (*Handler).serveTKADisable, - "tka/force-local-disable": (*Handler).serveTKALocalDisable, - "tka/generate-recovery-aum": (*Handler).serveTKAGenerateRecoveryAUM, - "tka/init": (*Handler).serveTKAInit, - "tka/log": (*Handler).serveTKALog, - "tka/modify": (*Handler).serveTKAModify, - "tka/sign": (*Handler).serveTKASign, - "tka/status": (*Handler).serveTKAStatus, - "tka/submit-recovery-aum": (*Handler).serveTKASubmitRecoveryAUM, - "tka/verify-deeplink": (*Handler).serveTKAVerifySigningDeeplink, - "tka/wrap-preauth-key": (*Handler).serveTKAWrapPreauthKey, - "update/check": (*Handler).serveUpdateCheck, - "update/install": (*Handler).serveUpdateInstall, - "update/progress": (*Handler).serveUpdateProgress, - "upload-client-metrics": (*Handler).serveUploadClientMetrics, - "usermetrics": (*Handler).serveUserMetrics, - "watch-ipn-bus": (*Handler).serveWatchIPNBus, - "whois": (*Handler).serveWhoIs, + "alpha-set-device-attrs": (*Handler).serveSetDeviceAttrs, // see tailscale/corp#24690 + "bugreport": (*Handler).serveBugReport, + "check-ip-forwarding": (*Handler).serveCheckIPForwarding, + "check-prefs": (*Handler).serveCheckPrefs, + "check-reverse-path-filtering": (*Handler).serveCheckReversePathFiltering, + "check-udp-gro-forwarding": (*Handler).serveCheckUDPGROForwarding, + "component-debug-logging": (*Handler).serveComponentDebugLogging, + "debug": (*Handler).serveDebug, + "debug-derp-region": (*Handler).serveDebugDERPRegion, + "debug-dial-types": (*Handler).serveDebugDialTypes, + "debug-log": (*Handler).serveDebugLog, + "debug-packet-filter-matches": (*Handler).serveDebugPacketFilterMatches, + "debug-packet-filter-rules": (*Handler).serveDebugPacketFilterRules, + "debug-peer-endpoint-changes": (*Handler).serveDebugPeerEndpointChanges, + "debug-portmap": (*Handler).serveDebugPortmap, + "derpmap": (*Handler).serveDERPMap, + "dev-set-state-store": (*Handler).serveDevSetStateStore, + "dial": (*Handler).serveDial, + "disconnect-control": (*Handler).disconnectControl, + "dns-osconfig": (*Handler).serveDNSOSConfig, + "dns-query": (*Handler).serveDNSQuery, + "drive/fileserver-address": (*Handler).serveDriveServerAddr, + "drive/shares": (*Handler).serveShares, + "goroutines": (*Handler).serveGoroutines, + "handle-push-message": (*Handler).serveHandlePushMessage, + "id-token": (*Handler).serveIDToken, + "login-interactive": (*Handler).serveLoginInteractive, + "logout": (*Handler).serveLogout, + "logtap": (*Handler).serveLogTap, + "metrics": (*Handler).serveMetrics, + "ping": (*Handler).servePing, + "pprof": (*Handler).servePprof, + "prefs": (*Handler).servePrefs, + "query-feature": (*Handler).serveQueryFeature, + "reload-config": (*Handler).reloadConfig, + "reset-auth": (*Handler).serveResetAuth, + "serve-config": (*Handler).serveServeConfig, + "set-dns": (*Handler).serveSetDNS, + "set-expiry-sooner": (*Handler).serveSetExpirySooner, + "set-gui-visible": (*Handler).serveSetGUIVisible, + "set-push-device-token": (*Handler).serveSetPushDeviceToken, + "set-udp-gro-forwarding": (*Handler).serveSetUDPGROForwarding, + "set-use-exit-node-enabled": (*Handler).serveSetUseExitNodeEnabled, + "start": (*Handler).serveStart, + 
"status": (*Handler).serveStatus, + "suggest-exit-node": (*Handler).serveSuggestExitNode, + "tka/affected-sigs": (*Handler).serveTKAAffectedSigs, + "tka/cosign-recovery-aum": (*Handler).serveTKACosignRecoveryAUM, + "tka/disable": (*Handler).serveTKADisable, + "tka/force-local-disable": (*Handler).serveTKALocalDisable, + "tka/generate-recovery-aum": (*Handler).serveTKAGenerateRecoveryAUM, + "tka/init": (*Handler).serveTKAInit, + "tka/log": (*Handler).serveTKALog, + "tka/modify": (*Handler).serveTKAModify, + "tka/sign": (*Handler).serveTKASign, + "tka/status": (*Handler).serveTKAStatus, + "tka/submit-recovery-aum": (*Handler).serveTKASubmitRecoveryAUM, + "tka/verify-deeplink": (*Handler).serveTKAVerifySigningDeeplink, + "tka/wrap-preauth-key": (*Handler).serveTKAWrapPreauthKey, + "update/check": (*Handler).serveUpdateCheck, + "update/install": (*Handler).serveUpdateInstall, + "update/progress": (*Handler).serveUpdateProgress, + "upload-client-metrics": (*Handler).serveUploadClientMetrics, + "usermetrics": (*Handler).serveUserMetrics, + "watch-ipn-bus": (*Handler).serveWatchIPNBus, + "whois": (*Handler).serveWhoIs, } // Register registers a new LocalAPI handler for the given name. @@ -1175,6 +1177,32 @@ func (h *Handler) serveCheckIPForwarding(w http.ResponseWriter, r *http.Request) }) } +func (h *Handler) serveCheckReversePathFiltering(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "reverse path filtering check access denied", http.StatusForbidden) + return + } + var warning string + + state := h.b.Sys().NetMon.Get().InterfaceState() + warn, err := netutil.CheckReversePathFiltering(state) + if err == nil && len(warn) > 0 { + var msg strings.Builder + msg.WriteString(healthmsg.WarnExitNodeUsage + ":\n") + for _, w := range warn { + msg.WriteString("- " + w + "\n") + } + msg.WriteString(healthmsg.DisableRPFilter) + warning = msg.String() + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(struct { + Warning string + }{ + Warning: warning, + }) +} + func (h *Handler) serveCheckUDPGROForwarding(w http.ResponseWriter, r *http.Request) { if !h.PermitRead { http.Error(w, "UDP GRO forwarding check access denied", http.StatusForbidden) diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 242cd8f1b..da3175b8c 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -237,7 +237,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/health from tailscale.com/control/controlclient+ - tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ From e72c528a5fec1abda8a933e35b6c06ebd25d0175 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Tue, 10 Jun 2025 15:29:42 -0400 Subject: [PATCH 0931/1708] cmd/{derp,derpprobe},prober,derp: add mesh support to derpprobe (#15414) Add mesh key support to derpprobe for probing derpers with verify set to true. Move MeshKey checking to central point for code reuse. Fix a bad error fmt msg. 
Fixes tailscale/corp#27294 Fixes tailscale/corp#25756 Signed-off-by: Mike O'Driscoll --- cmd/derper/derper.go | 2 +- cmd/derpprobe/derpprobe.go | 69 +++++++++++++++++++++++++++++ cmd/tsidp/depaware.txt | 2 +- derp/derp_client.go | 15 ++++++- derp/derp_server.go | 9 ++-- derp/derp_test.go | 89 ++++++++++++++++++++++++++------------ prober/derp.go | 42 ++++++++++-------- types/key/derp.go | 22 ++++++++++ 8 files changed, 195 insertions(+), 55 deletions(-) diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 840de3fba..7ea404beb 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -68,7 +68,7 @@ var ( runDERP = flag.Bool("derp", true, "whether to run a DERP server. The only reason to set this false is if you're decommissioning a server but want to keep its bootstrap DNS functionality still running.") flagHome = flag.String("home", "", "what to serve at the root path. It may be left empty (the default, for a default homepage), \"blank\" for a blank page, or a URL to redirect to") - meshPSKFile = flag.String("mesh-psk-file", defaultMeshPSKFile(), "if non-empty, path to file containing the mesh pre-shared key file. It should contain some hex string; whitespace is trimmed.") + meshPSKFile = flag.String("mesh-psk-file", defaultMeshPSKFile(), "if non-empty, path to file containing the mesh pre-shared key file. It must be 64 lowercase hexadecimal characters; whitespace is trimmed.") meshWith = flag.String("mesh-with", "", "optional comma-separated list of hostnames to mesh with; the server's own hostname can be in the list. If an entry contains a slash, the second part names a hostname to be used when dialing the target.") secretsURL = flag.String("secrets-url", "", "SETEC server URL for secrets retrieval of mesh key") secretPrefix = flag.String("secrets-path-prefix", "prod/derp", "setec path prefix for \""+setecMeshKeyName+"\" secret for DERP mesh key") diff --git a/cmd/derpprobe/derpprobe.go b/cmd/derpprobe/derpprobe.go index 2723a31ae..25159d649 100644 --- a/cmd/derpprobe/derpprobe.go +++ b/cmd/derpprobe/derpprobe.go @@ -5,23 +5,36 @@ package main import ( + "context" "flag" "fmt" "log" "net/http" "os" + "path" + "path/filepath" "sort" "time" + "github.com/tailscale/setec/client/setec" "tailscale.com/prober" "tailscale.com/tsweb" + "tailscale.com/types/key" "tailscale.com/version" // Support for prometheus varz in tsweb _ "tailscale.com/tsweb/promvarz" ) +const meshKeyEnvVar = "TAILSCALE_DERPER_MESH_KEY" +const setecMeshKeyName = "meshkey" + +func defaultSetecCacheDir() string { + return filepath.Join(os.Getenv("HOME"), ".cache", "derper-secrets") +} + var ( + dev = flag.Bool("dev", false, "run in localhost development mode") derpMapURL = flag.String("derp-map", "https://login.tailscale.com/derpmap/default", "URL to DERP map (https:// or file://) or 'local' to use the local tailscaled's DERP map") versionFlag = flag.Bool("version", false, "print version and exit") listen = flag.String("listen", ":8030", "HTTP listen address") @@ -37,6 +50,10 @@ var ( qdPacketsPerSecond = flag.Int("qd-packets-per-second", 0, "if greater than 0, queuing delay will be measured continuously using 260 byte packets (approximate size of a CallMeMaybe packet) sent at this rate per second") qdPacketTimeout = flag.Duration("qd-packet-timeout", 5*time.Second, "queuing delay packets arriving after this period of time from being sent are treated like dropped packets and don't count toward queuing delay timings") regionCodeOrID = flag.String("region-code", "", "probe only this region (e.g. 
'lax' or '17'); if left blank, all regions will be probed") + meshPSKFile = flag.String("mesh-psk-file", "", "if non-empty, path to file containing the mesh pre-shared key file. It must be 64 lowercase hexadecimal characters; whitespace is trimmed.") + secretsURL = flag.String("secrets-url", "", "SETEC server URL for secrets retrieval of mesh key") + secretPrefix = flag.String("secrets-path-prefix", "prod/derp", fmt.Sprintf("setec path prefix for \"%s\" secret for DERP mesh key", setecMeshKeyName)) + secretsCacheDir = flag.String("secrets-cache-dir", defaultSetecCacheDir(), "directory to cache setec secrets in (required if --secrets-url is set)") ) func main() { @@ -47,11 +64,16 @@ func main() { } p := prober.New().WithSpread(*spread).WithOnce(*probeOnce).WithMetricNamespace("derpprobe") + meshKey, err := getMeshKey() + if err != nil { + log.Fatalf("failed to get mesh key: %v", err) + } opts := []prober.DERPOpt{ prober.WithMeshProbing(*meshInterval), prober.WithSTUNProbing(*stunInterval), prober.WithTLSProbing(*tlsInterval), prober.WithQueuingDelayProbing(*qdPacketsPerSecond, *qdPacketTimeout), + prober.WithMeshKey(meshKey), } if *bwInterval > 0 { opts = append(opts, prober.WithBandwidthProbing(*bwInterval, *bwSize, *bwTUNIPv4Address)) @@ -99,6 +121,53 @@ func main() { log.Fatal(http.ListenAndServe(*listen, mux)) } +func getMeshKey() (key.DERPMesh, error) { + var meshKey string + + if *dev { + meshKey = os.Getenv(meshKeyEnvVar) + if meshKey == "" { + log.Printf("No mesh key specified for dev via %s\n", meshKeyEnvVar) + } else { + log.Printf("Set mesh key from %s\n", meshKeyEnvVar) + } + } else if *secretsURL != "" { + meshKeySecret := path.Join(*secretPrefix, setecMeshKeyName) + fc, err := setec.NewFileCache(*secretsCacheDir) + if err != nil { + log.Fatalf("NewFileCache: %v", err) + } + log.Printf("Setting up setec store from %q", *secretsURL) + st, err := setec.NewStore(context.Background(), + setec.StoreConfig{ + Client: setec.Client{Server: *secretsURL}, + Secrets: []string{ + meshKeySecret, + }, + Cache: fc, + }) + if err != nil { + log.Fatalf("NewStore: %v", err) + } + meshKey = st.Secret(meshKeySecret).GetString() + log.Println("Got mesh key from setec store") + st.Close() + } else if *meshPSKFile != "" { + b, err := setec.StaticFile(*meshPSKFile) + if err != nil { + log.Fatalf("StaticFile failed to get key: %v", err) + } + log.Println("Got mesh key from static file") + meshKey = b.GetString() + } + if meshKey == "" { + log.Printf("No mesh key found, mesh key is empty") + return key.DERPMesh{}, nil + } + + return key.ParseDERPMesh(meshKey) +} + type overallStatus struct { good, bad []string } diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 1ea4b3d88..b28460352 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -241,7 +241,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/health from tailscale.com/control/controlclient+ - tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ diff --git a/derp/derp_client.go b/derp/derp_client.go index a9b92299c..69f35db1e 100644 --- a/derp/derp_client.go +++ b/derp/derp_client.go @@ -165,7 +165,7 @@ 
type clientInfo struct { // trusted clients. It's required to subscribe to the // connection list & forward packets. It's empty for regular // users. - MeshKey string `json:"meshKey,omitempty"` + MeshKey key.DERPMesh `json:"meshKey,omitempty,omitzero"` // Version is the DERP protocol version that the client was built with. // See the ProtocolVersion const. @@ -179,10 +179,21 @@ type clientInfo struct { IsProber bool `json:",omitempty"` } +// Equal reports if two clientInfo values are equal. +func (c *clientInfo) Equal(other *clientInfo) bool { + if c == nil || other == nil { + return c == other + } + if c.Version != other.Version || c.CanAckPings != other.CanAckPings || c.IsProber != other.IsProber { + return false + } + return c.MeshKey.Equal(other.MeshKey) +} + func (c *Client) sendClientKey() error { msg, err := json.Marshal(clientInfo{ Version: ProtocolVersion, - MeshKey: c.meshKey.String(), + MeshKey: c.meshKey, CanAckPings: c.canAckPings, IsProber: c.isProber, }) diff --git a/derp/derp_server.go b/derp/derp_server.go index 6f86c3ea4..c6a749485 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -1364,14 +1364,11 @@ func (s *Server) isMeshPeer(info *clientInfo) bool { // Since mesh keys are a fixed length, we don’t need to be concerned // about timing attacks on client mesh keys that are the wrong length. // See https://github.com/tailscale/corp/issues/28720 - if info == nil || info.MeshKey == "" { + if info == nil || info.MeshKey.IsZero() { return false } - k, err := key.ParseDERPMesh(info.MeshKey) - if err != nil { - return false - } - return s.meshKey.Equal(k) + + return s.meshKey.Equal(info.MeshKey) } // verifyClient checks whether the client is allowed to connect to the derper, diff --git a/derp/derp_test.go b/derp/derp_test.go index 0093ee2b1..9d07e159b 100644 --- a/derp/derp_test.go +++ b/derp/derp_test.go @@ -20,6 +20,7 @@ import ( "os" "reflect" "strconv" + "strings" "sync" "testing" "time" @@ -33,21 +34,53 @@ import ( "tailscale.com/tstest" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/must" ) func TestClientInfoUnmarshal(t *testing.T) { - for i, in := range []string{ - `{"Version":5,"MeshKey":"abc"}`, - `{"version":5,"meshKey":"abc"}`, + for i, in := range map[string]struct { + json string + want *clientInfo + wantErr string + }{ + "empty": { + json: `{}`, + want: &clientInfo{}, + }, + "valid": { + json: `{"Version":5,"MeshKey":"6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8"}`, + want: &clientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, + }, + "validLowerMeshKey": { + json: `{"version":5,"meshKey":"6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8"}`, + want: &clientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, + }, + "invalidMeshKeyToShort": { + json: `{"version":5,"meshKey":"abcdefg"}`, + wantErr: "invalid mesh key", + }, + "invalidMeshKeyToLong": { + json: `{"version":5,"meshKey":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"}`, + wantErr: "invalid mesh key", + }, } { - var got clientInfo - if err := json.Unmarshal([]byte(in), &got); err != nil { - t.Fatalf("[%d]: %v", i, err) - } - want := clientInfo{Version: 5, MeshKey: "abc"} - if got != want { - t.Errorf("[%d]: got %+v; want %+v", i, got, want) - } + t.Run(i, func(t *testing.T) { + t.Parallel() + var got clientInfo + err := json.Unmarshal([]byte(in.json), &got) + if 
in.wantErr != "" { + if err == nil || !strings.Contains(err.Error(), in.wantErr) { + t.Errorf("Unmarshal(%q) = %v, want error containing %q", in.json, err, in.wantErr) + } + return + } + if err != nil { + t.Fatalf("Unmarshal(%q) = %v, want no error", in.json, err) + } + if !got.Equal(in.want) { + t.Errorf("Unmarshal(%q) = %+v, want %+v", in.json, got, in.want) + } + }) } } @@ -1681,43 +1714,43 @@ func TestIsMeshPeer(t *testing.T) { t.Fatal(err) } for name, tt := range map[string]struct { - info *clientInfo want bool + meshKey string wantAllocs float64 }{ "nil": { - info: nil, - want: false, - wantAllocs: 0, - }, - "empty": { - info: &clientInfo{MeshKey: ""}, want: false, wantAllocs: 0, }, - "invalid": { - info: &clientInfo{MeshKey: "invalid"}, - want: false, - wantAllocs: 2, // error message - }, "mismatch": { - info: &clientInfo{MeshKey: "0badf00d00000000000000000000000000000000000000000000000000000000"}, + meshKey: "6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8", want: false, wantAllocs: 1, }, "match": { - info: &clientInfo{MeshKey: testMeshKey}, + meshKey: testMeshKey, want: true, - wantAllocs: 1, + wantAllocs: 0, }, } { t.Run(name, func(t *testing.T) { var got bool + var mKey key.DERPMesh + if tt.meshKey != "" { + mKey, err = key.ParseDERPMesh(tt.meshKey) + if err != nil { + t.Fatalf("ParseDERPMesh(%q) failed: %v", tt.meshKey, err) + } + } + + info := clientInfo{ + MeshKey: mKey, + } allocs := testing.AllocsPerRun(1, func() { - got = s.isMeshPeer(tt.info) + got = s.isMeshPeer(&info) }) if got != tt.want { - t.Fatalf("got %t, want %t: info = %#v", got, tt.want, tt.info) + t.Fatalf("got %t, want %t: info = %#v", got, tt.want, info) } if allocs != tt.wantAllocs && tt.want { diff --git a/prober/derp.go b/prober/derp.go index 98e61ff54..e21c8ce76 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -47,6 +47,7 @@ import ( type derpProber struct { p *Prober derpMapURL string // or "local" + meshKey key.DERPMesh udpInterval time.Duration meshInterval time.Duration tlsInterval time.Duration @@ -71,7 +72,7 @@ type derpProber struct { udpProbeFn func(string, int) ProbeClass meshProbeFn func(string, string) ProbeClass bwProbeFn func(string, string, int64) ProbeClass - qdProbeFn func(string, string, int, time.Duration) ProbeClass + qdProbeFn func(string, string, int, time.Duration, key.DERPMesh) ProbeClass sync.Mutex lastDERPMap *tailcfg.DERPMap @@ -143,6 +144,12 @@ func WithRegionCodeOrID(regionCode string) DERPOpt { } } +func WithMeshKey(meshKey key.DERPMesh) DERPOpt { + return func(d *derpProber) { + d.meshKey = meshKey + } +} + // DERP creates a new derpProber. 
// // If derpMapURL is "local", the DERPMap is fetched via @@ -250,7 +257,7 @@ func (d *derpProber) probeMapFn(ctx context.Context) error { wantProbes[n] = true if d.probes[n] == nil { log.Printf("adding DERP queuing delay probe for %s->%s (%s)", server.Name, to.Name, region.RegionName) - d.probes[n] = d.p.Run(n, -10*time.Second, labels, d.qdProbeFn(server.Name, to.Name, d.qdPacketsPerSecond, d.qdPacketTimeout)) + d.probes[n] = d.p.Run(n, -10*time.Second, labels, d.qdProbeFn(server.Name, to.Name, d.qdPacketsPerSecond, d.qdPacketTimeout, d.meshKey)) } } } @@ -284,7 +291,7 @@ func (d *derpProber) probeMesh(from, to string) ProbeClass { } dm := d.lastDERPMap - return derpProbeNodePair(ctx, dm, fromN, toN) + return derpProbeNodePair(ctx, dm, fromN, toN, d.meshKey) }, Class: "derp_mesh", Labels: Labels{"derp_path": derpPath}, @@ -308,7 +315,7 @@ func (d *derpProber) probeBandwidth(from, to string, size int64) ProbeClass { if err != nil { return err } - return derpProbeBandwidth(ctx, d.lastDERPMap, fromN, toN, size, &transferTimeSeconds, &totalBytesTransferred, d.bwTUNIPv4Prefix) + return derpProbeBandwidth(ctx, d.lastDERPMap, fromN, toN, size, &transferTimeSeconds, &totalBytesTransferred, d.bwTUNIPv4Prefix, d.meshKey) }, Class: "derp_bw", Labels: Labels{ @@ -336,7 +343,7 @@ func (d *derpProber) probeBandwidth(from, to string, size int64) ProbeClass { // to the queuing delay measurement and are recorded as dropped. 'from' and 'to' are // expected to be names (DERPNode.Name) of two DERP servers in the same region, // and may refer to the same server. -func (d *derpProber) probeQueuingDelay(from, to string, packetsPerSecond int, packetTimeout time.Duration) ProbeClass { +func (d *derpProber) probeQueuingDelay(from, to string, packetsPerSecond int, packetTimeout time.Duration, meshKey key.DERPMesh) ProbeClass { derpPath := "mesh" if from == to { derpPath = "single" @@ -349,7 +356,7 @@ func (d *derpProber) probeQueuingDelay(from, to string, packetsPerSecond int, pa if err != nil { return err } - return derpProbeQueuingDelay(ctx, d.lastDERPMap, fromN, toN, packetsPerSecond, packetTimeout, &packetsDropped, qdh) + return derpProbeQueuingDelay(ctx, d.lastDERPMap, fromN, toN, packetsPerSecond, packetTimeout, &packetsDropped, qdh, meshKey) }, Class: "derp_qd", Labels: Labels{"derp_path": derpPath}, @@ -368,15 +375,15 @@ func (d *derpProber) probeQueuingDelay(from, to string, packetsPerSecond int, pa // derpProbeQueuingDelay continuously sends data between two local DERP clients // connected to two DERP servers in order to measure queuing delays. From and to // can be the same server. -func derpProbeQueuingDelay(ctx context.Context, dm *tailcfg.DERPMap, from, to *tailcfg.DERPNode, packetsPerSecond int, packetTimeout time.Duration, packetsDropped *expvar.Float, qdh *histogram) (err error) { +func derpProbeQueuingDelay(ctx context.Context, dm *tailcfg.DERPMap, from, to *tailcfg.DERPNode, packetsPerSecond int, packetTimeout time.Duration, packetsDropped *expvar.Float, qdh *histogram, meshKey key.DERPMesh) (err error) { // This probe uses clients with isProber=false to avoid spamming the derper // logs with every packet sent by the queuing delay probe. 
- fromc, err := newConn(ctx, dm, from, false) + fromc, err := newConn(ctx, dm, from, false, meshKey) if err != nil { return err } defer fromc.Close() - toc, err := newConn(ctx, dm, to, false) + toc, err := newConn(ctx, dm, to, false, meshKey) if err != nil { return err } @@ -674,15 +681,15 @@ func derpProbeUDP(ctx context.Context, ipStr string, port int) error { // DERP clients connected to two DERP servers.If tunIPv4Address is specified, // probes will use a TCP connection over a TUN device at this address in order // to exercise TCP-in-TCP in similar fashion to TCP over Tailscale via DERP. -func derpProbeBandwidth(ctx context.Context, dm *tailcfg.DERPMap, from, to *tailcfg.DERPNode, size int64, transferTimeSeconds, totalBytesTransferred *expvar.Float, tunIPv4Prefix *netip.Prefix) (err error) { +func derpProbeBandwidth(ctx context.Context, dm *tailcfg.DERPMap, from, to *tailcfg.DERPNode, size int64, transferTimeSeconds, totalBytesTransferred *expvar.Float, tunIPv4Prefix *netip.Prefix, meshKey key.DERPMesh) (err error) { // This probe uses clients with isProber=false to avoid spamming the derper logs with every packet // sent by the bandwidth probe. - fromc, err := newConn(ctx, dm, from, false) + fromc, err := newConn(ctx, dm, from, false, meshKey) if err != nil { return err } defer fromc.Close() - toc, err := newConn(ctx, dm, to, false) + toc, err := newConn(ctx, dm, to, false, meshKey) if err != nil { return err } @@ -712,13 +719,13 @@ func derpProbeBandwidth(ctx context.Context, dm *tailcfg.DERPMap, from, to *tail // derpProbeNodePair sends a small packet between two local DERP clients // connected to two DERP servers. -func derpProbeNodePair(ctx context.Context, dm *tailcfg.DERPMap, from, to *tailcfg.DERPNode) (err error) { - fromc, err := newConn(ctx, dm, from, true) +func derpProbeNodePair(ctx context.Context, dm *tailcfg.DERPMap, from, to *tailcfg.DERPNode, meshKey key.DERPMesh) (err error) { + fromc, err := newConn(ctx, dm, from, true, meshKey) if err != nil { return err } defer fromc.Close() - toc, err := newConn(ctx, dm, to, true) + toc, err := newConn(ctx, dm, to, true, meshKey) if err != nil { return err } @@ -1116,7 +1123,7 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT return nil } -func newConn(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode, isProber bool) (*derphttp.Client, error) { +func newConn(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode, isProber bool, meshKey key.DERPMesh) (*derphttp.Client, error) { // To avoid spamming the log with regular connection messages. 
l := logger.Filtered(log.Printf, func(s string) bool { return !strings.Contains(s, "derphttp.Client.Connect: connecting to") @@ -1132,6 +1139,7 @@ func newConn(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode, isPr } }) dc.IsProber = isProber + dc.MeshKey = meshKey err := dc.Connect(ctx) if err != nil { return nil, err @@ -1165,7 +1173,7 @@ func newConn(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode, isPr case derp.ServerInfoMessage: errc <- nil default: - errc <- fmt.Errorf("unexpected first message type %T", errc) + errc <- fmt.Errorf("unexpected first message type %T", m) } }() select { diff --git a/types/key/derp.go b/types/key/derp.go index 1fe690189..1466b85bc 100644 --- a/types/key/derp.go +++ b/types/key/derp.go @@ -6,6 +6,7 @@ package key import ( "crypto/subtle" "encoding/hex" + "encoding/json" "errors" "fmt" "strings" @@ -23,6 +24,27 @@ type DERPMesh struct { k [32]byte // 64-digit hexadecimal numbers fit in 32 bytes } +// MarshalJSON implements the [encoding/json.Marshaler] interface. +func (k DERPMesh) MarshalJSON() ([]byte, error) { + return json.Marshal(k.String()) +} + +// UnmarshalJSON implements the [encoding/json.Unmarshaler] interface. +func (k *DERPMesh) UnmarshalJSON(data []byte) error { + var s string + json.Unmarshal(data, &s) + + if hex.DecodedLen(len(s)) != len(k.k) { + return fmt.Errorf("types/key/derp: cannot unmarshal, incorrect size mesh key len: %d, must be %d, %w", hex.DecodedLen(len(s)), len(k.k), ErrInvalidMeshKey) + } + _, err := hex.Decode(k.k[:], []byte(s)) + if err != nil { + return fmt.Errorf("types/key/derp: cannot unmarshal, invalid mesh key: %w", err) + } + + return nil +} + // DERPMeshFromRaw32 parses a 32-byte raw value as a DERP mesh key. func DERPMeshFromRaw32(raw mem.RO) DERPMesh { if raw.Len() != 32 { From 811426001907a93209ac811d79f09d5c8bc61988 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Tue, 10 Jun 2025 14:39:27 -0700 Subject: [PATCH 0932/1708] go.toolchain.rev: bump to go 1.24.4 (#16230) Updates #cleanup Signed-off-by: Patrick O'Doherty --- go.toolchain.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index a5d73929c..33aa56423 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -98e8c99c256a5aeaa13725d2e43fdd7f465ba200 +1cd3bf1a6eaf559aa8c00e749289559c884cef09 From 6a93b17c8cafc1d8e1c52e133511e52ed9086355 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 10 Jun 2025 17:31:14 -0700 Subject: [PATCH 0933/1708] types/netmap,wgengine/magicsock: propagate CapVer to magicsock.endpoint (#16244) This enables us to mark nodes as relay capable or not. We don't actually do that yet, as we haven't established a relay CapVer. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- types/netmap/nodemut.go | 13 +++++++++++++ types/netmap/nodemut_test.go | 9 +++++++++ wgengine/magicsock/endpoint.go | 2 ++ wgengine/magicsock/magicsock.go | 9 +++++++++ 4 files changed, 33 insertions(+) diff --git a/types/netmap/nodemut.go b/types/netmap/nodemut.go index f4de1bf0b..ab30ef1e6 100644 --- a/types/netmap/nodemut.go +++ b/types/netmap/nodemut.go @@ -69,6 +69,17 @@ func (m NodeMutationLastSeen) Apply(n *tailcfg.Node) { n.LastSeen = ptr.To(m.LastSeen) } +// NodeMutationCap is a NodeMutation that says a node's +// [tailcfg.CapabilityVersion] value has changed. 
+type NodeMutationCap struct { + mutatingNodeID + Cap tailcfg.CapabilityVersion +} + +func (m NodeMutationCap) Apply(n *tailcfg.Node) { + n.Cap = m.Cap +} + var peerChangeFields = sync.OnceValue(func() []reflect.StructField { var fields []reflect.StructField rt := reflect.TypeFor[tailcfg.PeerChange]() @@ -105,6 +116,8 @@ func NodeMutationsFromPatch(p *tailcfg.PeerChange) (_ []NodeMutation, ok bool) { ret = append(ret, NodeMutationOnline{mutatingNodeID(p.NodeID), *p.Online}) case "LastSeen": ret = append(ret, NodeMutationLastSeen{mutatingNodeID(p.NodeID), *p.LastSeen}) + case "Cap": + ret = append(ret, NodeMutationCap{mutatingNodeID(p.NodeID), p.Cap}) } } return ret, true diff --git a/types/netmap/nodemut_test.go b/types/netmap/nodemut_test.go index 374f8623a..0f1cac6b2 100644 --- a/types/netmap/nodemut_test.go +++ b/types/netmap/nodemut_test.go @@ -177,6 +177,14 @@ func TestMutationsFromMapResponse(t *testing.T) { }, want: nil, }, + { + name: "patch-cap", + mr: fromChanges(&tailcfg.PeerChange{ + NodeID: 1, + Cap: 2, + }), + want: muts(NodeMutationCap{1, 2}), + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -195,6 +203,7 @@ func TestMutationsFromMapResponse(t *testing.T) { NodeMutationDERPHome{}, NodeMutationOnline{}, NodeMutationLastSeen{}, + NodeMutationCap{}, )); diff != "" { t.Errorf("wrong result (-want +got):\n%s", diff) } diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index bf7758fb8..23316dcb4 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1423,6 +1423,8 @@ func (de *endpoint) updateFromNode(n tailcfg.NodeView, heartbeatDisabled bool, p } de.setEndpointsLocked(n.Endpoints()) + + de.relayCapable = capVerIsRelayCapable(n.Cap()) } func (de *endpoint) setEndpointsLocked(eps interface { diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 2e2882110..e5cc87dc3 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2507,6 +2507,11 @@ func (c *Conn) SetProbeUDPLifetime(v bool) { }) } +func capVerIsRelayCapable(version tailcfg.CapabilityVersion) bool { + // TODO(jwhited): implement once capVer is bumped + return false +} + // SetNetworkMap is called when the control client gets a new network // map from the control server. It must always be non-nil. // @@ -3203,6 +3208,10 @@ func (c *Conn) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) { ep.mu.Lock() ep.setEndpointsLocked(views.SliceOf(m.Endpoints)) ep.mu.Unlock() + case netmap.NodeMutationCap: + ep.mu.Lock() + ep.relayCapable = capVerIsRelayCapable(m.Cap) + ep.mu.Unlock() } } return true From 3b25e94352b7db6c6028a6b97c425aa01dcf3aa3 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Fri, 6 Jun 2025 09:38:34 -0700 Subject: [PATCH 0934/1708] cmd/natc: allow specifying the tsnet state dir Which can make operating the service more convenient. It makes sense to put the cluster state with this if specified, so rearrange the logic to handle that. 
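As a rough sketch (not part of the patch itself), this is the directory selection the change introduces, mirroring the getClusterStatePath helper added to natc.go below; the function name clusterDirFor is invented here, and the stat/IsDir sanity check from the real helper is elided:

	// clusterDirFor returns where raft/cluster state should live given the
	// --state-dir flag value. With the flag set, cluster state nests under it;
	// otherwise it falls back to a directory under os.UserConfigDir().
	// (Assumes "os" and "path/filepath" are imported.)
	func clusterDirFor(stateDirFlag string) (string, error) {
		dir := stateDirFlag
		if dir == "" {
			conf, err := os.UserConfigDir()
			if err != nil {
				return "", err
			}
			dir = filepath.Join(conf, "nat-connector-state")
		}
		dir = filepath.Join(dir, "cluster")
		return dir, os.MkdirAll(dir, 0o700)
	}

So --state-dir=/var/lib/natc (path illustrative) keeps tsnet state in /var/lib/natc and raft state in /var/lib/natc/cluster, while omitting the flag leaves cluster state under the user config directory, now at "nat-connector-state/cluster" instead of the old "nat-connector-cluster-state".
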
Updates #14667 Signed-off-by: Fran Bull --- cmd/natc/ippool/consensusippool.go | 32 +-------------------------- cmd/natc/natc.go | 35 ++++++++++++++++++++++++++++-- 2 files changed, 34 insertions(+), 33 deletions(-) diff --git a/cmd/natc/ippool/consensusippool.go b/cmd/natc/ippool/consensusippool.go index adf2090d1..3bc21bd03 100644 --- a/cmd/natc/ippool/consensusippool.go +++ b/cmd/natc/ippool/consensusippool.go @@ -10,8 +10,6 @@ import ( "fmt" "log" "net/netip" - "os" - "path/filepath" "time" "github.com/hashicorp/raft" @@ -155,11 +153,7 @@ func (ipp *ConsensusIPPool) domainLookup(from tailcfg.NodeID, addr netip.Addr) ( func (ipp *ConsensusIPPool) StartConsensus(ctx context.Context, ts *tsnet.Server, clusterTag string, clusterStateDir string) error { cfg := tsconsensus.DefaultConfig() cfg.ServeDebugMonitor = true - var err error - cfg.StateDirPath, err = getStatePath(clusterStateDir) - if err != nil { - return err - } + cfg.StateDirPath = clusterStateDir cns, err := tsconsensus.Start(ctx, ts, ipp, clusterTag, cfg) if err != nil { return err @@ -211,30 +205,6 @@ func (ps *consensusPerPeerState) unusedIPV4(ipset *netipx.IPSet, reuseDeadline t return netip.Addr{}, false, "", errors.New("ip pool exhausted") } -func getStatePath(pathFromFlag string) (string, error) { - var dirPath string - if pathFromFlag != "" { - dirPath = pathFromFlag - } else { - confDir, err := os.UserConfigDir() - if err != nil { - return "", err - } - dirPath = filepath.Join(confDir, "nat-connector-cluster-state") - } - - if err := os.MkdirAll(dirPath, 0700); err != nil { - return "", err - } - if fi, err := os.Stat(dirPath); err != nil { - return "", err - } else if !fi.IsDir() { - return "", fmt.Errorf("%v is not a directory", dirPath) - } - - return dirPath, nil -} - // isCloseToExpiry returns true if the lastUsed and now times are more than // half the lifetime apart func isCloseToExpiry(lastUsed, now time.Time, lifetime time.Duration) bool { diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index 719d5d20d..247bb2101 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -18,6 +18,7 @@ import ( "net/http" "net/netip" "os" + "path/filepath" "strings" "time" @@ -59,7 +60,7 @@ func main() { wgPort = fs.Uint("wg-port", 0, "udp port for wireguard and peer to peer traffic") clusterTag = fs.String("cluster-tag", "", "optionally run in a consensus cluster with other nodes with this tag") server = fs.String("login-server", ipn.DefaultControlURL, "the base URL of control server") - clusterStateDir = fs.String("cluster-state-dir", "", "path to directory in which to store raft state") + stateDir = fs.String("state-dir", "", "path to directory in which to store app state") ) ff.Parse(fs, os.Args[1:], ff.WithEnvVarPrefix("TS_NATC")) @@ -96,6 +97,7 @@ func main() { } ts := &tsnet.Server{ Hostname: *hostname, + Dir: *stateDir, } ts.ControlURL = *server if *wgPort != 0 { @@ -156,7 +158,11 @@ func main() { var ipp ippool.IPPool if *clusterTag != "" { cipp := ippool.NewConsensusIPPool(addrPool) - err = cipp.StartConsensus(ctx, ts, *clusterTag, *clusterStateDir) + clusterStateDir, err := getClusterStatePath(*stateDir) + if err != nil { + log.Fatalf("Creating cluster state dir failed: %v", err) + } + err = cipp.StartConsensus(ctx, ts, *clusterTag, clusterStateDir) if err != nil { log.Fatalf("StartConsensus: %v", err) } @@ -570,3 +576,28 @@ func proxyTCPConn(c net.Conn, dest string, ctor *connector) { p.Start() } + +func getClusterStatePath(stateDirFlag string) (string, error) { + var dirPath string + if stateDirFlag != "" { + dirPath 
= stateDirFlag + } else { + confDir, err := os.UserConfigDir() + if err != nil { + return "", err + } + dirPath = filepath.Join(confDir, "nat-connector-state") + } + dirPath = filepath.Join(dirPath, "cluster") + + if err := os.MkdirAll(dirPath, 0700); err != nil { + return "", err + } + if fi, err := os.Stat(dirPath); err != nil { + return "", err + } else if !fi.IsDir() { + return "", fmt.Errorf("%v is not a directory", dirPath) + } + + return dirPath, nil +} From 6010812f0c033caed665ef66daca7d103d8399af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Wed, 11 Jun 2025 14:22:30 -0400 Subject: [PATCH 0935/1708] ipn/localapi,client/local: add debug watcher for bus events (#16239) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updates: #15160 Signed-off-by: Claus Lensbøl --- client/local/local.go | 20 +++++ cmd/tailscale/cli/debug.go | 24 ++++++ cmd/tailscaled/tailscaled.go | 1 + ipn/localapi/localapi.go | 137 +++++++++++++++++++++++++---------- util/eventbus/debug.go | 9 +++ 5 files changed, 154 insertions(+), 37 deletions(-) diff --git a/client/local/local.go b/client/local/local.go index 7a3a4b703..bc643ad79 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -414,6 +414,26 @@ func (lc *Client) TailDaemonLogs(ctx context.Context) (io.Reader, error) { return res.Body, nil } +// StreamBusEvents returns a stream of the Tailscale bus events as they arrive. +// Close the context to stop the stream. +// Expected response from the server is newline-delimited JSON. +// The caller must close the reader when it is finished reading. +func (lc *Client) StreamBusEvents(ctx context.Context) (io.ReadCloser, error) { + req, err := http.NewRequestWithContext(ctx, "GET", + "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-bus-events", nil) + if err != nil { + return nil, err + } + res, err := lc.doLocalRequestNiceError(req) + if err != nil { + return nil, err + } + if res.StatusCode != http.StatusOK { + return nil, errors.New(res.Status) + } + return res.Body, nil +} + // Pprof returns a pprof profile of the Tailscale daemon. 
func (lc *Client) Pprof(ctx context.Context, pprofType string, sec int) ([]byte, error) { var secArg string diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 213a0166e..025382ca9 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -102,6 +102,12 @@ func debugCmd() *ffcli.Command { return fs })(), }, + { + Name: "daemon-bus-events", + ShortUsage: "tailscale debug daemon-bus-events", + Exec: runDaemonBusEvents, + ShortHelp: "Watch events on the tailscaled bus", + }, { Name: "metrics", ShortUsage: "tailscale debug metrics", @@ -784,6 +790,24 @@ func runDaemonLogs(ctx context.Context, args []string) error { } } +func runDaemonBusEvents(ctx context.Context, args []string) error { + logs, err := localClient.StreamBusEvents(ctx) + if err != nil { + return err + } + defer logs.Close() + d := json.NewDecoder(bufio.NewReader(logs)) + for { + var line eventbus.DebugEvent + err := d.Decode(&line) + if err != nil { + return err + } + fmt.Printf("[%d][%q][from: %q][to: %q] %s\n", line.Count, line.Type, + line.From, line.To, line.Event) + } +} + var metricsArgs struct { watch bool } diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 87750bc5d..61b811c12 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -719,6 +719,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo Dialer: sys.Dialer.Get(), SetSubsystem: sys.Set, ControlKnobs: sys.ControlKnobs(), + EventBus: sys.Bus.Get(), DriveForLocal: driveimpl.NewFileSystemForLocal(logf), } diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 78f95b2b1..6344da42d 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -20,6 +20,7 @@ import ( "net/url" "os" "path" + "reflect" "runtime" "slices" "strconv" @@ -91,6 +92,7 @@ var handler = map[string]LocalAPIHandler{ "check-udp-gro-forwarding": (*Handler).serveCheckUDPGROForwarding, "component-debug-logging": (*Handler).serveComponentDebugLogging, "debug": (*Handler).serveDebug, + "debug-bus-events": (*Handler).serveDebugBusEvents, "debug-derp-region": (*Handler).serveDebugDERPRegion, "debug-dial-types": (*Handler).serveDebugDialTypes, "debug-log": (*Handler).serveDebugLog, @@ -332,7 +334,7 @@ func (h *Handler) serveIDToken(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - httpReq, err := http.NewRequest("POST", "https://unused/machine/id-token", bytes.NewReader(b)) + httpReq, err := http.NewRequest(httpm.POST, "https://unused/machine/id-token", bytes.NewReader(b)) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -355,7 +357,7 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) { http.Error(w, "bugreport access denied", http.StatusForbidden) return } - if r.Method != "POST" { + if r.Method != httpm.POST { http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) return } @@ -482,7 +484,7 @@ func (h *Handler) serveSetDeviceAttrs(w http.ResponseWriter, r *http.Request) { http.Error(w, "set-device-attrs access denied", http.StatusForbidden) return } - if r.Method != "PATCH" { + if r.Method != httpm.PATCH { http.Error(w, "only PATCH allowed", http.StatusMethodNotAllowed) return } @@ -587,7 +589,7 @@ func (h *Handler) serveLogTap(w http.ResponseWriter, r *http.Request) { http.Error(w, "logtap access denied", http.StatusForbidden) return } - if r.Method != "GET" { + if r.Method != httpm.GET { http.Error(w, "GET required", 
http.StatusMethodNotAllowed) return } @@ -639,7 +641,7 @@ func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { http.Error(w, "debug access denied", http.StatusForbidden) return } - if r.Method != "POST" { + if r.Method != httpm.POST { http.Error(w, "POST required", http.StatusMethodNotAllowed) return } @@ -712,7 +714,7 @@ func (h *Handler) serveDevSetStateStore(w http.ResponseWriter, r *http.Request) http.Error(w, "debug access denied", http.StatusForbidden) return } - if r.Method != "POST" { + if r.Method != httpm.POST { http.Error(w, "POST required", http.StatusMethodNotAllowed) return } @@ -917,6 +919,68 @@ func (h *Handler) serveDebugPortmap(w http.ResponseWriter, r *http.Request) { } } +// serveDebugBusEvents taps into the tailscaled/utils/eventbus and streams +// events to the client. +func (h *Handler) serveDebugBusEvents(w http.ResponseWriter, r *http.Request) { + // Require write access (~root) as the logs could contain something + // sensitive. + if !h.PermitWrite { + http.Error(w, "event bus access denied", http.StatusForbidden) + return + } + if r.Method != httpm.GET { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + bus, ok := h.LocalBackend().Sys().Bus.GetOK() + if !ok { + http.Error(w, "event bus not running", http.StatusNoContent) + return + } + + f, ok := w.(http.Flusher) + if !ok { + http.Error(w, "streaming unsupported", http.StatusInternalServerError) + return + } + + io.WriteString(w, `{"Event":"[event listener connected]\n"}`+"\n") + f.Flush() + + mon := bus.Debugger().WatchBus() + defer mon.Close() + + i := 0 + for { + select { + case <-r.Context().Done(): + fmt.Fprintf(w, `{"Event":"[event listener closed]\n"}`) + return + case <-mon.Done(): + return + case event := <-mon.Events(): + data := eventbus.DebugEvent{ + Count: i, + Type: reflect.TypeOf(event.Event).String(), + Event: event.Event, + From: event.From.Name(), + } + for _, client := range event.To { + data.To = append(data.To, client.Name()) + } + + if msg, err := json.Marshal(data); err != nil { + fmt.Fprintf(w, `{"Event":"[ERROR] failed to marshal JSON for %T"}\n`, event.Event) + } else { + w.Write(msg) + } + f.Flush() + i++ + } + } +} + func (h *Handler) serveComponentDebugLogging(w http.ResponseWriter, r *http.Request) { if !h.PermitWrite { http.Error(w, "debug access denied", http.StatusForbidden) @@ -1078,7 +1142,7 @@ func (h *Handler) serveResetAuth(w http.ResponseWriter, r *http.Request) { func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) { switch r.Method { - case "GET": + case httpm.GET: if !h.PermitRead { http.Error(w, "serve config denied", http.StatusForbidden) return @@ -1094,7 +1158,7 @@ func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) { w.Header().Set("Etag", etag) w.Header().Set("Content-Type", "application/json") w.Write(bts) - case "POST": + case httpm.POST: if !h.PermitWrite { http.Error(w, "serve config denied", http.StatusForbidden) return @@ -1157,7 +1221,6 @@ func authorizeServeConfigForGOOSAndUserContext(goos string, configIn *ipn.ServeC // should never happen. panic("unreachable") } - } func (h *Handler) serveCheckIPForwarding(w http.ResponseWriter, r *http.Request) { @@ -1291,7 +1354,7 @@ func (h *Handler) serveDebugPeerEndpointChanges(w http.ResponseWriter, r *http.R // (in ipnserver.Server) provides the blocking until the connection is no longer // in use. 
func InUseOtherUserIPNStream(w http.ResponseWriter, r *http.Request, err error) (handled bool) { - if r.Method != "GET" || r.URL.Path != "/localapi/v0/watch-ipn-bus" { + if r.Method != httpm.GET || r.URL.Path != "/localapi/v0/watch-ipn-bus" { return false } js, err := json.Marshal(&ipn.Notify{ @@ -1356,7 +1419,7 @@ func (h *Handler) serveLoginInteractive(w http.ResponseWriter, r *http.Request) http.Error(w, "login access denied", http.StatusForbidden) return } - if r.Method != "POST" { + if r.Method != httpm.POST { http.Error(w, "want POST", http.StatusBadRequest) return } @@ -1370,7 +1433,7 @@ func (h *Handler) serveStart(w http.ResponseWriter, r *http.Request) { http.Error(w, "access denied", http.StatusForbidden) return } - if r.Method != "POST" { + if r.Method != httpm.POST { http.Error(w, "want POST", http.StatusBadRequest) return } @@ -1393,7 +1456,7 @@ func (h *Handler) serveLogout(w http.ResponseWriter, r *http.Request) { http.Error(w, "logout access denied", http.StatusForbidden) return } - if r.Method != "POST" { + if r.Method != httpm.POST { http.Error(w, "want POST", http.StatusBadRequest) return } @@ -1412,7 +1475,7 @@ func (h *Handler) servePrefs(w http.ResponseWriter, r *http.Request) { } var prefs ipn.PrefsView switch r.Method { - case "PATCH": + case httpm.PATCH: if !h.PermitWrite { http.Error(w, "prefs write access denied", http.StatusForbidden) return @@ -1436,7 +1499,7 @@ func (h *Handler) servePrefs(w http.ResponseWriter, r *http.Request) { json.NewEncoder(w).Encode(resJSON{Error: err.Error()}) return } - case "GET", "HEAD": + case httpm.GET, httpm.HEAD: prefs = h.b.Prefs() default: http.Error(w, "unsupported method", http.StatusMethodNotAllowed) @@ -1476,9 +1539,9 @@ func (h *Handler) servePolicy(w http.ResponseWriter, r *http.Request) { var effectivePolicy *setting.Snapshot switch r.Method { - case "GET": + case httpm.GET: effectivePolicy = policy.Get() - case "POST": + case httpm.POST: effectivePolicy, err = policy.Reload() if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) @@ -1504,7 +1567,7 @@ func (h *Handler) serveCheckPrefs(w http.ResponseWriter, r *http.Request) { http.Error(w, "checkprefs access denied", http.StatusForbidden) return } - if r.Method != "POST" { + if r.Method != httpm.POST { http.Error(w, "unsupported method", http.StatusMethodNotAllowed) return } @@ -1542,7 +1605,7 @@ func (h *Handler) serveSetDNS(w http.ResponseWriter, r *http.Request) { http.Error(w, "access denied", http.StatusForbidden) return } - if r.Method != "POST" { + if r.Method != httpm.POST { http.Error(w, "want POST", http.StatusBadRequest) return } @@ -1557,7 +1620,7 @@ func (h *Handler) serveSetDNS(w http.ResponseWriter, r *http.Request) { } func (h *Handler) serveDERPMap(w http.ResponseWriter, r *http.Request) { - if r.Method != "GET" { + if r.Method != httpm.GET { http.Error(w, "want GET", http.StatusBadRequest) return } @@ -1574,7 +1637,7 @@ func (h *Handler) serveSetExpirySooner(w http.ResponseWriter, r *http.Request) { http.Error(w, "access denied", http.StatusForbidden) return } - if r.Method != "POST" { + if r.Method != httpm.POST { http.Error(w, "POST required", http.StatusMethodNotAllowed) return } @@ -1602,7 +1665,7 @@ func (h *Handler) serveSetExpirySooner(w http.ResponseWriter, r *http.Request) { func (h *Handler) servePing(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - if r.Method != "POST" { + if r.Method != httpm.POST { http.Error(w, "want POST", http.StatusBadRequest) return } @@ -1648,7 +1711,7 @@ func (h *Handler) 
servePing(w http.ResponseWriter, r *http.Request) { } func (h *Handler) serveDial(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { + if r.Method != httpm.POST { http.Error(w, "POST required", http.StatusMethodNotAllowed) return } @@ -1711,7 +1774,7 @@ func (h *Handler) serveSetPushDeviceToken(w http.ResponseWriter, r *http.Request http.Error(w, "set push device token access denied", http.StatusForbidden) return } - if r.Method != "POST" { + if r.Method != httpm.POST { http.Error(w, "unsupported method", http.StatusMethodNotAllowed) return } @@ -1729,7 +1792,7 @@ func (h *Handler) serveHandlePushMessage(w http.ResponseWriter, r *http.Request) http.Error(w, "handle push message not allowed", http.StatusForbidden) return } - if r.Method != "POST" { + if r.Method != httpm.POST { http.Error(w, "unsupported method", http.StatusMethodNotAllowed) return } @@ -1746,7 +1809,7 @@ func (h *Handler) serveHandlePushMessage(w http.ResponseWriter, r *http.Request) } func (h *Handler) serveUploadClientMetrics(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { + if r.Method != httpm.POST { http.Error(w, "unsupported method", http.StatusMethodNotAllowed) return } @@ -2337,7 +2400,7 @@ func (h *Handler) serveQueryFeature(w http.ResponseWriter, r *http.Request) { } req, err := http.NewRequestWithContext(r.Context(), - "POST", "https://unused/machine/feature/query", bytes.NewReader(b)) + httpm.POST, "https://unused/machine/feature/query", bytes.NewReader(b)) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -2416,7 +2479,7 @@ func (h *Handler) serveDebugLog(w http.ResponseWriter, r *http.Request) { // Effectively, it tells us whether serveUpdateInstall will be able to install // an update for us. func (h *Handler) serveUpdateCheck(w http.ResponseWriter, r *http.Request) { - if r.Method != "GET" { + if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return } @@ -2445,7 +2508,7 @@ func (h *Handler) serveUpdateCheck(w http.ResponseWriter, r *http.Request) { // serveUpdateProgress after pinging this endpoint to check how the update is // going. func (h *Handler) serveUpdateInstall(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { + if r.Method != httpm.POST { http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) return } @@ -2460,7 +2523,7 @@ func (h *Handler) serveUpdateInstall(w http.ResponseWriter, r *http.Request) { // log messages in order from oldest to newest. If an update is not in progress, // the returned slice will be empty. func (h *Handler) serveUpdateProgress(w http.ResponseWriter, r *http.Request) { - if r.Method != "GET" { + if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return } @@ -2516,7 +2579,7 @@ func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { // // The response if successful is a DNSQueryResponse JSON object. func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { - if r.Method != "GET" { + if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return } @@ -2553,7 +2616,7 @@ func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { // serveDriveServerAddr handles updates of the Taildrive file server address. 
func (h *Handler) serveDriveServerAddr(w http.ResponseWriter, r *http.Request) { - if r.Method != "PUT" { + if r.Method != httpm.PUT { http.Error(w, "only PUT allowed", http.StatusMethodNotAllowed) return } @@ -2580,7 +2643,7 @@ func (h *Handler) serveShares(w http.ResponseWriter, r *http.Request) { return } switch r.Method { - case "PUT": + case httpm.PUT: var share drive.Share err := json.NewDecoder(r.Body).Decode(&share) if err != nil { @@ -2616,7 +2679,7 @@ func (h *Handler) serveShares(w http.ResponseWriter, r *http.Request) { return } w.WriteHeader(http.StatusCreated) - case "DELETE": + case httpm.DELETE: b, err := io.ReadAll(r.Body) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) @@ -2632,7 +2695,7 @@ func (h *Handler) serveShares(w http.ResponseWriter, r *http.Request) { return } w.WriteHeader(http.StatusNoContent) - case "POST": + case httpm.POST: var names [2]string err := json.NewDecoder(r.Body).Decode(&names) if err != nil { @@ -2657,7 +2720,7 @@ func (h *Handler) serveShares(w http.ResponseWriter, r *http.Request) { return } w.WriteHeader(http.StatusNoContent) - case "GET": + case httpm.GET: shares := h.b.DriveGetShares() err := json.NewEncoder(w).Encode(shares) if err != nil { @@ -2671,7 +2734,7 @@ func (h *Handler) serveShares(w http.ResponseWriter, r *http.Request) { // serveSuggestExitNode serves a POST endpoint for returning a suggested exit node. func (h *Handler) serveSuggestExitNode(w http.ResponseWriter, r *http.Request) { - if r.Method != "GET" { + if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return } diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index 832d72ac0..b6264f82f 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -186,3 +186,12 @@ type hookFn[T any] struct { ID uint64 Fn func(T) } + +// DebugEvent is a representation of an event used for debug clients. +type DebugEvent struct { + Count int + Type string + From string + To []string + Event any +} From 8baa016a23a3960463b47472d5d1dd8b18f3a6cf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Jun 2025 23:17:14 -0600 Subject: [PATCH 0936/1708] .github: Bump github/codeql-action from 3.28.15 to 3.28.19 (#16227) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.15 to 3.28.19. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/45775bd8235c68ba998cffa5171334d58593da47...fca7ace96b7d713c7035871441bd52efbe39e27e) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.28.19 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 311f539e1..124a16561 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15 + uses: github/codeql-action/init@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15 + uses: github/codeql-action/autobuild@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15 + uses: github/codeql-action/analyze@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 From 75a42977c7312706496bb7ade24a5b65d488b45a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Jun 2025 23:18:14 -0600 Subject: [PATCH 0937/1708] .github: Bump slackapi/slack-github-action from 2.0.0 to 2.1.0 (#15948) Bumps [slackapi/slack-github-action](https://github.com/slackapi/slack-github-action) from 2.0.0 to 2.1.0. - [Release notes](https://github.com/slackapi/slack-github-action/releases) - [Commits](https://github.com/slackapi/slack-github-action/compare/485a9d42d3a73031f12ec201c457e2162c45d02d...b0fa283ad8fea605de13dc3f449259339835fc52) --- updated-dependencies: - dependency-name: slackapi/slack-github-action dependency-version: 2.1.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/govulncheck.yml | 2 +- .github/workflows/installer.yml | 2 +- .github/workflows/test.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index 10269ff0b..36ed1fe9b 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -24,7 +24,7 @@ jobs: - name: Post to slack if: failure() && github.event_name == 'schedule' - uses: slackapi/slack-github-action@485a9d42d3a73031f12ec201c457e2162c45d02d # v2.0.0 + uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 # v2.1.0 with: method: chat.postMessage token: ${{ secrets.GOVULNCHECK_BOT_TOKEN }} diff --git a/.github/workflows/installer.yml b/.github/workflows/installer.yml index 7888d9ba5..0ca16ae9f 100644 --- a/.github/workflows/installer.yml +++ b/.github/workflows/installer.yml @@ -108,7 +108,7 @@ jobs: steps: - name: Notify Slack of failure on scheduled runs if: failure() && github.event_name == 'schedule' - uses: slackapi/slack-github-action@485a9d42d3a73031f12ec201c457e2162c45d02d # v2.0.0 + uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 # v2.1.0 with: webhook: ${{ secrets.SLACK_WEBHOOK_URL }} webhook-type: incoming-webhook diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2aad005ae..9f89fae01 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -586,7 +586,7 @@ jobs: # By having the job always run, but skipping its only step as needed, we # let the CI output collapse nicely in PRs. 
if: failure() && github.event_name == 'push' - uses: slackapi/slack-github-action@485a9d42d3a73031f12ec201c457e2162c45d02d # v2.0.0 + uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 # v2.1.0 with: webhook: ${{ secrets.SLACK_WEBHOOK_URL }} webhook-type: incoming-webhook From 7c05811af03ec69ce28d1cc92c6412d5b714bdae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Jun 2025 23:18:58 -0600 Subject: [PATCH 0938/1708] .github: Bump actions/setup-go from 5.4.0 to 5.5.0 (#15947) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.4.0 to 5.5.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/0aaccfd150d50ccaeb58ebd88d36e91967a5f35b...d35c59abb061a4a6fb18e82ac0862c26744d6ab5) --- updated-dependencies: - dependency-name: actions/setup-go dependency-version: 5.5.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/golangci-lint.yml | 2 +- .github/workflows/test.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 124a16561..8bd72d80d 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -49,7 +49,7 @@ jobs: # Install a more recent Go that understands modern go.mod content. - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version-file: go.mod diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 04a2e042d..60eb6852a 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -25,7 +25,7 @@ jobs: steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version-file: go.mod cache: false diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9f89fae01..1776653f4 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -151,7 +151,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version-file: go.mod cache: false From 3219de4cb8f136456b211dc8bdf69aa4750a8c34 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 12 Jun 2025 13:47:34 +0100 Subject: [PATCH 0939/1708] cmd/k8s-operator: ensure status update errors are displayed to users (#16251) Updates#cleanup Signed-off-by: Irbe Krumina --- cmd/k8s-operator/ingress_test.go | 2 +- cmd/k8s-operator/nameserver.go | 4 ++-- cmd/k8s-operator/proxygroup.go | 4 ++-- cmd/k8s-operator/proxygroup_test.go | 37 +++++++++++++++-------------- 4 files changed, 24 insertions(+), 23 deletions(-) diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index a975fec7a..dbd6961d7 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -427,7 +427,7 @@ func 
TestIngressLetsEncryptStaging(t *testing.T) { pcLEStaging, pcLEStagingFalse, pcOther := proxyClassesForLEStagingTest() - testCases := testCasesForLEStagingTests(pcLEStaging, pcLEStagingFalse, pcOther) + testCases := testCasesForLEStagingTests() for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index ef0762a12..20d66f7d0 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -7,6 +7,7 @@ package main import ( "context" + "errors" "fmt" "slices" "strings" @@ -14,7 +15,6 @@ import ( _ "embed" - "github.com/pkg/errors" "go.uber.org/zap" xslices "golang.org/x/exp/slices" appsv1 "k8s.io/api/apps/v1" @@ -106,7 +106,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ if !apiequality.Semantic.DeepEqual(oldCnStatus, &dnsCfg.Status) { // An error encountered here should get returned by the Reconcile function. if updateErr := a.Client.Status().Update(ctx, dnsCfg); updateErr != nil { - err = errors.Wrap(err, updateErr.Error()) + err = errors.Join(err, updateErr) } } return res, err diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index f263829d7..e7c0590b0 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -9,13 +9,13 @@ import ( "context" "crypto/sha256" "encoding/json" + "errors" "fmt" "net/http" "slices" "strings" "sync" - "github.com/pkg/errors" "go.uber.org/zap" xslices "golang.org/x/exp/slices" appsv1 "k8s.io/api/apps/v1" @@ -122,7 +122,7 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ if !apiequality.Semantic.DeepEqual(oldPGStatus, &pg.Status) { // An error encountered here should get returned by the Reconcile function. if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil { - err = errors.Wrap(err, updateErr.Error()) + err = errors.Join(err, updateErr) } } return reconcile.Result{}, err diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 159329eda..f3f87aaac 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -257,10 +257,12 @@ func TestProxyGroupTypes(t *testing.T) { }, Spec: tsapi.ProxyClassSpec{}, } + // Passing ProxyGroup as status subresource is a way to get around fake + // client's limitations for updating resource statuses. fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). WithObjects(pc). - WithStatusSubresource(pc). + WithStatusSubresource(pc, &tsapi.ProxyGroup{}). Build() mustUpdateStatus(t, fc, "", pc.Name, func(p *tsapi.ProxyClass) { p.Status.Conditions = []metav1.Condition{{ @@ -450,6 +452,7 @@ func TestProxyGroupTypes(t *testing.T) { func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). + WithStatusSubresource(&tsapi.ProxyGroup{}). Build() reconciler := &ProxyGroupReconciler{ tsNamespace: tsNamespace, @@ -693,7 +696,7 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) { pgType tsapi.ProxyGroupType } pcLEStaging, pcLEStagingFalse, pcOther := proxyClassesForLEStagingTest() - sharedTestCases := testCasesForLEStagingTests(pcLEStaging, pcLEStagingFalse, pcOther) + sharedTestCases := testCasesForLEStagingTests() var tests []proxyGroupLETestCase for _, tt := range sharedTestCases { tests = append(tests, proxyGroupLETestCase{ @@ -715,9 +718,20 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) { builder := fake.NewClientBuilder(). 
WithScheme(tsapi.GlobalScheme) + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tt.pgType, + Replicas: ptr.To[int32](1), + ProxyClass: tt.proxyClassPerResource, + }, + } + // Pre-populate the fake client with ProxyClasses. - builder = builder.WithObjects(pcLEStaging, pcLEStagingFalse, pcOther). - WithStatusSubresource(pcLEStaging, pcLEStagingFalse, pcOther) + builder = builder.WithObjects(pcLEStaging, pcLEStagingFalse, pcOther, pg). + WithStatusSubresource(pcLEStaging, pcLEStagingFalse, pcOther, pg) fc := builder.Build() @@ -730,19 +744,6 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) { setProxyClassReady(t, fc, cl, name) } - // Create ProxyGroup - pg := &tsapi.ProxyGroup{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - }, - Spec: tsapi.ProxyGroupSpec{ - Type: tt.pgType, - Replicas: ptr.To[int32](1), - ProxyClass: tt.proxyClassPerResource, - }, - } - mustCreate(t, fc, pg) - reconciler := &ProxyGroupReconciler{ tsNamespace: tsNamespace, proxyImage: testProxyImage, @@ -783,7 +784,7 @@ type leStagingTestCase struct { // Shared test cases for LE staging endpoint configuration for ProxyGroup and // non-HA Ingress. -func testCasesForLEStagingTests(pcLEStaging, pcLEStagingFalse, pcOther *tsapi.ProxyClass) []leStagingTestCase { +func testCasesForLEStagingTests() []leStagingTestCase { return []leStagingTestCase{ { name: "with_staging_proxyclass", From 3b5ce9d1bcc30a6d400e5f57e79d710b05bbceb9 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 11 Jun 2025 19:15:20 -0700 Subject: [PATCH 0940/1708] tsweb/varz: add binary name to version metric Fixes tailscale/corp#29530 Change-Id: Iae04456d7ac5527897f060370e90c9517c00a818 Signed-off-by: Brad Fitzpatrick --- tsweb/varz/varz.go | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/tsweb/varz/varz.go b/tsweb/varz/varz.go index c6d66fbe2..aca2878b7 100644 --- a/tsweb/varz/varz.go +++ b/tsweb/varz/varz.go @@ -11,6 +11,8 @@ import ( "fmt" "io" "net/http" + "os" + "path/filepath" "reflect" "runtime" "sort" @@ -189,7 +191,11 @@ func writePromExpVar(w io.Writer, prefix string, kv expvar.KeyValue) { return } if vs, ok := v.(string); ok && strings.HasSuffix(name, "version") { - fmt.Fprintf(w, "%s{version=%q} 1\n", name, vs) + if name == "version" { + fmt.Fprintf(w, "%s{version=%q,binary=%q} 1\n", name, vs, binaryName()) + } else { + fmt.Fprintf(w, "%s{version=%q} 1\n", name, vs) + } return } switch v := v.(type) { @@ -308,6 +314,18 @@ func ExpvarDoHandler(expvarDoFunc func(f func(expvar.KeyValue))) func(http.Respo } } +var binaryName = sync.OnceValue(func() string { + exe, err := os.Executable() + if err != nil { + return "" + } + exe2, err := filepath.EvalSymlinks(exe) + if err != nil { + return filepath.Base(exe) + } + return filepath.Base(exe2) +}) + // PrometheusMetricsReflectRooter is an optional interface that expvar.Var implementations // can implement to indicate that they should be walked recursively with reflect to find // sets of fields to export. From 3ed76ceed34e2fbff6eeee59facdcd72a8b5b795 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 12 Jun 2025 09:57:45 -0700 Subject: [PATCH 0941/1708] feature/relayserver,net/{netcheck,udprelay}: implement addr discovery (#16253) The relay server now fetches IPs from local interfaces and external perspective IP:port's via netcheck (STUN). 
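A minimal sketch (not from the patch) of how the new GetReportOpts.OnlySTUN option can be consumed to collect just the externally visible endpoints, much like the relay server's discovery loop further down in this diff; the helper name externalAddrPorts is invented, and constructing the netcheck.Client (NetMon, SendPacket, and feeding replies to ReceiveSTUNPacket) is assumed to be wired up by the caller, as server.go does here:

	// externalAddrPorts runs a STUN-only netcheck against the given DERP map
	// and returns whichever global IPv4/IPv6 endpoints were observed.
	// (Assumes "context", "net/netip", "tailscale.com/net/netcheck", and
	// "tailscale.com/tailcfg" are imported.)
	func externalAddrPorts(ctx context.Context, c *netcheck.Client, dm *tailcfg.DERPMap) ([]netip.AddrPort, error) {
		ctx, cancel := context.WithTimeout(ctx, netcheck.ReportTimeout)
		defer cancel()
		rep, err := c.GetReport(ctx, dm, &netcheck.GetReportOpts{OnlySTUN: true})
		if err != nil {
			return nil, err
		}
		var eps []netip.AddrPort
		if rep.GlobalV4.IsValid() {
			eps = append(eps, rep.GlobalV4)
		}
		if rep.GlobalV6.IsValid() {
			eps = append(eps, rep.GlobalV6)
		}
		return eps, nil
	}

Note that, per the netcheck changes below, OnlySTUN and OnlyTCP443 are mutually exclusive, and OnlySTUN also skips the captive-portal check and the HTTPS/ICMP fallback probes.
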
Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 3 +- net/netcheck/netcheck.go | 19 ++- net/udprelay/server.go | 189 +++++++++++++++++++++++------ net/udprelay/server_test.go | 2 +- 4 files changed, 170 insertions(+), 43 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 96d21138e..a38587aa3 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -10,7 +10,6 @@ import ( "errors" "io" "net/http" - "net/netip" "sync" "tailscale.com/envknob" @@ -136,7 +135,7 @@ func (e *extension) relayServerOrInit() (relayServer, error) { return nil, errors.New("TAILSCALE_USE_WIP_CODE envvar is not set") } var err error - e.server, _, err = udprelay.NewServer(*e.port, []netip.Addr{netip.MustParseAddr("127.0.0.1")}) + e.server, _, err = udprelay.NewServer(e.logf, *e.port, nil) if err != nil { return nil, err } diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index c9f03966b..54627f713 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -753,6 +753,7 @@ func newReport() *Report { // GetReportOpts contains options that can be passed to GetReport. Unless // specified, all fields are optional and can be left as their zero value. +// At most one of OnlyTCP443 or OnlySTUN may be set. type GetReportOpts struct { // GetLastDERPActivity is a callback that, if provided, should return // the absolute time that the calling code last communicated with a @@ -765,6 +766,8 @@ type GetReportOpts struct { // OnlyTCP443 constrains netcheck reporting to measurements over TCP port // 443. OnlyTCP443 bool + // OnlySTUN constrains netcheck reporting to STUN measurements over UDP. + OnlySTUN bool } // getLastDERPActivity calls o.GetLastDERPActivity if both o and @@ -790,6 +793,13 @@ func (c *Client) SetForcePreferredDERP(region int) { // // It may not be called concurrently with itself. func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetReportOpts) (_ *Report, reterr error) { + onlySTUN := false + if opts != nil && opts.OnlySTUN { + if opts.OnlyTCP443 { + return nil, errors.New("netcheck: only one of OnlySTUN or OnlyTCP443 may be set in opts") + } + onlySTUN = true + } defer func() { if reterr != nil { metricNumGetReportError.Add(1) @@ -865,6 +875,9 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe }() if runtime.GOOS == "js" || runtime.GOOS == "tamago" || (runtime.GOOS == "plan9" && hostinfo.IsInVM86()) { + if onlySTUN { + return nil, errors.New("platform is restricted to HTTP, but OnlySTUN is set in opts") + } if err := c.runHTTPOnlyChecks(ctx, last, rs, dm); err != nil { return nil, err } @@ -896,7 +909,7 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe // it's unnecessary. captivePortalDone := syncs.ClosedChan() captivePortalStop := func() {} - if !rs.incremental { + if !rs.incremental && !onlySTUN { // NOTE(andrew): we can't simply add this goroutine to the // `NewWaitGroupChan` below, since we don't wait for that // waitgroup to finish when exiting this function and thus get @@ -970,9 +983,9 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe rs.stopTimers() // Try HTTPS and ICMP latency check if all STUN probes failed due to - // UDP presumably being blocked. + // UDP presumably being blocked, and we are not constrained to only STUN. // TODO: this should be moved into the probePlan, using probeProto probeHTTPS. 
- if !rs.anyUDP() && ctx.Err() == nil { + if !rs.anyUDP() && ctx.Err() == nil && !onlySTUN { var wg sync.WaitGroup var need []*tailcfg.DERPRegion for rid, reg := range dm.Regions { diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 7b63ec95e..f7f5868c0 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -8,6 +8,7 @@ package udprelay import ( "bytes" + "context" "crypto/rand" "errors" "fmt" @@ -19,11 +20,18 @@ import ( "time" "go4.org/mem" + "tailscale.com/client/local" "tailscale.com/disco" + "tailscale.com/net/netcheck" + "tailscale.com/net/netmon" "tailscale.com/net/packet" + "tailscale.com/net/stun" "tailscale.com/net/udprelay/endpoint" "tailscale.com/tstime" "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" + "tailscale.com/util/set" ) const ( @@ -42,25 +50,22 @@ const ( // Server implements an experimental UDP relay server. type Server struct { - // disco keypair used as part of 3-way bind handshake - disco key.DiscoPrivate - discoPublic key.DiscoPublic - + // The following fields are initialized once and never mutated. + logf logger.Logf + disco key.DiscoPrivate + discoPublic key.DiscoPublic bindLifetime time.Duration steadyStateLifetime time.Duration - - // addrPorts contains the ip:port pairs returned as candidate server - // endpoints in response to an allocation request. - addrPorts []netip.AddrPort - - uc *net.UDPConn - - closeOnce sync.Once - wg sync.WaitGroup - closeCh chan struct{} + bus *eventbus.Bus + uc *net.UDPConn + closeOnce sync.Once + wg sync.WaitGroup + closeCh chan struct{} + netChecker *netcheck.Client + + mu sync.Mutex // guards the following fields + addrPorts []netip.AddrPort // the ip:port pairs returned as candidate endpoints closed bool - - mu sync.Mutex // guards the following fields lamportID uint64 vniPool []uint32 // the pool of available VNIs byVNI map[uint32]*serverEndpoint @@ -270,14 +275,13 @@ func (e *serverEndpoint) isBound() bool { // NewServer constructs a [Server] listening on 0.0.0.0:'port'. IPv6 is not yet // supported. Port may be 0, and what ultimately gets bound is returned as -// 'boundPort'. Supplied 'addrs' are joined with 'boundPort' and returned as -// [endpoint.ServerEndpoint.AddrPorts] in response to Server.AllocateEndpoint() -// requests. +// 'boundPort'. If len(overrideAddrs) > 0 these will be used in place of dynamic +// discovery, which is useful to override in tests. 
// // TODO: IPv6 support -// TODO: dynamic addrs:port discovery -func NewServer(port int, addrs []netip.Addr) (s *Server, boundPort int, err error) { +func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Server, boundPort uint16, err error) { s = &Server{ + logf: logger.WithPrefix(logf, "relayserver"), disco: key.NewDisco(), bindLifetime: defaultBindLifetime, steadyStateLifetime: defaultSteadyStateLifetime, @@ -292,26 +296,120 @@ func NewServer(port int, addrs []netip.Addr) (s *Server, boundPort int, err erro for i := 1; i < 1<<24; i++ { s.vniPool = append(s.vniPool, uint32(i)) } - boundPort, err = s.listenOn(port) + + bus := eventbus.New() + s.bus = bus + netMon, err := netmon.New(s.bus, logf) if err != nil { return nil, 0, err } - addrPorts := make([]netip.AddrPort, 0, len(addrs)) - for _, addr := range addrs { - addrPort, err := netip.ParseAddrPort(net.JoinHostPort(addr.String(), strconv.Itoa(boundPort))) - if err != nil { - return nil, 0, err - } - addrPorts = append(addrPorts, addrPort) + s.netChecker = &netcheck.Client{ + NetMon: netMon, + Logf: logger.WithPrefix(logf, "relayserver: netcheck:"), + SendPacket: func(b []byte, addrPort netip.AddrPort) (int, error) { + return s.uc.WriteToUDPAddrPort(b, addrPort) + }, + } + + boundPort, err = s.listenOn(port) + if err != nil { + return nil, 0, err } - s.addrPorts = addrPorts - s.wg.Add(2) + + s.wg.Add(1) go s.packetReadLoop() + s.wg.Add(1) go s.endpointGCLoop() + if len(overrideAddrs) > 0 { + var addrPorts set.Set[netip.AddrPort] + addrPorts.Make() + for _, addr := range overrideAddrs { + if addr.IsValid() { + addrPorts.Add(netip.AddrPortFrom(addr, boundPort)) + } + } + s.addrPorts = addrPorts.Slice() + } else { + s.wg.Add(1) + go s.addrDiscoveryLoop() + } return s, boundPort, nil } -func (s *Server) listenOn(port int) (int, error) { +func (s *Server) addrDiscoveryLoop() { + defer s.wg.Done() + + timer := time.NewTimer(0) // fire immediately + defer timer.Stop() + + getAddrPorts := func() ([]netip.AddrPort, error) { + var addrPorts set.Set[netip.AddrPort] + addrPorts.Make() + + // get local addresses + localPort := s.uc.LocalAddr().(*net.UDPAddr).Port + ips, _, err := netmon.LocalAddresses() + if err != nil { + return nil, err + } + for _, ip := range ips { + if ip.IsValid() { + addrPorts.Add(netip.AddrPortFrom(ip, uint16(localPort))) + } + } + + // fetch DERPMap to feed to netcheck + derpMapCtx, derpMapCancel := context.WithTimeout(context.Background(), time.Second) + defer derpMapCancel() + localClient := &local.Client{} + // TODO(jwhited): We are in-process so use eventbus or similar. + // local.Client gets us going. + dm, err := localClient.CurrentDERPMap(derpMapCtx) + if err != nil { + return nil, err + } + + // get addrPorts as visible from DERP + netCheckerCtx, netCheckerCancel := context.WithTimeout(context.Background(), netcheck.ReportTimeout) + defer netCheckerCancel() + rep, err := s.netChecker.GetReport(netCheckerCtx, dm, &netcheck.GetReportOpts{ + OnlySTUN: true, + }) + if err != nil { + return nil, err + } + if rep.GlobalV4.IsValid() { + addrPorts.Add(rep.GlobalV4) + } + if rep.GlobalV6.IsValid() { + addrPorts.Add(rep.GlobalV6) + } + // TODO(jwhited): consider logging if rep.MappingVariesByDestIP as + // that's a hint we are not well-positioned to operate as a UDP relay. + return addrPorts.Slice(), nil + } + + for { + select { + case <-timer.C: + // Mirror magicsock behavior for duration between STUN. We consider + // 30s a min bound for NAT timeout. 
+ timer.Reset(tstime.RandomDurationBetween(20*time.Second, 26*time.Second)) + addrPorts, err := getAddrPorts() + if err != nil { + s.logf("error discovering IP:port candidates: %v", err) + } + s.mu.Lock() + s.addrPorts = addrPorts + s.mu.Unlock() + case <-s.closeCh: + return + } + } + +} + +func (s *Server) listenOn(port int) (uint16, error) { uc, err := net.ListenUDP("udp4", &net.UDPAddr{Port: port}) if err != nil { return 0, err @@ -322,13 +420,13 @@ func (s *Server) listenOn(port int) (int, error) { s.uc.Close() return 0, err } - boundPort, err := strconv.Atoi(boundPortStr) + boundPort, err := strconv.ParseUint(boundPortStr, 10, 16) if err != nil { s.uc.Close() return 0, err } s.uc = uc - return boundPort, nil + return uint16(boundPort), nil } // Close closes the server. @@ -343,6 +441,7 @@ func (s *Server) Close() error { clear(s.byDisco) s.vniPool = nil s.closed = true + s.bus.Close() }) return nil } @@ -378,6 +477,13 @@ func (s *Server) endpointGCLoop() { } func (s *Server) handlePacket(from netip.AddrPort, b []byte, uw udpWriter) { + if stun.Is(b) && b[1] == 0x01 { + // A b[1] value of 0x01 (STUN method binding) is sufficiently + // non-overlapping with the Geneve header where the LSB is always 0 + // (part of 6 "reserved" bits). + s.netChecker.ReceiveSTUNPacket(b, from) + return + } gh := packet.GeneveHeader{} err := gh.Decode(b) if err != nil { @@ -426,6 +532,10 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv return endpoint.ServerEndpoint{}, ErrServerClosed } + if len(s.addrPorts) == 0 { + return endpoint.ServerEndpoint{}, errors.New("server addrPorts are not yet known") + } + if discoA.Compare(s.discoPublic) == 0 || discoB.Compare(s.discoPublic) == 0 { return endpoint.ServerEndpoint{}, fmt.Errorf("client disco equals server disco: %s", s.discoPublic.ShortString()) } @@ -439,8 +549,13 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv // TODO: consider ServerEndpoint.BindLifetime -= time.Now()-e.allocatedAt // to give the client a more accurate picture of the bind window. return endpoint.ServerEndpoint{ - ServerDisco: s.discoPublic, - AddrPorts: s.addrPorts, + ServerDisco: s.discoPublic, + // Returning the "latest" addrPorts for an existing allocation is + // the simple choice. It may not be the best depending on client + // behaviors and endpoint state (bound or not). We might want to + // consider storing them (maybe interning) in the [*serverEndpoint] + // at allocation time. 
+ AddrPorts: slices.Clone(s.addrPorts), VNI: e.vni, LamportID: e.lamportID, BindLifetime: tstime.GoDuration{Duration: s.bindLifetime}, @@ -469,7 +584,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv return endpoint.ServerEndpoint{ ServerDisco: s.discoPublic, - AddrPorts: s.addrPorts, + AddrPorts: slices.Clone(s.addrPorts), VNI: e.vni, LamportID: e.lamportID, BindLifetime: tstime.GoDuration{Duration: s.bindLifetime}, diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index 38c7ae5d9..a4e5ca451 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -156,7 +156,7 @@ func TestServer(t *testing.T) { ipv4LoopbackAddr := netip.MustParseAddr("127.0.0.1") - server, _, err := NewServer(0, []netip.Addr{ipv4LoopbackAddr}) + server, _, err := NewServer(t.Logf, 0, []netip.Addr{ipv4LoopbackAddr}) if err != nil { t.Fatal(err) } From b0f7b23efe1c7d02e8caec2a5ad74ab2d5cb138a Mon Sep 17 00:00:00 2001 From: James Tucker Date: Wed, 11 Jun 2025 15:57:55 -0700 Subject: [PATCH 0942/1708] net/netcheck: preserve live home DERP through packet loss During a short period of packet loss, a TCP connection to the home DERP may be maintained. If no other regions emerge as winners, such as when all regions but one are avoided/disallowed as candidates, ensure that the current home region, if still active, is not dropped as the preferred region until it has failed two keepalives. Relatedly apply avoid and no measure no home to ICMP and HTTP checks as intended. Updates tailscale/corp#12894 Updates tailscale/corp#29491 Signed-off-by: James Tucker --- cmd/tailscale/depaware.txt | 2 +- derp/derp.go | 6 +++++- derp/derp_server.go | 2 +- net/netcheck/netcheck.go | 38 ++++++++++++++++++++++------------- net/netcheck/netcheck_test.go | 34 +++++++++++++++++++++++++++++++ 5 files changed, 65 insertions(+), 17 deletions(-) diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 8c3b404b1..69d054ea4 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -85,7 +85,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/control/controlhttp from tailscale.com/cmd/tailscale/cli tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/net/portmapper - tailscale.com/derp from tailscale.com/derp/derphttp + tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp+ tailscale.com/derp/derphttp from tailscale.com/net/netcheck tailscale.com/disco from tailscale.com/derp diff --git a/derp/derp.go b/derp/derp.go index 65acd4321..24c1ca65c 100644 --- a/derp/derp.go +++ b/derp/derp.go @@ -36,9 +36,13 @@ const ( frameHeaderLen = 1 + 4 // frameType byte + 4 byte length keyLen = 32 maxInfoLen = 1 << 20 - keepAlive = 60 * time.Second ) +// KeepAlive is the minimum frequency at which the DERP server sends +// keep alive frames. The server adds some jitter, so this timing is not +// exact, but 2x this value can be considered a missed keep alive. +const KeepAlive = 60 * time.Second + // ProtocolVersion is bumped whenever there's a wire-incompatible change. 
// - version 1 (zero on wire): consistent box headers, in use by employee dev nodes a bit // - version 2: received packets have src addrs in frameRecvPacket at beginning diff --git a/derp/derp_server.go b/derp/derp_server.go index c6a749485..bd67e7eec 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -1789,7 +1789,7 @@ func (c *sclient) sendLoop(ctx context.Context) error { defer c.onSendLoopDone() jitter := rand.N(5 * time.Second) - keepAliveTick, keepAliveTickChannel := c.s.clock.NewTicker(keepAlive + jitter) + keepAliveTick, keepAliveTickChannel := c.s.clock.NewTicker(KeepAlive + jitter) defer keepAliveTick.Stop() var werr error // last write error diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 54627f713..cb622a339 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -23,6 +23,7 @@ import ( "syscall" "time" + "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/envknob" "tailscale.com/hostinfo" @@ -449,7 +450,7 @@ func makeProbePlan(dm *tailcfg.DERPMap, ifState *netmon.State, last *Report, pre // restoration back to the home DERP on the next full netcheck ~5 minutes later // - which is highly disruptive when it causes shifts in geo routed subnet // routers. By always including the home DERP in the incremental netcheck, we - // ensure that the home DERP is always probed, even if it observed a recenet + // ensure that the home DERP is always probed, even if it observed a recent // poor latency sample. This inclusion enables the latency history checks in // home DERP selection to still take effect. // planContainsHome indicates whether the home DERP has been added to the probePlan, @@ -989,7 +990,7 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe var wg sync.WaitGroup var need []*tailcfg.DERPRegion for rid, reg := range dm.Regions { - if !rs.haveRegionLatency(rid) && regionHasDERPNode(reg) { + if !rs.haveRegionLatency(rid) && regionHasDERPNode(reg) && !reg.Avoid && !reg.NoMeasureNoHome { need = append(need, reg) } } @@ -1371,6 +1372,15 @@ const ( // even without receiving a STUN response. // Note: must remain higher than the derp package frameReceiveRecordRate PreferredDERPFrameTime = 8 * time.Second + // PreferredDERPKeepAliveTimeout is 2x the DERP Keep Alive timeout. If there + // is no latency data to make judgements from, but we have heard from our + // current DERP region inside of 2x the KeepAlive window, don't switch DERP + // regions yet, keep the current region. This prevents region flapping / + // home DERP removal during short periods of packet loss where the DERP TCP + // connection may itself naturally recover. + // TODO(raggi): expose shared time bounds from the DERP package rather than + // duplicating them here. + PreferredDERPKeepAliveTimeout = 2 * derp.KeepAlive ) // addReportHistoryAndSetPreferredDERP adds r to the set of recent Reports @@ -1455,13 +1465,10 @@ func (c *Client) addReportHistoryAndSetPreferredDERP(rs *reportState, r *Report, // the STUN probe) since we started the netcheck, or in the past 2s, as // another signal for "this region is still working". 
heardFromOldRegionRecently := false + prevRegionLastHeard := rs.opts.getLastDERPActivity(prevDERP) if changingPreferred { - if lastHeard := rs.opts.getLastDERPActivity(prevDERP); !lastHeard.IsZero() { - now := c.timeNow() - - heardFromOldRegionRecently = lastHeard.After(rs.start) - heardFromOldRegionRecently = heardFromOldRegionRecently || lastHeard.After(now.Add(-PreferredDERPFrameTime)) - } + heardFromOldRegionRecently = prevRegionLastHeard.After(rs.start) + heardFromOldRegionRecently = heardFromOldRegionRecently || prevRegionLastHeard.After(now.Add(-PreferredDERPFrameTime)) } // The old region is accessible if we've heard from it via a non-STUN @@ -1488,17 +1495,20 @@ func (c *Client) addReportHistoryAndSetPreferredDERP(rs *reportState, r *Report, // If the forced DERP region probed successfully, or has recent traffic, // use it. _, haveLatencySample := r.RegionLatency[c.ForcePreferredDERP] - var recentActivity bool - if lastHeard := rs.opts.getLastDERPActivity(c.ForcePreferredDERP); !lastHeard.IsZero() { - now := c.timeNow() - recentActivity = lastHeard.After(rs.start) - recentActivity = recentActivity || lastHeard.After(now.Add(-PreferredDERPFrameTime)) - } + lastHeard := rs.opts.getLastDERPActivity(c.ForcePreferredDERP) + recentActivity := lastHeard.After(rs.start) + recentActivity = recentActivity || lastHeard.After(now.Add(-PreferredDERPFrameTime)) if haveLatencySample || recentActivity { r.PreferredDERP = c.ForcePreferredDERP } } + // If there was no latency data to make judgements on, but there is an + // active DERP connection that has at least been doing KeepAlive recently, + // keep it, rather than dropping it. + if r.PreferredDERP == 0 && prevRegionLastHeard.After(now.Add(-PreferredDERPKeepAliveTimeout)) { + r.PreferredDERP = prevDERP + } } func updateLatency(m map[int]time.Duration, regionID int, d time.Duration) { diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go index 3affa614d..6830e7f27 100644 --- a/net/netcheck/netcheck_test.go +++ b/net/netcheck/netcheck_test.go @@ -18,6 +18,7 @@ import ( "testing" "time" + "tailscale.com/derp" "tailscale.com/net/netmon" "tailscale.com/net/stun/stuntest" "tailscale.com/tailcfg" @@ -419,6 +420,39 @@ func TestAddReportHistoryAndSetPreferredDERP(t *testing.T) { wantPrevLen: 2, wantDERP: 1, }, + { + name: "no_data_keep_home", + steps: []step{ + {0, report("d1", 2, "d2", 3)}, + {30 * time.Second, report()}, + {2 * time.Second, report()}, + {2 * time.Second, report()}, + {2 * time.Second, report()}, + {2 * time.Second, report()}, + }, + opts: &GetReportOpts{ + GetLastDERPActivity: mkLDAFunc(map[int]time.Time{ + 1: startTime, + }), + }, + wantPrevLen: 6, + wantDERP: 1, + }, + { + name: "no_data_home_expires", + steps: []step{ + {0, report("d1", 2, "d2", 3)}, + {30 * time.Second, report()}, + {2 * derp.KeepAlive, report()}, + }, + opts: &GetReportOpts{ + GetLastDERPActivity: mkLDAFunc(map[int]time.Time{ + 1: startTime, + }), + }, + wantPrevLen: 3, + wantDERP: 0, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From 9206e766edc0492a3899633c5ea3a9a8ace12fe0 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Tue, 3 Jun 2025 15:24:31 -0700 Subject: [PATCH 0943/1708] net/packet: cleanup IPv4 fragment guards The first packet fragment guard had an additional guard clause that was incorrectly comparing a length in bytes to a length in octets, and was also comparing what should have been an entire IPv4 through transport header length to a subprotocol payload length. 
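As a rough illustration of the guard being tightened (not part of this patch; the constants and units here are placeholders, not the values defined in net/packet), a standalone sketch of the intended checks for a fragmented TCP packet:

    package main

    import "fmt"

    // Illustrative placeholder constants; the real ones live in net/packet.
    const (
        tcpHeaderLength = 20 // minimal TCP header, in bytes
        minFragBlks     = 3  // assumed minimum later-fragment offset, in 8-byte blocks
    )

    // acceptTCPFragment reports whether a TCP fragment passes the guards
    // described above: a first fragment must carry an entire transport
    // header (bytes compared to bytes), and a later fragment must start far
    // enough in that it cannot sit inside a transport header. fragOfs is
    // assumed here to be the fragment offset in 8-byte blocks; payload is
    // everything after the IPv4 header.
    func acceptTCPFragment(fragOfs int, payload []byte) bool {
        if fragOfs == 0 {
            return len(payload) >= tcpHeaderLength
        }
        return fragOfs >= minFragBlks
    }

    func main() {
        fmt.Println(acceptTCPFragment(0, make([]byte, 19))) // false: truncated TCP header
        fmt.Println(acceptTCPFragment(0, make([]byte, 20))) // true
        fmt.Println(acceptTCPFragment(1, make([]byte, 64))) // false: offset of only one block
        fmt.Println(acceptTCPFragment(3, make([]byte, 64))) // true
    }

The change below keeps per-protocol header-length checks for first fragments and only tightens the fragment-offset reasoning; the sketch collapses that to the TCP case.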
The subprotocol header size guards were otherwise protecting against short transport headers, as is the conservative non-first fragment minimum offset size. Add an explicit disallowing of fragmentation for TSMP for the avoidance of doubt. Updates #cleanup Updates #5727 Signed-off-by: James Tucker --- net/packet/header.go | 1 + net/packet/packet.go | 34 +++++++---- net/packet/packet_test.go | 122 ++++++++++++++++++++++++++++++++++++++ net/packet/tsmp.go | 2 + 4 files changed, 149 insertions(+), 10 deletions(-) diff --git a/net/packet/header.go b/net/packet/header.go index dbe84429a..fa66a8641 100644 --- a/net/packet/header.go +++ b/net/packet/header.go @@ -8,6 +8,7 @@ import ( "math" ) +const igmpHeaderLength = 8 const tcpHeaderLength = 20 const sctpHeaderLength = 12 diff --git a/net/packet/packet.go b/net/packet/packet.go index 876a653ed..34b63aadd 100644 --- a/net/packet/packet.go +++ b/net/packet/packet.go @@ -161,14 +161,8 @@ func (q *Parsed) decode4(b []byte) { if fragOfs == 0 { // This is the first fragment - if moreFrags && len(sub) < minFragBlks { - // Suspiciously short first fragment, dump it. - q.IPProto = unknown - return - } - // otherwise, this is either non-fragmented (the usual case) - // or a big enough initial fragment that we can read the - // whole subprotocol header. + // Every protocol below MUST check that it has at least one entire + // transport header in order to protect against fragment confusion. switch q.IPProto { case ipproto.ICMPv4: if len(sub) < icmp4HeaderLength { @@ -180,6 +174,10 @@ func (q *Parsed) decode4(b []byte) { q.dataofs = q.subofs + icmp4HeaderLength return case ipproto.IGMP: + if len(sub) < igmpHeaderLength { + q.IPProto = unknown + return + } // Keep IPProto, but don't parse anything else // out. return @@ -212,6 +210,15 @@ func (q *Parsed) decode4(b []byte) { q.Dst = withPort(q.Dst, binary.BigEndian.Uint16(sub[2:4])) return case ipproto.TSMP: + // Strictly disallow fragmented TSMP + if moreFrags { + q.IPProto = unknown + return + } + if len(sub) < minTSMPSize { + q.IPProto = unknown + return + } // Inter-tailscale messages. q.dataofs = q.subofs return @@ -224,8 +231,11 @@ func (q *Parsed) decode4(b []byte) { } else { // This is a fragment other than the first one. if fragOfs < minFragBlks { - // First frag was suspiciously short, so we can't - // trust the followup either. + // disallow fragment offsets that are potentially inside of a + // transport header. This is notably asymmetric with the + // first-packet limit, that may allow a first-packet that requires a + // shorter offset than this limit, but without state to tie this + // to the first fragment we can not allow shorter packets. q.IPProto = unknown return } @@ -315,6 +325,10 @@ func (q *Parsed) decode6(b []byte) { q.Dst = withPort(q.Dst, binary.BigEndian.Uint16(sub[2:4])) return case ipproto.TSMP: + if len(sub) < minTSMPSize { + q.IPProto = unknown + return + } // Inter-tailscale messages. 
q.dataofs = q.subofs return diff --git a/net/packet/packet_test.go b/net/packet/packet_test.go index 4fc804a4f..09c2c101d 100644 --- a/net/packet/packet_test.go +++ b/net/packet/packet_test.go @@ -385,6 +385,124 @@ var sctpDecode = Parsed{ Dst: mustIPPort("100.74.70.3:456"), } +var ipv4ShortFirstFragmentBuffer = []byte{ + // IP header (20 bytes) + 0x45, 0x00, 0x00, 0x4f, // Total length 79 bytes + 0x00, 0x01, 0x20, 0x00, // ID, Flags (MoreFragments set, offset 0) + 0x40, 0x06, 0x00, 0x00, // TTL, Protocol (TCP), Checksum + 0x01, 0x02, 0x03, 0x04, // Source IP + 0x05, 0x06, 0x07, 0x08, // Destination IP + // TCP header (20 bytes), but packet is truncated to 59 bytes of TCP data + // (total 79 bytes, 20 for IP) + 0x00, 0x7b, 0x02, 0x37, 0x00, 0x00, 0x12, 0x34, 0x00, 0x00, 0x00, 0x00, + 0x50, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + // Payload (39 bytes) + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, +} + +var ipv4ShortFirstFragmentDecode = Parsed{ + b: ipv4ShortFirstFragmentBuffer, + subofs: 20, + dataofs: 40, + length: len(ipv4ShortFirstFragmentBuffer), + IPVersion: 4, + IPProto: ipproto.TCP, + Src: mustIPPort("1.2.3.4:123"), + Dst: mustIPPort("5.6.7.8:567"), + TCPFlags: 0x12, // SYN + ACK +} + +var ipv4SmallOffsetFragmentBuffer = []byte{ + // IP header (20 bytes) + 0x45, 0x00, 0x00, 0x28, // Total length 40 bytes + 0x00, 0x01, 0x20, 0x08, // ID, Flags (MoreFragments set, offset 8 bytes (0x08 / 8 = 1)) + 0x40, 0x06, 0x00, 0x00, // TTL, Protocol (TCP), Checksum + 0x01, 0x02, 0x03, 0x04, // Source IP + 0x05, 0x06, 0x07, 0x08, // Destination IP + // Payload (20 bytes) - this would be part of the TCP header in a real scenario + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, +} + +var ipv4SmallOffsetFragmentDecode = Parsed{ + b: ipv4SmallOffsetFragmentBuffer, + subofs: 20, // subofs will still be set based on IHL + dataofs: 0, // It's unknown, so dataofs should be 0 + length: len(ipv4SmallOffsetFragmentBuffer), + IPVersion: 4, + IPProto: ipproto.Unknown, // Expected to be Unknown + Src: mustIPPort("1.2.3.4:0"), + Dst: mustIPPort("5.6.7.8:0"), +} + +// First fragment packet missing exactly one byte of the TCP header +var ipv4OneByteShortTCPHeaderBuffer = []byte{ + // IP header (20 bytes) + 0x45, 0x00, 0x00, 0x27, // Total length 51 bytes (20 IP + 19 TCP) + 0x00, 0x01, 0x20, 0x00, // ID, Flags (MoreFragments set, offset 0) + 0x40, 0x06, 0x00, 0x00, // TTL, Protocol (TCP), Checksum + 0x01, 0x02, 0x03, 0x04, // Source IP + 0x05, 0x06, 0x07, 0x08, // Destination IP + // TCP header - only 19 bytes (one byte short of the required 20) + 0x00, 0x7b, 0x02, 0x37, // Source port, Destination port + 0x00, 0x00, 0x12, 0x34, // Sequence number + 0x00, 0x00, 0x00, 0x00, // Acknowledgment number + 0x50, 0x12, 0x01, 0x00, // Data offset, flags, window size + 0x00, 0x00, 0x00, // Checksum (missing the last byte of urgent pointer) +} + +// IPv4 packet with maximum header length (60 bytes = 15 words) and a TCP header that's +// one byte short of being complete +var ipv4MaxHeaderShortTCPBuffer = []byte{ + // IP header with max options (60 bytes) + 0x4F, 0x00, 0x00, 0x4F, // Version (4) + IHL (15), ToS, Total length 79 bytes (60 IP + 19 TCP) + 0x00, 0x01, 0x20, 0x00, // ID, Flags (MoreFragments set, offset 0) + 0x40, 0x06, 0x00, 0x00, // TTL, Protocol 
(TCP), Checksum + 0x01, 0x02, 0x03, 0x04, // Source IP + 0x05, 0x06, 0x07, 0x08, // Destination IP + // IPv4 options (40 bytes) + 0x01, 0x01, 0x01, 0x01, // 4 NOP options (padding) + 0x01, 0x01, 0x01, 0x01, // 4 NOP options (padding) + 0x01, 0x01, 0x01, 0x01, // 4 NOP options (padding) + 0x01, 0x01, 0x01, 0x01, // 4 NOP options (padding) + 0x01, 0x01, 0x01, 0x01, // 4 NOP options (padding) + 0x01, 0x01, 0x01, 0x01, // 4 NOP options (padding) + 0x01, 0x01, 0x01, 0x01, // 4 NOP options (padding) + 0x01, 0x01, 0x01, 0x01, // 4 NOP options (padding) + 0x01, 0x01, 0x01, 0x01, // 4 NOP options (padding) + 0x01, 0x01, 0x01, 0x01, // 4 NOP options (padding) + // TCP header - only 19 bytes (one byte short of the required 20) + 0x00, 0x7b, 0x02, 0x37, // Source port, Destination port + 0x00, 0x00, 0x12, 0x34, // Sequence number + 0x00, 0x00, 0x00, 0x00, // Acknowledgment number + 0x50, 0x12, 0x01, 0x00, // Data offset, flags, window size + 0x00, 0x00, 0x00, // Checksum (missing the last byte of urgent pointer) +} + +var ipv4MaxHeaderShortTCPDecode = Parsed{ + b: ipv4MaxHeaderShortTCPBuffer, + subofs: 60, // 60 bytes for full IPv4 header with max options + dataofs: 0, // It's unknown, so dataofs should be 0 + length: len(ipv4MaxHeaderShortTCPBuffer), + IPVersion: 4, + IPProto: ipproto.Unknown, // Expected to be Unknown + Src: mustIPPort("1.2.3.4:0"), + Dst: mustIPPort("5.6.7.8:0"), +} + +var ipv4OneByteShortTCPHeaderDecode = Parsed{ + b: ipv4OneByteShortTCPHeaderBuffer, + subofs: 20, + dataofs: 0, // It's unknown, so dataofs should be 0 + length: len(ipv4OneByteShortTCPHeaderBuffer), + IPVersion: 4, + IPProto: ipproto.Unknown, // Expected to be Unknown + Src: mustIPPort("1.2.3.4:0"), + Dst: mustIPPort("5.6.7.8:0"), +} + func TestParsedString(t *testing.T) { tests := []struct { name string @@ -450,6 +568,10 @@ func TestDecode(t *testing.T) { {"ipv4_sctp", sctpBuffer, sctpDecode}, {"ipv4_frag", tcp4MediumFragmentBuffer, tcp4MediumFragmentDecode}, {"ipv4_fragtooshort", tcp4ShortFragmentBuffer, tcp4ShortFragmentDecode}, + {"ipv4_short_first_fragment", ipv4ShortFirstFragmentBuffer, ipv4ShortFirstFragmentDecode}, + {"ipv4_small_offset_fragment", ipv4SmallOffsetFragmentBuffer, ipv4SmallOffsetFragmentDecode}, + {"ipv4_one_byte_short_tcp_header", ipv4OneByteShortTCPHeaderBuffer, ipv4OneByteShortTCPHeaderDecode}, + {"ipv4_max_header_short_tcp", ipv4MaxHeaderShortTCPBuffer, ipv4MaxHeaderShortTCPDecode}, {"ip97", mustHexDecode("4500 0019 d186 4000 4061 751d 644a 4603 6449 e549 6865 6c6c 6f"), Parsed{ IPVersion: 4, diff --git a/net/packet/tsmp.go b/net/packet/tsmp.go index 4e004cca2..d78d10d36 100644 --- a/net/packet/tsmp.go +++ b/net/packet/tsmp.go @@ -19,6 +19,8 @@ import ( "tailscale.com/types/ipproto" ) +const minTSMPSize = 7 // the rejected body is 7 bytes + // TailscaleRejectedHeader is a TSMP message that says that one // Tailscale node has rejected the connection from another. Unlike a // TCP RST, this includes a reason. From 923bbd696fabd6a2a11f380cf4368974461d9690 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Wed, 11 Jun 2025 13:56:46 -0700 Subject: [PATCH 0944/1708] prober: record DERP dropped packets as they occur Record dropped packets as soon as they time out, rather than after tx record queues spill over, this will more accurately capture small amounts of packet loss in a timely fashion. 
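For illustration only (not part of this patch; names and values are placeholders), a distilled sketch of the accounting change: expire probe records in place as soon as they exceed the timeout and count each expiry as a drop, rather than counting a drop only when the fixed-capacity record slice overflows:

    package main

    import (
        "fmt"
        "time"
    )

    type txRecord struct {
        at  time.Time
        seq uint64
    }

    // sweepTimeouts removes records older than timeout and counts each one
    // as a dropped packet, mirroring the intent of the applyTimeouts helper
    // added in the diff below (which also holds a mutex and updates a metric).
    func sweepTimeouts(records []txRecord, timeout time.Duration, dropped *int) []txRecord {
        now := time.Now()
        kept := records[:0]
        for _, r := range records {
            if now.Sub(r.at) > timeout {
                *dropped++ // never matched by a reply within the window
            } else {
                kept = append(kept, r)
            }
        }
        return kept
    }

    func main() {
        recs := []txRecord{
            {at: time.Now().Add(-10 * time.Second), seq: 1}, // already expired
            {at: time.Now(), seq: 2},
        }
        var dropped int
        recs = sweepTimeouts(recs, 5*time.Second, &dropped)
        fmt.Println(len(recs), dropped) // 1 1
    }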
Updates tailscale/corp#24522 Signed-off-by: James Tucker --- prober/derp.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/prober/derp.go b/prober/derp.go index e21c8ce76..c7a82317d 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -425,6 +425,24 @@ func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg. txRecords := make([]txRecord, 0, packetsPerSecond*int(packetTimeout.Seconds())) var txRecordsMu sync.Mutex + // applyTimeouts walks over txRecords and expires any records that are older + // than packetTimeout, recording in metrics that they were removed. + applyTimeouts := func() { + txRecordsMu.Lock() + defer txRecordsMu.Unlock() + + now := time.Now() + recs := txRecords[:0] + for _, r := range txRecords { + if now.Sub(r.at) > packetTimeout { + packetsDropped.Add(1) + } else { + recs = append(recs, r) + } + } + txRecords = recs + } + // Send the packets. sendErrC := make(chan error, 1) // TODO: construct a disco CallMeMaybe in the same fashion as magicsock, e.g. magic bytes, src pub, seal payload. @@ -445,10 +463,12 @@ func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg. case <-ctx.Done(): return case <-t.C: + applyTimeouts() txRecordsMu.Lock() if len(txRecords) == cap(txRecords) { txRecords = slices.Delete(txRecords, 0, 1) packetsDropped.Add(1) + log.Printf("unexpected: overflow in txRecords") } txRecords = append(txRecords, txRecord{time.Now(), seq}) txRecordsMu.Unlock() From dac00e99163d88895bdfe1c1606e62938580d89f Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 13 Jun 2025 11:30:55 -0700 Subject: [PATCH 0945/1708] go.mod: bump github.com/cloudflare/circl (#16264) See https://github.com/cloudflare/circl/security/advisories/GHSA-2x5j-vhc8-9cwm This dependency is used in our release builder indirectly via https://github.com/ProtonMail/go-crypto/blob/3b22d8539b95b3b7e76a911053023e6ef9ef51d6/go.mod#L6 We should not be affected, since this is used indirectly for pgp signatures on our .deb releases, where we use only trusted inputs. 
Updates #cleanup Signed-off-by: Andrew Lytvynov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ec98275e5..0c3d05d59 100644 --- a/go.mod +++ b/go.mod @@ -225,7 +225,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charithe/durationcheck v0.0.10 // indirect github.com/chavacava/garif v0.1.0 // indirect - github.com/cloudflare/circl v1.3.7 // indirect + github.com/cloudflare/circl v1.6.1 // indirect github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect github.com/curioswitch/go-reassign v0.2.0 // indirect github.com/daixiang0/gci v0.12.3 // indirect diff --git a/go.sum b/go.sum index 0b521da8c..6f44cd86e 100644 --- a/go.sum +++ b/go.sum @@ -226,8 +226,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/ckaznocha/intrange v0.1.0 h1:ZiGBhvrdsKpoEfzh9CjBfDSZof6QB0ORY5tXasUtiew= github.com/ckaznocha/intrange v0.1.0/go.mod h1:Vwa9Ekex2BrEQMg6zlrWwbs/FtYw7eS5838Q7UjK7TQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= -github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= +github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= From 6a4d92ecef284229bf86286de0bb15e086a79d2c Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 13 Jun 2025 14:39:35 -0500 Subject: [PATCH 0946/1708] ipn/ipnlocal: replace nodeContext with nodeBackend in comments We renamed the type in #15866 but didn't update the comments at the time. Updates #cleanup Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 48 +++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 88adb3973..7b7893bc1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -200,14 +200,14 @@ type LocalBackend struct { portpollOnce sync.Once // guards starting readPoller varRoot string // or empty if SetVarRoot never called logFlushFunc func() // or nil if SetLogFlusher wasn't called - em *expiryManager // non-nil; TODO(nickkhyl): move to nodeContext - sshAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeContext + em *expiryManager // non-nil; TODO(nickkhyl): move to nodeBackend + sshAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeBackend // webClientAtomicBool controls whether the web client is running. This should // be true unless the disable-web-client node attribute has been set. - webClientAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeContext + webClientAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeBackend // exposeRemoteWebClientAtomicBool controls whether the web client is exposed over // Tailscale on port 5252. 
- exposeRemoteWebClientAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeContext + exposeRemoteWebClientAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeBackend shutdownCalled bool // if Shutdown has been called debugSink packet.CaptureSink sockstatLogger *sockstatlog.Logger @@ -228,10 +228,10 @@ type LocalBackend struct { // is never called. getTCPHandlerForFunnelFlow func(srcAddr netip.AddrPort, dstPort uint16) (handler func(net.Conn)) - containsViaIPFuncAtomic syncs.AtomicValue[func(netip.Addr) bool] // TODO(nickkhyl): move to nodeContext - shouldInterceptTCPPortAtomic syncs.AtomicValue[func(uint16) bool] // TODO(nickkhyl): move to nodeContext - shouldInterceptVIPServicesTCPPortAtomic syncs.AtomicValue[func(netip.AddrPort) bool] // TODO(nickkhyl): move to nodeContext - numClientStatusCalls atomic.Uint32 // TODO(nickkhyl): move to nodeContext + containsViaIPFuncAtomic syncs.AtomicValue[func(netip.Addr) bool] // TODO(nickkhyl): move to nodeBackend + shouldInterceptTCPPortAtomic syncs.AtomicValue[func(uint16) bool] // TODO(nickkhyl): move to nodeBackend + shouldInterceptVIPServicesTCPPortAtomic syncs.AtomicValue[func(netip.AddrPort) bool] // TODO(nickkhyl): move to nodeBackend + numClientStatusCalls atomic.Uint32 // TODO(nickkhyl): move to nodeBackend // goTracker accounts for all goroutines started by LocalBacked, primarily // for testing and graceful shutdown purposes. @@ -256,7 +256,7 @@ type LocalBackend struct { // // It is safe for reading with or without holding b.mu, but mutating it in place // or creating a new one must be done with b.mu held. If both mutexes must be held, - // the LocalBackend's mutex must be acquired first before acquiring the nodeContext's mutex. + // the LocalBackend's mutex must be acquired first before acquiring the nodeBackend's mutex. // // We intend to relax this in the future and only require holding b.mu when replacing it, // but that requires a better (strictly ordered?) state machine and better management @@ -265,30 +265,30 @@ type LocalBackend struct { conf *conffile.Config // latest parsed config, or nil if not in declarative mode pm *profileManager // mu guards access - filterHash deephash.Sum // TODO(nickkhyl): move to nodeContext + filterHash deephash.Sum // TODO(nickkhyl): move to nodeBackend httpTestClient *http.Client // for controlclient. nil by default, used by tests. ccGen clientGen // function for producing controlclient; lazily populated sshServer SSHServer // or nil, initialized lazily. appConnector *appc.AppConnector // or nil, initialized when configured. // notifyCancel cancels notifications to the current SetNotifyCallback. notifyCancel context.CancelFunc - cc controlclient.Client // TODO(nickkhyl): move to nodeContext - ccAuto *controlclient.Auto // if cc is of type *controlclient.Auto; TODO(nickkhyl): move to nodeContext + cc controlclient.Client // TODO(nickkhyl): move to nodeBackend + ccAuto *controlclient.Auto // if cc is of type *controlclient.Auto; TODO(nickkhyl): move to nodeBackend machinePrivKey key.MachinePrivate - tka *tkaState // TODO(nickkhyl): move to nodeContext - state ipn.State // TODO(nickkhyl): move to nodeContext + tka *tkaState // TODO(nickkhyl): move to nodeBackend + state ipn.State // TODO(nickkhyl): move to nodeBackend capTailnetLock bool // whether netMap contains the tailnet lock capability // hostinfo is mutated in-place while mu is held. 
- hostinfo *tailcfg.Hostinfo // TODO(nickkhyl): move to nodeContext - nmExpiryTimer tstime.TimerController // for updating netMap on node expiry; can be nil; TODO(nickkhyl): move to nodeContext - activeLogin string // last logged LoginName from netMap; TODO(nickkhyl): move to nodeContext (or remove? it's in [ipn.LoginProfile]). + hostinfo *tailcfg.Hostinfo // TODO(nickkhyl): move to nodeBackend + nmExpiryTimer tstime.TimerController // for updating netMap on node expiry; can be nil; TODO(nickkhyl): move to nodeBackend + activeLogin string // last logged LoginName from netMap; TODO(nickkhyl): move to nodeBackend (or remove? it's in [ipn.LoginProfile]). engineStatus ipn.EngineStatus endpoints []tailcfg.Endpoint blocked bool - keyExpired bool // TODO(nickkhyl): move to nodeContext - authURL string // non-empty if not Running; TODO(nickkhyl): move to nodeContext - authURLTime time.Time // when the authURL was received from the control server; TODO(nickkhyl): move to nodeContext - authActor ipnauth.Actor // an actor who called [LocalBackend.StartLoginInteractive] last, or nil; TODO(nickkhyl): move to nodeContext + keyExpired bool // TODO(nickkhyl): move to nodeBackend + authURL string // non-empty if not Running; TODO(nickkhyl): move to nodeBackend + authURLTime time.Time // when the authURL was received from the control server; TODO(nickkhyl): move to nodeBackend + authActor ipnauth.Actor // an actor who called [LocalBackend.StartLoginInteractive] last, or nil; TODO(nickkhyl): move to nodeBackend egg bool prevIfState *netmon.State peerAPIServer *peerAPIServer // or nil @@ -305,7 +305,7 @@ type LocalBackend struct { lastSelfUpdateState ipnstate.SelfUpdateStatus // capForcedNetfilter is the netfilter that control instructs Linux clients // to use, unless overridden locally. - capForcedNetfilter string // TODO(nickkhyl): move to nodeContext + capForcedNetfilter string // TODO(nickkhyl): move to nodeBackend // offlineAutoUpdateCancel stops offline auto-updates when called. It // should be used via stopOfflineAutoUpdate and // maybeStartOfflineAutoUpdate. It is nil when offline auto-updates are @@ -317,7 +317,7 @@ type LocalBackend struct { // ServeConfig fields. (also guarded by mu) lastServeConfJSON mem.RO // last JSON that was parsed into serveConfig serveConfig ipn.ServeConfigView // or !Valid if none - ipVIPServiceMap netmap.IPServiceMappings // map of VIPService IPs to their corresponding service names; TODO(nickkhyl): move to nodeContext + ipVIPServiceMap netmap.IPServiceMappings // map of VIPService IPs to their corresponding service names; TODO(nickkhyl): move to nodeBackend webClient webClient webClientListeners map[netip.AddrPort]*localListener // listeners for local web client traffic @@ -332,7 +332,7 @@ type LocalBackend struct { // dialPlan is any dial plan that we've received from the control // server during a previous connection; it is cleared on logout. - dialPlan atomic.Pointer[tailcfg.ControlDialPlan] // TODO(nickkhyl): maybe move to nodeContext? + dialPlan atomic.Pointer[tailcfg.ControlDialPlan] // TODO(nickkhyl): maybe move to nodeBackend? // tkaSyncLock is used to make tkaSyncIfNeeded an exclusive // section. This is needed to stop two map-responses in quick succession From fe391d569442283ed4f10e1d57c9e845290275bb Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Fri, 13 Jun 2025 15:47:35 -0700 Subject: [PATCH 0947/1708] client/local: use an iterator to stream bus events (#16269) This means the caller does not have to remember to close the reader, and avoids having to duplicate the logic to decode JSON into events. Updates #15160 Change-Id: I20186fabb02f72522f61d5908c4cc80b86b8936b Signed-off-by: M. J. Fromberger --- client/local/local.go | 57 +++++++++++++++++++++++++------------- cmd/derper/depaware.txt | 2 +- cmd/tailscale/cli/debug.go | 11 ++------ 3 files changed, 41 insertions(+), 29 deletions(-) diff --git a/client/local/local.go b/client/local/local.go index bc643ad79..12bf2f7d6 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -1,12 +1,11 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build go1.22 - // Package local contains a Go client for the Tailscale LocalAPI. package local import ( + "bufio" "bytes" "cmp" "context" @@ -16,6 +15,7 @@ import ( "errors" "fmt" "io" + "iter" "net" "net/http" "net/http/httptrace" @@ -42,6 +42,7 @@ import ( "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/tkatype" + "tailscale.com/util/eventbus" "tailscale.com/util/syspolicy/setting" ) @@ -414,24 +415,42 @@ func (lc *Client) TailDaemonLogs(ctx context.Context) (io.Reader, error) { return res.Body, nil } -// StreamBusEvents returns a stream of the Tailscale bus events as they arrive. -// Close the context to stop the stream. -// Expected response from the server is newline-delimited JSON. -// The caller must close the reader when it is finished reading. -func (lc *Client) StreamBusEvents(ctx context.Context) (io.ReadCloser, error) { - req, err := http.NewRequestWithContext(ctx, "GET", - "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-bus-events", nil) - if err != nil { - return nil, err - } - res, err := lc.doLocalRequestNiceError(req) - if err != nil { - return nil, err - } - if res.StatusCode != http.StatusOK { - return nil, errors.New(res.Status) +// StreamBusEvents returns an iterator of Tailscale bus events as they arrive. +// Each pair is a valid event and a nil error, or a zero event a non-nil error. +// In case of error, the iterator ends after the pair reporting the error. +// Iteration stops if ctx ends. +func (lc *Client) StreamBusEvents(ctx context.Context) iter.Seq2[eventbus.DebugEvent, error] { + return func(yield func(eventbus.DebugEvent, error) bool) { + req, err := http.NewRequestWithContext(ctx, "GET", + "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-bus-events", nil) + if err != nil { + yield(eventbus.DebugEvent{}, err) + return + } + res, err := lc.doLocalRequestNiceError(req) + if err != nil { + yield(eventbus.DebugEvent{}, err) + return + } + if res.StatusCode != http.StatusOK { + yield(eventbus.DebugEvent{}, errors.New(res.Status)) + return + } + defer res.Body.Close() + dec := json.NewDecoder(bufio.NewReader(res.Body)) + for { + var evt eventbus.DebugEvent + if err := dec.Decode(&evt); err == io.EOF { + return + } else if err != nil { + yield(eventbus.DebugEvent{}, err) + return + } + if !yield(evt, nil) { + return + } + } } - return res.Body, nil } // Pprof returns a pprof profile of the Tailscale daemon. 
diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 640e64d6c..7adbf397f 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -157,7 +157,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/hostinfo+ - tailscale.com/util/eventbus from tailscale.com/net/netmon + tailscale.com/util/eventbus from tailscale.com/net/netmon+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale tailscale.com/util/lineiter from tailscale.com/hostinfo+ diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 025382ca9..ec8a0700d 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -791,21 +791,14 @@ func runDaemonLogs(ctx context.Context, args []string) error { } func runDaemonBusEvents(ctx context.Context, args []string) error { - logs, err := localClient.StreamBusEvents(ctx) - if err != nil { - return err - } - defer logs.Close() - d := json.NewDecoder(bufio.NewReader(logs)) - for { - var line eventbus.DebugEvent - err := d.Decode(&line) + for line, err := range localClient.StreamBusEvents(ctx) { if err != nil { return err } fmt.Printf("[%d][%q][from: %q][to: %q] %s\n", line.Count, line.Type, line.From, line.To, line.Event) } + return nil } var metricsArgs struct { From 733bfaeffed5c7cc3a05478b7671e85fafd5689c Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 13 Jun 2025 12:51:40 -0500 Subject: [PATCH 0948/1708] ipn/ipnlocal: signal nodeBackend readiness and shutdown We update LocalBackend to shut down the current nodeBackend when switching to a different node, and to mark the new node's nodeBackend as ready when the switch completes. Updates tailscale/corp#28014 Updates tailscale/corp#29543 Updates #12614 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 43 ++++++++--- ipn/ipnlocal/node_backend.go | 82 ++++++++++++++++++-- ipn/ipnlocal/node_backend_test.go | 121 ++++++++++++++++++++++++++++++ 3 files changed, 230 insertions(+), 16 deletions(-) create mode 100644 ipn/ipnlocal/node_backend_test.go diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 7b7893bc1..daedb1e19 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -168,6 +168,17 @@ type watchSession struct { var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detected") +var ( + // errShutdown indicates that the [LocalBackend.Shutdown] was called. + errShutdown = errors.New("shutting down") + + // errNodeContextChanged indicates that [LocalBackend] has switched + // to a different [localNodeContext], usually due to a profile change. + // It is used as a context cancellation cause for the old context + // and can be returned when an operation is performed on it. + errNodeContextChanged = errors.New("profile changed") +) + // LocalBackend is the glue between the major pieces of the Tailscale // network software: the cloud control plane (via controlclient), the // network data plane (via wgengine), and the user-facing UIs and CLIs @@ -180,11 +191,11 @@ var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detecte // state machine generates events back out to zero or more components. type LocalBackend struct { // Elements that are thread-safe or constant after construction. 
- ctx context.Context // canceled by [LocalBackend.Shutdown] - ctxCancel context.CancelFunc // cancels ctx - logf logger.Logf // general logging - keyLogf logger.Logf // for printing list of peers on change - statsLogf logger.Logf // for printing peers stats on change + ctx context.Context // canceled by [LocalBackend.Shutdown] + ctxCancel context.CancelCauseFunc // cancels ctx + logf logger.Logf // general logging + keyLogf logger.Logf // for printing list of peers on change + statsLogf logger.Logf // for printing peers stats on change sys *tsd.System health *health.Tracker // always non-nil metrics metrics @@ -463,7 +474,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo envknob.LogCurrent(logf) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancelCause(context.Background()) clock := tstime.StdClock{} // Until we transition to a Running state, use a canceled context for @@ -503,7 +514,10 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo captiveCancel: nil, // so that we start checkCaptivePortalLoop when Running needsCaptiveDetection: make(chan bool), } - b.currentNodeAtomic.Store(newNodeBackend()) + nb := newNodeBackend(ctx) + b.currentNodeAtomic.Store(nb) + nb.ready() + mConn.SetNetInfoCallback(b.setNetInfo) if sys.InitialConfig != nil { @@ -586,8 +600,10 @@ func (b *LocalBackend) currentNode() *nodeBackend { return v } // Auto-init one in tests for LocalBackend created without the NewLocalBackend constructor... - v := newNodeBackend() - b.currentNodeAtomic.CompareAndSwap(nil, v) + v := newNodeBackend(cmp.Or(b.ctx, context.Background())) + if b.currentNodeAtomic.CompareAndSwap(nil, v) { + v.ready() + } return b.currentNodeAtomic.Load() } @@ -1089,8 +1105,9 @@ func (b *LocalBackend) Shutdown() { if cc != nil { cc.Shutdown() } + b.ctxCancel(errShutdown) + b.currentNode().shutdown(errShutdown) extHost.Shutdown() - b.ctxCancel() b.e.Close() <-b.e.Done() b.awaitNoGoroutinesInTest() @@ -6992,7 +7009,11 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err // down, so no need to do any work. return nil } - b.currentNodeAtomic.Store(newNodeBackend()) + newNode := newNodeBackend(b.ctx) + if oldNode := b.currentNodeAtomic.Swap(newNode); oldNode != nil { + oldNode.shutdown(errNodeContextChanged) + } + defer newNode.ready() b.setNetMapLocked(nil) // Reset netmap. b.updateFilterLocked(ipn.PrefsView{}) // Reset the NetworkMap in the engine diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index fb77f38eb..361d10bb6 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -5,6 +5,7 @@ package ipnlocal import ( "cmp" + "context" "net/netip" "slices" "sync" @@ -39,7 +40,7 @@ import ( // Two pointers to different [nodeBackend] instances represent different local nodes. // However, there's currently a bug where a new [nodeBackend] might not be created // during an implicit node switch (see tailscale/corp#28014). - +// // In the future, we might want to include at least the following in this struct (in addition to the current fields). // However, not everything should be exported or otherwise made available to the outside world (e.g. [ipnext] extensions, // peer API handlers, etc.). @@ -61,6 +62,9 @@ import ( // Even if they're tied to the local node, instead of moving them here, we should extract the entire feature // into a separate package and have it install proper hooks. 
type nodeBackend struct { + ctx context.Context // canceled by [nodeBackend.shutdown] + ctxCancel context.CancelCauseFunc // cancels ctx + // filterAtomic is a stateful packet filter. Immutable once created, but can be // replaced with a new one. filterAtomic atomic.Pointer[filter.Filter] @@ -68,6 +72,9 @@ type nodeBackend struct { // TODO(nickkhyl): maybe use sync.RWMutex? mu sync.Mutex // protects the following fields + shutdownOnce sync.Once // guards calling [nodeBackend.shutdown] + readyCh chan struct{} // closed by [nodeBackend.ready]; nil after shutdown + // NetMap is the most recently set full netmap from the controlclient. // It can't be mutated in place once set. Because it can't be mutated in place, // delta updates from the control server don't apply to it. Instead, use @@ -88,12 +95,24 @@ type nodeBackend struct { nodeByAddr map[netip.Addr]tailcfg.NodeID } -func newNodeBackend() *nodeBackend { - cn := &nodeBackend{} +func newNodeBackend(ctx context.Context) *nodeBackend { + ctx, ctxCancel := context.WithCancelCause(ctx) + nb := &nodeBackend{ + ctx: ctx, + ctxCancel: ctxCancel, + readyCh: make(chan struct{}), + } // Default filter blocks everything and logs nothing. noneFilter := filter.NewAllowNone(logger.Discard, &netipx.IPSet{}) - cn.filterAtomic.Store(noneFilter) - return cn + nb.filterAtomic.Store(noneFilter) + return nb +} + +// Context returns a context that is canceled when the [nodeBackend] shuts down, +// either because [LocalBackend] is switching to a different [nodeBackend] +// or is shutting down itself. +func (nb *nodeBackend) Context() context.Context { + return nb.ctx } func (nb *nodeBackend) Self() tailcfg.NodeView { @@ -475,6 +494,59 @@ func (nb *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (doh return exitNodeCanProxyDNS(nb.netMap, nb.peers, exitNodeID) } +// ready signals that [LocalBackend] has completed the switch to this [nodeBackend] +// and any pending calls to [nodeBackend.Wait] must be unblocked. +func (nb *nodeBackend) ready() { + nb.mu.Lock() + defer nb.mu.Unlock() + if nb.readyCh != nil { + close(nb.readyCh) + } +} + +// Wait blocks until [LocalBackend] completes the switch to this [nodeBackend] +// and calls [nodeBackend.ready]. It returns an error if the provided context +// is canceled or if the [nodeBackend] shuts down or is already shut down. +// +// It must not be called with the [LocalBackend]'s internal mutex held as [LocalBackend] +// may need to acquire it to complete the switch. +// +// TODO(nickkhyl): Relax this restriction once [LocalBackend]'s state machine +// runs in its own goroutine, or if we decide that waiting for the state machine +// restart to finish isn't necessary for [LocalBackend] to consider the switch complete. +// We mostly need this because of [LocalBackend.Start] acquiring b.mu and the fact that +// methods like [LocalBackend.SwitchProfile] must report any errors returned by it. +// Perhaps we could report those errors asynchronously as [health.Warnable]s? +func (nb *nodeBackend) Wait(ctx context.Context) error { + nb.mu.Lock() + readyCh := nb.readyCh + nb.mu.Unlock() + + select { + case <-ctx.Done(): + return ctx.Err() + case <-nb.ctx.Done(): + return context.Cause(nb.ctx) + case <-readyCh: + return nil + } +} + +// shutdown shuts down the [nodeBackend] and cancels its context +// with the provided cause. 
+func (nb *nodeBackend) shutdown(cause error) { + nb.shutdownOnce.Do(func() { + nb.doShutdown(cause) + }) +} + +func (nb *nodeBackend) doShutdown(cause error) { + nb.mu.Lock() + defer nb.mu.Unlock() + nb.ctxCancel(cause) + nb.readyCh = nil +} + // dnsConfigForNetmap returns a *dns.Config for the given netmap, // prefs, client OS version, and cloud hosting environment. // diff --git a/ipn/ipnlocal/node_backend_test.go b/ipn/ipnlocal/node_backend_test.go new file mode 100644 index 000000000..a82b60a9a --- /dev/null +++ b/ipn/ipnlocal/node_backend_test.go @@ -0,0 +1,121 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnlocal + +import ( + "context" + "errors" + "testing" + "time" +) + +func TestNodeBackendReadiness(t *testing.T) { + nb := newNodeBackend(t.Context()) + + // The node backend is not ready until [nodeBackend.ready] is called, + // and [nodeBackend.Wait] should fail with [context.DeadlineExceeded]. + ctx, cancelCtx := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancelCtx() + if err := nb.Wait(ctx); err != ctx.Err() { + t.Fatalf("Wait: got %v; want %v", err, ctx.Err()) + } + + // Start a goroutine to wait for the node backend to become ready. + waitDone := make(chan struct{}) + go func() { + if err := nb.Wait(context.Background()); err != nil { + t.Errorf("Wait: got %v; want nil", err) + } + close(waitDone) + }() + + // Call [nodeBackend.ready] to indicate that the node backend is now ready. + go nb.ready() + + // Once the backend is called, [nodeBackend.Wait] should return immediately without error. + if err := nb.Wait(context.Background()); err != nil { + t.Fatalf("Wait: got %v; want nil", err) + } + // And any pending waiters should also be unblocked. + <-waitDone +} + +func TestNodeBackendShutdown(t *testing.T) { + nb := newNodeBackend(t.Context()) + + shutdownCause := errors.New("test shutdown") + + // Start a goroutine to wait for the node backend to become ready. + // This test expects it to block until the node backend shuts down + // and then return the specified shutdown cause. + waitDone := make(chan struct{}) + go func() { + if err := nb.Wait(context.Background()); err != shutdownCause { + t.Errorf("Wait: got %v; want %v", err, shutdownCause) + } + close(waitDone) + }() + + // Call [nodeBackend.shutdown] to indicate that the node backend is shutting down. + nb.shutdown(shutdownCause) + + // Calling it again is fine, but should not change the shutdown cause. + nb.shutdown(errors.New("test shutdown again")) + + // After shutdown, [nodeBackend.Wait] should return with the specified shutdown cause. + if err := nb.Wait(context.Background()); err != shutdownCause { + t.Fatalf("Wait: got %v; want %v", err, shutdownCause) + } + // The context associated with the node backend should also be cancelled + // and its cancellation cause should match the shutdown cause. + if err := nb.Context().Err(); !errors.Is(err, context.Canceled) { + t.Fatalf("Context.Err: got %v; want %v", err, context.Canceled) + } + if cause := context.Cause(nb.Context()); cause != shutdownCause { + t.Fatalf("Cause: got %v; want %v", cause, shutdownCause) + } + // And any pending waiters should also be unblocked. + <-waitDone +} + +func TestNodeBackendReadyAfterShutdown(t *testing.T) { + nb := newNodeBackend(t.Context()) + + shutdownCause := errors.New("test shutdown") + nb.shutdown(shutdownCause) + nb.ready() // Calling ready after shutdown is a no-op, but should not panic, etc. 
+ if err := nb.Wait(context.Background()); err != shutdownCause { + t.Fatalf("Wait: got %v; want %v", err, shutdownCause) + } +} + +func TestNodeBackendParentContextCancellation(t *testing.T) { + ctx, cancelCtx := context.WithCancel(context.Background()) + nb := newNodeBackend(ctx) + + cancelCtx() + + // Cancelling the parent context should cause [nodeBackend.Wait] + // to return with [context.Canceled]. + if err := nb.Wait(context.Background()); !errors.Is(err, context.Canceled) { + t.Fatalf("Wait: got %v; want %v", err, context.Canceled) + } + + // And the node backend's context should also be cancelled. + if err := nb.Context().Err(); !errors.Is(err, context.Canceled) { + t.Fatalf("Context.Err: got %v; want %v", err, context.Canceled) + } +} + +func TestNodeBackendConcurrentReadyAndShutdown(t *testing.T) { + nb := newNodeBackend(t.Context()) + + // Calling [nodeBackend.ready] and [nodeBackend.shutdown] concurrently + // should not cause issues, and [nodeBackend.Wait] should unblock, + // but the result of [nodeBackend.Wait] is intentionally undefined. + go nb.ready() + go nb.shutdown(errors.New("test shutdown")) + + nb.Wait(context.Background()) +} From e29e3c150ff2a8fc5ecbe016ec275a9097a02b2e Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Mon, 16 Jun 2025 12:21:59 +0100 Subject: [PATCH 0949/1708] cmd/k8s-operator: ensure that TLS resources are updated for HA Ingress (#16262) Ensure that if the ProxyGroup for HA Ingress changes, the TLS Secret and Role and RoleBinding that allow proxies to read/write to it are updated. Fixes #16259 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/ingress-for-pg.go | 31 ++- cmd/k8s-operator/ingress-for-pg_test.go | 296 +++++++++++++----------- cmd/k8s-operator/svc-for-pg_test.go | 10 +- 3 files changed, 183 insertions(+), 154 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 66d74292b..ea31dbd63 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -252,7 +252,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin return false, fmt.Errorf("error determining DNS name base: %w", err) } dnsName := hostname + "." + tcd - if err := r.ensureCertResources(ctx, pgName, dnsName, ing); err != nil { + if err := r.ensureCertResources(ctx, pg, dnsName, ing); err != nil { return false, fmt.Errorf("error ensuring cert resources: %w", err) } @@ -931,18 +931,31 @@ func ownersAreSetAndEqual(a, b *tailscale.VIPService) bool { // (domain) is a valid Kubernetes resource name. // https://github.com/tailscale/tailscale/blob/8b1e7f646ee4730ad06c9b70c13e7861b964949b/util/dnsname/dnsname.go#L99 // https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names -func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pgName, domain string, ing *networkingv1.Ingress) error { - secret := certSecret(pgName, r.tsNamespace, domain, ing) - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, secret, nil); err != nil { +func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pg *tsapi.ProxyGroup, domain string, ing *networkingv1.Ingress) error { + secret := certSecret(pg.Name, r.tsNamespace, domain, ing) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, secret, func(s *corev1.Secret) { + // Labels might have changed if the Ingress has been updated to use a + // different ProxyGroup. 
+ s.Labels = secret.Labels + }); err != nil { return fmt.Errorf("failed to create or update Secret %s: %w", secret.Name, err) } - role := certSecretRole(pgName, r.tsNamespace, domain) - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, nil); err != nil { + role := certSecretRole(pg.Name, r.tsNamespace, domain) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { + // Labels might have changed if the Ingress has been updated to use a + // different ProxyGroup. + r.Labels = role.Labels + }); err != nil { return fmt.Errorf("failed to create or update Role %s: %w", role.Name, err) } - rb := certSecretRoleBinding(pgName, r.tsNamespace, domain) - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, rb, nil); err != nil { - return fmt.Errorf("failed to create or update RoleBinding %s: %w", rb.Name, err) + rolebinding := certSecretRoleBinding(pg.Name, r.tsNamespace, domain) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, rolebinding, func(rb *rbacv1.RoleBinding) { + // Labels and subjects might have changed if the Ingress has been updated to use a + // different ProxyGroup. + rb.Labels = rolebinding.Labels + rb.Subjects = rolebinding.Subjects + }); err != nil { + return fmt.Errorf("failed to create or update RoleBinding %s: %w", rolebinding.Name, err) } return nil } diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index b487d660c..05f482792 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -69,7 +69,7 @@ func TestIngressPGReconciler(t *testing.T) { expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:my-svc", false) verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"}) - verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) + verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:my-svc"}) // Verify that Role and RoleBinding have been created for the first Ingress. // Do not verify the cert Secret as that was already verified implicitly above. @@ -132,7 +132,7 @@ func TestIngressPGReconciler(t *testing.T) { verifyServeConfig(t, fc, "svc:my-other-svc", false) verifyTailscaleService(t, ft, "svc:my-other-svc", []string{"tcp:443"}) - // Verify that Role and RoleBinding have been created for the first Ingress. + // Verify that Role and RoleBinding have been created for the second Ingress. // Do not verify the cert Secret as that was already verified implicitly above. 
expectEqual(t, fc, certSecretRole("test-pg", "operator-ns", "my-other-svc.ts.net")) expectEqual(t, fc, certSecretRoleBinding("test-pg", "operator-ns", "my-other-svc.ts.net")) @@ -141,7 +141,7 @@ func TestIngressPGReconciler(t *testing.T) { verifyServeConfig(t, fc, "svc:my-svc", false) verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"}) - verifyTailscaledConfig(t, fc, []string{"svc:my-svc", "svc:my-other-svc"}) + verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:my-svc", "svc:my-other-svc"}) // Delete second Ingress if err := fc.Delete(context.Background(), ing2); err != nil { @@ -172,11 +172,20 @@ func TestIngressPGReconciler(t *testing.T) { t.Error("second Ingress service config was not cleaned up") } - verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) + verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:my-svc"}) expectMissing[corev1.Secret](t, fc, "operator-ns", "my-other-svc.ts.net") expectMissing[rbacv1.Role](t, fc, "operator-ns", "my-other-svc.ts.net") expectMissing[rbacv1.RoleBinding](t, fc, "operator-ns", "my-other-svc.ts.net") + // Test Ingress ProxyGroup change + createPGResources(t, fc, "test-pg-second") + mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { + ing.Annotations["tailscale.com/proxy-group"] = "test-pg-second" + }) + expectReconciled(t, ingPGR, "default", "test-ingress") + expectEqual(t, fc, certSecretRole("test-pg-second", "operator-ns", "my-svc.ts.net")) + expectEqual(t, fc, certSecretRoleBinding("test-pg-second", "operator-ns", "my-svc.ts.net")) + // Delete the first Ingress and verify cleanup if err := fc.Delete(context.Background(), ing); err != nil { t.Fatalf("deleting Ingress: %v", err) @@ -187,7 +196,7 @@ func TestIngressPGReconciler(t *testing.T) { // Verify the ConfigMap was cleaned up cm = &corev1.ConfigMap{} if err := fc.Get(context.Background(), types.NamespacedName{ - Name: "test-pg-ingress-config", + Name: "test-pg-second-ingress-config", Namespace: "operator-ns", }, cm); err != nil { t.Fatalf("getting ConfigMap: %v", err) @@ -201,7 +210,7 @@ func TestIngressPGReconciler(t *testing.T) { if len(cfg.Services) > 0 { t.Error("serve config not cleaned up") } - verifyTailscaledConfig(t, fc, nil) + verifyTailscaledConfig(t, fc, "test-pg-second", nil) // Add verification that cert resources were cleaned up expectMissing[corev1.Secret](t, fc, "operator-ns", "my-svc.ts.net") @@ -245,7 +254,7 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:my-svc", false) verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"}) - verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) + verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:my-svc"}) // Update the Ingress hostname and make sure the original Tailscale Service is deleted. 
mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { @@ -256,7 +265,7 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:updated-svc", false) verifyTailscaleService(t, ft, "svc:updated-svc", []string{"tcp:443"}) - verifyTailscaledConfig(t, fc, []string{"svc:updated-svc"}) + verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:updated-svc"}) _, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName("svc:my-svc")) if err == nil { @@ -550,6 +559,117 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { } } +func TestIngressPGReconciler_MultiCluster(t *testing.T) { + ingPGR, fc, ft := setupIngressTest(t) + ingPGR.operatorID = "operator-1" + + // Create initial Ingress + ing := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"my-svc"}}, + }, + }, + } + mustCreate(t, fc, ing) + + // Simulate existing Tailscale Service from another cluster + existingVIPSvc := &tailscale.VIPService{ + Name: "svc:my-svc", + Annotations: map[string]string{ + ownerAnnotation: `{"ownerrefs":[{"operatorID":"operator-2"}]}`, + }, + } + ft.vipServices = map[tailcfg.ServiceName]*tailscale.VIPService{ + "svc:my-svc": existingVIPSvc, + } + + // Verify reconciliation adds our operator reference + expectReconciled(t, ingPGR, "default", "test-ingress") + + tsSvc, err := ft.GetVIPService(context.Background(), "svc:my-svc") + if err != nil { + t.Fatalf("getting Tailscale Service: %v", err) + } + if tsSvc == nil { + t.Fatal("Tailscale Service not found") + } + + o, err := parseOwnerAnnotation(tsSvc) + if err != nil { + t.Fatalf("parsing owner annotation: %v", err) + } + + wantOwnerRefs := []OwnerRef{ + {OperatorID: "operator-2"}, + {OperatorID: "operator-1"}, + } + if !reflect.DeepEqual(o.OwnerRefs, wantOwnerRefs) { + t.Errorf("incorrect owner refs\ngot: %+v\nwant: %+v", o.OwnerRefs, wantOwnerRefs) + } + + // Delete the Ingress and verify Tailscale Service still exists with one owner ref + if err := fc.Delete(context.Background(), ing); err != nil { + t.Fatalf("deleting Ingress: %v", err) + } + expectRequeue(t, ingPGR, "default", "test-ingress") + + tsSvc, err = ft.GetVIPService(context.Background(), "svc:my-svc") + if err != nil { + t.Fatalf("getting Tailscale Service after deletion: %v", err) + } + if tsSvc == nil { + t.Fatal("Tailscale Service was incorrectly deleted") + } + + o, err = parseOwnerAnnotation(tsSvc) + if err != nil { + t.Fatalf("parsing owner annotation: %v", err) + } + + wantOwnerRefs = []OwnerRef{ + {OperatorID: "operator-2"}, + } + if !reflect.DeepEqual(o.OwnerRefs, wantOwnerRefs) { + t.Errorf("incorrect owner refs after deletion\ngot: %+v\nwant: %+v", o.OwnerRefs, wantOwnerRefs) + } +} + +func populateTLSSecret(ctx context.Context, c client.Client, pgName, domain string) error { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: domain, + Namespace: "operator-ns", + Labels: map[string]string{ + kubetypes.LabelManaged: "true", + labelProxyGroup: pgName, + labelDomain: domain, + kubetypes.LabelSecretType: "certs", + }, + }, + Type: corev1.SecretTypeTLS, + Data: 
map[string][]byte{ + corev1.TLSCertKey: []byte("fake-cert"), + corev1.TLSPrivateKeyKey: []byte("fake-key"), + }, + } + + _, err := createOrUpdate(ctx, c, "operator-ns", secret, func(s *corev1.Secret) { + s.Data = secret.Data + }) + return err +} + func verifyTailscaleService(t *testing.T, ft *fakeTSClient, serviceName string, wantPorts []string) { t.Helper() tsSvc, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName(serviceName)) @@ -618,7 +738,7 @@ func verifyServeConfig(t *testing.T, fc client.Client, serviceName string, wantH } } -func verifyTailscaledConfig(t *testing.T, fc client.Client, expectedServices []string) { +func verifyTailscaledConfig(t *testing.T, fc client.Client, pgName string, expectedServices []string) { t.Helper() var expected string if expectedServices != nil && len(expectedServices) > 0 { @@ -630,9 +750,9 @@ func verifyTailscaledConfig(t *testing.T, fc client.Client, expectedServices []s } expectEqual(t, fc, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: pgConfigSecretName("test-pg", 0), + Name: pgConfigSecretName(pgName, 0), Namespace: "operator-ns", - Labels: pgSecretLabels("test-pg", "config"), + Labels: pgSecretLabels(pgName, "config"), }, Data: map[string][]byte{ tsoperator.TailscaledConfigFileName(106): []byte(fmt.Sprintf(`{"Version":""%s}`, expected)), @@ -640,53 +760,44 @@ func verifyTailscaledConfig(t *testing.T, fc client.Client, expectedServices []s }) } -func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeTSClient) { - tsIngressClass := &networkingv1.IngressClass{ - ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, - Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}, - } - +func createPGResources(t *testing.T, fc client.Client, pgName string) { + t.Helper() // Pre-create the ProxyGroup pg := &tsapi.ProxyGroup{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-pg", + Name: pgName, Generation: 1, }, Spec: tsapi.ProxyGroupSpec{ Type: tsapi.ProxyGroupTypeIngress, }, } + mustCreate(t, fc, pg) // Pre-create the ConfigMap for the ProxyGroup pgConfigMap := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-pg-ingress-config", + Name: fmt.Sprintf("%s-ingress-config", pgName), Namespace: "operator-ns", }, BinaryData: map[string][]byte{ "serve-config.json": []byte(`{"Services":{}}`), }, } + mustCreate(t, fc, pgConfigMap) // Pre-create a config Secret for the ProxyGroup pgCfgSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: pgConfigSecretName("test-pg", 0), + Name: pgConfigSecretName(pgName, 0), Namespace: "operator-ns", - Labels: pgSecretLabels("test-pg", "config"), + Labels: pgSecretLabels(pgName, "config"), }, Data: map[string][]byte{ tsoperator.TailscaledConfigFileName(106): []byte("{}"), }, } - - fc := fake.NewClientBuilder(). - WithScheme(tsapi.GlobalScheme). - WithObjects(pg, pgCfgSecret, pgConfigMap, tsIngressClass). - WithStatusSubresource(pg). 
- Build() - - // Set ProxyGroup status to ready + mustCreate(t, fc, pgCfgSecret) pg.Status.Conditions = []metav1.Condition{ { Type: string(tsapi.ProxyGroupReady), @@ -697,6 +808,22 @@ func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeT if err := fc.Status().Update(context.Background(), pg); err != nil { t.Fatal(err) } +} + +func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeTSClient) { + tsIngressClass := &networkingv1.IngressClass{ + ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, + Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}, + } + + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(tsIngressClass). + WithStatusSubresource(&tsapi.ProxyGroup{}). + Build() + + createPGResources(t, fc, "test-pg") + fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} ft := &fakeTSClient{} @@ -726,114 +853,3 @@ func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeT return ingPGR, fc, ft } - -func TestIngressPGReconciler_MultiCluster(t *testing.T) { - ingPGR, fc, ft := setupIngressTest(t) - ingPGR.operatorID = "operator-1" - - // Create initial Ingress - ing := &networkingv1.Ingress{ - TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-ingress", - Namespace: "default", - UID: types.UID("1234-UID"), - Annotations: map[string]string{ - "tailscale.com/proxy-group": "test-pg", - }, - }, - Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), - TLS: []networkingv1.IngressTLS{ - {Hosts: []string{"my-svc"}}, - }, - }, - } - mustCreate(t, fc, ing) - - // Simulate existing Tailscale Service from another cluster - existingVIPSvc := &tailscale.VIPService{ - Name: "svc:my-svc", - Annotations: map[string]string{ - ownerAnnotation: `{"ownerrefs":[{"operatorID":"operator-2"}]}`, - }, - } - ft.vipServices = map[tailcfg.ServiceName]*tailscale.VIPService{ - "svc:my-svc": existingVIPSvc, - } - - // Verify reconciliation adds our operator reference - expectReconciled(t, ingPGR, "default", "test-ingress") - - tsSvc, err := ft.GetVIPService(context.Background(), "svc:my-svc") - if err != nil { - t.Fatalf("getting Tailscale Service: %v", err) - } - if tsSvc == nil { - t.Fatal("Tailscale Service not found") - } - - o, err := parseOwnerAnnotation(tsSvc) - if err != nil { - t.Fatalf("parsing owner annotation: %v", err) - } - - wantOwnerRefs := []OwnerRef{ - {OperatorID: "operator-2"}, - {OperatorID: "operator-1"}, - } - if !reflect.DeepEqual(o.OwnerRefs, wantOwnerRefs) { - t.Errorf("incorrect owner refs\ngot: %+v\nwant: %+v", o.OwnerRefs, wantOwnerRefs) - } - - // Delete the Ingress and verify Tailscale Service still exists with one owner ref - if err := fc.Delete(context.Background(), ing); err != nil { - t.Fatalf("deleting Ingress: %v", err) - } - expectRequeue(t, ingPGR, "default", "test-ingress") - - tsSvc, err = ft.GetVIPService(context.Background(), "svc:my-svc") - if err != nil { - t.Fatalf("getting Tailscale Service after deletion: %v", err) - } - if tsSvc == nil { - t.Fatal("Tailscale Service was incorrectly deleted") - } - - o, err = parseOwnerAnnotation(tsSvc) - if err != nil { - t.Fatalf("parsing owner annotation: %v", err) - } - - wantOwnerRefs = []OwnerRef{ - {OperatorID: "operator-2"}, - } - if !reflect.DeepEqual(o.OwnerRefs, wantOwnerRefs) { - t.Errorf("incorrect owner refs after deletion\ngot: %+v\nwant: %+v", o.OwnerRefs, wantOwnerRefs) - } -} - -func 
populateTLSSecret(ctx context.Context, c client.Client, pgName, domain string) error { - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: domain, - Namespace: "operator-ns", - Labels: map[string]string{ - kubetypes.LabelManaged: "true", - labelProxyGroup: pgName, - labelDomain: domain, - kubetypes.LabelSecretType: "certs", - }, - }, - Type: corev1.SecretTypeTLS, - Data: map[string][]byte{ - corev1.TLSCertKey: []byte("fake-cert"), - corev1.TLSPrivateKeyKey: []byte("fake-key"), - }, - } - - _, err := createOrUpdate(ctx, c, "operator-ns", secret, func(s *corev1.Secret) { - s.Data = secret.Data - }) - return err -} diff --git a/cmd/k8s-operator/svc-for-pg_test.go b/cmd/k8s-operator/svc-for-pg_test.go index ecd60af50..5772cd5d6 100644 --- a/cmd/k8s-operator/svc-for-pg_test.go +++ b/cmd/k8s-operator/svc-for-pg_test.go @@ -46,7 +46,7 @@ func TestServicePGReconciler(t *testing.T) { config = append(config, fmt.Sprintf("svc:default-%s", svc.Name)) verifyTailscaleService(t, ft, fmt.Sprintf("svc:default-%s", svc.Name), []string{"do-not-validate"}) - verifyTailscaledConfig(t, fc, config) + verifyTailscaledConfig(t, fc, "test-pg", config) } for i, svc := range svcs { @@ -75,7 +75,7 @@ func TestServicePGReconciler(t *testing.T) { } config = removeEl(config, fmt.Sprintf("svc:default-%s", svc.Name)) - verifyTailscaledConfig(t, fc, config) + verifyTailscaledConfig(t, fc, "test-pg", config) } } @@ -88,7 +88,7 @@ func TestServicePGReconciler_UpdateHostname(t *testing.T) { expectReconciled(t, svcPGR, "default", svc.Name) verifyTailscaleService(t, ft, fmt.Sprintf("svc:default-%s", svc.Name), []string{"do-not-validate"}) - verifyTailscaledConfig(t, fc, []string{fmt.Sprintf("svc:default-%s", svc.Name)}) + verifyTailscaledConfig(t, fc, "test-pg", []string{fmt.Sprintf("svc:default-%s", svc.Name)}) hostname := "foobarbaz" mustUpdate(t, fc, svc.Namespace, svc.Name, func(s *corev1.Service) { @@ -100,7 +100,7 @@ func TestServicePGReconciler_UpdateHostname(t *testing.T) { expectReconciled(t, svcPGR, "default", svc.Name) verifyTailscaleService(t, ft, fmt.Sprintf("svc:%s", hostname), []string{"do-not-validate"}) - verifyTailscaledConfig(t, fc, []string{fmt.Sprintf("svc:%s", hostname)}) + verifyTailscaledConfig(t, fc, "test-pg", []string{fmt.Sprintf("svc:%s", hostname)}) _, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName(fmt.Sprintf("svc:default-%s", svc.Name))) if err == nil { @@ -334,7 +334,7 @@ func TestIgnoreRegularService(t *testing.T) { mustCreate(t, fc, svc) expectReconciled(t, pgr, "default", "test") - verifyTailscaledConfig(t, fc, nil) + verifyTailscaledConfig(t, fc, "test-pg", nil) tsSvcs, err := ft.ListVIPServices(context.Background()) if err == nil { From 59fab8bda797fa1316c1133a9bdd0b732686dd4a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Jun 2025 08:02:26 -0600 Subject: [PATCH 0950/1708] .github: Bump github/codeql-action from 3.28.19 to 3.29.0 (#16287) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.19 to 3.29.0. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/fca7ace96b7d713c7035871441bd52efbe39e27e...ce28f5bb42b7a9f2c824e633a3f6ee835bab6858) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 8bd72d80d..32d2e7c2f 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 + uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 + uses: github/codeql-action/autobuild@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 + uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 From 42da161b194abe7104cbc8312f913a3db296d6b5 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Fri, 13 Jun 2025 14:45:28 +0100 Subject: [PATCH 0951/1708] tka: reject removal of the last signing key Fixes tailscale/corp#19447 Signed-off-by: Anton Tolchanov --- cmd/tailscale/cli/network-lock.go | 3 +++ tka/builder_test.go | 15 +++++++++++++++ tka/tka.go | 7 +++++++ 3 files changed, 25 insertions(+) diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index c77767074..ae1e90bbf 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -326,6 +326,9 @@ func runNetworkLockRemove(ctx context.Context, args []string) error { if !st.Enabled { return errors.New("tailnet lock is not enabled") } + if len(st.TrustedKeys) == 1 { + return errors.New("cannot remove the last trusted signing key; use 'tailscale lock disable' to disable tailnet lock instead, or add another signing key before removing one") + } if nlRemoveArgs.resign { // Validate we are not removing trust in ourselves while resigning. This is because diff --git a/tka/builder_test.go b/tka/builder_test.go index 666af9ad0..3dbd4347a 100644 --- a/tka/builder_test.go +++ b/tka/builder_test.go @@ -5,6 +5,7 @@ package tka import ( "crypto/ed25519" + "strings" "testing" "github.com/google/go-cmp/cmp" @@ -90,6 +91,20 @@ func TestAuthorityBuilderRemoveKey(t *testing.T) { if _, err := a.state.GetKey(key2.MustID()); err != ErrNoSuchKey { t.Errorf("GetKey(key2).err = %v, want %v", err, ErrNoSuchKey) } + + // Check that removing the remaining key errors out. 
+ b = a.NewUpdater(signer25519(priv)) + if err := b.RemoveKey(key.MustID()); err != nil { + t.Fatalf("RemoveKey(%v) failed: %v", key, err) + } + updates, err = b.Finalize(storage) + if err != nil { + t.Fatalf("Finalize() failed: %v", err) + } + wantErr := "cannot remove the last key" + if err := a.Inform(storage, updates); err == nil || !strings.Contains(err.Error(), wantErr) { + t.Fatalf("expected Inform() to return error %q, got: %v", wantErr, err) + } } func TestAuthorityBuilderSetKeyVote(t *testing.T) { diff --git a/tka/tka.go b/tka/tka.go index 04b712660..ade621bc6 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -440,6 +440,13 @@ func aumVerify(aum AUM, state State, isGenesisAUM bool) error { return fmt.Errorf("signature %d: %v", i, err) } } + + if aum.MessageKind == AUMRemoveKey && len(state.Keys) == 1 { + if kid, err := state.Keys[0].ID(); err == nil && bytes.Equal(aum.KeyID, kid) { + return errors.New("cannot remove the last key in the state") + } + } + return nil } From 8e6f63cf110364b52e3f6c232b23196f16484473 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 16 Jun 2025 08:42:09 -0700 Subject: [PATCH 0952/1708] ipn/ipnlocal,wgengine/magicsock: use eventbus for node & filter updates (#16271) nodeBackend now publishes filter and node changes to eventbus topics that are consumed by magicsock.Conn Updates tailscale/corp#27502 Updates tailscale/corp#29543 Signed-off-by: Jordan Whited --- ipn/ipnlocal/local.go | 16 ++++++-- ipn/ipnlocal/node_backend.go | 42 +++++++++++++++++--- ipn/ipnlocal/node_backend_test.go | 12 +++--- wgengine/magicsock/magicsock.go | 65 +++++++++++++++++++++++++------ 4 files changed, 109 insertions(+), 26 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index daedb1e19..cd30e92bb 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -98,6 +98,7 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/deephash" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/util/goroutines" "tailscale.com/util/httpm" "tailscale.com/util/mak" @@ -514,7 +515,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo captiveCancel: nil, // so that we start checkCaptivePortalLoop when Running needsCaptiveDetection: make(chan bool), } - nb := newNodeBackend(ctx) + nb := newNodeBackend(ctx, b.sys.Bus.Get()) b.currentNodeAtomic.Store(nb) nb.ready() @@ -599,8 +600,15 @@ func (b *LocalBackend) currentNode() *nodeBackend { if v := b.currentNodeAtomic.Load(); v != nil || !testenv.InTest() { return v } - // Auto-init one in tests for LocalBackend created without the NewLocalBackend constructor... - v := newNodeBackend(cmp.Or(b.ctx, context.Background())) + // Auto-init [nodeBackend] in tests for LocalBackend created without the + // NewLocalBackend() constructor. Same reasoning for checking b.sys. + var bus *eventbus.Bus + if b.sys == nil { + bus = eventbus.New() + } else { + bus = b.sys.Bus.Get() + } + v := newNodeBackend(cmp.Or(b.ctx, context.Background()), bus) if b.currentNodeAtomic.CompareAndSwap(nil, v) { v.ready() } @@ -7009,7 +7017,7 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err // down, so no need to do any work. 
return nil } - newNode := newNodeBackend(b.ctx) + newNode := newNodeBackend(b.ctx, b.sys.Bus.Get()) if oldNode := b.currentNodeAtomic.Swap(newNode); oldNode != nil { oldNode.shutdown(errNodeContextChanged) } diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 361d10bb6..efa74577b 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -23,9 +23,11 @@ import ( "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/slicesx" "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/magicsock" ) // nodeBackend is node-specific [LocalBackend] state. It is usually the current node. @@ -69,6 +71,11 @@ type nodeBackend struct { // replaced with a new one. filterAtomic atomic.Pointer[filter.Filter] + // initialized once and immutable + eventClient *eventbus.Client + filterUpdates *eventbus.Publisher[magicsock.FilterUpdate] + nodeUpdates *eventbus.Publisher[magicsock.NodeAddrsHostInfoUpdate] + // TODO(nickkhyl): maybe use sync.RWMutex? mu sync.Mutex // protects the following fields @@ -95,16 +102,20 @@ type nodeBackend struct { nodeByAddr map[netip.Addr]tailcfg.NodeID } -func newNodeBackend(ctx context.Context) *nodeBackend { +func newNodeBackend(ctx context.Context, bus *eventbus.Bus) *nodeBackend { ctx, ctxCancel := context.WithCancelCause(ctx) nb := &nodeBackend{ - ctx: ctx, - ctxCancel: ctxCancel, - readyCh: make(chan struct{}), + ctx: ctx, + ctxCancel: ctxCancel, + eventClient: bus.Client("ipnlocal.nodeBackend"), + readyCh: make(chan struct{}), } // Default filter blocks everything and logs nothing. noneFilter := filter.NewAllowNone(logger.Discard, &netipx.IPSet{}) nb.filterAtomic.Store(noneFilter) + nb.filterUpdates = eventbus.Publish[magicsock.FilterUpdate](nb.eventClient) + nb.nodeUpdates = eventbus.Publish[magicsock.NodeAddrsHostInfoUpdate](nb.eventClient) + nb.filterUpdates.Publish(magicsock.FilterUpdate{Filter: nb.filterAtomic.Load()}) return nb } @@ -418,9 +429,16 @@ func (nb *nodeBackend) updatePeersLocked() { nb.peers[k] = tailcfg.NodeView{} } + changed := magicsock.NodeAddrsHostInfoUpdate{ + Complete: true, + } // Second pass, add everything wanted. for _, p := range nm.Peers { mak.Set(&nb.peers, p.ID(), p) + mak.Set(&changed.NodesByID, p.ID(), magicsock.NodeAddrsHostInfo{ + Addresses: p.Addresses(), + Hostinfo: p.Hostinfo(), + }) } // Third pass, remove deleted things. @@ -429,6 +447,7 @@ func (nb *nodeBackend) updatePeersLocked() { delete(nb.peers, k) } } + nb.nodeUpdates.Publish(changed) } func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) { @@ -443,6 +462,9 @@ func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo // call (e.g. 
its endpoints + online status both change) var mutableNodes map[tailcfg.NodeID]*tailcfg.Node + changed := magicsock.NodeAddrsHostInfoUpdate{ + Complete: false, + } for _, m := range muts { n, ok := mutableNodes[m.NodeIDBeingMutated()] if !ok { @@ -457,8 +479,14 @@ func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo m.Apply(n) } for nid, n := range mutableNodes { - nb.peers[nid] = n.View() - } + nv := n.View() + nb.peers[nid] = nv + mak.Set(&changed.NodesByID, nid, magicsock.NodeAddrsHostInfo{ + Addresses: nv.Addresses(), + Hostinfo: nv.Hostinfo(), + }) + } + nb.nodeUpdates.Publish(changed) return true } @@ -480,6 +508,7 @@ func (nb *nodeBackend) filter() *filter.Filter { func (nb *nodeBackend) setFilter(f *filter.Filter) { nb.filterAtomic.Store(f) + nb.filterUpdates.Publish(magicsock.FilterUpdate{Filter: f}) } func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { @@ -545,6 +574,7 @@ func (nb *nodeBackend) doShutdown(cause error) { defer nb.mu.Unlock() nb.ctxCancel(cause) nb.readyCh = nil + nb.eventClient.Close() } // dnsConfigForNetmap returns a *dns.Config for the given netmap, diff --git a/ipn/ipnlocal/node_backend_test.go b/ipn/ipnlocal/node_backend_test.go index a82b60a9a..dc67d327c 100644 --- a/ipn/ipnlocal/node_backend_test.go +++ b/ipn/ipnlocal/node_backend_test.go @@ -8,10 +8,12 @@ import ( "errors" "testing" "time" + + "tailscale.com/util/eventbus" ) func TestNodeBackendReadiness(t *testing.T) { - nb := newNodeBackend(t.Context()) + nb := newNodeBackend(t.Context(), eventbus.New()) // The node backend is not ready until [nodeBackend.ready] is called, // and [nodeBackend.Wait] should fail with [context.DeadlineExceeded]. @@ -42,7 +44,7 @@ func TestNodeBackendReadiness(t *testing.T) { } func TestNodeBackendShutdown(t *testing.T) { - nb := newNodeBackend(t.Context()) + nb := newNodeBackend(t.Context(), eventbus.New()) shutdownCause := errors.New("test shutdown") @@ -80,7 +82,7 @@ func TestNodeBackendShutdown(t *testing.T) { } func TestNodeBackendReadyAfterShutdown(t *testing.T) { - nb := newNodeBackend(t.Context()) + nb := newNodeBackend(t.Context(), eventbus.New()) shutdownCause := errors.New("test shutdown") nb.shutdown(shutdownCause) @@ -92,7 +94,7 @@ func TestNodeBackendReadyAfterShutdown(t *testing.T) { func TestNodeBackendParentContextCancellation(t *testing.T) { ctx, cancelCtx := context.WithCancel(context.Background()) - nb := newNodeBackend(ctx) + nb := newNodeBackend(ctx, eventbus.New()) cancelCtx() @@ -109,7 +111,7 @@ func TestNodeBackendParentContextCancellation(t *testing.T) { } func TestNodeBackendConcurrentReadyAndShutdown(t *testing.T) { - nb := newNodeBackend(t.Context()) + nb := newNodeBackend(t.Context(), eventbus.New()) // Calling [nodeBackend.ready] and [nodeBackend.shutdown] concurrently // should not cause issues, and [nodeBackend.Wait] should unblock, diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index e5cc87dc3..1042e6794 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -63,6 +63,7 @@ import ( "tailscale.com/util/set" "tailscale.com/util/testenv" "tailscale.com/util/usermetric" + "tailscale.com/wgengine/filter" "tailscale.com/wgengine/wgint" ) @@ -502,6 +503,30 @@ func (o *Options) derpActiveFunc() func() { return o.DERPActiveFunc } +// NodeAddrsHostInfoUpdate represents an update event of the addresses and +// [tailcfg.HostInfoView] for a node set. 
This event is published over an +// [eventbus.Bus]. [magicsock.Conn] is the sole subscriber as of 2025-06. If +// you are adding more subscribers consider moving this type out of magicsock. +type NodeAddrsHostInfoUpdate struct { + NodesByID map[tailcfg.NodeID]NodeAddrsHostInfo + Complete bool // true if NodesByID contains all known nodes, false if it may be a subset +} + +// NodeAddrsHostInfo represents the addresses and [tailcfg.HostinfoView] for a +// Tailscale node. +type NodeAddrsHostInfo struct { + Addresses views.Slice[netip.Prefix] + Hostinfo tailcfg.HostinfoView +} + +// FilterUpdate represents an update event for a [*filter.Filter]. This event is +// signaled over an [eventbus.Bus]. [magicsock.Conn] is the sole subscriber as +// of 2025-06. If you are adding more subscribers consider moving this type out +// of magicsock. +type FilterUpdate struct { + *filter.Filter +} + // newConn is the error-free, network-listening-side-effect-free based // of NewConn. Mostly for tests. func newConn(logf logger.Logf) *Conn { @@ -535,6 +560,20 @@ func newConn(logf logger.Logf) *Conn { return c } +// consumeEventbusTopic consumes events from sub and passes them to +// handlerFn until sub.Done() is closed. +func consumeEventbusTopic[T any](sub *eventbus.Subscriber[T], handlerFn func(t T)) { + defer sub.Close() + for { + select { + case evt := <-sub.Events(): + handlerFn(evt) + case <-sub.Done(): + return + } + } +} + // NewConn creates a magic Conn listening on opts.Port. // As the set of possible endpoints for a Conn changes, the // callback opts.EndpointsFunc is called. @@ -562,17 +601,17 @@ func NewConn(opts Options) (*Conn, error) { c.eventClient = c.eventBus.Client("magicsock.Conn") pmSub := eventbus.Subscribe[portmapper.Mapping](c.eventClient) - go func() { - defer pmSub.Close() - for { - select { - case <-pmSub.Events(): - c.onPortMapChanged() - case <-pmSub.Done(): - return - } - } - }() + go consumeEventbusTopic(pmSub, func(_ portmapper.Mapping) { + c.onPortMapChanged() + }) + filterSub := eventbus.Subscribe[FilterUpdate](c.eventClient) + go consumeEventbusTopic(filterSub, func(t FilterUpdate) { + // TODO(jwhited): implement + }) + nodeSub := eventbus.Subscribe[NodeAddrsHostInfoUpdate](c.eventClient) + go consumeEventbusTopic(nodeSub, func(t NodeAddrsHostInfoUpdate) { + // TODO(jwhited): implement + }) // Disable the explicit callback from the portmapper, the subscriber handles it. onPortMapChanged = nil @@ -2798,6 +2837,10 @@ func (c *connBind) Close() error { return nil } c.closed = true + // Close the [eventbus.Client]. + if c.eventClient != nil { + c.eventClient.Close() + } // Unblock all outstanding receives. c.pconn4.Close() c.pconn6.Close() From 5b7cf7fc3681d11aaddaa9a163ed72502405c83a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 15 Jun 2025 12:42:33 -0700 Subject: [PATCH 0953/1708] .github/workflows: do a go mod download & cache it before all jobs Updates tailscale/corp#28679 Change-Id: Ib0127cb2b03f781fc3187199abe4881e97074f5f Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 246 ++++++++++++++++++++++++++++++++----- go.mod | 2 +- 2 files changed, 215 insertions(+), 33 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 1776653f4..11a851dc4 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -15,6 +15,10 @@ env: # - false: we expect fuzzing to be happy, and should report failure if it's not. # - true: we expect fuzzing is broken, and should report failure if it start working. 
TS_FUZZ_CURRENTLY_BROKEN: false + # GOMODCACHE is the same definition on all OSes. Within the workspace, we use + # toplevel directories "src" (for the checked out source code), and "gomodcache" + # and other caches as siblings to follow. + GOMODCACHE: ${{ github.workspace }}/gomodcache on: push: @@ -38,8 +42,42 @@ concurrency: cancel-in-progress: true jobs: + gomod-cache: + runs-on: ubuntu-24.04 + outputs: + cache-key: ${{ steps.hash.outputs.key }} + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src + - name: Compute cache key from go.{mod,sum} + id: hash + run: echo "key=gomod-cross3-${{ hashFiles('src/go.mod', 'src/go.sum') }}" >> $GITHUB_OUTPUT + # See if the cache entry already exists to avoid downloading it + # and doing the cache write again. + - id: check-cache + uses: actions/cache/restore@v4 + with: + path: gomodcache # relative to workspace; see env note at top of file + key: ${{ steps.hash.outputs.key }} + lookup-only: true + enableCrossOsArchive: true + - name: Download modules + if: steps.check-cache.outputs.cache-hit != 'true' + working-directory: src + run: go mod download + - name: Cache Go modules + if: steps.check-cache.outputs.cache-hit != 'true' + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache # relative to workspace; see env note at top of file + key: ${{ steps.hash.outputs.key }} + enableCrossOsArchive: true + race-root-integration: runs-on: ubuntu-24.04 + needs: gomod-cache strategy: fail-fast: false # don't abort the entire matrix if one element fails matrix: @@ -51,9 +89,19 @@ jobs: steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src + - name: Restore Go module cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: build test wrapper + working-directory: src run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper - name: integration tests as root + working-directory: src run: PATH=$PWD/tool:$PATH /tmp/testwrapper -exec "sudo -E" -race ./tstest/integration/ env: TS_TEST_SHARD: ${{ matrix.shard }} @@ -75,9 +123,18 @@ jobs: shard: '3/3' - goarch: "386" # thanks yaml runs-on: ubuntu-24.04 + needs: gomod-cache steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src + - name: Restore Go module cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: Restore Cache uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: @@ -87,7 +144,6 @@ jobs: # fetched and extracted by tar path: | ~/.cache/go-build - ~/go/pkg/mod/cache ~\AppData\Local\go-build # The -2- here should be incremented when the scheme of data to be # cached changes (e.g. path above changes). @@ -97,11 +153,13 @@ jobs: ${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2- - name: build all if: matrix.buildflags == '' # skip on race builder + working-directory: src run: ./tool/go build ${{matrix.buildflags}} ./... 
env: GOARCH: ${{ matrix.goarch }} - name: build variant CLIs if: matrix.buildflags == '' # skip on race builder + working-directory: src run: | export TS_USE_TOOLCHAIN=1 ./build_dist.sh --extra-small ./cmd/tailscaled @@ -116,19 +174,24 @@ jobs: sudo apt-get -y update sudo apt-get -y install qemu-user - name: build test wrapper + working-directory: src run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper - name: test all + working-directory: src run: NOBASHDEBUG=true PATH=$PWD/tool:$PATH /tmp/testwrapper ./... ${{matrix.buildflags}} env: GOARCH: ${{ matrix.goarch }} TS_TEST_SHARD: ${{ matrix.shard }} - name: bench all + working-directory: src run: ./tool/go test ${{matrix.buildflags}} -bench=. -benchtime=1x -run=^$ $(for x in $(git grep -l "^func Benchmark" | xargs dirname | sort | uniq); do echo "./$x"; done) env: GOARCH: ${{ matrix.goarch }} - name: check that no tracked files changed + working-directory: src run: git diff --no-ext-diff --name-only --exit-code || (echo "Build/test modified the files above."; exit 1) - name: check that no new files were added + working-directory: src run: | # Note: The "error: pathspec..." you see below is normal! # In the success case in which there are no new untracked files, @@ -140,22 +203,33 @@ jobs: exit 1 fi - name: Tidy cache + working-directory: src shell: bash run: | find $(go env GOCACHE) -type f -mmin +90 -delete - find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete + windows: runs-on: windows-2022 + needs: gomod-cache steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: - go-version-file: go.mod + go-version-file: src/go.mod cache: false + - name: Restore Go module cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true + - name: Restore Cache uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: @@ -165,7 +239,6 @@ jobs: # fetched and extracted by tar path: | ~/.cache/go-build - ~/go/pkg/mod/cache ~\AppData\Local\go-build # The -2- here should be incremented when the scheme of data to be # cached changes (e.g. path above changes). @@ -174,19 +247,22 @@ jobs: ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }} ${{ github.job }}-${{ runner.os }}-go-2- - name: test + working-directory: src run: go run ./cmd/testwrapper ./... - name: bench all + working-directory: src # Don't use -bench=. -benchtime=1x. # Somewhere in the layers (powershell?) # the equals signs cause great confusion. run: go test ./... -bench . 
-benchtime 1x -run "^$" - name: Tidy cache + working-directory: src shell: bash run: | find $(go env GOCACHE) -type f -mmin +90 -delete - find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete privileged: + needs: gomod-cache runs-on: ubuntu-24.04 container: image: golang:latest @@ -194,36 +270,47 @@ jobs: steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src + - name: Restore Go module cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: chown + working-directory: src run: chown -R $(id -u):$(id -g) $PWD - name: privileged tests + working-directory: src run: ./tool/go test ./util/linuxfw ./derp/xdp vm: + needs: gomod-cache runs-on: ["self-hosted", "linux", "vm"] # VM tests run with some privileges, don't let them run on 3p PRs. if: github.repository == 'tailscale/tailscale' steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src + - name: Restore Go module cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: Run VM tests + working-directory: src run: ./tool/go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2004 env: HOME: "/var/lib/ghrunner/home" TMPDIR: "/tmp" XDG_CACHE_HOME: "/var/lib/ghrunner/cache" - race-build: - runs-on: ubuntu-24.04 - steps: - - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: build all - run: ./tool/go install -race ./cmd/... - - name: build tests - run: ./tool/go test -race -exec=true ./... - cross: # cross-compile checks, build only. + needs: gomod-cache strategy: fail-fast: false # don't abort the entire matrix if one element fails matrix: @@ -262,6 +349,8 @@ jobs: steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src - name: Restore Cache uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: @@ -271,7 +360,6 @@ jobs: # fetched and extracted by tar path: | ~/.cache/go-build - ~/go/pkg/mod/cache ~\AppData\Local\go-build # The -2- here should be incremented when the scheme of data to be # cached changes (e.g. path above changes). @@ -279,7 +367,14 @@ jobs: restore-keys: | ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }} ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2- + - name: Restore Go module cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: build all + working-directory: src run: ./tool/go build ./cmd/... env: GOOS: ${{ matrix.goos }} @@ -287,30 +382,42 @@ jobs: GOARM: ${{ matrix.goarm }} CGO_ENABLED: "0" - name: build tests + working-directory: src run: ./tool/go test -exec=true ./... env: GOOS: ${{ matrix.goos }} GOARCH: ${{ matrix.goarch }} CGO_ENABLED: "0" - name: Tidy cache + working-directory: src shell: bash run: | find $(go env GOCACHE) -type f -mmin +90 -delete - find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete ios: # similar to cross above, but iOS can't build most of the repo. So, just - #make it build a few smoke packages. 
+ # make it build a few smoke packages. runs-on: ubuntu-24.04 + needs: gomod-cache steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src + - name: Restore Go module cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: build some + working-directory: src run: ./tool/go build ./ipn/... ./ssh/tailssh ./wgengine/ ./types/... ./control/controlclient env: GOOS: ios GOARCH: arm64 crossmin: # cross-compile for platforms where we only check cmd/tailscale{,d} + needs: gomod-cache strategy: fail-fast: false # don't abort the entire matrix if one element fails matrix: @@ -332,6 +439,8 @@ jobs: steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src - name: Restore Cache uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: @@ -341,7 +450,6 @@ jobs: # fetched and extracted by tar path: | ~/.cache/go-build - ~/go/pkg/mod/cache ~\AppData\Local\go-build # The -2- here should be incremented when the scheme of data to be # cached changes (e.g. path above changes). @@ -349,7 +457,14 @@ jobs: restore-keys: | ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }} ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2- + - name: Restore Go module cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: build core + working-directory: src run: ./tool/go build ./cmd/tailscale ./cmd/tailscaled env: GOOS: ${{ matrix.goos }} @@ -357,24 +472,34 @@ jobs: GOARM: ${{ matrix.goarm }} CGO_ENABLED: "0" - name: Tidy cache + working-directory: src shell: bash run: | find $(go env GOCACHE) -type f -mmin +90 -delete - find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete android: # similar to cross above, but android fails to build a few pieces of the # repo. We should fix those pieces, they're small, but as a stepping stone, # only test the subset of android that our past smoke test checked. runs-on: ubuntu-24.04 + needs: gomod-cache steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src # Super minimal Android build that doesn't even use CGO and doesn't build everything that's needed # and is only arm64. But it's a smoke build: it's not meant to catch everything. But it'll catch # some Android breakages early. 
# TODO(bradfitz): better; see https://github.com/tailscale/tailscale/issues/4482 + - name: Restore Go module cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: build some + working-directory: src run: ./tool/go install ./net/netns ./ipn/ipnlocal ./wgengine/magicsock/ ./wgengine/ ./wgengine/router/ ./wgengine/netstack ./util/dnsname/ ./ipn/ ./net/netmon ./wgengine/router/ ./tailcfg/ ./types/logger/ ./net/dns ./hostinfo ./version ./ssh/tailssh env: GOOS: android @@ -382,9 +507,12 @@ jobs: wasm: # builds tsconnect, which is the only wasm build we support runs-on: ubuntu-24.04 + needs: gomod-cache steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src - name: Restore Cache uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: @@ -394,7 +522,6 @@ jobs: # fetched and extracted by tar path: | ~/.cache/go-build - ~/go/pkg/mod/cache ~\AppData\Local\go-build # The -2- here should be incremented when the scheme of data to be # cached changes (e.g. path above changes). @@ -402,28 +529,45 @@ jobs: restore-keys: | ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }} ${{ github.job }}-${{ runner.os }}-go-2- + - name: Restore Go module cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: build tsconnect client + working-directory: src run: ./tool/go build ./cmd/tsconnect/wasm ./cmd/tailscale/cli env: GOOS: js GOARCH: wasm - name: build tsconnect server + working-directory: src # Note, no GOOS/GOARCH in env on this build step, we're running a build # tool that handles the build itself. run: | ./tool/go run ./cmd/tsconnect --fast-compression build ./tool/go run ./cmd/tsconnect --fast-compression build-pkg - name: Tidy cache + working-directory: src shell: bash run: | find $(go env GOCACHE) -type f -mmin +90 -delete - find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete tailscale_go: # Subset of tests that depend on our custom Go toolchain. runs-on: ubuntu-24.04 + needs: gomod-cache steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Set GOMODCACHE env + run: echo "GOMODCACHE=$HOME/.cache/go-mod" >> $GITHUB_ENV + - name: Restore Go module cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: test tailscale_go run: ./tool/go test -tags=tailscale_go,ts_enable_sockstats ./net/sockstats/... 
@@ -477,7 +621,7 @@ jobs: uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master with: oss-fuzz-project-name: 'tailscale' - fuzz-seconds: 300 + fuzz-seconds: 150 dry-run: false language: go - name: Set artifacts_path in env (workaround for actions/upload-artifact#176) @@ -493,19 +637,40 @@ jobs: depaware: runs-on: ubuntu-24.04 + needs: gomod-cache steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src + - name: Set GOMODCACHE env + run: echo "GOMODCACHE=$HOME/.cache/go-mod" >> $GITHUB_ENV + - name: Restore Go module cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: check depaware - run: | - make depaware + working-directory: src + run: make depaware go_generate: runs-on: ubuntu-24.04 + needs: gomod-cache steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src + - name: Restore Go module cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: check that 'go generate' is clean + working-directory: src run: | pkgs=$(./tool/go list ./... | grep -Ev 'dnsfallback|k8s-operator|xdp') ./tool/go generate $pkgs @@ -515,10 +680,20 @@ jobs: go_mod_tidy: runs-on: ubuntu-24.04 + needs: gomod-cache steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src + - name: Restore Go module cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: check that 'go mod tidy' is clean + working-directory: src run: | ./tool/go mod tidy echo @@ -535,6 +710,7 @@ jobs: staticcheck: runs-on: ubuntu-24.04 + needs: gomod-cache strategy: fail-fast: false # don't abort the entire matrix if one element fails matrix: @@ -546,16 +722,22 @@ jobs: steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: install staticcheck - run: GOBIN=~/.local/bin ./tool/go install honnef.co/go/tools/cmd/staticcheck + with: + path: src + - name: Restore Go module cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: run staticcheck + working-directory: src run: | export GOROOT=$(./tool/go env GOROOT) - export PATH=$GOROOT/bin:$PATH - staticcheck -- $(./tool/go list ./... | grep -v tempfork) - env: - GOOS: ${{ matrix.goos }} - GOARCH: ${{ matrix.goarch }} + ./tool/go run -exec \ + "env GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }}" \ + honnef.co/go/tools/cmd/staticcheck -- \ + $(env GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} ./tool/go list ./... 
| grep -v tempfork) notify_slack: if: always() diff --git a/go.mod b/go.mod index 0c3d05d59..0d031d0ba 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.24.0 +go 1.24.4 require ( filippo.io/mkcert v1.4.4 From 866614202c96fa1e5116116acf50834ee787ed6c Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 13 Jun 2025 18:08:22 -0500 Subject: [PATCH 0954/1708] util/eventbus: remove redundant code from eventbus.Publish MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit eventbus.Publish() calls newPublisher(), which in turn invokes (*Client).addPublisher(). That method adds the new publisher to c.pub, so we don’t need to add it again in eventbus.Publish. Updates #cleanup Signed-off-by: Nick Khyl --- util/eventbus/client.go | 13 +++++++------ util/eventbus/publish.go | 6 +----- util/eventbus/subscribe.go | 14 +++++--------- 3 files changed, 13 insertions(+), 20 deletions(-) diff --git a/util/eventbus/client.go b/util/eventbus/client.go index a7a88c0a1..f4261b13c 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -113,15 +113,16 @@ func (c *Client) shouldPublish(t reflect.Type) bool { // Subscribe requests delivery of events of type T through the given // Queue. Panics if the queue already has a subscriber for T. func Subscribe[T any](c *Client) *Subscriber[T] { - return newSubscriber[T](c.subscribeState()) + r := c.subscribeState() + s := newSubscriber[T](r) + r.addSubscriber(s) + return s } // Publisher returns a publisher for event type T using the given // client. func Publish[T any](c *Client) *Publisher[T] { - ret := newPublisher[T](c) - c.mu.Lock() - defer c.mu.Unlock() - c.pub.Add(ret) - return ret + p := newPublisher[T](c) + c.addPublisher(p) + return p } diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go index 9897114b6..4a4bdfb7e 100644 --- a/util/eventbus/publish.go +++ b/util/eventbus/publish.go @@ -21,11 +21,7 @@ type Publisher[T any] struct { } func newPublisher[T any](c *Client) *Publisher[T] { - ret := &Publisher[T]{ - client: c, - } - c.addPublisher(ret) - return ret + return &Publisher[T]{client: c} } // Close closes the publisher. diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index ba17e8548..ee534781a 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -91,7 +91,7 @@ func (q *subscribeState) pump(ctx context.Context) { } } else { // Keep the cases in this select in sync with - // Subscriber.dispatch below. The only different should be + // Subscriber.dispatch below. The only difference should be // that this select doesn't deliver queued values to // anyone, and unconditionally accepts new values. 
select { @@ -134,9 +134,10 @@ func (s *subscribeState) subscribeTypes() []reflect.Type { return ret } -func (s *subscribeState) addSubscriber(t reflect.Type, sub subscriber) { +func (s *subscribeState) addSubscriber(sub subscriber) { s.outputsMu.Lock() defer s.outputsMu.Unlock() + t := sub.subscribeType() if s.outputs[t] != nil { panic(fmt.Errorf("double subscription for event %s", t)) } @@ -183,15 +184,10 @@ type Subscriber[T any] struct { } func newSubscriber[T any](r *subscribeState) *Subscriber[T] { - t := reflect.TypeFor[T]() - - ret := &Subscriber[T]{ + return &Subscriber[T]{ read: make(chan T), - unregister: func() { r.deleteSubscriber(t) }, + unregister: func() { r.deleteSubscriber(reflect.TypeFor[T]()) }, } - r.addSubscriber(t, ret) - - return ret } func newMonitor[T any](attach func(fn func(T)) (cancel func())) *Subscriber[T] { From 3d6e1171c165524987cdb878bf98bc6d2bb33256 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Mon, 16 Jun 2025 07:39:02 -0700 Subject: [PATCH 0955/1708] tsconsensus: protect from data race lock for access to a.peers Fixes #16284 Signed-off-by: Fran Bull --- tsconsensus/authorization.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tsconsensus/authorization.go b/tsconsensus/authorization.go index 1e0b70c07..bd8e2f39a 100644 --- a/tsconsensus/authorization.go +++ b/tsconsensus/authorization.go @@ -87,29 +87,29 @@ func (a *authorization) Refresh(ctx context.Context) error { } func (a *authorization) AllowsHost(addr netip.Addr) bool { + a.mu.Lock() + defer a.mu.Unlock() if a.peers == nil { return false } - a.mu.Lock() - defer a.mu.Unlock() return a.peers.addrs.Contains(addr) } func (a *authorization) SelfAllowed() bool { + a.mu.Lock() + defer a.mu.Unlock() if a.peers == nil { return false } - a.mu.Lock() - defer a.mu.Unlock() return a.peers.status.Self.Tags != nil && views.SliceContains(*a.peers.status.Self.Tags, a.tag) } func (a *authorization) AllowedPeers() views.Slice[*ipnstate.PeerStatus] { + a.mu.Lock() + defer a.mu.Unlock() if a.peers == nil { return views.Slice[*ipnstate.PeerStatus]{} } - a.mu.Lock() - defer a.mu.Unlock() return views.SliceOf(a.peers.statuses) } From 735f15cb49520a198cd2e063bcf9e8e511bcc691 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Mon, 16 Jun 2025 16:09:41 +0100 Subject: [PATCH 0956/1708] util/must: add Get2 for functions that return two values Updates #cleanup Signed-off-by: James Sanderson --- util/must/must.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/util/must/must.go b/util/must/must.go index 21965daa9..a292da226 100644 --- a/util/must/must.go +++ b/util/must/must.go @@ -23,3 +23,11 @@ func Get[T any](v T, err error) T { } return v } + +// Get2 returns v1 and v2 as is. It panics if err is non-nil. +func Get2[T any, U any](v1 T, v2 U, err error) (T, U) { + if err != nil { + panic(err) + } + return v1, v2 +} From 86985228bcef855a8071f6989bbceeb5b21810c2 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Mon, 16 Jun 2025 10:27:00 -0700 Subject: [PATCH 0957/1708] cmd/natc: add a flag to use specific DNS servers If natc is running on a host with tailscale using `--accept-dns=true` then a DNS loop can occur. Provide a flag for some specific DNS upstreams for natc to use instead, to overcome such situations. 
Updates #14667 Signed-off-by: James Tucker --- cmd/natc/natc.go | 31 ++++++- cmd/natc/natc_test.go | 196 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 225 insertions(+), 2 deletions(-) diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index 247bb2101..fdbce3da1 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -54,6 +54,7 @@ func main() { hostname = fs.String("hostname", "", "Hostname to register the service under") siteID = fs.Uint("site-id", 1, "an integer site ID to use for the ULA prefix which allows for multiple proxies to act in a HA configuration") v4PfxStr = fs.String("v4-pfx", "100.64.1.0/24", "comma-separated list of IPv4 prefixes to advertise") + dnsServers = fs.String("dns-servers", "", "comma separated list of upstream DNS to use, including host and port (use system if empty)") verboseTSNet = fs.Bool("verbose-tsnet", false, "enable verbose logging in tsnet") printULA = fs.Bool("print-ula", false, "print the ULA prefix and exit") ignoreDstPfxStr = fs.String("ignore-destinations", "", "comma-separated list of prefixes to ignore") @@ -78,7 +79,7 @@ func main() { } var ignoreDstTable *bart.Table[bool] - for _, s := range strings.Split(*ignoreDstPfxStr, ",") { + for s := range strings.SplitSeq(*ignoreDstPfxStr, ",") { s := strings.TrimSpace(s) if s == "" { continue @@ -185,11 +186,37 @@ func main() { ipPool: ipp, routes: routes, dnsAddr: dnsAddr, - resolver: net.DefaultResolver, + resolver: getResolver(*dnsServers), } c.run(ctx, lc) } +// getResolver parses serverFlag and returns either the default resolver, or a +// resolver that uses the provided comma-separated DNS server AddrPort's, or +// panics. +func getResolver(serverFlag string) lookupNetIPer { + if serverFlag == "" { + return net.DefaultResolver + } + var addrs []string + for s := range strings.SplitSeq(serverFlag, ",") { + s = strings.TrimSpace(s) + addr, err := netip.ParseAddrPort(s) + if err != nil { + log.Fatalf("dns server provided: %q does not parse: %v", s, err) + } + addrs = append(addrs, addr.String()) + } + return &net.Resolver{ + PreferGo: true, + Dial: func(ctx context.Context, network string, address string) (net.Conn, error) { + var dialer net.Dialer + // TODO(raggi): perhaps something other than random? + return dialer.DialContext(ctx, network, addrs[rand.N(len(addrs))]) + }, + } +} + func calculateAddresses(prefixes []netip.Prefix) (*netipx.IPSet, netip.Addr, *netipx.IPSet) { var ipsb netipx.IPSetBuilder for _, p := range prefixes { diff --git a/cmd/natc/natc_test.go b/cmd/natc/natc_test.go index 78dec86fd..c0a66deb8 100644 --- a/cmd/natc/natc_test.go +++ b/cmd/natc/natc_test.go @@ -9,6 +9,7 @@ import ( "io" "net" "net/netip" + "sync" "testing" "time" @@ -480,3 +481,198 @@ func TestV6V4(t *testing.T) { } } } + +// echoServer is a simple server that just echos back data set to it. 
+type echoServer struct { + listener net.Listener + addr string + wg sync.WaitGroup + done chan struct{} +} + +// newEchoServer creates a new test DNS server on the specified network and address +func newEchoServer(t *testing.T, network, addr string) *echoServer { + listener, err := net.Listen(network, addr) + if err != nil { + t.Fatalf("Failed to create test DNS server: %v", err) + } + + server := &echoServer{ + listener: listener, + addr: listener.Addr().String(), + done: make(chan struct{}), + } + + server.wg.Add(1) + go server.serve() + + return server +} + +func (s *echoServer) serve() { + defer s.wg.Done() + + for { + select { + case <-s.done: + return + default: + conn, err := s.listener.Accept() + if err != nil { + select { + case <-s.done: + return + default: + continue + } + } + go s.handleConnection(conn) + } + } +} + +func (s *echoServer) handleConnection(conn net.Conn) { + defer conn.Close() + // Simple response - just echo back some data to confirm connectivity + buf := make([]byte, 1024) + n, err := conn.Read(buf) + if err != nil { + return + } + conn.Write(buf[:n]) +} + +func (s *echoServer) close() { + close(s.done) + s.listener.Close() + s.wg.Wait() +} + +func TestGetResolver(t *testing.T) { + tests := []struct { + name string + network string + addr string + }{ + { + name: "ipv4_loopback", + network: "tcp4", + addr: "127.0.0.1:0", + }, + { + name: "ipv6_loopback", + network: "tcp6", + addr: "[::1]:0", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + server := newEchoServer(t, tc.network, tc.addr) + defer server.close() + serverAddr := server.addr + resolver := getResolver(serverAddr) + if resolver == nil { + t.Fatal("getResolver returned nil") + } + + netResolver, ok := resolver.(*net.Resolver) + if !ok { + t.Fatal("getResolver did not return a *net.Resolver") + } + if netResolver.Dial == nil { + t.Fatal("resolver.Dial is nil") + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + conn, err := netResolver.Dial(ctx, "tcp", "dummy.address:53") + if err != nil { + t.Fatalf("Failed to dial test DNS server: %v", err) + } + defer conn.Close() + + testData := []byte("test") + _, err = conn.Write(testData) + if err != nil { + t.Fatalf("Failed to write to connection: %v", err) + } + + response := make([]byte, len(testData)) + _, err = conn.Read(response) + if err != nil { + t.Fatalf("Failed to read from connection: %v", err) + } + + if string(response) != string(testData) { + t.Fatalf("Expected echo response %q, got %q", testData, response) + } + }) + } +} + +func TestGetResolverMultipleServers(t *testing.T) { + server1 := newEchoServer(t, "tcp4", "127.0.0.1:0") + defer server1.close() + server2 := newEchoServer(t, "tcp4", "127.0.0.1:0") + defer server2.close() + serverFlag := server1.addr + ", " + server2.addr + + resolver := getResolver(serverFlag) + netResolver, ok := resolver.(*net.Resolver) + if !ok { + t.Fatal("getResolver did not return a *net.Resolver") + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + servers := map[string]bool{ + server1.addr: false, + server2.addr: false, + } + + // Try up to 1000 times to hit all servers, this should be very quick, and + // if this fails randomness has regressed beyond reason. 
+ for range 1000 { + conn, err := netResolver.Dial(ctx, "tcp", "dummy.address:53") + if err != nil { + t.Fatalf("Failed to dial test DNS server: %v", err) + } + + remoteAddr := conn.RemoteAddr().String() + + conn.Close() + + servers[remoteAddr] = true + + var allDone = true + for _, done := range servers { + if !done { + allDone = false + break + } + } + if allDone { + break + } + } + + var allDone = true + for _, done := range servers { + if !done { + allDone = false + break + } + } + if !allDone { + t.Errorf("after 1000 queries, not all servers were hit, significant lack of randomness: %#v", servers) + } +} + +func TestGetResolverEmpty(t *testing.T) { + resolver := getResolver("") + if resolver != net.DefaultResolver { + t.Fatal(`getResolver("") should return net.DefaultResolver`) + } +} From 259bab9bff0d377eae360f10943819fab8f3813b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 16 Jun 2025 12:02:20 -0700 Subject: [PATCH 0958/1708] scripts/check_license_headers.sh: delete, rewrite as a Go test Updates tailscale/corp#29650 Change-Id: Iad4e4ccd9d68ebb1d1a12f335cc5295d0bd05b60 Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 14 ++- chirp/chirp_test.go | 1 + cmd/cloner/cloner_test.go | 1 + cmd/gitops-pusher/gitops-pusher_test.go | 1 + cmd/proxy-to-grafana/proxy-to-grafana_test.go | 1 + cmd/tsidp/tsidp_test.go | 1 + ipn/serve_test.go | 1 + license_test.go | 117 ++++++++++++++++++ net/tstun/mtu_test.go | 1 + scripts/check_license_headers.sh | 77 ------------ tsweb/promvarz/promvarz_test.go | 1 + 11 files changed, 138 insertions(+), 78 deletions(-) create mode 100644 license_test.go delete mode 100755 scripts/check_license_headers.sh diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 11a851dc4..2d1795668 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -702,11 +702,23 @@ jobs: licenses: runs-on: ubuntu-24.04 + needs: gomod-cache steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src + - name: Restore Go module cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: check licenses - run: ./scripts/check_license_headers.sh . 
+ working-directory: src + run: | + grep -q TestLicenseHeaders *.go || (echo "Expected a test named TestLicenseHeaders"; exit 1) + ./tool/go test -v -run=TestLicenseHeaders staticcheck: runs-on: ubuntu-24.04 diff --git a/chirp/chirp_test.go b/chirp/chirp_test.go index 2549c163f..a57ef224b 100644 --- a/chirp/chirp_test.go +++ b/chirp/chirp_test.go @@ -1,5 +1,6 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause + package chirp import ( diff --git a/cmd/cloner/cloner_test.go b/cmd/cloner/cloner_test.go index d8d5df3cb..cf1063714 100644 --- a/cmd/cloner/cloner_test.go +++ b/cmd/cloner/cloner_test.go @@ -1,5 +1,6 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause + package main import ( diff --git a/cmd/gitops-pusher/gitops-pusher_test.go b/cmd/gitops-pusher/gitops-pusher_test.go index b050761d9..e08b06c9c 100644 --- a/cmd/gitops-pusher/gitops-pusher_test.go +++ b/cmd/gitops-pusher/gitops-pusher_test.go @@ -1,5 +1,6 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause + package main import ( diff --git a/cmd/proxy-to-grafana/proxy-to-grafana_test.go b/cmd/proxy-to-grafana/proxy-to-grafana_test.go index 083c4bc49..4831d5436 100644 --- a/cmd/proxy-to-grafana/proxy-to-grafana_test.go +++ b/cmd/proxy-to-grafana/proxy-to-grafana_test.go @@ -1,5 +1,6 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause + package main import ( diff --git a/cmd/tsidp/tsidp_test.go b/cmd/tsidp/tsidp_test.go index 76a118991..6932d8e29 100644 --- a/cmd/tsidp/tsidp_test.go +++ b/cmd/tsidp/tsidp_test.go @@ -1,5 +1,6 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause + package main import ( diff --git a/ipn/serve_test.go b/ipn/serve_test.go index ae1d56eef..ba0a26f8c 100644 --- a/ipn/serve_test.go +++ b/ipn/serve_test.go @@ -1,5 +1,6 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause + package ipn import ( diff --git a/license_test.go b/license_test.go new file mode 100644 index 000000000..ec452a6e3 --- /dev/null +++ b/license_test.go @@ -0,0 +1,117 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tailscaleroot + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "testing" + + "tailscale.com/util/set" +) + +func normalizeLineEndings(b []byte) []byte { + return bytes.ReplaceAll(b, []byte("\r\n"), []byte("\n")) +} + +// TestLicenseHeaders checks that all Go files in the tree +// directory tree have a correct-looking Tailscale license header. 
+func TestLicenseHeaders(t *testing.T) { + want := normalizeLineEndings([]byte(strings.TrimLeft(` +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause +`, "\n"))) + + exceptions := set.Of( + // Subprocess test harness code + "util/winutil/testdata/testrestartableprocesses/main.go", + "util/winutil/subprocess_windows_test.go", + + // WireGuard copyright + "cmd/tailscale/cli/authenticode_windows.go", + "wgengine/router/ifconfig_windows.go", + + // noiseexplorer.com copyright + "control/controlbase/noiseexplorer_test.go", + + // Generated eBPF management code + "derp/xdp/bpf_bpfeb.go", + "derp/xdp/bpf_bpfel.go", + + // Generated kube deepcopy funcs file starts with a Go build tag + an empty line + "k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go", + ) + + err := filepath.Walk(".", func(path string, fi os.FileInfo, err error) error { + if err != nil { + return fmt.Errorf("path %s: %v", path, err) + } + if exceptions.Contains(filepath.ToSlash(path)) { + return nil + } + base := filepath.Base(path) + switch base { + case ".git", "node_modules", "tempfork": + return filepath.SkipDir + } + switch base { + case "zsyscall_windows.go": + // Generated code. + return nil + } + + if strings.HasSuffix(base, ".config.ts") { + return nil + } + if strings.HasSuffix(base, "_string.go") { + // Generated file from go:generate stringer + return nil + } + + ext := filepath.Ext(base) + switch ext { + default: + return nil + case ".go", ".ts", ".tsx": + } + + buf := make([]byte, 512) + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + if n, err := io.ReadAtLeast(f, buf, 512); err != nil && err != io.ErrUnexpectedEOF { + return err + } else { + buf = buf[:n] + } + + buf = normalizeLineEndings(buf) + + bufNoTrunc := buf + if i := bytes.Index(buf, []byte("\npackage ")); i != -1 { + buf = buf[:i] + } + + if bytes.Contains(buf, want) { + return nil + } + + if bytes.Contains(bufNoTrunc, []byte("BSD-3-Clause\npackage ")) { + t.Errorf("file %s has license header as a package doc; add a blank line before the package line", path) + return nil + } + + t.Errorf("file %s is missing Tailscale copyright header:\n\n%s", path, want) + return nil + }) + if err != nil { + t.Fatalf("Walk: %v", err) + } +} diff --git a/net/tstun/mtu_test.go b/net/tstun/mtu_test.go index 8d165bfd3..ec31e45ce 100644 --- a/net/tstun/mtu_test.go +++ b/net/tstun/mtu_test.go @@ -1,5 +1,6 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause + package tstun import ( diff --git a/scripts/check_license_headers.sh b/scripts/check_license_headers.sh deleted file mode 100755 index 8345afab7..000000000 --- a/scripts/check_license_headers.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/sh -# -# Copyright (c) Tailscale Inc & AUTHORS -# SPDX-License-Identifier: BSD-3-Clause -# -# check_license_headers.sh checks that all Go files in the given -# directory tree have a correct-looking Tailscale license header. - -check_file() { - got=$1 - - want=$(cat <&2 - exit 1 -fi - -fail=0 -for file in $(find $1 \( -name '*.go' -or -name '*.tsx' -or -name '*.ts' -not -name '*.config.ts' \) -not -path '*/.git/*' -not -path '*/node_modules/*'); do - case $file in - $1/tempfork/*) - # Skip, tempfork of third-party code - ;; - $1/wgengine/router/ifconfig_windows.go) - # WireGuard copyright. - ;; - $1/cmd/tailscale/cli/authenticode_windows.go) - # WireGuard copyright. 
- ;; - *_string.go) - # Generated file from go:generate stringer - ;; - $1/control/controlbase/noiseexplorer_test.go) - # Noiseexplorer.com copyright. - ;; - */zsyscall_windows.go) - # Generated syscall wrappers - ;; - $1/util/winutil/subprocess_windows_test.go) - # Subprocess test harness code - ;; - $1/util/winutil/testdata/testrestartableprocesses/main.go) - # Subprocess test harness code - ;; - *$1/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go) - # Generated kube deepcopy funcs file starts with a Go build tag + an empty line - header="$(head -5 $file | tail -n+3 )" - ;; - $1/derp/xdp/bpf_bpfe*.go) - # Generated eBPF management code - ;; - *) - header="$(head -2 $file)" - ;; - esac - if [ ! -z "$header" ]; then - if ! check_file "$header"; then - fail=1 - echo "${file#$1/} doesn't have the right copyright header:" - echo "$header" | sed -e 's/^/ /g' - fi - fi -done - -if [ $fail -ne 0 ]; then - exit 1 -fi diff --git a/tsweb/promvarz/promvarz_test.go b/tsweb/promvarz/promvarz_test.go index 9f91b5d12..cffbbec22 100644 --- a/tsweb/promvarz/promvarz_test.go +++ b/tsweb/promvarz/promvarz_test.go @@ -1,5 +1,6 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause + package promvarz import ( From 5b086cd2addc694c4b59c1a827e13a31e2f04d26 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 15 Jun 2025 08:25:36 -0700 Subject: [PATCH 0959/1708] tool/gocross: make gocross opt-in instead of opt-out gocross is not needed like it used to be, now that Go does version stamping itself. We keep it for the xcode and Windows builds for now. This simplifies things in the build, especially with upcoming build system updates. Updates tailscale/corp#28679 Updates tailscale/corp#26717 Change-Id: Ib4bebe6f50f3b9c3d6cd27323fca603e3dfb43cc Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 1 - tool/gocross/gocross-wrapper.sh | 36 +++++++++++++++++++++++++--- tool/gocross/gocross_wrapper_test.go | 2 +- version/print.go | 1 + 4 files changed, 35 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2d1795668..313ce609f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -161,7 +161,6 @@ jobs: if: matrix.buildflags == '' # skip on race builder working-directory: src run: | - export TS_USE_TOOLCHAIN=1 ./build_dist.sh --extra-small ./cmd/tailscaled ./build_dist.sh --box ./cmd/tailscaled ./build_dist.sh --extra-small --box ./cmd/tailscaled diff --git a/tool/gocross/gocross-wrapper.sh b/tool/gocross/gocross-wrapper.sh index 366011fef..90f308eb5 100755 --- a/tool/gocross/gocross-wrapper.sh +++ b/tool/gocross/gocross-wrapper.sh @@ -3,8 +3,11 @@ # SPDX-License-Identifier: BSD-3-Clause # # gocross-wrapper.sh is a wrapper that can be aliased to 'go', which -# transparently builds gocross using a "bootstrap" Go toolchain, and -# then invokes gocross. +# transparently runs the version of github.com/tailscale/go as specified repo's +# go.toolchain.rev file. +# +# It also conditionally (if TS_USE_GOCROSS=1) builds gocross and uses it as a go +# wrapper to inject certain go flags. set -euo pipefail @@ -76,6 +79,14 @@ case "$REV" in ;; esac +# gocross is opt-in as of 2025-06-16. See tailscale/corp#26717. +# It's primarily used for xcode builds, and a bit still for Windows. +# In the past we needed it for git version stamping on Linux etc, but +# Go does that itself nowadays. 
+if [ "${TS_USE_GOCROSS:-}" != "1" ]; then + exit 0 # out of subshell +fi + if [[ -d "$toolchain" ]]; then # A toolchain exists, but is it recent enough to compile gocross? If not, # wipe it out so that the next if block fetches a usable one. @@ -119,4 +130,23 @@ if [[ "$gocross_ok" == "0" ]]; then fi ) # End of the subshell execution. -exec "${BASH_SOURCE%/*}/../../gocross" "$@" +repo_root="${BASH_SOURCE%/*}/../.." + +# gocross is opt-in as of 2025-06-16. See tailscale/corp#26717 +# and comment above in this file. +if [ "${TS_USE_GOCROSS:-}" != "1" ]; then + read -r REV <"${repo_root}/go.toolchain.rev" + case "$REV" in + /*) + toolchain="$REV" + ;; + *) + # If the prior subshell completed successfully, this toolchain location + # should be valid at this point. + toolchain="$HOME/.cache/tsgo/$REV" + ;; + esac + exec "$toolchain/bin/go" "$@" +fi + +exec "${repo_root}/gocross" "$@" diff --git a/tool/gocross/gocross_wrapper_test.go b/tool/gocross/gocross_wrapper_test.go index 2b0f016a2..f4dcec429 100644 --- a/tool/gocross/gocross_wrapper_test.go +++ b/tool/gocross/gocross_wrapper_test.go @@ -15,7 +15,7 @@ import ( func TestGocrossWrapper(t *testing.T) { for i := range 2 { // once to build gocross; second to test it's cached cmd := exec.Command("./gocross-wrapper.sh", "version") - cmd.Env = append(os.Environ(), "CI=true", "NOBASHDEBUG=false") // for "set -x" verbosity + cmd.Env = append(os.Environ(), "CI=true", "NOBASHDEBUG=false", "TS_USE_GOCROSS=1") // for "set -x" verbosity out, err := cmd.CombinedOutput() if err != nil { t.Fatalf("gocross-wrapper.sh failed: %v\n%s", err, out) diff --git a/version/print.go b/version/print.go index be90432cc..43ee2b559 100644 --- a/version/print.go +++ b/version/print.go @@ -20,6 +20,7 @@ var stringLazy = sync.OnceValue(func() string { if gitCommit() != "" { fmt.Fprintf(&ret, " tailscale commit: %s%s\n", gitCommit(), dirtyString()) } + fmt.Fprintf(&ret, " long version: %s\n", Long()) if extraGitCommitStamp != "" { fmt.Fprintf(&ret, " other commit: %s\n", extraGitCommitStamp) } From 077d52b22f4ff35eb6e1a7427164df45f8efc1c0 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Mon, 16 Jun 2025 16:01:07 +0100 Subject: [PATCH 0960/1708] .github/workflows: removes extra '$' Signed-off-by: Irbe Krumina --- .github/workflows/checklocks.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/golangci-lint.yml | 2 +- .github/workflows/kubemanifests.yaml | 2 +- .github/workflows/natlab-integrationtest.yml | 2 +- .github/workflows/ssh-integrationtest.yml | 2 +- .github/workflows/update-flake.yml | 2 +- .github/workflows/update-webclient-prebuilt.yml | 2 +- .github/workflows/webclient.yml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/checklocks.yml b/.github/workflows/checklocks.yml index 7464524ce..5957e6925 100644 --- a/.github/workflows/checklocks.yml +++ b/.github/workflows/checklocks.yml @@ -10,7 +10,7 @@ on: - '.github/workflows/checklocks.yml' concurrency: - group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 32d2e7c2f..2b471e943 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -23,7 +23,7 @@ on: - cron: '31 14 * * 5' concurrency: - group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-${{ 
github.head_ref || github.run_id }} cancel-in-progress: true jobs: diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 60eb6852a..ee62f04be 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -15,7 +15,7 @@ permissions: pull-requests: read concurrency: - group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: diff --git a/.github/workflows/kubemanifests.yaml b/.github/workflows/kubemanifests.yaml index 5b100a276..4cffea02f 100644 --- a/.github/workflows/kubemanifests.yaml +++ b/.github/workflows/kubemanifests.yaml @@ -9,7 +9,7 @@ on: # Cancel workflow run if there is a newer push to the same PR for which it is # running concurrency: - group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: diff --git a/.github/workflows/natlab-integrationtest.yml b/.github/workflows/natlab-integrationtest.yml index 1de74cdaa..99d58717b 100644 --- a/.github/workflows/natlab-integrationtest.yml +++ b/.github/workflows/natlab-integrationtest.yml @@ -3,7 +3,7 @@ name: "natlab-integrationtest" concurrency: - group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true on: diff --git a/.github/workflows/ssh-integrationtest.yml b/.github/workflows/ssh-integrationtest.yml index 829d10ab8..463f4bdd4 100644 --- a/.github/workflows/ssh-integrationtest.yml +++ b/.github/workflows/ssh-integrationtest.yml @@ -3,7 +3,7 @@ name: "ssh-integrationtest" concurrency: - group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true on: diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index f695c578e..af7bdff1e 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -12,7 +12,7 @@ on: workflow_dispatch: concurrency: - group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: diff --git a/.github/workflows/update-webclient-prebuilt.yml b/.github/workflows/update-webclient-prebuilt.yml index 412836db7..f1c2b0c3b 100644 --- a/.github/workflows/update-webclient-prebuilt.yml +++ b/.github/workflows/update-webclient-prebuilt.yml @@ -5,7 +5,7 @@ on: workflow_dispatch: concurrency: - group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: diff --git a/.github/workflows/webclient.yml b/.github/workflows/webclient.yml index b1cfb7620..e64137f2b 100644 --- a/.github/workflows/webclient.yml +++ b/.github/workflows/webclient.yml @@ -15,7 +15,7 @@ on: # - main concurrency: - group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: From d7770d2b81d5b07466d0098c637d81204779eb0b Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Mon, 16 Jun 2025 16:01:46 +0100 Subject: [PATCH 0961/1708] .github/workflows: test that ./go/tool version matches go mod version Tests that go mod version matches ./tool/go version. 
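As a rough sketch of the two values being compared (version numbers here are examples only):

    go.mod:     go 1.24.4
    ./tool/go:  go version go1.24.4 linux/amd64
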
Mismatched versions result in incosistent Go versions being used i.e. in CI jobs as the version in go.mod is used to determine what Go version Github actions pull in. Updates #16283 Signed-off-by: Irbe Krumina --- version_test.go | 72 ++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 62 insertions(+), 10 deletions(-) diff --git a/version_test.go b/version_test.go index 1f434e682..3d983a19d 100644 --- a/version_test.go +++ b/version_test.go @@ -6,21 +6,16 @@ package tailscaleroot import ( "fmt" "os" - "regexp" + "os/exec" + "runtime" "strings" "testing" + + "golang.org/x/mod/modfile" ) func TestDockerfileVersion(t *testing.T) { - goMod, err := os.ReadFile("go.mod") - if err != nil { - t.Fatal(err) - } - m := regexp.MustCompile(`(?m)^go (\d\.\d+)\r?($|\.)`).FindStringSubmatch(string(goMod)) - if m == nil { - t.Fatalf("didn't find go version in go.mod") - } - goVersion := m[1] + goVersion := mustGetGoModVersion(t, false) dockerFile, err := os.ReadFile("Dockerfile") if err != nil { @@ -31,3 +26,60 @@ func TestDockerfileVersion(t *testing.T) { t.Errorf("didn't find %q in Dockerfile", wantSub) } } + +// TestGoVersion tests that the Go version specified in go.mod matches ./tool/go version. +func TestGoVersion(t *testing.T) { + // We could special-case ./tool/go path for Windows, but really there is no + // need to run it there. + if runtime.GOOS == "windows" { + t.Skip("Skipping test on Windows") + } + goModVersion := mustGetGoModVersion(t, true) + + goToolCmd := exec.Command("./tool/go", "version") + goToolOutput, err := goToolCmd.Output() + if err != nil { + t.Fatalf("Failed to get ./tool/go version: %v", err) + } + + // Version info will approximately look like 'go version go1.24.4 linux/amd64'. + parts := strings.Fields(string(goToolOutput)) + if len(parts) < 4 { + t.Fatalf("Unexpected ./tool/go version output format: %s", goToolOutput) + } + + goToolVersion := strings.TrimPrefix(parts[2], "go") + + if goModVersion != goToolVersion { + t.Errorf("Go version in go.mod (%q) does not match the version of ./tool/go (%q).\nEnsure that the go.mod refers to the same Go version as ./go.toolchain.rev.", + goModVersion, goToolVersion) + } +} + +func mustGetGoModVersion(t *testing.T, includePatchVersion bool) string { + t.Helper() + + goModBytes, err := os.ReadFile("go.mod") + if err != nil { + t.Fatal(err) + } + + modFile, err := modfile.Parse("go.mod", goModBytes, nil) + if err != nil { + t.Fatal(err) + } + + if modFile.Go == nil { + t.Fatal("no Go version found in go.mod") + } + + version := modFile.Go.Version + + parts := strings.Split(version, ".") + if !includePatchVersion { + if len(parts) >= 2 { + version = parts[0] + "." 
+ parts[1] + } + } + return version +} From 42f71e959dff4dc55b138c358764c8fbfe8cdb7f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 16 Jun 2025 18:18:36 -0700 Subject: [PATCH 0962/1708] prober: speed up TestCRL ~450x by baking in some test keys Fixes #16290 Updates tailscale/corp#28679 Change-Id: Ic90129b686779d0ed1cb40acf187cfcbdd39eb83 Signed-off-by: Brad Fitzpatrick --- prober/tls_test.go | 65 +++++++++++++++++++++++++++++++++++++--------- 1 file changed, 53 insertions(+), 12 deletions(-) diff --git a/prober/tls_test.go b/prober/tls_test.go index 9ba17f79d..f6ca4aeb1 100644 --- a/prober/tls_test.go +++ b/prober/tls_test.go @@ -6,6 +6,7 @@ package prober import ( "bytes" "context" + "crypto/ecdsa" "crypto/rand" "crypto/rsa" "crypto/tls" @@ -140,16 +141,60 @@ func (s *CRLServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.Write(s.crlBytes) } -func TestCRL(t *testing.T) { - // Generate CA key and self-signed CA cert - caKey, err := rsa.GenerateKey(rand.Reader, 4096) +// someECDSAKey{1,2,3} are different EC private keys in PEM format +// as generated by: +// +// openssl ecparam -name prime256v1 -genkey -noout -out - +// +// They're used in tests to avoid burning CPU at test time to just +// to make some arbitrary test keys. +const ( + someECDSAKey1 = ` +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIDKggO47Si0/JgqF0q9m0HfQ92lbERWsBaKS5YihtuheoAoGCCqGSM49 +AwEHoUQDQgAE/JtNZkfFmAGQJHW5Xgz0Eoyi9MKVxl77sXjIFDMX233QDIWPEM/B +vmNMvdFkuYBjwbq6H+SNf1NXRNladEGU/Q== +-----END EC PRIVATE KEY----- +` + someECDSAKey2 = ` +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIPIJhRf4MpzLil1ZKcRqMx+jPeJXw96KtYYzV2AcgBzgoAoGCCqGSM49 +AwEHoUQDQgAEhA9CSWFmUvdvXMzyt+as+6f+0luydHU1x/gEksVByYIgYxahaGts +xbSKj6F2WgAN/ok1gFLqhH3UWMNVthM1wA== +-----END EC PRIVATE KEY----- +` + someECDSAKey3 = ` +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIKgZ1OJjK2St9O0i52N1K+IgSiu2/NSMk9Yt2+kDMHd7oAoGCCqGSM49 +AwEHoUQDQgAExFp80etkjy/AEUtSgJjXRA39jTU7eiEmCGRREewFQhwcEscBEfrg +6NN31r9YlEs+hZ8gXE1L3Deu6jn5jW3pig== +-----END EC PRIVATE KEY----- +` +) + +// parseECKey parses an EC private key from a PEM-encoded string. 
+func parseECKey(t *testing.T, pemPriv string) *ecdsa.PrivateKey { + t.Helper() + block, _ := pem.Decode([]byte(pemPriv)) + if block == nil { + t.Fatal("failed to decode PEM") + } + key, err := x509.ParseECPrivateKey(block.Bytes) if err != nil { - t.Fatal(err) + t.Fatalf("failed to parse EC key: %v", err) } + return key +} + +func TestCRL(t *testing.T) { + // Generate CA key and self-signed CA cert + caKey := parseECKey(t, someECDSAKey1) + caTpl := issuerCertTpl caTpl.BasicConstraintsValid = true caTpl.IsCA = true caTpl.KeyUsage = x509.KeyUsageCertSign | x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature + caTpl.SignatureAlgorithm = x509.ECDSAWithSHA256 caBytes, err := x509.CreateCertificate(rand.Reader, &caTpl, &caTpl, &caKey.PublicKey, caKey) if err != nil { t.Fatal(err) @@ -162,11 +207,9 @@ func TestCRL(t *testing.T) { // Issue a leaf cert signed by the CA leaf := leafCert leaf.SerialNumber = big.NewInt(20001) + leaf.SignatureAlgorithm = x509.ECDSAWithSHA256 leaf.Issuer = caCert.Subject - leafKey, err := rsa.GenerateKey(rand.Reader, 4096) - if err != nil { - t.Fatal(err) - } + leafKey := parseECKey(t, someECDSAKey2) leafBytes, err := x509.CreateCertificate(rand.Reader, &leaf, caCert, &leafKey.PublicKey, caKey) if err != nil { t.Fatal(err) @@ -182,10 +225,8 @@ func TestCRL(t *testing.T) { noCRLCert.CRLDistributionPoints = []string{} noCRLCert.NotBefore = time.Unix(letsEncryptStartedStaplingCRL, 0).Add(-48 * time.Hour) noCRLCert.Issuer = caCert.Subject - noCRLCertKey, err := rsa.GenerateKey(rand.Reader, 4096) - if err != nil { - t.Fatal(err) - } + noCRLCert.SignatureAlgorithm = x509.ECDSAWithSHA256 + noCRLCertKey := parseECKey(t, someECDSAKey3) noCRLStapledBytes, err := x509.CreateCertificate(rand.Reader, &noCRLCert, caCert, &noCRLCertKey.PublicKey, caKey) if err != nil { t.Fatal(err) From d37e8d0bfaaf3a40ef2432f6bed7bab2004e36eb Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 16 Jun 2025 21:10:59 -0700 Subject: [PATCH 0963/1708] .github/workflows: remove redundant work between staticcheck jobs Make the OS-specific staticcheck jobs only test stuff that's specialized for that OS. Do that using a new ./tool/listpkgs program that's a fancy 'go list' with more filtering flags. 
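For example, the Linux-specific staticcheck job can now restrict itself to packages that actually carry linux build tags; this mirrors the invocation used in the workflow change below (flags as defined in tool/listpkgs):

    ./tool/go run ./tool/listpkgs --ignore-3p --goos=linux --goarch=amd64 --with-tags-all=linux ./...
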
Updates tailscale/corp#28679 Change-Id: I790be2e3a0b42b105bd39f68c4b20e217a26de60 Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 87 ++++++++++++++-- Makefile | 2 +- tool/listpkgs/listpkgs.go | 206 +++++++++++++++++++++++++++++++++++++ 3 files changed, 283 insertions(+), 12 deletions(-) create mode 100644 tool/listpkgs/listpkgs.go diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 313ce609f..6d8ab863c 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -232,10 +232,6 @@ jobs: - name: Restore Cache uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: - # Note: unlike the other setups, this is only grabbing the mod download - # cache, rather than the whole mod directory, as the download cache - # contains zips that can be unpacked in parallel faster than they can be - # fetched and extracted by tar path: | ~/.cache/go-build ~\AppData\Local\go-build @@ -722,14 +718,40 @@ jobs: staticcheck: runs-on: ubuntu-24.04 needs: gomod-cache + name: staticcheck (${{ matrix.name }}) strategy: fail-fast: false # don't abort the entire matrix if one element fails matrix: - goos: ["linux", "windows", "darwin"] - goarch: ["amd64"] include: - - goos: "windows" - goarch: "386" + - name: "macOS" + goos: "darwin" + goarch: "arm64" + flags: "--with-tags-all=darwin" + - name: "Windows" + goos: "windows" + goarch: "amd64" + flags: "--with-tags-all=windows" + - name: "Linux" + goos: "linux" + goarch: "amd64" + flags: "--with-tags-all=linux" + - name: "Portable (1/4)" + goos: "linux" + goarch: "amd64" + flags: "--without-tags-any=windows,darwin,linux --shard=1/4" + - name: "Portable (2/4)" + goos: "linux" + goarch: "amd64" + flags: "--without-tags-any=windows,darwin,linux --shard=2/4" + - name: "Portable (3/4)" + goos: "linux" + goarch: "amd64" + flags: "--without-tags-any=windows,darwin,linux --shard=3/4" + - name: "Portable (4/4)" + goos: "linux" + goarch: "amd64" + flags: "--without-tags-any=windows,darwin,linux --shard=4/4" + steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -741,14 +763,14 @@ jobs: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true - - name: run staticcheck + - name: run staticcheck (${{ matrix.name }}) working-directory: src run: | export GOROOT=$(./tool/go env GOROOT) ./tool/go run -exec \ "env GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }}" \ honnef.co/go/tools/cmd/staticcheck -- \ - $(env GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} ./tool/go list ./... | grep -v tempfork) + $(./tool/go run ./tool/listpkgs --ignore-3p --goos=${{ matrix.goos }} --goarch=${{ matrix.goarch }} ${{ matrix.flags }} ./...) notify_slack: if: always() @@ -795,7 +817,7 @@ jobs: }] } - check_mergeability: + merge_blocker: if: always() runs-on: ubuntu-24.04 needs: @@ -819,3 +841,46 @@ jobs: uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe # v1.2.2 with: jobs: ${{ toJSON(needs) }} + + # This waits on all the jobs which must never fail. Branch protection rules + # enforce these. No flaky tests are allowed in these jobs. (We don't want flaky + # tests anywhere, really, but a flaky test here prevents merging.) 
+ check_mergeability_strict: + if: always() + runs-on: ubuntu-24.04 + needs: + - android + - cross + - crossmin + - ios + - tailscale_go + - depaware + - go_generate + - go_mod_tidy + - licenses + - staticcheck + steps: + - name: Decide if change is okay to merge + if: github.event_name != 'push' + uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe # v1.2.2 + with: + jobs: ${{ toJSON(needs) }} + + check_mergeability: + if: always() + runs-on: ubuntu-24.04 + needs: + - check_mergeability_strict + - test + - windows + - vm + - wasm + - fuzz + - race-root-integration + - privileged + steps: + - name: Decide if change is okay to merge + if: github.event_name != 'push' + uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe # v1.2.2 + with: + jobs: ${{ toJSON(needs) }} diff --git a/Makefile b/Makefile index 1978af90d..41c67c711 100644 --- a/Makefile +++ b/Makefile @@ -64,7 +64,7 @@ buildmultiarchimage: ## Build (and optionally push) multiarch docker image check: staticcheck vet depaware buildwindows build386 buildlinuxarm buildwasm ## Perform basic checks and compilation tests staticcheck: ## Run staticcheck.io checks - ./tool/go run honnef.co/go/tools/cmd/staticcheck -- $$(./tool/go list ./... | grep -v tempfork) + ./tool/go run honnef.co/go/tools/cmd/staticcheck -- $$(./tool/go run ./tool/listpkgs --ignore-3p ./...) kube-generate-all: kube-generate-deepcopy ## Refresh generated files for Tailscale Kubernetes Operator ./tool/go generate ./cmd/k8s-operator diff --git a/tool/listpkgs/listpkgs.go b/tool/listpkgs/listpkgs.go new file mode 100644 index 000000000..400bf90c1 --- /dev/null +++ b/tool/listpkgs/listpkgs.go @@ -0,0 +1,206 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// listpkgs prints the import paths that match the Go package patterns +// given on the command line and conditionally filters them in various ways. +package main + +import ( + "bufio" + "flag" + "fmt" + "go/build/constraint" + "log" + "os" + "slices" + "strings" + "sync" + + "golang.org/x/tools/go/packages" +) + +var ( + ignore3p = flag.Bool("ignore-3p", false, "ignore third-party packages forked/vendored into Tailscale") + goos = flag.String("goos", "", "GOOS to use for loading packages (default: current OS)") + goarch = flag.String("goarch", "", "GOARCH to use for loading packages (default: current architecture)") + withTagsAllStr = flag.String("with-tags-all", "", "if non-empty, a comma-separated list of builds tags to require (a package will only be listed if it contains all of these build tags)") + withoutTagsAnyStr = flag.String("without-tags-any", "", "if non-empty, a comma-separated list of build constraints to exclude (a package will be omitted if it contains any of these build tags)") + shard = flag.String("shard", "", "if non-empty, a string of the form 'N/M' to only print packages in shard N of M (e.g. '1/3', '2/3', '3/3/' for different thirds of the list)") +) + +func main() { + flag.Parse() + + patterns := flag.Args() + if len(patterns) == 0 { + flag.Usage() + os.Exit(1) + } + + cfg := &packages.Config{ + Mode: packages.LoadFiles, + Env: os.Environ(), + } + if *goos != "" { + cfg.Env = append(cfg.Env, "GOOS="+*goos) + } + if *goarch != "" { + cfg.Env = append(cfg.Env, "GOARCH="+*goarch) + } + + pkgs, err := packages.Load(cfg, patterns...) 
+ if err != nil { + log.Fatalf("loading packages: %v", err) + } + + var withoutAny []string + if *withoutTagsAnyStr != "" { + withoutAny = strings.Split(*withoutTagsAnyStr, ",") + } + var withAll []string + if *withTagsAllStr != "" { + withAll = strings.Split(*withTagsAllStr, ",") + } + + seen := map[string]bool{} + matches := 0 +Pkg: + for _, pkg := range pkgs { + if pkg.PkgPath == "" { // malformed (shouldn’t happen) + continue + } + if seen[pkg.PkgPath] { + continue // suppress duplicates when patterns overlap + } + seen[pkg.PkgPath] = true + + pkgPath := pkg.PkgPath + + if *ignore3p && isThirdParty(pkgPath) { + continue + } + if withAll != nil { + for _, t := range withAll { + if !hasBuildTag(pkg, t) { + continue Pkg + } + } + } + for _, t := range withoutAny { + if hasBuildTag(pkg, t) { + continue Pkg + } + } + matches++ + + if *shard != "" { + var n, m int + if _, err := fmt.Sscanf(*shard, "%d/%d", &n, &m); err != nil || n < 1 || m < 1 { + log.Fatalf("invalid shard format %q; expected 'N/M'", *shard) + } + if m > 0 && (matches-1)%m != n-1 { + continue // not in this shard + } + } + fmt.Println(pkgPath) + } + + // If any package had errors (e.g. missing deps) report them via packages.PrintErrors. + // This mirrors `go list` behaviour when -e is *not* supplied. + if packages.PrintErrors(pkgs) > 0 { + os.Exit(1) + } +} + +func isThirdParty(pkg string) bool { + return strings.HasPrefix(pkg, "tailscale.com/tempfork/") +} + +// hasBuildTag reports whether any source file in pkg mentions `tag` +// in a //go:build constraint. +func hasBuildTag(pkg *packages.Package, tag string) bool { + all := slices.Concat(pkg.CompiledGoFiles, pkg.OtherFiles, pkg.IgnoredFiles) + suffix := "_" + tag + ".go" + for _, name := range all { + if strings.HasSuffix(name, suffix) { + return true + } + ok, err := fileMentionsTag(name, tag) + if err != nil { + log.Printf("reading %s: %v", name, err) + continue + } + if ok { + return true + } + } + return false +} + +// tagSet is a set of build tags. +// The values are always true. We avoid non-std set types +// to make this faster to "go run" on empty caches. +type tagSet map[string]bool + +var ( + mu sync.Mutex + fileTags = map[string]tagSet{} // abs path -> set of build tags mentioned in file +) + +func getFileTags(filename string) (tagSet, error) { + mu.Lock() + tags, ok := fileTags[filename] + mu.Unlock() + if ok { + return tags, nil + } + + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + + ts := make(tagSet) + s := bufio.NewScanner(f) + for s.Scan() { + line := s.Text() + if strings.TrimSpace(line) == "" { + continue // still in leading blank lines + } + if !strings.HasPrefix(line, "//") { + // hit real code – done with header comments + // TODO(bradfitz): care about /* */ comments? + break + } + if !strings.HasPrefix(line, "//go:build") { + continue // some other comment + } + expr, err := constraint.Parse(line) + if err != nil { + return nil, fmt.Errorf("parsing %q: %w", line, err) + } + // Call Eval to populate ts with the tags mentioned in the expression. + // We don't care about the result, just the side effect of populating ts. 
+ expr.Eval(func(tag string) bool { + ts[tag] = true + return true // arbitrary + }) + } + if err := s.Err(); err != nil { + return nil, fmt.Errorf("reading %s: %w", filename, err) + } + + mu.Lock() + defer mu.Unlock() + fileTags[filename] = ts + return tags, nil +} + +func fileMentionsTag(filename, tag string) (bool, error) { + tags, err := getFileTags(filename) + if err != nil { + return false, err + } + return tags[tag], nil +} From e7f5c9a01583b6d26977216d93c676ee21cb84eb Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Tue, 17 Jun 2025 13:05:05 -0400 Subject: [PATCH 0964/1708] derp/derphttp: add error notify for RunWatchConnectionLoop (#16261) The caller of client.RunWatchConnectionLoop may need to be aware of errors that occur within loop. Add a channel that notifies of errors to the caller to allow for decisions to be make as to the state of the client. Updates tailscale/corp#25756 Signed-off-by: Mike O'Driscoll --- cmd/derper/mesh.go | 3 +- derp/derphttp/derphttp_test.go | 73 ++++++++++++++++++++++++++++++++-- derp/derphttp/mesh_client.go | 15 ++++++- 3 files changed, 84 insertions(+), 7 deletions(-) diff --git a/cmd/derper/mesh.go b/cmd/derper/mesh.go index 1d8e3ef93..cbb2fa59a 100644 --- a/cmd/derper/mesh.go +++ b/cmd/derper/mesh.go @@ -72,6 +72,7 @@ func startMeshWithHost(s *derp.Server, hostTuple string) error { add := func(m derp.PeerPresentMessage) { s.AddPacketForwarder(m.Key, c) } remove := func(m derp.PeerGoneMessage) { s.RemovePacketForwarder(m.Peer, c) } - go c.RunWatchConnectionLoop(context.Background(), s.PublicKey(), logf, add, remove) + notifyError := func(err error) {} + go c.RunWatchConnectionLoop(context.Background(), s.PublicKey(), logf, add, remove, notifyError) return nil } diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index 8d02db922..252549660 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -11,12 +11,14 @@ import ( "net" "net/http" "net/http/httptest" + "strings" "sync" "testing" "time" "tailscale.com/derp" "tailscale.com/net/netmon" + "tailscale.com/net/netx" "tailscale.com/types/key" ) @@ -298,6 +300,7 @@ func TestBreakWatcherConnRecv(t *testing.T) { defer cancel() watcherChan := make(chan int, 1) + errChan := make(chan error, 1) // Start the watcher thread (which connects to the watched server) wg.Add(1) // To avoid using t.Logf after the test ends. 
See https://golang.org/issue/40343 @@ -311,8 +314,11 @@ func TestBreakWatcherConnRecv(t *testing.T) { watcherChan <- peers } remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } + notifyErr := func(err error) { + errChan <- err + } - watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove) + watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr) }() timer := time.NewTimer(5 * time.Second) @@ -326,6 +332,10 @@ func TestBreakWatcherConnRecv(t *testing.T) { if peers != 1 { t.Fatal("wrong number of peers added during watcher connection") } + case err := <-errChan: + if !strings.Contains(err.Error(), "use of closed network connection") { + t.Fatalf("expected notifyError connection error to contain 'use of closed network connection', got %v", err) + } case <-timer.C: t.Fatalf("watcher did not process the peer update") } @@ -369,6 +379,7 @@ func TestBreakWatcherConn(t *testing.T) { watcherChan := make(chan int, 1) breakerChan := make(chan bool, 1) + errorChan := make(chan error, 1) // Start the watcher thread (which connects to the watched server) wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343 @@ -384,8 +395,11 @@ func TestBreakWatcherConn(t *testing.T) { <-breakerChan } remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } + notifyError := func(err error) { + errorChan <- err + } - watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove) + watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyError) }() timer := time.NewTimer(5 * time.Second) @@ -399,6 +413,10 @@ func TestBreakWatcherConn(t *testing.T) { if peers != 1 { t.Fatal("wrong number of peers added during watcher connection") } + case err := <-errorChan: + if !strings.Contains(err.Error(), "use of closed network connection") { + t.Fatalf("expected notifyError connection error to contain 'use of closed network connection', got %v", err) + } case <-timer.C: t.Fatalf("watcher did not process the peer update") } @@ -414,6 +432,7 @@ func TestBreakWatcherConn(t *testing.T) { func noopAdd(derp.PeerPresentMessage) {} func noopRemove(derp.PeerGoneMessage) {} +func noopNotifyError(error) {} func TestRunWatchConnectionLoopServeConnect(t *testing.T) { defer func() { testHookWatchLookConnectResult = nil }() @@ -441,7 +460,7 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) { } return false } - watcher.RunWatchConnectionLoop(ctx, pub, t.Logf, noopAdd, noopRemove) + watcher.RunWatchConnectionLoop(ctx, pub, t.Logf, noopAdd, noopRemove, noopNotifyError) // Test connecting to the server with a zero value for ignoreServerKey, // so we should always connect. @@ -455,7 +474,7 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) { } return false } - watcher.RunWatchConnectionLoop(ctx, key.NodePublic{}, t.Logf, noopAdd, noopRemove) + watcher.RunWatchConnectionLoop(ctx, key.NodePublic{}, t.Logf, noopAdd, noopRemove, noopNotifyError) } // verify that the LocalAddr method doesn't acquire the mutex. 
@@ -491,3 +510,49 @@ func TestProbe(t *testing.T) { } } } + +func TestNotifyError(t *testing.T) { + defer func() { testHookWatchLookConnectResult = nil }() + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + priv := key.NewNode() + serverURL, s := newTestServer(t, priv) + defer s.Close() + + pub := priv.Public() + + // Test early error notification when c.connect fails. + watcher := newWatcherClient(t, priv, serverURL) + watcher.SetURLDialer(netx.DialFunc(func(ctx context.Context, network, addr string) (net.Conn, error) { + t.Helper() + return nil, fmt.Errorf("test error: %s", addr) + })) + defer watcher.Close() + + testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool { + t.Helper() + if err == nil { + t.Fatal("expected error connecting to server, got nil") + } + if wasSelfConnect { + t.Error("wanted normal connect; got self connect") + } + return false + } + + errChan := make(chan error, 1) + notifyError := func(err error) { + errChan <- err + } + watcher.RunWatchConnectionLoop(ctx, pub, t.Logf, noopAdd, noopRemove, notifyError) + + select { + case err := <-errChan: + if !strings.Contains(err.Error(), "test") { + t.Errorf("expected test error, got %v", err) + } + case <-ctx.Done(): + t.Fatalf("context done before receiving error: %v", ctx.Err()) + } +} diff --git a/derp/derphttp/mesh_client.go b/derp/derphttp/mesh_client.go index 66b8c166e..c14a9a7e1 100644 --- a/derp/derphttp/mesh_client.go +++ b/derp/derphttp/mesh_client.go @@ -31,6 +31,9 @@ var testHookWatchLookConnectResult func(connectError error, wasSelfConnect bool) // This behavior will likely change. Callers should do their own accounting // and dup suppression as needed. // +// If set the notifyError func is called with any error that occurs within the ctx +// main loop connection setup, or the inner loop receiving messages via RecvDetail. +// // infoLogf, if non-nil, is the logger to write periodic status updates about // how many peers are on the server. Error log output is set to the c's logger, // regardless of infoLogf's value. @@ -42,10 +45,11 @@ var testHookWatchLookConnectResult func(connectError error, wasSelfConnect bool) // initialized Client.WatchConnectionChanges to true. // // If the DERP connection breaks and reconnects, remove will be called for all -// previously seen peers, with Reason type PeerGoneReasonSynthetic. Those +// previously seen peers, with Reason type PeerGoneReasonMeshConnBroke. Those // clients are likely still connected and their add message will appear after // reconnect. -func (c *Client) RunWatchConnectionLoop(ctx context.Context, ignoreServerKey key.NodePublic, infoLogf logger.Logf, add func(derp.PeerPresentMessage), remove func(derp.PeerGoneMessage)) { +func (c *Client) RunWatchConnectionLoop(ctx context.Context, ignoreServerKey key.NodePublic, infoLogf logger.Logf, + add func(derp.PeerPresentMessage), remove func(derp.PeerGoneMessage), notifyError func(error)) { if !c.WatchConnectionChanges { if c.isStarted() { panic("invalid use of RunWatchConnectionLoop on already-started Client without setting Client.RunWatchConnectionLoop") @@ -121,6 +125,10 @@ func (c *Client) RunWatchConnectionLoop(ctx context.Context, ignoreServerKey key // Make sure we're connected before calling s.ServerPublicKey. 
_, _, err := c.connect(ctx, "RunWatchConnectionLoop") if err != nil { + logf("mesh connect: %v", err) + if notifyError != nil { + notifyError(err) + } if f := testHookWatchLookConnectResult; f != nil && !f(err, false) { return } @@ -141,6 +149,9 @@ func (c *Client) RunWatchConnectionLoop(ctx context.Context, ignoreServerKey key if err != nil { clear() logf("Recv: %v", err) + if notifyError != nil { + notifyError(err) + } sleep(retryInterval) break } From 939355f66727bb86819f90e74b25a6ed11ff5ad7 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 15 Jun 2025 08:20:48 -0700 Subject: [PATCH 0965/1708] tool/gocross: put the synthetic GOROOTs outside of the tsgo directory We aim to make the tsgo directories be read-only mounts on builders. But gocross was previously writing within the ~/.cache/tsgo/$HASH/ directories to make the synthetic GOROOT directories. This moves them to ~/.cache/tsgoroot/$HASH/ instead. Updates tailscale/corp#28679 Updates tailscale/corp#26717 Change-Id: I0d17730bbdce3d6374e79d49486826575d4690af Signed-off-by: Brad Fitzpatrick --- tool/gocross/gocross-wrapper.sh | 1 + tool/gocross/toolchain.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tool/gocross/gocross-wrapper.sh b/tool/gocross/gocross-wrapper.sh index 90f308eb5..e9fca2aea 100755 --- a/tool/gocross/gocross-wrapper.sh +++ b/tool/gocross/gocross-wrapper.sh @@ -74,6 +74,7 @@ case "$REV" in echo "# Cleaning up old Go toolchain $hash" >&2 rm -rf "$HOME/.cache/tsgo/$hash" rm -rf "$HOME/.cache/tsgo/$hash.extracted" + rm -rf "$HOME/.cache/tsgoroot/$hash" done fi ;; diff --git a/tool/gocross/toolchain.go b/tool/gocross/toolchain.go index e701662f5..f422e289e 100644 --- a/tool/gocross/toolchain.go +++ b/tool/gocross/toolchain.go @@ -62,7 +62,7 @@ func getToolchain() (toolchainDir, gorootDir string, err error) { cache := filepath.Join(os.Getenv("HOME"), ".cache") toolchainDir = filepath.Join(cache, "tsgo", rev) - gorootDir = filepath.Join(toolchainDir, "gocross-goroot") + gorootDir = filepath.Join(cache, "tsgoroot", rev) // You might wonder why getting the toolchain also provisions and returns a // path suitable for use as GOROOT. Wonder no longer! From 4431fb89c2191fc501e14b2fb92e934feaaf264e Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Tue, 17 Jun 2025 13:38:17 -0500 Subject: [PATCH 0966/1708] ipn/ipnlocal: add some verbose logging to taildrive peerapi handler Updates tailscale/corp#29702 Signed-off-by: Percy Wegmann --- ipn/ipnlocal/peerapi.go | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 60dd41024..89554f0ff 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -247,6 +247,10 @@ func (h *peerAPIHandler) logf(format string, a ...any) { h.ps.b.logf("peerapi: "+format, a...) } +func (h *peerAPIHandler) logfv1(format string, a ...any) { + h.ps.b.logf("[v1] peerapi: "+format, a...) +} + // isAddressValid reports whether addr is a valid destination address for this // node originating from the peer. 
func (h *peerAPIHandler) isAddressValid(addr netip.Addr) bool { @@ -1015,6 +1019,7 @@ func (rbw *requestBodyWrapper) Read(b []byte) (int, error) { } func (h *peerAPIHandler) handleServeDrive(w http.ResponseWriter, r *http.Request) { + h.logfv1("taildrive: got %s request from %s", r.Method, h.peerNode.Key().ShortString()) if !h.ps.b.DriveSharingEnabled() { h.logf("taildrive: not enabled") http.Error(w, "taildrive not enabled", http.StatusNotFound) @@ -1055,21 +1060,23 @@ func (h *peerAPIHandler) handleServeDrive(w http.ResponseWriter, r *http.Request } r.Body = bw - if r.Method == httpm.PUT || r.Method == httpm.GET { - defer func() { - switch wr.statusCode { - case 304: - // 304s are particularly chatty so skip logging. - default: - contentType := "unknown" - if ct := wr.Header().Get("Content-Type"); ct != "" { - contentType = ct - } - - h.logf("taildrive: share: %s from %s to %s: status-code=%d ext=%q content-type=%q tx=%.f rx=%.f", r.Method, h.peerNode.Key().ShortString(), h.selfNode.Key().ShortString(), wr.statusCode, parseDriveFileExtensionForLog(r.URL.Path), contentType, roundTraffic(wr.contentLength), roundTraffic(bw.bytesRead)) + defer func() { + switch wr.statusCode { + case 304: + // 304s are particularly chatty so skip logging. + default: + log := h.logf + if r.Method != httpm.PUT && r.Method != httpm.GET { + log = h.logfv1 } - }() - } + contentType := "unknown" + if ct := wr.Header().Get("Content-Type"); ct != "" { + contentType = ct + } + + log("taildrive: share: %s from %s to %s: status-code=%d ext=%q content-type=%q tx=%.f rx=%.f", r.Method, h.peerNode.Key().ShortString(), h.selfNode.Key().ShortString(), wr.statusCode, parseDriveFileExtensionForLog(r.URL.Path), contentType, roundTraffic(wr.contentLength), roundTraffic(bw.bytesRead)) + } + }() r.URL.Path = strings.TrimPrefix(r.URL.Path, taildrivePrefix) fs.ServeHTTPWithPerms(p, wr, r) From cbc14bd3b07f8367a4062033b701dbaa18c2c22a Mon Sep 17 00:00:00 2001 From: Juan Francisco Cantero Hurtado Date: Tue, 17 Jun 2025 20:22:42 +0200 Subject: [PATCH 0967/1708] ipn: add missing entries for OpenBSD Signed-off-by: Juan Francisco Cantero Hurtado --- ipn/ipnlocal/c2n.go | 2 +- ipn/ipnserver/actor.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 876c13064..4b91c3cb9 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -443,7 +443,7 @@ func findCmdTailscale() (string, error) { } case "windows": ts = filepath.Join(filepath.Dir(self), "tailscale.exe") - case "freebsd": + case "freebsd", "openbsd": if self == "/usr/local/bin/tailscaled" { ts = "/usr/local/bin/tailscale" } diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index dd40924bb..9d86d2c82 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -144,7 +144,7 @@ func (a *actor) Username() (string, error) { } defer tok.Close() return tok.Username() - case "darwin", "linux", "illumos", "solaris": + case "darwin", "linux", "illumos", "solaris", "openbsd": uid, ok := a.ci.Creds().UserID() if !ok { return "", errors.New("missing user ID") From 49ae66c10c95f5f6a1c33e1b021e4c8b2a95cd9f Mon Sep 17 00:00:00 2001 From: Simon Law Date: Tue, 17 Jun 2025 20:39:59 -0700 Subject: [PATCH 0968/1708] cmd/tailscale: clean up dns --help messages (#16306) This patch contains the following cleanups: 1. Simplify `ffcli.Command` definitions; 2. Word-wrap help text, consistent with other commands; 3. `tailscale dns --help` usage makes subcommand usage more obvious; 4. 
`tailscale dns query --help` describes DNS record types. Updates #cleanup Signed-off-by: Simon Law --- cmd/tailscale/cli/dns-query.go | 19 +++++++ cmd/tailscale/cli/dns-status.go | 94 ++++++++++++++++++++++----------- cmd/tailscale/cli/dns.go | 48 ++++++----------- 3 files changed, 98 insertions(+), 63 deletions(-) diff --git a/cmd/tailscale/cli/dns-query.go b/cmd/tailscale/cli/dns-query.go index da2d9d2a5..11f644537 100644 --- a/cmd/tailscale/cli/dns-query.go +++ b/cmd/tailscale/cli/dns-query.go @@ -9,12 +9,31 @@ import ( "fmt" "net/netip" "os" + "strings" "text/tabwriter" + "github.com/peterbourgon/ff/v3/ffcli" "golang.org/x/net/dns/dnsmessage" "tailscale.com/types/dnstype" ) +var dnsQueryCmd = &ffcli.Command{ + Name: "query", + ShortUsage: "tailscale dns query [a|aaaa|cname|mx|ns|opt|ptr|srv|txt]", + Exec: runDNSQuery, + ShortHelp: "Perform a DNS query", + LongHelp: strings.TrimSpace(` +The 'tailscale dns query' subcommand performs a DNS query for the specified name +using the internal DNS forwarder (100.100.100.100). + +By default, the DNS query will request an A record. Another DNS record type can +be specified as the second parameter. + +The output also provides information about the resolver(s) used to resolve the +query. +`), +} + func runDNSQuery(ctx context.Context, args []string) error { if len(args) < 1 { return flag.ErrHelp diff --git a/cmd/tailscale/cli/dns-status.go b/cmd/tailscale/cli/dns-status.go index e487c66bc..8c18622ce 100644 --- a/cmd/tailscale/cli/dns-status.go +++ b/cmd/tailscale/cli/dns-status.go @@ -5,15 +5,77 @@ package cli import ( "context" + "flag" "fmt" "maps" "slices" "strings" + "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/ipn" "tailscale.com/types/netmap" ) +var dnsStatusCmd = &ffcli.Command{ + Name: "status", + ShortUsage: "tailscale dns status [--all]", + Exec: runDNSStatus, + ShortHelp: "Print the current DNS status and configuration", + LongHelp: strings.TrimSpace(` +The 'tailscale dns status' subcommand prints the current DNS status and +configuration, including: + +- Whether the built-in DNS forwarder is enabled. + +- The MagicDNS configuration provided by the coordination server. + +- Details on which resolver(s) Tailscale believes the system is using by + default. + +The --all flag can be used to output advanced debugging information, including +fallback resolvers, nameservers, certificate domains, extra records, and the +exit node filtered set. + +=== Contents of the MagicDNS configuration === + +The MagicDNS configuration is provided by the coordination server to the client +and includes the following components: + +- MagicDNS enablement status: Indicates whether MagicDNS is enabled across the + entire tailnet. + +- MagicDNS Suffix: The DNS suffix used for devices within your tailnet. + +- DNS Name: The DNS name that other devices in the tailnet can use to reach this + device. + +- Resolvers: The preferred DNS resolver(s) to be used for resolving queries, in + order of preference. If no resolvers are listed here, the system defaults are + used. + +- Split DNS Routes: Custom DNS resolvers may be used to resolve hostnames in + specific domains, this is also known as a 'Split DNS' configuration. The + mapping of domains to their respective resolvers is provided here. + +- Certificate Domains: The DNS names for which the coordination server will + assist in provisioning TLS certificates. + +- Extra Records: Additional DNS records that the coordination server might + provide to the internal DNS resolver. 
+ +- Exit Node Filtered Set: DNS suffixes that the node, when acting as an exit + node DNS proxy, will not answer. + +For more information about the DNS functionality built into Tailscale, refer to +https://tailscale.com/kb/1054/dns. +`), + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("status") + fs.BoolVar(&dnsStatusArgs.all, "all", false, "outputs advanced debugging information") + return fs + })(), +} + // dnsStatusArgs are the arguments for the "dns status" subcommand. var dnsStatusArgs struct { all bool @@ -208,35 +270,3 @@ func fetchNetMap() (netMap *netmap.NetworkMap, err error) { } return notify.NetMap, nil } - -func dnsStatusLongHelp() string { - return `The 'tailscale dns status' subcommand prints the current DNS status and configuration, including: - -- Whether the built-in DNS forwarder is enabled. -- The MagicDNS configuration provided by the coordination server. -- Details on which resolver(s) Tailscale believes the system is using by default. - -The --all flag can be used to output advanced debugging information, including fallback resolvers, nameservers, certificate domains, extra records, and the exit node filtered set. - -=== Contents of the MagicDNS configuration === - -The MagicDNS configuration is provided by the coordination server to the client and includes the following components: - -- MagicDNS enablement status: Indicates whether MagicDNS is enabled across the entire tailnet. - -- MagicDNS Suffix: The DNS suffix used for devices within your tailnet. - -- DNS Name: The DNS name that other devices in the tailnet can use to reach this device. - -- Resolvers: The preferred DNS resolver(s) to be used for resolving queries, in order of preference. If no resolvers are listed here, the system defaults are used. - -- Split DNS Routes: Custom DNS resolvers may be used to resolve hostnames in specific domains, this is also known as a 'Split DNS' configuration. The mapping of domains to their respective resolvers is provided here. - -- Certificate Domains: The DNS names for which the coordination server will assist in provisioning TLS certificates. - -- Extra Records: Additional DNS records that the coordination server might provide to the internal DNS resolver. - -- Exit Node Filtered Set: DNS suffixes that the node, when acting as an exit node DNS proxy, will not answer. - -For more information about the DNS functionality built into Tailscale, refer to https://tailscale.com/kb/1054/dns.` -} diff --git a/cmd/tailscale/cli/dns.go b/cmd/tailscale/cli/dns.go index 402f0cedf..086abefd6 100644 --- a/cmd/tailscale/cli/dns.go +++ b/cmd/tailscale/cli/dns.go @@ -4,46 +4,32 @@ package cli import ( - "flag" + "strings" "github.com/peterbourgon/ff/v3/ffcli" ) var dnsCmd = &ffcli.Command{ - Name: "dns", - ShortHelp: "Diagnose the internal DNS forwarder", - LongHelp: dnsCmdLongHelp(), - ShortUsage: "tailscale dns [flags]", - UsageFunc: usageFuncNoDefaultValues, + Name: "dns", + ShortHelp: "Diagnose the internal DNS forwarder", + LongHelp: strings.TrimSpace(` +The 'tailscale dns' subcommand provides tools for diagnosing the internal DNS +forwarder (100.100.100.100). + +For more information about the DNS functionality built into Tailscale, refer to +https://tailscale.com/kb/1054/dns. 
+`), + ShortUsage: strings.Join([]string{ + dnsStatusCmd.ShortUsage, + dnsQueryCmd.ShortUsage, + }, "\n"), + UsageFunc: usageFuncNoDefaultValues, Subcommands: []*ffcli.Command{ - { - Name: "status", - ShortUsage: "tailscale dns status [--all]", - Exec: runDNSStatus, - ShortHelp: "Print the current DNS status and configuration", - LongHelp: dnsStatusLongHelp(), - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("status") - fs.BoolVar(&dnsStatusArgs.all, "all", false, "outputs advanced debugging information (fallback resolvers, nameservers, cert domains, extra records, and exit node filtered set)") - return fs - })(), - }, - { - Name: "query", - ShortUsage: "tailscale dns query [a|aaaa|cname|mx|ns|opt|ptr|srv|txt]", - Exec: runDNSQuery, - ShortHelp: "Perform a DNS query", - LongHelp: "The 'tailscale dns query' subcommand performs a DNS query for the specified name using the internal DNS forwarder (100.100.100.100).\n\nIt also provides information about the resolver(s) used to resolve the query.", - }, + dnsStatusCmd, + dnsQueryCmd, // TODO: implement `tailscale log` here // The above work is tracked in https://github.com/tailscale/tailscale/issues/13326 }, } - -func dnsCmdLongHelp() string { - return `The 'tailscale dns' subcommand provides tools for diagnosing the internal DNS forwarder (100.100.100.100). - -For more information about the DNS functionality built into Tailscale, refer to https://tailscale.com/kb/1054/dns.` -} From a91fcc88138cceb4f891f5338f2b28d80bb81b9c Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Wed, 18 Jun 2025 11:38:18 +0100 Subject: [PATCH 0969/1708] ipn/ipnlocal: make pricing restriction message for Tailnet Lock clearer Fixes tailscale/corp#24417 Signed-off-by: Anton Tolchanov --- ipn/ipnlocal/network-lock.go | 15 +++++++++------ ipn/localapi/localapi.go | 5 +++++ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 36d39a465..10f0cc827 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -600,18 +600,14 @@ func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byt var ourNodeKey key.NodePublic var nlPriv key.NLPrivate - b.mu.Lock() - - if !b.capTailnetLock { - b.mu.Unlock() - return errors.New("not permitted to enable tailnet lock") - } + b.mu.Lock() if p := b.pm.CurrentPrefs(); p.Valid() && p.Persist().Valid() && !p.Persist().PrivateNodeKey().IsZero() { ourNodeKey = p.Persist().PublicNodeKey() nlPriv = p.Persist().NetworkLockKey() } b.mu.Unlock() + if ourNodeKey.IsZero() || nlPriv.IsZero() { return errors.New("no node-key: is tailscale logged in?") } @@ -671,6 +667,13 @@ func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byt return err } +// NetworkLockAllowed reports whether the node is allowed to use Tailnet Lock. +func (b *LocalBackend) NetworkLockAllowed() bool { + b.mu.Lock() + defer b.mu.Unlock() + return b.capTailnetLock +} + // Only use is in tests. 
func (b *LocalBackend) NetworkLockVerifySignatureForTest(nks tkatype.MarshaledSignature, nodeKey key.NodePublic) error { b.mu.Lock() diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 6344da42d..a90ae5d84 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -1970,6 +1970,11 @@ func (h *Handler) serveTKAInit(w http.ResponseWriter, r *http.Request) { return } + if !h.b.NetworkLockAllowed() { + http.Error(w, "Tailnet Lock is not supported on your pricing plan", http.StatusForbidden) + return + } + if err := h.b.NetworkLockInit(req.Keys, req.DisablementValues, req.SupportDisablement); err != nil { http.Error(w, "initialization failed: "+err.Error(), http.StatusInternalServerError) return From 45a4b69ce01f3529728bb523ac348794d9abc14a Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Wed, 18 Jun 2025 10:43:19 -0500 Subject: [PATCH 0970/1708] cmd/tsidp: fix OIDC client persistence across restarts Fixes #16088 Signed-off-by: Raj Singh --- cmd/tsidp/tsidp.go | 19 +++--- cmd/tsidp/tsidp_test.go | 138 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 148 insertions(+), 9 deletions(-) diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 5df99e1b8..43020eaf7 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -161,16 +161,17 @@ func main() { } else { srv.serverURL = fmt.Sprintf("https://%s", strings.TrimSuffix(st.Self.DNSName, ".")) } - if *flagFunnel { - f, err := os.Open(funnelClientsFile) - if err == nil { - srv.funnelClients = make(map[string]*funnelClient) - if err := json.NewDecoder(f).Decode(&srv.funnelClients); err != nil { - log.Fatalf("could not parse %s: %v", funnelClientsFile, err) - } - } else if !errors.Is(err, os.ErrNotExist) { - log.Fatalf("could not open %s: %v", funnelClientsFile, err) + + // Load funnel clients from disk if they exist, regardless of whether funnel is enabled + // This ensures OIDC clients persist across restarts + f, err := os.Open(funnelClientsFile) + if err == nil { + if err := json.NewDecoder(f).Decode(&srv.funnelClients); err != nil { + log.Fatalf("could not parse %s: %v", funnelClientsFile, err) } + f.Close() + } else if !errors.Is(err, os.ErrNotExist) { + log.Fatalf("could not open %s: %v", funnelClientsFile, err) } log.Printf("Running tsidp at %s ...", srv.serverURL) diff --git a/cmd/tsidp/tsidp_test.go b/cmd/tsidp/tsidp_test.go index 6932d8e29..e5465d3cf 100644 --- a/cmd/tsidp/tsidp_test.go +++ b/cmd/tsidp/tsidp_test.go @@ -7,6 +7,7 @@ import ( "crypto/rand" "crypto/rsa" "encoding/json" + "errors" "fmt" "io" "log" @@ -14,6 +15,7 @@ import ( "net/http/httptest" "net/netip" "net/url" + "os" "reflect" "sort" "strings" @@ -825,3 +827,139 @@ func TestExtraUserInfo(t *testing.T) { }) } } + +func TestFunnelClientsPersistence(t *testing.T) { + testClients := map[string]*funnelClient{ + "test-client-1": { + ID: "test-client-1", + Secret: "test-secret-1", + Name: "Test Client 1", + RedirectURI: "https://example.com/callback", + }, + "test-client-2": { + ID: "test-client-2", + Secret: "test-secret-2", + Name: "Test Client 2", + RedirectURI: "https://example2.com/callback", + }, + } + + testData, err := json.Marshal(testClients) + if err != nil { + t.Fatalf("failed to marshal test data: %v", err) + } + + tmpFile := t.TempDir() + "/oidc-funnel-clients.json" + if err := os.WriteFile(tmpFile, testData, 0600); err != nil { + t.Fatalf("failed to write test file: %v", err) + } + + t.Run("step1_load_from_existing_file", func(t *testing.T) { + srv := &idpServer{} + + // Simulate the funnel clients loading logic from 
main() + srv.funnelClients = make(map[string]*funnelClient) + f, err := os.Open(tmpFile) + if err == nil { + if err := json.NewDecoder(f).Decode(&srv.funnelClients); err != nil { + t.Fatalf("could not parse %s: %v", tmpFile, err) + } + f.Close() + } else if !errors.Is(err, os.ErrNotExist) { + t.Fatalf("could not open %s: %v", tmpFile, err) + } + + // Verify clients were loaded correctly + if len(srv.funnelClients) != 2 { + t.Errorf("expected 2 clients, got %d", len(srv.funnelClients)) + } + + client1, ok := srv.funnelClients["test-client-1"] + if !ok { + t.Error("expected test-client-1 to be loaded") + } else { + if client1.Name != "Test Client 1" { + t.Errorf("expected client name 'Test Client 1', got '%s'", client1.Name) + } + if client1.Secret != "test-secret-1" { + t.Errorf("expected client secret 'test-secret-1', got '%s'", client1.Secret) + } + } + }) + + t.Run("step2_initialize_empty_when_no_file", func(t *testing.T) { + nonExistentFile := t.TempDir() + "/non-existent.json" + + srv := &idpServer{} + + // Simulate the funnel clients loading logic from main() + srv.funnelClients = make(map[string]*funnelClient) + f, err := os.Open(nonExistentFile) + if err == nil { + if err := json.NewDecoder(f).Decode(&srv.funnelClients); err != nil { + t.Fatalf("could not parse %s: %v", nonExistentFile, err) + } + f.Close() + } else if !errors.Is(err, os.ErrNotExist) { + t.Fatalf("could not open %s: %v", nonExistentFile, err) + } + + // Verify map is initialized but empty + if srv.funnelClients == nil { + t.Error("expected funnelClients map to be initialized") + } + if len(srv.funnelClients) != 0 { + t.Errorf("expected empty map, got %d clients", len(srv.funnelClients)) + } + }) + + t.Run("step3_persist_and_reload_clients", func(t *testing.T) { + tmpFile2 := t.TempDir() + "/test-persistence.json" + + // Create initial server with one client + srv1 := &idpServer{ + funnelClients: make(map[string]*funnelClient), + } + srv1.funnelClients["new-client"] = &funnelClient{ + ID: "new-client", + Secret: "new-secret", + Name: "New Client", + RedirectURI: "https://new.example.com/callback", + } + + // Save clients to file (simulating saveFunnelClients) + data, err := json.Marshal(srv1.funnelClients) + if err != nil { + t.Fatalf("failed to marshal clients: %v", err) + } + if err := os.WriteFile(tmpFile2, data, 0600); err != nil { + t.Fatalf("failed to write clients file: %v", err) + } + + // Create new server instance and load clients + srv2 := &idpServer{} + srv2.funnelClients = make(map[string]*funnelClient) + f, err := os.Open(tmpFile2) + if err == nil { + if err := json.NewDecoder(f).Decode(&srv2.funnelClients); err != nil { + t.Fatalf("could not parse %s: %v", tmpFile2, err) + } + f.Close() + } else if !errors.Is(err, os.ErrNotExist) { + t.Fatalf("could not open %s: %v", tmpFile2, err) + } + + // Verify the client was persisted correctly + loadedClient, ok := srv2.funnelClients["new-client"] + if !ok { + t.Error("expected new-client to be loaded after persistence") + } else { + if loadedClient.Name != "New Client" { + t.Errorf("expected client name 'New Client', got '%s'", loadedClient.Name) + } + if loadedClient.Secret != "new-secret" { + t.Errorf("expected client secret 'new-secret', got '%s'", loadedClient.Secret) + } + } + }) +} From fcab50b2763a1c7cd51f3c5d9cf8d2198eb7fa90 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 18 Jun 2025 10:31:00 -0700 Subject: [PATCH 0971/1708] ipn/ipnlocal,wgengine{/magicsock}: replace SetNetworkMap with eventbus (#16299) Same with UpdateNetmapDelta. 
Updates tailscale/corp#27502 Updates #15160 Signed-off-by: Jordan Whited --- ipn/ipnlocal/local.go | 4 - ipn/ipnlocal/local_test.go | 49 +++++++- ipn/ipnlocal/node_backend.go | 46 ++++---- ipn/ipnlocal/serve_test.go | 2 + wgengine/magicsock/magicsock.go | 161 ++++++++++++++------------- wgengine/magicsock/magicsock_test.go | 62 +++++++---- wgengine/userspace.go | 1 - 7 files changed, 195 insertions(+), 130 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index cd30e92bb..908418d4a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1946,10 +1946,6 @@ var _ controlclient.NetmapDeltaUpdater = (*LocalBackend)(nil) // UpdateNetmapDelta implements controlclient.NetmapDeltaUpdater. func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) { - if !b.MagicConn().UpdateNetmapDelta(muts) { - return false - } - var notify *ipn.Notify // non-nil if we need to send a Notify defer func() { if notify != nil { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 281d0e9c4..6e24f4300 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -5,6 +5,7 @@ package ipnlocal import ( "context" + "encoding/binary" "encoding/json" "errors" "fmt" @@ -23,6 +24,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + memro "go4.org/mem" "go4.org/netipx" "golang.org/x/net/dns/dnsmessage" "tailscale.com/appc" @@ -77,6 +79,12 @@ func inRemove(ip netip.Addr) bool { return false } +func makeNodeKeyFromID(nodeID tailcfg.NodeID) key.NodePublic { + raw := make([]byte, 32) + binary.BigEndian.PutUint64(raw[24:], uint64(nodeID)) + return key.NodePublicFromRaw32(memro.B(raw)) +} + func TestShrinkDefaultRoute(t *testing.T) { tests := []struct { route string @@ -794,6 +802,7 @@ func TestStatusPeerCapabilities(t *testing.T) { (&tailcfg.Node{ ID: 1, StableID: "foo", + Key: makeNodeKeyFromID(1), IsWireGuardOnly: true, Hostinfo: (&tailcfg.Hostinfo{}).View(), Capabilities: []tailcfg.NodeCapability{tailcfg.CapabilitySSH}, @@ -804,6 +813,7 @@ func TestStatusPeerCapabilities(t *testing.T) { (&tailcfg.Node{ ID: 2, StableID: "bar", + Key: makeNodeKeyFromID(2), Hostinfo: (&tailcfg.Hostinfo{}).View(), Capabilities: []tailcfg.NodeCapability{tailcfg.CapabilityAdmin}, CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ @@ -830,12 +840,14 @@ func TestStatusPeerCapabilities(t *testing.T) { (&tailcfg.Node{ ID: 1, StableID: "foo", + Key: makeNodeKeyFromID(1), IsWireGuardOnly: true, Hostinfo: (&tailcfg.Hostinfo{}).View(), }).View(), (&tailcfg.Node{ ID: 2, StableID: "bar", + Key: makeNodeKeyFromID(2), Hostinfo: (&tailcfg.Hostinfo{}).View(), }).View(), }, @@ -927,7 +939,11 @@ func TestUpdateNetmapDelta(t *testing.T) { nm := &netmap.NetworkMap{} for i := range 5 { - nm.Peers = append(nm.Peers, (&tailcfg.Node{ID: (tailcfg.NodeID(i) + 1)}).View()) + id := tailcfg.NodeID(i + 1) + nm.Peers = append(nm.Peers, (&tailcfg.Node{ + ID: id, + Key: makeNodeKeyFromID(id), + }).View()) } b.currentNode().SetNetMap(nm) @@ -963,18 +979,22 @@ func TestUpdateNetmapDelta(t *testing.T) { wants := []*tailcfg.Node{ { ID: 1, + Key: makeNodeKeyFromID(1), HomeDERP: 1, }, { ID: 2, + Key: makeNodeKeyFromID(2), Online: ptr.To(true), }, { ID: 3, + Key: makeNodeKeyFromID(3), Online: ptr.To(false), }, { ID: 4, + Key: makeNodeKeyFromID(4), LastSeen: ptr.To(someTime), }, } @@ -998,12 +1018,14 @@ func TestWhoIs(t *testing.T) { SelfNode: (&tailcfg.Node{ ID: 1, User: 10, + Key: makeNodeKeyFromID(1), Addresses: 
[]netip.Prefix{netip.MustParsePrefix("100.101.102.103/32")}, }).View(), Peers: []tailcfg.NodeView{ (&tailcfg.Node{ ID: 2, User: 20, + Key: makeNodeKeyFromID(2), Addresses: []netip.Prefix{netip.MustParsePrefix("100.200.200.200/32")}, }).View(), }, @@ -1593,6 +1615,7 @@ func dnsResponse(domain, address string) []byte { } func TestSetExitNodeIDPolicy(t *testing.T) { + zeroValHostinfoView := new(tailcfg.Hostinfo).View() pfx := netip.MustParsePrefix tests := []struct { name string @@ -1669,14 +1692,18 @@ func TestSetExitNodeIDPolicy(t *testing.T) { }).View(), Peers: []tailcfg.NodeView{ (&tailcfg.Node{ + ID: 201, Name: "a.tailnet", + Key: makeNodeKeyFromID(201), Addresses: []netip.Prefix{ pfx("100.0.0.201/32"), pfx("100::201/128"), }, }).View(), (&tailcfg.Node{ + ID: 202, Name: "b.tailnet", + Key: makeNodeKeyFromID(202), Addresses: []netip.Prefix{ pfx("100::202/128"), }, @@ -1702,18 +1729,24 @@ func TestSetExitNodeIDPolicy(t *testing.T) { }).View(), Peers: []tailcfg.NodeView{ (&tailcfg.Node{ + ID: 123, Name: "a.tailnet", StableID: tailcfg.StableNodeID("123"), + Key: makeNodeKeyFromID(123), Addresses: []netip.Prefix{ pfx("127.0.0.1/32"), pfx("100::201/128"), }, + Hostinfo: zeroValHostinfoView, }).View(), (&tailcfg.Node{ + ID: 202, Name: "b.tailnet", + Key: makeNodeKeyFromID(202), Addresses: []netip.Prefix{ pfx("100::202/128"), }, + Hostinfo: zeroValHostinfoView, }).View(), }, }, @@ -1734,18 +1767,24 @@ func TestSetExitNodeIDPolicy(t *testing.T) { }).View(), Peers: []tailcfg.NodeView{ (&tailcfg.Node{ + ID: 123, Name: "a.tailnet", StableID: tailcfg.StableNodeID("123"), + Key: makeNodeKeyFromID(123), Addresses: []netip.Prefix{ pfx("127.0.0.1/32"), pfx("100::201/128"), }, + Hostinfo: zeroValHostinfoView, }).View(), (&tailcfg.Node{ + ID: 202, Name: "b.tailnet", + Key: makeNodeKeyFromID(202), Addresses: []netip.Prefix{ pfx("100::202/128"), }, + Hostinfo: zeroValHostinfoView, }).View(), }, }, @@ -1768,18 +1807,24 @@ func TestSetExitNodeIDPolicy(t *testing.T) { }).View(), Peers: []tailcfg.NodeView{ (&tailcfg.Node{ + ID: 123, Name: "a.tailnet", StableID: tailcfg.StableNodeID("123"), + Key: makeNodeKeyFromID(123), Addresses: []netip.Prefix{ pfx("100.64.5.6/32"), pfx("100::201/128"), }, + Hostinfo: zeroValHostinfoView, }).View(), (&tailcfg.Node{ + ID: 202, Name: "b.tailnet", + Key: makeNodeKeyFromID(202), Addresses: []netip.Prefix{ pfx("100::202/128"), }, + Hostinfo: zeroValHostinfoView, }).View(), }, }, @@ -1827,7 +1872,6 @@ func TestSetExitNodeIDPolicy(t *testing.T) { b.currentNode().SetNetMap(test.nm) b.pm = pm b.lastSuggestedExitNode = test.lastSuggestedExitNode - prefs := b.pm.prefs.AsStruct() if changed := applySysPolicy(prefs, test.lastSuggestedExitNode, false) || setExitNodeID(prefs, test.nm); changed != test.prefsChanged { t.Errorf("wanted prefs changed %v, got prefs changed %v", test.prefsChanged, changed) @@ -3218,6 +3262,7 @@ type peerOptFunc func(*tailcfg.Node) func makePeer(id tailcfg.NodeID, opts ...peerOptFunc) tailcfg.NodeView { node := &tailcfg.Node{ ID: id, + Key: makeNodeKeyFromID(id), StableID: tailcfg.StableNodeID(fmt.Sprintf("stable%d", id)), Name: fmt.Sprintf("peer%d", id), HomeDERP: int(id), diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index efa74577b..05389a677 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -72,9 +72,10 @@ type nodeBackend struct { filterAtomic atomic.Pointer[filter.Filter] // initialized once and immutable - eventClient *eventbus.Client - filterUpdates *eventbus.Publisher[magicsock.FilterUpdate] - 
nodeUpdates *eventbus.Publisher[magicsock.NodeAddrsHostInfoUpdate] + eventClient *eventbus.Client + filterPub *eventbus.Publisher[magicsock.FilterUpdate] + nodeViewsPub *eventbus.Publisher[magicsock.NodeViewsUpdate] + nodeMutsPub *eventbus.Publisher[magicsock.NodeMutationsUpdate] // TODO(nickkhyl): maybe use sync.RWMutex? mu sync.Mutex // protects the following fields @@ -113,9 +114,10 @@ func newNodeBackend(ctx context.Context, bus *eventbus.Bus) *nodeBackend { // Default filter blocks everything and logs nothing. noneFilter := filter.NewAllowNone(logger.Discard, &netipx.IPSet{}) nb.filterAtomic.Store(noneFilter) - nb.filterUpdates = eventbus.Publish[magicsock.FilterUpdate](nb.eventClient) - nb.nodeUpdates = eventbus.Publish[magicsock.NodeAddrsHostInfoUpdate](nb.eventClient) - nb.filterUpdates.Publish(magicsock.FilterUpdate{Filter: nb.filterAtomic.Load()}) + nb.filterPub = eventbus.Publish[magicsock.FilterUpdate](nb.eventClient) + nb.nodeViewsPub = eventbus.Publish[magicsock.NodeViewsUpdate](nb.eventClient) + nb.nodeMutsPub = eventbus.Publish[magicsock.NodeMutationsUpdate](nb.eventClient) + nb.filterPub.Publish(magicsock.FilterUpdate{Filter: nb.filterAtomic.Load()}) return nb } @@ -379,6 +381,12 @@ func (nb *nodeBackend) SetNetMap(nm *netmap.NetworkMap) { nb.netMap = nm nb.updateNodeByAddrLocked() nb.updatePeersLocked() + nv := magicsock.NodeViewsUpdate{} + if nm != nil { + nv.SelfNode = nm.SelfNode + nv.Peers = nm.Peers + } + nb.nodeViewsPub.Publish(nv) } func (nb *nodeBackend) updateNodeByAddrLocked() { @@ -429,16 +437,9 @@ func (nb *nodeBackend) updatePeersLocked() { nb.peers[k] = tailcfg.NodeView{} } - changed := magicsock.NodeAddrsHostInfoUpdate{ - Complete: true, - } // Second pass, add everything wanted. for _, p := range nm.Peers { mak.Set(&nb.peers, p.ID(), p) - mak.Set(&changed.NodesByID, p.ID(), magicsock.NodeAddrsHostInfo{ - Addresses: p.Addresses(), - Hostinfo: p.Hostinfo(), - }) } // Third pass, remove deleted things. @@ -447,7 +448,6 @@ func (nb *nodeBackend) updatePeersLocked() { delete(nb.peers, k) } } - nb.nodeUpdates.Publish(changed) } func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) { @@ -462,8 +462,8 @@ func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo // call (e.g. 
its endpoints + online status both change) var mutableNodes map[tailcfg.NodeID]*tailcfg.Node - changed := magicsock.NodeAddrsHostInfoUpdate{ - Complete: false, + update := magicsock.NodeMutationsUpdate{ + Mutations: make([]netmap.NodeMutation, 0, len(muts)), } for _, m := range muts { n, ok := mutableNodes[m.NodeIDBeingMutated()] @@ -475,18 +475,14 @@ func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo } n = nv.AsStruct() mak.Set(&mutableNodes, nv.ID(), n) + update.Mutations = append(update.Mutations, m) } m.Apply(n) } for nid, n := range mutableNodes { - nv := n.View() - nb.peers[nid] = nv - mak.Set(&changed.NodesByID, nid, magicsock.NodeAddrsHostInfo{ - Addresses: nv.Addresses(), - Hostinfo: nv.Hostinfo(), - }) - } - nb.nodeUpdates.Publish(changed) + nb.peers[nid] = n.View() + } + nb.nodeMutsPub.Publish(update) return true } @@ -508,7 +504,7 @@ func (nb *nodeBackend) filter() *filter.Filter { func (nb *nodeBackend) setFilter(f *filter.Filter) { nb.filterAtomic.Store(f) - nb.filterUpdates.Publish(magicsock.FilterUpdate{Filter: f}) + nb.filterPub.Publish(magicsock.FilterUpdate{Filter: f}) } func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index b9370f877..57d1a4745 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -918,6 +918,7 @@ func newTestBackend(t *testing.T) *LocalBackend { ID: 152, ComputedName: "some-peer", User: tailcfg.UserID(1), + Key: makeNodeKeyFromID(152), Addresses: []netip.Prefix{ netip.MustParsePrefix("100.150.151.152/32"), }, @@ -927,6 +928,7 @@ func newTestBackend(t *testing.T) *LocalBackend { ComputedName: "some-tagged-peer", Tags: []string{"tag:server", "tag:test"}, User: tailcfg.UserID(1), + Key: makeNodeKeyFromID(153), Addresses: []netip.Prefix{ netip.MustParsePrefix("100.150.151.153/32"), }, diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 1042e6794..a6c6a3fb6 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -160,6 +160,14 @@ type Conn struct { connCtxCancel func() // closes connCtx donec <-chan struct{} // connCtx.Done()'s to avoid context.cancelCtx.Done()'s mutex per call + // These [eventbus.Subscriber] fields are solely accessed by + // consumeEventbusTopics once initialized. + pmSub *eventbus.Subscriber[portmapper.Mapping] + filterSub *eventbus.Subscriber[FilterUpdate] + nodeViewsSub *eventbus.Subscriber[NodeViewsUpdate] + nodeMutsSub *eventbus.Subscriber[NodeMutationsUpdate] + subsDoneCh chan struct{} // closed when consumeEventbusTopics returns + // pconn4 and pconn6 are the underlying UDP sockets used to // send/receive packets for wireguard and other magicsock // protocols. 
@@ -341,9 +349,9 @@ type Conn struct { netInfoLast *tailcfg.NetInfo derpMap *tailcfg.DERPMap // nil (or zero regions/nodes) means DERP is disabled - peers views.Slice[tailcfg.NodeView] // from last SetNetworkMap update - lastFlags debugFlags // at time of last SetNetworkMap - firstAddrForTest netip.Addr // from last SetNetworkMap update; for tests only + peers views.Slice[tailcfg.NodeView] // from last onNodeViewsUpdate update + lastFlags debugFlags // at time of last onNodeViewsUpdate + firstAddrForTest netip.Addr // from last onNodeViewsUpdate update; for tests only privateKey key.NodePrivate // WireGuard private key for this node everHadKey bool // whether we ever had a non-zero private key myDerp int // nearest DERP region ID; 0 means none/unknown @@ -411,10 +419,8 @@ func (c *Conn) dlogf(format string, a ...any) { // Options contains options for Listen. type Options struct { // EventBus, if non-nil, is used for event publication and subscription by - // each Conn created from these Options. - // - // TODO(creachadair): As of 2025-03-19 this is optional, but is intended to - // become required non-nil. + // each Conn created from these Options. It must not be nil outside of + // tests. EventBus *eventbus.Bus // Logf provides a log function to use. It must not be nil. @@ -503,20 +509,22 @@ func (o *Options) derpActiveFunc() func() { return o.DERPActiveFunc } -// NodeAddrsHostInfoUpdate represents an update event of the addresses and -// [tailcfg.HostInfoView] for a node set. This event is published over an -// [eventbus.Bus]. [magicsock.Conn] is the sole subscriber as of 2025-06. If -// you are adding more subscribers consider moving this type out of magicsock. -type NodeAddrsHostInfoUpdate struct { - NodesByID map[tailcfg.NodeID]NodeAddrsHostInfo - Complete bool // true if NodesByID contains all known nodes, false if it may be a subset +// NodeViewsUpdate represents an update event of [tailcfg.NodeView] for all +// nodes. This event is published over an [eventbus.Bus]. It may be published +// with an invalid SelfNode, and/or zero/nil Peers. [magicsock.Conn] is the sole +// subscriber as of 2025-06. If you are adding more subscribers consider moving +// this type out of magicsock. +type NodeViewsUpdate struct { + SelfNode tailcfg.NodeView + Peers []tailcfg.NodeView } -// NodeAddrsHostInfo represents the addresses and [tailcfg.HostinfoView] for a -// Tailscale node. -type NodeAddrsHostInfo struct { - Addresses views.Slice[netip.Prefix] - Hostinfo tailcfg.HostinfoView +// NodeMutationsUpdate represents an update event of one or more +// [netmap.NodeMutation]. This event is published over an [eventbus.Bus]. +// [magicsock.Conn] is the sole subscriber as of 2025-06. If you are adding more +// subscribers consider moving this type out of magicsock. +type NodeMutationsUpdate struct { + Mutations []netmap.NodeMutation } // FilterUpdate represents an update event for a [*filter.Filter]. This event is @@ -560,16 +568,28 @@ func newConn(logf logger.Logf) *Conn { return c } -// consumeEventbusTopic consumes events from sub and passes them to -// handlerFn until sub.Done() is closed. -func consumeEventbusTopic[T any](sub *eventbus.Subscriber[T], handlerFn func(t T)) { - defer sub.Close() +// consumeEventbusTopics consumes events from all [Conn]-relevant +// [eventbus.Subscriber]'s and passes them to their related handler. Events are +// always handled in the order they are received, i.e. the next event is not +// read until the previous event's handler has returned. 
It returns when the +// [portmapper.Mapping] subscriber is closed, which is interpreted to be the +// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either +// all open or all closed). +func (c *Conn) consumeEventbusTopics() { + defer close(c.subsDoneCh) + for { select { - case evt := <-sub.Events(): - handlerFn(evt) - case <-sub.Done(): + case <-c.pmSub.Done(): return + case <-c.pmSub.Events(): + c.onPortMapChanged() + case filterUpdate := <-c.filterSub.Events(): + c.onFilterUpdate(filterUpdate) + case nodeViews := <-c.nodeViewsSub.Events(): + c.onNodeViewsUpdate(nodeViews) + case nodeMuts := <-c.nodeMutsSub.Events(): + c.onNodeMutationsUpdate(nodeMuts) } } } @@ -592,29 +612,17 @@ func NewConn(opts Options) (*Conn, error) { c.testOnlyPacketListener = opts.TestOnlyPacketListener c.noteRecvActivity = opts.NoteRecvActivity - // If an event bus is enabled, subscribe to portmapping changes; otherwise - // use the callback mechanism of portmapper.Client. - // - // TODO(creachadair): Remove the switch once the event bus is mandatory. - onPortMapChanged := c.onPortMapChanged if c.eventBus != nil { c.eventClient = c.eventBus.Client("magicsock.Conn") - pmSub := eventbus.Subscribe[portmapper.Mapping](c.eventClient) - go consumeEventbusTopic(pmSub, func(_ portmapper.Mapping) { - c.onPortMapChanged() - }) - filterSub := eventbus.Subscribe[FilterUpdate](c.eventClient) - go consumeEventbusTopic(filterSub, func(t FilterUpdate) { - // TODO(jwhited): implement - }) - nodeSub := eventbus.Subscribe[NodeAddrsHostInfoUpdate](c.eventClient) - go consumeEventbusTopic(nodeSub, func(t NodeAddrsHostInfoUpdate) { - // TODO(jwhited): implement - }) - - // Disable the explicit callback from the portmapper, the subscriber handles it. - onPortMapChanged = nil + // Subscribe calls must return before NewConn otherwise published + // events can be missed. + c.pmSub = eventbus.Subscribe[portmapper.Mapping](c.eventClient) + c.filterSub = eventbus.Subscribe[FilterUpdate](c.eventClient) + c.nodeViewsSub = eventbus.Subscribe[NodeViewsUpdate](c.eventClient) + c.nodeMutsSub = eventbus.Subscribe[NodeMutationsUpdate](c.eventClient) + c.subsDoneCh = make(chan struct{}) + go c.consumeEventbusTopics() } // Don't log the same log messages possibly every few seconds in our @@ -630,7 +638,6 @@ func NewConn(opts Options) (*Conn, error) { NetMon: opts.NetMon, DebugKnobs: portMapOpts, ControlKnobs: opts.ControlKnobs, - OnChange: onPortMapChanged, }) c.portMapper.SetGatewayLookupFunc(opts.NetMon.GatewayAndSelfIP) c.netMon = opts.NetMon @@ -2551,12 +2558,13 @@ func capVerIsRelayCapable(version tailcfg.CapabilityVersion) bool { return false } -// SetNetworkMap is called when the control client gets a new network -// map from the control server. It must always be non-nil. -// -// It should not use the DERPMap field of NetworkMap; that's -// conditionally sent to SetDERPMap instead. -func (c *Conn) SetNetworkMap(nm *netmap.NetworkMap) { +func (c *Conn) onFilterUpdate(f FilterUpdate) { + // TODO(jwhited): implement +} + +// onNodeViewsUpdate is called when a [NodeViewsUpdate] is received over the +// [eventbus.Bus]. +func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { c.mu.Lock() defer c.mu.Unlock() @@ -2565,15 +2573,15 @@ func (c *Conn) SetNetworkMap(nm *netmap.NetworkMap) { } priorPeers := c.peers - metricNumPeers.Set(int64(len(nm.Peers))) + metricNumPeers.Set(int64(len(update.Peers))) // Update c.netMap regardless, before the following early return. 
- curPeers := views.SliceOf(nm.Peers) + curPeers := views.SliceOf(update.Peers) c.peers = curPeers flags := c.debugFlagsLocked() - if addrs := nm.GetAddresses(); addrs.Len() > 0 { - c.firstAddrForTest = addrs.At(0).Addr() + if update.SelfNode.Valid() && update.SelfNode.Addresses().Len() > 0 { + c.firstAddrForTest = update.SelfNode.Addresses().At(0).Addr() } else { c.firstAddrForTest = netip.Addr{} } @@ -2588,16 +2596,16 @@ func (c *Conn) SetNetworkMap(nm *netmap.NetworkMap) { c.lastFlags = flags - c.logf("[v1] magicsock: got updated network map; %d peers", len(nm.Peers)) + c.logf("[v1] magicsock: got updated network map; %d peers", len(update.Peers)) - entriesPerBuffer := debugRingBufferSize(len(nm.Peers)) + entriesPerBuffer := debugRingBufferSize(len(update.Peers)) // Try a pass of just upserting nodes and creating missing // endpoints. If the set of nodes is the same, this is an // efficient alloc-free update. If the set of nodes is different, // we'll fall through to the next pass, which allocates but can // handle full set updates. - for _, n := range nm.Peers { + for _, n := range update.Peers { if n.ID() == 0 { devPanicf("node with zero ID") continue @@ -2697,14 +2705,14 @@ func (c *Conn) SetNetworkMap(nm *netmap.NetworkMap) { c.peerMap.upsertEndpoint(ep, key.DiscoPublic{}) } - // If the set of nodes changed since the last SetNetworkMap, the + // If the set of nodes changed since the last onNodeViewsUpdate, the // upsert loop just above made c.peerMap contain the union of the // old and new peers - which will be larger than the set from the // current netmap. If that happens, go through the allocful // deletion path to clean up moribund nodes. - if c.peerMap.nodeCount() != len(nm.Peers) { + if c.peerMap.nodeCount() != len(update.Peers) { keep := set.Set[key.NodePublic]{} - for _, n := range nm.Peers { + for _, n := range update.Peers { keep.Add(n.Key()) } c.peerMap.forEachEndpoint(func(ep *endpoint) { @@ -2837,10 +2845,6 @@ func (c *connBind) Close() error { return nil } c.closed = true - // Close the [eventbus.Client]. - if c.eventClient != nil { - c.eventClient.Close() - } // Unblock all outstanding receives. c.pconn4.Close() c.pconn6.Close() @@ -2850,9 +2854,6 @@ func (c *connBind) Close() error { if c.closeDisco6 != nil { c.closeDisco6.Close() } - if c.eventClient != nil { - c.eventClient.Close() - } // Send an empty read result to unblock receiveDERP, // which will then check connBind.Closed. // connBind.Closed takes c.mu, but c.derpRecvCh is buffered. @@ -2871,6 +2872,17 @@ func (c *connBind) isClosed() bool { // // Only the first close does anything. Any later closes return nil. func (c *Conn) Close() error { + // Close the [eventbus.Client] and wait for Conn.consumeEventbusTopics to + // return. Do this before acquiring c.mu: + // 1. Conn.consumeEventbusTopics event handlers also acquire c.mu, they can + // deadlock with c.Close(). + // 2. Conn.consumeEventbusTopics event handlers may not guard against + // undesirable post/in-progress Conn.Close() behaviors. + if c.eventClient != nil { + c.eventClient.Close() + <-c.subsDoneCh + } + c.mu.Lock() defer c.mu.Unlock() if c.closed { @@ -2901,7 +2913,6 @@ func (c *Conn) Close() error { if c.closeDisco6 != nil { c.closeDisco6.Close() } - // Wait on goroutines updating right at the end, once everything is // already closed. 
We want everything else in the Conn to be // consistently in the closed state before we release mu to wait @@ -3233,12 +3244,13 @@ func simpleDur(d time.Duration) time.Duration { return d.Round(time.Minute) } -// UpdateNetmapDelta implements controlclient.NetmapDeltaUpdater. -func (c *Conn) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) { +// onNodeMutationsUpdate is called when a [NodeMutationsUpdate] is received over +// the [eventbus.Bus]. +func (c *Conn) onNodeMutationsUpdate(update NodeMutationsUpdate) { c.mu.Lock() defer c.mu.Unlock() - for _, m := range muts { + for _, m := range update.Mutations { nodeID := m.NodeIDBeingMutated() ep, ok := c.peerMap.endpointForNodeID(nodeID) if !ok { @@ -3257,7 +3269,6 @@ func (c *Conn) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) { ep.mu.Unlock() } } - return true } // UpdateStatus implements the interface nede by ipnstate.StatusBuilder. diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 5e71a40c9..7fa062fa8 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -166,7 +166,7 @@ type magicStack struct { } // newMagicStack builds and initializes an idle magicsock and -// friends. You need to call conn.SetNetworkMap and dev.Reconfig +// friends. You need to call conn.onNodeViewsUpdate and dev.Reconfig // before anything interesting happens. func newMagicStack(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap) *magicStack { privateKey := key.NewNode() @@ -339,9 +339,13 @@ func meshStacks(logf logger.Logf, mutateNetmap func(idx int, nm *netmap.NetworkM for i, m := range ms { nm := buildNetmapLocked(i) - m.conn.SetNetworkMap(nm) - peerSet := make(set.Set[key.NodePublic], len(nm.Peers)) - for _, peer := range nm.Peers { + nv := NodeViewsUpdate{ + SelfNode: nm.SelfNode, + Peers: nm.Peers, + } + m.conn.onNodeViewsUpdate(nv) + peerSet := make(set.Set[key.NodePublic], len(nv.Peers)) + for _, peer := range nv.Peers { peerSet.Add(peer.Key()) } m.conn.UpdatePeers(peerSet) @@ -1366,7 +1370,7 @@ func newTestConn(t testing.TB) *Conn { return conn } -// addTestEndpoint sets conn's network map to a single peer expected +// addTestEndpoint sets conn's node views to a single peer expected // to receive packets from sendConn (or DERP), and returns that peer's // nodekey and discokey. func addTestEndpoint(tb testing.TB, conn *Conn, sendConn net.PacketConn) (key.NodePublic, key.DiscoPublic) { @@ -1375,7 +1379,7 @@ func addTestEndpoint(tb testing.TB, conn *Conn, sendConn net.PacketConn) (key.No // codepath. discoKey := key.DiscoPublicFromRaw32(mem.B([]byte{31: 1})) nodeKey := key.NodePublicFromRaw32(mem.B([]byte{0: 'N', 1: 'K', 31: 0})) - conn.SetNetworkMap(&netmap.NetworkMap{ + conn.onNodeViewsUpdate(NodeViewsUpdate{ Peers: nodeViews([]*tailcfg.Node{ { ID: 1, @@ -1564,11 +1568,11 @@ func nodeViews(v []*tailcfg.Node) []tailcfg.NodeView { return nv } -// Test that a netmap update where node changes its node key but +// Test that a node views update where node changes its node key but // doesn't change its disco key doesn't result in a broken state. 
// // https://github.com/tailscale/tailscale/issues/1391 -func TestSetNetworkMapChangingNodeKey(t *testing.T) { +func TestOnNodeViewsUpdateChangingNodeKey(t *testing.T) { conn := newTestConn(t) t.Cleanup(func() { conn.Close() }) var buf tstest.MemLogger @@ -1580,7 +1584,7 @@ func TestSetNetworkMapChangingNodeKey(t *testing.T) { nodeKey1 := key.NodePublicFromRaw32(mem.B([]byte{0: 'N', 1: 'K', 2: '1', 31: 0})) nodeKey2 := key.NodePublicFromRaw32(mem.B([]byte{0: 'N', 1: 'K', 2: '2', 31: 0})) - conn.SetNetworkMap(&netmap.NetworkMap{ + conn.onNodeViewsUpdate(NodeViewsUpdate{ Peers: nodeViews([]*tailcfg.Node{ { ID: 1, @@ -1596,7 +1600,7 @@ func TestSetNetworkMapChangingNodeKey(t *testing.T) { } for range 3 { - conn.SetNetworkMap(&netmap.NetworkMap{ + conn.onNodeViewsUpdate(NodeViewsUpdate{ Peers: nodeViews([]*tailcfg.Node{ { ID: 2, @@ -1921,7 +1925,7 @@ func eps(s ...string) []netip.AddrPort { return eps } -func TestStressSetNetworkMap(t *testing.T) { +func TestStressOnNodeViewsUpdate(t *testing.T) { t.Parallel() conn := newTestConn(t) @@ -1969,15 +1973,15 @@ func TestStressSetNetworkMap(t *testing.T) { allPeers[j].Key = randNodeKey() } } - // Clone existing peers into a new netmap. + // Clone existing peers. peers := make([]*tailcfg.Node, 0, len(allPeers)) for peerIdx, p := range allPeers { if present[peerIdx] { peers = append(peers, p.Clone()) } } - // Set the netmap. - conn.SetNetworkMap(&netmap.NetworkMap{ + // Set the node views. + conn.onNodeViewsUpdate(NodeViewsUpdate{ Peers: nodeViews(peers), }) // Check invariants. @@ -2102,10 +2106,10 @@ func TestRebindingUDPConn(t *testing.T) { } // https://github.com/tailscale/tailscale/issues/6680: don't ignore -// SetNetworkMap calls when there are no peers. (A too aggressive fast path was +// onNodeViewsUpdate calls when there are no peers. (A too aggressive fast path was // previously bailing out early, thinking there were no changes since all zero -// peers didn't change, but the netmap has non-peer info in it too we shouldn't discard) -func TestSetNetworkMapWithNoPeers(t *testing.T) { +// peers didn't change, but the node views has non-peer info in it too we shouldn't discard) +func TestOnNodeViewsUpdateWithNoPeers(t *testing.T) { var c Conn knobs := &controlknobs.Knobs{} c.logf = logger.Discard @@ -2114,9 +2118,9 @@ func TestSetNetworkMapWithNoPeers(t *testing.T) { for i := 1; i <= 3; i++ { v := !debugEnableSilentDisco() envknob.Setenv("TS_DEBUG_ENABLE_SILENT_DISCO", fmt.Sprint(v)) - nm := &netmap.NetworkMap{} - c.SetNetworkMap(nm) - t.Logf("ptr %d: %p", i, nm) + nv := NodeViewsUpdate{} + c.onNodeViewsUpdate(nv) + t.Logf("ptr %d: %p", i, nv) if c.lastFlags.heartbeatDisabled != v { t.Fatalf("call %d: didn't store netmap", i) } @@ -2213,7 +2217,11 @@ func TestIsWireGuardOnlyPeer(t *testing.T) { }, }), } - m.conn.SetNetworkMap(nm) + nv := NodeViewsUpdate{ + SelfNode: nm.SelfNode, + Peers: nm.Peers, + } + m.conn.onNodeViewsUpdate(nv) cfg, err := nmcfg.WGCfg(nm, t.Logf, netmap.AllowSubnetRoutes, "") if err != nil { @@ -2275,7 +2283,11 @@ func TestIsWireGuardOnlyPeerWithMasquerade(t *testing.T) { }, }), } - m.conn.SetNetworkMap(nm) + nv := NodeViewsUpdate{ + SelfNode: nm.SelfNode, + Peers: nm.Peers, + } + m.conn.onNodeViewsUpdate(nv) cfg, err := nmcfg.WGCfg(nm, t.Logf, netmap.AllowSubnetRoutes, "") if err != nil { @@ -2312,7 +2324,11 @@ func TestIsWireGuardOnlyPeerWithMasquerade(t *testing.T) { // configures WG. 
func applyNetworkMap(t *testing.T, m *magicStack, nm *netmap.NetworkMap) { t.Helper() - m.conn.SetNetworkMap(nm) + nv := NodeViewsUpdate{ + SelfNode: nm.SelfNode, + Peers: nm.Peers, + } + m.conn.onNodeViewsUpdate(nv) // Make sure we can't use v6 to avoid test failures. m.conn.noV6.Store(true) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index b1b82032b..4a9f32143 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -1300,7 +1300,6 @@ func (e *userspaceEngine) linkChange(delta *netmon.ChangeDelta) { } func (e *userspaceEngine) SetNetworkMap(nm *netmap.NetworkMap) { - e.magicConn.SetNetworkMap(nm) e.mu.Lock() e.netMap = nm e.mu.Unlock() From ad0dfcb1857105597b1bed3422c9057aafd7b22f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 17 Jun 2025 20:25:09 -0700 Subject: [PATCH 0972/1708] net/*: remove Windows exceptions for when Resolver.PreferGo didn't work Resolver.PreferGo didn't used to work on Windows. It was fixed in 2022, though. (https://github.com/golang/go/issues/33097) Updates #5161 Change-Id: I4e1aeff220ebd6adc8a14f781664fa6a2068b48c Signed-off-by: Brad Fitzpatrick --- net/dns/resolver/tsdns_test.go | 7 ------- net/dnscache/messagecache_test.go | 9 --------- net/tsdial/tsdial.go | 2 +- 3 files changed, 1 insertion(+), 17 deletions(-) diff --git a/net/dns/resolver/tsdns_test.go b/net/dns/resolver/tsdns_test.go index de08450d2..4bbfd4d6a 100644 --- a/net/dns/resolver/tsdns_test.go +++ b/net/dns/resolver/tsdns_test.go @@ -1106,10 +1106,6 @@ type linkSelFunc func(ip netip.Addr) string func (f linkSelFunc) PickLink(ip netip.Addr) string { return f(ip) } func TestHandleExitNodeDNSQueryWithNetPkg(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("skipping test on Windows; waiting for golang.org/issue/33097") - } - records := []any{ "no-records.test.", dnsHandler(), @@ -1405,9 +1401,6 @@ func TestHandleExitNodeDNSQueryWithNetPkg(t *testing.T) { // newWrapResolver returns a resolver that uses r (via handleExitNodeDNSQueryWithNetPkg) // to make DNS requests. func newWrapResolver(r *net.Resolver) *net.Resolver { - if runtime.GOOS == "windows" { - panic("doesn't work on Windows") // golang.org/issue/33097 - } return &net.Resolver{ PreferGo: true, Dial: func(ctx context.Context, network, addr string) (net.Conn, error) { diff --git a/net/dnscache/messagecache_test.go b/net/dnscache/messagecache_test.go index 41fc33448..0bedfa5ad 100644 --- a/net/dnscache/messagecache_test.go +++ b/net/dnscache/messagecache_test.go @@ -9,7 +9,6 @@ import ( "errors" "fmt" "net" - "runtime" "testing" "time" @@ -249,14 +248,6 @@ func TestGetDNSQueryCacheKey(t *testing.T) { } func getGoNetPacketDNSQuery(name string) []byte { - if runtime.GOOS == "windows" { - // On Windows, Go's net.Resolver doesn't use the DNS client. - // See https://github.com/golang/go/issues/33097 which - // was approved but not yet implemented. - // For now just pretend it's implemented to make this test - // pass on Windows with complicated the caller. 
- return makeQ(123, name) - } res := make(chan []byte, 1) r := &net.Resolver{ PreferGo: true, diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index 2492f666c..e4e4e9e8b 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -322,7 +322,7 @@ func (d *Dialer) userDialResolve(ctx context.Context, network, addr string) (net } var r net.Resolver - if exitDNSDoH != "" && runtime.GOOS != "windows" { // Windows: https://github.com/golang/go/issues/33097 + if exitDNSDoH != "" { r.PreferGo = true r.Dial = func(ctx context.Context, network, address string) (net.Conn, error) { return &dohConn{ From 4979ce7a94cd023db5cd03cbb556934d9652dfd2 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 18 Jun 2025 14:17:12 -0700 Subject: [PATCH 0973/1708] feature/tpm: implement ipn.StateStore using TPM sealing (#16030) Updates #15830 Signed-off-by: Andrew Lytvynov --- cmd/tailscaled/depaware.txt | 2 +- feature/tpm/tpm.go | 322 +++++++++++++++++++++++++++++++++++- feature/tpm/tpm_linux.go | 11 +- feature/tpm/tpm_other.go | 10 +- feature/tpm/tpm_test.go | 165 +++++++++++++++++- feature/tpm/tpm_windows.go | 11 +- ipn/store/stores.go | 2 + 7 files changed, 500 insertions(+), 23 deletions(-) diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 387b944c1..7c4885a4b 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -474,7 +474,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ golang.org/x/crypto/nacl/box from tailscale.com/types/key - golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box + golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box+ golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ LD golang.org/x/crypto/ssh from github.com/pkg/sftp+ diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 18e56ae89..6feac85e3 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -5,14 +5,29 @@ package tpm import ( + "bytes" + "crypto/rand" + "encoding/json" + "errors" + "fmt" + "log" + "os" + "path/filepath" "slices" + "strings" "sync" "github.com/google/go-tpm/tpm2" "github.com/google/go-tpm/tpm2/transport" + "golang.org/x/crypto/nacl/secretbox" + "tailscale.com/atomicfile" "tailscale.com/feature" "tailscale.com/hostinfo" + "tailscale.com/ipn" + "tailscale.com/ipn/store" + "tailscale.com/paths" "tailscale.com/tailcfg" + "tailscale.com/types/logger" ) var infoOnce = sync.OnceValue(info) @@ -22,10 +37,16 @@ func init() { hostinfo.RegisterHostinfoNewHook(func(hi *tailcfg.Hostinfo) { hi.TPM = infoOnce() }) + store.Register(storePrefix, newStore) } -//lint:ignore U1000 used in Linux and Windows builds only -func infoFromCapabilities(tpm transport.TPM) *tailcfg.TPMInfo { +func info() *tailcfg.TPMInfo { + tpm, err := open() + if err != nil { + return nil + } + defer tpm.Close() + info := new(tailcfg.TPMInfo) toStr := func(s *string) func(*tailcfg.TPMInfo, uint32) { return func(info *tailcfg.TPMInfo, value uint32) { @@ -81,3 +102,300 @@ func propToString(v uint32) string { // Delete any non-printable ASCII characters. 
return string(slices.DeleteFunc(chars, func(b byte) bool { return b < ' ' || b > '~' })) } + +const storePrefix = "tpmseal:" + +func newStore(logf logger.Logf, path string) (ipn.StateStore, error) { + path = strings.TrimPrefix(path, storePrefix) + if err := paths.MkStateDir(filepath.Dir(path)); err != nil { + return nil, fmt.Errorf("creating state directory: %w", err) + } + var parsed map[ipn.StateKey][]byte + bs, err := os.ReadFile(path) + if err != nil { + if !os.IsNotExist(err) { + return nil, fmt.Errorf("failed to open %q: %w", path, err) + } + logf("tpm.newStore: initializing state file") + + var key [32]byte + // crypto/rand.Read never returns an error. + rand.Read(key[:]) + + store := &tpmStore{ + logf: logf, + path: path, + key: key, + cache: make(map[ipn.StateKey][]byte), + } + if err := store.writeSealed(); err != nil { + return nil, fmt.Errorf("failed to write initial state file: %w", err) + } + return store, nil + } + + // State file exists, unseal and parse it. + var sealed encryptedData + if err := json.Unmarshal(bs, &sealed); err != nil { + return nil, fmt.Errorf("failed to unmarshal state file: %w", err) + } + if len(sealed.Data) == 0 || sealed.Key == nil || len(sealed.Nonce) == 0 { + return nil, fmt.Errorf("state file %q has not been TPM-sealed or is corrupt", path) + } + data, err := unseal(logf, sealed) + if err != nil { + return nil, fmt.Errorf("failed to unseal state file: %w", err) + } + if err := json.Unmarshal(data.Data, &parsed); err != nil { + return nil, fmt.Errorf("failed to parse state file: %w", err) + } + return &tpmStore{ + logf: logf, + path: path, + key: data.Key, + cache: parsed, + }, nil +} + +// tpmStore is an ipn.StateStore that stores the state in a secretbox-encrypted +// file using a TPM-sealed symmetric key. +type tpmStore struct { + logf logger.Logf + path string + key [32]byte + + mu sync.RWMutex + cache map[ipn.StateKey][]byte +} + +func (s *tpmStore) ReadState(k ipn.StateKey) ([]byte, error) { + s.mu.RLock() + defer s.mu.RUnlock() + v, ok := s.cache[k] + if !ok { + return nil, ipn.ErrStateNotExist + } + return bytes.Clone(v), nil +} + +func (s *tpmStore) WriteState(k ipn.StateKey, bs []byte) error { + s.mu.Lock() + defer s.mu.Unlock() + if bytes.Equal(s.cache[k], bs) { + return nil + } + s.cache[k] = bytes.Clone(bs) + + return s.writeSealed() +} + +func (s *tpmStore) writeSealed() error { + bs, err := json.Marshal(s.cache) + if err != nil { + return err + } + sealed, err := seal(s.logf, decryptedData{Key: s.key, Data: bs}) + if err != nil { + return fmt.Errorf("failed to seal state file: %w", err) + } + buf, err := json.Marshal(sealed) + if err != nil { + return err + } + return atomicfile.WriteFile(s.path, buf, 0600) +} + +// The nested levels of encoding and encryption are confusing, so here's what's +// going on in plain English. +// +// Not all TPM devices support symmetric encryption (TPM2_EncryptDecrypt2) +// natively, but they do support "sealing" small values (see +// tpmSeal/tpmUnseal). The size limit is too small for the actual state file, +// so we seal a symmetric key instead. This symmetric key is then used to seal +// the actual data using nacl/secretbox. +// Confusingly, both TPMs and secretbox use "seal" terminology. +// +// tpmSeal/tpmUnseal do the lower-level sealing of small []byte blobs, which we +// use to seal a 32-byte secretbox key. +// +// seal/unseal do the higher-level sealing of store data using secretbox, and +// also sealing of the symmetric key using TPM. 
+ +// decryptedData contains the fully decrypted raw data along with the symmetric +// key used for secretbox. This struct should only live in memory and never get +// stored to disk! +type decryptedData struct { + Key [32]byte + Data []byte +} + +func (decryptedData) MarshalJSON() ([]byte, error) { + return nil, errors.New("[unexpected]: decryptedData should never get JSON-marshaled!") +} + +// encryptedData contains the secretbox-sealed data and nonce, along with a +// TPM-sealed key. All fields are required. +type encryptedData struct { + Key *tpmSealedData `json:"key"` + Nonce []byte `json:"nonce"` + Data []byte `json:"data"` +} + +func seal(logf logger.Logf, dec decryptedData) (*encryptedData, error) { + var nonce [24]byte + // crypto/rand.Read never returns an error. + rand.Read(nonce[:]) + + sealedData := secretbox.Seal(nil, dec.Data, &nonce, &dec.Key) + sealedKey, err := tpmSeal(logf, dec.Key[:]) + if err != nil { + return nil, fmt.Errorf("failed to seal encryption key to TPM: %w", err) + } + + return &encryptedData{ + Key: sealedKey, + Nonce: nonce[:], + Data: sealedData, + }, nil +} + +func unseal(logf logger.Logf, data encryptedData) (*decryptedData, error) { + if len(data.Nonce) != 24 { + return nil, fmt.Errorf("nonce should be 24 bytes long, got %d", len(data.Nonce)) + } + + unsealedKey, err := tpmUnseal(logf, data.Key) + if err != nil { + return nil, fmt.Errorf("failed to unseal encryption key with TPM: %w", err) + } + if len(unsealedKey) != 32 { + return nil, fmt.Errorf("unsealed key should be 32 bytes long, got %d", len(unsealedKey)) + } + unsealedData, ok := secretbox.Open(nil, data.Data, (*[24]byte)(data.Nonce), (*[32]byte)(unsealedKey)) + if !ok { + return nil, errors.New("failed to unseal data") + } + + return &decryptedData{ + Key: *(*[32]byte)(unsealedKey), + Data: unsealedData, + }, nil +} + +type tpmSealedData struct { + Private []byte + Public []byte +} + +// withSRK runs fn with the loaded Storage Root Key (SRK) handle. The SRK is +// flushed after fn returns. +func withSRK(logf logger.Logf, tpm transport.TPM, fn func(srk tpm2.AuthHandle) error) error { + srkCmd := tpm2.CreatePrimary{ + PrimaryHandle: tpm2.TPMRHOwner, + InPublic: tpm2.New2B(tpm2.ECCSRKTemplate), + } + srkRes, err := srkCmd.Execute(tpm) + if err != nil { + return fmt.Errorf("tpm2.CreatePrimary: %w", err) + } + defer func() { + cmd := tpm2.FlushContext{FlushHandle: srkRes.ObjectHandle} + if _, err := cmd.Execute(tpm); err != nil { + logf("tpm2.FlushContext: failed to flush SRK handle: %v", err) + } + }() + + return fn(tpm2.AuthHandle{ + Handle: srkRes.ObjectHandle, + Name: srkRes.Name, + Auth: tpm2.HMAC(tpm2.TPMAlgSHA256, 32), + }) +} + +// tpmSeal seals the data using SRK of the local TPM. 
+func tpmSeal(logf logger.Logf, data []byte) (*tpmSealedData, error) { + tpm, err := open() + if err != nil { + return nil, fmt.Errorf("opening TPM: %w", err) + } + defer tpm.Close() + + var res *tpmSealedData + err = withSRK(logf, tpm, func(srk tpm2.AuthHandle) error { + sealCmd := tpm2.Create{ + ParentHandle: srk, + InSensitive: tpm2.TPM2BSensitiveCreate{ + Sensitive: &tpm2.TPMSSensitiveCreate{ + Data: tpm2.NewTPMUSensitiveCreate(&tpm2.TPM2BSensitiveData{ + Buffer: data, + }), + }, + }, + InPublic: tpm2.New2B(tpm2.TPMTPublic{ + Type: tpm2.TPMAlgKeyedHash, + NameAlg: tpm2.TPMAlgSHA256, + ObjectAttributes: tpm2.TPMAObject{ + FixedTPM: true, + FixedParent: true, + UserWithAuth: true, + }, + }), + } + sealRes, err := sealCmd.Execute(tpm) + if err != nil { + return fmt.Errorf("tpm2.Create: %w", err) + } + + res = &tpmSealedData{ + Private: sealRes.OutPrivate.Buffer, + Public: sealRes.OutPublic.Bytes(), + } + return nil + }) + return res, err +} + +// tpmUnseal unseals the data using SRK of the local TPM. +func tpmUnseal(logf logger.Logf, data *tpmSealedData) ([]byte, error) { + tpm, err := open() + if err != nil { + return nil, fmt.Errorf("opening TPM: %w", err) + } + defer tpm.Close() + + var res []byte + err = withSRK(logf, tpm, func(srk tpm2.AuthHandle) error { + // Load the sealed object into the TPM first under SRK. + loadCmd := tpm2.Load{ + ParentHandle: srk, + InPrivate: tpm2.TPM2BPrivate{Buffer: data.Private}, + InPublic: tpm2.BytesAs2B[tpm2.TPMTPublic](data.Public), + } + loadRes, err := loadCmd.Execute(tpm) + if err != nil { + return fmt.Errorf("tpm2.Load: %w", err) + } + defer func() { + cmd := tpm2.FlushContext{FlushHandle: loadRes.ObjectHandle} + if _, err := cmd.Execute(tpm); err != nil { + log.Printf("tpm2.FlushContext: failed to flush loaded sealed blob handle: %v", err) + } + }() + + // Then unseal the object. 
+ unsealCmd := tpm2.Unseal{ + ItemHandle: tpm2.NamedHandle{ + Handle: loadRes.ObjectHandle, + Name: loadRes.Name, + }, + } + unsealRes, err := unsealCmd.Execute(tpm) + if err != nil { + return fmt.Errorf("tpm2.Unseal: %w", err) + } + res = unsealRes.OutData.Buffer + + return nil + }) + return res, err +} diff --git a/feature/tpm/tpm_linux.go b/feature/tpm/tpm_linux.go index a90c0e153..f2d0f1402 100644 --- a/feature/tpm/tpm_linux.go +++ b/feature/tpm/tpm_linux.go @@ -4,15 +4,10 @@ package tpm import ( + "github.com/google/go-tpm/tpm2/transport" "github.com/google/go-tpm/tpm2/transport/linuxtpm" - "tailscale.com/tailcfg" ) -func info() *tailcfg.TPMInfo { - t, err := linuxtpm.Open("/dev/tpm0") - if err != nil { - return nil - } - defer t.Close() - return infoFromCapabilities(t) +func open() (transport.TPMCloser, error) { + return linuxtpm.Open("/dev/tpm0") } diff --git a/feature/tpm/tpm_other.go b/feature/tpm/tpm_other.go index ba7c67621..108b2c057 100644 --- a/feature/tpm/tpm_other.go +++ b/feature/tpm/tpm_other.go @@ -5,8 +5,12 @@ package tpm -import "tailscale.com/tailcfg" +import ( + "errors" -func info() *tailcfg.TPMInfo { - return nil + "github.com/google/go-tpm/tpm2/transport" +) + +func open() (transport.TPMCloser, error) { + return nil, errors.New("TPM not supported on this platform") } diff --git a/feature/tpm/tpm_test.go b/feature/tpm/tpm_test.go index fc0fc178c..a022b69b2 100644 --- a/feature/tpm/tpm_test.go +++ b/feature/tpm/tpm_test.go @@ -3,7 +3,17 @@ package tpm -import "testing" +import ( + "bytes" + "crypto/rand" + "errors" + "path/filepath" + "strconv" + "testing" + + "tailscale.com/ipn" + "tailscale.com/ipn/store" +) func TestPropToString(t *testing.T) { for prop, want := range map[uint32]string{ @@ -17,3 +27,156 @@ func TestPropToString(t *testing.T) { } } } + +func skipWithoutTPM(t testing.TB) { + tpm, err := open() + if err != nil { + t.Skip("TPM not available") + } + tpm.Close() +} + +func TestSealUnseal(t *testing.T) { + skipWithoutTPM(t) + + data := make([]byte, 100*1024) + rand.Read(data) + var key [32]byte + rand.Read(key[:]) + + sealed, err := seal(t.Logf, decryptedData{Key: key, Data: data}) + if err != nil { + t.Fatalf("seal: %v", err) + } + if bytes.Contains(sealed.Data, data) { + t.Fatalf("sealed data %q contains original input %q", sealed.Data, data) + } + + unsealed, err := unseal(t.Logf, *sealed) + if err != nil { + t.Fatalf("unseal: %v", err) + } + if !bytes.Equal(data, unsealed.Data) { + t.Errorf("got unsealed data: %q, want: %q", unsealed, data) + } + if key != unsealed.Key { + t.Errorf("got unsealed key: %q, want: %q", unsealed.Key, key) + } +} + +func TestStore(t *testing.T) { + skipWithoutTPM(t) + + path := storePrefix + filepath.Join(t.TempDir(), "state") + store, err := newStore(t.Logf, path) + if err != nil { + t.Fatal(err) + } + + checkState := func(t *testing.T, store ipn.StateStore, k ipn.StateKey, want []byte) { + got, err := store.ReadState(k) + if err != nil { + t.Errorf("ReadState(%q): %v", k, err) + } + if !bytes.Equal(want, got) { + t.Errorf("ReadState(%q): got %q, want %q", k, got, want) + } + } + + k1, k2 := ipn.StateKey("k1"), ipn.StateKey("k2") + v1, v2 := []byte("v1"), []byte("v2") + + t.Run("read-non-existent-key", func(t *testing.T) { + _, err := store.ReadState(k1) + if !errors.Is(err, ipn.ErrStateNotExist) { + t.Errorf("ReadState succeeded, want %v", ipn.ErrStateNotExist) + } + }) + + t.Run("read-write-k1", func(t *testing.T) { + if err := store.WriteState(k1, v1); err != nil { + t.Errorf("WriteState(%q, %q): %v", k1, v1, err) + } 
+ checkState(t, store, k1, v1) + }) + + t.Run("read-write-k2", func(t *testing.T) { + if err := store.WriteState(k2, v2); err != nil { + t.Errorf("WriteState(%q, %q): %v", k2, v2, err) + } + checkState(t, store, k2, v2) + }) + + t.Run("update-k2", func(t *testing.T) { + v2 = []byte("new v2") + if err := store.WriteState(k2, v2); err != nil { + t.Errorf("WriteState(%q, %q): %v", k2, v2, err) + } + checkState(t, store, k2, v2) + }) + + t.Run("reopen-store", func(t *testing.T) { + store, err := newStore(t.Logf, path) + if err != nil { + t.Fatal(err) + } + checkState(t, store, k1, v1) + checkState(t, store, k2, v2) + }) +} + +func BenchmarkStore(b *testing.B) { + skipWithoutTPM(b) + b.StopTimer() + + stores := make(map[string]ipn.StateStore) + key := ipn.StateKey(b.Name()) + + // Set up tpmStore + tpmStore, err := newStore(b.Logf, filepath.Join(b.TempDir(), "tpm.store")) + if err != nil { + b.Fatal(err) + } + if err := tpmStore.WriteState(key, []byte("-1")); err != nil { + b.Fatal(err) + } + stores["tpmStore"] = tpmStore + + // Set up FileStore + fileStore, err := store.NewFileStore(b.Logf, filepath.Join(b.TempDir(), "file.store")) + if err != nil { + b.Fatal(err) + } + if err := fileStore.WriteState(key, []byte("-1")); err != nil { + b.Fatal(err) + } + stores["fileStore"] = fileStore + + b.StartTimer() + + for name, store := range stores { + b.Run(name, func(b *testing.B) { + b.Run("write-noop", func(b *testing.B) { + for range b.N { + if err := store.WriteState(key, []byte("-1")); err != nil { + b.Fatal(err) + } + } + }) + b.Run("write", func(b *testing.B) { + for i := range b.N { + if err := store.WriteState(key, []byte(strconv.Itoa(i))); err != nil { + b.Fatal(err) + } + } + }) + b.Run("read", func(b *testing.B) { + for range b.N { + if _, err := store.ReadState(key); err != nil { + b.Fatal(err) + } + } + }) + }) + } +} diff --git a/feature/tpm/tpm_windows.go b/feature/tpm/tpm_windows.go index 578d687af..429d20cb8 100644 --- a/feature/tpm/tpm_windows.go +++ b/feature/tpm/tpm_windows.go @@ -4,15 +4,10 @@ package tpm import ( + "github.com/google/go-tpm/tpm2/transport" "github.com/google/go-tpm/tpm2/transport/windowstpm" - "tailscale.com/tailcfg" ) -func info() *tailcfg.TPMInfo { - t, err := windowstpm.Open() - if err != nil { - return nil - } - defer t.Close() - return infoFromCapabilities(t) +func open() (transport.TPMCloser, error) { + return windowstpm.Open() } diff --git a/ipn/store/stores.go b/ipn/store/stores.go index 1f98891bf..1a98574c9 100644 --- a/ipn/store/stores.go +++ b/ipn/store/stores.go @@ -45,6 +45,8 @@ var knownStores map[string]Provider // the suffix an AWS ARN for an SSM. // - (Linux-only) if the string begins with "kube:", // the suffix is a Kubernetes secret name +// - (Linux or Windows) if the string begins with "tpmseal:", the suffix is +// filepath that is sealed with the local TPM device. // - In all other cases, the path is treated as a filepath. func New(logf logger.Logf, path string) (ipn.StateStore, error) { for prefix, sf := range knownStores { From e92eb6b17bb59cd66cd78c90db3b285015ed5e11 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 8 Jun 2025 18:51:41 -0700 Subject: [PATCH 0974/1708] net/tlsdial: fix TLS cert validation of HTTPS proxies If you had HTTPS_PROXY=https://some-valid-cert.example.com running a CONNECT proxy, we should've been able to do a TLS CONNECT request to e.g. controlplane.tailscale.com:443 through that, and I'm pretty sure it used to work, but refactorings and lack of integration tests made it regress. 
It probably regressed when we added the baked-in LetsEncrypt root cert validation fallback code, which was testing against the wrong hostname (the ultimate one, not the one which we were being asked to validate) Fixes #16222 Change-Id: If014e395f830e2f87f056f588edacad5c15e91bc Signed-off-by: Brad Fitzpatrick --- cmd/proxy-test-server/proxy-test-server.go | 81 +++++++ control/controlclient/controlclient_test.go | 225 ++++++++++++++++++ control/controlclient/direct.go | 7 +- control/controlhttp/client.go | 2 +- derp/derphttp/derphttp_client.go | 3 +- derp/derphttp/derphttp_test.go | 34 +++ logpolicy/logpolicy.go | 4 +- net/bakedroots/bakedroots.go | 5 +- net/connectproxy/connectproxy.go | 93 ++++++++ net/dnscache/dnscache.go | 13 +- net/dnsfallback/dnsfallback.go | 2 +- net/tlsdial/tlsdial.go | 68 +++--- net/tlsdial/tlsdial_test.go | 2 +- .../tlstest/testdata/controlplane.tstest.key | 5 + tstest/tlstest/testdata/proxy.tstest.key | 5 + tstest/tlstest/testdata/root-ca.key | 5 + tstest/tlstest/tlstest.go | 167 +++++++++++++ 17 files changed, 672 insertions(+), 49 deletions(-) create mode 100644 cmd/proxy-test-server/proxy-test-server.go create mode 100644 net/connectproxy/connectproxy.go create mode 100644 tstest/tlstest/testdata/controlplane.tstest.key create mode 100644 tstest/tlstest/testdata/proxy.tstest.key create mode 100644 tstest/tlstest/testdata/root-ca.key create mode 100644 tstest/tlstest/tlstest.go diff --git a/cmd/proxy-test-server/proxy-test-server.go b/cmd/proxy-test-server/proxy-test-server.go new file mode 100644 index 000000000..9f8c94a38 --- /dev/null +++ b/cmd/proxy-test-server/proxy-test-server.go @@ -0,0 +1,81 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// The proxy-test-server command is a simple HTTP proxy server for testing +// Tailscale's client proxy functionality. 
+package main + +import ( + "crypto/tls" + "flag" + "fmt" + "log" + "net" + "net/http" + "os" + "strings" + + "golang.org/x/crypto/acme/autocert" + "tailscale.com/net/connectproxy" + "tailscale.com/tempfork/acme" +) + +var ( + listen = flag.String("listen", ":8080", "Address to listen on for HTTPS proxy requests") + hostname = flag.String("hostname", "localhost", "Hostname for the proxy server") + tailscaleOnly = flag.Bool("tailscale-only", true, "Restrict proxy to Tailscale targets only") + extraAllowedHosts = flag.String("allow-hosts", "", "Comma-separated list of allowed target hosts to additionally allow if --tailscale-only is true") +) + +func main() { + flag.Parse() + + am := &autocert.Manager{ + HostPolicy: autocert.HostWhitelist(*hostname), + Prompt: autocert.AcceptTOS, + Cache: autocert.DirCache(os.ExpandEnv("$HOME/.cache/autocert/proxy-test-server")), + } + var allowTarget func(hostPort string) error + if *tailscaleOnly { + allowTarget = func(hostPort string) error { + host, port, err := net.SplitHostPort(hostPort) + if err != nil { + return fmt.Errorf("invalid target %q: %v", hostPort, err) + } + if port != "443" { + return fmt.Errorf("target %q must use port 443", hostPort) + } + for allowed := range strings.SplitSeq(*extraAllowedHosts, ",") { + if host == allowed { + return nil // explicitly allowed target + } + } + if !strings.HasSuffix(host, ".tailscale.com") { + return fmt.Errorf("target %q is not a Tailscale host", hostPort) + } + return nil // valid Tailscale target + } + } + + go func() { + if err := http.ListenAndServe(":http", am.HTTPHandler(nil)); err != nil { + log.Fatalf("autocert HTTP server failed: %v", err) + } + }() + hs := &http.Server{ + Addr: *listen, + Handler: &connectproxy.Handler{ + Check: allowTarget, + Logf: log.Printf, + }, + TLSConfig: &tls.Config{ + GetCertificate: am.GetCertificate, + NextProtos: []string{ + "http/1.1", // enable HTTP/2 + acme.ALPNProto, // enable tls-alpn ACME challenges + }, + }, + } + log.Printf("Starting proxy-test-server on %s (hostname: %q)\n", *listen, *hostname) + log.Fatal(hs.ListenAndServeTLS("", "")) // cert and key are provided by autocert +} diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index f8882a4e7..1107f76a4 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -4,13 +4,35 @@ package controlclient import ( + "context" + "crypto/tls" "errors" + "flag" "fmt" "io" + "net" + "net/http" + "net/netip" + "net/url" "reflect" "slices" + "sync/atomic" "testing" + "time" + "tailscale.com/control/controlknobs" + "tailscale.com/health" + "tailscale.com/net/bakedroots" + "tailscale.com/net/connectproxy" + "tailscale.com/net/netmon" + "tailscale.com/net/tsdial" + "tailscale.com/tailcfg" + "tailscale.com/tstest" + "tailscale.com/tstest/integration/testcontrol" + "tailscale.com/tstest/tlstest" + "tailscale.com/tstime" + "tailscale.com/types/key" + "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/persist" ) @@ -188,3 +210,206 @@ func isRetryableErrorForTest(err error) bool { } return false } + +var liveNetworkTest = flag.Bool("live-network-test", false, "run live network tests") + +func TestDirectProxyManual(t *testing.T) { + if !*liveNetworkTest { + t.Skip("skipping without --live-network-test") + } + + dialer := &tsdial.Dialer{} + dialer.SetNetMon(netmon.NewStatic()) + + opts := Options{ + Persist: persist.Persist{}, + GetMachinePrivateKey: func() (key.MachinePrivate, error) { + return 
key.NewMachine(), nil + }, + ServerURL: "https://controlplane.tailscale.com", + Clock: tstime.StdClock{}, + Hostinfo: &tailcfg.Hostinfo{ + BackendLogID: "test-backend-log-id", + }, + DiscoPublicKey: key.NewDisco().Public(), + Logf: t.Logf, + HealthTracker: &health.Tracker{}, + PopBrowserURL: func(url string) { + t.Logf("PopBrowserURL: %q", url) + }, + Dialer: dialer, + ControlKnobs: &controlknobs.Knobs{}, + } + d, err := NewDirect(opts) + if err != nil { + t.Fatalf("NewDirect: %v", err) + } + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + url, err := d.TryLogin(ctx, LoginEphemeral) + if err != nil { + t.Fatalf("TryLogin: %v", err) + } + t.Logf("URL: %q", url) +} + +func TestHTTPSNoProxy(t *testing.T) { testHTTPS(t, false) } + +// TestTLSWithProxy verifies we can connect to the control plane via +// an HTTPS proxy. +func TestHTTPSWithProxy(t *testing.T) { testHTTPS(t, true) } + +func testHTTPS(t *testing.T, withProxy bool) { + bakedroots.ResetForTest(t, tlstest.TestRootCA()) + + controlLn, err := tls.Listen("tcp", "127.0.0.1:0", tlstest.ControlPlaneKeyPair.ServerTLSConfig()) + if err != nil { + t.Fatal(err) + } + defer controlLn.Close() + + proxyLn, err := tls.Listen("tcp", "127.0.0.1:0", tlstest.ProxyServerKeyPair.ServerTLSConfig()) + if err != nil { + t.Fatal(err) + } + defer proxyLn.Close() + + const requiredAuthKey = "hunter2" + const someUsername = "testuser" + const somePassword = "testpass" + + testControl := &testcontrol.Server{ + Logf: tstest.WhileTestRunningLogger(t), + RequireAuthKey: requiredAuthKey, + } + controlSrv := &http.Server{ + Handler: testControl, + ErrorLog: logger.StdLogger(t.Logf), + } + go controlSrv.Serve(controlLn) + + const fakeControlIP = "1.2.3.4" + const fakeProxyIP = "5.6.7.8" + + dialer := &tsdial.Dialer{} + dialer.SetNetMon(netmon.NewStatic()) + dialer.SetSystemDialerForTest(func(ctx context.Context, network, addr string) (net.Conn, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, fmt.Errorf("SplitHostPort(%q): %v", addr, err) + } + var d net.Dialer + if host == fakeControlIP { + return d.DialContext(ctx, network, controlLn.Addr().String()) + } + if host == fakeProxyIP { + return d.DialContext(ctx, network, proxyLn.Addr().String()) + } + return nil, fmt.Errorf("unexpected dial to %q", addr) + }) + + opts := Options{ + Persist: persist.Persist{}, + GetMachinePrivateKey: func() (key.MachinePrivate, error) { + return key.NewMachine(), nil + }, + AuthKey: requiredAuthKey, + ServerURL: "https://controlplane.tstest", + Clock: tstime.StdClock{}, + Hostinfo: &tailcfg.Hostinfo{ + BackendLogID: "test-backend-log-id", + }, + DiscoPublicKey: key.NewDisco().Public(), + Logf: t.Logf, + HealthTracker: &health.Tracker{}, + PopBrowserURL: func(url string) { + t.Logf("PopBrowserURL: %q", url) + }, + Dialer: dialer, + } + d, err := NewDirect(opts) + if err != nil { + t.Fatalf("NewDirect: %v", err) + } + + d.dnsCache.LookupIPForTest = func(ctx context.Context, host string) ([]netip.Addr, error) { + switch host { + case "controlplane.tstest": + return []netip.Addr{netip.MustParseAddr(fakeControlIP)}, nil + case "proxy.tstest": + if !withProxy { + t.Errorf("unexpected DNS lookup for %q with proxy disabled", host) + return nil, fmt.Errorf("unexpected DNS lookup for %q", host) + } + return []netip.Addr{netip.MustParseAddr(fakeProxyIP)}, nil + } + t.Errorf("unexpected DNS query for %q", host) + return []netip.Addr{}, nil + } + + var proxyReqs atomic.Int64 + if withProxy { + 
d.httpc.Transport.(*http.Transport).Proxy = func(req *http.Request) (*url.URL, error) { + t.Logf("using proxy for %q", req.URL) + u := &url.URL{ + Scheme: "https", + Host: "proxy.tstest:443", + User: url.UserPassword(someUsername, somePassword), + } + return u, nil + } + + connectProxy := &http.Server{ + Handler: connectProxyTo(t, "controlplane.tstest:443", controlLn.Addr().String(), &proxyReqs), + } + go connectProxy.Serve(proxyLn) + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + url, err := d.TryLogin(ctx, LoginEphemeral) + if err != nil { + t.Fatalf("TryLogin: %v", err) + } + if url != "" { + t.Errorf("got URL %q, want empty", url) + } + + if withProxy { + if got, want := proxyReqs.Load(), int64(1); got != want { + t.Errorf("proxy CONNECT requests = %d; want %d", got, want) + } + } +} + +func connectProxyTo(t testing.TB, target, backendAddrPort string, reqs *atomic.Int64) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI != target { + t.Errorf("invalid CONNECT request to %q; want %q", r.RequestURI, target) + http.Error(w, "bad target", http.StatusBadRequest) + return + } + + r.Header.Set("Authorization", r.Header.Get("Proxy-Authorization")) // for the BasicAuth method. kinda trashy. + user, pass, ok := r.BasicAuth() + if !ok || user != "testuser" || pass != "testpass" { + t.Errorf("invalid CONNECT auth %q:%q; want %q:%q", user, pass, "testuser", "testpass") + http.Error(w, "bad auth", http.StatusUnauthorized) + return + } + + (&connectproxy.Handler{ + Dial: func(ctx context.Context, network, addr string) (net.Conn, error) { + var d net.Dialer + c, err := d.DialContext(ctx, network, backendAddrPort) + if err == nil { + reqs.Add(1) + } + return c, err + }, + Logf: t.Logf, + }).ServeHTTP(w, r) + }) +} diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 2d6dc6e36..4c9b04ce9 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -16,7 +16,6 @@ import ( "net" "net/http" "net/netip" - "net/url" "os" "reflect" "runtime" @@ -240,10 +239,6 @@ func NewDirect(opts Options) (*Direct, error) { opts.ControlKnobs = &controlknobs.Knobs{} } opts.ServerURL = strings.TrimRight(opts.ServerURL, "/") - serverURL, err := url.Parse(opts.ServerURL) - if err != nil { - return nil, err - } if opts.Clock == nil { opts.Clock = tstime.StdClock{} } @@ -273,7 +268,7 @@ func NewDirect(opts Options) (*Direct, error) { tr := http.DefaultTransport.(*http.Transport).Clone() tr.Proxy = tshttpproxy.ProxyFromEnvironment tshttpproxy.SetTransportGetProxyConnectHeader(tr) - tr.TLSClientConfig = tlsdial.Config(serverURL.Hostname(), opts.HealthTracker, tr.TLSClientConfig) + tr.TLSClientConfig = tlsdial.Config(opts.HealthTracker, tr.TLSClientConfig) var dialFunc netx.DialFunc dialFunc, interceptedDial = makeScreenTimeDetectingDialFunc(opts.Dialer.SystemDial) tr.DialContext = dnscache.Dialer(dialFunc, dnsCache) diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index 869bcb599..1bb60d672 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -534,7 +534,7 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad // Disable HTTP2, since h2 can't do protocol switching. 
tr.TLSClientConfig.NextProtos = []string{} tr.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{} - tr.TLSClientConfig = tlsdial.Config(a.Hostname, a.HealthTracker, tr.TLSClientConfig) + tr.TLSClientConfig = tlsdial.Config(a.HealthTracker, tr.TLSClientConfig) if !tr.TLSClientConfig.InsecureSkipVerify { panic("unexpected") // should be set by tlsdial.Config } diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index 8c42e9070..7385f0ad1 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -647,12 +647,13 @@ func (c *Client) dialRegion(ctx context.Context, reg *tailcfg.DERPRegion) (net.C } func (c *Client) tlsClient(nc net.Conn, node *tailcfg.DERPNode) *tls.Conn { - tlsConf := tlsdial.Config(c.tlsServerName(node), c.HealthTracker, c.TLSConfig) + tlsConf := tlsdial.Config(c.HealthTracker, c.TLSConfig) if node != nil { if node.InsecureForTests { tlsConf.InsecureSkipVerify = true tlsConf.VerifyConnection = nil } + tlsConf.ServerName = c.tlsServerName(node) if node.CertName != "" { if suf, ok := strings.CutPrefix(node.CertName, "sha256-raw:"); ok { tlsdial.SetConfigExpectedCertHash(tlsConf, suf) diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index 252549660..7f0a7e333 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -7,10 +7,14 @@ import ( "bytes" "context" "crypto/tls" + "encoding/json" + "flag" "fmt" + "maps" "net" "net/http" "net/http/httptest" + "slices" "strings" "sync" "testing" @@ -19,6 +23,7 @@ import ( "tailscale.com/derp" "tailscale.com/net/netmon" "tailscale.com/net/netx" + "tailscale.com/tailcfg" "tailscale.com/types/key" ) @@ -556,3 +561,32 @@ func TestNotifyError(t *testing.T) { t.Fatalf("context done before receiving error: %v", ctx.Err()) } } + +var liveNetworkTest = flag.Bool("live-net-tests", false, "run live network tests") + +func TestManualDial(t *testing.T) { + if !*liveNetworkTest { + t.Skip("skipping live network test without --live-net-tests") + } + dm := &tailcfg.DERPMap{} + res, err := http.Get("https://controlplane.tailscale.com/derpmap/default") + if err != nil { + t.Fatalf("fetching DERPMap: %v", err) + } + defer res.Body.Close() + if err := json.NewDecoder(res.Body).Decode(dm); err != nil { + t.Fatalf("decoding DERPMap: %v", err) + } + + region := slices.Sorted(maps.Keys(dm.Regions))[0] + + netMon := netmon.NewStatic() + rc := NewRegionClient(key.NewNode(), t.Logf, netMon, func() *tailcfg.DERPRegion { + return dm.Regions[region] + }) + defer rc.Close() + + if err := rc.Connect(context.Background()); err != nil { + t.Fatalf("rc.Connect: %v", err) + } +} diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index fc259a417..b84528d7b 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -9,7 +9,6 @@ package logpolicy import ( "bufio" "bytes" - "cmp" "context" "crypto/tls" "encoding/json" @@ -911,8 +910,7 @@ func (opts TransportOptions) New() http.RoundTripper { tr.TLSNextProto = map[string]func(authority string, c *tls.Conn) http.RoundTripper{} } - host := cmp.Or(opts.Host, logtail.DefaultHost) - tr.TLSClientConfig = tlsdial.Config(host, opts.Health, tr.TLSClientConfig) + tr.TLSClientConfig = tlsdial.Config(opts.Health, tr.TLSClientConfig) // Force TLS 1.3 since we know log.tailscale.com supports it. 
tr.TLSClientConfig.MinVersion = tls.VersionTLS13 diff --git a/net/bakedroots/bakedroots.go b/net/bakedroots/bakedroots.go index 42e70c0dd..8787b4a6d 100644 --- a/net/bakedroots/bakedroots.go +++ b/net/bakedroots/bakedroots.go @@ -7,6 +7,7 @@ package bakedroots import ( "crypto/x509" + "fmt" "sync" "tailscale.com/util/testenv" @@ -14,7 +15,7 @@ import ( // Get returns the baked-in roots. // -// As of 2025-01-21, this includes only the LetsEncrypt ISRG Root X1 root. +// As of 2025-01-21, this includes only the LetsEncrypt ISRG Root X1 & X2 roots. func Get() *x509.CertPool { roots.once.Do(func() { roots.parsePEM(append( @@ -56,7 +57,7 @@ type rootsOnce struct { func (r *rootsOnce) parsePEM(caPEM []byte) { p := x509.NewCertPool() if !p.AppendCertsFromPEM(caPEM) { - panic("bogus PEM") + panic(fmt.Sprintf("bogus PEM: %q", caPEM)) } r.p = p } diff --git a/net/connectproxy/connectproxy.go b/net/connectproxy/connectproxy.go new file mode 100644 index 000000000..4bf687502 --- /dev/null +++ b/net/connectproxy/connectproxy.go @@ -0,0 +1,93 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package connectproxy contains some CONNECT proxy code. +package connectproxy + +import ( + "context" + "io" + "log" + "net" + "net/http" + "time" + + "tailscale.com/net/netx" + "tailscale.com/types/logger" +) + +// Handler is an HTTP CONNECT proxy handler. +type Handler struct { + // Dial, if non-nil, is an alternate dialer to use + // instead of the default dialer. + Dial netx.DialFunc + + // Logf, if non-nil, is an alterate logger to + // use instead of log.Printf. + Logf logger.Logf + + // Check, if non-nil, validates the CONNECT target. + Check func(hostPort string) error +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + if r.Method != "CONNECT" { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + dial := h.Dial + if dial == nil { + var d net.Dialer + dial = d.DialContext + } + logf := h.Logf + if logf == nil { + logf = log.Printf + } + + hostPort := r.RequestURI + if h.Check != nil { + if err := h.Check(hostPort); err != nil { + logf("CONNECT target %q not allowed: %v", hostPort, err) + http.Error(w, "Invalid CONNECT target", http.StatusForbidden) + return + } + } + + ctx, cancel := context.WithTimeout(ctx, 15*time.Second) + defer cancel() + back, err := dial(ctx, "tcp", hostPort) + if err != nil { + logf("error CONNECT dialing %v: %v", hostPort, err) + http.Error(w, "Connect failure", http.StatusBadGateway) + return + } + defer back.Close() + + hj, ok := w.(http.Hijacker) + if !ok { + http.Error(w, "CONNECT hijack unavailable", http.StatusInternalServerError) + return + } + c, br, err := hj.Hijack() + if err != nil { + logf("CONNECT hijack: %v", err) + return + } + defer c.Close() + + io.WriteString(c, "HTTP/1.1 200 OK\r\n\r\n") + + errc := make(chan error, 2) + go func() { + _, err := io.Copy(c, back) + errc <- err + }() + go func() { + _, err := io.Copy(back, br) + errc <- err + }() + <-errc +} diff --git a/net/dnscache/dnscache.go b/net/dnscache/dnscache.go index 96550cbb1..d60e92f0b 100644 --- a/net/dnscache/dnscache.go +++ b/net/dnscache/dnscache.go @@ -24,6 +24,7 @@ import ( "tailscale.com/util/cloudenv" "tailscale.com/util/singleflight" "tailscale.com/util/slicesx" + "tailscale.com/util/testenv" ) var zaddr netip.Addr @@ -63,6 +64,10 @@ type Resolver struct { // If nil, net.DefaultResolver is used. 
Forward *net.Resolver + // LookupIPForTest, if non-nil and in tests, handles requests instead + // of the usual mechanisms. + LookupIPForTest func(ctx context.Context, host string) ([]netip.Addr, error) + // LookupIPFallback optionally provides a backup DNS mechanism // to use if Forward returns an error or no results. LookupIPFallback func(ctx context.Context, host string) ([]netip.Addr, error) @@ -284,7 +289,13 @@ func (r *Resolver) lookupIP(ctx context.Context, host string) (ip, ip6 netip.Add lookupCtx, lookupCancel := context.WithTimeout(ctx, r.lookupTimeoutForHost(host)) defer lookupCancel() - ips, err := r.fwd().LookupNetIP(lookupCtx, "ip", host) + + var ips []netip.Addr + if r.LookupIPForTest != nil && testenv.InTest() { + ips, err = r.LookupIPForTest(ctx, host) + } else { + ips, err = r.fwd().LookupNetIP(lookupCtx, "ip", host) + } if err != nil || len(ips) == 0 { if resolver, ok := r.cloudHostResolver(); ok { r.dlogf("resolving %q via cloud resolver", host) diff --git a/net/dnsfallback/dnsfallback.go b/net/dnsfallback/dnsfallback.go index 4c5d5fa2f..8e53c3b29 100644 --- a/net/dnsfallback/dnsfallback.go +++ b/net/dnsfallback/dnsfallback.go @@ -286,7 +286,7 @@ func bootstrapDNSMap(ctx context.Context, serverName string, serverIP netip.Addr tr.DialContext = func(ctx context.Context, netw, addr string) (net.Conn, error) { return dialer.DialContext(ctx, "tcp", net.JoinHostPort(serverIP.String(), "443")) } - tr.TLSClientConfig = tlsdial.Config(serverName, ht, tr.TLSClientConfig) + tr.TLSClientConfig = tlsdial.Config(ht, tr.TLSClientConfig) c := &http.Client{Transport: tr} req, err := http.NewRequestWithContext(ctx, "GET", "https://"+serverName+"/bootstrap-dns?q="+url.QueryEscape(queryName), nil) if err != nil { diff --git a/net/tlsdial/tlsdial.go b/net/tlsdial/tlsdial.go index 1bd2450aa..80f3bfc06 100644 --- a/net/tlsdial/tlsdial.go +++ b/net/tlsdial/tlsdial.go @@ -59,18 +59,26 @@ var mitmBlockWarnable = health.Register(&health.Warnable{ ImpactsConnectivity: true, }) -// Config returns a tls.Config for connecting to a server. +// Config returns a tls.Config for connecting to a server that +// uses system roots for validation but, if those fail, also tries +// the baked-in LetsEncrypt roots as a fallback validation method. +// // If base is non-nil, it's cloned as the base config before // being configured and returned. // If ht is non-nil, it's used to report health errors. -func Config(host string, ht *health.Tracker, base *tls.Config) *tls.Config { +func Config(ht *health.Tracker, base *tls.Config) *tls.Config { var conf *tls.Config if base == nil { conf = new(tls.Config) } else { conf = base.Clone() } - conf.ServerName = host + + // Note: we do NOT set conf.ServerName here (as we accidentally did + // previously), as this path is also used when dialing an HTTPS proxy server + // (through which we'll send a CONNECT request to get a TCP connection to do + // the real TCP connection) because host is the ultimate hostname, but this + // tls.Config is used for both the proxy and the ultimate target. if n := sslKeyLogFile; n != "" { f, err := os.OpenFile(n, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) @@ -93,7 +101,9 @@ func Config(host string, ht *health.Tracker, base *tls.Config) *tls.Config { // (with the baked-in fallback root) in the VerifyConnection hook. 
conf.InsecureSkipVerify = true conf.VerifyConnection = func(cs tls.ConnectionState) (retErr error) { - if host == "log.tailscale.com" && hostinfo.IsNATLabGuestVM() { + dialedHost := cs.ServerName + + if dialedHost == "log.tailscale.com" && hostinfo.IsNATLabGuestVM() { // Allow log.tailscale.com TLS MITM for integration tests when // the client's running within a NATLab VM. return nil @@ -116,7 +126,7 @@ func Config(host string, ht *health.Tracker, base *tls.Config) *tls.Config { // Show a dedicated warning. m, ok := blockblame.VerifyCertificate(cert) if ok { - log.Printf("tlsdial: server cert for %q looks like %q equipment (could be blocking Tailscale)", host, m.Name) + log.Printf("tlsdial: server cert seen while dialing %q looks like %q equipment (could be blocking Tailscale)", dialedHost, m.Name) ht.SetUnhealthy(mitmBlockWarnable, health.Args{"manufacturer": m.Name}) } else { ht.SetHealthy(mitmBlockWarnable) @@ -135,7 +145,7 @@ func Config(host string, ht *health.Tracker, base *tls.Config) *tls.Config { ht.SetTLSConnectionError(cs.ServerName, nil) if selfSignedIssuer != "" { // Log the self-signed issuer, but don't treat it as an error. - log.Printf("tlsdial: warning: server cert for %q passed x509 validation but is self-signed by %q", host, selfSignedIssuer) + log.Printf("tlsdial: warning: server cert for %q passed x509 validation but is self-signed by %q", dialedHost, selfSignedIssuer) } } }() @@ -144,7 +154,7 @@ func Config(host string, ht *health.Tracker, base *tls.Config) *tls.Config { // First try doing x509 verification with the system's // root CA pool. opts := x509.VerifyOptions{ - DNSName: cs.ServerName, + DNSName: dialedHost, Intermediates: x509.NewCertPool(), } for _, cert := range cs.PeerCertificates[1:] { @@ -152,7 +162,7 @@ func Config(host string, ht *health.Tracker, base *tls.Config) *tls.Config { } _, errSys := cs.PeerCertificates[0].Verify(opts) if debug() { - log.Printf("tlsdial(sys %q): %v", host, errSys) + log.Printf("tlsdial(sys %q): %v", dialedHost, errSys) } // Always verify with our baked-in Let's Encrypt certificate, @@ -161,13 +171,11 @@ func Config(host string, ht *health.Tracker, base *tls.Config) *tls.Config { opts.Roots = bakedroots.Get() _, bakedErr := cs.PeerCertificates[0].Verify(opts) if debug() { - log.Printf("tlsdial(bake %q): %v", host, bakedErr) + log.Printf("tlsdial(bake %q): %v", dialedHost, bakedErr) } else if bakedErr != nil { - if _, loaded := tlsdialWarningPrinted.LoadOrStore(host, true); !loaded { - if errSys == nil { - log.Printf("tlsdial: warning: server cert for %q is not a Let's Encrypt cert", host) - } else { - log.Printf("tlsdial: error: server cert for %q failed to verify and is not a Let's Encrypt cert", host) + if _, loaded := tlsdialWarningPrinted.LoadOrStore(dialedHost, true); !loaded { + if errSys != nil { + log.Printf("tlsdial: error: server cert for %q failed both system roots & Let's Encrypt root validation", dialedHost) } } } @@ -202,9 +210,6 @@ func SetConfigExpectedCert(c *tls.Config, certDNSName string) { c.ServerName = certDNSName return } - if c.VerifyPeerCertificate != nil { - panic("refusing to override tls.Config.VerifyPeerCertificate") - } // Set InsecureSkipVerify to prevent crypto/tls from doing its // own cert verification, but do the same work that it'd do // (but using certDNSName) in the VerifyPeerCertificate hook. 
@@ -257,29 +262,30 @@ func SetConfigExpectedCertHash(c *tls.Config, wantFullCertSHA256Hex string) { if c.VerifyPeerCertificate != nil { panic("refusing to override tls.Config.VerifyPeerCertificate") } + // Set InsecureSkipVerify to prevent crypto/tls from doing its // own cert verification, but do the same work that it'd do - // (but using certDNSName) in the VerifyPeerCertificate hook. + // (but using certDNSName) in the VerifyConnection hook. c.InsecureSkipVerify = true - c.VerifyConnection = nil - c.VerifyPeerCertificate = func(rawCerts [][]byte, _ [][]*x509.Certificate) error { + + c.VerifyConnection = func(cs tls.ConnectionState) error { + dialedHost := cs.ServerName var sawGoodCert bool - for _, rawCert := range rawCerts { - cert, err := x509.ParseCertificate(rawCert) - if err != nil { - return fmt.Errorf("ParseCertificate: %w", err) - } + + for _, cert := range cs.PeerCertificates { if strings.HasPrefix(cert.Subject.CommonName, derpconst.MetaCertCommonNamePrefix) { continue } if sawGoodCert { return errors.New("unexpected multiple certs presented") } - if fmt.Sprintf("%02x", sha256.Sum256(rawCert)) != wantFullCertSHA256Hex { + if fmt.Sprintf("%02x", sha256.Sum256(cert.Raw)) != wantFullCertSHA256Hex { return fmt.Errorf("cert hash does not match expected cert hash") } - if err := cert.VerifyHostname(c.ServerName); err != nil { - return fmt.Errorf("cert does not match server name %q: %w", c.ServerName, err) + if dialedHost != "" { // it's empty when dialing a derper by IP with no hostname + if err := cert.VerifyHostname(dialedHost); err != nil { + return fmt.Errorf("cert does not match server name %q: %w", dialedHost, err) + } } now := time.Now() if now.After(cert.NotAfter) { @@ -302,12 +308,8 @@ func SetConfigExpectedCertHash(c *tls.Config, wantFullCertSHA256Hex string) { func NewTransport() *http.Transport { return &http.Transport{ DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } var d tls.Dialer - d.Config = Config(host, nil, nil) + d.Config = Config(nil, nil) return d.DialContext(ctx, network, addr) }, } diff --git a/net/tlsdial/tlsdial_test.go b/net/tlsdial/tlsdial_test.go index 6723b82e0..e2c4cdd4f 100644 --- a/net/tlsdial/tlsdial_test.go +++ b/net/tlsdial/tlsdial_test.go @@ -86,7 +86,7 @@ func TestFallbackRootWorks(t *testing.T) { DisableKeepAlives: true, // for test cleanup ease } ht := new(health.Tracker) - tr.TLSClientConfig = Config("tlsdial.test", ht, tr.TLSClientConfig) + tr.TLSClientConfig = Config(ht, tr.TLSClientConfig) c := &http.Client{Transport: tr} ctr0 := atomic.LoadInt32(&counterFallbackOK) diff --git a/tstest/tlstest/testdata/controlplane.tstest.key b/tstest/tlstest/testdata/controlplane.tstest.key new file mode 100644 index 000000000..dbe5ede34 --- /dev/null +++ b/tstest/tlstest/testdata/controlplane.tstest.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIHcxOQNVyqvBSSlu7c93QW6OsyccjL+R1evW4acd32MWoAoGCCqGSM49 +AwEHoUQDQgAEIOY5/CQ8CMuKYPLf+r6OEneqfzQ5RfgPnLdkL22qhm8xb69ZCXxz +UecawU0KEDfHLYbUYXSuhAFxxuPh9I3x5Q== +-----END EC PRIVATE KEY----- diff --git a/tstest/tlstest/testdata/proxy.tstest.key b/tstest/tlstest/testdata/proxy.tstest.key new file mode 100644 index 000000000..067279089 --- /dev/null +++ b/tstest/tlstest/testdata/proxy.tstest.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEING1XBDWFXQjqBmLjhp20hXOf2rk/I0N6W7muv9RVvk3oAoGCCqGSM49 +AwEHoUQDQgAE8lxnEEeLqYikwmXbXSsIQSw20R0oLA831s960KQZEgt0P9SbWcJc 
+QTk98rdfYT/QDdHn157Oh4FPcDtxmdQ4vw== +-----END EC PRIVATE KEY----- diff --git a/tstest/tlstest/testdata/root-ca.key b/tstest/tlstest/testdata/root-ca.key new file mode 100644 index 000000000..ece23ddf9 --- /dev/null +++ b/tstest/tlstest/testdata/root-ca.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIMl3xjqt1dnXBpYJSEqevirAcnSJ79I2tucdRazlrDG9oAoGCCqGSM49 +AwEHoUQDQgAEQ/+Jme+16hgO7TtPSIFHVV0Yt969ltVlARVcNUZmWc0upQaq7uiJ +Aur5KtzwxU3YI4bhNK0593OK2TLvEEWIdw== +-----END EC PRIVATE KEY----- diff --git a/tstest/tlstest/tlstest.go b/tstest/tlstest/tlstest.go new file mode 100644 index 000000000..f65c261e8 --- /dev/null +++ b/tstest/tlstest/tlstest.go @@ -0,0 +1,167 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package tlstest contains code to help test Tailscale's client proxy support. +package tlstest + +import ( + "bytes" + "crypto/ecdsa" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + _ "embed" + "encoding/pem" + "fmt" + "math/big" + "sync" + "time" +) + +// Some baked-in ECDSA keys to speed up tests, not having to burn CPU to +// generate them each time. We only make the certs (which have expiry times) +// at runtime. +// +// They were made with: +// +// openssl ecparam -name prime256v1 -genkey -noout -out root-ca.key +var ( + //go:embed testdata/root-ca.key + rootCAKeyPEM []byte + + // TestProxyServerKey is the PEM private key for [TestProxyServerCert]. + // + //go:embed testdata/proxy.tstest.key + TestProxyServerKey []byte + + // TestControlPlaneKey is the PEM private key for [TestControlPlaneCert]. + // + //go:embed testdata/controlplane.tstest.key + TestControlPlaneKey []byte +) + +// TestRootCA returns a self-signed ECDSA root CA certificate (as PEM) for +// testing purposes. +func TestRootCA() []byte { + return bytes.Clone(testRootCAOncer()) +} + +var testRootCAOncer = sync.OnceValue(func() []byte { + key := rootCAKey() + now := time.Now().Add(-time.Hour) + tpl := &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + CommonName: "Tailscale Unit Test ECDSA Root", + Organization: []string{"Tailscale Test Org"}, + }, + NotBefore: now, + NotAfter: now.AddDate(5, 0, 0), + + IsCA: true, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + SubjectKeyId: mustSKID(&key.PublicKey), + } + + der, err := x509.CreateCertificate(rand.Reader, tpl, tpl, &key.PublicKey, key) + if err != nil { + panic(err) + } + return pemCert(der) +}) + +func pemCert(der []byte) []byte { + var buf bytes.Buffer + if err := pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil { + panic(fmt.Sprintf("failed to encode PEM: %v", err)) + } + return buf.Bytes() +} + +var rootCAKey = sync.OnceValue(func() *ecdsa.PrivateKey { + return mustParsePEM(rootCAKeyPEM, x509.ParseECPrivateKey) +}) + +func mustParsePEM[T any](pemBytes []byte, parse func([]byte) (T, error)) T { + block, rest := pem.Decode(pemBytes) + if block == nil || len(rest) > 0 { + panic("invalid PEM") + } + v, err := parse(block.Bytes) + if err != nil { + panic(fmt.Sprintf("invalid PEM: %v", err)) + } + return v +} + +// KeyPair is a simple struct to hold a certificate and its private key. +type KeyPair struct { + Domain string + KeyPEM []byte // PEM-encoded private key +} + +// ServerTLSConfig returns a TLS configuration suitable for a server +// using the KeyPair's certificate and private key. 
+func (p KeyPair) ServerTLSConfig() *tls.Config { + cert, err := tls.X509KeyPair(p.CertPEM(), p.KeyPEM) + if err != nil { + panic("invalid TLS key pair: " + err.Error()) + } + return &tls.Config{ + Certificates: []tls.Certificate{cert}, + } +} + +// ProxyServerKeyPair is a KeyPair for a test control plane server +// with domain name "proxy.tstest". +var ProxyServerKeyPair = KeyPair{ + Domain: "proxy.tstest", + KeyPEM: TestProxyServerKey, +} + +// ControlPlaneKeyPair is a KeyPair for a test control plane server +// with domain name "controlplane.tstest". +var ControlPlaneKeyPair = KeyPair{ + Domain: "controlplane.tstest", + KeyPEM: TestControlPlaneKey, +} + +func (p KeyPair) CertPEM() []byte { + caCert := mustParsePEM(TestRootCA(), x509.ParseCertificate) + caPriv := mustParsePEM(rootCAKeyPEM, x509.ParseECPrivateKey) + leafKey := mustParsePEM(p.KeyPEM, x509.ParseECPrivateKey) + + serial, err := rand.Int(rand.Reader, big.NewInt(0).Lsh(big.NewInt(1), 128)) + if err != nil { + panic(err) + } + + now := time.Now().Add(-time.Hour) + tpl := &x509.Certificate{ + SerialNumber: serial, + Subject: pkix.Name{CommonName: p.Domain}, + NotBefore: now, + NotAfter: now.AddDate(2, 0, 0), + + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + DNSNames: []string{p.Domain}, + } + + der, err := x509.CreateCertificate(rand.Reader, tpl, caCert, &leafKey.PublicKey, caPriv) + if err != nil { + panic(err) + } + return pemCert(der) +} + +func mustSKID(pub *ecdsa.PublicKey) []byte { + skid, err := x509.MarshalPKIXPublicKey(pub) + if err != nil { + panic(err) + } + return skid[:20] // same as x509 library +} From 583f740c0b583081b0c1a39f92e349c49c0c4a41 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 19 Jun 2025 09:47:06 -0700 Subject: [PATCH 0975/1708] Revert "types/netmap,wgengine/magicsock: propagate CapVer to magicsock.endpoint (#16244)" (#16322) This reverts commit 6a93b17c8cafc1d8e1c52e133511e52ed9086355. The reverted commit added more complexity than it was worth at the current stage. Handling delta CapVer changes requires extensive changes to relayManager datastructures in order to also support delta updates of relay servers. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- types/netmap/nodemut.go | 13 ------------- types/netmap/nodemut_test.go | 9 --------- wgengine/magicsock/magicsock.go | 4 ---- 3 files changed, 26 deletions(-) diff --git a/types/netmap/nodemut.go b/types/netmap/nodemut.go index ab30ef1e6..f4de1bf0b 100644 --- a/types/netmap/nodemut.go +++ b/types/netmap/nodemut.go @@ -69,17 +69,6 @@ func (m NodeMutationLastSeen) Apply(n *tailcfg.Node) { n.LastSeen = ptr.To(m.LastSeen) } -// NodeMutationCap is a NodeMutation that says a node's -// [tailcfg.CapabilityVersion] value has changed. 
-type NodeMutationCap struct { - mutatingNodeID - Cap tailcfg.CapabilityVersion -} - -func (m NodeMutationCap) Apply(n *tailcfg.Node) { - n.Cap = m.Cap -} - var peerChangeFields = sync.OnceValue(func() []reflect.StructField { var fields []reflect.StructField rt := reflect.TypeFor[tailcfg.PeerChange]() @@ -116,8 +105,6 @@ func NodeMutationsFromPatch(p *tailcfg.PeerChange) (_ []NodeMutation, ok bool) { ret = append(ret, NodeMutationOnline{mutatingNodeID(p.NodeID), *p.Online}) case "LastSeen": ret = append(ret, NodeMutationLastSeen{mutatingNodeID(p.NodeID), *p.LastSeen}) - case "Cap": - ret = append(ret, NodeMutationCap{mutatingNodeID(p.NodeID), p.Cap}) } } return ret, true diff --git a/types/netmap/nodemut_test.go b/types/netmap/nodemut_test.go index 0f1cac6b2..374f8623a 100644 --- a/types/netmap/nodemut_test.go +++ b/types/netmap/nodemut_test.go @@ -177,14 +177,6 @@ func TestMutationsFromMapResponse(t *testing.T) { }, want: nil, }, - { - name: "patch-cap", - mr: fromChanges(&tailcfg.PeerChange{ - NodeID: 1, - Cap: 2, - }), - want: muts(NodeMutationCap{1, 2}), - }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -203,7 +195,6 @@ func TestMutationsFromMapResponse(t *testing.T) { NodeMutationDERPHome{}, NodeMutationOnline{}, NodeMutationLastSeen{}, - NodeMutationCap{}, )); diff != "" { t.Errorf("wrong result (-want +got):\n%s", diff) } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a6c6a3fb6..bfc7afba9 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3263,10 +3263,6 @@ func (c *Conn) onNodeMutationsUpdate(update NodeMutationsUpdate) { ep.mu.Lock() ep.setEndpointsLocked(views.SliceOf(m.Endpoints)) ep.mu.Unlock() - case netmap.NodeMutationCap: - ep.mu.Lock() - ep.relayCapable = capVerIsRelayCapable(m.Cap) - ep.mu.Unlock() } } } From a64ca7a5b4efed0437a1d4eace3815b4de7f6eaf Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 19 Jun 2025 07:58:19 -0700 Subject: [PATCH 0976/1708] tstest/tlstest: simplify, don't even bake in any keys I earlier thought this saved a second of CPU even on a fast machine, but I think when I was previously measuring, I still had a 4096 bit RSA key being generated in the code I was measuring. Measuring again for this, it's plenty fast. Prep for using this package more, for derp, etc. 
Updates #16315 Change-Id: I4c9008efa9aa88a3d65409d6ffd7b3807f4d75e9 Signed-off-by: Brad Fitzpatrick --- control/controlclient/controlclient_test.go | 4 +- net/bakedroots/bakedroots.go | 13 +- .../tlstest/testdata/controlplane.tstest.key | 5 - tstest/tlstest/testdata/proxy.tstest.key | 5 - tstest/tlstest/testdata/root-ca.key | 5 - tstest/tlstest/tlstest.go | 114 ++++++++++-------- tstest/tlstest/tlstest_test.go | 21 ++++ 7 files changed, 95 insertions(+), 72 deletions(-) delete mode 100644 tstest/tlstest/testdata/controlplane.tstest.key delete mode 100644 tstest/tlstest/testdata/proxy.tstest.key delete mode 100644 tstest/tlstest/testdata/root-ca.key create mode 100644 tstest/tlstest/tlstest_test.go diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index 1107f76a4..792c26955 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -263,13 +263,13 @@ func TestHTTPSWithProxy(t *testing.T) { testHTTPS(t, true) } func testHTTPS(t *testing.T, withProxy bool) { bakedroots.ResetForTest(t, tlstest.TestRootCA()) - controlLn, err := tls.Listen("tcp", "127.0.0.1:0", tlstest.ControlPlaneKeyPair.ServerTLSConfig()) + controlLn, err := tls.Listen("tcp", "127.0.0.1:0", tlstest.ControlPlane.ServerTLSConfig()) if err != nil { t.Fatal(err) } defer controlLn.Close() - proxyLn, err := tls.Listen("tcp", "127.0.0.1:0", tlstest.ProxyServerKeyPair.ServerTLSConfig()) + proxyLn, err := tls.Listen("tcp", "127.0.0.1:0", tlstest.ProxyServer.ServerTLSConfig()) if err != nil { t.Fatal(err) } diff --git a/net/bakedroots/bakedroots.go b/net/bakedroots/bakedroots.go index 8787b4a6d..b268b1546 100644 --- a/net/bakedroots/bakedroots.go +++ b/net/bakedroots/bakedroots.go @@ -26,16 +26,9 @@ func Get() *x509.CertPool { return roots.p } -// testingTB is a subset of testing.TB needed -// to verify the caller isn't in a parallel test. -type testingTB interface { - // Setenv panics if it's in a parallel test. - Setenv(k, v string) -} - // ResetForTest resets the cached roots for testing, // optionally setting them to caPEM if non-nil. -func ResetForTest(tb testingTB, caPEM []byte) { +func ResetForTest(tb testenv.TB, caPEM []byte) { if !testenv.InTest() { panic("not in test") } @@ -44,6 +37,10 @@ func ResetForTest(tb testingTB, caPEM []byte) { roots = rootsOnce{} if caPEM != nil { roots.once.Do(func() { roots.parsePEM(caPEM) }) + tb.Cleanup(func() { + // Reset the roots to real roots for any following test. 
+ roots = rootsOnce{} + }) } } diff --git a/tstest/tlstest/testdata/controlplane.tstest.key b/tstest/tlstest/testdata/controlplane.tstest.key deleted file mode 100644 index dbe5ede34..000000000 --- a/tstest/tlstest/testdata/controlplane.tstest.key +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIHcxOQNVyqvBSSlu7c93QW6OsyccjL+R1evW4acd32MWoAoGCCqGSM49 -AwEHoUQDQgAEIOY5/CQ8CMuKYPLf+r6OEneqfzQ5RfgPnLdkL22qhm8xb69ZCXxz -UecawU0KEDfHLYbUYXSuhAFxxuPh9I3x5Q== ------END EC PRIVATE KEY----- diff --git a/tstest/tlstest/testdata/proxy.tstest.key b/tstest/tlstest/testdata/proxy.tstest.key deleted file mode 100644 index 067279089..000000000 --- a/tstest/tlstest/testdata/proxy.tstest.key +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEING1XBDWFXQjqBmLjhp20hXOf2rk/I0N6W7muv9RVvk3oAoGCCqGSM49 -AwEHoUQDQgAE8lxnEEeLqYikwmXbXSsIQSw20R0oLA831s960KQZEgt0P9SbWcJc -QTk98rdfYT/QDdHn157Oh4FPcDtxmdQ4vw== ------END EC PRIVATE KEY----- diff --git a/tstest/tlstest/testdata/root-ca.key b/tstest/tlstest/testdata/root-ca.key deleted file mode 100644 index ece23ddf9..000000000 --- a/tstest/tlstest/testdata/root-ca.key +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIMl3xjqt1dnXBpYJSEqevirAcnSJ79I2tucdRazlrDG9oAoGCCqGSM49 -AwEHoUQDQgAEQ/+Jme+16hgO7TtPSIFHVV0Yt969ltVlARVcNUZmWc0upQaq7uiJ -Aur5KtzwxU3YI4bhNK0593OK2TLvEEWIdw== ------END EC PRIVATE KEY----- diff --git a/tstest/tlstest/tlstest.go b/tstest/tlstest/tlstest.go index f65c261e8..76ec0e7e2 100644 --- a/tstest/tlstest/tlstest.go +++ b/tstest/tlstest/tlstest.go @@ -1,12 +1,14 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Package tlstest contains code to help test Tailscale's client proxy support. +// Package tlstest contains code to help test Tailscale's TLS support without +// depending on real WebPKI roots or certificates during tests. package tlstest import ( "bytes" "crypto/ecdsa" + "crypto/elliptic" "crypto/rand" "crypto/tls" "crypto/x509" @@ -19,32 +21,47 @@ import ( "time" ) -// Some baked-in ECDSA keys to speed up tests, not having to burn CPU to -// generate them each time. We only make the certs (which have expiry times) -// at runtime. +// TestRootCA returns a self-signed ECDSA root CA certificate (as PEM) for +// testing purposes. // -// They were made with: +// Typical use in a test is like: // -// openssl ecparam -name prime256v1 -genkey -noout -out root-ca.key +// bakedroots.ResetForTest(t, tlstest.TestRootCA()) +func TestRootCA() []byte { + return bytes.Clone(testRootCAOncer()) +} + +// cache for [privateKey], so it always returns the same key for a given domain. var ( - //go:embed testdata/root-ca.key - rootCAKeyPEM []byte - - // TestProxyServerKey is the PEM private key for [TestProxyServerCert]. - // - //go:embed testdata/proxy.tstest.key - TestProxyServerKey []byte - - // TestControlPlaneKey is the PEM private key for [TestControlPlaneCert]. - // - //go:embed testdata/controlplane.tstest.key - TestControlPlaneKey []byte + mu sync.Mutex + privateKeys = make(map[string][]byte) // domain -> private key PEM ) -// TestRootCA returns a self-signed ECDSA root CA certificate (as PEM) for -// testing purposes. -func TestRootCA() []byte { - return bytes.Clone(testRootCAOncer()) +// caDomain is a fake domain name to repreesnt the private key for the root CA. +const caDomain = "_root" + +// privateKey returns a PEM-encoded test ECDSA private key for the given domain. 
+func privateKey(domain string) (pemBytes []byte) { + mu.Lock() + defer mu.Unlock() + if pemBytes, ok := privateKeys[domain]; ok { + return bytes.Clone(pemBytes) + } + defer func() { privateKeys[domain] = bytes.Clone(pemBytes) }() + + k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + panic(fmt.Sprintf("failed to generate ECDSA key for %q: %v", domain, err)) + } + der, err := x509.MarshalECPrivateKey(k) + if err != nil { + panic(fmt.Sprintf("failed to marshal ECDSA key for %q: %v", domain, err)) + } + var buf bytes.Buffer + if err := pem.Encode(&buf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: der}); err != nil { + panic(fmt.Sprintf("failed to encode PEM: %v", err)) + } + return buf.Bytes() } var testRootCAOncer = sync.OnceValue(func() []byte { @@ -81,7 +98,7 @@ func pemCert(der []byte) []byte { } var rootCAKey = sync.OnceValue(func() *ecdsa.PrivateKey { - return mustParsePEM(rootCAKeyPEM, x509.ParseECPrivateKey) + return mustParsePEM(privateKey(caDomain), x509.ParseECPrivateKey) }) func mustParsePEM[T any](pemBytes []byte, parse func([]byte) (T, error)) T { @@ -96,16 +113,27 @@ func mustParsePEM[T any](pemBytes []byte, parse func([]byte) (T, error)) T { return v } -// KeyPair is a simple struct to hold a certificate and its private key. -type KeyPair struct { - Domain string - KeyPEM []byte // PEM-encoded private key -} +// Domain is a fake domain name used in TLS tests. +// +// They don't have real DNS records. Tests are expected to fake DNS +// lookups and dials for these domains. +type Domain string + +// ProxyServer is a domain name for a hypothetical proxy server. +const ( + ProxyServer = Domain("proxy.tstest") + + // ControlPlane is a domain name for a test control plane server. + ControlPlane = Domain("controlplane.tstest") + + // Derper is a domain name for a test DERP server. + Derper = Domain("derp.tstest") +) // ServerTLSConfig returns a TLS configuration suitable for a server // using the KeyPair's certificate and private key. -func (p KeyPair) ServerTLSConfig() *tls.Config { - cert, err := tls.X509KeyPair(p.CertPEM(), p.KeyPEM) +func (d Domain) ServerTLSConfig() *tls.Config { + cert, err := tls.X509KeyPair(d.CertPEM(), privateKey(string(d))) if err != nil { panic("invalid TLS key pair: " + err.Error()) } @@ -114,24 +142,16 @@ func (p KeyPair) ServerTLSConfig() *tls.Config { } } -// ProxyServerKeyPair is a KeyPair for a test control plane server -// with domain name "proxy.tstest". -var ProxyServerKeyPair = KeyPair{ - Domain: "proxy.tstest", - KeyPEM: TestProxyServerKey, -} - -// ControlPlaneKeyPair is a KeyPair for a test control plane server -// with domain name "controlplane.tstest". -var ControlPlaneKeyPair = KeyPair{ - Domain: "controlplane.tstest", - KeyPEM: TestControlPlaneKey, +// KeyPEM returns a PEM-encoded private key for the domain. +func (d Domain) KeyPEM() []byte { + return privateKey(string(d)) } -func (p KeyPair) CertPEM() []byte { +// CertPEM returns a PEM-encoded certificate for the domain. 
+func (d Domain) CertPEM() []byte { caCert := mustParsePEM(TestRootCA(), x509.ParseCertificate) - caPriv := mustParsePEM(rootCAKeyPEM, x509.ParseECPrivateKey) - leafKey := mustParsePEM(p.KeyPEM, x509.ParseECPrivateKey) + caPriv := mustParsePEM(privateKey(caDomain), x509.ParseECPrivateKey) + leafKey := mustParsePEM(d.KeyPEM(), x509.ParseECPrivateKey) serial, err := rand.Int(rand.Reader, big.NewInt(0).Lsh(big.NewInt(1), 128)) if err != nil { @@ -141,14 +161,14 @@ func (p KeyPair) CertPEM() []byte { now := time.Now().Add(-time.Hour) tpl := &x509.Certificate{ SerialNumber: serial, - Subject: pkix.Name{CommonName: p.Domain}, + Subject: pkix.Name{CommonName: string(d)}, NotBefore: now, NotAfter: now.AddDate(2, 0, 0), KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, BasicConstraintsValid: true, - DNSNames: []string{p.Domain}, + DNSNames: []string{string(d)}, } der, err := x509.CreateCertificate(rand.Reader, tpl, caCert, &leafKey.PublicKey, caPriv) diff --git a/tstest/tlstest/tlstest_test.go b/tstest/tlstest/tlstest_test.go new file mode 100644 index 000000000..8497b872e --- /dev/null +++ b/tstest/tlstest/tlstest_test.go @@ -0,0 +1,21 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tlstest + +import ( + "testing" +) + +func TestPrivateKey(t *testing.T) { + a := privateKey("a.tstest") + a2 := privateKey("a.tstest") + b := privateKey("b.tstest") + + if string(a) != string(a2) { + t.Errorf("a and a2 should be equal") + } + if string(a) == string(b) { + t.Errorf("a and b should not be equal") + } +} From 253d0b026dbd55f38787d8e7334261b044b8c703 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 20 Jun 2025 10:34:47 +0100 Subject: [PATCH 0977/1708] cmd/k8s-operator: remove conffile hashing mechanism (#16335) Proxies know how to reload configfile on changes since 1.80, which is going to be the earliest supported proxy version with 1.84 operator, so remove the mechanism that was updating configfile hash to force proxy Pod restarts on config changes. Updates #13032 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/connector_test.go | 24 ++++---- cmd/k8s-operator/ingress_test.go | 16 ++--- cmd/k8s-operator/operator_test.go | 42 ++++++------- cmd/k8s-operator/proxygroup.go | 91 +++-------------------------- cmd/k8s-operator/proxygroup_test.go | 35 +++-------- cmd/k8s-operator/sts.go | 85 +++++---------------------- cmd/k8s-operator/testutils_test.go | 19 ------ 7 files changed, 74 insertions(+), 238 deletions(-) diff --git a/cmd/k8s-operator/connector_test.go b/cmd/k8s-operator/connector_test.go index f32fe3282..d5829c37f 100644 --- a/cmd/k8s-operator/connector_test.go +++ b/cmd/k8s-operator/connector_test.go @@ -80,7 +80,7 @@ func TestConnector(t *testing.T) { app: kubetypes.AppConnector, } expectEqual(t, fc, expectedSecret(t, fc, opts)) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Connector status should get updated with the IP/hostname info when available. const hostname = "foo.tailnetxyz.ts.net" @@ -106,7 +106,7 @@ func TestConnector(t *testing.T) { opts.subnetRoutes = "10.40.0.0/14,10.44.0.0/20" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Remove a route. 
mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -114,7 +114,7 @@ func TestConnector(t *testing.T) { }) opts.subnetRoutes = "10.44.0.0/20" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Remove the subnet router. mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -122,7 +122,7 @@ func TestConnector(t *testing.T) { }) opts.subnetRoutes = "" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Re-add the subnet router. mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -132,7 +132,7 @@ func TestConnector(t *testing.T) { }) opts.subnetRoutes = "10.44.0.0/20" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Delete the Connector. if err = fc.Delete(context.Background(), cn); err != nil { @@ -176,7 +176,7 @@ func TestConnector(t *testing.T) { app: kubetypes.AppConnector, } expectEqual(t, fc, expectedSecret(t, fc, opts)) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Add an exit node. mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -184,7 +184,7 @@ func TestConnector(t *testing.T) { }) opts.isExitNode = true expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Delete the Connector. if err = fc.Delete(context.Background(), cn); err != nil { @@ -262,7 +262,7 @@ func TestConnectorWithProxyClass(t *testing.T) { app: kubetypes.AppConnector, } expectEqual(t, fc, expectedSecret(t, fc, opts)) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 2. Update Connector to specify a ProxyClass. ProxyClass is not yet // ready, so its configuration is NOT applied to the Connector @@ -271,7 +271,7 @@ func TestConnectorWithProxyClass(t *testing.T) { conn.Spec.ProxyClass = "custom-metadata" }) expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 3. ProxyClass is set to Ready by proxy-class reconciler. Connector // get reconciled and configuration from the ProxyClass is applied to @@ -286,7 +286,7 @@ func TestConnectorWithProxyClass(t *testing.T) { }) opts.proxyClass = pc.Name expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 4. 
Connector.spec.proxyClass field is unset, Connector gets // reconciled and configuration from the ProxyClass is removed from the @@ -296,7 +296,7 @@ func TestConnectorWithProxyClass(t *testing.T) { }) opts.proxyClass = "" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) } func TestConnectorWithAppConnector(t *testing.T) { @@ -352,7 +352,7 @@ func TestConnectorWithAppConnector(t *testing.T) { isAppConnector: true, } expectEqual(t, fc, expectedSecret(t, fc, opts)) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Connector's ready condition should be set to true cn.ObjectMeta.Finalizers = append(cn.ObjectMeta.Finalizers, "tailscale.com/finalizer") diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index dbd6961d7..aacf27d8e 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -71,7 +71,7 @@ func TestTailscaleIngress(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) // 2. Ingress status gets updated with ingress proxy's MagicDNS name // once that becomes available. @@ -98,7 +98,7 @@ func TestTailscaleIngress(t *testing.T) { }) opts.shouldEnableForwardingClusterTrafficViaIngress = true expectReconciled(t, ingR, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 4. Resources get cleaned up when Ingress class is unset mustUpdate(t, fc, "default", "test", func(ing *networkingv1.Ingress) { @@ -162,7 +162,7 @@ func TestTailscaleIngressHostname(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) // 2. Ingress proxy with capability version >= 110 does not have an HTTPS endpoint set mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) { @@ -280,7 +280,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) // 2. Ingress is updated to specify a ProxyClass, ProxyClass is not yet // ready, so proxy resource configuration does not change. @@ -288,7 +288,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { mak.Set(&ing.ObjectMeta.Labels, LabelProxyClass, "custom-metadata") }) expectReconciled(t, ingR, "default", "test") - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) // 3. ProxyClass is set to Ready by proxy-class reconciler. 
Ingress get // reconciled and configuration from the ProxyClass is applied to the @@ -303,7 +303,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { }) expectReconciled(t, ingR, "default", "test") opts.proxyClass = pc.Name - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) // 4. tailscale.com/proxy-class label is removed from the Ingress, the // Ingress gets reconciled and the custom ProxyClass configuration is @@ -313,7 +313,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { }) expectReconciled(t, ingR, "default", "test") opts.proxyClass = "" - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) } func TestTailscaleIngressWithServiceMonitor(t *testing.T) { @@ -608,7 +608,7 @@ func TestEmptyPath(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) expectEvents(t, fr, tt.expectedEvents) }) diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 33bf23e84..ff6ba4f95 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -130,7 +130,7 @@ func TestLoadBalancerClass(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) want.Annotations = nil want.ObjectMeta.Finalizers = []string{"tailscale.com/finalizer"} @@ -268,7 +268,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) want := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -291,7 +291,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { expectEqual(t, fc, want) expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) // Change the tailscale-target-fqdn annotation which should update the // StatefulSet @@ -380,7 +380,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) want := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -403,7 +403,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { expectEqual(t, fc, want) expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) // Change the tailscale-target-ip 
annotation which should update the // StatefulSet @@ -631,7 +631,7 @@ func TestAnnotations(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) want := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -737,7 +737,7 @@ func TestAnnotationIntoLB(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) // Normally the Tailscale proxy pod would come up here and write its info // into the secret. Simulate that, since it would have normally happened at @@ -781,7 +781,7 @@ func TestAnnotationIntoLB(t *testing.T) { expectReconciled(t, sr, "default", "test") // None of the proxy machinery should have changed... expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) // ... but the service should have a LoadBalancer status. want = &corev1.Service{ @@ -867,7 +867,7 @@ func TestLBIntoAnnotation(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) // Normally the Tailscale proxy pod would come up here and write its info // into the secret. Simulate that, then verify reconcile again and verify @@ -927,7 +927,7 @@ func TestLBIntoAnnotation(t *testing.T) { expectReconciled(t, sr, "default", "test") expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) want = &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -1007,7 +1007,7 @@ func TestCustomHostname(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) want := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -1118,7 +1118,7 @@ func TestCustomPriorityClassName(t *testing.T) { app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) } func TestProxyClassForService(t *testing.T) { @@ -1188,7 +1188,7 @@ func TestProxyClassForService(t *testing.T) { } expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 2. The Service gets updated with tailscale.com/proxy-class label // pointing at the 'custom-metadata' ProxyClass. 
The ProxyClass is not @@ -1197,7 +1197,7 @@ func TestProxyClassForService(t *testing.T) { mak.Set(&svc.Labels, LabelProxyClass, "custom-metadata") }) expectReconciled(t, sr, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) expectEqual(t, fc, expectedSecret(t, fc, opts)) // 3. ProxyClass is set to Ready, the Service gets reconciled by the @@ -1213,7 +1213,7 @@ func TestProxyClassForService(t *testing.T) { }) opts.proxyClass = pc.Name expectReconciled(t, sr, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) expectEqual(t, fc, expectedSecret(t, fc, opts), removeAuthKeyIfExistsModifier(t)) // 4. tailscale.com/proxy-class label is removed from the Service, the @@ -1224,7 +1224,7 @@ func TestProxyClassForService(t *testing.T) { }) opts.proxyClass = "" expectReconciled(t, sr, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) } func TestDefaultLoadBalancer(t *testing.T) { @@ -1280,7 +1280,7 @@ func TestDefaultLoadBalancer(t *testing.T) { clusterTargetIP: "10.20.30.40", app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) } func TestProxyFirewallMode(t *testing.T) { @@ -1336,7 +1336,7 @@ func TestProxyFirewallMode(t *testing.T) { clusterTargetIP: "10.20.30.40", app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) } func Test_isMagicDNSName(t *testing.T) { @@ -1617,7 +1617,7 @@ func Test_authKeyRemoval(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 2. Apply update to the Secret that imitates the proxy setting device_id. s := expectedSecret(t, fc, opts) @@ -1691,7 +1691,7 @@ func Test_externalNameService(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 2. Change the ExternalName and verify that changes get propagated. 
mustUpdate(t, sr, "default", "test", func(s *corev1.Service) { @@ -1699,7 +1699,7 @@ func Test_externalNameService(t *testing.T) { }) expectReconciled(t, sr, "default", "test") opts.clusterTargetDNS = "bar.com" - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) } func Test_metricsResourceCreation(t *testing.T) { diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index e7c0590b0..0d5eff551 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -7,7 +7,6 @@ package main import ( "context" - "crypto/sha256" "encoding/json" "errors" "fmt" @@ -237,8 +236,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro r.ensureAddedToGaugeForProxyGroup(pg) r.mu.Unlock() - cfgHash, err := r.ensureConfigSecretsCreated(ctx, pg, proxyClass) - if err != nil { + if err := r.ensureConfigSecretsCreated(ctx, pg, proxyClass); err != nil { return fmt.Errorf("error provisioning config Secrets: %w", err) } // State secrets are precreated so we can use the ProxyGroup CR as their owner ref. @@ -306,33 +304,10 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro proxyType: string(pg.Spec.Type), } ss = applyProxyClassToStatefulSet(proxyClass, ss, cfg, logger) - capver, err := r.capVerForPG(ctx, pg, logger) - if err != nil { - return fmt.Errorf("error getting device info: %w", err) - } updateSS := func(s *appsv1.StatefulSet) { - // This is a temporary workaround to ensure that egress ProxyGroup proxies with capver older than 110 - // are restarted when tailscaled configfile contents have changed. - // This workaround ensures that: - // 1. The hash mechanism is used to trigger pod restarts for proxies below capver 110. - // 2. Proxies above capver are not unnecessarily restarted when the configfile contents change. - // 3. If the hash has alreay been set, but the capver is above 110, the old hash is preserved to avoid - // unnecessary pod restarts that could result in an update loop where capver cannot be determined for a - // restarting Pod and the hash is re-added again. - // Note that this workaround is only applied to egress ProxyGroups, because ingress ProxyGroup was added after capver 110. - // Note also that the hash annotation is only set on updates, not creation, because if the StatefulSet is - // being created, there is no need for a restart. - // TODO(irbekrm): remove this in 1.84. 
- hash := cfgHash - if capver >= 110 { - hash = s.Spec.Template.GetAnnotations()[podAnnotationLastSetConfigFileHash] - } s.Spec = ss.Spec - if hash != "" && pg.Spec.Type == tsapi.ProxyGroupTypeEgress { - mak.Set(&s.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, hash) - } s.ObjectMeta.Labels = ss.ObjectMeta.Labels s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations @@ -449,9 +424,8 @@ func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, id tailc return nil } -func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (hash string, err error) { +func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (err error) { logger := r.logger(pg.Name) - var configSHA256Sum string for i := range pgReplicas(pg) { cfgSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -467,7 +441,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p logger.Debugf("Secret %s/%s already exists", cfgSecret.GetNamespace(), cfgSecret.GetName()) existingCfgSecret = cfgSecret.DeepCopy() } else if !apierrors.IsNotFound(err) { - return "", err + return err } var authKey string @@ -479,65 +453,39 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } authKey, err = newAuthKey(ctx, r.tsClient, tags) if err != nil { - return "", err + return err } } configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, existingCfgSecret) if err != nil { - return "", fmt.Errorf("error creating tailscaled config: %w", err) + return fmt.Errorf("error creating tailscaled config: %w", err) } for cap, cfg := range configs { cfgJSON, err := json.Marshal(cfg) if err != nil { - return "", fmt.Errorf("error marshalling tailscaled config: %w", err) + return fmt.Errorf("error marshalling tailscaled config: %w", err) } mak.Set(&cfgSecret.Data, tsoperator.TailscaledConfigFileName(cap), cfgJSON) } - // The config sha256 sum is a value for a hash annotation used to trigger - // pod restarts when tailscaled config changes. Any config changes apply - // to all replicas, so it is sufficient to only hash the config for the - // first replica. - // - // In future, we're aiming to eliminate restarts altogether and have - // pods dynamically reload their config when it changes. - if i == 0 { - sum := sha256.New() - for _, cfg := range configs { - // Zero out the auth key so it doesn't affect the sha256 hash when we - // remove it from the config after the pods have all authed. Otherwise - // all the pods will need to restart immediately after authing. 
- cfg.AuthKey = nil - b, err := json.Marshal(cfg) - if err != nil { - return "", err - } - if _, err := sum.Write(b); err != nil { - return "", err - } - } - - configSHA256Sum = fmt.Sprintf("%x", sum.Sum(nil)) - } - if existingCfgSecret != nil { if !apiequality.Semantic.DeepEqual(existingCfgSecret, cfgSecret) { logger.Debugf("Updating the existing ProxyGroup config Secret %s", cfgSecret.Name) if err := r.Update(ctx, cfgSecret); err != nil { - return "", err + return err } } } else { logger.Debugf("Creating a new config Secret %s for the ProxyGroup", cfgSecret.Name) if err := r.Create(ctx, cfgSecret); err != nil { - return "", err + return err } } } - return configSHA256Sum, nil + return nil } // ensureAddedToGaugeForProxyGroup ensures the gauge metric for the ProxyGroup resource is updated when the ProxyGroup @@ -707,24 +655,3 @@ type nodeMetadata struct { tsID tailcfg.StableNodeID dnsName string } - -// capVerForPG returns best effort capability version for the given ProxyGroup. It attempts to find it by looking at the -// Secret + Pod for the replica with ordinal 0. Returns -1 if it is not possible to determine the capability version -// (i.e there is no Pod yet). -func (r *ProxyGroupReconciler) capVerForPG(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (tailcfg.CapabilityVersion, error) { - metas, err := r.getNodeMetadata(ctx, pg) - if err != nil { - return -1, fmt.Errorf("error getting node metadata: %w", err) - } - if len(metas) == 0 { - return -1, nil - } - dev, err := deviceInfo(metas[0].stateSecret, metas[0].podUID, logger) - if err != nil { - return -1, fmt.Errorf("error getting device info: %w", err) - } - if dev == nil { - return -1, nil - } - return dev.capver, nil -} diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index f3f87aaac..c556ae94a 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -30,7 +30,6 @@ import ( "tailscale.com/kube/kubetypes" "tailscale.com/tstest" "tailscale.com/types/ptr" - "tailscale.com/util/mak" ) const testProxyImage = "tailscale/tailscale:test" @@ -40,7 +39,6 @@ var defaultProxyClassAnnotations = map[string]string{ } func TestProxyGroup(t *testing.T) { - const initialCfgHash = "6632726be70cf224049580deb4d317bba065915b5fd415461d60ed621c91b196" pc := &tsapi.ProxyClass{ ObjectMeta: metav1.ObjectMeta{ @@ -98,7 +96,7 @@ func TestProxyGroup(t *testing.T) { tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass default-pc is not yet in a ready state, waiting...", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, false, "", pc) + expectProxyGroupResources(t, fc, pg, false, pc) }) t.Run("observe_ProxyGroupCreating_status_reason", func(t *testing.T) { @@ -119,11 +117,11 @@ func TestProxyGroup(t *testing.T) { tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, "", pc) + expectProxyGroupResources(t, fc, pg, true, pc) if expected := 1; reconciler.egressProxyGroups.Len() != expected { t.Fatalf("expected %d egress ProxyGroups, got %d", expected, reconciler.egressProxyGroups.Len()) } - expectProxyGroupResources(t, fc, pg, true, "", pc) + expectProxyGroupResources(t, fc, pg, true, pc) keyReq := tailscale.KeyCapabilities{ Devices: tailscale.KeyDeviceCapabilities{ Create: 
tailscale.KeyDeviceCreateCapabilities{ @@ -155,7 +153,7 @@ func TestProxyGroup(t *testing.T) { } tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc) + expectProxyGroupResources(t, fc, pg, true, pc) }) t.Run("scale_up_to_3", func(t *testing.T) { @@ -166,7 +164,7 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc) + expectProxyGroupResources(t, fc, pg, true, pc) addNodeIDToStateSecrets(t, fc, pg) expectReconciled(t, reconciler, "", pg.Name) @@ -176,7 +174,7 @@ func TestProxyGroup(t *testing.T) { TailnetIPs: []string{"1.2.3.4", "::1"}, }) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc) + expectProxyGroupResources(t, fc, pg, true, pc) }) t.Run("scale_down_to_1", func(t *testing.T) { @@ -189,21 +187,7 @@ func TestProxyGroup(t *testing.T) { pg.Status.Devices = pg.Status.Devices[:1] // truncate to only the first device. expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc) - }) - - t.Run("trigger_config_change_and_observe_new_config_hash", func(t *testing.T) { - pc.Spec.TailscaleConfig = &tsapi.TailscaleConfig{ - AcceptRoutes: true, - } - mustUpdate(t, fc, "", pc.Name, func(p *tsapi.ProxyClass) { - p.Spec = pc.Spec - }) - - expectReconciled(t, reconciler, "", pg.Name) - - expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, "518a86e9fae64f270f8e0ec2a2ea6ca06c10f725035d3d6caca132cd61e42a74", pc) + expectProxyGroupResources(t, fc, pg, true, pc) }) t.Run("enable_metrics", func(t *testing.T) { @@ -608,7 +592,7 @@ func verifyEnvVarNotPresent(t *testing.T, sts *appsv1.StatefulSet, name string) } } -func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool, cfgHash string, proxyClass *tsapi.ProxyClass) { +func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool, proxyClass *tsapi.ProxyClass) { t.Helper() role := pgRole(pg, tsNamespace) @@ -619,9 +603,6 @@ func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.Prox t.Fatal(err) } statefulSet.Annotations = defaultProxyClassAnnotations - if cfgHash != "" { - mak.Set(&statefulSet.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, cfgHash) - } if shouldExist { expectEqual(t, fc, role) diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 70b25f2d2..4c7c3ac67 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -7,7 +7,6 @@ package main import ( "context" - "crypto/sha256" _ "embed" "encoding/json" "errors" @@ -91,8 +90,6 @@ const ( podAnnotationLastSetClusterDNSName = "tailscale.com/operator-last-set-cluster-dns-name" podAnnotationLastSetTailnetTargetIP = "tailscale.com/operator-last-set-ts-tailnet-target-ip" podAnnotationLastSetTailnetTargetFQDN = "tailscale.com/operator-last-set-ts-tailnet-target-fqdn" - // podAnnotationLastSetConfigFileHash is sha256 hash of the current tailscaled configuration contents. 
- podAnnotationLastSetConfigFileHash = "tailscale.com/operator-last-set-config-file-hash" proxyTypeEgress = "egress_service" proxyTypeIngressService = "ingress_service" @@ -110,7 +107,7 @@ var ( // tailscaleManagedLabels are label keys that tailscale operator sets on StatefulSets and Pods. tailscaleManagedLabels = []string{kubetypes.LabelManaged, LabelParentType, LabelParentName, LabelParentNamespace, "app"} // tailscaleManagedAnnotations are annotation keys that tailscale operator sets on StatefulSets and Pods. - tailscaleManagedAnnotations = []string{podAnnotationLastSetClusterIP, podAnnotationLastSetTailnetTargetIP, podAnnotationLastSetTailnetTargetFQDN, podAnnotationLastSetConfigFileHash} + tailscaleManagedAnnotations = []string{podAnnotationLastSetClusterIP, podAnnotationLastSetTailnetTargetIP, podAnnotationLastSetTailnetTargetFQDN} ) type tailscaleSTSConfig struct { @@ -201,11 +198,11 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga } sts.ProxyClass = proxyClass - secretName, tsConfigHash, _, err := a.createOrGetSecret(ctx, logger, sts, hsvc) + secretName, _, err := a.createOrGetSecret(ctx, logger, sts, hsvc) if err != nil { return nil, fmt.Errorf("failed to create or get API key secret: %w", err) } - _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName, tsConfigHash) + _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName) if err != nil { return nil, fmt.Errorf("failed to reconcile statefulset: %w", err) } @@ -335,7 +332,7 @@ func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, l return createOrUpdate(ctx, a.Client, a.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = hsvc.Spec }) } -func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (secretName, hash string, configs tailscaledConfigs, _ error) { +func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (secretName string, configs tailscaledConfigs, _ error) { secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ // Hardcode a -0 suffix so that in future, if we support @@ -351,7 +348,7 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger * logger.Debugf("secret %s/%s already exists", secret.GetNamespace(), secret.GetName()) orig = secret.DeepCopy() } else if !apierrors.IsNotFound(err) { - return "", "", nil, err + return "", nil, err } var authKey string @@ -361,13 +358,13 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger * // ACME account key. sts, err := getSingleObject[appsv1.StatefulSet](ctx, a.Client, a.operatorNamespace, stsC.ChildResourceLabels) if err != nil { - return "", "", nil, err + return "", nil, err } if sts != nil { // StatefulSet exists, so we have already created the secret. // If the secret is missing, they should delete the StatefulSet. logger.Errorf("Tailscale proxy secret doesn't exist, but the corresponding StatefulSet %s/%s already does. Something is wrong, please delete the StatefulSet.", sts.GetNamespace(), sts.GetName()) - return "", "", nil, nil + return "", nil, nil } // Create API Key secret which is going to be used by the statefulset // to authenticate with Tailscale. 
@@ -378,25 +375,20 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger * } authKey, err = newAuthKey(ctx, a.tsClient, tags) if err != nil { - return "", "", nil, err + return "", nil, err } } configs, err := tailscaledConfig(stsC, authKey, orig) if err != nil { - return "", "", nil, fmt.Errorf("error creating tailscaled config: %w", err) + return "", nil, fmt.Errorf("error creating tailscaled config: %w", err) } - hash, err = tailscaledConfigHash(configs) - if err != nil { - return "", "", nil, fmt.Errorf("error calculating hash of tailscaled configs: %w", err) - } - latest := tailcfg.CapabilityVersion(-1) var latestConfig ipn.ConfigVAlpha for key, val := range configs { fn := tsoperator.TailscaledConfigFileName(key) b, err := json.Marshal(val) if err != nil { - return "", "", nil, fmt.Errorf("error marshalling tailscaled config: %w", err) + return "", nil, fmt.Errorf("error marshalling tailscaled config: %w", err) } mak.Set(&secret.StringData, fn, string(b)) if key > latest { @@ -408,7 +400,7 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger * if stsC.ServeConfig != nil { j, err := json.Marshal(stsC.ServeConfig) if err != nil { - return "", "", nil, err + return "", nil, err } mak.Set(&secret.StringData, "serve-config", string(j)) } @@ -416,15 +408,15 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger * if orig != nil { logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(latestConfig)) if err := a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil { - return "", "", nil, err + return "", nil, err } } else { logger.Debugf("creating a new Secret for the proxy with tailscaled config %s", sanitizeConfigBytes(latestConfig)) if err := a.Create(ctx, secret); err != nil { - return "", "", nil, err + return "", nil, err } } - return secret.Name, hash, configs, nil + return secret.Name, configs, nil } // sanitizeConfigBytes returns ipn.ConfigVAlpha in string form with redacted @@ -535,7 +527,7 @@ var proxyYaml []byte //go:embed deploy/manifests/userspace-proxy.yaml var userspaceProxyYaml []byte -func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret, tsConfigHash string) (*appsv1.StatefulSet, error) { +func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret string) (*appsv1.StatefulSet, error) { ss := new(appsv1.StatefulSet) if sts.ServeConfig != nil && sts.ForwardClusterTrafficViaL7IngressProxy != true { // If forwarding cluster traffic via is required we need non-userspace + NET_ADMIN + forwarding if err := yaml.Unmarshal(userspaceProxyYaml, &ss); err != nil { @@ -662,11 +654,6 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S }) } - dev, err := a.DeviceInfo(ctx, sts.ChildResourceLabels, logger) - if err != nil { - return nil, fmt.Errorf("failed to get device info: %w", err) - } - app, err := appInfoForProxy(sts) if err != nil { // No need to error out if now or in future we end up in a @@ -685,25 +672,7 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S ss = applyProxyClassToStatefulSet(sts.ProxyClass, ss, sts, logger) } updateSS := func(s *appsv1.StatefulSet) { - // This is a temporary workaround to ensure that proxies with capver older than 110 - // are restarted when tailscaled 
configfile contents have changed. - // This workaround ensures that: - // 1. The hash mechanism is used to trigger pod restarts for proxies below capver 110. - // 2. Proxies above capver are not unnecessarily restarted when the configfile contents change. - // 3. If the hash has alreay been set, but the capver is above 110, the old hash is preserved to avoid - // unnecessary pod restarts that could result in an update loop where capver cannot be determined for a - // restarting Pod and the hash is re-added again. - // Note that the hash annotation is only set on updates not creation, because if the StatefulSet is - // being created, there is no need for a restart. - // TODO(irbekrm): remove this in 1.84. - hash := tsConfigHash - if dev == nil || dev.capver >= 110 { - hash = s.Spec.Template.GetAnnotations()[podAnnotationLastSetConfigFileHash] - } s.Spec = ss.Spec - if hash != "" { - mak.Set(&s.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, hash) - } s.ObjectMeta.Labels = ss.Labels s.ObjectMeta.Annotations = ss.Annotations } @@ -937,8 +906,7 @@ func readAuthKey(secret *corev1.Secret, key string) (*string, error) { } // tailscaledConfig takes a proxy config, a newly generated auth key if generated and a Secret with the previous proxy -// state and auth key and returns tailscaled config files for currently supported proxy versions and a hash of that -// configuration. +// state and auth key and returns tailscaled config files for currently supported proxy versions. func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ Version: "alpha0", @@ -1031,27 +999,6 @@ type ptrObject[T any] interface { type tailscaledConfigs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha -// hashBytes produces a hash for the provided tailscaled config that is the same across -// different invocations of this code. We do not use the -// tailscale.com/deephash.Hash here because that produces a different hash for -// the same value in different tailscale builds. The hash we are producing here -// is used to determine if the container running the Connector Tailscale node -// needs to be restarted. The container does not need restarting when the only -// thing that changed is operator version (the hash is also exposed to users via -// an annotation and might be confusing if it changes without the config having -// changed). -func tailscaledConfigHash(c tailscaledConfigs) (string, error) { - b, err := json.Marshal(c) - if err != nil { - return "", fmt.Errorf("error marshalling tailscaled configs: %w", err) - } - h := sha256.New() - if _, err = h.Write(b); err != nil { - return "", fmt.Errorf("error calculating hash: %w", err) - } - return fmt.Sprintf("%x", h.Sum(nil)), nil -} - // createOrMaybeUpdate adds obj to the k8s cluster, unless the object already exists, // in which case update is called to make changes to it. If update is nil or returns // an error, the object is returned unmodified. 
diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 619aecc56..56542700d 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -62,7 +62,6 @@ type configOpts struct { subnetRoutes string isExitNode bool isAppConnector bool - confFileHash string serveConfig *ipn.ServeConfig shouldEnableForwardingClusterTrafficViaIngress bool proxyClass string // configuration from the named ProxyClass should be applied to proxy resources @@ -120,9 +119,6 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef ReadOnly: true, MountPath: "/etc/tsconfig", }} - if opts.confFileHash != "" { - mak.Set(&annots, "tailscale.com/operator-last-set-config-file-hash", opts.confFileHash) - } if opts.firewallMode != "" { tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ Name: "TS_DEBUG_FIREWALL_MODE", @@ -358,10 +354,6 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps }, }, } - ss.Spec.Template.Annotations = map[string]string{} - if opts.confFileHash != "" { - ss.Spec.Template.Annotations["tailscale.com/operator-last-set-config-file-hash"] = opts.confFileHash - } // If opts.proxyClass is set, retrieve the ProxyClass and apply // configuration from that to the StatefulSet. if opts.proxyClass != "" { @@ -842,17 +834,6 @@ func (c *fakeTSClient) Deleted() []string { return c.deleted } -// removeHashAnnotation can be used to remove declarative tailscaled config hash -// annotation from proxy StatefulSets to make the tests more maintainable (so -// that we don't have to change the annotation in each test case after any -// change to the configfile contents). -func removeHashAnnotation(sts *appsv1.StatefulSet) { - delete(sts.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash) - if len(sts.Spec.Template.Annotations) == 0 { - sts.Spec.Template.Annotations = nil - } -} - func removeResourceReqs(sts *appsv1.StatefulSet) { if sts != nil { sts.Spec.Template.Spec.Resources = nil From 5a52f80c4cb4fc231faec2790a088c8cb856397f Mon Sep 17 00:00:00 2001 From: okunamayanad Date: Tue, 17 Jun 2025 04:50:01 +0300 Subject: [PATCH 0978/1708] docs: fix typo in commit-messages.md Updates: #cleanup Signed-off-by: okunamayanad --- docs/commit-messages.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/commit-messages.md b/docs/commit-messages.md index b3881eaeb..79b16e4c6 100644 --- a/docs/commit-messages.md +++ b/docs/commit-messages.md @@ -65,7 +65,7 @@ Notably, for the subject (the first line of description): | `foo/bar:fix memory leak` | BAD: no space after colon | | `foo/bar : fix memory leak` | BAD: space before colon | | `foo/bar: fix memory leak Fixes #123` | BAD: the "Fixes" shouldn't be part of the title | - | `!fixup reviewer feedback` | BAD: we don't check in fixup commits; the history should always bissect to a clean, working tree | + | `!fixup reviewer feedback` | BAD: we don't check in fixup commits; the history should always bisect to a clean, working tree | For the body (the rest of the description): From 9af42f425ca48ca2e0dee9b3524ea586675069c6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 19 Jun 2025 10:56:15 -0700 Subject: [PATCH 0979/1708] .github/workflows: shard the Windows builder It's one of the slower ones, so split it up into chunks. 
Updates tailscale/corp#28679 Change-Id: I16a5ba667678bf238c84417a51dda61baefbecf7 Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 21 +++++++++++++++++---- cmd/testwrapper/testwrapper.go | 10 ++++++++++ 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 6d8ab863c..722a73f93 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -210,6 +210,17 @@ jobs: windows: runs-on: windows-2022 needs: gomod-cache + name: Windows (${{ matrix.name || matrix.shard}}) + strategy: + fail-fast: false # don't abort the entire matrix if one element fails + matrix: + include: + - key: "win-bench" + name: "benchmarks" + - key: "win-shard-1-2" + shard: "1/2" + - key: "win-shard-2-2" + shard: "2/2" steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -237,14 +248,16 @@ jobs: ~\AppData\Local\go-build # The -2- here should be incremented when the scheme of data to be # cached changes (e.g. path above changes). - key: ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }} + key: ${{ github.job }}-${{ matrix.key }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }} restore-keys: | - ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }} - ${{ github.job }}-${{ runner.os }}-go-2- + ${{ github.job }}-${{ matrix.key }}-go-2-${{ hashFiles('**/go.sum') }} + ${{ github.job }}-${{ matrix.key }}-go-2- - name: test + if: matrix.key != 'win-bench' # skip on bench builder working-directory: src - run: go run ./cmd/testwrapper ./... + run: go run ./cmd/testwrapper sharded:${{ matrix.shard }} - name: bench all + if: matrix.key == 'win-bench' working-directory: src # Don't use -bench=. -benchtime=1x. # Somewhere in the layers (powershell?) diff --git a/cmd/testwrapper/testwrapper.go b/cmd/testwrapper/testwrapper.go index 53c1b1d05..173edee73 100644 --- a/cmd/testwrapper/testwrapper.go +++ b/cmd/testwrapper/testwrapper.go @@ -213,6 +213,16 @@ func main() { return } + // As a special case, if the packages looks like "sharded:1/2" then shell out to + // ./tool/listpkgs to cut up the package list pieces for each sharded builder. 
+ if nOfM, ok := strings.CutPrefix(packages[0], "sharded:"); ok && len(packages) == 1 { + out, err := exec.Command("go", "run", "tailscale.com/tool/listpkgs", "-shard", nOfM, "./...").Output() + if err != nil { + log.Fatalf("failed to list packages for sharded test: %v", err) + } + packages = strings.Split(strings.TrimSpace(string(out)), "\n") + } + ctx := context.Background() type nextRun struct { tests []*packageTests From ca06d944c5622e89ce1ae8e507149af2f858d2a0 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 19 Jun 2025 18:35:49 -0700 Subject: [PATCH 0980/1708] .github/workflows: try running Windows jobs on bigger VMs Updates tailscale/corp#28679 Change-Id: Iee3f3820d2d8308fff3494e300ad3939e3ed2598 Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 722a73f93..2ebb82a85 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -208,7 +208,10 @@ jobs: find $(go env GOCACHE) -type f -mmin +90 -delete windows: - runs-on: windows-2022 + # windows-8vpu is a 2022 GitHub-managed runner in our + # org with 8 cores and 32 GB of RAM: + # https://github.com/organizations/tailscale/settings/actions/github-hosted-runners/1 + runs-on: windows-8vcpu needs: gomod-cache name: Windows (${{ matrix.name || matrix.shard}}) strategy: From bb085cfa3e434a5a8da2d27eca6e94c49bebc036 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 19 Jun 2025 20:48:50 -0700 Subject: [PATCH 0981/1708] tool: add go toolchain wrapper for Windows go.cmd lets you run just "./tool/go" on Windows the same as Linux/Darwin. The batch script (go.md) then just invokes PowerShell which is more powerful than batch. I wanted this while debugging Windows CI performance by reproducing slow tests on my local Windows laptop. 
Updates tailscale/corp#28679 Change-Id: I6e520968da3cef3032091c1c4f4237f663cefcab Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 16 +++++++++- tool/go.cmd | 2 ++ tool/go.ps1 | 64 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 tool/go.cmd create mode 100644 tool/go.ps1 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2ebb82a85..2e80b44dc 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -220,6 +220,8 @@ jobs: include: - key: "win-bench" name: "benchmarks" + - key: "win-tool-go" + name: "./tool/go" - key: "win-shard-1-2" shard: "1/2" - key: "win-shard-2-2" @@ -231,12 +233,14 @@ jobs: path: src - name: Install Go + if: matrix.key != 'win-tool-go' uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version-file: src/go.mod cache: false - name: Restore Go module cache + if: matrix.key != 'win-tool-go' uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: path: gomodcache @@ -244,6 +248,7 @@ jobs: enableCrossOsArchive: true - name: Restore Cache + if: matrix.key != 'win-tool-go' uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: path: | @@ -255,10 +260,17 @@ jobs: restore-keys: | ${{ github.job }}-${{ matrix.key }}-go-2-${{ hashFiles('**/go.sum') }} ${{ github.job }}-${{ matrix.key }}-go-2- + + - name: test-tool-go + if: matrix.key == 'win-tool-go' + working-directory: src + run: ./tool/go version + - name: test - if: matrix.key != 'win-bench' # skip on bench builder + if: matrix.key != 'win-bench' && matrix.key != 'win-tool-go' # skip on bench builder working-directory: src run: go run ./cmd/testwrapper sharded:${{ matrix.shard }} + - name: bench all if: matrix.key == 'win-bench' working-directory: src @@ -266,7 +278,9 @@ jobs: # Somewhere in the layers (powershell?) # the equals signs cause great confusion. run: go test ./... -bench . 
-benchtime 1x -run "^$" + - name: Tidy cache + if: matrix.key != 'win-tool-go' working-directory: src shell: bash run: | diff --git a/tool/go.cmd b/tool/go.cmd new file mode 100644 index 000000000..51bace110 --- /dev/null +++ b/tool/go.cmd @@ -0,0 +1,2 @@ +@echo off +powershell -NoProfile -ExecutionPolicy Bypass -File "%~dp0go.ps1" %* diff --git a/tool/go.ps1 b/tool/go.ps1 new file mode 100644 index 000000000..49313ffba --- /dev/null +++ b/tool/go.ps1 @@ -0,0 +1,64 @@ +<# + go.ps1 – Tailscale Go toolchain fetching wrapper for Windows/PowerShell + • Reads go.toolchain.rev one dir above this script + • If the requested commit hash isn't cached, downloads and unpacks + https://github.com/tailscale/go/releases/download/build-${REV}/${OS}-${ARCH}.tar.gz + • Finally execs the toolchain's "go" binary, forwarding all args & exit-code +#> + +param( + [Parameter(ValueFromRemainingArguments = $true)] + [string[]] $Args +) + +Set-StrictMode -Version Latest +$ErrorActionPreference = 'Stop' + +if ($env:CI -eq 'true' -and $env:NODEBUG -ne 'true') { + $VerbosePreference = 'Continue' +} + +$repoRoot = Resolve-Path (Join-Path $PSScriptRoot '..') +$REV = (Get-Content (Join-Path $repoRoot 'go.toolchain.rev') -Raw).Trim() + +if ([IO.Path]::IsPathRooted($REV)) { + $toolchain = $REV +} else { + if (-not [string]::IsNullOrWhiteSpace($env:TSGO_CACHE_ROOT)) { + $cacheRoot = $env:TSGO_CACHE_ROOT + } else { + $cacheRoot = Join-Path $env:USERPROFILE '.cache\tsgo' + } + + $toolchain = Join-Path $cacheRoot $REV + $marker = "$toolchain.extracted" + + if (-not (Test-Path $marker)) { + Write-Host "# Downloading Go toolchain $REV" -ForegroundColor Cyan + if (Test-Path $toolchain) { Remove-Item -Recurse -Force $toolchain } + + # Removing the marker file again (even though it shouldn't still exist) + # because the equivalent Bash script also does so (to guard against + # concurrent cache fills?). + # TODO(bradfitz): remove this and add some proper locking instead? 
+ if (Test-Path $marker ) { Remove-Item -Force $marker } + + New-Item -ItemType Directory -Path $cacheRoot -Force | Out-Null + + $url = "https://github.com/tailscale/go/releases/download/build-$REV/windows-amd64.tar.gz" + $tgz = "$toolchain.tar.gz" + Invoke-WebRequest -Uri $url -OutFile $tgz -UseBasicParsing -ErrorAction Stop + + New-Item -ItemType Directory -Path $toolchain -Force | Out-Null + tar --strip-components=1 -xzf $tgz -C $toolchain + Remove-Item $tgz + Set-Content -Path $marker -Value $REV + } +} + +$goExe = Join-Path $toolchain 'bin\go.exe' +if (-not (Test-Path $goExe)) { throw "go executable not found at $goExe" } + +& $goExe @Args +exit $LASTEXITCODE + From 12e92b1b085b72e900e001d2bd5c827ed395bd57 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 20 Jun 2025 10:25:42 -0700 Subject: [PATCH 0982/1708] tsconsensus: skipping slow non-applicable tests on Windows for now Updates #16340 Change-Id: I61b0186295c095f99c5be81dc4dced5853025d35 Signed-off-by: Brad Fitzpatrick --- tsconsensus/tsconsensus_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tsconsensus/tsconsensus_test.go b/tsconsensus/tsconsensus_test.go index d1b92f8a4..bfb6b3e06 100644 --- a/tsconsensus/tsconsensus_test.go +++ b/tsconsensus/tsconsensus_test.go @@ -17,6 +17,7 @@ import ( "net/netip" "os" "path/filepath" + "runtime" "strings" "sync" "testing" @@ -37,6 +38,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/views" + "tailscale.com/util/cibuild" "tailscale.com/util/racebuild" ) @@ -113,6 +115,9 @@ func (f *fsm) Restore(rc io.ReadCloser) error { } func testConfig(t *testing.T) { + if runtime.GOOS == "windows" && cibuild.On() { + t.Skip("cmd/natc isn't supported on Windows, so skipping tsconsensus tests on CI for now; see https://github.com/tailscale/tailscale/issues/16340") + } // -race AND Parallel makes things start to take too long. if !racebuild.On { t.Parallel() From d3bb34c628b01953c1f064d75d01c0a41e4d41ab Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 20 Jun 2025 15:00:28 -0700 Subject: [PATCH 0983/1708] wgengine/magicsock: generate relay server set from tailnet policy (#16331) Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 173 +++++++++++++++++--- wgengine/magicsock/magicsock_test.go | 202 +++++++++++++++++++++++- wgengine/magicsock/relaymanager.go | 29 ++++ wgengine/magicsock/relaymanager_test.go | 6 + 4 files changed, 386 insertions(+), 24 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index bfc7afba9..0679a4ebd 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -14,6 +14,7 @@ import ( "expvar" "fmt" "io" + "math" "net" "net/netip" "reflect" @@ -348,17 +349,19 @@ type Conn struct { // magicsock could do with any complexity reduction it can get. 
netInfoLast *tailcfg.NetInfo - derpMap *tailcfg.DERPMap // nil (or zero regions/nodes) means DERP is disabled - peers views.Slice[tailcfg.NodeView] // from last onNodeViewsUpdate update - lastFlags debugFlags // at time of last onNodeViewsUpdate - firstAddrForTest netip.Addr // from last onNodeViewsUpdate update; for tests only - privateKey key.NodePrivate // WireGuard private key for this node - everHadKey bool // whether we ever had a non-zero private key - myDerp int // nearest DERP region ID; 0 means none/unknown - homeless bool // if true, don't try to find & stay conneted to a DERP home (myDerp will stay 0) - derpStarted chan struct{} // closed on first connection to DERP; for tests & cleaner Close - activeDerp map[int]activeDerp // DERP regionID -> connection to a node in that region - prevDerp map[int]*syncs.WaitGroupChan + derpMap *tailcfg.DERPMap // nil (or zero regions/nodes) means DERP is disabled + self tailcfg.NodeView // from last onNodeViewsUpdate + peers views.Slice[tailcfg.NodeView] // from last onNodeViewsUpdate, sorted by Node.ID; Note: [netmap.NodeMutation]'s rx'd in onNodeMutationsUpdate are never applied + filt *filter.Filter // from last onFilterUpdate + relayClientEnabled bool // whether we can allocate UDP relay endpoints on UDP relay servers + lastFlags debugFlags // at time of last onNodeViewsUpdate + privateKey key.NodePrivate // WireGuard private key for this node + everHadKey bool // whether we ever had a non-zero private key + myDerp int // nearest DERP region ID; 0 means none/unknown + homeless bool // if true, don't try to find & stay conneted to a DERP home (myDerp will stay 0) + derpStarted chan struct{} // closed on first connection to DERP; for tests & cleaner Close + activeDerp map[int]activeDerp // DERP regionID -> connection to a node in that region + prevDerp map[int]*syncs.WaitGroupChan // derpRoute contains optional alternate routes to use as an // optimization instead of contacting a peer via their home @@ -516,7 +519,7 @@ func (o *Options) derpActiveFunc() func() { // this type out of magicsock. type NodeViewsUpdate struct { SelfNode tailcfg.NodeView - Peers []tailcfg.NodeView + Peers []tailcfg.NodeView // sorted by Node.ID } // NodeMutationsUpdate represents an update event of one or more @@ -2555,38 +2558,160 @@ func (c *Conn) SetProbeUDPLifetime(v bool) { func capVerIsRelayCapable(version tailcfg.CapabilityVersion) bool { // TODO(jwhited): implement once capVer is bumped - return false + return version == math.MinInt32 +} + +func capVerIsRelayServerCapable(version tailcfg.CapabilityVersion) bool { + // TODO(jwhited): implement once capVer is bumped + return version == math.MinInt32 } +// onFilterUpdate is called when a [FilterUpdate] is received over the +// [eventbus.Bus]. func (c *Conn) onFilterUpdate(f FilterUpdate) { - // TODO(jwhited): implement + c.mu.Lock() + c.filt = f.Filter + self := c.self + peers := c.peers + relayClientEnabled := c.relayClientEnabled + c.mu.Unlock() // release c.mu before potentially calling c.updateRelayServersSet which is O(m * n) + + if !relayClientEnabled { + // Early return if we cannot operate as a relay client. + return + } + + // The filter has changed, and we are operating as a relay server client. + // Re-evaluate it in order to produce an updated relay server set. + c.updateRelayServersSet(f.Filter, self, peers) +} + +// updateRelayServersSet iterates all peers, evaluating filt for each one in +// order to determine which peers are relay server candidates. 
filt, self, and +// peers are passed as args (vs c.mu-guarded fields) to enable callers to +// release c.mu before calling as this is O(m * n) (we iterate all cap rules 'm' +// in filt for every peer 'n'). +// TODO: Optimize this so that it's not O(m * n). This might involve: +// 1. Changes to [filter.Filter], e.g. adding a CapsWithValues() to check for +// a given capability instead of building and returning a map of all of +// them. +// 2. Moving this work upstream into [nodeBackend] or similar, and publishing +// the computed result over the eventbus instead. +func (c *Conn) updateRelayServersSet(filt *filter.Filter, self tailcfg.NodeView, peers views.Slice[tailcfg.NodeView]) { + relayServers := make(set.Set[netip.AddrPort]) + for _, peer := range peers.All() { + peerAPI := peerAPIIfCandidateRelayServer(filt, self, peer) + if peerAPI.IsValid() { + relayServers.Add(peerAPI) + } + } + c.relayManager.handleRelayServersSet(relayServers) +} + +// peerAPIIfCandidateRelayServer returns the peer API address of peer if it +// is considered to be a candidate relay server upon evaluation against filt and +// self, otherwise it returns a zero value. +func peerAPIIfCandidateRelayServer(filt *filter.Filter, self, peer tailcfg.NodeView) netip.AddrPort { + if filt == nil || + !self.Valid() || + !peer.Valid() || + !capVerIsRelayServerCapable(peer.Cap()) || + !peer.Hostinfo().Valid() { + return netip.AddrPort{} + } + for _, peerPrefix := range peer.Addresses().All() { + if !peerPrefix.IsSingleIP() { + continue + } + peerAddr := peerPrefix.Addr() + for _, selfPrefix := range self.Addresses().All() { + if !selfPrefix.IsSingleIP() { + continue + } + selfAddr := selfPrefix.Addr() + if selfAddr.BitLen() == peerAddr.BitLen() { // same address family + if filt.CapsWithValues(peerAddr, selfAddr).HasCapability(tailcfg.PeerCapabilityRelayTarget) { + for _, s := range peer.Hostinfo().Services().All() { + if peerAddr.Is4() && s.Proto == tailcfg.PeerAPI4 || + peerAddr.Is6() && s.Proto == tailcfg.PeerAPI6 { + return netip.AddrPortFrom(peerAddr, s.Port) + } + } + return netip.AddrPort{} // no peerAPI + } else { + // [nodeBackend.peerCapsLocked] only returns/considers the + // [tailcfg.PeerCapMap] between the passed src and the + // _first_ host (/32 or /128) address for self. We are + // consistent with that behavior here. If self and peer + // host addresses are of the same address family they either + // have the capability or not. We do not check against + // additional host addresses of the same address family. + return netip.AddrPort{} + } + } + } + } + return netip.AddrPort{} } // onNodeViewsUpdate is called when a [NodeViewsUpdate] is received over the // [eventbus.Bus]. func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { + peersChanged := c.updateNodes(update) + + relayClientEnabled := update.SelfNode.Valid() && + update.SelfNode.HasCap(tailcfg.NodeAttrRelayClient) && + envknob.UseWIPCode() + + c.mu.Lock() + relayClientChanged := c.relayClientEnabled != relayClientEnabled + c.relayClientEnabled = relayClientEnabled + filt := c.filt + self := c.self + peers := c.peers + c.mu.Unlock() // release c.mu before potentially calling c.updateRelayServersSet which is O(m * n) + + if peersChanged || relayClientChanged { + if !relayClientEnabled { + c.relayManager.handleRelayServersSet(nil) + } else { + c.updateRelayServersSet(filt, self, peers) + } + } +} + +// updateNodes updates [Conn] to reflect the [tailcfg.NodeView]'s contained +// in update. 
It returns true if update.Peers was unequal to c.peers, otherwise +// false. +func (c *Conn) updateNodes(update NodeViewsUpdate) (peersChanged bool) { c.mu.Lock() defer c.mu.Unlock() if c.closed { - return + return false } priorPeers := c.peers metricNumPeers.Set(int64(len(update.Peers))) - // Update c.netMap regardless, before the following early return. + // Update c.self & c.peers regardless, before the following early return. + c.self = update.SelfNode curPeers := views.SliceOf(update.Peers) c.peers = curPeers + // [debugFlags] are mutable in [Conn.SetSilentDisco] & + // [Conn.SetProbeUDPLifetime]. These setters are passed [controlknobs.Knobs] + // values by [ipnlocal.LocalBackend] around netmap reception. + // [controlknobs.Knobs] are simply self [tailcfg.NodeCapability]'s. They are + // useful as a global view of notable feature toggles, but the magicsock + // setters are completely unnecessary as we have the same values right here + // (update.SelfNode.Capabilities) at a time they are considered most + // up-to-date. + // TODO: mutate [debugFlags] here instead of in various [Conn] setters. flags := c.debugFlagsLocked() - if update.SelfNode.Valid() && update.SelfNode.Addresses().Len() > 0 { - c.firstAddrForTest = update.SelfNode.Addresses().At(0).Addr() - } else { - c.firstAddrForTest = netip.Addr{} - } - if nodesEqual(priorPeers, curPeers) && c.lastFlags == flags { + peersChanged = !nodesEqual(priorPeers, curPeers) + if !peersChanged && c.lastFlags == flags { // The rest of this function is all adjusting state for peers that have // changed. But if the set of peers is equal and the debug flags (for // silent disco and probe UDP lifetime) haven't changed, there is no @@ -2728,6 +2853,8 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { delete(c.discoInfo, dk) } } + + return peersChanged } func devPanicf(format string, a ...any) { @@ -3245,7 +3372,7 @@ func simpleDur(d time.Duration) time.Duration { } // onNodeMutationsUpdate is called when a [NodeMutationsUpdate] is received over -// the [eventbus.Bus]. +// the [eventbus.Bus]. Note: It does not apply these mutations to c.peers. 
func (c *Conn) onNodeMutationsUpdate(update NodeMutationsUpdate) { c.mu.Lock() defer c.mu.Unlock() diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 7fa062fa8..8aa9a09d2 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -19,6 +19,7 @@ import ( "net/http/httptest" "net/netip" "os" + "reflect" "runtime" "strconv" "strings" @@ -71,6 +72,7 @@ import ( "tailscale.com/util/slicesx" "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/filter/filtertype" "tailscale.com/wgengine/wgcfg" "tailscale.com/wgengine/wgcfg/nmcfg" "tailscale.com/wgengine/wglog" @@ -275,7 +277,10 @@ func (s *magicStack) Status() *ipnstate.Status { func (s *magicStack) IP() netip.Addr { for deadline := time.Now().Add(5 * time.Second); time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) { s.conn.mu.Lock() - addr := s.conn.firstAddrForTest + var addr netip.Addr + if s.conn.self.Valid() && s.conn.self.Addresses().Len() > 0 { + addr = s.conn.self.Addresses().At(0).Addr() + } s.conn.mu.Unlock() if addr.IsValid() { return addr @@ -3378,3 +3383,198 @@ func Test_virtualNetworkID(t *testing.T) { }) } } + +func Test_peerAPIIfCandidateRelayServer(t *testing.T) { + selfOnlyIPv4 := &tailcfg.Node{ + Cap: math.MinInt32, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("1.1.1.1/32"), + }, + } + selfOnlyIPv6 := selfOnlyIPv4.Clone() + selfOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::1/128") + + peerHostinfo := &tailcfg.Hostinfo{ + Services: []tailcfg.Service{ + { + Proto: tailcfg.PeerAPI4, + Port: 4, + }, + { + Proto: tailcfg.PeerAPI6, + Port: 6, + }, + }, + } + peerOnlyIPv4 := &tailcfg.Node{ + Cap: math.MinInt32, + CapMap: map[tailcfg.NodeCapability][]tailcfg.RawMessage{ + tailcfg.NodeAttrRelayServer: nil, + }, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("2.2.2.2/32"), + }, + Hostinfo: peerHostinfo.View(), + } + + peerOnlyIPv6 := peerOnlyIPv4.Clone() + peerOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::2/128") + + peerOnlyIPv4ZeroCapVer := peerOnlyIPv4.Clone() + peerOnlyIPv4ZeroCapVer.Cap = 0 + + peerOnlyIPv4NilHostinfo := peerOnlyIPv4.Clone() + peerOnlyIPv4NilHostinfo.Hostinfo = tailcfg.HostinfoView{} + + tests := []struct { + name string + filt *filter.Filter + self tailcfg.NodeView + peer tailcfg.NodeView + want netip.AddrPort + }{ + { + name: "match v4", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv4.View(), + peer: peerOnlyIPv4.View(), + want: netip.MustParseAddrPort("2.2.2.2:4"), + }, + { + name: "match v6", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("::2/128")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("::1/128"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv6.View(), + peer: peerOnlyIPv6.View(), + want: netip.MustParseAddrPort("[::2]:6"), + }, + { + name: "no match dst", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("::2/128")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("::3/128"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv6.View(), + peer: peerOnlyIPv6.View(), + }, + { + name: "no 
match peer cap", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("::2/128")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("::1/128"), + Cap: tailcfg.PeerCapabilityIngress, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv6.View(), + peer: peerOnlyIPv6.View(), + }, + { + name: "cap ver not relay capable", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: peerOnlyIPv4.View(), + peer: peerOnlyIPv4ZeroCapVer.View(), + }, + { + name: "nil filt", + filt: nil, + self: selfOnlyIPv4.View(), + peer: peerOnlyIPv4.View(), + }, + { + name: "nil self", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: tailcfg.NodeView{}, + peer: peerOnlyIPv4.View(), + }, + { + name: "nil peer", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv4.View(), + peer: tailcfg.NodeView{}, + }, + { + name: "nil peer hostinfo", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv4.View(), + peer: peerOnlyIPv4NilHostinfo.View(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := peerAPIIfCandidateRelayServer(tt.filt, tt.self, tt.peer); !reflect.DeepEqual(got, tt.want) { + t.Errorf("peerAPIIfCandidateRelayServer() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 2b636dc57..3c8ceb2de 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -51,6 +51,7 @@ type relayManager struct { cancelWorkCh chan *endpoint newServerEndpointCh chan newRelayServerEndpointEvent rxHandshakeDiscoMsgCh chan relayHandshakeDiscoMsgEvent + serversCh chan set.Set[netip.AddrPort] discoInfoMu sync.Mutex // guards the following field discoInfoByServerDisco map[key.DiscoPublic]*relayHandshakeDiscoInfo @@ -174,7 +175,29 @@ func (r *relayManager) runLoop() { if !r.hasActiveWorkRunLoop() { return } + case serversUpdate := <-r.serversCh: + r.handleServersUpdateRunLoop(serversUpdate) + if !r.hasActiveWorkRunLoop() { + return + } + } + } +} + +func (r *relayManager) handleServersUpdateRunLoop(update set.Set[netip.AddrPort]) { + for k, v := range r.serversByAddrPort { + if !update.Contains(k) { + delete(r.serversByAddrPort, k) + delete(r.serversByDisco, v) + } + } + for _, v := range update.Slice() { + _, ok := r.serversByAddrPort[v] + if ok { + // don't zero known disco keys + continue } + r.serversByAddrPort[v] = key.DiscoPublic{} } } @@ -215,6 +238,7 @@ func (r *relayManager) init() { r.cancelWorkCh = make(chan *endpoint) r.newServerEndpointCh = make(chan newRelayServerEndpointEvent) r.rxHandshakeDiscoMsgCh = 
make(chan relayHandshakeDiscoMsgEvent) + r.serversCh = make(chan set.Set[netip.AddrPort]) r.runLoopStoppedCh = make(chan struct{}, 1) r.runLoopStoppedCh <- struct{}{} }) @@ -299,6 +323,11 @@ func (r *relayManager) handleGeneveEncapDiscoMsgNotBestAddr(dm disco.Message, di relayManagerInputEvent(r, nil, &r.rxHandshakeDiscoMsgCh, relayHandshakeDiscoMsgEvent{msg: dm, disco: di.discoKey, from: src.ap, vni: src.vni.get(), at: time.Now()}) } +// handleRelayServersSet handles an update of the complete relay server set. +func (r *relayManager) handleRelayServersSet(servers set.Set[netip.AddrPort]) { + relayManagerInputEvent(r, nil, &r.serversCh, servers) +} + // relayManagerInputEvent initializes [relayManager] if necessary, starts // relayManager.runLoop() if it is not running, and writes 'event' on 'eventCh'. // diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index be0582669..6055c2d72 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -4,10 +4,12 @@ package magicsock import ( + "net/netip" "testing" "tailscale.com/disco" "tailscale.com/types/key" + "tailscale.com/util/set" ) func TestRelayManagerInitAndIdle(t *testing.T) { @@ -26,4 +28,8 @@ func TestRelayManagerInitAndIdle(t *testing.T) { rm = relayManager{} rm.handleGeneveEncapDiscoMsgNotBestAddr(&disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, epAddr{}) <-rm.runLoopStoppedCh + + rm = relayManager{} + rm.handleRelayServersSet(make(set.Set[netip.AddrPort])) + <-rm.runLoopStoppedCh } From cd9b9a8cadfd03b9e304ca8a2ff0900d016387fc Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 20 Jun 2025 19:23:52 -0700 Subject: [PATCH 0984/1708] wgengine/magicsock: fix relay endpoint allocation URL (#16344) Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/relaymanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 3c8ceb2de..81a71b20e 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -737,7 +737,7 @@ func (r *relayManager) allocateSingleServer(ctx context.Context, wg *sync.WaitGr const reqTimeout = time.Second * 10 reqCtx, cancel := context.WithTimeout(ctx, reqTimeout) defer cancel() - req, err := http.NewRequestWithContext(reqCtx, httpm.POST, "http://"+server.String()+"/relay/endpoint", &b) + req, err := http.NewRequestWithContext(reqCtx, httpm.POST, "http://"+server.String()+"/v0/relay/endpoint", &b) if err != nil { return } From e935a28a196f4ccb212ed44c23b62f4e40a7f243 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Sat, 21 Jun 2025 19:09:19 -0700 Subject: [PATCH 0985/1708] wgengine/magicsock: set rxDiscoMsgCh field in relayHandshakeWork (#16349) Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/relaymanager.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 81a71b20e..3e72ff0f0 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -567,11 +567,12 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay // We're ready to start a new handshake. 
ctx, cancel := context.WithCancel(context.Background()) work := &relayHandshakeWork{ - ep: newServerEndpoint.ep, - se: newServerEndpoint.se, - doneCh: make(chan relayEndpointHandshakeWorkDoneEvent, 1), - ctx: ctx, - cancel: cancel, + ep: newServerEndpoint.ep, + se: newServerEndpoint.se, + rxDiscoMsgCh: make(chan relayHandshakeDiscoMsgEvent), + doneCh: make(chan relayEndpointHandshakeWorkDoneEvent, 1), + ctx: ctx, + cancel: cancel, } if byServerDisco == nil { byServerDisco = make(map[key.DiscoPublic]*relayHandshakeWork) From 61958f531c5c6a004415b46eb341f2dc289288cd Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Sat, 21 Jun 2025 19:09:36 -0700 Subject: [PATCH 0986/1708] wgengine/magicsock: set conn field in relayHandshakeDiscoMsgEvent (#16348) Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 2 +- wgengine/magicsock/magicsock.go | 6 +++--- wgengine/magicsock/relaymanager.go | 4 ++-- wgengine/magicsock/relaymanager_test.go | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 23316dcb4..fb5a28c28 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1601,7 +1601,7 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd if src.vni.isSet() && src != de.bestAddr.epAddr { // "src" is not our bestAddr, but [relayManager] might be in the // middle of probing it, awaiting pong reception. Make it aware. - de.c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(m, di, src) + de.c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(de.c, m, di, src) return false } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 0679a4ebd..a96eaf3d8 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2053,7 +2053,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake c.logf("[unexpected] %T packets should not come from a relay server with Geneve control bit set", dm) return } - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(challenge, di, src) + c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(c, challenge, di, src) return } @@ -2075,7 +2075,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake return true }) if !knownTxID && src.vni.isSet() { - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(dm, di, src) + c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(c, dm, di, src) } case *disco.CallMeMaybe, *disco.CallMeMaybeVia: var via *disco.CallMeMaybeVia @@ -2221,7 +2221,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN // using it as a bestAddr. [relayManager] might be in the middle of // probing it or attempting to set it as best via // [endpoint.relayEndpointReady()]. Make [relayManager] aware. - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(dm, di, src) + c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(c, dm, di, src) return } default: // no VNI diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 3e72ff0f0..e655ec992 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -319,8 +319,8 @@ func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, dm *disco.CallMeMaybeV // handleGeneveEncapDiscoMsgNotBestAddr handles reception of Geneve-encapsulated // disco messages if they are not associated with any known // [*endpoint.bestAddr]. 
-func (r *relayManager) handleGeneveEncapDiscoMsgNotBestAddr(dm disco.Message, di *discoInfo, src epAddr) { - relayManagerInputEvent(r, nil, &r.rxHandshakeDiscoMsgCh, relayHandshakeDiscoMsgEvent{msg: dm, disco: di.discoKey, from: src.ap, vni: src.vni.get(), at: time.Now()}) +func (r *relayManager) handleGeneveEncapDiscoMsgNotBestAddr(conn *Conn, dm disco.Message, di *discoInfo, src epAddr) { + relayManagerInputEvent(r, nil, &r.rxHandshakeDiscoMsgCh, relayHandshakeDiscoMsgEvent{conn: conn, msg: dm, disco: di.discoKey, from: src.ap, vni: src.vni.get(), at: time.Now()}) } // handleRelayServersSet handles an update of the complete relay server set. diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index 6055c2d72..de282b499 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -26,7 +26,7 @@ func TestRelayManagerInitAndIdle(t *testing.T) { <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleGeneveEncapDiscoMsgNotBestAddr(&disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, epAddr{}) + rm.handleGeneveEncapDiscoMsgNotBestAddr(&Conn{discoPrivate: key.NewDisco()}, &disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, epAddr{}) <-rm.runLoopStoppedCh rm = relayManager{} From 0905936c45b6380d65d347e3cb9037f64991b8f4 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Sat, 21 Jun 2025 21:14:42 -0700 Subject: [PATCH 0987/1708] wgengine/magicsock: set Geneve header protocol for WireGuard (#16350) Otherwise receives interpret as naked WireGuard. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/batching_conn_linux.go | 2 ++ wgengine/magicsock/rebinding_conn.go | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/batching_conn_linux.go b/wgengine/magicsock/batching_conn_linux.go index c9aaff168..a0607c624 100644 --- a/wgengine/magicsock/batching_conn_linux.go +++ b/wgengine/magicsock/batching_conn_linux.go @@ -114,6 +114,7 @@ func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, vni virtualNetwo vniIsSet := vni.isSet() var gh packet.GeneveHeader if vniIsSet { + gh.Protocol = packet.GeneveProtocolWireGuard gh.VNI = vni.get() } for i, buff := range buffs { @@ -202,6 +203,7 @@ retry: vniIsSet := addr.vni.isSet() var gh packet.GeneveHeader if vniIsSet { + gh.Protocol = packet.GeneveProtocolWireGuard gh.VNI = addr.vni.get() offset -= packet.GeneveFixedHeaderLength } diff --git a/wgengine/magicsock/rebinding_conn.go b/wgengine/magicsock/rebinding_conn.go index 51e97c8cc..8b9ad4bb0 100644 --- a/wgengine/magicsock/rebinding_conn.go +++ b/wgengine/magicsock/rebinding_conn.go @@ -85,7 +85,8 @@ func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr epAddr, offset int) var gh packet.GeneveHeader if vniIsSet { gh = packet.GeneveHeader{ - VNI: addr.vni.get(), + Protocol: packet.GeneveProtocolWireGuard, + VNI: addr.vni.get(), } } for _, buf := range buffs { From b3e74367d84650600b25162510d8beaf8a460240 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 22 Jun 2025 21:15:20 -0700 Subject: [PATCH 0988/1708] tool: rename go.ps1 to go-win.ps1 for cmd.exe+Powershell compat This tweaks the just-added ./tool/go.{cmd,ps1} port of ./tool/go for Windows. Otherwise in Windows Terminal in Powershell, running just ".\tool\go" picks up go.ps1 before go.cmd, which means execution gets denied without the cmd script's -ExecutionPolicy Bypass part letting it work. This makes it work in both cmd.exe and in Powershell. 
Updates tailscale/corp#28679 Change-Id: Iaf628a9fd6cb95670633b2dbdb635dfb8afaa006 Signed-off-by: Brad Fitzpatrick --- tool/{go.ps1 => go-win.ps1} | 0 tool/go.cmd | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename tool/{go.ps1 => go-win.ps1} (100%) diff --git a/tool/go.ps1 b/tool/go-win.ps1 similarity index 100% rename from tool/go.ps1 rename to tool/go-win.ps1 diff --git a/tool/go.cmd b/tool/go.cmd index 51bace110..04172a28d 100644 --- a/tool/go.cmd +++ b/tool/go.cmd @@ -1,2 +1,2 @@ @echo off -powershell -NoProfile -ExecutionPolicy Bypass -File "%~dp0go.ps1" %* +powershell -NoProfile -ExecutionPolicy Bypass -File "%~dp0go-win.ps1" %* From 9309760263e7c7c34522871752cf1da08b82b72a Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Thu, 19 Jun 2025 11:31:47 +0200 Subject: [PATCH 0989/1708] util/prompt: make yes/no prompt reusable Updates #19445 Signed-off-by: Kristoffer Dalby --- cmd/tailscale/cli/serve_v2.go | 3 ++- cmd/tailscale/cli/update.go | 18 ++---------------- cmd/tailscale/depaware.txt | 1 + util/prompt/prompt.go | 24 ++++++++++++++++++++++++ 4 files changed, 29 insertions(+), 17 deletions(-) create mode 100644 util/prompt/prompt.go diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 3e173ce28..bb51fb7d0 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -28,6 +28,7 @@ import ( "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" "tailscale.com/util/mak" + "tailscale.com/util/prompt" "tailscale.com/util/slicesx" "tailscale.com/version" ) @@ -757,7 +758,7 @@ func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, dnsName string, srvPort u if len(mounts) > 1 { msg := fmt.Sprintf("Are you sure you want to delete %d handlers under port %s?", len(mounts), portStr) - if !e.yes && !promptYesNo(msg) { + if !e.yes && !prompt.YesNo(msg) { return nil } } diff --git a/cmd/tailscale/cli/update.go b/cmd/tailscale/cli/update.go index 69d1aa97b..7c0269f6a 100644 --- a/cmd/tailscale/cli/update.go +++ b/cmd/tailscale/cli/update.go @@ -9,10 +9,10 @@ import ( "flag" "fmt" "runtime" - "strings" "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/clientupdate" + "tailscale.com/util/prompt" "tailscale.com/version" "tailscale.com/version/distro" ) @@ -87,19 +87,5 @@ func confirmUpdate(ver string) bool { } msg := fmt.Sprintf("This will update Tailscale from %v to %v. Continue?", version.Short(), ver) - return promptYesNo(msg) -} - -// PromptYesNo takes a question and prompts the user to answer the -// question with a yes or no. It appends a [y/n] to the message. 
-func promptYesNo(msg string) bool { - fmt.Print(msg + " [y/n] ") - var resp string - fmt.Scanln(&resp) - resp = strings.ToLower(resp) - switch resp { - case "y", "yes", "sure": - return true - } - return false + return prompt.YesNo(msg) } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 69d054ea4..e44e20e8c 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -172,6 +172,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/multierr from tailscale.com/control/controlhttp+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto + tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli tailscale.com/util/quarantine from tailscale.com/cmd/tailscale/cli tailscale.com/util/rands from tailscale.com/tsweb tailscale.com/util/set from tailscale.com/derp+ diff --git a/util/prompt/prompt.go b/util/prompt/prompt.go new file mode 100644 index 000000000..4e589ceb3 --- /dev/null +++ b/util/prompt/prompt.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package prompt provides a simple way to prompt the user for input. +package prompt + +import ( + "fmt" + "strings" +) + +// YesNo takes a question and prompts the user to answer the +// question with a yes or no. It appends a [y/n] to the message. +func YesNo(msg string) bool { + fmt.Print(msg + " [y/n] ") + var resp string + fmt.Scanln(&resp) + resp = strings.ToLower(resp) + switch resp { + case "y", "yes", "sure": + return true + } + return false +} From 01982552663848378ba6cd6ac27013fe4d65f84b Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Thu, 19 Jun 2025 11:32:54 +0200 Subject: [PATCH 0990/1708] cmd/tailscale: warn user about nllock key removal without resigning Fixes #19445 Signed-off-by: Kristoffer Dalby --- cmd/tailscale/cli/network-lock.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index ae1e90bbf..871a931b5 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -17,12 +17,14 @@ import ( "strings" "time" + "github.com/mattn/go-isatty" "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/ipn/ipnstate" "tailscale.com/tka" "tailscale.com/tsconst" "tailscale.com/types/key" "tailscale.com/types/tkatype" + "tailscale.com/util/prompt" ) var netlockCmd = &ffcli.Command{ @@ -369,6 +371,18 @@ func runNetworkLockRemove(ctx context.Context, args []string) error { } } } + } else { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Printf(`Warning +Removal of a signing key(s) without resigning nodes (--re-sign=false) +will cause any nodes signed by the the given key(s) to be locked out +of the Tailscale network. Proceed with caution. +`) + if !prompt.YesNo("Are you sure you want to remove the signing key(s)?") { + fmt.Printf("aborting removal of signing key(s)\n") + os.Exit(0) + } + } } return localClient.NetworkLockModify(ctx, nil, removeKeys) From 9288efe592d45c2578278c61ac0bddd4db57e901 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 23 Jun 2025 08:53:29 -0700 Subject: [PATCH 0991/1708] wgengine/magicsock: remove premature return in handshakeServerEndpoint (#16351) Any return underneath this select case must belong to a type switch case. 
Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/relaymanager.go | 1 - 1 file changed, 1 deletion(-) diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index e655ec992..4ccfbb501 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -691,7 +691,6 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { // unexpected message type, silently discard continue } - return case <-timer.C: // The handshake timed out. return From a589863d61725bf027bb03a1389c7900dce611b8 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 23 Jun 2025 15:50:43 -0700 Subject: [PATCH 0992/1708] feature/relayserver,net/udprelay,wgengine/magicsock: implement retry (#16347) udprelay.Server is lazily initialized when the first request is received over peerAPI. These early requests have a high chance of failure until the first address discovery cycle has completed. Return an ErrServerNotReady error until the first address discovery cycle has completed, and plumb retry handling for this error all the way back to the client in relayManager. relayManager can now retry after a few seconds instead of waiting for the next path discovery cycle, which could take another minute or longer. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 8 +++ net/udprelay/server.go | 37 +++++++++---- wgengine/magicsock/relaymanager.go | 85 ++++++++++++++++++++++-------- 3 files changed, 96 insertions(+), 34 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index a38587aa3..4634f3ac2 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -8,9 +8,11 @@ package relayserver import ( "encoding/json" "errors" + "fmt" "io" "net/http" "sync" + "time" "tailscale.com/envknob" "tailscale.com/feature" @@ -184,6 +186,12 @@ func handlePeerAPIRelayAllocateEndpoint(h ipnlocal.PeerAPIHandler, w http.Respon } ep, err := rs.AllocateEndpoint(allocateEndpointReq.DiscoKeys[0], allocateEndpointReq.DiscoKeys[1]) if err != nil { + var notReady udprelay.ErrServerNotReady + if errors.As(err, ¬Ready) { + w.Header().Set("Retry-After", fmt.Sprintf("%d", notReady.RetryAfter.Round(time.Second)/time.Second)) + httpErrAndLog(err.Error(), http.StatusServiceUnavailable) + return + } httpErrAndLog(err.Error(), http.StatusInternalServerError) return } diff --git a/net/udprelay/server.go b/net/udprelay/server.go index f7f5868c0..8b9e95fb1 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -63,13 +63,14 @@ type Server struct { closeCh chan struct{} netChecker *netcheck.Client - mu sync.Mutex // guards the following fields - addrPorts []netip.AddrPort // the ip:port pairs returned as candidate endpoints - closed bool - lamportID uint64 - vniPool []uint32 // the pool of available VNIs - byVNI map[uint32]*serverEndpoint - byDisco map[pairOfDiscoPubKeys]*serverEndpoint + mu sync.Mutex // guards the following fields + addrDiscoveryOnce bool // addrDiscovery completed once (successfully or unsuccessfully) + addrPorts []netip.AddrPort // the ip:port pairs returned as candidate endpoints + closed bool + lamportID uint64 + vniPool []uint32 // the pool of available VNIs + byVNI map[uint32]*serverEndpoint + byDisco map[pairOfDiscoPubKeys]*serverEndpoint } // pairOfDiscoPubKeys is a pair of key.DiscoPublic. 
It must be constructed via @@ -321,8 +322,7 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve s.wg.Add(1) go s.endpointGCLoop() if len(overrideAddrs) > 0 { - var addrPorts set.Set[netip.AddrPort] - addrPorts.Make() + addrPorts := make(set.Set[netip.AddrPort], len(overrideAddrs)) for _, addr := range overrideAddrs { if addr.IsValid() { addrPorts.Add(netip.AddrPortFrom(addr, boundPort)) @@ -401,12 +401,12 @@ func (s *Server) addrDiscoveryLoop() { } s.mu.Lock() s.addrPorts = addrPorts + s.addrDiscoveryOnce = true s.mu.Unlock() case <-s.closeCh: return } } - } func (s *Server) listenOn(port int) (uint16, error) { @@ -521,10 +521,22 @@ func (s *Server) packetReadLoop() { var ErrServerClosed = errors.New("server closed") +// ErrServerNotReady indicates the server is not ready. Allocation should be +// requested after waiting for at least RetryAfter duration. +type ErrServerNotReady struct { + RetryAfter time.Duration +} + +func (e ErrServerNotReady) Error() string { + return fmt.Sprintf("server not ready, retry after %v", e.RetryAfter) +} + // AllocateEndpoint allocates an [endpoint.ServerEndpoint] for the provided pair // of [key.DiscoPublic]'s. If an allocation already exists for discoA and discoB // it is returned without modification/reallocation. AllocateEndpoint returns -// [ErrServerClosed] if the server has been closed. +// the following notable errors: +// 1. [ErrServerClosed] if the server has been closed. +// 2. [ErrServerNotReady] if the server is not ready. func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.ServerEndpoint, error) { s.mu.Lock() defer s.mu.Unlock() @@ -533,6 +545,9 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv } if len(s.addrPorts) == 0 { + if !s.addrDiscoveryOnce { + return endpoint.ServerEndpoint{}, ErrServerNotReady{RetryAfter: 3 * time.Second} + } return endpoint.ServerEndpoint{}, errors.New("server addrPorts are not yet known") } diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 4ccfbb501..d149d0c59 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -7,9 +7,12 @@ import ( "bytes" "context" "encoding/json" + "errors" + "fmt" "io" "net/http" "net/netip" + "strconv" "sync" "time" @@ -716,46 +719,82 @@ func (r *relayManager) allocateAllServersRunLoop(ep *endpoint) { }() } -func (r *relayManager) allocateSingleServer(ctx context.Context, wg *sync.WaitGroup, server netip.AddrPort, ep *endpoint) { - // TODO(jwhited): introduce client metrics counters for notable failures - defer wg.Done() - var b bytes.Buffer - remoteDisco := ep.disco.Load() - if remoteDisco == nil { - return - } +type errNotReady struct{ retryAfter time.Duration } + +func (e errNotReady) Error() string { + return fmt.Sprintf("server not ready, retry after %v", e.retryAfter) +} + +const reqTimeout = time.Second * 10 + +func doAllocate(ctx context.Context, server netip.AddrPort, discoKeys [2]key.DiscoPublic) (udprelay.ServerEndpoint, error) { + var reqBody bytes.Buffer type allocateRelayEndpointReq struct { DiscoKeys []key.DiscoPublic } a := &allocateRelayEndpointReq{ - DiscoKeys: []key.DiscoPublic{ep.c.discoPublic, remoteDisco.key}, + DiscoKeys: []key.DiscoPublic{discoKeys[0], discoKeys[1]}, } - err := json.NewEncoder(&b).Encode(a) + err := json.NewEncoder(&reqBody).Encode(a) if err != nil { - return + return udprelay.ServerEndpoint{}, err } - const reqTimeout = time.Second * 10 reqCtx, cancel := context.WithTimeout(ctx, 
reqTimeout) defer cancel() - req, err := http.NewRequestWithContext(reqCtx, httpm.POST, "http://"+server.String()+"/v0/relay/endpoint", &b) + req, err := http.NewRequestWithContext(reqCtx, httpm.POST, "http://"+server.String()+"/v0/relay/endpoint", &reqBody) if err != nil { - return + return udprelay.ServerEndpoint{}, err } resp, err := http.DefaultClient.Do(req) if err != nil { - return + return udprelay.ServerEndpoint{}, err } defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { + switch resp.StatusCode { + case http.StatusOK: + var se udprelay.ServerEndpoint + err = json.NewDecoder(io.LimitReader(resp.Body, 4096)).Decode(&se) + return se, err + case http.StatusServiceUnavailable: + raHeader := resp.Header.Get("Retry-After") + raSeconds, err := strconv.ParseUint(raHeader, 10, 32) + if err == nil { + return udprelay.ServerEndpoint{}, errNotReady{retryAfter: time.Second * time.Duration(raSeconds)} + } + fallthrough + default: + return udprelay.ServerEndpoint{}, fmt.Errorf("non-200 status: %d", resp.StatusCode) + } +} + +func (r *relayManager) allocateSingleServer(ctx context.Context, wg *sync.WaitGroup, server netip.AddrPort, ep *endpoint) { + // TODO(jwhited): introduce client metrics counters for notable failures + defer wg.Done() + remoteDisco := ep.disco.Load() + if remoteDisco == nil { return } - var se udprelay.ServerEndpoint - err = json.NewDecoder(io.LimitReader(resp.Body, 4096)).Decode(&se) - if err != nil { + firstTry := true + for { + se, err := doAllocate(ctx, server, [2]key.DiscoPublic{ep.c.discoPublic, remoteDisco.key}) + if err == nil { + relayManagerInputEvent(r, ctx, &r.newServerEndpointCh, newRelayServerEndpointEvent{ + ep: ep, + se: se, + }) + return + } + ep.c.logf("[v1] magicsock: relayManager: error allocating endpoint on %v for %v: %v", server, ep.discoShort(), err) + var notReady errNotReady + if firstTry && errors.As(err, ¬Ready) { + select { + case <-ctx.Done(): + return + case <-time.After(min(notReady.retryAfter, reqTimeout)): + firstTry = false + continue + } + } return } - relayManagerInputEvent(r, ctx, &r.newServerEndpointCh, newRelayServerEndpointEvent{ - ep: ep, - se: se, - }) } From 31eebdb0f8b42d40f0360a835e25d4d35c1cf420 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 23 Jun 2025 16:13:58 -0700 Subject: [PATCH 0993/1708] wgengine/magicsock: send CallMeMaybeVia for relay endpoints (#16360) If we acted as the allocator we are responsible for signaling it to the remote peer in a CallMeMaybeVia message over DERP. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/relaymanager.go | 38 ++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index d149d0c59..f22e281e6 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -30,8 +30,9 @@ import ( // // [relayManager] methods can be called by [Conn] and [endpoint] while their .mu // mutexes are held. Therefore, in order to avoid deadlocks, [relayManager] must -// never attempt to acquire those mutexes, including synchronous calls back -// towards [Conn] or [endpoint] methods that acquire them. +// never attempt to acquire those mutexes synchronously from its runLoop(), +// including synchronous calls back towards [Conn] or [endpoint] methods that +// acquire them. 
type relayManager struct { initOnce sync.Once @@ -584,9 +585,37 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay byServerDisco[newServerEndpoint.se.ServerDisco] = work r.handshakeWorkByServerDiscoVNI[sdv] = work + if newServerEndpoint.server.IsValid() { + // Send CallMeMaybeVia to the remote peer if we allocated this endpoint. + go r.sendCallMeMaybeVia(work.ep, work.se) + } + go r.handshakeServerEndpoint(work) } +// sendCallMeMaybeVia sends a [disco.CallMeMaybeVia] to ep over DERP. It must be +// called as part of a goroutine independent from runLoop(), for 2 reasons: +// 1. it acquires ep.mu (refer to [relayManager] docs for reasoning) +// 2. it makes a networking syscall, which can introduce unwanted backpressure +func (r *relayManager) sendCallMeMaybeVia(ep *endpoint, se udprelay.ServerEndpoint) { + ep.mu.Lock() + derpAddr := ep.derpAddr + ep.mu.Unlock() + epDisco := ep.disco.Load() + if epDisco == nil || !derpAddr.IsValid() { + return + } + callMeMaybeVia := &disco.CallMeMaybeVia{ + ServerDisco: se.ServerDisco, + LamportID: se.LamportID, + VNI: se.VNI, + BindLifetime: se.BindLifetime.Duration, + SteadyStateLifetime: se.SteadyStateLifetime.Duration, + AddrPorts: se.AddrPorts, + } + ep.c.sendDiscoMessage(epAddr{ap: derpAddr}, ep.publicKey, epDisco.key, callMeMaybeVia, discoVerboseLog) +} + func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { done := relayEndpointHandshakeWorkDoneEvent{work: work} r.ensureDiscoInfoFor(work) @@ -779,8 +808,9 @@ func (r *relayManager) allocateSingleServer(ctx context.Context, wg *sync.WaitGr se, err := doAllocate(ctx, server, [2]key.DiscoPublic{ep.c.discoPublic, remoteDisco.key}) if err == nil { relayManagerInputEvent(r, ctx, &r.newServerEndpointCh, newRelayServerEndpointEvent{ - ep: ep, - se: se, + ep: ep, + se: se, + server: server, // we allocated this endpoint (vs CallMeMaybeVia reception), mark it as such }) return } From 4a1fc378d1a8fa4d7f5beef318830d8354f76d1c Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Mon, 23 Jun 2025 17:55:23 -0500 Subject: [PATCH 0994/1708] release/dist: switch back to Ubuntu 20.04 for building QNAP packages After the switch to 24.04, unsigned packages did not build correctly (came out as only a few KBs). 
Fixes tailscale/tailscale-qpkg#148 Signed-off-by: Percy Wegmann --- release/dist/qnap/files/scripts/Dockerfile.qpkg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release/dist/qnap/files/scripts/Dockerfile.qpkg b/release/dist/qnap/files/scripts/Dockerfile.qpkg index 1f4c2406d..542eb95e1 100644 --- a/release/dist/qnap/files/scripts/Dockerfile.qpkg +++ b/release/dist/qnap/files/scripts/Dockerfile.qpkg @@ -1,4 +1,4 @@ -FROM ubuntu:24.04 +FROM ubuntu:20.04 RUN apt-get update -y && \ apt-get install -y --no-install-recommends \ From 9e28bfc69c0127a21fbce6beeaee2d763fe78d2a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 24 Jun 2025 13:39:29 -0500 Subject: [PATCH 0995/1708] ipn/ipnlocal,wgengine/magicsock: wait for magicsock to process pending events on authReconfig Updates #16369 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 5 ++++ ipn/ipnlocal/local_test.go | 6 ++++ ipn/ipnlocal/state_test.go | 51 ++++++++++++++++++++++++++++++++- wgengine/magicsock/magicsock.go | 34 ++++++++++++++++++++++ 4 files changed, 95 insertions(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 908418d4a..5467088f7 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4853,6 +4853,11 @@ func (b *LocalBackend) readvertiseAppConnectorRoutes() { // updates are not currently blocked, based on the cached netmap and // user prefs. func (b *LocalBackend) authReconfig() { + // Wait for magicsock to process pending [eventbus] events, + // such as netmap updates. This should be completed before + // wireguard-go is reconfigured. See tailscale/tailscale#16369. + b.MagicConn().Synchronize() + b.mu.Lock() blocked := b.blocked prefs := b.pm.CurrentPrefs() diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 6e24f4300..6e6278688 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -85,6 +85,12 @@ func makeNodeKeyFromID(nodeID tailcfg.NodeID) key.NodePublic { return key.NodePublicFromRaw32(memro.B(raw)) } +func makeDiscoKeyFromID(nodeID tailcfg.NodeID) (ret key.DiscoPublic) { + raw := make([]byte, 32) + binary.BigEndian.PutUint64(raw[24:], uint64(nodeID)) + return key.DiscoPublicFromRaw32(memro.B(raw)) +} + func TestShrinkDefaultRoute(t *testing.T) { tests := []struct { route string diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 5d9e8b169..2921de203 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1114,6 +1114,8 @@ func TestEngineReconfigOnStateChange(t *testing.T) { disconnect := &ipn.MaskedPrefs{Prefs: ipn.Prefs{WantRunning: false}, WantRunningSet: true} node1 := testNetmapForNode(1, "node-1", []netip.Prefix{netip.MustParsePrefix("100.64.1.1/32")}) node2 := testNetmapForNode(2, "node-2", []netip.Prefix{netip.MustParsePrefix("100.64.1.2/32")}) + node3 := testNetmapForNode(3, "node-3", []netip.Prefix{netip.MustParsePrefix("100.64.1.3/32")}) + node3.Peers = []tailcfg.NodeView{node1.SelfNode, node2.SelfNode} routesWithQuad100 := func(extra ...netip.Prefix) []netip.Prefix { return append(extra, netip.MustParsePrefix("100.100.100.100/32")) } @@ -1308,6 +1310,40 @@ func TestEngineReconfigOnStateChange(t *testing.T) { Hosts: hostsFor(node1), }, }, + { + name: "Start/Connect/Login/WithPeers", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node3) + }, + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Name: "tailscale", + NodeID: 
node3.SelfNode.StableID(), + Peers: []wgcfg.Peer{ + { + PublicKey: node1.SelfNode.Key(), + DiscoKey: node1.SelfNode.DiscoKey(), + }, + { + PublicKey: node2.SelfNode.Key(), + DiscoKey: node2.SelfNode.DiscoKey(), + }, + }, + Addresses: node3.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node3.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node3), + }, + }, } for _, tt := range tests { @@ -1322,8 +1358,18 @@ func TestEngineReconfigOnStateChange(t *testing.T) { t.Errorf("State: got %v; want %v", gotState, tt.wantState) } + if engine.Config() != nil { + for _, p := range engine.Config().Peers { + pKey := p.PublicKey.UntypedHexString() + _, err := lb.MagicConn().ParseEndpoint(pKey) + if err != nil { + t.Errorf("ParseEndpoint(%q) failed: %v", pKey, err) + } + } + } + opts := []cmp.Option{ - cmpopts.EquateComparable(key.NodePublic{}, netip.Addr{}, netip.Prefix{}), + cmpopts.EquateComparable(key.NodePublic{}, key.DiscoPublic{}, netip.Addr{}, netip.Prefix{}), } if diff := cmp.Diff(tt.wantCfg, engine.Config(), opts...); diff != "" { t.Errorf("wgcfg.Config(+got -want): %v", diff) @@ -1356,6 +1402,8 @@ func testNetmapForNode(userID tailcfg.UserID, name string, addresses []netip.Pre Addresses: addresses, MachineAuthorized: true, } + self.Key = makeNodeKeyFromID(self.ID) + self.DiscoKey = makeDiscoKeyFromID(self.ID) return &netmap.NetworkMap{ SelfNode: self.View(), Name: self.Name, @@ -1403,6 +1451,7 @@ func newLocalBackendWithMockEngineAndControl(t *testing.T, enableLogging bool) ( magicConn, err := magicsock.NewConn(magicsock.Options{ Logf: logf, + EventBus: sys.Bus.Get(), NetMon: dialer.NetMon(), Metrics: sys.UserMetricsRegistry(), HealthTracker: sys.HealthTracker(), diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a96eaf3d8..d7b522699 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -167,6 +167,8 @@ type Conn struct { filterSub *eventbus.Subscriber[FilterUpdate] nodeViewsSub *eventbus.Subscriber[NodeViewsUpdate] nodeMutsSub *eventbus.Subscriber[NodeMutationsUpdate] + syncSub *eventbus.Subscriber[syncPoint] + syncPub *eventbus.Publisher[syncPoint] subsDoneCh chan struct{} // closed when consumeEventbusTopics returns // pconn4 and pconn6 are the underlying UDP sockets used to @@ -538,6 +540,21 @@ type FilterUpdate struct { *filter.Filter } +// syncPoint is an event published over an [eventbus.Bus] by [Conn.Synchronize]. +// It serves as a synchronization point, allowing to wait until magicsock +// has processed all pending events. +type syncPoint chan struct{} + +// Wait blocks until [syncPoint.Signal] is called. +func (s syncPoint) Wait() { + <-s +} + +// Signal signals the sync point, unblocking the [syncPoint.Wait] call. +func (s syncPoint) Signal() { + close(s) +} + // newConn is the error-free, network-listening-side-effect-free based // of NewConn. Mostly for tests. func newConn(logf logger.Logf) *Conn { @@ -593,10 +610,25 @@ func (c *Conn) consumeEventbusTopics() { c.onNodeViewsUpdate(nodeViews) case nodeMuts := <-c.nodeMutsSub.Events(): c.onNodeMutationsUpdate(nodeMuts) + case syncPoint := <-c.syncSub.Events(): + c.dlogf("magicsock: received sync point after reconfig") + syncPoint.Signal() } } } +// Synchronize waits for all [eventbus] events published +// prior to this call to be processed by the receiver. 
+func (c *Conn) Synchronize() { + if c.syncPub == nil { + // Eventbus is not used; no need to synchronize (in certain tests). + return + } + sp := syncPoint(make(chan struct{})) + c.syncPub.Publish(sp) + sp.Wait() +} + // NewConn creates a magic Conn listening on opts.Port. // As the set of possible endpoints for a Conn changes, the // callback opts.EndpointsFunc is called. @@ -624,6 +656,8 @@ func NewConn(opts Options) (*Conn, error) { c.filterSub = eventbus.Subscribe[FilterUpdate](c.eventClient) c.nodeViewsSub = eventbus.Subscribe[NodeViewsUpdate](c.eventClient) c.nodeMutsSub = eventbus.Subscribe[NodeMutationsUpdate](c.eventClient) + c.syncSub = eventbus.Subscribe[syncPoint](c.eventClient) + c.syncPub = eventbus.Publish[syncPoint](c.eventClient) c.subsDoneCh = make(chan struct{}) go c.consumeEventbusTopics() } From 83cd446b5d2cd136e87023187949bbd45710be7a Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Tue, 24 Jun 2025 16:56:28 -0500 Subject: [PATCH 0996/1708] release/dist/qnap: upgrade to Ubuntu 24.04 Docker image 20.04 is no longer supported. This pulls in changes to the QDK package that were required to make build succeed on 24.04. Updates https://github.com/tailscale/corp/issues/29849 Signed-off-by: Percy Wegmann --- release/dist/qnap/files/scripts/Dockerfile.qpkg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/release/dist/qnap/files/scripts/Dockerfile.qpkg b/release/dist/qnap/files/scripts/Dockerfile.qpkg index 542eb95e1..dbcaac116 100644 --- a/release/dist/qnap/files/scripts/Dockerfile.qpkg +++ b/release/dist/qnap/files/scripts/Dockerfile.qpkg @@ -1,4 +1,4 @@ -FROM ubuntu:20.04 +FROM ubuntu:24.04 RUN apt-get update -y && \ apt-get install -y --no-install-recommends \ @@ -10,7 +10,7 @@ RUN apt-get update -y && \ patch # Install QNAP QDK (force a specific version to pick up updates) -RUN git clone https://github.com/tailscale/QDK.git && cd /QDK && git reset --hard 9a31a67387c583d19a81a378dcf7c25e2abe231d +RUN git clone https://github.com/tailscale/QDK.git && cd /QDK && git reset --hard 6aba74f6b4c8ea0c30b8aec9f3476f428f6a58a1 RUN cd /QDK && ./InstallToUbuntu.sh install ENV PATH="/usr/share/QDK/bin:${PATH}" From f2f1236ad4174ca46402f26139cca71dd1c94c2b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Wed, 25 Jun 2025 09:00:34 -0400 Subject: [PATCH 0997/1708] util/eventbus: add test helpers to simplify testing events (#16294) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of every module having to come up with a set of test methods for the event bus, this handful of test helpers hides a lot of the needed setup for the testing of the event bus. The tests in portmapper is also ported over to the new helpers. 
Updates #15160 Signed-off-by: Claus Lensbøl --- net/portmapper/portmapper.go | 2 +- net/portmapper/portmapper_test.go | 17 +- util/eventbus/doc.go | 10 + util/eventbus/eventbustest/doc.go | 45 +++ util/eventbus/eventbustest/eventbustest.go | 203 ++++++++++ .../eventbustest/eventbustest_test.go | 366 ++++++++++++++++++ util/eventbus/eventbustest/examples_test.go | 201 ++++++++++ 7 files changed, 831 insertions(+), 13 deletions(-) create mode 100644 util/eventbus/eventbustest/doc.go create mode 100644 util/eventbus/eventbustest/eventbustest.go create mode 100644 util/eventbus/eventbustest/eventbustest_test.go create mode 100644 util/eventbus/eventbustest/examples_test.go diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index 59f88e966..1c6c7634b 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -515,7 +515,7 @@ func (c *Client) createMapping() { GoodUntil: mapping.GoodUntil(), }) } - if c.onChange != nil { + if c.onChange != nil && c.pubClient == nil { go c.onChange() } } diff --git a/net/portmapper/portmapper_test.go b/net/portmapper/portmapper_test.go index 515a0c28c..e66d3c159 100644 --- a/net/portmapper/portmapper_test.go +++ b/net/portmapper/portmapper_test.go @@ -12,7 +12,7 @@ import ( "time" "tailscale.com/control/controlknobs" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" ) func TestCreateOrGetMapping(t *testing.T) { @@ -142,22 +142,15 @@ func TestUpdateEvent(t *testing.T) { t.Fatalf("Create test gateway: %v", err) } - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) - sub := eventbus.Subscribe[Mapping](bus.Client("TestUpdateEvent")) c := newTestClient(t, igd, bus) if _, err := c.Probe(t.Context()); err != nil { t.Fatalf("Probe failed: %v", err) } c.GetCachedMappingOrStartCreatingOne() - - select { - case evt := <-sub.Events(): - t.Logf("Received portmap update: %+v", evt) - case <-sub.Done(): - t.Error("Subscriber closed prematurely") - case <-time.After(5 * time.Second): - t.Error("Timed out waiting for an update event") + if err := eventbustest.Expect(tw, eventbustest.Type[Mapping]()); err != nil { + t.Error(err.Error()) } } diff --git a/util/eventbus/doc.go b/util/eventbus/doc.go index 964a686ea..f95f9398c 100644 --- a/util/eventbus/doc.go +++ b/util/eventbus/doc.go @@ -89,4 +89,14 @@ // The [Debugger], obtained through [Bus.Debugger], provides // introspection facilities to monitor events flowing through the bus, // and inspect publisher and subscriber state. +// +// Additionally, a debug command exists for monitoring the eventbus: +// +// tailscale debug daemon-bus-events +// +// # Testing facilities +// +// Helpers for testing code with the eventbus can be found in: +// +// eventbus/eventbustest package eventbus diff --git a/util/eventbus/eventbustest/doc.go b/util/eventbus/eventbustest/doc.go new file mode 100644 index 000000000..9e39504a8 --- /dev/null +++ b/util/eventbus/eventbustest/doc.go @@ -0,0 +1,45 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package eventbustest provides helper methods for testing an [eventbus.Bus]. +// +// # Usage +// +// A [Watcher] presents a set of generic helpers for testing events. +// +// To test code that generates events, create a [Watcher] from the [eventbus.Bus] +// used by the code under test, run the code to generate events, then use the watcher +// to verify that the expected events were produced. 
In outline: +// +// bus := eventbustest.NewBus(t) +// tw := eventbustest.NewWatcher(t, bus) +// somethingThatEmitsSomeEvent() +// if err := eventbustest.Expect(tw, eventbustest.Type[EventFoo]()); err != nil { +// t.Error(err.Error()) +// } +// +// As shown, [Expect] checks that at least one event of the given type occurs +// in the stream generated by the code under test. +// +// The following functions all take an any parameter representing a function. +// This function will take an argument of the expected type and is used to test +// for the events on the eventbus being of the given type. The function can +// take the shape described in [Expect]. +// +// [Type] is a helper for only testing event type. +// +// To check for specific properties of an event, use [Expect], and pass a function +// as the second argument that tests for those properties. +// +// To test for multiple events, use [Expect], which checks that the stream +// contains the given events in the given order, possibly with other events +// interspersed. +// +// To test the complete contents of the stream, use [ExpectExactly], which +// checks that the stream contains exactly the given events in the given order, +// and no others. +// +// See the [usage examples]. +// +// [usage examples]: https://github.com/tailscale/tailscale/blob/main/util/eventbus/eventbustest/examples_test.go +package eventbustest diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go new file mode 100644 index 000000000..75d430d53 --- /dev/null +++ b/util/eventbus/eventbustest/eventbustest.go @@ -0,0 +1,203 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbustest + +import ( + "errors" + "fmt" + "reflect" + "testing" + "time" + + "tailscale.com/util/eventbus" +) + +// NewBus constructs an [eventbus.Bus] that will be shut automatically when +// its controlling test ends. +func NewBus(t *testing.T) *eventbus.Bus { + bus := eventbus.New() + t.Cleanup(bus.Close) + return bus +} + +// NewTestWatcher constructs a [Watcher] that can be used to check the stream of +// events generated by code under test. After construction the caller may use +// [Expect] and [ExpectExactly], to verify that the desired events were captured. +func NewWatcher(t *testing.T, bus *eventbus.Bus) *Watcher { + tw := &Watcher{ + mon: bus.Debugger().WatchBus(), + TimeOut: 5 * time.Second, + chDone: make(chan bool, 1), + events: make(chan any, 100), + } + if deadline, ok := t.Deadline(); ok { + tw.TimeOut = deadline.Sub(time.Now()) + } + t.Cleanup(tw.done) + go tw.watch() + return tw +} + +// Watcher monitors and holds events for test expectations. +type Watcher struct { + mon *eventbus.Subscriber[eventbus.RoutedEvent] + events chan any + chDone chan bool + // TimeOut defines when the Expect* functions should stop looking for events + // coming from the Watcher. The value is set by [NewWatcher] and defaults to + // the deadline passed in by [testing.T]. If looking to verify the absence + // of an event, the TimeOut can be set to a lower value after creating the + // Watcher. + TimeOut time.Duration +} + +// Type is a helper representing the expectation to see an event of type T, without +// caring about the content of the event. +// It makes it possible to use helpers like: +// +// eventbustest.ExpectFilter(tw, eventbustest.Type[EventFoo]()) +func Type[T any]() func(T) { return func(T) {} } + +// Expect verifies that the given events are a subsequence of the events +// observed by tw. 
That is, tw must contain at least one event matching the type +// of each argument in the given order, other event types are allowed to occur in +// between without error. The given events are represented by a function +// that must have one of the following forms: +// +// // Tests for the event type only +// func(e ExpectedType) +// +// // Tests for event type and whatever is defined in the body. +// // If return is false, the test will look for other events of that type +// // If return is true, the test will look for the next given event +// // if a list is given +// func(e ExpectedType) bool +// +// // Tests for event type and whatever is defined in the body. +// // The boolean return works as above. +// // The if error != nil, the test helper will return that error immediately. +// func(e ExpectedType) (bool, error) +// +// If the list of events must match exactly with no extra events, +// use [ExpectExactly]. +func Expect(tw *Watcher, filters ...any) error { + if len(filters) == 0 { + return errors.New("no event filters were provided") + } + eventCount := 0 + head := 0 + for head < len(filters) { + eventFunc := eventFilter(filters[head]) + select { + case event := <-tw.events: + eventCount++ + if ok, err := eventFunc(event); err != nil { + return err + } else if ok { + head++ + } + case <-time.After(tw.TimeOut): + return fmt.Errorf( + "timed out waiting for event, saw %d events, %d was expected", + eventCount, head) + case <-tw.chDone: + return errors.New("watcher closed while waiting for events") + } + } + return nil +} + +// ExpectExactly checks for some number of events showing up on the event bus +// in a given order, returning an error if the events does not match the given list +// exactly. The given events are represented by a function as described in +// [Expect]. Use [Expect] if other events are allowed. +func ExpectExactly(tw *Watcher, filters ...any) error { + if len(filters) == 0 { + return errors.New("no event filters were provided") + } + eventCount := 0 + for pos, next := range filters { + eventFunc := eventFilter(next) + fnType := reflect.TypeOf(next) + argType := fnType.In(0) + select { + case event := <-tw.events: + eventCount++ + typeEvent := reflect.TypeOf(event) + if typeEvent != argType { + return fmt.Errorf( + "expected event type %s, saw %s, at index %d", + argType, typeEvent, pos) + } else if ok, err := eventFunc(event); err != nil { + return err + } else if !ok { + return fmt.Errorf( + "expected test ok for type %s, at index %d", argType, pos) + } + case <-time.After(tw.TimeOut): + return fmt.Errorf( + "timed out waiting for event, saw %d events, %d was expected", + eventCount, pos) + case <-tw.chDone: + return errors.New("watcher closed while waiting for events") + } + } + return nil +} + +func (tw *Watcher) watch() { + for { + select { + case event := <-tw.mon.Events(): + tw.events <- event.Event + case <-tw.chDone: + tw.mon.Close() + return + } + } +} + +// done tells the watcher to stop monitoring for new events. 
+func (tw *Watcher) done() { + close(tw.chDone) +} + +type filter = func(any) (bool, error) + +func eventFilter(f any) filter { + ft := reflect.TypeOf(f) + if ft.Kind() != reflect.Func { + panic("filter is not a function") + } else if ft.NumIn() != 1 { + panic(fmt.Sprintf("function takes %d arguments, want 1", ft.NumIn())) + } + var fixup func([]reflect.Value) []reflect.Value + switch ft.NumOut() { + case 0: + fixup = func([]reflect.Value) []reflect.Value { + return []reflect.Value{reflect.ValueOf(true), reflect.Zero(reflect.TypeFor[error]())} + } + case 1: + if ft.Out(0) != reflect.TypeFor[bool]() { + panic(fmt.Sprintf("result is %T, want bool", ft.Out(0))) + } + fixup = func(vals []reflect.Value) []reflect.Value { + return append(vals, reflect.Zero(reflect.TypeFor[error]())) + } + case 2: + if ft.Out(0) != reflect.TypeFor[bool]() || ft.Out(1) != reflect.TypeFor[error]() { + panic(fmt.Sprintf("results are %T, %T; want bool, error", ft.Out(0), ft.Out(1))) + } + fixup = func(vals []reflect.Value) []reflect.Value { return vals } + default: + panic(fmt.Sprintf("function returns %d values", ft.NumOut())) + } + fv := reflect.ValueOf(f) + return reflect.MakeFunc(reflect.TypeFor[filter](), func(args []reflect.Value) []reflect.Value { + if !args[0].IsValid() || args[0].Elem().Type() != ft.In(0) { + return []reflect.Value{reflect.ValueOf(false), reflect.Zero(reflect.TypeFor[error]())} + } + return fixup(fv.Call([]reflect.Value{args[0].Elem()})) + }).Interface().(filter) +} diff --git a/util/eventbus/eventbustest/eventbustest_test.go b/util/eventbus/eventbustest/eventbustest_test.go new file mode 100644 index 000000000..fd95973e5 --- /dev/null +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -0,0 +1,366 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbustest_test + +import ( + "fmt" + "testing" + "time" + + "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" +) + +type EventFoo struct { + Value int +} + +type EventBar struct { + Value string +} + +type EventBaz struct { + Value []float64 +} + +func TestExpectFilter(t *testing.T) { + tests := []struct { + name string + events []int + expectFunc any + wantErr bool + }{ + { + name: "single event", + events: []int{42}, + expectFunc: eventbustest.Type[EventFoo](), + wantErr: false, + }, + { + name: "multiple events, single expectation", + events: []int{42, 1, 2, 3, 4, 5}, + expectFunc: eventbustest.Type[EventFoo](), + wantErr: false, + }, + { + name: "filter on event with function", + events: []int{24, 42}, + expectFunc: func(event EventFoo) (bool, error) { + if event.Value == 42 { + return true, nil + } + return false, nil + }, + wantErr: false, + }, + { + name: "first event has to be func", + events: []int{24, 42}, + expectFunc: func(event EventFoo) (bool, error) { + if event.Value != 42 { + return false, fmt.Errorf("expected 42, got %d", event.Value) + } + return false, nil + }, + wantErr: true, + }, + { + name: "no events", + events: []int{}, + expectFunc: func(event EventFoo) (bool, error) { + return true, nil + }, + wantErr: true, + }, + } + + bus := eventbustest.NewBus(t) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tw := eventbustest.NewWatcher(t, bus) + // TODO(cmol): When synctest is out of experimental, use that instead: + // https://go.dev/blog/synctest + tw.TimeOut = 10 * time.Millisecond + + client := bus.Client("testClient") + defer client.Close() + updater := eventbus.Publish[EventFoo](client) + + for _, i := range tt.events { + 
updater.Publish(EventFoo{i}) + } + + if err := eventbustest.Expect(tw, tt.expectFunc); (err != nil) != tt.wantErr { + t.Errorf("ExpectFilter[EventFoo]: error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestExpectEvents(t *testing.T) { + tests := []struct { + name string + events []any + expectEvents []any + wantErr bool + }{ + { + name: "No expectations", + events: []any{EventFoo{}}, + expectEvents: []any{}, + wantErr: true, + }, + { + name: "One event", + events: []any{EventFoo{}}, + expectEvents: []any{eventbustest.Type[EventFoo]()}, + wantErr: false, + }, + { + name: "Two events", + events: []any{EventFoo{}, EventBar{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: false, + }, + { + name: "Two expected events with another in the middle", + events: []any{EventFoo{}, EventBaz{}, EventBar{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: false, + }, + { + name: "Missing event", + events: []any{EventFoo{}, EventBaz{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: true, + }, + { + name: "One event with specific value", + events: []any{EventFoo{42}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: false, + }, + { + name: "Two event with one specific value", + events: []any{EventFoo{43}, EventFoo{42}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: false, + }, + { + name: "One event with wrong value", + events: []any{EventFoo{43}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: true, + }, + { + name: "Two events with specific values", + events: []any{EventFoo{42}, EventFoo{42}, EventBar{"42"}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + func(ev EventBar) (bool, error) { + if ev.Value == "42" { + return true, nil + } + return false, nil + }, + }, + wantErr: false, + }, + } + + bus := eventbustest.NewBus(t) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tw := eventbustest.NewWatcher(t, bus) + // TODO(cmol): When synctest is out of experimental, use that instead: + // https://go.dev/blog/synctest + tw.TimeOut = 10 * time.Millisecond + + client := bus.Client("testClient") + defer client.Close() + updaterFoo := eventbus.Publish[EventFoo](client) + updaterBar := eventbus.Publish[EventBar](client) + updaterBaz := eventbus.Publish[EventBaz](client) + + for _, ev := range tt.events { + switch ev.(type) { + case EventFoo: + evCast := ev.(EventFoo) + updaterFoo.Publish(evCast) + case EventBar: + evCast := ev.(EventBar) + updaterBar.Publish(evCast) + case EventBaz: + evCast := ev.(EventBaz) + updaterBaz.Publish(evCast) + } + } + + if err := eventbustest.Expect(tw, tt.expectEvents...); (err != nil) != tt.wantErr { + t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestExpectExactlyEventsFilter(t *testing.T) { + tests := []struct { + name string + events []any + expectEvents []any + wantErr bool + }{ + { + name: "No expectations", + events: []any{EventFoo{}}, + expectEvents: []any{}, + wantErr: true, + }, + { + name: "One event", + events: []any{EventFoo{}}, + expectEvents: 
[]any{eventbustest.Type[EventFoo]()}, + wantErr: false, + }, + { + name: "Two events", + events: []any{EventFoo{}, EventBar{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: false, + }, + { + name: "Two expected events with another in the middle", + events: []any{EventFoo{}, EventBaz{}, EventBar{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: true, + }, + { + name: "Missing event", + events: []any{EventFoo{}, EventBaz{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: true, + }, + { + name: "One event with value", + events: []any{EventFoo{42}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: false, + }, + { + name: "Two event with one specific value", + events: []any{EventFoo{43}, EventFoo{42}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: true, + }, + { + name: "One event with wrong value", + events: []any{EventFoo{43}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: true, + }, + { + name: "Two events with specific values", + events: []any{EventFoo{42}, EventFoo{42}, EventBar{"42"}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + func(ev EventBar) (bool, error) { + if ev.Value == "42" { + return true, nil + } + return false, nil + }, + }, + wantErr: true, + }, + } + + bus := eventbustest.NewBus(t) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tw := eventbustest.NewWatcher(t, bus) + // TODO(cmol): When synctest is out of experimental, use that instead: + // https://go.dev/blog/synctest + tw.TimeOut = 10 * time.Millisecond + + client := bus.Client("testClient") + defer client.Close() + updaterFoo := eventbus.Publish[EventFoo](client) + updaterBar := eventbus.Publish[EventBar](client) + updaterBaz := eventbus.Publish[EventBaz](client) + + for _, ev := range tt.events { + switch ev.(type) { + case EventFoo: + evCast := ev.(EventFoo) + updaterFoo.Publish(evCast) + case EventBar: + evCast := ev.(EventBar) + updaterBar.Publish(evCast) + case EventBaz: + evCast := ev.(EventBaz) + updaterBaz.Publish(evCast) + } + } + + if err := eventbustest.ExpectExactly(tw, tt.expectEvents...); (err != nil) != tt.wantErr { + t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/util/eventbus/eventbustest/examples_test.go b/util/eventbus/eventbustest/examples_test.go new file mode 100644 index 000000000..914e29933 --- /dev/null +++ b/util/eventbus/eventbustest/examples_test.go @@ -0,0 +1,201 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbustest_test + +import ( + "testing" + + "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" +) + +func TestExample_Expect(t *testing.T) { + type eventOfInterest struct{} + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updater := eventbus.Publish[eventOfInterest](client) + updater.Publish(eventOfInterest{}) + + if err := eventbustest.Expect(tw, eventbustest.Type[eventOfInterest]()); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // 
Output: + // OK +} + +func TestExample_Expect_WithFunction(t *testing.T) { + type eventOfInterest struct { + value int + } + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updater := eventbus.Publish[eventOfInterest](client) + updater.Publish(eventOfInterest{43}) + updater.Publish(eventOfInterest{42}) + + // Look for an event of eventOfInterest with a specific value + if err := eventbustest.Expect(tw, func(event eventOfInterest) (bool, error) { + if event.value != 42 { + return false, nil // Look for another event with the expected value. + // You could alternatively return an error here to ensure that the + // first seen eventOfInterest matches the value: + // return false, fmt.Errorf("expected 42, got %d", event.value) + } + return true, nil + }); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // OK +} + +func TestExample_Expect_MultipleEvents(t *testing.T) { + type eventOfInterest struct{} + type eventOfNoConcern struct{} + type eventOfCuriosity struct{} + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updaterInterest := eventbus.Publish[eventOfInterest](client) + updaterConcern := eventbus.Publish[eventOfNoConcern](client) + updaterCuriosity := eventbus.Publish[eventOfCuriosity](client) + updaterInterest.Publish(eventOfInterest{}) + updaterConcern.Publish(eventOfNoConcern{}) + updaterCuriosity.Publish(eventOfCuriosity{}) + + // Even though three events was published, we just care about the two + if err := eventbustest.Expect(tw, + eventbustest.Type[eventOfInterest](), + eventbustest.Type[eventOfCuriosity]()); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // OK +} + +func TestExample_ExpectExactly_MultipleEvents(t *testing.T) { + type eventOfInterest struct{} + type eventOfNoConcern struct{} + type eventOfCuriosity struct{} + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updaterInterest := eventbus.Publish[eventOfInterest](client) + updaterConcern := eventbus.Publish[eventOfNoConcern](client) + updaterCuriosity := eventbus.Publish[eventOfCuriosity](client) + updaterInterest.Publish(eventOfInterest{}) + updaterConcern.Publish(eventOfNoConcern{}) + updaterCuriosity.Publish(eventOfCuriosity{}) + + // Will fail as more events than the two expected comes in + if err := eventbustest.ExpectExactly(tw, + eventbustest.Type[eventOfInterest](), + eventbustest.Type[eventOfCuriosity]()); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } +} + +func TestExample_Expect_WithMultipleFunctions(t *testing.T) { + type eventOfInterest struct { + value int + } + type eventOfNoConcern struct{} + type eventOfCuriosity struct { + value string + } + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updaterInterest := eventbus.Publish[eventOfInterest](client) + updaterConcern := eventbus.Publish[eventOfNoConcern](client) + updaterCuriosity := eventbus.Publish[eventOfCuriosity](client) + updaterInterest.Publish(eventOfInterest{42}) + updaterConcern.Publish(eventOfNoConcern{}) + updaterCuriosity.Publish(eventOfCuriosity{"42"}) + + interest := func(event eventOfInterest) (bool, error) { + if event.value == 42 { + return true, nil + } + return false, nil + } + curiosity := func(event eventOfCuriosity) (bool, error) { + if event.value == "42" { + return true, nil + } + return false, nil 
+ } + + // Will fail as more events than the two expected comes in + if err := eventbustest.Expect(tw, interest, curiosity); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // OK +} + +func TestExample_ExpectExactly_WithMultipleFuncions(t *testing.T) { + type eventOfInterest struct { + value int + } + type eventOfNoConcern struct{} + type eventOfCuriosity struct { + value string + } + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updaterInterest := eventbus.Publish[eventOfInterest](client) + updaterConcern := eventbus.Publish[eventOfNoConcern](client) + updaterCuriosity := eventbus.Publish[eventOfCuriosity](client) + updaterInterest.Publish(eventOfInterest{42}) + updaterConcern.Publish(eventOfNoConcern{}) + updaterCuriosity.Publish(eventOfCuriosity{"42"}) + + interest := func(event eventOfInterest) (bool, error) { + if event.value == 42 { + return true, nil + } + return false, nil + } + curiosity := func(event eventOfCuriosity) (bool, error) { + if event.value == "42" { + return true, nil + } + return false, nil + } + + // Will fail as more events than the two expected comes in + if err := eventbustest.ExpectExactly(tw, interest, curiosity); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // expected event type eventbustest.eventOfCuriosity, saw eventbustest.eventOfNoConcern, at index 1 +} From b75fe9eeca13d7ff651c83d8202d22cce466dc08 Mon Sep 17 00:00:00 2001 From: David Bond Date: Wed, 25 Jun 2025 14:14:17 +0100 Subject: [PATCH 0998/1708] cmd/k8s-operator: Add NOTES.txt to Helm chart (#16364) This commit adds a NOTES.txt to the operator helm chart that will be written to the terminal upon successful installation of the operator. It includes a small list of knowledgebase articles with possible next steps for the actor that installed the operator to the cluster. It also provides possible commands to use for explaining the custom resources. Fixes #13427 Signed-off-by: David Bond --- .gitignore | 3 +++ .../deploy/chart/templates/NOTES.txt | 25 +++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 cmd/k8s-operator/deploy/chart/templates/NOTES.txt diff --git a/.gitignore b/.gitignore index 47d2bbe95..3941fd06e 100644 --- a/.gitignore +++ b/.gitignore @@ -49,3 +49,6 @@ client/web/build/assets *.xcworkspacedata /tstest/tailmac/bin /tstest/tailmac/build + +# Ignore personal IntelliJ settings +.idea/ diff --git a/cmd/k8s-operator/deploy/chart/templates/NOTES.txt b/cmd/k8s-operator/deploy/chart/templates/NOTES.txt new file mode 100644 index 000000000..5678e597a --- /dev/null +++ b/cmd/k8s-operator/deploy/chart/templates/NOTES.txt @@ -0,0 +1,25 @@ +You have successfully installed the Tailscale Kubernetes Operator! 
+ +Once connected, the operator should appear as a device within the Tailscale admin console: +https://login.tailscale.com/admin/machines + +If you have not used the Tailscale operator before, here are some examples to try out: + +* Private Kubernetes API access and authorization using the API server proxy + https://tailscale.com/kb/1437/kubernetes-operator-api-server-proxy + +* Private access to cluster Services using an ingress proxy + https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress + +* Private access to the cluster's available subnets using a subnet router + https://tailscale.com/kb/1441/kubernetes-operator-connector + +You can also explore the CRDs, operator, and associated resources within the {{ .Release.Namespace }} namespace: + +$ kubectl explain connector +$ kubectl explain proxygroup +$ kubectl explain proxyclass +$ kubectl explain recorder +$ kubectl explain dnsconfig + +$ kubectl --namespace={{ .Release.Namespace }} get pods From 35b11e7be55088e282b5e240b9473968eebeb002 Mon Sep 17 00:00:00 2001 From: Laszlo Magyar Date: Wed, 25 Jun 2025 20:26:11 +0300 Subject: [PATCH 0999/1708] envknob/featureknob: restore SSH and exit-node capability for Home Assistant (#16263) SSH was disabled in #10538 Exit node was disabled in #13726 This enables ssh and exit-node options in case of Home Assistant. Fixes #15552 Signed-off-by: Laszlo Magyar --- envknob/featureknob/featureknob.go | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/envknob/featureknob/featureknob.go b/envknob/featureknob/featureknob.go index e9b871f74..5a54a1c42 100644 --- a/envknob/featureknob/featureknob.go +++ b/envknob/featureknob/featureknob.go @@ -10,7 +10,6 @@ import ( "runtime" "tailscale.com/envknob" - "tailscale.com/hostinfo" "tailscale.com/version" "tailscale.com/version/distro" ) @@ -26,14 +25,6 @@ func CanRunTailscaleSSH() error { if distro.Get() == distro.QNAP && !envknob.UseWIPCode() { return errors.New("The Tailscale SSH server does not run on QNAP.") } - - // Setting SSH on Home Assistant causes trouble on startup - // (since the flag is not being passed to `tailscale up`). - // Although Tailscale SSH does work here, - // it's not terribly useful since it's running in a separate container. - if hostinfo.GetEnvType() == hostinfo.HomeAssistantAddOn { - return errors.New("The Tailscale SSH server does not run on HomeAssistant.") - } // otherwise okay case "darwin": // okay only in tailscaled mode for now. 
@@ -58,10 +49,5 @@ func CanUseExitNode() error { distro.QNAP: return errors.New("Tailscale exit nodes cannot be used on " + string(dist)) } - - if hostinfo.GetEnvType() == hostinfo.HomeAssistantAddOn { - return errors.New("Tailscale exit nodes cannot be used on HomeAssistant.") - } - return nil } From 37eca1785c280311b16133e6bd455fa062df29e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Wed, 25 Jun 2025 14:44:01 -0400 Subject: [PATCH 1000/1708] net/netmon: add tests for the events over the eventbus (#16382) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updates #15160 Signed-off-by: Claus Lensbøl --- net/netmon/netmon_test.go | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/net/netmon/netmon_test.go b/net/netmon/netmon_test.go index a9af8fb00..b8ec1b75f 100644 --- a/net/netmon/netmon_test.go +++ b/net/netmon/netmon_test.go @@ -12,6 +12,7 @@ import ( "time" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" ) @@ -68,6 +69,23 @@ func TestMonitorInjectEvent(t *testing.T) { } } +func TestMonitorInjectEventOnBus(t *testing.T) { + bus := eventbustest.NewBus(t) + + mon, err := New(bus, t.Logf) + if err != nil { + t.Fatal(err) + } + defer mon.Close() + tw := eventbustest.NewWatcher(t, bus) + + mon.Start() + mon.InjectEvent() + if err := eventbustest.Expect(tw, eventbustest.Type[*ChangeDelta]()); err != nil { + t.Error(err) + } +} + var ( monitor = flag.String("monitor", "", `go into monitor mode like 'route monitor'; test never terminates. Value can be either "raw" or "callback"`) monitorDuration = flag.Duration("monitor-duration", 0, "if non-zero, how long to run TestMonitorMode. Zero means forever.") @@ -77,13 +95,13 @@ func TestMonitorMode(t *testing.T) { switch *monitor { case "": t.Skip("skipping non-test without --monitor") - case "raw", "callback": + case "raw", "callback", "eventbus": default: - t.Skipf(`invalid --monitor value: must be "raw" or "callback"`) + t.Skipf(`invalid --monitor value: must be "raw", "callback" or "eventbus"`) } - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) mon, err := New(bus, t.Logf) if err != nil { @@ -124,6 +142,16 @@ func TestMonitorMode(t *testing.T) { mon.Start() <-done t.Logf("%v callbacks", n) + case "eventbus": + tw.TimeOut = *monitorDuration + n := 0 + mon.Start() + eventbustest.Expect(tw, func(event *ChangeDelta) (bool, error) { + n++ + t.Logf("cb: changed=%v, ifSt=%v", event.Major, event.New) + return false, nil // Return false, indicating we wanna look for more events + }) + t.Logf("%v events", n) } } From 51d00e135b6c5775f60f77ecd2a94e327aabd1f6 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 25 Jun 2025 19:13:02 -0700 Subject: [PATCH 1001/1708] wgengine/magicsock: fix relayManager alloc work cleanup (#16387) Premature cancellation was preventing the work from ever being cleaned up in runLoop(). 
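In sketch form (illustrative only; names simplified, not the actual
relayManager code), the problematic pattern is an event send guarded by
the same context that the sender cancels: if cancellation happens first,
the send is skipped and runLoop() never sees the completion event.

    // Assumes "context" is imported; eventCh and doneEvent are placeholders.
    func reportDone(ctx context.Context, eventCh chan<- any, doneEvent any) {
        select {
        case eventCh <- doneEvent:
        case <-ctx.Done():
            // If cancel() already ran, this case is ready and the
            // done event may never reach the run loop.
        }
    }

The fix is to cancel only after the event has been handed to
relayManagerInputEvent.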
Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/relaymanager.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index f22e281e6..7b378838a 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -743,8 +743,11 @@ func (r *relayManager) allocateAllServersRunLoop(ep *endpoint) { r.allocWorkByEndpoint[ep] = started go func() { started.wg.Wait() - started.cancel() relayManagerInputEvent(r, ctx, &r.allocateWorkDoneCh, relayEndpointAllocWorkDoneEvent{work: started}) + // cleanup context cancellation must come after the + // relayManagerInputEvent call, otherwise it returns early without + // writing the event to runLoop(). + started.cancel() }() } From aa106c92a4ed6d66b26d455dc4bff23516514af1 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Wed, 25 Jun 2025 21:30:44 -0700 Subject: [PATCH 1002/1708] .github/workflows: request @tailscale/dataplane review DERP changes (#16372) For any changes that involve DERP, automatically add the @tailscale/dataplane team as a reviewer. Updates #cleanup Signed-off-by: Simon Law --- .../workflows/request-dataplane-review.yml | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 .github/workflows/request-dataplane-review.yml diff --git a/.github/workflows/request-dataplane-review.yml b/.github/workflows/request-dataplane-review.yml new file mode 100644 index 000000000..836fef6fb --- /dev/null +++ b/.github/workflows/request-dataplane-review.yml @@ -0,0 +1,31 @@ +name: request-dataplane-review + +on: + pull_request: + branches: + - "*" + paths: + - ".github/workflows/request-dataplane-review.yml" + - "**/*derp*" + - "**/derp*/**" + +jobs: + request-dataplane-review: + name: Request Dataplane Review + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Get access token + uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 + id: generate-token + with: + # Get token for app: https://github.com/apps/change-visibility-bot + app-id: ${{ secrets.VISIBILITY_BOT_APP_ID }} + private-key: ${{ secrets.VISIBILITY_BOT_APP_PRIVATE_KEY }} + - name: Add reviewers + env: + GH_TOKEN: ${{ steps.generate-token.outputs.token }} + url: ${{ github.event.pull_request.html_url }} + run: | + gh pr edit "$url" --add-reviewer tailscale/dataplane From 47dff33eac2441003a1d8ca4e98d56660f8119d4 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Wed, 25 Jun 2025 18:07:49 -0700 Subject: [PATCH 1003/1708] tool/gocross: remove GOROOT to ensure correct toolchain use go(1) repsects GOROOT if set, but tool/go / gocross-wrapper.sh are explicitly intending to use our toolchain. We don't need to set GOROOT, just unset it, and then go(1) handles the rest. Updates tailscale/corp#26717 Signed-off-by: James Tucker --- tool/gocross/gocross-wrapper.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tool/gocross/gocross-wrapper.sh b/tool/gocross/gocross-wrapper.sh index e9fca2aea..90485d31b 100755 --- a/tool/gocross/gocross-wrapper.sh +++ b/tool/gocross/gocross-wrapper.sh @@ -133,6 +133,12 @@ fi repo_root="${BASH_SOURCE%/*}/../.." +# Some scripts/package systems set GOROOT even though they should only be +# setting $PATH. 
Stop them from breaking builds - go(1) respects GOROOT and +# so if it is left on here, compilation units depending on our Go fork will +# fail (such as those which depend on our net/ patches). +unset GOROOT + # gocross is opt-in as of 2025-06-16. See tailscale/corp#26717 # and comment above in this file. if [ "${TS_USE_GOCROSS:-}" != "1" ]; then From 99aaa6e92cda572896503538e1716851358e42d6 Mon Sep 17 00:00:00 2001 From: JerryYan Date: Fri, 27 Jun 2025 00:43:48 +0800 Subject: [PATCH 1004/1708] ipn/ipnlocal: update PeerByID to return SelfNode and rename it to NodeByID (#16096) Like NodeByKey, add an if stmt for checking the NodeId is SelfNode. Updates #16052 Signed-off-by: Jerry Yan <792602257@qq.com> --- ipn/ipnlocal/drive.go | 2 +- ipn/ipnlocal/local.go | 14 +++++--------- ipn/ipnlocal/local_test.go | 2 +- ipn/ipnlocal/node_backend.go | 7 ++++++- 4 files changed, 13 insertions(+), 12 deletions(-) diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index a06ea5e8c..6a6f9bcd2 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -318,7 +318,7 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem // - They are online // - They are allowed to share at least one folder with us cn := b.currentNode() - peer, ok := cn.PeerByID(peerID) + peer, ok := cn.NodeByID(peerID) if !ok { return false } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5467088f7..9cec088f1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1391,7 +1391,7 @@ func profileFromView(v tailcfg.UserProfileView) tailcfg.UserProfile { func (b *LocalBackend) WhoIsNodeKey(k key.NodePublic) (n tailcfg.NodeView, u tailcfg.UserProfile, ok bool) { cn := b.currentNode() if nid, ok := cn.NodeByKey(k); ok { - if n, ok := cn.PeerByID(nid); ok { + if n, ok := cn.NodeByID(nid); ok { up, ok := cn.NetMap().UserProfiles[n.User()] u = profileFromView(up) return n, u, ok @@ -1457,13 +1457,9 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi if nm == nil { return failf("no netmap") } - n, ok = cn.PeerByID(nid) + n, ok = cn.NodeByID(nid) if !ok { - // Check if this the self-node, which would not appear in peers. 
- if !nm.SelfNode.Valid() || nid != nm.SelfNode.ID() { - return zero, u, false - } - n = nm.SelfNode + return zero, u, false } up, ok := cn.UserByID(n.User()) if !ok { @@ -1968,7 +1964,7 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo if !ok || mo.Online { continue } - n, ok := cn.PeerByID(m.NodeIDBeingMutated()) + n, ok := cn.NodeByID(m.NodeIDBeingMutated()) if !ok || n.StableID() != exitNodeID { continue } @@ -7724,7 +7720,7 @@ func (b *LocalBackend) srcIPHasCapForFilter(srcIP netip.Addr, cap tailcfg.NodeCa if !ok { return false } - n, ok := cn.PeerByID(nodeID) + n, ok := cn.NodeByID(nodeID) if !ok { return false } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 6e6278688..16dbef62a 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1005,7 +1005,7 @@ func TestUpdateNetmapDelta(t *testing.T) { }, } for _, want := range wants { - gotv, ok := b.currentNode().PeerByID(want.ID) + gotv, ok := b.currentNode().NodeByID(want.ID) if !ok { t.Errorf("netmap.Peer %v missing from b.profile.Peers", want.ID) continue diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 05389a677..ec503f130 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -206,9 +206,14 @@ func (nb *nodeBackend) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) { return 0, false } -func (nb *nodeBackend) PeerByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) { +func (nb *nodeBackend) NodeByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) { nb.mu.Lock() defer nb.mu.Unlock() + if nb.netMap != nil { + if self := nb.netMap.SelfNode; self.Valid() && self.ID() == id { + return self, true + } + } n, ok := nb.peers[id] return n, ok } From d2c1ed22c39096f11cfd7920449ff746b865a025 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Thu, 26 Jun 2025 13:37:21 -0700 Subject: [PATCH 1005/1708] .github/workflows: replace tibdex with official GitHub Action (#16385) GitHub used to recommend the tibdex/github-app-token GitHub Action until they wrote their own actions/create-github-app-token. This patch replaces the use of the third-party action with the official one. 
Updates #cleanup Signed-off-by: Simon Law --- .github/workflows/update-flake.yml | 9 ++++----- .github/workflows/update-webclient-prebuilt.yml | 11 ++++------- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index af7bdff1e..61a09cea1 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -27,13 +27,12 @@ jobs: run: ./update-flake.sh - name: Get access token - uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a # v2.1.0 + uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 id: generate-token with: - app_id: ${{ secrets.LICENSING_APP_ID }} - installation_retrieval_mode: "id" - installation_retrieval_payload: ${{ secrets.LICENSING_APP_INSTALLATION_ID }} - private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }} + # Get token for app: https://github.com/apps/tailscale-code-updater + app-id: ${{ secrets.CODE_UPDATER_APP_ID }} + private-key: ${{ secrets.CODE_UPDATER_APP_PRIVATE_KEY }} - name: Send pull request uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e #v7.0.8 diff --git a/.github/workflows/update-webclient-prebuilt.yml b/.github/workflows/update-webclient-prebuilt.yml index f1c2b0c3b..5565b8c86 100644 --- a/.github/workflows/update-webclient-prebuilt.yml +++ b/.github/workflows/update-webclient-prebuilt.yml @@ -23,15 +23,12 @@ jobs: ./tool/go mod tidy - name: Get access token - uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a # v2.1.0 + uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 id: generate-token with: - # TODO(will): this should use the code updater app rather than licensing. - # It has the same permissions, so not a big deal, but still. - app_id: ${{ secrets.LICENSING_APP_ID }} - installation_retrieval_mode: "id" - installation_retrieval_payload: ${{ secrets.LICENSING_APP_INSTALLATION_ID }} - private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }} + # Get token for app: https://github.com/apps/tailscale-code-updater + app-id: ${{ secrets.CODE_UPDATER_APP_ID }} + private-key: ${{ secrets.CODE_UPDATER_APP_PRIVATE_KEY }} - name: Send pull request id: pull-request From 6feb3c35cb851d54b613236c31f2dd3c03dbd6b7 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 26 Jun 2025 17:09:13 -0700 Subject: [PATCH 1006/1708] ipn/store: automatically migrate between plaintext and encrypted state (#16318) Add a new `--encrypt-state` flag to `cmd/tailscaled`. Based on that flag, migrate the existing state file to/from encrypted format if needed. 
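The migration itself is a straightforward copy: the state file is opened
with a store in the old format and rewritten through a store in the new
format, using the All() iterator this change adds to ipn.StateStore.
Roughly (simplified from maybeMigrateLocalStateFile in the diff below):

    // Copy every state key from the old-format store into the new one;
    // the temporary file then atomically replaces the original.
    for k, v := range from.All() {
        if err := to.WriteState(k, v); err != nil {
            return err
        }
    }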
Updates #15830 Signed-off-by: Andrew Lytvynov --- atomicfile/atomicfile.go | 6 +- cmd/tailscaled/tailscaled.go | 49 +++++- cmd/tsconnect/src/lib/js-state-store.ts | 3 + cmd/tsconnect/src/types/wasm_js.d.ts | 1 + cmd/tsconnect/wasm/wasm_js.go | 24 +++ docs/windows/policy/en-US/tailscale.adml | 11 +- docs/windows/policy/tailscale.admx | 14 ++ feature/tpm/tpm.go | 20 ++- feature/tpm/tpm_test.go | 165 +++++++++++++++++- ipn/ipnlocal/state_test.go | 8 +- ipn/store.go | 6 + ipn/store/awsstore/store_aws.go | 5 + ipn/store/kubestore/store_kube.go | 5 + ipn/store/mem/store_mem.go | 14 ++ ipn/store/stores.go | 140 +++++++++++++++ ipn/store_test.go | 14 ++ tailcfg/tailcfg.go | 3 + tstest/integration/integration.go | 16 +- tstest/integration/integration_test.go | 59 +++++++ .../tailscaled_deps_test_darwin.go | 1 + .../tailscaled_deps_test_freebsd.go | 1 + .../integration/tailscaled_deps_test_linux.go | 1 + .../tailscaled_deps_test_openbsd.go | 1 + util/syspolicy/policy_keys.go | 5 + 24 files changed, 546 insertions(+), 26 deletions(-) diff --git a/atomicfile/atomicfile.go b/atomicfile/atomicfile.go index b3c8c93da..9cae9bb75 100644 --- a/atomicfile/atomicfile.go +++ b/atomicfile/atomicfile.go @@ -48,5 +48,9 @@ func WriteFile(filename string, data []byte, perm os.FileMode) (err error) { if err := f.Close(); err != nil { return err } - return rename(tmpName, filename) + return Rename(tmpName, filename) } + +// Rename srcFile to dstFile, similar to [os.Rename] but preserving file +// attributes and ACLs on Windows. +func Rename(srcFile, dstFile string) error { return rename(srcFile, dstFile) } diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 61b811c12..3987b0c26 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -64,6 +64,7 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/multierr" "tailscale.com/util/osshare" + "tailscale.com/util/syspolicy" "tailscale.com/version" "tailscale.com/version/distro" "tailscale.com/wgengine" @@ -126,6 +127,7 @@ var args struct { debug string port uint16 statepath string + encryptState bool statedir string socketpath string birdSocketPath string @@ -193,6 +195,7 @@ func main() { flag.StringVar(&args.tunname, "tun", defaultTunName(), `tunnel interface name; use "userspace-networking" (beta) to not use TUN`) flag.Var(flagtype.PortValue(&args.port, defaultPort()), "port", "UDP port to listen on for WireGuard and peer-to-peer traffic; 0 means automatically select") flag.StringVar(&args.statepath, "state", "", "absolute path of state file; use 'kube:' to use Kubernetes secrets or 'arn:aws:ssm:...' to store in AWS SSM; use 'mem:' to not store state and register as an ephemeral node. If empty and --statedir is provided, the default is /tailscaled.state. Default: "+paths.DefaultTailscaledStateFile()) + flag.BoolVar(&args.encryptState, "encrypt-state", defaultEncryptState(), "encrypt the state file on disk; uses TPM on Linux and Windows, on all other platforms this flag is not supported") flag.StringVar(&args.statedir, "statedir", "", "path to directory for storage of config state, TLS certs, temporary incoming Taildrop files, etc. 
If empty, it's derived from --state when possible.") flag.StringVar(&args.socketpath, "socket", paths.DefaultTailscaledSocket(), "path of the service unix socket") flag.StringVar(&args.birdSocketPath, "bird-socket", "", "path of the bird unix socket") @@ -268,6 +271,28 @@ func main() { } } + if args.encryptState { + if runtime.GOOS != "linux" && runtime.GOOS != "windows" { + log.SetFlags(0) + log.Fatalf("--encrypt-state is not supported on %s", runtime.GOOS) + } + // Check if we have TPM support in this build. + if !store.HasKnownProviderPrefix(store.TPMPrefix + "/") { + log.SetFlags(0) + log.Fatal("--encrypt-state is not supported in this build of tailscaled") + } + // Check if we have TPM access. + if !hostinfo.New().TPM.Present() { + log.SetFlags(0) + log.Fatal("--encrypt-state is not supported on this device or a TPM is not accessible") + } + // Check for conflicting prefix in --state, like arn: or kube:. + if args.statepath != "" && store.HasKnownProviderPrefix(args.statepath) { + log.SetFlags(0) + log.Fatal("--encrypt-state can only be used with --state set to a local file path") + } + } + if args.disableLogs { envknob.SetNoLogsNoSupport() } @@ -315,13 +340,17 @@ func trySynologyMigration(p string) error { } func statePathOrDefault() string { + var path string if args.statepath != "" { - return args.statepath + path = args.statepath } - if args.statedir != "" { - return filepath.Join(args.statedir, "tailscaled.state") + if path == "" && args.statedir != "" { + path = filepath.Join(args.statedir, "tailscaled.state") } - return "" + if path != "" && !store.HasKnownProviderPrefix(path) && args.encryptState { + path = store.TPMPrefix + path + } + return path } // serverOptions is the configuration of the Tailscale node agent. @@ -974,3 +1003,15 @@ func applyIntegrationTestEnvKnob() { } } } + +func defaultEncryptState() bool { + if runtime.GOOS != "windows" && runtime.GOOS != "linux" { + // TPM encryption is only configurable on Windows and Linux. Other + // platforms either use system APIs and are not configurable + // (Android/Apple), or don't support any form of encryption yet + // (plan9/FreeBSD/etc). 
+ return false + } + v, _ := syspolicy.GetBoolean(syspolicy.EncryptState, false) + return v +} diff --git a/cmd/tsconnect/src/lib/js-state-store.ts b/cmd/tsconnect/src/lib/js-state-store.ts index e57dfd98e..7f2fc8087 100644 --- a/cmd/tsconnect/src/lib/js-state-store.ts +++ b/cmd/tsconnect/src/lib/js-state-store.ts @@ -10,4 +10,7 @@ export const sessionStateStorage: IPNStateStorage = { getState(id) { return window.sessionStorage[`ipn-state-${id}`] || "" }, + all() { + return JSON.stringify(window.sessionStorage) + }, } diff --git a/cmd/tsconnect/src/types/wasm_js.d.ts b/cmd/tsconnect/src/types/wasm_js.d.ts index 492197ccb..f47a972b0 100644 --- a/cmd/tsconnect/src/types/wasm_js.d.ts +++ b/cmd/tsconnect/src/types/wasm_js.d.ts @@ -44,6 +44,7 @@ declare global { interface IPNStateStorage { setState(id: string, value: string): void getState(id: string): string + all(): string } type IPNConfig = { diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index 779a87e49..c5ff56120 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -15,6 +15,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "iter" "log" "math/rand/v2" "net" @@ -579,6 +580,29 @@ func (s *jsStateStore) WriteState(id ipn.StateKey, bs []byte) error { return nil } +func (s *jsStateStore) All() iter.Seq2[ipn.StateKey, []byte] { + return func(yield func(ipn.StateKey, []byte) bool) { + jsValue := s.jsStateStorage.Call("all") + if jsValue.String() == "" { + return + } + buf, err := hex.DecodeString(jsValue.String()) + if err != nil { + return + } + var state map[string][]byte + if err := json.Unmarshal(buf, &state); err != nil { + return + } + + for k, v := range state { + if !yield(ipn.StateKey(k), v) { + break + } + } + } +} + func mapSlice[T any, M any](a []T, f func(T) M) []M { n := make([]M, len(a)) for i, e := range a { diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index 62ff94da7..c09d847bc 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -19,6 +19,7 @@ Tailscale version 1.80.0 and later Tailscale version 1.82.0 and later Tailscale version 1.84.0 and later + Tailscale version 1.86.0 and later Tailscale UI customization Settings @@ -67,7 +68,7 @@ If you disable or do not configure this policy setting, an interactive user logi See https://tailscale.com/kb/1315/mdm-keys#set-an-auth-key for more details.]]> Require using a specific Exit Node + Encrypt client state file stored on disk + diff --git a/docs/windows/policy/tailscale.admx b/docs/windows/policy/tailscale.admx index d97b24c36..0a8aa1a75 100644 --- a/docs/windows/policy/tailscale.admx +++ b/docs/windows/policy/tailscale.admx @@ -66,6 +66,10 @@ displayName="$(string.SINCE_V1_84)"> + + + @@ -365,5 +369,15 @@ + + + + + + + + + + diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 6feac85e3..64656d412 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -10,6 +10,7 @@ import ( "encoding/json" "errors" "fmt" + "iter" "log" "os" "path/filepath" @@ -37,7 +38,7 @@ func init() { hostinfo.RegisterHostinfoNewHook(func(hi *tailcfg.Hostinfo) { hi.TPM = infoOnce() }) - store.Register(storePrefix, newStore) + store.Register(store.TPMPrefix, newStore) } func info() *tailcfg.TPMInfo { @@ -103,10 +104,8 @@ func propToString(v uint32) string { return string(slices.DeleteFunc(chars, func(b byte) bool { return b < ' ' || b > '~' })) } -const storePrefix = "tpmseal:" - func newStore(logf logger.Logf, path string) (ipn.StateStore, 
error) { - path = strings.TrimPrefix(path, storePrefix) + path = strings.TrimPrefix(path, store.TPMPrefix) if err := paths.MkStateDir(filepath.Dir(path)); err != nil { return nil, fmt.Errorf("creating state directory: %w", err) } @@ -205,6 +204,19 @@ func (s *tpmStore) writeSealed() error { return atomicfile.WriteFile(s.path, buf, 0600) } +func (s *tpmStore) All() iter.Seq2[ipn.StateKey, []byte] { + return func(yield func(ipn.StateKey, []byte) bool) { + s.mu.Lock() + defer s.mu.Unlock() + + for k, v := range s.cache { + if !yield(k, v) { + break + } + } + } +} + // The nested levels of encoding and encryption are confusing, so here's what's // going on in plain English. // diff --git a/feature/tpm/tpm_test.go b/feature/tpm/tpm_test.go index a022b69b2..b08681354 100644 --- a/feature/tpm/tpm_test.go +++ b/feature/tpm/tpm_test.go @@ -6,13 +6,22 @@ package tpm import ( "bytes" "crypto/rand" + "encoding/json" "errors" + "fmt" + "maps" + "os" "path/filepath" + "slices" "strconv" + "strings" "testing" + "github.com/google/go-cmp/cmp" "tailscale.com/ipn" "tailscale.com/ipn/store" + "tailscale.com/ipn/store/mem" + "tailscale.com/types/logger" ) func TestPropToString(t *testing.T) { @@ -29,11 +38,9 @@ func TestPropToString(t *testing.T) { } func skipWithoutTPM(t testing.TB) { - tpm, err := open() - if err != nil { + if !tpmSupported() { t.Skip("TPM not available") } - tpm.Close() } func TestSealUnseal(t *testing.T) { @@ -67,7 +74,7 @@ func TestSealUnseal(t *testing.T) { func TestStore(t *testing.T) { skipWithoutTPM(t) - path := storePrefix + filepath.Join(t.TempDir(), "state") + path := store.TPMPrefix + filepath.Join(t.TempDir(), "state") store, err := newStore(t.Logf, path) if err != nil { t.Fatal(err) @@ -180,3 +187,153 @@ func BenchmarkStore(b *testing.B) { }) } } + +func TestMigrateStateToTPM(t *testing.T) { + if !tpmSupported() { + t.Logf("using mock tpmseal provider") + store.RegisterForTest(t, store.TPMPrefix, newMockTPMSeal) + } + + storePath := filepath.Join(t.TempDir(), "store") + // Make sure migration doesn't cause a failure when no state file exists. + if _, err := store.New(t.Logf, store.TPMPrefix+storePath); err != nil { + t.Fatalf("store.New failed for new tpmseal store: %v", err) + } + os.Remove(storePath) + + initial, err := store.New(t.Logf, storePath) + if err != nil { + t.Fatalf("store.New failed for new file store: %v", err) + } + + // Populate initial state file. + content := map[ipn.StateKey][]byte{ + "foo": []byte("bar"), + "baz": []byte("qux"), + } + for k, v := range content { + if err := initial.WriteState(k, v); err != nil { + t.Fatal(err) + } + } + // Expected file keys for plaintext and sealed versions of state. 
+ keysPlaintext := []string{"foo", "baz"} + keysTPMSeal := []string{"key", "nonce", "data"} + + for _, tt := range []struct { + desc string + path string + wantKeys []string + }{ + { + desc: "plaintext-to-plaintext", + path: storePath, + wantKeys: keysPlaintext, + }, + { + desc: "plaintext-to-tpmseal", + path: store.TPMPrefix + storePath, + wantKeys: keysTPMSeal, + }, + { + desc: "tpmseal-to-tpmseal", + path: store.TPMPrefix + storePath, + wantKeys: keysTPMSeal, + }, + { + desc: "tpmseal-to-plaintext", + path: storePath, + wantKeys: keysPlaintext, + }, + } { + t.Run(tt.desc, func(t *testing.T) { + s, err := store.New(t.Logf, tt.path) + if err != nil { + t.Fatalf("migration failed: %v", err) + } + gotContent := maps.Collect(s.All()) + if diff := cmp.Diff(content, gotContent); diff != "" { + t.Errorf("unexpected content after migration, diff:\n%s", diff) + } + + buf, err := os.ReadFile(storePath) + if err != nil { + t.Fatal(err) + } + var data map[string]any + if err := json.Unmarshal(buf, &data); err != nil { + t.Fatal(err) + } + gotKeys := slices.Collect(maps.Keys(data)) + slices.Sort(gotKeys) + slices.Sort(tt.wantKeys) + if diff := cmp.Diff(gotKeys, tt.wantKeys); diff != "" { + t.Errorf("unexpected content keys after migration, diff:\n%s", diff) + } + }) + } +} + +func tpmSupported() bool { + tpm, err := open() + if err != nil { + return false + } + tpm.Close() + return true +} + +type mockTPMSealProvider struct { + path string + mem.Store +} + +func newMockTPMSeal(logf logger.Logf, path string) (ipn.StateStore, error) { + path, ok := strings.CutPrefix(path, store.TPMPrefix) + if !ok { + return nil, fmt.Errorf("%q missing tpmseal: prefix", path) + } + s := &mockTPMSealProvider{path: path, Store: mem.Store{}} + buf, err := os.ReadFile(path) + if errors.Is(err, os.ErrNotExist) { + return s, s.flushState() + } + if err != nil { + return nil, err + } + var data struct { + Key string + Nonce string + Data map[ipn.StateKey][]byte + } + if err := json.Unmarshal(buf, &data); err != nil { + return nil, err + } + if data.Key == "" || data.Nonce == "" { + return nil, fmt.Errorf("%q missing key or nonce", path) + } + for k, v := range data.Data { + s.Store.WriteState(k, v) + } + return s, nil +} + +func (p *mockTPMSealProvider) WriteState(k ipn.StateKey, v []byte) error { + if err := p.Store.WriteState(k, v); err != nil { + return err + } + return p.flushState() +} + +func (p *mockTPMSealProvider) flushState() error { + data := map[string]any{ + "key": "foo", + "nonce": "bar", + "data": maps.Collect(p.Store.All()), + } + buf, err := json.Marshal(data) + if err != nil { + return err + } + return os.WriteFile(p.path, buf, 0600) +} diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 2921de203..eb3664385 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1013,17 +1013,13 @@ func TestEditPrefsHasNoKeys(t *testing.T) { } type testStateStorage struct { - mem mem.Store + mem.Store written atomic.Bool } -func (s *testStateStorage) ReadState(id ipn.StateKey) ([]byte, error) { - return s.mem.ReadState(id) -} - func (s *testStateStorage) WriteState(id ipn.StateKey, bs []byte) error { s.written.Store(true) - return s.mem.WriteState(id, bs) + return s.Store.WriteState(id, bs) } // awaitWrite clears the "I've seen writes" bit, in prep for a future diff --git a/ipn/store.go b/ipn/store.go index 550aa8cba..e176e4842 100644 --- a/ipn/store.go +++ b/ipn/store.go @@ -8,6 +8,7 @@ import ( "context" "errors" "fmt" + "iter" "net" "strconv" ) @@ -83,6 +84,11 @@ type 
StateStore interface { // instead, which only writes if the value is different from what's // already in the store. WriteState(id StateKey, bs []byte) error + // All returns an iterator over all StateStore keys. Using ReadState or + // WriteState is not safe while iterating and can lead to a deadlock. + // The order of keys in the iterator is not specified and may change + // between runs. + All() iter.Seq2[StateKey, []byte] } // WriteState is a wrapper around store.WriteState that only writes if diff --git a/ipn/store/awsstore/store_aws.go b/ipn/store/awsstore/store_aws.go index 40bbbf037..523d1657b 100644 --- a/ipn/store/awsstore/store_aws.go +++ b/ipn/store/awsstore/store_aws.go @@ -10,6 +10,7 @@ import ( "context" "errors" "fmt" + "iter" "net/url" "regexp" "strings" @@ -253,3 +254,7 @@ func (s *awsStore) persistState() error { _, err = s.ssmClient.PutParameter(context.TODO(), in) return err } + +func (s *awsStore) All() iter.Seq2[ipn.StateKey, []byte] { + return s.memory.All() +} diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 14025bbb4..f6bedbf0b 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -7,6 +7,7 @@ package kubestore import ( "context" "fmt" + "iter" "log" "net" "os" @@ -428,3 +429,7 @@ func sanitizeKey[T ~string](k T) string { return '_' }, string(k)) } + +func (s *Store) All() iter.Seq2[ipn.StateKey, []byte] { + return s.memory.All() +} diff --git a/ipn/store/mem/store_mem.go b/ipn/store/mem/store_mem.go index 6f474ce99..6c22aefd5 100644 --- a/ipn/store/mem/store_mem.go +++ b/ipn/store/mem/store_mem.go @@ -7,6 +7,7 @@ package mem import ( "bytes" "encoding/json" + "iter" "sync" xmaps "golang.org/x/exp/maps" @@ -85,3 +86,16 @@ func (s *Store) ExportToJSON() ([]byte, error) { } return json.MarshalIndent(s.cache, "", " ") } + +func (s *Store) All() iter.Seq2[ipn.StateKey, []byte] { + return func(yield func(ipn.StateKey, []byte) bool) { + s.mu.Lock() + defer s.mu.Unlock() + + for k, v := range s.cache { + if !yield(k, v) { + break + } + } + } +} diff --git a/ipn/store/stores.go b/ipn/store/stores.go index 1a98574c9..43c796399 100644 --- a/ipn/store/stores.go +++ b/ipn/store/stores.go @@ -7,10 +7,14 @@ package store import ( "bytes" "encoding/json" + "errors" "fmt" + "iter" + "maps" "os" "path/filepath" "runtime" + "slices" "strings" "sync" @@ -20,6 +24,7 @@ import ( "tailscale.com/paths" "tailscale.com/types/logger" "tailscale.com/util/mak" + "tailscale.com/util/testenv" ) // Provider returns a StateStore for the provided path. @@ -32,6 +37,9 @@ func init() { var knownStores map[string]Provider +// TPMPrefix is the path prefix used for TPM-encrypted StateStore. +const TPMPrefix = "tpmseal:" + // New returns a StateStore based on the provided arg // and registered stores. // The arg is of the form "prefix:rest", where prefix was previously @@ -53,12 +61,23 @@ func New(logf logger.Logf, path string) (ipn.StateStore, error) { if strings.HasPrefix(path, prefix) { // We can't strip the prefix here as some NewStoreFunc (like arn:) // expect the prefix. 
+ if prefix == TPMPrefix { + if runtime.GOOS == "windows" { + path = TPMPrefix + TryWindowsAppDataMigration(logf, strings.TrimPrefix(path, TPMPrefix)) + } + if err := maybeMigrateLocalStateFile(logf, path); err != nil { + return nil, fmt.Errorf("failed to migrate existing state file to TPM-sealed format: %w", err) + } + } return sf(logf, path) } } if runtime.GOOS == "windows" { path = TryWindowsAppDataMigration(logf, path) } + if err := maybeMigrateLocalStateFile(logf, path); err != nil { + return nil, fmt.Errorf("failed to migrate existing TPM-sealed state file to plaintext format: %w", err) + } return NewFileStore(logf, path) } @@ -77,6 +96,29 @@ func Register(prefix string, fn Provider) { mak.Set(&knownStores, prefix, fn) } +// RegisterForTest registers a prefix to be used for NewStore in tests. An +// existing registered prefix will be replaced. +func RegisterForTest(t testenv.TB, prefix string, fn Provider) { + if len(prefix) == 0 { + panic("prefix is empty") + } + old := maps.Clone(knownStores) + t.Cleanup(func() { knownStores = old }) + + mak.Set(&knownStores, prefix, fn) +} + +// HasKnownProviderPrefix reports whether path uses one of the registered +// Provider prefixes. +func HasKnownProviderPrefix(path string) bool { + for prefix := range knownStores { + if strings.HasPrefix(path, prefix) { + return true + } + } + return false +} + // TryWindowsAppDataMigration attempts to copy the Windows state file // from its old location to the new location. (Issue 2856) // @@ -179,3 +221,101 @@ func (s *FileStore) WriteState(id ipn.StateKey, bs []byte) error { } return atomicfile.WriteFile(s.path, bs, 0600) } + +func (s *FileStore) All() iter.Seq2[ipn.StateKey, []byte] { + return func(yield func(ipn.StateKey, []byte) bool) { + s.mu.Lock() + defer s.mu.Unlock() + + for k, v := range s.cache { + if !yield(k, v) { + break + } + } + } +} + +func maybeMigrateLocalStateFile(logf logger.Logf, path string) error { + path, toTPM := strings.CutPrefix(path, TPMPrefix) + + // Extract JSON keys from the file on disk and guess what kind it is. + bs, err := os.ReadFile(path) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err + } + var content map[string]any + if err := json.Unmarshal(bs, &content); err != nil { + return fmt.Errorf("failed to unmarshal %q: %w", path, err) + } + keys := slices.Sorted(maps.Keys(content)) + tpmKeys := []string{"key", "nonce", "data"} + slices.Sort(tpmKeys) + // TPM-sealed files will have exactly these keys. + existingFileSealed := slices.Equal(keys, tpmKeys) + // Plaintext files for nodes that registered at least once will have this + // key, plus other dynamic ones. + _, existingFilePlaintext := content["_machinekey"] + isTPM := existingFileSealed && !existingFilePlaintext + + if isTPM == toTPM { + // No migration needed. + return nil + } + + newTPMStore, ok := knownStores[TPMPrefix] + if !ok { + return errors.New("this build does not support TPM integration") + } + + // Open from (old format) and to (new format) stores for migration. The + // "to" store will be at tmpPath. + var from, to ipn.StateStore + tmpPath := path + ".tmp" + if toTPM { + // Migrate plaintext file to be TPM-sealed. + from, err = NewFileStore(logf, path) + if err != nil { + return fmt.Errorf("NewFileStore(%q): %w", path, err) + } + to, err = newTPMStore(logf, TPMPrefix+tmpPath) + if err != nil { + return fmt.Errorf("newTPMStore(%q): %w", tmpPath, err) + } + } else { + // Migrate TPM-selaed file to plaintext. 
+ from, err = newTPMStore(logf, TPMPrefix+path) + if err != nil { + return fmt.Errorf("newTPMStore(%q): %w", path, err) + } + to, err = NewFileStore(logf, tmpPath) + if err != nil { + return fmt.Errorf("NewFileStore(%q): %w", tmpPath, err) + } + } + defer os.Remove(tmpPath) + + // Copy all the items. This is pretty inefficient, because both stores + // write the file to disk for each WriteState, but that's ok for a one-time + // migration. + for k, v := range from.All() { + if err := to.WriteState(k, v); err != nil { + return err + } + } + + // Finally, overwrite the state file with the new one we created at + // tmpPath. + if err := atomicfile.Rename(tmpPath, path); err != nil { + return err + } + + if toTPM { + logf("migrated %q from plaintext to TPM-sealed format", path) + } else { + logf("migrated %q from TPM-sealed to plaintext format", path) + } + return nil +} diff --git a/ipn/store_test.go b/ipn/store_test.go index fcc082d8a..4dd7321b9 100644 --- a/ipn/store_test.go +++ b/ipn/store_test.go @@ -5,6 +5,7 @@ package ipn import ( "bytes" + "iter" "sync" "testing" @@ -31,6 +32,19 @@ func (s *memStore) WriteState(k StateKey, v []byte) error { return nil } +func (s *memStore) All() iter.Seq2[StateKey, []byte] { + return func(yield func(StateKey, []byte) bool) { + s.mu.Lock() + defer s.mu.Unlock() + + for k, v := range s.m { + if !yield(k, v) { + break + } + } + } +} + func TestWriteState(t *testing.T) { var ss StateStore = new(memStore) WriteState(ss, "foo", []byte("bar")) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 4679609f3..23f3cc49b 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -908,6 +908,9 @@ type TPMInfo struct { SpecRevision int `json:",omitempty"` } +// Present reports whether a TPM device is present on this machine. +func (t *TPMInfo) Present() bool { return t != nil } + // ServiceName is the name of a service, of the form `svc:dns-label`. Services // represent some kind of application provided for users of the tailnet with a // MagicDNS name and possibly dedicated IP addresses. 
Currently (2024-01-21), diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index d64bfbbd9..987bb569a 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -569,11 +569,12 @@ type TestNode struct { env *TestEnv tailscaledParser *nodeOutputParser - dir string // temp dir for sock & state - configFile string // or empty for none - sockFile string - stateFile string - upFlagGOOS string // if non-empty, sets TS_DEBUG_UP_FLAG_GOOS for cmd/tailscale CLI + dir string // temp dir for sock & state + configFile string // or empty for none + sockFile string + stateFile string + upFlagGOOS string // if non-empty, sets TS_DEBUG_UP_FLAG_GOOS for cmd/tailscale CLI + encryptState bool mu sync.Mutex onLogLine []func([]byte) @@ -640,7 +641,7 @@ func (n *TestNode) diskPrefs() *ipn.Prefs { if _, err := os.ReadFile(n.stateFile); err != nil { t.Fatalf("reading prefs: %v", err) } - fs, err := store.NewFileStore(nil, n.stateFile) + fs, err := store.New(nil, n.stateFile) if err != nil { t.Fatalf("reading prefs, NewFileStore: %v", err) } @@ -822,6 +823,9 @@ func (n *TestNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { if n.configFile != "" { cmd.Args = append(cmd.Args, "--config="+n.configFile) } + if n.encryptState { + cmd.Args = append(cmd.Args, "--encrypt-state") + } cmd.Env = append(os.Environ(), "TS_DEBUG_PERMIT_HTTP_C2N=1", "TS_LOG_TARGET="+n.env.LogCatcherServer.URL, diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 90cc7e443..7cb251f31 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -21,6 +21,7 @@ import ( "os/exec" "path/filepath" "regexp" + "runtime" "strconv" "sync/atomic" "testing" @@ -32,6 +33,7 @@ import ( "tailscale.com/client/tailscale" "tailscale.com/clientupdate" "tailscale.com/cmd/testwrapper/flakytest" + "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/net/tsaddr" "tailscale.com/net/tstun" @@ -1470,3 +1472,60 @@ func TestNetstackUDPLoopback(t *testing.T) { d1.MustCleanShutdown(t) } + +func TestEncryptStateMigration(t *testing.T) { + if !hostinfo.New().TPM.Present() { + t.Skip("TPM not available") + } + if runtime.GOOS != "linux" && runtime.GOOS != "windows" { + t.Skip("--encrypt-state for tailscaled state not supported on this platform") + } + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t) + n := NewTestNode(t, env) + + runNode := func(t *testing.T, wantStateKeys []string) { + t.Helper() + + // Run the node. + d := n.StartDaemon() + n.AwaitResponding() + n.MustUp() + n.AwaitRunning() + + // Check the contents of the state file. + buf, err := os.ReadFile(n.stateFile) + if err != nil { + t.Fatalf("reading %q: %v", n.stateFile, err) + } + t.Logf("state file content:\n%s", buf) + var content map[string]any + if err := json.Unmarshal(buf, &content); err != nil { + t.Fatalf("parsing %q: %v", n.stateFile, err) + } + for _, k := range wantStateKeys { + if _, ok := content[k]; !ok { + t.Errorf("state file is missing key %q", k) + } + } + + // Stop the node. 
+ d.MustCleanShutdown(t) + } + + wantPlaintextStateKeys := []string{"_machinekey", "_current-profile", "_profiles"} + wantEncryptedStateKeys := []string{"key", "nonce", "data"} + t.Run("regular-state", func(t *testing.T) { + n.encryptState = false + runNode(t, wantPlaintextStateKeys) + }) + t.Run("migrate-to-encrypted", func(t *testing.T) { + n.encryptState = true + runNode(t, wantEncryptedStateKeys) + }) + t.Run("migrate-to-plaintext", func(t *testing.T) { + n.encryptState = false + runNode(t, wantPlaintextStateKeys) + }) +} diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index 321ba2566..a73c6ebf6 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -51,6 +51,7 @@ import ( _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" + _ "tailscale.com/util/syspolicy" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index 321ba2566..a73c6ebf6 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -51,6 +51,7 @@ import ( _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" + _ "tailscale.com/util/syspolicy" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index 321ba2566..a73c6ebf6 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -51,6 +51,7 @@ import ( _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" + _ "tailscale.com/util/syspolicy" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index 321ba2566..a73c6ebf6 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -51,6 +51,7 @@ import ( _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" + _ "tailscale.com/util/syspolicy" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index ed00d0004..b19a3e7fe 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -120,6 +120,10 @@ const ( LogSCMInteractions Key = "LogSCMInteractions" FlushDNSOnSessionUnlock Key = "FlushDNSOnSessionUnlock" + // EncryptState is a boolean setting that specifies whether to encrypt the + // tailscaled state file with a TPM device. + EncryptState Key = "EncryptState" + // PostureChecking indicates if posture checking is enabled and the client shall gather // posture data. // Key is a string value that specifies an option: "always", "never", "user-decides". 
@@ -186,6 +190,7 @@ var implicitDefinitions = []*setting.Definition{ setting.NewDefinition(ExitNodeID, setting.DeviceSetting, setting.StringValue), setting.NewDefinition(ExitNodeIP, setting.DeviceSetting, setting.StringValue), setting.NewDefinition(FlushDNSOnSessionUnlock, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(EncryptState, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(Hostname, setting.DeviceSetting, setting.StringValue), setting.NewDefinition(LogSCMInteractions, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(LogTarget, setting.DeviceSetting, setting.StringValue), From b2bf7e988e110e2a7245ac67792f666e1cd114f1 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 26 Jun 2025 18:39:47 -0700 Subject: [PATCH 1007/1708] wgengine/magicsock: add envknob to toggle UDP relay feature (#16396) Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/debugknobs.go | 6 ++++++ wgengine/magicsock/debugknobs_stubs.go | 1 + wgengine/magicsock/magicsock.go | 6 +++--- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/wgengine/magicsock/debugknobs.go b/wgengine/magicsock/debugknobs.go index f8fd9f040..055895388 100644 --- a/wgengine/magicsock/debugknobs.go +++ b/wgengine/magicsock/debugknobs.go @@ -62,6 +62,12 @@ var ( // //lint:ignore U1000 used on Linux/Darwin only debugPMTUD = envknob.RegisterBool("TS_DEBUG_PMTUD") + // debugAssumeUDPRelayCapable forces magicsock to assume that all peers are + // UDP relay capable clients and servers. This will eventually be replaced + // by a [tailcfg.CapabilityVersion] comparison. It enables early testing of + // the UDP relay feature before we have established related + // [tailcfg.CapabilityVersion]'s. + debugAssumeUDPRelayCapable = envknob.RegisterBool("TS_DEBUG_ASSUME_UDP_RELAY_CAPABLE") // Hey you! Adding a new debugknob? Make sure to stub it out in the // debugknobs_stubs.go file too. 
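// Illustrative usage only (the surrounding command line is an assumption, not
// part of this change): the knob can be set at daemon start while testing the
// relay path, e.g.
//
//	TS_DEBUG_ASSUME_UDP_RELAY_CAPABLE=true tailscaled
//
// which makes capVerIsRelayCapable and capVerIsRelayServerCapable (changed in
// magicsock.go below) report true for every peer.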
) diff --git a/wgengine/magicsock/debugknobs_stubs.go b/wgengine/magicsock/debugknobs_stubs.go index 336d7baa1..3d23b1f8e 100644 --- a/wgengine/magicsock/debugknobs_stubs.go +++ b/wgengine/magicsock/debugknobs_stubs.go @@ -31,3 +31,4 @@ func debugRingBufferMaxSizeBytes() int { return 0 } func inTest() bool { return false } func debugPeerMap() bool { return false } func pretendpoints() []netip.AddrPort { return []netip.AddrPort{} } +func debugAssumeUDPRelayCapable() bool { return false } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index d7b522699..e76d0054f 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2592,12 +2592,12 @@ func (c *Conn) SetProbeUDPLifetime(v bool) { func capVerIsRelayCapable(version tailcfg.CapabilityVersion) bool { // TODO(jwhited): implement once capVer is bumped - return version == math.MinInt32 + return version == math.MinInt32 || debugAssumeUDPRelayCapable() } func capVerIsRelayServerCapable(version tailcfg.CapabilityVersion) bool { - // TODO(jwhited): implement once capVer is bumped - return version == math.MinInt32 + // TODO(jwhited): implement once capVer is bumped & update Test_peerAPIIfCandidateRelayServer + return version == math.MinInt32 || debugAssumeUDPRelayCapable() } // onFilterUpdate is called when a [FilterUpdate] is received over the From b32a01b2dc44986ce83d2dd091f53c31e9a25391 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 26 Jun 2025 19:30:14 -0700 Subject: [PATCH 1008/1708] disco,net/udprelay,wgengine/magicsock: support relay re-binding (#16388) Relay handshakes may now occur multiple times over the lifetime of a relay server endpoint. Handshake messages now include a handshake generation, which is client specified, as a means to trigger safe challenge reset server-side. Relay servers continue to enforce challenge values as single use. They will only send a given value once, in reply to the first arriving bind message for a handshake generation. VNI has been added to the handshake messages, and we expect the outer Geneve header value to match the sealed value upon reception. Remote peer disco pub key is now also included in handshake messages, and it must match the receiver's expectation for the remote, participating party. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- disco/disco.go | 110 ++++++++++++++++------ disco/disco_test.go | 27 ++++-- net/udprelay/server.go | 146 +++++++++++++++-------------- net/udprelay/server_test.go | 80 +++++++++++++--- wgengine/magicsock/relaymanager.go | 46 +++++++-- 5 files changed, 276 insertions(+), 133 deletions(-) diff --git a/disco/disco.go b/disco/disco.go index 0854eb4c0..d4623c119 100644 --- a/disco/disco.go +++ b/disco/disco.go @@ -321,79 +321,131 @@ const ( BindUDPRelayHandshakeStateAnswerReceived ) -// bindUDPRelayEndpointLen is the length of a marshalled BindUDPRelayEndpoint -// message, without the message header. -const bindUDPRelayEndpointLen = BindUDPRelayEndpointChallengeLen +// bindUDPRelayEndpointCommonLen is the length of a marshalled +// [BindUDPRelayEndpointCommon], without the message header. +const bindUDPRelayEndpointCommonLen = 72 + +// BindUDPRelayChallengeLen is the length of the Challenge field carried in +// [BindUDPRelayEndpointChallenge] & [BindUDPRelayEndpointAnswer] messages. +const BindUDPRelayChallengeLen = 32 + +// BindUDPRelayEndpointCommon contains fields that are common across all 3 +// UDP relay handshake message types. 
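// On the wire the common portion is 4 bytes VNI + 4 bytes Generation +
// 32 bytes RemoteKey (key.DiscoPublicRawLen) + 32 bytes Challenge = 72 bytes,
// i.e. bindUDPRelayEndpointCommonLen.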
All 4 field values are expected to be +// consistent for the lifetime of a handshake besides Challenge, which is +// irrelevant in a [BindUDPRelayEndpoint] message. +type BindUDPRelayEndpointCommon struct { + // VNI is the Geneve header Virtual Network Identifier field value, which + // must match this disco-sealed value upon reception. If they are + // non-matching it indicates the cleartext Geneve header was tampered with + // and/or mangled. + VNI uint32 + // Generation represents the handshake generation. Clients must set a new, + // nonzero value at the start of every handshake. + Generation uint32 + // RemoteKey is the disco key of the remote peer participating over this + // relay endpoint. + RemoteKey key.DiscoPublic + // Challenge is set by the server in a [BindUDPRelayEndpointChallenge] + // message, and expected to be echoed back by the client in a + // [BindUDPRelayEndpointAnswer] message. Its value is irrelevant in a + // [BindUDPRelayEndpoint] message, where it simply serves a padding purpose + // ensuring all handshake messages are equal in size. + Challenge [BindUDPRelayChallengeLen]byte +} + +// encode encodes m in b. b must be at least bindUDPRelayEndpointCommonLen bytes +// long. +func (m *BindUDPRelayEndpointCommon) encode(b []byte) { + binary.BigEndian.PutUint32(b, m.VNI) + b = b[4:] + binary.BigEndian.PutUint32(b, m.Generation) + b = b[4:] + m.RemoteKey.AppendTo(b[:0]) + b = b[key.DiscoPublicRawLen:] + copy(b, m.Challenge[:]) +} + +// decode decodes m from b. +func (m *BindUDPRelayEndpointCommon) decode(b []byte) error { + if len(b) < bindUDPRelayEndpointCommonLen { + return errShort + } + m.VNI = binary.BigEndian.Uint32(b) + b = b[4:] + m.Generation = binary.BigEndian.Uint32(b) + b = b[4:] + m.RemoteKey = key.DiscoPublicFromRaw32(mem.B(b[:key.DiscoPublicRawLen])) + b = b[key.DiscoPublicRawLen:] + copy(m.Challenge[:], b[:BindUDPRelayChallengeLen]) + return nil +} // BindUDPRelayEndpoint is the first messaged transmitted from UDP relay client -// towards UDP relay server as part of the 3-way bind handshake. It is padded to -// match the length of BindUDPRelayEndpointChallenge. This message type is -// currently considered experimental and is not yet tied to a +// towards UDP relay server as part of the 3-way bind handshake. This message +// type is currently considered experimental and is not yet tied to a // tailcfg.CapabilityVersion. type BindUDPRelayEndpoint struct { + BindUDPRelayEndpointCommon } func (m *BindUDPRelayEndpoint) AppendMarshal(b []byte) []byte { - ret, _ := appendMsgHeader(b, TypeBindUDPRelayEndpoint, v0, bindUDPRelayEndpointLen) + ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpoint, v0, bindUDPRelayEndpointCommonLen) + m.BindUDPRelayEndpointCommon.encode(d) return ret } func parseBindUDPRelayEndpoint(ver uint8, p []byte) (m *BindUDPRelayEndpoint, err error) { m = new(BindUDPRelayEndpoint) + err = m.BindUDPRelayEndpointCommon.decode(p) + if err != nil { + return nil, err + } return m, nil } -// BindUDPRelayEndpointChallengeLen is the length of a marshalled -// BindUDPRelayEndpointChallenge message, without the message header. -const BindUDPRelayEndpointChallengeLen = 32 - // BindUDPRelayEndpointChallenge is transmitted from UDP relay server towards // UDP relay client in response to a BindUDPRelayEndpoint message as part of the // 3-way bind handshake. This message type is currently considered experimental // and is not yet tied to a tailcfg.CapabilityVersion. 
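// As implemented in net/udprelay below, the server copies VNI, Generation, and
// RemoteKey from the client's BindUDPRelayEndpoint and fills Challenge with
// fresh random bytes; the client must echo that Challenge back in its
// BindUDPRelayEndpointAnswer for the binding to complete.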
type BindUDPRelayEndpointChallenge struct { - Challenge [BindUDPRelayEndpointChallengeLen]byte + BindUDPRelayEndpointCommon } func (m *BindUDPRelayEndpointChallenge) AppendMarshal(b []byte) []byte { - ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpointChallenge, v0, BindUDPRelayEndpointChallengeLen) - copy(d, m.Challenge[:]) + ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpointChallenge, v0, bindUDPRelayEndpointCommonLen) + m.BindUDPRelayEndpointCommon.encode(d) return ret } func parseBindUDPRelayEndpointChallenge(ver uint8, p []byte) (m *BindUDPRelayEndpointChallenge, err error) { - if len(p) < BindUDPRelayEndpointChallengeLen { - return nil, errShort - } m = new(BindUDPRelayEndpointChallenge) - copy(m.Challenge[:], p[:]) + err = m.BindUDPRelayEndpointCommon.decode(p) + if err != nil { + return nil, err + } return m, nil } -// bindUDPRelayEndpointAnswerLen is the length of a marshalled -// BindUDPRelayEndpointAnswer message, without the message header. -const bindUDPRelayEndpointAnswerLen = BindUDPRelayEndpointChallengeLen - // BindUDPRelayEndpointAnswer is transmitted from UDP relay client to UDP relay // server in response to a BindUDPRelayEndpointChallenge message. This message // type is currently considered experimental and is not yet tied to a // tailcfg.CapabilityVersion. type BindUDPRelayEndpointAnswer struct { - Answer [bindUDPRelayEndpointAnswerLen]byte + BindUDPRelayEndpointCommon } func (m *BindUDPRelayEndpointAnswer) AppendMarshal(b []byte) []byte { - ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpointAnswer, v0, bindUDPRelayEndpointAnswerLen) - copy(d, m.Answer[:]) + ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpointAnswer, v0, bindUDPRelayEndpointCommonLen) + m.BindUDPRelayEndpointCommon.encode(d) return ret } func parseBindUDPRelayEndpointAnswer(ver uint8, p []byte) (m *BindUDPRelayEndpointAnswer, err error) { - if len(p) < bindUDPRelayEndpointAnswerLen { - return nil, errShort - } m = new(BindUDPRelayEndpointAnswer) - copy(m.Answer[:], p[:]) + err = m.BindUDPRelayEndpointCommon.decode(p) + if err != nil { + return nil, err + } return m, nil } diff --git a/disco/disco_test.go b/disco/disco_test.go index f2a29a744..9fb71ff83 100644 --- a/disco/disco_test.go +++ b/disco/disco_test.go @@ -16,6 +16,15 @@ import ( ) func TestMarshalAndParse(t *testing.T) { + relayHandshakeCommon := BindUDPRelayEndpointCommon{ + VNI: 1, + Generation: 2, + RemoteKey: key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 30: 30, 31: 31})), + Challenge: [BindUDPRelayChallengeLen]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }, + } + tests := []struct { name string want string @@ -86,26 +95,24 @@ func TestMarshalAndParse(t *testing.T) { }, { name: "bind_udp_relay_endpoint", - m: &BindUDPRelayEndpoint{}, - want: "04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + m: &BindUDPRelayEndpoint{ + relayHandshakeCommon, + }, + want: "04 00 00 00 00 01 00 00 00 02 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", }, { name: "bind_udp_relay_endpoint_challenge", m: &BindUDPRelayEndpointChallenge{ - Challenge: [BindUDPRelayEndpointChallengeLen]byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 
0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - }, + relayHandshakeCommon, }, - want: "05 00 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", + want: "05 00 00 00 00 01 00 00 00 02 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", }, { name: "bind_udp_relay_endpoint_answer", m: &BindUDPRelayEndpointAnswer{ - Answer: [bindUDPRelayEndpointAnswerLen]byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - }, + relayHandshakeCommon, }, - want: "06 00 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", + want: "06 00 00 00 00 01 00 00 00 02 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", }, { name: "call_me_maybe_via", diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 8b9e95fb1..e32f8917c 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -96,12 +96,13 @@ type serverEndpoint struct { // indexing of this array aligns with the following fields, e.g. // discoSharedSecrets[0] is the shared secret to use when sealing // Disco protocol messages for transmission towards discoPubKeys[0]. - discoPubKeys pairOfDiscoPubKeys - discoSharedSecrets [2]key.DiscoShared - handshakeState [2]disco.BindUDPRelayHandshakeState - addrPorts [2]netip.AddrPort - lastSeen [2]time.Time // TODO(jwhited): consider using mono.Time - challenge [2][disco.BindUDPRelayEndpointChallengeLen]byte + discoPubKeys pairOfDiscoPubKeys + discoSharedSecrets [2]key.DiscoShared + handshakeGeneration [2]uint32 // or zero if a handshake has never started for that relay leg + handshakeAddrPorts [2]netip.AddrPort // or zero value if a handshake has never started for that relay leg + boundAddrPorts [2]netip.AddrPort // or zero value if a handshake has never completed for that relay leg + lastSeen [2]time.Time // TODO(jwhited): consider using mono.Time + challenge [2][disco.BindUDPRelayChallengeLen]byte lamportID uint64 vni uint32 @@ -112,69 +113,77 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex if senderIndex != 0 && senderIndex != 1 { return } - handshakeState := e.handshakeState[senderIndex] - if handshakeState == disco.BindUDPRelayHandshakeStateAnswerReceived { - // this sender is already bound - return + + otherSender := 0 + if senderIndex == 0 { + otherSender = 1 } + + validateVNIAndRemoteKey := func(common disco.BindUDPRelayEndpointCommon) error { + if common.VNI != e.vni { + return errors.New("mismatching VNI") + } + if common.RemoteKey.Compare(e.discoPubKeys[otherSender]) != 0 { + return errors.New("mismatching RemoteKey") + } + return nil + } + switch discoMsg := discoMsg.(type) { case *disco.BindUDPRelayEndpoint: - switch handshakeState { - case disco.BindUDPRelayHandshakeStateInit: - // set sender addr - e.addrPorts[senderIndex] = from - fallthrough - case disco.BindUDPRelayHandshakeStateChallengeSent: - if from != e.addrPorts[senderIndex] { - // this is a later arriving bind from a different source, or - // a retransmit and the sender's source has changed, discard - return - } - m := 
new(disco.BindUDPRelayEndpointChallenge) - copy(m.Challenge[:], e.challenge[senderIndex][:]) - reply := make([]byte, packet.GeneveFixedHeaderLength, 512) - gh := packet.GeneveHeader{Control: true, VNI: e.vni, Protocol: packet.GeneveProtocolDisco} - err := gh.Encode(reply) - if err != nil { - return - } - reply = append(reply, disco.Magic...) - reply = serverDisco.AppendTo(reply) - box := e.discoSharedSecrets[senderIndex].Seal(m.AppendMarshal(nil)) - reply = append(reply, box...) - uw.WriteMsgUDPAddrPort(reply, nil, from) - // set new state - e.handshakeState[senderIndex] = disco.BindUDPRelayHandshakeStateChallengeSent + err := validateVNIAndRemoteKey(discoMsg.BindUDPRelayEndpointCommon) + if err != nil { + // silently drop return - default: - // disco.BindUDPRelayEndpoint is unexpected in all other handshake states + } + if discoMsg.Generation == 0 { + // Generation must be nonzero, silently drop + return + } + if e.handshakeGeneration[senderIndex] == discoMsg.Generation { + // we've seen this generation before, silently drop + return + } + e.handshakeGeneration[senderIndex] = discoMsg.Generation + e.handshakeAddrPorts[senderIndex] = from + m := new(disco.BindUDPRelayEndpointChallenge) + m.VNI = e.vni + m.Generation = discoMsg.Generation + m.RemoteKey = e.discoPubKeys[otherSender] + rand.Read(e.challenge[senderIndex][:]) + copy(m.Challenge[:], e.challenge[senderIndex][:]) + reply := make([]byte, packet.GeneveFixedHeaderLength, 512) + gh := packet.GeneveHeader{Control: true, VNI: e.vni, Protocol: packet.GeneveProtocolDisco} + err = gh.Encode(reply) + if err != nil { return } + reply = append(reply, disco.Magic...) + reply = serverDisco.AppendTo(reply) + box := e.discoSharedSecrets[senderIndex].Seal(m.AppendMarshal(nil)) + reply = append(reply, box...) + uw.WriteMsgUDPAddrPort(reply, nil, from) + return case *disco.BindUDPRelayEndpointAnswer: - switch handshakeState { - case disco.BindUDPRelayHandshakeStateChallengeSent: - if from != e.addrPorts[senderIndex] { - // sender source has changed - return - } - if !bytes.Equal(discoMsg.Answer[:], e.challenge[senderIndex][:]) { - // bad answer - return - } - // sender is now bound - // TODO: Consider installing a fast path via netfilter or similar to - // relay (NAT) data packets for this serverEndpoint. - e.handshakeState[senderIndex] = disco.BindUDPRelayHandshakeStateAnswerReceived - // record last seen as bound time - e.lastSeen[senderIndex] = time.Now() + err := validateVNIAndRemoteKey(discoMsg.BindUDPRelayEndpointCommon) + if err != nil { + // silently drop return - default: - // disco.BindUDPRelayEndpointAnswer is unexpected in all other handshake - // states, or we've already handled it + } + generation := e.handshakeGeneration[senderIndex] + if generation == 0 || // we have no active handshake + generation != discoMsg.Generation || // mismatching generation for the active handshake + e.handshakeAddrPorts[senderIndex] != from || // mismatching source for the active handshake + !bytes.Equal(e.challenge[senderIndex][:], discoMsg.Challenge[:]) { // mismatching answer for the active handshake + // silently drop return } + // Handshake complete. Update the binding for this sender. 
+ e.boundAddrPorts[senderIndex] = from + e.lastSeen[senderIndex] = time.Now() // record last seen as bound time + return default: - // unexpected Disco message type + // unexpected message types, silently drop return } } @@ -225,12 +234,12 @@ func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeade } var to netip.AddrPort switch { - case from == e.addrPorts[0]: + case from == e.boundAddrPorts[0]: e.lastSeen[0] = time.Now() - to = e.addrPorts[1] - case from == e.addrPorts[1]: + to = e.boundAddrPorts[1] + case from == e.boundAddrPorts[1]: e.lastSeen[1] = time.Now() - to = e.addrPorts[0] + to = e.boundAddrPorts[0] default: // unrecognized source return @@ -240,11 +249,6 @@ func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeade return } - if e.isBound() { - // control packet, but serverEndpoint is already bound - return - } - if gh.Protocol != packet.GeneveProtocolDisco { // control packet, but not Disco return @@ -267,11 +271,11 @@ func (e *serverEndpoint) isExpired(now time.Time, bindLifetime, steadyStateLifet return false } -// isBound returns true if both clients have completed their 3-way handshake, +// isBound returns true if both clients have completed a 3-way handshake, // otherwise false. func (e *serverEndpoint) isBound() bool { - return e.handshakeState[0] == disco.BindUDPRelayHandshakeStateAnswerReceived && - e.handshakeState[1] == disco.BindUDPRelayHandshakeStateAnswerReceived + return e.boundAddrPorts[0].IsValid() && + e.boundAddrPorts[1].IsValid() } // NewServer constructs a [Server] listening on 0.0.0.0:'port'. IPv6 is not yet @@ -591,8 +595,6 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv e.discoSharedSecrets[0] = s.disco.Shared(e.discoPubKeys[0]) e.discoSharedSecrets[1] = s.disco.Shared(e.discoPubKeys[1]) e.vni, s.vniPool = s.vniPool[0], s.vniPool[1:] - rand.Read(e.challenge[0][:]) - rand.Read(e.challenge[1][:]) s.byDisco[pair] = e s.byVNI[e.vni] = e diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index a4e5ca451..3fcb9b8b1 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -19,23 +19,27 @@ import ( ) type testClient struct { - vni uint32 - local key.DiscoPrivate - server key.DiscoPublic - uc *net.UDPConn + vni uint32 + handshakeGeneration uint32 + local key.DiscoPrivate + remote key.DiscoPublic + server key.DiscoPublic + uc *net.UDPConn } -func newTestClient(t *testing.T, vni uint32, serverEndpoint netip.AddrPort, local key.DiscoPrivate, server key.DiscoPublic) *testClient { +func newTestClient(t *testing.T, vni uint32, serverEndpoint netip.AddrPort, local key.DiscoPrivate, remote, server key.DiscoPublic) *testClient { rAddr := &net.UDPAddr{IP: serverEndpoint.Addr().AsSlice(), Port: int(serverEndpoint.Port())} uc, err := net.DialUDP("udp4", nil, rAddr) if err != nil { t.Fatal(err) } return &testClient{ - vni: vni, - local: local, - server: server, - uc: uc, + vni: vni, + handshakeGeneration: 1, + local: local, + remote: remote, + server: server, + uc: uc, } } @@ -137,13 +141,35 @@ func (c *testClient) readControlDiscoMsg(t *testing.T) disco.Message { } func (c *testClient) handshake(t *testing.T) { - c.writeControlDiscoMsg(t, &disco.BindUDPRelayEndpoint{}) + generation := c.handshakeGeneration + c.handshakeGeneration++ + common := disco.BindUDPRelayEndpointCommon{ + VNI: c.vni, + Generation: generation, + RemoteKey: c.remote, + } + c.writeControlDiscoMsg(t, &disco.BindUDPRelayEndpoint{ + BindUDPRelayEndpointCommon: common, + }) msg := 
c.readControlDiscoMsg(t) challenge, ok := msg.(*disco.BindUDPRelayEndpointChallenge) if !ok { - t.Fatal("unexepcted disco message type") + t.Fatal("unexpected disco message type") + } + if challenge.Generation != common.Generation { + t.Fatalf("rx'd challenge.Generation (%d) != %d", challenge.Generation, common.Generation) + } + if challenge.VNI != common.VNI { + t.Fatalf("rx'd challenge.VNI (%d) != %d", challenge.VNI, common.VNI) + } + if challenge.RemoteKey != common.RemoteKey { + t.Fatalf("rx'd challenge.RemoteKey (%v) != %v", challenge.RemoteKey, common.RemoteKey) } - c.writeControlDiscoMsg(t, &disco.BindUDPRelayEndpointAnswer{Answer: challenge.Challenge}) + answer := &disco.BindUDPRelayEndpointAnswer{ + BindUDPRelayEndpointCommon: common, + } + answer.Challenge = challenge.Challenge + c.writeControlDiscoMsg(t, answer) } func (c *testClient) close() { @@ -179,9 +205,9 @@ func TestServer(t *testing.T) { if len(endpoint.AddrPorts) != 1 { t.Fatalf("unexpected endpoint.AddrPorts: %v", endpoint.AddrPorts) } - tcA := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, endpoint.ServerDisco) + tcA := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, discoB.Public(), endpoint.ServerDisco) defer tcA.close() - tcB := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, endpoint.ServerDisco) + tcB := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, discoA.Public(), endpoint.ServerDisco) defer tcB.close() tcA.handshake(t) @@ -209,4 +235,30 @@ func TestServer(t *testing.T) { if !bytes.Equal(txToA, rxFromB) { t.Fatal("unexpected msg B->A") } + + tcAOnNewPort := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, discoB.Public(), endpoint.ServerDisco) + tcAOnNewPort.handshakeGeneration = tcA.handshakeGeneration + 1 + defer tcAOnNewPort.close() + + // Handshake client A on a new source IP:port, verify we receive packets on the new binding + tcAOnNewPort.handshake(t) + txToAOnNewPort := []byte{7, 8, 9} + tcB.writeDataPkt(t, txToAOnNewPort) + rxFromB = tcAOnNewPort.readDataPkt(t) + if !bytes.Equal(txToAOnNewPort, rxFromB) { + t.Fatal("unexpected msg B->A") + } + + tcBOnNewPort := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, discoA.Public(), endpoint.ServerDisco) + tcBOnNewPort.handshakeGeneration = tcB.handshakeGeneration + 1 + defer tcBOnNewPort.close() + + // Handshake client B on a new source IP:port, verify we receive packets on the new binding + tcBOnNewPort.handshake(t) + txToBOnNewPort := []byte{7, 8, 9} + tcAOnNewPort.writeDataPkt(t, txToBOnNewPort) + rxFromA = tcBOnNewPort.readDataPkt(t) + if !bytes.Equal(txToBOnNewPort, rxFromA) { + t.Fatal("unexpected msg A->B") + } } diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 7b378838a..6418a4364 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -45,6 +45,7 @@ type relayManager struct { handshakeWorkByServerDiscoVNI map[serverDiscoVNI]*relayHandshakeWork handshakeWorkAwaitingPong map[*relayHandshakeWork]addrPortVNI addrPortVNIToHandshakeWork map[addrPortVNI]*relayHandshakeWork + handshakeGeneration uint32 // =================================================================== // The following chan fields serve event inputs to a single goroutine, @@ -590,7 +591,12 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay go r.sendCallMeMaybeVia(work.ep, work.se) } - go r.handshakeServerEndpoint(work) + r.handshakeGeneration++ + if r.handshakeGeneration == 0 { // generation must 
be nonzero + r.handshakeGeneration++ + } + + go r.handshakeServerEndpoint(work, r.handshakeGeneration) } // sendCallMeMaybeVia sends a [disco.CallMeMaybeVia] to ep over DERP. It must be @@ -616,7 +622,7 @@ func (r *relayManager) sendCallMeMaybeVia(ep *endpoint, se udprelay.ServerEndpoi ep.c.sendDiscoMessage(epAddr{ap: derpAddr}, ep.publicKey, epDisco.key, callMeMaybeVia, discoVerboseLog) } -func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { +func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generation uint32) { done := relayEndpointHandshakeWorkDoneEvent{work: work} r.ensureDiscoInfoFor(work) @@ -627,8 +633,21 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { work.cancel() }() + epDisco := work.ep.disco.Load() + if epDisco == nil { + return + } + + common := disco.BindUDPRelayEndpointCommon{ + VNI: work.se.VNI, + Generation: generation, + RemoteKey: epDisco.key, + } + sentBindAny := false - bind := &disco.BindUDPRelayEndpoint{} + bind := &disco.BindUDPRelayEndpoint{ + BindUDPRelayEndpointCommon: common, + } vni := virtualNetworkID{} vni.set(work.se.VNI) for _, addrPort := range work.se.AddrPorts { @@ -661,10 +680,6 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { if len(sentPingAt) == limitPings { return } - epDisco := work.ep.disco.Load() - if epDisco == nil { - return - } txid := stun.NewTxID() sentPingAt[txid] = time.Now() ping := &disco.Ping{ @@ -673,13 +688,24 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { } go func() { if withAnswer != nil { - answer := &disco.BindUDPRelayEndpointAnswer{Answer: *withAnswer} + answer := &disco.BindUDPRelayEndpointAnswer{BindUDPRelayEndpointCommon: common} + answer.Challenge = *withAnswer work.ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, work.se.ServerDisco, answer, discoVerboseLog) } work.ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, epDisco.key, ping, discoVerboseLog) }() } + validateVNIAndRemoteKey := func(common disco.BindUDPRelayEndpointCommon) error { + if common.VNI != work.se.VNI { + return errors.New("mismatching VNI") + } + if common.RemoteKey.Compare(epDisco.key) != 0 { + return errors.New("mismatching RemoteKey") + } + return nil + } + // This for{select{}} is responsible for handshaking and tx'ing ping/pong // when the handshake is complete. 
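// As a rough sketch of the exchange this loop participates in (message and
// field names from the disco package; retransmit and timeout details omitted):
//
//	relay client                                       relay server
//	  -- BindUDPRelayEndpoint{VNI, Generation, RemoteKey} -->
//	  <-- BindUDPRelayEndpointChallenge{..., Challenge} -----
//	  -- BindUDPRelayEndpointAnswer{..., Challenge echoed} -->
//	  then disco ping/pong with the remote peer over the bound path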
for { @@ -689,6 +715,10 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { case msgEvent := <-work.rxDiscoMsgCh: switch msg := msgEvent.msg.(type) { case *disco.BindUDPRelayEndpointChallenge: + err := validateVNIAndRemoteKey(msg.BindUDPRelayEndpointCommon) + if err != nil { + continue + } if handshakeState >= disco.BindUDPRelayHandshakeStateAnswerSent { continue } From 4a7b8afabfe71cde16445b416e7c93274ff657b8 Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Fri, 20 Jun 2025 13:21:31 +0200 Subject: [PATCH 1009/1708] cmd/tailscale: add tlpub: prefix to lock log output Updates tailscale/corp#23258 Signed-off-by: Kristoffer Dalby --- cmd/tailscale/cli/network-lock.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index 871a931b5..9ab2b11b0 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -623,7 +623,7 @@ func nlDescribeUpdate(update ipnstate.NetworkLockUpdate, color bool) (string, er printKey := func(key *tka.Key, prefix string) { fmt.Fprintf(&stanza, "%sType: %s\n", prefix, key.Kind.String()) if keyID, err := key.ID(); err == nil { - fmt.Fprintf(&stanza, "%sKeyID: %x\n", prefix, keyID) + fmt.Fprintf(&stanza, "%sKeyID: tlpub:%x\n", prefix, keyID) } else { // Older versions of the client shouldn't explode when they encounter an // unknown key type. @@ -645,10 +645,10 @@ func nlDescribeUpdate(update ipnstate.NetworkLockUpdate, color bool) (string, er case tka.AUMAddKey.String(): printKey(aum.Key, "") case tka.AUMRemoveKey.String(): - fmt.Fprintf(&stanza, "KeyID: %x\n", aum.KeyID) + fmt.Fprintf(&stanza, "KeyID: tlpub:%x\n", aum.KeyID) case tka.AUMUpdateKey.String(): - fmt.Fprintf(&stanza, "KeyID: %x\n", aum.KeyID) + fmt.Fprintf(&stanza, "KeyID: tlpub:%x\n", aum.KeyID) if aum.Votes != nil { fmt.Fprintf(&stanza, "Votes: %d\n", aum.Votes) } From df786be14d40a9aadd82c5900bcf4d79b3e6af4f Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Fri, 27 Jun 2025 11:54:01 +0200 Subject: [PATCH 1010/1708] cmd/tailscale: use text format for TKA head Updates tailscale/corp#23258 Signed-off-by: Kristoffer Dalby --- cmd/tailscale/cli/network-lock.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index 9ab2b11b0..d19909576 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -639,7 +639,11 @@ func nlDescribeUpdate(update ipnstate.NetworkLockUpdate, color bool) (string, er return "", fmt.Errorf("decoding: %w", err) } - fmt.Fprintf(&stanza, "%supdate %x (%s)%s\n", terminalYellow, update.Hash, update.Change, terminalClear) + tkaHead, err := aum.Hash().MarshalText() + if err != nil { + return "", fmt.Errorf("decoding AUM hash: %w", err) + } + fmt.Fprintf(&stanza, "%supdate %s (%s)%s\n", terminalYellow, string(tkaHead), update.Change, terminalClear) switch update.Change { case tka.AUMAddKey.String(): From 53f67c43961a322048bc2c858181b331d2f35695 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 27 Jun 2025 10:03:56 -0400 Subject: [PATCH 1011/1708] util/eventbus: fix docstrings (#16401) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updates #15160 Signed-off-by: Claus Lensbøl --- util/eventbus/bus.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 45d12da2f..e5bf7329a 100644 --- 
a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -40,8 +40,8 @@ type Bus struct { clients set.Set[*Client] } -// New returns a new bus. Use [PublisherOf] to make event publishers, -// and [Bus.Queue] and [Subscribe] to make event subscribers. +// New returns a new bus. Use [Publish] to make event publishers, +// and [Subscribe] and [SubscribeFunc] to make event subscribers. func New() *Bus { ret := &Bus{ write: make(chan PublishedEvent), From f81baa2d56795267df835f770d0779d414aed283 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Fri, 27 Jun 2025 17:12:14 +0100 Subject: [PATCH 1012/1708] cmd/k8s-operator, k8s-operator: support Static Endpoints on ProxyGroups (#16115) updates: #14674 Signed-off-by: chaosinthecrd --- .../deploy/chart/templates/operator-rbac.yaml | 3 + .../crds/tailscale.com_proxyclasses.yaml | 45 + .../crds/tailscale.com_proxygroups.yaml | 5 + .../deploy/manifests/operator.yaml | 58 ++ cmd/k8s-operator/nodeport-service-ports.go | 203 +++++ .../nodeport-services-ports_test.go | 277 +++++++ cmd/k8s-operator/operator.go | 88 +- cmd/k8s-operator/proxyclass.go | 33 +- cmd/k8s-operator/proxyclass_test.go | 118 ++- cmd/k8s-operator/proxygroup.go | 389 ++++++++- cmd/k8s-operator/proxygroup_specs.go | 45 +- cmd/k8s-operator/proxygroup_test.go | 771 +++++++++++++++++- k8s-operator/api.md | 55 ++ .../apis/v1alpha1/types_proxyclass.go | 122 +++ .../apis/v1alpha1/types_proxygroup.go | 4 + .../apis/v1alpha1/zz_generated.deepcopy.go | 91 +++ 16 files changed, 2244 insertions(+), 63 deletions(-) create mode 100644 cmd/k8s-operator/nodeport-service-ports.go create mode 100644 cmd/k8s-operator/nodeport-services-ports_test.go diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml index 00d8318ac..5eb920a6f 100644 --- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml @@ -16,6 +16,9 @@ kind: ClusterRole metadata: name: tailscale-operator rules: +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events", "services", "services/status"] verbs: ["create","delete","deletecollection","get","list","patch","update","watch"] diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index 154123475..fcf1b27aa 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -2203,6 +2203,51 @@ spec: won't make it *more* imbalanced. It's a required field. type: string + staticEndpoints: + description: |- + Configuration for 'static endpoints' on proxies in order to facilitate + direct connections from other devices on the tailnet. + See https://tailscale.com/kb/1445/kubernetes-operator-customization#static-endpoints. + type: object + required: + - nodePort + properties: + nodePort: + description: The configuration for static endpoints using NodePort Services. + type: object + required: + - ports + properties: + ports: + description: |- + The port ranges from which the operator will select NodePorts for the Services. + You must ensure that firewall rules allow UDP ingress traffic for these ports + to the node's external IPs. + The ports must be in the range of service node ports for the cluster (default `30000-32767`). + See https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport. 
+ type: array + minItems: 1 + items: + type: object + required: + - port + properties: + endPort: + description: |- + endPort indicates that the range of ports from port to endPort if set, inclusive, + should be used. This field cannot be defined if the port field is not defined. + The endPort must be either unset, or equal or greater than port. + type: integer + port: + description: port represents a port selected to be used. This is a required field. + type: integer + selector: + description: |- + A selector which will be used to select the node's that will have their `ExternalIP`'s advertised + by the ProxyGroup as Static Endpoints. + type: object + additionalProperties: + type: string tailscale: description: |- TailscaleConfig contains options to configure the tailscale-specific diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index 4b9149e23..f695e989d 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -196,6 +196,11 @@ spec: If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the node. type: string + staticEndpoints: + description: StaticEndpoints are user configured, 'static' endpoints by which tailnet peers can reach this device. + type: array + items: + type: string tailnetIPs: description: |- TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 1d910cf92..fa18a5deb 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2679,6 +2679,51 @@ spec: type: array type: object type: object + staticEndpoints: + description: |- + Configuration for 'static endpoints' on proxies in order to facilitate + direct connections from other devices on the tailnet. + See https://tailscale.com/kb/1445/kubernetes-operator-customization#static-endpoints. + properties: + nodePort: + description: The configuration for static endpoints using NodePort Services. + properties: + ports: + description: |- + The port ranges from which the operator will select NodePorts for the Services. + You must ensure that firewall rules allow UDP ingress traffic for these ports + to the node's external IPs. + The ports must be in the range of service node ports for the cluster (default `30000-32767`). + See https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport. + items: + properties: + endPort: + description: |- + endPort indicates that the range of ports from port to endPort if set, inclusive, + should be used. This field cannot be defined if the port field is not defined. + The endPort must be either unset, or equal or greater than port. + type: integer + port: + description: port represents a port selected to be used. This is a required field. + type: integer + required: + - port + type: object + minItems: 1 + type: array + selector: + additionalProperties: + type: string + description: |- + A selector which will be used to select the node's that will have their `ExternalIP`'s advertised + by the ProxyGroup as Static Endpoints. + type: object + required: + - ports + type: object + required: + - nodePort + type: object tailscale: description: |- TailscaleConfig contains options to configure the tailscale-specific @@ -2976,6 +3021,11 @@ spec: If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the node. 
type: string + staticEndpoints: + description: StaticEndpoints are user configured, 'static' endpoints by which tailnet peers can reach this device. + items: + type: string + type: array tailnetIPs: description: |- TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) @@ -4791,6 +4841,14 @@ kind: ClusterRole metadata: name: tailscale-operator rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch - apiGroups: - "" resources: diff --git a/cmd/k8s-operator/nodeport-service-ports.go b/cmd/k8s-operator/nodeport-service-ports.go new file mode 100644 index 000000000..a9504e3e9 --- /dev/null +++ b/cmd/k8s-operator/nodeport-service-ports.go @@ -0,0 +1,203 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "context" + "fmt" + "math/rand/v2" + "regexp" + "sort" + "strconv" + "strings" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + k8soperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" +) + +const ( + tailscaledPortMax = 65535 + tailscaledPortMin = 1024 + testSvcName = "test-node-port-range" + + invalidSvcNodePort = 777777 +) + +// getServicesNodePortRange is a hacky function that attempts to determine Service NodePort range by +// creating a deliberately invalid Service with a NodePort that is too large and parsing the returned +// validation error. Returns nil if unable to determine port range. +// https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport +func getServicesNodePortRange(ctx context.Context, c client.Client, tsNamespace string, logger *zap.SugaredLogger) *tsapi.PortRange { + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: testSvcName, + Namespace: tsNamespace, + Labels: map[string]string{ + kubetypes.LabelManaged: "true", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: testSvcName, + Port: 8080, + TargetPort: intstr.FromInt32(8080), + Protocol: corev1.ProtocolUDP, + NodePort: invalidSvcNodePort, + }, + }, + }, + } + + // NOTE(ChaosInTheCRD): ideally this would be a server side dry-run but could not get it working + err := c.Create(ctx, svc) + if err == nil { + return nil + } + + if validPorts := getServicesNodePortRangeFromErr(err.Error()); validPorts != "" { + pr, err := parseServicesNodePortRange(validPorts) + if err != nil { + logger.Debugf("failed to parse NodePort range set for Kubernetes Cluster: %w", err) + return nil + } + + return pr + } + + return nil +} + +func getServicesNodePortRangeFromErr(err string) string { + reg := regexp.MustCompile(`\d{1,5}-\d{1,5}`) + matches := reg.FindAllString(err, -1) + if len(matches) != 1 { + return "" + } + + return matches[0] +} + +// parseServicesNodePortRange converts the `ValidPorts` string field in the Kubernetes PortAllocator error and converts it to +// PortRange +func parseServicesNodePortRange(p string) (*tsapi.PortRange, error) { + parts := strings.Split(p, "-") + s, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return nil, fmt.Errorf("failed to parse string as uint16: %w", err) + } + + var e uint64 + switch len(parts) { + case 1: + e = uint64(s) + case 2: + e, err = strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return nil, fmt.Errorf("failed to parse string as uint16: %w", err) + 
} + default: + return nil, fmt.Errorf("failed to parse port range %q", p) + } + + portRange := &tsapi.PortRange{Port: uint16(s), EndPort: uint16(e)} + if !portRange.IsValid() { + return nil, fmt.Errorf("port range %q is not valid", portRange.String()) + } + + return portRange, nil +} + +// validateNodePortRanges checks that the port range specified is valid. It also ensures that the specified ranges +// lie within the NodePort Service port range specified for the Kubernetes API Server. +func validateNodePortRanges(ctx context.Context, c client.Client, kubeRange *tsapi.PortRange, pc *tsapi.ProxyClass) error { + if pc.Spec.StaticEndpoints == nil { + return nil + } + + portRanges := pc.Spec.StaticEndpoints.NodePort.Ports + + if kubeRange != nil { + for _, pr := range portRanges { + if !kubeRange.Contains(pr.Port) || (pr.EndPort != 0 && !kubeRange.Contains(pr.EndPort)) { + return fmt.Errorf("range %q is not within Cluster configured range %q", pr.String(), kubeRange.String()) + } + } + } + + for _, r := range portRanges { + if !r.IsValid() { + return fmt.Errorf("port range %q is invalid", r.String()) + } + } + + // TODO(ChaosInTheCRD): if a ProxyClass that made another invalid (due to port range clash) is deleted, + // the invalid ProxyClass doesn't get reconciled on, and therefore will not go valid. We should fix this. + proxyClassRanges, err := getPortsForProxyClasses(ctx, c) + if err != nil { + return fmt.Errorf("failed to get port ranges for ProxyClasses: %w", err) + } + + for _, r := range portRanges { + for pcName, pcr := range proxyClassRanges { + if pcName == pc.Name { + continue + } + if pcr.ClashesWith(r) { + return fmt.Errorf("port ranges for ProxyClass %q clash with existing ProxyClass %q", pc.Name, pcName) + } + } + } + + if len(portRanges) == 1 { + return nil + } + + sort.Slice(portRanges, func(i, j int) bool { + return portRanges[i].Port < portRanges[j].Port + }) + + for i := 1; i < len(portRanges); i++ { + prev := portRanges[i-1] + curr := portRanges[i] + if curr.Port <= prev.Port || curr.Port <= prev.EndPort { + return fmt.Errorf("overlapping ranges: %q and %q", prev.String(), curr.String()) + } + } + + return nil +} + +// getPortsForProxyClasses gets the port ranges for all the other existing ProxyClasses +func getPortsForProxyClasses(ctx context.Context, c client.Client) (map[string]tsapi.PortRanges, error) { + pcs := new(tsapi.ProxyClassList) + + err := c.List(ctx, pcs) + if err != nil { + return nil, fmt.Errorf("failed to list ProxyClasses: %w", err) + } + + portRanges := make(map[string]tsapi.PortRanges) + for _, i := range pcs.Items { + if !k8soperator.ProxyClassIsReady(&i) { + continue + } + if se := i.Spec.StaticEndpoints; se != nil && se.NodePort != nil { + portRanges[i.Name] = se.NodePort.Ports + } + } + + return portRanges, nil +} + +func getRandomPort() uint16 { + return uint16(rand.IntN(tailscaledPortMax-tailscaledPortMin+1) + tailscaledPortMin) +} diff --git a/cmd/k8s-operator/nodeport-services-ports_test.go b/cmd/k8s-operator/nodeport-services-ports_test.go new file mode 100644 index 000000000..9418bb844 --- /dev/null +++ b/cmd/k8s-operator/nodeport-services-ports_test.go @@ -0,0 +1,277 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/tstest" +) + +func 
TestGetServicesNodePortRangeFromErr(t *testing.T) { + tests := []struct { + name string + errStr string + want string + }{ + { + name: "valid_error_string", + errStr: "NodePort 777777 is not in the allowed range 30000-32767", + want: "30000-32767", + }, + { + name: "error_string_with_different_message", + errStr: "some other error without a port range", + want: "", + }, + { + name: "error_string_with_multiple_port_ranges", + errStr: "range 1000-2000 and another range 3000-4000", + want: "", + }, + { + name: "empty_error_string", + errStr: "", + want: "", + }, + { + name: "error_string_with_range_at_start", + errStr: "30000-32767 is the range", + want: "30000-32767", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getServicesNodePortRangeFromErr(tt.errStr); got != tt.want { + t.Errorf("got %v, want %v", got, tt.want) + } + }) + } +} + +func TestParseServicesNodePortRange(t *testing.T) { + tests := []struct { + name string + p string + want *tsapi.PortRange + wantErr bool + }{ + { + name: "valid_range", + p: "30000-32767", + want: &tsapi.PortRange{Port: 30000, EndPort: 32767}, + wantErr: false, + }, + { + name: "single_port_range", + p: "30000", + want: &tsapi.PortRange{Port: 30000, EndPort: 30000}, + wantErr: false, + }, + { + name: "invalid_format_non_numeric_end", + p: "30000-abc", + want: nil, + wantErr: true, + }, + { + name: "invalid_format_non_numeric_start", + p: "abc-32767", + want: nil, + wantErr: true, + }, + { + name: "empty_string", + p: "", + want: nil, + wantErr: true, + }, + { + name: "too_many_parts", + p: "1-2-3", + want: nil, + wantErr: true, + }, + { + name: "port_too_large_start", + p: "65536-65537", + want: nil, + wantErr: true, + }, + { + name: "port_too_large_end", + p: "30000-65536", + want: nil, + wantErr: true, + }, + { + name: "inverted_range", + p: "32767-30000", + want: nil, + wantErr: true, // IsValid() will fail + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + portRange, err := parseServicesNodePortRange(tt.p) + if (err != nil) != tt.wantErr { + t.Errorf("error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.wantErr { + return + } + + if portRange == nil { + t.Fatalf("got nil port range, expected %v", tt.want) + } + + if portRange.Port != tt.want.Port || portRange.EndPort != tt.want.EndPort { + t.Errorf("got = %v, want %v", portRange, tt.want) + } + }) + } +} + +func TestValidateNodePortRanges(t *testing.T) { + tests := []struct { + name string + portRanges []tsapi.PortRange + wantErr bool + }{ + { + name: "valid_ranges_with_unknown_kube_range", + portRanges: []tsapi.PortRange{ + {Port: 30003, EndPort: 30005}, + {Port: 30006, EndPort: 30007}, + }, + wantErr: false, + }, + { + name: "overlapping_ranges", + portRanges: []tsapi.PortRange{ + {Port: 30000, EndPort: 30010}, + {Port: 30005, EndPort: 30015}, + }, + wantErr: true, + }, + { + name: "adjacent_ranges_no_overlap", + portRanges: []tsapi.PortRange{ + {Port: 30010, EndPort: 30020}, + {Port: 30021, EndPort: 30022}, + }, + wantErr: false, + }, + { + name: "identical_ranges_are_overlapping", + portRanges: []tsapi.PortRange{ + {Port: 30005, EndPort: 30010}, + {Port: 30005, EndPort: 30010}, + }, + wantErr: true, + }, + { + name: "range_clashes_with_existing_proxyclass", + portRanges: []tsapi.PortRange{ + {Port: 31005, EndPort: 32070}, + }, + wantErr: true, + }, + } + + // as part of this test, we want to create an adjacent ProxyClass in order to ensure that if it clashes with the one created in this test + // that we get an error + 
cl := tstest.NewClock(tstest.ClockOpts{}) + opc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "other-pc", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Annotations: defaultProxyClassAnnotations, + }, + StaticEndpoints: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 31000}, {Port: 32000}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + }, + Status: tsapi.ProxyClassStatus{ + Conditions: []metav1.Condition{{ + Type: string(tsapi.ProxyClassReady), + Status: metav1.ConditionTrue, + Reason: reasonProxyClassValid, + Message: reasonProxyClassValid, + LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + }}, + }, + } + + fc := fake.NewClientBuilder(). + WithObjects(opc). + WithStatusSubresource(opc). + WithScheme(tsapi.GlobalScheme). + Build() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pc", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Annotations: defaultProxyClassAnnotations, + }, + StaticEndpoints: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: tt.portRanges, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + }, + Status: tsapi.ProxyClassStatus{ + Conditions: []metav1.Condition{{ + Type: string(tsapi.ProxyClassReady), + Status: metav1.ConditionTrue, + Reason: reasonProxyClassValid, + Message: reasonProxyClassValid, + LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + }}, + }, + } + err := validateNodePortRanges(context.Background(), fc, &tsapi.PortRange{Port: 30000, EndPort: 32767}, pc) + if (err != nil) != tt.wantErr { + t.Errorf("unexpected error: %v", err) + } + }) + } +} + +func TestGetRandomPort(t *testing.T) { + for range 100 { + port := getRandomPort() + if port < tailscaledPortMin || port > tailscaledPortMax { + t.Errorf("generated port %d which is out of range [%d, %d]", port, tailscaledPortMin, tailscaledPortMax) + } + } +} diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index a08dd4da8..cd1ae8158 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -26,7 +26,9 @@ import ( networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" + klabels "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" toolscache "k8s.io/client-go/tools/cache" @@ -39,6 +41,7 @@ import ( kzap "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "tailscale.com/client/local" "tailscale.com/client/tailscale" @@ -228,6 +231,17 @@ waitOnline: return s, tsc } +// predicate function for filtering to ensure we *don't* reconcile on tailscale managed Kubernetes Services +func serviceManagedResourceFilterPredicate() predicate.Predicate { + return predicate.NewPredicateFuncs(func(object client.Object) bool { + if svc, ok := object.(*corev1.Service); !ok { + return false + } else { + return !isManagedResource(svc) + } + }) +} + // runReconcilers starts the controller-runtime manager and registers the // ServiceReconciler. It blocks forever. 
func runReconcilers(opts reconcilerOpts) { @@ -374,7 +388,7 @@ func runReconcilers(opts reconcilerOpts) { ingressSvcFromEpsFilter := handler.EnqueueRequestsFromMapFunc(ingressSvcFromEps(mgr.GetClient(), opts.log.Named("service-pg-reconciler"))) err = builder. ControllerManagedBy(mgr). - For(&corev1.Service{}). + For(&corev1.Service{}, builder.WithPredicates(serviceManagedResourceFilterPredicate())). Named("service-pg-reconciler"). Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(HAServicesFromSecret(mgr.GetClient(), startlog))). Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). @@ -519,16 +533,19 @@ func runReconcilers(opts reconcilerOpts) { // ProxyClass reconciler gets triggered on ServiceMonitor CRD changes to ensure that any ProxyClasses, that // define that a ServiceMonitor should be created, were set to invalid because the CRD did not exist get // reconciled if the CRD is applied at a later point. + kPortRange := getServicesNodePortRange(context.Background(), mgr.GetClient(), opts.tailscaleNamespace, startlog) serviceMonitorFilter := handler.EnqueueRequestsFromMapFunc(proxyClassesWithServiceMonitor(mgr.GetClient(), opts.log)) err = builder.ControllerManagedBy(mgr). For(&tsapi.ProxyClass{}). Named("proxyclass-reconciler"). Watches(&apiextensionsv1.CustomResourceDefinition{}, serviceMonitorFilter). Complete(&ProxyClassReconciler{ - Client: mgr.GetClient(), - recorder: eventRecorder, - logger: opts.log.Named("proxyclass-reconciler"), - clock: tstime.DefaultClock{}, + Client: mgr.GetClient(), + nodePortRange: kPortRange, + recorder: eventRecorder, + tsNamespace: opts.tailscaleNamespace, + logger: opts.log.Named("proxyclass-reconciler"), + clock: tstime.DefaultClock{}, }) if err != nil { startlog.Fatal("could not create proxyclass reconciler: %v", err) @@ -587,9 +604,11 @@ func runReconcilers(opts reconcilerOpts) { // ProxyGroup reconciler. ownedByProxyGroupFilter := handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &tsapi.ProxyGroup{}) proxyClassFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForProxyGroup(mgr.GetClient(), startlog)) + nodeFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(nodeHandlerForProxyGroup(mgr.GetClient(), opts.defaultProxyClass, startlog)) err = builder.ControllerManagedBy(mgr). For(&tsapi.ProxyGroup{}). Named("proxygroup-reconciler"). + Watches(&corev1.Service{}, ownedByProxyGroupFilter). Watches(&appsv1.StatefulSet{}, ownedByProxyGroupFilter). Watches(&corev1.ConfigMap{}, ownedByProxyGroupFilter). Watches(&corev1.ServiceAccount{}, ownedByProxyGroupFilter). @@ -597,6 +616,7 @@ func runReconcilers(opts reconcilerOpts) { Watches(&rbacv1.Role{}, ownedByProxyGroupFilter). Watches(&rbacv1.RoleBinding{}, ownedByProxyGroupFilter). Watches(&tsapi.ProxyClass{}, proxyClassFilterForProxyGroup). + Watches(&corev1.Node{}, nodeFilterForProxyGroup). Complete(&ProxyGroupReconciler{ recorder: eventRecorder, Client: mgr.GetClient(), @@ -840,6 +860,64 @@ func proxyClassHandlerForConnector(cl client.Client, logger *zap.SugaredLogger) } } +// nodeHandlerForProxyGroup returns a handler that, for a given Node, returns a +// list of reconcile requests for ProxyGroups that should be reconciled for the +// Node event. ProxyGroups need to be reconciled for Node events if they are +// configured to expose tailscaled static endpoints to tailnet using NodePort +// Services. 
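// For example (values are illustrative only), a ProxyClass with
//
//	staticEndpoints:
//	  nodePort:
//	    ports:
//	      - port: 30000
//	        endPort: 30005
//	    selector:
//	      kubernetes.io/hostname: worker-1
//
// results in ProxyGroups using that class being re-reconciled whenever a Node
// whose labels match kubernetes.io/hostname=worker-1 changes.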
+func nodeHandlerForProxyGroup(cl client.Client, defaultProxyClass string, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + pgList := new(tsapi.ProxyGroupList) + if err := cl.List(ctx, pgList); err != nil { + logger.Debugf("error listing ProxyGroups for ProxyClass: %v", err) + return nil + } + + reqs := make([]reconcile.Request, 0) + for _, pg := range pgList.Items { + if pg.Spec.ProxyClass == "" && defaultProxyClass == "" { + continue + } + + pc := defaultProxyClass + if pc == "" { + pc = pg.Spec.ProxyClass + } + + proxyClass := &tsapi.ProxyClass{} + if err := cl.Get(ctx, types.NamespacedName{Name: pc}, proxyClass); err != nil { + logger.Debugf("error getting ProxyClass %q: %v", pg.Spec.ProxyClass, err) + return nil + } + + stat := proxyClass.Spec.StaticEndpoints + if stat == nil { + continue + } + + // If the selector is empty, all nodes match. + // TODO(ChaosInTheCRD): think about how this must be handled if we want to limit the number of nodes used + if len(stat.NodePort.Selector) == 0 { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&pg)}) + continue + } + + selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ + MatchLabels: stat.NodePort.Selector, + }) + if err != nil { + logger.Debugf("error converting `spec.staticEndpoints.nodePort.selector` to Selector: %v", err) + return nil + } + + if selector.Matches(klabels.Set(o.GetLabels())) { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&pg)}) + } + } + return reqs + } +} + // proxyClassHandlerForProxyGroup returns a handler that, for a given ProxyClass, // returns a list of reconcile requests for all Connectors that have // .spec.proxyClass set. diff --git a/cmd/k8s-operator/proxyclass.go b/cmd/k8s-operator/proxyclass.go index 5ec9897d0..2d51b351d 100644 --- a/cmd/k8s-operator/proxyclass.go +++ b/cmd/k8s-operator/proxyclass.go @@ -44,22 +44,24 @@ const ( type ProxyClassReconciler struct { client.Client - recorder record.EventRecorder - logger *zap.SugaredLogger - clock tstime.Clock + recorder record.EventRecorder + logger *zap.SugaredLogger + clock tstime.Clock + tsNamespace string mu sync.Mutex // protects following // managedProxyClasses is a set of all ProxyClass resources that we're currently // managing. This is only used for metrics. managedProxyClasses set.Slice[types.UID] + // nodePortRange is the NodePort range set for the Kubernetes Cluster. This is used + // when validating port ranges configured by users for spec.StaticEndpoints + nodePortRange *tsapi.PortRange } -var ( - // gaugeProxyClassResources tracks the number of ProxyClass resources - // that we're currently managing. - gaugeProxyClassResources = clientmetric.NewGauge("k8s_proxyclass_resources") -) +// gaugeProxyClassResources tracks the number of ProxyClass resources +// that we're currently managing. 
+var gaugeProxyClassResources = clientmetric.NewGauge("k8s_proxyclass_resources") func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { logger := pcr.logger.With("ProxyClass", req.Name) @@ -96,7 +98,7 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re pcr.mu.Unlock() oldPCStatus := pc.Status.DeepCopy() - if errs := pcr.validate(ctx, pc); errs != nil { + if errs := pcr.validate(ctx, pc, logger); errs != nil { msg := fmt.Sprintf(messageProxyClassInvalid, errs.ToAggregate().Error()) pcr.recorder.Event(pc, corev1.EventTypeWarning, reasonProxyClassInvalid, msg) tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, pc.Generation, pcr.clock, logger) @@ -112,7 +114,7 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re return reconcile.Result{}, nil } -func (pcr *ProxyClassReconciler) validate(ctx context.Context, pc *tsapi.ProxyClass) (violations field.ErrorList) { +func (pcr *ProxyClassReconciler) validate(ctx context.Context, pc *tsapi.ProxyClass, logger *zap.SugaredLogger) (violations field.ErrorList) { if sts := pc.Spec.StatefulSet; sts != nil { if len(sts.Labels) > 0 { if errs := metavalidation.ValidateLabels(sts.Labels.Parse(), field.NewPath(".spec.statefulSet.labels")); errs != nil { @@ -183,6 +185,17 @@ func (pcr *ProxyClassReconciler) validate(ctx context.Context, pc *tsapi.ProxyCl violations = append(violations, errs...) } } + + if stat := pc.Spec.StaticEndpoints; stat != nil { + if err := validateNodePortRanges(ctx, pcr.Client, pcr.nodePortRange, pc); err != nil { + var prs tsapi.PortRanges = stat.NodePort.Ports + violations = append(violations, field.TypeInvalid(field.NewPath("spec", "staticEndpoints", "nodePort", "ports"), prs.String(), err.Error())) + } + + if len(stat.NodePort.Selector) < 1 { + logger.Debug("no Selectors specified on `spec.staticEndpoints.nodePort.selectors` field") + } + } // We do not validate embedded fields (security context, resource // requirements etc) as we inherit upstream validation for those fields. // Invalid values would get rejected by upstream validations at apply diff --git a/cmd/k8s-operator/proxyclass_test.go b/cmd/k8s-operator/proxyclass_test.go index 48290eea7..ae0f63d99 100644 --- a/cmd/k8s-operator/proxyclass_test.go +++ b/cmd/k8s-operator/proxyclass_test.go @@ -131,9 +131,11 @@ func TestProxyClass(t *testing.T) { proxyClass.Spec.StatefulSet.Pod.TailscaleInitContainer.Image = pc.Spec.StatefulSet.Pod.TailscaleInitContainer.Image proxyClass.Spec.StatefulSet.Pod.TailscaleContainer.Env = []tsapi.Env{{Name: "TS_USERSPACE", Value: "true"}, {Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH"}, {Name: "EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS"}} }) - expectedEvents := []string{"Warning CustomTSEnvVar ProxyClass overrides the default value for TS_USERSPACE env var for tailscale container. Running with custom values for Tailscale env vars is not recommended and might break in the future.", + expectedEvents := []string{ + "Warning CustomTSEnvVar ProxyClass overrides the default value for TS_USERSPACE env var for tailscale container. Running with custom values for Tailscale env vars is not recommended and might break in the future.", "Warning CustomTSEnvVar ProxyClass overrides the default value for EXPERIMENTAL_TS_CONFIGFILE_PATH env var for tailscale container. 
Running with custom values for Tailscale env vars is not recommended and might break in the future.", - "Warning CustomTSEnvVar ProxyClass overrides the default value for EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS env var for tailscale container. Running with custom values for Tailscale env vars is not recommended and might break in the future."} + "Warning CustomTSEnvVar ProxyClass overrides the default value for EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS env var for tailscale container. Running with custom values for Tailscale env vars is not recommended and might break in the future.", + } expectReconciled(t, pcr, "", "test") expectEvents(t, fr, expectedEvents) @@ -176,6 +178,110 @@ func TestProxyClass(t *testing.T) { expectEqual(t, fc, pc) } +func TestValidateProxyClassStaticEndpoints(t *testing.T) { + for name, tc := range map[string]struct { + staticEndpointConfig *tsapi.StaticEndpointsConfig + valid bool + }{ + "no_static_endpoints": { + staticEndpointConfig: nil, + valid: true, + }, + "valid_specific_ports": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001}, + {Port: 3005}, + }, + Selector: map[string]string{"kubernetes.io/hostname": "foobar"}, + }, + }, + valid: true, + }, + "valid_port_ranges": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3000, EndPort: 3002}, + {Port: 3005, EndPort: 3007}, + }, + Selector: map[string]string{"kubernetes.io/hostname": "foobar"}, + }, + }, + valid: true, + }, + "overlapping_port_ranges": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 1000, EndPort: 2000}, + {Port: 1500, EndPort: 1800}, + }, + Selector: map[string]string{"kubernetes.io/hostname": "foobar"}, + }, + }, + valid: false, + }, + "clashing_port_and_range": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3005}, + {Port: 3001, EndPort: 3010}, + }, + Selector: map[string]string{"kubernetes.io/hostname": "foobar"}, + }, + }, + valid: false, + }, + "malformed_port_range": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001, EndPort: 3000}, + }, + Selector: map[string]string{"kubernetes.io/hostname": "foobar"}, + }, + }, + valid: false, + }, + "empty_selector": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{{Port: 3000}}, + Selector: map[string]string{}, + }, + }, + valid: true, + }, + } { + t.Run(name, func(t *testing.T) { + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). 
+ Build() + zl, _ := zap.NewDevelopment() + pcr := &ProxyClassReconciler{ + logger: zl.Sugar(), + Client: fc, + } + + pc := &tsapi.ProxyClass{ + Spec: tsapi.ProxyClassSpec{ + StaticEndpoints: tc.staticEndpointConfig, + }, + } + + logger := pcr.logger.With("ProxyClass", pc) + err := pcr.validate(context.Background(), pc, logger) + valid := err == nil + if valid != tc.valid { + t.Errorf("expected valid=%v, got valid=%v, err=%v", tc.valid, valid, err) + } + }) + } +} + func TestValidateProxyClass(t *testing.T) { for name, tc := range map[string]struct { pc *tsapi.ProxyClass @@ -219,8 +325,12 @@ func TestValidateProxyClass(t *testing.T) { }, } { t.Run(name, func(t *testing.T) { - pcr := &ProxyClassReconciler{} - err := pcr.validate(context.Background(), tc.pc) + zl, _ := zap.NewDevelopment() + pcr := &ProxyClassReconciler{ + logger: zl.Sugar(), + } + logger := pcr.logger.With("ProxyClass", tc.pc) + err := pcr.validate(context.Background(), tc.pc, logger) valid := err == nil if valid != tc.valid { t.Errorf("expected valid=%v, got valid=%v, err=%v", tc.valid, valid, err) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 0d5eff551..328262031 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -11,6 +11,7 @@ import ( "errors" "fmt" "net/http" + "net/netip" "slices" "strings" "sync" @@ -24,6 +25,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -48,7 +50,8 @@ const ( reasonProxyGroupInvalid = "ProxyGroupInvalid" // Copied from k8s.io/apiserver/pkg/registry/generic/registry/store.go@cccad306d649184bf2a0e319ba830c53f65c445c - optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" + optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" + staticEndpointsMaxAddrs = 2 ) var ( @@ -174,7 +177,8 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } } - if err = r.maybeProvision(ctx, pg, proxyClass); err != nil { + isProvisioned, err := r.maybeProvision(ctx, pg, proxyClass) + if err != nil { reason := reasonProxyGroupCreationFailed msg := fmt.Sprintf("error provisioning ProxyGroup resources: %s", err) if strings.Contains(err.Error(), optimisticLockErrorMsg) { @@ -185,9 +189,20 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } else { r.recorder.Eventf(pg, corev1.EventTypeWarning, reason, msg) } + return setStatusReady(pg, metav1.ConditionFalse, reason, msg) } + if !isProvisioned { + if !apiequality.Semantic.DeepEqual(oldPGStatus, &pg.Status) { + // An error encountered here should get returned by the Reconcile function. 
+ if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil { + return reconcile.Result{}, errors.Join(err, updateErr) + } + } + return + } + desiredReplicas := int(pgReplicas(pg)) if len(pg.Status.Devices) < desiredReplicas { message := fmt.Sprintf("%d/%d ProxyGroup pods running", len(pg.Status.Devices), desiredReplicas) @@ -230,15 +245,42 @@ func validateProxyClassForPG(logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, pc } } -func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) error { +func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (isProvisioned bool, err error) { logger := r.logger(pg.Name) r.mu.Lock() r.ensureAddedToGaugeForProxyGroup(pg) r.mu.Unlock() - if err := r.ensureConfigSecretsCreated(ctx, pg, proxyClass); err != nil { - return fmt.Errorf("error provisioning config Secrets: %w", err) + svcToNodePorts := make(map[string]uint16) + var tailscaledPort *uint16 + if proxyClass != nil && proxyClass.Spec.StaticEndpoints != nil { + svcToNodePorts, tailscaledPort, err = r.ensureNodePortServiceCreated(ctx, pg, proxyClass) + if err != nil { + wrappedErr := fmt.Errorf("error provisioning NodePort Services for static endpoints: %w", err) + var allocatePortErr *allocatePortsErr + if errors.As(err, &allocatePortErr) { + reason := reasonProxyGroupCreationFailed + msg := fmt.Sprintf("error provisioning ProxyGroup resources: %s", wrappedErr) + r.setStatusReady(pg, metav1.ConditionFalse, reason, msg, logger) + return false, nil + } + return false, wrappedErr + } } + + staticEndpoints, err := r.ensureConfigSecretsCreated(ctx, pg, proxyClass, svcToNodePorts) + if err != nil { + wrappedErr := fmt.Errorf("error provisioning config Secrets: %w", err) + var selectorErr *FindStaticEndpointErr + if errors.As(err, &selectorErr) { + reason := reasonProxyGroupCreationFailed + msg := fmt.Sprintf("error provisioning ProxyGroup resources: %s", wrappedErr) + r.setStatusReady(pg, metav1.ConditionFalse, reason, msg, logger) + return false, nil + } + return false, wrappedErr + } + // State secrets are precreated so we can use the ProxyGroup CR as their owner ref. 
stateSecrets := pgStateSecrets(pg, r.tsNamespace) for _, sec := range stateSecrets { @@ -247,7 +289,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = sec.ObjectMeta.OwnerReferences }); err != nil { - return fmt.Errorf("error provisioning state Secrets: %w", err) + return false, fmt.Errorf("error provisioning state Secrets: %w", err) } } sa := pgServiceAccount(pg, r.tsNamespace) @@ -256,7 +298,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences }); err != nil { - return fmt.Errorf("error provisioning ServiceAccount: %w", err) + return false, fmt.Errorf("error provisioning ServiceAccount: %w", err) } role := pgRole(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { @@ -265,7 +307,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro r.ObjectMeta.OwnerReferences = role.ObjectMeta.OwnerReferences r.Rules = role.Rules }); err != nil { - return fmt.Errorf("error provisioning Role: %w", err) + return false, fmt.Errorf("error provisioning Role: %w", err) } roleBinding := pgRoleBinding(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) { @@ -275,7 +317,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro r.RoleRef = roleBinding.RoleRef r.Subjects = roleBinding.Subjects }); err != nil { - return fmt.Errorf("error provisioning RoleBinding: %w", err) + return false, fmt.Errorf("error provisioning RoleBinding: %w", err) } if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { cm, hp := pgEgressCM(pg, r.tsNamespace) @@ -284,7 +326,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences mak.Set(&existing.BinaryData, egressservices.KeyHEPPings, hp) }); err != nil { - return fmt.Errorf("error provisioning egress ConfigMap %q: %w", cm.Name, err) + return false, fmt.Errorf("error provisioning egress ConfigMap %q: %w", cm.Name, err) } } if pg.Spec.Type == tsapi.ProxyGroupTypeIngress { @@ -293,12 +335,12 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro existing.ObjectMeta.Labels = cm.ObjectMeta.Labels existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences }); err != nil { - return fmt.Errorf("error provisioning ingress ConfigMap %q: %w", cm.Name, err) + return false, fmt.Errorf("error provisioning ingress ConfigMap %q: %w", cm.Name, err) } } - ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, proxyClass) + ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, tailscaledPort, proxyClass) if err != nil { - return fmt.Errorf("error generating StatefulSet spec: %w", err) + return false, fmt.Errorf("error generating StatefulSet spec: %w", err) } cfg := &tailscaleSTSConfig{ proxyType: string(pg.Spec.Type), @@ -306,7 +348,6 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro ss = applyProxyClassToStatefulSet(proxyClass, ss, cfg, logger) updateSS := func(s *appsv1.StatefulSet) { - s.Spec = ss.Spec s.ObjectMeta.Labels = ss.ObjectMeta.Labels @@ -314,7 +355,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro 
s.ObjectMeta.OwnerReferences = ss.ObjectMeta.OwnerReferences } if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, updateSS); err != nil { - return fmt.Errorf("error provisioning StatefulSet: %w", err) + return false, fmt.Errorf("error provisioning StatefulSet: %w", err) } mo := &metricsOpts{ tsNamespace: r.tsNamespace, @@ -323,26 +364,150 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro proxyType: "proxygroup", } if err := reconcileMetricsResources(ctx, logger, mo, proxyClass, r.Client); err != nil { - return fmt.Errorf("error reconciling metrics resources: %w", err) + return false, fmt.Errorf("error reconciling metrics resources: %w", err) } - if err := r.cleanupDanglingResources(ctx, pg); err != nil { - return fmt.Errorf("error cleaning up dangling resources: %w", err) + if err := r.cleanupDanglingResources(ctx, pg, proxyClass); err != nil { + return false, fmt.Errorf("error cleaning up dangling resources: %w", err) } - devices, err := r.getDeviceInfo(ctx, pg) + devices, err := r.getDeviceInfo(ctx, staticEndpoints, pg) if err != nil { - return fmt.Errorf("failed to get device info: %w", err) + return false, fmt.Errorf("failed to get device info: %w", err) } pg.Status.Devices = devices - return nil + return true, nil +} + +// getServicePortsForProxyGroups returns a map of ProxyGroup Service names to their NodePorts, +// and a set of all allocated NodePorts for quick occupancy checking. +func getServicePortsForProxyGroups(ctx context.Context, c client.Client, namespace string, portRanges tsapi.PortRanges) (map[string]uint16, set.Set[uint16], error) { + svcs := new(corev1.ServiceList) + matchingLabels := client.MatchingLabels(map[string]string{ + LabelParentType: "proxygroup", + }) + + err := c.List(ctx, svcs, matchingLabels, client.InNamespace(namespace)) + if err != nil { + return nil, nil, fmt.Errorf("failed to list ProxyGroup Services: %w", err) + } + + svcToNodePorts := map[string]uint16{} + usedPorts := set.Set[uint16]{} + for _, svc := range svcs.Items { + if len(svc.Spec.Ports) == 1 && svc.Spec.Ports[0].NodePort != 0 { + p := uint16(svc.Spec.Ports[0].NodePort) + if portRanges.Contains(p) { + svcToNodePorts[svc.Name] = p + usedPorts.Add(p) + } + } + } + + return svcToNodePorts, usedPorts, nil +} + +type allocatePortsErr struct { + msg string +} + +func (e *allocatePortsErr) Error() string { + return e.msg +} + +func (r *ProxyGroupReconciler) allocatePorts(ctx context.Context, pg *tsapi.ProxyGroup, proxyClassName string, portRanges tsapi.PortRanges) (map[string]uint16, error) { + replicaCount := int(pgReplicas(pg)) + svcToNodePorts, usedPorts, err := getServicePortsForProxyGroups(ctx, r.Client, r.tsNamespace, portRanges) + if err != nil { + return nil, &allocatePortsErr{msg: fmt.Sprintf("failed to find ports for existing ProxyGroup NodePort Services: %s", err.Error())} + } + + replicasAllocated := 0 + for i := range pgReplicas(pg) { + if _, ok := svcToNodePorts[pgNodePortServiceName(pg.Name, i)]; !ok { + svcToNodePorts[pgNodePortServiceName(pg.Name, i)] = 0 + } else { + replicasAllocated++ + } + } + + for replica, port := range svcToNodePorts { + if port == 0 { + for p := range portRanges.All() { + if !usedPorts.Contains(p) { + svcToNodePorts[replica] = p + usedPorts.Add(p) + replicasAllocated++ + break + } + } + } + } + + if replicasAllocated < replicaCount { + return nil, &allocatePortsErr{msg: fmt.Sprintf("not enough available ports to allocate all replicas (needed %d, got %d). 
Field 'spec.staticEndpoints.nodePort.ports' on ProxyClass %q must have bigger range allocated", replicaCount, usedPorts.Len(), proxyClassName)} + } + + return svcToNodePorts, nil +} + +func (r *ProxyGroupReconciler) ensureNodePortServiceCreated(ctx context.Context, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) (map[string]uint16, *uint16, error) { + // NOTE: (ChaosInTheCRD) we want the same TargetPort for every static endpoint NodePort Service for the ProxyGroup + tailscaledPort := getRandomPort() + svcs := []*corev1.Service{} + for i := range pgReplicas(pg) { + replicaName := pgNodePortServiceName(pg.Name, i) + + svc := &corev1.Service{} + err := r.Get(ctx, types.NamespacedName{Name: replicaName, Namespace: r.tsNamespace}, svc) + if err != nil && !apierrors.IsNotFound(err) { + return nil, nil, fmt.Errorf("error getting Kubernetes Service %q: %w", replicaName, err) + } + if apierrors.IsNotFound(err) { + svcs = append(svcs, pgNodePortService(pg, replicaName, r.tsNamespace)) + } else { + // NOTE: if we can we want to recover the random port used for tailscaled, + // as well as the NodePort previously used for that Service + if len(svc.Spec.Ports) == 1 { + if svc.Spec.Ports[0].Port != 0 { + tailscaledPort = uint16(svc.Spec.Ports[0].Port) + } + } + svcs = append(svcs, svc) + } + } + + svcToNodePorts, err := r.allocatePorts(ctx, pg, pc.Name, pc.Spec.StaticEndpoints.NodePort.Ports) + if err != nil { + return nil, nil, fmt.Errorf("failed to allocate NodePorts to ProxyGroup Services: %w", err) + } + + for _, svc := range svcs { + // NOTE: we know that every service is going to have 1 port here + svc.Spec.Ports[0].Port = int32(tailscaledPort) + svc.Spec.Ports[0].TargetPort = intstr.FromInt(int(tailscaledPort)) + svc.Spec.Ports[0].NodePort = int32(svcToNodePorts[svc.Name]) + + _, err = createOrUpdate(ctx, r.Client, r.tsNamespace, svc, func(s *corev1.Service) { + s.ObjectMeta.Labels = svc.ObjectMeta.Labels + s.ObjectMeta.Annotations = svc.ObjectMeta.Annotations + s.ObjectMeta.OwnerReferences = svc.ObjectMeta.OwnerReferences + s.Spec.Selector = svc.Spec.Selector + s.Spec.Ports = svc.Spec.Ports + }) + if err != nil { + return nil, nil, fmt.Errorf("error creating/updating Kubernetes NodePort Service %q: %w", svc.Name, err) + } + } + + return svcToNodePorts, ptr.To(tailscaledPort), nil } // cleanupDanglingResources ensures we don't leak config secrets, state secrets, and // tailnet devices when the number of replicas specified is reduced. 
-func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, pg *tsapi.ProxyGroup) error { +func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) error { logger := r.logger(pg.Name) metadata, err := r.getNodeMetadata(ctx, pg) if err != nil { @@ -371,6 +536,30 @@ func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, pg return fmt.Errorf("error deleting config Secret %s: %w", configSecret.Name, err) } } + // NOTE(ChaosInTheCRD): we shouldn't need to get the service first, checking for a not found error should be enough + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-nodeport", m.stateSecret.Name), + Namespace: m.stateSecret.Namespace, + }, + } + if err := r.Delete(ctx, svc); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("error deleting static endpoints Kubernetes Service %q: %w", svc.Name, err) + } + } + } + + // If the ProxyClass has its StaticEndpoints config removed, we want to remove all of the NodePort Services + if pc != nil && pc.Spec.StaticEndpoints == nil { + labels := map[string]string{ + kubetypes.LabelManaged: "true", + LabelParentType: proxyTypeProxyGroup, + LabelParentName: pg.Name, + } + if err := r.DeleteAllOf(ctx, &corev1.Service{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + return fmt.Errorf("error deleting Kubernetes Services for static endpoints: %w", err) + } } return nil @@ -396,7 +585,8 @@ func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, pg *tsapi.Proxy mo := &metricsOpts{ proxyLabels: pgLabels(pg.Name, nil), tsNamespace: r.tsNamespace, - proxyType: "proxygroup"} + proxyType: "proxygroup", + } if err := maybeCleanupMetricsResources(ctx, mo, r.Client); err != nil { return false, fmt.Errorf("error cleaning up metrics resources: %w", err) } @@ -424,8 +614,9 @@ func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, id tailc return nil } -func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (err error) { +func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass, svcToNodePorts map[string]uint16) (endpoints map[string][]netip.AddrPort, err error) { logger := r.logger(pg.Name) + endpoints = make(map[string][]netip.AddrPort, pgReplicas(pg)) for i := range pgReplicas(pg) { cfgSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -441,7 +632,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p logger.Debugf("Secret %s/%s already exists", cfgSecret.GetNamespace(), cfgSecret.GetName()) existingCfgSecret = cfgSecret.DeepCopy() } else if !apierrors.IsNotFound(err) { - return err + return nil, err } var authKey string @@ -453,19 +644,32 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } authKey, err = newAuthKey(ctx, r.tsClient, tags) if err != nil { - return err + return nil, err + } + } + + replicaName := pgNodePortServiceName(pg.Name, i) + if len(svcToNodePorts) > 0 { + port, ok := svcToNodePorts[replicaName] + if !ok { + return nil, fmt.Errorf("could not find configured NodePort for ProxyGroup replica %q", replicaName) + } + + endpoints[replicaName], err = r.findStaticEndpoints(ctx, existingCfgSecret, proxyClass, port, logger) + if err != nil { + return nil, fmt.Errorf("could not find static endpoints for replica %q: %w", 
replicaName, err) } } - configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, existingCfgSecret) + configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, existingCfgSecret, endpoints[replicaName]) if err != nil { - return fmt.Errorf("error creating tailscaled config: %w", err) + return nil, fmt.Errorf("error creating tailscaled config: %w", err) } for cap, cfg := range configs { cfgJSON, err := json.Marshal(cfg) if err != nil { - return fmt.Errorf("error marshalling tailscaled config: %w", err) + return nil, fmt.Errorf("error marshalling tailscaled config: %w", err) } mak.Set(&cfgSecret.Data, tsoperator.TailscaledConfigFileName(cap), cfgJSON) } @@ -474,18 +678,111 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p if !apiequality.Semantic.DeepEqual(existingCfgSecret, cfgSecret) { logger.Debugf("Updating the existing ProxyGroup config Secret %s", cfgSecret.Name) if err := r.Update(ctx, cfgSecret); err != nil { - return err + return nil, err } } } else { logger.Debugf("Creating a new config Secret %s for the ProxyGroup", cfgSecret.Name) if err := r.Create(ctx, cfgSecret); err != nil { - return err + return nil, err } } } - return nil + return endpoints, nil +} + +type FindStaticEndpointErr struct { + msg string +} + +func (e *FindStaticEndpointErr) Error() string { + return e.msg +} + +// findStaticEndpoints returns up to two `netip.AddrPort` entries, derived from the ExternalIPs of Nodes that +// match the `proxyClass`'s selector within the StaticEndpoints configuration. The port is set to the replica's NodePort Service Port. +func (r *ProxyGroupReconciler) findStaticEndpoints(ctx context.Context, existingCfgSecret *corev1.Secret, proxyClass *tsapi.ProxyClass, port uint16, logger *zap.SugaredLogger) ([]netip.AddrPort, error) { + var currAddrs []netip.AddrPort + if existingCfgSecret != nil { + oldConfB := existingCfgSecret.Data[tsoperator.TailscaledConfigFileName(106)] + if len(oldConfB) > 0 { + var oldConf ipn.ConfigVAlpha + if err := json.Unmarshal(oldConfB, &oldConf); err == nil { + currAddrs = oldConf.StaticEndpoints + } else { + logger.Debugf("failed to unmarshal tailscaled config from secret %q: %v", existingCfgSecret.Name, err) + } + } else { + logger.Debugf("failed to get tailscaled config from secret %q: empty data", existingCfgSecret.Name) + } + } + + nodes := new(corev1.NodeList) + selectors := client.MatchingLabels(proxyClass.Spec.StaticEndpoints.NodePort.Selector) + + err := r.List(ctx, nodes, selectors) + if err != nil { + return nil, fmt.Errorf("failed to list nodes: %w", err) + } + + if len(nodes.Items) == 0 { + return nil, &FindStaticEndpointErr{msg: fmt.Sprintf("failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass %q", proxyClass.Name)} + } + + endpoints := []netip.AddrPort{} + + // NOTE(ChaosInTheCRD): Setting a hard limit of two static endpoints. + newAddrs := []netip.AddrPort{} + for _, n := range nodes.Items { + for _, a := range n.Status.Addresses { + if a.Type == corev1.NodeExternalIP { + addr := getStaticEndpointAddress(&a, port) + if addr == nil { + logger.Debugf("failed to parse %q address on node %q: %q", corev1.NodeExternalIP, n.Name, a.Address) + continue + } + + // we want to add the currently used IPs first before + // adding new ones. 
+ if currAddrs != nil && slices.Contains(currAddrs, *addr) { + endpoints = append(endpoints, *addr) + } else { + newAddrs = append(newAddrs, *addr) + } + } + + if len(endpoints) == 2 { + break + } + } + } + + // if the 2 endpoints limit hasn't been reached, we + // can start adding newIPs. + if len(endpoints) < 2 { + for _, a := range newAddrs { + endpoints = append(endpoints, a) + if len(endpoints) == 2 { + break + } + } + } + + if len(endpoints) == 0 { + return nil, &FindStaticEndpointErr{msg: fmt.Sprintf("failed to find any `status.addresses` of type %q on nodes using configured Selectors on `spec.staticEndpoints.nodePort.selectors` for ProxyClass %q", corev1.NodeExternalIP, proxyClass.Name)} + } + + return endpoints, nil +} + +func getStaticEndpointAddress(a *corev1.NodeAddress, port uint16) *netip.AddrPort { + addr, err := netip.ParseAddr(a.Address) + if err != nil { + return nil + } + + return ptr.To(netip.AddrPortFrom(addr, port)) } // ensureAddedToGaugeForProxyGroup ensures the gauge metric for the ProxyGroup resource is updated when the ProxyGroup @@ -514,7 +811,7 @@ func (r *ProxyGroupReconciler) ensureRemovedFromGaugeForProxyGroup(pg *tsapi.Pro gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len())) } -func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32, authKey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { +func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32, authKey string, oldSecret *corev1.Secret, staticEndpoints []netip.AddrPort) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ Version: "alpha0", AcceptDNS: "false", @@ -531,6 +828,10 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32 conf.AcceptRoutes = "true" } + if len(staticEndpoints) > 0 { + conf.StaticEndpoints = staticEndpoints + } + deviceAuthed := false for _, d := range pg.Status.Devices { if d.Hostname == *conf.Hostname { @@ -624,7 +925,7 @@ func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr return metadata, nil } -func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, pg *tsapi.ProxyGroup) (devices []tsapi.TailnetDevice, _ error) { +func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, staticEndpoints map[string][]netip.AddrPort, pg *tsapi.ProxyGroup) (devices []tsapi.TailnetDevice, _ error) { metadata, err := r.getNodeMetadata(ctx, pg) if err != nil { return nil, err @@ -638,10 +939,21 @@ func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, pg *tsapi.Prox if !ok { continue } - devices = append(devices, tsapi.TailnetDevice{ + + dev := tsapi.TailnetDevice{ Hostname: device.Hostname, TailnetIPs: device.TailnetIPs, - }) + } + + if ep, ok := staticEndpoints[device.Hostname]; ok && len(ep) > 0 { + eps := make([]string, 0, len(ep)) + for _, e := range ep { + eps = append(eps, e.String()) + } + dev.StaticEndpoints = eps + } + + devices = append(devices, dev) } return devices, nil @@ -655,3 +967,8 @@ type nodeMetadata struct { tsID tailcfg.StableNodeID dnsName string } + +func (pr *ProxyGroupReconciler) setStatusReady(pg *tsapi.ProxyGroup, status metav1.ConditionStatus, reason string, msg string, logger *zap.SugaredLogger) { + pr.recorder.Eventf(pg, corev1.EventTypeWarning, reason, msg) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, status, reason, msg, pg.Generation, pr.clock, logger) +} diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 1d12c39e0..20e797f0c 100644 --- 
a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -9,6 +9,7 @@ import ( "fmt" "slices" "strconv" + "strings" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -23,12 +24,43 @@ import ( "tailscale.com/types/ptr" ) -// deletionGracePeriodSeconds is set to 6 minutes to ensure that the pre-stop hook of these proxies have enough chance to terminate gracefully. -const deletionGracePeriodSeconds int64 = 360 +const ( + // deletionGracePeriodSeconds is set to 6 minutes to ensure that the pre-stop hook of these proxies have enough chance to terminate gracefully. + deletionGracePeriodSeconds int64 = 360 + staticEndpointPortName = "static-endpoint-port" +) + +func pgNodePortServiceName(proxyGroupName string, replica int32) string { + return fmt.Sprintf("%s-%d-nodeport", proxyGroupName, replica) +} + +func pgNodePortService(pg *tsapi.ProxyGroup, name string, namespace string) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + // NOTE(ChaosInTheCRD): we set the ports once we've iterated over every svc and found any old configuration we want to persist. + { + Name: staticEndpointPortName, + Protocol: corev1.ProtocolUDP, + }, + }, + Selector: map[string]string{ + appsv1.StatefulSetPodNameLabel: strings.TrimSuffix(name, "-nodeport"), + }, + }, + } +} // Returns the base StatefulSet definition for a ProxyGroup. A ProxyClass may be // applied over the top after. -func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string, proxyClass *tsapi.ProxyClass) (*appsv1.StatefulSet, error) { +func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string, port *uint16, proxyClass *tsapi.ProxyClass) (*appsv1.StatefulSet, error) { ss := new(appsv1.StatefulSet) if err := yaml.Unmarshal(proxyYaml, &ss); err != nil { return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err) @@ -144,6 +176,13 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string }, } + if port != nil { + envs = append(envs, corev1.EnvVar{ + Name: "PORT", + Value: strconv.Itoa(int(*port)), + }) + } + if tsFirewallMode != "" { envs = append(envs, corev1.EnvVar{ Name: "TS_DEBUG_FIREWALL_MODE", diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index c556ae94a..8ffce2c0c 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -9,6 +9,8 @@ import ( "context" "encoding/json" "fmt" + "net/netip" + "slices" "testing" "time" @@ -18,6 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" @@ -32,14 +35,772 @@ import ( "tailscale.com/types/ptr" ) -const testProxyImage = "tailscale/tailscale:test" +const ( + testProxyImage = "tailscale/tailscale:test" + initialCfgHash = "6632726be70cf224049580deb4d317bba065915b5fd415461d60ed621c91b196" +) + +var ( + defaultProxyClassAnnotations = map[string]string{ + "some-annotation": "from-the-proxy-class", + } + + defaultReplicas = ptr.To(int32(2)) + defaultStaticEndpointConfig = &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + 
Ports: []tsapi.PortRange{ + {Port: 30001}, {Port: 30002}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + } +) + +func TestProxyGroupWithStaticEndpoints(t *testing.T) { + type testNodeAddr struct { + ip string + addrType corev1.NodeAddressType + } -var defaultProxyClassAnnotations = map[string]string{ - "some-annotation": "from-the-proxy-class", + type testNode struct { + name string + addresses []testNodeAddr + labels map[string]string + } + + type reconcile struct { + staticEndpointConfig *tsapi.StaticEndpointsConfig + replicas *int32 + nodes []testNode + expectedIPs []netip.Addr + expectedEvents []string + expectedErr string + expectStatefulSet bool + } + + testCases := []struct { + name string + description string + reconciles []reconcile + }{ + { + // the reconciler should manage to create static endpoints when Nodes have IPv6 addresses. + name: "IPv6", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001}, + {Port: 3005}, + {Port: 3007}, + {Port: 3009}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + replicas: ptr.To(int32(4)), + nodes: []testNode{ + { + name: "foobar", + addresses: []testNodeAddr{{ip: "2001:0db8::1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbaz", + addresses: []testNodeAddr{{ip: "2001:0db8::2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbazz", + addresses: []testNodeAddr{{ip: "2001:0db8::3", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("2001:0db8::1"), netip.MustParseAddr("2001:0db8::2"), netip.MustParseAddr("2001:0db8::3")}, + expectedEvents: []string{}, + expectedErr: "", + expectStatefulSet: true, + }, + }, + }, + { + // declaring specific ports (with no `endPort`s) in the `spec.staticEndpoints.nodePort` should work. + name: "SpecificPorts", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001}, + {Port: 3005}, + {Port: 3007}, + {Port: 3009}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + replicas: ptr.To(int32(4)), + nodes: []testNode{ + { + name: "foobar", + addresses: []testNodeAddr{{ip: "192.168.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbaz", + addresses: []testNodeAddr{{ip: "192.168.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbazz", + addresses: []testNodeAddr{{ip: "192.168.0.3", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("192.168.0.1"), netip.MustParseAddr("192.168.0.2"), netip.MustParseAddr("192.168.0.3")}, + expectedEvents: []string{}, + expectedErr: "", + expectStatefulSet: true, + }, + }, + }, + { + // if too narrow a range of `spec.staticEndpoints.nodePort.Ports` on the proxyClass should result in no StatefulSet being created. 
+ name: "NotEnoughPorts", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001}, + {Port: 3005}, + {Port: 3007}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + replicas: ptr.To(int32(4)), + nodes: []testNode{ + { + name: "foobar", + addresses: []testNodeAddr{{ip: "192.168.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbaz", + addresses: []testNodeAddr{{ip: "192.168.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbazz", + addresses: []testNodeAddr{{ip: "192.168.0.3", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning ProxyGroup resources: error provisioning NodePort Services for static endpoints: failed to allocate NodePorts to ProxyGroup Services: not enough available ports to allocate all replicas (needed 4, got 3). Field 'spec.staticEndpoints.nodePort.ports' on ProxyClass \"default-pc\" must have bigger range allocated"}, + expectedErr: "", + expectStatefulSet: false, + }, + }, + }, + { + // when supplying a variety of ranges that are not clashing, the reconciler should manage to create a StatefulSet. + name: "NonClashingRanges", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3000, EndPort: 3002}, + {Port: 3003, EndPort: 3005}, + {Port: 3006}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + replicas: ptr.To(int32(3)), + nodes: []testNode{ + {name: "node1", addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, labels: map[string]string{"foo/bar": "baz"}}, + {name: "node2", addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, labels: map[string]string{"foo/bar": "baz"}}, + {name: "node3", addresses: []testNodeAddr{{ip: "10.0.0.3", addrType: corev1.NodeExternalIP}}, labels: map[string]string{"foo/bar": "baz"}}, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2"), netip.MustParseAddr("10.0.0.3")}, + expectedEvents: []string{}, + expectedErr: "", + expectStatefulSet: true, + }, + }, + }, + { + // when there isn't a node that matches the selector, the ProxyGroup enters a failed state as there are no valid Static Endpoints. 
+ // while it does create an event on the resource, It does not return an error + name: "NoMatchingNodes", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3000, EndPort: 3005}, + }, + Selector: map[string]string{ + "zone": "us-west", + }, + }, + }, + replicas: defaultReplicas, + nodes: []testNode{ + {name: "node1", addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, labels: map[string]string{"zone": "eu-central"}}, + {name: "node2", addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeInternalIP}}, labels: map[string]string{"zone": "eu-central"}}, + }, + expectedIPs: []netip.Addr{}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning ProxyGroup resources: error provisioning config Secrets: could not find static endpoints for replica \"test-0-nodeport\": failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass \"default-pc\""}, + expectedErr: "", + expectStatefulSet: false, + }, + }, + }, + { + // when all the nodes have only have addresses of type InternalIP populated in their status, the ProxyGroup enters a failed state as there are no valid Static Endpoints. + // while it does create an event on the resource, It does not return an error + name: "AllInternalIPAddresses", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001}, + {Port: 3005}, + {Port: 3007}, + {Port: 3009}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + replicas: ptr.To(int32(4)), + nodes: []testNode{ + { + name: "foobar", + addresses: []testNodeAddr{{ip: "192.168.0.1", addrType: corev1.NodeInternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbaz", + addresses: []testNodeAddr{{ip: "192.168.0.2", addrType: corev1.NodeInternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbazz", + addresses: []testNodeAddr{{ip: "192.168.0.3", addrType: corev1.NodeInternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning ProxyGroup resources: error provisioning config Secrets: could not find static endpoints for replica \"test-0-nodeport\": failed to find any `status.addresses` of type \"ExternalIP\" on nodes using configured Selectors on `spec.staticEndpoints.nodePort.selectors` for ProxyClass \"default-pc\""}, + expectedErr: "", + expectStatefulSet: false, + }, + }, + }, + { + // When the node's (and some of their addresses) change between reconciles, the reconciler should first pick addresses that + // have been used previously (provided that they are still populated on a node that matches the selector) + name: "NodeIPChangesAndPersists", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node3", + addresses: []testNodeAddr{{ip: "10.0.0.3", addrType: corev1.NodeExternalIP}}, + labels: 
map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.10", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node3", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectStatefulSet: true, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + }, + }, + }, + { + // given a new node being created with a new IP, and a node previously used for Static Endpoints being removed, the Static Endpoints should be updated + // correctly + name: "NodeIPChangesWithNewNode", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node3", + addresses: []testNodeAddr{{ip: "10.0.0.3", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.3")}, + expectStatefulSet: true, + }, + }, + }, + { + // when all the node IPs change, they should all update + name: "AllNodeIPsChange", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.100", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.200", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.100"), netip.MustParseAddr("10.0.0.200")}, + expectStatefulSet: true, + }, + }, + }, + { + // if there are less ExternalIPs after changes to the nodes between reconciles, the reconciler should 
complete without issues + name: "LessExternalIPsAfterChange", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeInternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1")}, + expectStatefulSet: true, + }, + }, + }, + { + // if node address parsing fails (given an invalid address), the reconciler should continue without failure and find other + // valid addresses + name: "NodeAddressParsingFails", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "invalid-ip", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "invalid-ip", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + }, + }, + { + // if the node's become unlabeled, the ProxyGroup should enter a ProxyGroupInvalid state, but the reconciler should not fail + name: "NodesBecomeUnlabeled", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node3", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{}, + }, + { + name: "node4", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{}, + }, + }, + expectedIPs: 
[]netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning ProxyGroup resources: error provisioning config Secrets: could not find static endpoints for replica \"test-0-nodeport\": failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass \"default-pc\""}, + expectStatefulSet: true, + }, + }, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + tsClient := &fakeTSClient{} + zl, _ := zap.NewDevelopment() + fr := record.NewFakeRecorder(10) + cl := tstest.NewClock(tstest.ClockOpts{}) + + pc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default-pc", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Annotations: defaultProxyClassAnnotations, + }, + }, + Status: tsapi.ProxyClassStatus{ + Conditions: []metav1.Condition{{ + Type: string(tsapi.ProxyClassReady), + Status: metav1.ConditionTrue, + Reason: reasonProxyClassValid, + Message: reasonProxyClassValid, + LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + }}, + }, + } + + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Finalizers: []string{"tailscale.com/finalizer"}, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeEgress, + ProxyClass: pc.Name, + }, + } + + fc := fake.NewClientBuilder(). + WithObjects(pc, pg). + WithStatusSubresource(pc, pg). + WithScheme(tsapi.GlobalScheme). + Build() + + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + proxyImage: testProxyImage, + defaultTags: []string{"tag:test-tag"}, + tsFirewallMode: "auto", + defaultProxyClass: "default-pc", + + Client: fc, + tsClient: tsClient, + recorder: fr, + clock: cl, + } + + for i, r := range tt.reconciles { + createdNodes := []corev1.Node{} + t.Run(tt.name, func(t *testing.T) { + for _, n := range r.nodes { + no := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: n.name, + Labels: n.labels, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{}, + }, + } + for _, addr := range n.addresses { + no.Status.Addresses = append(no.Status.Addresses, corev1.NodeAddress{ + Type: addr.addrType, + Address: addr.ip, + }) + } + if err := fc.Create(context.Background(), no); err != nil { + t.Fatalf("failed to create node %q: %v", n.name, err) + } + createdNodes = append(createdNodes, *no) + t.Logf("created node %q with data", n.name) + } + + reconciler.l = zl.Sugar().With("TestName", tt.name).With("Reconcile", i) + pg.Spec.Replicas = r.replicas + pc.Spec.StaticEndpoints = r.staticEndpointConfig + + createOrUpdate(context.Background(), fc, "", pg, func(o *tsapi.ProxyGroup) { + o.Spec.Replicas = pg.Spec.Replicas + }) + + createOrUpdate(context.Background(), fc, "", pc, func(o *tsapi.ProxyClass) { + o.Spec.StaticEndpoints = pc.Spec.StaticEndpoints + }) + + if r.expectedErr != "" { + expectError(t, reconciler, "", pg.Name) + } else { + expectReconciled(t, reconciler, "", pg.Name) + } + expectEvents(t, fr, r.expectedEvents) + + sts := &appsv1.StatefulSet{} + err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts) + if r.expectStatefulSet { + if err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + + for j := range 2 { + sec := &corev1.Secret{} + if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: fmt.Sprintf("%s-%d-config", pg.Name, j)}, sec); err != nil { + t.Fatalf("failed to get 
state Secret for replica %d: %v", j, err) + } + + config := &ipn.ConfigVAlpha{} + foundConfig := false + for _, d := range sec.Data { + if err := json.Unmarshal(d, config); err == nil { + foundConfig = true + break + } + } + if !foundConfig { + t.Fatalf("could not unmarshal config from secret data for replica %d", j) + } + + if len(config.StaticEndpoints) > staticEndpointsMaxAddrs { + t.Fatalf("expected %d StaticEndpoints in config Secret, but got %d for replica %d. Found Static Endpoints: %v", staticEndpointsMaxAddrs, len(config.StaticEndpoints), j, config.StaticEndpoints) + } + + for _, e := range config.StaticEndpoints { + if !slices.Contains(r.expectedIPs, e.Addr()) { + t.Fatalf("found unexpected static endpoint IP %q for replica %d. Expected one of %v", e.Addr().String(), j, r.expectedIPs) + } + if c := r.staticEndpointConfig; c != nil && c.NodePort.Ports != nil { + var ports tsapi.PortRanges = c.NodePort.Ports + found := false + for port := range ports.All() { + if port == e.Port() { + found = true + break + } + } + + if !found { + t.Fatalf("found unexpected static endpoint port %d for replica %d. Expected one of %v .", e.Port(), j, ports.All()) + } + } else { + if e.Port() != 3001 && e.Port() != 3002 { + t.Fatalf("found unexpected static endpoint port %d for replica %d. Expected 3001 or 3002.", e.Port(), j) + } + } + } + } + + pgroup := &tsapi.ProxyGroup{} + err = fc.Get(context.Background(), client.ObjectKey{Name: pg.Name}, pgroup) + if err != nil { + t.Fatalf("failed to get ProxyGroup %q: %v", pg.Name, err) + } + + t.Logf("getting proxygroup after reconcile") + for _, d := range pgroup.Status.Devices { + t.Logf("found device %q", d.Hostname) + for _, e := range d.StaticEndpoints { + t.Logf("found static endpoint %q", e) + } + } + } else { + if err == nil { + t.Fatal("expected error when getting Statefulset") + } + } + }) + + // node cleanup between reconciles + // we created a new set of nodes for each + for _, n := range createdNodes { + err := fc.Delete(context.Background(), &n) + if err != nil && !apierrors.IsNotFound(err) { + t.Fatalf("failed to delete node: %v", err) + } + } + } + + t.Run("delete_and_cleanup", func(t *testing.T) { + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + proxyImage: testProxyImage, + defaultTags: []string{"tag:test-tag"}, + tsFirewallMode: "auto", + defaultProxyClass: "default-pc", + + Client: fc, + tsClient: tsClient, + recorder: fr, + l: zl.Sugar().With("TestName", tt.name).With("Reconcile", "cleanup"), + clock: cl, + } + + if err := fc.Delete(context.Background(), pg); err != nil { + t.Fatalf("error deleting ProxyGroup: %v", err) + } + + expectReconciled(t, reconciler, "", pg.Name) + expectMissing[tsapi.ProxyGroup](t, fc, "", pg.Name) + + if err := fc.Delete(context.Background(), pc); err != nil { + t.Fatalf("error deleting ProxyClass: %v", err) + } + expectMissing[tsapi.ProxyClass](t, fc, "", pc.Name) + }) + }) + } } func TestProxyGroup(t *testing.T) { - pc := &tsapi.ProxyClass{ ObjectMeta: metav1.ObjectMeta{ Name: "default-pc", @@ -598,7 +1359,7 @@ func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.Prox role := pgRole(pg, tsNamespace) roleBinding := pgRoleBinding(pg, tsNamespace) serviceAccount := pgServiceAccount(pg, tsNamespace) - statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", proxyClass) + statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", nil, proxyClass) if err != nil { t.Fatal(err) } diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 
03bb8989b..aba5f9e2d 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -425,6 +425,23 @@ _Appears in:_ | `ip` _string_ | IP is the ClusterIP of the Service fronting the deployed ts.net nameserver.
                Currently you must manually update your cluster DNS config to add
                this address as a stub nameserver for ts.net for cluster workloads to be
                able to resolve MagicDNS names associated with egress or Ingress
                proxies.
                The IP address will change if you delete and recreate the DNSConfig. | | | +#### NodePortConfig + + + + + + + +_Appears in:_ +- [StaticEndpointsConfig](#staticendpointsconfig) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `ports` _[PortRange](#portrange) array_ | The port ranges from which the operator will select NodePorts for the Services.
                You must ensure that firewall rules allow UDP ingress traffic for these ports
to the nodes' external IPs.
                The ports must be in the range of service node ports for the cluster (default `30000-32767`).
                See https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport. | | MinItems: 1
| +| `selector` _object (keys:string, values:string)_ | A selector which will be used to select the nodes that will have their `ExternalIP`s advertised
                by the ProxyGroup as Static Endpoints. | | | + + #### Pod @@ -451,6 +468,26 @@ _Appears in:_ | `topologySpreadConstraints` _[TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#topologyspreadconstraint-v1-core) array_ | Proxy Pod's topology spread constraints.
                By default Tailscale Kubernetes operator does not apply any topology spread constraints.
                https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ | | | +#### PortRange + + + + + + + +_Appears in:_ +- [NodePortConfig](#nodeportconfig) +- [PortRanges](#portranges) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `port` _integer_ | port represents a port selected to be used. This is a required field. | | | +| `endPort` _integer_ | endPort indicates that the range of ports from port to endPort if set, inclusive,
                should be used. This field cannot be defined if the port field is not defined.
                The endPort must be either unset, or equal or greater than port. | | | + + + + #### ProxyClass @@ -518,6 +555,7 @@ _Appears in:_ | `metrics` _[Metrics](#metrics)_ | Configuration for proxy metrics. Metrics are currently not supported
                for egress proxies and for Ingress proxies that have been configured
                with tailscale.com/experimental-forward-cluster-traffic-via-ingress
                annotation. Note that the metrics are currently considered unstable
                and will likely change in breaking ways in the future - we only
                recommend that you use those for debugging purposes. | | | | `tailscale` _[TailscaleConfig](#tailscaleconfig)_ | TailscaleConfig contains options to configure the tailscale-specific
                parameters of proxies. | | | | `useLetsEncryptStagingEnvironment` _boolean_ | Set UseLetsEncryptStagingEnvironment to true to issue TLS
                certificates for any HTTPS endpoints exposed to the tailnet from
                LetsEncrypt's staging environment.
                https://letsencrypt.org/docs/staging-environment/
                This setting only affects Tailscale Ingress resources.
                By default Ingress TLS certificates are issued from LetsEncrypt's
                production environment.
                Changing this setting true -> false, will result in any
                existing certs being re-issued from the production environment.
                Changing this setting false (default) -> true, when certs have already
                been provisioned from production environment will NOT result in certs
                being re-issued from the staging environment before they need to be
                renewed. | | | +| `staticEndpoints` _[StaticEndpointsConfig](#staticendpointsconfig)_ | Configuration for 'static endpoints' on proxies in order to facilitate
                direct connections from other devices on the tailnet.
                See https://tailscale.com/kb/1445/kubernetes-operator-customization#static-endpoints. | | | #### ProxyClassStatus @@ -935,6 +973,22 @@ _Appears in:_ | `pod` _[Pod](#pod)_ | Configuration for the proxy Pod. | | | +#### StaticEndpointsConfig + + + + + + + +_Appears in:_ +- [ProxyClassSpec](#proxyclassspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `nodePort` _[NodePortConfig](#nodeportconfig)_ | The configuration for static endpoints using NodePort Services. | | | + + #### Storage @@ -1015,6 +1069,7 @@ _Appears in:_ | --- | --- | --- | --- | | `hostname` _string_ | Hostname is the fully qualified domain name of the device.
                If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
                node. | | | | `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
                assigned to the device. | | | +| `staticEndpoints` _string array_ | StaticEndpoints are user configured, 'static' endpoints by which tailnet peers can reach this device. | | | #### TailscaleConfig diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index 899abf096..9221c60f3 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -6,6 +6,10 @@ package v1alpha1 import ( + "fmt" + "iter" + "strings" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -82,6 +86,124 @@ type ProxyClassSpec struct { // renewed. // +optional UseLetsEncryptStagingEnvironment bool `json:"useLetsEncryptStagingEnvironment,omitempty"` + // Configuration for 'static endpoints' on proxies in order to facilitate + // direct connections from other devices on the tailnet. + // See https://tailscale.com/kb/1445/kubernetes-operator-customization#static-endpoints. + // +optional + StaticEndpoints *StaticEndpointsConfig `json:"staticEndpoints,omitempty"` +} + +type StaticEndpointsConfig struct { + // The configuration for static endpoints using NodePort Services. + NodePort *NodePortConfig `json:"nodePort"` +} + +type NodePortConfig struct { + // The port ranges from which the operator will select NodePorts for the Services. + // You must ensure that firewall rules allow UDP ingress traffic for these ports + // to the node's external IPs. + // The ports must be in the range of service node ports for the cluster (default `30000-32767`). + // See https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport. + // +kubebuilder:validation:MinItems=1 + Ports []PortRange `json:"ports"` + // A selector which will be used to select the node's that will have their `ExternalIP`'s advertised + // by the ProxyGroup as Static Endpoints. + Selector map[string]string `json:"selector,omitempty"` +} + +// PortRanges is a list of PortRange(s) +type PortRanges []PortRange + +func (prs PortRanges) String() string { + var prStrings []string + + for _, pr := range prs { + prStrings = append(prStrings, pr.String()) + } + + return strings.Join(prStrings, ", ") +} + +// All allows us to iterate over all the ports in the PortRanges +func (prs PortRanges) All() iter.Seq[uint16] { + return func(yield func(uint16) bool) { + for _, pr := range prs { + end := pr.EndPort + if end == 0 { + end = pr.Port + } + + for port := pr.Port; port <= end; port++ { + if !yield(port) { + return + } + } + } + } +} + +// Contains reports whether port is in any of the PortRanges. +func (prs PortRanges) Contains(port uint16) bool { + for _, r := range prs { + if r.Contains(port) { + return true + } + } + + return false +} + +// ClashesWith reports whether the supplied PortRange clashes with any of the PortRanges. +func (prs PortRanges) ClashesWith(pr PortRange) bool { + for p := range prs.All() { + if pr.Contains(p) { + return true + } + } + + return false +} + +type PortRange struct { + // port represents a port selected to be used. This is a required field. + Port uint16 `json:"port"` + + // endPort indicates that the range of ports from port to endPort if set, inclusive, + // should be used. This field cannot be defined if the port field is not defined. + // The endPort must be either unset, or equal or greater than port. + // +optional + EndPort uint16 `json:"endPort,omitempty"` +} + +// Contains reports whether port is in pr. 
+func (pr PortRange) Contains(port uint16) bool { + switch pr.EndPort { + case 0: + return port == pr.Port + default: + return port >= pr.Port && port <= pr.EndPort + } +} + +// String returns the PortRange in a string form. +func (pr PortRange) String() string { + if pr.EndPort == 0 { + return fmt.Sprintf("%d", pr.Port) + } + + return fmt.Sprintf("%d-%d", pr.Port, pr.EndPort) +} + +// IsValid reports whether the port range is valid. +func (pr PortRange) IsValid() bool { + if pr.Port == 0 { + return false + } + if pr.EndPort == 0 { + return true + } + + return pr.Port <= pr.EndPort } type TailscaleConfig struct { diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index ac87cc6ca..17b13064b 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -111,6 +111,10 @@ type TailnetDevice struct { // assigned to the device. // +optional TailnetIPs []string `json:"tailnetIPs,omitempty"` + + // StaticEndpoints are user configured, 'static' endpoints by which tailnet peers can reach this device. + // +optional + StaticEndpoints []string `json:"staticEndpoints,omitempty"` } // +kubebuilder:validation:Type=string diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index e09127207..ffc04d3b9 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -407,6 +407,33 @@ func (in *NameserverStatus) DeepCopy() *NameserverStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePortConfig) DeepCopyInto(out *NodePortConfig) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]PortRange, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortConfig. +func (in *NodePortConfig) DeepCopy() *NodePortConfig { + if in == nil { + return nil + } + out := new(NodePortConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Pod) DeepCopyInto(out *Pod) { *out = *in @@ -482,6 +509,40 @@ func (in *Pod) DeepCopy() *Pod { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortRange) DeepCopyInto(out *PortRange) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRange. +func (in *PortRange) DeepCopy() *PortRange { + if in == nil { + return nil + } + out := new(PortRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in PortRanges) DeepCopyInto(out *PortRanges) { + { + in := &in + *out = make(PortRanges, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRanges. 
+func (in PortRanges) DeepCopy() PortRanges { + if in == nil { + return nil + } + out := new(PortRanges) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProxyClass) DeepCopyInto(out *ProxyClass) { *out = *in @@ -559,6 +620,11 @@ func (in *ProxyClassSpec) DeepCopyInto(out *ProxyClassSpec) { *out = new(TailscaleConfig) **out = **in } + if in.StaticEndpoints != nil { + in, out := &in.StaticEndpoints, &out.StaticEndpoints + *out = new(StaticEndpointsConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyClassSpec. @@ -1096,6 +1162,26 @@ func (in *StatefulSet) DeepCopy() *StatefulSet { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticEndpointsConfig) DeepCopyInto(out *StaticEndpointsConfig) { + *out = *in + if in.NodePort != nil { + in, out := &in.NodePort, &out.NodePort + *out = new(NodePortConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticEndpointsConfig. +func (in *StaticEndpointsConfig) DeepCopy() *StaticEndpointsConfig { + if in == nil { + return nil + } + out := new(StaticEndpointsConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Storage) DeepCopyInto(out *Storage) { *out = *in @@ -1163,6 +1249,11 @@ func (in *TailnetDevice) DeepCopyInto(out *TailnetDevice) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.StaticEndpoints != nil { + in, out := &in.StaticEndpoints, &out.StaticEndpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TailnetDevice. From 711698f5a985a5c93649b31c9f49ed6d22a91c42 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Fri, 27 Jun 2025 18:10:04 +0100 Subject: [PATCH 1013/1708] cmd/{containerboot,k8s-operator}: use state Secret for checking device auth (#16328) Previously, the operator checked the ProxyGroup status fields for information on how many of the proxies had successfully authed. Use their state Secrets instead as a more reliable source of truth. containerboot has written device_fqdn and device_ips keys to the state Secret since inception, and pod_uid since 1.78.0, so there's no need to use the API for that data. Read it from the state Secret for consistency. However, to ensure we don't read data from a previous run of containerboot, make sure we reset containerboot's state keys on startup. One other knock-on effect of that is ProxyGroups can briefly be marked not Ready while a Pod is restarting. Introduce a new ProxyGroupAvailable condition to more accurately reflect when downstream controllers can implement flows that rely on a ProxyGroup having at least 1 proxy Pod running. 
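As a rough illustration of the intended condition semantics (a minimal sketch only, not the operator's actual types or API; proxyGroupStatus and conditions are invented names for this example), the two conditions can be derived from how many proxies have registered versus how many replicas are desired:

// Sketch: Available means at least one proxy has written its device info,
// Ready means all desired replicas have. The real controller records these
// as metav1.Conditions on the ProxyGroup status rather than booleans.
package main

import "fmt"

type proxyGroupStatus struct {
	registeredDevices int // proxies that have written device info to their state Secret
	desiredReplicas   int
}

func conditions(s proxyGroupStatus) (available, ready bool) {
	available = s.registeredDevices > 0
	ready = s.registeredDevices == s.desiredReplicas
	return available, ready
}

func main() {
	s := proxyGroupStatus{registeredDevices: 1, desiredReplicas: 3}
	available, ready := conditions(s)
	// During a Pod restart or scale-up a ProxyGroup can be Available but not Ready.
	fmt.Printf("available=%v ready=%v\n", available, ready)
}
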
Fixes #16327 Change-Id: I026c18e9d23e87109a471a87b8e4fb6271716a66 Signed-off-by: Tom Proctor --- cmd/containerboot/kube.go | 42 +++-- cmd/containerboot/kube_test.go | 80 +++++++++ cmd/containerboot/main.go | 19 +- cmd/containerboot/main_test.go | 132 +++++++------- cmd/k8s-operator/egress-services-readiness.go | 2 +- .../egress-services-readiness_test.go | 2 +- cmd/k8s-operator/egress-services.go | 2 +- cmd/k8s-operator/ingress-for-pg.go | 6 +- cmd/k8s-operator/ingress-for-pg_test.go | 10 +- cmd/k8s-operator/proxygroup.go | 167 +++++++++++------- cmd/k8s-operator/proxygroup_specs.go | 6 +- cmd/k8s-operator/proxygroup_test.go | 18 +- cmd/k8s-operator/sts.go | 36 ++-- cmd/k8s-operator/svc-for-pg.go | 2 +- cmd/k8s-operator/svc-for-pg_test.go | 4 +- cmd/k8s-operator/tsrecorder.go | 9 +- k8s-operator/apis/v1alpha1/types_connector.go | 11 +- k8s-operator/conditions.go | 10 +- kube/kubeclient/fake_client.go | 17 +- 19 files changed, 373 insertions(+), 202 deletions(-) diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go index 0a2dfa1bf..d4a974e6f 100644 --- a/cmd/containerboot/kube.go +++ b/cmd/containerboot/kube.go @@ -18,12 +18,15 @@ import ( "time" "tailscale.com/ipn" + "tailscale.com/kube/egressservices" + "tailscale.com/kube/ingressservices" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" "tailscale.com/kube/kubetypes" "tailscale.com/logtail/backoff" "tailscale.com/tailcfg" "tailscale.com/types/logger" + "tailscale.com/util/set" ) // kubeClient is a wrapper around Tailscale's internal kube client that knows how to talk to the kube API server. We use @@ -117,20 +120,39 @@ func (kc *kubeClient) deleteAuthKey(ctx context.Context) error { return nil } -// storeCapVerUID stores the current capability version of tailscale and, if provided, UID of the Pod in the tailscale -// state Secret. -// These two fields are used by the Kubernetes Operator to observe the current capability version of tailscaled running in this container. -func (kc *kubeClient) storeCapVerUID(ctx context.Context, podUID string) error { - capVerS := fmt.Sprintf("%d", tailcfg.CurrentCapabilityVersion) - d := map[string][]byte{ - kubetypes.KeyCapVer: []byte(capVerS), +// resetContainerbootState resets state from previous runs of containerboot to +// ensure the operator doesn't use stale state when a Pod is first recreated. +func (kc *kubeClient) resetContainerbootState(ctx context.Context, podUID string) error { + existingSecret, err := kc.GetSecret(ctx, kc.stateSecret) + if err != nil { + return fmt.Errorf("failed to read state Secret %q to reset state: %w", kc.stateSecret, err) + } + + s := &kubeapi.Secret{ + Data: map[string][]byte{ + kubetypes.KeyCapVer: fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion), + }, } if podUID != "" { - d[kubetypes.KeyPodUID] = []byte(podUID) + s.Data[kubetypes.KeyPodUID] = []byte(podUID) } - s := &kubeapi.Secret{ - Data: d, + + toClear := set.SetOf([]string{ + kubetypes.KeyDeviceID, + kubetypes.KeyDeviceFQDN, + kubetypes.KeyDeviceIPs, + kubetypes.KeyHTTPSEndpoint, + egressservices.KeyEgressServices, + ingressservices.IngressConfigKey, + }) + for key := range existingSecret.Data { + if toClear.Contains(key) { + // It's fine to leave the key in place as a debugging breadcrumb, + // it should get a new value soon. 
+ s.Data[key] = nil + } } + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container") } diff --git a/cmd/containerboot/kube_test.go b/cmd/containerboot/kube_test.go index 413971bc6..c33714ed1 100644 --- a/cmd/containerboot/kube_test.go +++ b/cmd/containerboot/kube_test.go @@ -8,13 +8,18 @@ package main import ( "context" "errors" + "fmt" "testing" "time" "github.com/google/go-cmp/cmp" "tailscale.com/ipn" + "tailscale.com/kube/egressservices" + "tailscale.com/kube/ingressservices" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" ) func TestSetupKube(t *testing.T) { @@ -238,3 +243,78 @@ func TestWaitForConsistentState(t *testing.T) { t.Fatalf("expected nil, got %v", err) } } + +func TestResetContainerbootState(t *testing.T) { + capver := fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion) + for name, tc := range map[string]struct { + podUID string + initial map[string][]byte + expected map[string][]byte + }{ + "empty_initial": { + podUID: "1234", + initial: map[string][]byte{}, + expected: map[string][]byte{ + kubetypes.KeyCapVer: capver, + kubetypes.KeyPodUID: []byte("1234"), + }, + }, + "empty_initial_no_pod_uid": { + initial: map[string][]byte{}, + expected: map[string][]byte{ + kubetypes.KeyCapVer: capver, + }, + }, + "only_relevant_keys_updated": { + podUID: "1234", + initial: map[string][]byte{ + kubetypes.KeyCapVer: []byte("1"), + kubetypes.KeyPodUID: []byte("5678"), + kubetypes.KeyDeviceID: []byte("device-id"), + kubetypes.KeyDeviceFQDN: []byte("device-fqdn"), + kubetypes.KeyDeviceIPs: []byte(`["192.0.2.1"]`), + kubetypes.KeyHTTPSEndpoint: []byte("https://example.com"), + egressservices.KeyEgressServices: []byte("egress-services"), + ingressservices.IngressConfigKey: []byte("ingress-config"), + "_current-profile": []byte("current-profile"), + "_machinekey": []byte("machine-key"), + "_profiles": []byte("profiles"), + "_serve_e0ce": []byte("serve-e0ce"), + "profile-e0ce": []byte("profile-e0ce"), + }, + expected: map[string][]byte{ + kubetypes.KeyCapVer: capver, + kubetypes.KeyPodUID: []byte("1234"), + // Cleared keys. + kubetypes.KeyDeviceID: nil, + kubetypes.KeyDeviceFQDN: nil, + kubetypes.KeyDeviceIPs: nil, + kubetypes.KeyHTTPSEndpoint: nil, + egressservices.KeyEgressServices: nil, + ingressservices.IngressConfigKey: nil, + // Tailscaled keys not included in patch. + }, + }, + } { + t.Run(name, func(t *testing.T) { + var actual map[string][]byte + kc := &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{ + GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { + return &kubeapi.Secret{ + Data: tc.initial, + }, nil + }, + StrategicMergePatchSecretImpl: func(ctx context.Context, name string, secret *kubeapi.Secret, _ string) error { + actual = secret.Data + return nil + }, + }} + if err := kc.resetContainerbootState(context.Background(), tc.podUID); err != nil { + t.Fatalf("resetContainerbootState() error = %v", err) + } + if diff := cmp.Diff(tc.expected, actual); diff != "" { + t.Errorf("resetContainerbootState() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 954330897..52b30b837 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -188,6 +188,14 @@ func run() error { if err := cfg.setupKube(bootCtx, kc); err != nil { return fmt.Errorf("error setting up for running on Kubernetes: %w", err) } + // Clear out any state from previous runs of containerboot. 
Check + // hasKubeStateStore because although we know we're in kube, that + // doesn't guarantee the state store is properly configured. + if hasKubeStateStore(cfg) { + if err := kc.resetContainerbootState(bootCtx, cfg.PodUID); err != nil { + return fmt.Errorf("error clearing previous state from Secret: %w", err) + } + } } client, daemonProcess, err := startTailscaled(bootCtx, cfg) @@ -367,11 +375,6 @@ authLoop: if err := client.SetServeConfig(ctx, new(ipn.ServeConfig)); err != nil { return fmt.Errorf("failed to unset serve config: %w", err) } - if hasKubeStateStore(cfg) { - if err := kc.storeHTTPSEndpoint(ctx, ""); err != nil { - return fmt.Errorf("failed to update HTTPS endpoint in tailscale state: %w", err) - } - } } if hasKubeStateStore(cfg) && isTwoStepConfigAuthOnce(cfg) { @@ -384,12 +387,6 @@ authLoop: } } - if hasKubeStateStore(cfg) { - if err := kc.storeCapVerUID(ctx, cfg.PodUID); err != nil { - return fmt.Errorf("storing capability version and UID: %w", err) - } - } - w, err = client.WatchIPNBus(ctx, ipn.NotifyInitialNetMap|ipn.NotifyInitialState) if err != nil { return fmt.Errorf("rewatching tailscaled for updates after auth: %w", err) diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index c7293c77a..96feef682 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -460,6 +460,7 @@ func TestContainerBoot(t *testing.T) { Env: map[string]string{ "KUBERNETES_SERVICE_HOST": env.kube.Host, "KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, + "POD_UID": "some-pod-uid", }, KubeSecret: map[string]string{ "authkey": "tskey-key", @@ -471,17 +472,20 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, + kubetypes.KeyPodUID: "some-pod-uid", }, }, { Notify: runningNotify, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + kubetypes.KeyCapVer: capver, + kubetypes.KeyPodUID: "some-pod-uid", }, }, }, @@ -554,7 +558,8 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, }, { @@ -565,7 +570,8 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, }, { @@ -574,10 +580,10 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false", }, WantKubeSecret: map[string]string{ - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + kubetypes.KeyCapVer: capver, }, }, }, @@ -599,17 +605,18 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: 
map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, }, { Notify: runningNotify, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + kubetypes.KeyCapVer: capver, }, }, { @@ -624,11 +631,11 @@ func TestContainerBoot(t *testing.T) { }, }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "new-name.test.ts.net", - "device_id": "newID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, + "authkey": "tskey-key", + "device_fqdn": "new-name.test.ts.net", + "device_id": "newID", + "device_ips": `["100.64.0.1"]`, + kubetypes.KeyCapVer: capver, }, }, }, @@ -912,18 +919,19 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, }, { Notify: runningNotify, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "https_endpoint": "no-https", - "tailscale_capver": capver, + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + "https_endpoint": "no-https", + kubetypes.KeyCapVer: capver, }, }, }, @@ -947,7 +955,8 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, EndpointStatuses: map[string]int{ egressSvcTerminateURL(env.localAddrPort): 200, @@ -956,12 +965,12 @@ func TestContainerBoot(t *testing.T) { { Notify: runningNotify, WantKubeSecret: map[string]string{ - "egress-services": mustBase64(t, egressStatus), - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, + "egress-services": string(mustJSON(t, egressStatus)), + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + kubetypes.KeyCapVer: capver, }, EndpointStatuses: map[string]int{ egressSvcTerminateURL(env.localAddrPort): 200, @@ -1002,7 +1011,8 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, }, { @@ -1016,10 +1026,11 @@ func TestContainerBoot(t *testing.T) { // Missing "_current-profile" key. 
}, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "_machinekey": "foo", - "_profiles": "foo", - "profile-baff": "foo", + "authkey": "tskey-key", + "_machinekey": "foo", + "_profiles": "foo", + "profile-baff": "foo", + kubetypes.KeyCapVer: capver, }, WantLog: "Waiting for tailscaled to finish writing state to Secret \"tailscale\"", }, @@ -1029,11 +1040,12 @@ func TestContainerBoot(t *testing.T) { "_current-profile": "foo", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "_machinekey": "foo", - "_profiles": "foo", - "profile-baff": "foo", - "_current-profile": "foo", + "authkey": "tskey-key", + "_machinekey": "foo", + "_profiles": "foo", + "profile-baff": "foo", + "_current-profile": "foo", + kubetypes.KeyCapVer: capver, }, WantLog: "HTTP server at [::]:9002 closed", WantExitCode: ptr.To(0), @@ -1061,7 +1073,7 @@ func TestContainerBoot(t *testing.T) { fmt.Sprintf("TS_TEST_SOCKET=%s", env.lapi.Path), fmt.Sprintf("TS_SOCKET=%s", env.runningSockPath), fmt.Sprintf("TS_TEST_ONLY_ROOT=%s", env.d), - fmt.Sprint("TS_TEST_FAKE_NETFILTER=true"), + "TS_TEST_FAKE_NETFILTER=true", } for k, v := range tc.Env { cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v)) @@ -1489,10 +1501,7 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) { } switch r.Header.Get("Content-Type") { case "application/json-patch+json": - req := []struct { - Op string `json:"op"` - Path string `json:"path"` - }{} + req := []kubeclient.JSONPatch{} if err := json.Unmarshal(bs, &req); err != nil { panic(fmt.Sprintf("json decode failed: %v. Body:\n\n%s", err, string(bs))) } @@ -1503,23 +1512,20 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) { panic(fmt.Sprintf("unsupported json-patch path %q", op.Path)) } delete(k.secret, strings.TrimPrefix(op.Path, "/data/")) - case "replace": + case "add", "replace": path, ok := strings.CutPrefix(op.Path, "/data/") if !ok { panic(fmt.Sprintf("unsupported json-patch path %q", op.Path)) } - req := make([]kubeclient.JSONPatch, 0) - if err := json.Unmarshal(bs, &req); err != nil { - panic(fmt.Sprintf("json decode failed: %v. 
Body:\n\n%s", err, string(bs))) + val, ok := op.Value.(string) + if !ok { + panic(fmt.Sprintf("unsupported json patch value %v: cannot be converted to string", op.Value)) } - - for _, patch := range req { - val, ok := patch.Value.(string) - if !ok { - panic(fmt.Sprintf("unsupported json patch value %v: cannot be converted to string", patch.Value)) - } - k.secret[path] = val + v, err := base64.StdEncoding.DecodeString(val) + if err != nil { + panic(fmt.Sprintf("json patch value %q is not base64 encoded: %v", val, err)) } + k.secret[path] = string(v) default: panic(fmt.Sprintf("unsupported json-patch op %q", op.Op)) } diff --git a/cmd/k8s-operator/egress-services-readiness.go b/cmd/k8s-operator/egress-services-readiness.go index 5e95a5279..ecf99b63c 100644 --- a/cmd/k8s-operator/egress-services-readiness.go +++ b/cmd/k8s-operator/egress-services-readiness.go @@ -102,7 +102,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re msg = err.Error() return res, err } - if !tsoperator.ProxyGroupIsReady(pg) { + if !tsoperator.ProxyGroupAvailable(pg) { l.Infof("ProxyGroup for Service is not ready, waiting...") reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady st = metav1.ConditionFalse diff --git a/cmd/k8s-operator/egress-services-readiness_test.go b/cmd/k8s-operator/egress-services-readiness_test.go index ce947329d..f80759aef 100644 --- a/cmd/k8s-operator/egress-services-readiness_test.go +++ b/cmd/k8s-operator/egress-services-readiness_test.go @@ -137,7 +137,7 @@ func setReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replic } func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, l *zap.SugaredLogger) { - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, l) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, l) } func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1.EndpointSlice) { diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index 7103205ac..ca6562071 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -531,7 +531,7 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, nil } - if !tsoperator.ProxyGroupIsReady(pg) { + if !tsoperator.ProxyGroupAvailable(pg) { tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) } diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index ea31dbd63..09417fd0c 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -182,7 +182,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin } return false, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) } - if !tsoperator.ProxyGroupIsReady(pg) { + if !tsoperator.ProxyGroupAvailable(pg) { logger.Infof("ProxyGroup is not (yet) ready") return false, nil } @@ -666,7 +666,7 @@ func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networki } // Validate TLS configuration - if ing.Spec.TLS != nil && len(ing.Spec.TLS) > 0 && (len(ing.Spec.TLS) > 1 || len(ing.Spec.TLS[0].Hosts) > 1) { + if len(ing.Spec.TLS) > 0 && 
(len(ing.Spec.TLS) > 1 || len(ing.Spec.TLS[0].Hosts) > 1) { errs = append(errs, fmt.Errorf("Ingress contains invalid TLS block %v: only a single TLS entry with a single host is allowed", ing.Spec.TLS)) } @@ -683,7 +683,7 @@ func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networki } // Validate ProxyGroup readiness - if !tsoperator.ProxyGroupIsReady(pg) { + if !tsoperator.ProxyGroupAvailable(pg) { errs = append(errs, fmt.Errorf("ProxyGroup %q is not ready", pg.Name)) } diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 05f482792..2308514f3 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -305,7 +305,7 @@ func TestValidateIngress(t *testing.T) { Status: tsapi.ProxyGroupStatus{ Conditions: []metav1.Condition{ { - Type: string(tsapi.ProxyGroupReady), + Type: string(tsapi.ProxyGroupAvailable), Status: metav1.ConditionTrue, ObservedGeneration: 1, }, @@ -399,7 +399,7 @@ func TestValidateIngress(t *testing.T) { Status: tsapi.ProxyGroupStatus{ Conditions: []metav1.Condition{ { - Type: string(tsapi.ProxyGroupReady), + Type: string(tsapi.ProxyGroupAvailable), Status: metav1.ConditionFalse, ObservedGeneration: 1, }, @@ -755,7 +755,7 @@ func verifyTailscaledConfig(t *testing.T, fc client.Client, pgName string, expec Labels: pgSecretLabels(pgName, "config"), }, Data: map[string][]byte{ - tsoperator.TailscaledConfigFileName(106): []byte(fmt.Sprintf(`{"Version":""%s}`, expected)), + tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): []byte(fmt.Sprintf(`{"Version":""%s}`, expected)), }, }) } @@ -794,13 +794,13 @@ func createPGResources(t *testing.T, fc client.Client, pgName string) { Labels: pgSecretLabels(pgName, "config"), }, Data: map[string][]byte{ - tsoperator.TailscaledConfigFileName(106): []byte("{}"), + tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): []byte("{}"), }, } mustCreate(t, fc, pgCfgSecret) pg.Status.Conditions = []metav1.Condition{ { - Type: string(tsapi.ProxyGroupReady), + Type: string(tsapi.ProxyGroupAvailable), Status: metav1.ConditionTrue, ObservedGeneration: 1, }, diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 328262031..bedf06ba0 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -52,6 +52,17 @@ const ( // Copied from k8s.io/apiserver/pkg/registry/generic/registry/store.go@cccad306d649184bf2a0e319ba830c53f65c445c optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" staticEndpointsMaxAddrs = 2 + + // The minimum tailcfg.CapabilityVersion that deployed clients are expected + // to support to be compatible with the current ProxyGroup controller. + // If the controller needs to depend on newer client behaviour, it should + // maintain backwards compatible logic for older capability versions for 3 + // stable releases, as per documentation on supported version drift: + // https://tailscale.com/kb/1236/kubernetes-operator#supported-versions + // + // tailcfg.CurrentCapabilityVersion was 106 when the ProxyGroup controller was + // first introduced. + pgMinCapabilityVersion = 106 ) var ( @@ -204,14 +215,27 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } desiredReplicas := int(pgReplicas(pg)) + + // Set ProxyGroupAvailable condition. 
+ status := metav1.ConditionFalse + reason := reasonProxyGroupCreating + message := fmt.Sprintf("%d/%d ProxyGroup pods running", len(pg.Status.Devices), desiredReplicas) + if len(pg.Status.Devices) > 0 { + status = metav1.ConditionTrue + if len(pg.Status.Devices) == desiredReplicas { + reason = reasonProxyGroupReady + } + } + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, status, reason, message, pg.Generation, r.clock, logger) + + // Set ProxyGroupReady condition. if len(pg.Status.Devices) < desiredReplicas { - message := fmt.Sprintf("%d/%d ProxyGroup pods running", len(pg.Status.Devices), desiredReplicas) logger.Debug(message) return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) } if len(pg.Status.Devices) > desiredReplicas { - message := fmt.Sprintf("waiting for %d ProxyGroup pods to shut down", len(pg.Status.Devices)-desiredReplicas) + message = fmt.Sprintf("waiting for %d ProxyGroup pods to shut down", len(pg.Status.Devices)-desiredReplicas) logger.Debug(message) return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) } @@ -524,17 +548,13 @@ func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, pg if err := r.deleteTailnetDevice(ctx, m.tsID, logger); err != nil { return err } - if err := r.Delete(ctx, m.stateSecret); err != nil { - if !apierrors.IsNotFound(err) { - return fmt.Errorf("error deleting state Secret %s: %w", m.stateSecret.Name, err) - } + if err := r.Delete(ctx, m.stateSecret); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("error deleting state Secret %q: %w", m.stateSecret.Name, err) } configSecret := m.stateSecret.DeepCopy() configSecret.Name += "-config" - if err := r.Delete(ctx, configSecret); err != nil { - if !apierrors.IsNotFound(err) { - return fmt.Errorf("error deleting config Secret %s: %w", configSecret.Name, err) - } + if err := r.Delete(ctx, configSecret); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("error deleting config Secret %q: %w", configSecret.Name, err) } // NOTE(ChaosInTheCRD): we shouldn't need to get the service first, checking for a not found error should be enough svc := &corev1.Service{ @@ -635,17 +655,38 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p return nil, err } - var authKey string + var authKey *string if existingCfgSecret == nil { logger.Debugf("Creating authkey for new ProxyGroup proxy") tags := pg.Spec.Tags.Stringify() if len(tags) == 0 { tags = r.defaultTags } - authKey, err = newAuthKey(ctx, r.tsClient, tags) + key, err := newAuthKey(ctx, r.tsClient, tags) if err != nil { return nil, err } + authKey = &key + } + + if authKey == nil { + // Get state Secret to check if it's already authed. 
+ stateSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgStateSecretName(pg.Name, i), + Namespace: r.tsNamespace, + }, + } + if err := r.Get(ctx, client.ObjectKeyFromObject(stateSecret), stateSecret); err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + + if shouldRetainAuthKey(stateSecret) && existingCfgSecret != nil { + authKey, err = authKeyFromSecret(existingCfgSecret) + if err != nil { + return nil, fmt.Errorf("error retrieving auth key from existing config Secret: %w", err) + } + } } replicaName := pgNodePortServiceName(pg.Name, i) @@ -661,7 +702,14 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } } - configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, existingCfgSecret, endpoints[replicaName]) + // AdvertiseServices config is set by ingress-pg-reconciler, so make sure we + // don't overwrite it if already set. + existingAdvertiseServices, err := extractAdvertiseServicesConfig(existingCfgSecret) + if err != nil { + return nil, err + } + + configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[replicaName], existingAdvertiseServices) if err != nil { return nil, fmt.Errorf("error creating tailscaled config: %w", err) } @@ -811,20 +859,22 @@ func (r *ProxyGroupReconciler) ensureRemovedFromGaugeForProxyGroup(pg *tsapi.Pro gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len())) } -func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32, authKey string, oldSecret *corev1.Secret, staticEndpoints []netip.AddrPort) (tailscaledConfigs, error) { +func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, authKey *string, staticEndpoints []netip.AddrPort, oldAdvertiseServices []string) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ - Version: "alpha0", - AcceptDNS: "false", - AcceptRoutes: "false", // AcceptRoutes defaults to true - Locked: "false", - Hostname: ptr.To(fmt.Sprintf("%s-%d", pg.Name, idx)), + Version: "alpha0", + AcceptDNS: "false", + AcceptRoutes: "false", // AcceptRoutes defaults to true + Locked: "false", + Hostname: ptr.To(fmt.Sprintf("%s-%d", pg.Name, idx)), + AdvertiseServices: oldAdvertiseServices, + AuthKey: authKey, } if pg.Spec.HostnamePrefix != "" { conf.Hostname = ptr.To(fmt.Sprintf("%s-%d", pg.Spec.HostnamePrefix, idx)) } - if shouldAcceptRoutes(class) { + if shouldAcceptRoutes(pc) { conf.AcceptRoutes = "true" } @@ -832,51 +882,26 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32 conf.StaticEndpoints = staticEndpoints } - deviceAuthed := false - for _, d := range pg.Status.Devices { - if d.Hostname == *conf.Hostname { - deviceAuthed = true - break - } - } - - if authKey != "" { - conf.AuthKey = &authKey - } else if !deviceAuthed { - key, err := authKeyFromSecret(oldSecret) - if err != nil { - return nil, fmt.Errorf("error retrieving auth key from Secret: %w", err) - } - conf.AuthKey = key - } - capVerConfigs := make(map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) - - // AdvertiseServices config is set by ingress-pg-reconciler, so make sure we - // don't overwrite it here. 
- if err := copyAdvertiseServicesConfig(conf, oldSecret, 106); err != nil { - return nil, err - } - capVerConfigs[106] = *conf - return capVerConfigs, nil + return map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha{ + pgMinCapabilityVersion: *conf, + }, nil } -func copyAdvertiseServicesConfig(conf *ipn.ConfigVAlpha, oldSecret *corev1.Secret, capVer tailcfg.CapabilityVersion) error { - if oldSecret == nil { - return nil +func extractAdvertiseServicesConfig(cfgSecret *corev1.Secret) ([]string, error) { + if cfgSecret == nil { + return nil, nil } - oldConfB := oldSecret.Data[tsoperator.TailscaledConfigFileName(capVer)] - if len(oldConfB) == 0 { - return nil + conf, err := latestConfigFromSecret(cfgSecret) + if err != nil { + return nil, err } - var oldConf ipn.ConfigVAlpha - if err := json.Unmarshal(oldConfB, &oldConf); err != nil { - return fmt.Errorf("error unmarshalling existing config: %w", err) + if conf == nil { + return nil, nil } - conf.AdvertiseServices = oldConf.AdvertiseServices - return nil + return conf.AdvertiseServices, nil } func (r *ProxyGroupReconciler) validate(_ *tsapi.ProxyGroup) error { @@ -914,7 +939,7 @@ func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr dnsName: prefs.Config.UserProfile.LoginName, } pod := &corev1.Pod{} - if err := r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: secret.Name}, pod); err != nil && !apierrors.IsNotFound(err) { + if err := r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: fmt.Sprintf("%s-%d", pg.Name, ordinal)}, pod); err != nil && !apierrors.IsNotFound(err) { return nil, err } else if err == nil { nm.podUID = string(pod.UID) @@ -932,17 +957,23 @@ func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, staticEndpoint } for _, m := range metadata { - device, ok, err := getDeviceInfo(ctx, r.tsClient, m.stateSecret) - if err != nil { - return nil, err - } - if !ok { + if !strings.EqualFold(string(m.stateSecret.Data[kubetypes.KeyPodUID]), m.podUID) { + // Current Pod has not yet written its UID to the state Secret, data may + // be stale. 
continue } - dev := tsapi.TailnetDevice{ - Hostname: device.Hostname, - TailnetIPs: device.TailnetIPs, + device := tsapi.TailnetDevice{} + if ipsB := m.stateSecret.Data[kubetypes.KeyDeviceIPs]; len(ipsB) > 0 { + ips := []string{} + if err := json.Unmarshal(ipsB, &ips); err != nil { + return nil, fmt.Errorf("failed to extract device IPs from state Secret %q: %w", m.stateSecret.Name, err) + } + device.TailnetIPs = ips + } + + if hostname, _, ok := strings.Cut(string(m.stateSecret.Data[kubetypes.KeyDeviceFQDN]), "."); ok { + device.Hostname = hostname } if ep, ok := staticEndpoints[device.Hostname]; ok && len(ep) > 0 { @@ -950,10 +981,10 @@ func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, staticEndpoint for _, e := range ep { eps = append(eps, e.String()) } - dev.StaticEndpoints = eps + device.StaticEndpoints = eps } - devices = append(devices, dev) + devices = append(devices, device) } return devices, nil diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 20e797f0c..50d9c2d5f 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -351,7 +351,7 @@ func pgStateSecrets(pg *tsapi.ProxyGroup, namespace string) (secrets []*corev1.S for i := range pgReplicas(pg) { secrets = append(secrets, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%d", pg.Name, i), + Name: pgStateSecretName(pg.Name, i), Namespace: namespace, Labels: pgSecretLabels(pg.Name, "state"), OwnerReferences: pgOwnerReference(pg), @@ -422,6 +422,10 @@ func pgConfigSecretName(pgName string, i int32) string { return fmt.Sprintf("%s-%d-config", pgName, i) } +func pgStateSecretName(pgName string, i int32) string { + return fmt.Sprintf("%s-%d", pgName, i) +} + func pgEgressCMName(pg string) string { return fmt.Sprintf("%s-egress-config", pg) } diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 8ffce2c0c..87b04a434 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -877,6 +877,7 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) if expected := 1; reconciler.egressProxyGroups.Len() != expected { @@ -913,6 +914,7 @@ func TestProxyGroup(t *testing.T) { }, } tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "2/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) }) @@ -924,12 +926,14 @@ func TestProxyGroup(t *testing.T) { }) expectReconciled(t, reconciler, "", pg.Name) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, 
true, pc) addNodeIDToStateSecrets(t, fc, pg) expectReconciled(t, reconciler, "", pg.Name) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "3/3 ProxyGroup pods running", 0, cl, zl.Sugar()) pg.Status.Devices = append(pg.Status.Devices, tsapi.TailnetDevice{ Hostname: "hostname-nodeid-2", TailnetIPs: []string{"1.2.3.4", "::1"}, @@ -947,6 +951,7 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) pg.Status.Devices = pg.Status.Devices[:1] // truncate to only the first device. + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "1/1 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) }) @@ -1224,7 +1229,7 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { Namespace: tsNamespace, }, Data: map[string][]byte{ - tsoperator.TailscaledConfigFileName(106): existingConfigBytes, + tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): existingConfigBytes, }, }) @@ -1261,7 +1266,7 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { ResourceVersion: "2", }, Data: map[string][]byte{ - tsoperator.TailscaledConfigFileName(106): expectedConfigBytes, + tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): expectedConfigBytes, }, }) } @@ -1421,8 +1426,13 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG mustUpdate(t, fc, tsNamespace, fmt.Sprintf("test-%d", i), func(s *corev1.Secret) { s.Data = map[string][]byte{ - currentProfileKey: []byte(key), - key: bytes, + currentProfileKey: []byte(key), + key: bytes, + kubetypes.KeyDeviceIPs: []byte(`["1.2.3.4", "::1"]`), + kubetypes.KeyDeviceFQDN: []byte(fmt.Sprintf("hostname-nodeid-%d.tails-scales.ts.net", i)), + // TODO(tomhjp): We have two different mechanisms to retrieve device IDs. + // Consolidate on this one. + kubetypes.KeyDeviceID: []byte(fmt.Sprintf("nodeid-%d", i)), } }) } diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 4c7c3ac67..3e3d2d590 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -897,14 +897,6 @@ func enableEndpoints(ss *appsv1.StatefulSet, metrics, debug bool) { } } -func readAuthKey(secret *corev1.Secret, key string) (*string, error) { - origConf := &ipn.ConfigVAlpha{} - if err := json.Unmarshal([]byte(secret.Data[key]), origConf); err != nil { - return nil, fmt.Errorf("error unmarshaling previous tailscaled config in %q: %w", key, err) - } - return origConf.AuthKey, nil -} - // tailscaledConfig takes a proxy config, a newly generated auth key if generated and a Secret with the previous proxy // state and auth key and returns tailscaled config files for currently supported proxy versions. func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { @@ -951,7 +943,10 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co return capVerConfigs, nil } -func authKeyFromSecret(s *corev1.Secret) (key *string, err error) { +// latestConfigFromSecret returns the ipn.ConfigVAlpha with the highest capver +// as found in the Secret's key names, e.g. "cap-107.hujson" has capver 107. +// If no config is found, it returns nil. 
+func latestConfigFromSecret(s *corev1.Secret) (*ipn.ConfigVAlpha, error) { latest := tailcfg.CapabilityVersion(-1) latestStr := "" for k, data := range s.Data { @@ -968,12 +963,31 @@ func authKeyFromSecret(s *corev1.Secret) (key *string, err error) { latest = v } } + + var conf *ipn.ConfigVAlpha + if latestStr != "" { + conf = &ipn.ConfigVAlpha{} + if err := json.Unmarshal([]byte(s.Data[latestStr]), conf); err != nil { + return nil, fmt.Errorf("error unmarshaling tailscaled config from Secret %q in field %q: %w", s.Name, latestStr, err) + } + } + + return conf, nil +} + +func authKeyFromSecret(s *corev1.Secret) (key *string, err error) { + conf, err := latestConfigFromSecret(s) + if err != nil { + return nil, err + } + // Allow for configs that don't contain an auth key. Perhaps // users have some mechanisms to delete them. Auth key is // normally not needed after the initial login. - if latestStr != "" { - return readAuthKey(s, latestStr) + if conf != nil { + key = conf.AuthKey } + return key, nil } diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go index c9b5b8ae6..9846513c7 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -164,7 +164,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin } return false, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) } - if !tsoperator.ProxyGroupIsReady(pg) { + if !tsoperator.ProxyGroupAvailable(pg) { logger.Infof("ProxyGroup is not (yet) ready") return false, nil } diff --git a/cmd/k8s-operator/svc-for-pg_test.go b/cmd/k8s-operator/svc-for-pg_test.go index 5772cd5d6..e08bfd80d 100644 --- a/cmd/k8s-operator/svc-for-pg_test.go +++ b/cmd/k8s-operator/svc-for-pg_test.go @@ -142,7 +142,7 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien Labels: pgSecretLabels("test-pg", "config"), }, Data: map[string][]byte{ - tsoperator.TailscaledConfigFileName(106): []byte(`{"Version":""}`), + tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): []byte(`{"Version":""}`), }, } @@ -179,7 +179,7 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien // Set ProxyGroup status to ready pg.Status.Conditions = []metav1.Condition{ { - Type: string(tsapi.ProxyGroupReady), + Type: string(tsapi.ProxyGroupAvailable), Status: metav1.ConditionTrue, ObservedGeneration: 1, }, diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index 081543cd3..cbabc1d89 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -446,18 +446,15 @@ func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string) return tsapi.RecorderTailnetDevice{}, false, err } - return getDeviceInfo(ctx, r.tsClient, secret) -} - -func getDeviceInfo(ctx context.Context, tsClient tsClient, secret *corev1.Secret) (d tsapi.RecorderTailnetDevice, ok bool, err error) { prefs, ok, err := getDevicePrefs(secret) if !ok || err != nil { return tsapi.RecorderTailnetDevice{}, false, err } // TODO(tomhjp): The profile info doesn't include addresses, which is why we - // need the API. Should we instead update the profile to include addresses? - device, err := tsClient.Device(ctx, string(prefs.Config.NodeID), nil) + // need the API. Should maybe update tsrecorder to write IPs to the state + // Secret like containerboot does. 
+ device, err := r.tsClient.Device(ctx, string(prefs.Config.NodeID), nil) if err != nil { return tsapi.RecorderTailnetDevice{}, false, fmt.Errorf("failed to get device info from API: %w", err) } diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index b8b7a935e..88fd07346 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -205,11 +205,12 @@ type ConnectorStatus struct { type ConditionType string const ( - ConnectorReady ConditionType = `ConnectorReady` - ProxyClassReady ConditionType = `ProxyClassReady` - ProxyGroupReady ConditionType = `ProxyGroupReady` - ProxyReady ConditionType = `TailscaleProxyReady` // a Tailscale-specific condition type for corev1.Service - RecorderReady ConditionType = `RecorderReady` + ConnectorReady ConditionType = `ConnectorReady` + ProxyClassReady ConditionType = `ProxyClassReady` + ProxyGroupReady ConditionType = `ProxyGroupReady` // All proxy Pods running. + ProxyGroupAvailable ConditionType = `ProxyGroupAvailable` // At least one proxy Pod running. + ProxyReady ConditionType = `TailscaleProxyReady` // a Tailscale-specific condition type for corev1.Service + RecorderReady ConditionType = `RecorderReady` // EgressSvcValid gets set on a user configured ExternalName Service that defines a tailnet target to be exposed // on a ProxyGroup. // Set to true if the user provided configuration is valid. diff --git a/k8s-operator/conditions.go b/k8s-operator/conditions.go index abe8f7f9c..1d30f352c 100644 --- a/k8s-operator/conditions.go +++ b/k8s-operator/conditions.go @@ -137,8 +137,16 @@ func ProxyClassIsReady(pc *tsapi.ProxyClass) bool { } func ProxyGroupIsReady(pg *tsapi.ProxyGroup) bool { + return proxyGroupCondition(pg, tsapi.ProxyGroupReady) +} + +func ProxyGroupAvailable(pg *tsapi.ProxyGroup) bool { + return proxyGroupCondition(pg, tsapi.ProxyGroupAvailable) +} + +func proxyGroupCondition(pg *tsapi.ProxyGroup, condType tsapi.ConditionType) bool { idx := xslices.IndexFunc(pg.Status.Conditions, func(cond metav1.Condition) bool { - return cond.Type == string(tsapi.ProxyGroupReady) + return cond.Type == string(condType) }) if idx == -1 { return false diff --git a/kube/kubeclient/fake_client.go b/kube/kubeclient/fake_client.go index c21dc2bf8..15ebb5f44 100644 --- a/kube/kubeclient/fake_client.go +++ b/kube/kubeclient/fake_client.go @@ -13,12 +13,13 @@ import ( var _ Client = &FakeClient{} type FakeClient struct { - GetSecretImpl func(context.Context, string) (*kubeapi.Secret, error) - CheckSecretPermissionsImpl func(ctx context.Context, name string) (bool, bool, error) - CreateSecretImpl func(context.Context, *kubeapi.Secret) error - UpdateSecretImpl func(context.Context, *kubeapi.Secret) error - JSONPatchResourceImpl func(context.Context, string, string, []JSONPatch) error - ListSecretsImpl func(context.Context, map[string]string) (*kubeapi.SecretList, error) + GetSecretImpl func(context.Context, string) (*kubeapi.Secret, error) + CheckSecretPermissionsImpl func(ctx context.Context, name string) (bool, bool, error) + CreateSecretImpl func(context.Context, *kubeapi.Secret) error + UpdateSecretImpl func(context.Context, *kubeapi.Secret) error + JSONPatchResourceImpl func(context.Context, string, string, []JSONPatch) error + ListSecretsImpl func(context.Context, map[string]string) (*kubeapi.SecretList, error) + StrategicMergePatchSecretImpl func(context.Context, string, *kubeapi.Secret, string) error } func (fc *FakeClient) CheckSecretPermissions(ctx 
context.Context, name string) (bool, bool, error) { @@ -30,8 +31,8 @@ func (fc *FakeClient) GetSecret(ctx context.Context, name string) (*kubeapi.Secr func (fc *FakeClient) SetURL(_ string) {} func (fc *FakeClient) SetDialer(dialer func(ctx context.Context, network, addr string) (net.Conn, error)) { } -func (fc *FakeClient) StrategicMergePatchSecret(context.Context, string, *kubeapi.Secret, string) error { - return nil +func (fc *FakeClient) StrategicMergePatchSecret(ctx context.Context, name string, s *kubeapi.Secret, fieldManager string) error { + return fc.StrategicMergePatchSecretImpl(ctx, name, s, fieldManager) } func (fc *FakeClient) Event(context.Context, string, string, string) error { return nil From 0a64e86a0df89db063211a826ba4f62eb5ec959f Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 27 Jun 2025 13:56:55 -0700 Subject: [PATCH 1014/1708] wgengine/magicsock: move UDP relay path discovery to heartbeat() (#16407) This was previously hooked around direct UDP path discovery / CallMeMaybe transmission, and related conditions. Now it is subject to relay-specific considerations. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 80 +++++++++++++++++++++++------ wgengine/magicsock/endpoint_test.go | 42 +++++++++++++++ wgengine/magicsock/magicsock.go | 14 +++-- 3 files changed, 118 insertions(+), 18 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index fb5a28c28..29ae025f4 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -75,11 +75,12 @@ type endpoint struct { // mu protects all following fields. mu sync.Mutex // Lock ordering: Conn.mu, then endpoint.mu - heartBeatTimer *time.Timer // nil when idle - lastSendExt mono.Time // last time there were outgoing packets sent to this peer from an external trigger (e.g. wireguard-go or disco pingCLI) - lastSendAny mono.Time // last time there were outgoing packets sent this peer from any trigger, internal or external to magicsock - lastFullPing mono.Time // last time we pinged all disco or wireguard only endpoints - derpAddr netip.AddrPort // fallback/bootstrap path, if non-zero (non-zero for well-behaved clients) + heartBeatTimer *time.Timer // nil when idle + lastSendExt mono.Time // last time there were outgoing packets sent to this peer from an external trigger (e.g. wireguard-go or disco pingCLI) + lastSendAny mono.Time // last time there were outgoing packets sent this peer from any trigger, internal or external to magicsock + lastFullPing mono.Time // last time we pinged all disco or wireguard only endpoints + lastUDPRelayPathDiscovery mono.Time // last time we ran UDP relay path discovery + derpAddr netip.AddrPort // fallback/bootstrap path, if non-zero (non-zero for well-behaved clients) bestAddr addrQuality // best non-DERP path; zero if none; mutate via setBestAddrLocked() bestAddrAt mono.Time // time best address re-confirmed @@ -851,6 +852,10 @@ func (de *endpoint) heartbeat() { de.sendDiscoPingsLocked(now, true) } + if de.wantUDPRelayPathDiscoveryLocked(now) { + de.discoverUDPRelayPathsLocked(now) + } + de.heartBeatTimer = time.AfterFunc(heartbeatInterval, de.heartbeat) } @@ -861,6 +866,45 @@ func (de *endpoint) setHeartbeatDisabled(v bool) { de.heartbeatDisabled = v } +// discoverUDPRelayPathsLocked starts UDP relay path discovery. 
+func (de *endpoint) discoverUDPRelayPathsLocked(now mono.Time) { + // TODO(jwhited): return early if there are no relay servers set, otherwise + // we spin up and down relayManager.runLoop unnecessarily. + de.lastUDPRelayPathDiscovery = now + de.c.relayManager.allocateAndHandshakeAllServers(de) +} + +// wantUDPRelayPathDiscoveryLocked reports whether we should kick off UDP relay +// path discovery. +func (de *endpoint) wantUDPRelayPathDiscoveryLocked(now mono.Time) bool { + if runtime.GOOS == "js" { + return false + } + if !de.relayCapable { + return false + } + if de.bestAddr.isDirect() && now.Before(de.trustBestAddrUntil) { + return false + } + if !de.lastUDPRelayPathDiscovery.IsZero() && now.Sub(de.lastUDPRelayPathDiscovery) < discoverUDPRelayPathsInterval { + return false + } + // TODO(jwhited): consider applying 'goodEnoughLatency' suppression here, + // but not until we have a strategy for triggering CallMeMaybeVia regularly + // and/or enabling inbound packets to act as a UDP relay path discovery + // trigger, otherwise clients without relay servers may fall off a UDP + // relay path and never come back. They are dependent on the remote side + // regularly TX'ing CallMeMaybeVia, which currently only happens as part + // of full UDP relay path discovery. + if now.After(de.trustBestAddrUntil) { + return true + } + if !de.lastUDPRelayPathDiscovery.IsZero() && now.Sub(de.lastUDPRelayPathDiscovery) >= upgradeUDPRelayInterval { + return true + } + return false +} + // wantFullPingLocked reports whether we should ping to all our peers looking for // a better path. // @@ -869,7 +913,7 @@ func (de *endpoint) wantFullPingLocked(now mono.Time) bool { if runtime.GOOS == "js" { return false } - if !de.bestAddr.ap.IsValid() || de.lastFullPing.IsZero() { + if !de.bestAddr.isDirect() || de.lastFullPing.IsZero() { return true } if now.After(de.trustBestAddrUntil) { @@ -878,7 +922,7 @@ func (de *endpoint) wantFullPingLocked(now mono.Time) bool { if de.bestAddr.latency <= goodEnoughLatency { return false } - if now.Sub(de.lastFullPing) >= upgradeInterval { + if now.Sub(de.lastFullPing) >= upgradeUDPDirectInterval { return true } return false @@ -964,9 +1008,16 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { if startWGPing { de.sendWireGuardOnlyPingsLocked(now) } - } else if !udpAddr.ap.IsValid() || now.After(de.trustBestAddrUntil) { + } else if !udpAddr.isDirect() || now.After(de.trustBestAddrUntil) { de.sendDiscoPingsLocked(now, true) } + // TODO(jwhited): consider triggering UDP relay path discovery here under + // certain conditions. We currently only trigger it in heartbeat(), which + // is both good and bad. It's good because the first heartbeat() tick is 3s + // after the first packet, which gives us time to discover a UDP direct + // path and potentially avoid what would be wasted UDP relay path discovery + // work. It's bad because we might not discover a UDP direct path, and we + // incur a 3s delay before we try to discover a UDP relay path. de.noteTxActivityExtTriggerLocked(now) de.lastSendAny = now de.mu.Unlock() @@ -1275,13 +1326,6 @@ func (de *endpoint) sendDiscoPingsLocked(now mono.Time, sendCallMeMaybe bool) { // sent so our firewall ports are probably open and now // would be a good time for them to connect. go de.c.enqueueCallMeMaybe(derpAddr, de) - - // Schedule allocation of relay endpoints. We make no considerations for - // current relay endpoints or best UDP path state for now, keep it - // simple. 
- if de.relayCapable { - go de.c.relayManager.allocateAndHandshakeAllServers(de) - } } } @@ -1703,6 +1747,12 @@ type epAddr struct { vni virtualNetworkID // vni.isSet() indicates if this [epAddr] involves a Geneve header } +// isDirect returns true if e.ap is valid and not tailcfg.DerpMagicIPAddr, +// and a VNI is not set. +func (e epAddr) isDirect() bool { + return e.ap.IsValid() && e.ap.Addr() != tailcfg.DerpMagicIPAddr && !e.vni.isSet() +} + func (e epAddr) String() string { if !e.vni.isSet() { return e.ap.String() diff --git a/wgengine/magicsock/endpoint_test.go b/wgengine/magicsock/endpoint_test.go index b1e8cab91..3a1e55b8b 100644 --- a/wgengine/magicsock/endpoint_test.go +++ b/wgengine/magicsock/endpoint_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "tailscale.com/tailcfg" "tailscale.com/types/key" ) @@ -323,3 +324,44 @@ func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { }) } } + +func Test_epAddr_isDirectUDP(t *testing.T) { + vni := virtualNetworkID{} + vni.set(7) + tests := []struct { + name string + ap netip.AddrPort + vni virtualNetworkID + want bool + }{ + { + name: "true", + ap: netip.MustParseAddrPort("192.0.2.1:7"), + vni: virtualNetworkID{}, + want: true, + }, + { + name: "false derp magic addr", + ap: netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, 0), + vni: virtualNetworkID{}, + want: false, + }, + { + name: "false vni set", + ap: netip.MustParseAddrPort("192.0.2.1:7"), + vni: vni, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := epAddr{ + ap: tt.ap, + vni: tt.vni, + } + if got := e.isDirect(); got != tt.want { + t.Errorf("isDirect() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index e76d0054f..553543b0f 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3494,9 +3494,17 @@ const ( // keep NAT mappings alive. sessionActiveTimeout = 45 * time.Second - // upgradeInterval is how often we try to upgrade to a better path - // even if we have some non-DERP route that works. - upgradeInterval = 1 * time.Minute + // upgradeUDPDirectInterval is how often we try to upgrade to a better, + // direct UDP path even if we have some direct UDP path that works. + upgradeUDPDirectInterval = 1 * time.Minute + + // upgradeUDPRelayInterval is how often we try to discover UDP relay paths + // even if we have a UDP relay path that works. + upgradeUDPRelayInterval = 1 * time.Minute + + // discoverUDPRelayPathsInterval is the minimum time between UDP relay path + // discovery. + discoverUDPRelayPathsInterval = 30 * time.Second // heartbeatInterval is how often pings to the best UDP address // are sent. From 76b9afb54d7ddf662a6d5e47ab42021a2e6dba36 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 27 Jun 2025 15:14:18 -0700 Subject: [PATCH 1015/1708] ipn/store: make StateStore.All optional (#16409) This method is only needed to migrate between store.FileStore and tpm.tpmStore. We can make a runtime type assertion instead of implementing an unused method for every platform. 
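The runtime type assertion mentioned above keeps the required StateStore surface small: only backends that can actually enumerate their contents expose the extra method, and the one migration path that needs it probes for it at runtime. A minimal sketch of the pattern, with names simplified (the real optional interface is the ExportableStore added to ipn/store/stores.go below):

    package main

    import "fmt"

    // Store is the interface every state backend must implement.
    type Store interface {
        ReadState(key string) ([]byte, error)
    }

    // Exporter is an optional capability for backends that can enumerate
    // their contents, needed only for one-time migrations between stores.
    type Exporter interface {
        Store
        All() map[string][]byte
    }

    type memStore map[string][]byte

    func (m memStore) ReadState(key string) ([]byte, error) { return m[key], nil }
    func (m memStore) All() map[string][]byte               { return m }

    // migrate copies everything out of from, but only if from can export.
    func migrate(from Store) error {
        exp, ok := from.(Exporter) // runtime assertion instead of a mandatory method
        if !ok {
            return fmt.Errorf("%T cannot export its contents", from)
        }
        for k, v := range exp.All() {
            fmt.Printf("would copy %q (%d bytes)\n", k, len(v))
        }
        return nil
    }

    func main() {
        _ = migrate(memStore{"machine-key": []byte("...")})
    }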
Updates #15830 Signed-off-by: Andrew Lytvynov --- cmd/tsconnect/src/lib/js-state-store.ts | 3 --- cmd/tsconnect/src/types/wasm_js.d.ts | 1 - cmd/tsconnect/wasm/wasm_js.go | 24 -------------------- feature/tpm/tpm.go | 4 ++++ feature/tpm/tpm_test.go | 29 +++++++++++++++---------- ipn/store.go | 6 ----- ipn/store/awsstore/store_aws.go | 5 ----- ipn/store/kubestore/store_kube.go | 5 ----- ipn/store/mem/store_mem.go | 14 ------------ ipn/store/stores.go | 24 +++++++++++++++++++- 10 files changed, 45 insertions(+), 70 deletions(-) diff --git a/cmd/tsconnect/src/lib/js-state-store.ts b/cmd/tsconnect/src/lib/js-state-store.ts index 7f2fc8087..e57dfd98e 100644 --- a/cmd/tsconnect/src/lib/js-state-store.ts +++ b/cmd/tsconnect/src/lib/js-state-store.ts @@ -10,7 +10,4 @@ export const sessionStateStorage: IPNStateStorage = { getState(id) { return window.sessionStorage[`ipn-state-${id}`] || "" }, - all() { - return JSON.stringify(window.sessionStorage) - }, } diff --git a/cmd/tsconnect/src/types/wasm_js.d.ts b/cmd/tsconnect/src/types/wasm_js.d.ts index f47a972b0..492197ccb 100644 --- a/cmd/tsconnect/src/types/wasm_js.d.ts +++ b/cmd/tsconnect/src/types/wasm_js.d.ts @@ -44,7 +44,6 @@ declare global { interface IPNStateStorage { setState(id: string, value: string): void getState(id: string): string - all(): string } type IPNConfig = { diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index c5ff56120..779a87e49 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -15,7 +15,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "iter" "log" "math/rand/v2" "net" @@ -580,29 +579,6 @@ func (s *jsStateStore) WriteState(id ipn.StateKey, bs []byte) error { return nil } -func (s *jsStateStore) All() iter.Seq2[ipn.StateKey, []byte] { - return func(yield func(ipn.StateKey, []byte) bool) { - jsValue := s.jsStateStorage.Call("all") - if jsValue.String() == "" { - return - } - buf, err := hex.DecodeString(jsValue.String()) - if err != nil { - return - } - var state map[string][]byte - if err := json.Unmarshal(buf, &state); err != nil { - return - } - - for k, v := range state { - if !yield(ipn.StateKey(k), v) { - break - } - } - } -} - func mapSlice[T any, M any](a []T, f func(T) M) []M { n := make([]M, len(a)) for i, e := range a { diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 64656d412..5ec084eff 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -217,6 +217,10 @@ func (s *tpmStore) All() iter.Seq2[ipn.StateKey, []byte] { } } +// Ensure tpmStore implements store.ExportableStore for migration to/from +// store.FileStore. +var _ store.ExportableStore = (*tpmStore)(nil) + // The nested levels of encoding and encryption are confusing, so here's what's // going on in plain English. 
// diff --git a/feature/tpm/tpm_test.go b/feature/tpm/tpm_test.go index b08681354..f4497f8c7 100644 --- a/feature/tpm/tpm_test.go +++ b/feature/tpm/tpm_test.go @@ -9,6 +9,7 @@ import ( "encoding/json" "errors" "fmt" + "iter" "maps" "os" "path/filepath" @@ -20,8 +21,8 @@ import ( "github.com/google/go-cmp/cmp" "tailscale.com/ipn" "tailscale.com/ipn/store" - "tailscale.com/ipn/store/mem" "tailscale.com/types/logger" + "tailscale.com/util/mak" ) func TestPropToString(t *testing.T) { @@ -251,7 +252,9 @@ func TestMigrateStateToTPM(t *testing.T) { if err != nil { t.Fatalf("migration failed: %v", err) } - gotContent := maps.Collect(s.All()) + gotContent := maps.Collect(s.(interface { + All() iter.Seq2[ipn.StateKey, []byte] + }).All()) if diff := cmp.Diff(content, gotContent); diff != "" { t.Errorf("unexpected content after migration, diff:\n%s", diff) } @@ -285,7 +288,7 @@ func tpmSupported() bool { type mockTPMSealProvider struct { path string - mem.Store + data map[ipn.StateKey][]byte } func newMockTPMSeal(logf logger.Logf, path string) (ipn.StateStore, error) { @@ -293,7 +296,7 @@ func newMockTPMSeal(logf logger.Logf, path string) (ipn.StateStore, error) { if !ok { return nil, fmt.Errorf("%q missing tpmseal: prefix", path) } - s := &mockTPMSealProvider{path: path, Store: mem.Store{}} + s := &mockTPMSealProvider{path: path} buf, err := os.ReadFile(path) if errors.Is(err, os.ErrNotExist) { return s, s.flushState() @@ -312,24 +315,28 @@ func newMockTPMSeal(logf logger.Logf, path string) (ipn.StateStore, error) { if data.Key == "" || data.Nonce == "" { return nil, fmt.Errorf("%q missing key or nonce", path) } - for k, v := range data.Data { - s.Store.WriteState(k, v) - } + s.data = data.Data return s, nil } +func (p *mockTPMSealProvider) ReadState(k ipn.StateKey) ([]byte, error) { + return p.data[k], nil +} + func (p *mockTPMSealProvider) WriteState(k ipn.StateKey, v []byte) error { - if err := p.Store.WriteState(k, v); err != nil { - return err - } + mak.Set(&p.data, k, v) return p.flushState() } +func (p *mockTPMSealProvider) All() iter.Seq2[ipn.StateKey, []byte] { + return maps.All(p.data) +} + func (p *mockTPMSealProvider) flushState() error { data := map[string]any{ "key": "foo", "nonce": "bar", - "data": maps.Collect(p.Store.All()), + "data": p.data, } buf, err := json.Marshal(data) if err != nil { diff --git a/ipn/store.go b/ipn/store.go index e176e4842..550aa8cba 100644 --- a/ipn/store.go +++ b/ipn/store.go @@ -8,7 +8,6 @@ import ( "context" "errors" "fmt" - "iter" "net" "strconv" ) @@ -84,11 +83,6 @@ type StateStore interface { // instead, which only writes if the value is different from what's // already in the store. WriteState(id StateKey, bs []byte) error - // All returns an iterator over all StateStore keys. Using ReadState or - // WriteState is not safe while iterating and can lead to a deadlock. - // The order of keys in the iterator is not specified and may change - // between runs. 
- All() iter.Seq2[StateKey, []byte] } // WriteState is a wrapper around store.WriteState that only writes if diff --git a/ipn/store/awsstore/store_aws.go b/ipn/store/awsstore/store_aws.go index 523d1657b..40bbbf037 100644 --- a/ipn/store/awsstore/store_aws.go +++ b/ipn/store/awsstore/store_aws.go @@ -10,7 +10,6 @@ import ( "context" "errors" "fmt" - "iter" "net/url" "regexp" "strings" @@ -254,7 +253,3 @@ func (s *awsStore) persistState() error { _, err = s.ssmClient.PutParameter(context.TODO(), in) return err } - -func (s *awsStore) All() iter.Seq2[ipn.StateKey, []byte] { - return s.memory.All() -} diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index f6bedbf0b..14025bbb4 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -7,7 +7,6 @@ package kubestore import ( "context" "fmt" - "iter" "log" "net" "os" @@ -429,7 +428,3 @@ func sanitizeKey[T ~string](k T) string { return '_' }, string(k)) } - -func (s *Store) All() iter.Seq2[ipn.StateKey, []byte] { - return s.memory.All() -} diff --git a/ipn/store/mem/store_mem.go b/ipn/store/mem/store_mem.go index 6c22aefd5..6f474ce99 100644 --- a/ipn/store/mem/store_mem.go +++ b/ipn/store/mem/store_mem.go @@ -7,7 +7,6 @@ package mem import ( "bytes" "encoding/json" - "iter" "sync" xmaps "golang.org/x/exp/maps" @@ -86,16 +85,3 @@ func (s *Store) ExportToJSON() ([]byte, error) { } return json.MarshalIndent(s.cache, "", " ") } - -func (s *Store) All() iter.Seq2[ipn.StateKey, []byte] { - return func(yield func(ipn.StateKey, []byte) bool) { - s.mu.Lock() - defer s.mu.Unlock() - - for k, v := range s.cache { - if !yield(k, v) { - break - } - } - } -} diff --git a/ipn/store/stores.go b/ipn/store/stores.go index 43c796399..bf175da41 100644 --- a/ipn/store/stores.go +++ b/ipn/store/stores.go @@ -235,6 +235,23 @@ func (s *FileStore) All() iter.Seq2[ipn.StateKey, []byte] { } } +// Ensure FileStore implements ExportableStore for migration to/from +// tpm.tpmStore. +var _ ExportableStore = (*FileStore)(nil) + +// ExportableStore is an ipn.StateStore that can export all of its contents. +// This interface is optional to implement, and used for migrating the state +// between different store implementations. +type ExportableStore interface { + ipn.StateStore + + // All returns an iterator over all store keys. Using ReadState or + // WriteState is not safe while iterating and can lead to a deadlock. The + // order of keys in the iterator is not specified and may change between + // runs. + All() iter.Seq2[ipn.StateKey, []byte] +} + func maybeMigrateLocalStateFile(logf logger.Logf, path string) error { path, toTPM := strings.CutPrefix(path, TPMPrefix) @@ -297,10 +314,15 @@ func maybeMigrateLocalStateFile(logf logger.Logf, path string) error { } defer os.Remove(tmpPath) + fromExp, ok := from.(ExportableStore) + if !ok { + return fmt.Errorf("%T does not implement the exportableStore interface", from) + } + // Copy all the items. This is pretty inefficient, because both stores // write the file to disk for each WriteState, but that's ok for a one-time // migration. 
- for k, v := range from.All() { + for k, v := range fromExp.All() { if err := to.WriteState(k, v); err != nil { return err } From 544aee9d08e214c3fc2699916c2ed410b2fb79eb Mon Sep 17 00:00:00 2001 From: Simon Law Date: Fri, 27 Jun 2025 17:30:43 -0700 Subject: [PATCH 1016/1708] tsidp: update README to refer to community projects (#16411) We dropped the idea of the Experimental release stage in tailscale/tailscale-www#7697, in favour of Community Projects. Updates #cleanup Signed-off-by: Simon Law --- cmd/tsidp/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/tsidp/README.md b/cmd/tsidp/README.md index fce844e0b..780d9ab95 100644 --- a/cmd/tsidp/README.md +++ b/cmd/tsidp/README.md @@ -1,6 +1,6 @@ # `tsidp` - Tailscale OpenID Connect (OIDC) Identity Provider -[![status: experimental](https://img.shields.io/badge/status-experimental-blue)](https://tailscale.com/kb/1167/release-stages/#experimental) +[![status: community project](https://img.shields.io/badge/status-community_project-blue)](https://tailscale.com/kb/1531/community-projects) `tsidp` is an OIDC Identity Provider (IdP) server that integrates with your Tailscale network. It allows you to use Tailscale identities for authentication in applications that support OpenID Connect, enabling single sign-on (SSO) capabilities within your tailnet. @@ -89,7 +89,7 @@ The `tsidp` server supports several command-line flags: ## Support -This is an [experimental](https://tailscale.com/kb/1167/release-stages#experimental), work in progress feature. For issues or questions, file issues on the [GitHub repository](https://github.com/tailscale/tailscale) +This is an experimental, work in progress, [community project](https://tailscale.com/kb/1531/community-projects). For issues or questions, file issues on the [GitHub repository](https://github.com/tailscale/tailscale). ## License From 3dc694b4f1983dfcb1731cdf3f29aa6e4f058505 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 27 Jun 2025 19:11:59 -0700 Subject: [PATCH 1017/1708] wgengine/magicsock: clear UDP relay bestAddr's on disco ping timeout (#16410) Otherwise we can end up mirroring packets to them forever. We may eventually want to relax this to direct paths as well, but start with UDP relay paths, which have a higher chance of becoming untrusted and never working again, to be conservative. 
Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 29ae025f4..9edc6403e 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1129,7 +1129,12 @@ func (de *endpoint) discoPingTimeout(txid stun.TxID) { if !ok { return } - if debugDisco() || !de.bestAddr.ap.IsValid() || mono.Now().After(de.trustBestAddrUntil) { + bestUntrusted := mono.Now().After(de.trustBestAddrUntil) + if sp.to == de.bestAddr.epAddr && sp.to.vni.isSet() && bestUntrusted { + // TODO(jwhited): consider applying this to direct UDP paths as well + de.clearBestAddrLocked() + } + if debugDisco() || !de.bestAddr.ap.IsValid() || bestUntrusted { de.c.dlogf("[v1] magicsock: disco: timeout waiting for pong %x from %v (%v, %v)", txid[:6], sp.to, de.publicKey.ShortString(), de.discoShort()) } de.removeSentDiscoPingLocked(txid, sp, discoPingTimedOut) From ee8c3560ef74613443859e1f78a0c9b9071bac76 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 23 Jun 2025 09:02:03 -0700 Subject: [PATCH 1018/1708] tailcfg: format integer IDs as decimal consistently The server-side code already does e.g. "nodeid:%d" instead of "%x" and as a result we have to second guess a lot of identifiers that could be hex or decimal. This stops the bleeding and means in a year and change we'll stop seeing the hex forms. Updates tailscale/corp#29827 Change-Id: Ie5785a07fc32631f7c949348d3453538ab170e6d Signed-off-by: Brad Fitzpatrick --- tailcfg/tailcfg.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 23f3cc49b..fb7d54c38 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2265,10 +2265,10 @@ type Debug struct { Exit *int `json:",omitempty"` } -func (id ID) String() string { return fmt.Sprintf("id:%x", int64(id)) } -func (id UserID) String() string { return fmt.Sprintf("userid:%x", int64(id)) } -func (id LoginID) String() string { return fmt.Sprintf("loginid:%x", int64(id)) } -func (id NodeID) String() string { return fmt.Sprintf("nodeid:%x", int64(id)) } +func (id ID) String() string { return fmt.Sprintf("id:%d", int64(id)) } +func (id UserID) String() string { return fmt.Sprintf("userid:%d", int64(id)) } +func (id LoginID) String() string { return fmt.Sprintf("loginid:%d", int64(id)) } +func (id NodeID) String() string { return fmt.Sprintf("nodeid:%d", int64(id)) } // Equal reports whether n and n2 are equal. func (n *Node) Equal(n2 *Node) bool { From f85e4bcb3287a0adef9567ff79ba58d9cec4e1d2 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Fri, 27 Jun 2025 13:11:59 -0400 Subject: [PATCH 1019/1708] client/systray: replace counter metric with gauge Replace the existing systray_start counter metrics with a systray_running gauge metrics. This also adds an IncrementGauge method to local client to parallel IncrementCounter. The LocalAPI handler supports both, we've just never added a client method for gauges. 
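A gauge of this shape is typically incremented when the long-lived thing starts and decremented when it stops, so it tracks how many instances are live right now rather than how many ever started; the systray change below pairs the increment with a deferred decrement. A rough usage sketch, assuming an already-constructed *local.Client and a background context:

    package main

    import (
        "context"

        "tailscale.com/client/local"
    )

    func run(ctx context.Context, lc *local.Client) error {
        // Mark this instance as running...
        if err := lc.IncrementGauge(ctx, "systray_running", 1); err != nil {
            return err
        }
        // ...and undo it on the way out, so the gauge reflects live instances only.
        defer lc.IncrementGauge(ctx, "systray_running", -1)

        // ... main loop ...
        return nil
    }

    func main() {
        _ = run(context.Background(), &local.Client{})
    }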
Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/local/local.go | 17 +++++++++++++++++ client/systray/systray.go | 3 ++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/client/local/local.go b/client/local/local.go index 12bf2f7d6..74c4f0b6f 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -398,6 +398,23 @@ func (lc *Client) IncrementCounter(ctx context.Context, name string, delta int) return err } +// IncrementGauge increments the value of a Tailscale daemon's gauge +// metric by the given delta. If the metric has yet to exist, a new gauge +// metric is created and initialized to delta. The delta value can be negative. +func (lc *Client) IncrementGauge(ctx context.Context, name string, delta int) error { + type metricUpdate struct { + Name string `json:"name"` + Type string `json:"type"` + Value int `json:"value"` // amount to increment by + } + _, err := lc.send(ctx, "POST", "/localapi/v0/upload-client-metrics", 200, jsonBody([]metricUpdate{{ + Name: name, + Type: "gauge", + Value: delta, + }})) + return err +} + // TailDaemonLogs returns a stream the Tailscale daemon's logs as they arrive. // Close the context to stop the stream. func (lc *Client) TailDaemonLogs(ctx context.Context) (io.Reader, error) { diff --git a/client/systray/systray.go b/client/systray/systray.go index 195a157fb..a87783c06 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -61,7 +61,8 @@ func (menu *Menu) Run() { case <-menu.bgCtx.Done(): } }() - go menu.lc.IncrementCounter(menu.bgCtx, "systray_start", 1) + go menu.lc.IncrementGauge(menu.bgCtx, "systray_running", 1) + defer menu.lc.IncrementGauge(menu.bgCtx, "systray_running", -1) systray.Run(menu.onReady, menu.onExit) } From 2fc247573bfaa7e1dac695e98b5a31c3a2f5217e Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Mon, 30 Jun 2025 12:08:35 +0100 Subject: [PATCH 1020/1708] cmd/k8s-operator: ProxyClass annotation for Services and Ingresses (#16363) * cmd/k8s-operator: ProxyClass annotation for Services and Ingresses Previously, the ProxyClass could only be configured for Services and Ingresses via a Label. This adds the ability to set it via an Annotation, but prioritizes the Label if both a Label and Annotation are set. Updates #14323 Signed-off-by: chaosinthecrd * Update cmd/k8s-operator/operator.go Co-authored-by: Tom Proctor Signed-off-by: Tom Meadows * Update cmd/k8s-operator/operator.go Signed-off-by: Tom Meadows * cmd/k8s-operator: ProxyClass annotation for Services and Ingresses Previously, the ProxyClass could only be configured for Services and Ingresses via a Label. This adds the ability to set it via an Annotation, but prioritizes the Label if both a Label and Annotation are set. 
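Resolution order for the proxy class is therefore label first, then annotation, then the operator-wide default. A compact sketch of that precedence, using the tailscale.com/proxy-class key (the operator's real helper is the proxyClassForObject moved into sts.go below):

    package main

    import "fmt"

    const proxyClassKey = "tailscale.com/proxy-class"

    // proxyClassFor mirrors the precedence described above: an explicit label
    // wins, an annotation is consulted next, and otherwise the operator-wide
    // default (possibly empty) applies.
    func proxyClassFor(labels, annotations map[string]string, defaultClass string) string {
        if v, ok := labels[proxyClassKey]; ok {
            return v
        }
        if v, ok := annotations[proxyClassKey]; ok {
            return v
        }
        return defaultClass
    }

    func main() {
        fmt.Println(proxyClassFor(
            map[string]string{proxyClassKey: "prod"},
            map[string]string{proxyClassKey: "staging"},
            "default-pc",
        )) // prints "prod": the label takes priority over the annotation
    }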
Updates #14323 Signed-off-by: chaosinthecrd --------- Signed-off-by: chaosinthecrd Signed-off-by: Tom Meadows Co-authored-by: Tom Proctor --- cmd/k8s-operator/ingress.go | 1 + cmd/k8s-operator/ingress_test.go | 124 ++++++++++++++++-- cmd/k8s-operator/operator.go | 69 +++++++++- cmd/k8s-operator/operator_test.go | 202 ++++++++++++++++++++++++++++-- cmd/k8s-operator/sts.go | 18 ++- cmd/k8s-operator/svc.go | 12 +- 6 files changed, 398 insertions(+), 28 deletions(-) diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index 6c50e10b2..5058fd6dd 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -34,6 +34,7 @@ const ( tailscaleIngressClassName = "tailscale" // ingressClass.metadata.name for tailscale IngressClass resource tailscaleIngressControllerName = "tailscale.com/ts-ingress" // ingressClass.spec.controllerName for tailscale IngressClass resource ingressClassDefaultAnnotation = "ingressclass.kubernetes.io/is-default-class" // we do not support this https://kubernetes.io/docs/concepts/services-networking/ingress/#default-ingress-class + indexIngressProxyClass = ".metadata.annotations.ingress-proxy-class" ) type IngressReconciler struct { diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index aacf27d8e..e4396eb10 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -230,7 +230,8 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { Spec: tsapi.ProxyClassSpec{StatefulSet: &tsapi.StatefulSet{ Labels: tsapi.Labels{"foo": "bar"}, Annotations: map[string]string{"bar.io/foo": "some-val"}, - Pod: &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": "some-val"}}}}, + Pod: &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": "some-val"}}, + }}, } fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). @@ -285,7 +286,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { // 2. Ingress is updated to specify a ProxyClass, ProxyClass is not yet // ready, so proxy resource configuration does not change. mustUpdate(t, fc, "default", "test", func(ing *networkingv1.Ingress) { - mak.Set(&ing.ObjectMeta.Labels, LabelProxyClass, "custom-metadata") + mak.Set(&ing.ObjectMeta.Labels, LabelAnnotationProxyClass, "custom-metadata") }) expectReconciled(t, ingR, "default", "test") expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) @@ -299,7 +300,8 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { Status: metav1.ConditionTrue, Type: string(tsapi.ProxyClassReady), ObservedGeneration: pc.Generation, - }}} + }}, + } }) expectReconciled(t, ingR, "default", "test") opts.proxyClass = pc.Name @@ -309,7 +311,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { // Ingress gets reconciled and the custom ProxyClass configuration is // removed from the proxy resources. 
mustUpdate(t, fc, "default", "test", func(ing *networkingv1.Ingress) { - delete(ing.ObjectMeta.Labels, LabelProxyClass) + delete(ing.ObjectMeta.Labels, LabelAnnotationProxyClass) }) expectReconciled(t, ingR, "default", "test") opts.proxyClass = "" @@ -325,14 +327,15 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { Status: metav1.ConditionTrue, Type: string(tsapi.ProxyClassReady), ObservedGeneration: 1, - }}}, + }}, + }, } crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} // Create fake client with ProxyClass, IngressClass, Ingress with metrics ProxyClass, and Service ing := ingress() ing.Labels = map[string]string{ - LabelProxyClass: "metrics", + LabelAnnotationProxyClass: "metrics", } fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). @@ -421,6 +424,113 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { // ServiceMonitor gets garbage collected when the Service is deleted - we cannot test that here. } +func TestIngressProxyClassAnnotation(t *testing.T) { + cl := tstest.NewClock(tstest.ClockOpts{}) + zl := zap.Must(zap.NewDevelopment()) + + pcLEStaging, pcLEStagingFalse, _ := proxyClassesForLEStagingTest() + + testCases := []struct { + name string + proxyClassAnnotation string + proxyClassLabel string + proxyClassDefault string + expectedProxyClass string + expectEvents []string + }{ + { + name: "via_label", + proxyClassLabel: pcLEStaging.Name, + expectedProxyClass: pcLEStaging.Name, + }, + { + name: "via_annotation", + proxyClassAnnotation: pcLEStaging.Name, + expectedProxyClass: pcLEStaging.Name, + }, + { + name: "via_default", + proxyClassDefault: pcLEStaging.Name, + expectedProxyClass: pcLEStaging.Name, + }, + { + name: "via_label_override_annotation", + proxyClassLabel: pcLEStaging.Name, + proxyClassAnnotation: pcLEStagingFalse.Name, + expectedProxyClass: pcLEStaging.Name, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + builder := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme) + + builder = builder.WithObjects(pcLEStaging, pcLEStagingFalse). 
+ WithStatusSubresource(pcLEStaging, pcLEStagingFalse) + + fc := builder.Build() + + if tt.proxyClassAnnotation != "" || tt.proxyClassLabel != "" || tt.proxyClassDefault != "" { + name := tt.proxyClassDefault + if name == "" { + name = tt.proxyClassLabel + if name == "" { + name = tt.proxyClassAnnotation + } + } + setProxyClassReady(t, fc, cl, name) + } + + mustCreate(t, fc, ingressClass()) + mustCreate(t, fc, service()) + ing := ingress() + if tt.proxyClassLabel != "" { + ing.Labels = map[string]string{ + LabelAnnotationProxyClass: tt.proxyClassLabel, + } + } + if tt.proxyClassAnnotation != "" { + ing.Annotations = map[string]string{ + LabelAnnotationProxyClass: tt.proxyClassAnnotation, + } + } + mustCreate(t, fc, ing) + + ingR := &IngressReconciler{ + Client: fc, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: &fakeTSClient{}, + tsnetServer: &fakeTSNetServer{certDomains: []string{"test-host"}}, + defaultTags: []string{"tag:test"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale:test", + }, + logger: zl.Sugar(), + defaultProxyClass: tt.proxyClassDefault, + } + + expectReconciled(t, ingR, "default", "test") + + _, shortName := findGenName(t, fc, "default", "test", "ingress") + sts := &appsv1.StatefulSet{} + if err := fc.Get(context.Background(), client.ObjectKey{Namespace: "operator-ns", Name: shortName}, sts); err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + + switch tt.expectedProxyClass { + case pcLEStaging.Name: + verifyEnvVar(t, sts, "TS_DEBUG_ACME_DIRECTORY_URL", letsEncryptStagingEndpoint) + case pcLEStagingFalse.Name: + verifyEnvVarNotPresent(t, sts, "TS_DEBUG_ACME_DIRECTORY_URL") + default: + t.Fatalf("unexpected expected ProxyClass %q", tt.expectedProxyClass) + } + }) + } +} + func TestIngressLetsEncryptStaging(t *testing.T) { cl := tstest.NewClock(tstest.ClockOpts{}) zl := zap.Must(zap.NewDevelopment()) @@ -452,7 +562,7 @@ func TestIngressLetsEncryptStaging(t *testing.T) { ing := ingress() if tt.proxyClassPerResource != "" { ing.Labels = map[string]string{ - LabelProxyClass: tt.proxyClassPerResource, + LabelAnnotationProxyClass: tt.proxyClassPerResource, } } mustCreate(t, fc, ing) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index cd1ae8158..b33dcd114 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -54,6 +54,7 @@ import ( "tailscale.com/tsnet" "tailscale.com/tstime" "tailscale.com/types/logger" + "tailscale.com/util/set" "tailscale.com/version" ) @@ -307,6 +308,7 @@ func runReconcilers(opts reconcilerOpts) { proxyPriorityClassName: opts.proxyPriorityClassName, tsFirewallMode: opts.proxyFirewallMode, } + err = builder. ControllerManagedBy(mgr). Named("service-reconciler"). @@ -327,6 +329,10 @@ func runReconcilers(opts reconcilerOpts) { if err != nil { startlog.Fatalf("could not create service reconciler: %v", err) } + if err := mgr.GetFieldIndexer().IndexField(context.Background(), new(corev1.Service), indexServiceProxyClass, indexProxyClass); err != nil { + startlog.Fatalf("failed setting up ProxyClass indexer for Services: %v", err) + } + ingressChildFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("ingress")) // If a ProxyClassChanges, enqueue all Ingresses labeled with that // ProxyClass's name. 
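Registering indexProxyClass as a field index lets the cached client answer "which Services or Ingresses reference this ProxyClass via the annotation?" without scanning every object; the enqueue handlers below query it with client.MatchingFields and merge the result with the label-based lookup. A rough sketch of the query side, assuming a controller-runtime client (package and helper name chosen for illustration):

    package example

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    const indexServiceProxyClass = ".metadata.annotations.service-proxy-class"

    // servicesForProxyClass lists only the Services whose
    // tailscale.com/proxy-class annotation names pcName, served from the
    // informer cache via the field index registered above.
    func servicesForProxyClass(ctx context.Context, cl client.Client, pcName string) ([]corev1.Service, error) {
        var svcs corev1.ServiceList
        if err := cl.List(ctx, &svcs, client.MatchingFields{indexServiceProxyClass: pcName}); err != nil {
            return nil, err
        }
        return svcs.Items, nil
    }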
@@ -351,6 +357,10 @@ func runReconcilers(opts reconcilerOpts) { if err != nil { startlog.Fatalf("could not create ingress reconciler: %v", err) } + if err := mgr.GetFieldIndexer().IndexField(context.Background(), new(networkingv1.Ingress), indexIngressProxyClass, indexProxyClass); err != nil { + startlog.Fatalf("failed setting up ProxyClass indexer for Ingresses: %v", err) + } + lc, err := opts.tsServer.LocalClient() if err != nil { startlog.Fatalf("could not get local client: %v", err) @@ -797,6 +807,16 @@ func managedResourceHandlerForType(typ string) handler.MapFunc { } } +// indexProxyClass is used to select ProxyClass-backed objects which are +// locally indexed in the cache for efficient listing without requiring labels. +func indexProxyClass(o client.Object) []string { + if !hasProxyClassAnnotation(o) { + return nil + } + + return []string{o.GetAnnotations()[LabelAnnotationProxyClass]} +} + // proxyClassHandlerForSvc returns a handler that, for a given ProxyClass, // returns a list of reconcile requests for all Services labeled with // tailscale.com/proxy-class: . @@ -804,16 +824,37 @@ func proxyClassHandlerForSvc(cl client.Client, logger *zap.SugaredLogger) handle return func(ctx context.Context, o client.Object) []reconcile.Request { svcList := new(corev1.ServiceList) labels := map[string]string{ - LabelProxyClass: o.GetName(), + LabelAnnotationProxyClass: o.GetName(), } + if err := cl.List(ctx, svcList, client.MatchingLabels(labels)); err != nil { logger.Debugf("error listing Services for ProxyClass: %v", err) return nil } + reqs := make([]reconcile.Request, 0) + seenSvcs := make(set.Set[string]) for _, svc := range svcList.Items { reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&svc)}) + seenSvcs.Add(fmt.Sprintf("%s/%s", svc.Namespace, svc.Name)) } + + svcAnnotationList := new(corev1.ServiceList) + if err := cl.List(ctx, svcAnnotationList, client.MatchingFields{indexServiceProxyClass: o.GetName()}); err != nil { + logger.Debugf("error listing Services for ProxyClass: %v", err) + return nil + } + + for _, svc := range svcAnnotationList.Items { + nsname := fmt.Sprintf("%s/%s", svc.Namespace, svc.Name) + if seenSvcs.Contains(nsname) { + continue + } + + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&svc)}) + seenSvcs.Add(nsname) + } + return reqs } } @@ -825,16 +866,36 @@ func proxyClassHandlerForIngress(cl client.Client, logger *zap.SugaredLogger) ha return func(ctx context.Context, o client.Object) []reconcile.Request { ingList := new(networkingv1.IngressList) labels := map[string]string{ - LabelProxyClass: o.GetName(), + LabelAnnotationProxyClass: o.GetName(), } if err := cl.List(ctx, ingList, client.MatchingLabels(labels)); err != nil { logger.Debugf("error listing Ingresses for ProxyClass: %v", err) return nil } + reqs := make([]reconcile.Request, 0) + seenIngs := make(set.Set[string]) for _, ing := range ingList.Items { reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) + seenIngs.Add(fmt.Sprintf("%s/%s", ing.Namespace, ing.Name)) + } + + ingAnnotationList := new(networkingv1.IngressList) + if err := cl.List(ctx, ingAnnotationList, client.MatchingFields{indexIngressProxyClass: o.GetName()}); err != nil { + logger.Debugf("error listing Ingreses for ProxyClass: %v", err) + return nil + } + + for _, ing := range ingAnnotationList.Items { + nsname := fmt.Sprintf("%s/%s", ing.Namespace, ing.Name) + if seenIngs.Contains(nsname) { + continue + } + + reqs = append(reqs, 
reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) + seenIngs.Add(nsname) } + return reqs } } @@ -1500,6 +1561,10 @@ func hasProxyGroupAnnotation(obj client.Object) bool { return obj.GetAnnotations()[AnnotationProxyGroup] != "" } +func hasProxyClassAnnotation(obj client.Object) bool { + return obj.GetAnnotations()[LabelAnnotationProxyClass] != "" +} + func id(ctx context.Context, lc *local.Client) (string, error) { st, err := lc.StatusWithoutPeers(ctx) if err != nil { diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index ff6ba4f95..a9f08c18b 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -7,6 +7,7 @@ package main import ( "context" + "encoding/json" "fmt" "testing" "time" @@ -20,8 +21,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/k8s-operator/apis/v1alpha1" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/net/dns/resolvconffile" @@ -1121,6 +1124,182 @@ func TestCustomPriorityClassName(t *testing.T) { expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) } +func TestServiceProxyClassAnnotation(t *testing.T) { + cl := tstest.NewClock(tstest.ClockOpts{}) + zl := zap.Must(zap.NewDevelopment()) + + pcIfNotPresent := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "if-not-present", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Pod: &tsapi.Pod{ + TailscaleContainer: &v1alpha1.Container{ + ImagePullPolicy: corev1.PullIfNotPresent, + }, + }, + }, + }, + } + + pcAlways := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "always", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Pod: &tsapi.Pod{ + TailscaleContainer: &v1alpha1.Container{ + ImagePullPolicy: corev1.PullAlways, + }, + }, + }, + }, + } + + builder := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme) + builder = builder.WithObjects(pcIfNotPresent, pcAlways). + WithStatusSubresource(pcIfNotPresent, pcAlways) + fc := builder.Build() + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + // The apiserver is supposed to set the UID, but the fake client + // doesn't. So, set it explicitly because other code later depends + // on it being set. 
+ UID: types.UID("1234-UID"), + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.20.30.40", + Type: corev1.ServiceTypeLoadBalancer, + }, + } + + mustCreate(t, fc, svc) + + testCases := []struct { + name string + proxyClassAnnotation string + proxyClassLabel string + proxyClassDefault string + expectedProxyClass string + expectEvents []string + }{ + { + name: "via_label", + proxyClassLabel: pcIfNotPresent.Name, + expectedProxyClass: pcIfNotPresent.Name, + }, + { + name: "via_annotation", + proxyClassAnnotation: pcIfNotPresent.Name, + expectedProxyClass: pcIfNotPresent.Name, + }, + { + name: "via_default", + proxyClassDefault: pcIfNotPresent.Name, + expectedProxyClass: pcIfNotPresent.Name, + }, + { + name: "via_label_override_annotation", + proxyClassLabel: pcIfNotPresent.Name, + proxyClassAnnotation: pcAlways.Name, + expectedProxyClass: pcIfNotPresent.Name, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + ft := &fakeTSClient{} + + if tt.proxyClassAnnotation != "" || tt.proxyClassLabel != "" || tt.proxyClassDefault != "" { + name := tt.proxyClassDefault + if name == "" { + name = tt.proxyClassLabel + if name == "" { + name = tt.proxyClassAnnotation + } + } + setProxyClassReady(t, fc, cl, name) + } + + sr := &ServiceReconciler{ + Client: fc, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + defaultProxyClass: tt.proxyClassDefault, + logger: zl.Sugar(), + clock: cl, + isDefaultLoadBalancer: true, + } + + if tt.proxyClassLabel != "" { + svc.Labels = map[string]string{ + LabelAnnotationProxyClass: tt.proxyClassLabel, + } + } + if tt.proxyClassAnnotation != "" { + svc.Annotations = map[string]string{ + LabelAnnotationProxyClass: tt.proxyClassAnnotation, + } + } + + mustUpdate(t, fc, svc.Namespace, svc.Name, func(s *corev1.Service) { + s.Labels = svc.Labels + s.Annotations = svc.Annotations + }) + + expectReconciled(t, sr, "default", "test") + + list := &corev1.ServiceList{} + fc.List(context.Background(), list, client.InNamespace("default")) + + for _, i := range list.Items { + t.Logf("found service %s", i.Name) + } + + slist := &corev1.SecretList{} + fc.List(context.Background(), slist, client.InNamespace("operator-ns")) + for _, i := range slist.Items { + l, _ := json.Marshal(i.Labels) + t.Logf("found secret %q with labels %q ", i.Name, string(l)) + } + + _, shortName := findGenName(t, fc, "default", "test", "svc") + sts := &appsv1.StatefulSet{} + if err := fc.Get(context.Background(), client.ObjectKey{Namespace: "operator-ns", Name: shortName}, sts); err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + + switch tt.expectedProxyClass { + case pcIfNotPresent.Name: + for _, cont := range sts.Spec.Template.Spec.Containers { + if cont.Name == "tailscale" && cont.ImagePullPolicy != corev1.PullIfNotPresent { + t.Fatalf("ImagePullPolicy %q does not match ProxyClass %q with value %q", cont.ImagePullPolicy, pcIfNotPresent.Name, pcIfNotPresent.Spec.StatefulSet.Pod.TailscaleContainer.ImagePullPolicy) + } + } + case pcAlways.Name: + for _, cont := range sts.Spec.Template.Spec.Containers { + if cont.Name == "tailscale" && cont.ImagePullPolicy != corev1.PullAlways { + t.Fatalf("ImagePullPolicy %q does not match ProxyClass %q with value %q", cont.ImagePullPolicy, pcAlways.Name, pcAlways.Spec.StatefulSet.Pod.TailscaleContainer.ImagePullPolicy) + } + } + default: + t.Fatalf("unexpected expected ProxyClass %q", tt.expectedProxyClass) + } 
+ }) + } +} + func TestProxyClassForService(t *testing.T) { // Setup pc := &tsapi.ProxyClass{ @@ -1132,7 +1311,9 @@ func TestProxyClassForService(t *testing.T) { StatefulSet: &tsapi.StatefulSet{ Labels: tsapi.Labels{"foo": "bar"}, Annotations: map[string]string{"bar.io/foo": "some-val"}, - Pod: &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": "some-val"}}}}, + Pod: &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": "some-val"}}, + }, + }, } fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). @@ -1194,7 +1375,7 @@ func TestProxyClassForService(t *testing.T) { // pointing at the 'custom-metadata' ProxyClass. The ProxyClass is not // yet ready, so no changes are actually applied to the proxy resources. mustUpdate(t, fc, "default", "test", func(svc *corev1.Service) { - mak.Set(&svc.Labels, LabelProxyClass, "custom-metadata") + mak.Set(&svc.Labels, LabelAnnotationProxyClass, "custom-metadata") }) expectReconciled(t, sr, "default", "test") expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) @@ -1209,7 +1390,8 @@ func TestProxyClassForService(t *testing.T) { Status: metav1.ConditionTrue, Type: string(tsapi.ProxyClassReady), ObservedGeneration: pc.Generation, - }}} + }}, + } }) opts.proxyClass = pc.Name expectReconciled(t, sr, "default", "test") @@ -1220,7 +1402,7 @@ func TestProxyClassForService(t *testing.T) { // configuration from the ProxyClass is removed from the cluster // resources. mustUpdate(t, fc, "default", "test", func(svc *corev1.Service) { - delete(svc.Labels, LabelProxyClass) + delete(svc.Labels, LabelAnnotationProxyClass) }) opts.proxyClass = "" expectReconciled(t, sr, "default", "test") @@ -1439,7 +1621,8 @@ func Test_serviceHandlerForIngress(t *testing.T) { IngressClassName: ptr.To(tailscaleIngressClassName), Rules: []networkingv1.IngressRule{{IngressRuleValue: networkingv1.IngressRuleValue{HTTP: &networkingv1.HTTPIngressRuleValue{ Paths: []networkingv1.HTTPIngressPath{ - {Backend: networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}}}}, + {Backend: networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}}}, + }, }}}}, }, }) @@ -1466,7 +1649,8 @@ func Test_serviceHandlerForIngress(t *testing.T) { Spec: networkingv1.IngressSpec{ Rules: []networkingv1.IngressRule{{IngressRuleValue: networkingv1.IngressRuleValue{HTTP: &networkingv1.HTTPIngressRuleValue{ Paths: []networkingv1.HTTPIngressPath{ - {Backend: networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "non-ts-backend"}}}}, + {Backend: networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "non-ts-backend"}}}, + }, }}}}, }, }) @@ -1565,6 +1749,7 @@ func Test_clusterDomainFromResolverConf(t *testing.T) { }) } } + func Test_authKeyRemoval(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} @@ -1711,14 +1896,15 @@ func Test_metricsResourceCreation(t *testing.T) { Status: metav1.ConditionTrue, Type: string(tsapi.ProxyClassReady), ObservedGeneration: 1, - }}}, + }}, + }, } svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", UID: types.UID("1234-UID"), - Labels: map[string]string{LabelProxyClass: "metrics"}, + Labels: map[string]string{LabelAnnotationProxyClass: "metrics"}, }, Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 3e3d2d590..a943ae971 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -50,7 +50,7 @@ const ( // LabelProxyClass can 
be set by users on tailscale Ingresses and Services that define cluster ingress or // cluster egress, to specify that configuration in this ProxyClass should be applied to resources created for // the Ingress or Service. - LabelProxyClass = "tailscale.com/proxy-class" + LabelAnnotationProxyClass = "tailscale.com/proxy-class" FinalizerName = "tailscale.com/finalizer" @@ -1127,6 +1127,22 @@ func nameForService(svc *corev1.Service) string { return svc.Namespace + "-" + svc.Name } +// proxyClassForObject returns the proxy class for the given object. If the +// object does not have a proxy class label, it returns the default proxy class +func proxyClassForObject(o client.Object, proxyDefaultClass string) string { + proxyClass, exists := o.GetLabels()[LabelAnnotationProxyClass] + if exists { + return proxyClass + } + + proxyClass, exists = o.GetAnnotations()[LabelAnnotationProxyClass] + if exists { + return proxyClass + } + + return proxyDefaultClass +} + func isValidFirewallMode(m string) bool { return m == "auto" || m == "nftables" || m == "iptables" } diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index c880f59f5..f8c9af239 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -41,6 +41,8 @@ const ( reasonProxyInvalid = "ProxyInvalid" reasonProxyFailed = "ProxyFailed" reasonProxyPending = "ProxyPending" + + indexServiceProxyClass = ".metadata.annotations.service-proxy-class" ) type ServiceReconciler struct { @@ -438,16 +440,6 @@ func tailnetTargetAnnotation(svc *corev1.Service) string { return svc.Annotations[annotationTailnetTargetIPOld] } -// proxyClassForObject returns the proxy class for the given object. If the -// object does not have a proxy class label, it returns the default proxy class -func proxyClassForObject(o client.Object, proxyDefaultClass string) string { - proxyClass, exists := o.GetLabels()[LabelProxyClass] - if !exists { - proxyClass = proxyDefaultClass - } - return proxyClass -} - func proxyClassIsReady(ctx context.Context, name string, cl client.Client) (bool, error) { proxyClass := new(tsapi.ProxyClass) if err := cl.Get(ctx, types.NamespacedName{Name: name}, proxyClass); err != nil { From 47e77565c63aa1af9d0de27a38281bc0fcc02250 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 30 Jun 2025 12:12:57 -0700 Subject: [PATCH 1021/1708] wgengine/magicsock: avoid handshaking relay endpoints that are trusted (#16412) Changes to our src/address family can trigger blackholes. This commit also adds a missing set of trustBestAddrUntil when setting a UDP relay path as bestAddr. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 52 +++++++--- wgengine/magicsock/magicsock.go | 6 +- wgengine/magicsock/relaymanager.go | 131 ++++++++++++++++-------- wgengine/magicsock/relaymanager_test.go | 4 +- 4 files changed, 130 insertions(+), 63 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 9edc6403e..af4666665 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -100,25 +100,33 @@ type endpoint struct { relayCapable bool // whether the node is capable of speaking via a [tailscale.com/net/udprelay.Server] } -// relayEndpointReady determines whether the given relay addr should be -// installed as de.bestAddr. It is only called by [relayManager] once it has -// determined addr is functional via [disco.Pong] reception. 
-func (de *endpoint) relayEndpointReady(addr epAddr, latency time.Duration) { +// udpRelayEndpointReady determines whether the given relay [addrQuality] should +// be installed as de.bestAddr. It is only called by [relayManager] once it has +// determined maybeBest is functional via [disco.Pong] reception. +func (de *endpoint) udpRelayEndpointReady(maybeBest addrQuality) { de.c.mu.Lock() defer de.c.mu.Unlock() de.mu.Lock() defer de.mu.Unlock() - maybeBetter := addrQuality{addr, latency, pingSizeToPktLen(0, addr)} - if !betterAddr(maybeBetter, de.bestAddr) { + if maybeBest.relayServerDisco.Compare(de.bestAddr.relayServerDisco) == 0 { + // TODO(jwhited): add some observability for this case, e.g. did we + // flip transports during a de.bestAddr transition from untrusted to + // trusted? + // + // If these are equal we must set maybeBest as bestAddr, otherwise we + // could leave a stale bestAddr if it goes over a different + // address family or src. + } else if !betterAddr(maybeBest, de.bestAddr) { return } - // Promote maybeBetter to bestAddr. + // Promote maybeBest to bestAddr. // TODO(jwhited): collapse path change logging with endpoint.handlePongConnLocked() - de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v", de.publicKey.ShortString(), de.discoShort(), maybeBetter.epAddr, maybeBetter.wireMTU) - de.setBestAddrLocked(maybeBetter) - de.c.peerMap.setNodeKeyForEpAddr(addr, de.publicKey) + de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v", de.publicKey.ShortString(), de.discoShort(), maybeBest.epAddr, maybeBest.wireMTU) + de.setBestAddrLocked(maybeBest) + de.trustBestAddrUntil = mono.Now().Add(trustUDPAddrDuration) + de.c.peerMap.setNodeKeyForEpAddr(maybeBest.epAddr, de.publicKey) } func (de *endpoint) setBestAddrLocked(v addrQuality) { @@ -871,7 +879,9 @@ func (de *endpoint) discoverUDPRelayPathsLocked(now mono.Time) { // TODO(jwhited): return early if there are no relay servers set, otherwise // we spin up and down relayManager.runLoop unnecessarily. de.lastUDPRelayPathDiscovery = now - de.c.relayManager.allocateAndHandshakeAllServers(de) + lastBest := de.bestAddr + lastBestIsTrusted := mono.Now().Before(de.trustBestAddrUntil) + de.c.relayManager.startUDPRelayPathDiscoveryFor(de, lastBest, lastBestIsTrusted) } // wantUDPRelayPathDiscoveryLocked reports whether we should kick off UDP relay @@ -1714,7 +1724,16 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd // Promote this pong response to our current best address if it's lower latency. // TODO(bradfitz): decide how latency vs. preference order affects decision if !isDerp { - thisPong := addrQuality{sp.to, latency, tstun.WireMTU(pingSizeToPktLen(sp.size, sp.to))} + thisPong := addrQuality{ + epAddr: sp.to, + latency: latency, + wireMTU: pingSizeToPktLen(sp.size, sp.to), + } + // TODO(jwhited): consider checking de.trustBestAddrUntil as well. If + // de.bestAddr is untrusted we may want to clear it, otherwise we could + // get stuck with a forever untrusted bestAddr that blackholes, since + // we don't clear direct UDP paths on disco ping timeout (see + // discoPingTimeout). if betterAddr(thisPong, de.bestAddr) { if src.vni.isSet() { // This would be unexpected. Switching to a Geneve-encapsulated @@ -1765,14 +1784,17 @@ func (e epAddr) String() string { return fmt.Sprintf("%v:vni:%d", e.ap.String(), e.vni.get()) } -// addrQuality is an [epAddr] with an associated latency and path mtu. 
+// addrQuality is an [epAddr], an optional [key.DiscoPublic] if a relay server +// is associated, a round-trip latency measurement, and path mtu. type addrQuality struct { epAddr - latency time.Duration - wireMTU tstun.WireMTU + relayServerDisco key.DiscoPublic // only relevant if epAddr.vni.isSet(), otherwise zero value + latency time.Duration + wireMTU tstun.WireMTU } func (a addrQuality) String() string { + // TODO(jwhited): consider including relayServerDisco return fmt.Sprintf("%v@%v+%v", a.epAddr, a.latency, a.wireMTU) } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 553543b0f..0933c5be2 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2137,6 +2137,8 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake } ep.mu.Lock() relayCapable := ep.relayCapable + lastBest := ep.bestAddr + lastBestIsTrusted := mono.Now().Before(ep.trustBestAddrUntil) ep.mu.Unlock() if isVia && !relayCapable { c.logf("magicsock: disco: ignoring %s from %v; %v is not known to be relay capable", msgType, sender.ShortString(), sender.ShortString()) @@ -2156,7 +2158,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake c.discoShort, epDisco.short, via.ServerDisco.ShortString(), ep.publicKey.ShortString(), derpStr(src.String()), len(via.AddrPorts)) - c.relayManager.handleCallMeMaybeVia(ep, via) + c.relayManager.handleCallMeMaybeVia(ep, lastBest, lastBestIsTrusted, via) } else { c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got call-me-maybe, %d endpoints", c.discoShort, epDisco.short, @@ -2254,7 +2256,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN // We have no [endpoint] in the [peerMap] for this relay [epAddr] // using it as a bestAddr. [relayManager] might be in the middle of // probing it or attempting to set it as best via - // [endpoint.relayEndpointReady()]. Make [relayManager] aware. + // [endpoint.udpRelayEndpointReady()]. Make [relayManager] aware. c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(c, dm, di, src) return } diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 6418a4364..1c173c01a 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -50,7 +50,7 @@ type relayManager struct { // =================================================================== // The following chan fields serve event inputs to a single goroutine, // runLoop(). - allocateHandshakeCh chan *endpoint + startDiscoveryCh chan endpointWithLastBest allocateWorkDoneCh chan relayEndpointAllocWorkDoneEvent handshakeWorkDoneCh chan relayEndpointHandshakeWorkDoneEvent cancelWorkCh chan *endpoint @@ -77,8 +77,8 @@ type serverDiscoVNI struct { // relayHandshakeWork serves to track in-progress relay handshake work for a // [udprelay.ServerEndpoint]. This structure is immutable once initialized. type relayHandshakeWork struct { - ep *endpoint - se udprelay.ServerEndpoint + wlb endpointWithLastBest + se udprelay.ServerEndpoint // handshakeServerEndpoint() always writes to doneCh (len 1) when it // returns. It may end up writing the same event afterward to @@ -97,7 +97,7 @@ type relayHandshakeWork struct { // [disco.CallMeMaybeVia] reception. This structure is immutable once // initialized. 
type newRelayServerEndpointEvent struct { - ep *endpoint + wlb endpointWithLastBest se udprelay.ServerEndpoint server netip.AddrPort // zero value if learned via [disco.CallMeMaybeVia] } @@ -142,9 +142,9 @@ func (r *relayManager) runLoop() { for { select { - case ep := <-r.allocateHandshakeCh: - if !r.hasActiveWorkForEndpointRunLoop(ep) { - r.allocateAllServersRunLoop(ep) + case startDiscovery := <-r.startDiscoveryCh: + if !r.hasActiveWorkForEndpointRunLoop(startDiscovery.ep) { + r.allocateAllServersRunLoop(startDiscovery) } if !r.hasActiveWorkRunLoop() { return @@ -153,7 +153,7 @@ func (r *relayManager) runLoop() { work, ok := r.allocWorkByEndpoint[done.work.ep] if ok && work == done.work { // Verify the work in the map is the same as the one that we're - // cleaning up. New events on r.allocateHandshakeCh can + // cleaning up. New events on r.startDiscoveryCh can // overwrite pre-existing keys. delete(r.allocWorkByEndpoint, done.work.ep) } @@ -237,7 +237,7 @@ func (r *relayManager) init() { r.handshakeWorkByServerDiscoVNI = make(map[serverDiscoVNI]*relayHandshakeWork) r.handshakeWorkAwaitingPong = make(map[*relayHandshakeWork]addrPortVNI) r.addrPortVNIToHandshakeWork = make(map[addrPortVNI]*relayHandshakeWork) - r.allocateHandshakeCh = make(chan *endpoint) + r.startDiscoveryCh = make(chan endpointWithLastBest) r.allocateWorkDoneCh = make(chan relayEndpointAllocWorkDoneEvent) r.handshakeWorkDoneCh = make(chan relayEndpointHandshakeWorkDoneEvent) r.cancelWorkCh = make(chan *endpoint) @@ -273,7 +273,7 @@ func (r *relayManager) ensureDiscoInfoFor(work *relayHandshakeWork) { di.di = &discoInfo{ discoKey: work.se.ServerDisco, discoShort: work.se.ServerDisco.ShortString(), - sharedKey: work.ep.c.discoPrivate.Shared(work.se.ServerDisco), + sharedKey: work.wlb.ep.c.discoPrivate.Shared(work.se.ServerDisco), } } } @@ -306,7 +306,7 @@ func (r *relayManager) discoInfo(serverDisco key.DiscoPublic) (_ *discoInfo, ok return nil, false } -func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, dm *disco.CallMeMaybeVia) { +func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, lastBest addrQuality, lastBestIsTrusted bool, dm *disco.CallMeMaybeVia) { se := udprelay.ServerEndpoint{ ServerDisco: dm.ServerDisco, LamportID: dm.LamportID, @@ -316,7 +316,11 @@ func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, dm *disco.CallMeMaybeV se.BindLifetime.Duration = dm.BindLifetime se.SteadyStateLifetime.Duration = dm.SteadyStateLifetime relayManagerInputEvent(r, nil, &r.newServerEndpointCh, newRelayServerEndpointEvent{ - ep: ep, + wlb: endpointWithLastBest{ + ep: ep, + lastBest: lastBest, + lastBestIsTrusted: lastBestIsTrusted, + }, se: se, }) } @@ -360,11 +364,19 @@ func relayManagerInputEvent[T any](r *relayManager, ctx context.Context, eventCh } } -// allocateAndHandshakeAllServers kicks off allocation and handshaking of relay -// endpoints for 'ep' on all known relay servers if there is no outstanding -// work. -func (r *relayManager) allocateAndHandshakeAllServers(ep *endpoint) { - relayManagerInputEvent(r, nil, &r.allocateHandshakeCh, ep) +// endpointWithLastBest represents an [*endpoint], its last bestAddr, and if +// the last bestAddr was trusted (see endpoint.trustBestAddrUntil) at the time +// of init. This structure is immutable once initialized. +type endpointWithLastBest struct { + ep *endpoint + lastBest addrQuality + lastBestIsTrusted bool +} + +// startUDPRelayPathDiscoveryFor starts UDP relay path discovery for ep on all +// known relay servers if ep has no in-progress work. 
+func (r *relayManager) startUDPRelayPathDiscoveryFor(ep *endpoint, lastBest addrQuality, lastBestIsTrusted bool) { + relayManagerInputEvent(r, nil, &r.startDiscoveryCh, endpointWithLastBest{ep, lastBest, lastBestIsTrusted}) } // stopWork stops all outstanding allocation & handshaking work for 'ep'. @@ -432,7 +444,7 @@ func (r *relayManager) handleRxHandshakeDiscoMsgRunLoop(event relayHandshakeDisc r.addrPortVNIToHandshakeWork[apv] = work case *disco.Ping: // Always TX a pong. We might not have any associated work if ping - // reception raced with our call to [endpoint.relayEndpointReady()], so + // reception raced with our call to [endpoint.udpRelayEndpointReady()], so // err on the side of enabling the remote side to use this path. // // Conn.handlePingLocked() makes efforts to suppress duplicate pongs @@ -473,7 +485,7 @@ func (r *relayManager) handleRxHandshakeDiscoMsgRunLoop(event relayHandshakeDisc } func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshakeWorkDoneEvent) { - byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[done.work.ep] + byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[done.work.wlb.ep] if !ok { return } @@ -483,7 +495,7 @@ func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshak } delete(byServerDisco, done.work.se.ServerDisco) if len(byServerDisco) == 0 { - delete(r.handshakeWorkByEndpointByServerDisco, done.work.ep) + delete(r.handshakeWorkByEndpointByServerDisco, done.work.wlb.ep) } delete(r.handshakeWorkByServerDiscoVNI, serverDiscoVNI{done.work.se.ServerDisco, done.work.se.VNI}) apv, ok := r.handshakeWorkAwaitingPong[work] @@ -499,10 +511,15 @@ func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshak vni := virtualNetworkID{} vni.set(done.work.se.VNI) addr := epAddr{ap: done.pongReceivedFrom, vni: vni} - // ep.relayEndpointReady() must be called in a new goroutine to prevent + // ep.udpRelayEndpointReady() must be called in a new goroutine to prevent // deadlocks as it acquires [endpoint] & [Conn] mutexes. See [relayManager] // docs for details. - go done.work.ep.relayEndpointReady(addr, done.latency) + go done.work.wlb.ep.udpRelayEndpointReady(addrQuality{ + epAddr: addr, + relayServerDisco: done.work.se.ServerDisco, + latency: done.latency, + wireMTU: pingSizeToPktLen(0, addr), + }) } func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelayServerEndpointEvent) { @@ -525,7 +542,7 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay } // Check for duplicate work by [*endpoint] + server disco. - byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[newServerEndpoint.ep] + byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[newServerEndpoint.wlb.ep] if ok { existingWork, ok := byServerDisco[newServerEndpoint.se.ServerDisco] if ok { @@ -569,10 +586,40 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay } } + if newServerEndpoint.server.IsValid() { + // Send a [disco.CallMeMaybeVia] to the remote peer if we allocated this + // endpoint, regardless of if we start a handshake below. 
+ go r.sendCallMeMaybeVia(newServerEndpoint.wlb.ep, newServerEndpoint.se) + } + + lastBestMatchingServer := newServerEndpoint.se.ServerDisco.Compare(newServerEndpoint.wlb.lastBest.relayServerDisco) == 0 + if lastBestMatchingServer && newServerEndpoint.wlb.lastBestIsTrusted { + // This relay server endpoint is the same as [endpoint]'s bestAddr at + // the time UDP relay path discovery was started, and it was also a + // trusted path (see endpoint.trustBestAddrUntil), so return early. + // + // If we were to start a new handshake, there is a chance that we + // cause [endpoint] to blackhole some packets on its bestAddr if we end + // up shifting to a new address family or src, e.g. IPv4 to IPv6, due to + // the window of time between the handshake completing, and our call to + // udpRelayEndpointReady(). The relay server can only forward packets + // from us on a single [epAddr]. + return + } + + // TODO(jwhited): if lastBest is untrusted, consider some strategies + // to reduce the chance we blackhole if it were to transition to + // trusted during/before the new handshake: + // 1. Start by attempting a handshake with only lastBest.epAddr. If + // that fails then try the remaining [epAddr]s. + // 2. Signal bestAddr trust transitions between [endpoint] and + // [relayManager] in order to prevent a handshake from starting + // and/or stop one that is running. + // We're ready to start a new handshake. ctx, cancel := context.WithCancel(context.Background()) work := &relayHandshakeWork{ - ep: newServerEndpoint.ep, + wlb: newServerEndpoint.wlb, se: newServerEndpoint.se, rxDiscoMsgCh: make(chan relayHandshakeDiscoMsgEvent), doneCh: make(chan relayEndpointHandshakeWorkDoneEvent, 1), @@ -581,16 +628,11 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay } if byServerDisco == nil { byServerDisco = make(map[key.DiscoPublic]*relayHandshakeWork) - r.handshakeWorkByEndpointByServerDisco[newServerEndpoint.ep] = byServerDisco + r.handshakeWorkByEndpointByServerDisco[newServerEndpoint.wlb.ep] = byServerDisco } byServerDisco[newServerEndpoint.se.ServerDisco] = work r.handshakeWorkByServerDiscoVNI[sdv] = work - if newServerEndpoint.server.IsValid() { - // Send CallMeMaybeVia to the remote peer if we allocated this endpoint. 
- go r.sendCallMeMaybeVia(work.ep, work.se) - } - r.handshakeGeneration++ if r.handshakeGeneration == 0 { // generation must be nonzero r.handshakeGeneration++ @@ -633,7 +675,8 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat work.cancel() }() - epDisco := work.ep.disco.Load() + ep := work.wlb.ep + epDisco := ep.disco.Load() if epDisco == nil { return } @@ -653,7 +696,7 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat for _, addrPort := range work.se.AddrPorts { if addrPort.IsValid() { sentBindAny = true - go work.ep.c.sendDiscoMessage(epAddr{ap: addrPort, vni: vni}, key.NodePublic{}, work.se.ServerDisco, bind, discoVerboseLog) + go ep.c.sendDiscoMessage(epAddr{ap: addrPort, vni: vni}, key.NodePublic{}, work.se.ServerDisco, bind, discoVerboseLog) } } if !sentBindAny { @@ -684,15 +727,15 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat sentPingAt[txid] = time.Now() ping := &disco.Ping{ TxID: txid, - NodeKey: work.ep.c.publicKeyAtomic.Load(), + NodeKey: ep.c.publicKeyAtomic.Load(), } go func() { if withAnswer != nil { answer := &disco.BindUDPRelayEndpointAnswer{BindUDPRelayEndpointCommon: common} answer.Challenge = *withAnswer - work.ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, work.se.ServerDisco, answer, discoVerboseLog) + ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, work.se.ServerDisco, answer, discoVerboseLog) } - work.ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, epDisco.key, ping, discoVerboseLog) + ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, epDisco.key, ping, discoVerboseLog) }() } @@ -760,17 +803,17 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat } } -func (r *relayManager) allocateAllServersRunLoop(ep *endpoint) { +func (r *relayManager) allocateAllServersRunLoop(wlb endpointWithLastBest) { if len(r.serversByAddrPort) == 0 { return } ctx, cancel := context.WithCancel(context.Background()) - started := &relayEndpointAllocWork{ep: ep, cancel: cancel, wg: &sync.WaitGroup{}} + started := &relayEndpointAllocWork{ep: wlb.ep, cancel: cancel, wg: &sync.WaitGroup{}} for k := range r.serversByAddrPort { started.wg.Add(1) - go r.allocateSingleServer(ctx, started.wg, k, ep) + go r.allocateSingleServer(ctx, started.wg, k, wlb) } - r.allocWorkByEndpoint[ep] = started + r.allocWorkByEndpoint[wlb.ep] = started go func() { started.wg.Wait() relayManagerInputEvent(r, ctx, &r.allocateWorkDoneCh, relayEndpointAllocWorkDoneEvent{work: started}) @@ -829,25 +872,25 @@ func doAllocate(ctx context.Context, server netip.AddrPort, discoKeys [2]key.Dis } } -func (r *relayManager) allocateSingleServer(ctx context.Context, wg *sync.WaitGroup, server netip.AddrPort, ep *endpoint) { +func (r *relayManager) allocateSingleServer(ctx context.Context, wg *sync.WaitGroup, server netip.AddrPort, wlb endpointWithLastBest) { // TODO(jwhited): introduce client metrics counters for notable failures defer wg.Done() - remoteDisco := ep.disco.Load() + remoteDisco := wlb.ep.disco.Load() if remoteDisco == nil { return } firstTry := true for { - se, err := doAllocate(ctx, server, [2]key.DiscoPublic{ep.c.discoPublic, remoteDisco.key}) + se, err := doAllocate(ctx, server, [2]key.DiscoPublic{wlb.ep.c.discoPublic, remoteDisco.key}) if err == nil { relayManagerInputEvent(r, ctx, &r.newServerEndpointCh, newRelayServerEndpointEvent{ - ep: ep, + wlb: wlb, se: se, server: server, // we allocated this endpoint (vs 
CallMeMaybeVia reception), mark it as such }) return } - ep.c.logf("[v1] magicsock: relayManager: error allocating endpoint on %v for %v: %v", server, ep.discoShort(), err) + wlb.ep.c.logf("[v1] magicsock: relayManager: error allocating endpoint on %v for %v: %v", server, wlb.ep.discoShort(), err) var notReady errNotReady if firstTry && errors.As(err, ¬Ready) { select { diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index de282b499..8feff2f3d 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -14,7 +14,7 @@ import ( func TestRelayManagerInitAndIdle(t *testing.T) { rm := relayManager{} - rm.allocateAndHandshakeAllServers(&endpoint{}) + rm.startUDPRelayPathDiscoveryFor(&endpoint{}, addrQuality{}, false) <-rm.runLoopStoppedCh rm = relayManager{} @@ -22,7 +22,7 @@ func TestRelayManagerInitAndIdle(t *testing.T) { <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleCallMeMaybeVia(&endpoint{c: &Conn{discoPrivate: key.NewDisco()}}, &disco.CallMeMaybeVia{ServerDisco: key.NewDisco().Public()}) + rm.handleCallMeMaybeVia(&endpoint{c: &Conn{discoPrivate: key.NewDisco()}}, addrQuality{}, false, &disco.CallMeMaybeVia{ServerDisco: key.NewDisco().Public()}) <-rm.runLoopStoppedCh rm = relayManager{} From 6a9bf9172b6fa6dc645b5ea960b98014f389533d Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Mon, 30 Jun 2025 13:43:16 -0500 Subject: [PATCH 1022/1708] ipn/ipnlocal: add verbose Taildrive logging on client side This allows logging the following Taildrive behavior from the client's perspective when --verbose=1: - Initialization of Taildrive remotes for every peer - Peer availability checks - All HTTP requests to peers (not just GET and PUT) Updates tailscale/corp#29702 Signed-off-by: Percy Wegmann --- ipn/ipnlocal/drive.go | 6 ++++++ ipn/ipnlocal/local.go | 35 +++++++++++++++++++++-------------- 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index 6a6f9bcd2..8c2f339bb 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -306,10 +306,12 @@ func (b *LocalBackend) updateDrivePeersLocked(nm *netmap.NetworkMap) { } func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Remote { + b.logf("[v1] taildrive: setting up drive remotes from peers") driveRemotes := make([]*drive.Remote, 0, len(nm.Peers)) for _, p := range nm.Peers { peerID := p.ID() url := fmt.Sprintf("%s/%s", peerAPIBase(nm, p), taildrivePrefix[1:]) + b.logf("[v1] taildrive: appending remote for peer %d: %s", peerID, url) driveRemotes = append(driveRemotes, &drive.Remote{ Name: p.DisplayName(false), URL: url, @@ -320,6 +322,7 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem cn := b.currentNode() peer, ok := cn.NodeByID(peerID) if !ok { + b.logf("[v1] taildrive: Available(): peer %d not found", peerID) return false } @@ -332,14 +335,17 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem // The netmap.Peers slice is not updated in all cases. // It should be fixed now that we use PeerByIDOk. if !peer.Online().Get() { + b.logf("[v1] taildrive: Available(): peer %d offline", peerID) return false } // Check that the peer is allowed to share with us. 
if cn.PeerHasCap(peer, tailcfg.PeerCapabilityTaildriveSharer) { + b.logf("[v1] taildrive: Available(): peer %d available", peerID) return true } + b.logf("[v1] taildrive: Available(): peer %d not allowed to share", peerID) return false }, }) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9cec088f1..29d09400b 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1459,7 +1459,7 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi } n, ok = cn.NodeByID(nid) if !ok { - return zero, u, false + return zero, u, false } up, ok := cn.UserByID(n.User()) if !ok { @@ -5960,6 +5960,7 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { // the number of bytesRead. type responseBodyWrapper struct { io.ReadCloser + logVerbose bool bytesRx int64 bytesTx int64 log logger.Logf @@ -5981,8 +5982,22 @@ func (rbw *responseBodyWrapper) logAccess(err string) { // Some operating systems create and copy lots of 0 length hidden files for // tracking various states. Omit these to keep logs from being too verbose. - if rbw.contentLength > 0 { - rbw.log("taildrive: access: %s from %s to %s: status-code=%d ext=%q content-type=%q content-length=%.f tx=%.f rx=%.f err=%q", rbw.method, rbw.selfNodeKey, rbw.shareNodeKey, rbw.statusCode, rbw.fileExtension, rbw.contentType, roundTraffic(rbw.contentLength), roundTraffic(rbw.bytesTx), roundTraffic(rbw.bytesRx), err) + if rbw.logVerbose || rbw.contentLength > 0 { + levelPrefix := "" + if rbw.logVerbose { + levelPrefix = "[v1] " + } + rbw.log( + "%staildrive: access: %s from %s to %s: status-code=%d ext=%q content-type=%q content-length=%.f tx=%.f rx=%.f err=%q", + levelPrefix, + rbw.method, + rbw.selfNodeKey, + rbw.shareNodeKey, + rbw.statusCode, + rbw.fileExtension, + rbw.contentType, + roundTraffic(rbw.contentLength), + roundTraffic(rbw.bytesTx), roundTraffic(rbw.bytesRx), err) } } @@ -6037,17 +6052,8 @@ func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err defer func() { contentType := "unknown" - switch req.Method { - case httpm.PUT: - if ct := req.Header.Get("Content-Type"); ct != "" { - contentType = ct - } - case httpm.GET: - if ct := resp.Header.Get("Content-Type"); ct != "" { - contentType = ct - } - default: - return + if ct := req.Header.Get("Content-Type"); ct != "" { + contentType = ct } dt.b.mu.Lock() @@ -6061,6 +6067,7 @@ func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err rbw := responseBodyWrapper{ log: dt.b.logf, + logVerbose: req.Method != httpm.GET && req.Method != httpm.PUT, // other requests like PROPFIND are quite chatty, so we log those at verbose level method: req.Method, bytesTx: int64(bw.bytesRead), selfNodeKey: selfNodeKey, From 454d856be853c713e5e916f13f75cf183de2c94e Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Tue, 1 Jul 2025 09:03:54 -0500 Subject: [PATCH 1023/1708] drive,ipn/ipnlocal: calculate peer taildrive URLs on-demand Instead of calculating the PeerAPI URL at the time that we add the peer, we now calculate it on every access to the peer. This way, if we initially did not have a shared address family with the peer, but later do, this allows us to access the peer at that point. This follows the pattern from other places where we access the peer API, which also calculate the URL on an as-needed basis. Additionally, we now show peers as not Available when we can't get a peer API URL. Lastly, this moves some of the more frequent verbose Taildrive logging from [v1] to [v2] level. 
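As a rough sketch of the pattern this change moves to (simplified; not the actual drive.Remote type, its callers, or real addresses), the stored URL string becomes a closure that is re-evaluated on every access, so a peer that only later gains a shared address family starts resolving to a usable PeerAPI URL:

    package main

    import "fmt"

    // Remote mirrors the post-patch shape: URL is a func so each access
    // recomputes the peer's PeerAPI base URL instead of reusing a value
    // captured once when the peer was added.
    type Remote struct {
        Name string
        URL  func() string
    }

    func main() {
        base := "" // e.g. no shared address family with the peer yet
        r := Remote{
            Name: "peer1",
            URL:  func() string { return base }, // evaluated per access
        }
        fmt.Printf("%q\n", r.URL()) // "" — treated as not reachable
        base = "http://100.64.0.2:56789" // placeholder PeerAPI address
        fmt.Printf("%q\n", r.URL()) // now reflects the reachable address
    }
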
Updates #29702 Signed-off-by: Percy Wegmann --- drive/driveimpl/drive_test.go | 2 +- drive/driveimpl/local_impl.go | 2 +- drive/local.go | 2 +- ipn/ipnlocal/drive.go | 27 +++++++++++++++++++-------- 4 files changed, 22 insertions(+), 11 deletions(-) diff --git a/drive/driveimpl/drive_test.go b/drive/driveimpl/drive_test.go index e7dd83291..cff55fbb2 100644 --- a/drive/driveimpl/drive_test.go +++ b/drive/driveimpl/drive_test.go @@ -524,7 +524,7 @@ func (s *system) addRemote(name string) string { for name, r := range s.remotes { remotes = append(remotes, &drive.Remote{ Name: name, - URL: fmt.Sprintf("http://%s", r.l.Addr()), + URL: func() string { return fmt.Sprintf("http://%s", r.l.Addr()) }, }) } s.local.fs.SetRemotes( diff --git a/drive/driveimpl/local_impl.go b/drive/driveimpl/local_impl.go index 8cdf60179..871d03343 100644 --- a/drive/driveimpl/local_impl.go +++ b/drive/driveimpl/local_impl.go @@ -81,7 +81,7 @@ func (s *FileSystemForLocal) SetRemotes(domain string, remotes []*drive.Remote, Name: remote.Name, Available: remote.Available, }, - BaseURL: func() (string, error) { return remote.URL, nil }, + BaseURL: func() (string, error) { return remote.URL(), nil }, Transport: transport, }) } diff --git a/drive/local.go b/drive/local.go index aff79a57b..052efb3f9 100644 --- a/drive/local.go +++ b/drive/local.go @@ -17,7 +17,7 @@ import ( // Remote represents a remote Taildrive node. type Remote struct { Name string - URL string + URL func() string Available func() bool } diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index 8c2f339bb..d77481903 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -309,20 +309,26 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem b.logf("[v1] taildrive: setting up drive remotes from peers") driveRemotes := make([]*drive.Remote, 0, len(nm.Peers)) for _, p := range nm.Peers { - peerID := p.ID() - url := fmt.Sprintf("%s/%s", peerAPIBase(nm, p), taildrivePrefix[1:]) - b.logf("[v1] taildrive: appending remote for peer %d: %s", peerID, url) + peer := p + peerID := peer.ID() + peerKey := peer.Key().ShortString() + b.logf("[v1] taildrive: appending remote for peer %s", peerKey) driveRemotes = append(driveRemotes, &drive.Remote{ Name: p.DisplayName(false), - URL: url, + URL: func() string { + url := fmt.Sprintf("%s/%s", b.currentNode().PeerAPIBase(peer), taildrivePrefix[1:]) + b.logf("[v2] taildrive: url for peer %s: %s", peerKey, url) + return url + }, Available: func() bool { // Peers are available to Taildrive if: // - They are online + // - Their PeerAPI is reachable // - They are allowed to share at least one folder with us cn := b.currentNode() peer, ok := cn.NodeByID(peerID) if !ok { - b.logf("[v1] taildrive: Available(): peer %d not found", peerID) + b.logf("[v2] taildrive: Available(): peer %s not found", peerKey) return false } @@ -335,17 +341,22 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem // The netmap.Peers slice is not updated in all cases. // It should be fixed now that we use PeerByIDOk. if !peer.Online().Get() { - b.logf("[v1] taildrive: Available(): peer %d offline", peerID) + b.logf("[v2] taildrive: Available(): peer %s offline", peerKey) + return false + } + + if b.currentNode().PeerAPIBase(peer) == "" { + b.logf("[v2] taildrive: Available(): peer %s PeerAPI unreachable", peerKey) return false } // Check that the peer is allowed to share with us. 
if cn.PeerHasCap(peer, tailcfg.PeerCapabilityTaildriveSharer) { - b.logf("[v1] taildrive: Available(): peer %d available", peerID) + b.logf("[v2] taildrive: Available(): peer %s available", peerKey) return true } - b.logf("[v1] taildrive: Available(): peer %d not allowed to share", peerID) + b.logf("[v2] taildrive: Available(): peer %s not allowed to share", peerKey) return false }, }) From d15b2312c4fb7b8ea1f98c5c80f7f72aed784b5d Mon Sep 17 00:00:00 2001 From: kari-ts <135075563+kari-ts@users.noreply.github.com> Date: Tue, 1 Jul 2025 09:28:48 -0700 Subject: [PATCH 1024/1708] tailcfg: add CapabilityOwner (#16426) We would like to start sending whether a node is a Tailnet owner in netmap responses so that clients can determine what information to display to a user who wants to request account deletion. Updates tailscale/corp#30016 Signed-off-by: kari-ts --- ipn/ipnlocal/local_test.go | 14 ++++++++++++++ tailcfg/tailcfg.go | 1 + 2 files changed, 15 insertions(+) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 16dbef62a..47e5fa37d 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -826,10 +826,21 @@ func TestStatusPeerCapabilities(t *testing.T) { tailcfg.CapabilityAdmin: {`{"test": "true}`}, }), }).View(), + (&tailcfg.Node{ + ID: 3, + StableID: "baz", + Key: makeNodeKeyFromID(3), + Hostinfo: (&tailcfg.Hostinfo{}).View(), + Capabilities: []tailcfg.NodeCapability{tailcfg.CapabilityOwner}, + CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ + tailcfg.CapabilityOwner: nil, + }), + }).View(), }, expectedPeerCapabilities: map[tailcfg.StableNodeID][]tailcfg.NodeCapability{ tailcfg.StableNodeID("foo"): {tailcfg.CapabilitySSH}, tailcfg.StableNodeID("bar"): {tailcfg.CapabilityAdmin}, + tailcfg.StableNodeID("baz"): {tailcfg.CapabilityOwner}, }, expectedPeerCapMap: map[tailcfg.StableNodeID]tailcfg.NodeCapMap{ tailcfg.StableNodeID("foo"): (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ @@ -838,6 +849,9 @@ func TestStatusPeerCapabilities(t *testing.T) { tailcfg.StableNodeID("bar"): (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ tailcfg.CapabilityAdmin: {`{"test": "true}`}, }), + tailcfg.StableNodeID("baz"): (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ + tailcfg.CapabilityOwner: nil, + }), }, }, { diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index fb7d54c38..4b1217d4e 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2367,6 +2367,7 @@ type NodeCapability string const ( CapabilityFileSharing NodeCapability = "https://tailscale.com/cap/file-sharing" CapabilityAdmin NodeCapability = "https://tailscale.com/cap/is-admin" + CapabilityOwner NodeCapability = "https://tailscale.com/cap/is-owner" CapabilitySSH NodeCapability = "https://tailscale.com/cap/ssh" // feature enabled/available CapabilitySSHRuleIn NodeCapability = "https://tailscale.com/cap/ssh-rule-in" // some SSH rule reach this node CapabilityDataPlaneAuditLogs NodeCapability = "https://tailscale.com/cap/data-plane-audit-logs" // feature enabled From d2edf7133a078880995deb184ae66211efb07b34 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 2 Jul 2025 09:23:54 -0700 Subject: [PATCH 1025/1708] wgengine/magicsock: remove references to rucPtr (#16441) It used to be a **RebindingUDPConn, now it's just a *RebindingUDPConn. 
Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 0933c5be2..89111b7a0 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3197,9 +3197,9 @@ func (c *Conn) listenPacket(network string, port uint16) (nettype.PacketConn, er return nettype.MakePacketListenerWithNetIP(netns.Listener(c.logf, c.netMon)).ListenPacket(ctx, network, addr) } -// bindSocket initializes rucPtr if necessary and binds a UDP socket to it. +// bindSocket binds a UDP socket to ruc. // Network indicates the UDP socket type; it must be "udp4" or "udp6". -// If rucPtr had an existing UDP socket bound, it closes that socket. +// If ruc had an existing UDP socket bound, it closes that socket. // The caller is responsible for informing the portMapper of any changes. // If curPortFate is set to dropCurrentPort, no attempt is made to reuse // the current port. From 172e26b3e3cf70455161609379da1820f6065f77 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 2 Jul 2025 10:52:00 -0700 Subject: [PATCH 1026/1708] tailcfg: report StateEncrypted in Hostinfo (#16434) Report whether the client is configured with state encryption (which varies by platform and can be optional on some). Wire it up to `--encrypt-state` in tailscaled, which is set for Linux/Windows, and set defaults for other platforms. Macsys will also report this if full Keychain migration is done. Updates #15830 Signed-off-by: Andrew Lytvynov --- feature/tpm/tpm.go | 2 ++ ipn/ipnlocal/local.go | 27 +++++++++++++++++++++++++++ ipn/store.go | 6 ++++++ tailcfg/tailcfg.go | 9 ++++++++- tailcfg/tailcfg_clone.go | 1 + tailcfg/tailcfg_test.go | 1 + tailcfg/tailcfg_view.go | 2 ++ 7 files changed, 47 insertions(+), 1 deletion(-) diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 5ec084eff..9499ed02a 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -159,6 +159,8 @@ func newStore(logf logger.Logf, path string) (ipn.StateStore, error) { // tpmStore is an ipn.StateStore that stores the state in a secretbox-encrypted // file using a TPM-sealed symmetric key. type tpmStore struct { + ipn.EncryptedStateStore + logf logger.Logf path string key [32]byte diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 29d09400b..9c16d55af 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2244,6 +2244,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { hostinfo.Userspace.Set(b.sys.IsNetstack()) hostinfo.UserspaceRouter.Set(b.sys.IsNetstackRouter()) hostinfo.AppConnector.Set(b.appConnector != nil) + hostinfo.StateEncrypted = b.stateEncrypted() b.logf.JSON(1, "Hostinfo", hostinfo) // TODO(apenwarr): avoid the need to reinit controlclient. @@ -7801,3 +7802,29 @@ func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcf var ( metricCurrentWatchIPNBus = clientmetric.NewGauge("localbackend_current_watch_ipn_bus") ) + +func (b *LocalBackend) stateEncrypted() opt.Bool { + switch runtime.GOOS { + case "android", "ios": + return opt.NewBool(true) + case "darwin": + switch { + case version.IsMacAppStore(): + return opt.NewBool(true) + case version.IsMacSysExt(): + // MacSys still stores its state in plaintext on disk in addition to + // the Keychain. A future release will clean up the on-disk state + // files. + // TODO(#15830): always return true here once MacSys is fully migrated. 
+ sp, _ := syspolicy.GetBoolean(syspolicy.EncryptState, false) + return opt.NewBool(sp) + default: + // Probably self-compiled tailscaled, we don't use the Keychain + // there. + return opt.NewBool(false) + } + default: + _, ok := b.store.(ipn.EncryptedStateStore) + return opt.NewBool(ok) + } +} diff --git a/ipn/store.go b/ipn/store.go index 550aa8cba..9da5288c0 100644 --- a/ipn/store.go +++ b/ipn/store.go @@ -113,3 +113,9 @@ func ReadStoreInt(store StateStore, id StateKey) (int64, error) { func PutStoreInt(store StateStore, id StateKey, val int64) error { return WriteState(store, id, fmt.Appendf(nil, "%d", val)) } + +// EncryptedStateStore is a marker interface implemented by StateStores that +// encrypt data at rest. +type EncryptedStateStore interface { + stateStoreIsEncrypted() +} diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 4b1217d4e..10b157ac1 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -162,7 +162,8 @@ type CapabilityVersion int // - 115: 2025-03-07: Client understands DERPRegion.NoMeasureNoHome. // - 116: 2025-05-05: Client serves MagicDNS "AAAA" if NodeAttrMagicDNSPeerAAAA set on self node // - 117: 2025-05-28: Client understands DisplayMessages (structured health messages), but not necessarily PrimaryAction. -const CurrentCapabilityVersion CapabilityVersion = 117 +// - 118: 2025-07-01: Client sends Hostinfo.StateEncrypted to report whether the state file is encrypted at rest (#15830) +const CurrentCapabilityVersion CapabilityVersion = 118 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -878,6 +879,12 @@ type Hostinfo struct { Location *Location `json:",omitempty"` TPM *TPMInfo `json:",omitempty"` // TPM device metadata, if available + // StateEncrypted reports whether the node state is stored encrypted on + // disk. The actual mechanism is platform-specific: + // * Apple nodes use the Keychain + // * Linux and Windows nodes use the TPM + // * Android apps use EncryptedSharedPreferences + StateEncrypted opt.Bool `json:",omitempty"` // NOTE: any new fields containing pointers in this type // require changes to Hostinfo.Equal. diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 2c7941d51..412e1f38d 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -188,6 +188,7 @@ var _HostinfoCloneNeedsRegeneration = Hostinfo(struct { ServicesHash string Location *Location TPM *TPMInfo + StateEncrypted opt.Bool }{}) // Clone makes a deep copy of NetInfo. 
diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index 60e86794a..e8e86cdb1 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -69,6 +69,7 @@ func TestHostinfoEqual(t *testing.T) { "ServicesHash", "Location", "TPM", + "StateEncrypted", } if have := fieldsOf(reflect.TypeFor[Hostinfo]()); !reflect.DeepEqual(have, hiHandles) { t.Errorf("Hostinfo.Equal check might be out of sync\nfields: %q\nhandled: %q\n", diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index c76654887..7e82cd871 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -303,6 +303,7 @@ func (v HostinfoView) ServicesHash() string { return v.ж.Serv func (v HostinfoView) Location() LocationView { return v.ж.Location.View() } func (v HostinfoView) TPM() views.ValuePointer[TPMInfo] { return views.ValuePointerOf(v.ж.TPM) } +func (v HostinfoView) StateEncrypted() opt.Bool { return v.ж.StateEncrypted } func (v HostinfoView) Equal(v2 HostinfoView) bool { return v.ж.Equal(v2.ж) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -346,6 +347,7 @@ var _HostinfoViewNeedsRegeneration = Hostinfo(struct { ServicesHash string Location *Location TPM *TPMInfo + StateEncrypted opt.Bool }{}) // View returns a read-only view of NetInfo. From f9e7131772ffc85016921fe099791ffb467cc681 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 2 Jul 2025 13:27:30 -0700 Subject: [PATCH 1027/1708] wgengine/magicsock: make lazyEndpoint load bearing for UDP relay (#16435) Cryptokey Routing identification is now required to set an [epAddr] into the peerMap for Geneve-encapsulated [epAddr]s. Updates tailscale/corp#27502 Updates tailscale/corp#29422 Updates tailscale/corp#30042 Signed-off-by: Jordan Whited --- go.mod | 2 +- go.sum | 4 ++-- wgengine/magicsock/endpoint.go | 1 - wgengine/magicsock/magicsock.go | 28 ++++++++++++++++++++++++---- 4 files changed, 27 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 0d031d0ba..5bf04feda 100644 --- a/go.mod +++ b/go.mod @@ -90,7 +90,7 @@ require ( github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 - github.com/tailscale/wireguard-go v0.0.0-20250530210235-65cd6eed7d7f + github.com/tailscale/wireguard-go v0.0.0-20250701223756-24483d7a0003 github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e github.com/tc-hib/winres v0.2.1 github.com/tcnksm/go-httpstat v0.2.0 diff --git a/go.sum b/go.sum index 6f44cd86e..f9910bb59 100644 --- a/go.sum +++ b/go.sum @@ -975,8 +975,8 @@ github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:U github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= -github.com/tailscale/wireguard-go v0.0.0-20250530210235-65cd6eed7d7f h1:vg3PmQdq1BbB2V81iC1VBICQtfwbVGZ/4A/p7QKXTK0= -github.com/tailscale/wireguard-go v0.0.0-20250530210235-65cd6eed7d7f/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250701223756-24483d7a0003 h1:chIzUDKxR0nXQQra0j41aqiiFNICs0FIC5ZCwDO7z3k= +github.com/tailscale/wireguard-go v0.0.0-20250701223756-24483d7a0003/go.mod 
h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index af4666665..0569341ff 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -126,7 +126,6 @@ func (de *endpoint) udpRelayEndpointReady(maybeBest addrQuality) { de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v", de.publicKey.ShortString(), de.discoShort(), maybeBest.epAddr, maybeBest.wireMTU) de.setBestAddrLocked(maybeBest) de.trustBestAddrUntil = mono.Now().Add(trustUDPAddrDuration) - de.c.peerMap.setNodeKeyForEpAddr(maybeBest.epAddr, de.publicKey) } func (de *endpoint) setBestAddrLocked(v addrQuality) { diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 89111b7a0..174345a84 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1695,8 +1695,13 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach c.mu.Unlock() if !ok { if c.controlKnobs != nil && c.controlKnobs.DisableCryptorouting.Load() { + // Note: UDP relay is dependent on cryptorouting enablement. We + // only update Geneve-encapsulated [epAddr]s in the [peerMap] + // via [lazyEndpoint]. return nil, 0, false } + // TODO(jwhited): reuse [lazyEndpoint] across calls to receiveIP() + // for the same batch & [epAddr] src. return &lazyEndpoint{c: c, src: src}, size, true } cache.epAddr = src @@ -1704,6 +1709,8 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach cache.gen = de.numStopAndReset() ep = de } + // TODO(jwhited): consider the implications of not recording this receive + // activity due to an early [lazyEndpoint] return above. now := mono.Now() ep.lastRecvUDPAny.StoreAtomic(now) ep.noteRecvActivity(src, now) @@ -3793,14 +3800,27 @@ func (le *lazyEndpoint) DstIP() netip.Addr { return netip.Addr{} } func (le *lazyEndpoint) SrcToString() string { return le.src.String() } func (le *lazyEndpoint) DstToString() string { return "dst" } func (le *lazyEndpoint) DstToBytes() []byte { return nil } -func (le *lazyEndpoint) GetPeerEndpoint(peerPublicKey [32]byte) conn.Endpoint { + +// FromPeer implements [conn.PeerAwareEndpoint]. We return a [*lazyEndpoint] in +// our [conn.ReceiveFunc]s when we are unable to identify the peer at WireGuard +// packet reception time, pre-decryption. If wireguard-go successfully decrypts +// the packet it calls us here, and we update our [peerMap] in order to +// associate le.src with peerPublicKey. +func (le *lazyEndpoint) FromPeer(peerPublicKey [32]byte) { pubKey := key.NodePublicFromRaw32(mem.B(peerPublicKey[:])) le.c.mu.Lock() defer le.c.mu.Unlock() ep, ok := le.c.peerMap.endpointForNodeKey(pubKey) if !ok { - return nil + return } - le.c.logf("magicsock: lazyEndpoint.GetPeerEndpoint(%v) found: %v", pubKey.ShortString(), ep.nodeAddr) - return ep + // TODO(jwhited): Consider [lazyEndpoint] effectiveness as a means to make + // this the sole call site for setNodeKeyForEpAddr. 
If this is the sole + // call site, and we always update the mapping based on successful + // Cryptokey Routing identification events, then we can go ahead and make + // [epAddr]s singular per peer (like they are for Geneve-encapsulated ones + // already). + // See http://go/corp/29422 & http://go/corp/30042 + le.c.peerMap.setNodeKeyForEpAddr(le.src, pubKey) + le.c.logf("magicsock: lazyEndpoint.FromPeer(%v) setting epAddr(%v) in peerMap for node(%v)", pubKey.ShortString(), le.src, ep.nodeAddr) } From eb03d42fe60acce0e7efacc3a026b26bfb56897c Mon Sep 17 00:00:00 2001 From: David Bond Date: Wed, 2 Jul 2025 21:42:31 +0100 Subject: [PATCH 1028/1708] cmd/k8s-operator: Allow configuration of login server (#16432) This commit modifies the kubernetes operator to allow for customisation of the tailscale login url. This provides some data locality for people that want to configure it. This value is set in the `loginServer` helm value and is propagated down to all resources managed by the operator. The only exception to this is recorder nodes, where additional changes are required to support modifying the url. Updates https://github.com/tailscale/corp/issues/29847 Signed-off-by: David Bond --- cmd/k8s-operator/connector.go | 5 +++-- .../deploy/chart/templates/deployment.yaml | 2 ++ cmd/k8s-operator/deploy/chart/values.yaml | 3 +++ cmd/k8s-operator/deploy/manifests/operator.yaml | 2 ++ cmd/k8s-operator/ingress.go | 2 ++ cmd/k8s-operator/operator.go | 11 ++++++++--- cmd/k8s-operator/proxygroup.go | 10 ++++++++-- cmd/k8s-operator/sts.go | 9 +++++++++ cmd/k8s-operator/svc.go | 2 ++ cmd/k8s-operator/tsclient.go | 14 +++++++++++--- 10 files changed, 50 insertions(+), 10 deletions(-) diff --git a/cmd/k8s-operator/connector.go b/cmd/k8s-operator/connector.go index c243036cb..8406a1156 100644 --- a/cmd/k8s-operator/connector.go +++ b/cmd/k8s-operator/connector.go @@ -7,6 +7,7 @@ package main import ( "context" + "errors" "fmt" "net/netip" "slices" @@ -14,8 +15,6 @@ import ( "sync" "time" - "errors" - "go.uber.org/zap" xslices "golang.org/x/exp/slices" corev1 "k8s.io/api/core/v1" @@ -26,6 +25,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -199,6 +199,7 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge }, ProxyClassName: proxyClass, proxyType: proxyTypeConnector, + LoginServer: a.ssr.loginServer, } if cn.Spec.SubnetRouter != nil && len(cn.Spec.SubnetRouter.AdvertiseRoutes) > 0 { diff --git a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml index 1b9b97186..8deba7dab 100644 --- a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml @@ -68,6 +68,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: OPERATOR_LOGIN_SERVER + value: {{ .Values.operatorConfig.loginServer }} - name: CLIENT_ID_FILE value: /oauth/client_id - name: CLIENT_SECRET_FILE diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index 2d1effc25..af941425a 100644 --- a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -72,6 +72,9 @@ operatorConfig: # - name: EXTRA_VAR2 # value: "value2" + # URL of the control plane to be used by all resources managed by the operator. 
+ loginServer: "" + # In the case that you already have a tailscale ingressclass in your cluster (or vcluster), you can disable the creation here ingressClass: enabled: true diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index fa18a5deb..4f1faf104 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -5124,6 +5124,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: OPERATOR_LOGIN_SERVER + value: null - name: CLIENT_ID_FILE value: /oauth/client_id - name: CLIENT_SECRET_FILE diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index 5058fd6dd..d62770938 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -22,6 +22,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/ipn" "tailscale.com/kube/kubetypes" "tailscale.com/types/opt" @@ -219,6 +220,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga ChildResourceLabels: crl, ProxyClassName: proxyClass, proxyType: proxyTypeIngressResource, + LoginServer: a.ssr.loginServer, } if val := ing.GetAnnotations()[AnnotationExperimentalForwardClusterTrafficViaL7IngresProxy]; val == "true" { diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index b33dcd114..e5f7d932c 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -43,6 +43,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager/signals" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/local" "tailscale.com/client/tailscale" "tailscale.com/hostinfo" @@ -144,18 +145,20 @@ func initTSNet(zlog *zap.SugaredLogger) (*tsnet.Server, tsClient) { hostname = defaultEnv("OPERATOR_HOSTNAME", "tailscale-operator") kubeSecret = defaultEnv("OPERATOR_SECRET", "") operatorTags = defaultEnv("OPERATOR_INITIAL_TAGS", "tag:k8s-operator") + loginServer = strings.TrimSuffix(defaultEnv("OPERATOR_LOGIN_SERVER", ""), "/") ) startlog := zlog.Named("startup") if clientIDPath == "" || clientSecretPath == "" { startlog.Fatalf("CLIENT_ID_FILE and CLIENT_SECRET_FILE must be set") } - tsc, err := newTSClient(context.Background(), clientIDPath, clientSecretPath) + tsc, err := newTSClient(context.Background(), clientIDPath, clientSecretPath, loginServer) if err != nil { startlog.Fatalf("error creating Tailscale client: %v", err) } s := &tsnet.Server{ - Hostname: hostname, - Logf: zlog.Named("tailscaled").Debugf, + Hostname: hostname, + Logf: zlog.Named("tailscaled").Debugf, + ControlURL: loginServer, } if p := os.Getenv("TS_PORT"); p != "" { port, err := strconv.ParseUint(p, 10, 16) @@ -307,6 +310,7 @@ func runReconcilers(opts reconcilerOpts) { proxyImage: opts.proxyImage, proxyPriorityClassName: opts.proxyPriorityClassName, tsFirewallMode: opts.proxyFirewallMode, + loginServer: opts.tsServer.ControlURL, } err = builder. 
@@ -639,6 +643,7 @@ func runReconcilers(opts reconcilerOpts) { defaultTags: strings.Split(opts.proxyTags, ","), tsFirewallMode: opts.proxyFirewallMode, defaultProxyClass: opts.defaultProxyClass, + loginServer: opts.tsServer.ControlURL, }) if err != nil { startlog.Fatalf("could not create ProxyGroup reconciler: %v", err) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index bedf06ba0..1b622c920 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -29,6 +29,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale" "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" @@ -84,6 +85,7 @@ type ProxyGroupReconciler struct { defaultTags []string tsFirewallMode string defaultProxyClass string + loginServer string mu sync.Mutex // protects following egressProxyGroups set.Slice[types.UID] // for egress proxygroups gauge @@ -709,7 +711,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p return nil, err } - configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[replicaName], existingAdvertiseServices) + configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[replicaName], existingAdvertiseServices, r.loginServer) if err != nil { return nil, fmt.Errorf("error creating tailscaled config: %w", err) } @@ -859,7 +861,7 @@ func (r *ProxyGroupReconciler) ensureRemovedFromGaugeForProxyGroup(pg *tsapi.Pro gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len())) } -func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, authKey *string, staticEndpoints []netip.AddrPort, oldAdvertiseServices []string) (tailscaledConfigs, error) { +func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, authKey *string, staticEndpoints []netip.AddrPort, oldAdvertiseServices []string, loginServer string) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ Version: "alpha0", AcceptDNS: "false", @@ -870,6 +872,10 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, a AuthKey: authKey, } + if loginServer != "" { + conf.ServerURL = &loginServer + } + if pg.Spec.HostnamePrefix != "" { conf.Hostname = ptr.To(fmt.Sprintf("%s-%d", pg.Spec.HostnamePrefix, idx)) } diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index a943ae971..193acad87 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -27,6 +27,7 @@ import ( "k8s.io/apiserver/pkg/storage/names" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" + "tailscale.com/client/tailscale" "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" @@ -138,6 +139,9 @@ type tailscaleSTSConfig struct { ProxyClassName string // name of ProxyClass if one needs to be applied to the proxy ProxyClass *tsapi.ProxyClass // ProxyClass that needs to be applied to the proxy (if there is one) + + // LoginServer denotes the URL of the control plane that should be used by the proxy. 
+ LoginServer string } type connector struct { @@ -162,6 +166,7 @@ type tailscaleSTSReconciler struct { proxyImage string proxyPriorityClassName string tsFirewallMode string + loginServer string } func (sts tailscaleSTSReconciler) validate() error { @@ -910,6 +915,10 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co AppConnector: &ipn.AppConnectorPrefs{Advertise: false}, } + if stsC.LoginServer != "" { + conf.ServerURL = &stsC.LoginServer + } + if stsC.Connector != nil { routes, err := netutil.CalcAdvertiseRoutes(stsC.Connector.routes, stsC.Connector.isExitNode) if err != nil { diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index f8c9af239..52c8bec7f 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -23,6 +23,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -270,6 +271,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga Tags: tags, ChildResourceLabels: crl, ProxyClassName: proxyClass, + LoginServer: a.ssr.loginServer, } sts.proxyType = proxyTypeEgress if a.shouldExpose(svc) { diff --git a/cmd/k8s-operator/tsclient.go b/cmd/k8s-operator/tsclient.go index f49f84af9..a94d55afe 100644 --- a/cmd/k8s-operator/tsclient.go +++ b/cmd/k8s-operator/tsclient.go @@ -12,6 +12,7 @@ import ( "golang.org/x/oauth2/clientcredentials" "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn" "tailscale.com/tailcfg" ) @@ -19,10 +20,9 @@ import ( // call should be performed on the default tailnet for the provided credentials. const ( defaultTailnet = "-" - defaultBaseURL = "https://api.tailscale.com" ) -func newTSClient(ctx context.Context, clientIDPath, clientSecretPath string) (tsClient, error) { +func newTSClient(ctx context.Context, clientIDPath, clientSecretPath, loginServer string) (tsClient, error) { clientID, err := os.ReadFile(clientIDPath) if err != nil { return nil, fmt.Errorf("error reading client ID %q: %w", clientIDPath, err) @@ -31,14 +31,22 @@ func newTSClient(ctx context.Context, clientIDPath, clientSecretPath string) (ts if err != nil { return nil, fmt.Errorf("reading client secret %q: %w", clientSecretPath, err) } + const tokenURLPath = "/api/v2/oauth/token" + tokenURL := fmt.Sprintf("%s%s", ipn.DefaultControlURL, tokenURLPath) + if loginServer != "" { + tokenURL = fmt.Sprintf("%s%s", loginServer, tokenURLPath) + } credentials := clientcredentials.Config{ ClientID: string(clientID), ClientSecret: string(clientSecret), - TokenURL: "https://login.tailscale.com/api/v2/oauth/token", + TokenURL: tokenURL, } c := tailscale.NewClient(defaultTailnet, nil) c.UserAgent = "tailscale-k8s-operator" c.HTTPClient = credentials.Client(ctx) + if loginServer != "" { + c.BaseURL = loginServer + } return c, nil } From 77d19604f449ac65092e232c93d28f9e686df161 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Wed, 2 Jul 2025 14:32:21 -0700 Subject: [PATCH 1029/1708] derp/derphttp: fix DERP TLS client server name inclusion in URL form When dialed with just an URL and no node, the recent proxy fixes caused a regression where there was no TLS server name being included. 
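The gist of the fix, as a hedged standalone sketch (not the real derphttp code; the helper and host below are placeholders that only imitate the fallback described here): the TLS ServerName is now derived unconditionally, falling back to the host from the client's URL when no DERP node is provided, rather than only being set inside the node != nil branch:

    package main

    import (
        "fmt"
        "net/url"
    )

    // serverName imitates the fallback this patch relies on: prefer the
    // node's hostname when a node is known, otherwise use the host from
    // the URL the client was created with.
    func serverName(nodeHostName string, u *url.URL) string {
        if nodeHostName != "" {
            return nodeHostName
        }
        return u.Hostname()
    }

    func main() {
        u, _ := url.Parse("https://derp.example.com/")
        // URL-only dial (no node): previously ServerName stayed empty;
        // now it is always populated from the URL's host.
        fmt.Println(serverName("", u)) // derp.example.com
    }
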
Updates #16222 Updates #16223 Signed-off-by: James Tucker Co-Authored-by: Jordan Whited --- derp/derphttp/derphttp_client.go | 4 +++- derp/derphttp/derphttp_test.go | 36 ++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index 7385f0ad1..704b8175d 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -648,12 +648,14 @@ func (c *Client) dialRegion(ctx context.Context, reg *tailcfg.DERPRegion) (net.C func (c *Client) tlsClient(nc net.Conn, node *tailcfg.DERPNode) *tls.Conn { tlsConf := tlsdial.Config(c.HealthTracker, c.TLSConfig) + // node is allowed to be nil here, tlsServerName falls back to using the URL + // if node is nil. + tlsConf.ServerName = c.tlsServerName(node) if node != nil { if node.InsecureForTests { tlsConf.InsecureSkipVerify = true tlsConf.VerifyConnection = nil } - tlsConf.ServerName = c.tlsServerName(node) if node.CertName != "" { if suf, ok := strings.CutPrefix(node.CertName, "sha256-raw:"); ok { tlsdial.SetConfigExpectedCertHash(tlsConf, suf) diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index 7f0a7e333..bb33e6023 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -590,3 +590,39 @@ func TestManualDial(t *testing.T) { t.Fatalf("rc.Connect: %v", err) } } + +func TestURLDial(t *testing.T) { + if !*liveNetworkTest { + t.Skip("skipping live network test without --live-net-tests") + } + dm := &tailcfg.DERPMap{} + res, err := http.Get("https://controlplane.tailscale.com/derpmap/default") + if err != nil { + t.Fatalf("fetching DERPMap: %v", err) + } + defer res.Body.Close() + if err := json.NewDecoder(res.Body).Decode(dm); err != nil { + t.Fatalf("decoding DERPMap: %v", err) + } + + // find a valid target DERP host to test against + var hostname string + for _, reg := range dm.Regions { + for _, node := range reg.Nodes { + if !node.STUNOnly && node.CanPort80 && node.CertName == "" || node.CertName == node.HostName { + hostname = node.HostName + break + } + } + if hostname != "" { + break + } + } + netMon := netmon.NewStatic() + c, err := NewClient(key.NewNode(), "https://"+hostname+"/", t.Logf, netMon) + defer c.Close() + + if err := c.Connect(context.Background()); err != nil { + t.Fatalf("rc.Connect: %v", err) + } +} From 3a4b439c62ba30f882e50a08ae4b93f087501847 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 2 Jul 2025 20:38:39 -0700 Subject: [PATCH 1030/1708] feature/relayserver,net/udprelay: add IPv6 support (#16442) Updates tailscale/corp#27502 Updates tailscale/corp#30043 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 2 +- net/udprelay/server.go | 133 +++++++++++++++++-------- net/udprelay/server_test.go | 154 ++++++++++++++++------------- 3 files changed, 178 insertions(+), 111 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 4634f3ac2..5a82a9d11 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -137,7 +137,7 @@ func (e *extension) relayServerOrInit() (relayServer, error) { return nil, errors.New("TAILSCALE_USE_WIP_CODE envvar is not set") } var err error - e.server, _, err = udprelay.NewServer(e.logf, *e.port, nil) + e.server, err = udprelay.NewServer(e.logf, *e.port, nil) if err != nil { return nil, err } diff --git a/net/udprelay/server.go b/net/udprelay/server.go index e32f8917c..d2661e59f 100644 --- a/net/udprelay/server.go +++ 
b/net/udprelay/server.go @@ -57,7 +57,10 @@ type Server struct { bindLifetime time.Duration steadyStateLifetime time.Duration bus *eventbus.Bus - uc *net.UDPConn + uc4 *net.UDPConn // always non-nil + uc4Port uint16 // always nonzero + uc6 *net.UDPConn // may be nil if IPv6 bind fails during initialization + uc6Port uint16 // may be zero if IPv6 bind fails during initialization closeOnce sync.Once wg sync.WaitGroup closeCh chan struct{} @@ -278,13 +281,11 @@ func (e *serverEndpoint) isBound() bool { e.boundAddrPorts[1].IsValid() } -// NewServer constructs a [Server] listening on 0.0.0.0:'port'. IPv6 is not yet -// supported. Port may be 0, and what ultimately gets bound is returned as -// 'boundPort'. If len(overrideAddrs) > 0 these will be used in place of dynamic -// discovery, which is useful to override in tests. -// -// TODO: IPv6 support -func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Server, boundPort uint16, err error) { +// NewServer constructs a [Server] listening on port. If port is zero, then +// port selection is left up to the host networking stack. If +// len(overrideAddrs) > 0 these will be used in place of dynamic discovery, +// which is useful to override in tests. +func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Server, err error) { s = &Server{ logf: logger.WithPrefix(logf, "relayserver"), disco: key.NewDisco(), @@ -306,30 +307,36 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve s.bus = bus netMon, err := netmon.New(s.bus, logf) if err != nil { - return nil, 0, err + return nil, err } s.netChecker = &netcheck.Client{ NetMon: netMon, Logf: logger.WithPrefix(logf, "relayserver: netcheck:"), SendPacket: func(b []byte, addrPort netip.AddrPort) (int, error) { - return s.uc.WriteToUDPAddrPort(b, addrPort) + if addrPort.Addr().Is4() { + return s.uc4.WriteToUDPAddrPort(b, addrPort) + } else if s.uc6 != nil { + return s.uc6.WriteToUDPAddrPort(b, addrPort) + } else { + return 0, errors.New("IPv6 socket is not bound") + } }, } - boundPort, err = s.listenOn(port) + err = s.listenOn(port) if err != nil { - return nil, 0, err + return nil, err } - s.wg.Add(1) - go s.packetReadLoop() - s.wg.Add(1) - go s.endpointGCLoop() if len(overrideAddrs) > 0 { addrPorts := make(set.Set[netip.AddrPort], len(overrideAddrs)) for _, addr := range overrideAddrs { if addr.IsValid() { - addrPorts.Add(netip.AddrPortFrom(addr, boundPort)) + if addr.Is4() { + addrPorts.Add(netip.AddrPortFrom(addr, s.uc4Port)) + } else if s.uc6 != nil { + addrPorts.Add(netip.AddrPortFrom(addr, s.uc6Port)) + } } } s.addrPorts = addrPorts.Slice() @@ -337,7 +344,17 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve s.wg.Add(1) go s.addrDiscoveryLoop() } - return s, boundPort, nil + + s.wg.Add(1) + go s.packetReadLoop(s.uc4) + if s.uc6 != nil { + s.wg.Add(1) + go s.packetReadLoop(s.uc6) + } + s.wg.Add(1) + go s.endpointGCLoop() + + return s, nil } func (s *Server) addrDiscoveryLoop() { @@ -351,14 +368,17 @@ func (s *Server) addrDiscoveryLoop() { addrPorts.Make() // get local addresses - localPort := s.uc.LocalAddr().(*net.UDPAddr).Port ips, _, err := netmon.LocalAddresses() if err != nil { return nil, err } for _, ip := range ips { if ip.IsValid() { - addrPorts.Add(netip.AddrPortFrom(ip, uint16(localPort))) + if ip.Is4() { + addrPorts.Add(netip.AddrPortFrom(ip, s.uc4Port)) + } else { + addrPorts.Add(netip.AddrPortFrom(ip, s.uc6Port)) + } } } @@ -413,24 +433,52 @@ func (s *Server) addrDiscoveryLoop() { } } 
-func (s *Server) listenOn(port int) (uint16, error) { - uc, err := net.ListenUDP("udp4", &net.UDPAddr{Port: port}) - if err != nil { - return 0, err - } - // TODO: set IP_PKTINFO sockopt - _, boundPortStr, err := net.SplitHostPort(uc.LocalAddr().String()) - if err != nil { - s.uc.Close() - return 0, err - } - boundPort, err := strconv.ParseUint(boundPortStr, 10, 16) - if err != nil { - s.uc.Close() - return 0, err +// listenOn binds an IPv4 and IPv6 socket to port. We consider it successful if +// we manage to bind the IPv4 socket. +// +// The requested port may be zero, in which case port selection is left up to +// the host networking stack. We make no attempt to bind a consistent port +// across IPv4 and IPv6 if the requested port is zero. +// +// TODO: make these "re-bindable" in similar fashion to magicsock as a means to +// deal with EDR software closing them. http://go/corp/30118 +func (s *Server) listenOn(port int) error { + for _, network := range []string{"udp4", "udp6"} { + uc, err := net.ListenUDP(network, &net.UDPAddr{Port: port}) + if err != nil { + if network == "udp4" { + return err + } else { + s.logf("ignoring IPv6 bind failure: %v", err) + break + } + } + // TODO: set IP_PKTINFO sockopt + _, boundPortStr, err := net.SplitHostPort(uc.LocalAddr().String()) + if err != nil { + uc.Close() + if s.uc4 != nil { + s.uc4.Close() + } + return err + } + portUint, err := strconv.ParseUint(boundPortStr, 10, 16) + if err != nil { + uc.Close() + if s.uc4 != nil { + s.uc4.Close() + } + return err + } + if network == "udp4" { + s.uc4 = uc + s.uc4Port = uint16(portUint) + } else { + s.uc6 = uc + s.uc6Port = uint16(portUint) + } } - s.uc = uc - return uint16(boundPort), nil + return nil } // Close closes the server. @@ -438,7 +486,10 @@ func (s *Server) Close() error { s.closeOnce.Do(func() { s.mu.Lock() defer s.mu.Unlock() - s.uc.Close() + s.uc4.Close() + if s.uc6 != nil { + s.uc6.Close() + } close(s.closeCh) s.wg.Wait() clear(s.byVNI) @@ -507,7 +558,7 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte, uw udpWriter) { e.handlePacket(from, gh, b, uw, s.discoPublic) } -func (s *Server) packetReadLoop() { +func (s *Server) packetReadLoop(uc *net.UDPConn) { defer func() { s.wg.Done() s.Close() @@ -515,11 +566,11 @@ func (s *Server) packetReadLoop() { b := make([]byte, 1<<16-1) for { // TODO: extract laddr from IP_PKTINFO for use in reply - n, from, err := s.uc.ReadFromUDPAddrPort(b) + n, from, err := uc.ReadFromUDPAddrPort(b) if err != nil { return } - s.handlePacket(from, b[:n], s.uc) + s.handlePacket(from, b[:n], uc) } } diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index 3fcb9b8b1..8c0c5aff6 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -29,7 +29,7 @@ type testClient struct { func newTestClient(t *testing.T, vni uint32, serverEndpoint netip.AddrPort, local key.DiscoPrivate, remote, server key.DiscoPublic) *testClient { rAddr := &net.UDPAddr{IP: serverEndpoint.Addr().AsSlice(), Port: int(serverEndpoint.Port())} - uc, err := net.DialUDP("udp4", nil, rAddr) + uc, err := net.DialUDP("udp", nil, rAddr) if err != nil { t.Fatal(err) } @@ -180,85 +180,101 @@ func TestServer(t *testing.T) { discoA := key.NewDisco() discoB := key.NewDisco() - ipv4LoopbackAddr := netip.MustParseAddr("127.0.0.1") - - server, _, err := NewServer(t.Logf, 0, []netip.Addr{ipv4LoopbackAddr}) - if err != nil { - t.Fatal(err) + cases := []struct { + name string + overrideAddrs []netip.Addr + }{ + { + name: "over ipv4", + overrideAddrs: 
[]netip.Addr{netip.MustParseAddr("127.0.0.1")}, + }, + { + name: "over ipv6", + overrideAddrs: []netip.Addr{netip.MustParseAddr("::1")}, + }, } - defer server.Close() - endpoint, err := server.AllocateEndpoint(discoA.Public(), discoB.Public()) - if err != nil { - t.Fatal(err) - } - dupEndpoint, err := server.AllocateEndpoint(discoA.Public(), discoB.Public()) - if err != nil { - t.Fatal(err) - } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + server, err := NewServer(t.Logf, 0, tt.overrideAddrs) + if err != nil { + t.Fatal(err) + } + defer server.Close() - // We expect the same endpoint details pre-handshake. - if diff := cmp.Diff(dupEndpoint, endpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { - t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) - } + endpoint, err := server.AllocateEndpoint(discoA.Public(), discoB.Public()) + if err != nil { + t.Fatal(err) + } + dupEndpoint, err := server.AllocateEndpoint(discoA.Public(), discoB.Public()) + if err != nil { + t.Fatal(err) + } - if len(endpoint.AddrPorts) != 1 { - t.Fatalf("unexpected endpoint.AddrPorts: %v", endpoint.AddrPorts) - } - tcA := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, discoB.Public(), endpoint.ServerDisco) - defer tcA.close() - tcB := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, discoA.Public(), endpoint.ServerDisco) - defer tcB.close() + // We expect the same endpoint details pre-handshake. + if diff := cmp.Diff(dupEndpoint, endpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { + t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) + } - tcA.handshake(t) - tcB.handshake(t) + if len(endpoint.AddrPorts) != 1 { + t.Fatalf("unexpected endpoint.AddrPorts: %v", endpoint.AddrPorts) + } + tcA := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, discoB.Public(), endpoint.ServerDisco) + defer tcA.close() + tcB := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, discoA.Public(), endpoint.ServerDisco) + defer tcB.close() - dupEndpoint, err = server.AllocateEndpoint(discoA.Public(), discoB.Public()) - if err != nil { - t.Fatal(err) - } - // We expect the same endpoint details post-handshake. - if diff := cmp.Diff(dupEndpoint, endpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { - t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) - } + tcA.handshake(t) + tcB.handshake(t) - txToB := []byte{1, 2, 3} - tcA.writeDataPkt(t, txToB) - rxFromA := tcB.readDataPkt(t) - if !bytes.Equal(txToB, rxFromA) { - t.Fatal("unexpected msg A->B") - } + dupEndpoint, err = server.AllocateEndpoint(discoA.Public(), discoB.Public()) + if err != nil { + t.Fatal(err) + } + // We expect the same endpoint details post-handshake. 
+ if diff := cmp.Diff(dupEndpoint, endpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { + t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) + } - txToA := []byte{4, 5, 6} - tcB.writeDataPkt(t, txToA) - rxFromB := tcA.readDataPkt(t) - if !bytes.Equal(txToA, rxFromB) { - t.Fatal("unexpected msg B->A") - } + txToB := []byte{1, 2, 3} + tcA.writeDataPkt(t, txToB) + rxFromA := tcB.readDataPkt(t) + if !bytes.Equal(txToB, rxFromA) { + t.Fatal("unexpected msg A->B") + } - tcAOnNewPort := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, discoB.Public(), endpoint.ServerDisco) - tcAOnNewPort.handshakeGeneration = tcA.handshakeGeneration + 1 - defer tcAOnNewPort.close() + txToA := []byte{4, 5, 6} + tcB.writeDataPkt(t, txToA) + rxFromB := tcA.readDataPkt(t) + if !bytes.Equal(txToA, rxFromB) { + t.Fatal("unexpected msg B->A") + } - // Handshake client A on a new source IP:port, verify we receive packets on the new binding - tcAOnNewPort.handshake(t) - txToAOnNewPort := []byte{7, 8, 9} - tcB.writeDataPkt(t, txToAOnNewPort) - rxFromB = tcAOnNewPort.readDataPkt(t) - if !bytes.Equal(txToAOnNewPort, rxFromB) { - t.Fatal("unexpected msg B->A") - } + tcAOnNewPort := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, discoB.Public(), endpoint.ServerDisco) + tcAOnNewPort.handshakeGeneration = tcA.handshakeGeneration + 1 + defer tcAOnNewPort.close() + + // Handshake client A on a new source IP:port, verify we receive packets on the new binding + tcAOnNewPort.handshake(t) + txToAOnNewPort := []byte{7, 8, 9} + tcB.writeDataPkt(t, txToAOnNewPort) + rxFromB = tcAOnNewPort.readDataPkt(t) + if !bytes.Equal(txToAOnNewPort, rxFromB) { + t.Fatal("unexpected msg B->A") + } - tcBOnNewPort := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, discoA.Public(), endpoint.ServerDisco) - tcBOnNewPort.handshakeGeneration = tcB.handshakeGeneration + 1 - defer tcBOnNewPort.close() + tcBOnNewPort := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, discoA.Public(), endpoint.ServerDisco) + tcBOnNewPort.handshakeGeneration = tcB.handshakeGeneration + 1 + defer tcBOnNewPort.close() - // Handshake client B on a new source IP:port, verify we receive packets on the new binding - tcBOnNewPort.handshake(t) - txToBOnNewPort := []byte{7, 8, 9} - tcAOnNewPort.writeDataPkt(t, txToBOnNewPort) - rxFromA = tcBOnNewPort.readDataPkt(t) - if !bytes.Equal(txToBOnNewPort, rxFromA) { - t.Fatal("unexpected msg A->B") + // Handshake client B on a new source IP:port, verify we receive packets on the new binding + tcBOnNewPort.handshake(t) + txToBOnNewPort := []byte{7, 8, 9} + tcAOnNewPort.writeDataPkt(t, txToBOnNewPort) + rxFromA = tcBOnNewPort.readDataPkt(t) + if !bytes.Equal(txToBOnNewPort, rxFromA) { + t.Fatal("unexpected msg A->B") + } + }) } } From 5dc11d50f787026055a0125f536e87287ce6899e Mon Sep 17 00:00:00 2001 From: David Bond Date: Thu, 3 Jul 2025 15:53:35 +0100 Subject: [PATCH 1031/1708] cmd/k8s-operator: Set login server on tsrecorder nodes (#16443) This commit modifies the recorder node reconciler to include the environment variable added in https://github.com/tailscale/corp/pull/30058 which allows for configuration of the coordination server. 
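For illustration, a small self-contained sketch (not the operator's actual wiring) of how a login server read from OPERATOR_LOGIN_SERVER ends up as TSRECORDER_LOGIN_SERVER on the recorder container. The environment variable names and the trailing-slash trim match the diff below; recorderEnv is a hypothetical, simplified stand-in for the operator's env() helper:

	package main

	import (
		"fmt"
		"os"
		"strings"

		corev1 "k8s.io/api/core/v1"
	)

	// recorderEnv is a simplified stand-in for the operator's env() helper:
	// it builds the environment for the tsrecorder container, passing along
	// the configured login server.
	func recorderEnv(loginServer string) []corev1.EnvVar {
		return []corev1.EnvVar{
			{Name: "TSRECORDER_HOSTNAME", Value: "$(POD_NAME)"},
			{Name: "TSRECORDER_LOGIN_SERVER", Value: loginServer},
		}
	}

	func main() {
		// The operator reads OPERATOR_LOGIN_SERVER once at startup and strips
		// any trailing slash, mirroring the change to operator.go below.
		loginServer := strings.TrimSuffix(os.Getenv("OPERATOR_LOGIN_SERVER"), "/")
		fmt.Println(recorderEnv(loginServer))
	}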
Updates https://github.com/tailscale/corp/issues/29847 Signed-off-by: David Bond --- cmd/k8s-operator/operator.go | 10 +++++++--- cmd/k8s-operator/tsrecorder.go | 3 ++- cmd/k8s-operator/tsrecorder_specs.go | 10 +++++++--- cmd/k8s-operator/tsrecorder_specs_test.go | 4 ++-- cmd/k8s-operator/tsrecorder_test.go | 8 ++++++-- 5 files changed, 24 insertions(+), 11 deletions(-) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index e5f7d932c..276de411c 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -82,6 +82,7 @@ func main() { tsFirewallMode = defaultEnv("PROXY_FIREWALL_MODE", "") defaultProxyClass = defaultEnv("PROXY_DEFAULT_CLASS", "") isDefaultLoadBalancer = defaultBool("OPERATOR_DEFAULT_LOAD_BALANCER", false) + loginServer = strings.TrimSuffix(defaultEnv("OPERATOR_LOGIN_SERVER", ""), "/") ) var opts []kzap.Opts @@ -115,7 +116,7 @@ func main() { hostinfo.SetApp(kubetypes.AppAPIServerProxy) } - s, tsc := initTSNet(zlog) + s, tsc := initTSNet(zlog, loginServer) defer s.Close() restConfig := config.GetConfigOrDie() apiproxy.MaybeLaunchAPIServerProxy(zlog, restConfig, s, mode) @@ -131,6 +132,7 @@ func main() { proxyTags: tags, proxyFirewallMode: tsFirewallMode, defaultProxyClass: defaultProxyClass, + loginServer: loginServer, } runReconcilers(rOpts) } @@ -138,14 +140,13 @@ func main() { // initTSNet initializes the tsnet.Server and logs in to Tailscale. It uses the // CLIENT_ID_FILE and CLIENT_SECRET_FILE environment variables to authenticate // with Tailscale. -func initTSNet(zlog *zap.SugaredLogger) (*tsnet.Server, tsClient) { +func initTSNet(zlog *zap.SugaredLogger, loginServer string) (*tsnet.Server, tsClient) { var ( clientIDPath = defaultEnv("CLIENT_ID_FILE", "") clientSecretPath = defaultEnv("CLIENT_SECRET_FILE", "") hostname = defaultEnv("OPERATOR_HOSTNAME", "tailscale-operator") kubeSecret = defaultEnv("OPERATOR_SECRET", "") operatorTags = defaultEnv("OPERATOR_INITIAL_TAGS", "tag:k8s-operator") - loginServer = strings.TrimSuffix(defaultEnv("OPERATOR_LOGIN_SERVER", ""), "/") ) startlog := zlog.Named("startup") if clientIDPath == "" || clientSecretPath == "" { @@ -610,6 +611,7 @@ func runReconcilers(opts reconcilerOpts) { l: opts.log.Named("recorder-reconciler"), clock: tstime.DefaultClock{}, tsClient: opts.tsClient, + loginServer: opts.loginServer, }) if err != nil { startlog.Fatalf("could not create Recorder reconciler: %v", err) @@ -693,6 +695,8 @@ type reconcilerOpts struct { // class for proxies that do not have a ProxyClass set. // this is defined by an operator env variable. defaultProxyClass string + // loginServer is the coordination server URL that should be used by managed resources. 
+ loginServer string } // enqueueAllIngressEgressProxySvcsinNS returns a reconcile request for each diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index cbabc1d89..ec95ecf40 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -59,6 +59,7 @@ type RecorderReconciler struct { clock tstime.Clock tsNamespace string tsClient tsClient + loginServer string mu sync.Mutex // protects following recorders set.Slice[types.UID] // for recorders gauge @@ -202,7 +203,7 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco }); err != nil { return fmt.Errorf("error creating RoleBinding: %w", err) } - ss := tsrStatefulSet(tsr, r.tsNamespace) + ss := tsrStatefulSet(tsr, r.tsNamespace, r.loginServer) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { s.ObjectMeta.Labels = ss.ObjectMeta.Labels s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations diff --git a/cmd/k8s-operator/tsrecorder_specs.go b/cmd/k8s-operator/tsrecorder_specs.go index 7c6e80aed..f5eedc2a1 100644 --- a/cmd/k8s-operator/tsrecorder_specs.go +++ b/cmd/k8s-operator/tsrecorder_specs.go @@ -17,7 +17,7 @@ import ( "tailscale.com/version" ) -func tsrStatefulSet(tsr *tsapi.Recorder, namespace string) *appsv1.StatefulSet { +func tsrStatefulSet(tsr *tsapi.Recorder, namespace string, loginServer string) *appsv1.StatefulSet { return &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: tsr.Name, @@ -59,7 +59,7 @@ func tsrStatefulSet(tsr *tsapi.Recorder, namespace string) *appsv1.StatefulSet { ImagePullPolicy: tsr.Spec.StatefulSet.Pod.Container.ImagePullPolicy, Resources: tsr.Spec.StatefulSet.Pod.Container.Resources, SecurityContext: tsr.Spec.StatefulSet.Pod.Container.SecurityContext, - Env: env(tsr), + Env: env(tsr, loginServer), EnvFrom: func() []corev1.EnvFromSource { if tsr.Spec.Storage.S3 == nil || tsr.Spec.Storage.S3.Credentials.Secret.Name == "" { return nil @@ -201,7 +201,7 @@ func tsrStateSecret(tsr *tsapi.Recorder, namespace string) *corev1.Secret { } } -func env(tsr *tsapi.Recorder) []corev1.EnvVar { +func env(tsr *tsapi.Recorder, loginServer string) []corev1.EnvVar { envs := []corev1.EnvVar{ { Name: "TS_AUTHKEY", @@ -239,6 +239,10 @@ func env(tsr *tsapi.Recorder) []corev1.EnvVar { Name: "TSRECORDER_HOSTNAME", Value: "$(POD_NAME)", }, + { + Name: "TSRECORDER_LOGIN_SERVER", + Value: loginServer, + }, } for _, env := range tsr.Spec.StatefulSet.Pod.Container.Env { diff --git a/cmd/k8s-operator/tsrecorder_specs_test.go b/cmd/k8s-operator/tsrecorder_specs_test.go index 94a8a816c..49332d09b 100644 --- a/cmd/k8s-operator/tsrecorder_specs_test.go +++ b/cmd/k8s-operator/tsrecorder_specs_test.go @@ -90,7 +90,7 @@ func TestRecorderSpecs(t *testing.T) { }, } - ss := tsrStatefulSet(tsr, tsNamespace) + ss := tsrStatefulSet(tsr, tsNamespace, tsLoginServer) // StatefulSet-level. if diff := cmp.Diff(ss.Annotations, tsr.Spec.StatefulSet.Annotations); diff != "" { @@ -124,7 +124,7 @@ func TestRecorderSpecs(t *testing.T) { } // Container-level. 
- if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Env, env(tsr)); diff != "" { + if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Env, env(tsr, tsLoginServer)); diff != "" { t.Errorf("(-got +want):\n%s", diff) } if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Image, tsr.Spec.StatefulSet.Pod.Container.Image); diff != "" { diff --git a/cmd/k8s-operator/tsrecorder_test.go b/cmd/k8s-operator/tsrecorder_test.go index e6d56ef2f..990bd6819 100644 --- a/cmd/k8s-operator/tsrecorder_test.go +++ b/cmd/k8s-operator/tsrecorder_test.go @@ -25,7 +25,10 @@ import ( "tailscale.com/tstest" ) -const tsNamespace = "tailscale" +const ( + tsNamespace = "tailscale" + tsLoginServer = "example.tailscale.com" +) func TestRecorder(t *testing.T) { tsr := &tsapi.Recorder{ @@ -51,6 +54,7 @@ func TestRecorder(t *testing.T) { recorder: fr, l: zl.Sugar(), clock: cl, + loginServer: tsLoginServer, } t.Run("invalid_spec_gives_an_error_condition", func(t *testing.T) { @@ -234,7 +238,7 @@ func expectRecorderResources(t *testing.T, fc client.WithWatch, tsr *tsapi.Recor role := tsrRole(tsr, tsNamespace) roleBinding := tsrRoleBinding(tsr, tsNamespace) serviceAccount := tsrServiceAccount(tsr, tsNamespace) - statefulSet := tsrStatefulSet(tsr, tsNamespace) + statefulSet := tsrStatefulSet(tsr, tsNamespace, tsLoginServer) if shouldExist { expectEqual(t, fc, auth) From 1a2185b1ee2d96ade04fb9f4e43eff5915b9b22a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 2 Jul 2025 19:06:54 -0500 Subject: [PATCH 1032/1708] ipn/ipnlocal: rename setAutoExitNodeIDLockedOnEntry to pickNewAutoExitNode; drop old function Currently, (*LocalBackend).pickNewAutoExitNode() is just a wrapper around setAutoExitNodeIDLockedOnEntry that sends a prefs-change notification at the end. It doesn't need to do that, since setPrefsLockedOnEntry already sends the notification (setAutoExitNodeIDLockedOnEntry calls it via editPrefsLockedOnEntry). This PR removes the old pickNewAutoExitNode function and renames setAutoExitNodeIDLockedOnEntry to pickNewAutoExitNode for clarity. Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 33 ++++++++------------------------- 1 file changed, 8 insertions(+), 25 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9c16d55af..bea5085b7 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2001,20 +2001,6 @@ func mutationsAreWorthyOfTellingIPNBus(muts []netmap.NodeMutation) bool { return false } -// pickNewAutoExitNode picks a new automatic exit node if needed. -func (b *LocalBackend) pickNewAutoExitNode() { - unlock := b.lockAndGetUnlock() - defer unlock() - - newPrefs := b.setAutoExitNodeIDLockedOnEntry(unlock) - if !newPrefs.Valid() { - // Unchanged. - return - } - - b.send(ipn.Notify{Prefs: &newPrefs}) -} - // setExitNodeID updates prefs to reference an exit node by ID, rather // than by IP. It returns whether prefs was mutated. func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bool) { @@ -5840,40 +5826,37 @@ func (b *LocalBackend) setNetInfo(ni *tailcfg.NetInfo) { } cc.SetNetInfo(ni) if refresh { - unlock := b.lockAndGetUnlock() - defer unlock() - b.setAutoExitNodeIDLockedOnEntry(unlock) + b.pickNewAutoExitNode() } } -func (b *LocalBackend) setAutoExitNodeIDLockedOnEntry(unlock unlockOnce) (newPrefs ipn.PrefsView) { - var zero ipn.PrefsView +// pickNewAutoExitNode picks a new automatic exit node if needed. 
+func (b *LocalBackend) pickNewAutoExitNode() { + unlock := b.lockAndGetUnlock() defer unlock() prefs := b.pm.CurrentPrefs() if !prefs.Valid() { b.logf("[unexpected]: received tailnet exit node ID pref change callback but current prefs are nil") - return zero + return } prefsClone := prefs.AsStruct() newSuggestion, err := b.suggestExitNodeLocked(nil) if err != nil { b.logf("setAutoExitNodeID: %v", err) - return zero + return } if prefsClone.ExitNodeID == newSuggestion.ID { - return zero + return } prefsClone.ExitNodeID = newSuggestion.ID - newPrefs, err = b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ + _, err = b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ Prefs: *prefsClone, ExitNodeIDSet: true, }, unlock) if err != nil { b.logf("setAutoExitNodeID: failed to apply exit node ID preference: %v", err) - return zero } - return newPrefs } // setNetMapLocked updates the LocalBackend state to reflect the newly From 56d772bd63e5caf711ec7ffe63967d05e33307df Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 2 Jul 2025 19:16:39 -0500 Subject: [PATCH 1033/1708] ipn/ipnlocal: simplify pickNewAutoExitNode (*profileManager).CurrentPrefs() is always valid. Additionally, there's no value in cloning and passing the full ipn.Prefs when editing preferences. Instead, ipn.MaskedPrefs should only have ExitNodeID set. Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index bea5085b7..adc0af5cd 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5835,23 +5835,16 @@ func (b *LocalBackend) pickNewAutoExitNode() { unlock := b.lockAndGetUnlock() defer unlock() - prefs := b.pm.CurrentPrefs() - if !prefs.Valid() { - b.logf("[unexpected]: received tailnet exit node ID pref change callback but current prefs are nil") - return - } - prefsClone := prefs.AsStruct() newSuggestion, err := b.suggestExitNodeLocked(nil) if err != nil { b.logf("setAutoExitNodeID: %v", err) return } - if prefsClone.ExitNodeID == newSuggestion.ID { + if b.pm.CurrentPrefs().ExitNodeID() == newSuggestion.ID { return } - prefsClone.ExitNodeID = newSuggestion.ID _, err = b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ - Prefs: *prefsClone, + Prefs: ipn.Prefs{ExitNodeID: newSuggestion.ID}, ExitNodeIDSet: true, }, unlock) if err != nil { From 6ecc25b26a8edf191cfbebe2f16254468b1f1695 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 3 Jul 2025 11:50:27 -0500 Subject: [PATCH 1034/1708] ipn/ipnlocal: skip TestUpdateNetmapDeltaAutoExitNode suggestExitNode never checks whether an exit node candidate is online. It also accepts a full netmap, which doesn't include changes from delta updates. The test can't work correctly until both issues are fixed. Previously, it passed only because the test itself is flawed. It doesn't succeed because the currently selected node goes offline and a new one is chosen. Instead, it succeeds because lastSuggestedExitNode is incorrect, and suggestExitNode picks the correct node the first time it runs, based on the DERP map and the netcheck report. The node in exitNodeIDWant just happens to be the optimal choice. Fixing SuggestExitNode requires refactoring its callers first, which in turn reveals the flawed test, as suggestExitNode ends up being called slightly earlier. In this PR, we update the test to correctly fail due to existing bugs in SuggestExitNode, and temporarily skip it until those issues are addressed in a future commit. 
Updates #16455 Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local_test.go | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 47e5fa37d..06acd85ce 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1918,8 +1918,10 @@ func TestSetExitNodeIDPolicy(t *testing.T) { } func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { - peer1 := makePeer(1, withCap(26), withSuggest(), withExitRoutes()) - peer2 := makePeer(2, withCap(26), withSuggest(), withExitRoutes()) + t.Skip("TODO(tailscale/tailscale#16455): suggestExitNode does not check for online status of exit nodes") + + peer1 := makePeer(1, withCap(26), withSuggest(), withOnline(true), withExitRoutes()) + peer2 := makePeer(2, withCap(26), withSuggest(), withOnline(true), withExitRoutes()) derpMap := &tailcfg.DERPMap{ Regions: map[int]*tailcfg.DERPRegion{ 1: { @@ -1958,8 +1960,10 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { }{ { // selected auto exit node goes offline - name: "exit-node-goes-offline", - lastSuggestedExitNode: peer1.StableID(), + name: "exit-node-goes-offline", + // PreferredDERP is 2, and it's also the region with the lowest latency. + // So, peer2 should be selected as the exit node. + lastSuggestedExitNode: peer2.StableID(), netmap: &netmap.NetworkMap{ Peers: []tailcfg.NodeView{ peer1, @@ -1970,14 +1974,14 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { muts: []*tailcfg.PeerChange{ { NodeID: 1, - Online: ptr.To(false), + Online: ptr.To(true), }, { NodeID: 2, - Online: ptr.To(true), + Online: ptr.To(false), // the selected exit node goes offline }, }, - exitNodeIDWant: peer2.StableID(), + exitNodeIDWant: peer1.StableID(), report: report, }, { @@ -1994,7 +1998,7 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { muts: []*tailcfg.PeerChange{ { NodeID: 1, - Online: ptr.To(false), + Online: ptr.To(false), // a different exit node goes offline }, { NodeID: 2, From 009882298135672522e0fa9dac1b9fe32a71581a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 3 Jul 2025 11:51:27 -0500 Subject: [PATCH 1035/1708] ipn/ipnlocal: update suggestExitNode to skip offline candidates and fix TestSetControlClientStatusAutoExitNode TestSetControlClientStatusAutoExitNode is broken similarly to TestUpdateNetmapDeltaAutoExitNode as suggestExitNode didn't previously check the online status of exit nodes, and similarly to the other test it succeeded because the test itself is also broken. However, it is easier to fix as it sends out a full netmap update rather than a delta peer update, so it doesn't depend on the same refactoring as TestSetControlClientStatusAutoExitNode. 
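To make the selection rule concrete, a tiny self-contained sketch using simplified stand-in types rather than the real tailcfg.NodeView API: invalid or offline peers are never considered as exit-node candidates, mirroring the one-line guard added to suggestExitNode in the diff below. peerInfo and exitNodeCandidates are hypothetical names used only for this illustration:

	package main

	import "fmt"

	// peerInfo is a simplified stand-in for tailcfg.NodeView, carrying only
	// the two properties the candidate filter cares about.
	type peerInfo struct {
		Name   string
		Valid  bool
		Online bool
	}

	// exitNodeCandidates keeps only peers that are valid and currently
	// online, mirroring the guard suggestExitNode now applies before scoring
	// candidates by latency and DERP region.
	func exitNodeCandidates(peers []peerInfo) []peerInfo {
		var out []peerInfo
		for _, p := range peers {
			if !p.Valid || !p.Online {
				continue
			}
			out = append(out, p)
		}
		return out
	}

	func main() {
		peers := []peerInfo{
			{Name: "peer1", Valid: true, Online: true},
			{Name: "peer2", Valid: true, Online: false}, // offline: never suggested
		}
		fmt.Println(exitNodeCandidates(peers)) // prints only peer1
	}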
Updates #16455 Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 2 +- ipn/ipnlocal/local_test.go | 24 ++++++++++++++---------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index adc0af5cd..8889fa90b 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7433,7 +7433,7 @@ func suggestExitNode(report *netcheck.Report, netMap *netmap.NetworkMap, prevSug } candidates := make([]tailcfg.NodeView, 0, len(netMap.Peers)) for _, peer := range netMap.Peers { - if !peer.Valid() { + if !peer.Valid() || !peer.Online().Get() { continue } if allowList != nil && !allowList.Contains(peer.StableID()) { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 06acd85ce..ca968ccd7 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2166,8 +2166,8 @@ func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { } func TestSetControlClientStatusAutoExitNode(t *testing.T) { - peer1 := makePeer(1, withCap(26), withSuggest(), withExitRoutes(), withNodeKey()) - peer2 := makePeer(2, withCap(26), withSuggest(), withExitRoutes(), withNodeKey()) + peer1 := makePeer(1, withCap(26), withSuggest(), withExitRoutes(), withOnline(true), withNodeKey()) + peer2 := makePeer(2, withCap(26), withSuggest(), withExitRoutes(), withOnline(true), withNodeKey()) derpMap := &tailcfg.DERPMap{ Regions: map[int]*tailcfg.DERPRegion{ 1: { @@ -2210,22 +2210,25 @@ func TestSetControlClientStatusAutoExitNode(t *testing.T) { )) syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) b.currentNode().SetNetMap(nm) - b.lastSuggestedExitNode = peer1.StableID() + // Peer 2 should be the initial exit node, as it's better than peer 1 + // in terms of latency and DERP region. + b.lastSuggestedExitNode = peer2.StableID() b.sys.MagicSock.Get().SetLastNetcheckReportForTest(b.ctx, report) b.SetPrefsForTest(b.pm.CurrentPrefs().AsStruct()) - firstExitNode := b.Prefs().ExitNodeID() - newPeer1 := makePeer(1, withCap(26), withSuggest(), withExitRoutes(), withOnline(false), withNodeKey()) + offlinePeer2 := makePeer(2, withCap(26), withSuggest(), withExitRoutes(), withOnline(false), withNodeKey()) updatedNetmap := &netmap.NetworkMap{ Peers: []tailcfg.NodeView{ - newPeer1, - peer2, + peer1, + offlinePeer2, }, DERPMap: derpMap, } b.SetControlClientStatus(b.cc, controlclient.Status{NetMap: updatedNetmap}) - lastExitNode := b.Prefs().ExitNodeID() - if firstExitNode == lastExitNode { - t.Errorf("did not switch exit nodes despite auto exit node going offline") + // But now that peer 2 is offline, we should switch to peer 1. 
+ wantExitNode := peer1.StableID() + gotExitNode := b.Prefs().ExitNodeID() + if gotExitNode != wantExitNode { + t.Errorf("did not switch exit nodes despite auto exit node going offline: got %q; want %q", gotExitNode, wantExitNode) } } @@ -3289,6 +3292,7 @@ func makePeer(id tailcfg.NodeID, opts ...peerOptFunc) tailcfg.NodeView { Key: makeNodeKeyFromID(id), StableID: tailcfg.StableNodeID(fmt.Sprintf("stable%d", id)), Name: fmt.Sprintf("peer%d", id), + Online: ptr.To(true), HomeDERP: int(id), } for _, opt := range opts { From a8055b5f40c625777e6e13dd504a110c223bc8fb Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 3 Jul 2025 12:21:29 -0500 Subject: [PATCH 1036/1708] cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection With this change, policy enforcement and exit node resolution can happen in separate steps, since enforcement no longer depends on resolving the suggested exit node. This keeps policy enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution to be asynchronous on netmap updates, link changes, etc. Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode after a manual exit node override, which is necessary for tailscale/corp#29969. Updates tailscale/corp#29969 Updates #16459 Signed-off-by: Nick Khyl --- cmd/tailscale/cli/cli_test.go | 4 + ipn/ipn_clone.go | 1 + ipn/ipn_view.go | 2 + ipn/ipnlocal/local.go | 189 +++++++++--- ipn/ipnlocal/local_test.go | 527 ++++++++++++++++++++++++++++++++-- ipn/ipnlocal/state_test.go | 106 +++++-- ipn/prefs.go | 45 +++ ipn/prefs_test.go | 12 + 8 files changed, 793 insertions(+), 93 deletions(-) diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 9aa3693fd..48121c7d9 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -971,6 +971,10 @@ func TestPrefFlagMapping(t *testing.T) { // Used internally by LocalBackend as part of exit node usage toggling. // No CLI flag for this. continue + case "AutoExitNode": + // TODO(nickkhyl): should be handled by tailscale {set,up} --exit-node. + // See tailscale/tailscale#16459. 
+ continue } t.Errorf("unexpected new ipn.Pref field %q is not handled by up.go (see addPrefFlagMapping and checkForAccidentalSettingReverts)", prefName) } diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 65438444e..3d67efc6f 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -74,6 +74,7 @@ var _PrefsCloneNeedsRegeneration = Prefs(struct { RouteAll bool ExitNodeID tailcfg.StableNodeID ExitNodeIP netip.Addr + AutoExitNode ExitNodeExpression InternalExitNodePrior tailcfg.StableNodeID ExitNodeAllowLANAccess bool CorpDNS bool diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 871270b85..1d31ced9d 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -135,6 +135,7 @@ func (v PrefsView) ControlURL() string { return v.ж.Co func (v PrefsView) RouteAll() bool { return v.ж.RouteAll } func (v PrefsView) ExitNodeID() tailcfg.StableNodeID { return v.ж.ExitNodeID } func (v PrefsView) ExitNodeIP() netip.Addr { return v.ж.ExitNodeIP } +func (v PrefsView) AutoExitNode() ExitNodeExpression { return v.ж.AutoExitNode } func (v PrefsView) InternalExitNodePrior() tailcfg.StableNodeID { return v.ж.InternalExitNodePrior } func (v PrefsView) ExitNodeAllowLANAccess() bool { return v.ж.ExitNodeAllowLANAccess } func (v PrefsView) CorpDNS() bool { return v.ж.CorpDNS } @@ -179,6 +180,7 @@ var _PrefsViewNeedsRegeneration = Prefs(struct { RouteAll bool ExitNodeID tailcfg.StableNodeID ExitNodeIP netip.Addr + AutoExitNode ExitNodeExpression InternalExitNodePrior tailcfg.StableNodeID ExitNodeAllowLANAccess bool CorpDNS bool diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8889fa90b..21057c0e6 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -912,13 +912,14 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { hadPAC := b.prevIfState.HasPAC() b.prevIfState = ifst b.pauseOrResumeControlClientLocked() - if delta.Major && shouldAutoExitNode() { + prefs := b.pm.CurrentPrefs() + if delta.Major && prefs.AutoExitNode().IsSet() { b.refreshAutoExitNode = true } var needReconfig bool // If the network changed and we're using an exit node and allowing LAN access, we may need to reconfigure. - if delta.Major && b.pm.CurrentPrefs().ExitNodeID() != "" && b.pm.CurrentPrefs().ExitNodeAllowLANAccess() { + if delta.Major && prefs.ExitNodeID() != "" && prefs.ExitNodeAllowLANAccess() { b.logf("linkChange: in state %v; updating LAN routes", b.state) needReconfig = true } @@ -941,8 +942,8 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { // If the local network configuration has changed, our filter may // need updating to tweak default routes. - b.updateFilterLocked(b.pm.CurrentPrefs()) - updateExitNodeUsageWarning(b.pm.CurrentPrefs(), delta.New, b.health) + b.updateFilterLocked(prefs) + updateExitNodeUsageWarning(prefs, delta.New, b.health) cn := b.currentNode() nm := cn.NetMap() @@ -1623,17 +1624,17 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control prefsChanged = true } } - if shouldAutoExitNode() { + if applySysPolicy(prefs, b.overrideAlwaysOn) { + prefsChanged = true + } + if prefs.AutoExitNode.IsSet() { // Re-evaluate exit node suggestion in case circumstances have changed. 
_, err := b.suggestExitNodeLocked(curNetMap) if err != nil && !errors.Is(err, ErrNoPreferredDERP) { b.logf("SetControlClientStatus failed to select auto exit node: %v", err) } } - if applySysPolicy(prefs, b.lastSuggestedExitNode, b.overrideAlwaysOn) { - prefsChanged = true - } - if setExitNodeID(prefs, curNetMap) { + if setExitNodeID(prefs, b.lastSuggestedExitNode, curNetMap) { prefsChanged = true } @@ -1800,7 +1801,7 @@ var preferencePolicies = []preferencePolicyInfo{ // applySysPolicy overwrites configured preferences with policies that may be // configured by the system administrator in an OS-specific way. -func applySysPolicy(prefs *ipn.Prefs, lastSuggestedExitNode tailcfg.StableNodeID, overrideAlwaysOn bool) (anyChange bool) { +func applySysPolicy(prefs *ipn.Prefs, overrideAlwaysOn bool) (anyChange bool) { if controlURL, err := syspolicy.GetString(syspolicy.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { prefs.ControlURL = controlURL anyChange = true @@ -1839,21 +1840,51 @@ func applySysPolicy(prefs *ipn.Prefs, lastSuggestedExitNode tailcfg.StableNodeID if exitNodeIDStr, _ := syspolicy.GetString(syspolicy.ExitNodeID, ""); exitNodeIDStr != "" { exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) - if shouldAutoExitNode() && lastSuggestedExitNode != "" { - exitNodeID = lastSuggestedExitNode - } - // Note: when exitNodeIDStr == "auto" && lastSuggestedExitNode == "", - // then exitNodeID is now "auto" which will never match a peer's node ID. - // When there is no a peer matching the node ID, traffic will blackhole, - // preventing accidental non-exit-node usage when a policy is in effect that requires an exit node. - if prefs.ExitNodeID != exitNodeID || prefs.ExitNodeIP.IsValid() { + + // Try to parse the policy setting value as an "auto:"-prefixed [ipn.ExitNodeExpression], + // and update prefs if it differs from the current one. + // This includes cases where it was previously an expression but no longer is, + // or where it wasn't before but now is. + autoExitNode, useAutoExitNode := parseAutoExitNodeID(exitNodeID) + if prefs.AutoExitNode != autoExitNode { + prefs.AutoExitNode = autoExitNode + anyChange = true + } + // Additionally, if the specified exit node ID is an expression, + // meaning an exit node is required but we don't yet have a valid exit node ID, + // we should set exitNodeID to a value that is never a valid [tailcfg.StableNodeID], + // to install a blackhole route and prevent accidental non-exit-node usage + // until the expression is evaluated and an actual exit node is selected. + // We use "auto:any" for this purpose, primarily for compatibility with + // older clients (in case a user downgrades to an earlier version) + // and GUIs/CLIs that have special handling for it. + if useAutoExitNode { + exitNodeID = unresolvedExitNodeID + } + + // If the current exit node ID doesn't match the one enforced by the policy setting, + // and the policy either requires a specific exit node ID, + // or requires an auto exit node ID and the current one isn't allowed, + // then update the exit node ID. + if prefs.ExitNodeID != exitNodeID { + if !useAutoExitNode || !isAllowedAutoExitNodeID(prefs.ExitNodeID) { + prefs.ExitNodeID = exitNodeID + anyChange = true + } + } + + // If the exit node IP is set, clear it. When ExitNodeIP is set in the prefs, + // it takes precedence over the ExitNodeID. 
+ if prefs.ExitNodeIP.IsValid() { + prefs.ExitNodeIP = netip.Addr{} anyChange = true } - prefs.ExitNodeID = exitNodeID - prefs.ExitNodeIP = netip.Addr{} } else if exitNodeIPStr, _ := syspolicy.GetString(syspolicy.ExitNodeIP, ""); exitNodeIPStr != "" { - exitNodeIP, err := netip.ParseAddr(exitNodeIPStr) - if exitNodeIP.IsValid() && err == nil { + if prefs.AutoExitNode != "" { + prefs.AutoExitNode = "" // mutually exclusive with ExitNodeIP + anyChange = true + } + if exitNodeIP, err := netip.ParseAddr(exitNodeIPStr); err == nil { if prefs.ExitNodeID != "" || prefs.ExitNodeIP != exitNodeIP { anyChange = true } @@ -1901,7 +1932,7 @@ func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { func (b *LocalBackend) applySysPolicy() (_ ipn.PrefsView, anyChange bool) { unlock := b.lockAndGetUnlock() prefs := b.pm.CurrentPrefs().AsStruct() - if !applySysPolicy(prefs, b.lastSuggestedExitNode, b.overrideAlwaysOn) { + if !applySysPolicy(prefs, b.overrideAlwaysOn) { unlock.UnlockEarly() return prefs.View(), false } @@ -1957,8 +1988,8 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo // If auto exit nodes are enabled and our exit node went offline, // we need to schedule picking a new one. // TODO(nickkhyl): move the auto exit node logic to a feature package. - if shouldAutoExitNode() { - exitNodeID := b.pm.prefs.ExitNodeID() + if prefs := b.pm.CurrentPrefs(); prefs.AutoExitNode().IsSet() { + exitNodeID := prefs.ExitNodeID() for _, m := range muts { mo, ok := m.(netmap.NodeMutationOnline) if !ok || mo.Online { @@ -2001,9 +2032,27 @@ func mutationsAreWorthyOfTellingIPNBus(muts []netmap.NodeMutation) bool { return false } -// setExitNodeID updates prefs to reference an exit node by ID, rather +// setExitNodeID updates prefs to either use the suggestedExitNodeID if AutoExitNode is enabled, +// or resolve ExitNodeIP to an ID and use that. It returns whether prefs was mutated. +func setExitNodeID(prefs *ipn.Prefs, suggestedExitNodeID tailcfg.StableNodeID, nm *netmap.NetworkMap) (prefsChanged bool) { + if prefs.AutoExitNode.IsSet() { + newExitNodeID := cmp.Or(suggestedExitNodeID, unresolvedExitNodeID) + if prefs.ExitNodeID != newExitNodeID { + prefs.ExitNodeID = newExitNodeID + prefsChanged = true + } + if prefs.ExitNodeIP.IsValid() { + prefs.ExitNodeIP = netip.Addr{} + prefsChanged = true + } + return prefsChanged + } + return resolveExitNodeIP(prefs, nm) +} + +// resolveExitNodeIP updates prefs to reference an exit node by ID, rather // than by IP. It returns whether prefs was mutated. -func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bool) { +func resolveExitNodeIP(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bool) { if nm == nil { // No netmap, can't resolve anything. return false @@ -2265,8 +2314,8 @@ func (b *LocalBackend) Start(opts ipn.Options) error { // And also apply syspolicy settings to the current profile. // This is important in two cases: when opts.UpdatePrefs is not nil, // and when Always Mode is enabled and we need to set WantRunning to true. 
- if newp := b.pm.CurrentPrefs().AsStruct(); applySysPolicy(newp, b.lastSuggestedExitNode, b.overrideAlwaysOn) { - setExitNodeID(newp, cn.NetMap()) + if newp := b.pm.CurrentPrefs().AsStruct(); applySysPolicy(newp, b.overrideAlwaysOn) { + setExitNodeID(newp, b.lastSuggestedExitNode, cn.NetMap()) b.pm.setPrefsNoPermCheck(newp.View()) } prefs := b.pm.CurrentPrefs() @@ -4187,12 +4236,23 @@ func (b *LocalBackend) SetUseExitNodeEnabled(v bool) (ipn.PrefsView, error) { mp := &ipn.MaskedPrefs{} if v { mp.ExitNodeIDSet = true - mp.ExitNodeID = tailcfg.StableNodeID(p0.InternalExitNodePrior()) + mp.ExitNodeID = p0.InternalExitNodePrior() + if expr, ok := parseAutoExitNodeID(mp.ExitNodeID); ok { + mp.AutoExitNodeSet = true + mp.AutoExitNode = expr + mp.ExitNodeID = unresolvedExitNodeID + } } else { mp.ExitNodeIDSet = true mp.ExitNodeID = "" + mp.AutoExitNodeSet = true + mp.AutoExitNode = "" mp.InternalExitNodePriorSet = true - mp.InternalExitNodePrior = p0.ExitNodeID() + if p0.AutoExitNode().IsSet() { + mp.InternalExitNodePrior = tailcfg.StableNodeID(autoExitNodePrefix + p0.AutoExitNode()) + } else { + mp.InternalExitNodePrior = p0.ExitNodeID() + } } return b.editPrefsLockedOnEntry(mp, unlock) } @@ -4229,6 +4289,13 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip mp.InternalExitNodePriorSet = true } + // Disable automatic exit node selection if the user explicitly sets + // ExitNodeID or ExitNodeIP. + if mp.ExitNodeIDSet || mp.ExitNodeIPSet { + mp.AutoExitNodeSet = true + mp.AutoExitNode = "" + } + // Acquire the lock before checking the profile access to prevent // TOCTOU issues caused by the current profile changing between the // check and the actual edit. @@ -4428,9 +4495,14 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) // applySysPolicy returns whether it updated newp, // but everything in this function treats b.prefs as completely new // anyway, so its return value can be ignored here. - applySysPolicy(newp, b.lastSuggestedExitNode, b.overrideAlwaysOn) + applySysPolicy(newp, b.overrideAlwaysOn) + if newp.AutoExitNode.IsSet() { + if _, err := b.suggestExitNodeLocked(nil); err != nil { + b.logf("failed to select auto exit node: %v", err) + } + } // setExitNodeID does likewise. No-op if no exit node resolution is needed. - setExitNodeID(newp, netMap) + setExitNodeID(newp, b.lastSuggestedExitNode, netMap) // We do this to avoid holding the lock while doing everything else. @@ -7630,10 +7702,53 @@ func longLatDistance(fromLat, fromLong, toLat, toLong float64) float64 { return earthRadiusMeters * c } -// shouldAutoExitNode checks for the auto exit node MDM policy. -func shouldAutoExitNode() bool { - exitNodeIDStr, _ := syspolicy.GetString(syspolicy.ExitNodeID, "") - return exitNodeIDStr == "auto:any" +const ( + // autoExitNodePrefix is the prefix used in [syspolicy.ExitNodeID] values + // to indicate that the string following the prefix is an [ipn.ExitNodeExpression]. + autoExitNodePrefix = "auto:" + + // unresolvedExitNodeID is a special [tailcfg.StableNodeID] value + // used as an exit node ID to install a blackhole route, preventing + // accidental non-exit-node usage until the [ipn.ExitNodeExpression] + // is evaluated and an actual exit node is selected. + // + // We use "auto:any" for compatibility with older, pre-[ipn.ExitNodeExpression] + // clients that have been using "auto:any" for this purpose for a long time. 
+ unresolvedExitNodeID tailcfg.StableNodeID = "auto:any" +) + +// isAutoExitNodeID reports whether the given [tailcfg.StableNodeID] is +// actually an "auto:"-prefixed [ipn.ExitNodeExpression]. +func isAutoExitNodeID(id tailcfg.StableNodeID) bool { + _, ok := parseAutoExitNodeID(id) + return ok +} + +// parseAutoExitNodeID attempts to parse the given [tailcfg.StableNodeID] +// as an [ExitNodeExpression]. +// +// It returns the parsed expression and true on success, +// or an empty string and false if the input does not appear to be +// an [ExitNodeExpression] (i.e., it doesn't start with "auto:"). +// +// It is mainly used to parse the [syspolicy.ExitNodeID] value +// when it is set to "auto:" (e.g., auto:any). +func parseAutoExitNodeID(id tailcfg.StableNodeID) (_ ipn.ExitNodeExpression, ok bool) { + if expr, ok := strings.CutPrefix(string(id), autoExitNodePrefix); ok && expr != "" { + return ipn.ExitNodeExpression(expr), true + } + return "", false +} + +func isAllowedAutoExitNodeID(exitNodeID tailcfg.StableNodeID) bool { + if exitNodeID == "" { + return false // an exit node is required + } + if nodes, _ := syspolicy.GetStringArray(syspolicy.AllowedSuggestedExitNodes, nil); nodes != nil { + return slices.Contains(nodes, string(exitNodeID)) + + } + return true // no policy configured; allow all exit nodes } // startAutoUpdate triggers an auto-update attempt. The actual update happens diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index ca968ccd7..5c9c9f2fa 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -24,6 +24,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" memro "go4.org/mem" "go4.org/netipx" "golang.org/x/net/dns/dnsmessage" @@ -590,6 +591,391 @@ func TestSetUseExitNodeEnabled(t *testing.T) { } } +func makeExitNode(id tailcfg.NodeID, opts ...peerOptFunc) tailcfg.NodeView { + return makePeer(id, append([]peerOptFunc{withCap(26), withSuggest(), withExitRoutes()}, opts...)...) 
+} + +func TestConfigureExitNode(t *testing.T) { + controlURL := "https://localhost:1/" + exitNode1 := makeExitNode(1, withName("node-1"), withDERP(1), withAddresses(netip.MustParsePrefix("100.64.1.1/32"))) + exitNode2 := makeExitNode(2, withName("node-2"), withDERP(2), withAddresses(netip.MustParsePrefix("100.64.1.2/32"))) + selfNode := makeExitNode(3, withName("node-3"), withDERP(1), withAddresses(netip.MustParsePrefix("100.64.1.3/32"))) + clientNetmap := buildNetmapWithPeers(selfNode, exitNode1, exitNode2) + + report := &netcheck.Report{ + RegionLatency: map[int]time.Duration{ + 1: 5 * time.Millisecond, + 2: 10 * time.Millisecond, + }, + PreferredDERP: 1, + } + + tests := []struct { + name string + prefs ipn.Prefs + netMap *netmap.NetworkMap + report *netcheck.Report + changePrefs *ipn.MaskedPrefs + useExitNodeEnabled *bool + exitNodeIDPolicy *tailcfg.StableNodeID + exitNodeIPPolicy *netip.Addr + wantPrefs ipn.Prefs + }{ + { + name: "exit-node-id-via-prefs", // set exit node ID via prefs + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ExitNodeID: exitNode1.StableID()}, + ExitNodeIDSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + }, + { + name: "exit-node-ip-via-prefs", // set exit node IP via prefs (should be resolved to an ID) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ExitNodeIP: exitNode1.Addresses().At(0).Addr()}, + ExitNodeIPSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + }, + { + name: "auto-exit-node-via-prefs/any", // set auto exit node via prefs + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "any"}, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + AutoExitNode: "any", + }, + }, + { + name: "auto-exit-node-via-prefs/set-exit-node-id-via-prefs", // setting exit node ID explicitly should disable auto exit node + prefs: ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: "any", + ExitNodeID: exitNode1.StableID(), + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ExitNodeID: exitNode2.StableID()}, + ExitNodeIDSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), + AutoExitNode: "", // should be unset + }, + }, + { + name: "auto-exit-node-via-prefs/any/no-report", // set auto exit node via prefs, but no report means we can't resolve the exit node ID + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "any"}, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: unresolvedExitNodeID, // cannot resolve; traffic will be dropped + AutoExitNode: "any", + }, + }, + { + name: "auto-exit-node-via-prefs/any/no-netmap", // similarly, but without a netmap (no exit node should be selected) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "any"}, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: unresolvedExitNodeID, // cannot resolve; traffic will be 
dropped + AutoExitNode: "any", + }, + }, + { + name: "auto-exit-node-via-prefs/foo", // set auto exit node via prefs with an unknown/unsupported expression + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "foo"}, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" + AutoExitNode: "foo", + }, + }, + { + name: "auto-exit-node-via-prefs/off", // toggle the exit node off after it was set to "any" + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "any"}, + AutoExitNodeSet: true, + }, + useExitNodeEnabled: ptr.To(false), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: "", + AutoExitNode: "", + InternalExitNodePrior: "auto:any", + }, + }, + { + name: "auto-exit-node-via-prefs/on", // toggle the exit node on + prefs: ipn.Prefs{ + ControlURL: controlURL, + InternalExitNodePrior: "auto:any", + }, + netMap: clientNetmap, + report: report, + useExitNodeEnabled: ptr.To(true), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + AutoExitNode: "any", + InternalExitNodePrior: "auto:any", + }, + }, + { + name: "id-via-policy", // set exit node ID via syspolicy + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + exitNodeIDPolicy: ptr.To(exitNode1.StableID()), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + }, + { + name: "id-via-policy/cannot-override-via-prefs/by-id", // syspolicy should take precedence over prefs + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + exitNodeIDPolicy: ptr.To(exitNode1.StableID()), + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + ExitNodeID: exitNode2.StableID(), // this should be ignored + }, + ExitNodeIDSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + }, + { + name: "id-via-policy/cannot-override-via-prefs/by-ip", // syspolicy should take precedence over prefs + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + exitNodeIDPolicy: ptr.To(exitNode1.StableID()), + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + ExitNodeIP: exitNode2.Addresses().At(0).Addr(), // this should be ignored + }, + ExitNodeIPSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + }, + { + name: "id-via-policy/cannot-override-via-prefs/by-auto-expr", // syspolicy should take precedence over prefs + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + exitNodeIDPolicy: ptr.To(exitNode1.StableID()), + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + AutoExitNode: "any", // this should be ignored + }, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + }, + { + name: "ip-via-policy", // set exit node IP via syspolicy (should be resolved to an ID) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + exitNodeIPPolicy: ptr.To(exitNode2.Addresses().At(0).Addr()), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), + }, + }, + { + name: "auto-any-via-policy", // set auto exit node via syspolicy (an exit node should be selected) + prefs: 
ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + AutoExitNode: "any", + }, + }, + { + name: "auto-any-via-policy/no-report", // set auto exit node via syspolicy without a netcheck report (no exit node should be selected) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: nil, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: unresolvedExitNodeID, + AutoExitNode: "any", + }, + }, + { + name: "auto-any-via-policy/no-netmap", // similarly, but without a netmap (no exit node should be selected) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: nil, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: unresolvedExitNodeID, + AutoExitNode: "any", + }, + }, + { + name: "auto-foo-via-policy", // set auto exit node via syspolicy with an unknown/unsupported expression + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:foo")), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" + AutoExitNode: "foo", + }, + }, + { + name: "auto-any-via-policy/toggle-off", // cannot toggle off the exit node if it was set via syspolicy + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + useExitNodeEnabled: ptr.To(false), // should be ignored + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // still enforced by the policy setting + AutoExitNode: "any", + InternalExitNodePrior: "auto:any", + }, + }, + } + syspolicy.RegisterWellKnownSettingsForTest(t) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Configure policy settings, if any. + var settings []source.TestSetting[string] + if tt.exitNodeIDPolicy != nil { + settings = append(settings, source.TestSettingOf(syspolicy.ExitNodeID, string(*tt.exitNodeIDPolicy))) + } + if tt.exitNodeIPPolicy != nil { + settings = append(settings, source.TestSettingOf(syspolicy.ExitNodeIP, tt.exitNodeIPPolicy.String())) + } + if settings != nil { + syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, source.NewTestStoreOf(t, settings...)) + } else { + // No syspolicy settings, so don't register a store. + // This allows the test to run in parallel with other tests. + t.Parallel() + } + + // Create a new LocalBackend with the given prefs. + // Any syspolicy settings will be applied to the initial prefs. + lb := newTestLocalBackend(t) + lb.SetPrefsForTest(tt.prefs.Clone()) + // Then set the netcheck report and netmap, if any. + if tt.report != nil { + lb.MagicConn().SetLastNetcheckReportForTest(t.Context(), tt.report) + } + if tt.netMap != nil { + lb.SetControlClientStatus(lb.cc, controlclient.Status{NetMap: tt.netMap}) + } + + // If we have a changePrefs, apply it. + if tt.changePrefs != nil { + lb.EditPrefs(tt.changePrefs) + } + + // If we need to flip exit node toggle on or off, do it. + if tt.useExitNodeEnabled != nil { + lb.SetUseExitNodeEnabled(*tt.useExitNodeEnabled) + } + + // Now check the prefs. 
+ opts := []cmp.Option{ + cmpopts.EquateComparable(netip.Addr{}, netip.Prefix{}), + } + if diff := cmp.Diff(&tt.wantPrefs, lb.Prefs().AsStruct(), opts...); diff != "" { + t.Errorf("Prefs(+got -want): %v", diff) + } + }) + } +} + func TestInternalAndExternalInterfaces(t *testing.T) { type interfacePrefix struct { i netmon.Interface @@ -1646,6 +2032,7 @@ func TestSetExitNodeIDPolicy(t *testing.T) { prefs *ipn.Prefs exitNodeIPWant string exitNodeIDWant string + autoExitNodeWant ipn.ExitNodeExpression prefsChanged bool nm *netmap.NetworkMap lastSuggestedExitNode tailcfg.StableNodeID @@ -1850,19 +2237,38 @@ func TestSetExitNodeIDPolicy(t *testing.T) { }, }, { - name: "ExitNodeID key is set to auto and last suggested exit node is populated", + name: "ExitNodeID key is set to auto:any and last suggested exit node is populated", exitNodeIDKey: true, exitNodeID: "auto:any", lastSuggestedExitNode: "123", exitNodeIDWant: "123", + autoExitNodeWant: "any", prefsChanged: true, }, { - name: "ExitNodeID key is set to auto and last suggested exit node is not populated", - exitNodeIDKey: true, - exitNodeID: "auto:any", - prefsChanged: true, - exitNodeIDWant: "auto:any", + name: "ExitNodeID key is set to auto:any and last suggested exit node is not populated", + exitNodeIDKey: true, + exitNodeID: "auto:any", + exitNodeIDWant: "auto:any", + autoExitNodeWant: "any", + prefsChanged: true, + }, + { + name: "ExitNodeID key is set to auto:foo and last suggested exit node is populated", + exitNodeIDKey: true, + exitNodeID: "auto:foo", + lastSuggestedExitNode: "123", + exitNodeIDWant: "123", + autoExitNodeWant: "foo", + prefsChanged: true, + }, + { + name: "ExitNodeID key is set to auto:foo and last suggested exit node is not populated", + exitNodeIDKey: true, + exitNodeID: "auto:foo", + exitNodeIDWant: "auto:any", // should be "auto:any" for compatibility with existing clients + autoExitNodeWant: "foo", + prefsChanged: true, }, } @@ -1893,7 +2299,7 @@ func TestSetExitNodeIDPolicy(t *testing.T) { b.pm = pm b.lastSuggestedExitNode = test.lastSuggestedExitNode prefs := b.pm.prefs.AsStruct() - if changed := applySysPolicy(prefs, test.lastSuggestedExitNode, false) || setExitNodeID(prefs, test.nm); changed != test.prefsChanged { + if changed := applySysPolicy(prefs, false) || setExitNodeID(prefs, test.lastSuggestedExitNode, test.nm); changed != test.prefsChanged { t.Errorf("wanted prefs changed %v, got prefs changed %v", test.prefsChanged, changed) } @@ -1903,15 +2309,18 @@ func TestSetExitNodeIDPolicy(t *testing.T) { // preferences to change. 
b.SetPrefsForTest(pm.CurrentPrefs().AsStruct()) - if got := b.pm.prefs.ExitNodeID(); got != tailcfg.StableNodeID(test.exitNodeIDWant) { - t.Errorf("got %v want %v", got, test.exitNodeIDWant) + if got := b.Prefs().ExitNodeID(); got != tailcfg.StableNodeID(test.exitNodeIDWant) { + t.Errorf("ExitNodeID: got %q; want %q", got, test.exitNodeIDWant) } - if got := b.pm.prefs.ExitNodeIP(); test.exitNodeIPWant == "" { + if got := b.Prefs().ExitNodeIP(); test.exitNodeIPWant == "" { if got.String() != "invalid IP" { - t.Errorf("got %v want invalid IP", got) + t.Errorf("ExitNodeIP: got %v want invalid IP", got) } } else if got.String() != test.exitNodeIPWant { - t.Errorf("got %v want %v", got, test.exitNodeIPWant) + t.Errorf("ExitNodeIP: got %q; want %q", got, test.exitNodeIPWant) + } + if got := b.Prefs().AutoExitNode(); got != test.autoExitNodeWant { + t.Errorf("AutoExitNode: got %q; want %q", got, test.autoExitNodeWant) } }) } @@ -2459,7 +2868,7 @@ func TestApplySysPolicy(t *testing.T) { t.Run("unit", func(t *testing.T) { prefs := tt.prefs.Clone() - gotAnyChange := applySysPolicy(prefs, "", false) + gotAnyChange := applySysPolicy(prefs, false) if gotAnyChange && prefs.Equals(&tt.prefs) { t.Errorf("anyChange but prefs is unchanged: %v", prefs.Pretty()) @@ -2607,7 +3016,7 @@ func TestPreferencePolicyInfo(t *testing.T) { prefs := defaultPrefs.AsStruct() pp.set(prefs, tt.initialValue) - gotAnyChange := applySysPolicy(prefs, "", false) + gotAnyChange := applySysPolicy(prefs, false) if gotAnyChange != tt.wantChange { t.Errorf("anyChange=%v, want %v", gotAnyChange, tt.wantChange) @@ -3288,12 +3697,14 @@ type peerOptFunc func(*tailcfg.Node) func makePeer(id tailcfg.NodeID, opts ...peerOptFunc) tailcfg.NodeView { node := &tailcfg.Node{ - ID: id, - Key: makeNodeKeyFromID(id), - StableID: tailcfg.StableNodeID(fmt.Sprintf("stable%d", id)), - Name: fmt.Sprintf("peer%d", id), - Online: ptr.To(true), - HomeDERP: int(id), + ID: id, + Key: makeNodeKeyFromID(id), + DiscoKey: makeDiscoKeyFromID(id), + StableID: tailcfg.StableNodeID(fmt.Sprintf("stable%d", id)), + Name: fmt.Sprintf("peer%d", id), + Online: ptr.To(true), + MachineAuthorized: true, + HomeDERP: int(id), } for _, opt := range opts { opt(node) @@ -3363,6 +3774,12 @@ func withNodeKey() peerOptFunc { } } +func withAddresses(addresses ...netip.Prefix) peerOptFunc { + return func(n *tailcfg.Node) { + n.Addresses = append(n.Addresses, addresses...) 
+ } +} + func deterministicRegionForTest(t testing.TB, want views.Slice[int], use int) selectRegionFunc { t.Helper() @@ -4065,9 +4482,9 @@ func TestShouldAutoExitNode(t *testing.T) { expectedBool: false, }, { - name: "auto prefix invalid suffix", + name: "auto prefix unknown suffix", exitNodeIDPolicyValue: "auto:foo", - expectedBool: false, + expectedBool: true, // "auto:{unknown}" is treated as "auto:any" }, } @@ -4075,12 +4492,7 @@ func TestShouldAutoExitNode(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - syspolicy.ExitNodeID, tt.exitNodeIDPolicyValue, - )) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) - - got := shouldAutoExitNode() + got := isAutoExitNodeID(tailcfg.StableNodeID(tt.exitNodeIDPolicyValue)) if got != tt.expectedBool { t.Fatalf("expected %v got %v for %v policy value", tt.expectedBool, got, tt.exitNodeIDPolicyValue) } @@ -4088,6 +4500,65 @@ func TestShouldAutoExitNode(t *testing.T) { } } +func TestParseAutoExitNodeID(t *testing.T) { + tests := []struct { + name string + exitNodeID string + wantOk bool + wantExpr ipn.ExitNodeExpression + }{ + { + name: "empty expr", + exitNodeID: "", + wantOk: false, + wantExpr: "", + }, + { + name: "no auto prefix", + exitNodeID: "foo", + wantOk: false, + wantExpr: "", + }, + { + name: "auto:any", + exitNodeID: "auto:any", + wantOk: true, + wantExpr: ipn.AnyExitNode, + }, + { + name: "auto:foo", + exitNodeID: "auto:foo", + wantOk: true, + wantExpr: "foo", + }, + { + name: "auto prefix but empty suffix", + exitNodeID: "auto:", + wantOk: false, + wantExpr: "", + }, + { + name: "auto prefix no colon", + exitNodeID: "auto", + wantOk: false, + wantExpr: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotExpr, gotOk := parseAutoExitNodeID(tailcfg.StableNodeID(tt.exitNodeID)) + if gotOk != tt.wantOk || gotExpr != tt.wantExpr { + if tt.wantOk { + t.Fatalf("got %v (%q); want %v (%q)", gotOk, gotExpr, tt.wantOk, tt.wantExpr) + } else { + t.Fatalf("got %v (%q); want false", gotOk, gotExpr) + } + } + }) + } +} + func TestEnableAutoUpdates(t *testing.T) { lb := newTestLocalBackend(t) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index eb3664385..f0ac5f944 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -6,6 +6,7 @@ package ipnlocal import ( "context" "errors" + "fmt" "net/netip" "strings" "sync" @@ -1108,10 +1109,17 @@ func TestEngineReconfigOnStateChange(t *testing.T) { enableLogging := false connect := &ipn.MaskedPrefs{Prefs: ipn.Prefs{WantRunning: true}, WantRunningSet: true} disconnect := &ipn.MaskedPrefs{Prefs: ipn.Prefs{WantRunning: false}, WantRunningSet: true} - node1 := testNetmapForNode(1, "node-1", []netip.Prefix{netip.MustParsePrefix("100.64.1.1/32")}) - node2 := testNetmapForNode(2, "node-2", []netip.Prefix{netip.MustParsePrefix("100.64.1.2/32")}) - node3 := testNetmapForNode(3, "node-3", []netip.Prefix{netip.MustParsePrefix("100.64.1.3/32")}) - node3.Peers = []tailcfg.NodeView{node1.SelfNode, node2.SelfNode} + node1 := buildNetmapWithPeers( + makePeer(1, withName("node-1"), withAddresses(netip.MustParsePrefix("100.64.1.1/32"))), + ) + node2 := buildNetmapWithPeers( + makePeer(2, withName("node-2"), withAddresses(netip.MustParsePrefix("100.64.1.2/32"))), + ) + node3 := buildNetmapWithPeers( + makePeer(3, withName("node-3"), withAddresses(netip.MustParsePrefix("100.64.1.3/32"))), + node1.SelfNode, + node2.SelfNode, + ) 
routesWithQuad100 := func(extra ...netip.Prefix) []netip.Prefix { return append(extra, netip.MustParsePrefix("100.100.100.100/32")) } @@ -1380,33 +1388,75 @@ func TestEngineReconfigOnStateChange(t *testing.T) { } } -func testNetmapForNode(userID tailcfg.UserID, name string, addresses []netip.Prefix) *netmap.NetworkMap { +func buildNetmapWithPeers(self tailcfg.NodeView, peers ...tailcfg.NodeView) *netmap.NetworkMap { const ( - domain = "example.com" - magicDNSSuffix = ".test.ts.net" + firstAutoUserID = tailcfg.UserID(10000) + domain = "example.com" + magicDNSSuffix = ".test.ts.net" ) - user := &tailcfg.UserProfile{ - ID: userID, - DisplayName: name, - LoginName: strings.Join([]string{name, domain}, "@"), - } - self := &tailcfg.Node{ - ID: tailcfg.NodeID(1000 + userID), - StableID: tailcfg.StableNodeID("stable-" + name), - User: user.ID, - Name: name + magicDNSSuffix, - Addresses: addresses, - MachineAuthorized: true, - } - self.Key = makeNodeKeyFromID(self.ID) - self.DiscoKey = makeDiscoKeyFromID(self.ID) + + users := make(map[tailcfg.UserID]tailcfg.UserProfileView) + makeUserForNode := func(n *tailcfg.Node) { + var user *tailcfg.UserProfile + if n.User == 0 { + n.User = firstAutoUserID + tailcfg.UserID(n.ID) + user = &tailcfg.UserProfile{ + DisplayName: n.Name, + LoginName: n.Name, + } + } else if _, ok := users[n.User]; !ok { + user = &tailcfg.UserProfile{ + DisplayName: fmt.Sprintf("User %d", n.User), + LoginName: fmt.Sprintf("user-%d", n.User), + } + } + if user != nil { + user.ID = n.User + user.LoginName = strings.Join([]string{user.LoginName, domain}, "@") + users[n.User] = user.View() + } + } + + derpmap := &tailcfg.DERPMap{ + Regions: make(map[int]*tailcfg.DERPRegion), + } + makeDERPRegionForNode := func(n *tailcfg.Node) { + if n.HomeDERP == 0 { + return // no DERP region + } + if _, ok := derpmap.Regions[n.HomeDERP]; !ok { + r := &tailcfg.DERPRegion{ + RegionID: n.HomeDERP, + RegionName: fmt.Sprintf("Region %d", n.HomeDERP), + } + r.Nodes = append(r.Nodes, &tailcfg.DERPNode{ + Name: fmt.Sprintf("%da", n.HomeDERP), + RegionID: n.HomeDERP, + }) + derpmap.Regions[n.HomeDERP] = r + } + } + + updateNode := func(n tailcfg.NodeView) tailcfg.NodeView { + mut := n.AsStruct() + makeUserForNode(mut) + makeDERPRegionForNode(mut) + mut.Name = mut.Name + magicDNSSuffix + return mut.View() + } + + self = updateNode(self) + for i := range peers { + peers[i] = updateNode(peers[i]) + } + return &netmap.NetworkMap{ - SelfNode: self.View(), - Name: self.Name, - Domain: domain, - UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ - user.ID: user.View(), - }, + SelfNode: self, + Name: self.Name(), + Domain: domain, + Peers: peers, + UserProfiles: users, + DERPMap: derpmap, } } diff --git a/ipn/prefs.go b/ipn/prefs.go index 01275a7e2..77cea0493 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -94,6 +94,25 @@ type Prefs struct { ExitNodeID tailcfg.StableNodeID ExitNodeIP netip.Addr + // AutoExitNode is an optional expression that specifies whether and how + // tailscaled should pick an exit node automatically. + // + // If specified, tailscaled will use an exit node based on the expression, + // and will re-evaluate the selection periodically as network conditions, + // available exit nodes, or policy settings change. A blackhole route will + // be installed to prevent traffic from escaping to the local network until + // an exit node is selected. It takes precedence over ExitNodeID and ExitNodeIP. + // + // If empty, tailscaled will not automatically select an exit node. 
+ // + // If the specified expression is invalid or unsupported by the client, + // it falls back to the behavior of [AnyExitNode]. + // + // As of 2025-07-02, the only supported value is [AnyExitNode]. + // It's a string rather than a boolean to allow future extensibility + // (e.g., AutoExitNode = "mullvad" or AutoExitNode = "geo:us"). + AutoExitNode ExitNodeExpression `json:",omitempty"` + // InternalExitNodePrior is the most recently used ExitNodeID in string form. It is set by // the backend on transition from exit node on to off and used by the // backend. @@ -325,6 +344,7 @@ type MaskedPrefs struct { RouteAllSet bool `json:",omitempty"` ExitNodeIDSet bool `json:",omitempty"` ExitNodeIPSet bool `json:",omitempty"` + AutoExitNodeSet bool `json:",omitempty"` InternalExitNodePriorSet bool `json:",omitempty"` // Internal; can't be set by LocalAPI clients ExitNodeAllowLANAccessSet bool `json:",omitempty"` CorpDNSSet bool `json:",omitempty"` @@ -533,6 +553,9 @@ func (p *Prefs) pretty(goos string) string { } else if !p.ExitNodeID.IsZero() { fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeID, p.ExitNodeAllowLANAccess) } + if p.AutoExitNode.IsSet() { + fmt.Fprintf(&sb, "auto=%v ", p.AutoExitNode) + } if len(p.AdvertiseRoutes) > 0 || goos == "linux" { fmt.Fprintf(&sb, "routes=%v ", p.AdvertiseRoutes) } @@ -609,6 +632,7 @@ func (p *Prefs) Equals(p2 *Prefs) bool { p.RouteAll == p2.RouteAll && p.ExitNodeID == p2.ExitNodeID && p.ExitNodeIP == p2.ExitNodeIP && + p.AutoExitNode == p2.AutoExitNode && p.InternalExitNodePrior == p2.InternalExitNodePrior && p.ExitNodeAllowLANAccess == p2.ExitNodeAllowLANAccess && p.CorpDNS == p2.CorpDNS && @@ -804,6 +828,7 @@ func isRemoteIP(st *ipnstate.Status, ip netip.Addr) bool { func (p *Prefs) ClearExitNode() { p.ExitNodeID = "" p.ExitNodeIP = netip.Addr{} + p.AutoExitNode = "" } // ExitNodeLocalIPError is returned when the requested IP address for an exit @@ -1043,3 +1068,23 @@ func (p *LoginProfile) Equals(p2 *LoginProfile) bool { p.LocalUserID == p2.LocalUserID && p.ControlURL == p2.ControlURL } + +// ExitNodeExpression is a string that specifies how an exit node +// should be selected. An empty string means that no exit node +// should be selected. +// +// As of 2025-07-02, the only supported value is [AnyExitNode]. +type ExitNodeExpression string + +// AnyExitNode indicates that the exit node should be automatically +// selected from the pool of available exit nodes, excluding any +// disallowed by policy (e.g., [syspolicy.AllowedSuggestedExitNodes]). +// The exact implementation is subject to change, but exit nodes +// offering the best performance will be preferred. +const AnyExitNode ExitNodeExpression = "any" + +// IsSet reports whether the expression is non-empty and can be used +// to select an exit node. 
+func (e ExitNodeExpression) IsSet() bool { + return e != "" +} diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index d28d161db..268ea206c 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -40,6 +40,7 @@ func TestPrefsEqual(t *testing.T) { "RouteAll", "ExitNodeID", "ExitNodeIP", + "AutoExitNode", "InternalExitNodePrior", "ExitNodeAllowLANAccess", "CorpDNS", @@ -150,6 +151,17 @@ func TestPrefsEqual(t *testing.T) { true, }, + { + &Prefs{AutoExitNode: ""}, + &Prefs{AutoExitNode: "auto:any"}, + false, + }, + { + &Prefs{AutoExitNode: "auto:any"}, + &Prefs{AutoExitNode: "auto:any"}, + true, + }, + { &Prefs{}, &Prefs{ExitNodeAllowLANAccess: true}, From c46145b99e4157d89df807dc64133e31d855cf09 Mon Sep 17 00:00:00 2001 From: David Bond Date: Fri, 4 Jul 2025 12:19:23 +0100 Subject: [PATCH 1037/1708] cmd/k8s-operator: Move login server value to top-level (#16470) This commit modifies the operator helm chart values to bring the newly added `loginServer` field to the top level. We felt as though it was a bit confusing to be at the `operatorConfig` level as this value modifies the behaviour or the operator, api server & all resources that the operator manages. Updates https://github.com/tailscale/corp/issues/29847 Signed-off-by: David Bond --- cmd/k8s-operator/deploy/chart/templates/deployment.yaml | 2 +- cmd/k8s-operator/deploy/chart/values.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml index 8deba7dab..01a290c07 100644 --- a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml @@ -69,7 +69,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: OPERATOR_LOGIN_SERVER - value: {{ .Values.operatorConfig.loginServer }} + value: {{ .Values.loginServer }} - name: CLIENT_ID_FILE value: /oauth/client_id - name: CLIENT_SECRET_FILE diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index af941425a..0ba8d045a 100644 --- a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -9,6 +9,9 @@ oauth: {} # clientId: "" # clientSecret: "" +# URL of the control plane to be used by all resources managed by the operator. +loginServer: "" + # Secret volume. # If set it defines the volume the oauth secrets will be mounted from. # The volume needs to contain two files named `client_id` and `client_secret`. @@ -72,9 +75,6 @@ operatorConfig: # - name: EXTRA_VAR2 # value: "value2" - # URL of the control plane to be used by all resources managed by the operator. - loginServer: "" - # In the case that you already have a tailscale ingressclass in your cluster (or vcluster), you can disable the creation here ingressClass: enabled: true From 639fed6856722bad94762b48546cd84331f12b97 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 4 Jul 2025 16:06:22 +0100 Subject: [PATCH 1038/1708] Dockerfile,build_docker.sh: add a note on how to build local images (#16471) Updates#cleanup Signed-off-by: Irbe Krumina --- Dockerfile | 9 +++++++++ build_docker.sh | 10 ++++++++++ 2 files changed, 19 insertions(+) diff --git a/Dockerfile b/Dockerfile index 015022e49..fbc0d1194 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,6 +7,15 @@ # Tailscale images are currently built using https://github.com/tailscale/mkctr, # and the build script can be found in ./build_docker.sh. # +# If you want to build local images for testing, you can use make. 
+# +# To build a Tailscale image and push to the local docker registry: +# +# $ REPO=local/tailscale TAGS=v0.0.1 PLATFORM=local make publishdevimage +# +# To build a Tailscale image and push to a remote docker registry: +# +# $ REPO=//tailscale TAGS=v0.0.1 make publishdevimage # # This Dockerfile includes all the tailscale binaries. # diff --git a/build_docker.sh b/build_docker.sh index bdc9dc086..7840dc897 100755 --- a/build_docker.sh +++ b/build_docker.sh @@ -6,6 +6,16 @@ # hash of this repository as produced by ./cmd/mkversion. # This is the image build mechanim used to build the official Tailscale # container images. +# +# If you want to build local images for testing, you can use make, which provides few convenience wrappers around this script. +# +# To build a Tailscale image and push to the local docker registry: + +# $ REPO=local/tailscale TAGS=v0.0.1 PLATFORM=local make publishdevimage +# +# To build a Tailscale image and push to a remote docker registry: +# +# $ REPO=//tailscale TAGS=v0.0.1 make publishdevimage set -eu From 92a114c66d296704d48045ee12c0fe28bb7f5b6c Mon Sep 17 00:00:00 2001 From: Dylan Bargatze Date: Fri, 4 Jul 2025 12:48:38 -0400 Subject: [PATCH 1039/1708] tailcfg, feature/relayserver, wgengine/magicsock: invert UDP relay server nodeAttrs (#16444) Inverts the nodeAttrs related to UDP relay client/server enablement to disablement, and fixes up the corresponding logic that uses them. Also updates the doc comments on both nodeAttrs. Fixes tailscale/corp#30024 Signed-off-by: Dylan Bargatze --- feature/relayserver/relayserver.go | 18 +++++++++--------- tailcfg/tailcfg.go | 21 ++++++++++++++------- wgengine/magicsock/magicsock.go | 2 +- wgengine/magicsock/magicsock_test.go | 3 --- 4 files changed, 24 insertions(+), 20 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 5a82a9d11..f4a533193 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -50,11 +50,11 @@ func newExtension(logf logger.Logf, _ ipnext.SafeBackend) (ipnext.Extension, err type extension struct { logf logger.Logf - mu sync.Mutex // guards the following fields - shutdown bool - port *int // ipn.Prefs.RelayServerPort, nil if disabled - hasNodeAttrRelayServer bool // tailcfg.NodeAttrRelayServer - server relayServer // lazily initialized + mu sync.Mutex // guards the following fields + shutdown bool + port *int // ipn.Prefs.RelayServerPort, nil if disabled + hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer + server relayServer // lazily initialized } // relayServer is the interface of [udprelay.Server]. 
@@ -81,8 +81,8 @@ func (e *extension) Init(host ipnext.Host) error { func (e *extension) selfNodeViewChanged(nodeView tailcfg.NodeView) { e.mu.Lock() defer e.mu.Unlock() - e.hasNodeAttrRelayServer = nodeView.HasCap(tailcfg.NodeAttrRelayServer) - if !e.hasNodeAttrRelayServer && e.server != nil { + e.hasNodeAttrDisableRelayServer = nodeView.HasCap(tailcfg.NodeAttrDisableRelayServer) + if e.hasNodeAttrDisableRelayServer && e.server != nil { e.server.Close() e.server = nil } @@ -130,8 +130,8 @@ func (e *extension) relayServerOrInit() (relayServer, error) { if e.port == nil { return nil, errors.New("relay server is not configured") } - if !e.hasNodeAttrRelayServer { - return nil, errors.New("no relay:server node attribute") + if e.hasNodeAttrDisableRelayServer { + return nil, errors.New("disable-relay-server node attribute is present") } if !envknob.UseWIPCode() { return nil, errors.New("TAILSCALE_USE_WIP_CODE envvar is not set") diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 10b157ac1..d97f60a8a 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2602,13 +2602,20 @@ const ( // peer node list. NodeAttrNativeIPV4 NodeCapability = "native-ipv4" - // NodeAttrRelayServer permits the node to act as an underlay UDP relay - // server. There are no expected values for this key in NodeCapMap. - NodeAttrRelayServer NodeCapability = "relay:server" - - // NodeAttrRelayClient permits the node to act as an underlay UDP relay - // client. There are no expected values for this key in NodeCapMap. - NodeAttrRelayClient NodeCapability = "relay:client" + // NodeAttrDisableRelayServer prevents the node from acting as an underlay + // UDP relay server. There are no expected values for this key; the key + // only needs to be present in [NodeCapMap] to take effect. + NodeAttrDisableRelayServer NodeCapability = "disable-relay-server" + + // NodeAttrDisableRelayClient prevents the node from allocating UDP relay + // server endpoints itself; the node may still bind into and relay traffic + // using endpoints allocated by its peers. This attribute can be added to + // the node dynamically; if added while the node is already running, the + // node will be unable to allocate UDP relay server endpoints after it next + // updates its network map. There are no expected values for this key in + // [NodeCapMap]; the key only needs to be present in [NodeCapMap] to take + // effect. + NodeAttrDisableRelayClient NodeCapability = "disable-relay-client" // NodeAttrMagicDNSPeerAAAA is a capability that tells the node's MagicDNS // server to answer AAAA queries about its peers. See tailscale/tailscale#1152. 
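A minimal sketch (not from this patch) of how callers are expected to read the inverted attributes: relay participation is now opt-out, so code checks for the absence of the disable capability. The relayRoles helper and its package are hypothetical; it only relies on the Valid and HasCap methods of tailcfg.NodeView that the diffs above already use.

package sketch // illustration only, not in the tree

import "tailscale.com/tailcfg"

// relayRoles reports whether self may act as an underlay relay server and
// whether it may allocate relay endpoints as a client.
func relayRoles(self tailcfg.NodeView) (server, client bool) {
	if !self.Valid() {
		return false, false
	}
	// Presence of the capability now disables the behavior, so the checks
	// are negated relative to the old NodeAttrRelayServer/NodeAttrRelayClient.
	return !self.HasCap(tailcfg.NodeAttrDisableRelayServer),
		!self.HasCap(tailcfg.NodeAttrDisableRelayClient)
}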
diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 174345a84..5719b20f9 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2703,7 +2703,7 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { peersChanged := c.updateNodes(update) relayClientEnabled := update.SelfNode.Valid() && - update.SelfNode.HasCap(tailcfg.NodeAttrRelayClient) && + !update.SelfNode.HasCap(tailcfg.NodeAttrDisableRelayClient) && envknob.UseWIPCode() c.mu.Lock() diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 8aa9a09d2..c388e9ed1 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -3408,9 +3408,6 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { } peerOnlyIPv4 := &tailcfg.Node{ Cap: math.MinInt32, - CapMap: map[tailcfg.NodeCapability][]tailcfg.RawMessage{ - tailcfg.NodeAttrRelayServer: nil, - }, Addresses: []netip.Prefix{ netip.MustParsePrefix("2.2.2.2/32"), }, From 079134d3c0f51ad27e502e70a172e10326c70d3d Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Mon, 7 Jul 2025 00:40:56 +0100 Subject: [PATCH 1040/1708] cmd/k8s-operator: always set ProxyGroup status conditions (#16429) Refactors setting status into its own top-level function to make it easier to ensure we _always_ set the status if it's changed on every reconcile. Previously, it was possible to have stale status if some earlier part of the provision logic failed. Updates #16327 Change-Id: Idab0cfc15ae426cf6914a82f0d37a5cc7845236b Signed-off-by: Tom Proctor --- .../crds/tailscale.com_proxygroups.yaml | 5 +- .../deploy/manifests/operator.yaml | 5 +- cmd/k8s-operator/proxygroup.go | 300 +++++++++--------- cmd/k8s-operator/proxygroup_test.go | 66 ++-- k8s-operator/api.md | 2 +- .../apis/v1alpha1/types_proxygroup.go | 6 +- 6 files changed, 212 insertions(+), 172 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index f695e989d..c426c8427 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -124,7 +124,10 @@ spec: conditions: description: |- List of status conditions to indicate the status of the ProxyGroup - resources. Known condition types are `ProxyGroupReady`. + resources. Known condition types are `ProxyGroupReady`, `ProxyGroupAvailable`. + `ProxyGroupReady` indicates all ProxyGroup resources are fully reconciled + and ready. `ProxyGroupAvailable` indicates that at least one proxy is + ready to serve traffic. type: array items: description: Condition contains details for one aspect of the current state of this API Resource. diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 4f1faf104..288857569 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2953,7 +2953,10 @@ spec: conditions: description: |- List of status conditions to indicate the status of the ProxyGroup - resources. Known condition types are `ProxyGroupReady`. + resources. Known condition types are `ProxyGroupReady`, `ProxyGroupAvailable`. + `ProxyGroupReady` indicates all ProxyGroup resources are fully reconciled + and ready. `ProxyGroupAvailable` indicates that at least one proxy is + ready to serve traffic. items: description: Condition contains details for one aspect of the current state of this API Resource. 
properties: diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 1b622c920..c44de09a7 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -13,6 +13,7 @@ import ( "net/http" "net/netip" "slices" + "sort" "strings" "sync" @@ -48,7 +49,6 @@ const ( reasonProxyGroupCreationFailed = "ProxyGroupCreationFailed" reasonProxyGroupReady = "ProxyGroupReady" reasonProxyGroupCreating = "ProxyGroupCreating" - reasonProxyGroupInvalid = "ProxyGroupInvalid" // Copied from k8s.io/apiserver/pkg/registry/generic/registry/store.go@cccad306d649184bf2a0e319ba830c53f65c445c optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" @@ -132,17 +132,15 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } oldPGStatus := pg.Status.DeepCopy() - setStatusReady := func(pg *tsapi.ProxyGroup, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, status, reason, message, pg.Generation, r.clock, logger) - if !apiequality.Semantic.DeepEqual(oldPGStatus, &pg.Status) { - // An error encountered here should get returned by the Reconcile function. - if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil { - err = errors.Join(err, updateErr) - } - } - return reconcile.Result{}, err - } + staticEndpoints, nrr, err := r.reconcilePG(ctx, pg, logger) + return reconcile.Result{}, errors.Join(err, r.maybeUpdateStatus(ctx, logger, pg, oldPGStatus, nrr, staticEndpoints)) +} +// reconcilePG handles all reconciliation of a ProxyGroup that is not marked +// for deletion. It is separated out from Reconcile to make a clear separation +// between reconciling the ProxyGroup, and posting the status of its created +// resources onto the ProxyGroup status field. +func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (map[string][]netip.AddrPort, *notReadyReason, error) { if !slices.Contains(pg.Finalizers, FinalizerName) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. So, @@ -150,18 +148,11 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ // operation is underway. 
logger.Infof("ensuring ProxyGroup is set up") pg.Finalizers = append(pg.Finalizers, FinalizerName) - if err = r.Update(ctx, pg); err != nil { - err = fmt.Errorf("error adding finalizer: %w", err) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, reasonProxyGroupCreationFailed) + if err := r.Update(ctx, pg); err != nil { + return r.notReadyErrf(pg, "error adding finalizer: %w", err) } } - if err = r.validate(pg); err != nil { - message := fmt.Sprintf("ProxyGroup is invalid: %s", err) - r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupInvalid, message) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupInvalid, message) - } - proxyClassName := r.defaultProxyClass if pg.Spec.ProxyClass != "" { proxyClassName = pg.Spec.ProxyClass @@ -172,78 +163,33 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ proxyClass = new(tsapi.ProxyClass) err := r.Get(ctx, types.NamespacedName{Name: proxyClassName}, proxyClass) if apierrors.IsNotFound(err) { - err = nil - message := fmt.Sprintf("the ProxyGroup's ProxyClass %s does not (yet) exist", proxyClassName) - logger.Info(message) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) + msg := fmt.Sprintf("the ProxyGroup's ProxyClass %q does not (yet) exist", proxyClassName) + logger.Info(msg) + return r.notReady(reasonProxyGroupCreating, msg) } if err != nil { - err = fmt.Errorf("error getting ProxyGroup's ProxyClass %s: %s", proxyClassName, err) - r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error()) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, err.Error()) + return r.notReadyErrf(pg, "error getting ProxyGroup's ProxyClass %q: %w", proxyClassName, err) } validateProxyClassForPG(logger, pg, proxyClass) if !tsoperator.ProxyClassIsReady(proxyClass) { - message := fmt.Sprintf("the ProxyGroup's ProxyClass %s is not yet in a ready state, waiting...", proxyClassName) - logger.Info(message) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) + msg := fmt.Sprintf("the ProxyGroup's ProxyClass %q is not yet in a ready state, waiting...", proxyClassName) + logger.Info(msg) + return r.notReady(reasonProxyGroupCreating, msg) } } - isProvisioned, err := r.maybeProvision(ctx, pg, proxyClass) + staticEndpoints, nrr, err := r.maybeProvision(ctx, pg, proxyClass) if err != nil { - reason := reasonProxyGroupCreationFailed - msg := fmt.Sprintf("error provisioning ProxyGroup resources: %s", err) if strings.Contains(err.Error(), optimisticLockErrorMsg) { - reason = reasonProxyGroupCreating - msg = fmt.Sprintf("optimistic lock error, retrying: %s", err) - err = nil + msg := fmt.Sprintf("optimistic lock error, retrying: %s", nrr.message) logger.Info(msg) + return r.notReady(reasonProxyGroupCreating, msg) } else { - r.recorder.Eventf(pg, corev1.EventTypeWarning, reason, msg) - } - - return setStatusReady(pg, metav1.ConditionFalse, reason, msg) - } - - if !isProvisioned { - if !apiequality.Semantic.DeepEqual(oldPGStatus, &pg.Status) { - // An error encountered here should get returned by the Reconcile function. - if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil { - return reconcile.Result{}, errors.Join(err, updateErr) - } + return nil, nrr, err } - return } - desiredReplicas := int(pgReplicas(pg)) - - // Set ProxyGroupAvailable condition. 
- status := metav1.ConditionFalse - reason := reasonProxyGroupCreating - message := fmt.Sprintf("%d/%d ProxyGroup pods running", len(pg.Status.Devices), desiredReplicas) - if len(pg.Status.Devices) > 0 { - status = metav1.ConditionTrue - if len(pg.Status.Devices) == desiredReplicas { - reason = reasonProxyGroupReady - } - } - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, status, reason, message, pg.Generation, r.clock, logger) - - // Set ProxyGroupReady condition. - if len(pg.Status.Devices) < desiredReplicas { - logger.Debug(message) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) - } - - if len(pg.Status.Devices) > desiredReplicas { - message = fmt.Sprintf("waiting for %d ProxyGroup pods to shut down", len(pg.Status.Devices)-desiredReplicas) - logger.Debug(message) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) - } - - logger.Info("ProxyGroup resources synced") - return setStatusReady(pg, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady) + return staticEndpoints, nrr, nil } // validateProxyClassForPG applies custom validation logic for ProxyClass applied to ProxyGroup. @@ -271,7 +217,7 @@ func validateProxyClassForPG(logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, pc } } -func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (isProvisioned bool, err error) { +func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (map[string][]netip.AddrPort, *notReadyReason, error) { logger := r.logger(pg.Name) r.mu.Lock() r.ensureAddedToGaugeForProxyGroup(pg) @@ -280,31 +226,30 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro svcToNodePorts := make(map[string]uint16) var tailscaledPort *uint16 if proxyClass != nil && proxyClass.Spec.StaticEndpoints != nil { + var err error svcToNodePorts, tailscaledPort, err = r.ensureNodePortServiceCreated(ctx, pg, proxyClass) if err != nil { - wrappedErr := fmt.Errorf("error provisioning NodePort Services for static endpoints: %w", err) var allocatePortErr *allocatePortsErr if errors.As(err, &allocatePortErr) { reason := reasonProxyGroupCreationFailed - msg := fmt.Sprintf("error provisioning ProxyGroup resources: %s", wrappedErr) - r.setStatusReady(pg, metav1.ConditionFalse, reason, msg, logger) - return false, nil + msg := fmt.Sprintf("error provisioning NodePort Services for static endpoints: %v", err) + r.recorder.Event(pg, corev1.EventTypeWarning, reason, msg) + return r.notReady(reason, msg) } - return false, wrappedErr + return r.notReadyErrf(pg, "error provisioning NodePort Services for static endpoints: %w", err) } } staticEndpoints, err := r.ensureConfigSecretsCreated(ctx, pg, proxyClass, svcToNodePorts) if err != nil { - wrappedErr := fmt.Errorf("error provisioning config Secrets: %w", err) var selectorErr *FindStaticEndpointErr if errors.As(err, &selectorErr) { reason := reasonProxyGroupCreationFailed - msg := fmt.Sprintf("error provisioning ProxyGroup resources: %s", wrappedErr) - r.setStatusReady(pg, metav1.ConditionFalse, reason, msg, logger) - return false, nil + msg := fmt.Sprintf("error provisioning config Secrets: %v", err) + r.recorder.Event(pg, corev1.EventTypeWarning, reason, msg) + return r.notReady(reason, msg) } - return false, wrappedErr + return r.notReadyErrf(pg, "error provisioning config Secrets: %w", err) } // State secrets are precreated so we can use the 
ProxyGroup CR as their owner ref. @@ -315,7 +260,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = sec.ObjectMeta.OwnerReferences }); err != nil { - return false, fmt.Errorf("error provisioning state Secrets: %w", err) + return r.notReadyErrf(pg, "error provisioning state Secrets: %w", err) } } sa := pgServiceAccount(pg, r.tsNamespace) @@ -324,7 +269,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences }); err != nil { - return false, fmt.Errorf("error provisioning ServiceAccount: %w", err) + return r.notReadyErrf(pg, "error provisioning ServiceAccount: %w", err) } role := pgRole(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { @@ -333,7 +278,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro r.ObjectMeta.OwnerReferences = role.ObjectMeta.OwnerReferences r.Rules = role.Rules }); err != nil { - return false, fmt.Errorf("error provisioning Role: %w", err) + return r.notReadyErrf(pg, "error provisioning Role: %w", err) } roleBinding := pgRoleBinding(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) { @@ -343,7 +288,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro r.RoleRef = roleBinding.RoleRef r.Subjects = roleBinding.Subjects }); err != nil { - return false, fmt.Errorf("error provisioning RoleBinding: %w", err) + return r.notReadyErrf(pg, "error provisioning RoleBinding: %w", err) } if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { cm, hp := pgEgressCM(pg, r.tsNamespace) @@ -352,7 +297,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences mak.Set(&existing.BinaryData, egressservices.KeyHEPPings, hp) }); err != nil { - return false, fmt.Errorf("error provisioning egress ConfigMap %q: %w", cm.Name, err) + return r.notReadyErrf(pg, "error provisioning egress ConfigMap %q: %w", cm.Name, err) } } if pg.Spec.Type == tsapi.ProxyGroupTypeIngress { @@ -361,28 +306,27 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro existing.ObjectMeta.Labels = cm.ObjectMeta.Labels existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences }); err != nil { - return false, fmt.Errorf("error provisioning ingress ConfigMap %q: %w", cm.Name, err) + return r.notReadyErrf(pg, "error provisioning ingress ConfigMap %q: %w", cm.Name, err) } } ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, tailscaledPort, proxyClass) if err != nil { - return false, fmt.Errorf("error generating StatefulSet spec: %w", err) + return r.notReadyErrf(pg, "error generating StatefulSet spec: %w", err) } cfg := &tailscaleSTSConfig{ proxyType: string(pg.Spec.Type), } ss = applyProxyClassToStatefulSet(proxyClass, ss, cfg, logger) - updateSS := func(s *appsv1.StatefulSet) { + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { s.Spec = ss.Spec - s.ObjectMeta.Labels = ss.ObjectMeta.Labels s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = ss.ObjectMeta.OwnerReferences + }); err != nil { + return r.notReadyErrf(pg, "error provisioning StatefulSet: %w", err) } - 
if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, updateSS); err != nil { - return false, fmt.Errorf("error provisioning StatefulSet: %w", err) - } + mo := &metricsOpts{ tsNamespace: r.tsNamespace, proxyStsName: pg.Name, @@ -390,21 +334,67 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro proxyType: "proxygroup", } if err := reconcileMetricsResources(ctx, logger, mo, proxyClass, r.Client); err != nil { - return false, fmt.Errorf("error reconciling metrics resources: %w", err) + return r.notReadyErrf(pg, "error reconciling metrics resources: %w", err) } if err := r.cleanupDanglingResources(ctx, pg, proxyClass); err != nil { - return false, fmt.Errorf("error cleaning up dangling resources: %w", err) + return r.notReadyErrf(pg, "error cleaning up dangling resources: %w", err) } - devices, err := r.getDeviceInfo(ctx, staticEndpoints, pg) + logger.Info("ProxyGroup resources synced") + + return staticEndpoints, nil, nil +} + +func (r *ProxyGroupReconciler) maybeUpdateStatus(ctx context.Context, logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, oldPGStatus *tsapi.ProxyGroupStatus, nrr *notReadyReason, endpoints map[string][]netip.AddrPort) (err error) { + defer func() { + if !apiequality.Semantic.DeepEqual(*oldPGStatus, pg.Status) { + if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil { + err = errors.Join(err, updateErr) + } + } + }() + + devices, err := r.getRunningProxies(ctx, pg, endpoints) if err != nil { - return false, fmt.Errorf("failed to get device info: %w", err) + return fmt.Errorf("failed to list running proxies: %w", err) } pg.Status.Devices = devices - return true, nil + desiredReplicas := int(pgReplicas(pg)) + + // Set ProxyGroupAvailable condition. + status := metav1.ConditionFalse + reason := reasonProxyGroupCreating + message := fmt.Sprintf("%d/%d ProxyGroup pods running", len(devices), desiredReplicas) + if len(devices) > 0 { + status = metav1.ConditionTrue + if len(devices) == desiredReplicas { + reason = reasonProxyGroupReady + } + } + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, status, reason, message, 0, r.clock, logger) + + // Set ProxyGroupReady condition. + status = metav1.ConditionFalse + reason = reasonProxyGroupCreating + switch { + case nrr != nil: + // If we failed earlier, that reason takes precedence. 
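+		// nrr carries the reason and message recorded when provisioning
+		// returned early (e.g. a missing ProxyClass or a failed NodePort
+		// allocation), so surface that verbatim instead of a replica count.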
+ reason = nrr.reason + message = nrr.message + case len(devices) < desiredReplicas: + case len(devices) > desiredReplicas: + message = fmt.Sprintf("waiting for %d ProxyGroup pods to shut down", len(devices)-desiredReplicas) + default: + status = metav1.ConditionTrue + reason = reasonProxyGroupReady + message = reasonProxyGroupReady + } + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, status, reason, message, pg.Generation, r.clock, logger) + + return nil } // getServicePortsForProxyGroups returns a map of ProxyGroup Service names to their NodePorts, @@ -484,15 +474,15 @@ func (r *ProxyGroupReconciler) ensureNodePortServiceCreated(ctx context.Context, tailscaledPort := getRandomPort() svcs := []*corev1.Service{} for i := range pgReplicas(pg) { - replicaName := pgNodePortServiceName(pg.Name, i) + nodePortSvcName := pgNodePortServiceName(pg.Name, i) svc := &corev1.Service{} - err := r.Get(ctx, types.NamespacedName{Name: replicaName, Namespace: r.tsNamespace}, svc) + err := r.Get(ctx, types.NamespacedName{Name: nodePortSvcName, Namespace: r.tsNamespace}, svc) if err != nil && !apierrors.IsNotFound(err) { - return nil, nil, fmt.Errorf("error getting Kubernetes Service %q: %w", replicaName, err) + return nil, nil, fmt.Errorf("error getting Kubernetes Service %q: %w", nodePortSvcName, err) } if apierrors.IsNotFound(err) { - svcs = append(svcs, pgNodePortService(pg, replicaName, r.tsNamespace)) + svcs = append(svcs, pgNodePortService(pg, nodePortSvcName, r.tsNamespace)) } else { // NOTE: if we can we want to recover the random port used for tailscaled, // as well as the NodePort previously used for that Service @@ -638,7 +628,7 @@ func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, id tailc func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass, svcToNodePorts map[string]uint16) (endpoints map[string][]netip.AddrPort, err error) { logger := r.logger(pg.Name) - endpoints = make(map[string][]netip.AddrPort, pgReplicas(pg)) + endpoints = make(map[string][]netip.AddrPort, pgReplicas(pg)) // keyed by Service name. 
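+	// Each replica gets its own tailscaled config Secret; when static
+	// endpoints are in use it also gets a per-replica NodePort Service,
+	// which is why the map above is keyed by Service name.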
for i := range pgReplicas(pg) { cfgSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -691,14 +681,15 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } } - replicaName := pgNodePortServiceName(pg.Name, i) + nodePortSvcName := pgNodePortServiceName(pg.Name, i) if len(svcToNodePorts) > 0 { - port, ok := svcToNodePorts[replicaName] + replicaName := fmt.Sprintf("%s-%d", pg.Name, i) + port, ok := svcToNodePorts[nodePortSvcName] if !ok { return nil, fmt.Errorf("could not find configured NodePort for ProxyGroup replica %q", replicaName) } - endpoints[replicaName], err = r.findStaticEndpoints(ctx, existingCfgSecret, proxyClass, port, logger) + endpoints[nodePortSvcName], err = r.findStaticEndpoints(ctx, existingCfgSecret, proxyClass, port, logger) if err != nil { return nil, fmt.Errorf("could not find static endpoints for replica %q: %w", replicaName, err) } @@ -711,7 +702,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p return nil, err } - configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[replicaName], existingAdvertiseServices, r.loginServer) + configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[nodePortSvcName], existingAdvertiseServices, r.loginServer) if err != nil { return nil, fmt.Errorf("error creating tailscaled config: %w", err) } @@ -910,16 +901,14 @@ func extractAdvertiseServicesConfig(cfgSecret *corev1.Secret) ([]string, error) return conf.AdvertiseServices, nil } -func (r *ProxyGroupReconciler) validate(_ *tsapi.ProxyGroup) error { - return nil -} - // getNodeMetadata gets metadata for all the pods owned by this ProxyGroup by // querying their state Secrets. It may not return the same number of items as // specified in the ProxyGroup spec if e.g. it is getting scaled up or down, or // some pods have failed to write state. +// +// The returned metadata will contain an entry for each state Secret that exists. func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.ProxyGroup) (metadata []nodeMetadata, _ error) { - // List all state secrets owned by this ProxyGroup. + // List all state Secrets owned by this ProxyGroup. secrets := &corev1.SecretList{} if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, "state"))); err != nil { return nil, fmt.Errorf("failed to list state Secrets: %w", err) @@ -930,20 +919,20 @@ func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr return nil, fmt.Errorf("unexpected secret %s was labelled as owned by the ProxyGroup %s: %w", secret.Name, pg.Name, err) } + nm := nodeMetadata{ + ordinal: ordinal, + stateSecret: &secret, + } + prefs, ok, err := getDevicePrefs(&secret) if err != nil { return nil, err } - if !ok { - continue + if ok { + nm.tsID = prefs.Config.NodeID + nm.dnsName = prefs.Config.UserProfile.LoginName } - nm := nodeMetadata{ - ordinal: ordinal, - stateSecret: &secret, - tsID: prefs.Config.NodeID, - dnsName: prefs.Config.UserProfile.LoginName, - } pod := &corev1.Pod{} if err := r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: fmt.Sprintf("%s-%d", pg.Name, ordinal)}, pod); err != nil && !apierrors.IsNotFound(err) { return nil, err @@ -953,23 +942,36 @@ func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr metadata = append(metadata, nm) } + // Sort for predictable ordering and status. 
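+	// Without a stable order, List results could make Status.Devices churn
+	// between reconciles even when nothing has changed.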
+ sort.Slice(metadata, func(i, j int) bool { + return metadata[i].ordinal < metadata[j].ordinal + }) + return metadata, nil } -func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, staticEndpoints map[string][]netip.AddrPort, pg *tsapi.ProxyGroup) (devices []tsapi.TailnetDevice, _ error) { +// getRunningProxies will return status for all proxy Pods whose state Secret +// has an up to date Pod UID and at least a hostname. +func (r *ProxyGroupReconciler) getRunningProxies(ctx context.Context, pg *tsapi.ProxyGroup, staticEndpoints map[string][]netip.AddrPort) (devices []tsapi.TailnetDevice, _ error) { metadata, err := r.getNodeMetadata(ctx, pg) if err != nil { return nil, err } - for _, m := range metadata { - if !strings.EqualFold(string(m.stateSecret.Data[kubetypes.KeyPodUID]), m.podUID) { + for i, m := range metadata { + if m.podUID == "" || !strings.EqualFold(string(m.stateSecret.Data[kubetypes.KeyPodUID]), m.podUID) { // Current Pod has not yet written its UID to the state Secret, data may // be stale. continue } device := tsapi.TailnetDevice{} + if hostname, _, ok := strings.Cut(string(m.stateSecret.Data[kubetypes.KeyDeviceFQDN]), "."); ok { + device.Hostname = hostname + } else { + continue + } + if ipsB := m.stateSecret.Data[kubetypes.KeyDeviceIPs]; len(ipsB) > 0 { ips := []string{} if err := json.Unmarshal(ipsB, &ips); err != nil { @@ -978,11 +980,10 @@ func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, staticEndpoint device.TailnetIPs = ips } - if hostname, _, ok := strings.Cut(string(m.stateSecret.Data[kubetypes.KeyDeviceFQDN]), "."); ok { - device.Hostname = hostname - } - - if ep, ok := staticEndpoints[device.Hostname]; ok && len(ep) > 0 { + // TODO(tomhjp): This is our input to the proxy, but we should instead + // read this back from the proxy's state in some way to more accurately + // reflect its status. + if ep, ok := staticEndpoints[pgNodePortServiceName(pg.Name, int32(i))]; ok && len(ep) > 0 { eps := make([]string, 0, len(ep)) for _, e := range ep { eps = append(eps, e.String()) @@ -999,13 +1000,28 @@ func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, staticEndpoint type nodeMetadata struct { ordinal int stateSecret *corev1.Secret - // podUID is the UID of the current Pod or empty if the Pod does not exist. - podUID string - tsID tailcfg.StableNodeID - dnsName string + podUID string // or empty if the Pod no longer exists. + tsID tailcfg.StableNodeID + dnsName string +} + +func (r *ProxyGroupReconciler) notReady(reason, msg string) (map[string][]netip.AddrPort, *notReadyReason, error) { + return nil, ¬ReadyReason{ + reason: reason, + message: msg, + }, nil +} + +func (r *ProxyGroupReconciler) notReadyErrf(pg *tsapi.ProxyGroup, format string, a ...any) (map[string][]netip.AddrPort, *notReadyReason, error) { + err := fmt.Errorf(format, a...) 
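+	// Emit a warning event so the failure is visible on the ProxyGroup via
+	// `kubectl describe`, and return the same message for the Ready condition.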
+ r.recorder.Event(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error()) + return nil, ¬ReadyReason{ + reason: reasonProxyGroupCreationFailed, + message: err.Error(), + }, err } -func (pr *ProxyGroupReconciler) setStatusReady(pg *tsapi.ProxyGroup, status metav1.ConditionStatus, reason string, msg string, logger *zap.SugaredLogger) { - pr.recorder.Eventf(pg, corev1.EventTypeWarning, reason, msg) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, status, reason, msg, pg.Generation, pr.clock, logger) +type notReadyReason struct { + reason string + message string } diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 87b04a434..bd69b49a8 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -6,7 +6,6 @@ package main import ( - "context" "encoding/json" "fmt" "net/netip" @@ -22,6 +21,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" @@ -207,7 +207,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { }, }, expectedIPs: []netip.Addr{}, - expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning ProxyGroup resources: error provisioning NodePort Services for static endpoints: failed to allocate NodePorts to ProxyGroup Services: not enough available ports to allocate all replicas (needed 4, got 3). Field 'spec.staticEndpoints.nodePort.ports' on ProxyClass \"default-pc\" must have bigger range allocated"}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning NodePort Services for static endpoints: failed to allocate NodePorts to ProxyGroup Services: not enough available ports to allocate all replicas (needed 4, got 3). 
Field 'spec.staticEndpoints.nodePort.ports' on ProxyClass \"default-pc\" must have bigger range allocated"}, expectedErr: "", expectStatefulSet: false, }, @@ -265,7 +265,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { {name: "node2", addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeInternalIP}}, labels: map[string]string{"zone": "eu-central"}}, }, expectedIPs: []netip.Addr{}, - expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning ProxyGroup resources: error provisioning config Secrets: could not find static endpoints for replica \"test-0-nodeport\": failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass \"default-pc\""}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning config Secrets: could not find static endpoints for replica \"test-0\": failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass \"default-pc\""}, expectedErr: "", expectStatefulSet: false, }, @@ -309,7 +309,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { }, }, expectedIPs: []netip.Addr{}, - expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning ProxyGroup resources: error provisioning config Secrets: could not find static endpoints for replica \"test-0-nodeport\": failed to find any `status.addresses` of type \"ExternalIP\" on nodes using configured Selectors on `spec.staticEndpoints.nodePort.selectors` for ProxyClass \"default-pc\""}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning config Secrets: could not find static endpoints for replica \"test-0\": failed to find any `status.addresses` of type \"ExternalIP\" on nodes using configured Selectors on `spec.staticEndpoints.nodePort.selectors` for ProxyClass \"default-pc\""}, expectedErr: "", expectStatefulSet: false, }, @@ -576,7 +576,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { }, }, expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, - expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning ProxyGroup resources: error provisioning config Secrets: could not find static endpoints for replica \"test-0-nodeport\": failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass \"default-pc\""}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning config Secrets: could not find static endpoints for replica \"test-0\": failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass \"default-pc\""}, expectStatefulSet: true, }, }, @@ -659,7 +659,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { Address: addr.ip, }) } - if err := fc.Create(context.Background(), no); err != nil { + if err := fc.Create(t.Context(), no); err != nil { t.Fatalf("failed to create node %q: %v", n.name, err) } createdNodes = append(createdNodes, *no) @@ -670,11 +670,11 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { pg.Spec.Replicas = r.replicas pc.Spec.StaticEndpoints = r.staticEndpointConfig - createOrUpdate(context.Background(), fc, "", pg, func(o *tsapi.ProxyGroup) { + createOrUpdate(t.Context(), fc, "", pg, func(o *tsapi.ProxyGroup) { o.Spec.Replicas = pg.Spec.Replicas }) - createOrUpdate(context.Background(), fc, "", pc, func(o *tsapi.ProxyClass) { + createOrUpdate(t.Context(), fc, "", pc, func(o *tsapi.ProxyClass) { 
o.Spec.StaticEndpoints = pc.Spec.StaticEndpoints }) @@ -686,7 +686,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { expectEvents(t, fr, r.expectedEvents) sts := &appsv1.StatefulSet{} - err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts) + err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts) if r.expectStatefulSet { if err != nil { t.Fatalf("failed to get StatefulSet: %v", err) @@ -694,7 +694,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { for j := range 2 { sec := &corev1.Secret{} - if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: fmt.Sprintf("%s-%d-config", pg.Name, j)}, sec); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: fmt.Sprintf("%s-%d-config", pg.Name, j)}, sec); err != nil { t.Fatalf("failed to get state Secret for replica %d: %v", j, err) } @@ -740,7 +740,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { } pgroup := &tsapi.ProxyGroup{} - err = fc.Get(context.Background(), client.ObjectKey{Name: pg.Name}, pgroup) + err = fc.Get(t.Context(), client.ObjectKey{Name: pg.Name}, pgroup) if err != nil { t.Fatalf("failed to get ProxyGroup %q: %v", pg.Name, err) } @@ -762,7 +762,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { // node cleanup between reconciles // we created a new set of nodes for each for _, n := range createdNodes { - err := fc.Delete(context.Background(), &n) + err := fc.Delete(t.Context(), &n) if err != nil && !apierrors.IsNotFound(err) { t.Fatalf("failed to delete node: %v", err) } @@ -784,14 +784,14 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { clock: cl, } - if err := fc.Delete(context.Background(), pg); err != nil { + if err := fc.Delete(t.Context(), pg); err != nil { t.Fatalf("error deleting ProxyGroup: %v", err) } expectReconciled(t, reconciler, "", pg.Name) expectMissing[tsapi.ProxyGroup](t, fc, "", pg.Name) - if err := fc.Delete(context.Background(), pc); err != nil { + if err := fc.Delete(t.Context(), pc); err != nil { t.Fatalf("error deleting ProxyClass: %v", err) } expectMissing[tsapi.ProxyClass](t, fc, "", pc.Name) @@ -855,7 +855,8 @@ func TestProxyGroup(t *testing.T) { t.Run("proxyclass_not_ready", func(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass default-pc is not yet in a ready state, waiting...", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass \"default-pc\" is not yet in a ready state, waiting...", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, false, pc) }) @@ -870,7 +871,7 @@ func TestProxyGroup(t *testing.T) { LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, }}, } - if err := fc.Status().Update(context.Background(), pc); err != nil { + if err := fc.Status().Update(t.Context(), pc); err != nil { t.Fatal(err) } @@ -978,7 +979,7 @@ func TestProxyGroup(t *testing.T) { }) t.Run("delete_and_cleanup", func(t *testing.T) { - if err := fc.Delete(context.Background(), pg); err != nil { + if err := fc.Delete(t.Context(), pg); err != nil { t.Fatal(err) } @@ -1049,7 
+1050,7 @@ func TestProxyGroupTypes(t *testing.T) { verifyProxyGroupCounts(t, reconciler, 0, 1) sts := &appsv1.StatefulSet{} - if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { t.Fatalf("failed to get StatefulSet: %v", err) } verifyEnvVar(t, sts, "TS_INTERNAL_APP", kubetypes.AppProxyGroupEgress) @@ -1059,7 +1060,7 @@ func TestProxyGroupTypes(t *testing.T) { // Verify that egress configuration has been set up. cm := &corev1.ConfigMap{} cmName := fmt.Sprintf("%s-egress-config", pg.Name) - if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: cmName}, cm); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: cmName}, cm); err != nil { t.Fatalf("failed to get ConfigMap: %v", err) } @@ -1135,7 +1136,7 @@ func TestProxyGroupTypes(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) sts := &appsv1.StatefulSet{} - if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { t.Fatalf("failed to get StatefulSet: %v", err) } @@ -1155,7 +1156,7 @@ func TestProxyGroupTypes(t *testing.T) { Replicas: ptr.To[int32](0), }, } - if err := fc.Create(context.Background(), pg); err != nil { + if err := fc.Create(t.Context(), pg); err != nil { t.Fatal(err) } @@ -1163,7 +1164,7 @@ func TestProxyGroupTypes(t *testing.T) { verifyProxyGroupCounts(t, reconciler, 1, 2) sts := &appsv1.StatefulSet{} - if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { t.Fatalf("failed to get StatefulSet: %v", err) } verifyEnvVar(t, sts, "TS_INTERNAL_APP", kubetypes.AppProxyGroupIngress) @@ -1306,7 +1307,7 @@ func proxyClassesForLEStagingTest() (*tsapi.ProxyClass, *tsapi.ProxyClass, *tsap func setProxyClassReady(t *testing.T, fc client.Client, cl *tstest.Clock, name string) *tsapi.ProxyClass { t.Helper() pc := &tsapi.ProxyClass{} - if err := fc.Get(context.Background(), client.ObjectKey{Name: name}, pc); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Name: name}, pc); err != nil { t.Fatal(err) } pc.Status = tsapi.ProxyClassStatus{ @@ -1319,7 +1320,7 @@ func setProxyClassReady(t *testing.T, fc client.Client, cl *tstest.Clock, name s ObservedGeneration: pc.Generation, }}, } - if err := fc.Status().Update(context.Background(), pc); err != nil { + if err := fc.Status().Update(t.Context(), pc); err != nil { t.Fatal(err) } return pc @@ -1398,7 +1399,7 @@ func expectSecrets(t *testing.T, fc client.WithWatch, expected []string) { t.Helper() secrets := &corev1.SecretList{} - if err := fc.List(context.Background(), secrets); err != nil { + if err := fc.List(t.Context(), secrets); err != nil { t.Fatal(err) } @@ -1413,6 +1414,7 @@ func expectSecrets(t *testing.T, fc client.WithWatch, expected []string) { } func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup) { + t.Helper() const key = "profile-abc" for i := range pgReplicas(pg) { bytes, err := json.Marshal(map[string]any{ @@ -1424,6 +1426,17 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG t.Fatal(err) } + podUID := fmt.Sprintf("pod-uid-%d", 
i) + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", pg.Name, i), + Namespace: "tailscale", + UID: types.UID(podUID), + }, + } + if _, err := createOrUpdate(t.Context(), fc, "tailscale", pod, nil); err != nil { + t.Fatalf("failed to create or update Pod %s: %v", pod.Name, err) + } mustUpdate(t, fc, tsNamespace, fmt.Sprintf("test-%d", i), func(s *corev1.Secret) { s.Data = map[string][]byte{ currentProfileKey: []byte(key), @@ -1433,6 +1446,7 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG // TODO(tomhjp): We have two different mechanisms to retrieve device IDs. // Consolidate on this one. kubetypes.KeyDeviceID: []byte(fmt.Sprintf("nodeid-%d", i)), + kubetypes.KeyPodUID: []byte(podUID), } }) } @@ -1512,7 +1526,7 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) { // Verify that the StatefulSet created for ProxyGrup has // the expected setting for the staging endpoint. sts := &appsv1.StatefulSet{} - if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { t.Fatalf("failed to get StatefulSet: %v", err) } diff --git a/k8s-operator/api.md b/k8s-operator/api.md index aba5f9e2d..18bf1cb50 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -658,7 +658,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the ProxyGroup
                resources. Known condition types are `ProxyGroupReady`. | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the ProxyGroup
                resources. Known condition types are `ProxyGroupReady`, `ProxyGroupAvailable`.
                `ProxyGroupReady` indicates all ProxyGroup resources are fully reconciled
                and ready. `ProxyGroupAvailable` indicates that at least one proxy is
                ready to serve traffic. | | | | `devices` _[TailnetDevice](#tailnetdevice) array_ | List of tailnet devices associated with the ProxyGroup StatefulSet. | | | diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index 17b13064b..5edb47f0d 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -88,7 +88,11 @@ type ProxyGroupSpec struct { type ProxyGroupStatus struct { // List of status conditions to indicate the status of the ProxyGroup - // resources. Known condition types are `ProxyGroupReady`. + // resources. Known condition types are `ProxyGroupReady`, `ProxyGroupAvailable`. + // `ProxyGroupReady` indicates all ProxyGroup resources are fully reconciled + // and ready. `ProxyGroupAvailable` indicates that at least one proxy is + // ready to serve traffic. + // // +listType=map // +listMapKey=type // +optional From 4f3355e4997500cef05a7189e6a325c8a687730e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 6 Jul 2025 22:25:18 -0600 Subject: [PATCH 1041/1708] .github: Bump github/codeql-action from 3.29.0 to 3.29.1 (#16423) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.0 to 3.29.1. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/ce28f5bb42b7a9f2c824e633a3f6ee835bab6858...39edc492dbe16b1465b0cafca41432d857bdb31a) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 2b471e943..610b93b61 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/init@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/autobuild@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/analyze@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 From 84eac7b8de99e0d6bad73f2b7998ede7228f2a2a Mon Sep 17 00:00:00 2001 From: David Bond Date: Mon, 7 Jul 2025 12:12:59 +0100 Subject: [PATCH 1042/1708] cmd/k8s-operator: Allow custom ingress class names (#16472) This commit modifies the k8s operator to allow for customisation of the ingress class name via a new `OPERATOR_INGRESS_CLASS_NAME` environment variable. For backwards compatibility, this defaults to `tailscale`. When using helm, a new `ingress.name` value is provided that will set this environment variable and modify the name of the deployed `IngressClass` resource. Fixes https://github.com/tailscale/tailscale/issues/16248 Signed-off-by: David Bond --- .../deploy/chart/templates/deployment.yaml | 2 + .../deploy/chart/templates/ingressclass.yaml | 2 +- cmd/k8s-operator/deploy/chart/values.yaml | 4 ++ .../deploy/manifests/operator.yaml | 2 + cmd/k8s-operator/ingress-for-pg.go | 21 ++++++----- cmd/k8s-operator/ingress-for-pg_test.go | 22 +++++++---- cmd/k8s-operator/ingress.go | 10 ++--- cmd/k8s-operator/ingress_test.go | 23 ++++++++---- cmd/k8s-operator/operator.go | 37 +++++++++++-------- cmd/k8s-operator/operator_test.go | 12 +++--- 10 files changed, 83 insertions(+), 52 deletions(-) diff --git a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml index 01a290c07..51d0a88c3 100644 --- a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml @@ -70,6 +70,8 @@ spec: fieldPath: metadata.namespace - name: OPERATOR_LOGIN_SERVER value: {{ .Values.loginServer }} + - name: OPERATOR_INGRESS_CLASS_NAME + value: {{ .Values.ingressClass.name }} - name: CLIENT_ID_FILE value: /oauth/client_id - name: CLIENT_SECRET_FILE diff --git a/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml b/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml index 208d58ee1..54851955d 100644 --- a/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml @@ -2,7 +2,7 @@ apiVersion: networking.k8s.io/v1 kind: IngressClass metadata: - name: tailscale # class name currently can not be changed + name: {{ .Values.ingressClass.name }} annotations: {} # we do not support default IngressClass annotation https://kubernetes.io/docs/concepts/services-networking/ingress/#default-ingress-class spec: controller: tailscale.com/ts-ingress # controller name currently can not be changed diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index 0ba8d045a..2926f6d07 100644 --- a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -77,6 +77,10 @@ operatorConfig: # In the case that you already have a tailscale ingressclass in your cluster (or vcluster), you can disable the creation here ingressClass: + # Allows for customization of the ingress class name used by the operator to identify ingresses to reconcile. This does + # not allow multiple operator instances to manage different ingresses, but provides an onboarding route for users that + # may have previously set up ingress classes named "tailscale" prior to using the operator. 
+ name: "tailscale" enabled: true # proxyConfig contains configuraton that will be applied to any ingress/egress diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 288857569..cdf301318 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -5129,6 +5129,8 @@ spec: fieldPath: metadata.namespace - name: OPERATOR_LOGIN_SERVER value: null + - name: OPERATOR_INGRESS_CLASS_NAME + value: tailscale - name: CLIENT_ID_FILE value: /oauth/client_id - name: CLIENT_SECRET_FILE diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 09417fd0c..79bad92be 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -68,14 +68,15 @@ var gaugePGIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressPGRes type HAIngressReconciler struct { client.Client - recorder record.EventRecorder - logger *zap.SugaredLogger - tsClient tsClient - tsnetServer tsnetServer - tsNamespace string - lc localClient - defaultTags []string - operatorID string // stableID of the operator's Tailscale device + recorder record.EventRecorder + logger *zap.SugaredLogger + tsClient tsClient + tsnetServer tsnetServer + tsNamespace string + lc localClient + defaultTags []string + operatorID string // stableID of the operator's Tailscale device + ingressClassName string mu sync.Mutex // protects following // managedIngresses is a set of all ingress resources that we're currently @@ -162,7 +163,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) } - if err := validateIngressClass(ctx, r.Client); err != nil { + if err := validateIngressClass(ctx, r.Client, r.ingressClassName); err != nil { logger.Infof("error validating tailscale IngressClass: %v.", err) return false, nil } @@ -645,7 +646,7 @@ func (r *HAIngressReconciler) tailnetCertDomain(ctx context.Context) (string, er func (r *HAIngressReconciler) shouldExpose(ing *networkingv1.Ingress) bool { isTSIngress := ing != nil && ing.Spec.IngressClassName != nil && - *ing.Spec.IngressClassName == tailscaleIngressClassName + *ing.Spec.IngressClassName == r.ingressClassName pgAnnot := ing.Annotations[AnnotationProxyGroup] return isTSIngress && pgAnnot != "" } diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 2308514f3..d29368cae 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -438,7 +438,12 @@ func TestValidateIngress(t *testing.T) { WithObjects(tt.ing). WithLists(&networkingv1.IngressList{Items: tt.existingIngs}). 
Build() + r := &HAIngressReconciler{Client: fc} + if tt.ing.Spec.IngressClassName != nil { + r.ingressClassName = *tt.ing.Spec.IngressClassName + } + err := r.validateIngress(context.Background(), tt.ing, tt.pg) if (err == nil && tt.wantErr != "") || (err != nil && err.Error() != tt.wantErr) { t.Errorf("validateIngress() error = %v, wantErr %v", err, tt.wantErr) @@ -841,14 +846,15 @@ func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeT } ingPGR := &HAIngressReconciler{ - Client: fc, - tsClient: ft, - defaultTags: []string{"tag:k8s"}, - tsNamespace: "operator-ns", - tsnetServer: fakeTsnetServer, - logger: zl.Sugar(), - recorder: record.NewFakeRecorder(10), - lc: lc, + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + tsNamespace: "operator-ns", + tsnetServer: fakeTsnetServer, + logger: zl.Sugar(), + recorder: record.NewFakeRecorder(10), + lc: lc, + ingressClassName: tsIngressClass.Name, } return ingPGR, fc, ft diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index d62770938..d66cf9116 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -32,7 +32,6 @@ import ( ) const ( - tailscaleIngressClassName = "tailscale" // ingressClass.metadata.name for tailscale IngressClass resource tailscaleIngressControllerName = "tailscale.com/ts-ingress" // ingressClass.spec.controllerName for tailscale IngressClass resource ingressClassDefaultAnnotation = "ingressclass.kubernetes.io/is-default-class" // we do not support this https://kubernetes.io/docs/concepts/services-networking/ingress/#default-ingress-class indexIngressProxyClass = ".metadata.annotations.ingress-proxy-class" @@ -52,6 +51,7 @@ type IngressReconciler struct { managedIngresses set.Slice[types.UID] defaultProxyClass string + ingressClassName string } var ( @@ -132,7 +132,7 @@ func (a *IngressReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare // This function adds a finalizer to ing, ensuring that we can handle orderly // deprovisioning later. func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.SugaredLogger, ing *networkingv1.Ingress) error { - if err := validateIngressClass(ctx, a.Client); err != nil { + if err := validateIngressClass(ctx, a.Client, a.ingressClassName); err != nil { logger.Warnf("error validating tailscale IngressClass: %v. In future this might be a terminal error.", err) } if !slices.Contains(ing.Finalizers, FinalizerName) { @@ -266,17 +266,17 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga func (a *IngressReconciler) shouldExpose(ing *networkingv1.Ingress) bool { return ing != nil && ing.Spec.IngressClassName != nil && - *ing.Spec.IngressClassName == tailscaleIngressClassName && + *ing.Spec.IngressClassName == a.ingressClassName && ing.Annotations[AnnotationProxyGroup] == "" } // validateIngressClass attempts to validate that 'tailscale' IngressClass // included in Tailscale installation manifests exists and has not been modified // to attempt to enable features that we do not support. 
-func validateIngressClass(ctx context.Context, cl client.Client) error { +func validateIngressClass(ctx context.Context, cl client.Client, ingressClassName string) error { ic := &networkingv1.IngressClass{ ObjectMeta: metav1.ObjectMeta{ - Name: tailscaleIngressClassName, + Name: ingressClassName, }, } if err := cl.Get(ctx, client.ObjectKeyFromObject(ic), ic); apierrors.IsNotFound(err) { diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index e4396eb10..fe4d90c78 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -36,7 +36,8 @@ func TestTailscaleIngress(t *testing.T) { t.Fatal(err) } ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, @@ -120,7 +121,8 @@ func TestTailscaleIngressHostname(t *testing.T) { t.Fatal(err) } ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, @@ -245,7 +247,8 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { t.Fatal(err) } ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, @@ -350,7 +353,8 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { t.Fatal(err) } ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, @@ -498,7 +502,8 @@ func TestIngressProxyClassAnnotation(t *testing.T) { mustCreate(t, fc, ing) ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: &fakeTSClient{}, @@ -568,7 +573,8 @@ func TestIngressLetsEncryptStaging(t *testing.T) { mustCreate(t, fc, ing) ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: &fakeTSClient{}, @@ -675,8 +681,9 @@ func TestEmptyPath(t *testing.T) { t.Fatal(err) } ingR := &IngressReconciler{ - recorder: fr, - Client: fc, + recorder: fr, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 276de411c..96b3b37ad 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -83,6 +83,7 @@ func main() { defaultProxyClass = defaultEnv("PROXY_DEFAULT_CLASS", "") isDefaultLoadBalancer = defaultBool("OPERATOR_DEFAULT_LOAD_BALANCER", false) loginServer = strings.TrimSuffix(defaultEnv("OPERATOR_LOGIN_SERVER", ""), "/") + ingressClassName = defaultEnv("OPERATOR_INGRESS_CLASS_NAME", "tailscale") ) var opts []kzap.Opts @@ -133,6 +134,7 @@ func main() { proxyFirewallMode: tsFirewallMode, defaultProxyClass: defaultProxyClass, loginServer: loginServer, + ingressClassName: ingressClassName, } runReconcilers(rOpts) } @@ -343,7 +345,7 @@ func runReconcilers(opts reconcilerOpts) { // ProxyClass's name. proxyClassFilterForIngress := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForIngress(mgr.GetClient(), startlog)) // Enque Ingress if a managed Service or backend Service associated with a tailscale Ingress changes. 
- svcHandlerForIngress := handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngress(mgr.GetClient(), startlog)) + svcHandlerForIngress := handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngress(mgr.GetClient(), startlog, opts.ingressClassName)) err = builder. ControllerManagedBy(mgr). For(&networkingv1.Ingress{}). @@ -358,6 +360,7 @@ func runReconcilers(opts reconcilerOpts) { Client: mgr.GetClient(), logger: opts.log.Named("ingress-reconciler"), defaultProxyClass: opts.defaultProxyClass, + ingressClassName: opts.ingressClassName, }) if err != nil { startlog.Fatalf("could not create ingress reconciler: %v", err) @@ -379,19 +382,20 @@ func runReconcilers(opts reconcilerOpts) { ControllerManagedBy(mgr). For(&networkingv1.Ingress{}). Named("ingress-pg-reconciler"). - Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog))). + Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog, opts.ingressClassName))). Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(HAIngressesFromSecret(mgr.GetClient(), startlog))). Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). Complete(&HAIngressReconciler{ - recorder: eventRecorder, - tsClient: opts.tsClient, - tsnetServer: opts.tsServer, - defaultTags: strings.Split(opts.proxyTags, ","), - Client: mgr.GetClient(), - logger: opts.log.Named("ingress-pg-reconciler"), - lc: lc, - operatorID: id, - tsNamespace: opts.tailscaleNamespace, + recorder: eventRecorder, + tsClient: opts.tsClient, + tsnetServer: opts.tsServer, + defaultTags: strings.Split(opts.proxyTags, ","), + Client: mgr.GetClient(), + logger: opts.log.Named("ingress-pg-reconciler"), + lc: lc, + operatorID: id, + tsNamespace: opts.tailscaleNamespace, + ingressClassName: opts.ingressClassName, }) if err != nil { startlog.Fatalf("could not create ingress-pg-reconciler: %v", err) @@ -697,6 +701,9 @@ type reconcilerOpts struct { defaultProxyClass string // loginServer is the coordination server URL that should be used by managed resources. loginServer string + // ingressClassName is the name of the ingress class used by reconcilers of Ingress resources. This defaults + // to "tailscale" but can be customised. 
+ ingressClassName string } // enqueueAllIngressEgressProxySvcsinNS returns a reconcile request for each @@ -1015,7 +1022,7 @@ func proxyClassHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) // The Services of interest are backend Services for tailscale Ingress and // managed Services for an StatefulSet for a proxy configured for tailscale // Ingress -func serviceHandlerForIngress(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { +func serviceHandlerForIngress(cl client.Client, logger *zap.SugaredLogger, ingressClassName string) handler.MapFunc { return func(ctx context.Context, o client.Object) []reconcile.Request { if isManagedByType(o, "ingress") { ingName := parentFromObjectLabels(o) @@ -1028,7 +1035,7 @@ func serviceHandlerForIngress(cl client.Client, logger *zap.SugaredLogger) handl } reqs := make([]reconcile.Request, 0) for _, ing := range ingList.Items { - if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != tailscaleIngressClassName { + if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != ingressClassName { return nil } if hasProxyGroupAnnotation(&ing) { @@ -1533,7 +1540,7 @@ func indexPGIngresses(o client.Object) []string { // serviceHandlerForIngressPG returns a handler for Service events that ensures that if the Service // associated with an event is a backend Service for a tailscale Ingress with ProxyGroup annotation, // the associated Ingress gets reconciled. -func serviceHandlerForIngressPG(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { +func serviceHandlerForIngressPG(cl client.Client, logger *zap.SugaredLogger, ingressClassName string) handler.MapFunc { return func(ctx context.Context, o client.Object) []reconcile.Request { ingList := networkingv1.IngressList{} if err := cl.List(ctx, &ingList, client.InNamespace(o.GetNamespace())); err != nil { @@ -1542,7 +1549,7 @@ func serviceHandlerForIngressPG(cl client.Client, logger *zap.SugaredLogger) han } reqs := make([]reconcile.Request, 0) for _, ing := range ingList.Items { - if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != tailscaleIngressClassName { + if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != ingressClassName { continue } if !hasProxyGroupAnnotation(&ing) { diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index a9f08c18b..1f700f13a 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1549,6 +1549,8 @@ func Test_isMagicDNSName(t *testing.T) { } func Test_serviceHandlerForIngress(t *testing.T) { + const tailscaleIngressClassName = "tailscale" + fc := fake.NewFakeClient() zl, err := zap.NewDevelopment() if err != nil { @@ -1578,7 +1580,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { } mustCreate(t, fc, svc1) wantReqs := []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: "ns-1", Name: "ing-1"}}} - gotReqs := serviceHandlerForIngress(fc, zl.Sugar())(context.Background(), svc1) + gotReqs := serviceHandlerForIngress(fc, zl.Sugar(), tailscaleIngressClassName)(context.Background(), svc1) if diff := cmp.Diff(gotReqs, wantReqs); diff != "" { t.Fatalf("unexpected reconcile requests (-got +want):\n%s", diff) } @@ -1605,7 +1607,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { } mustCreate(t, fc, backendSvc) wantReqs = []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: "ns-2", Name: "ing-2"}}} - gotReqs = serviceHandlerForIngress(fc, zl.Sugar())(context.Background(), backendSvc) + gotReqs = 
serviceHandlerForIngress(fc, zl.Sugar(), tailscaleIngressClassName)(context.Background(), backendSvc) if diff := cmp.Diff(gotReqs, wantReqs); diff != "" { t.Fatalf("unexpected reconcile requests (-got +want):\n%s", diff) } @@ -1634,7 +1636,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { } mustCreate(t, fc, backendSvc2) wantReqs = []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: "ns-3", Name: "ing-3"}}} - gotReqs = serviceHandlerForIngress(fc, zl.Sugar())(context.Background(), backendSvc2) + gotReqs = serviceHandlerForIngress(fc, zl.Sugar(), tailscaleIngressClassName)(context.Background(), backendSvc2) if diff := cmp.Diff(gotReqs, wantReqs); diff != "" { t.Fatalf("unexpected reconcile requests (-got +want):\n%s", diff) } @@ -1661,7 +1663,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { }, } mustCreate(t, fc, nonTSBackend) - gotReqs = serviceHandlerForIngress(fc, zl.Sugar())(context.Background(), nonTSBackend) + gotReqs = serviceHandlerForIngress(fc, zl.Sugar(), tailscaleIngressClassName)(context.Background(), nonTSBackend) if len(gotReqs) > 0 { t.Errorf("unexpected reconcile request for a Service that does not belong to a Tailscale Ingress: %#+v\n", gotReqs) } @@ -1675,7 +1677,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { }, } mustCreate(t, fc, someSvc) - gotReqs = serviceHandlerForIngress(fc, zl.Sugar())(context.Background(), someSvc) + gotReqs = serviceHandlerForIngress(fc, zl.Sugar(), tailscaleIngressClassName)(context.Background(), someSvc) if len(gotReqs) > 0 { t.Errorf("unexpected reconcile request for a Service that does not belong to any Ingress: %#+v\n", gotReqs) } From 540eb0563803e86fd08369d242e0aff4db5fee32 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 7 Jul 2025 08:45:13 -0700 Subject: [PATCH 1043/1708] wgengine/magicsock: make Conn.Send() lazyEndpoint aware (#16465) A lazyEndpoint may end up on this TX codepath when wireguard-go is deemed "under load" and ends up transmitting a cookie reply using the received conn.Endpoint. Updates tailscale/corp#20732 Updates tailscale/corp#30042 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 5719b20f9..8d3b2d082 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1363,12 +1363,18 @@ func (c *Conn) Send(buffs [][]byte, ep conn.Endpoint, offset int) (err error) { metricSendDataNetworkDown.Add(n) return errNetworkDown } - if ep, ok := ep.(*endpoint); ok { + switch ep := ep.(type) { + case *endpoint: return ep.send(buffs, offset) + case *lazyEndpoint: + // A [*lazyEndpoint] may end up on this TX codepath when wireguard-go is + // deemed "under handshake load" and ends up transmitting a cookie reply + // using the received [conn.Endpoint] in [device.SendHandshakeCookie]. + if ep.src.ap.Addr().Is6() { + return c.pconn6.WriteBatchTo(buffs, ep.src, offset) + } + return c.pconn4.WriteBatchTo(buffs, ep.src, offset) } - // If it's not of type *endpoint, it's probably *lazyEndpoint, which means - // we don't actually know who the peer is and we're waiting for wireguard-go - // to switch the endpoint. See go/corp/20732. return nil } @@ -1702,6 +1708,11 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach } // TODO(jwhited): reuse [lazyEndpoint] across calls to receiveIP() // for the same batch & [epAddr] src. 
+ // + // TODO(jwhited): implement [lazyEndpoint] integration to call + // [endpoint.noteRecvActivity], which triggers just-in-time + // wireguard-go configuration of the peer, prior to peer lookup + // within wireguard-go. return &lazyEndpoint{c: c, src: src}, size, true } cache.epAddr = src @@ -1709,8 +1720,6 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach cache.gen = de.numStopAndReset() ep = de } - // TODO(jwhited): consider the implications of not recording this receive - // activity due to an early [lazyEndpoint] return above. now := mono.Now() ep.lastRecvUDPAny.StoreAtomic(now) ep.noteRecvActivity(src, now) From 3b32cc758647bde17c9e3fef36086439ba1bb7e8 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 7 Jul 2025 09:38:10 -0700 Subject: [PATCH 1044/1708] wgengine/magicsock: simplify Geneve-encapsulated disco.Ping handling (#16448) Just make [relayManager] always handle it, there's no benefit to checking bestAddr's. Also, remove passing of disco.Pong to [relayManager] in endpoint.handlePongConnLocked(), which is redundant with the callsite in Conn.handleDiscoMessage(). Conn.handleDiscoMessage() already passes to [relayManager] if the txID us not known to any [*endpoint]. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 7 -- wgengine/magicsock/magicsock.go | 129 +++++++++++------------- wgengine/magicsock/relaymanager.go | 7 +- wgengine/magicsock/relaymanager_test.go | 2 +- 4 files changed, 61 insertions(+), 84 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 0569341ff..4780c7f37 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1656,13 +1656,6 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd de.mu.Lock() defer de.mu.Unlock() - if src.vni.isSet() && src != de.bestAddr.epAddr { - // "src" is not our bestAddr, but [relayManager] might be in the - // middle of probing it, awaiting pong reception. Make it aware. - de.c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(de.c, m, di, src) - return false - } - isDerp := src.ap.Addr() == tailcfg.DerpMagicIPAddr sp, ok := de.sentPing[m.TxID] diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 8d3b2d082..37de4668a 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2103,7 +2103,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake c.logf("[unexpected] %T packets should not come from a relay server with Geneve control bit set", dm) return } - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(c, challenge, di, src) + c.relayManager.handleGeneveEncapDiscoMsg(c, challenge, di, src) return } @@ -2125,7 +2125,10 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake return true }) if !knownTxID && src.vni.isSet() { - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(c, dm, di, src) + // If it's an unknown TxID, and it's Geneve-encapsulated, then + // make [relayManager] aware. It might be in the middle of probing + // src. 
+ c.relayManager.handleGeneveEncapDiscoMsg(c, dm, di, src) } case *disco.CallMeMaybe, *disco.CallMeMaybeVia: var via *disco.CallMeMaybeVia @@ -2233,6 +2236,35 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN di.lastPingTime = time.Now() isDerp := src.ap.Addr() == tailcfg.DerpMagicIPAddr + if src.vni.isSet() { + if isDerp { + c.logf("[unexpected] got Geneve-encapsulated disco ping from %v/%v over DERP", src, derpNodeSrc) + return + } + + // [relayManager] is always responsible for handling (replying) to + // Geneve-encapsulated [disco.Ping] messages in the interest of + // simplicity. It might be in the middle of probing src, so it must be + // made aware. + c.relayManager.handleGeneveEncapDiscoMsg(c, dm, di, src) + return + } + + // This is a naked [disco.Ping] without a VNI. + + // If we can figure out with certainty which node key this disco + // message is for, eagerly update our [epAddr]<>node and disco<>node + // mappings to make p2p path discovery faster in simple + // cases. Without this, disco would still work, but would be + // reliant on DERP call-me-maybe to establish the disco<>node + // mapping, and on subsequent disco handlePongConnLocked to establish + // the IP:port<>disco mapping. + if nk, ok := c.unambiguousNodeKeyOfPingLocked(dm, di.discoKey, derpNodeSrc); ok { + if !isDerp { + c.peerMap.setNodeKeyForEpAddr(src, nk) + } + } + // numNodes tracks how many nodes (node keys) are associated with the disco // key tied to this inbound ping. Multiple nodes may share the same disco // key in the case of node sharing and users switching accounts. @@ -2244,81 +2276,34 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN // a dstKey if the dst ip:port is DERP. dstKey := derpNodeSrc - switch { - case src.vni.isSet(): - if isDerp { - c.logf("[unexpected] got Geneve-encapsulated disco ping from %v/%v over DERP", src, derpNodeSrc) - return - } - - var bestEpAddr epAddr - var discoKey key.DiscoPublic - ep, ok := c.peerMap.endpointForEpAddr(src) - if ok { - ep.mu.Lock() - bestEpAddr = ep.bestAddr.epAddr - ep.mu.Unlock() - disco := ep.disco.Load() - if disco != nil { - discoKey = disco.key + // Remember this route if not present. + var dup bool + if isDerp { + if ep, ok := c.peerMap.endpointForNodeKey(derpNodeSrc); ok { + if ep.addCandidateEndpoint(src.ap, dm.TxID) { + return } - } - - if src == bestEpAddr && discoKey == di.discoKey { - // We have an associated endpoint with src as its bestAddr. Set - // numNodes so we TX a pong further down. numNodes = 1 - } else { - // We have no [endpoint] in the [peerMap] for this relay [epAddr] - // using it as a bestAddr. [relayManager] might be in the middle of - // probing it or attempting to set it as best via - // [endpoint.udpRelayEndpointReady()]. Make [relayManager] aware. - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(c, dm, di, src) - return - } - default: // no VNI - // If we can figure out with certainty which node key this disco - // message is for, eagerly update our [epAddr]<>node and disco<>node - // mappings to make p2p path discovery faster in simple - // cases. Without this, disco would still work, but would be - // reliant on DERP call-me-maybe to establish the disco<>node - // mapping, and on subsequent disco handlePongConnLocked to establish - // the IP:port<>disco mapping. 
- if nk, ok := c.unambiguousNodeKeyOfPingLocked(dm, di.discoKey, derpNodeSrc); ok { - if !isDerp { - c.peerMap.setNodeKeyForEpAddr(src, nk) - } } - - // Remember this route if not present. - var dup bool - if isDerp { - if ep, ok := c.peerMap.endpointForNodeKey(derpNodeSrc); ok { - if ep.addCandidateEndpoint(src.ap, dm.TxID) { - return - } - numNodes = 1 - } - } else { - c.peerMap.forEachEndpointWithDiscoKey(di.discoKey, func(ep *endpoint) (keepGoing bool) { - if ep.addCandidateEndpoint(src.ap, dm.TxID) { - dup = true - return false - } - numNodes++ - if numNodes == 1 && dstKey.IsZero() { - dstKey = ep.publicKey - } - return true - }) - if dup { - return + } else { + c.peerMap.forEachEndpointWithDiscoKey(di.discoKey, func(ep *endpoint) (keepGoing bool) { + if ep.addCandidateEndpoint(src.ap, dm.TxID) { + dup = true + return false } - if numNodes > 1 { - // Zero it out if it's ambiguous, so sendDiscoMessage logging - // isn't confusing. - dstKey = key.NodePublic{} + numNodes++ + if numNodes == 1 && dstKey.IsZero() { + dstKey = ep.publicKey } + return true + }) + if dup { + return + } + if numNodes > 1 { + // Zero it out if it's ambiguous, so sendDiscoMessage logging + // isn't confusing. + dstKey = key.NodePublic{} } } diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 1c173c01a..c8c9ed41b 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -325,10 +325,9 @@ func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, lastBest addrQuality, }) } -// handleGeneveEncapDiscoMsgNotBestAddr handles reception of Geneve-encapsulated -// disco messages if they are not associated with any known -// [*endpoint.bestAddr]. -func (r *relayManager) handleGeneveEncapDiscoMsgNotBestAddr(conn *Conn, dm disco.Message, di *discoInfo, src epAddr) { +// handleGeneveEncapDiscoMsg handles reception of Geneve-encapsulated disco +// messages. +func (r *relayManager) handleGeneveEncapDiscoMsg(conn *Conn, dm disco.Message, di *discoInfo, src epAddr) { relayManagerInputEvent(r, nil, &r.rxHandshakeDiscoMsgCh, relayHandshakeDiscoMsgEvent{conn: conn, msg: dm, disco: di.discoKey, from: src.ap, vni: src.vni.get(), at: time.Now()}) } diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index 8feff2f3d..8f9236012 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -26,7 +26,7 @@ func TestRelayManagerInitAndIdle(t *testing.T) { <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleGeneveEncapDiscoMsgNotBestAddr(&Conn{discoPrivate: key.NewDisco()}, &disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, epAddr{}) + rm.handleGeneveEncapDiscoMsg(&Conn{discoPrivate: key.NewDisco()}, &disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, epAddr{}) <-rm.runLoopStoppedCh rm = relayManager{} From a84d58015ce875863a266dacdfb1ffd65de1615d Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 7 Jul 2025 10:06:38 -0700 Subject: [PATCH 1045/1708] wgengine/magicsock: fix lazyEndpoint DstIP() vs SrcIP() (#16453) These were flipped. DstIP() and DstIPBytes() are used internally by wireguard-go as part of a handshake DoS mitigation strategy. 
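For illustration only (not part of this change): a minimal standalone sketch of the corrected shape, where miniEndpoint is a hypothetical stand-in for lazyEndpoint. The source side stays empty until WireGuard decrypts the packet and identifies the peer, while the destination accessors report the remote address the packet arrived from, which is the value the DoS-mitigation path needs.

    package main

    import (
        "fmt"
        "net/netip"
    )

    // miniEndpoint mimics the corrected lazyEndpoint accessors: the peer is
    // unknown until decryption, so only the remote (destination) side is set.
    type miniEndpoint struct{ remote netip.AddrPort }

    func (e miniEndpoint) SrcIP() netip.Addr { return netip.Addr{} } // unknown until decryption
    func (e miniEndpoint) DstIP() netip.Addr { return e.remote.Addr() }
    func (e miniEndpoint) DstToBytes() []byte {
        b, _ := e.remote.MarshalBinary()
        return b
    }

    func main() {
        ep := miniEndpoint{remote: netip.MustParseAddrPort("100.64.0.1:41641")}
        fmt.Println(ep.DstIP(), len(ep.DstToBytes())) // remote addr plus its binary form
    }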
Updates tailscale/corp#20732 Updates tailscale/corp#30042 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 34 +++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 37de4668a..a7eab3678 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3774,12 +3774,12 @@ func (c *Conn) SetLastNetcheckReportForTest(ctx context.Context, report *netchec c.lastNetCheckReport.Store(report) } -// lazyEndpoint is a wireguard conn.Endpoint for when magicsock received a +// lazyEndpoint is a wireguard [conn.Endpoint] for when magicsock received a // non-disco (presumably WireGuard) packet from a UDP address from which we -// can't map to a Tailscale peer. But Wireguard most likely can, once it -// decrypts it. So we implement the conn.PeerAwareEndpoint interface +// can't map to a Tailscale peer. But WireGuard most likely can, once it +// decrypts it. So we implement the [conn.PeerAwareEndpoint] interface // from https://github.com/tailscale/wireguard-go/pull/27 to allow WireGuard -// to tell us who it is later and get the correct conn.Endpoint. +// to tell us who it is later and get the correct [conn.Endpoint]. type lazyEndpoint struct { c *Conn src epAddr @@ -3788,12 +3788,26 @@ type lazyEndpoint struct { var _ conn.PeerAwareEndpoint = (*lazyEndpoint)(nil) var _ conn.Endpoint = (*lazyEndpoint)(nil) -func (le *lazyEndpoint) ClearSrc() {} -func (le *lazyEndpoint) SrcIP() netip.Addr { return le.src.ap.Addr() } -func (le *lazyEndpoint) DstIP() netip.Addr { return netip.Addr{} } -func (le *lazyEndpoint) SrcToString() string { return le.src.String() } -func (le *lazyEndpoint) DstToString() string { return "dst" } -func (le *lazyEndpoint) DstToBytes() []byte { return nil } +func (le *lazyEndpoint) ClearSrc() {} +func (le *lazyEndpoint) SrcIP() netip.Addr { return netip.Addr{} } + +// DstIP returns the remote address of the peer. +// +// Note: DstIP is used internally by wireguard-go as part of handshake DoS +// mitigation. +func (le *lazyEndpoint) DstIP() netip.Addr { return le.src.ap.Addr() } + +func (le *lazyEndpoint) SrcToString() string { return "" } +func (le *lazyEndpoint) DstToString() string { return le.src.String() } + +// DstToBytes returns a binary representation of the remote address of the peer. +// +// Note: DstToBytes is used internally by wireguard-go as part of handshake DoS +// mitigation. +func (le *lazyEndpoint) DstToBytes() []byte { + b, _ := le.src.ap.MarshalBinary() + return b +} // FromPeer implements [conn.PeerAwareEndpoint]. We return a [*lazyEndpoint] in // our [conn.ReceiveFunc]s when we are unable to identify the peer at WireGuard From 04d24cdbd4b551d95f85ca3b9b36ef147503d2b7 Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Mon, 7 Jul 2025 15:36:16 -0400 Subject: [PATCH 1046/1708] wgengine/netstack: correctly proxy half-closed TCP connections TCP connections are two unidirectional data streams, and if one of these streams closes, we should not assume the other half is closed as well. For example, if an HTTP client closes its write half of the connection early, it may still be expecting to receive data on its read half, so we should keep the server -> client half of the connection open, while terminating the client -> server half. Fixes tailscale/corp#29837. 
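As a general illustration of the approach (a sketch assuming plain *net.TCPConn on both ends, not the gonet/netstack connection types this code actually handles; proxyHalfClosed is a made-up helper name):

    package tcpproxy

    import (
        "io"
        "log"
        "net"
        "sync"
    )

    // proxyHalfClosed copies each direction of a proxied TCP connection on its
    // own goroutine. When one direction finishes, only that half is shut down
    // (CloseWrite on the destination, CloseRead on the source), so the other
    // direction keeps flowing until it ends on its own.
    func proxyHalfClosed(client, backend *net.TCPConn) {
        var wg sync.WaitGroup
        copyHalf := func(dst, src *net.TCPConn) {
            defer wg.Done()
            if _, err := io.Copy(dst, src); err != nil {
                log.Printf("copy %v -> %v: %v", src.RemoteAddr(), dst.RemoteAddr(), err)
            }
            dst.CloseWrite() // propagate EOF to the peer reading from dst
            src.CloseRead()  // we will not read from src again
        }
        wg.Add(2)
        go copyHalf(backend, client) // client -> backend
        go copyHalf(client, backend) // backend -> client
        wg.Wait() // both halves done; the caller may now Close both conns
    }

Waiting for both copies before closing the underlying connections is what lets a client that sends an early CloseWrite still receive the remainder of the server's response.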
Signed-off-by: Naman Sood --- wgengine/netstack/netstack.go | 43 ++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index dab692ead..d97c66946 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -1435,6 +1435,13 @@ func (ns *Impl) acceptTCP(r *tcp.ForwarderRequest) { } } +// tcpCloser is an interface to abstract around various TCPConn types that +// allow closing of the read and write streams independently of each other. +type tcpCloser interface { + CloseRead() error + CloseWrite() error +} + func (ns *Impl) forwardTCP(getClient func(...tcpip.SettableSocketOption) *gonet.TCPConn, clientRemoteIP netip.Addr, wq *waiter.Queue, dialAddr netip.AddrPort) (handled bool) { dialAddrStr := dialAddr.String() if debugNetstack() { @@ -1501,18 +1508,48 @@ func (ns *Impl) forwardTCP(getClient func(...tcpip.SettableSocketOption) *gonet. } defer client.Close() + // As of 2025-07-03, backend is always either a net.TCPConn + // from stdDialer.DialContext (which has the requisite functions), + // or nil from hangDialer in tests (in which case we would have + // errored out by now), so this conversion should always succeed. + backendTCPCloser, backendIsTCPCloser := backend.(tcpCloser) connClosed := make(chan error, 2) go func() { _, err := io.Copy(backend, client) + if err != nil { + err = fmt.Errorf("client -> backend: %w", err) + } connClosed <- err + err = nil + if backendIsTCPCloser { + err = backendTCPCloser.CloseWrite() + } + err = errors.Join(err, client.CloseRead()) + if err != nil { + ns.logf("client -> backend close connection: %v", err) + } }() go func() { _, err := io.Copy(client, backend) + if err != nil { + err = fmt.Errorf("backend -> client: %w", err) + } connClosed <- err + err = nil + if backendIsTCPCloser { + err = backendTCPCloser.CloseRead() + } + err = errors.Join(err, client.CloseWrite()) + if err != nil { + ns.logf("backend -> client close connection: %v", err) + } }() - err = <-connClosed - if err != nil { - ns.logf("proxy connection closed with error: %v", err) + // Wait for both ends of the connection to close. + for range 2 { + err = <-connClosed + if err != nil { + ns.logf("proxy connection closed with error: %v", err) + } } ns.logf("[v2] netstack: forwarder connection to %s closed", dialAddrStr) return From 3e01652e4dba619d475cc98e691c0e1d155969ae Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 3 Jul 2025 14:25:33 -0500 Subject: [PATCH 1047/1708] ipn/ipnlocal: add (*LocalBackend).RefreshExitNode In this PR, we add (*LocalBackend).RefreshExitNode which determines which exit node to use based on the current prefs and netmap and switches to it if needed. It supports both scenarios when an exit node is specified by IP (rather than ID) and needs to be resolved once the netmap is ready as well as auto exit nodes. We then use it in (*LocalBackend).SetControlClientStatus when the netmap changes, and wherever (*LocalBackend).pickNewAutoExitNode was previously used. 
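A simplified sketch of that flow, for illustration only (every type and field name below is made up, not the real LocalBackend API): when auto exit node is enabled the suggestion is re-evaluated, the preference is resolved to a concrete node ID, and routes are reconfigured only if the ID actually changed.

    package exitnode

    // backendSketch stands in for the backend state involved in the refresh.
    type backendSketch struct {
        autoExitNode   bool
        exitNodeID     string
        lastSuggested  string
        suggest        func() (string, error) // pick the best exit node from the current netmap
        resolve        func(suggested string) string
        reconfigRoutes func()
    }

    func (b *backendSketch) refreshExitNode() {
        if b.autoExitNode {
            if id, err := b.suggest(); err == nil {
                b.lastSuggested = id
            }
        }
        if newID := b.resolve(b.lastSuggested); newID != b.exitNodeID {
            b.exitNodeID = newID
            b.reconfigRoutes() // only touch the data plane when the exit node changed
        }
    }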
Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 77 +++++++++++++++++++++++++------------------ 1 file changed, 45 insertions(+), 32 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 21057c0e6..a69b7dd5a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1627,16 +1627,6 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control if applySysPolicy(prefs, b.overrideAlwaysOn) { prefsChanged = true } - if prefs.AutoExitNode.IsSet() { - // Re-evaluate exit node suggestion in case circumstances have changed. - _, err := b.suggestExitNodeLocked(curNetMap) - if err != nil && !errors.Is(err, ErrNoPreferredDERP) { - b.logf("SetControlClientStatus failed to select auto exit node: %v", err) - } - } - if setExitNodeID(prefs, b.lastSuggestedExitNode, curNetMap) { - prefsChanged = true - } // Until recently, we did not store the account's tailnet name. So check if this is the case, // and backfill it on incoming status update. @@ -1653,6 +1643,8 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control }); err != nil { b.logf("Failed to save new controlclient state: %v", err) } + + b.sendToLocked(ipn.Notify{Prefs: ptr.To(prefs.View())}, allClients) } // initTKALocked is dependent on CurrentProfile.ID, which is initialized @@ -1695,16 +1687,17 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.mu.Unlock() // Now complete the lock-free parts of what we started while locked. - if prefsChanged { - b.send(ipn.Notify{Prefs: ptr.To(prefs.View())}) - } - if st.NetMap != nil { + // Check and update the exit node if needed, now that we have a new netmap. + b.RefreshExitNode() + if envknob.NoLogsNoSupport() && st.NetMap.HasCap(tailcfg.CapabilityDataPlaneAuditLogs) { msg := "tailnet requires logging to be enabled. Remove --no-logs-no-support from tailscaled command line." b.health.SetLocalLogConfigHealth(errors.New(msg)) // Connecting to this tailnet without logging is forbidden; boot us outta here. b.mu.Lock() + // Get the current prefs again, since we unlocked above. + prefs := b.pm.CurrentPrefs().AsStruct() prefs.WantRunning = false p := prefs.View() if err := b.pm.SetPrefs(p, ipn.NetworkProfile{ @@ -1999,7 +1992,7 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo if !ok || n.StableID() != exitNodeID { continue } - b.goTracker.Go(b.pickNewAutoExitNode) + b.goTracker.Go(b.RefreshExitNode) break } } @@ -5898,30 +5891,50 @@ func (b *LocalBackend) setNetInfo(ni *tailcfg.NetInfo) { } cc.SetNetInfo(ni) if refresh { - b.pickNewAutoExitNode() + b.RefreshExitNode() } } -// pickNewAutoExitNode picks a new automatic exit node if needed. -func (b *LocalBackend) pickNewAutoExitNode() { - unlock := b.lockAndGetUnlock() - defer unlock() +// RefreshExitNode determines which exit node to use based on the current +// prefs and netmap and switches to it if needed. +func (b *LocalBackend) RefreshExitNode() { + if b.resolveExitNode() { + b.authReconfig() + } +} - newSuggestion, err := b.suggestExitNodeLocked(nil) - if err != nil { - b.logf("setAutoExitNodeID: %v", err) - return +// resolveExitNode determines which exit node to use based on the current +// prefs and netmap. It updates the exit node ID in the prefs if needed, +// sends a notification to clients, and returns true if the exit node has changed. +// +// It is the caller's responsibility to reconfigure routes and actually +// start using the selected exit node, if needed. 
+// +// b.mu must not be held. +func (b *LocalBackend) resolveExitNode() (changed bool) { + b.mu.Lock() + defer b.mu.Unlock() + + nm := b.currentNode().NetMap() + prefs := b.pm.CurrentPrefs().AsStruct() + if prefs.AutoExitNode.IsSet() { + _, err := b.suggestExitNodeLocked(nil) + if err != nil && !errors.Is(err, ErrNoPreferredDERP) { + b.logf("failed to select auto exit node: %v", err) + } } - if b.pm.CurrentPrefs().ExitNodeID() == newSuggestion.ID { - return + if !setExitNodeID(prefs, b.lastSuggestedExitNode, nm) { + return false // no changes } - _, err = b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ - Prefs: ipn.Prefs{ExitNodeID: newSuggestion.ID}, - ExitNodeIDSet: true, - }, unlock) - if err != nil { - b.logf("setAutoExitNodeID: failed to apply exit node ID preference: %v", err) + + if err := b.pm.SetPrefs(prefs.View(), ipn.NetworkProfile{ + MagicDNSName: nm.MagicDNSSuffix(), + DomainName: nm.DomainName(), + }); err != nil { + b.logf("failed to save exit node changes: %v", err) } + b.sendToLocked(ipn.Notify{Prefs: ptr.To(prefs.View())}, allClients) + return true } // setNetMapLocked updates the LocalBackend state to reflect the newly From 4c1c0bac8dcaa717c9909d7b5c9c9991223e9f5f Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 3 Jul 2025 14:32:28 -0500 Subject: [PATCH 1048/1708] ipn/ipnlocal: plumb nodeBackend into suggestExitNode to support delta updates, such as online status changes Now that (*LocalBackend).suggestExitNodeLocked is never called with a non-current netmap (the netMap parameter is always nil, indicating that the current netmap should be used), we can remove the unused parameter. Additionally, instead of suggestExitNodeLocked passing the most recent full netmap to suggestExitNode, we now pass the current nodeBackend so it can access peers with delta updates applied. Finally, with that fixed, we no longer need to skip TestUpdateNetmapDeltaAutoExitNode. Updates tailscale/corp#29969 Fixes #16455 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 41 +++++++++++++++----------------------- ipn/ipnlocal/local_test.go | 9 ++++++--- 2 files changed, 22 insertions(+), 28 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index a69b7dd5a..5fbb0bd98 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1947,10 +1947,7 @@ func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { if policy.HasChanged(syspolicy.AllowedSuggestedExitNodes) { b.refreshAllowedSuggestions() // Re-evaluate exit node suggestion now that the policy setting has changed. - b.mu.Lock() - _, err := b.suggestExitNodeLocked(nil) - b.mu.Unlock() - if err != nil && !errors.Is(err, ErrNoPreferredDERP) { + if _, err := b.SuggestExitNode(); err != nil && !errors.Is(err, ErrNoPreferredDERP) { b.logf("failed to select auto exit node: %v", err) } // If [syspolicy.ExitNodeID] is set to `auto:any`, the suggested exit node ID @@ -4490,7 +4487,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) // anyway, so its return value can be ignored here. 
applySysPolicy(newp, b.overrideAlwaysOn) if newp.AutoExitNode.IsSet() { - if _, err := b.suggestExitNodeLocked(nil); err != nil { + if _, err := b.suggestExitNodeLocked(); err != nil { b.logf("failed to select auto exit node: %v", err) } } @@ -5918,7 +5915,7 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { nm := b.currentNode().NetMap() prefs := b.pm.CurrentPrefs().AsStruct() if prefs.AutoExitNode.IsSet() { - _, err := b.suggestExitNodeLocked(nil) + _, err := b.suggestExitNodeLocked() if err != nil && !errors.Is(err, ErrNoPreferredDERP) { b.logf("failed to select auto exit node: %v", err) } @@ -7445,19 +7442,12 @@ var ErrNoPreferredDERP = errors.New("no preferred DERP, try again later") // Peers are selected based on having a DERP home that is the lowest latency to this device. For peers // without a DERP home, we look for geographic proximity to this device's DERP home. // -// netMap is an optional netmap to use that overrides b.netMap (needed for SetControlClientStatus before b.netMap is updated). -// If netMap is nil, then b.netMap is used. -// // b.mu.lock() must be held. -func (b *LocalBackend) suggestExitNodeLocked(netMap *netmap.NetworkMap) (response apitype.ExitNodeSuggestionResponse, err error) { - // netMap is an optional netmap to use that overrides b.netMap (needed for SetControlClientStatus before b.netMap is updated). If netMap is nil, then b.netMap is used. - if netMap == nil { - netMap = b.NetMap() - } +func (b *LocalBackend) suggestExitNodeLocked() (response apitype.ExitNodeSuggestionResponse, err error) { lastReport := b.MagicConn().GetLastNetcheckReport(b.ctx) prevSuggestion := b.lastSuggestedExitNode - res, err := suggestExitNode(lastReport, netMap, prevSuggestion, randomRegion, randomNode, b.getAllowedSuggestions()) + res, err := suggestExitNode(lastReport, b.currentNode(), prevSuggestion, randomRegion, randomNode, b.getAllowedSuggestions()) if err != nil { return res, err } @@ -7468,7 +7458,7 @@ func (b *LocalBackend) suggestExitNodeLocked(netMap *netmap.NetworkMap) (respons func (b *LocalBackend) SuggestExitNode() (response apitype.ExitNodeSuggestionResponse, err error) { b.mu.Lock() defer b.mu.Unlock() - return b.suggestExitNodeLocked(nil) + return b.suggestExitNodeLocked() } // getAllowedSuggestions returns a set of exit nodes permitted by the most recent @@ -7512,22 +7502,23 @@ func fillAllowedSuggestions() set.Set[tailcfg.StableNodeID] { return s } -func suggestExitNode(report *netcheck.Report, netMap *netmap.NetworkMap, prevSuggestion tailcfg.StableNodeID, selectRegion selectRegionFunc, selectNode selectNodeFunc, allowList set.Set[tailcfg.StableNodeID]) (res apitype.ExitNodeSuggestionResponse, err error) { +func suggestExitNode(report *netcheck.Report, nb *nodeBackend, prevSuggestion tailcfg.StableNodeID, selectRegion selectRegionFunc, selectNode selectNodeFunc, allowList set.Set[tailcfg.StableNodeID]) (res apitype.ExitNodeSuggestionResponse, err error) { + netMap := nb.NetMap() if report == nil || report.PreferredDERP == 0 || netMap == nil || netMap.DERPMap == nil { return res, ErrNoPreferredDERP } - candidates := make([]tailcfg.NodeView, 0, len(netMap.Peers)) - for _, peer := range netMap.Peers { + // Use [nodeBackend.AppendMatchingPeers] instead of the netmap directly, + // since the netmap doesn't include delta updates (e.g., home DERP or Online + // status changes) from the control plane since the last full update. 
+ candidates := nb.AppendMatchingPeers(nil, func(peer tailcfg.NodeView) bool { if !peer.Valid() || !peer.Online().Get() { - continue + return false } if allowList != nil && !allowList.Contains(peer.StableID()) { - continue - } - if peer.CapMap().Contains(tailcfg.NodeAttrSuggestExitNode) && tsaddr.ContainsExitRoutes(peer.AllowedIPs()) { - candidates = append(candidates, peer) + return false } - } + return peer.CapMap().Contains(tailcfg.NodeAttrSuggestExitNode) && tsaddr.ContainsExitRoutes(peer.AllowedIPs()) + }) if len(candidates) == 0 { return res, nil } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 5c9c9f2fa..5c9adfb5f 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -57,6 +57,7 @@ import ( "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/set" @@ -2327,8 +2328,6 @@ func TestSetExitNodeIDPolicy(t *testing.T) { } func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { - t.Skip("TODO(tailscale/tailscale#16455): suggestExitNode does not check for online status of exit nodes") - peer1 := makePeer(1, withCap(26), withSuggest(), withOnline(true), withExitRoutes()) peer2 := makePeer(2, withCap(26), withSuggest(), withOnline(true), withExitRoutes()) derpMap := &tailcfg.DERPMap{ @@ -4278,7 +4277,11 @@ func TestSuggestExitNode(t *testing.T) { allowList = set.SetOf(tt.allowPolicy) } - got, err := suggestExitNode(tt.lastReport, tt.netMap, tt.lastSuggestion, selectRegion, selectNode, allowList) + nb := newNodeBackend(t.Context(), eventbus.New()) + defer nb.shutdown(errShutdown) + nb.SetNetMap(tt.netMap) + + got, err := suggestExitNode(tt.lastReport, nb, tt.lastSuggestion, selectRegion, selectNode, allowList) if got.Name != tt.wantName { t.Errorf("name=%v, want %v", got.Name, tt.wantName) } From 381fdcc3f17f406bb8c5a711b562a23aaef6c98f Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 3 Jul 2025 20:32:30 -0500 Subject: [PATCH 1049/1708] ipn/ipnlocal,util/syspolicy/source: retain existing exit node when using auto exit node, if it's allowed by policy In this PR, we update setExitNodeID to retain the existing exit node if auto exit node is enabled, the current exit node is allowed by policy, and no suggested exit node is available yet. Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 15 +++- ipn/ipnlocal/local_test.go | 110 ++++++++++++++++++++++++++-- util/syspolicy/source/test_store.go | 7 ++ 3 files changed, 125 insertions(+), 7 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5fbb0bd98..6120c52c6 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2026,7 +2026,20 @@ func mutationsAreWorthyOfTellingIPNBus(muts []netmap.NodeMutation) bool { // or resolve ExitNodeIP to an ID and use that. It returns whether prefs was mutated. func setExitNodeID(prefs *ipn.Prefs, suggestedExitNodeID tailcfg.StableNodeID, nm *netmap.NetworkMap) (prefsChanged bool) { if prefs.AutoExitNode.IsSet() { - newExitNodeID := cmp.Or(suggestedExitNodeID, unresolvedExitNodeID) + var newExitNodeID tailcfg.StableNodeID + if !suggestedExitNodeID.IsZero() { + // If we have a suggested exit node, use it. + newExitNodeID = suggestedExitNodeID + } else if isAllowedAutoExitNodeID(prefs.ExitNodeID) { + // If we don't have a suggested exit node, but the prefs already + // specify an allowed auto exit node ID, retain it. 
+ newExitNodeID = prefs.ExitNodeID + } else { + // Otherwise, use [unresolvedExitNodeID] to install a blackhole route, + // preventing traffic from leaking to the local network until an actual + // exit node is selected. + newExitNodeID = unresolvedExitNodeID + } if prefs.ExitNodeID != newExitNodeID { prefs.ExitNodeID = newExitNodeID prefsChanged = true diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 5c9adfb5f..c9bad838e 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -620,6 +620,7 @@ func TestConfigureExitNode(t *testing.T) { useExitNodeEnabled *bool exitNodeIDPolicy *tailcfg.StableNodeID exitNodeIPPolicy *netip.Addr + exitNodeAllowedIDs []tailcfg.StableNodeID // nil if all IDs are allowed for auto exit nodes wantPrefs ipn.Prefs }{ { @@ -894,6 +895,91 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", }, }, + { + name: "auto-any-via-policy/no-netmap/with-existing", // set auto exit node via syspolicy without a netmap, but with a previously set exit node ID + prefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), // should be retained + }, + netMap: nil, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeAllowedIDs: nil, // not configured, so all exit node IDs are implicitly allowed + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), + AutoExitNode: "any", + }, + }, + { + name: "auto-any-via-policy/no-netmap/with-allowed-existing", // same, but now with a syspolicy setting that explicitly allows the existing exit node ID + prefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), // should be retained + }, + netMap: nil, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeAllowedIDs: []tailcfg.StableNodeID{ + exitNode2.StableID(), // the current exit node ID is allowed + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), + AutoExitNode: "any", + }, + }, + { + name: "auto-any-via-policy/no-netmap/with-disallowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID + prefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), // not allowed by [syspolicy.AllowedSuggestedExitNodes] + }, + netMap: nil, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeAllowedIDs: []tailcfg.StableNodeID{ + exitNode1.StableID(), // a different exit node ID; the current one is not allowed + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: unresolvedExitNodeID, // we don't have a netmap yet, and the current exit node ID is not allowed; block traffic + AutoExitNode: "any", + }, + }, + { + name: "auto-any-via-policy/with-netmap/with-allowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID + prefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // not allowed by [syspolicy.AllowedSuggestedExitNodes] + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeAllowedIDs: []tailcfg.StableNodeID{ + exitNode2.StableID(), // a different exit node ID; the current one is not allowed + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), // we have a netmap; switch to the best allowed exit node + AutoExitNode: "any", + }, + }, + { + name: 
"auto-any-via-policy/with-netmap/switch-to-better", // if all exit nodes are allowed, switch to the best one once we have a netmap + prefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // switch to the best exit node + AutoExitNode: "any", + }, + }, { name: "auto-foo-via-policy", // set auto exit node via syspolicy with an unknown/unsupported expression prefs: ipn.Prefs{ @@ -929,19 +1015,23 @@ func TestConfigureExitNode(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Configure policy settings, if any. - var settings []source.TestSetting[string] + store := source.NewTestStore(t) if tt.exitNodeIDPolicy != nil { - settings = append(settings, source.TestSettingOf(syspolicy.ExitNodeID, string(*tt.exitNodeIDPolicy))) + store.SetStrings(source.TestSettingOf(syspolicy.ExitNodeID, string(*tt.exitNodeIDPolicy))) } if tt.exitNodeIPPolicy != nil { - settings = append(settings, source.TestSettingOf(syspolicy.ExitNodeIP, tt.exitNodeIPPolicy.String())) + store.SetStrings(source.TestSettingOf(syspolicy.ExitNodeIP, tt.exitNodeIPPolicy.String())) } - if settings != nil { - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, source.NewTestStoreOf(t, settings...)) - } else { + if tt.exitNodeAllowedIDs != nil { + store.SetStringLists(source.TestSettingOf(syspolicy.AllowedSuggestedExitNodes, toStrings(tt.exitNodeAllowedIDs))) + } + if store.IsEmpty() { // No syspolicy settings, so don't register a store. // This allows the test to run in parallel with other tests. t.Parallel() + } else { + // Register the store for syspolicy settings to make them available to the LocalBackend. + syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, store) } // Create a new LocalBackend with the given prefs. @@ -6127,3 +6217,11 @@ func TestDisplayMessageIPNBus(t *testing.T) { }) } } + +func toStrings[T ~string](in []T) []string { + out := make([]string, len(in)) + for i, v := range in { + out[i] = string(v) + } + return out +} diff --git a/util/syspolicy/source/test_store.go b/util/syspolicy/source/test_store.go index 4b175611f..efaf4cd5a 100644 --- a/util/syspolicy/source/test_store.go +++ b/util/syspolicy/source/test_store.go @@ -154,6 +154,13 @@ func (s *TestStore) RegisterChangeCallback(callback func()) (unregister func(), }, nil } +// IsEmpty reports whether the store does not contain any settings. +func (s *TestStore) IsEmpty() bool { + s.mu.RLock() + defer s.mu.RUnlock() + return len(s.mr) == 0 +} + // ReadString implements [Store]. func (s *TestStore) ReadString(key setting.Key) (string, error) { defer s.recordRead(key, setting.StringValue) From cb7b49941eae3a933c4c5b7dc56398bce24d7e08 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 3 Jul 2025 19:37:56 -0500 Subject: [PATCH 1050/1708] ipn/ipnlocal: add (*LocalBackend).reconcilePrefsLocked We have several places where we call applySysPolicy, suggestExitNodeLocked, and setExitNodeID. While there are cases where we want to resolve the exit node specifically, such as when network conditions change or a new netmap is received, we typically need to perform all three steps. For example, enforcing policy settings may enable auto exit nodes or set an ExitNodeIP, which in turn requires picking a suggested exit node or resolving the IP to an ID, respectively. 
In this PR, we introduce (*LocalBackend).resolveExitNodeInPrefsLocked and (*LocalBackend).reconcilePrefsLocked, with the latter calling both applySysPolicy and resolveExitNodeInPrefsLocked. Consolidating these steps into a single extensibility point would also make it easier to support future hooks registered by ipnext extensions. Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 113 ++++++++++++++++++++++++------------- ipn/ipnlocal/local_test.go | 2 +- 2 files changed, 76 insertions(+), 39 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 6120c52c6..0ee249dfb 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1624,7 +1624,11 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control prefsChanged = true } } - if applySysPolicy(prefs, b.overrideAlwaysOn) { + // We primarily need this to apply syspolicy to the prefs if an implicit profile + // switch is about to happen. + // TODO(nickkhyl): remove this once we improve handling of implicit profile switching + // in tailscale/corp#28014 and we apply syspolicy when the switch actually happens. + if b.reconcilePrefsLocked(prefs) { prefsChanged = true } @@ -1911,21 +1915,21 @@ func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { if unregister, err = syspolicy.RegisterChangeCallback(b.sysPolicyChanged); err != nil { return nil, fmt.Errorf("syspolicy: LocalBacked failed to register policy change callback: %v", err) } - if prefs, anyChange := b.applySysPolicy(); anyChange { + if prefs, anyChange := b.reconcilePrefs(); anyChange { b.logf("syspolicy: changed initial profile prefs: %v", prefs.Pretty()) } b.refreshAllowedSuggestions() return unregister, nil } -// applySysPolicy overwrites the current profile's preferences with policies +// reconcilePrefs overwrites the current profile's preferences with policies // that may be configured by the system administrator in an OS-specific way. // // b.mu must not be held. -func (b *LocalBackend) applySysPolicy() (_ ipn.PrefsView, anyChange bool) { +func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { unlock := b.lockAndGetUnlock() prefs := b.pm.CurrentPrefs().AsStruct() - if !applySysPolicy(prefs, b.overrideAlwaysOn) { + if !b.reconcilePrefsLocked(prefs) { unlock.UnlockEarly() return prefs.View(), false } @@ -1954,7 +1958,7 @@ func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { // will be used when [applySysPolicy] updates the current profile's prefs. } - if prefs, anyChange := b.applySysPolicy(); anyChange { + if prefs, anyChange := b.reconcilePrefs(); anyChange { b.logf("syspolicy: changed profile prefs: %v", prefs.Pretty()) } } @@ -2302,26 +2306,32 @@ func (b *LocalBackend) Start(opts ipn.Options) error { b.setStateLocked(ipn.NoState) cn := b.currentNode() + + prefsChanged := false + newPrefs := b.pm.CurrentPrefs().AsStruct() if opts.UpdatePrefs != nil { - oldPrefs := b.pm.CurrentPrefs() - newPrefs := opts.UpdatePrefs.Clone() - newPrefs.Persist = oldPrefs.Persist().AsStruct() - pv := newPrefs.View() - if err := b.pm.SetPrefs(pv, cn.NetworkProfile()); err != nil { - b.logf("failed to save UpdatePrefs state: %v", err) + newPrefs = opts.UpdatePrefs.Clone() + prefsChanged = true + } + // Apply any syspolicy overrides, resolve exit node ID, etc. 
+ // As of 2025-07-03, this is primarily needed in two cases: + // - when opts.UpdatePrefs is not nil + // - when Always Mode is enabled and we need to set WantRunning to true + if b.reconcilePrefsLocked(newPrefs) { + prefsChanged = true + } + if prefsChanged { + // Neither opts.UpdatePrefs nor prefs reconciliation + // is allowed to modify Persist; retain the old value. + newPrefs.Persist = b.pm.CurrentPrefs().Persist().AsStruct() + if err := b.pm.SetPrefs(newPrefs.View(), cn.NetworkProfile()); err != nil { + b.logf("failed to save updated and reconciled prefs: %v", err) } } + prefs := newPrefs.View() // Reset the always-on override whenever Start is called. b.resetAlwaysOnOverrideLocked() - // And also apply syspolicy settings to the current profile. - // This is important in two cases: when opts.UpdatePrefs is not nil, - // and when Always Mode is enabled and we need to set WantRunning to true. - if newp := b.pm.CurrentPrefs().AsStruct(); applySysPolicy(newp, b.overrideAlwaysOn) { - setExitNodeID(newp, b.lastSuggestedExitNode, cn.NetMap()) - b.pm.setPrefsNoPermCheck(newp.View()) - } - prefs := b.pm.CurrentPrefs() b.setAtomicValuesFromPrefsLocked(prefs) wantRunning := prefs.WantRunning() @@ -4495,17 +4505,11 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) if oldp.Valid() { newp.Persist = oldp.Persist().AsStruct() // caller isn't allowed to override this } - // applySysPolicy returns whether it updated newp, - // but everything in this function treats b.prefs as completely new + // Apply reconciliation to the prefs, such as policy overrides, + // exit node resolution, and so on. The call returns whether it updated + // newp, but everything in this function treats newp as completely new // anyway, so its return value can be ignored here. - applySysPolicy(newp, b.overrideAlwaysOn) - if newp.AutoExitNode.IsSet() { - if _, err := b.suggestExitNodeLocked(); err != nil { - b.logf("failed to select auto exit node: %v", err) - } - } - // setExitNodeID does likewise. No-op if no exit node resolution is needed. - setExitNodeID(newp, b.lastSuggestedExitNode, netMap) + b.reconcilePrefsLocked(newp) // We do this to avoid holding the lock while doing everything else. @@ -5927,14 +5931,8 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { nm := b.currentNode().NetMap() prefs := b.pm.CurrentPrefs().AsStruct() - if prefs.AutoExitNode.IsSet() { - _, err := b.suggestExitNodeLocked() - if err != nil && !errors.Is(err, ErrNoPreferredDERP) { - b.logf("failed to select auto exit node: %v", err) - } - } - if !setExitNodeID(prefs, b.lastSuggestedExitNode, nm) { - return false // no changes + if !b.resolveExitNodeInPrefsLocked(prefs) { + return } if err := b.pm.SetPrefs(prefs.View(), ipn.NetworkProfile{ @@ -5947,6 +5945,45 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { return true } +// reconcilePrefsLocked applies policy overrides, exit node resolution, +// and other post-processing to the prefs, and reports whether the prefs +// were modified as a result. +// +// It must not perform any reconfiguration, as the prefs are not yet effective. +// +// b.mu must be held. 
+func (b *LocalBackend) reconcilePrefsLocked(prefs *ipn.Prefs) (changed bool) { + if applySysPolicy(prefs, b.overrideAlwaysOn) { + changed = true + } + if b.resolveExitNodeInPrefsLocked(prefs) { + changed = true + } + if changed { + b.logf("prefs reconciled: %v", prefs.Pretty()) + } + return changed +} + +// resolveExitNodeInPrefsLocked determines which exit node to use +// based on the specified prefs and netmap. It updates the exit node ID +// in the prefs if needed, and returns true if the exit node has changed. +// +// b.mu must be held. +func (b *LocalBackend) resolveExitNodeInPrefsLocked(prefs *ipn.Prefs) (changed bool) { + if prefs.AutoExitNode.IsSet() { + _, err := b.suggestExitNodeLocked() + if err != nil && !errors.Is(err, ErrNoPreferredDERP) { + b.logf("failed to select auto exit node: %v", err) + } + } + if setExitNodeID(prefs, b.lastSuggestedExitNode, b.currentNode().NetMap()) { + b.logf("exit node resolved: %v", prefs.ExitNodeID) + return true + } + return false +} + // setNetMapLocked updates the LocalBackend state to reflect the newly // received nm. If nm is nil, it resets all configuration as though // Tailscale is turned off. diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index c9bad838e..3a2258cc6 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2390,7 +2390,7 @@ func TestSetExitNodeIDPolicy(t *testing.T) { b.pm = pm b.lastSuggestedExitNode = test.lastSuggestedExitNode prefs := b.pm.prefs.AsStruct() - if changed := applySysPolicy(prefs, false) || setExitNodeID(prefs, test.lastSuggestedExitNode, test.nm); changed != test.prefsChanged { + if changed := b.reconcilePrefsLocked(prefs); changed != test.prefsChanged { t.Errorf("wanted prefs changed %v, got prefs changed %v", test.prefsChanged, changed) } From a6f647812901a11572b9143607ec24445574fed7 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 7 Jul 2025 11:50:59 -0500 Subject: [PATCH 1051/1708] util/syspolicy: add HasAnyOf to check if any specified policy settings are configured Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- util/syspolicy/syspolicy.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index afcc28ff1..a84afa5db 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -56,6 +56,27 @@ func MustRegisterStoreForTest(tb testenv.TB, name string, scope setting.PolicySc return reg } +// HasAnyOf returns whether at least one of the specified policy settings is configured, +// or an error if no keys are provided or the check fails. +func HasAnyOf(keys ...Key) (bool, error) { + if len(keys) == 0 { + return false, errors.New("at least one key must be specified") + } + policy, err := rsop.PolicyFor(setting.DefaultScope()) + if err != nil { + return false, err + } + effective := policy.Get() + for _, k := range keys { + _, err := effective.GetErr(k) + if errors.Is(err, setting.ErrNotConfigured) || errors.Is(err, setting.ErrNoSuchKey) { + continue + } + return err == nil, err // err may be nil or non-nil + } + return false, nil +} + // GetString returns a string policy setting with the specified key, // or defaultValue if it does not exist. 
func GetString(key Key, defaultValue string) (string, error) { From f1c7b463cd1cbc6de634a8b75a14cfeca498756f Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 7 Jul 2025 17:04:07 -0500 Subject: [PATCH 1052/1708] ipn/{ipnauth,ipnlocal,localapi}: make EditPrefs return an error if changing exit node is restricted by policy We extract checkEditPrefsAccessLocked, adjustEditPrefsLocked, and onEditPrefsLocked from the EditPrefs execution path, defining when each step is performed and what behavior is allowed at each stage. Currently, this is primarily used to support Always On mode, to handle the Exit Node enablement toggle, and to report prefs edit metrics. We then use it to enforce Exit Node policy settings by preventing users from setting an exit node and making EditPrefs return an error when an exit node is restricted by policy. This enforcement is also extended to the Exit Node toggle. These changes prepare for supporting Exit Node overrides when permitted by policy and preventing logout while Always On mode is enabled. In the future, implementation of these methods can be delegated to ipnext extensions via the feature hooks. Updates tailscale/corp#29969 Updates tailscale/corp#26249 Signed-off-by: Nick Khyl --- ipn/ipnauth/self.go | 12 +++ ipn/ipnlocal/local.go | 168 ++++++++++++++++++++++++++---------- ipn/ipnlocal/local_test.go | 83 +++++++++++------- ipn/localapi/localapi.go | 2 +- util/syspolicy/syspolicy.go | 5 +- 5 files changed, 191 insertions(+), 79 deletions(-) diff --git a/ipn/ipnauth/self.go b/ipn/ipnauth/self.go index 9b430dc6d..adee06964 100644 --- a/ipn/ipnauth/self.go +++ b/ipn/ipnauth/self.go @@ -13,6 +13,11 @@ import ( // has unlimited access. var Self Actor = unrestricted{} +// TODO is a caller identity used when the operation is performed on behalf of a user, +// rather than by tailscaled itself, but the surrounding function is not yet extended +// to accept an [Actor] parameter. It grants the same unrestricted access as [Self]. +var TODO Actor = unrestricted{} + // unrestricted is an [Actor] that has unlimited access to the currently running // tailscaled instance. It's typically used for operations performed by tailscaled // on its own, or upon a request from the control plane, rather on behalf of a user. @@ -49,3 +54,10 @@ func (unrestricted) IsLocalSystem() bool { return false } // Deprecated: this method exists for compatibility with the current (as of 2025-01-28) // permission model and will be removed as we progress on tailscale/corp#18342. func (unrestricted) IsLocalAdmin(operatorUID string) bool { return false } + +// IsTailscaled reports whether the given Actor represents Tailscaled itself, +// such as [Self] or a [TODO] placeholder actor. +func IsTailscaled(a Actor) bool { + _, ok := a.(unrestricted) + return ok +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 0ee249dfb..03a0709e2 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -178,6 +178,10 @@ var ( // It is used as a context cancellation cause for the old context // and can be returned when an operation is performed on it. errNodeContextChanged = errors.New("profile changed") + + // errManagedByPolicy indicates the operation is blocked + // because the target state is managed by a GP/MDM policy. 
+ errManagedByPolicy = errors.New("managed by policy") ) // LocalBackend is the glue between the major pieces of the Tailscale @@ -3477,12 +3481,14 @@ func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { b.logf("using tailnet default auto-update setting: %v", au) prefsClone := prefs.AsStruct() prefsClone.AutoUpdate.Apply = opt.NewBool(au) - _, err := b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ - Prefs: *prefsClone, - AutoUpdateSet: ipn.AutoUpdatePrefsMask{ - ApplySet: true, - }, - }, unlock) + _, err := b.editPrefsLockedOnEntry( + ipnauth.Self, + &ipn.MaskedPrefs{ + Prefs: *prefsClone, + AutoUpdateSet: ipn.AutoUpdatePrefsMask{ + ApplySet: true, + }, + }, unlock) if err != nil { b.logf("failed to apply tailnet-wide default for auto-updates (%v): %v", au, err) return @@ -4224,7 +4230,7 @@ func (b *LocalBackend) checkAutoUpdatePrefsLocked(p *ipn.Prefs) error { // On success, it returns the resulting prefs (or current prefs, in the case of no change). // Setting the value to false when use of an exit node is already false is not an error, // nor is true when the exit node is already in use. -func (b *LocalBackend) SetUseExitNodeEnabled(v bool) (ipn.PrefsView, error) { +func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.PrefsView, error) { unlock := b.lockAndGetUnlock() defer unlock() @@ -4267,7 +4273,7 @@ func (b *LocalBackend) SetUseExitNodeEnabled(v bool) (ipn.PrefsView, error) { mp.InternalExitNodePrior = p0.ExitNodeID() } } - return b.editPrefsLockedOnEntry(mp, unlock) + return b.editPrefsLockedOnEntry(actor, mp, unlock) } // MaybeClearAppConnector clears the routes from any AppConnector if @@ -4296,30 +4302,83 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip return ipn.PrefsView{}, errors.New("can't set Internal fields") } - // Zeroing the ExitNodeId via localAPI must also zero the prior exit node. - if mp.ExitNodeIDSet && mp.ExitNodeID == "" { + return b.editPrefsLockedOnEntry(actor, mp, b.lockAndGetUnlock()) +} + +// checkEditPrefsAccessLocked checks whether the current user has access +// to apply the prefs changes in mp. +// +// It returns an error if the user is not allowed, or nil otherwise. +// +// b.mu must be held. +func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, mp *ipn.MaskedPrefs) error { + var errs []error + + if mp.RunSSHSet && mp.RunSSH && !envknob.CanSSHD() { + errs = append(errs, errors.New("Tailscale SSH server administratively disabled")) + } + + // Check if the user is allowed to disconnect Tailscale. + if mp.WantRunningSet && !mp.WantRunning && b.pm.CurrentPrefs().WantRunning() { + if err := actor.CheckProfileAccess(b.pm.CurrentProfile(), ipnauth.Disconnect, b.extHost.AuditLogger()); err != nil { + errs = append(errs, err) + } + } + + // Prevent users from changing exit node preferences + // when exit node usage is managed by policy. + if mp.ExitNodeIDSet || mp.ExitNodeIPSet || mp.AutoExitNodeSet { + // TODO(nickkhyl): Allow users to override ExitNode policy settings + // if the ExitNode.AllowUserOverride policy permits it. + // (Policy setting name and details are TBD. See tailscale/corp#29969) + isManaged, err := syspolicy.HasAnyOf(syspolicy.ExitNodeID, syspolicy.ExitNodeIP) + if err != nil { + err = fmt.Errorf("policy check failed: %w", err) + } else if isManaged { + err = errManagedByPolicy + } + if err != nil { + errs = append(errs, fmt.Errorf("exit node cannot be changed: %w", err)) + } + } + + return multierr.New(errs...) 
+} + +// adjustEditPrefsLocked applies additional changes to mp if necessary, +// such as zeroing out mutually exclusive fields. +// +// It must not assume that the changes in mp will actually be applied. +// +// b.mu must be held. +func (b *LocalBackend) adjustEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs) { + // Zeroing the ExitNodeID via localAPI must also zero the prior exit node. + if mp.ExitNodeIDSet && mp.ExitNodeID == "" && !mp.InternalExitNodePriorSet { mp.InternalExitNodePrior = "" mp.InternalExitNodePriorSet = true } // Disable automatic exit node selection if the user explicitly sets // ExitNodeID or ExitNodeIP. - if mp.ExitNodeIDSet || mp.ExitNodeIPSet { + if (mp.ExitNodeIDSet || mp.ExitNodeIPSet) && !mp.AutoExitNodeSet { mp.AutoExitNodeSet = true mp.AutoExitNode = "" } +} - // Acquire the lock before checking the profile access to prevent - // TOCTOU issues caused by the current profile changing between the - // check and the actual edit. - unlock := b.lockAndGetUnlock() - defer unlock() - if mp.WantRunningSet && !mp.WantRunning && b.pm.CurrentPrefs().WantRunning() { - if err := actor.CheckProfileAccess(b.pm.CurrentProfile(), ipnauth.Disconnect, b.extHost.AuditLogger()); err != nil { - b.logf("check profile access failed: %v", err) - return ipn.PrefsView{}, err - } - +// onEditPrefsLocked is called when prefs are edited (typically, via LocalAPI), +// just before the changes in newPrefs are set for the current profile. +// +// The changes in mp have been allowed, but the resulting [ipn.Prefs] +// have not yet been applied and may be subject to reconciliation +// by [LocalBackend.reconcilePrefsLocked], either before or after being set. +// +// This method handles preference edits, typically initiated by the user, +// as opposed to reconfiguring the backend when the final prefs are set. +// +// b.mu must be held; mp must not be mutated by this method. +func (b *LocalBackend) onEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs, oldPrefs, newPrefs ipn.PrefsView) { + if mp.WantRunningSet && !mp.WantRunning && oldPrefs.WantRunning() { // If a user has enough rights to disconnect, such as when [syspolicy.AlwaysOn] // is disabled, or [syspolicy.AlwaysOnOverrideWithReason] is also set and the user // provides a reason for disconnecting, then we should not force the "always on" @@ -4331,7 +4390,18 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip } } - return b.editPrefsLockedOnEntry(mp, unlock) + // This is recorded here in the EditPrefs path, not the setPrefs path on purpose. + // recordForEdit records metrics related to edits and changes, not the final state. + // If, in the future, we want to record gauge-metrics related to the state of prefs, + // that should be done in the setPrefs path. 
+ e := prefsMetricsEditEvent{ + change: mp, + pNew: newPrefs, + pOld: oldPrefs, + node: b.currentNode(), + lastSuggestedExitNode: b.lastSuggestedExitNode, + } + e.record() } // startReconnectTimerLocked sets a timer to automatically set WantRunning to true @@ -4368,7 +4438,7 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { } mp := &ipn.MaskedPrefs{WantRunningSet: true, Prefs: ipn.Prefs{WantRunning: true}} - if _, err := b.editPrefsLockedOnEntry(mp, unlock); err != nil { + if _, err := b.editPrefsLockedOnEntry(ipnauth.Self, mp, unlock); err != nil { b.logf("failed to automatically reconnect as %q after %v: %v", cp.Name(), d, err) } else { b.logf("automatically reconnected as %q after %v", cp.Name(), d) @@ -4399,9 +4469,19 @@ func (b *LocalBackend) stopReconnectTimerLocked() { // Warning: b.mu must be held on entry, but it unlocks it on the way out. // TODO(bradfitz): redo the locking on all these weird methods like this. -func (b *LocalBackend) editPrefsLockedOnEntry(mp *ipn.MaskedPrefs, unlock unlockOnce) (ipn.PrefsView, error) { +func (b *LocalBackend) editPrefsLockedOnEntry(actor ipnauth.Actor, mp *ipn.MaskedPrefs, unlock unlockOnce) (ipn.PrefsView, error) { defer unlock() // for error paths + // Check if the changes in mp are allowed. + if err := b.checkEditPrefsAccessLocked(actor, mp); err != nil { + b.logf("EditPrefs(%v): %v", mp.Pretty(), err) + return ipn.PrefsView{}, err + } + + // Apply additional changes to mp if necessary, + // such as clearing mutually exclusive fields. + b.adjustEditPrefsLocked(actor, mp) + if mp.EggSet { mp.EggSet = false b.egg = true @@ -4416,29 +4496,18 @@ func (b *LocalBackend) editPrefsLockedOnEntry(mp *ipn.MaskedPrefs, unlock unlock b.logf("EditPrefs check error: %v", err) return ipn.PrefsView{}, err } - if p1.RunSSH && !envknob.CanSSHD() { - b.logf("EditPrefs requests SSH, but disabled by envknob; returning error") - return ipn.PrefsView{}, errors.New("Tailscale SSH server administratively disabled.") - } + if p1.View().Equals(p0) { return stripKeysFromPrefs(p0), nil } b.logf("EditPrefs: %v", mp.Pretty()) - newPrefs := b.setPrefsLockedOnEntry(p1, unlock) - // This is recorded here in the EditPrefs path, not the setPrefs path on purpose. - // recordForEdit records metrics related to edits and changes, not the final state. - // If, in the future, we want to record gauge-metrics related to the state of prefs, - // that should be done in the setPrefs path. - e := prefsMetricsEditEvent{ - change: mp, - pNew: p1.View(), - pOld: p0, - node: b.currentNode(), - lastSuggestedExitNode: b.lastSuggestedExitNode, - } - e.record() + // Perform any actions required when prefs are edited (typically by a user), + // before the modified prefs are actually set for the current profile. + b.onEditPrefsLocked(actor, mp, p0, p1.View()) + + newPrefs := b.setPrefsLockedOnEntry(p1, unlock) // Note: don't perform any actions for the new prefs here. Not // every prefs change goes through EditPrefs. Put your actions @@ -5829,11 +5898,16 @@ func (b *LocalBackend) Logout(ctx context.Context) error { // delete it later. profile := b.pm.CurrentProfile() - _, err := b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ - WantRunningSet: true, - LoggedOutSet: true, - Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true}, - }, unlock) + // TODO(nickkhyl): change [LocalBackend.Logout] to accept an [ipnauth.Actor]. + // This will allow enforcing Always On mode when a user tries to log out + // while logged in and connected. See tailscale/corp#26249. 
+ _, err := b.editPrefsLockedOnEntry( + ipnauth.TODO, + &ipn.MaskedPrefs{ + WantRunningSet: true, + LoggedOutSet: true, + Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true}, + }, unlock) if err != nil { return err } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 3a2258cc6..1e1b7663a 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -501,29 +501,30 @@ func TestLazyMachineKeyGeneration(t *testing.T) { func TestZeroExitNodeViaLocalAPI(t *testing.T) { lb := newTestLocalBackend(t) + user := &ipnauth.TestActor{} // Give it an initial exit node in use. - if _, err := lb.EditPrefs(&ipn.MaskedPrefs{ + if _, err := lb.EditPrefsAs(&ipn.MaskedPrefs{ ExitNodeIDSet: true, Prefs: ipn.Prefs{ ExitNodeID: "foo", }, - }); err != nil { + }, user); err != nil { t.Fatalf("enabling first exit node: %v", err) } // SetUseExitNodeEnabled(false) "remembers" the prior exit node. - if _, err := lb.SetUseExitNodeEnabled(false); err != nil { + if _, err := lb.SetUseExitNodeEnabled(user, false); err != nil { t.Fatal("expected failure") } // Zero the exit node - pv, err := lb.EditPrefs(&ipn.MaskedPrefs{ + pv, err := lb.EditPrefsAs(&ipn.MaskedPrefs{ ExitNodeIDSet: true, Prefs: ipn.Prefs{ ExitNodeID: "", }, - }) + }, user) if err != nil { t.Fatalf("enabling first exit node: %v", err) @@ -539,29 +540,30 @@ func TestZeroExitNodeViaLocalAPI(t *testing.T) { func TestSetUseExitNodeEnabled(t *testing.T) { lb := newTestLocalBackend(t) + user := &ipnauth.TestActor{} // Can't turn it on if it never had an old value. - if _, err := lb.SetUseExitNodeEnabled(true); err == nil { + if _, err := lb.SetUseExitNodeEnabled(user, true); err == nil { t.Fatal("expected success") } // But we can turn it off when it's already off. - if _, err := lb.SetUseExitNodeEnabled(false); err != nil { + if _, err := lb.SetUseExitNodeEnabled(user, false); err != nil { t.Fatal("expected failure") } // Give it an initial exit node in use. - if _, err := lb.EditPrefs(&ipn.MaskedPrefs{ + if _, err := lb.EditPrefsAs(&ipn.MaskedPrefs{ ExitNodeIDSet: true, Prefs: ipn.Prefs{ ExitNodeID: "foo", }, - }); err != nil { + }, user); err != nil { t.Fatalf("enabling first exit node: %v", err) } // Now turn off that exit node. - if prefs, err := lb.SetUseExitNodeEnabled(false); err != nil { + if prefs, err := lb.SetUseExitNodeEnabled(user, false); err != nil { t.Fatal("expected failure") } else { if g, w := prefs.ExitNodeID(), tailcfg.StableNodeID(""); g != w { @@ -573,7 +575,7 @@ func TestSetUseExitNodeEnabled(t *testing.T) { } // And turn it back on. - if prefs, err := lb.SetUseExitNodeEnabled(true); err != nil { + if prefs, err := lb.SetUseExitNodeEnabled(user, true); err != nil { t.Fatal("expected failure") } else { if g, w := prefs.ExitNodeID(), tailcfg.StableNodeID("foo"); g != w { @@ -585,9 +587,9 @@ func TestSetUseExitNodeEnabled(t *testing.T) { } // Verify we block setting an Internal field. 
- if _, err := lb.EditPrefs(&ipn.MaskedPrefs{ + if _, err := lb.EditPrefsAs(&ipn.MaskedPrefs{ InternalExitNodePriorSet: true, - }); err == nil { + }, user); err == nil { t.Fatalf("unexpected success; want an error trying to set an internal field") } } @@ -612,16 +614,18 @@ func TestConfigureExitNode(t *testing.T) { } tests := []struct { - name string - prefs ipn.Prefs - netMap *netmap.NetworkMap - report *netcheck.Report - changePrefs *ipn.MaskedPrefs - useExitNodeEnabled *bool - exitNodeIDPolicy *tailcfg.StableNodeID - exitNodeIPPolicy *netip.Addr - exitNodeAllowedIDs []tailcfg.StableNodeID // nil if all IDs are allowed for auto exit nodes - wantPrefs ipn.Prefs + name string + prefs ipn.Prefs + netMap *netmap.NetworkMap + report *netcheck.Report + changePrefs *ipn.MaskedPrefs + useExitNodeEnabled *bool + exitNodeIDPolicy *tailcfg.StableNodeID + exitNodeIPPolicy *netip.Addr + exitNodeAllowedIDs []tailcfg.StableNodeID // nil if all IDs are allowed for auto exit nodes + wantChangePrefsErr error // if non-nil, the error we expect from [LocalBackend.EditPrefsAs] + wantPrefs ipn.Prefs + wantExitNodeToggleErr error // if non-nil, the error we expect from [LocalBackend.SetUseExitNodeEnabled] }{ { name: "exit-node-id-via-prefs", // set exit node ID via prefs @@ -804,6 +808,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, + wantChangePrefsErr: errManagedByPolicy, }, { name: "id-via-policy/cannot-override-via-prefs/by-ip", // syspolicy should take precedence over prefs @@ -822,6 +827,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, + wantChangePrefsErr: errManagedByPolicy, }, { name: "id-via-policy/cannot-override-via-prefs/by-auto-expr", // syspolicy should take precedence over prefs @@ -840,6 +846,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, + wantChangePrefsErr: errManagedByPolicy, }, { name: "ip-via-policy", // set exit node IP via syspolicy (should be resolved to an ID) @@ -999,15 +1006,16 @@ func TestConfigureExitNode(t *testing.T) { prefs: ipn.Prefs{ ControlURL: controlURL, }, - netMap: clientNetmap, - report: report, - exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), - useExitNodeEnabled: ptr.To(false), // should be ignored + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + useExitNodeEnabled: ptr.To(false), // should fail with an error + wantExitNodeToggleErr: errManagedByPolicy, wantPrefs: ipn.Prefs{ ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), // still enforced by the policy setting AutoExitNode: "any", - InternalExitNodePrior: "auto:any", + InternalExitNodePrior: "", }, }, } @@ -1046,14 +1054,17 @@ func TestConfigureExitNode(t *testing.T) { lb.SetControlClientStatus(lb.cc, controlclient.Status{NetMap: tt.netMap}) } + user := &ipnauth.TestActor{} // If we have a changePrefs, apply it. if tt.changePrefs != nil { - lb.EditPrefs(tt.changePrefs) + _, err := lb.EditPrefsAs(tt.changePrefs, user) + checkError(t, err, tt.wantChangePrefsErr, true) } // If we need to flip exit node toggle on or off, do it. if tt.useExitNodeEnabled != nil { - lb.SetUseExitNodeEnabled(*tt.useExitNodeEnabled) + _, err := lb.SetUseExitNodeEnabled(user, *tt.useExitNodeEnabled) + checkError(t, err, tt.wantExitNodeToggleErr, true) } // Now check the prefs. 
@@ -6218,6 +6229,18 @@ func TestDisplayMessageIPNBus(t *testing.T) { } } +func checkError(tb testing.TB, got, want error, fatal bool) { + tb.Helper() + f := tb.Errorf + if fatal { + f = tb.Fatalf + } + if (want == nil) != (got == nil) || + (want != nil && got != nil && want.Error() != got.Error() && !errors.Is(got, want)) { + f("gotErr: %v; wantErr: %v", got, want) + } +} + func toStrings[T ~string](in []T) []string { out := make([]string, len(in)) for i, v := range in { diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index a90ae5d84..d4b4b443e 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -1910,7 +1910,7 @@ func (h *Handler) serveSetUseExitNodeEnabled(w http.ResponseWriter, r *http.Requ http.Error(w, "invalid 'enabled' parameter", http.StatusBadRequest) return } - prefs, err := h.b.SetUseExitNodeEnabled(v) + prefs, err := h.b.SetUseExitNodeEnabled(h.Actor, v) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index a84afa5db..6555a58ac 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -72,7 +72,10 @@ func HasAnyOf(keys ...Key) (bool, error) { if errors.Is(err, setting.ErrNotConfigured) || errors.Is(err, setting.ErrNoSuchKey) { continue } - return err == nil, err // err may be nil or non-nil + if err != nil { + return false, err + } + return true, nil } return false, nil } From ea4018b757fa6f925be59f9d95011c3a7de3ee10 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 7 Jul 2025 17:21:21 -0500 Subject: [PATCH 1053/1708] ipn/ipnlocal: fix missing defer in testExtension.Shutdown Updates #cleanup Signed-off-by: Nick Khyl --- ipn/ipnlocal/extension_host_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go index f655c477f..509833ff6 100644 --- a/ipn/ipnlocal/extension_host_test.go +++ b/ipn/ipnlocal/extension_host_test.go @@ -1230,7 +1230,7 @@ func (e *testExtension) InitCalled() bool { func (e *testExtension) Shutdown() (err error) { e.t.Helper() e.mu.Lock() - e.mu.Unlock() + defer e.mu.Unlock() if e.ShutdownHook != nil { err = e.ShutdownHook(e) } From 47f431b656d0c35aac6f97530a4daa2404bc12d6 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 7 Jul 2025 19:46:20 -0700 Subject: [PATCH 1054/1708] net/udprelay: fix relaying between mixed address family sockets (#16485) We can't relay a packet received over the IPv4 socket back out the same socket if destined to an IPv6 address, and vice versa. 
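
In essence, the relay path now chooses the transmit socket by the
destination's address family (a simplified sketch of the change below;
variable names follow the patch, with pkt standing in for the received
datagram, rxSocket the socket it arrived on, and otherAFSocket the
server's socket for the other address family, which may be nil):

    // Choose the tx socket by the destination's address family.
    if from.Addr().Is4() == to.Addr().Is4() {
            // Same family: reply out the socket the packet arrived on.
            rxSocket.WriteMsgUDPAddrPort(pkt, nil, to)
    } else if otherAFSocket != nil {
            // Mixed families: use the socket bound to the other family.
            otherAFSocket.WriteMsgUDPAddrPort(pkt, nil, to)
    } // else: no socket for that family, so the packet is dropped.
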
Updates tailscale/corp#30206 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 42 ++++++++++--------- net/udprelay/server_test.go | 81 +++++++++++++++++++++++++++---------- 2 files changed, 83 insertions(+), 40 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index d2661e59f..979ccf717 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -112,7 +112,7 @@ type serverEndpoint struct { allocatedAt time.Time } -func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, uw udpWriter, serverDisco key.DiscoPublic) { +func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, conn *net.UDPConn, serverDisco key.DiscoPublic) { if senderIndex != 0 && senderIndex != 1 { return } @@ -165,7 +165,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex reply = serverDisco.AppendTo(reply) box := e.discoSharedSecrets[senderIndex].Seal(m.AppendMarshal(nil)) reply = append(reply, box...) - uw.WriteMsgUDPAddrPort(reply, nil, from) + conn.WriteMsgUDPAddrPort(reply, nil, from) return case *disco.BindUDPRelayEndpointAnswer: err := validateVNIAndRemoteKey(discoMsg.BindUDPRelayEndpointCommon) @@ -191,7 +191,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex } } -func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, uw udpWriter, serverDisco key.DiscoPublic) { +func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, conn *net.UDPConn, serverDisco key.DiscoPublic) { senderRaw, isDiscoMsg := disco.Source(b) if !isDiscoMsg { // Not a Disco message @@ -222,14 +222,10 @@ func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []by return } - e.handleDiscoControlMsg(from, senderIndex, discoMsg, uw, serverDisco) + e.handleDiscoControlMsg(from, senderIndex, discoMsg, conn, serverDisco) } -type udpWriter interface { - WriteMsgUDPAddrPort(b []byte, oob []byte, addr netip.AddrPort) (n, oobn int, err error) -} - -func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeader, b []byte, uw udpWriter, serverDisco key.DiscoPublic) { +func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeader, b []byte, rxSocket, otherAFSocket *net.UDPConn, serverDisco key.DiscoPublic) { if !gh.Control { if !e.isBound() { // not a control packet, but serverEndpoint isn't bound @@ -247,8 +243,16 @@ func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeade // unrecognized source return } - // relay packet - uw.WriteMsgUDPAddrPort(b, nil, to) + // Relay the packet towards the other party via the socket associated + // with the destination's address family. If source and destination + // address families are matching we tx on the same socket the packet + // was received (rxSocket), otherwise we use the "other" socket + // (otherAFSocket). [Server] makes no use of dual-stack sockets. 
+ if from.Addr().Is4() == to.Addr().Is4() { + rxSocket.WriteMsgUDPAddrPort(b, nil, to) + } else if otherAFSocket != nil { + otherAFSocket.WriteMsgUDPAddrPort(b, nil, to) + } return } @@ -258,7 +262,7 @@ func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeade } msg := b[packet.GeneveFixedHeaderLength:] - e.handleSealedDiscoControlMsg(from, msg, uw, serverDisco) + e.handleSealedDiscoControlMsg(from, msg, rxSocket, serverDisco) } func (e *serverEndpoint) isExpired(now time.Time, bindLifetime, steadyStateLifetime time.Duration) bool { @@ -346,10 +350,10 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve } s.wg.Add(1) - go s.packetReadLoop(s.uc4) + go s.packetReadLoop(s.uc4, s.uc6) if s.uc6 != nil { s.wg.Add(1) - go s.packetReadLoop(s.uc6) + go s.packetReadLoop(s.uc6, s.uc4) } s.wg.Add(1) go s.endpointGCLoop() @@ -531,7 +535,7 @@ func (s *Server) endpointGCLoop() { } } -func (s *Server) handlePacket(from netip.AddrPort, b []byte, uw udpWriter) { +func (s *Server) handlePacket(from netip.AddrPort, b []byte, rxSocket, otherAFSocket *net.UDPConn) { if stun.Is(b) && b[1] == 0x01 { // A b[1] value of 0x01 (STUN method binding) is sufficiently // non-overlapping with the Geneve header where the LSB is always 0 @@ -555,10 +559,10 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte, uw udpWriter) { return } - e.handlePacket(from, gh, b, uw, s.discoPublic) + e.handlePacket(from, gh, b, rxSocket, otherAFSocket, s.discoPublic) } -func (s *Server) packetReadLoop(uc *net.UDPConn) { +func (s *Server) packetReadLoop(readFromSocket, otherSocket *net.UDPConn) { defer func() { s.wg.Done() s.Close() @@ -566,11 +570,11 @@ func (s *Server) packetReadLoop(uc *net.UDPConn) { b := make([]byte, 1<<16-1) for { // TODO: extract laddr from IP_PKTINFO for use in reply - n, from, err := uc.ReadFromUDPAddrPort(b) + n, from, err := readFromSocket.ReadFromUDPAddrPort(b) if err != nil { return } - s.handlePacket(from, b[:n], uc) + s.handlePacket(from, b[:n], readFromSocket, otherSocket) } } diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index 8c0c5aff6..de1c29364 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -181,8 +181,9 @@ func TestServer(t *testing.T) { discoB := key.NewDisco() cases := []struct { - name string - overrideAddrs []netip.Addr + name string + overrideAddrs []netip.Addr + forceClientsMixedAF bool }{ { name: "over ipv4", @@ -192,6 +193,11 @@ func TestServer(t *testing.T) { name: "over ipv6", overrideAddrs: []netip.Addr{netip.MustParseAddr("::1")}, }, + { + name: "mixed address families", + overrideAddrs: []netip.Addr{netip.MustParseAddr("127.0.0.1"), netip.MustParseAddr("::1")}, + forceClientsMixedAF: true, + }, } for _, tt := range cases { @@ -216,16 +222,47 @@ func TestServer(t *testing.T) { t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) } - if len(endpoint.AddrPorts) != 1 { + if len(endpoint.AddrPorts) < 1 { t.Fatalf("unexpected endpoint.AddrPorts: %v", endpoint.AddrPorts) } - tcA := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, discoB.Public(), endpoint.ServerDisco) + tcAServerEndpointAddr := endpoint.AddrPorts[0] + tcA := newTestClient(t, endpoint.VNI, tcAServerEndpointAddr, discoA, discoB.Public(), endpoint.ServerDisco) defer tcA.close() - tcB := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, discoA.Public(), endpoint.ServerDisco) + tcBServerEndpointAddr := tcAServerEndpointAddr + if tt.forceClientsMixedAF { + foundMixedAF := false + for _, addr := 
range endpoint.AddrPorts { + if addr.Addr().Is4() != tcBServerEndpointAddr.Addr().Is4() { + tcBServerEndpointAddr = addr + foundMixedAF = true + } + } + if !foundMixedAF { + t.Fatal("force clients to mixed address families is set, but relay server lacks address family diversity") + } + } + tcB := newTestClient(t, endpoint.VNI, tcBServerEndpointAddr, discoB, discoA.Public(), endpoint.ServerDisco) defer tcB.close() - tcA.handshake(t) - tcB.handshake(t) + for i := 0; i < 2; i++ { + // We handshake both clients twice to guarantee server-side + // packet reading goroutines, which are independent across + // address families, have seen an answer from both clients + // before proceeding. This is needed because the test assumes + // that B's handshake is complete (the first send is A->B below), + // but the server may not have handled B's handshake answer + // before it handles A's data pkt towards B. + // + // Data transmissions following "re-handshakes" orient so that + // the sender is the same as the party that performed the + // handshake, for the same reasons. + // + // [magicsock.relayManager] is not prone to this issue as both + // parties transmit data packets immediately following their + // handshake answer. + tcA.handshake(t) + tcB.handshake(t) + } dupEndpoint, err = server.AllocateEndpoint(discoA.Public(), discoB.Public()) if err != nil { @@ -250,30 +287,32 @@ func TestServer(t *testing.T) { t.Fatal("unexpected msg B->A") } - tcAOnNewPort := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, discoB.Public(), endpoint.ServerDisco) + tcAOnNewPort := newTestClient(t, endpoint.VNI, tcAServerEndpointAddr, discoA, discoB.Public(), endpoint.ServerDisco) tcAOnNewPort.handshakeGeneration = tcA.handshakeGeneration + 1 defer tcAOnNewPort.close() - // Handshake client A on a new source IP:port, verify we receive packets on the new binding + // Handshake client A on a new source IP:port, verify we can send packets on the new binding tcAOnNewPort.handshake(t) - txToAOnNewPort := []byte{7, 8, 9} - tcB.writeDataPkt(t, txToAOnNewPort) - rxFromB = tcAOnNewPort.readDataPkt(t) - if !bytes.Equal(txToAOnNewPort, rxFromB) { - t.Fatal("unexpected msg B->A") + + fromAOnNewPort := []byte{7, 8, 9} + tcAOnNewPort.writeDataPkt(t, fromAOnNewPort) + rxFromA = tcB.readDataPkt(t) + if !bytes.Equal(fromAOnNewPort, rxFromA) { + t.Fatal("unexpected msg A->B") } - tcBOnNewPort := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, discoA.Public(), endpoint.ServerDisco) + tcBOnNewPort := newTestClient(t, endpoint.VNI, tcBServerEndpointAddr, discoB, discoA.Public(), endpoint.ServerDisco) tcBOnNewPort.handshakeGeneration = tcB.handshakeGeneration + 1 defer tcBOnNewPort.close() - // Handshake client B on a new source IP:port, verify we receive packets on the new binding + // Handshake client B on a new source IP:port, verify we can send packets on the new binding tcBOnNewPort.handshake(t) - txToBOnNewPort := []byte{7, 8, 9} - tcAOnNewPort.writeDataPkt(t, txToBOnNewPort) - rxFromA = tcBOnNewPort.readDataPkt(t) - if !bytes.Equal(txToBOnNewPort, rxFromA) { - t.Fatal("unexpected msg A->B") + + fromBOnNewPort := []byte{7, 8, 9} + tcBOnNewPort.writeDataPkt(t, fromBOnNewPort) + rxFromB = tcAOnNewPort.readDataPkt(t) + if !bytes.Equal(fromBOnNewPort, rxFromB) { + t.Fatal("unexpected msg B->A") } }) } From 5b0074729d38f8cc301803da06086033f53b1b93 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 8 Jul 2025 09:45:18 -0700 Subject: [PATCH 1055/1708] go.mod,wgengine/magicsock: implement 
conn.InitiationAwareEndpoint (#16486) Since a [*lazyEndpoint] makes wireguard-go responsible for peer ID, but wireguard-go may not yet be configured for said peer, we need a JIT hook around initiation message reception to call what is usually called from an [*endpoint]. Updates tailscale/corp#30042 Signed-off-by: Jordan Whited --- go.mod | 2 +- go.sum | 4 ++-- wgengine/magicsock/magicsock.go | 34 ++++++++++++++++++++++++++++++--- 3 files changed, 34 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 5bf04feda..e89a383a6 100644 --- a/go.mod +++ b/go.mod @@ -90,7 +90,7 @@ require ( github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 - github.com/tailscale/wireguard-go v0.0.0-20250701223756-24483d7a0003 + github.com/tailscale/wireguard-go v0.0.0-20250707220504-1f398ae148a8 github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e github.com/tc-hib/winres v0.2.1 github.com/tcnksm/go-httpstat v0.2.0 diff --git a/go.sum b/go.sum index f9910bb59..062af6662 100644 --- a/go.sum +++ b/go.sum @@ -975,8 +975,8 @@ github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:U github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= -github.com/tailscale/wireguard-go v0.0.0-20250701223756-24483d7a0003 h1:chIzUDKxR0nXQQra0j41aqiiFNICs0FIC5ZCwDO7z3k= -github.com/tailscale/wireguard-go v0.0.0-20250701223756-24483d7a0003/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250707220504-1f398ae148a8 h1:Yjg/+1VVRcdY3DL9fs8g+QnZ1aizotU0pp0VSOSCuTQ= +github.com/tailscale/wireguard-go v0.0.0-20250707220504-1f398ae148a8/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a7eab3678..fbfcf0b41 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3777,17 +3777,45 @@ func (c *Conn) SetLastNetcheckReportForTest(ctx context.Context, report *netchec // lazyEndpoint is a wireguard [conn.Endpoint] for when magicsock received a // non-disco (presumably WireGuard) packet from a UDP address from which we // can't map to a Tailscale peer. But WireGuard most likely can, once it -// decrypts it. So we implement the [conn.PeerAwareEndpoint] interface -// from https://github.com/tailscale/wireguard-go/pull/27 to allow WireGuard -// to tell us who it is later and get the correct [conn.Endpoint]. +// decrypts it. So we implement the [conn.InitiationAwareEndpoint] and +// [conn.PeerAwareEndpoint] interfaces, to allow WireGuard to tell us who it is +// later, just-in-time to configure the peer, and set the associated [epAddr] +// in the [peerMap]. Future receives on the associated [epAddr] will then be +// resolvable directly to an [*endpoint]. 
type lazyEndpoint struct { c *Conn src epAddr } +var _ conn.InitiationAwareEndpoint = (*lazyEndpoint)(nil) var _ conn.PeerAwareEndpoint = (*lazyEndpoint)(nil) var _ conn.Endpoint = (*lazyEndpoint)(nil) +// InitiationMessagePublicKey implements [conn.InitiationAwareEndpoint]. +// wireguard-go calls us here if we passed it a [*lazyEndpoint] for an +// initiation message, for which it might not have the relevant peer configured, +// enabling us to just-in-time configure it and note its activity via +// [*endpoint.noteRecvActivity], before it performs peer lookup and attempts +// decryption. +// +// Reception of all other WireGuard message types implies pre-existing knowledge +// of the peer by wireguard-go for it to do useful work. See +// [userspaceEngine.maybeReconfigWireguardLocked] & +// [userspaceEngine.noteRecvActivity] for more details around just-in-time +// wireguard-go peer (de)configuration. +func (le *lazyEndpoint) InitiationMessagePublicKey(peerPublicKey [32]byte) { + pubKey := key.NodePublicFromRaw32(mem.B(peerPublicKey[:])) + le.c.mu.Lock() + defer le.c.mu.Unlock() + ep, ok := le.c.peerMap.endpointForNodeKey(pubKey) + if !ok { + return + } + now := mono.Now() + ep.lastRecvUDPAny.StoreAtomic(now) + ep.noteRecvActivity(le.src, now) +} + func (le *lazyEndpoint) ClearSrc() {} func (le *lazyEndpoint) SrcIP() netip.Addr { return netip.Addr{} } From 1fe82d6ef5f48a85ce7ba6ce388a6c29f112b2cb Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 8 Jul 2025 14:37:13 -0500 Subject: [PATCH 1056/1708] cmd/tailscale/cli,ipn/ipnlocal: restrict logout when AlwaysOn mode is enabled In this PR, we start passing a LocalAPI actor to (*LocalBackend).Logout to make it subject to the same access check as disconnects made via tailscale down or the GUI. We then update the CLI to allow `tailscale logout` to accept a reason, similar to `tailscale down`. Updates tailscale/corp#26249 Signed-off-by: Nick Khyl --- cmd/tailscale/cli/logout.go | 12 ++++++++++++ cmd/tsconnect/wasm/wasm_js.go | 3 ++- ipn/ipnlocal/local.go | 9 +++------ ipn/ipnlocal/state_test.go | 7 ++++--- ipn/localapi/localapi.go | 2 +- 5 files changed, 22 insertions(+), 11 deletions(-) diff --git a/cmd/tailscale/cli/logout.go b/cmd/tailscale/cli/logout.go index 0c2007a66..fbc394730 100644 --- a/cmd/tailscale/cli/logout.go +++ b/cmd/tailscale/cli/logout.go @@ -5,12 +5,18 @@ package cli import ( "context" + "flag" "fmt" "strings" "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/client/tailscale/apitype" ) +var logoutArgs struct { + reason string +} + var logoutCmd = &ffcli.Command{ Name: "logout", ShortUsage: "tailscale logout", @@ -22,11 +28,17 @@ the current node key, forcing a future use of it to cause a reauthentication. 
`), Exec: runLogout, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("logout") + fs.StringVar(&logoutArgs.reason, "reason", "", "reason for the logout, if required by a policy") + return fs + })(), } func runLogout(ctx context.Context, args []string) error { if len(args) > 0 { return fmt.Errorf("too many non-flag arguments: %q", args) } + ctx = apitype.RequestReasonKey.WithValue(ctx, logoutArgs.reason) return localClient.Logout(ctx) } diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index 779a87e49..ebf7284aa 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -27,6 +27,7 @@ import ( "golang.org/x/crypto/ssh" "tailscale.com/control/controlclient" "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnserver" "tailscale.com/ipn/store/mem" @@ -336,7 +337,7 @@ func (i *jsIPN) logout() { go func() { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - i.lb.Logout(ctx) + i.lb.Logout(ctx, ipnauth.Self) }() } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 03a0709e2..8fbce4631 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1077,7 +1077,7 @@ func (b *LocalBackend) Shutdown() { ctx, cancel := context.WithTimeout(b.ctx, 5*time.Second) defer cancel() t0 := time.Now() - err := b.Logout(ctx) // best effort + err := b.Logout(ctx, ipnauth.Self) // best effort td := time.Since(t0).Round(time.Millisecond) if err != nil { b.logf("failed to log out ephemeral node on shutdown after %v: %v", td, err) @@ -5884,7 +5884,7 @@ func (b *LocalBackend) ShouldHandleViaIP(ip netip.Addr) bool { // Logout logs out the current profile, if any, and waits for the logout to // complete. -func (b *LocalBackend) Logout(ctx context.Context) error { +func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { unlock := b.lockAndGetUnlock() defer unlock() @@ -5898,11 +5898,8 @@ func (b *LocalBackend) Logout(ctx context.Context) error { // delete it later. profile := b.pm.CurrentProfile() - // TODO(nickkhyl): change [LocalBackend.Logout] to accept an [ipnauth.Actor]. - // This will allow enforcing Always On mode when a user tries to log out - // while logged in and connected. See tailscale/corp#26249. _, err := b.editPrefsLockedOnEntry( - ipnauth.TODO, + actor, &ipn.MaskedPrefs{ WantRunningSet: true, LoggedOutSet: true, diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index f0ac5f944..c29589acc 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -21,6 +21,7 @@ import ( "tailscale.com/control/controlclient" "tailscale.com/envknob" "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnstate" "tailscale.com/ipn/store/mem" "tailscale.com/net/dns" @@ -607,7 +608,7 @@ func TestStateMachine(t *testing.T) { store.awaitWrite() t.Logf("\n\nLogout") notifies.expect(5) - b.Logout(context.Background()) + b.Logout(context.Background(), ipnauth.Self) { nn := notifies.drain(5) previousCC.assertCalls("pause", "Logout", "unpause", "Shutdown") @@ -637,7 +638,7 @@ func TestStateMachine(t *testing.T) { // A second logout should be a no-op as we are in the NeedsLogin state. t.Logf("\n\nLogout2") notifies.expect(0) - b.Logout(context.Background()) + b.Logout(context.Background(), ipnauth.Self) { notifies.drain(0) cc.assertCalls() @@ -650,7 +651,7 @@ func TestStateMachine(t *testing.T) { // AuthCantContinue state. 
t.Logf("\n\nLogout3") notifies.expect(3) - b.Logout(context.Background()) + b.Logout(context.Background(), ipnauth.Self) { notifies.drain(0) cc.assertCalls() diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index d4b4b443e..60ed89b3b 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -1460,7 +1460,7 @@ func (h *Handler) serveLogout(w http.ResponseWriter, r *http.Request) { http.Error(w, "want POST", http.StatusBadRequest) return } - err := h.b.Logout(r.Context()) + err := h.b.Logout(r.Context(), h.Actor) if err == nil { w.WriteHeader(http.StatusNoContent) return From 9bf99741ddb42cf3a2dec644cdff0f8cf9b99265 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 7 Jul 2025 19:02:10 -0500 Subject: [PATCH 1057/1708] ipn/ipnlocal: refactor resolveExitNodeInPrefsLocked, setExitNodeID and resolveExitNodeIP Now that resolveExitNodeInPrefsLocked is the only caller of setExitNodeID, and setExitNodeID is the only caller of resolveExitNodeIP, we can restructure the code with resolveExitNodeInPrefsLocked now calling both resolveAutoExitNodeLocked and resolveExitNodeIPLocked directly. This prepares for factoring out resolveAutoExitNodeLocked and related auto-exit-node logic into an ipnext extension in a future commit. While there, we also update exit node by IP lookup to use (*nodeBackend).NodeByAddr and (*nodeBackend).NodeByID instead of iterating over all peers in the most recent netmap. Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 106 ++++++++++++++++++++---------------------- 1 file changed, 51 insertions(+), 55 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8fbce4631..221edad92 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2030,47 +2030,48 @@ func mutationsAreWorthyOfTellingIPNBus(muts []netmap.NodeMutation) bool { return false } -// setExitNodeID updates prefs to either use the suggestedExitNodeID if AutoExitNode is enabled, -// or resolve ExitNodeIP to an ID and use that. It returns whether prefs was mutated. -func setExitNodeID(prefs *ipn.Prefs, suggestedExitNodeID tailcfg.StableNodeID, nm *netmap.NetworkMap) (prefsChanged bool) { - if prefs.AutoExitNode.IsSet() { - var newExitNodeID tailcfg.StableNodeID - if !suggestedExitNodeID.IsZero() { - // If we have a suggested exit node, use it. - newExitNodeID = suggestedExitNodeID - } else if isAllowedAutoExitNodeID(prefs.ExitNodeID) { - // If we don't have a suggested exit node, but the prefs already - // specify an allowed auto exit node ID, retain it. - newExitNodeID = prefs.ExitNodeID - } else { - // Otherwise, use [unresolvedExitNodeID] to install a blackhole route, - // preventing traffic from leaking to the local network until an actual - // exit node is selected. - newExitNodeID = unresolvedExitNodeID - } - if prefs.ExitNodeID != newExitNodeID { - prefs.ExitNodeID = newExitNodeID - prefsChanged = true - } - if prefs.ExitNodeIP.IsValid() { - prefs.ExitNodeIP = netip.Addr{} - prefsChanged = true - } - return prefsChanged +// resolveAutoExitNodeLocked computes a suggested exit node and updates prefs +// to use it if AutoExitNode is enabled, and reports whether prefs was mutated. +// +// b.mu must be held. 
+func (b *LocalBackend) resolveAutoExitNodeLocked(prefs *ipn.Prefs) (prefsChanged bool) { + if !prefs.AutoExitNode.IsSet() { + return false + } + if _, err := b.suggestExitNodeLocked(); err != nil && !errors.Is(err, ErrNoPreferredDERP) { + b.logf("failed to select auto exit node: %v", err) // non-fatal, see below + } + var newExitNodeID tailcfg.StableNodeID + if !b.lastSuggestedExitNode.IsZero() { + // If we have a suggested exit node, use it. + newExitNodeID = b.lastSuggestedExitNode + } else if isAllowedAutoExitNodeID(prefs.ExitNodeID) { + // If we don't have a suggested exit node, but the prefs already + // specify an allowed auto exit node ID, retain it. + newExitNodeID = prefs.ExitNodeID + } else { + // Otherwise, use [unresolvedExitNodeID] to install a blackhole route, + // preventing traffic from leaking to the local network until an actual + // exit node is selected. + newExitNodeID = unresolvedExitNodeID + } + if prefs.ExitNodeID != newExitNodeID { + prefs.ExitNodeID = newExitNodeID + prefsChanged = true } - return resolveExitNodeIP(prefs, nm) + if prefs.ExitNodeIP.IsValid() { + prefs.ExitNodeIP = netip.Addr{} + prefsChanged = true + } + return prefsChanged } -// resolveExitNodeIP updates prefs to reference an exit node by ID, rather +// resolveExitNodeIPLocked updates prefs to reference an exit node by ID, rather // than by IP. It returns whether prefs was mutated. -func resolveExitNodeIP(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bool) { - if nm == nil { - // No netmap, can't resolve anything. - return false - } - - // If we have a desired IP on file, try to find the corresponding - // node. +// +// b.mu must be held. +func (b *LocalBackend) resolveExitNodeIPLocked(prefs *ipn.Prefs) (prefsChanged bool) { + // If we have a desired IP on file, try to find the corresponding node. if !prefs.ExitNodeIP.IsValid() { return false } @@ -2081,20 +2082,19 @@ func resolveExitNodeIP(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bo prefsChanged = true } - oldExitNodeID := prefs.ExitNodeID - for _, peer := range nm.Peers { - for _, addr := range peer.Addresses().All() { - if !addr.IsSingleIP() || addr.Addr() != prefs.ExitNodeIP { - continue - } + cn := b.currentNode() + if nid, ok := cn.NodeByAddr(prefs.ExitNodeIP); ok { + if node, ok := cn.NodeByID(nid); ok { // Found the node being referenced, upgrade prefs to // reference it directly for next time. - prefs.ExitNodeID = peer.StableID() + prefs.ExitNodeID = node.StableID() prefs.ExitNodeIP = netip.Addr{} - return prefsChanged || oldExitNodeID != prefs.ExitNodeID + // Cleared ExitNodeIP, so prefs changed + // even if the ID stayed the same. + prefsChanged = true + } } - return prefsChanged } @@ -6042,17 +6042,13 @@ func (b *LocalBackend) reconcilePrefsLocked(prefs *ipn.Prefs) (changed bool) { // // b.mu must be held. 
func (b *LocalBackend) resolveExitNodeInPrefsLocked(prefs *ipn.Prefs) (changed bool) { - if prefs.AutoExitNode.IsSet() { - _, err := b.suggestExitNodeLocked() - if err != nil && !errors.Is(err, ErrNoPreferredDERP) { - b.logf("failed to select auto exit node: %v", err) - } + if b.resolveAutoExitNodeLocked(prefs) { + changed = true } - if setExitNodeID(prefs, b.lastSuggestedExitNode, b.currentNode().NetMap()) { - b.logf("exit node resolved: %v", prefs.ExitNodeID) - return true + if b.resolveExitNodeIPLocked(prefs) { + changed = true } - return false + return changed } // setNetMapLocked updates the LocalBackend state to reflect the newly From 2c630e126b84b537053947b579f0b44623deb496 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 7 Jul 2025 19:05:41 -0500 Subject: [PATCH 1058/1708] ipn/ipnlocal: make applySysPolicy a method on LocalBackend Now that applySysPolicy is only called by (*LocalBackend).reconcilePrefsLocked, we can make it a method to avoid passing state via parameters and to support future extensibility. Also factor out exit node-specific logic into applyExitNodeSysPolicyLocked. Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 52 ++++++++++++++++++++++++-------------- ipn/ipnlocal/local_test.go | 6 +++-- 2 files changed, 37 insertions(+), 21 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 221edad92..9ed9522ab 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1800,9 +1800,11 @@ var preferencePolicies = []preferencePolicyInfo{ }, } -// applySysPolicy overwrites configured preferences with policies that may be +// applySysPolicyLocked overwrites configured preferences with policies that may be // configured by the system administrator in an OS-specific way. -func applySysPolicy(prefs *ipn.Prefs, overrideAlwaysOn bool) (anyChange bool) { +// +// b.mu must be held. +func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { if controlURL, err := syspolicy.GetString(syspolicy.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { prefs.ControlURL = controlURL anyChange = true @@ -1839,6 +1841,34 @@ func applySysPolicy(prefs *ipn.Prefs, overrideAlwaysOn bool) (anyChange bool) { } } + if b.applyExitNodeSysPolicyLocked(prefs) { + anyChange = true + } + + if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); alwaysOn && !b.overrideAlwaysOn && !prefs.WantRunning { + prefs.WantRunning = true + anyChange = true + } + + for _, opt := range preferencePolicies { + if po, err := syspolicy.GetPreferenceOption(opt.key); err == nil { + curVal := opt.get(prefs.View()) + newVal := po.ShouldEnable(curVal) + if curVal != newVal { + opt.set(prefs, newVal) + anyChange = true + } + } + } + + return anyChange +} + +// applyExitNodeSysPolicyLocked applies the exit node policy settings to prefs +// and reports whether any change was made. +// +// b.mu must be held. 
+func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { if exitNodeIDStr, _ := syspolicy.GetString(syspolicy.ExitNodeID, ""); exitNodeIDStr != "" { exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) @@ -1894,22 +1924,6 @@ func applySysPolicy(prefs *ipn.Prefs, overrideAlwaysOn bool) (anyChange bool) { } } - if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); alwaysOn && !overrideAlwaysOn && !prefs.WantRunning { - prefs.WantRunning = true - anyChange = true - } - - for _, opt := range preferencePolicies { - if po, err := syspolicy.GetPreferenceOption(opt.key); err == nil { - curVal := opt.get(prefs.View()) - newVal := po.ShouldEnable(curVal) - if curVal != newVal { - opt.set(prefs, newVal) - anyChange = true - } - } - } - return anyChange } @@ -6024,7 +6038,7 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { // // b.mu must be held. func (b *LocalBackend) reconcilePrefsLocked(prefs *ipn.Prefs) (changed bool) { - if applySysPolicy(prefs, b.overrideAlwaysOn) { + if b.applySysPolicyLocked(prefs) { changed = true } if b.resolveExitNodeInPrefsLocked(prefs) { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 1e1b7663a..b8526a4fc 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2968,7 +2968,8 @@ func TestApplySysPolicy(t *testing.T) { t.Run("unit", func(t *testing.T) { prefs := tt.prefs.Clone() - gotAnyChange := applySysPolicy(prefs, false) + lb := newTestLocalBackend(t) + gotAnyChange := lb.applySysPolicyLocked(prefs) if gotAnyChange && prefs.Equals(&tt.prefs) { t.Errorf("anyChange but prefs is unchanged: %v", prefs.Pretty()) @@ -3116,7 +3117,8 @@ func TestPreferencePolicyInfo(t *testing.T) { prefs := defaultPrefs.AsStruct() pp.set(prefs, tt.initialValue) - gotAnyChange := applySysPolicy(prefs, false) + lb := newTestLocalBackend(t) + gotAnyChange := lb.applySysPolicyLocked(prefs) if gotAnyChange != tt.wantChange { t.Errorf("anyChange=%v, want %v", gotAnyChange, tt.wantChange) From 740b77df594f649830d151f64700caea5c341e60 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 8 Jul 2025 16:08:28 -0500 Subject: [PATCH 1059/1708] ipn/ipnlocal,util/syspolicy: add support for ExitNode.AllowOverride policy setting When the policy setting is enabled, it allows users to override the exit node enforced by the ExitNodeID or ExitNodeIP policy. It's primarily intended for use when ExitNodeID is set to auto:any, but it can also be used with specific exit nodes. It does not allow disabling exit node usage entirely. Once the exit node policy is overridden, it will not be enforced again until the policy changes, the user connects or disconnects Tailscale, switches profiles, or disables the override. Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 119 ++++++++- ipn/ipnlocal/local_test.go | 312 ++++++++++++++++++++++++ util/syspolicy/policy_keys.go | 10 + util/syspolicy/rsop/change_callbacks.go | 5 + 4 files changed, 434 insertions(+), 12 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9ed9522ab..c54cb32d3 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -414,6 +414,19 @@ type LocalBackend struct { // reconnectTimer is used to schedule a reconnect by setting [ipn.Prefs.WantRunning] // to true after a delay, or nil if no reconnect is scheduled. 
reconnectTimer tstime.TimerController + + // overrideExitNodePolicy is whether the user has overridden the exit node policy + // by manually selecting an exit node, as allowed by [syspolicy.AllowExitNodeOverride]. + // + // If true, the [syspolicy.ExitNodeID] and [syspolicy.ExitNodeIP] policy settings are ignored, + // and the suggested exit node is not applied automatically. + // + // It is cleared when the user switches back to the state required by policy (typically, auto:any), + // or when switching profiles, connecting/disconnecting Tailscale, restarting the client, + // or on similar events. + // + // See tailscale/corp#29969. + overrideExitNodePolicy bool } // HealthTracker returns the health tracker for the backend. @@ -1841,7 +1854,8 @@ func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { } } - if b.applyExitNodeSysPolicyLocked(prefs) { + // Only apply the exit node policy if the user hasn't overridden it. + if !b.overrideExitNodePolicy && b.applyExitNodeSysPolicyLocked(prefs) { anyChange = true } @@ -1957,7 +1971,7 @@ func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { // sysPolicyChanged is a callback triggered by syspolicy when it detects // a change in one or more syspolicy settings. func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { - if policy.HasChanged(syspolicy.AlwaysOn) || policy.HasChanged(syspolicy.AlwaysOnOverrideWithReason) { + if policy.HasChangedAnyOf(syspolicy.AlwaysOn, syspolicy.AlwaysOnOverrideWithReason) { // If the AlwaysOn or the AlwaysOnOverrideWithReason policy has changed, // we should reset the overrideAlwaysOn flag, as the override might // no longer be valid. @@ -1966,6 +1980,14 @@ func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { b.mu.Unlock() } + if policy.HasChangedAnyOf(syspolicy.ExitNodeID, syspolicy.ExitNodeIP, syspolicy.AllowExitNodeOverride) { + // Reset the exit node override if a policy that enforces exit node usage + // or allows the user to override automatic exit node selection has changed. + b.mu.Lock() + b.overrideExitNodePolicy = false + b.mu.Unlock() + } + if policy.HasChanged(syspolicy.AllowedSuggestedExitNodes) { b.refreshAllowedSuggestions() // Re-evaluate exit node suggestion now that the policy setting has changed. @@ -4320,12 +4342,12 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip } // checkEditPrefsAccessLocked checks whether the current user has access -// to apply the prefs changes in mp. +// to apply the changes in mp to the given prefs. // // It returns an error if the user is not allowed, or nil otherwise. // // b.mu must be held. -func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, mp *ipn.MaskedPrefs) error { +func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn.PrefsView, mp *ipn.MaskedPrefs) error { var errs []error if mp.RunSSHSet && mp.RunSSH && !envknob.CanSSHD() { @@ -4342,14 +4364,18 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, mp *ipn.M // Prevent users from changing exit node preferences // when exit node usage is managed by policy. if mp.ExitNodeIDSet || mp.ExitNodeIPSet || mp.AutoExitNodeSet { - // TODO(nickkhyl): Allow users to override ExitNode policy settings - // if the ExitNode.AllowUserOverride policy permits it. - // (Policy setting name and details are TBD. 
See tailscale/corp#29969) isManaged, err := syspolicy.HasAnyOf(syspolicy.ExitNodeID, syspolicy.ExitNodeIP) if err != nil { err = fmt.Errorf("policy check failed: %w", err) } else if isManaged { - err = errManagedByPolicy + // Allow users to override ExitNode policy settings and select an exit node manually + // if permitted by [syspolicy.AllowExitNodeOverride]. + // + // Disabling exit node usage entirely is not allowed. + allowExitNodeOverride, _ := syspolicy.GetBoolean(syspolicy.AllowExitNodeOverride, false) + if !allowExitNodeOverride || b.changeDisablesExitNodeLocked(prefs, mp) { + err = errManagedByPolicy + } } if err != nil { errs = append(errs, fmt.Errorf("exit node cannot be changed: %w", err)) @@ -4359,19 +4385,70 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, mp *ipn.M return multierr.New(errs...) } +// changeDisablesExitNodeLocked reports whether applying the change +// to the given prefs would disable exit node usage. +// +// In other words, it returns true if prefs.ExitNodeID is non-empty +// initially, but would become empty after applying the given change. +// +// It applies the same adjustments and resolves the exit node in the prefs +// as done during actual edits. While not optimal performance-wise, +// changing the exit node via LocalAPI isn't a hot path, and reusing +// the same logic ensures consistency and simplifies maintenance. +// +// b.mu must be held. +func (b *LocalBackend) changeDisablesExitNodeLocked(prefs ipn.PrefsView, change *ipn.MaskedPrefs) bool { + if !change.AutoExitNodeSet && !change.ExitNodeIDSet && !change.ExitNodeIPSet { + // The change does not affect exit node usage. + return false + } + + if prefs.ExitNodeID() == "" { + // Exit node usage is already disabled. + // Note that we do not check for ExitNodeIP here. + // If ExitNodeIP hasn't been resolved to a node, + // it's not enabled yet. + return false + } + + // First, apply the adjustments to a copy of the changes, + // e.g., clear AutoExitNode if ExitNodeID is set. + tmpChange := ptr.To(*change) + tmpChange.Prefs = *change.Prefs.Clone() + b.adjustEditPrefsLocked(prefs, tmpChange) + + // Then apply the adjusted changes to a copy of the current prefs, + // and resolve the exit node in the prefs. + tmpPrefs := prefs.AsStruct() + tmpPrefs.ApplyEdits(tmpChange) + b.resolveExitNodeInPrefsLocked(tmpPrefs) + + // If ExitNodeID is empty after applying the changes, + // but wasn't empty before, then the change disables + // exit node usage. + return tmpPrefs.ExitNodeID == "" + +} + // adjustEditPrefsLocked applies additional changes to mp if necessary, // such as zeroing out mutually exclusive fields. // // It must not assume that the changes in mp will actually be applied. // // b.mu must be held. -func (b *LocalBackend) adjustEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs) { +func (b *LocalBackend) adjustEditPrefsLocked(prefs ipn.PrefsView, mp *ipn.MaskedPrefs) { // Zeroing the ExitNodeID via localAPI must also zero the prior exit node. if mp.ExitNodeIDSet && mp.ExitNodeID == "" && !mp.InternalExitNodePriorSet { mp.InternalExitNodePrior = "" mp.InternalExitNodePriorSet = true } + // Clear ExitNodeID if AutoExitNode is disabled and ExitNodeID is still unresolved. + if mp.AutoExitNodeSet && mp.AutoExitNode == "" && prefs.ExitNodeID() == unresolvedExitNodeID { + mp.ExitNodeIDSet = true + mp.ExitNodeID = "" + } + // Disable automatic exit node selection if the user explicitly sets // ExitNodeID or ExitNodeIP. 
if (mp.ExitNodeIDSet || mp.ExitNodeIPSet) && !mp.AutoExitNodeSet { @@ -4404,6 +4481,22 @@ func (b *LocalBackend) onEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs, o } } + if oldPrefs.WantRunning() != newPrefs.WantRunning() { + // Connecting to or disconnecting from Tailscale clears the override, + // unless the user is also explicitly changing the exit node (see below). + b.overrideExitNodePolicy = false + } + if mp.AutoExitNodeSet || mp.ExitNodeIDSet || mp.ExitNodeIPSet { + if allowExitNodeOverride, _ := syspolicy.GetBoolean(syspolicy.AllowExitNodeOverride, false); allowExitNodeOverride { + // If applying exit node policy settings to the new prefs results in no change, + // the user is not overriding the policy. Otherwise, it is an override. + b.overrideExitNodePolicy = b.applyExitNodeSysPolicyLocked(newPrefs.AsStruct()) + } else { + // Overrides are not allowed; clear the override flag. + b.overrideExitNodePolicy = false + } + } + // This is recorded here in the EditPrefs path, not the setPrefs path on purpose. // recordForEdit records metrics related to edits and changes, not the final state. // If, in the future, we want to record gauge-metrics related to the state of prefs, @@ -4486,15 +4579,17 @@ func (b *LocalBackend) stopReconnectTimerLocked() { func (b *LocalBackend) editPrefsLockedOnEntry(actor ipnauth.Actor, mp *ipn.MaskedPrefs, unlock unlockOnce) (ipn.PrefsView, error) { defer unlock() // for error paths + p0 := b.pm.CurrentPrefs() + // Check if the changes in mp are allowed. - if err := b.checkEditPrefsAccessLocked(actor, mp); err != nil { + if err := b.checkEditPrefsAccessLocked(actor, p0, mp); err != nil { b.logf("EditPrefs(%v): %v", mp.Pretty(), err) return ipn.PrefsView{}, err } // Apply additional changes to mp if necessary, // such as clearing mutually exclusive fields. 
- b.adjustEditPrefsLocked(actor, mp) + b.adjustEditPrefsLocked(p0, mp) if mp.EggSet { mp.EggSet = false @@ -4502,7 +4597,6 @@ func (b *LocalBackend) editPrefsLockedOnEntry(actor ipnauth.Actor, mp *ipn.Maske b.goTracker.Go(b.doSetHostinfoFilterServices) } - p0 := b.pm.CurrentPrefs() p1 := b.pm.CurrentPrefs().AsStruct() p1.ApplyEdits(mp) @@ -7231,6 +7325,7 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err b.serveConfig = ipn.ServeConfigView{} b.lastSuggestedExitNode = "" b.keyExpired = false + b.overrideExitNodePolicy = false b.resetAlwaysOnOverrideLocked() b.extHost.NotifyProfileChange(b.pm.CurrentProfile(), b.pm.CurrentPrefs(), false) b.setAtomicValuesFromPrefsLocked(b.pm.CurrentPrefs()) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index b8526a4fc..8bc84b081 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -623,6 +623,7 @@ func TestConfigureExitNode(t *testing.T) { exitNodeIDPolicy *tailcfg.StableNodeID exitNodeIPPolicy *netip.Addr exitNodeAllowedIDs []tailcfg.StableNodeID // nil if all IDs are allowed for auto exit nodes + exitNodeAllowOverride bool // whether [syspolicy.AllowExitNodeOverride] should be set to true wantChangePrefsErr error // if non-nil, the error we expect from [LocalBackend.EditPrefsAs] wantPrefs ipn.Prefs wantExitNodeToggleErr error // if non-nil, the error we expect from [LocalBackend.SetUseExitNodeEnabled] @@ -1018,6 +1019,108 @@ func TestConfigureExitNode(t *testing.T) { InternalExitNodePrior: "", }, }, + { + name: "auto-any-via-policy/allow-override/change", // changing the exit node is allowed by [syspolicy.AllowExitNodeOverride] + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeAllowOverride: true, // allow changing the exit node + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + ExitNodeID: exitNode2.StableID(), // change the exit node ID + }, + ExitNodeIDSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), // overridden by user + AutoExitNode: "", // cleared, as we are setting the exit node ID explicitly + }, + }, + { + name: "auto-any-via-policy/allow-override/clear", // clearing the exit node ID is not allowed by [syspolicy.AllowExitNodeOverride] + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeAllowOverride: true, // allow changing, but not disabling, the exit node + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + ExitNodeID: "", // clearing the exit node ID disables the exit node and should not be allowed + }, + ExitNodeIDSet: true, + }, + wantChangePrefsErr: errManagedByPolicy, // edit prefs should fail with an error + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // still enforced by the policy setting + AutoExitNode: "any", + InternalExitNodePrior: "", + }, + }, + { + name: "auto-any-via-policy/allow-override/toggle-off", // similarly, toggling off the exit node is not allowed even with [syspolicy.AllowExitNodeOverride] + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeAllowOverride: true, // allow changing, but not disabling, the exit node + useExitNodeEnabled: ptr.To(false), // should fail with an error + wantExitNodeToggleErr: 
errManagedByPolicy, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // still enforced by the policy setting + AutoExitNode: "any", + InternalExitNodePrior: "", + }, + }, + { + name: "auto-any-via-initial-prefs/no-netmap/clear-auto-exit-node", + prefs: ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: ipn.AnyExitNode, + }, + netMap: nil, // no netmap; exit node cannot be resolved + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + AutoExitNode: "", // clear the auto exit node + }, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: "", // cleared + ExitNodeID: "", // has never been resolved, so it should be cleared as well + }, + }, + { + name: "auto-any-via-initial-prefs/with-netmap/clear-auto-exit-node", + prefs: ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: ipn.AnyExitNode, + }, + netMap: clientNetmap, // has a netmap; exit node will be resolved + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + AutoExitNode: "", // clear the auto exit node + }, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: "", // cleared + ExitNodeID: exitNode1.StableID(), // a resolved exit node ID should be retained + }, + }, } syspolicy.RegisterWellKnownSettingsForTest(t) for _, tt := range tests { @@ -1033,6 +1136,9 @@ func TestConfigureExitNode(t *testing.T) { if tt.exitNodeAllowedIDs != nil { store.SetStringLists(source.TestSettingOf(syspolicy.AllowedSuggestedExitNodes, toStrings(tt.exitNodeAllowedIDs))) } + if tt.exitNodeAllowOverride { + store.SetBooleans(source.TestSettingOf(syspolicy.AllowExitNodeOverride, true)) + } if store.IsEmpty() { // No syspolicy settings, so don't register a store. // This allows the test to run in parallel with other tests. 
@@ -1078,6 +1184,212 @@ func TestConfigureExitNode(t *testing.T) { } } +func TestPrefsChangeDisablesExitNode(t *testing.T) { + tests := []struct { + name string + netMap *netmap.NetworkMap + prefs ipn.Prefs + change ipn.MaskedPrefs + wantDisablesExitNode bool + }{ + { + name: "has-exit-node-id/no-change", + prefs: ipn.Prefs{ + ExitNodeID: "test-exit-node", + }, + change: ipn.MaskedPrefs{}, + wantDisablesExitNode: false, + }, + { + name: "has-exit-node-ip/no-change", + prefs: ipn.Prefs{ + ExitNodeIP: netip.MustParseAddr("100.100.1.1"), + }, + change: ipn.MaskedPrefs{}, + wantDisablesExitNode: false, + }, + { + name: "has-auto-exit-node/no-change", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{}, + wantDisablesExitNode: false, + }, + { + name: "has-exit-node-id/non-exit-node-change", + prefs: ipn.Prefs{ + ExitNodeID: "test-exit-node", + }, + change: ipn.MaskedPrefs{ + WantRunningSet: true, + HostnameSet: true, + ExitNodeAllowLANAccessSet: true, + Prefs: ipn.Prefs{ + WantRunning: true, + Hostname: "test-hostname", + ExitNodeAllowLANAccess: true, + }, + }, + wantDisablesExitNode: false, + }, + { + name: "has-exit-node-ip/non-exit-node-change", + prefs: ipn.Prefs{ + ExitNodeIP: netip.MustParseAddr("100.100.1.1"), + }, + change: ipn.MaskedPrefs{ + WantRunningSet: true, + RouteAllSet: true, + ShieldsUpSet: true, + Prefs: ipn.Prefs{ + WantRunning: false, + RouteAll: false, + ShieldsUp: true, + }, + }, + wantDisablesExitNode: false, + }, + { + name: "has-auto-exit-node/non-exit-node-change", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{ + CorpDNSSet: true, + RouteAllSet: true, + ExitNodeAllowLANAccessSet: true, + Prefs: ipn.Prefs{ + CorpDNS: true, + RouteAll: false, + ExitNodeAllowLANAccess: true, + }, + }, + wantDisablesExitNode: false, + }, + { + name: "has-exit-node-id/change-exit-node-id", + prefs: ipn.Prefs{ + ExitNodeID: "exit-node-1", + }, + change: ipn.MaskedPrefs{ + ExitNodeIDSet: true, + Prefs: ipn.Prefs{ + ExitNodeID: "exit-node-2", + }, + }, + wantDisablesExitNode: false, // changing the exit node ID does not disable it + }, + { + name: "has-exit-node-id/enable-auto-exit-node", + prefs: ipn.Prefs{ + ExitNodeID: "exit-node-1", + }, + change: ipn.MaskedPrefs{ + AutoExitNodeSet: true, + Prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + }, + wantDisablesExitNode: false, // changing the exit node ID does not disable it + }, + { + name: "has-exit-node-id/clear-exit-node-id", + prefs: ipn.Prefs{ + ExitNodeID: "exit-node-1", + }, + change: ipn.MaskedPrefs{ + ExitNodeIDSet: true, + Prefs: ipn.Prefs{ + ExitNodeID: "", + }, + }, + wantDisablesExitNode: true, // clearing the exit node ID disables it + }, + { + name: "has-auto-exit-node/clear-exit-node-id", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{ + ExitNodeIDSet: true, + Prefs: ipn.Prefs{ + ExitNodeID: "", + }, + }, + wantDisablesExitNode: true, // clearing the exit node ID disables auto exit node as well... + }, + { + name: "has-auto-exit-node/clear-exit-node-id/but-keep-auto-exit-node", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{ + ExitNodeIDSet: true, + AutoExitNodeSet: true, + Prefs: ipn.Prefs{ + ExitNodeID: "", + AutoExitNode: ipn.AnyExitNode, + }, + }, + wantDisablesExitNode: false, // ... 
unless we explicitly keep the auto exit node enabled + }, + { + name: "has-auto-exit-node/clear-exit-node-ip", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{ + ExitNodeIPSet: true, + Prefs: ipn.Prefs{ + ExitNodeIP: netip.Addr{}, + }, + }, + wantDisablesExitNode: false, // auto exit node is still enabled + }, + { + name: "has-auto-exit-node/clear-auto-exit-node", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{ + AutoExitNodeSet: true, + Prefs: ipn.Prefs{ + AutoExitNode: "", + }, + }, + wantDisablesExitNode: true, // clearing the auto exit while the exit node ID is unresolved disables exit node usage + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lb := newTestLocalBackend(t) + if tt.netMap != nil { + lb.SetControlClientStatus(lb.cc, controlclient.Status{NetMap: tt.netMap}) + } + // Set the initial prefs via SetPrefsForTest + // to apply necessary adjustments. + lb.SetPrefsForTest(tt.prefs.Clone()) + initialPrefs := lb.Prefs() + + // Check whether changeDisablesExitNodeLocked correctly identifies the change. + if got := lb.changeDisablesExitNodeLocked(initialPrefs, &tt.change); got != tt.wantDisablesExitNode { + t.Errorf("disablesExitNode: got %v; want %v", got, tt.wantDisablesExitNode) + } + + // Apply the change and check if it the actual behavior matches the expectation. + gotPrefs, err := lb.EditPrefsAs(&tt.change, &ipnauth.TestActor{}) + if err != nil { + t.Fatalf("EditPrefsAs failed: %v", err) + } + gotDisabledExitNode := initialPrefs.ExitNodeID() != "" && gotPrefs.ExitNodeID() == "" + if gotDisabledExitNode != tt.wantDisablesExitNode { + t.Errorf("disabledExitNode: got %v; want %v", gotDisabledExitNode, tt.wantDisablesExitNode) + } + }) + } +} + func TestInternalAndExternalInterfaces(t *testing.T) { type interfacePrefix struct { i netmon.Interface diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index b19a3e7fe..cd5f8172c 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -54,6 +54,15 @@ const ( ExitNodeID Key = "ExitNodeID" ExitNodeIP Key = "ExitNodeIP" // default ""; if blank, no exit node is forced. Value is exit node IP. + // AllowExitNodeOverride is a boolean key that allows the user to override exit node policy settings + // and manually select an exit node. It does not allow disabling exit node usage entirely. + // It is typically used in conjunction with [ExitNodeID] set to "auto:any". + // + // Warning: This policy setting is experimental and may change, be renamed or removed in the future. + // It may also not be fully supported by all Tailscale clients until it is out of experimental status. + // See tailscale/corp#29969. + AllowExitNodeOverride Key = "ExitNode.AllowOverride" + // Keys with a string value that specifies an option: "always", "never", "user-decides". // The default is "user-decides" unless otherwise stated. Enforcement of // these policies is typically performed in ipnlocal.applySysPolicy(). 
GUIs @@ -173,6 +182,7 @@ const ( var implicitDefinitions = []*setting.Definition{ // Device policy settings (can only be configured on a per-device basis): setting.NewDefinition(AllowedSuggestedExitNodes, setting.DeviceSetting, setting.StringListValue), + setting.NewDefinition(AllowExitNodeOverride, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(AlwaysOn, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(AlwaysOnOverrideWithReason, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(ApplyUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), diff --git a/util/syspolicy/rsop/change_callbacks.go b/util/syspolicy/rsop/change_callbacks.go index b962f30c0..87b45b654 100644 --- a/util/syspolicy/rsop/change_callbacks.go +++ b/util/syspolicy/rsop/change_callbacks.go @@ -59,6 +59,11 @@ func (c PolicyChange) HasChanged(key setting.Key) bool { } } +// HasChangedAnyOf reports whether any of the specified policy settings has changed. +func (c PolicyChange) HasChangedAnyOf(keys ...setting.Key) bool { + return slices.ContainsFunc(keys, c.HasChanged) +} + // policyChangeCallbacks are the callbacks to invoke when the effective policy changes. // It is safe for concurrent use. type policyChangeCallbacks struct { From a60e0caf6a3bc4c2801f4ca6e1630fc9409d1125 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 8 Jul 2025 19:37:09 -0700 Subject: [PATCH 1060/1708] wgengine/magicsock: remove conn.InitiationAwareEndpoint TODO (#16498) It was implemented in 5b0074729d38f8cc301803da06086033f53b1b93. Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index fbfcf0b41..ab7c2102f 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1708,11 +1708,6 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach } // TODO(jwhited): reuse [lazyEndpoint] across calls to receiveIP() // for the same batch & [epAddr] src. - // - // TODO(jwhited): implement [lazyEndpoint] integration to call - // [endpoint.noteRecvActivity], which triggers just-in-time - // wireguard-go configuration of the peer, prior to peer lookup - // within wireguard-go. return &lazyEndpoint{c: c, src: src}, size, true } cache.epAddr = src From bad17a1bfaa0ac3e62e2ebc95fca7c5c5959055b Mon Sep 17 00:00:00 2001 From: Simon Law Date: Tue, 8 Jul 2025 22:14:18 -0700 Subject: [PATCH 1061/1708] cmd/tailscale: format empty cities and countries as hyphens (#16495) When running `tailscale exit-node list`, an empty city or country name should be displayed as a hyphen "-". However, this only happened when there was no location at all. If a node provides a Hostinfo.Location, then the list would display exactly what was provided. This patch changes the listing so that empty cities and countries will either render the provided name or "-". 
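As an aside for reviewers, the rendering change amounts to a display-time fallback using the standard library's cmp.Or instead of a sentinel Location value. Below is a minimal, self-contained sketch of that pattern only; the location type and sample data are illustrative stand-ins, not the real ipnstate/tailcfg types used by the patch:

    // Sketch of the display-time fallback: keep empty strings internally,
    // render "-" only when printing.
    package main

    import (
    	"cmp"
    	"fmt"
    )

    type location struct {
    	Country, City string // empty when the peer reports no location data
    }

    func main() {
    	peers := []location{
    		{Country: "Canada", City: "Squamish"}, // full location data
    		{Country: "Canada"},                   // country only; city missing
    		{},                                    // no location data at all
    	}
    	for _, loc := range peers {
    		// cmp.Or returns its first non-zero argument, so an empty
    		// name falls back to "-" without needing a sentinel struct.
    		fmt.Printf("%-10s %-10s\n", cmp.Or(loc.Country, "-"), cmp.Or(loc.City, "-"))
    	}
    }

This keeps provided names intact while still printing "-" for any missing city or country field.
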
Fixes #16500 Signed-off-by: Simon Law --- cmd/tailscale/cli/exitnode.go | 22 +++++++++------------- cmd/tailscale/cli/exitnode_test.go | 28 ++++++++++++++++++++-------- 2 files changed, 29 insertions(+), 21 deletions(-) diff --git a/cmd/tailscale/cli/exitnode.go b/cmd/tailscale/cli/exitnode.go index ad7a8ccee..b153f096d 100644 --- a/cmd/tailscale/cli/exitnode.go +++ b/cmd/tailscale/cli/exitnode.go @@ -131,7 +131,7 @@ func runExitNodeList(ctx context.Context, args []string) error { for _, country := range filteredPeers.Countries { for _, city := range country.Cities { for _, peer := range city.Peers { - fmt.Fprintf(w, "\n %s\t%s\t%s\t%s\t%s\t", peer.TailscaleIPs[0], strings.Trim(peer.DNSName, "."), country.Name, city.Name, peerStatus(peer)) + fmt.Fprintf(w, "\n %s\t%s\t%s\t%s\t%s\t", peer.TailscaleIPs[0], strings.Trim(peer.DNSName, "."), cmp.Or(country.Name, "-"), cmp.Or(city.Name, "-"), peerStatus(peer)) } } } @@ -202,23 +202,16 @@ type filteredCity struct { Peers []*ipnstate.PeerStatus } -const noLocationData = "-" - -var noLocation = &tailcfg.Location{ - Country: noLocationData, - CountryCode: noLocationData, - City: noLocationData, - CityCode: noLocationData, -} - // filterFormatAndSortExitNodes filters and sorts exit nodes into // alphabetical order, by country, city and then by priority if // present. +// // If an exit node has location data, and the country has more than // one city, an `Any` city is added to the country that contains the // highest priority exit node within that country. +// // For exit nodes without location data, their country fields are -// defined as '-' to indicate that the data is not available. +// defined as the empty string to indicate that the data is not available. func filterFormatAndSortExitNodes(peers []*ipnstate.PeerStatus, filterBy string) filteredExitNodes { // first get peers into some fixed order, as code below doesn't break ties // and our input comes from a random range-over-map. @@ -229,7 +222,10 @@ func filterFormatAndSortExitNodes(peers []*ipnstate.PeerStatus, filterBy string) countries := make(map[string]*filteredCountry) cities := make(map[string]*filteredCity) for _, ps := range peers { - loc := cmp.Or(ps.Location, noLocation) + loc := ps.Location + if loc == nil { + loc = &tailcfg.Location{} + } if filterBy != "" && !strings.EqualFold(loc.Country, filterBy) { continue @@ -259,7 +255,7 @@ func filterFormatAndSortExitNodes(peers []*ipnstate.PeerStatus, filterBy string) } for _, country := range filteredExitNodes.Countries { - if country.Name == noLocationData { + if country.Name == "" { // Countries without location data should not // be filtered further. 
continue diff --git a/cmd/tailscale/cli/exitnode_test.go b/cmd/tailscale/cli/exitnode_test.go index 9d569a45a..cc38fd3a4 100644 --- a/cmd/tailscale/cli/exitnode_test.go +++ b/cmd/tailscale/cli/exitnode_test.go @@ -74,10 +74,10 @@ func TestFilterFormatAndSortExitNodes(t *testing.T) { want := filteredExitNodes{ Countries: []*filteredCountry{ { - Name: noLocationData, + Name: "", Cities: []*filteredCity{ { - Name: noLocationData, + Name: "", Peers: []*ipnstate.PeerStatus{ ps[5], }, @@ -273,14 +273,20 @@ func TestSortByCountryName(t *testing.T) { Name: "Zimbabwe", }, { - Name: noLocationData, + Name: "", }, } sortByCountryName(fc) - if fc[0].Name != noLocationData { - t.Fatalf("sortByCountryName did not order countries by alphabetical order, got %v, want %v", fc[0].Name, noLocationData) + want := []string{"", "Albania", "Sweden", "Zimbabwe"} + var got []string + for _, c := range fc { + got = append(got, c.Name) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("sortByCountryName did not order countries by alphabetical order (-want +got):\n%s", diff) } } @@ -296,13 +302,19 @@ func TestSortByCityName(t *testing.T) { Name: "Squamish", }, { - Name: noLocationData, + Name: "", }, } sortByCityName(fc) - if fc[0].Name != noLocationData { - t.Fatalf("sortByCityName did not order cities by alphabetical order, got %v, want %v", fc[0].Name, noLocationData) + want := []string{"", "Goteborg", "Kingston", "Squamish"} + var got []string + for _, c := range fc { + got = append(got, c.Name) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("sortByCityName did not order countries by alphabetical order (-want +got):\n%s", diff) } } From 90bf0a97b3b1c042b3a6be48ec186732733f995b Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 9 Jul 2025 09:13:11 +0100 Subject: [PATCH 1062/1708] cmd/k8s-operator/deploy: clarify helm install notes (#16449) Based on feedback that it wasn't clear what the user is meant to do with the output of the last command, clarify that it's an optional command to explore what got created. Updates #13427 Change-Id: Iff64ec6d02dc04bf4bbebf415d7ed1a44e7dd658 Signed-off-by: Tom Proctor --- cmd/k8s-operator/deploy/chart/templates/NOTES.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/k8s-operator/deploy/chart/templates/NOTES.txt b/cmd/k8s-operator/deploy/chart/templates/NOTES.txt index 5678e597a..1bee67046 100644 --- a/cmd/k8s-operator/deploy/chart/templates/NOTES.txt +++ b/cmd/k8s-operator/deploy/chart/templates/NOTES.txt @@ -22,4 +22,6 @@ $ kubectl explain proxyclass $ kubectl explain recorder $ kubectl explain dnsconfig -$ kubectl --namespace={{ .Release.Namespace }} get pods +If you're interested to explore what resources were created: + +$ kubectl --namespace={{ .Release.Namespace }} get all -l app.kubernetes.io/managed-by=Helm From 4dfed6b14697d1a9ab217e01fff774a3b72391df Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 9 Jul 2025 09:21:56 +0100 Subject: [PATCH 1063/1708] cmd/{k8s-operator,k8s-proxy}: add kube-apiserver ProxyGroup type (#16266) Adds a new k8s-proxy command to convert operator's in-process proxy to a separately deployable type of ProxyGroup: kube-apiserver. k8s-proxy reads in a new config file written by the operator, modelled on tailscaled's conffile but with some modifications to ensure multiple versions of the config can co-exist within a file. This should make it much easier to support reading that config file from a Kube Secret with a stable file name. 
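Purely as an illustration of the "multiple config versions co-exist within one file" idea (the real schema lives in kube/k8s-proxy/conf and may differ), here is a hypothetical sketch of a reader that keys each version at the top level of the file and picks the newest version it understands; every name below (v1alpha1, authKey, hostname) is an assumption for the example, not the actual config format:

    // Hypothetical sketch only, not the kube/k8s-proxy/conf schema.
    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type configV1Alpha1 struct {
    	AuthKey  string `json:"authKey"`
    	Hostname string `json:"hostname"`
    }

    // versionedConfig holds every version side by side; readers simply
    // ignore version keys they do not know about.
    type versionedConfig struct {
    	V1Alpha1 *configV1Alpha1 `json:"v1alpha1,omitempty"`
    }

    func main() {
    	raw := []byte(`{"v1alpha1": {"authKey": "tskey-example", "hostname": "kube-apiserver-proxy"}}`)

    	var cfg versionedConfig
    	if err := json.Unmarshal(raw, &cfg); err != nil {
    		panic(err)
    	}
    	// Prefer the newest version this binary understands.
    	switch {
    	case cfg.V1Alpha1 != nil:
    		fmt.Printf("using v1alpha1 config for %q\n", cfg.V1Alpha1.Hostname)
    	default:
    		fmt.Println("no supported config version found")
    	}
    }

The point of the layout is that the operator can start writing a newer version alongside the old one in the same Secret key, and older proxies keep reading the version they support until they are upgraded.
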
To avoid needing to give the operator ClusterRole{,Binding} permissions, the helm chart now optionally deploys a new static ServiceAccount for the API Server proxy to use if in auth mode. Proxies deployed by kube-apiserver ProxyGroups currently work the same as the operator's in-process proxy. They do not yet leverage Tailscale Services for presenting a single HA DNS name. Updates #13358 Change-Id: Ib6ead69b2173c5e1929f3c13fb48a9a5362195d8 Signed-off-by: Tom Proctor --- Makefile | 48 ++-- build_docker.sh | 18 ++ cmd/k8s-operator/depaware.txt | 3 +- .../chart/templates/apiserverproxy-rbac.yaml | 16 +- cmd/k8s-operator/deploy/chart/values.yaml | 16 ++ .../crds/tailscale.com_proxyclasses.yaml | 46 +++- .../crds/tailscale.com_proxygroups.yaml | 19 +- .../deploy/manifests/authproxy-rbac.yaml | 9 + .../deploy/manifests/operator.yaml | 65 ++++- cmd/k8s-operator/ingress-for-pg.go | 6 +- cmd/k8s-operator/ingress-for-pg_test.go | 49 ++++ cmd/k8s-operator/operator.go | 64 ++++- cmd/k8s-operator/proxy.go | 61 +++++ cmd/k8s-operator/proxygroup.go | 194 +++++++++++--- cmd/k8s-operator/proxygroup_specs.go | 162 +++++++++++- cmd/k8s-operator/proxygroup_test.go | 249 ++++++++++++++++-- cmd/k8s-operator/sts.go | 12 +- cmd/k8s-operator/svc-for-pg.go | 60 +---- cmd/k8s-operator/svc-for-pg_test.go | 2 - cmd/k8s-proxy/k8s-proxy.go | 197 ++++++++++++++ k8s-operator/api-proxy/env.go | 29 -- k8s-operator/api-proxy/proxy.go | 187 +++++-------- k8s-operator/api.md | 40 ++- .../apis/v1alpha1/types_proxyclass.go | 22 +- .../apis/v1alpha1/types_proxygroup.go | 33 ++- .../apis/v1alpha1/zz_generated.deepcopy.go | 25 ++ kube/k8s-proxy/conf/conf.go | 101 +++++++ kube/k8s-proxy/conf/conf_test.go | 86 ++++++ kube/kubetypes/types.go | 18 +- kube/state/state.go | 97 +++++++ kube/state/state_test.go | 203 ++++++++++++++ 31 files changed, 1787 insertions(+), 350 deletions(-) create mode 100644 cmd/k8s-operator/proxy.go create mode 100644 cmd/k8s-proxy/k8s-proxy.go delete mode 100644 k8s-operator/api-proxy/env.go create mode 100644 kube/k8s-proxy/conf/conf.go create mode 100644 kube/k8s-proxy/conf/conf_test.go create mode 100644 kube/state/state.go create mode 100644 kube/state/state_test.go diff --git a/Makefile b/Makefile index 41c67c711..f5fc20589 100644 --- a/Makefile +++ b/Makefile @@ -92,38 +92,38 @@ pushspk: spk ## Push and install synology package on ${SYNO_HOST} host scp tailscale.spk root@${SYNO_HOST}: ssh root@${SYNO_HOST} /usr/syno/bin/synopkg install tailscale.spk -publishdevimage: ## Build and publish tailscale image to location specified by ${REPO} - @test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1) - @test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) - @test "${REPO}" != "tailscale/k8s-operator" || (echo "REPO=... must not be tailscale/k8s-operator" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-operator" && exit 1) +.PHONY: check-image-repo +check-image-repo: + @if [ -z "$(REPO)" ]; then \ + echo "REPO=... required; e.g. 
REPO=ghcr.io/$$USER/tailscale" >&2; \ + exit 1; \ + fi + @for repo in tailscale/tailscale ghcr.io/tailscale/tailscale \ + tailscale/k8s-operator ghcr.io/tailscale/k8s-operator \ + tailscale/k8s-nameserver ghcr.io/tailscale/k8s-nameserver \ + tailscale/tsidp ghcr.io/tailscale/tsidp \ + tailscale/k8s-proxy ghcr.io/tailscale/k8s-proxy; do \ + if [ "$(REPO)" = "$$repo" ]; then \ + echo "REPO=... must not be $$repo" >&2; \ + exit 1; \ + fi; \ + done + +publishdevimage: check-image-repo ## Build and publish tailscale image to location specified by ${REPO} TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=client ./build_docker.sh -publishdevoperator: ## Build and publish k8s-operator image to location specified by ${REPO} - @test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1) - @test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) - @test "${REPO}" != "tailscale/k8s-operator" || (echo "REPO=... must not be tailscale/k8s-operator" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-operator" && exit 1) +publishdevoperator: check-image-repo ## Build and publish k8s-operator image to location specified by ${REPO} TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-operator ./build_docker.sh -publishdevnameserver: ## Build and publish k8s-nameserver image to location specified by ${REPO} - @test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1) - @test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) - @test "${REPO}" != "tailscale/k8s-nameserver" || (echo "REPO=... must not be tailscale/k8s-nameserver" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/k8s-nameserver" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-nameserver" && exit 1) +publishdevnameserver: check-image-repo ## Build and publish k8s-nameserver image to location specified by ${REPO} TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-nameserver ./build_docker.sh -publishdevtsidp: ## Build and publish tsidp image to location specified by ${REPO} - @test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1) - @test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) - @test "${REPO}" != "tailscale/tsidp" || (echo "REPO=... must not be tailscale/tsidp" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/tsidp" || (echo "REPO=... 
must not be ghcr.io/tailscale/tsidp" && exit 1) +publishdevtsidp: check-image-repo ## Build and publish tsidp image to location specified by ${REPO} TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=tsidp ./build_docker.sh +publishdevproxy: check-image-repo ## Build and publish k8s-proxy image to location specified by ${REPO} + TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-proxy ./build_docker.sh + .PHONY: sshintegrationtest sshintegrationtest: ## Run the SSH integration tests in various Docker containers @GOOS=linux GOARCH=amd64 ./tool/go test -tags integrationtest -c ./ssh/tailssh -o ssh/tailssh/testcontainers/tailssh.test && \ diff --git a/build_docker.sh b/build_docker.sh index 7840dc897..bdeaa8659 100755 --- a/build_docker.sh +++ b/build_docker.sh @@ -118,6 +118,24 @@ case "$TARGET" in --annotations="${ANNOTATIONS}" \ /usr/local/bin/tsidp ;; + k8s-proxy) + DEFAULT_REPOS="tailscale/k8s-proxy" + REPOS="${REPOS:-${DEFAULT_REPOS}}" + go run github.com/tailscale/mkctr \ + --gopaths="tailscale.com/cmd/k8s-proxy:/usr/local/bin/k8s-proxy" \ + --ldflags=" \ + -X tailscale.com/version.longStamp=${VERSION_LONG} \ + -X tailscale.com/version.shortStamp=${VERSION_SHORT} \ + -X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \ + --base="${BASE}" \ + --tags="${TAGS}" \ + --gotags="ts_kube,ts_package_container" \ + --repos="${REPOS}" \ + --push="${PUSH}" \ + --target="${PLATFORM}" \ + --annotations="${ANNOTATIONS}" \ + /usr/local/bin/k8s-proxy + ;; *) echo "unknown target: $TARGET" exit 1 diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 36c5184c3..f810d1b4f 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -200,7 +200,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp - github.com/tailscale/hujson from tailscale.com/ipn/conffile + github.com/tailscale/hujson from tailscale.com/ipn/conffile+ L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth @@ -822,6 +822,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/k8s-operator/sessionrecording/ws from tailscale.com/k8s-operator/sessionrecording tailscale.com/kube/egressservices from tailscale.com/cmd/k8s-operator tailscale.com/kube/ingressservices from tailscale.com/cmd/k8s-operator + tailscale.com/kube/k8s-proxy/conf from tailscale.com/cmd/k8s-operator tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore tailscale.com/kube/kubetypes from tailscale.com/cmd/k8s-operator+ diff --git a/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml index 072ecf6d2..ad0a6fb66 100644 --- a/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml @@ -1,7 +1,16 @@ # Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause -{{ if eq .Values.apiServerProxyConfig.mode "true" }} +# If old setting used, enable both old (operator) and new (ProxyGroup) workflows. 
+# If new setting used, enable only new workflow. +{{ if or (eq .Values.apiServerProxyConfig.mode "true") + (eq .Values.apiServerProxyConfig.allowImpersonation "true") }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-apiserver-auth-proxy + namespace: {{ .Release.Namespace }} +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -16,9 +25,14 @@ kind: ClusterRoleBinding metadata: name: tailscale-auth-proxy subjects: +{{- if eq .Values.apiServerProxyConfig.mode "true" }} - kind: ServiceAccount name: operator namespace: {{ .Release.Namespace }} +{{- end }} +- kind: ServiceAccount + name: kube-apiserver-auth-proxy + namespace: {{ .Release.Namespace }} roleRef: kind: ClusterRole name: tailscale-auth-proxy diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index 2926f6d07..cdedb92e8 100644 --- a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -92,6 +92,13 @@ ingressClass: # If you need more configuration options, take a look at ProxyClass: # https://tailscale.com/kb/1445/kubernetes-operator-customization#cluster-resource-customization-using-proxyclass-custom-resource proxyConfig: + # Configure the proxy image to use instead of the default tailscale/tailscale:latest. + # Applying a ProxyClass with `spec.statefulSet.pod.tailscaleContainer.image` + # set will override any defaults here. + # + # Note that ProxyGroups of type "kube-apiserver" use a different default image, + # tailscale/k8s-proxy:latest, and it is currently only possible to override + # that image via the same ProxyClass field. image: # Repository defaults to DockerHub, but images are also synced to ghcr.io/tailscale/tailscale. repository: tailscale/tailscale @@ -115,6 +122,15 @@ proxyConfig: # Kubernetes API server. # https://tailscale.com/kb/1437/kubernetes-operator-api-server-proxy apiServerProxyConfig: + # Set to "true" to create the ClusterRole permissions required for the API + # server proxy's auth mode. In auth mode, the API server proxy impersonates + # groups and users based on tailnet ACL grants. Required for ProxyGroups of + # type "kube-apiserver" running in auth mode. + allowImpersonation: "false" # "true", "false" + + # If true or noauth, the operator will run an in-process API server proxy. + # You can deploy a ProxyGroup of type "kube-apiserver" to run a high + # availability set of API server proxies instead. mode: "false" # "true", "false", "noauth" imagePullSecrets: [] diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index fcf1b27aa..c5dc9c3e9 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -1379,12 +1379,21 @@ spec: type: string image: description: |- - Container image name. By default images are pulled from - docker.io/tailscale/tailscale, but the official images are also - available at ghcr.io/tailscale/tailscale. Specifying image name here - will override any proxy image values specified via the Kubernetes - operator's Helm chart values or PROXY_IMAGE env var in the operator - Deployment. + Container image name. By default images are pulled from docker.io/tailscale, + but the official images are also available at ghcr.io/tailscale. + + For all uses except on ProxyGroups of type "kube-apiserver", this image must + be either tailscale/tailscale, or an equivalent mirror of that image. 
+ To apply to ProxyGroups of type "kube-apiserver", this image must be + tailscale/k8s-proxy or a mirror of that image. + + For "tailscale/tailscale"-based proxies, specifying image name here will + override any proxy image values specified via the Kubernetes operator's + Helm chart values or PROXY_IMAGE env var in the operator Deployment. + For "tailscale/k8s-proxy"-based proxies, there is currently no way to + configure your own default, and this field is the only way to use a + custom image. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image type: string imagePullPolicy: @@ -1655,7 +1664,9 @@ spec: PodSecurityContext, the value specified in SecurityContext takes precedence. type: string tailscaleInitContainer: - description: Configuration for the proxy init container that enables forwarding. + description: |- + Configuration for the proxy init container that enables forwarding. + Not valid to apply to ProxyGroups of type "kube-apiserver". type: object properties: debug: @@ -1709,12 +1720,21 @@ spec: type: string image: description: |- - Container image name. By default images are pulled from - docker.io/tailscale/tailscale, but the official images are also - available at ghcr.io/tailscale/tailscale. Specifying image name here - will override any proxy image values specified via the Kubernetes - operator's Helm chart values or PROXY_IMAGE env var in the operator - Deployment. + Container image name. By default images are pulled from docker.io/tailscale, + but the official images are also available at ghcr.io/tailscale. + + For all uses except on ProxyGroups of type "kube-apiserver", this image must + be either tailscale/tailscale, or an equivalent mirror of that image. + To apply to ProxyGroups of type "kube-apiserver", this image must be + tailscale/k8s-proxy or a mirror of that image. + + For "tailscale/tailscale"-based proxies, specifying image name here will + override any proxy image values specified via the Kubernetes operator's + Helm chart values or PROXY_IMAGE env var in the operator Deployment. + For "tailscale/k8s-proxy"-based proxies, there is currently no way to + configure your own default, and this field is the only way to use a + custom image. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image type: string imagePullPolicy: diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index c426c8427..06c847925 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -77,6 +77,22 @@ spec: must not start with a dash and must be between 1 and 62 characters long. type: string pattern: ^[a-z0-9][a-z0-9-]{0,61}$ + kubeAPIServer: + description: |- + KubeAPIServer contains configuration specific to the kube-apiserver + ProxyGroup type. This field is only used when Type is set to "kube-apiserver". + type: object + properties: + mode: + description: |- + Mode to run the API server proxy in. Supported modes are auth and noauth. + In auth mode, requests from the tailnet proxied over to the Kubernetes + API server are additionally impersonated using the sender's tailnet identity. + If not specified, defaults to auth mode. 
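
The Mode field above is optional; when the KubeAPIServer block or Mode is unset, the proxy runs in auth mode. A minimal fragment (not part of this patch, assuming a *tsapi.ProxyGroup value pg is in scope) showing the defaulting rule, which matches the isAuthAPIServerProxy helper added in proxygroup_specs.go below:

    // Treat a missing KubeAPIServer block or a nil Mode as auth mode.
    authMode := pg.Spec.KubeAPIServer == nil ||
        pg.Spec.KubeAPIServer.Mode == nil ||
        *pg.Spec.KubeAPIServer.Mode == tsapi.APIServerProxyModeAuth
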
+ type: string + enum: + - auth + - noauth proxyClass: description: |- ProxyClass is the name of the ProxyClass custom resource that contains @@ -106,12 +122,13 @@ spec: pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ type: description: |- - Type of the ProxyGroup proxies. Supported types are egress and ingress. + Type of the ProxyGroup proxies. Supported types are egress, ingress, and kube-apiserver. Type is immutable once a ProxyGroup is created. type: string enum: - egress - ingress + - kube-apiserver x-kubernetes-validations: - rule: self == oldSelf message: ProxyGroup type is immutable diff --git a/cmd/k8s-operator/deploy/manifests/authproxy-rbac.yaml b/cmd/k8s-operator/deploy/manifests/authproxy-rbac.yaml index ddbdda32e..5818fa69f 100644 --- a/cmd/k8s-operator/deploy/manifests/authproxy-rbac.yaml +++ b/cmd/k8s-operator/deploy/manifests/authproxy-rbac.yaml @@ -1,6 +1,12 @@ # Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-apiserver-auth-proxy + namespace: tailscale +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -18,6 +24,9 @@ subjects: - kind: ServiceAccount name: operator namespace: tailscale +- kind: ServiceAccount + name: kube-apiserver-auth-proxy + namespace: tailscale roleRef: kind: ClusterRole name: tailscale-auth-proxy diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index cdf301318..ff3705cb3 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -1852,12 +1852,21 @@ spec: type: array image: description: |- - Container image name. By default images are pulled from - docker.io/tailscale/tailscale, but the official images are also - available at ghcr.io/tailscale/tailscale. Specifying image name here - will override any proxy image values specified via the Kubernetes - operator's Helm chart values or PROXY_IMAGE env var in the operator - Deployment. + Container image name. By default images are pulled from docker.io/tailscale, + but the official images are also available at ghcr.io/tailscale. + + For all uses except on ProxyGroups of type "kube-apiserver", this image must + be either tailscale/tailscale, or an equivalent mirror of that image. + To apply to ProxyGroups of type "kube-apiserver", this image must be + tailscale/k8s-proxy or a mirror of that image. + + For "tailscale/tailscale"-based proxies, specifying image name here will + override any proxy image values specified via the Kubernetes operator's + Helm chart values or PROXY_IMAGE env var in the operator Deployment. + For "tailscale/k8s-proxy"-based proxies, there is currently no way to + configure your own default, and this field is the only way to use a + custom image. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image type: string imagePullPolicy: @@ -2129,7 +2138,9 @@ spec: type: object type: object tailscaleInitContainer: - description: Configuration for the proxy init container that enables forwarding. + description: |- + Configuration for the proxy init container that enables forwarding. + Not valid to apply to ProxyGroups of type "kube-apiserver". properties: debug: description: |- @@ -2182,12 +2193,21 @@ spec: type: array image: description: |- - Container image name. By default images are pulled from - docker.io/tailscale/tailscale, but the official images are also - available at ghcr.io/tailscale/tailscale. 
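
For reference, a minimal kube-apiserver ProxyGroup can be constructed as in the sketch below (illustrative only; the name and replica count are placeholders, and the types mirror those used by the tests added in this patch). Running in noauth mode avoids depending on the static kube-apiserver-auth-proxy ServiceAccount:

    package main

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
        "tailscale.com/types/ptr"
    )

    // exampleKubeAPIServerProxyGroup returns a minimal ProxyGroup of type
    // kube-apiserver that runs the API server proxy in noauth mode.
    func exampleKubeAPIServerProxyGroup() *tsapi.ProxyGroup {
        return &tsapi.ProxyGroup{
            ObjectMeta: metav1.ObjectMeta{Name: "kube-api"},
            Spec: tsapi.ProxyGroupSpec{
                Type:     tsapi.ProxyGroupTypeKubernetesAPIServer,
                Replicas: ptr.To[int32](2),
                KubeAPIServer: &tsapi.KubeAPIServerConfig{
                    Mode: ptr.To(tsapi.APIServerProxyModeNoAuth),
                },
            },
        }
    }
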
Specifying image name here - will override any proxy image values specified via the Kubernetes - operator's Helm chart values or PROXY_IMAGE env var in the operator - Deployment. + Container image name. By default images are pulled from docker.io/tailscale, + but the official images are also available at ghcr.io/tailscale. + + For all uses except on ProxyGroups of type "kube-apiserver", this image must + be either tailscale/tailscale, or an equivalent mirror of that image. + To apply to ProxyGroups of type "kube-apiserver", this image must be + tailscale/k8s-proxy or a mirror of that image. + + For "tailscale/tailscale"-based proxies, specifying image name here will + override any proxy image values specified via the Kubernetes operator's + Helm chart values or PROXY_IMAGE env var in the operator Deployment. + For "tailscale/k8s-proxy"-based proxies, there is currently no way to + configure your own default, and this field is the only way to use a + custom image. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image type: string imagePullPolicy: @@ -2904,6 +2924,22 @@ spec: must not start with a dash and must be between 1 and 62 characters long. pattern: ^[a-z0-9][a-z0-9-]{0,61}$ type: string + kubeAPIServer: + description: |- + KubeAPIServer contains configuration specific to the kube-apiserver + ProxyGroup type. This field is only used when Type is set to "kube-apiserver". + properties: + mode: + description: |- + Mode to run the API server proxy in. Supported modes are auth and noauth. + In auth mode, requests from the tailnet proxied over to the Kubernetes + API server are additionally impersonated using the sender's tailnet identity. + If not specified, defaults to auth mode. + enum: + - auth + - noauth + type: string + type: object proxyClass: description: |- ProxyClass is the name of the ProxyClass custom resource that contains @@ -2933,11 +2969,12 @@ spec: type: array type: description: |- - Type of the ProxyGroup proxies. Supported types are egress and ingress. + Type of the ProxyGroup proxies. Supported types are egress, ingress, and kube-apiserver. Type is immutable once a ProxyGroup is created. enum: - egress - ingress + - kube-apiserver type: string x-kubernetes-validations: - message: ProxyGroup type is immutable diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 79bad92be..aaf22d471 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -239,7 +239,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin // This checks and ensures that Tailscale Service's owner references are updated // for this Ingress and errors if that is not possible (i.e. because it // appears that the Tailscale Service has been created by a non-operator actor). - updatedAnnotations, err := r.ownerAnnotations(existingTSSvc) + updatedAnnotations, err := ownerAnnotations(r.operatorID, existingTSSvc) if err != nil { const instr = "To proceed, you can either manually delete the existing Tailscale Service or choose a different MagicDNS name at `.spec.tls.hosts[0] in the Ingress definition" msg := fmt.Sprintf("error ensuring ownership of Tailscale Service %s: %v. %s", hostname, err, instr) @@ -867,9 +867,9 @@ type OwnerRef struct { // nil, but does not contain an owner reference we return an error as this likely means // that the Service was created by somthing other than a Tailscale // Kubernetes operator. 
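
The helper becomes a plain function that takes the operator ID explicitly (see the signature change just below), so the Ingress and Service reconcilers can share it. A package-internal sketch of its behaviour, mirroring the TestOwnerAnnotations cases added further down; the operator IDs are placeholders:

    // A nil Tailscale Service yields a fresh single-owner annotation.
    annots, _ := ownerAnnotations("self-id", nil)
    // annots[ownerAnnotation] == `{"ownerRefs":[{"operatorID":"self-id"}]}`

    // An existing owner is preserved and the caller's reference is appended.
    annots, _ = ownerAnnotations("operator-2", &tailscale.VIPService{Annotations: annots})
    // annots[ownerAnnotation] == `{"ownerRefs":[{"operatorID":"self-id"},{"operatorID":"operator-2"}]}`
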
-func (r *HAIngressReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[string]string, error) { +func ownerAnnotations(operatorID string, svc *tailscale.VIPService) (map[string]string, error) { ref := OwnerRef{ - OperatorID: r.operatorID, + OperatorID: operatorID, } if svc == nil { c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}} diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index d29368cae..5de86cdad 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -12,8 +12,10 @@ import ( "maps" "reflect" "slices" + "strings" "testing" + "github.com/google/go-cmp/cmp" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" @@ -650,6 +652,53 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) { } } +func TestOwnerAnnotations(t *testing.T) { + singleSelfOwner := map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id"}]}`, + } + + for name, tc := range map[string]struct { + svc *tailscale.VIPService + wantAnnotations map[string]string + wantErr string + }{ + "no_svc": { + svc: nil, + wantAnnotations: singleSelfOwner, + }, + "empty_svc": { + svc: &tailscale.VIPService{}, + wantErr: "likely a resource created by something other than the Tailscale Kubernetes operator", + }, + "already_owner": { + svc: &tailscale.VIPService{ + Annotations: singleSelfOwner, + }, + wantAnnotations: singleSelfOwner, + }, + "add_owner": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"operator-2"}]}`, + }, + }, + wantAnnotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"operator-2"},{"operatorID":"self-id"}]}`, + }, + }, + } { + t.Run(name, func(t *testing.T) { + got, err := ownerAnnotations("self-id", tc.svc) + if tc.wantErr != "" && !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("ownerAnnotations() error = %v, wantErr %v", err, tc.wantErr) + } + if diff := cmp.Diff(tc.wantAnnotations, got); diff != "" { + t.Errorf("ownerAnnotations() mismatch (-want +got):\n%s", diff) + } + }) + } +} + func populateTLSSecret(ctx context.Context, c client.Client, pgName, domain string) error { secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 96b3b37ad..870a6f8b7 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -26,6 +26,7 @@ import ( networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" klabels "k8s.io/apimachinery/pkg/labels" @@ -77,6 +78,7 @@ func main() { tsNamespace = defaultEnv("OPERATOR_NAMESPACE", "") tslogging = defaultEnv("OPERATOR_LOGGING", "info") image = defaultEnv("PROXY_IMAGE", "tailscale/tailscale:latest") + k8sProxyImage = defaultEnv("K8S_PROXY_IMAGE", "tailscale/k8s-proxy:latest") priorityClassName = defaultEnv("PROXY_PRIORITY_CLASS_NAME", "") tags = defaultEnv("PROXY_TAGS", "tag:k8s") tsFirewallMode = defaultEnv("PROXY_FIREWALL_MODE", "") @@ -110,17 +112,27 @@ func main() { // The operator can run either as a plain operator or it can // additionally act as api-server proxy // https://tailscale.com/kb/1236/kubernetes-operator/?q=kubernetes#accessing-the-kubernetes-control-plane-using-an-api-server-proxy. 
- mode := apiproxy.ParseAPIProxyMode() - if mode == apiproxy.APIServerProxyModeDisabled { + mode := parseAPIProxyMode() + if mode == apiServerProxyModeDisabled { hostinfo.SetApp(kubetypes.AppOperator) } else { - hostinfo.SetApp(kubetypes.AppAPIServerProxy) + hostinfo.SetApp(kubetypes.AppInProcessAPIServerProxy) } s, tsc := initTSNet(zlog, loginServer) defer s.Close() restConfig := config.GetConfigOrDie() - apiproxy.MaybeLaunchAPIServerProxy(zlog, restConfig, s, mode) + if mode != apiServerProxyModeDisabled { + ap, err := apiproxy.NewAPIServerProxy(zlog, restConfig, s, mode == apiServerProxyModeEnabled) + if err != nil { + zlog.Fatalf("error creating API server proxy: %v", err) + } + go func() { + if err := ap.Run(context.Background()); err != nil { + zlog.Fatalf("error running API server proxy: %v", err) + } + }() + } rOpts := reconcilerOpts{ log: zlog, tsServer: s, @@ -128,6 +140,7 @@ func main() { tailscaleNamespace: tsNamespace, restConfig: restConfig, proxyImage: image, + k8sProxyImage: k8sProxyImage, proxyPriorityClassName: priorityClassName, proxyActAsDefaultLoadBalancer: isDefaultLoadBalancer, proxyTags: tags, @@ -415,7 +428,6 @@ func runReconcilers(opts reconcilerOpts) { Complete(&HAServiceReconciler{ recorder: eventRecorder, tsClient: opts.tsClient, - tsnetServer: opts.tsServer, defaultTags: strings.Split(opts.proxyTags, ","), Client: mgr.GetClient(), logger: opts.log.Named("service-pg-reconciler"), @@ -625,13 +637,14 @@ func runReconcilers(opts reconcilerOpts) { ownedByProxyGroupFilter := handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &tsapi.ProxyGroup{}) proxyClassFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForProxyGroup(mgr.GetClient(), startlog)) nodeFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(nodeHandlerForProxyGroup(mgr.GetClient(), opts.defaultProxyClass, startlog)) + saFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(serviceAccountHandlerForProxyGroup(mgr.GetClient(), startlog)) err = builder.ControllerManagedBy(mgr). For(&tsapi.ProxyGroup{}). Named("proxygroup-reconciler"). Watches(&corev1.Service{}, ownedByProxyGroupFilter). Watches(&appsv1.StatefulSet{}, ownedByProxyGroupFilter). Watches(&corev1.ConfigMap{}, ownedByProxyGroupFilter). - Watches(&corev1.ServiceAccount{}, ownedByProxyGroupFilter). + Watches(&corev1.ServiceAccount{}, saFilterForProxyGroup). Watches(&corev1.Secret{}, ownedByProxyGroupFilter). Watches(&rbacv1.Role{}, ownedByProxyGroupFilter). Watches(&rbacv1.RoleBinding{}, ownedByProxyGroupFilter). @@ -645,7 +658,8 @@ func runReconcilers(opts reconcilerOpts) { tsClient: opts.tsClient, tsNamespace: opts.tailscaleNamespace, - proxyImage: opts.proxyImage, + tsProxyImage: opts.proxyImage, + k8sProxyImage: opts.k8sProxyImage, defaultTags: strings.Split(opts.proxyTags, ","), tsFirewallMode: opts.proxyFirewallMode, defaultProxyClass: opts.defaultProxyClass, @@ -668,6 +682,7 @@ type reconcilerOpts struct { tailscaleNamespace string // namespace in which operator resources will be deployed restConfig *rest.Config // config for connecting to the kube API server proxyImage string // : + k8sProxyImage string // : // proxyPriorityClassName isPriorityClass to be set for proxy Pods. This // is a legacy mechanism for cluster resource configuration options - // going forward use ProxyClass. 
@@ -996,8 +1011,8 @@ func nodeHandlerForProxyGroup(cl client.Client, defaultProxyClass string, logger } // proxyClassHandlerForProxyGroup returns a handler that, for a given ProxyClass, -// returns a list of reconcile requests for all Connectors that have -// .spec.proxyClass set. +// returns a list of reconcile requests for all ProxyGroups that have +// .spec.proxyClass set to that ProxyClass. func proxyClassHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { return func(ctx context.Context, o client.Object) []reconcile.Request { pgList := new(tsapi.ProxyGroupList) @@ -1016,6 +1031,37 @@ func proxyClassHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) } } +// serviceAccountHandlerForProxyGroup returns a handler that, for a given ServiceAccount, +// returns a list of reconcile requests for all ProxyGroups that use that ServiceAccount. +// For most ProxyGroups, this will be a dedicated ServiceAccount owned by a specific +// ProxyGroup. But for kube-apiserver ProxyGroups running in auth mode, they use a shared +// static ServiceAccount named "kube-apiserver-auth-proxy". +func serviceAccountHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + pgList := new(tsapi.ProxyGroupList) + if err := cl.List(ctx, pgList); err != nil { + logger.Debugf("error listing ProxyGroups for ServiceAccount: %v", err) + return nil + } + reqs := make([]reconcile.Request, 0) + saName := o.GetName() + for _, pg := range pgList.Items { + if saName == authAPIServerProxySAName && isAuthAPIServerProxy(&pg) { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&pg)}) + } + expectedOwner := pgOwnerReference(&pg)[0] + saOwnerRefs := o.GetOwnerReferences() + for _, ref := range saOwnerRefs { + if apiequality.Semantic.DeepEqual(ref, expectedOwner) { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&pg)}) + break + } + } + } + return reqs + } +} + // serviceHandlerForIngress returns a handler for Service events for ingress // reconciler that ensures that if the Service associated with an event is of // interest to the reconciler, the associated Ingress(es) gets be reconciled. 
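
The serviceAccountHandlerForProxyGroup mapping above fans events for the shared static ServiceAccount out to every auth-mode kube-apiserver ProxyGroup, and maps owned ServiceAccounts back to their single owning ProxyGroup. A rough usage sketch (fragment, assuming a client cl, a logger and a ctx are in scope; the namespace is a placeholder):

    handle := serviceAccountHandlerForProxyGroup(cl, logger)
    reqs := handle(ctx, &corev1.ServiceAccount{
        ObjectMeta: metav1.ObjectMeta{Name: authAPIServerProxySAName, Namespace: "tailscale"},
    })
    // reqs holds one reconcile.Request per kube-apiserver ProxyGroup running in auth mode.
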
diff --git a/cmd/k8s-operator/proxy.go b/cmd/k8s-operator/proxy.go new file mode 100644 index 000000000..09a7b8c62 --- /dev/null +++ b/cmd/k8s-operator/proxy.go @@ -0,0 +1,61 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "fmt" + "log" + "os" +) + +type apiServerProxyMode int + +func (a apiServerProxyMode) String() string { + switch a { + case apiServerProxyModeDisabled: + return "disabled" + case apiServerProxyModeEnabled: + return "auth" + case apiServerProxyModeNoAuth: + return "noauth" + default: + return "unknown" + } +} + +const ( + apiServerProxyModeDisabled apiServerProxyMode = iota + apiServerProxyModeEnabled + apiServerProxyModeNoAuth +) + +func parseAPIProxyMode() apiServerProxyMode { + haveAuthProxyEnv := os.Getenv("AUTH_PROXY") != "" + haveAPIProxyEnv := os.Getenv("APISERVER_PROXY") != "" + switch { + case haveAPIProxyEnv && haveAuthProxyEnv: + log.Fatal("AUTH_PROXY (deprecated) and APISERVER_PROXY are mutually exclusive, please unset AUTH_PROXY") + case haveAuthProxyEnv: + var authProxyEnv = defaultBool("AUTH_PROXY", false) // deprecated + if authProxyEnv { + return apiServerProxyModeEnabled + } + return apiServerProxyModeDisabled + case haveAPIProxyEnv: + var apiProxyEnv = defaultEnv("APISERVER_PROXY", "") // true, false or "noauth" + switch apiProxyEnv { + case "true": + return apiServerProxyModeEnabled + case "false", "": + return apiServerProxyModeDisabled + case "noauth": + return apiServerProxyModeNoAuth + default: + panic(fmt.Sprintf("unknown APISERVER_PROXY value %q", apiProxyEnv)) + } + } + return apiServerProxyModeDisabled +} diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index c44de09a7..3dfb004f1 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -17,6 +17,7 @@ import ( "strings" "sync" + dockerref "github.com/distribution/reference" "go.uber.org/zap" xslices "golang.org/x/exp/slices" appsv1 "k8s.io/api/apps/v1" @@ -36,9 +37,11 @@ import ( tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/egressservices" + "tailscale.com/kube/k8s-proxy/conf" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" "tailscale.com/tstime" + "tailscale.com/types/opt" "tailscale.com/types/ptr" "tailscale.com/util/clientmetric" "tailscale.com/util/mak" @@ -48,7 +51,9 @@ import ( const ( reasonProxyGroupCreationFailed = "ProxyGroupCreationFailed" reasonProxyGroupReady = "ProxyGroupReady" + reasonProxyGroupAvailable = "ProxyGroupAvailable" reasonProxyGroupCreating = "ProxyGroupCreating" + reasonProxyGroupInvalid = "ProxyGroupInvalid" // Copied from k8s.io/apiserver/pkg/registry/generic/registry/store.go@cccad306d649184bf2a0e319ba830c53f65c445c optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" @@ -63,12 +68,14 @@ const ( // // tailcfg.CurrentCapabilityVersion was 106 when the ProxyGroup controller was // first introduced. 
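
The new cmd/k8s-operator/proxy.go above keeps the existing environment variable semantics for the in-process proxy. A small fragment (not part of the patch) summarising the mapping and exercising parseAPIProxyMode:

    // (unset)                -> disabled
    // APISERVER_PROXY=false  -> disabled
    // APISERVER_PROXY=true   -> auth
    // APISERVER_PROXY=noauth -> noauth
    // AUTH_PROXY=true        -> auth (deprecated; mutually exclusive with APISERVER_PROXY)
    os.Setenv("APISERVER_PROXY", "noauth")
    if parseAPIProxyMode() != apiServerProxyModeNoAuth {
        panic("unexpected mode")
    }
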
- pgMinCapabilityVersion = 106 + pgMinCapabilityVersion = 106 + kubeAPIServerConfigFile = "config.hujson" ) var ( - gaugeEgressProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupEgressCount) - gaugeIngressProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupIngressCount) + gaugeEgressProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupEgressCount) + gaugeIngressProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupIngressCount) + gaugeAPIServerProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupAPIServerCount) ) // ProxyGroupReconciler ensures cluster resources for a ProxyGroup definition. @@ -81,15 +88,17 @@ type ProxyGroupReconciler struct { // User-specified defaults from the helm installation. tsNamespace string - proxyImage string + tsProxyImage string + k8sProxyImage string defaultTags []string tsFirewallMode string defaultProxyClass string loginServer string - mu sync.Mutex // protects following - egressProxyGroups set.Slice[types.UID] // for egress proxygroups gauge - ingressProxyGroups set.Slice[types.UID] // for ingress proxygroups gauge + mu sync.Mutex // protects following + egressProxyGroups set.Slice[types.UID] // for egress proxygroups gauge + ingressProxyGroups set.Slice[types.UID] // for ingress proxygroups gauge + apiServerProxyGroups set.Slice[types.UID] // for kube-apiserver proxygroups gauge } func (r *ProxyGroupReconciler) logger(name string) *zap.SugaredLogger { @@ -170,7 +179,6 @@ func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyG if err != nil { return r.notReadyErrf(pg, "error getting ProxyGroup's ProxyClass %q: %w", proxyClassName, err) } - validateProxyClassForPG(logger, pg, proxyClass) if !tsoperator.ProxyClassIsReady(proxyClass) { msg := fmt.Sprintf("the ProxyGroup's ProxyClass %q is not yet in a ready state, waiting...", proxyClassName) logger.Info(msg) @@ -178,6 +186,10 @@ func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyG } } + if err := r.validate(ctx, pg, proxyClass, logger); err != nil { + return r.notReady(reasonProxyGroupInvalid, fmt.Sprintf("invalid ProxyGroup spec: %v", err)) + } + staticEndpoints, nrr, err := r.maybeProvision(ctx, pg, proxyClass) if err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { @@ -192,11 +204,7 @@ func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyG return staticEndpoints, nrr, nil } -// validateProxyClassForPG applies custom validation logic for ProxyClass applied to ProxyGroup. -func validateProxyClassForPG(logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) { - if pg.Spec.Type == tsapi.ProxyGroupTypeIngress { - return - } +func (r *ProxyGroupReconciler) validate(ctx context.Context, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, logger *zap.SugaredLogger) error { // Our custom logic for ensuring minimum downtime ProxyGroup update rollouts relies on the local health check // beig accessible on the replica Pod IP:9002. This address can also be modified by users, via // TS_LOCAL_ADDR_PORT env var. @@ -208,13 +216,70 @@ func validateProxyClassForPG(logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, pc // shouldn't need to set their own). // // TODO(irbekrm): maybe disallow configuring this env var in future (in Tailscale 1.84 or later). 
- if hasLocalAddrPortSet(pc) { + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress && hasLocalAddrPortSet(pc) { msg := fmt.Sprintf("ProxyClass %s applied to an egress ProxyGroup has TS_LOCAL_ADDR_PORT env var set to a custom value."+ "This will disable the ProxyGroup graceful failover mechanism, so you might experience downtime when ProxyGroup pods are restarted."+ "In future we will remove the ability to set custom TS_LOCAL_ADDR_PORT for egress ProxyGroups."+ "Please raise an issue if you expect that this will cause issues for your workflow.", pc.Name) logger.Warn(msg) } + + // image is the value of pc.Spec.StatefulSet.Pod.TailscaleContainer.Image or "" + // imagePath is a slash-delimited path ending with the image name, e.g. + // "tailscale/tailscale" or maybe "k8s-proxy" if hosted at example.com/k8s-proxy. + var image, imagePath string + if pc != nil && + pc.Spec.StatefulSet != nil && + pc.Spec.StatefulSet.Pod != nil && + pc.Spec.StatefulSet.Pod.TailscaleContainer != nil && + pc.Spec.StatefulSet.Pod.TailscaleContainer.Image != "" { + image, err := dockerref.ParseNormalizedNamed(pc.Spec.StatefulSet.Pod.TailscaleContainer.Image) + if err != nil { + // Shouldn't be possible as the ProxyClass won't be marked ready + // without successfully parsing the image. + return fmt.Errorf("error parsing %q as a container image reference: %w", pc.Spec.StatefulSet.Pod.TailscaleContainer.Image, err) + } + imagePath = dockerref.Path(image) + } + + var errs []error + if isAuthAPIServerProxy(pg) { + // Validate that the static ServiceAccount already exists. + sa := &corev1.ServiceAccount{} + if err := r.Get(ctx, types.NamespacedName{Namespace: r.tsNamespace, Name: authAPIServerProxySAName}, sa); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("error validating that ServiceAccount %q exists: %w", authAPIServerProxySAName, err) + } + + errs = append(errs, fmt.Errorf("the ServiceAccount %q used for the API server proxy in auth mode does not exist but "+ + "should have been created during operator installation; use apiServerProxyConfig.allowImpersonation=true "+ + "in the helm chart, or authproxy-rbac.yaml from the static manifests", authAPIServerProxySAName)) + } + } else { + // Validate that the ServiceAccount we create won't overwrite the static one. + // TODO(tomhjp): This doesn't cover other controllers that could create a + // ServiceAccount. Perhaps should have some guards to ensure that an update + // would never change the ownership of a resource we expect to already be owned. 
+ if pgServiceAccountName(pg) == authAPIServerProxySAName { + errs = append(errs, fmt.Errorf("the name of the ProxyGroup %q conflicts with the static ServiceAccount used for the API server proxy in auth mode", pg.Name)) + } + } + + if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer { + if strings.HasSuffix(imagePath, "tailscale") { + errs = append(errs, fmt.Errorf("the configured ProxyClass %q specifies to use image %q but expected a %q image for ProxyGroup of type %q", pc.Name, image, "k8s-proxy", pg.Spec.Type)) + } + + if pc != nil && pc.Spec.StatefulSet != nil && pc.Spec.StatefulSet.Pod != nil && pc.Spec.StatefulSet.Pod.TailscaleInitContainer != nil { + errs = append(errs, fmt.Errorf("the configured ProxyClass %q specifies Tailscale init container config, but ProxyGroups of type %q do not use init containers", pc.Name, pg.Spec.Type)) + } + } else { + if strings.HasSuffix(imagePath, "k8s-proxy") { + errs = append(errs, fmt.Errorf("the configured ProxyClass %q specifies to use image %q but expected a %q image for ProxyGroup of type %q", pc.Name, image, "tailscale", pg.Spec.Type)) + } + } + + return errors.Join(errs...) } func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (map[string][]netip.AddrPort, *notReadyReason, error) { @@ -263,14 +328,21 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro return r.notReadyErrf(pg, "error provisioning state Secrets: %w", err) } } - sa := pgServiceAccount(pg, r.tsNamespace) - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) { - s.ObjectMeta.Labels = sa.ObjectMeta.Labels - s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations - s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences - }); err != nil { - return r.notReadyErrf(pg, "error provisioning ServiceAccount: %w", err) + + // auth mode kube-apiserver ProxyGroups use a statically created + // ServiceAccount to keep ClusterRole creation permissions limited to the + // helm chart installer. 
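
The image validation above only inspects the repository path of the configured image, so registries, mirrors and tags are ignored. A standalone sketch (assuming only the github.com/distribution/reference module) of how typical values normalise:

    package main

    import (
        "fmt"

        dockerref "github.com/distribution/reference"
    )

    func main() {
        for _, img := range []string{
            "tailscale/tailscale",                // path "tailscale/tailscale": rejected for kube-apiserver ProxyGroups
            "ghcr.io/tailscale/tailscale:latest", // path "tailscale/tailscale": rejected for kube-apiserver ProxyGroups
            "example.com/mirror/k8s-proxy",       // path "mirror/k8s-proxy": accepted for kube-apiserver ProxyGroups
        } {
            ref, err := dockerref.ParseNormalizedNamed(img)
            if err != nil {
                panic(err)
            }
            fmt.Println(img, "->", dockerref.Path(ref))
        }
    }
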
+ if !isAuthAPIServerProxy(pg) { + sa := pgServiceAccount(pg, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) { + s.ObjectMeta.Labels = sa.ObjectMeta.Labels + s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations + s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences + }); err != nil { + return r.notReadyErrf(pg, "error provisioning ServiceAccount: %w", err) + } } + role := pgRole(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { r.ObjectMeta.Labels = role.ObjectMeta.Labels @@ -280,6 +352,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro }); err != nil { return r.notReadyErrf(pg, "error provisioning Role: %w", err) } + roleBinding := pgRoleBinding(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) { r.ObjectMeta.Labels = roleBinding.ObjectMeta.Labels @@ -290,6 +363,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro }); err != nil { return r.notReadyErrf(pg, "error provisioning RoleBinding: %w", err) } + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { cm, hp := pgEgressCM(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, cm, func(existing *corev1.ConfigMap) { @@ -300,6 +374,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro return r.notReadyErrf(pg, "error provisioning egress ConfigMap %q: %w", cm.Name, err) } } + if pg.Spec.Type == tsapi.ProxyGroupTypeIngress { cm := pgIngressCM(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, cm, func(existing *corev1.ConfigMap) { @@ -309,7 +384,12 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro return r.notReadyErrf(pg, "error provisioning ingress ConfigMap %q: %w", cm.Name, err) } } - ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, tailscaledPort, proxyClass) + + defaultImage := r.tsProxyImage + if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer { + defaultImage = r.k8sProxyImage + } + ss, err := pgStatefulSet(pg, r.tsNamespace, defaultImage, r.tsFirewallMode, tailscaledPort, proxyClass) if err != nil { return r.notReadyErrf(pg, "error generating StatefulSet spec: %w", err) } @@ -371,7 +451,7 @@ func (r *ProxyGroupReconciler) maybeUpdateStatus(ctx context.Context, logger *za if len(devices) > 0 { status = metav1.ConditionTrue if len(devices) == desiredReplicas { - reason = reasonProxyGroupReady + reason = reasonProxyGroupAvailable } } tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, status, reason, message, 0, r.clock, logger) @@ -702,17 +782,57 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p return nil, err } - configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[nodePortSvcName], existingAdvertiseServices, r.loginServer) - if err != nil { - return nil, fmt.Errorf("error creating tailscaled config: %w", err) - } + if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer { + hostname := pgHostname(pg, i) - for cap, cfg := range configs { - cfgJSON, err := json.Marshal(cfg) + if authKey == nil && existingCfgSecret != nil { + deviceAuthed := false + for _, d := range pg.Status.Devices { + if d.Hostname == hostname { + deviceAuthed = true + break + } + } + if !deviceAuthed { + existingCfg := conf.ConfigV1Alpha1{} + if err := 
json.Unmarshal(existingCfgSecret.Data[kubeAPIServerConfigFile], &existingCfg); err != nil { + return nil, fmt.Errorf("error unmarshalling existing config: %w", err) + } + if existingCfg.AuthKey != nil { + authKey = existingCfg.AuthKey + } + } + } + cfg := conf.VersionedConfig{ + Version: "v1alpha1", + ConfigV1Alpha1: &conf.ConfigV1Alpha1{ + Hostname: &hostname, + State: ptr.To(fmt.Sprintf("kube:%s", pgPodName(pg.Name, i))), + App: ptr.To(kubetypes.AppProxyGroupKubeAPIServer), + AuthKey: authKey, + KubeAPIServer: &conf.KubeAPIServer{ + AuthMode: opt.NewBool(isAuthAPIServerProxy(pg)), + }, + }, + } + cfgB, err := json.Marshal(cfg) + if err != nil { + return nil, fmt.Errorf("error marshalling k8s-proxy config: %w", err) + } + mak.Set(&cfgSecret.Data, kubeAPIServerConfigFile, cfgB) + } else { + configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[nodePortSvcName], existingAdvertiseServices, r.loginServer) if err != nil { - return nil, fmt.Errorf("error marshalling tailscaled config: %w", err) + return nil, fmt.Errorf("error creating tailscaled config: %w", err) + } + + for cap, cfg := range configs { + cfgJSON, err := json.Marshal(cfg) + if err != nil { + return nil, fmt.Errorf("error marshalling tailscaled config: %w", err) + } + mak.Set(&cfgSecret.Data, tsoperator.TailscaledConfigFileName(cap), cfgJSON) } - mak.Set(&cfgSecret.Data, tsoperator.TailscaledConfigFileName(cap), cfgJSON) } if existingCfgSecret != nil { @@ -834,9 +954,12 @@ func (r *ProxyGroupReconciler) ensureAddedToGaugeForProxyGroup(pg *tsapi.ProxyGr r.egressProxyGroups.Add(pg.UID) case tsapi.ProxyGroupTypeIngress: r.ingressProxyGroups.Add(pg.UID) + case tsapi.ProxyGroupTypeKubernetesAPIServer: + r.apiServerProxyGroups.Add(pg.UID) } gaugeEgressProxyGroupResources.Set(int64(r.egressProxyGroups.Len())) gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len())) + gaugeAPIServerProxyGroupResources.Set(int64(r.apiServerProxyGroups.Len())) } // ensureRemovedFromGaugeForProxyGroup ensures the gauge metric for the ProxyGroup resource type is updated when the @@ -847,9 +970,12 @@ func (r *ProxyGroupReconciler) ensureRemovedFromGaugeForProxyGroup(pg *tsapi.Pro r.egressProxyGroups.Remove(pg.UID) case tsapi.ProxyGroupTypeIngress: r.ingressProxyGroups.Remove(pg.UID) + case tsapi.ProxyGroupTypeKubernetesAPIServer: + r.apiServerProxyGroups.Remove(pg.UID) } gaugeEgressProxyGroupResources.Set(int64(r.egressProxyGroups.Len())) gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len())) + gaugeAPIServerProxyGroupResources.Set(int64(r.apiServerProxyGroups.Len())) } func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, authKey *string, staticEndpoints []netip.AddrPort, oldAdvertiseServices []string, loginServer string) (tailscaledConfigs, error) { @@ -858,7 +984,7 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, a AcceptDNS: "false", AcceptRoutes: "false", // AcceptRoutes defaults to true Locked: "false", - Hostname: ptr.To(fmt.Sprintf("%s-%d", pg.Name, idx)), + Hostname: ptr.To(pgHostname(pg, idx)), AdvertiseServices: oldAdvertiseServices, AuthKey: authKey, } @@ -867,10 +993,6 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, a conf.ServerURL = &loginServer } - if pg.Spec.HostnamePrefix != "" { - conf.Hostname = ptr.To(fmt.Sprintf("%s-%d", pg.Spec.HostnamePrefix, idx)) - } - if shouldAcceptRoutes(pc) { conf.AcceptRoutes = "true" } diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go 
index 50d9c2d5f..5d6d0b8ef 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -7,6 +7,7 @@ package main import ( "fmt" + "path/filepath" "slices" "strconv" "strings" @@ -28,6 +29,9 @@ const ( // deletionGracePeriodSeconds is set to 6 minutes to ensure that the pre-stop hook of these proxies have enough chance to terminate gracefully. deletionGracePeriodSeconds int64 = 360 staticEndpointPortName = "static-endpoint-port" + // authAPIServerProxySAName is the ServiceAccount deployed by the helm chart + // if apiServerProxy.authEnabled is true. + authAPIServerProxySAName = "kube-apiserver-auth-proxy" ) func pgNodePortServiceName(proxyGroupName string, replica int32) string { @@ -61,6 +65,9 @@ func pgNodePortService(pg *tsapi.ProxyGroup, name string, namespace string) *cor // Returns the base StatefulSet definition for a ProxyGroup. A ProxyClass may be // applied over the top after. func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string, port *uint16, proxyClass *tsapi.ProxyClass) (*appsv1.StatefulSet, error) { + if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer { + return kubeAPIServerStatefulSet(pg, namespace, image) + } ss := new(appsv1.StatefulSet) if err := yaml.Unmarshal(proxyYaml, &ss); err != nil { return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err) @@ -167,6 +174,7 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string Value: "$(POD_NAME)", }, { + // TODO(tomhjp): This is tsrecorder-specific and does nothing. Delete. Name: "TS_STATE", Value: "kube:$(POD_NAME)", }, @@ -264,9 +272,124 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string // gracefully. ss.Spec.Template.DeletionGracePeriodSeconds = ptr.To(deletionGracePeriodSeconds) } + return ss, nil } +func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string) (*appsv1.StatefulSet, error) { + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: ptr.To(pgReplicas(pg)), + Selector: &metav1.LabelSelector{ + MatchLabels: pgLabels(pg.Name, nil), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + DeletionGracePeriodSeconds: ptr.To[int64](10), + }, + Spec: corev1.PodSpec{ + ServiceAccountName: pgServiceAccountName(pg), + Containers: []corev1.Container{ + { + Name: mainContainerName, + Image: image, + Env: []corev1.EnvVar{ + { + // Used as default hostname and in Secret names. + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + // Used by kubeclient to post Events about the Pod's lifecycle. + Name: "POD_UID", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.uid", + }, + }, + }, + { + // Used in an interpolated env var if metrics enabled. + Name: "POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIP", + }, + }, + }, + { + // Included for completeness with POD_IP and easier backwards compatibility in future. 
+ Name: "POD_IPS", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIPs", + }, + }, + }, + { + Name: "TS_K8S_PROXY_CONFIG", + Value: filepath.Join("/etc/tsconfig/$(POD_NAME)/", kubeAPIServerConfigFile), + }, + }, + VolumeMounts: func() []corev1.VolumeMount { + var mounts []corev1.VolumeMount + + // TODO(tomhjp): Read config directly from the Secret instead. + for i := range pgReplicas(pg) { + mounts = append(mounts, corev1.VolumeMount{ + Name: fmt.Sprintf("k8s-proxy-config-%d", i), + ReadOnly: true, + MountPath: fmt.Sprintf("/etc/tsconfig/%s-%d", pg.Name, i), + }) + } + + return mounts + }(), + Ports: []corev1.ContainerPort{ + { + Name: "k8s-proxy", + ContainerPort: 443, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + Volumes: func() []corev1.Volume { + var volumes []corev1.Volume + for i := range pgReplicas(pg) { + volumes = append(volumes, corev1.Volume{ + Name: fmt.Sprintf("k8s-proxy-config-%d", i), + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: pgConfigSecretName(pg.Name, i), + }, + }, + }) + } + + return volumes + }(), + }, + }, + }, + } + + return sts, nil +} + func pgServiceAccount(pg *tsapi.ProxyGroup, namespace string) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ @@ -305,8 +428,8 @@ func pgRole(pg *tsapi.ProxyGroup, namespace string) *rbacv1.Role { ResourceNames: func() (secrets []string) { for i := range pgReplicas(pg) { secrets = append(secrets, - pgConfigSecretName(pg.Name, i), // Config with auth key. - fmt.Sprintf("%s-%d", pg.Name, i), // State. + pgConfigSecretName(pg.Name, i), // Config with auth key. + pgPodName(pg.Name, i), // State. ) } return secrets @@ -336,7 +459,7 @@ func pgRoleBinding(pg *tsapi.ProxyGroup, namespace string) *rbacv1.RoleBinding { Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Name: pg.Name, + Name: pgServiceAccountName(pg), Namespace: namespace, }, }, @@ -347,6 +470,27 @@ func pgRoleBinding(pg *tsapi.ProxyGroup, namespace string) *rbacv1.RoleBinding { } } +// kube-apiserver proxies in auth mode use a static ServiceAccount. Everything +// else uses a per-ProxyGroup ServiceAccount. +func pgServiceAccountName(pg *tsapi.ProxyGroup) string { + if isAuthAPIServerProxy(pg) { + return authAPIServerProxySAName + } + + return pg.Name +} + +func isAuthAPIServerProxy(pg *tsapi.ProxyGroup) bool { + if pg.Spec.Type != tsapi.ProxyGroupTypeKubernetesAPIServer { + return false + } + + // The default is auth mode. 
+ return pg.Spec.KubeAPIServer == nil || + pg.Spec.KubeAPIServer.Mode == nil || + *pg.Spec.KubeAPIServer.Mode == tsapi.APIServerProxyModeAuth +} + func pgStateSecrets(pg *tsapi.ProxyGroup, namespace string) (secrets []*corev1.Secret) { for i := range pgReplicas(pg) { secrets = append(secrets, &corev1.Secret{ @@ -418,6 +562,18 @@ func pgReplicas(pg *tsapi.ProxyGroup) int32 { return 2 } +func pgPodName(pgName string, i int32) string { + return fmt.Sprintf("%s-%d", pgName, i) +} + +func pgHostname(pg *tsapi.ProxyGroup, i int32) string { + if pg.Spec.HostnamePrefix != "" { + return fmt.Sprintf("%s-%d", pg.Spec.HostnamePrefix, i) + } + + return fmt.Sprintf("%s-%d", pg.Name, i) +} + func pgConfigSecretName(pgName string, i int32) string { return fmt.Sprintf("%s-%d-config", pgName, i) } diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index bd69b49a8..c58e427aa 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -629,7 +629,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { reconciler := &ProxyGroupReconciler{ tsNamespace: tsNamespace, - proxyImage: testProxyImage, + tsProxyImage: testProxyImage, defaultTags: []string{"tag:test-tag"}, tsFirewallMode: "auto", defaultProxyClass: "default-pc", @@ -772,7 +772,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { t.Run("delete_and_cleanup", func(t *testing.T) { reconciler := &ProxyGroupReconciler{ tsNamespace: tsNamespace, - proxyImage: testProxyImage, + tsProxyImage: testProxyImage, defaultTags: []string{"tag:test-tag"}, tsFirewallMode: "auto", defaultProxyClass: "default-pc", @@ -832,7 +832,7 @@ func TestProxyGroup(t *testing.T) { cl := tstest.NewClock(tstest.ClockOpts{}) reconciler := &ProxyGroupReconciler{ tsNamespace: tsNamespace, - proxyImage: testProxyImage, + tsProxyImage: testProxyImage, defaultTags: []string{"tag:test-tag"}, tsFirewallMode: "auto", defaultProxyClass: "default-pc", @@ -915,7 +915,7 @@ func TestProxyGroup(t *testing.T) { }, } tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "2/2 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "2/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) }) @@ -934,7 +934,7 @@ func TestProxyGroup(t *testing.T) { addNodeIDToStateSecrets(t, fc, pg) expectReconciled(t, reconciler, "", pg.Name) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "3/3 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "3/3 ProxyGroup pods running", 0, cl, zl.Sugar()) pg.Status.Devices = append(pg.Status.Devices, tsapi.TailnetDevice{ Hostname: "hostname-nodeid-2", TailnetIPs: []string{"1.2.3.4", "::1"}, @@ -952,7 +952,7 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) pg.Status.Devices = pg.Status.Devices[:1] // truncate to only the first device. 
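
Note that the pgPodName and pgHostname helpers added in proxygroup_specs.go above diverge when HostnamePrefix is set: Pod and Secret names always derive from the ProxyGroup name, while the tailnet hostname uses the prefix. A package-internal fragment (illustrative values only):

    pg := &tsapi.ProxyGroup{
        ObjectMeta: metav1.ObjectMeta{Name: "api-proxy"},
        Spec:       tsapi.ProxyGroupSpec{HostnamePrefix: "kubeapi"},
    }
    podName := pgPodName(pg.Name, 0) // "api-proxy-0": Pod and state Secret name; config Secret is "api-proxy-0-config"
    hostname := pgHostname(pg, 0)    // "kubeapi-0": the device hostname written into the proxy config
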
- tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "1/1 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "1/1 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) }) @@ -1025,12 +1025,12 @@ func TestProxyGroupTypes(t *testing.T) { zl, _ := zap.NewDevelopment() reconciler := &ProxyGroupReconciler{ - tsNamespace: tsNamespace, - proxyImage: testProxyImage, - Client: fc, - l: zl.Sugar(), - tsClient: &fakeTSClient{}, - clock: tstest.NewClock(tstest.ClockOpts{}), + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + Client: fc, + l: zl.Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), } t.Run("egress_type", func(t *testing.T) { @@ -1047,7 +1047,7 @@ func TestProxyGroupTypes(t *testing.T) { mustCreate(t, fc, pg) expectReconciled(t, reconciler, "", pg.Name) - verifyProxyGroupCounts(t, reconciler, 0, 1) + verifyProxyGroupCounts(t, reconciler, 0, 1, 0) sts := &appsv1.StatefulSet{} if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { @@ -1161,7 +1161,7 @@ func TestProxyGroupTypes(t *testing.T) { } expectReconciled(t, reconciler, "", pg.Name) - verifyProxyGroupCounts(t, reconciler, 1, 2) + verifyProxyGroupCounts(t, reconciler, 1, 2, 0) sts := &appsv1.StatefulSet{} if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { @@ -1198,6 +1198,44 @@ func TestProxyGroupTypes(t *testing.T) { t.Errorf("unexpected volume mounts (-want +got):\n%s", diff) } }) + + t.Run("kubernetes_api_server_type", func(t *testing.T) { + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-k8s-apiserver", + UID: "test-k8s-apiserver-uid", + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeKubernetesAPIServer, + Replicas: ptr.To[int32](2), + KubeAPIServer: &tsapi.KubeAPIServerConfig{ + Mode: ptr.To(tsapi.APIServerProxyModeNoAuth), + }, + }, + } + if err := fc.Create(t.Context(), pg); err != nil { + t.Fatal(err) + } + + expectReconciled(t, reconciler, "", pg.Name) + verifyProxyGroupCounts(t, reconciler, 1, 2, 1) + + sts := &appsv1.StatefulSet{} + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + + // Verify the StatefulSet configuration for KubernetesAPIServer type. + if sts.Spec.Template.Spec.Containers[0].Name != mainContainerName { + t.Errorf("unexpected container name %s, want %s", sts.Spec.Template.Spec.Containers[0].Name, mainContainerName) + } + if sts.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort != 443 { + t.Errorf("unexpected container port %d, want 443", sts.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort) + } + if sts.Spec.Template.Spec.Containers[0].Ports[0].Name != "k8s-proxy" { + t.Errorf("unexpected port name %s, want k8s-proxy", sts.Spec.Template.Spec.Containers[0].Ports[0].Name) + } + }) } func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { @@ -1206,12 +1244,12 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { WithStatusSubresource(&tsapi.ProxyGroup{}). 
Build() reconciler := &ProxyGroupReconciler{ - tsNamespace: tsNamespace, - proxyImage: testProxyImage, - Client: fc, - l: zap.Must(zap.NewDevelopment()).Sugar(), - tsClient: &fakeTSClient{}, - clock: tstest.NewClock(tstest.ClockOpts{}), + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + Client: fc, + l: zap.Must(zap.NewDevelopment()).Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), } existingServices := []string{"svc1", "svc2"} @@ -1272,6 +1310,170 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { }) } +func TestValidateProxyGroup(t *testing.T) { + type testCase struct { + typ tsapi.ProxyGroupType + pgName string + image string + noauth bool + initContainer bool + staticSAExists bool + expectedErrs int + } + + for name, tc := range map[string]testCase{ + "default_ingress": { + typ: tsapi.ProxyGroupTypeIngress, + }, + "default_kube": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + }, + "default_kube_noauth": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + noauth: true, + // Does not require the static ServiceAccount to exist. + }, + "kube_static_sa_missing": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: false, + expectedErrs: 1, + }, + "kube_noauth_would_overwrite_static_sa": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + noauth: true, + pgName: authAPIServerProxySAName, + expectedErrs: 1, + }, + "ingress_would_overwrite_static_sa": { + typ: tsapi.ProxyGroupTypeIngress, + staticSAExists: true, + pgName: authAPIServerProxySAName, + expectedErrs: 1, + }, + "tailscale_image_for_kube_pg_1": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + image: "example.com/tailscale/tailscale", + expectedErrs: 1, + }, + "tailscale_image_for_kube_pg_2": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + image: "example.com/tailscale", + expectedErrs: 1, + }, + "tailscale_image_for_kube_pg_3": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + image: "example.com/tailscale/tailscale:latest", + expectedErrs: 1, + }, + "tailscale_image_for_kube_pg_4": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + image: "tailscale/tailscale", + expectedErrs: 1, + }, + "k8s_proxy_image_for_ingress_pg": { + typ: tsapi.ProxyGroupTypeIngress, + image: "example.com/k8s-proxy", + expectedErrs: 1, + }, + "init_container_for_kube_pg": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + initContainer: true, + expectedErrs: 1, + }, + "init_container_for_ingress_pg": { + typ: tsapi.ProxyGroupTypeIngress, + initContainer: true, + }, + "init_container_for_egress_pg": { + typ: tsapi.ProxyGroupTypeEgress, + initContainer: true, + }, + } { + t.Run(name, func(t *testing.T) { + pc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "some-pc", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Pod: &tsapi.Pod{}, + }, + }, + } + if tc.image != "" { + pc.Spec.StatefulSet.Pod.TailscaleContainer = &tsapi.Container{ + Image: tc.image, + } + } + if tc.initContainer { + pc.Spec.StatefulSet.Pod.TailscaleInitContainer = &tsapi.Container{} + } + pgName := "some-pg" + if tc.pgName != "" { + pgName = tc.pgName + } + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgName, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tc.typ, + }, + } + if tc.noauth { + pg.Spec.KubeAPIServer = &tsapi.KubeAPIServerConfig{ + Mode: 
ptr.To(tsapi.APIServerProxyModeNoAuth), + } + } + + var objs []client.Object + if tc.staticSAExists { + objs = append(objs, &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: authAPIServerProxySAName, + Namespace: tsNamespace, + }, + }) + } + r := ProxyGroupReconciler{ + tsNamespace: tsNamespace, + Client: fake.NewClientBuilder(). + WithObjects(objs...). + Build(), + } + + logger, _ := zap.NewDevelopment() + err := r.validate(t.Context(), pg, pc, logger.Sugar()) + if tc.expectedErrs == 0 { + if err != nil { + t.Fatalf("expected no errors, got: %v", err) + } + // Test finished. + return + } + + if err == nil { + t.Fatalf("expected %d errors, got none", tc.expectedErrs) + } + + type unwrapper interface { + Unwrap() []error + } + errs := err.(unwrapper) + if len(errs.Unwrap()) != tc.expectedErrs { + t.Fatalf("expected %d errors, got %d: %v", tc.expectedErrs, len(errs.Unwrap()), err) + } + }) + } +} + func proxyClassesForLEStagingTest() (*tsapi.ProxyClass, *tsapi.ProxyClass, *tsapi.ProxyClass) { pcLEStaging := &tsapi.ProxyClass{ ObjectMeta: metav1.ObjectMeta{ @@ -1326,7 +1528,7 @@ func setProxyClassReady(t *testing.T, fc client.Client, cl *tstest.Clock, name s return pc } -func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, wantEgress int) { +func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, wantEgress, wantAPIServer int) { t.Helper() if r.ingressProxyGroups.Len() != wantIngress { t.Errorf("expected %d ingress proxy groups, got %d", wantIngress, r.ingressProxyGroups.Len()) @@ -1334,6 +1536,9 @@ func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, if r.egressProxyGroups.Len() != wantEgress { t.Errorf("expected %d egress proxy groups, got %d", wantEgress, r.egressProxyGroups.Len()) } + if r.apiServerProxyGroups.Len() != wantAPIServer { + t.Errorf("expected %d kube-apiserver proxy groups, got %d", wantAPIServer, r.apiServerProxyGroups.Len()) + } } func verifyEnvVar(t *testing.T, sts *appsv1.StatefulSet, name, expectedValue string) { @@ -1512,7 +1717,7 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) { reconciler := &ProxyGroupReconciler{ tsNamespace: tsNamespace, - proxyImage: testProxyImage, + tsProxyImage: testProxyImage, defaultTags: []string{"tag:test"}, defaultProxyClass: tt.defaultProxyClass, Client: fc, diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 193acad87..fbb271800 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -102,6 +102,8 @@ const ( defaultLocalAddrPort = 9002 // metrics and health check port letsEncryptStagingEndpoint = "https://acme-staging-v02.api.letsencrypt.org/directory" + + mainContainerName = "tailscale" ) var ( @@ -761,7 +763,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, } if pc.Spec.UseLetsEncryptStagingEnvironment && (stsCfg.proxyType == proxyTypeIngressResource || stsCfg.proxyType == string(tsapi.ProxyGroupTypeIngress)) { for i, c := range ss.Spec.Template.Spec.Containers { - if c.Name == "tailscale" { + if isMainContainer(&c) { ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{ Name: "TS_DEBUG_ACME_DIRECTORY_URL", Value: letsEncryptStagingEndpoint, @@ -829,7 +831,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, return base } for i, c := range ss.Spec.Template.Spec.Containers { - if c.Name == "tailscale" { + if isMainContainer(&c) { ss.Spec.Template.Spec.Containers[i] = 
updateContainer(wantsPod.TailscaleContainer, ss.Spec.Template.Spec.Containers[i]) break } @@ -847,7 +849,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, func enableEndpoints(ss *appsv1.StatefulSet, metrics, debug bool) { for i, c := range ss.Spec.Template.Spec.Containers { - if c.Name == "tailscale" { + if isMainContainer(&c) { if debug { ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, // Serve tailscaled's debug metrics on on @@ -902,6 +904,10 @@ func enableEndpoints(ss *appsv1.StatefulSet, metrics, debug bool) { } } +func isMainContainer(c *corev1.Container) bool { + return c.Name == mainContainerName +} + // tailscaledConfig takes a proxy config, a newly generated auth key if generated and a Secret with the previous proxy // state and auth key and returns tailscaled config files for currently supported proxy versions. func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go index 9846513c7..4247eaaa0 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -60,7 +60,6 @@ type HAServiceReconciler struct { recorder record.EventRecorder logger *zap.SugaredLogger tsClient tsClient - tsnetServer tsnetServer tsNamespace string lc localClient defaultTags []string @@ -221,7 +220,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // This checks and ensures that Tailscale Service's owner references are updated // for this Service and errors if that is not possible (i.e. because it // appears that the Tailscale Service has been created by a non-operator actor). - updatedAnnotations, err := r.ownerAnnotations(existingTSSvc) + updatedAnnotations, err := ownerAnnotations(r.operatorID, existingTSSvc) if err != nil { instr := fmt.Sprintf("To proceed, you can either manually delete the existing Tailscale Service or choose a different hostname with the '%s' annotaion", AnnotationHostname) msg := fmt.Sprintf("error ensuring ownership of Tailscale Service %s: %v. %s", hostname, err, instr) @@ -395,7 +394,7 @@ func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, serviceName := tailcfg.ServiceName("svc:" + hostname) // 1. Clean up the Tailscale Service. - svcChanged, err = r.cleanupTailscaleService(ctx, serviceName, logger) + svcChanged, err = cleanupTailscaleService(ctx, r.tsClient, serviceName, r.operatorID, logger) if err != nil { return false, fmt.Errorf("error deleting Tailscale Service: %w", err) } @@ -456,7 +455,7 @@ func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } - svcsChanged, err = r.cleanupTailscaleService(ctx, tailcfg.ServiceName(tsSvcName), logger) + svcsChanged, err = cleanupTailscaleService(ctx, r.tsClient, tailcfg.ServiceName(tsSvcName), r.operatorID, logger) if err != nil { return false, fmt.Errorf("deleting Tailscale Service %q: %w", tsSvcName, err) } @@ -529,8 +528,8 @@ func (r *HAServiceReconciler) tailnetCertDomain(ctx context.Context) (string, er // If a Tailscale Service is found, but contains other owner references, only removes this operator's owner reference. // If a Tailscale Service by the given name is not found or does not contain this operator's owner reference, do nothing. 
// It returns true if an existing Tailscale Service was updated to remove owner reference, as well as any error that occurred. -func (r *HAServiceReconciler) cleanupTailscaleService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (updated bool, err error) { - svc, err := r.tsClient.GetVIPService(ctx, name) +func cleanupTailscaleService(ctx context.Context, tsClient tsClient, name tailcfg.ServiceName, operatorID string, logger *zap.SugaredLogger) (updated bool, err error) { + svc, err := tsClient.GetVIPService(ctx, name) if isErrorFeatureFlagNotEnabled(err) { msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled) logger.Warn(msg) @@ -563,14 +562,14 @@ func (r *HAServiceReconciler) cleanupTailscaleService(ctx context.Context, name // cluster before deleting the Ingress. Perhaps the comparison could be // 'if or.OperatorID == r.operatorID || or.ingressUID == r.ingressUID'. ix := slices.IndexFunc(o.OwnerRefs, func(or OwnerRef) bool { - return or.OperatorID == r.operatorID + return or.OperatorID == operatorID }) if ix == -1 { return false, nil } if len(o.OwnerRefs) == 1 { logger.Infof("Deleting Tailscale Service %q", name) - return false, r.tsClient.DeleteVIPService(ctx, name) + return false, tsClient.DeleteVIPService(ctx, name) } o.OwnerRefs = slices.Delete(o.OwnerRefs, ix, ix+1) logger.Infof("Updating Tailscale Service %q", name) @@ -579,7 +578,7 @@ func (r *HAServiceReconciler) cleanupTailscaleService(ctx context.Context, name return false, fmt.Errorf("error marshalling updated Tailscale Service owner reference: %w", err) } svc.Annotations[ownerAnnotation] = string(json) - return true, r.tsClient.CreateOrUpdateVIPService(ctx, svc) + return true, tsClient.CreateOrUpdateVIPService(ctx, svc) } func (a *HAServiceReconciler) backendRoutesSetup(ctx context.Context, serviceName, replicaName, pgName string, wantsCfg *ingressservices.Config, logger *zap.SugaredLogger) (bool, error) { @@ -742,49 +741,6 @@ func (a *HAServiceReconciler) numberPodsAdvertising(ctx context.Context, pgName return count, nil } -// ownerAnnotations returns the updated annotations required to ensure this -// instance of the operator is included as an owner. If the Tailscale Service is not -// nil, but does not contain an owner we return an error as this likely means -// that the Tailscale Service was created by something other than a Tailscale -// Kubernetes operator. 
-func (r *HAServiceReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[string]string, error) { - ref := OwnerRef{ - OperatorID: r.operatorID, - } - if svc == nil { - c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}} - json, err := json.Marshal(c) - if err != nil { - return nil, fmt.Errorf("[unexpected] unable to marshal Tailscale Service owner annotation contents: %w, please report this", err) - } - return map[string]string{ - ownerAnnotation: string(json), - }, nil - } - o, err := parseOwnerAnnotation(svc) - if err != nil { - return nil, err - } - if o == nil || len(o.OwnerRefs) == 0 { - return nil, fmt.Errorf("Tailscale Service %s exists, but does not contain owner annotation with owner references; not proceeding as this is likely a resource created by something other than the Tailscale Kubernetes operator", svc.Name) - } - if slices.Contains(o.OwnerRefs, ref) { // up to date - return svc.Annotations, nil - } - o.OwnerRefs = append(o.OwnerRefs, ref) - json, err := json.Marshal(o) - if err != nil { - return nil, fmt.Errorf("error marshalling updated owner references: %w", err) - } - - newAnnots := make(map[string]string, len(svc.Annotations)+1) - for k, v := range svc.Annotations { - newAnnots[k] = v - } - newAnnots[ownerAnnotation] = string(json) - return newAnnots, nil -} - // dnsNameForService returns the DNS name for the given Tailscale Service name. func (r *HAServiceReconciler) dnsNameForService(ctx context.Context, svc tailcfg.ServiceName) (string, error) { s := svc.WithoutPrefix() diff --git a/cmd/k8s-operator/svc-for-pg_test.go b/cmd/k8s-operator/svc-for-pg_test.go index e08bfd80d..054c3ed49 100644 --- a/cmd/k8s-operator/svc-for-pg_test.go +++ b/cmd/k8s-operator/svc-for-pg_test.go @@ -187,7 +187,6 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien if err := fc.Status().Update(context.Background(), pg); err != nil { t.Fatal(err) } - fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} ft := &fakeTSClient{} zl, err := zap.NewDevelopment() @@ -210,7 +209,6 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien clock: cl, defaultTags: []string{"tag:k8s"}, tsNamespace: "operator-ns", - tsnetServer: fakeTsnetServer, logger: zl.Sugar(), recorder: record.NewFakeRecorder(10), lc: lc, diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go new file mode 100644 index 000000000..6e7eadb73 --- /dev/null +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -0,0 +1,197 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// k8s-proxy proxies between tailnet and Kubernetes cluster traffic. +// Currently, it only supports proxying tailnet clients to the Kubernetes API +// server. 
+package main + +import ( + "context" + "errors" + "fmt" + "os" + "os/signal" + "syscall" + "time" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "golang.org/x/sync/errgroup" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "tailscale.com/hostinfo" + "tailscale.com/ipn" + "tailscale.com/ipn/store" + apiproxy "tailscale.com/k8s-operator/api-proxy" + "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/state" + "tailscale.com/tsnet" +) + +func main() { + logger := zap.Must(zap.NewProduction()).Sugar() + defer logger.Sync() + if err := run(logger); err != nil { + logger.Fatal(err.Error()) + } +} + +func run(logger *zap.SugaredLogger) error { + var ( + configFile = os.Getenv("TS_K8S_PROXY_CONFIG") + podUID = os.Getenv("POD_UID") + ) + if configFile == "" { + return errors.New("TS_K8S_PROXY_CONFIG unset") + } + + // TODO(tomhjp): Support reloading config. + // TODO(tomhjp): Support reading config from a Secret. + cfg, err := conf.Load(configFile) + if err != nil { + return fmt.Errorf("error loading config file %q: %w", configFile, err) + } + + if cfg.Parsed.LogLevel != nil { + level, err := zapcore.ParseLevel(*cfg.Parsed.LogLevel) + if err != nil { + return fmt.Errorf("error parsing log level %q: %w", *cfg.Parsed.LogLevel, err) + } + logger = logger.WithOptions(zap.IncreaseLevel(level)) + } + + if cfg.Parsed.App != nil { + hostinfo.SetApp(*cfg.Parsed.App) + } + + st, err := getStateStore(cfg.Parsed.State, logger) + if err != nil { + return err + } + + // If Pod UID unset, assume we're running outside of a cluster/not managed + // by the operator, so no need to set additional state keys. + if podUID != "" { + if err := state.SetInitialKeys(st, podUID); err != nil { + return fmt.Errorf("error setting initial state: %w", err) + } + } + + var authKey string + if cfg.Parsed.AuthKey != nil { + authKey = *cfg.Parsed.AuthKey + } + + ts := &tsnet.Server{ + Logf: logger.Named("tsnet").Debugf, + UserLogf: logger.Named("tsnet").Infof, + Store: st, + AuthKey: authKey, + } + if cfg.Parsed.Hostname != nil { + ts.Hostname = *cfg.Parsed.Hostname + } + + // ctx to live for the lifetime of the process. + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancel() + + // Make sure we crash loop if Up doesn't complete in reasonable time. + upCtx, upCancel := context.WithTimeout(ctx, time.Minute) + defer upCancel() + if _, err := ts.Up(upCtx); err != nil { + return fmt.Errorf("error starting tailscale server: %w", err) + } + defer ts.Close() + + group, groupCtx := errgroup.WithContext(ctx) + + // Setup for updating state keys. + if podUID != "" { + lc, err := ts.LocalClient() + if err != nil { + return fmt.Errorf("error getting local client: %w", err) + } + w, err := lc.WatchIPNBus(groupCtx, ipn.NotifyInitialNetMap) + if err != nil { + return fmt.Errorf("error watching IPN bus: %w", err) + } + defer w.Close() + + group.Go(func() error { + if err := state.KeepKeysUpdated(st, w.Next); err != nil && err != groupCtx.Err() { + return fmt.Errorf("error keeping state keys updated: %w", err) + } + + return nil + }) + } + + // Setup for the API server proxy. 
+ restConfig, err := getRestConfig(logger) + if err != nil { + return fmt.Errorf("error getting rest config: %w", err) + } + authMode := true + if cfg.Parsed.KubeAPIServer != nil { + v, ok := cfg.Parsed.KubeAPIServer.AuthMode.Get() + if ok { + authMode = v + } + } + ap, err := apiproxy.NewAPIServerProxy(logger.Named("apiserver-proxy"), restConfig, ts, authMode) + if err != nil { + return fmt.Errorf("error creating api server proxy: %w", err) + } + + // TODO(tomhjp): Work out whether we should use TS_CERT_SHARE_MODE or not, + // and possibly issue certs upfront here before serving. + group.Go(func() error { + if err := ap.Run(groupCtx); err != nil { + return fmt.Errorf("error running API server proxy: %w", err) + } + + return nil + }) + + return group.Wait() +} + +func getStateStore(path *string, logger *zap.SugaredLogger) (ipn.StateStore, error) { + p := "mem:" + if path != nil { + p = *path + } else { + logger.Warn("No state Secret provided; using in-memory store, which will lose state on restart") + } + st, err := store.New(logger.Errorf, p) + if err != nil { + return nil, fmt.Errorf("error creating state store: %w", err) + } + + return st, nil +} + +func getRestConfig(logger *zap.SugaredLogger) (*rest.Config, error) { + restConfig, err := rest.InClusterConfig() + switch err { + case nil: + return restConfig, nil + case rest.ErrNotInCluster: + logger.Info("Not running in-cluster, falling back to kubeconfig") + default: + return nil, fmt.Errorf("error getting in-cluster config: %w", err) + } + + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, nil) + restConfig, err = clientConfig.ClientConfig() + if err != nil { + return nil, fmt.Errorf("error loading kubeconfig: %w", err) + } + + return restConfig, nil +} diff --git a/k8s-operator/api-proxy/env.go b/k8s-operator/api-proxy/env.go deleted file mode 100644 index c0640ab1e..000000000 --- a/k8s-operator/api-proxy/env.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !plan9 - -package apiproxy - -import ( - "os" - - "tailscale.com/types/opt" -) - -func defaultBool(envName string, defVal bool) bool { - vs := os.Getenv(envName) - if vs == "" { - return defVal - } - v, _ := opt.Bool(vs).Get() - return v -} - -func defaultEnv(envName, defVal string) string { - v := os.Getenv(envName) - if v == "" { - return defVal - } - return v -} diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index 7c7260b94..c3c13e784 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -6,17 +6,17 @@ package apiproxy import ( + "context" "crypto/tls" + "errors" "fmt" - "log" "net/http" "net/http/httputil" "net/netip" "net/url" - "os" "strings" + "time" - "github.com/pkg/errors" "go.uber.org/zap" "k8s.io/client-go/rest" "k8s.io/client-go/transport" @@ -37,123 +37,52 @@ var ( whoIsKey = ctxkey.New("", (*apitype.WhoIsResponse)(nil)) ) -type APIServerProxyMode int - -func (a APIServerProxyMode) String() string { - switch a { - case APIServerProxyModeDisabled: - return "disabled" - case APIServerProxyModeEnabled: - return "auth" - case APIServerProxyModeNoAuth: - return "noauth" - default: - return "unknown" - } -} - -const ( - APIServerProxyModeDisabled APIServerProxyMode = iota - APIServerProxyModeEnabled - APIServerProxyModeNoAuth -) - -func ParseAPIProxyMode() APIServerProxyMode { - haveAuthProxyEnv := os.Getenv("AUTH_PROXY") != "" - 
haveAPIProxyEnv := os.Getenv("APISERVER_PROXY") != "" - switch { - case haveAPIProxyEnv && haveAuthProxyEnv: - log.Fatal("AUTH_PROXY and APISERVER_PROXY are mutually exclusive") - case haveAuthProxyEnv: - var authProxyEnv = defaultBool("AUTH_PROXY", false) // deprecated - if authProxyEnv { - return APIServerProxyModeEnabled - } - return APIServerProxyModeDisabled - case haveAPIProxyEnv: - var apiProxyEnv = defaultEnv("APISERVER_PROXY", "") // true, false or "noauth" - switch apiProxyEnv { - case "true": - return APIServerProxyModeEnabled - case "false", "": - return APIServerProxyModeDisabled - case "noauth": - return APIServerProxyModeNoAuth - default: - panic(fmt.Sprintf("unknown APISERVER_PROXY value %q", apiProxyEnv)) - } - } - return APIServerProxyModeDisabled -} - -// maybeLaunchAPIServerProxy launches the auth proxy, which is a small HTTP server -// that authenticates requests using the Tailscale LocalAPI and then proxies -// them to the kube-apiserver. -func MaybeLaunchAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, s *tsnet.Server, mode APIServerProxyMode) { - if mode == APIServerProxyModeDisabled { - return - } - startlog := zlog.Named("launchAPIProxy") - if mode == APIServerProxyModeNoAuth { +// NewAPIServerProxy creates a new APIServerProxy that's ready to start once Run +// is called. No network traffic will flow until Run is called. +// +// authMode controls how the proxy behaves: +// - true: the proxy is started and requests are impersonated using the +// caller's Tailscale identity and the rules defined in the tailnet ACLs. +// - false: the proxy is started and requests are passed through to the +// Kubernetes API without any auth modifications. +func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsnet.Server, authMode bool) (*APIServerProxy, error) { + if !authMode { restConfig = rest.AnonymousClientConfig(restConfig) } cfg, err := restConfig.TransportConfig() if err != nil { - startlog.Fatalf("could not get rest.TransportConfig(): %v", err) + return nil, fmt.Errorf("could not get rest.TransportConfig(): %w", err) } - // Kubernetes uses SPDY for exec and port-forward, however SPDY is - // incompatible with HTTP/2; so disable HTTP/2 in the proxy. tr := http.DefaultTransport.(*http.Transport).Clone() tr.TLSClientConfig, err = transport.TLSConfigFor(cfg) if err != nil { - startlog.Fatalf("could not get transport.TLSConfigFor(): %v", err) + return nil, fmt.Errorf("could not get transport.TLSConfigFor(): %w", err) } tr.TLSNextProto = make(map[string]func(authority string, c *tls.Conn) http.RoundTripper) rt, err := transport.HTTPWrappersForConfig(cfg, tr) if err != nil { - startlog.Fatalf("could not get rest.TransportConfig(): %v", err) + return nil, fmt.Errorf("could not get rest.TransportConfig(): %w", err) } - go runAPIServerProxy(s, rt, zlog.Named("apiserver-proxy"), mode, restConfig.Host) -} -// runAPIServerProxy runs an HTTP server that authenticates requests using the -// Tailscale LocalAPI and then proxies them to the Kubernetes API. -// It listens on :443 and uses the Tailscale HTTPS certificate. -// s will be started if it is not already running. -// rt is used to proxy requests to the Kubernetes API. -// -// mode controls how the proxy behaves: -// - apiserverProxyModeDisabled: the proxy is not started. -// - apiserverProxyModeEnabled: the proxy is started and requests are impersonated using the -// caller's identity from the Tailscale LocalAPI. 
-// - apiserverProxyModeNoAuth: the proxy is started and requests are not impersonated and -// are passed through to the Kubernetes API. -// -// It never returns. -func runAPIServerProxy(ts *tsnet.Server, rt http.RoundTripper, log *zap.SugaredLogger, mode APIServerProxyMode, host string) { - if mode == APIServerProxyModeDisabled { - return - } - ln, err := ts.Listen("tcp", ":443") + u, err := url.Parse(restConfig.Host) if err != nil { - log.Fatalf("could not listen on :443: %v", err) + return nil, fmt.Errorf("failed to parse URL %w", err) } - u, err := url.Parse(host) - if err != nil { - log.Fatalf("runAPIServerProxy: failed to parse URL %v", err) + if u.Scheme == "" || u.Host == "" { + return nil, fmt.Errorf("the API server proxy requires host and scheme but got: %q", restConfig.Host) } lc, err := ts.LocalClient() if err != nil { - log.Fatalf("could not get local client: %v", err) + return nil, fmt.Errorf("could not get local client: %w", err) } - ap := &apiserverProxy{ - log: log, + ap := &APIServerProxy{ + log: zlog, lc: lc, - mode: mode, + authMode: authMode, upstreamURL: u, ts: ts, } @@ -164,41 +93,69 @@ func runAPIServerProxy(ts *tsnet.Server, rt http.RoundTripper, log *zap.SugaredL Transport: rt, } + return ap, nil +} + +// Run starts the HTTP server that authenticates requests using the +// Tailscale LocalAPI and then proxies them to the Kubernetes API. +// It listens on :443 and uses the Tailscale HTTPS certificate. +// +// It return when ctx is cancelled or ServeTLS fails. +func (ap *APIServerProxy) Run(ctx context.Context) error { + ln, err := ap.ts.Listen("tcp", ":443") + if err != nil { + return fmt.Errorf("could not listen on :443: %v", err) + } + mux := http.NewServeMux() mux.HandleFunc("/", ap.serveDefault) mux.HandleFunc("POST /api/v1/namespaces/{namespace}/pods/{pod}/exec", ap.serveExecSPDY) mux.HandleFunc("GET /api/v1/namespaces/{namespace}/pods/{pod}/exec", ap.serveExecWS) - hs := &http.Server{ + ap.hs = &http.Server{ // Kubernetes uses SPDY for exec and port-forward, however SPDY is // incompatible with HTTP/2; so disable HTTP/2 in the proxy. TLSConfig: &tls.Config{ - GetCertificate: lc.GetCertificate, + GetCertificate: ap.lc.GetCertificate, NextProtos: []string{"http/1.1"}, }, TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), Handler: mux, } - log.Infof("API server proxy in %q mode is listening on %s", mode, ln.Addr()) - if err := hs.ServeTLS(ln, "", ""); err != nil { - log.Fatalf("runAPIServerProxy: failed to serve %v", err) + + errs := make(chan error) + go func() { + ap.log.Infof("API server proxy is listening on %s with auth mode: %v", ln.Addr(), ap.authMode) + if err := ap.hs.ServeTLS(ln, "", ""); err != nil && err != http.ErrServerClosed { + errs <- fmt.Errorf("failed to serve: %w", err) + } + }() + + select { + case <-ctx.Done(): + shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + return ap.hs.Shutdown(shutdownCtx) + case err := <-errs: + return err } } -// apiserverProxy is an [net/http.Handler] that authenticates requests using the Tailscale +// APIServerProxy is an [net/http.Handler] that authenticates requests using the Tailscale // LocalAPI and then proxies them to the Kubernetes API. -type apiserverProxy struct { +type APIServerProxy struct { log *zap.SugaredLogger lc *local.Client rp *httputil.ReverseProxy - mode APIServerProxyMode + authMode bool ts *tsnet.Server + hs *http.Server upstreamURL *url.URL } // serveDefault is the default handler for Kubernetes API server requests. 
-func (ap *apiserverProxy) serveDefault(w http.ResponseWriter, r *http.Request) { +func (ap *APIServerProxy) serveDefault(w http.ResponseWriter, r *http.Request) { who, err := ap.whoIs(r) if err != nil { ap.authError(w, err) @@ -210,17 +167,17 @@ func (ap *apiserverProxy) serveDefault(w http.ResponseWriter, r *http.Request) { // serveExecSPDY serves 'kubectl exec' requests for sessions streamed over SPDY, // optionally configuring the kubectl exec sessions to be recorded. -func (ap *apiserverProxy) serveExecSPDY(w http.ResponseWriter, r *http.Request) { +func (ap *APIServerProxy) serveExecSPDY(w http.ResponseWriter, r *http.Request) { ap.execForProto(w, r, ksr.SPDYProtocol) } // serveExecWS serves 'kubectl exec' requests for sessions streamed over WebSocket, // optionally configuring the kubectl exec sessions to be recorded. -func (ap *apiserverProxy) serveExecWS(w http.ResponseWriter, r *http.Request) { +func (ap *APIServerProxy) serveExecWS(w http.ResponseWriter, r *http.Request) { ap.execForProto(w, r, ksr.WSProtocol) } -func (ap *apiserverProxy) execForProto(w http.ResponseWriter, r *http.Request, proto ksr.Protocol) { +func (ap *APIServerProxy) execForProto(w http.ResponseWriter, r *http.Request, proto ksr.Protocol) { const ( podNameKey = "pod" namespaceNameKey = "namespace" @@ -282,10 +239,10 @@ func (ap *apiserverProxy) execForProto(w http.ResponseWriter, r *http.Request, p ap.rp.ServeHTTP(h, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } -func (h *apiserverProxy) addImpersonationHeadersAsRequired(r *http.Request) { - r.URL.Scheme = h.upstreamURL.Scheme - r.URL.Host = h.upstreamURL.Host - if h.mode == APIServerProxyModeNoAuth { +func (ap *APIServerProxy) addImpersonationHeadersAsRequired(r *http.Request) { + r.URL.Scheme = ap.upstreamURL.Scheme + r.URL.Host = ap.upstreamURL.Host + if !ap.authMode { // If we are not providing authentication, then we are just // proxying to the Kubernetes API, so we don't need to do // anything else. @@ -310,16 +267,16 @@ func (h *apiserverProxy) addImpersonationHeadersAsRequired(r *http.Request) { } // Now add the impersonation headers that we want. - if err := addImpersonationHeaders(r, h.log); err != nil { - log.Print("failed to add impersonation headers: ", err.Error()) + if err := addImpersonationHeaders(r, ap.log); err != nil { + ap.log.Errorf("failed to add impersonation headers: %v", err) } } -func (ap *apiserverProxy) whoIs(r *http.Request) (*apitype.WhoIsResponse, error) { +func (ap *APIServerProxy) whoIs(r *http.Request) (*apitype.WhoIsResponse, error) { return ap.lc.WhoIs(r.Context(), r.RemoteAddr) } -func (ap *apiserverProxy) authError(w http.ResponseWriter, err error) { +func (ap *APIServerProxy) authError(w http.ResponseWriter, err error) { ap.log.Errorf("failed to authenticate caller: %v", err) http.Error(w, "failed to authenticate caller", http.StatusInternalServerError) } diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 18bf1cb50..c09152da6 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -21,6 +21,21 @@ +#### APIServerProxyMode + +_Underlying type:_ _string_ + + + +_Validation:_ +- Enum: [auth noauth] +- Type: string + +_Appears in:_ +- [KubeAPIServerConfig](#kubeapiserverconfig) + + + #### AppConnector @@ -142,7 +157,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `env` _[Env](#env) array_ | List of environment variables to set in the container.
                https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables
                Note that environment variables provided here will take precedence
                over Tailscale-specific environment variables set by the operator,
                however running proxies with custom values for Tailscale environment
                variables (i.e. TS_USERSPACE) is not recommended and might break in
                the future. | | | -| `image` _string_ | Container image name. By default images are pulled from
                docker.io/tailscale/tailscale, but the official images are also
                available at ghcr.io/tailscale/tailscale. Specifying image name here
                will override any proxy image values specified via the Kubernetes
                operator's Helm chart values or PROXY_IMAGE env var in the operator
                Deployment.
                https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | | +| `image` _string_ | Container image name. By default images are pulled from docker.io/tailscale,
                but the official images are also available at ghcr.io/tailscale.
                For all uses except on ProxyGroups of type "kube-apiserver", this image must
                be either tailscale/tailscale, or an equivalent mirror of that image.
                To apply to ProxyGroups of type "kube-apiserver", this image must be
                tailscale/k8s-proxy or a mirror of that image.
                For "tailscale/tailscale"-based proxies, specifying image name here will
                override any proxy image values specified via the Kubernetes operator's
                Helm chart values or PROXY_IMAGE env var in the operator Deployment.
                For "tailscale/k8s-proxy"-based proxies, there is currently no way to
                configure your own default, and this field is the only way to use a
                custom image.
                https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | | | `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#pullpolicy-v1-core)_ | Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always.
                https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | Enum: [Always Never IfNotPresent]
                | | `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#resourcerequirements-v1-core)_ | Container resource requirements.
                By default Tailscale Kubernetes operator does not apply any resource
                requirements. The amount of resources required will depend on the
                amount of resources the operator needs to parse, usage patterns and
                cluster size.
                https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources | | | | `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#securitycontext-v1-core)_ | Container security context.
                Security context specified here will override the security context set by the operator.
                By default the operator sets the Tailscale container and the Tailscale init container to privileged
                for proxies created for Tailscale ingress and egress Service, Connector and ProxyGroup.
                You can reduce the permissions of the Tailscale container to cap NET_ADMIN by
                installing device plugin in your cluster and configuring the proxies tun device to be created
                by the device plugin, see https://github.com/tailscale/tailscale/issues/10814#issuecomment-2479977752
                https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context | | | @@ -313,6 +328,22 @@ _Appears in:_ +#### KubeAPIServerConfig + + + +KubeAPIServerConfig contains configuration specific to the kube-apiserver ProxyGroup type. + + + +_Appears in:_ +- [ProxyGroupSpec](#proxygroupspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `mode` _[APIServerProxyMode](#apiserverproxymode)_ | Mode to run the API server proxy in. Supported modes are auth and noauth.
                In auth mode, requests from the tailnet proxied over to the Kubernetes
                API server are additionally impersonated using the sender's tailnet identity.
                If not specified, defaults to auth mode. | | Enum: [auth noauth]
                Type: string
                | + + #### LabelValue _Underlying type:_ _string_ @@ -459,7 +490,7 @@ _Appears in:_ | `annotations` _object (keys:string, values:string)_ | Annotations that will be added to the proxy Pod.
                Any annotations specified here will be merged with the default
                annotations applied to the Pod by the Tailscale Kubernetes operator.
                Annotations must be valid Kubernetes annotations.
                https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set | | | | `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#affinity-v1-core)_ | Proxy Pod's affinity rules.
                By default, the Tailscale Kubernetes operator does not apply any affinity rules.
                https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#affinity | | | | `tailscaleContainer` _[Container](#container)_ | Configuration for the proxy container running tailscale. | | | -| `tailscaleInitContainer` _[Container](#container)_ | Configuration for the proxy init container that enables forwarding. | | | +| `tailscaleInitContainer` _[Container](#container)_ | Configuration for the proxy init container that enables forwarding.
                Not valid to apply to ProxyGroups of type "kube-apiserver". | | | | `securityContext` _[PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#podsecuritycontext-v1-core)_ | Proxy Pod's security context.
                By default Tailscale Kubernetes operator does not apply any Pod
                security context.
                https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2 | | | | `imagePullSecrets` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#localobjectreference-v1-core) array_ | Proxy Pod's image pull Secrets.
                https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec | | | | `nodeName` _string_ | Proxy Pod's node name.
                https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | @@ -638,11 +669,12 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Supported types are egress and ingress.
                Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress]
                Type: string
                | +| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Supported types are egress, ingress, and kube-apiserver.
                Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress kube-apiserver]
                Type: string
                | | `tags` _[Tags](#tags)_ | Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s].
                If you specify custom tags here, make sure you also make the operator
                an owner of these tags.
                See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
                Tags cannot be changed once a ProxyGroup device has been created.
                Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
                Type: string
                | | `replicas` _integer_ | Replicas specifies how many replicas to create the StatefulSet with.
                Defaults to 2. | | Minimum: 0
                | | `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix is the hostname prefix to use for tailnet devices created
                by the ProxyGroup. Each device will have the integer number from its
                StatefulSet pod appended to this prefix to form the full hostname.
                HostnamePrefix can contain lower case letters, numbers and dashes, it
                must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$`
                Type: string
                | | `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that contains
                configuration options that should be applied to the resources created
                for this ProxyGroup. If unset, and there is no default ProxyClass
                configured, the operator will create resources with the default
                configuration. | | | +| `kubeAPIServer` _[KubeAPIServerConfig](#kubeapiserverconfig)_ | KubeAPIServer contains configuration specific to the kube-apiserver
                ProxyGroup type. This field is only used when Type is set to "kube-apiserver". | | | #### ProxyGroupStatus @@ -669,7 +701,7 @@ _Underlying type:_ _string_ _Validation:_ -- Enum: [egress ingress] +- Enum: [egress ingress kube-apiserver] - Type: string _Appears in:_ diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index 9221c60f3..6a4114bfa 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -264,6 +264,7 @@ type Pod struct { // +optional TailscaleContainer *Container `json:"tailscaleContainer,omitempty"` // Configuration for the proxy init container that enables forwarding. + // Not valid to apply to ProxyGroups of type "kube-apiserver". // +optional TailscaleInitContainer *Container `json:"tailscaleInitContainer,omitempty"` // Proxy Pod's security context. @@ -364,12 +365,21 @@ type Container struct { // the future. // +optional Env []Env `json:"env,omitempty"` - // Container image name. By default images are pulled from - // docker.io/tailscale/tailscale, but the official images are also - // available at ghcr.io/tailscale/tailscale. Specifying image name here - // will override any proxy image values specified via the Kubernetes - // operator's Helm chart values or PROXY_IMAGE env var in the operator - // Deployment. + // Container image name. By default images are pulled from docker.io/tailscale, + // but the official images are also available at ghcr.io/tailscale. + // + // For all uses except on ProxyGroups of type "kube-apiserver", this image must + // be either tailscale/tailscale, or an equivalent mirror of that image. + // To apply to ProxyGroups of type "kube-apiserver", this image must be + // tailscale/k8s-proxy or a mirror of that image. + // + // For "tailscale/tailscale"-based proxies, specifying image name here will + // override any proxy image values specified via the Kubernetes operator's + // Helm chart values or PROXY_IMAGE env var in the operator Deployment. + // For "tailscale/k8s-proxy"-based proxies, there is currently no way to + // configure your own default, and this field is the only way to use a + // custom image. + // // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image // +optional Image string `json:"image,omitempty"` diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index 5edb47f0d..ad5b11361 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -49,7 +49,7 @@ type ProxyGroupList struct { } type ProxyGroupSpec struct { - // Type of the ProxyGroup proxies. Supported types are egress and ingress. + // Type of the ProxyGroup proxies. Supported types are egress, ingress, and kube-apiserver. // Type is immutable once a ProxyGroup is created. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ProxyGroup type is immutable" Type ProxyGroupType `json:"type"` @@ -84,6 +84,11 @@ type ProxyGroupSpec struct { // configuration. // +optional ProxyClass string `json:"proxyClass,omitempty"` + + // KubeAPIServer contains configuration specific to the kube-apiserver + // ProxyGroup type. This field is only used when Type is set to "kube-apiserver". 
+ // +optional + KubeAPIServer *KubeAPIServerConfig `json:"kubeAPIServer,omitempty"` } type ProxyGroupStatus struct { @@ -122,14 +127,34 @@ type TailnetDevice struct { } // +kubebuilder:validation:Type=string -// +kubebuilder:validation:Enum=egress;ingress +// +kubebuilder:validation:Enum=egress;ingress;kube-apiserver type ProxyGroupType string const ( - ProxyGroupTypeEgress ProxyGroupType = "egress" - ProxyGroupTypeIngress ProxyGroupType = "ingress" + ProxyGroupTypeEgress ProxyGroupType = "egress" + ProxyGroupTypeIngress ProxyGroupType = "ingress" + ProxyGroupTypeKubernetesAPIServer ProxyGroupType = "kube-apiserver" +) + +// +kubebuilder:validation:Type=string +// +kubebuilder:validation:Enum=auth;noauth +type APIServerProxyMode string + +const ( + APIServerProxyModeAuth APIServerProxyMode = "auth" + APIServerProxyModeNoAuth APIServerProxyMode = "noauth" ) // +kubebuilder:validation:Type=string // +kubebuilder:validation:Pattern=`^[a-z0-9][a-z0-9-]{0,61}$` type HostnamePrefix string + +// KubeAPIServerConfig contains configuration specific to the kube-apiserver ProxyGroup type. +type KubeAPIServerConfig struct { + // Mode to run the API server proxy in. Supported modes are auth and noauth. + // In auth mode, requests from the tailnet proxied over to the Kubernetes + // API server are additionally impersonated using the sender's tailnet identity. + // If not specified, defaults to auth mode. + // +optional + Mode *APIServerProxyMode `json:"mode,omitempty"` +} diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index ffc04d3b9..32adbd680 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -316,6 +316,26 @@ func (in *Env) DeepCopy() *Env { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(APIServerProxyMode) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerConfig. +func (in *KubeAPIServerConfig) DeepCopy() *KubeAPIServerConfig { + if in == nil { + return nil + } + out := new(KubeAPIServerConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in Labels) DeepCopyInto(out *Labels) { { @@ -731,6 +751,11 @@ func (in *ProxyGroupSpec) DeepCopyInto(out *ProxyGroupSpec) { *out = new(int32) **out = **in } + if in.KubeAPIServer != nil { + in, out := &in.KubeAPIServer, &out.KubeAPIServer + *out = new(KubeAPIServerConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyGroupSpec. diff --git a/kube/k8s-proxy/conf/conf.go b/kube/k8s-proxy/conf/conf.go new file mode 100644 index 000000000..6b0e853c5 --- /dev/null +++ b/kube/k8s-proxy/conf/conf.go @@ -0,0 +1,101 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package conf contains code to load, manipulate, and access config file +// settings for k8s-proxy. +package conf + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/tailscale/hujson" + "tailscale.com/types/opt" +) + +const v1Alpha1 = "v1alpha1" + +// Config describes a config file. 
+type Config struct { + Path string // disk path of HuJSON + Raw []byte // raw bytes from disk, in HuJSON form + Std []byte // standardized JSON form + Version string // "v1alpha1" + + // Parsed is the parsed config, converted from its on-disk version to the + // latest known format. + Parsed ConfigV1Alpha1 +} + +// VersionedConfig allows specifying config at the root of the object, or in +// a versioned sub-object. +// e.g. {"version": "v1alpha1", "authKey": "abc123"} +// or {"version": "v1beta1", "a-beta-config": "a-beta-value", "v1alpha1": {"authKey": "abc123"}} +type VersionedConfig struct { + Version string `json:",omitempty"` // "v1alpha1" + + // Latest version of the config. + *ConfigV1Alpha1 + + // Backwards compatibility version(s) of the config. Fields and sub-fields + // from here should only be added to, never changed in place. + V1Alpha1 *ConfigV1Alpha1 `json:",omitempty"` + // V1Beta1 *ConfigV1Beta1 `json:",omitempty"` // Not yet used. +} + +type ConfigV1Alpha1 struct { + AuthKey *string `json:",omitempty"` // Tailscale auth key to use. + Hostname *string `json:",omitempty"` // Tailscale device hostname. + State *string `json:",omitempty"` // Path to the Tailscale state. + LogLevel *string `json:",omitempty"` // "debug", "info". Defaults to "info". + App *string `json:",omitempty"` // e.g. kubetypes.AppProxyGroupKubeAPIServer + KubeAPIServer *KubeAPIServer `json:",omitempty"` // Config specific to the API Server proxy. +} + +type KubeAPIServer struct { + AuthMode opt.Bool `json:",omitempty"` +} + +// Load reads and parses the config file at the provided path on disk. +func Load(path string) (c Config, err error) { + c.Path = path + + c.Raw, err = os.ReadFile(path) + if err != nil { + return c, fmt.Errorf("error reading config file %q: %w", path, err) + } + c.Std, err = hujson.Standardize(c.Raw) + if err != nil { + return c, fmt.Errorf("error parsing config file %q HuJSON/JSON: %w", path, err) + } + var ver VersionedConfig + if err := json.Unmarshal(c.Std, &ver); err != nil { + return c, fmt.Errorf("error parsing config file %q: %w", path, err) + } + rootV1Alpha1 := (ver.Version == v1Alpha1) + backCompatV1Alpha1 := (ver.V1Alpha1 != nil) + switch { + case ver.Version == "": + return c, fmt.Errorf("error parsing config file %q: no \"version\" field provided", path) + case rootV1Alpha1 && backCompatV1Alpha1: + // Exactly one of these should be set. + return c, fmt.Errorf("error parsing config file %q: both root and v1alpha1 config provided", path) + case rootV1Alpha1 != backCompatV1Alpha1: + c.Version = v1Alpha1 + switch { + case rootV1Alpha1 && ver.ConfigV1Alpha1 != nil: + c.Parsed = *ver.ConfigV1Alpha1 + case backCompatV1Alpha1: + c.Parsed = *ver.V1Alpha1 + default: + c.Parsed = ConfigV1Alpha1{} + } + default: + return c, fmt.Errorf("error parsing config file %q: unsupported \"version\" value %q; want \"%s\"", path, ver.Version, v1Alpha1) + } + + return c, nil +} diff --git a/kube/k8s-proxy/conf/conf_test.go b/kube/k8s-proxy/conf/conf_test.go new file mode 100644 index 000000000..a47391dc9 --- /dev/null +++ b/kube/k8s-proxy/conf/conf_test.go @@ -0,0 +1,86 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package conf + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "tailscale.com/types/ptr" +) + +// Test that the config file can be at the root of the object, or in a versioned sub-object. 
+// or {"version": "v1beta1", "a-beta-config": "a-beta-value", "v1alpha1": {"authKey": "abc123"}} +func TestVersionedConfig(t *testing.T) { + testCases := map[string]struct { + inputConfig string + expectedConfig ConfigV1Alpha1 + expectedError string + }{ + "root_config_v1alpha1": { + inputConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, + expectedConfig: ConfigV1Alpha1{AuthKey: ptr.To("abc123")}, + }, + "backwards_compat_v1alpha1_config": { + // Client doesn't know about v1beta1, so it should read in v1alpha1. + inputConfig: `{"version": "v1beta1", "beta-key": "beta-value", "authKey": "def456", "v1alpha1": {"authKey": "abc123"}}`, + expectedConfig: ConfigV1Alpha1{AuthKey: ptr.To("abc123")}, + }, + "unknown_key_allowed": { + // Adding new keys to the config doesn't require a version bump. + inputConfig: `{"version": "v1alpha1", "unknown-key": "unknown-value", "authKey": "abc123"}`, + expectedConfig: ConfigV1Alpha1{AuthKey: ptr.To("abc123")}, + }, + "version_only_no_authkey": { + inputConfig: `{"version": "v1alpha1"}`, + expectedConfig: ConfigV1Alpha1{}, + }, + "both_config_v1alpha1": { + inputConfig: `{"version": "v1alpha1", "authKey": "abc123", "v1alpha1": {"authKey": "def456"}}`, + expectedError: "both root and v1alpha1 config provided", + }, + "empty_config": { + inputConfig: `{}`, + expectedError: `no "version" field provided`, + }, + "v1beta1_without_backwards_compat": { + inputConfig: `{"version": "v1beta1", "beta-key": "beta-value", "authKey": "def456"}`, + expectedError: `unsupported "version" value "v1beta1"; want "v1alpha1"`, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "config.json") + if err := os.WriteFile(path, []byte(tc.inputConfig), 0644); err != nil { + t.Fatalf("failed to write config file: %v", err) + } + cfg, err := Load(path) + switch { + case tc.expectedError == "" && err != nil: + t.Fatalf("unexpected error: %v", err) + case tc.expectedError != "": + if err == nil { + t.Fatalf("expected error %q, got nil", tc.expectedError) + } else if !strings.Contains(err.Error(), tc.expectedError) { + t.Fatalf("expected error %q, got %q", tc.expectedError, err.Error()) + } + return + } + if cfg.Version != "v1alpha1" { + t.Fatalf("expected version %q, got %q", "v1alpha1", cfg.Version) + } + // Diff actual vs expected config. + if diff := cmp.Diff(cfg.Parsed, tc.expectedConfig); diff != "" { + t.Fatalf("Unexpected parsed config (-got +want):\n%s", diff) + } + }) + } +} diff --git a/kube/kubetypes/types.go b/kube/kubetypes/types.go index 6f96875dd..20b005014 100644 --- a/kube/kubetypes/types.go +++ b/kube/kubetypes/types.go @@ -5,14 +5,15 @@ package kubetypes const ( // Hostinfo App values for the Tailscale Kubernetes Operator components. 
- AppOperator = "k8s-operator" - AppAPIServerProxy = "k8s-operator-proxy" - AppIngressProxy = "k8s-operator-ingress-proxy" - AppIngressResource = "k8s-operator-ingress-resource" - AppEgressProxy = "k8s-operator-egress-proxy" - AppConnector = "k8s-operator-connector-resource" - AppProxyGroupEgress = "k8s-operator-proxygroup-egress" - AppProxyGroupIngress = "k8s-operator-proxygroup-ingress" + AppOperator = "k8s-operator" + AppInProcessAPIServerProxy = "k8s-operator-proxy" + AppIngressProxy = "k8s-operator-ingress-proxy" + AppIngressResource = "k8s-operator-ingress-resource" + AppEgressProxy = "k8s-operator-egress-proxy" + AppConnector = "k8s-operator-connector-resource" + AppProxyGroupEgress = "k8s-operator-proxygroup-egress" + AppProxyGroupIngress = "k8s-operator-proxygroup-ingress" + AppProxyGroupKubeAPIServer = "k8s-operator-proxygroup-kube-apiserver" // Clientmetrics for Tailscale Kubernetes Operator components MetricIngressProxyCount = "k8s_ingress_proxies" // L3 @@ -29,6 +30,7 @@ const ( MetricEgressServiceCount = "k8s_egress_service_resources" MetricProxyGroupEgressCount = "k8s_proxygroup_egress_resources" MetricProxyGroupIngressCount = "k8s_proxygroup_ingress_resources" + MetricProxyGroupAPIServerCount = "k8s_proxygroup_kube_apiserver_resources" // Keys that containerboot writes to state file that can be used to determine its state. // fields set in Tailscale state Secret. These are mostly used by the Tailscale Kubernetes operator to determine diff --git a/kube/state/state.go b/kube/state/state.go new file mode 100644 index 000000000..4831a5f5b --- /dev/null +++ b/kube/state/state.go @@ -0,0 +1,97 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package state updates state keys for tailnet client devices managed by the +// operator. These keys are used to signal readiness, metadata, and current +// configuration state to the operator. Client packages deployed by the operator +// include containerboot, tsrecorder, and k8s-proxy, but currently containerboot +// has its own implementation to manage the same keys. +package state + +import ( + "encoding/json" + "fmt" + + "tailscale.com/ipn" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" + "tailscale.com/util/deephash" +) + +const ( + keyPodUID = ipn.StateKey(kubetypes.KeyPodUID) + keyCapVer = ipn.StateKey(kubetypes.KeyCapVer) + keyDeviceID = ipn.StateKey(kubetypes.KeyDeviceID) + keyDeviceIPs = ipn.StateKey(kubetypes.KeyDeviceIPs) + keyDeviceFQDN = ipn.StateKey(kubetypes.KeyDeviceFQDN) +) + +// SetInitialKeys sets Pod UID and cap ver and clears tailnet device state +// keys to help stop the operator using stale tailnet device state. +func SetInitialKeys(store ipn.StateStore, podUID string) error { + // Clear device state keys first so the operator knows if the pod UID + // matches, the other values are definitely not stale. 
+ for _, key := range []ipn.StateKey{keyDeviceID, keyDeviceFQDN, keyDeviceIPs} { + if _, err := store.ReadState(key); err == nil { + if err := store.WriteState(key, nil); err != nil { + return fmt.Errorf("error writing %q to state store: %w", key, err) + } + } + } + + if err := store.WriteState(keyPodUID, []byte(podUID)); err != nil { + return fmt.Errorf("error writing pod UID to state store: %w", err) + } + if err := store.WriteState(keyCapVer, fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion)); err != nil { + return fmt.Errorf("error writing capability version to state store: %w", err) + } + + return nil +} + +// KeepKeysUpdated sets state store keys consistent with containerboot to +// signal proxy readiness to the operator. It runs until its context is +// cancelled or it hits an error. The passed in next function is expected to be +// from a local.IPNBusWatcher that is at least subscribed to +// ipn.NotifyInitialNetMap. +func KeepKeysUpdated(store ipn.StateStore, next func() (ipn.Notify, error)) error { + var currentDeviceID, currentDeviceIPs, currentDeviceFQDN deephash.Sum + + for { + n, err := next() // Blocks on a streaming LocalAPI HTTP call. + if err != nil { + return err + } + if n.NetMap == nil { + continue + } + + if deviceID := n.NetMap.SelfNode.StableID(); deephash.Update(¤tDeviceID, &deviceID) { + if err := store.WriteState(keyDeviceID, []byte(deviceID)); err != nil { + return fmt.Errorf("failed to store device ID in state: %w", err) + } + } + + if fqdn := n.NetMap.SelfNode.Name(); deephash.Update(¤tDeviceFQDN, &fqdn) { + if err := store.WriteState(keyDeviceFQDN, []byte(fqdn)); err != nil { + return fmt.Errorf("failed to store device FQDN in state: %w", err) + } + } + + if addrs := n.NetMap.SelfNode.Addresses(); deephash.Update(¤tDeviceIPs, &addrs) { + var deviceIPs []string + for _, addr := range addrs.AsSlice() { + deviceIPs = append(deviceIPs, addr.Addr().String()) + } + deviceIPsValue, err := json.Marshal(deviceIPs) + if err != nil { + return err + } + if err := store.WriteState(keyDeviceIPs, deviceIPsValue); err != nil { + return fmt.Errorf("failed to store device IPs in state: %w", err) + } + } + } +} diff --git a/kube/state/state_test.go b/kube/state/state_test.go new file mode 100644 index 000000000..0375b1c01 --- /dev/null +++ b/kube/state/state_test.go @@ -0,0 +1,203 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package state + +import ( + "bytes" + "fmt" + "net/netip" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "tailscale.com/ipn" + "tailscale.com/ipn/store" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + "tailscale.com/types/netmap" +) + +func TestSetInitialStateKeys(t *testing.T) { + var ( + podUID = []byte("test-pod-uid") + expectedCapVer = fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion) + ) + for name, tc := range map[string]struct { + initial map[ipn.StateKey][]byte + expected map[ipn.StateKey][]byte + }{ + "empty_initial": { + initial: map[ipn.StateKey][]byte{}, + expected: map[ipn.StateKey][]byte{ + keyPodUID: podUID, + keyCapVer: expectedCapVer, + }, + }, + "existing_pod_uid_and_capver": { + initial: map[ipn.StateKey][]byte{ + keyPodUID: podUID, + keyCapVer: expectedCapVer, + }, + expected: map[ipn.StateKey][]byte{ + keyPodUID: podUID, + keyCapVer: expectedCapVer, + }, + }, + "all_keys_preexisting": { + initial: map[ipn.StateKey][]byte{ + keyPodUID: podUID, + keyCapVer: expectedCapVer, + keyDeviceID: []byte("existing-device-id"), + keyDeviceFQDN: 
[]byte("existing-device-fqdn"), + keyDeviceIPs: []byte(`["1.2.3.4"]`), + }, + expected: map[ipn.StateKey][]byte{ + keyPodUID: podUID, + keyCapVer: expectedCapVer, + keyDeviceID: nil, + keyDeviceFQDN: nil, + keyDeviceIPs: nil, + }, + }, + } { + t.Run(name, func(t *testing.T) { + store, err := store.New(logger.Discard, "mem:") + if err != nil { + t.Fatalf("error creating in-memory store: %v", err) + } + + for key, value := range tc.initial { + if err := store.WriteState(key, value); err != nil { + t.Fatalf("error writing initial state key %q: %v", key, err) + } + } + + if err := SetInitialKeys(store, string(podUID)); err != nil { + t.Fatalf("setInitialStateKeys failed: %v", err) + } + + actual := make(map[ipn.StateKey][]byte) + for expectedKey, expectedValue := range tc.expected { + actualValue, err := store.ReadState(expectedKey) + if err != nil { + t.Errorf("error reading state key %q: %v", expectedKey, err) + continue + } + + actual[expectedKey] = actualValue + if !bytes.Equal(actualValue, expectedValue) { + t.Errorf("state key %q mismatch: expected %q, got %q", expectedKey, expectedValue, actualValue) + } + } + if diff := cmp.Diff(actual, tc.expected); diff != "" { + t.Errorf("state keys mismatch (-got +want):\n%s", diff) + } + }) + } +} + +func TestKeepStateKeysUpdated(t *testing.T) { + store, err := store.New(logger.Discard, "mem:") + if err != nil { + t.Fatalf("error creating in-memory store: %v", err) + } + + nextWaiting := make(chan struct{}) + go func() { + <-nextWaiting // Acknowledge the initial signal. + }() + notifyCh := make(chan ipn.Notify) + next := func() (ipn.Notify, error) { + nextWaiting <- struct{}{} // Send signal to test that state is consistent. + return <-notifyCh, nil // Wait for test input. + } + + errs := make(chan error, 1) + go func() { + err := KeepKeysUpdated(store, next) + if err != nil { + errs <- fmt.Errorf("keepStateKeysUpdated returned with error: %w", err) + } + }() + + for _, tc := range []struct { + name string + notify ipn.Notify + expected map[ipn.StateKey][]byte + }{ + { + name: "initial_not_authed", + notify: ipn.Notify{}, + expected: map[ipn.StateKey][]byte{ + keyDeviceID: nil, + keyDeviceFQDN: nil, + keyDeviceIPs: nil, + }, + }, + { + name: "authed", + notify: ipn.Notify{ + NetMap: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + StableID: "TESTCTRL00000001", + Name: "test-node.test.ts.net", + Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32"), netip.MustParsePrefix("fd7a:115c:a1e0:ab12:4843:cd96:0:1/128")}, + }).View(), + }, + }, + expected: map[ipn.StateKey][]byte{ + keyDeviceID: []byte("TESTCTRL00000001"), + keyDeviceFQDN: []byte("test-node.test.ts.net"), + keyDeviceIPs: []byte(`["100.64.0.1","fd7a:115c:a1e0:ab12:4843:cd96:0:1"]`), + }, + }, + { + name: "updated_fields", + notify: ipn.Notify{ + NetMap: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + StableID: "TESTCTRL00000001", + Name: "updated.test.ts.net", + Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.250/32")}, + }).View(), + }, + }, + expected: map[ipn.StateKey][]byte{ + keyDeviceID: []byte("TESTCTRL00000001"), + keyDeviceFQDN: []byte("updated.test.ts.net"), + keyDeviceIPs: []byte(`["100.64.0.250"]`), + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + // Send test input. 
+ select { + case notifyCh <- tc.notify: + case <-errs: + t.Fatal("keepStateKeysUpdated returned before test input") + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for next() to be called again") + } + + // Wait for next() to be called again so we know the goroutine has + // processed the event. + select { + case <-nextWaiting: + case <-errs: + t.Fatal("keepStateKeysUpdated returned before test input") + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for next() to be called again") + } + + for key, value := range tc.expected { + got, _ := store.ReadState(key) + if !bytes.Equal(got, value) { + t.Errorf("state key %q mismatch: expected %q, got %q", key, value, got) + } + } + }) + } +} From 27fa2ad868f0e1bf48614dd97b7fde9cd00fa93d Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 9 Jul 2025 09:37:45 +0100 Subject: [PATCH 1064/1708] cmd/k8s-operator: don't require generation for Available condition (#16497) The observed generation was set to always 0 in #16429, but this had the knock-on effect of other controllers considering ProxyGroups never ready because the observed generation is never up to date in proxyGroupCondition. Make sure the ProxyGroupAvailable function does not requires the observed generation to be up to date, and add testing coverage to catch regressions. Updates #16327 Change-Id: I42f50ad47dd81cc2d3c3ce2cd7b252160bb58e40 Signed-off-by: Tom Proctor --- cmd/k8s-operator/proxygroup_test.go | 30 +++++++++++++++++++++++------ k8s-operator/conditions.go | 13 +++++++------ 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index c58e427aa..6f143c056 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -28,6 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "tailscale.com/client/tailscale" "tailscale.com/ipn" + kube "tailscale.com/k8s-operator" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -815,6 +816,7 @@ func TestProxyGroup(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test", Finalizers: []string{"tailscale.com/finalizer"}, + Generation: 1, }, Spec: tsapi.ProxyGroupSpec{ Type: tsapi.ProxyGroupTypeEgress, @@ -856,9 +858,12 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass \"default-pc\" is not yet in a ready state, waiting...", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass \"default-pc\" is not yet in a ready state, waiting...", 1, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, false, pc) + if kube.ProxyGroupAvailable(pg) { + t.Fatal("expected ProxyGroup to not be available") + } }) t.Run("observe_ProxyGroupCreating_status_reason", func(t *testing.T) { @@ -874,13 +879,19 @@ func TestProxyGroup(t *testing.T) { if err := fc.Status().Update(t.Context(), pc); err != nil { t.Fatal(err) } - + pg.ObjectMeta.Generation = 2 + mustUpdate(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { + p.ObjectMeta.Generation = pg.ObjectMeta.Generation + }) 
expectReconciled(t, reconciler, "", pg.Name) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 2, cl, zl.Sugar()) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) + if kube.ProxyGroupAvailable(pg) { + t.Fatal("expected ProxyGroup to not be available") + } if expected := 1; reconciler.egressProxyGroups.Len() != expected { t.Fatalf("expected %d egress ProxyGroups, got %d", expected, reconciler.egressProxyGroups.Len()) } @@ -902,6 +913,10 @@ func TestProxyGroup(t *testing.T) { t.Run("simulate_successful_device_auth", func(t *testing.T) { addNodeIDToStateSecrets(t, fc, pg) + pg.ObjectMeta.Generation = 3 + mustUpdate(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { + p.ObjectMeta.Generation = pg.ObjectMeta.Generation + }) expectReconciled(t, reconciler, "", pg.Name) pg.Status.Devices = []tsapi.TailnetDevice{ @@ -914,10 +929,13 @@ func TestProxyGroup(t *testing.T) { TailnetIPs: []string{"1.2.3.4", "::1"}, }, } - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 3, cl, zl.Sugar()) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "2/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) + if !kube.ProxyGroupAvailable(pg) { + t.Fatal("expected ProxyGroup to be available") + } }) t.Run("scale_up_to_3", func(t *testing.T) { @@ -926,14 +944,14 @@ func TestProxyGroup(t *testing.T) { p.Spec = pg.Spec }) expectReconciled(t, reconciler, "", pg.Name) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 3, cl, zl.Sugar()) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) addNodeIDToStateSecrets(t, fc, pg) expectReconciled(t, reconciler, "", pg.Name) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 3, cl, zl.Sugar()) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "3/3 ProxyGroup pods running", 0, cl, zl.Sugar()) pg.Status.Devices = append(pg.Status.Devices, tsapi.TailnetDevice{ Hostname: "hostname-nodeid-2", diff --git a/k8s-operator/conditions.go b/k8s-operator/conditions.go index 1d30f352c..f6858c005 100644 --- a/k8s-operator/conditions.go +++ b/k8s-operator/conditions.go @@ -137,22 +137,23 @@ func 
ProxyClassIsReady(pc *tsapi.ProxyClass) bool { } func ProxyGroupIsReady(pg *tsapi.ProxyGroup) bool { - return proxyGroupCondition(pg, tsapi.ProxyGroupReady) + cond := proxyGroupCondition(pg, tsapi.ProxyGroupReady) + return cond != nil && cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == pg.Generation } func ProxyGroupAvailable(pg *tsapi.ProxyGroup) bool { - return proxyGroupCondition(pg, tsapi.ProxyGroupAvailable) + cond := proxyGroupCondition(pg, tsapi.ProxyGroupAvailable) + return cond != nil && cond.Status == metav1.ConditionTrue } -func proxyGroupCondition(pg *tsapi.ProxyGroup, condType tsapi.ConditionType) bool { +func proxyGroupCondition(pg *tsapi.ProxyGroup, condType tsapi.ConditionType) *metav1.Condition { idx := xslices.IndexFunc(pg.Status.Conditions, func(cond metav1.Condition) bool { return cond.Type == string(condType) }) if idx == -1 { - return false + return nil } - cond := pg.Status.Conditions[idx] - return cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == pg.Generation + return &pg.Status.Conditions[idx] } func DNSCfgIsReady(cfg *tsapi.DNSConfig) bool { From 008a238acddcf1cb73c544eee41f689392b74494 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 9 Jul 2025 09:16:29 -0700 Subject: [PATCH 1065/1708] wgengine/magicsock: support self as candidate peer relay (#16499) Updates tailscale/corp#30247 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 51 ++++++------ wgengine/magicsock/magicsock_test.go | 114 ++++++++++++++++++--------- 2 files changed, 102 insertions(+), 63 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index ab7c2102f..1978867fa 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2618,8 +2618,8 @@ func (c *Conn) onFilterUpdate(f FilterUpdate) { c.updateRelayServersSet(f.Filter, self, peers) } -// updateRelayServersSet iterates all peers, evaluating filt for each one in -// order to determine which peers are relay server candidates. filt, self, and +// updateRelayServersSet iterates all peers and self, evaluating filt for each +// one in order to determine which are relay server candidates. filt, self, and // peers are passed as args (vs c.mu-guarded fields) to enable callers to // release c.mu before calling as this is O(m * n) (we iterate all cap rules 'm' // in filt for every peer 'n'). @@ -2631,8 +2631,9 @@ func (c *Conn) onFilterUpdate(f FilterUpdate) { // the computed result over the eventbus instead. func (c *Conn) updateRelayServersSet(filt *filter.Filter, self tailcfg.NodeView, peers views.Slice[tailcfg.NodeView]) { relayServers := make(set.Set[netip.AddrPort]) - for _, peer := range peers.All() { - peerAPI := peerAPIIfCandidateRelayServer(filt, self, peer) + nodes := append(peers.AsSlice(), self) + for _, maybeCandidate := range nodes { + peerAPI := peerAPIIfCandidateRelayServer(filt, self, maybeCandidate) if peerAPI.IsValid() { relayServers.Add(peerAPI) } @@ -2640,33 +2641,34 @@ func (c *Conn) updateRelayServersSet(filt *filter.Filter, self tailcfg.NodeView, c.relayManager.handleRelayServersSet(relayServers) } -// peerAPIIfCandidateRelayServer returns the peer API address of peer if it -// is considered to be a candidate relay server upon evaluation against filt and -// self, otherwise it returns a zero value. 
-func peerAPIIfCandidateRelayServer(filt *filter.Filter, self, peer tailcfg.NodeView) netip.AddrPort { +// peerAPIIfCandidateRelayServer returns the peer API address of maybeCandidate +// if it is considered to be a candidate relay server upon evaluation against +// filt and self, otherwise it returns a zero value. self and maybeCandidate +// may be equal. +func peerAPIIfCandidateRelayServer(filt *filter.Filter, self, maybeCandidate tailcfg.NodeView) netip.AddrPort { if filt == nil || !self.Valid() || - !peer.Valid() || - !capVerIsRelayServerCapable(peer.Cap()) || - !peer.Hostinfo().Valid() { + !maybeCandidate.Valid() || + !capVerIsRelayServerCapable(maybeCandidate.Cap()) || + !maybeCandidate.Hostinfo().Valid() { return netip.AddrPort{} } - for _, peerPrefix := range peer.Addresses().All() { - if !peerPrefix.IsSingleIP() { + for _, maybeCandidatePrefix := range maybeCandidate.Addresses().All() { + if !maybeCandidatePrefix.IsSingleIP() { continue } - peerAddr := peerPrefix.Addr() + maybeCandidateAddr := maybeCandidatePrefix.Addr() for _, selfPrefix := range self.Addresses().All() { if !selfPrefix.IsSingleIP() { continue } selfAddr := selfPrefix.Addr() - if selfAddr.BitLen() == peerAddr.BitLen() { // same address family - if filt.CapsWithValues(peerAddr, selfAddr).HasCapability(tailcfg.PeerCapabilityRelayTarget) { - for _, s := range peer.Hostinfo().Services().All() { - if peerAddr.Is4() && s.Proto == tailcfg.PeerAPI4 || - peerAddr.Is6() && s.Proto == tailcfg.PeerAPI6 { - return netip.AddrPortFrom(peerAddr, s.Port) + if selfAddr.BitLen() == maybeCandidateAddr.BitLen() { // same address family + if filt.CapsWithValues(maybeCandidateAddr, selfAddr).HasCapability(tailcfg.PeerCapabilityRelayTarget) { + for _, s := range maybeCandidate.Hostinfo().Services().All() { + if maybeCandidateAddr.Is4() && s.Proto == tailcfg.PeerAPI4 || + maybeCandidateAddr.Is6() && s.Proto == tailcfg.PeerAPI6 { + return netip.AddrPortFrom(maybeCandidateAddr, s.Port) } } return netip.AddrPort{} // no peerAPI @@ -2674,10 +2676,11 @@ func peerAPIIfCandidateRelayServer(filt *filter.Filter, self, peer tailcfg.NodeV // [nodeBackend.peerCapsLocked] only returns/considers the // [tailcfg.PeerCapMap] between the passed src and the // _first_ host (/32 or /128) address for self. We are - // consistent with that behavior here. If self and peer - // host addresses are of the same address family they either - // have the capability or not. We do not check against - // additional host addresses of the same address family. + // consistent with that behavior here. If self and + // maybeCandidate host addresses are of the same address + // family they either have the capability or not. We do not + // check against additional host addresses of the same + // address family. 
return netip.AddrPort{} } } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index c388e9ed1..aea2de17d 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -3385,16 +3385,7 @@ func Test_virtualNetworkID(t *testing.T) { } func Test_peerAPIIfCandidateRelayServer(t *testing.T) { - selfOnlyIPv4 := &tailcfg.Node{ - Cap: math.MinInt32, - Addresses: []netip.Prefix{ - netip.MustParsePrefix("1.1.1.1/32"), - }, - } - selfOnlyIPv6 := selfOnlyIPv4.Clone() - selfOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::1/128") - - peerHostinfo := &tailcfg.Hostinfo{ + hostInfo := &tailcfg.Hostinfo{ Services: []tailcfg.Service{ { Proto: tailcfg.PeerAPI4, @@ -3406,12 +3397,23 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, } + + selfOnlyIPv4 := &tailcfg.Node{ + Cap: math.MinInt32, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("1.1.1.1/32"), + }, + Hostinfo: hostInfo.View(), + } + selfOnlyIPv6 := selfOnlyIPv4.Clone() + selfOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::1/128") + peerOnlyIPv4 := &tailcfg.Node{ Cap: math.MinInt32, Addresses: []netip.Prefix{ netip.MustParsePrefix("2.2.2.2/32"), }, - Hostinfo: peerHostinfo.View(), + Hostinfo: hostInfo.View(), } peerOnlyIPv6 := peerOnlyIPv4.Clone() @@ -3424,11 +3426,11 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { peerOnlyIPv4NilHostinfo.Hostinfo = tailcfg.HostinfoView{} tests := []struct { - name string - filt *filter.Filter - self tailcfg.NodeView - peer tailcfg.NodeView - want netip.AddrPort + name string + filt *filter.Filter + self tailcfg.NodeView + maybeCandidate tailcfg.NodeView + want netip.AddrPort }{ { name: "match v4", @@ -3443,9 +3445,26 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv4.View(), - peer: peerOnlyIPv4.View(), - want: netip.MustParseAddrPort("2.2.2.2:4"), + self: selfOnlyIPv4.View(), + maybeCandidate: peerOnlyIPv4.View(), + want: netip.MustParseAddrPort("2.2.2.2:4"), + }, + { + name: "match v4 self", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{selfOnlyIPv4.Addresses[0]}, + Caps: []filtertype.CapMatch{ + { + Dst: selfOnlyIPv4.Addresses[0], + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv4.View(), + maybeCandidate: selfOnlyIPv4.View(), + want: netip.AddrPortFrom(selfOnlyIPv4.Addresses[0].Addr(), 4), }, { name: "match v6", @@ -3460,9 +3479,26 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv6.View(), - peer: peerOnlyIPv6.View(), - want: netip.MustParseAddrPort("[::2]:6"), + self: selfOnlyIPv6.View(), + maybeCandidate: peerOnlyIPv6.View(), + want: netip.MustParseAddrPort("[::2]:6"), + }, + { + name: "match v6 self", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{selfOnlyIPv6.Addresses[0]}, + Caps: []filtertype.CapMatch{ + { + Dst: selfOnlyIPv6.Addresses[0], + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv6.View(), + maybeCandidate: selfOnlyIPv6.View(), + want: netip.AddrPortFrom(selfOnlyIPv6.Addresses[0].Addr(), 6), }, { name: "no match dst", @@ -3477,8 +3513,8 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv6.View(), - peer: peerOnlyIPv6.View(), + self: selfOnlyIPv6.View(), + maybeCandidate: peerOnlyIPv6.View(), }, { name: "no match peer cap", @@ -3493,8 +3529,8 @@ func 
Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv6.View(), - peer: peerOnlyIPv6.View(), + self: selfOnlyIPv6.View(), + maybeCandidate: peerOnlyIPv6.View(), }, { name: "cap ver not relay capable", @@ -3509,14 +3545,14 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: peerOnlyIPv4.View(), - peer: peerOnlyIPv4ZeroCapVer.View(), + self: peerOnlyIPv4.View(), + maybeCandidate: peerOnlyIPv4ZeroCapVer.View(), }, { - name: "nil filt", - filt: nil, - self: selfOnlyIPv4.View(), - peer: peerOnlyIPv4.View(), + name: "nil filt", + filt: nil, + self: selfOnlyIPv4.View(), + maybeCandidate: peerOnlyIPv4.View(), }, { name: "nil self", @@ -3531,8 +3567,8 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: tailcfg.NodeView{}, - peer: peerOnlyIPv4.View(), + self: tailcfg.NodeView{}, + maybeCandidate: peerOnlyIPv4.View(), }, { name: "nil peer", @@ -3547,8 +3583,8 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv4.View(), - peer: tailcfg.NodeView{}, + self: selfOnlyIPv4.View(), + maybeCandidate: tailcfg.NodeView{}, }, { name: "nil peer hostinfo", @@ -3563,13 +3599,13 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv4.View(), - peer: peerOnlyIPv4NilHostinfo.View(), + self: selfOnlyIPv4.View(), + maybeCandidate: peerOnlyIPv4NilHostinfo.View(), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := peerAPIIfCandidateRelayServer(tt.filt, tt.self, tt.peer); !reflect.DeepEqual(got, tt.want) { + if got := peerAPIIfCandidateRelayServer(tt.filt, tt.self, tt.maybeCandidate); !reflect.DeepEqual(got, tt.want) { t.Errorf("peerAPIIfCandidateRelayServer() = %v, want %v", got, tt.want) } }) From cc2f4ac921106ad46691b9271008f0bf43aeb970 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 9 Jul 2025 11:59:57 -0500 Subject: [PATCH 1066/1708] ipn: move ParseAutoExitNodeID from ipn/ipnlocal to ipn So it can be used from the CLI without importing ipnlocal. While there, also remove isAutoExitNodeID, a wrapper around parseAutoExitNodeID that's no longer used. Updates tailscale/corp#29969 Updates #cleanup Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 33 ++---------- ipn/ipnlocal/local_test.go | 104 ------------------------------------- ipn/prefs.go | 22 ++++++++ ipn/prefs_test.go | 59 +++++++++++++++++++++ 4 files changed, 84 insertions(+), 134 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c54cb32d3..048bb1219 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1890,7 +1890,7 @@ func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange // and update prefs if it differs from the current one. // This includes cases where it was previously an expression but no longer is, // or where it wasn't before but now is. 
- autoExitNode, useAutoExitNode := parseAutoExitNodeID(exitNodeID) + autoExitNode, useAutoExitNode := ipn.ParseAutoExitNodeString(exitNodeID) if prefs.AutoExitNode != autoExitNode { prefs.AutoExitNode = autoExitNode anyChange = true @@ -4292,7 +4292,7 @@ func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.P if v { mp.ExitNodeIDSet = true mp.ExitNodeID = p0.InternalExitNodePrior() - if expr, ok := parseAutoExitNodeID(mp.ExitNodeID); ok { + if expr, ok := ipn.ParseAutoExitNodeString(mp.ExitNodeID); ok { mp.AutoExitNodeSet = true mp.AutoExitNode = expr mp.ExitNodeID = unresolvedExitNodeID @@ -4304,7 +4304,7 @@ func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.P mp.AutoExitNode = "" mp.InternalExitNodePriorSet = true if p0.AutoExitNode().IsSet() { - mp.InternalExitNodePrior = tailcfg.StableNodeID(autoExitNodePrefix + p0.AutoExitNode()) + mp.InternalExitNodePrior = tailcfg.StableNodeID(ipn.AutoExitNodePrefix + p0.AutoExitNode()) } else { mp.InternalExitNodePrior = p0.ExitNodeID() } @@ -7933,10 +7933,6 @@ func longLatDistance(fromLat, fromLong, toLat, toLong float64) float64 { } const ( - // autoExitNodePrefix is the prefix used in [syspolicy.ExitNodeID] values - // to indicate that the string following the prefix is an [ipn.ExitNodeExpression]. - autoExitNodePrefix = "auto:" - // unresolvedExitNodeID is a special [tailcfg.StableNodeID] value // used as an exit node ID to install a blackhole route, preventing // accidental non-exit-node usage until the [ipn.ExitNodeExpression] @@ -7947,29 +7943,6 @@ const ( unresolvedExitNodeID tailcfg.StableNodeID = "auto:any" ) -// isAutoExitNodeID reports whether the given [tailcfg.StableNodeID] is -// actually an "auto:"-prefixed [ipn.ExitNodeExpression]. -func isAutoExitNodeID(id tailcfg.StableNodeID) bool { - _, ok := parseAutoExitNodeID(id) - return ok -} - -// parseAutoExitNodeID attempts to parse the given [tailcfg.StableNodeID] -// as an [ExitNodeExpression]. -// -// It returns the parsed expression and true on success, -// or an empty string and false if the input does not appear to be -// an [ExitNodeExpression] (i.e., it doesn't start with "auto:"). -// -// It is mainly used to parse the [syspolicy.ExitNodeID] value -// when it is set to "auto:" (e.g., auto:any). 
-func parseAutoExitNodeID(id tailcfg.StableNodeID) (_ ipn.ExitNodeExpression, ok bool) { - if expr, ok := strings.CutPrefix(string(id), autoExitNodePrefix); ok && expr != "" { - return ipn.ExitNodeExpression(expr), true - } - return "", false -} - func isAllowedAutoExitNodeID(exitNodeID tailcfg.StableNodeID) bool { if exitNodeID == "" { return false // an exit node is required diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 8bc84b081..73bae7ede 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4873,110 +4873,6 @@ func TestMinLatencyDERPregion(t *testing.T) { } } -func TestShouldAutoExitNode(t *testing.T) { - tests := []struct { - name string - exitNodeIDPolicyValue string - expectedBool bool - }{ - { - name: "auto:any", - exitNodeIDPolicyValue: "auto:any", - expectedBool: true, - }, - { - name: "no auto prefix", - exitNodeIDPolicyValue: "foo", - expectedBool: false, - }, - { - name: "auto prefix but empty suffix", - exitNodeIDPolicyValue: "auto:", - expectedBool: false, - }, - { - name: "auto prefix no colon", - exitNodeIDPolicyValue: "auto", - expectedBool: false, - }, - { - name: "auto prefix unknown suffix", - exitNodeIDPolicyValue: "auto:foo", - expectedBool: true, // "auto:{unknown}" is treated as "auto:any" - }, - } - - syspolicy.RegisterWellKnownSettingsForTest(t) - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := isAutoExitNodeID(tailcfg.StableNodeID(tt.exitNodeIDPolicyValue)) - if got != tt.expectedBool { - t.Fatalf("expected %v got %v for %v policy value", tt.expectedBool, got, tt.exitNodeIDPolicyValue) - } - }) - } -} - -func TestParseAutoExitNodeID(t *testing.T) { - tests := []struct { - name string - exitNodeID string - wantOk bool - wantExpr ipn.ExitNodeExpression - }{ - { - name: "empty expr", - exitNodeID: "", - wantOk: false, - wantExpr: "", - }, - { - name: "no auto prefix", - exitNodeID: "foo", - wantOk: false, - wantExpr: "", - }, - { - name: "auto:any", - exitNodeID: "auto:any", - wantOk: true, - wantExpr: ipn.AnyExitNode, - }, - { - name: "auto:foo", - exitNodeID: "auto:foo", - wantOk: true, - wantExpr: "foo", - }, - { - name: "auto prefix but empty suffix", - exitNodeID: "auto:", - wantOk: false, - wantExpr: "", - }, - { - name: "auto prefix no colon", - exitNodeID: "auto", - wantOk: false, - wantExpr: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotExpr, gotOk := parseAutoExitNodeID(tailcfg.StableNodeID(tt.exitNodeID)) - if gotOk != tt.wantOk || gotExpr != tt.wantExpr { - if tt.wantOk { - t.Fatalf("got %v (%q); want %v (%q)", gotOk, gotExpr, tt.wantOk, tt.wantExpr) - } else { - t.Fatalf("got %v (%q); want false", gotOk, gotExpr) - } - } - }) - } -} - func TestEnableAutoUpdates(t *testing.T) { lb := newTestLocalBackend(t) diff --git a/ipn/prefs.go b/ipn/prefs.go index 77cea0493..71a80b182 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -1088,3 +1088,25 @@ const AnyExitNode ExitNodeExpression = "any" func (e ExitNodeExpression) IsSet() bool { return e != "" } + +const ( + // AutoExitNodePrefix is the prefix used in [syspolicy.ExitNodeID] values and CLI + // to indicate that the string following the prefix is an [ipn.ExitNodeExpression]. + AutoExitNodePrefix = "auto:" +) + +// ParseAutoExitNodeString attempts to parse the given string +// as an [ExitNodeExpression]. 
+// +// It returns the parsed expression and true on success, +// or an empty string and false if the input does not appear to be +// an [ExitNodeExpression] (i.e., it doesn't start with "auto:"). +// +// It is mainly used to parse the [syspolicy.ExitNodeID] value +// when it is set to "auto:" (e.g., auto:any). +func ParseAutoExitNodeString[T ~string](s T) (_ ExitNodeExpression, ok bool) { + if expr, ok := strings.CutPrefix(string(s), AutoExitNodePrefix); ok && expr != "" { + return ExitNodeExpression(expr), true + } + return "", false +} diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 268ea206c..43e360c6a 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -1129,3 +1129,62 @@ func TestPrefsDowngrade(t *testing.T) { t.Fatal("AllowSingleHosts should be true") } } + +func TestParseAutoExitNodeString(t *testing.T) { + tests := []struct { + name string + exitNodeID string + wantOk bool + wantExpr ExitNodeExpression + }{ + { + name: "empty expr", + exitNodeID: "", + wantOk: false, + wantExpr: "", + }, + { + name: "no auto prefix", + exitNodeID: "foo", + wantOk: false, + wantExpr: "", + }, + { + name: "auto:any", + exitNodeID: "auto:any", + wantOk: true, + wantExpr: AnyExitNode, + }, + { + name: "auto:foo", + exitNodeID: "auto:foo", + wantOk: true, + wantExpr: "foo", + }, + { + name: "auto prefix but empty suffix", + exitNodeID: "auto:", + wantOk: false, + wantExpr: "", + }, + { + name: "auto prefix no colon", + exitNodeID: "auto", + wantOk: false, + wantExpr: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotExpr, gotOk := ParseAutoExitNodeString(tt.exitNodeID) + if gotOk != tt.wantOk || gotExpr != tt.wantExpr { + if tt.wantOk { + t.Fatalf("got %v (%q); want %v (%q)", gotOk, gotExpr, tt.wantOk, tt.wantExpr) + } else { + t.Fatalf("got %v (%q); want false", gotOk, gotExpr) + } + } + }) + } +} From c5fdf9e1db149d5c205ec971ffe9ae6d487833a4 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 9 Jul 2025 12:07:44 -0500 Subject: [PATCH 1067/1708] cmd/tailscale/cli: add support for tailscale {up,set} --exit-node=auto:any If the specified exit node string starts with "auto:" (i.e., can be parsed as an ipn.ExitNodeExpression), we update ipn.Prefs.AutoExitNode instead of ipn.Prefs.ExitNodeID. Fixes #16459 Signed-off-by: Nick Khyl --- cmd/tailscale/cli/cli_test.go | 24 ++++++++++++++++++++++-- cmd/tailscale/cli/set.go | 7 +++++-- cmd/tailscale/cli/up.go | 9 +++++++-- 3 files changed, 34 insertions(+), 6 deletions(-) diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 48121c7d9..5dd4fa234 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -972,8 +972,7 @@ func TestPrefFlagMapping(t *testing.T) { // No CLI flag for this. continue case "AutoExitNode": - // TODO(nickkhyl): should be handled by tailscale {set,up} --exit-node. - // See tailscale/tailscale#16459. + // Handled by tailscale {set,up} --exit-node=auto:any. 
continue } t.Errorf("unexpected new ipn.Pref field %q is not handled by up.go (see addPrefFlagMapping and checkForAccidentalSettingReverts)", prefName) @@ -1338,6 +1337,27 @@ func TestUpdatePrefs(t *testing.T) { } }, }, + { + name: "auto_exit_node", + flags: []string{"--exit-node=auto:any"}, + curPrefs: &ipn.Prefs{ + ControlURL: ipn.DefaultControlURL, + CorpDNS: true, // enabled by [ipn.NewPrefs] by default + NetfilterMode: preftype.NetfilterOn, // enabled by [ipn.NewPrefs] by default + }, + wantJustEditMP: &ipn.MaskedPrefs{ + WantRunningSet: true, // enabled by default for tailscale up + AutoExitNodeSet: true, + ExitNodeIDSet: true, // we want ExitNodeID cleared + ExitNodeIPSet: true, // same for ExitNodeIP + }, + env: upCheckEnv{backendState: "Running"}, + checkUpdatePrefsMutations: func(t *testing.T, newPrefs *ipn.Prefs) { + if newPrefs.AutoExitNode != ipn.AnyExitNode { + t.Errorf("AutoExitNode: got %q; want %q", newPrefs.AutoExitNode, ipn.AnyExitNode) + } + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 66e74d77f..f1b21995e 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -73,7 +73,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf.StringVar(&setArgs.profileName, "nickname", "", "nickname for the current account") setf.BoolVar(&setArgs.acceptRoutes, "accept-routes", acceptRouteDefault(goos), "accept routes advertised by other Tailscale nodes") setf.BoolVar(&setArgs.acceptDNS, "accept-dns", true, "accept DNS configuration from the admin panel") - setf.StringVar(&setArgs.exitNodeIP, "exit-node", "", "Tailscale exit node (IP or base name) for internet traffic, or empty string to not use an exit node") + setf.StringVar(&setArgs.exitNodeIP, "exit-node", "", "Tailscale exit node (IP, base name, or auto:any) for internet traffic, or empty string to not use an exit node") setf.BoolVar(&setArgs.exitNodeAllowLANAccess, "exit-node-allow-lan-access", false, "Allow direct access to the local network when routing traffic via an exit node") setf.BoolVar(&setArgs.shieldsUp, "shields-up", false, "don't allow incoming connections") setf.BoolVar(&setArgs.runSSH, "ssh", false, "run an SSH server, permitting access per tailnet admin's declared policy") @@ -173,7 +173,10 @@ func runSet(ctx context.Context, args []string) (retErr error) { } if setArgs.exitNodeIP != "" { - if err := maskedPrefs.Prefs.SetExitNodeIP(setArgs.exitNodeIP, st); err != nil { + if expr, useAutoExitNode := ipn.ParseAutoExitNodeString(setArgs.exitNodeIP); useAutoExitNode { + maskedPrefs.AutoExitNode = expr + maskedPrefs.AutoExitNodeSet = true + } else if err := maskedPrefs.Prefs.SetExitNodeIP(setArgs.exitNodeIP, st); err != nil { var e ipn.ExitNodeLocalIPError if errors.As(err, &e) { return fmt.Errorf("%w; did you mean --advertise-exit-node?", err) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 37cdab754..1863957d3 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -100,7 +100,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { upf.BoolVar(&upArgs.acceptRoutes, "accept-routes", acceptRouteDefault(goos), "accept routes advertised by other Tailscale nodes") upf.BoolVar(&upArgs.acceptDNS, "accept-dns", true, "accept DNS configuration from the admin panel") upf.Var(notFalseVar{}, "host-routes", hidden+"install host routes to other Tailscale nodes (must be true as of Tailscale 1.67+)") - upf.StringVar(&upArgs.exitNodeIP, 
"exit-node", "", "Tailscale exit node (IP or base name) for internet traffic, or empty string to not use an exit node") + upf.StringVar(&upArgs.exitNodeIP, "exit-node", "", "Tailscale exit node (IP, base name, or auto:any) for internet traffic, or empty string to not use an exit node") upf.BoolVar(&upArgs.exitNodeAllowLANAccess, "exit-node-allow-lan-access", false, "Allow direct access to the local network when routing traffic via an exit node") upf.BoolVar(&upArgs.shieldsUp, "shields-up", false, "don't allow incoming connections") upf.BoolVar(&upArgs.runSSH, "ssh", false, "run an SSH server, permitting access per tailnet admin's declared policy") @@ -278,7 +278,9 @@ func prefsFromUpArgs(upArgs upArgsT, warnf logger.Logf, st *ipnstate.Status, goo prefs.NetfilterMode = preftype.NetfilterOff } if upArgs.exitNodeIP != "" { - if err := prefs.SetExitNodeIP(upArgs.exitNodeIP, st); err != nil { + if expr, useAutoExitNode := ipn.ParseAutoExitNodeString(upArgs.exitNodeIP); useAutoExitNode { + prefs.AutoExitNode = expr + } else if err := prefs.SetExitNodeIP(upArgs.exitNodeIP, st); err != nil { var e ipn.ExitNodeLocalIPError if errors.As(err, &e) { return nil, fmt.Errorf("%w; did you mean --advertise-exit-node?", err) @@ -408,6 +410,9 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus if env.upArgs.reset { visitFlags = env.flagSet.VisitAll } + if prefs.AutoExitNode.IsSet() { + justEditMP.AutoExitNodeSet = true + } visitFlags(func(f *flag.Flag) { updateMaskedPrefsFromUpOrSetFlag(justEditMP, f.Name) }) From 21a4058ec71878373d68ef6c983e81dda690e441 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 8 Jul 2025 18:35:32 -0500 Subject: [PATCH 1068/1708] ipn/ipnlocal: add test to verify handling of unknown auto exit node expressions We already check this for cases where ipn.Prefs.AutoExitNode is configured via syspolicy. Configuring it directly through EditPrefs should behave the same, so we add a test for that as well. Additionally, we clarify the implementation and future extensibility in (*LocalBackend).resolveAutoExitNodeLocked, where the AutoExitNode is actually enforced. Updates tailscale/corp#29969 Updates #cleanup Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 8 ++++++++ ipn/ipnlocal/local_test.go | 17 +++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 048bb1219..55730489e 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2071,6 +2071,14 @@ func mutationsAreWorthyOfTellingIPNBus(muts []netmap.NodeMutation) bool { // // b.mu must be held. func (b *LocalBackend) resolveAutoExitNodeLocked(prefs *ipn.Prefs) (prefsChanged bool) { + // As of 2025-07-08, the only supported auto exit node expression is [ipn.AnyExitNode]. + // + // However, to maintain forward compatibility with future auto exit node expressions, + // we treat any non-empty AutoExitNode as [ipn.AnyExitNode]. + // + // If and when we support additional auto exit node expressions, this method should be updated + // to handle them appropriately, while still falling back to [ipn.AnyExitNode] or a more appropriate + // default for unknown (or partially supported) expressions. 
if !prefs.AutoExitNode.IsSet() { return false } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 73bae7ede..e70e5ad2a 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1002,6 +1002,23 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "foo", }, }, + { + name: "auto-foo-via-edit-prefs", // set auto exit node via EditPrefs with an unknown/unsupported expression + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "foo"}, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" + AutoExitNode: "foo", + }, + }, { name: "auto-any-via-policy/toggle-off", // cannot toggle off the exit node if it was set via syspolicy prefs: ipn.Prefs{ From ff1803158a60c37128557f40c643f3839bc5609a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 9 Jul 2025 13:01:32 -0500 Subject: [PATCH 1069/1708] ipn/ipnlocal: change order of exit node refresh and netmap update so that clients receive the new netmap first If the GUI receives a new exit node ID before the new netmap, it may treat the node as offline or invalid if the previous netmap didn't include the peer at all, or if the peer was offline or not advertised as an exit node. This may result in briefly issuing and dismissing a warning, or a similar issue, which isn't ideal. In this PR, we change the operation order to send the new netmap to clients first before selecting the new exit node and notifying them of the Exit Node change. Updates tailscale/corp#30252 (an old issue discovered during testing this) Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 13 ++++++--- ipn/ipnlocal/local_test.go | 54 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 3 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 55730489e..48eceb36c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1709,9 +1709,6 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control // Now complete the lock-free parts of what we started while locked. if st.NetMap != nil { - // Check and update the exit node if needed, now that we have a new netmap. - b.RefreshExitNode() - if envknob.NoLogsNoSupport() && st.NetMap.HasCap(tailcfg.CapabilityDataPlaneAuditLogs) { msg := "tailnet requires logging to be enabled. Remove --no-logs-no-support from tailscaled command line." b.health.SetLocalLogConfigHealth(errors.New(msg)) @@ -1751,6 +1748,16 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.health.SetDERPMap(st.NetMap.DERPMap) b.send(ipn.Notify{NetMap: st.NetMap}) + + // Check and update the exit node if needed, now that we have a new netmap. + // + // This must happen after the netmap change is sent via [ipn.Notify], + // so the GUI can correctly display the exit node if it has changed + // since the last netmap was sent. + // + // Otherwise, it might briefly show the exit node as offline and display a warning, + // if the node wasn't online or wasn't advertising default routes in the previous netmap. 
+ b.RefreshExitNode() } if st.URL != "" { b.logf("Received auth URL: %.20v...", st.URL) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index e70e5ad2a..bb7f433c0 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1407,6 +1407,60 @@ func TestPrefsChangeDisablesExitNode(t *testing.T) { } } +func TestExitNodeNotifyOrder(t *testing.T) { + const controlURL = "https://localhost:1/" + + report := &netcheck.Report{ + RegionLatency: map[int]time.Duration{ + 1: 5 * time.Millisecond, + 2: 10 * time.Millisecond, + }, + PreferredDERP: 1, + } + + exitNode1 := makeExitNode(1, withName("node-1"), withDERP(1), withAddresses(netip.MustParsePrefix("100.64.1.1/32"))) + exitNode2 := makeExitNode(2, withName("node-2"), withDERP(2), withAddresses(netip.MustParsePrefix("100.64.1.2/32"))) + selfNode := makeExitNode(3, withName("node-3"), withDERP(1), withAddresses(netip.MustParsePrefix("100.64.1.3/32"))) + clientNetmap := buildNetmapWithPeers(selfNode, exitNode1, exitNode2) + + lb := newTestLocalBackend(t) + lb.sys.MagicSock.Get().SetLastNetcheckReportForTest(lb.ctx, report) + lb.SetPrefsForTest(&ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: ipn.AnyExitNode, + }) + + nw := newNotificationWatcher(t, lb, ipnauth.Self) + + // Updating the netmap should trigger both a netmap notification + // and an exit node ID notification (since an exit node is selected). + // The netmap notification should be sent first. + nw.watch(0, []wantedNotification{ + wantNetmapNotify(clientNetmap), + wantExitNodeIDNotify(exitNode1.StableID()), + }) + lb.SetControlClientStatus(lb.cc, controlclient.Status{NetMap: clientNetmap}) + nw.check() +} + +func wantNetmapNotify(want *netmap.NetworkMap) wantedNotification { + return wantedNotification{ + name: "Netmap", + cond: func(t testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool { + return n.NetMap == want + }, + } +} + +func wantExitNodeIDNotify(want tailcfg.StableNodeID) wantedNotification { + return wantedNotification{ + name: fmt.Sprintf("ExitNodeID-%s", want), + cond: func(_ testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool { + return n.Prefs != nil && n.Prefs.Valid() && n.Prefs.ExitNodeID() == want + }, + } +} + func TestInternalAndExternalInterfaces(t *testing.T) { type interfacePrefix struct { i netmon.Interface From d40b25326ccdb111ce4e99893164dd6742328a52 Mon Sep 17 00:00:00 2001 From: Dylan Bargatze Date: Wed, 9 Jul 2025 18:06:58 -0400 Subject: [PATCH 1070/1708] tailcfg, wgengine/magicsock: disable all UDP relay usage if disable-relay-client is set (#16492) If the NodeAttrDisableRelayClient node attribute is set, ensures that a node cannot allocate endpoints on a UDP relay server itself, and cannot use newly-discovered paths (via disco/CallMeMaybeVia) that traverse a UDP relay server. Fixes tailscale/corp#30180 Signed-off-by: Dylan Bargatze --- tailcfg/tailcfg.go | 18 ++++++++++-------- wgengine/magicsock/magicsock.go | 10 +++++++++- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index d97f60a8a..6c88217de 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2607,14 +2607,16 @@ const ( // only needs to be present in [NodeCapMap] to take effect. NodeAttrDisableRelayServer NodeCapability = "disable-relay-server" - // NodeAttrDisableRelayClient prevents the node from allocating UDP relay - // server endpoints itself; the node may still bind into and relay traffic - // using endpoints allocated by its peers. 
This attribute can be added to - // the node dynamically; if added while the node is already running, the - // node will be unable to allocate UDP relay server endpoints after it next - // updates its network map. There are no expected values for this key in - // [NodeCapMap]; the key only needs to be present in [NodeCapMap] to take - // effect. + // NodeAttrDisableRelayClient prevents the node from both allocating UDP + // relay server endpoints itself, and from using endpoints allocated by + // its peers. This attribute can be added to the node dynamically; if added + // while the node is already running, the node will be unable to allocate + // endpoints after it next updates its network map, and will be immediately + // unable to use new paths via a UDP relay server. Setting this attribute + // dynamically does not remove any existing paths, including paths that + // traverse a UDP relay server. There are no expected values for this key + // in [NodeCapMap]; the key only needs to be present in [NodeCapMap] to + // take effect. NodeAttrDisableRelayClient NodeCapability = "disable-relay-client" // NodeAttrMagicDNSPeerAAAA is a capability that tells the node's MagicDNS diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 1978867fa..582e74c8b 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -355,7 +355,7 @@ type Conn struct { self tailcfg.NodeView // from last onNodeViewsUpdate peers views.Slice[tailcfg.NodeView] // from last onNodeViewsUpdate, sorted by Node.ID; Note: [netmap.NodeMutation]'s rx'd in onNodeMutationsUpdate are never applied filt *filter.Filter // from last onFilterUpdate - relayClientEnabled bool // whether we can allocate UDP relay endpoints on UDP relay servers + relayClientEnabled bool // whether we can allocate UDP relay endpoints on UDP relay servers or receive CallMeMaybeVia messages from peers lastFlags debugFlags // at time of last onNodeViewsUpdate privateKey key.NodePrivate // WireGuard private key for this node everHadKey bool // whether we ever had a non-zero private key @@ -2149,6 +2149,14 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake c.logf("magicsock: disco: ignoring %s from %v; %v is unknown", msgType, sender.ShortString(), derpNodeSrc.ShortString()) return } + // If the "disable-relay-client" node attr is set for this node, it + // can't be a UDP relay client, so drop any CallMeMaybeVia messages it + // receives. 
+ if isVia && !c.relayClientEnabled { + c.logf("magicsock: disco: ignoring %s from %v; disable-relay-client node attr is set", msgType, sender.ShortString()) + return + } + ep.mu.Lock() relayCapable := ep.relayCapable lastBest := ep.bestAddr From ae8641735df2844b4d5d0abcd25c074d297a013d Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 9 Jul 2025 15:17:51 -0700 Subject: [PATCH 1071/1708] cmd/tailscale/cli,ipn/ipnstate,wgengine/magicsock: label peer-relay (#16510) Updates tailscale/corp#30033 Signed-off-by: Jordan Whited --- cmd/tailscale/cli/status.go | 4 +++- ipn/ipnstate/ipnstate.go | 10 +++++++--- wgengine/magicsock/endpoint.go | 9 +++++---- wgengine/magicsock/magicsock.go | 2 +- 4 files changed, 16 insertions(+), 9 deletions(-) diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index e4dccc247..85679a7de 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -183,10 +183,12 @@ func runStatus(ctx context.Context, args []string) error { } else if ps.ExitNodeOption { f("offers exit node; ") } - if relay != "" && ps.CurAddr == "" { + if relay != "" && ps.CurAddr == "" && ps.PeerRelay == "" { f("relay %q", relay) } else if ps.CurAddr != "" { f("direct %s", ps.CurAddr) + } else if ps.PeerRelay != "" { + f("peer-relay %s", ps.PeerRelay) } if !ps.Online { f("; offline") diff --git a/ipn/ipnstate/ipnstate.go b/ipn/ipnstate/ipnstate.go index 89c6d7e24..fdfd4e334 100644 --- a/ipn/ipnstate/ipnstate.go +++ b/ipn/ipnstate/ipnstate.go @@ -251,9 +251,10 @@ type PeerStatus struct { PrimaryRoutes *views.Slice[netip.Prefix] `json:",omitempty"` // Endpoints: - Addrs []string - CurAddr string // one of Addrs, or unique if roaming - Relay string // DERP region + Addrs []string + CurAddr string // one of Addrs, or unique if roaming + Relay string // DERP region + PeerRelay string // peer relay address (ip:port:vni) RxBytes int64 TxBytes int64 @@ -451,6 +452,9 @@ func (sb *StatusBuilder) AddPeer(peer key.NodePublic, st *PeerStatus) { if v := st.Relay; v != "" { e.Relay = v } + if v := st.PeerRelay; v != "" { + e.PeerRelay = v + } if v := st.UserID; v != 0 { e.UserID = v } diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 4780c7f37..bfafec5ab 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1961,10 +1961,11 @@ func (de *endpoint) populatePeerStatus(ps *ipnstate.PeerStatus) { ps.Active = now.Sub(de.lastSendExt) < sessionActiveTimeout if udpAddr, derpAddr, _ := de.addrForSendLocked(now); udpAddr.ap.IsValid() && !derpAddr.IsValid() { - // TODO(jwhited): if udpAddr.vni.isSet() we are using a Tailscale client - // as a UDP relay; update PeerStatus and its interpretation by - // "tailscale status" to make this clear. - ps.CurAddr = udpAddr.String() + if udpAddr.vni.isSet() { + ps.PeerRelay = udpAddr.String() + } else { + ps.CurAddr = udpAddr.String() + } } } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 582e74c8b..8743fe9cc 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3437,7 +3437,7 @@ func (c *Conn) onNodeMutationsUpdate(update NodeMutationsUpdate) { } } -// UpdateStatus implements the interface nede by ipnstate.StatusBuilder. +// UpdateStatus implements the interface needed by ipnstate.StatusBuilder. // // This method adds in the magicsock-specific information only. Most // of the status is otherwise populated by LocalBackend. 
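[Editor's note] The patch above exposes the peer-relay path through ipnstate.PeerStatus and in "tailscale status" output. As a reading aid, here is a minimal, self-contained sketch of how a consumer of PeerStatus could mirror the CLI's precedence (a direct address wins, then a peer relay, then the DERP home region). The helper name pathSummary and the sample address in main are illustrative only and are not part of the patch.

package main

import (
	"fmt"

	"tailscale.com/ipn/ipnstate"
)

// pathSummary mirrors the precedence "tailscale status" applies when
// rendering a peer's path: direct beats peer-relay, which beats DERP.
func pathSummary(ps *ipnstate.PeerStatus) string {
	switch {
	case ps.CurAddr != "":
		return "direct " + ps.CurAddr
	case ps.PeerRelay != "":
		return "peer-relay " + ps.PeerRelay // formatted as ip:port:vni
	case ps.Relay != "":
		return fmt.Sprintf("relay %q", ps.Relay) // DERP region
	}
	return "no active path"
}

func main() {
	// Example values only; a real PeerStatus comes from the LocalAPI status call.
	fmt.Println(pathSummary(&ipnstate.PeerStatus{PeerRelay: "100.64.0.7:40000:77"}))
}
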
From 6a0fad1e1044f567cb4d00608d1b1f00cef954c1 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 9 Jul 2025 20:02:00 -0700 Subject: [PATCH 1072/1708] wgengine/magicsock: don't peer relay if NodeAttrOnlyTCP443 is set (#16517) Updates tailscale/corp#30138 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 1 + 1 file changed, 1 insertion(+) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 8743fe9cc..18a6bbceb 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2704,6 +2704,7 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { relayClientEnabled := update.SelfNode.Valid() && !update.SelfNode.HasCap(tailcfg.NodeAttrDisableRelayClient) && + !update.SelfNode.HasCap(tailcfg.NodeAttrOnlyTCP443) && envknob.UseWIPCode() c.mu.Lock() From fbc4c34cf7377f4ddb1d95163085e2b27c845018 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Thu, 10 Jul 2025 03:04:29 -0400 Subject: [PATCH 1073/1708] ipn/localapi: do not break client on event marshalling errors (#16503) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Errors were mashalled without the correct newlines. Also, they could generally be mashalled with more data, so an intermediate was introduced to make them slightly nicer to look at. Updates #15160 Signed-off-by: Claus Lensbøl --- ipn/localapi/localapi.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 60ed89b3b..cd59c54e0 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -919,6 +919,11 @@ func (h *Handler) serveDebugPortmap(w http.ResponseWriter, r *http.Request) { } } +// EventError provides the JSON encoding of internal errors from event processing. +type EventError struct { + Error string +} + // serveDebugBusEvents taps into the tailscaled/utils/eventbus and streams // events to the client. func (h *Handler) serveDebugBusEvents(w http.ResponseWriter, r *http.Request) { @@ -971,7 +976,16 @@ func (h *Handler) serveDebugBusEvents(w http.ResponseWriter, r *http.Request) { } if msg, err := json.Marshal(data); err != nil { - fmt.Fprintf(w, `{"Event":"[ERROR] failed to marshal JSON for %T"}\n`, event.Event) + data.Event = EventError{Error: fmt.Sprintf( + "failed to marshal JSON for %T", event.Event, + )} + if errMsg, err := json.Marshal(data); err != nil { + fmt.Fprintf(w, + `{"Count": %d, "Event":"[ERROR] failed to marshal JSON for %T\n"}`, + i, event.Event) + } else { + w.Write(errMsg) + } } else { w.Write(msg) } From cf0460b9da23f70fb8442baa0a1bca1df32ba2c1 Mon Sep 17 00:00:00 2001 From: David Bond Date: Thu, 10 Jul 2025 14:33:13 +0100 Subject: [PATCH 1074/1708] cmd/k8s-operator: allow letsencrypt staging on k8s proxies (#16521) This commit modifies the operator to detect the usage of k8s-apiserver type proxy groups that wish to use the letsencrypt staging directory and apply the appropriate environment variable to the statefulset it produces. 
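Read in isolation, the new check in applyProxyClassToStatefulSet boils down to the predicate sketched below. This is lifted out of context: proxyTypeIngressResource and the tsapi ProxyGroup type constants are the identifiers used in cmd/k8s-operator, and the standalone function form is illustrative rather than code from this change.

// usesLetsEncrypt reports whether a proxy of the given type provisions its
// certificates via Let's Encrypt and therefore honors the ProxyClass
// UseLetsEncryptStagingEnvironment setting.
func usesLetsEncrypt(proxyType string) bool {
	switch proxyType {
	case proxyTypeIngressResource, // proxies created for Ingress resources
		string(tsapi.ProxyGroupTypeIngress),
		string(tsapi.ProxyGroupTypeKubernetesAPIServer):
		return true
	default:
		return false
	}
}

When this holds and UseLetsEncryptStagingEnvironment is set, the main container's env gains TS_DEBUG_ACME_DIRECTORY_URL pointing at the Let's Encrypt staging directory, as shown in the diff below.
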
Updates #13358 Signed-off-by: David Bond --- cmd/k8s-operator/sts.go | 23 +++++++++++++++-------- cmd/k8s-operator/sts_test.go | 5 +++++ 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index fbb271800..df12554e0 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -761,14 +761,21 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, enableEndpoints(ss, metricsEnabled, debugEnabled) } } - if pc.Spec.UseLetsEncryptStagingEnvironment && (stsCfg.proxyType == proxyTypeIngressResource || stsCfg.proxyType == string(tsapi.ProxyGroupTypeIngress)) { - for i, c := range ss.Spec.Template.Spec.Containers { - if isMainContainer(&c) { - ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{ - Name: "TS_DEBUG_ACME_DIRECTORY_URL", - Value: letsEncryptStagingEndpoint, - }) - break + + if stsCfg != nil { + usesLetsEncrypt := stsCfg.proxyType == proxyTypeIngressResource || + stsCfg.proxyType == string(tsapi.ProxyGroupTypeIngress) || + stsCfg.proxyType == string(tsapi.ProxyGroupTypeKubernetesAPIServer) + + if pc.Spec.UseLetsEncryptStagingEnvironment && usesLetsEncrypt { + for i, c := range ss.Spec.Template.Spec.Containers { + if isMainContainer(&c) { + ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{ + Name: "TS_DEBUG_ACME_DIRECTORY_URL", + Value: letsEncryptStagingEndpoint, + }) + break + } } } } diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index 35c512c8c..afa791ccc 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -61,6 +61,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { // Setup proxyClassAllOpts := &tsapi.ProxyClass{ Spec: tsapi.ProxyClassSpec{ + UseLetsEncryptStagingEnvironment: true, StatefulSet: &tsapi.StatefulSet{ Labels: tsapi.Labels{"foo": "bar"}, Annotations: map[string]string{"foo.io/bar": "foo"}, @@ -292,6 +293,10 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { if diff := cmp.Diff(gotSS, wantSS); diff != "" { t.Errorf("Unexpected result applying ProxyClass with metrics enabled to a StatefulSet (-got +want):\n%s", diff) } + + // 8. A Kubernetes API proxy with letsencrypt staging enabled + gotSS = applyProxyClassToStatefulSet(proxyClassAllOpts, nonUserspaceProxySS.DeepCopy(), &tailscaleSTSConfig{proxyType: string(tsapi.ProxyGroupTypeKubernetesAPIServer)}, zl.Sugar()) + verifyEnvVar(t, gotSS, "TS_DEBUG_ACME_DIRECTORY_URL", letsEncryptStagingEndpoint) } func Test_mergeStatefulSetLabelsOrAnnots(t *testing.T) { From 2b665c370c50a85f65edf4b9cb15c41bc45a8008 Mon Sep 17 00:00:00 2001 From: David Bond Date: Thu, 10 Jul 2025 14:33:30 +0100 Subject: [PATCH 1075/1708] cmd/{k8s-operator,k8s-proxy}: allow setting login server url (#16504) This commit modifies the k8s proxy application configuration to include a new field named `ServerURL` which, when set, modifies the tailscale coordination server used by the proxy. This works in the same way as the operator and the proxies it deploys. If unset, the default coordination server is used. 
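The field is optional, so consumers must handle its absence. A minimal sketch of that handling follows; the helper controlURLFromConfig is illustrative and not part of this change, and it assumes (as stated above) that an empty ControlURL makes tsnet fall back to the default coordination server.

package main

import (
	"fmt"

	"tailscale.com/kube/k8s-proxy/conf"
)

// controlURLFromConfig returns the coordination server URL to use, or the
// empty string, which tsnet.Server treats as "use the default control server".
func controlURLFromConfig(c *conf.ConfigV1Alpha1) string {
	if c != nil && c.ServerURL != nil {
		return *c.ServerURL
	}
	return ""
}

func main() {
	url := "https://controlplane.example.com" // example value only
	fmt.Println(controlURLFromConfig(&conf.ConfigV1Alpha1{ServerURL: &url}))
	fmt.Println(controlURLFromConfig(nil) == "") // true: default control server
}
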
Updates https://github.com/tailscale/tailscale/issues/13358 Signed-off-by: David Bond --- cmd/k8s-operator/proxygroup.go | 5 +++++ cmd/k8s-proxy/k8s-proxy.go | 5 +++++ kube/k8s-proxy/conf/conf.go | 1 + 3 files changed, 11 insertions(+) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 3dfb004f1..7b8a0754e 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -815,6 +815,11 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p }, }, } + + if r.loginServer != "" { + cfg.ServerURL = &r.loginServer + } + cfgB, err := json.Marshal(cfg) if err != nil { return nil, fmt.Errorf("error marshalling k8s-proxy config: %w", err) diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index 6e7eadb73..81a5a8483 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -91,6 +91,11 @@ func run(logger *zap.SugaredLogger) error { Store: st, AuthKey: authKey, } + + if cfg.Parsed.ServerURL != nil { + ts.ControlURL = *cfg.Parsed.ServerURL + } + if cfg.Parsed.Hostname != nil { ts.Hostname = *cfg.Parsed.Hostname } diff --git a/kube/k8s-proxy/conf/conf.go b/kube/k8s-proxy/conf/conf.go index 6b0e853c5..2901e7b44 100644 --- a/kube/k8s-proxy/conf/conf.go +++ b/kube/k8s-proxy/conf/conf.go @@ -53,6 +53,7 @@ type ConfigV1Alpha1 struct { LogLevel *string `json:",omitempty"` // "debug", "info". Defaults to "info". App *string `json:",omitempty"` // e.g. kubetypes.AppProxyGroupKubeAPIServer KubeAPIServer *KubeAPIServer `json:",omitempty"` // Config specific to the API Server proxy. + ServerURL *string `json:",omitempty"` // URL of the Tailscale coordination server. } type KubeAPIServer struct { From d0cafc0a6776397d9a346dde60962c679062a21c Mon Sep 17 00:00:00 2001 From: David Bond Date: Thu, 10 Jul 2025 15:53:01 +0100 Subject: [PATCH 1076/1708] cmd/{k8s-operator,k8s-proxy}: apply accept-routes configuration to k8s-proxy (#16522) This commit modifies the k8s-operator and k8s-proxy to support passing down the accept-routes configuration from the proxy class as a configuration value read and used by the k8s-proxy when ran as a distinct container managed by the operator. Updates #13358 Signed-off-by: David Bond --- cmd/k8s-operator/proxygroup.go | 4 ++++ cmd/k8s-proxy/k8s-proxy.go | 19 +++++++++++++++---- kube/k8s-proxy/conf/conf.go | 1 + 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 7b8a0754e..66b6c96e3 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -820,6 +820,10 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p cfg.ServerURL = &r.loginServer } + if proxyClass != nil && proxyClass.Spec.TailscaleConfig != nil { + cfg.AcceptRoutes = &proxyClass.Spec.TailscaleConfig.AcceptRoutes + } + cfgB, err := json.Marshal(cfg) if err != nil { return nil, fmt.Errorf("error marshalling k8s-proxy config: %w", err) diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index 81a5a8483..7dcf6c2ab 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -114,12 +114,13 @@ func run(logger *zap.SugaredLogger) error { group, groupCtx := errgroup.WithContext(ctx) + lc, err := ts.LocalClient() + if err != nil { + return fmt.Errorf("error getting local client: %w", err) + } + // Setup for updating state keys. 
if podUID != "" { - lc, err := ts.LocalClient() - if err != nil { - return fmt.Errorf("error getting local client: %w", err) - } w, err := lc.WatchIPNBus(groupCtx, ipn.NotifyInitialNetMap) if err != nil { return fmt.Errorf("error watching IPN bus: %w", err) @@ -135,6 +136,16 @@ func run(logger *zap.SugaredLogger) error { }) } + if cfg.Parsed.AcceptRoutes != nil { + _, err = lc.EditPrefs(groupCtx, &ipn.MaskedPrefs{ + RouteAllSet: true, + Prefs: ipn.Prefs{RouteAll: *cfg.Parsed.AcceptRoutes}, + }) + if err != nil { + return fmt.Errorf("error editing prefs: %w", err) + } + } + // Setup for the API server proxy. restConfig, err := getRestConfig(logger) if err != nil { diff --git a/kube/k8s-proxy/conf/conf.go b/kube/k8s-proxy/conf/conf.go index 2901e7b44..fba4a39a4 100644 --- a/kube/k8s-proxy/conf/conf.go +++ b/kube/k8s-proxy/conf/conf.go @@ -54,6 +54,7 @@ type ConfigV1Alpha1 struct { App *string `json:",omitempty"` // e.g. kubetypes.AppProxyGroupKubeAPIServer KubeAPIServer *KubeAPIServer `json:",omitempty"` // Config specific to the API Server proxy. ServerURL *string `json:",omitempty"` // URL of the Tailscale coordination server. + AcceptRoutes *bool `json:",omitempty"` // Accepts routes advertised by other Tailscale nodes. } type KubeAPIServer struct { From f9bfd8118ae85b5782a29b442acb9ec3764caacb Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 10 Jul 2025 12:41:14 -0700 Subject: [PATCH 1077/1708] wgengine/magicsock: resolve epAddr collisions across peer relay conns (#16526) Updates tailscale/corp#30042 Updates tailscale/corp#29422 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 9 +++-- wgengine/magicsock/magicsock.go | 57 +++++++++++++++++++++++----- wgengine/magicsock/magicsock_test.go | 40 +++++++++++++++++++ 3 files changed, 94 insertions(+), 12 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index bfafec5ab..c4ca81296 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -499,8 +499,9 @@ func (de *endpoint) initFakeUDPAddr() { } // noteRecvActivity records receive activity on de, and invokes -// Conn.noteRecvActivity no more than once every 10s. -func (de *endpoint) noteRecvActivity(src epAddr, now mono.Time) { +// Conn.noteRecvActivity no more than once every 10s, returning true if it +// was called, otherwise false. +func (de *endpoint) noteRecvActivity(src epAddr, now mono.Time) bool { if de.isWireguardOnly { de.mu.Lock() de.bestAddr.ap = src.ap @@ -524,10 +525,12 @@ func (de *endpoint) noteRecvActivity(src epAddr, now mono.Time) { de.lastRecvWG.StoreAtomic(now) if de.c.noteRecvActivity == nil { - return + return false } de.c.noteRecvActivity(de.publicKey) + return true } + return false } func (de *endpoint) discoShort() string { diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 18a6bbceb..6ce91902d 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -27,6 +27,7 @@ import ( "time" "github.com/tailscale/wireguard-go/conn" + "github.com/tailscale/wireguard-go/device" "go4.org/mem" "golang.org/x/net/ipv6" @@ -1632,6 +1633,16 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu } } +// looksLikeInitiationMsg returns true if b looks like a WireGuard initiation +// message, otherwise it returns false. 
+func looksLikeInitiationMsg(b []byte) bool { + if len(b) == device.MessageInitiationSize && + binary.BigEndian.Uint32(b) == device.MessageInitiationType { + return true + } + return false +} + // receiveIP is the shared bits of ReceiveIPv4 and ReceiveIPv6. // // size is the length of 'b' to report up to wireguard-go (only relevant if @@ -1717,10 +1728,18 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach } now := mono.Now() ep.lastRecvUDPAny.StoreAtomic(now) - ep.noteRecvActivity(src, now) + connNoted := ep.noteRecvActivity(src, now) if stats := c.stats.Load(); stats != nil { stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, len(b)) } + if src.vni.isSet() && (connNoted || looksLikeInitiationMsg(b)) { + // connNoted is periodic, but we also want to verify if the peer is who + // we believe for all initiation messages, otherwise we could get + // unlucky and fail to JIT configure the "correct" peer. + // TODO(jwhited): relax this to include direct connections + // See http://go/corp/29422 & http://go/corp/30042 + return &lazyEndpoint{c: c, maybeEP: ep, src: src}, size, true + } return ep, size, true } @@ -3787,11 +3806,19 @@ func (c *Conn) SetLastNetcheckReportForTest(ctx context.Context, report *netchec // decrypts it. So we implement the [conn.InitiationAwareEndpoint] and // [conn.PeerAwareEndpoint] interfaces, to allow WireGuard to tell us who it is // later, just-in-time to configure the peer, and set the associated [epAddr] -// in the [peerMap]. Future receives on the associated [epAddr] will then be -// resolvable directly to an [*endpoint]. +// in the [peerMap]. Future receives on the associated [epAddr] will then +// resolve directly to an [*endpoint]. +// +// We also sometimes (see [Conn.receiveIP]) return a [*lazyEndpoint] to +// wireguard-go to verify an [epAddr] resolves to the [*endpoint] (maybeEP) we +// believe it to be, to resolve [epAddr] collisions across peers. [epAddr] +// collisions have a higher chance of occurrence for packets received over peer +// relays versus direct connections, as peer relay connections do not upsert +// into [peerMap] around disco packet reception, but direct connections do. type lazyEndpoint struct { - c *Conn - src epAddr + c *Conn + maybeEP *endpoint // or nil if unknown + src epAddr } var _ conn.InitiationAwareEndpoint = (*lazyEndpoint)(nil) @@ -3812,6 +3839,9 @@ var _ conn.Endpoint = (*lazyEndpoint)(nil) // wireguard-go peer (de)configuration. func (le *lazyEndpoint) InitiationMessagePublicKey(peerPublicKey [32]byte) { pubKey := key.NodePublicFromRaw32(mem.B(peerPublicKey[:])) + if le.maybeEP != nil && pubKey.Compare(le.maybeEP.publicKey) == 0 { + return + } le.c.mu.Lock() defer le.c.mu.Unlock() ep, ok := le.c.peerMap.endpointForNodeKey(pubKey) @@ -3821,6 +3851,11 @@ func (le *lazyEndpoint) InitiationMessagePublicKey(peerPublicKey [32]byte) { now := mono.Now() ep.lastRecvUDPAny.StoreAtomic(now) ep.noteRecvActivity(le.src, now) + // [ep.noteRecvActivity] may end up JIT configuring the peer, but we don't + // update [peerMap] as wireguard-go hasn't decrypted the initiation + // message yet. wireguard-go will call us below in [lazyEndpoint.FromPeer] + // if it successfully decrypts the message, at which point it's safe to + // insert le.src into the [peerMap] for ep. } func (le *lazyEndpoint) ClearSrc() {} @@ -3845,12 +3880,16 @@ func (le *lazyEndpoint) DstToBytes() []byte { } // FromPeer implements [conn.PeerAwareEndpoint]. 
We return a [*lazyEndpoint] in -// our [conn.ReceiveFunc]s when we are unable to identify the peer at WireGuard -// packet reception time, pre-decryption. If wireguard-go successfully decrypts -// the packet it calls us here, and we update our [peerMap] in order to -// associate le.src with peerPublicKey. +// [Conn.receiveIP] when we are unable to identify the peer at WireGuard +// packet reception time, pre-decryption, or we want wireguard-go to verify who +// we believe it to be (le.maybeEP). If wireguard-go successfully decrypts the +// packet it calls us here, and we update our [peerMap] to associate le.src with +// peerPublicKey. func (le *lazyEndpoint) FromPeer(peerPublicKey [32]byte) { pubKey := key.NodePublicFromRaw32(mem.B(peerPublicKey[:])) + if le.maybeEP != nil && pubKey.Compare(le.maybeEP.publicKey) == 0 { + return + } le.c.mu.Lock() defer le.c.mu.Unlock() ep, ok := le.c.peerMap.endpointForNodeKey(pubKey) diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index aea2de17d..0515162c7 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -3611,3 +3611,43 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }) } } + +func Test_looksLikeInitiationMsg(t *testing.T) { + initMsg := make([]byte, device.MessageInitiationSize) + binary.BigEndian.PutUint32(initMsg, device.MessageInitiationType) + initMsgSizeTransportType := make([]byte, device.MessageInitiationSize) + binary.BigEndian.PutUint32(initMsgSizeTransportType, device.MessageTransportType) + tests := []struct { + name string + b []byte + want bool + }{ + { + name: "valid initiation", + b: initMsg, + want: true, + }, + { + name: "invalid message type field", + b: initMsgSizeTransportType, + want: false, + }, + { + name: "too small", + b: initMsg[:device.MessageInitiationSize-1], + want: false, + }, + { + name: "too big", + b: append(initMsg, 0), + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := looksLikeInitiationMsg(tt.b); got != tt.want { + t.Errorf("looksLikeInitiationMsg() = %v, want %v", got, tt.want) + } + }) + } +} From bebc796e6c124e090d01c4651fe79cac771a0b30 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Thu, 10 Jul 2025 12:45:05 -0700 Subject: [PATCH 1078/1708] ipn/ipnlocal: add traffic-steering nodecap (#16529) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To signal when a tailnet has the `traffic-steering` feature flag, Control will send a `traffic-steering` NodeCapability in netmap’s AllCaps. This patch adds `tailcfg.NodeAttrTrafficSteering` so that it can be used in the control plane. Future patches will implement the actual steering mechanisms. Updates tailscale/corp#29966 Signed-off-by: Simon Law --- tailcfg/tailcfg.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 6c88217de..e55389f18 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2622,6 +2622,10 @@ const ( // NodeAttrMagicDNSPeerAAAA is a capability that tells the node's MagicDNS // server to answer AAAA queries about its peers. See tailscale/tailscale#1152. NodeAttrMagicDNSPeerAAAA NodeCapability = "magicdns-aaaa" + + // NodeAttrTrafficSteering configures the node to use the traffic + // steering subsystem for via routes. See tailscale/corp#29966. + NodeAttrTrafficSteering NodeCapability = "traffic-steering" ) // SetDNSRequest is a request to add a DNS record. 
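A minimal sketch (editorial, under assumptions) of how a client-side consumer could gate behavior on the traffic-steering capability once Control advertises it in the netmap's AllCaps, as the commit message above describes; the helper name steeringEnabled is hypothetical and not part of the patch.

package main

import (
	"fmt"

	"tailscale.com/tailcfg"
	"tailscale.com/types/netmap"
)

// steeringEnabled reports whether the tailnet's traffic-steering feature flag
// has been signalled to this node via the traffic-steering node capability.
func steeringEnabled(nm *netmap.NetworkMap) bool {
	return nm != nil && nm.AllCaps.Contains(tailcfg.NodeAttrTrafficSteering)
}

func main() {
	fmt.Println(steeringEnabled(nil)) // false until Control sends the capability
}
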
From fbc6a9ec5a797d9a551e74a90bc96947825b7719 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 10 Jul 2025 11:14:08 -0700 Subject: [PATCH 1079/1708] all: detect JetKVM and specialize a handful of things for it Updates #16524 Change-Id: I183428de8c65d7155d82979d2d33f031c22e3331 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/tailscaled.go | 15 +++++++-------- logpolicy/logpolicy.go | 3 +++ net/dns/manager_linux.go | 5 +++++ net/tstun/tun.go | 20 +++++++++++++++++++- net/tstun/tun_linux.go | 10 +++++++++- paths/paths.go | 32 ++++++++++++++++++++++++++++++++ paths/paths_unix.go | 3 +++ util/linuxfw/detector.go | 5 +++++ util/linuxfw/iptables_runner.go | 5 +++-- version/distro/distro.go | 3 +++ 10 files changed, 89 insertions(+), 12 deletions(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 3987b0c26..ab1590132 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -257,14 +257,13 @@ func main() { // Only apply a default statepath when neither have been provided, so that a // user may specify only --statedir if they wish. if args.statepath == "" && args.statedir == "" { - if runtime.GOOS == "plan9" { - home, err := os.UserHomeDir() - if err != nil { - log.Fatalf("failed to get home directory: %v", err) - } - args.statedir = filepath.Join(home, "tailscale-state") - if err := os.MkdirAll(args.statedir, 0700); err != nil { - log.Fatalf("failed to create state directory: %v", err) + if paths.MakeAutomaticStateDir() { + d := paths.DefaultTailscaledStateDir() + if d != "" { + args.statedir = d + if err := os.MkdirAll(d, 0700); err != nil { + log.Fatalf("failed to create state directory: %v", err) + } } } else { args.statepath = paths.DefaultTailscaledStateFile() diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index b84528d7b..f5c475712 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -224,6 +224,9 @@ func LogsDir(logf logger.Logf) string { logf("logpolicy: using LocalAppData dir %v", dir) return dir case "linux": + if distro.Get() == distro.JetKVM { + return "/userdata/tailscale/var" + } // STATE_DIRECTORY is set by systemd 240+ but we support older // systems-d. For example, Ubuntu 18.04 (Bionic Beaver) is 237. systemdStateDir := os.Getenv("STATE_DIRECTORY") diff --git a/net/dns/manager_linux.go b/net/dns/manager_linux.go index 6bd368f50..643cc280a 100644 --- a/net/dns/manager_linux.go +++ b/net/dns/manager_linux.go @@ -22,6 +22,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/util/cmpver" + "tailscale.com/version/distro" ) type kv struct { @@ -38,6 +39,10 @@ var publishOnce sync.Once // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs.Knobs, interfaceName string) (ret OSConfigurator, err error) { + if distro.Get() == distro.JetKVM { + return NewNoopManager() + } + env := newOSConfigEnv{ fs: directFS{}, dbusPing: dbusPing, diff --git a/net/tstun/tun.go b/net/tstun/tun.go index 88679daa2..bfdaddf58 100644 --- a/net/tstun/tun.go +++ b/net/tstun/tun.go @@ -24,6 +24,9 @@ import ( // CrateTAP is the hook set by feature/tap. var CreateTAP feature.Hook[func(logf logger.Logf, tapName, bridgeName string) (tun.Device, error)] +// modprobeTunHook is a Linux-specific hook to run "/sbin/modprobe tun". 
+var modprobeTunHook feature.Hook[func() error] + // New returns a tun.Device for the requested device name, along with // the OS-dependent name that was allocated to the device. func New(logf logger.Logf, tunName string) (tun.Device, string, error) { @@ -51,7 +54,22 @@ func New(logf logger.Logf, tunName string) (tun.Device, string, error) { if runtime.GOOS == "plan9" { cleanUpPlan9Interfaces() } - dev, err = tun.CreateTUN(tunName, int(DefaultTUNMTU())) + // Try to create the TUN device up to two times. If it fails + // the first time and we're on Linux, try a desperate + // "modprobe tun" to load the tun module and try again. + for try := range 2 { + dev, err = tun.CreateTUN(tunName, int(DefaultTUNMTU())) + if err == nil || !modprobeTunHook.IsSet() { + if try > 0 { + logf("created TUN device %q after doing `modprobe tun`", tunName) + } + break + } + if modprobeTunHook.Get()() != nil { + // modprobe failed; no point trying again. + break + } + } } if err != nil { return nil, "", err diff --git a/net/tstun/tun_linux.go b/net/tstun/tun_linux.go index 9600ceb77..05cf58c17 100644 --- a/net/tstun/tun_linux.go +++ b/net/tstun/tun_linux.go @@ -17,6 +17,14 @@ import ( func init() { tunDiagnoseFailure = diagnoseLinuxTUNFailure + modprobeTunHook.Set(func() error { + _, err := modprobeTun() + return err + }) +} + +func modprobeTun() ([]byte, error) { + return exec.Command("/sbin/modprobe", "tun").CombinedOutput() } func diagnoseLinuxTUNFailure(tunName string, logf logger.Logf, createErr error) { @@ -36,7 +44,7 @@ func diagnoseLinuxTUNFailure(tunName string, logf logger.Logf, createErr error) kernel := utsReleaseField(&un) logf("Linux kernel version: %s", kernel) - modprobeOut, err := exec.Command("/sbin/modprobe", "tun").CombinedOutput() + modprobeOut, err := modprobeTun() if err == nil { logf("'modprobe tun' successful") // Either tun is currently loaded, or it's statically diff --git a/paths/paths.go b/paths/paths.go index 28c3be02a..6c9c3fa6c 100644 --- a/paths/paths.go +++ b/paths/paths.go @@ -6,6 +6,7 @@ package paths import ( + "log" "os" "path/filepath" "runtime" @@ -70,6 +71,37 @@ func DefaultTailscaledStateFile() string { return "" } +// DefaultTailscaledStateDir returns the default state directory +// to use for tailscaled, for use when the user provided neither +// a state directory or state file path to use. +// +// It returns the empty string if there's no reasonable default. +func DefaultTailscaledStateDir() string { + if runtime.GOOS == "plan9" { + home, err := os.UserHomeDir() + if err != nil { + log.Fatalf("failed to get home directory: %v", err) + } + return filepath.Join(home, "tailscale-state") + } + return filepath.Dir(DefaultTailscaledStateFile()) +} + +// MakeAutomaticStateDir reports whether the platform +// automatically creates the state directory for tailscaled +// when it's absent. +func MakeAutomaticStateDir() bool { + switch runtime.GOOS { + case "plan9": + return true + case "linux": + if distro.Get() == distro.JetKVM { + return true + } + } + return false +} + // MkStateDir ensures that dirPath, the daemon's configuration directory // containing machine keys etc, both exists and has the correct permissions. // We want it to only be accessible to the user the daemon is running under. 
diff --git a/paths/paths_unix.go b/paths/paths_unix.go index 50a8b7ca5..d317921d5 100644 --- a/paths/paths_unix.go +++ b/paths/paths_unix.go @@ -21,6 +21,9 @@ func init() { } func statePath() string { + if runtime.GOOS == "linux" && distro.Get() == distro.JetKVM { + return "/userdata/tailscale/var/tailscaled.state" + } switch runtime.GOOS { case "linux", "illumos", "solaris": return "/var/lib/tailscale/tailscaled.state" diff --git a/util/linuxfw/detector.go b/util/linuxfw/detector.go index f3ee4aa0b..fffa523af 100644 --- a/util/linuxfw/detector.go +++ b/util/linuxfw/detector.go @@ -23,6 +23,11 @@ func detectFirewallMode(logf logger.Logf, prefHint string) FirewallMode { hostinfo.SetFirewallMode("nft-gokrazy") return FirewallModeNfTables } + if distro.Get() == distro.JetKVM { + // JetKVM doesn't have iptables. + hostinfo.SetFirewallMode("nft-jetkvm") + return FirewallModeNfTables + } mode := envknob.String("TS_DEBUG_FIREWALL_MODE") // If the envknob isn't set, fall back to the pref suggested by c2n or diff --git a/util/linuxfw/iptables_runner.go b/util/linuxfw/iptables_runner.go index 9a6fc0224..78844065a 100644 --- a/util/linuxfw/iptables_runner.go +++ b/util/linuxfw/iptables_runner.go @@ -688,8 +688,9 @@ func (i *iptablesRunner) DelMagicsockPortRule(port uint16, network string) error // IPTablesCleanUp removes all Tailscale added iptables rules. // Any errors that occur are logged to the provided logf. func IPTablesCleanUp(logf logger.Logf) { - if distro.Get() == distro.Gokrazy { - // Gokrazy uses nftables and doesn't have the "iptables" command. + switch distro.Get() { + case distro.Gokrazy, distro.JetKVM: + // These use nftables and don't have the "iptables" command. // Avoid log spam on cleanup. (#12277) return } diff --git a/version/distro/distro.go b/version/distro/distro.go index f7997e1d9..dd5e0b21b 100644 --- a/version/distro/distro.go +++ b/version/distro/distro.go @@ -31,6 +31,7 @@ const ( Unraid = Distro("unraid") Alpine = Distro("alpine") UBNT = Distro("ubnt") // Ubiquiti Networks + JetKVM = Distro("jetkvm") ) var distro lazy.SyncValue[Distro] @@ -102,6 +103,8 @@ func linuxDistro() Distro { return Unraid case have("/etc/alpine-release"): return Alpine + case haveDir("/userdata/jetkvm") && haveDir("/sys/kernel/config/usb_gadget/jetkvm"): + return JetKVM } return "" } From fed72e2aa9d55620655ab1790036523cbae9956f Mon Sep 17 00:00:00 2001 From: Dylan Bargatze Date: Thu, 10 Jul 2025 18:22:25 -0400 Subject: [PATCH 1080/1708] cmd/tailscale, ipn/ipnstate, wgengine/magicsock: update ping output for peer relay (#16515) Updates the output for "tailscale ping" to indicate if a peer relay was traversed, just like the output for DERP or direct connections. 
Fixes tailscale/corp#30034 Signed-off-by: Dylan Bargatze --- cmd/tailscale/cli/ping.go | 4 +++- ipn/ipnstate/ipnstate.go | 12 ++++++++++-- tailcfg/tailcfg.go | 8 ++++++-- wgengine/magicsock/magicsock.go | 9 +++++---- 4 files changed, 24 insertions(+), 9 deletions(-) diff --git a/cmd/tailscale/cli/ping.go b/cmd/tailscale/cli/ping.go index 3a909f30d..d438cb228 100644 --- a/cmd/tailscale/cli/ping.go +++ b/cmd/tailscale/cli/ping.go @@ -152,7 +152,9 @@ func runPing(ctx context.Context, args []string) error { } latency := time.Duration(pr.LatencySeconds * float64(time.Second)).Round(time.Millisecond) via := pr.Endpoint - if pr.DERPRegionID != 0 { + if pr.PeerRelay != "" { + via = fmt.Sprintf("peer-relay(%s)", pr.PeerRelay) + } else if pr.DERPRegionID != 0 { via = fmt.Sprintf("DERP(%s)", pr.DERPRegionCode) } if via == "" { diff --git a/ipn/ipnstate/ipnstate.go b/ipn/ipnstate/ipnstate.go index fdfd4e334..e7ae2d62b 100644 --- a/ipn/ipnstate/ipnstate.go +++ b/ipn/ipnstate/ipnstate.go @@ -701,10 +701,17 @@ type PingResult struct { Err string LatencySeconds float64 - // Endpoint is the ip:port if direct UDP was used. - // It is not currently set for TSMP pings. + // Endpoint is a string of the form "{ip}:{port}" if direct UDP was used. It + // is not currently set for TSMP. Endpoint string + // PeerRelay is a string of the form "{ip}:{port}:vni:{vni}" if a peer + // relay was used. It is not currently set for TSMP. Note that this field + // is not omitted during JSON encoding if it contains a zero value. This is + // done for consistency with the Endpoint field; this structure is exposed + // externally via localAPI, so we want to maintain the existing convention. + PeerRelay string + // DERPRegionID is non-zero DERP region ID if DERP was used. // It is not currently set for TSMP pings. DERPRegionID int @@ -739,6 +746,7 @@ func (pr *PingResult) ToPingResponse(pingType tailcfg.PingType) *tailcfg.PingRes Err: pr.Err, LatencySeconds: pr.LatencySeconds, Endpoint: pr.Endpoint, + PeerRelay: pr.PeerRelay, DERPRegionID: pr.DERPRegionID, DERPRegionCode: pr.DERPRegionCode, PeerAPIPort: pr.PeerAPIPort, diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index e55389f18..ab8add5b8 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -1854,10 +1854,14 @@ type PingResponse struct { // omitted, Err should contain information as to the cause. LatencySeconds float64 `json:",omitempty"` - // Endpoint is the ip:port if direct UDP was used. - // It is not currently set for TSMP pings. + // Endpoint is a string of the form "{ip}:{port}" if direct UDP was used. It + // is not currently set for TSMP. Endpoint string `json:",omitempty"` + // PeerRelay is a string of the form "{ip}:{port}:vni:{vni}" if a peer + // relay was used. It is not currently set for TSMP. + PeerRelay string `json:",omitempty"` + // DERPRegionID is non-zero DERP region ID if DERP was used. // It is not currently set for TSMP pings. 
DERPRegionID int `json:",omitempty"` diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 6ce91902d..b5087b02e 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1106,10 +1106,11 @@ func (c *Conn) Ping(peer tailcfg.NodeView, res *ipnstate.PingResult, size int, c func (c *Conn) populateCLIPingResponseLocked(res *ipnstate.PingResult, latency time.Duration, ep epAddr) { res.LatencySeconds = latency.Seconds() if ep.ap.Addr() != tailcfg.DerpMagicIPAddr { - // TODO(jwhited): if ep.vni.isSet() we are using a Tailscale client - // as a UDP relay; update PingResult and its interpretation by - // "tailscale ping" to make this clear. - res.Endpoint = ep.String() + if ep.vni.isSet() { + res.PeerRelay = ep.String() + } else { + res.Endpoint = ep.String() + } return } regionID := int(ep.ap.Port()) From 5f678b9becfbd13b3a5ec57c48fc4bd78d8353db Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 9 Jul 2025 17:41:55 -0500 Subject: [PATCH 1081/1708] docs/windows/policy: add ExitNode.AllowOverride as an option to ExitNodeID policy In this PR, we make ExitNode.AllowOverride configurable as part of the Exit Node ADMX policy setting, similarly to Always On w/ "Disconnect with reason" option. Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- docs/windows/policy/en-US/tailscale.adml | 4 +++- docs/windows/policy/tailscale.admx | 12 ++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index c09d847bc..2e143d49c 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -23,6 +23,7 @@ Tailscale UI customization Settings + Allowed Allowed (with audit) Not Allowed Require using a specific Tailscale coordination server @@ -69,7 +70,7 @@ See https://tailscale.com/kb/1315/mdm-keys#set-an-auth-key for more details.]]>< Require using a specific Exit Node + User override:
                Registration mode: diff --git a/docs/windows/policy/tailscale.admx b/docs/windows/policy/tailscale.admx index 0a8aa1a75..0da8aef42 100644 --- a/docs/windows/policy/tailscale.admx +++ b/docs/windows/policy/tailscale.admx @@ -115,6 +115,18 @@ + + + + + + + + + + + + From bd29a1c8c1000d620b26dcb31363c7b678463c2d Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 10 Jul 2025 18:52:01 -0700 Subject: [PATCH 1082/1708] feature/relayserver,wgengine/magicsock: remove WIP gating of peer relay (#16533) Updates tailscale/corp#30051 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 4 ---- wgengine/magicsock/magicsock.go | 3 +-- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index f4a533193..d0ad27624 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -14,7 +14,6 @@ import ( "sync" "time" - "tailscale.com/envknob" "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" @@ -133,9 +132,6 @@ func (e *extension) relayServerOrInit() (relayServer, error) { if e.hasNodeAttrDisableRelayServer { return nil, errors.New("disable-relay-server node attribute is present") } - if !envknob.UseWIPCode() { - return nil, errors.New("TAILSCALE_USE_WIP_CODE envvar is not set") - } var err error e.server, err = udprelay.NewServer(e.logf, *e.port, nil) if err != nil { diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index b5087b02e..14feed32b 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2724,8 +2724,7 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { relayClientEnabled := update.SelfNode.Valid() && !update.SelfNode.HasCap(tailcfg.NodeAttrDisableRelayClient) && - !update.SelfNode.HasCap(tailcfg.NodeAttrOnlyTCP443) && - envknob.UseWIPCode() + !update.SelfNode.HasCap(tailcfg.NodeAttrOnlyTCP443) c.mu.Lock() relayClientChanged := c.relayClientEnabled != relayClientEnabled From c18ba4470b452112b83975f042705e950ef7d232 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Thu, 10 Jul 2025 22:15:55 -0700 Subject: [PATCH 1083/1708] ipn/ipnlocal: add traffic steering support to exit-node suggestions (#16527) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When `tailscale exit-node suggest` contacts the LocalAPI for a suggested exit node, the client consults its netmap for peers that contain the `suggest-exit-node` peercap. It currently uses a series of heuristics to determine the exit node to suggest. When the `traffic-steering` feature flag is enabled on its tailnet, the client will defer to Control’s priority scores for a particular peer. These scores, in `tailcfg.Hostinfo.Location.Priority`, were historically only used for Mullvad exit nodes, but they have now been extended to score any peer that could host a redundant resource. Client capability version 119 is the earliest client that understands these traffic steering scores. Control tells the client to switch to rely on these scores by adding `tailcfg.NodeAttrTrafficSteering` to its `AllCaps`. 
Updates tailscale/corp#29966 Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 134 +++++++++++- ipn/ipnlocal/local_test.go | 417 +++++++++++++++++++++++++++++++++++++ tailcfg/tailcfg.go | 3 +- 3 files changed, 546 insertions(+), 8 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 48eceb36c..4ed012f2e 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7675,13 +7675,10 @@ func allowedAutoRoute(ipp netip.Prefix) bool { var ErrNoPreferredDERP = errors.New("no preferred DERP, try again later") -// suggestExitNodeLocked computes a suggestion based on the current netmap and last netcheck report. If -// there are multiple equally good options, one is selected at random, so the result is not stable. To be -// eligible for consideration, the peer must have NodeAttrSuggestExitNode in its CapMap. -// -// Currently, peers with a DERP home are preferred over those without (typically this means Mullvad). -// Peers are selected based on having a DERP home that is the lowest latency to this device. For peers -// without a DERP home, we look for geographic proximity to this device's DERP home. +// suggestExitNodeLocked computes a suggestion based on the current netmap and +// other optional factors. If there are multiple equally good options, one may +// be selected at random, so the result is not stable. To be eligible for +// consideration, the peer must have NodeAttrSuggestExitNode in its CapMap. // // b.mu.lock() must be held. func (b *LocalBackend) suggestExitNodeLocked() (response apitype.ExitNodeSuggestionResponse, err error) { @@ -7743,7 +7740,32 @@ func fillAllowedSuggestions() set.Set[tailcfg.StableNodeID] { return s } +// suggestExitNode returns a suggestion for reasonably good exit node based on +// the current netmap and the previous suggestion. func suggestExitNode(report *netcheck.Report, nb *nodeBackend, prevSuggestion tailcfg.StableNodeID, selectRegion selectRegionFunc, selectNode selectNodeFunc, allowList set.Set[tailcfg.StableNodeID]) (res apitype.ExitNodeSuggestionResponse, err error) { + switch { + case nb.SelfHasCap(tailcfg.NodeAttrTrafficSteering): + // The traffic-steering feature flag is enabled on this tailnet. + return suggestExitNodeUsingTrafficSteering(nb, prevSuggestion, allowList) + default: + return suggestExitNodeUsingDERP(report, nb, prevSuggestion, selectRegion, selectNode, allowList) + } +} + +// suggestExitNodeUsingDERP is the classic algorithm used to suggest exit nodes, +// before traffic steering was implemented. This handles the plain failover +// case, in addition to the optional Regional Routing. +// +// It computes a suggestion based on the current netmap and last netcheck +// report. If there are multiple equally good options, one is selected at +// random, so the result is not stable. To be eligible for consideration, the +// peer must have NodeAttrSuggestExitNode in its CapMap. +// +// Currently, peers with a DERP home are preferred over those without (typically +// this means Mullvad). Peers are selected based on having a DERP home that is +// the lowest latency to this device. For peers without a DERP home, we look for +// geographic proximity to this device's DERP home. 
+func suggestExitNodeUsingDERP(report *netcheck.Report, nb *nodeBackend, prevSuggestion tailcfg.StableNodeID, selectRegion selectRegionFunc, selectNode selectNodeFunc, allowList set.Set[tailcfg.StableNodeID]) (res apitype.ExitNodeSuggestionResponse, err error) { netMap := nb.NetMap() if report == nil || report.PreferredDERP == 0 || netMap == nil || netMap.DERPMap == nil { return res, ErrNoPreferredDERP @@ -7864,6 +7886,104 @@ func suggestExitNode(report *netcheck.Report, nb *nodeBackend, prevSuggestion ta return res, nil } +var ErrNoNetMap = errors.New("no network map, try again later") + +// suggestExitNodeUsingTrafficSteering uses traffic steering priority scores to +// pick one of the best exit nodes. These priorities are provided by Control in +// the node’s [tailcfg.Location]. To be eligible for consideration, the node +// must have NodeAttrSuggestExitNode in its CapMap. +func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, prev tailcfg.StableNodeID, allowed set.Set[tailcfg.StableNodeID]) (apitype.ExitNodeSuggestionResponse, error) { + nm := nb.NetMap() + if nm == nil { + return apitype.ExitNodeSuggestionResponse{}, ErrNoNetMap + } + + if !nb.SelfHasCap(tailcfg.NodeAttrTrafficSteering) { + panic("missing traffic-steering capability") + } + + peers := nm.Peers + nodes := make([]tailcfg.NodeView, 0, len(peers)) + + for _, p := range peers { + if !p.Valid() { + continue + } + if allowed != nil && !allowed.Contains(p.StableID()) { + continue + } + if !p.CapMap().Contains(tailcfg.NodeAttrSuggestExitNode) { + continue + } + if !tsaddr.ContainsExitRoutes(p.AllowedIPs()) { + continue + } + if p.StableID() == prev { + // Prevent flapping: since prev is a valid suggestion, + // force prev to be the only valid pick. + nodes = []tailcfg.NodeView{p} + break + } + nodes = append(nodes, p) + } + + var pick tailcfg.NodeView + + scores := make(map[tailcfg.NodeID]int, len(nodes)) + score := func(n tailcfg.NodeView) int { + id := n.ID() + s, ok := scores[id] + if !ok { + s = 0 // score of zero means incomparable + if hi := n.Hostinfo(); hi.Valid() { + if loc := hi.Location(); loc.Valid() { + s = loc.Priority() + } + } + scores[id] = s + } + return s + } + + if len(nodes) > 0 { + // Find the highest scoring exit nodes. + slices.SortFunc(nodes, func(a, b tailcfg.NodeView) int { + return cmp.Compare(score(b), score(a)) // reverse sort + }) + + // Find the top exit nodes, which all have the same score. + topI := len(nodes) + ts := score(nodes[0]) + for i, n := range nodes[1:] { + if score(n) < ts { + // n is the first node with a lower score. + // Make nodes[:topI] to slice the top exit nodes. + topI = i + 1 + break + } + } + + // TODO(sfllaw): add a temperature knob so that this client has + // a chance of picking the next best option. + randSeed := uint64(nm.SelfNode.ID()) + pick = nodes[rands.IntN(randSeed, topI)] + } + + if !pick.Valid() { + return apitype.ExitNodeSuggestionResponse{}, nil + } + res := apitype.ExitNodeSuggestionResponse{ + ID: pick.StableID(), + Name: pick.Name(), + } + if hi := pick.Hostinfo(); hi.Valid() { + if loc := hi.Location(); loc.Valid() { + res.Location = loc + } + } + return res, nil +} + // pickWeighted chooses the node with highest priority given a list of mullvad nodes. 
func pickWeighted(candidates []tailcfg.NodeView) []tailcfg.NodeView { maxWeight := 0 diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index bb7f433c0..0b39c45c2 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4229,6 +4229,23 @@ func withLocation(loc tailcfg.LocationView) peerOptFunc { } } +func withLocationPriority(pri int) peerOptFunc { + return func(n *tailcfg.Node) { + var hi *tailcfg.Hostinfo + if n.Hostinfo.Valid() { + hi = n.Hostinfo.AsStruct() + } else { + hi = new(tailcfg.Hostinfo) + } + if hi.Location == nil { + hi.Location = new(tailcfg.Location) + } + hi.Location.Priority = pri + + n.Hostinfo = hi.View() + } +} + func withExitRoutes() peerOptFunc { return func(n *tailcfg.Node) { n.AllowedIPs = append(n.AllowedIPs, tsaddr.ExitRoutes()...) @@ -4895,6 +4912,406 @@ func TestSuggestExitNodeLongLatDistance(t *testing.T) { } } +func TestSuggestExitNodeTrafficSteering(t *testing.T) { + city := &tailcfg.Location{ + Country: "Canada", + CountryCode: "CA", + City: "Montreal", + CityCode: "MTR", + Latitude: 45.5053, + Longitude: -73.5525, + } + noLatLng := &tailcfg.Location{ + Country: "Canada", + CountryCode: "CA", + City: "Montreal", + CityCode: "MTR", + } + + selfNode := tailcfg.Node{ + ID: 0, // randomness is seeded off NetMap.SelfNode.ID + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.64.1.1/32"), + netip.MustParsePrefix("fe70::1/128"), + }, + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrTrafficSteering: []tailcfg.RawMessage{}, + }, + } + + for _, tt := range []struct { + name string + + netMap *netmap.NetworkMap + lastExit tailcfg.StableNodeID + allowPolicy []tailcfg.StableNodeID + + wantID tailcfg.StableNodeID + wantName string + wantLoc *tailcfg.Location + wantPri int + + wantErr error + }{ + { + name: "no-netmap", + netMap: nil, + wantErr: ErrNoNetMap, + }, + { + name: "no-nodes", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{}, + }, + wantID: "", + }, + { + name: "no-exit-nodes", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1), + }, + }, + wantID: "", + }, + { + name: "exit-node-without-suggestion", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes()), + }, + }, + wantID: "", + }, + { + name: "suggested-exit-node-without-routes", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withSuggest()), + }, + }, + wantID: "", + }, + { + name: "suggested-exit-node", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest()), + }, + }, + wantID: "stable1", + wantName: "peer1", + }, + { + name: "many-suggested-exit-nodes", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest()), + makePeer(2, + withExitRoutes(), + withSuggest()), + makePeer(3, + withExitRoutes(), + withSuggest()), + makePeer(4, + withExitRoutes(), + withSuggest()), + }, + }, + wantID: "stable3", + wantName: "peer3", + }, + { + name: "suggested-exit-node-was-last-suggested", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest()), + makePeer(2, + withExitRoutes(), + withSuggest()), + makePeer(3, + withExitRoutes(), + withSuggest()), + makePeer(4, + withExitRoutes(), + withSuggest()), + }, + }, + lastExit: 
"stable2", // overrides many-suggested-exit-nodes + wantID: "stable2", + wantName: "peer2", + }, + { + name: "suggested-exit-node-was-never-suggested", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest()), + makePeer(2, + withExitRoutes(), + withSuggest()), + makePeer(3, + withExitRoutes(), + withSuggest()), + makePeer(4, + withExitRoutes(), + withSuggest()), + }, + }, + lastExit: "stable10", + wantID: "stable3", // matches many-suggested-exit-nodes + wantName: "peer3", + }, + { + name: "exit-nodes-with-and-without-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocationPriority(1)), + makePeer(2, + withExitRoutes(), + withSuggest()), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantPri: 1, + }, + { + name: "exit-nodes-without-and-with-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest()), + makePeer(2, + withExitRoutes(), + withSuggest(), + withLocationPriority(1)), + }, + }, + wantID: "stable2", + wantName: "peer2", + wantPri: 1, + }, + { + name: "exit-nodes-with-negative-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocationPriority(-1)), + makePeer(2, + withExitRoutes(), + withSuggest(), + withLocationPriority(-2)), + makePeer(3, + withExitRoutes(), + withSuggest(), + withLocationPriority(-3)), + makePeer(4, + withExitRoutes(), + withSuggest(), + withLocationPriority(-4)), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantPri: -1, + }, + { + name: "exit-nodes-no-priority-beats-negative-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocationPriority(-1)), + makePeer(2, + withExitRoutes(), + withSuggest(), + withLocationPriority(-2)), + makePeer(3, + withExitRoutes(), + withSuggest()), + }, + }, + wantID: "stable3", + wantName: "peer3", + }, + { + name: "exit-nodes-same-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocationPriority(1)), + makePeer(2, + withExitRoutes(), + withSuggest(), + withLocationPriority(2)), // top + makePeer(3, + withExitRoutes(), + withSuggest(), + withLocationPriority(1)), + makePeer(4, + withExitRoutes(), + withSuggest(), + withLocationPriority(2)), // top + makePeer(5, + withExitRoutes(), + withSuggest(), + withLocationPriority(2)), // top + makePeer(6, + withExitRoutes(), + withSuggest()), + makePeer(7, + withExitRoutes(), + withSuggest(), + withLocationPriority(2)), // top + }, + }, + wantID: "stable5", + wantName: "peer5", + wantPri: 2, + }, + { + name: "suggested-exit-node-with-city", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocation(city.View())), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantLoc: city, + }, + { + name: "suggested-exit-node-with-city-and-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocation(city.View()), + withLocationPriority(1)), + }, + }, + wantID: "stable1", + wantName: "peer1", + 
wantLoc: city, + wantPri: 1, + }, + { + name: "suggested-exit-node-without-latlng", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocation(noLatLng.View())), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantLoc: noLatLng, + }, + { + name: "suggested-exit-node-without-latlng-with-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocation(noLatLng.View()), + withLocationPriority(1)), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantLoc: noLatLng, + wantPri: 1, + }, + } { + t.Run(tt.name, func(t *testing.T) { + var allowList set.Set[tailcfg.StableNodeID] + if tt.allowPolicy != nil { + allowList = set.SetOf(tt.allowPolicy) + } + + // HACK: NetMap.AllCaps is populated by Control: + if tt.netMap != nil { + caps := maps.Keys(tt.netMap.SelfNode.CapMap().AsMap()) + tt.netMap.AllCaps = set.SetOf(slices.Collect(caps)) + } + + nb := newNodeBackend(t.Context(), eventbus.New()) + defer nb.shutdown(errShutdown) + nb.SetNetMap(tt.netMap) + + got, err := suggestExitNodeUsingTrafficSteering(nb, tt.lastExit, allowList) + if tt.wantErr == nil && err != nil { + t.Fatalf("err=%v, want nil", err) + } + if tt.wantErr != nil && !errors.Is(err, tt.wantErr) { + t.Fatalf("err=%v, want %v", err, tt.wantErr) + } + + if got.Name != tt.wantName { + t.Errorf("name=%q, want %q", got.Name, tt.wantName) + } + + if got.ID != tt.wantID { + t.Errorf("ID=%q, want %q", got.ID, tt.wantID) + } + + wantLoc := tt.wantLoc + if tt.wantPri != 0 { + if wantLoc == nil { + wantLoc = new(tailcfg.Location) + } + wantLoc.Priority = tt.wantPri + } + if diff := cmp.Diff(got.Location.AsStruct(), wantLoc); diff != "" { + t.Errorf("location mismatch (+want -got)\n%s", diff) + } + }) + } +} + func TestMinLatencyDERPregion(t *testing.T) { tests := []struct { name string diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index ab8add5b8..53c4683c1 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -163,7 +163,8 @@ type CapabilityVersion int // - 116: 2025-05-05: Client serves MagicDNS "AAAA" if NodeAttrMagicDNSPeerAAAA set on self node // - 117: 2025-05-28: Client understands DisplayMessages (structured health messages), but not necessarily PrimaryAction. // - 118: 2025-07-01: Client sends Hostinfo.StateEncrypted to report whether the state file is encrypted at rest (#15830) -const CurrentCapabilityVersion CapabilityVersion = 118 +// - 119: 2025-07-10: Client uses Hostinfo.Location.Priority to prioritize one route over another. +const CurrentCapabilityVersion CapabilityVersion = 119 // ID is an integer ID for a user, node, or login allocated by the // control plane. 
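A minimal editorial sketch of the peer shape the new suggestion path treats as eligible, assembled from the fields exercised in the tests above (the suggest-exit-node capability, advertised exit routes, and a Control-assigned Location.Priority score); all concrete values here are placeholders, not taken from the patch.

package main

import (
	"fmt"

	"tailscale.com/net/tsaddr"
	"tailscale.com/tailcfg"
)

func main() {
	peer := &tailcfg.Node{
		ID:       1,
		StableID: "stable1",
		Name:     "peer1",
		CapMap: tailcfg.NodeCapMap{
			tailcfg.NodeAttrSuggestExitNode: []tailcfg.RawMessage{}, // required for eligibility
		},
		AllowedIPs: tsaddr.ExitRoutes(), // must advertise the exit routes
		Hostinfo: (&tailcfg.Hostinfo{
			Location: &tailcfg.Location{
				CountryCode: "CA",
				CityCode:    "MTR",
				Priority:    2, // higher Control-assigned score wins among eligible peers
			},
		}).View(),
	}
	fmt.Println(peer.StableID)
}
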
From 04e8d21b0bcaab54f1906fb6a0ebc507ed7114ea Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 10 Jul 2025 22:21:08 -0700 Subject: [PATCH 1084/1708] go.mod: bump wg-go to fix keepalive detection (#16535) Updates tailscale/corp#30364 Signed-off-by: Jordan Whited --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e89a383a6..f040d7799 100644 --- a/go.mod +++ b/go.mod @@ -90,7 +90,7 @@ require ( github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 - github.com/tailscale/wireguard-go v0.0.0-20250707220504-1f398ae148a8 + github.com/tailscale/wireguard-go v0.0.0-20250711050509-4064566ecaf9 github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e github.com/tc-hib/winres v0.2.1 github.com/tcnksm/go-httpstat v0.2.0 diff --git a/go.sum b/go.sum index 062af6662..ea17b1182 100644 --- a/go.sum +++ b/go.sum @@ -975,8 +975,8 @@ github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:U github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= -github.com/tailscale/wireguard-go v0.0.0-20250707220504-1f398ae148a8 h1:Yjg/+1VVRcdY3DL9fs8g+QnZ1aizotU0pp0VSOSCuTQ= -github.com/tailscale/wireguard-go v0.0.0-20250707220504-1f398ae148a8/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250711050509-4064566ecaf9 h1:kSzi/ugdekAxhcVdCxH6er7OjoNc2oDRcimWJDvnRFM= +github.com/tailscale/wireguard-go v0.0.0-20250711050509-4064566ecaf9/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= From 30da2e1c3206b7e45b42fd3fddfe1d9081c6982d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 11 Jul 2025 08:51:02 -0700 Subject: [PATCH 1085/1708] cmd/tailscale/cli: add "configure jetkvm" subcommand To write the init script. And fix the JetKVM detection to work during early boot while the filesystem and modules are still being loaded; it wasn't being detected on early boot and then tailscaled was failing to start because it didn't know it was on JetKVM and didn't modprobe tun. 
Updates #16524 Change-Id: I0524ca3abd7ace68a69af96aab4175d32c07e116 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/configure-jetkvm.go | 81 +++++++++++++++++++++++++++ cmd/tailscale/cli/configure.go | 3 + version/distro/distro.go | 11 +++- 3 files changed, 94 insertions(+), 1 deletion(-) create mode 100644 cmd/tailscale/cli/configure-jetkvm.go diff --git a/cmd/tailscale/cli/configure-jetkvm.go b/cmd/tailscale/cli/configure-jetkvm.go new file mode 100644 index 000000000..a8e0a7cb5 --- /dev/null +++ b/cmd/tailscale/cli/configure-jetkvm.go @@ -0,0 +1,81 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android && arm + +package cli + +import ( + "bytes" + "context" + "errors" + "flag" + "os" + "runtime" + "strings" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/version/distro" +) + +func init() { + maybeJetKVMConfigureCmd = jetKVMConfigureCmd +} + +func jetKVMConfigureCmd() *ffcli.Command { + if runtime.GOOS != "linux" || distro.Get() != distro.JetKVM { + return nil + } + return &ffcli.Command{ + Name: "jetkvm", + Exec: runConfigureJetKVM, + ShortUsage: "tailscale configure jetkvm", + ShortHelp: "Configure JetKVM to run tailscaled at boot", + LongHelp: strings.TrimSpace(` +This command configures the JetKVM host to run tailscaled at boot. +`), + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("jetkvm") + return fs + })(), + } +} + +func runConfigureJetKVM(ctx context.Context, args []string) error { + if len(args) > 0 { + return errors.New("unknown arguments") + } + if runtime.GOOS != "linux" || distro.Get() != distro.JetKVM { + return errors.New("only implemented on JetKVM") + } + err := os.WriteFile("/etc/init.d/S22tailscale", bytes.TrimLeft([]byte(` +#!/bin/sh +# /etc/init.d/S22tailscale +# Start/stop tailscaled + +case "$1" in + start) + /userdata/tailscale/tailscaled > /dev/null 2>&1 & + ;; + stop) + killall tailscaled + ;; + *) + echo "Usage: $0 {start|stop}" + exit 1 + ;; +esac +`), "\n"), 0755) + if err != nil { + return err + } + + if err := os.Symlink("/userdata/tailscale/tailscale", "/bin/tailscale"); err != nil { + if !os.IsExist(err) { + return err + } + } + + printf("Done. Now restart your JetKVM.\n") + return nil +} diff --git a/cmd/tailscale/cli/configure.go b/cmd/tailscale/cli/configure.go index acb416755..da6278ce2 100644 --- a/cmd/tailscale/cli/configure.go +++ b/cmd/tailscale/cli/configure.go @@ -10,6 +10,8 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" ) +var maybeJetKVMConfigureCmd func() *ffcli.Command // non-nil only on Linux/arm for JetKVM + func configureCmd() *ffcli.Command { return &ffcli.Command{ Name: "configure", @@ -29,6 +31,7 @@ services on the host to use Tailscale in more ways. 
synologyConfigureCertCmd(), ccall(maybeSysExtCmd), ccall(maybeVPNConfigCmd), + ccall(maybeJetKVMConfigureCmd), ), } } diff --git a/version/distro/distro.go b/version/distro/distro.go index dd5e0b21b..0e88bdd2f 100644 --- a/version/distro/distro.go +++ b/version/distro/distro.go @@ -9,6 +9,7 @@ import ( "os" "runtime" "strconv" + "strings" "tailscale.com/types/lazy" "tailscale.com/util/lineiter" @@ -103,12 +104,20 @@ func linuxDistro() Distro { return Unraid case have("/etc/alpine-release"): return Alpine - case haveDir("/userdata/jetkvm") && haveDir("/sys/kernel/config/usb_gadget/jetkvm"): + case runtime.GOARCH == "arm" && isDeviceModel("JetKVM"): return JetKVM } return "" } +func isDeviceModel(want string) bool { + if runtime.GOOS != "linux" { + return false + } + v, _ := os.ReadFile("/sys/firmware/devicetree/base/model") + return want == strings.Trim(string(v), "\x00\r\n\t ") +} + func freebsdDistro() Distro { switch { case have("/etc/pfSense-rc"): From 39bf84d1c70d1b31384acbf37dd9f8d36db47404 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 11 Jul 2025 16:01:15 -0700 Subject: [PATCH 1086/1708] cmd/tsidp: set hostinfo.App in tsnet mode (#16544) This makes it easier to track how widely tsidp is used in practice. Updates #cleanup Signed-off-by: Andrew Lytvynov --- cmd/tsidp/tsidp.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 43020eaf7..6a0c2d89e 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -39,6 +39,7 @@ import ( "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/envknob" + "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" @@ -121,6 +122,7 @@ func main() { } defer cleanup() } else { + hostinfo.SetApp("tsidp") ts := &tsnet.Server{ Hostname: *flagHostname, Dir: *flagDir, From 24062e33d13a4859b7d08f2bcfc518827517784e Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 11 Jul 2025 17:12:23 -0700 Subject: [PATCH 1087/1708] net/udprelay: fix peer relay server deadlock (#16542) Fixes tailscale/corp#30381 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 979ccf717..e2652ae99 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -488,14 +488,17 @@ func (s *Server) listenOn(port int) error { // Close closes the server. func (s *Server) Close() error { s.closeOnce.Do(func() { - s.mu.Lock() - defer s.mu.Unlock() s.uc4.Close() if s.uc6 != nil { s.uc6.Close() } close(s.closeCh) s.wg.Wait() + // s.mu must not be held while s.wg.Wait'ing, otherwise we can + // deadlock. The goroutines we are waiting on to return can also + // acquire s.mu. + s.mu.Lock() + defer s.mu.Unlock() clear(s.byVNI) clear(s.byDisco) s.vniPool = nil @@ -564,6 +567,12 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte, rxSocket, otherAFSo func (s *Server) packetReadLoop(readFromSocket, otherSocket *net.UDPConn) { defer func() { + // We intentionally close the [Server] if we encounter a socket read + // error below, at least until socket "re-binding" is implemented as + // part of http://go/corp/30118. + // + // Decrementing this [sync.WaitGroup] _before_ calling [Server.Close] is + // intentional as [Server.Close] waits on it. 
s.wg.Done() s.Close() }() From f23e4279c42aec766eb6a89562c1fed3a1b97e09 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Sun, 13 Jul 2025 05:47:56 -0700 Subject: [PATCH 1088/1708] types/lazy: add lazy.GMap: a map of lazily computed GValues (#16532) Fixes tailscale/corp#30360 Signed-off-by: Simon Law --- cmd/stund/depaware.txt | 2 +- types/lazy/map.go | 62 +++++++++++++++++++++++++++ types/lazy/map_test.go | 95 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 158 insertions(+), 1 deletion(-) create mode 100644 types/lazy/map.go create mode 100644 types/lazy/map_test.go diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index da7680394..81544b750 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -76,7 +76,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/tailcfg tailscale.com/util/lineiter from tailscale.com/version/distro - tailscale.com/util/mak from tailscale.com/syncs + tailscale.com/util/mak from tailscale.com/syncs+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/rands from tailscale.com/tsweb tailscale.com/util/slicesx from tailscale.com/tailcfg diff --git a/types/lazy/map.go b/types/lazy/map.go new file mode 100644 index 000000000..75a1dd739 --- /dev/null +++ b/types/lazy/map.go @@ -0,0 +1,62 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package lazy + +import "tailscale.com/util/mak" + +// GMap is a map of lazily computed [GValue] pointers, keyed by a comparable +// type. +// +// Use either Get or GetErr, depending on whether your fill function returns an +// error. +// +// GMap is not safe for concurrent use. +type GMap[K comparable, V any] struct { + store map[K]*GValue[V] +} + +// Len returns the number of entries in the map. +func (s *GMap[K, V]) Len() int { + return len(s.store) +} + +// Set attempts to set the value of k to v, and reports whether it succeeded. +// Set only succeeds if k has never been called with Get/GetErr/Set before. +func (s *GMap[K, V]) Set(k K, v V) bool { + z, ok := s.store[k] + if !ok { + z = new(GValue[V]) + mak.Set(&s.store, k, z) + } + return z.Set(v) +} + +// MustSet sets the value of k to v, or panics if k already has a value. +func (s *GMap[K, V]) MustSet(k K, v V) { + if !s.Set(k, v) { + panic("Set after already filled") + } +} + +// Get returns the value for k, computing it with fill if it's not already +// present. +func (s *GMap[K, V]) Get(k K, fill func() V) V { + z, ok := s.store[k] + if !ok { + z = new(GValue[V]) + mak.Set(&s.store, k, z) + } + return z.Get(fill) +} + +// GetErr returns the value for k, computing it with fill if it's not already +// present. 
+func (s *GMap[K, V]) GetErr(k K, fill func() (V, error)) (V, error) { + z, ok := s.store[k] + if !ok { + z = new(GValue[V]) + mak.Set(&s.store, k, z) + } + return z.GetErr(fill) +} diff --git a/types/lazy/map_test.go b/types/lazy/map_test.go new file mode 100644 index 000000000..ec1152b0b --- /dev/null +++ b/types/lazy/map_test.go @@ -0,0 +1,95 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package lazy + +import ( + "errors" + "testing" +) + +func TestGMap(t *testing.T) { + var gm GMap[string, int] + n := int(testing.AllocsPerRun(1000, func() { + got := gm.Get("42", fortyTwo) + if got != 42 { + t.Fatalf("got %v; want 42", got) + } + })) + if n != 0 { + t.Errorf("allocs = %v; want 0", n) + } +} + +func TestGMapErr(t *testing.T) { + var gm GMap[string, int] + n := int(testing.AllocsPerRun(1000, func() { + got, err := gm.GetErr("42", func() (int, error) { + return 42, nil + }) + if got != 42 || err != nil { + t.Fatalf("got %v, %v; want 42, nil", got, err) + } + })) + if n != 0 { + t.Errorf("allocs = %v; want 0", n) + } + + var gmErr GMap[string, int] + wantErr := errors.New("test error") + n = int(testing.AllocsPerRun(1000, func() { + got, err := gmErr.GetErr("42", func() (int, error) { + return 0, wantErr + }) + if got != 0 || err != wantErr { + t.Fatalf("got %v, %v; want 0, %v", got, err, wantErr) + } + })) + if n != 0 { + t.Errorf("allocs = %v; want 0", n) + } +} + +func TestGMapSet(t *testing.T) { + var gm GMap[string, int] + if !gm.Set("42", 42) { + t.Fatalf("Set failed") + } + if gm.Set("42", 43) { + t.Fatalf("Set succeeded after first Set") + } + n := int(testing.AllocsPerRun(1000, func() { + got := gm.Get("42", fortyTwo) + if got != 42 { + t.Fatalf("got %v; want 42", got) + } + })) + if n != 0 { + t.Errorf("allocs = %v; want 0", n) + } +} + +func TestGMapMustSet(t *testing.T) { + var gm GMap[string, int] + gm.MustSet("42", 42) + defer func() { + if e := recover(); e == nil { + t.Errorf("unexpected success; want panic") + } + }() + gm.MustSet("42", 43) +} + +func TestGMapRecursivePanic(t *testing.T) { + defer func() { + if e := recover(); e != nil { + t.Logf("got panic, as expected") + } else { + t.Errorf("unexpected success; want panic") + } + }() + gm := GMap[string, int]{} + gm.Get("42", func() int { + return gm.Get("42", func() int { return 42 }) + }) +} From bcaea4f24597d840d8a0fd94cafbb2dc0ff7a774 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Mon, 14 Jul 2025 15:17:20 +0100 Subject: [PATCH 1089/1708] k8s-operator,sessionrecording: fixing race condition between resize (#16454) messages and cast headers when recording `kubectl attach` sessions Updates #16490 Signed-off-by: chaosinthecrd --- k8s-operator/api-proxy/proxy.go | 51 ++++--- k8s-operator/sessionrecording/fakes/fakes.go | 12 +- k8s-operator/sessionrecording/hijacker.go | 63 +++++--- .../sessionrecording/hijacker_test.go | 2 +- k8s-operator/sessionrecording/spdy/conn.go | 98 +++++++----- .../sessionrecording/spdy/conn_test.go | 98 ++++++------ .../sessionrecording/tsrecorder/tsrecorder.go | 1 + k8s-operator/sessionrecording/ws/conn.go | 115 ++++++++------ k8s-operator/sessionrecording/ws/conn_test.go | 144 ++++++++++-------- sessionrecording/header.go | 10 +- 10 files changed, 351 insertions(+), 243 deletions(-) diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index c3c13e784..d33c088de 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -22,6 +22,7 @@ import ( "k8s.io/client-go/transport" 
"tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" + "tailscale.com/k8s-operator/sessionrecording" ksr "tailscale.com/k8s-operator/sessionrecording" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" @@ -49,6 +50,7 @@ func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsn if !authMode { restConfig = rest.AnonymousClientConfig(restConfig) } + cfg, err := restConfig.TransportConfig() if err != nil { return nil, fmt.Errorf("could not get rest.TransportConfig(): %w", err) @@ -111,6 +113,8 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { mux.HandleFunc("/", ap.serveDefault) mux.HandleFunc("POST /api/v1/namespaces/{namespace}/pods/{pod}/exec", ap.serveExecSPDY) mux.HandleFunc("GET /api/v1/namespaces/{namespace}/pods/{pod}/exec", ap.serveExecWS) + mux.HandleFunc("POST /api/v1/namespaces/{namespace}/pods/{pod}/attach", ap.serveAttachSPDY) + mux.HandleFunc("GET /api/v1/namespaces/{namespace}/pods/{pod}/attach", ap.serveAttachWS) ap.hs = &http.Server{ // Kubernetes uses SPDY for exec and port-forward, however SPDY is @@ -165,19 +169,31 @@ func (ap *APIServerProxy) serveDefault(w http.ResponseWriter, r *http.Request) { ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } -// serveExecSPDY serves 'kubectl exec' requests for sessions streamed over SPDY, +// serveExecSPDY serves '/exec' requests for sessions streamed over SPDY, // optionally configuring the kubectl exec sessions to be recorded. func (ap *APIServerProxy) serveExecSPDY(w http.ResponseWriter, r *http.Request) { - ap.execForProto(w, r, ksr.SPDYProtocol) + ap.sessionForProto(w, r, ksr.ExecSessionType, ksr.SPDYProtocol) } -// serveExecWS serves 'kubectl exec' requests for sessions streamed over WebSocket, +// serveExecWS serves '/exec' requests for sessions streamed over WebSocket, // optionally configuring the kubectl exec sessions to be recorded. func (ap *APIServerProxy) serveExecWS(w http.ResponseWriter, r *http.Request) { - ap.execForProto(w, r, ksr.WSProtocol) + ap.sessionForProto(w, r, ksr.ExecSessionType, ksr.WSProtocol) +} + +// serveExecSPDY serves '/attach' requests for sessions streamed over SPDY, +// optionally configuring the kubectl exec sessions to be recorded. +func (ap *APIServerProxy) serveAttachSPDY(w http.ResponseWriter, r *http.Request) { + ap.sessionForProto(w, r, ksr.AttachSessionType, ksr.SPDYProtocol) +} + +// serveExecWS serves '/attach' requests for sessions streamed over WebSocket, +// optionally configuring the kubectl exec sessions to be recorded. 
+func (ap *APIServerProxy) serveAttachWS(w http.ResponseWriter, r *http.Request) { + ap.sessionForProto(w, r, ksr.AttachSessionType, ksr.WSProtocol) } -func (ap *APIServerProxy) execForProto(w http.ResponseWriter, r *http.Request, proto ksr.Protocol) { +func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request, sessionType sessionrecording.SessionType, proto ksr.Protocol) { const ( podNameKey = "pod" namespaceNameKey = "namespace" @@ -192,7 +208,7 @@ func (ap *APIServerProxy) execForProto(w http.ResponseWriter, r *http.Request, p counterNumRequestsProxied.Add(1) failOpen, addrs, err := determineRecorderConfig(who) if err != nil { - ap.log.Errorf("error trying to determine whether the 'kubectl exec' session needs to be recorded: %v", err) + ap.log.Errorf("error trying to determine whether the 'kubectl %s' session needs to be recorded: %v", sessionType, err) return } if failOpen && len(addrs) == 0 { // will not record @@ -201,7 +217,7 @@ func (ap *APIServerProxy) execForProto(w http.ResponseWriter, r *http.Request, p } ksr.CounterSessionRecordingsAttempted.Add(1) // at this point we know that users intended for this session to be recorded if !failOpen && len(addrs) == 0 { - msg := "forbidden: 'kubectl exec' session must be recorded, but no recorders are available." + msg := fmt.Sprintf("forbidden: 'kubectl %s' session must be recorded, but no recorders are available.", sessionType) ap.log.Error(msg) http.Error(w, msg, http.StatusForbidden) return @@ -223,16 +239,17 @@ func (ap *APIServerProxy) execForProto(w http.ResponseWriter, r *http.Request, p } opts := ksr.HijackerOpts{ - Req: r, - W: w, - Proto: proto, - TS: ap.ts, - Who: who, - Addrs: addrs, - FailOpen: failOpen, - Pod: r.PathValue(podNameKey), - Namespace: r.PathValue(namespaceNameKey), - Log: ap.log, + Req: r, + W: w, + Proto: proto, + SessionType: sessionType, + TS: ap.ts, + Who: who, + Addrs: addrs, + FailOpen: failOpen, + Pod: r.PathValue(podNameKey), + Namespace: r.PathValue(namespaceNameKey), + Log: ap.log, } h := ksr.New(opts) diff --git a/k8s-operator/sessionrecording/fakes/fakes.go b/k8s-operator/sessionrecording/fakes/fakes.go index 9eb1047e4..94853df19 100644 --- a/k8s-operator/sessionrecording/fakes/fakes.go +++ b/k8s-operator/sessionrecording/fakes/fakes.go @@ -10,13 +10,13 @@ package fakes import ( "bytes" "encoding/json" + "fmt" + "math/rand" "net" "sync" "testing" "time" - "math/rand" - "tailscale.com/sessionrecording" "tailscale.com/tstime" ) @@ -107,7 +107,13 @@ func CastLine(t *testing.T, p []byte, clock tstime.Clock) []byte { return append(j, '\n') } -func AsciinemaResizeMsg(t *testing.T, width, height int) []byte { +func AsciinemaCastResizeMsg(t *testing.T, width, height int) []byte { + msg := fmt.Sprintf(`[0,"r","%dx%d"]`, height, width) + + return append([]byte(msg), '\n') +} + +func AsciinemaCastHeaderMsg(t *testing.T, width, height int) []byte { t.Helper() ch := sessionrecording.CastHeader{ Width: width, diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index a9ed65896..e8c534afc 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -4,7 +4,7 @@ //go:build !plan9 // Package sessionrecording contains functionality for recording Kubernetes API -// server proxy 'kubectl exec' sessions. +// server proxy 'kubectl exec/attach' sessions. 
package sessionrecording import ( @@ -35,14 +35,20 @@ import ( ) const ( - SPDYProtocol Protocol = "SPDY" - WSProtocol Protocol = "WebSocket" + SPDYProtocol Protocol = "SPDY" + WSProtocol Protocol = "WebSocket" + ExecSessionType SessionType = "exec" + AttachSessionType SessionType = "attach" ) // Protocol is the streaming protocol of the hijacked session. Supported // protocols are SPDY and WebSocket. type Protocol string +// SessionType is the type of session initiated with `kubectl` +// (`exec` or `attach`) +type SessionType string + var ( // CounterSessionRecordingsAttempted counts the number of session recording attempts. CounterSessionRecordingsAttempted = clientmetric.NewCounter("k8s_auth_proxy_session_recordings_attempted") @@ -63,25 +69,27 @@ func New(opts HijackerOpts) *Hijacker { failOpen: opts.FailOpen, proto: opts.Proto, log: opts.Log, + sessionType: opts.SessionType, connectToRecorder: sessionrecording.ConnectToRecorder, } } type HijackerOpts struct { - TS *tsnet.Server - Req *http.Request - W http.ResponseWriter - Who *apitype.WhoIsResponse - Addrs []netip.AddrPort - Log *zap.SugaredLogger - Pod string - Namespace string - FailOpen bool - Proto Protocol + TS *tsnet.Server + Req *http.Request + W http.ResponseWriter + Who *apitype.WhoIsResponse + Addrs []netip.AddrPort + Log *zap.SugaredLogger + Pod string + Namespace string + FailOpen bool + Proto Protocol + SessionType SessionType } // Hijacker implements [net/http.Hijacker] interface. -// It must be configured with an http request for a 'kubectl exec' session that +// It must be configured with an http request for a 'kubectl exec/attach' session that // needs to be recorded. It knows how to hijack the connection and configure for // the session contents to be sent to a tsrecorder instance. type Hijacker struct { @@ -90,12 +98,13 @@ type Hijacker struct { req *http.Request who *apitype.WhoIsResponse log *zap.SugaredLogger - pod string // pod being exec-d - ns string // namespace of the pod being exec-d + pod string // pod being exec/attach-d + ns string // namespace of the pod being exec/attach-d addrs []netip.AddrPort // tsrecorder addresses failOpen bool // whether to fail open if recording fails connectToRecorder RecorderDialFn - proto Protocol // streaming protocol + proto Protocol // streaming protocol + sessionType SessionType // subcommand, e.g., "exec, attach" } // RecorderDialFn dials the specified netip.AddrPorts that should be tsrecorder @@ -105,7 +114,7 @@ type Hijacker struct { // after having been established, an error is sent down the channel. type RecorderDialFn func(context.Context, []netip.AddrPort, netx.DialFunc) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error) -// Hijack hijacks a 'kubectl exec' session and configures for the session +// Hijack hijacks a 'kubectl exec/attach' session and configures for the session // contents to be sent to a recorder. 
func (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { h.log.Infof("recorder addrs: %v, failOpen: %v", h.addrs, h.failOpen) @@ -114,7 +123,7 @@ func (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { return nil, nil, fmt.Errorf("error hijacking connection: %w", err) } - conn, err := h.setUpRecording(context.Background(), reqConn) + conn, err := h.setUpRecording(h.req.Context(), reqConn) if err != nil { return nil, nil, fmt.Errorf("error setting up session recording: %w", err) } @@ -138,7 +147,7 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, err error errChan <-chan error ) - h.log.Infof("kubectl exec session will be recorded, recorders: %v, fail open policy: %t", h.addrs, h.failOpen) + h.log.Infof("kubectl %s session will be recorded, recorders: %v, fail open policy: %t", h.sessionType, h.addrs, h.failOpen) qp := h.req.URL.Query() container := strings.Join(qp[containerKey], "") var recorderAddr net.Addr @@ -161,7 +170,7 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, } return nil, errors.New(msg) } else { - h.log.Infof("exec session to container %q in Pod %q namespace %q will be recorded, the recording will be sent to a tsrecorder instance at %q", container, h.pod, h.ns, recorderAddr) + h.log.Infof("%s session to container %q in Pod %q namespace %q will be recorded, the recording will be sent to a tsrecorder instance at %q", h.sessionType, container, h.pod, h.ns, recorderAddr) } cl := tstime.DefaultClock{} @@ -190,9 +199,15 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, var lc net.Conn switch h.proto { case SPDYProtocol: - lc = spdy.New(conn, rec, ch, hasTerm, h.log) + lc, err = spdy.New(ctx, conn, rec, ch, hasTerm, h.log) + if err != nil { + return nil, fmt.Errorf("failed to initialize spdy connection: %w", err) + } case WSProtocol: - lc = ws.New(conn, rec, ch, hasTerm, h.log) + lc, err = ws.New(ctx, conn, rec, ch, hasTerm, h.log) + if err != nil { + return nil, fmt.Errorf("failed to initialize websocket connection: %w", err) + } default: return nil, fmt.Errorf("unknown protocol: %s", h.proto) } @@ -209,7 +224,7 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, h.log.Info("finished uploading the recording") return } - msg := fmt.Sprintf("connection to the session recorder errorred: %v;", err) + msg := fmt.Sprintf("connection to the session recorder errored: %v;", err) if h.failOpen { msg += msg + "; failure mode is 'fail open'; continuing session without recording." h.log.Info(msg) diff --git a/k8s-operator/sessionrecording/hijacker_test.go b/k8s-operator/sessionrecording/hijacker_test.go index 880015b22..cac6f55c7 100644 --- a/k8s-operator/sessionrecording/hijacker_test.go +++ b/k8s-operator/sessionrecording/hijacker_test.go @@ -91,7 +91,7 @@ func Test_Hijacker(t *testing.T) { who: &apitype.WhoIsResponse{Node: &tailcfg.Node{}, UserProfile: &tailcfg.UserProfile{}}, log: zl.Sugar(), ts: &tsnet.Server{}, - req: &http.Request{URL: &url.URL{}}, + req: &http.Request{URL: &url.URL{RawQuery: "tty=true"}}, proto: tt.proto, } ctx := context.Background() diff --git a/k8s-operator/sessionrecording/spdy/conn.go b/k8s-operator/sessionrecording/spdy/conn.go index 455c2225a..9fefca11f 100644 --- a/k8s-operator/sessionrecording/spdy/conn.go +++ b/k8s-operator/sessionrecording/spdy/conn.go @@ -4,11 +4,12 @@ //go:build !plan9 // Package spdy contains functionality for parsing SPDY streaming sessions. 
This -// is used for 'kubectl exec' session recording. +// is used for 'kubectl exec/attach' session recording. package spdy import ( "bytes" + "context" "encoding/binary" "encoding/json" "fmt" @@ -24,29 +25,50 @@ import ( ) // New wraps the provided network connection and returns a connection whose reads and writes will get triggered as data is received on the hijacked connection. -// The connection must be a hijacked connection for a 'kubectl exec' session using SPDY. +// The connection must be a hijacked connection for a 'kubectl exec/attach' session using SPDY. // The hijacked connection is used to transmit SPDY streams between Kubernetes client ('kubectl') and the destination container. // Data read from the underlying network connection is data sent via one of the SPDY streams from the client to the container. // Data written to the underlying connection is data sent from the container to the client. // We parse the data and send everything for the stdout/stderr streams to the configured tsrecorder as an asciinema recording with the provided header. // https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/4006-transition-spdy-to-websockets#background-remotecommand-subprotocol -func New(nc net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, hasTerm bool, log *zap.SugaredLogger) net.Conn { - return &conn{ - Conn: nc, - rec: rec, - ch: ch, - log: log, - hasTerm: hasTerm, - initialTermSizeSet: make(chan struct{}), +func New(ctx context.Context, nc net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, hasTerm bool, log *zap.SugaredLogger) (net.Conn, error) { + lc := &conn{ + Conn: nc, + ctx: ctx, + rec: rec, + ch: ch, + log: log, + hasTerm: hasTerm, + initialCastHeaderSent: make(chan struct{}, 1), } + + // if there is no term, we don't need to wait for a resize message + if !hasTerm { + var err error + lc.writeCastHeaderOnce.Do(func() { + // If this is a session with a terminal attached, + // we must wait for the terminal width and + // height to be parsed from a resize message + // before sending CastHeader, else tsrecorder + // will not be able to play this recording. + err = lc.rec.WriteCastHeader(ch) + close(lc.initialCastHeaderSent) + }) + if err != nil { + return nil, fmt.Errorf("error writing CastHeader: %w", err) + } + } + + return lc, nil } // conn is a wrapper around net.Conn. It reads the bytestream for a 'kubectl -// exec' session streamed using SPDY protocol, sends session recording data to +// exec/attach' session streamed using SPDY protocol, sends session recording data to // the configured recorder and forwards the raw bytes to the original // destination. type conn struct { net.Conn + ctx context.Context // rec knows how to send data written to it to a tsrecorder instance. rec *tsrecorder.Client @@ -63,7 +85,7 @@ type conn struct { // CastHeader must be sent before any payload. If the session has a // terminal attached, the CastHeader must have '.Width' and '.Height' // fields set for the tsrecorder UI to be able to play the recording. - // For 'kubectl exec' sessions, terminal width and height are sent as a + // For 'kubectl exec/attach' sessions, terminal width and height are sent as a // resize message on resize stream from the client when the session // starts as well as at any time the client detects a terminal change. // We can intercept the resize message on Read calls. As there is no @@ -79,15 +101,10 @@ type conn struct { // writeCastHeaderOnce is used to ensure CastHeader gets sent to tsrecorder once. 
writeCastHeaderOnce sync.Once hasTerm bool // whether the session had TTY attached - // initialTermSizeSet channel gets sent a value once, when the Read has - // received a resize message and set the initial terminal size. It must - // be set to a buffered channel to prevent Reads being blocked on the - // first stdout/stderr write reading from the channel. - initialTermSizeSet chan struct{} - // sendInitialTermSizeSetOnce is used to ensure that a value is sent to - // initialTermSizeSet channel only once, when the initial resize message - // is received. - sendinitialTermSizeSetOnce sync.Once + // initialCastHeaderSent is a channel to ensure that the cast + // header is the first thing that is streamed to the session recorder. + // Otherwise the stream will fail. + initialCastHeaderSent chan struct{} zlibReqReader zlibReader // writeBuf is used to store data written to the connection that has not @@ -124,7 +141,7 @@ func (c *conn) Read(b []byte) (int, error) { } c.readBuf.Next(len(sf.Raw)) // advance buffer past the parsed frame - if !sf.Ctrl { // data frame + if !sf.Ctrl && c.hasTerm { // data frame switch sf.StreamID { case c.resizeStreamID.Load(): @@ -140,10 +157,19 @@ func (c *conn) Read(b []byte) (int, error) { // subsequent resize message, we need to send asciinema // resize message. var isInitialResize bool - c.sendinitialTermSizeSetOnce.Do(func() { + c.writeCastHeaderOnce.Do(func() { isInitialResize = true - close(c.initialTermSizeSet) // unblock sending of CastHeader + // If this is a session with a terminal attached, + // we must wait for the terminal width and + // height to be parsed from a resize message + // before sending CastHeader, else tsrecorder + // will not be able to play this recording. + err = c.rec.WriteCastHeader(c.ch) + close(c.initialCastHeaderSent) }) + if err != nil { + return 0, fmt.Errorf("error writing CastHeader: %w", err) + } if !isInitialResize { if err := c.rec.WriteResize(c.ch.Height, c.ch.Width); err != nil { return 0, fmt.Errorf("error writing resize message: %w", err) @@ -190,24 +216,14 @@ func (c *conn) Write(b []byte) (int, error) { if !sf.Ctrl { switch sf.StreamID { case c.stdoutStreamID.Load(), c.stderrStreamID.Load(): - var err error - c.writeCastHeaderOnce.Do(func() { - // If this is a session with a terminal attached, - // we must wait for the terminal width and - // height to be parsed from a resize message - // before sending CastHeader, else tsrecorder - // will not be able to play this recording. 
- if c.hasTerm { - c.log.Debugf("write: waiting for the initial terminal size to be set before proceeding with sending the first payload") - <-c.initialTermSizeSet + // we must wait for confirmation that the initial cast header was sent before proceeding with any more writes + select { + case <-c.ctx.Done(): + return 0, c.ctx.Err() + case <-c.initialCastHeaderSent: + if err := c.rec.WriteOutput(sf.Payload); err != nil { + return 0, fmt.Errorf("error sending payload to session recorder: %w", err) } - err = c.rec.WriteCastHeader(c.ch) - }) - if err != nil { - return 0, fmt.Errorf("error writing CastHeader: %w", err) - } - if err := c.rec.WriteOutput(sf.Payload); err != nil { - return 0, fmt.Errorf("error sending payload to session recorder: %w", err) } } } diff --git a/k8s-operator/sessionrecording/spdy/conn_test.go b/k8s-operator/sessionrecording/spdy/conn_test.go index 3485d61c4..3c1cb8427 100644 --- a/k8s-operator/sessionrecording/spdy/conn_test.go +++ b/k8s-operator/sessionrecording/spdy/conn_test.go @@ -6,10 +6,12 @@ package spdy import ( + "context" "encoding/json" "fmt" "reflect" "testing" + "time" "go.uber.org/zap" "tailscale.com/k8s-operator/sessionrecording/fakes" @@ -29,15 +31,11 @@ func Test_Writes(t *testing.T) { } cl := tstest.NewClock(tstest.ClockOpts{}) tests := []struct { - name string - inputs [][]byte - wantForwarded []byte - wantRecorded []byte - firstWrite bool - width int - height int - sendInitialResize bool - hasTerm bool + name string + inputs [][]byte + wantForwarded []byte + wantRecorded []byte + hasTerm bool }{ { name: "single_write_control_frame_with_payload", @@ -78,24 +76,17 @@ func Test_Writes(t *testing.T) { wantRecorded: fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), }, { - name: "single_first_write_stdout_data_frame_with_payload_sess_has_terminal", - inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, - wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, - wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl)...), - width: 10, - height: 20, - hasTerm: true, - firstWrite: true, - sendInitialResize: true, + name: "single_first_write_stdout_data_frame_with_payload_sess_has_terminal", + inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, + wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, + wantRecorded: fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), + hasTerm: true, }, { name: "single_first_write_stdout_data_frame_with_payload_sess_does_not_have_terminal", inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, - wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl)...), - width: 10, - height: 20, - firstWrite: true, + wantRecorded: fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), }, } for _, tt := range tests { @@ -104,29 +95,25 @@ func Test_Writes(t *testing.T) { sr := &fakes.TestSessionRecorder{} rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() c := &conn{ - Conn: tc, - log: zl.Sugar(), - rec: rec, - ch: sessionrecording.CastHeader{ - Width: tt.width, - Height: tt.height, - }, - initialTermSizeSet: make(chan struct{}), - hasTerm: tt.hasTerm, - } - if !tt.firstWrite 
{ - // this test case does not intend to test that cast header gets written once - c.writeCastHeaderOnce.Do(func() {}) - } - if tt.sendInitialResize { - close(c.initialTermSizeSet) + ctx: ctx, + Conn: tc, + log: zl.Sugar(), + rec: rec, + ch: sessionrecording.CastHeader{}, + initialCastHeaderSent: make(chan struct{}), + hasTerm: tt.hasTerm, } + c.writeCastHeaderOnce.Do(func() { + close(c.initialCastHeaderSent) + }) + c.stdoutStreamID.Store(stdoutStreamID) c.stderrStreamID.Store(stderrStreamID) for i, input := range tt.inputs { - c.hasTerm = tt.hasTerm if _, err := c.Write(input); err != nil { t.Errorf("[%d] spdyRemoteConnRecorder.Write() unexpected error %v", i, err) } @@ -171,11 +158,25 @@ func Test_Reads(t *testing.T) { wantResizeStreamID uint32 wantWidth int wantHeight int + wantRecorded []byte resizeStreamIDBeforeRead uint32 }{ { name: "resize_data_frame_single_read", inputs: [][]byte{append([]byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(resizeMsg))}, resizeMsg...)}, + wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), + resizeStreamIDBeforeRead: 1, + wantWidth: 10, + wantHeight: 20, + }, + { + name: "resize_data_frame_many", + inputs: [][]byte{ + append([]byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(resizeMsg))}, resizeMsg...), + append([]byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(resizeMsg))}, resizeMsg...), + }, + wantRecorded: append(fakes.AsciinemaCastHeaderMsg(t, 10, 20), fakes.AsciinemaCastResizeMsg(t, 10, 20)...), + resizeStreamIDBeforeRead: 1, wantWidth: 10, wantHeight: 20, @@ -183,6 +184,7 @@ func Test_Reads(t *testing.T) { { name: "resize_data_frame_two_reads", inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(resizeMsg))}, resizeMsg}, + wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), resizeStreamIDBeforeRead: 1, wantWidth: 10, wantHeight: 20, @@ -215,11 +217,15 @@ func Test_Reads(t *testing.T) { tc := &fakes.TestConn{} sr := &fakes.TestSessionRecorder{} rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() c := &conn{ - Conn: tc, - log: zl.Sugar(), - rec: rec, - initialTermSizeSet: make(chan struct{}), + ctx: ctx, + Conn: tc, + log: zl.Sugar(), + rec: rec, + initialCastHeaderSent: make(chan struct{}), + hasTerm: true, } c.resizeStreamID.Store(tt.resizeStreamIDBeforeRead) @@ -251,6 +257,12 @@ func Test_Reads(t *testing.T) { t.Errorf("want height: %v, got %v", tt.wantHeight, c.ch.Height) } } + + // Assert that the expected bytes have been forwarded to the session recorder. 
+ gotRecorded := sr.Bytes() + if !reflect.DeepEqual(gotRecorded, tt.wantRecorded) { + t.Errorf("expected bytes not recorded, wants\n%v\ngot\n%v", tt.wantRecorded, gotRecorded) + } }) } } diff --git a/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go b/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go index af5fcb8da..a5bdf7ddd 100644 --- a/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go +++ b/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go @@ -25,6 +25,7 @@ func New(conn io.WriteCloser, clock tstime.Clock, start time.Time, failOpen bool clock: clock, conn: conn, failOpen: failOpen, + logger: logger, } } diff --git a/k8s-operator/sessionrecording/ws/conn.go b/k8s-operator/sessionrecording/ws/conn.go index 86029f67b..0d8aefaac 100644 --- a/k8s-operator/sessionrecording/ws/conn.go +++ b/k8s-operator/sessionrecording/ws/conn.go @@ -3,12 +3,13 @@ //go:build !plan9 -// package ws has functionality to parse 'kubectl exec' sessions streamed using +// package ws has functionality to parse 'kubectl exec/attach' sessions streamed using // WebSocket protocol. package ws import ( "bytes" + "context" "encoding/json" "errors" "fmt" @@ -24,31 +25,53 @@ import ( ) // New wraps the provided network connection and returns a connection whose reads and writes will get triggered as data is received on the hijacked connection. -// The connection must be a hijacked connection for a 'kubectl exec' session using WebSocket protocol and a *.channel.k8s.io subprotocol. +// The connection must be a hijacked connection for a 'kubectl exec/attach' session using WebSocket protocol and a *.channel.k8s.io subprotocol. // The hijacked connection is used to transmit *.channel.k8s.io streams between Kubernetes client ('kubectl') and the destination proxy controlled by Kubernetes. // Data read from the underlying network connection is data sent via one of the streams from the client to the container. // Data written to the underlying connection is data sent from the container to the client. // We parse the data and send everything for the stdout/stderr streams to the configured tsrecorder as an asciinema recording with the provided header. // https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/4006-transition-spdy-to-websockets#proposal-new-remotecommand-sub-protocol-version---v5channelk8sio -func New(c net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, hasTerm bool, log *zap.SugaredLogger) net.Conn { - return &conn{ - Conn: c, - rec: rec, - ch: ch, - hasTerm: hasTerm, - log: log, - initialTermSizeSet: make(chan struct{}, 1), +func New(ctx context.Context, c net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, hasTerm bool, log *zap.SugaredLogger) (net.Conn, error) { + lc := &conn{ + Conn: c, + ctx: ctx, + rec: rec, + ch: ch, + hasTerm: hasTerm, + log: log, + initialCastHeaderSent: make(chan struct{}, 1), } + + // if there is no term, we don't need to wait for a resize message + if !hasTerm { + var err error + lc.writeCastHeaderOnce.Do(func() { + // If this is a session with a terminal attached, + // we must wait for the terminal width and + // height to be parsed from a resize message + // before sending CastHeader, else tsrecorder + // will not be able to play this recording. + err = lc.rec.WriteCastHeader(ch) + close(lc.initialCastHeaderSent) + }) + if err != nil { + return nil, fmt.Errorf("error writing CastHeader: %w", err) + } + } + + return lc, nil } // conn is a wrapper around net.Conn. 
It reads the bytestream -// for a 'kubectl exec' session, sends session recording data to the configured +// for a 'kubectl exec/attach' session, sends session recording data to the configured // recorder and forwards the raw bytes to the original destination. // A new conn is created per session. -// conn only knows to how to read a 'kubectl exec' session that is streamed using WebSocket protocol. +// conn only knows to how to read a 'kubectl exec/attach' session that is streamed using WebSocket protocol. // https://www.rfc-editor.org/rfc/rfc6455 type conn struct { net.Conn + + ctx context.Context // rec knows how to send data to a tsrecorder instance. rec *tsrecorder.Client @@ -56,7 +79,7 @@ type conn struct { // CastHeader must be sent before any payload. If the session has a // terminal attached, the CastHeader must have '.Width' and '.Height' // fields set for the tsrecorder UI to be able to play the recording. - // For 'kubectl exec' sessions, terminal width and height are sent as a + // For 'kubectl exec/attach' sessions, terminal width and height are sent as a // resize message on resize stream from the client when the session // starts as well as at any time the client detects a terminal change. // We can intercept the resize message on Read calls. As there is no @@ -72,15 +95,10 @@ type conn struct { // writeCastHeaderOnce is used to ensure CastHeader gets sent to tsrecorder once. writeCastHeaderOnce sync.Once hasTerm bool // whether the session has TTY attached - // initialTermSizeSet channel gets sent a value once, when the Read has - // received a resize message and set the initial terminal size. It must - // be set to a buffered channel to prevent Reads being blocked on the - // first stdout/stderr write reading from the channel. - initialTermSizeSet chan struct{} - // sendInitialTermSizeSetOnce is used to ensure that a value is sent to - // initialTermSizeSet channel only once, when the initial resize message - // is received. - sendInitialTermSizeSetOnce sync.Once + // initialCastHeaderSent is a boolean that is set to ensure that the cast + // header is the first thing that is streamed to the session recorder. + // Otherwise the stream will fail. + initialCastHeaderSent chan struct{} log *zap.SugaredLogger @@ -171,9 +189,10 @@ func (c *conn) Read(b []byte) (int, error) { c.readBuf.Next(len(readMsg.raw)) if readMsg.isFinalized && !c.readMsgIsIncomplete() { + // we want to send stream resize messages for terminal sessions // Stream IDs for websocket streams are static. // https://github.com/kubernetes/client-go/blob/v0.30.0-rc.1/tools/remotecommand/websocket.go#L218 - if readMsg.streamID.Load() == remotecommand.StreamResize { + if readMsg.streamID.Load() == remotecommand.StreamResize && c.hasTerm { var msg tsrecorder.ResizeMsg if err = json.Unmarshal(readMsg.payload, &msg); err != nil { return 0, fmt.Errorf("error umarshalling resize message: %w", err) @@ -182,22 +201,29 @@ func (c *conn) Read(b []byte) (int, error) { c.ch.Width = msg.Width c.ch.Height = msg.Height - // If this is initial resize message, the width and - // height will be sent in the CastHeader. If this is a - // subsequent resize message, we need to send asciinema - // resize message. 
var isInitialResize bool - c.sendInitialTermSizeSetOnce.Do(func() { + c.writeCastHeaderOnce.Do(func() { isInitialResize = true - close(c.initialTermSizeSet) // unblock sending of CastHeader + // If this is a session with a terminal attached, + // we must wait for the terminal width and + // height to be parsed from a resize message + // before sending CastHeader, else tsrecorder + // will not be able to play this recording. + err = c.rec.WriteCastHeader(c.ch) + close(c.initialCastHeaderSent) }) + if err != nil { + return 0, fmt.Errorf("error writing CastHeader: %w", err) + } + if !isInitialResize { - if err := c.rec.WriteResize(c.ch.Height, c.ch.Width); err != nil { + if err := c.rec.WriteResize(msg.Height, msg.Width); err != nil { return 0, fmt.Errorf("error writing resize message: %w", err) } } } } + c.currentReadMsg = readMsg return n, nil } @@ -244,39 +270,33 @@ func (c *conn) Write(b []byte) (int, error) { c.log.Errorf("write: parsing a message errored: %v", err) return 0, fmt.Errorf("write: error parsing message: %v", err) } + c.currentWriteMsg = writeMsg if !ok { // incomplete fragment return len(b), nil } + c.writeBuf.Next(len(writeMsg.raw)) // advance frame if len(writeMsg.payload) != 0 && writeMsg.isFinalized { if writeMsg.streamID.Load() == remotecommand.StreamStdOut || writeMsg.streamID.Load() == remotecommand.StreamStdErr { - var err error - c.writeCastHeaderOnce.Do(func() { - // If this is a session with a terminal attached, - // we must wait for the terminal width and - // height to be parsed from a resize message - // before sending CastHeader, else tsrecorder - // will not be able to play this recording. - if c.hasTerm { - c.log.Debug("waiting for terminal size to be set before starting to send recorded data") - <-c.initialTermSizeSet + // we must wait for confirmation that the initial cast header was sent before proceeding with any more writes + select { + case <-c.ctx.Done(): + return 0, c.ctx.Err() + case <-c.initialCastHeaderSent: + if err := c.rec.WriteOutput(writeMsg.payload); err != nil { + return 0, fmt.Errorf("error writing message to recorder: %w", err) } - err = c.rec.WriteCastHeader(c.ch) - }) - if err != nil { - return 0, fmt.Errorf("error writing CastHeader: %w", err) - } - if err := c.rec.WriteOutput(writeMsg.payload); err != nil { - return 0, fmt.Errorf("error writing message to recorder: %v", err) } } } + _, err = c.Conn.Write(c.currentWriteMsg.raw) if err != nil { c.log.Errorf("write: error writing to conn: %v", err) } + return len(b), nil } @@ -321,6 +341,7 @@ func (c *conn) writeMsgIsIncomplete() bool { func (c *conn) readMsgIsIncomplete() bool { return c.currentReadMsg != nil && !c.currentReadMsg.isFinalized } + func (c *conn) curReadMsgType() (messageType, error) { if c.currentReadMsg != nil { return c.currentReadMsg.typ, nil diff --git a/k8s-operator/sessionrecording/ws/conn_test.go b/k8s-operator/sessionrecording/ws/conn_test.go index 11174480b..f29154c62 100644 --- a/k8s-operator/sessionrecording/ws/conn_test.go +++ b/k8s-operator/sessionrecording/ws/conn_test.go @@ -6,9 +6,11 @@ package ws import ( + "context" "fmt" "reflect" "testing" + "time" "go.uber.org/zap" "k8s.io/apimachinery/pkg/util/remotecommand" @@ -26,46 +28,69 @@ func Test_conn_Read(t *testing.T) { // Resize stream ID + {"width": 10, "height": 20} testResizeMsg := []byte{byte(remotecommand.StreamResize), 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d} lenResizeMsgPayload := 
byte(len(testResizeMsg)) - + cl := tstest.NewClock(tstest.ClockOpts{}) tests := []struct { - name string - inputs [][]byte - wantWidth int - wantHeight int + name string + inputs [][]byte + wantCastHeaderWidth int + wantCastHeaderHeight int + wantRecorded []byte }{ { name: "single_read_control_message", inputs: [][]byte{{0x88, 0x0}}, }, { - name: "single_read_resize_message", - inputs: [][]byte{append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...)}, - wantWidth: 10, - wantHeight: 20, + name: "single_read_resize_message", + inputs: [][]byte{append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...)}, + wantCastHeaderWidth: 10, + wantCastHeaderHeight: 20, + wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), }, { - name: "two_reads_resize_message", - inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d}}, - wantWidth: 10, - wantHeight: 20, + name: "resize_data_frame_many", + inputs: [][]byte{ + append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...), + append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...), + }, + wantRecorded: append(fakes.AsciinemaCastHeaderMsg(t, 10, 20), fakes.AsciinemaCastResizeMsg(t, 10, 20)...), + wantCastHeaderWidth: 10, + wantCastHeaderHeight: 20, }, { - name: "three_reads_resize_message_with_split_fragment", - inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74}, {0x22, 0x3a, 0x32, 0x30, 0x7d}}, - wantWidth: 10, - wantHeight: 20, + name: "two_reads_resize_message", + inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d}}, + wantCastHeaderWidth: 10, + wantCastHeaderHeight: 20, + wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), + }, + { + name: "three_reads_resize_message_with_split_fragment", + inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74}, {0x22, 0x3a, 0x32, 0x30, 0x7d}}, + wantCastHeaderWidth: 10, + wantCastHeaderHeight: 20, + wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + l := zl.Sugar() tc := &fakes.TestConn{} + sr := &fakes.TestSessionRecorder{} + rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) tc.ResetReadBuf() + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() c := &conn{ - Conn: tc, - log: zl.Sugar(), + ctx: ctx, + Conn: tc, + log: l, + hasTerm: true, + initialCastHeaderSent: make(chan struct{}), + rec: rec, } for i, input := range tt.inputs { - c.initialTermSizeSet = make(chan struct{}) if err := tc.WriteReadBufBytes(input); err != nil { t.Fatalf("writing bytes to test conn: %v", err) } @@ -75,14 +100,20 @@ func Test_conn_Read(t *testing.T) { return } } - if tt.wantHeight != 0 || tt.wantWidth != 0 { - if tt.wantWidth != c.ch.Width { - t.Errorf("wants width: %v, got %v", tt.wantWidth, c.ch.Width) + + if tt.wantCastHeaderHeight != 0 || tt.wantCastHeaderWidth != 0 { + if tt.wantCastHeaderWidth != c.ch.Width { + t.Errorf("wants width: %v, got %v", tt.wantCastHeaderWidth, c.ch.Width) } - if tt.wantHeight != c.ch.Height { - t.Errorf("want height: %v, got %v", tt.wantHeight, c.ch.Height) + if 
tt.wantCastHeaderHeight != c.ch.Height { + t.Errorf("want height: %v, got %v", tt.wantCastHeaderHeight, c.ch.Height) } } + + gotRecorded := sr.Bytes() + if !reflect.DeepEqual(gotRecorded, tt.wantRecorded) { + t.Errorf("expected bytes not recorded, wants\n%v\ngot\n%v", string(tt.wantRecorded), string(gotRecorded)) + } }) } } @@ -94,15 +125,11 @@ func Test_conn_Write(t *testing.T) { } cl := tstest.NewClock(tstest.ClockOpts{}) tests := []struct { - name string - inputs [][]byte - wantForwarded []byte - wantRecorded []byte - firstWrite bool - width int - height int - hasTerm bool - sendInitialResize bool + name string + inputs [][]byte + wantForwarded []byte + wantRecorded []byte + hasTerm bool }{ { name: "single_write_control_frame", @@ -130,10 +157,7 @@ func Test_conn_Write(t *testing.T) { name: "single_write_stdout_data_message_with_cast_header", inputs: [][]byte{{0x82, 0x3, 0x1, 0x7, 0x8}}, wantForwarded: []byte{0x82, 0x3, 0x1, 0x7, 0x8}, - wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x7, 0x8}, cl)...), - width: 10, - height: 20, - firstWrite: true, + wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8}, cl), }, { name: "two_writes_stdout_data_message", @@ -148,15 +172,11 @@ func Test_conn_Write(t *testing.T) { wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8, 0x1, 0x2, 0x3, 0x4, 0x5}, cl), }, { - name: "three_writes_stdout_data_message_with_split_fragment_cast_header_with_terminal", - inputs: [][]byte{{0x2, 0x3, 0x1, 0x7, 0x8}, {0x80, 0x6, 0x1, 0x1, 0x2, 0x3}, {0x4, 0x5}}, - wantForwarded: []byte{0x2, 0x3, 0x1, 0x7, 0x8, 0x80, 0x6, 0x1, 0x1, 0x2, 0x3, 0x4, 0x5}, - wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x7, 0x8, 0x1, 0x2, 0x3, 0x4, 0x5}, cl)...), - height: 20, - width: 10, - hasTerm: true, - firstWrite: true, - sendInitialResize: true, + name: "three_writes_stdout_data_message_with_split_fragment_cast_header_with_terminal", + inputs: [][]byte{{0x2, 0x3, 0x1, 0x7, 0x8}, {0x80, 0x6, 0x1, 0x1, 0x2, 0x3}, {0x4, 0x5}}, + wantForwarded: []byte{0x2, 0x3, 0x1, 0x7, 0x8, 0x80, 0x6, 0x1, 0x1, 0x2, 0x3, 0x4, 0x5}, + wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8, 0x1, 0x2, 0x3, 0x4, 0x5}, cl), + hasTerm: true, }, } for _, tt := range tests { @@ -164,24 +184,22 @@ func Test_conn_Write(t *testing.T) { tc := &fakes.TestConn{} sr := &fakes.TestSessionRecorder{} rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() c := &conn{ - Conn: tc, - log: zl.Sugar(), - ch: sessionrecording.CastHeader{ - Width: tt.width, - Height: tt.height, - }, - rec: rec, - initialTermSizeSet: make(chan struct{}), - hasTerm: tt.hasTerm, - } - if !tt.firstWrite { - // This test case does not intend to test that cast header gets written once. 
- c.writeCastHeaderOnce.Do(func() {}) - } - if tt.sendInitialResize { - close(c.initialTermSizeSet) + Conn: tc, + ctx: ctx, + log: zl.Sugar(), + ch: sessionrecording.CastHeader{}, + rec: rec, + initialCastHeaderSent: make(chan struct{}), + hasTerm: tt.hasTerm, } + + c.writeCastHeaderOnce.Do(func() { + close(c.initialCastHeaderSent) + }) + for i, input := range tt.inputs { _, err := c.Write(input) if err != nil { diff --git a/sessionrecording/header.go b/sessionrecording/header.go index 4806f6585..545bf06bd 100644 --- a/sessionrecording/header.go +++ b/sessionrecording/header.go @@ -66,13 +66,15 @@ type CastHeader struct { Kubernetes *Kubernetes `json:"kubernetes,omitempty"` } -// Kubernetes contains 'kubectl exec' session specific information for +// Kubernetes contains 'kubectl exec/attach' session specific information for // tsrecorder. type Kubernetes struct { - // PodName is the name of the Pod being exec-ed. + // PodName is the name of the Pod the session was recorded for. PodName string - // Namespace is the namespace in which is the Pod that is being exec-ed. + // Namespace is the namespace in which the Pod the session was recorded for exists in. Namespace string - // Container is the container being exec-ed. + // Container is the container the session was recorded for. Container string + // SessionType is the type of session that was executed (e.g., exec, attach) + SessionType string } From fe46f33885f5abb797f7289fa00d5b49a59d8468 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Mon, 14 Jul 2025 15:39:39 +0100 Subject: [PATCH 1090/1708] cmd/{k8s-operator,k8s-proxy},kube/k8s-proxy: add static endpoints for kube-apiserver type ProxyGroups (#16523) Updates #13358 Signed-off-by: chaosinthecrd --- cmd/k8s-operator/proxygroup.go | 4 ++ cmd/k8s-operator/proxygroup_specs.go | 83 ++++++++++++++++------------ cmd/k8s-proxy/k8s-proxy.go | 15 +++++ kube/k8s-proxy/conf/conf.go | 4 ++ 4 files changed, 70 insertions(+), 36 deletions(-) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 66b6c96e3..1fdc076f9 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -824,6 +824,10 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p cfg.AcceptRoutes = &proxyClass.Spec.TailscaleConfig.AcceptRoutes } + if len(endpoints[nodePortSvcName]) > 0 { + cfg.StaticEndpoints = endpoints[nodePortSvcName] + } + cfgB, err := json.Marshal(cfg) if err != nil { return nil, fmt.Errorf("error marshalling k8s-proxy config: %w", err) diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 5d6d0b8ef..71398d0d5 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -66,7 +66,7 @@ func pgNodePortService(pg *tsapi.ProxyGroup, name string, namespace string) *cor // applied over the top after. 
func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string, port *uint16, proxyClass *tsapi.ProxyClass) (*appsv1.StatefulSet, error) { if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer { - return kubeAPIServerStatefulSet(pg, namespace, image) + return kubeAPIServerStatefulSet(pg, namespace, image, port) } ss := new(appsv1.StatefulSet) if err := yaml.Unmarshal(proxyYaml, &ss); err != nil { @@ -276,7 +276,7 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string return ss, nil } -func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string) (*appsv1.StatefulSet, error) { +func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string, port *uint16) (*appsv1.StatefulSet, error) { sts := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: pg.Name, @@ -302,48 +302,59 @@ func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string) (*a { Name: mainContainerName, Image: image, - Env: []corev1.EnvVar{ - { - // Used as default hostname and in Secret names. - Name: "POD_NAME", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.name", + Env: func() []corev1.EnvVar { + envs := []corev1.EnvVar{ + { + // Used as default hostname and in Secret names. + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, }, }, - }, - { - // Used by kubeclient to post Events about the Pod's lifecycle. - Name: "POD_UID", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.uid", + { + // Used by kubeclient to post Events about the Pod's lifecycle. + Name: "POD_UID", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.uid", + }, }, }, - }, - { - // Used in an interpolated env var if metrics enabled. - Name: "POD_IP", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "status.podIP", + { + // Used in an interpolated env var if metrics enabled. + Name: "POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIP", + }, }, }, - }, - { - // Included for completeness with POD_IP and easier backwards compatibility in future. - Name: "POD_IPS", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "status.podIPs", + { + // Included for completeness with POD_IP and easier backwards compatibility in future. 
+ Name: "POD_IPS", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIPs", + }, }, }, - }, - { - Name: "TS_K8S_PROXY_CONFIG", - Value: filepath.Join("/etc/tsconfig/$(POD_NAME)/", kubeAPIServerConfigFile), - }, - }, + { + Name: "TS_K8S_PROXY_CONFIG", + Value: filepath.Join("/etc/tsconfig/$(POD_NAME)/", kubeAPIServerConfigFile), + }, + } + + if port != nil { + envs = append(envs, corev1.EnvVar{ + Name: "PORT", + Value: strconv.Itoa(int(*port)), + }) + } + + return envs + }(), VolumeMounts: func() []corev1.VolumeMount { var mounts []corev1.VolumeMount diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index 7dcf6c2ab..b7f3d9535 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -14,6 +14,7 @@ import ( "fmt" "os" "os/signal" + "strings" "syscall" "time" @@ -63,6 +64,20 @@ func run(logger *zap.SugaredLogger) error { logger = logger.WithOptions(zap.IncreaseLevel(level)) } + // TODO:(ChaosInTheCRD) This is a temporary workaround until we can set static endpoints using prefs + if se := cfg.Parsed.StaticEndpoints; len(se) > 0 { + logger.Debugf("setting static endpoints '%v' via TS_DEBUG_PRETENDPOINT environment variable", cfg.Parsed.StaticEndpoints) + ses := make([]string, len(se)) + for i, e := range se { + ses[i] = e.String() + } + + err := os.Setenv("TS_DEBUG_PRETENDPOINT", strings.Join(ses, ",")) + if err != nil { + return err + } + } + if cfg.Parsed.App != nil { hostinfo.SetApp(*cfg.Parsed.App) } diff --git a/kube/k8s-proxy/conf/conf.go b/kube/k8s-proxy/conf/conf.go index fba4a39a4..8882360c5 100644 --- a/kube/k8s-proxy/conf/conf.go +++ b/kube/k8s-proxy/conf/conf.go @@ -10,6 +10,7 @@ package conf import ( "encoding/json" "fmt" + "net/netip" "os" "github.com/tailscale/hujson" @@ -55,6 +56,9 @@ type ConfigV1Alpha1 struct { KubeAPIServer *KubeAPIServer `json:",omitempty"` // Config specific to the API Server proxy. ServerURL *string `json:",omitempty"` // URL of the Tailscale coordination server. AcceptRoutes *bool `json:",omitempty"` // Accepts routes advertised by other Tailscale nodes. + // StaticEndpoints are additional, user-defined endpoints that this node + // should advertise amongst its wireguard endpoints. + StaticEndpoints []netip.AddrPort `json:",omitempty"` } type KubeAPIServer struct { From fc5050048ee9c71dcdeb232d3a38f068072f489f Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 14 Jul 2025 10:42:56 -0700 Subject: [PATCH 1091/1708] wgengine/magicsock: don't acquire Conn.mu in udpRelayEndpointReady (#16557) udpRelayEndpointReady used to write into the peerMap, which required holding Conn.mu, but this changed in f9e7131. Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index c4ca81296..d8d1e6ee3 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -104,8 +104,6 @@ type endpoint struct { // be installed as de.bestAddr. It is only called by [relayManager] once it has // determined maybeBest is functional via [disco.Pong] reception. 
func (de *endpoint) udpRelayEndpointReady(maybeBest addrQuality) { - de.c.mu.Lock() - defer de.c.mu.Unlock() de.mu.Lock() defer de.mu.Unlock() From f338c4074d4bb67acfbabdddf2974b23274236d9 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Mon, 14 Jul 2025 11:57:54 -1000 Subject: [PATCH 1092/1708] util/jsonutil: remove unused package (#16563) This package promises more performance, but was never used. The intent of the package is somewhat moot as "encoding/json" in Go 1.25 (while under GOEXPERIMENT=jsonv2) has been completely re-implemented using "encoding/json/v2" such that unmarshal is dramatically faster. Updates #cleanup Updates tailscale/corp#791 Signed-off-by: Joe Tsai --- util/jsonutil/types.go | 16 ------ util/jsonutil/unmarshal.go | 89 --------------------------------- util/jsonutil/unmarshal_test.go | 64 ------------------------ 3 files changed, 169 deletions(-) delete mode 100644 util/jsonutil/types.go delete mode 100644 util/jsonutil/unmarshal.go delete mode 100644 util/jsonutil/unmarshal_test.go diff --git a/util/jsonutil/types.go b/util/jsonutil/types.go deleted file mode 100644 index 057473249..000000000 --- a/util/jsonutil/types.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package jsonutil - -// Bytes is a byte slice in a json-encoded struct. -// encoding/json assumes that []byte fields are hex-encoded. -// Bytes are not hex-encoded; they are treated the same as strings. -// This can avoid unnecessary allocations due to a round trip through strings. -type Bytes []byte - -func (b *Bytes) UnmarshalText(text []byte) error { - // Copy the contexts of text. - *b = append(*b, text...) - return nil -} diff --git a/util/jsonutil/unmarshal.go b/util/jsonutil/unmarshal.go deleted file mode 100644 index b1eb4ea87..000000000 --- a/util/jsonutil/unmarshal.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package jsonutil provides utilities to improve JSON performance. -// It includes an Unmarshal wrapper that amortizes allocated garbage over subsequent runs -// and a Bytes type to reduce allocations when unmarshalling a non-hex-encoded string into a []byte. -package jsonutil - -import ( - "bytes" - "encoding/json" - "sync" -) - -// decoder is a re-usable json decoder. -type decoder struct { - dec *json.Decoder - r *bytes.Reader -} - -var readerPool = sync.Pool{ - New: func() any { - return bytes.NewReader(nil) - }, -} - -var decoderPool = sync.Pool{ - New: func() any { - var d decoder - d.r = readerPool.Get().(*bytes.Reader) - d.dec = json.NewDecoder(d.r) - return &d - }, -} - -// Unmarshal is similar to encoding/json.Unmarshal. -// There are three major differences: -// -// On error, encoding/json.Unmarshal zeros v. -// This Unmarshal may leave partial data in v. -// Always check the error before using v! -// (Future improvements may remove this bug.) -// -// The errors they return don't always match perfectly. -// If you do error matching more precise than err != nil, -// don't use this Unmarshal. -// -// This Unmarshal allocates considerably less memory. -func Unmarshal(b []byte, v any) error { - d := decoderPool.Get().(*decoder) - d.r.Reset(b) - off := d.dec.InputOffset() - err := d.dec.Decode(v) - d.r.Reset(nil) // don't keep a reference to b - // In case of error, report the offset in this byte slice, - // instead of in the totality of all bytes this decoder has processed. 
- // It is not possible to make all errors match json.Unmarshal exactly, - // but we can at least try. - switch jsonerr := err.(type) { - case *json.SyntaxError: - jsonerr.Offset -= off - case *json.UnmarshalTypeError: - jsonerr.Offset -= off - case nil: - // json.Unmarshal fails if there's any extra junk in the input. - // json.Decoder does not; see https://github.com/golang/go/issues/36225. - // We need to check for anything left over in the buffer. - if d.dec.More() { - // TODO: Provide a better error message. - // Unfortunately, we can't set the msg field. - // The offset doesn't perfectly match json: - // Ours is at the end of the valid data, - // and theirs is at the beginning of the extra data after whitespace. - // Close enough, though. - err = &json.SyntaxError{Offset: d.dec.InputOffset() - off} - - // TODO: zero v. This is hard; see encoding/json.indirect. - } - } - if err == nil { - decoderPool.Put(d) - } else { - // There might be junk left in the decoder's buffer. - // There's no way to flush it, no Reset method. - // Abandoned the decoder but reuse the reader. - readerPool.Put(d.r) - } - return err -} diff --git a/util/jsonutil/unmarshal_test.go b/util/jsonutil/unmarshal_test.go deleted file mode 100644 index 32f8402f0..000000000 --- a/util/jsonutil/unmarshal_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package jsonutil - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestCompareToStd(t *testing.T) { - tests := []string{ - `{}`, - `{"a": 1}`, - `{]`, - `"abc"`, - `5`, - `{"a": 1} `, - `{"a": 1} {}`, - `{} bad data`, - `{"a": 1} "hello"`, - `[]`, - ` {"x": {"t": [3,4,5]}}`, - } - - for _, test := range tests { - b := []byte(test) - var ourV, stdV any - ourErr := Unmarshal(b, &ourV) - stdErr := json.Unmarshal(b, &stdV) - if (ourErr == nil) != (stdErr == nil) { - t.Errorf("Unmarshal(%q): our err = %#[2]v (%[2]T), std err = %#[3]v (%[3]T)", test, ourErr, stdErr) - } - // if !reflect.DeepEqual(ourErr, stdErr) { - // t.Logf("Unmarshal(%q): our err = %#[2]v (%[2]T), std err = %#[3]v (%[3]T)", test, ourErr, stdErr) - // } - if ourErr != nil { - // TODO: if we zero ourV on error, remove this continue. - continue - } - if !reflect.DeepEqual(ourV, stdV) { - t.Errorf("Unmarshal(%q): our val = %v, std val = %v", test, ourV, stdV) - } - } -} - -func BenchmarkUnmarshal(b *testing.B) { - var m any - j := []byte("5") - b.ReportAllocs() - for range b.N { - Unmarshal(j, &m) - } -} - -func BenchmarkStdUnmarshal(b *testing.B) { - var m any - j := []byte("5") - b.ReportAllocs() - for range b.N { - json.Unmarshal(j, &m) - } -} From b63f8a457dbb14700a7c6bdb96e4df95a5c258b3 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 14 Jul 2025 15:09:31 -0700 Subject: [PATCH 1093/1708] wgengine/magicsock: prioritize trusted peer relay paths over untrusted (#16559) A trusted peer relay path is always better than an untrusted direct or peer relay path. 
Updates tailscale/corp#30412 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 35 ++++++------ wgengine/magicsock/endpoint_test.go | 89 +++++++++++++++++++++++++++++ 2 files changed, 108 insertions(+), 16 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index d8d1e6ee3..385c9245e 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -106,24 +106,27 @@ type endpoint struct { func (de *endpoint) udpRelayEndpointReady(maybeBest addrQuality) { de.mu.Lock() defer de.mu.Unlock() - - if maybeBest.relayServerDisco.Compare(de.bestAddr.relayServerDisco) == 0 { - // TODO(jwhited): add some observability for this case, e.g. did we - // flip transports during a de.bestAddr transition from untrusted to - // trusted? + now := mono.Now() + curBestAddrTrusted := now.Before(de.trustBestAddrUntil) + sameRelayServer := de.bestAddr.vni.isSet() && maybeBest.relayServerDisco.Compare(de.bestAddr.relayServerDisco) == 0 + + if !curBestAddrTrusted || + sameRelayServer || + betterAddr(maybeBest, de.bestAddr) { + // We must set maybeBest as de.bestAddr if: + // 1. de.bestAddr is untrusted. betterAddr does not consider + // time-based trust. + // 2. maybeBest & de.bestAddr are on the same relay. If the maybeBest + // handshake happened to use a different source address/transport, + // the relay will drop packets from the 'old' de.bestAddr's. + // 3. maybeBest is a 'betterAddr'. // - // If these are equal we must set maybeBest as bestAddr, otherwise we - // could leave a stale bestAddr if it goes over a different - // address family or src. - } else if !betterAddr(maybeBest, de.bestAddr) { - return + // TODO(jwhited): add observability around !curBestAddrTrusted and sameRelayServer + // TODO(jwhited): collapse path change logging with endpoint.handlePongConnLocked() + de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v", de.publicKey.ShortString(), de.discoShort(), maybeBest.epAddr, maybeBest.wireMTU) + de.setBestAddrLocked(maybeBest) + de.trustBestAddrUntil = now.Add(trustUDPAddrDuration) } - - // Promote maybeBest to bestAddr. 
- // TODO(jwhited): collapse path change logging with endpoint.handlePongConnLocked() - de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v", de.publicKey.ShortString(), de.discoShort(), maybeBest.epAddr, maybeBest.wireMTU) - de.setBestAddrLocked(maybeBest) - de.trustBestAddrUntil = mono.Now().Add(trustUDPAddrDuration) } func (de *endpoint) setBestAddrLocked(v addrQuality) { diff --git a/wgengine/magicsock/endpoint_test.go b/wgengine/magicsock/endpoint_test.go index 3a1e55b8b..92f4ef1d3 100644 --- a/wgengine/magicsock/endpoint_test.go +++ b/wgengine/magicsock/endpoint_test.go @@ -9,6 +9,7 @@ import ( "time" "tailscale.com/tailcfg" + "tailscale.com/tstime/mono" "tailscale.com/types/key" ) @@ -365,3 +366,91 @@ func Test_epAddr_isDirectUDP(t *testing.T) { }) } } + +func Test_endpoint_udpRelayEndpointReady(t *testing.T) { + directAddrQuality := addrQuality{epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.1:7")}} + peerRelayAddrQuality := addrQuality{epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.2:77")}, latency: time.Second} + peerRelayAddrQuality.vni.set(1) + peerRelayAddrQualityHigherLatencySameServer := addrQuality{ + epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.3:77"), vni: peerRelayAddrQuality.vni}, + latency: peerRelayAddrQuality.latency * 10, + } + peerRelayAddrQualityHigherLatencyDiffServer := addrQuality{ + epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.3:77"), vni: peerRelayAddrQuality.vni}, + latency: peerRelayAddrQuality.latency * 10, + relayServerDisco: key.NewDisco().Public(), + } + peerRelayAddrQualityLowerLatencyDiffServer := addrQuality{ + epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.4:77"), vni: peerRelayAddrQuality.vni}, + latency: peerRelayAddrQuality.latency / 10, + relayServerDisco: key.NewDisco().Public(), + } + peerRelayAddrQualityEqualLatencyDiffServer := addrQuality{ + epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.4:77"), vni: peerRelayAddrQuality.vni}, + latency: peerRelayAddrQuality.latency, + relayServerDisco: key.NewDisco().Public(), + } + tests := []struct { + name string + curBestAddr addrQuality + trustBestAddrUntil mono.Time + maybeBest addrQuality + wantBestAddr addrQuality + }{ + { + name: "bestAddr trusted direct", + curBestAddr: directAddrQuality, + trustBestAddrUntil: mono.Now().Add(1 * time.Hour), + maybeBest: peerRelayAddrQuality, + wantBestAddr: directAddrQuality, + }, + { + name: "bestAddr untrusted direct", + curBestAddr: directAddrQuality, + trustBestAddrUntil: mono.Now().Add(-1 * time.Hour), + maybeBest: peerRelayAddrQuality, + wantBestAddr: peerRelayAddrQuality, + }, + { + name: "maybeBest same relay server higher latency bestAddr trusted", + curBestAddr: peerRelayAddrQuality, + trustBestAddrUntil: mono.Now().Add(1 * time.Hour), + maybeBest: peerRelayAddrQualityHigherLatencySameServer, + wantBestAddr: peerRelayAddrQualityHigherLatencySameServer, + }, + { + name: "maybeBest diff relay server higher latency bestAddr trusted", + curBestAddr: peerRelayAddrQuality, + trustBestAddrUntil: mono.Now().Add(1 * time.Hour), + maybeBest: peerRelayAddrQualityHigherLatencyDiffServer, + wantBestAddr: peerRelayAddrQuality, + }, + { + name: "maybeBest diff relay server lower latency bestAddr trusted", + curBestAddr: peerRelayAddrQuality, + trustBestAddrUntil: mono.Now().Add(1 * time.Hour), + maybeBest: peerRelayAddrQualityLowerLatencyDiffServer, + wantBestAddr: peerRelayAddrQualityLowerLatencyDiffServer, + }, + { + name: "maybeBest diff relay server equal latency bestAddr trusted", + curBestAddr: peerRelayAddrQuality, + 
trustBestAddrUntil: mono.Now().Add(1 * time.Hour), + maybeBest: peerRelayAddrQualityEqualLatencyDiffServer, + wantBestAddr: peerRelayAddrQuality, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + de := &endpoint{ + c: &Conn{logf: func(msg string, args ...any) { return }}, + bestAddr: tt.curBestAddr, + trustBestAddrUntil: tt.trustBestAddrUntil, + } + de.udpRelayEndpointReady(tt.maybeBest) + if de.bestAddr != tt.wantBestAddr { + t.Errorf("de.bestAddr = %v, want %v", de.bestAddr, tt.wantBestAddr) + } + }) + } +} From bfb344905f5d12648031b0aaec27393ae4173e12 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Mon, 14 Jul 2025 18:51:55 -0700 Subject: [PATCH 1094/1708] ipn/ipnlocal: modernize nm.Peers with AppendMatchingPeers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Thanks to @nickkhyl for pointing out that NetMap.Peers doesn’t get incremental updates since the last full NetMap update. Instead, he recommends using ipn/ipnlocal.nodeBackend.AppendMatchingPeers. Updates #cleanup Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 4ed012f2e..cd1654eb1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7902,33 +7902,32 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, prev tailcfg.StableNod panic("missing traffic-steering capability") } - peers := nm.Peers - nodes := make([]tailcfg.NodeView, 0, len(peers)) - - for _, p := range peers { + var force tailcfg.NodeView + nodes := nb.AppendMatchingPeers(nil, func(p tailcfg.NodeView) bool { if !p.Valid() { - continue + return false } if allowed != nil && !allowed.Contains(p.StableID()) { - continue + return false } if !p.CapMap().Contains(tailcfg.NodeAttrSuggestExitNode) { - continue + return false } if !tsaddr.ContainsExitRoutes(p.AllowedIPs()) { - continue + return false } if p.StableID() == prev { // Prevent flapping: since prev is a valid suggestion, // force prev to be the only valid pick. - nodes = []tailcfg.NodeView{p} - break + force = p + return false } - nodes = append(nodes, p) + return true + }) + if force.Valid() { + nodes = append(nodes[:0], force) } - var pick tailcfg.NodeView - scores := make(map[tailcfg.NodeID]int, len(nodes)) score := func(n tailcfg.NodeView) int { id := n.ID() @@ -7945,7 +7944,11 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, prev tailcfg.StableNod return s } - if len(nodes) > 0 { + var pick tailcfg.NodeView + if len(nodes) == 1 { + pick = nodes[0] + } + if len(nodes) > 1 { // Find the highest scoring exit nodes. slices.SortFunc(nodes, func(a, b tailcfg.NodeView) int { return cmp.Compare(score(b), score(a)) // reverse sort From 205f822372d203f32b3fb3c7562347770a927181 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Mon, 14 Jul 2025 19:01:02 -0700 Subject: [PATCH 1095/1708] ipn/ipnlocal: check if suggested exit node is online @nickkyl added an peer.Online check to suggestExitNodeUsingDERP, so it should also check when running suggestExitNodeUsingTrafficSteering. 
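Folded into the predicate introduced in the previous commit, the candidate filter then reads roughly as follows (condensed sketch only; the shipped code in suggestExitNodeUsingTrafficSteering also applies the allow-list and previous-suggestion handling):

    nodes := nb.AppendMatchingPeers(nil, func(p tailcfg.NodeView) bool {
        return p.Valid() &&
            p.Online().Get() && // new: skip peers Control has not reported as online
            p.CapMap().Contains(tailcfg.NodeAttrSuggestExitNode) &&
            tsaddr.ContainsExitRoutes(p.AllowedIPs())
    })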
Updates tailscale/corp#29966 Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index cd1654eb1..9b9bd82b5 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7907,6 +7907,9 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, prev tailcfg.StableNod if !p.Valid() { return false } + if !p.Online().Get() { + return false + } if allowed != nil && !allowed.Contains(p.StableID()) { return false } From 7a3221177e0e323d89b5e6389a4a4274065eb725 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Jul 2025 08:33:22 -0600 Subject: [PATCH 1096/1708] .github: Bump slackapi/slack-github-action from 2.1.0 to 2.1.1 (#16553) Bumps [slackapi/slack-github-action](https://github.com/slackapi/slack-github-action) from 2.1.0 to 2.1.1. - [Release notes](https://github.com/slackapi/slack-github-action/releases) - [Commits](https://github.com/slackapi/slack-github-action/compare/b0fa283ad8fea605de13dc3f449259339835fc52...91efab103c0de0a537f72a35f6b8cda0ee76bf0a) --- updated-dependencies: - dependency-name: slackapi/slack-github-action dependency-version: 2.1.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/govulncheck.yml | 2 +- .github/workflows/installer.yml | 2 +- .github/workflows/test.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index 36ed1fe9b..c7560983a 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -24,7 +24,7 @@ jobs: - name: Post to slack if: failure() && github.event_name == 'schedule' - uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 # v2.1.0 + uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1 with: method: chat.postMessage token: ${{ secrets.GOVULNCHECK_BOT_TOKEN }} diff --git a/.github/workflows/installer.yml b/.github/workflows/installer.yml index 0ca16ae9f..6144864fd 100644 --- a/.github/workflows/installer.yml +++ b/.github/workflows/installer.yml @@ -108,7 +108,7 @@ jobs: steps: - name: Notify Slack of failure on scheduled runs if: failure() && github.event_name == 'schedule' - uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 # v2.1.0 + uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1 with: webhook: ${{ secrets.SLACK_WEBHOOK_URL }} webhook-type: incoming-webhook diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2e80b44dc..d5b09a9e6 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -831,7 +831,7 @@ jobs: # By having the job always run, but skipping its only step as needed, we # let the CI output collapse nicely in PRs. 
if: failure() && github.event_name == 'push' - uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 # v2.1.0 + uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1 with: webhook: ${{ secrets.SLACK_WEBHOOK_URL }} webhook-type: incoming-webhook From e0fcd596bf50556243c488f916d5128dccba6638 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Mon, 14 Jul 2025 17:54:56 +0100 Subject: [PATCH 1097/1708] tailcfg: send health update if DisplayMessage URL changes Updates tailscale/corp#27759 Signed-off-by: James Sanderson --- health/health_test.go | 160 +++++++++++++++++++--------------------- tailcfg/tailcfg.go | 5 +- tailcfg/tailcfg_test.go | 113 ++++++++++++++++++++-------- 3 files changed, 162 insertions(+), 116 deletions(-) diff --git a/health/health_test.go b/health/health_test.go index 0f1140f62..53f012ecf 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -555,98 +555,88 @@ func TestControlHealth(t *testing.T) { }) } -func TestControlHealthNotifiesOnSet(t *testing.T) { - ht := Tracker{} - ht.SetIPNState("NeedsLogin", true) - ht.GotStreamedMapResponse() - - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) - - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test": {}, - }) - - if !gotNotified { - t.Errorf("watcher did not get called, want it to be called") - } -} - -func TestControlHealthNotifiesOnChange(t *testing.T) { - ht := Tracker{} - ht.SetIPNState("NeedsLogin", true) - ht.GotStreamedMapResponse() - - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test-1": {}, - }) - - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) - - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test-2": {}, - }) - - if !gotNotified { - t.Errorf("watcher did not get called, want it to be called") - } -} - -func TestControlHealthNotifiesOnDetailsChange(t *testing.T) { - ht := Tracker{} - ht.SetIPNState("NeedsLogin", true) - ht.GotStreamedMapResponse() - - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test-1": { - Title: "Title", +func TestControlHealthNotifies(t *testing.T) { + type test struct { + name string + initialState map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage + newState map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage + wantNotify bool + } + tests := []test{ + { + name: "no-change", + initialState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": {}, + }, + newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": {}, + }, + wantNotify: false, }, - }) - - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) - - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test-1": { - Title: "Updated title", + { + name: "on-set", + initialState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{}, + newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": {}, + }, + wantNotify: true, + }, + { + name: "details-change", + initialState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": { + Title: "Title", + }, + }, + newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": { + Title: "Updated title", + }, + }, + wantNotify: true, + }, + { + name: "action-changes", + initialState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": { + PrimaryAction: &tailcfg.DisplayMessageAction{ + URL: 
"http://www.example.com/a/123456", + Label: "Sign in", + }, + }, + }, + newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": { + PrimaryAction: &tailcfg.DisplayMessageAction{ + URL: "http://www.example.com/a/abcdefg", + Label: "Sign in", + }, + }, + }, + wantNotify: true, }, - }) - - if !gotNotified { - t.Errorf("watcher did not get called, want it to be called") } -} - -func TestControlHealthNoNotifyOnUnchanged(t *testing.T) { - ht := Tracker{} - ht.SetIPNState("NeedsLogin", true) - ht.GotStreamedMapResponse() + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ht := Tracker{} + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() - // Set up an existing control health issue - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test": {}, - }) + if len(test.initialState) != 0 { + ht.SetControlHealth(test.initialState) + } - // Now register our watcher - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) + gotNotified := false + ht.registerSyncWatcher(func(_ Change) { + gotNotified = true + }) - // Send the same control health message again - should not notify - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test": {}, - }) + ht.SetControlHealth(test.newState) - if gotNotified { - t.Errorf("watcher got called, want it to not be called") + if gotNotified != test.wantNotify { + t.Errorf("notified: got %v, want %v", gotNotified, test.wantNotify) + } + }) } } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 53c4683c1..0f13c725e 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2171,7 +2171,10 @@ func (m DisplayMessage) Equal(o DisplayMessage) bool { return m.Title == o.Title && m.Text == o.Text && m.Severity == o.Severity && - m.ImpactsConnectivity == o.ImpactsConnectivity + m.ImpactsConnectivity == o.ImpactsConnectivity && + (m.PrimaryAction == nil) == (o.PrimaryAction == nil) && + (m.PrimaryAction == nil || (m.PrimaryAction.URL == o.PrimaryAction.URL && + m.PrimaryAction.Label == o.PrimaryAction.Label)) } // DisplayMessageSeverity represents how serious a [DisplayMessage] is. 
Analogous diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index e8e86cdb1..833314df8 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -881,76 +881,129 @@ func TestCheckTag(t *testing.T) { } func TestDisplayMessageEqual(t *testing.T) { - base := DisplayMessage{ - Title: "title", - Text: "text", - Severity: SeverityHigh, - ImpactsConnectivity: false, - } - type test struct { name string - value DisplayMessage + value1 DisplayMessage + value2 DisplayMessage wantEqual bool } for _, test := range []test{ { name: "same", - value: DisplayMessage{ + value1: DisplayMessage{ + Title: "title", + Text: "text", + Severity: SeverityHigh, + ImpactsConnectivity: false, + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Open", + }, + }, + value2: DisplayMessage{ Title: "title", Text: "text", Severity: SeverityHigh, ImpactsConnectivity: false, + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Open", + }, }, wantEqual: true, }, { name: "different-title", - value: DisplayMessage{ - Title: "different title", - Text: "text", - Severity: SeverityHigh, - ImpactsConnectivity: false, + value1: DisplayMessage{ + Title: "title", + }, + value2: DisplayMessage{ + Title: "different title", }, wantEqual: false, }, { name: "different-text", - value: DisplayMessage{ - Title: "title", - Text: "different text", - Severity: SeverityHigh, - ImpactsConnectivity: false, + value1: DisplayMessage{ + Text: "some text", + }, + value2: DisplayMessage{ + Text: "different text", }, wantEqual: false, }, { name: "different-severity", - value: DisplayMessage{ - Title: "title", - Text: "text", - Severity: SeverityMedium, - ImpactsConnectivity: false, + value1: DisplayMessage{ + Severity: SeverityHigh, + }, + value2: DisplayMessage{ + Severity: SeverityMedium, }, wantEqual: false, }, { name: "different-impactsConnectivity", - value: DisplayMessage{ - Title: "title", - Text: "text", - Severity: SeverityHigh, + value1: DisplayMessage{ ImpactsConnectivity: true, }, + value2: DisplayMessage{ + ImpactsConnectivity: false, + }, + wantEqual: false, + }, + { + name: "different-primaryAction-nil-non-nil", + value1: DisplayMessage{}, + value2: DisplayMessage{ + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Open", + }, + }, + wantEqual: false, + }, + { + name: "different-primaryAction-url", + value1: DisplayMessage{ + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Open", + }, + }, + value2: DisplayMessage{ + PrimaryAction: &DisplayMessageAction{ + URL: "https://zombo.com", + Label: "Open", + }, + }, + wantEqual: false, + }, + { + name: "different-primaryAction-label", + value1: DisplayMessage{ + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Open", + }, + }, + value2: DisplayMessage{ + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Learn more", + }, + }, wantEqual: false, }, } { t.Run(test.name, func(t *testing.T) { - got := base.Equal(test.value) + got := test.value1.Equal(test.value2) if got != test.wantEqual { - t.Errorf("Equal: got %t, want %t", got, test.wantEqual) + value1 := must.Get(json.MarshalIndent(test.value1, "", " ")) + value2 := must.Get(json.MarshalIndent(test.value2, "", " ")) + t.Errorf("value1.Equal(value2): got %t, want %t\nvalue1:\n%s\nvalue2:\n%s", got, test.wantEqual, value1, value2) } }) } From ffe8cc9442335ffb76c0e7555c67493a1975181c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Jul 2025 09:54:00 -0600 Subject: [PATCH 1098/1708] .github: Bump github/codeql-action from 3.29.1 to 3.29.2 (#16480) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.1 to 3.29.2. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/39edc492dbe16b1465b0cafca41432d857bdb31a...181d5eefc20863364f96762470ba6f862bdef56b) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 610b93b61..4e129b847 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 + uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 + uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 + uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 From d65c0fd2d04a49fb11964cf0457df499a0e6e366 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 15 Jul 2025 12:29:07 -0700 Subject: [PATCH 1099/1708] tailcfg,wgengine/magicsock: set peer relay CapVer (#16531) Updates tailscale/corp#27502 Updates tailscale/corp#30051 Signed-off-by: Jordan Whited --- tailcfg/tailcfg.go | 3 ++- wgengine/magicsock/debugknobs.go | 6 ------ wgengine/magicsock/debugknobs_stubs.go | 1 - wgengine/magicsock/magicsock.go | 19 ++++++++--------- wgengine/magicsock/magicsock_test.go | 28 ++++++++++++++++++++++++-- 5 files changed, 38 insertions(+), 19 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 0f13c725e..636e2434d 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -164,7 +164,8 @@ type CapabilityVersion int // - 117: 2025-05-28: Client understands DisplayMessages (structured health messages), but not necessarily PrimaryAction. // - 118: 2025-07-01: Client sends Hostinfo.StateEncrypted to report whether the state file is encrypted at rest (#15830) // - 119: 2025-07-10: Client uses Hostinfo.Location.Priority to prioritize one route over another. 
-const CurrentCapabilityVersion CapabilityVersion = 119 +// - 120: 2025-07-15: Client understands peer relay disco messages, and implements peer client and relay server functions +const CurrentCapabilityVersion CapabilityVersion = 120 // ID is an integer ID for a user, node, or login allocated by the // control plane. diff --git a/wgengine/magicsock/debugknobs.go b/wgengine/magicsock/debugknobs.go index 055895388..f8fd9f040 100644 --- a/wgengine/magicsock/debugknobs.go +++ b/wgengine/magicsock/debugknobs.go @@ -62,12 +62,6 @@ var ( // //lint:ignore U1000 used on Linux/Darwin only debugPMTUD = envknob.RegisterBool("TS_DEBUG_PMTUD") - // debugAssumeUDPRelayCapable forces magicsock to assume that all peers are - // UDP relay capable clients and servers. This will eventually be replaced - // by a [tailcfg.CapabilityVersion] comparison. It enables early testing of - // the UDP relay feature before we have established related - // [tailcfg.CapabilityVersion]'s. - debugAssumeUDPRelayCapable = envknob.RegisterBool("TS_DEBUG_ASSUME_UDP_RELAY_CAPABLE") // Hey you! Adding a new debugknob? Make sure to stub it out in the // debugknobs_stubs.go file too. ) diff --git a/wgengine/magicsock/debugknobs_stubs.go b/wgengine/magicsock/debugknobs_stubs.go index 3d23b1f8e..336d7baa1 100644 --- a/wgengine/magicsock/debugknobs_stubs.go +++ b/wgengine/magicsock/debugknobs_stubs.go @@ -31,4 +31,3 @@ func debugRingBufferMaxSizeBytes() int { return 0 } func inTest() bool { return false } func debugPeerMap() bool { return false } func pretendpoints() []netip.AddrPort { return []netip.AddrPort{} } -func debugAssumeUDPRelayCapable() bool { return false } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 14feed32b..a8b1c8f15 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -14,7 +14,6 @@ import ( "expvar" "fmt" "io" - "math" "net" "net/netip" "reflect" @@ -2616,14 +2615,10 @@ func (c *Conn) SetProbeUDPLifetime(v bool) { }) } +// capVerIsRelayCapable returns true if version is relay client and server +// capable, otherwise it returns false. func capVerIsRelayCapable(version tailcfg.CapabilityVersion) bool { - // TODO(jwhited): implement once capVer is bumped - return version == math.MinInt32 || debugAssumeUDPRelayCapable() -} - -func capVerIsRelayServerCapable(version tailcfg.CapabilityVersion) bool { - // TODO(jwhited): implement once capVer is bumped & update Test_peerAPIIfCandidateRelayServer - return version == math.MinInt32 || debugAssumeUDPRelayCapable() + return version >= 120 } // onFilterUpdate is called when a [FilterUpdate] is received over the @@ -2677,10 +2672,16 @@ func peerAPIIfCandidateRelayServer(filt *filter.Filter, self, maybeCandidate tai if filt == nil || !self.Valid() || !maybeCandidate.Valid() || - !capVerIsRelayServerCapable(maybeCandidate.Cap()) || !maybeCandidate.Hostinfo().Valid() { return netip.AddrPort{} } + if maybeCandidate.ID() != self.ID() && !capVerIsRelayCapable(maybeCandidate.Cap()) { + // If maybeCandidate's [tailcfg.CapabilityVersion] is not relay-capable, + // we skip it. If maybeCandidate happens to be self, then this check is + // unnecessary as self is always capable from this point (the statically + // compiled [tailcfg.CurrentCapabilityVersion]) forward. 
+ return netip.AddrPort{} + } for _, maybeCandidatePrefix := range maybeCandidate.Addresses().All() { if !maybeCandidatePrefix.IsSingleIP() { continue diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 0515162c7..1d76e6c59 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -3399,7 +3399,11 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { } selfOnlyIPv4 := &tailcfg.Node{ - Cap: math.MinInt32, + ID: 1, + // Intentionally set a value < 120 to verify the statically compiled + // [tailcfg.CurrentCapabilityVersion] is used when self is + // maybeCandidate. + Cap: 119, Addresses: []netip.Prefix{ netip.MustParsePrefix("1.1.1.1/32"), }, @@ -3409,13 +3413,17 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { selfOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::1/128") peerOnlyIPv4 := &tailcfg.Node{ - Cap: math.MinInt32, + ID: 2, + Cap: 120, Addresses: []netip.Prefix{ netip.MustParsePrefix("2.2.2.2/32"), }, Hostinfo: hostInfo.View(), } + peerOnlyIPv4NotCapable := peerOnlyIPv4.Clone() + peerOnlyIPv4NotCapable.Cap = 119 + peerOnlyIPv6 := peerOnlyIPv4.Clone() peerOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::2/128") @@ -3500,6 +3508,22 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { maybeCandidate: selfOnlyIPv6.View(), want: netip.AddrPortFrom(selfOnlyIPv6.Addresses[0].Addr(), 6), }, + { + name: "peer incapable", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv4.View(), + maybeCandidate: peerOnlyIPv4NotCapable.View(), + }, { name: "no match dst", filt: filter.New([]filtertype.Match{ From cb7a0b1dca91cef710f61cd4f3694bafa27bb7a0 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 15 Jul 2025 15:23:47 -0700 Subject: [PATCH 1100/1708] net/udprelay: log socket read errors (#16573) Socket read errors currently close the server, so we need to understand when and why they occur. Updates tailscale/corp#27502 Updates tailscale/corp#30118 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 1 + 1 file changed, 1 insertion(+) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index e2652ae99..7651bf295 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -581,6 +581,7 @@ func (s *Server) packetReadLoop(readFromSocket, otherSocket *net.UDPConn) { // TODO: extract laddr from IP_PKTINFO for use in reply n, from, err := readFromSocket.ReadFromUDPAddrPort(b) if err != nil { + s.logf("error reading from socket(%v): %v", readFromSocket.LocalAddr(), err) return } s.handlePacket(from, b[:n], readFromSocket, otherSocket) From 67514f5eb2f9737e7d819f43f007be970e17f293 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 16 Jul 2025 08:08:59 -0700 Subject: [PATCH 1101/1708] ssh/tailssh: fix path of "true" on Darwin (#16569) This is a follow-up to #15351, which fixed the test for Linux but not for Darwin, which stores its "true" executable in /usr/bin instead of /bin. Try both paths when not running on Windows. In addition, disable CGo in the integration test build, which was causing the linker to fail. These tests do not need CGo, and it appears we had some version skew with the base image on the runners. 
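On the path side, the net effect is simply to accept either of the two conventional locations for "true" on non-Windows systems. The sketch below illustrates that idea with a hypothetical helper (not part of the patch); the shipped code in tryExecInDir, shown in the diff below, instead runs /bin/true and falls through to /usr/bin/true on exec.ErrNotFound, and also handles Windows:

    // pickTrue is a hypothetical helper that returns the first of the usual
    // "true" locations present on disk. Assumes the standard library
    // "errors" and "os" packages are imported. Illustrative only.
    func pickTrue() (string, error) {
        for _, p := range []string{"/bin/true", "/usr/bin/true"} {
            if _, err := os.Stat(p); err == nil {
                return p, nil
            }
        }
        return "", errors.New("no usable true binary found")
    }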
In addition, in error cases the recover step of the permissions check was spuriously panicking and masking the "real" failure reason. Don't do that check when a command was not produced. Updates #15350 Change-Id: Icd91517f45c90f7554310ebf1c888cdfd109f43a Signed-off-by: M. J. Fromberger --- Makefile | 4 ++-- ssh/tailssh/incubator.go | 25 ++++++++++++++----------- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/Makefile b/Makefile index f5fc20589..55e55f209 100644 --- a/Makefile +++ b/Makefile @@ -126,8 +126,8 @@ publishdevproxy: check-image-repo ## Build and publish k8s-proxy image to locati .PHONY: sshintegrationtest sshintegrationtest: ## Run the SSH integration tests in various Docker containers - @GOOS=linux GOARCH=amd64 ./tool/go test -tags integrationtest -c ./ssh/tailssh -o ssh/tailssh/testcontainers/tailssh.test && \ - GOOS=linux GOARCH=amd64 ./tool/go build -o ssh/tailssh/testcontainers/tailscaled ./cmd/tailscaled && \ + @GOOS=linux GOARCH=amd64 CGO_ENABLED=0 ./tool/go test -tags integrationtest -c ./ssh/tailssh -o ssh/tailssh/testcontainers/tailssh.test && \ + GOOS=linux GOARCH=amd64 CGO_ENABLED=0 ./tool/go build -o ssh/tailssh/testcontainers/tailscaled ./cmd/tailscaled && \ echo "Testing on ubuntu:focal" && docker build --build-arg="BASE=ubuntu:focal" -t ssh-ubuntu-focal ssh/tailssh/testcontainers && \ echo "Testing on ubuntu:jammy" && docker build --build-arg="BASE=ubuntu:jammy" -t ssh-ubuntu-jammy ssh/tailssh/testcontainers && \ echo "Testing on ubuntu:noble" && docker build --build-arg="BASE=ubuntu:noble" -t ssh-ubuntu-noble ssh/tailssh/testcontainers && \ diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go index 9e1a9ea94..dd280143e 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -51,6 +51,7 @@ const ( darwin = "darwin" freebsd = "freebsd" openbsd = "openbsd" + windows = "windows" ) func init() { @@ -80,20 +81,22 @@ func tryExecInDir(ctx context.Context, dir string) error { ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() + run := func(path string) error { + cmd := exec.CommandContext(ctx, path) + cmd.Dir = dir + return cmd.Run() + } + // Assume that the following executables exist, are executable, and // immediately return. - var name string - switch runtime.GOOS { - case "windows": + if runtime.GOOS == windows { windir := os.Getenv("windir") - name = filepath.Join(windir, "system32", "doskey.exe") - default: - name = "/bin/true" + return run(filepath.Join(windir, "system32", "doskey.exe")) } - - cmd := exec.CommandContext(ctx, name) - cmd.Dir = dir - return cmd.Run() + if err := run("/bin/true"); !errors.Is(err, exec.ErrNotFound) { // including nil + return err + } + return run("/usr/bin/true") } // newIncubatorCommand returns a new exec.Cmd configured with @@ -107,7 +110,7 @@ func tryExecInDir(ctx context.Context, dir string) error { // The returned Cmd.Env is guaranteed to be nil; the caller populates it. 
func (ss *sshSession) newIncubatorCommand(logf logger.Logf) (cmd *exec.Cmd, err error) { defer func() { - if cmd.Env != nil { + if cmd != nil && cmd.Env != nil { panic("internal error") } }() From 3c6d17e6f114a2dc166e62b84789154b176e07c6 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 16 Jul 2025 10:03:05 -0700 Subject: [PATCH 1102/1708] cmd/tailscale/cli,ipn/ipnlocal,wgengine/magicsock: implement tailscale debug peer-relay-servers (#16577) Updates tailscale/corp#30036 Signed-off-by: Jordan Whited --- cmd/tailscale/cli/debug.go | 20 ++++++++++++++++++++ ipn/ipnlocal/local.go | 4 ++++ ipn/localapi/localapi.go | 6 ++++++ wgengine/magicsock/magicsock.go | 5 +++++ wgengine/magicsock/relaymanager.go | 21 +++++++++++++++++++++ wgengine/magicsock/relaymanager_test.go | 15 +++++++++++++++ 6 files changed, 71 insertions(+) diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index ec8a0700d..8473c4a17 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -356,6 +356,12 @@ func debugCmd() *ffcli.Command { ShortHelp: "Print Go's runtime/debug.BuildInfo", Exec: runGoBuildInfo, }, + { + Name: "peer-relay-servers", + ShortUsage: "tailscale debug peer-relay-servers", + ShortHelp: "Print the current set of candidate peer relay servers", + Exec: runPeerRelayServers, + }, }...), } } @@ -1327,3 +1333,17 @@ func runDebugResolve(ctx context.Context, args []string) error { } return nil } + +func runPeerRelayServers(ctx context.Context, args []string) error { + if len(args) > 0 { + return errors.New("unexpected arguments") + } + v, err := localClient.DebugResultJSON(ctx, "peer-relay-servers") + if err != nil { + return err + } + e := json.NewEncoder(os.Stdout) + e.SetIndent("", " ") + e.Encode(v) + return nil +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9b9bd82b5..62ab6d904 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6956,6 +6956,10 @@ func (b *LocalBackend) DebugReSTUN() error { return nil } +func (b *LocalBackend) DebugPeerRelayServers() set.Set[netip.AddrPort] { + return b.MagicConn().PeerRelays() +} + // ControlKnobs returns the node's control knobs. func (b *LocalBackend) ControlKnobs() *controlknobs.Knobs { return b.sys.ControlKnobs() diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index cd59c54e0..fb024039b 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -696,6 +696,12 @@ func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { break } h.b.DebugForcePreferDERP(n) + case "peer-relay-servers": + servers := h.b.DebugPeerRelayServers() + err = json.NewEncoder(w).Encode(servers) + if err == nil { + return + } case "": err = fmt.Errorf("missing parameter 'action'") default: diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a8b1c8f15..24a4fc073 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3907,3 +3907,8 @@ func (le *lazyEndpoint) FromPeer(peerPublicKey [32]byte) { le.c.peerMap.setNodeKeyForEpAddr(le.src, pubKey) le.c.logf("magicsock: lazyEndpoint.FromPeer(%v) setting epAddr(%v) in peerMap for node(%v)", pubKey.ShortString(), le.src, ep.nodeAddr) } + +// PeerRelays returns the current set of candidate peer relays. 
+func (c *Conn) PeerRelays() set.Set[netip.AddrPort] { + return c.relayManager.getServers() +} diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index c8c9ed41b..d7acf80b5 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -57,6 +57,7 @@ type relayManager struct { newServerEndpointCh chan newRelayServerEndpointEvent rxHandshakeDiscoMsgCh chan relayHandshakeDiscoMsgEvent serversCh chan set.Set[netip.AddrPort] + getServersCh chan chan set.Set[netip.AddrPort] discoInfoMu sync.Mutex // guards the following field discoInfoByServerDisco map[key.DiscoPublic]*relayHandshakeDiscoInfo @@ -185,10 +186,29 @@ func (r *relayManager) runLoop() { if !r.hasActiveWorkRunLoop() { return } + case getServersCh := <-r.getServersCh: + r.handleGetServersRunLoop(getServersCh) + if !r.hasActiveWorkRunLoop() { + return + } } } } +func (r *relayManager) handleGetServersRunLoop(getServersCh chan set.Set[netip.AddrPort]) { + servers := make(set.Set[netip.AddrPort], len(r.serversByAddrPort)) + for server := range r.serversByAddrPort { + servers.Add(server) + } + getServersCh <- servers +} + +func (r *relayManager) getServers() set.Set[netip.AddrPort] { + ch := make(chan set.Set[netip.AddrPort]) + relayManagerInputEvent(r, nil, &r.getServersCh, ch) + return <-ch +} + func (r *relayManager) handleServersUpdateRunLoop(update set.Set[netip.AddrPort]) { for k, v := range r.serversByAddrPort { if !update.Contains(k) { @@ -244,6 +264,7 @@ func (r *relayManager) init() { r.newServerEndpointCh = make(chan newRelayServerEndpointEvent) r.rxHandshakeDiscoMsgCh = make(chan relayHandshakeDiscoMsgEvent) r.serversCh = make(chan set.Set[netip.AddrPort]) + r.getServersCh = make(chan chan set.Set[netip.AddrPort]) r.runLoopStoppedCh = make(chan struct{}, 1) r.runLoopStoppedCh <- struct{}{} }) diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index 8f9236012..01f9258ad 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -32,4 +32,19 @@ func TestRelayManagerInitAndIdle(t *testing.T) { rm = relayManager{} rm.handleRelayServersSet(make(set.Set[netip.AddrPort])) <-rm.runLoopStoppedCh + + rm = relayManager{} + rm.getServers() + <-rm.runLoopStoppedCh +} + +func TestRelayManagerGetServers(t *testing.T) { + rm := relayManager{} + servers := make(set.Set[netip.AddrPort], 1) + servers.Add(netip.MustParseAddrPort("192.0.2.1:7")) + rm.handleRelayServersSet(servers) + got := rm.getServers() + if !servers.Equal(got) { + t.Errorf("got %v != want %v", got, servers) + } } From 097c2bcf6700e5dc074187bbe0c05ae4cd8b3c26 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 16 Jul 2025 11:04:32 -0700 Subject: [PATCH 1103/1708] go.mod: bump wireguard-go (#16578) So that conn.PeerAwareEndpoint is always evaluated per-packet, rather than at least once per packet batch. 
Updates tailscale/corp#30042 Signed-off-by: Jordan Whited --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f040d7799..3d7514158 100644 --- a/go.mod +++ b/go.mod @@ -90,7 +90,7 @@ require ( github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 - github.com/tailscale/wireguard-go v0.0.0-20250711050509-4064566ecaf9 + github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e github.com/tc-hib/winres v0.2.1 github.com/tcnksm/go-httpstat v0.2.0 diff --git a/go.sum b/go.sum index ea17b1182..995b93010 100644 --- a/go.sum +++ b/go.sum @@ -975,8 +975,8 @@ github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:U github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= -github.com/tailscale/wireguard-go v0.0.0-20250711050509-4064566ecaf9 h1:kSzi/ugdekAxhcVdCxH6er7OjoNc2oDRcimWJDvnRFM= -github.com/tailscale/wireguard-go v0.0.0-20250711050509-4064566ecaf9/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da h1:jVRUZPRs9sqyKlYHHzHjAqKN+6e/Vog6NpHYeNPJqOw= +github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= From 17c5116d469f79d5fba20e50fc414932f3ce681d Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 16 Jul 2025 11:19:21 -0700 Subject: [PATCH 1104/1708] ipn/ipnlocal: sort tailscale debug peer-relay-servers slice (#16579) Updates tailscale/corp#30036 Signed-off-by: Jordan Whited --- ipn/localapi/localapi.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index fb024039b..d7c64b917 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -697,7 +697,10 @@ func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { } h.b.DebugForcePreferDERP(n) case "peer-relay-servers": - servers := h.b.DebugPeerRelayServers() + servers := h.b.DebugPeerRelayServers().Slice() + slices.SortFunc(servers, func(a, b netip.AddrPort) int { + return a.Compare(b) + }) err = json.NewEncoder(w).Encode(servers) if err == nil { return From e84e58c56733072b15fb92c10e4ff702d8fa84d4 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Wed, 16 Jul 2025 11:50:13 -0700 Subject: [PATCH 1105/1708] ipn/ipnlocal: use rendezvous hashing to traffic-steer exit nodes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With auto exit nodes enabled, the client picks exit nodes from the ones advertised in the network map. Usually, it picks the one with the highest priority score, but when the top spot is tied, it used to pick randomly. 
Then, once it made a selection, it would strongly prefer to stick with that exit node. It wouldn’t even consider another exit node unless the client was shutdown or the exit node went offline. This is to prevent flapping, where a client constantly chooses a different random exit node. The major problem with this algorithm is that new exit nodes don’t get selected as often as they should. In fact, they wouldn’t even move over if a higher scoring exit node appeared. Let’s say that you have an exit node and it’s overloaded. So you spin up a new exit node, right beside your existing one, in the hopes that the traffic will be split across them. But since the client had this strong affinity, they stick with the exit node they know and love. Using rendezvous hashing, we can have different clients spread their selections equally across their top scoring exit nodes. When an exit node shuts down, its clients will spread themselves evenly to their other equal options. When an exit node starts, a proportional number of clients will migrate to their new best option. Read more: https://en.wikipedia.org/wiki/Rendezvous_hashing The trade-off is that starting up a new exit node may cause some clients to move over, interrupting their existing network connections. So this change is only enabled for tailnets with `traffic-steering` enabled. Updates tailscale/corp#29966 Fixes #16551 Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 53 +++++++++++++++++++++----------------- ipn/ipnlocal/local_test.go | 51 +++--------------------------------- 2 files changed, 33 insertions(+), 71 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 62ab6d904..d3754e540 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -11,6 +11,7 @@ import ( "context" "crypto/sha256" "encoding/base64" + "encoding/binary" "encoding/hex" "encoding/json" "errors" @@ -7750,7 +7751,7 @@ func suggestExitNode(report *netcheck.Report, nb *nodeBackend, prevSuggestion ta switch { case nb.SelfHasCap(tailcfg.NodeAttrTrafficSteering): // The traffic-steering feature flag is enabled on this tailnet. - return suggestExitNodeUsingTrafficSteering(nb, prevSuggestion, allowList) + return suggestExitNodeUsingTrafficSteering(nb, allowList) default: return suggestExitNodeUsingDERP(report, nb, prevSuggestion, selectRegion, selectNode, allowList) } @@ -7896,12 +7897,17 @@ var ErrNoNetMap = errors.New("no network map, try again later") // pick one of the best exit nodes. These priorities are provided by Control in // the node’s [tailcfg.Location]. To be eligible for consideration, the node // must have NodeAttrSuggestExitNode in its CapMap. 
-func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, prev tailcfg.StableNodeID, allowed set.Set[tailcfg.StableNodeID]) (apitype.ExitNodeSuggestionResponse, error) { +func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, allowed set.Set[tailcfg.StableNodeID]) (apitype.ExitNodeSuggestionResponse, error) { nm := nb.NetMap() if nm == nil { return apitype.ExitNodeSuggestionResponse{}, ErrNoNetMap } + self := nb.Self() + if !self.Valid() { + return apitype.ExitNodeSuggestionResponse{}, ErrNoNetMap + } + if !nb.SelfHasCap(tailcfg.NodeAttrTrafficSteering) { panic("missing traffic-steering capability") } @@ -7923,12 +7929,6 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, prev tailcfg.StableNod if !tsaddr.ContainsExitRoutes(p.AllowedIPs()) { return false } - if p.StableID() == prev { - // Prevent flapping: since prev is a valid suggestion, - // force prev to be the only valid pick. - force = p - return false - } return true }) if force.Valid() { @@ -7950,6 +7950,7 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, prev tailcfg.StableNod } return s } + rdvHash := makeRendezvousHasher(self.ID()) var pick tailcfg.NodeView if len(nodes) == 1 { @@ -7958,25 +7959,18 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, prev tailcfg.StableNod if len(nodes) > 1 { // Find the highest scoring exit nodes. slices.SortFunc(nodes, func(a, b tailcfg.NodeView) int { - return cmp.Compare(score(b), score(a)) // reverse sort - }) - - // Find the top exit nodes, which all have the same score. - topI := len(nodes) - ts := score(nodes[0]) - for i, n := range nodes[1:] { - if score(n) < ts { - // n is the first node with a lower score. - // Make nodes[:topI] to slice the top exit nodes. - topI = i + 1 - break + c := cmp.Compare(score(b), score(a)) // Highest score first. + if c == 0 { + // Rendezvous hashing for reliably picking the + // same node from a list: tailscale/tailscale#16551. + return cmp.Compare(rdvHash(b.ID()), rdvHash(a.ID())) } - } + return c + }) // TODO(sfllaw): add a temperature knob so that this client has // a chance of picking the next best option. - randSeed := uint64(nm.SelfNode.ID()) - pick = nodes[rands.IntN(randSeed, topI)] + pick = nodes[0] } if !pick.Valid() { @@ -8077,6 +8071,19 @@ func longLatDistance(fromLat, fromLong, toLat, toLong float64) float64 { return earthRadiusMeters * c } +// makeRendezvousHasher returns a function that hashes a node ID to a uint64. +// https://en.wikipedia.org/wiki/Rendezvous_hashing +func makeRendezvousHasher(seed tailcfg.NodeID) func(tailcfg.NodeID) uint64 { + en := binary.BigEndian + return func(n tailcfg.NodeID) uint64 { + var b [16]byte + en.PutUint64(b[:], uint64(seed)) + en.PutUint64(b[8:], uint64(n)) + v := sha256.Sum256(b[:]) + return en.Uint64(v[:]) + } +} + const ( // unresolvedExitNodeID is a special [tailcfg.StableNodeID] value // used as an exit node ID to install a blackhole route, preventing diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 0b39c45c2..13681fc04 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -5012,7 +5012,7 @@ func TestSuggestExitNodeTrafficSteering(t *testing.T) { wantName: "peer1", }, { - name: "many-suggested-exit-nodes", + name: "suggest-exit-node-stable-pick", netMap: &netmap.NetworkMap{ SelfNode: selfNode.View(), Peers: []tailcfg.NodeView{ @@ -5030,55 +5030,10 @@ func TestSuggestExitNodeTrafficSteering(t *testing.T) { withSuggest()), }, }, + // Change this, if the hashing function changes. 
wantID: "stable3", wantName: "peer3", }, - { - name: "suggested-exit-node-was-last-suggested", - netMap: &netmap.NetworkMap{ - SelfNode: selfNode.View(), - Peers: []tailcfg.NodeView{ - makePeer(1, - withExitRoutes(), - withSuggest()), - makePeer(2, - withExitRoutes(), - withSuggest()), - makePeer(3, - withExitRoutes(), - withSuggest()), - makePeer(4, - withExitRoutes(), - withSuggest()), - }, - }, - lastExit: "stable2", // overrides many-suggested-exit-nodes - wantID: "stable2", - wantName: "peer2", - }, - { - name: "suggested-exit-node-was-never-suggested", - netMap: &netmap.NetworkMap{ - SelfNode: selfNode.View(), - Peers: []tailcfg.NodeView{ - makePeer(1, - withExitRoutes(), - withSuggest()), - makePeer(2, - withExitRoutes(), - withSuggest()), - makePeer(3, - withExitRoutes(), - withSuggest()), - makePeer(4, - withExitRoutes(), - withSuggest()), - }, - }, - lastExit: "stable10", - wantID: "stable3", // matches many-suggested-exit-nodes - wantName: "peer3", - }, { name: "exit-nodes-with-and-without-priority", netMap: &netmap.NetworkMap{ @@ -5282,7 +5237,7 @@ func TestSuggestExitNodeTrafficSteering(t *testing.T) { defer nb.shutdown(errShutdown) nb.SetNetMap(tt.netMap) - got, err := suggestExitNodeUsingTrafficSteering(nb, tt.lastExit, allowList) + got, err := suggestExitNodeUsingTrafficSteering(nb, allowList) if tt.wantErr == nil && err != nil { t.Fatalf("err=%v, want nil", err) } From 36aeacb297ae97f5b21358cfe6ddc814d3920d59 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 16 Jul 2025 14:34:05 -0700 Subject: [PATCH 1106/1708] wgengine/magicsock: add peer relay metrics (#16582) Updates tailscale/corp#30040 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 21 +++- wgengine/magicsock/magicsock.go | 194 ++++++++++++++++++++++---------- 2 files changed, 151 insertions(+), 64 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 385c9245e..48d5ef5a1 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1064,11 +1064,21 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { switch { case udpAddr.ap.Addr().Is4(): - de.c.metrics.outboundPacketsIPv4Total.Add(int64(len(buffs))) - de.c.metrics.outboundBytesIPv4Total.Add(int64(txBytes)) + if udpAddr.vni.isSet() { + de.c.metrics.outboundPacketsPeerRelayIPv4Total.Add(int64(len(buffs))) + de.c.metrics.outboundBytesPeerRelayIPv4Total.Add(int64(txBytes)) + } else { + de.c.metrics.outboundPacketsIPv4Total.Add(int64(len(buffs))) + de.c.metrics.outboundBytesIPv4Total.Add(int64(txBytes)) + } case udpAddr.ap.Addr().Is6(): - de.c.metrics.outboundPacketsIPv6Total.Add(int64(len(buffs))) - de.c.metrics.outboundBytesIPv6Total.Add(int64(txBytes)) + if udpAddr.vni.isSet() { + de.c.metrics.outboundPacketsPeerRelayIPv6Total.Add(int64(len(buffs))) + de.c.metrics.outboundBytesPeerRelayIPv6Total.Add(int64(txBytes)) + } else { + de.c.metrics.outboundPacketsIPv6Total.Add(int64(len(buffs))) + de.c.metrics.outboundBytesIPv6Total.Add(int64(txBytes)) + } } // TODO(raggi): needs updating for accuracy, as in error conditions we may have partial sends. 
@@ -1082,7 +1092,8 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { for _, buff := range buffs { buff = buff[offset:] const isDisco = false - ok, _ := de.c.sendAddr(derpAddr, de.publicKey, buff, isDisco) + const isGeneveEncap = false + ok, _ := de.c.sendAddr(derpAddr, de.publicKey, buff, isDisco, isGeneveEncap) txBytes += len(buff) if !ok { allOk = false diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 24a4fc073..ad07003f7 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -87,9 +87,11 @@ const ( type Path string const ( - PathDirectIPv4 Path = "direct_ipv4" - PathDirectIPv6 Path = "direct_ipv6" - PathDERP Path = "derp" + PathDirectIPv4 Path = "direct_ipv4" + PathDirectIPv6 Path = "direct_ipv6" + PathDERP Path = "derp" + PathPeerRelayIPv4 Path = "peer_relay_ipv4" + PathPeerRelayIPv6 Path = "peer_relay_ipv6" ) type pathLabel struct { @@ -97,6 +99,8 @@ type pathLabel struct { // - direct_ipv4 // - direct_ipv6 // - derp + // - peer_relay_ipv4 + // - peer_relay_ipv6 Path Path } @@ -108,27 +112,35 @@ type pathLabel struct { type metrics struct { // inboundPacketsTotal is the total number of inbound packets received, // labeled by the path the packet took. - inboundPacketsIPv4Total expvar.Int - inboundPacketsIPv6Total expvar.Int - inboundPacketsDERPTotal expvar.Int + inboundPacketsIPv4Total expvar.Int + inboundPacketsIPv6Total expvar.Int + inboundPacketsDERPTotal expvar.Int + inboundPacketsPeerRelayIPv4Total expvar.Int + inboundPacketsPeerRelayIPv6Total expvar.Int // inboundBytesTotal is the total number of inbound bytes received, // labeled by the path the packet took. - inboundBytesIPv4Total expvar.Int - inboundBytesIPv6Total expvar.Int - inboundBytesDERPTotal expvar.Int + inboundBytesIPv4Total expvar.Int + inboundBytesIPv6Total expvar.Int + inboundBytesDERPTotal expvar.Int + inboundBytesPeerRelayIPv4Total expvar.Int + inboundBytesPeerRelayIPv6Total expvar.Int // outboundPacketsTotal is the total number of outbound packets sent, // labeled by the path the packet took. - outboundPacketsIPv4Total expvar.Int - outboundPacketsIPv6Total expvar.Int - outboundPacketsDERPTotal expvar.Int + outboundPacketsIPv4Total expvar.Int + outboundPacketsIPv6Total expvar.Int + outboundPacketsDERPTotal expvar.Int + outboundPacketsPeerRelayIPv4Total expvar.Int + outboundPacketsPeerRelayIPv6Total expvar.Int // outboundBytesTotal is the total number of outbound bytes sent, // labeled by the path the packet took. - outboundBytesIPv4Total expvar.Int - outboundBytesIPv6Total expvar.Int - outboundBytesDERPTotal expvar.Int + outboundBytesIPv4Total expvar.Int + outboundBytesIPv6Total expvar.Int + outboundBytesDERPTotal expvar.Int + outboundBytesPeerRelayIPv4Total expvar.Int + outboundBytesPeerRelayIPv6Total expvar.Int // outboundPacketsDroppedErrors is the total number of outbound packets // dropped due to errors. 
@@ -723,6 +735,8 @@ func registerMetrics(reg *usermetric.Registry) *metrics { pathDirectV4 := pathLabel{Path: PathDirectIPv4} pathDirectV6 := pathLabel{Path: PathDirectIPv6} pathDERP := pathLabel{Path: PathDERP} + pathPeerRelayV4 := pathLabel{Path: PathPeerRelayIPv4} + pathPeerRelayV6 := pathLabel{Path: PathPeerRelayIPv6} inboundPacketsTotal := usermetric.NewMultiLabelMapWithRegistry[pathLabel]( reg, "tailscaled_inbound_packets_total", @@ -755,25 +769,37 @@ func registerMetrics(reg *usermetric.Registry) *metrics { metricRecvDataPacketsIPv4.Register(&m.inboundPacketsIPv4Total) metricRecvDataPacketsIPv6.Register(&m.inboundPacketsIPv6Total) metricRecvDataPacketsDERP.Register(&m.inboundPacketsDERPTotal) + metricRecvDataPacketsPeerRelayIPv4.Register(&m.inboundPacketsPeerRelayIPv4Total) + metricRecvDataPacketsPeerRelayIPv6.Register(&m.inboundPacketsPeerRelayIPv6Total) metricSendUDP.Register(&m.outboundPacketsIPv4Total) metricSendUDP.Register(&m.outboundPacketsIPv6Total) metricSendDERP.Register(&m.outboundPacketsDERPTotal) + metricSendPeerRelay.Register(&m.outboundPacketsPeerRelayIPv4Total) + metricSendPeerRelay.Register(&m.outboundPacketsPeerRelayIPv6Total) inboundPacketsTotal.Set(pathDirectV4, &m.inboundPacketsIPv4Total) inboundPacketsTotal.Set(pathDirectV6, &m.inboundPacketsIPv6Total) inboundPacketsTotal.Set(pathDERP, &m.inboundPacketsDERPTotal) + inboundPacketsTotal.Set(pathPeerRelayV4, &m.inboundPacketsPeerRelayIPv4Total) + inboundPacketsTotal.Set(pathPeerRelayV6, &m.inboundPacketsPeerRelayIPv6Total) inboundBytesTotal.Set(pathDirectV4, &m.inboundBytesIPv4Total) inboundBytesTotal.Set(pathDirectV6, &m.inboundBytesIPv6Total) inboundBytesTotal.Set(pathDERP, &m.inboundBytesDERPTotal) + inboundBytesTotal.Set(pathPeerRelayV4, &m.inboundBytesPeerRelayIPv4Total) + inboundBytesTotal.Set(pathPeerRelayV6, &m.inboundBytesPeerRelayIPv6Total) outboundPacketsTotal.Set(pathDirectV4, &m.outboundPacketsIPv4Total) outboundPacketsTotal.Set(pathDirectV6, &m.outboundPacketsIPv6Total) outboundPacketsTotal.Set(pathDERP, &m.outboundPacketsDERPTotal) + outboundPacketsTotal.Set(pathPeerRelayV4, &m.outboundPacketsPeerRelayIPv4Total) + outboundPacketsTotal.Set(pathPeerRelayV6, &m.outboundPacketsPeerRelayIPv6Total) outboundBytesTotal.Set(pathDirectV4, &m.outboundBytesIPv4Total) outboundBytesTotal.Set(pathDirectV6, &m.outboundBytesIPv6Total) outboundBytesTotal.Set(pathDERP, &m.outboundBytesDERPTotal) + outboundBytesTotal.Set(pathPeerRelayV4, &m.outboundBytesPeerRelayIPv4Total) + outboundBytesTotal.Set(pathPeerRelayV6, &m.outboundBytesPeerRelayIPv6Total) outboundPacketsDroppedErrors.Set(usermetric.DropLabels{Reason: usermetric.ReasonError}, &m.outboundPacketsDroppedErrors) @@ -786,8 +812,11 @@ func deregisterMetrics(m *metrics) { metricRecvDataPacketsIPv4.UnregisterAll() metricRecvDataPacketsIPv6.UnregisterAll() metricRecvDataPacketsDERP.UnregisterAll() + metricRecvDataPacketsPeerRelayIPv4.UnregisterAll() + metricRecvDataPacketsPeerRelayIPv6.UnregisterAll() metricSendUDP.UnregisterAll() metricSendDERP.UnregisterAll() + metricSendPeerRelay.UnregisterAll() } // InstallCaptureHook installs a callback which is called to @@ -1415,23 +1444,37 @@ func (c *Conn) sendUDPBatch(addr epAddr, buffs [][]byte, offset int) (sent bool, // sendUDP sends UDP packet b to ipp. // See sendAddr's docs on the return value meanings. 
-func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte, isDisco bool) (sent bool, err error) { +func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte, isDisco bool, isGeneveEncap bool) (sent bool, err error) { if runtime.GOOS == "js" { return false, errNoUDP } sent, err = c.sendUDPStd(ipp, b) if err != nil { - metricSendUDPError.Add(1) + if isGeneveEncap { + metricSendPeerRelayError.Add(1) + } else { + metricSendUDPError.Add(1) + } c.maybeRebindOnError(err) } else { if sent && !isDisco { switch { case ipp.Addr().Is4(): - c.metrics.outboundPacketsIPv4Total.Add(1) - c.metrics.outboundBytesIPv4Total.Add(int64(len(b))) + if isGeneveEncap { + c.metrics.outboundPacketsPeerRelayIPv4Total.Add(1) + c.metrics.outboundBytesPeerRelayIPv4Total.Add(int64(len(b))) + } else { + c.metrics.outboundPacketsIPv4Total.Add(1) + c.metrics.outboundBytesIPv4Total.Add(int64(len(b))) + } case ipp.Addr().Is6(): - c.metrics.outboundPacketsIPv6Total.Add(1) - c.metrics.outboundBytesIPv6Total.Add(int64(len(b))) + if isGeneveEncap { + c.metrics.outboundPacketsPeerRelayIPv6Total.Add(1) + c.metrics.outboundBytesPeerRelayIPv6Total.Add(int64(len(b))) + } else { + c.metrics.outboundPacketsIPv6Total.Add(1) + c.metrics.outboundBytesIPv6Total.Add(int64(len(b))) + } } } } @@ -1506,9 +1549,9 @@ func (c *Conn) sendUDPStd(addr netip.AddrPort, b []byte) (sent bool, err error) // An example of when they might be different: sending to an // IPv6 address when the local machine doesn't have IPv6 support // returns (false, nil); it's not an error, but nothing was sent. -func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte, isDisco bool) (sent bool, err error) { +func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte, isDisco bool, isGeneveEncap bool) (sent bool, err error) { if addr.Addr() != tailcfg.DerpMagicIPAddr { - return c.sendUDP(addr, b, isDisco) + return c.sendUDP(addr, b, isDisco, isGeneveEncap) } regionID := int(addr.Port()) @@ -1562,7 +1605,9 @@ func (c *Conn) putReceiveBatch(batch *receiveBatch) { func (c *Conn) receiveIPv4() conn.ReceiveFunc { return c.mkReceiveFunc(&c.pconn4, c.health.ReceiveFuncStats(health.ReceiveIPv4), &c.metrics.inboundPacketsIPv4Total, + &c.metrics.inboundPacketsPeerRelayIPv4Total, &c.metrics.inboundBytesIPv4Total, + &c.metrics.inboundBytesPeerRelayIPv4Total, ) } @@ -1570,13 +1615,15 @@ func (c *Conn) receiveIPv4() conn.ReceiveFunc { func (c *Conn) receiveIPv6() conn.ReceiveFunc { return c.mkReceiveFunc(&c.pconn6, c.health.ReceiveFuncStats(health.ReceiveIPv6), &c.metrics.inboundPacketsIPv6Total, + &c.metrics.inboundPacketsPeerRelayIPv6Total, &c.metrics.inboundBytesIPv6Total, + &c.metrics.inboundBytesPeerRelayIPv6Total, ) } // mkReceiveFunc creates a ReceiveFunc reading from ruc. // The provided healthItem and metrics are updated if non-nil. -func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFuncStats, packetMetric, bytesMetric *expvar.Int) conn.ReceiveFunc { +func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFuncStats, directPacketMetric, peerRelayPacketMetric, directBytesMetric, peerRelayBytesMetric *expvar.Int) conn.ReceiveFunc { // epCache caches an epAddr->endpoint for hot flows. 
var epCache epAddrEndpointCache @@ -1612,12 +1659,21 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu continue } ipp := msg.Addr.(*net.UDPAddr).AddrPort() - if ep, size, ok := c.receiveIP(msg.Buffers[0][:msg.N], ipp, &epCache); ok { - if packetMetric != nil { - packetMetric.Add(1) - } - if bytesMetric != nil { - bytesMetric.Add(int64(msg.N)) + if ep, size, isGeneveEncap, ok := c.receiveIP(msg.Buffers[0][:msg.N], ipp, &epCache); ok { + if isGeneveEncap { + if peerRelayPacketMetric != nil { + peerRelayPacketMetric.Add(1) + } + if peerRelayBytesMetric != nil { + peerRelayBytesMetric.Add(int64(msg.N)) + } + } else { + if directPacketMetric != nil { + directPacketMetric.Add(1) + } + if directBytesMetric != nil { + directBytesMetric.Add(int64(msg.N)) + } } eps[i] = ep sizes[i] = size @@ -1646,11 +1702,14 @@ func looksLikeInitiationMsg(b []byte) bool { // receiveIP is the shared bits of ReceiveIPv4 and ReceiveIPv6. // // size is the length of 'b' to report up to wireguard-go (only relevant if -// 'ok' is true) +// 'ok' is true). +// +// isGeneveEncap is whether 'b' is encapsulated by a Geneve header (only +// relevant if 'ok' is true). // // ok is whether this read should be reported up to wireguard-go (our // caller). -func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCache) (_ conn.Endpoint, size int, ok bool) { +func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCache) (_ conn.Endpoint, size int, isGeneveEncap bool, ok bool) { var ep *endpoint size = len(b) @@ -1663,7 +1722,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // Decode only returns an error when 'b' is too short, and // 'isGeneveEncap' indicates it's a sufficient length. c.logf("[unexpected] geneve header decoding error: %v", err) - return nil, 0, false + return nil, 0, false, false } src.vni.set(geneve.VNI) } @@ -1678,10 +1737,10 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // [disco.MessageType], but we assert it should be handshake-related. shouldByRelayHandshakeMsg := geneve.Control == true c.handleDiscoMessage(b, src, shouldByRelayHandshakeMsg, key.NodePublic{}, discoRXPathUDP) - return nil, 0, false + return nil, 0, false, false case packetLooksLikeSTUNBinding: c.netChecker.ReceiveSTUNPacket(b, ipp) - return nil, 0, false + return nil, 0, false, false default: // Fall through for all other packet types as they are assumed to // be potentially WireGuard. @@ -1691,7 +1750,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // If we have no private key, we're logged out or // stopped. Don't try to pass these wireguard packets // up to wireguard-go; it'll just complain (issue 1167). - return nil, 0, false + return nil, 0, false, false } if src.vni.isSet() { @@ -1715,11 +1774,11 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // Note: UDP relay is dependent on cryptorouting enablement. We // only update Geneve-encapsulated [epAddr]s in the [peerMap] // via [lazyEndpoint]. - return nil, 0, false + return nil, 0, false, false } // TODO(jwhited): reuse [lazyEndpoint] across calls to receiveIP() // for the same batch & [epAddr] src. 
- return &lazyEndpoint{c: c, src: src}, size, true + return &lazyEndpoint{c: c, src: src}, size, isGeneveEncap, true } cache.epAddr = src cache.de = de @@ -1738,9 +1797,9 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // unlucky and fail to JIT configure the "correct" peer. // TODO(jwhited): relax this to include direct connections // See http://go/corp/29422 & http://go/corp/30042 - return &lazyEndpoint{c: c, maybeEP: ep, src: src}, size, true + return &lazyEndpoint{c: c, maybeEP: ep, src: src}, size, isGeneveEncap, true } - return ep, size, true + return ep, size, isGeneveEncap, true } // discoLogLevel controls the verbosity of discovery log messages. @@ -1861,7 +1920,7 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. box := di.sharedKey.Seal(m.AppendMarshal(nil)) pkt = append(pkt, box...) const isDisco = true - sent, err = c.sendAddr(dst.ap, dstKey, pkt, isDisco) + sent, err = c.sendAddr(dst.ap, dstKey, pkt, isDisco, dst.vni.isSet()) if sent { if logLevel == discoLog || (logLevel == discoVerboseLog && debugDisco()) { node := "?" @@ -2149,13 +2208,15 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake isVia := false msgType := "CallMeMaybe" cmm, ok := dm.(*disco.CallMeMaybe) - if !ok { + if ok { + metricRecvDiscoCallMeMaybe.Add(1) + } else { + metricRecvDiscoCallMeMaybeVia.Add(1) via = dm.(*disco.CallMeMaybeVia) msgType = "CallMeMaybeVia" isVia = true } - metricRecvDiscoCallMeMaybe.Add(1) if !isDERP || derpNodeSrc.IsZero() { // CallMeMaybe{Via} messages should only come via DERP. c.logf("[unexpected] %s packets should only come via DERP", msgType) @@ -2164,7 +2225,11 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake nodeKey := derpNodeSrc ep, ok := c.peerMap.endpointForNodeKey(nodeKey) if !ok { - metricRecvDiscoCallMeMaybeBadNode.Add(1) + if isVia { + metricRecvDiscoCallMeMaybeViaBadNode.Add(1) + } else { + metricRecvDiscoCallMeMaybeBadNode.Add(1) + } c.logf("magicsock: disco: ignoring %s from %v; %v is unknown", msgType, sender.ShortString(), derpNodeSrc.ShortString()) return } @@ -2190,7 +2255,11 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake return } if epDisco.key != di.discoKey { - metricRecvDiscoCallMeMaybeBadDisco.Add(1) + if isVia { + metricRecvDiscoCallMeMaybeViaBadDisco.Add(1) + } else { + metricRecvDiscoCallMeMaybeBadDisco.Add(1) + } c.logf("[unexpected] %s from peer via DERP whose netmap discokey != disco source", msgType) return } @@ -3695,15 +3764,19 @@ var ( metricSendDERPErrorQueue = clientmetric.NewCounter("magicsock_send_derp_error_queue") metricSendUDP = clientmetric.NewAggregateCounter("magicsock_send_udp") metricSendUDPError = clientmetric.NewCounter("magicsock_send_udp_error") + metricSendPeerRelay = clientmetric.NewAggregateCounter("magicsock_send_peer_relay") + metricSendPeerRelayError = clientmetric.NewCounter("magicsock_send_peer_relay_error") metricSendDERP = clientmetric.NewAggregateCounter("magicsock_send_derp") metricSendDERPError = clientmetric.NewCounter("magicsock_send_derp_error") // Data packets (non-disco) - metricSendData = clientmetric.NewCounter("magicsock_send_data") - metricSendDataNetworkDown = clientmetric.NewCounter("magicsock_send_data_network_down") - metricRecvDataPacketsDERP = clientmetric.NewAggregateCounter("magicsock_recv_data_derp") - metricRecvDataPacketsIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv4") - metricRecvDataPacketsIPv6 = 
clientmetric.NewAggregateCounter("magicsock_recv_data_ipv6") + metricSendData = clientmetric.NewCounter("magicsock_send_data") + metricSendDataNetworkDown = clientmetric.NewCounter("magicsock_send_data_network_down") + metricRecvDataPacketsDERP = clientmetric.NewAggregateCounter("magicsock_recv_data_derp") + metricRecvDataPacketsIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv4") + metricRecvDataPacketsIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv6") + metricRecvDataPacketsPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv4") + metricRecvDataPacketsPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv6") // Disco packets metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp") @@ -3719,15 +3792,18 @@ var ( metricRecvDiscoBadKey = clientmetric.NewCounter("magicsock_disco_recv_bad_key") metricRecvDiscoBadParse = clientmetric.NewCounter("magicsock_disco_recv_bad_parse") - metricRecvDiscoUDP = clientmetric.NewCounter("magicsock_disco_recv_udp") - metricRecvDiscoDERP = clientmetric.NewCounter("magicsock_disco_recv_derp") - metricRecvDiscoPing = clientmetric.NewCounter("magicsock_disco_recv_ping") - metricRecvDiscoPong = clientmetric.NewCounter("magicsock_disco_recv_pong") - metricRecvDiscoCallMeMaybe = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe") - metricRecvDiscoCallMeMaybeBadNode = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_node") - metricRecvDiscoCallMeMaybeBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_disco") - metricRecvDiscoDERPPeerNotHere = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_not_here") - metricRecvDiscoDERPPeerGoneUnknown = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_gone_unknown") + metricRecvDiscoUDP = clientmetric.NewCounter("magicsock_disco_recv_udp") + metricRecvDiscoDERP = clientmetric.NewCounter("magicsock_disco_recv_derp") + metricRecvDiscoPing = clientmetric.NewCounter("magicsock_disco_recv_ping") + metricRecvDiscoPong = clientmetric.NewCounter("magicsock_disco_recv_pong") + metricRecvDiscoCallMeMaybe = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe") + metricRecvDiscoCallMeMaybeVia = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia") + metricRecvDiscoCallMeMaybeBadNode = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_node") + metricRecvDiscoCallMeMaybeViaBadNode = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia_bad_node") + metricRecvDiscoCallMeMaybeBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_disco") + metricRecvDiscoCallMeMaybeViaBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia_bad_disco") + metricRecvDiscoDERPPeerNotHere = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_not_here") + metricRecvDiscoDERPPeerGoneUnknown = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_gone_unknown") // metricDERPHomeChange is how many times our DERP home region DI has // changed from non-zero to a different non-zero. 
metricDERPHomeChange = clientmetric.NewCounter("derp_home_change")

From e7238efafa427c2a360540534bd08613a81ca7bc Mon Sep 17 00:00:00 2001
From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>
Date: Wed, 16 Jul 2025 19:37:46 -0400
Subject: [PATCH 1107/1708] cmd/tailscale/cli: Add service flag to serve command (#16191)

* cmd/tailscale/cli: Add service flag to serve command

This commit adds the service flag to the serve command, which allows serving a service and adds the service to the advertisedServices field in prefs (what the advertise command does; the advertise command will be removed later). When adding proxies, TCP proxies and web proxies work the same way as normal serve, just under a different DNS name. There is a service-specific L3 serving mode called Tun, which can be set via the --tun flag. Serving a service is always in --bg mode. If --bg is explicitly set to false, an error message will be printed. The restriction on the proxy target being localhost or 127.0.0.1 also applies to services.

When removing proxies, TCP proxies can be removed with the type and port flags and the off argument. Web proxies can be removed with the type, port, and setPath flags and the off argument. To align with normal serve, when setPath is not set, all handlers under the hostport will be removed. When no flags are set but the off argument was passed by the user, it will be a no-op. Removing all config for a service will be available later with a new subcommand, clear.

Updates tailscale/corp#22954

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* cmd/tailscale/cli: fix ai comments and fix a test

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* cmd/tailscale/cli: Add a test for addServiceToPrefs

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* cmd/tailscale/cli: fix comment

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* add dnsName in error message

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* change the cli input flag variable type

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* replace FindServiceConfig with map lookup

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* some code simplification and add asServiceName

This commit contains code simplification for IsServingHTTPS, SetWebHandler, and SetTCPForwarding.

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* replace IsServiceName with tailcfg.AsServiceName

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* replace all assembly of host names for services with strings.Join

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* cmd/tailscale/cli: adjust parameter order and update output message

This commit updates the parameter order for IsTCPForwardingOnPort and SetWebHandler. Also updated the message msgServiceIPNotAssigned to msgServiceWaitingApproval to adapt to the latest terminology around services.

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* cmd/tailscale/cli: flip bool condition

This commit fixes a previously added bug that threw an error when serving funnel without a service. It should have been the opposite: throw an error when serving funnel with a service.
Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* cmd/tailscale/cli: change parameter of IsTCPForwardingOnPort

This commit changes the dnsName string parameter of IsTCPForwardingOnPort to svcName tailcfg.ServiceName. This change is made to reduce ambiguity when a single service might have different dnsNames.

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* ipn/ipnlocal: replace the key to webHandler for services

This commit changes the way we get the webHandler for vipServices. It used to use the host name from the request to find the webHandler; now everything targeting the vipService IP has the same set of handlers. This commit also stores service:port instead of FQDN:port as the key in serviceConfig for the Web map.

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* cmd/tailscale/cli: Updated use of service name.

This commit removes serviceName.IsEmpty and uses direct comparison instead. In legacy code, when an empty service name needs to be passed, a new constant noService is passed. Removed redundant code for checking service name validity and the String method of serviceNameFlag.

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* cmd/tailscale/cli: Update bgBoolFlag

This commit updates the field names and the Set and String methods of bgBoolFlag to make the code cleaner.

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* cmd/tailscale/cli: remove isDefaultService output from srvTypeAndPortFromFlags

This commit removes the isDefaultService output as it's no longer needed. Also deleted redundant code.

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* cmd/tailscale/cli: remove unnecessary variable declaration in messageForPort

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* replace bool output for AsServiceName with err

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* cmd/tailscale/cli: Replace DNSName with NoService if dnsName is only used to identify a service

This commit moves the noService constant to tailcfg and updates AsServiceName to return tailcfg.NoService if the input is not a valid service name. This commit also removes using the local DNSName as the svcName parameter. When a function is only using DNSName to identify whether it's working with a service, the input is replaced with svcName and the caller is expected to pass tailcfg.NoService if it's a local serve. This commit also replaces some uses of Sprintf with net.JoinHostPort for ipn.HostPort creation.

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* cmd/tailscale/cli: Remove the returned error for AsServiceName

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* apply suggested code and comment

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* replace local dnsName in test with tailcfg.NoService

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* cmd/tailscale/cli: move noService back and use elsewhere

The constant serves the purpose of providing readability when passed as a function parameter. It's more meaningful comparing to a . It can just be an empty string in other places.

Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com>

* ipn: Make WebHandlerExists and RemoveTCPForwarding accept svcName

This commit replaces the two functions' string input with svcName input, since they only use the dnsName to identify the service.
Also did some minor cleanups Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --------- Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- cmd/tailscale/cli/serve_legacy.go | 42 +- cmd/tailscale/cli/serve_legacy_test.go | 16 + cmd/tailscale/cli/serve_v2.go | 395 +++++++++-- cmd/tailscale/cli/serve_v2_test.go | 925 ++++++++++++++++++++++++- cmd/tailscale/cli/status.go | 2 +- ipn/ipnlocal/serve.go | 4 +- ipn/serve.go | 208 ++++-- ipn/serve_test.go | 115 +++ tailcfg/tailcfg.go | 10 + 9 files changed, 1573 insertions(+), 144 deletions(-) diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 96629b5ad..7c79f7f7b 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -141,6 +141,8 @@ type localServeClient interface { QueryFeature(ctx context.Context, feature string) (*tailcfg.QueryFeatureResponse, error) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*tailscale.IPNBusWatcher, error) IncrementCounter(ctx context.Context, name string, delta int) error + GetPrefs(ctx context.Context) (*ipn.Prefs, error) + EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) } // serveEnv is the environment the serve command runs within. All I/O should be @@ -154,14 +156,16 @@ type serveEnv struct { json bool // output JSON (status only for now) // v2 specific flags - bg bool // background mode - setPath string // serve path - https uint // HTTP port - http uint // HTTP port - tcp uint // TCP port - tlsTerminatedTCP uint // a TLS terminated TCP port - subcmd serveMode // subcommand - yes bool // update without prompt + bg bgBoolFlag // background mode + setPath string // serve path + https uint // HTTP port + http uint // HTTP port + tcp uint // TCP port + tlsTerminatedTCP uint // a TLS terminated TCP port + subcmd serveMode // subcommand + yes bool // update without prompt + service tailcfg.ServiceName // service name + tun bool // redirect traffic to OS for service lc localServeClient // localClient interface, specific to serve @@ -354,7 +358,7 @@ func (e *serveEnv) handleWebServe(ctx context.Context, srvPort uint16, useTLS bo if err != nil { return err } - if sc.IsTCPForwardingOnPort(srvPort) { + if sc.IsTCPForwardingOnPort(srvPort, noService) { fmt.Fprintf(Stderr, "error: cannot serve web; already serving TCP\n") return errHelp } @@ -411,11 +415,11 @@ func (e *serveEnv) handleWebServeRemove(ctx context.Context, srvPort uint16, mou if err != nil { return err } - if sc.IsTCPForwardingOnPort(srvPort) { + if sc.IsTCPForwardingOnPort(srvPort, noService) { return errors.New("cannot remove web handler; currently serving TCP") } hp := ipn.HostPort(net.JoinHostPort(dnsName, strconv.Itoa(int(srvPort)))) - if !sc.WebHandlerExists(hp, mount) { + if !sc.WebHandlerExists(noService, hp, mount) { return errors.New("error: handler does not exist") } sc.RemoveWebHandler(dnsName, srvPort, []string{mount}, false) @@ -550,15 +554,15 @@ func (e *serveEnv) handleTCPServe(ctx context.Context, srcType string, srcPort u fwdAddr := "127.0.0.1:" + dstPortStr - if sc.IsServingWeb(srcPort) { - return fmt.Errorf("cannot serve TCP; already serving web on %d", srcPort) - } - dnsName, err := e.getSelfDNSName(ctx) if err != nil { return err } + if sc.IsServingWeb(srcPort, noService) { + return fmt.Errorf("cannot serve TCP; already serving web on %d", srcPort) + } + sc.SetTCPForwarding(srcPort, fwdAddr, terminateTLS, dnsName) if !reflect.DeepEqual(cursc, sc) { @@ -581,11 +585,11 @@ func (e 
*serveEnv) handleTCPServeRemove(ctx context.Context, src uint16) error { if sc == nil { sc = new(ipn.ServeConfig) } - if sc.IsServingWeb(src) { + if sc.IsServingWeb(src, noService) { return fmt.Errorf("unable to remove; serving web, not TCP forwarding on serve port %d", src) } - if ph := sc.GetTCPPortHandler(src); ph != nil { - sc.RemoveTCPForwarding(src) + if ph := sc.GetTCPPortHandler(src, noService); ph != nil { + sc.RemoveTCPForwarding(noService, src) return e.lc.SetServeConfig(ctx, sc) } return errors.New("error: serve config does not exist") @@ -682,7 +686,7 @@ func (e *serveEnv) printWebStatusTree(sc *ipn.ServeConfig, hp ipn.HostPort) erro } scheme := "https" - if sc.IsServingHTTP(port) { + if sc.IsServingHTTP(port, noService) { scheme = "http" } diff --git a/cmd/tailscale/cli/serve_legacy_test.go b/cmd/tailscale/cli/serve_legacy_test.go index df68b5edd..6b053fbd7 100644 --- a/cmd/tailscale/cli/serve_legacy_test.go +++ b/cmd/tailscale/cli/serve_legacy_test.go @@ -859,6 +859,7 @@ type fakeLocalServeClient struct { config *ipn.ServeConfig setCount int // counts calls to SetServeConfig queryFeatureResponse *mockQueryFeatureResponse // mock response to QueryFeature calls + prefs *ipn.Prefs // fake preferences, used to test GetPrefs and SetPrefs } // fakeStatus is a fake ipnstate.Status value for tests. @@ -891,6 +892,21 @@ func (lc *fakeLocalServeClient) SetServeConfig(ctx context.Context, config *ipn. return nil } +func (lc *fakeLocalServeClient) GetPrefs(ctx context.Context) (*ipn.Prefs, error) { + if lc.prefs == nil { + lc.prefs = ipn.NewPrefs() + } + return lc.prefs, nil +} + +func (lc *fakeLocalServeClient) EditPrefs(ctx context.Context, prefs *ipn.MaskedPrefs) (*ipn.Prefs, error) { + if lc.prefs == nil { + lc.prefs = ipn.NewPrefs() + } + lc.prefs.ApplyEdits(prefs) + return lc.prefs, nil +} + type mockQueryFeatureResponse struct { resp *tailcfg.QueryFeatureResponse err error diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index bb51fb7d0..15de0609c 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -18,6 +18,7 @@ import ( "os/signal" "path" "path/filepath" + "slices" "sort" "strconv" "strings" @@ -41,6 +42,55 @@ type commandInfo struct { LongHelp string } +type serviceNameFlag struct { + Value *tailcfg.ServiceName +} + +func (s *serviceNameFlag) Set(sv string) error { + if sv == "" { + s.Value = new(tailcfg.ServiceName) + return nil + } + v := tailcfg.ServiceName(sv) + if err := v.Validate(); err != nil { + return fmt.Errorf("invalid service name: %q", sv) + } + *s.Value = v + return nil +} + +// String returns the string representation of service name. +func (s *serviceNameFlag) String() string { + return s.Value.String() +} + +type bgBoolFlag struct { + Value bool + IsSet bool // tracks if the flag was set by the user +} + +// Set sets the boolean flag and whether it's explicitly set by user based on the string value. +func (b *bgBoolFlag) Set(s string) error { + v, err := strconv.ParseBool(s) + if err != nil { + return err + } + b.Value = v + b.IsSet = true + return nil +} + +// This is a hack to make the flag package recognize that this is a boolean flag. +func (b *bgBoolFlag) IsBoolFlag() bool { return true } + +// String returns the string representation of the boolean flag. 
+func (b *bgBoolFlag) String() string { + if !b.IsSet { + return "default" + } + return strconv.FormatBool(b.Value) +} + var serveHelpCommon = strings.TrimSpace(` can be a file, directory, text, or most commonly the location to a service running on the local machine. The location to the location service can be expressed as a port number (e.g., 3000), @@ -73,8 +123,11 @@ const ( serveTypeHTTP serveTypeTCP serveTypeTLSTerminatedTCP + serveTypeTUN ) +const noService tailcfg.ServiceName = "" + var infoMap = map[serveMode]commandInfo{ serve: { Name: "serve", @@ -120,7 +173,7 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { Exec: e.runServeCombined(subcmd), FlagSet: e.newFlags("serve-set", func(fs *flag.FlagSet) { - fs.BoolVar(&e.bg, "bg", false, "Run the command as a background process (default false)") + fs.Var(&e.bg, "bg", "Run the command as a background process (default false, when --service is set defaults to true).") fs.StringVar(&e.setPath, "set-path", "", "Appends the specified path to the base URL for accessing the underlying service") fs.UintVar(&e.https, "https", 0, "Expose an HTTPS server at the specified port (default mode)") if subcmd == serve { @@ -128,7 +181,9 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { } fs.UintVar(&e.tcp, "tcp", 0, "Expose a TCP forwarder to forward raw TCP packets at the specified port") fs.UintVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", 0, "Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port") + fs.Var(&serviceNameFlag{Value: &e.service}, "service", "Serve for a service with distinct virtual IP instead on node itself.") fs.BoolVar(&e.yes, "yes", false, "Update without interactive prompts (default false)") + fs.BoolVar(&e.tun, "tun", false, "Forward all traffic to the local machine (default false), only supported for services. Refer to docs for more information.") }), UsageFunc: usageFuncNoDefaultValues, Subcommands: []*ffcli.Command{ @@ -162,9 +217,16 @@ func (e *serveEnv) validateArgs(subcmd serveMode, args []string) error { fmt.Fprint(e.stderr(), "\nPlease see https://tailscale.com/kb/1242/tailscale-serve for more information.\n") return errHelpFunc(subcmd) } + if len(args) == 0 && e.tun { + return nil + } if len(args) == 0 { return flag.ErrHelp } + if e.tun && len(args) > 1 { + fmt.Fprintln(e.stderr(), "Error: invalid argument format") + return errHelpFunc(subcmd) + } if len(args) > 2 { fmt.Fprintf(e.stderr(), "Error: invalid number of arguments (%d)\n", len(args)) return errHelpFunc(subcmd) @@ -206,7 +268,16 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { ctx, cancel := signal.NotifyContext(ctx, os.Interrupt) defer cancel() + forService := e.service != "" + if !e.bg.IsSet { + e.bg.Value = forService + } + funnel := subcmd == funnel + if forService && funnel { + return errors.New("Error: --service flag is not supported with funnel") + } + if funnel { // verify node has funnel capabilities if err := e.verifyFunnelEnabled(ctx, 443); err != nil { @@ -214,6 +285,10 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { } } + if forService && !e.bg.Value { + return errors.New("Error: --service flag is only compatible with background mode") + } + mount, err := cleanURLPath(e.setPath) if err != nil { return fmt.Errorf("failed to clean the mount point: %w", err) @@ -246,7 +321,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { // foreground or background. 
parentSC := sc - turnOff := "off" == args[len(args)-1] + turnOff := len(args) > 0 && "off" == args[len(args)-1] if !turnOff && srvType == serveTypeHTTPS { // Running serve with https requires that the tailnet has enabled // https cert provisioning. Send users through an interactive flow @@ -263,10 +338,19 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { } var watcher *tailscale.IPNBusWatcher - wantFg := !e.bg && !turnOff + svcName := noService + + if forService { + svcName = e.service + dnsName = e.service.String() + } + if !forService && srvType == serveTypeTUN { + return errors.New("tun mode is only supported for services") + } + wantFg := !e.bg.Value && !turnOff if wantFg { // validate the config before creating a WatchIPNBus session - if err := e.validateConfig(parentSC, srvPort, srvType); err != nil { + if err := e.validateConfig(parentSC, srvPort, srvType, svcName); err != nil { return err } @@ -292,12 +376,20 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { var msg string if turnOff { - err = e.unsetServe(sc, dnsName, srvType, srvPort, mount) + // only unset serve when trying to unset with type and port flags. + err = e.unsetServe(sc, st, dnsName, srvType, srvPort, mount) } else { - if err := e.validateConfig(parentSC, srvPort, srvType); err != nil { + if err := e.validateConfig(parentSC, srvPort, srvType, svcName); err != nil { return err } - err = e.setServe(sc, st, dnsName, srvType, srvPort, mount, args[0], funnel) + if forService { + e.addServiceToPrefs(ctx, svcName.String()) + } + target := "" + if len(args) > 0 { + target = args[0] + } + err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel) msg = e.messageForPort(sc, st, dnsName, srvType, srvPort) } if err != nil { @@ -332,22 +424,66 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { } } -const backgroundExistsMsg = "background configuration already exists, use `tailscale %s --%s=%d off` to remove the existing configuration" - -func (e *serveEnv) validateConfig(sc *ipn.ServeConfig, port uint16, wantServe serveType) error { - sc, isFg := sc.FindConfig(port) - if sc == nil { - return nil +func (e *serveEnv) addServiceToPrefs(ctx context.Context, serviceName string) error { + prefs, err := e.lc.GetPrefs(ctx) + if err != nil { + return fmt.Errorf("error getting prefs: %w", err) } - if isFg { - return errors.New("foreground already exists under this port") + advertisedServices := prefs.AdvertiseServices + if slices.Contains(advertisedServices, serviceName) { + return nil // already advertised } - if !e.bg { - return fmt.Errorf(backgroundExistsMsg, infoMap[e.subcmd].Name, wantServe.String(), port) + advertisedServices = append(advertisedServices, serviceName) + _, err = e.lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: advertisedServices, + }, + }) + return err +} + +const backgroundExistsMsg = "background configuration already exists, use `tailscale %s --%s=%d off` to remove the existing configuration" + +// validateConfig checks if the serve config is valid to serve the type wanted on the port. +// dnsName is a FQDN or a serviceName (with `svc:` prefix). 
+func (e *serveEnv) validateConfig(sc *ipn.ServeConfig, port uint16, wantServe serveType, svcName tailcfg.ServiceName) error { + var tcpHandlerForPort *ipn.TCPPortHandler + if svcName != noService { + svc := sc.Services[svcName] + if svc == nil { + return nil + } + if wantServe == serveTypeTUN && (svc.TCP != nil || svc.Web != nil) { + return errors.New("service already has a TCP or Web handler, cannot serve in TUN mode") + } + if svc.Tun && wantServe != serveTypeTUN { + return errors.New("service is already being served in TUN mode") + } + if svc.TCP[port] == nil { + return nil + } + tcpHandlerForPort = svc.TCP[port] + } else { + sc, isFg := sc.FindConfig(port) + if sc == nil { + return nil + } + if isFg { + return errors.New("foreground already exists under this port") + } + if !e.bg.Value { + return fmt.Errorf(backgroundExistsMsg, infoMap[e.subcmd].Name, wantServe.String(), port) + } + tcpHandlerForPort = sc.TCP[port] } - existingServe := serveFromPortHandler(sc.TCP[port]) + existingServe := serveFromPortHandler(tcpHandlerForPort) if wantServe != existingServe { - return fmt.Errorf("want %q but port is already serving %q", wantServe, existingServe) + target := svcName + if target == noService { + target = "machine" + } + return fmt.Errorf("want to serve %q but port is already serving %q for %q", wantServe, existingServe, target) } return nil } @@ -367,7 +503,7 @@ func serveFromPortHandler(tcp *ipn.TCPPortHandler) serveType { } } -func (e *serveEnv) setServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool) error { +func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool) error { // update serve config based on the type switch srvType { case serveTypeHTTPS, serveTypeHTTP: @@ -380,45 +516,61 @@ func (e *serveEnv) setServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName st if e.setPath != "" { return fmt.Errorf("cannot mount a path for TCP serve") } - err := e.applyTCPServe(sc, dnsName, srvType, srvPort, target) if err != nil { return fmt.Errorf("failed to apply TCP serve: %w", err) } + case serveTypeTUN: + // Caller checks that TUN mode is only supported for services. + svcName := tailcfg.ServiceName(dnsName) + if _, ok := sc.Services[svcName]; !ok { + mak.Set(&sc.Services, svcName, new(ipn.ServiceConfig)) + } + sc.Services[svcName].Tun = true default: return fmt.Errorf("invalid type %q", srvType) } // update the serve config based on if funnel is enabled - e.applyFunnel(sc, dnsName, srvPort, allowFunnel) - + // Since funnel is not supported for services, we only apply it for node's serve. + if svcName := tailcfg.AsServiceName(dnsName); svcName == noService { + e.applyFunnel(sc, dnsName, srvPort, allowFunnel) + } return nil } var ( - msgFunnelAvailable = "Available on the internet:" - msgServeAvailable = "Available within your tailnet:" - msgRunningInBackground = "%s started and running in the background." - msgDisableProxy = "To disable the proxy, run: tailscale %s --%s=%d off" - msgToExit = "Press Ctrl+C to exit." + msgFunnelAvailable = "Available on the internet:" + msgServeAvailable = "Available within your tailnet:" + msgServiceWaitingApproval = "This machine is configured as a service proxy for %s, but approval from an admin is required. Once approved, it will be available in your Tailnet as:" + msgRunningInBackground = "%s started and running in the background." 
+ msgRunningTunService = "IPv4 and IPv6 traffic to %s is being routed to your operating system." + msgDisableProxy = "To disable the proxy, run: tailscale %s --%s=%d off" + msgDisableServiceProxy = "To disable the proxy, run: tailscale serve --service=%s --%s=%d off" + msgDisableServiceTun = "To disable the service in TUN mode, run: tailscale serve --service=%s --tun off" + msgDisableService = "To remove config for the service, run: tailscale serve clear --service=%s" + msgToExit = "Press Ctrl+C to exit." ) // messageForPort returns a message for the given port based on the // serve config and status. func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName string, srvType serveType, srvPort uint16) string { var output strings.Builder - - hp := ipn.HostPort(net.JoinHostPort(dnsName, strconv.Itoa(int(srvPort)))) - - if sc.AllowFunnel[hp] == true { - output.WriteString(msgFunnelAvailable) - } else { - output.WriteString(msgServeAvailable) + svcName := tailcfg.AsServiceName(dnsName) + forService := svcName != noService + var webConfig *ipn.WebServerConfig + var tcpHandler *ipn.TCPPortHandler + ips := st.TailscaleIPs + host := dnsName + displayedHost := dnsName + if forService { + displayedHost = strings.Join([]string{svcName.WithoutPrefix(), st.CurrentTailnet.MagicDNSSuffix}, ".") + host = svcName.WithoutPrefix() } - output.WriteString("\n\n") + hp := ipn.HostPort(net.JoinHostPort(host, strconv.Itoa(int(srvPort)))) scheme := "https" - if sc.IsServingHTTP(srvPort) { + if sc.IsServingHTTP(srvPort, svcName) { scheme = "http" } @@ -439,37 +591,68 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN } return "", "" } + if forService { + serviceIPMaps, err := tailcfg.UnmarshalNodeCapJSON[tailcfg.ServiceIPMappings](st.Self.CapMap, tailcfg.NodeAttrServiceHost) + if err != nil || len(serviceIPMaps) == 0 || serviceIPMaps[0][svcName] == nil { + // The capmap does not contain IPs for this service yet. Usually this means + // the service hasn't been added to prefs and sent to control yet. 
+ output.WriteString(fmt.Sprintf(msgServiceWaitingApproval, svcName.String())) + ips = nil + } else { + output.WriteString(msgServeAvailable) + ips = serviceIPMaps[0][svcName] + } + output.WriteString("\n\n") + svc := sc.Services[svcName] + if srvType == serveTypeTUN && svc.Tun { + output.WriteString(fmt.Sprintf(msgRunningTunService, displayedHost)) + output.WriteString("\n") + output.WriteString(fmt.Sprintf(msgDisableServiceTun, dnsName)) + output.WriteString("\n") + output.WriteString(fmt.Sprintf(msgDisableService, dnsName)) + return output.String() + } + if svc != nil { + webConfig = svc.Web[hp] + tcpHandler = svc.TCP[srvPort] + } + } else { + if sc.AllowFunnel[hp] == true { + output.WriteString(msgFunnelAvailable) + } else { + output.WriteString(msgServeAvailable) + } + output.WriteString("\n\n") + webConfig = sc.Web[hp] + tcpHandler = sc.TCP[srvPort] + } - if sc.Web[hp] != nil { - mounts := slicesx.MapKeys(sc.Web[hp].Handlers) + if webConfig != nil { + mounts := slicesx.MapKeys(webConfig.Handlers) sort.Slice(mounts, func(i, j int) bool { return len(mounts[i]) < len(mounts[j]) }) - for _, m := range mounts { - h := sc.Web[hp].Handlers[m] - t, d := srvTypeAndDesc(h) - output.WriteString(fmt.Sprintf("%s://%s%s%s\n", scheme, dnsName, portPart, m)) + t, d := srvTypeAndDesc(webConfig.Handlers[m]) + output.WriteString(fmt.Sprintf("%s://%s%s%s\n", scheme, displayedHost, portPart, m)) output.WriteString(fmt.Sprintf("%s %-5s %s\n\n", "|--", t, d)) } - } else if sc.TCP[srvPort] != nil { - h := sc.TCP[srvPort] + } else if tcpHandler != nil { tlsStatus := "TLS over TCP" - if h.TerminateTLS != "" { + if tcpHandler.TerminateTLS != "" { tlsStatus = "TLS terminated" } - output.WriteString(fmt.Sprintf("%s://%s%s\n", scheme, dnsName, portPart)) - output.WriteString(fmt.Sprintf("|-- tcp://%s (%s)\n", hp, tlsStatus)) - for _, a := range st.TailscaleIPs { + output.WriteString(fmt.Sprintf("|-- tcp://%s:%d (%s)\n", displayedHost, srvPort, tlsStatus)) + for _, a := range ips { ipp := net.JoinHostPort(a.String(), strconv.Itoa(int(srvPort))) output.WriteString(fmt.Sprintf("|-- tcp://%s\n", ipp)) } - output.WriteString(fmt.Sprintf("|--> tcp://%s\n", h.TCPForward)) + output.WriteString(fmt.Sprintf("|--> tcp://%s\n\n", tcpHandler.TCPForward)) } - if !e.bg { + if !forService && !e.bg.Value { output.WriteString(msgToExit) return output.String() } @@ -479,14 +662,19 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN output.WriteString(fmt.Sprintf(msgRunningInBackground, subCmdUpper)) output.WriteString("\n") - output.WriteString(fmt.Sprintf(msgDisableProxy, subCmd, srvType.String(), srvPort)) + if forService { + output.WriteString(fmt.Sprintf(msgDisableServiceProxy, dnsName, srvType.String(), srvPort)) + output.WriteString("\n") + output.WriteString(fmt.Sprintf(msgDisableService, dnsName)) + } else { + output.WriteString(fmt.Sprintf(msgDisableProxy, subCmd, srvType.String(), srvPort)) + } return output.String() } func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target string) error { h := new(ipn.HTTPHandler) - switch { case strings.HasPrefix(target, "text:"): text := strings.TrimPrefix(target, "text:") @@ -522,7 +710,8 @@ func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort ui } // TODO: validation needs to check nested foreground configs - if sc.IsTCPForwardingOnPort(srvPort) { + svcName := tailcfg.AsServiceName(dnsName) + if sc.IsTCPForwardingOnPort(srvPort, svcName) { return errors.New("cannot serve 
web; already serving TCP") } @@ -553,8 +742,9 @@ func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType se } // TODO: needs to account for multiple configs from foreground mode - if sc.IsServingWeb(srcPort) { - return fmt.Errorf("cannot serve TCP; already serving web on %d", srcPort) + svcName := tailcfg.AsServiceName(dnsName) + if sc.IsServingWeb(srcPort, svcName) { + return fmt.Errorf("cannot serve TCP; already serving web on %d for %s", srcPort, dnsName) } sc.SetTCPForwarding(srcPort, dstURL.Host, terminateTLS, dnsName) @@ -578,18 +768,24 @@ func (e *serveEnv) applyFunnel(sc *ipn.ServeConfig, dnsName string, srvPort uint } // unsetServe removes the serve config for the given serve port. -func (e *serveEnv) unsetServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string) error { +// dnsName is a FQDN or a serviceName (with `svc:` prefix). +func (e *serveEnv) unsetServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName string, srvType serveType, srvPort uint16, mount string) error { switch srvType { case serveTypeHTTPS, serveTypeHTTP: - err := e.removeWebServe(sc, dnsName, srvPort, mount) + err := e.removeWebServe(sc, st, dnsName, srvPort, mount) if err != nil { return fmt.Errorf("failed to remove web serve: %w", err) } case serveTypeTCP, serveTypeTLSTerminatedTCP: - err := e.removeTCPServe(sc, srvPort) + err := e.removeTCPServe(sc, dnsName, srvPort) if err != nil { return fmt.Errorf("failed to remove TCP serve: %w", err) } + case serveTypeTUN: + err := e.removeTunServe(sc, dnsName) + if err != nil { + return fmt.Errorf("failed to remove TUN serve: %w", err) + } default: return fmt.Errorf("invalid type %q", srvType) } @@ -620,11 +816,16 @@ func srvTypeAndPortFromFlags(e *serveEnv) (srvType serveType, srvPort uint16, er } } + if e.tun { + srcTypeCount++ + srvType = serveTypeTUN + } + if srcTypeCount > 1 { return 0, 0, fmt.Errorf("cannot serve multiple types for a single mount point") - } else if srcTypeCount == 0 { - srvType = serveTypeHTTPS - srvPort = 443 + } + if srcTypeCount == 0 { + return serveTypeHTTPS, 443, nil } return srvType, srvPort, nil @@ -728,32 +929,48 @@ func isLegacyInvocation(subcmd serveMode, args []string) (string, bool) { // and removes funnel if no remaining mounts exist for the serve port. // The srvPort argument is the serving port and the mount argument is // the mount point or registered path to remove. 
-func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, mount string) error { - if sc.IsTCPForwardingOnPort(srvPort) { - return errors.New("cannot remove web handler; currently serving TCP") +func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName string, srvPort uint16, mount string) error { + if sc == nil { + return nil } portStr := strconv.Itoa(int(srvPort)) - hp := ipn.HostPort(net.JoinHostPort(dnsName, portStr)) + hostName := dnsName + webServeMap := sc.Web + svcName := tailcfg.AsServiceName(dnsName) + forService := svcName != noService + if forService { + svc := sc.Services[svcName] + if svc == nil { + return errors.New("service does not exist") + } + hostName = svcName.WithoutPrefix() + webServeMap = svc.Web + } + + hp := ipn.HostPort(net.JoinHostPort(hostName, portStr)) + if sc.IsTCPForwardingOnPort(srvPort, svcName) { + return errors.New("cannot remove web handler; currently serving TCP") + } var targetExists bool var mounts []string // mount is deduced from e.setPath but it is ambiguous as // to whether the user explicitly passed "/" or it was defaulted to. if e.setPath == "" { - targetExists = sc.Web[hp] != nil && len(sc.Web[hp].Handlers) > 0 + targetExists = webServeMap[hp] != nil && len(webServeMap[hp].Handlers) > 0 if targetExists { - for mount := range sc.Web[hp].Handlers { + for mount := range webServeMap[hp].Handlers { mounts = append(mounts, mount) } } } else { - targetExists = sc.WebHandlerExists(hp, mount) + targetExists = sc.WebHandlerExists(svcName, hp, mount) mounts = []string{mount} } if !targetExists { - return errors.New("error: handler does not exist") + return errors.New("handler does not exist") } if len(mounts) > 1 { @@ -763,23 +980,47 @@ func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, dnsName string, srvPort u } } - sc.RemoveWebHandler(dnsName, srvPort, mounts, true) + if forService { + sc.RemoveServiceWebHandler(st, svcName, srvPort, mounts) + } else { + sc.RemoveWebHandler(dnsName, srvPort, mounts, true) + } return nil } // removeTCPServe removes the TCP forwarding configuration for the -// given srvPort, or serving port. -func (e *serveEnv) removeTCPServe(sc *ipn.ServeConfig, src uint16) error { +// given srvPort, or serving port for the given dnsName. 
+func (e *serveEnv) removeTCPServe(sc *ipn.ServeConfig, dnsName string, src uint16) error { if sc == nil { return nil } - if sc.GetTCPPortHandler(src) == nil { - return errors.New("error: serve config does not exist") + svcName := tailcfg.AsServiceName(dnsName) + if sc.GetTCPPortHandler(src, svcName) == nil { + return errors.New("serve config does not exist") } - if sc.IsServingWeb(src) { + if sc.IsServingWeb(src, svcName) { return fmt.Errorf("unable to remove; serving web, not TCP forwarding on serve port %d", src) } - sc.RemoveTCPForwarding(src) + sc.RemoveTCPForwarding(svcName, src) + return nil +} + +func (e *serveEnv) removeTunServe(sc *ipn.ServeConfig, dnsName string) error { + if sc == nil { + return nil + } + svcName := tailcfg.ServiceName(dnsName) + svc, ok := sc.Services[svcName] + if !ok || svc == nil { + return errors.New("service does not exist") + } + if !svc.Tun { + return errors.New("service is not being served in TUN mode") + } + delete(sc.Services, svcName) + if len(sc.Services) == 0 { + sc.Services = nil // clean up empty map + } return nil } diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 5768127ad..b3e7ea773 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -8,9 +8,11 @@ import ( "context" "encoding/json" "fmt" + "net/netip" "os" "path/filepath" "reflect" + "slices" "strconv" "strings" "testing" @@ -19,6 +21,7 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" + "tailscale.com/tailcfg" ) func TestServeDevConfigMutations(t *testing.T) { @@ -874,9 +877,10 @@ func TestValidateConfig(t *testing.T) { name string desc string cfg *ipn.ServeConfig + svc tailcfg.ServiceName servePort uint16 serveType serveType - bg bool + bg bgBoolFlag wantErr bool }{ { @@ -894,7 +898,7 @@ func TestValidateConfig(t *testing.T) { 443: {HTTPS: true}, }, }, - bg: true, + bg: bgBoolFlag{true, false}, servePort: 10000, serveType: serveTypeHTTPS, }, @@ -906,7 +910,7 @@ func TestValidateConfig(t *testing.T) { 443: {TCPForward: "http://localhost:4545"}, }, }, - bg: true, + bg: bgBoolFlag{true, false}, servePort: 443, serveType: serveTypeTCP, }, @@ -918,7 +922,7 @@ func TestValidateConfig(t *testing.T) { 443: {HTTPS: true}, }, }, - bg: true, + bg: bgBoolFlag{true, false}, servePort: 443, serveType: serveTypeHTTP, wantErr: true, @@ -957,12 +961,90 @@ func TestValidateConfig(t *testing.T) { serveType: serveTypeTCP, wantErr: true, }, + { + name: "new_service_tcp", + desc: "no error when adding a new service port", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + }, + }, + }, + svc: "svc:foo", + servePort: 8080, + serveType: serveTypeTCP, + }, + { + name: "override_service_tcp", + desc: "no error when overwriting a previous service port", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {TCPForward: "http://localhost:4545"}, + }, + }, + }, + }, + svc: "svc:foo", + servePort: 443, + serveType: serveTypeTCP, + }, + { + name: "override_service_tcp", + desc: "error when overwriting a previous service port with a different serve type", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {HTTPS: true}, + }, + }, + }, + }, + svc: "svc:foo", + servePort: 443, + serveType: serveTypeHTTP, + wantErr: 
true, + }, + { + name: "override_service_tcp", + desc: "error when setting previous tcp service to tun mode", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {TCPForward: "http://localhost:4545"}, + }, + }, + }, + }, + svc: "svc:foo", + serveType: serveTypeTUN, + wantErr: true, + }, + { + name: "override_service_tun", + desc: "error when setting previous tun service to tcp forwarder", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + Tun: true, + }, + }, + }, + svc: "svc:foo", + serveType: serveTypeTCP, + servePort: 443, + wantErr: true, + }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { se := serveEnv{bg: tc.bg} - err := se.validateConfig(tc.cfg, tc.servePort, tc.serveType) + err := se.validateConfig(tc.cfg, tc.servePort, tc.serveType, tc.svc) if err == nil && tc.wantErr { t.Fatal("expected an error but got nil") } @@ -1017,6 +1099,13 @@ func TestSrcTypeFromFlags(t *testing.T) { expectedPort: 443, expectedErr: false, }, + { + name: "defaults to https, port 443 for service", + env: &serveEnv{service: "svc:foo"}, + expectedType: serveTypeHTTPS, + expectedPort: 443, + expectedErr: false, + }, { name: "multiple types set", env: &serveEnv{http: 80, https: 443}, @@ -1075,12 +1164,70 @@ func TestCleanURLPath(t *testing.T) { } } +func TestAddServiceToPrefs(t *testing.T) { + tests := []struct { + name string + dnsName string + startServices []string + expected []string + }{ + { + name: "add service to empty prefs", + dnsName: "svc:foo", + expected: []string{"svc:foo"}, + }, + { + name: "add service to existing prefs", + dnsName: "svc:bar", + startServices: []string{"svc:foo"}, + expected: []string{"svc:foo", "svc:bar"}, + }, + { + name: "add existing service to prefs", + dnsName: "svc:foo", + startServices: []string{"svc:foo"}, + expected: []string{"svc:foo"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lc := &fakeLocalServeClient{} + ctx := t.Context() + lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: tt.startServices, + }, + }) + e := &serveEnv{lc: lc, bg: bgBoolFlag{true, false}} + err := e.addServiceToPrefs(ctx, tt.dnsName) + if err != nil { + t.Fatalf("addServiceToPrefs(%q) returned unexpected error: %v", tt.dnsName, err) + } + if !slices.Equal(lc.prefs.AdvertiseServices, tt.expected) { + t.Errorf("addServiceToPrefs(%q) = %v, want %v", tt.dnsName, lc.prefs.AdvertiseServices, tt.expected) + } + }) + } + +} + func TestMessageForPort(t *testing.T) { + svcIPMap := tailcfg.ServiceIPMappings{ + "svc:foo": []netip.Addr{ + netip.MustParseAddr("100.101.101.101"), + netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:cd96:6565:6565"), + }, + } + svcIPMapJSON, _ := json.Marshal(svcIPMap) + svcIPMapJSONRawMSG := tailcfg.RawMessage(svcIPMapJSON) + tests := []struct { name string subcmd serveMode serveConfig *ipn.ServeConfig status *ipnstate.Status + prefs *ipn.Prefs dnsName string srvType serveType srvPort uint16 @@ -1147,10 +1294,206 @@ func TestMessageForPort(t *testing.T) { fmt.Sprintf(msgDisableProxy, "serve", "http", 80), }, "\n"), }, + { + name: "serve service http", + subcmd: serve, + serveConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": 
{Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + Self: &ipnstate.PeerStatus{ + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{svcIPMapJSONRawMSG}, + }, + }, + }, + prefs: &ipn.Prefs{ + AdvertiseServices: []string{"svc:foo"}, + }, + dnsName: "svc:foo", + srvType: serveTypeHTTP, + srvPort: 80, + expected: strings.Join([]string{ + msgServeAvailable, + "", + "http://foo.test.ts.net/", + "|-- proxy http://localhost:3000", + "", + fmt.Sprintf(msgRunningInBackground, "Serve"), + fmt.Sprintf(msgDisableServiceProxy, "svc:foo", "http", 80), + fmt.Sprintf(msgDisableService, "svc:foo"), + }, "\n"), + }, + { + name: "serve service no capmap", + subcmd: serve, + serveConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + Self: &ipnstate.PeerStatus{ + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{svcIPMapJSONRawMSG}, + }, + }, + }, + prefs: &ipn.Prefs{ + AdvertiseServices: []string{"svc:bar"}, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + expected: strings.Join([]string{ + fmt.Sprintf(msgServiceWaitingApproval, "svc:bar"), + "", + "http://bar.test.ts.net/", + "|-- proxy http://localhost:3000", + "", + fmt.Sprintf(msgRunningInBackground, "Serve"), + fmt.Sprintf(msgDisableServiceProxy, "svc:bar", "http", 80), + fmt.Sprintf(msgDisableService, "svc:bar"), + }, "\n"), + }, + { + name: "serve service https non-default port", + subcmd: serve, + serveConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 2200: {HTTPS: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo:2200": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + Self: &ipnstate.PeerStatus{ + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{svcIPMapJSONRawMSG}, + }, + }, + }, + prefs: &ipn.Prefs{AdvertiseServices: []string{"svc:foo"}}, + dnsName: "svc:foo", + srvType: serveTypeHTTPS, + srvPort: 2200, + expected: strings.Join([]string{ + msgServeAvailable, + "", + "https://foo.test.ts.net:2200/", + "|-- proxy http://localhost:3000", + "", + fmt.Sprintf(msgRunningInBackground, "Serve"), + fmt.Sprintf(msgDisableServiceProxy, "svc:foo", "https", 2200), + fmt.Sprintf(msgDisableService, "svc:foo"), + }, "\n"), + }, + { + name: "serve service TCPForward", + subcmd: serve, + serveConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 2200: {TCPForward: "localhost:3000"}, + }, + }, + }, + }, + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + Self: &ipnstate.PeerStatus{ + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{svcIPMapJSONRawMSG}, + }, + }, + }, + prefs: &ipn.Prefs{AdvertiseServices: []string{"svc:foo"}}, + dnsName: 
"svc:foo", + srvType: serveTypeTCP, + srvPort: 2200, + expected: strings.Join([]string{ + msgServeAvailable, + "", + "|-- tcp://foo.test.ts.net:2200 (TLS over TCP)", + "|-- tcp://100.101.101.101:2200", + "|-- tcp://[fd7a:115c:a1e0:ab12:4843:cd96:6565:6565]:2200", + "|--> tcp://localhost:3000", + "", + fmt.Sprintf(msgRunningInBackground, "Serve"), + fmt.Sprintf(msgDisableServiceProxy, "svc:foo", "tcp", 2200), + fmt.Sprintf(msgDisableService, "svc:foo"), + }, "\n"), + }, + { + name: "serve service Tun", + subcmd: serve, + serveConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + Tun: true, + }, + }, + }, + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + Self: &ipnstate.PeerStatus{ + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{svcIPMapJSONRawMSG}, + }, + }, + }, + prefs: &ipn.Prefs{AdvertiseServices: []string{"svc:foo"}}, + dnsName: "svc:foo", + srvType: serveTypeTUN, + expected: strings.Join([]string{ + msgServeAvailable, + "", + fmt.Sprintf(msgRunningTunService, "foo.test.ts.net"), + fmt.Sprintf(msgDisableServiceTun, "svc:foo"), + fmt.Sprintf(msgDisableService, "svc:foo"), + }, "\n"), + }, } for _, tt := range tests { - e := &serveEnv{bg: true, subcmd: tt.subcmd} + e := &serveEnv{bg: bgBoolFlag{true, false}, subcmd: tt.subcmd} t.Run(tt.name, func(t *testing.T) { actual := e.messageForPort(tt.serveConfig, tt.status, tt.dnsName, tt.srvType, tt.srvPort) @@ -1277,6 +1620,576 @@ func TestIsLegacyInvocation(t *testing.T) { } } +func TestSetServe(t *testing.T) { + e := &serveEnv{} + tests := []struct { + name string + desc string + cfg *ipn.ServeConfig + st *ipnstate.Status + dnsName string + srvType serveType + srvPort uint16 + mountPath string + target string + allowFunnel bool + expected *ipn.ServeConfig + expectErr bool + }{ + { + name: "add new handler", + desc: "add a new http handler to empty config", + cfg: &ipn.ServeConfig{}, + dnsName: "foo.test.ts.net", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3000", + expected: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + { + name: "update http handler", + desc: "update an existing http handler on the same port to same type", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + dnsName: "foo.test.ts.net", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3001", + expected: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3001"}, + }, + }, + }, + }, + }, + { + name: "update TCP handler", + desc: "update an existing TCP handler on the same port to a http handler", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{80: {TCPForward: "http://localhost:3000"}}, + }, + dnsName: "foo.test.ts.net", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3001", + expectErr: true, + }, + { + name: "add new service handler", + 
desc: "add a new service TCP handler to empty config", + cfg: &ipn.ServeConfig{}, + + dnsName: "svc:bar", + srvType: serveTypeTCP, + srvPort: 80, + target: "3000", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {TCPForward: "127.0.0.1:3000"}}, + }, + }, + }, + }, + { + name: "update service handler", + desc: "update an existing service TCP handler on the same port to same type", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {TCPForward: "127.0.0.1:3000"}}, + }, + }, + }, + dnsName: "svc:bar", + srvType: serveTypeTCP, + srvPort: 80, + target: "3001", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {TCPForward: "127.0.0.1:3001"}}, + }, + }, + }, + }, + { + name: "update service handler", + desc: "update an existing service TCP handler on the same port to a http handler", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {TCPForward: "127.0.0.1:3000"}}, + }, + }, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3001", + expectErr: true, + }, + { + name: "add new service handler", + desc: "add a new service HTTP handler to empty config", + cfg: &ipn.ServeConfig{}, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3000", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + }, + { + name: "update existing service handler", + desc: "update an existing service HTTP handler", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3001", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3001"}, + }, + }, + }, + }, + }, + }, + }, + { + name: "add new service handler", + desc: "add a new service HTTP handler to existing service config", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 88, + mountPath: "/", + target: "http://localhost:3001", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + 88: 
{HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + "bar:88": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3001"}, + }, + }, + }, + }, + }, + }, + }, + { + name: "add new service mount", + desc: "add a new service mount to existing service config", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/added", + target: "http://localhost:3001", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + "/added": {Proxy: "http://localhost:3001"}, + }, + }, + }, + }, + }, + }, + }, + { + name: "add new service handler", + desc: "add a new service handler in tun mode to empty config", + cfg: &ipn.ServeConfig{}, + dnsName: "svc:bar", + srvType: serveTypeTUN, + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + Tun: true, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel) + if err != nil && !tt.expectErr { + t.Fatalf("got error: %v; did not expect error.", err) + } + if err == nil && tt.expectErr { + t.Fatalf("got no error; expected error.") + } + if !tt.expectErr && !reflect.DeepEqual(tt.cfg, tt.expected) { + svcName := tailcfg.ServiceName(tt.dnsName) + t.Fatalf("got: %v; expected: %v", tt.cfg.Services[svcName], tt.expected.Services[svcName]) + } + }) + } +} + +func TestUnsetServe(t *testing.T) { + tests := []struct { + name string + desc string + cfg *ipn.ServeConfig + st *ipnstate.Status + dnsName string + srvType serveType + srvPort uint16 + mount string + setServeEnv bool + serveEnv *serveEnv // if set, use this instead of the default serveEnv + expected *ipn.ServeConfig + expectErr bool + }{ + { + name: "unset http handler", + desc: "remove an existing http handler", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "foo.test.ts.net", + srvType: serveTypeHTTP, + srvPort: 80, + mount: "/", + expected: &ipn.ServeConfig{}, + expectErr: false, + }, + { + name: "unset service handler", + desc: "remove an existing service TCP handler", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: 
"test.ts.net"}, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mount: "/", + expected: &ipn.ServeConfig{}, + expectErr: false, + }, + { + name: "unset service handler tun", + desc: "remove an existing service handler in tun mode", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + Tun: true, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:bar", + srvType: serveTypeTUN, + expected: &ipn.ServeConfig{}, + expectErr: false, + }, + { + name: "unset service handler tcp", + desc: "remove an existing service TCP handler", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {TCPForward: "11.11.11.11:3000"}, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:bar", + srvType: serveTypeTCP, + srvPort: 80, + expected: &ipn.ServeConfig{}, + expectErr: false, + }, + { + name: "unset http handler not found", + desc: "try to remove a non-existing http handler", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "bar.test.ts.net", + srvType: serveTypeHTTP, + srvPort: 80, + mount: "/abc", + expected: &ipn.ServeConfig{}, + expectErr: true, + }, + { + name: "unset service handler not found", + desc: "try to remove a non-existing service TCP handler", + + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mount: "/abc", + setServeEnv: true, + serveEnv: &serveEnv{setPath: "/abc"}, + expected: &ipn.ServeConfig{}, + expectErr: true, + }, + { + name: "unset service doesn't exist", + desc: "try to remove a non-existing service's handler", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {TCPForward: "11.11.11.11:3000"}, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:foo", + srvType: serveTypeTCP, + srvPort: 80, + expectErr: true, + }, + { + name: "unset tcp while port is in use", + desc: "try to remove a TCP handler while the port is used for web", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "foo.test.ts.net", + srvType: serveTypeTCP, + srvPort: 80, + mount: "/", + expectErr: true, + }, + { + name: "unset service tcp while port is in use", + desc: 
"try to remove a service TCP handler while the port is used for web", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:bar", + srvType: serveTypeTCP, + srvPort: 80, + mount: "/", + expectErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &serveEnv{} + if tt.setServeEnv { + e = tt.serveEnv + } + err := e.unsetServe(tt.cfg, tt.st, tt.dnsName, tt.srvType, tt.srvPort, tt.mount) + if err != nil && !tt.expectErr { + t.Fatalf("got error: %v; did not expect error.", err) + } + if err == nil && tt.expectErr { + t.Fatalf("got no error; expected error.") + } + if !tt.expectErr && !reflect.DeepEqual(tt.cfg, tt.expected) { + t.Fatalf("got: %v; expected: %v", tt.cfg, tt.expected) + } + }) + } +} + // exactErrMsg returns an error checker that wants exactly the provided want error. // If optName is non-empty, it's used in the error message. func exactErrMsg(want error) func(error) string { diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index 85679a7de..39e6f9fbd 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -262,7 +262,7 @@ func printFunnelStatus(ctx context.Context) { } sni, portStr, _ := net.SplitHostPort(string(hp)) p, _ := strconv.ParseUint(portStr, 10, 16) - isTCP := sc.IsTCPForwardingOnPort(uint16(p)) + isTCP := sc.IsTCPForwardingOnPort(uint16(p), noService) url := "https://" if isTCP { url = "tcp://" diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 44d63fe54..28262251c 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -1007,8 +1007,6 @@ func allNumeric(s string) bool { } func (b *LocalBackend) webServerConfig(hostname string, forVIPService tailcfg.ServiceName, port uint16) (c ipn.WebServerConfigView, ok bool) { - key := ipn.HostPort(fmt.Sprintf("%s:%v", hostname, port)) - b.mu.Lock() defer b.mu.Unlock() @@ -1016,8 +1014,10 @@ func (b *LocalBackend) webServerConfig(hostname string, forVIPService tailcfg.Se return c, false } if forVIPService != "" { + key := ipn.HostPort(net.JoinHostPort(forVIPService.WithoutPrefix(), fmt.Sprintf("%d", port))) return b.serveConfig.FindServiceWeb(forVIPService, key) } + key := ipn.HostPort(net.JoinHostPort(hostname, fmt.Sprintf("%d", port))) return b.serveConfig.FindWeb(key) } diff --git a/ipn/serve.go b/ipn/serve.go index ac92287bd..fae0ad5d6 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -166,26 +166,44 @@ type HTTPHandler struct { // WebHandlerExists reports whether if the ServeConfig Web handler exists for // the given host:port and mount point. -func (sc *ServeConfig) WebHandlerExists(hp HostPort, mount string) bool { - h := sc.GetWebHandler(hp, mount) +func (sc *ServeConfig) WebHandlerExists(svcName tailcfg.ServiceName, hp HostPort, mount string) bool { + h := sc.GetWebHandler(svcName, hp, mount) return h != nil } // GetWebHandler returns the HTTPHandler for the given host:port and mount point. // Returns nil if the handler does not exist. 
-func (sc *ServeConfig) GetWebHandler(hp HostPort, mount string) *HTTPHandler { - if sc == nil || sc.Web[hp] == nil { +func (sc *ServeConfig) GetWebHandler(svcName tailcfg.ServiceName, hp HostPort, mount string) *HTTPHandler { + if sc == nil { + return nil + } + if svcName != "" { + if svc, ok := sc.Services[svcName]; ok && svc.Web != nil { + if webCfg, ok := svc.Web[hp]; ok { + return webCfg.Handlers[mount] + } + } + return nil + } + if sc.Web[hp] == nil { return nil } return sc.Web[hp].Handlers[mount] } -// GetTCPPortHandler returns the TCPPortHandler for the given port. -// If the port is not configured, nil is returned. -func (sc *ServeConfig) GetTCPPortHandler(port uint16) *TCPPortHandler { +// GetTCPPortHandler returns the TCPPortHandler for the given port. If the port +// is not configured, nil is returned. Parameter svcName can be tailcfg.NoService +// for local serve or a service name for a service hosted on node. +func (sc *ServeConfig) GetTCPPortHandler(port uint16, svcName tailcfg.ServiceName) *TCPPortHandler { if sc == nil { return nil } + if svcName != "" { + if svc, ok := sc.Services[svcName]; ok && svc != nil { + return svc.TCP[port] + } + return nil + } return sc.TCP[port] } @@ -227,34 +245,78 @@ func (sc *ServeConfig) IsTCPForwardingAny() bool { return false } -// IsTCPForwardingOnPort reports whether if ServeConfig is currently forwarding -// in TCPForward mode on the given port. This is exclusive of Web/HTTPS serving. -func (sc *ServeConfig) IsTCPForwardingOnPort(port uint16) bool { - if sc == nil || sc.TCP[port] == nil { +// IsTCPForwardingOnPort reports whether ServeConfig is currently forwarding +// in TCPForward mode on the given port for local or a service. svcName will +// either be noService (empty string) for local serve or a serviceName for service +// hosted on node. Notice TCPForwarding is exclusive with Web/HTTPS serving. +func (sc *ServeConfig) IsTCPForwardingOnPort(port uint16, svcName tailcfg.ServiceName) bool { + if sc == nil { + return false + } + + if svcName != "" { + svc, ok := sc.Services[svcName] + if !ok || svc == nil { + return false + } + if svc.TCP[port] == nil { + return false + } + } else if sc.TCP[port] == nil { return false } - return !sc.IsServingWeb(port) + return !sc.IsServingWeb(port, svcName) } -// IsServingWeb reports whether if ServeConfig is currently serving Web -// (HTTP/HTTPS) on the given port. This is exclusive of TCPForwarding. -func (sc *ServeConfig) IsServingWeb(port uint16) bool { - return sc.IsServingHTTP(port) || sc.IsServingHTTPS(port) +// IsServingWeb reports whether ServeConfig is currently serving Web (HTTP/HTTPS) +// on the given port for local or a service. svcName will be either tailcfg.NoService, +// or a serviceName for service hosted on node. This is exclusive with TCPForwarding. +func (sc *ServeConfig) IsServingWeb(port uint16, svcName tailcfg.ServiceName) bool { + return sc.IsServingHTTP(port, svcName) || sc.IsServingHTTPS(port, svcName) } -// IsServingHTTPS reports whether if ServeConfig is currently serving HTTPS on -// the given port. This is exclusive of HTTP and TCPForwarding. -func (sc *ServeConfig) IsServingHTTPS(port uint16) bool { - if sc == nil || sc.TCP[port] == nil { +// IsServingHTTPS reports whether ServeConfig is currently serving HTTPS on +// the given port for local or a service. svcName will be either tailcfg.NoService +// for local serve, or a serviceName for service hosted on node. This is exclusive +// with HTTP and TCPForwarding. 
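// Illustrative sketch (not part of the change above): how a caller might use
// the service-aware lookups once this patch is applied. The service name
// "svc:foo", the port, and the proxy target are made up for the example; pass
// the empty service name to query the node's own TCP/Web maps instead.
package main

import (
	"fmt"

	"tailscale.com/ipn"
	"tailscale.com/tailcfg"
)

func main() {
	sc := &ipn.ServeConfig{
		Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
			"svc:foo": {
				TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
				Web: map[ipn.HostPort]*ipn.WebServerConfig{
					"foo:443": {
						Handlers: map[string]*ipn.HTTPHandler{
							"/": {Proxy: "http://localhost:3000"},
						},
					},
				},
			},
		},
	}

	// With a service name, lookups are routed through sc.Services["svc:foo"].
	fmt.Println(sc.IsServingWeb(443, "svc:foo"))                    // true
	fmt.Println(sc.IsTCPForwardingOnPort(443, "svc:foo"))           // false: web serving, not raw TCP
	fmt.Println(sc.GetWebHandler("svc:foo", "foo:443", "/") != nil) // true

	// With the empty service name, only the node-level config is consulted.
	fmt.Println(sc.GetTCPPortHandler(443, "")) // <nil>
}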
+func (sc *ServeConfig) IsServingHTTPS(port uint16, svcName tailcfg.ServiceName) bool { + if sc == nil { + return false + } + var tcpHandlers map[uint16]*TCPPortHandler + if svcName != "" { + if svc := sc.Services[svcName]; svc != nil { + tcpHandlers = svc.TCP + } + } else { + tcpHandlers = sc.TCP + } + + th := tcpHandlers[port] + if th == nil { return false } - return sc.TCP[port].HTTPS + return th.HTTPS } -// IsServingHTTP reports whether if ServeConfig is currently serving HTTP on the -// given port. This is exclusive of HTTPS and TCPForwarding. -func (sc *ServeConfig) IsServingHTTP(port uint16) bool { - if sc == nil || sc.TCP[port] == nil { +// IsServingHTTP reports whether ServeConfig is currently serving HTTP on the +// given port for local or a service. svcName will be either tailcfg.NoService for +// local serve, or a serviceName for service hosted on node. This is exclusive +// with HTTPS and TCPForwarding. +func (sc *ServeConfig) IsServingHTTP(port uint16, svcName tailcfg.ServiceName) bool { + if sc == nil { + return false + } + if svcName != "" { + if svc := sc.Services[svcName]; svc != nil { + if svc.TCP[port] != nil { + return svc.TCP[port].HTTP + } + } + return false + } + + if sc.TCP[port] == nil { return false } return sc.TCP[port].HTTP @@ -280,21 +342,37 @@ func (sc *ServeConfig) FindConfig(port uint16) (*ServeConfig, bool) { // SetWebHandler sets the given HTTPHandler at the specified host, port, // and mount in the serve config. sc.TCP is also updated to reflect web -// serving usage of the given port. +// serving usage of the given port. The st argument is needed when setting +// a web handler for a service, otherwise it can be nil. func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uint16, mount string, useTLS bool) { if sc == nil { sc = new(ServeConfig) } - mak.Set(&sc.TCP, port, &TCPPortHandler{HTTPS: useTLS, HTTP: !useTLS}) - hp := HostPort(net.JoinHostPort(host, strconv.Itoa(int(port)))) - if _, ok := sc.Web[hp]; !ok { - mak.Set(&sc.Web, hp, new(WebServerConfig)) + tcpMap := &sc.TCP + webServerMap := &sc.Web + hostName := host + if svcName := tailcfg.AsServiceName(host); svcName != "" { + hostName = svcName.WithoutPrefix() + svc, ok := sc.Services[svcName] + if !ok { + svc = new(ServiceConfig) + mak.Set(&sc.Services, svcName, svc) + } + tcpMap = &svc.TCP + webServerMap = &svc.Web } - mak.Set(&sc.Web[hp].Handlers, mount, handler) + mak.Set(tcpMap, port, &TCPPortHandler{HTTPS: useTLS, HTTP: !useTLS}) + hp := HostPort(net.JoinHostPort(hostName, strconv.Itoa(int(port)))) + webCfg, ok := (*webServerMap)[hp] + if !ok { + webCfg = new(WebServerConfig) + mak.Set(webServerMap, hp, webCfg) + } + mak.Set(&webCfg.Handlers, mount, handler) // TODO(tylersmalley): handle multiple web handlers from foreground mode - for k, v := range sc.Web[hp].Handlers { + for k, v := range webCfg.Handlers { if v == handler { continue } @@ -305,7 +383,7 @@ func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uin m1 := strings.TrimSuffix(mount, "/") m2 := strings.TrimSuffix(k, "/") if m1 == m2 { - delete(sc.Web[hp].Handlers, k) + delete(webCfg.Handlers, k) } } } @@ -318,9 +396,19 @@ func (sc *ServeConfig) SetTCPForwarding(port uint16, fwdAddr string, terminateTL if sc == nil { sc = new(ServeConfig) } - mak.Set(&sc.TCP, port, &TCPPortHandler{TCPForward: fwdAddr}) + tcpPortHandler := &sc.TCP + if svcName := tailcfg.AsServiceName(host); svcName != "" { + svcConfig, ok := sc.Services[svcName] + if !ok { + svcConfig = new(ServiceConfig) + 
mak.Set(&sc.Services, svcName, svcConfig) + } + tcpPortHandler = &svcConfig.TCP + } + mak.Set(tcpPortHandler, port, &TCPPortHandler{TCPForward: fwdAddr}) + if terminateTLS { - sc.TCP[port].TerminateTLS = host + (*tcpPortHandler)[port].TerminateTLS = host } } @@ -344,9 +432,9 @@ func (sc *ServeConfig) SetFunnel(host string, port uint16, setOn bool) { } } -// RemoveWebHandler deletes the web handlers at all of the given mount points -// for the provided host and port in the serve config. If cleanupFunnel is -// true, this also removes the funnel value for this port if no handlers remain. +// RemoveWebHandler deletes the web handlers at all of the given mount points for the +// provided host and port in the serve config for the node (as opposed to a service). +// If cleanupFunnel is true, this also removes the funnel value for this port if no handlers remain. func (sc *ServeConfig) RemoveWebHandler(host string, port uint16, mounts []string, cleanupFunnel bool) { hp := HostPort(net.JoinHostPort(host, strconv.Itoa(int(port)))) @@ -374,9 +462,51 @@ func (sc *ServeConfig) RemoveWebHandler(host string, port uint16, mounts []strin } } +// RemoveServiceWebHandler deletes the web handlers at all of the given mount points +// for the provided host and port in the serve config for the given service. +func (sc *ServeConfig) RemoveServiceWebHandler(st *ipnstate.Status, svcName tailcfg.ServiceName, port uint16, mounts []string) { + hostName := svcName.WithoutPrefix() + hp := HostPort(net.JoinHostPort(hostName, strconv.Itoa(int(port)))) + + svc, ok := sc.Services[svcName] + if !ok || svc == nil { + return + } + + // Delete existing handler, then cascade delete if empty. + for _, m := range mounts { + delete(svc.Web[hp].Handlers, m) + } + if len(svc.Web[hp].Handlers) == 0 { + delete(svc.Web, hp) + delete(svc.TCP, port) + } + if len(svc.Web) == 0 && len(svc.TCP) == 0 { + delete(sc.Services, svcName) + } + if len(sc.Services) == 0 { + sc.Services = nil + } +} + // RemoveTCPForwarding deletes the TCP forwarding configuration for the given // port from the serve config. 
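// Illustrative sketch (not part of the change above): with this patch,
// SetWebHandler detects a "svc:"-prefixed host via tailcfg.AsServiceName and
// stores the handler under sc.Services rather than the node-level maps, and
// RemoveServiceWebHandler cascades the cleanup. The "svc:bar" name and proxy
// target are hypothetical values, mirroring the serve_test.go cases above.
package main

import (
	"fmt"

	"tailscale.com/ipn"
)

func main() {
	sc := new(ipn.ServeConfig)

	// Lands under sc.Services["svc:bar"], keyed as "bar:80".
	sc.SetWebHandler(&ipn.HTTPHandler{Proxy: "http://localhost:3000"}, "svc:bar", 80, "/", false)
	fmt.Println(sc.IsServingHTTP(80, "svc:bar")) // true

	// Removing the only mount deletes the web and TCP entries and, since the
	// service is now empty, the service itself.
	sc.RemoveServiceWebHandler(nil, "svc:bar", 80, []string{"/"})
	fmt.Println(len(sc.Services)) // 0
}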
-func (sc *ServeConfig) RemoveTCPForwarding(port uint16) { +func (sc *ServeConfig) RemoveTCPForwarding(svcName tailcfg.ServiceName, port uint16) { + if svcName != "" { + if svc := sc.Services[svcName]; svc != nil { + delete(svc.TCP, port) + if len(svc.TCP) == 0 { + svc.TCP = nil + } + if len(svc.Web) == 0 && len(svc.TCP) == 0 { + delete(sc.Services, svcName) + } + if len(sc.Services) == 0 { + sc.Services = nil + } + } + return + } delete(sc.TCP, port) if len(sc.TCP) == 0 { sc.TCP = nil diff --git a/ipn/serve_test.go b/ipn/serve_test.go index ba0a26f8c..7028c1e17 100644 --- a/ipn/serve_test.go +++ b/ipn/serve_test.go @@ -128,6 +128,121 @@ func TestHasPathHandler(t *testing.T) { } } +func TestIsTCPForwardingOnPort(t *testing.T) { + tests := []struct { + name string + cfg ServeConfig + svcName tailcfg.ServiceName + port uint16 + want bool + }{ + { + name: "empty-config", + cfg: ServeConfig{}, + svcName: "", + port: 80, + want: false, + }, + { + name: "node-tcp-config-match", + cfg: ServeConfig{ + TCP: map[uint16]*TCPPortHandler{80: {TCPForward: "10.0.0.123:3000"}}, + }, + svcName: "", + port: 80, + want: true, + }, + { + name: "node-tcp-config-no-match", + cfg: ServeConfig{ + TCP: map[uint16]*TCPPortHandler{80: {TCPForward: "10.0.0.123:3000"}}, + }, + svcName: "", + port: 443, + want: false, + }, + { + name: "node-tcp-config-no-match-with-service", + cfg: ServeConfig{ + TCP: map[uint16]*TCPPortHandler{80: {TCPForward: "10.0.0.123:3000"}}, + }, + svcName: "svc:bar", + port: 80, + want: false, + }, + { + name: "node-web-config-no-match", + cfg: ServeConfig{ + TCP: map[uint16]*TCPPortHandler{80: {HTTPS: true}}, + Web: map[HostPort]*WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*HTTPHandler{ + "/": {Text: "Hello, world!"}, + }, + }, + }, + }, + svcName: "", + port: 80, + want: false, + }, + { + name: "service-tcp-config-match", + cfg: ServeConfig{ + Services: map[tailcfg.ServiceName]*ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*TCPPortHandler{80: {TCPForward: "10.0.0.123:3000"}}, + }, + }, + }, + svcName: "svc:foo", + port: 80, + want: true, + }, + { + name: "service-tcp-config-no-match", + cfg: ServeConfig{ + Services: map[tailcfg.ServiceName]*ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*TCPPortHandler{80: {TCPForward: "10.0.0.123:3000"}}, + }, + }, + }, + svcName: "svc:bar", + port: 80, + want: false, + }, + { + name: "service-web-config-no-match", + cfg: ServeConfig{ + Services: map[tailcfg.ServiceName]*ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*TCPPortHandler{80: {HTTPS: true}}, + Web: map[HostPort]*WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*HTTPHandler{ + "/": {Text: "Hello, world!"}, + }, + }, + }, + }, + }, + }, + svcName: "svc:foo", + port: 80, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.cfg.IsTCPForwardingOnPort(tt.port, tt.svcName) + if tt.want != got { + t.Errorf("IsTCPForwardingOnPort() = %v, want %v", got, tt.want) + } + }) + } +} + func TestExpandProxyTargetDev(t *testing.T) { tests := []struct { name string diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 636e2434d..398a2c8a2 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -927,6 +927,16 @@ func (t *TPMInfo) Present() bool { return t != nil } // This is not related to the older [Service] used in [Hostinfo.Services]. type ServiceName string +// AsServiceName reports whether the given string is a valid service name. +// If so returns the name as a [tailcfg.ServiceName], otherwise returns "". 
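// Rough sketch (not part of the change above) of the string-or-service switch
// that the serve code relies on: AsServiceName returns "" for anything that
// fails ServiceName.Validate. The exact validation rules are not shown here,
// but plain host names such as "foo.test.ts.net" are expected not to validate,
// which is what keeps node-level serve config on its existing code path.
package main

import (
	"fmt"

	"tailscale.com/tailcfg"
)

func main() {
	for _, host := range []string{"svc:bar", "foo.test.ts.net"} {
		if svcName := tailcfg.AsServiceName(host); svcName != "" {
			fmt.Println(host, "is a service name:", svcName)
		} else {
			fmt.Println(host, "is treated as a plain host name")
		}
	}
}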
+func AsServiceName(s string) ServiceName { + svcName := ServiceName(s) + if err := svcName.Validate(); err != nil { + return "" + } + return svcName +} + // Validate validates if the service name is formatted correctly. // We only allow valid DNS labels, since the expectation is that these will be // used as parts of domain names. All errors are [vizerror.Error]. From 93511be04483dfd9ab6fa3164b70dcae5ec366f9 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Thu, 17 Jul 2025 01:30:08 -0700 Subject: [PATCH 1108/1708] types/geo: add geo.Point and its associated units (#16583) Package geo provides functionality to represent and process geographical locations on a sphere. The main type, geo.Point, represents a pair of latitude and longitude coordinates. Updates tailscale/corp#29968 Signed-off-by: Simon Law --- types/geo/doc.go | 6 + types/geo/point.go | 279 +++++++++++++++++++ types/geo/point_test.go | 541 +++++++++++++++++++++++++++++++++++++ types/geo/quantize.go | 106 ++++++++ types/geo/quantize_test.go | 130 +++++++++ types/geo/units.go | 191 +++++++++++++ types/geo/units_test.go | 395 +++++++++++++++++++++++++++ 7 files changed, 1648 insertions(+) create mode 100644 types/geo/doc.go create mode 100644 types/geo/point.go create mode 100644 types/geo/point_test.go create mode 100644 types/geo/quantize.go create mode 100644 types/geo/quantize_test.go create mode 100644 types/geo/units.go create mode 100644 types/geo/units_test.go diff --git a/types/geo/doc.go b/types/geo/doc.go new file mode 100644 index 000000000..749c63080 --- /dev/null +++ b/types/geo/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package geo provides functionality to represent and process geographical +// locations on a spherical Earth. +package geo diff --git a/types/geo/point.go b/types/geo/point.go new file mode 100644 index 000000000..d7160ac59 --- /dev/null +++ b/types/geo/point.go @@ -0,0 +1,279 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package geo + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "strconv" +) + +// ErrBadPoint indicates that the point is malformed. +var ErrBadPoint = errors.New("not a valid point") + +// Point represents a pair of latitude and longitude coordinates. +type Point struct { + lat Degrees + // lng180 is the longitude offset by +180° so the zero value is invalid + // and +0+0/ is Point{lat: +0.0, lng180: +180.0}. + lng180 Degrees +} + +// MakePoint returns a Point representing a given latitude and longitude on +// a WGS 84 ellipsoid. The Coordinate Reference System is EPSG:4326. +// Latitude is wrapped to [-90°, +90°] and longitude to (-180°, +180°]. +func MakePoint(latitude, longitude Degrees) Point { + lat, lng := float64(latitude), float64(longitude) + + switch { + case math.IsNaN(lat) || math.IsInf(lat, 0): + // don’t wrap + case lat < -90 || lat > 90: + // Latitude wraps by flipping the longitude + lat = math.Mod(lat, 360.0) + switch { + case lat == 0.0: + lat = 0.0 // -0.0 == 0.0, but -0° is not valid + case lat < -270.0: + lat = +360.0 + lat + case lat < -90.0: + lat = -180.0 - lat + lng += 180.0 + case lat > +270.0: + lat = -360.0 + lat + case lat > +90.0: + lat = +180.0 - lat + lng += 180.0 + } + } + + switch { + case lat == -90.0 || lat == +90.0: + // By convention, the north and south poles have longitude 0°. 
+ lng = 0 + case math.IsNaN(lng) || math.IsInf(lng, 0): + // don’t wrap + case lng <= -180.0 || lng > 180.0: + // Longitude wraps around normally + lng = math.Mod(lng, 360.0) + switch { + case lng == 0.0: + lng = 0.0 // -0.0 == 0.0, but -0° is not valid + case lng <= -180.0: + lng = +360.0 + lng + case lng > +180.0: + lng = -360.0 + lng + } + } + + return Point{ + lat: Degrees(lat), + lng180: Degrees(lng + 180.0), + } +} + +// Valid reports if p is a valid point. +func (p Point) Valid() bool { + return !p.IsZero() +} + +// LatLng reports the latitude and longitude. +func (p Point) LatLng() (lat, lng Degrees, err error) { + if p.IsZero() { + return 0 * Degree, 0 * Degree, ErrBadPoint + } + return p.lat, p.lng180 - 180.0*Degree, nil +} + +// LatLng reports the latitude and longitude in float64. If err is nil, then lat +// and lng will never both be 0.0 to disambiguate between an empty struct and +// Null Island (0° 0°). +func (p Point) LatLngFloat64() (lat, lng float64, err error) { + dlat, dlng, err := p.LatLng() + if err != nil { + return 0.0, 0.0, err + } + if dlat == 0.0 && dlng == 0.0 { + // dlng must survive conversion to float32. + dlng = math.SmallestNonzeroFloat32 + } + return float64(dlat), float64(dlng), err +} + +// SphericalAngleTo returns the angular distance from p to q, calculated on a +// spherical Earth. +func (p Point) SphericalAngleTo(q Point) (Radians, error) { + pLat, pLng, pErr := p.LatLng() + qLat, qLng, qErr := q.LatLng() + switch { + case pErr != nil && qErr != nil: + return 0.0, fmt.Errorf("spherical distance from %v to %v: %w", p, q, errors.Join(pErr, qErr)) + case pErr != nil: + return 0.0, fmt.Errorf("spherical distance from %v: %w", p, pErr) + case qErr != nil: + return 0.0, fmt.Errorf("spherical distance to %v: %w", q, qErr) + } + // The spherical law of cosines is accurate enough for close points when + // using float64. + // + // The haversine formula is an alternative, but it is poorly behaved + // when points are on opposite sides of the sphere. + rLat, rLng := float64(pLat.Radians()), float64(pLng.Radians()) + sLat, sLng := float64(qLat.Radians()), float64(qLng.Radians()) + cosA := math.Sin(rLat)*math.Sin(sLat) + + math.Cos(rLat)*math.Cos(sLat)*math.Cos(rLng-sLng) + return Radians(math.Acos(cosA)), nil +} + +// DistanceTo reports the great-circle distance between p and q, in meters. +func (p Point) DistanceTo(q Point) (Distance, error) { + r, err := p.SphericalAngleTo(q) + if err != nil { + return 0, err + } + return DistanceOnEarth(r.Turns()), nil +} + +// String returns a space-separated pair of latitude and longitude, in decimal +// degrees. Positive latitudes are in the northern hemisphere, and positive +// longitudes are east of the prime meridian. If p was not initialized, this +// will return "nowhere". +func (p Point) String() string { + lat, lng, err := p.LatLng() + if err != nil { + if err == ErrBadPoint { + return "nowhere" + } + panic(err) + } + + return lat.String() + " " + lng.String() +} + +// AppendBinary implements [encoding.BinaryAppender]. The output consists of two +// float32s in big-endian byte order: latitude and longitude offset by 180°. +// If p is not a valid, the output will be an 8-byte zero value. +func (p Point) AppendBinary(b []byte) ([]byte, error) { + end := binary.BigEndian + b = end.AppendUint32(b, math.Float32bits(float32(p.lat))) + b = end.AppendUint32(b, math.Float32bits(float32(p.lng180))) + return b, nil +} + +// MarshalBinary implements [encoding.BinaryMarshaller]. 
The output matches that +// of calling [Point.AppendBinary]. +func (p Point) MarshalBinary() ([]byte, error) { + var b [8]byte + return p.AppendBinary(b[:0]) +} + +// UnmarshalBinary implements [encoding.BinaryUnmarshaler]. It expects input +// that was formatted by [Point.AppendBinary]: in big-endian byte order, a +// float32 representing latitude followed by a float32 representing longitude +// offset by 180°. If latitude and longitude fall outside valid ranges, then +// an error is returned. +func (p *Point) UnmarshalBinary(data []byte) error { + if len(data) < 8 { // Two uint32s are 8 bytes long + return fmt.Errorf("%w: not enough data: %q", ErrBadPoint, data) + } + + end := binary.BigEndian + lat := Degrees(math.Float32frombits(end.Uint32(data[0:]))) + if lat < -90*Degree || lat > 90*Degree { + return fmt.Errorf("%w: latitude outside [-90°, +90°]: %s", ErrBadPoint, lat) + } + lng180 := Degrees(math.Float32frombits(end.Uint32(data[4:]))) + if lng180 != 0 && (lng180 < 0*Degree || lng180 > 360*Degree) { + // lng180 == 0 is OK: the zero value represents invalid points. + lng := lng180 - 180*Degree + return fmt.Errorf("%w: longitude outside (-180°, +180°]: %s", ErrBadPoint, lng) + } + + p.lat = lat + p.lng180 = lng180 + return nil +} + +// AppendText implements [encoding.TextAppender]. The output is a point +// formatted as OGC Well-Known Text, as "POINT (longitude latitude)" where +// longitude and latitude are in decimal degrees. If p is not valid, the output +// will be "POINT EMPTY". +func (p Point) AppendText(b []byte) ([]byte, error) { + if p.IsZero() { + b = append(b, []byte("POINT EMPTY")...) + return b, nil + } + + lat, lng, err := p.LatLng() + if err != nil { + return b, err + } + + b = append(b, []byte("POINT (")...) + b = strconv.AppendFloat(b, float64(lng), 'f', -1, 64) + b = append(b, ' ') + b = strconv.AppendFloat(b, float64(lat), 'f', -1, 64) + b = append(b, ')') + return b, nil +} + +// MarshalText implements [encoding.TextMarshaller]. The output matches that +// of calling [Point.AppendText]. +func (p Point) MarshalText() ([]byte, error) { + var b [8]byte + return p.AppendText(b[:0]) +} + +// MarshalUint64 produces the same output as MashalBinary, encoded in a uint64. +func (p Point) MarshalUint64() (uint64, error) { + b, err := p.MarshalBinary() + return binary.NativeEndian.Uint64(b), err +} + +// UnmarshalUint64 expects input formatted by MarshalUint64. +func (p *Point) UnmarshalUint64(v uint64) error { + b := binary.NativeEndian.AppendUint64(nil, v) + return p.UnmarshalBinary(b) +} + +// IsZero reports if p is the zero value. +func (p Point) IsZero() bool { + return p == Point{} +} + +// EqualApprox reports if p and q are approximately equal: that is the absolute +// difference of both latitude and longitude are less than tol. If tol is +// negative, then tol defaults to a reasonably small number (10⁻⁵). If tol is +// zero, then p and q must be exactly equal. 
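// Illustrative sketch (not part of the change above): wrapping and distance
// behavior of geo.Point. The inputs are taken from the point_test.go table
// below ("past-north" and "toronto-to-montreal"); exact printed distances
// will vary slightly with floating-point rounding.
package main

import (
	"fmt"

	"tailscale.com/types/geo"
)

func main() {
	// One degree past the north pole wraps to the opposite meridian.
	p := geo.MakePoint(+91, 0)
	fmt.Println(p) // +89° +180°

	q := geo.MakePoint(+89, +180)
	fmt.Println(p.EqualApprox(q, -1)) // true: within the default 1e-5 tolerance

	// Great-circle distance between Toronto and Montreal, roughly 504 km.
	toronto := geo.MakePoint(+43.6532, -79.3832)
	montreal := geo.MakePoint(+45.5019, -73.5674)
	if d, err := toronto.DistanceTo(montreal); err == nil {
		fmt.Println(d) // ≈ 504 km, printed in meters
	}
}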
+func (p Point) EqualApprox(q Point, tol float64) bool { + if tol == 0 { + return p == q + } + + if p.IsZero() && q.IsZero() { + return true + } else if p.IsZero() || q.IsZero() { + return false + } + + plat, plng, err := p.LatLng() + if err != nil { + panic(err) + } + qlat, qlng, err := q.LatLng() + if err != nil { + panic(err) + } + + if tol < 0 { + tol = 1e-5 + } + + dlat := float64(plat) - float64(qlat) + dlng := float64(plng) - float64(qlng) + return ((dlat < 0 && -dlat < tol) || (dlat >= 0 && dlat < tol)) && + ((dlng < 0 && -dlng < tol) || (dlng >= 0 && dlng < tol)) +} diff --git a/types/geo/point_test.go b/types/geo/point_test.go new file mode 100644 index 000000000..308c1a183 --- /dev/null +++ b/types/geo/point_test.go @@ -0,0 +1,541 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package geo_test + +import ( + "fmt" + "math" + "testing" + "testing/quick" + + "tailscale.com/types/geo" +) + +func TestPointZero(t *testing.T) { + var zero geo.Point + + if got := zero.IsZero(); !got { + t.Errorf("IsZero() got %t", got) + } + + if got := zero.Valid(); got { + t.Errorf("Valid() got %t", got) + } + + wantErr := geo.ErrBadPoint.Error() + if _, _, err := zero.LatLng(); err.Error() != wantErr { + t.Errorf("LatLng() err %q, want %q", err, wantErr) + } + + wantStr := "nowhere" + if got := zero.String(); got != wantStr { + t.Errorf("String() got %q, want %q", got, wantStr) + } + + wantB := []byte{0, 0, 0, 0, 0, 0, 0, 0} + if b, err := zero.MarshalBinary(); err != nil { + t.Errorf("MarshalBinary() err %q, want nil", err) + } else if string(b) != string(wantB) { + t.Errorf("MarshalBinary got %q, want %q", b, wantB) + } + + wantI := uint64(0x00000000) + if i, err := zero.MarshalUint64(); err != nil { + t.Errorf("MarshalUint64() err %q, want nil", err) + } else if i != wantI { + t.Errorf("MarshalUint64 got %v, want %v", i, wantI) + } +} + +func TestPoint(t *testing.T) { + for _, tt := range []struct { + name string + lat geo.Degrees + lng geo.Degrees + wantLat geo.Degrees + wantLng geo.Degrees + wantString string + wantText string + }{ + { + name: "null-island", + lat: +0.0, + lng: +0.0, + wantLat: +0.0, + wantLng: +0.0, + wantString: "+0° +0°", + wantText: "POINT (0 0)", + }, + { + name: "north-pole", + lat: +90.0, + lng: +0.0, + wantLat: +90.0, + wantLng: +0.0, + wantString: "+90° +0°", + wantText: "POINT (0 90)", + }, + { + name: "south-pole", + lat: -90.0, + lng: +0.0, + wantLat: -90.0, + wantLng: +0.0, + wantString: "-90° +0°", + wantText: "POINT (0 -90)", + }, + { + name: "north-pole-weird-longitude", + lat: +90.0, + lng: +1.0, + wantLat: +90.0, + wantLng: +0.0, + wantString: "+90° +0°", + wantText: "POINT (0 90)", + }, + { + name: "south-pole-weird-longitude", + lat: -90.0, + lng: +1.0, + wantLat: -90.0, + wantLng: +0.0, + wantString: "-90° +0°", + wantText: "POINT (0 -90)", + }, + { + name: "almost-north", + lat: +89.0, + lng: +0.0, + wantLat: +89.0, + wantLng: +0.0, + wantString: "+89° +0°", + wantText: "POINT (0 89)", + }, + { + name: "past-north", + lat: +91.0, + lng: +0.0, + wantLat: +89.0, + wantLng: +180.0, + wantString: "+89° +180°", + wantText: "POINT (180 89)", + }, + { + name: "almost-south", + lat: -89.0, + lng: +0.0, + wantLat: -89.0, + wantLng: +0.0, + wantString: "-89° +0°", + wantText: "POINT (0 -89)", + }, + { + name: "past-south", + lat: -91.0, + lng: +0.0, + wantLat: -89.0, + wantLng: +180.0, + wantString: "-89° +180°", + wantText: "POINT (180 -89)", + }, + { + name: "antimeridian-north", + lat: +180.0, + lng: +0.0, + wantLat: 
+0.0, + wantLng: +180.0, + wantString: "+0° +180°", + wantText: "POINT (180 0)", + }, + { + name: "antimeridian-south", + lat: -180.0, + lng: +0.0, + wantLat: +0.0, + wantLng: +180.0, + wantString: "+0° +180°", + wantText: "POINT (180 0)", + }, + { + name: "almost-antimeridian-north", + lat: +179.0, + lng: +0.0, + wantLat: +1.0, + wantLng: +180.0, + wantString: "+1° +180°", + wantText: "POINT (180 1)", + }, + { + name: "past-antimeridian-north", + lat: +181.0, + lng: +0.0, + wantLat: -1.0, + wantLng: +180.0, + wantString: "-1° +180°", + wantText: "POINT (180 -1)", + }, + { + name: "almost-antimeridian-south", + lat: -179.0, + lng: +0.0, + wantLat: -1.0, + wantLng: +180.0, + wantString: "-1° +180°", + wantText: "POINT (180 -1)", + }, + { + name: "past-antimeridian-south", + lat: -181.0, + lng: +0.0, + wantLat: +1.0, + wantLng: +180.0, + wantString: "+1° +180°", + wantText: "POINT (180 1)", + }, + { + name: "circumnavigate-north", + lat: +360.0, + lng: +1.0, + wantLat: +0.0, + wantLng: +1.0, + wantString: "+0° +1°", + wantText: "POINT (1 0)", + }, + { + name: "circumnavigate-south", + lat: -360.0, + lng: +1.0, + wantLat: +0.0, + wantLng: +1.0, + wantString: "+0° +1°", + wantText: "POINT (1 0)", + }, + { + name: "almost-circumnavigate-north", + lat: +359.0, + lng: +1.0, + wantLat: -1.0, + wantLng: +1.0, + wantString: "-1° +1°", + wantText: "POINT (1 -1)", + }, + { + name: "past-circumnavigate-north", + lat: +361.0, + lng: +1.0, + wantLat: +1.0, + wantLng: +1.0, + wantString: "+1° +1°", + wantText: "POINT (1 1)", + }, + { + name: "almost-circumnavigate-south", + lat: -359.0, + lng: +1.0, + wantLat: +1.0, + wantLng: +1.0, + wantString: "+1° +1°", + wantText: "POINT (1 1)", + }, + { + name: "past-circumnavigate-south", + lat: -361.0, + lng: +1.0, + wantLat: -1.0, + wantLng: +1.0, + wantString: "-1° +1°", + wantText: "POINT (1 -1)", + }, + { + name: "antimeridian-east", + lat: +0.0, + lng: +180.0, + wantLat: +0.0, + wantLng: +180.0, + wantString: "+0° +180°", + wantText: "POINT (180 0)", + }, + { + name: "antimeridian-west", + lat: +0.0, + lng: -180.0, + wantLat: +0.0, + wantLng: +180.0, + wantString: "+0° +180°", + wantText: "POINT (180 0)", + }, + { + name: "almost-antimeridian-east", + lat: +0.0, + lng: +179.0, + wantLat: +0.0, + wantLng: +179.0, + wantString: "+0° +179°", + wantText: "POINT (179 0)", + }, + { + name: "past-antimeridian-east", + lat: +0.0, + lng: +181.0, + wantLat: +0.0, + wantLng: -179.0, + wantString: "+0° -179°", + wantText: "POINT (-179 0)", + }, + { + name: "almost-antimeridian-west", + lat: +0.0, + lng: -179.0, + wantLat: +0.0, + wantLng: -179.0, + wantString: "+0° -179°", + wantText: "POINT (-179 0)", + }, + { + name: "past-antimeridian-west", + lat: +0.0, + lng: -181.0, + wantLat: +0.0, + wantLng: +179.0, + wantString: "+0° +179°", + wantText: "POINT (179 0)", + }, + { + name: "montreal", + lat: +45.508888, + lng: -73.561668, + wantLat: +45.508888, + wantLng: -73.561668, + wantString: "+45.508888° -73.561668°", + wantText: "POINT (-73.561668 45.508888)", + }, + { + name: "canada", + lat: 57.550480044655636, + lng: -98.41680517868062, + wantLat: 57.550480044655636, + wantLng: -98.41680517868062, + wantString: "+57.550480044655636° -98.41680517868062°", + wantText: "POINT (-98.41680517868062 57.550480044655636)", + }, + } { + t.Run(tt.name, func(t *testing.T) { + p := geo.MakePoint(tt.lat, tt.lng) + + lat, lng, err := p.LatLng() + if !approx(lat, tt.wantLat) { + t.Errorf("MakePoint: lat %v, want %v", lat, tt.wantLat) + } + if !approx(lng, tt.wantLng) { + 
t.Errorf("MakePoint: lng %v, want %v", lng, tt.wantLng) + } + if err != nil { + t.Fatalf("LatLng: err %q, expected nil", err) + } + + if got := p.String(); got != tt.wantString { + t.Errorf("String: got %q, wantString %q", got, tt.wantString) + } + + txt, err := p.MarshalText() + if err != nil { + t.Errorf("Text: err %q, expected nil", err) + } else if string(txt) != tt.wantText { + t.Errorf("Text: got %q, wantText %q", txt, tt.wantText) + } + + b, err := p.MarshalBinary() + if err != nil { + t.Fatalf("MarshalBinary: err %q, expected nil", err) + } + + var q geo.Point + if err := q.UnmarshalBinary(b); err != nil { + t.Fatalf("UnmarshalBinary: err %q, expected nil", err) + } + if !q.EqualApprox(p, -1) { + t.Errorf("UnmarshalBinary: roundtrip failed: %#v != %#v", q, p) + } + + i, err := p.MarshalUint64() + if err != nil { + t.Fatalf("MarshalUint64: err %q, expected nil", err) + } + + var r geo.Point + if err := r.UnmarshalUint64(i); err != nil { + t.Fatalf("UnmarshalUint64: err %r, expected nil", err) + } + if !q.EqualApprox(r, -1) { + t.Errorf("UnmarshalUint64: roundtrip failed: %#v != %#v", r, p) + } + }) + } +} + +func TestPointMarshalBinary(t *testing.T) { + roundtrip := func(p geo.Point) error { + b, err := p.MarshalBinary() + if err != nil { + return fmt.Errorf("marshal: %v", err) + } + var q geo.Point + if err := q.UnmarshalBinary(b); err != nil { + return fmt.Errorf("unmarshal: %v", err) + } + if q != p { + return fmt.Errorf("%#v != %#v", q, p) + } + return nil + } + + t.Run("nowhere", func(t *testing.T) { + var nowhere geo.Point + if err := roundtrip(nowhere); err != nil { + t.Errorf("roundtrip: %v", err) + } + }) + + t.Run("quick-check", func(t *testing.T) { + f := func(lat geo.Degrees, lng geo.Degrees) (ok bool) { + pt := geo.MakePoint(lat, lng) + if err := roundtrip(pt); err != nil { + t.Errorf("roundtrip: %v", err) + } + return !t.Failed() + } + if err := quick.Check(f, nil); err != nil { + t.Error(err) + } + }) +} + +func TestPointMarshalUint64(t *testing.T) { + t.Skip("skip") + roundtrip := func(p geo.Point) error { + i, err := p.MarshalUint64() + if err != nil { + return fmt.Errorf("marshal: %v", err) + } + var q geo.Point + if err := q.UnmarshalUint64(i); err != nil { + return fmt.Errorf("unmarshal: %v", err) + } + if q != p { + return fmt.Errorf("%#v != %#v", q, p) + } + return nil + } + + t.Run("nowhere", func(t *testing.T) { + var nowhere geo.Point + if err := roundtrip(nowhere); err != nil { + t.Errorf("roundtrip: %v", err) + } + }) + + t.Run("quick-check", func(t *testing.T) { + f := func(lat geo.Degrees, lng geo.Degrees) (ok bool) { + if err := roundtrip(geo.MakePoint(lat, lng)); err != nil { + t.Errorf("roundtrip: %v", err) + } + return !t.Failed() + } + if err := quick.Check(f, nil); err != nil { + t.Error(err) + } + }) +} + +func TestPointSphericalAngleTo(t *testing.T) { + const earthRadius = 6371.000 // volumetric mean radius (km) + const kmToRad = 1 / earthRadius + for _, tt := range []struct { + name string + x geo.Point + y geo.Point + want geo.Radians + wantErr string + }{ + { + name: "same-point-null-island", + x: geo.MakePoint(0, 0), + y: geo.MakePoint(0, 0), + want: 0.0 * geo.Radian, + }, + { + name: "same-point-north-pole", + x: geo.MakePoint(+90, 0), + y: geo.MakePoint(+90, +90), + want: 0.0 * geo.Radian, + }, + { + name: "same-point-south-pole", + x: geo.MakePoint(-90, 0), + y: geo.MakePoint(-90, -90), + want: 0.0 * geo.Radian, + }, + { + name: "north-pole-to-south-pole", + x: geo.MakePoint(+90, 0), + y: geo.MakePoint(-90, -90), + want: math.Pi * 
geo.Radian, + }, + { + name: "toronto-to-montreal", + x: geo.MakePoint(+43.6532, -79.3832), + y: geo.MakePoint(+45.5019, -73.5674), + want: 504.26 * kmToRad * geo.Radian, + }, + { + name: "sydney-to-san-francisco", + x: geo.MakePoint(-33.8727, +151.2057), + y: geo.MakePoint(+37.7749, -122.4194), + want: 11948.18 * kmToRad * geo.Radian, + }, + { + name: "new-york-to-paris", + x: geo.MakePoint(+40.7128, -74.0060), + y: geo.MakePoint(+48.8575, +2.3514), + want: 5837.15 * kmToRad * geo.Radian, + }, + { + name: "seattle-to-tokyo", + x: geo.MakePoint(+47.6061, -122.3328), + y: geo.MakePoint(+35.6764, +139.6500), + want: 7700.00 * kmToRad * geo.Radian, + }, + } { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.x.SphericalAngleTo(tt.y) + if tt.wantErr == "" && err != nil { + t.Fatalf("err %q, expected nil", err) + } + if tt.wantErr != "" && (err == nil || err.Error() != tt.wantErr) { + t.Fatalf("err %q, expected %q", err, tt.wantErr) + } + if tt.wantErr != "" { + return + } + + if !approx(got, tt.want) { + t.Errorf("x to y: got %v, want %v", got, tt.want) + } + + // Distance should be commutative + got, err = tt.y.SphericalAngleTo(tt.x) + if err != nil { + t.Fatalf("err %q, expected nil", err) + } + if !approx(got, tt.want) { + t.Errorf("y to x: got %v, want %v", got, tt.want) + } + t.Logf("x to y: %v km", got/kmToRad) + }) + } +} + +func approx[T ~float64](x, y T) bool { + return math.Abs(float64(x)-float64(y)) <= 1e-5 +} diff --git a/types/geo/quantize.go b/types/geo/quantize.go new file mode 100644 index 000000000..18ec11f9f --- /dev/null +++ b/types/geo/quantize.go @@ -0,0 +1,106 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package geo + +import ( + "math" + "sync" +) + +// MinSeparation is the minimum separation between two points after quantizing. +// [Point.Quantize] guarantees that two points will either be snapped to exactly +// the same point, which conflates multiple positions together, or that the two +// points will be far enough apart that successfully performing most reverse +// lookups would be highly improbable. +const MinSeparation = 50_000 * Meter + +// Latitude +var ( + // numSepsEquatorToPole is the number of separations between a point on + // the equator to a point on a pole, that satisfies [minPointSep]. In + // other words, the number of separations between 0° and +90° degrees + // latitude. + numSepsEquatorToPole = int(math.Floor(float64( + earthPolarCircumference / MinSeparation / 4))) + + // latSep is the number of degrees between two adjacent latitudinal + // points. In other words, the next point going straight north of + // 0° would be latSep°. + latSep = Degrees(90.0 / float64(numSepsEquatorToPole)) +) + +// snapToLat returns the number of the nearest latitudinal separation to +// lat. A positive result is north of the equator, a negative result is south, +// and zero is the equator itself. For example, a result of -1 would mean a +// point that is [latSep] south of the equator. +func snapToLat(lat Degrees) int { + return int(math.Round(float64(lat / latSep))) +} + +// lngSep is a lookup table for the number of degrees between two adjacent +// longitudinal separations. where the index corresponds to the absolute value +// of the latitude separation. The first value corresponds to the equator and +// the last value corresponds to the separation before the pole. There is no +// value for the pole itself, because longitude has no meaning there. 
+// +// [lngSep] is calculated on init, which is so quick and will be used so often +// that the startup cost is negligible. +var lngSep = sync.OnceValue(func() []Degrees { + lut := make([]Degrees, numSepsEquatorToPole) + + // i ranges from the equator to a pole + for i := range len(lut) { + // lat ranges from [0°, 90°], because the southern hemisphere is + // a reflection of the northern one. + lat := Degrees(i) * latSep + ratio := math.Cos(float64(lat.Radians())) + circ := Distance(ratio) * earthEquatorialCircumference + num := int(math.Floor(float64(circ / MinSeparation))) + // We define lut[0] as 0°, lut[len(lut)] to be the north pole, + // which means -lut[len(lut)] is the south pole. + lut[i] = Degrees(360.0 / float64(num)) + } + return lut +}) + +// snapToLatLng returns the number of the nearest latitudinal separation to lat, +// and the nearest longitudinal separation to lng. +func snapToLatLng(lat, lng Degrees) (Degrees, Degrees) { + latN := snapToLat(lat) + + // absolute index into lngSep + n := latN + if n < 0 { + n = -latN + } + + lngSep := lngSep() + if n < len(lngSep) { + sep := lngSep[n] + lngN := int(math.Round(float64(lng / sep))) + return Degrees(latN) * latSep, Degrees(lngN) * sep + } + if latN < 0 { // south pole + return -90 * Degree, 0 * Degree + } else { // north pole + return +90 * Degree, 0 * Degree + } +} + +// Quantize returns a new [Point] after throwing away enough location data in p +// so that it would be difficult to distinguish a node among all the other nodes +// in its general vicinity. One caveat is that if there’s only one point in an +// obscure location, someone could triangulate the node using additional data. +// +// This method is stable: given the same p, it will always return the same +// result. It is equivalent to snapping to points on Earth that are at least +// [MinSeparation] apart. +func (p Point) Quantize() Point { + if p.IsZero() { + return p + } + + lat, lng := snapToLatLng(p.lat, p.lng180-180) + return MakePoint(lat, lng) +} diff --git a/types/geo/quantize_test.go b/types/geo/quantize_test.go new file mode 100644 index 000000000..3c707e303 --- /dev/null +++ b/types/geo/quantize_test.go @@ -0,0 +1,130 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package geo_test + +import ( + "testing" + "testing/quick" + + "tailscale.com/types/geo" +) + +func TestPointAnonymize(t *testing.T) { + t.Run("nowhere", func(t *testing.T) { + var zero geo.Point + p := zero.Quantize() + want := zero.Valid() + if got := p.Valid(); got != want { + t.Fatalf("zero.Valid %t, want %t", got, want) + } + }) + + t.Run("separation", func(t *testing.T) { + // Walk from the south pole to the north pole and check that each + // point on the latitude is approximately MinSeparation apart. 
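// Illustrative sketch (not part of the change above): Quantize snaps a point
// onto a grid whose cells are at least MinSeparation (50 km) apart, so nearby
// nodes collapse onto the same coordinates. The Montreal coordinates are
// reused from point_test.go; the exact snapped value depends on the grid.
package main

import (
	"fmt"

	"tailscale.com/types/geo"
)

func main() {
	p := geo.MakePoint(+45.508888, -73.561668)
	q := p.Quantize()
	fmt.Println(p, "->", q)

	// Quantizing is stable, and the quick-check test below bounds how far a
	// point may move.
	fmt.Println(q == p.Quantize()) // true
	if d, err := p.DistanceTo(q); err == nil {
		fmt.Println(d <= 2*geo.MinSeparation) // true
	}
}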
+ const southPole = -90 * geo.Degree + const northPole = 90 * geo.Degree + const dateLine = 180 * geo.Degree + + llat := southPole + for lat := llat; lat <= northPole; lat += 0x1p-4 { + last := geo.MakePoint(llat, 0) + cur := geo.MakePoint(lat, 0) + anon := cur.Quantize() + switch l, g, err := anon.LatLng(); { + case err != nil: + t.Fatal(err) + case lat == southPole: + // initialize llng, to the first snapped longitude + llat = l + goto Lng + case g != 0: + t.Fatalf("%v is west or east of %v", anon, last) + case l < llat: + t.Fatalf("%v is south of %v", anon, last) + case l == llat: + continue + case l > llat: + switch dist, err := last.DistanceTo(anon); { + case err != nil: + t.Fatal(err) + case dist == 0.0: + continue + case dist < geo.MinSeparation: + t.Logf("lat=%v last=%v cur=%v anon=%v", lat, last, cur, anon) + t.Fatalf("%v is too close to %v", anon, last) + default: + llat = l + } + } + + Lng: + llng := dateLine + for lng := llng; lng <= dateLine && lng >= -dateLine; lng -= 0x1p-3 { + last := geo.MakePoint(llat, llng) + cur := geo.MakePoint(lat, lng) + anon := cur.Quantize() + switch l, g, err := anon.LatLng(); { + case err != nil: + t.Fatal(err) + case lng == dateLine: + // initialize llng, to the first snapped longitude + llng = g + continue + case l != llat: + t.Fatalf("%v is north or south of %v", anon, last) + case g != llng: + const tolerance = geo.MinSeparation * 0x1p-9 + switch dist, err := last.DistanceTo(anon); { + case err != nil: + t.Fatal(err) + case dist < tolerance: + continue + case dist < (geo.MinSeparation - tolerance): + t.Logf("lat=%v lng=%v last=%v cur=%v anon=%v", lat, lng, last, cur, anon) + t.Fatalf("%v is too close to %v: %v", anon, last, dist) + default: + llng = g + } + + } + } + } + if llat == southPole { + t.Fatal("llat never incremented") + } + }) + + t.Run("quick-check", func(t *testing.T) { + f := func(lat, lng geo.Degrees) bool { + p := geo.MakePoint(lat, lng) + q := p.Quantize() + t.Logf("quantize %v = %v", p, q) + + lat, lng, err := q.LatLng() + if err != nil { + t.Errorf("err %v, want nil", err) + return !t.Failed() + } + + if lat < -90*geo.Degree || lat > 90*geo.Degree { + t.Errorf("lat outside [-90°, +90°]: %v", lat) + } + if lng < -180*geo.Degree || lng > 180*geo.Degree { + t.Errorf("lng outside [-180°, +180°], %v", lng) + } + + if dist, err := p.DistanceTo(q); err != nil { + t.Error(err) + } else if dist > (geo.MinSeparation * 2) { + t.Errorf("moved too far: %v", dist) + } + + return !t.Failed() + } + if err := quick.Check(f, nil); err != nil { + t.Fatal(err) + } + }) +} diff --git a/types/geo/units.go b/types/geo/units.go new file mode 100644 index 000000000..76a4c02f7 --- /dev/null +++ b/types/geo/units.go @@ -0,0 +1,191 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package geo + +import ( + "math" + "strconv" + "strings" + "unicode" +) + +const ( + Degree Degrees = 1 + Radian Radians = 1 + Turn Turns = 1 + Meter Distance = 1 +) + +// Degrees represents a latitude or longitude, in decimal degrees. +type Degrees float64 + +// ParseDegrees parses s as decimal degrees. +func ParseDegrees(s string) (Degrees, error) { + s = strings.TrimSuffix(s, "°") + f, err := strconv.ParseFloat(s, 64) + return Degrees(f), err +} + +// MustParseDegrees parses s as decimal degrees, but panics on error. +func MustParseDegrees(s string) Degrees { + d, err := ParseDegrees(s) + if err != nil { + panic(err) + } + return d +} + +// String implements the [Stringer] interface. 
The output is formatted in +// decimal degrees, prefixed by either the appropriate + or - sign, and suffixed +// by a ° degree symbol. +func (d Degrees) String() string { + b, _ := d.AppendText(nil) + b = append(b, []byte("°")...) + return string(b) +} + +// AppendText implements [encoding.TextAppender]. The output is formatted in +// decimal degrees, prefixed by either the appropriate + or - sign. +func (d Degrees) AppendText(b []byte) ([]byte, error) { + b = d.AppendZeroPaddedText(b, 0) + return b, nil +} + +// AppendZeroPaddedText appends d formatted as decimal degrees to b. The number of +// integer digits will be zero-padded to nint. +func (d Degrees) AppendZeroPaddedText(b []byte, nint int) []byte { + n := float64(d) + + if math.IsInf(n, 0) || math.IsNaN(n) { + return strconv.AppendFloat(b, n, 'f', -1, 64) + } + + sign := byte('+') + if math.Signbit(n) { + sign = '-' + n = -n + } + b = append(b, sign) + + pad := nint - 1 + for nn := n / 10; nn >= 1 && pad > 0; nn /= 10 { + pad-- + } + for range pad { + b = append(b, '0') + } + return strconv.AppendFloat(b, n, 'f', -1, 64) +} + +// Radians converts d into radians. +func (d Degrees) Radians() Radians { + return Radians(d * math.Pi / 180.0) +} + +// Turns converts d into a number of turns. +func (d Degrees) Turns() Turns { + return Turns(d / 360.0) +} + +// Radians represents a latitude or longitude, in radians. +type Radians float64 + +// ParseRadians parses s as radians. +func ParseRadians(s string) (Radians, error) { + s = strings.TrimSuffix(s, "rad") + s = strings.TrimRightFunc(s, unicode.IsSpace) + f, err := strconv.ParseFloat(s, 64) + return Radians(f), err +} + +// MustParseRadians parses s as radians, but panics on error. +func MustParseRadians(s string) Radians { + r, err := ParseRadians(s) + if err != nil { + panic(err) + } + return r +} + +// String implements the [Stringer] interface. +func (r Radians) String() string { + return strconv.FormatFloat(float64(r), 'f', -1, 64) + " rad" +} + +// Degrees converts r into decimal degrees. +func (r Radians) Degrees() Degrees { + return Degrees(r * 180.0 / math.Pi) +} + +// Turns converts r into a number of turns. +func (r Radians) Turns() Turns { + return Turns(r / 2 / math.Pi) +} + +// Turns represents a number of complete revolutions around a sphere. +type Turns float64 + +// String implements the [Stringer] interface. +func (o Turns) String() string { + return strconv.FormatFloat(float64(o), 'f', -1, 64) +} + +// Degrees converts t into decimal degrees. +func (o Turns) Degrees() Degrees { + return Degrees(o * 360.0) +} + +// Radians converts t into radians. +func (o Turns) Radians() Radians { + return Radians(o * 2 * math.Pi) +} + +// Distance represents a great-circle distance in meters. +type Distance float64 + +// ParseDistance parses s as distance in meters. +func ParseDistance(s string) (Distance, error) { + s = strings.TrimSuffix(s, "m") + s = strings.TrimRightFunc(s, unicode.IsSpace) + f, err := strconv.ParseFloat(s, 64) + return Distance(f), err +} + +// MustParseDistance parses s as distance in meters, but panics on error. +func MustParseDistance(s string) Distance { + d, err := ParseDistance(s) + if err != nil { + panic(err) + } + return d +} + +// String implements the [Stringer] interface. +func (d Distance) String() string { + return strconv.FormatFloat(float64(d), 'f', -1, 64) + "m" +} + +// DistanceOnEarth converts t turns into the great-circle distance, in meters. 
+func DistanceOnEarth(t Turns) Distance { + return Distance(t) * EarthMeanCircumference +} + +// Earth Fact Sheet +// https://nssdc.gsfc.nasa.gov/planetary/factsheet/earthfact.html +const ( + // EarthMeanRadius is the volumetric mean radius of the Earth. + EarthMeanRadius = 6_371_000 * Meter + // EarthMeanCircumference is the volumetric mean circumference of the Earth. + EarthMeanCircumference = 2 * math.Pi * EarthMeanRadius + + // earthEquatorialRadius is the equatorial radius of the Earth. + earthEquatorialRadius = 6_378_137 * Meter + // earthEquatorialCircumference is the equatorial circumference of the Earth. + earthEquatorialCircumference = 2 * math.Pi * earthEquatorialRadius + + // earthPolarRadius is the polar radius of the Earth. + earthPolarRadius = 6_356_752 * Meter + // earthPolarCircumference is the polar circumference of the Earth. + earthPolarCircumference = 2 * math.Pi * earthPolarRadius +) diff --git a/types/geo/units_test.go b/types/geo/units_test.go new file mode 100644 index 000000000..b6f724ce0 --- /dev/null +++ b/types/geo/units_test.go @@ -0,0 +1,395 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package geo_test + +import ( + "math" + "strings" + "testing" + + "tailscale.com/types/geo" +) + +func TestDegrees(t *testing.T) { + for _, tt := range []struct { + name string + degs geo.Degrees + wantStr string + wantText string + wantPad string + wantRads geo.Radians + wantTurns geo.Turns + }{ + { + name: "zero", + degs: 0.0 * geo.Degree, + wantStr: "+0°", + wantText: "+0", + wantPad: "+000", + wantRads: 0.0 * geo.Radian, + wantTurns: 0 * geo.Turn, + }, + { + name: "quarter-turn", + degs: 90.0 * geo.Degree, + wantStr: "+90°", + wantText: "+90", + wantPad: "+090", + wantRads: 0.5 * math.Pi * geo.Radian, + wantTurns: 0.25 * geo.Turn, + }, + { + name: "half-turn", + degs: 180.0 * geo.Degree, + wantStr: "+180°", + wantText: "+180", + wantPad: "+180", + wantRads: 1.0 * math.Pi * geo.Radian, + wantTurns: 0.5 * geo.Turn, + }, + { + name: "full-turn", + degs: 360.0 * geo.Degree, + wantStr: "+360°", + wantText: "+360", + wantPad: "+360", + wantRads: 2.0 * math.Pi * geo.Radian, + wantTurns: 1.0 * geo.Turn, + }, + { + name: "negative-zero", + degs: geo.MustParseDegrees("-0.0"), + wantStr: "-0°", + wantText: "-0", + wantPad: "-000", + wantRads: 0 * geo.Radian * -1, + wantTurns: 0 * geo.Turn * -1, + }, + { + name: "small-degree", + degs: -1.2003 * geo.Degree, + wantStr: "-1.2003°", + wantText: "-1.2003", + wantPad: "-001.2003", + wantRads: -0.020949187011687936 * geo.Radian, + wantTurns: -0.0033341666666666663 * geo.Turn, + }, + } { + t.Run(tt.name, func(t *testing.T) { + if got := tt.degs.String(); got != tt.wantStr { + t.Errorf("String got %q, want %q", got, tt.wantStr) + } + + d, err := geo.ParseDegrees(tt.wantStr) + if err != nil { + t.Fatalf("ParseDegrees err %q, want nil", err.Error()) + } + if d != tt.degs { + t.Errorf("ParseDegrees got %q, want %q", d, tt.degs) + } + + b, err := tt.degs.AppendText(nil) + if err != nil { + t.Fatalf("AppendText err %q, want nil", err.Error()) + } + if string(b) != tt.wantText { + t.Errorf("AppendText got %q, want %q", b, tt.wantText) + } + + b = tt.degs.AppendZeroPaddedText(nil, 3) + if string(b) != tt.wantPad { + t.Errorf("AppendZeroPaddedText got %q, want %q", b, tt.wantPad) + } + + r := tt.degs.Radians() + if r != tt.wantRads { + t.Errorf("Radian got %v, want %v", r, tt.wantRads) + } + if d := r.Degrees(); d != tt.degs { // Roundtrip + t.Errorf("Degrees got %v, want %v", d, tt.degs) + } + + o := 
tt.degs.Turns() + if o != tt.wantTurns { + t.Errorf("Turns got %v, want %v", o, tt.wantTurns) + } + }) + } +} + +func TestRadians(t *testing.T) { + for _, tt := range []struct { + name string + rads geo.Radians + wantStr string + wantText string + wantDegs geo.Degrees + wantTurns geo.Turns + }{ + { + name: "zero", + rads: 0.0 * geo.Radian, + wantStr: "0 rad", + wantDegs: 0.0 * geo.Degree, + wantTurns: 0 * geo.Turn, + }, + { + name: "quarter-turn", + rads: 0.5 * math.Pi * geo.Radian, + wantStr: "1.5707963267948966 rad", + wantDegs: 90.0 * geo.Degree, + wantTurns: 0.25 * geo.Turn, + }, + { + name: "half-turn", + rads: 1.0 * math.Pi * geo.Radian, + wantStr: "3.141592653589793 rad", + wantDegs: 180.0 * geo.Degree, + wantTurns: 0.5 * geo.Turn, + }, + { + name: "full-turn", + rads: 2.0 * math.Pi * geo.Radian, + wantStr: "6.283185307179586 rad", + wantDegs: 360.0 * geo.Degree, + wantTurns: 1.0 * geo.Turn, + }, + { + name: "negative-zero", + rads: geo.MustParseRadians("-0"), + wantStr: "-0 rad", + wantDegs: 0 * geo.Degree * -1, + wantTurns: 0 * geo.Turn * -1, + }, + } { + t.Run(tt.name, func(t *testing.T) { + if got := tt.rads.String(); got != tt.wantStr { + t.Errorf("String got %q, want %q", got, tt.wantStr) + } + + r, err := geo.ParseRadians(tt.wantStr) + if err != nil { + t.Fatalf("ParseDegrees err %q, want nil", err.Error()) + } + if r != tt.rads { + t.Errorf("ParseDegrees got %q, want %q", r, tt.rads) + } + + d := tt.rads.Degrees() + if d != tt.wantDegs { + t.Errorf("Degrees got %v, want %v", d, tt.wantDegs) + } + if r := d.Radians(); r != tt.rads { // Roundtrip + t.Errorf("Radians got %v, want %v", r, tt.rads) + } + + o := tt.rads.Turns() + if o != tt.wantTurns { + t.Errorf("Turns got %v, want %v", o, tt.wantTurns) + } + }) + } +} + +func TestTurns(t *testing.T) { + for _, tt := range []struct { + name string + turns geo.Turns + wantStr string + wantText string + wantDegs geo.Degrees + wantRads geo.Radians + }{ + { + name: "zero", + turns: 0.0, + wantStr: "0", + wantDegs: 0.0 * geo.Degree, + wantRads: 0 * geo.Radian, + }, + { + name: "quarter-turn", + turns: 0.25, + wantStr: "0.25", + wantDegs: 90.0 * geo.Degree, + wantRads: 0.5 * math.Pi * geo.Radian, + }, + { + name: "half-turn", + turns: 0.5, + wantStr: "0.5", + wantDegs: 180.0 * geo.Degree, + wantRads: 1.0 * math.Pi * geo.Radian, + }, + { + name: "full-turn", + turns: 1.0, + wantStr: "1", + wantDegs: 360.0 * geo.Degree, + wantRads: 2.0 * math.Pi * geo.Radian, + }, + { + name: "negative-zero", + turns: geo.Turns(math.Copysign(0, -1)), + wantStr: "-0", + wantDegs: 0 * geo.Degree * -1, + wantRads: 0 * geo.Radian * -1, + }, + } { + t.Run(tt.name, func(t *testing.T) { + if got := tt.turns.String(); got != tt.wantStr { + t.Errorf("String got %q, want %q", got, tt.wantStr) + } + + d := tt.turns.Degrees() + if d != tt.wantDegs { + t.Errorf("Degrees got %v, want %v", d, tt.wantDegs) + } + if o := d.Turns(); o != tt.turns { // Roundtrip + t.Errorf("Turns got %v, want %v", o, tt.turns) + } + + r := tt.turns.Radians() + if r != tt.wantRads { + t.Errorf("Turns got %v, want %v", r, tt.wantRads) + } + }) + } +} + +func TestDistance(t *testing.T) { + for _, tt := range []struct { + name string + dist geo.Distance + wantStr string + }{ + { + name: "zero", + dist: 0.0 * geo.Meter, + wantStr: "0m", + }, + { + name: "random", + dist: 4 * geo.Meter, + wantStr: "4m", + }, + { + name: "light-second", + dist: 299_792_458 * geo.Meter, + wantStr: "299792458m", + }, + { + name: "planck-length", + dist: 1.61625518e-35 * geo.Meter, + wantStr: 
"0.0000000000000000000000000000000000161625518m", + }, + { + name: "negative-zero", + dist: geo.Distance(math.Copysign(0, -1)), + wantStr: "-0m", + }, + } { + t.Run(tt.name, func(t *testing.T) { + if got := tt.dist.String(); got != tt.wantStr { + t.Errorf("String got %q, want %q", got, tt.wantStr) + } + + r, err := geo.ParseDistance(tt.wantStr) + if err != nil { + t.Fatalf("ParseDegrees err %q, want nil", err.Error()) + } + if r != tt.dist { + t.Errorf("ParseDegrees got %q, want %q", r, tt.dist) + } + }) + } +} + +func TestDistanceOnEarth(t *testing.T) { + for _, tt := range []struct { + name string + here geo.Point + there geo.Point + want geo.Distance + wantErr string + }{ + { + name: "no-points", + here: geo.Point{}, + there: geo.Point{}, + wantErr: "not a valid point", + }, + { + name: "not-here", + here: geo.Point{}, + there: geo.MakePoint(0, 0), + wantErr: "not a valid point", + }, + { + name: "not-there", + here: geo.MakePoint(0, 0), + there: geo.Point{}, + wantErr: "not a valid point", + }, + { + name: "null-island", + here: geo.MakePoint(0, 0), + there: geo.MakePoint(0, 0), + want: 0 * geo.Meter, + }, + { + name: "equator-to-south-pole", + here: geo.MakePoint(0, 0), + there: geo.MakePoint(-90, 0), + want: geo.EarthMeanCircumference / 4, + }, + { + name: "north-pole-to-south-pole", + here: geo.MakePoint(+90, 0), + there: geo.MakePoint(-90, 0), + want: geo.EarthMeanCircumference / 2, + }, + { + name: "meridian-to-antimeridian", + here: geo.MakePoint(0, 0), + there: geo.MakePoint(0, -180), + want: geo.EarthMeanCircumference / 2, + }, + { + name: "positive-to-negative-antimeridian", + here: geo.MakePoint(0, 180), + there: geo.MakePoint(0, -180), + want: 0 * geo.Meter, + }, + { + name: "toronto-to-montreal", + here: geo.MakePoint(+43.70011, -79.41630), + there: geo.MakePoint(+45.50884, -73.58781), + want: 503_200 * geo.Meter, + }, + { + name: "montreal-to-san-francisco", + here: geo.MakePoint(+45.50884, -73.58781), + there: geo.MakePoint(+37.77493, -122.41942), + want: 4_082_600 * geo.Meter, + }, + } { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.here.DistanceTo(tt.there) + if tt.wantErr == "" && err != nil { + t.Fatalf("err %q, want nil", err) + } + if tt.wantErr != "" && !strings.Contains(err.Error(), tt.wantErr) { + t.Fatalf("err %q, want %q", err, tt.wantErr) + } + + approx := func(x, y geo.Distance) bool { + return math.Abs(float64(x)-float64(y)) <= 10 + } + if !approx(got, tt.want) { + t.Fatalf("got %v, want %v", got, tt.want) + } + }) + } +} From d334d9ba07fa8ae8abb5d39fa5a3e7a277f2dc32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 18 Jul 2025 10:55:17 -0400 Subject: [PATCH 1109/1708] client/local,cmd/tailscale/cli,ipn/localapi: expose eventbus graph (#16597) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make it possible to dump the eventbus graph as JSON or DOT to both debug and document what is communicated via the bus. 
Updates #15160 Signed-off-by: Claus Lensbøl --- client/local/local.go | 6 ++++ cmd/tailscale/cli/debug.go | 56 ++++++++++++++++++++++++++++++++++++++ ipn/localapi/localapi.go | 50 ++++++++++++++++++++++++++++++++++ util/eventbus/debug.go | 13 +++++++++ 4 files changed, 125 insertions(+) diff --git a/client/local/local.go b/client/local/local.go index 74c4f0b6f..55d14f95e 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -432,6 +432,12 @@ func (lc *Client) TailDaemonLogs(ctx context.Context) (io.Reader, error) { return res.Body, nil } +// EventBusGraph returns a graph of active publishers and subscribers in the eventbus +// as a [eventbus.DebugTopics] +func (lc *Client) EventBusGraph(ctx context.Context) ([]byte, error) { + return lc.get200(ctx, "/localapi/v0/debug-bus-graph") +} + // StreamBusEvents returns an iterator of Tailscale bus events as they arrive. // Each pair is a valid event and a nil error, or a zero event a non-nil error. // In case of error, the iterator ends after the pair reporting the error. diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 8473c4a17..fb062fd17 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -6,6 +6,7 @@ package cli import ( "bufio" "bytes" + "cmp" "context" "encoding/binary" "encoding/json" @@ -108,6 +109,17 @@ func debugCmd() *ffcli.Command { Exec: runDaemonBusEvents, ShortHelp: "Watch events on the tailscaled bus", }, + { + Name: "daemon-bus-graph", + ShortUsage: "tailscale debug daemon-bus-graph", + Exec: runDaemonBusGraph, + ShortHelp: "Print graph for the tailscaled bus", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("debug-bus-graph") + fs.StringVar(&daemonBusGraphArgs.format, "format", "json", "output format [json/dot]") + return fs + })(), + }, { Name: "metrics", ShortUsage: "tailscale debug metrics", @@ -807,6 +819,50 @@ func runDaemonBusEvents(ctx context.Context, args []string) error { return nil } +var daemonBusGraphArgs struct { + format string +} + +func runDaemonBusGraph(ctx context.Context, args []string) error { + graph, err := localClient.EventBusGraph(ctx) + if err != nil { + return err + } + if format := daemonBusGraphArgs.format; format != "json" && format != "dot" { + return fmt.Errorf("unrecognized output format %q", format) + } + if daemonBusGraphArgs.format == "dot" { + var topics eventbus.DebugTopics + if err := json.Unmarshal(graph, &topics); err != nil { + return fmt.Errorf("unable to parse json: %w", err) + } + fmt.Print(generateDOTGraph(topics.Topics)) + } else { + fmt.Print(string(graph)) + } + return nil +} + +// generateDOTGraph generates the DOT graph format based on the events +func generateDOTGraph(topics []eventbus.DebugTopic) string { + var sb strings.Builder + sb.WriteString("digraph event_bus {\n") + + for _, topic := range topics { + // If no subscribers, still ensure the topic is drawn + if len(topic.Subscribers) == 0 { + topic.Subscribers = append(topic.Subscribers, "no-subscribers") + } + for _, subscriber := range topic.Subscribers { + fmt.Fprintf(&sb, "\t%q -> %q [label=%q];\n", + topic.Publisher, subscriber, cmp.Or(topic.Name, "???")) + } + } + + sb.WriteString("}\n") + return sb.String() +} + var metricsArgs struct { watch bool } diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index d7c64b917..2409aa1ae 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -93,6 +93,7 @@ var handler = map[string]LocalAPIHandler{ "component-debug-logging": (*Handler).serveComponentDebugLogging, "debug": 
(*Handler).serveDebug, "debug-bus-events": (*Handler).serveDebugBusEvents, + "debug-bus-graph": (*Handler).serveEventBusGraph, "debug-derp-region": (*Handler).serveDebugDERPRegion, "debug-dial-types": (*Handler).serveDebugDialTypes, "debug-log": (*Handler).serveDebugLog, @@ -1004,6 +1005,55 @@ func (h *Handler) serveDebugBusEvents(w http.ResponseWriter, r *http.Request) { } } +// serveEventBusGraph taps into the event bus and dumps out the active graph of +// publishers and subscribers. It does not represent anything about the messages +// exchanged. +func (h *Handler) serveEventBusGraph(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + bus, ok := h.LocalBackend().Sys().Bus.GetOK() + if !ok { + http.Error(w, "event bus not running", http.StatusPreconditionFailed) + return + } + + debugger := bus.Debugger() + clients := debugger.Clients() + + graph := map[string]eventbus.DebugTopic{} + + for _, client := range clients { + for _, pub := range debugger.PublishTypes(client) { + topic, ok := graph[pub.Name()] + if !ok { + topic = eventbus.DebugTopic{Name: pub.Name()} + } + topic.Publisher = client.Name() + graph[pub.Name()] = topic + } + for _, sub := range debugger.SubscribeTypes(client) { + topic, ok := graph[sub.Name()] + if !ok { + topic = eventbus.DebugTopic{Name: sub.Name()} + } + topic.Subscribers = append(topic.Subscribers, client.Name()) + graph[sub.Name()] = topic + } + } + + // The top level map is not really needed for the client, convert to a list. + topics := eventbus.DebugTopics{} + for _, v := range graph { + topics.Topics = append(topics.Topics, v) + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(topics) +} + func (h *Handler) serveComponentDebugLogging(w http.ResponseWriter, r *http.Request) { if !h.PermitWrite { http.Error(w, "debug access denied", http.StatusForbidden) diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index b6264f82f..a055f078f 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -195,3 +195,16 @@ type DebugEvent struct { To []string Event any } + +// DebugTopics provides the JSON encoding as a wrapper for a collection of [DebugTopic]. +type DebugTopics struct { + Topics []DebugTopic +} + +// DebugTopic provides the JSON encoding of publishers and subscribers for a +// given topic. +type DebugTopic struct { + Name string + Publisher string + Subscribers []string +} From 871f73d9924bc046a90d62fdbc0f74b783cc4630 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Fri, 18 Jul 2025 10:55:43 -0400 Subject: [PATCH 1110/1708] Kevin/add drain sub command for serve services (#16502) * cmd/tailscale/cli: add drain subCommand for serve This commit adds the drain subcommand for serving services. After we merge advertise and serve service as one step, we now need a way to unadvertise service and this is it. 
Updates tailscale/corp#22954 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * move runServeDrain and some update regarding pr comments Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * some code structure change Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --------- Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- cmd/tailscale/cli/serve_v2.go | 48 ++++++++++++++++++++++++++++++ cmd/tailscale/cli/serve_v2_test.go | 48 ++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 15de0609c..6fa1a1b08 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -203,6 +203,16 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { Exec: e.runServeReset, FlagSet: e.newFlags("serve-reset", nil), }, + { + Name: "drain", + ShortUsage: fmt.Sprintf("tailscale %s drain ", info.Name), + ShortHelp: "Drain a service from the current node", + LongHelp: "Make the current node no longer accept new connections for the specified service.\n" + + "Existing connections will continue to work until they are closed, but no new connections will be accepted.\n" + + "Use this command to gracefully remove a service from the current node without disrupting existing connections.\n" + + " should be a service name (e.g., svc:my-service).", + Exec: e.runServeDrain, + }, }, } } @@ -443,6 +453,44 @@ func (e *serveEnv) addServiceToPrefs(ctx context.Context, serviceName string) er return err } +func (e *serveEnv) removeServiceFromPrefs(ctx context.Context, serviceName tailcfg.ServiceName) error { + prefs, err := e.lc.GetPrefs(ctx) + if err != nil { + return fmt.Errorf("error getting prefs: %w", err) + } + if len(prefs.AdvertiseServices) == 0 { + return nil // nothing to remove + } + initialLen := len(prefs.AdvertiseServices) + prefs.AdvertiseServices = slices.DeleteFunc(prefs.AdvertiseServices, func(s string) bool { return s == serviceName.String() }) + if initialLen == len(prefs.AdvertiseServices) { + return nil // serviceName not advertised + } + _, err = e.lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: prefs.AdvertiseServices, + }, + }) + return err +} + +func (e *serveEnv) runServeDrain(ctx context.Context, args []string) error { + if len(args) == 0 { + return errHelp + } + if len(args) != 1 { + fmt.Fprintf(Stderr, "error: invalid number of arguments\n\n") + return errHelp + } + svc := args[0] + svcName := tailcfg.ServiceName(svc) + if err := svcName.Validate(); err != nil { + return fmt.Errorf("invalid service name: %s", err) + } + return e.removeServiceFromPrefs(ctx, svcName) +} + const backgroundExistsMsg = "background configuration already exists, use `tailscale %s --%s=%d off` to remove the existing configuration" // validateConfig checks if the serve config is valid to serve the type wanted on the port. 
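For context between the implementation and its tests, a usage sketch (the service name is hypothetical): draining is expected to look like

    tailscale serve drain svc:my-service

which validates the name and then calls removeServiceFromPrefs, i.e. an EditPrefs call with AdvertiseServicesSet set and the service removed from Prefs.AdvertiseServices. The serve config itself is left in place, so existing connections keep working until they close.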
diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index b3e7ea773..2ba0b3f84 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -1212,6 +1212,54 @@ func TestAddServiceToPrefs(t *testing.T) { } +func TestRemoveServiceFromPrefs(t *testing.T) { + tests := []struct { + name string + svcName tailcfg.ServiceName + startServices []string + expected []string + }{ + { + name: "remove service from empty prefs", + svcName: "svc:foo", + expected: []string{}, + }, + { + name: "remove existing service from prefs", + svcName: "svc:foo", + startServices: []string{"svc:foo"}, + expected: []string{}, + }, + { + name: "remove service not in prefs", + svcName: "svc:bar", + startServices: []string{"svc:foo"}, + expected: []string{"svc:foo"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lc := &fakeLocalServeClient{} + ctx := t.Context() + lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: tt.startServices, + }, + }) + e := &serveEnv{lc: lc, bg: bgBoolFlag{true, false}} + err := e.removeServiceFromPrefs(ctx, tt.svcName) + if err != nil { + t.Fatalf("removeServiceFromPrefs(%q) returned unexpected error: %v", tt.svcName, err) + } + if !slices.Equal(lc.prefs.AdvertiseServices, tt.expected) { + t.Errorf("removeServiceFromPrefs(%q) = %v, want %v", tt.svcName, lc.prefs.AdvertiseServices, tt.expected) + } + }) + } +} + func TestMessageForPort(t *testing.T) { svcIPMap := tailcfg.ServiceIPMappings{ "svc:foo": []netip.Addr{ From d1ceb62e2726ce0408a8376e22a27656dbb77d7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Thu, 17 Jul 2025 09:13:19 -0400 Subject: [PATCH 1111/1708] client/systray: look for ubuntu gnome MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ubuntu gnome has a different name on at least 25.04. Updates #1708 Signed-off-by: Claus Lensbøl --- client/systray/systray.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/systray/systray.go b/client/systray/systray.go index a87783c06..76c93ae18 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -128,7 +128,7 @@ func init() { desktop := strings.ToLower(os.Getenv("XDG_CURRENT_DESKTOP")) switch desktop { - case "gnome": + case "gnome", "ubuntu:gnome": // GNOME expands submenus downward in the main menu, rather than flyouts to the side. // Either as a result of that or another limitation, there seems to be a maximum depth of submenus. // Mullvad countries that have a city submenu are not being rendered, and so can't be selected. From 6c206fab58fc556b253e78547cc0073ef0c53975 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 18 Jul 2025 10:17:40 -0700 Subject: [PATCH 1112/1708] feature/tpm: try opening /dev/tpmrm0 before /tmp/tpm0 on Linux (#16600) The tpmrm0 is a kernel-managed version of tpm0 that multiplexes multiple concurrent connections. The basic tpm0 can only be accessed by one application at a time, which can be pretty unreliable. 
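For illustration only, a hedged sketch of the same fallback that also surfaces both open errors when neither device works; the errors.Join wrapping is an editorial assumption and not part of this patch:

    func open() (transport.TPMCloser, error) {
        // Prefer the kernel's TPM resource manager, which multiplexes
        // concurrent clients, before falling back to the exclusive raw device.
        tpm, err := linuxtpm.Open("/dev/tpmrm0")
        if err == nil {
            return tpm, nil
        }
        tpm2, err2 := linuxtpm.Open("/dev/tpm0")
        if err2 == nil {
            return tpm2, nil
        }
        return nil, errors.Join(err, err2)
    }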
Updates #15830 Signed-off-by: Andrew Lytvynov --- feature/tpm/tpm_linux.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/feature/tpm/tpm_linux.go b/feature/tpm/tpm_linux.go index f2d0f1402..6c8131e8d 100644 --- a/feature/tpm/tpm_linux.go +++ b/feature/tpm/tpm_linux.go @@ -9,5 +9,9 @@ import ( ) func open() (transport.TPMCloser, error) { + tpm, err := linuxtpm.Open("/dev/tpmrm0") + if err == nil { + return tpm, nil + } return linuxtpm.Open("/dev/tpm0") } From e01618a7c4eb5113f17f644b9b2ed8204c23a99b Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Fri, 18 Jul 2025 13:46:03 -0400 Subject: [PATCH 1113/1708] cmd/tailscale/cli: Add clear subcommand for serve services (#16509) * cmd/tailscale/cli: add clear subcommand for serve services This commit adds a clear subcommand for serve command, to remove all config for a passed service. This is a short cut for user to remove services after they drain a service. As an indipendent command it would avoid accidently remove a service on typo. Updates tailscale/corp#22954 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * update regarding comments Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * log when clearing a non-existing service but not error Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --------- Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- cmd/tailscale/cli/serve_v2.go | 36 ++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 6fa1a1b08..8832a232d 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -213,6 +213,13 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { " should be a service name (e.g., svc:my-service).", Exec: e.runServeDrain, }, + { + Name: "clear", + ShortUsage: fmt.Sprintf("tailscale %s clear ", info.Name), + ShortHelp: "Remove all config for a service", + LongHelp: "Remove all handlers configured for the specified service.", + Exec: e.runServeClear, + }, }, } } @@ -486,11 +493,38 @@ func (e *serveEnv) runServeDrain(ctx context.Context, args []string) error { svc := args[0] svcName := tailcfg.ServiceName(svc) if err := svcName.Validate(); err != nil { - return fmt.Errorf("invalid service name: %s", err) + return fmt.Errorf("invalid service name: %w", err) } return e.removeServiceFromPrefs(ctx, svcName) } +func (e *serveEnv) runServeClear(ctx context.Context, args []string) error { + if len(args) == 0 { + return errHelp + } + if len(args) != 1 { + fmt.Fprintf(Stderr, "error: invalid number of arguments\n\n") + return errHelp + } + svc := tailcfg.ServiceName(args[0]) + if err := svc.Validate(); err != nil { + return fmt.Errorf("invalid service name: %w", err) + } + sc, err := e.lc.GetServeConfig(ctx) + if err != nil { + return fmt.Errorf("error getting serve config: %w", err) + } + if _, ok := sc.Services[svc]; !ok { + log.Printf("service %s not found in serve config, nothing to clear", svc) + return nil + } + delete(sc.Services, svc) + if err := e.removeServiceFromPrefs(ctx, svc); err != nil { + return fmt.Errorf("error removing service %s from prefs: %w", svc, err) + } + return e.lc.SetServeConfig(ctx, sc) +} + const backgroundExistsMsg = "background configuration already exists, use `tailscale %s --%s=%d off` to remove the existing configuration" // validateConfig checks if the serve config 
is valid to serve the type wanted on the port. From 5adde9e3f3f87cd9ce47832244aad49bcfb96bd8 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Fri, 18 Jul 2025 15:06:09 -0400 Subject: [PATCH 1114/1708] cmd/tailscale/cli: remove advertise command (#16592) This commit removes the advertise command for service. The advertising is now embedded into serve command and unadvertising is moved to drain subcommand Fixes tailscale/corp#22954 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- cmd/tailscale/cli/advertise.go | 76 ---------------------------------- cmd/tailscale/cli/cli.go | 1 - cmd/tailscale/cli/cli_test.go | 2 +- 3 files changed, 1 insertion(+), 78 deletions(-) delete mode 100644 cmd/tailscale/cli/advertise.go diff --git a/cmd/tailscale/cli/advertise.go b/cmd/tailscale/cli/advertise.go deleted file mode 100644 index 83d1a35aa..000000000 --- a/cmd/tailscale/cli/advertise.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package cli - -import ( - "context" - "flag" - "fmt" - "strings" - - "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/envknob" - "tailscale.com/ipn" - "tailscale.com/tailcfg" -) - -var advertiseArgs struct { - services string // comma-separated list of services to advertise -} - -// TODO(naman): This flag may move to set.go or serve_v2.go after the WIPCode -// envknob is not needed. -func advertiseCmd() *ffcli.Command { - if !envknob.UseWIPCode() { - return nil - } - return &ffcli.Command{ - Name: "advertise", - ShortUsage: "tailscale advertise --services=", - ShortHelp: "Advertise this node as a destination for a service", - Exec: runAdvertise, - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("advertise") - fs.StringVar(&advertiseArgs.services, "services", "", "comma-separated services to advertise; each must start with \"svc:\" (e.g. \"svc:idp,svc:nas,svc:database\")") - return fs - })(), - } -} - -func runAdvertise(ctx context.Context, args []string) error { - if len(args) > 0 { - return flag.ErrHelp - } - - services, err := parseServiceNames(advertiseArgs.services) - if err != nil { - return err - } - - _, err = localClient.EditPrefs(ctx, &ipn.MaskedPrefs{ - AdvertiseServicesSet: true, - Prefs: ipn.Prefs{ - AdvertiseServices: services, - }, - }) - return err -} - -// parseServiceNames takes a comma-separated list of service names -// (eg. "svc:hello,svc:webserver,svc:catphotos"), splits them into -// a list and validates each service name. If valid, it returns -// the service names in a slice of strings. -func parseServiceNames(servicesArg string) ([]string, error) { - var services []string - if servicesArg != "" { - services = strings.Split(servicesArg, ",") - for _, svc := range services { - err := tailcfg.ServiceName(svc).Validate() - if err != nil { - return nil, fmt.Errorf("service %q: %s", svc, err) - } - } - } - return services, nil -} diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index d7e8e5ca2..bdfc7af42 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -260,7 +260,6 @@ change in the future. debugCmd(), driveCmd, idTokenCmd, - advertiseCmd(), configureHostCmd(), ), FlagSet: rootfs, diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 5dd4fa234..2e1bec8c9 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -964,7 +964,7 @@ func TestPrefFlagMapping(t *testing.T) { // flag for this. 
continue case "AdvertiseServices": - // Handled by the tailscale advertise subcommand, we don't want a + // Handled by the tailscale serve subcommand, we don't want a // CLI flag for this. continue case "InternalExitNodePrior": From f421907c38df057e1b293613644532f31e77b24b Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Mon, 21 Jul 2025 11:03:21 +0100 Subject: [PATCH 1115/1708] all-kube: create Tailscale Service for HA kube-apiserver ProxyGroup (#16572) Adds a new reconciler for ProxyGroups of type kube-apiserver that will provision a Tailscale Service for each replica to advertise. Adds two new condition types to the ProxyGroup, TailscaleServiceValid and TailscaleServiceConfigured, to post updates on the state of that reconciler in a way that's consistent with the service-pg reconciler. The created Tailscale Service name is configurable via a new ProxyGroup field spec.kubeAPISserver.ServiceName, which expects a string of the form "svc:". Lots of supporting changes were needed to implement this in a way that's consistent with other operator workflows, including: * Pulled containerboot's ensureServicesUnadvertised and certManager into kube/ libraries to be shared with k8s-proxy. Use those in k8s-proxy to aid Service cert sharing between replicas and graceful Service shutdown. * For certManager, add an initial wait to the cert loop to wait until the domain appears in the devices's netmap to avoid a guaranteed error on the first issue attempt when it's quick to start. * Made several methods in ingress-for-pg.go and svc-for-pg.go into functions to share with the new reconciler * Added a Resource struct to the owner refs stored in Tailscale Service annotations to be able to distinguish between Ingress- and ProxyGroup- based Services that need cleaning up in the Tailscale API. * Added a ListVIPServices method to the internal tailscale client to aid cleaning up orphaned Services * Support for reading config from a kube Secret, and partial support for config reloading, to prevent us having to force Pod restarts when config changes. * Fixed up the zap logger so it's possible to set debug log level. 
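As a hedged orientation sketch before the diff: the new kube-apiserver reconciler records roughly the following owner reference on each Tailscale Service it creates (the OwnerRef and Resource field names come from cmd/k8s-operator/api-server-proxy-pg.go below; the ID values are invented):

    // The annotation records both the owning operator and the owning
    // in-cluster resource, which is what lets cleanup distinguish
    // ProxyGroup-owned Tailscale Services from Ingress-owned ones.
    ref := OwnerRef{
        OperatorID: "op-abc123", // stable ID of this operator's Tailscale device
        Resource: &Resource{
            Kind: "ProxyGroup",
            Name: "test-pg",
            UID:  "test-pg-uid",
        },
    }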
Updates #13358 Change-Id: Ia9607441157dd91fb9b6ecbc318eecbef446e116 Signed-off-by: Tom Proctor --- cmd/containerboot/main.go | 3 +- cmd/containerboot/serve.go | 10 +- cmd/k8s-operator/api-server-proxy-pg.go | 479 ++++++++++++++++++ cmd/k8s-operator/api-server-proxy-pg_test.go | 384 ++++++++++++++ .../{proxy.go => api-server-proxy.go} | 0 .../crds/tailscale.com_proxygroups.yaml | 56 +- .../deploy/manifests/operator.yaml | 56 +- cmd/k8s-operator/egress-eps_test.go | 3 +- cmd/k8s-operator/ingress-for-pg.go | 77 +-- cmd/k8s-operator/ingress-for-pg_test.go | 32 +- cmd/k8s-operator/operator.go | 70 ++- cmd/k8s-operator/proxygroup.go | 131 +++-- cmd/k8s-operator/proxygroup_specs.go | 41 +- cmd/k8s-operator/proxygroup_test.go | 162 +++++- cmd/k8s-operator/svc-for-pg.go | 18 +- cmd/k8s-operator/svc-for-pg_test.go | 13 +- cmd/k8s-operator/testutils_test.go | 8 +- cmd/k8s-operator/tsclient.go | 2 + cmd/k8s-proxy/internal/config/config.go | 264 ++++++++++ cmd/k8s-proxy/internal/config/config_test.go | 245 +++++++++ cmd/k8s-proxy/k8s-proxy.go | 268 ++++++++-- internal/client/tailscale/vip_service.go | 28 + ipn/store/kubestore/store_kube.go | 6 +- ipn/store/kubestore/store_kube_test.go | 7 +- k8s-operator/api-proxy/proxy.go | 65 ++- k8s-operator/api.md | 25 +- k8s-operator/apis/v1alpha1/types_connector.go | 3 + .../apis/v1alpha1/types_proxygroup.go | 54 +- k8s-operator/conditions.go | 10 + {cmd/containerboot => kube/certs}/certs.go | 107 ++-- .../certs}/certs_test.go | 35 +- kube/k8s-proxy/conf/conf.go | 54 +- kube/k8s-proxy/conf/conf_test.go | 9 +- kube/kubetypes/types.go | 6 + kube/localclient/fake-client.go | 35 ++ kube/localclient/local-client.go | 49 ++ .../services}/services.go | 20 +- kube/state/state.go | 16 +- kube/state/state_test.go | 97 ++-- 39 files changed, 2551 insertions(+), 397 deletions(-) create mode 100644 cmd/k8s-operator/api-server-proxy-pg.go create mode 100644 cmd/k8s-operator/api-server-proxy-pg_test.go rename cmd/k8s-operator/{proxy.go => api-server-proxy.go} (100%) create mode 100644 cmd/k8s-proxy/internal/config/config.go create mode 100644 cmd/k8s-proxy/internal/config/config_test.go rename {cmd/containerboot => kube/certs}/certs.go (60%) rename {cmd/containerboot => kube/certs}/certs_test.go (89%) create mode 100644 kube/localclient/fake-client.go create mode 100644 kube/localclient/local-client.go rename {cmd/containerboot => kube/services}/services.go (74%) diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 52b30b837..49c8a473a 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -122,6 +122,7 @@ import ( "tailscale.com/ipn" kubeutils "tailscale.com/k8s-operator" "tailscale.com/kube/kubetypes" + "tailscale.com/kube/services" "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/types/ptr" @@ -210,7 +211,7 @@ func run() error { ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second) defer cancel() - if err := ensureServicesNotAdvertised(ctx, client); err != nil { + if err := services.EnsureServicesNotAdvertised(ctx, client, log.Printf); err != nil { log.Printf("Error ensuring services are not advertised: %v", err) } diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index 37fd49777..5fa8e580d 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -19,7 +19,9 @@ import ( "github.com/fsnotify/fsnotify" "tailscale.com/client/local" "tailscale.com/ipn" + "tailscale.com/kube/certs" "tailscale.com/kube/kubetypes" + klc "tailscale.com/kube/localclient" 
"tailscale.com/types/netmap" ) @@ -52,11 +54,9 @@ func watchServeConfigChanges(ctx context.Context, cdChanged <-chan bool, certDom var certDomain string var prevServeConfig *ipn.ServeConfig - var cm certManager + var cm *certs.CertManager if cfg.CertShareMode == "rw" { - cm = certManager{ - lc: lc, - } + cm = certs.NewCertManager(klc.New(lc), log.Printf) } for { select { @@ -93,7 +93,7 @@ func watchServeConfigChanges(ctx context.Context, cdChanged <-chan bool, certDom if cfg.CertShareMode != "rw" { continue } - if err := cm.ensureCertLoops(ctx, sc); err != nil { + if err := cm.EnsureCertLoops(ctx, sc); err != nil { log.Fatalf("serve proxy: error ensuring cert loops: %v", err) } } diff --git a/cmd/k8s-operator/api-server-proxy-pg.go b/cmd/k8s-operator/api-server-proxy-pg.go new file mode 100644 index 000000000..252859eb3 --- /dev/null +++ b/cmd/k8s-operator/api-server-proxy-pg.go @@ -0,0 +1,479 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "maps" + "slices" + "strings" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/internal/client/tailscale" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" + "tailscale.com/tstime" +) + +const ( + proxyPGFinalizerName = "tailscale.com/kube-apiserver-finalizer" + + // Reasons for KubeAPIServerProxyValid condition. + reasonKubeAPIServerProxyInvalid = "KubeAPIServerProxyInvalid" + reasonKubeAPIServerProxyValid = "KubeAPIServerProxyValid" + + // Reasons for KubeAPIServerProxyConfigured condition. + reasonKubeAPIServerProxyConfigured = "KubeAPIServerProxyConfigured" + reasonKubeAPIServerProxyNoBackends = "KubeAPIServerProxyNoBackends" +) + +// KubeAPIServerTSServiceReconciler reconciles the Tailscale Services required for an +// HA deployment of the API Server Proxy. +type KubeAPIServerTSServiceReconciler struct { + client.Client + recorder record.EventRecorder + logger *zap.SugaredLogger + tsClient tsClient + tsNamespace string + lc localClient + defaultTags []string + operatorID string // stableID of the operator's Tailscale device + + clock tstime.Clock +} + +// Reconcile is the entry point for the controller. +func (r *KubeAPIServerTSServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { + logger := r.logger.With("ProxyGroup", req.Name) + logger.Debugf("starting reconcile") + defer logger.Debugf("reconcile finished") + + pg := new(tsapi.ProxyGroup) + err = r.Get(ctx, req.NamespacedName, pg) + if apierrors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. 
+ logger.Debugf("ProxyGroup not found, assuming it was deleted") + return res, nil + } else if err != nil { + return res, fmt.Errorf("failed to get ProxyGroup: %w", err) + } + + serviceName := serviceNameForAPIServerProxy(pg) + logger = logger.With("Tailscale Service", serviceName) + + if markedForDeletion(pg) { + logger.Debugf("ProxyGroup is being deleted, ensuring any created resources are cleaned up") + if err = r.maybeCleanup(ctx, serviceName, pg, logger); err != nil && strings.Contains(err.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error, retrying: %s", err) + return res, nil + } + + return res, err + } + + err = r.maybeProvision(ctx, serviceName, pg, logger) + if err != nil { + if strings.Contains(err.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error, retrying: %s", err) + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil +} + +// maybeProvision ensures that a Tailscale Service for this ProxyGroup exists +// and is up to date. +// +// Returns true if the operation resulted in a Tailscale Service update. +func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (err error) { + var dnsName string + oldPGStatus := pg.Status.DeepCopy() + defer func() { + podsAdvertising, podsErr := numberPodsAdvertising(ctx, r.Client, r.tsNamespace, pg.Name, serviceName) + if podsErr != nil { + err = errors.Join(err, fmt.Errorf("failed to get number of advertised Pods: %w", podsErr)) + // Continue, updating the status with the best available information. + } + + // Update the ProxyGroup status with the Tailscale Service information + // Update the condition based on how many pods are advertising the service + conditionStatus := metav1.ConditionFalse + conditionReason := reasonKubeAPIServerProxyNoBackends + conditionMessage := fmt.Sprintf("%d/%d proxy backends ready and advertising", podsAdvertising, pgReplicas(pg)) + + pg.Status.URL = "" + if podsAdvertising > 0 { + // At least one pod is advertising the service, consider it configured + conditionStatus = metav1.ConditionTrue + conditionReason = reasonKubeAPIServerProxyConfigured + if dnsName != "" { + pg.Status.URL = "https://" + dnsName + } + } + + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, conditionStatus, conditionReason, conditionMessage, pg.Generation, r.clock, logger) + + if !apiequality.Semantic.DeepEqual(oldPGStatus, &pg.Status) { + // An error encountered here should get returned by the Reconcile function. + err = errors.Join(err, r.Client.Status().Update(ctx, pg)) + } + }() + + if !tsoperator.ProxyGroupAvailable(pg) { + return nil + } + + if !slices.Contains(pg.Finalizers, proxyPGFinalizerName) { + // This log line is printed exactly once during initial provisioning, + // because once the finalizer is in place this block gets skipped. So, + // this is a nice place to tell the operator that the high level, + // multi-reconcile operation is underway. + logger.Info("provisioning Tailscale Service for ProxyGroup") + pg.Finalizers = append(pg.Finalizers, proxyPGFinalizerName) + if err := r.Update(ctx, pg); err != nil { + return fmt.Errorf("failed to add finalizer: %w", err) + } + } + + // 1. Check there isn't a Tailscale Service with the same hostname + // already created and not owned by this ProxyGroup. 
+ existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) + if isErrorFeatureFlagNotEnabled(err) { + logger.Warn(msgFeatureFlagNotEnabled) + r.recorder.Event(pg, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionFalse, reasonKubeAPIServerProxyInvalid, msgFeatureFlagNotEnabled, pg.Generation, r.clock, logger) + return nil + } + if err != nil && !isErrorTailscaleServiceNotFound(err) { + return fmt.Errorf("error getting Tailscale Service %q: %w", serviceName, err) + } + + updatedAnnotations, err := exclusiveOwnerAnnotations(pg, r.operatorID, existingTSSvc) + if err != nil { + const instr = "To proceed, you can either manually delete the existing Tailscale Service or choose a different Service name in the ProxyGroup's spec.kubeAPIServer.serviceName field" + msg := fmt.Sprintf("error ensuring exclusive ownership of Tailscale Service %s: %v. %s", serviceName, err, instr) + logger.Warn(msg) + r.recorder.Event(pg, corev1.EventTypeWarning, "InvalidTailscaleService", msg) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionFalse, reasonKubeAPIServerProxyInvalid, msg, pg.Generation, r.clock, logger) + return nil + } + + // After getting this far, we know the Tailscale Service is valid. + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, reasonKubeAPIServerProxyValid, pg.Generation, r.clock, logger) + + // Service tags are limited to matching the ProxyGroup's tags until we have + // support for querying peer caps for a Service-bound request. + serviceTags := r.defaultTags + if len(pg.Spec.Tags) > 0 { + serviceTags = pg.Spec.Tags.Stringify() + } + + tsSvc := &tailscale.VIPService{ + Name: serviceName, + Tags: serviceTags, + Ports: []string{"tcp:443"}, + Comment: managedTSServiceComment, + Annotations: updatedAnnotations, + } + if existingTSSvc != nil { + tsSvc.Addrs = existingTSSvc.Addrs + } + + // 2. Ensure the Tailscale Service exists and is up to date. + if existingTSSvc == nil || + !slices.Equal(tsSvc.Tags, existingTSSvc.Tags) || + !ownersAreSetAndEqual(tsSvc, existingTSSvc) || + !slices.Equal(tsSvc.Ports, existingTSSvc.Ports) { + logger.Infof("Ensuring Tailscale Service exists and is up to date") + if err := r.tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { + return fmt.Errorf("error creating Tailscale Service: %w", err) + } + } + + // 3. Ensure that TLS Secret and RBAC exists. + tcd, err := tailnetCertDomain(ctx, r.lc) + if err != nil { + return fmt.Errorf("error determining DNS name base: %w", err) + } + dnsName = serviceName.WithoutPrefix() + "." + tcd + if err = r.ensureCertResources(ctx, pg, dnsName); err != nil { + return fmt.Errorf("error ensuring cert resources: %w", err) + } + + // 4. Configure the Pods to advertise the Tailscale Service. + if err = r.maybeAdvertiseServices(ctx, pg, serviceName, logger); err != nil { + return fmt.Errorf("error updating advertised Tailscale Services: %w", err) + } + + // 5. Clean up any stale Tailscale Services from previous resource versions. + if err = r.maybeDeleteStaleServices(ctx, pg, logger); err != nil { + return fmt.Errorf("failed to delete stale Tailscale Services: %w", err) + } + + return nil +} + +// maybeCleanup ensures that any resources, such as a Tailscale Service created for this Service, are cleaned up when the +// Service is being deleted or is unexposed. 
The cleanup is safe for a multi-cluster setup- the Tailscale Service is only +// deleted if it does not contain any other owner references. If it does, the cleanup only removes the owner reference +// corresponding to this Service. +func (r *KubeAPIServerTSServiceReconciler) maybeCleanup(ctx context.Context, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (err error) { + ix := slices.Index(pg.Finalizers, proxyPGFinalizerName) + if ix < 0 { + logger.Debugf("no finalizer, nothing to do") + return nil + } + logger.Infof("Ensuring that Service %q is cleaned up", serviceName) + + defer func() { + if err == nil { + err = r.deleteFinalizer(ctx, pg, logger) + } + }() + + if _, err = cleanupTailscaleService(ctx, r.tsClient, serviceName, r.operatorID, logger); err != nil { + return fmt.Errorf("error deleting Tailscale Service: %w", err) + } + + if err = cleanupCertResources(ctx, r.Client, r.lc, r.tsNamespace, pg.Name, serviceName); err != nil { + return fmt.Errorf("failed to clean up cert resources: %w", err) + } + + return nil +} + +// maybeDeleteStaleServices deletes Services that have previously been created for +// this ProxyGroup but are no longer needed. +func (r *KubeAPIServerTSServiceReconciler) maybeDeleteStaleServices(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) error { + serviceName := serviceNameForAPIServerProxy(pg) + + svcs, err := r.tsClient.ListVIPServices(ctx) + if err != nil { + return fmt.Errorf("error listing Tailscale Services: %w", err) + } + + for _, svc := range svcs.VIPServices { + if svc.Name == serviceName { + continue + } + + owners, err := parseOwnerAnnotation(&svc) + if err != nil { + logger.Warnf("error parsing owner annotation for Tailscale Service %s: %v", svc.Name, err) + continue + } + if owners == nil || len(owners.OwnerRefs) != 1 || owners.OwnerRefs[0].OperatorID != r.operatorID { + continue + } + + owner := owners.OwnerRefs[0] + if owner.Resource == nil || owner.Resource.Kind != "ProxyGroup" || owner.Resource.UID != string(pg.UID) { + continue + } + + logger.Infof("Deleting Tailscale Service %s", svc.Name) + if err := r.tsClient.DeleteVIPService(ctx, svc.Name); err != nil && !isErrorTailscaleServiceNotFound(err) { + return fmt.Errorf("error deleting Tailscale Service %s: %w", svc.Name, err) + } + + if err = cleanupCertResources(ctx, r.Client, r.lc, r.tsNamespace, pg.Name, svc.Name); err != nil { + return fmt.Errorf("failed to clean up cert resources: %w", err) + } + } + + return nil +} + +func (r *KubeAPIServerTSServiceReconciler) deleteFinalizer(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) error { + pg.Finalizers = slices.DeleteFunc(pg.Finalizers, func(f string) bool { + return f == proxyPGFinalizerName + }) + logger.Debugf("ensure %q finalizer is removed", proxyPGFinalizerName) + + if err := r.Update(ctx, pg); err != nil { + return fmt.Errorf("failed to remove finalizer %q: %w", proxyPGFinalizerName, err) + } + return nil +} + +func (r *KubeAPIServerTSServiceReconciler) ensureCertResources(ctx context.Context, pg *tsapi.ProxyGroup, domain string) error { + secret := certSecret(pg.Name, r.tsNamespace, domain, pg) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, secret, func(s *corev1.Secret) { + s.Labels = secret.Labels + }); err != nil { + return fmt.Errorf("failed to create or update Secret %s: %w", secret.Name, err) + } + role := certSecretRole(pg.Name, r.tsNamespace, domain) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { + 
r.Labels = role.Labels + r.Rules = role.Rules + }); err != nil { + return fmt.Errorf("failed to create or update Role %s: %w", role.Name, err) + } + rolebinding := certSecretRoleBinding(pg, r.tsNamespace, domain) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, rolebinding, func(rb *rbacv1.RoleBinding) { + rb.Labels = rolebinding.Labels + rb.Subjects = rolebinding.Subjects + rb.RoleRef = rolebinding.RoleRef + }); err != nil { + return fmt.Errorf("failed to create or update RoleBinding %s: %w", rolebinding.Name, err) + } + return nil +} + +func (r *KubeAPIServerTSServiceReconciler) maybeAdvertiseServices(ctx context.Context, pg *tsapi.ProxyGroup, serviceName tailcfg.ServiceName, logger *zap.SugaredLogger) error { + // Get all config Secrets for this ProxyGroup + cfgSecrets := &corev1.SecretList{} + if err := r.List(ctx, cfgSecrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeConfig))); err != nil { + return fmt.Errorf("failed to list config Secrets: %w", err) + } + + // Only advertise a Tailscale Service once the TLS certs required for + // serving it are available. + shouldBeAdvertised, err := hasCerts(ctx, r.Client, r.lc, r.tsNamespace, serviceName) + if err != nil { + return fmt.Errorf("error checking TLS credentials provisioned for Tailscale Service %q: %w", serviceName, err) + } + var advertiseServices []string + if shouldBeAdvertised { + advertiseServices = []string{serviceName.String()} + } + + for _, s := range cfgSecrets.Items { + if len(s.Data[kubetypes.KubeAPIServerConfigFile]) == 0 { + continue + } + + // Parse the existing config. + cfg, err := conf.Load(s.Data[kubetypes.KubeAPIServerConfigFile]) + if err != nil { + return fmt.Errorf("error loading config from Secret %q: %w", s.Name, err) + } + + if cfg.Parsed.APIServerProxy == nil { + return fmt.Errorf("config Secret %q does not contain APIServerProxy config", s.Name) + } + + existingCfgSecret := s.DeepCopy() + + var updated bool + if cfg.Parsed.APIServerProxy.ServiceName == nil || *cfg.Parsed.APIServerProxy.ServiceName != serviceName { + cfg.Parsed.APIServerProxy.ServiceName = &serviceName + updated = true + } + + // Update the services to advertise if required. + if !slices.Equal(cfg.Parsed.AdvertiseServices, advertiseServices) { + cfg.Parsed.AdvertiseServices = advertiseServices + updated = true + } + + if !updated { + continue + } + + // Update the config Secret. + cfgB, err := json.Marshal(conf.VersionedConfig{ + Version: "v1alpha1", + ConfigV1Alpha1: &cfg.Parsed, + }) + if err != nil { + return err + } + + s.Data[kubetypes.KubeAPIServerConfigFile] = cfgB + if !apiequality.Semantic.DeepEqual(existingCfgSecret, s) { + logger.Debugf("Updating the Tailscale Services in ProxyGroup config Secret %s", s.Name) + if err := r.Update(ctx, &s); err != nil { + return err + } + } + } + + return nil +} + +func serviceNameForAPIServerProxy(pg *tsapi.ProxyGroup) tailcfg.ServiceName { + if pg.Spec.KubeAPIServer != nil && pg.Spec.KubeAPIServer.Hostname != "" { + return tailcfg.ServiceName("svc:" + pg.Spec.KubeAPIServer.Hostname) + } + + return tailcfg.ServiceName("svc:" + pg.Name) +} + +// exclusiveOwnerAnnotations returns the updated annotations required to ensure this +// instance of the operator is the exclusive owner. If the Tailscale Service is not +// nil, but does not contain an owner reference we return an error as this likely means +// that the Service was created by something other than a Tailscale Kubernetes operator. 
+// We also error if it is already owned by another operator instance, as we do not
+// want to load balance a kube-apiserver ProxyGroup across multiple clusters.
+func exclusiveOwnerAnnotations(pg *tsapi.ProxyGroup, operatorID string, svc *tailscale.VIPService) (map[string]string, error) {
+	ref := OwnerRef{
+		OperatorID: operatorID,
+		Resource: &Resource{
+			Kind: "ProxyGroup",
+			Name: pg.Name,
+			UID:  string(pg.UID),
+		},
+	}
+	if svc == nil {
+		c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}}
+		json, err := json.Marshal(c)
+		if err != nil {
+			return nil, fmt.Errorf("[unexpected] unable to marshal Tailscale Service's owner annotation contents: %w, please report this", err)
+		}
+		return map[string]string{
+			ownerAnnotation: string(json),
+		}, nil
+	}
+	o, err := parseOwnerAnnotation(svc)
+	if err != nil {
+		return nil, err
+	}
+	if o == nil || len(o.OwnerRefs) == 0 {
+		return nil, fmt.Errorf("Tailscale Service %s exists, but does not contain owner annotation with owner references; not proceeding as this is likely a resource created by something other than the Tailscale Kubernetes operator", svc.Name)
+	}
+	if len(o.OwnerRefs) > 1 || o.OwnerRefs[0].OperatorID != operatorID {
+		return nil, fmt.Errorf("Tailscale Service %s is already owned by other operator(s) and cannot be shared across multiple clusters; configure a different Service name to continue", svc.Name)
+	}
+	if o.OwnerRefs[0].Resource == nil {
+		return nil, fmt.Errorf("Tailscale Service %s exists, but does not reference an owning resource; not proceeding as this is likely a Service already owned by an Ingress", svc.Name)
+	}
+	if o.OwnerRefs[0].Resource.Kind != "ProxyGroup" || o.OwnerRefs[0].Resource.UID != string(pg.UID) {
+		return nil, fmt.Errorf("Tailscale Service %s is already owned by another resource: %#v; configure a different Service name to continue", svc.Name, o.OwnerRefs[0].Resource)
+	}
+	if o.OwnerRefs[0].Resource.Name != pg.Name {
+		// ProxyGroup name can be updated in place.
+ o.OwnerRefs[0].Resource.Name = pg.Name + } + + oBytes, err := json.Marshal(o) + if err != nil { + return nil, err + } + + newAnnots := make(map[string]string, len(svc.Annotations)+1) + maps.Copy(newAnnots, svc.Annotations) + newAnnots[ownerAnnotation] = string(oBytes) + + return newAnnots, nil +} diff --git a/cmd/k8s-operator/api-server-proxy-pg_test.go b/cmd/k8s-operator/api-server-proxy-pg_test.go new file mode 100644 index 000000000..dfef63f22 --- /dev/null +++ b/cmd/k8s-operator/api-server-proxy-pg_test.go @@ -0,0 +1,384 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "encoding/json" + "reflect" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn/ipnstate" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" + "tailscale.com/tstest" + "tailscale.com/types/opt" + "tailscale.com/types/ptr" +) + +func TestAPIServerProxyReconciler(t *testing.T) { + const ( + pgName = "test-pg" + pgUID = "test-pg-uid" + ns = "operator-ns" + defaultDomain = "test-pg.ts.net" + ) + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgName, + Generation: 1, + UID: pgUID, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeKubernetesAPIServer, + }, + Status: tsapi.ProxyGroupStatus{ + Conditions: []metav1.Condition{ + { + Type: string(tsapi.ProxyGroupAvailable), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + } + initialCfg := &conf.VersionedConfig{ + Version: "v1alpha1", + ConfigV1Alpha1: &conf.ConfigV1Alpha1{ + AuthKey: ptr.To("test-key"), + APIServerProxy: &conf.APIServerProxyConfig{ + Enabled: opt.NewBool(true), + }, + }, + } + expectedCfg := *initialCfg + initialCfgB, err := json.Marshal(initialCfg) + if err != nil { + t.Fatalf("marshaling initial config: %v", err) + } + pgCfgSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName(pgName, 0), + Namespace: ns, + Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig), + }, + Data: map[string][]byte{ + // Existing config should be preserved. + kubetypes.KubeAPIServerConfigFile: initialCfgB, + }, + } + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(pg, pgCfgSecret). + WithStatusSubresource(pg). + Build() + expectCfg := func(c *conf.VersionedConfig) { + t.Helper() + cBytes, err := json.Marshal(c) + if err != nil { + t.Fatalf("marshaling expected config: %v", err) + } + pgCfgSecret.Data[kubetypes.KubeAPIServerConfigFile] = cBytes + expectEqual(t, fc, pgCfgSecret) + } + + ft := &fakeTSClient{} + ingressTSSvc := &tailscale.VIPService{ + Name: "svc:some-ingress-hostname", + Comment: managedTSServiceComment, + Annotations: map[string]string{ + // No resource field. 
+ ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id"}]}`, + }, + Ports: []string{"tcp:443"}, + Tags: []string{"tag:k8s"}, + Addrs: []string{"5.6.7.8"}, + } + ft.CreateOrUpdateVIPService(t.Context(), ingressTSSvc) + + lc := &fakeLocalClient{ + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{ + MagicDNSSuffix: "ts.net", + }, + }, + } + + r := &KubeAPIServerTSServiceReconciler{ + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + tsNamespace: ns, + logger: zap.Must(zap.NewDevelopment()).Sugar(), + recorder: record.NewFakeRecorder(10), + lc: lc, + clock: tstest.NewClock(tstest.ClockOpts{}), + operatorID: "self-id", + } + + // Create a Tailscale Service that will conflict with the initial config. + if err := ft.CreateOrUpdateVIPService(t.Context(), &tailscale.VIPService{ + Name: "svc:" + pgName, + }); err != nil { + t.Fatalf("creating initial Tailscale Service: %v", err) + } + expectReconciled(t, r, "", pgName) + pg.ObjectMeta.Finalizers = []string{proxyPGFinalizerName} + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionFalse, reasonKubeAPIServerProxyInvalid, "", 1, r.clock, r.logger) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionFalse, reasonKubeAPIServerProxyNoBackends, "", 1, r.clock, r.logger) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + expectMissing[corev1.Secret](t, fc, ns, defaultDomain) + expectMissing[rbacv1.Role](t, fc, ns, defaultDomain) + expectMissing[rbacv1.RoleBinding](t, fc, ns, defaultDomain) + expectEqual(t, fc, pgCfgSecret) // Unchanged. + + // Delete Tailscale Service; should see Service created and valid condition updated to true. + if err := ft.DeleteVIPService(t.Context(), "svc:"+pgName); err != nil { + t.Fatalf("deleting initial Tailscale Service: %v", err) + } + expectReconciled(t, r, "", pgName) + + tsSvc, err := ft.GetVIPService(t.Context(), "svc:"+pgName) + if err != nil { + t.Fatalf("getting Tailscale Service: %v", err) + } + if tsSvc == nil { + t.Fatalf("expected Tailscale Service to be created, but got nil") + } + expectedTSSvc := &tailscale.VIPService{ + Name: "svc:" + pgName, + Comment: managedTSServiceComment, + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"test-pg","uid":"test-pg-uid"}}]}`, + }, + Ports: []string{"tcp:443"}, + Tags: []string{"tag:k8s"}, + Addrs: []string{"5.6.7.8"}, + } + if !reflect.DeepEqual(tsSvc, expectedTSSvc) { + t.Fatalf("expected Tailscale Service to be %+v, got %+v", expectedTSSvc, tsSvc) + } + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.logger) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionFalse, reasonKubeAPIServerProxyNoBackends, "", 1, r.clock, r.logger) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + expectedCfg.APIServerProxy.ServiceName = ptr.To(tailcfg.ServiceName("svc:" + pgName)) + expectCfg(&expectedCfg) + + expectEqual(t, fc, certSecret(pgName, ns, defaultDomain, pg)) + expectEqual(t, fc, certSecretRole(pgName, ns, defaultDomain)) + expectEqual(t, fc, certSecretRoleBinding(pg, ns, defaultDomain)) + + // Simulate certs being issued; should observe AdvertiseServices config change. 
+ if err := populateTLSSecret(t.Context(), fc, pgName, defaultDomain); err != nil { + t.Fatalf("populating TLS Secret: %v", err) + } + expectReconciled(t, r, "", pgName) + + expectedCfg.AdvertiseServices = []string{"svc:" + pgName} + expectCfg(&expectedCfg) + + expectEqual(t, fc, pg, omitPGStatusConditionMessages) // Unchanged status. + + // Simulate Pod prefs updated with advertised services; should see Configured condition updated to true. + mustCreate(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg-0", + Namespace: ns, + Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeState), + }, + Data: map[string][]byte{ + "_current-profile": []byte("profile-foo"), + "profile-foo": []byte(`{"AdvertiseServices":["svc:test-pg"],"Config":{"NodeID":"node-foo"}}`), + }, + }) + expectReconciled(t, r, "", pgName) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.logger) + pg.Status.URL = "https://" + defaultDomain + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + // Rename the Tailscale Service - old one + cert resources should be cleaned up. + updatedServiceName := tailcfg.ServiceName("svc:test-pg-renamed") + updatedDomain := "test-pg-renamed.ts.net" + pg.Spec.KubeAPIServer = &tsapi.KubeAPIServerConfig{ + Hostname: updatedServiceName.WithoutPrefix(), + } + mustUpdate(t, fc, "", pgName, func(p *tsapi.ProxyGroup) { + p.Spec.KubeAPIServer = pg.Spec.KubeAPIServer + }) + expectReconciled(t, r, "", pgName) + _, err = ft.GetVIPService(t.Context(), "svc:"+pgName) + if !isErrorTailscaleServiceNotFound(err) { + t.Fatalf("Expected 404, got: %v", err) + } + tsSvc, err = ft.GetVIPService(t.Context(), updatedServiceName) + if err != nil { + t.Fatalf("Expected renamed svc, got error: %v", err) + } + expectedTSSvc.Name = updatedServiceName + if !reflect.DeepEqual(tsSvc, expectedTSSvc) { + t.Fatalf("expected Tailscale Service to be %+v, got %+v", expectedTSSvc, tsSvc) + } + // Check cfg and status reset until TLS certs are available again. + expectedCfg.APIServerProxy.ServiceName = ptr.To(updatedServiceName) + expectedCfg.AdvertiseServices = nil + expectCfg(&expectedCfg) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionFalse, reasonKubeAPIServerProxyNoBackends, "", 1, r.clock, r.logger) + pg.Status.URL = "" + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + expectEqual(t, fc, certSecret(pgName, ns, updatedDomain, pg)) + expectEqual(t, fc, certSecretRole(pgName, ns, updatedDomain)) + expectEqual(t, fc, certSecretRoleBinding(pg, ns, updatedDomain)) + expectMissing[corev1.Secret](t, fc, ns, defaultDomain) + expectMissing[rbacv1.Role](t, fc, ns, defaultDomain) + expectMissing[rbacv1.RoleBinding](t, fc, ns, defaultDomain) + + // Check we get the new hostname in the status once ready. 
+ if err := populateTLSSecret(t.Context(), fc, pgName, updatedDomain); err != nil { + t.Fatalf("populating TLS Secret: %v", err) + } + mustUpdate(t, fc, "operator-ns", "test-pg-0", func(s *corev1.Secret) { + s.Data["profile-foo"] = []byte(`{"AdvertiseServices":["svc:test-pg"],"Config":{"NodeID":"node-foo"}}`) + }) + expectReconciled(t, r, "", pgName) + expectedCfg.AdvertiseServices = []string{updatedServiceName.String()} + expectCfg(&expectedCfg) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.logger) + pg.Status.URL = "https://" + updatedDomain + + // Delete the ProxyGroup and verify Tailscale Service and cert resources are cleaned up. + if err := fc.Delete(t.Context(), pg); err != nil { + t.Fatalf("deleting ProxyGroup: %v", err) + } + expectReconciled(t, r, "", pgName) + expectMissing[corev1.Secret](t, fc, ns, updatedDomain) + expectMissing[rbacv1.Role](t, fc, ns, updatedDomain) + expectMissing[rbacv1.RoleBinding](t, fc, ns, updatedDomain) + _, err = ft.GetVIPService(t.Context(), updatedServiceName) + if !isErrorTailscaleServiceNotFound(err) { + t.Fatalf("Expected 404, got: %v", err) + } + + // Ingress Tailscale Service should not be affected. + svc, err := ft.GetVIPService(t.Context(), ingressTSSvc.Name) + if err != nil { + t.Fatalf("getting ingress Tailscale Service: %v", err) + } + if !reflect.DeepEqual(svc, ingressTSSvc) { + t.Fatalf("expected ingress Tailscale Service to be unmodified %+v, got %+v", ingressTSSvc, svc) + } +} + +func TestExclusiveOwnerAnnotations(t *testing.T) { + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pg1", + UID: "pg1-uid", + }, + } + const ( + selfOperatorID = "self-id" + pg1Owner = `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"pg1","uid":"pg1-uid"}}]}` + ) + + for name, tc := range map[string]struct { + svc *tailscale.VIPService + wantErr string + }{ + "no_svc": { + svc: nil, + }, + "empty_svc": { + svc: &tailscale.VIPService{}, + wantErr: "likely a resource created by something other than the Tailscale Kubernetes operator", + }, + "already_owner": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: pg1Owner, + }, + }, + }, + "already_owner_name_updated": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"old-pg1-name","uid":"pg1-uid"}}]}`, + }, + }, + }, + "preserves_existing_annotations": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + "existing": "annotation", + ownerAnnotation: pg1Owner, + }, + }, + }, + "owned_by_another_operator": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"operator-2"}]}`, + }, + }, + wantErr: "already owned by other operator(s)", + }, + "owned_by_an_ingress": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id"}]}`, // Ingress doesn't set Resource field (yet). 
+ }, + }, + wantErr: "does not reference an owning resource", + }, + "owned_by_another_pg": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"pg2","uid":"pg2-uid"}}]}`, + }, + }, + wantErr: "already owned by another resource", + }, + } { + t.Run(name, func(t *testing.T) { + got, err := exclusiveOwnerAnnotations(pg, "self-id", tc.svc) + if tc.wantErr != "" { + if !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("exclusiveOwnerAnnotations() error = %v, wantErr %v", err, tc.wantErr) + } + } else if diff := cmp.Diff(pg1Owner, got[ownerAnnotation]); diff != "" { + t.Errorf("exclusiveOwnerAnnotations() mismatch (-want +got):\n%s", diff) + } + if tc.svc == nil { + return // Don't check annotations being preserved. + } + for k, v := range tc.svc.Annotations { + if k == ownerAnnotation { + continue + } + if got[k] != v { + t.Errorf("exclusiveOwnerAnnotations() did not preserve annotation %q: got %q, want %q", k, got[k], v) + } + } + }) + } +} + +func omitPGStatusConditionMessages(p *tsapi.ProxyGroup) { + for i := range p.Status.Conditions { + // Don't bother validating the message. + p.Status.Conditions[i].Message = "" + } +} diff --git a/cmd/k8s-operator/proxy.go b/cmd/k8s-operator/api-server-proxy.go similarity index 100% rename from cmd/k8s-operator/proxy.go rename to cmd/k8s-operator/api-server-proxy.go diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index 06c847925..98ca1c378 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -20,6 +20,10 @@ spec: jsonPath: .status.conditions[?(@.type == "ProxyGroupReady")].reason name: Status type: string + - description: URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if any. Only applies to ProxyGroups of type kube-apiserver. + jsonPath: .status.url + name: URL + type: string - description: ProxyGroup type. jsonPath: .spec.type name: Type @@ -32,15 +36,22 @@ spec: openAPIV3Schema: description: |- ProxyGroup defines a set of Tailscale devices that will act as proxies. - Currently only egress ProxyGroups are supported. + Depending on spec.Type, it can be a group of egress, ingress, or kube-apiserver + proxies. In addition to running a highly available set of proxies, ingress + and egress ProxyGroups also allow for serving many annotated Services from a + single set of proxies to minimise resource consumption. + + For ingress and egress, use the tailscale.com/proxy-group annotation on a + Service to specify that the proxy should be implemented by a ProxyGroup + instead of a single dedicated proxy. - Use the tailscale.com/proxy-group annotation on a Service to specify that - the egress proxy should be implemented by a ProxyGroup instead of a single - dedicated proxy. In addition to running a highly available set of proxies, - ProxyGroup also allows for serving many annotated Services from a single - set of proxies to minimise resource consumption. + More info: + * https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress + * https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress - More info: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress + For kube-apiserver, the ProxyGroup is a standalone resource. Use the + spec.kubeAPIServer field to configure options specific to the kube-apiserver + ProxyGroup type. 
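For reference, a minimal sketch of a kube-apiserver ProxyGroup manifest using the new spec.kubeAPIServer fields described above. The metadata name, hostname value, and mode choice here are illustrative only, not taken from this patch:

  apiVersion: tailscale.com/v1alpha1
  kind: ProxyGroup
  metadata:
    name: kube-api
  spec:
    type: kube-apiserver
    kubeAPIServer:
      # Optional; must be a valid DNS label unique across the tailnet.
      # Defaults to the ProxyGroup name when omitted.
      hostname: kube-api
      # Supported modes are auth and noauth.
      mode: auth

Once the TLS certs are issued and at least one proxy advertises the Tailscale Service, the operator records the proxy's URL in status.url (for example https://kube-api.<tailnet>.ts.net), which is surfaced via the new URL printer column added above.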
type: object required: - spec @@ -83,6 +94,14 @@ spec: ProxyGroup type. This field is only used when Type is set to "kube-apiserver". type: object properties: + hostname: + description: |- + Hostname is the hostname with which to expose the Kubernetes API server + proxies. Must be a valid DNS label no longer than 63 characters. If not + specified, the name of the ProxyGroup is used as the hostname. Must be + unique across the whole tailnet. + type: string + pattern: ^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$ mode: description: |- Mode to run the API server proxy in. Supported modes are auth and noauth. @@ -141,10 +160,20 @@ spec: conditions: description: |- List of status conditions to indicate the status of the ProxyGroup - resources. Known condition types are `ProxyGroupReady`, `ProxyGroupAvailable`. - `ProxyGroupReady` indicates all ProxyGroup resources are fully reconciled - and ready. `ProxyGroupAvailable` indicates that at least one proxy is - ready to serve traffic. + resources. Known condition types include `ProxyGroupReady` and + `ProxyGroupAvailable`. + + * `ProxyGroupReady` indicates all ProxyGroup resources are reconciled and + all expected conditions are true. + * `ProxyGroupAvailable` indicates that at least one proxy is ready to + serve traffic. + + For ProxyGroups of type kube-apiserver, there are two additional conditions: + + * `KubeAPIServerProxyConfigured` indicates that at least one API server + proxy is configured and ready to serve traffic. + * `KubeAPIServerProxyValid` indicates that spec.kubeAPIServer config is + valid. type: array items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -231,6 +260,11 @@ spec: x-kubernetes-list-map-keys: - hostname x-kubernetes-list-type: map + url: + description: |- + URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if + any. Only applies to ProxyGroups of type kube-apiserver. + type: string served: true storage: true subresources: diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index ff3705cb3..ac8143e98 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2873,6 +2873,10 @@ spec: jsonPath: .status.conditions[?(@.type == "ProxyGroupReady")].reason name: Status type: string + - description: URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if any. Only applies to ProxyGroups of type kube-apiserver. + jsonPath: .status.url + name: URL + type: string - description: ProxyGroup type. jsonPath: .spec.type name: Type @@ -2885,15 +2889,22 @@ spec: openAPIV3Schema: description: |- ProxyGroup defines a set of Tailscale devices that will act as proxies. - Currently only egress ProxyGroups are supported. + Depending on spec.Type, it can be a group of egress, ingress, or kube-apiserver + proxies. In addition to running a highly available set of proxies, ingress + and egress ProxyGroups also allow for serving many annotated Services from a + single set of proxies to minimise resource consumption. + + For ingress and egress, use the tailscale.com/proxy-group annotation on a + Service to specify that the proxy should be implemented by a ProxyGroup + instead of a single dedicated proxy. - Use the tailscale.com/proxy-group annotation on a Service to specify that - the egress proxy should be implemented by a ProxyGroup instead of a single - dedicated proxy. 
In addition to running a highly available set of proxies, - ProxyGroup also allows for serving many annotated Services from a single - set of proxies to minimise resource consumption. + More info: + * https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress + * https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress - More info: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress + For kube-apiserver, the ProxyGroup is a standalone resource. Use the + spec.kubeAPIServer field to configure options specific to the kube-apiserver + ProxyGroup type. properties: apiVersion: description: |- @@ -2929,6 +2940,14 @@ spec: KubeAPIServer contains configuration specific to the kube-apiserver ProxyGroup type. This field is only used when Type is set to "kube-apiserver". properties: + hostname: + description: |- + Hostname is the hostname with which to expose the Kubernetes API server + proxies. Must be a valid DNS label no longer than 63 characters. If not + specified, the name of the ProxyGroup is used as the hostname. Must be + unique across the whole tailnet. + pattern: ^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$ + type: string mode: description: |- Mode to run the API server proxy in. Supported modes are auth and noauth. @@ -2990,10 +3009,20 @@ spec: conditions: description: |- List of status conditions to indicate the status of the ProxyGroup - resources. Known condition types are `ProxyGroupReady`, `ProxyGroupAvailable`. - `ProxyGroupReady` indicates all ProxyGroup resources are fully reconciled - and ready. `ProxyGroupAvailable` indicates that at least one proxy is - ready to serve traffic. + resources. Known condition types include `ProxyGroupReady` and + `ProxyGroupAvailable`. + + * `ProxyGroupReady` indicates all ProxyGroup resources are reconciled and + all expected conditions are true. + * `ProxyGroupAvailable` indicates that at least one proxy is ready to + serve traffic. + + For ProxyGroups of type kube-apiserver, there are two additional conditions: + + * `KubeAPIServerProxyConfigured` indicates that at least one API server + proxy is configured and ready to serve traffic. + * `KubeAPIServerProxyValid` indicates that spec.kubeAPIServer config is + valid. items: description: Condition contains details for one aspect of the current state of this API Resource. properties: @@ -3080,6 +3109,11 @@ spec: x-kubernetes-list-map-keys: - hostname x-kubernetes-list-type: map + url: + description: |- + URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if + any. Only applies to ProxyGroups of type kube-apiserver. 
+ type: string type: object required: - spec diff --git a/cmd/k8s-operator/egress-eps_test.go b/cmd/k8s-operator/egress-eps_test.go index bd81071cb..bd80112ae 100644 --- a/cmd/k8s-operator/egress-eps_test.go +++ b/cmd/k8s-operator/egress-eps_test.go @@ -20,6 +20,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/egressservices" + "tailscale.com/kube/kubetypes" "tailscale.com/tstest" "tailscale.com/util/mak" ) @@ -200,7 +201,7 @@ func podAndSecretForProxyGroup(pg string) (*corev1.Pod, *corev1.Secret) { ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-0", pg), Namespace: "operator-ns", - Labels: pgSecretLabels(pg, "state"), + Labels: pgSecretLabels(pg, kubetypes.LabelSecretTypeState), }, } return p, s diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index aaf22d471..3afeb528f 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -248,7 +248,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin return false, nil } // 3. Ensure that TLS Secret and RBAC exists - tcd, err := r.tailnetCertDomain(ctx) + tcd, err := tailnetCertDomain(ctx, r.lc) if err != nil { return false, fmt.Errorf("error determining DNS name base: %w", err) } @@ -358,7 +358,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin } // 6. Update Ingress status if ProxyGroup Pods are ready. - count, err := r.numberPodsAdvertising(ctx, pg.Name, serviceName) + count, err := numberPodsAdvertising(ctx, r.Client, r.tsNamespace, pg.Name, serviceName) if err != nil { return false, fmt.Errorf("failed to check if any Pods are configured: %w", err) } @@ -370,7 +370,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin ing.Status.LoadBalancer.Ingress = nil default: var ports []networkingv1.IngressPortStatus - hasCerts, err := r.hasCerts(ctx, serviceName) + hasCerts, err := hasCerts(ctx, r.Client, r.lc, r.tsNamespace, serviceName) if err != nil { return false, fmt.Errorf("error checking TLS credentials provisioned for Ingress: %w", err) } @@ -481,7 +481,7 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG delete(cfg.Services, tsSvcName) serveConfigChanged = true } - if err := r.cleanupCertResources(ctx, proxyGroupName, tsSvcName); err != nil { + if err := cleanupCertResources(ctx, r.Client, r.lc, r.tsNamespace, proxyGroupName, tsSvcName); err != nil { return false, fmt.Errorf("failed to clean up cert resources: %w", err) } } @@ -557,7 +557,7 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, } // 3. Clean up any cluster resources - if err := r.cleanupCertResources(ctx, pg, serviceName); err != nil { + if err := cleanupCertResources(ctx, r.Client, r.lc, r.tsNamespace, pg, serviceName); err != nil { return false, fmt.Errorf("failed to clean up cert resources: %w", err) } @@ -634,8 +634,8 @@ type localClient interface { } // tailnetCertDomain returns the base domain (TCD) of the current tailnet. 
-func (r *HAIngressReconciler) tailnetCertDomain(ctx context.Context) (string, error) { - st, err := r.lc.StatusWithoutPeers(ctx) +func tailnetCertDomain(ctx context.Context, lc localClient) (string, error) { + st, err := lc.StatusWithoutPeers(ctx) if err != nil { return "", fmt.Errorf("error getting tailscale status: %w", err) } @@ -761,7 +761,7 @@ const ( func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, pgName string, serviceName tailcfg.ServiceName, mode serviceAdvertisementMode, logger *zap.SugaredLogger) (err error) { // Get all config Secrets for this ProxyGroup. secrets := &corev1.SecretList{} - if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "config"))); err != nil { + if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig))); err != nil { return fmt.Errorf("failed to list config Secrets: %w", err) } @@ -773,7 +773,7 @@ func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con // The only exception is Ingresses with an HTTP endpoint enabled - if an // Ingress has an HTTP endpoint enabled, it will be advertised even if the // TLS cert is not yet provisioned. - hasCert, err := a.hasCerts(ctx, serviceName) + hasCert, err := hasCerts(ctx, a.Client, a.lc, a.tsNamespace, serviceName) if err != nil { return fmt.Errorf("error checking TLS credentials provisioned for service %q: %w", serviceName, err) } @@ -822,10 +822,10 @@ func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con return nil } -func (a *HAIngressReconciler) numberPodsAdvertising(ctx context.Context, pgName string, serviceName tailcfg.ServiceName) (int, error) { +func numberPodsAdvertising(ctx context.Context, cl client.Client, tsNamespace, pgName string, serviceName tailcfg.ServiceName) (int, error) { // Get all state Secrets for this ProxyGroup. secrets := &corev1.SecretList{} - if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "state"))); err != nil { + if err := cl.List(ctx, secrets, client.InNamespace(tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeState))); err != nil { return 0, fmt.Errorf("failed to list ProxyGroup %q state Secrets: %w", pgName, err) } @@ -859,7 +859,14 @@ type ownerAnnotationValue struct { // Kubernetes operator instance. type OwnerRef struct { // OperatorID is the stable ID of the operator's Tailscale device. - OperatorID string `json:"operatorID,omitempty"` + OperatorID string `json:"operatorID,omitempty"` + Resource *Resource `json:"resource,omitempty"` // optional, used to identify the ProxyGroup that owns this Tailscale Service. +} + +type Resource struct { + Kind string `json:"kind,omitempty"` // "ProxyGroup" + Name string `json:"name,omitempty"` // Name of the ProxyGroup that owns this Tailscale Service. Informational only. + UID string `json:"uid,omitempty"` // UID of the ProxyGroup that owns this Tailscale Service. 
} // ownerAnnotations returns the updated annotations required to ensure this @@ -891,6 +898,9 @@ func ownerAnnotations(operatorID string, svc *tailscale.VIPService) (map[string] if slices.Contains(o.OwnerRefs, ref) { // up to date return svc.Annotations, nil } + if o.OwnerRefs[0].Resource != nil { + return nil, fmt.Errorf("Tailscale Service %s is owned by another resource: %#v; cannot be reused for an Ingress", svc.Name, o.OwnerRefs[0].Resource) + } o.OwnerRefs = append(o.OwnerRefs, ref) json, err := json.Marshal(o) if err != nil { @@ -949,7 +959,7 @@ func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pg *tsapi }); err != nil { return fmt.Errorf("failed to create or update Role %s: %w", role.Name, err) } - rolebinding := certSecretRoleBinding(pg.Name, r.tsNamespace, domain) + rolebinding := certSecretRoleBinding(pg, r.tsNamespace, domain) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, rolebinding, func(rb *rbacv1.RoleBinding) { // Labels and subjects might have changed if the Ingress has been updated to use a // different ProxyGroup. @@ -963,19 +973,19 @@ func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pg *tsapi // cleanupCertResources ensures that the TLS Secret and associated RBAC // resources that allow proxies to read/write to the Secret are deleted. -func (r *HAIngressReconciler) cleanupCertResources(ctx context.Context, pgName string, name tailcfg.ServiceName) error { - domainName, err := r.dnsNameForService(ctx, tailcfg.ServiceName(name)) +func cleanupCertResources(ctx context.Context, cl client.Client, lc localClient, tsNamespace, pgName string, serviceName tailcfg.ServiceName) error { + domainName, err := dnsNameForService(ctx, lc, serviceName) if err != nil { - return fmt.Errorf("error getting DNS name for Tailscale Service %s: %w", name, err) + return fmt.Errorf("error getting DNS name for Tailscale Service %s: %w", serviceName, err) } labels := certResourceLabels(pgName, domainName) - if err := r.DeleteAllOf(ctx, &rbacv1.RoleBinding{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + if err := cl.DeleteAllOf(ctx, &rbacv1.RoleBinding{}, client.InNamespace(tsNamespace), client.MatchingLabels(labels)); err != nil { return fmt.Errorf("error deleting RoleBinding for domain name %s: %w", domainName, err) } - if err := r.DeleteAllOf(ctx, &rbacv1.Role{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + if err := cl.DeleteAllOf(ctx, &rbacv1.Role{}, client.InNamespace(tsNamespace), client.MatchingLabels(labels)); err != nil { return fmt.Errorf("error deleting Role for domain name %s: %w", domainName, err) } - if err := r.DeleteAllOf(ctx, &corev1.Secret{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + if err := cl.DeleteAllOf(ctx, &corev1.Secret{}, client.InNamespace(tsNamespace), client.MatchingLabels(labels)); err != nil { return fmt.Errorf("error deleting Secret for domain name %s: %w", domainName, err) } return nil @@ -1018,17 +1028,17 @@ func certSecretRole(pgName, namespace, domain string) *rbacv1.Role { // certSecretRoleBinding creates a RoleBinding for Role that will allow proxies // to manage the TLS Secret for the given domain. Domain must be a valid // Kubernetes resource name. 
-func certSecretRoleBinding(pgName, namespace, domain string) *rbacv1.RoleBinding { +func certSecretRoleBinding(pg *tsapi.ProxyGroup, namespace, domain string) *rbacv1.RoleBinding { return &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: domain, Namespace: namespace, - Labels: certResourceLabels(pgName, domain), + Labels: certResourceLabels(pg.Name, domain), }, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Name: pgName, + Name: pgServiceAccountName(pg), Namespace: namespace, }, }, @@ -1041,14 +1051,17 @@ func certSecretRoleBinding(pgName, namespace, domain string) *rbacv1.RoleBinding // certSecret creates a Secret that will store the TLS certificate and private // key for the given domain. Domain must be a valid Kubernetes resource name. -func certSecret(pgName, namespace, domain string, ing *networkingv1.Ingress) *corev1.Secret { +func certSecret(pgName, namespace, domain string, parent client.Object) *corev1.Secret { labels := certResourceLabels(pgName, domain) - labels[kubetypes.LabelSecretType] = "certs" + labels[kubetypes.LabelSecretType] = kubetypes.LabelSecretTypeCerts // Labels that let us identify the Ingress resource lets us reconcile // the Ingress when the TLS Secret is updated (for example, when TLS // certs have been provisioned). - labels[LabelParentName] = ing.Name - labels[LabelParentNamespace] = ing.Namespace + labels[LabelParentType] = strings.ToLower(parent.GetObjectKind().GroupVersionKind().Kind) + labels[LabelParentName] = parent.GetName() + if ns := parent.GetNamespace(); ns != "" { + labels[LabelParentNamespace] = ns + } return &corev1.Secret{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", @@ -1076,9 +1089,9 @@ func certResourceLabels(pgName, domain string) map[string]string { } // dnsNameForService returns the DNS name for the given Tailscale Service's name. -func (r *HAIngressReconciler) dnsNameForService(ctx context.Context, svc tailcfg.ServiceName) (string, error) { +func dnsNameForService(ctx context.Context, lc localClient, svc tailcfg.ServiceName) (string, error) { s := svc.WithoutPrefix() - tcd, err := r.tailnetCertDomain(ctx) + tcd, err := tailnetCertDomain(ctx, lc) if err != nil { return "", fmt.Errorf("error determining DNS name base: %w", err) } @@ -1086,14 +1099,14 @@ func (r *HAIngressReconciler) dnsNameForService(ctx context.Context, svc tailcfg } // hasCerts checks if the TLS Secret for the given service has non-zero cert and key data. -func (r *HAIngressReconciler) hasCerts(ctx context.Context, svc tailcfg.ServiceName) (bool, error) { - domain, err := r.dnsNameForService(ctx, svc) +func hasCerts(ctx context.Context, cl client.Client, lc localClient, ns string, svc tailcfg.ServiceName) (bool, error) { + domain, err := dnsNameForService(ctx, lc, svc) if err != nil { return false, fmt.Errorf("failed to get DNS name for service: %w", err) } secret := &corev1.Secret{} - err = r.Get(ctx, client.ObjectKey{ - Namespace: r.tsNamespace, + err = cl.Get(ctx, client.ObjectKey{ + Namespace: ns, Name: domain, }, secret) if err != nil { diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 5de86cdad..77e5ecb37 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -75,8 +75,13 @@ func TestIngressPGReconciler(t *testing.T) { // Verify that Role and RoleBinding have been created for the first Ingress. // Do not verify the cert Secret as that was already verified implicitly above. 
+ pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg", + }, + } expectEqual(t, fc, certSecretRole("test-pg", "operator-ns", "my-svc.ts.net")) - expectEqual(t, fc, certSecretRoleBinding("test-pg", "operator-ns", "my-svc.ts.net")) + expectEqual(t, fc, certSecretRoleBinding(pg, "operator-ns", "my-svc.ts.net")) mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { ing.Annotations["tailscale.com/tags"] = "tag:custom,tag:test" @@ -137,7 +142,7 @@ func TestIngressPGReconciler(t *testing.T) { // Verify that Role and RoleBinding have been created for the second Ingress. // Do not verify the cert Secret as that was already verified implicitly above. expectEqual(t, fc, certSecretRole("test-pg", "operator-ns", "my-other-svc.ts.net")) - expectEqual(t, fc, certSecretRoleBinding("test-pg", "operator-ns", "my-other-svc.ts.net")) + expectEqual(t, fc, certSecretRoleBinding(pg, "operator-ns", "my-other-svc.ts.net")) // Verify first Ingress is still working verifyServeConfig(t, fc, "svc:my-svc", false) @@ -186,7 +191,12 @@ func TestIngressPGReconciler(t *testing.T) { }) expectReconciled(t, ingPGR, "default", "test-ingress") expectEqual(t, fc, certSecretRole("test-pg-second", "operator-ns", "my-svc.ts.net")) - expectEqual(t, fc, certSecretRoleBinding("test-pg-second", "operator-ns", "my-svc.ts.net")) + pg = &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg-second", + }, + } + expectEqual(t, fc, certSecretRoleBinding(pg, "operator-ns", "my-svc.ts.net")) // Delete the first Ingress and verify cleanup if err := fc.Delete(context.Background(), ing); err != nil { @@ -515,7 +525,7 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-pg-0", Namespace: "operator-ns", - Labels: pgSecretLabels("test-pg", "state"), + Labels: pgSecretLabels("test-pg", kubetypes.LabelSecretTypeState), }, Data: map[string][]byte{ "_current-profile": []byte("profile-foo"), @@ -686,6 +696,14 @@ func TestOwnerAnnotations(t *testing.T) { ownerAnnotation: `{"ownerRefs":[{"operatorID":"operator-2"},{"operatorID":"self-id"}]}`, }, }, + "owned_by_proxygroup": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"test-pg","uid":"1234-UID"}}]}`, + }, + }, + wantErr: "owned by another resource", + }, } { t.Run(name, func(t *testing.T) { got, err := ownerAnnotations("self-id", tc.svc) @@ -708,7 +726,7 @@ func populateTLSSecret(ctx context.Context, c client.Client, pgName, domain stri kubetypes.LabelManaged: "true", labelProxyGroup: pgName, labelDomain: domain, - kubetypes.LabelSecretType: "certs", + kubetypes.LabelSecretType: kubetypes.LabelSecretTypeCerts, }, }, Type: corev1.SecretTypeTLS, @@ -806,7 +824,7 @@ func verifyTailscaledConfig(t *testing.T, fc client.Client, pgName string, expec ObjectMeta: metav1.ObjectMeta{ Name: pgConfigSecretName(pgName, 0), Namespace: "operator-ns", - Labels: pgSecretLabels(pgName, "config"), + Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig), }, Data: map[string][]byte{ tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): []byte(fmt.Sprintf(`{"Version":""%s}`, expected)), @@ -845,7 +863,7 @@ func createPGResources(t *testing.T, fc client.Client, pgName string) { ObjectMeta: metav1.ObjectMeta{ Name: pgConfigSecretName(pgName, 0), Namespace: "operator-ns", - Labels: pgSecretLabels(pgName, "config"), + Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig), }, Data: 
map[string][]byte{ tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): []byte("{}"), diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 870a6f8b7..94a0a6a78 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -123,7 +123,7 @@ func main() { defer s.Close() restConfig := config.GetConfigOrDie() if mode != apiServerProxyModeDisabled { - ap, err := apiproxy.NewAPIServerProxy(zlog, restConfig, s, mode == apiServerProxyModeEnabled) + ap, err := apiproxy.NewAPIServerProxy(zlog, restConfig, s, mode == apiServerProxyModeEnabled, true) if err != nil { zlog.Fatalf("error creating API server proxy: %v", err) } @@ -633,6 +633,32 @@ func runReconcilers(opts reconcilerOpts) { startlog.Fatalf("could not create Recorder reconciler: %v", err) } + // kube-apiserver's Tailscale Service reconciler. + err = builder. + ControllerManagedBy(mgr). + For(&tsapi.ProxyGroup{}, builder.WithPredicates( + predicate.NewPredicateFuncs(func(obj client.Object) bool { + pg, ok := obj.(*tsapi.ProxyGroup) + return ok && pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer + }), + )). + Named("kube-apiserver-ts-service-reconciler"). + Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(kubeAPIServerPGsFromSecret(mgr.GetClient(), startlog))). + Complete(&KubeAPIServerTSServiceReconciler{ + Client: mgr.GetClient(), + recorder: eventRecorder, + logger: opts.log.Named("kube-apiserver-ts-service-reconciler"), + tsClient: opts.tsClient, + tsNamespace: opts.tailscaleNamespace, + lc: lc, + defaultTags: strings.Split(opts.proxyTags, ","), + operatorID: id, + clock: tstime.DefaultClock{}, + }) + if err != nil { + startlog.Fatalf("could not create Kubernetes API server Tailscale Service reconciler: %v", err) + } + // ProxyGroup reconciler. ownedByProxyGroupFilter := handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &tsapi.ProxyGroup{}) proxyClassFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForProxyGroup(mgr.GetClient(), startlog)) @@ -1214,7 +1240,7 @@ func egressEpsFromPGStateSecrets(cl client.Client, ns string) handler.MapFunc { if parentType := o.GetLabels()[LabelParentType]; parentType != "proxygroup" { return nil } - if secretType := o.GetLabels()[kubetypes.LabelSecretType]; secretType != "state" { + if secretType := o.GetLabels()[kubetypes.LabelSecretType]; secretType != kubetypes.LabelSecretTypeState { return nil } pg, ok := o.GetLabels()[LabelParentName] @@ -1304,7 +1330,7 @@ func reconcileRequestsForPG(pg string, cl client.Client, ns string) []reconcile. 
func isTLSSecret(secret *corev1.Secret) bool { return secret.Type == corev1.SecretTypeTLS && secret.ObjectMeta.Labels[kubetypes.LabelManaged] == "true" && - secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == "certs" && + secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == kubetypes.LabelSecretTypeCerts && secret.ObjectMeta.Labels[labelDomain] != "" && secret.ObjectMeta.Labels[labelProxyGroup] != "" } @@ -1312,7 +1338,7 @@ func isTLSSecret(secret *corev1.Secret) bool { func isPGStateSecret(secret *corev1.Secret) bool { return secret.ObjectMeta.Labels[kubetypes.LabelManaged] == "true" && secret.ObjectMeta.Labels[LabelParentType] == "proxygroup" && - secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == "state" + secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == kubetypes.LabelSecretTypeState } // HAIngressesFromSecret returns a handler that returns reconcile requests for @@ -1394,6 +1420,42 @@ func HAServicesFromSecret(cl client.Client, logger *zap.SugaredLogger) handler.M } } +// kubeAPIServerPGsFromSecret finds ProxyGroups of type "kube-apiserver" that +// need to be reconciled after a ProxyGroup-owned Secret is updated. +func kubeAPIServerPGsFromSecret(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + secret, ok := o.(*corev1.Secret) + if !ok { + logger.Infof("[unexpected] Secret handler triggered for an object that is not a Secret") + return nil + } + if secret.ObjectMeta.Labels[kubetypes.LabelManaged] != "true" || + secret.ObjectMeta.Labels[LabelParentType] != "proxygroup" { + return nil + } + + var pg tsapi.ProxyGroup + if err := cl.Get(ctx, types.NamespacedName{Name: secret.ObjectMeta.Labels[LabelParentName]}, &pg); err != nil { + logger.Infof("error getting ProxyGroup %s: %v", secret.ObjectMeta.Labels[LabelParentName], err) + return nil + } + + if pg.Spec.Type != tsapi.ProxyGroupTypeKubernetesAPIServer { + return nil + } + + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: secret.ObjectMeta.Labels[LabelParentNamespace], + Name: secret.ObjectMeta.Labels[LabelParentName], + }, + }, + } + + } +} + // egressSvcsFromEgressProxyGroup is an event handler for egress ProxyGroups. It returns reconcile requests for all // user-created ExternalName Services that should be exposed on this ProxyGroup. func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 1fdc076f9..d62cb0f11 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -68,8 +68,7 @@ const ( // // tailcfg.CurrentCapabilityVersion was 106 when the ProxyGroup controller was // first introduced. 
- pgMinCapabilityVersion = 106 - kubeAPIServerConfigFile = "config.hujson" + pgMinCapabilityVersion = 106 ) var ( @@ -127,6 +126,10 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } if done, err := r.maybeCleanup(ctx, pg); err != nil { + if strings.Contains(err.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error, retrying: %s", err) + return reconcile.Result{}, nil + } return reconcile.Result{}, err } else if !done { logger.Debugf("ProxyGroup resource cleanup not yet finished, will retry...") @@ -158,7 +161,7 @@ func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyG logger.Infof("ensuring ProxyGroup is set up") pg.Finalizers = append(pg.Finalizers, FinalizerName) if err := r.Update(ctx, pg); err != nil { - return r.notReadyErrf(pg, "error adding finalizer: %w", err) + return r.notReadyErrf(pg, logger, "error adding finalizer: %w", err) } } @@ -174,31 +177,25 @@ func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyG if apierrors.IsNotFound(err) { msg := fmt.Sprintf("the ProxyGroup's ProxyClass %q does not (yet) exist", proxyClassName) logger.Info(msg) - return r.notReady(reasonProxyGroupCreating, msg) + return notReady(reasonProxyGroupCreating, msg) } if err != nil { - return r.notReadyErrf(pg, "error getting ProxyGroup's ProxyClass %q: %w", proxyClassName, err) + return r.notReadyErrf(pg, logger, "error getting ProxyGroup's ProxyClass %q: %w", proxyClassName, err) } if !tsoperator.ProxyClassIsReady(proxyClass) { msg := fmt.Sprintf("the ProxyGroup's ProxyClass %q is not yet in a ready state, waiting...", proxyClassName) logger.Info(msg) - return r.notReady(reasonProxyGroupCreating, msg) + return notReady(reasonProxyGroupCreating, msg) } } if err := r.validate(ctx, pg, proxyClass, logger); err != nil { - return r.notReady(reasonProxyGroupInvalid, fmt.Sprintf("invalid ProxyGroup spec: %v", err)) + return notReady(reasonProxyGroupInvalid, fmt.Sprintf("invalid ProxyGroup spec: %v", err)) } staticEndpoints, nrr, err := r.maybeProvision(ctx, pg, proxyClass) if err != nil { - if strings.Contains(err.Error(), optimisticLockErrorMsg) { - msg := fmt.Sprintf("optimistic lock error, retrying: %s", nrr.message) - logger.Info(msg) - return r.notReady(reasonProxyGroupCreating, msg) - } else { - return nil, nrr, err - } + return nil, nrr, err } return staticEndpoints, nrr, nil @@ -299,9 +296,9 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro reason := reasonProxyGroupCreationFailed msg := fmt.Sprintf("error provisioning NodePort Services for static endpoints: %v", err) r.recorder.Event(pg, corev1.EventTypeWarning, reason, msg) - return r.notReady(reason, msg) + return notReady(reason, msg) } - return r.notReadyErrf(pg, "error provisioning NodePort Services for static endpoints: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning NodePort Services for static endpoints: %w", err) } } @@ -312,9 +309,9 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro reason := reasonProxyGroupCreationFailed msg := fmt.Sprintf("error provisioning config Secrets: %v", err) r.recorder.Event(pg, corev1.EventTypeWarning, reason, msg) - return r.notReady(reason, msg) + return notReady(reason, msg) } - return r.notReadyErrf(pg, "error provisioning config Secrets: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning config Secrets: %w", err) } // State secrets are precreated so we can use the ProxyGroup CR as their owner ref. 
@@ -325,7 +322,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = sec.ObjectMeta.OwnerReferences }); err != nil { - return r.notReadyErrf(pg, "error provisioning state Secrets: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning state Secrets: %w", err) } } @@ -339,7 +336,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences }); err != nil { - return r.notReadyErrf(pg, "error provisioning ServiceAccount: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning ServiceAccount: %w", err) } } @@ -350,7 +347,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro r.ObjectMeta.OwnerReferences = role.ObjectMeta.OwnerReferences r.Rules = role.Rules }); err != nil { - return r.notReadyErrf(pg, "error provisioning Role: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning Role: %w", err) } roleBinding := pgRoleBinding(pg, r.tsNamespace) @@ -361,7 +358,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro r.RoleRef = roleBinding.RoleRef r.Subjects = roleBinding.Subjects }); err != nil { - return r.notReadyErrf(pg, "error provisioning RoleBinding: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning RoleBinding: %w", err) } if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { @@ -371,7 +368,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences mak.Set(&existing.BinaryData, egressservices.KeyHEPPings, hp) }); err != nil { - return r.notReadyErrf(pg, "error provisioning egress ConfigMap %q: %w", cm.Name, err) + return r.notReadyErrf(pg, logger, "error provisioning egress ConfigMap %q: %w", cm.Name, err) } } @@ -381,7 +378,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro existing.ObjectMeta.Labels = cm.ObjectMeta.Labels existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences }); err != nil { - return r.notReadyErrf(pg, "error provisioning ingress ConfigMap %q: %w", cm.Name, err) + return r.notReadyErrf(pg, logger, "error provisioning ingress ConfigMap %q: %w", cm.Name, err) } } @@ -391,7 +388,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro } ss, err := pgStatefulSet(pg, r.tsNamespace, defaultImage, r.tsFirewallMode, tailscaledPort, proxyClass) if err != nil { - return r.notReadyErrf(pg, "error generating StatefulSet spec: %w", err) + return r.notReadyErrf(pg, logger, "error generating StatefulSet spec: %w", err) } cfg := &tailscaleSTSConfig{ proxyType: string(pg.Spec.Type), @@ -404,7 +401,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = ss.ObjectMeta.OwnerReferences }); err != nil { - return r.notReadyErrf(pg, "error provisioning StatefulSet: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning StatefulSet: %w", err) } mo := &metricsOpts{ @@ -414,11 +411,11 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro proxyType: "proxygroup", } if err := reconcileMetricsResources(ctx, logger, mo, proxyClass, r.Client); err != nil { - return r.notReadyErrf(pg, "error reconciling metrics resources: %w", 
err) + return r.notReadyErrf(pg, logger, "error reconciling metrics resources: %w", err) } if err := r.cleanupDanglingResources(ctx, pg, proxyClass); err != nil { - return r.notReadyErrf(pg, "error cleaning up dangling resources: %w", err) + return r.notReadyErrf(pg, logger, "error cleaning up dangling resources: %w", err) } logger.Info("ProxyGroup resources synced") @@ -430,6 +427,10 @@ func (r *ProxyGroupReconciler) maybeUpdateStatus(ctx context.Context, logger *za defer func() { if !apiequality.Semantic.DeepEqual(*oldPGStatus, pg.Status) { if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil { + if strings.Contains(updateErr.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error updating status, retrying: %s", updateErr) + updateErr = nil + } err = errors.Join(err, updateErr) } } @@ -457,6 +458,7 @@ func (r *ProxyGroupReconciler) maybeUpdateStatus(ctx context.Context, logger *za tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, status, reason, message, 0, r.clock, logger) // Set ProxyGroupReady condition. + tsSvcValid, tsSvcSet := tsoperator.KubeAPIServerProxyValid(pg) status = metav1.ConditionFalse reason = reasonProxyGroupCreating switch { @@ -464,9 +466,15 @@ func (r *ProxyGroupReconciler) maybeUpdateStatus(ctx context.Context, logger *za // If we failed earlier, that reason takes precedence. reason = nrr.reason message = nrr.message + case pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer && tsSvcSet && !tsSvcValid: + reason = reasonProxyGroupInvalid + message = "waiting for config in spec.kubeAPIServer to be marked valid" case len(devices) < desiredReplicas: case len(devices) > desiredReplicas: message = fmt.Sprintf("waiting for %d ProxyGroup pods to shut down", len(devices)-desiredReplicas) + case pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer && !tsoperator.KubeAPIServerProxyConfigured(pg): + reason = reasonProxyGroupCreating + message = "waiting for proxies to start advertising the kube-apiserver proxy's hostname" default: status = metav1.ConditionTrue reason = reasonProxyGroupReady @@ -714,7 +722,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p ObjectMeta: metav1.ObjectMeta{ Name: pgConfigSecretName(pg.Name, i), Namespace: r.tsNamespace, - Labels: pgSecretLabels(pg.Name, "config"), + Labels: pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeConfig), OwnerReferences: pgOwnerReference(pg), }, } @@ -775,13 +783,6 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } } - // AdvertiseServices config is set by ingress-pg-reconciler, so make sure we - // don't overwrite it if already set. 
- existingAdvertiseServices, err := extractAdvertiseServicesConfig(existingCfgSecret) - if err != nil { - return nil, err - } - if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer { hostname := pgHostname(pg, i) @@ -795,7 +796,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } if !deviceAuthed { existingCfg := conf.ConfigV1Alpha1{} - if err := json.Unmarshal(existingCfgSecret.Data[kubeAPIServerConfigFile], &existingCfg); err != nil { + if err := json.Unmarshal(existingCfgSecret.Data[kubetypes.KubeAPIServerConfigFile], &existingCfg); err != nil { return nil, fmt.Errorf("error unmarshalling existing config: %w", err) } if existingCfg.AuthKey != nil { @@ -803,19 +804,42 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } } } + cfg := conf.VersionedConfig{ Version: "v1alpha1", ConfigV1Alpha1: &conf.ConfigV1Alpha1{ - Hostname: &hostname, + AuthKey: authKey, State: ptr.To(fmt.Sprintf("kube:%s", pgPodName(pg.Name, i))), App: ptr.To(kubetypes.AppProxyGroupKubeAPIServer), - AuthKey: authKey, - KubeAPIServer: &conf.KubeAPIServer{ + LogLevel: ptr.To(logger.Level().String()), + + // Reloadable fields. + Hostname: &hostname, + APIServerProxy: &conf.APIServerProxyConfig{ + Enabled: opt.NewBool(true), AuthMode: opt.NewBool(isAuthAPIServerProxy(pg)), + // The first replica is elected as the cert issuer, same + // as containerboot does for ingress-pg-reconciler. + IssueCerts: opt.NewBool(i == 0), }, }, } + // Copy over config that the apiserver-proxy-service-reconciler sets. + if existingCfgSecret != nil { + if k8sProxyCfg, ok := cfgSecret.Data[kubetypes.KubeAPIServerConfigFile]; ok { + k8sCfg := &conf.ConfigV1Alpha1{} + if err := json.Unmarshal(k8sProxyCfg, k8sCfg); err != nil { + return nil, fmt.Errorf("failed to unmarshal kube-apiserver config: %w", err) + } + + cfg.AdvertiseServices = k8sCfg.AdvertiseServices + if k8sCfg.APIServerProxy != nil { + cfg.APIServerProxy.ServiceName = k8sCfg.APIServerProxy.ServiceName + } + } + } + if r.loginServer != "" { cfg.ServerURL = &r.loginServer } @@ -832,8 +856,15 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p if err != nil { return nil, fmt.Errorf("error marshalling k8s-proxy config: %w", err) } - mak.Set(&cfgSecret.Data, kubeAPIServerConfigFile, cfgB) + mak.Set(&cfgSecret.Data, kubetypes.KubeAPIServerConfigFile, cfgB) } else { + // AdvertiseServices config is set by ingress-pg-reconciler, so make sure we + // don't overwrite it if already set. 
+ existingAdvertiseServices, err := extractAdvertiseServicesConfig(existingCfgSecret)
+ if err != nil {
+ return nil, err
+ }
+
 configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[nodePortSvcName], existingAdvertiseServices, r.loginServer)
 if err != nil {
 return nil, fmt.Errorf("error creating tailscaled config: %w", err)
@@ -1024,16 +1055,16 @@ func extractAdvertiseServicesConfig(cfgSecret *corev1.Secret) ([]string, error)
 return nil, nil
 }
 
- conf, err := latestConfigFromSecret(cfgSecret)
+ cfg, err := latestConfigFromSecret(cfgSecret)
 if err != nil {
 return nil, err
 }
 
- if conf == nil {
+ if cfg == nil {
 return nil, nil
 }
 
- return conf.AdvertiseServices, nil
+ return cfg.AdvertiseServices, nil
 }
 
 // getNodeMetadata gets metadata for all the pods owned by this ProxyGroup by
@@ -1045,7 +1076,7 @@ func extractAdvertiseServicesConfig(cfgSecret *corev1.Secret) ([]string, error)
 func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.ProxyGroup) (metadata []nodeMetadata, _ error) {
 // List all state Secrets owned by this ProxyGroup.
 secrets := &corev1.SecretList{}
- if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, "state"))); err != nil {
+ if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeState))); err != nil {
 return nil, fmt.Errorf("failed to list state Secrets: %w", err)
 }
 for _, secret := range secrets.Items {
@@ -1140,15 +1171,21 @@ type nodeMetadata struct {
 dnsName string
 }
 
-func (r *ProxyGroupReconciler) notReady(reason, msg string) (map[string][]netip.AddrPort, *notReadyReason, error) {
+func notReady(reason, msg string) (map[string][]netip.AddrPort, *notReadyReason, error) {
 return nil, &notReadyReason{
 reason: reason,
 message: msg,
 }, nil
 }
 
-func (r *ProxyGroupReconciler) notReadyErrf(pg *tsapi.ProxyGroup, format string, a ...any) (map[string][]netip.AddrPort, *notReadyReason, error) {
+func (r *ProxyGroupReconciler) notReadyErrf(pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, format string, a ...any) (map[string][]netip.AddrPort, *notReadyReason, error) {
 err := fmt.Errorf(format, a...)
+ if strings.Contains(err.Error(), optimisticLockErrorMsg) {
+ msg := fmt.Sprintf("optimistic lock error, retrying: %s", err.Error())
+ logger.Info(msg)
+ return notReady(reasonProxyGroupCreating, msg)
+ }
+
 r.recorder.Event(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error())
 return nil, &notReadyReason{
 reason: reasonProxyGroupCreationFailed,
diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go
index 71398d0d5..e185499f0 100644
--- a/cmd/k8s-operator/proxygroup_specs.go
+++ b/cmd/k8s-operator/proxygroup_specs.go
@@ -7,7 +7,6 @@ package main
 import (
 "fmt"
- "path/filepath"
 "slices"
 "strconv"
 "strings"
@@ -16,6 +15,7 @@ import (
 corev1 "k8s.io/api/core/v1"
 rbacv1 "k8s.io/api/rbac/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/intstr"
 "sigs.k8s.io/yaml"
 tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
@@ -341,8 +341,11 @@ func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string, por
 },
 },
 {
- Name: "TS_K8S_PROXY_CONFIG",
- Value: filepath.Join("/etc/tsconfig/$(POD_NAME)/", kubeAPIServerConfigFile),
+ Name: "TS_K8S_PROXY_CONFIG",
+ Value: "kube:" + types.NamespacedName{
+ Namespace: namespace,
+ Name: "$(POD_NAME)-config",
+ }.String(),
 },
 }
 
@@ -355,20 +358,6 @@ func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string, por
 return envs
 }(),
- VolumeMounts: func() []corev1.VolumeMount {
- var mounts []corev1.VolumeMount
-
- // TODO(tomhjp): Read config directly from the Secret instead.
- for i := range pgReplicas(pg) {
- mounts = append(mounts, corev1.VolumeMount{
- Name: fmt.Sprintf("k8s-proxy-config-%d", i),
- ReadOnly: true,
- MountPath: fmt.Sprintf("/etc/tsconfig/%s-%d", pg.Name, i),
- })
- }
-
- return mounts
- }(),
 Ports: []corev1.ContainerPort{
 {
 Name: "k8s-proxy",
@@ -378,21 +367,6 @@ func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string, por
 },
 },
 },
- Volumes: func() []corev1.Volume {
- var volumes []corev1.Volume
- for i := range pgReplicas(pg) {
- volumes = append(volumes, corev1.Volume{
- Name: fmt.Sprintf("k8s-proxy-config-%d", i),
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: pgConfigSecretName(pg.Name, i),
- },
- },
- })
- }
-
- return volumes
- }(),
 },
 },
 },
@@ -426,6 +400,7 @@ func pgRole(pg *tsapi.ProxyGroup, namespace string) *rbacv1.Role {
 Resources: []string{"secrets"},
 Verbs: []string{
 "list",
+ "watch", // For k8s-proxy.
}, }, { @@ -508,7 +483,7 @@ func pgStateSecrets(pg *tsapi.ProxyGroup, namespace string) (secrets []*corev1.S ObjectMeta: metav1.ObjectMeta{ Name: pgStateSecretName(pg.Name, i), Namespace: namespace, - Labels: pgSecretLabels(pg.Name, "state"), + Labels: pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeState), OwnerReferences: pgOwnerReference(pg), }, }) diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 6f143c056..ef6babc56 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -31,8 +31,11 @@ import ( kube "tailscale.com/k8s-operator" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/k8s-proxy/conf" "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/types/opt" "tailscale.com/types/ptr" ) @@ -1256,6 +1259,163 @@ func TestProxyGroupTypes(t *testing.T) { }) } +func TestKubeAPIServerStatusConditionFlow(t *testing.T) { + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-k8s-apiserver", + UID: "test-k8s-apiserver-uid", + Generation: 1, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeKubernetesAPIServer, + Replicas: ptr.To[int32](1), + KubeAPIServer: &tsapi.KubeAPIServerConfig{ + Mode: ptr.To(tsapi.APIServerProxyModeNoAuth), + }, + }, + } + stateSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgStateSecretName(pg.Name, 0), + Namespace: tsNamespace, + }, + } + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(pg, stateSecret). + WithStatusSubresource(pg). + Build() + r := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + Client: fc, + l: zap.Must(zap.NewDevelopment()).Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), + } + + expectReconciled(t, r, "", pg.Name) + pg.ObjectMeta.Finalizers = append(pg.ObjectMeta.Finalizers, FinalizerName) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "", 0, r.clock, r.l) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.l) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + // Set kube-apiserver valid. + mustUpdateStatus(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { + tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.l) + }) + expectReconciled(t, r, "", pg.Name) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.l) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.l) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + // Set available. + addNodeIDToStateSecrets(t, fc, pg) + expectReconciled(t, r, "", pg.Name) + pg.Status.Devices = []tsapi.TailnetDevice{ + { + Hostname: "hostname-nodeid-0", + TailnetIPs: []string{"1.2.3.4", "::1"}, + }, + } + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "", 0, r.clock, r.l) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.l) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + // Set kube-apiserver configured. 
+ mustUpdateStatus(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { + tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.l) + }) + expectReconciled(t, r, "", pg.Name) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.l) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, "", 1, r.clock, r.l) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) +} + +func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) { + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithStatusSubresource(&tsapi.ProxyGroup{}). + Build() + + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + Client: fc, + l: zap.Must(zap.NewDevelopment()).Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), + } + + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-k8s-apiserver", + UID: "test-k8s-apiserver-uid", + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeKubernetesAPIServer, + Replicas: ptr.To[int32](1), + KubeAPIServer: &tsapi.KubeAPIServerConfig{ + Mode: ptr.To(tsapi.APIServerProxyModeNoAuth), // Avoid needing to pre-create the static ServiceAccount. + }, + }, + } + if err := fc.Create(t.Context(), pg); err != nil { + t.Fatal(err) + } + expectReconciled(t, reconciler, "", pg.Name) + + cfg := conf.VersionedConfig{ + Version: "v1alpha1", + ConfigV1Alpha1: &conf.ConfigV1Alpha1{ + AuthKey: ptr.To("secret-authkey"), + State: ptr.To(fmt.Sprintf("kube:%s", pgPodName(pg.Name, 0))), + App: ptr.To(kubetypes.AppProxyGroupKubeAPIServer), + LogLevel: ptr.To("debug"), + + Hostname: ptr.To("test-k8s-apiserver-0"), + APIServerProxy: &conf.APIServerProxyConfig{ + Enabled: opt.NewBool(true), + AuthMode: opt.NewBool(false), + IssueCerts: opt.NewBool(true), + }, + }, + } + cfgB, err := json.Marshal(cfg) + if err != nil { + t.Fatalf("failed to marshal config: %v", err) + } + + cfgSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName(pg.Name, 0), + Namespace: tsNamespace, + Labels: pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeConfig), + OwnerReferences: pgOwnerReference(pg), + }, + Data: map[string][]byte{ + kubetypes.KubeAPIServerConfigFile: cfgB, + }, + } + expectEqual(t, fc, cfgSecret) + + // Now simulate the kube-apiserver services reconciler updating config, + // then check the proxygroup reconciler doesn't overwrite it. + cfg.APIServerProxy.ServiceName = ptr.To(tailcfg.ServiceName("svc:some-svc-name")) + cfg.AdvertiseServices = []string{"svc:should-not-be-overwritten"} + cfgB, err = json.Marshal(cfg) + if err != nil { + t.Fatalf("failed to marshal config: %v", err) + } + mustUpdate(t, fc, tsNamespace, cfgSecret.Name, func(s *corev1.Secret) { + s.Data[kubetypes.KubeAPIServerConfigFile] = cfgB + }) + expectReconciled(t, reconciler, "", pg.Name) + + cfgSecret.Data[kubetypes.KubeAPIServerConfigFile] = cfgB + expectEqual(t, fc, cfgSecret) +} + func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). 
@@ -1660,7 +1820,7 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG if _, err := createOrUpdate(t.Context(), fc, "tailscale", pod, nil); err != nil { t.Fatalf("failed to create or update Pod %s: %v", pod.Name, err) } - mustUpdate(t, fc, tsNamespace, fmt.Sprintf("test-%d", i), func(s *corev1.Secret) { + mustUpdate(t, fc, tsNamespace, pgStateSecretName(pg.Name, i), func(s *corev1.Secret) { s.Data = map[string][]byte{ currentProfileKey: []byte(key), key: bytes, diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go index 4247eaaa0..62cc36bd4 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -41,7 +41,7 @@ import ( ) const ( - finalizerName = "tailscale.com/service-pg-finalizer" + svcPGFinalizerName = "tailscale.com/service-pg-finalizer" reasonIngressSvcInvalid = "IngressSvcInvalid" reasonIngressSvcValid = "IngressSvcValid" @@ -174,13 +174,13 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin return false, nil } - if !slices.Contains(svc.Finalizers, finalizerName) { + if !slices.Contains(svc.Finalizers, svcPGFinalizerName) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. So, // this is a nice place to tell the operator that the high level, // multi-reconcile operation is underway. logger.Infof("exposing Service over tailscale") - svc.Finalizers = append(svc.Finalizers, finalizerName) + svc.Finalizers = append(svc.Finalizers, svcPGFinalizerName) if err := r.Update(ctx, svc); err != nil { return false, fmt.Errorf("failed to add finalizer: %w", err) } @@ -378,7 +378,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // corresponding to this Service. func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, svc *corev1.Service, logger *zap.SugaredLogger) (svcChanged bool, err error) { logger.Debugf("Ensuring any resources for Service are cleaned up") - ix := slices.Index(svc.Finalizers, finalizerName) + ix := slices.Index(svc.Finalizers, svcPGFinalizerName) if ix < 0 { logger.Debugf("no finalizer, nothing to do") return false, nil @@ -485,12 +485,12 @@ func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG func (r *HAServiceReconciler) deleteFinalizer(ctx context.Context, svc *corev1.Service, logger *zap.SugaredLogger) error { svc.Finalizers = slices.DeleteFunc(svc.Finalizers, func(f string) bool { - return f == finalizerName + return f == svcPGFinalizerName }) - logger.Debugf("ensure %q finalizer is removed", finalizerName) + logger.Debugf("ensure %q finalizer is removed", svcPGFinalizerName) if err := r.Update(ctx, svc); err != nil { - return fmt.Errorf("failed to remove finalizer %q: %w", finalizerName, err) + return fmt.Errorf("failed to remove finalizer %q: %w", svcPGFinalizerName, err) } r.mu.Lock() defer r.mu.Unlock() @@ -653,7 +653,7 @@ func (a *HAServiceReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con // Get all config Secrets for this ProxyGroup. 
// Get all Pods secrets := &corev1.SecretList{} - if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "config"))); err != nil { + if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig))); err != nil { return fmt.Errorf("failed to list config Secrets: %w", err) } @@ -720,7 +720,7 @@ func (a *HAServiceReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con func (a *HAServiceReconciler) numberPodsAdvertising(ctx context.Context, pgName string, serviceName tailcfg.ServiceName) (int, error) { // Get all state Secrets for this ProxyGroup. secrets := &corev1.SecretList{} - if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "state"))); err != nil { + if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeState))); err != nil { return 0, fmt.Errorf("failed to list ProxyGroup %q state Secrets: %w", pgName, err) } diff --git a/cmd/k8s-operator/svc-for-pg_test.go b/cmd/k8s-operator/svc-for-pg_test.go index 054c3ed49..baaa07727 100644 --- a/cmd/k8s-operator/svc-for-pg_test.go +++ b/cmd/k8s-operator/svc-for-pg_test.go @@ -26,6 +26,7 @@ import ( tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/ingressservices" + "tailscale.com/kube/kubetypes" "tailscale.com/tstest" "tailscale.com/types/ptr" "tailscale.com/util/mak" @@ -139,7 +140,7 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien ObjectMeta: metav1.ObjectMeta{ Name: pgConfigSecretName("test-pg", 0), Namespace: "operator-ns", - Labels: pgSecretLabels("test-pg", "config"), + Labels: pgSecretLabels("test-pg", kubetypes.LabelSecretTypeConfig), }, Data: map[string][]byte{ tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): []byte(`{"Version":""}`), @@ -298,12 +299,12 @@ func TestServicePGReconciler_MultiCluster(t *testing.T) { t.Fatalf("getting Tailscale Service: %v", err) } - if len(tsSvcs) != 1 { - t.Fatalf("unexpected number of Tailscale Services (%d)", len(tsSvcs)) + if len(tsSvcs.VIPServices) != 1 { + t.Fatalf("unexpected number of Tailscale Services (%d)", len(tsSvcs.VIPServices)) } - for name := range tsSvcs { - t.Logf("found Tailscale Service with name %q", name.String()) + for _, svc := range tsSvcs.VIPServices { + t.Logf("found Tailscale Service with name %q", svc.Name) } } } @@ -336,7 +337,7 @@ func TestIgnoreRegularService(t *testing.T) { tsSvcs, err := ft.ListVIPServices(context.Background()) if err == nil { - if len(tsSvcs) > 0 { + if len(tsSvcs.VIPServices) > 0 { t.Fatal("unexpected Tailscale Services found") } } diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 56542700d..6ae32d6fb 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -891,13 +891,17 @@ func (c *fakeTSClient) GetVIPService(ctx context.Context, name tailcfg.ServiceNa return svc, nil } -func (c *fakeTSClient) ListVIPServices(ctx context.Context) (map[tailcfg.ServiceName]*tailscale.VIPService, error) { +func (c *fakeTSClient) ListVIPServices(ctx context.Context) (*tailscale.VIPServiceList, error) { c.Lock() defer c.Unlock() if c.vipServices == nil { return nil, &tailscale.ErrResponse{Status: http.StatusNotFound} } - return c.vipServices, nil + result := &tailscale.VIPServiceList{} + for _, svc := range 
c.vipServices { + result.VIPServices = append(result.VIPServices, *svc) + } + return result, nil } func (c *fakeTSClient) CreateOrUpdateVIPService(ctx context.Context, svc *tailscale.VIPService) error { diff --git a/cmd/k8s-operator/tsclient.go b/cmd/k8s-operator/tsclient.go index a94d55afe..50620c26d 100644 --- a/cmd/k8s-operator/tsclient.go +++ b/cmd/k8s-operator/tsclient.go @@ -56,6 +56,8 @@ type tsClient interface { DeleteDevice(ctx context.Context, nodeStableID string) error // GetVIPService is a method for getting a Tailscale Service. VIPService is the original name for Tailscale Service. GetVIPService(ctx context.Context, name tailcfg.ServiceName) (*tailscale.VIPService, error) + // ListVIPServices is a method for listing all Tailscale Services. VIPService is the original name for Tailscale Service. + ListVIPServices(ctx context.Context) (*tailscale.VIPServiceList, error) // CreateOrUpdateVIPService is a method for creating or updating a Tailscale Service. CreateOrUpdateVIPService(ctx context.Context, svc *tailscale.VIPService) error // DeleteVIPService is a method for deleting a Tailscale Service. diff --git a/cmd/k8s-proxy/internal/config/config.go b/cmd/k8s-proxy/internal/config/config.go new file mode 100644 index 000000000..4013047e7 --- /dev/null +++ b/cmd/k8s-proxy/internal/config/config.go @@ -0,0 +1,264 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package config provides watchers for the various supported ways to load a +// config file for k8s-proxy; currently file or Kubernetes Secret. +package config + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/fsnotify/fsnotify" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/kubetypes" + "tailscale.com/types/ptr" + "tailscale.com/util/testenv" +) + +type configLoader struct { + logger *zap.SugaredLogger + client clientcorev1.CoreV1Interface + + cfgChan chan<- *conf.Config + previous []byte + + once sync.Once // For use in tests. To close cfgIgnored. + cfgIgnored chan struct{} // For use in tests. 
+}
+
+func NewConfigLoader(logger *zap.SugaredLogger, client clientcorev1.CoreV1Interface, cfgChan chan<- *conf.Config) *configLoader {
+ return &configLoader{
+ logger: logger,
+ client: client,
+ cfgChan: cfgChan,
+ }
+}
+
+func (l *configLoader) WatchConfig(ctx context.Context, path string) error {
+ secretNamespacedName, isKubeSecret := strings.CutPrefix(path, "kube:")
+ if isKubeSecret {
+ secretNamespace, secretName, ok := strings.Cut(secretNamespacedName, string(types.Separator))
+ if !ok {
+ return fmt.Errorf("invalid Kubernetes Secret reference %q, expected format <namespace>/<name>", path)
+ }
+ if err := l.watchConfigSecretChanges(ctx, secretNamespace, secretName); err != nil && !errors.Is(err, context.Canceled) {
+ return fmt.Errorf("error watching config Secret %q: %w", secretNamespacedName, err)
+ }
+
+ return nil
+ }
+
+ if err := l.watchConfigFileChanges(ctx, path); err != nil && !errors.Is(err, context.Canceled) {
+ return fmt.Errorf("error watching config file %q: %w", path, err)
+ }
+
+ return nil
+}
+
+func (l *configLoader) reloadConfig(ctx context.Context, raw []byte) error {
+ if bytes.Equal(raw, l.previous) {
+ if l.cfgIgnored != nil && testenv.InTest() {
+ l.once.Do(func() {
+ close(l.cfgIgnored)
+ })
+ }
+ return nil
+ }
+
+ cfg, err := conf.Load(raw)
+ if err != nil {
+ return fmt.Errorf("error loading config: %w", err)
+ }
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case l.cfgChan <- &cfg:
+ }
+
+ l.previous = raw
+ return nil
+}
+
+func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string) error {
+ var (
+ tickChan <-chan time.Time
+ eventChan <-chan fsnotify.Event
+ errChan <-chan error
+ )
+
+ if w, err := fsnotify.NewWatcher(); err != nil {
+ // Creating a new fsnotify watcher would fail for example if inotify was not able to create a new file descriptor.
+ // See https://github.com/tailscale/tailscale/issues/15081
+ l.logger.Infof("Failed to create fsnotify watcher on config file %q; watching for changes on 5s timer: %v", path, err)
+ ticker := time.NewTicker(5 * time.Second)
+ defer ticker.Stop()
+ tickChan = ticker.C
+ } else {
+ dir := filepath.Dir(path)
+ file := filepath.Base(path)
+ l.logger.Infof("Watching directory %q for changes to config file %q", dir, file)
+ defer w.Close()
+ if err := w.Add(dir); err != nil {
+ return fmt.Errorf("failed to add fsnotify watch: %w", err)
+ }
+ eventChan = w.Events
+ errChan = w.Errors
+ }
+
+ // Read the initial config file, but after the watcher is already set up to
+ // avoid an unlucky race condition if the config file is edited in between.
+ b, err := os.ReadFile(path)
+ if err != nil {
+ return fmt.Errorf("error reading config file %q: %w", path, err)
+ }
+ if err := l.reloadConfig(ctx, b); err != nil {
+ return fmt.Errorf("error loading initial config file %q: %w", path, err)
+ }
+
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case err, ok := <-errChan:
+ if !ok {
+ // Watcher was closed.
+ return nil
+ }
+ return fmt.Errorf("watcher error: %w", err)
+ case <-tickChan:
+ case ev, ok := <-eventChan:
+ if !ok {
+ // Watcher was closed.
+ return nil
+ }
+ if ev.Name != path || ev.Op&fsnotify.Write == 0 {
+ // Ignore irrelevant events.
+ continue
+ }
+ }
+ b, err := os.ReadFile(path)
+ if err != nil {
+ return fmt.Errorf("error reading config file: %w", err)
+ }
+ // Writers such as os.WriteFile may truncate the file before writing
+ // new contents, so it's possible to read an empty file if we read before
+ // the write has completed.
+ if len(b) == 0 { + continue + } + if err := l.reloadConfig(ctx, b); err != nil { + return fmt.Errorf("error reloading config file %q: %v", path, err) + } + } +} + +func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNamespace, secretName string) error { + secrets := l.client.Secrets(secretNamespace) + w, err := secrets.Watch(ctx, metav1.ListOptions{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + // Re-watch regularly to avoid relying on long-lived connections. + // See https://github.com/kubernetes-client/javascript/issues/596#issuecomment-786419380 + TimeoutSeconds: ptr.To(int64(600)), + FieldSelector: fmt.Sprintf("metadata.name=%s", secretName), + Watch: true, + }) + if err != nil { + return fmt.Errorf("failed to watch config Secret %q: %w", secretName, err) + } + defer func() { + // May not be the original watcher by the time we exit. + if w != nil { + w.Stop() + } + }() + + // Get the initial config Secret now we've got the watcher set up. + secret, err := secrets.Get(ctx, secretName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get config Secret %q: %w", secretName, err) + } + + if err := l.configFromSecret(ctx, secret); err != nil { + return fmt.Errorf("error loading initial config: %w", err) + } + + l.logger.Infof("Watching config Secret %q for changes", secretName) + for { + var secret *corev1.Secret + select { + case <-ctx.Done(): + return ctx.Err() + case ev, ok := <-w.ResultChan(): + if !ok { + w.Stop() + w, err = secrets.Watch(ctx, metav1.ListOptions{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + TimeoutSeconds: ptr.To(int64(600)), + FieldSelector: fmt.Sprintf("metadata.name=%s", secretName), + Watch: true, + }) + if err != nil { + return fmt.Errorf("failed to re-watch config Secret %q: %w", secretName, err) + } + continue + } + + switch ev.Type { + case watch.Added, watch.Modified: + // New config available to load. + var ok bool + secret, ok = ev.Object.(*corev1.Secret) + if !ok { + return fmt.Errorf("unexpected object type %T in watch event for config Secret %q", ev.Object, secretName) + } + if secret == nil || secret.Data == nil { + continue + } + if err := l.configFromSecret(ctx, secret); err != nil { + return fmt.Errorf("error reloading config Secret %q: %v", secret.Name, err) + } + case watch.Error: + return fmt.Errorf("error watching config Secret %q: %v", secretName, ev.Object) + default: + // Ignore, no action required. 
+ continue + } + } + } +} + +func (l *configLoader) configFromSecret(ctx context.Context, s *corev1.Secret) error { + b := s.Data[kubetypes.KubeAPIServerConfigFile] + if len(b) == 0 { + return fmt.Errorf("config Secret %q does not contain expected config in key %q", s.Name, kubetypes.KubeAPIServerConfigFile) + } + + if err := l.reloadConfig(ctx, b); err != nil { + return err + } + + return nil +} diff --git a/cmd/k8s-proxy/internal/config/config_test.go b/cmd/k8s-proxy/internal/config/config_test.go new file mode 100644 index 000000000..1603dbe1f --- /dev/null +++ b/cmd/k8s-proxy/internal/config/config_test.go @@ -0,0 +1,245 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package config + +import ( + "context" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes/fake" + ktesting "k8s.io/client-go/testing" + "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/kubetypes" + "tailscale.com/types/ptr" +) + +func TestWatchConfig(t *testing.T) { + type phase struct { + config string + cancel bool + expectedConf *conf.ConfigV1Alpha1 + expectedErr string + } + + // Same set of behaviour tests for each config source. + for _, env := range []string{"file", "kube"} { + t.Run(env, func(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + name string + initialConfig string + phases []phase + }{ + { + name: "no_config", + phases: []phase{{ + expectedErr: "error loading initial config", + }}, + }, + { + name: "valid_config", + initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, + phases: []phase{{ + expectedConf: &conf.ConfigV1Alpha1{ + AuthKey: ptr.To("abc123"), + }, + }}, + }, + { + name: "can_cancel", + initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, + phases: []phase{ + { + expectedConf: &conf.ConfigV1Alpha1{ + AuthKey: ptr.To("abc123"), + }, + }, + { + cancel: true, + }, + }, + }, + { + name: "can_reload", + initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, + phases: []phase{ + { + expectedConf: &conf.ConfigV1Alpha1{ + AuthKey: ptr.To("abc123"), + }, + }, + { + config: `{"version": "v1alpha1", "authKey": "def456"}`, + expectedConf: &conf.ConfigV1Alpha1{ + AuthKey: ptr.To("def456"), + }, + }, + }, + }, + { + name: "ignores_events_with_no_changes", + initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, + phases: []phase{ + { + expectedConf: &conf.ConfigV1Alpha1{ + AuthKey: ptr.To("abc123"), + }, + }, + { + config: `{"version": "v1alpha1", "authKey": "abc123"}`, + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + root := t.TempDir() + cl := fake.NewClientset() + + var cfgPath string + var writeFile func(*testing.T, string) + if env == "file" { + cfgPath = filepath.Join(root, kubetypes.KubeAPIServerConfigFile) + writeFile = func(t *testing.T, content string) { + if err := os.WriteFile(cfgPath, []byte(content), 0o644); err != nil { + t.Fatalf("error writing config file %q: %v", cfgPath, err) + } + } + } else { + cfgPath = "kube:default/config-secret" + writeFile = func(t *testing.T, content string) { + s := secretFrom(content) + mustCreateOrUpdate(t, cl, s) + } + } + configChan := make(chan *conf.Config) + l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) + l.cfgIgnored = make(chan struct{}) + errs := make(chan error) + ctx, cancel := 
context.WithCancel(t.Context()) + defer cancel() + + writeFile(t, tc.initialConfig) + go func() { + errs <- l.WatchConfig(ctx, cfgPath) + }() + + for i, p := range tc.phases { + if p.config != "" { + writeFile(t, p.config) + } + if p.cancel { + cancel() + } + + select { + case cfg := <-configChan: + if diff := cmp.Diff(*p.expectedConf, cfg.Parsed); diff != "" { + t.Errorf("unexpected config (-want +got):\n%s", diff) + } + case err := <-errs: + if p.cancel { + if err != nil { + t.Fatalf("unexpected error after cancel: %v", err) + } + } else if p.expectedErr == "" { + t.Fatalf("unexpected error: %v", err) + } else if !strings.Contains(err.Error(), p.expectedErr) { + t.Fatalf("expected error to contain %q, got %q", p.expectedErr, err.Error()) + } + case <-l.cfgIgnored: + if p.expectedConf != nil { + t.Fatalf("expected config to be reloaded, but got ignored signal") + } + case <-time.After(5 * time.Second): + t.Fatalf("timed out waiting for expected event in phase: %d", i) + } + } + }) + } + }) + } +} + +func TestWatchConfigSecret_Rewatches(t *testing.T) { + cl := fake.NewClientset() + var watchCount int + var watcher *watch.RaceFreeFakeWatcher + expected := []string{ + `{"version": "v1alpha1", "authKey": "abc123"}`, + `{"version": "v1alpha1", "authKey": "def456"}`, + `{"version": "v1alpha1", "authKey": "ghi789"}`, + } + cl.PrependWatchReactor("secrets", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) { + watcher = watch.NewRaceFreeFake() + watcher.Add(secretFrom(expected[watchCount])) + if action.GetVerb() == "watch" && action.GetResource().Resource == "secrets" { + watchCount++ + } + return true, watcher, nil + }) + + configChan := make(chan *conf.Config) + l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) + + mustCreateOrUpdate(t, cl, secretFrom(expected[0])) + + errs := make(chan error) + go func() { + errs <- l.watchConfigSecretChanges(t.Context(), "default", "config-secret") + }() + + for i := range 2 { + select { + case cfg := <-configChan: + if exp := expected[i]; cfg.Parsed.AuthKey == nil || !strings.Contains(exp, *cfg.Parsed.AuthKey) { + t.Fatalf("expected config to have authKey %q, got: %v", exp, cfg.Parsed.AuthKey) + } + if i == 0 { + watcher.Stop() + } + case err := <-errs: + t.Fatalf("unexpected error: %v", err) + case <-l.cfgIgnored: + t.Fatalf("expected config to be reloaded, but got ignored signal") + case <-time.After(5 * time.Second): + t.Fatalf("timed out waiting for expected event") + } + } + + if watchCount != 2 { + t.Fatalf("expected 2 watch API calls, got %d", watchCount) + } +} + +func secretFrom(content string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "config-secret", + }, + Data: map[string][]byte{ + kubetypes.KubeAPIServerConfigFile: []byte(content), + }, + } +} + +func mustCreateOrUpdate(t *testing.T, cl *fake.Clientset, s *corev1.Secret) { + t.Helper() + if _, err := cl.CoreV1().Secrets("default").Create(t.Context(), s, metav1.CreateOptions{}); err != nil { + if _, updateErr := cl.CoreV1().Secrets("default").Update(t.Context(), s, metav1.UpdateOptions{}); updateErr != nil { + t.Fatalf("error writing config Secret %q: %v", s.Name, updateErr) + } + } +} diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index b7f3d9535..eea1f15f7 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -14,6 +14,7 @@ import ( "fmt" "os" "os/signal" + "reflect" "strings" "syscall" "time" @@ -21,20 +22,37 @@ import ( "go.uber.org/zap" 
"go.uber.org/zap/zapcore" "golang.org/x/sync/errgroup" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + "k8s.io/utils/strings/slices" + "tailscale.com/client/local" + "tailscale.com/cmd/k8s-proxy/internal/config" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/store" apiproxy "tailscale.com/k8s-operator/api-proxy" + "tailscale.com/kube/certs" "tailscale.com/kube/k8s-proxy/conf" + klc "tailscale.com/kube/localclient" + "tailscale.com/kube/services" "tailscale.com/kube/state" + "tailscale.com/tailcfg" "tailscale.com/tsnet" ) func main() { - logger := zap.Must(zap.NewProduction()).Sugar() + encoderCfg := zap.NewProductionEncoderConfig() + encoderCfg.EncodeTime = zapcore.RFC3339TimeEncoder + logger := zap.Must(zap.Config{ + Level: zap.NewAtomicLevelAt(zap.DebugLevel), + Encoding: "json", + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + EncoderConfig: encoderCfg, + }.Build()).Sugar() defer logger.Sync() + if err := run(logger); err != nil { logger.Fatal(err.Error()) } @@ -42,18 +60,58 @@ func main() { func run(logger *zap.SugaredLogger) error { var ( - configFile = os.Getenv("TS_K8S_PROXY_CONFIG") + configPath = os.Getenv("TS_K8S_PROXY_CONFIG") podUID = os.Getenv("POD_UID") ) - if configFile == "" { + if configPath == "" { return errors.New("TS_K8S_PROXY_CONFIG unset") } - // TODO(tomhjp): Support reloading config. - // TODO(tomhjp): Support reading config from a Secret. - cfg, err := conf.Load(configFile) + // serveCtx to live for the lifetime of the process, only gets cancelled + // once the Tailscale Service has been drained + serveCtx, serveCancel := context.WithCancel(context.Background()) + defer serveCancel() + + // ctx to cancel to start the shutdown process. + ctx, cancel := context.WithCancel(serveCtx) + defer cancel() + + sigsChan := make(chan os.Signal, 1) + signal.Notify(sigsChan, syscall.SIGINT, syscall.SIGTERM) + go func() { + select { + case <-ctx.Done(): + case s := <-sigsChan: + logger.Infof("Received shutdown signal %s, exiting", s) + cancel() + } + }() + + var group *errgroup.Group + group, ctx = errgroup.WithContext(ctx) + + restConfig, err := getRestConfig(logger) + if err != nil { + return fmt.Errorf("error getting rest config: %w", err) + } + clientset, err := kubernetes.NewForConfig(restConfig) if err != nil { - return fmt.Errorf("error loading config file %q: %w", configFile, err) + return fmt.Errorf("error creating Kubernetes clientset: %w", err) + } + + // Load and watch config. + cfgChan := make(chan *conf.Config) + cfgLoader := config.NewConfigLoader(logger, clientset.CoreV1(), cfgChan) + group.Go(func() error { + return cfgLoader.WatchConfig(ctx, configPath) + }) + + // Get initial config. + var cfg *conf.Config + select { + case <-ctx.Done(): + return group.Wait() + case cfg = <-cfgChan: } if cfg.Parsed.LogLevel != nil { @@ -82,6 +140,14 @@ func run(logger *zap.SugaredLogger) error { hostinfo.SetApp(*cfg.Parsed.App) } + // TODO(tomhjp): Pass this setting directly into the store instead of using + // environment variables. + if cfg.Parsed.APIServerProxy != nil && cfg.Parsed.APIServerProxy.IssueCerts.EqualBool(true) { + os.Setenv("TS_CERT_SHARE_MODE", "rw") + } else { + os.Setenv("TS_CERT_SHARE_MODE", "ro") + } + st, err := getStateStore(cfg.Parsed.State, logger) if err != nil { return err @@ -115,10 +181,6 @@ func run(logger *zap.SugaredLogger) error { ts.Hostname = *cfg.Parsed.Hostname } - // ctx to live for the lifetime of the process. 
- ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) - defer cancel() - // Make sure we crash loop if Up doesn't complete in reasonable time. upCtx, upCancel := context.WithTimeout(ctx, time.Minute) defer upCancel() @@ -126,9 +188,6 @@ func run(logger *zap.SugaredLogger) error { return fmt.Errorf("error starting tailscale server: %w", err) } defer ts.Close() - - group, groupCtx := errgroup.WithContext(ctx) - lc, err := ts.LocalClient() if err != nil { return fmt.Errorf("error getting local client: %w", err) @@ -136,23 +195,13 @@ func run(logger *zap.SugaredLogger) error { // Setup for updating state keys. if podUID != "" { - w, err := lc.WatchIPNBus(groupCtx, ipn.NotifyInitialNetMap) - if err != nil { - return fmt.Errorf("error watching IPN bus: %w", err) - } - defer w.Close() - group.Go(func() error { - if err := state.KeepKeysUpdated(st, w.Next); err != nil && err != groupCtx.Err() { - return fmt.Errorf("error keeping state keys updated: %w", err) - } - - return nil + return state.KeepKeysUpdated(ctx, st, klc.New(lc)) }) } if cfg.Parsed.AcceptRoutes != nil { - _, err = lc.EditPrefs(groupCtx, &ipn.MaskedPrefs{ + _, err = lc.EditPrefs(ctx, &ipn.MaskedPrefs{ RouteAllSet: true, Prefs: ipn.Prefs{RouteAll: *cfg.Parsed.AcceptRoutes}, }) @@ -161,34 +210,97 @@ func run(logger *zap.SugaredLogger) error { } } - // Setup for the API server proxy. - restConfig, err := getRestConfig(logger) - if err != nil { - return fmt.Errorf("error getting rest config: %w", err) + // TODO(tomhjp): There seems to be a bug that on restart the device does + // not get reassigned it's already working Service IPs unless we clear and + // reset the serve config. + if err := lc.SetServeConfig(ctx, &ipn.ServeConfig{}); err != nil { + return fmt.Errorf("error clearing existing ServeConfig: %w", err) } - authMode := true - if cfg.Parsed.KubeAPIServer != nil { - v, ok := cfg.Parsed.KubeAPIServer.AuthMode.Get() - if ok { - authMode = v + + var cm *certs.CertManager + if shouldIssueCerts(cfg) { + logger.Infof("Will issue TLS certs for Tailscale Service") + cm = certs.NewCertManager(klc.New(lc), logger.Infof) + } + if err := setServeConfig(ctx, lc, cm, apiServerProxyService(cfg)); err != nil { + return err + } + + if cfg.Parsed.AdvertiseServices != nil { + if _, err := lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: cfg.Parsed.AdvertiseServices, + }, + }); err != nil { + return fmt.Errorf("error setting prefs AdvertiseServices: %w", err) } } - ap, err := apiproxy.NewAPIServerProxy(logger.Named("apiserver-proxy"), restConfig, ts, authMode) + + // Setup for the API server proxy. + authMode := true + if cfg.Parsed.APIServerProxy != nil && cfg.Parsed.APIServerProxy.AuthMode.EqualBool(false) { + authMode = false + } + ap, err := apiproxy.NewAPIServerProxy(logger.Named("apiserver-proxy"), restConfig, ts, authMode, false) if err != nil { return fmt.Errorf("error creating api server proxy: %w", err) } - // TODO(tomhjp): Work out whether we should use TS_CERT_SHARE_MODE or not, - // and possibly issue certs upfront here before serving. group.Go(func() error { - if err := ap.Run(groupCtx); err != nil { + if err := ap.Run(serveCtx); err != nil { return fmt.Errorf("error running API server proxy: %w", err) } return nil }) - return group.Wait() + for { + select { + case <-ctx.Done(): + // Context cancelled, exit. 
+ logger.Info("Context cancelled, exiting") + shutdownCtx, shutdownCancel := context.WithTimeout(serveCtx, 20*time.Second) + unadvertiseErr := services.EnsureServicesNotAdvertised(shutdownCtx, lc, logger.Infof) + shutdownCancel() + serveCancel() + return errors.Join(unadvertiseErr, group.Wait()) + case cfg = <-cfgChan: + // Handle config reload. + // TODO(tomhjp): Make auth mode reloadable. + var prefs ipn.MaskedPrefs + cfgLogger := logger + currentPrefs, err := lc.GetPrefs(ctx) + if err != nil { + return fmt.Errorf("error getting current prefs: %w", err) + } + if !slices.Equal(currentPrefs.AdvertiseServices, cfg.Parsed.AdvertiseServices) { + cfgLogger = cfgLogger.With("AdvertiseServices", fmt.Sprintf("%v -> %v", currentPrefs.AdvertiseServices, cfg.Parsed.AdvertiseServices)) + prefs.AdvertiseServicesSet = true + prefs.Prefs.AdvertiseServices = cfg.Parsed.AdvertiseServices + } + if cfg.Parsed.Hostname != nil && *cfg.Parsed.Hostname != currentPrefs.Hostname { + cfgLogger = cfgLogger.With("Hostname", fmt.Sprintf("%s -> %s", currentPrefs.Hostname, *cfg.Parsed.Hostname)) + prefs.HostnameSet = true + prefs.Hostname = *cfg.Parsed.Hostname + } + if cfg.Parsed.AcceptRoutes != nil && *cfg.Parsed.AcceptRoutes != currentPrefs.RouteAll { + cfgLogger = cfgLogger.With("AcceptRoutes", fmt.Sprintf("%v -> %v", currentPrefs.RouteAll, *cfg.Parsed.AcceptRoutes)) + prefs.RouteAllSet = true + prefs.Prefs.RouteAll = *cfg.Parsed.AcceptRoutes + } + if !prefs.IsEmpty() { + if _, err := lc.EditPrefs(ctx, &prefs); err != nil { + return fmt.Errorf("error editing prefs: %w", err) + } + } + if err := setServeConfig(ctx, lc, cm, apiServerProxyService(cfg)); err != nil { + return fmt.Errorf("error setting serve config: %w", err) + } + + cfgLogger.Infof("Config reloaded") + } + } } func getStateStore(path *string, logger *zap.SugaredLogger) (ipn.StateStore, error) { @@ -226,3 +338,79 @@ func getRestConfig(logger *zap.SugaredLogger) (*rest.Config, error) { return restConfig, nil } + +func apiServerProxyService(cfg *conf.Config) tailcfg.ServiceName { + if cfg.Parsed.APIServerProxy != nil && + cfg.Parsed.APIServerProxy.Enabled.EqualBool(true) && + cfg.Parsed.APIServerProxy.ServiceName != nil && + *cfg.Parsed.APIServerProxy.ServiceName != "" { + return tailcfg.ServiceName(*cfg.Parsed.APIServerProxy.ServiceName) + } + + return "" +} + +func shouldIssueCerts(cfg *conf.Config) bool { + return cfg.Parsed.APIServerProxy != nil && + cfg.Parsed.APIServerProxy.IssueCerts.EqualBool(true) +} + +// setServeConfig sets up serve config such that it's serving for the passed in +// Tailscale Service, and does nothing if it's already up to date. +func setServeConfig(ctx context.Context, lc *local.Client, cm *certs.CertManager, name tailcfg.ServiceName) error { + existingServeConfig, err := lc.GetServeConfig(ctx) + if err != nil { + return fmt.Errorf("error getting existing serve config: %w", err) + } + + // Ensure serve config is cleared if no Tailscale Service. + if name == "" { + if reflect.DeepEqual(*existingServeConfig, ipn.ServeConfig{}) { + // Already up to date. + return nil + } + + if cm != nil { + cm.EnsureCertLoops(ctx, &ipn.ServeConfig{}) + } + return lc.SetServeConfig(ctx, &ipn.ServeConfig{}) + } + + status, err := lc.StatusWithoutPeers(ctx) + if err != nil { + return fmt.Errorf("error getting local client status: %w", err) + } + serviceHostPort := ipn.HostPort(fmt.Sprintf("%s.%s:443", name.WithoutPrefix(), status.CurrentTailnet.MagicDNSSuffix)) + + serveConfig := ipn.ServeConfig{ + // Configure for the Service hostname. 
+ Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + name: { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: { + HTTPS: true, + }, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + serviceHostPort: { + Handlers: map[string]*ipn.HTTPHandler{ + "/": { + Proxy: fmt.Sprintf("http://%s:80", strings.TrimSuffix(status.Self.DNSName, ".")), + }, + }, + }, + }, + }, + }, + } + + if reflect.DeepEqual(*existingServeConfig, serveConfig) { + // Already up to date. + return nil + } + + if cm != nil { + cm.EnsureCertLoops(ctx, &serveConfig) + } + return lc.SetServeConfig(ctx, &serveConfig) +} diff --git a/internal/client/tailscale/vip_service.go b/internal/client/tailscale/vip_service.go index 64fcfdf5e..48c59ce45 100644 --- a/internal/client/tailscale/vip_service.go +++ b/internal/client/tailscale/vip_service.go @@ -36,6 +36,11 @@ type VIPService struct { Tags []string `json:"tags,omitempty"` } +// VIPServiceList represents the JSON response to the list VIP Services API. +type VIPServiceList struct { + VIPServices []VIPService `json:"vipServices"` +} + // GetVIPService retrieves a VIPService by its name. It returns 404 if the VIPService is not found. func (client *Client) GetVIPService(ctx context.Context, name tailcfg.ServiceName) (*VIPService, error) { path := client.BuildTailnetURL("vip-services", name.String()) @@ -59,6 +64,29 @@ func (client *Client) GetVIPService(ctx context.Context, name tailcfg.ServiceNam return svc, nil } +// ListVIPServices retrieves all existing Services and returns them as a list. +func (client *Client) ListVIPServices(ctx context.Context) (*VIPServiceList, error) { + path := client.BuildTailnetURL("vip-services") + req, err := http.NewRequestWithContext(ctx, httpm.GET, path, nil) + if err != nil { + return nil, fmt.Errorf("error creating new HTTP request: %w", err) + } + b, resp, err := SendRequest(client, req) + if err != nil { + return nil, fmt.Errorf("error making Tailsale API request: %w", err) + } + // If status code was not successful, return the error. + // TODO: Change the check for the StatusCode to include other 2XX success codes. + if resp.StatusCode != http.StatusOK { + return nil, HandleErrorResponse(b, resp) + } + result := &VIPServiceList{} + if err := json.Unmarshal(b, result); err != nil { + return nil, err + } + return result, nil +} + // CreateOrUpdateVIPService creates or updates a VIPService by its name. Caller must ensure that, if the // VIPService already exists, the VIPService is fetched first to ensure that any auto-allocated IP addresses are not // lost during the update. If the VIPService was created without any IP addresses explicitly set (so that they were diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 14025bbb4..a9ad514e7 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -394,8 +394,8 @@ func (s *Store) canPatchSecret(secret string) bool { // certSecretSelector returns a label selector that can be used to list all // Secrets that aren't Tailscale state Secrets and contain TLS certificates for // HTTPS endpoints that this node serves. -// Currently (3/2025) this only applies to the Kubernetes Operator's ingress -// ProxyGroup. +// Currently (7/2025) this only applies to the Kubernetes Operator's ProxyGroup +// when spec.Type is "ingress" or "kube-apiserver". 
func (s *Store) certSecretSelector() map[string]string { if s.podName == "" { return map[string]string{} @@ -406,7 +406,7 @@ func (s *Store) certSecretSelector() map[string]string { } pgName := s.podName[:p] return map[string]string{ - kubetypes.LabelSecretType: "certs", + kubetypes.LabelSecretType: kubetypes.LabelSecretTypeCerts, kubetypes.LabelManaged: "true", "tailscale.com/proxy-group": pgName, } diff --git a/ipn/store/kubestore/store_kube_test.go b/ipn/store/kubestore/store_kube_test.go index 0d709264e..9a49f3028 100644 --- a/ipn/store/kubestore/store_kube_test.go +++ b/ipn/store/kubestore/store_kube_test.go @@ -17,6 +17,7 @@ import ( "tailscale.com/ipn/store/mem" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" + "tailscale.com/kube/kubetypes" ) func TestWriteState(t *testing.T) { @@ -516,7 +517,7 @@ func TestNewWithClient(t *testing.T) { ) certSecretsLabels := map[string]string{ - "tailscale.com/secret-type": "certs", + "tailscale.com/secret-type": kubetypes.LabelSecretTypeCerts, "tailscale.com/managed": "true", "tailscale.com/proxy-group": "ingress-proxies", } @@ -582,7 +583,7 @@ func TestNewWithClient(t *testing.T) { makeSecret("app2.tailnetxyz.ts.net", certSecretsLabels, "2"), makeSecret("some-other-secret", nil, "3"), makeSecret("app3.other-proxies.ts.net", map[string]string{ - "tailscale.com/secret-type": "certs", + "tailscale.com/secret-type": kubetypes.LabelSecretTypeCerts, "tailscale.com/managed": "true", "tailscale.com/proxy-group": "some-other-proxygroup", }, "4"), @@ -606,7 +607,7 @@ func TestNewWithClient(t *testing.T) { makeSecret("app2.tailnetxyz.ts.net", certSecretsLabels, "2"), makeSecret("some-other-secret", nil, "3"), makeSecret("app3.other-proxies.ts.net", map[string]string{ - "tailscale.com/secret-type": "certs", + "tailscale.com/secret-type": kubetypes.LabelSecretTypeCerts, "tailscale.com/managed": "true", "tailscale.com/proxy-group": "some-other-proxygroup", }, "4"), diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index d33c088de..e079e984f 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -10,6 +10,7 @@ import ( "crypto/tls" "errors" "fmt" + "net" "net/http" "net/http/httputil" "net/netip" @@ -46,7 +47,7 @@ var ( // caller's Tailscale identity and the rules defined in the tailnet ACLs. // - false: the proxy is started and requests are passed through to the // Kubernetes API without any auth modifications. -func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsnet.Server, authMode bool) (*APIServerProxy, error) { +func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsnet.Server, authMode bool, https bool) (*APIServerProxy, error) { if !authMode { restConfig = rest.AnonymousClientConfig(restConfig) } @@ -85,6 +86,7 @@ func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsn log: zlog, lc: lc, authMode: authMode, + https: https, upstreamURL: u, ts: ts, } @@ -104,11 +106,6 @@ func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsn // // It return when ctx is cancelled or ServeTLS fails. 
func (ap *APIServerProxy) Run(ctx context.Context) error { - ln, err := ap.ts.Listen("tcp", ":443") - if err != nil { - return fmt.Errorf("could not listen on :443: %v", err) - } - mux := http.NewServeMux() mux.HandleFunc("/", ap.serveDefault) mux.HandleFunc("POST /api/v1/namespaces/{namespace}/pods/{pod}/exec", ap.serveExecSPDY) @@ -117,32 +114,61 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { mux.HandleFunc("GET /api/v1/namespaces/{namespace}/pods/{pod}/attach", ap.serveAttachWS) ap.hs = &http.Server{ + Handler: mux, + ErrorLog: zap.NewStdLog(ap.log.Desugar()), + } + + mode := "noauth" + if ap.authMode { + mode = "auth" + } + var tsLn net.Listener + var serve func(ln net.Listener) error + if ap.https { + var err error + tsLn, err = ap.ts.Listen("tcp", ":443") + if err != nil { + return fmt.Errorf("could not listen on :443: %w", err) + } + serve = func(ln net.Listener) error { + return ap.hs.ServeTLS(ln, "", "") + } + // Kubernetes uses SPDY for exec and port-forward, however SPDY is // incompatible with HTTP/2; so disable HTTP/2 in the proxy. - TLSConfig: &tls.Config{ + ap.hs.TLSConfig = &tls.Config{ GetCertificate: ap.lc.GetCertificate, NextProtos: []string{"http/1.1"}, - }, - TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: mux, + } + ap.hs.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) + } else { + var err error + tsLn, err = ap.ts.Listen("tcp", ":80") + if err != nil { + return fmt.Errorf("could not listen on :80: %w", err) + } + serve = ap.hs.Serve } errs := make(chan error) go func() { - ap.log.Infof("API server proxy is listening on %s with auth mode: %v", ln.Addr(), ap.authMode) - if err := ap.hs.ServeTLS(ln, "", ""); err != nil && err != http.ErrServerClosed { - errs <- fmt.Errorf("failed to serve: %w", err) + ap.log.Infof("API server proxy in %s mode is listening on tailnet addresses %s", mode, tsLn.Addr()) + if err := serve(tsLn); err != nil && err != http.ErrServerClosed { + errs <- fmt.Errorf("error serving: %w", err) } }() select { case <-ctx.Done(): - shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - return ap.hs.Shutdown(shutdownCtx) case err := <-errs: + ap.hs.Close() return err } + + // Graceful shutdown with a timeout of 10s. + shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + return ap.hs.Shutdown(shutdownCtx) } // APIServerProxy is an [net/http.Handler] that authenticates requests using the Tailscale @@ -152,7 +178,8 @@ type APIServerProxy struct { lc *local.Client rp *httputil.ReverseProxy - authMode bool + authMode bool // Whether to run with impersonation using caller's tailnet identity. + https bool // Whether to serve on https for the device hostname; true for k8s-operator, false for k8s-proxy. ts *tsnet.Server hs *http.Server upstreamURL *url.URL @@ -181,13 +208,13 @@ func (ap *APIServerProxy) serveExecWS(w http.ResponseWriter, r *http.Request) { ap.sessionForProto(w, r, ksr.ExecSessionType, ksr.WSProtocol) } -// serveExecSPDY serves '/attach' requests for sessions streamed over SPDY, +// serveAttachSPDY serves '/attach' requests for sessions streamed over SPDY, // optionally configuring the kubectl exec sessions to be recorded. 
func (ap *APIServerProxy) serveAttachSPDY(w http.ResponseWriter, r *http.Request) { ap.sessionForProto(w, r, ksr.AttachSessionType, ksr.SPDYProtocol) } -// serveExecWS serves '/attach' requests for sessions streamed over WebSocket, +// serveAttachWS serves '/attach' requests for sessions streamed over WebSocket, // optionally configuring the kubectl exec sessions to be recorded. func (ap *APIServerProxy) serveAttachWS(w http.ResponseWriter, r *http.Request) { ap.sessionForProto(w, r, ksr.AttachSessionType, ksr.WSProtocol) diff --git a/k8s-operator/api.md b/k8s-operator/api.md index c09152da6..cd36798d6 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -342,6 +342,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `mode` _[APIServerProxyMode](#apiserverproxymode)_ | Mode to run the API server proxy in. Supported modes are auth and noauth.
                In auth mode, requests from the tailnet proxied over to the Kubernetes
                API server are additionally impersonated using the sender's tailnet identity.
                If not specified, defaults to auth mode. | | Enum: [auth noauth]
                Type: string
                | +| `hostname` _string_ | Hostname is the hostname with which to expose the Kubernetes API server
                proxies. Must be a valid DNS label no longer than 63 characters. If not
                specified, the name of the ProxyGroup is used as the hostname. Must be
                unique across the whole tailnet. | | Pattern: `^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$`
                Type: string
                | #### LabelValue @@ -610,15 +611,22 @@ _Appears in:_ ProxyGroup defines a set of Tailscale devices that will act as proxies. -Currently only egress ProxyGroups are supported. +Depending on spec.Type, it can be a group of egress, ingress, or kube-apiserver +proxies. In addition to running a highly available set of proxies, ingress +and egress ProxyGroups also allow for serving many annotated Services from a +single set of proxies to minimise resource consumption. -Use the tailscale.com/proxy-group annotation on a Service to specify that -the egress proxy should be implemented by a ProxyGroup instead of a single -dedicated proxy. In addition to running a highly available set of proxies, -ProxyGroup also allows for serving many annotated Services from a single -set of proxies to minimise resource consumption. +For ingress and egress, use the tailscale.com/proxy-group annotation on a +Service to specify that the proxy should be implemented by a ProxyGroup +instead of a single dedicated proxy. -More info: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress +More info: +* https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress +* https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress + +For kube-apiserver, the ProxyGroup is a standalone resource. Use the +spec.kubeAPIServer field to configure options specific to the kube-apiserver +ProxyGroup type. @@ -690,8 +698,9 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the ProxyGroup
                resources. Known condition types are `ProxyGroupReady`, `ProxyGroupAvailable`.
                `ProxyGroupReady` indicates all ProxyGroup resources are fully reconciled
                and ready. `ProxyGroupAvailable` indicates that at least one proxy is
                ready to serve traffic. | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the ProxyGroup
                resources. Known condition types include `ProxyGroupReady` and
                `ProxyGroupAvailable`.
                * `ProxyGroupReady` indicates all ProxyGroup resources are reconciled and
                all expected conditions are true.
                * `ProxyGroupAvailable` indicates that at least one proxy is ready to
                serve traffic.
                For ProxyGroups of type kube-apiserver, there are two additional conditions:
                * `KubeAPIServerProxyConfigured` indicates that at least one API server
                proxy is configured and ready to serve traffic.
                * `KubeAPIServerProxyValid` indicates that spec.kubeAPIServer config is
                valid. | | | | `devices` _[TailnetDevice](#tailnetdevice) array_ | List of tailnet devices associated with the ProxyGroup StatefulSet. | | | +| `url` _string_ | URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if
                any. Only applies to ProxyGroups of type kube-apiserver. | | | #### ProxyGroupType diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index 88fd07346..ce6a1411b 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -226,4 +226,7 @@ const ( IngressSvcValid ConditionType = `TailscaleIngressSvcValid` IngressSvcConfigured ConditionType = `TailscaleIngressSvcConfigured` + + KubeAPIServerProxyValid ConditionType = `KubeAPIServerProxyValid` // The kubeAPIServer config for the ProxyGroup is valid. + KubeAPIServerProxyConfigured ConditionType = `KubeAPIServerProxyConfigured` // At least one of the ProxyGroup's Pods is advertising the kube-apiserver proxy's hostname. ) diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index ad5b11361..28fd9e009 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -13,19 +13,27 @@ import ( // +kubebuilder:subresource:status // +kubebuilder:resource:scope=Cluster,shortName=pg // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "ProxyGroupReady")].reason`,description="Status of the deployed ProxyGroup resources." +// +kubebuilder:printcolumn:name="URL",type="string",JSONPath=`.status.url`,description="URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if any. Only applies to ProxyGroups of type kube-apiserver." // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=`.spec.type`,description="ProxyGroup type." // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // ProxyGroup defines a set of Tailscale devices that will act as proxies. -// Currently only egress ProxyGroups are supported. +// Depending on spec.Type, it can be a group of egress, ingress, or kube-apiserver +// proxies. In addition to running a highly available set of proxies, ingress +// and egress ProxyGroups also allow for serving many annotated Services from a +// single set of proxies to minimise resource consumption. // -// Use the tailscale.com/proxy-group annotation on a Service to specify that -// the egress proxy should be implemented by a ProxyGroup instead of a single -// dedicated proxy. In addition to running a highly available set of proxies, -// ProxyGroup also allows for serving many annotated Services from a single -// set of proxies to minimise resource consumption. +// For ingress and egress, use the tailscale.com/proxy-group annotation on a +// Service to specify that the proxy should be implemented by a ProxyGroup +// instead of a single dedicated proxy. // -// More info: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress +// More info: +// * https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress +// * https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress +// +// For kube-apiserver, the ProxyGroup is a standalone resource. Use the +// spec.kubeAPIServer field to configure options specific to the kube-apiserver +// ProxyGroup type. type ProxyGroup struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -93,10 +101,20 @@ type ProxyGroupSpec struct { type ProxyGroupStatus struct { // List of status conditions to indicate the status of the ProxyGroup - // resources. Known condition types are `ProxyGroupReady`, `ProxyGroupAvailable`. 
- // `ProxyGroupReady` indicates all ProxyGroup resources are fully reconciled - // and ready. `ProxyGroupAvailable` indicates that at least one proxy is - // ready to serve traffic. + // resources. Known condition types include `ProxyGroupReady` and + // `ProxyGroupAvailable`. + // + // * `ProxyGroupReady` indicates all ProxyGroup resources are reconciled and + // all expected conditions are true. + // * `ProxyGroupAvailable` indicates that at least one proxy is ready to + // serve traffic. + // + // For ProxyGroups of type kube-apiserver, there are two additional conditions: + // + // * `KubeAPIServerProxyConfigured` indicates that at least one API server + // proxy is configured and ready to serve traffic. + // * `KubeAPIServerProxyValid` indicates that spec.kubeAPIServer config is + // valid. // // +listType=map // +listMapKey=type @@ -108,6 +126,11 @@ type ProxyGroupStatus struct { // +listMapKey=hostname // +optional Devices []TailnetDevice `json:"devices,omitempty"` + + // URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if + // any. Only applies to ProxyGroups of type kube-apiserver. + // +optional + URL string `json:"url,omitempty"` } type TailnetDevice struct { @@ -157,4 +180,13 @@ type KubeAPIServerConfig struct { // If not specified, defaults to auth mode. // +optional Mode *APIServerProxyMode `json:"mode,omitempty"` + + // Hostname is the hostname with which to expose the Kubernetes API server + // proxies. Must be a valid DNS label no longer than 63 characters. If not + // specified, the name of the ProxyGroup is used as the hostname. Must be + // unique across the whole tailnet. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern=`^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$` + // +optional + Hostname string `json:"hostname,omitempty"` } diff --git a/k8s-operator/conditions.go b/k8s-operator/conditions.go index f6858c005..ae465a728 100644 --- a/k8s-operator/conditions.go +++ b/k8s-operator/conditions.go @@ -146,6 +146,16 @@ func ProxyGroupAvailable(pg *tsapi.ProxyGroup) bool { return cond != nil && cond.Status == metav1.ConditionTrue } +func KubeAPIServerProxyValid(pg *tsapi.ProxyGroup) (valid bool, set bool) { + cond := proxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid) + return cond != nil && cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == pg.Generation, cond != nil +} + +func KubeAPIServerProxyConfigured(pg *tsapi.ProxyGroup) bool { + cond := proxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured) + return cond != nil && cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == pg.Generation +} + func proxyGroupCondition(pg *tsapi.ProxyGroup, condType tsapi.ConditionType) *metav1.Condition { idx := xslices.IndexFunc(pg.Status.Conditions, func(cond metav1.Condition) bool { return cond.Type == string(condType) diff --git a/cmd/containerboot/certs.go b/kube/certs/certs.go similarity index 60% rename from cmd/containerboot/certs.go rename to kube/certs/certs.go index 504ef7988..8e2e5fb43 100644 --- a/cmd/containerboot/certs.go +++ b/kube/certs/certs.go @@ -1,29 +1,32 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux - -package main +// Package certs implements logic to help multiple Kubernetes replicas share TLS +// certs for a common Tailscale Service. 
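The certLoops map in the CertManager above is a start/stop bookkeeping pattern: one goroutine per managed domain, cancelled when the domain is no longer desired. A standalone sketch of that pattern (not the operator's code; all names and the example domains are hypothetical):

package main

import (
	"context"
	"fmt"
	"time"
)

// loops tracks one cancellable goroutine per domain.
type loops struct {
	cancels map[string]context.CancelFunc
}

// ensure starts a loop for each newly desired domain and cancels loops for
// domains that are no longer desired.
func (l *loops) ensure(ctx context.Context, want []string, run func(ctx context.Context, domain string)) {
	desired := map[string]bool{}
	for _, d := range want {
		desired[d] = true
		if _, ok := l.cancels[d]; !ok {
			loopCtx, cancel := context.WithCancel(ctx)
			l.cancels[d] = cancel
			go run(loopCtx, d)
		}
	}
	for d, cancel := range l.cancels {
		if !desired[d] {
			cancel()
			delete(l.cancels, d)
		}
	}
}

func main() {
	l := &loops{cancels: map[string]context.CancelFunc{}}
	run := func(ctx context.Context, domain string) {
		<-ctx.Done()
		fmt.Println("stopped loop for", domain)
	}
	ctx := context.Background()
	l.ensure(ctx, []string{"a.example.ts.net", "b.example.ts.net"}, run)
	l.ensure(ctx, []string{"a.example.ts.net"}, run) // stops b's loop
	time.Sleep(100 * time.Millisecond)
}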
+package certs import ( "context" "fmt" - "log" "net" + "slices" "sync" "time" "tailscale.com/ipn" + "tailscale.com/kube/localclient" + "tailscale.com/types/logger" "tailscale.com/util/goroutines" "tailscale.com/util/mak" ) -// certManager is responsible for issuing certificates for known domains and for +// CertManager is responsible for issuing certificates for known domains and for // maintaining a loop that re-attempts issuance daily. // Currently cert manager logic is only run on ingress ProxyGroup replicas that are responsible for managing certs for // HA Ingress HTTPS endpoints ('write' replicas). -type certManager struct { - lc localClient +type CertManager struct { + lc localclient.LocalClient + logf logger.Logf tracker goroutines.Tracker // tracks running goroutines mu sync.Mutex // guards the following // certLoops contains a map of DNS names, for which we currently need to @@ -32,11 +35,18 @@ type certManager struct { certLoops map[string]context.CancelFunc } -// ensureCertLoops ensures that, for all currently managed Service HTTPS +func NewCertManager(lc localclient.LocalClient, logf logger.Logf) *CertManager { + return &CertManager{ + lc: lc, + logf: logf, + } +} + +// EnsureCertLoops ensures that, for all currently managed Service HTTPS // endpoints, there is a cert loop responsible for issuing and ensuring the // renewal of the TLS certs. // ServeConfig must not be nil. -func (cm *certManager) ensureCertLoops(ctx context.Context, sc *ipn.ServeConfig) error { +func (cm *CertManager) EnsureCertLoops(ctx context.Context, sc *ipn.ServeConfig) error { if sc == nil { return fmt.Errorf("[unexpected] ensureCertLoops called with nil ServeConfig") } @@ -87,12 +97,18 @@ func (cm *certManager) ensureCertLoops(ctx context.Context, sc *ipn.ServeConfig) // renewed at that point. Renewal here is needed to prevent the shared certs from expiry in edge cases where the 'write' // replica does not get any HTTPS requests. // https://letsencrypt.org/docs/integration-guide/#retrying-failures -func (cm *certManager) runCertLoop(ctx context.Context, domain string) { +func (cm *CertManager) runCertLoop(ctx context.Context, domain string) { const ( normalInterval = 24 * time.Hour // regular renewal check initialRetry = 1 * time.Minute // initial backoff after a failure maxRetryInterval = 24 * time.Hour // max backoff period ) + + if err := cm.waitForCertDomain(ctx, domain); err != nil { + // Best-effort, log and continue with the issuing loop. + cm.logf("error waiting for cert domain %s: %v", domain, err) + } + timer := time.NewTimer(0) // fire off timer immediately defer timer.Stop() retryCount := 0 @@ -101,38 +117,31 @@ func (cm *certManager) runCertLoop(ctx context.Context, domain string) { case <-ctx.Done(): return case <-timer.C: - // We call the certificate endpoint, but don't do anything - // with the returned certs here. - // The call to the certificate endpoint will ensure that - // certs are issued/renewed as needed and stored in the - // relevant state store. For example, for HA Ingress - // 'write' replica, the cert and key will be stored in a - // Kubernetes Secret named after the domain for which we - // are issuing. - // Note that renewals triggered by the call to the - // certificates endpoint here and by renewal check - // triggered during a call to node's HTTPS endpoint - // share the same state/renewal lock mechanism, so we - // should not run into redundant issuances during - // concurrent renewal checks. 
- // TODO(irbekrm): maybe it is worth adding a new - // issuance endpoint that explicitly only triggers - // issuance and stores certs in the relevant store, but - // does not return certs to the caller? + // We call the certificate endpoint, but don't do anything with the + // returned certs here. The call to the certificate endpoint will + // ensure that certs are issued/renewed as needed and stored in the + // relevant state store. For example, for HA Ingress 'write' replica, + // the cert and key will be stored in a Kubernetes Secret named after + // the domain for which we are issuing. + // + // Note that renewals triggered by the call to the certificates + // endpoint here and by renewal check triggered during a call to + // node's HTTPS endpoint share the same state/renewal lock mechanism, + // so we should not run into redundant issuances during concurrent + // renewal checks. - // An issuance holds a shared lock, so we need to avoid - // a situation where other services cannot issue certs - // because a single one is holding the lock. + // An issuance holds a shared lock, so we need to avoid a situation + // where other services cannot issue certs because a single one is + // holding the lock. ctxT, cancel := context.WithTimeout(ctx, time.Second*300) - defer cancel() _, _, err := cm.lc.CertPair(ctxT, domain) + cancel() if err != nil { - log.Printf("error refreshing certificate for %s: %v", domain, err) + cm.logf("error refreshing certificate for %s: %v", domain, err) } var nextInterval time.Duration - // TODO(irbekrm): distinguish between LE rate limit - // errors and other error types like transient network - // errors. + // TODO(irbekrm): distinguish between LE rate limit errors and other + // error types like transient network errors. if err == nil { retryCount = 0 nextInterval = normalInterval @@ -147,10 +156,34 @@ func (cm *certManager) runCertLoop(ctx context.Context, domain string) { backoff = maxRetryInterval } nextInterval = backoff - log.Printf("Error refreshing certificate for %s (retry %d): %v. Will retry in %v\n", + cm.logf("Error refreshing certificate for %s (retry %d): %v. Will retry in %v\n", domain, retryCount, err, nextInterval) } timer.Reset(nextInterval) } } } + +// waitForCertDomain ensures the requested domain is in the list of allowed +// domains before issuing the cert for the first time. 
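The retry handling in runCertLoop above boils down to: a 24-hour check interval when healthy, exponential backoff starting at one minute after failures, capped at 24 hours. A simplified standalone model of that policy (this is a hedged stand-in, not the patch's exact backoff arithmetic):

package main

import (
	"errors"
	"fmt"
	"time"
)

// nextInterval returns how long to wait before the next issuance attempt.
// retryCount is the number of consecutive failures so far (>= 1 when lastErr
// is non-nil).
func nextInterval(retryCount int, lastErr error) time.Duration {
	const (
		normalInterval   = 24 * time.Hour
		initialRetry     = 1 * time.Minute
		maxRetryInterval = 24 * time.Hour
	)
	if lastErr == nil {
		return normalInterval
	}
	backoff := initialRetry << (retryCount - 1) // double on each failure
	if backoff > maxRetryInterval || backoff <= 0 {
		backoff = maxRetryInterval
	}
	return backoff
}

func main() {
	for i := 1; i <= 12; i++ {
		fmt.Println(i, nextInterval(i, errors.New("issuance failed")))
	}
	fmt.Println("healthy:", nextInterval(0, nil))
}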
+func (cm *CertManager) waitForCertDomain(ctx context.Context, domain string) error { + w, err := cm.lc.WatchIPNBus(ctx, ipn.NotifyInitialNetMap) + if err != nil { + return fmt.Errorf("error watching IPN bus: %w", err) + } + defer w.Close() + + for { + n, err := w.Next() + if err != nil { + return err + } + if n.NetMap == nil { + continue + } + + if slices.Contains(n.NetMap.DNS.CertDomains, domain) { + return nil + } + } +} diff --git a/cmd/containerboot/certs_test.go b/kube/certs/certs_test.go similarity index 89% rename from cmd/containerboot/certs_test.go rename to kube/certs/certs_test.go index 577311ea3..8434f21ae 100644 --- a/cmd/containerboot/certs_test.go +++ b/kube/certs/certs_test.go @@ -1,17 +1,18 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux - -package main +package certs import ( "context" + "log" "testing" "time" "tailscale.com/ipn" + "tailscale.com/kube/localclient" "tailscale.com/tailcfg" + "tailscale.com/types/netmap" ) // TestEnsureCertLoops tests that the certManager correctly starts and stops @@ -161,8 +162,28 @@ func TestEnsureCertLoops(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cm := &certManager{ - lc: &fakeLocalClient{}, + notifyChan := make(chan ipn.Notify) + go func() { + for { + notifyChan <- ipn.Notify{ + NetMap: &netmap.NetworkMap{ + DNS: tailcfg.DNSConfig{ + CertDomains: []string{ + "my-app.tailnetxyz.ts.net", + "my-other-app.tailnetxyz.ts.net", + }, + }, + }, + } + } + }() + cm := &CertManager{ + lc: &localclient.FakeLocalClient{ + FakeIPNBusWatcher: localclient.FakeIPNBusWatcher{ + NotifyChan: notifyChan, + }, + }, + logf: log.Printf, certLoops: make(map[string]context.CancelFunc), } @@ -179,7 +200,7 @@ func TestEnsureCertLoops(t *testing.T) { } })() - err := cm.ensureCertLoops(ctx, tt.initialConfig) + err := cm.EnsureCertLoops(ctx, tt.initialConfig) if (err != nil) != tt.wantErr { t.Fatalf("ensureCertLoops() error = %v", err) } @@ -189,7 +210,7 @@ func TestEnsureCertLoops(t *testing.T) { } if tt.updatedConfig != nil { - if err := cm.ensureCertLoops(ctx, tt.updatedConfig); err != nil { + if err := cm.EnsureCertLoops(ctx, tt.updatedConfig); err != nil { t.Fatalf("ensureCertLoops() error on update = %v", err) } diff --git a/kube/k8s-proxy/conf/conf.go b/kube/k8s-proxy/conf/conf.go index 8882360c5..a32e0c03e 100644 --- a/kube/k8s-proxy/conf/conf.go +++ b/kube/k8s-proxy/conf/conf.go @@ -9,11 +9,12 @@ package conf import ( "encoding/json" + "errors" "fmt" "net/netip" - "os" "github.com/tailscale/hujson" + "tailscale.com/tailcfg" "tailscale.com/types/opt" ) @@ -21,12 +22,11 @@ const v1Alpha1 = "v1alpha1" // Config describes a config file. type Config struct { - Path string // disk path of HuJSON - Raw []byte // raw bytes from disk, in HuJSON form + Raw []byte // raw bytes, in HuJSON form Std []byte // standardized JSON form Version string // "v1alpha1" - // Parsed is the parsed config, converted from its on-disk version to the + // Parsed is the parsed config, converted from its raw bytes version to the // latest known format. Parsed ConfigV1Alpha1 } @@ -48,47 +48,49 @@ type VersionedConfig struct { } type ConfigV1Alpha1 struct { - AuthKey *string `json:",omitempty"` // Tailscale auth key to use. - Hostname *string `json:",omitempty"` // Tailscale device hostname. - State *string `json:",omitempty"` // Path to the Tailscale state. - LogLevel *string `json:",omitempty"` // "debug", "info". Defaults to "info". - App *string `json:",omitempty"` // e.g. 
kubetypes.AppProxyGroupKubeAPIServer - KubeAPIServer *KubeAPIServer `json:",omitempty"` // Config specific to the API Server proxy. - ServerURL *string `json:",omitempty"` // URL of the Tailscale coordination server. - AcceptRoutes *bool `json:",omitempty"` // Accepts routes advertised by other Tailscale nodes. + AuthKey *string `json:",omitempty"` // Tailscale auth key to use. + State *string `json:",omitempty"` // Path to the Tailscale state. + LogLevel *string `json:",omitempty"` // "debug", "info". Defaults to "info". + App *string `json:",omitempty"` // e.g. kubetypes.AppProxyGroupKubeAPIServer + ServerURL *string `json:",omitempty"` // URL of the Tailscale coordination server. // StaticEndpoints are additional, user-defined endpoints that this node // should advertise amongst its wireguard endpoints. StaticEndpoints []netip.AddrPort `json:",omitempty"` + + // TODO(tomhjp): The remaining fields should all be reloadable during + // runtime, but currently missing most of the APIServerProxy fields. + Hostname *string `json:",omitempty"` // Tailscale device hostname. + AcceptRoutes *bool `json:",omitempty"` // Accepts routes advertised by other Tailscale nodes. + AdvertiseServices []string `json:",omitempty"` // Tailscale Services to advertise. + APIServerProxy *APIServerProxyConfig `json:",omitempty"` // Config specific to the API Server proxy. } -type KubeAPIServer struct { - AuthMode opt.Bool `json:",omitempty"` +type APIServerProxyConfig struct { + Enabled opt.Bool `json:",omitempty"` // Whether to enable the API Server proxy. + AuthMode opt.Bool `json:",omitempty"` // Run in auth or noauth mode. + ServiceName *tailcfg.ServiceName `json:",omitempty"` // Name of the Tailscale Service to advertise. + IssueCerts opt.Bool `json:",omitempty"` // Whether this replica should issue TLS certs for the Tailscale Service. } // Load reads and parses the config file at the provided path on disk. -func Load(path string) (c Config, err error) { - c.Path = path - - c.Raw, err = os.ReadFile(path) - if err != nil { - return c, fmt.Errorf("error reading config file %q: %w", path, err) - } +func Load(raw []byte) (c Config, err error) { + c.Raw = raw c.Std, err = hujson.Standardize(c.Raw) if err != nil { - return c, fmt.Errorf("error parsing config file %q HuJSON/JSON: %w", path, err) + return c, fmt.Errorf("error parsing config as HuJSON/JSON: %w", err) } var ver VersionedConfig if err := json.Unmarshal(c.Std, &ver); err != nil { - return c, fmt.Errorf("error parsing config file %q: %w", path, err) + return c, fmt.Errorf("error parsing config: %w", err) } rootV1Alpha1 := (ver.Version == v1Alpha1) backCompatV1Alpha1 := (ver.V1Alpha1 != nil) switch { case ver.Version == "": - return c, fmt.Errorf("error parsing config file %q: no \"version\" field provided", path) + return c, errors.New("error parsing config: no \"version\" field provided") case rootV1Alpha1 && backCompatV1Alpha1: // Exactly one of these should be set. 
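The reworked conf.Load above takes raw bytes and standardizes HuJSON before strict JSON parsing. A minimal standalone sketch of that pattern, using a hypothetical config shape rather than the real VersionedConfig:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/tailscale/hujson"
)

// versioned is a hypothetical stand-in for the real versioned config struct.
type versioned struct {
	Version string `json:"version"`
}

func main() {
	raw := []byte(`{
		// comments and trailing commas are allowed in HuJSON
		"version": "v1alpha1",
	}`)
	// Standardize strips HuJSON extensions, yielding plain JSON.
	std, err := hujson.Standardize(raw)
	if err != nil {
		panic(err)
	}
	var v versioned
	if err := json.Unmarshal(std, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Version) // v1alpha1
}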
- return c, fmt.Errorf("error parsing config file %q: both root and v1alpha1 config provided", path) + return c, errors.New("error parsing config: both root and v1alpha1 config provided") case rootV1Alpha1 != backCompatV1Alpha1: c.Version = v1Alpha1 switch { @@ -100,7 +102,7 @@ func Load(path string) (c Config, err error) { c.Parsed = ConfigV1Alpha1{} } default: - return c, fmt.Errorf("error parsing config file %q: unsupported \"version\" value %q; want \"%s\"", path, ver.Version, v1Alpha1) + return c, fmt.Errorf("error parsing config: unsupported \"version\" value %q; want \"%s\"", ver.Version, v1Alpha1) } return c, nil diff --git a/kube/k8s-proxy/conf/conf_test.go b/kube/k8s-proxy/conf/conf_test.go index a47391dc9..3082be1ba 100644 --- a/kube/k8s-proxy/conf/conf_test.go +++ b/kube/k8s-proxy/conf/conf_test.go @@ -6,8 +6,6 @@ package conf import ( - "os" - "path/filepath" "strings" "testing" @@ -57,12 +55,7 @@ func TestVersionedConfig(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - dir := t.TempDir() - path := filepath.Join(dir, "config.json") - if err := os.WriteFile(path, []byte(tc.inputConfig), 0644); err != nil { - t.Fatalf("failed to write config file: %v", err) - } - cfg, err := Load(path) + cfg, err := Load([]byte(tc.inputConfig)) switch { case tc.expectedError == "" && err != nil: t.Fatalf("unexpected error: %v", err) diff --git a/kube/kubetypes/types.go b/kube/kubetypes/types.go index 20b005014..5e7d4cd1f 100644 --- a/kube/kubetypes/types.go +++ b/kube/kubetypes/types.go @@ -54,4 +54,10 @@ const ( LabelManaged = "tailscale.com/managed" LabelSecretType = "tailscale.com/secret-type" // "config", "state" "certs" + + LabelSecretTypeConfig = "config" + LabelSecretTypeState = "state" + LabelSecretTypeCerts = "certs" + + KubeAPIServerConfigFile = "config.hujson" ) diff --git a/kube/localclient/fake-client.go b/kube/localclient/fake-client.go new file mode 100644 index 000000000..7f0a08316 --- /dev/null +++ b/kube/localclient/fake-client.go @@ -0,0 +1,35 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package localclient + +import ( + "context" + "fmt" + + "tailscale.com/ipn" +) + +type FakeLocalClient struct { + FakeIPNBusWatcher +} + +func (f *FakeLocalClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) { + return &f.FakeIPNBusWatcher, nil +} + +func (f *FakeLocalClient) CertPair(ctx context.Context, domain string) ([]byte, []byte, error) { + return nil, nil, fmt.Errorf("CertPair not implemented") +} + +type FakeIPNBusWatcher struct { + NotifyChan chan ipn.Notify +} + +func (f *FakeIPNBusWatcher) Close() error { + return nil +} + +func (f *FakeIPNBusWatcher) Next() (ipn.Notify, error) { + return <-f.NotifyChan, nil +} diff --git a/kube/localclient/local-client.go b/kube/localclient/local-client.go new file mode 100644 index 000000000..5d541e365 --- /dev/null +++ b/kube/localclient/local-client.go @@ -0,0 +1,49 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package localclient provides an interface for all the local.Client methods +// kube needs to use, so that we can easily mock it in tests. +package localclient + +import ( + "context" + "io" + + "tailscale.com/client/local" + "tailscale.com/ipn" +) + +// LocalClient is roughly a subset of the local.Client struct's methods, used +// for easier testing. 
+type LocalClient interface { + WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) + CertIssuer +} + +// IPNBusWatcher is local.IPNBusWatcher's methods restated in an interface to +// allow for easier mocking in tests. +type IPNBusWatcher interface { + io.Closer + Next() (ipn.Notify, error) +} + +type CertIssuer interface { + CertPair(context.Context, string) ([]byte, []byte, error) +} + +// New returns a LocalClient that wraps the provided local.Client. +func New(lc *local.Client) LocalClient { + return &localClient{lc: lc} +} + +type localClient struct { + lc *local.Client +} + +func (l *localClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) { + return l.lc.WatchIPNBus(ctx, mask) +} + +func (l *localClient) CertPair(ctx context.Context, domain string) ([]byte, []byte, error) { + return l.lc.CertPair(ctx, domain) +} diff --git a/cmd/containerboot/services.go b/kube/services/services.go similarity index 74% rename from cmd/containerboot/services.go rename to kube/services/services.go index 6079128c0..a9e50975c 100644 --- a/cmd/containerboot/services.go +++ b/kube/services/services.go @@ -1,25 +1,25 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux - -package main +// Package services manages graceful shutdown of Tailscale Services advertised +// by Kubernetes clients. +package services import ( "context" "fmt" - "log" "time" "tailscale.com/client/local" "tailscale.com/ipn" + "tailscale.com/types/logger" ) -// ensureServicesNotAdvertised is a function that gets called on containerboot -// termination and ensures that any currently advertised VIPServices get -// unadvertised to give clients time to switch to another node before this one -// is shut down. -func ensureServicesNotAdvertised(ctx context.Context, lc *local.Client) error { +// EnsureServicesNotAdvertised is a function that gets called on containerboot +// or k8s-proxy termination and ensures that any currently advertised Services +// get unadvertised to give clients time to switch to another node before this +// one is shut down. +func EnsureServicesNotAdvertised(ctx context.Context, lc *local.Client, logf logger.Logf) error { prefs, err := lc.GetPrefs(ctx) if err != nil { return fmt.Errorf("error getting prefs: %w", err) @@ -28,7 +28,7 @@ func ensureServicesNotAdvertised(ctx context.Context, lc *local.Client) error { return nil } - log.Printf("unadvertising services: %v", prefs.AdvertiseServices) + logf("unadvertising services: %v", prefs.AdvertiseServices) if _, err := lc.EditPrefs(ctx, &ipn.MaskedPrefs{ AdvertiseServicesSet: true, Prefs: ipn.Prefs{ diff --git a/kube/state/state.go b/kube/state/state.go index 4831a5f5b..2605f0952 100644 --- a/kube/state/state.go +++ b/kube/state/state.go @@ -11,11 +11,13 @@ package state import ( + "context" "encoding/json" "fmt" "tailscale.com/ipn" "tailscale.com/kube/kubetypes" + klc "tailscale.com/kube/localclient" "tailscale.com/tailcfg" "tailscale.com/util/deephash" ) @@ -56,12 +58,20 @@ func SetInitialKeys(store ipn.StateStore, podUID string) error { // cancelled or it hits an error. The passed in next function is expected to be // from a local.IPNBusWatcher that is at least subscribed to // ipn.NotifyInitialNetMap. 
-func KeepKeysUpdated(store ipn.StateStore, next func() (ipn.Notify, error)) error { - var currentDeviceID, currentDeviceIPs, currentDeviceFQDN deephash.Sum +func KeepKeysUpdated(ctx context.Context, store ipn.StateStore, lc klc.LocalClient) error { + w, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialNetMap) + if err != nil { + return fmt.Errorf("error watching IPN bus: %w", err) + } + defer w.Close() + var currentDeviceID, currentDeviceIPs, currentDeviceFQDN deephash.Sum for { - n, err := next() // Blocks on a streaming LocalAPI HTTP call. + n, err := w.Next() // Blocks on a streaming LocalAPI HTTP call. if err != nil { + if err == ctx.Err() { + return nil + } return err } if n.NetMap == nil { diff --git a/kube/state/state_test.go b/kube/state/state_test.go index 0375b1c01..8701aa1b7 100644 --- a/kube/state/state_test.go +++ b/kube/state/state_test.go @@ -15,6 +15,7 @@ import ( "github.com/google/go-cmp/cmp" "tailscale.com/ipn" "tailscale.com/ipn/store" + klc "tailscale.com/kube/localclient" "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/types/netmap" @@ -100,24 +101,20 @@ func TestSetInitialStateKeys(t *testing.T) { } func TestKeepStateKeysUpdated(t *testing.T) { - store, err := store.New(logger.Discard, "mem:") - if err != nil { - t.Fatalf("error creating in-memory store: %v", err) + store := fakeStore{ + writeChan: make(chan string), } - nextWaiting := make(chan struct{}) - go func() { - <-nextWaiting // Acknowledge the initial signal. - }() - notifyCh := make(chan ipn.Notify) - next := func() (ipn.Notify, error) { - nextWaiting <- struct{}{} // Send signal to test that state is consistent. - return <-notifyCh, nil // Wait for test input. + errs := make(chan error) + notifyChan := make(chan ipn.Notify) + lc := &klc.FakeLocalClient{ + FakeIPNBusWatcher: klc.FakeIPNBusWatcher{ + NotifyChan: notifyChan, + }, } - errs := make(chan error, 1) go func() { - err := KeepKeysUpdated(store, next) + err := KeepKeysUpdated(t.Context(), store, lc) if err != nil { errs <- fmt.Errorf("keepStateKeysUpdated returned with error: %w", err) } @@ -126,16 +123,12 @@ func TestKeepStateKeysUpdated(t *testing.T) { for _, tc := range []struct { name string notify ipn.Notify - expected map[ipn.StateKey][]byte + expected []string }{ { - name: "initial_not_authed", - notify: ipn.Notify{}, - expected: map[ipn.StateKey][]byte{ - keyDeviceID: nil, - keyDeviceFQDN: nil, - keyDeviceIPs: nil, - }, + name: "initial_not_authed", + notify: ipn.Notify{}, + expected: nil, }, { name: "authed", @@ -148,10 +141,10 @@ func TestKeepStateKeysUpdated(t *testing.T) { }).View(), }, }, - expected: map[ipn.StateKey][]byte{ - keyDeviceID: []byte("TESTCTRL00000001"), - keyDeviceFQDN: []byte("test-node.test.ts.net"), - keyDeviceIPs: []byte(`["100.64.0.1","fd7a:115c:a1e0:ab12:4843:cd96:0:1"]`), + expected: []string{ + fmt.Sprintf("%s=%s", keyDeviceID, "TESTCTRL00000001"), + fmt.Sprintf("%s=%s", keyDeviceFQDN, "test-node.test.ts.net"), + fmt.Sprintf("%s=%s", keyDeviceIPs, `["100.64.0.1","fd7a:115c:a1e0:ab12:4843:cd96:0:1"]`), }, }, { @@ -165,39 +158,39 @@ func TestKeepStateKeysUpdated(t *testing.T) { }).View(), }, }, - expected: map[ipn.StateKey][]byte{ - keyDeviceID: []byte("TESTCTRL00000001"), - keyDeviceFQDN: []byte("updated.test.ts.net"), - keyDeviceIPs: []byte(`["100.64.0.250"]`), + expected: []string{ + fmt.Sprintf("%s=%s", keyDeviceFQDN, "updated.test.ts.net"), + fmt.Sprintf("%s=%s", keyDeviceIPs, `["100.64.0.250"]`), }, }, } { t.Run(tc.name, func(t *testing.T) { - // Send test input. 
- select { - case notifyCh <- tc.notify: - case <-errs: - t.Fatal("keepStateKeysUpdated returned before test input") - case <-time.After(5 * time.Second): - t.Fatal("timed out waiting for next() to be called again") - } - - // Wait for next() to be called again so we know the goroutine has - // processed the event. - select { - case <-nextWaiting: - case <-errs: - t.Fatal("keepStateKeysUpdated returned before test input") - case <-time.After(5 * time.Second): - t.Fatal("timed out waiting for next() to be called again") - } - - for key, value := range tc.expected { - got, _ := store.ReadState(key) - if !bytes.Equal(got, value) { - t.Errorf("state key %q mismatch: expected %q, got %q", key, value, got) + notifyChan <- tc.notify + for _, expected := range tc.expected { + select { + case got := <-store.writeChan: + if got != expected { + t.Errorf("expected %q, got %q", expected, got) + } + case err := <-errs: + t.Fatalf("unexpected error: %v", err) + case <-time.After(5 * time.Second): + t.Fatalf("timed out waiting for expected write %q", expected) } } }) } } + +type fakeStore struct { + writeChan chan string +} + +func (f fakeStore) ReadState(key ipn.StateKey) ([]byte, error) { + return nil, fmt.Errorf("ReadState not implemented") +} + +func (f fakeStore) WriteState(key ipn.StateKey, value []byte) error { + f.writeChan <- fmt.Sprintf("%s=%s", key, value) + return nil +} From d6d29abbb6878fc777a9a21dd631ec3a8455e4ec Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Mon, 14 Jul 2025 15:23:45 -0500 Subject: [PATCH 1116/1708] tstest/integration/testcontrol: include peer CapMaps in MapResponses Fixes #16560 Signed-off-by: Raj Singh --- tstest/integration/capmap_test.go | 147 ++++++++++++++++++ tstest/integration/testcontrol/testcontrol.go | 4 + 2 files changed, 151 insertions(+) create mode 100644 tstest/integration/capmap_test.go diff --git a/tstest/integration/capmap_test.go b/tstest/integration/capmap_test.go new file mode 100644 index 000000000..0ee05be2f --- /dev/null +++ b/tstest/integration/capmap_test.go @@ -0,0 +1,147 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package integration + +import ( + "errors" + "testing" + "time" + + "tailscale.com/tailcfg" + "tailscale.com/tstest" +) + +// TestPeerCapMap tests that the node capability map (CapMap) is included in peer information. +func TestPeerCapMap(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t) + + // Spin up two nodes. + n1 := NewTestNode(t, env) + d1 := n1.StartDaemon() + n1.AwaitListening() + n1.MustUp() + n1.AwaitRunning() + + n2 := NewTestNode(t, env) + d2 := n2.StartDaemon() + n2.AwaitListening() + n2.MustUp() + n2.AwaitRunning() + + n1.AwaitIP4() + n2.AwaitIP4() + + // Get the nodes from the control server. + nodes := env.Control.AllNodes() + if len(nodes) != 2 { + t.Fatalf("expected 2 nodes, got %d nodes", len(nodes)) + } + + // Figure out which node is which by comparing keys. + st1 := n1.MustStatus() + var tn1, tn2 *tailcfg.Node + for _, n := range nodes { + if n.Key == st1.Self.PublicKey { + tn1 = n + } else { + tn2 = n + } + } + + // Set CapMap on both nodes. + caps := make(tailcfg.NodeCapMap) + caps["example:custom"] = []tailcfg.RawMessage{`"value"`} + caps["example:enabled"] = []tailcfg.RawMessage{`true`} + + env.Control.SetNodeCapMap(tn1.Key, caps) + env.Control.SetNodeCapMap(tn2.Key, caps) + + // Check that nodes see each other's CapMap. 
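The assertion that follows relies on polling until a condition holds or a timeout expires. A simplified standalone stand-in for the tstest.WaitFor helper used just below (names and intervals here are illustrative, not the real helper):

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitFor polls fn until it returns nil or the timeout elapses, returning the
// last error otherwise.
func waitFor(timeout time.Duration, fn func() error) error {
	deadline := time.Now().Add(timeout)
	var err error
	for {
		if err = fn(); err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return err
		}
		time.Sleep(50 * time.Millisecond)
	}
}

func main() {
	start := time.Now()
	err := waitFor(time.Second, func() error {
		if time.Since(start) < 200*time.Millisecond {
			return errors.New("not ready yet")
		}
		return nil
	})
	fmt.Println(err) // <nil>
}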
+ if err := tstest.WaitFor(10*time.Second, func() error { + st1 := n1.MustStatus() + st2 := n2.MustStatus() + + if len(st1.Peer) == 0 || len(st2.Peer) == 0 { + return errors.New("no peers") + } + + // Check n1 sees n2's CapMap. + p1 := st1.Peer[st1.Peers()[0]] + if p1.CapMap == nil { + return errors.New("peer CapMap is nil") + } + if p1.CapMap["example:custom"] == nil || p1.CapMap["example:enabled"] == nil { + return errors.New("peer CapMap missing entries") + } + + // Check n2 sees n1's CapMap. + p2 := st2.Peer[st2.Peers()[0]] + if p2.CapMap == nil { + return errors.New("peer CapMap is nil") + } + if p2.CapMap["example:custom"] == nil || p2.CapMap["example:enabled"] == nil { + return errors.New("peer CapMap missing entries") + } + + return nil + }); err != nil { + t.Fatal(err) + } + + d1.MustCleanShutdown(t) + d2.MustCleanShutdown(t) +} + +// TestSetNodeCapMap tests that SetNodeCapMap updates are propagated to peers. +func TestSetNodeCapMap(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t) + + n1 := NewTestNode(t, env) + d1 := n1.StartDaemon() + n1.AwaitListening() + n1.MustUp() + n1.AwaitRunning() + + nodes := env.Control.AllNodes() + if len(nodes) != 1 { + t.Fatalf("expected 1 node, got %d nodes", len(nodes)) + } + node1 := nodes[0] + + // Set initial CapMap. + caps := make(tailcfg.NodeCapMap) + caps["test:state"] = []tailcfg.RawMessage{`"initial"`} + env.Control.SetNodeCapMap(node1.Key, caps) + + // Start second node and verify it sees the first node's CapMap. + n2 := NewTestNode(t, env) + d2 := n2.StartDaemon() + n2.AwaitListening() + n2.MustUp() + n2.AwaitRunning() + + if err := tstest.WaitFor(5*time.Second, func() error { + st := n2.MustStatus() + if len(st.Peer) == 0 { + return errors.New("no peers") + } + p := st.Peer[st.Peers()[0]] + if p.CapMap == nil || p.CapMap["test:state"] == nil { + return errors.New("peer CapMap not set") + } + if string(p.CapMap["test:state"][0]) != `"initial"` { + return errors.New("wrong CapMap value") + } + return nil + }); err != nil { + t.Fatal(err) + } + + d1.MustCleanShutdown(t) + d2.MustCleanShutdown(t) +} diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 71205f897..739795bb3 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -1000,7 +1000,11 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, s.mu.Lock() peerAddress := s.masquerades[p.Key][node.Key] routes := s.nodeSubnetRoutes[p.Key] + peerCapMap := maps.Clone(s.nodeCapMaps[p.Key]) s.mu.Unlock() + if peerCapMap != nil { + p.CapMap = peerCapMap + } if peerAddress.IsValid() { if peerAddress.Is6() { p.Addresses[1] = netip.PrefixFrom(peerAddress, peerAddress.BitLen()) From 5d4e67fd937bef4f3ad5ec8e93174a5b6bd7dceb Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 21 Jul 2025 08:36:43 -0700 Subject: [PATCH 1117/1708] net/dns/recursive: set EDNS on queries Updates tailscale/corp#30631 Change-Id: Ib88ea1bb51dd917c04f8d41bcaa6d59b9abd4f73 Signed-off-by: Brad Fitzpatrick --- net/dns/recursive/recursive.go | 1 + 1 file changed, 1 insertion(+) diff --git a/net/dns/recursive/recursive.go b/net/dns/recursive/recursive.go index eb23004d8..fd865e37a 100644 --- a/net/dns/recursive/recursive.go +++ b/net/dns/recursive/recursive.go @@ -547,6 +547,7 @@ func (r *Resolver) queryNameserverProto( // Prepare a message asking for an appropriately-typed record // for the name we're querying. 
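For context on the one-line SetEdns0 change just below: adding an EDNS0 OPT record advertises a larger UDP payload size to the queried nameserver. A hedged standalone sketch of the same call in a generic github.com/miekg/dns query (the resolver address is only an example, and this is not the recursive resolver's own exchange path):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("tailscale.com"), dns.TypeA)
	// Advertise a 1232-byte UDP payload size via EDNS0 without the DO
	// (DNSSEC OK) bit; 1232 bytes is the common "DNS flag day 2020" value.
	m.SetEdns0(1232, false)

	c := new(dns.Client)
	in, _, err := c.Exchange(m, "8.8.8.8:53") // example resolver
	if err != nil {
		panic(err)
	}
	fmt.Println(in.Answer)
}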
m := new(dns.Msg) + m.SetEdns0(1232, false /* no DNSSEC */) m.SetQuestion(name.WithTrailingDot(), uint16(qtype)) // Allow mocking out the network components with our exchange hook. From 1677fb190519710d66354600f659b50af77d7759 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 21 Jul 2025 10:02:37 -0700 Subject: [PATCH 1118/1708] wgengine/magicsock,all: allocate peer relay over disco instead of PeerAPI (#16603) Updates tailscale/corp#30583 Updates tailscale/corp#30534 Updates tailscale/corp#30557 Signed-off-by: Dylan Bargatze Signed-off-by: Jordan Whited Co-authored-by: Dylan Bargatze --- disco/disco.go | 261 +++++++++---- disco/disco_test.go | 41 +- feature/relayserver/relayserver.go | 151 +++---- feature/relayserver/relayserver_test.go | 10 + ipn/ipnlocal/local.go | 2 +- ipn/localapi/localapi.go | 2 +- net/udprelay/endpoint/endpoint.go | 9 + net/udprelay/server.go | 45 +-- tailcfg/tailcfg.go | 3 +- types/key/disco.go | 38 ++ types/key/disco_test.go | 18 + wgengine/magicsock/endpoint.go | 17 +- wgengine/magicsock/magicsock.go | 355 +++++++++++++---- wgengine/magicsock/magicsock_test.go | 335 ++++++++-------- wgengine/magicsock/relaymanager.go | 498 ++++++++++++++---------- wgengine/magicsock/relaymanager_test.go | 42 +- 16 files changed, 1187 insertions(+), 640 deletions(-) diff --git a/disco/disco.go b/disco/disco.go index d4623c119..1689d2a93 100644 --- a/disco/disco.go +++ b/disco/disco.go @@ -42,13 +42,15 @@ const NonceLen = 24 type MessageType byte const ( - TypePing = MessageType(0x01) - TypePong = MessageType(0x02) - TypeCallMeMaybe = MessageType(0x03) - TypeBindUDPRelayEndpoint = MessageType(0x04) - TypeBindUDPRelayEndpointChallenge = MessageType(0x05) - TypeBindUDPRelayEndpointAnswer = MessageType(0x06) - TypeCallMeMaybeVia = MessageType(0x07) + TypePing = MessageType(0x01) + TypePong = MessageType(0x02) + TypeCallMeMaybe = MessageType(0x03) + TypeBindUDPRelayEndpoint = MessageType(0x04) + TypeBindUDPRelayEndpointChallenge = MessageType(0x05) + TypeBindUDPRelayEndpointAnswer = MessageType(0x06) + TypeCallMeMaybeVia = MessageType(0x07) + TypeAllocateUDPRelayEndpointRequest = MessageType(0x08) + TypeAllocateUDPRelayEndpointResponse = MessageType(0x09) ) const v0 = byte(0) @@ -97,6 +99,10 @@ func Parse(p []byte) (Message, error) { return parseBindUDPRelayEndpointAnswer(ver, p) case TypeCallMeMaybeVia: return parseCallMeMaybeVia(ver, p) + case TypeAllocateUDPRelayEndpointRequest: + return parseAllocateUDPRelayEndpointRequest(ver, p) + case TypeAllocateUDPRelayEndpointResponse: + return parseAllocateUDPRelayEndpointResponse(ver, p) default: return nil, fmt.Errorf("unknown message type 0x%02x", byte(t)) } @@ -381,9 +387,7 @@ func (m *BindUDPRelayEndpointCommon) decode(b []byte) error { } // BindUDPRelayEndpoint is the first messaged transmitted from UDP relay client -// towards UDP relay server as part of the 3-way bind handshake. This message -// type is currently considered experimental and is not yet tied to a -// tailcfg.CapabilityVersion. +// towards UDP relay server as part of the 3-way bind handshake. type BindUDPRelayEndpoint struct { BindUDPRelayEndpointCommon } @@ -405,8 +409,7 @@ func parseBindUDPRelayEndpoint(ver uint8, p []byte) (m *BindUDPRelayEndpoint, er // BindUDPRelayEndpointChallenge is transmitted from UDP relay server towards // UDP relay client in response to a BindUDPRelayEndpoint message as part of the -// 3-way bind handshake. This message type is currently considered experimental -// and is not yet tied to a tailcfg.CapabilityVersion. 
+// 3-way bind handshake. type BindUDPRelayEndpointChallenge struct { BindUDPRelayEndpointCommon } @@ -427,9 +430,7 @@ func parseBindUDPRelayEndpointChallenge(ver uint8, p []byte) (m *BindUDPRelayEnd } // BindUDPRelayEndpointAnswer is transmitted from UDP relay client to UDP relay -// server in response to a BindUDPRelayEndpointChallenge message. This message -// type is currently considered experimental and is not yet tied to a -// tailcfg.CapabilityVersion. +// server in response to a BindUDPRelayEndpointChallenge message. type BindUDPRelayEndpointAnswer struct { BindUDPRelayEndpointCommon } @@ -449,6 +450,168 @@ func parseBindUDPRelayEndpointAnswer(ver uint8, p []byte) (m *BindUDPRelayEndpoi return m, nil } +// AllocateUDPRelayEndpointRequest is a message sent only over DERP to request +// allocation of a relay endpoint on a [tailscale.com/net/udprelay.Server] +type AllocateUDPRelayEndpointRequest struct { + // ClientDisco are the Disco public keys of the clients that should be + // permitted to handshake with the endpoint. + ClientDisco [2]key.DiscoPublic + // Generation represents the allocation request generation. The server must + // echo it back in the [AllocateUDPRelayEndpointResponse] to enable request + // and response alignment client-side. + Generation uint32 +} + +// allocateUDPRelayEndpointRequestLen is the length of a marshaled +// [AllocateUDPRelayEndpointRequest] message without the message header. +const allocateUDPRelayEndpointRequestLen = key.DiscoPublicRawLen*2 + // ClientDisco + 4 // Generation + +func (m *AllocateUDPRelayEndpointRequest) AppendMarshal(b []byte) []byte { + ret, p := appendMsgHeader(b, TypeAllocateUDPRelayEndpointRequest, v0, allocateUDPRelayEndpointRequestLen) + for i := 0; i < len(m.ClientDisco); i++ { + disco := m.ClientDisco[i].AppendTo(nil) + copy(p, disco) + p = p[key.DiscoPublicRawLen:] + } + binary.BigEndian.PutUint32(p, m.Generation) + return ret +} + +func parseAllocateUDPRelayEndpointRequest(ver uint8, p []byte) (m *AllocateUDPRelayEndpointRequest, err error) { + m = new(AllocateUDPRelayEndpointRequest) + if ver != 0 { + return + } + if len(p) < allocateUDPRelayEndpointRequestLen { + return m, errShort + } + for i := 0; i < len(m.ClientDisco); i++ { + m.ClientDisco[i] = key.DiscoPublicFromRaw32(mem.B(p[:key.DiscoPublicRawLen])) + p = p[key.DiscoPublicRawLen:] + } + m.Generation = binary.BigEndian.Uint32(p) + return m, nil +} + +// AllocateUDPRelayEndpointResponse is a message sent only over DERP in response +// to a [AllocateUDPRelayEndpointRequest]. +type AllocateUDPRelayEndpointResponse struct { + // Generation represents the allocation request generation. The server must + // echo back the [AllocateUDPRelayEndpointRequest.Generation] here to enable + // request and response alignment client-side. 
+ Generation uint32 + UDPRelayEndpoint +} + +func (m *AllocateUDPRelayEndpointResponse) AppendMarshal(b []byte) []byte { + endpointsLen := epLength * len(m.AddrPorts) + generationLen := 4 + ret, d := appendMsgHeader(b, TypeAllocateUDPRelayEndpointResponse, v0, generationLen+udpRelayEndpointLenMinusAddrPorts+endpointsLen) + binary.BigEndian.PutUint32(d, m.Generation) + m.encode(d[4:]) + return ret +} + +func parseAllocateUDPRelayEndpointResponse(ver uint8, p []byte) (m *AllocateUDPRelayEndpointResponse, err error) { + m = new(AllocateUDPRelayEndpointResponse) + if ver != 0 { + return m, nil + } + if len(p) < 4 { + return m, errShort + } + m.Generation = binary.BigEndian.Uint32(p) + err = m.decode(p[4:]) + return m, err +} + +const udpRelayEndpointLenMinusAddrPorts = key.DiscoPublicRawLen + // ServerDisco + (key.DiscoPublicRawLen * 2) + // ClientDisco + 8 + // LamportID + 4 + // VNI + 8 + // BindLifetime + 8 // SteadyStateLifetime + +// UDPRelayEndpoint is a mirror of [tailscale.com/net/udprelay/endpoint.ServerEndpoint], +// refer to it for field documentation. [UDPRelayEndpoint] is carried in both +// [CallMeMaybeVia] and [AllocateUDPRelayEndpointResponse] messages. +type UDPRelayEndpoint struct { + // ServerDisco is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.ServerDisco] + ServerDisco key.DiscoPublic + // ClientDisco is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.ClientDisco] + ClientDisco [2]key.DiscoPublic + // LamportID is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.LamportID] + LamportID uint64 + // VNI is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.VNI] + VNI uint32 + // BindLifetime is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.BindLifetime] + BindLifetime time.Duration + // SteadyStateLifetime is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.SteadyStateLifetime] + SteadyStateLifetime time.Duration + // AddrPorts is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.AddrPorts] + AddrPorts []netip.AddrPort +} + +// encode encodes m in b. b must be at least [udpRelayEndpointLenMinusAddrPorts] +// + [epLength] * len(m.AddrPorts) bytes long. +func (m *UDPRelayEndpoint) encode(b []byte) { + disco := m.ServerDisco.AppendTo(nil) + copy(b, disco) + b = b[key.DiscoPublicRawLen:] + for i := 0; i < len(m.ClientDisco); i++ { + disco = m.ClientDisco[i].AppendTo(nil) + copy(b, disco) + b = b[key.DiscoPublicRawLen:] + } + binary.BigEndian.PutUint64(b[:8], m.LamportID) + b = b[8:] + binary.BigEndian.PutUint32(b[:4], m.VNI) + b = b[4:] + binary.BigEndian.PutUint64(b[:8], uint64(m.BindLifetime)) + b = b[8:] + binary.BigEndian.PutUint64(b[:8], uint64(m.SteadyStateLifetime)) + b = b[8:] + for _, ipp := range m.AddrPorts { + a := ipp.Addr().As16() + copy(b, a[:]) + binary.BigEndian.PutUint16(b[16:18], ipp.Port()) + b = b[epLength:] + } +} + +// decode decodes m from b. 
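As a usage sketch of the allocation messages added above: AppendMarshal produces the cleartext payload (type byte, version byte, then the fields) and Parse reverses it, so a request round-trips like this (a minimal illustration, assuming the types introduced by this patch):

package main

import (
	"fmt"

	"tailscale.com/disco"
	"tailscale.com/types/key"
)

func main() {
	k1 := key.NewDisco().Public()
	k2 := key.NewDisco().Public()
	req := &disco.AllocateUDPRelayEndpointRequest{
		ClientDisco: [2]key.DiscoPublic{k1, k2},
		Generation:  1,
	}
	b := req.AppendMarshal(nil)
	m, err := disco.Parse(b)
	if err != nil {
		panic(err)
	}
	back := m.(*disco.AllocateUDPRelayEndpointRequest)
	fmt.Println(back.Generation, back.ClientDisco[0] == k1, back.ClientDisco[1] == k2)
}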
+func (m *UDPRelayEndpoint) decode(b []byte) error { + if len(b) < udpRelayEndpointLenMinusAddrPorts+epLength || + (len(b)-udpRelayEndpointLenMinusAddrPorts)%epLength != 0 { + return errShort + } + m.ServerDisco = key.DiscoPublicFromRaw32(mem.B(b[:key.DiscoPublicRawLen])) + b = b[key.DiscoPublicRawLen:] + for i := 0; i < len(m.ClientDisco); i++ { + m.ClientDisco[i] = key.DiscoPublicFromRaw32(mem.B(b[:key.DiscoPublicRawLen])) + b = b[key.DiscoPublicRawLen:] + } + m.LamportID = binary.BigEndian.Uint64(b[:8]) + b = b[8:] + m.VNI = binary.BigEndian.Uint32(b[:4]) + b = b[4:] + m.BindLifetime = time.Duration(binary.BigEndian.Uint64(b[:8])) + b = b[8:] + m.SteadyStateLifetime = time.Duration(binary.BigEndian.Uint64(b[:8])) + b = b[8:] + m.AddrPorts = make([]netip.AddrPort, 0, len(b)-udpRelayEndpointLenMinusAddrPorts/epLength) + for len(b) > 0 { + var a [16]byte + copy(a[:], b) + m.AddrPorts = append(m.AddrPorts, netip.AddrPortFrom( + netip.AddrFrom16(a).Unmap(), + binary.BigEndian.Uint16(b[16:18]))) + b = b[epLength:] + } + return nil +} + // CallMeMaybeVia is a message sent only over DERP to request that the recipient // try to open up a magicsock path back to the sender. The 'Via' in // CallMeMaybeVia highlights that candidate paths are served through an @@ -464,78 +627,22 @@ func parseBindUDPRelayEndpointAnswer(ver uint8, p []byte) (m *BindUDPRelayEndpoi // The recipient may choose to not open a path back if it's already happy with // its path. Direct connections, e.g. [CallMeMaybe]-signaled, take priority over // CallMeMaybeVia paths. -// -// This message type is currently considered experimental and is not yet tied to -// a [tailscale.com/tailcfg.CapabilityVersion]. type CallMeMaybeVia struct { - // ServerDisco is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.ServerDisco] - ServerDisco key.DiscoPublic - // LamportID is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.LamportID] - LamportID uint64 - // VNI is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.VNI] - VNI uint32 - // BindLifetime is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.BindLifetime] - BindLifetime time.Duration - // SteadyStateLifetime is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.SteadyStateLifetime] - SteadyStateLifetime time.Duration - // AddrPorts is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.AddrPorts] - AddrPorts []netip.AddrPort + UDPRelayEndpoint } -const cmmvDataLenMinusEndpoints = key.DiscoPublicRawLen + // ServerDisco - 8 + // LamportID - 4 + // VNI - 8 + // BindLifetime - 8 // SteadyStateLifetime - func (m *CallMeMaybeVia) AppendMarshal(b []byte) []byte { endpointsLen := epLength * len(m.AddrPorts) - ret, p := appendMsgHeader(b, TypeCallMeMaybeVia, v0, cmmvDataLenMinusEndpoints+endpointsLen) - disco := m.ServerDisco.AppendTo(nil) - copy(p, disco) - p = p[key.DiscoPublicRawLen:] - binary.BigEndian.PutUint64(p[:8], m.LamportID) - p = p[8:] - binary.BigEndian.PutUint32(p[:4], m.VNI) - p = p[4:] - binary.BigEndian.PutUint64(p[:8], uint64(m.BindLifetime)) - p = p[8:] - binary.BigEndian.PutUint64(p[:8], uint64(m.SteadyStateLifetime)) - p = p[8:] - for _, ipp := range m.AddrPorts { - a := ipp.Addr().As16() - copy(p, a[:]) - binary.BigEndian.PutUint16(p[16:18], ipp.Port()) - p = p[epLength:] - } + ret, p := appendMsgHeader(b, TypeCallMeMaybeVia, v0, udpRelayEndpointLenMinusAddrPorts+endpointsLen) + m.encode(p) return ret } func parseCallMeMaybeVia(ver uint8, p []byte) (m *CallMeMaybeVia, err error) { m = new(CallMeMaybeVia) - if len(p) < cmmvDataLenMinusEndpoints+epLength || - 
(len(p)-cmmvDataLenMinusEndpoints)%epLength != 0 || - ver != 0 { + if ver != 0 { return m, nil } - m.ServerDisco = key.DiscoPublicFromRaw32(mem.B(p[:key.DiscoPublicRawLen])) - p = p[key.DiscoPublicRawLen:] - m.LamportID = binary.BigEndian.Uint64(p[:8]) - p = p[8:] - m.VNI = binary.BigEndian.Uint32(p[:4]) - p = p[4:] - m.BindLifetime = time.Duration(binary.BigEndian.Uint64(p[:8])) - p = p[8:] - m.SteadyStateLifetime = time.Duration(binary.BigEndian.Uint64(p[:8])) - p = p[8:] - m.AddrPorts = make([]netip.AddrPort, 0, len(p)-cmmvDataLenMinusEndpoints/epLength) - for len(p) > 0 { - var a [16]byte - copy(a[:], p) - m.AddrPorts = append(m.AddrPorts, netip.AddrPortFrom( - netip.AddrFrom16(a).Unmap(), - binary.BigEndian.Uint16(p[16:18]))) - p = p[epLength:] - } - return m, nil + err = m.decode(p) + return m, err } diff --git a/disco/disco_test.go b/disco/disco_test.go index 9fb71ff83..71b68338a 100644 --- a/disco/disco_test.go +++ b/disco/disco_test.go @@ -25,6 +25,19 @@ func TestMarshalAndParse(t *testing.T) { }, } + udpRelayEndpoint := UDPRelayEndpoint{ + ServerDisco: key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 30: 30, 31: 31})), + ClientDisco: [2]key.DiscoPublic{key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 3: 3, 30: 30, 31: 31})), key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 4: 4, 30: 30, 31: 31}))}, + LamportID: 123, + VNI: 456, + BindLifetime: time.Second, + SteadyStateLifetime: time.Minute, + AddrPorts: []netip.AddrPort{ + netip.MustParseAddrPort("1.2.3.4:567"), + netip.MustParseAddrPort("[2001::3456]:789"), + }, + } + tests := []struct { name string want string @@ -117,17 +130,25 @@ func TestMarshalAndParse(t *testing.T) { { name: "call_me_maybe_via", m: &CallMeMaybeVia{ - ServerDisco: key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 30: 30, 31: 31})), - LamportID: 123, - VNI: 456, - BindLifetime: time.Second, - SteadyStateLifetime: time.Minute, - AddrPorts: []netip.AddrPort{ - netip.MustParseAddrPort("1.2.3.4:567"), - netip.MustParseAddrPort("[2001::3456]:789"), - }, + UDPRelayEndpoint: udpRelayEndpoint, + }, + want: "07 00 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 03 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 00 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 00 00 00 00 00 00 7b 00 00 01 c8 00 00 00 00 3b 9a ca 00 00 00 00 0d f8 47 58 00 00 00 00 00 00 00 00 00 00 00 ff ff 01 02 03 04 02 37 20 01 00 00 00 00 00 00 00 00 00 00 00 00 34 56 03 15", + }, + { + name: "allocate_udp_relay_endpoint_request", + m: &AllocateUDPRelayEndpointRequest{ + ClientDisco: [2]key.DiscoPublic{key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 3: 3, 30: 30, 31: 31})), key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 4: 4, 30: 30, 31: 31}))}, + Generation: 1, + }, + want: "08 00 00 01 02 03 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 00 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 00 00 01", + }, + { + name: "allocate_udp_relay_endpoint_response", + m: &AllocateUDPRelayEndpointResponse{ + Generation: 1, + UDPRelayEndpoint: udpRelayEndpoint, }, - want: "07 00 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 00 00 00 00 00 00 7b 00 00 01 c8 00 00 00 00 3b 9a ca 00 00 00 00 0d f8 47 58 00 00 00 00 00 00 00 00 00 00 00 ff ff 01 02 03 04 02 37 20 01 00 00 00 00 00 00 00 00 00 00 00 00 34 56 03 15", + want: "09 
00 00 00 00 01 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 03 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 00 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 00 00 00 00 00 00 7b 00 00 01 c8 00 00 00 00 3b 9a ca 00 00 00 00 0d f8 47 58 00 00 00 00 00 00 00 00 00 00 00 ff ff 01 02 03 04 02 37 20 01 00 00 00 00 00 00 00 00 00 00 00 00 34 56 03 15", }, } for _, tt := range tests { diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index d0ad27624..f4077b5f9 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -6,25 +6,21 @@ package relayserver import ( - "encoding/json" "errors" - "fmt" - "io" - "net/http" "sync" - "time" + "tailscale.com/disco" "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" - "tailscale.com/ipn/ipnlocal" "tailscale.com/net/udprelay" "tailscale.com/net/udprelay/endpoint" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/ptr" - "tailscale.com/util/httpm" + "tailscale.com/util/eventbus" + "tailscale.com/wgengine/magicsock" ) // featureName is the name of the feature implemented by this package. @@ -34,26 +30,34 @@ const featureName = "relayserver" func init() { feature.Register(featureName) ipnext.RegisterExtension(featureName, newExtension) - ipnlocal.RegisterPeerAPIHandler("/v0/relay/endpoint", handlePeerAPIRelayAllocateEndpoint) } // newExtension is an [ipnext.NewExtensionFn] that creates a new relay server // extension. It is registered with [ipnext.RegisterExtension] if the package is // imported. -func newExtension(logf logger.Logf, _ ipnext.SafeBackend) (ipnext.Extension, error) { - return &extension{logf: logger.WithPrefix(logf, featureName+": ")}, nil +func newExtension(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { + return &extension{ + logf: logger.WithPrefix(logf, featureName+": "), + bus: sb.Sys().Bus.Get(), + }, nil } // extension is an [ipnext.Extension] managing the relay server on platforms // that import this package. type extension struct { logf logger.Logf + bus *eventbus.Bus - mu sync.Mutex // guards the following fields + mu sync.Mutex // guards the following fields + eventClient *eventbus.Client // closed to stop consumeEventbusTopics + reqSub *eventbus.Subscriber[magicsock.UDPRelayAllocReq] // receives endpoint alloc requests from magicsock + respPub *eventbus.Publisher[magicsock.UDPRelayAllocResp] // publishes endpoint alloc responses to magicsock shutdown bool - port *int // ipn.Prefs.RelayServerPort, nil if disabled - hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer - server relayServer // lazily initialized + port *int // ipn.Prefs.RelayServerPort, nil if disabled + busDoneCh chan struct{} // non-nil if port is non-nil, closed when consumeEventbusTopics returns + hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer + server relayServer // lazily initialized + } // relayServer is the interface of [udprelay.Server]. @@ -77,6 +81,18 @@ func (e *extension) Init(host ipnext.Host) error { return nil } +// initBusConnection initializes the [*eventbus.Client], [*eventbus.Subscriber], +// [*eventbus.Publisher], and [chan struct{}] used to publish/receive endpoint +// allocation messages to/from the [*eventbus.Bus]. It also starts +// consumeEventbusTopics in a separate goroutine. 
+func (e *extension) initBusConnection() { + e.eventClient = e.bus.Client("relayserver.extension") + e.reqSub = eventbus.Subscribe[magicsock.UDPRelayAllocReq](e.eventClient) + e.respPub = eventbus.Publish[magicsock.UDPRelayAllocResp](e.eventClient) + e.busDoneCh = make(chan struct{}) + go e.consumeEventbusTopics() +} + func (e *extension) selfNodeViewChanged(nodeView tailcfg.NodeView) { e.mu.Lock() defer e.mu.Unlock() @@ -98,11 +114,57 @@ func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsV e.server.Close() e.server = nil } + if e.port != nil { + e.eventClient.Close() + <-e.busDoneCh + } e.port = nil if ok { e.port = ptr.To(newPort) + e.initBusConnection() + } + } +} + +func (e *extension) consumeEventbusTopics() { + defer close(e.busDoneCh) + + for { + select { + case <-e.reqSub.Done(): + // If reqSub is done, the eventClient has been closed, which is a + // signal to return. + return + case req := <-e.reqSub.Events(): + rs, err := e.relayServerOrInit() + if err != nil { + e.logf("error initializing server: %v", err) + continue + } + se, err := rs.AllocateEndpoint(req.Message.ClientDisco[0], req.Message.ClientDisco[1]) + if err != nil { + e.logf("error allocating endpoint: %v", err) + continue + } + e.respPub.Publish(magicsock.UDPRelayAllocResp{ + ReqRxFromNodeKey: req.RxFromNodeKey, + ReqRxFromDiscoKey: req.RxFromDiscoKey, + Message: &disco.AllocateUDPRelayEndpointResponse{ + Generation: req.Message.Generation, + UDPRelayEndpoint: disco.UDPRelayEndpoint{ + ServerDisco: se.ServerDisco, + ClientDisco: se.ClientDisco, + LamportID: se.LamportID, + VNI: se.VNI, + BindLifetime: se.BindLifetime.Duration, + SteadyStateLifetime: se.SteadyStateLifetime.Duration, + AddrPorts: se.AddrPorts, + }, + }, + }) } } + } // Shutdown implements [ipnlocal.Extension]. 
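
The extension's bus wiring above follows a small request/response worker pattern: one eventbus client, a typed subscriber for requests, a typed publisher for responses, and a goroutine that drains the subscriber until the client is closed. A minimal, hedged sketch of that shape — Req, Resp, and newWorker are made-up names for illustration; the eventbus calls mirror the ones used by initBusConnection and consumeEventbusTopics above:

package example

import "tailscale.com/util/eventbus"

// Req and Resp are hypothetical message types standing in for
// magicsock.UDPRelayAllocReq / magicsock.UDPRelayAllocResp.
type Req struct{ ID int }
type Resp struct{ ID int }

// newWorker wires a subscriber and publisher to bus and starts a consume
// loop, returning a stop function that mirrors the extension's shutdown
// order: close the client, then wait for the loop to exit.
func newWorker(bus *eventbus.Bus) (stop func()) {
	ec := bus.Client("example.worker")
	sub := eventbus.Subscribe[Req](ec)
	pub := eventbus.Publish[Resp](ec)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			select {
			case <-sub.Done(): // client closed; loop exits
				return
			case req := <-sub.Events():
				pub.Publish(Resp{ID: req.ID}) // answer each request
			}
		}
	}()
	return func() {
		ec.Close()
		<-done
	}
}
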
@@ -114,6 +176,10 @@ func (e *extension) Shutdown() error { e.server.Close() e.server = nil } + if e.port != nil { + e.eventClient.Close() + <-e.busDoneCh + } return nil } @@ -139,60 +205,3 @@ func (e *extension) relayServerOrInit() (relayServer, error) { } return e.server, nil } - -func handlePeerAPIRelayAllocateEndpoint(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { - e, ok := ipnlocal.GetExt[*extension](h.LocalBackend()) - if !ok { - http.Error(w, "relay failed to initialize", http.StatusServiceUnavailable) - return - } - - httpErrAndLog := func(message string, code int) { - http.Error(w, message, code) - h.Logf("relayserver: request from %v returned code %d: %s", h.RemoteAddr(), code, message) - } - - if !h.PeerCaps().HasCapability(tailcfg.PeerCapabilityRelay) { - httpErrAndLog("relay not permitted", http.StatusForbidden) - return - } - - if r.Method != httpm.POST { - httpErrAndLog("only POST method is allowed", http.StatusMethodNotAllowed) - return - } - - var allocateEndpointReq struct { - DiscoKeys []key.DiscoPublic - } - err := json.NewDecoder(io.LimitReader(r.Body, 512)).Decode(&allocateEndpointReq) - if err != nil { - httpErrAndLog(err.Error(), http.StatusBadRequest) - return - } - if len(allocateEndpointReq.DiscoKeys) != 2 { - httpErrAndLog("2 disco public keys must be supplied", http.StatusBadRequest) - return - } - - rs, err := e.relayServerOrInit() - if err != nil { - httpErrAndLog(err.Error(), http.StatusServiceUnavailable) - return - } - ep, err := rs.AllocateEndpoint(allocateEndpointReq.DiscoKeys[0], allocateEndpointReq.DiscoKeys[1]) - if err != nil { - var notReady udprelay.ErrServerNotReady - if errors.As(err, ¬Ready) { - w.Header().Set("Retry-After", fmt.Sprintf("%d", notReady.RetryAfter.Round(time.Second)/time.Second)) - httpErrAndLog(err.Error(), http.StatusServiceUnavailable) - return - } - httpErrAndLog(err.Error(), http.StatusInternalServerError) - return - } - err = json.NewEncoder(w).Encode(&ep) - if err != nil { - httpErrAndLog(err.Error(), http.StatusInternalServerError) - } -} diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index cc7f05f67..84158188e 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -9,6 +9,7 @@ import ( "tailscale.com/ipn" "tailscale.com/net/udprelay/endpoint" + "tailscale.com/tsd" "tailscale.com/types/key" "tailscale.com/types/ptr" ) @@ -108,9 +109,18 @@ func Test_extension_profileStateChanged(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + sys := tsd.NewSystem() + bus := sys.Bus.Get() e := &extension{ port: tt.fields.port, server: tt.fields.server, + bus: bus, + } + if e.port != nil { + // Entering profileStateChanged with a non-nil port requires + // bus init, which is called in profileStateChanged when + // transitioning port from nil to non-nil. 
+ e.initBusConnection() } e.profileStateChanged(ipn.LoginProfileView{}, tt.args.prefs, tt.args.sameNode) if tt.wantNilServer != (e.server == nil) { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index d3754e540..8665a88c4 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6957,7 +6957,7 @@ func (b *LocalBackend) DebugReSTUN() error { return nil } -func (b *LocalBackend) DebugPeerRelayServers() set.Set[netip.AddrPort] { +func (b *LocalBackend) DebugPeerRelayServers() set.Set[netip.Addr] { return b.MagicConn().PeerRelays() } diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 2409aa1ae..0acc5a65f 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -699,7 +699,7 @@ func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { h.b.DebugForcePreferDERP(n) case "peer-relay-servers": servers := h.b.DebugPeerRelayServers().Slice() - slices.SortFunc(servers, func(a, b netip.AddrPort) int { + slices.SortFunc(servers, func(a, b netip.Addr) int { return a.Compare(b) }) err = json.NewEncoder(w).Encode(servers) diff --git a/net/udprelay/endpoint/endpoint.go b/net/udprelay/endpoint/endpoint.go index 2672a856b..0d2a14e96 100644 --- a/net/udprelay/endpoint/endpoint.go +++ b/net/udprelay/endpoint/endpoint.go @@ -7,11 +7,16 @@ package endpoint import ( "net/netip" + "time" "tailscale.com/tstime" "tailscale.com/types/key" ) +// ServerRetryAfter is the default +// [tailscale.com/net/udprelay.ErrServerNotReady.RetryAfter] value. +const ServerRetryAfter = time.Second * 3 + // ServerEndpoint contains details for an endpoint served by a // [tailscale.com/net/udprelay.Server]. type ServerEndpoint struct { @@ -21,6 +26,10 @@ type ServerEndpoint struct { // unique ServerEndpoint allocation. ServerDisco key.DiscoPublic + // ClientDisco are the Disco public keys of the relay participants permitted + // to handshake with this endpoint. + ClientDisco [2]key.DiscoPublic + // LamportID is unique and monotonically non-decreasing across // ServerEndpoint allocations for the lifetime of Server. It enables clients // to dedup and resolve allocation event order. Clients may race to allocate diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 7651bf295..c34a4b5f6 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -73,23 +73,7 @@ type Server struct { lamportID uint64 vniPool []uint32 // the pool of available VNIs byVNI map[uint32]*serverEndpoint - byDisco map[pairOfDiscoPubKeys]*serverEndpoint -} - -// pairOfDiscoPubKeys is a pair of key.DiscoPublic. It must be constructed via -// newPairOfDiscoPubKeys to ensure lexicographical ordering. -type pairOfDiscoPubKeys [2]key.DiscoPublic - -func (p pairOfDiscoPubKeys) String() string { - return fmt.Sprintf("%s <=> %s", p[0].ShortString(), p[1].ShortString()) -} - -func newPairOfDiscoPubKeys(discoA, discoB key.DiscoPublic) pairOfDiscoPubKeys { - pair := pairOfDiscoPubKeys([2]key.DiscoPublic{discoA, discoB}) - slices.SortFunc(pair[:], func(a, b key.DiscoPublic) int { - return a.Compare(b) - }) - return pair + byDisco map[key.SortedPairOfDiscoPublic]*serverEndpoint } // serverEndpoint contains Server-internal [endpoint.ServerEndpoint] state. @@ -99,7 +83,7 @@ type serverEndpoint struct { // indexing of this array aligns with the following fields, e.g. // discoSharedSecrets[0] is the shared secret to use when sealing // Disco protocol messages for transmission towards discoPubKeys[0]. 
- discoPubKeys pairOfDiscoPubKeys + discoPubKeys key.SortedPairOfDiscoPublic discoSharedSecrets [2]key.DiscoShared handshakeGeneration [2]uint32 // or zero if a handshake has never started for that relay leg handshakeAddrPorts [2]netip.AddrPort // or zero value if a handshake has never started for that relay leg @@ -126,7 +110,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex if common.VNI != e.vni { return errors.New("mismatching VNI") } - if common.RemoteKey.Compare(e.discoPubKeys[otherSender]) != 0 { + if common.RemoteKey.Compare(e.discoPubKeys.Get()[otherSender]) != 0 { return errors.New("mismatching RemoteKey") } return nil @@ -152,7 +136,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex m := new(disco.BindUDPRelayEndpointChallenge) m.VNI = e.vni m.Generation = discoMsg.Generation - m.RemoteKey = e.discoPubKeys[otherSender] + m.RemoteKey = e.discoPubKeys.Get()[otherSender] rand.Read(e.challenge[senderIndex][:]) copy(m.Challenge[:], e.challenge[senderIndex][:]) reply := make([]byte, packet.GeneveFixedHeaderLength, 512) @@ -200,9 +184,9 @@ func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []by sender := key.DiscoPublicFromRaw32(mem.B(senderRaw)) senderIndex := -1 switch { - case sender.Compare(e.discoPubKeys[0]) == 0: + case sender.Compare(e.discoPubKeys.Get()[0]) == 0: senderIndex = 0 - case sender.Compare(e.discoPubKeys[1]) == 0: + case sender.Compare(e.discoPubKeys.Get()[1]) == 0: senderIndex = 1 default: // unknown Disco public key @@ -291,12 +275,12 @@ func (e *serverEndpoint) isBound() bool { // which is useful to override in tests. func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Server, err error) { s = &Server{ - logf: logger.WithPrefix(logf, "relayserver"), + logf: logf, disco: key.NewDisco(), bindLifetime: defaultBindLifetime, steadyStateLifetime: defaultSteadyStateLifetime, closeCh: make(chan struct{}), - byDisco: make(map[pairOfDiscoPubKeys]*serverEndpoint), + byDisco: make(map[key.SortedPairOfDiscoPublic]*serverEndpoint), byVNI: make(map[uint32]*serverEndpoint), } s.discoPublic = s.disco.Public() @@ -315,7 +299,7 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve } s.netChecker = &netcheck.Client{ NetMon: netMon, - Logf: logger.WithPrefix(logf, "relayserver: netcheck:"), + Logf: logger.WithPrefix(logf, "netcheck: "), SendPacket: func(b []byte, addrPort netip.AddrPort) (int, error) { if addrPort.Addr().Is4() { return s.uc4.WriteToUDPAddrPort(b, addrPort) @@ -615,7 +599,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv if len(s.addrPorts) == 0 { if !s.addrDiscoveryOnce { - return endpoint.ServerEndpoint{}, ErrServerNotReady{RetryAfter: 3 * time.Second} + return endpoint.ServerEndpoint{}, ErrServerNotReady{RetryAfter: endpoint.ServerRetryAfter} } return endpoint.ServerEndpoint{}, errors.New("server addrPorts are not yet known") } @@ -624,7 +608,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv return endpoint.ServerEndpoint{}, fmt.Errorf("client disco equals server disco: %s", s.discoPublic.ShortString()) } - pair := newPairOfDiscoPubKeys(discoA, discoB) + pair := key.NewSortedPairOfDiscoPublic(discoA, discoB) e, ok := s.byDisco[pair] if ok { // Return the existing allocation. Clients can resolve duplicate @@ -639,6 +623,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv // behaviors and endpoint state (bound or not). 
We might want to // consider storing them (maybe interning) in the [*serverEndpoint] // at allocation time. + ClientDisco: pair.Get(), AddrPorts: slices.Clone(s.addrPorts), VNI: e.vni, LamportID: e.lamportID, @@ -657,15 +642,17 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv lamportID: s.lamportID, allocatedAt: time.Now(), } - e.discoSharedSecrets[0] = s.disco.Shared(e.discoPubKeys[0]) - e.discoSharedSecrets[1] = s.disco.Shared(e.discoPubKeys[1]) + e.discoSharedSecrets[0] = s.disco.Shared(e.discoPubKeys.Get()[0]) + e.discoSharedSecrets[1] = s.disco.Shared(e.discoPubKeys.Get()[1]) e.vni, s.vniPool = s.vniPool[0], s.vniPool[1:] s.byDisco[pair] = e s.byVNI[e.vni] = e + s.logf("allocated endpoint vni=%d lamportID=%d disco[0]=%v disco[1]=%v", e.vni, e.lamportID, pair.Get()[0].ShortString(), pair.Get()[1].ShortString()) return endpoint.ServerEndpoint{ ServerDisco: s.discoPublic, + ClientDisco: pair.Get(), AddrPorts: slices.Clone(s.addrPorts), VNI: e.vni, LamportID: e.lamportID, diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 398a2c8a2..550914b96 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -165,7 +165,8 @@ type CapabilityVersion int // - 118: 2025-07-01: Client sends Hostinfo.StateEncrypted to report whether the state file is encrypted at rest (#15830) // - 119: 2025-07-10: Client uses Hostinfo.Location.Priority to prioritize one route over another. // - 120: 2025-07-15: Client understands peer relay disco messages, and implements peer client and relay server functions -const CurrentCapabilityVersion CapabilityVersion = 120 +// - 121: 2025-07-19: Client understands peer relay endpoint alloc with [disco.AllocateUDPRelayEndpointRequest] & [disco.AllocateUDPRelayEndpointResponse] +const CurrentCapabilityVersion CapabilityVersion = 121 // ID is an integer ID for a user, node, or login allocated by the // control plane. diff --git a/types/key/disco.go b/types/key/disco.go index 1013ce5bf..ce5f9b36f 100644 --- a/types/key/disco.go +++ b/types/key/disco.go @@ -73,6 +73,44 @@ func (k DiscoPrivate) Shared(p DiscoPublic) DiscoShared { return ret } +// SortedPairOfDiscoPublic is a lexicographically sorted container of two +// [DiscoPublic] keys. +type SortedPairOfDiscoPublic struct { + k [2]DiscoPublic +} + +// Get returns the underlying keys. +func (s SortedPairOfDiscoPublic) Get() [2]DiscoPublic { + return s.k +} + +// NewSortedPairOfDiscoPublic returns a SortedPairOfDiscoPublic from a and b. +func NewSortedPairOfDiscoPublic(a, b DiscoPublic) SortedPairOfDiscoPublic { + s := SortedPairOfDiscoPublic{} + if a.Compare(b) < 0 { + s.k[0] = a + s.k[1] = b + } else { + s.k[0] = b + s.k[1] = a + } + return s +} + +func (s SortedPairOfDiscoPublic) String() string { + return fmt.Sprintf("%s <=> %s", s.k[0].ShortString(), s.k[1].ShortString()) +} + +// Equal returns true if s and b are equal, otherwise it returns false. +func (s SortedPairOfDiscoPublic) Equal(b SortedPairOfDiscoPublic) bool { + for i := range s.k { + if s.k[i].Compare(b.k[i]) != 0 { + return false + } + } + return true +} + // DiscoPublic is the public portion of a DiscoPrivate. 
type DiscoPublic struct { k [32]byte diff --git a/types/key/disco_test.go b/types/key/disco_test.go index c62c13cbf..131fe350f 100644 --- a/types/key/disco_test.go +++ b/types/key/disco_test.go @@ -81,3 +81,21 @@ func TestDiscoShared(t *testing.T) { t.Error("k1.Shared(k2) != k2.Shared(k1)") } } + +func TestSortedPairOfDiscoPublic(t *testing.T) { + pubA := DiscoPublic{} + pubA.k[0] = 0x01 + pubB := DiscoPublic{} + pubB.k[0] = 0x02 + sortedInput := NewSortedPairOfDiscoPublic(pubA, pubB) + unsortedInput := NewSortedPairOfDiscoPublic(pubB, pubA) + if sortedInput.Get() != unsortedInput.Get() { + t.Fatal("sortedInput.Get() != unsortedInput.Get()") + } + if unsortedInput.Get()[0] != pubA { + t.Fatal("unsortedInput.Get()[0] != pubA") + } + if unsortedInput.Get()[1] != pubB { + t.Fatal("unsortedInput.Get()[1] != pubB") + } +} diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 48d5ef5a1..6381b0210 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -879,8 +879,14 @@ func (de *endpoint) setHeartbeatDisabled(v bool) { // discoverUDPRelayPathsLocked starts UDP relay path discovery. func (de *endpoint) discoverUDPRelayPathsLocked(now mono.Time) { - // TODO(jwhited): return early if there are no relay servers set, otherwise - // we spin up and down relayManager.runLoop unnecessarily. + if !de.c.hasPeerRelayServers.Load() { + // Changes in this value between its access and the logic following + // are fine, we will eventually do the "right" thing during future path + // discovery. The worst case is we suppress path discovery for the + // current cycle, or we unnecessarily call into [relayManager] and do + // some wasted work. + return + } de.lastUDPRelayPathDiscovery = now lastBest := de.bestAddr lastBestIsTrusted := mono.Now().Before(de.trustBestAddrUntil) @@ -2035,8 +2041,15 @@ func (de *endpoint) numStopAndReset() int64 { return atomic.LoadInt64(&de.numStopAndResetAtomic) } +// setDERPHome sets the provided regionID as home for de. Calls to setDERPHome +// must never run concurrent to [Conn.updateRelayServersSet], otherwise +// [candidatePeerRelay] DERP home changes may be missed from the perspective of +// [relayManager]. func (de *endpoint) setDERPHome(regionID uint16) { de.mu.Lock() defer de.mu.Unlock() de.derpAddr = netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, uint16(regionID)) + if de.c.hasPeerRelayServers.Load() { + de.c.relayManager.handleDERPHomeChange(de.publicKey, regionID) + } } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index ad07003f7..ee0ee40ca 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -175,13 +175,15 @@ type Conn struct { // These [eventbus.Subscriber] fields are solely accessed by // consumeEventbusTopics once initialized. 
- pmSub *eventbus.Subscriber[portmapper.Mapping] - filterSub *eventbus.Subscriber[FilterUpdate] - nodeViewsSub *eventbus.Subscriber[NodeViewsUpdate] - nodeMutsSub *eventbus.Subscriber[NodeMutationsUpdate] - syncSub *eventbus.Subscriber[syncPoint] - syncPub *eventbus.Publisher[syncPoint] - subsDoneCh chan struct{} // closed when consumeEventbusTopics returns + pmSub *eventbus.Subscriber[portmapper.Mapping] + filterSub *eventbus.Subscriber[FilterUpdate] + nodeViewsSub *eventbus.Subscriber[NodeViewsUpdate] + nodeMutsSub *eventbus.Subscriber[NodeMutationsUpdate] + syncSub *eventbus.Subscriber[syncPoint] + syncPub *eventbus.Publisher[syncPoint] + allocRelayEndpointPub *eventbus.Publisher[UDPRelayAllocReq] + allocRelayEndpointSub *eventbus.Subscriber[UDPRelayAllocResp] + subsDoneCh chan struct{} // closed when consumeEventbusTopics returns // pconn4 and pconn6 are the underlying UDP sockets used to // send/receive packets for wireguard and other magicsock @@ -271,6 +273,14 @@ type Conn struct { // captureHook, if non-nil, is the pcap logging callback when capturing. captureHook syncs.AtomicValue[packet.CaptureCallback] + // hasPeerRelayServers is whether [relayManager] is configured with at least + // one peer relay server via [relayManager.handleRelayServersSet]. It is + // only accessed by [Conn.updateRelayServersSet], [endpoint.setDERPHome], + // and [endpoint.discoverUDPRelayPathsLocked]. It exists to suppress + // calls into [relayManager] leading to wasted work involving channel + // operations and goroutine creation. + hasPeerRelayServers atomic.Bool + // discoPrivate is the private naclbox key used for active // discovery traffic. It is always present, and immutable. discoPrivate key.DiscoPrivate @@ -567,6 +577,36 @@ func (s syncPoint) Signal() { close(s) } +// UDPRelayAllocReq represents a [*disco.AllocateUDPRelayEndpointRequest] +// reception event. This is signaled over an [eventbus.Bus] from +// [magicsock.Conn] towards [relayserver.extension]. +type UDPRelayAllocReq struct { + // RxFromNodeKey is the unauthenticated (DERP server claimed src) node key + // of the transmitting party, noted at disco message reception time over + // DERP. This node key is unambiguously-aligned with RxFromDiscoKey being + // that the disco message is received over DERP. + RxFromNodeKey key.NodePublic + // RxFromDiscoKey is the disco key of the transmitting party, noted and + // authenticated at reception time. + RxFromDiscoKey key.DiscoPublic + // Message is the disco message. + Message *disco.AllocateUDPRelayEndpointRequest +} + +// UDPRelayAllocResp represents a [*disco.AllocateUDPRelayEndpointResponse] +// that is yet to be transmitted over DERP (or delivered locally if +// ReqRxFromNodeKey is self). This is signaled over an [eventbus.Bus] from +// [relayserver.extension] towards [magicsock.Conn]. +type UDPRelayAllocResp struct { + // ReqRxFromNodeKey is copied from [UDPRelayAllocReq.RxFromNodeKey]. It + // enables peer lookup leading up to transmission over DERP. + ReqRxFromNodeKey key.NodePublic + // ReqRxFromDiscoKey is copied from [UDPRelayAllocReq.RxFromDiscoKey]. + ReqRxFromDiscoKey key.DiscoPublic + // Message is the disco message. + Message *disco.AllocateUDPRelayEndpointResponse +} + // newConn is the error-free, network-listening-side-effect-free based // of NewConn. Mostly for tests. 
func newConn(logf logger.Logf) *Conn { @@ -625,10 +665,40 @@ func (c *Conn) consumeEventbusTopics() { case syncPoint := <-c.syncSub.Events(): c.dlogf("magicsock: received sync point after reconfig") syncPoint.Signal() + case allocResp := <-c.allocRelayEndpointSub.Events(): + c.onUDPRelayAllocResp(allocResp) } } } +func (c *Conn) onUDPRelayAllocResp(allocResp UDPRelayAllocResp) { + c.mu.Lock() + defer c.mu.Unlock() + ep, ok := c.peerMap.endpointForNodeKey(allocResp.ReqRxFromNodeKey) + if !ok { + // If it's not a peer, it might be for self (we can peer relay through + // ourselves), in which case we want to hand it down to [relayManager] + // now versus taking a network round-trip through DERP. + selfNodeKey := c.publicKeyAtomic.Load() + if selfNodeKey.Compare(allocResp.ReqRxFromNodeKey) == 0 && + allocResp.ReqRxFromDiscoKey.Compare(c.discoPublic) == 0 { + c.relayManager.handleRxDiscoMsg(c, allocResp.Message, selfNodeKey, allocResp.ReqRxFromDiscoKey, epAddr{}) + } + return + } + disco := ep.disco.Load() + if disco == nil { + return + } + if disco.key.Compare(allocResp.ReqRxFromDiscoKey) != 0 { + return + } + ep.mu.Lock() + defer ep.mu.Unlock() + derpAddr := ep.derpAddr + go c.sendDiscoMessage(epAddr{ap: derpAddr}, ep.publicKey, disco.key, allocResp.Message, discoVerboseLog) +} + // Synchronize waits for all [eventbus] events published // prior to this call to be processed by the receiver. func (c *Conn) Synchronize() { @@ -670,6 +740,8 @@ func NewConn(opts Options) (*Conn, error) { c.nodeMutsSub = eventbus.Subscribe[NodeMutationsUpdate](c.eventClient) c.syncSub = eventbus.Subscribe[syncPoint](c.eventClient) c.syncPub = eventbus.Publish[syncPoint](c.eventClient) + c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](c.eventClient) + c.allocRelayEndpointSub = eventbus.Subscribe[UDPRelayAllocResp](c.eventClient) c.subsDoneCh = make(chan struct{}) go c.consumeEventbusTopics() } @@ -1847,6 +1919,24 @@ func (v *virtualNetworkID) get() uint32 { return v._vni & vniGetMask } +// sendDiscoAllocateUDPRelayEndpointRequest is primarily an alias for +// sendDiscoMessage, but it will alternatively send m over the eventbus if dst +// is a DERP IP:port, and dstKey is self. This saves a round-trip through DERP +// when we are attempting to allocate on a self (in-process) peer relay server. +func (c *Conn) sendDiscoAllocateUDPRelayEndpointRequest(dst epAddr, dstKey key.NodePublic, dstDisco key.DiscoPublic, allocReq *disco.AllocateUDPRelayEndpointRequest, logLevel discoLogLevel) (sent bool, err error) { + isDERP := dst.ap.Addr() == tailcfg.DerpMagicIPAddr + selfNodeKey := c.publicKeyAtomic.Load() + if isDERP && dstKey.Compare(selfNodeKey) == 0 { + c.allocRelayEndpointPub.Publish(UDPRelayAllocReq{ + RxFromNodeKey: selfNodeKey, + RxFromDiscoKey: c.discoPublic, + Message: allocReq, + }) + return true, nil + } + return c.sendDiscoMessage(dst, dstKey, dstDisco, allocReq, logLevel) +} + // sendDiscoMessage sends discovery message m to dstDisco at dst. // // If dst.ap is a DERP IP:port, then dstKey must be non-zero. 
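
The short-circuit above reduces to one predicate: publish the allocation request on the local bus only when the destination is a DERP address and the destination node key is our own; otherwise fall through to sendDiscoMessage. Restated as a standalone decision function (a sketch; the name and parameters are illustrative, not part of magicsock):

// publishAllocReqLocally reports whether an allocation request addressed to
// (dstIsDERP, dstKey) can skip DERP entirely and be handed to the in-process
// relay server extension over the eventbus. Sketch only; it mirrors the
// branch in sendDiscoAllocateUDPRelayEndpointRequest above.
func publishAllocReqLocally(dstIsDERP bool, dstKey, selfKey key.NodePublic) bool {
	return dstIsDERP && dstKey.Compare(selfKey) == 0
}

Keeping the choice a pure function of the destination makes the DERP-vs-local decision easy to exercise in isolation, without constructing a Conn.
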
@@ -2176,7 +2266,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake c.logf("[unexpected] %T packets should not come from a relay server with Geneve control bit set", dm) return } - c.relayManager.handleGeneveEncapDiscoMsg(c, challenge, di, src) + c.relayManager.handleRxDiscoMsg(c, challenge, key.NodePublic{}, di.discoKey, src) return } @@ -2201,7 +2291,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake // If it's an unknown TxID, and it's Geneve-encapsulated, then // make [relayManager] aware. It might be in the middle of probing // src. - c.relayManager.handleGeneveEncapDiscoMsg(c, dm, di, src) + c.relayManager.handleRxDiscoMsg(c, dm, key.NodePublic{}, di.discoKey, src) } case *disco.CallMeMaybe, *disco.CallMeMaybeVia: var via *disco.CallMeMaybeVia @@ -2276,7 +2366,95 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake len(cmm.MyNumber)) go ep.handleCallMeMaybe(cmm) } + case *disco.AllocateUDPRelayEndpointRequest, *disco.AllocateUDPRelayEndpointResponse: + var resp *disco.AllocateUDPRelayEndpointResponse + isResp := false + msgType := "AllocateUDPRelayEndpointRequest" + req, ok := dm.(*disco.AllocateUDPRelayEndpointRequest) + if ok { + metricRecvDiscoAllocUDPRelayEndpointRequest.Add(1) + } else { + metricRecvDiscoAllocUDPRelayEndpointResponse.Add(1) + resp = dm.(*disco.AllocateUDPRelayEndpointResponse) + msgType = "AllocateUDPRelayEndpointResponse" + isResp = true + } + + if !isDERP { + // These messages should only come via DERP. + c.logf("[unexpected] %s packets should only come via DERP", msgType) + return + } + nodeKey := derpNodeSrc + ep, ok := c.peerMap.endpointForNodeKey(nodeKey) + if !ok { + c.logf("magicsock: disco: ignoring %s from %v; %v is unknown", msgType, sender.ShortString(), derpNodeSrc.ShortString()) + return + } + epDisco := ep.disco.Load() + if epDisco == nil { + return + } + if epDisco.key != di.discoKey { + if isResp { + metricRecvDiscoAllocUDPRelayEndpointResponseBadDisco.Add(1) + } else { + metricRecvDiscoAllocUDPRelayEndpointRequestBadDisco.Add(1) + } + c.logf("[unexpected] %s from peer via DERP whose netmap discokey != disco source", msgType) + return + } + if isResp { + c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s, %d endpoints", + c.discoShort, epDisco.short, + ep.publicKey.ShortString(), derpStr(src.String()), + msgType, + len(resp.AddrPorts)) + c.relayManager.handleRxDiscoMsg(c, resp, nodeKey, di.discoKey, src) + return + } else if sender.Compare(req.ClientDisco[0]) != 0 && sender.Compare(req.ClientDisco[1]) != 0 { + // An allocation request must contain the sender's disco key in + // ClientDisco. One of the relay participants must be the sender. + c.logf("magicsock: disco: %s from %v; %v does not contain sender's disco key", + msgType, sender.ShortString(), derpNodeSrc.ShortString()) + return + } else { + c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s, for %d<->%d", + c.discoShort, epDisco.short, + ep.publicKey.ShortString(), derpStr(src.String()), + msgType, + req.ClientDisco[0], req.ClientDisco[1]) + } + + if c.filt == nil { + return + } + // Binary search of peers is O(log n) while c.mu is held. + // TODO: We might be able to use ep.nodeAddr instead of all addresses, + // or we might be able to release c.mu before doing this work. Keep it + // simple and slow for now. c.peers.AsSlice is a copy. We may need to + // write our own binary search for a [views.Slice]. 
+ peerI, ok := slices.BinarySearchFunc(c.peers.AsSlice(), ep.nodeID, func(peer tailcfg.NodeView, target tailcfg.NodeID) int { + if peer.ID() < target { + return -1 + } else if peer.ID() > target { + return 1 + } + return 0 + }) + if !ok { + // unexpected + return + } + if !nodeHasCap(c.filt, c.peers.At(peerI), c.self, tailcfg.PeerCapabilityRelay) { + return + } + c.allocRelayEndpointPub.Publish(UDPRelayAllocReq{ + RxFromDiscoKey: sender, + RxFromNodeKey: nodeKey, + Message: req, + }) } return } @@ -2337,7 +2515,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN // Geneve-encapsulated [disco.Ping] messages in the interest of // simplicity. It might be in the middle of probing src, so it must be // made aware. - c.relayManager.handleGeneveEncapDiscoMsg(c, dm, di, src) + c.relayManager.handleRxDiscoMsg(c, dm, key.NodePublic{}, di.discoKey, src) return } @@ -2687,7 +2865,7 @@ func (c *Conn) SetProbeUDPLifetime(v bool) { // capVerIsRelayCapable returns true if version is relay client and server // capable, otherwise it returns false. func capVerIsRelayCapable(version tailcfg.CapabilityVersion) bool { - return version >= 120 + return version >= 121 } // onFilterUpdate is called when a [FilterUpdate] is received over the @@ -2715,6 +2893,11 @@ func (c *Conn) onFilterUpdate(f FilterUpdate) { // peers are passed as args (vs c.mu-guarded fields) to enable callers to // release c.mu before calling as this is O(m * n) (we iterate all cap rules 'm' // in filt for every peer 'n'). +// +// Calls to updateRelayServersSet must never run concurrent to +// [endpoint.setDERPHome], otherwise [candidatePeerRelay] DERP home changes may +// be missed from the perspective of [relayManager]. +// // TODO: Optimize this so that it's not O(m * n). This might involve: // 1. Changes to [filter.Filter], e.g. adding a CapsWithValues() to check for // a given capability instead of building and returning a map of all of @@ -2722,69 +2905,75 @@ func (c *Conn) onFilterUpdate(f FilterUpdate) { // 2. Moving this work upstream into [nodeBackend] or similar, and publishing // the computed result over the eventbus instead. func (c *Conn) updateRelayServersSet(filt *filter.Filter, self tailcfg.NodeView, peers views.Slice[tailcfg.NodeView]) { - relayServers := make(set.Set[netip.AddrPort]) + relayServers := make(set.Set[candidatePeerRelay]) nodes := append(peers.AsSlice(), self) for _, maybeCandidate := range nodes { - peerAPI := peerAPIIfCandidateRelayServer(filt, self, maybeCandidate) - if peerAPI.IsValid() { - relayServers.Add(peerAPI) + if maybeCandidate.ID() != self.ID() && !capVerIsRelayCapable(maybeCandidate.Cap()) { + // If maybeCandidate's [tailcfg.CapabilityVersion] is not relay-capable, + // we skip it. If maybeCandidate happens to be self, then this check is + // unnecessary as self is always capable from this point (the statically + // compiled [tailcfg.CurrentCapabilityVersion]) forward. 
+ continue + } + if !nodeHasCap(filt, maybeCandidate, self, tailcfg.PeerCapabilityRelayTarget) { + continue } + relayServers.Add(candidatePeerRelay{ + nodeKey: maybeCandidate.Key(), + discoKey: maybeCandidate.DiscoKey(), + derpHomeRegionID: uint16(maybeCandidate.HomeDERP()), + }) } c.relayManager.handleRelayServersSet(relayServers) + if len(relayServers) > 0 { + c.hasPeerRelayServers.Store(true) + } else { + c.hasPeerRelayServers.Store(false) + } } -// peerAPIIfCandidateRelayServer returns the peer API address of maybeCandidate -// if it is considered to be a candidate relay server upon evaluation against -// filt and self, otherwise it returns a zero value. self and maybeCandidate -// may be equal. -func peerAPIIfCandidateRelayServer(filt *filter.Filter, self, maybeCandidate tailcfg.NodeView) netip.AddrPort { +// nodeHasCap returns true if src has cap on dst, otherwise it returns false. +func nodeHasCap(filt *filter.Filter, src, dst tailcfg.NodeView, cap tailcfg.PeerCapability) bool { if filt == nil || - !self.Valid() || - !maybeCandidate.Valid() || - !maybeCandidate.Hostinfo().Valid() { - return netip.AddrPort{} - } - if maybeCandidate.ID() != self.ID() && !capVerIsRelayCapable(maybeCandidate.Cap()) { - // If maybeCandidate's [tailcfg.CapabilityVersion] is not relay-capable, - // we skip it. If maybeCandidate happens to be self, then this check is - // unnecessary as self is always capable from this point (the statically - // compiled [tailcfg.CurrentCapabilityVersion]) forward. - return netip.AddrPort{} - } - for _, maybeCandidatePrefix := range maybeCandidate.Addresses().All() { - if !maybeCandidatePrefix.IsSingleIP() { + !src.Valid() || + !dst.Valid() { + return false + } + for _, srcPrefix := range src.Addresses().All() { + if !srcPrefix.IsSingleIP() { continue } - maybeCandidateAddr := maybeCandidatePrefix.Addr() - for _, selfPrefix := range self.Addresses().All() { - if !selfPrefix.IsSingleIP() { + srcAddr := srcPrefix.Addr() + for _, dstPrefix := range dst.Addresses().All() { + if !dstPrefix.IsSingleIP() { continue } - selfAddr := selfPrefix.Addr() - if selfAddr.BitLen() == maybeCandidateAddr.BitLen() { // same address family - if filt.CapsWithValues(maybeCandidateAddr, selfAddr).HasCapability(tailcfg.PeerCapabilityRelayTarget) { - for _, s := range maybeCandidate.Hostinfo().Services().All() { - if maybeCandidateAddr.Is4() && s.Proto == tailcfg.PeerAPI4 || - maybeCandidateAddr.Is6() && s.Proto == tailcfg.PeerAPI6 { - return netip.AddrPortFrom(maybeCandidateAddr, s.Port) - } - } - return netip.AddrPort{} // no peerAPI - } else { - // [nodeBackend.peerCapsLocked] only returns/considers the - // [tailcfg.PeerCapMap] between the passed src and the - // _first_ host (/32 or /128) address for self. We are - // consistent with that behavior here. If self and - // maybeCandidate host addresses are of the same address - // family they either have the capability or not. We do not - // check against additional host addresses of the same - // address family. - return netip.AddrPort{} - } + dstAddr := dstPrefix.Addr() + if dstAddr.BitLen() == srcAddr.BitLen() { // same address family + // [nodeBackend.peerCapsLocked] only returns/considers the + // [tailcfg.PeerCapMap] between the passed src and the _first_ + // host (/32 or /128) address for self. We are consistent with + // that behavior here. If src and dst host addresses are of the + // same address family they either have the capability or not. + // We do not check against additional host addresses of the same + // address family. 
+ return filt.CapsWithValues(srcAddr, dstAddr).HasCapability(cap) } } } - return netip.AddrPort{} + return false +} + +// candidatePeerRelay represents the identifiers and DERP home region ID for a +// peer relay server. +type candidatePeerRelay struct { + nodeKey key.NodePublic + discoKey key.DiscoPublic + derpHomeRegionID uint16 +} + +func (c *candidatePeerRelay) isValid() bool { + return !c.nodeKey.IsZero() && !c.discoKey.IsZero() } // onNodeViewsUpdate is called when a [NodeViewsUpdate] is received over the @@ -3792,18 +3981,22 @@ var ( metricRecvDiscoBadKey = clientmetric.NewCounter("magicsock_disco_recv_bad_key") metricRecvDiscoBadParse = clientmetric.NewCounter("magicsock_disco_recv_bad_parse") - metricRecvDiscoUDP = clientmetric.NewCounter("magicsock_disco_recv_udp") - metricRecvDiscoDERP = clientmetric.NewCounter("magicsock_disco_recv_derp") - metricRecvDiscoPing = clientmetric.NewCounter("magicsock_disco_recv_ping") - metricRecvDiscoPong = clientmetric.NewCounter("magicsock_disco_recv_pong") - metricRecvDiscoCallMeMaybe = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe") - metricRecvDiscoCallMeMaybeVia = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia") - metricRecvDiscoCallMeMaybeBadNode = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_node") - metricRecvDiscoCallMeMaybeViaBadNode = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia_bad_node") - metricRecvDiscoCallMeMaybeBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_disco") - metricRecvDiscoCallMeMaybeViaBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia_bad_disco") - metricRecvDiscoDERPPeerNotHere = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_not_here") - metricRecvDiscoDERPPeerGoneUnknown = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_gone_unknown") + metricRecvDiscoUDP = clientmetric.NewCounter("magicsock_disco_recv_udp") + metricRecvDiscoDERP = clientmetric.NewCounter("magicsock_disco_recv_derp") + metricRecvDiscoPing = clientmetric.NewCounter("magicsock_disco_recv_ping") + metricRecvDiscoPong = clientmetric.NewCounter("magicsock_disco_recv_pong") + metricRecvDiscoCallMeMaybe = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe") + metricRecvDiscoCallMeMaybeVia = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia") + metricRecvDiscoCallMeMaybeBadNode = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_node") + metricRecvDiscoCallMeMaybeViaBadNode = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia_bad_node") + metricRecvDiscoCallMeMaybeBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_disco") + metricRecvDiscoCallMeMaybeViaBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia_bad_disco") + metricRecvDiscoAllocUDPRelayEndpointRequest = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_request") + metricRecvDiscoAllocUDPRelayEndpointRequestBadDisco = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_request_bad_disco") + metricRecvDiscoAllocUDPRelayEndpointResponseBadDisco = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_response_bad_disco") + metricRecvDiscoAllocUDPRelayEndpointResponse = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_response") + metricRecvDiscoDERPPeerNotHere = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_not_here") + metricRecvDiscoDERPPeerGoneUnknown = 
clientmetric.NewCounter("magicsock_disco_recv_derp_peer_gone_unknown") // metricDERPHomeChange is how many times our DERP home region DI has // changed from non-zero to a different non-zero. metricDERPHomeChange = clientmetric.NewCounter("derp_home_change") @@ -3985,6 +4178,22 @@ func (le *lazyEndpoint) FromPeer(peerPublicKey [32]byte) { } // PeerRelays returns the current set of candidate peer relays. -func (c *Conn) PeerRelays() set.Set[netip.AddrPort] { - return c.relayManager.getServers() +func (c *Conn) PeerRelays() set.Set[netip.Addr] { + candidatePeerRelays := c.relayManager.getServers() + servers := make(set.Set[netip.Addr], len(candidatePeerRelays)) + c.mu.Lock() + defer c.mu.Unlock() + for relay := range candidatePeerRelays { + pi, ok := c.peerMap.byNodeKey[relay.nodeKey] + if !ok { + if c.self.Key().Compare(relay.nodeKey) == 0 { + if c.self.Addresses().Len() > 0 { + servers.Add(c.self.Addresses().At(0).Addr()) + } + } + continue + } + servers.Add(pi.ep.nodeAddr) + } + return servers } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 1d76e6c59..8a09df27d 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -19,7 +19,6 @@ import ( "net/http/httptest" "net/netip" "os" - "reflect" "runtime" "strconv" "strings" @@ -64,6 +63,7 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/nettype" "tailscale.com/types/ptr" + "tailscale.com/types/views" "tailscale.com/util/cibuild" "tailscale.com/util/eventbus" "tailscale.com/util/must" @@ -3384,61 +3384,72 @@ func Test_virtualNetworkID(t *testing.T) { } } -func Test_peerAPIIfCandidateRelayServer(t *testing.T) { - hostInfo := &tailcfg.Hostinfo{ - Services: []tailcfg.Service{ - { - Proto: tailcfg.PeerAPI4, - Port: 4, - }, - { - Proto: tailcfg.PeerAPI6, - Port: 6, - }, +func Test_looksLikeInitiationMsg(t *testing.T) { + initMsg := make([]byte, device.MessageInitiationSize) + binary.BigEndian.PutUint32(initMsg, device.MessageInitiationType) + initMsgSizeTransportType := make([]byte, device.MessageInitiationSize) + binary.BigEndian.PutUint32(initMsgSizeTransportType, device.MessageTransportType) + tests := []struct { + name string + b []byte + want bool + }{ + { + name: "valid initiation", + b: initMsg, + want: true, + }, + { + name: "invalid message type field", + b: initMsgSizeTransportType, + want: false, + }, + { + name: "too small", + b: initMsg[:device.MessageInitiationSize-1], + want: false, + }, + { + name: "too big", + b: append(initMsg, 0), + want: false, }, } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := looksLikeInitiationMsg(tt.b); got != tt.want { + t.Errorf("looksLikeInitiationMsg() = %v, want %v", got, tt.want) + } + }) + } +} - selfOnlyIPv4 := &tailcfg.Node{ +func Test_nodeHasCap(t *testing.T) { + nodeAOnlyIPv4 := &tailcfg.Node{ ID: 1, - // Intentionally set a value < 120 to verify the statically compiled - // [tailcfg.CurrentCapabilityVersion] is used when self is - // maybeCandidate. 
- Cap: 119, Addresses: []netip.Prefix{ netip.MustParsePrefix("1.1.1.1/32"), }, - Hostinfo: hostInfo.View(), } - selfOnlyIPv6 := selfOnlyIPv4.Clone() - selfOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::1/128") + nodeBOnlyIPv6 := nodeAOnlyIPv4.Clone() + nodeBOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::1/128") - peerOnlyIPv4 := &tailcfg.Node{ - ID: 2, - Cap: 120, + nodeCOnlyIPv4 := &tailcfg.Node{ + ID: 2, Addresses: []netip.Prefix{ netip.MustParsePrefix("2.2.2.2/32"), }, - Hostinfo: hostInfo.View(), } - - peerOnlyIPv4NotCapable := peerOnlyIPv4.Clone() - peerOnlyIPv4NotCapable.Cap = 119 - - peerOnlyIPv6 := peerOnlyIPv4.Clone() - peerOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::2/128") - - peerOnlyIPv4ZeroCapVer := peerOnlyIPv4.Clone() - peerOnlyIPv4ZeroCapVer.Cap = 0 - - peerOnlyIPv4NilHostinfo := peerOnlyIPv4.Clone() - peerOnlyIPv4NilHostinfo.Hostinfo = tailcfg.HostinfoView{} + nodeDOnlyIPv6 := nodeCOnlyIPv4.Clone() + nodeDOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::2/128") tests := []struct { - name string - filt *filter.Filter - self tailcfg.NodeView - maybeCandidate tailcfg.NodeView - want netip.AddrPort + name string + filt *filter.Filter + src tailcfg.NodeView + dst tailcfg.NodeView + cap tailcfg.PeerCapability + want bool }{ { name: "match v4", @@ -3453,26 +3464,10 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv4.View(), - maybeCandidate: peerOnlyIPv4.View(), - want: netip.MustParseAddrPort("2.2.2.2:4"), - }, - { - name: "match v4 self", - filt: filter.New([]filtertype.Match{ - { - Srcs: []netip.Prefix{selfOnlyIPv4.Addresses[0]}, - Caps: []filtertype.CapMatch{ - { - Dst: selfOnlyIPv4.Addresses[0], - Cap: tailcfg.PeerCapabilityRelayTarget, - }, - }, - }, - }, nil, nil, nil, nil, nil), - self: selfOnlyIPv4.View(), - maybeCandidate: selfOnlyIPv4.View(), - want: netip.AddrPortFrom(selfOnlyIPv4.Addresses[0].Addr(), 4), + src: nodeCOnlyIPv4.View(), + dst: nodeAOnlyIPv4.View(), + cap: tailcfg.PeerCapabilityRelayTarget, + want: true, }, { name: "match v6", @@ -3487,77 +3482,67 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv6.View(), - maybeCandidate: peerOnlyIPv6.View(), - want: netip.MustParseAddrPort("[::2]:6"), - }, - { - name: "match v6 self", - filt: filter.New([]filtertype.Match{ - { - Srcs: []netip.Prefix{selfOnlyIPv6.Addresses[0]}, - Caps: []filtertype.CapMatch{ - { - Dst: selfOnlyIPv6.Addresses[0], - Cap: tailcfg.PeerCapabilityRelayTarget, - }, - }, - }, - }, nil, nil, nil, nil, nil), - self: selfOnlyIPv6.View(), - maybeCandidate: selfOnlyIPv6.View(), - want: netip.AddrPortFrom(selfOnlyIPv6.Addresses[0].Addr(), 6), + src: nodeDOnlyIPv6.View(), + dst: nodeBOnlyIPv6.View(), + cap: tailcfg.PeerCapabilityRelayTarget, + want: true, }, { - name: "peer incapable", + name: "no match CapMatch Dst", filt: filter.New([]filtertype.Match{ { - Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Srcs: []netip.Prefix{netip.MustParsePrefix("::2/128")}, Caps: []filtertype.CapMatch{ { - Dst: netip.MustParsePrefix("1.1.1.1/32"), + Dst: netip.MustParsePrefix("::3/128"), Cap: tailcfg.PeerCapabilityRelayTarget, }, }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv4.View(), - maybeCandidate: peerOnlyIPv4NotCapable.View(), + src: nodeDOnlyIPv6.View(), + dst: nodeBOnlyIPv6.View(), + cap: tailcfg.PeerCapabilityRelayTarget, + want: false, }, { - name: "no match dst", + name: "no match peer cap", filt: filter.New([]filtertype.Match{ { Srcs: 
[]netip.Prefix{netip.MustParsePrefix("::2/128")}, Caps: []filtertype.CapMatch{ { - Dst: netip.MustParsePrefix("::3/128"), - Cap: tailcfg.PeerCapabilityRelayTarget, + Dst: netip.MustParsePrefix("::1/128"), + Cap: tailcfg.PeerCapabilityIngress, }, }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv6.View(), - maybeCandidate: peerOnlyIPv6.View(), + src: nodeDOnlyIPv6.View(), + dst: nodeBOnlyIPv6.View(), + cap: tailcfg.PeerCapabilityRelayTarget, + want: false, }, { - name: "no match peer cap", + name: "nil src", filt: filter.New([]filtertype.Match{ { - Srcs: []netip.Prefix{netip.MustParsePrefix("::2/128")}, + Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, Caps: []filtertype.CapMatch{ { - Dst: netip.MustParsePrefix("::1/128"), - Cap: tailcfg.PeerCapabilityIngress, + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, }, }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv6.View(), - maybeCandidate: peerOnlyIPv6.View(), + src: tailcfg.NodeView{}, + dst: nodeAOnlyIPv4.View(), + cap: tailcfg.PeerCapabilityRelayTarget, + want: false, }, { - name: "cap ver not relay capable", + name: "nil dst", filt: filter.New([]filtertype.Match{ { Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, @@ -3569,108 +3554,136 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: peerOnlyIPv4.View(), - maybeCandidate: peerOnlyIPv4ZeroCapVer.View(), + src: nodeCOnlyIPv4.View(), + dst: tailcfg.NodeView{}, + cap: tailcfg.PeerCapabilityRelayTarget, + want: false, }, - { - name: "nil filt", - filt: nil, - self: selfOnlyIPv4.View(), - maybeCandidate: peerOnlyIPv4.View(), + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := nodeHasCap(tt.filt, tt.src, tt.dst, tt.cap); got != tt.want { + t.Errorf("nodeHasCap() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestConn_updateRelayServersSet(t *testing.T) { + peerNodeCandidateRelay := &tailcfg.Node{ + Cap: 121, + ID: 1, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("1.1.1.1/32"), }, + HomeDERP: 1, + Key: key.NewNode().Public(), + DiscoKey: key.NewDisco().Public(), + } + + peerNodeNotCandidateRelayCapVer := &tailcfg.Node{ + Cap: 120, // intentionally lower to fail capVer check + ID: 1, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("1.1.1.1/32"), + }, + HomeDERP: 1, + Key: key.NewNode().Public(), + DiscoKey: key.NewDisco().Public(), + } + + selfNode := &tailcfg.Node{ + Cap: 120, // intentionally lower than capVerIsRelayCapable to verify self check + ID: 2, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("2.2.2.2/32"), + }, + HomeDERP: 2, + Key: key.NewNode().Public(), + DiscoKey: key.NewDisco().Public(), + } + + tests := []struct { + name string + filt *filter.Filter + self tailcfg.NodeView + peers views.Slice[tailcfg.NodeView] + wantRelayServers set.Set[candidatePeerRelay] + }{ { - name: "nil self", + name: "candidate relay server", filt: filter.New([]filtertype.Match{ { - Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Srcs: peerNodeCandidateRelay.Addresses, Caps: []filtertype.CapMatch{ { - Dst: netip.MustParsePrefix("1.1.1.1/32"), + Dst: selfNode.Addresses[0], Cap: tailcfg.PeerCapabilityRelayTarget, }, }, }, }, nil, nil, nil, nil, nil), - self: tailcfg.NodeView{}, - maybeCandidate: peerOnlyIPv4.View(), + self: selfNode.View(), + peers: views.SliceOf([]tailcfg.NodeView{peerNodeCandidateRelay.View()}), + wantRelayServers: set.SetOf([]candidatePeerRelay{ + { + nodeKey: peerNodeCandidateRelay.Key, + discoKey: 
peerNodeCandidateRelay.DiscoKey, + derpHomeRegionID: 1, + }, + }), }, { - name: "nil peer", + name: "self candidate relay server", filt: filter.New([]filtertype.Match{ { - Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Srcs: selfNode.Addresses, Caps: []filtertype.CapMatch{ { - Dst: netip.MustParsePrefix("1.1.1.1/32"), + Dst: selfNode.Addresses[0], Cap: tailcfg.PeerCapabilityRelayTarget, }, }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv4.View(), - maybeCandidate: tailcfg.NodeView{}, + self: selfNode.View(), + peers: views.SliceOf([]tailcfg.NodeView{selfNode.View()}), + wantRelayServers: set.SetOf([]candidatePeerRelay{ + { + nodeKey: selfNode.Key, + discoKey: selfNode.DiscoKey, + derpHomeRegionID: 2, + }, + }), }, { - name: "nil peer hostinfo", + name: "no candidate relay server", filt: filter.New([]filtertype.Match{ { - Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Srcs: peerNodeNotCandidateRelayCapVer.Addresses, Caps: []filtertype.CapMatch{ { - Dst: netip.MustParsePrefix("1.1.1.1/32"), + Dst: selfNode.Addresses[0], Cap: tailcfg.PeerCapabilityRelayTarget, }, }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv4.View(), - maybeCandidate: peerOnlyIPv4NilHostinfo.View(), + self: selfNode.View(), + peers: views.SliceOf([]tailcfg.NodeView{peerNodeNotCandidateRelayCapVer.View()}), + wantRelayServers: make(set.Set[candidatePeerRelay]), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := peerAPIIfCandidateRelayServer(tt.filt, tt.self, tt.maybeCandidate); !reflect.DeepEqual(got, tt.want) { - t.Errorf("peerAPIIfCandidateRelayServer() = %v, want %v", got, tt.want) + c := &Conn{} + c.updateRelayServersSet(tt.filt, tt.self, tt.peers) + got := c.relayManager.getServers() + if !got.Equal(tt.wantRelayServers) { + t.Fatalf("got: %v != want: %v", got, tt.wantRelayServers) } - }) - } -} - -func Test_looksLikeInitiationMsg(t *testing.T) { - initMsg := make([]byte, device.MessageInitiationSize) - binary.BigEndian.PutUint32(initMsg, device.MessageInitiationType) - initMsgSizeTransportType := make([]byte, device.MessageInitiationSize) - binary.BigEndian.PutUint32(initMsgSizeTransportType, device.MessageTransportType) - tests := []struct { - name string - b []byte - want bool - }{ - { - name: "valid initiation", - b: initMsg, - want: true, - }, - { - name: "invalid message type field", - b: initMsgSizeTransportType, - want: false, - }, - { - name: "too small", - b: initMsg[:device.MessageInitiationSize-1], - want: false, - }, - { - name: "too big", - b: append(initMsg, 0), - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := looksLikeInitiationMsg(tt.b); got != tt.want { - t.Errorf("looksLikeInitiationMsg() = %v, want %v", got, tt.want) + if len(tt.wantRelayServers) > 0 != c.hasPeerRelayServers.Load() { + t.Fatalf("c.hasPeerRelayServers: %v != wantRelayServers: %v", c.hasPeerRelayServers.Load(), tt.wantRelayServers) } }) } diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index d7acf80b5..ad8c5fc76 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -4,23 +4,18 @@ package magicsock import ( - "bytes" "context" - "encoding/json" "errors" - "fmt" - "io" - "net/http" "net/netip" - "strconv" "sync" "time" "tailscale.com/disco" "tailscale.com/net/stun" udprelay "tailscale.com/net/udprelay/endpoint" + "tailscale.com/tailcfg" + "tailscale.com/tstime" "tailscale.com/types/key" - "tailscale.com/util/httpm" "tailscale.com/util/set" ) 
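
Several of the relayManager maps introduced in the hunks below are keyed by key.SortedPairOfDiscoPublic; because NewSortedPairOfDiscoPublic normalizes argument order and the type is a plain comparable struct, either ordering of the two disco keys resolves to the same map entry. A small self-contained illustration (variable names are made up for the example):

package main

import (
	"fmt"

	"tailscale.com/types/key"
)

func main() {
	a := key.NewDisco().Public()
	b := key.NewDisco().Public()

	// Track in-flight allocation work keyed by the sorted disco key pair.
	work := map[key.SortedPairOfDiscoPublic]string{
		key.NewSortedPairOfDiscoPublic(a, b): "alloc in flight",
	}

	// Looking the pair up in the opposite order hits the same entry.
	if v, ok := work[key.NewSortedPairOfDiscoPublic(b, a)]; ok {
		fmt.Println(v) // "alloc in flight"
	}
}
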
@@ -38,26 +33,28 @@ type relayManager struct { // =================================================================== // The following fields are owned by a single goroutine, runLoop(). - serversByAddrPort map[netip.AddrPort]key.DiscoPublic - serversByDisco map[key.DiscoPublic]netip.AddrPort - allocWorkByEndpoint map[*endpoint]*relayEndpointAllocWork - handshakeWorkByEndpointByServerDisco map[*endpoint]map[key.DiscoPublic]*relayHandshakeWork - handshakeWorkByServerDiscoVNI map[serverDiscoVNI]*relayHandshakeWork - handshakeWorkAwaitingPong map[*relayHandshakeWork]addrPortVNI - addrPortVNIToHandshakeWork map[addrPortVNI]*relayHandshakeWork - handshakeGeneration uint32 + serversByNodeKey map[key.NodePublic]candidatePeerRelay + allocWorkByCandidatePeerRelayByEndpoint map[*endpoint]map[candidatePeerRelay]*relayEndpointAllocWork + allocWorkByDiscoKeysByServerNodeKey map[key.NodePublic]map[key.SortedPairOfDiscoPublic]*relayEndpointAllocWork + handshakeWorkByServerDiscoByEndpoint map[*endpoint]map[key.DiscoPublic]*relayHandshakeWork + handshakeWorkByServerDiscoVNI map[serverDiscoVNI]*relayHandshakeWork + handshakeWorkAwaitingPong map[*relayHandshakeWork]addrPortVNI + addrPortVNIToHandshakeWork map[addrPortVNI]*relayHandshakeWork + handshakeGeneration uint32 + allocGeneration uint32 // =================================================================== // The following chan fields serve event inputs to a single goroutine, // runLoop(). - startDiscoveryCh chan endpointWithLastBest - allocateWorkDoneCh chan relayEndpointAllocWorkDoneEvent - handshakeWorkDoneCh chan relayEndpointHandshakeWorkDoneEvent - cancelWorkCh chan *endpoint - newServerEndpointCh chan newRelayServerEndpointEvent - rxHandshakeDiscoMsgCh chan relayHandshakeDiscoMsgEvent - serversCh chan set.Set[netip.AddrPort] - getServersCh chan chan set.Set[netip.AddrPort] + startDiscoveryCh chan endpointWithLastBest + allocateWorkDoneCh chan relayEndpointAllocWorkDoneEvent + handshakeWorkDoneCh chan relayEndpointHandshakeWorkDoneEvent + cancelWorkCh chan *endpoint + newServerEndpointCh chan newRelayServerEndpointEvent + rxDiscoMsgCh chan relayDiscoMsgEvent + serversCh chan set.Set[candidatePeerRelay] + getServersCh chan chan set.Set[candidatePeerRelay] + derpHomeChangeCh chan derpHomeChangeEvent discoInfoMu sync.Mutex // guards the following field discoInfoByServerDisco map[key.DiscoPublic]*relayHandshakeDiscoInfo @@ -86,7 +83,7 @@ type relayHandshakeWork struct { // relayManager.handshakeWorkDoneCh if runLoop() can receive it. runLoop() // must select{} read on doneCh to prevent deadlock when attempting to write // to rxDiscoMsgCh. - rxDiscoMsgCh chan relayHandshakeDiscoMsgEvent + rxDiscoMsgCh chan relayDiscoMsgEvent doneCh chan relayEndpointHandshakeWorkDoneEvent ctx context.Context @@ -100,14 +97,15 @@ type relayHandshakeWork struct { type newRelayServerEndpointEvent struct { wlb endpointWithLastBest se udprelay.ServerEndpoint - server netip.AddrPort // zero value if learned via [disco.CallMeMaybeVia] + server candidatePeerRelay // zero value if learned via [disco.CallMeMaybeVia] } // relayEndpointAllocWorkDoneEvent indicates relay server endpoint allocation // work for an [*endpoint] has completed. This structure is immutable once // initialized. 
type relayEndpointAllocWorkDoneEvent struct { - work *relayEndpointAllocWork + work *relayEndpointAllocWork + allocated udprelay.ServerEndpoint // !allocated.ServerDisco.IsZero() if allocation succeeded } // relayEndpointHandshakeWorkDoneEvent indicates relay server endpoint handshake @@ -122,18 +120,42 @@ type relayEndpointHandshakeWorkDoneEvent struct { // hasActiveWorkRunLoop returns true if there is outstanding allocation or // handshaking work for any endpoint, otherwise it returns false. func (r *relayManager) hasActiveWorkRunLoop() bool { - return len(r.allocWorkByEndpoint) > 0 || len(r.handshakeWorkByEndpointByServerDisco) > 0 + return len(r.allocWorkByCandidatePeerRelayByEndpoint) > 0 || len(r.handshakeWorkByServerDiscoByEndpoint) > 0 } // hasActiveWorkForEndpointRunLoop returns true if there is outstanding // allocation or handshaking work for the provided endpoint, otherwise it // returns false. func (r *relayManager) hasActiveWorkForEndpointRunLoop(ep *endpoint) bool { - _, handshakeWork := r.handshakeWorkByEndpointByServerDisco[ep] - _, allocWork := r.allocWorkByEndpoint[ep] + _, handshakeWork := r.handshakeWorkByServerDiscoByEndpoint[ep] + _, allocWork := r.allocWorkByCandidatePeerRelayByEndpoint[ep] return handshakeWork || allocWork } +// derpHomeChangeEvent represents a change in the DERP home region for the +// node identified by nodeKey. This structure is immutable once initialized. +type derpHomeChangeEvent struct { + nodeKey key.NodePublic + regionID uint16 +} + +// handleDERPHomeChange handles a DERP home change event for nodeKey and +// regionID. +func (r *relayManager) handleDERPHomeChange(nodeKey key.NodePublic, regionID uint16) { + relayManagerInputEvent(r, nil, &r.derpHomeChangeCh, derpHomeChangeEvent{ + nodeKey: nodeKey, + regionID: regionID, + }) +} + +func (r *relayManager) handleDERPHomeChangeRunLoop(event derpHomeChangeEvent) { + c, ok := r.serversByNodeKey[event.nodeKey] + if ok { + c.derpHomeRegionID = event.regionID + r.serversByNodeKey[event.nodeKey] = c + } +} + // runLoop is a form of event loop. It ensures exclusive access to most of // [relayManager] state. func (r *relayManager) runLoop() { @@ -151,13 +173,7 @@ func (r *relayManager) runLoop() { return } case done := <-r.allocateWorkDoneCh: - work, ok := r.allocWorkByEndpoint[done.work.ep] - if ok && work == done.work { - // Verify the work in the map is the same as the one that we're - // cleaning up. New events on r.startDiscoveryCh can - // overwrite pre-existing keys. 
- delete(r.allocWorkByEndpoint, done.work.ep) - } + r.handleAllocWorkDoneRunLoop(done) if !r.hasActiveWorkRunLoop() { return } @@ -176,8 +192,8 @@ func (r *relayManager) runLoop() { if !r.hasActiveWorkRunLoop() { return } - case discoMsgEvent := <-r.rxHandshakeDiscoMsgCh: - r.handleRxHandshakeDiscoMsgRunLoop(discoMsgEvent) + case discoMsgEvent := <-r.rxDiscoMsgCh: + r.handleRxDiscoMsgRunLoop(discoMsgEvent) if !r.hasActiveWorkRunLoop() { return } @@ -191,69 +207,77 @@ func (r *relayManager) runLoop() { if !r.hasActiveWorkRunLoop() { return } + case derpHomeChange := <-r.derpHomeChangeCh: + r.handleDERPHomeChangeRunLoop(derpHomeChange) + if !r.hasActiveWorkRunLoop() { + return + } } } } -func (r *relayManager) handleGetServersRunLoop(getServersCh chan set.Set[netip.AddrPort]) { - servers := make(set.Set[netip.AddrPort], len(r.serversByAddrPort)) - for server := range r.serversByAddrPort { - servers.Add(server) +func (r *relayManager) handleGetServersRunLoop(getServersCh chan set.Set[candidatePeerRelay]) { + servers := make(set.Set[candidatePeerRelay], len(r.serversByNodeKey)) + for _, v := range r.serversByNodeKey { + servers.Add(v) } getServersCh <- servers } -func (r *relayManager) getServers() set.Set[netip.AddrPort] { - ch := make(chan set.Set[netip.AddrPort]) +func (r *relayManager) getServers() set.Set[candidatePeerRelay] { + ch := make(chan set.Set[candidatePeerRelay]) relayManagerInputEvent(r, nil, &r.getServersCh, ch) return <-ch } -func (r *relayManager) handleServersUpdateRunLoop(update set.Set[netip.AddrPort]) { - for k, v := range r.serversByAddrPort { - if !update.Contains(k) { - delete(r.serversByAddrPort, k) - delete(r.serversByDisco, v) +func (r *relayManager) handleServersUpdateRunLoop(update set.Set[candidatePeerRelay]) { + for _, v := range r.serversByNodeKey { + if !update.Contains(v) { + delete(r.serversByNodeKey, v.nodeKey) } } for _, v := range update.Slice() { - _, ok := r.serversByAddrPort[v] - if ok { - // don't zero known disco keys - continue - } - r.serversByAddrPort[v] = key.DiscoPublic{} + r.serversByNodeKey[v.nodeKey] = v } } -type relayHandshakeDiscoMsgEvent struct { - conn *Conn // for access to [Conn] if there is no associated [relayHandshakeWork] - msg disco.Message - disco key.DiscoPublic - from netip.AddrPort - vni uint32 - at time.Time +type relayDiscoMsgEvent struct { + conn *Conn // for access to [Conn] if there is no associated [relayHandshakeWork] + msg disco.Message + relayServerNodeKey key.NodePublic // nonzero if msg is a [*disco.AllocateUDPRelayEndpointResponse] + disco key.DiscoPublic + from netip.AddrPort + vni uint32 + at time.Time } // relayEndpointAllocWork serves to track in-progress relay endpoint allocation // for an [*endpoint]. This structure is immutable once initialized. type relayEndpointAllocWork struct { - // ep is the [*endpoint] associated with the work - ep *endpoint - // cancel() will signal all associated goroutines to return + wlb endpointWithLastBest + discoKeys key.SortedPairOfDiscoPublic + candidatePeerRelay candidatePeerRelay + + // allocateServerEndpoint() always writes to doneCh (len 1) when it + // returns. It may end up writing the same event afterward to + // [relayManager.allocateWorkDoneCh] if runLoop() can receive it. runLoop() + // must select{} read on doneCh to prevent deadlock when attempting to write + // to rxDiscoMsgCh. 
+ rxDiscoMsgCh chan *disco.AllocateUDPRelayEndpointResponse + doneCh chan relayEndpointAllocWorkDoneEvent + + ctx context.Context cancel context.CancelFunc - // wg.Wait() will return once all associated goroutines have returned - wg *sync.WaitGroup } // init initializes [relayManager] if it is not already initialized. func (r *relayManager) init() { r.initOnce.Do(func() { r.discoInfoByServerDisco = make(map[key.DiscoPublic]*relayHandshakeDiscoInfo) - r.serversByDisco = make(map[key.DiscoPublic]netip.AddrPort) - r.serversByAddrPort = make(map[netip.AddrPort]key.DiscoPublic) - r.allocWorkByEndpoint = make(map[*endpoint]*relayEndpointAllocWork) - r.handshakeWorkByEndpointByServerDisco = make(map[*endpoint]map[key.DiscoPublic]*relayHandshakeWork) + r.serversByNodeKey = make(map[key.NodePublic]candidatePeerRelay) + r.allocWorkByCandidatePeerRelayByEndpoint = make(map[*endpoint]map[candidatePeerRelay]*relayEndpointAllocWork) + r.allocWorkByDiscoKeysByServerNodeKey = make(map[key.NodePublic]map[key.SortedPairOfDiscoPublic]*relayEndpointAllocWork) + r.handshakeWorkByServerDiscoByEndpoint = make(map[*endpoint]map[key.DiscoPublic]*relayHandshakeWork) r.handshakeWorkByServerDiscoVNI = make(map[serverDiscoVNI]*relayHandshakeWork) r.handshakeWorkAwaitingPong = make(map[*relayHandshakeWork]addrPortVNI) r.addrPortVNIToHandshakeWork = make(map[addrPortVNI]*relayHandshakeWork) @@ -262,9 +286,10 @@ func (r *relayManager) init() { r.handshakeWorkDoneCh = make(chan relayEndpointHandshakeWorkDoneEvent) r.cancelWorkCh = make(chan *endpoint) r.newServerEndpointCh = make(chan newRelayServerEndpointEvent) - r.rxHandshakeDiscoMsgCh = make(chan relayHandshakeDiscoMsgEvent) - r.serversCh = make(chan set.Set[netip.AddrPort]) - r.getServersCh = make(chan chan set.Set[netip.AddrPort]) + r.rxDiscoMsgCh = make(chan relayDiscoMsgEvent) + r.serversCh = make(chan set.Set[candidatePeerRelay]) + r.getServersCh = make(chan chan set.Set[candidatePeerRelay]) + r.derpHomeChangeCh = make(chan derpHomeChangeEvent) r.runLoopStoppedCh = make(chan struct{}, 1) r.runLoopStoppedCh <- struct{}{} }) @@ -330,6 +355,7 @@ func (r *relayManager) discoInfo(serverDisco key.DiscoPublic) (_ *discoInfo, ok func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, lastBest addrQuality, lastBestIsTrusted bool, dm *disco.CallMeMaybeVia) { se := udprelay.ServerEndpoint{ ServerDisco: dm.ServerDisco, + ClientDisco: dm.ClientDisco, LamportID: dm.LamportID, AddrPorts: dm.AddrPorts, VNI: dm.VNI, @@ -346,14 +372,25 @@ func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, lastBest addrQuality, }) } -// handleGeneveEncapDiscoMsg handles reception of Geneve-encapsulated disco -// messages. -func (r *relayManager) handleGeneveEncapDiscoMsg(conn *Conn, dm disco.Message, di *discoInfo, src epAddr) { - relayManagerInputEvent(r, nil, &r.rxHandshakeDiscoMsgCh, relayHandshakeDiscoMsgEvent{conn: conn, msg: dm, disco: di.discoKey, from: src.ap, vni: src.vni.get(), at: time.Now()}) +// handleRxDiscoMsg handles reception of disco messages that [relayManager] +// may be interested in. This includes all Geneve-encapsulated disco messages +// and [*disco.AllocateUDPRelayEndpointResponse]. If dm is a +// [*disco.AllocateUDPRelayEndpointResponse] then relayServerNodeKey must be +// nonzero. 
+func (r *relayManager) handleRxDiscoMsg(conn *Conn, dm disco.Message, relayServerNodeKey key.NodePublic, discoKey key.DiscoPublic, src epAddr) { + relayManagerInputEvent(r, nil, &r.rxDiscoMsgCh, relayDiscoMsgEvent{ + conn: conn, + msg: dm, + relayServerNodeKey: relayServerNodeKey, + disco: discoKey, + from: src.ap, + vni: src.vni.get(), + at: time.Now(), + }) } // handleRelayServersSet handles an update of the complete relay server set. -func (r *relayManager) handleRelayServersSet(servers set.Set[netip.AddrPort]) { +func (r *relayManager) handleRelayServersSet(servers set.Set[candidatePeerRelay]) { relayManagerInputEvent(r, nil, &r.serversCh, servers) } @@ -396,7 +433,11 @@ type endpointWithLastBest struct { // startUDPRelayPathDiscoveryFor starts UDP relay path discovery for ep on all // known relay servers if ep has no in-progress work. func (r *relayManager) startUDPRelayPathDiscoveryFor(ep *endpoint, lastBest addrQuality, lastBestIsTrusted bool) { - relayManagerInputEvent(r, nil, &r.startDiscoveryCh, endpointWithLastBest{ep, lastBest, lastBestIsTrusted}) + relayManagerInputEvent(r, nil, &r.startDiscoveryCh, endpointWithLastBest{ + ep: ep, + lastBest: lastBest, + lastBestIsTrusted: lastBestIsTrusted, + }) } // stopWork stops all outstanding allocation & handshaking work for 'ep'. @@ -407,13 +448,15 @@ func (r *relayManager) stopWork(ep *endpoint) { // stopWorkRunLoop cancels & clears outstanding allocation and handshaking // work for 'ep'. func (r *relayManager) stopWorkRunLoop(ep *endpoint) { - allocWork, ok := r.allocWorkByEndpoint[ep] + byDiscoKeys, ok := r.allocWorkByCandidatePeerRelayByEndpoint[ep] if ok { - allocWork.cancel() - allocWork.wg.Wait() - delete(r.allocWorkByEndpoint, ep) + for _, work := range byDiscoKeys { + work.cancel() + done := <-work.doneCh + r.handleAllocWorkDoneRunLoop(done) + } } - byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[ep] + byServerDisco, ok := r.handshakeWorkByServerDiscoByEndpoint[ep] if ok { for _, handshakeWork := range byServerDisco { handshakeWork.cancel() @@ -430,13 +473,33 @@ type addrPortVNI struct { vni uint32 } -func (r *relayManager) handleRxHandshakeDiscoMsgRunLoop(event relayHandshakeDiscoMsgEvent) { +func (r *relayManager) handleRxDiscoMsgRunLoop(event relayDiscoMsgEvent) { var ( work *relayHandshakeWork ok bool ) apv := addrPortVNI{event.from, event.vni} switch msg := event.msg.(type) { + case *disco.AllocateUDPRelayEndpointResponse: + sorted := key.NewSortedPairOfDiscoPublic(msg.ClientDisco[0], msg.ClientDisco[1]) + byDiscoKeys, ok := r.allocWorkByDiscoKeysByServerNodeKey[event.relayServerNodeKey] + if !ok { + // No outstanding work tied to this relay sever, discard. + return + } + allocWork, ok := byDiscoKeys[sorted] + if !ok { + // No outstanding work tied to these disco keys, discard. 
+ return + } + select { + case done := <-allocWork.doneCh: + // allocateServerEndpoint returned, clean up its state + r.handleAllocWorkDoneRunLoop(done) + return + case allocWork.rxDiscoMsgCh <- msg: + return + } case *disco.BindUDPRelayEndpointChallenge: work, ok = r.handshakeWorkByServerDiscoVNI[serverDiscoVNI{event.disco, event.vni}] if !ok { @@ -504,8 +567,39 @@ func (r *relayManager) handleRxHandshakeDiscoMsgRunLoop(event relayHandshakeDisc } } +func (r *relayManager) handleAllocWorkDoneRunLoop(done relayEndpointAllocWorkDoneEvent) { + byCandidatePeerRelay, ok := r.allocWorkByCandidatePeerRelayByEndpoint[done.work.wlb.ep] + if !ok { + return + } + work, ok := byCandidatePeerRelay[done.work.candidatePeerRelay] + if !ok || work != done.work { + return + } + delete(byCandidatePeerRelay, done.work.candidatePeerRelay) + if len(byCandidatePeerRelay) == 0 { + delete(r.allocWorkByCandidatePeerRelayByEndpoint, done.work.wlb.ep) + } + byDiscoKeys, ok := r.allocWorkByDiscoKeysByServerNodeKey[done.work.candidatePeerRelay.nodeKey] + if !ok { + // unexpected + return + } + delete(byDiscoKeys, done.work.discoKeys) + if len(byDiscoKeys) == 0 { + delete(r.allocWorkByDiscoKeysByServerNodeKey, done.work.candidatePeerRelay.nodeKey) + } + if !done.allocated.ServerDisco.IsZero() { + r.handleNewServerEndpointRunLoop(newRelayServerEndpointEvent{ + wlb: done.work.wlb, + se: done.allocated, + server: done.work.candidatePeerRelay, + }) + } +} + func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshakeWorkDoneEvent) { - byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[done.work.wlb.ep] + byServerDisco, ok := r.handshakeWorkByServerDiscoByEndpoint[done.work.wlb.ep] if !ok { return } @@ -515,7 +609,7 @@ func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshak } delete(byServerDisco, done.work.se.ServerDisco) if len(byServerDisco) == 0 { - delete(r.handshakeWorkByEndpointByServerDisco, done.work.wlb.ep) + delete(r.handshakeWorkByServerDiscoByEndpoint, done.work.wlb.ep) } delete(r.handshakeWorkByServerDiscoVNI, serverDiscoVNI{done.work.se.ServerDisco, done.work.se.VNI}) apv, ok := r.handshakeWorkAwaitingPong[work] @@ -562,7 +656,7 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay } // Check for duplicate work by [*endpoint] + server disco. - byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[newServerEndpoint.wlb.ep] + byServerDisco, ok := r.handshakeWorkByServerDiscoByEndpoint[newServerEndpoint.wlb.ep] if ok { existingWork, ok := byServerDisco[newServerEndpoint.se.ServerDisco] if ok { @@ -580,33 +674,9 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay // We're now reasonably sure we're dealing with the latest // [udprelay.ServerEndpoint] from a server event order perspective - // (LamportID). Update server disco key tracking if appropriate. - if newServerEndpoint.server.IsValid() { - serverDisco, ok := r.serversByAddrPort[newServerEndpoint.server] - if !ok { - // Allocation raced with an update to our known servers set. This - // server is no longer known. Return early. - return - } - if serverDisco.Compare(newServerEndpoint.se.ServerDisco) != 0 { - // The server's disco key has either changed, or simply become - // known for the first time. In the former case we end up detaching - // any in-progress handshake work from a "known" relay server. 
- // Practically speaking we expect the detached work to fail - // if the server key did in fact change (server restart) while we - // were attempting to handshake with it. It is possible, though - // unlikely, for a server addr:port to effectively move between - // nodes. Either way, there is no harm in detaching existing work, - // and we explicitly let that happen for the rare case the detached - // handshake would complete and remain functional. - delete(r.serversByDisco, serverDisco) - delete(r.serversByAddrPort, newServerEndpoint.server) - r.serversByDisco[serverDisco] = newServerEndpoint.server - r.serversByAddrPort[newServerEndpoint.server] = serverDisco - } - } + // (LamportID). - if newServerEndpoint.server.IsValid() { + if newServerEndpoint.server.isValid() { // Send a [disco.CallMeMaybeVia] to the remote peer if we allocated this // endpoint, regardless of if we start a handshake below. go r.sendCallMeMaybeVia(newServerEndpoint.wlb.ep, newServerEndpoint.se) @@ -641,14 +711,14 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay work := &relayHandshakeWork{ wlb: newServerEndpoint.wlb, se: newServerEndpoint.se, - rxDiscoMsgCh: make(chan relayHandshakeDiscoMsgEvent), + rxDiscoMsgCh: make(chan relayDiscoMsgEvent), doneCh: make(chan relayEndpointHandshakeWorkDoneEvent, 1), ctx: ctx, cancel: cancel, } if byServerDisco == nil { byServerDisco = make(map[key.DiscoPublic]*relayHandshakeWork) - r.handshakeWorkByEndpointByServerDisco[newServerEndpoint.wlb.ep] = byServerDisco + r.handshakeWorkByServerDiscoByEndpoint[newServerEndpoint.wlb.ep] = byServerDisco } byServerDisco[newServerEndpoint.se.ServerDisco] = work r.handshakeWorkByServerDiscoVNI[sdv] = work @@ -674,12 +744,15 @@ func (r *relayManager) sendCallMeMaybeVia(ep *endpoint, se udprelay.ServerEndpoi return } callMeMaybeVia := &disco.CallMeMaybeVia{ - ServerDisco: se.ServerDisco, - LamportID: se.LamportID, - VNI: se.VNI, - BindLifetime: se.BindLifetime.Duration, - SteadyStateLifetime: se.SteadyStateLifetime.Duration, - AddrPorts: se.AddrPorts, + UDPRelayEndpoint: disco.UDPRelayEndpoint{ + ServerDisco: se.ServerDisco, + ClientDisco: se.ClientDisco, + LamportID: se.LamportID, + VNI: se.VNI, + BindLifetime: se.BindLifetime.Duration, + SteadyStateLifetime: se.SteadyStateLifetime.Duration, + AddrPorts: se.AddrPorts, + }, } ep.c.sendDiscoMessage(epAddr{ap: derpAddr}, ep.publicKey, epDisco.key, callMeMaybeVia, discoVerboseLog) } @@ -800,7 +873,7 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat // one. // // We don't need to TX a pong, that was already handled for us - // in handleRxHandshakeDiscoMsgRunLoop(). + // in handleRxDiscoMsgRunLoop(). 
txPing(msgEvent.from, nil) case *disco.Pong: at, ok := sentPingAt[msg.TxID] @@ -823,104 +896,113 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat } } -func (r *relayManager) allocateAllServersRunLoop(wlb endpointWithLastBest) { - if len(r.serversByAddrPort) == 0 { - return - } - ctx, cancel := context.WithCancel(context.Background()) - started := &relayEndpointAllocWork{ep: wlb.ep, cancel: cancel, wg: &sync.WaitGroup{}} - for k := range r.serversByAddrPort { - started.wg.Add(1) - go r.allocateSingleServer(ctx, started.wg, k, wlb) - } - r.allocWorkByEndpoint[wlb.ep] = started - go func() { - started.wg.Wait() - relayManagerInputEvent(r, ctx, &r.allocateWorkDoneCh, relayEndpointAllocWorkDoneEvent{work: started}) - // cleanup context cancellation must come after the - // relayManagerInputEvent call, otherwise it returns early without - // writing the event to runLoop(). - started.cancel() - }() -} +const allocateUDPRelayEndpointRequestTimeout = time.Second * 10 -type errNotReady struct{ retryAfter time.Duration } +func (r *relayManager) allocateServerEndpoint(work *relayEndpointAllocWork, generation uint32) { + done := relayEndpointAllocWorkDoneEvent{work: work} -func (e errNotReady) Error() string { - return fmt.Sprintf("server not ready, retry after %v", e.retryAfter) -} + defer func() { + work.doneCh <- done + relayManagerInputEvent(r, work.ctx, &r.allocateWorkDoneCh, done) + work.cancel() + }() -const reqTimeout = time.Second * 10 + dm := &disco.AllocateUDPRelayEndpointRequest{ + ClientDisco: work.discoKeys.Get(), + Generation: generation, + } + + sendAllocReq := func() { + work.wlb.ep.c.sendDiscoAllocateUDPRelayEndpointRequest( + epAddr{ + ap: netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, work.candidatePeerRelay.derpHomeRegionID), + }, + work.candidatePeerRelay.nodeKey, + work.candidatePeerRelay.discoKey, + dm, + discoVerboseLog, + ) + } + go sendAllocReq() + + returnAfterTimer := time.NewTimer(allocateUDPRelayEndpointRequestTimeout) + defer returnAfterTimer.Stop() + // While connections to DERP are over TCP, they can be lossy on the DERP + // server when data moves between the two independent streams. Also, the + // peer relay server may not be "ready" (see [tailscale.com/net/udprelay.ErrServerNotReady]). + // So, start a timer to retry once if needed. 
+ retryAfterTimer := time.NewTimer(udprelay.ServerRetryAfter) + defer retryAfterTimer.Stop() -func doAllocate(ctx context.Context, server netip.AddrPort, discoKeys [2]key.DiscoPublic) (udprelay.ServerEndpoint, error) { - var reqBody bytes.Buffer - type allocateRelayEndpointReq struct { - DiscoKeys []key.DiscoPublic - } - a := &allocateRelayEndpointReq{ - DiscoKeys: []key.DiscoPublic{discoKeys[0], discoKeys[1]}, - } - err := json.NewEncoder(&reqBody).Encode(a) - if err != nil { - return udprelay.ServerEndpoint{}, err - } - reqCtx, cancel := context.WithTimeout(ctx, reqTimeout) - defer cancel() - req, err := http.NewRequestWithContext(reqCtx, httpm.POST, "http://"+server.String()+"/v0/relay/endpoint", &reqBody) - if err != nil { - return udprelay.ServerEndpoint{}, err - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return udprelay.ServerEndpoint{}, err - } - defer resp.Body.Close() - switch resp.StatusCode { - case http.StatusOK: - var se udprelay.ServerEndpoint - err = json.NewDecoder(io.LimitReader(resp.Body, 4096)).Decode(&se) - return se, err - case http.StatusServiceUnavailable: - raHeader := resp.Header.Get("Retry-After") - raSeconds, err := strconv.ParseUint(raHeader, 10, 32) - if err == nil { - return udprelay.ServerEndpoint{}, errNotReady{retryAfter: time.Second * time.Duration(raSeconds)} + for { + select { + case <-work.ctx.Done(): + return + case <-returnAfterTimer.C: + return + case <-retryAfterTimer.C: + go sendAllocReq() + case resp := <-work.rxDiscoMsgCh: + if resp.Generation != generation || + !work.discoKeys.Equal(key.NewSortedPairOfDiscoPublic(resp.ClientDisco[0], resp.ClientDisco[1])) { + continue + } + done.allocated = udprelay.ServerEndpoint{ + ServerDisco: resp.ServerDisco, + ClientDisco: resp.ClientDisco, + LamportID: resp.LamportID, + AddrPorts: resp.AddrPorts, + VNI: resp.VNI, + BindLifetime: tstime.GoDuration{Duration: resp.BindLifetime}, + SteadyStateLifetime: tstime.GoDuration{Duration: resp.SteadyStateLifetime}, + } + return } - fallthrough - default: - return udprelay.ServerEndpoint{}, fmt.Errorf("non-200 status: %d", resp.StatusCode) } } -func (r *relayManager) allocateSingleServer(ctx context.Context, wg *sync.WaitGroup, server netip.AddrPort, wlb endpointWithLastBest) { - // TODO(jwhited): introduce client metrics counters for notable failures - defer wg.Done() +func (r *relayManager) allocateAllServersRunLoop(wlb endpointWithLastBest) { + if len(r.serversByNodeKey) == 0 { + return + } remoteDisco := wlb.ep.disco.Load() if remoteDisco == nil { return } - firstTry := true - for { - se, err := doAllocate(ctx, server, [2]key.DiscoPublic{wlb.ep.c.discoPublic, remoteDisco.key}) - if err == nil { - relayManagerInputEvent(r, ctx, &r.newServerEndpointCh, newRelayServerEndpointEvent{ - wlb: wlb, - se: se, - server: server, // we allocated this endpoint (vs CallMeMaybeVia reception), mark it as such - }) - return - } - wlb.ep.c.logf("[v1] magicsock: relayManager: error allocating endpoint on %v for %v: %v", server, wlb.ep.discoShort(), err) - var notReady errNotReady - if firstTry && errors.As(err, ¬Ready) { - select { - case <-ctx.Done(): - return - case <-time.After(min(notReady.retryAfter, reqTimeout)): - firstTry = false + discoKeys := key.NewSortedPairOfDiscoPublic(wlb.ep.c.discoPublic, remoteDisco.key) + for _, v := range r.serversByNodeKey { + byDiscoKeys, ok := r.allocWorkByDiscoKeysByServerNodeKey[v.nodeKey] + if !ok { + byDiscoKeys = make(map[key.SortedPairOfDiscoPublic]*relayEndpointAllocWork) + 
r.allocWorkByDiscoKeysByServerNodeKey[v.nodeKey] = byDiscoKeys + } else { + _, ok = byDiscoKeys[discoKeys] + if ok { + // If there is an existing key, a disco key collision may have + // occurred across peers ([*endpoint]). Do not overwrite the + // existing work, let it finish. + wlb.ep.c.logf("[unexpected] magicsock: relayManager: suspected disco key collision on server %v for keys: %v", v.nodeKey.ShortString(), discoKeys) continue } } - return + ctx, cancel := context.WithCancel(context.Background()) + started := &relayEndpointAllocWork{ + wlb: wlb, + discoKeys: discoKeys, + candidatePeerRelay: v, + rxDiscoMsgCh: make(chan *disco.AllocateUDPRelayEndpointResponse), + doneCh: make(chan relayEndpointAllocWorkDoneEvent, 1), + ctx: ctx, + cancel: cancel, + } + byDiscoKeys[discoKeys] = started + byCandidatePeerRelay, ok := r.allocWorkByCandidatePeerRelayByEndpoint[wlb.ep] + if !ok { + byCandidatePeerRelay = make(map[candidatePeerRelay]*relayEndpointAllocWork) + r.allocWorkByCandidatePeerRelayByEndpoint[wlb.ep] = byCandidatePeerRelay + } + byCandidatePeerRelay[v] = started + r.allocGeneration++ + go r.allocateServerEndpoint(started, r.allocGeneration) } } diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index 01f9258ad..e4891f567 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -4,7 +4,6 @@ package magicsock import ( - "net/netip" "testing" "tailscale.com/disco" @@ -22,26 +21,57 @@ func TestRelayManagerInitAndIdle(t *testing.T) { <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleCallMeMaybeVia(&endpoint{c: &Conn{discoPrivate: key.NewDisco()}}, addrQuality{}, false, &disco.CallMeMaybeVia{ServerDisco: key.NewDisco().Public()}) + rm.handleCallMeMaybeVia(&endpoint{c: &Conn{discoPrivate: key.NewDisco()}}, addrQuality{}, false, &disco.CallMeMaybeVia{UDPRelayEndpoint: disco.UDPRelayEndpoint{ServerDisco: key.NewDisco().Public()}}) <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleGeneveEncapDiscoMsg(&Conn{discoPrivate: key.NewDisco()}, &disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, epAddr{}) + rm.handleRxDiscoMsg(&Conn{discoPrivate: key.NewDisco()}, &disco.BindUDPRelayEndpointChallenge{}, key.NodePublic{}, key.DiscoPublic{}, epAddr{}) <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleRelayServersSet(make(set.Set[netip.AddrPort])) + rm.handleRelayServersSet(make(set.Set[candidatePeerRelay])) <-rm.runLoopStoppedCh rm = relayManager{} rm.getServers() <-rm.runLoopStoppedCh + + rm = relayManager{} + rm.handleDERPHomeChange(key.NodePublic{}, 1) + <-rm.runLoopStoppedCh +} + +func TestRelayManagerHandleDERPHomeChange(t *testing.T) { + rm := relayManager{} + servers := make(set.Set[candidatePeerRelay], 1) + c := candidatePeerRelay{ + nodeKey: key.NewNode().Public(), + discoKey: key.NewDisco().Public(), + derpHomeRegionID: 1, + } + servers.Add(c) + rm.handleRelayServersSet(servers) + want := c + want.derpHomeRegionID = 2 + rm.handleDERPHomeChange(c.nodeKey, 2) + got := rm.getServers() + if len(got) != 1 { + t.Fatalf("got %d servers, want 1", len(got)) + } + _, ok := got[want] + if !ok { + t.Fatal("DERP home change failed to propagate") + } } func TestRelayManagerGetServers(t *testing.T) { rm := relayManager{} - servers := make(set.Set[netip.AddrPort], 1) - servers.Add(netip.MustParseAddrPort("192.0.2.1:7")) + servers := make(set.Set[candidatePeerRelay], 1) + c := candidatePeerRelay{ + nodeKey: key.NewNode().Public(), + discoKey: key.NewDisco().Public(), + } + servers.Add(c) 
rm.handleRelayServersSet(servers) got := rm.getServers() if !servers.Equal(got) { From 0d03a3746a0229fe749b94b1d60491de64b135cd Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Mon, 21 Jul 2025 10:35:53 -0700 Subject: [PATCH 1119/1708] feature/tpm: log errors on the initial info fetch (#16574) This function is behind a sync.Once so we should only see errors at startup. In particular the error from `open` is useful to diagnose why TPM might not be accessible. Updates #15830 Signed-off-by: Andrew Lytvynov --- feature/tpm/tpm.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 9499ed02a..0260cca58 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -44,8 +44,10 @@ func init() { func info() *tailcfg.TPMInfo { tpm, err := open() if err != nil { + log.Printf("TPM: error opening: %v", err) return nil } + log.Printf("TPM: successfully opened") defer tpm.Close() info := new(tailcfg.TPMInfo) @@ -74,10 +76,12 @@ func info() *tailcfg.TPMInfo { PropertyCount: 1, }.Execute(tpm) if err != nil { + log.Printf("TPM: GetCapability %v: %v", cap.prop, err) continue } props, err := resp.CapabilityData.Data.TPMProperties() if err != nil { + log.Printf("TPM: GetCapability %v: %v", cap.prop, err) continue } if len(props.TPMProperty) == 0 { From c989824aac0df05b00275ae8911b7bbf26797d9d Mon Sep 17 00:00:00 2001 From: David Bond Date: Mon, 21 Jul 2025 19:06:36 +0100 Subject: [PATCH 1120/1708] cmd/k8s-operator: Allow specifying cluster ips for nameservers (#16477) This commit modifies the kubernetes operator's `DNSConfig` resource with the addition of a new field at `nameserver.service.clusterIP`. This field allows users to specify a static in-cluster IP address of the nameserver when deployed. Fixes #14305 Signed-off-by: David Bond --- .../deploy/crds/tailscale.com_dnsconfigs.yaml | 9 +- .../deploy/manifests/operator.yaml | 9 +- cmd/k8s-operator/nameserver.go | 9 +- cmd/k8s-operator/nameserver_test.go | 177 +++++++++++------- k8s-operator/api.md | 19 +- .../apis/v1alpha1/types_tsdnsconfig.go | 11 +- .../apis/v1alpha1/zz_generated.deepcopy.go | 20 ++ 7 files changed, 179 insertions(+), 75 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml index 268d978c1..bffad47f9 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml @@ -101,6 +101,13 @@ spec: tag: description: Tag defaults to unstable. type: string + service: + description: Service configuration. + type: object + properties: + clusterIP: + description: ClusterIP sets the static IP of the service used by the nameserver. + type: string status: description: |- Status describes the status of the DNSConfig. This is set @@ -172,7 +179,7 @@ spec: ip: description: |- IP is the ClusterIP of the Service fronting the deployed ts.net nameserver. - Currently you must manually update your cluster DNS config to add + Currently, you must manually update your cluster DNS config to add this address as a stub nameserver for ts.net for cluster workloads to be able to resolve MagicDNS names associated with egress or Ingress proxies. diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index ac8143e98..175f2a7fb 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -389,6 +389,13 @@ spec: description: Tag defaults to unstable. 
type: string type: object + service: + description: Service configuration. + properties: + clusterIP: + description: ClusterIP sets the static IP of the service used by the nameserver. + type: string + type: object type: object required: - nameserver @@ -462,7 +469,7 @@ spec: ip: description: |- IP is the ClusterIP of the Service fronting the deployed ts.net nameserver. - Currently you must manually update your cluster DNS config to add + Currently, you must manually update your cluster DNS config to add this address as a stub nameserver for ts.net for cluster workloads to be able to resolve MagicDNS names associated with egress or Ingress proxies. diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index 20d66f7d0..983a28c91 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -7,14 +7,13 @@ package main import ( "context" + _ "embed" "errors" "fmt" "slices" "strings" "sync" - _ "embed" - "go.uber.org/zap" xslices "golang.org/x/exp/slices" appsv1 "k8s.io/api/apps/v1" @@ -183,6 +182,10 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa if tsDNSCfg.Spec.Nameserver.Image != nil && tsDNSCfg.Spec.Nameserver.Image.Tag != "" { dCfg.imageTag = tsDNSCfg.Spec.Nameserver.Image.Tag } + if tsDNSCfg.Spec.Nameserver.Service != nil { + dCfg.clusterIP = tsDNSCfg.Spec.Nameserver.Service.ClusterIP + } + for _, deployable := range []deployable{saDeployable, deployDeployable, svcDeployable, cmDeployable} { if err := deployable.updateObj(ctx, dCfg, a.Client); err != nil { return fmt.Errorf("error reconciling %s: %w", deployable.kind, err) @@ -213,6 +216,7 @@ type deployConfig struct { labels map[string]string ownerRefs []metav1.OwnerReference namespace string + clusterIP string } var ( @@ -267,6 +271,7 @@ var ( svc.ObjectMeta.Labels = cfg.labels svc.ObjectMeta.OwnerReferences = cfg.ownerRefs svc.ObjectMeta.Namespace = cfg.namespace + svc.Spec.ClusterIP = cfg.clusterIP _, err := createOrUpdate[corev1.Service](ctx, kubeClient, cfg.namespace, svc, func(*corev1.Service) {}) return err }, diff --git a/cmd/k8s-operator/nameserver_test.go b/cmd/k8s-operator/nameserver_test.go index cec95b84e..55a998ac3 100644 --- a/cmd/k8s-operator/nameserver_test.go +++ b/cmd/k8s-operator/nameserver_test.go @@ -26,7 +26,7 @@ import ( ) func TestNameserverReconciler(t *testing.T) { - dnsCfg := &tsapi.DNSConfig{ + dnsConfig := &tsapi.DNSConfig{ TypeMeta: metav1.TypeMeta{Kind: "DNSConfig", APIVersion: "tailscale.com/v1alpha1"}, ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -37,91 +37,130 @@ func TestNameserverReconciler(t *testing.T) { Repo: "test", Tag: "v0.0.1", }, + Service: &tsapi.NameserverService{ + ClusterIP: "5.4.3.2", + }, }, }, } fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). - WithObjects(dnsCfg). - WithStatusSubresource(dnsCfg). + WithObjects(dnsConfig). + WithStatusSubresource(dnsConfig). Build() - zl, err := zap.NewDevelopment() + + logger, err := zap.NewDevelopment() if err != nil { t.Fatal(err) } - cl := tstest.NewClock(tstest.ClockOpts{}) - nr := &NameserverReconciler{ + + clock := tstest.NewClock(tstest.ClockOpts{}) + reconciler := &NameserverReconciler{ Client: fc, - clock: cl, - logger: zl.Sugar(), - tsNamespace: "tailscale", + clock: clock, + logger: logger.Sugar(), + tsNamespace: tsNamespace, } - expectReconciled(t, nr, "", "test") - // Verify that nameserver Deployment has been created and has the expected fields. 
- wantsDeploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "nameserver", Namespace: "tailscale"}, TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: appsv1.SchemeGroupVersion.Identifier()}} - if err := yaml.Unmarshal(deployYaml, wantsDeploy); err != nil { - t.Fatalf("unmarshalling yaml: %v", err) - } - dnsCfgOwnerRef := metav1.NewControllerRef(dnsCfg, tsapi.SchemeGroupVersion.WithKind("DNSConfig")) - wantsDeploy.OwnerReferences = []metav1.OwnerReference{*dnsCfgOwnerRef} - wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.1" - wantsDeploy.Namespace = "tailscale" - labels := nameserverResourceLabels("test", "tailscale") - wantsDeploy.ObjectMeta.Labels = labels - expectEqual(t, fc, wantsDeploy) - - // Verify that DNSConfig advertizes the nameserver's Service IP address, - // has the ready status condition and tailscale finalizer. - mustUpdate(t, fc, "tailscale", "nameserver", func(svc *corev1.Service) { - svc.Spec.ClusterIP = "1.2.3.4" + expectReconciled(t, reconciler, "", "test") + + ownerReference := metav1.NewControllerRef(dnsConfig, tsapi.SchemeGroupVersion.WithKind("DNSConfig")) + nameserverLabels := nameserverResourceLabels(dnsConfig.Name, tsNamespace) + + wantsDeploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "nameserver", Namespace: tsNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: appsv1.SchemeGroupVersion.Identifier()}} + t.Run("deployment has expected fields", func(t *testing.T) { + if err = yaml.Unmarshal(deployYaml, wantsDeploy); err != nil { + t.Fatalf("unmarshalling yaml: %v", err) + } + wantsDeploy.OwnerReferences = []metav1.OwnerReference{*ownerReference} + wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.1" + wantsDeploy.Namespace = tsNamespace + wantsDeploy.ObjectMeta.Labels = nameserverLabels + expectEqual(t, fc, wantsDeploy) }) - expectReconciled(t, nr, "", "test") - dnsCfg.Status.Nameserver = &tsapi.NameserverStatus{ - IP: "1.2.3.4", - } - dnsCfg.Finalizers = []string{FinalizerName} - dnsCfg.Status.Conditions = append(dnsCfg.Status.Conditions, metav1.Condition{ - Type: string(tsapi.NameserverReady), - Status: metav1.ConditionTrue, - Reason: reasonNameserverCreated, - Message: reasonNameserverCreated, - LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + + wantsSvc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "nameserver", Namespace: tsNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: corev1.SchemeGroupVersion.Identifier()}} + t.Run("service has expected fields", func(t *testing.T) { + if err = yaml.Unmarshal(svcYaml, wantsSvc); err != nil { + t.Fatalf("unmarshalling yaml: %v", err) + } + wantsSvc.Spec.ClusterIP = dnsConfig.Spec.Nameserver.Service.ClusterIP + wantsSvc.OwnerReferences = []metav1.OwnerReference{*ownerReference} + wantsSvc.Namespace = tsNamespace + wantsSvc.ObjectMeta.Labels = nameserverLabels + expectEqual(t, fc, wantsSvc) }) - expectEqual(t, fc, dnsCfg) - // // Verify that nameserver image gets updated to match DNSConfig spec. - mustUpdate(t, fc, "", "test", func(dnsCfg *tsapi.DNSConfig) { - dnsCfg.Spec.Nameserver.Image.Tag = "v0.0.2" + t.Run("dns config status is set", func(t *testing.T) { + // Verify that DNSConfig advertizes the nameserver's Service IP address, + // has the ready status condition and tailscale finalizer. 
+ mustUpdate(t, fc, "tailscale", "nameserver", func(svc *corev1.Service) { + svc.Spec.ClusterIP = "1.2.3.4" + }) + expectReconciled(t, reconciler, "", "test") + + dnsConfig.Finalizers = []string{FinalizerName} + dnsConfig.Status.Nameserver = &tsapi.NameserverStatus{ + IP: "1.2.3.4", + } + dnsConfig.Status.Conditions = append(dnsConfig.Status.Conditions, metav1.Condition{ + Type: string(tsapi.NameserverReady), + Status: metav1.ConditionTrue, + Reason: reasonNameserverCreated, + Message: reasonNameserverCreated, + LastTransitionTime: metav1.Time{Time: clock.Now().Truncate(time.Second)}, + }) + + expectEqual(t, fc, dnsConfig) }) - expectReconciled(t, nr, "", "test") - wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.2" - expectEqual(t, fc, wantsDeploy) - - // Verify that when another actor sets ConfigMap data, it does not get - // overwritten by nameserver reconciler. - dnsRecords := &operatorutils.Records{Version: "v1alpha1", IP4: map[string][]string{"foo.ts.net": {"1.2.3.4"}}} - bs, err := json.Marshal(dnsRecords) - if err != nil { - t.Fatalf("error marshalling ConfigMap contents: %v", err) - } - mustUpdate(t, fc, "tailscale", "dnsrecords", func(cm *corev1.ConfigMap) { - mak.Set(&cm.Data, "records.json", string(bs)) + + t.Run("nameserver image can be updated", func(t *testing.T) { + // Verify that nameserver image gets updated to match DNSConfig spec. + mustUpdate(t, fc, "", "test", func(dnsCfg *tsapi.DNSConfig) { + dnsCfg.Spec.Nameserver.Image.Tag = "v0.0.2" + }) + expectReconciled(t, reconciler, "", "test") + wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.2" + expectEqual(t, fc, wantsDeploy) + }) + + t.Run("reconciler does not overwrite custom configuration", func(t *testing.T) { + // Verify that when another actor sets ConfigMap data, it does not get + // overwritten by nameserver reconciler. + dnsRecords := &operatorutils.Records{Version: "v1alpha1", IP4: map[string][]string{"foo.ts.net": {"1.2.3.4"}}} + bs, err := json.Marshal(dnsRecords) + if err != nil { + t.Fatalf("error marshalling ConfigMap contents: %v", err) + } + + mustUpdate(t, fc, "tailscale", "dnsrecords", func(cm *corev1.ConfigMap) { + mak.Set(&cm.Data, "records.json", string(bs)) + }) + + expectReconciled(t, reconciler, "", "test") + + wantCm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dnsrecords", + Namespace: "tailscale", + Labels: nameserverLabels, + OwnerReferences: []metav1.OwnerReference{*ownerReference}, + }, + TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"}, + Data: map[string]string{"records.json": string(bs)}, + } + + expectEqual(t, fc, wantCm) }) - expectReconciled(t, nr, "", "test") - wantCm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "dnsrecords", - Namespace: "tailscale", Labels: labels, OwnerReferences: []metav1.OwnerReference{*dnsCfgOwnerRef}}, - TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"}, - Data: map[string]string{"records.json": string(bs)}, - } - expectEqual(t, fc, wantCm) - // Verify that if dnsconfig.spec.nameserver.image.{repo,tag} are unset, - // the nameserver image defaults to tailscale/k8s-nameserver:unstable. - mustUpdate(t, fc, "", "test", func(dnsCfg *tsapi.DNSConfig) { - dnsCfg.Spec.Nameserver.Image = nil + t.Run("uses default nameserver image", func(t *testing.T) { + // Verify that if dnsconfig.spec.nameserver.image.{repo,tag} are unset, + // the nameserver image defaults to tailscale/k8s-nameserver:unstable. 
+ mustUpdate(t, fc, "", "test", func(dnsCfg *tsapi.DNSConfig) { + dnsCfg.Spec.Nameserver.Image = nil + }) + expectReconciled(t, reconciler, "", "test") + wantsDeploy.Spec.Template.Spec.Containers[0].Image = "tailscale/k8s-nameserver:unstable" + expectEqual(t, fc, wantsDeploy) }) - expectReconciled(t, nr, "", "test") - wantsDeploy.Spec.Template.Spec.Containers[0].Image = "tailscale/k8s-nameserver:unstable" - expectEqual(t, fc, wantsDeploy) } diff --git a/k8s-operator/api.md b/k8s-operator/api.md index cd36798d6..564c87f50 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -422,6 +422,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `image` _[NameserverImage](#nameserverimage)_ | Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. | | | +| `service` _[NameserverService](#nameserverservice)_ | Service configuration. | | | #### NameserverImage @@ -441,6 +442,22 @@ _Appears in:_ | `tag` _string_ | Tag defaults to unstable. | | | +#### NameserverService + + + + + + + +_Appears in:_ +- [Nameserver](#nameserver) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `clusterIP` _string_ | ClusterIP sets the static IP of the service used by the nameserver. | | | + + #### NameserverStatus @@ -454,7 +471,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `ip` _string_ | IP is the ClusterIP of the Service fronting the deployed ts.net nameserver.
                Currently you must manually update your cluster DNS config to add
                this address as a stub nameserver for ts.net for cluster workloads to be
                able to resolve MagicDNS names associated with egress or Ingress
                proxies.
                The IP address will change if you delete and recreate the DNSConfig. | | | +| `ip` _string_ | IP is the ClusterIP of the Service fronting the deployed ts.net nameserver.
                Currently, you must manually update your cluster DNS config to add
                this address as a stub nameserver for ts.net for cluster workloads to be
                able to resolve MagicDNS names associated with egress or Ingress
                proxies.
                The IP address will change if you delete and recreate the DNSConfig. | | | #### NodePortConfig diff --git a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go index 0178d60ea..0e26ee647 100644 --- a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go +++ b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go @@ -82,6 +82,9 @@ type Nameserver struct { // Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. // +optional Image *NameserverImage `json:"image,omitempty"` + // Service configuration. + // +optional + Service *NameserverService `json:"service,omitempty"` } type NameserverImage struct { @@ -93,6 +96,12 @@ type NameserverImage struct { Tag string `json:"tag,omitempty"` } +type NameserverService struct { + // ClusterIP sets the static IP of the service used by the nameserver. + // +optional + ClusterIP string `json:"clusterIP,omitempty"` +} + type DNSConfigStatus struct { // +listType=map // +listMapKey=type @@ -105,7 +114,7 @@ type DNSConfigStatus struct { type NameserverStatus struct { // IP is the ClusterIP of the Service fronting the deployed ts.net nameserver. - // Currently you must manually update your cluster DNS config to add + // Currently, you must manually update your cluster DNS config to add // this address as a stub nameserver for ts.net for cluster workloads to be // able to resolve MagicDNS names associated with egress or Ingress // proxies. diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index 32adbd680..6586c1354 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -385,6 +385,11 @@ func (in *Nameserver) DeepCopyInto(out *Nameserver) { *out = new(NameserverImage) **out = **in } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(NameserverService) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Nameserver. @@ -412,6 +417,21 @@ func (in *NameserverImage) DeepCopy() *NameserverImage { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NameserverService) DeepCopyInto(out *NameserverService) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameserverService. +func (in *NameserverService) DeepCopy() *NameserverService { + if in == nil { + return nil + } + out := new(NameserverService) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NameserverStatus) DeepCopyInto(out *NameserverStatus) { *out = *in From 8453170aa120227dfec3c3141f081d9495a0a7c1 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 21 Jul 2025 12:36:16 -0700 Subject: [PATCH 1121/1708] feature/relayserver: fix consumeEventbusTopics deadlock (#16618) consumeEventbusTopics now owns server and related eventbus machinery. 
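
To make the shape of the fix concrete, here is a minimal, self-contained
sketch of the ownership pattern this patch moves to. Every name in it
(owner, resource, run, stop) is invented for illustration and is not a
Tailscale API: a single worker goroutine lazily creates the thing it needs,
is the only code that touches it, closes it on the way out, and never takes
the shared mutex, so a shutdown path can hold that mutex while it signals
the worker and waits for it to return.

    package main

    import (
        "fmt"
        "sync"
    )

    // resource stands in for the lazily created relay server.
    type resource struct{}

    func (r *resource) Close() { fmt.Println("resource closed") }

    type owner struct {
        mu     sync.Mutex
        stopCh chan struct{} // non-nil while the worker goroutine runs
        doneCh chan struct{} // closed when the worker goroutine returns
    }

    // start launches the worker, which is the sole owner of the resource.
    func (o *owner) start(events <-chan string) {
        o.mu.Lock()
        defer o.mu.Unlock()
        if o.doneCh != nil {
            return // already running
        }
        o.stopCh, o.doneCh = make(chan struct{}), make(chan struct{})
        go o.run(events, o.stopCh, o.doneCh)
    }

    // run never touches o.mu, so a caller may hold the mutex while waiting for it.
    func (o *owner) run(events <-chan string, stopCh, doneCh chan struct{}) {
        defer close(doneCh)
        var res *resource // created lazily, on the first event that needs it
        defer func() {
            if res != nil {
                res.Close()
            }
        }()
        for {
            select {
            case <-stopCh:
                return
            case ev := <-events:
                if res == nil {
                    res = new(resource)
                }
                fmt.Println("handled", ev)
            }
        }
    }

    // stop mirrors the signal-then-wait shape of disconnectFromBusLocked.
    func (o *owner) stop() {
        o.mu.Lock()
        defer o.mu.Unlock()
        if o.doneCh == nil {
            return
        }
        close(o.stopCh)
        <-o.doneCh
        o.stopCh, o.doneCh = nil, nil
    }

    func main() {
        events := make(chan string, 1)
        events <- "allocate"
        var o owner
        o.start(events)
        o.stop()
    }

As far as the removed code shows, the earlier arrangement had the consumer
goroutine re-enter the extension mutex via relayServerOrInit while Shutdown
held that mutex and waited on busDoneCh; keeping construction and teardown
inside the consumer goroutine removes that lock-ordering hazard.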
Updates tailscale/corp#30651 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 121 +++++++++------------ feature/relayserver/relayserver_test.go | 134 +++++++++++++++--------- 2 files changed, 136 insertions(+), 119 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index f4077b5f9..b90a62345 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -6,7 +6,6 @@ package relayserver import ( - "errors" "sync" "tailscale.com/disco" @@ -48,16 +47,12 @@ type extension struct { logf logger.Logf bus *eventbus.Bus - mu sync.Mutex // guards the following fields - eventClient *eventbus.Client // closed to stop consumeEventbusTopics - reqSub *eventbus.Subscriber[magicsock.UDPRelayAllocReq] // receives endpoint alloc requests from magicsock - respPub *eventbus.Publisher[magicsock.UDPRelayAllocResp] // publishes endpoint alloc responses to magicsock + mu sync.Mutex // guards the following fields shutdown bool port *int // ipn.Prefs.RelayServerPort, nil if disabled - busDoneCh chan struct{} // non-nil if port is non-nil, closed when consumeEventbusTopics returns + disconnectFromBusCh chan struct{} // non-nil if consumeEventbusTopics is running, closed to signal it to return + busDoneCh chan struct{} // non-nil if consumeEventbusTopics is running, closed when it returns hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer - server relayServer // lazily initialized - } // relayServer is the interface of [udprelay.Server]. @@ -81,26 +76,27 @@ func (e *extension) Init(host ipnext.Host) error { return nil } -// initBusConnection initializes the [*eventbus.Client], [*eventbus.Subscriber], -// [*eventbus.Publisher], and [chan struct{}] used to publish/receive endpoint -// allocation messages to/from the [*eventbus.Bus]. It also starts -// consumeEventbusTopics in a separate goroutine. -func (e *extension) initBusConnection() { - e.eventClient = e.bus.Client("relayserver.extension") - e.reqSub = eventbus.Subscribe[magicsock.UDPRelayAllocReq](e.eventClient) - e.respPub = eventbus.Publish[magicsock.UDPRelayAllocResp](e.eventClient) +// handleBusLifetimeLocked handles the lifetime of consumeEventbusTopics. 
+func (e *extension) handleBusLifetimeLocked() { + busShouldBeRunning := !e.shutdown && e.port != nil && !e.hasNodeAttrDisableRelayServer + if !busShouldBeRunning { + e.disconnectFromBusLocked() + return + } + if e.busDoneCh != nil { + return // already running + } + port := *e.port + e.disconnectFromBusCh = make(chan struct{}) e.busDoneCh = make(chan struct{}) - go e.consumeEventbusTopics() + go e.consumeEventbusTopics(port) } func (e *extension) selfNodeViewChanged(nodeView tailcfg.NodeView) { e.mu.Lock() defer e.mu.Unlock() e.hasNodeAttrDisableRelayServer = nodeView.HasCap(tailcfg.NodeAttrDisableRelayServer) - if e.hasNodeAttrDisableRelayServer && e.server != nil { - e.server.Close() - e.server = nil - } + e.handleBusLifetimeLocked() } func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { @@ -110,43 +106,52 @@ func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsV enableOrDisableServer := ok != (e.port != nil) portChanged := ok && e.port != nil && newPort != *e.port if enableOrDisableServer || portChanged || !sameNode { - if e.server != nil { - e.server.Close() - e.server = nil - } - if e.port != nil { - e.eventClient.Close() - <-e.busDoneCh - } + e.disconnectFromBusLocked() e.port = nil if ok { e.port = ptr.To(newPort) - e.initBusConnection() } } + e.handleBusLifetimeLocked() } -func (e *extension) consumeEventbusTopics() { +func (e *extension) consumeEventbusTopics(port int) { defer close(e.busDoneCh) + eventClient := e.bus.Client("relayserver.extension") + reqSub := eventbus.Subscribe[magicsock.UDPRelayAllocReq](eventClient) + respPub := eventbus.Publish[magicsock.UDPRelayAllocResp](eventClient) + defer eventClient.Close() + + var rs relayServer // lazily initialized + defer func() { + if rs != nil { + rs.Close() + } + }() for { select { - case <-e.reqSub.Done(): + case <-e.disconnectFromBusCh: + return + case <-reqSub.Done(): // If reqSub is done, the eventClient has been closed, which is a // signal to return. return - case req := <-e.reqSub.Events(): - rs, err := e.relayServerOrInit() - if err != nil { - e.logf("error initializing server: %v", err) - continue + case req := <-reqSub.Events(): + if rs == nil { + var err error + rs, err = udprelay.NewServer(e.logf, port, nil) + if err != nil { + e.logf("error initializing server: %v", err) + continue + } } se, err := rs.AllocateEndpoint(req.Message.ClientDisco[0], req.Message.ClientDisco[1]) if err != nil { e.logf("error allocating endpoint: %v", err) continue } - e.respPub.Publish(magicsock.UDPRelayAllocResp{ + respPub.Publish(magicsock.UDPRelayAllocResp{ ReqRxFromNodeKey: req.RxFromNodeKey, ReqRxFromDiscoKey: req.RxFromDiscoKey, Message: &disco.AllocateUDPRelayEndpointResponse{ @@ -164,44 +169,22 @@ func (e *extension) consumeEventbusTopics() { }) } } +} +func (e *extension) disconnectFromBusLocked() { + if e.busDoneCh != nil { + close(e.disconnectFromBusCh) + <-e.busDoneCh + e.busDoneCh = nil + e.disconnectFromBusCh = nil + } } // Shutdown implements [ipnlocal.Extension]. 
func (e *extension) Shutdown() error { e.mu.Lock() defer e.mu.Unlock() + e.disconnectFromBusLocked() e.shutdown = true - if e.server != nil { - e.server.Close() - e.server = nil - } - if e.port != nil { - e.eventClient.Close() - <-e.busDoneCh - } return nil } - -func (e *extension) relayServerOrInit() (relayServer, error) { - e.mu.Lock() - defer e.mu.Unlock() - if e.shutdown { - return nil, errors.New("relay server is shutdown") - } - if e.server != nil { - return e.server, nil - } - if e.port == nil { - return nil, errors.New("relay server is not configured") - } - if e.hasNodeAttrDisableRelayServer { - return nil, errors.New("disable-relay-server node attribute is present") - } - var err error - e.server, err = udprelay.NewServer(e.logf, *e.port, nil) - if err != nil { - return nil, err - } - return e.server, nil -} diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index 84158188e..d3fc36a83 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -4,107 +4,91 @@ package relayserver import ( - "errors" "testing" "tailscale.com/ipn" - "tailscale.com/net/udprelay/endpoint" "tailscale.com/tsd" - "tailscale.com/types/key" "tailscale.com/types/ptr" + "tailscale.com/util/eventbus" ) -type fakeRelayServer struct{} - -func (f *fakeRelayServer) Close() error { return nil } - -func (f *fakeRelayServer) AllocateEndpoint(_, _ key.DiscoPublic) (endpoint.ServerEndpoint, error) { - return endpoint.ServerEndpoint{}, errors.New("fake relay server") -} - func Test_extension_profileStateChanged(t *testing.T) { prefsWithPortOne := ipn.Prefs{RelayServerPort: ptr.To(1)} prefsWithNilPort := ipn.Prefs{RelayServerPort: nil} type fields struct { - server relayServer - port *int + port *int } type args struct { prefs ipn.PrefsView sameNode bool } tests := []struct { - name string - fields fields - args args - wantPort *int - wantNilServer bool + name string + fields fields + args args + wantPort *int + wantBusRunning bool }{ { - name: "no changes non-nil server", + name: "no changes non-nil port", fields: fields{ - server: &fakeRelayServer{}, - port: ptr.To(1), + port: ptr.To(1), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(1), - wantNilServer: false, + wantPort: ptr.To(1), + wantBusRunning: true, }, { name: "prefs port nil", fields: fields{ - server: &fakeRelayServer{}, - port: ptr.To(1), + port: ptr.To(1), }, args: args{ prefs: prefsWithNilPort.View(), sameNode: true, }, - wantPort: nil, - wantNilServer: true, + wantPort: nil, + wantBusRunning: false, }, { name: "prefs port changed", fields: fields{ - server: &fakeRelayServer{}, - port: ptr.To(2), + port: ptr.To(2), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(1), - wantNilServer: true, + wantPort: ptr.To(1), + wantBusRunning: true, }, { name: "sameNode false", fields: fields{ - server: &fakeRelayServer{}, - port: ptr.To(1), + port: ptr.To(1), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: false, }, - wantPort: ptr.To(1), - wantNilServer: true, + wantPort: ptr.To(1), + wantBusRunning: true, }, { name: "prefs port non-nil extension port nil", fields: fields{ - server: nil, - port: nil, + port: nil, }, args: args{ prefs: prefsWithPortOne.View(), sameNode: false, }, - wantPort: ptr.To(1), - wantNilServer: true, + wantPort: ptr.To(1), + wantBusRunning: true, }, } for _, tt := range tests { @@ -112,19 +96,13 @@ func Test_extension_profileStateChanged(t *testing.T) { sys := tsd.NewSystem() 
bus := sys.Bus.Get() e := &extension{ - port: tt.fields.port, - server: tt.fields.server, - bus: bus, - } - if e.port != nil { - // Entering profileStateChanged with a non-nil port requires - // bus init, which is called in profileStateChanged when - // transitioning port from nil to non-nil. - e.initBusConnection() + port: tt.fields.port, + bus: bus, } + defer e.disconnectFromBusLocked() e.profileStateChanged(ipn.LoginProfileView{}, tt.args.prefs, tt.args.sameNode) - if tt.wantNilServer != (e.server == nil) { - t.Errorf("wantNilServer: %v != (e.server == nil): %v", tt.wantNilServer, e.server == nil) + if tt.wantBusRunning != (e.busDoneCh != nil) { + t.Errorf("wantBusRunning: %v != (e.busDoneCh != nil): %v", tt.wantBusRunning, e.busDoneCh != nil) } if (tt.wantPort == nil) != (e.port == nil) { t.Errorf("(tt.wantPort == nil): %v != (e.port == nil): %v", tt.wantPort == nil, e.port == nil) @@ -134,3 +112,59 @@ func Test_extension_profileStateChanged(t *testing.T) { }) } } + +func Test_extension_handleBusLifetimeLocked(t *testing.T) { + tests := []struct { + name string + shutdown bool + port *int + busDoneCh chan struct{} + hasNodeAttrDisableRelayServer bool + wantBusRunning bool + }{ + { + name: "want running", + shutdown: false, + port: ptr.To(1), + hasNodeAttrDisableRelayServer: false, + wantBusRunning: true, + }, + { + name: "shutdown true", + shutdown: true, + port: ptr.To(1), + hasNodeAttrDisableRelayServer: false, + wantBusRunning: false, + }, + { + name: "port nil", + shutdown: false, + port: nil, + hasNodeAttrDisableRelayServer: false, + wantBusRunning: false, + }, + { + name: "hasNodeAttrDisableRelayServer true", + shutdown: false, + port: nil, + hasNodeAttrDisableRelayServer: true, + wantBusRunning: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &extension{ + bus: eventbus.New(), + shutdown: tt.shutdown, + port: tt.port, + busDoneCh: tt.busDoneCh, + hasNodeAttrDisableRelayServer: tt.hasNodeAttrDisableRelayServer, + } + e.handleBusLifetimeLocked() + defer e.disconnectFromBusLocked() + if tt.wantBusRunning != (e.busDoneCh != nil) { + t.Errorf("wantBusRunning: %v != (e.busDoneCh != nil): %v", tt.wantBusRunning, e.busDoneCh != nil) + } + }) + } +} From 6f7e78b10ffac8f1dcd79aebe12b38ee96e76ce7 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 22 Jul 2025 10:07:09 +0100 Subject: [PATCH 1122/1708] cmd/tailscale/cli: make configure kubeconfig accept Tailscale Services (#16601) The Kubernetes API server proxy is getting the ability to serve on a Tailscale Service instead of individual node names. Update the configure kubeconfig sub-command to accept arguments that look like a Tailscale Service. Note, we can't know for sure whether a peer is advertising a Tailscale Service, we can only guess based on the ExtraRecords in the netmap and that IP showing up in a peer's AllowedIPs. Also adds an --http flag to allow targeting individual proxies that can be adverting on http for their node name, and makes the command a bit more forgiving on the range of inputs it accepts and how eager it is to print the help text when the input is obviously wrong. 
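
For illustration, with made-up hostnames and only the flag and scheme
handling taken from the diff below: `tailscale configure kubeconfig my-proxy`
keeps the HTTPS default, `tailscale configure kubeconfig --http my-proxy`
targets a node-name proxy that only serves plain HTTP, and
`tailscale configure kubeconfig http://k8s.example.ts.net` does the same
because an explicit scheme in the argument takes precedence over the flag.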
Updates #13358 Change-Id: Ica0509c6b2c707252a43d7c18b530ec1acf7508f Signed-off-by: Tom Proctor --- cmd/tailscale/cli/configure-kube.go | 151 +++++++++++++++++++++-- cmd/tailscale/cli/configure-kube_test.go | 56 ++++++++- 2 files changed, 194 insertions(+), 13 deletions(-) diff --git a/cmd/tailscale/cli/configure-kube.go b/cmd/tailscale/cli/configure-kube.go index 6bc4e202e..e74e88779 100644 --- a/cmd/tailscale/cli/configure-kube.go +++ b/cmd/tailscale/cli/configure-kube.go @@ -9,17 +9,29 @@ import ( "errors" "flag" "fmt" + "net/netip" + "net/url" "os" "path/filepath" "slices" "strings" + "time" "github.com/peterbourgon/ff/v3/ffcli" "k8s.io/client-go/util/homedir" "sigs.k8s.io/yaml" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tailcfg" + "tailscale.com/types/netmap" + "tailscale.com/util/dnsname" "tailscale.com/version" ) +var configureKubeconfigArgs struct { + http bool // Use HTTP instead of HTTPS (default) for the auth proxy. +} + func configureKubeconfigCmd() *ffcli.Command { return &ffcli.Command{ Name: "kubeconfig", @@ -34,6 +46,7 @@ See: https://tailscale.com/s/k8s-auth-proxy `), FlagSet: (func() *flag.FlagSet { fs := newFlagSet("kubeconfig") + fs.BoolVar(&configureKubeconfigArgs.http, "http", false, "Use HTTP instead of HTTPS to connect to the auth proxy. Ignored if you include a scheme in the hostname argument.") return fs })(), Exec: runConfigureKubeconfig, @@ -70,10 +83,13 @@ func kubeconfigPath() (string, error) { } func runConfigureKubeconfig(ctx context.Context, args []string) error { - if len(args) != 1 { - return errors.New("unknown arguments") + if len(args) != 1 || args[0] == "" { + return flag.ErrHelp + } + hostOrFQDNOrIP, http, err := getInputs(args[0], configureKubeconfigArgs.http) + if err != nil { + return fmt.Errorf("error parsing inputs: %w", err) } - hostOrFQDN := args[0] st, err := localClient.Status(ctx) if err != nil { @@ -82,22 +98,45 @@ func runConfigureKubeconfig(ctx context.Context, args []string) error { if st.BackendState != "Running" { return errors.New("Tailscale is not running") } - targetFQDN, ok := nodeDNSNameFromArg(st, hostOrFQDN) - if !ok { - return fmt.Errorf("no peer found with hostname %q", hostOrFQDN) + nm, err := getNetMap(ctx) + if err != nil { + return err + } + + targetFQDN, err := nodeOrServiceDNSNameFromArg(st, nm, hostOrFQDNOrIP) + if err != nil { + return err } targetFQDN = strings.TrimSuffix(targetFQDN, ".") var kubeconfig string if kubeconfig, err = kubeconfigPath(); err != nil { return err } - if err = setKubeconfigForPeer(targetFQDN, kubeconfig); err != nil { + scheme := "https://" + if http { + scheme = "http://" + } + if err = setKubeconfigForPeer(scheme, targetFQDN, kubeconfig); err != nil { return err } - printf("kubeconfig configured for %q\n", hostOrFQDN) + printf("kubeconfig configured for %q at URL %q\n", targetFQDN, scheme+targetFQDN) return nil } +func getInputs(arg string, httpArg bool) (string, bool, error) { + u, err := url.Parse(arg) + if err != nil { + return "", false, err + } + + switch u.Scheme { + case "http", "https": + return u.Host, u.Scheme == "http", nil + default: + return arg, httpArg, nil + } +} + // appendOrSetNamed finds a map with a "name" key matching name in dst, and // replaces it with val. If no such map is found, val is appended to dst. 
func appendOrSetNamed(dst []any, name string, val map[string]any) []any { @@ -116,7 +155,7 @@ func appendOrSetNamed(dst []any, name string, val map[string]any) []any { var errInvalidKubeconfig = errors.New("invalid kubeconfig") -func updateKubeconfig(cfgYaml []byte, fqdn string) ([]byte, error) { +func updateKubeconfig(cfgYaml []byte, scheme, fqdn string) ([]byte, error) { var cfg map[string]any if len(cfgYaml) > 0 { if err := yaml.Unmarshal(cfgYaml, &cfg); err != nil { @@ -139,7 +178,7 @@ func updateKubeconfig(cfgYaml []byte, fqdn string) ([]byte, error) { cfg["clusters"] = appendOrSetNamed(clusters, fqdn, map[string]any{ "name": fqdn, "cluster": map[string]string{ - "server": "https://" + fqdn, + "server": scheme + fqdn, }, }) @@ -172,7 +211,7 @@ func updateKubeconfig(cfgYaml []byte, fqdn string) ([]byte, error) { return yaml.Marshal(cfg) } -func setKubeconfigForPeer(fqdn, filePath string) error { +func setKubeconfigForPeer(scheme, fqdn, filePath string) error { dir := filepath.Dir(filePath) if _, err := os.Stat(dir); err != nil { if !os.IsNotExist(err) { @@ -191,9 +230,97 @@ func setKubeconfigForPeer(fqdn, filePath string) error { if err != nil && !os.IsNotExist(err) { return fmt.Errorf("reading kubeconfig: %w", err) } - b, err = updateKubeconfig(b, fqdn) + b, err = updateKubeconfig(b, scheme, fqdn) if err != nil { return err } return os.WriteFile(filePath, b, 0600) } + +// nodeOrServiceDNSNameFromArg returns the PeerStatus.DNSName value from a peer +// in st that matches the input arg which can be a base name, full DNS name, or +// an IP. If none is found, it looks for a Tailscale Service +func nodeOrServiceDNSNameFromArg(st *ipnstate.Status, nm *netmap.NetworkMap, arg string) (string, error) { + // First check for a node DNS name. + if dnsName, ok := nodeDNSNameFromArg(st, arg); ok { + return dnsName, nil + } + + // If not found, check for a Tailscale Service DNS name. + rec, ok := serviceDNSRecordFromNetMap(nm, st.CurrentTailnet.MagicDNSSuffix, arg) + if !ok { + return "", fmt.Errorf("no peer found for %q", arg) + } + + // Validate we can see a peer advertising the Tailscale Service. 
+ ip, err := netip.ParseAddr(rec.Value) + if err != nil { + return "", fmt.Errorf("error parsing ExtraRecord IP address %q: %w", rec.Value, err) + } + ipPrefix := netip.PrefixFrom(ip, ip.BitLen()) + for _, ps := range st.Peer { + for _, allowedIP := range ps.AllowedIPs.All() { + if allowedIP == ipPrefix { + return rec.Name, nil + } + } + } + + return "", fmt.Errorf("%q is in MagicDNS, but is not currently reachable on any known peer", arg) +} + +func getNetMap(ctx context.Context) (*netmap.NetworkMap, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + watcher, err := localClient.WatchIPNBus(ctx, ipn.NotifyInitialNetMap) + if err != nil { + return nil, err + } + defer watcher.Close() + + n, err := watcher.Next() + if err != nil { + return nil, err + } + + return n.NetMap, nil +} + +func serviceDNSRecordFromNetMap(nm *netmap.NetworkMap, tcd, arg string) (rec tailcfg.DNSRecord, ok bool) { + argIP, _ := netip.ParseAddr(arg) + argFQDN, err := dnsname.ToFQDN(arg) + argFQDNValid := err == nil + if !argIP.IsValid() && !argFQDNValid { + return rec, false + } + + for _, rec := range nm.DNS.ExtraRecords { + if argIP.IsValid() { + recIP, _ := netip.ParseAddr(rec.Value) + if recIP == argIP { + return rec, true + } + continue + } + + if !argFQDNValid { + continue + } + + recFirstLabel := dnsname.FirstLabel(rec.Name) + if strings.EqualFold(arg, recFirstLabel) { + return rec, true + } + + recFQDN, err := dnsname.ToFQDN(rec.Name) + if err != nil { + continue + } + if strings.EqualFold(argFQDN.WithTrailingDot(), recFQDN.WithTrailingDot()) { + return rec, true + } + } + + return tailcfg.DNSRecord{}, false +} diff --git a/cmd/tailscale/cli/configure-kube_test.go b/cmd/tailscale/cli/configure-kube_test.go index d71a9b627..0c8b6b2b6 100644 --- a/cmd/tailscale/cli/configure-kube_test.go +++ b/cmd/tailscale/cli/configure-kube_test.go @@ -6,6 +6,7 @@ package cli import ( "bytes" + "fmt" "strings" "testing" @@ -16,6 +17,7 @@ func TestKubeconfig(t *testing.T) { const fqdn = "foo.tail-scale.ts.net" tests := []struct { name string + http bool in string want string wantErr error @@ -48,6 +50,27 @@ contexts: current-context: foo.tail-scale.ts.net kind: Config users: +- name: tailscale-auth + user: + token: unused`, + }, + { + name: "empty_http", + http: true, + in: "", + want: `apiVersion: v1 +clusters: +- cluster: + server: http://foo.tail-scale.ts.net + name: foo.tail-scale.ts.net +contexts: +- context: + cluster: foo.tail-scale.ts.net + user: tailscale-auth + name: foo.tail-scale.ts.net +current-context: foo.tail-scale.ts.net +kind: Config +users: - name: tailscale-auth user: token: unused`, @@ -202,7 +225,11 @@ users: } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := updateKubeconfig([]byte(tt.in), fqdn) + scheme := "https://" + if tt.http { + scheme = "http://" + } + got, err := updateKubeconfig([]byte(tt.in), scheme, fqdn) if err != nil { if err != tt.wantErr { t.Fatalf("updateKubeconfig() error = %v, wantErr %v", err, tt.wantErr) @@ -219,3 +246,30 @@ users: }) } } + +func TestGetInputs(t *testing.T) { + for _, arg := range []string{ + "foo.tail-scale.ts.net", + "foo", + "127.0.0.1", + } { + for _, prefix := range []string{"", "https://", "http://"} { + for _, httpFlag := range []bool{false, true} { + expectedHost := arg + expectedHTTP := (httpFlag && !strings.HasPrefix(prefix, "https://")) || strings.HasPrefix(prefix, "http://") + t.Run(fmt.Sprintf("%s%s_http=%v", prefix, arg, httpFlag), func(t *testing.T) { + host, http, err := 
getInputs(prefix+arg, httpFlag) + if err != nil { + t.Fatal(err) + } + if host != expectedHost { + t.Errorf("host = %v, want %v", host, expectedHost) + } + if http != expectedHTTP { + t.Errorf("http = %v, want %v", http, expectedHTTP) + } + }) + } + } + } +} From 22a8e0ac50ee2211e013fae2f2dbd8a9622657d8 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 22 Jul 2025 14:46:38 +0100 Subject: [PATCH 1123/1708] cmd/{k8s-operator,k8s-proxy},kube: use consistent type for auth mode config (#16626) Updates k8s-proxy's config so its auth mode config matches that we set in kube-apiserver ProxyGroups for consistency. Updates #13358 Change-Id: I95e29cec6ded2dc7c6d2d03f968a25c822bc0e01 Signed-off-by: Tom Proctor --- cmd/k8s-operator/api-server-proxy.go | 36 +++++-------------- cmd/k8s-operator/operator.go | 6 ++-- cmd/k8s-operator/proxygroup.go | 8 +++-- cmd/k8s-operator/proxygroup_test.go | 2 +- cmd/k8s-proxy/k8s-proxy.go | 9 ++--- k8s-operator/api-proxy/proxy.go | 8 ++--- k8s-operator/sessionrecording/hijacker.go | 2 +- kube/k8s-proxy/conf/conf.go | 9 ++--- kube/kubetypes/types.go | 23 ++++++++++++- kube/kubetypes/types_test.go | 42 +++++++++++++++++++++++ 10 files changed, 98 insertions(+), 47 deletions(-) create mode 100644 kube/kubetypes/types_test.go diff --git a/cmd/k8s-operator/api-server-proxy.go b/cmd/k8s-operator/api-server-proxy.go index 09a7b8c62..70333d2c4 100644 --- a/cmd/k8s-operator/api-server-proxy.go +++ b/cmd/k8s-operator/api-server-proxy.go @@ -9,30 +9,12 @@ import ( "fmt" "log" "os" -) - -type apiServerProxyMode int - -func (a apiServerProxyMode) String() string { - switch a { - case apiServerProxyModeDisabled: - return "disabled" - case apiServerProxyModeEnabled: - return "auth" - case apiServerProxyModeNoAuth: - return "noauth" - default: - return "unknown" - } -} -const ( - apiServerProxyModeDisabled apiServerProxyMode = iota - apiServerProxyModeEnabled - apiServerProxyModeNoAuth + "tailscale.com/kube/kubetypes" + "tailscale.com/types/ptr" ) -func parseAPIProxyMode() apiServerProxyMode { +func parseAPIProxyMode() *kubetypes.APIServerProxyMode { haveAuthProxyEnv := os.Getenv("AUTH_PROXY") != "" haveAPIProxyEnv := os.Getenv("APISERVER_PROXY") != "" switch { @@ -41,21 +23,21 @@ func parseAPIProxyMode() apiServerProxyMode { case haveAuthProxyEnv: var authProxyEnv = defaultBool("AUTH_PROXY", false) // deprecated if authProxyEnv { - return apiServerProxyModeEnabled + return ptr.To(kubetypes.APIServerProxyModeAuth) } - return apiServerProxyModeDisabled + return nil case haveAPIProxyEnv: var apiProxyEnv = defaultEnv("APISERVER_PROXY", "") // true, false or "noauth" switch apiProxyEnv { case "true": - return apiServerProxyModeEnabled + return ptr.To(kubetypes.APIServerProxyModeAuth) case "false", "": - return apiServerProxyModeDisabled + return nil case "noauth": - return apiServerProxyModeNoAuth + return ptr.To(kubetypes.APIServerProxyModeNoAuth) default: panic(fmt.Sprintf("unknown APISERVER_PROXY value %q", apiProxyEnv)) } } - return apiServerProxyModeDisabled + return nil } diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 94a0a6a78..76d2df51d 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -113,7 +113,7 @@ func main() { // additionally act as api-server proxy // https://tailscale.com/kb/1236/kubernetes-operator/?q=kubernetes#accessing-the-kubernetes-control-plane-using-an-api-server-proxy. 
mode := parseAPIProxyMode() - if mode == apiServerProxyModeDisabled { + if mode == nil { hostinfo.SetApp(kubetypes.AppOperator) } else { hostinfo.SetApp(kubetypes.AppInProcessAPIServerProxy) @@ -122,8 +122,8 @@ func main() { s, tsc := initTSNet(zlog, loginServer) defer s.Close() restConfig := config.GetConfigOrDie() - if mode != apiServerProxyModeDisabled { - ap, err := apiproxy.NewAPIServerProxy(zlog, restConfig, s, mode == apiServerProxyModeEnabled, true) + if mode != nil { + ap, err := apiproxy.NewAPIServerProxy(zlog, restConfig, s, *mode, true) if err != nil { zlog.Fatalf("error creating API server proxy: %v", err) } diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index d62cb0f11..f9c12797d 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -805,6 +805,10 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } } + mode := kubetypes.APIServerProxyModeAuth + if !isAuthAPIServerProxy(pg) { + mode = kubetypes.APIServerProxyModeNoAuth + } cfg := conf.VersionedConfig{ Version: "v1alpha1", ConfigV1Alpha1: &conf.ConfigV1Alpha1{ @@ -816,8 +820,8 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p // Reloadable fields. Hostname: &hostname, APIServerProxy: &conf.APIServerProxyConfig{ - Enabled: opt.NewBool(true), - AuthMode: opt.NewBool(isAuthAPIServerProxy(pg)), + Enabled: opt.NewBool(true), + Mode: &mode, // The first replica is elected as the cert issuer, same // as containerboot does for ingress-pg-reconciler. IssueCerts: opt.NewBool(i == 0), diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index ef6babc56..0dc791b04 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -1376,7 +1376,7 @@ func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) { Hostname: ptr.To("test-k8s-apiserver-0"), APIServerProxy: &conf.APIServerProxyConfig{ Enabled: opt.NewBool(true), - AuthMode: opt.NewBool(false), + Mode: ptr.To(kubetypes.APIServerProxyModeNoAuth), IssueCerts: opt.NewBool(true), }, }, diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index eea1f15f7..b56ceaab0 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -34,6 +34,7 @@ import ( apiproxy "tailscale.com/k8s-operator/api-proxy" "tailscale.com/kube/certs" "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/kubetypes" klc "tailscale.com/kube/localclient" "tailscale.com/kube/services" "tailscale.com/kube/state" @@ -238,11 +239,11 @@ func run(logger *zap.SugaredLogger) error { } // Setup for the API server proxy. - authMode := true - if cfg.Parsed.APIServerProxy != nil && cfg.Parsed.APIServerProxy.AuthMode.EqualBool(false) { - authMode = false + mode := kubetypes.APIServerProxyModeAuth + if cfg.Parsed.APIServerProxy != nil && cfg.Parsed.APIServerProxy.Mode != nil { + mode = *cfg.Parsed.APIServerProxy.Mode } - ap, err := apiproxy.NewAPIServerProxy(logger.Named("apiserver-proxy"), restConfig, ts, authMode, false) + ap, err := apiproxy.NewAPIServerProxy(logger.Named("apiserver-proxy"), restConfig, ts, mode, false) if err != nil { return fmt.Errorf("error creating api server proxy: %w", err) } diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index e079e984f..c648e1622 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -47,8 +47,8 @@ var ( // caller's Tailscale identity and the rules defined in the tailnet ACLs. 
// - false: the proxy is started and requests are passed through to the // Kubernetes API without any auth modifications. -func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsnet.Server, authMode bool, https bool) (*APIServerProxy, error) { - if !authMode { +func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsnet.Server, mode kubetypes.APIServerProxyMode, https bool) (*APIServerProxy, error) { + if mode == kubetypes.APIServerProxyModeNoAuth { restConfig = rest.AnonymousClientConfig(restConfig) } @@ -85,7 +85,7 @@ func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsn ap := &APIServerProxy{ log: zlog, lc: lc, - authMode: authMode, + authMode: mode == kubetypes.APIServerProxyModeAuth, https: https, upstreamURL: u, ts: ts, @@ -278,7 +278,7 @@ func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request Namespace: r.PathValue(namespaceNameKey), Log: ap.log, } - h := ksr.New(opts) + h := ksr.NewHijacker(opts) ap.rp.ServeHTTP(h, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index e8c534afc..675a9b1dd 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -57,7 +57,7 @@ var ( counterSessionRecordingsUploaded = clientmetric.NewCounter("k8s_auth_proxy_session_recordings_uploaded") ) -func New(opts HijackerOpts) *Hijacker { +func NewHijacker(opts HijackerOpts) *Hijacker { return &Hijacker{ ts: opts.TS, req: opts.Req, diff --git a/kube/k8s-proxy/conf/conf.go b/kube/k8s-proxy/conf/conf.go index a32e0c03e..fdb6301ac 100644 --- a/kube/k8s-proxy/conf/conf.go +++ b/kube/k8s-proxy/conf/conf.go @@ -14,6 +14,7 @@ import ( "net/netip" "github.com/tailscale/hujson" + "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" "tailscale.com/types/opt" ) @@ -66,10 +67,10 @@ type ConfigV1Alpha1 struct { } type APIServerProxyConfig struct { - Enabled opt.Bool `json:",omitempty"` // Whether to enable the API Server proxy. - AuthMode opt.Bool `json:",omitempty"` // Run in auth or noauth mode. - ServiceName *tailcfg.ServiceName `json:",omitempty"` // Name of the Tailscale Service to advertise. - IssueCerts opt.Bool `json:",omitempty"` // Whether this replica should issue TLS certs for the Tailscale Service. + Enabled opt.Bool `json:",omitempty"` // Whether to enable the API Server proxy. + Mode *kubetypes.APIServerProxyMode `json:",omitempty"` // "auth" or "noauth" mode. + ServiceName *tailcfg.ServiceName `json:",omitempty"` // Name of the Tailscale Service to advertise. + IssueCerts opt.Bool `json:",omitempty"` // Whether this replica should issue TLS certs for the Tailscale Service. } // Load reads and parses the config file at the provided path on disk. diff --git a/kube/kubetypes/types.go b/kube/kubetypes/types.go index 5e7d4cd1f..44b01fe1a 100644 --- a/kube/kubetypes/types.go +++ b/kube/kubetypes/types.go @@ -3,6 +3,8 @@ package kubetypes +import "fmt" + const ( // Hostinfo App values for the Tailscale Kubernetes Operator components. 
AppOperator = "k8s-operator" @@ -59,5 +61,24 @@ const ( LabelSecretTypeState = "state" LabelSecretTypeCerts = "certs" - KubeAPIServerConfigFile = "config.hujson" + KubeAPIServerConfigFile = "config.hujson" + APIServerProxyModeAuth APIServerProxyMode = "auth" + APIServerProxyModeNoAuth APIServerProxyMode = "noauth" ) + +// APIServerProxyMode specifies whether the API server proxy will add +// impersonation headers to requests based on the caller's Tailscale identity. +// May be "auth" or "noauth". +type APIServerProxyMode string + +func (a *APIServerProxyMode) UnmarshalJSON(data []byte) error { + switch string(data) { + case `"auth"`: + *a = APIServerProxyModeAuth + case `"noauth"`: + *a = APIServerProxyModeNoAuth + default: + return fmt.Errorf("unknown APIServerProxyMode %q", data) + } + return nil +} diff --git a/kube/kubetypes/types_test.go b/kube/kubetypes/types_test.go new file mode 100644 index 000000000..ea1846b32 --- /dev/null +++ b/kube/kubetypes/types_test.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package kubetypes + +import ( + "encoding/json" + "testing" +) + +func TestUnmarshalAPIServerProxyMode(t *testing.T) { + tests := []struct { + data string + expected APIServerProxyMode + }{ + {data: `{"mode":"auth"}`, expected: APIServerProxyModeAuth}, + {data: `{"mode":"noauth"}`, expected: APIServerProxyModeNoAuth}, + {data: `{"mode":""}`, expected: ""}, + {data: `{"mode":"Auth"}`, expected: ""}, + {data: `{"mode":"unknown"}`, expected: ""}, + } + + for _, tc := range tests { + var s struct { + Mode *APIServerProxyMode `json:",omitempty"` + } + err := json.Unmarshal([]byte(tc.data), &s) + if tc.expected == "" { + if err == nil { + t.Errorf("expected error for %q, got none", tc.data) + } + continue + } + if err != nil { + t.Errorf("unexpected error for %q: %v", tc.data, err) + continue + } + if *s.Mode != tc.expected { + t.Errorf("for %q expected %q, got %q", tc.data, tc.expected, *s.Mode) + } + } +} From 44947054967e3eda476c92206e0a14fd1ffc4ec0 Mon Sep 17 00:00:00 2001 From: David Bond Date: Tue, 22 Jul 2025 17:07:51 +0100 Subject: [PATCH 1124/1708] cmd/{k8s-proxy,containerboot,k8s-operator},kube: add health check and metrics endpoints for k8s-proxy (#16540) * Modifies the k8s-proxy to expose health check and metrics endpoints on the Pod's IP. * Moves cmd/containerboot/healthz.go and cmd/containerboot/metrics.go to /kube to be shared with /k8s-proxy. 
Updates #13358 Signed-off-by: David Bond --- cmd/containerboot/healthz.go | 57 ------------- cmd/containerboot/main.go | 16 ++-- cmd/k8s-operator/proxygroup.go | 8 +- cmd/k8s-operator/proxygroup_test.go | 2 + cmd/k8s-proxy/k8s-proxy.go | 67 +++++++++++++-- kube/health/healthz.go | 84 +++++++++++++++++++ kube/k8s-proxy/conf/conf.go | 36 ++++++-- .../containerboot => kube/metrics}/metrics.go | 8 +- 8 files changed, 196 insertions(+), 82 deletions(-) delete mode 100644 cmd/containerboot/healthz.go create mode 100644 kube/health/healthz.go rename {cmd/containerboot => kube/metrics}/metrics.go (90%) diff --git a/cmd/containerboot/healthz.go b/cmd/containerboot/healthz.go deleted file mode 100644 index d6a64a37c..000000000 --- a/cmd/containerboot/healthz.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux - -package main - -import ( - "fmt" - "log" - "net/http" - "sync" - - "tailscale.com/kube/kubetypes" -) - -// healthz is a simple health check server, if enabled it returns 200 OK if -// this tailscale node currently has at least one tailnet IP address else -// returns 503. -type healthz struct { - sync.Mutex - hasAddrs bool - podIPv4 string -} - -func (h *healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) { - h.Lock() - defer h.Unlock() - - if h.hasAddrs { - w.Header().Add(kubetypes.PodIPv4Header, h.podIPv4) - if _, err := w.Write([]byte("ok")); err != nil { - http.Error(w, fmt.Sprintf("error writing status: %v", err), http.StatusInternalServerError) - } - } else { - http.Error(w, "node currently has no tailscale IPs", http.StatusServiceUnavailable) - } -} - -func (h *healthz) update(healthy bool) { - h.Lock() - defer h.Unlock() - - if h.hasAddrs != healthy { - log.Println("Setting healthy", healthy) - } - h.hasAddrs = healthy -} - -// registerHealthHandlers registers a simple health handler at /healthz. -// A containerized tailscale instance is considered healthy if -// it has at least one tailnet IP address. 
-func registerHealthHandlers(mux *http.ServeMux, podIPv4 string) *healthz { - h := &healthz{podIPv4: podIPv4} - mux.Handle("GET /healthz", h) - return h -} diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 49c8a473a..f056d26f3 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -121,7 +121,9 @@ import ( "tailscale.com/client/tailscale" "tailscale.com/ipn" kubeutils "tailscale.com/k8s-operator" + healthz "tailscale.com/kube/health" "tailscale.com/kube/kubetypes" + "tailscale.com/kube/metrics" "tailscale.com/kube/services" "tailscale.com/tailcfg" "tailscale.com/types/logger" @@ -232,13 +234,13 @@ func run() error { } defer killTailscaled() - var healthCheck *healthz + var healthCheck *healthz.Healthz ep := &egressProxy{} if cfg.HealthCheckAddrPort != "" { mux := http.NewServeMux() log.Printf("Running healthcheck endpoint at %s/healthz", cfg.HealthCheckAddrPort) - healthCheck = registerHealthHandlers(mux, cfg.PodIPv4) + healthCheck = healthz.RegisterHealthHandlers(mux, cfg.PodIPv4, log.Printf) close := runHTTPServer(mux, cfg.HealthCheckAddrPort) defer close() @@ -249,12 +251,12 @@ func run() error { if cfg.localMetricsEnabled() { log.Printf("Running metrics endpoint at %s/metrics", cfg.LocalAddrPort) - registerMetricsHandlers(mux, client, cfg.DebugAddrPort) + metrics.RegisterMetricsHandlers(mux, client, cfg.DebugAddrPort) } if cfg.localHealthEnabled() { log.Printf("Running healthcheck endpoint at %s/healthz", cfg.LocalAddrPort) - healthCheck = registerHealthHandlers(mux, cfg.PodIPv4) + healthCheck = healthz.RegisterHealthHandlers(mux, cfg.PodIPv4, log.Printf) } if cfg.egressSvcsTerminateEPEnabled() { @@ -438,8 +440,8 @@ authLoop: ) // egressSvcsErrorChan will get an error sent to it if this containerboot instance is configured to expose 1+ // egress services in HA mode and errored. - var egressSvcsErrorChan = make(chan error) - var ingressSvcsErrorChan = make(chan error) + egressSvcsErrorChan := make(chan error) + ingressSvcsErrorChan := make(chan error) defer t.Stop() // resetTimer resets timer for when to next attempt to resolve the DNS // name for the proxy configured with TS_EXPERIMENTAL_DEST_DNS_NAME. The @@ -644,7 +646,7 @@ runLoop: } if healthCheck != nil { - healthCheck.update(len(addrs) != 0) + healthCheck.Update(len(addrs) != 0) } if cfg.ServeConfigPath != "" { diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index f9c12797d..debeb5c6b 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -826,6 +826,8 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p // as containerboot does for ingress-pg-reconciler. 
IssueCerts: opt.NewBool(i == 0), }, + LocalPort: ptr.To(uint16(9002)), + HealthCheckEnabled: opt.NewBool(true), }, } @@ -849,7 +851,11 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } if proxyClass != nil && proxyClass.Spec.TailscaleConfig != nil { - cfg.AcceptRoutes = &proxyClass.Spec.TailscaleConfig.AcceptRoutes + cfg.AcceptRoutes = opt.NewBool(proxyClass.Spec.TailscaleConfig.AcceptRoutes) + } + + if proxyClass != nil && proxyClass.Spec.Metrics != nil { + cfg.MetricsEnabled = opt.NewBool(proxyClass.Spec.Metrics.Enable) } if len(endpoints[nodePortSvcName]) > 0 { diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 0dc791b04..d763cf922 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -1379,6 +1379,8 @@ func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) { Mode: ptr.To(kubetypes.APIServerProxyModeNoAuth), IssueCerts: opt.NewBool(true), }, + LocalPort: ptr.To(uint16(9002)), + HealthCheckEnabled: opt.NewBool(true), }, } cfgB, err := json.Marshal(cfg) diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index b56ceaab0..448bbe397 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -12,9 +12,12 @@ import ( "context" "errors" "fmt" + "net" + "net/http" "os" "os/signal" "reflect" + "strconv" "strings" "syscall" "time" @@ -33,9 +36,11 @@ import ( "tailscale.com/ipn/store" apiproxy "tailscale.com/k8s-operator/api-proxy" "tailscale.com/kube/certs" + healthz "tailscale.com/kube/health" "tailscale.com/kube/k8s-proxy/conf" "tailscale.com/kube/kubetypes" klc "tailscale.com/kube/localclient" + "tailscale.com/kube/metrics" "tailscale.com/kube/services" "tailscale.com/kube/state" "tailscale.com/tailcfg" @@ -63,6 +68,7 @@ func run(logger *zap.SugaredLogger) error { var ( configPath = os.Getenv("TS_K8S_PROXY_CONFIG") podUID = os.Getenv("POD_UID") + podIP = os.Getenv("POD_IP") ) if configPath == "" { return errors.New("TS_K8S_PROXY_CONFIG unset") @@ -201,10 +207,57 @@ func run(logger *zap.SugaredLogger) error { }) } - if cfg.Parsed.AcceptRoutes != nil { + if cfg.Parsed.HealthCheckEnabled.EqualBool(true) || cfg.Parsed.MetricsEnabled.EqualBool(true) { + addr := podIP + if addr == "" { + addr = cfg.GetLocalAddr() + } + + addrPort := getLocalAddrPort(addr, cfg.GetLocalPort()) + mux := http.NewServeMux() + localSrv := &http.Server{Addr: addrPort, Handler: mux} + + if cfg.Parsed.MetricsEnabled.EqualBool(true) { + logger.Infof("Running metrics endpoint at %s/metrics", addrPort) + metrics.RegisterMetricsHandlers(mux, lc, "") + } + + if cfg.Parsed.HealthCheckEnabled.EqualBool(true) { + ipV4, _ := ts.TailscaleIPs() + hz := healthz.RegisterHealthHandlers(mux, ipV4.String(), logger.Infof) + group.Go(func() error { + err := hz.MonitorHealth(ctx, lc) + if err == nil || errors.Is(err, context.Canceled) { + return nil + } + return err + }) + } + + group.Go(func() error { + errChan := make(chan error) + go func() { + if err := localSrv.ListenAndServe(); err != nil { + errChan <- err + } + close(errChan) + }() + + select { + case <-ctx.Done(): + sCtx, scancel := context.WithTimeout(serveCtx, 10*time.Second) + defer scancel() + return localSrv.Shutdown(sCtx) + case err := <-errChan: + return err + } + }) + } + + if v, ok := cfg.Parsed.AcceptRoutes.Get(); ok { _, err = lc.EditPrefs(ctx, &ipn.MaskedPrefs{ RouteAllSet: true, - Prefs: ipn.Prefs{RouteAll: *cfg.Parsed.AcceptRoutes}, + Prefs: ipn.Prefs{RouteAll: v}, }) if err != nil { return 
fmt.Errorf("error editing prefs: %w", err) @@ -285,10 +338,10 @@ func run(logger *zap.SugaredLogger) error { prefs.HostnameSet = true prefs.Hostname = *cfg.Parsed.Hostname } - if cfg.Parsed.AcceptRoutes != nil && *cfg.Parsed.AcceptRoutes != currentPrefs.RouteAll { - cfgLogger = cfgLogger.With("AcceptRoutes", fmt.Sprintf("%v -> %v", currentPrefs.RouteAll, *cfg.Parsed.AcceptRoutes)) + if v, ok := cfg.Parsed.AcceptRoutes.Get(); ok && v != currentPrefs.RouteAll { + cfgLogger = cfgLogger.With("AcceptRoutes", fmt.Sprintf("%v -> %v", currentPrefs.RouteAll, v)) prefs.RouteAllSet = true - prefs.Prefs.RouteAll = *cfg.Parsed.AcceptRoutes + prefs.Prefs.RouteAll = v } if !prefs.IsEmpty() { if _, err := lc.EditPrefs(ctx, &prefs); err != nil { @@ -304,6 +357,10 @@ func run(logger *zap.SugaredLogger) error { } } +func getLocalAddrPort(addr string, port uint16) string { + return net.JoinHostPort(addr, strconv.FormatUint(uint64(port), 10)) +} + func getStateStore(path *string, logger *zap.SugaredLogger) (ipn.StateStore, error) { p := "mem:" if path != nil { diff --git a/kube/health/healthz.go b/kube/health/healthz.go new file mode 100644 index 000000000..c8cfcc7ec --- /dev/null +++ b/kube/health/healthz.go @@ -0,0 +1,84 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package health contains shared types and underlying methods for serving +// a `/healthz` endpoint for containerboot and k8s-proxy. +package health + +import ( + "context" + "fmt" + "net/http" + "sync" + + "tailscale.com/client/local" + "tailscale.com/ipn" + "tailscale.com/kube/kubetypes" + "tailscale.com/types/logger" +) + +// Healthz is a simple health check server, if enabled it returns 200 OK if +// this tailscale node currently has at least one tailnet IP address else +// returns 503. +type Healthz struct { + sync.Mutex + hasAddrs bool + podIPv4 string + logger logger.Logf +} + +func (h *Healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.Lock() + defer h.Unlock() + + if h.hasAddrs { + w.Header().Add(kubetypes.PodIPv4Header, h.podIPv4) + if _, err := w.Write([]byte("ok")); err != nil { + http.Error(w, fmt.Sprintf("error writing status: %v", err), http.StatusInternalServerError) + } + } else { + http.Error(w, "node currently has no tailscale IPs", http.StatusServiceUnavailable) + } +} + +func (h *Healthz) Update(healthy bool) { + h.Lock() + defer h.Unlock() + + if h.hasAddrs != healthy { + h.logger("Setting healthy %v", healthy) + } + h.hasAddrs = healthy +} + +func (h *Healthz) MonitorHealth(ctx context.Context, lc *local.Client) error { + w, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialNetMap) + if err != nil { + return fmt.Errorf("failed to watch IPN bus: %w", err) + } + + for { + n, err := w.Next() + if err != nil { + return err + } + + if n.NetMap != nil { + h.Update(n.NetMap.SelfNode.Addresses().Len() != 0) + } + } +} + +// RegisterHealthHandlers registers a simple health handler at /healthz. +// A containerized tailscale instance is considered healthy if +// it has at least one tailnet IP address. 
+func RegisterHealthHandlers(mux *http.ServeMux, podIPv4 string, logger logger.Logf) *Healthz { + h := &Healthz{ + podIPv4: podIPv4, + logger: logger, + } + mux.Handle("GET /healthz", h) + return h +} diff --git a/kube/k8s-proxy/conf/conf.go b/kube/k8s-proxy/conf/conf.go index fdb6301ac..529495243 100644 --- a/kube/k8s-proxy/conf/conf.go +++ b/kube/k8s-proxy/conf/conf.go @@ -49,21 +49,23 @@ type VersionedConfig struct { } type ConfigV1Alpha1 struct { - AuthKey *string `json:",omitempty"` // Tailscale auth key to use. - State *string `json:",omitempty"` // Path to the Tailscale state. - LogLevel *string `json:",omitempty"` // "debug", "info". Defaults to "info". - App *string `json:",omitempty"` // e.g. kubetypes.AppProxyGroupKubeAPIServer - ServerURL *string `json:",omitempty"` // URL of the Tailscale coordination server. - // StaticEndpoints are additional, user-defined endpoints that this node - // should advertise amongst its wireguard endpoints. - StaticEndpoints []netip.AddrPort `json:",omitempty"` + AuthKey *string `json:",omitempty"` // Tailscale auth key to use. + State *string `json:",omitempty"` // Path to the Tailscale state. + LogLevel *string `json:",omitempty"` // "debug", "info". Defaults to "info". + App *string `json:",omitempty"` // e.g. kubetypes.AppProxyGroupKubeAPIServer + ServerURL *string `json:",omitempty"` // URL of the Tailscale coordination server. + LocalAddr *string `json:",omitempty"` // The address to use for serving HTTP health checks and metrics (defaults to all interfaces). + LocalPort *uint16 `json:",omitempty"` // The port to use for serving HTTP health checks and metrics (defaults to 9002). + MetricsEnabled opt.Bool `json:",omitempty"` // Serve metrics on :/metrics. + HealthCheckEnabled opt.Bool `json:",omitempty"` // Serve health check on :/metrics. // TODO(tomhjp): The remaining fields should all be reloadable during // runtime, but currently missing most of the APIServerProxy fields. Hostname *string `json:",omitempty"` // Tailscale device hostname. - AcceptRoutes *bool `json:",omitempty"` // Accepts routes advertised by other Tailscale nodes. + AcceptRoutes opt.Bool `json:",omitempty"` // Accepts routes advertised by other Tailscale nodes. AdvertiseServices []string `json:",omitempty"` // Tailscale Services to advertise. APIServerProxy *APIServerProxyConfig `json:",omitempty"` // Config specific to the API Server proxy. + StaticEndpoints []netip.AddrPort `json:",omitempty"` // StaticEndpoints are additional, user-defined endpoints that this node should advertise amongst its wireguard endpoints. } type APIServerProxyConfig struct { @@ -108,3 +110,19 @@ func Load(raw []byte) (c Config, err error) { return c, nil } + +func (c *Config) GetLocalAddr() string { + if c.Parsed.LocalAddr == nil { + return "[::]" + } + + return *c.Parsed.LocalAddr +} + +func (c *Config) GetLocalPort() uint16 { + if c.Parsed.LocalPort == nil { + return uint16(9002) + } + + return *c.Parsed.LocalPort +} diff --git a/cmd/containerboot/metrics.go b/kube/metrics/metrics.go similarity index 90% rename from cmd/containerboot/metrics.go rename to kube/metrics/metrics.go index bbd050de6..0db683008 100644 --- a/cmd/containerboot/metrics.go +++ b/kube/metrics/metrics.go @@ -1,9 +1,11 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux +//go:build !plan9 -package main +// Package metrics contains shared types and underlying methods for serving +// localapi metrics. This is primarily consumed by containerboot and k8s-proxy. 
+package metrics import ( "fmt" @@ -68,7 +70,7 @@ func (m *metrics) handleDebug(w http.ResponseWriter, r *http.Request) { // In 1.78.x and 1.80.x, it also proxies debug paths to tailscaled's debug // endpoint if configured to ease migration for a breaking change serving user // metrics instead of debug metrics on the "metrics" port. -func registerMetricsHandlers(mux *http.ServeMux, lc *local.Client, debugAddrPort string) { +func RegisterMetricsHandlers(mux *http.ServeMux, lc *local.Client, debugAddrPort string) { m := &metrics{ lc: lc, debugEndpoint: debugAddrPort, From 0de5e7b94f0bb89bcaed108f656d3ed50da85d02 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Tue, 22 Jul 2025 09:22:17 -1000 Subject: [PATCH 1125/1708] util/set: add IntSet (#16602) IntSet is a set optimized for integers. Updates tailscale/corp#29809 Signed-off-by: Joe Tsai --- util/set/intset.go | 172 +++++++++++++++++++++++++++++++++++++++ util/set/intset_test.go | 174 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 346 insertions(+) create mode 100644 util/set/intset.go create mode 100644 util/set/intset_test.go diff --git a/util/set/intset.go b/util/set/intset.go new file mode 100644 index 000000000..b747d3bff --- /dev/null +++ b/util/set/intset.go @@ -0,0 +1,172 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package set + +import ( + "iter" + "maps" + "math/bits" + "math/rand/v2" + + "golang.org/x/exp/constraints" + "tailscale.com/util/mak" +) + +// IntSet is a set optimized for integer values close to zero +// or set of integers that are close in value. +type IntSet[T constraints.Integer] struct { + // bits is a [bitSet] for numbers less than [bits.UintSize]. + bits bitSet + + // extra is a mapping of [bitSet] for numbers not in bits, + // where the key is a number modulo [bits.UintSize]. + extra map[uint64]bitSet + + // extraLen is the count of numbers in extra since len(extra) + // does not reflect that each bitSet may have multiple numbers. + extraLen int +} + +// Values returns an iterator over the elements of the set. +// The iterator will yield the elements in no particular order. +func (s IntSet[T]) Values() iter.Seq[T] { + return func(yield func(T) bool) { + if s.bits != 0 { + for i := range s.bits.values() { + if !yield(decodeZigZag[T](i)) { + return + } + } + } + if s.extra != nil { + for hi, bs := range s.extra { + for lo := range bs.values() { + if !yield(decodeZigZag[T](hi*bits.UintSize + lo)) { + return + } + } + } + } + } +} + +// Contains reports whether e is in the set. +func (s IntSet[T]) Contains(e T) bool { + if v := encodeZigZag(e); v < bits.UintSize { + return s.bits.contains(v) + } else { + hi, lo := v/uint64(bits.UintSize), v%uint64(bits.UintSize) + return s.extra[hi].contains(lo) + } +} + +// Add adds e to the set. +// +// When storing a IntSet in a map as a value type, +// it is important to re-assign the map entry after calling Add or Delete, +// as the IntSet's representation may change. +func (s *IntSet[T]) Add(e T) { + if v := encodeZigZag(e); v < bits.UintSize { + s.bits.add(v) + } else { + hi, lo := v/uint64(bits.UintSize), v%uint64(bits.UintSize) + if bs := s.extra[hi]; !bs.contains(lo) { + bs.add(lo) + mak.Set(&s.extra, hi, bs) + s.extra[hi] = bs + s.extraLen++ + } + } +} + +// AddSeq adds the values from seq to the set. +func (s *IntSet[T]) AddSeq(seq iter.Seq[T]) { + for e := range seq { + s.Add(e) + } +} + +// Len reports the number of elements in the set. 
+func (s IntSet[T]) Len() int { + return s.bits.len() + s.extraLen +} + +// Delete removes e from the set. +// +// When storing a IntSet in a map as a value type, +// it is important to re-assign the map entry after calling Add or Delete, +// as the IntSet's representation may change. +func (s *IntSet[T]) Delete(e T) { + if v := encodeZigZag(e); v < bits.UintSize { + s.bits.delete(v) + } else { + hi, lo := v/uint64(bits.UintSize), v%uint64(bits.UintSize) + if bs := s.extra[hi]; bs.contains(lo) { + bs.delete(lo) + mak.Set(&s.extra, hi, bs) + s.extra[hi] = bs + s.extraLen-- + } + } +} + +// Clone returns a copy of s that doesn't alias the original. +func (s IntSet[T]) Clone() IntSet[T] { + return IntSet[T]{ + bits: s.bits, + extra: maps.Clone(s.extra), + extraLen: s.extraLen, + } +} + +type bitSet uint + +func (s bitSet) values() iter.Seq[uint64] { + return func(yield func(uint64) bool) { + // Hyrum-proofing: randomly iterate in forwards or reverse. + if rand.Uint64()%2 == 0 { + for i := 0; i < bits.UintSize; i++ { + if s.contains(uint64(i)) && !yield(uint64(i)) { + return + } + } + } else { + for i := bits.UintSize; i >= 0; i-- { + if s.contains(uint64(i)) && !yield(uint64(i)) { + return + } + } + } + } +} +func (s bitSet) len() int { return bits.OnesCount(uint(s)) } +func (s bitSet) contains(i uint64) bool { return s&(1< 0 } +func (s *bitSet) add(i uint64) { *s |= 1 << i } +func (s *bitSet) delete(i uint64) { *s &= ^(1 << i) } + +// encodeZigZag encodes an integer as an unsigned integer ensuring that +// negative integers near zero still have a near zero positive value. +// For unsigned integers, it returns the value verbatim. +func encodeZigZag[T constraints.Integer](v T) uint64 { + var zero T + if ^zero >= 0 { // must be constraints.Unsigned + return uint64(v) + } else { // must be constraints.Signed + // See [google.golang.org/protobuf/encoding/protowire.EncodeZigZag] + return uint64(int64(v)<<1) ^ uint64(int64(v)>>63) + } +} + +// decodeZigZag decodes an unsigned integer as an integer ensuring that +// negative integers near zero still have a near zero positive value. +// For unsigned integers, it returns the value verbatim. 
+func decodeZigZag[T constraints.Integer](v uint64) T { + var zero T + if ^zero >= 0 { // must be constraints.Unsigned + return T(v) + } else { // must be constraints.Signed + // See [google.golang.org/protobuf/encoding/protowire.DecodeZigZag] + return T(int64(v>>1) ^ int64(v)<<63>>63) + } +} diff --git a/util/set/intset_test.go b/util/set/intset_test.go new file mode 100644 index 000000000..9523fe88d --- /dev/null +++ b/util/set/intset_test.go @@ -0,0 +1,174 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package set + +import ( + "maps" + "math" + "slices" + "testing" + + "golang.org/x/exp/constraints" +) + +func TestIntSet(t *testing.T) { + t.Run("Int64", func(t *testing.T) { + ss := make(Set[int64]) + var si IntSet[int64] + intValues(t, ss, si) + deleteInt(t, ss, &si, -5) + deleteInt(t, ss, &si, 2) + deleteInt(t, ss, &si, 75) + intValues(t, ss, si) + addInt(t, ss, &si, 2) + addInt(t, ss, &si, 75) + addInt(t, ss, &si, 75) + addInt(t, ss, &si, -3) + addInt(t, ss, &si, -3) + addInt(t, ss, &si, -3) + addInt(t, ss, &si, math.MinInt64) + addInt(t, ss, &si, 8) + intValues(t, ss, si) + addInt(t, ss, &si, 77) + addInt(t, ss, &si, 76) + addInt(t, ss, &si, 76) + addInt(t, ss, &si, 76) + intValues(t, ss, si) + addInt(t, ss, &si, -5) + addInt(t, ss, &si, 7) + addInt(t, ss, &si, -83) + addInt(t, ss, &si, math.MaxInt64) + intValues(t, ss, si) + deleteInt(t, ss, &si, -5) + deleteInt(t, ss, &si, 2) + deleteInt(t, ss, &si, 75) + intValues(t, ss, si) + deleteInt(t, ss, &si, math.MinInt64) + deleteInt(t, ss, &si, math.MaxInt64) + intValues(t, ss, si) + }) + + t.Run("Uint64", func(t *testing.T) { + ss := make(Set[uint64]) + var si IntSet[uint64] + intValues(t, ss, si) + deleteInt(t, ss, &si, 5) + deleteInt(t, ss, &si, 2) + deleteInt(t, ss, &si, 75) + intValues(t, ss, si) + addInt(t, ss, &si, 2) + addInt(t, ss, &si, 75) + addInt(t, ss, &si, 75) + addInt(t, ss, &si, 3) + addInt(t, ss, &si, 3) + addInt(t, ss, &si, 8) + intValues(t, ss, si) + addInt(t, ss, &si, 77) + addInt(t, ss, &si, 76) + addInt(t, ss, &si, 76) + addInt(t, ss, &si, 76) + intValues(t, ss, si) + addInt(t, ss, &si, 5) + addInt(t, ss, &si, 7) + addInt(t, ss, &si, 83) + addInt(t, ss, &si, math.MaxInt64) + intValues(t, ss, si) + deleteInt(t, ss, &si, 5) + deleteInt(t, ss, &si, 2) + deleteInt(t, ss, &si, 75) + intValues(t, ss, si) + deleteInt(t, ss, &si, math.MaxInt64) + intValues(t, ss, si) + }) +} + +func intValues[T constraints.Integer](t testing.TB, ss Set[T], si IntSet[T]) { + got := slices.Collect(maps.Keys(ss)) + slices.Sort(got) + want := slices.Collect(si.Values()) + slices.Sort(want) + if !slices.Equal(got, want) { + t.Fatalf("Values mismatch:\n\tgot %v\n\twant %v", got, want) + } + if got, want := si.Len(), ss.Len(); got != want { + t.Fatalf("Len() = %v, want %v", got, want) + } +} + +func addInt[T constraints.Integer](t testing.TB, ss Set[T], si *IntSet[T], v T) { + t.Helper() + if got, want := si.Contains(v), ss.Contains(v); got != want { + t.Fatalf("Contains(%v) = %v, want %v", v, got, want) + } + ss.Add(v) + si.Add(v) + if !si.Contains(v) { + t.Fatalf("Contains(%v) = false, want true", v) + } + if got, want := si.Len(), ss.Len(); got != want { + t.Fatalf("Len() = %v, want %v", got, want) + } +} + +func deleteInt[T constraints.Integer](t testing.TB, ss Set[T], si *IntSet[T], v T) { + t.Helper() + if got, want := si.Contains(v), ss.Contains(v); got != want { + t.Fatalf("Contains(%v) = %v, want %v", v, got, want) + } + ss.Delete(v) + si.Delete(v) + if si.Contains(v) { + t.Fatalf("Contains(%v) = 
true, want false", v) + } + if got, want := si.Len(), ss.Len(); got != want { + t.Fatalf("Len() = %v, want %v", got, want) + } +} + +func TestZigZag(t *testing.T) { + t.Run("Int64", func(t *testing.T) { + for _, tt := range []struct { + decoded int64 + encoded uint64 + }{ + {math.MinInt64, math.MaxUint64}, + {-2, 3}, + {-1, 1}, + {0, 0}, + {1, 2}, + {2, 4}, + {math.MaxInt64, math.MaxUint64 - 1}, + } { + encoded := encodeZigZag(tt.decoded) + if encoded != tt.encoded { + t.Errorf("encodeZigZag(%v) = %v, want %v", tt.decoded, encoded, tt.encoded) + } + decoded := decodeZigZag[int64](tt.encoded) + if decoded != tt.decoded { + t.Errorf("decodeZigZag(%v) = %v, want %v", tt.encoded, decoded, tt.decoded) + } + } + }) + t.Run("Uint64", func(t *testing.T) { + for _, tt := range []struct { + decoded uint64 + encoded uint64 + }{ + {0, 0}, + {1, 1}, + {2, 2}, + {math.MaxInt64, math.MaxInt64}, + {math.MaxUint64, math.MaxUint64}, + } { + encoded := encodeZigZag(tt.decoded) + if encoded != tt.encoded { + t.Errorf("encodeZigZag(%v) = %v, want %v", tt.decoded, encoded, tt.encoded) + } + decoded := decodeZigZag[uint64](tt.encoded) + if decoded != tt.decoded { + t.Errorf("decodeZigZag(%v) = %v, want %v", tt.encoded, decoded, tt.decoded) + } + } + }) +} From 19faaff95c6f32a2fae26f003b467fb623962d09 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Tue, 22 Jul 2025 16:23:51 -0400 Subject: [PATCH 1126/1708] cmd/tailscale/cli: revert key for web config for services to FQDN (#16627) This commit reverts the key of Web field in ipn.ServiceConfig to use FQDN instead of service name for the host part of HostPort. This change is because k8s operator already build base on the assumption of the part being FQDN. We don't want to break the code with dependency. 
Fixes tailscale/corp#30695 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- cmd/tailscale/cli/serve_legacy.go | 2 +- cmd/tailscale/cli/serve_legacy_test.go | 1 + cmd/tailscale/cli/serve_v2.go | 40 ++++++++++++++------------ cmd/tailscale/cli/serve_v2_test.go | 37 ++++++++++++------------ cmd/tsidp/tsidp.go | 2 +- ipn/ipnlocal/serve.go | 4 ++- ipn/serve.go | 10 +++---- 7 files changed, 51 insertions(+), 45 deletions(-) diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 7c79f7f7b..1a05d0543 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -363,7 +363,7 @@ func (e *serveEnv) handleWebServe(ctx context.Context, srvPort uint16, useTLS bo return errHelp } - sc.SetWebHandler(h, dnsName, srvPort, mount, useTLS) + sc.SetWebHandler(h, dnsName, srvPort, mount, useTLS, noService.String()) if !reflect.DeepEqual(cursc, sc) { if err := e.lc.SetServeConfig(ctx, sc); err != nil { diff --git a/cmd/tailscale/cli/serve_legacy_test.go b/cmd/tailscale/cli/serve_legacy_test.go index 6b053fbd7..1ea76e72c 100644 --- a/cmd/tailscale/cli/serve_legacy_test.go +++ b/cmd/tailscale/cli/serve_legacy_test.go @@ -876,6 +876,7 @@ var fakeStatus = &ipnstate.Status{ tailcfg.CapabilityFunnelPorts + "?ports=443,8443": nil, }, }, + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, } func (lc *fakeLocalServeClient) StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 8832a232d..056bfabb0 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -331,6 +331,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { return fmt.Errorf("getting client status: %w", err) } dnsName := strings.TrimSuffix(st.Self.DNSName, ".") + magicDNSSuffix := st.CurrentTailnet.MagicDNSSuffix // set parent serve config to always be persisted // at the top level, but a nested config might be @@ -394,7 +395,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { var msg string if turnOff { // only unset serve when trying to unset with type and port flags. 
- err = e.unsetServe(sc, st, dnsName, srvType, srvPort, mount) + err = e.unsetServe(sc, dnsName, srvType, srvPort, mount, magicDNSSuffix) } else { if err := e.validateConfig(parentSC, srvPort, srvType, svcName); err != nil { return err @@ -406,7 +407,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { if len(args) > 0 { target = args[0] } - err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel) + err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix) msg = e.messageForPort(sc, st, dnsName, srvType, srvPort) } if err != nil { @@ -585,12 +586,12 @@ func serveFromPortHandler(tcp *ipn.TCPPortHandler) serveType { } } -func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool) error { +func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool, mds string) error { // update serve config based on the type switch srvType { case serveTypeHTTPS, serveTypeHTTP: useTLS := srvType == serveTypeHTTPS - err := e.applyWebServe(sc, dnsName, srvPort, useTLS, mount, target) + err := e.applyWebServe(sc, dnsName, srvPort, useTLS, mount, target, mds) if err != nil { return fmt.Errorf("failed apply web serve: %w", err) } @@ -643,11 +644,10 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN var webConfig *ipn.WebServerConfig var tcpHandler *ipn.TCPPortHandler ips := st.TailscaleIPs + magicDNSSuffix := st.CurrentTailnet.MagicDNSSuffix host := dnsName - displayedHost := dnsName if forService { - displayedHost = strings.Join([]string{svcName.WithoutPrefix(), st.CurrentTailnet.MagicDNSSuffix}, ".") - host = svcName.WithoutPrefix() + host = strings.Join([]string{svcName.WithoutPrefix(), magicDNSSuffix}, ".") } hp := ipn.HostPort(net.JoinHostPort(host, strconv.Itoa(int(srvPort)))) @@ -687,7 +687,7 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN output.WriteString("\n\n") svc := sc.Services[svcName] if srvType == serveTypeTUN && svc.Tun { - output.WriteString(fmt.Sprintf(msgRunningTunService, displayedHost)) + output.WriteString(fmt.Sprintf(msgRunningTunService, host)) output.WriteString("\n") output.WriteString(fmt.Sprintf(msgDisableServiceTun, dnsName)) output.WriteString("\n") @@ -716,7 +716,7 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN }) for _, m := range mounts { t, d := srvTypeAndDesc(webConfig.Handlers[m]) - output.WriteString(fmt.Sprintf("%s://%s%s%s\n", scheme, displayedHost, portPart, m)) + output.WriteString(fmt.Sprintf("%s://%s%s%s\n", scheme, host, portPart, m)) output.WriteString(fmt.Sprintf("%s %-5s %s\n\n", "|--", t, d)) } } else if tcpHandler != nil { @@ -726,7 +726,7 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN tlsStatus = "TLS terminated" } - output.WriteString(fmt.Sprintf("|-- tcp://%s:%d (%s)\n", displayedHost, srvPort, tlsStatus)) + output.WriteString(fmt.Sprintf("|-- tcp://%s:%d (%s)\n", host, srvPort, tlsStatus)) for _, a := range ips { ipp := net.JoinHostPort(a.String(), strconv.Itoa(int(srvPort))) output.WriteString(fmt.Sprintf("|-- tcp://%s\n", ipp)) @@ -755,7 +755,7 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN return output.String() } -func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target string) error { +func (e 
*serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target string, mds string) error { h := new(ipn.HTTPHandler) switch { case strings.HasPrefix(target, "text:"): @@ -797,7 +797,7 @@ func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort ui return errors.New("cannot serve web; already serving TCP") } - sc.SetWebHandler(h, dnsName, srvPort, mount, useTLS) + sc.SetWebHandler(h, dnsName, srvPort, mount, useTLS, mds) return nil } @@ -850,11 +850,12 @@ func (e *serveEnv) applyFunnel(sc *ipn.ServeConfig, dnsName string, srvPort uint } // unsetServe removes the serve config for the given serve port. -// dnsName is a FQDN or a serviceName (with `svc:` prefix). -func (e *serveEnv) unsetServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName string, srvType serveType, srvPort uint16, mount string) error { +// dnsName is a FQDN or a serviceName (with `svc:` prefix). mds +// is the Magic DNS suffix, which is used to recreate serve's host. +func (e *serveEnv) unsetServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, mds string) error { switch srvType { case serveTypeHTTPS, serveTypeHTTP: - err := e.removeWebServe(sc, st, dnsName, srvPort, mount) + err := e.removeWebServe(sc, dnsName, srvPort, mount, mds) if err != nil { return fmt.Errorf("failed to remove web serve: %w", err) } @@ -1010,8 +1011,9 @@ func isLegacyInvocation(subcmd serveMode, args []string) (string, bool) { // removeWebServe removes a web handler from the serve config // and removes funnel if no remaining mounts exist for the serve port. // The srvPort argument is the serving port and the mount argument is -// the mount point or registered path to remove. -func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName string, srvPort uint16, mount string) error { +// the mount point or registered path to remove. mds is the Magic DNS suffix, +// which is used to recreate serve's host. 
+func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, mount string, mds string) error { if sc == nil { return nil } @@ -1026,7 +1028,7 @@ func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN if svc == nil { return errors.New("service does not exist") } - hostName = svcName.WithoutPrefix() + hostName = strings.Join([]string{svcName.WithoutPrefix(), mds}, ".") webServeMap = svc.Web } @@ -1063,7 +1065,7 @@ func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN } if forService { - sc.RemoveServiceWebHandler(st, svcName, srvPort, mounts) + sc.RemoveServiceWebHandler(svcName, hostName, srvPort, mounts) } else { sc.RemoveWebHandler(dnsName, srvPort, mounts, true) } diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 2ba0b3f84..95bf5b101 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -1299,7 +1299,7 @@ func TestMessageForPort(t *testing.T) { "foo.test.ts.net:443": true, }, }, - status: &ipnstate.Status{}, + status: &ipnstate.Status{CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}}, dnsName: "foo.test.ts.net", srvType: serveTypeHTTPS, srvPort: 443, @@ -1328,7 +1328,7 @@ func TestMessageForPort(t *testing.T) { }, }, }, - status: &ipnstate.Status{}, + status: &ipnstate.Status{CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}}, dnsName: "foo.test.ts.net", srvType: serveTypeHTTP, srvPort: 80, @@ -1352,7 +1352,7 @@ func TestMessageForPort(t *testing.T) { 80: {HTTP: true}, }, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "foo:80": { + "foo.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -1396,7 +1396,7 @@ func TestMessageForPort(t *testing.T) { 80: {HTTP: true}, }, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -1440,7 +1440,7 @@ func TestMessageForPort(t *testing.T) { 2200: {HTTPS: true}, }, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "foo:2200": { + "foo.test.ts.net:2200": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -1670,6 +1670,7 @@ func TestIsLegacyInvocation(t *testing.T) { func TestSetServe(t *testing.T) { e := &serveEnv{} + magicDNSSuffix := "test.ts.net" tests := []struct { name string desc string @@ -1816,7 +1817,7 @@ func TestSetServe(t *testing.T) { "svc:bar": { TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -1834,7 +1835,7 @@ func TestSetServe(t *testing.T) { "svc:bar": { TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -1853,7 +1854,7 @@ func TestSetServe(t *testing.T) { "svc:bar": { TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3001"}, }, @@ -1871,7 +1872,7 @@ func TestSetServe(t *testing.T) { "svc:bar": { TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: 
map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -1893,12 +1894,12 @@ func TestSetServe(t *testing.T) { 88: {HTTP: true}, }, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, }, - "bar:88": { + "bar.test.ts.net:88": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3001"}, }, @@ -1916,7 +1917,7 @@ func TestSetServe(t *testing.T) { "svc:bar": { TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -1937,7 +1938,7 @@ func TestSetServe(t *testing.T) { 80: {HTTP: true}, }, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, "/added": {Proxy: "http://localhost:3001"}, @@ -1965,7 +1966,7 @@ func TestSetServe(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel) + err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel, magicDNSSuffix) if err != nil && !tt.expectErr { t.Fatalf("got error: %v; did not expect error.", err) } @@ -2030,7 +2031,7 @@ func TestUnsetServe(t *testing.T) { 80: {HTTP: true}, }, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -2124,7 +2125,7 @@ func TestUnsetServe(t *testing.T) { 80: {HTTP: true}, }, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -2199,7 +2200,7 @@ func TestUnsetServe(t *testing.T) { 80: {HTTP: true}, }, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -2224,7 +2225,7 @@ func TestUnsetServe(t *testing.T) { if tt.setServeEnv { e = tt.serveEnv } - err := e.unsetServe(tt.cfg, tt.st, tt.dnsName, tt.srvType, tt.srvPort, tt.mount) + err := e.unsetServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mount, tt.st.CurrentTailnet.MagicDNSSuffix) if err != nil && !tt.expectErr { t.Fatalf("got error: %v; did not expect error.", err) } diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 6a0c2d89e..8df68cd74 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -270,7 +270,7 @@ func serveOnLocalTailscaled(ctx context.Context, lc *local.Client, st *ipnstate. 
foregroundSc.SetFunnel(serverURL, dstPort, shouldFunnel) foregroundSc.SetWebHandler(&ipn.HTTPHandler{ Proxy: fmt.Sprintf("https://%s", net.JoinHostPort(serverURL, strconv.Itoa(int(dstPort)))), - }, serverURL, uint16(*flagPort), "/", true) + }, serverURL, uint16(*flagPort), "/", true, st.CurrentTailnet.MagicDNSSuffix) err = lc.SetServeConfig(ctx, sc) if err != nil { return nil, watcherChan, fmt.Errorf("could not set serve config: %v", err) diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 28262251c..36738b881 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -1014,7 +1014,9 @@ func (b *LocalBackend) webServerConfig(hostname string, forVIPService tailcfg.Se return c, false } if forVIPService != "" { - key := ipn.HostPort(net.JoinHostPort(forVIPService.WithoutPrefix(), fmt.Sprintf("%d", port))) + magicDNSSuffix := b.currentNode().NetMap().MagicDNSSuffix() + fqdn := strings.Join([]string{forVIPService.WithoutPrefix(), magicDNSSuffix}, ".") + key := ipn.HostPort(net.JoinHostPort(fqdn, fmt.Sprintf("%d", port))) return b.serveConfig.FindServiceWeb(forVIPService, key) } key := ipn.HostPort(net.JoinHostPort(hostname, fmt.Sprintf("%d", port))) diff --git a/ipn/serve.go b/ipn/serve.go index fae0ad5d6..a0f1334d7 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -343,8 +343,9 @@ func (sc *ServeConfig) FindConfig(port uint16) (*ServeConfig, bool) { // SetWebHandler sets the given HTTPHandler at the specified host, port, // and mount in the serve config. sc.TCP is also updated to reflect web // serving usage of the given port. The st argument is needed when setting -// a web handler for a service, otherwise it can be nil. -func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uint16, mount string, useTLS bool) { +// a web handler for a service, otherwise it can be nil. mds is the Magic DNS +// suffix, which is used to recreate serve's host. +func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uint16, mount string, useTLS bool, mds string) { if sc == nil { sc = new(ServeConfig) } @@ -353,7 +354,7 @@ func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uin webServerMap := &sc.Web hostName := host if svcName := tailcfg.AsServiceName(host); svcName != "" { - hostName = svcName.WithoutPrefix() + hostName = strings.Join([]string{svcName.WithoutPrefix(), mds}, ".") svc, ok := sc.Services[svcName] if !ok { svc = new(ServiceConfig) @@ -464,8 +465,7 @@ func (sc *ServeConfig) RemoveWebHandler(host string, port uint16, mounts []strin // RemoveServiceWebHandler deletes the web handlers at all of the given mount points // for the provided host and port in the serve config for the given service. -func (sc *ServeConfig) RemoveServiceWebHandler(st *ipnstate.Status, svcName tailcfg.ServiceName, port uint16, mounts []string) { - hostName := svcName.WithoutPrefix() +func (sc *ServeConfig) RemoveServiceWebHandler(svcName tailcfg.ServiceName, hostName string, port uint16, mounts []string) { hp := HostPort(net.JoinHostPort(hostName, strconv.Itoa(int(port)))) svc, ok := sc.Services[svcName] From 729d6532ff356101b3e34c28b3c5ce9a186af44e Mon Sep 17 00:00:00 2001 From: Simon Law Date: Tue, 22 Jul 2025 13:54:28 -0700 Subject: [PATCH 1127/1708] tailcfg: add Hostinfo.ExitNodeID to report the selected exit node (#16625) When a client selects a particular exit node, Control may use that as a signal for deciding other routes. 
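For example (the address below is hypothetical, not taken from this change), a user might pin an exit node with:

    tailscale set --exit-node=100.64.0.7

and the client resolves that preference to the chosen node's stable ID before reporting it.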
This patch causes the client to report whenever the current exit node changes, through tailcfg.Hostinfo.ExitNodeID. It relies on a properly set ipn.Prefs.ExitNodeID, which should already be resolved by `tailscale set`. Updates tailscale/corp#30536 Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 9 +++++ ipn/ipnlocal/local_test.go | 67 +++++++++++++++++++++++++++++--------- tailcfg/tailcfg.go | 4 ++- tailcfg/tailcfg_clone.go | 1 + tailcfg/tailcfg_test.go | 16 +++++++++ tailcfg/tailcfg_view.go | 2 ++ 6 files changed, 83 insertions(+), 16 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8665a88c4..ce0f4f687 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5612,6 +5612,11 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // WireIngress. hi.WireIngress = b.shouldWireInactiveIngressLocked() hi.AppConnector.Set(prefs.AppConnector().Advertise) + + // The [tailcfg.Hostinfo.ExitNodeID] field tells control which exit node + // was selected, if any. Since [LocalBackend.resolveExitNodeIPLocked] + // has already run, there is no need to consult [ipn.Prefs.ExitNodeIP]. + hi.ExitNodeID = prefs.ExitNodeID() } // enterState transitions the backend into newState, updating internal @@ -6136,6 +6141,10 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { }); err != nil { b.logf("failed to save exit node changes: %v", err) } + + // Send the resolved exit node to Control via Hostinfo. + b.hostinfo.ExitNodeID = prefs.ExitNodeID + b.sendToLocked(ipn.Notify{Prefs: ptr.To(prefs.View())}, allClients) return true } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 13681fc04..da6fc8b4a 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -614,19 +614,20 @@ func TestConfigureExitNode(t *testing.T) { } tests := []struct { - name string - prefs ipn.Prefs - netMap *netmap.NetworkMap - report *netcheck.Report - changePrefs *ipn.MaskedPrefs - useExitNodeEnabled *bool - exitNodeIDPolicy *tailcfg.StableNodeID - exitNodeIPPolicy *netip.Addr - exitNodeAllowedIDs []tailcfg.StableNodeID // nil if all IDs are allowed for auto exit nodes - exitNodeAllowOverride bool // whether [syspolicy.AllowExitNodeOverride] should be set to true - wantChangePrefsErr error // if non-nil, the error we expect from [LocalBackend.EditPrefsAs] - wantPrefs ipn.Prefs - wantExitNodeToggleErr error // if non-nil, the error we expect from [LocalBackend.SetUseExitNodeEnabled] + name string + prefs ipn.Prefs + netMap *netmap.NetworkMap + report *netcheck.Report + changePrefs *ipn.MaskedPrefs + useExitNodeEnabled *bool + exitNodeIDPolicy *tailcfg.StableNodeID + exitNodeIPPolicy *netip.Addr + exitNodeAllowedIDs []tailcfg.StableNodeID // nil if all IDs are allowed for auto exit nodes + exitNodeAllowOverride bool // whether [syspolicy.AllowExitNodeOverride] should be set to true + wantChangePrefsErr error // if non-nil, the error we expect from [LocalBackend.EditPrefsAs] + wantPrefs ipn.Prefs + wantExitNodeToggleErr error // if non-nil, the error we expect from [LocalBackend.SetUseExitNodeEnabled] + wantHostinfoExitNodeID *tailcfg.StableNodeID }{ { name: "exit-node-id-via-prefs", // set exit node ID via prefs @@ -643,6 +644,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "exit-node-ip-via-prefs", // set exit node IP via prefs (should be resolved to an ID) @@ -659,6 +661,7 @@ func 
TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-exit-node-via-prefs/any", // set auto exit node via prefs @@ -676,6 +679,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-exit-node-via-prefs/set-exit-node-id-via-prefs", // setting exit node ID explicitly should disable auto exit node @@ -695,6 +699,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), AutoExitNode: "", // should be unset }, + wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), }, { name: "auto-exit-node-via-prefs/any/no-report", // set auto exit node via prefs, but no report means we can't resolve the exit node ID @@ -711,6 +716,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, // cannot resolve; traffic will be dropped AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), }, { name: "auto-exit-node-via-prefs/any/no-netmap", // similarly, but without a netmap (no exit node should be selected) @@ -727,6 +733,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, // cannot resolve; traffic will be dropped AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), }, { name: "auto-exit-node-via-prefs/foo", // set auto exit node via prefs with an unknown/unsupported expression @@ -744,6 +751,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" AutoExitNode: "foo", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-exit-node-via-prefs/off", // toggle the exit node off after it was set to "any" @@ -763,6 +771,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "", InternalExitNodePrior: "auto:any", }, + wantHostinfoExitNodeID: ptr.To(tailcfg.StableNodeID("")), }, { name: "auto-exit-node-via-prefs/on", // toggle the exit node on @@ -779,6 +788,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", InternalExitNodePrior: "auto:any", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "id-via-policy", // set exit node ID via syspolicy @@ -791,6 +801,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "id-via-policy/cannot-override-via-prefs/by-id", // syspolicy should take precedence over prefs @@ -809,7 +820,8 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, - wantChangePrefsErr: errManagedByPolicy, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantChangePrefsErr: errManagedByPolicy, }, { name: "id-via-policy/cannot-override-via-prefs/by-ip", // syspolicy should take precedence over prefs @@ -828,7 +840,8 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, - wantChangePrefsErr: errManagedByPolicy, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantChangePrefsErr: errManagedByPolicy, }, { name: "id-via-policy/cannot-override-via-prefs/by-auto-expr", // syspolicy should take precedence over prefs @@ -860,6 +873,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode2.StableID(), }, + wantHostinfoExitNodeID: 
ptr.To(exitNode2.StableID()), }, { name: "auto-any-via-policy", // set auto exit node via syspolicy (an exit node should be selected) @@ -874,6 +888,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-any-via-policy/no-report", // set auto exit node via syspolicy without a netcheck report (no exit node should be selected) @@ -888,6 +903,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), }, { name: "auto-any-via-policy/no-netmap", // similarly, but without a netmap (no exit node should be selected) @@ -902,6 +918,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), }, { name: "auto-any-via-policy/no-netmap/with-existing", // set auto exit node via syspolicy without a netmap, but with a previously set exit node ID @@ -918,6 +935,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), }, { name: "auto-any-via-policy/no-netmap/with-allowed-existing", // same, but now with a syspolicy setting that explicitly allows the existing exit node ID @@ -936,6 +954,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), }, { name: "auto-any-via-policy/no-netmap/with-disallowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID @@ -954,6 +973,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, // we don't have a netmap yet, and the current exit node ID is not allowed; block traffic AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), }, { name: "auto-any-via-policy/with-netmap/with-allowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID @@ -972,6 +992,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), // we have a netmap; switch to the best allowed exit node AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), }, { name: "auto-any-via-policy/with-netmap/switch-to-better", // if all exit nodes are allowed, switch to the best one once we have a netmap @@ -987,6 +1008,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), // switch to the best exit node AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-foo-via-policy", // set auto exit node via syspolicy with an unknown/unsupported expression @@ -1001,6 +1023,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" AutoExitNode: "foo", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-foo-via-edit-prefs", // set auto exit node via EditPrefs with an unknown/unsupported expression @@ -1018,6 +1041,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" AutoExitNode: "foo", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-any-via-policy/toggle-off", // cannot toggle off the exit node if it was set via syspolicy @@ -1035,6 
+1059,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", InternalExitNodePrior: "", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-any-via-policy/allow-override/change", // changing the exit node is allowed by [syspolicy.AllowExitNodeOverride] @@ -1056,6 +1081,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), // overridden by user AutoExitNode: "", // cleared, as we are setting the exit node ID explicitly }, + wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), }, { name: "auto-any-via-policy/allow-override/clear", // clearing the exit node ID is not allowed by [syspolicy.AllowExitNodeOverride] @@ -1079,6 +1105,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", InternalExitNodePrior: "", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-any-via-policy/allow-override/toggle-off", // similarly, toggling off the exit node is not allowed even with [syspolicy.AllowExitNodeOverride] @@ -1097,6 +1124,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", InternalExitNodePrior: "", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-any-via-initial-prefs/no-netmap/clear-auto-exit-node", @@ -1117,6 +1145,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "", // cleared ExitNodeID: "", // has never been resolved, so it should be cleared as well }, + wantHostinfoExitNodeID: ptr.To(tailcfg.StableNodeID("")), }, { name: "auto-any-via-initial-prefs/with-netmap/clear-auto-exit-node", @@ -1137,6 +1166,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "", // cleared ExitNodeID: exitNode1.StableID(), // a resolved exit node ID should be retained }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, } syspolicy.RegisterWellKnownSettingsForTest(t) @@ -1197,6 +1227,13 @@ func TestConfigureExitNode(t *testing.T) { if diff := cmp.Diff(&tt.wantPrefs, lb.Prefs().AsStruct(), opts...); diff != "" { t.Errorf("Prefs(+got -want): %v", diff) } + + // And check Hostinfo. + if tt.wantHostinfoExitNodeID != nil { + if got := lb.hostinfo.ExitNodeID; got != *tt.wantHostinfoExitNodeID { + t.Errorf("Hostinfo.ExitNodeID got %v, want %v", got, *tt.wantHostinfoExitNodeID) + } + } }) } } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 550914b96..307b39f93 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -166,7 +166,8 @@ type CapabilityVersion int // - 119: 2025-07-10: Client uses Hostinfo.Location.Priority to prioritize one route over another. // - 120: 2025-07-15: Client understands peer relay disco messages, and implements peer client and relay server functions // - 121: 2025-07-19: Client understands peer relay endpoint alloc with [disco.AllocateUDPRelayEndpointRequest] & [disco.AllocateUDPRelayEndpointResponse] -const CurrentCapabilityVersion CapabilityVersion = 121 +// - 122: 2025-07-21: Client sends Hostinfo.ExitNodeID to report which exit node it has selected, if any. +const CurrentCapabilityVersion CapabilityVersion = 122 // ID is an integer ID for a user, node, or login allocated by the // control plane. 
@@ -875,6 +876,7 @@ type Hostinfo struct { UserspaceRouter opt.Bool `json:",omitempty"` // if the client's subnet router is running in userspace (netstack) mode AppConnector opt.Bool `json:",omitempty"` // if the client is running the app-connector service ServicesHash string `json:",omitempty"` // opaque hash of the most recent list of tailnet services, change in hash indicates config should be fetched via c2n + ExitNodeID StableNodeID `json:",omitzero"` // the client’s selected exit node, empty when unselected. // Location represents geographical location data about a // Tailscale host. Location is optional and only set if diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 412e1f38d..95f8905b8 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -186,6 +186,7 @@ var _HostinfoCloneNeedsRegeneration = Hostinfo(struct { UserspaceRouter opt.Bool AppConnector opt.Bool ServicesHash string + ExitNodeID StableNodeID Location *Location TPM *TPMInfo StateEncrypted opt.Bool diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index 833314df8..addd2330b 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -67,6 +67,7 @@ func TestHostinfoEqual(t *testing.T) { "UserspaceRouter", "AppConnector", "ServicesHash", + "ExitNodeID", "Location", "TPM", "StateEncrypted", @@ -273,6 +274,21 @@ func TestHostinfoEqual(t *testing.T) { &Hostinfo{IngressEnabled: true}, false, }, + { + &Hostinfo{ExitNodeID: "stable-exit"}, + &Hostinfo{ExitNodeID: "stable-exit"}, + true, + }, + { + &Hostinfo{ExitNodeID: ""}, + &Hostinfo{}, + true, + }, + { + &Hostinfo{ExitNodeID: ""}, + &Hostinfo{ExitNodeID: "stable-exit"}, + false, + }, } for i, tt := range tests { got := tt.a.Equal(tt.b) diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 7e82cd871..c40780021 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -300,6 +300,7 @@ func (v HostinfoView) Userspace() opt.Bool { return v.ж.User func (v HostinfoView) UserspaceRouter() opt.Bool { return v.ж.UserspaceRouter } func (v HostinfoView) AppConnector() opt.Bool { return v.ж.AppConnector } func (v HostinfoView) ServicesHash() string { return v.ж.ServicesHash } +func (v HostinfoView) ExitNodeID() StableNodeID { return v.ж.ExitNodeID } func (v HostinfoView) Location() LocationView { return v.ж.Location.View() } func (v HostinfoView) TPM() views.ValuePointer[TPMInfo] { return views.ValuePointerOf(v.ж.TPM) } @@ -345,6 +346,7 @@ var _HostinfoViewNeedsRegeneration = Hostinfo(struct { UserspaceRouter opt.Bool AppConnector opt.Bool ServicesHash string + ExitNodeID StableNodeID Location *Location TPM *TPMInfo StateEncrypted opt.Bool From 1ae6a97a7313b3412dc89618efffad3181a07997 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Tue, 22 Jul 2025 21:13:25 -0400 Subject: [PATCH 1128/1708] cmd/tailscale/cli: add advertise command to advertise a node as service proxy to tailnet (#16620) This commit adds a advertise subcommand for tailscale serve, that would declare the node as a service proxy for a service. This command only adds the service to node's list of advertised service, but doesn't modify the list of services currently advertised. 
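A rough usage sketch (the service name "svc:my-app" is made up for illustration):

    tailscale serve drain svc:my-app      # stop advertising this node as a host for the service
    tailscale serve advertise svc:my-app  # advertise it again, leaving the existing serve config untouched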
Fixes tailscale/corp#28016 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- cmd/tailscale/cli/serve_v2.go | 33 ++++++++++++++++++++++++++---- cmd/tailscale/cli/serve_v2_test.go | 14 ++++++------- 2 files changed, 36 insertions(+), 11 deletions(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 056bfabb0..91a236970 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -220,6 +220,16 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { LongHelp: "Remove all handlers configured for the specified service.", Exec: e.runServeClear, }, + { + Name: "advertise", + ShortUsage: fmt.Sprintf("tailscale %s advertise ", info.Name), + ShortHelp: "Advertise this node as a service proxy to the tailnet", + LongHelp: "Advertise this node as a service proxy to the tailnet. This command is used\n" + + "to make the current node be considered as a service host for a service. This is\n" + + "useful to bring a service back after it has been drained. (i.e. after running \n" + + "`tailscale serve drain `). This is not needed if you are using `tailscale serve` to initialize a service.", + Exec: e.runServeAdvertise, + }, }, } } @@ -401,7 +411,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { return err } if forService { - e.addServiceToPrefs(ctx, svcName.String()) + e.addServiceToPrefs(ctx, svcName) } target := "" if len(args) > 0 { @@ -442,16 +452,16 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { } } -func (e *serveEnv) addServiceToPrefs(ctx context.Context, serviceName string) error { +func (e *serveEnv) addServiceToPrefs(ctx context.Context, serviceName tailcfg.ServiceName) error { prefs, err := e.lc.GetPrefs(ctx) if err != nil { return fmt.Errorf("error getting prefs: %w", err) } advertisedServices := prefs.AdvertiseServices - if slices.Contains(advertisedServices, serviceName) { + if slices.Contains(advertisedServices, serviceName.String()) { return nil // already advertised } - advertisedServices = append(advertisedServices, serviceName) + advertisedServices = append(advertisedServices, serviceName.String()) _, err = e.lc.EditPrefs(ctx, &ipn.MaskedPrefs{ AdvertiseServicesSet: true, Prefs: ipn.Prefs{ @@ -526,6 +536,21 @@ func (e *serveEnv) runServeClear(ctx context.Context, args []string) error { return e.lc.SetServeConfig(ctx, sc) } +func (e *serveEnv) runServeAdvertise(ctx context.Context, args []string) error { + if len(args) == 0 { + return fmt.Errorf("error: missing service name argument") + } + if len(args) != 1 { + fmt.Fprintf(Stderr, "error: invalid number of arguments\n\n") + return errHelp + } + svc := tailcfg.ServiceName(args[0]) + if err := svc.Validate(); err != nil { + return fmt.Errorf("invalid service name: %w", err) + } + return e.addServiceToPrefs(ctx, svc) +} + const backgroundExistsMsg = "background configuration already exists, use `tailscale %s --%s=%d off` to remove the existing configuration" // validateConfig checks if the serve config is valid to serve the type wanted on the port. 
diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 95bf5b101..1deeaf3ea 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -1167,24 +1167,24 @@ func TestCleanURLPath(t *testing.T) { func TestAddServiceToPrefs(t *testing.T) { tests := []struct { name string - dnsName string + svcName tailcfg.ServiceName startServices []string expected []string }{ { name: "add service to empty prefs", - dnsName: "svc:foo", + svcName: "svc:foo", expected: []string{"svc:foo"}, }, { name: "add service to existing prefs", - dnsName: "svc:bar", + svcName: "svc:bar", startServices: []string{"svc:foo"}, expected: []string{"svc:foo", "svc:bar"}, }, { name: "add existing service to prefs", - dnsName: "svc:foo", + svcName: "svc:foo", startServices: []string{"svc:foo"}, expected: []string{"svc:foo"}, }, @@ -1200,12 +1200,12 @@ func TestAddServiceToPrefs(t *testing.T) { }, }) e := &serveEnv{lc: lc, bg: bgBoolFlag{true, false}} - err := e.addServiceToPrefs(ctx, tt.dnsName) + err := e.addServiceToPrefs(ctx, tt.svcName) if err != nil { - t.Fatalf("addServiceToPrefs(%q) returned unexpected error: %v", tt.dnsName, err) + t.Fatalf("addServiceToPrefs(%q) returned unexpected error: %v", tt.svcName, err) } if !slices.Equal(lc.prefs.AdvertiseServices, tt.expected) { - t.Errorf("addServiceToPrefs(%q) = %v, want %v", tt.dnsName, lc.prefs.AdvertiseServices, tt.expected) + t.Errorf("addServiceToPrefs(%q) = %v, want %v", tt.svcName, lc.prefs.AdvertiseServices, tt.expected) } }) } From f1f334b23d4891d5195442b4581e72febff17de4 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Wed, 23 Jul 2025 11:25:05 -0400 Subject: [PATCH 1129/1708] flake.lock/go.mod.sri: update flake version info (#16631) Update nixpkgs-unstable to include newer golang to satisfy go.mod requirement of 1.24.4 Update vendor hash to current. 
Updates #15015 Signed-off-by: Mike O'Driscoll --- .github/workflows/update-flake.yml | 2 +- flake.lock | 6 +++--- flake.nix | 3 ++- go.mod.sri | 2 +- shell.nix | 2 +- 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index 61a09cea1..1968c6830 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -8,7 +8,7 @@ on: - main paths: - go.mod - - .github/workflows/update-flakes.yml + - .github/workflows/update-flake.yml workflow_dispatch: concurrency: diff --git a/flake.lock b/flake.lock index 05b0f303e..87f234e3e 100644 --- a/flake.lock +++ b/flake.lock @@ -36,11 +36,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1743938762, - "narHash": "sha256-UgFYn8sGv9B8PoFpUfCa43CjMZBl1x/ShQhRDHBFQdI=", + "lastModified": 1753151930, + "narHash": "sha256-XSQy6wRKHhRe//iVY5lS/ZpI/Jn6crWI8fQzl647wCg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "74a40410369a1c35ee09b8a1abee6f4acbedc059", + "rev": "83e677f31c84212343f4cc553bab85c2efcad60a", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 2f920bfd4..17d263a8d 100644 --- a/flake.nix +++ b/flake.nix @@ -130,4 +130,5 @@ in flake-utils.lib.eachDefaultSystem (system: flakeForSystem nixpkgs system); } -# nix-direnv cache busting line: sha256-av4kr09rjNRmag94ziNjJuI/cg8b8lAD3Tk24t/ezH4= +# nix-direnv cache busting line: sha256-4QTSspHLYJfzlontQ7msXyOB5gzq7ZwSvWmKuYY5klA= + diff --git a/go.mod.sri b/go.mod.sri index 6c8357e04..845086191 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-av4kr09rjNRmag94ziNjJuI/cg8b8lAD3Tk24t/ezH4= \ No newline at end of file +sha256-4QTSspHLYJfzlontQ7msXyOB5gzq7ZwSvWmKuYY5klA= diff --git a/shell.nix b/shell.nix index bb8eacb67..2eb5b441a 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-av4kr09rjNRmag94ziNjJuI/cg8b8lAD3Tk24t/ezH4= +# nix-direnv cache busting line: sha256-4QTSspHLYJfzlontQ7msXyOB5gzq7ZwSvWmKuYY5klA= From 1ef8fbf4705637ee73c46300566e3df56c4885e4 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Wed, 23 Jul 2025 11:50:42 -0700 Subject: [PATCH 1130/1708] ipn/ipnlocal: send Hostinfo after resolveExitNode for "auto:any" (#16632) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In #16625, I introduced a mechanism for sending the selected exit node to Control via tailcfg.Hostinfo.ExitNodeID as part of the MapRequest. @nickkhyl pointed out that LocalBackend.doSetHostinfoFilterServices needs to be triggered in order to actually send this update. This patch adds that command. It also prevents the client from sending "auto:any" in that field, because that’s not a real exit node ID. This patch also fills in some missing checks in TestConfigureExitNode. Updates tailscale/corp#30536 Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 40 +++++++++++++++++----- ipn/ipnlocal/local_test.go | 69 +++++++++++++++++++------------------- 2 files changed, 66 insertions(+), 43 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ce0f4f687..7154b942c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5614,9 +5614,22 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip hi.AppConnector.Set(prefs.AppConnector().Advertise) // The [tailcfg.Hostinfo.ExitNodeID] field tells control which exit node - // was selected, if any. 
Since [LocalBackend.resolveExitNodeIPLocked] - // has already run, there is no need to consult [ipn.Prefs.ExitNodeIP]. - hi.ExitNodeID = prefs.ExitNodeID() + // was selected, if any. + // + // If auto exit node is enabled (via [ipn.Prefs.AutoExitNode] or + // [syspolicy.ExitNodeID]), or an exit node is specified by ExitNodeIP + // instead of ExitNodeID , and we don't yet have enough info to resolve + // it (usually due to missing netmap or net report), then ExitNodeID in + // the prefs may be invalid (typically, [unresolvedExitNodeID]) until + // the netmap is available. + // + // In this case, we shouldn't update the Hostinfo with the bogus + // ExitNodeID here; [LocalBackend.ResolveExitNode] will be called once + // the netmap and/or net report have been received to both pick the exit + // node and notify control of the change. + if sid := prefs.ExitNodeID(); sid != unresolvedExitNodeID { + hi.ExitNodeID = prefs.ExitNodeID() + } } // enterState transitions the backend into newState, updating internal @@ -6117,9 +6130,10 @@ func (b *LocalBackend) RefreshExitNode() { } } -// resolveExitNode determines which exit node to use based on the current -// prefs and netmap. It updates the exit node ID in the prefs if needed, -// sends a notification to clients, and returns true if the exit node has changed. +// resolveExitNode determines which exit node to use based on the current prefs +// and netmap. It updates the exit node ID in the prefs if needed, updates the +// exit node ID in the hostinfo if needed, sends a notification to clients, and +// returns true if the exit node has changed. // // It is the caller's responsibility to reconfigure routes and actually // start using the selected exit node, if needed. @@ -6142,8 +6156,18 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { b.logf("failed to save exit node changes: %v", err) } - // Send the resolved exit node to Control via Hostinfo. - b.hostinfo.ExitNodeID = prefs.ExitNodeID + // Send the resolved exit node to control via [tailcfg.Hostinfo]. + // [LocalBackend.applyPrefsToHostinfoLocked] usually sets the Hostinfo, + // but it deferred until this point because there was a bogus ExitNodeID + // in the prefs. + // + // TODO(sfllaw): Mutating b.hostinfo here is undesirable, mutating + // in-place doubly so. 
+ sid := prefs.ExitNodeID + if sid != unresolvedExitNodeID && b.hostinfo.ExitNodeID != sid { + b.hostinfo.ExitNodeID = sid + b.goTracker.Go(b.doSetHostinfoFilterServices) + } b.sendToLocked(ipn.Notify{Prefs: ptr.To(prefs.View())}, allClients) return true diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index da6fc8b4a..dd2837022 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -627,7 +627,7 @@ func TestConfigureExitNode(t *testing.T) { wantChangePrefsErr error // if non-nil, the error we expect from [LocalBackend.EditPrefsAs] wantPrefs ipn.Prefs wantExitNodeToggleErr error // if non-nil, the error we expect from [LocalBackend.SetUseExitNodeEnabled] - wantHostinfoExitNodeID *tailcfg.StableNodeID + wantHostinfoExitNodeID tailcfg.StableNodeID }{ { name: "exit-node-id-via-prefs", // set exit node ID via prefs @@ -644,7 +644,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "exit-node-ip-via-prefs", // set exit node IP via prefs (should be resolved to an ID) @@ -661,7 +661,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-exit-node-via-prefs/any", // set auto exit node via prefs @@ -679,7 +679,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-exit-node-via-prefs/set-exit-node-id-via-prefs", // setting exit node ID explicitly should disable auto exit node @@ -699,7 +699,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), AutoExitNode: "", // should be unset }, - wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), + wantHostinfoExitNodeID: exitNode2.StableID(), }, { name: "auto-exit-node-via-prefs/any/no-report", // set auto exit node via prefs, but no report means we can't resolve the exit node ID @@ -716,7 +716,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, // cannot resolve; traffic will be dropped AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), + wantHostinfoExitNodeID: "", }, { name: "auto-exit-node-via-prefs/any/no-netmap", // similarly, but without a netmap (no exit node should be selected) @@ -733,7 +733,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, // cannot resolve; traffic will be dropped AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), + wantHostinfoExitNodeID: "", }, { name: "auto-exit-node-via-prefs/foo", // set auto exit node via prefs with an unknown/unsupported expression @@ -751,7 +751,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" AutoExitNode: "foo", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-exit-node-via-prefs/off", // toggle the exit node off after it was set to "any" @@ -771,7 +771,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "", InternalExitNodePrior: "auto:any", }, - wantHostinfoExitNodeID: ptr.To(tailcfg.StableNodeID("")), + wantHostinfoExitNodeID: "", }, { 
name: "auto-exit-node-via-prefs/on", // toggle the exit node on @@ -788,7 +788,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", InternalExitNodePrior: "auto:any", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "id-via-policy", // set exit node ID via syspolicy @@ -801,7 +801,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "id-via-policy/cannot-override-via-prefs/by-id", // syspolicy should take precedence over prefs @@ -820,7 +820,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), wantChangePrefsErr: errManagedByPolicy, }, { @@ -840,7 +840,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), wantChangePrefsErr: errManagedByPolicy, }, { @@ -860,7 +860,8 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, - wantChangePrefsErr: errManagedByPolicy, + wantHostinfoExitNodeID: exitNode1.StableID(), + wantChangePrefsErr: errManagedByPolicy, }, { name: "ip-via-policy", // set exit node IP via syspolicy (should be resolved to an ID) @@ -873,7 +874,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode2.StableID(), }, - wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), + wantHostinfoExitNodeID: exitNode2.StableID(), }, { name: "auto-any-via-policy", // set auto exit node via syspolicy (an exit node should be selected) @@ -888,7 +889,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-any-via-policy/no-report", // set auto exit node via syspolicy without a netcheck report (no exit node should be selected) @@ -903,7 +904,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), + wantHostinfoExitNodeID: "", }, { name: "auto-any-via-policy/no-netmap", // similarly, but without a netmap (no exit node should be selected) @@ -918,7 +919,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), + wantHostinfoExitNodeID: "", }, { name: "auto-any-via-policy/no-netmap/with-existing", // set auto exit node via syspolicy without a netmap, but with a previously set exit node ID @@ -935,7 +936,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), + wantHostinfoExitNodeID: exitNode2.StableID(), }, { name: "auto-any-via-policy/no-netmap/with-allowed-existing", // same, but now with a syspolicy setting that explicitly allows the existing exit node ID @@ -954,7 +955,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), + wantHostinfoExitNodeID: 
exitNode2.StableID(), }, { name: "auto-any-via-policy/no-netmap/with-disallowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID @@ -973,7 +974,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, // we don't have a netmap yet, and the current exit node ID is not allowed; block traffic AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), + wantHostinfoExitNodeID: "", }, { name: "auto-any-via-policy/with-netmap/with-allowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID @@ -992,7 +993,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), // we have a netmap; switch to the best allowed exit node AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), + wantHostinfoExitNodeID: exitNode2.StableID(), }, { name: "auto-any-via-policy/with-netmap/switch-to-better", // if all exit nodes are allowed, switch to the best one once we have a netmap @@ -1008,7 +1009,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), // switch to the best exit node AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-foo-via-policy", // set auto exit node via syspolicy with an unknown/unsupported expression @@ -1023,7 +1024,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" AutoExitNode: "foo", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-foo-via-edit-prefs", // set auto exit node via EditPrefs with an unknown/unsupported expression @@ -1041,7 +1042,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" AutoExitNode: "foo", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-any-via-policy/toggle-off", // cannot toggle off the exit node if it was set via syspolicy @@ -1059,7 +1060,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", InternalExitNodePrior: "", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-any-via-policy/allow-override/change", // changing the exit node is allowed by [syspolicy.AllowExitNodeOverride] @@ -1081,7 +1082,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), // overridden by user AutoExitNode: "", // cleared, as we are setting the exit node ID explicitly }, - wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), + wantHostinfoExitNodeID: exitNode2.StableID(), }, { name: "auto-any-via-policy/allow-override/clear", // clearing the exit node ID is not allowed by [syspolicy.AllowExitNodeOverride] @@ -1105,7 +1106,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", InternalExitNodePrior: "", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-any-via-policy/allow-override/toggle-off", // similarly, toggling off the exit node is not allowed even with [syspolicy.AllowExitNodeOverride] @@ -1124,7 +1125,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", InternalExitNodePrior: "", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + 
wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-any-via-initial-prefs/no-netmap/clear-auto-exit-node", @@ -1145,7 +1146,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "", // cleared ExitNodeID: "", // has never been resolved, so it should be cleared as well }, - wantHostinfoExitNodeID: ptr.To(tailcfg.StableNodeID("")), + wantHostinfoExitNodeID: "", }, { name: "auto-any-via-initial-prefs/with-netmap/clear-auto-exit-node", @@ -1166,7 +1167,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "", // cleared ExitNodeID: exitNode1.StableID(), // a resolved exit node ID should be retained }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, } syspolicy.RegisterWellKnownSettingsForTest(t) @@ -1229,10 +1230,8 @@ func TestConfigureExitNode(t *testing.T) { } // And check Hostinfo. - if tt.wantHostinfoExitNodeID != nil { - if got := lb.hostinfo.ExitNodeID; got != *tt.wantHostinfoExitNodeID { - t.Errorf("Hostinfo.ExitNodeID got %v, want %v", got, *tt.wantHostinfoExitNodeID) - } + if got := lb.hostinfo.ExitNodeID; got != tt.wantHostinfoExitNodeID { + t.Errorf("Hostinfo.ExitNodeID got %s, want %s", got, tt.wantHostinfoExitNodeID) } }) } From 179745b83ed6d687bdc9d501ccdbfdec1cb3f9d7 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 23 Jul 2025 12:30:04 -0700 Subject: [PATCH 1131/1708] wgengine/magicsock: update discoInfo docs (#16638) discoInfo is also used for holding peer relay server disco keys. Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index ee0ee40ca..fb7f5edcb 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3907,14 +3907,18 @@ type epAddrEndpointCache struct { } // discoInfo is the info and state for the DiscoKey -// in the Conn.discoInfo map key. +// in the [Conn.discoInfo] and [relayManager.discoInfoByServerDisco] map keys. +// +// When the disco protocol is used to handshake with a peer relay server, the +// corresponding discoInfo is held in [relayManager.discoInfoByServerDisco] +// instead of [Conn.discoInfo]. // // Note that a DiscoKey does not necessarily map to exactly one // node. In the case of shared nodes and users switching accounts, two // nodes in the NetMap may legitimately have the same DiscoKey. As // such, no fields in here should be considered node-specific. type discoInfo struct { - // discoKey is the same as the Conn.discoInfo map key, + // discoKey is the same as the corresponding map key, // just so you can pass around a *discoInfo alone. // Not modified once initialized. discoKey key.DiscoPublic @@ -3925,11 +3929,13 @@ type discoInfo struct { // sharedKey is the precomputed key for communication with the // peer that has the DiscoKey used to look up this *discoInfo in - // Conn.discoInfo. + // the corresponding map. // Not modified once initialized. sharedKey key.DiscoShared - // Mutable fields follow, owned by Conn.mu: + // Mutable fields follow, owned by [Conn.mu]. These are irrelevant when + // discoInfo is a peer relay server disco key in the + // [relayManager.discoInfoByServerDisco] map: // lastPingFrom is the src of a ping for discoKey. 
lastPingFrom epAddr From c87f44b687e4b549d30fe420d45bfeebf47e5cd1 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 22 Jul 2025 18:57:24 -0500 Subject: [PATCH 1132/1708] cmd/tailscale/cli: use DNS name instead of Location to hide Mullvad exit nodes from status output Previously, we used a non-nil Location as an indicator that a peer is a Mullvad exit node. However, this is not, or no longer, reliable, since regular exit nodes may also have a non-nil Location, such as when traffic steering is enabled for a tailnet. In this PR, we update the plaintext `tailscale status` output to omit only Mullvad exit nodes, rather than all exit nodes with a non-nil Location. The JSON output remains unchanged and continues to include all peers. Updates tailscale/corp#30614 Signed-off-by: Nick Khyl --- cmd/tailscale/cli/status.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index 39e6f9fbd..726606109 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -70,6 +70,8 @@ var statusArgs struct { peers bool // in CLI mode, show status of peer machines } +const mullvadTCD = "mullvad.ts.net." + func runStatus(ctx context.Context, args []string) error { if len(args) > 0 { return errors.New("unexpected non-flag arguments to 'tailscale status'") @@ -212,9 +214,8 @@ func runStatus(ctx context.Context, args []string) error { if ps.ShareeNode { continue } - if ps.Location != nil && ps.ExitNodeOption && !ps.ExitNode { - // Location based exit nodes are only shown with the - // `exit-node list` command. + if ps.ExitNodeOption && !ps.ExitNode && strings.HasSuffix(ps.DNSName, mullvadTCD) { + // Mullvad exit nodes are only shown with the `exit-node list` command. locBasedExitNode = true continue } From 2a5d9c726993318000eb4f42a8f35e6fcc6e2798 Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Thu, 24 Jul 2025 12:20:28 -0600 Subject: [PATCH 1133/1708] VERSION.txt: this is v1.87.0 Signed-off-by: Aaron Klotz --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index f288d1114..f63427167 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.85.0 +1.87.0 From c5724425480a4bbd21442ed0138eaa374d7ba02a Mon Sep 17 00:00:00 2001 From: Danni Popova Date: Fri, 25 Jul 2025 10:21:41 +0100 Subject: [PATCH 1134/1708] cmd/tailscale: allow SSH to IPs or DNS names without MagicDNS (#16591) fixes #16381 Signed-off-by: Danni Popova --- cmd/tailscale/cli/ssh.go | 63 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 2 deletions(-) diff --git a/cmd/tailscale/cli/ssh.go b/cmd/tailscale/cli/ssh.go index ba70e97e9..9275c9a1c 100644 --- a/cmd/tailscale/cli/ssh.go +++ b/cmd/tailscale/cli/ssh.go @@ -70,12 +70,28 @@ func runSSH(ctx context.Context, args []string) error { return err } + prefs, err := localClient.GetPrefs(ctx) + if err != nil { + return err + } + // hostForSSH is the hostname we'll tell OpenSSH we're // connecting to, so we have to maintain fewer entries in the // known_hosts files. 
hostForSSH := host - if v, ok := nodeDNSNameFromArg(st, host); ok { - hostForSSH = v + ps, ok := peerStatusFromArg(st, host) + if ok { + hostForSSH = ps.DNSName + + // If MagicDNS isn't enabled on the client, + // we will use the first IPv4 we know about + // or fallback to the first IPv6 address + if !prefs.CorpDNS { + ipHost, found := ipFromPeerStatus(ps) + if found { + hostForSSH = ipHost + } + } } ssh, err := findSSH() @@ -169,11 +185,40 @@ func genKnownHosts(st *ipnstate.Status) []byte { continue } fmt.Fprintf(&buf, "%s %s\n", ps.DNSName, hostKey) + for _, ip := range ps.TailscaleIPs { + fmt.Fprintf(&buf, "%s %s\n", ip.String(), hostKey) + } } } return buf.Bytes() } +// peerStatusFromArg returns the PeerStatus that matches +// the input arg which can be a base name, full DNS name, or an IP. +func peerStatusFromArg(st *ipnstate.Status, arg string) (*ipnstate.PeerStatus, bool) { + if arg == "" { + return nil, false + } + argIP, _ := netip.ParseAddr(arg) + for _, ps := range st.Peer { + if argIP.IsValid() { + for _, ip := range ps.TailscaleIPs { + if ip == argIP { + return ps, true + } + } + continue + } + if strings.EqualFold(strings.TrimSuffix(arg, "."), strings.TrimSuffix(ps.DNSName, ".")) { + return ps, true + } + if base, _, ok := strings.Cut(ps.DNSName, "."); ok && strings.EqualFold(base, arg) { + return ps, true + } + } + return nil, false +} + // nodeDNSNameFromArg returns the PeerStatus.DNSName value from a peer // in st that matches the input arg which can be a base name, full // DNS name, or an IP. @@ -202,6 +247,20 @@ func nodeDNSNameFromArg(st *ipnstate.Status, arg string) (dnsName string, ok boo return "", false } +func ipFromPeerStatus(ps *ipnstate.PeerStatus) (string, bool) { + if len(ps.TailscaleIPs) < 1 { + return "", false + } + + // Look for a IPv4 address or default to the first IP of the list + for _, ip := range ps.TailscaleIPs { + if ip.Is4() { + return ip.String(), true + } + } + return ps.TailscaleIPs[0].String(), true +} + // getSSHClientEnvVar returns the "SSH_CLIENT" environment variable // for the current process group, if any. var getSSHClientEnvVar = func() string { From bfebf870ae3ecc5dba74cb900f4a8994a2cfd8cc Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Fri, 25 Jul 2025 10:41:02 -0600 Subject: [PATCH 1135/1708] cmd/tailscaled: update installSystemDaemonWindows to set the correct system service depndencies Fixes #16658 Signed-off-by: Aaron Klotz --- cmd/tailscaled/install_windows.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cmd/tailscaled/install_windows.go b/cmd/tailscaled/install_windows.go index c667539b0..3e5036fba 100644 --- a/cmd/tailscaled/install_windows.go +++ b/cmd/tailscaled/install_windows.go @@ -25,6 +25,15 @@ func init() { uninstallSystemDaemon = uninstallSystemDaemonWindows } +// serviceDependencies lists all system services that tailscaled depends on. +// This list must be kept in sync with the TailscaledDependencies preprocessor +// variable in the installer. 
+var serviceDependencies = []string{ + "iphlpsvc", + "netprofm", + "WinHttpAutoProxySvc", +} + func installSystemDaemonWindows(args []string) (err error) { m, err := mgr.Connect() if err != nil { @@ -48,6 +57,7 @@ func installSystemDaemonWindows(args []string) (err error) { ServiceType: windows.SERVICE_WIN32_OWN_PROCESS, StartType: mgr.StartAutomatic, ErrorControl: mgr.ErrorNormal, + Dependencies: serviceDependencies, DisplayName: serviceName, Description: "Connects this computer to others on the Tailscale network.", } From e300a00058b77691a0b8a3354fb8244af6eef59e Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Fri, 25 Jul 2025 19:45:37 -0500 Subject: [PATCH 1136/1708] cmd/k8s-operator: Enhance DNS record handling for ProxyGroup egress services (#16181) This update introduces support for DNS records associated with ProxyGroup egress services, ensuring that the ClusterIP Service IP is used instead of Pod IPs. Fixes #15945 Signed-off-by: Raj Singh --- cmd/k8s-operator/dnsrecords.go | 280 ++++++++++++++++++---------- cmd/k8s-operator/dnsrecords_test.go | 128 ++++++++++++- 2 files changed, 310 insertions(+), 98 deletions(-) diff --git a/cmd/k8s-operator/dnsrecords.go b/cmd/k8s-operator/dnsrecords.go index f91dd49ec..54c1584c6 100644 --- a/cmd/k8s-operator/dnsrecords.go +++ b/cmd/k8s-operator/dnsrecords.go @@ -31,6 +31,10 @@ import ( const ( dnsRecordsRecocilerFinalizer = "tailscale.com/dns-records-reconciler" annotationTSMagicDNSName = "tailscale.com/magic-dnsname" + + // Service types for consistent string usage + serviceTypeIngress = "ingress" + serviceTypeSvc = "svc" ) // dnsRecordsReconciler knows how to update dnsrecords ConfigMap with DNS @@ -51,7 +55,7 @@ type dnsRecordsReconciler struct { isDefaultLoadBalancer bool // true if operator is the default ingress controller in this cluster } -// Reconcile takes a reconcile.Request for a headless Service fronting a +// Reconcile takes a reconcile.Request for a Service fronting a // tailscale proxy and updates DNS Records in dnsrecords ConfigMap for the // in-cluster ts.net nameserver if required. func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { @@ -59,8 +63,8 @@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile. logger.Debugf("starting reconcile") defer logger.Debugf("reconcile finished") - headlessSvc := new(corev1.Service) - err = dnsRR.Client.Get(ctx, req.NamespacedName, headlessSvc) + proxySvc := new(corev1.Service) + err = dnsRR.Client.Get(ctx, req.NamespacedName, proxySvc) if apierrors.IsNotFound(err) { logger.Debugf("Service not found") return reconcile.Result{}, nil @@ -68,14 +72,14 @@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile. 
if err != nil { return reconcile.Result{}, fmt.Errorf("failed to get Service: %w", err) } - if !(isManagedByType(headlessSvc, "svc") || isManagedByType(headlessSvc, "ingress")) { - logger.Debugf("Service is not a headless Service for a tailscale ingress or egress proxy; do nothing") + if !(isManagedByType(proxySvc, serviceTypeSvc) || isManagedByType(proxySvc, serviceTypeIngress)) { + logger.Debugf("Service is not a proxy Service for a tailscale ingress or egress proxy; do nothing") return reconcile.Result{}, nil } - if !headlessSvc.DeletionTimestamp.IsZero() { + if !proxySvc.DeletionTimestamp.IsZero() { logger.Debug("Service is being deleted, clean up resources") - return reconcile.Result{}, dnsRR.maybeCleanup(ctx, headlessSvc, logger) + return reconcile.Result{}, dnsRR.maybeCleanup(ctx, proxySvc, logger) } // Check that there is a ts.net nameserver deployed to the cluster by @@ -99,7 +103,7 @@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile. return reconcile.Result{}, nil } - if err := dnsRR.maybeProvision(ctx, headlessSvc, logger); err != nil { + if err := dnsRR.maybeProvision(ctx, proxySvc, logger); err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { logger.Infof("optimistic lock error, retrying: %s", err) } else { @@ -111,37 +115,33 @@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile. } // maybeProvision ensures that dnsrecords ConfigMap contains a record for the -// proxy associated with the headless Service. +// proxy associated with the Service. // The record is only provisioned if the proxy is for a tailscale Ingress or // egress configured via tailscale.com/tailnet-fqdn annotation. // // For Ingress, the record is a mapping between the MagicDNSName of the Ingress, retrieved from // ingress.status.loadBalancer.ingress.hostname field and the proxy Pod IP addresses -// retrieved from the EndpoinSlice associated with this headless Service, i.e +// retrieved from the EndpointSlice associated with this Service, i.e // Records{IP4: : <[IPs of the ingress proxy Pods]>} // // For egress, the record is a mapping between tailscale.com/tailnet-fqdn // annotation and the proxy Pod IP addresses, retrieved from the EndpointSlice -// associated with this headless Service, i.e +// associated with this Service, i.e // Records{IP4: {: <[IPs of the egress proxy Pods]>} // +// For ProxyGroup egress, the record is a mapping between tailscale.com/magic-dnsname +// annotation and the ClusterIP Service IP (which provides portmapping), i.e +// Records{IP4: {: <[ClusterIP Service IP]>} +// // If records need to be created for this proxy, maybeProvision will also: -// - update the headless Service with a tailscale.com/magic-dnsname annotation -// - update the headless Service with a finalizer -func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, headlessSvc *corev1.Service, logger *zap.SugaredLogger) error { - if headlessSvc == nil { - logger.Info("[unexpected] maybeProvision called with a nil Service") - return nil - } - isEgressFQDNSvc, err := dnsRR.isSvcForFQDNEgressProxy(ctx, headlessSvc) - if err != nil { - return fmt.Errorf("error checking whether the Service is for an egress proxy: %w", err) - } - if !(isEgressFQDNSvc || isManagedByType(headlessSvc, "ingress")) { +// - update the Service with a tailscale.com/magic-dnsname annotation +// - update the Service with a finalizer +func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) error 
{ + if !dnsRR.isInterestingService(ctx, proxySvc) { logger.Debug("Service is not fronting a proxy that we create DNS records for; do nothing") return nil } - fqdn, err := dnsRR.fqdnForDNSRecord(ctx, headlessSvc, logger) + fqdn, err := dnsRR.fqdnForDNSRecord(ctx, proxySvc, logger) if err != nil { return fmt.Errorf("error determining DNS name for record: %w", err) } @@ -150,18 +150,18 @@ func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, headlessS return nil // a new reconcile will be triggered once it's added } - oldHeadlessSvc := headlessSvc.DeepCopy() - // Ensure that headless Service is annotated with a finalizer to help + oldProxySvc := proxySvc.DeepCopy() + // Ensure that proxy Service is annotated with a finalizer to help // with records cleanup when proxy resources are deleted. - if !slices.Contains(headlessSvc.Finalizers, dnsRecordsRecocilerFinalizer) { - headlessSvc.Finalizers = append(headlessSvc.Finalizers, dnsRecordsRecocilerFinalizer) + if !slices.Contains(proxySvc.Finalizers, dnsRecordsRecocilerFinalizer) { + proxySvc.Finalizers = append(proxySvc.Finalizers, dnsRecordsRecocilerFinalizer) } - // Ensure that headless Service is annotated with the current MagicDNS + // Ensure that proxy Service is annotated with the current MagicDNS // name to help with records cleanup when proxy resources are deleted or // MagicDNS name changes. - oldFqdn := headlessSvc.Annotations[annotationTSMagicDNSName] + oldFqdn := proxySvc.Annotations[annotationTSMagicDNSName] if oldFqdn != "" && oldFqdn != fqdn { // i.e user has changed the value of tailscale.com/tailnet-fqdn annotation - logger.Debugf("MagicDNS name has changed, remvoving record for %s", oldFqdn) + logger.Debugf("MagicDNS name has changed, removing record for %s", oldFqdn) updateFunc := func(rec *operatorutils.Records) { delete(rec.IP4, oldFqdn) } @@ -169,57 +169,26 @@ func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, headlessS return fmt.Errorf("error removing record for %s: %w", oldFqdn, err) } } - mak.Set(&headlessSvc.Annotations, annotationTSMagicDNSName, fqdn) - if !apiequality.Semantic.DeepEqual(oldHeadlessSvc, headlessSvc) { + mak.Set(&proxySvc.Annotations, annotationTSMagicDNSName, fqdn) + if !apiequality.Semantic.DeepEqual(oldProxySvc, proxySvc) { logger.Infof("provisioning DNS record for MagicDNS name: %s", fqdn) // this will be printed exactly once - if err := dnsRR.Update(ctx, headlessSvc); err != nil { - return fmt.Errorf("error updating proxy headless Service metadata: %w", err) + if err := dnsRR.Update(ctx, proxySvc); err != nil { + return fmt.Errorf("error updating proxy Service metadata: %w", err) } } - // Get the Pod IP addresses for the proxy from the EndpointSlices for - // the headless Service. The Service can have multiple EndpointSlices - // associated with it, for example in dual-stack clusters. - labels := map[string]string{discoveryv1.LabelServiceName: headlessSvc.Name} // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership - var eps = new(discoveryv1.EndpointSliceList) - if err := dnsRR.List(ctx, eps, client.InNamespace(dnsRR.tsNamespace), client.MatchingLabels(labels)); err != nil { - return fmt.Errorf("error listing EndpointSlices for the proxy's headless Service: %w", err) - } - if len(eps.Items) == 0 { - logger.Debugf("proxy's headless Service EndpointSlice does not yet exist. 
We will reconcile again once it's created") - return nil - } - // Each EndpointSlice for a Service can have a list of endpoints that each - // can have multiple addresses - these are the IP addresses of any Pods - // selected by that Service. Pick all the IPv4 addresses. - // It is also possible that multiple EndpointSlices have overlapping addresses. - // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#duplicate-endpoints - ips := make(set.Set[string], 0) - for _, slice := range eps.Items { - if slice.AddressType != discoveryv1.AddressTypeIPv4 { - logger.Infof("EndpointSlice is for AddressType %s, currently only IPv4 address type is supported", slice.AddressType) - continue - } - for _, ep := range slice.Endpoints { - if !epIsReady(&ep) { - logger.Debugf("Endpoint with addresses %v appears not ready to receive traffic %v", ep.Addresses, ep.Conditions.String()) - continue - } - for _, ip := range ep.Addresses { - if !net.IsIPv4String(ip) { - logger.Infof("EndpointSlice contains IP address %q that is not IPv4, ignoring. Currently only IPv4 is supported", ip) - } else { - ips.Add(ip) - } - } - } + // Get the IP addresses for the DNS record + ips, err := dnsRR.getTargetIPs(ctx, proxySvc, logger) + if err != nil { + return fmt.Errorf("error getting target IPs: %w", err) } - if ips.Len() == 0 { - logger.Debugf("EndpointSlice for the Service contains no IPv4 addresses. We will reconcile again once they are created.") + if len(ips) == 0 { + logger.Debugf("No target IP addresses available yet. We will reconcile again once they are available.") return nil } + updateFunc := func(rec *operatorutils.Records) { - mak.Set(&rec.IP4, fqdn, ips.Slice()) + mak.Set(&rec.IP4, fqdn, ips) } if err = dnsRR.updateDNSConfig(ctx, updateFunc); err != nil { return fmt.Errorf("error updating DNS records: %w", err) @@ -243,8 +212,8 @@ func epIsReady(ep *discoveryv1.Endpoint) bool { // has been removed from the Service. If the record is not found in the // ConfigMap, the ConfigMap does not exist, or the Service does not have // tailscale.com/magic-dnsname annotation, just remove the finalizer. 
-func (h *dnsRecordsReconciler) maybeCleanup(ctx context.Context, headlessSvc *corev1.Service, logger *zap.SugaredLogger) error { - ix := slices.Index(headlessSvc.Finalizers, dnsRecordsRecocilerFinalizer) +func (h *dnsRecordsReconciler) maybeCleanup(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) error { + ix := slices.Index(proxySvc.Finalizers, dnsRecordsRecocilerFinalizer) if ix == -1 { logger.Debugf("no finalizer, nothing to do") return nil @@ -252,24 +221,24 @@ func (h *dnsRecordsReconciler) maybeCleanup(ctx context.Context, headlessSvc *co cm := &corev1.ConfigMap{} err := h.Client.Get(ctx, types.NamespacedName{Name: operatorutils.DNSRecordsCMName, Namespace: h.tsNamespace}, cm) if apierrors.IsNotFound(err) { - logger.Debug("'dsnrecords' ConfigMap not found") - return h.removeHeadlessSvcFinalizer(ctx, headlessSvc) + logger.Debug("'dnsrecords' ConfigMap not found") + return h.removeProxySvcFinalizer(ctx, proxySvc) } if err != nil { return fmt.Errorf("error retrieving 'dnsrecords' ConfigMap: %w", err) } if cm.Data == nil { logger.Debug("'dnsrecords' ConfigMap contains no records") - return h.removeHeadlessSvcFinalizer(ctx, headlessSvc) + return h.removeProxySvcFinalizer(ctx, proxySvc) } _, ok := cm.Data[operatorutils.DNSRecordsCMKey] if !ok { logger.Debug("'dnsrecords' ConfigMap contains no records") - return h.removeHeadlessSvcFinalizer(ctx, headlessSvc) + return h.removeProxySvcFinalizer(ctx, proxySvc) } - fqdn, _ := headlessSvc.GetAnnotations()[annotationTSMagicDNSName] + fqdn, _ := proxySvc.GetAnnotations()[annotationTSMagicDNSName] if fqdn == "" { - return h.removeHeadlessSvcFinalizer(ctx, headlessSvc) + return h.removeProxySvcFinalizer(ctx, proxySvc) } logger.Infof("removing DNS record for MagicDNS name %s", fqdn) updateFunc := func(rec *operatorutils.Records) { @@ -278,27 +247,28 @@ func (h *dnsRecordsReconciler) maybeCleanup(ctx context.Context, headlessSvc *co if err = h.updateDNSConfig(ctx, updateFunc); err != nil { return fmt.Errorf("error updating DNS config: %w", err) } - return h.removeHeadlessSvcFinalizer(ctx, headlessSvc) + return h.removeProxySvcFinalizer(ctx, proxySvc) } -func (dnsRR *dnsRecordsReconciler) removeHeadlessSvcFinalizer(ctx context.Context, headlessSvc *corev1.Service) error { - idx := slices.Index(headlessSvc.Finalizers, dnsRecordsRecocilerFinalizer) +func (dnsRR *dnsRecordsReconciler) removeProxySvcFinalizer(ctx context.Context, proxySvc *corev1.Service) error { + idx := slices.Index(proxySvc.Finalizers, dnsRecordsRecocilerFinalizer) if idx == -1 { return nil } - headlessSvc.Finalizers = append(headlessSvc.Finalizers[:idx], headlessSvc.Finalizers[idx+1:]...) - return dnsRR.Update(ctx, headlessSvc) + proxySvc.Finalizers = slices.Delete(proxySvc.Finalizers, idx, idx+1) + return dnsRR.Update(ctx, proxySvc) } -// fqdnForDNSRecord returns MagicDNS name associated with a given headless Service. -// If the headless Service is for a tailscale Ingress proxy, returns ingress.status.loadBalancer.ingress.hostname. -// If the headless Service is for an tailscale egress proxy configured via tailscale.com/tailnet-fqdn annotation, returns the annotation value. -// This function is not expected to be called with headless Services for other +// fqdnForDNSRecord returns MagicDNS name associated with a given proxy Service. +// If the proxy Service is for a tailscale Ingress proxy, returns ingress.status.loadBalancer.ingress.hostname. 
+// If the proxy Service is for an tailscale egress proxy configured via tailscale.com/tailnet-fqdn annotation, returns the annotation value. +// For ProxyGroup egress Services, returns the tailnet-fqdn annotation from the parent Service. +// This function is not expected to be called with proxy Services for other // proxy types, or any other Services, but it just returns an empty string if // that happens. -func (dnsRR *dnsRecordsReconciler) fqdnForDNSRecord(ctx context.Context, headlessSvc *corev1.Service, logger *zap.SugaredLogger) (string, error) { - parentName := parentFromObjectLabels(headlessSvc) - if isManagedByType(headlessSvc, "ingress") { +func (dnsRR *dnsRecordsReconciler) fqdnForDNSRecord(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) (string, error) { + parentName := parentFromObjectLabels(proxySvc) + if isManagedByType(proxySvc, serviceTypeIngress) { ing := new(networkingv1.Ingress) if err := dnsRR.Get(ctx, parentName, ing); err != nil { return "", err @@ -308,10 +278,10 @@ func (dnsRR *dnsRecordsReconciler) fqdnForDNSRecord(ctx context.Context, headles } return ing.Status.LoadBalancer.Ingress[0].Hostname, nil } - if isManagedByType(headlessSvc, "svc") { + if isManagedByType(proxySvc, serviceTypeSvc) { svc := new(corev1.Service) if err := dnsRR.Get(ctx, parentName, svc); apierrors.IsNotFound(err) { - logger.Info("[unexpected] parent Service for egress proxy %s not found", headlessSvc.Name) + logger.Infof("[unexpected] parent Service for egress proxy %s not found", proxySvc.Name) return "", nil } else if err != nil { return "", err @@ -328,7 +298,7 @@ func (dnsRR *dnsRecordsReconciler) updateDNSConfig(ctx context.Context, update f cm := &corev1.ConfigMap{} err := dnsRR.Get(ctx, types.NamespacedName{Name: operatorutils.DNSRecordsCMName, Namespace: dnsRR.tsNamespace}, cm) if apierrors.IsNotFound(err) { - dnsRR.logger.Info("[unexpected] dnsrecords ConfigMap not found in cluster. Not updating DNS records. Please open an isue and attach operator logs.") + dnsRR.logger.Info("[unexpected] dnsrecords ConfigMap not found in cluster. Not updating DNS records. Please open an issue and attach operator logs.") return nil } if err != nil { @@ -366,3 +336,119 @@ func (dnsRR *dnsRecordsReconciler) isSvcForFQDNEgressProxy(ctx context.Context, annots := parentSvc.Annotations return annots != nil && annots[AnnotationTailnetTargetFQDN] != "", nil } + +// isProxyGroupEgressService reports whether the Service is a ClusterIP Service +// created for ProxyGroup egress. For ProxyGroup egress, there are no headless +// services. Instead, the DNS reconciler processes the ClusterIP Service +// directly, which has portmapping and should use its own IP for DNS records. +func (dnsRR *dnsRecordsReconciler) isProxyGroupEgressService(svc *corev1.Service) bool { + return svc.GetLabels()[labelProxyGroup] != "" && + svc.GetLabels()[labelSvcType] == typeEgress && + svc.Spec.Type == corev1.ServiceTypeClusterIP && + isManagedByType(svc, serviceTypeSvc) +} + +// isInterestingService reports whether the Service is one that we should create +// DNS records for. 
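+// It reports true for the Service fronting a tailscale Ingress proxy, for an
+// egress proxy Service whose parent Service carries the
+// tailscale.com/tailnet-fqdn annotation, and for a ProxyGroup egress ClusterIP
+// Service whose parent Service targets an FQDN; all other Services are
+// ignored.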
+func (dnsRR *dnsRecordsReconciler) isInterestingService(ctx context.Context, svc *corev1.Service) bool { + if isManagedByType(svc, serviceTypeIngress) { + return true + } + + isEgressFQDNSvc, err := dnsRR.isSvcForFQDNEgressProxy(ctx, svc) + if err != nil { + return false + } + if isEgressFQDNSvc { + return true + } + + if dnsRR.isProxyGroupEgressService(svc) { + return dnsRR.parentSvcTargetsFQDN(ctx, svc) + } + + return false +} + +// parentSvcTargetsFQDN reports whether the parent Service of a ProxyGroup +// egress Service has an FQDN target (not an IP target). +func (dnsRR *dnsRecordsReconciler) parentSvcTargetsFQDN(ctx context.Context, svc *corev1.Service) bool { + + parentName := parentFromObjectLabels(svc) + parentSvc := new(corev1.Service) + if err := dnsRR.Get(ctx, parentName, parentSvc); err != nil { + return false + } + + return parentSvc.Annotations[AnnotationTailnetTargetFQDN] != "" +} + +// getTargetIPs returns the IP addresses that should be used for DNS records +// for the given proxy Service. +func (dnsRR *dnsRecordsReconciler) getTargetIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { + if dnsRR.isProxyGroupEgressService(proxySvc) { + return dnsRR.getClusterIPServiceIPs(proxySvc, logger) + } + return dnsRR.getPodIPs(ctx, proxySvc, logger) +} + +// getClusterIPServiceIPs returns the ClusterIP of a ProxyGroup egress Service. +func (dnsRR *dnsRecordsReconciler) getClusterIPServiceIPs(proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { + if proxySvc.Spec.ClusterIP == "" || proxySvc.Spec.ClusterIP == "None" { + logger.Debugf("ProxyGroup egress ClusterIP Service does not have a ClusterIP yet.") + return nil, nil + } + // Validate that ClusterIP is a valid IPv4 address + if !net.IsIPv4String(proxySvc.Spec.ClusterIP) { + logger.Debugf("ClusterIP %s is not a valid IPv4 address", proxySvc.Spec.ClusterIP) + return nil, fmt.Errorf("ClusterIP %s is not a valid IPv4 address", proxySvc.Spec.ClusterIP) + } + logger.Debugf("Using ClusterIP Service IP %s for ProxyGroup egress DNS record", proxySvc.Spec.ClusterIP) + return []string{proxySvc.Spec.ClusterIP}, nil +} + +// getPodIPs returns Pod IP addresses from EndpointSlices for non-ProxyGroup Services. +func (dnsRR *dnsRecordsReconciler) getPodIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { + // Get the Pod IP addresses for the proxy from the EndpointSlices for + // the headless Service. The Service can have multiple EndpointSlices + // associated with it, for example in dual-stack clusters. + labels := map[string]string{discoveryv1.LabelServiceName: proxySvc.Name} // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership + var eps = new(discoveryv1.EndpointSliceList) + if err := dnsRR.List(ctx, eps, client.InNamespace(dnsRR.tsNamespace), client.MatchingLabels(labels)); err != nil { + return nil, fmt.Errorf("error listing EndpointSlices for the proxy's Service: %w", err) + } + if len(eps.Items) == 0 { + logger.Debugf("proxy's Service EndpointSlice does not yet exist.") + return nil, nil + } + // Each EndpointSlice for a Service can have a list of endpoints that each + // can have multiple addresses - these are the IP addresses of any Pods + // selected by that Service. Pick all the IPv4 addresses. + // It is also possible that multiple EndpointSlices have overlapping addresses. 
+ // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#duplicate-endpoints + ips := make(set.Set[string], 0) + for _, slice := range eps.Items { + if slice.AddressType != discoveryv1.AddressTypeIPv4 { + logger.Infof("EndpointSlice is for AddressType %s, currently only IPv4 address type is supported", slice.AddressType) + continue + } + for _, ep := range slice.Endpoints { + if !epIsReady(&ep) { + logger.Debugf("Endpoint with addresses %v appears not ready to receive traffic %v", ep.Addresses, ep.Conditions.String()) + continue + } + for _, ip := range ep.Addresses { + if !net.IsIPv4String(ip) { + logger.Infof("EndpointSlice contains IP address %q that is not IPv4, ignoring. Currently only IPv4 is supported", ip) + } else { + ips.Add(ip) + } + } + } + } + if ips.Len() == 0 { + logger.Debugf("EndpointSlice for the Service contains no IPv4 addresses.") + return nil, nil + } + return ips.Slice(), nil +} diff --git a/cmd/k8s-operator/dnsrecords_test.go b/cmd/k8s-operator/dnsrecords_test.go index 4e73e6c9e..51dfb9049 100644 --- a/cmd/k8s-operator/dnsrecords_test.go +++ b/cmd/k8s-operator/dnsrecords_test.go @@ -18,6 +18,7 @@ import ( networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" operatorutils "tailscale.com/k8s-operator" @@ -66,7 +67,7 @@ func TestDNSRecordsReconciler(t *testing.T) { } cl := tstest.NewClock(tstest.ClockOpts{}) // Set the ready condition of the DNSConfig - mustUpdateStatus[tsapi.DNSConfig](t, fc, "", "test", func(c *tsapi.DNSConfig) { + mustUpdateStatus(t, fc, "", "test", func(c *tsapi.DNSConfig) { operatorutils.SetDNSConfigCondition(c, tsapi.NameserverReady, metav1.ConditionTrue, reasonNameserverCreated, reasonNameserverCreated, 0, cl, zl.Sugar()) }) dnsRR := &dnsRecordsReconciler{ @@ -156,6 +157,131 @@ func TestDNSRecordsReconciler(t *testing.T) { expectReconciled(t, dnsRR, "tailscale", "ts-ingress") wantHosts["another.ingress.ts.net"] = []string{"1.2.3.4"} expectHostsRecords(t, fc, wantHosts) + + // 8. 
DNS record is created for ProxyGroup egress using ClusterIP Service IP instead of Pod IPs + t.Log("test case 8: ProxyGroup egress") + + // Create the parent ExternalName service with tailnet-fqdn annotation + parentEgressSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "external-service", + Namespace: "default", + Annotations: map[string]string{ + AnnotationTailnetTargetFQDN: "external-service.example.ts.net", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "unused", + }, + } + mustCreate(t, fc, parentEgressSvc) + + proxyGroupEgressSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ts-proxygroup-egress-abcd1", + Namespace: "tailscale", + Labels: map[string]string{ + kubetypes.LabelManaged: "true", + LabelParentName: "external-service", + LabelParentNamespace: "default", + LabelParentType: "svc", + labelProxyGroup: "test-proxy-group", + labelSvcType: typeEgress, + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + ClusterIP: "10.0.100.50", // This IP should be used in DNS, not Pod IPs + Ports: []corev1.ServicePort{{ + Port: 443, + TargetPort: intstr.FromInt(10443), // Port mapping + }}, + }, + } + + // Create EndpointSlice with Pod IPs (these should NOT be used in DNS records) + proxyGroupEps := &discoveryv1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ts-proxygroup-egress-abcd1-ipv4", + Namespace: "tailscale", + Labels: map[string]string{ + discoveryv1.LabelServiceName: "ts-proxygroup-egress-abcd1", + kubetypes.LabelManaged: "true", + LabelParentName: "external-service", + LabelParentNamespace: "default", + LabelParentType: "svc", + labelProxyGroup: "test-proxy-group", + labelSvcType: typeEgress, + }, + }, + AddressType: discoveryv1.AddressTypeIPv4, + Endpoints: []discoveryv1.Endpoint{{ + Addresses: []string{"10.1.0.100", "10.1.0.101", "10.1.0.102"}, // Pod IPs that should NOT be used + Conditions: discoveryv1.EndpointConditions{ + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), + }, + }}, + Ports: []discoveryv1.EndpointPort{{ + Port: ptr.To(int32(10443)), + }}, + } + + mustCreate(t, fc, proxyGroupEgressSvc) + mustCreate(t, fc, proxyGroupEps) + expectReconciled(t, dnsRR, "tailscale", "ts-proxygroup-egress-abcd1") + + // Verify DNS record uses ClusterIP Service IP, not Pod IPs + wantHosts["external-service.example.ts.net"] = []string{"10.0.100.50"} + expectHostsRecords(t, fc, wantHosts) + + // 9. ProxyGroup egress DNS record updates when ClusterIP changes + t.Log("test case 9: ProxyGroup egress ClusterIP change") + mustUpdate(t, fc, "tailscale", "ts-proxygroup-egress-abcd1", func(svc *corev1.Service) { + svc.Spec.ClusterIP = "10.0.100.51" + }) + expectReconciled(t, dnsRR, "tailscale", "ts-proxygroup-egress-abcd1") + wantHosts["external-service.example.ts.net"] = []string{"10.0.100.51"} + expectHostsRecords(t, fc, wantHosts) + + // 10. 
Test ProxyGroup service deletion and DNS cleanup + t.Log("test case 10: ProxyGroup egress service deletion") + mustDeleteAll(t, fc, proxyGroupEgressSvc) + expectReconciled(t, dnsRR, "tailscale", "ts-proxygroup-egress-abcd1") + delete(wantHosts, "external-service.example.ts.net") + expectHostsRecords(t, fc, wantHosts) +} + +func TestDNSRecordsReconcilerErrorCases(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + dnsRR := &dnsRecordsReconciler{ + logger: zl.Sugar(), + } + + testSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: corev1.ServiceSpec{Type: corev1.ServiceTypeClusterIP}, + } + + // Test invalid IP format + testSvc.Spec.ClusterIP = "invalid-ip" + _, err = dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) + if err == nil { + t.Error("expected error for invalid IP format") + } + + // Test valid IP + testSvc.Spec.ClusterIP = "10.0.100.50" + _, err = dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) + if err != nil { + t.Errorf("unexpected error for valid IP: %v", err) + } } func headlessSvcForParent(o client.Object, typ string) *corev1.Service { From 5154bbb0b3f556b7cc1c7ba2f92eea92b4d3bfb9 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Mon, 28 Jul 2025 11:15:14 +0100 Subject: [PATCH 1137/1708] k8s-operator: adding session type to cast header (#16660) Updates #16490 Signed-off-by: chaosinthecrd --- k8s-operator/sessionrecording/hijacker.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index 675a9b1dd..0df72b6c3 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -184,9 +184,10 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, SrcNode: strings.TrimSuffix(h.who.Node.Name, "."), SrcNodeID: h.who.Node.StableID, Kubernetes: &sessionrecording.Kubernetes{ - PodName: h.pod, - Namespace: h.ns, - Container: container, + PodName: h.pod, + Namespace: h.ns, + Container: container, + SessionType: string(h.sessionType), }, } if !h.who.Node.IsTagged() { From 57318695656ec693f75fc858ea853ee4c4591f57 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Mon, 28 Jul 2025 11:38:34 +0100 Subject: [PATCH 1138/1708] health: add an ETag to UnhealthyState for change detection Updates tailscale/corp#30596 Signed-off-by: James Sanderson --- control/controlclient/map_test.go | 4 +- health/health_test.go | 176 +++++++++++++++++++++++++++++- health/state.go | 38 ++++++- ipn/ipnlocal/local_test.go | 4 +- 4 files changed, 215 insertions(+), 7 deletions(-) diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index 7e42f6f6a..ff5df8207 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -1361,7 +1361,7 @@ func TestNetmapHealthIntegration(t *testing.T) { } } - if d := cmp.Diff(want, got); d != "" { + if d := cmp.Diff(want, got, cmpopts.IgnoreFields(health.UnhealthyState{}, "ETag")); d != "" { t.Fatalf("CurrentStatus().Warnings[\"control-health*\"] different than expected (-want +got)\n%s", d) } } @@ -1414,7 +1414,7 @@ func TestNetmapDisplayMessageIntegration(t *testing.T) { }, } - if diff := cmp.Diff(want, state.Warnings); diff != "" { + if diff := cmp.Diff(want, state.Warnings, cmpopts.IgnoreFields(health.UnhealthyState{}, "ETag")); diff != "" { t.Errorf("unexpected message contents (-want +got):\n%s", diff) } } diff --git a/health/health_test.go b/health/health_test.go index 
53f012ecf..d66cea06c 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -13,8 +13,10 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/tstime" "tailscale.com/types/opt" "tailscale.com/util/usermetric" "tailscale.com/version" @@ -517,7 +519,7 @@ func TestControlHealth(t *testing.T) { delete(gotWarns, k) } } - if diff := cmp.Diff(wantWarns, gotWarns); diff != "" { + if diff := cmp.Diff(wantWarns, gotWarns, cmpopts.IgnoreFields(UnhealthyState{}, "ETag")); diff != "" { t.Fatalf(`CurrentState().Warnings["control-health-*"] wrong (-want +got):\n%s`, diff) } }) @@ -664,3 +666,175 @@ func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { t.Error("watcher got called, want it to not be called") } } + +// TestCurrentStateETagControlHealth tests that the ETag on an [UnhealthyState] +// created from Control health & returned by [Tracker.CurrentState] is different +// when the details of the [tailcfg.DisplayMessage] are different. +func TestCurrentStateETagControlHealth(t *testing.T) { + ht := Tracker{} + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + + msg := tailcfg.DisplayMessage{ + Title: "Test Warning", + Text: "This is a test warning.", + Severity: tailcfg.SeverityHigh, + ImpactsConnectivity: true, + PrimaryAction: &tailcfg.DisplayMessageAction{ + URL: "https://example.com/", + Label: "open", + }, + } + + type test struct { + name string + change func(tailcfg.DisplayMessage) tailcfg.DisplayMessage + wantChangedETag bool + } + tests := []test{ + { + name: "same_value", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { return m }, + wantChangedETag: false, + }, + { + name: "different_severity", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.Severity = tailcfg.SeverityLow + return m + }, + wantChangedETag: true, + }, + { + name: "different_title", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.Title = "Different Title" + return m + }, + wantChangedETag: true, + }, + { + name: "different_text", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.Text = "This is a different text." 
+ return m + }, + wantChangedETag: true, + }, + { + name: "different_impacts_connectivity", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.ImpactsConnectivity = false + return m + }, + wantChangedETag: true, + }, + { + name: "different_primary_action_label", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.PrimaryAction.Label = "new_label" + return m + }, + wantChangedETag: true, + }, + { + name: "different_primary_action_url", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.PrimaryAction.URL = "https://new.example.com/" + return m + }, + wantChangedETag: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test-message": msg, + }) + state := ht.CurrentState().Warnings["control-health.test-message"] + + newMsg := test.change(msg) + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test-message": newMsg, + }) + newState := ht.CurrentState().Warnings["control-health.test-message"] + + if (state.ETag != newState.ETag) != test.wantChangedETag { + if test.wantChangedETag { + t.Errorf("got unchanged ETag, want changed (ETag was %q)", newState.ETag) + } else { + t.Errorf("got changed ETag, want unchanged") + } + } + }) + } +} + +// TestCurrentStateETagWarnable tests that the ETag on an [UnhealthyState] +// created from a Warnable & returned by [Tracker.CurrentState] is different +// when the details of the Warnable are different. +func TestCurrentStateETagWarnable(t *testing.T) { + newTracker := func(clock tstime.Clock) *Tracker { + ht := &Tracker{ + testClock: clock, + } + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + return ht + } + + t.Run("new_args", func(t *testing.T) { + ht := newTracker(nil) + + ht.SetUnhealthy(testWarnable, Args{ArgError: "initial value"}) + state := ht.CurrentState().Warnings[testWarnable.Code] + + ht.SetUnhealthy(testWarnable, Args{ArgError: "new value"}) + newState := ht.CurrentState().Warnings[testWarnable.Code] + + if state.ETag == newState.ETag { + t.Errorf("got unchanged ETag, want changed (ETag was %q)", newState.ETag) + } + }) + + t.Run("new_broken_since", func(t *testing.T) { + clock1 := tstest.NewClock(tstest.ClockOpts{ + Start: time.Unix(123, 0), + }) + ht1 := newTracker(clock1) + + ht1.SetUnhealthy(testWarnable, Args{}) + state := ht1.CurrentState().Warnings[testWarnable.Code] + + // Use a second tracker to get a different broken since time + clock2 := tstest.NewClock(tstest.ClockOpts{ + Start: time.Unix(456, 0), + }) + ht2 := newTracker(clock2) + + ht2.SetUnhealthy(testWarnable, Args{}) + newState := ht2.CurrentState().Warnings[testWarnable.Code] + + if state.ETag == newState.ETag { + t.Errorf("got unchanged ETag, want changed (ETag was %q)", newState.ETag) + } + }) + + t.Run("no_change", func(t *testing.T) { + clock := tstest.NewClock(tstest.ClockOpts{}) + ht1 := newTracker(clock) + + ht1.SetUnhealthy(testWarnable, Args{}) + state := ht1.CurrentState().Warnings[testWarnable.Code] + + // Using a second tracker because SetUnhealthy with no changes is a no-op + ht2 := newTracker(clock) + ht2.SetUnhealthy(testWarnable, Args{}) + newState := ht2.CurrentState().Warnings[testWarnable.Code] + + if state.ETag != newState.ETag { + t.Errorf("got changed ETag, want unchanged") + } + }) +} diff --git a/health/state.go b/health/state.go index b5e6a8a38..116518629 100644 --- a/health/state.go +++ b/health/state.go @@ -4,6 +4,9 @@ package health 
import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" "time" "tailscale.com/tailcfg" @@ -35,6 +38,36 @@ type UnhealthyState struct { DependsOn []WarnableCode `json:",omitempty"` ImpactsConnectivity bool `json:",omitempty"` PrimaryAction *UnhealthyStateAction `json:",omitempty"` + + // ETag identifies a specific version of an UnhealthyState. If the contents + // of the other fields of two UnhealthyStates are the same, the ETags will + // be the same. If the contents differ, the ETags will also differ. The + // implementation is not defined and the value is opaque: it might be a + // hash, it might be a simple counter. Implementations should not rely on + // any specific implementation detail or format of the ETag string other + // than string (in)equality. + ETag string `json:",omitzero"` +} + +// hash computes a deep hash of UnhealthyState which will be stable across +// different runs of the same binary. +func (u UnhealthyState) hash() []byte { + hasher := sha256.New() + enc := json.NewEncoder(hasher) + + // hash.Hash.Write never returns an error, so this will only fail if u is + // not marshalable, in which case we have much bigger problems. + _ = enc.Encode(u) + return hasher.Sum(nil) +} + +// withETag returns a copy of UnhealthyState with an ETag set. The ETag will be +// the same for all UnhealthyState instances that are equal. If calculating the +// ETag errors, it returns a copy of the UnhealthyState with an empty ETag. +func (u UnhealthyState) withETag() UnhealthyState { + u.ETag = "" + u.ETag = hex.EncodeToString(u.hash()) + return u } // UnhealthyStateAction represents an action (URL and link) to be presented to @@ -107,7 +140,8 @@ func (t *Tracker) CurrentState() *State { // that are unhealthy. continue } - wm[w.Code] = *w.unhealthyState(ws) + state := w.unhealthyState(ws) + wm[w.Code] = state.withETag() } for id, msg := range t.lastNotifiedControlMessages { @@ -127,7 +161,7 @@ func (t *Tracker) CurrentState() *State { } } - wm[state.WarnableCode] = state + wm[state.WarnableCode] = state.withETag() } return &State{ diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index dd2837022..37b81c84b 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -6807,7 +6807,7 @@ func TestDisplayMessagesURLFilter(t *testing.T) { Severity: health.SeverityHigh, } - if diff := cmp.Diff(want, got); diff != "" { + if diff := cmp.Diff(want, got, cmpopts.IgnoreFields(health.UnhealthyState{}, "ETag")); diff != "" { t.Errorf("Unexpected message content (-want/+got):\n%s", diff) } } @@ -6879,7 +6879,7 @@ func TestDisplayMessageIPNBus(t *testing.T) { } got, ok := n.Health.Warnings[wantID] if ok { - if diff := cmp.Diff(tt.wantWarning, got); diff != "" { + if diff := cmp.Diff(tt.wantWarning, got, cmpopts.IgnoreFields(health.UnhealthyState{}, "ETag")); diff != "" { t.Errorf("unexpected warning details (-want/+got):\n%s", diff) return true // we failed the test so tell the watcher we've seen what we need to to stop it waiting } From 02084629e208db6e7601a00777619697b49ac770 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Mon, 28 Jul 2025 12:03:08 +0100 Subject: [PATCH 1139/1708] k8s-operator: handle multiple WebSocket frames per read (#16678) When kubectl starts an interactive attach session, it sends 2 resize messages in quick succession. It seems that particularly in HTTP mode, we often receive both of these WebSocket frames from the underlying connection in a single read. 
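As a rough illustration (not part of the change itself), one read from the connection can hand the parser two complete binary data frames back to back, along the lines of:

  0x82 <len> 0x04 {"width":10,"height":20}  0x82 <len> 0x04 {"width":10,"height":20}

where 0x82 is the FIN bit plus the binary opcode and 0x04 is the Kubernetes resize stream ID; the new resize_data_frame_two_in_one_read test case exercises this shape.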
However, our parser currently assumes 0-1 frames per read, and leaves the second frame in the read buffer until the next read from the underlying connection. It doesn't take long after that before we end up failing to skip a control message as we normally should, and then we parse a control message as though it will have a stream ID (part of the Kubernetes protocol) and error out. Instead, we should keep parsing frames from the read buffer for as long as we're able to parse complete frames, so this commit refactors the messages parsing logic into a loop based on the contents of the read buffer being non-empty. k/k staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go for full details of the resize messages. There are at least a couple more multiple-frame read edge cases we should handle, but this commit is very conservatively fixing a single observed issue to make it a low-risk candidate for cherry picking. Updates #13358 Change-Id: Iafb91ad1cbeed9c5231a1525d4563164fc1f002f Signed-off-by: Tom Proctor --- k8s-operator/api-proxy/proxy.go | 6 +- k8s-operator/sessionrecording/hijacker.go | 1 - k8s-operator/sessionrecording/ws/conn.go | 97 ++++++++++--------- k8s-operator/sessionrecording/ws/conn_test.go | 32 +++++- k8s-operator/sessionrecording/ws/message.go | 4 +- 5 files changed, 86 insertions(+), 54 deletions(-) diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index c648e1622..ff0373270 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -114,8 +114,9 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { mux.HandleFunc("GET /api/v1/namespaces/{namespace}/pods/{pod}/attach", ap.serveAttachWS) ap.hs = &http.Server{ - Handler: mux, - ErrorLog: zap.NewStdLog(ap.log.Desugar()), + Handler: mux, + ErrorLog: zap.NewStdLog(ap.log.Desugar()), + TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), } mode := "noauth" @@ -140,7 +141,6 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { GetCertificate: ap.lc.GetCertificate, NextProtos: []string{"http/1.1"}, } - ap.hs.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) } else { var err error tsLn, err = ap.ts.Listen("tcp", ":80") diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index 0df72b6c3..789a9fdb9 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -237,7 +237,6 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, if err := lc.Close(); err != nil { h.log.Infof("error closing recorder connections: %v", err) } - return }() return lc, nil } diff --git a/k8s-operator/sessionrecording/ws/conn.go b/k8s-operator/sessionrecording/ws/conn.go index 0d8aefaac..a34379658 100644 --- a/k8s-operator/sessionrecording/ws/conn.go +++ b/k8s-operator/sessionrecording/ws/conn.go @@ -148,6 +148,8 @@ func (c *conn) Read(b []byte) (int, error) { return 0, nil } + // TODO(tomhjp): If we get multiple frames in a single Read with different + // types, we may parse the second frame with the wrong type. typ := messageType(opcode(b)) if (typ == noOpcode && c.readMsgIsIncomplete()) || c.readBufHasIncompleteFragment() { // subsequent fragment if typ, err = c.curReadMsgType(); err != nil { @@ -157,6 +159,8 @@ func (c *conn) Read(b []byte) (int, error) { // A control message can not be fragmented and we are not interested in // these messages. Just return. 
+ // TODO(tomhjp): If we get multiple frames in a single Read, we may skip + // some non-control messages. if isControlMessage(typ) { return n, nil } @@ -169,62 +173,65 @@ func (c *conn) Read(b []byte) (int, error) { return n, nil } - readMsg := &message{typ: typ} // start a new message... - // ... or pick up an already started one if the previous fragment was not final. - if c.readMsgIsIncomplete() || c.readBufHasIncompleteFragment() { - readMsg = c.currentReadMsg - } - if _, err := c.readBuf.Write(b[:n]); err != nil { return 0, fmt.Errorf("[unexpected] error writing message contents to read buffer: %w", err) } - ok, err := readMsg.Parse(c.readBuf.Bytes(), c.log) - if err != nil { - return 0, fmt.Errorf("error parsing message: %v", err) - } - if !ok { // incomplete fragment - return n, nil - } - c.readBuf.Next(len(readMsg.raw)) - - if readMsg.isFinalized && !c.readMsgIsIncomplete() { - // we want to send stream resize messages for terminal sessions - // Stream IDs for websocket streams are static. - // https://github.com/kubernetes/client-go/blob/v0.30.0-rc.1/tools/remotecommand/websocket.go#L218 - if readMsg.streamID.Load() == remotecommand.StreamResize && c.hasTerm { - var msg tsrecorder.ResizeMsg - if err = json.Unmarshal(readMsg.payload, &msg); err != nil { - return 0, fmt.Errorf("error umarshalling resize message: %w", err) - } + for c.readBuf.Len() != 0 { + readMsg := &message{typ: typ} // start a new message... + // ... or pick up an already started one if the previous fragment was not final. + if c.readMsgIsIncomplete() { + readMsg = c.currentReadMsg + } - c.ch.Width = msg.Width - c.ch.Height = msg.Height - - var isInitialResize bool - c.writeCastHeaderOnce.Do(func() { - isInitialResize = true - // If this is a session with a terminal attached, - // we must wait for the terminal width and - // height to be parsed from a resize message - // before sending CastHeader, else tsrecorder - // will not be able to play this recording. - err = c.rec.WriteCastHeader(c.ch) - close(c.initialCastHeaderSent) - }) - if err != nil { - return 0, fmt.Errorf("error writing CastHeader: %w", err) - } + ok, err := readMsg.Parse(c.readBuf.Bytes(), c.log) + if err != nil { + return 0, fmt.Errorf("error parsing message: %v", err) + } + if !ok { // incomplete fragment + return n, nil + } + c.readBuf.Next(len(readMsg.raw)) + + if readMsg.isFinalized && !c.readMsgIsIncomplete() { + // we want to send stream resize messages for terminal sessions + // Stream IDs for websocket streams are static. + // https://github.com/kubernetes/client-go/blob/v0.30.0-rc.1/tools/remotecommand/websocket.go#L218 + if readMsg.streamID.Load() == remotecommand.StreamResize && c.hasTerm { + var msg tsrecorder.ResizeMsg + if err = json.Unmarshal(readMsg.payload, &msg); err != nil { + return 0, fmt.Errorf("error umarshalling resize message: %w", err) + } + + c.ch.Width = msg.Width + c.ch.Height = msg.Height + + var isInitialResize bool + c.writeCastHeaderOnce.Do(func() { + isInitialResize = true + // If this is a session with a terminal attached, + // we must wait for the terminal width and + // height to be parsed from a resize message + // before sending CastHeader, else tsrecorder + // will not be able to play this recording. 
+ err = c.rec.WriteCastHeader(c.ch) + close(c.initialCastHeaderSent) + }) + if err != nil { + return 0, fmt.Errorf("error writing CastHeader: %w", err) + } - if !isInitialResize { - if err := c.rec.WriteResize(msg.Height, msg.Width); err != nil { - return 0, fmt.Errorf("error writing resize message: %w", err) + if !isInitialResize { + if err := c.rec.WriteResize(msg.Height, msg.Width); err != nil { + return 0, fmt.Errorf("error writing resize message: %w", err) + } } } } + + c.currentReadMsg = readMsg } - c.currentReadMsg = readMsg return n, nil } diff --git a/k8s-operator/sessionrecording/ws/conn_test.go b/k8s-operator/sessionrecording/ws/conn_test.go index f29154c62..5e23845a7 100644 --- a/k8s-operator/sessionrecording/ws/conn_test.go +++ b/k8s-operator/sessionrecording/ws/conn_test.go @@ -58,15 +58,39 @@ func Test_conn_Read(t *testing.T) { wantCastHeaderHeight: 20, }, { - name: "two_reads_resize_message", - inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d}}, + name: "resize_data_frame_two_in_one_read", + inputs: [][]byte{ + fmt.Appendf(nil, "%s%s", + append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...), + append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...), + ), + }, + wantRecorded: append(fakes.AsciinemaCastHeaderMsg(t, 10, 20), fakes.AsciinemaCastResizeMsg(t, 10, 20)...), + wantCastHeaderWidth: 10, + wantCastHeaderHeight: 20, + }, + { + name: "two_reads_resize_message", + inputs: [][]byte{ + // op, len, stream ID, `{"width` + {0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, + // op, len, stream ID, `:10,"height":20}` + {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d}, + }, wantCastHeaderWidth: 10, wantCastHeaderHeight: 20, wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), }, { - name: "three_reads_resize_message_with_split_fragment", - inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74}, {0x22, 0x3a, 0x32, 0x30, 0x7d}}, + name: "three_reads_resize_message_with_split_fragment", + inputs: [][]byte{ + // op, len, stream ID, `{"width"` + {0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, + // op, len, stream ID, `:10,"height` + {0x00, 0x0c, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74}, + // op, len, stream ID, `":20}` + {0x80, 0x06, 0x4, 0x22, 0x3a, 0x32, 0x30, 0x7d}, + }, wantCastHeaderWidth: 10, wantCastHeaderHeight: 20, wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), diff --git a/k8s-operator/sessionrecording/ws/message.go b/k8s-operator/sessionrecording/ws/message.go index 713febec7..35667ae21 100644 --- a/k8s-operator/sessionrecording/ws/message.go +++ b/k8s-operator/sessionrecording/ws/message.go @@ -7,10 +7,10 @@ package ws import ( "encoding/binary" + "errors" "fmt" "sync/atomic" - "github.com/pkg/errors" "go.uber.org/zap" "golang.org/x/net/websocket" @@ -139,6 +139,8 @@ func (msg *message) Parse(b []byte, log *zap.SugaredLogger) (bool, error) { return false, errors.New("[unexpected] received a message fragment with no stream ID") } + // Stream ID will be one of the constants from: + // https://github.com/kubernetes/kubernetes/blob/f9ed14bf9b1119a2e091f4b487a3b54930661034/staging/src/k8s.io/apimachinery/pkg/util/remotecommand/constants.go#L57-L64 streamID := uint32(msgPayload[0]) 
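+	// All fragments of a message must carry the stream ID of the initial
+	// fragment; a mismatch here is treated as an unexpected parse error.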
if !isInitialFragment && msg.streamID.Load() != streamID { return false, fmt.Errorf("[unexpected] received message fragments with mismatched streamIDs %d and %d", msg.streamID.Load(), streamID) From 61d42eb300799f4617327f5e5145b69ac795fecf Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Mon, 28 Jul 2025 13:33:46 +0100 Subject: [PATCH 1140/1708] k8s-operator: fix test flake (#16680) This occasionally panics waiting on a nil ctx, but was missed in the previous PR because it's quite a rare flake as it needs to progress to a specific point in the parser. Updates #16678 Change-Id: Ifd36dfc915b153aede36b8ee39eff83750031f95 Signed-off-by: Tom Proctor --- k8s-operator/sessionrecording/ws/conn_test.go | 36 ++++++++++++------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/k8s-operator/sessionrecording/ws/conn_test.go b/k8s-operator/sessionrecording/ws/conn_test.go index 5e23845a7..f2fd4ea55 100644 --- a/k8s-operator/sessionrecording/ws/conn_test.go +++ b/k8s-operator/sessionrecording/ws/conn_test.go @@ -9,6 +9,7 @@ import ( "context" "fmt" "reflect" + "runtime/debug" "testing" "time" @@ -284,19 +285,28 @@ func Test_conn_WriteRand(t *testing.T) { sr := &fakes.TestSessionRecorder{} rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) for i := range 100 { - tc := &fakes.TestConn{} - c := &conn{ - Conn: tc, - log: zl.Sugar(), - rec: rec, - } - bb := fakes.RandomBytes(t) - for j, input := range bb { - f := func() { - c.Write(input) + t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) { + tc := &fakes.TestConn{} + c := &conn{ + Conn: tc, + log: zl.Sugar(), + rec: rec, + + ctx: context.Background(), // ctx must be non-nil. + initialCastHeaderSent: make(chan struct{}), } - testPanic(t, f, fmt.Sprintf("[%d %d] Write: panic parsing input of length %d first bytes %b current write message %+#v", i, j, len(input), firstBytes(input), c.currentWriteMsg)) - } + // Never block for random data. + c.writeCastHeaderOnce.Do(func() { + close(c.initialCastHeaderSent) + }) + bb := fakes.RandomBytes(t) + for j, input := range bb { + f := func() { + c.Write(input) + } + testPanic(t, f, fmt.Sprintf("[%d %d] Write: panic parsing input of length %d first bytes %b current write message %+#v", i, j, len(input), firstBytes(input), c.currentWriteMsg)) + } + }) } } @@ -304,7 +314,7 @@ func testPanic(t *testing.T, f func(), msg string) { t.Helper() defer func() { if r := recover(); r != nil { - t.Fatal(msg, r) + t.Fatal(msg, r, string(debug.Stack())) } }() f() From 4a435aedcb357877b84f776c99fba4517796b01d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Jul 2025 08:41:59 -0600 Subject: [PATCH 1141/1708] .github: Bump github/codeql-action from 3.29.2 to 3.29.3 (#16615) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.2 to 3.29.3. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/181d5eefc20863364f96762470ba6f862bdef56b...d6bbdef45e766d081b84a2def353b0055f728d3e) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 4e129b847..e5616d83a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/init@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/autobuild@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/analyze@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 From c962fefa3ed2a1b7234ccbf1a1f9a8bd1c6ef9a0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Jul 2025 08:51:58 -0600 Subject: [PATCH 1142/1708] build(deps): bump form-data from 4.0.0 to 4.0.4 in /client/web (#16623) Bumps [form-data](https://github.com/form-data/form-data) from 4.0.0 to 4.0.4. - [Release notes](https://github.com/form-data/form-data/releases) - [Changelog](https://github.com/form-data/form-data/blob/master/CHANGELOG.md) - [Commits](https://github.com/form-data/form-data/compare/v4.0.0...v4.0.4) --- updated-dependencies: - dependency-name: form-data dependency-version: 4.0.4 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- client/web/yarn.lock | 105 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 102 insertions(+), 3 deletions(-) diff --git a/client/web/yarn.lock b/client/web/yarn.lock index a9b2ae876..455f8dde0 100644 --- a/client/web/yarn.lock +++ b/client/web/yarn.lock @@ -2450,6 +2450,14 @@ cac@^6.7.14: resolved "https://registry.yarnpkg.com/cac/-/cac-6.7.14.tgz#804e1e6f506ee363cb0e3ccbb09cad5dd9870959" integrity sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ== +call-bind-apply-helpers@^1.0.1, call-bind-apply-helpers@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz#4b5428c222be985d79c3d82657479dbe0b59b2d6" + integrity sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ== + dependencies: + es-errors "^1.3.0" + function-bind "^1.1.2" + call-bind@^1.0.0, call-bind@^1.0.2, call-bind@^1.0.4, call-bind@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.5.tgz#6fa2b7845ce0ea49bf4d8b9ef64727a2c2e2e513" @@ -2767,6 +2775,15 @@ dot-case@^3.0.4: no-case "^3.0.4" tslib "^2.0.3" +dunder-proto@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/dunder-proto/-/dunder-proto-1.0.1.tgz#d7ae667e1dc83482f8b70fd0f6eefc50da30f58a" + integrity sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A== + dependencies: + call-bind-apply-helpers "^1.0.1" + es-errors "^1.3.0" + gopd "^1.2.0" + electron-to-chromium@^1.4.535: version "1.4.596" resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.596.tgz#6752d1aa795d942d49dfc5d3764d6ea283fab1d7" @@ -2834,6 +2851,16 @@ es-abstract@^1.22.1: unbox-primitive "^1.0.2" which-typed-array "^1.1.13" +es-define-property@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.1.tgz#983eb2f9a6724e9303f61addf011c72e09e0b0fa" + integrity sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g== + +es-errors@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f" + integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw== + es-iterator-helpers@^1.0.12, es-iterator-helpers@^1.0.15: version "1.0.15" resolved "https://registry.yarnpkg.com/es-iterator-helpers/-/es-iterator-helpers-1.0.15.tgz#bd81d275ac766431d19305923707c3efd9f1ae40" @@ -2854,6 +2881,13 @@ es-iterator-helpers@^1.0.12, es-iterator-helpers@^1.0.15: iterator.prototype "^1.1.2" safe-array-concat "^1.0.1" +es-object-atoms@^1.0.0, es-object-atoms@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz#1c4f2c4837327597ce69d2ca190a7fdd172338c1" + integrity sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA== + dependencies: + es-errors "^1.3.0" + es-set-tostringtag@^2.0.1: version "2.0.2" resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.0.2.tgz#11f7cc9f63376930a5f20be4915834f4bc74f9c9" @@ -2863,6 +2897,16 @@ es-set-tostringtag@^2.0.1: has-tostringtag "^1.0.0" hasown "^2.0.0" +es-set-tostringtag@^2.1.0: + version "2.1.0" + resolved 
"https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz#f31dbbe0c183b00a6d26eb6325c810c0fd18bd4d" + integrity sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA== + dependencies: + es-errors "^1.3.0" + get-intrinsic "^1.2.6" + has-tostringtag "^1.0.2" + hasown "^2.0.2" + es-shim-unscopables@^1.0.0: version "1.0.2" resolved "https://registry.yarnpkg.com/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz#1f6942e71ecc7835ed1c8a83006d8771a63a3763" @@ -3270,12 +3314,14 @@ for-each@^0.3.3: is-callable "^1.1.3" form-data@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.0.tgz#93919daeaf361ee529584b9b31664dc12c9fa452" - integrity sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww== + version "4.0.4" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.4.tgz#784cdcce0669a9d68e94d11ac4eea98088edd2c4" + integrity sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow== dependencies: asynckit "^0.4.0" combined-stream "^1.0.8" + es-set-tostringtag "^2.1.0" + hasown "^2.0.2" mime-types "^2.1.12" fraction.js@^4.2.0: @@ -3333,11 +3379,35 @@ get-intrinsic@^1.0.2, get-intrinsic@^1.1.1, get-intrinsic@^1.1.3, get-intrinsic@ has-symbols "^1.0.3" hasown "^2.0.0" +get-intrinsic@^1.2.6: + version "1.3.0" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz#743f0e3b6964a93a5491ed1bffaae054d7f98d01" + integrity sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ== + dependencies: + call-bind-apply-helpers "^1.0.2" + es-define-property "^1.0.1" + es-errors "^1.3.0" + es-object-atoms "^1.1.1" + function-bind "^1.1.2" + get-proto "^1.0.1" + gopd "^1.2.0" + has-symbols "^1.1.0" + hasown "^2.0.2" + math-intrinsics "^1.1.0" + get-nonce@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/get-nonce/-/get-nonce-1.0.1.tgz#fdf3f0278073820d2ce9426c18f07481b1e0cdf3" integrity sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q== +get-proto@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/get-proto/-/get-proto-1.0.1.tgz#150b3f2743869ef3e851ec0c49d15b1d14d00ee1" + integrity sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g== + dependencies: + dunder-proto "^1.0.1" + es-object-atoms "^1.0.0" + get-stream@^8.0.1: version "8.0.1" resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-8.0.1.tgz#def9dfd71742cd7754a7761ed43749a27d02eca2" @@ -3437,6 +3507,11 @@ gopd@^1.0.1: dependencies: get-intrinsic "^1.1.3" +gopd@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.2.0.tgz#89f56b8217bdbc8802bd299df6d7f1081d7e51a1" + integrity sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg== + graphemer@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/graphemer/-/graphemer-1.4.0.tgz#fb2f1d55e0e3a1849aeffc90c4fa0dd53a0e66c6" @@ -3474,6 +3549,11 @@ has-symbols@^1.0.2, has-symbols@^1.0.3: resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== +has-symbols@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.1.0.tgz#fc9c6a783a084951d0b971fe1018de813707a338" + integrity 
sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ== + has-tostringtag@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.0.tgz#7e133818a7d394734f941e73c3d3f9291e658b25" @@ -3481,6 +3561,13 @@ has-tostringtag@^1.0.0: dependencies: has-symbols "^1.0.2" +has-tostringtag@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc" + integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw== + dependencies: + has-symbols "^1.0.3" + hasown@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.0.tgz#f4c513d454a57b7c7e1650778de226b11700546c" @@ -3488,6 +3575,13 @@ hasown@^2.0.0: dependencies: function-bind "^1.1.2" +hasown@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003" + integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ== + dependencies: + function-bind "^1.1.2" + html-encoding-sniffer@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz#696df529a7cfd82446369dc5193e590a3735b448" @@ -3992,6 +4086,11 @@ magic-string@^0.30.5: dependencies: "@jridgewell/sourcemap-codec" "^1.4.15" +math-intrinsics@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz#a0dd74be81e2aa5c2f27e65ce283605ee4e2b7f9" + integrity sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g== + merge-stream@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" From 5ce3845a021b8384814f8279546af80e9fddbf39 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 28 Jul 2025 09:01:41 -0700 Subject: [PATCH 1143/1708] net/portmapper: avert a panic when a mapping is not available (#16686) Ideally when we attempt to create a new port mapping, we should not return without error when no mapping is available. We already log these cases as unexpected, so this change is just to avoiding panicking dispatch on the invalid result in those cases. We still separately need to fix the underlying control flow. Updates #16662 Change-Id: I51e8a116b922b49eda45e31cd27f6b89dd51abc8 Signed-off-by: M. J. Fromberger --- net/portmapper/portmapper.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index 1c6c7634b..c82fbf9da 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -507,6 +507,13 @@ func (c *Client) createMapping() { c.logf("createOrGetMapping: %v", err) } return + } else if mapping == nil { + return + + // TODO(creachadair): This was already logged in createOrGetMapping. + // It really should not happen at all, but we will need to untangle + // the control flow to eliminate that possibility. Meanwhile, this + // mitigates a panic downstream, cf. #16662. 
} if c.updates != nil { c.updates.Publish(Mapping{ From a9f3fd1c67ca427aceee708f319a0a12df6a5de8 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 28 Jul 2025 09:26:24 -0700 Subject: [PATCH 1144/1708] wgengine/magicsock: fix magicsock deadlock around Conn.NoteRecvActivity (#16687) Updates #16651 Updates tailscale/corp#30836 Signed-off-by: Jordan Whited --- tailcfg/tailcfg.go | 3 ++- wgengine/magicsock/magicsock.go | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 307b39f93..5e3c4e572 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -167,7 +167,8 @@ type CapabilityVersion int // - 120: 2025-07-15: Client understands peer relay disco messages, and implements peer client and relay server functions // - 121: 2025-07-19: Client understands peer relay endpoint alloc with [disco.AllocateUDPRelayEndpointRequest] & [disco.AllocateUDPRelayEndpointResponse] // - 122: 2025-07-21: Client sends Hostinfo.ExitNodeID to report which exit node it has selected, if any. -const CurrentCapabilityVersion CapabilityVersion = 122 +// - 123: 2025-07-28: fix deadlock regression from cryptokey routing change (issue #16651) +const CurrentCapabilityVersion CapabilityVersion = 123 // ID is an integer ID for a user, node, or login allocated by the // control plane. diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index fb7f5edcb..d2835aed3 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -4119,8 +4119,11 @@ func (le *lazyEndpoint) InitiationMessagePublicKey(peerPublicKey [32]byte) { return } le.c.mu.Lock() - defer le.c.mu.Unlock() ep, ok := le.c.peerMap.endpointForNodeKey(pubKey) + // [Conn.mu] must not be held while [Conn.noteRecvActivity] is called, which + // [endpoint.noteRecvActivity] can end up calling. See + // [Options.NoteRecvActivity] docs. + le.c.mu.Unlock() if !ok { return } From 4df02bbb486d07b0ad23f59c4cb3675ab691e79b Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 28 Jul 2025 12:23:40 -0500 Subject: [PATCH 1145/1708] util/syspolicy/setting: use a custom marshaler for time.Duration jsonv2 now returns an error when you marshal or unmarshal a time.Duration without an explicit format flag. This is an intentional, temporary choice until the default [time.Duration] representation is decided (see golang/go#71631). setting.Snapshot can hold time.Duration values inside a map[string]any, so the jsonv2 update breaks marshaling. In this PR, we start using a custom marshaler until that decision is made or golang/go#71664 lets us specify the format explicitly. This fixes `tailscale syspolicy list` failing when KeyExpirationNotice or any other time.Duration policy setting is configured. Fixes #16683 Signed-off-by: Nick Khyl --- util/syspolicy/setting/snapshot.go | 21 ++++++++++++++++++++- util/syspolicy/setting/snapshot_test.go | 12 ++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/util/syspolicy/setting/snapshot.go b/util/syspolicy/setting/snapshot.go index 087325a04..3a40785dc 100644 --- a/util/syspolicy/setting/snapshot.go +++ b/util/syspolicy/setting/snapshot.go @@ -9,6 +9,7 @@ import ( "maps" "slices" "strings" + "time" jsonv2 "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" @@ -152,6 +153,24 @@ var ( _ jsonv2.UnmarshalerFrom = (*Snapshot)(nil) ) +// As of 2025-07-28, jsonv2 no longer has a default representation for [time.Duration], +// so we need to provide a custom marshaler. 
+// +// This is temporary until the decision on the default representation is made +// (see https://github.com/golang/go/issues/71631#issuecomment-2981670799). +// +// In the future, we might either use the default representation (if compatible with +// [time.Duration.String]) or specify something like json.WithFormat[time.Duration]("units") +// when golang/go#71664 is implemented. +// +// TODO(nickkhyl): revisit this when the decision on the default [time.Duration] +// representation is made in golang/go#71631 and/or golang/go#71664 is implemented. +var formatDurationAsUnits = jsonv2.JoinOptions( + jsonv2.WithMarshalers(jsonv2.MarshalToFunc(func(e *jsontext.Encoder, t time.Duration) error { + return e.WriteToken(jsontext.String(t.String())) + })), +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. func (s *Snapshot) MarshalJSONTo(out *jsontext.Encoder) error { data := &snapshotJSON{} @@ -159,7 +178,7 @@ func (s *Snapshot) MarshalJSONTo(out *jsontext.Encoder) error { data.Summary = s.summary data.Settings = s.m } - return jsonv2.MarshalEncode(out, data) + return jsonv2.MarshalEncode(out, data, formatDurationAsUnits) } // UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. diff --git a/util/syspolicy/setting/snapshot_test.go b/util/syspolicy/setting/snapshot_test.go index d41b362f0..19f014aca 100644 --- a/util/syspolicy/setting/snapshot_test.go +++ b/util/syspolicy/setting/snapshot_test.go @@ -491,6 +491,18 @@ func TestMarshalUnmarshalSnapshot(t *testing.T) { snapshot: NewSnapshot(map[Key]RawItem{"ListPolicy": RawItemOf([]string{"Value1", "Value2"})}), wantJSON: `{"Settings": {"ListPolicy": {"Value": ["Value1", "Value2"]}}}`, }, + { + name: "Duration/Zero", + snapshot: NewSnapshot(map[Key]RawItem{"DurationPolicy": RawItemOf(time.Duration(0))}), + wantJSON: `{"Settings": {"DurationPolicy": {"Value": "0s"}}}`, + wantBack: NewSnapshot(map[Key]RawItem{"DurationPolicy": RawItemOf("0s")}), + }, + { + name: "Duration/NonZero", + snapshot: NewSnapshot(map[Key]RawItem{"DurationPolicy": RawItemOf(2 * time.Hour)}), + wantJSON: `{"Settings": {"DurationPolicy": {"Value": "2h0m0s"}}}`, + wantBack: NewSnapshot(map[Key]RawItem{"DurationPolicy": RawItemOf("2h0m0s")}), + }, { name: "Empty/With-Summary", snapshot: NewSnapshot( From e5e4386f334c8eb222bffc94c0de011a37a8bc29 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Jul 2025 23:36:36 -0600 Subject: [PATCH 1146/1708] build(deps): bump @babel/runtime from 7.23.4 to 7.26.10 in /client/web (#15299) Bumps [@babel/runtime](https://github.com/babel/babel/tree/HEAD/packages/babel-runtime) from 7.23.4 to 7.26.10. - [Release notes](https://github.com/babel/babel/releases) - [Changelog](https://github.com/babel/babel/blob/main/CHANGELOG.md) - [Commits](https://github.com/babel/babel/commits/v7.26.10/packages/babel-runtime) --- updated-dependencies: - dependency-name: "@babel/runtime" dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- client/web/yarn.lock | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/client/web/yarn.lock b/client/web/yarn.lock index 455f8dde0..7c9d9222e 100644 --- a/client/web/yarn.lock +++ b/client/web/yarn.lock @@ -1087,11 +1087,9 @@ integrity sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA== "@babel/runtime@^7.12.5", "@babel/runtime@^7.13.10", "@babel/runtime@^7.16.3", "@babel/runtime@^7.23.2", "@babel/runtime@^7.8.4": - version "7.23.4" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.23.4.tgz#36fa1d2b36db873d25ec631dcc4923fdc1cf2e2e" - integrity sha512-2Yv65nlWnWlSpe3fXEyX5i7fx5kIKo4Qbcj+hMO0odwaneFjfXw5fdum+4yL20O0QiaHpia0cYQ9xpNMqrBwHg== - dependencies: - regenerator-runtime "^0.14.0" + version "7.28.2" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.28.2.tgz#2ae5a9d51cc583bd1f5673b3bb70d6d819682473" + integrity sha512-KHp2IflsnGywDjBWDkR9iEqiWSpc8GIi0lgTT3mOElT0PP1tG26P4tmFI2YvAdzgq9RGyoHZQEIEdZy6Ec5xCA== "@babel/template@^7.22.15": version "7.22.15" @@ -4642,11 +4640,6 @@ regenerate@^1.4.2: resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.2.tgz#b9346d8827e8f5a32f7ba29637d398b69014848a" integrity sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A== -regenerator-runtime@^0.14.0: - version "0.14.0" - resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz#5e19d68eb12d486f797e15a3c6a918f7cec5eb45" - integrity sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA== - regenerator-transform@^0.15.2: version "0.15.2" resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.15.2.tgz#5bbae58b522098ebdf09bca2f83838929001c7a4" From b34cdc971007edb6968b793ea01f87196d8f9439 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 29 Jul 2025 09:04:08 -0700 Subject: [PATCH 1147/1708] ipn,net,tsnet,wgengine: make an eventbus mandatory where it is used (#16594) In the components where an event bus is already plumbed through, remove the exceptions that allow it to be omitted, and update all the tests that relied on those workarounds execute properly. This change applies only to the places where we're already using the bus; it does not enforce the existence of a bus in other components (yet), Updates #15160 Change-Id: Iebb92243caba82b5eb420c49fc3e089a77454f65 Signed-off-by: M. J. 
Fromberger --- ipn/ipnlocal/dnsconfig_test.go | 2 +- ipn/ipnlocal/local.go | 11 +-- ipn/ipnlocal/local_test.go | 6 ++ ipn/ipnlocal/network-lock_test.go | 26 ++++--- ipn/ipnlocal/peerapi_test.go | 102 +++++++++++++-------------- net/portmapper/igd_test.go | 11 ++- net/portmapper/portmapper.go | 38 +++++----- net/udprelay/server.go | 3 + tsnet/tsnet.go | 5 +- wgengine/magicsock/magicsock.go | 41 ++++++----- wgengine/magicsock/magicsock_test.go | 13 ++-- 11 files changed, 134 insertions(+), 124 deletions(-) diff --git a/ipn/ipnlocal/dnsconfig_test.go b/ipn/ipnlocal/dnsconfig_test.go index c0f5b25f3..71f175148 100644 --- a/ipn/ipnlocal/dnsconfig_test.go +++ b/ipn/ipnlocal/dnsconfig_test.go @@ -377,7 +377,7 @@ func peersMap(s []tailcfg.NodeView) map[tailcfg.NodeID]tailcfg.NodeView { } func TestAllowExitNodeDNSProxyToServeName(t *testing.T) { - b := &LocalBackend{} + b := newTestLocalBackend(t) if b.allowExitNodeDNSProxyToServeName("google.com") { t.Fatal("unexpected true on backend with nil NetMap") } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 7154b942c..bf13b2ac1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -99,7 +99,6 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/deephash" "tailscale.com/util/dnsname" - "tailscale.com/util/eventbus" "tailscale.com/util/goroutines" "tailscale.com/util/httpm" "tailscale.com/util/mak" @@ -618,15 +617,7 @@ func (b *LocalBackend) currentNode() *nodeBackend { if v := b.currentNodeAtomic.Load(); v != nil || !testenv.InTest() { return v } - // Auto-init [nodeBackend] in tests for LocalBackend created without the - // NewLocalBackend() constructor. Same reasoning for checking b.sys. - var bus *eventbus.Bus - if b.sys == nil { - bus = eventbus.New() - } else { - bus = b.sys.Bus.Get() - } - v := newNodeBackend(cmp.Or(b.ctx, context.Background()), bus) + v := newNodeBackend(cmp.Or(b.ctx, context.Background()), b.sys.Bus.Get()) if b.currentNodeAtomic.CompareAndSwap(nil, v) { v.ready() } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 37b81c84b..30833e748 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -463,6 +463,7 @@ func newTestLocalBackendWithSys(t testing.TB, sys *tsd.System) *LocalBackend { var logf logger.Logf = logger.Discard if _, ok := sys.StateStore.GetOK(); !ok { sys.Set(new(mem.Store)) + t.Log("Added memory store for testing") } if _, ok := sys.Engine.GetOK(); !ok { eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) @@ -471,6 +472,11 @@ func newTestLocalBackendWithSys(t testing.TB, sys *tsd.System) *LocalBackend { } t.Cleanup(eng.Close) sys.Set(eng) + t.Log("Added fake userspace engine for testing") + } + if _, ok := sys.Dialer.GetOK(); !ok { + sys.Set(tsdial.NewDialer(netmon.NewStatic())) + t.Log("Added static dialer for testing") } lb, err := NewLocalBackend(logf, logid.PublicID{}, sys, 0) if err != nil { diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 838f16cb9..443539aec 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -28,6 +28,7 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/tailcfg" "tailscale.com/tka" + "tailscale.com/tsd" "tailscale.com/types/key" "tailscale.com/types/netmap" "tailscale.com/types/persist" @@ -935,18 +936,21 @@ func TestTKAForceDisable(t *testing.T) { defer ts.Close() cc := fakeControlClient(t, client) - b := LocalBackend{ - varRoot: temp, - cc: cc, - ccAuto: cc, 
- logf: t.Logf, - tka: &tkaState{ - authority: authority, - storage: chonk, - }, - pm: pm, - store: pm.Store(), + sys := tsd.NewSystem() + sys.Set(pm.Store()) + + b := newTestLocalBackendWithSys(t, sys) + b.SetVarRoot(temp) + b.SetControlClientGetterForTesting(func(controlclient.Options) (controlclient.Client, error) { + return cc, nil + }) + b.mu.Lock() + b.tka = &tkaState{ + authority: authority, + storage: chonk, } + b.pm = pm + b.mu.Unlock() if err := b.NetworkLockForceLocalDisable(); err != nil { t.Fatalf("NetworkLockForceLocalDisable() failed: %v", err) diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index d8655afa0..5654cf277 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -21,10 +21,10 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" + "tailscale.com/tsd" "tailscale.com/tstest" "tailscale.com/types/logger" "tailscale.com/types/netmap" - "tailscale.com/util/eventbus" "tailscale.com/util/must" "tailscale.com/util/usermetric" "tailscale.com/wgengine" @@ -156,10 +156,9 @@ func TestHandlePeerAPI(t *testing.T) { selfNode.CapMap = tailcfg.NodeCapMap{tailcfg.CapabilityDebug: nil} } var e peerAPITestEnv - lb := &LocalBackend{ - logf: e.logBuf.Logf, - clock: &tstest.Clock{}, - } + lb := newTestLocalBackend(t) + lb.logf = e.logBuf.Logf + lb.clock = &tstest.Clock{} lb.currentNode().SetNetMap(&netmap.NetworkMap{SelfNode: selfNode.View()}) e.ph = &peerAPIHandler{ isSelf: tt.isSelf, @@ -195,20 +194,20 @@ func TestPeerAPIReplyToDNSQueries(t *testing.T) { h.isSelf = false h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - bus := eventbus.New() - defer bus.Close() + sys := tsd.NewSystem() + t.Cleanup(sys.Bus.Get().Close) ht := new(health.Tracker) - reg := new(usermetric.Registry) - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, bus) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) - h.ps = &peerAPIServer{ - b: &LocalBackend{ - e: eng, - pm: pm, - store: pm.Store(), - }, - } + reg := new(usermetric.Registry) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) + sys.Set(pm.Store()) + sys.Set(eng) + + b := newTestLocalBackendWithSys(t, sys) + b.pm = pm + + h.ps = &peerAPIServer{b: b} if h.ps.b.OfferingExitNode() { t.Fatal("unexpectedly offering exit node") } @@ -250,12 +249,12 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - bus := eventbus.New() - defer bus.Close() + sys := tsd.NewSystem() + t.Cleanup(sys.Bus.Get().Close) ht := new(health.Tracker) reg := new(usermetric.Registry) - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, bus) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) var a *appc.AppConnector if shouldStore { @@ -263,16 +262,14 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { } else { a = appc.NewAppConnector(t.Logf, &appctest.RouteCollector{}, nil, nil) } - h.ps = &peerAPIServer{ - b: &LocalBackend{ - e: eng, - pm: pm, - store: pm.Store(), - // configure as an app connector just to enable the API. - appConnector: a, - }, - } + sys.Set(pm.Store()) + sys.Set(eng) + b := newTestLocalBackendWithSys(t, sys) + b.pm = pm + b.appConnector = a // configure as an app connector just to enable the API. 
+ + h.ps = &peerAPIServer{b: b} h.ps.resolver = &fakeResolver{build: func(b *dnsmessage.Builder) { b.CNAMEResource( dnsmessage.ResourceHeader{ @@ -326,27 +323,29 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - bus := eventbus.New() - defer bus.Close() + sys := tsd.NewSystem() + t.Cleanup(sys.Bus.Get().Close) + rc := &appctest.RouteCollector{} ht := new(health.Tracker) - reg := new(usermetric.Registry) - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, bus) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) + + reg := new(usermetric.Registry) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) var a *appc.AppConnector if shouldStore { a = appc.NewAppConnector(t.Logf, rc, &appc.RouteInfo{}, fakeStoreRoutes) } else { a = appc.NewAppConnector(t.Logf, rc, nil, nil) } - h.ps = &peerAPIServer{ - b: &LocalBackend{ - e: eng, - pm: pm, - store: pm.Store(), - appConnector: a, - }, - } + sys.Set(pm.Store()) + sys.Set(eng) + + b := newTestLocalBackendWithSys(t, sys) + b.pm = pm + b.appConnector = a + + h.ps = &peerAPIServer{b: b} h.ps.b.appConnector.UpdateDomains([]string{"example.com"}) h.ps.b.appConnector.Wait(ctx) @@ -393,12 +392,13 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - bus := eventbus.New() - defer bus.Close() + sys := tsd.NewSystem() + t.Cleanup(sys.Bus.Get().Close) + ht := new(health.Tracker) reg := new(usermetric.Registry) rc := &appctest.RouteCollector{} - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, bus) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) var a *appc.AppConnector if shouldStore { @@ -406,14 +406,14 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { } else { a = appc.NewAppConnector(t.Logf, rc, nil, nil) } - h.ps = &peerAPIServer{ - b: &LocalBackend{ - e: eng, - pm: pm, - store: pm.Store(), - appConnector: a, - }, - } + sys.Set(pm.Store()) + sys.Set(eng) + + b := newTestLocalBackendWithSys(t, sys) + b.pm = pm + b.appConnector = a + + h.ps = &peerAPIServer{b: b} h.ps.b.appConnector.UpdateDomains([]string{"www.example.com"}) h.ps.b.appConnector.Wait(ctx) diff --git a/net/portmapper/igd_test.go b/net/portmapper/igd_test.go index 3ef7989a3..cca87e0b8 100644 --- a/net/portmapper/igd_test.go +++ b/net/portmapper/igd_test.go @@ -263,16 +263,21 @@ func (d *TestIGD) handlePCPQuery(pkt []byte, src netip.AddrPort) { } // newTestClient configures a new test client connected to igd for mapping updates. -// If bus != nil, update events are published to it. -// A cleanup for the resulting client is added to t. +// If bus == nil, a new empty event bus is constructed that is cleaned up when t exits. +// A cleanup for the resulting client is also added to t. func newTestClient(t *testing.T, igd *TestIGD, bus *eventbus.Bus) *Client { + if bus == nil { + bus = eventbus.New() + t.Log("Created empty event bus for test client") + t.Cleanup(bus.Close) + } var c *Client c = NewClient(Config{ Logf: tstest.WhileTestRunningLogger(t), NetMon: netmon.NewStatic(), ControlKnobs: new(controlknobs.Knobs), EventBus: bus, - OnChange: func() { + OnChange: func() { // TODO(creachadair): Remove. 
t.Logf("port map changed") t.Logf("have mapping: %v", c.HaveMapping()) }, diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index c82fbf9da..30535157c 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -85,7 +85,7 @@ const trustServiceStillAvailableDuration = 10 * time.Minute // Client is a port mapping client. type Client struct { - // The following two fields must either both be nil, or both non-nil. + // The following two fields must both be non-nil. // Both are immutable after construction. pubClient *eventbus.Client updates *eventbus.Publisher[Mapping] @@ -238,8 +238,11 @@ type Config struct { // NewClient constructs a new portmapping [Client] from c. It will panic if any // required parameters are omitted. func NewClient(c Config) *Client { - if c.NetMon == nil { - panic("nil netMon") + switch { + case c.NetMon == nil: + panic("nil NetMon") + case c.EventBus == nil: + panic("nil EventBus") } ret := &Client{ logf: c.Logf, @@ -248,10 +251,8 @@ func NewClient(c Config) *Client { onChange: c.OnChange, controlKnobs: c.ControlKnobs, } - if c.EventBus != nil { - ret.pubClient = c.EventBus.Client("portmapper") - ret.updates = eventbus.Publish[Mapping](ret.pubClient) - } + ret.pubClient = c.EventBus.Client("portmapper") + ret.updates = eventbus.Publish[Mapping](ret.pubClient) if ret.logf == nil { ret.logf = logger.Discard } @@ -286,10 +287,9 @@ func (c *Client) Close() error { } c.closed = true c.invalidateMappingsLocked(true) - if c.updates != nil { - c.updates.Close() - c.pubClient.Close() - } + c.updates.Close() + c.pubClient.Close() + // TODO: close some future ever-listening UDP socket(s), // waiting for multicast announcements from router. return nil @@ -515,14 +515,14 @@ func (c *Client) createMapping() { // the control flow to eliminate that possibility. Meanwhile, this // mitigates a panic downstream, cf. #16662. } - if c.updates != nil { - c.updates.Publish(Mapping{ - External: mapping.External(), - Type: mapping.MappingType(), - GoodUntil: mapping.GoodUntil(), - }) - } - if c.onChange != nil && c.pubClient == nil { + c.updates.Publish(Mapping{ + External: mapping.External(), + Type: mapping.MappingType(), + GoodUntil: mapping.GoodUntil(), + }) + // TODO(creachadair): Remove this entirely once there are no longer any + // places where the callback is set. + if c.onChange != nil { go c.onChange() } } diff --git a/net/udprelay/server.go b/net/udprelay/server.go index c34a4b5f6..aece3bc59 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -291,6 +291,9 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve s.vniPool = append(s.vniPool, uint32(i)) } + // TODO(creachadair): Find a way to plumb this in during initialization. + // As-written, messages published here will not be seen by other components + // in a running client. 
bus := eventbus.New() s.bus = bus netMon, err := netmon.New(s.bus, logf) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 65367f235..d81dec7d6 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -435,10 +435,7 @@ func (s *Server) Close() error { ln.closeLocked() } wg.Wait() - - if bus := s.sys.Bus.Get(); bus != nil { - bus.Close() - } + s.sys.Bus.Get().Close() s.closed = true return nil } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index d2835aed3..6495b13b5 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -715,8 +715,11 @@ func (c *Conn) Synchronize() { // As the set of possible endpoints for a Conn changes, the // callback opts.EndpointsFunc is called. func NewConn(opts Options) (*Conn, error) { - if opts.NetMon == nil { + switch { + case opts.NetMon == nil: return nil, errors.New("magicsock.Options.NetMon must be non-nil") + case opts.EventBus == nil: + return nil, errors.New("magicsock.Options.EventBus must be non-nil") } c := newConn(opts.logf()) @@ -729,22 +732,20 @@ func NewConn(opts Options) (*Conn, error) { c.testOnlyPacketListener = opts.TestOnlyPacketListener c.noteRecvActivity = opts.NoteRecvActivity - if c.eventBus != nil { - c.eventClient = c.eventBus.Client("magicsock.Conn") - - // Subscribe calls must return before NewConn otherwise published - // events can be missed. - c.pmSub = eventbus.Subscribe[portmapper.Mapping](c.eventClient) - c.filterSub = eventbus.Subscribe[FilterUpdate](c.eventClient) - c.nodeViewsSub = eventbus.Subscribe[NodeViewsUpdate](c.eventClient) - c.nodeMutsSub = eventbus.Subscribe[NodeMutationsUpdate](c.eventClient) - c.syncSub = eventbus.Subscribe[syncPoint](c.eventClient) - c.syncPub = eventbus.Publish[syncPoint](c.eventClient) - c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](c.eventClient) - c.allocRelayEndpointSub = eventbus.Subscribe[UDPRelayAllocResp](c.eventClient) - c.subsDoneCh = make(chan struct{}) - go c.consumeEventbusTopics() - } + c.eventClient = c.eventBus.Client("magicsock.Conn") + + // Subscribe calls must return before NewConn otherwise published + // events can be missed. + c.pmSub = eventbus.Subscribe[portmapper.Mapping](c.eventClient) + c.filterSub = eventbus.Subscribe[FilterUpdate](c.eventClient) + c.nodeViewsSub = eventbus.Subscribe[NodeViewsUpdate](c.eventClient) + c.nodeMutsSub = eventbus.Subscribe[NodeMutationsUpdate](c.eventClient) + c.syncSub = eventbus.Subscribe[syncPoint](c.eventClient) + c.syncPub = eventbus.Publish[syncPoint](c.eventClient) + c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](c.eventClient) + c.allocRelayEndpointSub = eventbus.Subscribe[UDPRelayAllocResp](c.eventClient) + c.subsDoneCh = make(chan struct{}) + go c.consumeEventbusTopics() // Don't log the same log messages possibly every few seconds in our // portmapper. @@ -3327,10 +3328,8 @@ func (c *Conn) Close() error { // deadlock with c.Close(). // 2. Conn.consumeEventbusTopics event handlers may not guard against // undesirable post/in-progress Conn.Close() behaviors. 
- if c.eventClient != nil { - c.eventClient.Close() - <-c.subsDoneCh - } + c.eventClient.Close() + <-c.subsDoneCh c.mu.Lock() defer c.mu.Unlock() diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 8a09df27d..480faa694 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -179,7 +179,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen t.Helper() bus := eventbus.New() - defer bus.Close() + t.Cleanup(bus.Close) netMon, err := netmon.New(bus, logf) if err != nil { @@ -191,6 +191,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen epCh := make(chan []tailcfg.Endpoint, 100) // arbitrary conn, err := NewConn(Options{ NetMon: netMon, + EventBus: bus, Metrics: ®, Logf: logf, HealthTracker: ht, @@ -406,7 +407,7 @@ func TestNewConn(t *testing.T) { } bus := eventbus.New() - defer bus.Close() + t.Cleanup(bus.Close) netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, "... netmon: ")) if err != nil { @@ -424,6 +425,7 @@ func TestNewConn(t *testing.T) { EndpointsFunc: epFunc, Logf: t.Logf, NetMon: netMon, + EventBus: bus, Metrics: new(usermetric.Registry), }) if err != nil { @@ -542,7 +544,7 @@ func TestDeviceStartStop(t *testing.T) { tstest.ResourceCheck(t) bus := eventbus.New() - defer bus.Close() + t.Cleanup(bus.Close) netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, "... netmon: ")) if err != nil { @@ -554,6 +556,7 @@ func TestDeviceStartStop(t *testing.T) { EndpointsFunc: func(eps []tailcfg.Endpoint) {}, Logf: t.Logf, NetMon: netMon, + EventBus: bus, Metrics: new(usermetric.Registry), }) if err != nil { @@ -1349,7 +1352,7 @@ func newTestConn(t testing.TB) *Conn { port := pickPort(t) bus := eventbus.New() - defer bus.Close() + t.Cleanup(bus.Close) netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, "... netmon: ")) if err != nil { @@ -1359,6 +1362,7 @@ func newTestConn(t testing.TB) *Conn { conn, err := NewConn(Options{ NetMon: netMon, + EventBus: bus, HealthTracker: new(health.Tracker), Metrics: new(usermetric.Registry), DisablePortMapper: true, @@ -3147,6 +3151,7 @@ func TestNetworkDownSendErrors(t *testing.T) { Logf: t.Logf, NetMon: netMon, Metrics: reg, + EventBus: bus, })) defer conn.Close() From e37432afb7acb012576b8df483d31492317b790b Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Tue, 29 Jul 2025 13:59:09 -0400 Subject: [PATCH 1148/1708] cmd/tailscale/cli: update message for disable service (#16705) This commit update the message for recommanding clear command after running serve for service. Instead of a flag, we pass the service name as a parameter. 
Fixes tailscale/corp#30846 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- cmd/tailscale/cli/serve_v2.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 91a236970..acefd881f 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -656,7 +656,7 @@ var ( msgDisableProxy = "To disable the proxy, run: tailscale %s --%s=%d off" msgDisableServiceProxy = "To disable the proxy, run: tailscale serve --service=%s --%s=%d off" msgDisableServiceTun = "To disable the service in TUN mode, run: tailscale serve --service=%s --tun off" - msgDisableService = "To remove config for the service, run: tailscale serve clear --service=%s" + msgDisableService = "To remove config for the service, run: tailscale serve clear %s" msgToExit = "Press Ctrl+C to exit." ) From 3d1e4f147afb7359061dde08c270a37032fe5aef Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 29 Jul 2025 14:58:47 -0700 Subject: [PATCH 1149/1708] tstest/natlab: fix conn.Close race with conn.ReadFromUDPAddrPort (#16710) If a conn.Close call raced conn.ReadFromUDPAddrPort before it could "register" itself as an active read, the conn.ReadFromUDPAddrPort would never return. This commit replaces all the activeRead and breakActiveReads machinery with a channel. These constructs were only depended upon by SetReadDeadline, and SetReadDeadline was unused. Updates #16707 Signed-off-by: Jordan Whited --- tstest/natlab/natlab.go | 112 +++++++++------------------------------- 1 file changed, 23 insertions(+), 89 deletions(-) diff --git a/tstest/natlab/natlab.go b/tstest/natlab/natlab.go index 92a4ccb68..ffa02eee4 100644 --- a/tstest/natlab/natlab.go +++ b/tstest/natlab/natlab.go @@ -684,10 +684,11 @@ func (m *Machine) ListenPacket(ctx context.Context, network, address string) (ne ipp := netip.AddrPortFrom(ip, port) c := &conn{ - m: m, - fam: fam, - ipp: ipp, - in: make(chan *Packet, 100), // arbitrary + m: m, + fam: fam, + ipp: ipp, + closedCh: make(chan struct{}), + in: make(chan *Packet, 100), // arbitrary } switch c.fam { case 0: @@ -716,70 +717,28 @@ type conn struct { fam uint8 // 0, 4, or 6 ipp netip.AddrPort - mu sync.Mutex - closed bool - readDeadline time.Time - activeReads map[*activeRead]bool - in chan *Packet -} + closeOnce sync.Once + closedCh chan struct{} // closed by Close -type activeRead struct { - cancel context.CancelFunc -} - -// canRead reports whether we can do a read. 
-func (c *conn) canRead() error { - c.mu.Lock() - defer c.mu.Unlock() - if c.closed { - return net.ErrClosed - } - if !c.readDeadline.IsZero() && c.readDeadline.Before(time.Now()) { - return errors.New("read deadline exceeded") - } - return nil -} - -func (c *conn) registerActiveRead(ar *activeRead, active bool) { - c.mu.Lock() - defer c.mu.Unlock() - if c.activeReads == nil { - c.activeReads = make(map[*activeRead]bool) - } - if active { - c.activeReads[ar] = true - } else { - delete(c.activeReads, ar) - } + in chan *Packet } func (c *conn) Close() error { - c.mu.Lock() - defer c.mu.Unlock() - if c.closed { - return nil - } - c.closed = true - switch c.fam { - case 0: - c.m.unregisterConn4(c) - c.m.unregisterConn6(c) - case 4: - c.m.unregisterConn4(c) - case 6: - c.m.unregisterConn6(c) - } - c.breakActiveReadsLocked() + c.closeOnce.Do(func() { + switch c.fam { + case 0: + c.m.unregisterConn4(c) + c.m.unregisterConn6(c) + case 4: + c.m.unregisterConn4(c) + case 6: + c.m.unregisterConn6(c) + } + close(c.closedCh) + }) return nil } -func (c *conn) breakActiveReadsLocked() { - for ar := range c.activeReads { - ar.cancel() - } - c.activeReads = nil -} - func (c *conn) LocalAddr() net.Addr { return &net.UDPAddr{ IP: c.ipp.Addr().AsSlice(), @@ -809,25 +768,13 @@ func (c *conn) ReadFrom(p []byte) (n int, addr net.Addr, err error) { } func (c *conn) ReadFromUDPAddrPort(p []byte) (n int, addr netip.AddrPort, err error) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - ar := &activeRead{cancel: cancel} - - if err := c.canRead(); err != nil { - return 0, netip.AddrPort{}, err - } - - c.registerActiveRead(ar, true) - defer c.registerActiveRead(ar, false) - select { + case <-c.closedCh: + return 0, netip.AddrPort{}, net.ErrClosed case pkt := <-c.in: n = copy(p, pkt.Payload) pkt.Trace("PacketConn.ReadFrom") return n, pkt.Src, nil - case <-ctx.Done(): - return 0, netip.AddrPort{}, context.DeadlineExceeded } } @@ -857,18 +804,5 @@ func (c *conn) SetWriteDeadline(t time.Time) error { panic("SetWriteDeadline unsupported; TODO when needed") } func (c *conn) SetReadDeadline(t time.Time) error { - c.mu.Lock() - defer c.mu.Unlock() - - now := time.Now() - if t.After(now) { - panic("SetReadDeadline in the future not yet supported; TODO?") - } - - if !t.IsZero() && t.Before(now) { - c.breakActiveReadsLocked() - } - c.readDeadline = t - - return nil + panic("SetReadDeadline unsupported; TODO when needed") } From aa6a2d1e56a58c9e800b81701fa4636f85c9982a Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Tue, 29 Jul 2025 09:11:36 -0500 Subject: [PATCH 1150/1708] drive/driveimpl: use sudo or su to run file server Some systems have `sudo`, some have `su`. This tries both, increasing the chance that we can run the file server as an unprivileged user. Updates #14629 Signed-off-by: Percy Wegmann --- drive/driveimpl/remote_impl.go | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/drive/driveimpl/remote_impl.go b/drive/driveimpl/remote_impl.go index 7fd5d3325..2ff98075e 100644 --- a/drive/driveimpl/remote_impl.go +++ b/drive/driveimpl/remote_impl.go @@ -333,8 +333,14 @@ func (s *userServer) run() error { args = append(args, s.Name, s.Path) } var cmd *exec.Cmd - if su := s.canSU(); su != "" { - s.logf("starting taildrive file server as user %q", s.username) + + if s.canSudo() { + s.logf("starting taildrive file server with sudo as user %q", s.username) + allArgs := []string{"-n", "-u", s.username, s.executable} + allArgs = append(allArgs, args...) 
+ cmd = exec.Command("sudo", allArgs...) + } else if su := s.canSU(); su != "" { + s.logf("starting taildrive file server with su as user %q", s.username) // Quote and escape arguments. Use single quotes to prevent shell substitutions. for i, arg := range args { args[i] = "'" + strings.ReplaceAll(arg, "'", "'\"'\"'") + "'" @@ -343,7 +349,7 @@ func (s *userServer) run() error { allArgs := []string{s.username, "-c", cmdString} cmd = exec.Command(su, allArgs...) } else { - // If we were root, we should have been able to sudo as a specific + // If we were root, we should have been able to sudo or su as a specific // user, but let's check just to make sure, since we never want to // access shared folders as root. err := s.assertNotRoot() @@ -409,6 +415,18 @@ var writeMethods = map[string]bool{ "DELETE": true, } +// canSudo checks wether we can sudo -u the configured executable as the +// configured user by attempting to call the executable with the '-h' flag to +// print help. +func (s *userServer) canSudo() bool { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + if err := exec.CommandContext(ctx, "sudo", "-n", "-u", s.username, s.executable, "-h").Run(); err != nil { + return false + } + return true +} + // canSU checks whether the current process can run su with the right username. // If su can be run, this returns the path to the su command. // If not, this returns the empty string "". From eed3e5dc611f17de9ca435523bb21ff312f21389 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 30 Jul 2025 13:39:59 +0100 Subject: [PATCH 1151/1708] ipn/store/kubestore,kube: fix cert error in admin UI (#16717) Also adds a test to kube/kubeclient to defend against the error type returned by the client changing in future. Fixes tailscale/corp#30855 Change-Id: Id11d4295003e66ad5c29a687f1239333c21226a4 Signed-off-by: Tom Proctor --- ipn/store/kubestore/store_kube.go | 18 ++++++ ipn/store/kubestore/store_kube_test.go | 7 +++ kube/kubeclient/client_test.go | 76 ++++++++++++++++++++++++++ 3 files changed, 101 insertions(+) diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index a9ad514e7..5b25471c7 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -9,6 +9,7 @@ import ( "fmt" "log" "net" + "net/http" "os" "strings" "time" @@ -203,6 +204,23 @@ func (s *Store) ReadTLSCertAndKey(domain string) (cert, key []byte, err error) { // that wraps ipn.ErrStateNotExist here. return nil, nil, ipn.ErrStateNotExist } + st, ok := err.(*kubeapi.Status) + if ok && st.Code == http.StatusForbidden && (s.certShareMode == "ro" || s.certShareMode == "rw") { + // In cert share mode, we read from a dedicated Secret per domain. + // To get here, we already had a cache miss from our in-memory + // store. For write replicas, that means it wasn't available on + // start and it wasn't written since. For read replicas, that means + // it wasn't available on start and it hasn't been reloaded in the + // background. So getting a "forbidden" error is an expected + // "not found" case where we've been asked for a cert we don't + // expect to issue, and so the forbidden error reflects that the + // operator didn't assign permission for a Secret for that domain. + // + // This code path gets triggered by the admin UI's machine page, + // which queries for the node's own TLS cert existing via the + // "tls-cert-status" c2n API. 
+ return nil, nil, ipn.ErrStateNotExist + } return nil, nil, fmt.Errorf("getting TLS Secret %q: %w", domain, err) } cert = secret.Data[keyTLSCert] diff --git a/ipn/store/kubestore/store_kube_test.go b/ipn/store/kubestore/store_kube_test.go index 9a49f3028..8c8e5e870 100644 --- a/ipn/store/kubestore/store_kube_test.go +++ b/ipn/store/kubestore/store_kube_test.go @@ -426,6 +426,13 @@ func TestReadTLSCertAndKey(t *testing.T) { secretGetErr: &kubeapi.Status{Code: 404}, wantErr: ipn.ErrStateNotExist, }, + { + name: "cert_share_ro_mode_forbidden", + certShareMode: "ro", + domain: testDomain, + secretGetErr: &kubeapi.Status{Code: 403}, + wantErr: ipn.ErrStateNotExist, + }, { name: "cert_share_ro_mode_empty_cert_in_secret", certShareMode: "ro", diff --git a/kube/kubeclient/client_test.go b/kube/kubeclient/client_test.go index 31878befe..8599e7e3c 100644 --- a/kube/kubeclient/client_test.go +++ b/kube/kubeclient/client_test.go @@ -7,6 +7,9 @@ import ( "context" "encoding/json" "net/http" + "net/http/httptest" + "os" + "path/filepath" "testing" "github.com/google/go-cmp/cmp" @@ -104,6 +107,48 @@ func Test_client_Event(t *testing.T) { } } +// TestReturnsKubeStatusError ensures HTTP error codes from the Kubernetes API +// server can always be extracted by casting the error to the *kubeapi.Status +// type, as lots of calling code relies on this cast succeeding. Note that +// transport errors are not expected or required to be of type *kubeapi.Status. +func TestReturnsKubeStatusError(t *testing.T) { + cl := clientForKubeHandler(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusForbidden) + _ = json.NewEncoder(w).Encode(kubeapi.Status{Code: http.StatusForbidden, Message: "test error"}) + })) + + _, err := cl.GetSecret(t.Context(), "test-secret") + if err == nil { + t.Fatal("expected error, got nil") + } + if st, ok := err.(*kubeapi.Status); !ok || st.Code != http.StatusForbidden { + t.Fatalf("expected kubeapi.Status with code %d, got %T: %v", http.StatusForbidden, err, err) + } +} + +// clientForKubeHandler creates a client using the externally accessible package +// API to ensure it's testing behaviour as close to prod as possible. The passed +// in handler mocks the Kubernetes API server's responses to any HTTP requests +// made by the client. +func clientForKubeHandler(t *testing.T, handler http.Handler) Client { + t.Helper() + tmpDir := t.TempDir() + rootPathForTests = tmpDir + saDir := filepath.Join(tmpDir, "var", "run", "secrets", "kubernetes.io", "serviceaccount") + _ = os.MkdirAll(saDir, 0755) + _ = os.WriteFile(filepath.Join(saDir, "token"), []byte("test-token"), 0600) + _ = os.WriteFile(filepath.Join(saDir, "namespace"), []byte("test-namespace"), 0600) + _ = os.WriteFile(filepath.Join(saDir, "ca.crt"), []byte(ca), 0644) + cl, err := New("test-client") + if err != nil { + t.Fatalf("New() error = %v", err) + } + srv := httptest.NewServer(handler) + t.Cleanup(srv.Close) + cl.SetURL(srv.URL) + return cl +} + // args is a set of values for testing a single call to client.kubeAPIRequest. type args struct { // wantsMethod is the expected value of 'method' arg. 
@@ -149,3 +194,34 @@ func fakeKubeAPIRequest(t *testing.T, argSets []args) kubeAPIRequestFunc { } return f } + +const ca = `-----BEGIN CERTIFICATE----- +MIIFEDCCA3igAwIBAgIRANf5NdPojIfj70wMfJVYUg8wDQYJKoZIhvcNAQELBQAw +gZ8xHjAcBgNVBAoTFW1rY2VydCBkZXZlbG9wbWVudCBDQTE6MDgGA1UECwwxZnJv +bWJlcmdlckBzdGFyZHVzdC5sb2NhbCAoTWljaGFlbCBKLiBGcm9tYmVyZ2VyKTFB +MD8GA1UEAww4bWtjZXJ0IGZyb21iZXJnZXJAc3RhcmR1c3QubG9jYWwgKE1pY2hh +ZWwgSi4gRnJvbWJlcmdlcikwHhcNMjMwMjA3MjAzNDE4WhcNMzMwMjA3MjAzNDE4 +WjCBnzEeMBwGA1UEChMVbWtjZXJ0IGRldmVsb3BtZW50IENBMTowOAYDVQQLDDFm +cm9tYmVyZ2VyQHN0YXJkdXN0LmxvY2FsIChNaWNoYWVsIEouIEZyb21iZXJnZXIp +MUEwPwYDVQQDDDhta2NlcnQgZnJvbWJlcmdlckBzdGFyZHVzdC5sb2NhbCAoTWlj +aGFlbCBKLiBGcm9tYmVyZ2VyKTCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoC +ggGBAL5uXNnrZ6dgjcvK0Hc7ZNUIRYEWst9qbO0P9H7le08pJ6d9T2BUWruZtVjk +Q12msv5/bVWHhVk8dZclI9FLXuMsIrocH8bsoP4wruPMyRyp6EedSKODN51fFSRv +/jHbS5vzUVAWTYy9qYmd6qL0uhsHCZCCT6gfigamHPUFKM3sHDn5ZHWvySMwcyGl +AicmPAIkBWqiCZAkB5+WM7+oyRLjmrIalfWIZYxW/rojGLwTfneHv6J5WjVQnpJB +ayWCzCzaiXukK9MeBWeTOe8UfVN0Engd74/rjLWvjbfC+uZSr6RVkZvs2jANLwPF +zgzBPHgRPfAhszU1NNAMjnNQ47+OMOTKRt7e6jYzhO5fyO1qVAAvGBqcfpj+JfDk +cccaUMhUvdiGrhGf1V1tN/PislxvALirzcFipjD01isBKwn0fxRugzvJNrjEo8RA +RvbcdeKcwex7M0o/Cd0+G2B13gZNOFvR33PmG7iTpp7IUrUKfQg28I83Sp8tMY3s +ljJSawIDAQABo0UwQzAOBgNVHQ8BAf8EBAMCAgQwEgYDVR0TAQH/BAgwBgEB/wIB +ADAdBgNVHQ4EFgQU18qto0Fa56kCi/HwfQuC9ECX7cAwDQYJKoZIhvcNAQELBQAD +ggGBAAzs96LwZVOsRSlBdQqMo8oMAvs7HgnYbXt8SqaACLX3+kJ3cV/vrCE3iJrW +ma4CiQbxS/HqsiZjota5m4lYeEevRnUDpXhp+7ugZTiz33Flm1RU99c9UYfQ+919 +ANPAKeqNpoPco/HF5Bz0ocepjcfKQrVZZNTj6noLs8o12FHBLO5976AcF9mqlNfh +8/F0gDJXq6+x7VT5y8u0rY004XKPRe3CklRt8kpeMiP6mhRyyUehOaHeIbNx8ubi +Pi44ByN/ueAnuRhF9zYtyZVZZOaSLysJge01tuPXF8rBXGruoJIv35xTTBa9BzaP +YDOGbGn1ZnajdNagHqCba8vjTLDSpqMvgRj3TFrGHdETA2LDQat38uVxX8gxm68K +va5Tyv7n+6BQ5YTpJjTPnmSJKaXZrrhdLPvG0OU2TxeEsvbcm5LFQofirOOw86Se +vzF2cQ94mmHRZiEk0Av3NO0jF93ELDrBCuiccVyEKq6TknuvPQlutCXKDOYSEb8I +MHctBg== +-----END CERTIFICATE-----` From 1cc842b389a9f928cea2fb01fdd0a2c486ff5939 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 30 Jul 2025 13:08:53 -0700 Subject: [PATCH 1152/1708] util/set: add more functionality to IntSet (#16640) --- util/set/intset.go | 26 ++++++++++++++++++++++++++ util/set/intset_test.go | 6 ++++++ 2 files changed, 32 insertions(+) diff --git a/util/set/intset.go b/util/set/intset.go index b747d3bff..d32524691 100644 --- a/util/set/intset.go +++ b/util/set/intset.go @@ -28,6 +28,15 @@ type IntSet[T constraints.Integer] struct { extraLen int } +// IntsOf constructs an [IntSet] with the provided elements. +func IntsOf[T constraints.Integer](slice ...T) IntSet[T] { + var s IntSet[T] + for _, e := range slice { + s.Add(e) + } + return s +} + // Values returns an iterator over the elements of the set. // The iterator will yield the elements in no particular order. func (s IntSet[T]) Values() iter.Seq[T] { @@ -111,6 +120,23 @@ func (s *IntSet[T]) Delete(e T) { } } +// DeleteSeq deletes the values in seq from the set. +func (s *IntSet[T]) DeleteSeq(seq iter.Seq[T]) { + for e := range seq { + s.Delete(e) + } +} + +// Equal reports whether s is equal to other. +func (s IntSet[T]) Equal(other IntSet[T]) bool { + for hi, bits := range s.extra { + if other.extra[hi] != bits { + return false + } + } + return s.extraLen == other.extraLen && s.bits == other.bits +} + // Clone returns a copy of s that doesn't alias the original. 
func (s IntSet[T]) Clone() IntSet[T] { return IntSet[T]{ diff --git a/util/set/intset_test.go b/util/set/intset_test.go index 9523fe88d..d838215c9 100644 --- a/util/set/intset_test.go +++ b/util/set/intset_test.go @@ -47,6 +47,9 @@ func TestIntSet(t *testing.T) { deleteInt(t, ss, &si, math.MinInt64) deleteInt(t, ss, &si, math.MaxInt64) intValues(t, ss, si) + if !si.Equal(IntsOf(ss.Slice()...)) { + t.Errorf("{%v}.Equal({%v}) = false, want true", si, ss) + } }) t.Run("Uint64", func(t *testing.T) { @@ -80,6 +83,9 @@ func TestIntSet(t *testing.T) { intValues(t, ss, si) deleteInt(t, ss, &si, math.MaxInt64) intValues(t, ss, si) + if !si.Equal(IntsOf(ss.Slice()...)) { + t.Errorf("{%v}.Equal({%v}) = false, want true", si, ss) + } }) } From 47b5f10165ad7ff48b65417786b0ea961a481d01 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Thu, 31 Jul 2025 12:13:36 -0400 Subject: [PATCH 1153/1708] cmd/tsidp,tsnet: update tsidp oidc-key store path (#16735) The tsidp oidc-key.json ended up in the root directory or home dir of the user process running it. Update this to store it in a known location respecting the TS_STATE_DIR and flagDir options. Fixes #16734 Signed-off-by: Mike O'Driscoll --- cmd/tsidp/tsidp.go | 26 +++++++++++++++++++++++--- tsnet/tsnet.go | 6 ++++++ 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 8df68cd74..e68e55ca9 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -29,6 +29,7 @@ import ( "net/url" "os" "os/signal" + "path/filepath" "strconv" "strings" "sync" @@ -60,6 +61,9 @@ type ctxConn struct{} // accessing the IDP over Funnel are persisted. const funnelClientsFile = "oidc-funnel-clients.json" +// oidcKeyFile is where the OIDC private key is persisted. +const oidcKeyFile = "oidc-key.json" + var ( flagVerbose = flag.Bool("verbose", false, "be verbose") flagPort = flag.Int("port", 443, "port to listen on") @@ -80,12 +84,14 @@ func main() { var ( lc *local.Client st *ipnstate.Status + rootPath string err error watcherChan chan error cleanup func() lns []net.Listener ) + if *flagUseLocalTailscaled { lc = &local.Client{} st, err = lc.StatusWithoutPeers(ctx) @@ -110,6 +116,15 @@ func main() { log.Fatalf("failed to listen on any of %v", st.TailscaleIPs) } + if flagDir == nil || *flagDir == "" { + // use user config directory as storage for tsidp oidc key + configDir, err := os.UserConfigDir() + if err != nil { + log.Fatalf("getting user config directory: %v", err) + } + rootPath = filepath.Join(configDir, "tsidp") + } + // tailscaled needs to be setting an HTTP header for funneled requests // that older versions don't provide. // TODO(naman): is this the correct check? 
@@ -127,6 +142,8 @@ func main() { Hostname: *flagHostname, Dir: *flagDir, } + rootPath = ts.GetRootPath() + log.Printf("tsidp root path: %s", rootPath) if *flagVerbose { ts.Logf = log.Printf } @@ -157,7 +174,9 @@ func main() { lc: lc, funnel: *flagFunnel, localTSMode: *flagUseLocalTailscaled, + rootPath: rootPath, } + if *flagPort != 443 { srv.serverURL = fmt.Sprintf("https://%s:%d", strings.TrimSuffix(st.Self.DNSName, "."), *flagPort) } else { @@ -285,6 +304,7 @@ type idpServer struct { serverURL string // "https://foo.bar.ts.net" funnel bool localTSMode bool + rootPath string // root path, used for storing state files lazyMux lazy.SyncValue[*http.ServeMux] lazySigningKey lazy.SyncValue[*signingKey] @@ -819,8 +839,9 @@ func (s *idpServer) oidcSigner() (jose.Signer, error) { func (s *idpServer) oidcPrivateKey() (*signingKey, error) { return s.lazySigningKey.GetErr(func() (*signingKey, error) { + keyPath := filepath.Join(s.rootPath, oidcKeyFile) var sk signingKey - b, err := os.ReadFile("oidc-key.json") + b, err := os.ReadFile(keyPath) if err == nil { if err := sk.UnmarshalJSON(b); err == nil { return &sk, nil @@ -835,7 +856,7 @@ func (s *idpServer) oidcPrivateKey() (*signingKey, error) { if err != nil { log.Fatalf("Error marshaling key: %v", err) } - if err := os.WriteFile("oidc-key.json", b, 0600); err != nil { + if err := os.WriteFile(keyPath, b, 0600); err != nil { log.Fatalf("Error writing key: %v", err) } return &sk, nil @@ -869,7 +890,6 @@ func (s *idpServer) serveJWKS(w http.ResponseWriter, r *http.Request) { }); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } - return } // openIDProviderMetadata is a partial representation of diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index d81dec7d6..2715917a2 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -1268,6 +1268,12 @@ func (s *Server) listen(network, addr string, lnOn listenOn) (net.Listener, erro return ln, nil } +// GetRootPath returns the root path of the tsnet server. +// This is where the state file and other data is stored. +func (s *Server) GetRootPath() string { + return s.rootPath +} + // CapturePcap can be called by the application code compiled with tsnet to save a pcap // of packets which the netstack within tsnet sees. This is expected to be useful during // debugging, probably not useful for production. From 23a0398136d9d894eaf332c8ed8743dc9ecf4611 Mon Sep 17 00:00:00 2001 From: jishudashu <979260390@qq.com> Date: Fri, 1 Aug 2025 02:36:51 +0800 Subject: [PATCH 1154/1708] ipn/ipnlocal, net/dns: use slices.Equal to simplify code (#16641) Signed-off-by: jishudashu <979260390@qq.com> --- ipn/ipnlocal/local.go | 14 +------------- net/dns/config.go | 15 ++------------- 2 files changed, 3 insertions(+), 26 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index bf13b2ac1..5fb3d5771 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2166,7 +2166,7 @@ func (b *LocalBackend) setWgengineStatus(s *wgengine.Status, err error) { es := b.parseWgStatusLocked(s) cc := b.cc b.engineStatus = es - needUpdateEndpoints := !endpointsEqual(s.LocalAddrs, b.endpoints) + needUpdateEndpoints := !slices.Equal(s.LocalAddrs, b.endpoints) if needUpdateEndpoints { b.endpoints = append([]tailcfg.Endpoint{}, s.LocalAddrs...) 
} @@ -2192,18 +2192,6 @@ func (b *LocalBackend) broadcastStatusChanged() { b.statusLock.Unlock() } -func endpointsEqual(x, y []tailcfg.Endpoint) bool { - if len(x) != len(y) { - return false - } - for i := range x { - if x[i] != y[i] { - return false - } - } - return true -} - // SetNotifyCallback sets the function to call when the backend has something to // notify the frontend about. Only one callback can be set at a time, so calling // this function will replace the previous callback. diff --git a/net/dns/config.go b/net/dns/config.go index b2f4e6dbd..b2c7c4285 100644 --- a/net/dns/config.go +++ b/net/dns/config.go @@ -8,6 +8,7 @@ import ( "bufio" "fmt" "net/netip" + "slices" "sort" "tailscale.com/control/controlknobs" @@ -181,19 +182,7 @@ func sameResolverNames(a, b []*dnstype.Resolver) bool { if a[i].Addr != b[i].Addr { return false } - if !sameIPs(a[i].BootstrapResolution, b[i].BootstrapResolution) { - return false - } - } - return true -} - -func sameIPs(a, b []netip.Addr) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { + if !slices.Equal(a[i].BootstrapResolution, b[i].BootstrapResolution) { return false } } From f2fd7a051437ed19a8a77a7c71e4acdc86ad84c9 Mon Sep 17 00:00:00 2001 From: Lee Briggs Date: Thu, 31 Jul 2025 23:35:48 -0700 Subject: [PATCH 1155/1708] cmd/k8s-operator,k8s-operator: allow setting a `priorityClassName` (#16685) * cmd/k8s-operator,k8s-operator: allow setting a `priorityClassName` Fixes #16682 Signed-off-by: Lee Briggs * Update k8s-operator/apis/v1alpha1/types_proxyclass.go Co-authored-by: Tom Proctor Signed-off-by: Lee Briggs * run make kube-generate-all Change-Id: I5f8f16694fdc181b048217b9f05ec2ee2aa04def Signed-off-by: Tom Proctor --------- Signed-off-by: Lee Briggs Signed-off-by: Lee Briggs Signed-off-by: Tom Proctor Co-authored-by: Tom Proctor --- .../deploy/crds/tailscale.com_proxyclasses.yaml | 6 ++++++ cmd/k8s-operator/deploy/manifests/operator.yaml | 6 ++++++ cmd/k8s-operator/sts.go | 1 + cmd/k8s-operator/sts_test.go | 3 +++ k8s-operator/api.md | 1 + k8s-operator/apis/v1alpha1/types_proxyclass.go | 5 +++++ 6 files changed, 22 insertions(+) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index c5dc9c3e9..cb9e0b991 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -1093,6 +1093,12 @@ spec: type: object additionalProperties: type: string + priorityClassName: + description: |- + PriorityClassName for the proxy Pod. + By default Tailscale Kubernetes operator does not apply any priority class. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + type: string securityContext: description: |- Proxy Pod's security context. diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 175f2a7fb..5e0cca9b5 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -1574,6 +1574,12 @@ spec: selector. https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling type: object + priorityClassName: + description: |- + PriorityClassName for the proxy Pod. + By default Tailscale Kubernetes operator does not apply any priority class. 
+ https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + type: string securityContext: description: |- Proxy Pod's security context. diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index df12554e0..911d02832 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -809,6 +809,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, ss.Spec.Template.Spec.NodeSelector = wantsPod.NodeSelector ss.Spec.Template.Spec.Affinity = wantsPod.Affinity ss.Spec.Template.Spec.Tolerations = wantsPod.Tolerations + ss.Spec.Template.Spec.PriorityClassName = wantsPod.PriorityClassName ss.Spec.Template.Spec.TopologySpreadConstraints = wantsPod.TopologySpreadConstraints // Update containers. diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index afa791ccc..e2cb2962f 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -76,6 +76,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { NodeSelector: map[string]string{"beta.kubernetes.io/os": "linux"}, Affinity: &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{}}}, Tolerations: []corev1.Toleration{{Key: "", Operator: "Exists"}}, + PriorityClassName: "high-priority", TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ { WhenUnsatisfiable: "DoNotSchedule", @@ -198,6 +199,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Spec.Containers[0].ImagePullPolicy = "IfNotPresent" wantSS.Spec.Template.Spec.InitContainers[0].Image = "ghcr.io/my-repo/tailscale:v0.01testsomething" wantSS.Spec.Template.Spec.InitContainers[0].ImagePullPolicy = "IfNotPresent" + wantSS.Spec.Template.Spec.PriorityClassName = proxyClassAllOpts.Spec.StatefulSet.Pod.PriorityClassName gotSS := applyProxyClassToStatefulSet(proxyClassAllOpts, nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { @@ -236,6 +238,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Spec.Containers[0].Env = append(wantSS.Spec.Template.Spec.Containers[0].Env, []corev1.EnvVar{{Name: "foo", Value: "bar"}, {Name: "TS_USERSPACE", Value: "true"}, {Name: "bar"}}...) wantSS.Spec.Template.Spec.Containers[0].ImagePullPolicy = "IfNotPresent" wantSS.Spec.Template.Spec.Containers[0].Image = "ghcr.io/my-repo/tailscale:v0.01testsomething" + wantSS.Spec.Template.Spec.PriorityClassName = proxyClassAllOpts.Spec.StatefulSet.Pod.PriorityClassName gotSS = applyProxyClassToStatefulSet(proxyClassAllOpts, userspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { t.Errorf("Unexpected result applying ProxyClass with all options to a StatefulSet for a userspace proxy (-got +want):\n%s", diff) diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 564c87f50..93a024b31 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -515,6 +515,7 @@ _Appears in:_ | `nodeSelector` _object (keys:string, values:string)_ | Proxy Pod's node selector.
                By default Tailscale Kubernetes operator does not apply any node
                selector.
                https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | | `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | Proxy Pod's tolerations.
                By default Tailscale Kubernetes operator does not apply any
                tolerations.
                https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | | `topologySpreadConstraints` _[TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#topologyspreadconstraint-v1-core) array_ | Proxy Pod's topology spread constraints.
                By default Tailscale Kubernetes operator does not apply any topology spread constraints.
                https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ | | | +| `priorityClassName` _string_ | PriorityClassName for the proxy Pod.
                By default Tailscale Kubernetes operator does not apply any priority class.
                https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | #### PortRange diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index 6a4114bfa..ea4e6a27c 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -298,6 +298,11 @@ type Pod struct { // https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ // +optional TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + // PriorityClassName for the proxy Pod. + // By default Tailscale Kubernetes operator does not apply any priority class. + // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + // +optional + PriorityClassName string `json:"priorityClassName,omitempty"` } // +kubebuilder:validation:XValidation:rule="!(has(self.serviceMonitor) && self.serviceMonitor.enable && !self.enable)",message="ServiceMonitor can only be enabled if metrics are enabled" From 5865d0a61a493ecbb15d33a9b84263952a81d7b0 Mon Sep 17 00:00:00 2001 From: mzbenami Date: Fri, 1 Aug 2025 13:30:42 -0400 Subject: [PATCH 1156/1708] Makefile: 'generate' target (#16746) Signed-off-by: Michael Ben-Ami --- Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Makefile b/Makefile index 55e55f209..9fffdc48a 100644 --- a/Makefile +++ b/Makefile @@ -133,6 +133,10 @@ sshintegrationtest: ## Run the SSH integration tests in various Docker container echo "Testing on ubuntu:noble" && docker build --build-arg="BASE=ubuntu:noble" -t ssh-ubuntu-noble ssh/tailssh/testcontainers && \ echo "Testing on alpine:latest" && docker build --build-arg="BASE=alpine:latest" -t ssh-alpine-latest ssh/tailssh/testcontainers +.PHONY: generate +generate: ## Generate code + ./tool/go generate ./... + help: ## Show this help @echo "\nSpecify a command. The choices are:\n" @grep -hE '^[0-9a-zA-Z_-]+:.*?## .*$$' ${MAKEFILE_LIST} | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[0;36m%-20s\033[m %s\n", $$1, $$2}' From d897d809d649b312a3f87d01d9f9426d518cdced Mon Sep 17 00:00:00 2001 From: kari-ts <135075563+kari-ts@users.noreply.github.com> Date: Fri, 1 Aug 2025 15:10:00 -0700 Subject: [PATCH 1157/1708] feature/taildrop: do not use m.opts.Dir for Android (#16316) In Android, we are prompting the user to select a Taildrop directory when they first receive a Taildrop: we block writes on Taildrop dir selection. This means that we cannot use Dir inside managerOptions, since the http request would not get the new Taildrop extension. This PR removes, in the Android case, the reliance on m.opts.Dir, and instead has FileOps hold the correct directory. This expands FileOps to be the Taildrop interface for all file system operations. 
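To illustrate the shape of this change (a minimal sketch, not the code in this patch): call sites stop joining baseName against m.opts.Dir themselves and instead go through a FileOps value that owns its root; on Android a SAF-backed type satisfies the same interface. The fsOps type and directory below are hypothetical stand-ins.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// FileOps here is a trimmed-down stand-in for the interface added by this
// patch (only Remove is shown); the implementation owns its root, so callers
// pass basenames only.
type FileOps interface {
	Remove(name string) error
}

// fsOps is a hypothetical local-filesystem implementation rooted at dir,
// analogous to this patch's fsFileOps.
type fsOps struct{ dir string }

func (f fsOps) Remove(name string) error {
	return os.Remove(filepath.Join(f.dir, name))
}

func main() {
	var ops FileOps = fsOps{dir: os.TempDir()}
	// Call sites no longer join against m.opts.Dir themselves; the FileOps
	// implementation resolves the path (or SAF URI on Android) internally.
	if err := ops.Remove("foo.partial"); err != nil {
		fmt.Println("remove:", err)
	}
}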
Updates tailscale/corp#29211 Signed-off-by: kari-ts restore tstest --- feature/taildrop/delete.go | 52 +++--- feature/taildrop/delete_test.go | 34 ++-- feature/taildrop/ext.go | 67 +++----- feature/taildrop/fileops.go | 41 +++++ feature/taildrop/fileops_fs.go | 221 ++++++++++++++++++++++++ feature/taildrop/paths.go | 2 +- feature/taildrop/peerapi_test.go | 41 +++-- feature/taildrop/resume.go | 28 ++-- feature/taildrop/resume_test.go | 9 +- feature/taildrop/retrieve.go | 116 +++++++------ feature/taildrop/send.go | 270 ++++-------------------------- feature/taildrop/send_test.go | 131 ++++----------- feature/taildrop/taildrop.go | 83 +++------ feature/taildrop/taildrop_test.go | 56 +++---- 14 files changed, 555 insertions(+), 596 deletions(-) create mode 100644 feature/taildrop/fileops.go create mode 100644 feature/taildrop/fileops_fs.go diff --git a/feature/taildrop/delete.go b/feature/taildrop/delete.go index e9c8d7f1c..0b7259879 100644 --- a/feature/taildrop/delete.go +++ b/feature/taildrop/delete.go @@ -6,9 +6,7 @@ package taildrop import ( "container/list" "context" - "io/fs" "os" - "path/filepath" "strings" "sync" "time" @@ -28,7 +26,6 @@ const deleteDelay = time.Hour type fileDeleter struct { logf logger.Logf clock tstime.DefaultClock - dir string event func(string) // called for certain events; for testing only mu sync.Mutex @@ -39,6 +36,7 @@ type fileDeleter struct { group syncs.WaitGroup shutdownCtx context.Context shutdown context.CancelFunc + fs FileOps // must be used for all filesystem operations } // deleteFile is a specific file to delete after deleteDelay. @@ -50,15 +48,14 @@ type deleteFile struct { func (d *fileDeleter) Init(m *manager, eventHook func(string)) { d.logf = m.opts.Logf d.clock = m.opts.Clock - d.dir = m.opts.Dir d.event = eventHook + d.fs = m.opts.fileOps d.byName = make(map[string]*list.Element) d.emptySignal = make(chan struct{}) d.shutdownCtx, d.shutdown = context.WithCancel(context.Background()) // From a cold-start, load the list of partial and deleted files. - // // Only run this if we have ever received at least one file // to avoid ever touching the taildrop directory on systems (e.g., MacOS) // that pop up a security dialog window upon first access. @@ -71,38 +68,45 @@ func (d *fileDeleter) Init(m *manager, eventHook func(string)) { d.group.Go(func() { d.event("start full-scan") defer d.event("end full-scan") - rangeDir(d.dir, func(de fs.DirEntry) bool { + + if d.fs == nil { + d.logf("deleter: nil FileOps") + } + + files, err := d.fs.ListFiles() + if err != nil { + d.logf("deleter: ListDir error: %v", err) + return + } + for _, filename := range files { switch { case d.shutdownCtx.Err() != nil: - return false // terminate early - case !de.Type().IsRegular(): - return true - case strings.HasSuffix(de.Name(), partialSuffix): + return // terminate early + case strings.HasSuffix(filename, partialSuffix): // Only enqueue the file for deletion if there is no active put. - nameID := strings.TrimSuffix(de.Name(), partialSuffix) + nameID := strings.TrimSuffix(filename, partialSuffix) if i := strings.LastIndexByte(nameID, '.'); i > 0 { key := incomingFileKey{clientID(nameID[i+len("."):]), nameID[:i]} m.incomingFiles.LoadFunc(key, func(_ *incomingFile, loaded bool) { if !loaded { - d.Insert(de.Name()) + d.Insert(filename) } }) } else { - d.Insert(de.Name()) + d.Insert(filename) } - case strings.HasSuffix(de.Name(), deletedSuffix): + case strings.HasSuffix(filename, deletedSuffix): // Best-effort immediate deletion of deleted files. 
- name := strings.TrimSuffix(de.Name(), deletedSuffix) - if os.Remove(filepath.Join(d.dir, name)) == nil { - if os.Remove(filepath.Join(d.dir, de.Name())) == nil { - break + name := strings.TrimSuffix(filename, deletedSuffix) + if d.fs.Remove(name) == nil { + if d.fs.Remove(filename) == nil { + continue } } - // Otherwise, enqueue the file for later deletion. - d.Insert(de.Name()) + // Otherwise enqueue for later deletion. + d.Insert(filename) } - return true - }) + } }) } @@ -149,13 +153,13 @@ func (d *fileDeleter) waitAndDelete(wait time.Duration) { // Delete the expired file. if name, ok := strings.CutSuffix(file.name, deletedSuffix); ok { - if err := os.Remove(filepath.Join(d.dir, name)); err != nil && !os.IsNotExist(err) { + if err := d.fs.Remove(name); err != nil && !os.IsNotExist(err) { d.logf("could not delete: %v", redactError(err)) failed = append(failed, elem) continue } } - if err := os.Remove(filepath.Join(d.dir, file.name)); err != nil && !os.IsNotExist(err) { + if err := d.fs.Remove(file.name); err != nil && !os.IsNotExist(err) { d.logf("could not delete: %v", redactError(err)) failed = append(failed, elem) continue diff --git a/feature/taildrop/delete_test.go b/feature/taildrop/delete_test.go index 7a58de55c..36950f582 100644 --- a/feature/taildrop/delete_test.go +++ b/feature/taildrop/delete_test.go @@ -5,7 +5,6 @@ package taildrop import ( "os" - "path/filepath" "slices" "testing" "time" @@ -20,11 +19,20 @@ import ( func TestDeleter(t *testing.T) { dir := t.TempDir() - must.Do(touchFile(filepath.Join(dir, "foo.partial"))) - must.Do(touchFile(filepath.Join(dir, "bar.partial"))) - must.Do(touchFile(filepath.Join(dir, "fizz"))) - must.Do(touchFile(filepath.Join(dir, "fizz.deleted"))) - must.Do(touchFile(filepath.Join(dir, "buzz.deleted"))) // lacks a matching "buzz" file + var m manager + var fd fileDeleter + m.opts.Logf = t.Logf + m.opts.Clock = tstime.DefaultClock{Clock: tstest.NewClock(tstest.ClockOpts{ + Start: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), + })} + m.opts.State = must.Get(mem.New(nil, "")) + m.opts.fileOps, _ = newFileOps(dir) + + must.Do(m.touchFile("foo.partial")) + must.Do(m.touchFile("bar.partial")) + must.Do(m.touchFile("fizz")) + must.Do(m.touchFile("fizz.deleted")) + must.Do(m.touchFile("buzz.deleted")) // lacks a matching "buzz" file checkDirectory := func(want ...string) { t.Helper() @@ -69,12 +77,10 @@ func TestDeleter(t *testing.T) { } eventHook := func(event string) { eventsChan <- event } - var m manager - var fd fileDeleter m.opts.Logf = t.Logf m.opts.Clock = tstime.DefaultClock{Clock: clock} - m.opts.Dir = dir m.opts.State = must.Get(mem.New(nil, "")) + m.opts.fileOps, _ = newFileOps(dir) must.Do(m.opts.State.WriteState(ipn.TaildropReceivedKey, []byte{1})) fd.Init(&m, eventHook) defer fd.Shutdown() @@ -100,17 +106,17 @@ func TestDeleter(t *testing.T) { checkEvents("end waitAndDelete") checkDirectory() - must.Do(touchFile(filepath.Join(dir, "one.partial"))) + must.Do(m.touchFile("one.partial")) insert("one.partial") checkEvents("start waitAndDelete") advance(deleteDelay / 4) - must.Do(touchFile(filepath.Join(dir, "two.partial"))) + must.Do(m.touchFile("two.partial")) insert("two.partial") advance(deleteDelay / 4) - must.Do(touchFile(filepath.Join(dir, "three.partial"))) + must.Do(m.touchFile("three.partial")) insert("three.partial") advance(deleteDelay / 4) - must.Do(touchFile(filepath.Join(dir, "four.partial"))) + must.Do(m.touchFile("four.partial")) insert("four.partial") advance(deleteDelay / 4) @@ -145,8 +151,8 @@ func 
TestDeleterInitWithoutTaildrop(t *testing.T) { var m manager var fd fileDeleter m.opts.Logf = t.Logf - m.opts.Dir = t.TempDir() m.opts.State = must.Get(mem.New(nil, "")) + m.opts.fileOps, _ = newFileOps(t.TempDir()) fd.Init(&m, func(event string) { t.Errorf("unexpected event: %v", event) }) fd.Shutdown() } diff --git a/feature/taildrop/ext.go b/feature/taildrop/ext.go index c11fe3af4..f8f45b53f 100644 --- a/feature/taildrop/ext.go +++ b/feature/taildrop/ext.go @@ -10,7 +10,6 @@ import ( "fmt" "io" "maps" - "os" "path/filepath" "runtime" "slices" @@ -75,7 +74,7 @@ type Extension struct { // FileOps abstracts platform-specific file operations needed for file transfers. // This is currently being used for Android to use the Storage Access Framework. - FileOps FileOps + fileOps FileOps nodeBackendForTest ipnext.NodeBackend // if non-nil, pretend we're this node state for tests @@ -89,30 +88,6 @@ type Extension struct { outgoingFiles map[string]*ipn.OutgoingFile } -// safDirectoryPrefix is used to determine if the directory is managed via SAF. -const SafDirectoryPrefix = "content://" - -// PutMode controls how Manager.PutFile writes files to storage. -// -// PutModeDirect – write files directly to a filesystem path (default). -// PutModeAndroidSAF – use Android’s Storage Access Framework (SAF), where -// the OS manages the underlying directory permissions. -type PutMode int - -const ( - PutModeDirect PutMode = iota - PutModeAndroidSAF -) - -// FileOps defines platform-specific file operations. -type FileOps interface { - OpenFileWriter(filename string) (io.WriteCloser, string, error) - - // RenamePartialFile finalizes a partial file. - // It returns the new SAF URI as a string and an error. - RenamePartialFile(partialUri, targetDirUri, targetName string) (string, error) -} - func (e *Extension) Name() string { return "taildrop" } @@ -176,23 +151,34 @@ func (e *Extension) onChangeProfile(profile ipn.LoginProfileView, _ ipn.PrefsVie return } - // If we have a netmap, create a taildrop manager. - fileRoot, isDirectFileMode := e.fileRoot(uid, activeLogin) - if fileRoot == "" { - e.logf("no Taildrop directory configured") - } - mode := PutModeDirect - if e.directFileRoot != "" && strings.HasPrefix(e.directFileRoot, SafDirectoryPrefix) { - mode = PutModeAndroidSAF + // Use the provided [FileOps] implementation (typically for SAF access on Android), + // or create an [fsFileOps] instance rooted at fileRoot. + // + // A non-nil [FileOps] also implies that we are in DirectFileMode. 
+ fops := e.fileOps + isDirectFileMode := fops != nil + if fops == nil { + var fileRoot string + if fileRoot, isDirectFileMode = e.fileRoot(uid, activeLogin); fileRoot == "" { + e.logf("no Taildrop directory configured") + e.setMgrLocked(nil) + return + } + + var err error + if fops, err = newFileOps(fileRoot); err != nil { + e.logf("taildrop: cannot create FileOps: %v", err) + e.setMgrLocked(nil) + return + } } + e.setMgrLocked(managerOptions{ Logf: e.logf, Clock: tstime.DefaultClock{Clock: e.sb.Clock()}, State: e.stateStore, - Dir: fileRoot, DirectFileMode: isDirectFileMode, - FileOps: e.FileOps, - Mode: mode, + fileOps: fops, SendFileNotify: e.sendFileNotify, }.New()) } @@ -221,12 +207,7 @@ func (e *Extension) fileRoot(uid tailcfg.UserID, activeLogin string) (root strin baseDir := fmt.Sprintf("%s-uid-%d", strings.ReplaceAll(activeLogin, "@", "-"), uid) - dir := filepath.Join(varRoot, "files", baseDir) - if err := os.MkdirAll(dir, 0700); err != nil { - e.logf("Taildrop disabled; error making directory: %v", err) - return "", false - } - return dir, false + return filepath.Join(varRoot, "files", baseDir), false } // hasCapFileSharing reports whether the current node has the file sharing diff --git a/feature/taildrop/fileops.go b/feature/taildrop/fileops.go new file mode 100644 index 000000000..14f76067a --- /dev/null +++ b/feature/taildrop/fileops.go @@ -0,0 +1,41 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package taildrop + +import ( + "io" + "io/fs" + "os" +) + +// FileOps abstracts over both local‐FS paths and Android SAF URIs. +type FileOps interface { + // OpenWriter creates or truncates a file named relative to the receiver's root, + // seeking to the specified offset. If the file does not exist, it is created with mode perm + // on platforms that support it. + // + // It returns an [io.WriteCloser] and the file's absolute path, or an error. + // This call may block. Callers should avoid holding locks when calling OpenWriter. + OpenWriter(name string, offset int64, perm os.FileMode) (wc io.WriteCloser, path string, err error) + + // Remove deletes a file or directory relative to the receiver's root. + // It returns [io.ErrNotExist] if the file or directory does not exist. + Remove(name string) error + + // Rename atomically renames oldPath to a new file named newName, + // returning the full new path or an error. + Rename(oldPath, newName string) (newPath string, err error) + + // ListFiles returns just the basenames of all regular files + // in the root directory. + ListFiles() ([]string, error) + + // Stat returns the FileInfo for the given name or an error. + Stat(name string) (fs.FileInfo, error) + + // OpenReader opens the given basename for the given name or an error. + OpenReader(name string) (io.ReadCloser, error) +} + +var newFileOps func(dir string) (FileOps, error) diff --git a/feature/taildrop/fileops_fs.go b/feature/taildrop/fileops_fs.go new file mode 100644 index 000000000..4fecbe4af --- /dev/null +++ b/feature/taildrop/fileops_fs.go @@ -0,0 +1,221 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause +//go:build !android + +package taildrop + +import ( + "bytes" + "crypto/sha256" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path" + "path/filepath" + "strings" + "sync" + "unicode/utf8" +) + +var renameMu sync.Mutex + +// fsFileOps implements FileOps using the local filesystem rooted at a directory. +// It is used on non-Android platforms. 
+type fsFileOps struct{ rootDir string } + +func init() { + newFileOps = func(dir string) (FileOps, error) { + if dir == "" { + return nil, errors.New("rootDir cannot be empty") + } + if err := os.MkdirAll(dir, 0o700); err != nil { + return nil, fmt.Errorf("mkdir %q: %w", dir, err) + } + return fsFileOps{rootDir: dir}, nil + } +} + +func (f fsFileOps) OpenWriter(name string, offset int64, perm os.FileMode) (io.WriteCloser, string, error) { + path, err := joinDir(f.rootDir, name) + if err != nil { + return nil, "", err + } + if err = os.MkdirAll(filepath.Dir(path), 0o700); err != nil { + return nil, "", err + } + fi, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, perm) + if err != nil { + return nil, "", err + } + if offset != 0 { + curr, err := fi.Seek(0, io.SeekEnd) + if err != nil { + fi.Close() + return nil, "", err + } + if offset < 0 || offset > curr { + fi.Close() + return nil, "", fmt.Errorf("offset %d out of range", offset) + } + if _, err := fi.Seek(offset, io.SeekStart); err != nil { + fi.Close() + return nil, "", err + } + if err := fi.Truncate(offset); err != nil { + fi.Close() + return nil, "", err + } + } + return fi, path, nil +} + +func (f fsFileOps) Remove(name string) error { + path, err := joinDir(f.rootDir, name) + if err != nil { + return err + } + return os.Remove(path) +} + +// Rename moves the partial file into its final name. +// newName must be a base name (not absolute or containing path separators). +// It will retry up to 10 times, de-dup same-checksum files, etc. +func (f fsFileOps) Rename(oldPath, newName string) (newPath string, err error) { + var dst string + if filepath.IsAbs(newName) || strings.ContainsRune(newName, os.PathSeparator) { + return "", fmt.Errorf("invalid newName %q: must not be an absolute path or contain path separators", newName) + } + + dst = filepath.Join(f.rootDir, newName) + + if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil { + return "", err + } + + st, err := os.Stat(oldPath) + if err != nil { + return "", err + } + wantSize := st.Size() + + const maxRetries = 10 + for i := 0; i < maxRetries; i++ { + renameMu.Lock() + fi, statErr := os.Stat(dst) + // Atomically rename the partial file as the destination file if it doesn't exist. + // Otherwise, it returns the length of the current destination file. + // The operation is atomic. + if os.IsNotExist(statErr) { + err = os.Rename(oldPath, dst) + renameMu.Unlock() + if err != nil { + return "", err + } + return dst, nil + } + if statErr != nil { + renameMu.Unlock() + return "", statErr + } + gotSize := fi.Size() + renameMu.Unlock() + + // Avoid the final rename if a destination file has the same contents. + // + // Note: this is best effort and copying files from iOS from the Media Library + // results in processing on the iOS side which means the size and shas of the + // same file can be different. + if gotSize == wantSize { + sumP, err := sha256File(oldPath) + if err != nil { + return "", err + } + sumD, err := sha256File(dst) + if err != nil { + return "", err + } + if bytes.Equal(sumP[:], sumD[:]) { + if err := os.Remove(oldPath); err != nil { + return "", err + } + return dst, nil + } + } + + // Choose a new destination filename and try again. + dst = filepath.Join(filepath.Dir(dst), nextFilename(filepath.Base(dst))) + } + + return "", fmt.Errorf("too many retries trying to rename %q to %q", oldPath, newName) +} + +// sha256File computes the SHA‑256 of a file. 
+func sha256File(path string) (sum [sha256.Size]byte, _ error) { + f, err := os.Open(path) + if err != nil { + return sum, err + } + defer f.Close() + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + return sum, err + } + copy(sum[:], h.Sum(nil)) + return sum, nil +} + +func (f fsFileOps) ListFiles() ([]string, error) { + entries, err := os.ReadDir(f.rootDir) + if err != nil { + return nil, err + } + var names []string + for _, e := range entries { + if e.Type().IsRegular() { + names = append(names, e.Name()) + } + } + return names, nil +} + +func (f fsFileOps) Stat(name string) (fs.FileInfo, error) { + path, err := joinDir(f.rootDir, name) + if err != nil { + return nil, err + } + return os.Stat(path) +} + +func (f fsFileOps) OpenReader(name string) (io.ReadCloser, error) { + path, err := joinDir(f.rootDir, name) + if err != nil { + return nil, err + } + return os.Open(path) +} + +// joinDir is like [filepath.Join] but returns an error if baseName is too long, +// is a relative path instead of a basename, or is otherwise invalid or unsafe for incoming files. +func joinDir(dir, baseName string) (string, error) { + if !utf8.ValidString(baseName) || + strings.TrimSpace(baseName) != baseName || + len(baseName) > 255 { + return "", ErrInvalidFileName + } + // TODO: validate unicode normalization form too? Varies by platform. + clean := path.Clean(baseName) + if clean != baseName || clean == "." || clean == ".." { + return "", ErrInvalidFileName + } + for _, r := range baseName { + if !validFilenameRune(r) { + return "", ErrInvalidFileName + } + } + if !filepath.IsLocal(baseName) { + return "", ErrInvalidFileName + } + return filepath.Join(dir, baseName), nil +} diff --git a/feature/taildrop/paths.go b/feature/taildrop/paths.go index 22d01160c..79dc37d8f 100644 --- a/feature/taildrop/paths.go +++ b/feature/taildrop/paths.go @@ -21,7 +21,7 @@ func (e *Extension) SetDirectFileRoot(root string) { // SetFileOps sets the platform specific file operations. This is used // to call Android's Storage Access Framework APIs. func (e *Extension) SetFileOps(fileOps FileOps) { - e.FileOps = fileOps + e.fileOps = fileOps } func (e *Extension) setPlatformDefaultDirectFileRoot() { diff --git a/feature/taildrop/peerapi_test.go b/feature/taildrop/peerapi_test.go index 1a003b6ed..633997354 100644 --- a/feature/taildrop/peerapi_test.go +++ b/feature/taildrop/peerapi_test.go @@ -24,6 +24,7 @@ import ( "tailscale.com/tstest" "tailscale.com/tstime" "tailscale.com/types/logger" + "tailscale.com/util/must" ) // peerAPIHandler serves the PeerAPI for a source specific client. 
@@ -93,7 +94,16 @@ func bodyContains(sub string) check { func fileHasSize(name string, size int) check { return func(t *testing.T, e *peerAPITestEnv) { - root := e.taildrop.Dir() + fsImpl, ok := e.taildrop.opts.fileOps.(*fsFileOps) + if !ok { + t.Skip("fileHasSize only supported on fsFileOps backend") + return + } + root := fsImpl.rootDir + if root == "" { + t.Errorf("no rootdir; can't check whether %q has size %v", name, size) + return + } if root == "" { t.Errorf("no rootdir; can't check whether %q has size %v", name, size) return @@ -109,12 +119,12 @@ func fileHasSize(name string, size int) check { func fileHasContents(name string, want string) check { return func(t *testing.T, e *peerAPITestEnv) { - root := e.taildrop.Dir() - if root == "" { - t.Errorf("no rootdir; can't check contents of %q", name) + fsImpl, ok := e.taildrop.opts.fileOps.(*fsFileOps) + if !ok { + t.Skip("fileHasContents only supported on fsFileOps backend") return } - path := filepath.Join(root, name) + path := filepath.Join(fsImpl.rootDir, name) got, err := os.ReadFile(path) if err != nil { t.Errorf("fileHasContents: %v", err) @@ -172,9 +182,10 @@ func TestHandlePeerAPI(t *testing.T) { reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo", nil)}, checks: checks( httpStatus(http.StatusForbidden), - bodyContains("Taildrop disabled; no storage directory"), + bodyContains("Taildrop disabled"), ), }, + { name: "bad_method", isSelf: true, @@ -471,14 +482,18 @@ func TestHandlePeerAPI(t *testing.T) { selfNode.CapMap = tailcfg.NodeCapMap{tailcfg.CapabilityDebug: nil} } var rootDir string + var fo FileOps if !tt.omitRoot { - rootDir = t.TempDir() + var err error + if fo, err = newFileOps(t.TempDir()); err != nil { + t.Fatalf("newFileOps: %v", err) + } } var e peerAPITestEnv e.taildrop = managerOptions{ - Logf: e.logBuf.Logf, - Dir: rootDir, + Logf: e.logBuf.Logf, + fileOps: fo, }.New() ext := &fakeExtension{ @@ -490,9 +505,7 @@ func TestHandlePeerAPI(t *testing.T) { e.ph = &peerAPIHandler{ isSelf: tt.isSelf, selfNode: selfNode.View(), - peerNode: (&tailcfg.Node{ - ComputedName: "some-peer-name", - }).View(), + peerNode: (&tailcfg.Node{ComputedName: "some-peer-name"}).View(), } for _, req := range tt.reqs { e.rr = httptest.NewRecorder() @@ -526,8 +539,8 @@ func TestHandlePeerAPI(t *testing.T) { func TestFileDeleteRace(t *testing.T) { dir := t.TempDir() taildropMgr := managerOptions{ - Logf: t.Logf, - Dir: dir, + Logf: t.Logf, + fileOps: must.Get(newFileOps(dir)), }.New() ph := &peerAPIHandler{ diff --git a/feature/taildrop/resume.go b/feature/taildrop/resume.go index 211a1ff6b..20ef527a6 100644 --- a/feature/taildrop/resume.go +++ b/feature/taildrop/resume.go @@ -9,7 +9,6 @@ import ( "encoding/hex" "fmt" "io" - "io/fs" "os" "strings" ) @@ -51,19 +50,20 @@ func (cs *checksum) UnmarshalText(b []byte) error { // PartialFiles returns a list of partial files in [Handler.Dir] // that were sent (or is actively being sent) by the provided id. 
-func (m *manager) PartialFiles(id clientID) (ret []string, err error) { - if m == nil || m.opts.Dir == "" { +func (m *manager) PartialFiles(id clientID) ([]string, error) { + if m == nil || m.opts.fileOps == nil { return nil, ErrNoTaildrop } - suffix := id.partialSuffix() - if err := rangeDir(m.opts.Dir, func(de fs.DirEntry) bool { - if name := de.Name(); strings.HasSuffix(name, suffix) { - ret = append(ret, name) + files, err := m.opts.fileOps.ListFiles() + if err != nil { + return nil, redactError(err) + } + var ret []string + for _, filename := range files { + if strings.HasSuffix(filename, suffix) { + ret = append(ret, filename) } - return true - }); err != nil { - return ret, redactError(err) } return ret, nil } @@ -73,17 +73,13 @@ func (m *manager) PartialFiles(id clientID) (ret []string, err error) { // It returns (BlockChecksum{}, io.EOF) when the stream is complete. // It is the caller's responsibility to call close. func (m *manager) HashPartialFile(id clientID, baseName string) (next func() (blockChecksum, error), close func() error, err error) { - if m == nil || m.opts.Dir == "" { + if m == nil || m.opts.fileOps == nil { return nil, nil, ErrNoTaildrop } noopNext := func() (blockChecksum, error) { return blockChecksum{}, io.EOF } noopClose := func() error { return nil } - dstFile, err := joinDir(m.opts.Dir, baseName) - if err != nil { - return nil, nil, err - } - f, err := os.Open(dstFile + id.partialSuffix()) + f, err := m.opts.fileOps.OpenReader(baseName + id.partialSuffix()) if err != nil { if os.IsNotExist(err) { return noopNext, noopClose, nil diff --git a/feature/taildrop/resume_test.go b/feature/taildrop/resume_test.go index dac3c657b..4e59d401d 100644 --- a/feature/taildrop/resume_test.go +++ b/feature/taildrop/resume_test.go @@ -8,6 +8,7 @@ import ( "io" "math/rand" "os" + "path/filepath" "testing" "testing/iotest" @@ -19,7 +20,9 @@ func TestResume(t *testing.T) { defer func() { blockSize = oldBlockSize }() blockSize = 256 - m := managerOptions{Logf: t.Logf, Dir: t.TempDir()}.New() + dir := t.TempDir() + + m := managerOptions{Logf: t.Logf, fileOps: must.Get(newFileOps(dir))}.New() defer m.Shutdown() rn := rand.New(rand.NewSource(0)) @@ -37,7 +40,7 @@ func TestResume(t *testing.T) { must.Do(close()) // Windows wants the file handle to be closed to rename it. must.Get(m.PutFile("", "foo", r, offset, -1)) - got := must.Get(os.ReadFile(must.Get(joinDir(m.opts.Dir, "foo")))) + got := must.Get(os.ReadFile(filepath.Join(dir, "foo"))) if !bytes.Equal(got, want) { t.Errorf("content mismatches") } @@ -66,7 +69,7 @@ func TestResume(t *testing.T) { t.Fatalf("too many iterations to complete the test") } } - got := must.Get(os.ReadFile(must.Get(joinDir(m.opts.Dir, "bar")))) + got := must.Get(os.ReadFile(filepath.Join(dir, "bar"))) if !bytes.Equal(got, want) { t.Errorf("content mismatches") } diff --git a/feature/taildrop/retrieve.go b/feature/taildrop/retrieve.go index 6fb975193..b048a1b3b 100644 --- a/feature/taildrop/retrieve.go +++ b/feature/taildrop/retrieve.go @@ -9,19 +9,19 @@ import ( "io" "io/fs" "os" - "path/filepath" "runtime" "sort" "time" "tailscale.com/client/tailscale/apitype" "tailscale.com/logtail/backoff" + "tailscale.com/util/set" ) // HasFilesWaiting reports whether any files are buffered in [Handler.Dir]. // This always returns false when [Handler.DirectFileMode] is false. 
-func (m *manager) HasFilesWaiting() (has bool) { - if m == nil || m.opts.Dir == "" || m.opts.DirectFileMode { +func (m *manager) HasFilesWaiting() bool { + if m == nil || m.opts.fileOps == nil || m.opts.DirectFileMode { return false } @@ -30,63 +30,66 @@ func (m *manager) HasFilesWaiting() (has bool) { // has-files-or-not values as the macOS/iOS client might // in the future use+delete the files directly. So only // keep this negative cache. - totalReceived := m.totalReceived.Load() - if totalReceived == m.emptySince.Load() { + total := m.totalReceived.Load() + if total == m.emptySince.Load() { return false } - // Check whether there is at least one one waiting file. - err := rangeDir(m.opts.Dir, func(de fs.DirEntry) bool { - name := de.Name() - if isPartialOrDeleted(name) || !de.Type().IsRegular() { - return true + files, err := m.opts.fileOps.ListFiles() + if err != nil { + return false + } + + // Build a set of filenames present in Dir + fileSet := set.Of(files...) + + for _, filename := range files { + if isPartialOrDeleted(filename) { + continue } - _, err := os.Stat(filepath.Join(m.opts.Dir, name+deletedSuffix)) - if os.IsNotExist(err) { - has = true - return false + if fileSet.Contains(filename + deletedSuffix) { + continue // already handled } + // Found at least one downloadable file return true - }) - - // If there are no more waiting files, record totalReceived as emptySince - // so that we can short-circuit the expensive directory traversal - // if no files have been received after the start of this call. - if err == nil && !has { - m.emptySince.Store(totalReceived) } - return has + + // No waiting files → update negative‑result cache + m.emptySince.Store(total) + return false } // WaitingFiles returns the list of files that have been sent by a // peer that are waiting in [Handler.Dir]. // This always returns nil when [Handler.DirectFileMode] is false. -func (m *manager) WaitingFiles() (ret []apitype.WaitingFile, err error) { - if m == nil || m.opts.Dir == "" { +func (m *manager) WaitingFiles() ([]apitype.WaitingFile, error) { + if m == nil || m.opts.fileOps == nil { return nil, ErrNoTaildrop } if m.opts.DirectFileMode { return nil, nil } - if err := rangeDir(m.opts.Dir, func(de fs.DirEntry) bool { - name := de.Name() - if isPartialOrDeleted(name) || !de.Type().IsRegular() { - return true + names, err := m.opts.fileOps.ListFiles() + if err != nil { + return nil, redactError(err) + } + var ret []apitype.WaitingFile + for _, name := range names { + if isPartialOrDeleted(name) { + continue } - _, err := os.Stat(filepath.Join(m.opts.Dir, name+deletedSuffix)) - if os.IsNotExist(err) { - fi, err := de.Info() - if err != nil { - return true - } - ret = append(ret, apitype.WaitingFile{ - Name: filepath.Base(name), - Size: fi.Size(), - }) + // A corresponding .deleted marker means the file was already handled. + if _, err := m.opts.fileOps.Stat(name + deletedSuffix); err == nil { + continue } - return true - }); err != nil { - return nil, redactError(err) + fi, err := m.opts.fileOps.Stat(name) + if err != nil { + continue + } + ret = append(ret, apitype.WaitingFile{ + Name: name, + Size: fi.Size(), + }) } sort.Slice(ret, func(i, j int) bool { return ret[i].Name < ret[j].Name }) return ret, nil @@ -95,21 +98,18 @@ func (m *manager) WaitingFiles() (ret []apitype.WaitingFile, err error) { // DeleteFile deletes a file of the given baseName from [Handler.Dir]. // This method is only allowed when [Handler.DirectFileMode] is false. 
func (m *manager) DeleteFile(baseName string) error { - if m == nil || m.opts.Dir == "" { + if m == nil || m.opts.fileOps == nil { return ErrNoTaildrop } if m.opts.DirectFileMode { return errors.New("deletes not allowed in direct mode") } - path, err := joinDir(m.opts.Dir, baseName) - if err != nil { - return err - } + var bo *backoff.Backoff logf := m.opts.Logf t0 := m.opts.Clock.Now() for { - err := os.Remove(path) + err := m.opts.fileOps.Remove(baseName) if err != nil && !os.IsNotExist(err) { err = redactError(err) // Put a retry loop around deletes on Windows. @@ -129,7 +129,7 @@ func (m *manager) DeleteFile(baseName string) error { bo.BackOff(context.Background(), err) continue } - if err := touchFile(path + deletedSuffix); err != nil { + if err := m.touchFile(baseName + deletedSuffix); err != nil { logf("peerapi: failed to leave deleted marker: %v", err) } m.deleter.Insert(baseName + deletedSuffix) @@ -141,35 +141,31 @@ func (m *manager) DeleteFile(baseName string) error { } } -func touchFile(path string) error { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0666) +func (m *manager) touchFile(name string) error { + wc, _, err := m.opts.fileOps.OpenWriter(name /* offset= */, 0, 0666) if err != nil { return redactError(err) } - return f.Close() + return wc.Close() } // OpenFile opens a file of the given baseName from [Handler.Dir]. // This method is only allowed when [Handler.DirectFileMode] is false. func (m *manager) OpenFile(baseName string) (rc io.ReadCloser, size int64, err error) { - if m == nil || m.opts.Dir == "" { + if m == nil || m.opts.fileOps == nil { return nil, 0, ErrNoTaildrop } if m.opts.DirectFileMode { return nil, 0, errors.New("opens not allowed in direct mode") } - path, err := joinDir(m.opts.Dir, baseName) - if err != nil { - return nil, 0, err - } - if _, err := os.Stat(path + deletedSuffix); err == nil { - return nil, 0, redactError(&fs.PathError{Op: "open", Path: path, Err: fs.ErrNotExist}) + if _, err := m.opts.fileOps.Stat(baseName + deletedSuffix); err == nil { + return nil, 0, redactError(&fs.PathError{Op: "open", Path: baseName, Err: fs.ErrNotExist}) } - f, err := os.Open(path) + f, err := m.opts.fileOps.OpenReader(baseName) if err != nil { return nil, 0, redactError(err) } - fi, err := f.Stat() + fi, err := m.opts.fileOps.Stat(baseName) if err != nil { f.Close() return nil, 0, redactError(err) diff --git a/feature/taildrop/send.go b/feature/taildrop/send.go index 59a1701da..32ba5f6f0 100644 --- a/feature/taildrop/send.go +++ b/feature/taildrop/send.go @@ -4,11 +4,8 @@ package taildrop import ( - "crypto/sha256" "fmt" "io" - "os" - "path/filepath" "sync" "time" @@ -73,9 +70,10 @@ func (f *incomingFile) Write(p []byte) (n int, err error) { // specific partial file. This allows the client to determine whether to resume // a partial file. While resuming, PutFile may be called again with a non-zero // offset to specify where to resume receiving data at. 
-func (m *manager) PutFile(id clientID, baseName string, r io.Reader, offset, length int64) (int64, error) { +func (m *manager) PutFile(id clientID, baseName string, r io.Reader, offset, length int64) (fileLength int64, err error) { + switch { - case m == nil || m.opts.Dir == "": + case m == nil || m.opts.fileOps == nil: return 0, ErrNoTaildrop case !envknob.CanTaildrop(): return 0, ErrNoTaildrop @@ -83,47 +81,47 @@ func (m *manager) PutFile(id clientID, baseName string, r io.Reader, offset, len return 0, ErrNotAccessible } - //Compute dstPath & avoid mid‑upload deletion - var dstPath string - if m.opts.Mode == PutModeDirect { - var err error - dstPath, err = joinDir(m.opts.Dir, baseName) + if err := validateBaseName(baseName); err != nil { + return 0, err + } + + // and make sure we don't delete it while uploading: + m.deleter.Remove(baseName) + + // Create (if not already) the partial file with read-write permissions. + partialName := baseName + id.partialSuffix() + wc, partialPath, err := m.opts.fileOps.OpenWriter(partialName, offset, 0o666) + if err != nil { + return 0, m.redactAndLogError("Create", err) + } + defer func() { + wc.Close() if err != nil { - return 0, err + m.deleter.Insert(partialName) // mark partial file for eventual deletion } - } else { - // In SAF mode, we simply use the baseName as the destination "path" - // (the actual directory is managed by SAF). - dstPath = baseName - } - m.deleter.Remove(filepath.Base(dstPath)) // avoid deleting the partial file while receiving + }() // Check whether there is an in-progress transfer for the file. - partialFileKey := incomingFileKey{id, baseName} - inFile, loaded := m.incomingFiles.LoadOrInit(partialFileKey, func() *incomingFile { - return &incomingFile{ + inFileKey := incomingFileKey{id, baseName} + inFile, loaded := m.incomingFiles.LoadOrInit(inFileKey, func() *incomingFile { + inFile := &incomingFile{ clock: m.opts.Clock, started: m.opts.Clock.Now(), size: length, sendFileNotify: m.opts.SendFileNotify, } + if m.opts.DirectFileMode { + inFile.partialPath = partialPath + } + return inFile }) + + inFile.w = wc + if loaded { return 0, ErrFileExists } - defer m.incomingFiles.Delete(partialFileKey) - - // Open writer & populate inFile paths - wc, partialPath, err := m.openWriterAndPaths(id, m.opts.Mode, inFile, baseName, dstPath, offset) - if err != nil { - return 0, m.redactAndLogError("Create", err) - } - defer func() { - wc.Close() - if err != nil { - m.deleter.Insert(filepath.Base(partialPath)) // mark partial file for eventual deletion - } - }() + defer m.incomingFiles.Delete(inFileKey) // Record that we have started to receive at least one file. 
// This is used by the deleter upon a cold-start to scan the directory @@ -148,220 +146,26 @@ func (m *manager) PutFile(id clientID, baseName string, r io.Reader, offset, len return 0, m.redactAndLogError("Close", err) } - fileLength := offset + copyLength + fileLength = offset + copyLength inFile.mu.Lock() inFile.done = true inFile.mu.Unlock() - // Finalize rename - switch m.opts.Mode { - case PutModeDirect: - var finalDst string - finalDst, err = m.finalizeDirect(inFile, partialPath, dstPath, fileLength) - if err != nil { - return 0, m.redactAndLogError("Rename", err) - } - inFile.finalPath = finalDst - - case PutModeAndroidSAF: - if err = m.finalizeSAF(partialPath, baseName); err != nil { - return 0, m.redactAndLogError("Rename", err) - } + // 6) Finalize (rename/move) the partial into place via FileOps.Rename + finalPath, err := m.opts.fileOps.Rename(partialPath, baseName) + if err != nil { + return 0, m.redactAndLogError("Rename", err) } + inFile.finalPath = finalPath m.totalReceived.Add(1) m.opts.SendFileNotify() return fileLength, nil } -// openWriterAndPaths opens the correct writer, seeks/truncates if needed, -// and sets inFile.partialPath & inFile.finalPath for later cleanup/rename. -// The caller is responsible for closing the file on completion. -func (m *manager) openWriterAndPaths( - id clientID, - mode PutMode, - inFile *incomingFile, - baseName string, - dstPath string, - offset int64, -) (wc io.WriteCloser, partialPath string, err error) { - switch mode { - - case PutModeDirect: - partialPath = dstPath + id.partialSuffix() - f, err := os.OpenFile(partialPath, os.O_CREATE|os.O_RDWR, 0o666) - if err != nil { - return nil, "", m.redactAndLogError("Create", err) - } - if offset != 0 { - curr, err := f.Seek(0, io.SeekEnd) - if err != nil { - f.Close() - return nil, "", m.redactAndLogError("Seek", err) - } - if offset < 0 || offset > curr { - f.Close() - return nil, "", m.redactAndLogError("Seek", fmt.Errorf("offset %d out of range", offset)) - } - if _, err := f.Seek(offset, io.SeekStart); err != nil { - f.Close() - return nil, "", m.redactAndLogError("Seek", err) - } - if err := f.Truncate(offset); err != nil { - f.Close() - return nil, "", m.redactAndLogError("Truncate", err) - } - } - inFile.w = f - wc = f - inFile.partialPath = partialPath - inFile.finalPath = dstPath - return wc, partialPath, nil - - case PutModeAndroidSAF: - if m.opts.FileOps == nil { - return nil, "", m.redactAndLogError("Create (SAF)", fmt.Errorf("missing FileOps")) - } - writer, uri, err := m.opts.FileOps.OpenFileWriter(baseName) - if err != nil { - return nil, "", m.redactAndLogError("Create (SAF)", fmt.Errorf("failed to open file for writing via SAF")) - } - if writer == nil || uri == "" { - return nil, "", fmt.Errorf("invalid SAF writer or URI") - } - // SAF mode does not support resuming, so enforce offset == 0. - if offset != 0 { - writer.Close() - return nil, "", m.redactAndLogError("Seek", fmt.Errorf("resuming is not supported in SAF mode")) - } - inFile.w = writer - wc = writer - partialPath = uri - inFile.partialPath = uri - inFile.finalPath = baseName - return wc, partialPath, nil - - default: - return nil, "", fmt.Errorf("unsupported PutMode: %v", mode) - } -} - -// finalizeDirect atomically renames or dedups the partial file, retrying -// under new names up to 10 times. It returns the final path that succeeded. 
-func (m *manager) finalizeDirect( - inFile *incomingFile, - partialPath string, - initialDst string, - fileLength int64, -) (string, error) { - var ( - once sync.Once - cachedSum [sha256.Size]byte - cacheErr error - computeSum = func() ([sha256.Size]byte, error) { - once.Do(func() { cachedSum, cacheErr = sha256File(partialPath) }) - return cachedSum, cacheErr - } - ) - - dstPath := initialDst - const maxRetries = 10 - for i := 0; i < maxRetries; i++ { - // Atomically rename the partial file as the destination file if it doesn't exist. - // Otherwise, it returns the length of the current destination file. - // The operation is atomic. - lengthOnDisk, err := func() (int64, error) { - m.renameMu.Lock() - defer m.renameMu.Unlock() - fi, statErr := os.Stat(dstPath) - if os.IsNotExist(statErr) { - // dst missing → rename partial into place - return -1, os.Rename(partialPath, dstPath) - } - if statErr != nil { - return -1, statErr - } - return fi.Size(), nil - }() - if err != nil { - return "", err - } - if lengthOnDisk < 0 { - // successfully moved - inFile.finalPath = dstPath - return dstPath, nil - } - - // Avoid the final rename if a destination file has the same contents. - // - // Note: this is best effort and copying files from iOS from the Media Library - // results in processing on the iOS side which means the size and shas of the - // same file can be different. - if lengthOnDisk == fileLength { - partSum, err := computeSum() - if err != nil { - return "", err - } - dstSum, err := sha256File(dstPath) - if err != nil { - return "", err - } - if partSum == dstSum { - // same content → drop the partial - if err := os.Remove(partialPath); err != nil { - return "", err - } - inFile.finalPath = dstPath - return dstPath, nil - } - } - - // Choose a new destination filename and try again. - dstPath = nextFilename(dstPath) - } - - return "", fmt.Errorf("too many retries trying to rename a partial file %q", initialDst) -} - -// finalizeSAF retries RenamePartialFile up to 10 times, generating a new -// name on each failure until the SAF URI changes. -func (m *manager) finalizeSAF( - partialPath, finalName string, -) error { - if m.opts.FileOps == nil { - return fmt.Errorf("missing FileOps for SAF finalize") - } - const maxTries = 10 - name := finalName - for i := 0; i < maxTries; i++ { - newURI, err := m.opts.FileOps.RenamePartialFile(partialPath, m.opts.Dir, name) - if err != nil { - return err - } - if newURI != "" && newURI != name { - return nil - } - name = nextFilename(name) - } - return fmt.Errorf("failed to finalize SAF file after %d retries", maxTries) -} - func (m *manager) redactAndLogError(stage string, err error) error { err = redactError(err) m.opts.Logf("put %s error: %v", stage, err) return err } - -func sha256File(file string) (out [sha256.Size]byte, err error) { - h := sha256.New() - f, err := os.Open(file) - if err != nil { - return out, err - } - defer f.Close() - if _, err := io.Copy(h, f); err != nil { - return out, err - } - return [sha256.Size]byte(h.Sum(nil)), nil -} diff --git a/feature/taildrop/send_test.go b/feature/taildrop/send_test.go index 8edb70417..9ffa5fccc 100644 --- a/feature/taildrop/send_test.go +++ b/feature/taildrop/send_test.go @@ -4,123 +4,64 @@ package taildrop import ( - "bytes" - "fmt" - "io" "os" "path/filepath" + "strings" "testing" "tailscale.com/tstime" + "tailscale.com/util/must" ) -// nopWriteCloser is a no-op io.WriteCloser wrapping a bytes.Buffer. 
-type nopWriteCloser struct{ *bytes.Buffer } - -func (nwc nopWriteCloser) Close() error { return nil } - -// mockFileOps implements just enough of the FileOps interface for SAF tests. -type mockFileOps struct { - writes *bytes.Buffer - renameOK bool -} - -func (m *mockFileOps) OpenFileWriter(name string) (io.WriteCloser, string, error) { - m.writes = new(bytes.Buffer) - return nopWriteCloser{m.writes}, "uri://" + name + ".partial", nil -} - -func (m *mockFileOps) RenamePartialFile(partialPath, dir, finalName string) (string, error) { - if !m.renameOK { - m.renameOK = true - return "uri://" + finalName, nil - } - return "", io.ErrUnexpectedEOF -} - func TestPutFile(t *testing.T) { const content = "hello, world" tests := []struct { - name string - mode PutMode - setup func(t *testing.T) (*manager, string, *mockFileOps) - wantFile string + name string + directFileMode bool }{ - { - name: "PutModeDirect", - mode: PutModeDirect, - setup: func(t *testing.T) (*manager, string, *mockFileOps) { - dir := t.TempDir() - opts := managerOptions{ - Logf: t.Logf, - Clock: tstime.DefaultClock{}, - State: nil, - Dir: dir, - Mode: PutModeDirect, - DirectFileMode: true, - SendFileNotify: func() {}, - } - mgr := opts.New() - return mgr, dir, nil - }, - wantFile: "file.txt", - }, - { - name: "PutModeAndroidSAF", - mode: PutModeAndroidSAF, - setup: func(t *testing.T) (*manager, string, *mockFileOps) { - // SAF still needs a non-empty Dir to pass the guard. - dir := t.TempDir() - mops := &mockFileOps{} - opts := managerOptions{ - Logf: t.Logf, - Clock: tstime.DefaultClock{}, - State: nil, - Dir: dir, - Mode: PutModeAndroidSAF, - FileOps: mops, - DirectFileMode: true, - SendFileNotify: func() {}, - } - mgr := opts.New() - return mgr, dir, mops - }, - wantFile: "file.txt", - }, + {"DirectFileMode", true}, + {"NonDirectFileMode", false}, } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - mgr, dir, mops := tc.setup(t) - id := clientID(fmt.Sprint(0)) - reader := bytes.NewReader([]byte(content)) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir := t.TempDir() + mgr := managerOptions{ + Logf: t.Logf, + Clock: tstime.DefaultClock{}, + State: nil, + fileOps: must.Get(newFileOps(dir)), + DirectFileMode: tt.directFileMode, + SendFileNotify: func() {}, + }.New() - n, err := mgr.PutFile(id, "file.txt", reader, 0, int64(len(content))) + id := clientID("0") + n, err := mgr.PutFile(id, "file.txt", strings.NewReader(content), 0, int64(len(content))) if err != nil { - t.Fatalf("PutFile(%s) error: %v", tc.name, err) + t.Fatalf("PutFile error: %v", err) } if n != int64(len(content)) { t.Errorf("wrote %d bytes; want %d", n, len(content)) } - switch tc.mode { - case PutModeDirect: - path := filepath.Join(dir, tc.wantFile) - data, err := os.ReadFile(path) - if err != nil { - t.Fatalf("ReadFile error: %v", err) - } - if got := string(data); got != content { - t.Errorf("file contents = %q; want %q", got, content) - } + path := filepath.Join(dir, "file.txt") - case PutModeAndroidSAF: - if mops.writes == nil { - t.Fatal("SAF writer was never created") - } - if got := mops.writes.String(); got != content { - t.Errorf("SAF writes = %q; want %q", got, content) + got, err := os.ReadFile(path) + if err != nil { + t.Fatalf("ReadFile %q: %v", path, err) + } + if string(got) != content { + t.Errorf("file contents = %q; want %q", string(got), content) + } + + entries, err := os.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + for _, entry := range entries { + if strings.Contains(entry.Name(), 
".partial") { + t.Errorf("unexpected partial file left behind: %s", entry.Name()) } } }) diff --git a/feature/taildrop/taildrop.go b/feature/taildrop/taildrop.go index 2dfa415bb..6c3deaed1 100644 --- a/feature/taildrop/taildrop.go +++ b/feature/taildrop/taildrop.go @@ -12,8 +12,6 @@ package taildrop import ( "errors" "hash/adler32" - "io" - "io/fs" "os" "path" "path/filepath" @@ -21,7 +19,6 @@ import ( "sort" "strconv" "strings" - "sync" "sync/atomic" "unicode" "unicode/utf8" @@ -72,11 +69,6 @@ type managerOptions struct { Clock tstime.DefaultClock // may be nil State ipn.StateStore // may be nil - // Dir is the directory to store received files. - // This main either be the final location for the files - // or just a temporary staging directory (see DirectFileMode). - Dir string - // DirectFileMode reports whether we are writing files // directly to a download directory, rather than writing them to // a temporary staging directory. @@ -91,9 +83,10 @@ type managerOptions struct { // copy them out, and then delete them. DirectFileMode bool - FileOps FileOps - - Mode PutMode + // FileOps abstracts platform-specific file operations needed for file transfers. + // Android's implementation uses the Storage Access Framework, and other platforms + // use fsFileOps. + fileOps FileOps // SendFileNotify is called periodically while a file is actively // receiving the contents for the file. There is a final call @@ -111,9 +104,6 @@ type manager struct { // deleter managers asynchronous deletion of files. deleter fileDeleter - // renameMu is used to protect os.Rename calls so that they are atomic. - renameMu sync.Mutex - // totalReceived counts the cumulative total of received files. totalReceived atomic.Int64 // emptySince specifies that there were no waiting files @@ -137,11 +127,6 @@ func (opts managerOptions) New() *manager { return m } -// Dir returns the directory. -func (m *manager) Dir() string { - return m.opts.Dir -} - // Shutdown shuts down the Manager. // It blocks until all spawned goroutines have stopped running. func (m *manager) Shutdown() { @@ -172,57 +157,29 @@ func isPartialOrDeleted(s string) bool { return strings.HasSuffix(s, deletedSuffix) || strings.HasSuffix(s, partialSuffix) } -func joinDir(dir, baseName string) (fullPath string, err error) { - if !utf8.ValidString(baseName) { - return "", ErrInvalidFileName - } - if strings.TrimSpace(baseName) != baseName { - return "", ErrInvalidFileName - } - if len(baseName) > 255 { - return "", ErrInvalidFileName +func validateBaseName(name string) error { + if !utf8.ValidString(name) || + strings.TrimSpace(name) != name || + len(name) > 255 { + return ErrInvalidFileName } // TODO: validate unicode normalization form too? Varies by platform. - clean := path.Clean(baseName) - if clean != baseName || - clean == "." || clean == ".." || - isPartialOrDeleted(clean) { - return "", ErrInvalidFileName + clean := path.Clean(name) + if clean != name || clean == "." || clean == ".." { + return ErrInvalidFileName } - for _, r := range baseName { + if isPartialOrDeleted(name) { + return ErrInvalidFileName + } + for _, r := range name { if !validFilenameRune(r) { - return "", ErrInvalidFileName + return ErrInvalidFileName } } - if !filepath.IsLocal(baseName) { - return "", ErrInvalidFileName - } - return filepath.Join(dir, baseName), nil -} - -// rangeDir iterates over the contents of a directory, calling fn for each entry. -// It continues iterating while fn returns true. -// It reports the number of entries seen. 
-func rangeDir(dir string, fn func(fs.DirEntry) bool) error { - f, err := os.Open(dir) - if err != nil { - return err - } - defer f.Close() - for { - des, err := f.ReadDir(10) - for _, de := range des { - if !fn(de) { - return nil - } - } - if err != nil { - if err == io.EOF { - return nil - } - return err - } + if !filepath.IsLocal(name) { + return ErrInvalidFileName } + return nil } // IncomingFiles returns a list of active incoming files. diff --git a/feature/taildrop/taildrop_test.go b/feature/taildrop/taildrop_test.go index da0bd2f43..0d77273f0 100644 --- a/feature/taildrop/taildrop_test.go +++ b/feature/taildrop/taildrop_test.go @@ -4,40 +4,10 @@ package taildrop import ( - "path/filepath" "strings" "testing" ) -func TestJoinDir(t *testing.T) { - dir := t.TempDir() - tests := []struct { - in string - want string // just relative to m.Dir - wantOk bool - }{ - {"", "", false}, - {"foo", "foo", true}, - {"./foo", "", false}, - {"../foo", "", false}, - {"foo/bar", "", false}, - {"😋", "😋", true}, - {"\xde\xad\xbe\xef", "", false}, - {"foo.partial", "", false}, - {"foo.deleted", "", false}, - {strings.Repeat("a", 1024), "", false}, - {"foo:bar", "", false}, - } - for _, tt := range tests { - got, gotErr := joinDir(dir, tt.in) - got, _ = filepath.Rel(dir, got) - gotOk := gotErr == nil - if got != tt.want || gotOk != tt.wantOk { - t.Errorf("joinDir(%q) = (%v, %v), want (%v, %v)", tt.in, got, gotOk, tt.want, tt.wantOk) - } - } -} - func TestNextFilename(t *testing.T) { tests := []struct { in string @@ -67,3 +37,29 @@ func TestNextFilename(t *testing.T) { } } } + +func TestValidateBaseName(t *testing.T) { + tests := []struct { + in string + wantOk bool + }{ + {"", false}, + {"foo", true}, + {"./foo", false}, + {"../foo", false}, + {"foo/bar", false}, + {"😋", true}, + {"\xde\xad\xbe\xef", false}, + {"foo.partial", false}, + {"foo.deleted", false}, + {strings.Repeat("a", 1024), false}, + {"foo:bar", false}, + } + for _, tt := range tests { + err := validateBaseName(tt.in) + gotOk := err == nil + if gotOk != tt.wantOk { + t.Errorf("validateBaseName(%q) = %v, wantOk = %v", tt.in, err, tt.wantOk) + } + } +} From 0f15e4419683f9a5c0f4048ba9216759de553b20 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Fri, 1 Aug 2025 15:17:57 -0700 Subject: [PATCH 1158/1708] Makefile: sort make commands and fix printing newlines Updates #cleanup Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- Makefile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 9fffdc48a..78812d57c 100644 --- a/Makefile +++ b/Makefile @@ -138,8 +138,10 @@ generate: ## Generate code ./tool/go generate ./... help: ## Show this help - @echo "\nSpecify a command. The choices are:\n" - @grep -hE '^[0-9a-zA-Z_-]+:.*?## .*$$' ${MAKEFILE_LIST} | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[0;36m%-20s\033[m %s\n", $$1, $$2}' + @echo "" + @echo "Specify a command. The choices are:" + @echo "" + @grep -hE '^[0-9a-zA-Z_-]+:.*?## .*$$' ${MAKEFILE_LIST} | sort | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[0;36m%-20s\033[m %s\n", $$1, $$2}' @echo "" .PHONY: help From 834630fedfe4e8ac3e675477c806608f2606e664 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Fri, 1 Aug 2025 14:16:00 -0700 Subject: [PATCH 1159/1708] cmd/tailscale: add systray subcommand on Linux builds This will start including the sytray app in unstable builds for Linux, unless the `ts_omit_systray` build flag is specified. 
If we decide not to include it in the v1.88 release, we can pull it back out or restrict it to unstable builds. Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- build_dist.sh | 2 +- cmd/tailscale/cli/cli.go | 1 + cmd/tailscale/cli/systray.go | 24 ++++++++++++++++++++++++ cmd/tailscale/cli/systray_omit.go | 31 +++++++++++++++++++++++++++++++ cmd/tailscale/depaware.txt | 27 ++++++++++++++++++++++++--- 5 files changed, 81 insertions(+), 4 deletions(-) create mode 100644 cmd/tailscale/cli/systray.go create mode 100644 cmd/tailscale/cli/systray_omit.go diff --git a/build_dist.sh b/build_dist.sh index fed37c264..0fc123ade 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,7 +41,7 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" - tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_taildrop,ts_omit_tpm" + tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm" ;; --box) if [ ! -z "${TAGS:-}" ]; then diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index bdfc7af42..72924350c 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -261,6 +261,7 @@ change in the future. driveCmd, idTokenCmd, configureHostCmd(), + systrayCmd, ), FlagSet: rootfs, Exec: func(ctx context.Context, args []string) error { diff --git a/cmd/tailscale/cli/systray.go b/cmd/tailscale/cli/systray.go new file mode 100644 index 000000000..184c85360 --- /dev/null +++ b/cmd/tailscale/cli/systray.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !ts_omit_systray + +package cli + +import ( + "context" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/client/systray" +) + +var systrayCmd = &ffcli.Command{ + Name: "systray", + ShortUsage: "tailscale systray", + ShortHelp: "Run a systray application to manage Tailscale", + Exec: func(_ context.Context, _ []string) error { + // TODO(will): pass localClient to menu to use the global --socket flag + new(systray.Menu).Run() + return nil + }, +} diff --git a/cmd/tailscale/cli/systray_omit.go b/cmd/tailscale/cli/systray_omit.go new file mode 100644 index 000000000..8d93fd84b --- /dev/null +++ b/cmd/tailscale/cli/systray_omit.go @@ -0,0 +1,31 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux || ts_omit_systray + +package cli + +import ( + "context" + "fmt" + "strings" + + "github.com/peterbourgon/ff/v3/ffcli" +) + +// TODO(will): update URL to KB article when available +var systrayHelp = strings.TrimSpace(` +The Tailscale systray app is not included in this client build. 
+To run it manually, see https://github.com/tailscale/tailscale/tree/main/cmd/systray +`) + +var systrayCmd = &ffcli.Command{ + Name: "systray", + ShortUsage: "tailscale systray", + ShortHelp: "Not available in this client build", + LongHelp: hidden + systrayHelp, + Exec: func(_ context.Context, _ []string) error { + fmt.Println(systrayHelp) + return nil + }, +} diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index e44e20e8c..020479ebb 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -2,9 +2,14 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 + L fyne.io/systray from tailscale.com/client/systray + L fyne.io/systray/internal/generated/menu from fyne.io/systray + L fyne.io/systray/internal/generated/notifier from fyne.io/systray + L github.com/Kodeworks/golang-image-ico from tailscale.com/client/systray W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy + L github.com/atotto/clipboard from tailscale.com/client/systray github.com/coder/websocket from tailscale.com/util/eventbus github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket @@ -12,6 +17,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/pe+ W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/winutil/authenticode + L github.com/fogleman/gg from tailscale.com/client/systray github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ @@ -19,6 +25,11 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ + L 💣 github.com/godbus/dbus/v5 from fyne.io/systray+ + L github.com/godbus/dbus/v5/introspect from fyne.io/systray+ + L github.com/godbus/dbus/v5/prop from fyne.io/systray + L github.com/golang/freetype/raster from github.com/fogleman/gg+ + L github.com/golang/freetype/truetype from github.com/fogleman/gg github.com/golang/groupcache/lru from tailscale.com/net/dnscache L github.com/google/nftables from tailscale.com/util/linuxfw L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt @@ -59,7 +70,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/web-client-prebuilt from tailscale.com/client/web - github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli + github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli+ L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 
from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/local+ @@ -73,6 +84,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com from tailscale.com/version 💣 tailscale.com/atomicfile from tailscale.com/cmd/tailscale/cli+ tailscale.com/client/local from tailscale.com/client/tailscale+ + L tailscale.com/client/systray from tailscale.com/cmd/tailscale/cli tailscale.com/client/tailscale from tailscale.com/cmd/tailscale/cli+ tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/cmd/tailscale/cli @@ -178,6 +190,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/set from tailscale.com/derp+ tailscale.com/util/singleflight from tailscale.com/net/dnscache+ tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ + L tailscale.com/util/stringsx from tailscale.com/client/systray tailscale.com/util/syspolicy from tailscale.com/ipn tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ @@ -213,6 +226,11 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from tailscale.com/util/syspolicy/internal/metrics+ + L golang.org/x/image/draw from github.com/fogleman/gg + L golang.org/x/image/font from github.com/fogleman/gg+ + L golang.org/x/image/font/basicfont from github.com/fogleman/gg + L golang.org/x/image/math/f64 from github.com/fogleman/gg+ + L golang.org/x/image/math/fixed from github.com/fogleman/gg+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from net/http+ @@ -339,7 +357,10 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep html/template from tailscale.com/util/eventbus image from github.com/skip2/go-qrcode+ image/color from github.com/skip2/go-qrcode+ - image/png from github.com/skip2/go-qrcode + L image/draw from github.com/Kodeworks/golang-image-ico+ + L image/internal/imageutil from image/draw+ + L image/jpeg from github.com/fogleman/gg + image/png from github.com/skip2/go-qrcode+ internal/abi from crypto/x509/internal/macos+ internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug @@ -409,7 +430,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep net/url from crypto/x509+ os from crypto/internal/sysrand+ os/exec from github.com/coreos/go-iptables/iptables+ - os/signal from tailscale.com/cmd/tailscale/cli + os/signal from tailscale.com/cmd/tailscale/cli+ os/user from archive/tar+ path from archive/tar+ path/filepath from archive/tar+ From b0018f1e7df47099ee22f3c5fcc91a4112e10523 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 4 Aug 2025 14:21:32 -0700 Subject: [PATCH 1160/1708] wgengine/magicsock: fix looksLikeInitiationMsg endianness (#16771) WireGuard message type is little-endian encoded. 
Updates tailscale/corp#30903 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 7 ++----- wgengine/magicsock/magicsock_test.go | 16 ++++++++++++---- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 6495b13b5..c99d1b68f 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1765,11 +1765,8 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu // looksLikeInitiationMsg returns true if b looks like a WireGuard initiation // message, otherwise it returns false. func looksLikeInitiationMsg(b []byte) bool { - if len(b) == device.MessageInitiationSize && - binary.BigEndian.Uint32(b) == device.MessageInitiationType { - return true - } - return false + return len(b) == device.MessageInitiationSize && + binary.LittleEndian.Uint32(b) == device.MessageInitiationType } // receiveIP is the shared bits of ReceiveIPv4 and ReceiveIPv6. diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 480faa694..0d1ac9dfd 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -9,6 +9,7 @@ import ( crand "crypto/rand" "crypto/tls" "encoding/binary" + "encoding/hex" "errors" "fmt" "io" @@ -3390,10 +3391,17 @@ func Test_virtualNetworkID(t *testing.T) { } func Test_looksLikeInitiationMsg(t *testing.T) { - initMsg := make([]byte, device.MessageInitiationSize) - binary.BigEndian.PutUint32(initMsg, device.MessageInitiationType) - initMsgSizeTransportType := make([]byte, device.MessageInitiationSize) - binary.BigEndian.PutUint32(initMsgSizeTransportType, device.MessageTransportType) + // initMsg was captured as the first packet from a WireGuard "session" + initMsg, err := hex.DecodeString("01000000d9205f67915a500e377b409e0c3d97ca91e68654b95952de965e75df491000cce00632678cd9e8c8525556aa8daf24e6cfc44c48812bb560ff3c1c5dee061b3f833dfaa48acf13b64bd1e0027aa4d977a3721b82fd6072338702fc3193651404980ad46dae2869ba6416cc0eb38621a4140b5b918eb6402b697202adb3002a6d00000000000000000000000000000000") + if err != nil { + t.Fatal(err) + } + if len(initMsg) != device.MessageInitiationSize { + t.Fatalf("initMsg is not %d bytes long", device.MessageInitiationSize) + } + initMsgSizeTransportType := make([]byte, len(initMsg)) + copy(initMsgSizeTransportType, initMsg) + binary.LittleEndian.PutUint32(initMsgSizeTransportType, device.MessageTransportType) tests := []struct { name string b []byte From 5bb42e3018a0543467a332322f438cda98530c3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Tue, 5 Aug 2025 08:31:51 -0400 Subject: [PATCH 1161/1708] wgengine/router: rely on events for deleted IP rules (#16744) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds the eventbus to the router subsystem. The event is currently only used on linux. Also includes facilities to inject events into the bus. 
Updates #15160 Signed-off-by: Claus Lensbøl --- cmd/tailscaled/tailscaled.go | 2 +- net/netmon/netmon.go | 43 --------------------- net/netmon/netmon_linux.go | 11 ++---- util/eventbus/client.go | 2 +- util/eventbus/eventbustest/eventbustest.go | 38 +++++++++++++++++- wgengine/router/router.go | 7 +++- wgengine/router/router_android.go | 3 +- wgengine/router/router_darwin.go | 3 +- wgengine/router/router_default.go | 3 +- wgengine/router/router_freebsd.go | 3 +- wgengine/router/router_linux.go | 45 ++++++++++++++++++---- wgengine/router/router_linux_test.go | 45 +++++++++++++++------- wgengine/router/router_openbsd.go | 3 +- wgengine/router/router_plan9.go | 3 +- wgengine/router/router_windows.go | 3 +- 15 files changed, 132 insertions(+), 82 deletions(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index ab1590132..06d366aa6 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -800,7 +800,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo sys.NetMon.Get().SetTailscaleInterfaceName(devName) } - r, err := router.New(logf, dev, sys.NetMon.Get(), sys.HealthTracker()) + r, err := router.New(logf, dev, sys.NetMon.Get(), sys.HealthTracker(), sys.Bus.Get()) if err != nil { dev.Close() return false, fmt.Errorf("creating router: %w", err) diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index 3f825bc97..b97b184d4 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -66,7 +66,6 @@ type Monitor struct { mu sync.Mutex // guards all following fields cbs set.HandleSet[ChangeFunc] - ruleDelCB set.HandleSet[RuleDeleteCallback] ifState *State gwValid bool // whether gw and gwSelfIP are valid gw netip.Addr // our gateway's IP @@ -224,29 +223,6 @@ func (m *Monitor) RegisterChangeCallback(callback ChangeFunc) (unregister func() } } -// RuleDeleteCallback is a callback when a Linux IP policy routing -// rule is deleted. The table is the table number (52, 253, 354) and -// priority is the priority order number (for Tailscale rules -// currently: 5210, 5230, 5250, 5270) -type RuleDeleteCallback func(table uint8, priority uint32) - -// RegisterRuleDeleteCallback adds callback to the set of parties to be -// notified (in their own goroutine) when a Linux ip rule is deleted. -// To remove this callback, call unregister (or close the monitor). -func (m *Monitor) RegisterRuleDeleteCallback(callback RuleDeleteCallback) (unregister func()) { - if m.static { - return func() {} - } - m.mu.Lock() - defer m.mu.Unlock() - handle := m.ruleDelCB.Add(callback) - return func() { - m.mu.Lock() - defer m.mu.Unlock() - delete(m.ruleDelCB, handle) - } -} - // Start starts the monitor. // A monitor can only be started & closed once. func (m *Monitor) Start() { @@ -359,10 +335,6 @@ func (m *Monitor) pump() { time.Sleep(time.Second) continue } - if rdm, ok := msg.(ipRuleDeletedMessage); ok { - m.notifyRuleDeleted(rdm) - continue - } if msg.ignore() { continue } @@ -370,14 +342,6 @@ func (m *Monitor) pump() { } } -func (m *Monitor) notifyRuleDeleted(rdm ipRuleDeletedMessage) { - m.mu.Lock() - defer m.mu.Unlock() - for _, cb := range m.ruleDelCB { - go cb(rdm.table, rdm.priority) - } -} - // isInterestingInterface reports whether the provided interface should be // considered when checking for network state changes. // The ips parameter should be the IPs of the provided interface. 
@@ -624,10 +588,3 @@ func (m *Monitor) checkWallTimeAdvanceLocked() bool { func (m *Monitor) resetTimeJumpedLocked() { m.timeJumped = false } - -type ipRuleDeletedMessage struct { - table uint8 - priority uint32 -} - -func (ipRuleDeletedMessage) ignore() bool { return true } diff --git a/net/netmon/netmon_linux.go b/net/netmon/netmon_linux.go index 659fcc74b..a1077c257 100644 --- a/net/netmon/netmon_linux.go +++ b/net/netmon/netmon_linux.go @@ -241,18 +241,15 @@ func (c *nlConn) Receive() (message, error) { // On `ip -4 rule del pref 5210 table main`, logs: // monitor: ip rule deleted: {Family:2 DstLength:0 SrcLength:0 Tos:0 Table:254 Protocol:0 Scope:0 Type:1 Flags:0 Attributes:{Dst: Src: Gateway: OutIface:0 Priority:5210 Table:254 Mark:4294967295 Expires: Metrics: Multipath:[]}} } - c.rulesDeleted.Publish(RuleDeleted{ + rd := RuleDeleted{ Table: rmsg.Table, Priority: rmsg.Attributes.Priority, - }) - rdm := ipRuleDeletedMessage{ - table: rmsg.Table, - priority: rmsg.Attributes.Priority, } + c.rulesDeleted.Publish(rd) if debugNetlinkMessages() { - c.logf("%+v", rdm) + c.logf("%+v", rd) } - return rdm, nil + return ignoreMessage{}, nil case unix.RTM_NEWLINK, unix.RTM_DELLINK: // This is an unhandled message, but don't print an error. // See https://github.com/tailscale/tailscale/issues/6806 diff --git a/util/eventbus/client.go b/util/eventbus/client.go index f4261b13c..a6266a4d8 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -119,7 +119,7 @@ func Subscribe[T any](c *Client) *Subscriber[T] { return s } -// Publisher returns a publisher for event type T using the given +// Publish returns a publisher for event type T using the given // client. func Publish[T any](c *Client) *Publisher[T] { p := newPublisher[T](c) diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index 75d430d53..98536ae0a 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -21,7 +21,7 @@ func NewBus(t *testing.T) *eventbus.Bus { return bus } -// NewTestWatcher constructs a [Watcher] that can be used to check the stream of +// NewWatcher constructs a [Watcher] that can be used to check the stream of // events generated by code under test. After construction the caller may use // [Expect] and [ExpectExactly], to verify that the desired events were captured. func NewWatcher(t *testing.T, bus *eventbus.Bus) *Watcher { @@ -201,3 +201,39 @@ func eventFilter(f any) filter { return fixup(fv.Call([]reflect.Value{args[0].Elem()})) }).Interface().(filter) } + +// Injector holds a map with [eventbus.Publisher], tied to an [eventbus.Client] +// for testing purposes. +type Injector struct { + client *eventbus.Client + publishers map[reflect.Type]any + // The value for a key is an *eventbus.Publisher[T] for the corresponding type. +} + +// NewInjector constructs an [Injector] that can be used to inject events into +// the the stream of events used by code under test. After construction the +// caller may use [Inject] to insert events into the bus. +func NewInjector(t *testing.T, b *eventbus.Bus) *Injector { + inj := &Injector{ + client: b.Client(t.Name()), + publishers: make(map[reflect.Type]any), + } + t.Cleanup(inj.client.Close) + + return inj +} + +// Inject inserts events of T onto an [eventbus.Bus]. If an [eventbus.Publisher] +// for the type does not exist, it will be initialized lazily. 
Calling inject is +// synchronous, and the event will as such have been published to the eventbus +// by the time the function returns. +func Inject[T any](inj *Injector, event T) { + eventType := reflect.TypeFor[T]() + + pub, ok := inj.publishers[eventType] + if !ok { + pub = eventbus.Publish[T](inj.client) + inj.publishers[eventType] = pub + } + pub.(*eventbus.Publisher[T]).Publish(event) +} diff --git a/wgengine/router/router.go b/wgengine/router/router.go index 423008978..25d1c08a2 100644 --- a/wgengine/router/router.go +++ b/wgengine/router/router.go @@ -14,6 +14,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/types/logger" "tailscale.com/types/preftype" + "tailscale.com/util/eventbus" ) // Router is responsible for managing the system network stack. @@ -45,9 +46,11 @@ type Router interface { // // If netMon is nil, it's not used. It's currently (2021-07-20) only // used on Linux in some situations. -func New(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func New(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, + health *health.Tracker, bus *eventbus.Bus, +) (Router, error) { logf = logger.WithPrefix(logf, "router: ") - return newUserspaceRouter(logf, tundev, netMon, health) + return newUserspaceRouter(logf, tundev, netMon, health, bus) } // CleanUp restores the system network configuration to its original state diff --git a/wgengine/router/router_android.go b/wgengine/router/router_android.go index deeccda4a..de680606f 100644 --- a/wgengine/router/router_android.go +++ b/wgengine/router/router_android.go @@ -10,9 +10,10 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, _ *eventbus.Bus) (Router, error) { // Note, this codepath is _not_ used when building the android app // from github.com/tailscale/tailscale-android. 
The android app // constructs its own wgengine with a custom router implementation diff --git a/wgengine/router/router_darwin.go b/wgengine/router/router_darwin.go index 73e394b04..ebb2615a0 100644 --- a/wgengine/router/router_darwin.go +++ b/wgengine/router/router_darwin.go @@ -8,9 +8,10 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { return newUserspaceBSDRouter(logf, tundev, netMon, health) } diff --git a/wgengine/router/router_default.go b/wgengine/router/router_default.go index 8dcbd36d0..190575973 100644 --- a/wgengine/router/router_default.go +++ b/wgengine/router/router_default.go @@ -13,9 +13,10 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, _ *eventbus.Bus) (Router, error) { return nil, fmt.Errorf("unsupported OS %q", runtime.GOOS) } diff --git a/wgengine/router/router_freebsd.go b/wgengine/router/router_freebsd.go index 40523b4fd..ce4753d7d 100644 --- a/wgengine/router/router_freebsd.go +++ b/wgengine/router/router_freebsd.go @@ -8,6 +8,7 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) // For now this router only supports the userspace WireGuard implementations. 
@@ -15,7 +16,7 @@ import ( // Work is currently underway for an in-kernel FreeBSD implementation of wireguard // https://svnweb.freebsd.org/base?view=revision&revision=357986 -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { return newUserspaceBSDRouter(logf, tundev, netMon, health) } diff --git a/wgengine/router/router_linux.go b/wgengine/router/router_linux.go index adc54c88d..2382e87cd 100644 --- a/wgengine/router/router_linux.go +++ b/wgengine/router/router_linux.go @@ -29,6 +29,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/opt" "tailscale.com/types/preftype" + "tailscale.com/util/eventbus" "tailscale.com/util/linuxfw" "tailscale.com/util/multierr" "tailscale.com/version/distro" @@ -48,6 +49,9 @@ type linuxRouter struct { tunname string netMon *netmon.Monitor health *health.Tracker + eventClient *eventbus.Client + ruleDeletedSub *eventbus.Subscriber[netmon.RuleDeleted] + rulesAddedPub *eventbus.Publisher[AddIPRules] unregNetMon func() addrs map[netip.Prefix]bool routes map[netip.Prefix]bool @@ -77,7 +81,7 @@ type linuxRouter struct { magicsockPortV6 uint16 } -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { tunname, err := tunDev.Name() if err != nil { return nil, err @@ -87,15 +91,16 @@ func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Moni ambientCapNetAdmin: useAmbientCaps(), } - return newUserspaceRouterAdvanced(logf, tunname, netMon, cmd, health) + return newUserspaceRouterAdvanced(logf, tunname, netMon, cmd, health, bus) } -func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon.Monitor, cmd commandRunner, health *health.Tracker) (Router, error) { +func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon.Monitor, cmd commandRunner, health *health.Tracker, bus *eventbus.Bus) (Router, error) { r := &linuxRouter{ logf: logf, tunname: tunname, netfilterMode: netfilterOff, netMon: netMon, + eventClient: bus.Client("router-linux"), health: health, cmd: cmd, @@ -103,6 +108,10 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon ipRuleFixLimiter: rate.NewLimiter(rate.Every(5*time.Second), 10), ipPolicyPrefBase: 5200, } + r.ruleDeletedSub = eventbus.Subscribe[netmon.RuleDeleted](r.eventClient) + r.rulesAddedPub = eventbus.Publish[AddIPRules](r.eventClient) + go r.consumeEventbusTopics() + if r.useIPCommand() { r.ipRuleAvailable = (cmd.run("ip", "rule") == nil) } else { @@ -145,6 +154,24 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon return r, nil } +// consumeEventbusTopics consumes events from all [Conn]-relevant +// [eventbus.Subscriber]'s and passes them to their related handler. Events are +// always handled in the order they are received, i.e. the next event is not +// read until the previous event's handler has returned. It returns when the +// [portmapper.Mapping] subscriber is closed, which is interpreted to be the +// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either +// all open or all closed). 
+func (r *linuxRouter) consumeEventbusTopics() { + for { + select { + case <-r.ruleDeletedSub.Done(): + return + case rulesDeleted := <-r.ruleDeletedSub.Events(): + r.onIPRuleDeleted(rulesDeleted.Table, rulesDeleted.Priority) + } + } +} + // ipCmdSupportsFwmask returns true if the system 'ip' binary supports using a // fwmark stanza with a mask specified. To our knowledge, everything except busybox // pre-1.33 supports this. @@ -276,6 +303,10 @@ func (r *linuxRouter) fwmaskWorks() bool { return v } +// AddIPRules is used as an event signal to signify that rules have been added. +// It is added to aid testing, but could be extended if there's a reason for it. +type AddIPRules struct{} + // onIPRuleDeleted is the callback from the network monitor for when an IP // policy rule is deleted. See Issue 1591. // @@ -303,6 +334,9 @@ func (r *linuxRouter) onIPRuleDeleted(table uint8, priority uint32) { r.ruleRestorePending.Swap(false) return } + + r.rulesAddedPub.Publish(AddIPRules{}) + time.AfterFunc(rr.Delay()+250*time.Millisecond, func() { if r.ruleRestorePending.Swap(false) && !r.closed.Load() { r.logf("somebody (likely systemd-networkd) deleted ip rules; restoring Tailscale's") @@ -312,9 +346,6 @@ func (r *linuxRouter) onIPRuleDeleted(table uint8, priority uint32) { } func (r *linuxRouter) Up() error { - if r.unregNetMon == nil && r.netMon != nil { - r.unregNetMon = r.netMon.RegisterRuleDeleteCallback(r.onIPRuleDeleted) - } if err := r.setNetfilterMode(netfilterOff); err != nil { return fmt.Errorf("setting netfilter mode: %w", err) } @@ -333,6 +364,7 @@ func (r *linuxRouter) Close() error { if r.unregNetMon != nil { r.unregNetMon() } + r.eventClient.Close() if err := r.downInterface(); err != nil { return err } @@ -1276,7 +1308,6 @@ func (r *linuxRouter) justAddIPRules() error { } var errAcc error for _, family := range r.addrFamilies() { - for _, ru := range ipRules() { // Note: r is a value type here; safe to mutate it. 
ru.Family = family.netlinkInt() diff --git a/wgengine/router/router_linux_test.go b/wgengine/router/router_linux_test.go index a289fb0ac..b6a5a1ac0 100644 --- a/wgengine/router/router_linux_test.go +++ b/wgengine/router/router_linux_test.go @@ -28,6 +28,7 @@ import ( "tailscale.com/tstest" "tailscale.com/types/logger" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/linuxfw" "tailscale.com/version/distro" ) @@ -375,7 +376,7 @@ ip route add throw 192.168.0.0/24 table 52` + basic, fake := NewFakeOS(t) ht := new(health.Tracker) - router, err := newUserspaceRouterAdvanced(t.Logf, "tailscale0", mon, fake, ht) + router, err := newUserspaceRouterAdvanced(t.Logf, "tailscale0", mon, fake, ht, bus) router.(*linuxRouter).nfr = fake.nfr if err != nil { t.Fatalf("failed to create router: %v", err) @@ -414,7 +415,7 @@ type fakeIPTablesRunner struct { t *testing.T ipt4 map[string][]string ipt6 map[string][]string - //we always assume ipv6 and ipv6 nat are enabled when testing + // we always assume ipv6 and ipv6 nat are enabled when testing } func newIPTablesRunner(t *testing.T) linuxfw.NetfilterRunner { @@ -541,6 +542,7 @@ func (n *fakeIPTablesRunner) EnsureSNATForDst(src, dst netip.Addr) error { func (n *fakeIPTablesRunner) DNATNonTailscaleTraffic(exemptInterface string, dst netip.Addr) error { return errors.New("not implemented") } + func (n *fakeIPTablesRunner) EnsurePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm linuxfw.PortMap) error { return errors.New("not implemented") } @@ -781,8 +783,8 @@ type fakeOS struct { ips []string routes []string rules []string - //This test tests on the router level, so we will not bother - //with using iptables or nftables, chose the simpler one. + // This test tests on the router level, so we will not bother + // with using iptables or nftables, chose the simpler one. 
nfr linuxfw.NetfilterRunner } @@ -974,7 +976,7 @@ func (lt *linuxTest) Close() error { return nil } -func newLinuxRootTest(t *testing.T) *linuxTest { +func newLinuxRootTest(t *testing.T) (*linuxTest, *eventbus.Bus) { if os.Getuid() != 0 { t.Skip("test requires root") } @@ -984,8 +986,7 @@ func newLinuxRootTest(t *testing.T) *linuxTest { logf := lt.logOutput.Logf - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) mon, err := netmon.New(bus, logger.Discard) if err != nil { @@ -995,7 +996,7 @@ func newLinuxRootTest(t *testing.T) *linuxTest { mon.Start() lt.mon = mon - r, err := newUserspaceRouter(logf, lt.tun, mon, nil) + r, err := newUserspaceRouter(logf, lt.tun, mon, nil, bus) if err != nil { lt.Close() t.Fatal(err) @@ -1006,11 +1007,31 @@ func newLinuxRootTest(t *testing.T) *linuxTest { t.Fatal(err) } lt.r = lr - return lt + return lt, bus +} + +func TestRuleDeletedEvent(t *testing.T) { + fake := NewFakeOS(t) + lt, bus := newLinuxRootTest(t) + lt.r.nfr = fake.nfr + defer lt.Close() + event := netmon.RuleDeleted{ + Table: 52, + Priority: 5210, + } + tw := eventbustest.NewWatcher(t, bus) + + t.Logf("Value before: %t", lt.r.ruleRestorePending.Load()) + if lt.r.ruleRestorePending.Load() { + t.Errorf("rule deletion already ongoing") + } + injector := eventbustest.NewInjector(t, bus) + eventbustest.Inject(injector, event) + eventbustest.Expect(tw, eventbustest.Type[AddIPRules]()) } func TestDelRouteIdempotent(t *testing.T) { - lt := newLinuxRootTest(t) + lt, _ := newLinuxRootTest(t) defer lt.Close() for _, s := range []string{ @@ -1036,7 +1057,7 @@ func TestDelRouteIdempotent(t *testing.T) { } func TestAddRemoveRules(t *testing.T) { - lt := newLinuxRootTest(t) + lt, _ := newLinuxRootTest(t) defer lt.Close() r := lt.r @@ -1054,14 +1075,12 @@ func TestAddRemoveRules(t *testing.T) { t.Logf("Rule: %+v", r) } } - } step("init_del_and_add", r.addIPRules) step("dup_add", r.justAddIPRules) step("del", r.delIPRules) step("dup_del", r.delIPRules) - } func TestDebugListLinks(t *testing.T) { diff --git a/wgengine/router/router_openbsd.go b/wgengine/router/router_openbsd.go index 6fdd47ac9..f91878b4c 100644 --- a/wgengine/router/router_openbsd.go +++ b/wgengine/router/router_openbsd.go @@ -15,6 +15,7 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" "tailscale.com/util/set" ) @@ -31,7 +32,7 @@ type openbsdRouter struct { routes set.Set[netip.Prefix] } -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { tunname, err := tundev.Name() if err != nil { return nil, err diff --git a/wgengine/router/router_plan9.go b/wgengine/router/router_plan9.go index 7ed7686d9..fd6850ade 100644 --- a/wgengine/router/router_plan9.go +++ b/wgengine/router/router_plan9.go @@ -15,9 +15,10 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { r := &plan9Router{ logf: logf, tundev: tundev, diff --git a/wgengine/router/router_windows.go b/wgengine/router/router_windows.go index 
64163660d..32d05110d 100644 --- a/wgengine/router/router_windows.go +++ b/wgengine/router/router_windows.go @@ -27,6 +27,7 @@ import ( "tailscale.com/net/dns" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) type winRouter struct { @@ -38,7 +39,7 @@ type winRouter struct { firewall *firewallTweaker } -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { nativeTun := tundev.(*tun.NativeTun) luid := winipcfg.LUID(nativeTun.LUID()) guid, err := luid.GUID() From 9f29c428f44f38d452f1e4090cb016f5e238baf6 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 4 Aug 2025 17:09:32 -0700 Subject: [PATCH 1162/1708] client/systray: allow specifying tailscaled socket Pass a local.Client to systray.Run, so we can use the existing global localClient in the cmd/tailscale CLI. Add socket flag to cmd/systray. Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/systray/systray.go | 9 +++++++-- cmd/systray/systray.go | 10 +++++++++- cmd/tailscale/cli/systray.go | 3 +-- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/client/systray/systray.go b/client/systray/systray.go index 76c93ae18..5cd5e602f 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -48,7 +48,12 @@ var ( ) // Run starts the systray menu and blocks until the menu exits. -func (menu *Menu) Run() { +// If client is nil, a default local.Client is used. +func (menu *Menu) Run(client *local.Client) { + if client == nil { + client = &local.Client{} + } + menu.lc = client menu.updateState() // exit cleanly on SIGINT and SIGTERM @@ -71,7 +76,7 @@ func (menu *Menu) Run() { type Menu struct { mu sync.Mutex // protects the entire Menu - lc local.Client + lc *local.Client status *ipnstate.Status curProfile ipn.LoginProfile allProfiles []ipn.LoginProfile diff --git a/cmd/systray/systray.go b/cmd/systray/systray.go index 0185a1bc2..d35595e25 100644 --- a/cmd/systray/systray.go +++ b/cmd/systray/systray.go @@ -7,9 +7,17 @@ package main import ( + "flag" + + "tailscale.com/client/local" "tailscale.com/client/systray" + "tailscale.com/paths" ) +var socket = flag.String("socket", paths.DefaultTailscaledSocket(), "path to tailscaled socket") + func main() { - new(systray.Menu).Run() + flag.Parse() + lc := &local.Client{Socket: *socket} + new(systray.Menu).Run(lc) } diff --git a/cmd/tailscale/cli/systray.go b/cmd/tailscale/cli/systray.go index 184c85360..05d688faa 100644 --- a/cmd/tailscale/cli/systray.go +++ b/cmd/tailscale/cli/systray.go @@ -17,8 +17,7 @@ var systrayCmd = &ffcli.Command{ ShortUsage: "tailscale systray", ShortHelp: "Run a systray application to manage Tailscale", Exec: func(_ context.Context, _ []string) error { - // TODO(will): pass localClient to menu to use the global --socket flag - new(systray.Menu).Run() + new(systray.Menu).Run(&localClient) return nil }, } From ad273d75b7a14680f230b9c6b61bac0c72adca0c Mon Sep 17 00:00:00 2001 From: Erisa A Date: Tue, 5 Aug 2025 18:24:32 +0100 Subject: [PATCH 1163/1708] scripts/installer.sh: add bazzite handling (#16779) Fixes #14540 Signed-off-by: Erisa A --- scripts/installer.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/scripts/installer.sh b/scripts/installer.sh index f81ae5292..d259cfda5 100755 --- a/scripts/installer.sh +++ 
b/scripts/installer.sh @@ -280,6 +280,14 @@ main() { echo "services.tailscale.enable = true;" exit 1 ;; + bazzite) + echo "Bazzite comes with Tailscale installed by default." + echo "Please enable Tailscale by running the following commands as root:" + echo + echo "ujust enable-tailscale" + echo "tailscale up" + exit 1 + ;; void) OS="$ID" VERSION="" # rolling release From f80ea9203055a4853bc156909de6869a3c6d4347 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Tue, 5 Aug 2025 11:49:33 -0700 Subject: [PATCH 1164/1708] .github/workflows: enforce github action version pinning (#16768) Use https://github.com/stacklok/frizbee via the new `go tool` support from Go 1.24. Updates https://github.com/tailscale/corp/issues/31017 Signed-off-by: Andrew Lytvynov --- .github/workflows/pin-github-actions.yml | 29 ++++++++ .github/workflows/test.yml | 2 +- Makefile | 4 + cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 5 +- cmd/stund/depaware.txt | 1 + go.mod | 31 +++++--- go.sum | 95 +++++++++++++++--------- 8 files changed, 120 insertions(+), 48 deletions(-) create mode 100644 .github/workflows/pin-github-actions.yml diff --git a/.github/workflows/pin-github-actions.yml b/.github/workflows/pin-github-actions.yml new file mode 100644 index 000000000..cb6673993 --- /dev/null +++ b/.github/workflows/pin-github-actions.yml @@ -0,0 +1,29 @@ +# Pin images used in github actions to a hash instead of a version tag. +name: pin-github-actions +on: + pull_request: + branches: + - main + paths: + - ".github/workflows/**" + + workflow_dispatch: + +permissions: + contents: read + pull-requests: read + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + run: + name: pin-github-actions + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: pin + run: make pin-github-actions + - name: check for changed workflow files + run: git diff --no-ext-diff --exit-code .github/workflows || (echo "Some github actions versions need pinning, run make pin-github-actions."; exit 1) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d5b09a9e6..c2f539662 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -57,7 +57,7 @@ jobs: # See if the cache entry already exists to avoid downloading it # and doing the cache write again. - id: check-cache - uses: actions/cache/restore@v4 + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4 with: path: gomodcache # relative to workspace; see env note at top of file key: ${{ steps.hash.outputs.key }} diff --git a/Makefile b/Makefile index 78812d57c..0a7fc28dd 100644 --- a/Makefile +++ b/Makefile @@ -137,6 +137,10 @@ sshintegrationtest: ## Run the SSH integration tests in various Docker container generate: ## Generate code ./tool/go generate ./... +.PHONY: pin-github-actions +pin-github-actions: + ./tool/go tool github.com/stacklok/frizbee actions .github/workflows + help: ## Show this help @echo "" @echo "Specify a command. 
The choices are:" diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 7adbf397f..20b6bfb6e 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -77,6 +77,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa 💣 google.golang.org/protobuf/internal/impl from google.golang.org/protobuf/internal/filetype+ google.golang.org/protobuf/internal/order from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/pragma from google.golang.org/protobuf/encoding/prototext+ + 💣 google.golang.org/protobuf/internal/protolazy from google.golang.org/protobuf/internal/impl+ google.golang.org/protobuf/internal/set from google.golang.org/protobuf/encoding/prototext 💣 google.golang.org/protobuf/internal/strs from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/version from google.golang.org/protobuf/runtime/protoimpl diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index f810d1b4f..2dbf49d07 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -157,7 +157,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd - github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe + github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe+ github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd github.com/mailru/easyjson/buffer from github.com/mailru/easyjson/jwriter 💣 github.com/mailru/easyjson/jlexer from github.com/go-openapi/swag @@ -176,6 +176,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/opencontainers/go-digest from github.com/distribution/reference github.com/pkg/errors from github.com/evanphx/json-patch/v5+ D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack + github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil from github.com/prometheus/client_golang/prometheus/promhttp + github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header from github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil 💣 github.com/prometheus/client_golang/prometheus from github.com/prometheus/client_golang/prometheus/collectors+ github.com/prometheus/client_golang/prometheus/collectors from sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics github.com/prometheus/client_golang/prometheus/internal from github.com/prometheus/client_golang/prometheus+ @@ -252,6 +254,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 google.golang.org/protobuf/internal/impl from google.golang.org/protobuf/internal/filetype+ google.golang.org/protobuf/internal/order from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/pragma from google.golang.org/protobuf/encoding/prototext+ + 💣 google.golang.org/protobuf/internal/protolazy from google.golang.org/protobuf/internal/impl+ google.golang.org/protobuf/internal/set from google.golang.org/protobuf/encoding/prototext 💣 google.golang.org/protobuf/internal/strs from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/version from 
google.golang.org/protobuf/runtime/protoimpl diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 81544b750..d389d59a3 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -38,6 +38,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar 💣 google.golang.org/protobuf/internal/impl from google.golang.org/protobuf/internal/filetype+ google.golang.org/protobuf/internal/order from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/pragma from google.golang.org/protobuf/encoding/prototext+ + 💣 google.golang.org/protobuf/internal/protolazy from google.golang.org/protobuf/internal/impl+ google.golang.org/protobuf/internal/set from google.golang.org/protobuf/encoding/prototext 💣 google.golang.org/protobuf/internal/strs from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/version from google.golang.org/protobuf/runtime/protoimpl diff --git a/go.mod b/go.mod index 3d7514158..92de032ff 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/golang/snappy v0.0.4 github.com/golangci/golangci-lint v1.57.1 github.com/google/go-cmp v0.6.0 - github.com/google/go-containerregistry v0.20.2 + github.com/google/go-containerregistry v0.20.3 github.com/google/go-tpm v0.9.4 github.com/google/gopacket v1.1.19 github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 @@ -72,7 +72,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.6 github.com/prometheus-community/pro-bing v0.4.0 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.5 github.com/prometheus/common v0.55.0 github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff github.com/safchain/ethtool v0.3.0 @@ -96,7 +96,7 @@ require ( github.com/tcnksm/go-httpstat v0.2.0 github.com/toqueteos/webbrowser v1.2.0 github.com/u-root/u-root v0.14.0 - github.com/vishvananda/netns v0.0.4 + github.com/vishvananda/netns v0.0.5 go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba @@ -142,7 +142,9 @@ require ( github.com/catenacyber/perfsprint v0.7.1 // indirect github.com/ccojocar/zxcvbn-go v1.0.2 // indirect github.com/ckaznocha/intrange v0.1.0 // indirect + github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/cyphar/filepath-securejoin v0.3.6 // indirect + github.com/deckarep/golang-set/v2 v2.8.0 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -154,6 +156,8 @@ require ( github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect github.com/golangci/plugin-module-register v0.1.1 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect + github.com/google/go-github/v66 v66.0.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect github.com/gorilla/securecookie v1.1.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-metrics v0.5.4 // indirect @@ -161,9 +165,14 @@ require ( github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/jjti/go-spancheck v0.5.3 // indirect github.com/karamaru-alpha/copyloopvar v1.0.8 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/macabu/inamedparam v0.1.3 // indirect + github.com/moby/buildkit v0.20.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/planetscale/vtprotobuf 
v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/puzpuzpuz/xsync v1.5.2 // indirect github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect + github.com/stacklok/frizbee v0.1.7 // indirect github.com/xen0n/gosmopolitan v1.2.2 // indirect github.com/ykadowak/zerologlint v0.1.5 // indirect go-simpler.org/musttag v0.9.0 // indirect @@ -231,9 +240,9 @@ require ( github.com/daixiang0/gci v0.12.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect - github.com/docker/cli v27.4.1+incompatible // indirect + github.com/docker/cli v27.5.1+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v27.4.1+incompatible // indirect + github.com/docker/docker v27.5.1+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/emicklei/go-restful/v3 v3.11.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect @@ -246,7 +255,7 @@ require ( github.com/fzipp/gocyclo v0.6.0 // indirect github.com/go-critic/go-critic v0.11.2 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.6.1 // indirect + github.com/go-git/go-billy/v5 v5.6.2 // indirect github.com/go-git/go-git/v5 v5.13.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -261,7 +270,7 @@ require ( github.com/go-toolsmith/typep v1.1.0 // indirect github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect @@ -364,9 +373,9 @@ require ( github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/cobra v1.9.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/pflag v1.0.6 // indirect github.com/spf13/viper v1.16.0 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect @@ -398,7 +407,7 @@ require ( golang.org/x/image v0.27.0 // indirect golang.org/x/text v0.25.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect @@ -414,3 +423,5 @@ require ( sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect ) + +tool github.com/stacklok/frizbee diff --git a/go.sum b/go.sum index 995b93010..7db41f566 100644 --- a/go.sum +++ b/go.sum @@ -235,11 +235,13 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= +github.com/containerd/typeurl/v2 v2.2.3 
h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= +github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creachadair/mds v0.17.1 h1:lXQbTGKmb3nE3aK6OEp29L1gCx6B5ynzlQ6c1KOBurc= github.com/creachadair/mds v0.17.1/go.mod h1:4b//mUiL8YldH6TImXjmW45myzTLNS1LLjOmrk888eg= github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc= @@ -259,6 +261,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa h1:h8TfIT1xc8FWbwwpmHn1J5i43Y0uZP97GqasGCzSRJk= github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa/go.mod h1:Nx87SkVqTKd8UtT+xu7sM/l+LgXs6c0aHrlKusR+2EQ= +github.com/deckarep/golang-set/v2 v2.8.0 h1:swm0rlPCmdWn9mESxKOjWk8hXSqoxOp+ZlfuyaAdFlQ= +github.com/deckarep/golang-set/v2 v2.8.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8= github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= @@ -267,12 +271,12 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= -github.com/docker/cli v27.4.1+incompatible h1:VzPiUlRJ/xh+otB75gva3r05isHMo5wXDfPRi5/b4hI= -github.com/docker/cli v27.4.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3LKVc6cqWaY= +github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4= -github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= +github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= github.com/docker/docker-credential-helpers v0.8.2/go.mod 
h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= @@ -334,8 +338,8 @@ github.com/go-critic/go-critic v0.11.2 h1:81xH/2muBphEgPtcwH1p6QD+KzXl2tMSi3hXjB github.com/go-critic/go-critic v0.11.2/go.mod h1:OePaicfjsf+KPy33yq4gzv6CO7TEQ9Rom6ns1KsJnl8= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/SoRA= -github.com/go-git/go-billy/v5 v5.6.1/go.mod h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE= +github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= +github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M= @@ -411,8 +415,8 @@ github.com/goccy/go-yaml v1.12.0 h1:/1WHjnMsI1dlIBQutrvSMGZRQufVO3asrHfTwfACoPM= github.com/goccy/go-yaml v1.12.0/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -486,8 +490,12 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo= -github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8= +github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= +github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= +github.com/google/go-github/v66 v66.0.0 h1:ADJsaXj9UotwdgK8/iFZtv7MLc8E8WBl62WLd/D/9+M= +github.com/google/go-github/v66 v66.0.0/go.mod h1:+4SO9Zkuyf8ytMj0csN1NR/5OTR+MfqPp8P8dVlcvY4= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/go-tpm v0.9.4 h1:awZRf9FwOeTunQmHoDYSHJps3ie6f1UlhS1fOdPEt1I= github.com/google/go-tpm v0.9.4/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= 
github.com/google/go-tpm-tools v0.3.13-0.20230620182252-4639ecce2aba h1:qJEJcuLzH5KDR0gKc0zcktin6KSAwL7+jWKBYceddTc= @@ -547,8 +555,10 @@ github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= @@ -670,6 +680,8 @@ github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCTdvWJ/lDDs= github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= @@ -733,10 +745,12 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/buildkit v0.20.2 h1:qIeR47eQ1tzI1rwz0on3Xx2enRw/1CKjFhoONVcTlMA= +github.com/moby/buildkit v0.20.2/go.mod h1:DhaF82FjwOElTftl0JUAJpH/SUIUx4UvcFncLeOtlDI= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd 
h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -802,6 +816,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -818,8 +834,8 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -843,6 +859,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff h1:X1Tly81aZ22DA1fxBdfvR3iw8+yFoUBUHMEd+AX/ZXI= github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff/go.mod h1:FvE8dtQ1Ww63IlyKBn1V4s+zMwF9kHkVNkQBR1pM4CU= +github.com/puzpuzpuz/xsync v1.5.2 h1:yRAP4wqSOZG+/4pxJ08fPTwrfL0IzE/LKQ/cw509qGY= +github.com/puzpuzpuz/xsync v1.5.2/go.mod h1:K98BYhX3k1dQ2M63t1YNVDanbwUPmBCAhNmVrrxfiGg= github.com/quasilyte/go-ruleguard v0.4.2 h1:htXcXDK6/rO12kiTHKfHuqR4kr3Y4M0J0rOL6CH/BYs= github.com/quasilyte/go-ruleguard v0.4.2/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= @@ -912,16 +930,19 @@ github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNo github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= 
-github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stacklok/frizbee v0.1.7 h1:IgrZy8dqKy+vBxNWrZTbDoctnV0doQKrFC6bNbWP5ho= +github.com/stacklok/frizbee v0.1.7/go.mod h1:eqMjHEgRYDSlpYpir3wXO6jyGpxr1dnFTvrTdrTIF7E= github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1021,8 +1042,8 @@ github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7Km github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs= github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= +github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= @@ -1067,14 +1088,14 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEj go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= @@ -1435,10 +1456,10 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 h1:2oV8dfuIkM1Ti7DwXc0BJfnwr9csz4TDXI9EmiI+Rbw= +google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38/go.mod h1:vuAjtvlwkDKF6L1GQ0SokiRLCGFfeBUXWr/aFFkHACc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1451,8 +1472,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc 
v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1465,8 +1486,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1476,6 +1497,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY= +gopkg.in/h2non/gock.v1 v1.1.2/go.mod h1:n7UGz/ckNChHiK05rDoiC4MYSunEC/lyaUm2WWaDva0= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= From 57d653014b1a8bbb4dc7c96c98781df506210351 Mon Sep 17 00:00:00 2001 From: TheBigBear <471105+TheBigBear@users.noreply.github.com> Date: Wed, 6 Aug 2025 03:38:50 +0200 Subject: [PATCH 1165/1708] scripts/installer.sh: add FreeBSD 15 (#16741) * Update installer.sh add FreeBSD ver 15 this should fix the issue on https://github.com/tailscale/tailscale/issues/16740 Signed-off-by: TheBigBear <471105+TheBigBear@users.noreply.github.com> * scripts/installer.sh: small indentation change Signed-off-by: Erisa A Fixes #16740 --------- Signed-off-by: TheBigBear <471105+TheBigBear@users.noreply.github.com> Signed-off-by: Erisa A Co-authored-by: Erisa A --- scripts/installer.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/installer.sh b/scripts/installer.sh index d259cfda5..4d968cd2b 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -408,7 +408,8 @@ main() { freebsd) if [ "$VERSION" != "12" ] && \ [ "$VERSION" != "13" ] && \ - [ "$VERSION" != "14" ] + [ "$VERSION" != "14" ] && \ + [ "$VERSION" != "15" ] then OS_UNSUPPORTED=1 fi From 908f20e0a506f9fe0c3f6479bc6b7c017cab27a1 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 6 Aug 2025 09:35:25 -0700 Subject: [PATCH 1166/1708] wgengine/magicsock: add receiveIP() unit tests (#16781) One of these tests highlighted a Geneve encap bug, which is also 
fixed in this commit. looksLikeInitMsg was passed a packet post Geneve header stripping with slice offsets that had not been updated to account for the stripping. Updates tailscale/corp#30903 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 6 +- wgengine/magicsock/magicsock_test.go | 314 +++++++++++++++++++++++++++ 2 files changed, 319 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index c99d1b68f..04d4bbbde 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1823,6 +1823,9 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach return nil, 0, false, false } + // geneveInclusivePacketLen holds the packet length prior to any potential + // Geneve header stripping. + geneveInclusivePacketLen := len(b) if src.vni.isSet() { // Strip away the Geneve header before returning the packet to // wireguard-go. @@ -1831,6 +1834,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // to support returning start offset in order to get rid of this memmove perf // penalty. size = copy(b, b[packet.GeneveFixedHeaderLength:]) + b = b[:size] } if cache.epAddr == src && cache.de != nil && cache.gen == cache.de.numStopAndReset() { @@ -1859,7 +1863,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach ep.lastRecvUDPAny.StoreAtomic(now) connNoted := ep.noteRecvActivity(src, now) if stats := c.stats.Load(); stats != nil { - stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, len(b)) + stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, geneveInclusivePacketLen) } if src.vni.isSet() && (connNoted || looksLikeInitiationMsg(b)) { // connNoted is periodic, but we also want to verify if the peer is who diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 0d1ac9dfd..685fff4da 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -20,6 +20,7 @@ import ( "net/http/httptest" "net/netip" "os" + "reflect" "runtime" "strconv" "strings" @@ -66,6 +67,7 @@ import ( "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/cibuild" + "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" "tailscale.com/util/must" "tailscale.com/util/racebuild" @@ -3701,3 +3703,315 @@ func TestConn_updateRelayServersSet(t *testing.T) { }) } } + +func TestConn_receiveIP(t *testing.T) { + looksLikeNakedDisco := make([]byte, 0, len(disco.Magic)+key.DiscoPublicRawLen) + looksLikeNakedDisco = append(looksLikeNakedDisco, disco.Magic...) 
+ looksLikeNakedDisco = looksLikeNakedDisco[:cap(looksLikeNakedDisco)] + + looksLikeGeneveDisco := make([]byte, packet.GeneveFixedHeaderLength+len(looksLikeNakedDisco)) + gh := packet.GeneveHeader{ + Protocol: packet.GeneveProtocolDisco, + } + err := gh.Encode(looksLikeGeneveDisco) + if err != nil { + t.Fatal(err) + } + copy(looksLikeGeneveDisco[packet.GeneveFixedHeaderLength:], looksLikeNakedDisco) + + looksLikeSTUNBinding := stun.Response(stun.NewTxID(), netip.MustParseAddrPort("127.0.0.1:7777")) + + findMetricByName := func(name string) *clientmetric.Metric { + for _, metric := range clientmetric.Metrics() { + if metric.Name() == name { + return metric + } + } + t.Fatalf("failed to find metric with name: %v", name) + return nil + } + + looksLikeNakedWireGuardInit := make([]byte, device.MessageInitiationSize) + binary.LittleEndian.PutUint32(looksLikeNakedWireGuardInit, device.MessageInitiationType) + + looksLikeGeneveWireGuardInit := make([]byte, packet.GeneveFixedHeaderLength+device.MessageInitiationSize) + gh = packet.GeneveHeader{ + Protocol: packet.GeneveProtocolWireGuard, + VNI: 1, + } + vni := virtualNetworkID{} + vni.set(gh.VNI) + err = gh.Encode(looksLikeGeneveWireGuardInit) + if err != nil { + t.Fatal(err) + } + copy(looksLikeGeneveWireGuardInit[packet.GeneveFixedHeaderLength:], looksLikeNakedWireGuardInit) + + newPeerMapInsertableEndpoint := func(lastRecvWG mono.Time) *endpoint { + ep := &endpoint{ + nodeID: 1, + publicKey: key.NewNode().Public(), + lastRecvWG: lastRecvWG, + } + ep.disco.Store(&endpointDisco{ + key: key.NewDisco().Public(), + }) + return ep + } + + tests := []struct { + name string + // A copy of b is used as input, tests may re-use the same value. + b []byte + ipp netip.AddrPort + // cache must be non-nil, and must not be reused across tests. If + // cache.de is non-nil after receiveIP(), then we verify it is equal to + // wantEndpointType. + cache *epAddrEndpointCache + // If true, wantEndpointType is inserted into the [peerMap]. + insertWantEndpointTypeInPeerMap bool + // If insertWantEndpointTypeInPeerMap is true, use this [epAddr] for it + // in the [peerMap.setNodeKeyForEpAddr] call. + peerMapEpAddr epAddr + // If [*endpoint] then we expect 'got' to be the same [*endpoint]. If + // [*lazyEndpoint] and [*lazyEndpoint.maybeEP] is non-nil, we expect + // got.maybeEP to also be non-nil. Must not be reused across tests. 
+ wantEndpointType wgconn.Endpoint + wantSize int + wantIsGeneveEncap bool + wantOk bool + wantMetricInc *clientmetric.Metric + wantNoteRecvActivityCalled bool + }{ + { + name: "naked disco", + b: looksLikeNakedDisco, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + wantEndpointType: nil, + wantSize: 0, + wantIsGeneveEncap: false, + wantOk: false, + wantMetricInc: metricRecvDiscoBadPeer, + wantNoteRecvActivityCalled: false, + }, + { + name: "geneve encap disco", + b: looksLikeGeneveDisco, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + wantEndpointType: nil, + wantSize: 0, + wantIsGeneveEncap: false, + wantOk: false, + wantMetricInc: metricRecvDiscoBadPeer, + wantNoteRecvActivityCalled: false, + }, + { + name: "STUN binding", + b: looksLikeSTUNBinding, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + wantEndpointType: nil, + wantSize: 0, + wantIsGeneveEncap: false, + wantOk: false, + wantMetricInc: findMetricByName("netcheck_stun_recv_ipv4"), + wantNoteRecvActivityCalled: false, + }, + { + name: "naked WireGuard init lazyEndpoint empty peerMap", + b: looksLikeNakedWireGuardInit, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + wantEndpointType: &lazyEndpoint{}, + wantSize: len(looksLikeNakedWireGuardInit), + wantIsGeneveEncap: false, + wantOk: true, + wantMetricInc: nil, + wantNoteRecvActivityCalled: false, + }, + { + name: "naked WireGuard init endpoint matching peerMap entry", + b: looksLikeNakedWireGuardInit, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + insertWantEndpointTypeInPeerMap: true, + peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777")}, + wantEndpointType: newPeerMapInsertableEndpoint(0), + wantSize: len(looksLikeNakedWireGuardInit), + wantIsGeneveEncap: false, + wantOk: true, + wantMetricInc: nil, + wantNoteRecvActivityCalled: true, + }, + { + name: "geneve WireGuard init lazyEndpoint empty peerMap", + b: looksLikeGeneveWireGuardInit, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + wantEndpointType: &lazyEndpoint{}, + wantSize: len(looksLikeGeneveWireGuardInit) - packet.GeneveFixedHeaderLength, + wantIsGeneveEncap: true, + wantOk: true, + wantMetricInc: nil, + wantNoteRecvActivityCalled: false, + }, + { + name: "geneve WireGuard init lazyEndpoint matching peerMap activity noted", + b: looksLikeGeneveWireGuardInit, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + insertWantEndpointTypeInPeerMap: true, + peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777"), vni: vni}, + wantEndpointType: &lazyEndpoint{ + maybeEP: newPeerMapInsertableEndpoint(0), + }, + wantSize: len(looksLikeGeneveWireGuardInit) - packet.GeneveFixedHeaderLength, + wantIsGeneveEncap: true, + wantOk: true, + wantMetricInc: nil, + wantNoteRecvActivityCalled: true, + }, + { + name: "geneve WireGuard init lazyEndpoint matching peerMap no activity noted", + b: looksLikeGeneveWireGuardInit, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + insertWantEndpointTypeInPeerMap: true, + peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777"), vni: vni}, + wantEndpointType: &lazyEndpoint{ + maybeEP: newPeerMapInsertableEndpoint(mono.Now().Add(time.Hour * 24)), + }, + wantSize: len(looksLikeGeneveWireGuardInit) - packet.GeneveFixedHeaderLength, + wantIsGeneveEncap: true, + wantOk: true, + 
wantMetricInc: nil, + wantNoteRecvActivityCalled: false, + }, + // TODO(jwhited): verify cache.de is used when conditions permit + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + noteRecvActivityCalled := false + metricBefore := int64(0) + if tt.wantMetricInc != nil { + metricBefore = tt.wantMetricInc.Value() + } + + // Init Conn. + c := &Conn{ + privateKey: key.NewNode(), + netChecker: &netcheck.Client{}, + peerMap: newPeerMap(), + } + c.havePrivateKey.Store(true) + c.noteRecvActivity = func(public key.NodePublic) { + noteRecvActivityCalled = true + } + c.SetStatistics(connstats.NewStatistics(0, 0, nil)) + + if tt.insertWantEndpointTypeInPeerMap { + var insertEPIntoPeerMap *endpoint + switch ep := tt.wantEndpointType.(type) { + case *endpoint: + insertEPIntoPeerMap = ep + case *lazyEndpoint: + insertEPIntoPeerMap = ep.maybeEP + default: + t.Fatal("unexpected tt.wantEndpointType concrete type") + } + insertEPIntoPeerMap.c = c + c.peerMap.upsertEndpoint(insertEPIntoPeerMap, key.DiscoPublic{}) + c.peerMap.setNodeKeyForEpAddr(tt.peerMapEpAddr, insertEPIntoPeerMap.publicKey) + } + + // Allow the same input packet to be used across tests, receiveIP() + // may mutate. + inputPacket := make([]byte, len(tt.b)) + copy(inputPacket, tt.b) + + got, gotSize, gotIsGeneveEncap, gotOk := c.receiveIP(inputPacket, tt.ipp, tt.cache) + if (tt.wantEndpointType == nil) != (got == nil) { + t.Errorf("receiveIP() (tt.wantEndpointType == nil): %v != (got == nil): %v", tt.wantEndpointType == nil, got == nil) + } + if tt.wantEndpointType != nil && reflect.TypeOf(got).String() != reflect.TypeOf(tt.wantEndpointType).String() { + t.Errorf("receiveIP() got = %v, want %v", reflect.TypeOf(got).String(), reflect.TypeOf(tt.wantEndpointType).String()) + } else { + switch ep := tt.wantEndpointType.(type) { + case *endpoint: + if ep != got.(*endpoint) { + t.Errorf("receiveIP() want [*endpoint]: %p != got [*endpoint]: %p", ep, got) + } + case *lazyEndpoint: + if ep.maybeEP != nil && ep.maybeEP != got.(*lazyEndpoint).maybeEP { + t.Errorf("receiveIP() want [*lazyEndpoint.maybeEP]: %p != got [*lazyEndpoint.maybeEP] %p", ep, got) + } + } + } + + if gotSize != tt.wantSize { + t.Errorf("receiveIP() gotSize = %v, want %v", gotSize, tt.wantSize) + } + if gotIsGeneveEncap != tt.wantIsGeneveEncap { + t.Errorf("receiveIP() gotIsGeneveEncap = %v, want %v", gotIsGeneveEncap, tt.wantIsGeneveEncap) + } + if gotOk != tt.wantOk { + t.Errorf("receiveIP() gotOk = %v, want %v", gotOk, tt.wantOk) + } + if tt.wantMetricInc != nil && tt.wantMetricInc.Value() != metricBefore+1 { + t.Errorf("receiveIP() metric %v not incremented", tt.wantMetricInc.Name()) + } + if tt.wantNoteRecvActivityCalled != noteRecvActivityCalled { + t.Errorf("receiveIP() noteRecvActivityCalled = %v, want %v", noteRecvActivityCalled, tt.wantNoteRecvActivityCalled) + } + + if tt.cache.de != nil { + switch ep := got.(type) { + case *endpoint: + if tt.cache.de != ep { + t.Errorf("receiveIP() cache populated with [*endpoint] %p, want %p", tt.cache.de, ep) + } + case *lazyEndpoint: + if tt.cache.de != ep.maybeEP { + t.Errorf("receiveIP() cache populated with [*endpoint] %p, want (lazyEndpoint.maybeEP) %p", tt.cache.de, ep.maybeEP) + } + default: + t.Fatal("receiveIP() unexpected [conn.Endpoint] type") + } + } + + // Verify physical rx stats + stats := c.stats.Load() + _, gotPhy := stats.TestExtract() + wantNonzeroRxStats := false + switch ep := tt.wantEndpointType.(type) { + case *lazyEndpoint: + if ep.maybeEP != nil { + wantNonzeroRxStats = true + } + case 
*endpoint: + wantNonzeroRxStats = true + } + if tt.wantOk && wantNonzeroRxStats { + wantRxBytes := uint64(tt.wantSize) + if tt.wantIsGeneveEncap { + wantRxBytes += packet.GeneveFixedHeaderLength + } + wantPhy := map[netlogtype.Connection]netlogtype.Counts{ + {Dst: tt.ipp}: { + RxPackets: 1, + RxBytes: wantRxBytes, + }, + } + if !reflect.DeepEqual(gotPhy, wantPhy) { + t.Errorf("receiveIP() got physical conn stats = %v, want %v", gotPhy, wantPhy) + } + } else { + if len(gotPhy) != 0 { + t.Errorf("receiveIP() unexpected nonzero physical count stats: %+v", gotPhy) + } + } + }) + } +} From 02967ffcf258ec0face9955d1d420ed22681b306 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 6 Aug 2025 14:41:05 -0700 Subject: [PATCH 1167/1708] wgengine/magicsock: add lazyEndpoint.InitiationMessagePublicKey tests (#16790) Updates tailscale/corp#30903 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock_test.go | 81 ++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 685fff4da..c57086201 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -4015,3 +4015,84 @@ func TestConn_receiveIP(t *testing.T) { }) } } + +func Test_lazyEndpoint_InitiationMessagePublicKey(t *testing.T) { + tests := []struct { + name string + callWithPeerMapKey bool + maybeEPMatchingKey bool + wantNoteRecvActivityCalled bool + }{ + { + name: "noteRecvActivity called", + callWithPeerMapKey: true, + maybeEPMatchingKey: false, + wantNoteRecvActivityCalled: true, + }, + { + name: "maybeEP early return", + callWithPeerMapKey: true, + maybeEPMatchingKey: true, + wantNoteRecvActivityCalled: false, + }, + { + name: "not in peerMap early return", + callWithPeerMapKey: false, + maybeEPMatchingKey: false, + wantNoteRecvActivityCalled: false, + }, + { + name: "not in peerMap maybeEP early return", + callWithPeerMapKey: false, + maybeEPMatchingKey: true, + wantNoteRecvActivityCalled: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ep := &endpoint{ + nodeID: 1, + publicKey: key.NewNode().Public(), + } + ep.disco.Store(&endpointDisco{ + key: key.NewDisco().Public(), + }) + + var noteRecvActivityCalledFor key.NodePublic + conn := newConn(t.Logf) + conn.noteRecvActivity = func(public key.NodePublic) { + // wireguard-go will call into ParseEndpoint if the "real" + // noteRecvActivity ends up JIT configuring the peer. Mimic that + // to ensure there are no deadlocks around conn.mu. 
+ // See tailscale/tailscale#16651 & http://go/corp#30836 + _, err := conn.ParseEndpoint(ep.publicKey.UntypedHexString()) + if err != nil { + t.Fatalf("ParseEndpoint() err: %v", err) + } + noteRecvActivityCalledFor = public + } + ep.c = conn + + var pubKey [32]byte + if tt.callWithPeerMapKey { + copy(pubKey[:], ep.publicKey.AppendTo(nil)) + } + conn.peerMap.upsertEndpoint(ep, key.DiscoPublic{}) + + le := &lazyEndpoint{ + c: conn, + } + if tt.maybeEPMatchingKey { + le.maybeEP = ep + } + le.InitiationMessagePublicKey(pubKey) + want := key.NodePublic{} + if tt.wantNoteRecvActivityCalled { + want = ep.publicKey + } + if noteRecvActivityCalledFor.Compare(want) != 0 { + t.Fatalf("noteRecvActivityCalledFor = %v, want %v", noteRecvActivityCalledFor, want) + } + }) + } +} From 0374e6d9060a7eb2c233825e5bbc344375e7f8a3 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 6 Aug 2025 14:55:34 -0700 Subject: [PATCH 1168/1708] wgengine/magicsock: add lazyEndpoint.FromPeer tests (#16791) Updates tailscale/corp#30903 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock_test.go | 72 ++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index c57086201..e12f15b22 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -4096,3 +4096,75 @@ func Test_lazyEndpoint_InitiationMessagePublicKey(t *testing.T) { }) } } + +func Test_lazyEndpoint_FromPeer(t *testing.T) { + tests := []struct { + name string + callWithPeerMapKey bool + maybeEPMatchingKey bool + wantEpAddrInPeerMap bool + }{ + { + name: "epAddr in peerMap", + callWithPeerMapKey: true, + maybeEPMatchingKey: false, + wantEpAddrInPeerMap: true, + }, + { + name: "maybeEP early return", + callWithPeerMapKey: true, + maybeEPMatchingKey: true, + wantEpAddrInPeerMap: false, + }, + { + name: "not in peerMap early return", + callWithPeerMapKey: false, + maybeEPMatchingKey: false, + wantEpAddrInPeerMap: false, + }, + { + name: "not in peerMap maybeEP early return", + callWithPeerMapKey: false, + maybeEPMatchingKey: true, + wantEpAddrInPeerMap: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ep := &endpoint{ + nodeID: 1, + publicKey: key.NewNode().Public(), + } + ep.disco.Store(&endpointDisco{ + key: key.NewDisco().Public(), + }) + conn := newConn(t.Logf) + ep.c = conn + + var pubKey [32]byte + if tt.callWithPeerMapKey { + copy(pubKey[:], ep.publicKey.AppendTo(nil)) + } + conn.peerMap.upsertEndpoint(ep, key.DiscoPublic{}) + + le := &lazyEndpoint{ + c: conn, + src: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777")}, + } + if tt.maybeEPMatchingKey { + le.maybeEP = ep + } + le.FromPeer(pubKey) + if tt.wantEpAddrInPeerMap { + gotEP, ok := conn.peerMap.endpointForEpAddr(le.src) + if !ok { + t.Errorf("lazyEndpoint epAddr not found in peerMap") + } else if gotEP != ep { + t.Errorf("gotEP: %p != ep: %p", gotEP, ep) + } + } else if len(conn.peerMap.byEpAddr) != 0 { + t.Errorf("unexpected epAddr in peerMap") + } + }) + } +} From 4666d4ca2af5885329a6546d14c890d08e65c82e Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 6 Aug 2025 14:57:55 -0700 Subject: [PATCH 1169/1708] wgengine/magicsock: fix missing Conn.hasPeerRelayServers.Store() call (#16792) This commit also extends the updateRelayServersSet unit tests to cover onNodeViewsUpdate. 
Fixes tailscale/corp#31080 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 9 ++- wgengine/magicsock/magicsock_test.go | 89 +++++++++++++++++++++++----- 2 files changed, 78 insertions(+), 20 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 04d4bbbde..a4ba090ef 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -274,11 +274,9 @@ type Conn struct { captureHook syncs.AtomicValue[packet.CaptureCallback] // hasPeerRelayServers is whether [relayManager] is configured with at least - // one peer relay server via [relayManager.handleRelayServersSet]. It is - // only accessed by [Conn.updateRelayServersSet], [endpoint.setDERPHome], - // and [endpoint.discoverUDPRelayPathsLocked]. It exists to suppress - // calls into [relayManager] leading to wasted work involving channel - // operations and goroutine creation. + // one peer relay server via [relayManager.handleRelayServersSet]. It exists + // to suppress calls into [relayManager] leading to wasted work involving + // channel operations and goroutine creation. hasPeerRelayServers atomic.Bool // discoPrivate is the private naclbox key used for active @@ -2998,6 +2996,7 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { if peersChanged || relayClientChanged { if !relayClientEnabled { c.relayManager.handleRelayServersSet(nil) + c.hasPeerRelayServers.Store(false) } else { c.updateRelayServersSet(filt, self, peers) } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index e12f15b22..9399dab32 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -65,7 +65,6 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/nettype" "tailscale.com/types/ptr" - "tailscale.com/types/views" "tailscale.com/util/cibuild" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" @@ -3584,7 +3583,7 @@ func Test_nodeHasCap(t *testing.T) { } } -func TestConn_updateRelayServersSet(t *testing.T) { +func TestConn_onNodeViewsUpdate_updateRelayServersSet(t *testing.T) { peerNodeCandidateRelay := &tailcfg.Node{ Cap: 121, ID: 1, @@ -3618,12 +3617,21 @@ func TestConn_updateRelayServersSet(t *testing.T) { DiscoKey: key.NewDisco().Public(), } + selfNodeNodeAttrDisableRelayClient := selfNode.Clone() + selfNodeNodeAttrDisableRelayClient.CapMap = make(tailcfg.NodeCapMap) + selfNodeNodeAttrDisableRelayClient.CapMap[tailcfg.NodeAttrDisableRelayClient] = nil + + selfNodeNodeAttrOnlyTCP443 := selfNode.Clone() + selfNodeNodeAttrOnlyTCP443.CapMap = make(tailcfg.NodeCapMap) + selfNodeNodeAttrOnlyTCP443.CapMap[tailcfg.NodeAttrOnlyTCP443] = nil + tests := []struct { - name string - filt *filter.Filter - self tailcfg.NodeView - peers views.Slice[tailcfg.NodeView] - wantRelayServers set.Set[candidatePeerRelay] + name string + filt *filter.Filter + self tailcfg.NodeView + peers []tailcfg.NodeView + wantRelayServers set.Set[candidatePeerRelay] + wantRelayClientEnabled bool }{ { name: "candidate relay server", @@ -3639,7 +3647,7 @@ func TestConn_updateRelayServersSet(t *testing.T) { }, }, nil, nil, nil, nil, nil), self: selfNode.View(), - peers: views.SliceOf([]tailcfg.NodeView{peerNodeCandidateRelay.View()}), + peers: []tailcfg.NodeView{peerNodeCandidateRelay.View()}, wantRelayServers: set.SetOf([]candidatePeerRelay{ { nodeKey: peerNodeCandidateRelay.Key, @@ -3647,6 +3655,43 @@ func TestConn_updateRelayServersSet(t *testing.T) { derpHomeRegionID: 1, }, }), + wantRelayClientEnabled: true, + }, + { 
+ name: "no candidate relay server because self has tailcfg.NodeAttrDisableRelayClient", + filt: filter.New([]filtertype.Match{ + { + Srcs: peerNodeCandidateRelay.Addresses, + Caps: []filtertype.CapMatch{ + { + Dst: selfNodeNodeAttrDisableRelayClient.Addresses[0], + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfNodeNodeAttrDisableRelayClient.View(), + peers: []tailcfg.NodeView{peerNodeCandidateRelay.View()}, + wantRelayServers: make(set.Set[candidatePeerRelay]), + wantRelayClientEnabled: false, + }, + { + name: "no candidate relay server because self has tailcfg.NodeAttrOnlyTCP443", + filt: filter.New([]filtertype.Match{ + { + Srcs: peerNodeCandidateRelay.Addresses, + Caps: []filtertype.CapMatch{ + { + Dst: selfNodeNodeAttrOnlyTCP443.Addresses[0], + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfNodeNodeAttrOnlyTCP443.View(), + peers: []tailcfg.NodeView{peerNodeCandidateRelay.View()}, + wantRelayServers: make(set.Set[candidatePeerRelay]), + wantRelayClientEnabled: false, }, { name: "self candidate relay server", @@ -3662,7 +3707,7 @@ func TestConn_updateRelayServersSet(t *testing.T) { }, }, nil, nil, nil, nil, nil), self: selfNode.View(), - peers: views.SliceOf([]tailcfg.NodeView{selfNode.View()}), + peers: []tailcfg.NodeView{selfNode.View()}, wantRelayServers: set.SetOf([]candidatePeerRelay{ { nodeKey: selfNode.Key, @@ -3670,6 +3715,7 @@ func TestConn_updateRelayServersSet(t *testing.T) { derpHomeRegionID: 2, }, }), + wantRelayClientEnabled: true, }, { name: "no candidate relay server", @@ -3684,21 +3730,34 @@ func TestConn_updateRelayServersSet(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfNode.View(), - peers: views.SliceOf([]tailcfg.NodeView{peerNodeNotCandidateRelayCapVer.View()}), - wantRelayServers: make(set.Set[candidatePeerRelay]), + self: selfNode.View(), + peers: []tailcfg.NodeView{peerNodeNotCandidateRelayCapVer.View()}, + wantRelayServers: make(set.Set[candidatePeerRelay]), + wantRelayClientEnabled: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := &Conn{} - c.updateRelayServersSet(tt.filt, tt.self, tt.peers) + c := newConn(t.Logf) + c.filt = tt.filt + if len(tt.wantRelayServers) == 0 { + // So we can verify it gets flipped back. 
+ c.hasPeerRelayServers.Store(true) + } + + c.onNodeViewsUpdate(NodeViewsUpdate{ + SelfNode: tt.self, + Peers: tt.peers, + }) got := c.relayManager.getServers() if !got.Equal(tt.wantRelayServers) { t.Fatalf("got: %v != want: %v", got, tt.wantRelayServers) } if len(tt.wantRelayServers) > 0 != c.hasPeerRelayServers.Load() { - t.Fatalf("c.hasPeerRelayServers: %v != wantRelayServers: %v", c.hasPeerRelayServers.Load(), tt.wantRelayServers) + t.Fatalf("c.hasPeerRelayServers: %v != len(tt.wantRelayServers) > 0: %v", c.hasPeerRelayServers.Load(), len(tt.wantRelayServers) > 0) + } + if c.relayClientEnabled != tt.wantRelayClientEnabled { + t.Fatalf("c.relayClientEnabled: %v != wantRelayClientEnabled: %v", c.relayClientEnabled, tt.wantRelayClientEnabled) } }) } From 89954fbceb78a2ecff529166da66ebee614e4253 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Thu, 7 Aug 2025 11:51:15 -0400 Subject: [PATCH 1170/1708] client/systray: add startup script generator for systemd (#16801) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updates #1708 Signed-off-by: Claus Lensbøl --- client/systray/startup-creator.go | 76 ++++++++++++++++++++++++ client/systray/tailscale-systray.service | 10 ++++ cmd/tailscale/cli/systray.go | 30 +++++++++- 3 files changed, 113 insertions(+), 3 deletions(-) create mode 100644 client/systray/startup-creator.go create mode 100644 client/systray/tailscale-systray.service diff --git a/client/systray/startup-creator.go b/client/systray/startup-creator.go new file mode 100644 index 000000000..cb354856d --- /dev/null +++ b/client/systray/startup-creator.go @@ -0,0 +1,76 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build cgo || !darwin + +// Package systray provides a minimal Tailscale systray application. +package systray + +import ( + "bufio" + "bytes" + _ "embed" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" +) + +//go:embed tailscale-systray.service +var embedSystemd string + +func InstallStartupScript(initSystem string) error { + switch initSystem { + case "systemd": + return installSystemd() + default: + return fmt.Errorf("unsupported init system '%s'", initSystem) + } +} + +func installSystemd() error { + // Find the path to tailscale, just in case it's not where the example file + // has it placed, and replace that before writing the file. 
+ tailscaleBin, err := exec.LookPath("tailscale") + if err != nil { + return fmt.Errorf("failed to find tailscale binary %w", err) + } + + var output bytes.Buffer + scanner := bufio.NewScanner(strings.NewReader(embedSystemd)) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "ExecStart=") { + line = fmt.Sprintf("ExecStart=%s systray", tailscaleBin) + } + output.WriteString(line + "\n") + } + + configDir, err := os.UserConfigDir() + if err != nil { + homeDir, err := os.UserHomeDir() + if err != nil { + return fmt.Errorf("unable to locate user home: %w", err) + } + configDir = filepath.Join(homeDir, ".config") + } + + systemdDir := filepath.Join(configDir, "systemd", "user") + if err := os.MkdirAll(systemdDir, 0o755); err != nil { + return fmt.Errorf("failed creating systemd uuser dir: %w", err) + } + + serviceFile := filepath.Join(systemdDir, "tailscale-systray.service") + + if err := os.WriteFile(serviceFile, output.Bytes(), 0o755); err != nil { + return fmt.Errorf("failed writing systemd user service: %w", err) + } + + fmt.Printf("Successfully installed systemd service to: %s\n", serviceFile) + fmt.Println("To enable and start the service, run:") + fmt.Println(" systemctl --user daemon-reload") + fmt.Println(" systemctl --user enable --now tailscale-systray") + + return nil +} diff --git a/client/systray/tailscale-systray.service b/client/systray/tailscale-systray.service new file mode 100644 index 000000000..a4d987563 --- /dev/null +++ b/client/systray/tailscale-systray.service @@ -0,0 +1,10 @@ +[Unit] +Description=Tailscale System Tray +After=systemd.service + +[Service] +Type=simple +ExecStart=/usr/bin/tailscale systray + +[Install] +WantedBy=default.target diff --git a/cmd/tailscale/cli/systray.go b/cmd/tailscale/cli/systray.go index 05d688faa..c0296ae26 100644 --- a/cmd/tailscale/cli/systray.go +++ b/cmd/tailscale/cli/systray.go @@ -7,17 +7,41 @@ package cli import ( "context" + "flag" + "fmt" "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/client/systray" ) +var systrayArgs struct { + initSystem string + installStartup bool +} + var systrayCmd = &ffcli.Command{ Name: "systray", ShortUsage: "tailscale systray", ShortHelp: "Run a systray application to manage Tailscale", - Exec: func(_ context.Context, _ []string) error { - new(systray.Menu).Run(&localClient) + LongHelp: `Run a systray application to manage Tailscale. +To have the application run on startup, use the --enable-startup flag.`, + Exec: runSystray, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("systray") + fs.StringVar(&systrayArgs.initSystem, "enable-startup", "", + "Install startup script for init system. 
Currently supported systems are [systemd].") + return fs + })(), +} + +func runSystray(ctx context.Context, _ []string) error { + if systrayArgs.initSystem != "" { + if err := systray.InstallStartupScript(systrayArgs.initSystem); err != nil { + fmt.Printf("%s\n\n", err.Error()) + return flag.ErrHelp + } return nil - }, + } + new(systray.Menu).Run(&localClient) + return nil } From d4060f1a394e95a20797e0824fea6c0f9a0d7e42 Mon Sep 17 00:00:00 2001 From: Erisa A Date: Thu, 7 Aug 2025 19:27:56 +0100 Subject: [PATCH 1171/1708] CODE_OF_CONDUCT.md: update Code of Conduct (#16806) Updates #cleanup Signed-off-by: Erisa A --- CODE_OF_CONDUCT.md | 142 +++++++++++++++++++++++++-------------------- 1 file changed, 78 insertions(+), 64 deletions(-) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index be5564ef4..3d33bba98 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -2,71 +2,72 @@ ## Our Pledge -We as members, contributors, and leaders pledge to make participation -in our community a harassment-free experience for everyone, regardless -of age, body size, visible or invisible disability, ethnicity, sex -characteristics, gender identity and expression, level of experience, -education, socio-economic status, nationality, personal appearance, -race, religion, or sexual identity and orientation. - -We pledge to act and interact in ways that contribute to an open, -welcoming, diverse, inclusive, and healthy community. +We are committed to creating an open, welcoming, diverse, inclusive, +healthy and respectful community. ## Our Standards -Examples of behavior that contributes to a positive environment for -our community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback +Examples of behavior that contributes to a positive environment for our +community include: +* Demonstrating empathy and kindness toward other people. +* Being respectful of differing opinions, viewpoints, and experiences. +* Giving and gracefully accepting constructive feedback. * Accepting responsibility and apologizing to those affected by our - mistakes, and learning from the experience + mistakes, and learning from the experience. * Focusing on what is best not just for us as individuals, but for the - overall community - -Examples of unacceptable behavior include: + overall community. +Examples of unacceptable behavior include without limitation: * The use of sexualized language or imagery, and sexual attention or - advances of any kind + advances of any kind. +* The use of violent, intimidating or bullying language or imagery. * Trolling, insulting or derogatory comments, and personal or - political attacks -* Public or private harassment + political attacks. +* Public or private harassment. * Publishing others' private information, such as a physical or email - address, without their explicit permission -* Other conduct which could reasonably be considered inappropriate in - a professional setting + address, without their explicit permission. +* Spamming community channels and members, such as sending repeat messages, + low-effort content, or automated messages. +* Phishing or any similar activity; +* Distributing or promoting malware; +* Other conduct which could reasonably be considered inappropriate in a + professional setting. +* Please also see the Tailscale Acceptable Use Policy, available at + [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). 
-## Enforcement Responsibilities +Please also see the Tailscale Acceptable Use Policy, available at +[tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). -Community leaders are responsible for clarifying and enforcing our -standards of acceptable behavior and will take appropriate and fair -corrective action in response to any behavior that they deem -inappropriate, threatening, offensive, or harmful. +# Reporting Incidents -Community leaders have the right and responsibility to remove, edit, -or reject comments, commits, code, wiki edits, issues, and other -contributions that are not aligned to this Code of Conduct, and will -communicate reasons for moderation decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also -applies when an individual is officially representing the community in -public spaces. Examples of representing our community include using an -official e-mail address, posting via an official social media account, -or acting as an appointed representative at an online or offline -event. +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported to Tailscale directly via info@tailscale.com, or to +the community leaders or moderators via DM or similar. +All complaints will be reviewed and investigated promptly and fairly. +We will respect the privacy and safety of the reporter of any issues. + +Please note that this community is not moderated by staff 24/7, and we +do not have, and do not undertake, any obligation to prescreen, monitor, +edit, or remove any content or data, or to actively seek facts or +circumstances indicating illegal activity. While we strive to keep the +community safe and welcoming, moderation may not be immediate at all hours. +If you encounter any issues, report them using the appropriate channels. ## Enforcement -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported to the community leaders responsible for enforcement -at [info@tailscale.com](mailto:info@tailscale.com). All complaints -will be reviewed and investigated promptly and fairly. +Community leaders and moderators are responsible for clarifying and +enforcing our standards of acceptable behavior and will take appropriate +and fair corrective action in response to any behavior that they deem +inappropriate, threatening, offensive, or harmful. -All community leaders are obligated to respect the privacy and -security of the reporter of any incident. +Community leaders and moderators have the right and responsibility to remove, +edit, or reject comments, commits, code, wiki edits, issues, and other +contributions that are not aligned to this Community Code of Conduct. +Tailscale retains full discretion to take action (or not) in response +to a violation of these guidelines with or without notice or liability +to you. We will interpret our policies and resolve disputes in favor of +protecting users, customers, the public, our community and our company, +as a whole. ## Enforcement Guidelines @@ -76,48 +77,61 @@ this Code of Conduct: ### 1. Correction -**Community Impact**: Use of inappropriate language or other behavior +Community Impact: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. 
-**Consequence**: A private, written warning from community leaders, +Consequence: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning -**Community Impact**: A violation through a single incident or series +Community Impact: A violation through a single incident or series of actions. -**Consequence**: A warning with consequences for continued +Consequence: A warning with consequences for continued behavior. No interaction with the people involved, including -unsolicited interaction with those enforcing the Code of Conduct, for -a specified period of time. This includes avoiding interactions in +unsolicited interaction with those enforcing this Community Code of Conduct, +for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban -**Community Impact**: A serious violation of community standards, +Community Impact: A serious violation of community standards, including sustained inappropriate behavior. -**Consequence**: A temporary ban from any sort of interaction or +Consequence: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, -including unsolicited interaction with those enforcing the Code of -Conduct, is allowed during this period. Violating these terms may lead -to a permanent ban. +including unsolicited interaction with those enforcing the Code of Conduct, +is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban -**Community Impact**: Demonstrating a pattern of violation of -community standards, including sustained inappropriate behavior, -harassment of an individual, or aggression toward or disparagement of +Community Impact: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of +an individual, or aggression toward or disparagement of classes of individuals. -**Consequence**: A permanent ban from any sort of public interaction +Consequence: A permanent ban from any sort of public interaction within the community. +## Acceptable Use Policy + +Violation of this Community Code of Conduct may also violate the +Tailscale Acceptable Use Policy, which may result in suspension or +termination of your Tailscale account. For more information, please +see the Tailscale Acceptable Use Policy, available at +[tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). + +## Privacy + +Please see the Tailscale [Privacy Policy](http://tailscale.com/privacy-policy) +for more information about how Tailscale collects, uses, discloses and protects +information. + ## Attribution This Code of Conduct is adapted from the [Contributor From 3fe022877afd3ccfdbbd10a3b8a94dbac4f930bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Thu, 7 Aug 2025 16:02:47 -0400 Subject: [PATCH 1172/1708] client/systray: temporarily replace systray module (#16807) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We are waiting for a PR to be reviewed upstream. 
https://github.com/fyne-io/systray/pull/100 Updates #1708 Signed-off-by: Claus Lensbøl --- client/systray/logo.go | 2 +- client/systray/systray.go | 2 +- cmd/tailscale/depaware.txt | 12 ++++++------ go.mod | 2 +- go.sum | 4 ++-- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/client/systray/logo.go b/client/systray/logo.go index 3467d1b74..d9b0932bc 100644 --- a/client/systray/logo.go +++ b/client/systray/logo.go @@ -15,9 +15,9 @@ import ( "sync" "time" - "fyne.io/systray" ico "github.com/Kodeworks/golang-image-ico" "github.com/fogleman/gg" + "github.com/tailscale/systray" ) // tsLogo represents the Tailscale logo displayed as the systray icon. diff --git a/client/systray/systray.go b/client/systray/systray.go index 5cd5e602f..d5a19f91c 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -24,10 +24,10 @@ import ( "syscall" "time" - "fyne.io/systray" ico "github.com/Kodeworks/golang-image-ico" "github.com/atotto/clipboard" dbus "github.com/godbus/dbus/v5" + "github.com/tailscale/systray" "github.com/toqueteos/webbrowser" "tailscale.com/client/local" "tailscale.com/ipn" diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 020479ebb..8e28e2933 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -2,9 +2,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 - L fyne.io/systray from tailscale.com/client/systray - L fyne.io/systray/internal/generated/menu from fyne.io/systray - L fyne.io/systray/internal/generated/notifier from fyne.io/systray L github.com/Kodeworks/golang-image-ico from tailscale.com/client/systray W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate @@ -25,9 +22,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ - L 💣 github.com/godbus/dbus/v5 from fyne.io/systray+ - L github.com/godbus/dbus/v5/introspect from fyne.io/systray+ - L github.com/godbus/dbus/v5/prop from fyne.io/systray + L 💣 github.com/godbus/dbus/v5 from github.com/godbus/dbus/v5/introspect+ + L github.com/godbus/dbus/v5/introspect from github.com/godbus/dbus/v5/prop+ + L github.com/godbus/dbus/v5/prop from github.com/tailscale/systray L github.com/golang/freetype/raster from github.com/fogleman/gg+ L github.com/golang/freetype/truetype from github.com/fogleman/gg github.com/golang/groupcache/lru from tailscale.com/net/dnscache @@ -69,6 +66,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink + L github.com/tailscale/systray from tailscale.com/client/systray + L github.com/tailscale/systray/internal/generated/menu from github.com/tailscale/systray + L github.com/tailscale/systray/internal/generated/notifier from github.com/tailscale/systray github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 
github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli+ L github.com/vishvananda/netns from github.com/tailscale/netlink+ diff --git a/go.mod b/go.mod index 92de032ff..09dcd575e 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,6 @@ go 1.24.4 require ( filippo.io/mkcert v1.4.4 - fyne.io/systray v1.11.1-0.20250317195939-bcf6eed85e7a github.com/Kodeworks/golang-image-ico v0.0.0-20141118225523-73f0f4cfade9 github.com/akutz/memconn v0.1.0 github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa @@ -88,6 +87,7 @@ require ( github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb + github.com/tailscale/systray v0.0.0-20250807194015-d0cacc864c78 github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da diff --git a/go.sum b/go.sum index 7db41f566..23ca2dc9b 100644 --- a/go.sum +++ b/go.sum @@ -43,8 +43,6 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc= filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA= -fyne.io/systray v1.11.1-0.20250317195939-bcf6eed85e7a h1:I8mEKo5sawHu8CqYf3FSjIl9b3puXasFVn2D/hrCneY= -fyne.io/systray v1.11.1-0.20250317195939-bcf6eed85e7a/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs= github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw= github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= github.com/Abirdcfly/dupword v0.0.14 h1:3U4ulkc8EUo+CaT105/GJ1BQwtgyj6+VaBVbAX11Ba8= @@ -992,6 +990,8 @@ github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+y github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb h1:Rtklwm6HUlCtf/MR2MB9iY4FoA16acWWlC5pLrTVa90= github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb/go.mod h1:R8iCVJnbOB05pGexHK/bKHneIRHpZ3jLl7wMQ0OM/jw= +github.com/tailscale/systray v0.0.0-20250807194015-d0cacc864c78 h1:H7/LOg6wgQ116wFRVa8tz9KTB8pc6jeNtqS9tyKgeVw= +github.com/tailscale/systray v0.0.0-20250807194015-d0cacc864c78/go.mod h1:1NbyArqaFj+AzkSWl0odw7flO9DsHIYWC4lMkwCKVAo= github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= From b5283ab13a356f83f2f4e0506bfda5c6654e8b69 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 7 Aug 2025 14:41:55 -0700 Subject: [PATCH 1173/1708] go.toolchain.rev: bump to 1.24.6 (#16811) Updates https://github.com/tailscale/corp/issues/31103 Signed-off-by: Andrew Lytvynov --- go.mod | 2 +- go.toolchain.rev | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 09dcd575e..28b2a764f 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.24.4 +go 1.24.6 require ( filippo.io/mkcert v1.4.4 diff --git a/go.toolchain.rev b/go.toolchain.rev index 
33aa56423..116d2fa6e 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -1cd3bf1a6eaf559aa8c00e749289559c884cef09 +cc1987b0b2df322aeb66514b3fbd584ba1201ef6 From 5297dc3baf386084e9b3791415aca12a261d2d69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 8 Aug 2025 16:12:11 -0400 Subject: [PATCH 1174/1708] cmd/tailscale/cli: move systray configuration to tailscale configure (#16817) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updates #1708 Signed-off-by: Claus Lensbøl --- cmd/tailscale/cli/configure.go | 1 + cmd/tailscale/cli/configure_linux-all.go | 8 ++++ cmd/tailscale/cli/configure_linux.go | 51 ++++++++++++++++++++++++ cmd/tailscale/cli/systray.go | 25 +----------- 4 files changed, 62 insertions(+), 23 deletions(-) create mode 100644 cmd/tailscale/cli/configure_linux-all.go create mode 100644 cmd/tailscale/cli/configure_linux.go diff --git a/cmd/tailscale/cli/configure.go b/cmd/tailscale/cli/configure.go index da6278ce2..0354a1944 100644 --- a/cmd/tailscale/cli/configure.go +++ b/cmd/tailscale/cli/configure.go @@ -32,6 +32,7 @@ services on the host to use Tailscale in more ways. ccall(maybeSysExtCmd), ccall(maybeVPNConfigCmd), ccall(maybeJetKVMConfigureCmd), + ccall(maybeSystrayCmd), ), } } diff --git a/cmd/tailscale/cli/configure_linux-all.go b/cmd/tailscale/cli/configure_linux-all.go new file mode 100644 index 000000000..e645e9654 --- /dev/null +++ b/cmd/tailscale/cli/configure_linux-all.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import "github.com/peterbourgon/ff/v3/ffcli" + +var maybeSystrayCmd func() *ffcli.Command // non-nil only on Linux, see configure_linux.go diff --git a/cmd/tailscale/cli/configure_linux.go b/cmd/tailscale/cli/configure_linux.go new file mode 100644 index 000000000..4bbde8721 --- /dev/null +++ b/cmd/tailscale/cli/configure_linux.go @@ -0,0 +1,51 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !ts_omit_systray + +package cli + +import ( + "context" + "flag" + "fmt" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/client/systray" +) + +func init() { + maybeSystrayCmd = systrayConfigCmd +} + +var systrayArgs struct { + initSystem string + installStartup bool +} + +func systrayConfigCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "systray", + ShortUsage: "tailscale configure systray [options]", + ShortHelp: "[ALPHA] Manage the systray client for Linux", + LongHelp: "[ALPHA] The systray set of commands provides a way to configure the systray application on Linux.", + Exec: configureSystray, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("systray") + fs.StringVar(&systrayArgs.initSystem, "enable-startup", "", + "Install startup script for init system. 
Currently supported systems are [systemd].") + return fs + })(), + } +} + +func configureSystray(_ context.Context, _ []string) error { + if systrayArgs.initSystem != "" { + if err := systray.InstallStartupScript(systrayArgs.initSystem); err != nil { + fmt.Printf("%s\n\n", err.Error()) + return flag.ErrHelp + } + return nil + } + return flag.ErrHelp +} diff --git a/cmd/tailscale/cli/systray.go b/cmd/tailscale/cli/systray.go index c0296ae26..827e8a9a4 100644 --- a/cmd/tailscale/cli/systray.go +++ b/cmd/tailscale/cli/systray.go @@ -7,41 +7,20 @@ package cli import ( "context" - "flag" - "fmt" "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/client/systray" ) -var systrayArgs struct { - initSystem string - installStartup bool -} - var systrayCmd = &ffcli.Command{ Name: "systray", ShortUsage: "tailscale systray", ShortHelp: "Run a systray application to manage Tailscale", - LongHelp: `Run a systray application to manage Tailscale. -To have the application run on startup, use the --enable-startup flag.`, - Exec: runSystray, - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("systray") - fs.StringVar(&systrayArgs.initSystem, "enable-startup", "", - "Install startup script for init system. Currently supported systems are [systemd].") - return fs - })(), + LongHelp: "Run a systray application to manage Tailscale.", + Exec: runSystray, } func runSystray(ctx context.Context, _ []string) error { - if systrayArgs.initSystem != "" { - if err := systray.InstallStartupScript(systrayArgs.initSystem); err != nil { - fmt.Printf("%s\n\n", err.Error()) - return flag.ErrHelp - } - return nil - } new(systray.Menu).Run(&localClient) return nil } From 796eb2120449bd84bb50a6b72fc5ae142c1c7c46 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 8 Aug 2025 15:10:06 -0700 Subject: [PATCH 1175/1708] go.toolchain.rev: bump tsgo toolchain Updates tailscale/go#129 Change-Id: I94debd1d0b7080c5b012f200ad98d22c3048f350 Signed-off-by: Brad Fitzpatrick --- go.toolchain.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index 116d2fa6e..fa951ac1b 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -cc1987b0b2df322aeb66514b3fbd584ba1201ef6 +606f294beebf9df5754804710cd5e16d30532692 From 71d51eb8db62e4e5b2a2afbce0262bad0746ef2c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 10 Aug 2025 21:56:17 -0600 Subject: [PATCH 1176/1708] .github: bump github/codeql-action from 3.29.3 to 3.29.5 (#16765) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.3 to 3.29.5. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/d6bbdef45e766d081b84a2def353b0055f728d3e...51f77329afa6477de8c49fc9c7046c15b9a4e79d) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.5 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index e5616d83a..90a20e2f0 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 + uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 + uses: github/codeql-action/autobuild@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 + uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 From d122f0350e8efc4ee80b295829d447ff9d5ddb08 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 11 Aug 2025 09:04:03 -0700 Subject: [PATCH 1177/1708] control/controlknobs,tailcfg,wgengine/magicsock: deprecate NodeAttrDisableMagicSockCryptoRouting (#16818) Peer Relay is dependent on crypto routing, therefore crypto routing is now mandatory. Updates tailscale/corp#20732 Updates tailscale/corp#31083 Signed-off-by: Jordan Whited --- control/controlknobs/controlknobs.go | 6 ------ tailcfg/tailcfg.go | 6 +++++- wgengine/magicsock/magicsock.go | 6 ------ 3 files changed, 5 insertions(+), 13 deletions(-) diff --git a/control/controlknobs/controlknobs.go b/control/controlknobs/controlknobs.go index a86f0af53..2578744ca 100644 --- a/control/controlknobs/controlknobs.go +++ b/control/controlknobs/controlknobs.go @@ -98,10 +98,6 @@ type Knobs struct { // allows us to disable the new behavior remotely if needed. DisableLocalDNSOverrideViaNRPT atomic.Bool - // DisableCryptorouting indicates that the node should not use the - // magicsock crypto routing feature. - DisableCryptorouting atomic.Bool - // DisableCaptivePortalDetection is whether the node should not perform captive portal detection // automatically when the network state changes. 
DisableCaptivePortalDetection atomic.Bool @@ -137,7 +133,6 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { userDialUseRoutes = has(tailcfg.NodeAttrUserDialUseRoutes) disableSplitDNSWhenNoCustomResolvers = has(tailcfg.NodeAttrDisableSplitDNSWhenNoCustomResolvers) disableLocalDNSOverrideViaNRPT = has(tailcfg.NodeAttrDisableLocalDNSOverrideViaNRPT) - disableCryptorouting = has(tailcfg.NodeAttrDisableMagicSockCryptoRouting) disableCaptivePortalDetection = has(tailcfg.NodeAttrDisableCaptivePortalDetection) disableSkipStatusQueue = has(tailcfg.NodeAttrDisableSkipStatusQueue) ) @@ -165,7 +160,6 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { k.UserDialUseRoutes.Store(userDialUseRoutes) k.DisableSplitDNSWhenNoCustomResolvers.Store(disableSplitDNSWhenNoCustomResolvers) k.DisableLocalDNSOverrideViaNRPT.Store(disableLocalDNSOverrideViaNRPT) - k.DisableCryptorouting.Store(disableCryptorouting) k.DisableCaptivePortalDetection.Store(disableCaptivePortalDetection) k.DisableSkipStatusQueue.Store(disableSkipStatusQueue) } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 5e3c4e572..9f4734f1f 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -168,7 +168,8 @@ type CapabilityVersion int // - 121: 2025-07-19: Client understands peer relay endpoint alloc with [disco.AllocateUDPRelayEndpointRequest] & [disco.AllocateUDPRelayEndpointResponse] // - 122: 2025-07-21: Client sends Hostinfo.ExitNodeID to report which exit node it has selected, if any. // - 123: 2025-07-28: fix deadlock regression from cryptokey routing change (issue #16651) -const CurrentCapabilityVersion CapabilityVersion = 123 +// - 124: 2025-08-08: removed NodeAttrDisableMagicSockCryptoRouting support, crypto routing is now mandatory +const CurrentCapabilityVersion CapabilityVersion = 124 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -2590,6 +2591,9 @@ const ( // NodeAttrDisableMagicSockCryptoRouting disables the use of the // magicsock cryptorouting hook. See tailscale/corp#20732. + // + // Deprecated: NodeAttrDisableMagicSockCryptoRouting is deprecated as of + // CapabilityVersion 124, CryptoRouting is now mandatory. See tailscale/corp#31083. NodeAttrDisableMagicSockCryptoRouting NodeCapability = "disable-magicsock-crypto-routing" // NodeAttrDisableCaptivePortalDetection instructs the client to not perform captive portal detection diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a4ba090ef..8dce6be36 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1842,12 +1842,6 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach de, ok := c.peerMap.endpointForEpAddr(src) c.mu.Unlock() if !ok { - if c.controlKnobs != nil && c.controlKnobs.DisableCryptorouting.Load() { - // Note: UDP relay is dependent on cryptorouting enablement. We - // only update Geneve-encapsulated [epAddr]s in the [peerMap] - // via [lazyEndpoint]. - return nil, 0, false, false - } // TODO(jwhited): reuse [lazyEndpoint] across calls to receiveIP() // for the same batch & [epAddr] src. return &lazyEndpoint{c: c, src: src}, size, isGeneveEncap, true From 03c4b2a0d0d23a183b216f065092e691e1844d4b Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Mon, 11 Aug 2025 12:57:15 -0400 Subject: [PATCH 1178/1708] derp/derphttp: test improvements (#16723) Update some logging to help future failures. Improve test shutdown concurrency issues. 
Fixes #16722 Signed-off-by: Mike O'Driscoll --- derp/derphttp/derphttp_test.go | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index bb33e6023..6e8e0bd21 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -286,7 +286,6 @@ func TestBreakWatcherConnRecv(t *testing.T) { defer func() { retryInterval = origRetryInterval }() var wg sync.WaitGroup - defer wg.Wait() // Make the watcher server serverPrivateKey1 := key.NewNode() _, s1 := newTestServer(t, serverPrivateKey1) @@ -298,14 +297,15 @@ func TestBreakWatcherConnRecv(t *testing.T) { defer s2.Close() // Make the watcher (but it is not connected yet) - watcher1 := newWatcherClient(t, serverPrivateKey1, serverURL2) - defer watcher1.Close() + watcher := newWatcherClient(t, serverPrivateKey1, serverURL2) + defer watcher.Close() ctx, cancel := context.WithCancel(context.Background()) - defer cancel() watcherChan := make(chan int, 1) + defer close(watcherChan) errChan := make(chan error, 1) + defer close(errChan) // Start the watcher thread (which connects to the watched server) wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343 @@ -323,7 +323,7 @@ func TestBreakWatcherConnRecv(t *testing.T) { errChan <- err } - watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr) + watcher.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr) }() timer := time.NewTimer(5 * time.Second) @@ -335,7 +335,7 @@ func TestBreakWatcherConnRecv(t *testing.T) { select { case peers := <-watcherChan: if peers != 1 { - t.Fatal("wrong number of peers added during watcher connection") + t.Fatalf("wrong number of peers added during watcher connection: have %d, want 1", peers) } case err := <-errChan: if !strings.Contains(err.Error(), "use of closed network connection") { @@ -344,12 +344,13 @@ func TestBreakWatcherConnRecv(t *testing.T) { case <-timer.C: t.Fatalf("watcher did not process the peer update") } - watcher1.breakConnection(watcher1.client) - // re-establish connection by sending a packet - watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) - timer.Reset(5 * time.Second) + watcher.breakConnection(watcher.client) + // re-establish connection by sending a packet + watcher.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) } + cancel() // Cancel the context to stop the watcher loop. 
+ wg.Wait() } // Test that a watcher connection successfully reconnects and processes peer @@ -364,7 +365,6 @@ func TestBreakWatcherConn(t *testing.T) { defer func() { retryInterval = origRetryInterval }() var wg sync.WaitGroup - defer wg.Wait() // Make the watcher server serverPrivateKey1 := key.NewNode() _, s1 := newTestServer(t, serverPrivateKey1) @@ -380,7 +380,6 @@ func TestBreakWatcherConn(t *testing.T) { defer watcher1.Close() ctx, cancel := context.WithCancel(context.Background()) - defer cancel() watcherChan := make(chan int, 1) breakerChan := make(chan bool, 1) @@ -396,8 +395,12 @@ func TestBreakWatcherConn(t *testing.T) { peers++ // Signal that the watcher has run watcherChan <- peers + select { + case <-ctx.Done(): + return // Wait for breaker to run - <-breakerChan + case <-breakerChan: + } } remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } notifyError := func(err error) { @@ -416,7 +419,7 @@ func TestBreakWatcherConn(t *testing.T) { select { case peers := <-watcherChan: if peers != 1 { - t.Fatal("wrong number of peers added during watcher connection") + t.Fatalf("wrong number of peers added during watcher connection have %d, want 1", peers) } case err := <-errorChan: if !strings.Contains(err.Error(), "use of closed network connection") { @@ -433,6 +436,9 @@ func TestBreakWatcherConn(t *testing.T) { timer.Reset(5 * time.Second) } + watcher1.Close() + cancel() + wg.Wait() } func noopAdd(derp.PeerPresentMessage) {} From 36397f17946e0b20e1c1a79370666a44bcc5c634 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 11 Aug 2025 13:29:57 -0700 Subject: [PATCH 1179/1708] wgengine/magicsock: add clientmetrics for TX direction Peer Relay disco messages (#16831) Updates tailscale/corp#30527 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 39 +++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 12 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 8dce6be36..9dc201cdc 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2025,6 +2025,16 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. metricSentDiscoPong.Add(1) case *disco.CallMeMaybe: metricSentDiscoCallMeMaybe.Add(1) + case *disco.CallMeMaybeVia: + metricSentDiscoCallMeMaybeVia.Add(1) + case *disco.BindUDPRelayEndpoint: + metricSentDiscoBindUDPRelayEndpoint.Add(1) + case *disco.BindUDPRelayEndpointAnswer: + metricSentDiscoBindUDPRelayEndpointAnswer.Add(1) + case *disco.AllocateUDPRelayEndpointRequest: + metricSentDiscoAllocUDPRelayEndpointRequest.Add(1) + case *disco.AllocateUDPRelayEndpointResponse: + metricSentDiscoAllocUDPRelayEndpointResponse.Add(1) } } else if err == nil { // Can't send. (e.g. 
no IPv6 locally) @@ -3967,18 +3977,23 @@ var ( metricRecvDataPacketsPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv6") // Disco packets - metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp") - metricSendDiscoDERP = clientmetric.NewCounter("magicsock_disco_send_derp") - metricSentDiscoUDP = clientmetric.NewCounter("magicsock_disco_sent_udp") - metricSentDiscoDERP = clientmetric.NewCounter("magicsock_disco_sent_derp") - metricSentDiscoPing = clientmetric.NewCounter("magicsock_disco_sent_ping") - metricSentDiscoPong = clientmetric.NewCounter("magicsock_disco_sent_pong") - metricSentDiscoPeerMTUProbes = clientmetric.NewCounter("magicsock_disco_sent_peer_mtu_probes") - metricSentDiscoPeerMTUProbeBytes = clientmetric.NewCounter("magicsock_disco_sent_peer_mtu_probe_bytes") - metricSentDiscoCallMeMaybe = clientmetric.NewCounter("magicsock_disco_sent_callmemaybe") - metricRecvDiscoBadPeer = clientmetric.NewCounter("magicsock_disco_recv_bad_peer") - metricRecvDiscoBadKey = clientmetric.NewCounter("magicsock_disco_recv_bad_key") - metricRecvDiscoBadParse = clientmetric.NewCounter("magicsock_disco_recv_bad_parse") + metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp") + metricSendDiscoDERP = clientmetric.NewCounter("magicsock_disco_send_derp") + metricSentDiscoUDP = clientmetric.NewCounter("magicsock_disco_sent_udp") + metricSentDiscoDERP = clientmetric.NewCounter("magicsock_disco_sent_derp") + metricSentDiscoPing = clientmetric.NewCounter("magicsock_disco_sent_ping") + metricSentDiscoPong = clientmetric.NewCounter("magicsock_disco_sent_pong") + metricSentDiscoPeerMTUProbes = clientmetric.NewCounter("magicsock_disco_sent_peer_mtu_probes") + metricSentDiscoPeerMTUProbeBytes = clientmetric.NewCounter("magicsock_disco_sent_peer_mtu_probe_bytes") + metricSentDiscoCallMeMaybe = clientmetric.NewCounter("magicsock_disco_sent_callmemaybe") + metricSentDiscoCallMeMaybeVia = clientmetric.NewCounter("magicsock_disco_sent_callmemaybevia") + metricSentDiscoBindUDPRelayEndpoint = clientmetric.NewCounter("magicsock_disco_sent_bind_udp_relay_endpoint") + metricSentDiscoBindUDPRelayEndpointAnswer = clientmetric.NewCounter("magicsock_disco_sent_bind_udp_relay_endpoint_answer") + metricSentDiscoAllocUDPRelayEndpointRequest = clientmetric.NewCounter("magicsock_disco_sent_alloc_udp_relay_endpoint_request") + metricSentDiscoAllocUDPRelayEndpointResponse = clientmetric.NewCounter("magicsock_disco_sent_alloc_udp_relay_endpoint_response") + metricRecvDiscoBadPeer = clientmetric.NewCounter("magicsock_disco_recv_bad_peer") + metricRecvDiscoBadKey = clientmetric.NewCounter("magicsock_disco_recv_bad_key") + metricRecvDiscoBadParse = clientmetric.NewCounter("magicsock_disco_recv_bad_parse") metricRecvDiscoUDP = clientmetric.NewCounter("magicsock_disco_recv_udp") metricRecvDiscoDERP = clientmetric.NewCounter("magicsock_disco_recv_derp") From 4fa27db8dd3ed05c0f3c704d2d97b449236b90d8 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 11 Aug 2025 14:48:19 -0700 Subject: [PATCH 1180/1708] wgengine/magicsock: add clientmetrics for locally delivered Peer Relay alloc disco (#16833) Expected when Peer Relay'ing via self. These disco messages never get sealed, and never leave the process. 
Updates tailscale/corp#30527 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 9dc201cdc..a7b6d1178 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -681,6 +681,7 @@ func (c *Conn) onUDPRelayAllocResp(allocResp UDPRelayAllocResp) { if selfNodeKey.Compare(allocResp.ReqRxFromNodeKey) == 0 && allocResp.ReqRxFromDiscoKey.Compare(c.discoPublic) == 0 { c.relayManager.handleRxDiscoMsg(c, allocResp.Message, selfNodeKey, allocResp.ReqRxFromDiscoKey, epAddr{}) + metricLocalDiscoAllocUDPRelayEndpointResponse.Add(1) } return } @@ -1926,6 +1927,7 @@ func (c *Conn) sendDiscoAllocateUDPRelayEndpointRequest(dst epAddr, dstKey key.N RxFromDiscoKey: c.discoPublic, Message: allocReq, }) + metricLocalDiscoAllocUDPRelayEndpointRequest.Add(1) return true, nil } return c.sendDiscoMessage(dst, dstKey, dstDisco, allocReq, logLevel) @@ -3990,6 +3992,7 @@ var ( metricSentDiscoBindUDPRelayEndpoint = clientmetric.NewCounter("magicsock_disco_sent_bind_udp_relay_endpoint") metricSentDiscoBindUDPRelayEndpointAnswer = clientmetric.NewCounter("magicsock_disco_sent_bind_udp_relay_endpoint_answer") metricSentDiscoAllocUDPRelayEndpointRequest = clientmetric.NewCounter("magicsock_disco_sent_alloc_udp_relay_endpoint_request") + metricLocalDiscoAllocUDPRelayEndpointRequest = clientmetric.NewCounter("magicsock_disco_local_alloc_udp_relay_endpoint_request") metricSentDiscoAllocUDPRelayEndpointResponse = clientmetric.NewCounter("magicsock_disco_sent_alloc_udp_relay_endpoint_response") metricRecvDiscoBadPeer = clientmetric.NewCounter("magicsock_disco_recv_bad_peer") metricRecvDiscoBadKey = clientmetric.NewCounter("magicsock_disco_recv_bad_key") @@ -4009,6 +4012,7 @@ var ( metricRecvDiscoAllocUDPRelayEndpointRequestBadDisco = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_request_bad_disco") metricRecvDiscoAllocUDPRelayEndpointResponseBadDisco = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_response_bad_disco") metricRecvDiscoAllocUDPRelayEndpointResponse = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_response") + metricLocalDiscoAllocUDPRelayEndpointResponse = clientmetric.NewCounter("magicsock_disco_local_alloc_udp_relay_endpoint_response") metricRecvDiscoDERPPeerNotHere = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_not_here") metricRecvDiscoDERPPeerGoneUnknown = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_gone_unknown") // metricDERPHomeChange is how many times our DERP home region DI has From cde65dba16a24d799fafc804595b209b17481ebb Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 11 Aug 2025 14:53:25 -0700 Subject: [PATCH 1181/1708] wgengine/magicsock: add clientmetric for Peer Relay challenge reception (#16834) Updates tailscale/corp#30527 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a7b6d1178..0fac793ef 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2273,6 +2273,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake return } c.relayManager.handleRxDiscoMsg(c, challenge, key.NodePublic{}, di.discoKey, src) + metricRecvDiscoBindUDPRelayEndpointChallenge.Add(1) return } @@ -4008,6 +4009,7 @@ var ( metricRecvDiscoCallMeMaybeViaBadNode = 
clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia_bad_node") metricRecvDiscoCallMeMaybeBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_disco") metricRecvDiscoCallMeMaybeViaBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia_bad_disco") + metricRecvDiscoBindUDPRelayEndpointChallenge = clientmetric.NewCounter("magicsock_disco_recv_bind_udp_relay_endpoint_challenge") metricRecvDiscoAllocUDPRelayEndpointRequest = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_request") metricRecvDiscoAllocUDPRelayEndpointRequestBadDisco = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_request_bad_disco") metricRecvDiscoAllocUDPRelayEndpointResponseBadDisco = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_response_bad_disco") From ee0c7b05a5b6deabd9492276db608952cff11b57 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 12 Aug 2025 10:19:33 -0700 Subject: [PATCH 1182/1708] cmd/tailscale: fix a panic in netcheck portmapper construction (#16843) This affects the 1.87.33 unstable release. Updates #16842 Updates #15160 Change-Id: Ie6d1b2c094d1a6059fbd1023760567900f06e0ad Signed-off-by: M. J. Fromberger --- cmd/tailscale/cli/netcheck.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/tailscale/cli/netcheck.go b/cmd/tailscale/cli/netcheck.go index 3cf05a3b7..0bdab59cb 100644 --- a/cmd/tailscale/cli/netcheck.go +++ b/cmd/tailscale/cli/netcheck.go @@ -59,8 +59,9 @@ func runNetcheck(ctx context.Context, args []string) error { // Ensure that we close the portmapper after running a netcheck; this // will release any port mappings created. pm := portmapper.NewClient(portmapper.Config{ - Logf: logf, - NetMon: netMon, + Logf: logf, + NetMon: netMon, + EventBus: bus, }) defer pm.Close() From d07166b87daeeee314115c4f05610c87a00827cb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Aug 2025 20:36:47 -0600 Subject: [PATCH 1183/1708] .github: Bump actions/cache from 4.2.3 to 4.2.4 (#16829) Bumps [actions/cache](https://github.com/actions/cache) from 4.2.3 to 4.2.4. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/5a3ec84eff668545956fd18022155c47e93e2684...0400d5f644dc74513175e3cd8d07132dd4860809) --- updated-dependencies: - dependency-name: actions/cache dependency-version: 4.2.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test.yml | 46 +++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c2f539662..7ccb39869 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -57,7 +57,7 @@ jobs: # See if the cache entry already exists to avoid downloading it # and doing the cache write again. 
- id: check-cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4 with: path: gomodcache # relative to workspace; see env note at top of file key: ${{ steps.hash.outputs.key }} @@ -69,7 +69,7 @@ jobs: run: go mod download - name: Cache Go modules if: steps.check-cache.outputs.cache-hit != 'true' - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache # relative to workspace; see env note at top of file key: ${{ steps.hash.outputs.key }} @@ -92,7 +92,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -130,13 +130,13 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true - name: Restore Cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -241,7 +241,7 @@ jobs: - name: Restore Go module cache if: matrix.key != 'win-tool-go' - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -249,7 +249,7 @@ jobs: - name: Restore Cache if: matrix.key != 'win-tool-go' - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: | ~/.cache/go-build @@ -298,7 +298,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -321,7 +321,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -377,7 +377,7 @@ jobs: with: path: src - name: Restore Cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -393,7 +393,7 @@ jobs: ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }} ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2- - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 
with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -429,7 +429,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -467,7 +467,7 @@ jobs: with: path: src - name: Restore Cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -483,7 +483,7 @@ jobs: ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }} ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2- - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -518,7 +518,7 @@ jobs: # some Android breakages early. # TODO(bradfitz): better; see https://github.com/tailscale/tailscale/issues/4482 - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -539,7 +539,7 @@ jobs: with: path: src - name: Restore Cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -555,7 +555,7 @@ jobs: ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }} ${{ github.job }}-${{ runner.os }}-go-2- - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -588,7 +588,7 @@ jobs: - name: Set GOMODCACHE env run: echo "GOMODCACHE=$HOME/.cache/go-mod" >> $GITHUB_ENV - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -671,7 +671,7 @@ jobs: - name: Set GOMODCACHE env run: echo "GOMODCACHE=$HOME/.cache/go-mod" >> $GITHUB_ENV - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -689,7 +689,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -712,7 +712,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: 
actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -734,7 +734,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -788,7 +788,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} From f22c7657e54cf4b3a10a2bc635f6a68f89123bfb Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 13 Aug 2025 11:19:29 -0700 Subject: [PATCH 1184/1708] cmd/tailscale: add --json-docs flag (#16851) This prints all command and flag docs as JSON. To be used for generating the contents of https://tailscale.com/kb/1080/cli. Updates https://github.com/tailscale/tailscale-www/issues/4722 Signed-off-by: Andrew Lytvynov --- cmd/tailscale/cli/cli.go | 59 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 72924350c..208ee93fd 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -7,6 +7,7 @@ package cli import ( "context" + "encoding/json" "errors" "flag" "fmt" @@ -217,8 +218,10 @@ func newRootCmd() *ffcli.Command { return nil }) rootfs.Lookup("socket").DefValue = localClient.Socket + jsonDocs := rootfs.Bool("json-docs", false, hidden+"print JSON-encoded docs for all subcommands and flags") - rootCmd := &ffcli.Command{ + var rootCmd *ffcli.Command + rootCmd = &ffcli.Command{ Name: "tailscale", ShortUsage: "tailscale [flags] [command flags]", ShortHelp: "The easiest, most secure way to use WireGuard.", @@ -265,6 +268,9 @@ change in the future. 
), FlagSet: rootfs, Exec: func(ctx context.Context, args []string) error { + if *jsonDocs { + return printJSONDocs(rootCmd) + } if len(args) > 0 { return fmt.Errorf("tailscale: unknown subcommand: %s", args[0]) } @@ -472,3 +478,54 @@ func colorableOutput() (w io.Writer, ok bool) { } return colorable.NewColorableStdout(), true } + +type commandDoc struct { + Name string + Desc string + Subcommands []commandDoc `json:",omitempty"` + Flags []flagDoc `json:",omitempty"` +} + +type flagDoc struct { + Name string + Desc string +} + +func printJSONDocs(root *ffcli.Command) error { + docs := jsonDocsWalk(root) + return json.NewEncoder(os.Stdout).Encode(docs) +} + +func jsonDocsWalk(cmd *ffcli.Command) *commandDoc { + res := &commandDoc{ + Name: cmd.Name, + } + if cmd.LongHelp != "" { + res.Desc = cmd.LongHelp + } else if cmd.ShortHelp != "" { + res.Desc = cmd.ShortHelp + } else { + res.Desc = cmd.ShortUsage + } + if strings.HasPrefix(res.Desc, hidden) { + return nil + } + if cmd.FlagSet != nil { + cmd.FlagSet.VisitAll(func(f *flag.Flag) { + if strings.HasPrefix(f.Usage, hidden) { + return + } + res.Flags = append(res.Flags, flagDoc{ + Name: f.Name, + Desc: f.Usage, + }) + }) + } + for _, sub := range cmd.Subcommands { + subj := jsonDocsWalk(sub) + if subj != nil { + res.Subcommands = append(res.Subcommands, *subj) + } + } + return res +} From 16bc0a5558aac7617cb94b497db71242a2452db3 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 13 Aug 2025 13:13:11 -0700 Subject: [PATCH 1185/1708] net/{batching,packet},wgengine/magicsock: export batchingConn (#16848) For eventual use by net/udprelay.Server. Updates tailscale/corp#31164 Signed-off-by: Jordan Whited --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + net/batching/conn.go | 48 ++++++++++++ net/batching/conn_default.go | 21 +++++ .../batching/conn_linux.go | 77 +++++++++++-------- .../batching/conn_linux_test.go | 32 +++++--- net/packet/geneve.go | 50 +++++++++--- net/packet/geneve_test.go | 56 +++++++++++++- net/udprelay/server.go | 5 +- net/udprelay/server_test.go | 10 ++- tsnet/depaware.txt | 1 + wgengine/magicsock/batching_conn.go | 23 ------ wgengine/magicsock/batching_conn_default.go | 14 ---- wgengine/magicsock/debughttp.go | 4 +- wgengine/magicsock/endpoint.go | 36 ++++----- wgengine/magicsock/endpoint_test.go | 13 ++-- wgengine/magicsock/magicsock.go | 56 ++++---------- wgengine/magicsock/magicsock_default.go | 4 - wgengine/magicsock/magicsock_linux.go | 8 -- wgengine/magicsock/magicsock_test.go | 71 +++-------------- wgengine/magicsock/peermap.go | 4 +- wgengine/magicsock/peermap_test.go | 5 +- wgengine/magicsock/rebinding_conn.go | 40 +++++----- wgengine/magicsock/relaymanager.go | 15 ++-- 25 files changed, 328 insertions(+), 268 deletions(-) create mode 100644 net/batching/conn.go create mode 100644 net/batching/conn_default.go rename wgengine/magicsock/batching_conn_linux.go => net/batching/conn_linux.go (88%) rename wgengine/magicsock/batching_conn_linux_test.go => net/batching/conn_linux_test.go (89%) delete mode 100644 wgengine/magicsock/batching_conn.go delete mode 100644 wgengine/magicsock/batching_conn_default.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 2dbf49d07..1ecef4953 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -838,6 +838,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ 
tailscale.com/metrics from tailscale.com/derp+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ + 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 7c4885a4b..07f5958ca 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -311,6 +311,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ + 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index b28460352..5e558a0cd 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -268,6 +268,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ diff --git a/net/batching/conn.go b/net/batching/conn.go new file mode 100644 index 000000000..2c6100258 --- /dev/null +++ b/net/batching/conn.go @@ -0,0 +1,48 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package batching implements a socket optimized for increased throughput. +package batching + +import ( + "net/netip" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" + "tailscale.com/net/packet" + "tailscale.com/types/nettype" +) + +var ( + // This acts as a compile-time check for our usage of ipv6.Message in + // [Conn] for both IPv6 and IPv4 operations. + _ ipv6.Message = ipv4.Message{} +) + +// Conn is a nettype.PacketConn that provides batched i/o using +// platform-specific optimizations, e.g. {recv,send}mmsg & UDP GSO/GRO. +// +// Conn originated from (and is still used by) magicsock where its API was +// strongly influenced by [wireguard-go/conn.Bind] constraints, namely +// wireguard-go's ownership of packet memory. +type Conn interface { + nettype.PacketConn + // ReadBatch reads messages from [Conn] into msgs. It returns the number of + // messages the caller should evaluate for nonzero len, as a zero len + // message may fall on either side of a nonzero. + // + // Each [ipv6.Message.OOB] must be sized to at least MinControlMessageSize(). + // len(msgs) must be at least MinReadBatchMsgsLen(). + ReadBatch(msgs []ipv6.Message, flags int) (n int, err error) + // WriteBatchTo writes buffs to addr. + // + // If geneve.VNI.IsSet(), then geneve is encoded into the space preceding + // offset, and offset must equal [packet.GeneveFixedHeaderLength]. If + // !geneve.VNI.IsSet() then the space preceding offset is ignored. + // + // len(buffs) must be <= batchSize supplied in TryUpgradeToConn(). 
+ // + // WriteBatchTo may return a [neterror.ErrUDPGSODisabled] error if UDP GSO + // was disabled as a result of a send error. + WriteBatchTo(buffs [][]byte, addr netip.AddrPort, geneve packet.GeneveHeader, offset int) error +} diff --git a/net/batching/conn_default.go b/net/batching/conn_default.go new file mode 100644 index 000000000..ed5c494f3 --- /dev/null +++ b/net/batching/conn_default.go @@ -0,0 +1,21 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux + +package batching + +import ( + "tailscale.com/types/nettype" +) + +// TryUpgradeToConn is no-op on all platforms except linux. +func TryUpgradeToConn(pconn nettype.PacketConn, _ string, _ int) nettype.PacketConn { + return pconn +} + +var controlMessageSize = 0 + +func MinControlMessageSize() int { + return controlMessageSize +} diff --git a/wgengine/magicsock/batching_conn_linux.go b/net/batching/conn_linux.go similarity index 88% rename from wgengine/magicsock/batching_conn_linux.go rename to net/batching/conn_linux.go index a0607c624..0416c2729 100644 --- a/wgengine/magicsock/batching_conn_linux.go +++ b/net/batching/conn_linux.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package magicsock +package batching import ( "encoding/binary" @@ -43,10 +43,15 @@ type xnetBatchWriter interface { WriteBatch([]ipv6.Message, int) (int, error) } +var ( + // [linuxBatchingConn] implements [Conn]. + _ Conn = &linuxBatchingConn{} +) + // linuxBatchingConn is a UDP socket that provides batched i/o. It implements -// batchingConn. +// [Conn]. type linuxBatchingConn struct { - pc nettype.PacketConn + pc *net.UDPConn xpc xnetBatchReaderWriter rxOffload bool // supports UDP GRO or similar txOffload atomic.Bool // supports UDP GSO or similar @@ -98,9 +103,8 @@ const ( // // All msgs have their Addr field set to addr. // -// All msgs[i].Buffers[0] are preceded by a Geneve header with vni.get() if -// vni.isSet(). -func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, vni virtualNetworkID, buffs [][]byte, msgs []ipv6.Message, offset int) int { +// All msgs[i].Buffers[0] are preceded by a Geneve header (geneve) if geneve.VNI.IsSet(). 
+func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, geneve packet.GeneveHeader, buffs [][]byte, msgs []ipv6.Message, offset int) int { var ( base = -1 // index of msg we are currently coalescing into gsoSize int // segmentation size of msgs[base] @@ -111,15 +115,10 @@ func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, vni virtualNetwo if addr.IP.To4() == nil { maxPayloadLen = maxIPv6PayloadLen } - vniIsSet := vni.isSet() - var gh packet.GeneveHeader - if vniIsSet { - gh.Protocol = packet.GeneveProtocolWireGuard - gh.VNI = vni.get() - } + vniIsSet := geneve.VNI.IsSet() for i, buff := range buffs { if vniIsSet { - gh.Encode(buffs[i]) + geneve.Encode(buff) } else { buff = buff[offset:] } @@ -179,37 +178,34 @@ func (c *linuxBatchingConn) putSendBatch(batch *sendBatch) { c.sendBatchPool.Put(batch) } -func (c *linuxBatchingConn) WriteBatchTo(buffs [][]byte, addr epAddr, offset int) error { +func (c *linuxBatchingConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort, geneve packet.GeneveHeader, offset int) error { batch := c.getSendBatch() defer c.putSendBatch(batch) - if addr.ap.Addr().Is6() { - as16 := addr.ap.Addr().As16() + if addr.Addr().Is6() { + as16 := addr.Addr().As16() copy(batch.ua.IP, as16[:]) batch.ua.IP = batch.ua.IP[:16] } else { - as4 := addr.ap.Addr().As4() + as4 := addr.Addr().As4() copy(batch.ua.IP, as4[:]) batch.ua.IP = batch.ua.IP[:4] } - batch.ua.Port = int(addr.ap.Port()) + batch.ua.Port = int(addr.Port()) var ( n int retried bool ) retry: if c.txOffload.Load() { - n = c.coalesceMessages(batch.ua, addr.vni, buffs, batch.msgs, offset) + n = c.coalesceMessages(batch.ua, geneve, buffs, batch.msgs, offset) } else { - vniIsSet := addr.vni.isSet() - var gh packet.GeneveHeader + vniIsSet := geneve.VNI.IsSet() if vniIsSet { - gh.Protocol = packet.GeneveProtocolWireGuard - gh.VNI = addr.vni.get() offset -= packet.GeneveFixedHeaderLength } for i := range buffs { if vniIsSet { - gh.Encode(buffs[i]) + geneve.Encode(buffs[i]) } batch.msgs[i].Buffers[0] = buffs[i][offset:] batch.msgs[i].Addr = batch.ua @@ -231,11 +227,7 @@ retry: } func (c *linuxBatchingConn) SyscallConn() (syscall.RawConn, error) { - sc, ok := c.pc.(syscall.Conn) - if !ok { - return nil, errUnsupportedConnType - } - return sc.SyscallConn() + return c.pc.SyscallConn() } func (c *linuxBatchingConn) writeBatch(msgs []ipv6.Message) error { @@ -391,9 +383,10 @@ func setGSOSizeInControl(control *[]byte, gsoSize uint16) { *control = (*control)[:unix.CmsgSpace(2)] } -// tryUpgradeToBatchingConn probes the capabilities of the OS and pconn, and -// upgrades pconn to a *linuxBatchingConn if appropriate. -func tryUpgradeToBatchingConn(pconn nettype.PacketConn, network string, batchSize int) nettype.PacketConn { +// TryUpgradeToConn probes the capabilities of the OS and pconn, and upgrades +// pconn to a [Conn] if appropriate. A batch size of MinReadBatchMsgsLen() is +// suggested for the best performance. +func TryUpgradeToConn(pconn nettype.PacketConn, network string, batchSize int) nettype.PacketConn { if runtime.GOOS != "linux" { // Exclude Android. 
return pconn @@ -415,7 +408,7 @@ func tryUpgradeToBatchingConn(pconn nettype.PacketConn, network string, batchSiz return pconn } b := &linuxBatchingConn{ - pc: pconn, + pc: uc, getGSOSizeFromControl: getGSOSizeFromControl, setGSOSizeInControl: setGSOSizeInControl, sendBatchPool: sync.Pool{ @@ -449,3 +442,21 @@ func tryUpgradeToBatchingConn(pconn nettype.PacketConn, network string, batchSiz b.txOffload.Store(txOffload) return b } + +var controlMessageSize = -1 // bomb if used for allocation before init + +func init() { + // controlMessageSize is set to hold a UDP_GRO or UDP_SEGMENT control + // message. These contain a single uint16 of data. + controlMessageSize = unix.CmsgSpace(2) +} + +// MinControlMessageSize returns the minimum control message size required to +// support read batching via [Conn.ReadBatch]. +func MinControlMessageSize() int { + return controlMessageSize +} + +func MinReadBatchMsgsLen() int { + return 128 +} diff --git a/wgengine/magicsock/batching_conn_linux_test.go b/net/batching/conn_linux_test.go similarity index 89% rename from wgengine/magicsock/batching_conn_linux_test.go rename to net/batching/conn_linux_test.go index 7e0ab8fc4..e33ad6d7a 100644 --- a/wgengine/magicsock/batching_conn_linux_test.go +++ b/net/batching/conn_linux_test.go @@ -1,13 +1,14 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package magicsock +package batching import ( "encoding/binary" "net" "testing" + "github.com/tailscale/wireguard-go/conn" "golang.org/x/net/ipv6" "tailscale.com/net/packet" ) @@ -159,13 +160,15 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { return make([]byte, len+packet.GeneveFixedHeaderLength, cap+packet.GeneveFixedHeaderLength) } - vni1 := virtualNetworkID{} - vni1.set(1) + geneve := packet.GeneveHeader{ + Protocol: packet.GeneveProtocolWireGuard, + } + geneve.VNI.Set(1) cases := []struct { name string buffs [][]byte - vni virtualNetworkID + geneve packet.GeneveHeader wantLens []int wantGSO []int }{ @@ -182,7 +185,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { buffs: [][]byte{ withGeneveSpace(1, 1), }, - vni: vni1, + geneve: geneve, wantLens: []int{1 + packet.GeneveFixedHeaderLength}, wantGSO: []int{0}, }, @@ -201,7 +204,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { withGeneveSpace(1, 2+packet.GeneveFixedHeaderLength), withGeneveSpace(1, 1), }, - vni: vni1, + geneve: geneve, wantLens: []int{2 + (2 * packet.GeneveFixedHeaderLength)}, wantGSO: []int{1 + packet.GeneveFixedHeaderLength}, }, @@ -220,7 +223,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { withGeneveSpace(2, 3+packet.GeneveFixedHeaderLength), withGeneveSpace(1, 1), }, - vni: vni1, + geneve: geneve, wantLens: []int{3 + (2 * packet.GeneveFixedHeaderLength)}, wantGSO: []int{2 + packet.GeneveFixedHeaderLength}, }, @@ -241,7 +244,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { withGeneveSpace(1, 1), withGeneveSpace(2, 2), }, - vni: vni1, + geneve: geneve, wantLens: []int{3 + (2 * packet.GeneveFixedHeaderLength), 2 + packet.GeneveFixedHeaderLength}, wantGSO: []int{2 + packet.GeneveFixedHeaderLength, 0}, }, @@ -262,7 +265,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { withGeneveSpace(2, 2), withGeneveSpace(2, 2), }, - vni: vni1, + geneve: geneve, wantLens: []int{4 + (2 * packet.GeneveFixedHeaderLength), 2 + packet.GeneveFixedHeaderLength}, wantGSO: []int{2 + packet.GeneveFixedHeaderLength, 0}, }, @@ -279,7 +282,7 @@ func Test_linuxBatchingConn_coalesceMessages(t 
*testing.T) { msgs[i].Buffers = make([][]byte, 1) msgs[i].OOB = make([]byte, 0, 2) } - got := c.coalesceMessages(addr, tt.vni, tt.buffs, msgs, packet.GeneveFixedHeaderLength) + got := c.coalesceMessages(addr, tt.geneve, tt.buffs, msgs, packet.GeneveFixedHeaderLength) if got != len(tt.wantLens) { t.Fatalf("got len %d want: %d", got, len(tt.wantLens)) } @@ -302,3 +305,12 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { }) } } + +func TestMinReadBatchMsgsLen(t *testing.T) { + // So long as magicsock uses [Conn], and [wireguard-go/conn.Bind] API is + // shaped for wireguard-go to control packet memory, these values should be + // aligned. + if MinReadBatchMsgsLen() != conn.IdealBatchSize { + t.Fatalf("MinReadBatchMsgsLen():%d != conn.IdealBatchSize(): %d", MinReadBatchMsgsLen(), conn.IdealBatchSize) + } +} diff --git a/net/packet/geneve.go b/net/packet/geneve.go index 29970a8fd..71b365ae8 100644 --- a/net/packet/geneve.go +++ b/net/packet/geneve.go @@ -24,6 +24,33 @@ const ( GeneveProtocolWireGuard uint16 = 0x7A12 ) +// VirtualNetworkID is a Geneve header (RFC8926) 3-byte virtual network +// identifier. Its methods are NOT thread-safe. +type VirtualNetworkID struct { + _vni uint32 +} + +const ( + vniSetMask uint32 = 0xFF000000 + vniGetMask uint32 = ^vniSetMask +) + +// IsSet returns true if Set() had been called previously, otherwise false. +func (v *VirtualNetworkID) IsSet() bool { + return v._vni&vniSetMask != 0 +} + +// Set sets the provided VNI. If VNI exceeds the 3-byte storage it will be +// clamped. +func (v *VirtualNetworkID) Set(vni uint32) { + v._vni = vni | vniSetMask +} + +// Get returns the VNI value. +func (v *VirtualNetworkID) Get() uint32 { + return v._vni & vniGetMask +} + // GeneveHeader represents the fixed size Geneve header from RFC8926. // TLVs/options are not implemented/supported. // @@ -51,7 +78,7 @@ type GeneveHeader struct { // decisions or MAY be used as a mechanism to distinguish between // overlapping address spaces contained in the encapsulated packet when load // balancing across CPUs. - VNI uint32 + VNI VirtualNetworkID // O (1 bit): Control packet. This packet contains a control message. // Control messages are sent between tunnel endpoints. Tunnel endpoints MUST @@ -65,12 +92,18 @@ type GeneveHeader struct { Control bool } -// Encode encodes GeneveHeader into b. If len(b) < GeneveFixedHeaderLength an -// io.ErrShortBuffer error is returned. +var ErrGeneveVNIUnset = errors.New("VNI is unset") + +// Encode encodes GeneveHeader into b. If len(b) < [GeneveFixedHeaderLength] an +// [io.ErrShortBuffer] error is returned. If !h.VNI.IsSet() then an +// [ErrGeneveVNIUnset] error is returned. func (h *GeneveHeader) Encode(b []byte) error { if len(b) < GeneveFixedHeaderLength { return io.ErrShortBuffer } + if !h.VNI.IsSet() { + return ErrGeneveVNIUnset + } if h.Version > 3 { return errors.New("version must be <= 3") } @@ -81,15 +114,12 @@ func (h *GeneveHeader) Encode(b []byte) error { b[1] |= 0x80 } binary.BigEndian.PutUint16(b[2:], h.Protocol) - if h.VNI > 1<<24-1 { - return errors.New("VNI must be <= 2^24-1") - } - binary.BigEndian.PutUint32(b[4:], h.VNI<<8) + binary.BigEndian.PutUint32(b[4:], h.VNI.Get()<<8) return nil } -// Decode decodes GeneveHeader from b. If len(b) < GeneveFixedHeaderLength an -// io.ErrShortBuffer error is returned. +// Decode decodes GeneveHeader from b. If len(b) < [GeneveFixedHeaderLength] an +// [io.ErrShortBuffer] error is returned. 
func (h *GeneveHeader) Decode(b []byte) error { if len(b) < GeneveFixedHeaderLength { return io.ErrShortBuffer @@ -99,6 +129,6 @@ func (h *GeneveHeader) Decode(b []byte) error { h.Control = true } h.Protocol = binary.BigEndian.Uint16(b[2:]) - h.VNI = binary.BigEndian.Uint32(b[4:]) >> 8 + h.VNI.Set(binary.BigEndian.Uint32(b[4:]) >> 8) return nil } diff --git a/net/packet/geneve_test.go b/net/packet/geneve_test.go index 029638638..be9784998 100644 --- a/net/packet/geneve_test.go +++ b/net/packet/geneve_test.go @@ -4,18 +4,21 @@ package packet import ( + "math" "testing" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "tailscale.com/types/ptr" ) func TestGeneveHeader(t *testing.T) { in := GeneveHeader{ Version: 3, Protocol: GeneveProtocolDisco, - VNI: 1<<24 - 1, Control: true, } + in.VNI.Set(1<<24 - 1) b := make([]byte, GeneveFixedHeaderLength) err := in.Encode(b) if err != nil { @@ -26,7 +29,56 @@ func TestGeneveHeader(t *testing.T) { if err != nil { t.Fatal(err) } - if diff := cmp.Diff(out, in); diff != "" { + if diff := cmp.Diff(out, in, cmpopts.EquateComparable(VirtualNetworkID{})); diff != "" { t.Fatalf("wrong results (-got +want)\n%s", diff) } } + +func TestVirtualNetworkID(t *testing.T) { + tests := []struct { + name string + set *uint32 + want uint32 + }{ + { + "don't Set", + nil, + 0, + }, + { + "Set 0", + ptr.To(uint32(0)), + 0, + }, + { + "Set 1", + ptr.To(uint32(1)), + 1, + }, + { + "Set math.MaxUint32", + ptr.To(uint32(math.MaxUint32)), + 1<<24 - 1, + }, + { + "Set max 3-byte value", + ptr.To(uint32(1<<24 - 1)), + 1<<24 - 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := VirtualNetworkID{} + if tt.set != nil { + v.Set(*tt.set) + } + if v.IsSet() != (tt.set != nil) { + t.Fatalf("IsSet: %v != wantIsSet: %v", v.IsSet(), tt.set != nil) + } + if v.Get() != tt.want { + t.Fatalf("Get(): %v != want: %v", v.Get(), tt.want) + } + }) + } +} diff --git a/net/udprelay/server.go b/net/udprelay/server.go index aece3bc59..e138c33f2 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -140,7 +140,8 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex rand.Read(e.challenge[senderIndex][:]) copy(m.Challenge[:], e.challenge[senderIndex][:]) reply := make([]byte, packet.GeneveFixedHeaderLength, 512) - gh := packet.GeneveHeader{Control: true, VNI: e.vni, Protocol: packet.GeneveProtocolDisco} + gh := packet.GeneveHeader{Control: true, Protocol: packet.GeneveProtocolDisco} + gh.VNI.Set(e.vni) err = gh.Encode(reply) if err != nil { return @@ -543,7 +544,7 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte, rxSocket, otherAFSo // it simple (and slow) for now. 
s.mu.Lock() defer s.mu.Unlock() - e, ok := s.byVNI[gh.VNI] + e, ok := s.byVNI[gh.VNI.Get()] if !ok { // unknown VNI return diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index de1c29364..8fc4a4f78 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -62,7 +62,8 @@ func (c *testClient) read(t *testing.T) []byte { func (c *testClient) writeDataPkt(t *testing.T, b []byte) { pkt := make([]byte, packet.GeneveFixedHeaderLength, packet.GeneveFixedHeaderLength+len(b)) - gh := packet.GeneveHeader{Control: false, VNI: c.vni, Protocol: packet.GeneveProtocolWireGuard} + gh := packet.GeneveHeader{Control: false, Protocol: packet.GeneveProtocolWireGuard} + gh.VNI.Set(c.vni) err := gh.Encode(pkt) if err != nil { t.Fatal(err) @@ -84,7 +85,7 @@ func (c *testClient) readDataPkt(t *testing.T) []byte { if gh.Control { t.Fatal("unexpected control") } - if gh.VNI != c.vni { + if gh.VNI.Get() != c.vni { t.Fatal("unexpected vni") } return b[packet.GeneveFixedHeaderLength:] @@ -92,7 +93,8 @@ func (c *testClient) readDataPkt(t *testing.T) []byte { func (c *testClient) writeControlDiscoMsg(t *testing.T, msg disco.Message) { pkt := make([]byte, packet.GeneveFixedHeaderLength, 512) - gh := packet.GeneveHeader{Control: true, VNI: c.vni, Protocol: packet.GeneveProtocolDisco} + gh := packet.GeneveHeader{Control: true, Protocol: packet.GeneveProtocolDisco} + gh.VNI.Set(c.vni) err := gh.Encode(pkt) if err != nil { t.Fatal(err) @@ -117,7 +119,7 @@ func (c *testClient) readControlDiscoMsg(t *testing.T) disco.Message { if !gh.Control { t.Fatal("unexpected non-control") } - if gh.VNI != c.vni { + if gh.VNI.Get() != c.vni { t.Fatal("unexpected vni") } b = b[packet.GeneveFixedHeaderLength:] diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index da3175b8c..9ad340c90 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -264,6 +264,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ diff --git a/wgengine/magicsock/batching_conn.go b/wgengine/magicsock/batching_conn.go deleted file mode 100644 index b769907db..000000000 --- a/wgengine/magicsock/batching_conn.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package magicsock - -import ( - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" - "tailscale.com/types/nettype" -) - -var ( - // This acts as a compile-time check for our usage of ipv6.Message in - // batchingConn for both IPv6 and IPv4 operations. - _ ipv6.Message = ipv4.Message{} -) - -// batchingConn is a nettype.PacketConn that provides batched i/o. 
-type batchingConn interface { - nettype.PacketConn - ReadBatch(msgs []ipv6.Message, flags int) (n int, err error) - WriteBatchTo(buffs [][]byte, addr epAddr, offset int) error -} diff --git a/wgengine/magicsock/batching_conn_default.go b/wgengine/magicsock/batching_conn_default.go deleted file mode 100644 index 519cf8082..000000000 --- a/wgengine/magicsock/batching_conn_default.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux - -package magicsock - -import ( - "tailscale.com/types/nettype" -) - -func tryUpgradeToBatchingConn(pconn nettype.PacketConn, _ string, _ int) nettype.PacketConn { - return pconn -} diff --git a/wgengine/magicsock/debughttp.go b/wgengine/magicsock/debughttp.go index cfdf8c1e1..a0159d21e 100644 --- a/wgengine/magicsock/debughttp.go +++ b/wgengine/magicsock/debughttp.go @@ -152,7 +152,7 @@ func printEndpointHTML(w io.Writer, ep *endpoint) { io.WriteString(w, "
<p>Endpoints:</p><ul>") for _, ipp := range eps { s := ep.endpointState[ipp] - if ipp == ep.bestAddr.ap && !ep.bestAddr.vni.isSet() { + if ipp == ep.bestAddr.ap && !ep.bestAddr.vni.IsSet() { fmt.Fprintf(w, "<li>%s: (best)<ul>", ipp) } else { fmt.Fprintf(w, "<li>%s: ...<ul>
                      ", ipp) @@ -208,7 +208,7 @@ func epAddrLess(a, b epAddr) bool { return v < 0 } if a.ap.Port() == b.ap.Port() { - return a.vni.get() < b.vni.get() + return a.vni.Get() < b.vni.Get() } return a.ap.Port() < b.ap.Port() } diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 6381b0210..951e59011 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -108,7 +108,7 @@ func (de *endpoint) udpRelayEndpointReady(maybeBest addrQuality) { defer de.mu.Unlock() now := mono.Now() curBestAddrTrusted := now.Before(de.trustBestAddrUntil) - sameRelayServer := de.bestAddr.vni.isSet() && maybeBest.relayServerDisco.Compare(de.bestAddr.relayServerDisco) == 0 + sameRelayServer := de.bestAddr.vni.IsSet() && maybeBest.relayServerDisco.Compare(de.bestAddr.relayServerDisco) == 0 if !curBestAddrTrusted || sameRelayServer || @@ -1070,7 +1070,7 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { switch { case udpAddr.ap.Addr().Is4(): - if udpAddr.vni.isSet() { + if udpAddr.vni.IsSet() { de.c.metrics.outboundPacketsPeerRelayIPv4Total.Add(int64(len(buffs))) de.c.metrics.outboundBytesPeerRelayIPv4Total.Add(int64(txBytes)) } else { @@ -1078,7 +1078,7 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { de.c.metrics.outboundBytesIPv4Total.Add(int64(txBytes)) } case udpAddr.ap.Addr().Is6(): - if udpAddr.vni.isSet() { + if udpAddr.vni.IsSet() { de.c.metrics.outboundPacketsPeerRelayIPv6Total.Add(int64(len(buffs))) de.c.metrics.outboundBytesPeerRelayIPv6Total.Add(int64(txBytes)) } else { @@ -1160,7 +1160,7 @@ func (de *endpoint) discoPingTimeout(txid stun.TxID) { return } bestUntrusted := mono.Now().After(de.trustBestAddrUntil) - if sp.to == de.bestAddr.epAddr && sp.to.vni.isSet() && bestUntrusted { + if sp.to == de.bestAddr.epAddr && sp.to.vni.IsSet() && bestUntrusted { // TODO(jwhited): consider applying this to direct UDP paths as well de.clearBestAddrLocked() } @@ -1274,7 +1274,7 @@ func (de *endpoint) startDiscoPingLocked(ep epAddr, now mono.Time, purpose disco return } if purpose != pingCLI && - !ep.vni.isSet() { // de.endpointState is only relevant for direct/non-vni epAddr's + !ep.vni.IsSet() { // de.endpointState is only relevant for direct/non-vni epAddr's st, ok := de.endpointState[ep.ap] if !ok { // Shouldn't happen. But don't ping an endpoint that's @@ -1610,7 +1610,7 @@ func (de *endpoint) noteBadEndpoint(udpAddr epAddr) { de.clearBestAddrLocked() - if !udpAddr.vni.isSet() { + if !udpAddr.vni.IsSet() { if st, ok := de.endpointState[udpAddr.ap]; ok { st.clear() } @@ -1644,7 +1644,7 @@ func pingSizeToPktLen(size int, udpAddr epAddr) tstun.WireMTU { headerLen = ipv6.HeaderLen } headerLen += 8 // UDP header length - if udpAddr.vni.isSet() { + if udpAddr.vni.IsSet() { headerLen += packet.GeneveFixedHeaderLength } return tstun.WireMTU(size + headerLen) @@ -1699,7 +1699,7 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd now := mono.Now() latency := now.Sub(sp.at) - if !isDerp && !src.vni.isSet() { + if !isDerp && !src.vni.IsSet() { // Note: we check vni.isSet() as relay [epAddr]'s are not stored in // endpointState, they are either de.bestAddr or not. st, ok := de.endpointState[sp.to.ap] @@ -1748,7 +1748,7 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd // we don't clear direct UDP paths on disco ping timeout (see // discoPingTimeout). if betterAddr(thisPong, de.bestAddr) { - if src.vni.isSet() { + if src.vni.IsSet() { // This would be unexpected. 
Switching to a Geneve-encapsulated // path should only happen in de.relayEndpointReady(). de.c.logf("[unexpected] switching to Geneve-encapsulated path %v from %v", thisPong, de.bestAddr) @@ -1778,23 +1778,23 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd } // epAddr is a [netip.AddrPort] with an optional Geneve header (RFC8926) -// [virtualNetworkID]. +// [packet.VirtualNetworkID]. type epAddr struct { - ap netip.AddrPort // if ap == tailcfg.DerpMagicIPAddr then vni is never set - vni virtualNetworkID // vni.isSet() indicates if this [epAddr] involves a Geneve header + ap netip.AddrPort // if ap == tailcfg.DerpMagicIPAddr then vni is never set + vni packet.VirtualNetworkID // vni.IsSet() indicates if this [epAddr] involves a Geneve header } // isDirect returns true if e.ap is valid and not tailcfg.DerpMagicIPAddr, // and a VNI is not set. func (e epAddr) isDirect() bool { - return e.ap.IsValid() && e.ap.Addr() != tailcfg.DerpMagicIPAddr && !e.vni.isSet() + return e.ap.IsValid() && e.ap.Addr() != tailcfg.DerpMagicIPAddr && !e.vni.IsSet() } func (e epAddr) String() string { - if !e.vni.isSet() { + if !e.vni.IsSet() { return e.ap.String() } - return fmt.Sprintf("%v:vni:%d", e.ap.String(), e.vni.get()) + return fmt.Sprintf("%v:vni:%d", e.ap.String(), e.vni.Get()) } // addrQuality is an [epAddr], an optional [key.DiscoPublic] if a relay server @@ -1833,10 +1833,10 @@ func betterAddr(a, b addrQuality) bool { // Geneve-encapsulated paths (UDP relay servers) are lower preference in // relation to non. - if !a.vni.isSet() && b.vni.isSet() { + if !a.vni.IsSet() && b.vni.IsSet() { return true } - if a.vni.isSet() && !b.vni.isSet() { + if a.vni.IsSet() && !b.vni.IsSet() { return false } @@ -1982,7 +1982,7 @@ func (de *endpoint) populatePeerStatus(ps *ipnstate.PeerStatus) { ps.Active = now.Sub(de.lastSendExt) < sessionActiveTimeout if udpAddr, derpAddr, _ := de.addrForSendLocked(now); udpAddr.ap.IsValid() && !derpAddr.IsValid() { - if udpAddr.vni.isSet() { + if udpAddr.vni.IsSet() { ps.PeerRelay = udpAddr.String() } else { ps.CurAddr = udpAddr.String() diff --git a/wgengine/magicsock/endpoint_test.go b/wgengine/magicsock/endpoint_test.go index 92f4ef1d3..666d86231 100644 --- a/wgengine/magicsock/endpoint_test.go +++ b/wgengine/magicsock/endpoint_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "tailscale.com/net/packet" "tailscale.com/tailcfg" "tailscale.com/tstime/mono" "tailscale.com/types/key" @@ -327,24 +328,24 @@ func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { } func Test_epAddr_isDirectUDP(t *testing.T) { - vni := virtualNetworkID{} - vni.set(7) + vni := packet.VirtualNetworkID{} + vni.Set(7) tests := []struct { name string ap netip.AddrPort - vni virtualNetworkID + vni packet.VirtualNetworkID want bool }{ { name: "true", ap: netip.MustParseAddrPort("192.0.2.1:7"), - vni: virtualNetworkID{}, + vni: packet.VirtualNetworkID{}, want: true, }, { name: "false derp magic addr", ap: netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, 0), - vni: virtualNetworkID{}, + vni: packet.VirtualNetworkID{}, want: false, }, { @@ -370,7 +371,7 @@ func Test_epAddr_isDirectUDP(t *testing.T) { func Test_endpoint_udpRelayEndpointReady(t *testing.T) { directAddrQuality := addrQuality{epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.1:7")}} peerRelayAddrQuality := addrQuality{epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.2:77")}, latency: time.Second} - peerRelayAddrQuality.vni.set(1) + peerRelayAddrQuality.vni.Set(1) peerRelayAddrQualityHigherLatencySameServer := 
addrQuality{ epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.3:77"), vni: peerRelayAddrQuality.vni}, latency: peerRelayAddrQuality.latency * 10, diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 0fac793ef..a99a0a8e3 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -36,6 +36,7 @@ import ( "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn/ipnstate" + "tailscale.com/net/batching" "tailscale.com/net/connstats" "tailscale.com/net/netcheck" "tailscale.com/net/neterror" @@ -626,7 +627,7 @@ func newConn(logf logger.Logf) *Conn { msgs := make([]ipv6.Message, c.bind.BatchSize()) for i := range msgs { msgs[i].Buffers = make([][]byte, 1) - msgs[i].OOB = make([]byte, controlMessageSize) + msgs[i].OOB = make([]byte, batching.MinControlMessageSize()) } batch := &receiveBatch{ msgs: msgs, @@ -1206,7 +1207,7 @@ func (c *Conn) Ping(peer tailcfg.NodeView, res *ipnstate.PingResult, size int, c func (c *Conn) populateCLIPingResponseLocked(res *ipnstate.PingResult, latency time.Duration, ep epAddr) { res.LatencySeconds = latency.Seconds() if ep.ap.Addr() != tailcfg.DerpMagicIPAddr { - if ep.vni.isSet() { + if ep.vni.IsSet() { res.PeerRelay = ep.String() } else { res.Endpoint = ep.String() @@ -1473,9 +1474,9 @@ func (c *Conn) Send(buffs [][]byte, ep conn.Endpoint, offset int) (err error) { // deemed "under handshake load" and ends up transmitting a cookie reply // using the received [conn.Endpoint] in [device.SendHandshakeCookie]. if ep.src.ap.Addr().Is6() { - return c.pconn6.WriteBatchTo(buffs, ep.src, offset) + return c.pconn6.WriteWireGuardBatchTo(buffs, ep.src, offset) } - return c.pconn4.WriteBatchTo(buffs, ep.src, offset) + return c.pconn4.WriteWireGuardBatchTo(buffs, ep.src, offset) } return nil } @@ -1498,9 +1499,9 @@ func (c *Conn) sendUDPBatch(addr epAddr, buffs [][]byte, offset int) (sent bool, panic("bogus sendUDPBatch addr type") } if isIPv6 { - err = c.pconn6.WriteBatchTo(buffs, addr, offset) + err = c.pconn6.WriteWireGuardBatchTo(buffs, addr, offset) } else { - err = c.pconn4.WriteBatchTo(buffs, addr, offset) + err = c.pconn4.WriteWireGuardBatchTo(buffs, addr, offset) } if err != nil { var errGSO neterror.ErrUDPGSODisabled @@ -1793,7 +1794,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach c.logf("[unexpected] geneve header decoding error: %v", err) return nil, 0, false, false } - src.vni.set(geneve.VNI) + src.vni = geneve.VNI } switch pt { case packetLooksLikeDisco: @@ -1825,7 +1826,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // geneveInclusivePacketLen holds the packet length prior to any potential // Geneve header stripping. geneveInclusivePacketLen := len(b) - if src.vni.isSet() { + if src.vni.IsSet() { // Strip away the Geneve header before returning the packet to // wireguard-go. // @@ -1858,7 +1859,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach if stats := c.stats.Load(); stats != nil { stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, geneveInclusivePacketLen) } - if src.vni.isSet() && (connNoted || looksLikeInitiationMsg(b)) { + if src.vni.IsSet() && (connNoted || looksLikeInitiationMsg(b)) { // connNoted is periodic, but we also want to verify if the peer is who // we believe for all initiation messages, otherwise we could get // unlucky and fail to JIT configure the "correct" peer. @@ -1887,33 +1888,6 @@ const ( // speeds. 
var debugIPv4DiscoPingPenalty = envknob.RegisterDuration("TS_DISCO_PONG_IPV4_DELAY") -// virtualNetworkID is a Geneve header (RFC8926) 3-byte virtual network -// identifier. Its field must only ever be accessed via its methods. -type virtualNetworkID struct { - _vni uint32 -} - -const ( - vniSetMask uint32 = 0xFF000000 - vniGetMask uint32 = ^vniSetMask -) - -// isSet returns true if set() had been called previously, otherwise false. -func (v *virtualNetworkID) isSet() bool { - return v._vni&vniSetMask != 0 -} - -// set sets the provided VNI. If VNI exceeds the 3-byte storage it will be -// clamped. -func (v *virtualNetworkID) set(vni uint32) { - v._vni = vni | vniSetMask -} - -// get returns the VNI value. -func (v *virtualNetworkID) get() uint32 { - return v._vni & vniGetMask -} - // sendDiscoAllocateUDPRelayEndpointRequest is primarily an alias for // sendDiscoMessage, but it will alternatively send m over the eventbus if dst // is a DERP IP:port, and dstKey is self. This saves a round-trip through DERP @@ -1981,11 +1955,11 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. c.mu.Unlock() pkt := make([]byte, 0, 512) // TODO: size it correctly? pool? if it matters. - if dst.vni.isSet() { + if dst.vni.IsSet() { gh := packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolDisco, - VNI: dst.vni.get(), + VNI: dst.vni, Control: isRelayHandshakeMsg, } pkt = append(pkt, make([]byte, packet.GeneveFixedHeaderLength)...) @@ -2006,7 +1980,7 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. box := di.sharedKey.Seal(m.AppendMarshal(nil)) pkt = append(pkt, box...) const isDisco = true - sent, err = c.sendAddr(dst.ap, dstKey, pkt, isDisco, dst.vni.isSet()) + sent, err = c.sendAddr(dst.ap, dstKey, pkt, isDisco, dst.vni.IsSet()) if sent { if logLevel == discoLog || (logLevel == discoVerboseLog && debugDisco()) { node := "?" @@ -2294,7 +2268,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake } return true }) - if !knownTxID && src.vni.isSet() { + if !knownTxID && src.vni.IsSet() { // If it's an unknown TxID, and it's Geneve-encapsulated, then // make [relayManager] aware. It might be in the middle of probing // src. @@ -2512,7 +2486,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN di.lastPingTime = time.Now() isDerp := src.ap.Addr() == tailcfg.DerpMagicIPAddr - if src.vni.isSet() { + if src.vni.IsSet() { if isDerp { c.logf("[unexpected] got Geneve-encapsulated disco ping from %v/%v over DERP", src, derpNodeSrc) return diff --git a/wgengine/magicsock/magicsock_default.go b/wgengine/magicsock/magicsock_default.go index 7614c64c9..4922f2c09 100644 --- a/wgengine/magicsock/magicsock_default.go +++ b/wgengine/magicsock/magicsock_default.go @@ -21,7 +21,3 @@ func (c *Conn) listenRawDisco(family string) (io.Closer, error) { func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { portableTrySetSocketBuffer(pconn, logf) } - -const ( - controlMessageSize = 0 -) diff --git a/wgengine/magicsock/magicsock_linux.go b/wgengine/magicsock/magicsock_linux.go index 070380029..3369bcb89 100644 --- a/wgengine/magicsock/magicsock_linux.go +++ b/wgengine/magicsock/magicsock_linux.go @@ -516,11 +516,3 @@ func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { } } } - -var controlMessageSize = -1 // bomb if used for allocation before init - -func init() { - // controlMessageSize is set to hold a UDP_GRO or UDP_SEGMENT control - // message. 
These contain a single uint16 of data. - controlMessageSize = unix.CmsgSpace(2) -} diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 9399dab32..5e348b02b 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -13,7 +13,6 @@ import ( "errors" "fmt" "io" - "math" "math/rand" "net" "net/http" @@ -1787,7 +1786,7 @@ func TestBetterAddr(t *testing.T) { } avl := func(ipps string, vni uint32, d time.Duration) addrQuality { q := al(ipps, d) - q.vni.set(vni) + q.vni.Set(vni) return q } zero := addrQuality{} @@ -3178,9 +3177,9 @@ func Test_packetLooksLike(t *testing.T) { gh := packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolDisco, - VNI: 1, Control: true, } + gh.VNI.Set(1) err := gh.Encode(geneveEncapDisco) if err != nil { t.Fatal(err) @@ -3200,9 +3199,9 @@ func Test_packetLooksLike(t *testing.T) { gh = packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolWireGuard, - VNI: 1, Control: true, } + gh.VNI.Set(1) err = gh.Encode(geneveEncapWireGuard) if err != nil { t.Fatal(err) @@ -3213,9 +3212,9 @@ func Test_packetLooksLike(t *testing.T) { gh = packet.GeneveHeader{ Version: 1, Protocol: packet.GeneveProtocolDisco, - VNI: 1, Control: true, } + gh.VNI.Set(1) err = gh.Encode(geneveEncapDiscoNonZeroGeneveVersion) if err != nil { t.Fatal(err) @@ -3226,9 +3225,9 @@ func Test_packetLooksLike(t *testing.T) { gh = packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolDisco, - VNI: 1, Control: true, } + gh.VNI.Set(1) err = gh.Encode(geneveEncapDiscoNonZeroGeneveReservedBits) if err != nil { t.Fatal(err) @@ -3240,9 +3239,9 @@ func Test_packetLooksLike(t *testing.T) { gh = packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolDisco, - VNI: 1, Control: true, } + gh.VNI.Set(1) err = gh.Encode(geneveEncapDiscoNonZeroGeneveVNILSB) if err != nil { t.Fatal(err) @@ -3342,55 +3341,6 @@ func Test_packetLooksLike(t *testing.T) { } } -func Test_virtualNetworkID(t *testing.T) { - tests := []struct { - name string - set *uint32 - want uint32 - }{ - { - "don't set", - nil, - 0, - }, - { - "set 0", - ptr.To(uint32(0)), - 0, - }, - { - "set 1", - ptr.To(uint32(1)), - 1, - }, - { - "set math.MaxUint32", - ptr.To(uint32(math.MaxUint32)), - 1<<24 - 1, - }, - { - "set max 3-byte value", - ptr.To(uint32(1<<24 - 1)), - 1<<24 - 1, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - v := virtualNetworkID{} - if tt.set != nil { - v.set(*tt.set) - } - if v.isSet() != (tt.set != nil) { - t.Fatalf("isSet: %v != wantIsSet: %v", v.isSet(), tt.set != nil) - } - if v.get() != tt.want { - t.Fatalf("get(): %v != want: %v", v.get(), tt.want) - } - }) - } -} - func Test_looksLikeInitiationMsg(t *testing.T) { // initMsg was captured as the first packet from a WireGuard "session" initMsg, err := hex.DecodeString("01000000d9205f67915a500e377b409e0c3d97ca91e68654b95952de965e75df491000cce00632678cd9e8c8525556aa8daf24e6cfc44c48812bb560ff3c1c5dee061b3f833dfaa48acf13b64bd1e0027aa4d977a3721b82fd6072338702fc3193651404980ad46dae2869ba6416cc0eb38621a4140b5b918eb6402b697202adb3002a6d00000000000000000000000000000000") @@ -3772,6 +3722,7 @@ func TestConn_receiveIP(t *testing.T) { gh := packet.GeneveHeader{ Protocol: packet.GeneveProtocolDisco, } + gh.VNI.Set(1) err := gh.Encode(looksLikeGeneveDisco) if err != nil { t.Fatal(err) @@ -3796,10 +3747,8 @@ func TestConn_receiveIP(t *testing.T) { looksLikeGeneveWireGuardInit := make([]byte, packet.GeneveFixedHeaderLength+device.MessageInitiationSize) gh = 
packet.GeneveHeader{ Protocol: packet.GeneveProtocolWireGuard, - VNI: 1, } - vni := virtualNetworkID{} - vni.set(gh.VNI) + gh.VNI.Set(1) err = gh.Encode(looksLikeGeneveWireGuardInit) if err != nil { t.Fatal(err) @@ -3922,7 +3871,7 @@ func TestConn_receiveIP(t *testing.T) { ipp: netip.MustParseAddrPort("127.0.0.1:7777"), cache: &epAddrEndpointCache{}, insertWantEndpointTypeInPeerMap: true, - peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777"), vni: vni}, + peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777"), vni: gh.VNI}, wantEndpointType: &lazyEndpoint{ maybeEP: newPeerMapInsertableEndpoint(0), }, @@ -3938,7 +3887,7 @@ func TestConn_receiveIP(t *testing.T) { ipp: netip.MustParseAddrPort("127.0.0.1:7777"), cache: &epAddrEndpointCache{}, insertWantEndpointTypeInPeerMap: true, - peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777"), vni: vni}, + peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777"), vni: gh.VNI}, wantEndpointType: &lazyEndpoint{ maybeEP: newPeerMapInsertableEndpoint(mono.Now().Add(time.Hour * 24)), }, diff --git a/wgengine/magicsock/peermap.go b/wgengine/magicsock/peermap.go index 838905396..136353563 100644 --- a/wgengine/magicsock/peermap.go +++ b/wgengine/magicsock/peermap.go @@ -184,12 +184,12 @@ func (m *peerMap) setNodeKeyForEpAddr(addr epAddr, nk key.NodePublic) { if pi := m.byEpAddr[addr]; pi != nil { delete(pi.epAddrs, addr) delete(m.byEpAddr, addr) - if addr.vni.isSet() { + if addr.vni.IsSet() { delete(m.relayEpAddrByNodeKey, pi.ep.publicKey) } } if pi, ok := m.byNodeKey[nk]; ok { - if addr.vni.isSet() { + if addr.vni.IsSet() { relay, ok := m.relayEpAddrByNodeKey[nk] if ok { delete(pi.epAddrs, relay) diff --git a/wgengine/magicsock/peermap_test.go b/wgengine/magicsock/peermap_test.go index 52504272f..171e22a6d 100644 --- a/wgengine/magicsock/peermap_test.go +++ b/wgengine/magicsock/peermap_test.go @@ -7,6 +7,7 @@ import ( "net/netip" "testing" + "tailscale.com/net/packet" "tailscale.com/types/key" ) @@ -20,8 +21,8 @@ func Test_peerMap_oneRelayEpAddrPerNK(t *testing.T) { ed := &endpointDisco{key: key.NewDisco().Public()} ep.disco.Store(ed) pm.upsertEndpoint(ep, key.DiscoPublic{}) - vni := virtualNetworkID{} - vni.set(1) + vni := packet.VirtualNetworkID{} + vni.Set(1) relayEpAddrA := epAddr{ap: netip.MustParseAddrPort("127.0.0.1:1"), vni: vni} relayEpAddrB := epAddr{ap: netip.MustParseAddrPort("127.0.0.1:2"), vni: vni} pm.setNodeKeyForEpAddr(relayEpAddrA, nk) diff --git a/wgengine/magicsock/rebinding_conn.go b/wgengine/magicsock/rebinding_conn.go index 8b9ad4bb0..2798abbf2 100644 --- a/wgengine/magicsock/rebinding_conn.go +++ b/wgengine/magicsock/rebinding_conn.go @@ -13,6 +13,7 @@ import ( "syscall" "golang.org/x/net/ipv6" + "tailscale.com/net/batching" "tailscale.com/net/netaddr" "tailscale.com/net/packet" "tailscale.com/types/nettype" @@ -42,7 +43,7 @@ type RebindingUDPConn struct { // disrupting surrounding code that assumes nettype.PacketConn is a // *net.UDPConn. func (c *RebindingUDPConn) setConnLocked(p nettype.PacketConn, network string, batchSize int) { - upc := tryUpgradeToBatchingConn(p, network, batchSize) + upc := batching.TryUpgradeToConn(p, network, batchSize) c.pconn = upc c.pconnAtomic.Store(&upc) c.port = uint16(c.localAddrLocked().Port) @@ -72,25 +73,27 @@ func (c *RebindingUDPConn) ReadFromUDPAddrPort(b []byte) (int, netip.AddrPort, e return c.readFromWithInitPconn(*c.pconnAtomic.Load(), b) } -// WriteBatchTo writes buffs to addr. 
-func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr epAddr, offset int) error { +// WriteWireGuardBatchTo writes buffs to addr. It serves primarily as an alias +// for [batching.Conn.WriteBatchTo], with fallback to single packet operations +// if c.pconn is not a [batching.Conn]. +// +// WriteWireGuardBatchTo assumes buffs are WireGuard packets, which is notable +// for Geneve encapsulation: Geneve protocol is set to [packet.GeneveProtocolWireGuard], +// and the control bit is left unset. +func (c *RebindingUDPConn) WriteWireGuardBatchTo(buffs [][]byte, addr epAddr, offset int) error { if offset != packet.GeneveFixedHeaderLength { - return fmt.Errorf("RebindingUDPConn.WriteBatchTo: [unexpected] offset (%d) != Geneve header length (%d)", offset, packet.GeneveFixedHeaderLength) + return fmt.Errorf("RebindingUDPConn.WriteWireGuardBatchTo: [unexpected] offset (%d) != Geneve header length (%d)", offset, packet.GeneveFixedHeaderLength) + } + gh := packet.GeneveHeader{ + Protocol: packet.GeneveProtocolWireGuard, + VNI: addr.vni, } for { pconn := *c.pconnAtomic.Load() - b, ok := pconn.(batchingConn) + b, ok := pconn.(batching.Conn) if !ok { - vniIsSet := addr.vni.isSet() - var gh packet.GeneveHeader - if vniIsSet { - gh = packet.GeneveHeader{ - Protocol: packet.GeneveProtocolWireGuard, - VNI: addr.vni.get(), - } - } for _, buf := range buffs { - if vniIsSet { + if gh.VNI.IsSet() { gh.Encode(buf) } else { buf = buf[offset:] @@ -102,7 +105,7 @@ func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr epAddr, offset int) } return nil } - err := b.WriteBatchTo(buffs, addr, offset) + err := b.WriteBatchTo(buffs, addr.ap, gh, offset) if err != nil { if pconn != c.currentConn() { continue @@ -113,13 +116,12 @@ func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr epAddr, offset int) } } -// ReadBatch reads messages from c into msgs. It returns the number of messages -// the caller should evaluate for nonzero len, as a zero len message may fall -// on either side of a nonzero. +// ReadBatch is an alias for [batching.Conn.ReadBatch] with fallback to single +// packet operations if c.pconn is not a [batching.Conn]. func (c *RebindingUDPConn) ReadBatch(msgs []ipv6.Message, flags int) (int, error) { for { pconn := *c.pconnAtomic.Load() - b, ok := pconn.(batchingConn) + b, ok := pconn.(batching.Conn) if !ok { n, ap, err := c.readFromWithInitPconn(pconn, msgs[0].Buffers[0]) if err == nil { diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index ad8c5fc76..8a1a4fcf5 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -11,6 +11,7 @@ import ( "time" "tailscale.com/disco" + "tailscale.com/net/packet" "tailscale.com/net/stun" udprelay "tailscale.com/net/udprelay/endpoint" "tailscale.com/tailcfg" @@ -384,7 +385,7 @@ func (r *relayManager) handleRxDiscoMsg(conn *Conn, dm disco.Message, relayServe relayServerNodeKey: relayServerNodeKey, disco: discoKey, from: src.ap, - vni: src.vni.get(), + vni: src.vni.Get(), at: time.Now(), }) } @@ -535,8 +536,8 @@ func (r *relayManager) handleRxDiscoMsgRunLoop(event relayDiscoMsgEvent) { // socket on Linux. We make no such efforts here as the raw socket BPF // program does not support Geneve-encapsulated disco, and is also // disabled by default. 
- vni := virtualNetworkID{} - vni.set(event.vni) + vni := packet.VirtualNetworkID{} + vni.Set(event.vni) go event.conn.sendDiscoMessage(epAddr{ap: event.from, vni: vni}, key.NodePublic{}, event.disco, &disco.Pong{ TxID: msg.TxID, Src: event.from, @@ -622,8 +623,8 @@ func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshak return } // This relay endpoint is functional. - vni := virtualNetworkID{} - vni.set(done.work.se.VNI) + vni := packet.VirtualNetworkID{} + vni.Set(done.work.se.VNI) addr := epAddr{ap: done.pongReceivedFrom, vni: vni} // ep.udpRelayEndpointReady() must be called in a new goroutine to prevent // deadlocks as it acquires [endpoint] & [Conn] mutexes. See [relayManager] @@ -784,8 +785,8 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat bind := &disco.BindUDPRelayEndpoint{ BindUDPRelayEndpointCommon: common, } - vni := virtualNetworkID{} - vni.set(work.se.VNI) + vni := packet.VirtualNetworkID{} + vni.Set(work.se.VNI) for _, addrPort := range work.se.AddrPorts { if addrPort.IsValid() { sentBindAny = true From 0f7facfeee1bde318f214b9882349f80fa02d582 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 13 Aug 2025 13:49:27 -0700 Subject: [PATCH 1186/1708] control/controlclient: fix data race on tkaHead (#16855) Grab a copy under mutex in sendMapRequest. Updates #cleanup Signed-off-by: Andrew Lytvynov --- control/controlclient/direct.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 4c9b04ce9..78a86e935 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -856,6 +856,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap hi := c.hostInfoLocked() backendLogID := hi.BackendLogID connectionHandleForTest := c.connectionHandleForTest + tkaHead := c.tkaHead var epStrs []string var eps []netip.AddrPort var epTypes []tailcfg.EndpointType @@ -906,7 +907,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap Hostinfo: hi, DebugFlags: c.debugFlags, OmitPeers: nu == nil, - TKAHead: c.tkaHead, + TKAHead: tkaHead, ConnectionHandleForTest: connectionHandleForTest, } var extraDebugFlags []string From e4d2822afcf4e0a2a3ffd6cf54ac256d8291c10f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 13 Aug 2025 17:19:32 -0700 Subject: [PATCH 1187/1708] go.toolchain.rev: bump Go for data race in Go http client Updates golang/go#73522 Updates tailscale/go#131 Updates tailscale/corp#31133 Change-Id: Ibb7a98944ef287d455ce4f5d202b2e2bd6d8742b Signed-off-by: Brad Fitzpatrick --- go.toolchain.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index fa951ac1b..6e3bd7ff9 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -606f294beebf9df5754804710cd5e16d30532692 +54f31cd8fc7b3d7d87c1ea455c8bb4b33372f706 From 5402620db804a8f88cc5eb249b3fc3802310012d Mon Sep 17 00:00:00 2001 From: Will Hannah Date: Thu, 14 Aug 2025 07:19:20 -0700 Subject: [PATCH 1188/1708] net/tshttpproxy: add macOS support for system proxy (#16826) Adds a setter for proxyFunc to allow macOS to pull defined system proxies. Disallows overriding if proxyFunc is set via config. 
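(Illustration only, not part of this patch: a minimal sketch of how a macOS client might wire up the new hook. The resolver passed in is a hypothetical helper that consults the system proxy settings; the real wiring lives in the platform GUI code, outside this repository.)

    package proxyinit // hypothetical package, for illustration only

    import (
        "log"
        "net/url"

        "tailscale.com/net/tshttpproxy"
    )

    // registerSystemProxy installs resolve as the proxy resolver used by
    // tshttpproxy. resolve is assumed to consult the macOS system proxy
    // configuration; it is not defined by this change.
    func registerSystemProxy(resolve func(*url.URL) (*url.URL, error)) {
        if err := tshttpproxy.SetProxyFunc(resolve); err != nil {
            // SetProxyFunc returns an error when proxy settings were
            // already loaded from config; the config then takes priority.
            log.Printf("tshttpproxy: %v", err)
        }
    }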
Updates tailscale/corp#30668 Signed-off-by: Will Hannah --- net/tshttpproxy/tshttpproxy.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/net/tshttpproxy/tshttpproxy.go b/net/tshttpproxy/tshttpproxy.go index 2ca440b57..ab2fd39e3 100644 --- a/net/tshttpproxy/tshttpproxy.go +++ b/net/tshttpproxy/tshttpproxy.go @@ -38,6 +38,23 @@ var ( proxyFunc func(*url.URL) (*url.URL, error) ) +// SetProxyFunc can be used by clients to set a platform-specific function for proxy resolution. +// If config is set when this function is called, an error will be returned. +// The provided function should return a proxy URL for the given request URL, +// nil if no proxy is enabled for the request URL, or an error if proxy settings cannot be resolved. +func SetProxyFunc(fn func(*url.URL) (*url.URL, error)) error { + mu.Lock() + defer mu.Unlock() + + // Allow override only if config is not set + if config != nil { + return fmt.Errorf("tshttpproxy: SetProxyFunc can only be called when config is not set") + } + + proxyFunc = fn + return nil +} + func getProxyFunc() func(*url.URL) (*url.URL, error) { // Create config/proxyFunc if it's not created mu.Lock() From 819db6759cd5087c47ba83598be5f67c936ab156 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Thu, 14 Aug 2025 14:02:19 +0100 Subject: [PATCH 1189/1708] tka: block key addition when the max number of keys is reached Updates #16607 Signed-off-by: Anton Tolchanov --- tka/builder.go | 5 +++++ tka/builder_test.go | 44 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/tka/builder.go b/tka/builder.go index c14ba2330..ec38bb6fa 100644 --- a/tka/builder.go +++ b/tka/builder.go @@ -67,6 +67,11 @@ func (b *UpdateBuilder) AddKey(key Key) error { if _, err := b.state.GetKey(keyID); err == nil { return fmt.Errorf("cannot add key %v: already exists", key) } + + if len(b.state.Keys) >= maxKeys { + return fmt.Errorf("cannot add key %v: maximum number of keys reached", key) + } + return b.mkUpdate(AUM{MessageKind: AUMAddKey, Key: &key}) } diff --git a/tka/builder_test.go b/tka/builder_test.go index 3dbd4347a..52907186b 100644 --- a/tka/builder_test.go +++ b/tka/builder_test.go @@ -58,6 +58,50 @@ func TestAuthorityBuilderAddKey(t *testing.T) { t.Errorf("could not read new key: %v", err) } } +func TestAuthorityBuilderMaxKey(t *testing.T) { + pub, priv := testingKey25519(t, 1) + key := Key{Kind: Key25519, Public: pub, Votes: 2} + + storage := &Mem{} + a, _, err := Create(storage, State{ + Keys: []Key{key}, + DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, + }, signer25519(priv)) + if err != nil { + t.Fatalf("Create() failed: %v", err) + } + + for i := 0; i <= maxKeys; i++ { + pub2, _ := testingKey25519(t, int64(2+i)) + key2 := Key{Kind: Key25519, Public: pub2, Votes: 1} + + b := a.NewUpdater(signer25519(priv)) + err := b.AddKey(key2) + if i < maxKeys-1 { + if err != nil { + t.Fatalf("AddKey(%v) failed: %v", key2, err) + } + } else { + // Too many keys. 
+ if err == nil { + t.Fatalf("AddKey(%v) succeeded unexpectedly", key2) + } + continue + } + + updates, err := b.Finalize(storage) + if err != nil { + t.Fatalf("Finalize() failed: %v", err) + } + + if err := a.Inform(storage, updates); err != nil { + t.Fatalf("could not apply generated updates: %v", err) + } + if _, err := a.state.GetKey(key2.MustID()); err != nil { + t.Errorf("could not read new key: %v", err) + } + } +} func TestAuthorityBuilderRemoveKey(t *testing.T) { pub, priv := testingKey25519(t, 1) From c083a9b05330372aa0435f4c89fb1784c826f9bb Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 14 Aug 2025 10:48:06 -0700 Subject: [PATCH 1190/1708] net/batching: fix compile-time assert (#16864) Updates #cleanup Signed-off-by: Jordan Whited --- net/batching/conn_linux.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batching/conn_linux.go b/net/batching/conn_linux.go index 0416c2729..09a80ed9f 100644 --- a/net/batching/conn_linux.go +++ b/net/batching/conn_linux.go @@ -45,7 +45,7 @@ type xnetBatchWriter interface { var ( // [linuxBatchingConn] implements [Conn]. - _ Conn = &linuxBatchingConn{} + _ Conn = (*linuxBatchingConn)(nil) ) // linuxBatchingConn is a UDP socket that provides batched i/o. It implements From fbb91758ac41d279bf67103d204690ba8520afa2 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Thu, 14 Aug 2025 13:46:48 -0700 Subject: [PATCH 1191/1708] cmd/viewer, types/views: implement support for json/v2 (#16852) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support for having every viewer type implement jsonv2.MarshalerTo and jsonv2.UnmarshalerFrom. This provides a significant boost in performance as the json package no longer needs to validate the entirety of the JSON value outputted by MarshalJSON, nor does it need to identify the boundaries of a JSON value in order to call UnmarshalJSON. For deeply nested and recursive MarshalJSON or UnmarshalJSON calls, this can improve runtime from O(N²) to O(N). This still references "github.com/go-json-experiment/json" instead of the experimental "encoding/json/v2" package now available in Go 1.25 under goexperiment.jsonv2 so that code still builds without the experiment tag. Of note, the "github.com/go-json-experiment/json" package aliases the standard library under the right build conditions. 
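(Illustration only, not part of this patch: a rough sketch of what the generated methods mean for callers. StructWithPtrsView, one of the generated test types in the diff below, stands in for any cmd/viewer-generated view; the roundTrip function name is made up for the example.)

    package tests // sketch, assuming the cmd/viewer/tests package

    import (
        jsonv2 "github.com/go-json-experiment/json"
    )

    // roundTrip encodes and decodes a view with the json/v2 API. Marshal
    // dispatches to the generated MarshalJSONTo and Unmarshal to
    // UnmarshalJSONFrom, so nested views no longer produce intermediate
    // []byte values that must be re-validated.
    func roundTrip(v StructWithPtrsView) (StructWithPtrsView, error) {
        b, err := jsonv2.Marshal(v)
        if err != nil {
            return StructWithPtrsView{}, err
        }
        var out StructWithPtrsView
        err = jsonv2.Unmarshal(b, &out)
        return out, err
    }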
Updates tailscale/corp#791 Signed-off-by: Joe Tsai --- cmd/cloner/cloner.go | 12 +- cmd/stund/depaware.txt | 2 +- cmd/viewer/tests/tests_view.go | 288 +++++++++- cmd/viewer/viewer.go | 40 +- cmd/viewer/viewer_test.go | 12 +- drive/drive_view.go | 30 +- go.mod | 2 +- go.sum | 4 +- ipn/ipn_view.go | 186 ++++++- tailcfg/tailcfg_view.go | 524 ++++++++++++++++-- types/dnstype/dnstype_view.go | 30 +- types/persist/persist_view.go | 30 +- .../prefs/prefs_example/prefs_example_view.go | 82 ++- types/prefs/prefs_view_test.go | 135 ++++- types/views/views.go | 166 ++++-- types/views/views_test.go | 92 ++- util/codegen/codegen.go | 29 +- 17 files changed, 1463 insertions(+), 201 deletions(-) diff --git a/cmd/cloner/cloner.go b/cmd/cloner/cloner.go index a1ffc30fe..15a808141 100644 --- a/cmd/cloner/cloner.go +++ b/cmd/cloner/cloner.go @@ -136,13 +136,13 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("if src.%s[i] == nil { dst.%s[i] = nil } else {", fname, fname) if codegen.ContainsPointers(ptr.Elem()) { if _, isIface := ptr.Elem().Underlying().(*types.Interface); isIface { - it.Import("tailscale.com/types/ptr") + it.Import("", "tailscale.com/types/ptr") writef("\tdst.%s[i] = ptr.To((*src.%s[i]).Clone())", fname, fname) } else { writef("\tdst.%s[i] = src.%s[i].Clone()", fname, fname) } } else { - it.Import("tailscale.com/types/ptr") + it.Import("", "tailscale.com/types/ptr") writef("\tdst.%s[i] = ptr.To(*src.%s[i])", fname, fname) } writef("}") @@ -165,7 +165,7 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("dst.%s = src.%s.Clone()", fname, fname) continue } - it.Import("tailscale.com/types/ptr") + it.Import("", "tailscale.com/types/ptr") writef("if dst.%s != nil {", fname) if _, isIface := base.Underlying().(*types.Interface); isIface && hasPtrs { writef("\tdst.%s = ptr.To((*src.%s).Clone())", fname, fname) @@ -197,13 +197,13 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("\t\tif v == nil { dst.%s[k] = nil } else {", fname) if base := elem.Elem().Underlying(); codegen.ContainsPointers(base) { if _, isIface := base.(*types.Interface); isIface { - it.Import("tailscale.com/types/ptr") + it.Import("", "tailscale.com/types/ptr") writef("\t\t\tdst.%s[k] = ptr.To((*v).Clone())", fname) } else { writef("\t\t\tdst.%s[k] = v.Clone()", fname) } } else { - it.Import("tailscale.com/types/ptr") + it.Import("", "tailscale.com/types/ptr") writef("\t\t\tdst.%s[k] = ptr.To(*v)", fname) } writef("}") @@ -224,7 +224,7 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("\t}") writef("}") } else { - it.Import("maps") + it.Import("", "maps") writef("\tdst.%s = maps.Clone(src.%s)", fname, fname) } case *types.Interface: diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index d389d59a3..8e4db75ae 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -2,7 +2,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus - github.com/go-json-experiment/json from tailscale.com/types/opt + github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ 
github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ diff --git a/cmd/viewer/tests/tests_view.go b/cmd/viewer/tests/tests_view.go index f1d8f424f..bc95fea01 100644 --- a/cmd/viewer/tests/tests_view.go +++ b/cmd/viewer/tests/tests_view.go @@ -6,10 +6,12 @@ package tests import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "golang.org/x/exp/constraints" "tailscale.com/types/views" ) @@ -44,8 +46,17 @@ func (v StructWithPtrsView) AsStruct() *StructWithPtrs { return v.ж.Clone() } -func (v StructWithPtrsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithPtrsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithPtrsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *StructWithPtrsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -54,7 +65,20 @@ func (v *StructWithPtrsView) UnmarshalJSON(b []byte) error { return nil } var x StructWithPtrs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithPtrsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x StructWithPtrs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -108,8 +132,17 @@ func (v StructWithoutPtrsView) AsStruct() *StructWithoutPtrs { return v.ж.Clone() } -func (v StructWithoutPtrsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithoutPtrsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithoutPtrsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *StructWithoutPtrsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -118,7 +151,20 @@ func (v *StructWithoutPtrsView) UnmarshalJSON(b []byte) error { return nil } var x StructWithoutPtrs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithoutPtrsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x StructWithoutPtrs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -162,8 +208,17 @@ func (v MapView) AsStruct() *Map { return v.ж.Clone() } -func (v MapView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v MapView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v MapView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *MapView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -172,7 +227,20 @@ func (v *MapView) UnmarshalJSON(b []byte) error { return nil } var x Map - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *MapView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Map + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -268,8 +336,17 @@ func (v StructWithSlicesView) AsStruct() *StructWithSlices { return v.ж.Clone() } -func (v StructWithSlicesView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithSlicesView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithSlicesView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *StructWithSlicesView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -278,7 +355,20 @@ func (v *StructWithSlicesView) UnmarshalJSON(b []byte) error { return nil } var x StructWithSlices - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithSlicesView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x StructWithSlices + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -342,8 +432,17 @@ func (v StructWithEmbeddedView) AsStruct() *StructWithEmbedded { return v.ж.Clone() } -func (v StructWithEmbeddedView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithEmbeddedView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithEmbeddedView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *StructWithEmbeddedView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -352,7 +451,20 @@ func (v *StructWithEmbeddedView) UnmarshalJSON(b []byte) error { return nil } var x StructWithEmbedded - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithEmbeddedView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x StructWithEmbedded + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -398,8 +510,17 @@ func (v GenericIntStructView[T]) AsStruct() *GenericIntStruct[T] { return v.ж.Clone() } -func (v GenericIntStructView[T]) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v GenericIntStructView[T]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v GenericIntStructView[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *GenericIntStructView[T]) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -408,7 +529,20 @@ func (v *GenericIntStructView[T]) UnmarshalJSON(b []byte) error { return nil } var x GenericIntStruct[T] - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *GenericIntStructView[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x GenericIntStruct[T] + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -470,8 +604,17 @@ func (v GenericNoPtrsStructView[T]) AsStruct() *GenericNoPtrsStruct[T] { return v.ж.Clone() } -func (v GenericNoPtrsStructView[T]) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v GenericNoPtrsStructView[T]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v GenericNoPtrsStructView[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *GenericNoPtrsStructView[T]) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -480,7 +623,20 @@ func (v *GenericNoPtrsStructView[T]) UnmarshalJSON(b []byte) error { return nil } var x GenericNoPtrsStruct[T] - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *GenericNoPtrsStructView[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x GenericNoPtrsStruct[T] + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -542,8 +698,17 @@ func (v GenericCloneableStructView[T, V]) AsStruct() *GenericCloneableStruct[T, return v.ж.Clone() } -func (v GenericCloneableStructView[T, V]) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v GenericCloneableStructView[T, V]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v GenericCloneableStructView[T, V]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *GenericCloneableStructView[T, V]) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -552,7 +717,20 @@ func (v *GenericCloneableStructView[T, V]) UnmarshalJSON(b []byte) error { return nil } var x GenericCloneableStruct[T, V] - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *GenericCloneableStructView[T, V]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x GenericCloneableStruct[T, V] + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -617,8 +795,17 @@ func (v StructWithContainersView) AsStruct() *StructWithContainers { return v.ж.Clone() } -func (v StructWithContainersView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithContainersView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithContainersView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *StructWithContainersView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -627,7 +814,20 @@ func (v *StructWithContainersView) UnmarshalJSON(b []byte) error { return nil } var x StructWithContainers - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithContainersView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x StructWithContainers + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -689,8 +889,17 @@ func (v StructWithTypeAliasFieldsView) AsStruct() *StructWithTypeAliasFields { return v.ж.Clone() } -func (v StructWithTypeAliasFieldsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithTypeAliasFieldsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithTypeAliasFieldsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *StructWithTypeAliasFieldsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -699,7 +908,20 @@ func (v *StructWithTypeAliasFieldsView) UnmarshalJSON(b []byte) error { return nil } var x StructWithTypeAliasFields - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithTypeAliasFieldsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x StructWithTypeAliasFields + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -787,10 +1009,17 @@ func (v GenericTypeAliasStructView[T, T2, V2]) AsStruct() *GenericTypeAliasStruc return v.ж.Clone() } +// MarshalJSON implements [jsonv1.Marshaler]. func (v GenericTypeAliasStructView[T, T2, V2]) MarshalJSON() ([]byte, error) { - return json.Marshal(v.ж) + return jsonv1.Marshal(v.ж) } +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v GenericTypeAliasStructView[T, T2, V2]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *GenericTypeAliasStructView[T, T2, V2]) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -799,7 +1028,20 @@ func (v *GenericTypeAliasStructView[T, T2, V2]) UnmarshalJSON(b []byte) error { return nil } var x GenericTypeAliasStruct[T, T2, V2] - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *GenericTypeAliasStructView[T, T2, V2]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x GenericTypeAliasStruct[T, T2, V2] + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x diff --git a/cmd/viewer/viewer.go b/cmd/viewer/viewer.go index 2d30cc2eb..a9617ac10 100644 --- a/cmd/viewer/viewer.go +++ b/cmd/viewer/viewer.go @@ -49,8 +49,17 @@ func (v {{.ViewName}}{{.TypeParamNames}}) AsStruct() *{{.StructName}}{{.TypePara return v.ж.Clone() } -func (v {{.ViewName}}{{.TypeParamNames}}) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v {{.ViewName}}{{.TypeParamNames}}) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v {{.ViewName}}{{.TypeParamNames}}) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *{{.ViewName}}{{.TypeParamNames}}) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -59,10 +68,23 @@ func (v *{{.ViewName}}{{.TypeParamNames}}) UnmarshalJSON(b []byte) error { return nil } var x {{.StructName}}{{.TypeParamNames}} - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *{{.ViewName}}{{.TypeParamNames}}) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x {{.StructName}}{{.TypeParamNames}} + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } - v.ж=&x + v.ж = &x return nil } @@ -125,8 +147,10 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * if !ok || codegen.IsViewType(t) { return } - it.Import("encoding/json") - it.Import("errors") + it.Import("jsonv1", "encoding/json") + it.Import("jsonv2", "github.com/go-json-experiment/json") + it.Import("", "github.com/go-json-experiment/json/jsontext") + it.Import("", "errors") args := struct { StructName string @@ -182,11 +206,11 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * switch elem.String() { case "byte": args.FieldType = it.QualifiedName(fieldType) - it.Import("tailscale.com/types/views") + it.Import("", "tailscale.com/types/views") writeTemplate("byteSliceField") default: args.FieldType = it.QualifiedName(elem) - it.Import("tailscale.com/types/views") + it.Import("", "tailscale.com/types/views") shallow, deep, base := requiresCloning(elem) if deep { switch elem.Underlying().(type) { @@ -252,7 +276,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * writeTemplate("unsupportedField") continue } - it.Import("tailscale.com/types/views") + it.Import("", "tailscale.com/types/views") args.MapKeyType = it.QualifiedName(key) mElem := m.Elem() var template string diff --git a/cmd/viewer/viewer_test.go b/cmd/viewer/viewer_test.go index cd5f3d95f..d12d49655 100644 --- a/cmd/viewer/viewer_test.go +++ b/cmd/viewer/viewer_test.go @@ -20,19 +20,19 @@ func TestViewerImports(t *testing.T) { name string content string typeNames []string - wantImports []string + wantImports [][2]string }{ { name: "Map", content: `type Test struct { Map map[string]int }`, typeNames: []string{"Test"}, - wantImports: []string{"tailscale.com/types/views"}, + wantImports: [][2]string{{"", "tailscale.com/types/views"}}, }, { name: "Slice", content: `type Test struct { Slice []int }`, typeNames: []string{"Test"}, - wantImports: []string{"tailscale.com/types/views"}, + wantImports: [][2]string{{"", "tailscale.com/types/views"}}, }, } for _, tt := range tests { @@ -68,9 +68,9 @@ func TestViewerImports(t *testing.T) { genView(&output, tracker, namedType, pkg) } - for _, pkgName := range tt.wantImports { - if !tracker.Has(pkgName) { - t.Errorf("missing import %q", pkgName) + for _, pkg := range tt.wantImports { + if !tracker.Has(pkg[0], pkg[1]) { + t.Errorf("missing import %q", pkg) } } }) diff --git a/drive/drive_view.go b/drive/drive_view.go index 0f6686f24..6338705a6 100644 --- a/drive/drive_view.go +++ b/drive/drive_view.go @@ -6,9 +6,11 @@ package drive import ( - "encoding/json" + jsonv1 "encoding/json" "errors" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/views" ) @@ -42,8 +44,17 @@ func (v ShareView) AsStruct() *Share { return v.ж.Clone() } -func (v ShareView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ShareView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ShareView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *ShareView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -52,7 +63,20 @@ func (v *ShareView) UnmarshalJSON(b []byte) error { return nil } var x Share - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *ShareView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Share + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x diff --git a/go.mod b/go.mod index 28b2a764f..fba5a4f54 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( github.com/frankban/quicktest v1.14.6 github.com/fxamacker/cbor/v2 v2.7.0 github.com/gaissmai/bart v0.18.0 - github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 + github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced github.com/go-logr/zapr v1.3.0 github.com/go-ole/go-ole v1.3.0 github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 diff --git a/go.sum b/go.sum index 23ca2dc9b..df5d27313 100644 --- a/go.sum +++ b/go.sum @@ -345,8 +345,8 @@ github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0q github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 h1:F8d1AJ6M9UQCavhwmO6ZsrYLfG8zVFWfEfMS2MXPkSY= -github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M= +github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced h1:Q311OHjMh/u5E2TITc++WlTP5We0xNseRMkHDyvhW7I= +github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 1d31ced9d..0f0f652d1 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -6,10 +6,12 @@ package ipn import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/drive" "tailscale.com/tailcfg" "tailscale.com/types/opt" @@ -48,8 +50,17 @@ func (v LoginProfileView) AsStruct() *LoginProfile { return v.ж.Clone() } -func (v LoginProfileView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v LoginProfileView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v LoginProfileView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *LoginProfileView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -58,7 +69,20 @@ func (v *LoginProfileView) UnmarshalJSON(b []byte) error { return nil } var x LoginProfile - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *LoginProfileView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x LoginProfile + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -114,8 +138,17 @@ func (v PrefsView) AsStruct() *Prefs { return v.ж.Clone() } -func (v PrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v PrefsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v PrefsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *PrefsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -124,7 +157,20 @@ func (v *PrefsView) UnmarshalJSON(b []byte) error { return nil } var x Prefs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *PrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Prefs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -239,8 +285,17 @@ func (v ServeConfigView) AsStruct() *ServeConfig { return v.ж.Clone() } -func (v ServeConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ServeConfigView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ServeConfigView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *ServeConfigView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -249,7 +304,20 @@ func (v *ServeConfigView) UnmarshalJSON(b []byte) error { return nil } var x ServeConfig - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *ServeConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x ServeConfig + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -323,8 +391,17 @@ func (v ServiceConfigView) AsStruct() *ServiceConfig { return v.ж.Clone() } -func (v ServiceConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ServiceConfigView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v ServiceConfigView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *ServiceConfigView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -333,7 +410,20 @@ func (v *ServiceConfigView) UnmarshalJSON(b []byte) error { return nil } var x ServiceConfig - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *ServiceConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x ServiceConfig + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -388,8 +478,17 @@ func (v TCPPortHandlerView) AsStruct() *TCPPortHandler { return v.ж.Clone() } -func (v TCPPortHandlerView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v TCPPortHandlerView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v TCPPortHandlerView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *TCPPortHandlerView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -398,7 +497,20 @@ func (v *TCPPortHandlerView) UnmarshalJSON(b []byte) error { return nil } var x TCPPortHandler - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *TCPPortHandlerView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x TCPPortHandler + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -446,8 +558,17 @@ func (v HTTPHandlerView) AsStruct() *HTTPHandler { return v.ж.Clone() } -func (v HTTPHandlerView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v HTTPHandlerView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v HTTPHandlerView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *HTTPHandlerView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -456,7 +577,20 @@ func (v *HTTPHandlerView) UnmarshalJSON(b []byte) error { return nil } var x HTTPHandler - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *HTTPHandlerView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x HTTPHandler + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -502,8 +636,17 @@ func (v WebServerConfigView) AsStruct() *WebServerConfig { return v.ж.Clone() } -func (v WebServerConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. 
+func (v WebServerConfigView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v WebServerConfigView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *WebServerConfigView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -512,7 +655,20 @@ func (v *WebServerConfigView) UnmarshalJSON(b []byte) error { return nil } var x WebServerConfig - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *WebServerConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x WebServerConfig + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index c40780021..8dc4f1ca8 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -6,11 +6,13 @@ package tailcfg import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" "time" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/opt" @@ -49,8 +51,17 @@ func (v UserView) AsStruct() *User { return v.ж.Clone() } -func (v UserView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v UserView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v UserView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *UserView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -59,7 +70,20 @@ func (v *UserView) UnmarshalJSON(b []byte) error { return nil } var x User - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *UserView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x User + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -107,8 +131,17 @@ func (v NodeView) AsStruct() *Node { return v.ж.Clone() } -func (v NodeView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v NodeView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v NodeView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *NodeView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -117,7 +150,20 @@ func (v *NodeView) UnmarshalJSON(b []byte) error { return nil } var x Node - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *NodeView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Node + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -246,8 +292,17 @@ func (v HostinfoView) AsStruct() *Hostinfo { return v.ж.Clone() } -func (v HostinfoView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v HostinfoView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v HostinfoView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *HostinfoView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -256,7 +311,20 @@ func (v *HostinfoView) UnmarshalJSON(b []byte) error { return nil } var x Hostinfo - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *HostinfoView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Hostinfo + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -380,8 +448,17 @@ func (v NetInfoView) AsStruct() *NetInfo { return v.ж.Clone() } -func (v NetInfoView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v NetInfoView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v NetInfoView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *NetInfoView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -390,7 +467,20 @@ func (v *NetInfoView) UnmarshalJSON(b []byte) error { return nil } var x NetInfo - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *NetInfoView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x NetInfo + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -460,8 +550,17 @@ func (v LoginView) AsStruct() *Login { return v.ж.Clone() } -func (v LoginView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v LoginView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v LoginView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *LoginView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -470,7 +569,20 @@ func (v *LoginView) UnmarshalJSON(b []byte) error { return nil } var x Login - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *LoginView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Login + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -521,8 +633,17 @@ func (v DNSConfigView) AsStruct() *DNSConfig { return v.ж.Clone() } -func (v DNSConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v DNSConfigView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v DNSConfigView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *DNSConfigView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -531,7 +652,20 @@ func (v *DNSConfigView) UnmarshalJSON(b []byte) error { return nil } var x DNSConfig - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *DNSConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x DNSConfig + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -602,8 +736,17 @@ func (v RegisterResponseView) AsStruct() *RegisterResponse { return v.ж.Clone() } -func (v RegisterResponseView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v RegisterResponseView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v RegisterResponseView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *RegisterResponseView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -612,7 +755,20 @@ func (v *RegisterResponseView) UnmarshalJSON(b []byte) error { return nil } var x RegisterResponse - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *RegisterResponseView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x RegisterResponse + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -668,8 +824,17 @@ func (v RegisterResponseAuthView) AsStruct() *RegisterResponseAuth { return v.ж.Clone() } -func (v RegisterResponseAuthView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v RegisterResponseAuthView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v RegisterResponseAuthView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *RegisterResponseAuthView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -678,7 +843,20 @@ func (v *RegisterResponseAuthView) UnmarshalJSON(b []byte) error { return nil } var x RegisterResponseAuth - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *RegisterResponseAuthView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x RegisterResponseAuth + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -726,8 +904,17 @@ func (v RegisterRequestView) AsStruct() *RegisterRequest { return v.ж.Clone() } -func (v RegisterRequestView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v RegisterRequestView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v RegisterRequestView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *RegisterRequestView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -736,7 +923,20 @@ func (v *RegisterRequestView) UnmarshalJSON(b []byte) error { return nil } var x RegisterRequest - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *RegisterRequestView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x RegisterRequest + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -816,8 +1016,17 @@ func (v DERPHomeParamsView) AsStruct() *DERPHomeParams { return v.ж.Clone() } -func (v DERPHomeParamsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v DERPHomeParamsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v DERPHomeParamsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *DERPHomeParamsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -826,7 +1035,20 @@ func (v *DERPHomeParamsView) UnmarshalJSON(b []byte) error { return nil } var x DERPHomeParams - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *DERPHomeParamsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x DERPHomeParams + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -870,8 +1092,17 @@ func (v DERPRegionView) AsStruct() *DERPRegion { return v.ж.Clone() } -func (v DERPRegionView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v DERPRegionView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v DERPRegionView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *DERPRegionView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -880,7 +1111,20 @@ func (v *DERPRegionView) UnmarshalJSON(b []byte) error { return nil } var x DERPRegion - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *DERPRegionView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x DERPRegion + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -938,8 +1182,17 @@ func (v DERPMapView) AsStruct() *DERPMap { return v.ж.Clone() } -func (v DERPMapView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v DERPMapView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v DERPMapView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *DERPMapView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -948,7 +1201,20 @@ func (v *DERPMapView) UnmarshalJSON(b []byte) error { return nil } var x DERPMap - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *DERPMapView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x DERPMap + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -999,8 +1265,17 @@ func (v DERPNodeView) AsStruct() *DERPNode { return v.ж.Clone() } -func (v DERPNodeView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v DERPNodeView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v DERPNodeView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *DERPNodeView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -1009,7 +1284,20 @@ func (v *DERPNodeView) UnmarshalJSON(b []byte) error { return nil } var x DERPNode - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *DERPNodeView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x DERPNode + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -1073,8 +1361,17 @@ func (v SSHRuleView) AsStruct() *SSHRule { return v.ж.Clone() } -func (v SSHRuleView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v SSHRuleView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v SSHRuleView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *SSHRuleView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -1083,7 +1380,20 @@ func (v *SSHRuleView) UnmarshalJSON(b []byte) error { return nil } var x SSHRule - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *SSHRuleView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x SSHRule + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -1139,8 +1449,17 @@ func (v SSHActionView) AsStruct() *SSHAction { return v.ж.Clone() } -func (v SSHActionView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v SSHActionView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v SSHActionView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *SSHActionView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -1149,7 +1468,20 @@ func (v *SSHActionView) UnmarshalJSON(b []byte) error { return nil } var x SSHAction - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *SSHActionView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x SSHAction + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -1211,8 +1543,17 @@ func (v SSHPrincipalView) AsStruct() *SSHPrincipal { return v.ж.Clone() } -func (v SSHPrincipalView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v SSHPrincipalView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v SSHPrincipalView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *SSHPrincipalView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -1221,7 +1562,20 @@ func (v *SSHPrincipalView) UnmarshalJSON(b []byte) error { return nil } var x SSHPrincipal - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *SSHPrincipalView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x SSHPrincipal + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -1273,8 +1627,17 @@ func (v ControlDialPlanView) AsStruct() *ControlDialPlan { return v.ж.Clone() } -func (v ControlDialPlanView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. 
+func (v ControlDialPlanView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ControlDialPlanView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *ControlDialPlanView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -1283,7 +1646,20 @@ func (v *ControlDialPlanView) UnmarshalJSON(b []byte) error { return nil } var x ControlDialPlan - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *ControlDialPlanView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x ControlDialPlan + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -1327,8 +1703,17 @@ func (v LocationView) AsStruct() *Location { return v.ж.Clone() } -func (v LocationView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v LocationView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v LocationView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *LocationView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -1337,7 +1722,20 @@ func (v *LocationView) UnmarshalJSON(b []byte) error { return nil } var x Location - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *LocationView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Location + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -1391,8 +1789,17 @@ func (v UserProfileView) AsStruct() *UserProfile { return v.ж.Clone() } -func (v UserProfileView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v UserProfileView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v UserProfileView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *UserProfileView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -1401,7 +1808,20 @@ func (v *UserProfileView) UnmarshalJSON(b []byte) error { return nil } var x UserProfile - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *UserProfileView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x UserProfile + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -1450,8 +1870,17 @@ func (v VIPServiceView) AsStruct() *VIPService { return v.ж.Clone() } -func (v VIPServiceView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v VIPServiceView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v VIPServiceView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *VIPServiceView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -1460,7 +1889,20 @@ func (v *VIPServiceView) UnmarshalJSON(b []byte) error { return nil } var x VIPService - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *VIPServiceView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x VIPService + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x diff --git a/types/dnstype/dnstype_view.go b/types/dnstype/dnstype_view.go index c77ff9a40..3d374ab47 100644 --- a/types/dnstype/dnstype_view.go +++ b/types/dnstype/dnstype_view.go @@ -6,10 +6,12 @@ package dnstype import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/views" ) @@ -43,8 +45,17 @@ func (v ResolverView) AsStruct() *Resolver { return v.ж.Clone() } -func (v ResolverView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ResolverView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ResolverView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *ResolverView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -53,7 +64,20 @@ func (v *ResolverView) UnmarshalJSON(b []byte) error { return nil } var x Resolver - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *ResolverView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Resolver + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x diff --git a/types/persist/persist_view.go b/types/persist/persist_view.go index 55eb40c51..99a86a6a5 100644 --- a/types/persist/persist_view.go +++ b/types/persist/persist_view.go @@ -6,9 +6,11 @@ package persist import ( - "encoding/json" + jsonv1 "encoding/json" "errors" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/structs" @@ -45,8 +47,17 @@ func (v PersistView) AsStruct() *Persist { return v.ж.Clone() } -func (v PersistView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v PersistView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v PersistView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *PersistView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -55,7 +66,20 @@ func (v *PersistView) UnmarshalJSON(b []byte) error { return nil } var x Persist - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *PersistView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Persist + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x diff --git a/types/prefs/prefs_example/prefs_example_view.go b/types/prefs/prefs_example/prefs_example_view.go index 9aaac6e9c..afc9f1781 100644 --- a/types/prefs/prefs_example/prefs_example_view.go +++ b/types/prefs/prefs_example/prefs_example_view.go @@ -6,10 +6,12 @@ package prefs_example import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/drive" "tailscale.com/tailcfg" "tailscale.com/types/opt" @@ -48,8 +50,17 @@ func (v PrefsView) AsStruct() *Prefs { return v.ж.Clone() } -func (v PrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v PrefsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v PrefsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *PrefsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -58,7 +69,20 @@ func (v *PrefsView) UnmarshalJSON(b []byte) error { return nil } var x Prefs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *PrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Prefs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -160,8 +184,17 @@ func (v AutoUpdatePrefsView) AsStruct() *AutoUpdatePrefs { return v.ж.Clone() } -func (v AutoUpdatePrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v AutoUpdatePrefsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v AutoUpdatePrefsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *AutoUpdatePrefsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -170,7 +203,20 @@ func (v *AutoUpdatePrefsView) UnmarshalJSON(b []byte) error { return nil } var x AutoUpdatePrefs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *AutoUpdatePrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x AutoUpdatePrefs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -214,8 +260,17 @@ func (v AppConnectorPrefsView) AsStruct() *AppConnectorPrefs { return v.ж.Clone() } -func (v AppConnectorPrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v AppConnectorPrefsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v AppConnectorPrefsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *AppConnectorPrefsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -224,7 +279,20 @@ func (v *AppConnectorPrefsView) UnmarshalJSON(b []byte) error { return nil } var x AppConnectorPrefs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *AppConnectorPrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x AppConnectorPrefs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x diff --git a/types/prefs/prefs_view_test.go b/types/prefs/prefs_view_test.go index f6cfc918d..44c3beb87 100644 --- a/types/prefs/prefs_view_test.go +++ b/types/prefs/prefs_view_test.go @@ -6,9 +6,12 @@ package prefs import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" ) //go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=TestPrefs,TestBundle,TestValueStruct,TestGenericStruct,TestPrefsGroup -tags=test @@ -41,8 +44,17 @@ func (v TestPrefsView) AsStruct() *TestPrefs { return v.ж.Clone() } -func (v TestPrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. 
+func (v TestPrefsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v TestPrefsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *TestPrefsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -51,7 +63,20 @@ func (v *TestPrefsView) UnmarshalJSON(b []byte) error { return nil } var x TestPrefs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *TestPrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x TestPrefs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -145,8 +170,17 @@ func (v TestBundleView) AsStruct() *TestBundle { return v.ж.Clone() } -func (v TestBundleView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v TestBundleView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v TestBundleView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *TestBundleView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -155,7 +189,20 @@ func (v *TestBundleView) UnmarshalJSON(b []byte) error { return nil } var x TestBundle - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *TestBundleView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x TestBundle + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -200,8 +247,17 @@ func (v TestValueStructView) AsStruct() *TestValueStruct { return v.ж.Clone() } -func (v TestValueStructView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v TestValueStructView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v TestValueStructView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *TestValueStructView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -210,7 +266,20 @@ func (v *TestValueStructView) UnmarshalJSON(b []byte) error { return nil } var x TestValueStruct - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *TestValueStructView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x TestValueStruct + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -253,8 +322,17 @@ func (v TestGenericStructView[T]) AsStruct() *TestGenericStruct[T] { return v.ж.Clone() } -func (v TestGenericStructView[T]) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v TestGenericStructView[T]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v TestGenericStructView[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *TestGenericStructView[T]) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -263,7 +341,20 @@ func (v *TestGenericStructView[T]) UnmarshalJSON(b []byte) error { return nil } var x TestGenericStruct[T] - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *TestGenericStructView[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x TestGenericStruct[T] + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -308,8 +399,17 @@ func (v TestPrefsGroupView) AsStruct() *TestPrefsGroup { return v.ж.Clone() } -func (v TestPrefsGroupView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v TestPrefsGroupView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v TestPrefsGroupView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *TestPrefsGroupView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -318,7 +418,20 @@ func (v *TestPrefsGroupView) UnmarshalJSON(b []byte) error { return nil } var x TestPrefsGroup - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *TestPrefsGroupView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x TestPrefsGroup + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x diff --git a/types/views/views.go b/types/views/views.go index 3911f1112..6d15b80d4 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -7,7 +7,7 @@ package views import ( "bytes" - "encoding/json" + jsonv1 "encoding/json" "errors" "fmt" "iter" @@ -15,20 +15,12 @@ import ( "reflect" "slices" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "go4.org/mem" "tailscale.com/types/ptr" ) -func unmarshalSliceFromJSON[T any](b []byte, x *[]T) error { - if *x != nil { - return errors.New("already initialized") - } - if len(b) == 0 { - return nil - } - return json.Unmarshal(b, x) -} - // ByteSlice is a read-only accessor for types that are backed by a []byte. 
type ByteSlice[T ~[]byte] struct { // ж is the underlying mutable value, named with a hard-to-type @@ -93,15 +85,32 @@ func (v ByteSlice[T]) SliceTo(i int) ByteSlice[T] { return ByteSlice[T]{v.ж[:i] // Slice returns v[i:j] func (v ByteSlice[T]) Slice(i, j int) ByteSlice[T] { return ByteSlice[T]{v.ж[i:j]} } -// MarshalJSON implements json.Marshaler. -func (v ByteSlice[T]) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ByteSlice[T]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} -// UnmarshalJSON implements json.Unmarshaler. +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ByteSlice[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +// It must only be called on an uninitialized ByteSlice. func (v *ByteSlice[T]) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") } - return json.Unmarshal(b, &v.ж) + return jsonv1.Unmarshal(b, &v.ж) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It must only be called on an uninitialized ByteSlice. +func (v *ByteSlice[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &v.ж) } // StructView represents the corresponding StructView of a Viewable. The concrete types are @@ -159,11 +168,35 @@ func (v SliceView[T, V]) All() iter.Seq2[int, V] { } } -// MarshalJSON implements json.Marshaler. -func (v SliceView[T, V]) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v SliceView[T, V]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v SliceView[T, V]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} -// UnmarshalJSON implements json.Unmarshaler. -func (v *SliceView[T, V]) UnmarshalJSON(b []byte) error { return unmarshalSliceFromJSON(b, &v.ж) } +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +// It must only be called on an uninitialized SliceView. +func (v *SliceView[T, V]) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } else if len(b) == 0 { + return nil + } + return jsonv1.Unmarshal(b, &v.ж) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It must only be called on an uninitialized SliceView. +func (v *SliceView[T, V]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &v.ж) +} // IsNil reports whether the underlying slice is nil. func (v SliceView[T, V]) IsNil() bool { return v.ж == nil } @@ -252,14 +285,34 @@ func SliceOf[T any](x []T) Slice[T] { return Slice[T]{x} } -// MarshalJSON implements json.Marshaler. +// MarshalJSON implements [jsonv1.Marshaler]. func (v Slice[T]) MarshalJSON() ([]byte, error) { - return json.Marshal(v.ж) + return jsonv1.Marshal(v.ж) } -// UnmarshalJSON implements json.Unmarshaler. +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v Slice[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +// It must only be called on an uninitialized Slice. 
func (v *Slice[T]) UnmarshalJSON(b []byte) error { - return unmarshalSliceFromJSON(b, &v.ж) + if v.ж != nil { + return errors.New("already initialized") + } else if len(b) == 0 { + return nil + } + return jsonv1.Unmarshal(b, &v.ж) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It must only be called on an uninitialized Slice. +func (v *Slice[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &v.ж) } // IsNil reports whether the underlying slice is nil. @@ -512,18 +565,32 @@ func (m MapSlice[K, V]) GetOk(k K) (Slice[V], bool) { return SliceOf(v), ok } -// MarshalJSON implements json.Marshaler. +// MarshalJSON implements [jsonv1.Marshaler]. func (m MapSlice[K, V]) MarshalJSON() ([]byte, error) { - return json.Marshal(m.ж) + return jsonv1.Marshal(m.ж) } -// UnmarshalJSON implements json.Unmarshaler. +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (m MapSlice[K, V]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, m.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. // It should only be called on an uninitialized Map. func (m *MapSlice[K, V]) UnmarshalJSON(b []byte) error { if m.ж != nil { return errors.New("already initialized") } - return json.Unmarshal(b, &m.ж) + return jsonv1.Unmarshal(b, &m.ж) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It should only be called on an uninitialized MapSlice. +func (m *MapSlice[K, V]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if m.ж != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &m.ж) } // AsMap returns a shallow-clone of the underlying map. @@ -600,18 +667,32 @@ func (m Map[K, V]) GetOk(k K) (V, bool) { return v, ok } -// MarshalJSON implements json.Marshaler. +// MarshalJSON implements [jsonv1.Marshaler]. func (m Map[K, V]) MarshalJSON() ([]byte, error) { - return json.Marshal(m.ж) + return jsonv1.Marshal(m.ж) } -// UnmarshalJSON implements json.Unmarshaler. +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (m Map[K, V]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, m.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. // It should only be called on an uninitialized Map. func (m *Map[K, V]) UnmarshalJSON(b []byte) error { if m.ж != nil { return errors.New("already initialized") } - return json.Unmarshal(b, &m.ж) + return jsonv1.Unmarshal(b, &m.ж) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It must only be called on an uninitialized Map. +func (m *Map[K, V]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if m.ж != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &m.ж) } // AsMap returns a shallow-clone of the underlying map. @@ -809,17 +890,32 @@ func ValuePointerOf[T any](v *T) ValuePointer[T] { return ValuePointer[T]{v} } -// MarshalJSON implements [json.Marshaler]. +// MarshalJSON implements [jsonv1.Marshaler]. func (p ValuePointer[T]) MarshalJSON() ([]byte, error) { - return json.Marshal(p.ж) + return jsonv1.Marshal(p.ж) } -// UnmarshalJSON implements [json.Unmarshaler]. +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (p ValuePointer[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, p.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +// It must only be called on an uninitialized ValuePointer. 
func (p *ValuePointer[T]) UnmarshalJSON(b []byte) error { if p.ж != nil { return errors.New("already initialized") } - return json.Unmarshal(b, &p.ж) + return jsonv1.Unmarshal(b, &p.ж) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It must only be called on an uninitialized ValuePointer. +func (p *ValuePointer[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if p.ж != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &p.ж) } // ContainsPointers reports whether T contains any pointers, diff --git a/types/views/views_test.go b/types/views/views_test.go index 2205cbc03..5a30c11a1 100644 --- a/types/views/views_test.go +++ b/types/views/views_test.go @@ -4,8 +4,7 @@ package views import ( - "bytes" - "encoding/json" + jsonv1 "encoding/json" "fmt" "net/netip" "reflect" @@ -15,9 +14,27 @@ import ( "unsafe" qt "github.com/frankban/quicktest" + jsonv2 "github.com/go-json-experiment/json" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "tailscale.com/types/structs" ) +// Statically verify that each type implements the following interfaces. +var _ = []interface { + jsonv1.Marshaler + jsonv1.Unmarshaler + jsonv2.MarshalerTo + jsonv2.UnmarshalerFrom +}{ + (*ByteSlice[[]byte])(nil), + (*SliceView[*testStruct, testStructView])(nil), + (*Slice[testStruct])(nil), + (*MapSlice[*testStruct, testStructView])(nil), + (*Map[*testStruct, testStructView])(nil), + (*ValuePointer[testStruct])(nil), +} + type viewStruct struct { Int int Addrs Slice[netip.Prefix] @@ -83,14 +100,16 @@ func TestViewsJSON(t *testing.T) { ipp := SliceOf(mustCIDR("192.168.0.0/24")) ss := SliceOf([]string{"bar"}) tests := []struct { - name string - in viewStruct - wantJSON string + name string + in viewStruct + wantJSONv1 string + wantJSONv2 string }{ { - name: "empty", - in: viewStruct{}, - wantJSON: `{"Int":0,"Addrs":null,"Strings":null}`, + name: "empty", + in: viewStruct{}, + wantJSONv1: `{"Int":0,"Addrs":null,"Strings":null}`, + wantJSONv2: `{"Int":0,"Addrs":[],"Strings":[]}`, }, { name: "everything", @@ -101,30 +120,49 @@ func TestViewsJSON(t *testing.T) { StringsPtr: &ss, Strings: ss, }, - wantJSON: `{"Int":1234,"Addrs":["192.168.0.0/24"],"Strings":["bar"],"AddrsPtr":["192.168.0.0/24"],"StringsPtr":["bar"]}`, + wantJSONv1: `{"Int":1234,"Addrs":["192.168.0.0/24"],"Strings":["bar"],"AddrsPtr":["192.168.0.0/24"],"StringsPtr":["bar"]}`, + wantJSONv2: `{"Int":1234,"Addrs":["192.168.0.0/24"],"Strings":["bar"],"AddrsPtr":["192.168.0.0/24"],"StringsPtr":["bar"]}`, }, } - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - encoder.SetIndent("", "") for _, tc := range tests { - buf.Reset() - if err := encoder.Encode(&tc.in); err != nil { - t.Fatal(err) - } - b := buf.Bytes() - gotJSON := strings.TrimSpace(string(b)) - if tc.wantJSON != gotJSON { - t.Fatalf("JSON: %v; want: %v", gotJSON, tc.wantJSON) - } - var got viewStruct - if err := json.Unmarshal(b, &got); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, tc.in) { - t.Fatalf("unmarshal resulted in different output: %+v; want %+v", got, tc.in) + cmpOpts := cmp.Options{ + cmp.AllowUnexported(Slice[string]{}), + cmp.AllowUnexported(Slice[netip.Prefix]{}), + cmpopts.EquateComparable(netip.Prefix{}), } + t.Run("JSONv1", func(t *testing.T) { + gotJSON, err := jsonv1.Marshal(tc.in) + if err != nil { + t.Fatal(err) + } + if string(gotJSON) != tc.wantJSONv1 { + t.Fatalf("JSON: %s; want: %s", gotJSON, tc.wantJSONv1) + } + var got viewStruct + if err := jsonv1.Unmarshal(gotJSON, &got); err 
!= nil { + t.Fatal(err) + } + if d := cmp.Diff(got, tc.in, cmpOpts); d != "" { + t.Fatalf("unmarshal mismatch (-got +want):\n%s", d) + } + }) + t.Run("JSONv2", func(t *testing.T) { + gotJSON, err := jsonv2.Marshal(tc.in) + if err != nil { + t.Fatal(err) + } + if string(gotJSON) != tc.wantJSONv2 { + t.Fatalf("JSON: %s; want: %s", gotJSON, tc.wantJSONv2) + } + var got viewStruct + if err := jsonv2.Unmarshal(gotJSON, &got); err != nil { + t.Fatal(err) + } + if d := cmp.Diff(got, tc.in, cmpOpts, cmpopts.EquateEmpty()); d != "" { + t.Fatalf("unmarshal mismatch (-got +want):\n%s", d) + } + }) } } diff --git a/util/codegen/codegen.go b/util/codegen/codegen.go index 1b3af10e0..ec02d652b 100644 --- a/util/codegen/codegen.go +++ b/util/codegen/codegen.go @@ -85,28 +85,35 @@ func NewImportTracker(thisPkg *types.Package) *ImportTracker { } } +type namePkgPath struct { + name string // optional import name + pkgPath string +} + // ImportTracker provides a mechanism to track and build import paths. type ImportTracker struct { thisPkg *types.Package - packages map[string]bool + packages map[namePkgPath]bool } -func (it *ImportTracker) Import(pkg string) { - if pkg != "" && !it.packages[pkg] { - mak.Set(&it.packages, pkg, true) +// Import imports pkgPath under an optional import name. +func (it *ImportTracker) Import(name, pkgPath string) { + if pkgPath != "" && !it.packages[namePkgPath{name, pkgPath}] { + mak.Set(&it.packages, namePkgPath{name, pkgPath}, true) } } -// Has reports whether the specified package has been imported. -func (it *ImportTracker) Has(pkg string) bool { - return it.packages[pkg] +// Has reports whether the specified package path has been imported +// under the particular import name. +func (it *ImportTracker) Has(name, pkgPath string) bool { + return it.packages[namePkgPath{name, pkgPath}] } func (it *ImportTracker) qualifier(pkg *types.Package) string { if it.thisPkg == pkg { return "" } - it.Import(pkg.Path()) + it.Import("", pkg.Path()) // TODO(maisem): handle conflicts? return pkg.Name() } @@ -128,7 +135,11 @@ func (it *ImportTracker) PackagePrefix(pkg *types.Package) string { func (it *ImportTracker) Write(w io.Writer) { fmt.Fprintf(w, "import (\n") for s := range it.packages { - fmt.Fprintf(w, "\t%q\n", s) + if s.name == "" { + fmt.Fprintf(w, "\t%q\n", s.pkgPath) + } else { + fmt.Fprintf(w, "\t%s %q\n", s.name, s.pkgPath) + } } fmt.Fprintf(w, ")\n\n") } From b8c45a6a8f27315b6cc65e26d455254be7d35196 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Thu, 14 Aug 2025 13:46:51 -0700 Subject: [PATCH 1192/1708] client/systray: add CLI error output if operator is missing We already show a message in the menu itself, this just adds it to the CLI output as well. Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/systray/systray.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/client/systray/systray.go b/client/systray/systray.go index d5a19f91c..98c6156b8 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -160,6 +160,17 @@ func (menu *Menu) onReady() { log.Printf("starting") setAppIcon(disconnected) menu.rebuild() + + menu.mu.Lock() + if menu.readonly { + fmt.Fprintln(os.Stderr, ` +No permission to manage Tailscale. Set operator by running: + +sudo tailscale set --operator=$USER + +See https://tailscale.com/s/cli-operator for more information.`) + } + menu.mu.Unlock() } // updateState updates the Menu state from the Tailscale local client. 
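The TestViewsJSON change above splits its golden output into wantJSONv1 and wantJSONv2 because the two encoders render an unset view differently: encoding/json emits null for a nil underlying slice, while go-json-experiment/json emits []. A minimal, standalone sketch of that difference follows; it is illustrative only, not part of any patch in this series, and assumes the tailscale.com/types/views and github.com/go-json-experiment/json modules are available.

package main

import (
	jsonv1 "encoding/json"
	"fmt"

	jsonv2 "github.com/go-json-experiment/json"

	"tailscale.com/types/views"
)

func main() {
	var s views.Slice[string] // zero value; the underlying slice is nil

	// jsonv1 dispatches to Slice.MarshalJSON, which marshals the nil
	// underlying slice as "null".
	b1, err := jsonv1.Marshal(s)
	if err != nil {
		panic(err)
	}

	// jsonv2 dispatches to Slice.MarshalJSONTo; by default it marshals a
	// nil slice as an empty JSON array, "[]".
	b2, err := jsonv2.Marshal(s)
	if err != nil {
		panic(err)
	}

	fmt.Printf("v1=%s v2=%s\n", b1, b2) // v1=null v2=[]
}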
From 3f1851a6d9507ed2ffe46098835c53e63dff93b8 Mon Sep 17 00:00:00 2001 From: Michael Ben-Ami Date: Mon, 11 Aug 2025 12:10:33 -0400 Subject: [PATCH 1193/1708] types/dnstype, ipn/ipnlocal: allow other DNS resolvers with exit nodes dnstype.Resolver adds a boolean UseWithExitNode that controls whether the resolver should be used in tailscale exit node contexts (not wireguard exit nodes). If UseWithExitNode resolvers are found, they are installed as the global resolvers. If no UseWithExitNode resolvers are found, the exit node resolver continues to be installed as the global resolver. Split DNS Routes referencing UseWithExitNode resolvers are also installed. Updates #8237 Fixes tailscale/corp#30906 Fixes tailscale/corp#30907 Signed-off-by: Michael Ben-Ami --- client/tailscale/apitype/controltype.go | 49 ++++++++-- ipn/ipnlocal/local_test.go | 113 ++++++++++++++++++++---- ipn/ipnlocal/node_backend.go | 87 +++++++++++++----- tailcfg/tailcfg.go | 10 +-- types/dnstype/dnstype.go | 10 ++- types/dnstype/dnstype_clone.go | 1 + types/dnstype/dnstype_test.go | 14 ++- types/dnstype/dnstype_view.go | 2 + 8 files changed, 235 insertions(+), 51 deletions(-) diff --git a/client/tailscale/apitype/controltype.go b/client/tailscale/apitype/controltype.go index 9a623be31..d9d79f0ad 100644 --- a/client/tailscale/apitype/controltype.go +++ b/client/tailscale/apitype/controltype.go @@ -3,17 +3,50 @@ package apitype +// DNSConfig is the DNS configuration for a tailnet +// used in /tailnet/{tailnet}/dns/config. type DNSConfig struct { - Resolvers []DNSResolver `json:"resolvers"` - FallbackResolvers []DNSResolver `json:"fallbackResolvers"` - Routes map[string][]DNSResolver `json:"routes"` - Domains []string `json:"domains"` - Nameservers []string `json:"nameservers"` - Proxied bool `json:"proxied"` - TempCorpIssue13969 string `json:"TempCorpIssue13969,omitempty"` + // Resolvers are the global DNS resolvers to use + // overriding the local OS configuration. + Resolvers []DNSResolver `json:"resolvers"` + + // FallbackResolvers are used as global resolvers when + // the client is unable to determine the OS's preferred DNS servers. + FallbackResolvers []DNSResolver `json:"fallbackResolvers"` + + // Routes map DNS name suffixes to a set of DNS resolvers, + // used for Split DNS and other advanced routing overlays. + Routes map[string][]DNSResolver `json:"routes"` + + // Domains are the search domains to use. + Domains []string `json:"domains"` + + // Proxied means MagicDNS is enabled. + Proxied bool `json:"proxied"` + + // TempCorpIssue13969 is from an internal hack day prototype, + // See tailscale/corp#13969. + TempCorpIssue13969 string `json:"TempCorpIssue13969,omitempty"` + + // Nameservers are the IP addresses of global nameservers to use. + // This is a deprecated format but may still be found in tailnets + // that were configured a long time ago. When making updates, + // set Resolvers and leave Nameservers empty. + Nameservers []string `json:"nameservers"` } +// DNSResolver is a DNS resolver in a DNS configuration. type DNSResolver struct { - Addr string `json:"addr"` + // Addr is the address of the DNS resolver. + // It is usually an IP address or a DoH URL. + // See dnstype.Resolver.Addr for full details. + Addr string `json:"addr"` + + // BootstrapResolution is an optional suggested resolution for + // the DoT/DoH resolver. BootstrapResolution []string `json:"bootstrapResolution,omitempty"` + + // UseWithExitNode signals this resolver should be used + // even when a tailscale exit node is configured on a device. 
+ UseWithExitNode bool `json:"useWithExitNode,omitempty"` } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 30833e748..49cfc3e07 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2080,7 +2080,14 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { wantRoutes map[dnsname.FQDN][]*dnstype.Resolver } - defaultResolvers := []*dnstype.Resolver{{Addr: "default.example.com"}} + const tsUseWithExitNodeResolverAddr = "usewithexitnode.example.com" + defaultResolvers := []*dnstype.Resolver{ + {Addr: "default.example.com"}, + } + containsFlaggedResolvers := append([]*dnstype.Resolver{ + {Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}, + }, defaultResolvers...) + wgResolvers := []*dnstype.Resolver{{Addr: "wg.example.com"}} peers := []tailcfg.NodeView{ (&tailcfg.Node{ @@ -2099,9 +2106,33 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { }).View(), } exitDOH := peerAPIBase(&netmap.NetworkMap{Peers: peers}, peers[0]) + "/dns-query" - routes := map[dnsname.FQDN][]*dnstype.Resolver{ + baseRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "route.example.com.": {{Addr: "route.example.com"}}, + } + containsEmptyRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ "route.example.com.": {{Addr: "route.example.com"}}, + "empty.example.com.": {}, + } + containsFlaggedRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "route.example.com.": {{Addr: "route.example.com"}}, + "withexit.example.com.": {{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + } + containsFlaggedAndEmptyRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "empty.example.com.": {}, + "route.example.com.": {{Addr: "route.example.com"}}, + "withexit.example.com.": {{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, } + flaggedRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "withexit.example.com.": {{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + } + emptyRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "empty.example.com.": {}, + } + flaggedAndEmptyRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "empty.example.com.": {}, + "withexit.example.com.": {{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + } + stringifyRoutes := func(routes map[dnsname.FQDN][]*dnstype.Resolver) map[string][]*dnstype.Resolver { if routes == nil { return nil @@ -2138,19 +2169,23 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, wantRoutes: nil, }, + { + name: "tsExit/noRoutes/flaggedResolverOnly", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Resolvers: containsFlaggedResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + wantRoutes: nil, + }, - // The following two cases may need to be revisited. For a shared-in - // exit node split-DNS may effectively break, furthermore in the future - // if different nodes observe different DNS configurations, even a - // tailnet local exit node may present a different DNS configuration, - // which may not meet expectations in some use cases. - // In the case where a default resolver is set, the default resolver - // should also perhaps take precedence also. + // When at tailscale exit node is in use, + // only routes that reference resolvers with the UseWithExitNode should be installed, + // as well as routes with 0-length resolver lists, which should be installed in all cases. 
{ name: "tsExit/routes/noResolver", exitNode: "ts", peers: peers, - dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(routes)}, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(baseRoutes)}, wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, wantRoutes: nil, }, @@ -2158,10 +2193,58 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { name: "tsExit/routes/defaultResolver", exitNode: "ts", peers: peers, - dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(routes), Resolvers: defaultResolvers}, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(baseRoutes), Resolvers: defaultResolvers}, wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, wantRoutes: nil, }, + { + name: "tsExit/routes/flaggedResolverOnly", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(baseRoutes), Resolvers: containsFlaggedResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + wantRoutes: nil, + }, + { + name: "tsExit/flaggedRoutesOnly/defaultResolver", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(containsFlaggedRoutes), Resolvers: defaultResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, + wantRoutes: flaggedRoutes, + }, + { + name: "tsExit/flaggedRoutesOnly/flaggedResolverOnly", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(containsFlaggedRoutes), Resolvers: containsFlaggedResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + wantRoutes: flaggedRoutes, + }, + { + name: "tsExit/emptyRoutesOnly/defaultResolver", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(containsEmptyRoutes), Resolvers: defaultResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, + wantRoutes: emptyRoutes, + }, + { + name: "tsExit/flaggedAndEmptyRoutesOnly/defaultResolver", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(containsFlaggedAndEmptyRoutes), Resolvers: defaultResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, + wantRoutes: flaggedAndEmptyRoutes, + }, + { + name: "tsExit/flaggedAndEmptyRoutesOnly/flaggedResolverOnly", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(containsFlaggedAndEmptyRoutes), Resolvers: containsFlaggedResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + wantRoutes: flaggedAndEmptyRoutes, + }, // WireGuard exit nodes with DNS capabilities provide a "fallback" type // behavior, they have a lower precedence than a default resolver, but @@ -2187,17 +2270,17 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { name: "wgExit/routes/defaultResolver", exitNode: "wg", peers: peers, - dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(routes), Resolvers: defaultResolvers}, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(baseRoutes), Resolvers: defaultResolvers}, wantDefaultResolvers: defaultResolvers, - wantRoutes: routes, + wantRoutes: baseRoutes, }, { name: "wgExit/routes/noResolver", exitNode: "wg", peers: peers, - dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(routes)}, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(baseRoutes)}, wantDefaultResolvers: wgResolvers, - wantRoutes: routes, + wantRoutes: baseRoutes, }, } diff --git 
a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index ec503f130..a3889b643 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -578,6 +578,42 @@ func (nb *nodeBackend) doShutdown(cause error) { nb.eventClient.Close() } +// useWithExitNodeResolvers filters out resolvers so the ones that remain +// are all the ones marked for use with exit nodes. +func useWithExitNodeResolvers(resolvers []*dnstype.Resolver) []*dnstype.Resolver { + var filtered []*dnstype.Resolver + for _, res := range resolvers { + if res.UseWithExitNode { + filtered = append(filtered, res) + } + } + return filtered +} + +// useWithExitNodeRoutes filters out routes so the ones that remain +// are either zero-length resolver lists, or lists containing only +// resolvers marked for use with exit nodes. +func useWithExitNodeRoutes(routes map[string][]*dnstype.Resolver) map[string][]*dnstype.Resolver { + var filtered map[string][]*dnstype.Resolver + for suffix, resolvers := range routes { + // Suffixes with no resolvers represent a valid configuration, + // and should persist regardless of exit node considerations. + if len(resolvers) == 0 { + mak.Set(&filtered, suffix, make([]*dnstype.Resolver, 0)) + continue + } + + // In exit node contexts, we filter out resolvers not configured for use with + // exit nodes. If there are no such configured resolvers, there should not be an entry for that suffix. + filteredResolvers := useWithExitNodeResolvers(resolvers) + if len(filteredResolvers) > 0 { + mak.Set(&filtered, suffix, filteredResolvers) + } + } + + return filtered +} + // dnsConfigForNetmap returns a *dns.Config for the given netmap, // prefs, client OS version, and cloud hosting environment. // @@ -700,10 +736,36 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. dcfg.DefaultResolvers = append(dcfg.DefaultResolvers, resolvers...) } + addSplitDNSRoutes := func(routes map[string][]*dnstype.Resolver) { + for suffix, resolvers := range routes { + fqdn, err := dnsname.ToFQDN(suffix) + if err != nil { + logf("[unexpected] non-FQDN route suffix %q", suffix) + } + + // Create map entry even if len(resolvers) == 0; Issue 2706. + // This lets the control plane send ExtraRecords for which we + // can authoritatively answer "name not exists" for when the + // control plane also sends this explicit but empty route + // making it as something we handle. + dcfg.Routes[fqdn] = slices.Clone(resolvers) + } + } + // If we're using an exit node and that exit node is new enough (1.19.x+) - // to run a DoH DNS proxy, then send all our DNS traffic through it. + // to run a DoH DNS proxy, then send all our DNS traffic through it, + // unless we find resolvers with UseWithExitNode set, in which case we use that. if dohURL, ok := exitNodeCanProxyDNS(nm, peers, prefs.ExitNodeID()); ok { - addDefault([]*dnstype.Resolver{{Addr: dohURL}}) + filtered := useWithExitNodeResolvers(nm.DNS.Resolvers) + if len(filtered) > 0 { + addDefault(filtered) + } else { + // If no default global resolvers with the override + // are configured, configure the exit node's resolver. + addDefault([]*dnstype.Resolver{{Addr: dohURL}}) + } + + addSplitDNSRoutes(useWithExitNodeRoutes(nm.DNS.Routes)) return dcfg } @@ -718,25 +780,8 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. 
} } - for suffix, resolvers := range nm.DNS.Routes { - fqdn, err := dnsname.ToFQDN(suffix) - if err != nil { - logf("[unexpected] non-FQDN route suffix %q", suffix) - } - - // Create map entry even if len(resolvers) == 0; Issue 2706. - // This lets the control plane send ExtraRecords for which we - // can authoritatively answer "name not exists" for when the - // control plane also sends this explicit but empty route - // making it as something we handle. - // - // While we're already populating it, might as well size the - // slice appropriately. - // Per #9498 the exact requirements of nil vs empty slice remain - // unclear, this is a haunted graveyard to be resolved. - dcfg.Routes[fqdn] = make([]*dnstype.Resolver, 0, len(resolvers)) - dcfg.Routes[fqdn] = append(dcfg.Routes[fqdn], resolvers...) - } + // Add split DNS routes, with no regard to exit node configuration. + addSplitDNSRoutes(nm.DNS.Routes) // Set FallbackResolvers as the default resolvers in the // scenarios that can't handle a purely split-DNS config. See diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 9f4734f1f..d2125684d 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -169,7 +169,8 @@ type CapabilityVersion int // - 122: 2025-07-21: Client sends Hostinfo.ExitNodeID to report which exit node it has selected, if any. // - 123: 2025-07-28: fix deadlock regression from cryptokey routing change (issue #16651) // - 124: 2025-08-08: removed NodeAttrDisableMagicSockCryptoRouting support, crypto routing is now mandatory -const CurrentCapabilityVersion CapabilityVersion = 124 +// - 125: 2025-08-11: dnstype.Resolver adds UseWithExitNode field. +const CurrentCapabilityVersion CapabilityVersion = 125 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -1730,10 +1731,9 @@ type DNSConfig struct { // proxying to be enabled. Proxied bool `json:",omitempty"` - // The following fields are only set and used by - // MapRequest.Version >=9 and <14. - - // Nameservers are the IP addresses of the nameservers to use. + // Nameservers are the IP addresses of the global nameservers to use. + // + // Deprecated: this is only set and used by MapRequest.Version >=9 and <14. Use Resolvers instead. Nameservers []netip.Addr `json:",omitempty"` // CertDomains are the set of DNS names for which the control diff --git a/types/dnstype/dnstype.go b/types/dnstype/dnstype.go index b7f5b9d02..a3ba1b0a9 100644 --- a/types/dnstype/dnstype.go +++ b/types/dnstype/dnstype.go @@ -35,6 +35,12 @@ type Resolver struct { // // As of 2022-09-08, BootstrapResolution is not yet used. BootstrapResolution []netip.Addr `json:",omitempty"` + + // UseWithExitNode designates that this resolver should continue to be used when an + // exit node is in use. Normally, DNS resolution is delegated to the exit node but + // there are situations where it is preferable to still use a Split DNS server and/or + // global DNS server instead of the exit node. 
+ UseWithExitNode bool `json:",omitempty"` } // IPPort returns r.Addr as an IP address and port if either @@ -64,5 +70,7 @@ func (r *Resolver) Equal(other *Resolver) bool { return true } - return r.Addr == other.Addr && slices.Equal(r.BootstrapResolution, other.BootstrapResolution) + return r.Addr == other.Addr && + slices.Equal(r.BootstrapResolution, other.BootstrapResolution) && + r.UseWithExitNode == other.UseWithExitNode } diff --git a/types/dnstype/dnstype_clone.go b/types/dnstype/dnstype_clone.go index 86ca0535f..3985704aa 100644 --- a/types/dnstype/dnstype_clone.go +++ b/types/dnstype/dnstype_clone.go @@ -25,6 +25,7 @@ func (src *Resolver) Clone() *Resolver { var _ResolverCloneNeedsRegeneration = Resolver(struct { Addr string BootstrapResolution []netip.Addr + UseWithExitNode bool }{}) // Clone duplicates src into dst and reports whether it succeeded. diff --git a/types/dnstype/dnstype_test.go b/types/dnstype/dnstype_test.go index e3a941a20..ada5f687d 100644 --- a/types/dnstype/dnstype_test.go +++ b/types/dnstype/dnstype_test.go @@ -17,7 +17,7 @@ func TestResolverEqual(t *testing.T) { fieldNames = append(fieldNames, field.Name) } sort.Strings(fieldNames) - if !slices.Equal(fieldNames, []string{"Addr", "BootstrapResolution"}) { + if !slices.Equal(fieldNames, []string{"Addr", "BootstrapResolution", "UseWithExitNode"}) { t.Errorf("Resolver fields changed; update test") } @@ -68,6 +68,18 @@ func TestResolverEqual(t *testing.T) { }, want: false, }, + { + name: "equal UseWithExitNode", + a: &Resolver{Addr: "dns.example.com", UseWithExitNode: true}, + b: &Resolver{Addr: "dns.example.com", UseWithExitNode: true}, + want: true, + }, + { + name: "not equal UseWithExitNode", + a: &Resolver{Addr: "dns.example.com", UseWithExitNode: true}, + b: &Resolver{Addr: "dns.example.com", UseWithExitNode: false}, + want: false, + }, } for _, tt := range tests { diff --git a/types/dnstype/dnstype_view.go b/types/dnstype/dnstype_view.go index 3d374ab47..0704670a2 100644 --- a/types/dnstype/dnstype_view.go +++ b/types/dnstype/dnstype_view.go @@ -88,10 +88,12 @@ func (v ResolverView) Addr() string { return v.ж.Addr } func (v ResolverView) BootstrapResolution() views.Slice[netip.Addr] { return views.SliceOf(v.ж.BootstrapResolution) } +func (v ResolverView) UseWithExitNode() bool { return v.ж.UseWithExitNode } func (v ResolverView) Equal(v2 ResolverView) bool { return v.ж.Equal(v2.ж) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
var _ResolverViewNeedsRegeneration = Resolver(struct { Addr string BootstrapResolution []netip.Addr + UseWithExitNode bool }{}) From 5b6c64b1873a6cb824f120343bf268d4cc6ddbf5 Mon Sep 17 00:00:00 2001 From: Will Hannah Date: Fri, 15 Aug 2025 06:11:27 -0700 Subject: [PATCH 1194/1708] net/tshttpproxy: use errors.New for error creation (#16860) Updates tailscale/corp#30668 Signed-off-by: Will Hannah --- net/tshttpproxy/tshttpproxy.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/tshttpproxy/tshttpproxy.go b/net/tshttpproxy/tshttpproxy.go index ab2fd39e3..0456009ed 100644 --- a/net/tshttpproxy/tshttpproxy.go +++ b/net/tshttpproxy/tshttpproxy.go @@ -7,6 +7,7 @@ package tshttpproxy import ( "context" + "errors" "fmt" "log" "net" @@ -48,7 +49,7 @@ func SetProxyFunc(fn func(*url.URL) (*url.URL, error)) error { // Allow override only if config is not set if config != nil { - return fmt.Errorf("tshttpproxy: SetProxyFunc can only be called when config is not set") + return errors.New("tshttpproxy: SetProxyFunc can only be called when config is not set") } proxyFunc = fn From 55beba40948ea406e82e79eca5504be02bbf8c9f Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Fri, 15 Aug 2025 11:36:30 -0700 Subject: [PATCH 1195/1708] types/key: init HardwareAttestionKey implementation (#16867) Define the HardwareAttestionKey interface describing a platform-specific hardware backed node identity attestation key. Clients will register the key type implementations for their platform. Updates tailscale/corp#31269 Signed-off-by: Patrick O'Doherty --- types/key/hardware_attestation.go | 68 +++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 types/key/hardware_attestation.go diff --git a/types/key/hardware_attestation.go b/types/key/hardware_attestation.go new file mode 100644 index 000000000..be2eefb78 --- /dev/null +++ b/types/key/hardware_attestation.go @@ -0,0 +1,68 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package key + +import ( + "crypto" + "encoding/json" + "fmt" +) + +var ErrUnsupported = fmt.Errorf("key type not supported on this platform") + +// HardwareAttestationKey describes a hardware-backed key that is used to +// identify a node. Implementation details will +// vary based on the platform in use (SecureEnclave for Apple, TPM for +// Windows/Linux, Android Hardware-backed Keystore). +// This key can only be marshalled and unmarshalled on the same machine. +type HardwareAttestationKey interface { + crypto.Signer + json.Marshaler + json.Unmarshaler +} + +// emptyHardwareAttestationKey is a function that returns an empty +// HardwareAttestationKey suitable for use with JSON unmarshalling. +var emptyHardwareAttestationKey func() HardwareAttestationKey + +// createHardwareAttestationKey is a function that creates a new +// HardwareAttestationKey for the current platform. +var createHardwareAttestationKey func() (HardwareAttestationKey, error) + +// HardwareAttestationKeyFn is a callback function type that returns a HardwareAttestationKey +// and an error. It is used to register platform-specific implementations of +// HardwareAttestationKey. +type HardwareAttestationKeyFn func() (HardwareAttestationKey, error) + +// RegisterHardwareAttestationKeyFns registers a hardware attestation +// key implementation for the current platform. 
+func RegisterHardwareAttestationKeyFns(emptyFn func() HardwareAttestationKey, createFn HardwareAttestationKeyFn) { + if emptyHardwareAttestationKey != nil { + panic("emptyPlatformHardwareAttestationKey already registered") + } + emptyHardwareAttestationKey = emptyFn + + if createHardwareAttestationKey != nil { + panic("createPlatformHardwareAttestationKey already registered") + } + createHardwareAttestationKey = createFn +} + +// NewEmptyHardwareAttestationKey returns an empty HardwareAttestationKey +// suitable for JSON unmarshalling. +func NewEmptyHardwareAttestationKey() (HardwareAttestationKey, error) { + if emptyHardwareAttestationKey == nil { + return nil, ErrUnsupported + } + return emptyHardwareAttestationKey(), nil +} + +// NewHardwareAttestationKey returns a newly created HardwareAttestationKey for +// the current platform. +func NewHardwareAttestationKey() (HardwareAttestationKey, error) { + if createHardwareAttestationKey == nil { + return nil, ErrUnsupported + } + return createHardwareAttestationKey() +} From 9c39296ab51c0088f4cf285456dbf5dd04f43f90 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Fri, 15 Aug 2025 13:43:45 -0500 Subject: [PATCH 1196/1708] release/dist/qnap: verify code signing This pulls in a change from github.com/tailscale/QDK to verify code signing when using QNAP_SIGNING_SCRIPT. It also upgrades to the latest Google Cloud PKCS#11 library, and reorders the Dockerfile to allow for more efficient future upgrades to the included QDK. Updates tailscale/corp#23528 Signed-off-by: Percy Wegmann --- release/dist/qnap/files/scripts/Dockerfile.qpkg | 12 ++++++------ release/dist/qnap/files/scripts/sign-qpkg.sh | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/release/dist/qnap/files/scripts/Dockerfile.qpkg b/release/dist/qnap/files/scripts/Dockerfile.qpkg index dbcaac116..8e99630d1 100644 --- a/release/dist/qnap/files/scripts/Dockerfile.qpkg +++ b/release/dist/qnap/files/scripts/Dockerfile.qpkg @@ -9,13 +9,13 @@ RUN apt-get update -y && \ curl \ patch -# Install QNAP QDK (force a specific version to pick up updates) -RUN git clone https://github.com/tailscale/QDK.git && cd /QDK && git reset --hard 6aba74f6b4c8ea0c30b8aec9f3476f428f6a58a1 -RUN cd /QDK && ./InstallToUbuntu.sh install -ENV PATH="/usr/share/QDK/bin:${PATH}" - # Install Google Cloud PKCS11 module RUN curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list RUN apt-get update -y && apt-get install -y --no-install-recommends google-cloud-cli libengine-pkcs11-openssl -RUN curl -L https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/pkcs11-v1.6/libkmsp11-1.6-linux-amd64.tar.gz | tar xz +RUN curl -L https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/pkcs11-v1.7/libkmsp11-1.7-linux-amd64.tar.gz | tar xz + +# Install QNAP QDK (force a specific version to pick up updates) +RUN git clone https://github.com/tailscale/QDK.git && cd /QDK && git reset --hard 8478a990decf0b0bb259ae11c636e66bfeff2433 +RUN cd /QDK && ./InstallToUbuntu.sh install +ENV PATH="/usr/share/QDK/bin:${PATH}" diff --git a/release/dist/qnap/files/scripts/sign-qpkg.sh b/release/dist/qnap/files/scripts/sign-qpkg.sh index 5629672f8..b6b99a3b1 100755 --- a/release/dist/qnap/files/scripts/sign-qpkg.sh +++ b/release/dist/qnap/files/scripts/sign-qpkg.sh @@ -13,7 
+13,7 @@ log_directory: "/tmp/kmsp11" chmod 0600 pkcs11-config.yaml export KMS_PKCS11_CONFIG=`readlink -f pkcs11-config.yaml` -export PKCS11_MODULE_PATH=/libkmsp11-1.6-linux-amd64/libkmsp11.so +export PKCS11_MODULE_PATH=/libkmsp11-1.7-linux-amd64/libkmsp11.so # Verify signature of pkcs11 module # See https://github.com/GoogleCloudPlatform/kms-integrations/blob/master/kmsp11/docs/user_guide.md#downloading-and-verifying-the-library From 6006bc92b5d1fd6a71f776826fc8e200ebc9b755 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Fri, 15 Aug 2025 16:04:23 -0400 Subject: [PATCH 1197/1708] net/{netns, netmon}: use LastKnownDefaultInterface if set and check for utun (#16873) fixes tailscale/corp#31299 Fixes two issues: getInterfaceIndex would occasionally race with netmon's state, returning the cached default interface index after it had be changed by NWNetworkMonitor. This had the potential to cause connections to bind to the prior default. The fix here is to preferentially use the interface index provided by NWNetworkMonitor preferentially. When no interfaces are available, macOS will set the tunnel as the default interface when an exit node is enabled, potentially causing getInterfaceIndex to return utun's index. We now guard against this when taking the defaultIdx path. Signed-off-by: Jonathan Nobels --- net/netmon/defaultroute_darwin.go | 106 +++++++++++++++++---------- net/netmon/interfaces_darwin_test.go | 22 ++++++ net/netns/netns_darwin.go | 41 +++++++++-- 3 files changed, 124 insertions(+), 45 deletions(-) diff --git a/net/netmon/defaultroute_darwin.go b/net/netmon/defaultroute_darwin.go index 4efe2f1aa..57f7e22b7 100644 --- a/net/netmon/defaultroute_darwin.go +++ b/net/netmon/defaultroute_darwin.go @@ -6,6 +6,8 @@ package netmon import ( + "errors" + "fmt" "log" "net" @@ -16,14 +18,26 @@ var ( lastKnownDefaultRouteIfName syncs.AtomicValue[string] ) -// UpdateLastKnownDefaultRouteInterface is called by ipn-go-bridge in the iOS app when +// UpdateLastKnownDefaultRouteInterface is called by ipn-go-bridge from apple network extensions when // our NWPathMonitor instance detects a network path transition. func UpdateLastKnownDefaultRouteInterface(ifName string) { if ifName == "" { return } if old := lastKnownDefaultRouteIfName.Swap(ifName); old != ifName { - log.Printf("defaultroute_darwin: update from Swift, ifName = %s (was %s)", ifName, old) + interfaces, err := netInterfaces() + if err != nil { + log.Printf("defaultroute_darwin: UpdateLastKnownDefaultRouteInterface could not get interfaces: %v", err) + return + } + + netif, err := getInterfaceByName(ifName, interfaces) + if err != nil { + log.Printf("defaultroute_darwin: UpdateLastKnownDefaultRouteInterface could not find interface index for %s: %v", ifName, err) + return + } + + log.Printf("defaultroute_darwin: updated last known default if from OS, ifName = %s index: %d (was %s)", ifName, netif.Index, old) } } @@ -40,57 +54,69 @@ func defaultRoute() (d DefaultRouteDetails, err error) { // // If for any reason the Swift machinery didn't work and we don't get any updates, we will // fallback to the BSD logic. + osRoute, osRouteErr := OSDefaultRoute() + if osRouteErr == nil { + // If we got a valid interface from the OS, use it. + d.InterfaceName = osRoute.InterfaceName + d.InterfaceIndex = osRoute.InterfaceIndex + return d, nil + } - // Start by getting all available interfaces. 
- interfaces, err := netInterfaces() + // Fallback to the BSD logic + idx, err := DefaultRouteInterfaceIndex() if err != nil { - log.Printf("defaultroute_darwin: could not get interfaces: %v", err) - return d, ErrNoGatewayIndexFound + return d, err } - - getInterfaceByName := func(name string) *Interface { - for _, ifc := range interfaces { - if ifc.Name != name { - continue - } - - if !ifc.IsUp() { - log.Printf("defaultroute_darwin: %s is down", name) - return nil - } - - addrs, _ := ifc.Addrs() - if len(addrs) == 0 { - log.Printf("defaultroute_darwin: %s has no addresses", name) - return nil - } - return &ifc - } - return nil + iface, err := net.InterfaceByIndex(idx) + if err != nil { + return d, err } + d.InterfaceName = iface.Name + d.InterfaceIndex = idx + return d, nil +} + +// OSDefaultRoute returns the DefaultRouteDetails for the default interface as provided by the OS +// via UpdateLastKnownDefaultRouteInterface. If UpdateLastKnownDefaultRouteInterface has not been called, +// the interface name is not valid, or we cannot find its index, an error is returned. +func OSDefaultRoute() (d DefaultRouteDetails, err error) { // Did Swift set lastKnownDefaultRouteInterface? If so, we should use it and don't bother // with anything else. However, for sanity, do check whether Swift gave us with an interface - // that exists, is up, and has an address. + // that exists, is up, and has an address and is not the tunnel itself. if swiftIfName := lastKnownDefaultRouteIfName.Load(); swiftIfName != "" { - ifc := getInterfaceByName(swiftIfName) - if ifc != nil { + // Start by getting all available interfaces. + interfaces, err := netInterfaces() + if err != nil { + log.Printf("defaultroute_darwin: could not get interfaces: %v", err) + return d, err + } + + if ifc, err := getInterfaceByName(swiftIfName, interfaces); err == nil { d.InterfaceName = ifc.Name d.InterfaceIndex = ifc.Index return d, nil } } + err = errors.New("no os provided default route interface found") + return d, err +} - // Fallback to the BSD logic - idx, err := DefaultRouteInterfaceIndex() - if err != nil { - return d, err - } - iface, err := net.InterfaceByIndex(idx) - if err != nil { - return d, err +func getInterfaceByName(name string, interfaces []Interface) (*Interface, error) { + for _, ifc := range interfaces { + if ifc.Name != name { + continue + } + + if !ifc.IsUp() { + return nil, fmt.Errorf("defaultroute_darwin: %s is down", name) + } + + addrs, _ := ifc.Addrs() + if len(addrs) == 0 { + return nil, fmt.Errorf("defaultroute_darwin: %s has no addresses", name) + } + return &ifc, nil } - d.InterfaceName = iface.Name - d.InterfaceIndex = idx - return d, nil + return nil, errors.New("no interfaces found") } diff --git a/net/netmon/interfaces_darwin_test.go b/net/netmon/interfaces_darwin_test.go index d756d1334..c3d40a6f0 100644 --- a/net/netmon/interfaces_darwin_test.go +++ b/net/netmon/interfaces_darwin_test.go @@ -112,3 +112,25 @@ func TestFetchRoutingTable(t *testing.T) { } } } + +func TestUpdateLastKnownDefaultRouteInterface(t *testing.T) { + // Pick some interface on the machine + interfaces, err := netInterfaces() + if err != nil || len(interfaces) == 0 { + t.Fatalf("netInterfaces() error: %v", err) + } + + // Set it as our last known default route interface + ifName := interfaces[0].Name + UpdateLastKnownDefaultRouteInterface(ifName) + + // And make sure we can get it back + route, err := OSDefaultRoute() + if err != nil { + t.Fatalf("OSDefaultRoute() error: %v", err) + } + want, got := ifName, route.InterfaceName + 
if want != got { + t.Errorf("OSDefaultRoute() = %q, want %q", got, want) + } +} diff --git a/net/netns/netns_darwin.go b/net/netns/netns_darwin.go index ac5e89d76..f2ed16601 100644 --- a/net/netns/netns_darwin.go +++ b/net/netns/netns_darwin.go @@ -78,10 +78,38 @@ func getInterfaceIndex(logf logger.Logf, netMon *netmon.Monitor, address string) return -1, errInterfaceStateInvalid } - if iface, ok := state.Interface[state.DefaultRouteInterface]; ok { - return iface.Index, nil + // Netmon's cached view of the default inteface + cachedIdx, ok := state.Interface[state.DefaultRouteInterface] + // OSes view (if available) of the default interface + osIf, osIferr := netmon.OSDefaultRoute() + + idx := -1 + errOut := errInterfaceStateInvalid + // Preferentially choose the OS's view of the default if index. Due to the way darwin sets the delegated + // interface on tunnel creation only, it is possible for netmon to have a stale view of the default and + // netmon's view is often temporarily wrong during network transitions, or for us to not have the + // the the oses view of the defaultIf yet. + if osIferr == nil { + idx = osIf.InterfaceIndex + errOut = nil + } else if ok { + idx = cachedIdx.Index + errOut = nil + } + + if osIferr == nil && ok && (osIf.InterfaceIndex != cachedIdx.Index) { + logf("netns: [unexpected] os default if %q (%d) != netmon cached if %q (%d)", osIf.InterfaceName, osIf.InterfaceIndex, cachedIdx.Name, cachedIdx.Index) + } + + // Sanity check to make sure we didn't pick the tailscale interface + if tsif, err2 := tailscaleInterface(); tsif != nil && err2 == nil && errOut == nil { + if tsif.Index == idx { + idx = -1 + errOut = errInterfaceStateInvalid + } } - return -1, errInterfaceStateInvalid + + return idx, errOut } useRoute := bindToInterfaceByRoute.Load() || bindToInterfaceByRouteEnv() @@ -100,7 +128,7 @@ func getInterfaceIndex(logf logger.Logf, netMon *netmon.Monitor, address string) idx, err := interfaceIndexFor(addr, true /* canRecurse */) if err != nil { - logf("netns: error in interfaceIndexFor: %v", err) + logf("netns: error getting interface index for %q: %v", address, err) return defaultIdx() } @@ -108,10 +136,13 @@ func getInterfaceIndex(logf logger.Logf, netMon *netmon.Monitor, address string) // if so, we fall back to binding from the default. tsif, err2 := tailscaleInterface() if err2 == nil && tsif != nil && tsif.Index == idx { - logf("[unexpected] netns: interfaceIndexFor returned Tailscale interface") + // note: with an exit node enabled, this is almost always true. defaultIdx() is the + // right thing to do here. 
return defaultIdx() } + logf("netns: completed success interfaceIndexFor(%s) = %d", address, idx) + return idx, err } From 192fa6f05d12cfadaa3044d57e0a74f2b9f46a55 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Fri, 15 Aug 2025 15:45:17 -0500 Subject: [PATCH 1198/1708] {cmd/dist,release/dist}: add support for intermediary QNAP signing certificates Updates #23528 Signed-off-by: Percy Wegmann --- cmd/dist/dist.go | 20 +++++++++++--------- release/dist/qnap/files/scripts/sign-qpkg.sh | 7 +++++-- release/dist/qnap/pkgs.go | 12 +++++++----- release/dist/qnap/targets.go | 15 ++++++++------- 4 files changed, 31 insertions(+), 23 deletions(-) diff --git a/cmd/dist/dist.go b/cmd/dist/dist.go index 038ced708..c7406298d 100644 --- a/cmd/dist/dist.go +++ b/cmd/dist/dist.go @@ -21,12 +21,13 @@ import ( ) var ( - synologyPackageCenter bool - gcloudCredentialsBase64 string - gcloudProject string - gcloudKeyring string - qnapKeyName string - qnapCertificateBase64 string + synologyPackageCenter bool + gcloudCredentialsBase64 string + gcloudProject string + gcloudKeyring string + qnapKeyName string + qnapCertificateBase64 string + qnapCertificateIntermediariesBase64 string ) func getTargets() ([]dist.Target, error) { @@ -47,11 +48,11 @@ func getTargets() ([]dist.Target, error) { // To build for package center, run // ./tool/go run ./cmd/dist build --synology-package-center synology ret = append(ret, synology.Targets(synologyPackageCenter, nil)...) - qnapSigningArgs := []string{gcloudCredentialsBase64, gcloudProject, gcloudKeyring, qnapKeyName, qnapCertificateBase64} + qnapSigningArgs := []string{gcloudCredentialsBase64, gcloudProject, gcloudKeyring, qnapKeyName, qnapCertificateBase64, qnapCertificateIntermediariesBase64} if cmp.Or(qnapSigningArgs...) != "" && slices.Contains(qnapSigningArgs, "") { - return nil, errors.New("all of --gcloud-credentials, --gcloud-project, --gcloud-keyring, --qnap-key-name and --qnap-certificate must be set") + return nil, errors.New("all of --gcloud-credentials, --gcloud-project, --gcloud-keyring, --qnap-key-name, --qnap-certificate and --qnap-certificate-intermediaries must be set") } - ret = append(ret, qnap.Targets(gcloudCredentialsBase64, gcloudProject, gcloudKeyring, qnapKeyName, qnapCertificateBase64)...) + ret = append(ret, qnap.Targets(gcloudCredentialsBase64, gcloudProject, gcloudKeyring, qnapKeyName, qnapCertificateBase64, qnapCertificateIntermediariesBase64)...) 
return ret, nil } @@ -65,6 +66,7 @@ func main() { subcmd.FlagSet.StringVar(&gcloudKeyring, "gcloud-keyring", "", "path to keyring in GCP KMS (used when signing QNAP builds)") subcmd.FlagSet.StringVar(&qnapKeyName, "qnap-key-name", "", "name of GCP key to use when signing QNAP builds") subcmd.FlagSet.StringVar(&qnapCertificateBase64, "qnap-certificate", "", "base64 encoded certificate to use when signing QNAP builds") + subcmd.FlagSet.StringVar(&qnapCertificateIntermediariesBase64, "qnap-certificate-intermediaries", "", "base64 encoded intermediary certificate to use when signing QNAP builds") } } diff --git a/release/dist/qnap/files/scripts/sign-qpkg.sh b/release/dist/qnap/files/scripts/sign-qpkg.sh index b6b99a3b1..1dacb876f 100755 --- a/release/dist/qnap/files/scripts/sign-qpkg.sh +++ b/release/dist/qnap/files/scripts/sign-qpkg.sh @@ -24,7 +24,9 @@ MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEtfLbXkHUVc9oUPTNyaEK3hIwmuGRoTtd -----END PUBLIC KEY-----" > pkcs11-release-signing-key.pem openssl dgst -sha384 -verify pkcs11-release-signing-key.pem -signature "$PKCS11_MODULE_PATH.sig" "$PKCS11_MODULE_PATH" -echo "$QNAP_SIGNING_CERT_BASE64" | base64 --decode > cert.crt +echo "$QNAP_SIGNING_CERT_BASE64" | base64 --decode > signer.pem + +echo "$QNAP_SIGNING_CERT_INTERMEDIARIES_BASE64" | base64 --decode > certs.pem openssl cms \ -sign \ @@ -35,6 +37,7 @@ openssl cms \ -inkey "pkcs11:object=$QNAP_SIGNING_KEY_NAME" \ -keyopt rsa_padding_mode:pss \ -keyopt rsa_pss_saltlen:digest \ - -signer cert.crt \ + -signer signer.pem \ + -certfile certs.pem \ -in "$1" \ -out - diff --git a/release/dist/qnap/pkgs.go b/release/dist/qnap/pkgs.go index 7dc3b9495..5062011f0 100644 --- a/release/dist/qnap/pkgs.go +++ b/release/dist/qnap/pkgs.go @@ -27,11 +27,12 @@ type target struct { } type signer struct { - gcloudCredentialsBase64 string - gcloudProject string - gcloudKeyring string - keyName string - certificateBase64 string + gcloudCredentialsBase64 string + gcloudProject string + gcloudKeyring string + keyName string + certificateBase64 string + certificateIntermediariesBase64 string } func (t *target) String() string { @@ -90,6 +91,7 @@ func (t *target) buildQPKG(b *dist.Build, qnapBuilds *qnapBuilds, inner *innerPk "-e", fmt.Sprintf("GCLOUD_KEYRING=%s", t.signer.gcloudKeyring), "-e", fmt.Sprintf("QNAP_SIGNING_KEY_NAME=%s", t.signer.keyName), "-e", fmt.Sprintf("QNAP_SIGNING_CERT_BASE64=%s", t.signer.certificateBase64), + "-e", fmt.Sprintf("QNAP_SIGNING_CERT_INTERMEDIARIES_BASE64=%s", t.signer.certificateIntermediariesBase64), "-e", fmt.Sprintf("QNAP_SIGNING_SCRIPT=%s", "/sign-qpkg.sh"), "-v", fmt.Sprintf("%s:/sign-qpkg.sh", filepath.Join(qnapBuilds.tmpDir, "files/scripts/sign-qpkg.sh")), ) diff --git a/release/dist/qnap/targets.go b/release/dist/qnap/targets.go index 1c1818a70..0a0213954 100644 --- a/release/dist/qnap/targets.go +++ b/release/dist/qnap/targets.go @@ -18,15 +18,16 @@ import ( // gcloudKeyring is the full path to the Google Cloud keyring containing the signing key. // keyName is the name of the key. // certificateBase64 is the PEM certificate to use in the signature, base64 encoded. 
-func Targets(gcloudCredentialsBase64, gcloudProject, gcloudKeyring, keyName, certificateBase64 string) []dist.Target { +func Targets(gcloudCredentialsBase64, gcloudProject, gcloudKeyring, keyName, certificateBase64, certificateIntermediariesBase64 string) []dist.Target { var signerInfo *signer - if !slices.Contains([]string{gcloudCredentialsBase64, gcloudProject, gcloudKeyring, keyName, certificateBase64}, "") { + if !slices.Contains([]string{gcloudCredentialsBase64, gcloudProject, gcloudKeyring, keyName, certificateBase64, certificateIntermediariesBase64}, "") { signerInfo = &signer{ - gcloudCredentialsBase64: gcloudCredentialsBase64, - gcloudProject: gcloudProject, - gcloudKeyring: gcloudKeyring, - keyName: keyName, - certificateBase64: certificateBase64, + gcloudCredentialsBase64: gcloudCredentialsBase64, + gcloudProject: gcloudProject, + gcloudKeyring: gcloudKeyring, + keyName: keyName, + certificateBase64: certificateBase64, + certificateIntermediariesBase64: certificateIntermediariesBase64, } } return []dist.Target{ From 6d45663dd4305b41c27354febe30b0b7dd0d273d Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Sat, 16 Aug 2025 09:42:25 -0400 Subject: [PATCH 1199/1708] cmd/derpprobe,prober: add run all probes handler (#16875) Add a Run all probes handler that executes all probes except those that are continuous or the derpmap probe. This is leveraged by other tooling to confirm DERP stability after a deploy. Updates tailscale/corp#27370 Signed-off-by: Mike O'Driscoll --- cmd/derpprobe/derpprobe.go | 1 + prober/prober.go | 64 +++++++++++++++++++- prober/prober_test.go | 118 +++++++++++++++++++++++++++++++++++++ 3 files changed, 182 insertions(+), 1 deletion(-) diff --git a/cmd/derpprobe/derpprobe.go b/cmd/derpprobe/derpprobe.go index 25159d649..5d2179b51 100644 --- a/cmd/derpprobe/derpprobe.go +++ b/cmd/derpprobe/derpprobe.go @@ -107,6 +107,7 @@ func main() { mux := http.NewServeMux() d := tsweb.Debugger(mux) d.Handle("probe-run", "Run a probe", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunHandler), tsweb.HandlerOptions{Logf: log.Printf})) + d.Handle("probe-all", "Run all configured probes", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunAllHandler), tsweb.HandlerOptions{Logf: log.Printf})) mux.Handle("/", tsweb.StdHandler(p.StatusHandler( prober.WithTitle("DERP Prober"), prober.WithPageLink("Prober metrics", "/debug/varz"), diff --git a/prober/prober.go b/prober/prober.go index 1237611f4..b69d26821 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -7,6 +7,7 @@ package prober import ( + "bytes" "cmp" "container/ring" "context" @@ -21,6 +22,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sync/errgroup" "tailscale.com/syncs" "tailscale.com/tsweb" ) @@ -574,7 +576,67 @@ func (p *Prober) RunHandler(w http.ResponseWriter, r *http.Request) error { return tsweb.Error(respStatus, fmt.Sprintf("Probe failed: %s\n%s", err.Error(), stats), err) } w.WriteHeader(respStatus) - w.Write([]byte(fmt.Sprintf("Probe succeeded in %v\n%s", info.Latency, stats))) + fmt.Fprintf(w, "Probe succeeded in %v\n%s", info.Latency, stats) + return nil +} + +type RunHandlerAllResponse struct { + Results map[string]RunHandlerResponse +} + +func (p *Prober) RunAllHandler(w http.ResponseWriter, r *http.Request) error { + probes := make(map[string]*Probe) + p.mu.Lock() + for _, probe := range p.probes { + if !probe.IsContinuous() && probe.name != "derpmap-probe" { + probes[probe.name] = probe + } + } + p.mu.Unlock() + + // Do not abort running probes just because one of them 
has failed. + g := new(errgroup.Group) + + var resultsMu sync.Mutex + results := make(map[string]RunHandlerResponse) + + for name, probe := range probes { + g.Go(func() error { + probe.mu.Lock() + prevInfo := probe.probeInfoLocked() + probe.mu.Unlock() + + info, err := probe.run() + + resultsMu.Lock() + results[name] = RunHandlerResponse{ + ProbeInfo: info, + PreviousSuccessRatio: prevInfo.RecentSuccessRatio(), + PreviousMedianLatency: prevInfo.RecentMedianLatency(), + } + resultsMu.Unlock() + return err + }) + } + + respStatus := http.StatusOK + if err := g.Wait(); err != nil { + respStatus = http.StatusFailedDependency + } + + // Return serialized JSON response if the client requested JSON + resp := &RunHandlerAllResponse{ + Results: results, + } + var b bytes.Buffer + if err := json.NewEncoder(&b).Encode(resp); err != nil { + return tsweb.Error(http.StatusInternalServerError, "error encoding JSON response", err) + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(respStatus) + w.Write(b.Bytes()) + return nil } diff --git a/prober/prober_test.go b/prober/prober_test.go index 21c975a73..7cb841936 100644 --- a/prober/prober_test.go +++ b/prober/prober_test.go @@ -639,6 +639,124 @@ func TestProberRunHandler(t *testing.T) { } +func TestRunAllHandler(t *testing.T) { + clk := newFakeTime() + + tests := []struct { + name string + probeFunc []func(context.Context) error + wantResponseCode int + wantJSONResponse RunHandlerAllResponse + wantPlaintextResponse string + }{ + { + name: "successProbe", + probeFunc: []func(context.Context) error{func(context.Context) error { return nil }, func(context.Context) error { return nil }}, + wantResponseCode: http.StatusOK, + wantJSONResponse: RunHandlerAllResponse{ + Results: map[string]RunHandlerResponse{ + "successProbe-0": { + ProbeInfo: ProbeInfo{ + Name: "successProbe-0", + Interval: probeInterval, + Status: ProbeStatusSucceeded, + RecentResults: []bool{true, true}, + }, + PreviousSuccessRatio: 1, + }, + "successProbe-1": { + ProbeInfo: ProbeInfo{ + Name: "successProbe-1", + Interval: probeInterval, + Status: ProbeStatusSucceeded, + RecentResults: []bool{true, true}, + }, + PreviousSuccessRatio: 1, + }, + }, + }, + wantPlaintextResponse: "Probe successProbe-0: succeeded\n\tLast run: 0s\n\tPrevious success rate: 100.0%\n\tPrevious median latency: 0s\nProbe successProbe-1: succeeded\n\tLast run: 0s\n\tPrevious success rate: 100.0%\n\tPrevious median latency: 0s\n\n", + }, + { + name: "successAndFailureProbes", + probeFunc: []func(context.Context) error{func(context.Context) error { return nil }, func(context.Context) error { return fmt.Errorf("error2") }}, + wantResponseCode: http.StatusFailedDependency, + wantJSONResponse: RunHandlerAllResponse{ + Results: map[string]RunHandlerResponse{ + "successAndFailureProbes-0": { + ProbeInfo: ProbeInfo{ + Name: "successAndFailureProbes-0", + Interval: probeInterval, + Status: ProbeStatusSucceeded, + RecentResults: []bool{true, true}, + }, + PreviousSuccessRatio: 1, + }, + "successAndFailureProbes-1": { + ProbeInfo: ProbeInfo{ + Name: "successAndFailureProbes-1", + Interval: probeInterval, + Status: ProbeStatusFailed, + Error: "error2", + RecentResults: []bool{false, false}, + }, + }, + }, + }, + wantPlaintextResponse: "Probe successAndFailureProbes-0: succeeded\n\tLast run: 0s\n\tPrevious success rate: 100.0%\n\tPrevious median latency: 0s\nProbe successAndFailureProbes-1: failed\n\tLast run: 0s\n\tPrevious success rate: 0.0%\n\tPrevious median latency: 0s\n\n\tLast error: error2\n\n", + }, + 
} + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + p := newForTest(clk.Now, clk.NewTicker).WithOnce(true) + for i, pfunc := range tc.probeFunc { + probe := p.Run(fmt.Sprintf("%s-%d", tc.name, i), probeInterval, nil, FuncProbe(pfunc)) + defer probe.Close() + <-probe.stopped // wait for the first run. + } + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + mux.Handle("/prober/runall/", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunAllHandler), tsweb.HandlerOptions{})) + + req, err := http.NewRequest("GET", server.URL+"/prober/runall/", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("failed to make request: %v", err) + } + + if resp.StatusCode != tc.wantResponseCode { + t.Errorf("unexpected response code: got %d, want %d", resp.StatusCode, tc.wantResponseCode) + } + + if resp.Header.Get("Content-Type") != "application/json" { + t.Errorf("unexpected content type: got %q, want application/json", resp.Header.Get("Content-Type")) + } + var gotJSON RunHandlerAllResponse + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } + + if err := json.Unmarshal(body, &gotJSON); err != nil { + t.Fatalf("failed to unmarshal JSON response: %v; body: %s", err, body) + } + if diff := cmp.Diff(tc.wantJSONResponse, gotJSON, cmpopts.IgnoreFields(ProbeInfo{}, "Start", "End", "Labels", "RecentLatencies")); diff != "" { + t.Errorf("unexpected JSON response (-want +got):\n%s", diff) + } + + }) + } + +} + type fakeTicker struct { ch chan time.Time interval time.Duration From 9d9a70d81d87849971add8588dc47120db81bc9d Mon Sep 17 00:00:00 2001 From: Will Norris Date: Sun, 17 Aug 2025 08:26:59 -0700 Subject: [PATCH 1200/1708] client/systray: disable 'more settings' menu if backend not running Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/systray/systray.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/client/systray/systray.go b/client/systray/systray.go index 98c6156b8..b1bc45fa8 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -306,11 +306,14 @@ func (menu *Menu) rebuild() { menu.rebuildExitNodeMenu(ctx) } - if menu.status != nil { - menu.more = systray.AddMenuItem("More settings", "") + menu.more = systray.AddMenuItem("More settings", "") + if menu.status != nil && menu.status.BackendState == "Running" { + // web client is only available if backend is running onClick(ctx, menu.more, func(_ context.Context) { webbrowser.Open("http://100.100.100.100/") }) + } else { + menu.more.Disable() } // TODO(#15528): this menu item shouldn't be necessary at all, From 02f6030dbd3b48a30d5e33803eb04a8fcdce7856 Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Mon, 21 Jul 2025 14:14:02 -0600 Subject: [PATCH 1201/1708] tool, tool/gocross: update gocross to support building natively on Windows and add a PowerShell Core wrapper script gocross-wrapper.ps1 is a PowerShell core script that is essentially a straight port of gocross-wrapper.sh. It requires PowerShell 7.4, which is the latest LTS release of PSCore. Why use PowerShell Core instead of Windows PowerShell? Essentially because the former is much better to script with and is the edition that is currently maintained. 
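For illustration (this snippet is not from the wrapper itself), the 7.4 requirement can be declared at the top of such a script with a #Requires directive, so an older PowerShell host fails immediately with a clear error instead of partway through a build:

    #Requires -Version 7.4
    Set-StrictMode -Version 3.0
    Write-Host "Running under PowerShell $($PSVersionTable.PSVersion)"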
Because we're using PowerShell Core, but many people will be running scripts from a machine that only has Windows PowerShell, go.cmd has been updated to prompt the user for PowerShell core installation if necessary. gocross-wrapper.sh has also been updated to utilize the PSCore script when running under cygwin or msys. gocross itself required a couple of updates: We update gocross to output the PowerShell Core wrapper alongside the bash wrapper, which will propagate the revised scripts to other repos as necessary. We also fix a couple of things in gocross that didn't work on Windows: we change the toolchain resolution code to use os.UserHomeDir instead of directly referencing the HOME environment variable, and we fix a bug in the way arguments were being passed into exec.Command on non-Unix systems. Updates https://github.com/tailscale/corp/issues/29940 Signed-off-by: Aaron Klotz --- .github/workflows/test.yml | 2 +- tool/go.cmd | 36 ++- tool/gocross/exec_other.go | 2 +- tool/gocross/gocross-wrapper.ps1 | 220 +++++++++++++++++++ tool/gocross/gocross-wrapper.sh | 6 + tool/gocross/gocross.go | 15 +- tool/gocross/gocross_wrapper_test.go | 2 +- tool/gocross/gocross_wrapper_windows_test.go | 25 +++ tool/gocross/toolchain.go | 10 +- 9 files changed, 310 insertions(+), 8 deletions(-) create mode 100644 tool/gocross/gocross-wrapper.ps1 create mode 100644 tool/gocross/gocross_wrapper_windows_test.go diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7ccb39869..fe7849af6 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -177,7 +177,7 @@ jobs: run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper - name: test all working-directory: src - run: NOBASHDEBUG=true PATH=$PWD/tool:$PATH /tmp/testwrapper ./... ${{matrix.buildflags}} + run: NOBASHDEBUG=true NOPWSHDEBUG=true PATH=$PWD/tool:$PATH /tmp/testwrapper ./... ${{matrix.buildflags}} env: GOARCH: ${{ matrix.goarch }} TS_TEST_SHARD: ${{ matrix.shard }} diff --git a/tool/go.cmd b/tool/go.cmd index 04172a28d..b7b5d0483 100644 --- a/tool/go.cmd +++ b/tool/go.cmd @@ -1,2 +1,36 @@ @echo off -powershell -NoProfile -ExecutionPolicy Bypass -File "%~dp0go-win.ps1" %* +rem Checking for PowerShell Core using PowerShell for Windows... +powershell -NoProfile -NonInteractive -Command "& {Get-Command -Name pwsh -ErrorAction Stop}" > NUL +if ERRORLEVEL 1 ( + rem Ask the user whether they should install the dependencies. Note that this + rem code path never runs in CI because pwsh is always explicitly installed. + + rem Time out after 5 minutes, defaulting to 'N' + choice /c yn /t 300 /d n /m "PowerShell Core is required. Install now" + if ERRORLEVEL 2 ( + echo Aborting due to unmet dependencies. + exit /b 1 + ) + + rem Check for a .NET Core runtime using PowerShell for Windows... + powershell -NoProfile -NonInteractive -Command "& {if (-not (dotnet --list-runtimes | Select-String 'Microsoft\.NETCore\.App' -Quiet)) {exit 1}}" > NUL + rem Install .NET Core if missing to provide PowerShell Core's runtime library. + if ERRORLEVEL 1 ( + rem Time out after 5 minutes, defaulting to 'N' + choice /c yn /t 300 /d n /m "PowerShell Core requires .NET Core for its runtime library. Install now" + if ERRORLEVEL 2 ( + echo Aborting due to unmet dependencies. + exit /b 1 + ) + + winget install --accept-package-agreements --id Microsoft.DotNet.Runtime.8 -e --source winget + ) + + rem Now install PowerShell Core. 
+ winget install --accept-package-agreements --id Microsoft.PowerShell -e --source winget + if ERRORLEVEL 0 echo Please re-run this script within a new console session to pick up PATH changes. + rem Either way we didn't build, so return 1. + exit /b 1 +) + +pwsh -NoProfile -ExecutionPolicy Bypass "%~dp0..\tool\gocross\gocross-wrapper.ps1" %* diff --git a/tool/gocross/exec_other.go b/tool/gocross/exec_other.go index 8d4df0db3..7bce0c099 100644 --- a/tool/gocross/exec_other.go +++ b/tool/gocross/exec_other.go @@ -11,7 +11,7 @@ import ( ) func doExec(cmd string, args []string, env []string) error { - c := exec.Command(cmd, args...) + c := exec.Command(cmd, args[1:]...) c.Env = env c.Stdin = os.Stdin c.Stdout = os.Stdout diff --git a/tool/gocross/gocross-wrapper.ps1 b/tool/gocross/gocross-wrapper.ps1 new file mode 100644 index 000000000..fcc010dce --- /dev/null +++ b/tool/gocross/gocross-wrapper.ps1 @@ -0,0 +1,220 @@ +# Copyright (c) Tailscale Inc & AUTHORS +# SPDX-License-Identifier: BSD-3-Clause + +#Requires -Version 7.4 + +$ErrorActionPreference = 'Stop' +Set-StrictMode -Version 3.0 + +if (($Env:CI -eq 'true') -and ($Env:NOPWSHDEBUG -ne 'true')) { + Set-PSDebug -Trace 1 +} + +<# + .DESCRIPTION + Copies the script's $args variable into an array, which is easier to work with + when preparing to start child processes. +#> +function Copy-ScriptArgs { + $list = [System.Collections.Generic.List[string]]::new($Script:args.Count) + foreach ($arg in $Script:args) { + $list.Add($arg) + } + return $list.ToArray() +} + +<# + .DESCRIPTION + Copies the current environment into a hashtable, which is easier to work with + when preparing to start child processes. +#> +function Copy-Environment { + $result = @{} + foreach ($pair in (Get-Item -Path Env:)) { + $result[$pair.Key] = $pair.Value + } + return $result +} + +<# + .DESCRIPTION + Outputs the fully-qualified path to the repository's root directory. This + function expects to be run from somewhere within a git repository. + The directory containing the git executable must be somewhere in the PATH. +#> +function Get-RepoRoot { + Get-Command -Name 'git' | Out-Null + $repoRoot = & git rev-parse --show-toplevel + if ($LASTEXITCODE -ne 0) { + throw "failed obtaining repo root: git failed with code $LASTEXITCODE" + } + + # Git outputs a path containing forward slashes. Canonicalize. + return [System.IO.Path]::GetFullPath($repoRoot) +} + +<# + .DESCRIPTION + Runs the provided ScriptBlock in a child scope, restoring any changes to the + current working directory once the script block completes. +#> +function Start-ChildScope { + param ( + [Parameter(Mandatory = $true)] + [ScriptBlock]$ScriptBlock + ) + + $initialLocation = Get-Location + try { + Invoke-Command -ScriptBlock $ScriptBlock + } + finally { + Set-Location -Path $initialLocation + } +} + +<# + .SYNOPSIS + Write-Output with timestamps prepended to each line. 
+#> +function Write-Log { + param ($message) + $timestamp = (Get-Date).ToString('yyyy-MM-dd HH:mm:ss') + Write-Output "$timestamp - $message" +} + +$bootstrapScriptBlock = { + + $repoRoot = Get-RepoRoot + + Set-Location -LiteralPath $repoRoot + + switch -Wildcard -File .\go.toolchain.rev { + "/*" { $toolchain = $_ } + default { + $rev = $_ + $tsgo = Join-Path $Env:USERPROFILE '.cache' 'tsgo' + $toolchain = Join-Path $tsgo $rev + if (-not (Test-Path -LiteralPath "$toolchain.extracted" -PathType Leaf -ErrorAction SilentlyContinue)) { + New-Item -Force -Path $tsgo -ItemType Directory | Out-Null + Remove-Item -Force -Recurse -LiteralPath $toolchain -ErrorAction SilentlyContinue + Write-Log "Downloading Go toolchain $rev" + + # Values from https://web.archive.org/web/20250227081443/https://learn.microsoft.com/en-us/dotnet/api/system.runtime.interopservices.architecture?view=net-9.0 + $cpuArch = ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture | Out-String -NoNewline) + # Comparison in switch is case-insensitive by default. + switch ($cpuArch) { + 'x86' { $goArch = '386' } + 'x64' { $goArch = 'amd64' } + default { $goArch = $cpuArch } + } + + Invoke-WebRequest -Uri "https://github.com/tailscale/go/releases/download/build-$rev/windows-$goArch.tar.gz" -OutFile "$toolchain.tar.gz" + try { + New-Item -Force -Path $toolchain -ItemType Directory | Out-Null + Start-ChildScope -ScriptBlock { + Set-Location -LiteralPath $toolchain + tar --strip-components=1 -xf "$toolchain.tar.gz" + if ($LASTEXITCODE -ne 0) { + throw "tar failed with exit code $LASTEXITCODE" + } + } + $rev | Out-File -FilePath "$toolchain.extracted" + } + finally { + Remove-Item -Force "$toolchain.tar.gz" -ErrorAction Continue + } + + # Cleanup old toolchains. + $maxDays = 90 + $oldFiles = Get-ChildItem -Path $tsgo -Filter '*.extracted' -File -Recurse -Depth 1 | Where-Object { $_.LastWriteTime -lt (Get-Date).AddDays(-$maxDays) } + foreach ($file in $oldFiles) { + Write-Log "Cleaning up old Go toolchain $($file.Basename)" + Remove-Item -LiteralPath $file.FullName -Force -ErrorAction Continue + $dirName = Join-Path $file.DirectoryName $file.Basename -Resolve -ErrorAction Continue + if ($dirName -and (Test-Path -LiteralPath $dirName -PathType Container -ErrorAction Continue)) { + Remove-Item -LiteralPath $dirName -Recurse -Force -ErrorAction Continue + } + } + } + } + } + + if ($Env:TS_USE_GOCROSS -ne '1') { + return + } + + if (Test-Path -LiteralPath $toolchain -PathType Container -ErrorAction SilentlyContinue) { + $goMod = Join-Path $repoRoot 'go.mod' -Resolve + $goLine = Get-Content -LiteralPath $goMod | Select-String -Pattern '^go (.*)$' -List + $wantGoMinor = $goLine.Matches.Groups[1].Value.split('.')[1] + $versionFile = Join-Path $toolchain 'VERSION' + if (Test-Path -LiteralPath $versionFile -PathType Leaf -ErrorAction SilentlyContinue) { + try { + $haveGoMinor = ((Get-Content -LiteralPath $versionFile -TotalCount 1).split('.')[1]) -replace 'rc.*', '' + } + catch { + } + } + + if ([string]::IsNullOrEmpty($haveGoMinor) -or ($haveGoMinor -lt $wantGoMinor)) { + Remove-Item -Force -Recurse -LiteralPath $toolchain -ErrorAction Continue + Remove-Item -Force -LiteralPath "$toolchain.extracted" -ErrorAction Continue + } + } + + $wantVer = & git rev-parse HEAD + $gocrossOk = $false + $gocrossPath = '.\gocross.exe' + if (Get-Command -Name $gocrossPath -CommandType Application -ErrorAction SilentlyContinue) { + $gotVer = & $gocrossPath gocross-version 2> $null + if ($gotVer -eq $wantVer) { + $gocrossOk = $true + } + } + + if 
(-not $gocrossOk) { + $goBuildEnv = Copy-Environment + $goBuildEnv['CGO_ENABLED'] = '0' + $goBuildEnv.Remove('GOOS') + $goBuildEnv.Remove('GOARCH') + $goBuildEnv.Remove('GO111MODULE') + $goBuildEnv.Remove('GOROOT') + + $procExe = Join-Path $toolchain 'bin' 'go.exe' -Resolve + $proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $goBuildEnv -ArgumentList 'build', '-o', $gocrossPath, "-ldflags=-X=tailscale.com/version.gitCommitStamp=$wantVer", 'tailscale.com/tool/gocross' -NoNewWindow -Wait -PassThru + if ($proc.ExitCode -ne 0) { + throw 'error building gocross' + } + } + +} # bootstrapScriptBlock + +Start-ChildScope -ScriptBlock $bootstrapScriptBlock + +$repoRoot = Get-RepoRoot + +$execEnv = Copy-Environment +$execEnv.Remove('GOROOT') + +$argList = Copy-ScriptArgs + +if ($Env:TS_USE_GOCROSS -ne '1') { + $revFile = Join-Path $repoRoot 'go.toolchain.rev' -Resolve + switch -Wildcard -File $revFile { + "/*" { $toolchain = $_ } + default { + $rev = $_ + $tsgo = Join-Path $Env:USERPROFILE '.cache' 'tsgo' + $toolchain = Join-Path $tsgo $rev -Resolve + } + } + + $procExe = Join-Path $toolchain 'bin' 'go.exe' -Resolve + $proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $execEnv -ArgumentList $argList -NoNewWindow -Wait -PassThru + exit $proc.ExitCode +} + +$procExe = Join-Path $repoRoot 'gocross.exe' -Resolve +$proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $execEnv -ArgumentList $argList -NoNewWindow -Wait -PassThru +exit $proc.ExitCode diff --git a/tool/gocross/gocross-wrapper.sh b/tool/gocross/gocross-wrapper.sh index 90485d31b..d93b137aa 100755 --- a/tool/gocross/gocross-wrapper.sh +++ b/tool/gocross/gocross-wrapper.sh @@ -15,6 +15,12 @@ if [[ "${CI:-}" == "true" && "${NOBASHDEBUG:-}" != "true" ]]; then set -x fi +if [[ "${OSTYPE:-}" == "cygwin" || "${OSTYPE:-}" == "msys" ]]; then + hash pwsh 2>/dev/null || { echo >&2 "This operation requires PowerShell Core."; exit 1; } + pwsh -NoProfile -ExecutionPolicy Bypass "${BASH_SOURCE%/*}/gocross-wrapper.ps1" "$@" + exit +fi + # Locate a bootstrap toolchain and (re)build gocross if necessary. 
We run all of # this in a subshell because posix shell semantics make it very easy to # accidentally mutate the input environment that will get passed to gocross at diff --git a/tool/gocross/gocross.go b/tool/gocross/gocross.go index d14ea0388..6d5d06aeb 100644 --- a/tool/gocross/gocross.go +++ b/tool/gocross/gocross.go @@ -16,6 +16,7 @@ import ( "os" "path/filepath" "runtime/debug" + "strings" "tailscale.com/atomicfile" ) @@ -68,8 +69,13 @@ func main() { fmt.Fprintf(os.Stderr, "usage: gocross write-wrapper-script \n") os.Exit(1) } - if err := atomicfile.WriteFile(os.Args[2], wrapperScript, 0755); err != nil { - fmt.Fprintf(os.Stderr, "writing wrapper script: %v\n", err) + if err := atomicfile.WriteFile(os.Args[2], wrapperScriptBash, 0755); err != nil { + fmt.Fprintf(os.Stderr, "writing bash wrapper script: %v\n", err) + os.Exit(1) + } + psFileName := strings.TrimSuffix(os.Args[2], filepath.Ext(os.Args[2])) + ".ps1" + if err := atomicfile.WriteFile(psFileName, wrapperScriptPowerShell, 0755); err != nil { + fmt.Fprintf(os.Stderr, "writing PowerShell wrapper script: %v\n", err) os.Exit(1) } os.Exit(0) @@ -112,7 +118,10 @@ func main() { } //go:embed gocross-wrapper.sh -var wrapperScript []byte +var wrapperScriptBash []byte + +//go:embed gocross-wrapper.ps1 +var wrapperScriptPowerShell []byte func debugf(format string, args ...any) { debug := os.Getenv("GOCROSS_DEBUG") diff --git a/tool/gocross/gocross_wrapper_test.go b/tool/gocross/gocross_wrapper_test.go index f4dcec429..6937ccec7 100644 --- a/tool/gocross/gocross_wrapper_test.go +++ b/tool/gocross/gocross_wrapper_test.go @@ -21,7 +21,7 @@ func TestGocrossWrapper(t *testing.T) { t.Fatalf("gocross-wrapper.sh failed: %v\n%s", err, out) } if i > 0 && !strings.Contains(string(out), "gocross_ok=1\n") { - t.Errorf("expected to find 'gocross-ok=1'; got output:\n%s", out) + t.Errorf("expected to find 'gocross_ok=1'; got output:\n%s", out) } } } diff --git a/tool/gocross/gocross_wrapper_windows_test.go b/tool/gocross/gocross_wrapper_windows_test.go new file mode 100644 index 000000000..aa4277425 --- /dev/null +++ b/tool/gocross/gocross_wrapper_windows_test.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "os" + "os/exec" + "strings" + "testing" +) + +func TestGocrossWrapper(t *testing.T) { + for i := range 2 { // once to build gocross; second to test it's cached + cmd := exec.Command("pwsh", "-NoProfile", "-ExecutionPolicy", "Bypass", ".\\gocross-wrapper.ps1", "version") + cmd.Env = append(os.Environ(), "CI=true", "NOPWSHDEBUG=false", "TS_USE_GOCROSS=1") // for Set-PSDebug verbosity + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("gocross-wrapper.ps1 failed: %v\n%s", err, out) + } + if i > 0 && !strings.Contains(string(out), "$gocrossOk = $true\r\n") { + t.Errorf("expected to find '$gocrossOk = $true'; got output:\n%s", out) + } + } +} diff --git a/tool/gocross/toolchain.go b/tool/gocross/toolchain.go index f422e289e..9cf7f892b 100644 --- a/tool/gocross/toolchain.go +++ b/tool/gocross/toolchain.go @@ -60,7 +60,15 @@ func getToolchain() (toolchainDir, gorootDir string, err error) { return "", "", err } - cache := filepath.Join(os.Getenv("HOME"), ".cache") + homeDir, err := os.UserHomeDir() + if err != nil { + return "", "", err + } + + // We use ".cache" instead of os.UserCacheDir for legacy reasons and we + // don't want to break that on platforms where the latter returns a different + // result. 
+ cache := filepath.Join(homeDir, ".cache") toolchainDir = filepath.Join(cache, "tsgo", rev) gorootDir = filepath.Join(cache, "tsgoroot", rev) From 55698c8511cb2e52fb41fa89dd71093a02cdff93 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 18 Aug 2025 10:56:17 -0700 Subject: [PATCH 1202/1708] ipn/localapi: plumb an event bus through the localapi.Handler (#16892) Some of the operations of the local API need an event bus to correctly instantiate other components (notably including the portmapper). This commit adds that, and as the parameter list is starting to get a bit long and hard to read, I took the opportunity to move the arguments to a config type. Only a few call sites needed to be updated and this API is not intended for general use, so I did not bother to stage the change. Updates #15160 Updates #16842 Change-Id: I7b057d71161bd859f5acb96e2f878a34c85be0ef Signed-off-by: M. J. Fromberger --- ipn/ipnserver/server.go | 8 +++++++- ipn/localapi/localapi.go | 25 ++++++++++++++++++++++--- net/portmapper/portmapper.go | 7 ++----- tsnet/tsnet.go | 16 ++++++++++++++-- 4 files changed, 45 insertions(+), 11 deletions(-) diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index a7ded9c00..fdbd82b0b 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -199,7 +199,13 @@ func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) { ci = actorWithAccessOverride(actor, string(reason)) } - lah := localapi.NewHandler(ci, lb, s.logf, s.backendLogID) + lah := localapi.NewHandler(localapi.HandlerConfig{ + Actor: ci, + Backend: lb, + Logf: s.logf, + LogID: s.backendLogID, + EventBus: lb.Sys().Bus.Get(), + }) if actor, ok := ci.(*actor); ok { lah.PermitRead, lah.PermitWrite = actor.Permissions(lb.OperatorUserID()) lah.PermitCert = actor.CanFetchCerts() diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 0acc5a65f..a199a2908 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -172,9 +172,26 @@ var ( metrics = map[string]*clientmetric.Metric{} ) -// NewHandler creates a new LocalAPI HTTP handler. All parameters are required. -func NewHandler(actor ipnauth.Actor, b *ipnlocal.LocalBackend, logf logger.Logf, logID logid.PublicID) *Handler { - return &Handler{Actor: actor, b: b, logf: logf, backendLogID: logID, clock: tstime.StdClock{}} +// NewHandler creates a new LocalAPI HTTP handler from the given config. +func NewHandler(cfg HandlerConfig) *Handler { + return &Handler{ + Actor: cfg.Actor, + b: cfg.Backend, + logf: cfg.Logf, + backendLogID: cfg.LogID, + clock: tstime.StdClock{}, + eventBus: cfg.EventBus, + } +} + +// HandlerConfig carries the settings for a local API handler. +// All fields are required. 
+type HandlerConfig struct { + Actor ipnauth.Actor + Backend *ipnlocal.LocalBackend + Logf logger.Logf + LogID logid.PublicID + EventBus *eventbus.Bus } type Handler struct { @@ -203,6 +220,7 @@ type Handler struct { logf logger.Logf backendLogID logid.PublicID clock tstime.Clock + eventBus *eventbus.Bus // read-only after initialization } func (h *Handler) Logf(format string, args ...any) { @@ -850,6 +868,7 @@ func (h *Handler) serveDebugPortmap(w http.ResponseWriter, r *http.Request) { NetMon: h.b.NetMon(), DebugKnobs: debugKnobs, ControlKnobs: h.b.ControlKnobs(), + EventBus: h.eventBus, OnChange: func() { logf("portmapping changed.") logf("have mapping: %v", c.HaveMapping()) diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index 30535157c..a1ab86815 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -209,11 +209,8 @@ func (m *pmpMapping) Release(ctx context.Context) { // Config carries the settings for a [Client]. type Config struct { - // EventBus, if non-nil, is used for event publication and subscription by - // portmapper clients created from this config. - // - // TODO(creachadair): As of 2025-03-19 this is optional, but is intended to - // become required non-nil. + // EventBus, which must be non-nil, is used for event publication and + // subscription by portmapper clients created from this config. EventBus *eventbus.Bus // Logf is called to generate text logs for the client. If nil, logger.Discard is used. diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 2715917a2..06709bf8b 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -274,7 +274,13 @@ func (s *Server) Loopback() (addr string, proxyCred, localAPICred string, err er // out the CONNECT code from tailscaled/proxy.go that uses // httputil.ReverseProxy and adding auth support. go func() { - lah := localapi.NewHandler(ipnauth.Self, s.lb, s.logf, s.logid) + lah := localapi.NewHandler(localapi.HandlerConfig{ + Actor: ipnauth.Self, + Backend: s.lb, + Logf: s.logf, + LogID: s.logid, + EventBus: s.sys.Bus.Get(), + }) lah.PermitWrite = true lah.PermitRead = true lah.RequiredPassword = s.localAPICred @@ -676,7 +682,13 @@ func (s *Server) start() (reterr error) { go s.printAuthURLLoop() // Run the localapi handler, to allow fetching LetsEncrypt certs. 
- lah := localapi.NewHandler(ipnauth.Self, lb, tsLogf, s.logid) + lah := localapi.NewHandler(localapi.HandlerConfig{ + Actor: ipnauth.Self, + Backend: lb, + Logf: tsLogf, + LogID: s.logid, + EventBus: sys.Bus.Get(), + }) lah.PermitWrite = true lah.PermitRead = true From d92789affa7f46cf52f94f0d5ed36a9095ece00b Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Mon, 18 Aug 2025 14:33:02 -0600 Subject: [PATCH 1203/1708] tool/gocross: don't set executable bits on PowerShell script Updates https://github.com/tailscale/corp/issues/29940 Signed-off-by: Aaron Klotz --- tool/gocross/gocross.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tool/gocross/gocross.go b/tool/gocross/gocross.go index 6d5d06aeb..c71012d73 100644 --- a/tool/gocross/gocross.go +++ b/tool/gocross/gocross.go @@ -74,7 +74,7 @@ func main() { os.Exit(1) } psFileName := strings.TrimSuffix(os.Args[2], filepath.Ext(os.Args[2])) + ".ps1" - if err := atomicfile.WriteFile(psFileName, wrapperScriptPowerShell, 0755); err != nil { + if err := atomicfile.WriteFile(psFileName, wrapperScriptPowerShell, 0644); err != nil { fmt.Fprintf(os.Stderr, "writing PowerShell wrapper script: %v\n", err) os.Exit(1) } From 84472167dd9cf0a9e1c3c911ac91df91a3ce6de8 Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Mon, 18 Aug 2025 15:27:16 -0600 Subject: [PATCH 1204/1708] tool/gocross: fix environment variable clearing in gocross-wrapper.ps1 The -Environment argument to Start-Process is essentially being treated as a delta; removing a particular variable from the argument's hash table does not indicate to delete. Instead we must set the value of each unwanted variable to $null. Updates https://github.com/tailscale/corp/issues/29940 Signed-off-by: Aaron Klotz --- tool/gocross/gocross-wrapper.ps1 | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/tool/gocross/gocross-wrapper.ps1 b/tool/gocross/gocross-wrapper.ps1 index fcc010dce..fe0b46996 100644 --- a/tool/gocross/gocross-wrapper.ps1 +++ b/tool/gocross/gocross-wrapper.ps1 @@ -176,10 +176,13 @@ $bootstrapScriptBlock = { if (-not $gocrossOk) { $goBuildEnv = Copy-Environment $goBuildEnv['CGO_ENABLED'] = '0' - $goBuildEnv.Remove('GOOS') - $goBuildEnv.Remove('GOARCH') - $goBuildEnv.Remove('GO111MODULE') - $goBuildEnv.Remove('GOROOT') + # Start-Process's -Environment arg applies diffs, so instead of removing + # these variables from $goBuildEnv, we must set them to $null to indicate + # that they should be cleared. + $goBuildEnv['GOOS'] = $null + $goBuildEnv['GOARCH'] = $null + $goBuildEnv['GO111MODULE'] = $null + $goBuildEnv['GOROOT'] = $null $procExe = Join-Path $toolchain 'bin' 'go.exe' -Resolve $proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $goBuildEnv -ArgumentList 'build', '-o', $gocrossPath, "-ldflags=-X=tailscale.com/version.gitCommitStamp=$wantVer", 'tailscale.com/tool/gocross' -NoNewWindow -Wait -PassThru @@ -195,7 +198,10 @@ Start-ChildScope -ScriptBlock $bootstrapScriptBlock $repoRoot = Get-RepoRoot $execEnv = Copy-Environment -$execEnv.Remove('GOROOT') +# Start-Process's -Environment arg applies diffs, so instead of removing +# these variables from $execEnv, we must set them to $null to indicate +# that they should be cleared. 
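A minimal sketch of this behaviour in isolation (the variable name below is hypothetical, not from the script): with Start-Process in PowerShell 7.4, the -Environment hashtable is applied on top of the caller's environment, so a $null value clears a variable in the child process while a string value overrides it.

    $childEnv = @{}
    $childEnv['GOROOT']      = $null   # cleared in the child process
    $childEnv['CGO_ENABLED'] = '0'     # overridden in the child process
    Start-Process -FilePath 'go.exe' -ArgumentList 'env' -Environment $childEnv -NoNewWindow -Wait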
+$execEnv['GOROOT'] = $null $argList = Copy-ScriptArgs From e4031daa086e4a6dce10ab0ffdca1e32b889320c Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Tue, 19 Aug 2025 10:46:07 -0400 Subject: [PATCH 1205/1708] .github/Makefile/flake: update nix flake support (#16636) Cleanup nix support, make flake easier to read with nix-systems. This also harmonizes with golinks flake setup and reduces an input dependency by 1. Update deps test to ensure the vendor hash stays harmonized with go.mod. Update make tidy to ensure vendor hash stays current. Overlay the current version of golang, tailscale runs recent releases faster than nixpkgs can update them into the unstable branch. Updates #16637 Signed-off-by: Mike O'Driscoll --- .github/workflows/test.yml | 4 +- Makefile | 3 +- flake.lock | 22 +--------- flake.nix | 90 +++++++++++++++++++++++--------------- go.mod.sri | 2 +- shell.nix | 2 +- 6 files changed, 62 insertions(+), 61 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index fe7849af6..17e08ae9d 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -720,10 +720,10 @@ jobs: - name: check that 'go mod tidy' is clean working-directory: src run: | - ./tool/go mod tidy + make tidy echo echo - git diff --name-only --exit-code || (echo "Please run 'go mod tidy'."; exit 1) + git diff --name-only --exit-code || (echo "Please run 'make tidy'"; exit 1) licenses: runs-on: ubuntu-24.04 diff --git a/Makefile b/Makefile index 0a7fc28dd..532bded94 100644 --- a/Makefile +++ b/Makefile @@ -8,8 +8,9 @@ PLATFORM ?= "flyio" ## flyio==linux/amd64. Set to "" to build all platforms. vet: ## Run go vet ./tool/go vet ./... -tidy: ## Run go mod tidy +tidy: ## Run go mod tidy and update nix flake hashes ./tool/go mod tidy + ./update-flake.sh lint: ## Run golangci-lint ./tool/go run github.com/golangci/golangci-lint/cmd/golangci-lint run diff --git a/flake.lock b/flake.lock index 87f234e3e..1623342c6 100644 --- a/flake.lock +++ b/flake.lock @@ -16,24 +16,6 @@ "type": "github" } }, - "flake-utils": { - "inputs": { - "systems": "systems" - }, - "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, "nixpkgs": { "locked": { "lastModified": 1753151930, @@ -53,8 +35,8 @@ "root": { "inputs": { "flake-compat": "flake-compat", - "flake-utils": "flake-utils", - "nixpkgs": "nixpkgs" + "nixpkgs": "nixpkgs", + "systems": "systems" } }, "systems": { diff --git a/flake.nix b/flake.nix index 17d263a8d..311c422fb 100644 --- a/flake.nix +++ b/flake.nix @@ -32,7 +32,7 @@ { inputs = { nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; - flake-utils.url = "github:numtide/flake-utils"; + systems.url = "github:nix-systems/default"; # Used by shell.nix as a compat shim. flake-compat = { url = "github:edolstra/flake-compat"; @@ -43,13 +43,29 @@ outputs = { self, nixpkgs, - flake-utils, + systems, flake-compat, }: let - # tailscaleRev is the git commit at which this flake was imported, - # or the empty string when building from a local checkout of the - # tailscale repo. 
+ go124Version = "1.24.6"; + goHash = "sha256-4ctVgqq1iGaLwEwH3hhogHD2uMmyqvNh+CHhm9R8/b0="; + eachSystem = f: + nixpkgs.lib.genAttrs (import systems) (system: + f (import nixpkgs { + system = system; + overlays = [ + (final: prev: { + go_1_24 = prev.go_1_24.overrideAttrs { + version = go124Version; + src = prev.fetchurl { + url = "https://go.dev/dl/go${go124Version}.src.tar.gz"; + hash = goHash; + }; + }; + }) + ]; + })); tailscaleRev = self.rev or ""; + in { # tailscale takes a nixpkgs package set, and builds Tailscale from # the same commit as this flake. IOW, it provides "tailscale built # from HEAD", where HEAD is "whatever commit you imported the @@ -67,16 +83,20 @@ # So really, this flake is for tailscale devs to dogfood with, if # you're an end user you should be prepared for this flake to not # build periodically. - tailscale = pkgs: - pkgs.buildGo124Module rec { + packages = eachSystem (pkgs: rec { + default = pkgs.buildGo124Module { name = "tailscale"; - + pname = "tailscale"; src = ./.; vendorHash = pkgs.lib.fileContents ./go.mod.sri; - nativeBuildInputs = pkgs.lib.optionals pkgs.stdenv.isLinux [pkgs.makeWrapper]; + nativeBuildInputs = [pkgs.makeWrapper pkgs.installShellFiles]; ldflags = ["-X tailscale.com/version.gitCommitStamp=${tailscaleRev}"]; env.CGO_ENABLED = 0; - subPackages = ["cmd/tailscale" "cmd/tailscaled"]; + subPackages = [ + "cmd/tailscale" + "cmd/tailscaled" + "cmd/tsidp" + ]; doCheck = false; # NOTE: We strip the ${PORT} and $FLAGS because they are unset in the @@ -84,32 +104,31 @@ # point, there should be a NixOS module that allows configuration of these # things, but for now, we hardcode the default of port 41641 (taken from # ./cmd/tailscaled/tailscaled.defaults). - postInstall = pkgs.lib.optionalString pkgs.stdenv.isLinux '' - wrapProgram $out/bin/tailscaled --prefix PATH : ${pkgs.lib.makeBinPath [pkgs.iproute2 pkgs.iptables pkgs.getent pkgs.shadow]} - wrapProgram $out/bin/tailscale --suffix PATH : ${pkgs.lib.makeBinPath [pkgs.procps]} + postInstall = + pkgs.lib.optionalString pkgs.stdenv.isLinux '' + wrapProgram $out/bin/tailscaled --prefix PATH : ${pkgs.lib.makeBinPath [pkgs.iproute2 pkgs.iptables pkgs.getent pkgs.shadow]} + wrapProgram $out/bin/tailscale --suffix PATH : ${pkgs.lib.makeBinPath [pkgs.procps]} - sed -i \ - -e "s#/usr/sbin#$out/bin#" \ - -e "/^EnvironmentFile/d" \ - -e 's/''${PORT}/41641/' \ - -e 's/$FLAGS//' \ - ./cmd/tailscaled/tailscaled.service + sed -i \ + -e "s#/usr/sbin#$out/bin#" \ + -e "/^EnvironmentFile/d" \ + -e 's/''${PORT}/41641/' \ + -e 's/$FLAGS//' \ + ./cmd/tailscaled/tailscaled.service - install -D -m0444 -t $out/lib/systemd/system ./cmd/tailscaled/tailscaled.service - ''; + install -D -m0444 -t $out/lib/systemd/system ./cmd/tailscaled/tailscaled.service + '' + + pkgs.lib.optionalString (pkgs.stdenv.buildPlatform.canExecute pkgs.stdenv.hostPlatform) '' + installShellCompletion --cmd tailscale \ + --bash <($out/bin/tailscale completion bash) \ + --fish <($out/bin/tailscale completion fish) \ + --zsh <($out/bin/tailscale completion zsh) + ''; }; + tailscale = default; + }); - # This whole blob makes the tailscale package available for all - # OS/CPU combos that nix supports, as well as a dev shell so that - # "nix develop" and "nix-shell" give you a dev env. 
- flakeForSystem = nixpkgs: system: let - pkgs = nixpkgs.legacyPackages.${system}; - ts = tailscale pkgs; - in { - packages = { - default = ts; - tailscale = ts; - }; + devShells = eachSystem (pkgs: { devShell = pkgs.mkShell { packages = with pkgs; [ curl @@ -126,9 +145,8 @@ e2fsprogs ]; }; - }; - in - flake-utils.lib.eachDefaultSystem (system: flakeForSystem nixpkgs system); + }); + }; } -# nix-direnv cache busting line: sha256-4QTSspHLYJfzlontQ7msXyOB5gzq7ZwSvWmKuYY5klA= +# nix-direnv cache busting line: sha256-Ue1rD4m+rg/J84XhpNZAa3ni/r5FVQZTh6wZcbp7GsE= diff --git a/go.mod.sri b/go.mod.sri index 845086191..34e9a57de 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-4QTSspHLYJfzlontQ7msXyOB5gzq7ZwSvWmKuYY5klA= +sha256-Ue1rD4m+rg/J84XhpNZAa3ni/r5FVQZTh6wZcbp7GsE= diff --git a/shell.nix b/shell.nix index 2eb5b441a..9dfdf4935 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-4QTSspHLYJfzlontQ7msXyOB5gzq7ZwSvWmKuYY5klA= +# nix-direnv cache busting line: sha256-Ue1rD4m+rg/J84XhpNZAa3ni/r5FVQZTh6wZcbp7GsE= From 2581e387899413e9933d28101a1a3707331f0327 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Tue, 19 Aug 2025 12:13:55 -0400 Subject: [PATCH 1206/1708] prober: update runall handler to be generic (#16895) Update the runall handler to be more generic with an exclude param to exclude multiple probes as the requesters definition. Updates tailscale/corp#27370 Signed-off-by: Mike O'Driscoll --- prober/prober.go | 5 +++- prober/prober_test.go | 69 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 72 insertions(+), 2 deletions(-) diff --git a/prober/prober.go b/prober/prober.go index b69d26821..9c494c3c9 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -18,6 +18,7 @@ import ( "maps" "math/rand" "net/http" + "slices" "sync" "time" @@ -585,10 +586,12 @@ type RunHandlerAllResponse struct { } func (p *Prober) RunAllHandler(w http.ResponseWriter, r *http.Request) error { + excluded := r.URL.Query()["exclude"] + probes := make(map[string]*Probe) p.mu.Lock() for _, probe := range p.probes { - if !probe.IsContinuous() && probe.name != "derpmap-probe" { + if !probe.IsContinuous() && !slices.Contains(excluded, probe.name) { probes[probe.name] = probe } } diff --git a/prober/prober_test.go b/prober/prober_test.go index 7cb841936..15db21a5e 100644 --- a/prober/prober_test.go +++ b/prober/prober_test.go @@ -11,6 +11,7 @@ import ( "io" "net/http" "net/http/httptest" + "net/url" "strings" "sync" "sync/atomic" @@ -722,7 +723,7 @@ func TestRunAllHandler(t *testing.T) { mux.Handle("/prober/runall/", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunAllHandler), tsweb.HandlerOptions{})) - req, err := http.NewRequest("GET", server.URL+"/prober/runall/", nil) + req, err := http.NewRequest("GET", server.URL+"/prober/runall", nil) if err != nil { t.Fatalf("failed to create request: %v", err) } @@ -757,6 +758,72 @@ func TestRunAllHandler(t *testing.T) { } +func TestExcludeInRunAll(t *testing.T) { + clk := newFakeTime() + p := newForTest(clk.Now, clk.NewTicker).WithOnce(true) + + wantJSONResponse := RunHandlerAllResponse{ + Results: map[string]RunHandlerResponse{ + "includedProbe": { + ProbeInfo: ProbeInfo{ + Name: "includedProbe", + Interval: probeInterval, + Status: ProbeStatusSucceeded, + RecentResults: []bool{true, true}, + }, + PreviousSuccessRatio: 1, + }, + }, + } + + p.Run("includedProbe", probeInterval, nil, FuncProbe(func(context.Context) error { return nil })) + p.Run("excludedProbe", probeInterval, nil, 
FuncProbe(func(context.Context) error { return nil })) + p.Run("excludedOtherProbe", probeInterval, nil, FuncProbe(func(context.Context) error { return nil })) + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + mux.Handle("/prober/runall", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunAllHandler), tsweb.HandlerOptions{})) + + req, err := http.NewRequest("GET", server.URL+"/prober/runall", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + // Exclude probes with "excluded" in their name + req.URL.RawQuery = url.Values{ + "exclude": []string{"excludedProbe", "excludedOtherProbe"}, + }.Encode() + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("failed to make request: %v", err) + } + + if resp.StatusCode != http.StatusOK { + t.Errorf("unexpected response code: got %d, want %d", resp.StatusCode, http.StatusOK) + } + + var gotJSON RunHandlerAllResponse + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } + + if err := json.Unmarshal(body, &gotJSON); err != nil { + t.Fatalf("failed to unmarshal JSON response: %v; body: %s", err, body) + } + + if resp.Header.Get("Content-Type") != "application/json" { + t.Errorf("unexpected content type: got %q, want application/json", resp.Header.Get("Content-Type")) + } + + if diff := cmp.Diff(wantJSONResponse, gotJSON, cmpopts.IgnoreFields(ProbeInfo{}, "Start", "End", "Labels", "RecentLatencies")); diff != "" { + t.Errorf("unexpected JSON response (-want +got):\n%s", diff) + } +} + type fakeTicker struct { ch chan time.Time interval time.Duration From b28699cd316e339c86e1f0a4751ed7021db3c787 Mon Sep 17 00:00:00 2001 From: Adrian Dewhurst Date: Tue, 19 Aug 2025 12:05:41 -0400 Subject: [PATCH 1207/1708] types/views: add min/max helpers to views.Slice This has come up in a few situations recently and adding these helpers is much better than copying the slice (calling AsSlice()) in order to use slices.Max and friends. Updates #cleanup Change-Id: Ib289a07d23c3687220c72c4ce341b9695cd875bf Signed-off-by: Adrian Dewhurst --- types/views/views.go | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/types/views/views.go b/types/views/views.go index 6d15b80d4..252f126a7 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -7,6 +7,7 @@ package views import ( "bytes" + "cmp" jsonv1 "encoding/json" "errors" "fmt" @@ -363,6 +364,20 @@ func (v Slice[T]) ContainsFunc(f func(T) bool) bool { return slices.ContainsFunc(v.ж, f) } +// MaxFunc returns the maximal value in v, using cmp to compare elements. It +// panics if v is empty. If there is more than one maximal element according to +// the cmp function, MaxFunc returns the first one. See also [slices.MaxFunc]. +func (v Slice[T]) MaxFunc(cmp func(a, b T) int) T { + return slices.MaxFunc(v.ж, cmp) +} + +// MinFunc returns the minimal value in v, using cmp to compare elements. It +// panics if v is empty. If there is more than one minimal element according to +// the cmp function, MinFunc returns the first one. See also [slices.MinFunc]. +func (v Slice[T]) MinFunc(cmp func(a, b T) int) T { + return slices.MinFunc(v.ж, cmp) +} + // AppendStrings appends the string representation of each element in v to dst. 
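A hypothetical call site for the new MaxFunc and SliceMin helpers (the identifiers below are illustrative, not from the patch; assume durations is a views.Slice[time.Duration]):

    // Largest element according to a comparison function.
    worst := durations.MaxFunc(func(a, b time.Duration) int { return cmp.Compare(a, b) })
    // Smallest element of a slice view over an ordered type.
    smallest := views.SliceMin(views.SliceOf([]int{3, 1, 2})) // 1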
func AppendStrings[T fmt.Stringer](dst []string, v Slice[T]) []string { for _, x := range v.ж { @@ -383,6 +398,20 @@ func SliceEqual[T comparable](a, b Slice[T]) bool { return slices.Equal(a.ж, b.ж) } +// SliceMax returns the maximal value in v. It panics if v is empty. For +// floating point T, SliceMax propagates NaNs (any NaN value in v forces the +// output to be NaN). See also [slices.Max]. +func SliceMax[T cmp.Ordered](v Slice[T]) T { + return slices.Max(v.ж) +} + +// SliceMin returns the minimal value in v. It panics if v is empty. For +// floating point T, SliceMin propagates NaNs (any NaN value in v forces the +// output to be NaN). See also [slices.Min]. +func SliceMin[T cmp.Ordered](v Slice[T]) T { + return slices.Min(v.ж) +} + // shortOOOLen (short Out-of-Order length) is the slice length at or // under which we attempt to compare two slices quadratically rather // than allocating memory for a map in SliceEqualAnyOrder and From 5c560d748903b06713a0506b4f1f6f58aa273973 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Fri, 15 Aug 2025 13:29:56 -0700 Subject: [PATCH 1208/1708] tsconsensus: check for bootstrap error We have been unintentionally ignoring errors from calling bootstrap. bootstrap sometimes calls raft.BootstrapCluster which sometimes returns a safe to ignore error, handle that case appropriately. Updates #14667 Signed-off-by: Fran Bull --- tsconsensus/tsconsensus.go | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/tsconsensus/tsconsensus.go b/tsconsensus/tsconsensus.go index b6bf37310..53a2c3f54 100644 --- a/tsconsensus/tsconsensus.go +++ b/tsconsensus/tsconsensus.go @@ -209,7 +209,21 @@ func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, clusterTag strin } c.raft = r - c.bootstrap(auth.AllowedPeers()) + // we may already be in a consensus (see comment above before startRaft) but we're going to + // try to bootstrap anyway in case this is a fresh start. + err = c.bootstrap(auth.AllowedPeers()) + if err != nil { + if errors.Is(err, raft.ErrCantBootstrap) { + // Raft cluster state can be persisted, if we try to call raft.BootstrapCluster when + // we already have cluster state it will return raft.ErrCantBootstrap. It's safe to + // ignore (according to the comment in the raft code), and we can expect that the other + // nodes of the cluster will become available at some point and we can get back into the + // consensus. + log.Print("Bootstrap: raft has cluster state, waiting for peers") + } else { + return nil, err + } + } if cfg.ServeDebugMonitor { srv, err = serveMonitor(&c, ts, netip.AddrPortFrom(c.self.hostAddr, cfg.MonitorPort).String()) @@ -292,9 +306,9 @@ type Consensus struct { // bootstrap tries to join a raft cluster, or start one. // // We need to do the very first raft cluster configuration, but after that raft manages it. -// bootstrap is called at start up, and we are not currently aware of what the cluster config might be, +// bootstrap is called at start up, and we may not currently be aware of what the cluster config might be, // our node may already be in it. Try to join the raft cluster of all the other nodes we know about, and -// if unsuccessful, assume we are the first and start our own. +// if unsuccessful, assume we are the first and try to start our own. // // It's possible for bootstrap to return an error, or start a errant breakaway cluster. 
// From d4b720012987f67a3b3a636a33f89c446590c467 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 19 Aug 2025 14:44:39 -0700 Subject: [PATCH 1209/1708] net/udprelay: use batching.Conn (#16866) This significantly improves throughput of a peer relay server on Linux. Server.packetReadLoop no longer passes sockets down the stack. Instead, packet handling methods return a netip.AddrPort and []byte, which packetReadLoop gathers together for eventual batched writes on the appropriate socket(s). Updates tailscale/corp#31164 Signed-off-by: Jordan Whited --- cmd/tailscaled/depaware.txt | 2 +- net/batching/conn.go | 1 - net/batching/conn_default.go | 2 + net/batching/conn_linux.go | 6 +- net/batching/conn_linux_test.go | 4 +- net/udprelay/server.go | 201 +++++++++++++++++++++++--------- 6 files changed, 153 insertions(+), 63 deletions(-) diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 07f5958ca..e60c1cb04 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -311,7 +311,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ - 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock + 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ diff --git a/net/batching/conn.go b/net/batching/conn.go index 2c6100258..77cdf8c84 100644 --- a/net/batching/conn.go +++ b/net/batching/conn.go @@ -32,7 +32,6 @@ type Conn interface { // message may fall on either side of a nonzero. // // Each [ipv6.Message.OOB] must be sized to at least MinControlMessageSize(). - // len(msgs) must be at least MinReadBatchMsgsLen(). ReadBatch(msgs []ipv6.Message, flags int) (n int, err error) // WriteBatchTo writes buffs to addr. // diff --git a/net/batching/conn_default.go b/net/batching/conn_default.go index ed5c494f3..37d644f50 100644 --- a/net/batching/conn_default.go +++ b/net/batching/conn_default.go @@ -19,3 +19,5 @@ var controlMessageSize = 0 func MinControlMessageSize() int { return controlMessageSize } + +const IdealBatchSize = 1 diff --git a/net/batching/conn_linux.go b/net/batching/conn_linux.go index 09a80ed9f..7f6c4ed42 100644 --- a/net/batching/conn_linux.go +++ b/net/batching/conn_linux.go @@ -384,7 +384,7 @@ func setGSOSizeInControl(control *[]byte, gsoSize uint16) { } // TryUpgradeToConn probes the capabilities of the OS and pconn, and upgrades -// pconn to a [Conn] if appropriate. A batch size of MinReadBatchMsgsLen() is +// pconn to a [Conn] if appropriate. A batch size of [IdealBatchSize] is // suggested for the best performance. 
func TryUpgradeToConn(pconn nettype.PacketConn, network string, batchSize int) nettype.PacketConn { if runtime.GOOS != "linux" { @@ -457,6 +457,4 @@ func MinControlMessageSize() int { return controlMessageSize } -func MinReadBatchMsgsLen() int { - return 128 -} +const IdealBatchSize = 128 diff --git a/net/batching/conn_linux_test.go b/net/batching/conn_linux_test.go index e33ad6d7a..e518c3f9f 100644 --- a/net/batching/conn_linux_test.go +++ b/net/batching/conn_linux_test.go @@ -310,7 +310,7 @@ func TestMinReadBatchMsgsLen(t *testing.T) { // So long as magicsock uses [Conn], and [wireguard-go/conn.Bind] API is // shaped for wireguard-go to control packet memory, these values should be // aligned. - if MinReadBatchMsgsLen() != conn.IdealBatchSize { - t.Fatalf("MinReadBatchMsgsLen():%d != conn.IdealBatchSize(): %d", MinReadBatchMsgsLen(), conn.IdealBatchSize) + if IdealBatchSize != conn.IdealBatchSize { + t.Fatalf("IdealBatchSize: %d != conn.IdealBatchSize(): %d", IdealBatchSize, conn.IdealBatchSize) } } diff --git a/net/udprelay/server.go b/net/udprelay/server.go index e138c33f2..a039c9930 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -20,8 +20,11 @@ import ( "time" "go4.org/mem" + "golang.org/x/net/ipv6" "tailscale.com/client/local" "tailscale.com/disco" + "tailscale.com/net/batching" + "tailscale.com/net/netaddr" "tailscale.com/net/netcheck" "tailscale.com/net/netmon" "tailscale.com/net/packet" @@ -57,10 +60,10 @@ type Server struct { bindLifetime time.Duration steadyStateLifetime time.Duration bus *eventbus.Bus - uc4 *net.UDPConn // always non-nil - uc4Port uint16 // always nonzero - uc6 *net.UDPConn // may be nil if IPv6 bind fails during initialization - uc6Port uint16 // may be zero if IPv6 bind fails during initialization + uc4 batching.Conn // always non-nil + uc4Port uint16 // always nonzero + uc6 batching.Conn // may be nil if IPv6 bind fails during initialization + uc6Port uint16 // may be zero if IPv6 bind fails during initialization closeOnce sync.Once wg sync.WaitGroup closeCh chan struct{} @@ -96,9 +99,9 @@ type serverEndpoint struct { allocatedAt time.Time } -func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, conn *net.UDPConn, serverDisco key.DiscoPublic) { +func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, serverDisco key.DiscoPublic) (write []byte, to netip.AddrPort) { if senderIndex != 0 && senderIndex != 1 { - return + return nil, netip.AddrPort{} } otherSender := 0 @@ -121,15 +124,15 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex err := validateVNIAndRemoteKey(discoMsg.BindUDPRelayEndpointCommon) if err != nil { // silently drop - return + return nil, netip.AddrPort{} } if discoMsg.Generation == 0 { // Generation must be nonzero, silently drop - return + return nil, netip.AddrPort{} } if e.handshakeGeneration[senderIndex] == discoMsg.Generation { // we've seen this generation before, silently drop - return + return nil, netip.AddrPort{} } e.handshakeGeneration[senderIndex] = discoMsg.Generation e.handshakeAddrPorts[senderIndex] = from @@ -144,19 +147,18 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex gh.VNI.Set(e.vni) err = gh.Encode(reply) if err != nil { - return + return nil, netip.AddrPort{} } reply = append(reply, disco.Magic...) 
reply = serverDisco.AppendTo(reply) box := e.discoSharedSecrets[senderIndex].Seal(m.AppendMarshal(nil)) reply = append(reply, box...) - conn.WriteMsgUDPAddrPort(reply, nil, from) - return + return reply, from case *disco.BindUDPRelayEndpointAnswer: err := validateVNIAndRemoteKey(discoMsg.BindUDPRelayEndpointCommon) if err != nil { // silently drop - return + return nil, netip.AddrPort{} } generation := e.handshakeGeneration[senderIndex] if generation == 0 || // we have no active handshake @@ -164,23 +166,23 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex e.handshakeAddrPorts[senderIndex] != from || // mismatching source for the active handshake !bytes.Equal(e.challenge[senderIndex][:], discoMsg.Challenge[:]) { // mismatching answer for the active handshake // silently drop - return + return nil, netip.AddrPort{} } // Handshake complete. Update the binding for this sender. e.boundAddrPorts[senderIndex] = from e.lastSeen[senderIndex] = time.Now() // record last seen as bound time - return + return nil, netip.AddrPort{} default: // unexpected message types, silently drop - return + return nil, netip.AddrPort{} } } -func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, conn *net.UDPConn, serverDisco key.DiscoPublic) { +func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, serverDisco key.DiscoPublic) (write []byte, to netip.AddrPort) { senderRaw, isDiscoMsg := disco.Source(b) if !isDiscoMsg { // Not a Disco message - return + return nil, netip.AddrPort{} } sender := key.DiscoPublicFromRaw32(mem.B(senderRaw)) senderIndex := -1 @@ -191,63 +193,51 @@ func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []by senderIndex = 1 default: // unknown Disco public key - return + return nil, netip.AddrPort{} } const headerLen = len(disco.Magic) + key.DiscoPublicRawLen discoPayload, ok := e.discoSharedSecrets[senderIndex].Open(b[headerLen:]) if !ok { // unable to decrypt the Disco payload - return + return nil, netip.AddrPort{} } discoMsg, err := disco.Parse(discoPayload) if err != nil { // unable to parse the Disco payload - return + return nil, netip.AddrPort{} } - e.handleDiscoControlMsg(from, senderIndex, discoMsg, conn, serverDisco) + return e.handleDiscoControlMsg(from, senderIndex, discoMsg, serverDisco) } -func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeader, b []byte, rxSocket, otherAFSocket *net.UDPConn, serverDisco key.DiscoPublic) { +func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeader, b []byte, serverDisco key.DiscoPublic) (write []byte, to netip.AddrPort) { if !gh.Control { if !e.isBound() { // not a control packet, but serverEndpoint isn't bound - return + return nil, netip.AddrPort{} } - var to netip.AddrPort switch { case from == e.boundAddrPorts[0]: e.lastSeen[0] = time.Now() - to = e.boundAddrPorts[1] + return b, e.boundAddrPorts[1] case from == e.boundAddrPorts[1]: e.lastSeen[1] = time.Now() - to = e.boundAddrPorts[0] + return b, e.boundAddrPorts[0] default: // unrecognized source - return - } - // Relay the packet towards the other party via the socket associated - // with the destination's address family. If source and destination - // address families are matching we tx on the same socket the packet - // was received (rxSocket), otherwise we use the "other" socket - // (otherAFSocket). [Server] makes no use of dual-stack sockets. 
- if from.Addr().Is4() == to.Addr().Is4() { - rxSocket.WriteMsgUDPAddrPort(b, nil, to) - } else if otherAFSocket != nil { - otherAFSocket.WriteMsgUDPAddrPort(b, nil, to) + return nil, netip.AddrPort{} } - return } if gh.Protocol != packet.GeneveProtocolDisco { // control packet, but not Disco - return + return nil, netip.AddrPort{} } msg := b[packet.GeneveFixedHeaderLength:] - e.handleSealedDiscoControlMsg(from, msg, rxSocket, serverDisco) + return e.handleSealedDiscoControlMsg(from, msg, serverDisco) } func (e *serverEndpoint) isExpired(now time.Time, bindLifetime, steadyStateLifetime time.Duration) bool { @@ -338,10 +328,10 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve } s.wg.Add(1) - go s.packetReadLoop(s.uc4, s.uc6) + go s.packetReadLoop(s.uc4, s.uc6, true) if s.uc6 != nil { s.wg.Add(1) - go s.packetReadLoop(s.uc6, s.uc4) + go s.packetReadLoop(s.uc6, s.uc4, false) } s.wg.Add(1) go s.endpointGCLoop() @@ -425,6 +415,41 @@ func (s *Server) addrDiscoveryLoop() { } } +// This is a compile-time assertion that [singlePacketConn] implements the +// [batching.Conn] interface. +var _ batching.Conn = (*singlePacketConn)(nil) + +// singlePacketConn implements [batching.Conn] with single packet syscall +// operations. +type singlePacketConn struct { + *net.UDPConn +} + +func (c *singlePacketConn) ReadBatch(msgs []ipv6.Message, _ int) (int, error) { + n, ap, err := c.UDPConn.ReadFromUDPAddrPort(msgs[0].Buffers[0]) + if err != nil { + return 0, err + } + msgs[0].N = n + msgs[0].Addr = net.UDPAddrFromAddrPort(netaddr.Unmap(ap)) + return 1, nil +} + +func (c *singlePacketConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort, geneve packet.GeneveHeader, offset int) error { + for _, buff := range buffs { + if geneve.VNI.IsSet() { + geneve.Encode(buff) + } else { + buff = buff[offset:] + } + _, err := c.UDPConn.WriteToUDPAddrPort(buff, addr) + if err != nil { + return err + } + } + return nil +} + // listenOn binds an IPv4 and IPv6 socket to port. We consider it successful if // we manage to bind the IPv4 socket. // @@ -433,7 +458,10 @@ func (s *Server) addrDiscoveryLoop() { // across IPv4 and IPv6 if the requested port is zero. // // TODO: make these "re-bindable" in similar fashion to magicsock as a means to -// deal with EDR software closing them. http://go/corp/30118 +// deal with EDR software closing them. http://go/corp/30118. We could re-use +// [magicsock.RebindingConn], which would also remove the need for +// [singlePacketConn], as [magicsock.RebindingConn] also handles fallback to +// single packet syscall operations. 
func (s *Server) listenOn(port int) error { for _, network := range []string{"udp4", "udp6"} { uc, err := net.ListenUDP(network, &net.UDPAddr{Port: port}) @@ -462,11 +490,16 @@ func (s *Server) listenOn(port int) error { } return err } + pc := batching.TryUpgradeToConn(uc, network, batching.IdealBatchSize) + bc, ok := pc.(batching.Conn) + if !ok { + bc = &singlePacketConn{uc} + } if network == "udp4" { - s.uc4 = uc + s.uc4 = bc s.uc4Port = uint16(portUint) } else { - s.uc6 = uc + s.uc6 = bc s.uc6Port = uint16(portUint) } } @@ -526,18 +559,18 @@ func (s *Server) endpointGCLoop() { } } -func (s *Server) handlePacket(from netip.AddrPort, b []byte, rxSocket, otherAFSocket *net.UDPConn) { +func (s *Server) handlePacket(from netip.AddrPort, b []byte) (write []byte, to netip.AddrPort) { if stun.Is(b) && b[1] == 0x01 { // A b[1] value of 0x01 (STUN method binding) is sufficiently // non-overlapping with the Geneve header where the LSB is always 0 // (part of 6 "reserved" bits). s.netChecker.ReceiveSTUNPacket(b, from) - return + return nil, netip.AddrPort{} } gh := packet.GeneveHeader{} err := gh.Decode(b) if err != nil { - return + return nil, netip.AddrPort{} } // TODO: consider performance implications of holding s.mu for the remainder // of this method, which does a bunch of disco/crypto work depending. Keep @@ -547,13 +580,13 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte, rxSocket, otherAFSo e, ok := s.byVNI[gh.VNI.Get()] if !ok { // unknown VNI - return + return nil, netip.AddrPort{} } - e.handlePacket(from, gh, b, rxSocket, otherAFSocket, s.discoPublic) + return e.handlePacket(from, gh, b, s.discoPublic) } -func (s *Server) packetReadLoop(readFromSocket, otherSocket *net.UDPConn) { +func (s *Server) packetReadLoop(readFromSocket, otherSocket batching.Conn, readFromSocketIsIPv4 bool) { defer func() { // We intentionally close the [Server] if we encounter a socket read // error below, at least until socket "re-binding" is implemented as @@ -564,15 +597,73 @@ func (s *Server) packetReadLoop(readFromSocket, otherSocket *net.UDPConn) { s.wg.Done() s.Close() }() - b := make([]byte, 1<<16-1) + + msgs := make([]ipv6.Message, batching.IdealBatchSize) + for i := range msgs { + msgs[i].OOB = make([]byte, batching.MinControlMessageSize()) + msgs[i].Buffers = make([][]byte, 1) + msgs[i].Buffers[0] = make([]byte, 1<<16-1) + } + writeBuffsByDest := make(map[netip.AddrPort][][]byte, batching.IdealBatchSize) + for { + for i := range msgs { + msgs[i] = ipv6.Message{Buffers: msgs[i].Buffers, OOB: msgs[i].OOB[:cap(msgs[i].OOB)]} + } + // TODO: extract laddr from IP_PKTINFO for use in reply - n, from, err := readFromSocket.ReadFromUDPAddrPort(b) + // ReadBatch will split coalesced datagrams before returning, which + // WriteBatchTo will re-coalesce further down. We _could_ be more + // efficient and not split datagrams that belong to the same VNI if they + // are non-control/handshake packets. We pay the memmove/memcopy + // performance penalty for now in the interest of simple single packet + // handlers. 
+ n, err := readFromSocket.ReadBatch(msgs, 0) if err != nil { s.logf("error reading from socket(%v): %v", readFromSocket.LocalAddr(), err) return } - s.handlePacket(from, b[:n], readFromSocket, otherSocket) + + for _, msg := range msgs[:n] { + if msg.N == 0 { + continue + } + buf := msg.Buffers[0][:msg.N] + from := msg.Addr.(*net.UDPAddr).AddrPort() + write, to := s.handlePacket(from, buf) + if !to.IsValid() { + continue + } + if from.Addr().Is4() == to.Addr().Is4() || otherSocket != nil { + buffs, ok := writeBuffsByDest[to] + if !ok { + buffs = make([][]byte, 0, batching.IdealBatchSize) + } + buffs = append(buffs, write) + writeBuffsByDest[to] = buffs + } else { + // This is unexpected. We should never produce a packet to write + // to the "other" socket if the other socket is nil/unbound. + // [server.handlePacket] has to see a packet from a particular + // address family at least once in order for it to return a + // packet to write towards a dest for the same address family. + s.logf("[unexpected] packet from: %v produced packet to: %v while otherSocket is nil", from, to) + } + } + + for dest, buffs := range writeBuffsByDest { + // Write the packet batches via the socket associated with the + // destination's address family. If source and destination address + // families are matching we tx on the same socket the packet was + // received, otherwise we use the "other" socket. [Server] makes no + // use of dual-stack sockets. + if dest.Addr().Is4() == readFromSocketIsIPv4 { + readFromSocket.WriteBatchTo(buffs, dest, packet.GeneveHeader{}, 0) + } else { + otherSocket.WriteBatchTo(buffs, dest, packet.GeneveHeader{}, 0) + } + delete(writeBuffsByDest, dest) + } } } From d986baa18fbca65462f85fcc0f1c19a38a042fcc Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Wed, 6 Aug 2025 07:43:58 -0700 Subject: [PATCH 1210/1708] tsconsensus,cmd/natc: add 'follower only' bootstrap option Currently consensus has a bootstrap routine where a tsnet node tries to join each other node with the cluster tag, and if it is not able to join any other node it starts its own cluster. That algorithm is racy, and can result in split brain (more than one leader/cluster) if all the nodes for a cluster are started at the same time. Add a FollowOnly argument to the bootstrap function. If provided this tsnet node will never lead, it will try (and retry with exponential back off) to follow any node it can contact. Add a --follow-only flag to cmd/natc that uses this new tsconsensus functionality. Also slightly reorganize some arguments into opts structs. Updates #14667 Signed-off-by: Fran Bull --- cmd/natc/ippool/consensusippool.go | 15 ++++- cmd/natc/natc.go | 31 ++++++----- tsconsensus/tsconsensus.go | 89 ++++++++++++++++++++++-------- tsconsensus/tsconsensus_test.go | 30 ++++++++-- 4 files changed, 121 insertions(+), 44 deletions(-) diff --git a/cmd/natc/ippool/consensusippool.go b/cmd/natc/ippool/consensusippool.go index 3bc21bd03..821f12fae 100644 --- a/cmd/natc/ippool/consensusippool.go +++ b/cmd/natc/ippool/consensusippool.go @@ -149,12 +149,21 @@ func (ipp *ConsensusIPPool) domainLookup(from tailcfg.NodeID, addr netip.Addr) ( return ww, true } +type ClusterOpts struct { + Tag string + StateDir string + FollowOnly bool +} + // StartConsensus is part of the IPPool interface. It starts the raft background routines that handle consensus. 
-func (ipp *ConsensusIPPool) StartConsensus(ctx context.Context, ts *tsnet.Server, clusterTag string, clusterStateDir string) error { +func (ipp *ConsensusIPPool) StartConsensus(ctx context.Context, ts *tsnet.Server, opts ClusterOpts) error { cfg := tsconsensus.DefaultConfig() cfg.ServeDebugMonitor = true - cfg.StateDirPath = clusterStateDir - cns, err := tsconsensus.Start(ctx, ts, ipp, clusterTag, cfg) + cfg.StateDirPath = opts.StateDir + cns, err := tsconsensus.Start(ctx, ts, ipp, tsconsensus.BootstrapOpts{ + Tag: opts.Tag, + FollowOnly: opts.FollowOnly, + }, cfg) if err != nil { return err } diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index fdbce3da1..2007f0a24 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -50,18 +50,19 @@ func main() { // Parse flags fs := flag.NewFlagSet("natc", flag.ExitOnError) var ( - debugPort = fs.Int("debug-port", 8893, "Listening port for debug/metrics endpoint") - hostname = fs.String("hostname", "", "Hostname to register the service under") - siteID = fs.Uint("site-id", 1, "an integer site ID to use for the ULA prefix which allows for multiple proxies to act in a HA configuration") - v4PfxStr = fs.String("v4-pfx", "100.64.1.0/24", "comma-separated list of IPv4 prefixes to advertise") - dnsServers = fs.String("dns-servers", "", "comma separated list of upstream DNS to use, including host and port (use system if empty)") - verboseTSNet = fs.Bool("verbose-tsnet", false, "enable verbose logging in tsnet") - printULA = fs.Bool("print-ula", false, "print the ULA prefix and exit") - ignoreDstPfxStr = fs.String("ignore-destinations", "", "comma-separated list of prefixes to ignore") - wgPort = fs.Uint("wg-port", 0, "udp port for wireguard and peer to peer traffic") - clusterTag = fs.String("cluster-tag", "", "optionally run in a consensus cluster with other nodes with this tag") - server = fs.String("login-server", ipn.DefaultControlURL, "the base URL of control server") - stateDir = fs.String("state-dir", "", "path to directory in which to store app state") + debugPort = fs.Int("debug-port", 8893, "Listening port for debug/metrics endpoint") + hostname = fs.String("hostname", "", "Hostname to register the service under") + siteID = fs.Uint("site-id", 1, "an integer site ID to use for the ULA prefix which allows for multiple proxies to act in a HA configuration") + v4PfxStr = fs.String("v4-pfx", "100.64.1.0/24", "comma-separated list of IPv4 prefixes to advertise") + dnsServers = fs.String("dns-servers", "", "comma separated list of upstream DNS to use, including host and port (use system if empty)") + verboseTSNet = fs.Bool("verbose-tsnet", false, "enable verbose logging in tsnet") + printULA = fs.Bool("print-ula", false, "print the ULA prefix and exit") + ignoreDstPfxStr = fs.String("ignore-destinations", "", "comma-separated list of prefixes to ignore") + wgPort = fs.Uint("wg-port", 0, "udp port for wireguard and peer to peer traffic") + clusterTag = fs.String("cluster-tag", "", "optionally run in a consensus cluster with other nodes with this tag") + server = fs.String("login-server", ipn.DefaultControlURL, "the base URL of control server") + stateDir = fs.String("state-dir", "", "path to directory in which to store app state") + clusterFollowOnly = fs.Bool("follow-only", false, "Try to find a leader with the cluster tag or exit.") ) ff.Parse(fs, os.Args[1:], ff.WithEnvVarPrefix("TS_NATC")) @@ -163,7 +164,11 @@ func main() { if err != nil { log.Fatalf("Creating cluster state dir failed: %v", err) } - err = cipp.StartConsensus(ctx, ts, *clusterTag, 
clusterStateDir) + err = cipp.StartConsensus(ctx, ts, ippool.ClusterOpts{ + Tag: *clusterTag, + StateDir: clusterStateDir, + FollowOnly: *clusterFollowOnly, + }) if err != nil { log.Fatalf("StartConsensus: %v", err) } diff --git a/tsconsensus/tsconsensus.go b/tsconsensus/tsconsensus.go index 53a2c3f54..11b039d57 100644 --- a/tsconsensus/tsconsensus.go +++ b/tsconsensus/tsconsensus.go @@ -157,13 +157,18 @@ func (sl StreamLayer) Accept() (net.Conn, error) { } } +type BootstrapOpts struct { + Tag string + FollowOnly bool +} + // Start returns a pointer to a running Consensus instance. // Calling it with a *tsnet.Server will cause that server to join or start a consensus cluster // with other nodes on the tailnet tagged with the clusterTag. The *tsnet.Server will run the state // machine defined by the raft.FSM also provided, and keep it in sync with the other cluster members' // state machines using Raft. -func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, clusterTag string, cfg Config) (*Consensus, error) { - if clusterTag == "" { +func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, bootstrapOpts BootstrapOpts, cfg Config) (*Consensus, error) { + if bootstrapOpts.Tag == "" { return nil, errors.New("cluster tag must be provided") } @@ -185,7 +190,7 @@ func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, clusterTag strin shutdownCtxCancel: shutdownCtxCancel, } - auth := newAuthorization(ts, clusterTag) + auth := newAuthorization(ts, bootstrapOpts.Tag) err := auth.Refresh(shutdownCtx) if err != nil { return nil, fmt.Errorf("auth refresh: %w", err) @@ -211,7 +216,7 @@ func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, clusterTag strin // we may already be in a consensus (see comment above before startRaft) but we're going to // try to bootstrap anyway in case this is a fresh start. - err = c.bootstrap(auth.AllowedPeers()) + err = c.bootstrap(shutdownCtx, auth, bootstrapOpts) if err != nil { if errors.Is(err, raft.ErrCantBootstrap) { // Raft cluster state can be persisted, if we try to call raft.BootstrapCluster when @@ -303,14 +308,59 @@ type Consensus struct { shutdownCtxCancel context.CancelFunc } +func (c *Consensus) bootstrapTryToJoinAnyTarget(targets views.Slice[*ipnstate.PeerStatus]) bool { + log.Printf("Bootstrap: Trying to find cluster: num targets to try: %d", targets.Len()) + for _, p := range targets.All() { + if !p.Online { + log.Printf("Bootstrap: Trying to find cluster: tailscale reports not online: %s", p.TailscaleIPs[0]) + continue + } + log.Printf("Bootstrap: Trying to find cluster: trying %s", p.TailscaleIPs[0]) + err := c.commandClient.join(p.TailscaleIPs[0].String(), joinRequest{ + RemoteHost: c.self.hostAddr.String(), + RemoteID: c.self.id, + }) + if err != nil { + log.Printf("Bootstrap: Trying to find cluster: could not join %s: %v", p.TailscaleIPs[0], err) + continue + } + log.Printf("Bootstrap: Trying to find cluster: joined %s", p.TailscaleIPs[0]) + return true + } + return false +} + +func (c *Consensus) retryFollow(ctx context.Context, auth *authorization) bool { + waitFor := 500 * time.Millisecond + nRetries := 10 + attemptCount := 1 + for true { + log.Printf("Bootstrap: trying to follow any cluster member: attempt %v", attemptCount) + joined := c.bootstrapTryToJoinAnyTarget(auth.AllowedPeers()) + if joined || attemptCount == nRetries { + return joined + } + log.Printf("Bootstrap: Failed to follow. 
Retrying in %v", waitFor) + time.Sleep(waitFor) + waitFor *= 2 + attemptCount++ + auth.Refresh(ctx) + } + return false +} + // bootstrap tries to join a raft cluster, or start one. // // We need to do the very first raft cluster configuration, but after that raft manages it. // bootstrap is called at start up, and we may not currently be aware of what the cluster config might be, // our node may already be in it. Try to join the raft cluster of all the other nodes we know about, and -// if unsuccessful, assume we are the first and try to start our own. +// if unsuccessful, assume we are the first and try to start our own. If the FollowOnly option is set, only try +// to join, never start our own. // -// It's possible for bootstrap to return an error, or start a errant breakaway cluster. +// It's possible for bootstrap to start an errant breakaway cluster if for example all nodes are having a fresh +// start, they're racing bootstrap and multiple nodes were unable to join a peer and so start their own new cluster. +// To avoid this operators should either ensure bootstrap is called for a single node first and allow it to become +// leader before starting the other nodes. Or start all but one node with the FollowOnly option. // // We have a list of expected cluster members already from control (the members of the tailnet with the tag) // so we could do the initial configuration with all servers specified. @@ -318,27 +368,20 @@ type Consensus struct { // - We want to handle machines joining after start anyway. // - Not all tagged nodes tailscale believes are active are necessarily actually responsive right now, // so let each node opt in when able. -func (c *Consensus) bootstrap(targets views.Slice[*ipnstate.PeerStatus]) error { - log.Printf("Trying to find cluster: num targets to try: %d", targets.Len()) - for _, p := range targets.All() { - if !p.Online { - log.Printf("Trying to find cluster: tailscale reports not online: %s", p.TailscaleIPs[0]) - continue - } - log.Printf("Trying to find cluster: trying %s", p.TailscaleIPs[0]) - err := c.commandClient.join(p.TailscaleIPs[0].String(), joinRequest{ - RemoteHost: c.self.hostAddr.String(), - RemoteID: c.self.id, - }) - if err != nil { - log.Printf("Trying to find cluster: could not join %s: %v", p.TailscaleIPs[0], err) - continue +func (c *Consensus) bootstrap(ctx context.Context, auth *authorization, opts BootstrapOpts) error { + if opts.FollowOnly { + joined := c.retryFollow(ctx, auth) + if !joined { + return errors.New("unable to join cluster") } - log.Printf("Trying to find cluster: joined %s", p.TailscaleIPs[0]) return nil } - log.Printf("Trying to find cluster: unsuccessful, starting as leader: %s", c.self.hostAddr.String()) + joined := c.bootstrapTryToJoinAnyTarget(auth.AllowedPeers()) + if joined { + return nil + } + log.Printf("Bootstrap: Trying to find cluster: unsuccessful, starting as leader: %s", c.self.hostAddr.String()) f := c.raft.BootstrapCluster( raft.Configuration{ Servers: []raft.Server{ diff --git a/tsconsensus/tsconsensus_test.go b/tsconsensus/tsconsensus_test.go index bfb6b3e06..3b51a093f 100644 --- a/tsconsensus/tsconsensus_test.go +++ b/tsconsensus/tsconsensus_test.go @@ -262,7 +262,7 @@ func TestStart(t *testing.T) { waitForNodesToBeTaggedInStatus(t, ctx, one, []key.NodePublic{k}, clusterTag) sm := &fsm{} - r, err := Start(ctx, one, sm, clusterTag, warnLogConfig()) + r, err := Start(ctx, one, sm, BootstrapOpts{Tag: clusterTag}, warnLogConfig()) if err != nil { t.Fatal(err) } @@ -334,7 +334,7 @@ func 
createConsensusCluster(t testing.TB, ctx context.Context, clusterTag string t.Helper() participants[0].sm = &fsm{} myCfg := addIDedLogger("0", cfg) - first, err := Start(ctx, participants[0].ts, participants[0].sm, clusterTag, myCfg) + first, err := Start(ctx, participants[0].ts, participants[0].sm, BootstrapOpts{Tag: clusterTag}, myCfg) if err != nil { t.Fatal(err) } @@ -347,7 +347,7 @@ func createConsensusCluster(t testing.TB, ctx context.Context, clusterTag string for i := 1; i < len(participants); i++ { participants[i].sm = &fsm{} myCfg := addIDedLogger(fmt.Sprintf("%d", i), cfg) - c, err := Start(ctx, participants[i].ts, participants[i].sm, clusterTag, myCfg) + c, err := Start(ctx, participants[i].ts, participants[i].sm, BootstrapOpts{Tag: clusterTag}, myCfg) if err != nil { t.Fatal(err) } @@ -530,7 +530,7 @@ func TestFollowerFailover(t *testing.T) { // follower comes back smThreeAgain := &fsm{} cfg = addIDedLogger("2 after restarting", warnLogConfig()) - rThreeAgain, err := Start(ctx, ps[2].ts, smThreeAgain, clusterTag, cfg) + rThreeAgain, err := Start(ctx, ps[2].ts, smThreeAgain, BootstrapOpts{Tag: clusterTag}, cfg) if err != nil { t.Fatal(err) } @@ -565,7 +565,7 @@ func TestRejoin(t *testing.T) { tagNodes(t, control, []key.NodePublic{keyJoiner}, clusterTag) waitForNodesToBeTaggedInStatus(t, ctx, ps[0].ts, []key.NodePublic{keyJoiner}, clusterTag) smJoiner := &fsm{} - cJoiner, err := Start(ctx, tsJoiner, smJoiner, clusterTag, cfg) + cJoiner, err := Start(ctx, tsJoiner, smJoiner, BootstrapOpts{Tag: clusterTag}, cfg) if err != nil { t.Fatal(err) } @@ -744,3 +744,23 @@ func TestOnlyTaggedPeersCanJoin(t *testing.T) { t.Fatalf("join req when not tagged, expected body: %s, got: %s", expected, sBody) } } + +func TestFollowOnly(t *testing.T) { + testConfig(t) + ctx := context.Background() + clusterTag := "tag:whatever" + ps, _, _ := startNodesAndWaitForPeerStatus(t, ctx, clusterTag, 3) + cfg := warnLogConfig() + + // start the leader + _, err := Start(ctx, ps[0].ts, ps[0].sm, BootstrapOpts{Tag: clusterTag}, cfg) + if err != nil { + t.Fatal(err) + } + + // start the follower with FollowOnly + _, err = Start(ctx, ps[1].ts, ps[1].sm, BootstrapOpts{Tag: clusterTag, FollowOnly: true}, cfg) + if err != nil { + t.Fatal(err) + } +} From b48d2de6ab7ad2588395e896649f4bf6bea73fcf Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Tue, 19 Aug 2025 13:11:10 -0700 Subject: [PATCH 1211/1708] cmd/natc,tsconsensus: add cluster config admin Add the ability for operators of natc in consensus mode to remove servers from the raft cluster config, without losing other state. 
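As an illustration of how an operator might drive the new admin endpoints, here is a
minimal, hypothetical Go client. It assumes the default --cluster-admin-port of 8081,
and "natc-2" is a placeholder server ID, not anything produced by this change:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// List the current raft cluster configuration (GET /).
	resp, err := http.Get("http://127.0.0.1:8081/")
	if err != nil {
		panic(err)
	}
	cfg, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Printf("cluster config: %s\n", cfg)

	// Remove a server from the cluster configuration by its raft ServerID
	// (DELETE /{id}). "natc-2" is a placeholder; use an ID taken from the
	// configuration printed above.
	req, err := http.NewRequest(http.MethodDelete, "http://127.0.0.1:8081/natc-2", nil)
	if err != nil {
		panic(err)
	}
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	idx, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Printf("removed at raft index: %s\n", idx)
}

The admin handler listens on localhost only, so a client like this has to run on the
natc host itself.
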
Updates #14667 Signed-off-by: Fran Bull --- cmd/natc/ippool/consensusippool.go | 17 +++++++++++++ cmd/natc/natc.go | 38 ++++++++++++++++++++++++++++++ tsconsensus/tsconsensus.go | 20 ++++++++++++++++ 3 files changed, 75 insertions(+) diff --git a/cmd/natc/ippool/consensusippool.go b/cmd/natc/ippool/consensusippool.go index 821f12fae..64807b6c2 100644 --- a/cmd/natc/ippool/consensusippool.go +++ b/cmd/natc/ippool/consensusippool.go @@ -30,6 +30,7 @@ type ConsensusIPPool struct { IPSet *netipx.IPSet perPeerMap *syncs.Map[tailcfg.NodeID, *consensusPerPeerState] consensus commandExecutor + clusterController clusterController unusedAddressLifetime time.Duration } @@ -168,6 +169,7 @@ func (ipp *ConsensusIPPool) StartConsensus(ctx context.Context, ts *tsnet.Server return err } ipp.consensus = cns + ipp.clusterController = cns return nil } @@ -442,3 +444,18 @@ func (ipp *ConsensusIPPool) Apply(l *raft.Log) any { type commandExecutor interface { ExecuteCommand(tsconsensus.Command) (tsconsensus.CommandResult, error) } + +type clusterController interface { + GetClusterConfiguration() (raft.Configuration, error) + DeleteClusterServer(id raft.ServerID) (uint64, error) +} + +// GetClusterConfiguration gets the consensus implementation's cluster configuration +func (ipp *ConsensusIPPool) GetClusterConfiguration() (raft.Configuration, error) { + return ipp.clusterController.GetClusterConfiguration() +} + +// DeleteClusterServer removes a server from the consensus implementation's cluster configuration +func (ipp *ConsensusIPPool) DeleteClusterServer(id raft.ServerID) (uint64, error) { + return ipp.clusterController.DeleteClusterServer(id) +} diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index 2007f0a24..a4f53d657 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -8,6 +8,7 @@ package main import ( "context" + "encoding/json" "errors" "expvar" "flag" @@ -23,6 +24,7 @@ import ( "time" "github.com/gaissmai/bart" + "github.com/hashicorp/raft" "github.com/inetaf/tcpproxy" "github.com/peterbourgon/ff/v3" "go4.org/netipx" @@ -63,6 +65,7 @@ func main() { server = fs.String("login-server", ipn.DefaultControlURL, "the base URL of control server") stateDir = fs.String("state-dir", "", "path to directory in which to store app state") clusterFollowOnly = fs.Bool("follow-only", false, "Try to find a leader with the cluster tag or exit.") + clusterAdminPort = fs.Int("cluster-admin-port", 8081, "Port on localhost for the cluster admin HTTP API") ) ff.Parse(fs, os.Args[1:], ff.WithEnvVarPrefix("TS_NATC")) @@ -179,6 +182,12 @@ func main() { } }() ipp = cipp + + go func() { + // This listens on localhost only, so that only those with access to the host machine + // can remove servers from the cluster config. 
+ log.Print(http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", *clusterAdminPort), httpClusterAdmin(cipp))) + }() } else { ipp = &ippool.SingleMachineIPPool{IPSet: addrPool} } @@ -633,3 +642,32 @@ func getClusterStatePath(stateDirFlag string) (string, error) { return dirPath, nil } + +func httpClusterAdmin(ipp *ippool.ConsensusIPPool) http.Handler { + mux := http.NewServeMux() + mux.HandleFunc("GET /{$}", func(w http.ResponseWriter, r *http.Request) { + c, err := ipp.GetClusterConfiguration() + if err != nil { + log.Printf("cluster admin http: error getClusterConfig: %v", err) + http.Error(w, "", http.StatusInternalServerError) + return + } + if err := json.NewEncoder(w).Encode(c); err != nil { + log.Printf("cluster admin http: error encoding raft configuration: %v", err) + } + }) + mux.HandleFunc("DELETE /{id}", func(w http.ResponseWriter, r *http.Request) { + idString := r.PathValue("id") + id := raft.ServerID(idString) + idx, err := ipp.DeleteClusterServer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if err := json.NewEncoder(w).Encode(idx); err != nil { + log.Printf("cluster admin http: error encoding delete index: %v", err) + return + } + }) + return mux +} diff --git a/tsconsensus/tsconsensus.go b/tsconsensus/tsconsensus.go index 11b039d57..1f7dc1b7b 100644 --- a/tsconsensus/tsconsensus.go +++ b/tsconsensus/tsconsensus.go @@ -525,3 +525,23 @@ func (c *Consensus) raftAddr(host netip.Addr) string { func (c *Consensus) commandAddr(host netip.Addr) string { return netip.AddrPortFrom(host, c.config.CommandPort).String() } + +// GetClusterConfiguration returns the result of the underlying raft instance's GetConfiguration +func (c *Consensus) GetClusterConfiguration() (raft.Configuration, error) { + fut := c.raft.GetConfiguration() + err := fut.Error() + if err != nil { + return raft.Configuration{}, err + } + return fut.Configuration(), nil +} + +// DeleteClusterServer returns the result of the underlying raft instance's RemoveServer +func (c *Consensus) DeleteClusterServer(id raft.ServerID) (uint64, error) { + fut := c.raft.RemoveServer(id, 0, 1*time.Second) + err := fut.Error() + if err != nil { + return 0, err + } + return fut.Index(), nil +} From 641a90ea33b07e4550eb244ad02f6d1b4b30baeb Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 20 Aug 2025 16:24:00 -0700 Subject: [PATCH 1212/1708] net/sockopts,wgengine/magicsock: export socket buffer sizing logic (#16909) For eventual use by net/udprelay.Server Updates tailscale/corp#31164 Signed-off-by: Jordan Whited --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + net/sockopts/sockopts.go | 37 +++++++++++++++++ net/sockopts/sockopts_default.go | 21 ++++++++++ net/sockopts/sockopts_linux.go | 40 +++++++++++++++++++ .../sockopts/sockopts_unix_test.go | 7 ++-- tsnet/depaware.txt | 1 + wgengine/magicsock/magicsock.go | 26 ++++++------ wgengine/magicsock/magicsock_default.go | 7 ---- wgengine/magicsock/magicsock_linux.go | 29 -------------- 11 files changed, 119 insertions(+), 52 deletions(-) create mode 100644 net/sockopts/sockopts.go create mode 100644 net/sockopts/sockopts_default.go create mode 100644 net/sockopts/sockopts_linux.go rename wgengine/magicsock/magicsock_unix_test.go => net/sockopts/sockopts_unix_test.go (87%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 1ecef4953..d9cc43e6b 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -867,6 +867,7 @@ 
tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable + tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index e60c1cb04..219de5b0c 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -339,6 +339,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/cmd/tailscaled tailscale.com/net/routetable from tailscale.com/doctor/routetable + tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/cmd/tailscaled tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 5e558a0cd..2cd76f91a 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -297,6 +297,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable + tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/net/sockopts/sockopts.go b/net/sockopts/sockopts.go new file mode 100644 index 000000000..0c0ee7692 --- /dev/null +++ b/net/sockopts/sockopts.go @@ -0,0 +1,37 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package sockopts contains logic for applying socket options. +package sockopts + +import ( + "net" + "runtime" + + "tailscale.com/types/nettype" +) + +// BufferDirection represents either the read/receive or write/send direction +// of a socket buffer. +type BufferDirection string + +const ( + ReadDirection BufferDirection = "read" + WriteDirection BufferDirection = "write" +) + +func portableSetBufferSize(pconn nettype.PacketConn, direction BufferDirection, size int) error { + if runtime.GOOS == "plan9" { + // Not supported. Don't try. Avoid logspam. + return nil + } + var err error + if c, ok := pconn.(*net.UDPConn); ok { + if direction == WriteDirection { + err = c.SetWriteBuffer(size) + } else { + err = c.SetReadBuffer(size) + } + } + return err +} diff --git a/net/sockopts/sockopts_default.go b/net/sockopts/sockopts_default.go new file mode 100644 index 000000000..3cc8679b5 --- /dev/null +++ b/net/sockopts/sockopts_default.go @@ -0,0 +1,21 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux + +package sockopts + +import ( + "tailscale.com/types/nettype" +) + +// SetBufferSize sets pconn's buffer to size for direction. size may be silently +// capped depending on platform. +// +// errForce is only relevant for Linux, and will always be nil otherwise, +// but we maintain a consistent cross-platform API. 
+// +// If pconn is not a [*net.UDPConn], then SetBufferSize is no-op. +func SetBufferSize(pconn nettype.PacketConn, direction BufferDirection, size int) (errForce error, errPortable error) { + return nil, portableSetBufferSize(pconn, direction, size) +} diff --git a/net/sockopts/sockopts_linux.go b/net/sockopts/sockopts_linux.go new file mode 100644 index 000000000..5d778d380 --- /dev/null +++ b/net/sockopts/sockopts_linux.go @@ -0,0 +1,40 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package sockopts + +import ( + "net" + "syscall" + + "tailscale.com/types/nettype" +) + +// SetBufferSize sets pconn's buffer to size for direction. It attempts +// (errForce) to set SO_SNDBUFFORCE or SO_RECVBUFFORCE which can overcome the +// limit of net.core.{r,w}mem_max, but require CAP_NET_ADMIN. It falls back to +// the portable implementation (errPortable) if that fails, which may be +// silently capped to net.core.{r,w}mem_max. +// +// If pconn is not a [*net.UDPConn], then SetBufferSize is no-op. +func SetBufferSize(pconn nettype.PacketConn, direction BufferDirection, size int) (errForce error, errPortable error) { + opt := syscall.SO_RCVBUFFORCE + if direction == WriteDirection { + opt = syscall.SO_SNDBUFFORCE + } + if c, ok := pconn.(*net.UDPConn); ok { + var rc syscall.RawConn + rc, errForce = c.SyscallConn() + if errForce == nil { + rc.Control(func(fd uintptr) { + errForce = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, opt, size) + }) + } + if errForce != nil { + errPortable = portableSetBufferSize(pconn, direction, size) + } + } + return errForce, errPortable +} diff --git a/wgengine/magicsock/magicsock_unix_test.go b/net/sockopts/sockopts_unix_test.go similarity index 87% rename from wgengine/magicsock/magicsock_unix_test.go rename to net/sockopts/sockopts_unix_test.go index b0700a8eb..ebb4354ac 100644 --- a/wgengine/magicsock/magicsock_unix_test.go +++ b/net/sockopts/sockopts_unix_test.go @@ -3,7 +3,7 @@ //go:build unix -package magicsock +package sockopts import ( "net" @@ -13,7 +13,7 @@ import ( "tailscale.com/types/nettype" ) -func TestTrySetSocketBuffer(t *testing.T) { +func TestSetBufferSize(t *testing.T) { c, err := net.ListenPacket("udp", ":0") if err != nil { t.Fatal(err) @@ -42,7 +42,8 @@ func TestTrySetSocketBuffer(t *testing.T) { curRcv, curSnd := getBufs() - trySetSocketBuffer(c.(nettype.PacketConn), t.Logf) + SetBufferSize(c.(nettype.PacketConn), ReadDirection, 7<<20) + SetBufferSize(c.(nettype.PacketConn), WriteDirection, 7<<20) newRcv, newSnd := getBufs() diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 9ad340c90..d7d5be658 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -293,6 +293,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable + tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a99a0a8e3..a59a38f65 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -45,6 +45,7 @@ import ( "tailscale.com/net/packet" "tailscale.com/net/ping" "tailscale.com/net/portmapper" + 
"tailscale.com/net/sockopts" "tailscale.com/net/sockstats" "tailscale.com/net/stun" "tailscale.com/net/tstun" @@ -3857,20 +3858,19 @@ func (c *Conn) DebugForcePreferDERP(n int) { c.netChecker.SetForcePreferredDERP(n) } -// portableTrySetSocketBuffer sets SO_SNDBUF and SO_RECVBUF on pconn to socketBufferSize, -// logging an error if it occurs. -func portableTrySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { - if runtime.GOOS == "plan9" { - // Not supported. Don't try. Avoid logspam. - return - } - if c, ok := pconn.(*net.UDPConn); ok { - // Attempt to increase the buffer size, and allow failures. - if err := c.SetReadBuffer(socketBufferSize); err != nil { - logf("magicsock: failed to set UDP read buffer size to %d: %v", socketBufferSize, err) +// trySetSocketBuffer attempts to set SO_SNDBUFFORCE and SO_RECVBUFFORCE which +// can overcome the limit of net.core.{r,w}mem_max, but require CAP_NET_ADMIN. +// It falls back to the portable implementation if that fails, which may be +// silently capped to net.core.{r,w}mem_max. +func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { + directions := []sockopts.BufferDirection{sockopts.ReadDirection, sockopts.WriteDirection} + for _, direction := range directions { + forceErr, portableErr := sockopts.SetBufferSize(pconn, direction, socketBufferSize) + if forceErr != nil { + logf("magicsock: [warning] failed to force-set UDP %v buffer size to %d: %v; using kernel default values (impacts throughput only)", direction, socketBufferSize, forceErr) } - if err := c.SetWriteBuffer(socketBufferSize); err != nil { - logf("magicsock: failed to set UDP write buffer size to %d: %v", socketBufferSize, err) + if portableErr != nil { + logf("magicsock: failed to set UDP %v buffer size to %d: %v", direction, socketBufferSize, portableErr) } } } diff --git a/wgengine/magicsock/magicsock_default.go b/wgengine/magicsock/magicsock_default.go index 4922f2c09..1c315034a 100644 --- a/wgengine/magicsock/magicsock_default.go +++ b/wgengine/magicsock/magicsock_default.go @@ -9,15 +9,8 @@ import ( "errors" "fmt" "io" - - "tailscale.com/types/logger" - "tailscale.com/types/nettype" ) func (c *Conn) listenRawDisco(family string) (io.Closer, error) { return nil, fmt.Errorf("raw disco listening not supported on this OS: %w", errors.ErrUnsupported) } - -func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { - portableTrySetSocketBuffer(pconn, logf) -} diff --git a/wgengine/magicsock/magicsock_linux.go b/wgengine/magicsock/magicsock_linux.go index 3369bcb89..cad0e9b5e 100644 --- a/wgengine/magicsock/magicsock_linux.go +++ b/wgengine/magicsock/magicsock_linux.go @@ -13,7 +13,6 @@ import ( "net" "net/netip" "strings" - "syscall" "time" "github.com/mdlayher/socket" @@ -28,7 +27,6 @@ import ( "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/logger" - "tailscale.com/types/nettype" ) const ( @@ -489,30 +487,3 @@ func printSockaddr(sa unix.Sockaddr) string { return fmt.Sprintf("unknown(%T)", sa) } } - -// trySetSocketBuffer attempts to set SO_SNDBUFFORCE and SO_RECVBUFFORCE which -// can overcome the limit of net.core.{r,w}mem_max, but require CAP_NET_ADMIN. -// It falls back to the portable implementation if that fails, which may be -// silently capped to net.core.{r,w}mem_max. 
-func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { - if c, ok := pconn.(*net.UDPConn); ok { - var errRcv, errSnd error - rc, err := c.SyscallConn() - if err == nil { - rc.Control(func(fd uintptr) { - errRcv = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUFFORCE, socketBufferSize) - if errRcv != nil { - logf("magicsock: [warning] failed to force-set UDP read buffer size to %d: %v; using kernel default values (impacts throughput only)", socketBufferSize, errRcv) - } - errSnd = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDBUFFORCE, socketBufferSize) - if errSnd != nil { - logf("magicsock: [warning] failed to force-set UDP write buffer size to %d: %v; using kernel default values (impacts throughput only)", socketBufferSize, errSnd) - } - }) - } - - if err != nil || errRcv != nil || errSnd != nil { - portableTrySetSocketBuffer(pconn, logf) - } - } -} From 3e198f6d5f859c75cd049932d82ee26a5d6df8fa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Aug 2025 09:48:31 -0600 Subject: [PATCH 1213/1708] .github: Bump github/codeql-action from 3.29.7 to 3.29.8 (#16828) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.7 to 3.29.8. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/51f77329afa6477de8c49fc9c7046c15b9a4e79d...76621b61decf072c1cee8dd1ce2d2a82d33c17ed) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.8 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 90a20e2f0..2f5ae7d92 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/init@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.5 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/autobuild@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.5 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/analyze@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.5 From e296a6be8dcf2ad8f6a16a9e84afa11fd0546bec Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Thu, 21 Aug 2025 13:56:11 -0400 Subject: [PATCH 1214/1708] cmd/tsidp: update oidc-funnel-clients.json store path (#16845) Update odic-funnel-clients.json to take a path, this allows setting the location of the file and prevents it from landing in the root directory or users home directory. Move setting of rootPath until after tsnet has started. Previously this was added for the lazy creation of the oidc-key.json. It's now needed earlier in the flow. Updates #16734 Fixes #16844 Signed-off-by: Mike O'Driscoll --- cmd/tsidp/tsidp.go | 43 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 7 deletions(-) diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index e68e55ca9..2fc6d27e4 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -142,8 +142,6 @@ func main() { Hostname: *flagHostname, Dir: *flagDir, } - rootPath = ts.GetRootPath() - log.Printf("tsidp root path: %s", rootPath) if *flagVerbose { ts.Logf = log.Printf } @@ -168,6 +166,9 @@ func main() { log.Fatal(err) } lns = append(lns, ln) + + rootPath = ts.GetRootPath() + log.Printf("tsidp root path: %s", rootPath) } srv := &idpServer{ @@ -185,14 +186,18 @@ func main() { // Load funnel clients from disk if they exist, regardless of whether funnel is enabled // This ensures OIDC clients persist across restarts - f, err := os.Open(funnelClientsFile) + funnelClientsFilePath, err := getConfigFilePath(rootPath, funnelClientsFile) + if err != nil { + log.Fatalf("could not get funnel clients file path: %v", err) + } + f, err := os.Open(funnelClientsFilePath) if err == nil { if err := json.NewDecoder(f).Decode(&srv.funnelClients); err != nil { - log.Fatalf("could not parse %s: %v", funnelClientsFile, err) + log.Fatalf("could not parse %s: %v", funnelClientsFilePath, err) } f.Close() } else if !errors.Is(err, os.ErrNotExist) { - log.Fatalf("could not open %s: %v", funnelClientsFile, err) + log.Fatalf("could not open %s: %v", funnelClientsFilePath, err) } log.Printf("Running tsidp at %s ...", srv.serverURL) @@ -839,7 +844,10 @@ func (s *idpServer) oidcSigner() (jose.Signer, error) { func (s *idpServer) oidcPrivateKey() (*signingKey, error) { return s.lazySigningKey.GetErr(func() (*signingKey, error) { - keyPath := filepath.Join(s.rootPath, oidcKeyFile) + keyPath, err := getConfigFilePath(s.rootPath, oidcKeyFile) + if err != nil { + return nil, fmt.Errorf("could not get OIDC key file path: %w", err) + } var sk signingKey b, err := os.ReadFile(keyPath) if err == nil { @@ -1147,7 +1155,13 @@ func (s *idpServer) storeFunnelClientsLocked() error { if err := json.NewEncoder(&buf).Encode(s.funnelClients); err != nil { return err } - return os.WriteFile(funnelClientsFile, buf.Bytes(), 0600) + + funnelClientsFilePath, err := getConfigFilePath(s.rootPath, funnelClientsFile) + if err != nil { + return fmt.Errorf("storeFunnelClientsLocked: %v", err) + } + + return os.WriteFile(funnelClientsFilePath, buf.Bytes(), 0600) } const ( @@ -1260,3 +1274,18 @@ func isFunnelRequest(r *http.Request) bool { } return false } + +// getConfigFilePath returns the path to the config file for the given file name. 
+// The oidc-key.json and funnel-clients.json files were originally opened and written +// to without paths, and ended up in /root dir or home directory of the user running +// the process. To maintain backward compatibility, we return the naked file name if that +// file exists already, otherwise we return the full path in the rootPath. +func getConfigFilePath(rootPath string, fileName string) (string, error) { + if _, err := os.Stat(fileName); err == nil { + return fileName, nil + } else if errors.Is(err, os.ErrNotExist) { + return filepath.Join(rootPath, fileName), nil + } else { + return "", err + } +} From cf739256caa86d8ba48f107bb22c623de0d0822d Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 21 Aug 2025 11:03:05 -0700 Subject: [PATCH 1215/1708] net/udprelay: increase socket buffer size (#16910) This increases throughput over long fat networks, and in the presence of crypto/syscall-induced delay. Updates tailscale/corp#31164 Signed-off-by: Jordan Whited --- cmd/tailscaled/depaware.txt | 2 +- net/udprelay/server.go | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 219de5b0c..25f8ee3a1 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -339,7 +339,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/cmd/tailscaled tailscale.com/net/routetable from tailscale.com/doctor/routetable - tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock + tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock+ tailscale.com/net/socks5 from tailscale.com/cmd/tailscaled tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/net/udprelay/server.go b/net/udprelay/server.go index a039c9930..8aea8ae55 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -28,11 +28,13 @@ import ( "tailscale.com/net/netcheck" "tailscale.com/net/netmon" "tailscale.com/net/packet" + "tailscale.com/net/sockopts" "tailscale.com/net/stun" "tailscale.com/net/udprelay/endpoint" "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/nettype" "tailscale.com/util/eventbus" "tailscale.com/util/set" ) @@ -450,6 +452,25 @@ func (c *singlePacketConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort, gen return nil } +// UDP socket read/write buffer size (7MB). At the time of writing (2025-08-21) +// this value was heavily influenced by magicsock, with similar motivations for +// its increase relative to typical defaults, e.g. long fat networks and +// reducing packet loss around crypto/syscall-induced delay. +const socketBufferSize = 7 << 20 + +func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { + directions := []sockopts.BufferDirection{sockopts.ReadDirection, sockopts.WriteDirection} + for _, direction := range directions { + errForce, errPortable := sockopts.SetBufferSize(pconn, direction, socketBufferSize) + if errForce != nil { + logf("[warning] failed to force-set UDP %v buffer size to %d: %v; using kernel default values (impacts throughput only)", direction, socketBufferSize, errForce) + } + if errPortable != nil { + logf("failed to set UDP %v buffer size to %d: %v", direction, socketBufferSize, errPortable) + } + } +} + // listenOn binds an IPv4 and IPv6 socket to port. 
We consider it successful if // we manage to bind the IPv4 socket. // @@ -473,6 +494,7 @@ func (s *Server) listenOn(port int) error { break } } + trySetSocketBuffer(uc, s.logf) // TODO: set IP_PKTINFO sockopt _, boundPortStr, err := net.SplitHostPort(uc.LocalAddr().String()) if err != nil { From b17cfe4aed58e6802a45800863670ef299c70891 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 21 Aug 2025 13:44:13 -0700 Subject: [PATCH 1216/1708] wgengine/magicsock,net/sockopts: export Windows ICMP suppression logic (#16917) For eventual use by net/udprelay.Server. Updates tailscale/corp#31506 Signed-off-by: Jordan Whited --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- .../sockopts/sockopts_notwindows.go | 8 +++++--- .../sockopts/sockopts_windows.go | 20 +++++++++++-------- tsnet/depaware.txt | 2 +- wgengine/magicsock/magicsock.go | 12 +++++------ 7 files changed, 27 insertions(+), 21 deletions(-) rename wgengine/magicsock/magicsock_notwindows.go => net/sockopts/sockopts_notwindows.go (52%) rename wgengine/magicsock/magicsock_windows.go => net/sockopts/sockopts_windows.go (67%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index d9cc43e6b..555407421 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -867,7 +867,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable - tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock + 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 25f8ee3a1..be490a943 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -339,7 +339,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/cmd/tailscaled tailscale.com/net/routetable from tailscale.com/doctor/routetable - tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock+ + 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock+ tailscale.com/net/socks5 from tailscale.com/cmd/tailscaled tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 2cd76f91a..577050194 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -297,7 +297,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable - tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock + 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/wgengine/magicsock/magicsock_notwindows.go b/net/sockopts/sockopts_notwindows.go 
similarity index 52% rename from wgengine/magicsock/magicsock_notwindows.go rename to net/sockopts/sockopts_notwindows.go index 7c31c8202..f1bc7fd44 100644 --- a/wgengine/magicsock/magicsock_notwindows.go +++ b/net/sockopts/sockopts_notwindows.go @@ -3,11 +3,13 @@ //go:build !windows -package magicsock +package sockopts import ( - "tailscale.com/types/logger" "tailscale.com/types/nettype" ) -func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) {} +// SetICMPErrImmunity is no-op on non-Windows. +func SetICMPErrImmunity(pconn nettype.PacketConn) error { + return nil +} diff --git a/wgengine/magicsock/magicsock_windows.go b/net/sockopts/sockopts_windows.go similarity index 67% rename from wgengine/magicsock/magicsock_windows.go rename to net/sockopts/sockopts_windows.go index fe2a80e0b..1e6c3f69d 100644 --- a/wgengine/magicsock/magicsock_windows.go +++ b/net/sockopts/sockopts_windows.go @@ -3,28 +3,31 @@ //go:build windows -package magicsock +package sockopts import ( + "fmt" "net" "unsafe" "golang.org/x/sys/windows" - "tailscale.com/types/logger" "tailscale.com/types/nettype" ) -func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) { +// SetICMPErrImmunity sets socket options on pconn to prevent ICMP reception, +// e.g. ICMP Port Unreachable, from surfacing as a syscall error. +// +// If pconn is not a [*net.UDPConn], then SetICMPErrImmunity is no-op. +func SetICMPErrImmunity(pconn nettype.PacketConn) error { c, ok := pconn.(*net.UDPConn) if !ok { // not a UDP connection; nothing to do - return + return nil } sysConn, err := c.SyscallConn() if err != nil { - logf("trySetUDPSocketOptions: getting SyscallConn failed: %v", err) - return + return fmt.Errorf("SetICMPErrImmunity: getting SyscallConn failed: %v", err) } // Similar to https://github.com/golang/go/issues/5834 (which involved @@ -50,9 +53,10 @@ func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) { ) }) if ioctlErr != nil { - logf("trySetUDPSocketOptions: could not set SIO_UDP_NETRESET: %v", ioctlErr) + return fmt.Errorf("SetICMPErrImmunity: could not set SIO_UDP_NETRESET: %v", ioctlErr) } if err != nil { - logf("trySetUDPSocketOptions: SyscallConn.Control failed: %v", err) + return fmt.Errorf("SetICMPErrImmunity: SyscallConn.Control failed: %v", err) } + return nil } diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index d7d5be658..1e25090fd 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -293,7 +293,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable - tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock + 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a59a38f65..7fb3517e9 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3537,7 +3537,6 @@ func (c *Conn) bindSocket(ruc *RebindingUDPConn, network string, curPortFate cur } } } - trySetSocketBuffer(pconn, c.logf) trySetUDPSocketOptions(pconn, c.logf) // Success. 
@@ -3858,11 +3857,7 @@ func (c *Conn) DebugForcePreferDERP(n int) { c.netChecker.SetForcePreferredDERP(n) } -// trySetSocketBuffer attempts to set SO_SNDBUFFORCE and SO_RECVBUFFORCE which -// can overcome the limit of net.core.{r,w}mem_max, but require CAP_NET_ADMIN. -// It falls back to the portable implementation if that fails, which may be -// silently capped to net.core.{r,w}mem_max. -func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { +func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) { directions := []sockopts.BufferDirection{sockopts.ReadDirection, sockopts.WriteDirection} for _, direction := range directions { forceErr, portableErr := sockopts.SetBufferSize(pconn, direction, socketBufferSize) @@ -3873,6 +3868,11 @@ func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { logf("magicsock: failed to set UDP %v buffer size to %d: %v", direction, socketBufferSize, portableErr) } } + + err := sockopts.SetICMPErrImmunity(pconn) + if err != nil { + logf("magicsock: %v", err) + } } // derpStr replaces DERP IPs in s with "derp-". From c85cdabdfc4959d4d2c43b3cf56b2950fbb908d4 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 21 Aug 2025 13:59:23 -0700 Subject: [PATCH 1217/1708] net/udprelay: set ICMP err immunity sock opt (#16918) Updates tailscale/corp#31506 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 8aea8ae55..123813c16 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -458,7 +458,7 @@ func (c *singlePacketConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort, gen // reducing packet loss around crypto/syscall-induced delay. const socketBufferSize = 7 << 20 -func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { +func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) { directions := []sockopts.BufferDirection{sockopts.ReadDirection, sockopts.WriteDirection} for _, direction := range directions { errForce, errPortable := sockopts.SetBufferSize(pconn, direction, socketBufferSize) @@ -469,6 +469,11 @@ func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { logf("failed to set UDP %v buffer size to %d: %v", direction, socketBufferSize, errPortable) } } + + err := sockopts.SetICMPErrImmunity(pconn) + if err != nil { + logf("failed to set ICMP error immunity: %v", err) + } } // listenOn binds an IPv4 and IPv6 socket to port. We consider it successful if @@ -494,7 +499,7 @@ func (s *Server) listenOn(port int) error { break } } - trySetSocketBuffer(uc, s.logf) + trySetUDPSocketOptions(uc, s.logf) // TODO: set IP_PKTINFO sockopt _, boundPortStr, err := net.SplitHostPort(uc.LocalAddr().String()) if err != nil { From 3eeecb4c7f340a43ee133c85985111cf0e00e537 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Fri, 22 Aug 2025 16:07:05 +0100 Subject: [PATCH 1218/1708] cmd/k8s-proxy,k8s-operator: fix serve config for userspace mode (#16919) The serve code leaves it up to the system's DNS resolver and netstack to figure out how to reach the proxy destination. Combined with k8s-proxy running in userspace mode, this means we can't rely on MagicDNS being available or tailnet IPs being routable. I'd like to implement that as a feature for serve in userspace mode, but for now the safer fix to get kube-apiserver ProxyGroups consistently working in all environments is to switch to using localhost as the proxy target instead. 
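For illustration, the lookup order reads roughly like the sketch below. The helper
name and the *local.Client/apitype import paths are assumptions made for the sketch,
not the exact code in this change:

package apiproxy

import (
	"context"
	"errors"
	"net/http"

	"tailscale.com/client/local"
	"tailscale.com/client/tailscale/apitype"
)

// whoIsForRequest prefers the request's remote address, then falls back to
// the X-Forwarded-For header that serve populates, because with a localhost
// proxy target the inner request's remote address is loopback.
func whoIsForRequest(ctx context.Context, lc *local.Client, r *http.Request) (*apitype.WhoIsResponse, error) {
	who, remoteErr := lc.WhoIs(ctx, r.RemoteAddr)
	if remoteErr == nil {
		return who, nil
	}
	if fwd := r.Header.Get("X-Forwarded-For"); fwd != "" {
		if who, err := lc.WhoIs(ctx, fwd); err == nil {
			return who, nil
		}
	}
	return nil, errors.Join(remoteErr, errors.New("no usable X-Forwarded-For identity"))
}

The real change only consults the header when not serving HTTPS directly, since in
that case the remote address is already the tailnet peer.
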
This has a small knock-on in the code that does WhoIs lookups, which now needs to check the X-Forwarded-For header that serve populates to get the correct tailnet IP to look up, because the request's remote address will be loopback. Fixes #16920 Change-Id: I869ddcaf93102da50e66071bb00114cc1acc1288 Signed-off-by: Tom Proctor --- cmd/k8s-proxy/k8s-proxy.go | 2 +- k8s-operator/api-proxy/proxy.go | 30 +++++++++++++++++++++++------- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index 448bbe397..7a7707214 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -453,7 +453,7 @@ func setServeConfig(ctx context.Context, lc *local.Client, cm *certs.CertManager serviceHostPort: { Handlers: map[string]*ipn.HTTPHandler{ "/": { - Proxy: fmt.Sprintf("http://%s:80", strings.TrimSuffix(status.Self.DNSName, ".")), + Proxy: "http://localhost:80", }, }, }, diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index ff0373270..a0f2f930b 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -123,11 +123,11 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { if ap.authMode { mode = "auth" } - var tsLn net.Listener + var proxyLn net.Listener var serve func(ln net.Listener) error if ap.https { var err error - tsLn, err = ap.ts.Listen("tcp", ":443") + proxyLn, err = ap.ts.Listen("tcp", ":443") if err != nil { return fmt.Errorf("could not listen on :443: %w", err) } @@ -143,7 +143,7 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { } } else { var err error - tsLn, err = ap.ts.Listen("tcp", ":80") + proxyLn, err = net.Listen("tcp", "localhost:80") if err != nil { return fmt.Errorf("could not listen on :80: %w", err) } @@ -152,8 +152,8 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { errs := make(chan error) go func() { - ap.log.Infof("API server proxy in %s mode is listening on tailnet addresses %s", mode, tsLn.Addr()) - if err := serve(tsLn); err != nil && err != http.ErrServerClosed { + ap.log.Infof("API server proxy in %s mode is listening on %s", mode, proxyLn.Addr()) + if err := serve(proxyLn); err != nil && err != http.ErrServerClosed { errs <- fmt.Errorf("error serving: %w", err) } }() @@ -179,7 +179,7 @@ type APIServerProxy struct { rp *httputil.ReverseProxy authMode bool // Whether to run with impersonation using caller's tailnet identity. - https bool // Whether to serve on https for the device hostname; true for k8s-operator, false for k8s-proxy. + https bool // Whether to serve on https for the device hostname; true for k8s-operator, false (and localhost) for k8s-proxy. 
ts *tsnet.Server hs *http.Server upstreamURL *url.URL @@ -317,7 +317,23 @@ func (ap *APIServerProxy) addImpersonationHeadersAsRequired(r *http.Request) { } func (ap *APIServerProxy) whoIs(r *http.Request) (*apitype.WhoIsResponse, error) { - return ap.lc.WhoIs(r.Context(), r.RemoteAddr) + who, remoteErr := ap.lc.WhoIs(r.Context(), r.RemoteAddr) + if remoteErr == nil { + ap.log.Debugf("WhoIs from remote addr: %s", r.RemoteAddr) + return who, nil + } + + var fwdErr error + fwdFor := r.Header.Get("X-Forwarded-For") + if fwdFor != "" && !ap.https { + who, fwdErr = ap.lc.WhoIs(r.Context(), fwdFor) + if fwdErr == nil { + ap.log.Debugf("WhoIs from X-Forwarded-For header: %s", fwdFor) + return who, nil + } + } + + return nil, errors.Join(remoteErr, fwdErr) } func (ap *APIServerProxy) authError(w http.ResponseWriter, err error) { From b558f81a82bac09222b2320dbee5a4dfe96a3a17 Mon Sep 17 00:00:00 2001 From: Need-an-AwP <113933967+Need-an-AwP@users.noreply.github.com> Date: Sat, 23 Aug 2025 02:51:24 +0800 Subject: [PATCH 1219/1708] fix: invalid memory address or nil pointer dereference (#16922) Signed-off-by: Need-an-AwP <113933967+Need-an-AwP@users.noreply.github.com> --- cmd/tsconnect/wasm/wasm_js.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index ebf7284aa..87f814866 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -110,6 +110,7 @@ func newIPN(jsConfig js.Value) map[string]any { ControlKnobs: sys.ControlKnobs(), HealthTracker: sys.HealthTracker(), Metrics: sys.UserMetricsRegistry(), + EventBus: sys.Bus.Get(), }) if err != nil { log.Fatal(err) From 86a5292c03bce774b5ffedaccb768b2d5ff9f0a4 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 22 Aug 2025 15:11:51 -0700 Subject: [PATCH 1220/1708] ipn/localapi: make tailscale debug derp STUNOnly-aware (#16927) Fixes #16926 Signed-off-by: Jordan Whited --- ipn/localapi/debugderp.go | 92 ++++++++++++++++++++------------------- 1 file changed, 48 insertions(+), 44 deletions(-) diff --git a/ipn/localapi/debugderp.go b/ipn/localapi/debugderp.go index 6636fd253..017b90692 100644 --- a/ipn/localapi/debugderp.go +++ b/ipn/localapi/debugderp.go @@ -228,55 +228,59 @@ func (h *Handler) serveDebugDERPRegion(w http.ResponseWriter, r *http.Request) { // Start by checking whether we can establish a HTTP connection for _, derpNode := range reg.Nodes { - connSuccess := checkConn(derpNode) + if !derpNode.STUNOnly { + connSuccess := checkConn(derpNode) - // Verify that the /generate_204 endpoint works - captivePortalURL := fmt.Sprintf("http://%s/generate_204?t=%d", derpNode.HostName, time.Now().Unix()) - req, err := http.NewRequest("GET", captivePortalURL, nil) - if err != nil { - st.Warnings = append(st.Warnings, fmt.Sprintf("Internal error creating request for captive portal check: %v", err)) - continue - } - req.Header.Set("Cache-Control", "no-cache, no-store, must-revalidate, no-transform, max-age=0") - resp, err := client.Do(req) - if err != nil { - st.Warnings = append(st.Warnings, fmt.Sprintf("Error making request to the captive portal check %q; is port 80 blocked?", captivePortalURL)) - } else { - resp.Body.Close() - } + // Verify that the /generate_204 endpoint works + captivePortalURL := fmt.Sprintf("http://%s/generate_204?t=%d", derpNode.HostName, time.Now().Unix()) + req, err := http.NewRequest("GET", captivePortalURL, nil) + if err != nil { + st.Warnings = append(st.Warnings, fmt.Sprintf("Internal error creating request for captive portal check: %v", err)) 
+ continue + } + req.Header.Set("Cache-Control", "no-cache, no-store, must-revalidate, no-transform, max-age=0") + resp, err := client.Do(req) + if err != nil { + st.Warnings = append(st.Warnings, fmt.Sprintf("Error making request to the captive portal check %q; is port 80 blocked?", captivePortalURL)) + } else { + resp.Body.Close() + } - if !connSuccess { - continue - } + if !connSuccess { + continue + } - fakePrivKey := key.NewNode() - - // Next, repeatedly get the server key to see if the node is - // behind a load balancer (incorrectly). - serverPubKeys := make(map[key.NodePublic]bool) - for i := range 5 { - func() { - rc := derphttp.NewRegionClient(fakePrivKey, h.logf, h.b.NetMon(), func() *tailcfg.DERPRegion { - return &tailcfg.DERPRegion{ - RegionID: reg.RegionID, - RegionCode: reg.RegionCode, - RegionName: reg.RegionName, - Nodes: []*tailcfg.DERPNode{derpNode}, + fakePrivKey := key.NewNode() + + // Next, repeatedly get the server key to see if the node is + // behind a load balancer (incorrectly). + serverPubKeys := make(map[key.NodePublic]bool) + for i := range 5 { + func() { + rc := derphttp.NewRegionClient(fakePrivKey, h.logf, h.b.NetMon(), func() *tailcfg.DERPRegion { + return &tailcfg.DERPRegion{ + RegionID: reg.RegionID, + RegionCode: reg.RegionCode, + RegionName: reg.RegionName, + Nodes: []*tailcfg.DERPNode{derpNode}, + } + }) + if err := rc.Connect(ctx); err != nil { + st.Errors = append(st.Errors, fmt.Sprintf("Error connecting to node %q @ try %d: %v", derpNode.HostName, i, err)) + return } - }) - if err := rc.Connect(ctx); err != nil { - st.Errors = append(st.Errors, fmt.Sprintf("Error connecting to node %q @ try %d: %v", derpNode.HostName, i, err)) - return - } - if len(serverPubKeys) == 0 { - st.Info = append(st.Info, fmt.Sprintf("Successfully established a DERP connection with node %q", derpNode.HostName)) - } - serverPubKeys[rc.ServerPublicKey()] = true - }() - } - if len(serverPubKeys) > 1 { - st.Errors = append(st.Errors, fmt.Sprintf("Received multiple server public keys (%d); is the DERP server behind a load balancer?", len(serverPubKeys))) + if len(serverPubKeys) == 0 { + st.Info = append(st.Info, fmt.Sprintf("Successfully established a DERP connection with node %q", derpNode.HostName)) + } + serverPubKeys[rc.ServerPublicKey()] = true + }() + } + if len(serverPubKeys) > 1 { + st.Errors = append(st.Errors, fmt.Sprintf("Received multiple server public keys (%d); is the DERP server behind a load balancer?", len(serverPubKeys))) + } + } else { + st.Info = append(st.Info, fmt.Sprintf("Node %q is marked STUNOnly; skipped non-STUN checks", derpNode.HostName)) } // Send a STUN query to this node to verify whether or not it From fa0e83ab4f890120f957b34c82f3ff32fb979664 Mon Sep 17 00:00:00 2001 From: Mohammed Naser Date: Sat, 23 Aug 2025 05:00:09 -0400 Subject: [PATCH 1221/1708] tsnet: add Server.AdvertiseTags option (#15840) Updates #8531 Change-Id: I9b6653872c66929e692bd592ef3f438430c657b5 Signed-off-by: Valentin Alekseev Co-authored-by: Valentin Alekseev --- tsnet/tsnet.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 06709bf8b..8f2f7bdcd 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -124,6 +124,13 @@ type Server struct { // field at zero unless you know what you are doing. Port uint16 + // AdvertiseTags specifies groups that this embedded server wants to join, for + // purposes of ACL enforcement. These can be referenced from the ACL + // security policy. 
Note that advertising a tag doesn't guarantee that + // the control server will allow you to take on the rights for that + // tag. + AdvertiseTags []string + getCertForTesting func(*tls.ClientHelloInfo) (*tls.Certificate, error) initOnce sync.Once @@ -662,6 +669,7 @@ func (s *Server) start() (reterr error) { prefs.WantRunning = true prefs.ControlURL = s.ControlURL prefs.RunWebClient = s.RunWebClient + prefs.AdvertiseTags = s.AdvertiseTags authKey := s.getAuthKey() err = lb.Start(ipn.Options{ UpdatePrefs: prefs, From 6c8fef961eab77a51e2b30dcce0f84d7478892b2 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Sat, 23 Aug 2025 02:07:22 -0700 Subject: [PATCH 1222/1708] ipn/ipnlocal: replace the LockedOnEntry pattern with conventional lock/unlock discipline (#16925) There are several methods within the LocalBackend that used an unusual and error-prone lock discipline whereby they require the caller to hold the backend mutex on entry, but release it on the way out. In #11650 we added some support code to make this pattern more visible. Now it is time to eliminate the pattern (at least within this package). This is intended to produce no semantic changes, though I am relying on integration tests and careful inspection to achieve that. To the extent possible I preserved the existing control flow. In a few places, however, I replaced this with an unlock/lock closure. This means we will sometimes reacquire a lock only to release it again one frame up the stack, but these operations are not performance sensitive and the legibility gain seems worthwhile. We can probably also pull some of these out into separate methods, but I did not do that here so as to avoid other variable scope changes that might be hard to see. I would like to do some more cleanup separately. As a follow-up, we could also remove the unlockOnce helper, but I did not do that here either. Updates #11649 Change-Id: I4c92d4536eca629cfcd6187528381c33f4d64e20 Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/local.go | 316 +++++++++++++++++++------------------ ipn/ipnlocal/local_test.go | 2 +- ipn/ipnlocal/profiles.go | 15 +- 3 files changed, 173 insertions(+), 160 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5fb3d5771..5e6724701 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -806,7 +806,7 @@ func (b *LocalBackend) ReloadConfig() (ok bool, err error) { if err != nil { return false, err } - if err := b.setConfigLockedOnEntry(conf, unlock); err != nil { + if err := b.setConfigLocked(conf); err != nil { return false, fmt.Errorf("error setting config: %w", err) } @@ -863,10 +863,9 @@ func (b *LocalBackend) setStateLocked(state ipn.State) { } } -// setConfigLockedOnEntry uses the provided config to update the backend's prefs +// setConfigLocked uses the provided config to update the backend's prefs // and other state. -func (b *LocalBackend) setConfigLockedOnEntry(conf *conffile.Config, unlock unlockOnce) error { - defer unlock() +func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error { p := b.pm.CurrentPrefs().AsStruct() mp, err := conf.Parsed.ToPrefs() if err != nil { @@ -874,8 +873,7 @@ func (b *LocalBackend) setConfigLockedOnEntry(conf *conffile.Config, unlock unlo } p.ApplyEdits(&mp) b.setStaticEndpointsFromConfigLocked(conf) - b.setPrefsLockedOnEntry(p, unlock) - + b.setPrefsLocked(p) b.conf = conf return nil } @@ -1959,12 +1957,12 @@ func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { // b.mu must not be held. 
func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { unlock := b.lockAndGetUnlock() + defer unlock() prefs := b.pm.CurrentPrefs().AsStruct() if !b.reconcilePrefsLocked(prefs) { - unlock.UnlockEarly() return prefs.View(), false } - return b.setPrefsLockedOnEntry(prefs, unlock), true + return b.setPrefsLocked(prefs), true } // sysPolicyChanged is a callback triggered by syspolicy when it detects @@ -2492,8 +2490,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { // regress tsnet.Server restarts. cc.Login(controlclient.LoginDefault) } - b.stateMachineLockedOnEntry(unlock) - + b.stateMachineLocked() return nil } @@ -3512,14 +3509,14 @@ func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { b.logf("using tailnet default auto-update setting: %v", au) prefsClone := prefs.AsStruct() prefsClone.AutoUpdate.Apply = opt.NewBool(au) - _, err := b.editPrefsLockedOnEntry( + _, err := b.editPrefsLocked( ipnauth.Self, &ipn.MaskedPrefs{ Prefs: *prefsClone, AutoUpdateSet: ipn.AutoUpdatePrefsMask{ ApplySet: true, }, - }, unlock) + }) if err != nil { b.logf("failed to apply tailnet-wide default for auto-updates (%v): %v", au, err) return @@ -3979,7 +3976,7 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { action = "connected" } reason := fmt.Sprintf("client %s (%s)", action, userIdentifier) - b.switchToBestProfileLockedOnEntry(reason, unlock) + b.switchToBestProfileLocked(reason) } // SwitchToBestProfile selects the best profile to use, @@ -3989,13 +3986,14 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { // or disconnecting, or a change in the desktop session state, and is used // for logging. func (b *LocalBackend) SwitchToBestProfile(reason string) { - b.switchToBestProfileLockedOnEntry(reason, b.lockAndGetUnlock()) + unlock := b.lockAndGetUnlock() + defer unlock() + b.switchToBestProfileLocked(reason) } -// switchToBestProfileLockedOnEntry is like [LocalBackend.SwitchToBestProfile], -// but b.mu must held on entry. It is released on exit. -func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock unlockOnce) { - defer unlock() +// switchToBestProfileLocked is like [LocalBackend.SwitchToBestProfile], but +// the caller must hold b.mu. +func (b *LocalBackend) switchToBestProfileLocked(reason string) { oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() profile, background := b.resolveBestProfileLocked() cp, switched, err := b.pm.SwitchToProfile(profile) @@ -4026,7 +4024,7 @@ func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock un if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(); oldControlURL != newControlURL { b.resetDialPlan() } - if err := b.resetForProfileChangeLockedOnEntry(unlock); err != nil { + if err := b.resetForProfileChangeLocked(); err != nil { // TODO(nickkhyl): The actual reset cannot fail. However, // the TKA initialization or [LocalBackend.Start] can fail. // These errors are not critical as far as we're concerned. 
@@ -4304,7 +4302,7 @@ func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.P mp.InternalExitNodePrior = p0.ExitNodeID() } } - return b.editPrefsLockedOnEntry(actor, mp, unlock) + return b.editPrefsLocked(actor, mp) } // MaybeClearAppConnector clears the routes from any AppConnector if @@ -4333,7 +4331,9 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip return ipn.PrefsView{}, errors.New("can't set Internal fields") } - return b.editPrefsLockedOnEntry(actor, mp, b.lockAndGetUnlock()) + unlock := b.lockAndGetUnlock() + defer unlock() + return b.editPrefsLocked(actor, mp) } // checkEditPrefsAccessLocked checks whether the current user has access @@ -4540,7 +4540,7 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { } mp := &ipn.MaskedPrefs{WantRunningSet: true, Prefs: ipn.Prefs{WantRunning: true}} - if _, err := b.editPrefsLockedOnEntry(ipnauth.Self, mp, unlock); err != nil { + if _, err := b.editPrefsLocked(ipnauth.Self, mp); err != nil { b.logf("failed to automatically reconnect as %q after %v: %v", cp.Name(), d, err) } else { b.logf("automatically reconnected as %q after %v", cp.Name(), d) @@ -4569,11 +4569,8 @@ func (b *LocalBackend) stopReconnectTimerLocked() { } } -// Warning: b.mu must be held on entry, but it unlocks it on the way out. -// TODO(bradfitz): redo the locking on all these weird methods like this. -func (b *LocalBackend) editPrefsLockedOnEntry(actor ipnauth.Actor, mp *ipn.MaskedPrefs, unlock unlockOnce) (ipn.PrefsView, error) { - defer unlock() // for error paths - +// Warning: b.mu must be held on entry. +func (b *LocalBackend) editPrefsLocked(actor ipnauth.Actor, mp *ipn.MaskedPrefs) (ipn.PrefsView, error) { p0 := b.pm.CurrentPrefs() // Check if the changes in mp are allowed. @@ -4610,12 +4607,10 @@ func (b *LocalBackend) editPrefsLockedOnEntry(actor ipnauth.Actor, mp *ipn.Maske // before the modified prefs are actually set for the current profile. b.onEditPrefsLocked(actor, mp, p0, p1.View()) - newPrefs := b.setPrefsLockedOnEntry(p1, unlock) - - // Note: don't perform any actions for the new prefs here. Not - // every prefs change goes through EditPrefs. Put your actions - // in setPrefsLocksOnEntry instead. + newPrefs := b.setPrefsLocked(p1) + // Note: don't perform any actions for the new prefs here. Not every prefs + // change goes through EditPrefs. Put your actions in setPrefsLocked instead. // This should return the public prefs, not the private ones. return stripKeysFromPrefs(newPrefs), nil } @@ -4663,12 +4658,9 @@ func (b *LocalBackend) shouldWireInactiveIngressLocked() bool { return b.serveConfig.Valid() && !b.hasIngressEnabledLocked() && b.wantIngressLocked() } -// setPrefsLockedOnEntry requires b.mu be held to call it, but it -// unlocks b.mu when done. newp ownership passes to this function. -// It returns a read-only copy of the new prefs. -func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) ipn.PrefsView { - defer unlock() - +// setPrefsLocked requires b.mu be held to call it. It returns a read-only +// copy of the new prefs. 
+func (b *LocalBackend) setPrefsLocked(newp *ipn.Prefs) ipn.PrefsView { cn := b.currentNode() netMap := cn.NetMap() b.setAtomicValuesFromPrefsLocked(newp.View()) @@ -4737,28 +4729,33 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) b.stopOfflineAutoUpdate() } - unlock.UnlockEarly() + // Update status that needs to happen outside the lock, but reacquire it + // before returning (including in case of panics). + func() { + b.mu.Unlock() + defer b.mu.Lock() - if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged { - b.doSetHostinfoFilterServices() - } + if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged { + b.doSetHostinfoFilterServices() + } - if netMap != nil { - b.MagicConn().SetDERPMap(netMap.DERPMap) - } + if netMap != nil { + b.MagicConn().SetDERPMap(netMap.DERPMap) + } - if !oldp.WantRunning() && newp.WantRunning && cc != nil { - b.logf("transitioning to running; doing Login...") - cc.Login(controlclient.LoginDefault) - } + if !oldp.WantRunning() && newp.WantRunning && cc != nil { + b.logf("transitioning to running; doing Login...") + cc.Login(controlclient.LoginDefault) + } - if oldp.WantRunning() != newp.WantRunning { - b.stateMachine() - } else { - b.authReconfig() - } + if oldp.WantRunning() != newp.WantRunning { + b.stateMachine() + } else { + b.authReconfig() + } - b.send(ipn.Notify{Prefs: &prefs}) + b.send(ipn.Notify{Prefs: &prefs}) + }() return prefs } @@ -5620,12 +5617,12 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // happen". func (b *LocalBackend) enterState(newState ipn.State) { unlock := b.lockAndGetUnlock() - b.enterStateLockedOnEntry(newState, unlock) + defer unlock() + b.enterStateLocked(newState) } -// enterStateLockedOnEntry is like enterState but requires b.mu be held to call -// it, but it unlocks b.mu when done (via unlock, a once func). -func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlockOnce) { +// enterStateLocked is like enterState but requires the caller to hold b.mu. +func (b *LocalBackend) enterStateLocked(newState ipn.State) { cn := b.currentNode() oldState := b.state b.setStateLocked(newState) @@ -5674,51 +5671,56 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock b.maybeStartOfflineAutoUpdate(prefs) } - unlock.UnlockEarly() - - // prefs may change irrespective of state; WantRunning should be explicitly - // set before potential early return even if the state is unchanged. - b.health.SetIPNState(newState.String(), prefs.Valid() && prefs.WantRunning()) - if oldState == newState { - return - } - b.logf("Switching ipn state %v -> %v (WantRunning=%v, nm=%v)", - oldState, newState, prefs.WantRunning(), netMap != nil) - b.send(ipn.Notify{State: &newState}) + // Resolve the state transition outside the lock, but reacquire it before + // returning (including in case of panics). + func() { + b.mu.Unlock() + defer b.mu.Lock() - switch newState { - case ipn.NeedsLogin: - systemd.Status("Needs login: %s", authURL) - if b.seamlessRenewalEnabled() { - break - } - b.blockEngineUpdates(true) - fallthrough - case ipn.Stopped, ipn.NoState: - // Unconfigure the engine if it has stopped (WantRunning is set to false) - // or if we've switched to a different profile and the state is unknown. 
- err := b.e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) - if err != nil { - b.logf("Reconfig(down): %v", err) + // prefs may change irrespective of state; WantRunning should be explicitly + // set before potential early return even if the state is unchanged. + b.health.SetIPNState(newState.String(), prefs.Valid() && prefs.WantRunning()) + if oldState == newState { + return } + b.logf("Switching ipn state %v -> %v (WantRunning=%v, nm=%v)", + oldState, newState, prefs.WantRunning(), netMap != nil) + b.send(ipn.Notify{State: &newState}) + + switch newState { + case ipn.NeedsLogin: + systemd.Status("Needs login: %s", authURL) + if b.seamlessRenewalEnabled() { + break + } + b.blockEngineUpdates(true) + fallthrough + case ipn.Stopped, ipn.NoState: + // Unconfigure the engine if it has stopped (WantRunning is set to false) + // or if we've switched to a different profile and the state is unknown. + err := b.e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) + if err != nil { + b.logf("Reconfig(down): %v", err) + } - if newState == ipn.Stopped && authURL == "" { - systemd.Status("Stopped; run 'tailscale up' to log in") + if newState == ipn.Stopped && authURL == "" { + systemd.Status("Stopped; run 'tailscale up' to log in") + } + case ipn.Starting, ipn.NeedsMachineAuth: + b.authReconfig() + // Needed so that UpdateEndpoints can run + b.e.RequestStatus() + case ipn.Running: + var addrStrs []string + addrs := netMap.GetAddresses() + for _, p := range addrs.All() { + addrStrs = append(addrStrs, p.Addr().String()) + } + systemd.Status("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) + default: + b.logf("[unexpected] unknown newState %#v", newState) } - case ipn.Starting, ipn.NeedsMachineAuth: - b.authReconfig() - // Needed so that UpdateEndpoints can run - b.e.RequestStatus() - case ipn.Running: - var addrStrs []string - addrs := netMap.GetAddresses() - for _, p := range addrs.All() { - addrStrs = append(addrStrs, p.Addr().String()) - } - systemd.Status("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) - default: - b.logf("[unexpected] unknown newState %#v", newState) - } + }() } func (b *LocalBackend) hasNodeKeyLocked() bool { @@ -5819,26 +5821,28 @@ func (b *LocalBackend) nextStateLocked() ipn.State { // Or maybe just call the state machine from fewer places. func (b *LocalBackend) stateMachine() { unlock := b.lockAndGetUnlock() - b.stateMachineLockedOnEntry(unlock) + defer unlock() + b.stateMachineLocked() } -// stateMachineLockedOnEntry is like stateMachine but requires b.mu be held to -// call it, but it unlocks b.mu when done (via unlock, a once func). -func (b *LocalBackend) stateMachineLockedOnEntry(unlock unlockOnce) { - b.enterStateLockedOnEntry(b.nextStateLocked(), unlock) +// stateMachineLocked is like stateMachine but requires b.mu be held. +func (b *LocalBackend) stateMachineLocked() { + b.enterStateLocked(b.nextStateLocked()) } -// lockAndGetUnlock locks b.mu and returns a sync.OnceFunc function that will -// unlock it at most once. +// lockAndGetUnlock locks b.mu and returns a function that will unlock it at +// most once. +// +// TODO(creachadair): This was added as a guardrail against the unfortunate +// "LockedOnEntry" methods that were originally used in this package (primarily +// enterStateLockedOnEntry) that required b.mu held to be locked on entry to +// the function but unlocked the mutex on their way out. 
// -// This is all very unfortunate but exists as a guardrail against the -// unfortunate "lockedOnEntry" methods in this package (primarily -// enterStateLockedOnEntry) that require b.mu held to be locked on entry to the -// function but unlock the mutex on their way out. As a stepping stone to -// cleaning things up (as of 2024-04-06), we at least pass the unlock func -// around now and defer unlock in the caller to avoid missing unlocks and double -// unlocks. TODO(bradfitz,maisem): make the locking in this package more -// traditional (simple). See https://github.com/tailscale/tailscale/issues/11649 +// Now that these have all been updated, we could remove this type and acquire +// and release locks directly. For now, however, I've left it alone to reduce +// the scope of lock-related changes. +// +// See: https://github.com/tailscale/tailscale/issues/11649 func (b *LocalBackend) lockAndGetUnlock() (unlock unlockOnce) { b.mu.Lock() var unlocked atomic.Bool @@ -6006,30 +6010,35 @@ func (b *LocalBackend) ShouldHandleViaIP(ip netip.Addr) bool { // Logout logs out the current profile, if any, and waits for the logout to // complete. func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { - unlock := b.lockAndGetUnlock() - defer unlock() + // These values are initialized inside the lock on success. + var cc controlclient.Client + var profile ipn.LoginProfileView - if !b.hasNodeKeyLocked() { - // Already logged out. - return nil - } - cc := b.cc + if err := func() error { + unlock := b.lockAndGetUnlock() + defer unlock() - // Grab the current profile before we unlock the mutex, so that we can - // delete it later. - profile := b.pm.CurrentProfile() - - _, err := b.editPrefsLockedOnEntry( - actor, - &ipn.MaskedPrefs{ - WantRunningSet: true, - LoggedOutSet: true, - Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true}, - }, unlock) - if err != nil { + if !b.hasNodeKeyLocked() { + // Already logged out. + return nil + } + cc = b.cc + + // Grab the current profile before we unlock the mutex, so that we can + // delete it later. + profile = b.pm.CurrentProfile() + + _, err := b.editPrefsLocked( + actor, + &ipn.MaskedPrefs{ + WantRunningSet: true, + LoggedOutSet: true, + Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true}, + }) + return err + }(); err != nil { return err } - // b.mu is now unlocked, after editPrefsLockedOnEntry. // Clear any previous dial plan(s), if set. b.resetDialPlan() @@ -6049,14 +6058,14 @@ func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { return err } - unlock = b.lockAndGetUnlock() + unlock := b.lockAndGetUnlock() defer unlock() if err := b.pm.DeleteProfile(profile.ID()); err != nil { b.logf("error deleting profile: %v", err) return err } - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // setNetInfo sets b.hostinfo.NetInfo to ni, and passes ni along to the @@ -7245,7 +7254,7 @@ func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { b.resetDialPlan() } - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } func (b *LocalBackend) initTKALocked() error { @@ -7325,12 +7334,10 @@ func (b *LocalBackend) getHardwareAddrs() ([]string, error) { return addrs, nil } -// resetForProfileChangeLockedOnEntry resets the backend for a profile change. +// resetForProfileChangeLocked resets the backend for a profile change. // // b.mu must held on entry. It is released on exit. 
-func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) error { - defer unlock() - +func (b *LocalBackend) resetForProfileChangeLocked() error { if b.shutdownCalled { // Prevent a call back to Start during Shutdown, which calls Logout for // ephemeral nodes, which can then call back here. But we're shutting @@ -7361,12 +7368,19 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err b.resetAlwaysOnOverrideLocked() b.extHost.NotifyProfileChange(b.pm.CurrentProfile(), b.pm.CurrentPrefs(), false) b.setAtomicValuesFromPrefsLocked(b.pm.CurrentPrefs()) - b.enterStateLockedOnEntry(ipn.NoState, unlock) // Reset state; releases b.mu - b.health.SetLocalLogConfigHealth(nil) - if tkaErr != nil { - return tkaErr - } - return b.Start(ipn.Options{}) + b.enterStateLocked(ipn.NoState) + + // Update health status and start outside the lock. + return func() error { + b.mu.Unlock() + defer b.mu.Lock() + + b.health.SetLocalLogConfigHealth(nil) + if tkaErr != nil { + return tkaErr + } + return b.Start(ipn.Options{}) + }() } // DeleteProfile deletes a profile with the given ID. @@ -7385,7 +7399,7 @@ func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { if !needToRestart { return nil } - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // CurrentProfile returns the current LoginProfile. @@ -7407,7 +7421,7 @@ func (b *LocalBackend) NewProfile() error { // set. Conservatively reset the dialPlan. b.resetDialPlan() - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // ListProfiles returns a list of all LoginProfiles. @@ -7436,7 +7450,7 @@ func (b *LocalBackend) ResetAuth() error { return err } b.resetDialPlan() // always reset if we're removing everything - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } func (b *LocalBackend) GetPeerEndpointChanges(ctx context.Context, ip netip.Addr) ([]magicsock.EndpointChange, error) { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 49cfc3e07..60b5b2c5b 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4300,7 +4300,7 @@ func (b *LocalBackend) SetPrefsForTest(newp *ipn.Prefs) { } unlock := b.lockAndGetUnlock() defer unlock() - b.setPrefsLockedOnEntry(newp, unlock) + b.setPrefsLocked(newp) } type peerOptFunc func(*tailcfg.Node) diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 1d312cfa6..7519ee157 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -180,7 +180,7 @@ func (pm *profileManager) SwitchToProfile(profile ipn.LoginProfileView) (cp ipn. f(pm.currentProfile, pm.prefs, false) } // Do not call pm.extHost.NotifyProfileChange here; it is invoked in - // [LocalBackend.resetForProfileChangeLockedOnEntry] after the netmap reset. + // [LocalBackend.resetForProfileChangeLocked] after the netmap reset. // TODO(nickkhyl): Consider moving it here (or into the stateChangeCb handler // in [LocalBackend]) once the profile/node state, including the netmap, // is actually tied to the current profile. @@ -359,9 +359,9 @@ func (pm *profileManager) SetPrefs(prefsIn ipn.PrefsView, np ipn.NetworkProfile) // where prefsIn is the previous profile's prefs with an updated Persist, LoggedOut, // WantRunning and possibly other fields. This may not be the desired behavior. 
// - // Additionally, LocalBackend doesn't treat it as a proper profile switch, meaning that - // [LocalBackend.resetForProfileChangeLockedOnEntry] is not called and certain - // node/profile-specific state may not be reset as expected. + // Additionally, LocalBackend doesn't treat it as a proper profile switch, + // meaning that [LocalBackend.resetForProfileChangeLocked] is not called and + // certain node/profile-specific state may not be reset as expected. // // However, [profileManager] notifies [ipnext.Extension]s about the profile change, // so features migrated from LocalBackend to external packages should not be affected. @@ -494,10 +494,9 @@ func (pm *profileManager) setProfilePrefsNoPermCheck(profile ipn.LoginProfileVie oldPrefs := pm.prefs pm.prefs = clonedPrefs - // Sadly, profile prefs can be changed in multiple ways. - // It's pretty chaotic, and in many cases callers use - // unexported methods of the profile manager instead of - // going through [LocalBackend.setPrefsLockedOnEntry] + // Sadly, profile prefs can be changed in multiple ways. It's pretty + // chaotic, and in many cases callers use unexported methods of the + // profile manager instead of going through [LocalBackend.setPrefsLocked] // or at least using [profileManager.SetPrefs]. // // While we should definitely clean this up to improve From 16bd60f9caff91549b40e470d04b3fdfc2e90c47 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Sat, 23 Aug 2025 08:07:36 -0700 Subject: [PATCH 1223/1708] ipn,tsnet: update AdvertiseTags documentation (#16931) Instead of referring to groups, which is a term of art for a different entity, update the doc comments to more accurately describe what tags are in reference to the policy document. Updates #cleanup Change-Id: Iefff6f84981985f834bae7c6a6c34044f53f2ea2 Signed-off-by: M. J. Fromberger --- ipn/prefs.go | 9 ++++----- tsnet/tsnet.go | 9 ++++----- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/ipn/prefs.go b/ipn/prefs.go index 71a80b182..2eb0ccf0c 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -158,11 +158,10 @@ type Prefs struct { // connections. This overrides tailcfg.Hostinfo's ShieldsUp. ShieldsUp bool - // AdvertiseTags specifies groups that this node wants to join, for - // purposes of ACL enforcement. These can be referenced from the ACL - // security policy. Note that advertising a tag doesn't guarantee that - // the control server will allow you to take on the rights for that - // tag. + // AdvertiseTags specifies tags that should be applied to this node, for + // purposes of ACL enforcement. These can be referenced from the ACL policy + // document. Note that advertising a tag on the client doesn't guarantee + // that the control server will allow the node to adopt that tag. AdvertiseTags []string // Hostname is the hostname to use for identifying the node. If diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 8f2f7bdcd..4cb977c73 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -124,11 +124,10 @@ type Server struct { // field at zero unless you know what you are doing. Port uint16 - // AdvertiseTags specifies groups that this embedded server wants to join, for - // purposes of ACL enforcement. These can be referenced from the ACL - // security policy. Note that advertising a tag doesn't guarantee that - // the control server will allow you to take on the rights for that - // tag. + // AdvertiseTags specifies tags that should be applied to this node, for + // purposes of ACL enforcement. 
These can be referenced from the ACL policy + // document. Note that advertising a tag on the client doesn't guarantee + // that the control server will allow the node to adopt that tag. AdvertiseTags []string getCertForTesting func(*tls.ClientHelloInfo) (*tls.Certificate, error) From 4236a759f3a1c2ae1e78729d677b2bb33c26232b Mon Sep 17 00:00:00 2001 From: Kot C Date: Sat, 23 Aug 2025 17:52:23 -0700 Subject: [PATCH 1224/1708] cmd/tsidp: Add Docker image to README (#16915) Signed-off-by: Kot C --- cmd/tsidp/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/tsidp/README.md b/cmd/tsidp/README.md index 780d9ab95..ffc296b87 100644 --- a/cmd/tsidp/README.md +++ b/cmd/tsidp/README.md @@ -12,6 +12,10 @@ ## Installation using Docker +### Pre-built image + +A pre-built tsidp image exists at `tailscale/tsidp:unstable`. + ### Building from Source ```bash From fafb51453838ddcac71f0ebee44ff1093168f105 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Mon, 25 Aug 2025 08:58:12 -0400 Subject: [PATCH 1225/1708] client/systray: go back to using upstream library (#16938) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We had a fix in a local branch, but upstream has merged it now. Updates #1708 Signed-off-by: Claus Lensbøl --- client/systray/logo.go | 2 +- client/systray/systray.go | 2 +- cmd/tailscale/depaware.txt | 12 ++++++------ flake.nix | 2 +- go.mod | 2 +- go.mod.sri | 2 +- go.sum | 4 ++-- shell.nix | 2 +- 8 files changed, 14 insertions(+), 14 deletions(-) diff --git a/client/systray/logo.go b/client/systray/logo.go index d9b0932bc..3467d1b74 100644 --- a/client/systray/logo.go +++ b/client/systray/logo.go @@ -15,9 +15,9 @@ import ( "sync" "time" + "fyne.io/systray" ico "github.com/Kodeworks/golang-image-ico" "github.com/fogleman/gg" - "github.com/tailscale/systray" ) // tsLogo represents the Tailscale logo displayed as the systray icon. 
diff --git a/client/systray/systray.go b/client/systray/systray.go index b1bc45fa8..bd7c15972 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -24,10 +24,10 @@ import ( "syscall" "time" + "fyne.io/systray" ico "github.com/Kodeworks/golang-image-ico" "github.com/atotto/clipboard" dbus "github.com/godbus/dbus/v5" - "github.com/tailscale/systray" "github.com/toqueteos/webbrowser" "tailscale.com/client/local" "tailscale.com/ipn" diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 8e28e2933..020479ebb 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -2,6 +2,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 + L fyne.io/systray from tailscale.com/client/systray + L fyne.io/systray/internal/generated/menu from fyne.io/systray + L fyne.io/systray/internal/generated/notifier from fyne.io/systray L github.com/Kodeworks/golang-image-ico from tailscale.com/client/systray W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate @@ -22,9 +25,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ - L 💣 github.com/godbus/dbus/v5 from github.com/godbus/dbus/v5/introspect+ - L github.com/godbus/dbus/v5/introspect from github.com/godbus/dbus/v5/prop+ - L github.com/godbus/dbus/v5/prop from github.com/tailscale/systray + L 💣 github.com/godbus/dbus/v5 from fyne.io/systray+ + L github.com/godbus/dbus/v5/introspect from fyne.io/systray+ + L github.com/godbus/dbus/v5/prop from fyne.io/systray L github.com/golang/freetype/raster from github.com/fogleman/gg+ L github.com/golang/freetype/truetype from github.com/fogleman/gg github.com/golang/groupcache/lru from tailscale.com/net/dnscache @@ -66,9 +69,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink - L github.com/tailscale/systray from tailscale.com/client/systray - L github.com/tailscale/systray/internal/generated/menu from github.com/tailscale/systray - L github.com/tailscale/systray/internal/generated/notifier from github.com/tailscale/systray github.com/tailscale/web-client-prebuilt from tailscale.com/client/web github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli+ L github.com/vishvananda/netns from github.com/tailscale/netlink+ diff --git a/flake.nix b/flake.nix index 311c422fb..4ed2ab324 100644 --- a/flake.nix +++ b/flake.nix @@ -148,5 +148,5 @@ }); }; } -# nix-direnv cache busting line: sha256-Ue1rD4m+rg/J84XhpNZAa3ni/r5FVQZTh6wZcbp7GsE= +# nix-direnv cache busting line: sha256-15aaW3lqRgXQxBKEWRJTEV1GPmG7Gc9XwsTTu+M5rTY= diff --git a/go.mod b/go.mod index fba5a4f54..380c325bc 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.24.6 require ( filippo.io/mkcert v1.4.4 + fyne.io/systray v1.11.1-0.20250812065214-4856ac3adc3c github.com/Kodeworks/golang-image-ico 
v0.0.0-20141118225523-73f0f4cfade9 github.com/akutz/memconn v0.1.0 github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa @@ -87,7 +88,6 @@ require ( github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb - github.com/tailscale/systray v0.0.0-20250807194015-d0cacc864c78 github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da diff --git a/go.mod.sri b/go.mod.sri index 34e9a57de..69c69b8db 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-Ue1rD4m+rg/J84XhpNZAa3ni/r5FVQZTh6wZcbp7GsE= +sha256-15aaW3lqRgXQxBKEWRJTEV1GPmG7Gc9XwsTTu+M5rTY= diff --git a/go.sum b/go.sum index df5d27313..f2544b9ac 100644 --- a/go.sum +++ b/go.sum @@ -43,6 +43,8 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc= filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA= +fyne.io/systray v1.11.1-0.20250812065214-4856ac3adc3c h1:km4PIleGtbbF1oxmFQuO93CyNCldwuRTPB8WlzNWNZs= +fyne.io/systray v1.11.1-0.20250812065214-4856ac3adc3c/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs= github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw= github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= github.com/Abirdcfly/dupword v0.0.14 h1:3U4ulkc8EUo+CaT105/GJ1BQwtgyj6+VaBVbAX11Ba8= @@ -990,8 +992,6 @@ github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+y github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb h1:Rtklwm6HUlCtf/MR2MB9iY4FoA16acWWlC5pLrTVa90= github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb/go.mod h1:R8iCVJnbOB05pGexHK/bKHneIRHpZ3jLl7wMQ0OM/jw= -github.com/tailscale/systray v0.0.0-20250807194015-d0cacc864c78 h1:H7/LOg6wgQ116wFRVa8tz9KTB8pc6jeNtqS9tyKgeVw= -github.com/tailscale/systray v0.0.0-20250807194015-d0cacc864c78/go.mod h1:1NbyArqaFj+AzkSWl0odw7flO9DsHIYWC4lMkwCKVAo= github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= diff --git a/shell.nix b/shell.nix index 9dfdf4935..e0f6e79f1 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-Ue1rD4m+rg/J84XhpNZAa3ni/r5FVQZTh6wZcbp7GsE= +# nix-direnv cache busting line: sha256-15aaW3lqRgXQxBKEWRJTEV1GPmG7Gc9XwsTTu+M5rTY= From b5f834aef83cfa24bc3d4ed39e2e41f7e2cad944 Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Mon, 25 Aug 2025 10:16:52 -0600 Subject: [PATCH 1226/1708] cmd/tailscaled: add Dnscache as a service dependency Updates https://github.com/tailscale/corp/issues/30961 Signed-off-by: Aaron Klotz --- cmd/tailscaled/install_windows.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/tailscaled/install_windows.go b/cmd/tailscaled/install_windows.go index 
3e5036fba..e98a6461e 100644
--- a/cmd/tailscaled/install_windows.go
+++ b/cmd/tailscaled/install_windows.go
@@ -29,6 +29,7 @@ func init() {
 // This list must be kept in sync with the TailscaledDependencies preprocessor
 // variable in the installer.
 var serviceDependencies = []string{
+	"Dnscache",
 	"iphlpsvc",
 	"netprofm",
 	"WinHttpAutoProxySvc",

From 9403ba8c69eb4664f0c78a08f049c31ea4906a0f Mon Sep 17 00:00:00 2001
From: Jordan Whited 
Date: Mon, 25 Aug 2025 09:40:15 -0700
Subject: [PATCH 1227/1708] wgengine/magicsock: trigger peer relay path discovery on CallMeMaybe RX (#16929)

Updates tailscale/corp#30333

Signed-off-by: Jordan Whited 
---
 wgengine/magicsock/endpoint.go | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go
index 951e59011..aba4242c2 100644
--- a/wgengine/magicsock/endpoint.go
+++ b/wgengine/magicsock/endpoint.go
@@ -1964,7 +1964,25 @@ func (de *endpoint) handleCallMeMaybe(m *disco.CallMeMaybe) {
 	for _, st := range de.endpointState {
 		st.lastPing = 0
 	}
-	de.sendDiscoPingsLocked(mono.Now(), false)
+	monoNow := mono.Now()
+	de.sendDiscoPingsLocked(monoNow, false)
+
+	// This hook is required to trigger peer relay path discovery around
+	// disco "tailscale ping" initiated by de. We may be configured with peer
+	// relay servers that differ from de.
+	//
+	// The only other peer relay path discovery hook is in [endpoint.heartbeat],
+	// which is kicked off around outbound WireGuard packet flow, or if you are
+	// the "tailscale ping" initiator. Disco "tailscale ping" does not propagate
+	// into wireguard-go.
+	//
+	// We choose not to hook this around disco ping reception since peer relay
+	// path discovery can also trigger disco ping transmission, which *could*
+	// lead to an infinite loop of peer relay path discovery between two peers,
+	// absent intended triggers.
+	if de.wantUDPRelayPathDiscoveryLocked(monoNow) {
+		de.discoverUDPRelayPathsLocked(monoNow)
+	}
 }
 
 func (de *endpoint) populatePeerStatus(ps *ipnstate.PeerStatus) {

From 2fb9472990ea76b30b7ac7c138b856ba9500dfa1 Mon Sep 17 00:00:00 2001
From: "M. J. Fromberger" 
Date: Mon, 25 Aug 2025 10:49:06 -0700
Subject: [PATCH 1228/1708] ipn/ipnlocal: remove unnecessary uses of lockAndGetUnlock

In places where we are locking the LocalBackend and immediately deferring
an unlock, and where there is no shortcut path in the control flow below
the deferral, we do not need the unlockOnce helper.

Replace all these with use of the lock directly.

Updates #11649

Change-Id: I3e6a7110dfc9ec6c1d38d2585c5367a0d4e76514
Signed-off-by: M. J. Fromberger 
---
 ipn/ipnlocal/local.go | 72 +++++++++++++++++++++----------------------
 1 file changed, 36 insertions(+), 36 deletions(-)

diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go
index 5e6724701..a5c4e1f22 100644
--- a/ipn/ipnlocal/local.go
+++ b/ipn/ipnlocal/local.go
@@ -797,8 +797,8 @@ func (b *LocalBackend) Dialer() *tsdial.Dialer {
 // It returns (false, nil) if not running in declarative mode, (true, nil) on
 // success, or (false, error) on failure.
 func (b *LocalBackend) ReloadConfig() (ok bool, err error) {
-	unlock := b.lockAndGetUnlock()
-	defer unlock()
+	b.mu.Lock()
+	defer b.mu.Unlock()
 	if b.conf == nil {
 		return false, nil
 	}
@@ -1956,8 +1956,8 @@ func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) {
 //
 // b.mu must not be held.
func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() prefs := b.pm.CurrentPrefs().AsStruct() if !b.reconcilePrefsLocked(prefs) { return prefs.View(), false @@ -2284,8 +2284,8 @@ func (b *LocalBackend) Start(opts ipn.Options) error { clientToShutdown.Shutdown() } }() - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if opts.UpdatePrefs != nil { if err := b.checkPrefsLocked(opts.UpdatePrefs); err != nil { @@ -3486,8 +3486,8 @@ func (b *LocalBackend) onClientVersion(v *tailcfg.ClientVersion) { } func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() prefs := b.pm.CurrentPrefs() if !prefs.Valid() { @@ -3953,8 +3953,8 @@ func (b *LocalBackend) shouldUploadServices() bool { // // On non-multi-user systems, the actor should be set to nil. func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() var userIdentifier string if user := cmp.Or(actor, b.currentUser); user != nil { @@ -3986,8 +3986,8 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { // or disconnecting, or a change in the desktop session state, and is used // for logging. func (b *LocalBackend) SwitchToBestProfile(reason string) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() b.switchToBestProfileLocked(reason) } @@ -4260,8 +4260,8 @@ func (b *LocalBackend) checkAutoUpdatePrefsLocked(p *ipn.Prefs) error { // Setting the value to false when use of an exit node is already false is not an error, // nor is true when the exit node is already in use. func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.PrefsView, error) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() p0 := b.pm.CurrentPrefs() if v && p0.ExitNodeID() != "" { @@ -4331,8 +4331,8 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip return ipn.PrefsView{}, errors.New("can't set Internal fields") } - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() return b.editPrefsLocked(actor, mp) } @@ -4521,8 +4521,8 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { profileID := b.pm.CurrentProfile().ID() var reconnectTimer tstime.TimerController reconnectTimer = b.clock.AfterFunc(d, func() { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if b.reconnectTimer != reconnectTimer { // We're either not the most recent timer, or we lost the race when @@ -4569,7 +4569,7 @@ func (b *LocalBackend) stopReconnectTimerLocked() { } } -// Warning: b.mu must be held on entry. +// The caller must hold b.mu. func (b *LocalBackend) editPrefsLocked(actor ipnauth.Actor, mp *ipn.MaskedPrefs) (ipn.PrefsView, error) { p0 := b.pm.CurrentPrefs() @@ -5616,8 +5616,8 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // really this is more "one of several places in which random things // happen". func (b *LocalBackend) enterState(newState ipn.State) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() b.enterStateLocked(newState) } @@ -5820,8 +5820,8 @@ func (b *LocalBackend) nextStateLocked() ipn.State { // TODO(apenwarr): use a channel or something to prevent reentrancy? 
// Or maybe just call the state machine from fewer places. func (b *LocalBackend) stateMachine() { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() b.stateMachineLocked() } @@ -6015,8 +6015,8 @@ func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { var profile ipn.LoginProfileView if err := func() error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if !b.hasNodeKeyLocked() { // Already logged out. @@ -6058,8 +6058,8 @@ func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { return err } - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if err := b.pm.DeleteProfile(profile.ID()); err != nil { b.logf("error deleting profile: %v", err) @@ -7241,8 +7241,8 @@ func (b *LocalBackend) ShouldInterceptVIPServiceTCPPort(ap netip.AddrPort) bool // It will restart the backend on success. // If the profile is not known, it returns an errProfileNotFound. func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() if _, changed, err := b.pm.SwitchToProfileByID(profile); !changed || err != nil { @@ -7336,7 +7336,7 @@ func (b *LocalBackend) getHardwareAddrs() ([]string, error) { // resetForProfileChangeLocked resets the backend for a profile change. // -// b.mu must held on entry. It is released on exit. +// The caller must hold b.mu. func (b *LocalBackend) resetForProfileChangeLocked() error { if b.shutdownCalled { // Prevent a call back to Start during Shutdown, which calls Logout for @@ -7386,8 +7386,8 @@ func (b *LocalBackend) resetForProfileChangeLocked() error { // DeleteProfile deletes a profile with the given ID. // If the profile is not known, it is a no-op. func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() needToRestart := b.pm.CurrentProfile().ID() == p if err := b.pm.DeleteProfile(p); err != nil { @@ -7412,8 +7412,8 @@ func (b *LocalBackend) CurrentProfile() ipn.LoginProfileView { // NewProfile creates and switches to the new profile. func (b *LocalBackend) NewProfile() error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() b.pm.SwitchToNewProfile() @@ -7436,8 +7436,8 @@ func (b *LocalBackend) ListProfiles() []ipn.LoginProfileView { // backend is left with a new profile, ready for StartLoginInterative to be // called to register it as new node. func (b *LocalBackend) ResetAuth() error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() prevCC := b.resetControlClientLocked() if prevCC != nil { From 9002e5fd6b8ede093ad05916db0755834f0ab5c9 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Sat, 23 Aug 2025 08:44:43 -0700 Subject: [PATCH 1229/1708] ipn/ipnlocal: remove an unnecessary unlock shortcut The early unlock on this branch was required because the "send" method goes on to acquire the mutex itself. Rather than release the lock just to acquire it again, call the underlying locked helper directly. Updates #11649 Change-Id: I50d81864a00150fc41460b7486a9c65655f282f5 Signed-off-by: M. J. 
Fromberger --- ipn/ipnlocal/local.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index a5c4e1f22..a1d2df24c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1501,8 +1501,6 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control return } if st.Err != nil { - // The following do not depend on any data for which we need b locked. - unlock.UnlockEarly() if errors.Is(st.Err, io.EOF) { b.logf("[v1] Received error: EOF") return @@ -1511,7 +1509,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control var uerr controlclient.UserVisibleError if errors.As(st.Err, &uerr) { s := uerr.UserVisibleError() - b.send(ipn.Notify{ErrMessage: &s}) + b.sendToLocked(ipn.Notify{ErrMessage: &s}, allClients) } return } From b411ffb52f1336e5284dd70641ccc654fd2b407f Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 25 Aug 2025 09:16:29 -0700 Subject: [PATCH 1230/1708] ipn/ipnlocal: remove UnlockEarly from doSetHostinfoFilterServices Pull the lock-bearing code into a closure, and use a clone rather than a shallow copy of the hostinfo record. Updates #11649 Change-Id: I4f1d42c42ce45e493b204baae0d50b1cbf82b102 Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/local.go | 46 +++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index a1d2df24c..26f0155a1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4896,36 +4896,34 @@ func (b *LocalBackend) peerAPIServicesLocked() (ret []tailcfg.Service) { // TODO(danderson): we shouldn't be mangling hostinfo here after // painstakingly constructing it in twelvety other places. func (b *LocalBackend) doSetHostinfoFilterServices() { - unlock := b.lockAndGetUnlock() - defer unlock() + // Check the control client, hostinfo, and services under the mutex. + // On return, either both the client and hostinfo are nil, or both are non-nil. + // When non-nil, the Hostinfo is a clone of the value carried by b, safe to modify. + cc, hi, peerAPIServices := func() (controlclient.Client, *tailcfg.Hostinfo, []tailcfg.Service) { + b.mu.Lock() + defer b.mu.Unlock() - cc := b.cc - if cc == nil { - // Control client isn't up yet. - return - } - if b.hostinfo == nil { - b.logf("[unexpected] doSetHostinfoFilterServices with nil hostinfo") + if b.cc == nil { + return nil, nil, nil // control client isn't up yet + } else if b.hostinfo == nil { + b.logf("[unexpected] doSetHostinfoFilterServices with nil hostinfo") + return nil, nil, nil + } + svc := b.peerAPIServicesLocked() + if b.egg { + svc = append(svc, tailcfg.Service{Proto: "egg", Port: 1}) + } + // Make a clone of hostinfo so we can mutate the service field, below. + return b.cc, b.hostinfo.Clone(), svc + }() + if cc == nil || hi == nil { return } - peerAPIServices := b.peerAPIServicesLocked() - if b.egg { - peerAPIServices = append(peerAPIServices, tailcfg.Service{Proto: "egg", Port: 1}) - } - - // TODO(maisem,bradfitz): store hostinfo as a view, not as a mutable struct. - hi := *b.hostinfo // shallow copy - unlock.UnlockEarly() - // Make a shallow copy of hostinfo so we can mutate - // at the Service field. if !b.shouldUploadServices() { hi.Services = []tailcfg.Service{} } - // Don't mutate hi.Service's underlying array. Append to - // the slice with no free capacity. - c := len(hi.Services) - hi.Services = append(hi.Services[:c:c], peerAPIServices...) 
+ hi.Services = append(hi.Services, peerAPIServices...) hi.PushDeviceToken = b.pushDeviceToken.Load() // Compare the expected ports from peerAPIServices to the actual ports in hi.Services. @@ -4935,7 +4933,7 @@ func (b *LocalBackend) doSetHostinfoFilterServices() { b.logf("Hostinfo peerAPI ports changed: expected %v, got %v", expectedPorts, actualPorts) } - cc.SetHostinfo(&hi) + cc.SetHostinfo(hi) } type portPair struct { From c5429cd49c60b766077e792e805f9e42df607c9e Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Tue, 26 Aug 2025 07:44:26 -0700 Subject: [PATCH 1231/1708] go.toolchain.branch: bump to go1.25 (#16954) go.toolchain.rev: bump go1.25 version flake.nix: bump Go to 1.25 Updates #16330 Signed-off-by: Patrick O'Doherty --- .github/workflows/golangci-lint.yml | 2 +- Dockerfile | 2 +- cmd/derper/depaware.txt | 19 +++++++++++++++---- cmd/k8s-operator/depaware.txt | 15 +++++++++++++-- cmd/stund/depaware.txt | 20 +++++++++++++++----- cmd/tailscale/depaware.txt | 15 +++++++++++++-- cmd/tailscaled/depaware.txt | 15 +++++++++++++-- cmd/tsidp/depaware.txt | 15 +++++++++++++-- flake.nix | 14 +++++++------- go.mod | 2 +- go.toolchain.branch | 2 +- go.toolchain.rev | 2 +- tsnet/depaware.txt | 15 +++++++++++++-- 13 files changed, 107 insertions(+), 31 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index ee62f04be..bcf17f8e6 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -33,7 +33,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7.0.0 with: - version: v2.0.2 + version: v2.4.0 # Show only new issues if it's a pull request. only-new-issues: true diff --git a/Dockerfile b/Dockerfile index fbc0d1194..bd0f2840f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,7 +36,7 @@ # $ docker exec tailscaled tailscale status -FROM golang:1.24-alpine AS build-env +FROM golang:1.25-alpine AS build-env WORKDIR /go/src/tailscale diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 20b6bfb6e..6b149e5f5 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -205,13 +205,13 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting+ L golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from net/http + golang.org/x/net/http/httpguts from net/http+ golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2/hpack from net/http + golang.org/x/net/http2/hpack from net/http+ golang.org/x/net/idna from golang.org/x/crypto/acme/autocert+ golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/proxy from tailscale.com/net/netns - D golang.org/x/net/route from net+ + D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sync/singleflight from github.com/tailscale/setec/client/setec golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ @@ -241,6 +241,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ @@ -270,6 +272,7 @@ 
tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ @@ -337,21 +340,27 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ @@ -373,6 +382,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa net/http/httptrace from net/http+ net/http/internal from net/http net/http/internal/ascii from net/http + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ net/textproto from golang.org/x/net/http/httpguts+ @@ -395,6 +405,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa sort from compress/flate+ strconv from compress/flate+ strings from bufio+ + W structs from internal/syscall/windows sync from compress/flate+ sync/atomic from context+ syscall from crypto/internal/sysrand+ @@ -407,4 +418,4 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 555407421..85bec4a79 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -1022,7 +1022,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/net/ipv4 from github.com/miekg/dns+ golang.org/x/net/ipv6 from github.com/miekg/dns+ golang.org/x/net/proxy from tailscale.com/net/netns - D golang.org/x/net/route from net+ + D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/net/websocket from tailscale.com/k8s-operator/sessionrecording/ws golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials+ golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/k8s-operator @@ -1059,6 +1059,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + 
crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ @@ -1088,6 +1090,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ @@ -1170,22 +1173,28 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ W internal/saferio from debug/pe internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ @@ -1211,6 +1220,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ net/http/httputil from github.com/aws/smithy-go/transport/http+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ + net/http/internal/httpcommon from net/http net/http/pprof from sigs.k8s.io/controller-runtime/pkg/manager+ net/netip from github.com/gaissmai/bart+ net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ @@ -1233,6 +1243,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ sort from compress/flate+ strconv from archive/tar+ strings from archive/tar+ + W structs from internal/syscall/windows sync from archive/tar+ sync/atomic from context+ syscall from archive/tar+ @@ -1245,4 +1256,4 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 8e4db75ae..c8a18eb07 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -98,11 +98,10 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from tailscale.com/tsweb/varz golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from net/http + golang.org/x/net/http/httpguts from net/http+ golang.org/x/net/http/httpproxy from net/http - golang.org/x/net/http2/hpack from net/http + golang.org/x/net/http2/hpack from net/http+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - D 
golang.org/x/net/route from net golang.org/x/sys/cpu from golang.org/x/crypto/blake2b+ LD golang.org/x/sys/unix from github.com/prometheus/procfs+ W golang.org/x/sys/windows from github.com/prometheus/client_golang/prometheus @@ -126,6 +125,8 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ @@ -155,6 +156,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ @@ -221,21 +223,27 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ @@ -253,9 +261,10 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ - net/http/httptrace from net/http + net/http/httptrace from net/http+ net/http/internal from net/http net/http/internal/ascii from net/http + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ net/textproto from golang.org/x/net/http/httpguts+ @@ -276,6 +285,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar sort from compress/flate+ strconv from compress/flate+ strings from bufio+ + W structs from internal/syscall/windows sync from compress/flate+ sync/atomic from context+ syscall from crypto/internal/sysrand+ @@ -286,4 +296,4 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 020479ebb..b121a411f 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -246,7 +246,7 @@ tailscale.com/cmd/tailscale dependencies: 
(generated by github.com/tailscale/dep golang.org/x/net/ipv4 from github.com/miekg/dns+ golang.org/x/net/ipv6 from github.com/miekg/dns+ golang.org/x/net/proxy from tailscale.com/net/netns - D golang.org/x/net/route from net+ + D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/tailscale/cli golang.org/x/oauth2/internal from golang.org/x/oauth2+ @@ -280,6 +280,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ @@ -309,6 +311,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ @@ -385,22 +388,28 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ W internal/saferio from debug/pe internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ @@ -424,6 +433,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ net/textproto from golang.org/x/net/http/httpguts+ @@ -445,6 +455,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep sort from compress/flate+ strconv from archive/tar+ strings from archive/tar+ + W structs from internal/syscall/windows sync from archive/tar+ sync/atomic from context+ syscall from archive/tar+ @@ -457,4 +468,4 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/cmd/tailscaled/depaware.txt 
b/cmd/tailscaled/depaware.txt index be490a943..a83c67cca 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -499,7 +499,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/ipv4 from github.com/miekg/dns+ golang.org/x/net/ipv6 from github.com/miekg/dns+ golang.org/x/net/proxy from tailscale.com/net/netns - D golang.org/x/net/route from net+ + D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sync/singleflight from github.com/jellydator/ttlcache/v3 golang.org/x/sys/cpu from github.com/tailscale/certstore+ @@ -534,6 +534,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ @@ -563,6 +565,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ @@ -634,22 +637,28 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ W internal/saferio from debug/pe internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ @@ -673,6 +682,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net/http/httputil from github.com/aws/smithy-go/transport/http+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/cmd/tailscaled+ net/netip from github.com/tailscale/wireguard-go/conn+ net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ @@ -694,6 +704,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de sort from compress/flate+ strconv from archive/tar+ strings from archive/tar+ + W structs from internal/syscall/windows sync from archive/tar+ sync/atomic from context+ syscall from archive/tar+ @@ -706,4 +717,4 
@@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 577050194..a695aa5f3 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -452,7 +452,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/net/ipv4 from github.com/miekg/dns+ golang.org/x/net/ipv6 from github.com/miekg/dns+ golang.org/x/net/proxy from tailscale.com/net/netns - D golang.org/x/net/route from net+ + D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ LD golang.org/x/sys/unix from github.com/google/nftables+ @@ -485,6 +485,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ @@ -514,6 +516,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ @@ -585,22 +588,28 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ W internal/saferio from debug/pe internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ @@ -623,6 +632,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar net/http/httputil from github.com/aws/smithy-go/transport/http+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/ipn/localapi+ net/netip from crypto/x509+ net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ @@ -644,6 +654,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar sort from compress/flate+ strconv from 
archive/tar+ strings from archive/tar+ + W structs from internal/syscall/windows sync from archive/tar+ sync/atomic from context+ syscall from archive/tar+ @@ -656,4 +667,4 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/flake.nix b/flake.nix index 4ed2ab324..c739e8720 100644 --- a/flake.nix +++ b/flake.nix @@ -46,18 +46,18 @@ systems, flake-compat, }: let - go124Version = "1.24.6"; - goHash = "sha256-4ctVgqq1iGaLwEwH3hhogHD2uMmyqvNh+CHhm9R8/b0="; + go125Version = "1.25.0"; + goHash = "sha256-S9AekSlyB7+kUOpA1NWpOxtTGl5DhHOyoG4Y4HciciU="; eachSystem = f: nixpkgs.lib.genAttrs (import systems) (system: f (import nixpkgs { system = system; overlays = [ (final: prev: { - go_1_24 = prev.go_1_24.overrideAttrs { - version = go124Version; + go_1_25 = prev.go_1_25.overrideAttrs { + version = go125Version; src = prev.fetchurl { - url = "https://go.dev/dl/go${go124Version}.src.tar.gz"; + url = "https://go.dev/dl/go${go125Version}.src.tar.gz"; hash = goHash; }; }; @@ -84,7 +84,7 @@ # you're an end user you should be prepared for this flake to not # build periodically. packages = eachSystem (pkgs: rec { - default = pkgs.buildGo124Module { + default = pkgs.buildGo125Module { name = "tailscale"; pname = "tailscale"; src = ./.; @@ -137,7 +137,7 @@ gotools graphviz perl - go_1_24 + go_1_25 yarn # qemu and e2fsprogs are needed for natlab diff --git a/go.mod b/go.mod index 380c325bc..ecd229427 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.24.6 +go 1.25.0 require ( filippo.io/mkcert v1.4.4 diff --git a/go.toolchain.branch b/go.toolchain.branch index 5e1cd0620..a2bebbeb7 100644 --- a/go.toolchain.branch +++ b/go.toolchain.branch @@ -1 +1 @@ -tailscale.go1.24 +tailscale.go1.25 diff --git a/go.toolchain.rev b/go.toolchain.rev index 6e3bd7ff9..e3dfee540 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -54f31cd8fc7b3d7d87c1ea455c8bb4b33372f706 +9a1a6a51164c9c7a23f711052bb8776326cd30cd diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 1e25090fd..67c182430 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -445,7 +445,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/net/ipv4 from github.com/miekg/dns+ golang.org/x/net/ipv6 from github.com/miekg/dns+ LDW golang.org/x/net/proxy from tailscale.com/net/netns - DI golang.org/x/net/route from net+ + DI golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ LDAI golang.org/x/sys/unix from github.com/google/nftables+ @@ -478,6 +478,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ @@ -507,6 +509,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ 
crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ @@ -578,22 +581,28 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + DI internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + LA internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ LA internal/runtime/syscall from runtime+ W internal/saferio from debug/pe internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LDAI internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ @@ -616,6 +625,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) net/http/httputil from github.com/aws/smithy-go/transport/http+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/ipn/localapi+ net/netip from crypto/x509+ net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ @@ -636,6 +646,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) sort from compress/flate+ strconv from archive/tar+ strings from archive/tar+ + W structs from internal/syscall/windows sync from archive/tar+ sync/atomic from context+ syscall from archive/tar+ @@ -648,4 +659,4 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ From 575664b26358533466fa3a881a15b821f6176ae2 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 26 Aug 2025 09:22:36 -0700 Subject: [PATCH 1232/1708] wgengine/magicsock: make endpoint.discoPing peer relay aware (#16946) Updates tailscale/corp#30333 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index aba4242c2..37892176b 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -994,13 +994,30 @@ func (de *endpoint) discoPing(res *ipnstate.PingResult, size int, cb func(*ipnst if derpAddr.IsValid() { de.startDiscoPingLocked(epAddr{ap: derpAddr}, now, pingCLI, size, resCB) } - if udpAddr.ap.IsValid() && now.Before(de.trustBestAddrUntil) { - // Already have an active session, so just ping the address we're using. - // Otherwise "tailscale ping" results to a node on the local network - // can look like they're bouncing between, say 10.0.0.0/9 and the peer's - // IPv6 address, both 1ms away, and it's random who replies first. + + switch { + case udpAddr.ap.IsValid() && now.Before(de.trustBestAddrUntil): + // We have a "trusted" direct OR peer relay address, ping it. 
de.startDiscoPingLocked(udpAddr, now, pingCLI, size, resCB) - } else { + if !udpAddr.vni.IsSet() { + // If the path is direct we do not want to fallthrough to pinging + // all candidate direct paths, otherwise "tailscale ping" results to + // a node on the local network can look like they're bouncing + // between, say 10.0.0.0/8 and the peer's IPv6 address, both 1ms + // away, and it's random who replies first. cb() is called with the + // first reply, vs background path discovery that is subject to + // betterAddr() comparison and hysteresis + break + } + // If the trusted path is via a peer relay we want to fallthrough in + // order to also try all candidate direct paths. + fallthrough + default: + // Ping all candidate direct paths. This work overlaps with what + // [de.heartbeat] will periodically fire when it calls + // [de.sendDiscoPingsLocked], but a user-initiated [pingCLI] is a + // "do it now" operation that should not be subject to + // [heartbeatInterval] tick or [discoPingInterval] rate-limiting. for ep := range de.endpointState { de.startDiscoPingLocked(epAddr{ap: ep}, now, pingCLI, size, resCB) } From 6542a00ab04d85c1157b6e4b44b33184071b0569 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 27 Aug 2025 09:45:31 -0400 Subject: [PATCH 1233/1708] tailcfg: add mac-ui-v2 node capability (#16940) updates tailscale/corp#29841 Adds a node cap macOS UIs can query to determine whether then should enable the new windowed UI. Signed-off-by: Jonathan Nobels --- tailcfg/tailcfg.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index d2125684d..6383af486 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2405,6 +2405,9 @@ const ( CapabilityDebug NodeCapability = "https://tailscale.com/cap/debug" // exposes debug endpoints over the PeerAPI CapabilityHTTPS NodeCapability = "https" + // CapabilityMacUIV2 makes the macOS GUI enable its v2 mode. + CapabilityMacUIV2 NodeCapability = "https://tailscale.com/cap/mac-ui-v2" + // CapabilityBindToInterfaceByRoute changes how Darwin nodes create // sockets (in the net/netns package). See that package for more // details on the behaviour of this capability. From 80f5a00e7604632c302b021013df03e4f6ee5a0b Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 27 Aug 2025 09:51:28 -0400 Subject: [PATCH 1234/1708] ipn/local: add the suggested exit node to the ipn bus (#16748) fixes tailscale/corp#26369 The suggested exit node is currently only calculated during a localAPI request. For older UIs, this wasn't a bad choice - we could just fetch it on-demand when a menu presented itself. For newer incarnations however, this is an always-visible field that needs to react to changes in the suggested exit node's value. This change recalculates the suggested exit node ID on netmap updates and broadcasts it on the IPN bus. The localAPI version of this remains intact for the time being. Signed-off-by: Jonathan Nobels --- ipn/backend.go | 24 ++++++++++++------ ipn/ipnlocal/bus.go | 3 ++- ipn/ipnlocal/local.go | 57 ++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 75 insertions(+), 9 deletions(-) diff --git a/ipn/backend.go b/ipn/backend.go index ab01d2fde..fd4442f71 100644 --- a/ipn/backend.go +++ b/ipn/backend.go @@ -83,6 +83,8 @@ const ( NotifyRateLimit NotifyWatchOpt = 1 << 8 // if set, rate limit spammy netmap updates to every few seconds NotifyHealthActions NotifyWatchOpt = 1 << 9 // if set, include PrimaryActions in health.State. 
Otherwise append the action URL to the text + + NotifyInitialSuggestedExitNode NotifyWatchOpt = 1 << 10 // if set, the first Notify message (sent immediately) will contain the current SuggestedExitNode if available ) // Notify is a communication from a backend (e.g. tailscaled) to a frontend @@ -98,7 +100,7 @@ type Notify struct { // This field is only set in the first message when requesting // NotifyInitialState. Clients must store it on their side as // following notifications will not include this field. - SessionID string `json:",omitempty"` + SessionID string `json:",omitzero"` // ErrMessage, if non-nil, contains a critical error message. // For State InUseOtherUser, ErrMessage is not critical and just contains the details. @@ -116,7 +118,7 @@ type Notify struct { // user's preferred storage location. // // Deprecated: use LocalClient.AwaitWaitingFiles instead. - FilesWaiting *empty.Message `json:",omitempty"` + FilesWaiting *empty.Message `json:",omitzero"` // IncomingFiles, if non-nil, specifies which files are in the // process of being received. A nil IncomingFiles means this @@ -125,22 +127,22 @@ type Notify struct { // of being transferred. // // Deprecated: use LocalClient.AwaitWaitingFiles instead. - IncomingFiles []PartialFile `json:",omitempty"` + IncomingFiles []PartialFile `json:",omitzero"` // OutgoingFiles, if non-nil, tracks which files are in the process of // being sent via TailDrop, including files that finished, whether // successful or failed. This slice is sorted by Started time, then Name. - OutgoingFiles []*OutgoingFile `json:",omitempty"` + OutgoingFiles []*OutgoingFile `json:",omitzero"` // LocalTCPPort, if non-nil, informs the UI frontend which // (non-zero) localhost TCP port it's listening on. // This is currently only used by Tailscale when run in the // macOS Network Extension. - LocalTCPPort *uint16 `json:",omitempty"` + LocalTCPPort *uint16 `json:",omitzero"` // ClientVersion, if non-nil, describes whether a client version update // is available. - ClientVersion *tailcfg.ClientVersion `json:",omitempty"` + ClientVersion *tailcfg.ClientVersion `json:",omitzero"` // DriveShares tracks the full set of current DriveShares that we're // publishing. Some client applications, like the MacOS and Windows clients, @@ -153,7 +155,11 @@ type Notify struct { // Health is the last-known health state of the backend. When this field is // non-nil, a change in health verified, and the API client should surface // any changes to the user in the UI. - Health *health.State `json:",omitempty"` + Health *health.State `json:",omitzero"` + + // SuggestedExitNode, if non-nil, is the node that the backend has determined to + // be the best exit node for the current network conditions. 
+ SuggestedExitNode *tailcfg.StableNodeID `json:",omitzero"` // type is mirrored in xcode/IPN/Core/LocalAPI/Model/LocalAPIModel.swift } @@ -194,6 +200,10 @@ func (n Notify) String() string { if n.Health != nil { sb.WriteString("Health{...} ") } + if n.SuggestedExitNode != nil { + fmt.Fprintf(&sb, "SuggestedExitNode=%v ", *n.SuggestedExitNode) + } + s := sb.String() return s[0:len(s)-1] + "}" } diff --git a/ipn/ipnlocal/bus.go b/ipn/ipnlocal/bus.go index 111a877d8..910e4e774 100644 --- a/ipn/ipnlocal/bus.go +++ b/ipn/ipnlocal/bus.go @@ -156,5 +156,6 @@ func isNotableNotify(n *ipn.Notify) bool { n.Health != nil || len(n.IncomingFiles) > 0 || len(n.OutgoingFiles) > 0 || - n.FilesWaiting != nil + n.FilesWaiting != nil || + n.SuggestedExitNode != nil } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 26f0155a1..43d7e1216 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1737,6 +1737,10 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.send(ipn.Notify{NetMap: st.NetMap}) + // The error here is unimportant as is the result. This will recalculate the suggested exit node + // cache the value and push any changes to the IPN bus. + b.SuggestExitNode() + // Check and update the exit node if needed, now that we have a new netmap. // // This must happen after the netmap change is sent via [ipn.Notify], @@ -2033,7 +2037,13 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo } } + if cn.NetMap() != nil && mutationsAreWorthyOfRecalculatingSuggestedExitNode(muts, cn, b.lastSuggestedExitNode) { + // Recompute the suggested exit node + b.suggestExitNodeLocked() + } + if cn.NetMap() != nil && mutationsAreWorthyOfTellingIPNBus(muts) { + nm := cn.netMapWithPeers() notify = &ipn.Notify{NetMap: nm} } else if testenv.InTest() { @@ -2045,6 +2055,41 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo return true } +// mustationsAreWorthyOfRecalculatingSuggestedExitNode reports whether any mutation type in muts is +// worthy of recalculating the suggested exit node. +func mutationsAreWorthyOfRecalculatingSuggestedExitNode(muts []netmap.NodeMutation, cn *nodeBackend, sid tailcfg.StableNodeID) bool { + for _, m := range muts { + n, ok := cn.NodeByID(m.NodeIDBeingMutated()) + if !ok { + // The node being mutated is not in the netmap. + continue + } + + // The previously suggested exit node itself is being mutated. + if sid != "" && n.StableID() == sid { + return true + } + + allowed := n.AllowedIPs().AsSlice() + isExitNode := slices.Contains(allowed, tsaddr.AllIPv4()) || slices.Contains(allowed, tsaddr.AllIPv6()) + // The node being mutated is not an exit node. We don't care about it - unless + // it was our previously suggested exit node which we catch above. + if !isExitNode { + continue + } + + // Some exit node is being mutated. We care about it if it's online + // or offline state has changed. We *might* eventually care about it for other reasons + // but for the sake of finding a "better" suggested exit node, this is probably + // sufficient. + switch m.(type) { + case netmap.NodeMutationOnline: + return true + } + } + return false +} + // mutationsAreWorthyOfTellingIPNBus reports whether any mutation type in muts is // worthy of spamming the IPN bus (the Windows & Mac GUIs, basically) to tell them // about the update. 
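As a rough illustration of how a frontend might consume the notifications introduced by this patch (this sketch is not part of the commit; it relies only on the SuggestedExitNode field and the NotifyInitialSuggestedExitNode bit added above, and updateExitNodePicker is a hypothetical stand-in for whatever hook a GUI actually uses):

package main

import (
	"fmt"

	"tailscale.com/ipn"
	"tailscale.com/tailcfg"
)

// watchMask is what a GUI could pass when subscribing to the IPN bus so that
// the first Notify message carries the current suggestion, per the new
// NotifyInitialSuggestedExitNode bit.
const watchMask = ipn.NotifyInitialState | ipn.NotifyInitialSuggestedExitNode

// handleNotify reacts to a single message from the bus. A nil
// SuggestedExitNode means the suggestion did not change in this message.
func handleNotify(n ipn.Notify) {
	if n.SuggestedExitNode == nil {
		return
	}
	updateExitNodePicker(*n.SuggestedExitNode)
}

// updateExitNodePicker is a hypothetical UI hook.
func updateExitNodePicker(id tailcfg.StableNodeID) {
	fmt.Println("suggested exit node is now", id)
}

func main() {
	// In a real client the Notify values arrive from a WatchIPNBus
	// subscription opened with watchMask; here we feed one synthetic message.
	_ = watchMask
	id := tailcfg.StableNodeID("example-stable-id")
	handleNotify(ipn.Notify{SuggestedExitNode: &id})
}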
@@ -3063,7 +3108,7 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A b.mu.Lock() - const initialBits = ipn.NotifyInitialState | ipn.NotifyInitialPrefs | ipn.NotifyInitialNetMap | ipn.NotifyInitialDriveShares + const initialBits = ipn.NotifyInitialState | ipn.NotifyInitialPrefs | ipn.NotifyInitialNetMap | ipn.NotifyInitialDriveShares | ipn.NotifyInitialSuggestedExitNode if mask&initialBits != 0 { cn := b.currentNode() ini = &ipn.Notify{Version: version.Long()} @@ -3086,6 +3131,11 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A if mask&ipn.NotifyInitialHealthState != 0 { ini.Health = b.HealthTracker().CurrentState() } + if mask&ipn.NotifyInitialSuggestedExitNode != 0 { + if en, err := b.SuggestExitNode(); err != nil { + ini.SuggestedExitNode = &en.ID + } + } } ctx, cancel := context.WithCancel(ctx) @@ -7716,7 +7766,12 @@ func (b *LocalBackend) suggestExitNodeLocked() (response apitype.ExitNodeSuggest if err != nil { return res, err } + if prevSuggestion != res.ID { + // Notify the clients via the IPN bus if the exit node suggestion has changed. + b.sendToLocked(ipn.Notify{SuggestedExitNode: &res.ID}, allClients) + } b.lastSuggestedExitNode = res.ID + return res, err } From 882b05fff9b67b4a63e9a9d7486c0981b2c73016 Mon Sep 17 00:00:00 2001 From: Maisem Ali <3953239+maisem@users.noreply.github.com> Date: Wed, 27 Aug 2025 00:06:28 -0700 Subject: [PATCH 1235/1708] cmd/viewer: add field comments to generated view methods Extract field comments from AST and include them in generated view methods. Comments are preserved from the original struct fields to provide documentation for the view accessors. Fixes #16958 Signed-off-by: Maisem Ali <3953239+maisem@users.noreply.github.com> --- cmd/viewer/tests/tests_view.go | 35 +- cmd/viewer/viewer.go | 146 ++- cmd/viewer/viewer_test.go | 3 +- drive/drive_view.go | 17 +- ipn/ipn_view.go | 365 ++++++- tailcfg/tailcfg_view.go | 945 +++++++++++++++--- types/dnstype/dnstype_view.go | 25 + types/persist/persist_view.go | 9 +- .../prefs/prefs_example/prefs_example_view.go | 74 +- types/prefs/prefs_view_test.go | 7 + 10 files changed, 1382 insertions(+), 244 deletions(-) diff --git a/cmd/viewer/tests/tests_view.go b/cmd/viewer/tests/tests_view.go index bc95fea01..e50a71c9e 100644 --- a/cmd/viewer/tests/tests_view.go +++ b/cmd/viewer/tests/tests_view.go @@ -247,47 +247,41 @@ func (v *MapView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v MapView) Int() views.Map[string, int] { return views.MapOf(v.ж.Int) } - +func (v MapView) Int() views.Map[string, int] { return views.MapOf(v.ж.Int) } func (v MapView) SliceInt() views.MapSlice[string, int] { return views.MapSliceOf(v.ж.SliceInt) } - func (v MapView) StructPtrWithPtr() views.MapFn[string, *StructWithPtrs, StructWithPtrsView] { return views.MapFnOf(v.ж.StructPtrWithPtr, func(t *StructWithPtrs) StructWithPtrsView { return t.View() }) } - func (v MapView) StructPtrWithoutPtr() views.MapFn[string, *StructWithoutPtrs, StructWithoutPtrsView] { return views.MapFnOf(v.ж.StructPtrWithoutPtr, func(t *StructWithoutPtrs) StructWithoutPtrsView { return t.View() }) } - func (v MapView) StructWithoutPtr() views.Map[string, StructWithoutPtrs] { return views.MapOf(v.ж.StructWithoutPtr) } - func (v MapView) SlicesWithPtrs() views.MapFn[string, []*StructWithPtrs, views.SliceView[*StructWithPtrs, StructWithPtrsView]] { return views.MapFnOf(v.ж.SlicesWithPtrs, func(t []*StructWithPtrs) views.SliceView[*StructWithPtrs, StructWithPtrsView] { 
return views.SliceOfViews[*StructWithPtrs, StructWithPtrsView](t) }) } - func (v MapView) SlicesWithoutPtrs() views.MapFn[string, []*StructWithoutPtrs, views.SliceView[*StructWithoutPtrs, StructWithoutPtrsView]] { return views.MapFnOf(v.ж.SlicesWithoutPtrs, func(t []*StructWithoutPtrs) views.SliceView[*StructWithoutPtrs, StructWithoutPtrsView] { return views.SliceOfViews[*StructWithoutPtrs, StructWithoutPtrsView](t) }) } - func (v MapView) StructWithoutPtrKey() views.Map[StructWithoutPtrs, int] { return views.MapOf(v.ж.StructWithoutPtrKey) } - func (v MapView) StructWithPtr() views.MapFn[string, StructWithPtrs, StructWithPtrsView] { return views.MapFnOf(v.ж.StructWithPtr, func(t StructWithPtrs) StructWithPtrsView { return t.View() }) } + +// Unsupported views. func (v MapView) SliceIntPtr() map[string][]*int { panic("unsupported") } func (v MapView) PointerKey() map[*string]int { panic("unsupported") } func (v MapView) StructWithPtrKey() map[StructWithPtrs]int { panic("unsupported") } @@ -389,8 +383,10 @@ func (v StructWithSlicesView) Prefixes() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.Prefixes) } func (v StructWithSlicesView) Data() views.ByteSlice[[]byte] { return views.ByteSliceOf(v.ж.Data) } -func (v StructWithSlicesView) Structs() StructWithPtrs { panic("unsupported") } -func (v StructWithSlicesView) Ints() *int { panic("unsupported") } + +// Unsupported views. +func (v StructWithSlicesView) Structs() StructWithPtrs { panic("unsupported") } +func (v StructWithSlicesView) Ints() *int { panic("unsupported") } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _StructWithSlicesViewNeedsRegeneration = StructWithSlices(struct { @@ -554,9 +550,10 @@ func (v GenericIntStructView[T]) Pointer() views.ValuePointer[T] { return views.ValuePointerOf(v.ж.Pointer) } -func (v GenericIntStructView[T]) Slice() views.Slice[T] { return views.SliceOf(v.ж.Slice) } +func (v GenericIntStructView[T]) Slice() views.Slice[T] { return views.SliceOf(v.ж.Slice) } +func (v GenericIntStructView[T]) Map() views.Map[string, T] { return views.MapOf(v.ж.Map) } -func (v GenericIntStructView[T]) Map() views.Map[string, T] { return views.MapOf(v.ж.Map) } +// Unsupported views. func (v GenericIntStructView[T]) PtrSlice() *T { panic("unsupported") } func (v GenericIntStructView[T]) PtrKeyMap() map[*T]string { panic("unsupported") } func (v GenericIntStructView[T]) PtrValueMap() map[string]*T { panic("unsupported") } @@ -648,9 +645,10 @@ func (v GenericNoPtrsStructView[T]) Pointer() views.ValuePointer[T] { return views.ValuePointerOf(v.ж.Pointer) } -func (v GenericNoPtrsStructView[T]) Slice() views.Slice[T] { return views.SliceOf(v.ж.Slice) } +func (v GenericNoPtrsStructView[T]) Slice() views.Slice[T] { return views.SliceOf(v.ж.Slice) } +func (v GenericNoPtrsStructView[T]) Map() views.Map[string, T] { return views.MapOf(v.ж.Map) } -func (v GenericNoPtrsStructView[T]) Map() views.Map[string, T] { return views.MapOf(v.ж.Map) } +// Unsupported views. 
func (v GenericNoPtrsStructView[T]) PtrSlice() *T { panic("unsupported") } func (v GenericNoPtrsStructView[T]) PtrKeyMap() map[*T]string { panic("unsupported") } func (v GenericNoPtrsStructView[T]) PtrValueMap() map[string]*T { panic("unsupported") } @@ -741,12 +739,13 @@ func (v GenericCloneableStructView[T, V]) Value() V { return v.ж.Value.View() } func (v GenericCloneableStructView[T, V]) Slice() views.SliceView[T, V] { return views.SliceOfViews[T, V](v.ж.Slice) } - func (v GenericCloneableStructView[T, V]) Map() views.MapFn[string, T, V] { return views.MapFnOf(v.ж.Map, func(t T) V { return t.View() }) } + +// Unsupported views. func (v GenericCloneableStructView[T, V]) Pointer() map[string]T { panic("unsupported") } func (v GenericCloneableStructView[T, V]) PtrSlice() *T { panic("unsupported") } func (v GenericCloneableStructView[T, V]) PtrKeyMap() map[*T]string { panic("unsupported") } @@ -942,25 +941,21 @@ func (v StructWithTypeAliasFieldsView) SliceWithPtrs() views.SliceView[*StructWi func (v StructWithTypeAliasFieldsView) SliceWithoutPtrs() views.SliceView[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView] { return views.SliceOfViews[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView](v.ж.SliceWithoutPtrs) } - func (v StructWithTypeAliasFieldsView) MapWithPtrs() views.MapFn[string, *StructWithPtrsAlias, StructWithPtrsAliasView] { return views.MapFnOf(v.ж.MapWithPtrs, func(t *StructWithPtrsAlias) StructWithPtrsAliasView { return t.View() }) } - func (v StructWithTypeAliasFieldsView) MapWithoutPtrs() views.MapFn[string, *StructWithoutPtrsAlias, StructWithoutPtrsAliasView] { return views.MapFnOf(v.ж.MapWithoutPtrs, func(t *StructWithoutPtrsAlias) StructWithoutPtrsAliasView { return t.View() }) } - func (v StructWithTypeAliasFieldsView) MapOfSlicesWithPtrs() views.MapFn[string, []*StructWithPtrsAlias, views.SliceView[*StructWithPtrsAlias, StructWithPtrsAliasView]] { return views.MapFnOf(v.ж.MapOfSlicesWithPtrs, func(t []*StructWithPtrsAlias) views.SliceView[*StructWithPtrsAlias, StructWithPtrsAliasView] { return views.SliceOfViews[*StructWithPtrsAlias, StructWithPtrsAliasView](t) }) } - func (v StructWithTypeAliasFieldsView) MapOfSlicesWithoutPtrs() views.MapFn[string, []*StructWithoutPtrsAlias, views.SliceView[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView]] { return views.MapFnOf(v.ж.MapOfSlicesWithoutPtrs, func(t []*StructWithoutPtrsAlias) views.SliceView[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView] { return views.SliceOfViews[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView](t) diff --git a/cmd/viewer/viewer.go b/cmd/viewer/viewer.go index a9617ac10..4fd81ea51 100644 --- a/cmd/viewer/viewer.go +++ b/cmd/viewer/viewer.go @@ -9,6 +9,8 @@ import ( "bytes" "flag" "fmt" + "go/ast" + "go/token" "go/types" "html/template" "log" @@ -17,6 +19,7 @@ import ( "strings" "tailscale.com/util/codegen" + "tailscale.com/util/mak" "tailscale.com/util/must" ) @@ -104,16 +107,13 @@ func (v *{{.ViewName}}{{.TypeParamNames}}) UnmarshalJSONFrom(dec *jsontext.Decod {{define "valuePointerField"}}func (v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.ValuePointer[{{.FieldType}}] { return views.ValuePointerOf(v.ж.{{.FieldName}}) } {{end}} -{{define "mapField"}} -func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.Map[{{.MapKeyType}},{{.MapValueType}}] { return views.MapOf(v.ж.{{.FieldName}})} +{{define "mapField"}}func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.Map[{{.MapKeyType}},{{.MapValueType}}] { return views.MapOf(v.ж.{{.FieldName}})} {{end}} 
-{{define "mapFnField"}} -func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.MapFn[{{.MapKeyType}},{{.MapValueType}},{{.MapValueView}}] { return views.MapFnOf(v.ж.{{.FieldName}}, func (t {{.MapValueType}}) {{.MapValueView}} { +{{define "mapFnField"}}func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.MapFn[{{.MapKeyType}},{{.MapValueType}},{{.MapValueView}}] { return views.MapFnOf(v.ж.{{.FieldName}}, func (t {{.MapValueType}}) {{.MapValueView}} { return {{.MapFn}} })} {{end}} -{{define "mapSliceField"}} -func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.MapSlice[{{.MapKeyType}},{{.MapValueType}}] { return views.MapSliceOf(v.ж.{{.FieldName}}) } +{{define "mapSliceField"}}func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.MapSlice[{{.MapKeyType}},{{.MapValueType}}] { return views.MapSliceOf(v.ж.{{.FieldName}}) } {{end}} {{define "unsupportedField"}}func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() {{.FieldType}} {panic("unsupported")} {{end}} @@ -142,7 +142,81 @@ func requiresCloning(t types.Type) (shallow, deep bool, base types.Type) { return p, p, t } -func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ *types.Package) { +type fieldNameKey struct { + typeName string + fieldName string +} + +// getFieldComments extracts field comments from the AST for a given struct type. +func getFieldComments(syntax []*ast.File) map[fieldNameKey]string { + if len(syntax) == 0 { + return nil + } + var fieldComments map[fieldNameKey]string + + // Search through all AST files in the package + for _, file := range syntax { + // Look for the type declaration + for _, decl := range file.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.TYPE { + continue + } + + for _, spec := range genDecl.Specs { + typeSpec, ok := spec.(*ast.TypeSpec) + if !ok { + continue + } + typeName := typeSpec.Name.Name + + // Check if it's a struct type + structType, ok := typeSpec.Type.(*ast.StructType) + if !ok { + continue + } + + // Extract field comments + for _, field := range structType.Fields.List { + if len(field.Names) == 0 { + // Anonymous field or no names + continue + } + + // Get the field name + fieldName := field.Names[0].Name + key := fieldNameKey{typeName, fieldName} + + // Get the comment + var comment string + if field.Doc != nil && field.Doc.Text() != "" { + // Format the comment for Go code generation + comment = strings.TrimSpace(field.Doc.Text()) + // Convert multi-line comments to proper Go comment format + var sb strings.Builder + for line := range strings.Lines(comment) { + sb.WriteString("// ") + sb.WriteString(line) + } + if sb.Len() > 0 { + comment = sb.String() + } + } else if field.Comment != nil && field.Comment.Text() != "" { + // Handle inline comments + comment = "// " + strings.TrimSpace(field.Comment.Text()) + } + if comment != "" { + mak.Set(&fieldComments, key, comment) + } + } + } + } + } + + return fieldComments +} + +func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, fieldComments map[fieldNameKey]string) { t, ok := typ.Underlying().(*types.Struct) if !ok || codegen.IsViewType(t) { return @@ -182,6 +256,15 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * log.Fatal(err) } } + writeTemplateWithComment := func(name, fieldName string) { + // Write the field comment if it exists + key := fieldNameKey{args.StructName, fieldName} + if comment, ok := fieldComments[key]; ok && comment != "" { + fmt.Fprintln(buf, comment) + } + 
writeTemplate(name) + } + writeTemplate("common") for i := range t.NumFields() { f := t.Field(i) @@ -196,7 +279,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * } if !codegen.ContainsPointers(fieldType) || codegen.IsViewType(fieldType) || codegen.HasNoClone(t.Tag(i)) { args.FieldType = it.QualifiedName(fieldType) - writeTemplate("valueField") + writeTemplateWithComment("valueField", fname) continue } switch underlying := fieldType.Underlying().(type) { @@ -207,7 +290,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * case "byte": args.FieldType = it.QualifiedName(fieldType) it.Import("", "tailscale.com/types/views") - writeTemplate("byteSliceField") + writeTemplateWithComment("byteSliceField", fname) default: args.FieldType = it.QualifiedName(elem) it.Import("", "tailscale.com/types/views") @@ -217,35 +300,35 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * case *types.Pointer: if _, isIface := base.Underlying().(*types.Interface); !isIface { args.FieldViewName = appendNameSuffix(it.QualifiedName(base), "View") - writeTemplate("viewSliceField") + writeTemplateWithComment("viewSliceField", fname) } else { - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) } continue case *types.Interface: if viewType := viewTypeForValueType(elem); viewType != nil { args.FieldViewName = it.QualifiedName(viewType) - writeTemplate("viewSliceField") + writeTemplateWithComment("viewSliceField", fname) continue } } - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) continue } else if shallow { switch base.Underlying().(type) { case *types.Basic, *types.Interface: - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) default: if _, isIface := base.Underlying().(*types.Interface); !isIface { args.FieldViewName = appendNameSuffix(it.QualifiedName(base), "View") - writeTemplate("viewSliceField") + writeTemplateWithComment("viewSliceField", fname) } else { - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) } } continue } - writeTemplate("sliceField") + writeTemplateWithComment("sliceField", fname) } continue case *types.Struct: @@ -254,26 +337,26 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * if codegen.ContainsPointers(strucT) { if viewType := viewTypeForValueType(fieldType); viewType != nil { args.FieldViewName = it.QualifiedName(viewType) - writeTemplate("viewField") + writeTemplateWithComment("viewField", fname) continue } if viewType, makeViewFn := viewTypeForContainerType(fieldType); viewType != nil { args.FieldViewName = it.QualifiedName(viewType) args.MakeViewFnName = it.PackagePrefix(makeViewFn.Pkg()) + makeViewFn.Name() - writeTemplate("makeViewField") + writeTemplateWithComment("makeViewField", fname) continue } - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) continue } - writeTemplate("valueField") + writeTemplateWithComment("valueField", fname) continue case *types.Map: m := underlying args.FieldType = it.QualifiedName(fieldType) shallow, deep, key := requiresCloning(m.Key()) if shallow || deep { - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) continue } it.Import("", "tailscale.com/types/views") @@ -358,7 +441,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * default: template = 
"unsupportedField" } - writeTemplate(template) + writeTemplateWithComment(template, fname) continue case *types.Pointer: ptr := underlying @@ -368,9 +451,9 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * if _, isIface := base.Underlying().(*types.Interface); !isIface { args.FieldType = it.QualifiedName(base) args.FieldViewName = appendNameSuffix(args.FieldType, "View") - writeTemplate("viewField") + writeTemplateWithComment("viewField", fname) } else { - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) } continue } @@ -379,7 +462,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * if viewType := viewTypeForValueType(base); viewType != nil { args.FieldType = it.QualifiedName(base) args.FieldViewName = it.QualifiedName(viewType) - writeTemplate("viewField") + writeTemplateWithComment("viewField", fname) continue } @@ -389,7 +472,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * baseTypeName := it.QualifiedName(base) args.FieldType = baseTypeName args.FieldViewName = appendNameSuffix(args.FieldType, "View") - writeTemplate("viewField") + writeTemplateWithComment("viewField", fname) continue } @@ -397,18 +480,18 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * // and will not have a generated view type, use views.ValuePointer[T] as the field's view type. // Its Get/GetOk methods return stack-allocated shallow copies of the field's value. args.FieldType = it.QualifiedName(base) - writeTemplate("valuePointerField") + writeTemplateWithComment("valuePointerField", fname) continue case *types.Interface: // If fieldType is an interface with a "View() {ViewType}" method, it can be used to clone the field. // This includes scenarios where fieldType is a constrained type parameter. if viewType := viewTypeForValueType(underlying); viewType != nil { args.FieldViewName = it.QualifiedName(viewType) - writeTemplate("viewField") + writeTemplateWithComment("viewField", fname) continue } } - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) } for i := range typ.NumMethods() { f := typ.Method(i) @@ -627,6 +710,7 @@ func main() { log.Fatal(err) } it := codegen.NewImportTracker(pkg.Types) + fieldComments := getFieldComments(pkg.Syntax) cloneOnlyType := map[string]bool{} for _, t := range strings.Split(*flagCloneOnlyTypes, ",") { @@ -654,7 +738,7 @@ func main() { if !hasClone { runCloner = true } - genView(buf, it, typ, pkg.Types) + genView(buf, it, typ, fieldComments) } out := pkg.Name + "_view" if *flagBuildTags == "test" { diff --git a/cmd/viewer/viewer_test.go b/cmd/viewer/viewer_test.go index d12d49655..1e24b7050 100644 --- a/cmd/viewer/viewer_test.go +++ b/cmd/viewer/viewer_test.go @@ -53,6 +53,7 @@ func TestViewerImports(t *testing.T) { if err != nil { t.Fatal(err) } + var fieldComments map[fieldNameKey]string // don't need it for this test. 
var output bytes.Buffer tracker := codegen.NewImportTracker(pkg) @@ -65,7 +66,7 @@ func TestViewerImports(t *testing.T) { if !ok { t.Fatalf("%q is not a named type", tt.typeNames[i]) } - genView(&output, tracker, namedType, pkg) + genView(&output, tracker, namedType, fieldComments) } for _, pkg := range tt.wantImports { diff --git a/drive/drive_view.go b/drive/drive_view.go index 6338705a6..b481751bb 100644 --- a/drive/drive_view.go +++ b/drive/drive_view.go @@ -83,9 +83,24 @@ func (v *ShareView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// Name is how this share appears on remote nodes. func (v ShareView) Name() string { return v.ж.Name } + +// Path is the path to the directory on this machine that's being shared. func (v ShareView) Path() string { return v.ж.Path } -func (v ShareView) As() string { return v.ж.As } + +// As is the UNIX or Windows username of the local account used for this +// share. File read/write permissions are enforced based on this username. +// Can be left blank to use the default value of "whoever is running the +// Tailscale GUI". +func (v ShareView) As() string { return v.ж.As } + +// BookmarkData contains security-scoped bookmark data for the Sandboxed +// Mac application. The Sandboxed Mac application gains permission to +// access the Share's folder as a result of a user selecting it in a file +// picker. In order to retain access to it across restarts, it needs to +// hold on to a security-scoped bookmark. That bookmark is stored here. See +// https://developer.apple.com/documentation/security/app_sandbox/accessing_files_from_the_macos_app_sandbox#4144043 func (v ShareView) BookmarkData() views.ByteSlice[[]byte] { return views.ByteSliceOf(v.ж.BookmarkData) } diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 0f0f652d1..170dc409b 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -89,14 +89,47 @@ func (v *LoginProfileView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v LoginProfileView) ID() ProfileID { return v.ж.ID } -func (v LoginProfileView) Name() string { return v.ж.Name } -func (v LoginProfileView) NetworkProfile() NetworkProfile { return v.ж.NetworkProfile } -func (v LoginProfileView) Key() StateKey { return v.ж.Key } +// ID is a unique identifier for this profile. +// It is assigned on creation and never changes. +// It may seem redundant to have both ID and UserProfile.ID +// but they are different things. UserProfile.ID may change +// over time (e.g. if a device is tagged). +func (v LoginProfileView) ID() ProfileID { return v.ж.ID } + +// Name is the user-visible name of this profile. +// It is filled in from the UserProfile.LoginName field. +func (v LoginProfileView) Name() string { return v.ж.Name } + +// NetworkProfile is a subset of netmap.NetworkMap that we +// store to remember information about the tailnet that this +// profile was logged in with. +// +// This field was added on 2023-11-17. +func (v LoginProfileView) NetworkProfile() NetworkProfile { return v.ж.NetworkProfile } + +// Key is the StateKey under which the profile is stored. +// It is assigned once at profile creation time and never changes. +func (v LoginProfileView) Key() StateKey { return v.ж.Key } + +// UserProfile is the server provided UserProfile for this profile. +// This is updated whenever the server provides a new UserProfile. 
func (v LoginProfileView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } -func (v LoginProfileView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } -func (v LoginProfileView) LocalUserID() WindowsUserID { return v.ж.LocalUserID } -func (v LoginProfileView) ControlURL() string { return v.ж.ControlURL } + +// NodeID is the NodeID of the node that this profile is logged into. +// This should be stable across tagging and untagging nodes. +// It may seem redundant to check against both the UserProfile.UserID +// and the NodeID. However the NodeID can change if the node is deleted +// from the admin panel. +func (v LoginProfileView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } + +// LocalUserID is the user ID of the user who created this profile. +// It is only relevant on Windows where we have a multi-user system. +// It is assigned once at profile creation time and never changes. +func (v LoginProfileView) LocalUserID() WindowsUserID { return v.ж.LocalUserID } + +// ControlURL is the URL of the control server that this profile is logged +// into. +func (v LoginProfileView) ControlURL() string { return v.ж.ControlURL } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _LoginProfileViewNeedsRegeneration = LoginProfile(struct { @@ -177,48 +210,253 @@ func (v *PrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v PrefsView) ControlURL() string { return v.ж.ControlURL } -func (v PrefsView) RouteAll() bool { return v.ж.RouteAll } -func (v PrefsView) ExitNodeID() tailcfg.StableNodeID { return v.ж.ExitNodeID } -func (v PrefsView) ExitNodeIP() netip.Addr { return v.ж.ExitNodeIP } -func (v PrefsView) AutoExitNode() ExitNodeExpression { return v.ж.AutoExitNode } +// ControlURL is the URL of the control server to use. +// +// If empty, the default for new installs, DefaultControlURL +// is used. It's set non-empty once the daemon has been started +// for the first time. +// +// TODO(apenwarr): Make it safe to update this with EditPrefs(). +// Right now, you have to pass it in the initial prefs in Start(), +// which is the only code that actually uses the ControlURL value. +// It would be more consistent to restart controlclient +// automatically whenever this variable changes. +// +// Meanwhile, you have to provide this as part of +// Options.LegacyMigrationPrefs or Options.UpdatePrefs when +// calling Backend.Start(). +func (v PrefsView) ControlURL() string { return v.ж.ControlURL } + +// RouteAll specifies whether to accept subnets advertised by +// other nodes on the Tailscale network. Note that this does not +// include default routes (0.0.0.0/0 and ::/0), those are +// controlled by ExitNodeID/IP below. +func (v PrefsView) RouteAll() bool { return v.ж.RouteAll } + +// ExitNodeID and ExitNodeIP specify the node that should be used +// as an exit node for internet traffic. At most one of these +// should be non-zero. +// +// The preferred way to express the chosen node is ExitNodeID, but +// in some cases it's not possible to use that ID (e.g. in the +// linux CLI, before tailscaled has a netmap). For those +// situations, we allow specifying the exit node by IP, and +// ipnlocal.LocalBackend will translate the IP into an ID when the +// node is found in the netmap. +// +// If the selected exit node doesn't exist (e.g. 
it's not part of +// the current tailnet), or it doesn't offer exit node services, a +// blackhole route will be installed on the local system to +// prevent any traffic escaping to the local network. +func (v PrefsView) ExitNodeID() tailcfg.StableNodeID { return v.ж.ExitNodeID } +func (v PrefsView) ExitNodeIP() netip.Addr { return v.ж.ExitNodeIP } + +// AutoExitNode is an optional expression that specifies whether and how +// tailscaled should pick an exit node automatically. +// +// If specified, tailscaled will use an exit node based on the expression, +// and will re-evaluate the selection periodically as network conditions, +// available exit nodes, or policy settings change. A blackhole route will +// be installed to prevent traffic from escaping to the local network until +// an exit node is selected. It takes precedence over ExitNodeID and ExitNodeIP. +// +// If empty, tailscaled will not automatically select an exit node. +// +// If the specified expression is invalid or unsupported by the client, +// it falls back to the behavior of [AnyExitNode]. +// +// As of 2025-07-02, the only supported value is [AnyExitNode]. +// It's a string rather than a boolean to allow future extensibility +// (e.g., AutoExitNode = "mullvad" or AutoExitNode = "geo:us"). +func (v PrefsView) AutoExitNode() ExitNodeExpression { return v.ж.AutoExitNode } + +// InternalExitNodePrior is the most recently used ExitNodeID in string form. It is set by +// the backend on transition from exit node on to off and used by the +// backend. +// +// As an Internal field, it can't be set by LocalAPI clients, rather it is set indirectly +// when the ExitNodeID value is zero'd and via the set-use-exit-node-enabled endpoint. func (v PrefsView) InternalExitNodePrior() tailcfg.StableNodeID { return v.ж.InternalExitNodePrior } -func (v PrefsView) ExitNodeAllowLANAccess() bool { return v.ж.ExitNodeAllowLANAccess } -func (v PrefsView) CorpDNS() bool { return v.ж.CorpDNS } -func (v PrefsView) RunSSH() bool { return v.ж.RunSSH } -func (v PrefsView) RunWebClient() bool { return v.ж.RunWebClient } -func (v PrefsView) WantRunning() bool { return v.ж.WantRunning } -func (v PrefsView) LoggedOut() bool { return v.ж.LoggedOut } -func (v PrefsView) ShieldsUp() bool { return v.ж.ShieldsUp } -func (v PrefsView) AdvertiseTags() views.Slice[string] { return views.SliceOf(v.ж.AdvertiseTags) } -func (v PrefsView) Hostname() string { return v.ж.Hostname } -func (v PrefsView) NotepadURLs() bool { return v.ж.NotepadURLs } -func (v PrefsView) ForceDaemon() bool { return v.ж.ForceDaemon } -func (v PrefsView) Egg() bool { return v.ж.Egg } + +// ExitNodeAllowLANAccess indicates whether locally accessible subnets should be +// routed directly or via the exit node. +func (v PrefsView) ExitNodeAllowLANAccess() bool { return v.ж.ExitNodeAllowLANAccess } + +// CorpDNS specifies whether to install the Tailscale network's +// DNS configuration, if it exists. +func (v PrefsView) CorpDNS() bool { return v.ж.CorpDNS } + +// RunSSH bool is whether this node should run an SSH +// server, permitting access to peers according to the +// policies as configured by the Tailnet's admin(s). +func (v PrefsView) RunSSH() bool { return v.ж.RunSSH } + +// RunWebClient bool is whether this node should expose +// its web client over Tailscale at port 5252, +// permitting access to peers according to the +// policies as configured by the Tailnet's admin(s). 
+func (v PrefsView) RunWebClient() bool { return v.ж.RunWebClient } + +// WantRunning indicates whether networking should be active on +// this node. +func (v PrefsView) WantRunning() bool { return v.ж.WantRunning } + +// LoggedOut indicates whether the user intends to be logged out. +// There are other reasons we may be logged out, including no valid +// keys. +// We need to remember this state so that, on next startup, we can +// generate the "Login" vs "Connect" buttons correctly, without having +// to contact the server to confirm our nodekey status first. +func (v PrefsView) LoggedOut() bool { return v.ж.LoggedOut } + +// ShieldsUp indicates whether to block all incoming connections, +// regardless of the control-provided packet filter. If false, we +// use the packet filter as provided. If true, we block incoming +// connections. This overrides tailcfg.Hostinfo's ShieldsUp. +func (v PrefsView) ShieldsUp() bool { return v.ж.ShieldsUp } + +// AdvertiseTags specifies tags that should be applied to this node, for +// purposes of ACL enforcement. These can be referenced from the ACL policy +// document. Note that advertising a tag on the client doesn't guarantee +// that the control server will allow the node to adopt that tag. +func (v PrefsView) AdvertiseTags() views.Slice[string] { return views.SliceOf(v.ж.AdvertiseTags) } + +// Hostname is the hostname to use for identifying the node. If +// not set, os.Hostname is used. +func (v PrefsView) Hostname() string { return v.ж.Hostname } + +// NotepadURLs is a debugging setting that opens OAuth URLs in +// notepad.exe on Windows, rather than loading them in a browser. +// +// apenwarr 2020-04-29: Unfortunately this is still needed sometimes. +// Windows' default browser setting is sometimes screwy and this helps +// users narrow it down a bit. +func (v PrefsView) NotepadURLs() bool { return v.ж.NotepadURLs } + +// ForceDaemon specifies whether a platform that normally +// operates in "client mode" (that is, requires an active user +// logged in with the GUI app running) should keep running after the +// GUI ends and/or the user logs out. +// +// The only current applicable platform is Windows. This +// forced Windows to go into "server mode" where Tailscale is +// running even with no users logged in. This might also be +// used for macOS in the future. This setting has no effect +// for Linux/etc, which always operate in daemon mode. +func (v PrefsView) ForceDaemon() bool { return v.ж.ForceDaemon } + +// Egg is a optional debug flag. +func (v PrefsView) Egg() bool { return v.ж.Egg } + +// AdvertiseRoutes specifies CIDR prefixes to advertise into the +// Tailscale network as reachable through the current +// node. func (v PrefsView) AdvertiseRoutes() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.AdvertiseRoutes) } + +// AdvertiseServices specifies the list of services that this +// node can serve as a destination for. Note that an advertised +// service must still go through the approval process from the +// control server. func (v PrefsView) AdvertiseServices() views.Slice[string] { return views.SliceOf(v.ж.AdvertiseServices) } -func (v PrefsView) NoSNAT() bool { return v.ж.NoSNAT } -func (v PrefsView) NoStatefulFiltering() opt.Bool { return v.ж.NoStatefulFiltering } + +// NoSNAT specifies whether to source NAT traffic going to +// destinations in AdvertiseRoutes. The default is to apply source +// NAT, which makes the traffic appear to come from the router +// machine rather than the peer's Tailscale IP. 
+// +// Disabling SNAT requires additional manual configuration in your +// network to route Tailscale traffic back to the subnet relay +// machine. +// +// Linux-only. +func (v PrefsView) NoSNAT() bool { return v.ж.NoSNAT } + +// NoStatefulFiltering specifies whether to apply stateful filtering when +// advertising routes in AdvertiseRoutes. The default is to not apply +// stateful filtering. +// +// To allow inbound connections from advertised routes, both NoSNAT and +// NoStatefulFiltering must be true. +// +// This is an opt.Bool because it was first added after NoSNAT, with a +// backfill based on the value of that parameter. The backfill has been +// removed since then, but the field remains an opt.Bool. +// +// Linux-only. +func (v PrefsView) NoStatefulFiltering() opt.Bool { return v.ж.NoStatefulFiltering } + +// NetfilterMode specifies how much to manage netfilter rules for +// Tailscale, if at all. func (v PrefsView) NetfilterMode() preftype.NetfilterMode { return v.ж.NetfilterMode } -func (v PrefsView) OperatorUser() string { return v.ж.OperatorUser } -func (v PrefsView) ProfileName() string { return v.ж.ProfileName } -func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.ж.AutoUpdate } -func (v PrefsView) AppConnector() AppConnectorPrefs { return v.ж.AppConnector } -func (v PrefsView) PostureChecking() bool { return v.ж.PostureChecking } -func (v PrefsView) NetfilterKind() string { return v.ж.NetfilterKind } + +// OperatorUser is the local machine user name who is allowed to +// operate tailscaled without being root or using sudo. +func (v PrefsView) OperatorUser() string { return v.ж.OperatorUser } + +// ProfileName is the desired name of the profile. If empty, then the user's +// LoginName is used. It is only used for display purposes in the client UI +// and CLI. +func (v PrefsView) ProfileName() string { return v.ж.ProfileName } + +// AutoUpdate sets the auto-update preferences for the node agent. See +// AutoUpdatePrefs docs for more details. +func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.ж.AutoUpdate } + +// AppConnector sets the app connector preferences for the node agent. See +// AppConnectorPrefs docs for more details. +func (v PrefsView) AppConnector() AppConnectorPrefs { return v.ж.AppConnector } + +// PostureChecking enables the collection of information used for device +// posture checks. +// +// Note: this should be named ReportPosture, but it was shipped as +// PostureChecking in some early releases and this JSON field is written to +// disk, so we just keep its old name. (akin to CorpDNS which is an internal +// pref name that doesn't match the public interface) +func (v PrefsView) PostureChecking() bool { return v.ж.PostureChecking } + +// NetfilterKind specifies what netfilter implementation to use. +// +// Linux-only. +func (v PrefsView) NetfilterKind() string { return v.ж.NetfilterKind } + +// DriveShares are the configured DriveShares, stored in increasing order +// by name. func (v PrefsView) DriveShares() views.SliceView[*drive.Share, drive.ShareView] { return views.SliceOfViews[*drive.Share, drive.ShareView](v.ж.DriveShares) } + +// RelayServerPort is the UDP port number for the relay server to bind to, +// on all interfaces. A non-nil zero value signifies a random unused port +// should be used. A nil value signifies relay server functionality +// should be disabled. This field is currently experimental, and therefore +// no guarantees are made about its current naming and functionality when +// non-nil/enabled. 
func (v PrefsView) RelayServerPort() views.ValuePointer[int] { return views.ValuePointerOf(v.ж.RelayServerPort) } +// AllowSingleHosts was a legacy field that was always true +// for the past 4.5 years. It controlled whether Tailscale +// peers got /32 or /127 routes for each other. +// As of 2024-05-17 we're starting to ignore it, but to let +// people still downgrade Tailscale versions and not break +// all peer-to-peer networking we still write it to disk (as JSON) +// so it can be loaded back by old versions. +// TODO(bradfitz): delete this in 2025 sometime. See #12058. func (v PrefsView) AllowSingleHosts() marshalAsTrueInJSON { return v.ж.AllowSingleHosts } -func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() } + +// The Persist field is named 'Config' in the file for backward +// compatibility with earlier versions. +// TODO(apenwarr): We should move this out of here, it's not a pref. +// +// We can maybe do that once we're sure which module should persist +// it (backend or frontend?) +func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _PrefsViewNeedsRegeneration = Prefs(struct { @@ -324,33 +562,52 @@ func (v *ServeConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// TCP are the list of TCP port numbers that tailscaled should handle for +// the Tailscale IP addresses. (not subnet routers, etc) func (v ServeConfigView) TCP() views.MapFn[uint16, *TCPPortHandler, TCPPortHandlerView] { return views.MapFnOf(v.ж.TCP, func(t *TCPPortHandler) TCPPortHandlerView { return t.View() }) } +// Web maps from "$SNI_NAME:$PORT" to a set of HTTP handlers +// keyed by mount point ("/", "/foo", etc) func (v ServeConfigView) Web() views.MapFn[HostPort, *WebServerConfig, WebServerConfigView] { return views.MapFnOf(v.ж.Web, func(t *WebServerConfig) WebServerConfigView { return t.View() }) } +// Services maps from service name (in the form "svc:dns-label") to a ServiceConfig. +// Which describes the L3, L4, and L7 forwarding information for the service. func (v ServeConfigView) Services() views.MapFn[tailcfg.ServiceName, *ServiceConfig, ServiceConfigView] { return views.MapFnOf(v.ж.Services, func(t *ServiceConfig) ServiceConfigView { return t.View() }) } +// AllowFunnel is the set of SNI:port values for which funnel +// traffic is allowed, from trusted ingress peers. func (v ServeConfigView) AllowFunnel() views.Map[HostPort, bool] { return views.MapOf(v.ж.AllowFunnel) } +// Foreground is a map of an IPN Bus session ID to an alternate foreground serve config that's valid for the +// life of that WatchIPNBus session ID. This allows the config to specify ephemeral configs that are used +// in the CLI's foreground mode to ensure ungraceful shutdowns of either the client or the LocalBackend does not +// expose ports that users are not aware of. In practice this contains any serve config set via 'tailscale +// serve' command run without the '--bg' flag. ServeConfig contained by Foreground is not expected itself to contain +// another Foreground block. 
func (v ServeConfigView) Foreground() views.MapFn[string, *ServeConfig, ServeConfigView] { return views.MapFnOf(v.ж.Foreground, func(t *ServeConfig) ServeConfigView { return t.View() }) } + +// ETag is the checksum of the serve config that's populated +// by the LocalClient through the HTTP ETag header during a +// GetServeConfig request and is translated to an If-Match header +// during a SetServeConfig request. func (v ServeConfigView) ETag() string { return v.ж.ETag } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -430,17 +687,23 @@ func (v *ServiceConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// TCP are the list of TCP port numbers that tailscaled should handle for +// the Tailscale IP addresses. (not subnet routers, etc) func (v ServiceConfigView) TCP() views.MapFn[uint16, *TCPPortHandler, TCPPortHandlerView] { return views.MapFnOf(v.ж.TCP, func(t *TCPPortHandler) TCPPortHandlerView { return t.View() }) } +// Web maps from "$SNI_NAME:$PORT" to a set of HTTP handlers +// keyed by mount point ("/", "/foo", etc) func (v ServiceConfigView) Web() views.MapFn[HostPort, *WebServerConfig, WebServerConfigView] { return views.MapFnOf(v.ж.Web, func(t *WebServerConfig) WebServerConfigView { return t.View() }) } + +// Tun determines if the service should be using L3 forwarding (Tun mode). func (v ServiceConfigView) Tun() bool { return v.ж.Tun } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -517,9 +780,29 @@ func (v *TCPPortHandlerView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v TCPPortHandlerView) HTTPS() bool { return v.ж.HTTPS } -func (v TCPPortHandlerView) HTTP() bool { return v.ж.HTTP } -func (v TCPPortHandlerView) TCPForward() string { return v.ж.TCPForward } +// HTTPS, if true, means that tailscaled should handle this connection as an +// HTTPS request as configured by ServeConfig.Web. +// +// It is mutually exclusive with TCPForward. +func (v TCPPortHandlerView) HTTPS() bool { return v.ж.HTTPS } + +// HTTP, if true, means that tailscaled should handle this connection as an +// HTTP request as configured by ServeConfig.Web. +// +// It is mutually exclusive with TCPForward. +func (v TCPPortHandlerView) HTTP() bool { return v.ж.HTTP } + +// TCPForward is the IP:port to forward TCP connections to. +// Whether or not TLS is terminated by tailscaled depends on +// TerminateTLS. +// +// It is mutually exclusive with HTTPS. +func (v TCPPortHandlerView) TCPForward() string { return v.ж.TCPForward } + +// TerminateTLS, if non-empty, means that tailscaled should terminate the +// TLS connections before forwarding them to TCPForward, permitting only the +// SNI name with this value. It is only used if TCPForward is non-empty. +// (the HTTPS mode uses ServeConfig.Web) func (v TCPPortHandlerView) TerminateTLS() string { return v.ж.TerminateTLS } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
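
As a rough illustration of the mechanism these hunks rely on — copying struct field doc comments onto the generated View accessors — here is a minimal sketch, in the spirit of the getFieldComments helper referenced in the cmd/viewer hunk, of how such comments can be gathered from parsed source with go/ast. The "TypeName.FieldName" key format and the collectFieldComments name are assumptions for illustration only, not the patch's actual fieldNameKey type or implementation.

// Hypothetical sketch only; not the patch's getFieldComments implementation.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"strings"
)

// collectFieldComments maps "TypeName.FieldName" to that field's doc comment.
func collectFieldComments(files []*ast.File) map[string]string {
	out := make(map[string]string)
	for _, f := range files {
		ast.Inspect(f, func(n ast.Node) bool {
			ts, ok := n.(*ast.TypeSpec)
			if !ok {
				return true
			}
			st, ok := ts.Type.(*ast.StructType)
			if !ok {
				return true
			}
			for _, field := range st.Fields.List {
				if field.Doc == nil {
					continue // field has no doc comment
				}
				doc := strings.TrimSpace(field.Doc.Text())
				for _, name := range field.Names {
					out[ts.Name.Name+"."+name.Name] = doc
				}
			}
			return true
		})
	}
	return out
}

func main() {
	src := `package p

type Share struct {
	// Name is how this share appears on remote nodes.
	Name string
}`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	// Prints: Name is how this share appears on remote nodes.
	fmt.Println(collectFieldComments([]*ast.File{f})["Share.Name"])
}

A generator holding such a map can then look up each field by its enclosing type and name and emit the comment immediately above the corresponding accessor, which is what produces the documented methods seen in the generated files above.
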
@@ -597,9 +880,14 @@ func (v *HTTPHandlerView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v HTTPHandlerView) Path() string { return v.ж.Path } +// absolute path to directory or file to serve +func (v HTTPHandlerView) Path() string { return v.ж.Path } + +// http://localhost:3000/, localhost:3030, 3030 func (v HTTPHandlerView) Proxy() string { return v.ж.Proxy } -func (v HTTPHandlerView) Text() string { return v.ж.Text } + +// plaintext to serve (primarily for testing) +func (v HTTPHandlerView) Text() string { return v.ж.Text } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _HTTPHandlerViewNeedsRegeneration = HTTPHandler(struct { @@ -675,6 +963,7 @@ func (v *WebServerConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// mountPoint => handler func (v WebServerConfigView) Handlers() views.MapFn[string, *HTTPHandler, HTTPHandlerView] { return views.MapFnOf(v.ж.Handlers, func(t *HTTPHandler) HTTPHandlerView { return t.View() diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 8dc4f1ca8..e44d0bbef 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -90,8 +90,12 @@ func (v *UserView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v UserView) ID() UserID { return v.ж.ID } -func (v UserView) DisplayName() string { return v.ж.DisplayName } +func (v UserView) ID() UserID { return v.ж.ID } + +// if non-empty overrides Login field +func (v UserView) DisplayName() string { return v.ж.DisplayName } + +// if non-empty overrides Login field func (v UserView) ProfilePicURL() string { return v.ж.ProfilePicURL } func (v UserView) Created() time.Time { return v.ж.Created } @@ -172,53 +176,202 @@ func (v *NodeView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { func (v NodeView) ID() NodeID { return v.ж.ID } func (v NodeView) StableID() StableNodeID { return v.ж.StableID } -func (v NodeView) Name() string { return v.ж.Name } -func (v NodeView) User() UserID { return v.ж.User } -func (v NodeView) Sharer() UserID { return v.ж.Sharer } -func (v NodeView) Key() key.NodePublic { return v.ж.Key } -func (v NodeView) KeyExpiry() time.Time { return v.ж.KeyExpiry } + +// Name is the FQDN of the node. +// It is also the MagicDNS name for the node. +// It has a trailing dot. +// e.g. "host.tail-scale.ts.net." +func (v NodeView) Name() string { return v.ж.Name } + +// User is the user who created the node. If ACL tags are in use for the +// node then it doesn't reflect the ACL identity that the node is running +// as. +func (v NodeView) User() UserID { return v.ж.User } + +// Sharer, if non-zero, is the user who shared this node, if different than User. 
+func (v NodeView) Sharer() UserID { return v.ж.Sharer } +func (v NodeView) Key() key.NodePublic { return v.ж.Key } + +// the zero value if this node does not expire +func (v NodeView) KeyExpiry() time.Time { return v.ж.KeyExpiry } func (v NodeView) KeySignature() views.ByteSlice[tkatype.MarshaledSignature] { return views.ByteSliceOf(v.ж.KeySignature) } -func (v NodeView) Machine() key.MachinePublic { return v.ж.Machine } -func (v NodeView) DiscoKey() key.DiscoPublic { return v.ж.DiscoKey } -func (v NodeView) Addresses() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.Addresses) } -func (v NodeView) AllowedIPs() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.AllowedIPs) } -func (v NodeView) Endpoints() views.Slice[netip.AddrPort] { return views.SliceOf(v.ж.Endpoints) } -func (v NodeView) LegacyDERPString() string { return v.ж.LegacyDERPString } -func (v NodeView) HomeDERP() int { return v.ж.HomeDERP } -func (v NodeView) Hostinfo() HostinfoView { return v.ж.Hostinfo } -func (v NodeView) Created() time.Time { return v.ж.Created } -func (v NodeView) Cap() CapabilityVersion { return v.ж.Cap } -func (v NodeView) Tags() views.Slice[string] { return views.SliceOf(v.ж.Tags) } +func (v NodeView) Machine() key.MachinePublic { return v.ж.Machine } +func (v NodeView) DiscoKey() key.DiscoPublic { return v.ж.DiscoKey } + +// Addresses are the IP addresses of this Node directly. +func (v NodeView) Addresses() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.Addresses) } + +// AllowedIPs are the IP ranges to route to this node. +// +// As of CapabilityVersion 112, this may be nil (null or undefined) on the wire +// to mean the same as Addresses. Internally, it is always filled in with +// its possibly-implicit value. +func (v NodeView) AllowedIPs() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.AllowedIPs) } + +// IP+port (public via STUN, and local LANs) +func (v NodeView) Endpoints() views.Slice[netip.AddrPort] { return views.SliceOf(v.ж.Endpoints) } + +// LegacyDERPString is this node's home LegacyDERPString region ID integer, but shoved into an +// IP:port string for legacy reasons. The IP address is always "127.3.3.40" +// (a loopback address (127) followed by the digits over the letters DERP on +// a QWERTY keyboard (3.3.40)). The "port number" is the home LegacyDERPString region ID +// integer. +// +// Deprecated: HomeDERP has replaced this, but old servers might still send +// this field. See tailscale/tailscale#14636. Do not use this field in code +// other than in the upgradeNode func, which canonicalizes it to HomeDERP +// if it arrives as a LegacyDERPString string on the wire. +func (v NodeView) LegacyDERPString() string { return v.ж.LegacyDERPString } + +// HomeDERP is the modern version of the DERP string field, with just an +// integer. The client advertises support for this as of capver 111. +// +// HomeDERP may be zero if not (yet) known, but ideally always be non-zero +// for magicsock connectivity to function normally. +func (v NodeView) HomeDERP() int { return v.ж.HomeDERP } +func (v NodeView) Hostinfo() HostinfoView { return v.ж.Hostinfo } +func (v NodeView) Created() time.Time { return v.ж.Created } + +// if non-zero, the node's capability version; old servers might not send +func (v NodeView) Cap() CapabilityVersion { return v.ж.Cap } + +// Tags are the list of ACL tags applied to this node. +// Tags take the form of `tag:` where value starts +// with a letter and only contains alphanumerics and dashes `-`. 
+// Some valid tag examples: +// +// `tag:prod` +// `tag:database` +// `tag:lab-1` +func (v NodeView) Tags() views.Slice[string] { return views.SliceOf(v.ж.Tags) } + +// PrimaryRoutes are the routes from AllowedIPs that this node +// is currently the primary subnet router for, as determined +// by the control plane. It does not include the self address +// values from Addresses that are in AllowedIPs. func (v NodeView) PrimaryRoutes() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.PrimaryRoutes) } + +// LastSeen is when the node was last online. It is not +// updated when Online is true. It is nil if the current +// node doesn't have permission to know, or the node +// has never been online. func (v NodeView) LastSeen() views.ValuePointer[time.Time] { return views.ValuePointerOf(v.ж.LastSeen) } +// Online is whether the node is currently connected to the +// coordination server. A value of nil means unknown, or the +// current node doesn't have permission to know. func (v NodeView) Online() views.ValuePointer[bool] { return views.ValuePointerOf(v.ж.Online) } -func (v NodeView) MachineAuthorized() bool { return v.ж.MachineAuthorized } +// TODO(crawshaw): replace with MachineStatus +func (v NodeView) MachineAuthorized() bool { return v.ж.MachineAuthorized } + +// Capabilities are capabilities that the node has. +// They're free-form strings, but should be in the form of URLs/URIs +// such as: +// +// "https://tailscale.com/cap/is-admin" +// "https://tailscale.com/cap/file-sharing" +// +// Deprecated: use CapMap instead. See https://github.com/tailscale/tailscale/issues/11508 func (v NodeView) Capabilities() views.Slice[NodeCapability] { return views.SliceOf(v.ж.Capabilities) } +// CapMap is a map of capabilities to their optional argument/data values. +// +// It is valid for a capability to not have any argument/data values; such +// capabilities can be tested for using the HasCap method. These type of +// capabilities are used to indicate that a node has a capability, but there +// is no additional data associated with it. These were previously +// represented by the Capabilities field, but can now be represented by +// CapMap with an empty value. +// +// See NodeCapability for more information on keys. +// +// Metadata about nodes can be transmitted in 3 ways: +// 1. MapResponse.Node.CapMap describes attributes that affect behavior for +// this node, such as which features have been enabled through the admin +// panel and any associated configuration details. +// 2. MapResponse.PacketFilter(s) describes access (both IP and application +// based) that should be granted to peers. +// 3. MapResponse.Peers[].CapMap describes attributes regarding a peer node, +// such as which features the peer supports or if that peer is preferred +// for a particular task vs other peers that could also be chosen. func (v NodeView) CapMap() views.MapSlice[NodeCapability, RawMessage] { return views.MapSliceOf(v.ж.CapMap) } -func (v NodeView) UnsignedPeerAPIOnly() bool { return v.ж.UnsignedPeerAPIOnly } -func (v NodeView) ComputedName() string { return v.ж.ComputedName } + +// UnsignedPeerAPIOnly means that this node is not signed nor subject to TKA +// restrictions. However, in exchange for that privilege, it does not get +// network access. It can only access this node's peerapi, which may not let +// it do anything. It is the tailscaled client's job to double-check the +// MapResponse's PacketFilter to verify that its AllowedIPs will not be +// accepted by the packet filter. 
+func (v NodeView) UnsignedPeerAPIOnly() bool { return v.ж.UnsignedPeerAPIOnly } + +// MagicDNS base name (for normal non-shared-in nodes), FQDN (without trailing dot, for shared-in nodes), or Hostname (if no MagicDNS) +func (v NodeView) ComputedName() string { return v.ж.ComputedName } + +// either "ComputedName" or "ComputedName (computedHostIfDifferent)", if computedHostIfDifferent is set func (v NodeView) ComputedNameWithHost() string { return v.ж.ComputedNameWithHost } -func (v NodeView) DataPlaneAuditLogID() string { return v.ж.DataPlaneAuditLogID } -func (v NodeView) Expired() bool { return v.ж.Expired } + +// DataPlaneAuditLogID is the per-node logtail ID used for data plane audit logging. +func (v NodeView) DataPlaneAuditLogID() string { return v.ж.DataPlaneAuditLogID } + +// Expired is whether this node's key has expired. Control may send +// this; clients are only allowed to set this from false to true. On +// the client, this is calculated client-side based on a timestamp sent +// from control, to avoid clock skew issues. +func (v NodeView) Expired() bool { return v.ж.Expired } + +// SelfNodeV4MasqAddrForThisPeer is the IPv4 that this peer knows the current node as. +// It may be empty if the peer knows the current node by its native +// IPv4 address. +// This field is only populated in a MapResponse for peers and not +// for the current node. +// +// If set, it should be used to masquerade traffic originating from the +// current node to this peer. The masquerade address is only relevant +// for this peer and not for other peers. +// +// This only applies to traffic originating from the current node to the +// peer or any of its subnets. Traffic originating from subnet routes will +// not be masqueraded (e.g. in case of --snat-subnet-routes). func (v NodeView) SelfNodeV4MasqAddrForThisPeer() views.ValuePointer[netip.Addr] { return views.ValuePointerOf(v.ж.SelfNodeV4MasqAddrForThisPeer) } +// SelfNodeV6MasqAddrForThisPeer is the IPv6 that this peer knows the current node as. +// It may be empty if the peer knows the current node by its native +// IPv6 address. +// This field is only populated in a MapResponse for peers and not +// for the current node. +// +// If set, it should be used to masquerade traffic originating from the +// current node to this peer. The masquerade address is only relevant +// for this peer and not for other peers. +// +// This only applies to traffic originating from the current node to the +// peer or any of its subnets. Traffic originating from subnet routes will +// not be masqueraded (e.g. in case of --snat-subnet-routes). func (v NodeView) SelfNodeV6MasqAddrForThisPeer() views.ValuePointer[netip.Addr] { return views.ValuePointerOf(v.ж.SelfNodeV6MasqAddrForThisPeer) } +// IsWireGuardOnly indicates that this is a non-Tailscale WireGuard peer, it +// is not expected to speak Disco or DERP, and it must have Endpoints in +// order to be reachable. func (v NodeView) IsWireGuardOnly() bool { return v.ж.IsWireGuardOnly } -func (v NodeView) IsJailed() bool { return v.ж.IsJailed } + +// IsJailed indicates that this node is jailed and should not be allowed +// initiate connections, however outbound connections to it should still be +// allowed. +func (v NodeView) IsJailed() bool { return v.ж.IsJailed } + +// ExitNodeDNSResolvers is the list of DNS servers that should be used when this +// node is marked IsWireGuardOnly and being used as an exit node. 
func (v NodeView) ExitNodeDNSResolvers() views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](v.ж.ExitNodeDNSResolvers) } @@ -331,47 +484,144 @@ func (v *HostinfoView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v HostinfoView) IPNVersion() string { return v.ж.IPNVersion } -func (v HostinfoView) FrontendLogID() string { return v.ж.FrontendLogID } -func (v HostinfoView) BackendLogID() string { return v.ж.BackendLogID } -func (v HostinfoView) OS() string { return v.ж.OS } -func (v HostinfoView) OSVersion() string { return v.ж.OSVersion } -func (v HostinfoView) Container() opt.Bool { return v.ж.Container } -func (v HostinfoView) Env() string { return v.ж.Env } -func (v HostinfoView) Distro() string { return v.ж.Distro } -func (v HostinfoView) DistroVersion() string { return v.ж.DistroVersion } -func (v HostinfoView) DistroCodeName() string { return v.ж.DistroCodeName } -func (v HostinfoView) App() string { return v.ж.App } -func (v HostinfoView) Desktop() opt.Bool { return v.ж.Desktop } -func (v HostinfoView) Package() string { return v.ж.Package } -func (v HostinfoView) DeviceModel() string { return v.ж.DeviceModel } -func (v HostinfoView) PushDeviceToken() string { return v.ж.PushDeviceToken } -func (v HostinfoView) Hostname() string { return v.ж.Hostname } -func (v HostinfoView) ShieldsUp() bool { return v.ж.ShieldsUp } -func (v HostinfoView) ShareeNode() bool { return v.ж.ShareeNode } -func (v HostinfoView) NoLogsNoSupport() bool { return v.ж.NoLogsNoSupport } -func (v HostinfoView) WireIngress() bool { return v.ж.WireIngress } -func (v HostinfoView) IngressEnabled() bool { return v.ж.IngressEnabled } -func (v HostinfoView) AllowsUpdate() bool { return v.ж.AllowsUpdate } -func (v HostinfoView) Machine() string { return v.ж.Machine } -func (v HostinfoView) GoArch() string { return v.ж.GoArch } -func (v HostinfoView) GoArchVar() string { return v.ж.GoArchVar } -func (v HostinfoView) GoVersion() string { return v.ж.GoVersion } +// version of this code (in version.Long format) +func (v HostinfoView) IPNVersion() string { return v.ж.IPNVersion } + +// logtail ID of frontend instance +func (v HostinfoView) FrontendLogID() string { return v.ж.FrontendLogID } + +// logtail ID of backend instance +func (v HostinfoView) BackendLogID() string { return v.ж.BackendLogID } + +// operating system the client runs on (a version.OS value) +func (v HostinfoView) OS() string { return v.ж.OS } + +// OSVersion is the version of the OS, if available. +// +// For Android, it's like "10", "11", "12", etc. For iOS and macOS it's like +// "15.6.1" or "12.4.0". For Windows it's like "10.0.19044.1889". For +// FreeBSD it's like "12.3-STABLE". +// +// For Linux, prior to Tailscale 1.32, we jammed a bunch of fields into this +// string on Linux, like "Debian 10.4; kernel=xxx; container; env=kn" and so +// on. As of Tailscale 1.32, this is simply the kernel version on Linux, like +// "5.10.0-17-amd64". +func (v HostinfoView) OSVersion() string { return v.ж.OSVersion } + +// best-effort whether the client is running in a container +func (v HostinfoView) Container() opt.Bool { return v.ж.Container } + +// a hostinfo.EnvType in string form +func (v HostinfoView) Env() string { return v.ж.Env } + +// "debian", "ubuntu", "nixos", ... +func (v HostinfoView) Distro() string { return v.ж.Distro } + +// "20.04", ... +func (v HostinfoView) DistroVersion() string { return v.ж.DistroVersion } + +// "jammy", "bullseye", ... 
+func (v HostinfoView) DistroCodeName() string { return v.ж.DistroCodeName } + +// App is used to disambiguate Tailscale clients that run using tsnet. +func (v HostinfoView) App() string { return v.ж.App } + +// if a desktop was detected on Linux +func (v HostinfoView) Desktop() opt.Bool { return v.ж.Desktop } + +// Tailscale package to disambiguate ("choco", "appstore", etc; "" for unknown) +func (v HostinfoView) Package() string { return v.ж.Package } + +// mobile phone model ("Pixel 3a", "iPhone12,3") +func (v HostinfoView) DeviceModel() string { return v.ж.DeviceModel } + +// macOS/iOS APNs device token for notifications (and Android in the future) +func (v HostinfoView) PushDeviceToken() string { return v.ж.PushDeviceToken } + +// name of the host the client runs on +func (v HostinfoView) Hostname() string { return v.ж.Hostname } + +// indicates whether the host is blocking incoming connections +func (v HostinfoView) ShieldsUp() bool { return v.ж.ShieldsUp } + +// indicates this node exists in netmap because it's owned by a shared-to user +func (v HostinfoView) ShareeNode() bool { return v.ж.ShareeNode } + +// indicates that the user has opted out of sending logs and support +func (v HostinfoView) NoLogsNoSupport() bool { return v.ж.NoLogsNoSupport } + +// WireIngress indicates that the node would like to be wired up server-side +// (DNS, etc) to be able to use Tailscale Funnel, even if it's not currently +// enabled. For example, the user might only use it for intermittent +// foreground CLI serve sessions, for which they'd like it to work right +// away, even if it's disabled most of the time. As an optimization, this is +// only sent if IngressEnabled is false, as IngressEnabled implies that this +// option is true. +func (v HostinfoView) WireIngress() bool { return v.ж.WireIngress } + +// if the node has any funnel endpoint enabled +func (v HostinfoView) IngressEnabled() bool { return v.ж.IngressEnabled } + +// indicates that the node has opted-in to admin-console-drive remote updates +func (v HostinfoView) AllowsUpdate() bool { return v.ж.AllowsUpdate } + +// the current host's machine type (uname -m) +func (v HostinfoView) Machine() string { return v.ж.Machine } + +// GOARCH value (of the built binary) +func (v HostinfoView) GoArch() string { return v.ж.GoArch } + +// GOARM, GOAMD64, etc (of the built binary) +func (v HostinfoView) GoArchVar() string { return v.ж.GoArchVar } + +// Go version binary was built with +func (v HostinfoView) GoVersion() string { return v.ж.GoVersion } + +// set of IP ranges this client can route func (v HostinfoView) RoutableIPs() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.RoutableIPs) } -func (v HostinfoView) RequestTags() views.Slice[string] { return views.SliceOf(v.ж.RequestTags) } -func (v HostinfoView) WoLMACs() views.Slice[string] { return views.SliceOf(v.ж.WoLMACs) } -func (v HostinfoView) Services() views.Slice[Service] { return views.SliceOf(v.ж.Services) } -func (v HostinfoView) NetInfo() NetInfoView { return v.ж.NetInfo.View() } -func (v HostinfoView) SSH_HostKeys() views.Slice[string] { return views.SliceOf(v.ж.SSH_HostKeys) } -func (v HostinfoView) Cloud() string { return v.ж.Cloud } -func (v HostinfoView) Userspace() opt.Bool { return v.ж.Userspace } -func (v HostinfoView) UserspaceRouter() opt.Bool { return v.ж.UserspaceRouter } -func (v HostinfoView) AppConnector() opt.Bool { return v.ж.AppConnector } -func (v HostinfoView) ServicesHash() string { return v.ж.ServicesHash } -func (v HostinfoView) ExitNodeID() StableNodeID { 
return v.ж.ExitNodeID } -func (v HostinfoView) Location() LocationView { return v.ж.Location.View() } -func (v HostinfoView) TPM() views.ValuePointer[TPMInfo] { return views.ValuePointerOf(v.ж.TPM) } +// set of ACL tags this node wants to claim +func (v HostinfoView) RequestTags() views.Slice[string] { return views.SliceOf(v.ж.RequestTags) } + +// MAC address(es) to send Wake-on-LAN packets to wake this node (lowercase hex w/ colons) +func (v HostinfoView) WoLMACs() views.Slice[string] { return views.SliceOf(v.ж.WoLMACs) } + +// services advertised by this machine +func (v HostinfoView) Services() views.Slice[Service] { return views.SliceOf(v.ж.Services) } +func (v HostinfoView) NetInfo() NetInfoView { return v.ж.NetInfo.View() } + +// if advertised +func (v HostinfoView) SSH_HostKeys() views.Slice[string] { return views.SliceOf(v.ж.SSH_HostKeys) } +func (v HostinfoView) Cloud() string { return v.ж.Cloud } + +// if the client is running in userspace (netstack) mode +func (v HostinfoView) Userspace() opt.Bool { return v.ж.Userspace } + +// if the client's subnet router is running in userspace (netstack) mode +func (v HostinfoView) UserspaceRouter() opt.Bool { return v.ж.UserspaceRouter } + +// if the client is running the app-connector service +func (v HostinfoView) AppConnector() opt.Bool { return v.ж.AppConnector } + +// opaque hash of the most recent list of tailnet services, change in hash indicates config should be fetched via c2n +func (v HostinfoView) ServicesHash() string { return v.ж.ServicesHash } + +// the client’s selected exit node, empty when unselected. +func (v HostinfoView) ExitNodeID() StableNodeID { return v.ж.ExitNodeID } + +// Location represents geographical location data about a +// Tailscale host. Location is optional and only set if +// explicitly declared by a node. +func (v HostinfoView) Location() LocationView { return v.ж.Location.View() } + +// TPM device metadata, if available +func (v HostinfoView) TPM() views.ValuePointer[TPMInfo] { return views.ValuePointerOf(v.ж.TPM) } + +// StateEncrypted reports whether the node state is stored encrypted on +// disk. The actual mechanism is platform-specific: +// - Apple nodes use the Keychain +// - Linux and Windows nodes use the TPM +// - Android apps use EncryptedSharedPreferences func (v HostinfoView) StateEncrypted() opt.Bool { return v.ж.StateEncrypted } func (v HostinfoView) Equal(v2 HostinfoView) bool { return v.ж.Equal(v2.ж) } @@ -487,22 +737,74 @@ func (v *NetInfoView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// MappingVariesByDestIP says whether the host's NAT mappings +// vary based on the destination IP. func (v NetInfoView) MappingVariesByDestIP() opt.Bool { return v.ж.MappingVariesByDestIP } -func (v NetInfoView) HairPinning() opt.Bool { return v.ж.HairPinning } -func (v NetInfoView) WorkingIPv6() opt.Bool { return v.ж.WorkingIPv6 } -func (v NetInfoView) OSHasIPv6() opt.Bool { return v.ж.OSHasIPv6 } -func (v NetInfoView) WorkingUDP() opt.Bool { return v.ж.WorkingUDP } -func (v NetInfoView) WorkingICMPv4() opt.Bool { return v.ж.WorkingICMPv4 } -func (v NetInfoView) HavePortMap() bool { return v.ж.HavePortMap } -func (v NetInfoView) UPnP() opt.Bool { return v.ж.UPnP } -func (v NetInfoView) PMP() opt.Bool { return v.ж.PMP } -func (v NetInfoView) PCP() opt.Bool { return v.ж.PCP } -func (v NetInfoView) PreferredDERP() int { return v.ж.PreferredDERP } -func (v NetInfoView) LinkType() string { return v.ж.LinkType } +// HairPinning is their router does hairpinning. 
+// It reports true even if there's no NAT involved. +func (v NetInfoView) HairPinning() opt.Bool { return v.ж.HairPinning } + +// WorkingIPv6 is whether the host has IPv6 internet connectivity. +func (v NetInfoView) WorkingIPv6() opt.Bool { return v.ж.WorkingIPv6 } + +// OSHasIPv6 is whether the OS supports IPv6 at all, regardless of +// whether IPv6 internet connectivity is available. +func (v NetInfoView) OSHasIPv6() opt.Bool { return v.ж.OSHasIPv6 } + +// WorkingUDP is whether the host has UDP internet connectivity. +func (v NetInfoView) WorkingUDP() opt.Bool { return v.ж.WorkingUDP } + +// WorkingICMPv4 is whether ICMPv4 works. +// Empty means not checked. +func (v NetInfoView) WorkingICMPv4() opt.Bool { return v.ж.WorkingICMPv4 } + +// HavePortMap is whether we have an existing portmap open +// (UPnP, PMP, or PCP). +func (v NetInfoView) HavePortMap() bool { return v.ж.HavePortMap } + +// UPnP is whether UPnP appears present on the LAN. +// Empty means not checked. +func (v NetInfoView) UPnP() opt.Bool { return v.ж.UPnP } + +// PMP is whether NAT-PMP appears present on the LAN. +// Empty means not checked. +func (v NetInfoView) PMP() opt.Bool { return v.ж.PMP } + +// PCP is whether PCP appears present on the LAN. +// Empty means not checked. +func (v NetInfoView) PCP() opt.Bool { return v.ж.PCP } + +// PreferredDERP is this node's preferred (home) DERP region ID. +// This is where the node expects to be contacted to begin a +// peer-to-peer connection. The node might be be temporarily +// connected to multiple DERP servers (to speak to other nodes +// that are located elsewhere) but PreferredDERP is the region ID +// that the node subscribes to traffic at. +// Zero means disconnected or unknown. +func (v NetInfoView) PreferredDERP() int { return v.ж.PreferredDERP } + +// LinkType is the current link type, if known. +func (v NetInfoView) LinkType() string { return v.ж.LinkType } + +// DERPLatency is the fastest recent time to reach various +// DERP STUN servers, in seconds. The map key is the +// "regionID-v4" or "-v6"; it was previously the DERP server's +// STUN host:port. +// +// This should only be updated rarely, or when there's a +// material change, as any change here also gets uploaded to +// the control plane. func (v NetInfoView) DERPLatency() views.Map[string, float64] { return views.MapOf(v.ж.DERPLatency) } -func (v NetInfoView) FirewallMode() string { return v.ж.FirewallMode } -func (v NetInfoView) String() string { return v.ж.String() } + +// FirewallMode encodes both which firewall mode was selected and why. +// It is Linux-specific (at least as of 2023-08-19) and is meant to help +// debug iptables-vs-nftables issues. The string is of the form +// "{nft,ift}-REASON", like "nft-forced" or "ipt-default". Empty means +// either not Linux or a configuration in which the host firewall rules +// are not managed by tailscaled. +func (v NetInfoView) FirewallMode() string { return v.ж.FirewallMode } +func (v NetInfoView) String() string { return v.ж.String() } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
var _NetInfoViewNeedsRegeneration = NetInfo(struct { @@ -589,10 +891,19 @@ func (v *LoginView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v LoginView) ID() LoginID { return v.ж.ID } -func (v LoginView) Provider() string { return v.ж.Provider } -func (v LoginView) LoginName() string { return v.ж.LoginName } -func (v LoginView) DisplayName() string { return v.ж.DisplayName } +// unused in the Tailscale client +func (v LoginView) ID() LoginID { return v.ж.ID } + +// "google", "github", "okta_foo", etc. +func (v LoginView) Provider() string { return v.ж.Provider } + +// an email address or "email-ish" string (like alice@github) +func (v LoginView) LoginName() string { return v.ж.LoginName } + +// from the IdP +func (v LoginView) DisplayName() string { return v.ж.DisplayName } + +// from the IdP func (v LoginView) ProfilePicURL() string { return v.ж.ProfilePicURL } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -672,26 +983,82 @@ func (v *DNSConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// Resolvers are the DNS resolvers to use, in order of preference. func (v DNSConfigView) Resolvers() views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](v.ж.Resolvers) } +// Routes maps DNS name suffixes to a set of DNS resolvers to +// use. It is used to implement "split DNS" and other advanced DNS +// routing overlays. +// +// Map keys are fully-qualified DNS name suffixes; they may +// optionally contain a trailing dot but no leading dot. +// +// If the value is an empty slice, that means the suffix should still +// be handled by Tailscale's built-in resolver (100.100.100.100), such +// as for the purpose of handling ExtraRecords. func (v DNSConfigView) Routes() views.MapFn[string, []*dnstype.Resolver, views.SliceView[*dnstype.Resolver, dnstype.ResolverView]] { return views.MapFnOf(v.ж.Routes, func(t []*dnstype.Resolver) views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](t) }) } + +// FallbackResolvers is like Resolvers, but is only used if a +// split DNS configuration is requested in a configuration that +// doesn't work yet without explicit default resolvers. +// https://github.com/tailscale/tailscale/issues/1743 func (v DNSConfigView) FallbackResolvers() views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](v.ж.FallbackResolvers) } -func (v DNSConfigView) Domains() views.Slice[string] { return views.SliceOf(v.ж.Domains) } -func (v DNSConfigView) Proxied() bool { return v.ж.Proxied } + +// Domains are the search domains to use. +// Search domains must be FQDNs, but *without* the trailing dot. +func (v DNSConfigView) Domains() views.Slice[string] { return views.SliceOf(v.ж.Domains) } + +// Proxied turns on automatic resolution of hostnames for devices +// in the network map, aka MagicDNS. +// Despite the (legacy) name, does not necessarily cause request +// proxying to be enabled. +func (v DNSConfigView) Proxied() bool { return v.ж.Proxied } + +// Nameservers are the IP addresses of the global nameservers to use. +// +// Deprecated: this is only set and used by MapRequest.Version >=9 and <14. Use Resolvers instead. 
func (v DNSConfigView) Nameservers() views.Slice[netip.Addr] { return views.SliceOf(v.ж.Nameservers) } -func (v DNSConfigView) CertDomains() views.Slice[string] { return views.SliceOf(v.ж.CertDomains) } + +// CertDomains are the set of DNS names for which the control +// plane server will assist with provisioning TLS +// certificates. See SetDNSRequest, which can be used to +// answer dns-01 ACME challenges for e.g. LetsEncrypt. +// These names are FQDNs without trailing periods, and without +// any "_acme-challenge." prefix. +func (v DNSConfigView) CertDomains() views.Slice[string] { return views.SliceOf(v.ж.CertDomains) } + +// ExtraRecords contains extra DNS records to add to the +// MagicDNS config. func (v DNSConfigView) ExtraRecords() views.Slice[DNSRecord] { return views.SliceOf(v.ж.ExtraRecords) } + +// ExitNodeFilteredSuffixes are the DNS suffixes that the +// node, when being an exit node DNS proxy, should not answer. +// +// The entries do not contain trailing periods and are always +// all lowercase. +// +// If an entry starts with a period, it's a suffix match (but +// suffix ".a.b" doesn't match "a.b"; a prefix is required). +// +// If an entry does not start with a period, it's an exact +// match. +// +// Matches are case insensitive. func (v DNSConfigView) ExitNodeFilteredSet() views.Slice[string] { return views.SliceOf(v.ж.ExitNodeFilteredSet) } + +// TempCorpIssue13969 is a temporary (2023-08-16) field for an internal hack day prototype. +// It contains a user inputed URL that should have a list of domains to be blocked. +// See https://github.com/tailscale/corp/issues/13969. func (v DNSConfigView) TempCorpIssue13969() string { return v.ж.TempCorpIssue13969 } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -775,14 +1142,26 @@ func (v *RegisterResponseView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v RegisterResponseView) User() User { return v.ж.User } -func (v RegisterResponseView) Login() Login { return v.ж.Login } -func (v RegisterResponseView) NodeKeyExpired() bool { return v.ж.NodeKeyExpired } +func (v RegisterResponseView) User() User { return v.ж.User } +func (v RegisterResponseView) Login() Login { return v.ж.Login } + +// if true, the NodeKey needs to be replaced +func (v RegisterResponseView) NodeKeyExpired() bool { return v.ж.NodeKeyExpired } + +// TODO(crawshaw): move to using MachineStatus func (v RegisterResponseView) MachineAuthorized() bool { return v.ж.MachineAuthorized } -func (v RegisterResponseView) AuthURL() string { return v.ж.AuthURL } + +// if set, authorization pending +func (v RegisterResponseView) AuthURL() string { return v.ж.AuthURL } + +// If set, this is the current node-key signature that needs to be +// re-signed for the node's new node-key. func (v RegisterResponseView) NodeKeySignature() views.ByteSlice[tkatype.MarshaledSignature] { return views.ByteSliceOf(v.ж.NodeKeySignature) } + +// Error indicates that authorization failed. If this is non-empty, +// other status fields should be ignored. func (v RegisterResponseView) Error() string { return v.ж.Error } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
@@ -863,6 +1242,7 @@ func (v *RegisterResponseAuthView) UnmarshalJSONFrom(dec *jsontext.Decoder) erro return nil } +// used by pre-1.66 Android only func (v RegisterResponseAuthView) Oauth2Token() views.ValuePointer[Oauth2Token] { return views.ValuePointerOf(v.ж.Oauth2Token) } @@ -943,29 +1323,69 @@ func (v *RegisterRequestView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// Version is the client's capabilities when using the Noise +// transport. +// +// When using the original nacl crypto_box transport, the +// value must be 1. func (v RegisterRequestView) Version() CapabilityVersion { return v.ж.Version } func (v RegisterRequestView) NodeKey() key.NodePublic { return v.ж.NodeKey } func (v RegisterRequestView) OldNodeKey() key.NodePublic { return v.ж.OldNodeKey } func (v RegisterRequestView) NLKey() key.NLPublic { return v.ж.NLKey } func (v RegisterRequestView) Auth() RegisterResponseAuthView { return v.ж.Auth.View() } -func (v RegisterRequestView) Expiry() time.Time { return v.ж.Expiry } -func (v RegisterRequestView) Followup() string { return v.ж.Followup } -func (v RegisterRequestView) Hostinfo() HostinfoView { return v.ж.Hostinfo.View() } -func (v RegisterRequestView) Ephemeral() bool { return v.ж.Ephemeral } + +// Expiry optionally specifies the requested key expiry. +// The server policy may override. +// As a special case, if Expiry is in the past and NodeKey is +// the node's current key, the key is expired. +func (v RegisterRequestView) Expiry() time.Time { return v.ж.Expiry } + +// response waits until AuthURL is visited +func (v RegisterRequestView) Followup() string { return v.ж.Followup } +func (v RegisterRequestView) Hostinfo() HostinfoView { return v.ж.Hostinfo.View() } + +// Ephemeral is whether the client is requesting that this +// node be considered ephemeral and be automatically deleted +// when it stops being active. +func (v RegisterRequestView) Ephemeral() bool { return v.ж.Ephemeral } + +// NodeKeySignature is the node's own node-key signature, re-signed +// for its new node key using its network-lock key. +// +// This field is set when the client retries registration after learning +// its NodeKeySignature (which is in need of rotation). func (v RegisterRequestView) NodeKeySignature() views.ByteSlice[tkatype.MarshaledSignature] { return views.ByteSliceOf(v.ж.NodeKeySignature) } + +// The following fields are not used for SignatureNone and are required for +// SignatureV1: func (v RegisterRequestView) SignatureType() SignatureType { return v.ж.SignatureType } + +// creation time of request to prevent replay func (v RegisterRequestView) Timestamp() views.ValuePointer[time.Time] { return views.ValuePointerOf(v.ж.Timestamp) } +// X.509 certificate for client device func (v RegisterRequestView) DeviceCert() views.ByteSlice[[]byte] { return views.ByteSliceOf(v.ж.DeviceCert) } + +// as described by SignatureType func (v RegisterRequestView) Signature() views.ByteSlice[[]byte] { return views.ByteSliceOf(v.ж.Signature) } + +// Tailnet is an optional identifier specifying the name of the recommended or required +// network that the node should join. Its exact form should not be depended on; new +// forms are coming later. The identifier is generally a domain name (for an organization) +// or e-mail address (for a personal account on a shared e-mail provider). It is the same name +// used by the API, as described in /api.md#tailnet. 
+// If Tailnet begins with the prefix "required:" then the server should prevent logging in to a different +// network than the one specified. Otherwise, the server should recommend the specified network +// but still permit logging in to other networks. +// If empty, no recommendation is offered to the server and the login page should show all options. func (v RegisterRequestView) Tailnet() string { return v.ж.Tailnet } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -1055,6 +1475,19 @@ func (v *DERPHomeParamsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// RegionScore scales latencies of DERP regions by a given scaling +// factor when determining which region to use as the home +// ("preferred") DERP. Scores in the range (0, 1) will cause this +// region to be proportionally more preferred, and scores in the range +// (1, ∞) will penalize a region. +// +// If a region is not present in this map, it is treated as having a +// score of 1.0. +// +// Scores should not be 0 or negative; such scores will be ignored. +// +// A nil map means no change from the previous value (if any); an empty +// non-nil map can be sent to reset all scores back to 1.0. func (v DERPHomeParamsView) RegionScore() views.Map[int, float64] { return views.MapOf(v.ж.RegionScore) } @@ -1131,13 +1564,71 @@ func (v *DERPRegionView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v DERPRegionView) RegionID() int { return v.ж.RegionID } -func (v DERPRegionView) RegionCode() string { return v.ж.RegionCode } -func (v DERPRegionView) RegionName() string { return v.ж.RegionName } -func (v DERPRegionView) Latitude() float64 { return v.ж.Latitude } -func (v DERPRegionView) Longitude() float64 { return v.ж.Longitude } -func (v DERPRegionView) Avoid() bool { return v.ж.Avoid } +// RegionID is a unique integer for a geographic region. +// +// It corresponds to the legacy derpN.tailscale.com hostnames +// used by older clients. (Older clients will continue to resolve +// derpN.tailscale.com when contacting peers, rather than use +// the server-provided DERPMap) +// +// RegionIDs must be non-zero, positive, and guaranteed to fit +// in a JavaScript number. +// +// RegionIDs in range 900-999 are reserved for end users to run their +// own DERP nodes. +func (v DERPRegionView) RegionID() int { return v.ж.RegionID } + +// RegionCode is a short name for the region. It's usually a popular +// city or airport code in the region: "nyc", "sf", "sin", +// "fra", etc. +func (v DERPRegionView) RegionCode() string { return v.ж.RegionCode } + +// RegionName is a long English name for the region: "New York City", +// "San Francisco", "Singapore", "Frankfurt", etc. +func (v DERPRegionView) RegionName() string { return v.ж.RegionName } + +// Latitude, Longitude are optional geographical coordinates of the DERP region's city, in degrees. +func (v DERPRegionView) Latitude() float64 { return v.ж.Latitude } +func (v DERPRegionView) Longitude() float64 { return v.ж.Longitude } + +// Avoid is whether the client should avoid picking this as its home region. +// The region should only be used if a peer is there. Clients already using +// this region as their home should migrate away to a new region without +// Avoid set. +// +// Deprecated: because of bugs in past implementations combined with unclear +// docs that caused people to think the bugs were intentional, this field is +// deprecated. 
It was never supposed to cause STUN/DERP measurement probes, +// but due to bugs, it sometimes did. And then some parts of the code began +// to rely on that property. But then we were unable to use this field for +// its original purpose, nor its later imagined purpose, because various +// parts of the codebase thought it meant one thing and others thought it +// meant another. But it did something in the middle instead. So we're retiring +// it. Use NoMeasureNoHome instead. +func (v DERPRegionView) Avoid() bool { return v.ж.Avoid } + +// NoMeasureNoHome says that this regions should not be measured for its +// latency distance (STUN, HTTPS, etc) or availability (e.g. captive portal +// checks) and should never be selected as the node's home region. However, +// if a peer declares this region as its home, then this client is allowed +// to connect to it for the purpose of communicating with that peer. +// +// This is what the now deprecated Avoid bool was supposed to mean +// originally but had implementation bugs and documentation omissions. func (v DERPRegionView) NoMeasureNoHome() bool { return v.ж.NoMeasureNoHome } + +// Nodes are the DERP nodes running in this region, in +// priority order for the current client. Client TLS +// connections should ideally only go to the first entry +// (falling back to the second if necessary). STUN packets +// should go to the first 1 or 2. +// +// If nodes within a region route packets amongst themselves, +// but not to other regions. That said, each user/domain +// should get a the same preferred node order, so if all nodes +// for a user/network pick the first one (as they should, when +// things are healthy), the inter-cluster routing is minimal +// to zero. func (v DERPRegionView) Nodes() views.SliceView[*DERPNode, DERPNodeView] { return views.SliceOfViews[*DERPNode, DERPNodeView](v.ж.Nodes) } @@ -1221,13 +1712,26 @@ func (v *DERPMapView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// HomeParams, if non-nil, is a change in home parameters. +// +// The rest of the DEPRMap fields, if zero, means unchanged. func (v DERPMapView) HomeParams() DERPHomeParamsView { return v.ж.HomeParams.View() } +// Regions is the set of geographic regions running DERP node(s). +// +// It's keyed by the DERPRegion.RegionID. +// +// The numbers are not necessarily contiguous. func (v DERPMapView) Regions() views.MapFn[int, *DERPRegion, DERPRegionView] { return views.MapFnOf(v.ж.Regions, func(t *DERPRegion) DERPRegionView { return t.View() }) } + +// OmitDefaultRegions specifies to not use Tailscale's DERP servers, and only use those +// specified in this DERPMap. If there are none set outside of the defaults, this is a noop. +// +// This field is only meaningful if the Regions map is non-nil (indicating a change). func (v DERPMapView) OmitDefaultRegions() bool { return v.ж.OmitDefaultRegions } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
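The RegionScore semantics documented above can be made concrete with a small, self-contained sketch. This is not the actual home-DERP selection code; region IDs, latencies, and scores are invented, and the only point is that measured latency is multiplied by the per-region score (absent, zero, or negative scores behaving as 1.0) before the lowest value wins.

```go
package main

import (
	"fmt"
	"time"
)

// pickHomeRegion returns the region with the lowest score-scaled latency.
// Region IDs are assumed non-zero, matching the DERPRegion docs.
func pickHomeRegion(latency map[int]time.Duration, score map[int]float64) int {
	best, bestD := 0, time.Duration(0)
	for region, d := range latency {
		s, ok := score[region]
		if !ok || s <= 0 {
			s = 1.0 // absent, zero, or negative scores are treated as 1.0
		}
		scaled := time.Duration(float64(d) * s)
		if best == 0 || scaled < bestD {
			best, bestD = region, scaled
		}
	}
	return best
}

func main() {
	latency := map[int]time.Duration{
		1: 20 * time.Millisecond,
		2: 25 * time.Millisecond,
	}
	// A score below 1.0 makes region 2 win despite its higher raw latency.
	score := map[int]float64{2: 0.5}
	fmt.Println("home region:", pickHomeRegion(latency, score)) // 2
}
```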
@@ -1304,18 +1808,74 @@ func (v *DERPNodeView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v DERPNodeView) Name() string { return v.ж.Name } -func (v DERPNodeView) RegionID() int { return v.ж.RegionID } -func (v DERPNodeView) HostName() string { return v.ж.HostName } -func (v DERPNodeView) CertName() string { return v.ж.CertName } -func (v DERPNodeView) IPv4() string { return v.ж.IPv4 } -func (v DERPNodeView) IPv6() string { return v.ж.IPv6 } -func (v DERPNodeView) STUNPort() int { return v.ж.STUNPort } -func (v DERPNodeView) STUNOnly() bool { return v.ж.STUNOnly } -func (v DERPNodeView) DERPPort() int { return v.ж.DERPPort } +// Name is a unique node name (across all regions). +// It is not a host name. +// It's typically of the form "1b", "2a", "3b", etc. (region +// ID + suffix within that region) +func (v DERPNodeView) Name() string { return v.ж.Name } + +// RegionID is the RegionID of the DERPRegion that this node +// is running in. +func (v DERPNodeView) RegionID() int { return v.ж.RegionID } + +// HostName is the DERP node's hostname. +// +// It is required but need not be unique; multiple nodes may +// have the same HostName but vary in configuration otherwise. +func (v DERPNodeView) HostName() string { return v.ж.HostName } + +// CertName optionally specifies the expected TLS cert common +// name. If empty, HostName is used. If CertName is non-empty, +// HostName is only used for the TCP dial (if IPv4/IPv6 are +// not present) + TLS ClientHello. +// +// As a special case, if CertName starts with "sha256-raw:", +// then the rest of the string is a hex-encoded SHA256 of the +// cert to expect. This is used for self-signed certs. +// In this case, the HostName field will typically be an IP +// address literal. +func (v DERPNodeView) CertName() string { return v.ж.CertName } + +// IPv4 optionally forces an IPv4 address to use, instead of using DNS. +// If empty, A record(s) from DNS lookups of HostName are used. +// If the string is not an IPv4 address, IPv4 is not used; the +// conventional string to disable IPv4 (and not use DNS) is +// "none". +func (v DERPNodeView) IPv4() string { return v.ж.IPv4 } + +// IPv6 optionally forces an IPv6 address to use, instead of using DNS. +// If empty, AAAA record(s) from DNS lookups of HostName are used. +// If the string is not an IPv6 address, IPv6 is not used; the +// conventional string to disable IPv6 (and not use DNS) is +// "none". +func (v DERPNodeView) IPv6() string { return v.ж.IPv6 } + +// Port optionally specifies a STUN port to use. +// Zero means 3478. +// To disable STUN on this node, use -1. +func (v DERPNodeView) STUNPort() int { return v.ж.STUNPort } + +// STUNOnly marks a node as only a STUN server and not a DERP +// server. +func (v DERPNodeView) STUNOnly() bool { return v.ж.STUNOnly } + +// DERPPort optionally provides an alternate TLS port number +// for the DERP HTTPS server. +// +// If zero, 443 is used. +func (v DERPNodeView) DERPPort() int { return v.ж.DERPPort } + +// InsecureForTests is used by unit tests to disable TLS verification. +// It should not be set by users. func (v DERPNodeView) InsecureForTests() bool { return v.ж.InsecureForTests } -func (v DERPNodeView) STUNTestIP() string { return v.ж.STUNTestIP } -func (v DERPNodeView) CanPort80() bool { return v.ж.CanPort80 } + +// STUNTestIP is used in tests to override the STUN server's IP. +// If empty, it's assumed to be the same as the DERP server. 
+func (v DERPNodeView) STUNTestIP() string { return v.ж.STUNTestIP } + +// CanPort80 specifies whether this DERP node is accessible over HTTP +// on port 80 specifically. This is used for captive portal checks. +func (v DERPNodeView) CanPort80() bool { return v.ж.CanPort80 } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _DERPNodeViewNeedsRegeneration = DERPNode(struct { @@ -1400,17 +1960,49 @@ func (v *SSHRuleView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// RuleExpires, if non-nil, is when this rule expires. +// +// For example, a (principal,sshuser) tuple might be granted +// prompt-free SSH access for N minutes, so this rule would be +// before a expiration-free rule for the same principal that +// required an auth prompt. This permits the control plane to +// be out of the path for already-authorized SSH pairs. +// +// Once a rule matches, the lifetime of any accepting connection +// is subject to the SSHAction.SessionExpires time, if any. func (v SSHRuleView) RuleExpires() views.ValuePointer[time.Time] { return views.ValuePointerOf(v.ж.RuleExpires) } +// Principals matches an incoming connection. If the connection +// matches anything in this list and also matches SSHUsers, +// then Action is applied. func (v SSHRuleView) Principals() views.SliceView[*SSHPrincipal, SSHPrincipalView] { return views.SliceOfViews[*SSHPrincipal, SSHPrincipalView](v.ж.Principals) } +// SSHUsers are the SSH users that this rule matches. It is a +// map from either ssh-user|"*" => local-user. The map must +// contain a key for either ssh-user or, as a fallback, "*" to +// match anything. If it does, the map entry's value is the +// actual user that's logged in. +// If the map value is the empty string (for either the +// requested SSH user or "*"), the rule doesn't match. +// If the map value is "=", it means the ssh-user should map +// directly to the local-user. +// It may be nil if the Action is reject. func (v SSHRuleView) SSHUsers() views.Map[string, string] { return views.MapOf(v.ж.SSHUsers) } -func (v SSHRuleView) Action() SSHActionView { return v.ж.Action.View() } -func (v SSHRuleView) AcceptEnv() views.Slice[string] { return views.SliceOf(v.ж.AcceptEnv) } + +// Action is the outcome to task. +// A nil or invalid action means to deny. +func (v SSHRuleView) Action() SSHActionView { return v.ж.Action.View() } + +// AcceptEnv is a slice of environment variable names that are allowlisted +// for the SSH rule in the policy file. +// +// AcceptEnv values may contain * and ? wildcard characters which match against +// an arbitrary number of characters or a single character respectively. +func (v SSHRuleView) AcceptEnv() views.Slice[string] { return views.SliceOf(v.ж.AcceptEnv) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
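The SSHUsers mapping rules documented above (exact key, "*" fallback, "" meaning no match, "=" meaning "use the SSH user name as the local user") can be illustrated with a short sketch. This is not the tailscaled implementation; the rule contents are invented.

```go
package main

import "fmt"

// localUserFor returns the local account for sshUser and whether the
// rule matches at all, following the SSHUsers map semantics.
func localUserFor(sshUsers map[string]string, sshUser string) (string, bool) {
	v, ok := sshUsers[sshUser]
	if !ok {
		v, ok = sshUsers["*"]
	}
	if !ok || v == "" {
		return "", false // no entry, or explicitly disallowed
	}
	if v == "=" {
		return sshUser, true // map the SSH user directly to the local user
	}
	return v, true
}

func main() {
	rule := map[string]string{
		"root":  "",       // never allow root
		"alice": "=",      // alice logs in as local user "alice"
		"*":     "ubuntu", // everyone else becomes "ubuntu"
	}
	for _, u := range []string{"root", "alice", "bob"} {
		local, ok := localUserFor(rule, u)
		fmt.Printf("%s -> %q (match=%v)\n", u, local, ok)
	}
}
```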
var _SSHRuleViewNeedsRegeneration = SSHRule(struct { @@ -1488,15 +2080,61 @@ func (v *SSHActionView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v SSHActionView) Message() string { return v.ж.Message } -func (v SSHActionView) Reject() bool { return v.ж.Reject } -func (v SSHActionView) Accept() bool { return v.ж.Accept } -func (v SSHActionView) SessionDuration() time.Duration { return v.ж.SessionDuration } -func (v SSHActionView) AllowAgentForwarding() bool { return v.ж.AllowAgentForwarding } -func (v SSHActionView) HoldAndDelegate() string { return v.ж.HoldAndDelegate } -func (v SSHActionView) AllowLocalPortForwarding() bool { return v.ж.AllowLocalPortForwarding } -func (v SSHActionView) AllowRemotePortForwarding() bool { return v.ж.AllowRemotePortForwarding } +// Message, if non-empty, is shown to the user before the +// action occurs. +func (v SSHActionView) Message() string { return v.ж.Message } + +// Reject, if true, terminates the connection. This action +// has higher priority that Accept, if given. +// The reason this is exists is primarily so a response +// from HoldAndDelegate has a way to stop the poll. +func (v SSHActionView) Reject() bool { return v.ж.Reject } + +// Accept, if true, accepts the connection immediately +// without further prompts. +func (v SSHActionView) Accept() bool { return v.ж.Accept } + +// SessionDuration, if non-zero, is how long the session can stay open +// before being forcefully terminated. +func (v SSHActionView) SessionDuration() time.Duration { return v.ж.SessionDuration } + +// AllowAgentForwarding, if true, allows accepted connections to forward +// the ssh agent if requested. +func (v SSHActionView) AllowAgentForwarding() bool { return v.ж.AllowAgentForwarding } + +// HoldAndDelegate, if non-empty, is a URL that serves an +// outcome verdict. The connection will be accepted and will +// block until the provided long-polling URL serves a new +// SSHAction JSON value. The URL must be fetched using the +// Noise transport (in package control/control{base,http}). +// If the long poll breaks before returning a complete HTTP +// response, it should be re-fetched as long as the SSH +// session is open. +// +// The following variables in the URL are expanded by tailscaled: +// +// - $SRC_NODE_IP (URL escaped) +// - $SRC_NODE_ID (Node.ID as int64 string) +// - $DST_NODE_IP (URL escaped) +// - $DST_NODE_ID (Node.ID as int64 string) +// - $SSH_USER (URL escaped, ssh user requested) +// - $LOCAL_USER (URL escaped, local user mapped) +func (v SSHActionView) HoldAndDelegate() string { return v.ж.HoldAndDelegate } + +// AllowLocalPortForwarding, if true, allows accepted connections +// to use local port forwarding if requested. +func (v SSHActionView) AllowLocalPortForwarding() bool { return v.ж.AllowLocalPortForwarding } + +// AllowRemotePortForwarding, if true, allows accepted connections +// to use remote port forwarding if requested. +func (v SSHActionView) AllowRemotePortForwarding() bool { return v.ж.AllowRemotePortForwarding } + +// Recorders defines the destinations of the SSH session recorders. +// The recording will be uploaded to http://addr:port/record. func (v SSHActionView) Recorders() views.Slice[netip.AddrPort] { return views.SliceOf(v.ж.Recorders) } + +// OnRecorderFailure is the action to take if recording fails. +// If nil, the default action is to fail open. 
func (v SSHActionView) OnRecordingFailure() views.ValuePointer[SSHRecorderFailureAction] { return views.ValuePointerOf(v.ж.OnRecordingFailure) } @@ -1584,8 +2222,19 @@ func (v *SSHPrincipalView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { func (v SSHPrincipalView) Node() StableNodeID { return v.ж.Node } func (v SSHPrincipalView) NodeIP() string { return v.ж.NodeIP } -func (v SSHPrincipalView) UserLogin() string { return v.ж.UserLogin } -func (v SSHPrincipalView) Any() bool { return v.ж.Any } + +// email-ish: foo@example.com, bar@github +func (v SSHPrincipalView) UserLogin() string { return v.ж.UserLogin } + +// if true, match any connection +func (v SSHPrincipalView) Any() bool { return v.ж.Any } + +// UnusedPubKeys was public key support. It never became an official product +// feature and so as of 2024-12-12 is being removed. +// This stub exists to remind us not to re-use the JSON field name "pubKeys" +// in the future if we bring it back with different semantics. +// +// Deprecated: do not use. It does nothing. func (v SSHPrincipalView) UnusedPubKeys() views.Slice[string] { return views.SliceOf(v.ж.UnusedPubKeys) } @@ -1666,6 +2315,7 @@ func (v *ControlDialPlanView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// An empty list means the default: use DNS (unspecified which DNS). func (v ControlDialPlanView) Candidates() views.Slice[ControlIPCandidate] { return views.SliceOf(v.ж.Candidates) } @@ -1742,13 +2392,35 @@ func (v *LocationView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v LocationView) Country() string { return v.ж.Country } +// User friendly country name, with proper capitalization ("Canada") +func (v LocationView) Country() string { return v.ж.Country } + +// ISO 3166-1 alpha-2 in upper case ("CA") func (v LocationView) CountryCode() string { return v.ж.CountryCode } -func (v LocationView) City() string { return v.ж.City } -func (v LocationView) CityCode() string { return v.ж.CityCode } -func (v LocationView) Latitude() float64 { return v.ж.Latitude } -func (v LocationView) Longitude() float64 { return v.ж.Longitude } -func (v LocationView) Priority() int { return v.ж.Priority } + +// User friendly city name, with proper capitalization ("Squamish") +func (v LocationView) City() string { return v.ж.City } + +// CityCode is a short code representing the city in upper case. +// CityCode is used to disambiguate a city from another location +// with the same city name. It uniquely identifies a particular +// geographical location, within the tailnet. +// IATA, ICAO or ISO 3166-2 codes are recommended ("YSE") +func (v LocationView) CityCode() string { return v.ж.CityCode } + +// Latitude, Longitude are optional geographical coordinates of the node, in degrees. +// No particular accuracy level is promised; the coordinates may simply be the center of the city or country. +func (v LocationView) Latitude() float64 { return v.ж.Latitude } +func (v LocationView) Longitude() float64 { return v.ж.Longitude } + +// Priority determines the order of use of an exit node when a +// location based preference matches more than one exit node, +// the node with the highest priority wins. Nodes of equal +// probability may be selected arbitrarily. +// +// A value of 0 means the exit node does not have a priority +// preference. A negative int is not allowed. +func (v LocationView) Priority() int { return v.ж.Priority } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
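As a rough illustration of the Location.Priority semantics described above (highest priority wins among exit nodes matching a location preference, 0 meaning no preference), here is a self-contained sketch. It is not Tailscale's selection code; the node names, country codes, and priorities are invented.

```go
package main

import "fmt"

type exitNode struct {
	Name        string
	CountryCode string // ISO 3166-1 alpha-2, upper case ("CA")
	Priority    int    // 0 means no priority preference
}

// pickExitNode filters candidates by country code and returns the one
// with the highest Priority, if any match.
func pickExitNode(nodes []exitNode, countryCode string) (exitNode, bool) {
	var best exitNode
	found := false
	for _, n := range nodes {
		if n.CountryCode != countryCode {
			continue
		}
		if !found || n.Priority > best.Priority {
			best, found = n, true
		}
	}
	return best, found
}

func main() {
	nodes := []exitNode{
		{Name: "van-1", CountryCode: "CA", Priority: 10},
		{Name: "yyz-1", CountryCode: "CA", Priority: 50},
		{Name: "nyc-1", CountryCode: "US", Priority: 100},
	}
	if n, ok := pickExitNode(nodes, "CA"); ok {
		fmt.Println("selected exit node:", n.Name) // yyz-1
	}
}
```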
var _LocationViewNeedsRegeneration = Location(struct { @@ -1828,8 +2500,12 @@ func (v *UserProfileView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v UserProfileView) ID() UserID { return v.ж.ID } -func (v UserProfileView) LoginName() string { return v.ж.LoginName } +func (v UserProfileView) ID() UserID { return v.ж.ID } + +// "alice@smith.com"; for display purposes only (provider is not listed) +func (v UserProfileView) LoginName() string { return v.ж.LoginName } + +// "Alice Smith" func (v UserProfileView) DisplayName() string { return v.ж.DisplayName } func (v UserProfileView) ProfilePicURL() string { return v.ж.ProfilePicURL } func (v UserProfileView) Equal(v2 UserProfileView) bool { return v.ж.Equal(v2.ж) } @@ -1909,9 +2585,18 @@ func (v *VIPServiceView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v VIPServiceView) Name() ServiceName { return v.ж.Name } +// Name is the name of the service. The Name uniquely identifies a service +// on a particular tailnet, and so also corresponds uniquely to the pair of +// IP addresses belonging to the VIP service. +func (v VIPServiceView) Name() ServiceName { return v.ж.Name } + +// Ports specify which ProtoPorts are made available by this node +// on the service's IPs. func (v VIPServiceView) Ports() views.Slice[ProtoPortRange] { return views.SliceOf(v.ж.Ports) } -func (v VIPServiceView) Active() bool { return v.ж.Active } + +// Active specifies whether new requests for the service should be +// sent to this node by control. +func (v VIPServiceView) Active() bool { return v.ж.Active } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _VIPServiceViewNeedsRegeneration = VIPService(struct { diff --git a/types/dnstype/dnstype_view.go b/types/dnstype/dnstype_view.go index 0704670a2..a983864d0 100644 --- a/types/dnstype/dnstype_view.go +++ b/types/dnstype/dnstype_view.go @@ -84,10 +84,35 @@ func (v *ResolverView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// Addr is the address of the DNS resolver, one of: +// - A plain IP address for a "classic" UDP+TCP DNS resolver. +// This is the common format as sent by the control plane. +// - An IP:port, for tests. +// - "https://resolver.com/path" for DNS over HTTPS; currently +// as of 2022-09-08 only used for certain well-known resolvers +// (see the publicdns package) for which the IP addresses to dial DoH are +// known ahead of time, so bootstrap DNS resolution is not required. +// - "http://node-address:port/path" for DNS over HTTP over WireGuard. This +// is implemented in the PeerAPI for exit nodes and app connectors. +// - [TODO] "tls://resolver.com" for DNS over TCP+TLS func (v ResolverView) Addr() string { return v.ж.Addr } + +// BootstrapResolution is an optional suggested resolution for the +// DoT/DoH resolver, if the resolver URL does not reference an IP +// address directly. +// BootstrapResolution may be empty, in which case clients should +// look up the DoT/DoH server using their local "classic" DNS +// resolver. +// +// As of 2022-09-08, BootstrapResolution is not yet used. func (v ResolverView) BootstrapResolution() views.Slice[netip.Addr] { return views.SliceOf(v.ж.BootstrapResolution) } + +// UseWithExitNode designates that this resolver should continue to be used when an +// exit node is in use. 
Normally, DNS resolution is delegated to the exit node but +// there are situations where it is preferable to still use a Split DNS server and/or +// global DNS server instead of the exit node. func (v ResolverView) UseWithExitNode() bool { return v.ж.UseWithExitNode } func (v ResolverView) Equal(v2 ResolverView) bool { return v.ж.Equal(v2.ж) } diff --git a/types/persist/persist_view.go b/types/persist/persist_view.go index 99a86a6a5..7d1507468 100644 --- a/types/persist/persist_view.go +++ b/types/persist/persist_view.go @@ -86,11 +86,18 @@ func (v *PersistView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v PersistView) PrivateNodeKey() key.NodePrivate { return v.ж.PrivateNodeKey } +func (v PersistView) PrivateNodeKey() key.NodePrivate { return v.ж.PrivateNodeKey } + +// needed to request key rotation func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } func (v PersistView) NetworkLockKey() key.NLPrivate { return v.ж.NetworkLockKey } func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } + +// DisallowedTKAStateIDs stores the tka.State.StateID values which +// this node will not operate network lock on. This is used to +// prevent bootstrapping TKA onto a key authority which was forcibly +// disabled. func (v PersistView) DisallowedTKAStateIDs() views.Slice[string] { return views.SliceOf(v.ж.DisallowedTKAStateIDs) } diff --git a/types/prefs/prefs_example/prefs_example_view.go b/types/prefs/prefs_example/prefs_example_view.go index afc9f1781..6a1a36865 100644 --- a/types/prefs/prefs_example/prefs_example_view.go +++ b/types/prefs/prefs_example/prefs_example_view.go @@ -89,38 +89,68 @@ func (v *PrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v PrefsView) ControlURL() prefs.Item[string] { return v.ж.ControlURL } -func (v PrefsView) RouteAll() prefs.Item[bool] { return v.ж.RouteAll } -func (v PrefsView) ExitNodeID() prefs.Item[tailcfg.StableNodeID] { return v.ж.ExitNodeID } -func (v PrefsView) ExitNodeIP() prefs.Item[netip.Addr] { return v.ж.ExitNodeIP } -func (v PrefsView) ExitNodePrior() tailcfg.StableNodeID { return v.ж.ExitNodePrior } -func (v PrefsView) ExitNodeAllowLANAccess() prefs.Item[bool] { return v.ж.ExitNodeAllowLANAccess } -func (v PrefsView) CorpDNS() prefs.Item[bool] { return v.ж.CorpDNS } -func (v PrefsView) RunSSH() prefs.Item[bool] { return v.ж.RunSSH } -func (v PrefsView) RunWebClient() prefs.Item[bool] { return v.ж.RunWebClient } -func (v PrefsView) WantRunning() prefs.Item[bool] { return v.ж.WantRunning } -func (v PrefsView) LoggedOut() prefs.Item[bool] { return v.ж.LoggedOut } -func (v PrefsView) ShieldsUp() prefs.Item[bool] { return v.ж.ShieldsUp } -func (v PrefsView) AdvertiseTags() prefs.ListView[string] { return v.ж.AdvertiseTags.View() } -func (v PrefsView) Hostname() prefs.Item[string] { return v.ж.Hostname } -func (v PrefsView) NotepadURLs() prefs.Item[bool] { return v.ж.NotepadURLs } -func (v PrefsView) ForceDaemon() prefs.Item[bool] { return v.ж.ForceDaemon } -func (v PrefsView) Egg() prefs.Item[bool] { return v.ж.Egg } +func (v PrefsView) ControlURL() prefs.Item[string] { return v.ж.ControlURL } +func (v PrefsView) RouteAll() prefs.Item[bool] { return v.ж.RouteAll } +func (v PrefsView) ExitNodeID() prefs.Item[tailcfg.StableNodeID] { return v.ж.ExitNodeID } +func (v PrefsView) ExitNodeIP() prefs.Item[netip.Addr] { return v.ж.ExitNodeIP } + +// ExitNodePrior is an 
internal state rather than a preference. +// It can be kept in the Prefs structure but should not be wrapped +// and is ignored by the [prefs] package. +func (v PrefsView) ExitNodePrior() tailcfg.StableNodeID { return v.ж.ExitNodePrior } +func (v PrefsView) ExitNodeAllowLANAccess() prefs.Item[bool] { return v.ж.ExitNodeAllowLANAccess } +func (v PrefsView) CorpDNS() prefs.Item[bool] { return v.ж.CorpDNS } +func (v PrefsView) RunSSH() prefs.Item[bool] { return v.ж.RunSSH } +func (v PrefsView) RunWebClient() prefs.Item[bool] { return v.ж.RunWebClient } +func (v PrefsView) WantRunning() prefs.Item[bool] { return v.ж.WantRunning } +func (v PrefsView) LoggedOut() prefs.Item[bool] { return v.ж.LoggedOut } +func (v PrefsView) ShieldsUp() prefs.Item[bool] { return v.ж.ShieldsUp } + +// AdvertiseTags is a preference whose value is a slice of strings. +// The value is atomic, and individual items in the slice should +// not be modified after the preference is set. +// Since the item type (string) is immutable, we can use [prefs.List]. +func (v PrefsView) AdvertiseTags() prefs.ListView[string] { return v.ж.AdvertiseTags.View() } +func (v PrefsView) Hostname() prefs.Item[string] { return v.ж.Hostname } +func (v PrefsView) NotepadURLs() prefs.Item[bool] { return v.ж.NotepadURLs } +func (v PrefsView) ForceDaemon() prefs.Item[bool] { return v.ж.ForceDaemon } +func (v PrefsView) Egg() prefs.Item[bool] { return v.ж.Egg } + +// AdvertiseRoutes is a preference whose value is a slice of netip.Prefix. +// The value is atomic, and individual items in the slice should +// not be modified after the preference is set. +// Since the item type (netip.Prefix) is immutable, we can use [prefs.List]. func (v PrefsView) AdvertiseRoutes() prefs.ListView[netip.Prefix] { return v.ж.AdvertiseRoutes.View() } func (v PrefsView) NoSNAT() prefs.Item[bool] { return v.ж.NoSNAT } func (v PrefsView) NoStatefulFiltering() prefs.Item[opt.Bool] { return v.ж.NoStatefulFiltering } func (v PrefsView) NetfilterMode() prefs.Item[preftype.NetfilterMode] { return v.ж.NetfilterMode } func (v PrefsView) OperatorUser() prefs.Item[string] { return v.ж.OperatorUser } func (v PrefsView) ProfileName() prefs.Item[string] { return v.ж.ProfileName } -func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.ж.AutoUpdate } -func (v PrefsView) AppConnector() AppConnectorPrefs { return v.ж.AppConnector } -func (v PrefsView) PostureChecking() prefs.Item[bool] { return v.ж.PostureChecking } -func (v PrefsView) NetfilterKind() prefs.Item[string] { return v.ж.NetfilterKind } + +// AutoUpdate contains auto-update preferences. +// Each preference in the group can be configured and managed individually. +func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.ж.AutoUpdate } + +// AppConnector contains app connector-related preferences. +// Each preference in the group can be configured and managed individually. +func (v PrefsView) AppConnector() AppConnectorPrefs { return v.ж.AppConnector } +func (v PrefsView) PostureChecking() prefs.Item[bool] { return v.ж.PostureChecking } +func (v PrefsView) NetfilterKind() prefs.Item[string] { return v.ж.NetfilterKind } + +// DriveShares is a preference whose value is a slice of *[drive.Share]. +// The value is atomic, and individual items in the slice should +// not be modified after the preference is set. +// Since the item type (*drive.Share) is mutable and implements [views.ViewCloner], +// we need to use [prefs.StructList] instead of [prefs.List]. 
func (v PrefsView) DriveShares() prefs.StructListView[*drive.Share, drive.ShareView] { return prefs.StructListViewOf(&v.ж.DriveShares) } func (v PrefsView) AllowSingleHosts() prefs.Item[marshalAsTrueInJSON] { return v.ж.AllowSingleHosts } -func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() } + +// Persist is an internal state rather than a preference. +// It can be kept in the Prefs structure but should not be wrapped +// and is ignored by the [prefs] package. +func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _PrefsViewNeedsRegeneration = Prefs(struct { diff --git a/types/prefs/prefs_view_test.go b/types/prefs/prefs_view_test.go index 44c3beb87..8993cb535 100644 --- a/types/prefs/prefs_view_test.go +++ b/types/prefs/prefs_view_test.go @@ -95,6 +95,9 @@ func (v TestPrefsView) AddrItem() Item[netip.Addr] { return v.ж.A func (v TestPrefsView) StringStringMap() MapView[string, string] { return v.ж.StringStringMap.View() } func (v TestPrefsView) IntStringMap() MapView[int, string] { return v.ж.IntStringMap.View() } func (v TestPrefsView) AddrIntMap() MapView[netip.Addr, int] { return v.ж.AddrIntMap.View() } + +// Bundles are complex preferences that usually consist of +// multiple parameters that must be configured atomically. func (v TestPrefsView) Bundle1() ItemView[*TestBundle, TestBundleView] { return ItemViewOf(&v.ж.Bundle1) } @@ -116,6 +119,10 @@ func (v TestPrefsView) IntBundleMap() StructMapView[int, *TestBundle, TestBundle func (v TestPrefsView) AddrBundleMap() StructMapView[netip.Addr, *TestBundle, TestBundleView] { return StructMapViewOf(&v.ж.AddrBundleMap) } + +// Group is a nested struct that contains one or more preferences. +// Each preference in a group can be configured individually. +// Preference groups should be included directly rather than by pointers. func (v TestPrefsView) Group() TestPrefsGroup { return v.ж.Group } // A compilation failure here means this code must be regenerated, with the command at the top of this file. From 48dbe70b540e1316fcf2cc5e481b950dae47f658 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Thu, 28 Aug 2025 13:01:20 -0700 Subject: [PATCH 1236/1708] go.mod: bump Go 1.25 release (#16969) Bump Go 1.25 release to include a go/types patch and resolve govulncheck CI exceptions. Updates tailscale/corp#31755 Signed-off-by: Patrick O'Doherty --- go.toolchain.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index e3dfee540..9c2417e7c 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -9a1a6a51164c9c7a23f711052bb8776326cd30cd +f3339c88ea24212cc3cd49b64ad1045b85db23bf From 4b9a1a008781df6d967de73686b59d1c39ed4e4e Mon Sep 17 00:00:00 2001 From: License Updater Date: Thu, 28 Aug 2025 21:31:44 +0000 Subject: [PATCH 1237/1708] licenses: update license notices Signed-off-by: License Updater --- licenses/android.md | 52 +++++++++---------------------------------- licenses/apple.md | 28 +++++++++++------------ licenses/tailscale.md | 32 +++++++++++++------------- licenses/windows.md | 41 +++++++++++++++++----------------- 4 files changed, 63 insertions(+), 90 deletions(-) diff --git a/licenses/android.md b/licenses/android.md index 37961b74c..0e68f0cac 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -9,72 +9,42 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.31/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.31/internal/endpoints/v2/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/internal/sync/singleflight/LICENSE)) - - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.44.7/service/ssm/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - - 
[github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) - - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) + - [github.com/google/go-tpm](https://pkg.go.dev/github.com/google/go-tpm) ([Apache-2.0](https://github.com/google/go-tpm/blob/v0.9.4/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - - [github.com/illarion/gonotify/v3](https://pkg.go.dev/github.com/illarion/gonotify/v3) ([MIT](https://github.com/illarion/gonotify/blob/v3.0.2/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE)) - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE)) - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - - [github.com/mdlayher/genetlink](https://pkg.go.dev/github.com/mdlayher/genetlink) ([MIT](https://github.com/mdlayher/genetlink/blob/v1.3.2/LICENSE.md)) - - 
[github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - - [github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) ([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) - - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) - - [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - [github.com/tailscale/tailscale-android/libtailscale](https://pkg.go.dev/github.com/tailscale/tailscale-android/libtailscale) ([BSD-3-Clause](https://github.com/tailscale/tailscale-android/blob/HEAD/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/0b8b35511f19/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/1d0488a3d7da/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/d2acac8f3701/LICENSE)) - - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - - [go4.org/intern](https://pkg.go.dev/go4.org/intern) ([BSD-3-Clause](https://github.com/go4org/intern/blob/ae77deb06f29/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [go4.org/unsafe/assume-no-moving-gc](https://pkg.go.dev/go4.org/unsafe/assume-no-moving-gc) ([BSD-3-Clause](https://github.com/go4org/unsafe-assume-no-moving-gc/blob/e7c30c78aeb2/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.38.0:LICENSE)) - 
[golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - [golang.org/x/mobile](https://pkg.go.dev/golang.org/x/mobile) ([BSD-3-Clause](https://cs.opensource.google/go/x/mobile/+/81131f64:LICENSE)) - - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.23.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) - - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.30.0:LICENSE)) + - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.24.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.40.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.14.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.33.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.32.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.25.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.11.0:LICENSE)) + - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.33.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - - [inet.af/netaddr](https://pkg.go.dev/inet.af/netaddr) ([BSD-3-Clause](Unknown)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index 5a017076e..81359b270 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -33,9 +33,9 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) + - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) @@ -53,29 +53,29 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) ([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) + - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.65/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) + - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.22/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) - [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/91a0587fb251/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/1d0488a3d7da/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/d2acac8f3701/LICENSE)) - - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) + - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.5/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) 
([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.12.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.31.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.30.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.40.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.42.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.16.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.34.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.33.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.27.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 206734fb4..6feb85aaf 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -14,9 +14,12 @@ Some packages may only be included on certain architectures or operating systems - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) + - [fyne.io/systray](https://pkg.go.dev/fyne.io/systray) ([Apache-2.0](https://github.com/fyne-io/systray/blob/4856ac3adc3c/LICENSE)) + - [github.com/Kodeworks/golang-image-ico](https://pkg.go.dev/github.com/Kodeworks/golang-image-ico) ([BSD-3-Clause](https://github.com/Kodeworks/golang-image-ico/blob/73f0f4cfade9/LICENSE)) - [github.com/akutz/memconn](https://pkg.go.dev/github.com/akutz/memconn) ([Apache-2.0](https://github.com/akutz/memconn/blob/v0.1.0/LICENSE)) - [github.com/alexbrainman/sspi](https://pkg.go.dev/github.com/alexbrainman/sspi) ([BSD-3-Clause](https://github.com/alexbrainman/sspi/blob/1a75b4708caa/LICENSE)) - [github.com/anmitsu/go-shlex](https://pkg.go.dev/github.com/anmitsu/go-shlex) ([MIT](https://github.com/anmitsu/go-shlex/blob/38f4b401e2be/LICENSE)) + - [github.com/atotto/clipboard](https://pkg.go.dev/github.com/atotto/clipboard) 
([BSD-3-Clause](https://github.com/atotto/clipboard/blob/v0.1.4/LICENSE)) - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) @@ -38,18 +41,18 @@ Some packages may only be included on certain architectures or operating systems - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/a09d6be7affa/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) + - [github.com/fogleman/gg](https://pkg.go.dev/github.com/fogleman/gg) ([MIT](https://github.com/fogleman/gg/blob/v1.3.0/LICENSE.md)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) - [github.com/go-ole/go-ole](https://pkg.go.dev/github.com/go-ole/go-ole) ([MIT](https://github.com/go-ole/go-ole/blob/v1.3.0/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) + - [github.com/golang/freetype/raster](https://pkg.go.dev/github.com/golang/freetype/raster) ([Unknown](Unknown)) + - [github.com/golang/freetype/truetype](https://pkg.go.dev/github.com/golang/freetype/truetype) ([Unknown](Unknown)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - - [github.com/gorilla/csrf](https://pkg.go.dev/github.com/gorilla/csrf) ([BSD-3-Clause](https://github.com/gorilla/csrf/blob/9dd6af1f6d30/LICENSE)) - - [github.com/gorilla/securecookie](https://pkg.go.dev/github.com/gorilla/securecookie) ([BSD-3-Clause](https://github.com/gorilla/securecookie/blob/v1.1.2/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - - 
[github.com/illarion/gonotify/v3](https://pkg.go.dev/github.com/illarion/gonotify/v3) ([MIT](https://github.com/illarion/gonotify/blob/v3.0.2/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) @@ -61,7 +64,6 @@ Some packages may only be included on certain architectures or operating systems - [github.com/kr/fs](https://pkg.go.dev/github.com/kr/fs) ([BSD-3-Clause](https://github.com/kr/fs/blob/v0.1.0/LICENSE)) - [github.com/mattn/go-colorable](https://pkg.go.dev/github.com/mattn/go-colorable) ([MIT](https://github.com/mattn/go-colorable/blob/v0.1.13/LICENSE)) - [github.com/mattn/go-isatty](https://pkg.go.dev/github.com/mattn/go-isatty) ([MIT](https://github.com/mattn/go-isatty/blob/v0.0.20/LICENSE)) - - [github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) ([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) @@ -69,14 +71,13 @@ Some packages may only be included on certain architectures or operating systems - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) - [github.com/pkg/sftp](https://pkg.go.dev/github.com/pkg/sftp) ([BSD-2-Clause](https://github.com/pkg/sftp/blob/v1.13.6/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) - - [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/certstore](https://pkg.go.dev/github.com/tailscale/certstore) ([MIT](https://github.com/tailscale/certstore/blob/d3fa0460f47e/LICENSE.md)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - [github.com/tailscale/web-client-prebuilt](https://pkg.go.dev/github.com/tailscale/web-client-prebuilt) ([BSD-3-Clause](https://github.com/tailscale/web-client-prebuilt/blob/d4cd19a26976/LICENSE)) - [github.com/tailscale/wf](https://pkg.go.dev/github.com/tailscale/wf) ([BSD-3-Clause](https://github.com/tailscale/wf/blob/6fbb0a674ee6/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) 
([MIT](https://github.com/tailscale/wireguard-go/blob/91a0587fb251/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/1d0488a3d7da/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/toqueteos/webbrowser](https://pkg.go.dev/github.com/toqueteos/webbrowser) ([MIT](https://github.com/toqueteos/webbrowser/blob/v1.2.0/LICENSE.md)) - [github.com/u-root/u-root/pkg/termios](https://pkg.go.dev/github.com/u-root/u-root/pkg/termios) ([BSD-3-Clause](https://github.com/u-root/u-root/blob/v0.14.0/LICENSE)) @@ -84,15 +85,16 @@ Some packages may only be included on certain architectures or operating systems - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.38.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.26.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.31.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) + - [golang.org/x/image](https://pkg.go.dev/golang.org/x/image) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.40.0:LICENSE)) + - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.30.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.14.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.33.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.32.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) 
([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.25.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.11.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index e47bc3227..5c000cc9f 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -31,12 +31,13 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - [github.com/beorn7/perks/quantile](https://pkg.go.dev/github.com/beorn7/perks/quantile) ([MIT](https://github.com/beorn7/perks/blob/v1.0.1/LICENSE)) - [github.com/cespare/xxhash/v2](https://pkg.go.dev/github.com/cespare/xxhash/v2) ([MIT](https://github.com/cespare/xxhash/blob/v2.3.0/LICENSE.txt)) + - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) - - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) + - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/go-cmp/cmp](https://pkg.go.dev/github.com/google/go-cmp/cmp) ([BSD-3-Clause](https://github.com/google/go-cmp/blob/v0.7.0/LICENSE)) - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) @@ -51,38 +52,38 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.0/zstd/internal/xxhash/LICENSE.txt)) - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) + - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.65/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/munnerz/goautoneg](https://pkg.go.dev/github.com/munnerz/goautoneg) ([BSD-3-Clause](https://github.com/munnerz/goautoneg/blob/a7dc8b61c822/LICENSE)) - [github.com/nfnt/resize](https://pkg.go.dev/github.com/nfnt/resize) ([ISC](https://github.com/nfnt/resize/blob/83c6a9932646/LICENSE)) - [github.com/peterbourgon/diskv](https://pkg.go.dev/github.com/peterbourgon/diskv) ([MIT](https://github.com/peterbourgon/diskv/blob/v2.0.1/LICENSE)) - - [github.com/prometheus/client_golang/prometheus](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) ([Apache-2.0](https://github.com/prometheus/client_golang/blob/v1.19.1/LICENSE)) - - [github.com/prometheus/client_model/go](https://pkg.go.dev/github.com/prometheus/client_model/go) ([Apache-2.0](https://github.com/prometheus/client_model/blob/v0.6.1/LICENSE)) - - [github.com/prometheus/common](https://pkg.go.dev/github.com/prometheus/common) ([Apache-2.0](https://github.com/prometheus/common/blob/v0.55.0/LICENSE)) + - [github.com/prometheus/client_golang/prometheus](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) ([Apache-2.0](https://github.com/prometheus/client_golang/blob/v1.23.0/LICENSE)) + - [github.com/prometheus/client_model/go](https://pkg.go.dev/github.com/prometheus/client_model/go) ([Apache-2.0](https://github.com/prometheus/client_model/blob/v0.6.2/LICENSE)) + - [github.com/prometheus/common](https://pkg.go.dev/github.com/prometheus/common) ([Apache-2.0](https://github.com/prometheus/common/blob/v0.65.0/LICENSE)) - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/ec1d1c113d33/LICENSE)) + - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/992244df8c5a/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/b2c15a420186/LICENSE)) - - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) 
([BSD-3-Clause](https://github.com/tailscale/win/blob/5992cb43ca35/LICENSE)) + - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/6376defdac3f/LICENSE)) + - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/f4da2b8ee071/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) - - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) + - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.5/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.24.0:LICENSE)) - - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.23.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.12.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.31.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.30.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.40.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) + - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) + - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.26.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.42.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.16.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) 
([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.34.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.33.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.27.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.35.1/LICENSE)) + - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.6/LICENSE)) - [gopkg.in/Knetic/govaluate.v3](https://pkg.go.dev/gopkg.in/Knetic/govaluate.v3) ([MIT](https://github.com/Knetic/govaluate/blob/v3.0.0/LICENSE)) - [gopkg.in/yaml.v3](https://pkg.go.dev/gopkg.in/yaml.v3) ([MIT](https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) From d42f0b6a21dc088a3b7b0366e144e148a118c642 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 28 Aug 2025 12:24:21 -0700 Subject: [PATCH 1238/1708] util/ringbuffer: rename to ringlog I need a ringbuffer in the more traditional sense, one that has a notion of item removal as well as tail loss on overrun. This implementation is really a clearable log window, and is used as such where it is used. 
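For illustration, a minimal usage sketch of the renamed package, based only on the New, Add, GetAll, Len, and Clear methods visible in the diff below (the surrounding main function is just a hypothetical caller):

    package main

    import (
    	"fmt"

    	"tailscale.com/util/ringlog"
    )

    func main() {
    	rl := ringlog.New[string](3) // keep at most the 3 most recent entries
    	for _, e := range []string{"a", "b", "c", "d"} {
    		rl.Add(e) // once full, the oldest entry is overwritten
    	}
    	fmt.Println(rl.Len())    // 3
    	fmt.Println(rl.GetAll()) // [b c d], oldest retained entry first
    	rl.Clear()
    	fmt.Println(rl.Len()) // 0
    }
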
Updates #cleanup Updates tailscale/corp#31762 Signed-off-by: James Tucker --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- tsnet/depaware.txt | 2 +- .../ringbuffer.go => ringlog/ringlog.go} | 35 +++++++++---------- .../ringlog_test.go} | 4 +-- wgengine/magicsock/endpoint.go | 4 +-- wgengine/magicsock/magicsock.go | 4 +-- 8 files changed, 27 insertions(+), 28 deletions(-) rename util/{ringbuffer/ringbuffer.go => ringlog/ringlog.go} (51%) rename util/{ringbuffer/ringbuffer_test.go => ringlog/ringlog_test.go} (95%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 85bec4a79..843ce27f2 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -947,7 +947,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/ringbuffer from tailscale.com/wgengine/magicsock + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock tailscale.com/util/set from tailscale.com/cmd/k8s-operator+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index a83c67cca..fdc48718c 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -424,7 +424,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/ringbuffer from tailscale.com/wgengine/magicsock + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock tailscale.com/util/set from tailscale.com/derp+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index a695aa5f3..503454f50 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -376,7 +376,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/cmd/tsidp+ - tailscale.com/util/ringbuffer from tailscale.com/wgengine/magicsock + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 67c182430..b490fcbca 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -371,7 +371,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/ringbuffer from tailscale.com/wgengine/magicsock + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from 
tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ diff --git a/util/ringbuffer/ringbuffer.go b/util/ringlog/ringlog.go similarity index 51% rename from util/ringbuffer/ringbuffer.go rename to util/ringlog/ringlog.go index baca2afe8..85e0c4861 100644 --- a/util/ringbuffer/ringbuffer.go +++ b/util/ringlog/ringlog.go @@ -1,32 +1,31 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Package ringbuffer contains a fixed-size concurrency-safe generic ring -// buffer. -package ringbuffer +// Package ringlog contains a limited-size concurrency-safe generic ring log. +package ringlog import "sync" -// New creates a new RingBuffer containing at most max items. -func New[T any](max int) *RingBuffer[T] { - return &RingBuffer[T]{ +// New creates a new [RingLog] containing at most max items. +func New[T any](max int) *RingLog[T] { + return &RingLog[T]{ max: max, } } -// RingBuffer is a concurrency-safe ring buffer. -type RingBuffer[T any] struct { +// RingLog is a concurrency-safe fixed size log window containing entries of [T]. +type RingLog[T any] struct { mu sync.Mutex pos int buf []T max int } -// Add appends a new item to the RingBuffer, possibly overwriting the oldest -// item in the buffer if it is already full. +// Add appends a new item to the [RingLog], possibly overwriting the oldest +// item in the log if it is already full. // // It does nothing if rb is nil. -func (rb *RingBuffer[T]) Add(t T) { +func (rb *RingLog[T]) Add(t T) { if rb == nil { return } @@ -40,11 +39,11 @@ func (rb *RingBuffer[T]) Add(t T) { } } -// GetAll returns a copy of all the entries in the ring buffer in the order they +// GetAll returns a copy of all the entries in the ring log in the order they // were added. // // It returns nil if rb is nil. -func (rb *RingBuffer[T]) GetAll() []T { +func (rb *RingLog[T]) GetAll() []T { if rb == nil { return nil } @@ -58,10 +57,10 @@ func (rb *RingBuffer[T]) GetAll() []T { return out } -// Len returns the number of elements in the ring buffer. Note that this value +// Len returns the number of elements in the ring log. Note that this value // could change immediately after being returned if a concurrent caller -// modifies the buffer. -func (rb *RingBuffer[T]) Len() int { +// modifies the log. +func (rb *RingLog[T]) Len() int { if rb == nil { return 0 } @@ -70,8 +69,8 @@ func (rb *RingBuffer[T]) Len() int { return len(rb.buf) } -// Clear will empty the ring buffer. -func (rb *RingBuffer[T]) Clear() { +// Clear will empty the ring log. 
+func (rb *RingLog[T]) Clear() { rb.mu.Lock() defer rb.mu.Unlock() rb.pos = 0 diff --git a/util/ringbuffer/ringbuffer_test.go b/util/ringlog/ringlog_test.go similarity index 95% rename from util/ringbuffer/ringbuffer_test.go rename to util/ringlog/ringlog_test.go index e10096bfb..d6776e181 100644 --- a/util/ringbuffer/ringbuffer_test.go +++ b/util/ringlog/ringlog_test.go @@ -1,14 +1,14 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package ringbuffer +package ringlog import ( "reflect" "testing" ) -func TestRingBuffer(t *testing.T) { +func TestRingLog(t *testing.T) { const numItems = 10 rb := New[int](numItems) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 37892176b..b8778b8d8 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -33,7 +33,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/util/mak" - "tailscale.com/util/ringbuffer" + "tailscale.com/util/ringlog" "tailscale.com/util/slicesx" ) @@ -60,7 +60,7 @@ type endpoint struct { lastRecvWG mono.Time // last time there were incoming packets from this peer destined for wireguard-go (e.g. not disco) lastRecvUDPAny mono.Time // last time there were incoming UDP packets from this peer of any kind numStopAndResetAtomic int64 - debugUpdates *ringbuffer.RingBuffer[EndpointChange] + debugUpdates *ringlog.RingLog[EndpointChange] // These fields are initialized once and never modified. c *Conn diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 7fb3517e9..a7f84e352 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -62,7 +62,7 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" "tailscale.com/util/mak" - "tailscale.com/util/ringbuffer" + "tailscale.com/util/ringlog" "tailscale.com/util/set" "tailscale.com/util/testenv" "tailscale.com/util/usermetric" @@ -3112,7 +3112,7 @@ func (c *Conn) updateNodes(update NodeViewsUpdate) (peersChanged bool) { // ~1MB on mobile but we never used the data so the memory was just // wasted. default: - ep.debugUpdates = ringbuffer.New[EndpointChange](entriesPerBuffer) + ep.debugUpdates = ringlog.New[EndpointChange](entriesPerBuffer) } if n.Addresses().Len() > 0 { ep.nodeAddr = n.Addresses().At(0).Addr() From f5d3c59a925b2f0ea249a32ddc0decdb43ff7ee9 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 28 Aug 2025 12:00:03 -0700 Subject: [PATCH 1239/1708] wgengine/magicsock: shorten process internal DERP queue DERP writes go via TCP and the host OS will have plenty of buffer space. We've observed in the wild with a backed up TCP socket kernel side buffers of >2.4MB. The DERP internal queue being larger causes an increase in the probability that the contents of the backbuffer are "dead letters" - packets that were assumed to be lost. A first step to improvement is to size this queue only large enough to avoid some of the initial connect stall problem, but not large enough that it is contributing in a substantial way to buffer bloat / dead-letter retention. 
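The mechanism being resized here is a bounded, drop-on-full write queue. A generic Go sketch of that pattern follows (illustrative names only, not the actual magicsock code), showing how a small fixed depth caps the stale backlog instead of letting it grow:

    package main

    import "fmt"

    type writeRequest struct{ payload []byte }

    const queueDepth = 32 // kept small on purpose: the TCP socket beneath it already buffers plenty

    func main() {
    	ch := make(chan writeRequest, queueDepth)
    	dropped := 0
    	for i := 0; i < 100; i++ {
    		select {
    		case ch <- writeRequest{payload: []byte{byte(i)}}:
    			// queued for the writer goroutine
    		default:
    			dropped++ // queue full: drop now rather than retain likely-dead packets
    		}
    	}
    	fmt.Println("queued:", len(ch), "dropped:", dropped) // queued: 32 dropped: 68
    }
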
Updates tailscale/corp#31762 Signed-off-by: James Tucker --- cmd/k8s-operator/depaware.txt | 1 - cmd/tailscaled/depaware.txt | 1 - cmd/tsidp/depaware.txt | 1 - tsnet/depaware.txt | 1 - wgengine/magicsock/derp.go | 72 ++++++---------------------- wgengine/magicsock/magicsock_test.go | 8 ---- 6 files changed, 15 insertions(+), 69 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 843ce27f2..4b1e4a1e4 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -958,7 +958,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index fdc48718c..c2d9f3d00 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -435,7 +435,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/ipn/ipnlocal+ tailscale.com/util/truncate from tailscale.com/logtail diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 503454f50..e8bc2b254 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -387,7 +387,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/ipnlocal+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index b490fcbca..aea6baf93 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -382,7 +382,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/ipnlocal+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index 5afdbc6d8..9c60e4893 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -11,9 +11,7 @@ import ( "net" "net/netip" "reflect" - "runtime" "slices" - "sync" "time" "unsafe" @@ 
-32,7 +30,6 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/mak" "tailscale.com/util/rands" - "tailscale.com/util/sysresources" "tailscale.com/util/testenv" ) @@ -282,59 +279,20 @@ func (c *Conn) goDerpConnect(regionID int) { go c.derpWriteChanForRegion(regionID, key.NodePublic{}) } -var ( - bufferedDerpWrites int - bufferedDerpWritesOnce sync.Once -) - -// bufferedDerpWritesBeforeDrop returns how many packets writes can be queued -// up the DERP client to write on the wire before we start dropping. -func bufferedDerpWritesBeforeDrop() int { - // For mobile devices, always return the previous minimum value of 32; - // we can do this outside the sync.Once to avoid that overhead. - if runtime.GOOS == "ios" || runtime.GOOS == "android" { - return 32 - } - - bufferedDerpWritesOnce.Do(func() { - // Some rough sizing: for the previous fixed value of 32, the - // total consumed memory can be: - // = numDerpRegions * messages/region * sizeof(message) - // - // For sake of this calculation, assume 100 DERP regions; at - // time of writing (2023-04-03), we have 24. - // - // A reasonable upper bound for the worst-case average size of - // a message is a *disco.CallMeMaybe message with 16 endpoints; - // since sizeof(netip.AddrPort) = 32, that's 512 bytes. Thus: - // = 100 * 32 * 512 - // = 1638400 (1.6MiB) - // - // On a reasonably-small node with 4GiB of memory that's - // connected to each region and handling a lot of load, 1.6MiB - // is about 0.04% of the total system memory. - // - // For sake of this calculation, then, let's double that memory - // usage to 0.08% and scale based on total system memory. - // - // For a 16GiB Linux box, this should buffer just over 256 - // messages. - systemMemory := sysresources.TotalMemory() - memoryUsable := float64(systemMemory) * 0.0008 - - const ( - theoreticalDERPRegions = 100 - messageMaximumSizeBytes = 512 - ) - bufferedDerpWrites = int(memoryUsable / (theoreticalDERPRegions * messageMaximumSizeBytes)) - - // Never drop below the previous minimum value. - if bufferedDerpWrites < 32 { - bufferedDerpWrites = 32 - } - }) - return bufferedDerpWrites -} +// derpWriteQueueDepth is the depth of the in-process write queue to a single +// DERP region. DERP connections are TCP, and so the actual write queue depth is +// substantially larger than this suggests - often scaling into megabytes +// depending on dynamic TCP parameters and platform TCP tuning. This queue is +// excess of the TCP buffer depth, which means it's almost pure buffer bloat, +// and does not want to be deep - if there are key situations where a node can't +// keep up, either the TCP link to DERP is too slow, or there is a +// synchronization issue in the write path, fixes should be focused on those +// paths, rather than extending this queue. +// TODO(raggi): make this even shorter, ideally this should be a fairly direct +// line into a socket TCP buffer. The challenge at present is that connect and +// reconnect are in the write path and we don't want to block other write +// operations on those. +const derpWriteQueueDepth = 32 // derpWriteChanForRegion returns a channel to which to send DERP packet write // requests. It creates a new DERP connection to regionID if necessary. 
@@ -429,7 +387,7 @@ func (c *Conn) derpWriteChanForRegion(regionID int, peer key.NodePublic) chan<- dc.DNSCache = dnscache.Get() ctx, cancel := context.WithCancel(c.connCtx) - ch := make(chan derpWriteRequest, bufferedDerpWritesBeforeDrop()) + ch := make(chan derpWriteRequest, derpWriteQueueDepth) ad.c = dc ad.writeCh = ch diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 5e348b02b..5774432d5 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -2137,14 +2137,6 @@ func TestOnNodeViewsUpdateWithNoPeers(t *testing.T) { } } -func TestBufferedDerpWritesBeforeDrop(t *testing.T) { - vv := bufferedDerpWritesBeforeDrop() - if vv < 32 { - t.Fatalf("got bufferedDerpWritesBeforeDrop=%d, which is < 32", vv) - } - t.Logf("bufferedDerpWritesBeforeDrop = %d", vv) -} - // newWireguard starts up a new wireguard-go device attached to a test tun, and // returns the device, tun and endpoint port. To add peers call device.IpcSet with UAPI instructions. func newWireguard(t *testing.T, uapi string, aips []netip.Prefix) (*device.Device, *tuntest.ChannelTUN, uint16) { From 3aea0e095a411cc98f3ad0b7c1706a00ca7662b0 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Thu, 28 Aug 2025 14:09:01 -0700 Subject: [PATCH 1240/1708] syncs: delete WaitGroup and use sync.WaitGroup.Go in Go 1.25 Our own WaitGroup wrapper type was a prototype implementation for the Go method on the standard sync.WaitGroup type. Now that there is first-class support for Go, we should migrate over to using it and delete syncs.WaitGroup. Updates #cleanup Updates tailscale/tailscale#16330 Change-Id: Ib52b10f9847341ce29b4ca0da927dc9321691235 Signed-off-by: Joe Tsai --- cmd/containerboot/egressservices.go | 5 ++--- cmd/tailscale/cli/file.go | 4 ++-- cmd/tailscale/depaware.txt | 2 +- feature/taildrop/delete.go | 3 +-- syncs/syncs.go | 16 ---------------- syncs/syncs_test.go | 5 +++-- 6 files changed, 9 insertions(+), 26 deletions(-) diff --git a/cmd/containerboot/egressservices.go b/cmd/containerboot/egressservices.go index 71141f17a..64ca0a13a 100644 --- a/cmd/containerboot/egressservices.go +++ b/cmd/containerboot/egressservices.go @@ -18,6 +18,7 @@ import ( "reflect" "strconv" "strings" + "sync" "time" "github.com/fsnotify/fsnotify" @@ -26,7 +27,6 @@ import ( "tailscale.com/kube/egressservices" "tailscale.com/kube/kubeclient" "tailscale.com/kube/kubetypes" - "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/util/httpm" "tailscale.com/util/linuxfw" @@ -666,8 +666,7 @@ func (ep *egressProxy) waitTillSafeToShutdown(ctx context.Context, cfgs *egresss return } log.Printf("Ensuring that cluster traffic for egress targets is no longer routed via this Pod...") - wg := syncs.WaitGroup{} - + var wg sync.WaitGroup for s, cfg := range *cfgs { hep := cfg.HealthCheckEndpoint if hep == "" { diff --git a/cmd/tailscale/cli/file.go b/cmd/tailscale/cli/file.go index 6f3aa40b5..e0879197e 100644 --- a/cmd/tailscale/cli/file.go +++ b/cmd/tailscale/cli/file.go @@ -20,6 +20,7 @@ import ( "path" "path/filepath" "strings" + "sync" "sync/atomic" "time" "unicode/utf8" @@ -32,7 +33,6 @@ import ( "tailscale.com/envknob" "tailscale.com/ipn/ipnstate" "tailscale.com/net/tsaddr" - "tailscale.com/syncs" "tailscale.com/tailcfg" tsrate "tailscale.com/tstime/rate" "tailscale.com/util/quarantine" @@ -176,7 +176,7 @@ func runCp(ctx context.Context, args []string) error { log.Printf("sending %q to %v/%v/%v ...", name, target, ip, stableID) } - var group syncs.WaitGroup + var group sync.WaitGroup 
ctxProgress, cancelProgress := context.WithCancel(ctx) defer cancelProgress() if isatty.IsTerminal(os.Stderr.Fd()) { diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index b121a411f..02ffec0ea 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -140,7 +140,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ - tailscale.com/syncs from tailscale.com/cmd/tailscale/cli+ + tailscale.com/syncs from tailscale.com/control/controlhttp+ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/spf13/cobra from tailscale.com/cmd/tailscale/cli/ffcomplete+ tailscale.com/tka from tailscale.com/client/local+ diff --git a/feature/taildrop/delete.go b/feature/taildrop/delete.go index 0b7259879..8b03a125f 100644 --- a/feature/taildrop/delete.go +++ b/feature/taildrop/delete.go @@ -12,7 +12,6 @@ import ( "time" "tailscale.com/ipn" - "tailscale.com/syncs" "tailscale.com/tstime" "tailscale.com/types/logger" ) @@ -33,7 +32,7 @@ type fileDeleter struct { byName map[string]*list.Element emptySignal chan struct{} // signal that the queue is empty - group syncs.WaitGroup + group sync.WaitGroup shutdownCtx context.Context shutdown context.CancelFunc fs FileOps // must be used for all filesystem operations diff --git a/syncs/syncs.go b/syncs/syncs.go index cf0be919b..e85b474c9 100644 --- a/syncs/syncs.go +++ b/syncs/syncs.go @@ -402,19 +402,3 @@ func (m *Map[K, V]) Swap(key K, value V) (oldValue V) { mak.Set(&m.m, key, value) return oldValue } - -// WaitGroup is identical to [sync.WaitGroup], -// but provides a Go method to start a goroutine. -type WaitGroup struct{ sync.WaitGroup } - -// Go calls the given function in a new goroutine. -// It automatically increments the counter before execution and -// automatically decrements the counter after execution. -// It must not be called concurrently with Wait. -func (wg *WaitGroup) Go(f func()) { - wg.Add(1) - go func() { - defer wg.Done() - f() - }() -} diff --git a/syncs/syncs_test.go b/syncs/syncs_test.go index 2439b6068..d99c3d1a9 100644 --- a/syncs/syncs_test.go +++ b/syncs/syncs_test.go @@ -7,6 +7,7 @@ import ( "context" "io" "os" + "sync" "testing" "time" @@ -98,7 +99,7 @@ func TestMutexValue(t *testing.T) { t.Errorf("Load = %v, want %v", v.Load(), now) } - var group WaitGroup + var group sync.WaitGroup var v2 MutexValue[int] var sum int for i := range 10 { @@ -237,7 +238,7 @@ func TestMap(t *testing.T) { t.Run("LoadOrStore", func(t *testing.T) { var m Map[string, string] - var wg WaitGroup + var wg sync.WaitGroup var ok1, ok2 bool wg.Go(func() { _, ok1 = m.LoadOrStore("", "") }) wg.Go(func() { _, ok2 = m.LoadOrStore("", "") }) From 1a98943204ef628ddcb257891152988d0d20916b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 28 Aug 2025 19:05:57 -0700 Subject: [PATCH 1241/1708] go.mod: bump github.com/ulikunitz/xz for security warning Doesn't look to affect us, but pacifies security scanners. See https://github.com/ulikunitz/xz/commit/88ddf1d0d98d688db65de034f48960b2760d2ae2 It's for decoding. We only use this package for encoding (via github.com/google/rpmpack / github.com/goreleaser/nfpm/v2). 
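The encode-only usage referred to above looks roughly like the following sketch (assuming the github.com/ulikunitz/xz Writer API; error handling abbreviated for brevity):

    package main

    import (
    	"bytes"
    	"log"

    	"github.com/ulikunitz/xz"
    )

    func main() {
    	var buf bytes.Buffer
    	w, err := xz.NewWriter(&buf) // compression (encoding) path only
    	if err != nil {
    		log.Fatal(err)
    	}
    	if _, err := w.Write([]byte("payload to compress")); err != nil {
    		log.Fatal(err)
    	}
    	if err := w.Close(); err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("compressed to %d bytes", buf.Len())
    }
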
Updates #8043 Change-Id: I87631aa5048f9514bb83baf1424f6abb34329c46 Signed-off-by: Brad Fitzpatrick --- flake.nix | 2 +- go.mod | 2 +- go.mod.sri | 2 +- go.sum | 4 ++-- shell.nix | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/flake.nix b/flake.nix index c739e8720..8cb5e078e 100644 --- a/flake.nix +++ b/flake.nix @@ -148,5 +148,5 @@ }); }; } -# nix-direnv cache busting line: sha256-15aaW3lqRgXQxBKEWRJTEV1GPmG7Gc9XwsTTu+M5rTY= +# nix-direnv cache busting line: sha256-8aE6dWMkTLdWRD9WnLVSzpOQQh61voEnjZAJHtbGCSs= diff --git a/go.mod b/go.mod index ecd229427..e6c480494 100644 --- a/go.mod +++ b/go.mod @@ -391,7 +391,7 @@ require ( github.com/tomarrell/wrapcheck/v2 v2.8.3 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 // indirect - github.com/ulikunitz/xz v0.5.11 // indirect + github.com/ulikunitz/xz v0.5.15 // indirect github.com/ultraware/funlen v0.1.0 // indirect github.com/ultraware/whitespace v0.1.0 // indirect github.com/uudashr/gocognit v1.1.2 // indirect diff --git a/go.mod.sri b/go.mod.sri index 69c69b8db..781799de5 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-15aaW3lqRgXQxBKEWRJTEV1GPmG7Gc9XwsTTu+M5rTY= +sha256-8aE6dWMkTLdWRD9WnLVSzpOQQh61voEnjZAJHtbGCSs= diff --git a/go.sum b/go.sum index f2544b9ac..72ddb730f 100644 --- a/go.sum +++ b/go.sum @@ -1031,8 +1031,8 @@ github.com/u-root/u-root v0.14.0 h1:Ka4T10EEML7dQ5XDvO9c3MBN8z4nuSnGjcd1jmU2ivg= github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1WMluqE= github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= -github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= -github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= +github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI= github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= github.com/ultraware/whitespace v0.1.0 h1:O1HKYoh0kIeqE8sFqZf1o0qbORXUCOQFrlaQyZsczZw= diff --git a/shell.nix b/shell.nix index e0f6e79f1..883d71bef 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-15aaW3lqRgXQxBKEWRJTEV1GPmG7Gc9XwsTTu+M5rTY= +# nix-direnv cache busting line: sha256-8aE6dWMkTLdWRD9WnLVSzpOQQh61voEnjZAJHtbGCSs= From 7cbcc10eb10cdea7cc42511f7d5c4f584c8ead7a Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Fri, 29 Aug 2025 10:33:14 -0700 Subject: [PATCH 1242/1708] syncs: add Semaphore.Len (#16981) The Len reports the number of acquired tokens for metrics. Updates tailscale/corp#31252 Signed-off-by: Joe Tsai --- syncs/syncs.go | 7 +++++++ syncs/syncs_test.go | 14 ++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/syncs/syncs.go b/syncs/syncs.go index e85b474c9..3b37bca08 100644 --- a/syncs/syncs.go +++ b/syncs/syncs.go @@ -201,6 +201,13 @@ func NewSemaphore(n int) Semaphore { return Semaphore{c: make(chan struct{}, n)} } +// Len reports the number of in-flight acquisitions. +// It is incremented whenever the semaphore is acquired. +// It is decremented whenever the semaphore is released. 
+func (s Semaphore) Len() int { + return len(s.c) +} + // Acquire blocks until a resource is acquired. func (s Semaphore) Acquire() { s.c <- struct{}{} diff --git a/syncs/syncs_test.go b/syncs/syncs_test.go index d99c3d1a9..a546b8d0a 100644 --- a/syncs/syncs_test.go +++ b/syncs/syncs_test.go @@ -162,10 +162,20 @@ func TestClosedChan(t *testing.T) { func TestSemaphore(t *testing.T) { s := NewSemaphore(2) + assertLen := func(want int) { + t.Helper() + if got := s.Len(); got != want { + t.Fatalf("Len = %d, want %d", got, want) + } + } + + assertLen(0) s.Acquire() + assertLen(1) if !s.TryAcquire() { t.Fatal("want true") } + assertLen(2) if s.TryAcquire() { t.Fatal("want false") } @@ -175,11 +185,15 @@ func TestSemaphore(t *testing.T) { t.Fatal("want false") } s.Release() + assertLen(1) if !s.AcquireContext(context.Background()) { t.Fatal("want true") } + assertLen(2) s.Release() + assertLen(1) s.Release() + assertLen(0) } func TestMap(t *testing.T) { From 76fc02be09a069bcc4e440f07ba2640a56cfb5d8 Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Fri, 29 Aug 2025 14:25:58 -0400 Subject: [PATCH 1243/1708] words: just an ordinary commit, nothing fishy at all (#16982) * words: just an ordinary commit, nothing fishy at all Updates #words Signed-off-by: Naman Sood --- words/tails.txt | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/words/tails.txt b/words/tails.txt index 7e35c6970..20ff326c1 100644 --- a/words/tails.txt +++ b/words/tails.txt @@ -722,3 +722,45 @@ follow stalk caudal chronicle +trout +sturgeon +swordfish +catfish +pike +angler +anchovy +angelfish +cod +icefish +carp +mackarel +salmon +grayling +lungfish +dragonfish +barracuda +barreleye +bass +ridgehead +bigscale +blowfish +bream +bullhead +pufferfish +sardine +sunfish +mullet +snapper +pipefish +seahorse +flounder +tilapia +chub +dorado +shad +lionfish +crayfish +sailfish +billfish +taimen +sargo From 89fe2e1f126d9de3567500fa0b240cc0ac489c09 Mon Sep 17 00:00:00 2001 From: Remy Guercio Date: Fri, 29 Aug 2025 15:16:39 -0500 Subject: [PATCH 1244/1708] cmd/tsidp: add allow-insecure-no-client-registration and JSON file migration (#16881) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a ternary flag that unless set explicitly to false keeps the insecure behavior of TSIDP. If the flag is false, add functionality on startup to migrate oidc-funnel-clients.json to oauth-clients.json if it doesn’t exist. If the flag is false, modify endpoints to behave similarly regardless of funnel, tailnet, or localhost. They will all verify client ID & secret when appropriate per RFC 6749. The authorize endpoint will no longer change based on funnel status or nodeID. Add extra tests verifying TSIDP endpoints behave as expected with the new flag. Safely create the redirect URL from what's passed into the authorize endpoint. Fixes #16880 Signed-off-by: Remy Guercio --- cmd/tsidp/tsidp.go | 375 +++++++++++-- cmd/tsidp/tsidp_test.go | 1140 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 1441 insertions(+), 74 deletions(-) diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 2fc6d27e4..c02b09745 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -47,6 +47,7 @@ import ( "tailscale.com/tsnet" "tailscale.com/types/key" "tailscale.com/types/lazy" + "tailscale.com/types/opt" "tailscale.com/types/views" "tailscale.com/util/mak" "tailscale.com/util/must" @@ -61,20 +62,40 @@ type ctxConn struct{} // accessing the IDP over Funnel are persisted. 
const funnelClientsFile = "oidc-funnel-clients.json" +// oauthClientsFile is the new file name for OAuth clients when running in secure mode. +const oauthClientsFile = "oauth-clients.json" + +// deprecatedFunnelClientsFile is the name used when renaming the old file. +const deprecatedFunnelClientsFile = "deprecated-oidc-funnel-clients.json" + // oidcKeyFile is where the OIDC private key is persisted. const oidcKeyFile = "oidc-key.json" var ( - flagVerbose = flag.Bool("verbose", false, "be verbose") - flagPort = flag.Int("port", 443, "port to listen on") - flagLocalPort = flag.Int("local-port", -1, "allow requests from localhost") - flagUseLocalTailscaled = flag.Bool("use-local-tailscaled", false, "use local tailscaled instead of tsnet") - flagFunnel = flag.Bool("funnel", false, "use Tailscale Funnel to make tsidp available on the public internet") - flagHostname = flag.String("hostname", "idp", "tsnet hostname to use instead of idp") - flagDir = flag.String("dir", "", "tsnet state directory; a default one will be created if not provided") + flagVerbose = flag.Bool("verbose", false, "be verbose") + flagPort = flag.Int("port", 443, "port to listen on") + flagLocalPort = flag.Int("local-port", -1, "allow requests from localhost") + flagUseLocalTailscaled = flag.Bool("use-local-tailscaled", false, "use local tailscaled instead of tsnet") + flagFunnel = flag.Bool("funnel", false, "use Tailscale Funnel to make tsidp available on the public internet") + flagHostname = flag.String("hostname", "idp", "tsnet hostname to use instead of idp") + flagDir = flag.String("dir", "", "tsnet state directory; a default one will be created if not provided") + flagAllowInsecureRegistrationBool opt.Bool + flagAllowInsecureRegistration = opt.BoolFlag{Bool: &flagAllowInsecureRegistrationBool} ) +// getAllowInsecureRegistration returns whether to allow OAuth flows without pre-registered clients. +// Default is true for backward compatibility; explicitly set to false for strict OAuth compliance. +func getAllowInsecureRegistration() bool { + v, ok := flagAllowInsecureRegistration.Get() + if !ok { + // Flag not set, default to true (allow insecure for backward compatibility) + return true + } + return v +} + func main() { + flag.Var(&flagAllowInsecureRegistration, "allow-insecure-registration", "allow OAuth flows without pre-registered client credentials (default: true for backward compatibility; set to false for strict OAuth compliance)") flag.Parse() ctx := context.Background() if !envknob.UseWIPCode() { @@ -172,10 +193,11 @@ func main() { } srv := &idpServer{ - lc: lc, - funnel: *flagFunnel, - localTSMode: *flagUseLocalTailscaled, - rootPath: rootPath, + lc: lc, + funnel: *flagFunnel, + localTSMode: *flagUseLocalTailscaled, + rootPath: rootPath, + allowInsecureRegistration: getAllowInsecureRegistration(), } if *flagPort != 443 { @@ -184,20 +206,29 @@ func main() { srv.serverURL = fmt.Sprintf("https://%s", strings.TrimSuffix(st.Self.DNSName, ".")) } - // Load funnel clients from disk if they exist, regardless of whether funnel is enabled - // This ensures OIDC clients persist across restarts - funnelClientsFilePath, err := getConfigFilePath(rootPath, funnelClientsFile) - if err != nil { - log.Fatalf("could not get funnel clients file path: %v", err) + // If allowInsecureRegistration is enabled, the old oidc-funnel-clients.json path is used. + // If allowInsecureRegistration is disabled, attempt to migrate the old path to oidc-clients.json and use this new path. 
+ var clientsFilePath string + if !srv.allowInsecureRegistration { + clientsFilePath, err = migrateOAuthClients(rootPath) + if err != nil { + log.Fatalf("could not migrate OAuth clients: %v", err) + } + } else { + clientsFilePath, err = getConfigFilePath(rootPath, funnelClientsFile) + if err != nil { + log.Fatalf("could not get funnel clients file path: %v", err) + } } - f, err := os.Open(funnelClientsFilePath) + + f, err := os.Open(clientsFilePath) if err == nil { if err := json.NewDecoder(f).Decode(&srv.funnelClients); err != nil { - log.Fatalf("could not parse %s: %v", funnelClientsFilePath, err) + log.Fatalf("could not parse %s: %v", clientsFilePath, err) } f.Close() } else if !errors.Is(err, os.ErrNotExist) { - log.Fatalf("could not open %s: %v", funnelClientsFilePath, err) + log.Fatalf("could not open %s: %v", clientsFilePath, err) } log.Printf("Running tsidp at %s ...", srv.serverURL) @@ -304,12 +335,13 @@ func serveOnLocalTailscaled(ctx context.Context, lc *local.Client, st *ipnstate. } type idpServer struct { - lc *local.Client - loopbackURL string - serverURL string // "https://foo.bar.ts.net" - funnel bool - localTSMode bool - rootPath string // root path, used for storing state files + lc *local.Client + loopbackURL string + serverURL string // "https://foo.bar.ts.net" + funnel bool + localTSMode bool + rootPath string // root path, used for storing state files + allowInsecureRegistration bool // If true, allow OAuth without pre-registered clients lazyMux lazy.SyncValue[*http.ServeMux] lazySigningKey lazy.SyncValue[*signingKey] @@ -393,14 +425,15 @@ func (ar *authRequest) allowRelyingParty(r *http.Request, lc *local.Client) erro } func (s *idpServer) authorize(w http.ResponseWriter, r *http.Request) { + // This URL is visited by the user who is being authenticated. If they are // visiting the URL over Funnel, that means they are not part of the // tailnet that they are trying to be authenticated for. + // NOTE: Funnel request behavior is the same regardless of secure or insecure mode. if isFunnelRequest(r) { http.Error(w, "tsidp: unauthorized", http.StatusUnauthorized) return } - uq := r.URL.Query() redirectURI := uq.Get("redirect_uri") @@ -409,6 +442,86 @@ func (s *idpServer) authorize(w http.ResponseWriter, r *http.Request) { return } + clientID := uq.Get("client_id") + if clientID == "" { + http.Error(w, "tsidp: must specify client_id", http.StatusBadRequest) + return + } + + if !s.allowInsecureRegistration { + // When insecure registration is NOT allowed, validate client_id exists but defer client_secret validation to token endpoint + // This follows RFC 6749 which specifies client authentication should occur at token endpoint, not authorization endpoint + + s.mu.Lock() + c, ok := s.funnelClients[clientID] + s.mu.Unlock() + if !ok { + http.Error(w, "tsidp: invalid client ID", http.StatusBadRequest) + return + } + + // Validate client_id matches (public identifier validation) + clientIDcmp := subtle.ConstantTimeCompare([]byte(clientID), []byte(c.ID)) + if clientIDcmp != 1 { + http.Error(w, "tsidp: invalid client ID", http.StatusBadRequest) + return + } + + // Validate redirect URI + if redirectURI != c.RedirectURI { + http.Error(w, "tsidp: redirect_uri mismatch", http.StatusBadRequest) + return + } + + // Get user information + var remoteAddr string + if s.localTSMode { + remoteAddr = r.Header.Get("X-Forwarded-For") + } else { + remoteAddr = r.RemoteAddr + } + + // Check who is visiting the authorize endpoint. 
+ var who *apitype.WhoIsResponse + var err error + who, err = s.lc.WhoIs(r.Context(), remoteAddr) + if err != nil { + log.Printf("Error getting WhoIs: %v", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + code := rands.HexString(32) + ar := &authRequest{ + nonce: uq.Get("nonce"), + remoteUser: who, + redirectURI: redirectURI, + clientID: clientID, + funnelRP: c, // Store the validated client + } + + s.mu.Lock() + mak.Set(&s.code, code, ar) + s.mu.Unlock() + + q := make(url.Values) + q.Set("code", code) + if state := uq.Get("state"); state != "" { + q.Set("state", state) + } + parsedURL, err := url.Parse(redirectURI) + if err != nil { + http.Error(w, "invalid redirect URI", http.StatusInternalServerError) + return + } + parsedURL.RawQuery = q.Encode() + u := parsedURL.String() + log.Printf("Redirecting to %q", u) + + http.Redirect(w, r, u, http.StatusFound) + return + } + var remoteAddr string if s.localTSMode { // in local tailscaled mode, the local tailscaled is forwarding us @@ -430,7 +543,7 @@ func (s *idpServer) authorize(w http.ResponseWriter, r *http.Request) { nonce: uq.Get("nonce"), remoteUser: who, redirectURI: redirectURI, - clientID: uq.Get("client_id"), + clientID: clientID, } if r.URL.Path == "/authorize/funnel" { @@ -466,7 +579,13 @@ func (s *idpServer) authorize(w http.ResponseWriter, r *http.Request) { if state := uq.Get("state"); state != "" { q.Set("state", state) } - u := redirectURI + "?" + q.Encode() + parsedURL, err := url.Parse(redirectURI) + if err != nil { + http.Error(w, "invalid redirect URI", http.StatusInternalServerError) + return + } + parsedURL.RawQuery = q.Encode() + u := parsedURL.String() log.Printf("Redirecting to %q", u) http.Redirect(w, r, u, http.StatusFound) @@ -476,7 +595,13 @@ func (s *idpServer) newMux() *http.ServeMux { mux := http.NewServeMux() mux.HandleFunc(oidcJWKSPath, s.serveJWKS) mux.HandleFunc(oidcConfigPath, s.serveOpenIDConfig) - mux.HandleFunc("/authorize/", s.authorize) + if !s.allowInsecureRegistration { + // When insecure registration is NOT allowed, use a single /authorize endpoint + mux.HandleFunc("/authorize", s.authorize) + } else { + // When insecure registration is allowed, preserve original behavior with path-based routing + mux.HandleFunc("/authorize/", s.authorize) + } mux.HandleFunc("/userinfo", s.serveUserInfo) mux.HandleFunc("/token", s.serveToken) mux.HandleFunc("/clients/", s.serveClients) @@ -513,6 +638,24 @@ func (s *idpServer) serveUserInfo(w http.ResponseWriter, r *http.Request) { s.mu.Lock() delete(s.accessToken, tk) s.mu.Unlock() + return + } + + if !s.allowInsecureRegistration { + // When insecure registration is NOT allowed, validate that the token was issued to a valid client. 
+		if ar.clientID == "" {
+			http.Error(w, "tsidp: no client associated with token", http.StatusBadRequest)
+			return
+		}
+
+		// Validate client still exists
+		s.mu.Lock()
+		_, clientExists := s.funnelClients[ar.clientID]
+		s.mu.Unlock()
+		if !clientExists {
+			http.Error(w, "tsidp: client no longer exists", http.StatusUnauthorized)
+			return
+		}
 	}
 
 	ui := userInfo{}
@@ -722,11 +865,58 @@ func (s *idpServer) serveToken(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, "tsidp: code not found", http.StatusBadRequest)
 		return
 	}
-	if err := ar.allowRelyingParty(r, s.lc); err != nil {
-		log.Printf("Error allowing relying party: %v", err)
-		http.Error(w, err.Error(), http.StatusForbidden)
-		return
+
+	if !s.allowInsecureRegistration {
+		// When insecure registration is NOT allowed, always validate client credentials regardless of request source
+		clientID := r.FormValue("client_id")
+		clientSecret := r.FormValue("client_secret")
+
+		// Try basic auth if form values are empty
+		if clientID == "" || clientSecret == "" {
+			if basicClientID, basicClientSecret, ok := r.BasicAuth(); ok {
+				if clientID == "" {
+					clientID = basicClientID
+				}
+				if clientSecret == "" {
+					clientSecret = basicClientSecret
+				}
+			}
+		}
+
+		if clientID == "" || clientSecret == "" {
+			http.Error(w, "tsidp: client credentials required when insecure registration is not allowed", http.StatusUnauthorized)
+			return
+		}
+
+		// Validate against the stored auth request
+		if ar.clientID != clientID {
+			http.Error(w, "tsidp: client_id mismatch", http.StatusBadRequest)
+			return
+		}
+
+		// Validate client credentials against stored clients
+		if ar.funnelRP == nil {
+			http.Error(w, "tsidp: no client information found", http.StatusBadRequest)
+			return
+		}
+
+		clientIDcmp := subtle.ConstantTimeCompare([]byte(clientID), []byte(ar.funnelRP.ID))
+		clientSecretcmp := subtle.ConstantTimeCompare([]byte(clientSecret), []byte(ar.funnelRP.Secret))
+		if clientIDcmp != 1 || clientSecretcmp != 1 {
+			http.Error(w, "tsidp: invalid client credentials", http.StatusUnauthorized)
+			return
+		}
+	} else {
+		// Original behavior when insecure registration is allowed:
+		// only check client ID and client secret when the request is over funnel.
+		// Local connections are allowed and tailnet connections only check matching nodeIDs.
+		if err := ar.allowRelyingParty(r, s.lc); err != nil {
+			log.Printf("Error allowing relying party: %v", err)
+			http.Error(w, err.Error(), http.StatusForbidden)
+			return
+		}
 	}
+
 	if ar.redirectURI != r.FormValue("redirect_uri") {
 		http.Error(w, "tsidp: redirect_uri mismatch", http.StatusBadRequest)
 		return
@@ -977,24 +1167,38 @@ func (s *idpServer) serveOpenIDConfig(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, "tsidp: not found", http.StatusNotFound)
 		return
 	}
-	ap, err := netip.ParseAddrPort(r.RemoteAddr)
-	if err != nil {
-		log.Printf("Error parsing remote addr: %v", err)
-		return
-	}
+
 	var authorizeEndpoint string
 	rpEndpoint := s.serverURL
-	if isFunnelRequest(r) {
-		authorizeEndpoint = fmt.Sprintf("%s/authorize/funnel", s.serverURL)
-	} else if who, err := s.lc.WhoIs(r.Context(), r.RemoteAddr); err == nil {
-		authorizeEndpoint = fmt.Sprintf("%s/authorize/%d", s.serverURL, who.Node.ID)
-	} else if ap.Addr().IsLoopback() {
-		rpEndpoint = s.loopbackURL
-		authorizeEndpoint = fmt.Sprintf("%s/authorize/localhost", s.serverURL)
+
+	if !s.allowInsecureRegistration {
+		// When insecure registration is NOT allowed, use a single authorization endpoint for all request types.
+		// It is the same regardless of whether the user is on localhost, the tailnet, or funnel.
+		authorizeEndpoint = fmt.Sprintf("%s/authorize", s.serverURL)
+		rpEndpoint = s.serverURL
 	} else {
-		log.Printf("Error getting WhoIs: %v", err)
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
+		// When insecure registration is allowed, TSIDP uses the requestor's nodeID
+		// (typically that of the resource server during auto discovery) when on the tailnet
+		// and adds it to the authorize URL as a replacement clientID for when the user authorizes.
+		// Requests over funnel drop the nodeID/clientID replacement behavior and require a
+		// previously created clientID and client secret.
+		ap, err := netip.ParseAddrPort(r.RemoteAddr)
+		if err != nil {
+			log.Printf("Error parsing remote addr: %v", err)
+			return
+		}
+		if isFunnelRequest(r) {
+			authorizeEndpoint = fmt.Sprintf("%s/authorize/funnel", s.serverURL)
+		} else if who, err := s.lc.WhoIs(r.Context(), r.RemoteAddr); err == nil {
+			authorizeEndpoint = fmt.Sprintf("%s/authorize/%d", s.serverURL, who.Node.ID)
+		} else if ap.Addr().IsLoopback() {
+			rpEndpoint = s.loopbackURL
+			authorizeEndpoint = fmt.Sprintf("%s/authorize/localhost", s.serverURL)
+		} else {
+			log.Printf("Error getting WhoIs: %v", err)
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
 	}
 
 	w.Header().Set("Content-Type", "application/json")
@@ -1148,20 +1352,27 @@ func (s *idpServer) serveDeleteClient(w http.ResponseWriter, r *http.Request, cl
 }
 
 // storeFunnelClientsLocked writes the current mapping of OIDC client ID/secret
-// pairs for RPs that access the IDP over funnel. s.mu must be held while
-// calling this.
+// pairs for RPs that access the IDP. When insecure registration is NOT allowed, it uses oauth-clients.json;
+// otherwise it uses oidc-funnel-clients.json. s.mu must be held while calling this.
func (s *idpServer) storeFunnelClientsLocked() error { var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(s.funnelClients); err != nil { return err } - funnelClientsFilePath, err := getConfigFilePath(s.rootPath, funnelClientsFile) + var clientsFilePath string + var err error + if !s.allowInsecureRegistration { + clientsFilePath, err = getConfigFilePath(s.rootPath, oauthClientsFile) + } else { + clientsFilePath, err = getConfigFilePath(s.rootPath, funnelClientsFile) + } + if err != nil { return fmt.Errorf("storeFunnelClientsLocked: %v", err) } - return os.WriteFile(funnelClientsFilePath, buf.Bytes(), 0600) + return os.WriteFile(clientsFilePath, buf.Bytes(), 0600) } const ( @@ -1275,9 +1486,67 @@ func isFunnelRequest(r *http.Request) bool { return false } +// migrateOAuthClients migrates from oidc-funnel-clients.json to oauth-clients.json. +// If oauth-clients.json already exists, no migration is performed. +// If both files are missing a new configuration is created. +// The path to the new configuration file is returned. +func migrateOAuthClients(rootPath string) (string, error) { + // First, check for oauth-clients.json (new file) + oauthPath, err := getConfigFilePath(rootPath, oauthClientsFile) + if err != nil { + return "", fmt.Errorf("could not get oauth clients file path: %w", err) + } + if _, err := os.Stat(oauthPath); err == nil { + // oauth-clients.json already exists, use it + return oauthPath, nil + } + + // Check for old oidc-funnel-clients.json + oldPath, err := getConfigFilePath(rootPath, funnelClientsFile) + if err != nil { + return "", fmt.Errorf("could not get funnel clients file path: %w", err) + } + if _, err := os.Stat(oldPath); err == nil { + // Old file exists, migrate it + log.Printf("Migrating OAuth clients from %s to %s", oldPath, oauthPath) + + // Read the old file + data, err := os.ReadFile(oldPath) + if err != nil { + return "", fmt.Errorf("could not read old funnel clients file: %w", err) + } + + // Write to new location + if err := os.WriteFile(oauthPath, data, 0600); err != nil { + return "", fmt.Errorf("could not write new oauth clients file: %w", err) + } + + // Rename old file to deprecated name + deprecatedPath, err := getConfigFilePath(rootPath, deprecatedFunnelClientsFile) + if err != nil { + return "", fmt.Errorf("could not get deprecated file path: %w", err) + } + if err := os.Rename(oldPath, deprecatedPath); err != nil { + log.Printf("Warning: could not rename old file to deprecated name: %v", err) + } else { + log.Printf("Renamed old file to %s", deprecatedPath) + } + + return oauthPath, nil + } + + // Neither file exists, create empty oauth-clients.json + log.Printf("Creating empty OAuth clients file at %s", oauthPath) + if err := os.WriteFile(oauthPath, []byte("{}"), 0600); err != nil { + return "", fmt.Errorf("could not create empty oauth clients file: %w", err) + } + + return oauthPath, nil +} + // getConfigFilePath returns the path to the config file for the given file name. // The oidc-key.json and funnel-clients.json files were originally opened and written -// to without paths, and ended up in /root dir or home directory of the user running +// to without paths, and ended up in /root or home directory of the user running // the process. To maintain backward compatibility, we return the naked file name if that // file exists already, otherwise we return the full path in the rootPath. 
func getConfigFilePath(rootPath string, fileName string) (string, error) { diff --git a/cmd/tsidp/tsidp_test.go b/cmd/tsidp/tsidp_test.go index e5465d3cf..4f5af9e59 100644 --- a/cmd/tsidp/tsidp_test.go +++ b/cmd/tsidp/tsidp_test.go @@ -1,6 +1,19 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +// Package main tests for tsidp focus on OAuth security boundaries and +// correct implementation of the OpenID Connect identity provider. +// +// Test Strategy: +// - Tests are intentionally granular to provide clear failure signals when +// security-critical logic breaks +// - OAuth flow tests cover both strict mode (registered clients only) and +// legacy mode (local funnel clients) to ensure proper access controls +// - Helper functions like normalizeMap ensure deterministic comparisons +// despite JSON marshaling order variations +// - The privateKey global is reused across tests for performance (RSA key +// generation is expensive) + package main import ( @@ -16,21 +29,28 @@ import ( "net/netip" "net/url" "os" + "path/filepath" "reflect" "sort" "strings" + "sync" "testing" "time" "gopkg.in/square/go-jose.v2" "gopkg.in/square/go-jose.v2/jwt" + "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/tailcfg" "tailscale.com/types/key" + "tailscale.com/types/opt" "tailscale.com/types/views" ) -// normalizeMap recursively sorts []any values in a map[string]any +// normalizeMap recursively sorts []any values in a map[string]any to ensure +// deterministic test comparisons. This is necessary because JSON marshaling +// doesn't guarantee array order, and we need stable comparisons when testing +// claim merging and flattening logic. func normalizeMap(t *testing.T, m map[string]any) map[string]any { t.Helper() normalized := make(map[string]any, len(m)) @@ -66,7 +86,13 @@ func mustMarshalJSON(t *testing.T, v any) tailcfg.RawMessage { return tailcfg.RawMessage(b) } -var privateKey *rsa.PrivateKey = nil +// privateKey is a shared RSA private key used across tests. It's lazily +// initialized on first use to avoid the expensive key generation cost +// for every test. Protected by privateKeyMu for thread safety. 
+var ( + privateKey *rsa.PrivateKey + privateKeyMu sync.Mutex +) func oidcTestingSigner(t *testing.T) jose.Signer { t.Helper() @@ -86,6 +112,9 @@ func oidcTestingPublicKey(t *testing.T) *rsa.PublicKey { func mustGeneratePrivateKey(t *testing.T) *rsa.PrivateKey { t.Helper() + privateKeyMu.Lock() + defer privateKeyMu.Unlock() + if privateKey != nil { return privateKey } @@ -181,7 +210,7 @@ func TestFlattenExtraClaims(t *testing.T) { {ExtraClaims: map[string]any{"foo": []any{"baz"}}}, }, expected: map[string]any{ - "foo": []any{"bar", "baz"}, // since first was scalar, second being a slice forces slice output + "foo": []any{"bar", "baz"}, // converts to slice when any rule provides a slice }, }, { @@ -462,6 +491,7 @@ func TestServeToken(t *testing.T) { omitCode bool redirectURI string remoteAddr string + strictMode bool expectError bool expected map[string]any }{ @@ -469,12 +499,14 @@ func TestServeToken(t *testing.T) { name: "GET not allowed", method: "GET", grantType: "authorization_code", + strictMode: false, expectError: true, }, { name: "unsupported grant type", method: "POST", grantType: "pkcs", + strictMode: false, expectError: true, }, { @@ -482,6 +514,7 @@ func TestServeToken(t *testing.T) { method: "POST", grantType: "authorization_code", code: "invalid-code", + strictMode: false, expectError: true, }, { @@ -489,6 +522,7 @@ func TestServeToken(t *testing.T) { method: "POST", grantType: "authorization_code", omitCode: true, + strictMode: false, expectError: true, }, { @@ -498,6 +532,7 @@ func TestServeToken(t *testing.T) { code: "valid-code", redirectURI: "https://invalid.example.com/callback", remoteAddr: "127.0.0.1:12345", + strictMode: false, expectError: true, }, { @@ -507,15 +542,17 @@ func TestServeToken(t *testing.T) { redirectURI: "https://rp.example.com/callback", code: "valid-code", remoteAddr: "192.168.0.1:12345", + strictMode: false, expectError: true, }, { - name: "extra claim included", + name: "extra claim included (non-strict)", method: "POST", grantType: "authorization_code", redirectURI: "https://rp.example.com/callback", code: "valid-code", remoteAddr: "127.0.0.1:12345", + strictMode: false, caps: tailcfg.PeerCapMap{ tailcfg.PeerCapabilityTsIDP: { mustMarshalJSON(t, capRule{ @@ -531,11 +568,12 @@ func TestServeToken(t *testing.T) { }, }, { - name: "attempt to overwrite protected claim", + name: "attempt to overwrite protected claim (non-strict)", method: "POST", grantType: "authorization_code", redirectURI: "https://rp.example.com/callback", code: "valid-code", + strictMode: false, caps: tailcfg.PeerCapMap{ tailcfg.PeerCapabilityTsIDP: { mustMarshalJSON(t, capRule{ @@ -554,6 +592,9 @@ func TestServeToken(t *testing.T) { t.Run(tt.name, func(t *testing.T) { now := time.Now() + // Use setupTestServer helper + s := setupTestServer(t, tt.strictMode) + // Fake user/node profile := &tailcfg.UserProfile{ LoginName: "alice@example.com", @@ -575,20 +616,27 @@ func TestServeToken(t *testing.T) { CapMap: tt.caps, } - s := &idpServer{ - code: map[string]*authRequest{ - "valid-code": { - clientID: "client-id", - nonce: "nonce123", - redirectURI: "https://rp.example.com/callback", - validTill: now.Add(5 * time.Minute), - remoteUser: remoteUser, - localRP: true, - }, - }, + // Setup auth request with appropriate configuration for strict mode + var funnelClientPtr *funnelClient + if tt.strictMode { + funnelClientPtr = &funnelClient{ + ID: "client-id", + Secret: "test-secret", + Name: "Test Client", + RedirectURI: "https://rp.example.com/callback", + } + s.funnelClients["client-id"] 
= funnelClientPtr + } + + s.code["valid-code"] = &authRequest{ + clientID: "client-id", + nonce: "nonce123", + redirectURI: "https://rp.example.com/callback", + validTill: now.Add(5 * time.Minute), + remoteUser: remoteUser, + localRP: !tt.strictMode, + funnelRP: funnelClientPtr, } - // Inject a working signer - s.lazySigner.Set(oidcTestingSigner(t)) form := url.Values{} form.Set("grant_type", tt.grantType) @@ -596,6 +644,11 @@ func TestServeToken(t *testing.T) { if !tt.omitCode { form.Set("code", tt.code) } + // Add client credentials for strict mode + if tt.strictMode { + form.Set("client_id", "client-id") + form.Set("client_secret", "test-secret") + } req := httptest.NewRequest(tt.method, "/token", strings.NewReader(form.Encode())) req.RemoteAddr = tt.remoteAddr @@ -779,6 +832,7 @@ func TestExtraUserInfo(t *testing.T) { // Insert a valid token into the idpServer s := &idpServer{ + allowInsecureRegistration: true, // Default to allowing insecure registration for backward compatibility accessToken: map[string]*authRequest{ token: { validTill: tt.tokenValidTill, @@ -854,7 +908,7 @@ func TestFunnelClientsPersistence(t *testing.T) { t.Fatalf("failed to write test file: %v", err) } - t.Run("step1_load_from_existing_file", func(t *testing.T) { + t.Run("load_from_existing_file", func(t *testing.T) { srv := &idpServer{} // Simulate the funnel clients loading logic from main() @@ -887,7 +941,7 @@ func TestFunnelClientsPersistence(t *testing.T) { } }) - t.Run("step2_initialize_empty_when_no_file", func(t *testing.T) { + t.Run("initialize_empty_when_no_file", func(t *testing.T) { nonExistentFile := t.TempDir() + "/non-existent.json" srv := &idpServer{} @@ -913,7 +967,7 @@ func TestFunnelClientsPersistence(t *testing.T) { } }) - t.Run("step3_persist_and_reload_clients", func(t *testing.T) { + t.Run("persist_and_reload_clients", func(t *testing.T) { tmpFile2 := t.TempDir() + "/test-persistence.json" // Create initial server with one client @@ -962,4 +1016,1048 @@ func TestFunnelClientsPersistence(t *testing.T) { } } }) + + t.Run("strict_mode_file_handling", func(t *testing.T) { + tmpDir := t.TempDir() + + // Test strict mode uses oauth-clients.json + srv1 := setupTestServer(t, true) + srv1.rootPath = tmpDir + srv1.funnelClients["oauth-client"] = &funnelClient{ + ID: "oauth-client", + Secret: "oauth-secret", + Name: "OAuth Client", + RedirectURI: "https://oauth.example.com/callback", + } + + // Test storeFunnelClientsLocked in strict mode + srv1.mu.Lock() + err := srv1.storeFunnelClientsLocked() + srv1.mu.Unlock() + + if err != nil { + t.Fatalf("failed to store clients in strict mode: %v", err) + } + + // Verify oauth-clients.json was created + oauthPath := tmpDir + "/" + oauthClientsFile + if _, err := os.Stat(oauthPath); err != nil { + t.Errorf("expected oauth-clients.json to be created: %v", err) + } + + // Verify oidc-funnel-clients.json was NOT created + funnelPath := tmpDir + "/" + funnelClientsFile + if _, err := os.Stat(funnelPath); !os.IsNotExist(err) { + t.Error("expected oidc-funnel-clients.json NOT to be created in strict mode") + } + }) + + t.Run("non_strict_mode_file_handling", func(t *testing.T) { + tmpDir := t.TempDir() + + // Test non-strict mode uses oidc-funnel-clients.json + srv1 := setupTestServer(t, false) + srv1.rootPath = tmpDir + srv1.funnelClients["funnel-client"] = &funnelClient{ + ID: "funnel-client", + Secret: "funnel-secret", + Name: "Funnel Client", + RedirectURI: "https://funnel.example.com/callback", + } + + // Test storeFunnelClientsLocked in non-strict mode + 
srv1.mu.Lock() + err := srv1.storeFunnelClientsLocked() + srv1.mu.Unlock() + + if err != nil { + t.Fatalf("failed to store clients in non-strict mode: %v", err) + } + + // Verify oidc-funnel-clients.json was created + funnelPath := tmpDir + "/" + funnelClientsFile + if _, err := os.Stat(funnelPath); err != nil { + t.Errorf("expected oidc-funnel-clients.json to be created: %v", err) + } + + // Verify oauth-clients.json was NOT created + oauthPath := tmpDir + "/" + oauthClientsFile + if _, err := os.Stat(oauthPath); !os.IsNotExist(err) { + t.Error("expected oauth-clients.json NOT to be created in non-strict mode") + } + }) +} + +// Test helper functions for strict OAuth mode testing +func setupTestServer(t *testing.T, strictMode bool) *idpServer { + return setupTestServerWithClient(t, strictMode, nil) +} + +// setupTestServerWithClient creates a test server with an optional LocalClient. +// If lc is nil, the server will have no LocalClient (original behavior). +// If lc is provided, it will be used for WhoIs calls during testing. +func setupTestServerWithClient(t *testing.T, strictMode bool, lc *local.Client) *idpServer { + t.Helper() + + srv := &idpServer{ + allowInsecureRegistration: !strictMode, + code: make(map[string]*authRequest), + accessToken: make(map[string]*authRequest), + funnelClients: make(map[string]*funnelClient), + serverURL: "https://test.ts.net", + rootPath: t.TempDir(), + lc: lc, + } + + // Add a test client for funnel/strict mode testing + srv.funnelClients["test-client"] = &funnelClient{ + ID: "test-client", + Secret: "test-secret", + Name: "Test Client", + RedirectURI: "https://rp.example.com/callback", + } + + // Inject a working signer for token tests + srv.lazySigner.Set(oidcTestingSigner(t)) + + return srv +} + +func TestGetAllowInsecureRegistration(t *testing.T) { + tests := []struct { + name string + flagSet bool + flagValue bool + expectAllowInsecureRegistration bool + }{ + { + name: "flag explicitly set to false - insecure registration disabled (strict mode)", + flagSet: true, + flagValue: false, + expectAllowInsecureRegistration: false, + }, + { + name: "flag explicitly set to true - insecure registration enabled", + flagSet: true, + flagValue: true, + expectAllowInsecureRegistration: true, + }, + { + name: "flag unset - insecure registration enabled (default for backward compatibility)", + flagSet: false, + flagValue: false, // not used when unset + expectAllowInsecureRegistration: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Save original state + originalFlag := flagAllowInsecureRegistration + defer func() { + flagAllowInsecureRegistration = originalFlag + }() + + // Set up test state by creating a new BoolFlag and setting values + var b opt.Bool + flagAllowInsecureRegistration = opt.BoolFlag{Bool: &b} + if tt.flagSet { + flagAllowInsecureRegistration.Bool.Set(tt.flagValue) + } + // Note: when tt.flagSet is false, the Bool remains unset (which is what we want) + + got := getAllowInsecureRegistration() + if got != tt.expectAllowInsecureRegistration { + t.Errorf("getAllowInsecureRegistration() = %v, want %v", got, tt.expectAllowInsecureRegistration) + } + }) + } +} + +// TestMigrateOAuthClients verifies the migration from legacy funnel clients +// to OAuth clients. This migration is necessary when transitioning from +// non-strict to strict OAuth mode. 
The migration logic should: +// - Copy clients from oidc-funnel-clients.json to oauth-clients.json +// - Rename the old file to mark it as deprecated +// - Handle cases where files already exist or are missing +func TestMigrateOAuthClients(t *testing.T) { + tests := []struct { + name string + setupOldFile bool + setupNewFile bool + oldFileContent map[string]*funnelClient + newFileContent map[string]*funnelClient + expectError bool + expectNewFileExists bool + expectOldRenamed bool + }{ + { + name: "migrate from old file to new file", + setupOldFile: true, + oldFileContent: map[string]*funnelClient{ + "old-client": { + ID: "old-client", + Secret: "old-secret", + Name: "Old Client", + RedirectURI: "https://old.example.com/callback", + }, + }, + expectNewFileExists: true, + expectOldRenamed: true, + }, + { + name: "new file already exists - no migration", + setupNewFile: true, + newFileContent: map[string]*funnelClient{ + "existing-client": { + ID: "existing-client", + Secret: "existing-secret", + Name: "Existing Client", + RedirectURI: "https://existing.example.com/callback", + }, + }, + expectNewFileExists: true, + expectOldRenamed: false, + }, + { + name: "neither file exists - create empty new file", + expectNewFileExists: true, + expectOldRenamed: false, + }, + { + name: "both files exist - prefer new file", + setupOldFile: true, + setupNewFile: true, + oldFileContent: map[string]*funnelClient{ + "old-client": { + ID: "old-client", + Secret: "old-secret", + Name: "Old Client", + RedirectURI: "https://old.example.com/callback", + }, + }, + newFileContent: map[string]*funnelClient{ + "new-client": { + ID: "new-client", + Secret: "new-secret", + Name: "New Client", + RedirectURI: "https://new.example.com/callback", + }, + }, + expectNewFileExists: true, + expectOldRenamed: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rootPath := t.TempDir() + + // Setup old file if needed + if tt.setupOldFile { + oldData, err := json.Marshal(tt.oldFileContent) + if err != nil { + t.Fatalf("failed to marshal old file content: %v", err) + } + oldPath := rootPath + "/" + funnelClientsFile + if err := os.WriteFile(oldPath, oldData, 0600); err != nil { + t.Fatalf("failed to create old file: %v", err) + } + } + + // Setup new file if needed + if tt.setupNewFile { + newData, err := json.Marshal(tt.newFileContent) + if err != nil { + t.Fatalf("failed to marshal new file content: %v", err) + } + newPath := rootPath + "/" + oauthClientsFile + if err := os.WriteFile(newPath, newData, 0600); err != nil { + t.Fatalf("failed to create new file: %v", err) + } + } + + // Call migrateOAuthClients + resultPath, err := migrateOAuthClients(rootPath) + + if tt.expectError && err == nil { + t.Fatalf("expected error but got none") + } + if !tt.expectError && err != nil { + t.Fatalf("unexpected error: %v", err) + } + if tt.expectError { + return + } + + // Verify result path points to oauth-clients.json + expectedPath := filepath.Join(rootPath, oauthClientsFile) + if resultPath != expectedPath { + t.Errorf("expected result path %s, got %s", expectedPath, resultPath) + } + + // Verify new file exists if expected + if tt.expectNewFileExists { + if _, err := os.Stat(resultPath); err != nil { + t.Errorf("expected new file to exist at %s: %v", resultPath, err) + } + + // Verify content + data, err := os.ReadFile(resultPath) + if err != nil { + t.Fatalf("failed to read new file: %v", err) + } + + var clients map[string]*funnelClient + if err := json.Unmarshal(data, &clients); err != nil { + 
t.Fatalf("failed to unmarshal new file: %v", err) + } + + // Determine expected content + var expectedContent map[string]*funnelClient + if tt.setupNewFile { + expectedContent = tt.newFileContent + } else if tt.setupOldFile { + expectedContent = tt.oldFileContent + } else { + expectedContent = make(map[string]*funnelClient) + } + + if len(clients) != len(expectedContent) { + t.Errorf("expected %d clients, got %d", len(expectedContent), len(clients)) + } + + for id, expectedClient := range expectedContent { + actualClient, ok := clients[id] + if !ok { + t.Errorf("expected client %s not found", id) + continue + } + if actualClient.ID != expectedClient.ID || + actualClient.Secret != expectedClient.Secret || + actualClient.Name != expectedClient.Name || + actualClient.RedirectURI != expectedClient.RedirectURI { + t.Errorf("client %s mismatch: got %+v, want %+v", id, actualClient, expectedClient) + } + } + } + + // Verify old file renamed if expected + if tt.expectOldRenamed { + deprecatedPath := rootPath + "/" + deprecatedFunnelClientsFile + if _, err := os.Stat(deprecatedPath); err != nil { + t.Errorf("expected old file to be renamed to %s: %v", deprecatedPath, err) + } + + // Verify original old file is gone + oldPath := rootPath + "/" + funnelClientsFile + if _, err := os.Stat(oldPath); !os.IsNotExist(err) { + t.Errorf("expected old file %s to be removed", oldPath) + } + } + }) + } +} + +// TestGetConfigFilePath verifies backward compatibility for config file location. +// The function must check current directory first (legacy deployments) before +// falling back to rootPath (new installations) to prevent breaking existing +// tsidp deployments that have config files in unexpected locations. +func TestGetConfigFilePath(t *testing.T) { + tests := []struct { + name string + fileName string + createInCwd bool + createInRoot bool + expectInCwd bool + expectError bool + }{ + { + name: "file exists in current directory - use current directory", + fileName: "test-config.json", + createInCwd: true, + expectInCwd: true, + }, + { + name: "file does not exist - use root path", + fileName: "test-config.json", + createInCwd: false, + expectInCwd: false, + }, + { + name: "file exists in both - prefer current directory", + fileName: "test-config.json", + createInCwd: true, + createInRoot: true, + expectInCwd: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create temporary directories + rootPath := t.TempDir() + originalWd, err := os.Getwd() + if err != nil { + t.Fatalf("failed to get working directory: %v", err) + } + + // Create a temporary working directory + tmpWd := t.TempDir() + if err := os.Chdir(tmpWd); err != nil { + t.Fatalf("failed to change to temp directory: %v", err) + } + defer func() { + os.Chdir(originalWd) + }() + + // Setup files as needed + if tt.createInCwd { + if err := os.WriteFile(tt.fileName, []byte("{}"), 0600); err != nil { + t.Fatalf("failed to create file in cwd: %v", err) + } + } + if tt.createInRoot { + rootFilePath := filepath.Join(rootPath, tt.fileName) + if err := os.WriteFile(rootFilePath, []byte("{}"), 0600); err != nil { + t.Fatalf("failed to create file in root: %v", err) + } + } + + // Call getConfigFilePath + resultPath, err := getConfigFilePath(rootPath, tt.fileName) + + if tt.expectError && err == nil { + t.Fatalf("expected error but got none") + } + if !tt.expectError && err != nil { + t.Fatalf("unexpected error: %v", err) + } + if tt.expectError { + return + } + + // Verify result + if tt.expectInCwd { + if resultPath != 
tt.fileName { + t.Errorf("expected path %s, got %s", tt.fileName, resultPath) + } + } else { + expectedPath := filepath.Join(rootPath, tt.fileName) + if resultPath != expectedPath { + t.Errorf("expected path %s, got %s", expectedPath, resultPath) + } + } + }) + } +} + +// TestAuthorizeStrictMode verifies OAuth authorization endpoint security and validation logic. +// Tests both the security boundary (funnel rejection) and the business logic (strict mode validation). +func TestAuthorizeStrictMode(t *testing.T) { + tests := []struct { + name string + strictMode bool + clientID string + redirectURI string + state string + nonce string + setupClient bool + clientRedirect string + useFunnel bool // whether to simulate funnel request + mockWhoIsError bool // whether to make WhoIs return an error + expectError bool + expectCode int + expectRedirect bool + }{ + // Security boundary test: funnel rejection + { + name: "funnel requests are always rejected for security", + strictMode: true, + clientID: "test-client", + redirectURI: "https://rp.example.com/callback", + state: "random-state", + nonce: "random-nonce", + setupClient: true, + clientRedirect: "https://rp.example.com/callback", + useFunnel: true, + expectError: true, + expectCode: http.StatusUnauthorized, + }, + + // Strict mode parameter validation tests (non-funnel) + { + name: "strict mode - missing client_id", + strictMode: true, + clientID: "", + redirectURI: "https://rp.example.com/callback", + useFunnel: false, + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "strict mode - missing redirect_uri", + strictMode: true, + clientID: "test-client", + redirectURI: "", + useFunnel: false, + expectError: true, + expectCode: http.StatusBadRequest, + }, + + // Strict mode client validation tests (non-funnel) + { + name: "strict mode - invalid client_id", + strictMode: true, + clientID: "invalid-client", + redirectURI: "https://rp.example.com/callback", + setupClient: false, + useFunnel: false, + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "strict mode - redirect_uri mismatch", + strictMode: true, + clientID: "test-client", + redirectURI: "https://wrong.example.com/callback", + setupClient: true, + clientRedirect: "https://rp.example.com/callback", + useFunnel: false, + expectError: true, + expectCode: http.StatusBadRequest, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srv := setupTestServer(t, tt.strictMode) + + // For non-funnel tests, we'll test the parameter validation logic + // without needing to mock WhoIs, since the validation happens before WhoIs calls + + // Setup client if needed + if tt.setupClient { + srv.funnelClients["test-client"] = &funnelClient{ + ID: "test-client", + Secret: "test-secret", + Name: "Test Client", + RedirectURI: tt.clientRedirect, + } + } else if !tt.strictMode { + // For non-strict mode tests that don't need a specific client setup + // but might reference one, clear the default client + delete(srv.funnelClients, "test-client") + } + + // Create request + reqURL := "/authorize" + if !tt.strictMode { + // In non-strict mode, use the node-specific endpoint + reqURL = "/authorize/123" + } + + query := url.Values{} + if tt.clientID != "" { + query.Set("client_id", tt.clientID) + } + if tt.redirectURI != "" { + query.Set("redirect_uri", tt.redirectURI) + } + if tt.state != "" { + query.Set("state", tt.state) + } + if tt.nonce != "" { + query.Set("nonce", tt.nonce) + } + + reqURL += "?" 
+ query.Encode() + req := httptest.NewRequest("GET", reqURL, nil) + req.RemoteAddr = "127.0.0.1:12345" + + // Set funnel header only when explicitly testing funnel behavior + if tt.useFunnel { + req.Header.Set("Tailscale-Funnel-Request", "true") + } + + rr := httptest.NewRecorder() + srv.authorize(rr, req) + + if tt.expectError { + if rr.Code != tt.expectCode { + t.Errorf("expected status code %d, got %d: %s", tt.expectCode, rr.Code, rr.Body.String()) + } + } else if tt.expectRedirect { + if rr.Code != http.StatusFound { + t.Errorf("expected redirect (302), got %d: %s", rr.Code, rr.Body.String()) + } + + location := rr.Header().Get("Location") + if location == "" { + t.Error("expected Location header in redirect response") + } else { + // Parse the redirect URL to verify it contains a code + redirectURL, err := url.Parse(location) + if err != nil { + t.Errorf("failed to parse redirect URL: %v", err) + } else { + code := redirectURL.Query().Get("code") + if code == "" { + t.Error("expected 'code' parameter in redirect URL") + } + + // Verify state is preserved if provided + if tt.state != "" { + returnedState := redirectURL.Query().Get("state") + if returnedState != tt.state { + t.Errorf("expected state '%s', got '%s'", tt.state, returnedState) + } + } + + // Verify the auth request was stored + srv.mu.Lock() + ar, ok := srv.code[code] + srv.mu.Unlock() + + if !ok { + t.Error("expected authorization request to be stored") + } else { + if ar.clientID != tt.clientID { + t.Errorf("expected clientID '%s', got '%s'", tt.clientID, ar.clientID) + } + if ar.redirectURI != tt.redirectURI { + t.Errorf("expected redirectURI '%s', got '%s'", tt.redirectURI, ar.redirectURI) + } + if ar.nonce != tt.nonce { + t.Errorf("expected nonce '%s', got '%s'", tt.nonce, ar.nonce) + } + } + } + } + } else { + t.Errorf("unexpected test case: not expecting error or redirect") + } + }) + } +} + +// TestServeTokenWithClientValidation verifies OAuth token endpoint security in both strict and non-strict modes. 
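+// Here "strict mode" means the server is constructed with allowInsecureRegistration
+// set to false (see setupTestServer).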
+// In strict mode, the token endpoint must: +// - Require and validate client credentials (client_id + client_secret) +// - Only accept tokens from registered funnel clients +// - Validate that redirect_uri matches the registered client +// - Support both form-based and HTTP Basic authentication for client credentials +func TestServeTokenWithClientValidation(t *testing.T) { + tests := []struct { + name string + strictMode bool + method string + grantType string + code string + clientID string + clientSecret string + redirectURI string + useBasicAuth bool + setupAuthRequest bool + authRequestClient string + authRequestRedirect string + expectError bool + expectCode int + expectIDToken bool + }{ + { + name: "strict mode - valid token exchange with form credentials", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + clientID: "test-client", + clientSecret: "test-secret", + redirectURI: "https://rp.example.com/callback", + setupAuthRequest: true, + authRequestClient: "test-client", + authRequestRedirect: "https://rp.example.com/callback", + expectIDToken: true, + }, + { + name: "strict mode - valid token exchange with basic auth", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + redirectURI: "https://rp.example.com/callback", + useBasicAuth: true, + clientID: "test-client", + clientSecret: "test-secret", + setupAuthRequest: true, + authRequestClient: "test-client", + authRequestRedirect: "https://rp.example.com/callback", + expectIDToken: true, + }, + { + name: "strict mode - missing client credentials", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + redirectURI: "https://rp.example.com/callback", + setupAuthRequest: true, + authRequestClient: "test-client", + authRequestRedirect: "https://rp.example.com/callback", + expectError: true, + expectCode: http.StatusUnauthorized, + }, + { + name: "strict mode - client_id mismatch", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + clientID: "wrong-client", + clientSecret: "test-secret", + redirectURI: "https://rp.example.com/callback", + setupAuthRequest: true, + authRequestClient: "test-client", + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "strict mode - invalid client secret", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + clientID: "test-client", + clientSecret: "wrong-secret", + redirectURI: "https://rp.example.com/callback", + setupAuthRequest: true, + authRequestClient: "test-client", + authRequestRedirect: "https://rp.example.com/callback", + expectError: true, + expectCode: http.StatusUnauthorized, + }, + { + name: "strict mode - redirect_uri mismatch", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + clientID: "test-client", + clientSecret: "test-secret", + redirectURI: "https://wrong.example.com/callback", + setupAuthRequest: true, + authRequestClient: "test-client", + authRequestRedirect: "https://rp.example.com/callback", + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "non-strict mode - no client validation required", + strictMode: false, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + redirectURI: "https://rp.example.com/callback", + setupAuthRequest: true, + authRequestRedirect: "https://rp.example.com/callback", + expectIDToken: true, + }, + } + + for _, tt := 
range tests { + t.Run(tt.name, func(t *testing.T) { + srv := setupTestServer(t, tt.strictMode) + + // Setup authorization request if needed + if tt.setupAuthRequest { + now := time.Now() + profile := &tailcfg.UserProfile{ + LoginName: "alice@example.com", + DisplayName: "Alice Example", + ProfilePicURL: "https://example.com/alice.jpg", + } + node := &tailcfg.Node{ + ID: 123, + Name: "test-node.test.ts.net.", + User: 456, + Key: key.NodePublic{}, + Cap: 1, + DiscoKey: key.DiscoPublic{}, + } + remoteUser := &apitype.WhoIsResponse{ + Node: node, + UserProfile: profile, + CapMap: tailcfg.PeerCapMap{}, + } + + var funnelClientPtr *funnelClient + if tt.strictMode && tt.authRequestClient != "" { + funnelClientPtr = &funnelClient{ + ID: tt.authRequestClient, + Secret: "test-secret", + Name: "Test Client", + RedirectURI: tt.authRequestRedirect, + } + srv.funnelClients[tt.authRequestClient] = funnelClientPtr + } + + srv.code["valid-code"] = &authRequest{ + clientID: tt.authRequestClient, + nonce: "nonce123", + redirectURI: tt.authRequestRedirect, + validTill: now.Add(5 * time.Minute), + remoteUser: remoteUser, + localRP: !tt.strictMode, + funnelRP: funnelClientPtr, + } + } + + // Create form data + form := url.Values{} + form.Set("grant_type", tt.grantType) + form.Set("code", tt.code) + form.Set("redirect_uri", tt.redirectURI) + + if !tt.useBasicAuth { + if tt.clientID != "" { + form.Set("client_id", tt.clientID) + } + if tt.clientSecret != "" { + form.Set("client_secret", tt.clientSecret) + } + } + + req := httptest.NewRequest(tt.method, "/token", strings.NewReader(form.Encode())) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.RemoteAddr = "127.0.0.1:12345" + + if tt.useBasicAuth && tt.clientID != "" && tt.clientSecret != "" { + req.SetBasicAuth(tt.clientID, tt.clientSecret) + } + + rr := httptest.NewRecorder() + srv.serveToken(rr, req) + + if tt.expectError { + if rr.Code != tt.expectCode { + t.Errorf("expected status code %d, got %d: %s", tt.expectCode, rr.Code, rr.Body.String()) + } + } else if tt.expectIDToken { + if rr.Code != http.StatusOK { + t.Errorf("expected 200 OK, got %d: %s", rr.Code, rr.Body.String()) + } + + var resp struct { + IDToken string `json:"id_token"` + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + } + + if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil { + t.Fatalf("failed to unmarshal response: %v", err) + } + + if resp.IDToken == "" { + t.Error("expected id_token in response") + } + if resp.AccessToken == "" { + t.Error("expected access_token in response") + } + if resp.TokenType != "Bearer" { + t.Errorf("expected token_type 'Bearer', got '%s'", resp.TokenType) + } + if resp.ExpiresIn != 300 { + t.Errorf("expected expires_in 300, got %d", resp.ExpiresIn) + } + + // Verify access token was stored + srv.mu.Lock() + _, ok := srv.accessToken[resp.AccessToken] + srv.mu.Unlock() + + if !ok { + t.Error("expected access token to be stored") + } + + // Verify authorization code was consumed + srv.mu.Lock() + _, ok = srv.code[tt.code] + srv.mu.Unlock() + + if ok { + t.Error("expected authorization code to be consumed") + } + } + }) + } +} + +// TestServeUserInfoWithClientValidation verifies UserInfo endpoint security in both strict and non-strict modes. 
+// In strict mode, the UserInfo endpoint must: +// - Validate that access tokens are associated with registered clients +// - Reject tokens for clients that have been deleted/unregistered +// - Enforce token expiration properly +// - Return appropriate user claims based on client capabilities +func TestServeUserInfoWithClientValidation(t *testing.T) { + tests := []struct { + name string + strictMode bool + setupToken bool + setupClient bool + clientID string + token string + tokenValidTill time.Time + expectError bool + expectCode int + expectUserInfo bool + }{ + { + name: "strict mode - valid token with existing client", + strictMode: true, + setupToken: true, + setupClient: true, + clientID: "test-client", + token: "valid-token", + tokenValidTill: time.Now().Add(5 * time.Minute), + expectUserInfo: true, + }, + { + name: "strict mode - valid token but client no longer exists", + strictMode: true, + setupToken: true, + setupClient: false, + clientID: "deleted-client", + token: "valid-token", + tokenValidTill: time.Now().Add(5 * time.Minute), + expectError: true, + expectCode: http.StatusUnauthorized, + }, + { + name: "strict mode - expired token", + strictMode: true, + setupToken: true, + setupClient: true, + clientID: "test-client", + token: "expired-token", + tokenValidTill: time.Now().Add(-5 * time.Minute), + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "strict mode - invalid token", + strictMode: true, + setupToken: false, + token: "invalid-token", + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "strict mode - token without client association", + strictMode: true, + setupToken: true, + setupClient: false, + clientID: "", + token: "valid-token", + tokenValidTill: time.Now().Add(5 * time.Minute), + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "non-strict mode - no client validation required", + strictMode: false, + setupToken: true, + setupClient: false, + clientID: "", + token: "valid-token", + tokenValidTill: time.Now().Add(5 * time.Minute), + expectUserInfo: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srv := setupTestServer(t, tt.strictMode) + + // Setup client if needed + if tt.setupClient { + srv.funnelClients[tt.clientID] = &funnelClient{ + ID: tt.clientID, + Secret: "test-secret", + Name: "Test Client", + RedirectURI: "https://rp.example.com/callback", + } + } + + // Setup token if needed + if tt.setupToken { + profile := &tailcfg.UserProfile{ + LoginName: "alice@example.com", + DisplayName: "Alice Example", + ProfilePicURL: "https://example.com/alice.jpg", + } + node := &tailcfg.Node{ + ID: 123, + Name: "test-node.test.ts.net.", + User: 456, + Key: key.NodePublic{}, + Cap: 1, + DiscoKey: key.DiscoPublic{}, + } + remoteUser := &apitype.WhoIsResponse{ + Node: node, + UserProfile: profile, + CapMap: tailcfg.PeerCapMap{}, + } + + srv.accessToken[tt.token] = &authRequest{ + clientID: tt.clientID, + validTill: tt.tokenValidTill, + remoteUser: remoteUser, + } + } + + // Create request + req := httptest.NewRequest("GET", "/userinfo", nil) + req.Header.Set("Authorization", "Bearer "+tt.token) + req.RemoteAddr = "127.0.0.1:12345" + + rr := httptest.NewRecorder() + srv.serveUserInfo(rr, req) + + if tt.expectError { + if rr.Code != tt.expectCode { + t.Errorf("expected status code %d, got %d: %s", tt.expectCode, rr.Code, rr.Body.String()) + } + } else if tt.expectUserInfo { + if rr.Code != http.StatusOK { + t.Errorf("expected 200 OK, got %d: %s", rr.Code, 
rr.Body.String()) + } + + var resp map[string]any + if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil { + t.Fatalf("failed to parse JSON response: %v", err) + } + + // Check required fields + expectedFields := []string{"sub", "name", "email", "picture", "username"} + for _, field := range expectedFields { + if _, ok := resp[field]; !ok { + t.Errorf("expected field '%s' in user info response", field) + } + } + + // Verify specific values + if resp["name"] != "Alice Example" { + t.Errorf("expected name 'Alice Example', got '%v'", resp["name"]) + } + if resp["email"] != "alice@example.com" { + t.Errorf("expected email 'alice@example.com', got '%v'", resp["email"]) + } + if resp["username"] != "alice" { + t.Errorf("expected username 'alice', got '%v'", resp["username"]) + } + } + }) + } } From 3b68d607be1e3069e9ddbd99d85966e4f059c237 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 28 Aug 2025 21:29:11 -0700 Subject: [PATCH 1245/1708] wgengine/magicsock: drop DERP queue from head rather than tail If the DERP queue is full, drop the oldest item first, rather than the youngest, on the assumption that older data is more likely to be unanswerable. Updates tailscale/corp#31762 Signed-off-by: James Tucker --- wgengine/magicsock/derp.go | 4 ++-- wgengine/magicsock/magicsock.go | 32 +++++++++++++++++++++----------- 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index 9c60e4893..b5fc36bb8 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -91,7 +91,7 @@ func (c *Conn) fallbackDERPRegionForPeer(peer key.NodePublic) (regionID int) { type activeDerp struct { c *derphttp.Client cancel context.CancelFunc - writeCh chan<- derpWriteRequest + writeCh chan derpWriteRequest // lastWrite is the time of the last request for its write // channel (currently even if there was no write). // It is always non-nil and initialized to a non-zero Time. @@ -302,7 +302,7 @@ const derpWriteQueueDepth = 32 // // It returns nil if the network is down, the Conn is closed, or the regionID is // not known. -func (c *Conn) derpWriteChanForRegion(regionID int, peer key.NodePublic) chan<- derpWriteRequest { +func (c *Conn) derpWriteChanForRegion(regionID int, peer key.NodePublic) chan derpWriteRequest { if c.networkDown() { return nil } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a7f84e352..a11e8a1cd 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1642,18 +1642,27 @@ func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte, is // internal locks. pkt := bytes.Clone(b) - select { - case <-c.donec: - metricSendDERPErrorClosed.Add(1) - return false, errConnClosed - case ch <- derpWriteRequest{addr, pubKey, pkt, isDisco}: - metricSendDERPQueued.Add(1) - return true, nil - default: - metricSendDERPErrorQueue.Add(1) - // Too many writes queued. Drop packet. - return false, errDropDerpPacket + wr := derpWriteRequest{addr, pubKey, pkt, isDisco} + for range 3 { + select { + case <-c.donec: + metricSendDERPErrorClosed.Add(1) + return false, errConnClosed + case ch <- wr: + metricSendDERPQueued.Add(1) + return true, nil + default: + select { + case <-ch: + metricSendDERPDropped.Add(1) + default: + } + } } + // gave up after 3 write attempts + metricSendDERPErrorQueue.Add(1) + // Too many writes queued. Drop packet. 
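+	// Each failed attempt above tried to make room by discarding the oldest
+	// queued write (head drop); reaching this point means the queue stayed
+	// full regardless.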
+ return false, errDropDerpPacket } type receiveBatch struct { @@ -3937,6 +3946,7 @@ var ( metricSendDERPErrorChan = clientmetric.NewCounter("magicsock_send_derp_error_chan") metricSendDERPErrorClosed = clientmetric.NewCounter("magicsock_send_derp_error_closed") metricSendDERPErrorQueue = clientmetric.NewCounter("magicsock_send_derp_error_queue") + metricSendDERPDropped = clientmetric.NewCounter("magicsock_send_derp_dropped") metricSendUDP = clientmetric.NewAggregateCounter("magicsock_send_udp") metricSendUDPError = clientmetric.NewCounter("magicsock_send_udp_error") metricSendPeerRelay = clientmetric.NewAggregateCounter("magicsock_send_peer_relay") From 442f4758a9f4df88138eee2d24d282f0bb1f5c06 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Sun, 31 Aug 2025 12:30:17 -0400 Subject: [PATCH 1246/1708] .github/workflows: reviewing depaware.txt is unnecessary (#16989) @tailscale/dataplane almost never needs to review depaware.txt, when it is the only change to the DERP implementation. Related #16372 Updates #cleanup Signed-off-by: Simon Law --- .github/workflows/request-dataplane-review.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/request-dataplane-review.yml b/.github/workflows/request-dataplane-review.yml index 836fef6fb..d5ef78d17 100644 --- a/.github/workflows/request-dataplane-review.yml +++ b/.github/workflows/request-dataplane-review.yml @@ -8,6 +8,8 @@ on: - ".github/workflows/request-dataplane-review.yml" - "**/*derp*" - "**/derp*/**" + paths-ignore: + - "**/depaware.txt" jobs: request-dataplane-review: From 6d45fcfc931d78d9796a885544c0053a3d5f033e Mon Sep 17 00:00:00 2001 From: Simon Law Date: Sun, 31 Aug 2025 13:29:25 -0400 Subject: [PATCH 1247/1708] .github/workflows: reviewing depaware.txt is unnecessary (#16990) Apparently, #16989 introduced a bug in request-dataplane-review.yml: > you may only define one of `paths` and `paths-ignore` for a single event Related #16372 Updates #cleanup Signed-off-by: Simon Law --- .github/workflows/request-dataplane-review.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/request-dataplane-review.yml b/.github/workflows/request-dataplane-review.yml index d5ef78d17..4a86b0541 100644 --- a/.github/workflows/request-dataplane-review.yml +++ b/.github/workflows/request-dataplane-review.yml @@ -8,8 +8,7 @@ on: - ".github/workflows/request-dataplane-review.yml" - "**/*derp*" - "**/derp*/**" - paths-ignore: - - "**/depaware.txt" + - "!**/depaware.txt" jobs: request-dataplane-review: From cc532efc20004522b99e2d1c1029734205caec7f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 30 Aug 2025 08:02:35 -0700 Subject: [PATCH 1248/1708] util/syspolicy/*: move syspolicy keys to new const leaf "pkey" package This is step 1 of ~3, breaking up #14720 into reviewable chunks, with the aim to make syspolicy be a build-time configurable feature. In this first (very noisy) step, all the syspolicy string key constants move to a new constant-only (code-free) package. This will make future steps more reviewable, without this movement noise. There are no code or behavior changes here. The future steps of this series can be seen in #14720: removing global funcs from syspolicy resolution and using an interface that's plumbed around instead. Then adding build tags. 
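
Callers change mechanically from the old constants to the new package, for
example (from the cmd/tailscaled hunk below):

	-	v, _ := syspolicy.GetBoolean(syspolicy.EncryptState, false)
	+	v, _ := syspolicy.GetBoolean(pkey.EncryptState, false)
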
Updates #12614 Change-Id: If73bf2c28b9c9b1a408fe868b0b6a25b03eeabd1 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tailscaled/tailscaled.go | 3 +- cmd/tailscaled/tailscaled_windows.go | 6 +- cmd/tsidp/depaware.txt | 1 + control/controlclient/direct.go | 3 +- control/controlclient/sign_supported.go | 3 +- ipn/desktop/extension.go | 3 +- ipn/ipnauth/policy.go | 5 +- ipn/ipnlocal/c2n.go | 3 +- ipn/ipnlocal/local.go | 53 ++-- ipn/ipnlocal/local_test.go | 117 ++++---- ipn/prefs.go | 3 +- logpolicy/logpolicy.go | 3 +- net/dns/manager_windows.go | 5 +- posture/serialnumber_syspolicy.go | 3 +- tsnet/depaware.txt | 1 + .../tailscaled_deps_test_darwin.go | 1 + .../tailscaled_deps_test_freebsd.go | 1 + .../integration/tailscaled_deps_test_linux.go | 1 + .../tailscaled_deps_test_openbsd.go | 1 + .../tailscaled_deps_test_windows.go | 1 + util/syspolicy/handler.go | 9 +- util/syspolicy/internal/metrics/metrics.go | 7 +- .../internal/metrics/metrics_test.go | 3 +- util/syspolicy/pkey/pkey.go | 177 ++++++++++++ util/syspolicy/policy_keys.go | 253 +++--------------- util/syspolicy/policy_keys_test.go | 7 +- util/syspolicy/rsop/change_callbacks.go | 7 +- util/syspolicy/rsop/resultant_policy_test.go | 59 ++-- util/syspolicy/setting/key.go | 13 - util/syspolicy/setting/raw_item.go | 3 +- util/syspolicy/setting/setting.go | 13 +- util/syspolicy/setting/setting_test.go | 7 +- util/syspolicy/setting/snapshot.go | 25 +- util/syspolicy/setting/snapshot_test.go | 145 +++++----- util/syspolicy/source/env_policy_store.go | 15 +- .../syspolicy/source/env_policy_store_test.go | 5 +- util/syspolicy/source/policy_reader.go | 5 +- util/syspolicy/source/policy_reader_test.go | 9 +- util/syspolicy/source/policy_source.go | 9 +- util/syspolicy/source/policy_store_windows.go | 29 +- .../source/policy_store_windows_test.go | 7 +- util/syspolicy/source/test_store.go | 29 +- util/syspolicy/syspolicy.go | 21 +- util/syspolicy/syspolicy_test.go | 77 +++--- 48 files changed, 601 insertions(+), 554 deletions(-) create mode 100644 util/syspolicy/pkey/pkey.go delete mode 100644 util/syspolicy/setting/key.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 6b149e5f5..ccea25a8a 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -174,6 +174,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 4b1e4a1e4..a0214575b 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -955,6 +955,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from 
tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 02ffec0ea..7f09be33f 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -195,6 +195,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index c2d9f3d00..46efa5b21 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -432,6 +432,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 06d366aa6..f55535470 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -65,6 +65,7 @@ import ( "tailscale.com/util/multierr" "tailscale.com/util/osshare" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/version" "tailscale.com/version/distro" "tailscale.com/wgengine" @@ -1011,6 +1012,6 @@ func defaultEncryptState() bool { // (plan9/FreeBSD/etc). 
return false } - v, _ := syspolicy.GetBoolean(syspolicy.EncryptState, false) + v, _ := syspolicy.GetBoolean(pkey.EncryptState, false) return v } diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 1b5068892..2d4e71d3c 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -56,6 +56,7 @@ import ( "tailscale.com/types/logid" "tailscale.com/util/osdiag" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/winutil" "tailscale.com/util/winutil/gp" "tailscale.com/version" @@ -155,7 +156,7 @@ func runWindowsService(pol *logpolicy.Policy) error { if syslog, err := eventlog.Open(serviceName); err == nil { syslogf = func(format string, args ...any) { - if logSCMInteractions, _ := syspolicy.GetBoolean(syspolicy.LogSCMInteractions, false); logSCMInteractions { + if logSCMInteractions, _ := syspolicy.GetBoolean(pkey.LogSCMInteractions, false); logSCMInteractions { syslog.Info(0, fmt.Sprintf(format, args...)) } } @@ -389,8 +390,7 @@ func handleSessionChange(chgRequest svc.ChangeRequest) { if chgRequest.Cmd != svc.SessionChange || chgRequest.EventType != windows.WTS_SESSION_UNLOCK { return } - - if flushDNSOnSessionUnlock, _ := syspolicy.GetBoolean(syspolicy.FlushDNSOnSessionUnlock, false); flushDNSOnSessionUnlock { + if flushDNSOnSessionUnlock, _ := syspolicy.GetBoolean(pkey.FlushDNSOnSessionUnlock, false); flushDNSOnSessionUnlock { log.Printf("Received WTS_SESSION_UNLOCK event, initiating DNS flush.") go func() { err := dns.Flush() diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index e8bc2b254..f1e22efbf 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -384,6 +384,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/ipnlocal+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 78a86e935..cee938779 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -54,6 +54,7 @@ import ( "tailscale.com/util/multierr" "tailscale.com/util/singleflight" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/systemd" "tailscale.com/util/testenv" "tailscale.com/util/zstdframe" @@ -616,7 +617,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new return regen, opt.URL, nil, err } - tailnet, err := syspolicy.GetString(syspolicy.Tailnet, "") + tailnet, err := syspolicy.GetString(pkey.Tailnet, "") if err != nil { c.logf("unable to provide Tailnet field in register request. 
err: %v", err) } diff --git a/control/controlclient/sign_supported.go b/control/controlclient/sign_supported.go index a5d42ad7d..fab7cd16b 100644 --- a/control/controlclient/sign_supported.go +++ b/control/controlclient/sign_supported.go @@ -19,6 +19,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" ) // getMachineCertificateSubject returns the exact name of a Subject that needs @@ -31,7 +32,7 @@ import ( // // Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA" func getMachineCertificateSubject() string { - machineCertSubject, _ := syspolicy.GetString(syspolicy.MachineCertificateSubject, "") + machineCertSubject, _ := syspolicy.GetString(pkey.MachineCertificateSubject, "") return machineCertSubject } diff --git a/ipn/desktop/extension.go b/ipn/desktop/extension.go index f204a90de..15d239f89 100644 --- a/ipn/desktop/extension.go +++ b/ipn/desktop/extension.go @@ -19,6 +19,7 @@ import ( "tailscale.com/ipn/ipnext" "tailscale.com/types/logger" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" ) // featureName is the name of the feature implemented by this package. @@ -135,7 +136,7 @@ func (e *desktopSessionsExt) getBackgroundProfile(profiles ipnext.ProfileStore) e.mu.Lock() defer e.mu.Unlock() - if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); !alwaysOn { + if alwaysOn, _ := syspolicy.GetBoolean(pkey.AlwaysOn, false); !alwaysOn { // If the Always-On mode is disabled, there's no background profile // as far as the desktop session extension is concerned. return ipn.LoginProfileView{} diff --git a/ipn/ipnauth/policy.go b/ipn/ipnauth/policy.go index aa4ec4100..36004b293 100644 --- a/ipn/ipnauth/policy.go +++ b/ipn/ipnauth/policy.go @@ -11,6 +11,7 @@ import ( "tailscale.com/ipn" "tailscale.com/tailcfg" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" ) type actorWithPolicyChecks struct{ Actor } @@ -50,10 +51,10 @@ func (a actorWithPolicyChecks) CheckProfileAccess(profile ipn.LoginProfileView, // TODO(nickkhyl): unexport it when we move [ipn.Actor] implementations from [ipnserver] // and corp to this package. func CheckDisconnectPolicy(actor Actor, profile ipn.LoginProfileView, reason string, auditFn AuditLogFunc) error { - if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); !alwaysOn { + if alwaysOn, _ := syspolicy.GetBoolean(pkey.AlwaysOn, false); !alwaysOn { return nil } - if allowWithReason, _ := syspolicy.GetBoolean(syspolicy.AlwaysOnOverrideWithReason, false); !allowWithReason { + if allowWithReason, _ := syspolicy.GetBoolean(pkey.AlwaysOnOverrideWithReason, false); !allowWithReason { return errors.New("disconnect not allowed: always-on mode is enabled") } if reason == "" { diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 4b91c3cb9..8c3bf7b26 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -30,6 +30,7 @@ import ( "tailscale.com/util/goroutines" "tailscale.com/util/set" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/version" "tailscale.com/version/distro" ) @@ -342,7 +343,7 @@ func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http // this will first check syspolicy, MDM settings like Registry // on Windows or defaults on macOS. If they are not set, it falls // back to the cli-flag, `--posture-checking`. 
- choice, err := syspolicy.GetPreferenceOption(syspolicy.PostureChecking) + choice, err := syspolicy.GetPreferenceOption(pkey.PostureChecking) if err != nil { b.logf( "c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s", diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 43d7e1216..bcfb99b09 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -108,6 +108,7 @@ import ( "tailscale.com/util/set" "tailscale.com/util/slicesx" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/systemd" "tailscale.com/util/testenv" @@ -1762,51 +1763,51 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control } type preferencePolicyInfo struct { - key syspolicy.Key + key pkey.Key get func(ipn.PrefsView) bool set func(*ipn.Prefs, bool) } var preferencePolicies = []preferencePolicyInfo{ { - key: syspolicy.EnableIncomingConnections, + key: pkey.EnableIncomingConnections, // Allow Incoming (used by the UI) is the negation of ShieldsUp (used by the // backend), so this has to convert between the two conventions. get: func(p ipn.PrefsView) bool { return !p.ShieldsUp() }, set: func(p *ipn.Prefs, v bool) { p.ShieldsUp = !v }, }, { - key: syspolicy.EnableServerMode, + key: pkey.EnableServerMode, get: func(p ipn.PrefsView) bool { return p.ForceDaemon() }, set: func(p *ipn.Prefs, v bool) { p.ForceDaemon = v }, }, { - key: syspolicy.ExitNodeAllowLANAccess, + key: pkey.ExitNodeAllowLANAccess, get: func(p ipn.PrefsView) bool { return p.ExitNodeAllowLANAccess() }, set: func(p *ipn.Prefs, v bool) { p.ExitNodeAllowLANAccess = v }, }, { - key: syspolicy.EnableTailscaleDNS, + key: pkey.EnableTailscaleDNS, get: func(p ipn.PrefsView) bool { return p.CorpDNS() }, set: func(p *ipn.Prefs, v bool) { p.CorpDNS = v }, }, { - key: syspolicy.EnableTailscaleSubnets, + key: pkey.EnableTailscaleSubnets, get: func(p ipn.PrefsView) bool { return p.RouteAll() }, set: func(p *ipn.Prefs, v bool) { p.RouteAll = v }, }, { - key: syspolicy.CheckUpdates, + key: pkey.CheckUpdates, get: func(p ipn.PrefsView) bool { return p.AutoUpdate().Check }, set: func(p *ipn.Prefs, v bool) { p.AutoUpdate.Check = v }, }, { - key: syspolicy.ApplyUpdates, + key: pkey.ApplyUpdates, get: func(p ipn.PrefsView) bool { v, _ := p.AutoUpdate().Apply.Get(); return v }, set: func(p *ipn.Prefs, v bool) { p.AutoUpdate.Apply.Set(v) }, }, { - key: syspolicy.EnableRunExitNode, + key: pkey.EnableRunExitNode, get: func(p ipn.PrefsView) bool { return p.AdvertisesExitNode() }, set: func(p *ipn.Prefs, v bool) { p.SetAdvertiseExitNode(v) }, }, @@ -1817,13 +1818,13 @@ var preferencePolicies = []preferencePolicyInfo{ // // b.mu must be held. 
func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { - if controlURL, err := syspolicy.GetString(syspolicy.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { + if controlURL, err := syspolicy.GetString(pkey.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { prefs.ControlURL = controlURL anyChange = true } const sentinel = "HostnameDefaultValue" - hostnameFromPolicy, _ := syspolicy.GetString(syspolicy.Hostname, sentinel) + hostnameFromPolicy, _ := syspolicy.GetString(pkey.Hostname, sentinel) switch hostnameFromPolicy { case sentinel: // An empty string for this policy value means that the admin wants to delete @@ -1858,7 +1859,7 @@ func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { anyChange = true } - if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); alwaysOn && !b.overrideAlwaysOn && !prefs.WantRunning { + if alwaysOn, _ := syspolicy.GetBoolean(pkey.AlwaysOn, false); alwaysOn && !b.overrideAlwaysOn && !prefs.WantRunning { prefs.WantRunning = true anyChange = true } @@ -1882,7 +1883,7 @@ func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { // // b.mu must be held. func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { - if exitNodeIDStr, _ := syspolicy.GetString(syspolicy.ExitNodeID, ""); exitNodeIDStr != "" { + if exitNodeIDStr, _ := syspolicy.GetString(pkey.ExitNodeID, ""); exitNodeIDStr != "" { exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) // Try to parse the policy setting value as an "auto:"-prefixed [ipn.ExitNodeExpression], @@ -1923,7 +1924,7 @@ func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange prefs.ExitNodeIP = netip.Addr{} anyChange = true } - } else if exitNodeIPStr, _ := syspolicy.GetString(syspolicy.ExitNodeIP, ""); exitNodeIPStr != "" { + } else if exitNodeIPStr, _ := syspolicy.GetString(pkey.ExitNodeIP, ""); exitNodeIPStr != "" { if prefs.AutoExitNode != "" { prefs.AutoExitNode = "" // mutually exclusive with ExitNodeIP anyChange = true @@ -1970,7 +1971,7 @@ func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { // sysPolicyChanged is a callback triggered by syspolicy when it detects // a change in one or more syspolicy settings. func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { - if policy.HasChangedAnyOf(syspolicy.AlwaysOn, syspolicy.AlwaysOnOverrideWithReason) { + if policy.HasChangedAnyOf(pkey.AlwaysOn, pkey.AlwaysOnOverrideWithReason) { // If the AlwaysOn or the AlwaysOnOverrideWithReason policy has changed, // we should reset the overrideAlwaysOn flag, as the override might // no longer be valid. @@ -1979,7 +1980,7 @@ func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { b.mu.Unlock() } - if policy.HasChangedAnyOf(syspolicy.ExitNodeID, syspolicy.ExitNodeIP, syspolicy.AllowExitNodeOverride) { + if policy.HasChangedAnyOf(pkey.ExitNodeID, pkey.ExitNodeIP, pkey.AllowExitNodeOverride) { // Reset the exit node override if a policy that enforces exit node usage // or allows the user to override automatic exit node selection has changed. b.mu.Lock() @@ -1987,7 +1988,7 @@ func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { b.mu.Unlock() } - if policy.HasChanged(syspolicy.AllowedSuggestedExitNodes) { + if policy.HasChanged(pkey.AllowedSuggestedExitNodes) { b.refreshAllowedSuggestions() // Re-evaluate exit node suggestion now that the policy setting has changed. 
if _, err := b.SuggestExitNode(); err != nil && !errors.Is(err, ErrNoPreferredDERP) { @@ -2348,7 +2349,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { } if b.state != ipn.Running && b.conf == nil && opts.AuthKey == "" { - sysak, _ := syspolicy.GetString(syspolicy.AuthKey, "") + sysak, _ := syspolicy.GetString(pkey.AuthKey, "") if sysak != "" { b.logf("Start: setting opts.AuthKey by syspolicy, len=%v", len(sysak)) opts.AuthKey = strings.TrimSpace(sysak) @@ -4407,7 +4408,7 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn // Prevent users from changing exit node preferences // when exit node usage is managed by policy. if mp.ExitNodeIDSet || mp.ExitNodeIPSet || mp.AutoExitNodeSet { - isManaged, err := syspolicy.HasAnyOf(syspolicy.ExitNodeID, syspolicy.ExitNodeIP) + isManaged, err := syspolicy.HasAnyOf(pkey.ExitNodeID, pkey.ExitNodeIP) if err != nil { err = fmt.Errorf("policy check failed: %w", err) } else if isManaged { @@ -4415,7 +4416,7 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn // if permitted by [syspolicy.AllowExitNodeOverride]. // // Disabling exit node usage entirely is not allowed. - allowExitNodeOverride, _ := syspolicy.GetBoolean(syspolicy.AllowExitNodeOverride, false) + allowExitNodeOverride, _ := syspolicy.GetBoolean(pkey.AllowExitNodeOverride, false) if !allowExitNodeOverride || b.changeDisablesExitNodeLocked(prefs, mp) { err = errManagedByPolicy } @@ -4519,7 +4520,7 @@ func (b *LocalBackend) onEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs, o // mode on them until the policy changes, they switch to a different profile, etc. b.overrideAlwaysOn = true - if reconnectAfter, _ := syspolicy.GetDuration(syspolicy.ReconnectAfter, 0); reconnectAfter > 0 { + if reconnectAfter, _ := syspolicy.GetDuration(pkey.ReconnectAfter, 0); reconnectAfter > 0 { b.startReconnectTimerLocked(reconnectAfter) } } @@ -4530,7 +4531,7 @@ func (b *LocalBackend) onEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs, o b.overrideExitNodePolicy = false } if mp.AutoExitNodeSet || mp.ExitNodeIDSet || mp.ExitNodeIPSet { - if allowExitNodeOverride, _ := syspolicy.GetBoolean(syspolicy.AllowExitNodeOverride, false); allowExitNodeOverride { + if allowExitNodeOverride, _ := syspolicy.GetBoolean(pkey.AllowExitNodeOverride, false); allowExitNodeOverride { // If applying exit node policy settings to the new prefs results in no change, // the user is not overriding the policy. Otherwise, it is an override. 
b.overrideExitNodePolicy = b.applyExitNodeSysPolicyLocked(newPrefs.AsStruct()) @@ -7807,9 +7808,9 @@ type selectRegionFunc func(views.Slice[int]) int type selectNodeFunc func(nodes views.Slice[tailcfg.NodeView], last tailcfg.StableNodeID) tailcfg.NodeView func fillAllowedSuggestions() set.Set[tailcfg.StableNodeID] { - nodes, err := syspolicy.GetStringArray(syspolicy.AllowedSuggestedExitNodes, nil) + nodes, err := syspolicy.GetStringArray(pkey.AllowedSuggestedExitNodes, nil) if err != nil { - log.Printf("fillAllowedSuggestions: unable to look up %q policy: %v", syspolicy.AllowedSuggestedExitNodes, err) + log.Printf("fillAllowedSuggestions: unable to look up %q policy: %v", pkey.AllowedSuggestedExitNodes, err) return nil } if nodes == nil { @@ -8176,7 +8177,7 @@ func isAllowedAutoExitNodeID(exitNodeID tailcfg.StableNodeID) bool { if exitNodeID == "" { return false // an exit node is required } - if nodes, _ := syspolicy.GetStringArray(syspolicy.AllowedSuggestedExitNodes, nil); nodes != nil { + if nodes, _ := syspolicy.GetStringArray(pkey.AllowedSuggestedExitNodes, nil); nodes != nil { return slices.Contains(nodes, string(exitNodeID)) } @@ -8339,7 +8340,7 @@ func (b *LocalBackend) stateEncrypted() opt.Bool { // the Keychain. A future release will clean up the on-disk state // files. // TODO(#15830): always return true here once MacSys is fully migrated. - sp, _ := syspolicy.GetBoolean(syspolicy.EncryptState, false) + sp, _ := syspolicy.GetBoolean(pkey.EncryptState, false) return opt.NewBool(sp) default: // Probably self-compiled tailscaled, we don't use the Keychain diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 60b5b2c5b..2b83e47f8 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -62,6 +62,7 @@ import ( "tailscale.com/util/must" "tailscale.com/util/set" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" "tailscale.com/wgengine" @@ -1182,16 +1183,16 @@ func TestConfigureExitNode(t *testing.T) { // Configure policy settings, if any. store := source.NewTestStore(t) if tt.exitNodeIDPolicy != nil { - store.SetStrings(source.TestSettingOf(syspolicy.ExitNodeID, string(*tt.exitNodeIDPolicy))) + store.SetStrings(source.TestSettingOf(pkey.ExitNodeID, string(*tt.exitNodeIDPolicy))) } if tt.exitNodeIPPolicy != nil { - store.SetStrings(source.TestSettingOf(syspolicy.ExitNodeIP, tt.exitNodeIPPolicy.String())) + store.SetStrings(source.TestSettingOf(pkey.ExitNodeIP, tt.exitNodeIPPolicy.String())) } if tt.exitNodeAllowedIDs != nil { - store.SetStringLists(source.TestSettingOf(syspolicy.AllowedSuggestedExitNodes, toStrings(tt.exitNodeAllowedIDs))) + store.SetStringLists(source.TestSettingOf(pkey.AllowedSuggestedExitNodes, toStrings(tt.exitNodeAllowedIDs))) } if tt.exitNodeAllowOverride { - store.SetBooleans(source.TestSettingOf(syspolicy.AllowExitNodeOverride, true)) + store.SetBooleans(source.TestSettingOf(pkey.AllowExitNodeOverride, true)) } if store.IsEmpty() { // No syspolicy settings, so don't register a store. 
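For illustration, here is a minimal sketch of the test wiring this patch uses throughout local_test.go, assuming the helpers shown above keep the signatures in this diff; the store name "TestStore", the test name, and the "auto:any" value are illustrative only, not part of the change itself:

// Sketch only: registers an in-memory policy store keyed by the new pkey
// constants and reads a value back through the syspolicy getters.
package ipnlocal

import (
	"testing"

	"tailscale.com/util/syspolicy"
	"tailscale.com/util/syspolicy/pkey"
	"tailscale.com/util/syspolicy/setting"
	"tailscale.com/util/syspolicy/source"
)

func TestExamplePolicyWiring(t *testing.T) {
	// Register the well-known policy setting definitions so reads below resolve.
	syspolicy.RegisterWellKnownSettingsForTest(t)

	// Back the device scope with an in-memory test store keyed by pkey constants
	// (previously the syspolicy.Key aliases).
	policyStore := source.NewTestStoreOf(t, source.TestSettingOf(pkey.ExitNodeID, "auto:any"))
	syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore)

	// Policy reads now take pkey.Key values.
	if got, _ := syspolicy.GetString(pkey.ExitNodeID, ""); got != "auto:any" {
		t.Fatalf("ExitNodeID = %q; want %q", got, "auto:any")
	}
}
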
@@ -2890,10 +2891,10 @@ func TestSetExitNodeIDPolicy(t *testing.T) { policyStore := source.NewTestStore(t) if test.exitNodeIDKey { - policyStore.SetStrings(source.TestSettingOf(syspolicy.ExitNodeID, test.exitNodeID)) + policyStore.SetStrings(source.TestSettingOf(pkey.ExitNodeID, test.exitNodeID)) } if test.exitNodeIPKey { - policyStore.SetStrings(source.TestSettingOf(syspolicy.ExitNodeIP, test.exitNodeIP)) + policyStore.SetStrings(source.TestSettingOf(pkey.ExitNodeIP, test.exitNodeIP)) } syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) @@ -3029,7 +3030,7 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { syspolicy.RegisterWellKnownSettingsForTest(t) policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - syspolicy.ExitNodeID, "auto:any", + pkey.ExitNodeID, "auto:any", )) syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) @@ -3114,7 +3115,7 @@ func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { b.cc = cc syspolicy.RegisterWellKnownSettingsForTest(t) policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - syspolicy.ExitNodeID, "auto:any", + pkey.ExitNodeID, "auto:any", )) syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) peer1 := makePeer(1, withCap(26), withDERP(3), withSuggest(), withExitRoutes()) @@ -3223,7 +3224,7 @@ func TestSetControlClientStatusAutoExitNode(t *testing.T) { b := newTestLocalBackend(t) syspolicy.RegisterWellKnownSettingsForTest(t) policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - syspolicy.ExitNodeID, "auto:any", + pkey.ExitNodeID, "auto:any", )) syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) b.currentNode().SetNetMap(nm) @@ -3255,7 +3256,7 @@ func TestApplySysPolicy(t *testing.T) { prefs ipn.Prefs wantPrefs ipn.Prefs wantAnyChange bool - stringPolicies map[syspolicy.Key]string + stringPolicies map[pkey.Key]string }{ { name: "empty prefs without policies", @@ -3290,13 +3291,13 @@ func TestApplySysPolicy(t *testing.T) { RouteAll: true, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ControlURL: "1", - syspolicy.EnableIncomingConnections: "never", - syspolicy.EnableServerMode: "always", - syspolicy.ExitNodeAllowLANAccess: "always", - syspolicy.EnableTailscaleDNS: "always", - syspolicy.EnableTailscaleSubnets: "always", + stringPolicies: map[pkey.Key]string{ + pkey.ControlURL: "1", + pkey.EnableIncomingConnections: "never", + pkey.EnableServerMode: "always", + pkey.ExitNodeAllowLANAccess: "always", + pkey.EnableTailscaleDNS: "always", + pkey.EnableTailscaleSubnets: "always", }, }, { @@ -3311,13 +3312,13 @@ func TestApplySysPolicy(t *testing.T) { ShieldsUp: true, ForceDaemon: true, }, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ControlURL: "1", - syspolicy.EnableIncomingConnections: "never", - syspolicy.EnableServerMode: "always", - syspolicy.ExitNodeAllowLANAccess: "never", - syspolicy.EnableTailscaleDNS: "never", - syspolicy.EnableTailscaleSubnets: "never", + stringPolicies: map[pkey.Key]string{ + pkey.ControlURL: "1", + pkey.EnableIncomingConnections: "never", + pkey.EnableServerMode: "always", + pkey.ExitNodeAllowLANAccess: "never", + pkey.EnableTailscaleDNS: "never", + pkey.EnableTailscaleSubnets: "never", }, }, { @@ -3339,13 +3340,13 @@ func TestApplySysPolicy(t *testing.T) { RouteAll: true, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ControlURL: "2", - syspolicy.EnableIncomingConnections: "always", - 
syspolicy.EnableServerMode: "never", - syspolicy.ExitNodeAllowLANAccess: "always", - syspolicy.EnableTailscaleDNS: "never", - syspolicy.EnableTailscaleSubnets: "always", + stringPolicies: map[pkey.Key]string{ + pkey.ControlURL: "2", + pkey.EnableIncomingConnections: "always", + pkey.EnableServerMode: "never", + pkey.ExitNodeAllowLANAccess: "always", + pkey.EnableTailscaleDNS: "never", + pkey.EnableTailscaleSubnets: "always", }, }, { @@ -3366,12 +3367,12 @@ func TestApplySysPolicy(t *testing.T) { CorpDNS: true, RouteAll: true, }, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.EnableIncomingConnections: "user-decides", - syspolicy.EnableServerMode: "user-decides", - syspolicy.ExitNodeAllowLANAccess: "user-decides", - syspolicy.EnableTailscaleDNS: "user-decides", - syspolicy.EnableTailscaleSubnets: "user-decides", + stringPolicies: map[pkey.Key]string{ + pkey.EnableIncomingConnections: "user-decides", + pkey.EnableServerMode: "user-decides", + pkey.ExitNodeAllowLANAccess: "user-decides", + pkey.EnableTailscaleDNS: "user-decides", + pkey.EnableTailscaleSubnets: "user-decides", }, }, { @@ -3380,8 +3381,8 @@ func TestApplySysPolicy(t *testing.T) { ControlURL: "set", }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ControlURL: "set", + stringPolicies: map[pkey.Key]string{ + pkey.ControlURL: "set", }, }, { @@ -3399,8 +3400,8 @@ func TestApplySysPolicy(t *testing.T) { }, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ApplyUpdates: "always", + stringPolicies: map[pkey.Key]string{ + pkey.ApplyUpdates: "always", }, }, { @@ -3418,8 +3419,8 @@ func TestApplySysPolicy(t *testing.T) { }, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ApplyUpdates: "never", + stringPolicies: map[pkey.Key]string{ + pkey.ApplyUpdates: "never", }, }, { @@ -3437,8 +3438,8 @@ func TestApplySysPolicy(t *testing.T) { }, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.CheckUpdates: "always", + stringPolicies: map[pkey.Key]string{ + pkey.CheckUpdates: "always", }, }, { @@ -3456,8 +3457,8 @@ func TestApplySysPolicy(t *testing.T) { }, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.CheckUpdates: "never", + stringPolicies: map[pkey.Key]string{ + pkey.CheckUpdates: "never", }, }, } @@ -5574,7 +5575,7 @@ func TestFillAllowedSuggestions(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - syspolicy.AllowedSuggestedExitNodes, tt.allowPolicy, + pkey.AllowedSuggestedExitNodes, tt.allowPolicy, )) syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) @@ -6480,23 +6481,23 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { }{ { name: "ShieldsUp/True", - stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableIncomingConnections, "never")}, + stringSettings: []source.TestSetting[string]{source.TestSettingOf(pkey.EnableIncomingConnections, "never")}, want: wantPrefsChanges(fieldChange{"ShieldsUp", true}), }, { name: "ShieldsUp/False", initialPrefs: &ipn.Prefs{ShieldsUp: true}, - stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableIncomingConnections, "always")}, + stringSettings: []source.TestSetting[string]{source.TestSettingOf(pkey.EnableIncomingConnections, "always")}, want: wantPrefsChanges(fieldChange{"ShieldsUp", false}), }, { name: "ExitNodeID", - stringSettings: 
[]source.TestSetting[string]{source.TestSettingOf(syspolicy.ExitNodeID, "foo")}, + stringSettings: []source.TestSetting[string]{source.TestSettingOf(pkey.ExitNodeID, "foo")}, want: wantPrefsChanges(fieldChange{"ExitNodeID", tailcfg.StableNodeID("foo")}), }, { name: "EnableRunExitNode", - stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableRunExitNode, "always")}, + stringSettings: []source.TestSetting[string]{source.TestSettingOf(pkey.EnableRunExitNode, "always")}, want: wantPrefsChanges(fieldChange{"AdvertiseRoutes", []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}}), }, { @@ -6505,9 +6506,9 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { ExitNodeAllowLANAccess: true, }, stringSettings: []source.TestSetting[string]{ - source.TestSettingOf(syspolicy.EnableServerMode, "always"), - source.TestSettingOf(syspolicy.ExitNodeAllowLANAccess, "never"), - source.TestSettingOf(syspolicy.ExitNodeIP, "127.0.0.1"), + source.TestSettingOf(pkey.EnableServerMode, "always"), + source.TestSettingOf(pkey.ExitNodeAllowLANAccess, "never"), + source.TestSettingOf(pkey.ExitNodeIP, "127.0.0.1"), }, want: wantPrefsChanges( fieldChange{"ForceDaemon", true}, @@ -6523,9 +6524,9 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { AdvertiseRoutes: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}, }, stringSettings: []source.TestSetting[string]{ - source.TestSettingOf(syspolicy.EnableTailscaleDNS, "always"), - source.TestSettingOf(syspolicy.ExitNodeID, "foo"), - source.TestSettingOf(syspolicy.EnableRunExitNode, "always"), + source.TestSettingOf(pkey.EnableTailscaleDNS, "always"), + source.TestSettingOf(pkey.ExitNodeID, "foo"), + source.TestSettingOf(pkey.EnableRunExitNode, "always"), }, want: nil, // syspolicy settings match the preferences; no change notification is expected. }, diff --git a/ipn/prefs.go b/ipn/prefs.go index 2eb0ccf0c..4c049688c 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -29,6 +29,7 @@ import ( "tailscale.com/types/views" "tailscale.com/util/dnsname" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/version" ) @@ -726,7 +727,7 @@ func (p PrefsView) ControlURLOrDefault() string { // If not configured, or if the configured value is a legacy name equivalent to // the default, then DefaultControlURL is returned instead. 
func (p *Prefs) ControlURLOrDefault() string { - controlURL, err := syspolicy.GetString(syspolicy.ControlURL, p.ControlURL) + controlURL, err := syspolicy.GetString(pkey.ControlURL, p.ControlURL) if err != nil { controlURL = p.ControlURL } diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index f5c475712..295dc6fff 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -52,6 +52,7 @@ import ( "tailscale.com/util/must" "tailscale.com/util/racebuild" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/testenv" "tailscale.com/version" "tailscale.com/version/distro" @@ -65,7 +66,7 @@ var getLogTargetOnce struct { func getLogTarget() string { getLogTargetOnce.Do(func() { envTarget, _ := os.LookupEnv("TS_LOG_TARGET") - getLogTargetOnce.v, _ = syspolicy.GetString(syspolicy.LogTarget, envTarget) + getLogTargetOnce.v, _ = syspolicy.GetString(pkey.LogTarget, envTarget) }) return getLogTargetOnce.v diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index 6ed5d3ba6..d1cec2a00 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -30,6 +30,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/dnsname" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/winutil" @@ -508,7 +509,7 @@ func (m *windowsManager) Close() error { // sysPolicyChanged is a callback triggered by [syspolicy] when it detects // a change in one or more syspolicy settings. func (m *windowsManager) sysPolicyChanged(policy *rsop.PolicyChange) { - if policy.HasChanged(syspolicy.EnableDNSRegistration) { + if policy.HasChanged(pkey.EnableDNSRegistration) { m.reconfigureDNSRegistration() } } @@ -520,7 +521,7 @@ func (m *windowsManager) reconfigureDNSRegistration() { // Disable DNS registration by default (if the policy setting is not configured). // This is primarily for historical reasons and to avoid breaking existing // setups that rely on this behavior. - enableDNSRegistration, err := syspolicy.GetPreferenceOptionOrDefault(syspolicy.EnableDNSRegistration, setting.NeverByPolicy) + enableDNSRegistration, err := syspolicy.GetPreferenceOptionOrDefault(pkey.EnableDNSRegistration, setting.NeverByPolicy) if err != nil { m.logf("error getting DNSRegistration policy setting: %v", err) // non-fatal; we'll use the default } diff --git a/posture/serialnumber_syspolicy.go b/posture/serialnumber_syspolicy.go index d6491ff21..5123d561d 100644 --- a/posture/serialnumber_syspolicy.go +++ b/posture/serialnumber_syspolicy.go @@ -10,13 +10,14 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" ) // GetSerialNumbers returns the serial number of the device as reported by an // MDM solution. It requires configuration via the DeviceSerialNumber system policy. // This is the only way to gather serial numbers on iOS, tvOS and Android. 
func GetSerialNumbers(_ logger.Logf) ([]string, error) { - s, err := syspolicy.GetString(syspolicy.DeviceSerialNumber, "") + s, err := syspolicy.GetString(pkey.DeviceSerialNumber, "") if err != nil { return nil, fmt.Errorf("failed to get serial number from MDM: %v", err) } diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index aea6baf93..bdf90c9a8 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -379,6 +379,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/ipnlocal+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index a73c6ebf6..c8a0bb274 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -52,6 +52,7 @@ import ( _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy" + _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index a73c6ebf6..c8a0bb274 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -52,6 +52,7 @@ import ( _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy" + _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index a73c6ebf6..c8a0bb274 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -52,6 +52,7 @@ import ( _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy" + _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index a73c6ebf6..c8a0bb274 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -52,6 +52,7 @@ import ( _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy" + _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index b5919b962..c9a1cd0cf 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -63,6 +63,7 @@ import ( _ "tailscale.com/util/osdiag" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy" + _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/util/winutil" _ 
"tailscale.com/util/winutil/gp" _ "tailscale.com/version" diff --git a/util/syspolicy/handler.go b/util/syspolicy/handler.go index c4bfd9de9..cdf32a7f7 100644 --- a/util/syspolicy/handler.go +++ b/util/syspolicy/handler.go @@ -4,6 +4,7 @@ package syspolicy import ( + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" @@ -85,22 +86,22 @@ func (s handlerStore) RegisterChangeCallback(callback func()) (unregister func() } // ReadString implements [source.Store]. -func (s handlerStore) ReadString(key setting.Key) (string, error) { +func (s handlerStore) ReadString(key pkey.Key) (string, error) { return s.h.ReadString(string(key)) } // ReadUInt64 implements [source.Store]. -func (s handlerStore) ReadUInt64(key setting.Key) (uint64, error) { +func (s handlerStore) ReadUInt64(key pkey.Key) (uint64, error) { return s.h.ReadUInt64(string(key)) } // ReadBoolean implements [source.Store]. -func (s handlerStore) ReadBoolean(key setting.Key) (bool, error) { +func (s handlerStore) ReadBoolean(key pkey.Key) (bool, error) { return s.h.ReadBoolean(string(key)) } // ReadStringArray implements [source.Store]. -func (s handlerStore) ReadStringArray(key setting.Key) ([]string, error) { +func (s handlerStore) ReadStringArray(key pkey.Key) ([]string, error) { return s.h.ReadStringArray(string(key)) } diff --git a/util/syspolicy/internal/metrics/metrics.go b/util/syspolicy/internal/metrics/metrics.go index 43f2a285a..8f2745673 100644 --- a/util/syspolicy/internal/metrics/metrics.go +++ b/util/syspolicy/internal/metrics/metrics.go @@ -17,6 +17,7 @@ import ( "tailscale.com/util/slicesx" "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/testenv" ) @@ -209,7 +210,7 @@ func scopeMetrics(origin *setting.Origin) *policyScopeMetrics { var ( settingMetricsMu sync.RWMutex - settingMetricsMap map[setting.Key]*settingMetrics + settingMetricsMap map[pkey.Key]*settingMetrics ) func settingMetricsFor(setting *setting.Definition) *settingMetrics { @@ -283,8 +284,8 @@ func SetHooksForTest(tb testenv.TB, addMetric, setMetric metricFn) { lazyUserMetrics.SetForTest(tb, newScopeMetrics(setting.UserSetting), nil) } -func newSettingMetric(key setting.Key, scope setting.Scope, suffix string, typ clientmetric.Type) metric { - name := strings.ReplaceAll(string(key), string(setting.KeyPathSeparator), "_") +func newSettingMetric(key pkey.Key, scope setting.Scope, suffix string, typ clientmetric.Type) metric { + name := strings.ReplaceAll(string(key), string(pkey.KeyPathSeparator), "_") name = strings.ReplaceAll(name, ".", "_") // dots are not allowed in metric names return newMetric([]string{name, metricScopeName(scope), suffix}, typ) } diff --git a/util/syspolicy/internal/metrics/metrics_test.go b/util/syspolicy/internal/metrics/metrics_test.go index 07be4773c..a99938769 100644 --- a/util/syspolicy/internal/metrics/metrics_test.go +++ b/util/syspolicy/internal/metrics/metrics_test.go @@ -10,13 +10,14 @@ import ( "tailscale.com/types/lazy" "tailscale.com/util/clientmetric" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) func TestSettingMetricNames(t *testing.T) { tests := []struct { name string - key setting.Key + key pkey.Key scope setting.Scope suffix string typ clientmetric.Type diff --git a/util/syspolicy/pkey/pkey.go 
b/util/syspolicy/pkey/pkey.go new file mode 100644 index 000000000..cfef9e17a --- /dev/null +++ b/util/syspolicy/pkey/pkey.go @@ -0,0 +1,177 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package pkey defines the keys used to store system policies in the registry. +// +// This is a leaf package meant to only contain string constants, not code. +package pkey + +// Key is a string that uniquely identifies a policy and must remain unchanged +// once established and documented for a given policy setting. It may contain +// alphanumeric characters and zero or more [KeyPathSeparator]s to group +// individual policy settings into categories. +type Key string + +// KeyPathSeparator allows logical grouping of policy settings into categories. +const KeyPathSeparator = '/' + +// The const block below lists known policy keys. +// When adding a key to this list, remember to add a corresponding +// [setting.Definition] to [implicitDefinitions] in util/syspolicy/policy_keys.go. +// Otherwise, the [TestKnownKeysRegistered] test will fail as a reminder. + +const ( + // Keys with a string value + ControlURL Key = "LoginURL" // default ""; if blank, ipn uses ipn.DefaultControlURL. + LogTarget Key = "LogTarget" // default ""; if blank logging uses logtail.DefaultHost. + Tailnet Key = "Tailnet" // default ""; if blank, no tailnet name is sent to the server. + + // AlwaysOn is a boolean key that controls whether Tailscale + // should always remain in a connected state, and the user should + // not be able to disconnect at their discretion. + // + // Warning: This policy setting is experimental and may change or be removed in the future. + // It may also not be fully supported by all Tailscale clients until it is out of experimental status. + // See tailscale/corp#26247, tailscale/corp#26248 and tailscale/corp#26249 for more information. + AlwaysOn Key = "AlwaysOn.Enabled" + + // AlwaysOnOverrideWithReason is a boolean key that alters the behavior + // of [AlwaysOn]. When true, the user is allowed to disconnect Tailscale + // by providing a reason. The reason is logged and sent to the control + // for auditing purposes. It has no effect when [AlwaysOn] is false. + AlwaysOnOverrideWithReason Key = "AlwaysOn.OverrideWithReason" + + // ReconnectAfter is a string value formatted for use with time.ParseDuration() + // that defines the duration after which the client should automatically reconnect + // to the Tailscale network following a user-initiated disconnect. + // An empty string or a zero duration disables automatic reconnection. + ReconnectAfter Key = "ReconnectAfter" + + // ExitNodeID is the exit node's node id. default ""; if blank, no exit node is forced. + // Exit node ID takes precedence over exit node IP. + // To find the node ID, go to /api.md#device. + ExitNodeID Key = "ExitNodeID" + ExitNodeIP Key = "ExitNodeIP" // default ""; if blank, no exit node is forced. Value is exit node IP. + + // AllowExitNodeOverride is a boolean key that allows the user to override exit node policy settings + // and manually select an exit node. It does not allow disabling exit node usage entirely. + // It is typically used in conjunction with [ExitNodeID] set to "auto:any". + // + // Warning: This policy setting is experimental and may change, be renamed or removed in the future. + // It may also not be fully supported by all Tailscale clients until it is out of experimental status. + // See tailscale/corp#29969. 
+ AllowExitNodeOverride Key = "ExitNode.AllowOverride" + + // Keys with a string value that specifies an option: "always", "never", "user-decides". + // The default is "user-decides" unless otherwise stated. Enforcement of + // these policies is typically performed in ipnlocal.applySysPolicy(). GUIs + // typically hide menu items related to policies that are enforced. + EnableIncomingConnections Key = "AllowIncomingConnections" + EnableServerMode Key = "UnattendedMode" + ExitNodeAllowLANAccess Key = "ExitNodeAllowLANAccess" + EnableTailscaleDNS Key = "UseTailscaleDNSSettings" + EnableTailscaleSubnets Key = "UseTailscaleSubnets" + + // EnableDNSRegistration is a string value that can be set to "always", "never" + // or "user-decides". It controls whether DNS registration and dynamic DNS + // updates are enabled for the Tailscale interface. For historical reasons + // and to maintain compatibility with existing setups, the default is "never". + // It is only used on Windows. + EnableDNSRegistration Key = "EnableDNSRegistration" + + // CheckUpdates is the key to signal if the updater should periodically + // check for updates. + CheckUpdates Key = "CheckUpdates" + // ApplyUpdates is the key to signal if updates should be automatically + // installed. Its value is "InstallUpdates" because of an awkwardly-named + // visibility option "ApplyUpdates" on MacOS. + ApplyUpdates Key = "InstallUpdates" + // EnableRunExitNode controls if the device acts as an exit node. Even when + // running as an exit node, the device must be approved by a tailnet + // administrator. Its name is slightly awkward because RunExitNodeVisibility + // predates this option but is preserved for backwards compatibility. + EnableRunExitNode Key = "AdvertiseExitNode" + + // Keys with a string value that controls visibility: "show", "hide". + // The default is "show" unless otherwise stated. Enforcement of these + // policies is typically performed by the UI code for the relevant operating + // system. + AdminConsoleVisibility Key = "AdminConsole" + NetworkDevicesVisibility Key = "NetworkDevices" + TestMenuVisibility Key = "TestMenu" + UpdateMenuVisibility Key = "UpdateMenu" + ResetToDefaultsVisibility Key = "ResetToDefaults" + // RunExitNodeVisibility controls if the "run as exit node" menu item is + // visible, without controlling the setting itself. This is preserved for + // backwards compatibility but prefer EnableRunExitNode in new deployments. + RunExitNodeVisibility Key = "RunExitNode" + PreferencesMenuVisibility Key = "PreferencesMenu" + ExitNodeMenuVisibility Key = "ExitNodesPicker" + // AutoUpdateVisibility is the key to signal if the menu item for automatic + // installation of updates should be visible. It is only used by macsys + // installations and uses the Sparkle naming convention, even though it does + // not actually control updates, merely the UI for that setting. + AutoUpdateVisibility Key = "ApplyUpdates" + // SuggestedExitNodeVisibility controls the visibility of suggested exit nodes in the client GUI. + // When this system policy is set to 'hide', an exit node suggestion won't be presented to the user as part of the exit nodes picker. + SuggestedExitNodeVisibility Key = "SuggestedExitNode" + // OnboardingFlowVisibility controls the visibility of the onboarding flow in the client GUI. + // When this system policy is set to 'hide', the onboarding flow is never shown to the user. + OnboardingFlowVisibility Key = "OnboardingFlow" + + // Keys with a string value formatted for use with time.ParseDuration(). 
+ KeyExpirationNoticeTime Key = "KeyExpirationNotice" // default 24 hours + + // Boolean Keys that are only applicable on Windows. Booleans are stored in the registry as + // DWORD or QWORD (either is acceptable). 0 means false, and anything else means true. + // The default is 0 unless otherwise stated. + LogSCMInteractions Key = "LogSCMInteractions" + FlushDNSOnSessionUnlock Key = "FlushDNSOnSessionUnlock" + + // EncryptState is a boolean setting that specifies whether to encrypt the + // tailscaled state file with a TPM device. + EncryptState Key = "EncryptState" + + // PostureChecking indicates if posture checking is enabled and the client shall gather + // posture data. + // Key is a string value that specifies an option: "always", "never", "user-decides". + // The default is "user-decides" unless otherwise stated. + PostureChecking Key = "PostureChecking" + // DeviceSerialNumber is the serial number of the device that is running Tailscale. + // This is used on Android, iOS and tvOS to allow IT administrators to manually give us a serial number via MDM. + // We are unable to programmatically get the serial number on mobile due to sandboxing restrictions. + DeviceSerialNumber Key = "DeviceSerialNumber" + + // ManagedByOrganizationName indicates the name of the organization managing the Tailscale + // install. It is displayed inside the client UI in a prominent location. + ManagedByOrganizationName Key = "ManagedByOrganizationName" + // ManagedByCaption is an info message displayed inside the client UI as a caption when + // ManagedByOrganizationName is set. It can be used to provide a pointer to support resources + // for Tailscale within the organization. + ManagedByCaption Key = "ManagedByCaption" + // ManagedByURL is a valid URL pointing to a support help desk for Tailscale within the + // organization. A button in the client UI provides easy access to this URL. + ManagedByURL Key = "ManagedByURL" + + // AuthKey is an auth key that will be used to login whenever the backend starts. This can be used to + // automatically authenticate managed devices, without requiring user interaction. + AuthKey Key = "AuthKey" + + // MachineCertificateSubject is the exact name of a Subject that needs + // to be present in an identity's certificate chain to sign a RegisterRequest, + // formatted as per pkix.Name.String(). The Subject may be that of the identity + // itself, an intermediate CA or the root CA. + // + // Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA" + MachineCertificateSubject Key = "MachineCertificateSubject" + + // Hostname is the hostname of the device that is running Tailscale. + // When this policy is set, it overrides the hostname that the client + // would otherwise obtain from the OS, e.g. by calling os.Hostname(). + Hostname Key = "Hostname" + + // Keys with a string array value. + + // AllowedSuggestedExitNodes's string array value is a list of exit node IDs that restricts which exit nodes are considered when generating suggestions for exit nodes. 
+ AllowedSuggestedExitNodes Key = "AllowedSuggestedExitNodes" +) diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index cd5f8172c..e32d9cdf4 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -6,225 +6,60 @@ package syspolicy import ( "tailscale.com/types/lazy" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/testenv" ) -// Key is a string that uniquely identifies a policy and must remain unchanged -// once established and documented for a given policy setting. It may contain -// alphanumeric characters and zero or more [KeyPathSeparator]s to group -// individual policy settings into categories. -type Key = setting.Key - -// The const block below lists known policy keys. -// When adding a key to this list, remember to add a corresponding -// [setting.Definition] to [implicitDefinitions] below. -// Otherwise, the [TestKnownKeysRegistered] test will fail as a reminder. - -const ( - // Keys with a string value - ControlURL Key = "LoginURL" // default ""; if blank, ipn uses ipn.DefaultControlURL. - LogTarget Key = "LogTarget" // default ""; if blank logging uses logtail.DefaultHost. - Tailnet Key = "Tailnet" // default ""; if blank, no tailnet name is sent to the server. - - // AlwaysOn is a boolean key that controls whether Tailscale - // should always remain in a connected state, and the user should - // not be able to disconnect at their discretion. - // - // Warning: This policy setting is experimental and may change or be removed in the future. - // It may also not be fully supported by all Tailscale clients until it is out of experimental status. - // See tailscale/corp#26247, tailscale/corp#26248 and tailscale/corp#26249 for more information. - AlwaysOn Key = "AlwaysOn.Enabled" - - // AlwaysOnOverrideWithReason is a boolean key that alters the behavior - // of [AlwaysOn]. When true, the user is allowed to disconnect Tailscale - // by providing a reason. The reason is logged and sent to the control - // for auditing purposes. It has no effect when [AlwaysOn] is false. - AlwaysOnOverrideWithReason Key = "AlwaysOn.OverrideWithReason" - - // ReconnectAfter is a string value formatted for use with time.ParseDuration() - // that defines the duration after which the client should automatically reconnect - // to the Tailscale network following a user-initiated disconnect. - // An empty string or a zero duration disables automatic reconnection. - ReconnectAfter Key = "ReconnectAfter" - - // ExitNodeID is the exit node's node id. default ""; if blank, no exit node is forced. - // Exit node ID takes precedence over exit node IP. - // To find the node ID, go to /api.md#device. - ExitNodeID Key = "ExitNodeID" - ExitNodeIP Key = "ExitNodeIP" // default ""; if blank, no exit node is forced. Value is exit node IP. - - // AllowExitNodeOverride is a boolean key that allows the user to override exit node policy settings - // and manually select an exit node. It does not allow disabling exit node usage entirely. - // It is typically used in conjunction with [ExitNodeID] set to "auto:any". - // - // Warning: This policy setting is experimental and may change, be renamed or removed in the future. - // It may also not be fully supported by all Tailscale clients until it is out of experimental status. - // See tailscale/corp#29969. 
- AllowExitNodeOverride Key = "ExitNode.AllowOverride" - - // Keys with a string value that specifies an option: "always", "never", "user-decides". - // The default is "user-decides" unless otherwise stated. Enforcement of - // these policies is typically performed in ipnlocal.applySysPolicy(). GUIs - // typically hide menu items related to policies that are enforced. - EnableIncomingConnections Key = "AllowIncomingConnections" - EnableServerMode Key = "UnattendedMode" - ExitNodeAllowLANAccess Key = "ExitNodeAllowLANAccess" - EnableTailscaleDNS Key = "UseTailscaleDNSSettings" - EnableTailscaleSubnets Key = "UseTailscaleSubnets" - - // EnableDNSRegistration is a string value that can be set to "always", "never" - // or "user-decides". It controls whether DNS registration and dynamic DNS - // updates are enabled for the Tailscale interface. For historical reasons - // and to maintain compatibility with existing setups, the default is "never". - // It is only used on Windows. - EnableDNSRegistration Key = "EnableDNSRegistration" - - // CheckUpdates is the key to signal if the updater should periodically - // check for updates. - CheckUpdates Key = "CheckUpdates" - // ApplyUpdates is the key to signal if updates should be automatically - // installed. Its value is "InstallUpdates" because of an awkwardly-named - // visibility option "ApplyUpdates" on MacOS. - ApplyUpdates Key = "InstallUpdates" - // EnableRunExitNode controls if the device acts as an exit node. Even when - // running as an exit node, the device must be approved by a tailnet - // administrator. Its name is slightly awkward because RunExitNodeVisibility - // predates this option but is preserved for backwards compatibility. - EnableRunExitNode Key = "AdvertiseExitNode" - - // Keys with a string value that controls visibility: "show", "hide". - // The default is "show" unless otherwise stated. Enforcement of these - // policies is typically performed by the UI code for the relevant operating - // system. - AdminConsoleVisibility Key = "AdminConsole" - NetworkDevicesVisibility Key = "NetworkDevices" - TestMenuVisibility Key = "TestMenu" - UpdateMenuVisibility Key = "UpdateMenu" - ResetToDefaultsVisibility Key = "ResetToDefaults" - // RunExitNodeVisibility controls if the "run as exit node" menu item is - // visible, without controlling the setting itself. This is preserved for - // backwards compatibility but prefer EnableRunExitNode in new deployments. - RunExitNodeVisibility Key = "RunExitNode" - PreferencesMenuVisibility Key = "PreferencesMenu" - ExitNodeMenuVisibility Key = "ExitNodesPicker" - // AutoUpdateVisibility is the key to signal if the menu item for automatic - // installation of updates should be visible. It is only used by macsys - // installations and uses the Sparkle naming convention, even though it does - // not actually control updates, merely the UI for that setting. - AutoUpdateVisibility Key = "ApplyUpdates" - // SuggestedExitNodeVisibility controls the visibility of suggested exit nodes in the client GUI. - // When this system policy is set to 'hide', an exit node suggestion won't be presented to the user as part of the exit nodes picker. - SuggestedExitNodeVisibility Key = "SuggestedExitNode" - // OnboardingFlowVisibility controls the visibility of the onboarding flow in the client GUI. - // When this system policy is set to 'hide', the onboarding flow is never shown to the user. - OnboardingFlowVisibility Key = "OnboardingFlow" - - // Keys with a string value formatted for use with time.ParseDuration(). 
- KeyExpirationNoticeTime Key = "KeyExpirationNotice" // default 24 hours - - // Boolean Keys that are only applicable on Windows. Booleans are stored in the registry as - // DWORD or QWORD (either is acceptable). 0 means false, and anything else means true. - // The default is 0 unless otherwise stated. - LogSCMInteractions Key = "LogSCMInteractions" - FlushDNSOnSessionUnlock Key = "FlushDNSOnSessionUnlock" - - // EncryptState is a boolean setting that specifies whether to encrypt the - // tailscaled state file with a TPM device. - EncryptState Key = "EncryptState" - - // PostureChecking indicates if posture checking is enabled and the client shall gather - // posture data. - // Key is a string value that specifies an option: "always", "never", "user-decides". - // The default is "user-decides" unless otherwise stated. - PostureChecking Key = "PostureChecking" - // DeviceSerialNumber is the serial number of the device that is running Tailscale. - // This is used on Android, iOS and tvOS to allow IT administrators to manually give us a serial number via MDM. - // We are unable to programmatically get the serial number on mobile due to sandboxing restrictions. - DeviceSerialNumber Key = "DeviceSerialNumber" - - // ManagedByOrganizationName indicates the name of the organization managing the Tailscale - // install. It is displayed inside the client UI in a prominent location. - ManagedByOrganizationName Key = "ManagedByOrganizationName" - // ManagedByCaption is an info message displayed inside the client UI as a caption when - // ManagedByOrganizationName is set. It can be used to provide a pointer to support resources - // for Tailscale within the organization. - ManagedByCaption Key = "ManagedByCaption" - // ManagedByURL is a valid URL pointing to a support help desk for Tailscale within the - // organization. A button in the client UI provides easy access to this URL. - ManagedByURL Key = "ManagedByURL" - - // AuthKey is an auth key that will be used to login whenever the backend starts. This can be used to - // automatically authenticate managed devices, without requiring user interaction. - AuthKey Key = "AuthKey" - - // MachineCertificateSubject is the exact name of a Subject that needs - // to be present in an identity's certificate chain to sign a RegisterRequest, - // formatted as per pkix.Name.String(). The Subject may be that of the identity - // itself, an intermediate CA or the root CA. - // - // Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA" - MachineCertificateSubject Key = "MachineCertificateSubject" - - // Hostname is the hostname of the device that is running Tailscale. - // When this policy is set, it overrides the hostname that the client - // would otherwise obtain from the OS, e.g. by calling os.Hostname(). - Hostname Key = "Hostname" - - // Keys with a string array value. - // AllowedSuggestedExitNodes's string array value is a list of exit node IDs that restricts which exit nodes are considered when generating suggestions for exit nodes. - AllowedSuggestedExitNodes Key = "AllowedSuggestedExitNodes" -) - // implicitDefinitions is a list of [setting.Definition] that will be registered // automatically when the policy setting definitions are first used by the syspolicy package hierarchy. // This includes the first time a policy needs to be read from any source. 
var implicitDefinitions = []*setting.Definition{ // Device policy settings (can only be configured on a per-device basis): - setting.NewDefinition(AllowedSuggestedExitNodes, setting.DeviceSetting, setting.StringListValue), - setting.NewDefinition(AllowExitNodeOverride, setting.DeviceSetting, setting.BooleanValue), - setting.NewDefinition(AlwaysOn, setting.DeviceSetting, setting.BooleanValue), - setting.NewDefinition(AlwaysOnOverrideWithReason, setting.DeviceSetting, setting.BooleanValue), - setting.NewDefinition(ApplyUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(AuthKey, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(CheckUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(ControlURL, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(DeviceSerialNumber, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(EnableDNSRegistration, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(EnableIncomingConnections, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(EnableRunExitNode, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(EnableServerMode, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(EnableTailscaleDNS, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(EnableTailscaleSubnets, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(ExitNodeAllowLANAccess, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(ExitNodeID, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(ExitNodeIP, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(FlushDNSOnSessionUnlock, setting.DeviceSetting, setting.BooleanValue), - setting.NewDefinition(EncryptState, setting.DeviceSetting, setting.BooleanValue), - setting.NewDefinition(Hostname, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(LogSCMInteractions, setting.DeviceSetting, setting.BooleanValue), - setting.NewDefinition(LogTarget, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(MachineCertificateSubject, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(PostureChecking, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(ReconnectAfter, setting.DeviceSetting, setting.DurationValue), - setting.NewDefinition(Tailnet, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.AllowedSuggestedExitNodes, setting.DeviceSetting, setting.StringListValue), + setting.NewDefinition(pkey.AllowExitNodeOverride, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.AlwaysOn, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.AlwaysOnOverrideWithReason, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.ApplyUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.AuthKey, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.CheckUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.ControlURL, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.DeviceSerialNumber, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.EnableDNSRegistration, setting.DeviceSetting, setting.PreferenceOptionValue), + 
setting.NewDefinition(pkey.EnableIncomingConnections, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.EnableRunExitNode, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.EnableServerMode, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.EnableTailscaleDNS, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.EnableTailscaleSubnets, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.ExitNodeAllowLANAccess, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.ExitNodeID, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.ExitNodeIP, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.FlushDNSOnSessionUnlock, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.EncryptState, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.Hostname, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.LogSCMInteractions, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.LogTarget, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.MachineCertificateSubject, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.PostureChecking, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.ReconnectAfter, setting.DeviceSetting, setting.DurationValue), + setting.NewDefinition(pkey.Tailnet, setting.DeviceSetting, setting.StringValue), // User policy settings (can be configured on a user- or device-basis): - setting.NewDefinition(AdminConsoleVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(AutoUpdateVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(ExitNodeMenuVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(KeyExpirationNoticeTime, setting.UserSetting, setting.DurationValue), - setting.NewDefinition(ManagedByCaption, setting.UserSetting, setting.StringValue), - setting.NewDefinition(ManagedByOrganizationName, setting.UserSetting, setting.StringValue), - setting.NewDefinition(ManagedByURL, setting.UserSetting, setting.StringValue), - setting.NewDefinition(NetworkDevicesVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(PreferencesMenuVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(ResetToDefaultsVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(RunExitNodeVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(SuggestedExitNodeVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(TestMenuVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(UpdateMenuVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(OnboardingFlowVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.AdminConsoleVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.AutoUpdateVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.ExitNodeMenuVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.KeyExpirationNoticeTime, setting.UserSetting, setting.DurationValue), + 
setting.NewDefinition(pkey.ManagedByCaption, setting.UserSetting, setting.StringValue), + setting.NewDefinition(pkey.ManagedByOrganizationName, setting.UserSetting, setting.StringValue), + setting.NewDefinition(pkey.ManagedByURL, setting.UserSetting, setting.StringValue), + setting.NewDefinition(pkey.NetworkDevicesVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.PreferencesMenuVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.ResetToDefaultsVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.RunExitNodeVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.SuggestedExitNodeVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.TestMenuVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.UpdateMenuVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.OnboardingFlowVisibility, setting.UserSetting, setting.VisibilityValue), } func init() { @@ -248,7 +83,7 @@ var implicitDefinitionMap lazy.SyncValue[setting.DefinitionMap] // WellKnownSettingDefinition returns a well-known, implicit setting definition by its key, // or an [ErrNoSuchKey] if a policy setting with the specified key does not exist // among implicit policy definitions. -func WellKnownSettingDefinition(k Key) (*setting.Definition, error) { +func WellKnownSettingDefinition(k pkey.Key) (*setting.Definition, error) { m, err := implicitDefinitionMap.GetErr(func() (setting.DefinitionMap, error) { return setting.DefinitionMapOf(implicitDefinitions) }) diff --git a/util/syspolicy/policy_keys_test.go b/util/syspolicy/policy_keys_test.go index 4d3260f3e..490353c81 100644 --- a/util/syspolicy/policy_keys_test.go +++ b/util/syspolicy/policy_keys_test.go @@ -14,14 +14,19 @@ import ( "strconv" "testing" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) func TestKnownKeysRegistered(t *testing.T) { - keyConsts, err := listStringConsts[Key]("policy_keys.go") + const file = "pkey/pkey.go" + keyConsts, err := listStringConsts[pkey.Key](file) if err != nil { t.Fatalf("listStringConsts failed: %v", err) } + if len(keyConsts) == 0 { + t.Fatalf("no key constants found in %s", file) + } m, err := setting.DefinitionMapOf(implicitDefinitions) if err != nil { diff --git a/util/syspolicy/rsop/change_callbacks.go b/util/syspolicy/rsop/change_callbacks.go index 87b45b654..59dba07c6 100644 --- a/util/syspolicy/rsop/change_callbacks.go +++ b/util/syspolicy/rsop/change_callbacks.go @@ -11,6 +11,7 @@ import ( "tailscale.com/util/set" "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) @@ -37,8 +38,8 @@ func (c PolicyChange) Old() *setting.Snapshot { return c.snapshots.Old } -// HasChanged reports whether a policy setting with the specified [setting.Key], has changed. -func (c PolicyChange) HasChanged(key setting.Key) bool { +// HasChanged reports whether a policy setting with the specified [pkey.Key], has changed. +func (c PolicyChange) HasChanged(key pkey.Key) bool { new, newErr := c.snapshots.New.GetErr(key) old, oldErr := c.snapshots.Old.GetErr(key) if newErr != nil && oldErr != nil { @@ -60,7 +61,7 @@ func (c PolicyChange) HasChanged(key setting.Key) bool { } // HasChangedAnyOf reports whether any of the specified policy settings has changed. 
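Consumers of change notifications now compare keys from pkey as well. A minimal sketch of a change handler built on the PolicyChange accessors above, assuming the handler is registered with the resultant policy elsewhere:

package example

import (
	"log"

	"tailscale.com/util/syspolicy/pkey"
	"tailscale.com/util/syspolicy/rsop"
)

// onPolicyChange is a hypothetical callback; how it gets registered is not shown here.
func onPolicyChange(c *rsop.PolicyChange) {
	// Only react when one of the exit-node policies differs between c.Old() and c.New().
	if c.HasChangedAnyOf(pkey.ExitNodeID, pkey.ExitNodeIP, pkey.ExitNodeAllowLANAccess) {
		log.Printf("exit node policy changed; new effective settings: %v", c.New())
	}
}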
-func (c PolicyChange) HasChangedAnyOf(keys ...setting.Key) bool { +func (c PolicyChange) HasChangedAnyOf(keys ...pkey.Key) bool { return slices.ContainsFunc(keys, c.HasChanged) } diff --git a/util/syspolicy/rsop/resultant_policy_test.go b/util/syspolicy/rsop/resultant_policy_test.go index e4bfb1a88..2da46a8ca 100644 --- a/util/syspolicy/rsop/resultant_policy_test.go +++ b/util/syspolicy/rsop/resultant_policy_test.go @@ -15,6 +15,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "tailscale.com/tstest" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" @@ -80,7 +81,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { type sourceConfig struct { name string scope setting.PolicyScope - settingKey setting.Key + settingKey pkey.Key settingValue string wantEffective bool } @@ -113,7 +114,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("TestValueA", nil, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), }, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), }, @@ -129,7 +130,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("TestValueA", nil, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), }, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), }, @@ -159,7 +160,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("TestValueA", nil, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), "TestKeyB": setting.RawItemWith("TestValueB", nil, setting.NewNamedOrigin("TestSourceB", setting.DeviceScope)), "TestKeyC": setting.RawItemWith("TestValueC", nil, setting.NewNamedOrigin("TestSourceC", setting.DeviceScope)), @@ -191,7 +192,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("TestValueC", nil, setting.NewNamedOrigin("TestSourceC", setting.DeviceScope)), "TestKeyB": setting.RawItemWith("TestValueB", nil, setting.NewNamedOrigin("TestSourceB", setting.DeviceScope)), }, setting.DeviceScope), @@ -245,7 +246,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("TestValueF", nil, setting.NewNamedOrigin("TestSourceF", setting.DeviceScope)), "TestKeyB": setting.RawItemWith("TestValueB", nil, setting.NewNamedOrigin("TestSourceB", setting.DeviceScope)), "TestKeyC": setting.RawItemWith("TestValueE", nil, setting.NewNamedOrigin("TestSourceE", setting.DeviceScope)), @@ -263,7 +264,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: 
setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), }, setting.CurrentUserScope, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), }, @@ -288,7 +289,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), "TestKeyB": setting.RawItemWith("UserValue", nil, setting.NewNamedOrigin("TestSourceUser", setting.CurrentUserScope)), }, setting.CurrentUserScope), @@ -321,7 +322,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), "TestKeyB": setting.RawItemWith("ProfileValue", nil, setting.NewNamedOrigin("TestSourceProfile", setting.CurrentProfileScope)), }, setting.CurrentUserScope), @@ -347,7 +348,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: false, // Registering a user source should have no impact on the device policy. }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), }, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), }, @@ -497,61 +498,61 @@ func TestPolicyFor(t *testing.T) { func TestPolicyChangeHasChanged(t *testing.T) { tests := []struct { name string - old, new map[setting.Key]setting.RawItem - wantChanged []setting.Key - wantUnchanged []setting.Key + old, new map[pkey.Key]setting.RawItem + wantChanged []pkey.Key + wantUnchanged []pkey.Key }{ { name: "String-Settings", - old: map[setting.Key]setting.RawItem{ + old: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf("Old"), "UnchangedSetting": setting.RawItemOf("Value"), }, - new: map[setting.Key]setting.RawItem{ + new: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf("New"), "UnchangedSetting": setting.RawItemOf("Value"), }, - wantChanged: []setting.Key{"ChangedSetting"}, - wantUnchanged: []setting.Key{"UnchangedSetting"}, + wantChanged: []pkey.Key{"ChangedSetting"}, + wantUnchanged: []pkey.Key{"UnchangedSetting"}, }, { name: "UInt64-Settings", - old: map[setting.Key]setting.RawItem{ + old: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf(uint64(0)), "UnchangedSetting": setting.RawItemOf(uint64(42)), }, - new: map[setting.Key]setting.RawItem{ + new: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf(uint64(1)), "UnchangedSetting": setting.RawItemOf(uint64(42)), }, - wantChanged: []setting.Key{"ChangedSetting"}, - wantUnchanged: []setting.Key{"UnchangedSetting"}, + wantChanged: []pkey.Key{"ChangedSetting"}, + wantUnchanged: []pkey.Key{"UnchangedSetting"}, }, { name: "StringSlice-Settings", - old: map[setting.Key]setting.RawItem{ + old: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf([]string{"Chicago"}), "UnchangedSetting": 
setting.RawItemOf([]string{"String1", "String2"}), }, - new: map[setting.Key]setting.RawItem{ + new: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf([]string{"New York"}), "UnchangedSetting": setting.RawItemOf([]string{"String1", "String2"}), }, - wantChanged: []setting.Key{"ChangedSetting"}, - wantUnchanged: []setting.Key{"UnchangedSetting"}, + wantChanged: []pkey.Key{"ChangedSetting"}, + wantUnchanged: []pkey.Key{"UnchangedSetting"}, }, { name: "Int8-Settings", // We don't have actual int8 settings, but this should still work. - old: map[setting.Key]setting.RawItem{ + old: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf(int8(0)), "UnchangedSetting": setting.RawItemOf(int8(42)), }, - new: map[setting.Key]setting.RawItem{ + new: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf(int8(1)), "UnchangedSetting": setting.RawItemOf(int8(42)), }, - wantChanged: []setting.Key{"ChangedSetting"}, - wantUnchanged: []setting.Key{"UnchangedSetting"}, + wantChanged: []pkey.Key{"ChangedSetting"}, + wantUnchanged: []pkey.Key{"UnchangedSetting"}, }, } for _, tt := range tests { diff --git a/util/syspolicy/setting/key.go b/util/syspolicy/setting/key.go deleted file mode 100644 index aa7606d36..000000000 --- a/util/syspolicy/setting/key.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package setting - -// Key is a string that uniquely identifies a policy and must remain unchanged -// once established and documented for a given policy setting. It may contain -// alphanumeric characters and zero or more [KeyPathSeparator]s to group -// individual policy settings into categories. -type Key string - -// KeyPathSeparator allows logical grouping of policy settings into categories. -const KeyPathSeparator = '/' diff --git a/util/syspolicy/setting/raw_item.go b/util/syspolicy/setting/raw_item.go index 9a96073b0..ea97865f5 100644 --- a/util/syspolicy/setting/raw_item.go +++ b/util/syspolicy/setting/raw_item.go @@ -11,6 +11,7 @@ import ( "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/opt" "tailscale.com/types/structs" + "tailscale.com/util/syspolicy/pkey" ) // RawItem contains a raw policy setting value as read from a policy store, or an @@ -169,4 +170,4 @@ func (v *RawValue) UnmarshalJSON(b []byte) error { } // RawValues is a map of keyed setting values that can be read from a JSON. -type RawValues map[Key]RawValue +type RawValues map[pkey.Key]RawValue diff --git a/util/syspolicy/setting/setting.go b/util/syspolicy/setting/setting.go index 13c7a2a5f..9285afade 100644 --- a/util/syspolicy/setting/setting.go +++ b/util/syspolicy/setting/setting.go @@ -16,6 +16,7 @@ import ( "tailscale.com/types/lazy" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/testenv" ) @@ -134,7 +135,7 @@ type ValueType interface { // Definition defines policy key, scope and value type. type Definition struct { - key Key + key pkey.Key scope Scope typ Type platforms PlatformList @@ -142,12 +143,12 @@ type Definition struct { // NewDefinition returns a new [Definition] with the specified // key, scope, type and supported platforms (see [PlatformList]). -func NewDefinition(k Key, s Scope, t Type, platforms ...string) *Definition { +func NewDefinition(k pkey.Key, s Scope, t Type, platforms ...string) *Definition { return &Definition{key: k, scope: s, typ: t, platforms: platforms} } // Key returns a policy setting's identifier. 
-func (d *Definition) Key() Key { +func (d *Definition) Key() pkey.Key { if d == nil { return "" } @@ -208,7 +209,7 @@ func (d *Definition) Equal(d2 *Definition) bool { } // DefinitionMap is a map of setting [Definition] by [Key]. -type DefinitionMap map[Key]*Definition +type DefinitionMap map[pkey.Key]*Definition var ( definitions lazy.SyncValue[DefinitionMap] @@ -224,7 +225,7 @@ var ( // invoking any functions that use the registered policy definitions. This // includes calling [Definitions] or [DefinitionOf] directly, or reading any // policy settings via syspolicy. -func Register(k Key, s Scope, t Type, platforms ...string) { +func Register(k pkey.Key, s Scope, t Type, platforms ...string) { RegisterDefinition(NewDefinition(k, s, t, platforms...)) } @@ -290,7 +291,7 @@ func SetDefinitionsForTest(tb testenv.TB, ds ...*Definition) error { // DefinitionOf returns a setting definition by key, // or [ErrNoSuchKey] if the specified key does not exist, // or an error if there are conflicting policy definitions. -func DefinitionOf(k Key) (*Definition, error) { +func DefinitionOf(k pkey.Key) (*Definition, error) { ds, err := settingDefinitions() if err != nil { return nil, err diff --git a/util/syspolicy/setting/setting_test.go b/util/syspolicy/setting/setting_test.go index 3cc08e7da..e43495a16 100644 --- a/util/syspolicy/setting/setting_test.go +++ b/util/syspolicy/setting/setting_test.go @@ -11,6 +11,7 @@ import ( "tailscale.com/types/lazy" "tailscale.com/types/ptr" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/pkey" ) func TestSettingDefinition(t *testing.T) { @@ -18,7 +19,7 @@ func TestSettingDefinition(t *testing.T) { name string setting *Definition osOverride string - wantKey Key + wantKey pkey.Key wantScope Scope wantType Type wantIsSupported bool @@ -163,10 +164,10 @@ func TestSettingDefinition(t *testing.T) { } func TestRegisterSettingDefinition(t *testing.T) { - const testPolicySettingKey Key = "TestPolicySetting" + const testPolicySettingKey pkey.Key = "TestPolicySetting" tests := []struct { name string - key Key + key pkey.Key wantEq *Definition wantErr error }{ diff --git a/util/syspolicy/setting/snapshot.go b/util/syspolicy/setting/snapshot.go index 3a40785dc..94c7ecadb 100644 --- a/util/syspolicy/setting/snapshot.go +++ b/util/syspolicy/setting/snapshot.go @@ -15,34 +15,35 @@ import ( "github.com/go-json-experiment/json/jsontext" xmaps "golang.org/x/exp/maps" "tailscale.com/util/deephash" + "tailscale.com/util/syspolicy/pkey" ) // Snapshot is an immutable collection of ([Key], [RawItem]) pairs, representing // a set of policy settings applied at a specific moment in time. // A nil pointer to [Snapshot] is valid. type Snapshot struct { - m map[Key]RawItem + m map[pkey.Key]RawItem sig deephash.Sum // of m summary Summary } // NewSnapshot returns a new [Snapshot] with the specified items and options. -func NewSnapshot(items map[Key]RawItem, opts ...SummaryOption) *Snapshot { +func NewSnapshot(items map[pkey.Key]RawItem, opts ...SummaryOption) *Snapshot { return &Snapshot{m: xmaps.Clone(items), sig: deephash.Hash(&items), summary: SummaryWith(opts...)} } // All returns an iterator over policy settings in s. The iteration order is not // specified and is not guaranteed to be the same from one call to the next. 
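Registration and lookup keep the same shape with the relocated key type. A small sketch using a hypothetical, non-built-in key (Register must run before any policy is read, hence the init):

package example

import (
	"fmt"

	"tailscale.com/util/syspolicy/pkey"
	"tailscale.com/util/syspolicy/setting"
)

// ExampleCustomSetting is a hypothetical key, used only for illustration.
const ExampleCustomSetting pkey.Key = "ExampleCustomSetting"

func init() {
	setting.Register(ExampleCustomSetting, setting.DeviceSetting, setting.StringValue)
}

func lookupExample() {
	def, err := setting.DefinitionOf(ExampleCustomSetting)
	if err != nil {
		fmt.Println("no definition:", err)
		return
	}
	fmt.Println("registered definition for", def.Key())
}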
-func (s *Snapshot) All() iter.Seq2[Key, RawItem] { +func (s *Snapshot) All() iter.Seq2[pkey.Key, RawItem] { if s == nil { - return func(yield func(Key, RawItem) bool) {} + return func(yield func(pkey.Key, RawItem) bool) {} } return maps.All(s.m) } // Get returns the value of the policy setting with the specified key // or nil if it is not configured or has an error. -func (s *Snapshot) Get(k Key) any { +func (s *Snapshot) Get(k pkey.Key) any { v, _ := s.GetErr(k) return v } @@ -50,7 +51,7 @@ func (s *Snapshot) Get(k Key) any { // GetErr returns the value of the policy setting with the specified key, // [ErrNotConfigured] if it is not configured, or an error returned by // the policy Store if the policy setting could not be read. -func (s *Snapshot) GetErr(k Key) (any, error) { +func (s *Snapshot) GetErr(k pkey.Key) (any, error) { if s != nil { if s, ok := s.m[k]; ok { return s.Value(), s.Error() @@ -62,7 +63,7 @@ func (s *Snapshot) GetErr(k Key) (any, error) { // GetSetting returns the untyped policy setting with the specified key and true // if a policy setting with such key has been configured; // otherwise, it returns zero, false. -func (s *Snapshot) GetSetting(k Key) (setting RawItem, ok bool) { +func (s *Snapshot) GetSetting(k pkey.Key) (setting RawItem, ok bool) { setting, ok = s.m[k] return setting, ok } @@ -94,9 +95,9 @@ func (s *Snapshot) EqualItems(s2 *Snapshot) bool { // Keys return an iterator over keys in s. The iteration order is not specified // and is not guaranteed to be the same from one call to the next. -func (s *Snapshot) Keys() iter.Seq[Key] { +func (s *Snapshot) Keys() iter.Seq[pkey.Key] { if s.m == nil { - return func(yield func(Key) bool) {} + return func(yield func(pkey.Key) bool) {} } return maps.Keys(s.m) } @@ -144,8 +145,8 @@ func (s *Snapshot) String() string { // snapshotJSON holds JSON-marshallable data for [Snapshot]. 
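Snapshots are keyed by pkey.Key end to end after this change. A short sketch of building and inspecting one with the constructors and accessors above (keys and values are illustrative):

package example

import (
	"fmt"

	"tailscale.com/util/syspolicy/pkey"
	"tailscale.com/util/syspolicy/setting"
)

func inspectSnapshot() {
	snap := setting.NewSnapshot(map[pkey.Key]setting.RawItem{
		"ExitNodeID":       setting.RawItemOf("node-123"),
		"ManagedByCaption": setting.RawItemOf("Contact IT for help"),
	}, setting.DeviceScope)

	// Get returns nil for unconfigured keys; GetErr reports ErrNotConfigured instead.
	fmt.Println(snap.Get("ExitNodeID"))
	if _, err := snap.GetErr("Hostname"); err != nil {
		fmt.Println("Hostname:", err)
	}

	// All iterates over every configured setting; the order is unspecified.
	for k, item := range snap.All() {
		fmt.Printf("%s = %v\n", k, item.Value())
	}
}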
type snapshotJSON struct { - Summary Summary `json:",omitzero"` - Settings map[Key]RawItem `json:",omitempty"` + Summary Summary `json:",omitzero"` + Settings map[pkey.Key]RawItem `json:",omitempty"` } var ( @@ -232,7 +233,7 @@ func MergeSnapshots(snapshot1, snapshot2 *Snapshot) *Snapshot { } return &Snapshot{snapshot2.m, snapshot2.sig, SummaryWith(summaryOpts...)} } - m := make(map[Key]RawItem, snapshot1.Len()+snapshot2.Len()) + m := make(map[pkey.Key]RawItem, snapshot1.Len()+snapshot2.Len()) xmaps.Copy(m, snapshot1.m) xmaps.Copy(m, snapshot2.m) // snapshot2 has higher precedence return &Snapshot{m, deephash.Hash(&m), SummaryWith(summaryOpts...)} diff --git a/util/syspolicy/setting/snapshot_test.go b/util/syspolicy/setting/snapshot_test.go index 19f014aca..99c619cd9 100644 --- a/util/syspolicy/setting/snapshot_test.go +++ b/util/syspolicy/setting/snapshot_test.go @@ -11,6 +11,7 @@ import ( jsonv2 "github.com/go-json-experiment/json" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/pkey" ) func TestMergeSnapshots(t *testing.T) { @@ -23,23 +24,23 @@ func TestMergeSnapshots(t *testing.T) { name: "both-nil", s1: nil, s2: nil, - want: NewSnapshot(map[Key]RawItem{}), + want: NewSnapshot(map[pkey.Key]RawItem{}), }, { name: "both-empty", - s1: NewSnapshot(map[Key]RawItem{}), - s2: NewSnapshot(map[Key]RawItem{}), - want: NewSnapshot(map[Key]RawItem{}), + s1: NewSnapshot(map[pkey.Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{}), + want: NewSnapshot(map[pkey.Key]RawItem{}), }, { name: "first-nil", s1: nil, - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -47,13 +48,13 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "first-empty", - s1: NewSnapshot(map[Key]RawItem{}), - s2: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -61,13 +62,13 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "second-nil", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }), s2: nil, - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -75,13 +76,13 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "second-empty", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }), - s2: NewSnapshot(map[Key]RawItem{}), - want: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{}), + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -89,17 +90,17 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "no-conflicts", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": 
RawItemOf("String"), "Setting3": RawItemOf(false), }), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting4": RawItemOf(2 * time.Hour), "Setting5": RawItemOf(VisibleByPolicy), "Setting6": RawItemOf(ShowChoiceByPolicy), }), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -110,17 +111,17 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "with-conflicts", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(456), "Setting3": RawItemOf(false), "Setting4": RawItemOf(2 * time.Hour), }), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(456), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -129,17 +130,17 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "with-scope-first-wins", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }, DeviceScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(456), "Setting3": RawItemOf(false), "Setting4": RawItemOf(2 * time.Hour), }, CurrentUserScope), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -148,17 +149,17 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "with-scope-second-wins", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }, CurrentUserScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(456), "Setting3": RawItemOf(false), "Setting4": RawItemOf(2 * time.Hour), }, DeviceScope), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(456), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -167,18 +168,18 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "with-scope-both-empty", - s1: NewSnapshot(map[Key]RawItem{}, CurrentUserScope), - s2: NewSnapshot(map[Key]RawItem{}, DeviceScope), - want: NewSnapshot(map[Key]RawItem{}, CurrentUserScope), + s1: NewSnapshot(map[pkey.Key]RawItem{}, CurrentUserScope), + s2: NewSnapshot(map[pkey.Key]RawItem{}, DeviceScope), + want: NewSnapshot(map[pkey.Key]RawItem{}, CurrentUserScope), }, { name: "with-scope-first-empty", - s1: NewSnapshot(map[Key]RawItem{}, CurrentUserScope), - s2: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{}, CurrentUserScope), + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true)}, DeviceScope, NewNamedOrigin("TestPolicy", DeviceScope)), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -186,13 +187,13 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "with-scope-second-empty", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": 
RawItemOf(true), }, CurrentUserScope), - s2: NewSnapshot(map[Key]RawItem{}), - want: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{}), + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -226,28 +227,28 @@ func TestSnapshotEqual(t *testing.T) { { name: "nil-empty", s1: nil, - s2: NewSnapshot(map[Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{}), wantEqual: true, wantEqualItems: true, }, { name: "empty-nil", - s1: NewSnapshot(map[Key]RawItem{}), + s1: NewSnapshot(map[pkey.Key]RawItem{}), s2: nil, wantEqual: true, wantEqualItems: true, }, { name: "empty-empty", - s1: NewSnapshot(map[Key]RawItem{}), - s2: NewSnapshot(map[Key]RawItem{}), + s1: NewSnapshot(map[pkey.Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{}), wantEqual: true, wantEqualItems: true, }, { name: "first-nil", s1: nil, - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -257,8 +258,8 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "first-empty", - s1: NewSnapshot(map[Key]RawItem{}), - s2: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -268,7 +269,7 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "second-nil", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -279,23 +280,23 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "second-empty", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }), - s2: NewSnapshot(map[Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{}), wantEqual: false, wantEqualItems: false, }, { name: "same-items-same-order-no-scope", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -305,12 +306,12 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "same-items-same-order-same-scope", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }, DeviceScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -320,12 +321,12 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "same-items-different-order-same-scope", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }, DeviceScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting3": RawItemOf(false), "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), @@ -335,12 +336,12 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "same-items-same-order-different-scope", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": 
RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }, DeviceScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -350,12 +351,12 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "different-items-same-scope", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }, DeviceScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting4": RawItemOf(2 * time.Hour), "Setting5": RawItemOf(VisibleByPolicy), "Setting6": RawItemOf(ShowChoiceByPolicy), @@ -404,7 +405,7 @@ func TestSnapshotString(t *testing.T) { }, { name: "non-empty", - snapshot: NewSnapshot(map[Key]RawItem{ + snapshot: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(2 * time.Hour), "Setting2": RawItemOf(VisibleByPolicy), "Setting3": RawItemOf(ShowChoiceByPolicy), @@ -416,14 +417,14 @@ Setting3 = user-decides`, }, { name: "non-empty-with-item-origin", - snapshot: NewSnapshot(map[Key]RawItem{ + snapshot: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemWith(42, nil, NewNamedOrigin("Test Policy", DeviceScope)), }), wantString: `Setting1 = 42 - {Test Policy (Device)}`, }, { name: "non-empty-with-item-error", - snapshot: NewSnapshot(map[Key]RawItem{ + snapshot: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemWith(nil, NewErrorText("bang!"), nil), }), wantString: `Setting1 = Error{"bang!"}`, @@ -458,55 +459,55 @@ func TestMarshalUnmarshalSnapshot(t *testing.T) { }, { name: "Bool/True", - snapshot: NewSnapshot(map[Key]RawItem{"BoolPolicy": RawItemOf(true)}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"BoolPolicy": RawItemOf(true)}), wantJSON: `{"Settings": {"BoolPolicy": {"Value": true}}}`, }, { name: "Bool/False", - snapshot: NewSnapshot(map[Key]RawItem{"BoolPolicy": RawItemOf(false)}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"BoolPolicy": RawItemOf(false)}), wantJSON: `{"Settings": {"BoolPolicy": {"Value": false}}}`, }, { name: "String/Non-Empty", - snapshot: NewSnapshot(map[Key]RawItem{"StringPolicy": RawItemOf("StringValue")}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"StringPolicy": RawItemOf("StringValue")}), wantJSON: `{"Settings": {"StringPolicy": {"Value": "StringValue"}}}`, }, { name: "String/Empty", - snapshot: NewSnapshot(map[Key]RawItem{"StringPolicy": RawItemOf("")}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"StringPolicy": RawItemOf("")}), wantJSON: `{"Settings": {"StringPolicy": {"Value": ""}}}`, }, { name: "Integer/NonZero", - snapshot: NewSnapshot(map[Key]RawItem{"IntPolicy": RawItemOf(uint64(42))}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"IntPolicy": RawItemOf(uint64(42))}), wantJSON: `{"Settings": {"IntPolicy": {"Value": 42}}}`, }, { name: "Integer/Zero", - snapshot: NewSnapshot(map[Key]RawItem{"IntPolicy": RawItemOf(uint64(0))}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"IntPolicy": RawItemOf(uint64(0))}), wantJSON: `{"Settings": {"IntPolicy": {"Value": 0}}}`, }, { name: "String-List", - snapshot: NewSnapshot(map[Key]RawItem{"ListPolicy": RawItemOf([]string{"Value1", "Value2"})}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"ListPolicy": RawItemOf([]string{"Value1", "Value2"})}), wantJSON: `{"Settings": {"ListPolicy": {"Value": ["Value1", "Value2"]}}}`, }, { name: "Duration/Zero", - snapshot: NewSnapshot(map[Key]RawItem{"DurationPolicy": RawItemOf(time.Duration(0))}), + snapshot: 
NewSnapshot(map[pkey.Key]RawItem{"DurationPolicy": RawItemOf(time.Duration(0))}), wantJSON: `{"Settings": {"DurationPolicy": {"Value": "0s"}}}`, - wantBack: NewSnapshot(map[Key]RawItem{"DurationPolicy": RawItemOf("0s")}), + wantBack: NewSnapshot(map[pkey.Key]RawItem{"DurationPolicy": RawItemOf("0s")}), }, { name: "Duration/NonZero", - snapshot: NewSnapshot(map[Key]RawItem{"DurationPolicy": RawItemOf(2 * time.Hour)}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"DurationPolicy": RawItemOf(2 * time.Hour)}), wantJSON: `{"Settings": {"DurationPolicy": {"Value": "2h0m0s"}}}`, - wantBack: NewSnapshot(map[Key]RawItem{"DurationPolicy": RawItemOf("2h0m0s")}), + wantBack: NewSnapshot(map[pkey.Key]RawItem{"DurationPolicy": RawItemOf("2h0m0s")}), }, { name: "Empty/With-Summary", snapshot: NewSnapshot( - map[Key]RawItem{}, + map[pkey.Key]RawItem{}, SummaryWith(CurrentUserScope, NewNamedOrigin("TestSource", DeviceScope)), ), wantJSON: `{"Summary": {"Origin": {"Name": "TestSource", "Scope": "Device"}, "Scope": "User"}}`, @@ -514,7 +515,7 @@ func TestMarshalUnmarshalSnapshot(t *testing.T) { { name: "Setting/With-Summary", snapshot: NewSnapshot( - map[Key]RawItem{"PolicySetting": RawItemOf(uint64(42))}, + map[pkey.Key]RawItem{"PolicySetting": RawItemOf(uint64(42))}, SummaryWith(CurrentUserScope, NewNamedOrigin("TestSource", DeviceScope)), ), wantJSON: `{ @@ -525,7 +526,7 @@ func TestMarshalUnmarshalSnapshot(t *testing.T) { { name: "Settings/With-Origins", snapshot: NewSnapshot( - map[Key]RawItem{ + map[pkey.Key]RawItem{ "SettingA": RawItemWith(uint64(42), nil, NewNamedOrigin("SourceA", DeviceScope)), "SettingB": RawItemWith("B", nil, NewNamedOrigin("SourceB", CurrentProfileScope)), "SettingC": RawItemWith(true, nil, NewNamedOrigin("SourceC", CurrentUserScope)), diff --git a/util/syspolicy/source/env_policy_store.go b/util/syspolicy/source/env_policy_store.go index 299132b4e..be363b79a 100644 --- a/util/syspolicy/source/env_policy_store.go +++ b/util/syspolicy/source/env_policy_store.go @@ -11,6 +11,7 @@ import ( "strings" "unicode/utf8" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) @@ -22,7 +23,7 @@ var _ Store = (*EnvPolicyStore)(nil) type EnvPolicyStore struct{} // ReadString implements [Store]. -func (s *EnvPolicyStore) ReadString(key setting.Key) (string, error) { +func (s *EnvPolicyStore) ReadString(key pkey.Key) (string, error) { _, str, err := s.lookupSettingVariable(key) if err != nil { return "", err @@ -31,7 +32,7 @@ func (s *EnvPolicyStore) ReadString(key setting.Key) (string, error) { } // ReadUInt64 implements [Store]. -func (s *EnvPolicyStore) ReadUInt64(key setting.Key) (uint64, error) { +func (s *EnvPolicyStore) ReadUInt64(key pkey.Key) (uint64, error) { name, str, err := s.lookupSettingVariable(key) if err != nil { return 0, err @@ -47,7 +48,7 @@ func (s *EnvPolicyStore) ReadUInt64(key setting.Key) (uint64, error) { } // ReadBoolean implements [Store]. -func (s *EnvPolicyStore) ReadBoolean(key setting.Key) (bool, error) { +func (s *EnvPolicyStore) ReadBoolean(key pkey.Key) (bool, error) { name, str, err := s.lookupSettingVariable(key) if err != nil { return false, err @@ -63,7 +64,7 @@ func (s *EnvPolicyStore) ReadBoolean(key setting.Key) (bool, error) { } // ReadStringArray implements [Store]. 
-func (s *EnvPolicyStore) ReadStringArray(key setting.Key) ([]string, error) { +func (s *EnvPolicyStore) ReadStringArray(key pkey.Key) ([]string, error) { _, str, err := s.lookupSettingVariable(key) if err != nil || str == "" { return nil, err @@ -79,7 +80,7 @@ func (s *EnvPolicyStore) ReadStringArray(key setting.Key) ([]string, error) { return res[0:dst], nil } -func (s *EnvPolicyStore) lookupSettingVariable(key setting.Key) (name, value string, err error) { +func (s *EnvPolicyStore) lookupSettingVariable(key pkey.Key) (name, value string, err error) { name, err = keyToEnvVarName(key) if err != nil { return "", "", err @@ -103,7 +104,7 @@ var ( // // It's fine to use this in [EnvPolicyStore] without caching variable names since it's not a hot path. // [EnvPolicyStore] is not a [Changeable] policy store, so the conversion will only happen once. -func keyToEnvVarName(key setting.Key) (string, error) { +func keyToEnvVarName(key pkey.Key) (string, error) { if len(key) == 0 { return "", errEmptyKey } @@ -135,7 +136,7 @@ func keyToEnvVarName(key setting.Key) (string, error) { } case isDigit(c): split = currentWord.Len() > 0 && !isDigit(key[i-1]) - case c == setting.KeyPathSeparator: + case c == pkey.KeyPathSeparator: words = append(words, currentWord.String()) currentWord.Reset() continue diff --git a/util/syspolicy/source/env_policy_store_test.go b/util/syspolicy/source/env_policy_store_test.go index 9eacf6378..3255095b2 100644 --- a/util/syspolicy/source/env_policy_store_test.go +++ b/util/syspolicy/source/env_policy_store_test.go @@ -11,13 +11,14 @@ import ( "strconv" "testing" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) func TestKeyToEnvVarName(t *testing.T) { tests := []struct { name string - key setting.Key + key pkey.Key want string // suffix after "TS_DEBUGSYSPOLICY_" wantErr error }{ @@ -166,7 +167,7 @@ func TestEnvPolicyStore(t *testing.T) { } tests := []struct { name string - key setting.Key + key pkey.Key lookup func(string) (string, bool) want any wantErr error diff --git a/util/syspolicy/source/policy_reader.go b/util/syspolicy/source/policy_reader.go index a1bd3147e..e6360e5f8 100644 --- a/util/syspolicy/source/policy_reader.go +++ b/util/syspolicy/source/policy_reader.go @@ -16,6 +16,7 @@ import ( "tailscale.com/util/set" "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/internal/metrics" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) @@ -138,9 +139,9 @@ func (r *Reader) reload(force bool) (*setting.Snapshot, error) { metrics.Reset(r.origin) - var m map[setting.Key]setting.RawItem + var m map[pkey.Key]setting.RawItem if lastPolicyCount := r.lastPolicy.Len(); lastPolicyCount > 0 { - m = make(map[setting.Key]setting.RawItem, lastPolicyCount) + m = make(map[pkey.Key]setting.RawItem, lastPolicyCount) } for _, s := range r.settings { if !r.origin.Scope().IsConfigurableSetting(s) { diff --git a/util/syspolicy/source/policy_reader_test.go b/util/syspolicy/source/policy_reader_test.go index 57676e67d..06246a209 100644 --- a/util/syspolicy/source/policy_reader_test.go +++ b/util/syspolicy/source/policy_reader_test.go @@ -9,6 +9,7 @@ import ( "time" "tailscale.com/util/must" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) @@ -72,7 +73,7 @@ func TestReaderLifecycle(t *testing.T) { initWant: setting.NewSnapshot(nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), addStrings: []TestSetting[string]{TestSettingOf("StringValue", "S1")}, addStringLists: 
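EnvPolicyStore resolves a pkey.Key to an environment variable name; the test above only pins the TS_DEBUGSYSPOLICY_ prefix, so the exact suffix below is an assumption for illustration:

package example

import (
	"fmt"

	"tailscale.com/util/syspolicy/pkey"
	"tailscale.com/util/syspolicy/source"
)

func readFromEnvStore() {
	var s source.EnvPolicyStore
	// Assuming "AuthKey" maps to a TS_DEBUGSYSPOLICY_AUTH_KEY variable per
	// keyToEnvVarName's word splitting, ReadString consults that variable.
	v, err := s.ReadString(pkey.AuthKey)
	fmt.Println(v, err)
}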
[]TestSetting[[]string]{TestSettingOf("StringListValue", []string{"S1", "S2", "S3"})}, - newWant: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + newWant: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "StringValue": setting.RawItemWith("S1", nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), "StringListValue": setting.RawItemWith([]string{"S1", "S2", "S3"}, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), }, setting.NewNamedOrigin("Test", setting.DeviceScope)), @@ -136,7 +137,7 @@ func TestReaderLifecycle(t *testing.T) { TestSettingOf("PreferenceOptionValue", "always"), TestSettingOf("VisibilityValue", "show"), }, - initWant: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + initWant: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "DurationValue": setting.RawItemWith(must.Get(time.ParseDuration("2h30m")), nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), "PreferenceOptionValue": setting.RawItemWith(setting.AlwaysByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), "VisibilityValue": setting.RawItemWith(setting.VisibleByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), @@ -165,7 +166,7 @@ func TestReaderLifecycle(t *testing.T) { initUInt64s: []TestSetting[uint64]{ TestSettingOf[uint64]("VisibilityValue", 42), // type mismatch }, - initWant: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + initWant: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "DurationValue1": setting.RawItemWith(nil, setting.NewErrorText("time: invalid duration \"soon\""), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), "DurationValue2": setting.RawItemWith(nil, setting.NewErrorText("bang!"), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), "PreferenceOptionValue": setting.RawItemWith(setting.ShowChoiceByPolicy, nil, setting.NewNamedOrigin("Test", setting.CurrentUserScope)), @@ -277,7 +278,7 @@ func TestReadingSession(t *testing.T) { t.Fatalf("the session was closed prematurely") } - want := setting.NewSnapshot(map[setting.Key]setting.RawItem{ + want := setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "StringValue": setting.RawItemWith("S1", nil, origin), }, origin) if got := session.GetSettings(); !got.Equal(want) { diff --git a/util/syspolicy/source/policy_source.go b/util/syspolicy/source/policy_source.go index 7f2821b59..c4774217c 100644 --- a/util/syspolicy/source/policy_source.go +++ b/util/syspolicy/source/policy_source.go @@ -13,6 +13,7 @@ import ( "io" "tailscale.com/types/lazy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) @@ -31,19 +32,19 @@ type Store interface { // ReadString returns the value of a [setting.StringValue] with the specified key, // an [setting.ErrNotConfigured] if the policy setting is not configured, or // an error on failure. - ReadString(key setting.Key) (string, error) + ReadString(key pkey.Key) (string, error) // ReadUInt64 returns the value of a [setting.IntegerValue] with the specified key, // an [setting.ErrNotConfigured] if the policy setting is not configured, or // an error on failure. - ReadUInt64(key setting.Key) (uint64, error) + ReadUInt64(key pkey.Key) (uint64, error) // ReadBoolean returns the value of a [setting.BooleanValue] with the specified key, // an [setting.ErrNotConfigured] if the policy setting is not configured, or // an error on failure. 
- ReadBoolean(key setting.Key) (bool, error) + ReadBoolean(key pkey.Key) (bool, error) // ReadStringArray returns the value of a [setting.StringListValue] with the specified key, // an [setting.ErrNotConfigured] if the policy setting is not configured, or // an error on failure. - ReadStringArray(key setting.Key) ([]string, error) + ReadStringArray(key pkey.Key) ([]string, error) } // Lockable is an optional interface that [Store] implementations may support. diff --git a/util/syspolicy/source/policy_store_windows.go b/util/syspolicy/source/policy_store_windows.go index 621701e84..f97b17f3a 100644 --- a/util/syspolicy/source/policy_store_windows.go +++ b/util/syspolicy/source/policy_store_windows.go @@ -13,6 +13,7 @@ import ( "golang.org/x/sys/windows/registry" "tailscale.com/util/set" "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/winutil/gp" ) @@ -251,7 +252,7 @@ func (ps *PlatformPolicyStore) onChange() { // ReadString retrieves a string policy with the specified key. // It returns [setting.ErrNotConfigured] if the policy setting does not exist. -func (ps *PlatformPolicyStore) ReadString(key setting.Key) (val string, err error) { +func (ps *PlatformPolicyStore) ReadString(key pkey.Key) (val string, err error) { return getPolicyValue(ps, key, func(key registry.Key, valueName string) (string, error) { val, _, err := key.GetStringValue(valueName) @@ -261,7 +262,7 @@ func (ps *PlatformPolicyStore) ReadString(key setting.Key) (val string, err erro // ReadUInt64 retrieves an integer policy with the specified key. // It returns [setting.ErrNotConfigured] if the policy setting does not exist. -func (ps *PlatformPolicyStore) ReadUInt64(key setting.Key) (uint64, error) { +func (ps *PlatformPolicyStore) ReadUInt64(key pkey.Key) (uint64, error) { return getPolicyValue(ps, key, func(key registry.Key, valueName string) (uint64, error) { val, _, err := key.GetIntegerValue(valueName) @@ -271,7 +272,7 @@ func (ps *PlatformPolicyStore) ReadUInt64(key setting.Key) (uint64, error) { // ReadBoolean retrieves a boolean policy with the specified key. // It returns [setting.ErrNotConfigured] if the policy setting does not exist. -func (ps *PlatformPolicyStore) ReadBoolean(key setting.Key) (bool, error) { +func (ps *PlatformPolicyStore) ReadBoolean(key pkey.Key) (bool, error) { return getPolicyValue(ps, key, func(key registry.Key, valueName string) (bool, error) { val, _, err := key.GetIntegerValue(valueName) @@ -283,8 +284,8 @@ func (ps *PlatformPolicyStore) ReadBoolean(key setting.Key) (bool, error) { } // ReadString retrieves a multi-string policy with the specified key. -// It returns [setting.ErrNotConfigured] if the policy setting does not exist. -func (ps *PlatformPolicyStore) ReadStringArray(key setting.Key) ([]string, error) { +// It returns [setting.ErrNotConfigured] if the policy setting does not exist. +func (ps *PlatformPolicyStore) ReadStringArray(key pkey.Key) ([]string, error) { return getPolicyValue(ps, key, func(key registry.Key, valueName string) ([]string, error) { val, _, err := key.GetStringsValue(valueName) @@ -322,25 +323,25 @@ func (ps *PlatformPolicyStore) ReadStringArray(key setting.Key) ([]string, error }) } -// splitSettingKey extracts the registry key name and value name from a [setting.Key]. -// The [setting.Key] format allows grouping settings into nested categories using one -// or more [setting.KeyPathSeparator]s in the path.
How individual policy settings are +// splitSettingKey extracts the registry key name and value name from a [pkey.Key]. +// The [pkey.Key] format allows grouping settings into nested categories using one +// or more [pkey.KeyPathSeparator]s in the path. How individual policy settings are // stored is an implementation detail of each [Store]. In the [PlatformPolicyStore] // for Windows, we map nested policy categories onto the Registry key hierarchy. -// The last component after a [setting.KeyPathSeparator] is treated as the value name, +// The last component after a [pkey.KeyPathSeparator] is treated as the value name, // while everything preceding it is considered a subpath (relative to the {HKLM,HKCU}\Software\Policies\Tailscale key). -// If there are no [setting.KeyPathSeparator]s in the key, the policy setting value +// If there are no [pkey.KeyPathSeparator]s in the key, the policy setting value // is meant to be stored directly under {HKLM,HKCU}\Software\Policies\Tailscale. -func splitSettingKey(key setting.Key) (path, valueName string) { - if idx := strings.LastIndexByte(string(key), setting.KeyPathSeparator); idx != -1 { - path = strings.ReplaceAll(string(key[:idx]), string(setting.KeyPathSeparator), `\`) +func splitSettingKey(key pkey.Key) (path, valueName string) { + if idx := strings.LastIndexByte(string(key), pkey.KeyPathSeparator); idx != -1 { + path = strings.ReplaceAll(string(key[:idx]), string(pkey.KeyPathSeparator), `\`) valueName = string(key[idx+1:]) return path, valueName } return "", string(key) } -func getPolicyValue[T any](ps *PlatformPolicyStore, key setting.Key, getter registryValueGetter[T]) (T, error) { +func getPolicyValue[T any](ps *PlatformPolicyStore, key pkey.Key, getter registryValueGetter[T]) (T, error) { var zero T ps.mu.Lock() diff --git a/util/syspolicy/source/policy_store_windows_test.go b/util/syspolicy/source/policy_store_windows_test.go index 33f85dc0b..4ab1da805 100644 --- a/util/syspolicy/source/policy_store_windows_test.go +++ b/util/syspolicy/source/policy_store_windows_test.go @@ -19,6 +19,7 @@ import ( "tailscale.com/tstest" "tailscale.com/util/cibuild" "tailscale.com/util/mak" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/winutil" "tailscale.com/util/winutil/gp" @@ -31,7 +32,7 @@ import ( type subkeyStrings []string type testPolicyValue struct { - name setting.Key + name pkey.Key value any } @@ -100,7 +101,7 @@ func TestReadPolicyStore(t *testing.T) { t.Skipf("test requires running as elevated user") } tests := []struct { - name setting.Key + name pkey.Key newValue any legacyValue any want any @@ -269,7 +270,7 @@ func TestPolicyStoreChangeNotifications(t *testing.T) { func TestSplitSettingKey(t *testing.T) { tests := []struct { name string - key setting.Key + key pkey.Key wantPath string wantValue string }{ diff --git a/util/syspolicy/source/test_store.go b/util/syspolicy/source/test_store.go index efaf4cd5a..ddec9efbb 100644 --- a/util/syspolicy/source/test_store.go +++ b/util/syspolicy/source/test_store.go @@ -12,6 +12,7 @@ import ( "tailscale.com/util/mak" "tailscale.com/util/set" "tailscale.com/util/slicesx" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/testenv" ) @@ -31,7 +32,7 @@ type TestValueType interface { // TestSetting is a policy setting in a [TestStore]. type TestSetting[T TestValueType] struct { // Key is the setting's unique identifier. 
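The registry mapping that splitSettingKey implements is easiest to see with concrete keys. An in-package illustration (it would have to live alongside the Windows store, and the grouped key is hypothetical since the built-in keys are flat):

// In package source, GOOS=windows:
func splitSettingKeyExamples() {
	// Everything before the last '/' becomes a subkey path under
	// {HKLM,HKCU}\Software\Policies\Tailscale; the last component is the value name.
	path, valueName := splitSettingKey(pkey.Key("Category/Subcategory/SettingName"))
	fmt.Println(path, valueName) // `Category\Subcategory` and "SettingName"

	// Flat keys have no subkey path; the value sits directly under ...\Policies\Tailscale.
	path, valueName = splitSettingKey(pkey.Key("ExitNodeID"))
	fmt.Println(path, valueName) // "" and "ExitNodeID"
}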
- Key setting.Key + Key pkey.Key // Error is the error to be returned by the [TestStore] when reading // a policy setting with the specified key. Error error @@ -43,20 +44,20 @@ type TestSetting[T TestValueType] struct { // TestSettingOf returns a [TestSetting] representing a policy setting // configured with the specified key and value. -func TestSettingOf[T TestValueType](key setting.Key, value T) TestSetting[T] { +func TestSettingOf[T TestValueType](key pkey.Key, value T) TestSetting[T] { return TestSetting[T]{Key: key, Value: value} } // TestSettingWithError returns a [TestSetting] representing a policy setting // with the specified key and error. -func TestSettingWithError[T TestValueType](key setting.Key, err error) TestSetting[T] { +func TestSettingWithError[T TestValueType](key pkey.Key, err error) TestSetting[T] { return TestSetting[T]{Key: key, Error: err} } // testReadOperation describes a single policy setting read operation. type testReadOperation struct { // Key is the setting's unique identifier. - Key setting.Key + Key pkey.Key // Type is a value type of a read operation. // [setting.BooleanValue], [setting.IntegerValue], [setting.StringValue] or [setting.StringListValue] Type setting.Type @@ -65,7 +66,7 @@ type testReadOperation struct { // TestExpectedReads is the number of read operations with the specified details. type TestExpectedReads struct { // Key is the setting's unique identifier. - Key setting.Key + Key pkey.Key // Type is a value type of a read operation. // [setting.BooleanValue], [setting.IntegerValue], [setting.StringValue] or [setting.StringListValue] Type setting.Type @@ -87,8 +88,8 @@ type TestStore struct { storeLockCount atomic.Int32 mu sync.RWMutex - suspendCount int // change callback are suspended if > 0 - mr, mw map[setting.Key]any // maps for reading and writing; they're the same unless the store is suspended. + suspendCount int // change callback are suspended if > 0 + mr, mw map[pkey.Key]any // maps for reading and writing; they're the same unless the store is suspended. cbs set.HandleSet[func()] closed bool @@ -99,7 +100,7 @@ type TestStore struct { // NewTestStore returns a new [TestStore]. // The tb will be used to report coding errors detected by the [TestStore]. func NewTestStore(tb testenv.TB) *TestStore { - m := make(map[setting.Key]any) + m := make(map[pkey.Key]any) store := &TestStore{ tb: tb, done: make(chan struct{}), @@ -162,7 +163,7 @@ func (s *TestStore) IsEmpty() bool { } // ReadString implements [Store]. -func (s *TestStore) ReadString(key setting.Key) (string, error) { +func (s *TestStore) ReadString(key pkey.Key) (string, error) { defer s.recordRead(key, setting.StringValue) s.mu.RLock() defer s.mu.RUnlock() @@ -181,7 +182,7 @@ func (s *TestStore) ReadString(key setting.Key) (string, error) { } // ReadUInt64 implements [Store]. -func (s *TestStore) ReadUInt64(key setting.Key) (uint64, error) { +func (s *TestStore) ReadUInt64(key pkey.Key) (uint64, error) { defer s.recordRead(key, setting.IntegerValue) s.mu.RLock() defer s.mu.RUnlock() @@ -200,7 +201,7 @@ func (s *TestStore) ReadUInt64(key setting.Key) (uint64, error) { } // ReadBoolean implements [Store]. -func (s *TestStore) ReadBoolean(key setting.Key) (bool, error) { +func (s *TestStore) ReadBoolean(key pkey.Key) (bool, error) { defer s.recordRead(key, setting.BooleanValue) s.mu.RLock() defer s.mu.RUnlock() @@ -219,7 +220,7 @@ func (s *TestStore) ReadBoolean(key setting.Key) (bool, error) { } // ReadStringArray implements [Store]. 
-func (s *TestStore) ReadStringArray(key setting.Key) ([]string, error) { +func (s *TestStore) ReadStringArray(key pkey.Key) ([]string, error) { defer s.recordRead(key, setting.StringListValue) s.mu.RLock() defer s.mu.RUnlock() @@ -237,7 +238,7 @@ func (s *TestStore) ReadStringArray(key setting.Key) ([]string, error) { return slice, nil } -func (s *TestStore) recordRead(key setting.Key, typ setting.Type) { +func (s *TestStore) recordRead(key pkey.Key, typ setting.Type) { s.readsMu.Lock() op := testReadOperation{key, typ} num := s.reads[op] @@ -399,7 +400,7 @@ func (s *TestStore) SetStringLists(settings ...TestSetting[[]string]) { } // Delete deletes the specified settings from s. -func (s *TestStore) Delete(keys ...setting.Key) { +func (s *TestStore) Delete(keys ...pkey.Key) { s.storeLock.Lock() for _, key := range keys { s.mu.Lock() diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index 6555a58ac..0ac1d2517 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -17,6 +17,7 @@ import ( "time" "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" @@ -58,7 +59,7 @@ func MustRegisterStoreForTest(tb testenv.TB, name string, scope setting.PolicySc // HasAnyOf returns whether at least one of the specified policy settings is configured, // or an error if no keys are provided or the check fails. -func HasAnyOf(keys ...Key) (bool, error) { +func HasAnyOf(keys ...pkey.Key) (bool, error) { if len(keys) == 0 { return false, errors.New("at least one key must be specified") } @@ -82,25 +83,25 @@ func HasAnyOf(keys ...Key) (bool, error) { // GetString returns a string policy setting with the specified key, // or defaultValue if it does not exist. -func GetString(key Key, defaultValue string) (string, error) { +func GetString(key pkey.Key, defaultValue string) (string, error) { return getCurrentPolicySettingValue(key, defaultValue) } // GetUint64 returns a numeric policy setting with the specified key, // or defaultValue if it does not exist. -func GetUint64(key Key, defaultValue uint64) (uint64, error) { +func GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) { return getCurrentPolicySettingValue(key, defaultValue) } // GetBoolean returns a boolean policy setting with the specified key, // or defaultValue if it does not exist. -func GetBoolean(key Key, defaultValue bool) (bool, error) { +func GetBoolean(key pkey.Key, defaultValue bool) (bool, error) { return getCurrentPolicySettingValue(key, defaultValue) } // GetStringArray returns a multi-string policy setting with the specified key, // or defaultValue if it does not exist. -func GetStringArray(key Key, defaultValue []string) ([]string, error) { +func GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) { return getCurrentPolicySettingValue(key, defaultValue) } @@ -110,14 +111,14 @@ func GetStringArray(key Key, defaultValue []string) ([]string, error) { // the authority to set. It describes user-decides/always/never options, where // "always" and "never" remove the user's ability to make a selection. If not // present or set to a different value, "user-decides" is the default. 
-func GetPreferenceOption(name Key) (setting.PreferenceOption, error) { +func GetPreferenceOption(name pkey.Key) (setting.PreferenceOption, error) { return getCurrentPolicySettingValue(name, setting.ShowChoiceByPolicy) } // GetPreferenceOptionOrDefault is like [GetPreferenceOption], but allows // specifying a default value to return if the policy setting is not configured. // It can be used in situations where "user-decides" is not the default. -func GetPreferenceOptionOrDefault(name Key, defaultValue setting.PreferenceOption) (setting.PreferenceOption, error) { +func GetPreferenceOptionOrDefault(name pkey.Key, defaultValue setting.PreferenceOption) (setting.PreferenceOption, error) { return getCurrentPolicySettingValue(name, defaultValue) } @@ -126,7 +127,7 @@ func GetPreferenceOptionOrDefault(name Key, defaultValue setting.PreferenceOptio // for UI elements. The registry value should be a string set to "show" (return // true) or "hide" (return true). If not present or set to a different value, // "show" (return false) is the default. -func GetVisibility(name Key) (setting.Visibility, error) { +func GetVisibility(name pkey.Key) (setting.Visibility, error) { return getCurrentPolicySettingValue(name, setting.VisibleByPolicy) } @@ -135,7 +136,7 @@ func GetVisibility(name Key) (setting.Visibility, error) { // action. The registry value should be a string that time.ParseDuration // understands. If the registry value is "" or can not be processed, // defaultValue is returned instead. -func GetDuration(name Key, defaultValue time.Duration) (time.Duration, error) { +func GetDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { d, err := getCurrentPolicySettingValue(name, defaultValue) if err != nil { return d, err @@ -160,7 +161,7 @@ func RegisterChangeCallback(cb rsop.PolicyChangeCallback) (unregister func(), er // specified by its key from the [rsop.Policy] of the [setting.DefaultScope]. It // returns def if the policy setting is not configured, or an error if it has // an error or could not be converted to the specified type T. 
-func getCurrentPolicySettingValue[T setting.ValueType](key Key, def T) (T, error) { +func getCurrentPolicySettingValue[T setting.ValueType](key pkey.Key, def T) (T, error) { effective, err := rsop.PolicyFor(setting.DefaultScope()) if err != nil { return def, err diff --git a/util/syspolicy/syspolicy_test.go b/util/syspolicy/syspolicy_test.go index fc01f3645..5e822a0b7 100644 --- a/util/syspolicy/syspolicy_test.go +++ b/util/syspolicy/syspolicy_test.go @@ -12,6 +12,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/internal/metrics" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" "tailscale.com/util/testenv" @@ -22,7 +23,7 @@ var someOtherError = errors.New("error other than not found") func TestGetString(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue string handlerError error defaultValue string @@ -32,7 +33,7 @@ func TestGetString(t *testing.T) { }{ { name: "read existing value", - key: AdminConsoleVisibility, + key: pkey.AdminConsoleVisibility, handlerValue: "hide", wantValue: "hide", wantMetrics: []metrics.TestState{ @@ -42,13 +43,13 @@ func TestGetString(t *testing.T) { }, { name: "read non-existing value", - key: EnableServerMode, + key: pkey.EnableServerMode, handlerError: ErrNotConfigured, wantError: nil, }, { name: "read non-existing value, non-blank default", - key: EnableServerMode, + key: pkey.EnableServerMode, handlerError: ErrNotConfigured, defaultValue: "test", wantValue: "test", @@ -56,7 +57,7 @@ func TestGetString(t *testing.T) { }, { name: "reading value returns other error", - key: NetworkDevicesVisibility, + key: pkey.NetworkDevicesVisibility, handlerError: someOtherError, wantError: someOtherError, wantMetrics: []metrics.TestState{ @@ -103,7 +104,7 @@ func TestGetString(t *testing.T) { func TestGetUint64(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue uint64 handlerError error defaultValue uint64 @@ -112,27 +113,27 @@ func TestGetUint64(t *testing.T) { }{ { name: "read existing value", - key: LogSCMInteractions, + key: pkey.LogSCMInteractions, handlerValue: 1, wantValue: 1, }, { name: "read non-existing value", - key: LogSCMInteractions, + key: pkey.LogSCMInteractions, handlerValue: 0, handlerError: ErrNotConfigured, wantValue: 0, }, { name: "read non-existing value, non-zero default", - key: LogSCMInteractions, + key: pkey.LogSCMInteractions, defaultValue: 2, handlerError: ErrNotConfigured, wantValue: 2, }, { name: "reading value returns other error", - key: FlushDNSOnSessionUnlock, + key: pkey.FlushDNSOnSessionUnlock, handlerError: someOtherError, wantError: someOtherError, }, @@ -169,7 +170,7 @@ func TestGetUint64(t *testing.T) { func TestGetBoolean(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue bool handlerError error defaultValue bool @@ -179,7 +180,7 @@ func TestGetBoolean(t *testing.T) { }{ { name: "read existing value", - key: FlushDNSOnSessionUnlock, + key: pkey.FlushDNSOnSessionUnlock, handlerValue: true, wantValue: true, wantMetrics: []metrics.TestState{ @@ -189,14 +190,14 @@ func TestGetBoolean(t *testing.T) { }, { name: "read non-existing value", - key: LogSCMInteractions, + key: pkey.LogSCMInteractions, handlerValue: false, handlerError: ErrNotConfigured, wantValue: false, }, { name: "reading value returns other error", - key: FlushDNSOnSessionUnlock, + key: pkey.FlushDNSOnSessionUnlock, handlerError: 
someOtherError, wantError: someOtherError, // expect error... defaultValue: true, @@ -245,7 +246,7 @@ func TestGetBoolean(t *testing.T) { func TestGetPreferenceOption(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue string handlerError error wantValue setting.PreferenceOption @@ -254,7 +255,7 @@ func TestGetPreferenceOption(t *testing.T) { }{ { name: "always by policy", - key: EnableIncomingConnections, + key: pkey.EnableIncomingConnections, handlerValue: "always", wantValue: setting.AlwaysByPolicy, wantMetrics: []metrics.TestState{ @@ -264,7 +265,7 @@ func TestGetPreferenceOption(t *testing.T) { }, { name: "never by policy", - key: EnableIncomingConnections, + key: pkey.EnableIncomingConnections, handlerValue: "never", wantValue: setting.NeverByPolicy, wantMetrics: []metrics.TestState{ @@ -274,7 +275,7 @@ func TestGetPreferenceOption(t *testing.T) { }, { name: "use default", - key: EnableIncomingConnections, + key: pkey.EnableIncomingConnections, handlerValue: "", wantValue: setting.ShowChoiceByPolicy, wantMetrics: []metrics.TestState{ @@ -284,13 +285,13 @@ func TestGetPreferenceOption(t *testing.T) { }, { name: "read non-existing value", - key: EnableIncomingConnections, + key: pkey.EnableIncomingConnections, handlerError: ErrNotConfigured, wantValue: setting.ShowChoiceByPolicy, }, { name: "other error is returned", - key: EnableIncomingConnections, + key: pkey.EnableIncomingConnections, handlerError: someOtherError, wantValue: setting.ShowChoiceByPolicy, wantError: someOtherError, @@ -338,7 +339,7 @@ func TestGetPreferenceOption(t *testing.T) { func TestGetVisibility(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue string handlerError error wantValue setting.Visibility @@ -347,7 +348,7 @@ func TestGetVisibility(t *testing.T) { }{ { name: "hidden by policy", - key: AdminConsoleVisibility, + key: pkey.AdminConsoleVisibility, handlerValue: "hide", wantValue: setting.HiddenByPolicy, wantMetrics: []metrics.TestState{ @@ -357,7 +358,7 @@ func TestGetVisibility(t *testing.T) { }, { name: "visibility default", - key: AdminConsoleVisibility, + key: pkey.AdminConsoleVisibility, handlerValue: "show", wantValue: setting.VisibleByPolicy, wantMetrics: []metrics.TestState{ @@ -367,14 +368,14 @@ func TestGetVisibility(t *testing.T) { }, { name: "read non-existing value", - key: AdminConsoleVisibility, + key: pkey.AdminConsoleVisibility, handlerValue: "show", handlerError: ErrNotConfigured, wantValue: setting.VisibleByPolicy, }, { name: "other error is returned", - key: AdminConsoleVisibility, + key: pkey.AdminConsoleVisibility, handlerValue: "show", handlerError: someOtherError, wantValue: setting.VisibleByPolicy, @@ -423,7 +424,7 @@ func TestGetVisibility(t *testing.T) { func TestGetDuration(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue string handlerError error defaultValue time.Duration @@ -433,7 +434,7 @@ func TestGetDuration(t *testing.T) { }{ { name: "read existing value", - key: KeyExpirationNoticeTime, + key: pkey.KeyExpirationNoticeTime, handlerValue: "2h", wantValue: 2 * time.Hour, defaultValue: 24 * time.Hour, @@ -444,7 +445,7 @@ func TestGetDuration(t *testing.T) { }, { name: "invalid duration value", - key: KeyExpirationNoticeTime, + key: pkey.KeyExpirationNoticeTime, handlerValue: "-20", wantValue: 24 * time.Hour, wantError: errors.New(`time: missing unit in duration "-20"`), @@ -456,21 +457,21 @@ func TestGetDuration(t *testing.T) { }, { name: "read non-existing value", - 
key: KeyExpirationNoticeTime, + key: pkey.KeyExpirationNoticeTime, handlerError: ErrNotConfigured, wantValue: 24 * time.Hour, defaultValue: 24 * time.Hour, }, { name: "read non-existing value different default", - key: KeyExpirationNoticeTime, + key: pkey.KeyExpirationNoticeTime, handlerError: ErrNotConfigured, wantValue: 0 * time.Second, defaultValue: 0 * time.Second, }, { name: "other error is returned", - key: KeyExpirationNoticeTime, + key: pkey.KeyExpirationNoticeTime, handlerError: someOtherError, wantValue: 24 * time.Hour, wantError: someOtherError, @@ -519,7 +520,7 @@ func TestGetDuration(t *testing.T) { func TestGetStringArray(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue []string handlerError error defaultValue []string @@ -529,7 +530,7 @@ func TestGetStringArray(t *testing.T) { }{ { name: "read existing value", - key: AllowedSuggestedExitNodes, + key: pkey.AllowedSuggestedExitNodes, handlerValue: []string{"foo", "bar"}, wantValue: []string{"foo", "bar"}, wantMetrics: []metrics.TestState{ @@ -539,13 +540,13 @@ func TestGetStringArray(t *testing.T) { }, { name: "read non-existing value", - key: AllowedSuggestedExitNodes, + key: pkey.AllowedSuggestedExitNodes, handlerError: ErrNotConfigured, wantError: nil, }, { name: "read non-existing value, non nil default", - key: AllowedSuggestedExitNodes, + key: pkey.AllowedSuggestedExitNodes, handlerError: ErrNotConfigured, defaultValue: []string{"foo", "bar"}, wantValue: []string{"foo", "bar"}, @@ -553,7 +554,7 @@ func TestGetStringArray(t *testing.T) { }, { name: "reading value returns other error", - key: AllowedSuggestedExitNodes, + key: pkey.AllowedSuggestedExitNodes, handlerError: someOtherError, wantError: someOtherError, wantMetrics: []metrics.TestState{ @@ -607,11 +608,11 @@ func BenchmarkGetString(b *testing.B) { RegisterWellKnownSettingsForTest(b) wantControlURL := "https://login.tailscale.com" - registerSingleSettingStoreForTest(b, source.TestSettingOf(ControlURL, wantControlURL)) + registerSingleSettingStoreForTest(b, source.TestSettingOf(pkey.ControlURL, wantControlURL)) b.ResetTimer() for i := 0; i < b.N; i++ { - gotControlURL, _ := GetString(ControlURL, "https://controlplane.tailscale.com") + gotControlURL, _ := GetString(pkey.ControlURL, "https://controlplane.tailscale.com") if gotControlURL != wantControlURL { b.Fatalf("got %v; want %v", gotControlURL, wantControlURL) } From 921d53904c29761649057bdd2610cd6733dd030e Mon Sep 17 00:00:00 2001 From: Erisa A Date: Mon, 1 Sep 2025 13:02:01 +0100 Subject: [PATCH 1249/1708] CODE_OF_CONDUCT.md: fix duplicate entry (#16814) Remove duplicate entry not present on approved wording Updates #cleanup Signed-off-by: Erisa A --- CODE_OF_CONDUCT.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 3d33bba98..a5877cb11 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -32,8 +32,6 @@ Examples of unacceptable behavior include without limitation: * Distributing or promoting malware; * Other conduct which could reasonably be considered inappropriate in a professional setting. -* Please also see the Tailscale Acceptable Use Policy, available at - [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). Please also see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). 
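[Editor's note, not part of any patch in this series] For readers following the syspolicy changes above: after the migration, policy keys are referenced through the pkey package rather than as bare Key/setting.Key values, while the package-level getters keep their existing signatures. A minimal caller-side sketch of that pattern, mirroring the key name and default used in the BenchmarkGetString hunk above (illustrative only):

    package example

    import (
        "tailscale.com/util/syspolicy"
        "tailscale.com/util/syspolicy/pkey"
    )

    // controlURL reads the ControlURL policy setting, falling back to the
    // given default when the setting is not configured or the read fails.
    func controlURL() string {
        const def = "https://controlplane.tailscale.com"
        url, err := syspolicy.GetString(pkey.ControlURL, def)
        if err != nil {
            // A missing setting returns the default with a nil error; a
            // non-nil error means the read itself failed, so fall back.
            return def
        }
        return url
    }
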
From d05e6dc09e7a36e2b6082ce259e33eb3eecd0c0c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 1 Sep 2025 08:04:17 -0700 Subject: [PATCH 1250/1708] util/syspolicy/policyclient: add policyclient.Client interface, start plumbing This is step 2 of ~4, breaking up #14720 into reviewable chunks, with the aim to make syspolicy be a build-time configurable feature. Step 1 was #16984. In this second step, the util/syspolicy/policyclient package is added with the policyclient.Client interface. This is the interface that's always present (regardless of build tags), and is what code around the tree uses to ask syspolicy/MDM questions. There are two implementations of policyclient.Client for now: 1) NoPolicyClient, which only returns default values. 2) the unexported, temporary 'globalSyspolicy', which is implemented in terms of the global functions we wish to later eliminate. This then starts to plumb around the policyclient.Client to most callers. Future changes will plumb it more. When the last of the global func callers are gone, then we can unexport the global functions and make a proper policyclient.Client type and constructor in the syspolicy package, removing the globalSyspolicy impl out of tsd. The final change will sprinkle build tags in a few more places and lock it in with dependency tests to make sure the dependencies don't later creep back in. Updates #16998 Updates #12614 Change-Id: Ib2c93d15c15c1f2b981464099177cd492d50391c Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 3 +- cmd/tailscale/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 5 +- control/controlclient/direct.go | 10 ++- control/controlclient/sign_supported.go | 10 +-- control/controlclient/sign_unsupported.go | 3 +- ipn/ipnlocal/c2n.go | 2 +- ipn/ipnlocal/local.go | 9 ++- net/dns/manager_windows.go | 4 +- posture/serialnumber_macos.go | 3 +- posture/serialnumber_macos_test.go | 3 +- posture/serialnumber_notmacos.go | 3 +- posture/serialnumber_notmacos_test.go | 3 +- posture/serialnumber_stub.go | 3 +- posture/serialnumber_syspolicy.go | 6 +- posture/serialnumber_test.go | 3 +- tsd/syspolicy_off.go | 12 ++++ tsd/syspolicy_on.go | 41 ++++++++++++ tsd/tsd.go | 7 +++ tsnet/depaware.txt | 5 +- util/syspolicy/policyclient/policyclient.go | 66 ++++++++++++++++++++ util/syspolicy/rsop/change_callbacks.go | 3 +- util/syspolicy/rsop/resultant_policy_test.go | 13 ++-- 25 files changed, 184 insertions(+), 36 deletions(-) create mode 100644 tsd/syspolicy_off.go create mode 100644 tsd/syspolicy_on.go create mode 100644 util/syspolicy/policyclient/policyclient.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index ccea25a8a..0597d5d1f 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -175,6 +175,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/util/syspolicy/rsop tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index a0214575b..40c8abb08 100644 
--- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -951,11 +951,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/set from tailscale.com/cmd/k8s-operator+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ - tailscale.com/util/syspolicy from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy from tailscale.com/ipn+ tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 7f09be33f..cf1691c71 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -196,6 +196,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/util/syspolicy/rsop tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 46efa5b21..f08601f81 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -433,6 +433,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index f1e22efbf..743492904 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -380,12 +380,13 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ - tailscale.com/util/syspolicy from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy from tailscale.com/ipn+ tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from 
tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ - tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/localapi+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ tailscale.com/util/systemd from tailscale.com/control/controlclient+ diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index cee938779..47283a673 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -6,6 +6,7 @@ package controlclient import ( "bufio" "bytes" + "cmp" "context" "encoding/binary" "encoding/json" @@ -53,8 +54,8 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/multierr" "tailscale.com/util/singleflight" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/systemd" "tailscale.com/util/testenv" "tailscale.com/util/zstdframe" @@ -77,6 +78,7 @@ type Direct struct { debugFlags []string skipIPForwardingCheck bool pinger Pinger + polc policyclient.Client // always non-nil popBrowser func(url string) // or nil c2nHandler http.Handler // or nil onClientVersion func(*tailcfg.ClientVersion) // or nil @@ -125,6 +127,7 @@ type Options struct { Clock tstime.Clock Hostinfo *tailcfg.Hostinfo // non-nil passes ownership, nil means to use default using os.Hostname, etc DiscoPublicKey key.DiscoPublic + PolicyClient policyclient.Client // or nil for none Logf logger.Logf HTTPTestClient *http.Client // optional HTTP client to use (for tests only) NoiseTestClient *http.Client // optional HTTP client to use for noise RPCs (tests only) @@ -299,6 +302,7 @@ func NewDirect(opts Options) (*Direct, error) { health: opts.HealthTracker, skipIPForwardingCheck: opts.SkipIPForwardingCheck, pinger: opts.Pinger, + polc: cmp.Or(opts.PolicyClient, policyclient.Client(policyclient.NoPolicyClient{})), popBrowser: opts.PopBrowserURL, onClientVersion: opts.OnClientVersion, onTailnetDefaultAutoUpdate: opts.OnTailnetDefaultAutoUpdate, @@ -617,7 +621,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new return regen, opt.URL, nil, err } - tailnet, err := syspolicy.GetString(pkey.Tailnet, "") + tailnet, err := c.polc.GetString(pkey.Tailnet, "") if err != nil { c.logf("unable to provide Tailnet field in register request. 
err: %v", err) } @@ -647,7 +651,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new AuthKey: authKey, } } - err = signRegisterRequest(&request, c.serverURL, c.serverLegacyKey, machinePrivKey.Public()) + err = signRegisterRequest(c.polc, &request, c.serverURL, c.serverLegacyKey, machinePrivKey.Public()) if err != nil { // If signing failed, clear all related fields request.SignatureType = tailcfg.SignatureNone diff --git a/control/controlclient/sign_supported.go b/control/controlclient/sign_supported.go index fab7cd16b..439e6d36b 100644 --- a/control/controlclient/sign_supported.go +++ b/control/controlclient/sign_supported.go @@ -18,8 +18,8 @@ import ( "github.com/tailscale/certstore" "tailscale.com/tailcfg" "tailscale.com/types/key" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" ) // getMachineCertificateSubject returns the exact name of a Subject that needs @@ -31,8 +31,8 @@ import ( // each RegisterRequest will be unsigned. // // Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA" -func getMachineCertificateSubject() string { - machineCertSubject, _ := syspolicy.GetString(pkey.MachineCertificateSubject, "") +func getMachineCertificateSubject(polc policyclient.Client) string { + machineCertSubject, _ := polc.GetString(pkey.MachineCertificateSubject, "") return machineCertSubject } @@ -137,7 +137,7 @@ func findIdentity(subject string, st certstore.Store) (certstore.Identity, []*x5 // using that identity's public key. In addition to the signature, the full // certificate chain is included so that the control server can validate the // certificate from a copy of the root CA's certificate. -func signRegisterRequest(req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) (err error) { +func signRegisterRequest(polc policyclient.Client, req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) (err error) { defer func() { if err != nil { err = fmt.Errorf("signRegisterRequest: %w", err) @@ -148,7 +148,7 @@ func signRegisterRequest(req *tailcfg.RegisterRequest, serverURL string, serverP return errBadRequest } - machineCertificateSubject := getMachineCertificateSubject() + machineCertificateSubject := getMachineCertificateSubject(polc) if machineCertificateSubject == "" { return errCertificateNotConfigured } diff --git a/control/controlclient/sign_unsupported.go b/control/controlclient/sign_unsupported.go index 5e161dcbc..f6c4ddc62 100644 --- a/control/controlclient/sign_unsupported.go +++ b/control/controlclient/sign_unsupported.go @@ -8,9 +8,10 @@ package controlclient import ( "tailscale.com/tailcfg" "tailscale.com/types/key" + "tailscale.com/util/syspolicy/policyclient" ) // signRegisterRequest on non-supported platforms always returns errNoCertStore. 
-func signRegisterRequest(req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) error { +func signRegisterRequest(polc policyclient.Client, req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) error { return errNoCertStore } diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 8c3bf7b26..b1a780cc1 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -353,7 +353,7 @@ func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http } if choice.ShouldEnable(b.Prefs().PostureChecking()) { - res.SerialNumbers, err = posture.GetSerialNumbers(b.logf) + res.SerialNumbers, err = posture.GetSerialNumbers(b.polc, b.logf) if err != nil { b.logf("c2n: GetSerialNumbers returned error: %v", err) } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index bcfb99b09..61bde31e4 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -109,7 +109,7 @@ import ( "tailscale.com/util/slicesx" "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" - "tailscale.com/util/syspolicy/rsop" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/systemd" "tailscale.com/util/testenv" "tailscale.com/util/usermetric" @@ -203,7 +203,8 @@ type LocalBackend struct { keyLogf logger.Logf // for printing list of peers on change statsLogf logger.Logf // for printing peers stats on change sys *tsd.System - health *health.Tracker // always non-nil + health *health.Tracker // always non-nil + polc policyclient.Client // always non-nil metrics metrics e wgengine.Engine // non-nil; TODO(bradfitz): remove; use sys store ipn.StateStore // non-nil; TODO(bradfitz): remove; use sys @@ -515,6 +516,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo keyLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now), statsLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now), sys: sys, + polc: sys.PolicyClientOrDefault(), health: sys.HealthTracker(), metrics: m, e: e, @@ -1970,7 +1972,7 @@ func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { // sysPolicyChanged is a callback triggered by syspolicy when it detects // a change in one or more syspolicy settings. -func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { +func (b *LocalBackend) sysPolicyChanged(policy policyclient.PolicyChange) { if policy.HasChangedAnyOf(pkey.AlwaysOn, pkey.AlwaysOnOverrideWithReason) { // If the AlwaysOn or the AlwaysOnOverrideWithReason policy has changed, // we should reset the overrideAlwaysOn flag, as the override might @@ -2468,6 +2470,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { DiscoPublicKey: discoPublic, DebugFlags: debugFlags, HealthTracker: b.health, + PolicyClient: b.sys.PolicyClientOrDefault(), Pinger: b, PopBrowserURL: b.tellClientToBrowseToURL, OnClientVersion: b.onClientVersion, diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index d1cec2a00..901ab6dd0 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -31,7 +31,7 @@ import ( "tailscale.com/util/dnsname" "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" - "tailscale.com/util/syspolicy/rsop" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/winutil" ) @@ -508,7 +508,7 @@ func (m *windowsManager) Close() error { // sysPolicyChanged is a callback triggered by [syspolicy] when it detects // a change in one or more syspolicy settings. 
-func (m *windowsManager) sysPolicyChanged(policy *rsop.PolicyChange) { +func (m *windowsManager) sysPolicyChanged(policy policyclient.PolicyChange) { if policy.HasChanged(pkey.EnableDNSRegistration) { m.reconfigureDNSRegistration() } diff --git a/posture/serialnumber_macos.go b/posture/serialnumber_macos.go index 48355d313..18c929107 100644 --- a/posture/serialnumber_macos.go +++ b/posture/serialnumber_macos.go @@ -59,10 +59,11 @@ import ( "strings" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) // GetSerialNumber returns the platform serial sumber as reported by IOKit. -func GetSerialNumbers(_ logger.Logf) ([]string, error) { +func GetSerialNumbers(policyclient.Client, logger.Logf) ([]string, error) { csn := C.getSerialNumber() serialNumber := C.GoString(csn) diff --git a/posture/serialnumber_macos_test.go b/posture/serialnumber_macos_test.go index 9f0ce1c6a..9d9b9f578 100644 --- a/posture/serialnumber_macos_test.go +++ b/posture/serialnumber_macos_test.go @@ -11,6 +11,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/cibuild" + "tailscale.com/util/syspolicy/policyclient" ) func TestGetSerialNumberMac(t *testing.T) { @@ -20,7 +21,7 @@ func TestGetSerialNumberMac(t *testing.T) { t.Skip() } - sns, err := GetSerialNumbers(logger.Discard) + sns, err := GetSerialNumbers(policyclient.NoPolicyClient{}, logger.Discard) if err != nil { t.Fatalf("failed to get serial number: %s", err) } diff --git a/posture/serialnumber_notmacos.go b/posture/serialnumber_notmacos.go index 8b91738b0..132fa08f6 100644 --- a/posture/serialnumber_notmacos.go +++ b/posture/serialnumber_notmacos.go @@ -13,6 +13,7 @@ import ( "github.com/digitalocean/go-smbios/smbios" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) // getByteFromSmbiosStructure retrieves a 8-bit unsigned integer at the given specOffset. @@ -71,7 +72,7 @@ func init() { numOfTables = len(validTables) } -func GetSerialNumbers(logf logger.Logf) ([]string, error) { +func GetSerialNumbers(polc policyclient.Client, logf logger.Logf) ([]string, error) { // Find SMBIOS data in operating system-specific location. rc, _, err := smbios.Stream() if err != nil { diff --git a/posture/serialnumber_notmacos_test.go b/posture/serialnumber_notmacos_test.go index f2a15e037..da5aada85 100644 --- a/posture/serialnumber_notmacos_test.go +++ b/posture/serialnumber_notmacos_test.go @@ -12,6 +12,7 @@ import ( "testing" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) func TestGetSerialNumberNotMac(t *testing.T) { @@ -21,7 +22,7 @@ func TestGetSerialNumberNotMac(t *testing.T) { // Comment out skip for local testing. t.Skip() - sns, err := GetSerialNumbers(logger.Discard) + sns, err := GetSerialNumbers(policyclient.NoPolicyClient{}, logger.Discard) if err != nil { t.Fatalf("failed to get serial number: %s", err) } diff --git a/posture/serialnumber_stub.go b/posture/serialnumber_stub.go index 4cc84fa13..854a0014b 100644 --- a/posture/serialnumber_stub.go +++ b/posture/serialnumber_stub.go @@ -14,9 +14,10 @@ import ( "errors" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) // GetSerialNumber returns client machine serial number(s). 
-func GetSerialNumbers(_ logger.Logf) ([]string, error) { +func GetSerialNumbers(polc policyclient.Client, _ logger.Logf) ([]string, error) { return nil, errors.New("not implemented") } diff --git a/posture/serialnumber_syspolicy.go b/posture/serialnumber_syspolicy.go index 5123d561d..64a154a2c 100644 --- a/posture/serialnumber_syspolicy.go +++ b/posture/serialnumber_syspolicy.go @@ -9,15 +9,15 @@ import ( "fmt" "tailscale.com/types/logger" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" ) // GetSerialNumbers returns the serial number of the device as reported by an // MDM solution. It requires configuration via the DeviceSerialNumber system policy. // This is the only way to gather serial numbers on iOS, tvOS and Android. -func GetSerialNumbers(_ logger.Logf) ([]string, error) { - s, err := syspolicy.GetString(pkey.DeviceSerialNumber, "") +func GetSerialNumbers(polc policyclient.Client, _ logger.Logf) ([]string, error) { + s, err := polc.GetString(pkey.DeviceSerialNumber, "") if err != nil { return nil, fmt.Errorf("failed to get serial number from MDM: %v", err) } diff --git a/posture/serialnumber_test.go b/posture/serialnumber_test.go index fac4392fa..6db3651e2 100644 --- a/posture/serialnumber_test.go +++ b/posture/serialnumber_test.go @@ -7,10 +7,11 @@ import ( "testing" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) func TestGetSerialNumber(t *testing.T) { // ensure GetSerialNumbers is implemented // or covered by a stub on a given platform. - _, _ = GetSerialNumbers(logger.Discard) + _, _ = GetSerialNumbers(policyclient.NoPolicyClient{}, logger.Discard) } diff --git a/tsd/syspolicy_off.go b/tsd/syspolicy_off.go new file mode 100644 index 000000000..221b8f223 --- /dev/null +++ b/tsd/syspolicy_off.go @@ -0,0 +1,12 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_syspolicy + +package tsd + +import ( + "tailscale.com/util/syspolicy/policyclient" +) + +func getPolicyClient() policyclient.Client { return policyclient.NoPolicyClient{} } diff --git a/tsd/syspolicy_on.go b/tsd/syspolicy_on.go new file mode 100644 index 000000000..8d7762bd9 --- /dev/null +++ b/tsd/syspolicy_on.go @@ -0,0 +1,41 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_syspolicy + +package tsd + +import ( + "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" +) + +func getPolicyClient() policyclient.Client { return globalSyspolicy{} } + +// globalSyspolicy implements [policyclient.Client] using the syspolicy global +// functions and global registrations. +// +// TODO: de-global-ify. This implementation using the old global functions +// is an intermediate stage while changing policyclient to be modular. 
+type globalSyspolicy struct{} + +func (globalSyspolicy) GetBoolean(key pkey.Key, defaultValue bool) (bool, error) { + return syspolicy.GetBoolean(key, defaultValue) +} + +func (globalSyspolicy) GetString(key pkey.Key, defaultValue string) (string, error) { + return syspolicy.GetString(key, defaultValue) +} + +func (globalSyspolicy) GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) { + return syspolicy.GetStringArray(key, defaultValue) +} + +func (globalSyspolicy) SetDebugLoggingEnabled(enabled bool) { + syspolicy.SetDebugLoggingEnabled(enabled) +} + +func (globalSyspolicy) RegisterChangeCallback(cb func(policyclient.PolicyChange)) (unregister func(), err error) { + return syspolicy.RegisterChangeCallback(cb) +} diff --git a/tsd/tsd.go b/tsd/tsd.go index ccd804f81..b7194a3d7 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -33,6 +33,7 @@ import ( "tailscale.com/proxymap" "tailscale.com/types/netmap" "tailscale.com/util/eventbus" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/usermetric" "tailscale.com/wgengine" "tailscale.com/wgengine/magicsock" @@ -165,6 +166,12 @@ func (s *System) UserMetricsRegistry() *usermetric.Registry { return &s.userMetricsRegistry } +// PolicyClientOrDefault returns the policy client if set or a no-op default +// otherwise. It always returns a non-nil value. +func (s *System) PolicyClientOrDefault() policyclient.Client { + return getPolicyClient() +} + // SubSystem represents some subsystem of the Tailscale node daemon. // // A subsystem can be set to a value, and then later retrieved. A subsystem diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index bdf90c9a8..f4b0dc775 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -375,12 +375,13 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ - tailscale.com/util/syspolicy from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy from tailscale.com/ipn+ tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ - tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/localapi+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ tailscale.com/util/systemd from tailscale.com/control/controlclient+ diff --git a/util/syspolicy/policyclient/policyclient.go b/util/syspolicy/policyclient/policyclient.go new file mode 100644 index 000000000..0b15599c1 --- /dev/null +++ b/util/syspolicy/policyclient/policyclient.go @@ -0,0 +1,66 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package policyclient contains the minimal syspolicy interface as needed by +// client code using syspolicy. It's the part that's always linked in, even if the rest +// of syspolicy is omitted from the build. 
+package policyclient + +import "tailscale.com/util/syspolicy/pkey" + +// Client is the interface between code making questions about the system policy +// and the actual implementation. +type Client interface { + // GetString returns a string policy setting with the specified key, + // or defaultValue (and a nil error) if it does not exist. + GetString(key pkey.Key, defaultValue string) (string, error) + + // GetStringArray returns a string array policy setting with the specified key, + // or defaultValue (and a nil error) if it does not exist. + GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) + + // GetBoolean returns a boolean policy setting with the specified key, + // or defaultValue (and a nil error) if it does not exist. + GetBoolean(key pkey.Key, defaultValue bool) (bool, error) + + // SetDebugLoggingEnabled enables or disables debug logging for the policy client. + SetDebugLoggingEnabled(enabled bool) + + // RegisterChangeCallback registers a callback function that will be called + // whenever a policy change is detected. It returns a function to unregister + // the callback and an error if the registration fails. + RegisterChangeCallback(cb func(PolicyChange)) (unregister func(), err error) +} + +// PolicyChange is the interface representing a change in policy settings. +type PolicyChange interface { + // HasChanged reports whether the policy setting identified by the given key + // has changed. + HasChanged(pkey.Key) bool + + // HasChangedAnyOf reports whether any of the provided policy settings + // changed in this change. + HasChangedAnyOf(keys ...pkey.Key) bool +} + +// NoPolicyClient is a no-op implementation of [Client] that only +// returns default values. +type NoPolicyClient struct{} + +func (NoPolicyClient) GetBoolean(key pkey.Key, defaultValue bool) (bool, error) { + return defaultValue, nil +} + +func (NoPolicyClient) GetString(key pkey.Key, defaultValue string) (string, error) { + return defaultValue, nil +} + +func (NoPolicyClient) GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) { + return defaultValue, nil +} + +func (NoPolicyClient) SetDebugLoggingEnabled(enabled bool) {} + +func (NoPolicyClient) RegisterChangeCallback(cb func(PolicyChange)) (unregister func(), err error) { + return func() {}, nil +} diff --git a/util/syspolicy/rsop/change_callbacks.go b/util/syspolicy/rsop/change_callbacks.go index 59dba07c6..4e71f683a 100644 --- a/util/syspolicy/rsop/change_callbacks.go +++ b/util/syspolicy/rsop/change_callbacks.go @@ -12,6 +12,7 @@ import ( "tailscale.com/util/set" "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/setting" ) @@ -21,7 +22,7 @@ type Change[T any] struct { } // PolicyChangeCallback is a function called whenever a policy changes. -type PolicyChangeCallback func(*PolicyChange) +type PolicyChangeCallback func(policyclient.PolicyChange) // PolicyChange describes a policy change. 
type PolicyChange struct { diff --git a/util/syspolicy/rsop/resultant_policy_test.go b/util/syspolicy/rsop/resultant_policy_test.go index 2da46a8ca..3ff142119 100644 --- a/util/syspolicy/rsop/resultant_policy_test.go +++ b/util/syspolicy/rsop/resultant_policy_test.go @@ -16,6 +16,7 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "tailscale.com/tstest" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" @@ -602,8 +603,8 @@ func TestChangePolicySetting(t *testing.T) { } // Subscribe to the policy change callback... - policyChanged := make(chan *PolicyChange) - unregister := policy.RegisterChangeCallback(func(pc *PolicyChange) { policyChanged <- pc }) + policyChanged := make(chan policyclient.PolicyChange) + unregister := policy.RegisterChangeCallback(func(pc policyclient.PolicyChange) { policyChanged <- pc }) t.Cleanup(unregister) // ...make the change, and measure the time between initiating the change @@ -611,7 +612,7 @@ func TestChangePolicySetting(t *testing.T) { start := time.Now() const wantValueA = "TestValueA" store.SetStrings(source.TestSettingOf(settingA.Key(), wantValueA)) - change := <-policyChanged + change := (<-policyChanged).(*PolicyChange) gotDelay := time.Since(start) // Ensure there is at least a [policyReloadMinDelay] delay between @@ -653,7 +654,7 @@ func TestChangePolicySetting(t *testing.T) { // The callback should be invoked only once, even though the policy setting // has changed N times. - change = <-policyChanged + change = (<-policyChanged).(*PolicyChange) gotDelay = time.Since(start) gotCallbacks := 1 drain: @@ -853,8 +854,8 @@ func TestReplacePolicySource(t *testing.T) { } // Subscribe to the policy change callback. - policyChanged := make(chan *PolicyChange, 1) - unregister := policy.RegisterChangeCallback(func(pc *PolicyChange) { policyChanged <- pc }) + policyChanged := make(chan policyclient.PolicyChange, 1) + unregister := policy.RegisterChangeCallback(func(pc policyclient.PolicyChange) { policyChanged <- pc }) t.Cleanup(unregister) // Now, let's replace the initial store with the new store. From 12ad630128846919908fbd0b08580864b86bb913 Mon Sep 17 00:00:00 2001 From: David Bond Date: Tue, 2 Sep 2025 13:10:03 +0100 Subject: [PATCH 1251/1708] cmd/k8s-operator: allow specifying replicas for connectors (#16721) This commit adds a `replicas` field to the `Connector` custom resource that allows users to specify the number of desired replicas deployed for their connectors. This allows users to deploy exit nodes, subnet routers and app connectors in a highly available fashion. 
Fixes #14020 Signed-off-by: David Bond --- cmd/k8s-operator/connector.go | 41 +- cmd/k8s-operator/connector_test.go | 121 +++++- .../deploy/crds/tailscale.com_connectors.yaml | 47 ++- .../deploy/manifests/operator.yaml | 47 ++- cmd/k8s-operator/ingress.go | 29 +- cmd/k8s-operator/ingress_test.go | 3 +- cmd/k8s-operator/operator_test.go | 13 + cmd/k8s-operator/sts.go | 354 +++++++++++------- cmd/k8s-operator/svc.go | 20 +- cmd/k8s-operator/testutils_test.go | 84 ++++- k8s-operator/api.md | 25 +- k8s-operator/apis/v1alpha1/types_connector.go | 41 +- .../apis/v1alpha1/zz_generated.deepcopy.go | 32 ++ 13 files changed, 660 insertions(+), 197 deletions(-) diff --git a/cmd/k8s-operator/connector.go b/cmd/k8s-operator/connector.go index 8406a1156..7fa311532 100644 --- a/cmd/k8s-operator/connector.go +++ b/cmd/k8s-operator/connector.go @@ -25,7 +25,6 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -176,6 +175,7 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge if cn.Spec.Hostname != "" { hostname = string(cn.Spec.Hostname) } + crl := childResourceLabels(cn.Name, a.tsnamespace, "connector") proxyClass := cn.Spec.ProxyClass @@ -188,10 +188,17 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge } } + var replicas int32 = 1 + if cn.Spec.Replicas != nil { + replicas = *cn.Spec.Replicas + } + sts := &tailscaleSTSConfig{ + Replicas: replicas, ParentResourceName: cn.Name, ParentResourceUID: string(cn.UID), Hostname: hostname, + HostnamePrefix: string(cn.Spec.HostnamePrefix), ChildResourceLabels: crl, Tags: cn.Spec.Tags.Stringify(), Connector: &connector{ @@ -219,16 +226,19 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge } else { a.exitNodes.Remove(cn.UID) } + if cn.Spec.SubnetRouter != nil { a.subnetRouters.Add(cn.GetUID()) } else { a.subnetRouters.Remove(cn.GetUID()) } + if cn.Spec.AppConnector != nil { a.appConnectors.Add(cn.GetUID()) } else { a.appConnectors.Remove(cn.GetUID()) } + a.mu.Unlock() gaugeConnectorSubnetRouterResources.Set(int64(a.subnetRouters.Len())) gaugeConnectorExitNodeResources.Set(int64(a.exitNodes.Len())) @@ -244,21 +254,23 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge return err } - dev, err := a.ssr.DeviceInfo(ctx, crl, logger) + devices, err := a.ssr.DeviceInfo(ctx, crl, logger) if err != nil { return err } - if dev == nil || dev.hostname == "" { - logger.Debugf("no Tailscale hostname known yet, waiting for Connector Pod to finish auth") - // No hostname yet. Wait for the connector pod to auth. 
- cn.Status.TailnetIPs = nil - cn.Status.Hostname = "" - return nil + cn.Status.Devices = make([]tsapi.ConnectorDevice, len(devices)) + for i, dev := range devices { + cn.Status.Devices[i] = tsapi.ConnectorDevice{ + Hostname: dev.hostname, + TailnetIPs: dev.ips, + } } - cn.Status.TailnetIPs = dev.ips - cn.Status.Hostname = dev.hostname + if len(cn.Status.Devices) > 0 { + cn.Status.Hostname = cn.Status.Devices[0].Hostname + cn.Status.TailnetIPs = cn.Status.Devices[0].TailnetIPs + } return nil } @@ -302,6 +314,15 @@ func (a *ConnectorReconciler) validate(cn *tsapi.Connector) error { if (cn.Spec.SubnetRouter != nil || cn.Spec.ExitNode) && cn.Spec.AppConnector != nil { return errors.New("invalid spec: a Connector that is configured as an app connector must not be also configured as a subnet router or exit node") } + + // These two checks should be caught by the Connector schema validation. + if cn.Spec.Replicas != nil && *cn.Spec.Replicas > 1 && cn.Spec.Hostname != "" { + return errors.New("invalid spec: a Connector that is configured with multiple replicas cannot specify a hostname. Instead, use a hostnamePrefix") + } + if cn.Spec.HostnamePrefix != "" && cn.Spec.Hostname != "" { + return errors.New("invalid spec: a Connect cannot use both a hostname and hostname prefix") + } + if cn.Spec.AppConnector != nil { return validateAppConnector(cn.Spec.AppConnector) } diff --git a/cmd/k8s-operator/connector_test.go b/cmd/k8s-operator/connector_test.go index d5829c37f..afc7d2d6e 100644 --- a/cmd/k8s-operator/connector_test.go +++ b/cmd/k8s-operator/connector_test.go @@ -7,6 +7,8 @@ package main import ( "context" + "strconv" + "strings" "testing" "time" @@ -20,6 +22,7 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tstest" + "tailscale.com/types/ptr" "tailscale.com/util/mak" ) @@ -36,6 +39,7 @@ func TestConnector(t *testing.T) { APIVersion: "tailscale.com/v1alpha1", }, Spec: tsapi.ConnectorSpec{ + Replicas: ptr.To[int32](1), SubnetRouter: &tsapi.SubnetRouter{ AdvertiseRoutes: []tsapi.Route{"10.40.0.0/14"}, }, @@ -55,7 +59,8 @@ func TestConnector(t *testing.T) { cl := tstest.NewClock(tstest.ClockOpts{}) cr := &ConnectorReconciler{ - Client: fc, + Client: fc, + recorder: record.NewFakeRecorder(10), ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, @@ -78,6 +83,7 @@ func TestConnector(t *testing.T) { isExitNode: true, subnetRoutes: "10.40.0.0/14", app: kubetypes.AppConnector, + replicas: cn.Spec.Replicas, } expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) @@ -94,6 +100,10 @@ func TestConnector(t *testing.T) { cn.Status.IsExitNode = cn.Spec.ExitNode cn.Status.SubnetRoutes = cn.Spec.SubnetRouter.AdvertiseRoutes.Stringify() cn.Status.Hostname = hostname + cn.Status.Devices = []tsapi.ConnectorDevice{{ + Hostname: hostname, + TailnetIPs: []string{"127.0.0.1", "::1"}, + }} cn.Status.TailnetIPs = []string{"127.0.0.1", "::1"} expectEqual(t, fc, cn, func(o *tsapi.Connector) { o.Status.Conditions = nil @@ -156,6 +166,7 @@ func TestConnector(t *testing.T) { APIVersion: "tailscale.io/v1alpha1", }, Spec: tsapi.ConnectorSpec{ + Replicas: ptr.To[int32](1), SubnetRouter: &tsapi.SubnetRouter{ AdvertiseRoutes: []tsapi.Route{"10.40.0.0/14"}, }, @@ -174,6 +185,7 @@ func TestConnector(t *testing.T) { subnetRoutes: "10.40.0.0/14", hostname: "test-connector", app: kubetypes.AppConnector, + replicas: cn.Spec.Replicas, } expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedSTS(t, 
fc, opts), removeResourceReqs) @@ -217,9 +229,11 @@ func TestConnectorWithProxyClass(t *testing.T) { APIVersion: "tailscale.io/v1alpha1", }, Spec: tsapi.ConnectorSpec{ + Replicas: ptr.To[int32](1), SubnetRouter: &tsapi.SubnetRouter{ AdvertiseRoutes: []tsapi.Route{"10.40.0.0/14"}, }, + ExitNode: true, }, } @@ -260,6 +274,7 @@ func TestConnectorWithProxyClass(t *testing.T) { isExitNode: true, subnetRoutes: "10.40.0.0/14", app: kubetypes.AppConnector, + replicas: cn.Spec.Replicas, } expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) @@ -311,6 +326,7 @@ func TestConnectorWithAppConnector(t *testing.T) { APIVersion: "tailscale.io/v1alpha1", }, Spec: tsapi.ConnectorSpec{ + Replicas: ptr.To[int32](1), AppConnector: &tsapi.AppConnector{}, }, } @@ -340,7 +356,7 @@ func TestConnectorWithAppConnector(t *testing.T) { recorder: fr, } - // 1. Connector with app connnector is created and becomes ready + // 1. Connector with app connector is created and becomes ready expectReconciled(t, cr, "", "test") fullName, shortName := findGenName(t, fc, "", "test", "connector") opts := configOpts{ @@ -350,6 +366,7 @@ func TestConnectorWithAppConnector(t *testing.T) { hostname: "test-connector", app: kubetypes.AppConnector, isAppConnector: true, + replicas: cn.Spec.Replicas, } expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) @@ -357,6 +374,7 @@ func TestConnectorWithAppConnector(t *testing.T) { cn.ObjectMeta.Finalizers = append(cn.ObjectMeta.Finalizers, "tailscale.com/finalizer") cn.Status.IsAppConnector = true + cn.Status.Devices = []tsapi.ConnectorDevice{} cn.Status.Conditions = []metav1.Condition{{ Type: string(tsapi.ConnectorReady), Status: metav1.ConditionTrue, @@ -368,9 +386,9 @@ func TestConnectorWithAppConnector(t *testing.T) { // 2. Connector with invalid app connector routes has status set to invalid mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { - conn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("1.2.3.4/5")} + conn.Spec.AppConnector.Routes = tsapi.Routes{"1.2.3.4/5"} }) - cn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("1.2.3.4/5")} + cn.Spec.AppConnector.Routes = tsapi.Routes{"1.2.3.4/5"} expectReconciled(t, cr, "", "test") cn.Status.Conditions = []metav1.Condition{{ Type: string(tsapi.ConnectorReady), @@ -383,9 +401,9 @@ func TestConnectorWithAppConnector(t *testing.T) { // 3. 
Connector with valid app connnector routes becomes ready mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { - conn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("10.88.2.21/32")} + conn.Spec.AppConnector.Routes = tsapi.Routes{"10.88.2.21/32"} }) - cn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("10.88.2.21/32")} + cn.Spec.AppConnector.Routes = tsapi.Routes{"10.88.2.21/32"} cn.Status.Conditions = []metav1.Condition{{ Type: string(tsapi.ConnectorReady), Status: metav1.ConditionTrue, @@ -395,3 +413,94 @@ func TestConnectorWithAppConnector(t *testing.T) { }} expectReconciled(t, cr, "", "test") } + +func TestConnectorWithMultipleReplicas(t *testing.T) { + cn := &tsapi.Connector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: types.UID("1234-UID"), + }, + TypeMeta: metav1.TypeMeta{ + Kind: tsapi.ConnectorKind, + APIVersion: "tailscale.io/v1alpha1", + }, + Spec: tsapi.ConnectorSpec{ + Replicas: ptr.To[int32](3), + AppConnector: &tsapi.AppConnector{}, + HostnamePrefix: "test-connector", + }, + } + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(cn). + WithStatusSubresource(cn). + Build() + ft := &fakeTSClient{} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + cl := tstest.NewClock(tstest.ClockOpts{}) + fr := record.NewFakeRecorder(1) + cr := &ConnectorReconciler{ + Client: fc, + clock: cl, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + logger: zl.Sugar(), + recorder: fr, + } + + // 1. Ensure that our connector resource is reconciled. + expectReconciled(t, cr, "", "test") + + // 2. Ensure we have a number of secrets matching the number of replicas. + names := findGenNames(t, fc, "", "test", "connector") + if int32(len(names)) != *cn.Spec.Replicas { + t.Fatalf("expected %d secrets, got %d", *cn.Spec.Replicas, len(names)) + } + + // 3. Ensure each device has the correct hostname prefix and ordinal suffix. + for i, name := range names { + expected := expectedSecret(t, fc, configOpts{ + secretName: name, + hostname: string(cn.Spec.HostnamePrefix) + "-" + strconv.Itoa(i), + isAppConnector: true, + parentType: "connector", + namespace: cr.tsnamespace, + }) + + expectEqual(t, fc, expected) + } + + // 4. Ensure the generated stateful set has the matching number of replicas + shortName := strings.TrimSuffix(names[0], "-0") + + var sts appsv1.StatefulSet + if err = fc.Get(t.Context(), types.NamespacedName{Namespace: "operator-ns", Name: shortName}, &sts); err != nil { + t.Fatalf("failed to get StatefulSet %q: %v", shortName, err) + } + + if sts.Spec.Replicas == nil { + t.Fatalf("actual StatefulSet %q does not have replicas set", shortName) + } + + if *sts.Spec.Replicas != *cn.Spec.Replicas { + t.Fatalf("expected %d replicas, got %d", *cn.Spec.Replicas, *sts.Spec.Replicas) + } + + // 5. 
We'll scale the connector down by 1 replica and make sure its secret is cleaned up + mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { + conn.Spec.Replicas = ptr.To[int32](2) + }) + expectReconciled(t, cr, "", "test") + names = findGenNames(t, fc, "", "test", "connector") + if len(names) != 2 { + t.Fatalf("expected 2 secrets, got %d", len(names)) + } +} diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml index d645e3922..74d32d53d 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml @@ -115,9 +115,19 @@ spec: Connector node. If unset, hostname defaults to -connector. Hostname can contain lower case letters, numbers and dashes, it must not start or end with a dash and must be between 2 - and 63 characters long. + and 63 characters long. This field should only be used when creating a connector + with an unspecified number of replicas, or a single replica. type: string pattern: ^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$ + hostnamePrefix: + description: |- + HostnamePrefix specifies the hostname prefix for each + replica. Each device will have the integer number + from its StatefulSet pod appended to this prefix to form the full hostname. + HostnamePrefix can contain lower case letters, numbers and dashes, it + must not start with a dash and must be between 1 and 62 characters long. + type: string + pattern: ^[a-z0-9][a-z0-9-]{0,61}$ proxyClass: description: |- ProxyClass is the name of the ProxyClass custom resource that @@ -125,6 +135,14 @@ spec: resources created for this Connector. If unset, the operator will create resources with the default configuration. type: string + replicas: + description: |- + Replicas specifies how many devices to create. Set this to enable + high availability for app connectors, subnet routers, or exit nodes. + https://tailscale.com/kb/1115/high-availability. Defaults to 1. + type: integer + format: int32 + minimum: 0 subnetRouter: description: |- SubnetRouter defines subnet routes that the Connector device should @@ -168,6 +186,10 @@ spec: message: A Connector needs to have at least one of exit node, subnet router or app connector configured. - rule: '!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))' message: The appConnector field is mutually exclusive with exitNode and subnetRouter fields. + - rule: '!(has(self.hostname) && has(self.replicas) && self.replicas > 1)' + message: The hostname field cannot be specified when replicas is greater than 1. + - rule: '!(has(self.hostname) && has(self.hostnamePrefix))' + message: The hostname and hostnamePrefix fields are mutually exclusive. status: description: |- ConnectorStatus describes the status of the Connector. This is set @@ -235,11 +257,32 @@ spec: x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map + devices: + description: Devices contains information on each device managed by the Connector resource. + type: array + items: + type: object + properties: + hostname: + description: |- + Hostname is the fully qualified domain name of the Connector replica. + If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the + node. + type: string + tailnetIPs: + description: |- + TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) + assigned to the Connector replica. 
+ type: array + items: + type: string hostname: description: |- Hostname is the fully qualified domain name of the Connector node. If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the - node. + node. When using multiple replicas, this field will be populated with the + first replica's hostname. Use the Hostnames field for the full list + of hostnames. type: string isAppConnector: description: IsAppConnector is set to true if the Connector acts as an app connector. diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 5e0cca9b5..766d7f0d6 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -140,9 +140,19 @@ spec: Connector node. If unset, hostname defaults to -connector. Hostname can contain lower case letters, numbers and dashes, it must not start or end with a dash and must be between 2 - and 63 characters long. + and 63 characters long. This field should only be used when creating a connector + with an unspecified number of replicas, or a single replica. pattern: ^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$ type: string + hostnamePrefix: + description: |- + HostnamePrefix specifies the hostname prefix for each + replica. Each device will have the integer number + from its StatefulSet pod appended to this prefix to form the full hostname. + HostnamePrefix can contain lower case letters, numbers and dashes, it + must not start with a dash and must be between 1 and 62 characters long. + pattern: ^[a-z0-9][a-z0-9-]{0,61}$ + type: string proxyClass: description: |- ProxyClass is the name of the ProxyClass custom resource that @@ -150,6 +160,14 @@ spec: resources created for this Connector. If unset, the operator will create resources with the default configuration. type: string + replicas: + description: |- + Replicas specifies how many devices to create. Set this to enable + high availability for app connectors, subnet routers, or exit nodes. + https://tailscale.com/kb/1115/high-availability. Defaults to 1. + format: int32 + minimum: 0 + type: integer subnetRouter: description: |- SubnetRouter defines subnet routes that the Connector device should @@ -194,6 +212,10 @@ spec: rule: has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true) || has(self.appConnector) - message: The appConnector field is mutually exclusive with exitNode and subnetRouter fields. rule: '!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))' + - message: The hostname field cannot be specified when replicas is greater than 1. + rule: '!(has(self.hostname) && has(self.replicas) && self.replicas > 1)' + - message: The hostname and hostnamePrefix fields are mutually exclusive. + rule: '!(has(self.hostname) && has(self.hostnamePrefix))' status: description: |- ConnectorStatus describes the status of the Connector. This is set @@ -260,11 +282,32 @@ spec: x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map + devices: + description: Devices contains information on each device managed by the Connector resource. + items: + properties: + hostname: + description: |- + Hostname is the fully qualified domain name of the Connector replica. + If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the + node. + type: string + tailnetIPs: + description: |- + TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) + assigned to the Connector replica. 
+ items: + type: string + type: array + type: object + type: array hostname: description: |- Hostname is the fully qualified domain name of the Connector node. If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the - node. + node. When using multiple replicas, this field will be populated with the + first replica's hostname. Use the Hostnames field for the full list + of hostnames. type: string isAppConnector: description: IsAppConnector is set to true if the Connector acts as an app connector. diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index d66cf9116..fb11f717d 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -212,6 +212,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga hostname := hostnameForIngress(ing) sts := &tailscaleSTSConfig{ + Replicas: 1, Hostname: hostname, ParentResourceName: ing.Name, ParentResourceUID: string(ing.UID), @@ -227,27 +228,23 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga sts.ForwardClusterTrafficViaL7IngressProxy = true } - if _, err := a.ssr.Provision(ctx, logger, sts); err != nil { + if _, err = a.ssr.Provision(ctx, logger, sts); err != nil { return fmt.Errorf("failed to provision: %w", err) } - dev, err := a.ssr.DeviceInfo(ctx, crl, logger) + devices, err := a.ssr.DeviceInfo(ctx, crl, logger) if err != nil { return fmt.Errorf("failed to retrieve Ingress HTTPS endpoint status: %w", err) } - if dev == nil || dev.ingressDNSName == "" { - logger.Debugf("no Ingress DNS name known yet, waiting for proxy Pod initialize and start serving Ingress") - // No hostname yet. Wait for the proxy pod to auth. - ing.Status.LoadBalancer.Ingress = nil - if err := a.Status().Update(ctx, ing); err != nil { - return fmt.Errorf("failed to update ingress status: %w", err) + + ing.Status.LoadBalancer.Ingress = nil + for _, dev := range devices { + if dev.ingressDNSName == "" { + continue } - return nil - } - logger.Debugf("setting Ingress hostname to %q", dev.ingressDNSName) - ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{ - { + logger.Debugf("setting Ingress hostname to %q", dev.ingressDNSName) + ing.Status.LoadBalancer.Ingress = append(ing.Status.LoadBalancer.Ingress, networkingv1.IngressLoadBalancerIngress{ Hostname: dev.ingressDNSName, Ports: []networkingv1.IngressPortStatus{ { @@ -255,11 +252,13 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga Port: 443, }, }, - }, + }) } - if err := a.Status().Update(ctx, ing); err != nil { + + if err = a.Status().Update(ctx, ing); err != nil { return fmt.Errorf("failed to update ingress status: %w", err) } + return nil } diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index fe4d90c78..f5e23cfe9 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -57,6 +57,7 @@ func TestTailscaleIngress(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "ingress") opts := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -766,7 +767,7 @@ func ingress() *networkingv1.Ingress { ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", - UID: types.UID("1234-UID"), + UID: "1234-UID", }, Spec: networkingv1.IngressSpec{ IngressClassName: ptr.To("tailscale"), diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 1f700f13a..50f8738ce 100644 --- 
a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -122,6 +122,7 @@ func TestLoadBalancerClass(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") opts := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -260,6 +261,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -372,6 +374,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -623,6 +626,7 @@ func TestAnnotations(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -729,6 +733,7 @@ func TestAnnotationIntoLB(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -859,6 +864,7 @@ func TestLBIntoAnnotation(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -999,6 +1005,7 @@ func TestCustomHostname(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -1111,6 +1118,7 @@ func TestCustomPriorityClassName(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -1359,6 +1367,7 @@ func TestProxyClassForService(t *testing.T) { expectReconciled(t, sr, "default", "test") fullName, shortName := findGenName(t, fc, "default", "test", "svc") opts := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -1454,6 +1463,7 @@ func TestDefaultLoadBalancer(t *testing.T) { expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -1509,6 +1519,7 @@ func TestProxyFirewallMode(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -1800,6 +1811,7 @@ func Test_authKeyRemoval(t *testing.T) { hostname: "default-test", clusterTargetIP: "10.20.30.40", app: kubetypes.AppIngressProxy, + replicas: ptr.To[int32](1), } expectEqual(t, fc, expectedSecret(t, fc, opts)) @@ -1867,6 +1879,7 @@ func Test_externalNameService(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") opts := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 911d02832..9a87d2643 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -13,6 +13,7 @@ import ( "fmt" "net/http" "os" + "path" "slices" "strconv" "strings" @@ -20,6 +21,7 @@ 
import ( "go.uber.org/zap" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -114,6 +116,7 @@ var ( ) type tailscaleSTSConfig struct { + Replicas int32 ParentResourceName string ParentResourceUID string ChildResourceLabels map[string]string @@ -144,6 +147,10 @@ type tailscaleSTSConfig struct { // LoginServer denotes the URL of the control plane that should be used by the proxy. LoginServer string + + // HostnamePrefix specifies the desired prefix for the device's hostname. The hostname will be suffixed with the + // ordinal number generated by the StatefulSet. + HostnamePrefix string } type connector struct { @@ -205,11 +212,12 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga } sts.ProxyClass = proxyClass - secretName, _, err := a.createOrGetSecret(ctx, logger, sts, hsvc) + secretNames, err := a.provisionSecrets(ctx, logger, sts, hsvc) if err != nil { return nil, fmt.Errorf("failed to create or get API key secret: %w", err) } - _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName) + + _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretNames) if err != nil { return nil, fmt.Errorf("failed to reconcile statefulset: %w", err) } @@ -239,6 +247,7 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare if err != nil { return false, fmt.Errorf("getting statefulset: %w", err) } + if sts != nil { if !sts.GetDeletionTimestamp().IsZero() { // Deletion in progress, check again later. We'll get another @@ -246,29 +255,39 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare logger.Debugf("waiting for statefulset %s/%s deletion", sts.GetNamespace(), sts.GetName()) return false, nil } - err := a.DeleteAllOf(ctx, &appsv1.StatefulSet{}, client.InNamespace(a.operatorNamespace), client.MatchingLabels(labels), client.PropagationPolicy(metav1.DeletePropagationForeground)) - if err != nil { + + options := []client.DeleteAllOfOption{ + client.InNamespace(a.operatorNamespace), + client.MatchingLabels(labels), + client.PropagationPolicy(metav1.DeletePropagationForeground), + } + + if err = a.DeleteAllOf(ctx, &appsv1.StatefulSet{}, options...); err != nil { return false, fmt.Errorf("deleting statefulset: %w", err) } + logger.Debugf("started deletion of statefulset %s/%s", sts.GetNamespace(), sts.GetName()) return false, nil } - dev, err := a.DeviceInfo(ctx, labels, logger) + devices, err := a.DeviceInfo(ctx, labels, logger) if err != nil { return false, fmt.Errorf("getting device info: %w", err) } - if dev != nil && dev.id != "" { - logger.Debugf("deleting device %s from control", string(dev.id)) - if err := a.tsClient.DeleteDevice(ctx, string(dev.id)); err != nil { - errResp := &tailscale.ErrResponse{} - if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { - logger.Debugf("device %s not found, likely because it has already been deleted from control", string(dev.id)) + + for _, dev := range devices { + if dev.id != "" { + logger.Debugf("deleting device %s from control", string(dev.id)) + if err = a.tsClient.DeleteDevice(ctx, string(dev.id)); err != nil { + errResp := &tailscale.ErrResponse{} + if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { + logger.Debugf("device %s not found, likely because it has already been deleted from control", string(dev.id)) + } 
else { + return false, fmt.Errorf("deleting device: %w", err) + } } else { - return false, fmt.Errorf("deleting device: %w", err) + logger.Debugf("device %s deleted from control", string(dev.id)) } - } else { - logger.Debugf("device %s deleted from control", string(dev.id)) } } @@ -286,9 +305,10 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare tsNamespace: a.operatorNamespace, proxyType: typ, } - if err := maybeCleanupMetricsResources(ctx, mo, a.Client); err != nil { + if err = maybeCleanupMetricsResources(ctx, mo, a.Client); err != nil { return false, fmt.Errorf("error cleaning up metrics resources: %w", err) } + return true, nil } @@ -339,91 +359,139 @@ func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, l return createOrUpdate(ctx, a.Client, a.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = hsvc.Spec }) } -func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (secretName string, configs tailscaledConfigs, _ error) { - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - // Hardcode a -0 suffix so that in future, if we support - // multiple StatefulSet replicas, we can provision -N for - // those. - Name: hsvc.Name + "-0", - Namespace: a.operatorNamespace, - Labels: stsC.ChildResourceLabels, - }, - } - var orig *corev1.Secret // unmodified copy of secret - if err := a.Get(ctx, client.ObjectKeyFromObject(secret), secret); err == nil { - logger.Debugf("secret %s/%s already exists", secret.GetNamespace(), secret.GetName()) - orig = secret.DeepCopy() - } else if !apierrors.IsNotFound(err) { - return "", nil, err - } +func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) ([]string, error) { + secretNames := make([]string, stsC.Replicas) + + // Start by ensuring we have Secrets for the desired number of replicas. This will handle both creating and scaling + // up a StatefulSet. + for i := range stsC.Replicas { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", hsvc.Name, i), + Namespace: a.operatorNamespace, + Labels: stsC.ChildResourceLabels, + }, + } - var authKey string - if orig == nil { - // Initially it contains only tailscaled config, but when the - // proxy starts, it will also store there the state, certs and - // ACME account key. - sts, err := getSingleObject[appsv1.StatefulSet](ctx, a.Client, a.operatorNamespace, stsC.ChildResourceLabels) - if err != nil { - return "", nil, err + // If we only have a single replica, use the hostname verbatim. Otherwise, use the hostname prefix and add + // an ordinal suffix. + hostname := stsC.Hostname + if stsC.HostnamePrefix != "" { + hostname = fmt.Sprintf("%s-%d", stsC.HostnamePrefix, i) } - if sts != nil { - // StatefulSet exists, so we have already created the secret. - // If the secret is missing, they should delete the StatefulSet. - logger.Errorf("Tailscale proxy secret doesn't exist, but the corresponding StatefulSet %s/%s already does. 
Something is wrong, please delete the StatefulSet.", sts.GetNamespace(), sts.GetName()) - return "", nil, nil + + secretNames[i] = secret.Name + + var orig *corev1.Secret // unmodified copy of secret + if err := a.Get(ctx, client.ObjectKeyFromObject(secret), secret); err == nil { + logger.Debugf("secret %s/%s already exists", secret.GetNamespace(), secret.GetName()) + orig = secret.DeepCopy() + } else if !apierrors.IsNotFound(err) { + return nil, err } - // Create API Key secret which is going to be used by the statefulset - // to authenticate with Tailscale. - logger.Debugf("creating authkey for new tailscale proxy") - tags := stsC.Tags - if len(tags) == 0 { - tags = a.defaultTags + + var ( + authKey string + err error + ) + if orig == nil { + // Create API Key secret which is going to be used by the statefulset + // to authenticate with Tailscale. + logger.Debugf("creating authkey for new tailscale proxy") + tags := stsC.Tags + if len(tags) == 0 { + tags = a.defaultTags + } + authKey, err = newAuthKey(ctx, a.tsClient, tags) + if err != nil { + return nil, err + } } - authKey, err = newAuthKey(ctx, a.tsClient, tags) + + configs, err := tailscaledConfig(stsC, authKey, orig, hostname) if err != nil { - return "", nil, err + return nil, fmt.Errorf("error creating tailscaled config: %w", err) + } + + latest := tailcfg.CapabilityVersion(-1) + var latestConfig ipn.ConfigVAlpha + for key, val := range configs { + fn := tsoperator.TailscaledConfigFileName(key) + b, err := json.Marshal(val) + if err != nil { + return nil, fmt.Errorf("error marshalling tailscaled config: %w", err) + } + + mak.Set(&secret.StringData, fn, string(b)) + if key > latest { + latest = key + latestConfig = val + } + } + + if stsC.ServeConfig != nil { + j, err := json.Marshal(stsC.ServeConfig) + if err != nil { + return nil, err + } + + mak.Set(&secret.StringData, "serve-config", string(j)) + } + + if orig != nil && !apiequality.Semantic.DeepEqual(latest, orig) { + logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(latestConfig)) + if err = a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil { + return nil, err + } + } else { + logger.Debugf("creating a new Secret for the proxy with tailscaled config %s", sanitizeConfigBytes(latestConfig)) + if err = a.Create(ctx, secret); err != nil { + return nil, err + } } } - configs, err := tailscaledConfig(stsC, authKey, orig) - if err != nil { - return "", nil, fmt.Errorf("error creating tailscaled config: %w", err) + + // Next, we check if we have additional secrets and remove them and their associated device. This happens when we + // scale an StatefulSet down. 
+ var secrets corev1.SecretList + if err := a.List(ctx, &secrets, client.InNamespace(a.operatorNamespace), client.MatchingLabels(stsC.ChildResourceLabels)); err != nil { + return nil, err } - latest := tailcfg.CapabilityVersion(-1) - var latestConfig ipn.ConfigVAlpha - for key, val := range configs { - fn := tsoperator.TailscaledConfigFileName(key) - b, err := json.Marshal(val) - if err != nil { - return "", nil, fmt.Errorf("error marshalling tailscaled config: %w", err) + + for _, secret := range secrets.Items { + var ordinal int32 + if _, err := fmt.Sscanf(secret.Name, hsvc.Name+"-%d", &ordinal); err != nil { + return nil, err } - mak.Set(&secret.StringData, fn, string(b)) - if key > latest { - latest = key - latestConfig = val + + if ordinal < stsC.Replicas { + continue } - } - if stsC.ServeConfig != nil { - j, err := json.Marshal(stsC.ServeConfig) + dev, err := deviceInfo(&secret, "", logger) if err != nil { - return "", nil, err + return nil, err } - mak.Set(&secret.StringData, "serve-config", string(j)) - } - if orig != nil { - logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(latestConfig)) - if err := a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil { - return "", nil, err + if dev != nil && dev.id != "" { + var errResp *tailscale.ErrResponse + + err = a.tsClient.DeleteDevice(ctx, string(dev.id)) + switch { + case errors.As(err, &errResp) && errResp.Status == http.StatusNotFound: + // This device has possibly already been deleted in the admin console. So we can ignore this + // and move on to removing the secret. + case err != nil: + return nil, err + } } - } else { - logger.Debugf("creating a new Secret for the proxy with tailscaled config %s", sanitizeConfigBytes(latestConfig)) - if err := a.Create(ctx, secret); err != nil { - return "", nil, err + + if err = a.Delete(ctx, &secret); err != nil { + return nil, err } } - return secret.Name, configs, nil + + return secretNames, nil } // sanitizeConfigBytes returns ipn.ConfigVAlpha in string form with redacted @@ -443,22 +511,38 @@ func sanitizeConfigBytes(c ipn.ConfigVAlpha) string { // It retrieves info from a Kubernetes Secret labeled with the provided labels. Capver is cross-validated against the // Pod to ensure that it is the currently running Pod that set the capver. If the Pod or the Secret does not exist, the // returned capver is -1. Either of device ID, hostname and IPs can be empty string if not found in the Secret. 
-func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map[string]string, logger *zap.SugaredLogger) (dev *device, err error) { - sec, err := getSingleObject[corev1.Secret](ctx, a.Client, a.operatorNamespace, childLabels) - if err != nil { - return dev, err - } - if sec == nil { - return dev, nil +func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map[string]string, logger *zap.SugaredLogger) ([]*device, error) { + var secrets corev1.SecretList + if err := a.List(ctx, &secrets, client.InNamespace(a.operatorNamespace), client.MatchingLabels(childLabels)); err != nil { + return nil, err } - podUID := "" - pod := new(corev1.Pod) - if err := a.Get(ctx, types.NamespacedName{Namespace: sec.Namespace, Name: sec.Name}, pod); err != nil && !apierrors.IsNotFound(err) { - return dev, err - } else if err == nil { - podUID = string(pod.ObjectMeta.UID) + + devices := make([]*device, 0) + for _, sec := range secrets.Items { + podUID := "" + pod := new(corev1.Pod) + err := a.Get(ctx, types.NamespacedName{Namespace: sec.Namespace, Name: sec.Name}, pod) + switch { + case apierrors.IsNotFound(err): + // If the Pod is not found, we won't have its UID. We can still get the device information but the + // capability version will be unknown. + case err != nil: + return nil, err + default: + podUID = string(pod.ObjectMeta.UID) + } + + info, err := deviceInfo(&sec, podUID, logger) + if err != nil { + return nil, err + } + + if info != nil { + devices = append(devices, info) + } } - return deviceInfo(sec, podUID, logger) + + return devices, nil } // device contains tailscale state of a proxy device as gathered from its tailscale state Secret. @@ -534,7 +618,7 @@ var proxyYaml []byte //go:embed deploy/manifests/userspace-proxy.yaml var userspaceProxyYaml []byte -func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret string) (*appsv1.StatefulSet, error) { +func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecrets []string) (*appsv1.StatefulSet, error) { ss := new(appsv1.StatefulSet) if sts.ServeConfig != nil && sts.ForwardClusterTrafficViaL7IngressProxy != true { // If forwarding cluster traffic via is required we need non-userspace + NET_ADMIN + forwarding if err := yaml.Unmarshal(userspaceProxyYaml, &ss); err != nil { @@ -573,18 +657,22 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S pod.Labels[key] = val // sync StatefulSet labels to Pod to make it easier for users to select the Pod } + if sts.Replicas > 0 { + ss.Spec.Replicas = ptr.To(sts.Replicas) + } + // Generic containerboot configuration options. container.Env = append(container.Env, corev1.EnvVar{ Name: "TS_KUBE_SECRET", - Value: proxySecret, + Value: "$(POD_NAME)", }, corev1.EnvVar{ - // New style is in the form of cap-.hujson. 
Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", - Value: "/etc/tsconfig", + Value: "/etc/tsconfig/$(POD_NAME)", }, ) + if sts.ForwardClusterTrafficViaL7IngressProxy { container.Env = append(container.Env, corev1.EnvVar{ Name: "EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS", @@ -592,20 +680,23 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S }) } - configVolume := corev1.Volume{ - Name: "tailscaledconfig", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: proxySecret, + for i, secret := range proxySecrets { + configVolume := corev1.Volume{ + Name: "tailscaledconfig-" + strconv.Itoa(i), + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secret, + }, }, - }, + } + + pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, configVolume) + container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ + Name: fmt.Sprintf("tailscaledconfig-%d", i), + ReadOnly: true, + MountPath: path.Join("/etc/tsconfig/", secret), + }) } - pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, configVolume) - container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ - Name: "tailscaledconfig", - ReadOnly: true, - MountPath: "/etc/tsconfig", - }) if a.tsFirewallMode != "" { container.Env = append(container.Env, corev1.EnvVar{ @@ -643,22 +734,27 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S } else if sts.ServeConfig != nil { container.Env = append(container.Env, corev1.EnvVar{ Name: "TS_SERVE_CONFIG", - Value: "/etc/tailscaled/serve-config", - }) - container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ - Name: "serve-config", - ReadOnly: true, - MountPath: "/etc/tailscaled", + Value: "/etc/tailscaled/$(POD_NAME)/serve-config", }) - pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{ - Name: "serve-config", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: proxySecret, - Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}, + + for i, secret := range proxySecrets { + container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ + Name: "serve-config-" + strconv.Itoa(i), + ReadOnly: true, + MountPath: path.Join("/etc/tailscaled", secret), + }) + + pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: "serve-config-" + strconv.Itoa(i), + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secret, + Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}, + }, }, - }, - }) + }) + } + } app, err := appInfoForProxy(sts) @@ -918,13 +1014,13 @@ func isMainContainer(c *corev1.Container) bool { // tailscaledConfig takes a proxy config, a newly generated auth key if generated and a Secret with the previous proxy // state and auth key and returns tailscaled config files for currently supported proxy versions. 
-func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { +func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret, hostname string) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ Version: "alpha0", AcceptDNS: "false", AcceptRoutes: "false", // AcceptRoutes defaults to true Locked: "false", - Hostname: &stsC.Hostname, + Hostname: &hostname, NoStatefulFiltering: "true", // Explicitly enforce default value, see #14216 AppConnector: &ipn.AppConnectorPrefs{Advertise: false}, } diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index 52c8bec7f..51ad1aea3 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -23,7 +23,6 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -265,6 +264,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga } sts := &tailscaleSTSConfig{ + Replicas: 1, ParentResourceName: svc.Name, ParentResourceUID: string(svc.UID), Hostname: nameForService(svc), @@ -332,11 +332,12 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga return nil } - dev, err := a.ssr.DeviceInfo(ctx, crl, logger) + devices, err := a.ssr.DeviceInfo(ctx, crl, logger) if err != nil { return fmt.Errorf("failed to get device ID: %w", err) } - if dev == nil || dev.hostname == "" { + + if len(devices) == 0 || devices[0].hostname == "" { msg := "no Tailscale hostname known yet, waiting for proxy pod to finish auth" logger.Debug(msg) // No hostname yet. Wait for the proxy pod to auth. 
@@ -345,26 +346,29 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga return nil } + dev := devices[0] logger.Debugf("setting Service LoadBalancer status to %q, %s", dev.hostname, strings.Join(dev.ips, ", ")) - ingress := []corev1.LoadBalancerIngress{ - {Hostname: dev.hostname}, - } + svc.Status.LoadBalancer.Ingress = append(svc.Status.LoadBalancer.Ingress, corev1.LoadBalancerIngress{ + Hostname: dev.hostname, + }) + clusterIPAddr, err := netip.ParseAddr(svc.Spec.ClusterIP) if err != nil { msg := fmt.Sprintf("failed to parse cluster IP: %v", err) tsoperator.SetServiceCondition(svc, tsapi.ProxyReady, metav1.ConditionFalse, reasonProxyFailed, msg, a.clock, logger) return errors.New(msg) } + for _, ip := range dev.ips { addr, err := netip.ParseAddr(ip) if err != nil { continue } if addr.Is4() == clusterIPAddr.Is4() { // only add addresses of the same family - ingress = append(ingress, corev1.LoadBalancerIngress{IP: ip}) + svc.Status.LoadBalancer.Ingress = append(svc.Status.LoadBalancer.Ingress, corev1.LoadBalancerIngress{IP: ip}) } } - svc.Status.LoadBalancer.Ingress = ingress + tsoperator.SetServiceCondition(svc, tsapi.ProxyReady, metav1.ConditionTrue, reasonProxyCreated, reasonProxyCreated, a.clock, logger) return nil } diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 6ae32d6fb..b4c468c8e 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -11,6 +11,7 @@ import ( "fmt" "net/http" "net/netip" + "path" "reflect" "strings" "sync" @@ -69,9 +70,9 @@ type configOpts struct { shouldRemoveAuthKey bool secretExtraData map[string][]byte resourceVersion string - - enableMetrics bool - serviceMonitorLabels tsapi.Labels + replicas *int32 + enableMetrics bool + serviceMonitorLabels tsapi.Labels } func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.StatefulSet { @@ -88,8 +89,8 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef {Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.name"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, - {Name: "TS_KUBE_SECRET", Value: opts.secretName}, - {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"}, + {Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)"}, + {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)"}, }, SecurityContext: &corev1.SecurityContext{ Privileged: ptr.To(true), @@ -106,7 +107,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef var volumes []corev1.Volume volumes = []corev1.Volume{ { - Name: "tailscaledconfig", + Name: "tailscaledconfig-0", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: opts.secretName, @@ -115,9 +116,9 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef }, } tsContainer.VolumeMounts = []corev1.VolumeMount{{ - Name: "tailscaledconfig", + Name: "tailscaledconfig-0", ReadOnly: true, - MountPath: "/etc/tsconfig", + MountPath: "/etc/tsconfig/" + 
opts.secretName, }} if opts.firewallMode != "" { tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ @@ -154,10 +155,21 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef if opts.serveConfig != nil { tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ Name: "TS_SERVE_CONFIG", - Value: "/etc/tailscaled/serve-config", + Value: "/etc/tailscaled/$(POD_NAME)/serve-config", + }) + volumes = append(volumes, corev1.Volume{ + Name: "serve-config-0", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: opts.secretName, + Items: []corev1.KeyToPath{{ + Key: "serve-config", + Path: "serve-config", + }}, + }, + }, }) - volumes = append(volumes, corev1.Volume{Name: "serve-config", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName, Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}}}}) - tsContainer.VolumeMounts = append(tsContainer.VolumeMounts, corev1.VolumeMount{Name: "serve-config", ReadOnly: true, MountPath: "/etc/tailscaled"}) + tsContainer.VolumeMounts = append(tsContainer.VolumeMounts, corev1.VolumeMount{Name: "serve-config-0", ReadOnly: true, MountPath: path.Join("/etc/tailscaled", opts.secretName)}) } tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ Name: "TS_INTERNAL_APP", @@ -202,7 +214,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef }, }, Spec: appsv1.StatefulSetSpec{ - Replicas: ptr.To[int32](1), + Replicas: opts.replicas, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app": "1234-UID"}, }, @@ -266,15 +278,15 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps {Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.name"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, - {Name: "TS_KUBE_SECRET", Value: opts.secretName}, - {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"}, - {Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/serve-config"}, + {Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)"}, + {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)"}, + {Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/$(POD_NAME)/serve-config"}, {Name: "TS_INTERNAL_APP", Value: opts.app}, }, ImagePullPolicy: "Always", VolumeMounts: []corev1.VolumeMount{ - {Name: "tailscaledconfig", ReadOnly: true, MountPath: "/etc/tsconfig"}, - {Name: "serve-config", ReadOnly: true, MountPath: "/etc/tailscaled"}, + {Name: "tailscaledconfig-0", ReadOnly: true, MountPath: path.Join("/etc/tsconfig", opts.secretName)}, + {Name: "serve-config-0", ReadOnly: true, MountPath: path.Join("/etc/tailscaled", opts.secretName)}, }, } if opts.enableMetrics { @@ -302,16 +314,22 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps } volumes := []corev1.Volume{ { - Name: "tailscaledconfig", + Name: "tailscaledconfig-0", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: opts.secretName, }, }, }, - {Name: "serve-config", + { + 
Name: "serve-config-0", VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName, Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}}}}, + Secret: &corev1.SecretVolumeSource{ + SecretName: opts.secretName, + Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}, + }, + }, + }, } ss := &appsv1.StatefulSet{ TypeMeta: metav1.TypeMeta{ @@ -592,6 +610,32 @@ func findGenName(t *testing.T, client client.Client, ns, name, typ string) (full return s.GetName(), strings.TrimSuffix(s.GetName(), "-0") } +func findGenNames(t *testing.T, cl client.Client, ns, name, typ string) []string { + t.Helper() + labels := map[string]string{ + kubetypes.LabelManaged: "true", + LabelParentName: name, + LabelParentNamespace: ns, + LabelParentType: typ, + } + + var list corev1.SecretList + if err := cl.List(t.Context(), &list, client.InNamespace(ns), client.MatchingLabels(labels)); err != nil { + t.Fatalf("finding secrets for %q: %v", name, err) + } + + if len(list.Items) == 0 { + t.Fatalf("no secrets found for %q %s %+#v", name, ns, labels) + } + + names := make([]string, len(list.Items)) + for i, secret := range list.Items { + names[i] = secret.GetName() + } + + return names +} + func mustCreate(t *testing.T, client client.Client, obj client.Object) { t.Helper() if err := client.Create(context.Background(), obj); err != nil { diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 93a024b31..79c8469e1 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -81,6 +81,23 @@ _Appears in:_ | `status` _[ConnectorStatus](#connectorstatus)_ | ConnectorStatus describes the status of the Connector. This is set
                      and managed by the Tailscale operator. | | | +#### ConnectorDevice + + + + + + + +_Appears in:_ +- [ConnectorStatus](#connectorstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `hostname` _string_ | Hostname is the fully qualified domain name of the Connector replica.
                      If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
                      node. | | | +| `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
                      assigned to the Connector replica. | | | + + #### ConnectorList @@ -115,11 +132,13 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `tags` _[Tags](#tags)_ | Tags that the Tailscale node will be tagged with.
                      Defaults to [tag:k8s].
                      To autoapprove the subnet routes or exit node defined by a Connector,
                      you can configure Tailscale ACLs to give these tags the necessary
                      permissions.
                      See https://tailscale.com/kb/1337/acl-syntax#autoapprovers.
                      If you specify custom tags here, you must also make the operator an owner of these tags.
                      See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
                      Tags cannot be changed once a Connector node has been created.
                      Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
                      Type: string
                      | -| `hostname` _[Hostname](#hostname)_ | Hostname is the tailnet hostname that should be assigned to the
                      Connector node. If unset, hostname defaults to <connector name>-connector. Hostname can contain lower case letters, numbers and
                      dashes, it must not start or end with a dash and must be between 2
                      and 63 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$`
                      Type: string
                      | +| `hostname` _[Hostname](#hostname)_ | Hostname is the tailnet hostname that should be assigned to the
                      Connector node. If unset, hostname defaults to <connector name>-connector. Hostname can contain lower case letters, numbers and
                      dashes, it must not start or end with a dash and must be between 2
                      and 63 characters long. This field should only be used when creating a connector
                      with an unspecified number of replicas, or a single replica. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$`
                      Type: string
                      | +| `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix specifies the hostname prefix for each
                      replica. Each device will have the integer number
                      from its StatefulSet pod appended to this prefix to form the full hostname.
                      HostnamePrefix can contain lower case letters, numbers and dashes, it
                      must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$`
                      Type: string
                      | | `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that
                      contains configuration options that should be applied to the
                      resources created for this Connector. If unset, the operator will
                      create resources with the default configuration. | | | | `subnetRouter` _[SubnetRouter](#subnetrouter)_ | SubnetRouter defines subnet routes that the Connector device should
                      expose to tailnet as a Tailscale subnet router.
                      https://tailscale.com/kb/1019/subnets/
                      If this field is unset, the device does not get configured as a Tailscale subnet router.
                      This field is mutually exclusive with the appConnector field. | | | | `appConnector` _[AppConnector](#appconnector)_ | AppConnector defines whether the Connector device should act as a Tailscale app connector. A Connector that is
                      configured as an app connector cannot be a subnet router or an exit node. If this field is unset, the
                      Connector does not act as an app connector.
                      Note that you will need to manually configure the permissions and the domains for the app connector via the
                      Admin panel.
                      Note also that the main tested and supported use case of this config option is to deploy an app connector on
                      Kubernetes to access SaaS applications available on the public internet. Using the app connector to expose
                      cluster workloads or other internal workloads to tailnet might work, but this is not a use case that we have
                      tested or optimised for.
                      If you are using the app connector to access SaaS applications because you need a predictable egress IP that
                      can be whitelisted, it is also your responsibility to ensure that cluster traffic from the connector flows
                      via that predictable IP, for example by enforcing that cluster egress traffic is routed via an egress NAT
                      device with a static IP address.
                      https://tailscale.com/kb/1281/app-connectors | | | | `exitNode` _boolean_ | ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false.
                      This field is mutually exclusive with the appConnector field.
                      https://tailscale.com/kb/1103/exit-nodes | | | +| `replicas` _integer_ | Replicas specifies how many devices to create. Set this to enable
                      high availability for app connectors, subnet routers, or exit nodes.
                      https://tailscale.com/kb/1115/high-availability. Defaults to 1. | | Minimum: 0
                      | #### ConnectorStatus @@ -140,7 +159,8 @@ _Appears in:_ | `isExitNode` _boolean_ | IsExitNode is set to true if the Connector acts as an exit node. | | | | `isAppConnector` _boolean_ | IsAppConnector is set to true if the Connector acts as an app connector. | | | | `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
                      assigned to the Connector node. | | | -| `hostname` _string_ | Hostname is the fully qualified domain name of the Connector node.
                      If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
                      node. | | | +| `hostname` _string_ | Hostname is the fully qualified domain name of the Connector node.
                      If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
                      node. When using multiple replicas, this field will be populated with the
                      first replica's hostname. Use the devices field for the full list
                      of hostnames. | | | +| `devices` _[ConnectorDevice](#connectordevice) array_ | Devices contains information on each device managed by the Connector resource. | | | #### Container @@ -324,6 +344,7 @@ _Validation:_ - Type: string _Appears in:_ +- [ConnectorSpec](#connectorspec) - [ProxyGroupSpec](#proxygroupspec) diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index ce6a1411b..58457500f 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -59,6 +59,8 @@ type ConnectorList struct { // ConnectorSpec describes a Tailscale node to be deployed in the cluster. // +kubebuilder:validation:XValidation:rule="has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true) || has(self.appConnector)",message="A Connector needs to have at least one of exit node, subnet router or app connector configured." // +kubebuilder:validation:XValidation:rule="!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))",message="The appConnector field is mutually exclusive with exitNode and subnetRouter fields." +// +kubebuilder:validation:XValidation:rule="!(has(self.hostname) && has(self.replicas) && self.replicas > 1)",message="The hostname field cannot be specified when replicas is greater than 1." +// +kubebuilder:validation:XValidation:rule="!(has(self.hostname) && has(self.hostnamePrefix))",message="The hostname and hostnamePrefix fields are mutually exclusive." type ConnectorSpec struct { // Tags that the Tailscale node will be tagged with. // Defaults to [tag:k8s]. @@ -76,9 +78,19 @@ type ConnectorSpec struct { // Connector node. If unset, hostname defaults to -connector. Hostname can contain lower case letters, numbers and // dashes, it must not start or end with a dash and must be between 2 - // and 63 characters long. + // and 63 characters long. This field should only be used when creating a connector + // with an unspecified number of replicas, or a single replica. // +optional Hostname Hostname `json:"hostname,omitempty"` + + // HostnamePrefix specifies the hostname prefix for each + // replica. Each device will have the integer number + // from its StatefulSet pod appended to this prefix to form the full hostname. + // HostnamePrefix can contain lower case letters, numbers and dashes, it + // must not start with a dash and must be between 1 and 62 characters long. + // +optional + HostnamePrefix HostnamePrefix `json:"hostnamePrefix,omitempty"` + // ProxyClass is the name of the ProxyClass custom resource that // contains configuration options that should be applied to the // resources created for this Connector. If unset, the operator will @@ -108,11 +120,19 @@ type ConnectorSpec struct { // https://tailscale.com/kb/1281/app-connectors // +optional AppConnector *AppConnector `json:"appConnector,omitempty"` + // ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false. // This field is mutually exclusive with the appConnector field. // https://tailscale.com/kb/1103/exit-nodes // +optional ExitNode bool `json:"exitNode"` + + // Replicas specifies how many devices to create. Set this to enable + // high availability for app connectors, subnet routers, or exit nodes. + // https://tailscale.com/kb/1115/high-availability. Defaults to 1. 
+ // +optional + // +kubebuilder:validation:Minimum=0 + Replicas *int32 `json:"replicas,omitempty"` } // SubnetRouter defines subnet routes that should be exposed to tailnet via a @@ -197,9 +217,26 @@ type ConnectorStatus struct { TailnetIPs []string `json:"tailnetIPs,omitempty"` // Hostname is the fully qualified domain name of the Connector node. // If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the - // node. + // node. When using multiple replicas, this field will be populated with the + // first replica's hostname. Use the Hostnames field for the full list + // of hostnames. // +optional Hostname string `json:"hostname,omitempty"` + // Devices contains information on each device managed by the Connector resource. + // +optional + Devices []ConnectorDevice `json:"devices"` +} + +type ConnectorDevice struct { + // Hostname is the fully qualified domain name of the Connector replica. + // If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the + // node. + // +optional + Hostname string `json:"hostname"` + // TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) + // assigned to the Connector replica. + // +optional + TailnetIPs []string `json:"tailnetIPs,omitempty"` } type ConditionType string diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index 6586c1354..d7a90ad0f 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -60,6 +60,26 @@ func (in *Connector) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorDevice) DeepCopyInto(out *ConnectorDevice) { + *out = *in + if in.TailnetIPs != nil { + in, out := &in.TailnetIPs, &out.TailnetIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorDevice. +func (in *ConnectorDevice) DeepCopy() *ConnectorDevice { + if in == nil { + return nil + } + out := new(ConnectorDevice) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConnectorList) DeepCopyInto(out *ConnectorList) { *out = *in @@ -110,6 +130,11 @@ func (in *ConnectorSpec) DeepCopyInto(out *ConnectorSpec) { *out = new(AppConnector) (*in).DeepCopyInto(*out) } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorSpec. @@ -137,6 +162,13 @@ func (in *ConnectorStatus) DeepCopyInto(out *ConnectorStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]ConnectorDevice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorStatus. From 61d3693e61072dea3899d860f99a0c0b91255b1a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 2 Sep 2025 08:21:47 -0700 Subject: [PATCH 1252/1708] cmd/tailscale/cli: add a debug command to force a risky action For testing risky action flows. 
Updates #15445 Change-Id: Id81e54678a1fe5ccedb5dd9c6542ff48c162b349 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/debug.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index fb062fd17..6fe15b238 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -374,6 +374,17 @@ func debugCmd() *ffcli.Command { ShortHelp: "Print the current set of candidate peer relay servers", Exec: runPeerRelayServers, }, + { + Name: "test-risk", + ShortUsage: "tailscale debug test-risk", + ShortHelp: "Do a fake risky action", + Exec: runTestRisk, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("test-risk") + fs.StringVar(&testRiskArgs.acceptedRisk, "accept-risk", "", "comma-separated list of accepted risks") + return fs + })(), + }, }...), } } @@ -1403,3 +1414,18 @@ func runPeerRelayServers(ctx context.Context, args []string) error { e.Encode(v) return nil } + +var testRiskArgs struct { + acceptedRisk string +} + +func runTestRisk(ctx context.Context, args []string) error { + if len(args) > 0 { + return errors.New("unexpected arguments") + } + if err := presentRiskToUser("test-risk", "This is a test risky action.", testRiskArgs.acceptedRisk); err != nil { + return err + } + fmt.Println("did-test-risky-action") + return nil +} From 0f5d3969cad44527d371c4f0b0403b4c305bd1ac Mon Sep 17 00:00:00 2001 From: nikiUppal-TS Date: Tue, 2 Sep 2025 11:26:10 -0500 Subject: [PATCH 1253/1708] tailcfg: add tailnet display name field (#16907) Updates the NodeCapabilities to contain Tailnet Display Name Updates tailscale/corp#30462 Signed-off-by: nikiUppal-TS --- tailcfg/tailcfg.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 6383af486..94d0b19d5 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2656,6 +2656,14 @@ const ( // NodeAttrTrafficSteering configures the node to use the traffic // steering subsystem for via routes. See tailscale/corp#29966. NodeAttrTrafficSteering NodeCapability = "traffic-steering" + + // NodeAttrTailnetDisplayName is an optional alternate name for the tailnet + // to be displayed to the user. + // If empty or absent, a default is used. + // If this value is present and set by a user this will only include letters, + // numbers, apostrophe, spaces, and hyphens. This may not be true for the default. + // Values can look like "foo.com" or "Foo's Test Tailnet - Staging". + NodeAttrTailnetDisplayName NodeCapability = "tailnet-display-name" ) // SetDNSRequest is a request to add a DNS record. From dbc54addd0170fadcb7271db550f72887721fc9c Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 1 Sep 2025 15:02:24 +0000 Subject: [PATCH 1254/1708] licenses: update license notices Signed-off-by: License Updater --- licenses/android.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/licenses/android.md b/licenses/android.md index 0e68f0cac..881f3ed3d 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -12,7 +12,7 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/go-tpm](https://pkg.go.dev/github.com/google/go-tpm) ([Apache-2.0](https://github.com/google/go-tpm/blob/v0.9.4/LICENSE)) From 42a215e12adb3a4da9012de9e450faecc24f88dd Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Tue, 2 Sep 2025 09:25:21 -0500 Subject: [PATCH 1255/1708] cmd/tailscale/cli: prompt for y/n when attempting risky action Previously, when attempting a risky action, the CLI printed a 5 second countdown saying "Continuing in 5 seconds...". When the countdown finished, the CLI aborted rather than continuing. To avoid confusion, but also avoid accidentally continuing if someone (or an automated process) fails to manually abort within the countdown, we now explicitly prompt for a y/n response on whether or not to continue. Updates #15445 Co-authored-by: Kot C Signed-off-by: Percy Wegmann --- cmd/tailscale/cli/risks.go | 29 ++++------------------------- 1 file changed, 4 insertions(+), 25 deletions(-) diff --git a/cmd/tailscale/cli/risks.go b/cmd/tailscale/cli/risks.go index 9b03025a8..dfde87f64 100644 --- a/cmd/tailscale/cli/risks.go +++ b/cmd/tailscale/cli/risks.go @@ -7,15 +7,11 @@ import ( "context" "errors" "flag" - "fmt" - "os" - "os/signal" "runtime" "strings" - "syscall" - "time" "tailscale.com/ipn" + "tailscale.com/util/prompt" "tailscale.com/util/testenv" ) @@ -57,11 +53,6 @@ func isRiskAccepted(riskType, acceptedRisks string) bool { var errAborted = errors.New("aborted, no changes made") -// riskAbortTimeSeconds is the number of seconds to wait after displaying the -// risk message before continuing with the operation. -// It is used by the presentRiskToUser function below. -const riskAbortTimeSeconds = 5 - // presentRiskToUser displays the risk message and waits for the user to cancel. // It returns errorAborted if the user aborts. In tests it returns errAborted // immediately unless the risk has been explicitly accepted. 
@@ -75,22 +66,10 @@ func presentRiskToUser(riskType, riskMessage, acceptedRisks string) error { outln(riskMessage) printf("To skip this warning, use --accept-risk=%s\n", riskType) - interrupt := make(chan os.Signal, 1) - signal.Notify(interrupt, syscall.SIGINT) - var msgLen int - for left := riskAbortTimeSeconds; left > 0; left-- { - msg := fmt.Sprintf("\rContinuing in %d seconds...", left) - msgLen = len(msg) - printf("%s", msg) - select { - case <-interrupt: - printf("\r%s\r", strings.Repeat("x", msgLen+1)) - return errAborted - case <-time.After(time.Second): - continue - } + if prompt.YesNo("Continue?") { + return nil } - printf("\r%s\r", strings.Repeat(" ", msgLen)) + return errAborted } From 2434bc69fc67fd146021fcb6743e692a51953ab8 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 1 Sep 2025 14:37:45 -0700 Subject: [PATCH 1256/1708] util/syspolicy/{setting,ptype}: move PreferenceOption and Visibility to new leaf package Step 3 in the series. See earlier cc532efc2000 and d05e6dc09e. This step moves some types into a new leaf "ptype" package out of the big "settings" package. The policyclient.Client will later get new methods to return those things (as well as Duration and Uint64, which weren't done at the time of the earlier prototype). Updates #16998 Updates #12614 Change-Id: I4d72d8079de3b5351ed602eaa72863372bd474a2 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + net/dns/manager_windows.go | 4 ++-- tsnet/depaware.txt | 1 + tstest/deptest/deptest.go | 4 ++++ .../{setting/types.go => ptype/ptype.go} | 22 +++++++---------- util/syspolicy/ptype/ptype_test.go | 24 +++++++++++++++++++ util/syspolicy/rsop/change_callbacks.go | 3 ++- util/syspolicy/setting/setting.go | 3 ++- util/syspolicy/setting/snapshot_test.go | 6 +++++ util/syspolicy/source/policy_reader.go | 9 +++---- util/syspolicy/source/policy_reader_test.go | 9 +++---- util/syspolicy/syspolicy.go | 11 +++++---- util/syspolicy/syspolicy_test.go | 23 +++++++++--------- 17 files changed, 83 insertions(+), 41 deletions(-) rename util/syspolicy/{setting/types.go => ptype/ptype.go} (88%) create mode 100644 util/syspolicy/ptype/ptype_test.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 0597d5d1f..2c6c4690c 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -176,6 +176,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ tailscale.com/util/syspolicy/policyclient from tailscale.com/util/syspolicy/rsop + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 40c8abb08..ccba96707 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -957,6 +957,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/policyclient from 
tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index cf1691c71..047bac6c2 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -197,6 +197,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ tailscale.com/util/syspolicy/policyclient from tailscale.com/util/syspolicy/rsop + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index f08601f81..ee55f914c 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -434,6 +434,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 743492904..155ad03e3 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -386,6 +386,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/localapi+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index 901ab6dd0..8830861d1 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -32,7 +32,7 @@ import ( "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" - "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/winutil" ) @@ -521,7 +521,7 @@ func (m *windowsManager) reconfigureDNSRegistration() { // Disable DNS registration by default (if the policy setting is not configured). // This is primarily for historical reasons and to avoid breaking existing // setups that rely on this behavior. 
- enableDNSRegistration, err := syspolicy.GetPreferenceOptionOrDefault(pkey.EnableDNSRegistration, setting.NeverByPolicy) + enableDNSRegistration, err := syspolicy.GetPreferenceOptionOrDefault(pkey.EnableDNSRegistration, ptype.NeverByPolicy) if err != nil { m.logf("error getting DNSRegistration policy setting: %v", err) // non-fatal; we'll use the default } diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index f4b0dc775..1c2be4781 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -381,6 +381,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/localapi+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/tstest/deptest/deptest.go b/tstest/deptest/deptest.go index 4effd4a78..c248d6c20 100644 --- a/tstest/deptest/deptest.go +++ b/tstest/deptest/deptest.go @@ -24,6 +24,7 @@ import ( type DepChecker struct { GOOS string // optional GOARCH string // optional + OnDep func(string) // if non-nil, called per import BadDeps map[string]string // package => why WantDeps set.Set[string] // packages expected Tags string // comma-separated @@ -66,6 +67,9 @@ func (c DepChecker) Check(t *testing.T) { }) for _, dep := range res.Deps { + if c.OnDep != nil { + c.OnDep(dep) + } if why, ok := c.BadDeps[dep]; ok { t.Errorf("package %q is not allowed as a dependency (env: %q); reason: %s", dep, extraEnv, why) } diff --git a/util/syspolicy/setting/types.go b/util/syspolicy/ptype/ptype.go similarity index 88% rename from util/syspolicy/setting/types.go rename to util/syspolicy/ptype/ptype.go index 9f110ab03..65ca9e631 100644 --- a/util/syspolicy/setting/types.go +++ b/util/syspolicy/ptype/ptype.go @@ -1,11 +1,11 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package setting - -import ( - "encoding" -) +// Package ptype contains types used by syspolicy. +// +// It's a leaf package for dependency reasons and should not contain much if any +// code, and should not import much (or anything). +package ptype // PreferenceOption is a policy that governs whether a boolean variable // is forcibly assigned an administrator-defined value, or allowed to receive @@ -18,9 +18,10 @@ const ( AlwaysByPolicy ) -// Show returns if the UI option that controls the choice administered by this -// policy should be shown. Currently this is true if and only if the policy is -// [ShowChoiceByPolicy]. +// Show reports whether the UI option that controls the choice administered by +// this policy should be shown (that is, available for users to change). +// +// Currently this is true if and only if the policy is [ShowChoiceByPolicy]. func (p PreferenceOption) Show() bool { return p == ShowChoiceByPolicy } @@ -91,11 +92,6 @@ func (p *PreferenceOption) UnmarshalText(text []byte) error { // component of a user interface is to be shown. 
type Visibility byte -var ( - _ encoding.TextMarshaler = (*Visibility)(nil) - _ encoding.TextUnmarshaler = (*Visibility)(nil) -) - const ( VisibleByPolicy Visibility = 'v' HiddenByPolicy Visibility = 'h' diff --git a/util/syspolicy/ptype/ptype_test.go b/util/syspolicy/ptype/ptype_test.go new file mode 100644 index 000000000..7c963398b --- /dev/null +++ b/util/syspolicy/ptype/ptype_test.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ptype + +import ( + "encoding" + "testing" + + "tailscale.com/tstest/deptest" +) + +var ( + _ encoding.TextMarshaler = (*Visibility)(nil) + _ encoding.TextUnmarshaler = (*Visibility)(nil) +) + +func TestImports(t *testing.T) { + deptest.DepChecker{ + OnDep: func(dep string) { + t.Errorf("unexpected dep %q in leaf package; this package should not contain much code", dep) + }, + }.Check(t) +} diff --git a/util/syspolicy/rsop/change_callbacks.go b/util/syspolicy/rsop/change_callbacks.go index 4e71f683a..fdf51c253 100644 --- a/util/syspolicy/rsop/change_callbacks.go +++ b/util/syspolicy/rsop/change_callbacks.go @@ -13,6 +13,7 @@ import ( "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/syspolicy/setting" ) @@ -50,7 +51,7 @@ func (c PolicyChange) HasChanged(key pkey.Key) bool { return true } switch newVal := new.(type) { - case bool, uint64, string, setting.Visibility, setting.PreferenceOption, time.Duration: + case bool, uint64, string, ptype.Visibility, ptype.PreferenceOption, time.Duration: return newVal != old case []string: oldVal, ok := old.([]string) diff --git a/util/syspolicy/setting/setting.go b/util/syspolicy/setting/setting.go index 9285afade..091cf58d3 100644 --- a/util/syspolicy/setting/setting.go +++ b/util/syspolicy/setting/setting.go @@ -17,6 +17,7 @@ import ( "tailscale.com/types/lazy" "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/testenv" ) @@ -130,7 +131,7 @@ func (t Type) String() string { // ValueType is a constraint that allows Go types corresponding to [Type]. type ValueType interface { - bool | uint64 | string | []string | Visibility | PreferenceOption | time.Duration + bool | uint64 | string | []string | ptype.Visibility | ptype.PreferenceOption | time.Duration } // Definition defines policy key, scope and value type. 
diff --git a/util/syspolicy/setting/snapshot_test.go b/util/syspolicy/setting/snapshot_test.go index 99c619cd9..762a9681c 100644 --- a/util/syspolicy/setting/snapshot_test.go +++ b/util/syspolicy/setting/snapshot_test.go @@ -12,6 +12,12 @@ import ( jsonv2 "github.com/go-json-experiment/json" "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" +) + +const ( + VisibleByPolicy = ptype.VisibleByPolicy + ShowChoiceByPolicy = ptype.ShowChoiceByPolicy ) func TestMergeSnapshots(t *testing.T) { diff --git a/util/syspolicy/source/policy_reader.go b/util/syspolicy/source/policy_reader.go index e6360e5f8..33ef22912 100644 --- a/util/syspolicy/source/policy_reader.go +++ b/util/syspolicy/source/policy_reader.go @@ -17,6 +17,7 @@ import ( "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/internal/metrics" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/syspolicy/setting" ) @@ -365,21 +366,21 @@ func readPolicySettingValue(store Store, s *setting.Definition) (value any, err case setting.PreferenceOptionValue: s, err := store.ReadString(key) if err == nil { - var value setting.PreferenceOption + var value ptype.PreferenceOption if err = value.UnmarshalText([]byte(s)); err == nil { return value, nil } } - return setting.ShowChoiceByPolicy, err + return ptype.ShowChoiceByPolicy, err case setting.VisibilityValue: s, err := store.ReadString(key) if err == nil { - var value setting.Visibility + var value ptype.Visibility if err = value.UnmarshalText([]byte(s)); err == nil { return value, nil } } - return setting.VisibleByPolicy, err + return ptype.VisibleByPolicy, err case setting.DurationValue: s, err := store.ReadString(key) if err == nil { diff --git a/util/syspolicy/source/policy_reader_test.go b/util/syspolicy/source/policy_reader_test.go index 06246a209..32e8c51a6 100644 --- a/util/syspolicy/source/policy_reader_test.go +++ b/util/syspolicy/source/policy_reader_test.go @@ -10,6 +10,7 @@ import ( "tailscale.com/util/must" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/syspolicy/setting" ) @@ -139,8 +140,8 @@ func TestReaderLifecycle(t *testing.T) { }, initWant: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "DurationValue": setting.RawItemWith(must.Get(time.ParseDuration("2h30m")), nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), - "PreferenceOptionValue": setting.RawItemWith(setting.AlwaysByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), - "VisibilityValue": setting.RawItemWith(setting.VisibleByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), + "PreferenceOptionValue": setting.RawItemWith(ptype.AlwaysByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), + "VisibilityValue": setting.RawItemWith(ptype.VisibleByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), }, setting.NewNamedOrigin("Test", setting.DeviceScope)), }, { @@ -169,8 +170,8 @@ func TestReaderLifecycle(t *testing.T) { initWant: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "DurationValue1": setting.RawItemWith(nil, setting.NewErrorText("time: invalid duration \"soon\""), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), "DurationValue2": setting.RawItemWith(nil, setting.NewErrorText("bang!"), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), - "PreferenceOptionValue": setting.RawItemWith(setting.ShowChoiceByPolicy, nil, setting.NewNamedOrigin("Test", 
setting.CurrentUserScope)), - "VisibilityValue": setting.RawItemWith(setting.VisibleByPolicy, setting.NewErrorText("type mismatch in ReadString: got uint64"), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), + "PreferenceOptionValue": setting.RawItemWith(ptype.ShowChoiceByPolicy, nil, setting.NewNamedOrigin("Test", setting.CurrentUserScope)), + "VisibilityValue": setting.RawItemWith(ptype.VisibleByPolicy, setting.NewErrorText("type mismatch in ReadString: got uint64"), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), }, setting.NewNamedOrigin("Test", setting.CurrentUserScope)), }, } diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index 0ac1d2517..189f41107 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -18,6 +18,7 @@ import ( "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" @@ -111,14 +112,14 @@ func GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) { // the authority to set. It describes user-decides/always/never options, where // "always" and "never" remove the user's ability to make a selection. If not // present or set to a different value, "user-decides" is the default. -func GetPreferenceOption(name pkey.Key) (setting.PreferenceOption, error) { - return getCurrentPolicySettingValue(name, setting.ShowChoiceByPolicy) +func GetPreferenceOption(name pkey.Key) (ptype.PreferenceOption, error) { + return getCurrentPolicySettingValue(name, ptype.ShowChoiceByPolicy) } // GetPreferenceOptionOrDefault is like [GetPreferenceOption], but allows // specifying a default value to return if the policy setting is not configured. // It can be used in situations where "user-decides" is not the default. -func GetPreferenceOptionOrDefault(name pkey.Key, defaultValue setting.PreferenceOption) (setting.PreferenceOption, error) { +func GetPreferenceOptionOrDefault(name pkey.Key, defaultValue ptype.PreferenceOption) (ptype.PreferenceOption, error) { return getCurrentPolicySettingValue(name, defaultValue) } @@ -127,8 +128,8 @@ func GetPreferenceOptionOrDefault(name pkey.Key, defaultValue setting.Preference // for UI elements. The registry value should be a string set to "show" (return // true) or "hide" (return true). If not present or set to a different value, // "show" (return false) is the default. 
-func GetVisibility(name pkey.Key) (setting.Visibility, error) { - return getCurrentPolicySettingValue(name, setting.VisibleByPolicy) +func GetVisibility(name pkey.Key) (ptype.Visibility, error) { + return getCurrentPolicySettingValue(name, ptype.VisibleByPolicy) } // GetDuration loads a policy from the registry that can be managed diff --git a/util/syspolicy/syspolicy_test.go b/util/syspolicy/syspolicy_test.go index 5e822a0b7..3130f5d07 100644 --- a/util/syspolicy/syspolicy_test.go +++ b/util/syspolicy/syspolicy_test.go @@ -13,6 +13,7 @@ import ( "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/internal/metrics" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" "tailscale.com/util/testenv" @@ -249,7 +250,7 @@ func TestGetPreferenceOption(t *testing.T) { key pkey.Key handlerValue string handlerError error - wantValue setting.PreferenceOption + wantValue ptype.PreferenceOption wantError error wantMetrics []metrics.TestState }{ @@ -257,7 +258,7 @@ func TestGetPreferenceOption(t *testing.T) { name: "always by policy", key: pkey.EnableIncomingConnections, handlerValue: "always", - wantValue: setting.AlwaysByPolicy, + wantValue: ptype.AlwaysByPolicy, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_any", Value: 1}, {Name: "$os_syspolicy_AllowIncomingConnections", Value: 1}, @@ -267,7 +268,7 @@ func TestGetPreferenceOption(t *testing.T) { name: "never by policy", key: pkey.EnableIncomingConnections, handlerValue: "never", - wantValue: setting.NeverByPolicy, + wantValue: ptype.NeverByPolicy, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_any", Value: 1}, {Name: "$os_syspolicy_AllowIncomingConnections", Value: 1}, @@ -277,7 +278,7 @@ func TestGetPreferenceOption(t *testing.T) { name: "use default", key: pkey.EnableIncomingConnections, handlerValue: "", - wantValue: setting.ShowChoiceByPolicy, + wantValue: ptype.ShowChoiceByPolicy, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_any", Value: 1}, {Name: "$os_syspolicy_AllowIncomingConnections", Value: 1}, @@ -287,13 +288,13 @@ func TestGetPreferenceOption(t *testing.T) { name: "read non-existing value", key: pkey.EnableIncomingConnections, handlerError: ErrNotConfigured, - wantValue: setting.ShowChoiceByPolicy, + wantValue: ptype.ShowChoiceByPolicy, }, { name: "other error is returned", key: pkey.EnableIncomingConnections, handlerError: someOtherError, - wantValue: setting.ShowChoiceByPolicy, + wantValue: ptype.ShowChoiceByPolicy, wantError: someOtherError, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_errors", Value: 1}, @@ -342,7 +343,7 @@ func TestGetVisibility(t *testing.T) { key pkey.Key handlerValue string handlerError error - wantValue setting.Visibility + wantValue ptype.Visibility wantError error wantMetrics []metrics.TestState }{ @@ -350,7 +351,7 @@ func TestGetVisibility(t *testing.T) { name: "hidden by policy", key: pkey.AdminConsoleVisibility, handlerValue: "hide", - wantValue: setting.HiddenByPolicy, + wantValue: ptype.HiddenByPolicy, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_any", Value: 1}, {Name: "$os_syspolicy_AdminConsole", Value: 1}, @@ -360,7 +361,7 @@ func TestGetVisibility(t *testing.T) { name: "visibility default", key: pkey.AdminConsoleVisibility, handlerValue: "show", - wantValue: setting.VisibleByPolicy, + wantValue: ptype.VisibleByPolicy, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_any", Value: 1}, {Name: "$os_syspolicy_AdminConsole", 
Value: 1}, @@ -371,14 +372,14 @@ func TestGetVisibility(t *testing.T) { key: pkey.AdminConsoleVisibility, handlerValue: "show", handlerError: ErrNotConfigured, - wantValue: setting.VisibleByPolicy, + wantValue: ptype.VisibleByPolicy, }, { name: "other error is returned", key: pkey.AdminConsoleVisibility, handlerValue: "show", handlerError: someOtherError, - wantValue: setting.VisibleByPolicy, + wantValue: ptype.VisibleByPolicy, wantError: someOtherError, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_errors", Value: 1}, From 1ca4ae598a8369c53f91eec09e19c7f2326ed539 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 1 Sep 2025 15:05:06 -0700 Subject: [PATCH 1257/1708] ipn/ipnlocal: use policyclient.Client always, stop using global syspolicy funcs Step 4 of N. See earlier commits in the series (via the issue) for the plan. This adds the missing methods to policyclient.Client and then uses it everywhere in ipn/ipnlocal and locks it in with a new dep test. Still plenty of users of the global syspolicy elsewhere in the tree, but this is a lot of them. Updates #16998 Updates #12614 Change-Id: I25b136539ae1eedbcba80124de842970db0ca314 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/c2n.go | 3 +- ipn/ipnlocal/local.go | 71 ++++++++++----------- ipn/ipnlocal/local_test.go | 50 +++++++++++++-- tsd/syspolicy_on.go | 23 +++++++ tstest/deptest/deptest.go | 12 +++- util/syspolicy/policyclient/policyclient.go | 56 +++++++++++++++- 6 files changed, 168 insertions(+), 47 deletions(-) diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index b1a780cc1..339fad50a 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -29,7 +29,6 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/goroutines" "tailscale.com/util/set" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" "tailscale.com/version" "tailscale.com/version/distro" @@ -343,7 +342,7 @@ func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http // this will first check syspolicy, MDM settings like Registry // on Windows or defaults on macOS. If they are not set, it falls // back to the cli-flag, `--posture-checking`. - choice, err := syspolicy.GetPreferenceOption(pkey.PostureChecking) + choice, err := b.polc.GetPreferenceOption(pkey.PostureChecking) if err != nil { b.logf( "c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s", diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 61bde31e4..5f70ae8ef 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -107,7 +107,6 @@ import ( "tailscale.com/util/rands" "tailscale.com/util/set" "tailscale.com/util/slicesx" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/systemd" @@ -382,7 +381,7 @@ type LocalBackend struct { lastSuggestedExitNode tailcfg.StableNodeID // allowedSuggestedExitNodes is a set of exit nodes permitted by the most recent - // [syspolicy.AllowedSuggestedExitNodes] value. The allowedSuggestedExitNodesMu + // [pkey.AllowedSuggestedExitNodes] value. The allowedSuggestedExitNodesMu // mutex guards access to this set. allowedSuggestedExitNodesMu sync.Mutex allowedSuggestedExitNodes set.Set[tailcfg.StableNodeID] @@ -405,10 +404,10 @@ type LocalBackend struct { // (sending false). 
needsCaptiveDetection chan bool - // overrideAlwaysOn is whether [syspolicy.AlwaysOn] is overridden by the user + // overrideAlwaysOn is whether [pkey.AlwaysOn] is overridden by the user // and should have no impact on the WantRunning state until the policy changes, // or the user re-connects manually, switches to a different profile, etc. - // Notably, this is true when [syspolicy.AlwaysOnOverrideWithReason] is enabled, + // Notably, this is true when [pkey.AlwaysOnOverrideWithReason] is enabled, // and the user has disconnected with a reason. // See tailscale/corp#26146. overrideAlwaysOn bool @@ -418,9 +417,9 @@ type LocalBackend struct { reconnectTimer tstime.TimerController // overrideExitNodePolicy is whether the user has overridden the exit node policy - // by manually selecting an exit node, as allowed by [syspolicy.AllowExitNodeOverride]. + // by manually selecting an exit node, as allowed by [pkey.AllowExitNodeOverride]. // - // If true, the [syspolicy.ExitNodeID] and [syspolicy.ExitNodeIP] policy settings are ignored, + // If true, the [pkey.ExitNodeID] and [pkey.ExitNodeIP] policy settings are ignored, // and the suggested exit node is not applied automatically. // // It is cleared when the user switches back to the state required by policy (typically, auto:any), @@ -679,7 +678,7 @@ func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Tim } } case "syspolicy": - setEnabled = syspolicy.SetDebugLoggingEnabled + setEnabled = b.polc.SetDebugLoggingEnabled } if setEnabled == nil || !slices.Contains(ipn.DebuggableComponents, component) { return fmt.Errorf("unknown component %q", component) @@ -1820,13 +1819,13 @@ var preferencePolicies = []preferencePolicyInfo{ // // b.mu must be held. func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { - if controlURL, err := syspolicy.GetString(pkey.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { + if controlURL, err := b.polc.GetString(pkey.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { prefs.ControlURL = controlURL anyChange = true } const sentinel = "HostnameDefaultValue" - hostnameFromPolicy, _ := syspolicy.GetString(pkey.Hostname, sentinel) + hostnameFromPolicy, _ := b.polc.GetString(pkey.Hostname, sentinel) switch hostnameFromPolicy { case sentinel: // An empty string for this policy value means that the admin wants to delete @@ -1861,13 +1860,13 @@ func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { anyChange = true } - if alwaysOn, _ := syspolicy.GetBoolean(pkey.AlwaysOn, false); alwaysOn && !b.overrideAlwaysOn && !prefs.WantRunning { + if alwaysOn, _ := b.polc.GetBoolean(pkey.AlwaysOn, false); alwaysOn && !b.overrideAlwaysOn && !prefs.WantRunning { prefs.WantRunning = true anyChange = true } for _, opt := range preferencePolicies { - if po, err := syspolicy.GetPreferenceOption(opt.key); err == nil { + if po, err := b.polc.GetPreferenceOption(opt.key); err == nil { curVal := opt.get(prefs.View()) newVal := po.ShouldEnable(curVal) if curVal != newVal { @@ -1885,7 +1884,7 @@ func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { // // b.mu must be held. 
func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { - if exitNodeIDStr, _ := syspolicy.GetString(pkey.ExitNodeID, ""); exitNodeIDStr != "" { + if exitNodeIDStr, _ := b.polc.GetString(pkey.ExitNodeID, ""); exitNodeIDStr != "" { exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) // Try to parse the policy setting value as an "auto:"-prefixed [ipn.ExitNodeExpression], @@ -1914,7 +1913,7 @@ func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange // or requires an auto exit node ID and the current one isn't allowed, // then update the exit node ID. if prefs.ExitNodeID != exitNodeID { - if !useAutoExitNode || !isAllowedAutoExitNodeID(prefs.ExitNodeID) { + if !useAutoExitNode || !isAllowedAutoExitNodeID(b.polc, prefs.ExitNodeID) { prefs.ExitNodeID = exitNodeID anyChange = true } @@ -1926,7 +1925,7 @@ func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange prefs.ExitNodeIP = netip.Addr{} anyChange = true } - } else if exitNodeIPStr, _ := syspolicy.GetString(pkey.ExitNodeIP, ""); exitNodeIPStr != "" { + } else if exitNodeIPStr, _ := b.polc.GetString(pkey.ExitNodeIP, ""); exitNodeIPStr != "" { if prefs.AutoExitNode != "" { prefs.AutoExitNode = "" // mutually exclusive with ExitNodeIP anyChange = true @@ -1946,7 +1945,7 @@ func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange // registerSysPolicyWatch subscribes to syspolicy change notifications // and immediately applies the effective syspolicy settings to the current profile. func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { - if unregister, err = syspolicy.RegisterChangeCallback(b.sysPolicyChanged); err != nil { + if unregister, err = b.polc.RegisterChangeCallback(b.sysPolicyChanged); err != nil { return nil, fmt.Errorf("syspolicy: LocalBacked failed to register policy change callback: %v", err) } if prefs, anyChange := b.reconcilePrefs(); anyChange { @@ -1996,7 +1995,7 @@ func (b *LocalBackend) sysPolicyChanged(policy policyclient.PolicyChange) { if _, err := b.SuggestExitNode(); err != nil && !errors.Is(err, ErrNoPreferredDERP) { b.logf("failed to select auto exit node: %v", err) } - // If [syspolicy.ExitNodeID] is set to `auto:any`, the suggested exit node ID + // If [pkey.ExitNodeID] is set to `auto:any`, the suggested exit node ID // will be used when [applySysPolicy] updates the current profile's prefs. } @@ -2132,7 +2131,7 @@ func (b *LocalBackend) resolveAutoExitNodeLocked(prefs *ipn.Prefs) (prefsChanged if !b.lastSuggestedExitNode.IsZero() { // If we have a suggested exit node, use it. newExitNodeID = b.lastSuggestedExitNode - } else if isAllowedAutoExitNodeID(prefs.ExitNodeID) { + } else if isAllowedAutoExitNodeID(b.polc, prefs.ExitNodeID) { // If we don't have a suggested exit node, but the prefs already // specify an allowed auto exit node ID, retain it. 
newExitNodeID = prefs.ExitNodeID @@ -2351,7 +2350,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { } if b.state != ipn.Running && b.conf == nil && opts.AuthKey == "" { - sysak, _ := syspolicy.GetString(pkey.AuthKey, "") + sysak, _ := b.polc.GetString(pkey.AuthKey, "") if sysak != "" { b.logf("Start: setting opts.AuthKey by syspolicy, len=%v", len(sysak)) opts.AuthKey = strings.TrimSpace(sysak) @@ -4111,7 +4110,7 @@ func (b *LocalBackend) resolveBestProfileLocked() (_ ipn.LoginProfileView, isBac if b.currentUser != nil { profile := b.pm.CurrentProfile() // TODO(nickkhyl): check if the current profile is allowed on the device, - // such as when [syspolicy.Tailnet] policy setting requires a specific Tailnet. + // such as when [pkey.Tailnet] policy setting requires a specific Tailnet. // See tailscale/corp#26249. if uid := b.currentUser.UserID(); profile.LocalUserID() != uid { profile = b.pm.DefaultUserProfile(uid) @@ -4138,7 +4137,7 @@ func (b *LocalBackend) resolveBestProfileLocked() (_ ipn.LoginProfileView, isBac // using the current profile. // // TODO(nickkhyl): check if the current profile is allowed on the device, - // such as when [syspolicy.Tailnet] policy setting requires a specific Tailnet. + // such as when [pkey.Tailnet] policy setting requires a specific Tailnet. // See tailscale/corp#26249. return b.pm.CurrentProfile(), false } @@ -4411,15 +4410,15 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn // Prevent users from changing exit node preferences // when exit node usage is managed by policy. if mp.ExitNodeIDSet || mp.ExitNodeIPSet || mp.AutoExitNodeSet { - isManaged, err := syspolicy.HasAnyOf(pkey.ExitNodeID, pkey.ExitNodeIP) + isManaged, err := b.polc.HasAnyOf(pkey.ExitNodeID, pkey.ExitNodeIP) if err != nil { err = fmt.Errorf("policy check failed: %w", err) } else if isManaged { // Allow users to override ExitNode policy settings and select an exit node manually - // if permitted by [syspolicy.AllowExitNodeOverride]. + // if permitted by [pkey.AllowExitNodeOverride]. // // Disabling exit node usage entirely is not allowed. - allowExitNodeOverride, _ := syspolicy.GetBoolean(pkey.AllowExitNodeOverride, false) + allowExitNodeOverride, _ := b.polc.GetBoolean(pkey.AllowExitNodeOverride, false) if !allowExitNodeOverride || b.changeDisablesExitNodeLocked(prefs, mp) { err = errManagedByPolicy } @@ -4517,13 +4516,13 @@ func (b *LocalBackend) adjustEditPrefsLocked(prefs ipn.PrefsView, mp *ipn.Masked // b.mu must be held; mp must not be mutated by this method. func (b *LocalBackend) onEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs, oldPrefs, newPrefs ipn.PrefsView) { if mp.WantRunningSet && !mp.WantRunning && oldPrefs.WantRunning() { - // If a user has enough rights to disconnect, such as when [syspolicy.AlwaysOn] - // is disabled, or [syspolicy.AlwaysOnOverrideWithReason] is also set and the user + // If a user has enough rights to disconnect, such as when [pkey.AlwaysOn] + // is disabled, or [pkey.AlwaysOnOverrideWithReason] is also set and the user // provides a reason for disconnecting, then we should not force the "always on" // mode on them until the policy changes, they switch to a different profile, etc. 
b.overrideAlwaysOn = true - if reconnectAfter, _ := syspolicy.GetDuration(pkey.ReconnectAfter, 0); reconnectAfter > 0 { + if reconnectAfter, _ := b.polc.GetDuration(pkey.ReconnectAfter, 0); reconnectAfter > 0 { b.startReconnectTimerLocked(reconnectAfter) } } @@ -4534,7 +4533,7 @@ func (b *LocalBackend) onEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs, o b.overrideExitNodePolicy = false } if mp.AutoExitNodeSet || mp.ExitNodeIDSet || mp.ExitNodeIPSet { - if allowExitNodeOverride, _ := syspolicy.GetBoolean(pkey.AllowExitNodeOverride, false); allowExitNodeOverride { + if allowExitNodeOverride, _ := b.polc.GetBoolean(pkey.AllowExitNodeOverride, false); allowExitNodeOverride { // If applying exit node policy settings to the new prefs results in no change, // the user is not overriding the policy. Otherwise, it is an override. b.overrideExitNodePolicy = b.applyExitNodeSysPolicyLocked(newPrefs.AsStruct()) @@ -5643,7 +5642,7 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // was selected, if any. // // If auto exit node is enabled (via [ipn.Prefs.AutoExitNode] or - // [syspolicy.ExitNodeID]), or an exit node is specified by ExitNodeIP + // [pkey.ExitNodeID]), or an exit node is specified by ExitNodeIP // instead of ExitNodeID , and we don't yet have enough info to resolve // it (usually due to missing netmap or net report), then ExitNodeID in // the prefs may be invalid (typically, [unresolvedExitNodeID]) until @@ -7786,7 +7785,7 @@ func (b *LocalBackend) SuggestExitNode() (response apitype.ExitNodeSuggestionRes } // getAllowedSuggestions returns a set of exit nodes permitted by the most recent -// [syspolicy.AllowedSuggestedExitNodes] value. Callers must not mutate the returned set. +// [pkey.AllowedSuggestedExitNodes] value. Callers must not mutate the returned set. func (b *LocalBackend) getAllowedSuggestions() set.Set[tailcfg.StableNodeID] { b.allowedSuggestedExitNodesMu.Lock() defer b.allowedSuggestedExitNodesMu.Unlock() @@ -7794,11 +7793,11 @@ func (b *LocalBackend) getAllowedSuggestions() set.Set[tailcfg.StableNodeID] { } // refreshAllowedSuggestions rebuilds the set of permitted exit nodes -// from the current [syspolicy.AllowedSuggestedExitNodes] value. +// from the current [pkey.AllowedSuggestedExitNodes] value. func (b *LocalBackend) refreshAllowedSuggestions() { b.allowedSuggestedExitNodesMu.Lock() defer b.allowedSuggestedExitNodesMu.Unlock() - b.allowedSuggestedExitNodes = fillAllowedSuggestions() + b.allowedSuggestedExitNodes = fillAllowedSuggestions(b.polc) } // selectRegionFunc returns a DERP region from the slice of candidate regions. @@ -7810,8 +7809,8 @@ type selectRegionFunc func(views.Slice[int]) int // choice. 
type selectNodeFunc func(nodes views.Slice[tailcfg.NodeView], last tailcfg.StableNodeID) tailcfg.NodeView -func fillAllowedSuggestions() set.Set[tailcfg.StableNodeID] { - nodes, err := syspolicy.GetStringArray(pkey.AllowedSuggestedExitNodes, nil) +func fillAllowedSuggestions(polc policyclient.Client) set.Set[tailcfg.StableNodeID] { + nodes, err := polc.GetStringArray(pkey.AllowedSuggestedExitNodes, nil) if err != nil { log.Printf("fillAllowedSuggestions: unable to look up %q policy: %v", pkey.AllowedSuggestedExitNodes, err) return nil @@ -8176,11 +8175,11 @@ const ( unresolvedExitNodeID tailcfg.StableNodeID = "auto:any" ) -func isAllowedAutoExitNodeID(exitNodeID tailcfg.StableNodeID) bool { +func isAllowedAutoExitNodeID(polc policyclient.Client, exitNodeID tailcfg.StableNodeID) bool { if exitNodeID == "" { return false // an exit node is required } - if nodes, _ := syspolicy.GetStringArray(pkey.AllowedSuggestedExitNodes, nil); nodes != nil { + if nodes, _ := polc.GetStringArray(pkey.AllowedSuggestedExitNodes, nil); nodes != nil { return slices.Contains(nodes, string(exitNodeID)) } @@ -8343,7 +8342,7 @@ func (b *LocalBackend) stateEncrypted() opt.Bool { // the Keychain. A future release will clean up the on-disk state // files. // TODO(#15830): always return true here once MacSys is fully migrated. - sp, _ := syspolicy.GetBoolean(pkey.EncryptState, false) + sp, _ := b.polc.GetBoolean(pkey.EncryptState, false) return opt.NewBool(sp) default: // Probably self-compiled tailscaled, we don't use the Keychain diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 2b83e47f8..0967bf1ff 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -47,6 +47,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tsd" "tailscale.com/tstest" + "tailscale.com/tstest/deptest" "tailscale.com/types/dnstype" "tailscale.com/types/ipproto" "tailscale.com/types/key" @@ -63,6 +64,7 @@ import ( "tailscale.com/util/set" "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" "tailscale.com/wgengine" @@ -5541,6 +5543,28 @@ func TestReadWriteRouteInfo(t *testing.T) { } } +// staticPolicy maps policy keys to their corresponding values, +// which must be of the correct type (string, []string, bool, etc). +// +// It is used for testing purposes to simulate policy client behavior. +// It panics if the values are the wrong type. 
+type staticPolicy map[pkey.Key]any + +type testPolicy struct { + staticPolicy + policyclient.Client +} + +func (sp testPolicy) GetStringArray(key pkey.Key, defaultVal []string) ([]string, error) { + if val, ok := sp.staticPolicy[key]; ok { + if arr, ok := val.([]string); ok { + return arr, nil + } + return nil, fmt.Errorf("key %s is not a []string", key) + } + return defaultVal, nil +} + func TestFillAllowedSuggestions(t *testing.T) { tests := []struct { name string @@ -5571,15 +5595,16 @@ func TestFillAllowedSuggestions(t *testing.T) { want: []tailcfg.StableNodeID{"ABC", "def", "gHiJ"}, }, } - syspolicy.RegisterWellKnownSettingsForTest(t) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - pkey.AllowedSuggestedExitNodes, tt.allowPolicy, - )) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) + polc := testPolicy{ + staticPolicy: staticPolicy{ + pkey.AllowedSuggestedExitNodes: tt.allowPolicy, + }, + } - got := fillAllowedSuggestions() + got := fillAllowedSuggestions(polc) if got == nil { if tt.want == nil { return @@ -7008,6 +7033,19 @@ func TestDisplayMessageIPNBus(t *testing.T) { } } +func TestDeps(t *testing.T) { + deptest.DepChecker{ + OnImport: func(pkg string) { + switch pkg { + case "tailscale.com/util/syspolicy", + "tailscale.com/util/syspolicy/setting", + "tailscale.com/util/syspolicy/rsop": + t.Errorf("ipn/ipnlocal: importing syspolicy package %q is not allowed; only policyclient and its deps should be used by ipn/ipnlocal", pkg) + } + }, + }.Check(t) +} + func checkError(tb testing.TB, got, want error, fatal bool) { tb.Helper() f := tb.Errorf diff --git a/tsd/syspolicy_on.go b/tsd/syspolicy_on.go index 8d7762bd9..e9811b88b 100644 --- a/tsd/syspolicy_on.go +++ b/tsd/syspolicy_on.go @@ -6,9 +6,12 @@ package tsd import ( + "time" + "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" + "tailscale.com/util/syspolicy/ptype" ) func getPolicyClient() policyclient.Client { return globalSyspolicy{} } @@ -36,6 +39,26 @@ func (globalSyspolicy) SetDebugLoggingEnabled(enabled bool) { syspolicy.SetDebugLoggingEnabled(enabled) } +func (globalSyspolicy) GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) { + return syspolicy.GetUint64(key, defaultValue) +} + +func (globalSyspolicy) GetDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { + return syspolicy.GetDuration(name, defaultValue) +} + +func (globalSyspolicy) GetPreferenceOption(name pkey.Key) (ptype.PreferenceOption, error) { + return syspolicy.GetPreferenceOption(name) +} + +func (globalSyspolicy) GetVisibility(name pkey.Key) (ptype.Visibility, error) { + return syspolicy.GetVisibility(name) +} + +func (globalSyspolicy) HasAnyOf(keys ...pkey.Key) (bool, error) { + return syspolicy.HasAnyOf(keys...) 
+} + func (globalSyspolicy) RegisterChangeCallback(cb func(policyclient.PolicyChange)) (unregister func(), err error) { return syspolicy.RegisterChangeCallback(cb) } diff --git a/tstest/deptest/deptest.go b/tstest/deptest/deptest.go index c248d6c20..c0b6d8b8c 100644 --- a/tstest/deptest/deptest.go +++ b/tstest/deptest/deptest.go @@ -24,7 +24,8 @@ import ( type DepChecker struct { GOOS string // optional GOARCH string // optional - OnDep func(string) // if non-nil, called per import + OnDep func(string) // if non-nil, called per dependency + OnImport func(string) // if non-nil, called per import BadDeps map[string]string // package => why WantDeps set.Set[string] // packages expected Tags string // comma-separated @@ -52,7 +53,8 @@ func (c DepChecker) Check(t *testing.T) { t.Fatal(err) } var res struct { - Deps []string + Imports []string + Deps []string } if err := json.Unmarshal(out, &res); err != nil { t.Fatal(err) @@ -66,6 +68,12 @@ func (c DepChecker) Check(t *testing.T) { return strings.TrimSpace(string(out)) }) + if c.OnImport != nil { + for _, imp := range res.Imports { + c.OnImport(imp) + } + } + for _, dep := range res.Deps { if c.OnDep != nil { c.OnDep(dep) diff --git a/util/syspolicy/policyclient/policyclient.go b/util/syspolicy/policyclient/policyclient.go index 0b15599c1..aadcbc60e 100644 --- a/util/syspolicy/policyclient/policyclient.go +++ b/util/syspolicy/policyclient/policyclient.go @@ -6,7 +6,12 @@ // of syspolicy is omitted from the build. package policyclient -import "tailscale.com/util/syspolicy/pkey" +import ( + "time" + + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" +) // Client is the interface between code making questions about the system policy // and the actual implementation. @@ -23,9 +28,38 @@ type Client interface { // or defaultValue (and a nil error) if it does not exist. GetBoolean(key pkey.Key, defaultValue bool) (bool, error) + // GetUint64 returns a numeric policy setting with the specified key, + // or defaultValue (and a nil error) if it does not exist. + GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) + + // GetDuration loads a policy from the registry that can be managed by an + // enterprise policy management system and describes a duration for some + // action. The registry value should be a string that time.ParseDuration + // understands. If the registry value is "" or can not be processed, + // defaultValue (and a nil error) is returned instead. + GetDuration(key pkey.Key, defaultValue time.Duration) (time.Duration, error) + + // GetPreferenceOption loads a policy from the registry that can be + // managed by an enterprise policy management system and allows administrative + // overrides of users' choices in a way that we do not want tailcontrol to have + // the authority to set. It describes user-decides/always/never options, where + // "always" and "never" remove the user's ability to make a selection. If not + // present or set to a different value, "user-decides" is the default. + GetPreferenceOption(key pkey.Key) (ptype.PreferenceOption, error) + + // GetVisibility returns whether a UI element should be visible based on + // the system's configuration. + // If unconfigured, implementations should return [ptype.VisibleByPolicy] + // and a nil error. + GetVisibility(key pkey.Key) (ptype.Visibility, error) + // SetDebugLoggingEnabled enables or disables debug logging for the policy client. 
SetDebugLoggingEnabled(enabled bool) + // HasAnyOf returns whether at least one of the specified policy settings is + // configured, or an error if no keys are provided or the check fails. + HasAnyOf(keys ...pkey.Key) (bool, error) + // RegisterChangeCallback registers a callback function that will be called // whenever a policy change is detected. It returns a function to unregister // the callback and an error if the registration fails. @@ -59,6 +93,26 @@ func (NoPolicyClient) GetStringArray(key pkey.Key, defaultValue []string) ([]str return defaultValue, nil } +func (NoPolicyClient) GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) { + return defaultValue, nil +} + +func (NoPolicyClient) GetDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { + return defaultValue, nil +} + +func (NoPolicyClient) GetPreferenceOption(name pkey.Key) (ptype.PreferenceOption, error) { + return ptype.ShowChoiceByPolicy, nil +} + +func (NoPolicyClient) GetVisibility(name pkey.Key) (ptype.Visibility, error) { + return ptype.VisibleByPolicy, nil +} + +func (NoPolicyClient) HasAnyOf(keys ...pkey.Key) (bool, error) { + return false, nil +} + func (NoPolicyClient) SetDebugLoggingEnabled(enabled bool) {} func (NoPolicyClient) RegisterChangeCallback(cb func(PolicyChange)) (unregister func(), err error) { From 0d23490e1a7593661d9ef3b76dd151d2d70778b9 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 1 Sep 2025 15:31:49 -0700 Subject: [PATCH 1258/1708] ipn/ipnlocal: simplify a test with a new simpler syspolicy client test type Less indirection. Updates #16998 Updates #12614 Change-Id: I5a3a3c3f3b195486b2731ec002d2532337b3d211 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local_test.go | 97 ++++++++++++++++++++++++-------------- tsd/tsd.go | 6 +++ 2 files changed, 68 insertions(+), 35 deletions(-) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 0967bf1ff..4843a941f 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -632,7 +632,7 @@ func TestConfigureExitNode(t *testing.T) { exitNodeIDPolicy *tailcfg.StableNodeID exitNodeIPPolicy *netip.Addr exitNodeAllowedIDs []tailcfg.StableNodeID // nil if all IDs are allowed for auto exit nodes - exitNodeAllowOverride bool // whether [syspolicy.AllowExitNodeOverride] should be set to true + exitNodeAllowOverride bool // whether [pkey.AllowExitNodeOverride] should be set to true wantChangePrefsErr error // if non-nil, the error we expect from [LocalBackend.EditPrefsAs] wantPrefs ipn.Prefs wantExitNodeToggleErr error // if non-nil, the error we expect from [LocalBackend.SetUseExitNodeEnabled] @@ -970,7 +970,7 @@ func TestConfigureExitNode(t *testing.T) { name: "auto-any-via-policy/no-netmap/with-disallowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID prefs: ipn.Prefs{ ControlURL: controlURL, - ExitNodeID: exitNode2.StableID(), // not allowed by [syspolicy.AllowedSuggestedExitNodes] + ExitNodeID: exitNode2.StableID(), // not allowed by [pkey.AllowedSuggestedExitNodes] }, netMap: nil, report: report, @@ -989,7 +989,7 @@ func TestConfigureExitNode(t *testing.T) { name: "auto-any-via-policy/with-netmap/with-allowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID prefs: ipn.Prefs{ ControlURL: controlURL, - ExitNodeID: exitNode1.StableID(), // not allowed by [syspolicy.AllowedSuggestedExitNodes] + ExitNodeID: exitNode1.StableID(), // not allowed by [pkey.AllowedSuggestedExitNodes] }, 
netMap: clientNetmap, report: report, @@ -1072,7 +1072,7 @@ func TestConfigureExitNode(t *testing.T) { wantHostinfoExitNodeID: exitNode1.StableID(), }, { - name: "auto-any-via-policy/allow-override/change", // changing the exit node is allowed by [syspolicy.AllowExitNodeOverride] + name: "auto-any-via-policy/allow-override/change", // changing the exit node is allowed by [pkey.AllowExitNodeOverride] prefs: ipn.Prefs{ ControlURL: controlURL, }, @@ -1094,7 +1094,7 @@ func TestConfigureExitNode(t *testing.T) { wantHostinfoExitNodeID: exitNode2.StableID(), }, { - name: "auto-any-via-policy/allow-override/clear", // clearing the exit node ID is not allowed by [syspolicy.AllowExitNodeOverride] + name: "auto-any-via-policy/allow-override/clear", // clearing the exit node ID is not allowed by [pkey.AllowExitNodeOverride] prefs: ipn.Prefs{ ControlURL: controlURL, }, @@ -1118,7 +1118,7 @@ func TestConfigureExitNode(t *testing.T) { wantHostinfoExitNodeID: exitNode1.StableID(), }, { - name: "auto-any-via-policy/allow-override/toggle-off", // similarly, toggling off the exit node is not allowed even with [syspolicy.AllowExitNodeOverride] + name: "auto-any-via-policy/allow-override/toggle-off", // similarly, toggling off the exit node is not allowed even with [pkey.AllowExitNodeOverride] prefs: ipn.Prefs{ ControlURL: controlURL, }, @@ -1179,36 +1179,32 @@ func TestConfigureExitNode(t *testing.T) { wantHostinfoExitNodeID: exitNode1.StableID(), }, } - syspolicy.RegisterWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + var pol testPolicy // Configure policy settings, if any. - store := source.NewTestStore(t) if tt.exitNodeIDPolicy != nil { - store.SetStrings(source.TestSettingOf(pkey.ExitNodeID, string(*tt.exitNodeIDPolicy))) + pol.Set(pkey.ExitNodeID, string(*tt.exitNodeIDPolicy)) } if tt.exitNodeIPPolicy != nil { - store.SetStrings(source.TestSettingOf(pkey.ExitNodeIP, tt.exitNodeIPPolicy.String())) + pol.Set(pkey.ExitNodeIP, tt.exitNodeIPPolicy.String()) } if tt.exitNodeAllowedIDs != nil { - store.SetStringLists(source.TestSettingOf(pkey.AllowedSuggestedExitNodes, toStrings(tt.exitNodeAllowedIDs))) + pol.Set(pkey.AllowedSuggestedExitNodes, toStrings(tt.exitNodeAllowedIDs)) } if tt.exitNodeAllowOverride { - store.SetBooleans(source.TestSettingOf(pkey.AllowExitNodeOverride, true)) - } - if store.IsEmpty() { - // No syspolicy settings, so don't register a store. - // This allows the test to run in parallel with other tests. - t.Parallel() - } else { - // Register the store for syspolicy settings to make them available to the LocalBackend. - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, store) + pol.Set(pkey.AllowExitNodeOverride, true) } // Create a new LocalBackend with the given prefs. // Any syspolicy settings will be applied to the initial prefs. - lb := newTestLocalBackend(t) + sys := tsd.NewSystem() + sys.PolicyClient.Set(pol) + lb := newTestLocalBackendWithSys(t, sys) lb.SetPrefsForTest(tt.prefs.Clone()) + // Then set the netcheck report and netmap, if any. if tt.report != nil { lb.MagicConn().SetLastNetcheckReportForTest(t.Context(), tt.report) @@ -5543,28 +5539,62 @@ func TestReadWriteRouteInfo(t *testing.T) { } } -// staticPolicy maps policy keys to their corresponding values, -// which must be of the correct type (string, []string, bool, etc). +// testPolicy is a [policyclient.Client] with a static mapping of values. +// The map value must be of the correct type (string, []string, bool, etc). 
// // It is used for testing purposes to simulate policy client behavior. // It panics if the values are the wrong type. -type staticPolicy map[pkey.Key]any - type testPolicy struct { - staticPolicy - policyclient.Client + v map[pkey.Key]any + policyclient.NoPolicyClient +} + +func (sp *testPolicy) Set(key pkey.Key, value any) { + if sp.v == nil { + sp.v = make(map[pkey.Key]any) + } + sp.v[key] = value } func (sp testPolicy) GetStringArray(key pkey.Key, defaultVal []string) ([]string, error) { - if val, ok := sp.staticPolicy[key]; ok { + if val, ok := sp.v[key]; ok { if arr, ok := val.([]string); ok { return arr, nil } - return nil, fmt.Errorf("key %s is not a []string", key) + panic(fmt.Sprintf("key %s is not a []string", key)) + } + return defaultVal, nil +} + +func (sp testPolicy) GetString(key pkey.Key, defaultVal string) (string, error) { + if val, ok := sp.v[key]; ok { + if str, ok := val.(string); ok { + return str, nil + } + panic(fmt.Sprintf("key %s is not a string", key)) + } + return defaultVal, nil +} + +func (sp testPolicy) GetBoolean(key pkey.Key, defaultVal bool) (bool, error) { + if val, ok := sp.v[key]; ok { + if b, ok := val.(bool); ok { + return b, nil + } + panic(fmt.Sprintf("key %s is not a bool", key)) } return defaultVal, nil } +func (sp testPolicy) HasAnyOf(keys ...pkey.Key) (bool, error) { + for _, key := range keys { + if _, ok := sp.v[key]; ok { + return true, nil + } + } + return false, nil +} + func TestFillAllowedSuggestions(t *testing.T) { tests := []struct { name string @@ -5598,13 +5628,10 @@ func TestFillAllowedSuggestions(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - polc := testPolicy{ - staticPolicy: staticPolicy{ - pkey.AllowedSuggestedExitNodes: tt.allowPolicy, - }, - } + var pol testPolicy + pol.Set(pkey.AllowedSuggestedExitNodes, tt.allowPolicy) - got := fillAllowedSuggestions(polc) + got := fillAllowedSuggestions(pol) if got == nil { if tt.want == nil { return diff --git a/tsd/tsd.go b/tsd/tsd.go index b7194a3d7..17795d3c5 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -59,6 +59,7 @@ type System struct { Netstack SubSystem[NetstackImpl] // actually a *netstack.Impl DriveForLocal SubSystem[drive.FileSystemForLocal] DriveForRemote SubSystem[drive.FileSystemForRemote] + PolicyClient SubSystem[policyclient.Client] // InitialConfig is initial server config, if any. // It is nil if the node is not in declarative mode. @@ -127,6 +128,8 @@ func (s *System) Set(v any) { s.DriveForLocal.Set(v) case drive.FileSystemForRemote: s.DriveForRemote.Set(v) + case policyclient.Client: + s.PolicyClient.Set(v) default: panic(fmt.Sprintf("unknown type %T", v)) } @@ -169,6 +172,9 @@ func (s *System) UserMetricsRegistry() *usermetric.Registry { // PolicyClientOrDefault returns the policy client if set or a no-op default // otherwise. It always returns a non-nil value. func (s *System) PolicyClientOrDefault() policyclient.Client { + if client, ok := s.PolicyClient.GetOK(); ok { + return client + } return getPolicyClient() } From 9e9bf130633fc1816646e5f1834054c0d7e551dc Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Tue, 2 Sep 2025 15:57:31 -0700 Subject: [PATCH 1259/1708] ipn/ipnlocal: revert some locking changes ahead of release branch cut (#17011) --- ipn/ipnlocal/local.go | 416 ++++++++++++++++++------------------- ipn/ipnlocal/local_test.go | 2 +- ipn/ipnlocal/profiles.go | 15 +- 3 files changed, 212 insertions(+), 221 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5f70ae8ef..54dcda30a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -799,8 +799,8 @@ func (b *LocalBackend) Dialer() *tsdial.Dialer { // It returns (false, nil) if not running in declarative mode, (true, nil) on // success, or (false, error) on failure. func (b *LocalBackend) ReloadConfig() (ok bool, err error) { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() if b.conf == nil { return false, nil } @@ -808,7 +808,7 @@ func (b *LocalBackend) ReloadConfig() (ok bool, err error) { if err != nil { return false, err } - if err := b.setConfigLocked(conf); err != nil { + if err := b.setConfigLockedOnEntry(conf, unlock); err != nil { return false, fmt.Errorf("error setting config: %w", err) } @@ -865,9 +865,10 @@ func (b *LocalBackend) setStateLocked(state ipn.State) { } } -// setConfigLocked uses the provided config to update the backend's prefs +// setConfigLockedOnEntry uses the provided config to update the backend's prefs // and other state. -func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error { +func (b *LocalBackend) setConfigLockedOnEntry(conf *conffile.Config, unlock unlockOnce) error { + defer unlock() p := b.pm.CurrentPrefs().AsStruct() mp, err := conf.Parsed.ToPrefs() if err != nil { @@ -875,7 +876,8 @@ func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error { } p.ApplyEdits(&mp) b.setStaticEndpointsFromConfigLocked(conf) - b.setPrefsLocked(p) + b.setPrefsLockedOnEntry(p, unlock) + b.conf = conf return nil } @@ -1503,6 +1505,8 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control return } if st.Err != nil { + // The following do not depend on any data for which we need b locked. + unlock.UnlockEarly() if errors.Is(st.Err, io.EOF) { b.logf("[v1] Received error: EOF") return @@ -1511,7 +1515,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control var uerr controlclient.UserVisibleError if errors.As(st.Err, &uerr) { s := uerr.UserVisibleError() - b.sendToLocked(ipn.Notify{ErrMessage: &s}, allClients) + b.send(ipn.Notify{ErrMessage: &s}) } return } @@ -1960,13 +1964,13 @@ func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { // // b.mu must not be held. func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() prefs := b.pm.CurrentPrefs().AsStruct() if !b.reconcilePrefsLocked(prefs) { + unlock.UnlockEarly() return prefs.View(), false } - return b.setPrefsLocked(prefs), true + return b.setPrefsLockedOnEntry(prefs, unlock), true } // sysPolicyChanged is a callback triggered by syspolicy when it detects @@ -2329,8 +2333,8 @@ func (b *LocalBackend) Start(opts ipn.Options) error { clientToShutdown.Shutdown() } }() - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() if opts.UpdatePrefs != nil { if err := b.checkPrefsLocked(opts.UpdatePrefs); err != nil { @@ -2536,7 +2540,8 @@ func (b *LocalBackend) Start(opts ipn.Options) error { // regress tsnet.Server restarts. 
cc.Login(controlclient.LoginDefault) } - b.stateMachineLocked() + b.stateMachineLockedOnEntry(unlock) + return nil } @@ -3537,8 +3542,8 @@ func (b *LocalBackend) onClientVersion(v *tailcfg.ClientVersion) { } func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() prefs := b.pm.CurrentPrefs() if !prefs.Valid() { @@ -3560,14 +3565,14 @@ func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { b.logf("using tailnet default auto-update setting: %v", au) prefsClone := prefs.AsStruct() prefsClone.AutoUpdate.Apply = opt.NewBool(au) - _, err := b.editPrefsLocked( + _, err := b.editPrefsLockedOnEntry( ipnauth.Self, &ipn.MaskedPrefs{ Prefs: *prefsClone, AutoUpdateSet: ipn.AutoUpdatePrefsMask{ ApplySet: true, }, - }) + }, unlock) if err != nil { b.logf("failed to apply tailnet-wide default for auto-updates (%v): %v", au, err) return @@ -4004,8 +4009,8 @@ func (b *LocalBackend) shouldUploadServices() bool { // // On non-multi-user systems, the actor should be set to nil. func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() var userIdentifier string if user := cmp.Or(actor, b.currentUser); user != nil { @@ -4027,7 +4032,7 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { action = "connected" } reason := fmt.Sprintf("client %s (%s)", action, userIdentifier) - b.switchToBestProfileLocked(reason) + b.switchToBestProfileLockedOnEntry(reason, unlock) } // SwitchToBestProfile selects the best profile to use, @@ -4037,14 +4042,13 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { // or disconnecting, or a change in the desktop session state, and is used // for logging. func (b *LocalBackend) SwitchToBestProfile(reason string) { - b.mu.Lock() - defer b.mu.Unlock() - b.switchToBestProfileLocked(reason) + b.switchToBestProfileLockedOnEntry(reason, b.lockAndGetUnlock()) } -// switchToBestProfileLocked is like [LocalBackend.SwitchToBestProfile], but -// the caller must hold b.mu. -func (b *LocalBackend) switchToBestProfileLocked(reason string) { +// switchToBestProfileLockedOnEntry is like [LocalBackend.SwitchToBestProfile], +// but b.mu must held on entry. It is released on exit. +func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock unlockOnce) { + defer unlock() oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() profile, background := b.resolveBestProfileLocked() cp, switched, err := b.pm.SwitchToProfile(profile) @@ -4075,7 +4079,7 @@ func (b *LocalBackend) switchToBestProfileLocked(reason string) { if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(); oldControlURL != newControlURL { b.resetDialPlan() } - if err := b.resetForProfileChangeLocked(); err != nil { + if err := b.resetForProfileChangeLockedOnEntry(unlock); err != nil { // TODO(nickkhyl): The actual reset cannot fail. However, // the TKA initialization or [LocalBackend.Start] can fail. // These errors are not critical as far as we're concerned. @@ -4311,8 +4315,8 @@ func (b *LocalBackend) checkAutoUpdatePrefsLocked(p *ipn.Prefs) error { // Setting the value to false when use of an exit node is already false is not an error, // nor is true when the exit node is already in use. 
func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.PrefsView, error) { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() p0 := b.pm.CurrentPrefs() if v && p0.ExitNodeID() != "" { @@ -4353,7 +4357,7 @@ func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.P mp.InternalExitNodePrior = p0.ExitNodeID() } } - return b.editPrefsLocked(actor, mp) + return b.editPrefsLockedOnEntry(actor, mp, unlock) } // MaybeClearAppConnector clears the routes from any AppConnector if @@ -4382,9 +4386,7 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip return ipn.PrefsView{}, errors.New("can't set Internal fields") } - b.mu.Lock() - defer b.mu.Unlock() - return b.editPrefsLocked(actor, mp) + return b.editPrefsLockedOnEntry(actor, mp, b.lockAndGetUnlock()) } // checkEditPrefsAccessLocked checks whether the current user has access @@ -4572,8 +4574,8 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { profileID := b.pm.CurrentProfile().ID() var reconnectTimer tstime.TimerController reconnectTimer = b.clock.AfterFunc(d, func() { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() if b.reconnectTimer != reconnectTimer { // We're either not the most recent timer, or we lost the race when @@ -4591,7 +4593,7 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { } mp := &ipn.MaskedPrefs{WantRunningSet: true, Prefs: ipn.Prefs{WantRunning: true}} - if _, err := b.editPrefsLocked(ipnauth.Self, mp); err != nil { + if _, err := b.editPrefsLockedOnEntry(ipnauth.Self, mp, unlock); err != nil { b.logf("failed to automatically reconnect as %q after %v: %v", cp.Name(), d, err) } else { b.logf("automatically reconnected as %q after %v", cp.Name(), d) @@ -4620,8 +4622,11 @@ func (b *LocalBackend) stopReconnectTimerLocked() { } } -// The caller must hold b.mu. -func (b *LocalBackend) editPrefsLocked(actor ipnauth.Actor, mp *ipn.MaskedPrefs) (ipn.PrefsView, error) { +// Warning: b.mu must be held on entry, but it unlocks it on the way out. +// TODO(bradfitz): redo the locking on all these weird methods like this. +func (b *LocalBackend) editPrefsLockedOnEntry(actor ipnauth.Actor, mp *ipn.MaskedPrefs, unlock unlockOnce) (ipn.PrefsView, error) { + defer unlock() // for error paths + p0 := b.pm.CurrentPrefs() // Check if the changes in mp are allowed. @@ -4658,10 +4663,12 @@ func (b *LocalBackend) editPrefsLocked(actor ipnauth.Actor, mp *ipn.MaskedPrefs) // before the modified prefs are actually set for the current profile. b.onEditPrefsLocked(actor, mp, p0, p1.View()) - newPrefs := b.setPrefsLocked(p1) + newPrefs := b.setPrefsLockedOnEntry(p1, unlock) + + // Note: don't perform any actions for the new prefs here. Not + // every prefs change goes through EditPrefs. Put your actions + // in setPrefsLocksOnEntry instead. - // Note: don't perform any actions for the new prefs here. Not every prefs - // change goes through EditPrefs. Put your actions in setPrefsLocked instead. // This should return the public prefs, not the private ones. return stripKeysFromPrefs(newPrefs), nil } @@ -4709,9 +4716,12 @@ func (b *LocalBackend) shouldWireInactiveIngressLocked() bool { return b.serveConfig.Valid() && !b.hasIngressEnabledLocked() && b.wantIngressLocked() } -// setPrefsLocked requires b.mu be held to call it. It returns a read-only -// copy of the new prefs. 
-func (b *LocalBackend) setPrefsLocked(newp *ipn.Prefs) ipn.PrefsView { +// setPrefsLockedOnEntry requires b.mu be held to call it, but it +// unlocks b.mu when done. newp ownership passes to this function. +// It returns a read-only copy of the new prefs. +func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) ipn.PrefsView { + defer unlock() + cn := b.currentNode() netMap := cn.NetMap() b.setAtomicValuesFromPrefsLocked(newp.View()) @@ -4780,33 +4790,28 @@ func (b *LocalBackend) setPrefsLocked(newp *ipn.Prefs) ipn.PrefsView { b.stopOfflineAutoUpdate() } - // Update status that needs to happen outside the lock, but reacquire it - // before returning (including in case of panics). - func() { - b.mu.Unlock() - defer b.mu.Lock() + unlock.UnlockEarly() - if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged { - b.doSetHostinfoFilterServices() - } + if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged { + b.doSetHostinfoFilterServices() + } - if netMap != nil { - b.MagicConn().SetDERPMap(netMap.DERPMap) - } + if netMap != nil { + b.MagicConn().SetDERPMap(netMap.DERPMap) + } - if !oldp.WantRunning() && newp.WantRunning && cc != nil { - b.logf("transitioning to running; doing Login...") - cc.Login(controlclient.LoginDefault) - } + if !oldp.WantRunning() && newp.WantRunning && cc != nil { + b.logf("transitioning to running; doing Login...") + cc.Login(controlclient.LoginDefault) + } - if oldp.WantRunning() != newp.WantRunning { - b.stateMachine() - } else { - b.authReconfig() - } + if oldp.WantRunning() != newp.WantRunning { + b.stateMachine() + } else { + b.authReconfig() + } - b.send(ipn.Notify{Prefs: &prefs}) - }() + b.send(ipn.Notify{Prefs: &prefs}) return prefs } @@ -4949,34 +4954,36 @@ func (b *LocalBackend) peerAPIServicesLocked() (ret []tailcfg.Service) { // TODO(danderson): we shouldn't be mangling hostinfo here after // painstakingly constructing it in twelvety other places. func (b *LocalBackend) doSetHostinfoFilterServices() { - // Check the control client, hostinfo, and services under the mutex. - // On return, either both the client and hostinfo are nil, or both are non-nil. - // When non-nil, the Hostinfo is a clone of the value carried by b, safe to modify. - cc, hi, peerAPIServices := func() (controlclient.Client, *tailcfg.Hostinfo, []tailcfg.Service) { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() - if b.cc == nil { - return nil, nil, nil // control client isn't up yet - } else if b.hostinfo == nil { - b.logf("[unexpected] doSetHostinfoFilterServices with nil hostinfo") - return nil, nil, nil - } - svc := b.peerAPIServicesLocked() - if b.egg { - svc = append(svc, tailcfg.Service{Proto: "egg", Port: 1}) - } - // Make a clone of hostinfo so we can mutate the service field, below. - return b.cc, b.hostinfo.Clone(), svc - }() - if cc == nil || hi == nil { + cc := b.cc + if cc == nil { + // Control client isn't up yet. return } + if b.hostinfo == nil { + b.logf("[unexpected] doSetHostinfoFilterServices with nil hostinfo") + return + } + peerAPIServices := b.peerAPIServicesLocked() + if b.egg { + peerAPIServices = append(peerAPIServices, tailcfg.Service{Proto: "egg", Port: 1}) + } + // TODO(maisem,bradfitz): store hostinfo as a view, not as a mutable struct. + hi := *b.hostinfo // shallow copy + unlock.UnlockEarly() + + // Make a shallow copy of hostinfo so we can mutate + // at the Service field. 
if !b.shouldUploadServices() { hi.Services = []tailcfg.Service{} } - hi.Services = append(hi.Services, peerAPIServices...) + // Don't mutate hi.Service's underlying array. Append to + // the slice with no free capacity. + c := len(hi.Services) + hi.Services = append(hi.Services[:c:c], peerAPIServices...) hi.PushDeviceToken = b.pushDeviceToken.Load() // Compare the expected ports from peerAPIServices to the actual ports in hi.Services. @@ -4986,7 +4993,7 @@ func (b *LocalBackend) doSetHostinfoFilterServices() { b.logf("Hostinfo peerAPI ports changed: expected %v, got %v", expectedPorts, actualPorts) } - cc.SetHostinfo(hi) + cc.SetHostinfo(&hi) } type portPair struct { @@ -5665,13 +5672,13 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // really this is more "one of several places in which random things // happen". func (b *LocalBackend) enterState(newState ipn.State) { - b.mu.Lock() - defer b.mu.Unlock() - b.enterStateLocked(newState) + unlock := b.lockAndGetUnlock() + b.enterStateLockedOnEntry(newState, unlock) } -// enterStateLocked is like enterState but requires the caller to hold b.mu. -func (b *LocalBackend) enterStateLocked(newState ipn.State) { +// enterStateLockedOnEntry is like enterState but requires b.mu be held to call +// it, but it unlocks b.mu when done (via unlock, a once func). +func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlockOnce) { cn := b.currentNode() oldState := b.state b.setStateLocked(newState) @@ -5720,56 +5727,51 @@ func (b *LocalBackend) enterStateLocked(newState ipn.State) { b.maybeStartOfflineAutoUpdate(prefs) } - // Resolve the state transition outside the lock, but reacquire it before - // returning (including in case of panics). - func() { - b.mu.Unlock() - defer b.mu.Lock() + unlock.UnlockEarly() - // prefs may change irrespective of state; WantRunning should be explicitly - // set before potential early return even if the state is unchanged. - b.health.SetIPNState(newState.String(), prefs.Valid() && prefs.WantRunning()) - if oldState == newState { - return - } - b.logf("Switching ipn state %v -> %v (WantRunning=%v, nm=%v)", - oldState, newState, prefs.WantRunning(), netMap != nil) - b.send(ipn.Notify{State: &newState}) + // prefs may change irrespective of state; WantRunning should be explicitly + // set before potential early return even if the state is unchanged. + b.health.SetIPNState(newState.String(), prefs.Valid() && prefs.WantRunning()) + if oldState == newState { + return + } + b.logf("Switching ipn state %v -> %v (WantRunning=%v, nm=%v)", + oldState, newState, prefs.WantRunning(), netMap != nil) + b.send(ipn.Notify{State: &newState}) - switch newState { - case ipn.NeedsLogin: - systemd.Status("Needs login: %s", authURL) - if b.seamlessRenewalEnabled() { - break - } - b.blockEngineUpdates(true) - fallthrough - case ipn.Stopped, ipn.NoState: - // Unconfigure the engine if it has stopped (WantRunning is set to false) - // or if we've switched to a different profile and the state is unknown. - err := b.e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) - if err != nil { - b.logf("Reconfig(down): %v", err) - } + switch newState { + case ipn.NeedsLogin: + systemd.Status("Needs login: %s", authURL) + if b.seamlessRenewalEnabled() { + break + } + b.blockEngineUpdates(true) + fallthrough + case ipn.Stopped, ipn.NoState: + // Unconfigure the engine if it has stopped (WantRunning is set to false) + // or if we've switched to a different profile and the state is unknown. 
+ err := b.e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) + if err != nil { + b.logf("Reconfig(down): %v", err) + } - if newState == ipn.Stopped && authURL == "" { - systemd.Status("Stopped; run 'tailscale up' to log in") - } - case ipn.Starting, ipn.NeedsMachineAuth: - b.authReconfig() - // Needed so that UpdateEndpoints can run - b.e.RequestStatus() - case ipn.Running: - var addrStrs []string - addrs := netMap.GetAddresses() - for _, p := range addrs.All() { - addrStrs = append(addrStrs, p.Addr().String()) - } - systemd.Status("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) - default: - b.logf("[unexpected] unknown newState %#v", newState) + if newState == ipn.Stopped && authURL == "" { + systemd.Status("Stopped; run 'tailscale up' to log in") } - }() + case ipn.Starting, ipn.NeedsMachineAuth: + b.authReconfig() + // Needed so that UpdateEndpoints can run + b.e.RequestStatus() + case ipn.Running: + var addrStrs []string + addrs := netMap.GetAddresses() + for _, p := range addrs.All() { + addrStrs = append(addrStrs, p.Addr().String()) + } + systemd.Status("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) + default: + b.logf("[unexpected] unknown newState %#v", newState) + } } func (b *LocalBackend) hasNodeKeyLocked() bool { @@ -5869,29 +5871,27 @@ func (b *LocalBackend) nextStateLocked() ipn.State { // TODO(apenwarr): use a channel or something to prevent reentrancy? // Or maybe just call the state machine from fewer places. func (b *LocalBackend) stateMachine() { - b.mu.Lock() - defer b.mu.Unlock() - b.stateMachineLocked() + unlock := b.lockAndGetUnlock() + b.stateMachineLockedOnEntry(unlock) } -// stateMachineLocked is like stateMachine but requires b.mu be held. -func (b *LocalBackend) stateMachineLocked() { - b.enterStateLocked(b.nextStateLocked()) +// stateMachineLockedOnEntry is like stateMachine but requires b.mu be held to +// call it, but it unlocks b.mu when done (via unlock, a once func). +func (b *LocalBackend) stateMachineLockedOnEntry(unlock unlockOnce) { + b.enterStateLockedOnEntry(b.nextStateLocked(), unlock) } -// lockAndGetUnlock locks b.mu and returns a function that will unlock it at -// most once. +// lockAndGetUnlock locks b.mu and returns a sync.OnceFunc function that will +// unlock it at most once. // -// TODO(creachadair): This was added as a guardrail against the unfortunate -// "LockedOnEntry" methods that were originally used in this package (primarily -// enterStateLockedOnEntry) that required b.mu held to be locked on entry to -// the function but unlocked the mutex on their way out. -// -// Now that these have all been updated, we could remove this type and acquire -// and release locks directly. For now, however, I've left it alone to reduce -// the scope of lock-related changes. -// -// See: https://github.com/tailscale/tailscale/issues/11649 +// This is all very unfortunate but exists as a guardrail against the +// unfortunate "lockedOnEntry" methods in this package (primarily +// enterStateLockedOnEntry) that require b.mu held to be locked on entry to the +// function but unlock the mutex on their way out. As a stepping stone to +// cleaning things up (as of 2024-04-06), we at least pass the unlock func +// around now and defer unlock in the caller to avoid missing unlocks and double +// unlocks. TODO(bradfitz,maisem): make the locking in this package more +// traditional (simple). 
See https://github.com/tailscale/tailscale/issues/11649 func (b *LocalBackend) lockAndGetUnlock() (unlock unlockOnce) { b.mu.Lock() var unlocked atomic.Bool @@ -6059,35 +6059,30 @@ func (b *LocalBackend) ShouldHandleViaIP(ip netip.Addr) bool { // Logout logs out the current profile, if any, and waits for the logout to // complete. func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { - // These values are initialized inside the lock on success. - var cc controlclient.Client - var profile ipn.LoginProfileView - - if err := func() error { - b.mu.Lock() - defer b.mu.Unlock() - - if !b.hasNodeKeyLocked() { - // Already logged out. - return nil - } - cc = b.cc + unlock := b.lockAndGetUnlock() + defer unlock() - // Grab the current profile before we unlock the mutex, so that we can - // delete it later. - profile = b.pm.CurrentProfile() + if !b.hasNodeKeyLocked() { + // Already logged out. + return nil + } + cc := b.cc - _, err := b.editPrefsLocked( - actor, - &ipn.MaskedPrefs{ - WantRunningSet: true, - LoggedOutSet: true, - Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true}, - }) - return err - }(); err != nil { + // Grab the current profile before we unlock the mutex, so that we can + // delete it later. + profile := b.pm.CurrentProfile() + + _, err := b.editPrefsLockedOnEntry( + actor, + &ipn.MaskedPrefs{ + WantRunningSet: true, + LoggedOutSet: true, + Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true}, + }, unlock) + if err != nil { return err } + // b.mu is now unlocked, after editPrefsLockedOnEntry. // Clear any previous dial plan(s), if set. b.resetDialPlan() @@ -6107,14 +6102,14 @@ func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { return err } - b.mu.Lock() - defer b.mu.Unlock() + unlock = b.lockAndGetUnlock() + defer unlock() if err := b.pm.DeleteProfile(profile.ID()); err != nil { b.logf("error deleting profile: %v", err) return err } - return b.resetForProfileChangeLocked() + return b.resetForProfileChangeLockedOnEntry(unlock) } // setNetInfo sets b.hostinfo.NetInfo to ni, and passes ni along to the @@ -7290,8 +7285,8 @@ func (b *LocalBackend) ShouldInterceptVIPServiceTCPPort(ap netip.AddrPort) bool // It will restart the backend on success. // If the profile is not known, it returns an errProfileNotFound. func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() if _, changed, err := b.pm.SwitchToProfileByID(profile); !changed || err != nil { @@ -7303,7 +7298,7 @@ func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { b.resetDialPlan() } - return b.resetForProfileChangeLocked() + return b.resetForProfileChangeLockedOnEntry(unlock) } func (b *LocalBackend) initTKALocked() error { @@ -7383,10 +7378,12 @@ func (b *LocalBackend) getHardwareAddrs() ([]string, error) { return addrs, nil } -// resetForProfileChangeLocked resets the backend for a profile change. +// resetForProfileChangeLockedOnEntry resets the backend for a profile change. // -// The caller must hold b.mu. -func (b *LocalBackend) resetForProfileChangeLocked() error { +// b.mu must held on entry. It is released on exit. +func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) error { + defer unlock() + if b.shutdownCalled { // Prevent a call back to Start during Shutdown, which calls Logout for // ephemeral nodes, which can then call back here. 
But we're shutting @@ -7417,26 +7414,19 @@ func (b *LocalBackend) resetForProfileChangeLocked() error { b.resetAlwaysOnOverrideLocked() b.extHost.NotifyProfileChange(b.pm.CurrentProfile(), b.pm.CurrentPrefs(), false) b.setAtomicValuesFromPrefsLocked(b.pm.CurrentPrefs()) - b.enterStateLocked(ipn.NoState) - - // Update health status and start outside the lock. - return func() error { - b.mu.Unlock() - defer b.mu.Lock() - - b.health.SetLocalLogConfigHealth(nil) - if tkaErr != nil { - return tkaErr - } - return b.Start(ipn.Options{}) - }() + b.enterStateLockedOnEntry(ipn.NoState, unlock) // Reset state; releases b.mu + b.health.SetLocalLogConfigHealth(nil) + if tkaErr != nil { + return tkaErr + } + return b.Start(ipn.Options{}) } // DeleteProfile deletes a profile with the given ID. // If the profile is not known, it is a no-op. func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() needToRestart := b.pm.CurrentProfile().ID() == p if err := b.pm.DeleteProfile(p); err != nil { @@ -7448,7 +7438,7 @@ func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { if !needToRestart { return nil } - return b.resetForProfileChangeLocked() + return b.resetForProfileChangeLockedOnEntry(unlock) } // CurrentProfile returns the current LoginProfile. @@ -7461,8 +7451,8 @@ func (b *LocalBackend) CurrentProfile() ipn.LoginProfileView { // NewProfile creates and switches to the new profile. func (b *LocalBackend) NewProfile() error { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() b.pm.SwitchToNewProfile() @@ -7470,7 +7460,7 @@ func (b *LocalBackend) NewProfile() error { // set. Conservatively reset the dialPlan. b.resetDialPlan() - return b.resetForProfileChangeLocked() + return b.resetForProfileChangeLockedOnEntry(unlock) } // ListProfiles returns a list of all LoginProfiles. @@ -7485,8 +7475,8 @@ func (b *LocalBackend) ListProfiles() []ipn.LoginProfileView { // backend is left with a new profile, ready for StartLoginInterative to be // called to register it as new node. func (b *LocalBackend) ResetAuth() error { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() prevCC := b.resetControlClientLocked() if prevCC != nil { @@ -7499,7 +7489,7 @@ func (b *LocalBackend) ResetAuth() error { return err } b.resetDialPlan() // always reset if we're removing everything - return b.resetForProfileChangeLocked() + return b.resetForProfileChangeLockedOnEntry(unlock) } func (b *LocalBackend) GetPeerEndpointChanges(ctx context.Context, ip netip.Addr) ([]magicsock.EndpointChange, error) { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 4843a941f..a3a26af04 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4299,7 +4299,7 @@ func (b *LocalBackend) SetPrefsForTest(newp *ipn.Prefs) { } unlock := b.lockAndGetUnlock() defer unlock() - b.setPrefsLocked(newp) + b.setPrefsLockedOnEntry(newp, unlock) } type peerOptFunc func(*tailcfg.Node) diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 7519ee157..1d312cfa6 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -180,7 +180,7 @@ func (pm *profileManager) SwitchToProfile(profile ipn.LoginProfileView) (cp ipn. f(pm.currentProfile, pm.prefs, false) } // Do not call pm.extHost.NotifyProfileChange here; it is invoked in - // [LocalBackend.resetForProfileChangeLocked] after the netmap reset. 
+ // [LocalBackend.resetForProfileChangeLockedOnEntry] after the netmap reset. // TODO(nickkhyl): Consider moving it here (or into the stateChangeCb handler // in [LocalBackend]) once the profile/node state, including the netmap, // is actually tied to the current profile. @@ -359,9 +359,9 @@ func (pm *profileManager) SetPrefs(prefsIn ipn.PrefsView, np ipn.NetworkProfile) // where prefsIn is the previous profile's prefs with an updated Persist, LoggedOut, // WantRunning and possibly other fields. This may not be the desired behavior. // - // Additionally, LocalBackend doesn't treat it as a proper profile switch, - // meaning that [LocalBackend.resetForProfileChangeLocked] is not called and - // certain node/profile-specific state may not be reset as expected. + // Additionally, LocalBackend doesn't treat it as a proper profile switch, meaning that + // [LocalBackend.resetForProfileChangeLockedOnEntry] is not called and certain + // node/profile-specific state may not be reset as expected. // // However, [profileManager] notifies [ipnext.Extension]s about the profile change, // so features migrated from LocalBackend to external packages should not be affected. @@ -494,9 +494,10 @@ func (pm *profileManager) setProfilePrefsNoPermCheck(profile ipn.LoginProfileVie oldPrefs := pm.prefs pm.prefs = clonedPrefs - // Sadly, profile prefs can be changed in multiple ways. It's pretty - // chaotic, and in many cases callers use unexported methods of the - // profile manager instead of going through [LocalBackend.setPrefsLocked] + // Sadly, profile prefs can be changed in multiple ways. + // It's pretty chaotic, and in many cases callers use + // unexported methods of the profile manager instead of + // going through [LocalBackend.setPrefsLockedOnEntry] // or at least using [profileManager.SetPrefs]. // // While we should definitely clean this up to improve From 2b3e53304871fccb4f91fdef32a59ef8a30c9752 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 2 Sep 2025 12:49:37 -0700 Subject: [PATCH 1260/1708] util/syspolicy: finish plumbing policyclient, add feature/syspolicy, move global impl This is step 4 of making syspolicy a build-time feature. This adds a policyclient.Get() accessor to return the correct implementation to use: either the real one, or the no-op one. 
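A minimal sketch of the intended plumbing (a hypothetical helper, not part of this patch; only policyclient.Get, policyclient.NoPolicyClient, Client.GetBoolean, and pkey.EncryptState are assumed from the changes below):

package example

import (
	"tailscale.com/util/syspolicy/pkey"
	"tailscale.com/util/syspolicy/policyclient"
)

// encryptStateEnabled is a hypothetical helper that accepts a plumbed-in
// policyclient.Client rather than fetching the global one at the call site,
// so tests can substitute their own implementation.
func encryptStateEnabled(polc policyclient.Client) bool {
	v, _ := polc.GetBoolean(pkey.EncryptState, false)
	return v
}

func use() {
	_ = encryptStateEnabled(policyclient.Get())            // production: real or build-time no-op client
	_ = encryptStateEnabled(policyclient.NoPolicyClient{}) // tests: static defaults, no real policy source
}
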
(A third type, a static one for testing, also exists, so in general a policyclient.Client should be plumbed around and not always fetched via policyclient.Get whenever possible, especially if tests need to use alternate syspolicy) Updates #16998 Updates #12614 Change-Id: Iaf19670744a596d5918acfa744f5db4564272978 Signed-off-by: Brad Fitzpatrick --- client/web/auth.go | 2 +- client/web/web.go | 12 +- client/web/web_test.go | 2 + cmd/derper/depaware.txt | 20 ++-- cmd/k8s-operator/depaware.txt | 3 +- cmd/tailscale/cli/maybe_syspolicy.go | 8 ++ cmd/tailscale/cli/up.go | 3 +- cmd/tailscale/depaware.txt | 13 ++- cmd/tailscaled/depaware.txt | 3 +- cmd/tailscaled/tailscaled.go | 8 +- cmd/tailscaled/tailscaled_windows.go | 6 +- cmd/tsidp/depaware.txt | 3 +- feature/condregister/maybe_syspolicy.go | 8 ++ feature/syspolicy/syspolicy.go | 7 ++ ipn/desktop/extension.go | 4 +- ipn/ipnauth/policy.go | 6 +- ipn/ipnlocal/c2n.go | 3 +- ipn/ipnlocal/local.go | 21 ++-- ipn/prefs.go | 16 +-- ipn/prefs_test.go | 8 +- logpolicy/logpolicy.go | 4 +- logpolicy/maybe_syspolicy.go | 8 ++ net/dns/manager.go | 3 +- net/dns/manager_darwin.go | 3 +- net/dns/manager_default.go | 3 +- net/dns/manager_freebsd.go | 3 +- net/dns/manager_linux.go | 3 +- net/dns/manager_openbsd.go | 3 +- net/dns/manager_plan9.go | 3 +- net/dns/manager_solaris.go | 3 +- net/dns/manager_windows.go | 12 +- net/dns/manager_windows_test.go | 5 +- tsd/syspolicy_off.go | 12 -- tsd/syspolicy_on.go | 64 ----------- tsd/tsd.go | 2 +- tsnet/depaware.txt | 3 +- .../tailscaled_deps_test_darwin.go | 2 +- .../tailscaled_deps_test_freebsd.go | 2 +- .../integration/tailscaled_deps_test_linux.go | 2 +- .../tailscaled_deps_test_openbsd.go | 2 +- .../tailscaled_deps_test_windows.go | 2 +- util/syspolicy/policyclient/policyclient.go | 25 +++- util/syspolicy/syspolicy.go | 108 ++++++++++++------ util/syspolicy/syspolicy_test.go | 16 +-- 44 files changed, 242 insertions(+), 207 deletions(-) create mode 100644 cmd/tailscale/cli/maybe_syspolicy.go create mode 100644 feature/condregister/maybe_syspolicy.go create mode 100644 feature/syspolicy/syspolicy.go create mode 100644 logpolicy/maybe_syspolicy.go delete mode 100644 tsd/syspolicy_off.go delete mode 100644 tsd/syspolicy_on.go diff --git a/client/web/auth.go b/client/web/auth.go index 8b195a417..27eb24ee4 100644 --- a/client/web/auth.go +++ b/client/web/auth.go @@ -192,7 +192,7 @@ func (s *Server) controlSupportsCheckMode(ctx context.Context) bool { if err != nil { return true } - controlURL, err := url.Parse(prefs.ControlURLOrDefault()) + controlURL, err := url.Parse(prefs.ControlURLOrDefault(s.polc)) if err != nil { return true } diff --git a/client/web/web.go b/client/web/web.go index f3158cd1f..71a015dab 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -5,6 +5,7 @@ package web import ( + "cmp" "context" "encoding/json" "errors" @@ -36,6 +37,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/views" "tailscale.com/util/httpm" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version" "tailscale.com/version/distro" ) @@ -49,6 +51,7 @@ type Server struct { mode ServerMode logf logger.Logf + polc policyclient.Client // must be non-nil lc *local.Client timeNow func() time.Time @@ -139,9 +142,13 @@ type ServerOpts struct { TimeNow func() time.Time // Logf optionally provides a logger function. - // log.Printf is used as default. + // If nil, log.Printf is used as default. Logf logger.Logf + // PolicyClient, if non-nil, will be used to fetch policy settings. 
+ // If nil, the default policy client will be used. + PolicyClient policyclient.Client + // The following two fields are required and used exclusively // in ManageServerMode to facilitate the control server login // check step for authorizing browser sessions. @@ -178,6 +185,7 @@ func NewServer(opts ServerOpts) (s *Server, err error) { } s = &Server{ mode: opts.Mode, + polc: cmp.Or(opts.PolicyClient, policyclient.Get()), logf: opts.Logf, devMode: envknob.Bool("TS_DEBUG_WEB_CLIENT_DEV"), lc: opts.LocalClient, @@ -950,7 +958,7 @@ func (s *Server) serveGetNodeData(w http.ResponseWriter, r *http.Request) { UnraidToken: os.Getenv("UNRAID_CSRF_TOKEN"), RunningSSHServer: prefs.RunSSH, URLPrefix: strings.TrimSuffix(s.pathPrefix, "/"), - ControlAdminURL: prefs.AdminPageURL(), + ControlAdminURL: prefs.AdminPageURL(s.polc), LicensesURL: licenses.LicensesURL(), Features: availableFeatures(), diff --git a/client/web/web_test.go b/client/web/web_test.go index 12dbb5c79..9ba16bccf 100644 --- a/client/web/web_test.go +++ b/client/web/web_test.go @@ -28,6 +28,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/views" "tailscale.com/util/httpm" + "tailscale.com/util/syspolicy/policyclient" ) func TestQnapAuthnURL(t *testing.T) { @@ -576,6 +577,7 @@ func TestServeAuth(t *testing.T) { timeNow: func() time.Time { return timeNow }, newAuthURL: mockNewAuthURL, waitAuthURL: mockWaitAuthURL, + polc: policyclient.NoPolicyClient{}, } successCookie := "ts-cookie-success" diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 2c6c4690c..52b82b228 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -170,21 +170,15 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/set from tailscale.com/derp+ tailscale.com/util/singleflight from tailscale.com/net/dnscache tailscale.com/util/slicesx from tailscale.com/cmd/derper+ - tailscale.com/util/syspolicy from tailscale.com/ipn - tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ - tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ - tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ - tailscale.com/util/syspolicy/policyclient from tailscale.com/util/syspolicy/rsop - tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ - tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy - tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ - tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/testenv from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/ipn + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy/policyclient+ + tailscale.com/util/syspolicy/setting from tailscale.com/client/local + tailscale.com/util/testenv from tailscale.com/net/bakedroots+ tailscale.com/util/usermetric from tailscale.com/health tailscale.com/util/vizerror from tailscale.com/tailcfg+ W 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ - W 💣 tailscale.com/util/winutil/gp from tailscale.com/util/syspolicy/source W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ tailscale.com/version from tailscale.com/derp+ tailscale.com/version/distro from tailscale.com/envknob+ @@ -205,7 +199,7 @@ 
tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from tailscale.com/util/winutil+ - golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting+ + golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting L golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from net/http+ @@ -393,7 +387,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa os from crypto/internal/sysrand+ os/exec from github.com/coreos/go-iptables/iptables+ os/signal from tailscale.com/cmd/derper - W os/user from tailscale.com/util/winutil+ + W os/user from tailscale.com/util/winutil path from github.com/prometheus/client_golang/prometheus/internal+ path/filepath from crypto/x509+ reflect from crypto/x509+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index ccba96707..d94b5b6cf 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -798,6 +798,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ @@ -951,7 +952,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/set from tailscale.com/cmd/k8s-operator+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ - tailscale.com/util/syspolicy from tailscale.com/ipn+ + tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source diff --git a/cmd/tailscale/cli/maybe_syspolicy.go b/cmd/tailscale/cli/maybe_syspolicy.go new file mode 100644 index 000000000..937a27833 --- /dev/null +++ b/cmd/tailscale/cli/maybe_syspolicy.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_syspolicy + +package cli + +import _ "tailscale.com/feature/syspolicy" diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 1863957d3..ebbe3b19e 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -39,6 +39,7 @@ import ( "tailscale.com/types/preftype" "tailscale.com/types/views" "tailscale.com/util/dnsname" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version/distro" ) @@ -609,7 +610,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if env.upArgs.json { printUpDoneJSON(ipn.NeedsMachineAuth, "") } else { - fmt.Fprintf(Stderr, "\nTo approve your machine, visit (as admin):\n\n\t%s\n\n", prefs.AdminPageURL()) + fmt.Fprintf(Stderr, "\nTo approve your machine, visit (as admin):\n\n\t%s\n\n", prefs.AdminPageURL(policyclient.Get())) } case ipn.Running: // Done full 
authentication process diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 047bac6c2..445320636 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -106,6 +106,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/envknob/featureknob from tailscale.com/client/web tailscale.com/feature from tailscale.com/tsweb tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/syspolicy from tailscale.com/cmd/tailscale/cli tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli tailscale.com/hostinfo from tailscale.com/client/web+ @@ -191,15 +192,15 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/singleflight from tailscale.com/net/dnscache+ tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ L tailscale.com/util/stringsx from tailscale.com/client/systray - tailscale.com/util/syspolicy from tailscale.com/ipn + tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ - tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ - tailscale.com/util/syspolicy/policyclient from tailscale.com/util/syspolicy/rsop - tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/client/web+ + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy/policyclient+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy - tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ tailscale.com/util/testenv from tailscale.com/cmd/tailscale/cli+ tailscale.com/util/truncate from tailscale.com/cmd/tailscale/cli @@ -228,7 +229,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/crypto/pbkdf2 from software.sslmate.com/src/go-pkcs12 golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ - golang.org/x/exp/maps from tailscale.com/util/syspolicy/internal/metrics+ + golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting+ L golang.org/x/image/draw from github.com/fogleman/gg L golang.org/x/image/font from github.com/fogleman/gg+ L golang.org/x/image/font/basicfont from github.com/fogleman/gg diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index ee55f914c..3d9368143 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -276,6 +276,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/relayserver from tailscale.com/feature/condregister + tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ tailscale.com/feature/taildrop from tailscale.com/feature/condregister L 
tailscale.com/feature/tap from tailscale.com/feature/condregister tailscale.com/feature/tpm from tailscale.com/feature/condregister @@ -428,7 +429,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/set from tailscale.com/derp+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ - tailscale.com/util/syspolicy from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index f55535470..ddf6d9ef6 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -64,8 +64,8 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/multierr" "tailscale.com/util/osshare" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version" "tailscale.com/version/distro" "tailscale.com/wgengine" @@ -773,7 +773,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo // configuration being unavailable (from the noop // manager). More in Issue 4017. // TODO(bradfitz): add a Synology-specific DNS manager. - conf.DNS, err = dns.NewOSConfigurator(logf, sys.HealthTracker(), sys.ControlKnobs(), "") // empty interface name + conf.DNS, err = dns.NewOSConfigurator(logf, sys.HealthTracker(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), "") // empty interface name if err != nil { return false, fmt.Errorf("dns.NewOSConfigurator: %w", err) } @@ -807,7 +807,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo return false, fmt.Errorf("creating router: %w", err) } - d, err := dns.NewOSConfigurator(logf, sys.HealthTracker(), sys.ControlKnobs(), devName) + d, err := dns.NewOSConfigurator(logf, sys.HealthTracker(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), devName) if err != nil { dev.Close() r.Close() @@ -1012,6 +1012,6 @@ func defaultEncryptState() bool { // (plan9/FreeBSD/etc). 
return false } - v, _ := syspolicy.GetBoolean(pkey.EncryptState, false) + v, _ := policyclient.Get().GetBoolean(pkey.EncryptState, false) return v } diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 2d4e71d3c..3a2edcac5 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -55,8 +55,8 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/util/osdiag" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/winutil" "tailscale.com/util/winutil/gp" "tailscale.com/version" @@ -156,7 +156,7 @@ func runWindowsService(pol *logpolicy.Policy) error { if syslog, err := eventlog.Open(serviceName); err == nil { syslogf = func(format string, args ...any) { - if logSCMInteractions, _ := syspolicy.GetBoolean(pkey.LogSCMInteractions, false); logSCMInteractions { + if logSCMInteractions, _ := policyclient.Get().GetBoolean(pkey.LogSCMInteractions, false); logSCMInteractions { syslog.Info(0, fmt.Sprintf(format, args...)) } } @@ -390,7 +390,7 @@ func handleSessionChange(chgRequest svc.ChangeRequest) { if chgRequest.Cmd != svc.SessionChange || chgRequest.EventType != windows.WTS_SESSION_UNLOCK { return } - if flushDNSOnSessionUnlock, _ := syspolicy.GetBoolean(pkey.FlushDNSOnSessionUnlock, false); flushDNSOnSessionUnlock { + if flushDNSOnSessionUnlock, _ := policyclient.Get().GetBoolean(pkey.FlushDNSOnSessionUnlock, false); flushDNSOnSessionUnlock { log.Printf("Received WTS_SESSION_UNLOCK event, initiating DNS flush.") go func() { err := dns.Flush() diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 155ad03e3..efe9456d8 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -240,6 +240,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ @@ -380,7 +381,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ - tailscale.com/util/syspolicy from tailscale.com/ipn+ + tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source diff --git a/feature/condregister/maybe_syspolicy.go b/feature/condregister/maybe_syspolicy.go new file mode 100644 index 000000000..49ec5c02c --- /dev/null +++ b/feature/condregister/maybe_syspolicy.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_syspolicy + +package condregister + +import _ "tailscale.com/feature/syspolicy" diff --git a/feature/syspolicy/syspolicy.go b/feature/syspolicy/syspolicy.go new file mode 100644 index 000000000..08c3cf373 --- /dev/null +++ 
b/feature/syspolicy/syspolicy.go @@ -0,0 +1,7 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package syspolicy provides an interface for system-wide policy management. +package syspolicy + +import _ "tailscale.com/util/syspolicy" // for its registration side effects diff --git a/ipn/desktop/extension.go b/ipn/desktop/extension.go index 15d239f89..027772671 100644 --- a/ipn/desktop/extension.go +++ b/ipn/desktop/extension.go @@ -18,8 +18,8 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnext" "tailscale.com/types/logger" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" ) // featureName is the name of the feature implemented by this package. @@ -136,7 +136,7 @@ func (e *desktopSessionsExt) getBackgroundProfile(profiles ipnext.ProfileStore) e.mu.Lock() defer e.mu.Unlock() - if alwaysOn, _ := syspolicy.GetBoolean(pkey.AlwaysOn, false); !alwaysOn { + if alwaysOn, _ := policyclient.Get().GetBoolean(pkey.AlwaysOn, false); !alwaysOn { // If the Always-On mode is disabled, there's no background profile // as far as the desktop session extension is concerned. return ipn.LoginProfileView{} diff --git a/ipn/ipnauth/policy.go b/ipn/ipnauth/policy.go index 36004b293..42366dbd9 100644 --- a/ipn/ipnauth/policy.go +++ b/ipn/ipnauth/policy.go @@ -10,8 +10,8 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn" "tailscale.com/tailcfg" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" ) type actorWithPolicyChecks struct{ Actor } @@ -51,10 +51,10 @@ func (a actorWithPolicyChecks) CheckProfileAccess(profile ipn.LoginProfileView, // TODO(nickkhyl): unexport it when we move [ipn.Actor] implementations from [ipnserver] // and corp to this package. func CheckDisconnectPolicy(actor Actor, profile ipn.LoginProfileView, reason string, auditFn AuditLogFunc) error { - if alwaysOn, _ := syspolicy.GetBoolean(pkey.AlwaysOn, false); !alwaysOn { + if alwaysOn, _ := policyclient.Get().GetBoolean(pkey.AlwaysOn, false); !alwaysOn { return nil } - if allowWithReason, _ := syspolicy.GetBoolean(pkey.AlwaysOnOverrideWithReason, false); !allowWithReason { + if allowWithReason, _ := policyclient.Get().GetBoolean(pkey.AlwaysOnOverrideWithReason, false); !allowWithReason { return errors.New("disconnect not allowed: always-on mode is enabled") } if reason == "" { diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 339fad50a..2c13f0619 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -30,6 +30,7 @@ import ( "tailscale.com/util/goroutines" "tailscale.com/util/set" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/version" "tailscale.com/version/distro" ) @@ -342,7 +343,7 @@ func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http // this will first check syspolicy, MDM settings like Registry // on Windows or defaults on macOS. If they are not set, it falls // back to the cli-flag, `--posture-checking`. 
- choice, err := b.polc.GetPreferenceOption(pkey.PostureChecking) + choice, err := b.polc.GetPreferenceOption(pkey.PostureChecking, ptype.ShowChoiceByPolicy) if err != nil { b.logf( "c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s", diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 54dcda30a..700e2de37 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -109,6 +109,7 @@ import ( "tailscale.com/util/slicesx" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/systemd" "tailscale.com/util/testenv" "tailscale.com/util/usermetric" @@ -1610,7 +1611,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control // future "tailscale up" to start checking for // implicit setting reverts, which it doesn't do when // ControlURL is blank. - prefs.ControlURL = prefs.ControlURLOrDefault() + prefs.ControlURL = prefs.ControlURLOrDefault(b.polc) prefsChanged = true } if st.Persist.Valid() { @@ -1870,7 +1871,7 @@ func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { } for _, opt := range preferencePolicies { - if po, err := b.polc.GetPreferenceOption(opt.key); err == nil { + if po, err := b.polc.GetPreferenceOption(opt.key, ptype.ShowChoiceByPolicy); err == nil { curVal := opt.get(prefs.View()) newVal := po.ShouldEnable(curVal) if curVal != newVal { @@ -2425,7 +2426,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { loggedOut := prefs.LoggedOut() - serverURL := prefs.ControlURLOrDefault() + serverURL := prefs.ControlURLOrDefault(b.polc) if inServerMode := prefs.ForceDaemon(); inServerMode || runtime.GOOS == "windows" { b.logf("Start: serverMode=%v", inServerMode) } @@ -3498,7 +3499,7 @@ func (b *LocalBackend) validPopBrowserURLLocked(urlStr string) bool { if err != nil { return false } - serverURL := b.sanitizedPrefsLocked().ControlURLOrDefault() + serverURL := b.sanitizedPrefsLocked().ControlURLOrDefault(b.polc) if ipn.IsLoginServerSynonym(serverURL) { // When connected to the official Tailscale control plane, only allow // URLs from tailscale.com or its subdomains. @@ -4049,7 +4050,7 @@ func (b *LocalBackend) SwitchToBestProfile(reason string) { // but b.mu must held on entry. It is released on exit. func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock unlockOnce) { defer unlock() - oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() + oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc) profile, background := b.resolveBestProfileLocked() cp, switched, err := b.pm.SwitchToProfile(profile) switch { @@ -4076,7 +4077,7 @@ func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock un return } // As an optimization, only reset the dialPlan if the control URL changed. 
- if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(); oldControlURL != newControlURL { + if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc); oldControlURL != newControlURL { b.resetDialPlan() } if err := b.resetForProfileChangeLockedOnEntry(unlock); err != nil { @@ -4250,7 +4251,7 @@ func (b *LocalBackend) isDefaultServerLocked() bool { if !prefs.Valid() { return true // assume true until set otherwise } - return prefs.ControlURLOrDefault() == ipn.DefaultControlURL + return prefs.ControlURLOrDefault(b.polc) == ipn.DefaultControlURL } var exitNodeMisconfigurationWarnable = health.Register(&health.Warnable{ @@ -5687,7 +5688,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock // Some temporary (2024-05-05) debugging code to help us catch // https://github.com/tailscale/tailscale/issues/11962 in the act. if prefs.WantRunning() && - prefs.ControlURLOrDefault() == ipn.DefaultControlURL && + prefs.ControlURLOrDefault(b.polc) == ipn.DefaultControlURL && envknob.Bool("TS_PANIC_IF_HIT_MAIN_CONTROL") { panic("[unexpected] use of main control server in integration test") } @@ -7288,13 +7289,13 @@ func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { unlock := b.lockAndGetUnlock() defer unlock() - oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() + oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc) if _, changed, err := b.pm.SwitchToProfileByID(profile); !changed || err != nil { return err // nil if we're already on the target profile } // As an optimization, only reset the dialPlan if the control URL changed. - if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(); oldControlURL != newControlURL { + if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc); oldControlURL != newControlURL { b.resetDialPlan() } diff --git a/ipn/prefs.go b/ipn/prefs.go index 4c049688c..14b8078c0 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -28,8 +28,8 @@ import ( "tailscale.com/types/preftype" "tailscale.com/types/views" "tailscale.com/util/dnsname" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version" ) @@ -718,16 +718,16 @@ func NewPrefs() *Prefs { // // If not configured, or if the configured value is a legacy name equivalent to // the default, then DefaultControlURL is returned instead. -func (p PrefsView) ControlURLOrDefault() string { - return p.ж.ControlURLOrDefault() +func (p PrefsView) ControlURLOrDefault(polc policyclient.Client) string { + return p.ж.ControlURLOrDefault(polc) } // ControlURLOrDefault returns the coordination server's URL base. // // If not configured, or if the configured value is a legacy name equivalent to // the default, then DefaultControlURL is returned instead. -func (p *Prefs) ControlURLOrDefault() string { - controlURL, err := syspolicy.GetString(pkey.ControlURL, p.ControlURL) +func (p *Prefs) ControlURLOrDefault(polc policyclient.Client) string { + controlURL, err := polc.GetString(pkey.ControlURL, p.ControlURL) if err != nil { controlURL = p.ControlURL } @@ -756,11 +756,11 @@ func (p *Prefs) DefaultRouteAll(goos string) bool { } // AdminPageURL returns the admin web site URL for the current ControlURL. -func (p PrefsView) AdminPageURL() string { return p.ж.AdminPageURL() } +func (p PrefsView) AdminPageURL(polc policyclient.Client) string { return p.ж.AdminPageURL(polc) } // AdminPageURL returns the admin web site URL for the current ControlURL. 
-func (p *Prefs) AdminPageURL() string { - url := p.ControlURLOrDefault() +func (p *Prefs) AdminPageURL(polc policyclient.Client) string { + url := p.ControlURLOrDefault(polc) if IsLoginServerSynonym(url) { // TODO(crawshaw): In future release, make this https://console.tailscale.com url = "https://login.tailscale.com" diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 43e360c6a..7aac20c80 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -23,6 +23,7 @@ import ( "tailscale.com/types/opt" "tailscale.com/types/persist" "tailscale.com/types/preftype" + "tailscale.com/util/syspolicy/policyclient" ) func fieldsOf(t reflect.Type) (fields []string) { @@ -1032,15 +1033,16 @@ func TestExitNodeIPOfArg(t *testing.T) { func TestControlURLOrDefault(t *testing.T) { var p Prefs - if got, want := p.ControlURLOrDefault(), DefaultControlURL; got != want { + polc := policyclient.NoPolicyClient{} + if got, want := p.ControlURLOrDefault(polc), DefaultControlURL; got != want { t.Errorf("got %q; want %q", got, want) } p.ControlURL = "http://foo.bar" - if got, want := p.ControlURLOrDefault(), "http://foo.bar"; got != want { + if got, want := p.ControlURLOrDefault(polc), "http://foo.bar"; got != want { t.Errorf("got %q; want %q", got, want) } p.ControlURL = "https://login.tailscale.com" - if got, want := p.ControlURLOrDefault(), DefaultControlURL; got != want { + if got, want := p.ControlURLOrDefault(polc), DefaultControlURL; got != want { t.Errorf("got %q; want %q", got, want) } } diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 295dc6fff..587b421f3 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -51,8 +51,8 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/must" "tailscale.com/util/racebuild" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/testenv" "tailscale.com/version" "tailscale.com/version/distro" @@ -66,7 +66,7 @@ var getLogTargetOnce struct { func getLogTarget() string { getLogTargetOnce.Do(func() { envTarget, _ := os.LookupEnv("TS_LOG_TARGET") - getLogTargetOnce.v, _ = syspolicy.GetString(pkey.LogTarget, envTarget) + getLogTargetOnce.v, _ = policyclient.Get().GetString(pkey.LogTarget, envTarget) }) return getLogTargetOnce.v diff --git a/logpolicy/maybe_syspolicy.go b/logpolicy/maybe_syspolicy.go new file mode 100644 index 000000000..8b2836c97 --- /dev/null +++ b/logpolicy/maybe_syspolicy.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_syspolicy + +package logpolicy + +import _ "tailscale.com/feature/syspolicy" diff --git a/net/dns/manager.go b/net/dns/manager.go index 5d6f225ce..4a5c4925c 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -30,6 +30,7 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/dnsname" "tailscale.com/util/slicesx" + "tailscale.com/util/syspolicy/policyclient" ) var ( @@ -576,7 +577,7 @@ func (m *Manager) FlushCaches() error { // // health must not be nil func CleanUp(logf logger.Logf, netMon *netmon.Monitor, health *health.Tracker, interfaceName string) { - oscfg, err := NewOSConfigurator(logf, nil, nil, interfaceName) + oscfg, err := NewOSConfigurator(logf, health, policyclient.Get(), nil, interfaceName) if err != nil { logf("creating dns cleanup: %v", err) return diff --git a/net/dns/manager_darwin.go b/net/dns/manager_darwin.go index ccfafaa45..d73ad71a8 100644 --- a/net/dns/manager_darwin.go +++ b/net/dns/manager_darwin.go 
@@ -14,12 +14,13 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/types/logger" "tailscale.com/util/mak" + "tailscale.com/util/syspolicy/policyclient" ) // NewOSConfigurator creates a new OS configurator. // // The health tracker and the knobs may be nil and are ignored on this platform. -func NewOSConfigurator(logf logger.Logf, _ *health.Tracker, _ *controlknobs.Knobs, ifName string) (OSConfigurator, error) { +func NewOSConfigurator(logf logger.Logf, _ *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, ifName string) (OSConfigurator, error) { return &darwinConfigurator{logf: logf, ifName: ifName}, nil } diff --git a/net/dns/manager_default.go b/net/dns/manager_default.go index dbe985cac..1a86690c5 100644 --- a/net/dns/manager_default.go +++ b/net/dns/manager_default.go @@ -9,11 +9,12 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) // NewOSConfigurator creates a new OS configurator. // // The health tracker and the knobs may be nil and are ignored on this platform. -func NewOSConfigurator(logger.Logf, *health.Tracker, *controlknobs.Knobs, string) (OSConfigurator, error) { +func NewOSConfigurator(logger.Logf, *health.Tracker, policyclient.Client, *controlknobs.Knobs, string) (OSConfigurator, error) { return NewNoopManager() } diff --git a/net/dns/manager_freebsd.go b/net/dns/manager_freebsd.go index 1ec9ea841..3237fb382 100644 --- a/net/dns/manager_freebsd.go +++ b/net/dns/manager_freebsd.go @@ -10,12 +10,13 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) // NewOSConfigurator creates a new OS configurator. // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. -func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs.Knobs, _ string) (OSConfigurator, error) { +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, _ string) (OSConfigurator, error) { bs, err := os.ReadFile("/etc/resolv.conf") if os.IsNotExist(err) { return newDirectManager(logf, health), nil diff --git a/net/dns/manager_linux.go b/net/dns/manager_linux.go index 643cc280a..8b66ac3a6 100644 --- a/net/dns/manager_linux.go +++ b/net/dns/manager_linux.go @@ -22,6 +22,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/util/cmpver" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version/distro" ) @@ -38,7 +39,7 @@ var publishOnce sync.Once // NewOSConfigurator created a new OS configurator. // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. 
-func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs.Knobs, interfaceName string) (ret OSConfigurator, err error) { +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, interfaceName string) (ret OSConfigurator, err error) { if distro.Get() == distro.JetKVM { return NewNoopManager() } diff --git a/net/dns/manager_openbsd.go b/net/dns/manager_openbsd.go index 1a1c4390c..6168a9e08 100644 --- a/net/dns/manager_openbsd.go +++ b/net/dns/manager_openbsd.go @@ -11,6 +11,7 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) type kv struct { @@ -24,7 +25,7 @@ func (kv kv) String() string { // NewOSConfigurator created a new OS configurator. // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. -func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { return newOSConfigurator(logf, health, interfaceName, newOSConfigEnv{ rcIsResolvd: rcIsResolvd, diff --git a/net/dns/manager_plan9.go b/net/dns/manager_plan9.go index ca179f27f..ef1ceea17 100644 --- a/net/dns/manager_plan9.go +++ b/net/dns/manager_plan9.go @@ -21,9 +21,10 @@ import ( "tailscale.com/health" "tailscale.com/types/logger" "tailscale.com/util/set" + "tailscale.com/util/syspolicy/policyclient" ) -func NewOSConfigurator(logf logger.Logf, ht *health.Tracker, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { +func NewOSConfigurator(logf logger.Logf, ht *health.Tracker, _ policyclient.Client, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { return &plan9DNSManager{ logf: logf, ht: ht, diff --git a/net/dns/manager_solaris.go b/net/dns/manager_solaris.go index 1f48efb9e..de7e72bb5 100644 --- a/net/dns/manager_solaris.go +++ b/net/dns/manager_solaris.go @@ -7,8 +7,9 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) -func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs.Knobs, iface string) (OSConfigurator, error) { +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, iface string) (OSConfigurator, error) { return newDirectManager(logf, health), nil } diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index 8830861d1..444c5d37d 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -29,7 +29,6 @@ import ( "tailscale.com/health" "tailscale.com/types/logger" "tailscale.com/util/dnsname" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/ptype" @@ -48,6 +47,7 @@ type windowsManager struct { knobs *controlknobs.Knobs // or nil nrptDB *nrptRuleDatabase wslManager *wslManager + polc policyclient.Client unregisterPolicyChangeCb func() // called when the manager is closing @@ -58,11 +58,15 @@ type windowsManager struct { // NewOSConfigurator created a new OS configurator. // // The health tracker and the knobs may be nil. 
-func NewOSConfigurator(logf logger.Logf, health *health.Tracker, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, polc policyclient.Client, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { + if polc == nil { + panic("nil policyclient.Client") + } ret := &windowsManager{ logf: logf, guid: interfaceName, knobs: knobs, + polc: polc, wslManager: newWSLManager(logf, health), } @@ -71,7 +75,7 @@ func NewOSConfigurator(logf logger.Logf, health *health.Tracker, knobs *controlk } var err error - if ret.unregisterPolicyChangeCb, err = syspolicy.RegisterChangeCallback(ret.sysPolicyChanged); err != nil { + if ret.unregisterPolicyChangeCb, err = polc.RegisterChangeCallback(ret.sysPolicyChanged); err != nil { logf("error registering policy change callback: %v", err) // non-fatal } @@ -521,7 +525,7 @@ func (m *windowsManager) reconfigureDNSRegistration() { // Disable DNS registration by default (if the policy setting is not configured). // This is primarily for historical reasons and to avoid breaking existing // setups that rely on this behavior. - enableDNSRegistration, err := syspolicy.GetPreferenceOptionOrDefault(pkey.EnableDNSRegistration, ptype.NeverByPolicy) + enableDNSRegistration, err := m.polc.GetPreferenceOption(pkey.EnableDNSRegistration, ptype.NeverByPolicy) if err != nil { m.logf("error getting DNSRegistration policy setting: %v", err) // non-fatal; we'll use the default } diff --git a/net/dns/manager_windows_test.go b/net/dns/manager_windows_test.go index edcf24ec0..7c0139f45 100644 --- a/net/dns/manager_windows_test.go +++ b/net/dns/manager_windows_test.go @@ -17,6 +17,7 @@ import ( "golang.org/x/sys/windows/registry" "tailscale.com/types/logger" "tailscale.com/util/dnsname" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/winutil" "tailscale.com/util/winutil/gp" ) @@ -133,7 +134,7 @@ func TestManagerWindowsGPCopy(t *testing.T) { } defer delIfKey() - cfg, err := NewOSConfigurator(logf, nil, nil, fakeInterface.String()) + cfg, err := NewOSConfigurator(logf, nil, policyclient.NoPolicyClient{}, nil, fakeInterface.String()) if err != nil { t.Fatalf("NewOSConfigurator: %v\n", err) } @@ -262,7 +263,7 @@ func runTest(t *testing.T, isLocal bool) { } defer delIfKey() - cfg, err := NewOSConfigurator(logf, nil, nil, fakeInterface.String()) + cfg, err := NewOSConfigurator(logf, nil, policyclient.NoPolicyClient{}, nil, fakeInterface.String()) if err != nil { t.Fatalf("NewOSConfigurator: %v\n", err) } diff --git a/tsd/syspolicy_off.go b/tsd/syspolicy_off.go deleted file mode 100644 index 221b8f223..000000000 --- a/tsd/syspolicy_off.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build ts_omit_syspolicy - -package tsd - -import ( - "tailscale.com/util/syspolicy/policyclient" -) - -func getPolicyClient() policyclient.Client { return policyclient.NoPolicyClient{} } diff --git a/tsd/syspolicy_on.go b/tsd/syspolicy_on.go deleted file mode 100644 index e9811b88b..000000000 --- a/tsd/syspolicy_on.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !ts_omit_syspolicy - -package tsd - -import ( - "time" - - "tailscale.com/util/syspolicy" - "tailscale.com/util/syspolicy/pkey" - "tailscale.com/util/syspolicy/policyclient" - "tailscale.com/util/syspolicy/ptype" -) - -func getPolicyClient() policyclient.Client { return 
globalSyspolicy{} } - -// globalSyspolicy implements [policyclient.Client] using the syspolicy global -// functions and global registrations. -// -// TODO: de-global-ify. This implementation using the old global functions -// is an intermediate stage while changing policyclient to be modular. -type globalSyspolicy struct{} - -func (globalSyspolicy) GetBoolean(key pkey.Key, defaultValue bool) (bool, error) { - return syspolicy.GetBoolean(key, defaultValue) -} - -func (globalSyspolicy) GetString(key pkey.Key, defaultValue string) (string, error) { - return syspolicy.GetString(key, defaultValue) -} - -func (globalSyspolicy) GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) { - return syspolicy.GetStringArray(key, defaultValue) -} - -func (globalSyspolicy) SetDebugLoggingEnabled(enabled bool) { - syspolicy.SetDebugLoggingEnabled(enabled) -} - -func (globalSyspolicy) GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) { - return syspolicy.GetUint64(key, defaultValue) -} - -func (globalSyspolicy) GetDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { - return syspolicy.GetDuration(name, defaultValue) -} - -func (globalSyspolicy) GetPreferenceOption(name pkey.Key) (ptype.PreferenceOption, error) { - return syspolicy.GetPreferenceOption(name) -} - -func (globalSyspolicy) GetVisibility(name pkey.Key) (ptype.Visibility, error) { - return syspolicy.GetVisibility(name) -} - -func (globalSyspolicy) HasAnyOf(keys ...pkey.Key) (bool, error) { - return syspolicy.HasAnyOf(keys...) -} - -func (globalSyspolicy) RegisterChangeCallback(cb func(policyclient.PolicyChange)) (unregister func(), err error) { - return syspolicy.RegisterChangeCallback(cb) -} diff --git a/tsd/tsd.go b/tsd/tsd.go index 17795d3c5..bd333bd31 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -175,7 +175,7 @@ func (s *System) PolicyClientOrDefault() policyclient.Client { if client, ok := s.PolicyClient.GetOK(); ok { return client } - return getPolicyClient() + return policyclient.Get() } // SubSystem represents some subsystem of the Tailscale node daemon. 
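The call-site shape that the hunks above converge on is the same everywhere: read a setting through policyclient.Get() instead of the syspolicy package-level functions. A small sketch of that shape, with an illustrative helper name rather than one taken from this diff:

    package example

    import (
        "tailscale.com/util/syspolicy/pkey"
        "tailscale.com/util/syspolicy/policyclient"
    )

    // alwaysOnEnabled shows the converted call shape. policyclient.Get()
    // returns the registered implementation, or the no-op NoPolicyClient
    // when the syspolicy feature is compiled out of the build.
    func alwaysOnEnabled() bool {
        alwaysOn, err := policyclient.Get().GetBoolean(pkey.AlwaysOn, false)
        if err != nil {
            return false
        }
        return alwaysOn
    }

Long-lived components take the other route seen above: the client is injected once and stored, which is what the new policyclient.Client constructor parameters and the polc field on windowsManager are for.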
diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 1c2be4781..187237e2f 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -236,6 +236,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ @@ -375,7 +376,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ - tailscale.com/util/syspolicy from tailscale.com/ipn+ + tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index c8a0bb274..a87a3ec65 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -51,8 +51,8 @@ import ( _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" - _ "tailscale.com/util/syspolicy" _ "tailscale.com/util/syspolicy/pkey" + _ "tailscale.com/util/syspolicy/policyclient" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index c8a0bb274..a87a3ec65 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -51,8 +51,8 @@ import ( _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" - _ "tailscale.com/util/syspolicy" _ "tailscale.com/util/syspolicy/pkey" + _ "tailscale.com/util/syspolicy/policyclient" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index c8a0bb274..a87a3ec65 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -51,8 +51,8 @@ import ( _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" - _ "tailscale.com/util/syspolicy" _ "tailscale.com/util/syspolicy/pkey" + _ "tailscale.com/util/syspolicy/policyclient" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index c8a0bb274..a87a3ec65 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -51,8 +51,8 @@ import ( _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" - _ "tailscale.com/util/syspolicy" _ "tailscale.com/util/syspolicy/pkey" + _ 
"tailscale.com/util/syspolicy/policyclient" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index c9a1cd0cf..54e1bcc04 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -62,8 +62,8 @@ import ( _ "tailscale.com/util/multierr" _ "tailscale.com/util/osdiag" _ "tailscale.com/util/osshare" - _ "tailscale.com/util/syspolicy" _ "tailscale.com/util/syspolicy/pkey" + _ "tailscale.com/util/syspolicy/policyclient" _ "tailscale.com/util/winutil" _ "tailscale.com/util/winutil/gp" _ "tailscale.com/version" diff --git a/util/syspolicy/policyclient/policyclient.go b/util/syspolicy/policyclient/policyclient.go index aadcbc60e..5a7842448 100644 --- a/util/syspolicy/policyclient/policyclient.go +++ b/util/syspolicy/policyclient/policyclient.go @@ -44,8 +44,8 @@ type Client interface { // overrides of users' choices in a way that we do not want tailcontrol to have // the authority to set. It describes user-decides/always/never options, where // "always" and "never" remove the user's ability to make a selection. If not - // present or set to a different value, "user-decides" is the default. - GetPreferenceOption(key pkey.Key) (ptype.PreferenceOption, error) + // present or set to a different value, defaultValue (and a nil error) is returned. + GetPreferenceOption(key pkey.Key, defaultValue ptype.PreferenceOption) (ptype.PreferenceOption, error) // GetVisibility returns whether a UI element should be visible based on // the system's configuration. @@ -66,6 +66,21 @@ type Client interface { RegisterChangeCallback(cb func(PolicyChange)) (unregister func(), err error) } +// Get returns a non-nil [Client] implementation as a function of the +// build tags. It returns a no-op implementation if the full syspolicy +// package is omitted from the build. +func Get() Client { + return client +} + +// RegisterClientImpl registers a [Client] implementation to be returned by +// [Get]. +func RegisterClientImpl(c Client) { + client = c +} + +var client Client = NoPolicyClient{} + // PolicyChange is the interface representing a change in policy settings. type PolicyChange interface { // HasChanged reports whether the policy setting identified by the given key @@ -81,6 +96,8 @@ type PolicyChange interface { // returns default values. 
type NoPolicyClient struct{} +var _ Client = NoPolicyClient{} + func (NoPolicyClient) GetBoolean(key pkey.Key, defaultValue bool) (bool, error) { return defaultValue, nil } @@ -101,8 +118,8 @@ func (NoPolicyClient) GetDuration(name pkey.Key, defaultValue time.Duration) (ti return defaultValue, nil } -func (NoPolicyClient) GetPreferenceOption(name pkey.Key) (ptype.PreferenceOption, error) { - return ptype.ShowChoiceByPolicy, nil +func (NoPolicyClient) GetPreferenceOption(name pkey.Key, defaultValue ptype.PreferenceOption) (ptype.PreferenceOption, error) { + return defaultValue, nil } func (NoPolicyClient) GetVisibility(name pkey.Key) (ptype.Visibility, error) { diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index 189f41107..2367e21eb 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -1,13 +1,9 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Package syspolicy facilitates retrieval of the current policy settings -// applied to the device or user and receiving notifications when the policy -// changes. -// -// It provides functions that return specific policy settings by their unique -// [setting.Key]s, such as [GetBoolean], [GetUint64], [GetString], -// [GetStringArray], [GetPreferenceOption], [GetVisibility] and [GetDuration]. +// Package syspolicy contains the implementation of system policy management. +// Calling code should use the client interface in +// tailscale.com/util/syspolicy/policyclient. package syspolicy import ( @@ -18,6 +14,7 @@ import ( "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" @@ -58,9 +55,9 @@ func MustRegisterStoreForTest(tb testenv.TB, name string, scope setting.PolicySc return reg } -// HasAnyOf returns whether at least one of the specified policy settings is configured, +// hasAnyOf returns whether at least one of the specified policy settings is configured, // or an error if no keys are provided or the check fails. -func HasAnyOf(keys ...pkey.Key) (bool, error) { +func hasAnyOf(keys ...pkey.Key) (bool, error) { if len(keys) == 0 { return false, errors.New("at least one key must be specified") } @@ -82,62 +79,55 @@ func HasAnyOf(keys ...pkey.Key) (bool, error) { return false, nil } -// GetString returns a string policy setting with the specified key, +// getString returns a string policy setting with the specified key, // or defaultValue if it does not exist. -func GetString(key pkey.Key, defaultValue string) (string, error) { +func getString(key pkey.Key, defaultValue string) (string, error) { return getCurrentPolicySettingValue(key, defaultValue) } -// GetUint64 returns a numeric policy setting with the specified key, +// getUint64 returns a numeric policy setting with the specified key, // or defaultValue if it does not exist. -func GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) { +func getUint64(key pkey.Key, defaultValue uint64) (uint64, error) { return getCurrentPolicySettingValue(key, defaultValue) } -// GetBoolean returns a boolean policy setting with the specified key, +// getBoolean returns a boolean policy setting with the specified key, // or defaultValue if it does not exist. 
-func GetBoolean(key pkey.Key, defaultValue bool) (bool, error) { +func getBoolean(key pkey.Key, defaultValue bool) (bool, error) { return getCurrentPolicySettingValue(key, defaultValue) } -// GetStringArray returns a multi-string policy setting with the specified key, +// getStringArray returns a multi-string policy setting with the specified key, // or defaultValue if it does not exist. -func GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) { +func getStringArray(key pkey.Key, defaultValue []string) ([]string, error) { return getCurrentPolicySettingValue(key, defaultValue) } -// GetPreferenceOption loads a policy from the registry that can be +// getPreferenceOption loads a policy from the registry that can be // managed by an enterprise policy management system and allows administrative // overrides of users' choices in a way that we do not want tailcontrol to have // the authority to set. It describes user-decides/always/never options, where // "always" and "never" remove the user's ability to make a selection. If not -// present or set to a different value, "user-decides" is the default. -func GetPreferenceOption(name pkey.Key) (ptype.PreferenceOption, error) { - return getCurrentPolicySettingValue(name, ptype.ShowChoiceByPolicy) -} - -// GetPreferenceOptionOrDefault is like [GetPreferenceOption], but allows -// specifying a default value to return if the policy setting is not configured. -// It can be used in situations where "user-decides" is not the default. -func GetPreferenceOptionOrDefault(name pkey.Key, defaultValue ptype.PreferenceOption) (ptype.PreferenceOption, error) { +// present or set to a different value, defaultValue (and a nil error) is returned. +func getPreferenceOption(name pkey.Key, defaultValue ptype.PreferenceOption) (ptype.PreferenceOption, error) { return getCurrentPolicySettingValue(name, defaultValue) } -// GetVisibility loads a policy from the registry that can be managed +// getVisibility loads a policy from the registry that can be managed // by an enterprise policy management system and describes show/hide decisions // for UI elements. The registry value should be a string set to "show" (return // true) or "hide" (return true). If not present or set to a different value, // "show" (return false) is the default. -func GetVisibility(name pkey.Key) (ptype.Visibility, error) { +func getVisibility(name pkey.Key) (ptype.Visibility, error) { return getCurrentPolicySettingValue(name, ptype.VisibleByPolicy) } -// GetDuration loads a policy from the registry that can be managed +// getDuration loads a policy from the registry that can be managed // by an enterprise policy management system and describes a duration for some // action. The registry value should be a string that time.ParseDuration // understands. If the registry value is "" or can not be processed, // defaultValue is returned instead. -func GetDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { +func getDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { d, err := getCurrentPolicySettingValue(name, defaultValue) if err != nil { return d, err @@ -148,9 +138,9 @@ func GetDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, erro return d, nil } -// RegisterChangeCallback adds a function that will be called whenever the effective policy +// registerChangeCallback adds a function that will be called whenever the effective policy // for the default scope changes. 
The returned function can be used to unregister the callback. -func RegisterChangeCallback(cb rsop.PolicyChangeCallback) (unregister func(), err error) { +func registerChangeCallback(cb rsop.PolicyChangeCallback) (unregister func(), err error) { effective, err := rsop.PolicyFor(setting.DefaultScope()) if err != nil { return nil, err @@ -233,7 +223,53 @@ func SelectControlURL(reg, disk string) string { return def } -// SetDebugLoggingEnabled controls whether spammy debug logging is enabled. -func SetDebugLoggingEnabled(v bool) { - loggerx.SetDebugLoggingEnabled(v) +func init() { + policyclient.RegisterClientImpl(globalSyspolicy{}) +} + +// globalSyspolicy implements [policyclient.Client] using the syspolicy global +// functions and global registrations. +// +// TODO: de-global-ify. This implementation using the old global functions +// is an intermediate stage while changing policyclient to be modular. +type globalSyspolicy struct{} + +func (globalSyspolicy) GetBoolean(key pkey.Key, defaultValue bool) (bool, error) { + return getBoolean(key, defaultValue) +} + +func (globalSyspolicy) GetString(key pkey.Key, defaultValue string) (string, error) { + return getString(key, defaultValue) +} + +func (globalSyspolicy) GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) { + return getStringArray(key, defaultValue) +} + +func (globalSyspolicy) SetDebugLoggingEnabled(enabled bool) { + loggerx.SetDebugLoggingEnabled(enabled) +} + +func (globalSyspolicy) GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) { + return getUint64(key, defaultValue) +} + +func (globalSyspolicy) GetDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { + return getDuration(name, defaultValue) +} + +func (globalSyspolicy) GetPreferenceOption(name pkey.Key, defaultValue ptype.PreferenceOption) (ptype.PreferenceOption, error) { + return getPreferenceOption(name, defaultValue) +} + +func (globalSyspolicy) GetVisibility(name pkey.Key) (ptype.Visibility, error) { + return getVisibility(name) +} + +func (globalSyspolicy) HasAnyOf(keys ...pkey.Key) (bool, error) { + return hasAnyOf(keys...) 
+} + +func (globalSyspolicy) RegisterChangeCallback(cb func(policyclient.PolicyChange)) (unregister func(), err error) { + return registerChangeCallback(cb) } diff --git a/util/syspolicy/syspolicy_test.go b/util/syspolicy/syspolicy_test.go index 3130f5d07..0ee62efb1 100644 --- a/util/syspolicy/syspolicy_test.go +++ b/util/syspolicy/syspolicy_test.go @@ -82,7 +82,7 @@ func TestGetString(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - value, err := GetString(tt.key, tt.defaultValue) + value, err := getString(tt.key, tt.defaultValue) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -157,7 +157,7 @@ func TestGetUint64(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - value, err := GetUint64(tt.key, tt.defaultValue) + value, err := getUint64(tt.key, tt.defaultValue) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -224,7 +224,7 @@ func TestGetBoolean(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - value, err := GetBoolean(tt.key, tt.defaultValue) + value, err := getBoolean(tt.key, tt.defaultValue) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -317,7 +317,7 @@ func TestGetPreferenceOption(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - option, err := GetPreferenceOption(tt.key) + option, err := getPreferenceOption(tt.key, ptype.ShowChoiceByPolicy) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -402,7 +402,7 @@ func TestGetVisibility(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - visibility, err := GetVisibility(tt.key) + visibility, err := getVisibility(tt.key) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -498,7 +498,7 @@ func TestGetDuration(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - duration, err := GetDuration(tt.key, tt.defaultValue) + duration, err := getDuration(tt.key, tt.defaultValue) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -579,7 +579,7 @@ func TestGetStringArray(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - value, err := GetStringArray(tt.key, tt.defaultValue) + value, err := getStringArray(tt.key, tt.defaultValue) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -613,7 +613,7 @@ func BenchmarkGetString(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - gotControlURL, _ := GetString(pkey.ControlURL, "https://controlplane.tailscale.com") + gotControlURL, _ := getString(pkey.ControlURL, "https://controlplane.tailscale.com") if gotControlURL != wantControlURL { b.Fatalf("got %v; want %v", gotControlURL, wantControlURL) } From 24b8a57b1e9c61154d45d87402fadcb56ff27843 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 2 Sep 2025 16:50:10 -0700 Subject: [PATCH 1261/1708] util/syspolicy/policytest: move policy test helper to its own package Updates #16998 Updates #12614 Change-Id: I9fd27d653ebee547951705dc5597481e85b60747 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local_test.go | 62 +------------ util/syspolicy/policytest/policytest.go | 117 ++++++++++++++++++++++++ 2 files changed, 120 insertions(+), 59 deletions(-) create mode 100644 util/syspolicy/policytest/policytest.go diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index a3a26af04..bd81a09c3 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go 
@@ -64,7 +64,7 @@ import ( "tailscale.com/util/set" "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" - "tailscale.com/util/syspolicy/policyclient" + "tailscale.com/util/syspolicy/policytest" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" "tailscale.com/wgengine" @@ -1183,7 +1183,7 @@ func TestConfigureExitNode(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - var pol testPolicy + var pol policytest.Config // Configure policy settings, if any. if tt.exitNodeIDPolicy != nil { pol.Set(pkey.ExitNodeID, string(*tt.exitNodeIDPolicy)) @@ -5539,62 +5539,6 @@ func TestReadWriteRouteInfo(t *testing.T) { } } -// testPolicy is a [policyclient.Client] with a static mapping of values. -// The map value must be of the correct type (string, []string, bool, etc). -// -// It is used for testing purposes to simulate policy client behavior. -// It panics if the values are the wrong type. -type testPolicy struct { - v map[pkey.Key]any - policyclient.NoPolicyClient -} - -func (sp *testPolicy) Set(key pkey.Key, value any) { - if sp.v == nil { - sp.v = make(map[pkey.Key]any) - } - sp.v[key] = value -} - -func (sp testPolicy) GetStringArray(key pkey.Key, defaultVal []string) ([]string, error) { - if val, ok := sp.v[key]; ok { - if arr, ok := val.([]string); ok { - return arr, nil - } - panic(fmt.Sprintf("key %s is not a []string", key)) - } - return defaultVal, nil -} - -func (sp testPolicy) GetString(key pkey.Key, defaultVal string) (string, error) { - if val, ok := sp.v[key]; ok { - if str, ok := val.(string); ok { - return str, nil - } - panic(fmt.Sprintf("key %s is not a string", key)) - } - return defaultVal, nil -} - -func (sp testPolicy) GetBoolean(key pkey.Key, defaultVal bool) (bool, error) { - if val, ok := sp.v[key]; ok { - if b, ok := val.(bool); ok { - return b, nil - } - panic(fmt.Sprintf("key %s is not a bool", key)) - } - return defaultVal, nil -} - -func (sp testPolicy) HasAnyOf(keys ...pkey.Key) (bool, error) { - for _, key := range keys { - if _, ok := sp.v[key]; ok { - return true, nil - } - } - return false, nil -} - func TestFillAllowedSuggestions(t *testing.T) { tests := []struct { name string @@ -5628,7 +5572,7 @@ func TestFillAllowedSuggestions(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var pol testPolicy + var pol policytest.Config pol.Set(pkey.AllowedSuggestedExitNodes, tt.allowPolicy) got := fillAllowedSuggestions(pol) diff --git a/util/syspolicy/policytest/policytest.go b/util/syspolicy/policytest/policytest.go new file mode 100644 index 000000000..e05d8938e --- /dev/null +++ b/util/syspolicy/policytest/policytest.go @@ -0,0 +1,117 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package policytest contains test helpers for the syspolicy packages. +package policytest + +import ( + "fmt" + "time" + + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" + "tailscale.com/util/syspolicy/ptype" +) + +// Config is a [policyclient.Client] implementation with a static mapping of +// values. +// +// It is used for testing purposes to simulate policy client behavior. +// +// It panics if a value is Set with one type and then accessed with a different +// expected type. 
+type Config map[pkey.Key]any + +var _ policyclient.Client = Config{} + +func (c *Config) Set(key pkey.Key, value any) { + if *c == nil { + *c = make(map[pkey.Key]any) + } + (*c)[key] = value +} + +func (c Config) GetStringArray(key pkey.Key, defaultVal []string) ([]string, error) { + if val, ok := c[key]; ok { + if arr, ok := val.([]string); ok { + return arr, nil + } + panic(fmt.Sprintf("key %s is not a []string", key)) + } + return defaultVal, nil +} + +func (c Config) GetString(key pkey.Key, defaultVal string) (string, error) { + if val, ok := c[key]; ok { + if str, ok := val.(string); ok { + return str, nil + } + panic(fmt.Sprintf("key %s is not a string", key)) + } + return defaultVal, nil +} + +func (c Config) GetBoolean(key pkey.Key, defaultVal bool) (bool, error) { + if val, ok := c[key]; ok { + if b, ok := val.(bool); ok { + return b, nil + } + panic(fmt.Sprintf("key %s is not a bool", key)) + } + return defaultVal, nil +} + +func (c Config) GetUint64(key pkey.Key, defaultVal uint64) (uint64, error) { + if val, ok := c[key]; ok { + if u, ok := val.(uint64); ok { + return u, nil + } + panic(fmt.Sprintf("key %s is not a uint64", key)) + } + return defaultVal, nil +} + +func (c Config) GetDuration(key pkey.Key, defaultVal time.Duration) (time.Duration, error) { + if val, ok := c[key]; ok { + if d, ok := val.(time.Duration); ok { + return d, nil + } + panic(fmt.Sprintf("key %s is not a time.Duration", key)) + } + return defaultVal, nil +} + +func (c Config) GetPreferenceOption(key pkey.Key, defaultVal ptype.PreferenceOption) (ptype.PreferenceOption, error) { + if val, ok := c[key]; ok { + if p, ok := val.(ptype.PreferenceOption); ok { + return p, nil + } + panic(fmt.Sprintf("key %s is not a ptype.PreferenceOption", key)) + } + return defaultVal, nil +} + +func (c Config) GetVisibility(key pkey.Key) (ptype.Visibility, error) { + if val, ok := c[key]; ok { + if p, ok := val.(ptype.Visibility); ok { + return p, nil + } + panic(fmt.Sprintf("key %s is not a ptype.Visibility", key)) + } + return ptype.Visibility(ptype.ShowChoiceByPolicy), nil +} + +func (c Config) HasAnyOf(keys ...pkey.Key) (bool, error) { + for _, key := range keys { + if _, ok := c[key]; ok { + return true, nil + } + } + return false, nil +} + +func (sp Config) RegisterChangeCallback(callback func(policyclient.PolicyChange)) (func(), error) { + return func() {}, nil +} + +func (sp Config) SetDebugLoggingEnabled(enabled bool) {} From 21f21bd2a2be7999a328b29ef1ff05e4c973ec35 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 2 Sep 2025 16:50:10 -0700 Subject: [PATCH 1262/1708] util/syspolicy: finish adding ts_omit_syspolicy build tags, tests Fixes #16998 Updates #12614 Change-Id: Idf2b1657898111df4be31f356091b2376d0d7f0b Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 2 +- client/local/local.go | 28 -------------- client/local/syspolicy.go | 40 +++++++++++++++++++ cmd/tailscale/cli/cli.go | 3 +- cmd/tailscale/cli/syspolicy.go | 71 ++++++++++++++++++---------------- cmd/tailscaled/deps_test.go | 14 +++++++ ipn/localapi/localapi.go | 50 ------------------------ ipn/localapi/syspolicy_api.go | 68 ++++++++++++++++++++++++++++++++ 8 files changed, 163 insertions(+), 113 deletions(-) create mode 100644 client/local/syspolicy.go create mode 100644 ipn/localapi/syspolicy_api.go diff --git a/build_dist.sh b/build_dist.sh index 0fc123ade..12f366e06 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,7 +41,7 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" - 
tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm" + tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm,ts_omit_syspolicy" ;; --box) if [ ! -z "${TAGS:-}" ]; then diff --git a/client/local/local.go b/client/local/local.go index 55d14f95e..0257c7a26 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -43,7 +43,6 @@ import ( "tailscale.com/types/key" "tailscale.com/types/tkatype" "tailscale.com/util/eventbus" - "tailscale.com/util/syspolicy/setting" ) // defaultClient is the default Client when using the legacy @@ -926,33 +925,6 @@ func (lc *Client) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Pref return decodeJSON[*ipn.Prefs](body) } -// GetEffectivePolicy returns the effective policy for the specified scope. -func (lc *Client) GetEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { - scopeID, err := scope.MarshalText() - if err != nil { - return nil, err - } - body, err := lc.get200(ctx, "/localapi/v0/policy/"+string(scopeID)) - if err != nil { - return nil, err - } - return decodeJSON[*setting.Snapshot](body) -} - -// ReloadEffectivePolicy reloads the effective policy for the specified scope -// by reading and merging policy settings from all applicable policy sources. -func (lc *Client) ReloadEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { - scopeID, err := scope.MarshalText() - if err != nil { - return nil, err - } - body, err := lc.send(ctx, "POST", "/localapi/v0/policy/"+string(scopeID), 200, http.NoBody) - if err != nil { - return nil, err - } - return decodeJSON[*setting.Snapshot](body) -} - // GetDNSOSConfig returns the system DNS configuration for the current device. // That is, it returns the DNS configuration that the system would use if Tailscale weren't being used. func (lc *Client) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig, error) { diff --git a/client/local/syspolicy.go b/client/local/syspolicy.go new file mode 100644 index 000000000..6eff17783 --- /dev/null +++ b/client/local/syspolicy.go @@ -0,0 +1,40 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_syspolicy + +package local + +import ( + "context" + "net/http" + + "tailscale.com/util/syspolicy/setting" +) + +// GetEffectivePolicy returns the effective policy for the specified scope. +func (lc *Client) GetEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { + scopeID, err := scope.MarshalText() + if err != nil { + return nil, err + } + body, err := lc.get200(ctx, "/localapi/v0/policy/"+string(scopeID)) + if err != nil { + return nil, err + } + return decodeJSON[*setting.Snapshot](body) +} + +// ReloadEffectivePolicy reloads the effective policy for the specified scope +// by reading and merging policy settings from all applicable policy sources. 
+func (lc *Client) ReloadEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { + scopeID, err := scope.MarshalText() + if err != nil { + return nil, err + } + body, err := lc.send(ctx, "POST", "/localapi/v0/policy/"+string(scopeID), 200, http.NoBody) + if err != nil { + return nil, err + } + return decodeJSON[*setting.Snapshot](body) +} diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 208ee93fd..5db030888 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -209,6 +209,7 @@ func noDupFlagify(c *ffcli.Command) { } var fileCmd func() *ffcli.Command +var sysPolicyCmd func() *ffcli.Command func newRootCmd() *ffcli.Command { rootfs := newFlagSet("tailscale") @@ -239,7 +240,7 @@ change in the future. logoutCmd, switchCmd, configureCmd(), - syspolicyCmd, + nilOrCall(sysPolicyCmd), netcheckCmd, ipCmd, dnsCmd, diff --git a/cmd/tailscale/cli/syspolicy.go b/cmd/tailscale/cli/syspolicy.go index a71952a9f..97f3f2122 100644 --- a/cmd/tailscale/cli/syspolicy.go +++ b/cmd/tailscale/cli/syspolicy.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_syspolicy + package cli import ( @@ -20,38 +22,42 @@ var syspolicyArgs struct { json bool // JSON output mode } -var syspolicyCmd = &ffcli.Command{ - Name: "syspolicy", - ShortHelp: "Diagnose the MDM and system policy configuration", - LongHelp: "The 'tailscale syspolicy' command provides tools for diagnosing the MDM and system policy configuration.", - ShortUsage: "tailscale syspolicy ", - UsageFunc: usageFuncNoDefaultValues, - Subcommands: []*ffcli.Command{ - { - Name: "list", - ShortUsage: "tailscale syspolicy list", - Exec: runSysPolicyList, - ShortHelp: "Print effective policy settings", - LongHelp: "The 'tailscale syspolicy list' subcommand displays the effective policy settings and their sources (e.g., MDM or environment variables).", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("syspolicy list") - fs.BoolVar(&syspolicyArgs.json, "json", false, "output in JSON format") - return fs - })(), - }, - { - Name: "reload", - ShortUsage: "tailscale syspolicy reload", - Exec: runSysPolicyReload, - ShortHelp: "Force a reload of policy settings, even if no changes are detected, and prints the result", - LongHelp: "The 'tailscale syspolicy reload' subcommand forces a reload of policy settings, even if no changes are detected, and prints the result.", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("syspolicy reload") - fs.BoolVar(&syspolicyArgs.json, "json", false, "output in JSON format") - return fs - })(), - }, - }, +func init() { + sysPolicyCmd = func() *ffcli.Command { + return &ffcli.Command{ + Name: "syspolicy", + ShortHelp: "Diagnose the MDM and system policy configuration", + LongHelp: "The 'tailscale syspolicy' command provides tools for diagnosing the MDM and system policy configuration.", + ShortUsage: "tailscale syspolicy ", + UsageFunc: usageFuncNoDefaultValues, + Subcommands: []*ffcli.Command{ + { + Name: "list", + ShortUsage: "tailscale syspolicy list", + Exec: runSysPolicyList, + ShortHelp: "Print effective policy settings", + LongHelp: "The 'tailscale syspolicy list' subcommand displays the effective policy settings and their sources (e.g., MDM or environment variables).", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("syspolicy list") + fs.BoolVar(&syspolicyArgs.json, "json", false, "output in JSON format") + return fs + })(), + }, + { + Name: "reload", + ShortUsage: "tailscale 
syspolicy reload", + Exec: runSysPolicyReload, + ShortHelp: "Force a reload of policy settings, even if no changes are detected, and prints the result", + LongHelp: "The 'tailscale syspolicy reload' subcommand forces a reload of policy settings, even if no changes are detected, and prints the result.", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("syspolicy reload") + fs.BoolVar(&syspolicyArgs.json, "json", false, "output in JSON format") + return fs + })(), + }, + }, + } + } } func runSysPolicyList(ctx context.Context, args []string) error { @@ -61,7 +67,6 @@ func runSysPolicyList(ctx context.Context, args []string) error { } printPolicySettings(policy) return nil - } func runSysPolicyReload(ctx context.Context, args []string) error { diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 7f06abc6c..6d2ea3837 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -27,3 +27,17 @@ func TestOmitSSH(t *testing.T) { }, }.Check(t) } + +func TestOmitSyspolicy(t *testing.T) { + const msg = "unexpected syspolicy usage with ts_omit_syspolicy" + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_syspolicy,ts_include_cli", + BadDeps: map[string]string{ + "tailscale.com/util/syspolicy": msg, + "tailscale.com/util/syspolicy/setting": msg, + "tailscale.com/util/syspolicy/rsop": msg, + }, + }.Check(t) +} diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index a199a2908..2dc75c0d9 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -58,8 +58,6 @@ import ( "tailscale.com/util/mak" "tailscale.com/util/osdiag" "tailscale.com/util/rands" - "tailscale.com/util/syspolicy/rsop" - "tailscale.com/util/syspolicy/setting" "tailscale.com/version" "tailscale.com/wgengine/magicsock" ) @@ -79,7 +77,6 @@ type LocalAPIHandler func(*Handler, http.ResponseWriter, *http.Request) var handler = map[string]LocalAPIHandler{ // The prefix match handlers end with a slash: "cert/": (*Handler).serveCert, - "policy/": (*Handler).servePolicy, "profiles/": (*Handler).serveProfiles, // The other /localapi/v0/NAME handlers are exact matches and contain only NAME @@ -1603,53 +1600,6 @@ func (h *Handler) servePrefs(w http.ResponseWriter, r *http.Request) { e.Encode(prefs) } -func (h *Handler) servePolicy(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "policy access denied", http.StatusForbidden) - return - } - - suffix, ok := strings.CutPrefix(r.URL.EscapedPath(), "/localapi/v0/policy/") - if !ok { - http.Error(w, "misconfigured", http.StatusInternalServerError) - return - } - - var scope setting.PolicyScope - if suffix == "" { - scope = setting.DefaultScope() - } else if err := scope.UnmarshalText([]byte(suffix)); err != nil { - http.Error(w, fmt.Sprintf("%q is not a valid scope", suffix), http.StatusBadRequest) - return - } - - policy, err := rsop.PolicyFor(scope) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - var effectivePolicy *setting.Snapshot - switch r.Method { - case httpm.GET: - effectivePolicy = policy.Get() - case httpm.POST: - effectivePolicy, err = policy.Reload() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - default: - http.Error(w, "unsupported method", http.StatusMethodNotAllowed) - return - } - - w.Header().Set("Content-Type", "application/json") - e := json.NewEncoder(w) - e.SetIndent("", "\t") - e.Encode(effectivePolicy) -} - type resJSON struct { Error string `json:",omitempty"` } 
diff --git a/ipn/localapi/syspolicy_api.go b/ipn/localapi/syspolicy_api.go new file mode 100644 index 000000000..a438d352b --- /dev/null +++ b/ipn/localapi/syspolicy_api.go @@ -0,0 +1,68 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_syspolicy + +package localapi + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + + "tailscale.com/util/httpm" + "tailscale.com/util/syspolicy/rsop" + "tailscale.com/util/syspolicy/setting" +) + +func init() { + handler["policy/"] = (*Handler).servePolicy +} + +func (h *Handler) servePolicy(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "policy access denied", http.StatusForbidden) + return + } + + suffix, ok := strings.CutPrefix(r.URL.EscapedPath(), "/localapi/v0/policy/") + if !ok { + http.Error(w, "misconfigured", http.StatusInternalServerError) + return + } + + var scope setting.PolicyScope + if suffix == "" { + scope = setting.DefaultScope() + } else if err := scope.UnmarshalText([]byte(suffix)); err != nil { + http.Error(w, fmt.Sprintf("%q is not a valid scope", suffix), http.StatusBadRequest) + return + } + + policy, err := rsop.PolicyFor(scope) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + var effectivePolicy *setting.Snapshot + switch r.Method { + case httpm.GET: + effectivePolicy = policy.Get() + case httpm.POST: + effectivePolicy, err = policy.Reload() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + default: + http.Error(w, "unsupported method", http.StatusMethodNotAllowed) + return + } + + w.Header().Set("Content-Type", "application/json") + e := json.NewEncoder(w) + e.SetIndent("", "\t") + e.Encode(effectivePolicy) +} From d06d9007a6854b381fede40e25047c213c5e9bc3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 2 Sep 2025 18:47:48 -0700 Subject: [PATCH 1263/1708] ipn/ipnlocal: convert more tests to use policytest, de-global-ify Now that we have policytest and the policyclient.Client interface, we can de-global-ify many of the tests, letting them run concurrently with each other, and just removing global variable complexity. This does ~half of the LocalBackend ones. 
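For illustration, the rough before/after shape of the conversion (a sketch distilled from the hunks below; policytest.Config and newTestLocalBackendWithSys are the helpers this series uses):

    // Before: tests mutated process-global policy state, so they could
    // not run in parallel with each other.
    syspolicy.RegisterWellKnownSettingsForTest(t)
    store := source.NewTestStoreOf(t, source.TestSettingOf(pkey.ExitNodeID, "auto:any"))
    syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, store)
    b := newTestLocalBackend(t)

    // After: each test gets its own policyclient.Client via its tsd.System.
    sys := tsd.NewSystem()
    sys.PolicyClient.Set(policytest.Config{pkey.ExitNodeID: "auto:any"})
    b := newTestLocalBackendWithSys(t, sys)
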
Updates #16998 Change-Id: Iece754e1ef4e49744ccd967fa83629d0dca6f66a Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local_test.go | 96 ++++++++++++------------- ipn/ipnlocal/serve_test.go | 13 +++- util/syspolicy/policytest/policytest.go | 87 ++++++++++++++++------ 3 files changed, 125 insertions(+), 71 deletions(-) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index bd81a09c3..4debcdd8d 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2881,20 +2881,16 @@ func TestSetExitNodeIDPolicy(t *testing.T) { }, } - syspolicy.RegisterWellKnownSettingsForTest(t) - for _, test := range tests { t.Run(test.name, func(t *testing.T) { - b := newTestBackend(t) - - policyStore := source.NewTestStore(t) + var polc policytest.Config if test.exitNodeIDKey { - policyStore.SetStrings(source.TestSettingOf(pkey.ExitNodeID, test.exitNodeID)) + polc.Set(pkey.ExitNodeID, test.exitNodeID) } if test.exitNodeIPKey { - policyStore.SetStrings(source.TestSettingOf(pkey.ExitNodeIP, test.exitNodeIP)) + polc.Set(pkey.ExitNodeIP, test.exitNodeIP) } - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) + b := newTestBackend(t, polc) if test.nm == nil { test.nm = new(netmap.NetworkMap) @@ -3026,15 +3022,13 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { }, } - syspolicy.RegisterWellKnownSettingsForTest(t) - policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - pkey.ExitNodeID, "auto:any", - )) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - b := newTestLocalBackend(t) + sys := tsd.NewSystem() + sys.PolicyClient.Set(policytest.Config{ + pkey.ExitNodeID: "auto:any", + }) + b := newTestLocalBackendWithSys(t, sys) b.currentNode().SetNetMap(tt.netmap) b.lastSuggestedExitNode = tt.lastSuggestedExitNode b.sys.MagicSock.Get().SetLastNetcheckReportForTest(b.ctx, tt.report) @@ -3094,7 +3088,13 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { } func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { - b := newTestLocalBackend(t) + polc := policytest.Config{ + pkey.ExitNodeID: "auto:any", + } + sys := tsd.NewSystem() + sys.PolicyClient.Set(polc) + + b := newTestLocalBackendWithSys(t, sys) hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni @@ -3106,16 +3106,12 @@ func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { GetMachinePrivateKey: func() (key.MachinePrivate, error) { return k, nil }, - Dialer: tsdial.NewDialer(netmon.NewStatic()), - Logf: b.logf, + Dialer: tsdial.NewDialer(netmon.NewStatic()), + Logf: b.logf, + PolicyClient: polc, } cc = newClient(t, opts) b.cc = cc - syspolicy.RegisterWellKnownSettingsForTest(t) - policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - pkey.ExitNodeID, "auto:any", - )) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) peer1 := makePeer(1, withCap(26), withDERP(3), withSuggest(), withExitRoutes()) peer2 := makePeer(2, withCap(26), withDERP(2), withSuggest(), withExitRoutes()) selfNode := tailcfg.Node{ @@ -3219,12 +3215,14 @@ func TestSetControlClientStatusAutoExitNode(t *testing.T) { }, DERPMap: derpMap, } - b := newTestLocalBackend(t) - syspolicy.RegisterWellKnownSettingsForTest(t) - policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - pkey.ExitNodeID, "auto:any", - )) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) + + polc := policytest.Config{ + 
pkey.ExitNodeID: "auto:any", + } + sys := tsd.NewSystem() + sys.PolicyClient.Set(polc) + + b := newTestLocalBackendWithSys(t, sys) b.currentNode().SetNetMap(nm) // Peer 2 should be the initial exit node, as it's better than peer 1 // in terms of latency and DERP region. @@ -3461,21 +3459,20 @@ func TestApplySysPolicy(t *testing.T) { }, } - syspolicy.RegisterWellKnownSettingsForTest(t) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - settings := make([]source.TestSetting[string], 0, len(tt.stringPolicies)) - for p, v := range tt.stringPolicies { - settings = append(settings, source.TestSettingOf(p, v)) + var polc policytest.Config + for k, v := range tt.stringPolicies { + polc.Set(k, v) } - policyStore := source.NewTestStoreOf(t, settings...) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) t.Run("unit", func(t *testing.T) { prefs := tt.prefs.Clone() - lb := newTestLocalBackend(t) + sys := tsd.NewSystem() + sys.PolicyClient.Set(polc) + + lb := newTestLocalBackendWithSys(t, sys) gotAnyChange := lb.applySysPolicyLocked(prefs) if gotAnyChange && prefs.Equals(&tt.prefs) { @@ -3508,7 +3505,7 @@ func TestApplySysPolicy(t *testing.T) { pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) pm.prefs = usePrefs.View() - b := newTestBackend(t) + b := newTestBackend(t, polc) b.mu.Lock() b.pm = pm b.mu.Unlock() @@ -3607,24 +3604,26 @@ func TestPreferencePolicyInfo(t *testing.T) { }, } - syspolicy.RegisterWellKnownSettingsForTest(t) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { for _, pp := range preferencePolicies { t.Run(string(pp.key), func(t *testing.T) { - s := source.TestSetting[string]{ - Key: pp.key, - Error: tt.policyError, - Value: tt.policyValue, + t.Parallel() + + var polc policytest.Config + if tt.policyError != nil { + polc.Set(pp.key, tt.policyError) + } else { + polc.Set(pp.key, tt.policyValue) } - policyStore := source.NewTestStoreOf(t, s) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) prefs := defaultPrefs.AsStruct() pp.set(prefs, tt.initialValue) - lb := newTestLocalBackend(t) + sys := tsd.NewSystem() + sys.PolicyClient.Set(polc) + + lb := newTestLocalBackendWithSys(t, sys) gotAnyChange := lb.applySysPolicyLocked(prefs) if gotAnyChange != tt.wantChange { @@ -6534,7 +6533,8 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { store := source.NewTestStoreOf[string](t) syspolicy.MustRegisterStoreForTest(t, "TestSource", setting.DeviceScope, store) - lb := newLocalBackendWithTestControl(t, enableLogging, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + sys := tsd.NewSystem() + lb := newLocalBackendWithSysAndTestControl(t, enableLogging, sys, func(tb testing.TB, opts controlclient.Options) controlclient.Client { return newClient(tb, opts) }) if tt.initialPrefs != nil { diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index 57d1a4745..e2561cba9 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -35,6 +35,7 @@ import ( "tailscale.com/types/netmap" "tailscale.com/util/mak" "tailscale.com/util/must" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/wgengine" ) @@ -870,7 +871,7 @@ func mustCreateURL(t *testing.T, u string) url.URL { return *uParsed } -func newTestBackend(t *testing.T) *LocalBackend { +func newTestBackend(t *testing.T, opts ...any) *LocalBackend { var logf logger.Logf = logger.Discard const debug = true if debug { @@ -878,6 +879,16 @@ func newTestBackend(t 
*testing.T) *LocalBackend { } sys := tsd.NewSystem() + + for _, o := range opts { + switch v := o.(type) { + case policyclient.Client: + sys.PolicyClient.Set(v) + default: + panic(fmt.Sprintf("unsupported option type %T", v)) + } + } + e, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ SetSubsystem: sys.Set, HealthTracker: sys.HealthTracker(), diff --git a/util/syspolicy/policytest/policytest.go b/util/syspolicy/policytest/policytest.go index e05d8938e..7ea0ad91f 100644 --- a/util/syspolicy/policytest/policytest.go +++ b/util/syspolicy/policytest/policytest.go @@ -19,7 +19,12 @@ import ( // It is used for testing purposes to simulate policy client behavior. // // It panics if a value is Set with one type and then accessed with a different -// expected type. +// expected type and/or value. Some accessors such as GetPreferenceOption and +// GetVisibility support either a ptype.PreferenceOption/ptype.Visibility in the +// map, or the string representation as supported by their UnmarshalText +// methods. +// +// The map value may be an error to return that error value from the accessor. type Config map[pkey.Key]any var _ policyclient.Client = Config{} @@ -33,70 +38,108 @@ func (c *Config) Set(key pkey.Key, value any) { func (c Config) GetStringArray(key pkey.Key, defaultVal []string) ([]string, error) { if val, ok := c[key]; ok { - if arr, ok := val.([]string); ok { - return arr, nil + switch val := val.(type) { + case []string: + return val, nil + case error: + return nil, val + default: + panic(fmt.Sprintf("key %s is not a []string; got %T", key, val)) } - panic(fmt.Sprintf("key %s is not a []string", key)) } return defaultVal, nil } func (c Config) GetString(key pkey.Key, defaultVal string) (string, error) { if val, ok := c[key]; ok { - if str, ok := val.(string); ok { - return str, nil + switch val := val.(type) { + case string: + return val, nil + case error: + return "", val + default: + panic(fmt.Sprintf("key %s is not a string; got %T", key, val)) } - panic(fmt.Sprintf("key %s is not a string", key)) } return defaultVal, nil } func (c Config) GetBoolean(key pkey.Key, defaultVal bool) (bool, error) { if val, ok := c[key]; ok { - if b, ok := val.(bool); ok { - return b, nil + switch val := val.(type) { + case bool: + return val, nil + case error: + return false, val + default: + panic(fmt.Sprintf("key %s is not a bool; got %T", key, val)) } - panic(fmt.Sprintf("key %s is not a bool", key)) } return defaultVal, nil } func (c Config) GetUint64(key pkey.Key, defaultVal uint64) (uint64, error) { if val, ok := c[key]; ok { - if u, ok := val.(uint64); ok { - return u, nil + switch val := val.(type) { + case uint64: + return val, nil + case error: + return 0, val + default: + panic(fmt.Sprintf("key %s is not a uint64; got %T", key, val)) } - panic(fmt.Sprintf("key %s is not a uint64", key)) } return defaultVal, nil } func (c Config) GetDuration(key pkey.Key, defaultVal time.Duration) (time.Duration, error) { if val, ok := c[key]; ok { - if d, ok := val.(time.Duration); ok { - return d, nil + switch val := val.(type) { + case time.Duration: + return val, nil + case error: + return 0, val + default: + panic(fmt.Sprintf("key %s is not a time.Duration; got %T", key, val)) } - panic(fmt.Sprintf("key %s is not a time.Duration", key)) } return defaultVal, nil } func (c Config) GetPreferenceOption(key pkey.Key, defaultVal ptype.PreferenceOption) (ptype.PreferenceOption, error) { if val, ok := c[key]; ok { - if p, ok := val.(ptype.PreferenceOption); ok { - return p, nil + switch val := 
val.(type) { + case ptype.PreferenceOption: + return val, nil + case error: + var zero ptype.PreferenceOption + return zero, val + case string: + var p ptype.PreferenceOption + err := p.UnmarshalText(([]byte)(val)) + return p, err + default: + panic(fmt.Sprintf("key %s is not a ptype.PreferenceOption", key)) } - panic(fmt.Sprintf("key %s is not a ptype.PreferenceOption", key)) } return defaultVal, nil } func (c Config) GetVisibility(key pkey.Key) (ptype.Visibility, error) { if val, ok := c[key]; ok { - if p, ok := val.(ptype.Visibility); ok { - return p, nil + switch val := val.(type) { + case ptype.Visibility: + return val, nil + case error: + var zero ptype.Visibility + return zero, val + case string: + var p ptype.Visibility + err := p.UnmarshalText(([]byte)(val)) + return p, err + default: + panic(fmt.Sprintf("key %s is not a ptype.Visibility", key)) } - panic(fmt.Sprintf("key %s is not a ptype.Visibility", key)) } return ptype.Visibility(ptype.ShowChoiceByPolicy), nil } From c9f214e503af5357c2cea77629441d8647e6402f Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 3 Sep 2025 13:47:32 +0100 Subject: [PATCH 1264/1708] ipn: warn about self as the exit node if backend is running (#17018) Before: $ tailscale ip -4 1.2.3.4 $ tailscale set --exit-node=1.2.3.4 no node found in netmap with IP 1.2.3.4 After: $ tailscale set --exit-node=1.2.3.4 cannot use 1.2.3.4 as an exit node as it is a local IP address to this machine; did you mean --advertise-exit-node? The new error message already existed in the code, but would only be triggered if the backend wasn't running -- which means, in practice, it would almost never be triggered. The old error message is technically true, but could be confusing if you don't know the distinction between "netmap" and "tailnet" -- it could sound like the exit node isn't part of your tailnet. A node is never in its own netmap, but it is part of your tailnet. This error confused me when I was doing some local dev work, and it's confused customers before (e.g. #7513). Using the more specific error message should reduce confusion. Updates #7513 Updates https://github.com/tailscale/corp/issues/23596 Signed-off-by: Alex Chan --- ipn/prefs.go | 6 +++--- ipn/prefs_test.go | 17 +++++++++++++++++ 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/ipn/prefs.go b/ipn/prefs.go index 14b8078c0..88c73ead3 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -847,6 +847,9 @@ func exitNodeIPOfArg(s string, st *ipnstate.Status) (ip netip.Addr, err error) { } ip, err = netip.ParseAddr(s) if err == nil { + if !isRemoteIP(st, ip) { + return ip, ExitNodeLocalIPError{s} + } // If we're online already and have a netmap, double check that the IP // address specified is valid. 
if st.BackendState == "Running" { @@ -858,9 +861,6 @@ func exitNodeIPOfArg(s string, st *ipnstate.Status) (ip netip.Addr, err error) { return ip, fmt.Errorf("node %v is not advertising an exit node", ip) } } - if !isRemoteIP(st, ip) { - return ip, ExitNodeLocalIPError{s} - } return ip, nil } match := 0 diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 7aac20c80..3339a631c 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -897,6 +897,23 @@ func TestExitNodeIPOfArg(t *testing.T) { }, wantErr: `no node found in netmap with IP 1.2.3.4`, }, + { + name: "ip_is_self", + arg: "1.2.3.4", + st: &ipnstate.Status{ + TailscaleIPs: []netip.Addr{mustIP("1.2.3.4")}, + }, + wantErr: "cannot use 1.2.3.4 as an exit node as it is a local IP address to this machine", + }, + { + name: "ip_is_self_when_backend_running", + arg: "1.2.3.4", + st: &ipnstate.Status{ + BackendState: "Running", + TailscaleIPs: []netip.Addr{mustIP("1.2.3.4")}, + }, + wantErr: "cannot use 1.2.3.4 as an exit node as it is a local IP address to this machine", + }, { name: "ip_not_exit", arg: "1.2.3.4", From 0f3598b46741cbd0c005dc7d95c6e24fc8cf1924 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 3 Sep 2025 09:17:24 -0700 Subject: [PATCH 1265/1708] util/syspolicy: delete some unused code in handler.go There's a TODO to delete all of handler.go, but part of it's still used in another repo. But this deletes some. Updates #17022 Change-Id: Ic5a8a5a694ca258440307436731cd92b45ee2d21 Signed-off-by: Brad Fitzpatrick --- util/syspolicy/handler.go | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/util/syspolicy/handler.go b/util/syspolicy/handler.go index cdf32a7f7..690ff2162 100644 --- a/util/syspolicy/handler.go +++ b/util/syspolicy/handler.go @@ -8,7 +8,6 @@ import ( "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" - "tailscale.com/util/testenv" ) // TODO(nickkhyl): delete this file once other repos are updated. @@ -36,19 +35,10 @@ type Handler interface { // // Deprecated: using [RegisterStore] should be preferred. func RegisterHandler(h Handler) { - rsop.RegisterStore("DeviceHandler", setting.DeviceScope, WrapHandler(h)) + rsop.RegisterStore("DeviceHandler", setting.DeviceScope, handlerStore{h}) } -// SetHandlerForTest wraps and sets the specified handler as the device's policy -// [source.Store] for the duration of tb. -// -// Deprecated: using [MustRegisterStoreForTest] should be preferred. -func SetHandlerForTest(tb testenv.TB, h Handler) { - RegisterWellKnownSettingsForTest(tb) - MustRegisterStoreForTest(tb, "DeviceHandler-TestOnly", setting.DefaultScope(), WrapHandler(h)) -} - -var _ source.Store = (*handlerStore)(nil) +var _ source.Store = handlerStore{} // handlerStore is a [source.Store] that calls the underlying [Handler]. // @@ -57,11 +47,6 @@ type handlerStore struct { h Handler } -// WrapHandler returns a [source.Store] that wraps the specified [Handler]. -func WrapHandler(h Handler) source.Store { - return handlerStore{h} -} - // Lock implements [source.Lockable]. func (s handlerStore) Lock() error { if lockable, ok := s.h.(source.Lockable); ok { From 2b9d055101a0a2731af9ef5d2caf513bfb7da75e Mon Sep 17 00:00:00 2001 From: Craig Hesling Date: Tue, 2 Sep 2025 02:27:34 -0700 Subject: [PATCH 1266/1708] drive: fix StatCache mishandling of paths with spaces Fix "file not found" errors when WebDAV clients access files/dirs inside directories with spaces. 
The issue occurred because StatCache was mixing URL-escaped and unescaped paths, causing cache key mismatches. Specifically, StatCache.set() parsed WebDAV responses containing URL-escaped paths (ex. "Dir%20Space/file1.txt") and stored them alongside unescaped cache keys (ex. "Dir Space/file1.txt"). This mismatch prevented StatCache.get() from correctly determining whether a child file existed. See https://github.com/tailscale/tailscale/issues/13632#issuecomment-3243522449 for the full explanation of the issue. The decision to keep all paths references unescaped inside the StatCache is consistent with net/http.Request.URL.Path and rewrite.go (sole consumer) Update unit test to detect this directory space mishandling. Fixes tailscale#13632 Signed-off-by: Craig Hesling --- drive/driveimpl/compositedav/stat_cache.go | 8 +++++++- drive/driveimpl/compositedav/stat_cache_test.go | 8 ++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/drive/driveimpl/compositedav/stat_cache.go b/drive/driveimpl/compositedav/stat_cache.go index fc57ff064..36463fe7e 100644 --- a/drive/driveimpl/compositedav/stat_cache.go +++ b/drive/driveimpl/compositedav/stat_cache.go @@ -8,6 +8,7 @@ import ( "encoding/xml" "log" "net/http" + "net/url" "sync" "time" @@ -165,7 +166,12 @@ func (c *StatCache) set(name string, depth int, ce *cacheEntry) { children = make(map[string]*cacheEntry, len(ms.Responses)-1) for i := 0; i < len(ms.Responses); i++ { response := ms.Responses[i] - name := shared.Normalize(response.Href) + name, err := url.PathUnescape(response.Href) + if err != nil { + log.Printf("statcache.set child parse error: %s", err) + return + } + name = shared.Normalize(name) raw := marshalMultiStatus(response) entry := newCacheEntry(ce.Status, raw) if i == 0 { diff --git a/drive/driveimpl/compositedav/stat_cache_test.go b/drive/driveimpl/compositedav/stat_cache_test.go index fa63457a2..baa4fdda2 100644 --- a/drive/driveimpl/compositedav/stat_cache_test.go +++ b/drive/driveimpl/compositedav/stat_cache_test.go @@ -16,12 +16,12 @@ import ( "tailscale.com/tstest" ) -var parentPath = "/parent" +var parentPath = "/parent with spaces" -var childPath = "/parent/child.txt" +var childPath = "/parent with spaces/child.txt" var parentResponse = ` -/parent/ +/parent%20with%20spaces/ Mon, 29 Apr 2024 19:52:23 GMT @@ -36,7 +36,7 @@ var parentResponse = ` var childResponse = ` -/parent/child.txt +/parent%20with%20spaces/child.txt Mon, 29 Apr 2024 19:52:23 GMT From a2f2ac6ba15283dcf0a6e8a62d64cf4122ea9360 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 3 Sep 2025 15:35:05 -0400 Subject: [PATCH 1267/1708] ipn/local: fix deadlock in initial suggested exit node query (#17025) updates tailscale/corp#26369 b.mu is locked here. We need to use suggestExitNodeLocked. 
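The convention being relied on, sketched here with hypothetical method names (exported methods acquire b.mu themselves; the *Locked variants assume the caller already holds it, as WatchNotificationsAs does at this point):

    // Hypothetical illustration only, not the real method bodies.
    func (b *LocalBackend) DoThing() error {
        b.mu.Lock() // deadlocks if the caller already holds b.mu
        defer b.mu.Unlock()
        return b.doThingLocked()
    }

    func (b *LocalBackend) doThingLocked() error {
        // Caller must hold b.mu.
        return nil
    }
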
Signed-off-by: Jonathan Nobels --- ipn/ipnlocal/local.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 700e2de37..7592e9b4b 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3141,7 +3141,7 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A ini.Health = b.HealthTracker().CurrentState() } if mask&ipn.NotifyInitialSuggestedExitNode != 0 { - if en, err := b.SuggestExitNode(); err != nil { + if en, err := b.suggestExitNodeLocked(); err == nil { ini.SuggestedExitNode = &en.ID } } From 04f00339b6079f5afb3512dfe8cf929f42097cd8 Mon Sep 17 00:00:00 2001 From: David Bond Date: Wed, 3 Sep 2025 22:08:45 +0100 Subject: [PATCH 1268/1708] cmd/k8s-operator: update connector example (#17020) This commit modifies the connector example to use the new hostname prefix and replicas fields Signed-off-by: David Bond --- cmd/k8s-operator/deploy/examples/connector.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/k8s-operator/deploy/examples/connector.yaml b/cmd/k8s-operator/deploy/examples/connector.yaml index d29f27cf5..f5447400e 100644 --- a/cmd/k8s-operator/deploy/examples/connector.yaml +++ b/cmd/k8s-operator/deploy/examples/connector.yaml @@ -11,7 +11,8 @@ metadata: spec: tags: - "tag:prod" - hostname: ts-prod + hostnamePrefix: ts-prod + replicas: 2 subnetRouter: advertiseRoutes: - "10.40.0.0/14" From d8ac539bf9617bc18cd2c5f231c77b1edb48849e Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 3 Sep 2025 15:05:38 -0700 Subject: [PATCH 1269/1708] util/syspolicy: remove handler, other dead code Fixes #17022 Change-Id: I6a0f6488ae3ea75c5844dfcba68e1e8024e930be Signed-off-by: Brad Fitzpatrick --- util/syspolicy/handler.go | 99 ------------------------------ util/syspolicy/policy_keys.go | 19 ------ util/syspolicy/policy_keys_test.go | 7 --- 3 files changed, 125 deletions(-) delete mode 100644 util/syspolicy/handler.go diff --git a/util/syspolicy/handler.go b/util/syspolicy/handler.go deleted file mode 100644 index 690ff2162..000000000 --- a/util/syspolicy/handler.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package syspolicy - -import ( - "tailscale.com/util/syspolicy/pkey" - "tailscale.com/util/syspolicy/rsop" - "tailscale.com/util/syspolicy/setting" - "tailscale.com/util/syspolicy/source" -) - -// TODO(nickkhyl): delete this file once other repos are updated. - -// Handler reads system policies from OS-specific storage. -// -// Deprecated: implementing a [source.Store] should be preferred. -type Handler interface { - // ReadString reads the policy setting's string value for the given key. - // It should return ErrNoSuchKey if the key does not have a value set. - ReadString(key string) (string, error) - // ReadUInt64 reads the policy setting's uint64 value for the given key. - // It should return ErrNoSuchKey if the key does not have a value set. - ReadUInt64(key string) (uint64, error) - // ReadBool reads the policy setting's boolean value for the given key. - // It should return ErrNoSuchKey if the key does not have a value set. - ReadBoolean(key string) (bool, error) - // ReadStringArray reads the policy setting's string array value for the given key. - // It should return ErrNoSuchKey if the key does not have a value set. 
- ReadStringArray(key string) ([]string, error) -} - -// RegisterHandler wraps and registers the specified handler as the device's -// policy [source.Store] for the program's lifetime. -// -// Deprecated: using [RegisterStore] should be preferred. -func RegisterHandler(h Handler) { - rsop.RegisterStore("DeviceHandler", setting.DeviceScope, handlerStore{h}) -} - -var _ source.Store = handlerStore{} - -// handlerStore is a [source.Store] that calls the underlying [Handler]. -// -// TODO(nickkhyl): remove it when the corp and android repos are updated. -type handlerStore struct { - h Handler -} - -// Lock implements [source.Lockable]. -func (s handlerStore) Lock() error { - if lockable, ok := s.h.(source.Lockable); ok { - return lockable.Lock() - } - return nil -} - -// Unlock implements [source.Lockable]. -func (s handlerStore) Unlock() { - if lockable, ok := s.h.(source.Lockable); ok { - lockable.Unlock() - } -} - -// RegisterChangeCallback implements [source.Changeable]. -func (s handlerStore) RegisterChangeCallback(callback func()) (unregister func(), err error) { - if changeable, ok := s.h.(source.Changeable); ok { - return changeable.RegisterChangeCallback(callback) - } - return func() {}, nil -} - -// ReadString implements [source.Store]. -func (s handlerStore) ReadString(key pkey.Key) (string, error) { - return s.h.ReadString(string(key)) -} - -// ReadUInt64 implements [source.Store]. -func (s handlerStore) ReadUInt64(key pkey.Key) (uint64, error) { - return s.h.ReadUInt64(string(key)) -} - -// ReadBoolean implements [source.Store]. -func (s handlerStore) ReadBoolean(key pkey.Key) (bool, error) { - return s.h.ReadBoolean(string(key)) -} - -// ReadStringArray implements [source.Store]. -func (s handlerStore) ReadStringArray(key pkey.Key) ([]string, error) { - return s.h.ReadStringArray(string(key)) -} - -// Done implements [source.Expirable]. -func (s handlerStore) Done() <-chan struct{} { - if expirable, ok := s.h.(source.Expirable); ok { - return expirable.Done() - } - return nil -} diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index e32d9cdf4..1bbcfe6ca 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -4,7 +4,6 @@ package syspolicy import ( - "tailscale.com/types/lazy" "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" @@ -78,24 +77,6 @@ func init() { }) } -var implicitDefinitionMap lazy.SyncValue[setting.DefinitionMap] - -// WellKnownSettingDefinition returns a well-known, implicit setting definition by its key, -// or an [ErrNoSuchKey] if a policy setting with the specified key does not exist -// among implicit policy definitions. -func WellKnownSettingDefinition(k pkey.Key) (*setting.Definition, error) { - m, err := implicitDefinitionMap.GetErr(func() (setting.DefinitionMap, error) { - return setting.DefinitionMapOf(implicitDefinitions) - }) - if err != nil { - return nil, err - } - if d, ok := m[k]; ok { - return d, nil - } - return nil, ErrNoSuchKey -} - // RegisterWellKnownSettingsForTest registers all implicit setting definitions // for the duration of the test. 
func RegisterWellKnownSettingsForTest(tb testenv.TB) { diff --git a/util/syspolicy/policy_keys_test.go b/util/syspolicy/policy_keys_test.go index 490353c81..c2b8d5741 100644 --- a/util/syspolicy/policy_keys_test.go +++ b/util/syspolicy/policy_keys_test.go @@ -46,13 +46,6 @@ func TestKnownKeysRegistered(t *testing.T) { } } -func TestNotAWellKnownSetting(t *testing.T) { - d, err := WellKnownSettingDefinition("TestSettingDoesNotExist") - if d != nil || err == nil { - t.Fatalf("got %v, %v; want nil, %v", d, err, ErrNoSuchKey) - } -} - func listStringConsts[T ~string](filename string) (map[string]T, error) { fset := token.NewFileSet() src, err := os.ReadFile(filename) From 624cdd2961ac88ac2c187072dc2cb322d05a653b Mon Sep 17 00:00:00 2001 From: David Bond Date: Thu, 4 Sep 2025 12:40:55 +0100 Subject: [PATCH 1270/1708] cmd/containerboot: do not reset state on non-existant secret (#17021) This commit modifies containerboot's state reset process to handle the state secret not existing. During other parts of the boot process we gracefully handle the state secret not being created yet, but missed that check within `resetContainerbootState` Fixes https://github.com/tailscale/tailscale/issues/16804 Signed-off-by: David Bond --- cmd/containerboot/kube.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go index d4a974e6f..4873ae13f 100644 --- a/cmd/containerboot/kube.go +++ b/cmd/containerboot/kube.go @@ -124,10 +124,13 @@ func (kc *kubeClient) deleteAuthKey(ctx context.Context) error { // ensure the operator doesn't use stale state when a Pod is first recreated. func (kc *kubeClient) resetContainerbootState(ctx context.Context, podUID string) error { existingSecret, err := kc.GetSecret(ctx, kc.stateSecret) - if err != nil { + switch { + case kubeclient.IsNotFoundErr(err): + // In the case that the Secret doesn't exist, we don't have any state to reset and can return early. 
+ return nil + case err != nil: return fmt.Errorf("failed to read state Secret %q to reset state: %w", kc.stateSecret, err) } - s := &kubeapi.Secret{ Data: map[string][]byte{ kubetypes.KeyCapVer: fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion), From b034f7cca95476c89394b3419b8fb7b9d7e3534c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 3 Sep 2025 16:06:39 -0700 Subject: [PATCH 1271/1708] ipn/ipnlocal, util/syspolicy: convert last RegisterWellKnownSettingsForTest caller, remove Updates #16998 Change-Id: I735d75129a97a929092e9075107e41cdade18944 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local_test.go | 14 ++-- util/syspolicy/policy_keys.go | 10 --- util/syspolicy/policytest/policytest.go | 93 ++++++++++++++++++++++++- util/syspolicy/syspolicy.go | 11 --- util/syspolicy/syspolicy_test.go | 37 +++++++--- 5 files changed, 129 insertions(+), 36 deletions(-) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 4debcdd8d..7d1c452f3 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -65,7 +65,6 @@ import ( "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policytest" - "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" "tailscale.com/wgengine" "tailscale.com/wgengine/filter" @@ -6529,12 +6528,13 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - syspolicy.RegisterWellKnownSettingsForTest(t) - store := source.NewTestStoreOf[string](t) - syspolicy.MustRegisterStoreForTest(t, "TestSource", setting.DeviceScope, store) + var polc policytest.Config + polc.EnableRegisterChangeCallback() sys := tsd.NewSystem() + sys.PolicyClient.Set(polc) lb := newLocalBackendWithSysAndTestControl(t, enableLogging, sys, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + opts.PolicyClient = polc return newClient(tb, opts) }) if tt.initialPrefs != nil { @@ -6551,7 +6551,11 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { nw.watch(0, nil, unexpectedPrefsChange) } - store.SetStrings(tt.stringSettings...) + var batch policytest.Config + for _, ss := range tt.stringSettings { + batch.Set(ss.Key, ss.Value) + } + polc.SetMultiple(batch) nw.check() }) diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index 1bbcfe6ca..ef2ac430d 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -76,13 +76,3 @@ func init() { return nil }) } - -// RegisterWellKnownSettingsForTest registers all implicit setting definitions -// for the duration of the test. -func RegisterWellKnownSettingsForTest(tb testenv.TB) { - tb.Helper() - err := setting.SetDefinitionsForTest(tb, implicitDefinitions...) - if err != nil { - tb.Fatalf("Failed to register well-known settings: %v", err) - } -} diff --git a/util/syspolicy/policytest/policytest.go b/util/syspolicy/policytest/policytest.go index 7ea0ad91f..e5c1c7856 100644 --- a/util/syspolicy/policytest/policytest.go +++ b/util/syspolicy/policytest/policytest.go @@ -6,8 +6,12 @@ package policytest import ( "fmt" + "maps" + "slices" + "sync" "time" + "tailscale.com/util/set" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/ptype" @@ -29,11 +33,85 @@ type Config map[pkey.Key]any var _ policyclient.Client = Config{} +// Set sets key to value. The value should be of the correct type that it will +// be read as later. 
For PreferenceOption and Visibility, you may also set them +// to 'string' values and they'll be UnmarshalText'ed into their correct value +// at Get time. +// +// As a special case, the value can also be of type error to make the accessors +// return that error value. func (c *Config) Set(key pkey.Key, value any) { if *c == nil { *c = make(map[pkey.Key]any) } (*c)[key] = value + + if w, ok := (*c)[watchersKey].(*watchers); ok && key != watchersKey { + w.mu.Lock() + vals := slices.Collect(maps.Values(w.s)) + w.mu.Unlock() + for _, f := range vals { + f(policyChange(key)) + } + } +} + +// SetMultiple is a batch version of [Config.Set]. It copies the contents of o +// into c and does at most one notification wake-up for the whole batch. +func (c *Config) SetMultiple(o Config) { + if *c == nil { + *c = make(map[pkey.Key]any) + } + + maps.Copy(*c, o) + + if w, ok := (*c)[watchersKey].(*watchers); ok { + w.mu.Lock() + vals := slices.Collect(maps.Values(w.s)) + w.mu.Unlock() + for _, f := range vals { + f(policyChanges(o)) + } + } +} + +type policyChange pkey.Key + +func (pc policyChange) HasChanged(v pkey.Key) bool { return pkey.Key(pc) == v } +func (pc policyChange) HasChangedAnyOf(keys ...pkey.Key) bool { + return slices.Contains(keys, pkey.Key(pc)) +} + +type policyChanges map[pkey.Key]any + +func (pc policyChanges) HasChanged(v pkey.Key) bool { + _, ok := pc[v] + return ok +} +func (pc policyChanges) HasChangedAnyOf(keys ...pkey.Key) bool { + for _, k := range keys { + if pc.HasChanged(k) { + return true + } + } + return false +} + +const watchersKey = "_policytest_watchers" + +type watchers struct { + mu sync.Mutex + s set.HandleSet[func(policyclient.PolicyChange)] +} + +// EnableRegisterChangeCallback makes c support the RegisterChangeCallback +// for testing. Without calling this, the RegisterChangeCallback does nothing. +// For watchers to be notified, use the [Config.Set] method. Changing the map +// directly obviously wouldn't work. +func (c *Config) EnableRegisterChangeCallback() { + if _, ok := (*c)[watchersKey]; !ok { + c.Set(watchersKey, new(watchers)) + } } func (c Config) GetStringArray(key pkey.Key, defaultVal []string) ([]string, error) { @@ -153,8 +231,19 @@ func (c Config) HasAnyOf(keys ...pkey.Key) (bool, error) { return false, nil } -func (sp Config) RegisterChangeCallback(callback func(policyclient.PolicyChange)) (func(), error) { - return func() {}, nil +func (c Config) RegisterChangeCallback(callback func(policyclient.PolicyChange)) (func(), error) { + w, ok := c[watchersKey].(*watchers) + if !ok { + return func() {}, nil + } + w.mu.Lock() + defer w.mu.Unlock() + h := w.s.Add(callback) + return func() { + w.mu.Lock() + defer w.mu.Unlock() + delete(w.s, h) + }, nil } func (sp Config) SetDebugLoggingEnabled(enabled bool) {} diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index 2367e21eb..48e430b67 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -19,7 +19,6 @@ import ( "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" - "tailscale.com/util/testenv" ) var ( @@ -45,16 +44,6 @@ func RegisterStore(name string, scope setting.PolicyScope, store source.Store) ( return rsop.RegisterStore(name, scope, store) } -// MustRegisterStoreForTest is like [rsop.RegisterStoreForTest], but it fails the test if the store could not be registered. 
-func MustRegisterStoreForTest(tb testenv.TB, name string, scope setting.PolicyScope, store source.Store) *rsop.StoreRegistration { - tb.Helper() - reg, err := rsop.RegisterStoreForTest(tb, name, scope, store) - if err != nil { - tb.Fatalf("Failed to register policy store %q as a %v policy source: %v", name, scope, err) - } - return reg -} - // hasAnyOf returns whether at least one of the specified policy settings is configured, // or an error if no keys are provided or the check fails. func hasAnyOf(keys ...pkey.Key) (bool, error) { diff --git a/util/syspolicy/syspolicy_test.go b/util/syspolicy/syspolicy_test.go index 0ee62efb1..10f8da486 100644 --- a/util/syspolicy/syspolicy_test.go +++ b/util/syspolicy/syspolicy_test.go @@ -14,6 +14,7 @@ import ( "tailscale.com/util/syspolicy/internal/metrics" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/ptype" + "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" "tailscale.com/util/testenv" @@ -21,6 +22,16 @@ import ( var someOtherError = errors.New("error other than not found") +// registerWellKnownSettingsForTest registers all implicit setting definitions +// for the duration of the test. +func registerWellKnownSettingsForTest(tb testenv.TB) { + tb.Helper() + err := setting.SetDefinitionsForTest(tb, implicitDefinitions...) + if err != nil { + tb.Fatalf("Failed to register well-known settings: %v", err) + } +} + func TestGetString(t *testing.T) { tests := []struct { name string @@ -68,7 +79,7 @@ func TestGetString(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -210,7 +221,7 @@ func TestGetBoolean(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -303,7 +314,7 @@ func TestGetPreferenceOption(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -388,7 +399,7 @@ func TestGetVisibility(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -484,7 +495,7 @@ func TestGetDuration(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -565,7 +576,7 @@ func TestGetStringArray(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -599,14 +610,24 @@ func TestGetStringArray(t *testing.T) { } } +// mustRegisterStoreForTest is like [rsop.RegisterStoreForTest], but it fails the test if the store could not be registered. 
+func mustRegisterStoreForTest(tb testenv.TB, name string, scope setting.PolicyScope, store source.Store) *rsop.StoreRegistration { + tb.Helper() + reg, err := rsop.RegisterStoreForTest(tb, name, scope, store) + if err != nil { + tb.Fatalf("Failed to register policy store %q as a %v policy source: %v", name, scope, err) + } + return reg +} + func registerSingleSettingStoreForTest[T source.TestValueType](tb testenv.TB, s source.TestSetting[T]) { policyStore := source.NewTestStoreOf(tb, s) - MustRegisterStoreForTest(tb, "TestStore", setting.DeviceScope, policyStore) + mustRegisterStoreForTest(tb, "TestStore", setting.DeviceScope, policyStore) } func BenchmarkGetString(b *testing.B) { loggerx.SetForTest(b, logger.Discard, logger.Discard) - RegisterWellKnownSettingsForTest(b) + registerWellKnownSettingsForTest(b) wantControlURL := "https://login.tailscale.com" registerSingleSettingStoreForTest(b, source.TestSettingOf(pkey.ControlURL, wantControlURL)) From 46369f06af2729b2e553433aef16c821670c2455 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 2 Sep 2025 21:41:06 -0700 Subject: [PATCH 1272/1708] util/syspolicy/policyclient: always use no-op policyclient in tests by default We should never use the real syspolicy implementation in tests by default. (the machine's configuration shouldn't affect tests) You either specify a test policy, or you get a no-op one. Updates #16998 Change-Id: I3350d392aad11573a5ad7caab919bb3bbaecb225 Signed-off-by: Brad Fitzpatrick --- util/syspolicy/policyclient/policyclient.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/util/syspolicy/policyclient/policyclient.go b/util/syspolicy/policyclient/policyclient.go index 5a7842448..728a16718 100644 --- a/util/syspolicy/policyclient/policyclient.go +++ b/util/syspolicy/policyclient/policyclient.go @@ -11,6 +11,7 @@ import ( "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/ptype" + "tailscale.com/util/testenv" ) // Client is the interface between code making questions about the system policy @@ -68,8 +69,15 @@ type Client interface { // Get returns a non-nil [Client] implementation as a function of the // build tags. It returns a no-op implementation if the full syspolicy -// package is omitted from the build. +// package is omitted from the build, or in tests. func Get() Client { + if testenv.InTest() { + // This is a little redundant (the Windows implementation at least + // already does this) but it's here for redundancy and clarity, that we + // don't want to accidentally use the real system policy when running + // tests. 
+ return NoPolicyClient{} + } return client } From 046b8830c76b29f04fc95f3880e6abe41eeb16e7 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Fri, 5 Sep 2025 14:52:44 +0100 Subject: [PATCH 1273/1708] ipn/ipnlocal: add state change test for key expiry Updates tailscale/corp#31478 Signed-off-by: James Sanderson --- ipn/ipnlocal/state_test.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index c29589acc..4097a3773 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1349,6 +1349,21 @@ func TestEngineReconfigOnStateChange(t *testing.T) { Hosts: hostsFor(node3), }, }, + { + name: "Start/Connect/Login/Expire", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node3) + cc().send(nil, "", false, &netmap.NetworkMap{ + Expiry: time.Now().Add(-time.Minute), + }) + }, + wantState: ipn.NeedsLogin, + wantCfg: &wgcfg.Config{}, + wantRouterCfg: &router.Config{}, + wantDNSCfg: &dns.Config{}, + }, } for _, tt := range tests { From 23297da10d180a4b30b1a6db9e131e463b447813 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Fri, 5 Sep 2025 15:56:23 -0400 Subject: [PATCH 1274/1708] cmd/tailscale/cli: add new line for set --webclient (#17043) Fixes #17042 Signed-off-by: Mike O'Driscoll --- cmd/tailscale/cli/set.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index f1b21995e..d265090e2 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -264,7 +264,7 @@ func runSet(ctx context.Context, args []string) (retErr error) { } if setArgs.runWebClient && len(st.TailscaleIPs) > 0 { - printf("\nWeb interface now running at %s:%d", st.TailscaleIPs[0], web.ListenPort) + printf("\nWeb interface now running at %s:%d\n", st.TailscaleIPs[0], web.ListenPort) } return nil From a29545e9cc6a71439741836ea9ba0e8cbfbc7134 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 5 Sep 2025 17:58:36 -0700 Subject: [PATCH 1275/1708] wgengine/magicsock: log the peer failing disco writes are intended for Updates tailscale/corp#31762 Signed-off-by: James Tucker --- wgengine/magicsock/magicsock.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a11e8a1cd..695039ea6 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2026,7 +2026,7 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. // Can't send. (e.g. no IPv6 locally) } else { if !c.networkDown() && pmtuShouldLogDiscoTxErr(m, err) { - c.logf("magicsock: disco: failed to send %v to %v: %v", disco.MessageSummary(m), dst, err) + c.logf("magicsock: disco: failed to send %v to %v %s: %v", disco.MessageSummary(m), dst, dstKey.ShortString(), err) } } return sent, err From ed6aa50bd549bdc5e79dcf0326c358f40e9aced2 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Sat, 6 Sep 2025 09:28:07 +0100 Subject: [PATCH 1276/1708] prober: include current probe results in run-probe text response It was a bit confusing that provided history did not include the current probe results. 
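For example (illustrative numbers), the plaintext response's summary line now reads something like:

    Last 3 probes (including this one): success rate 100%, median latency 1.2ms

whereas previously the count, success rate, and latency covered only the probes before the current run.
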
Updates tailscale/corp#20583 Signed-off-by: Anton Tolchanov --- prober/prober.go | 6 +++--- prober/prober_test.go | 12 +++++++----- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/prober/prober.go b/prober/prober.go index 9c494c3c9..af0e19934 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -570,9 +570,9 @@ func (p *Prober) RunHandler(w http.ResponseWriter, r *http.Request) error { return nil } - stats := fmt.Sprintf("Last %d probes: success rate %d%%, median latency %v\n", - len(prevInfo.RecentResults), - int(prevInfo.RecentSuccessRatio()*100), prevInfo.RecentMedianLatency()) + stats := fmt.Sprintf("Last %d probes (including this one): success rate %d%%, median latency %v\n", + len(info.RecentResults), + int(info.RecentSuccessRatio()*100), info.RecentMedianLatency()) if err != nil { return tsweb.Error(respStatus, fmt.Sprintf("Probe failed: %s\n%s", err.Error(), stats), err) } diff --git a/prober/prober_test.go b/prober/prober_test.go index 15db21a5e..1e045fa89 100644 --- a/prober/prober_test.go +++ b/prober/prober_test.go @@ -12,6 +12,7 @@ import ( "net/http" "net/http/httptest" "net/url" + "regexp" "strings" "sync" "sync/atomic" @@ -546,7 +547,7 @@ func TestProberRunHandler(t *testing.T) { probeFunc func(context.Context) error wantResponseCode int wantJSONResponse RunHandlerResponse - wantPlaintextResponse string + wantPlaintextResponse *regexp.Regexp }{ { name: "success", @@ -561,7 +562,7 @@ func TestProberRunHandler(t *testing.T) { }, PreviousSuccessRatio: 1, }, - wantPlaintextResponse: "Probe succeeded", + wantPlaintextResponse: regexp.MustCompile("(?s)Probe succeeded .*Last 2 probes.*success rate 100%"), }, { name: "failure", @@ -576,7 +577,7 @@ func TestProberRunHandler(t *testing.T) { RecentResults: []bool{false, false}, }, }, - wantPlaintextResponse: "Probe failed", + wantPlaintextResponse: regexp.MustCompile("(?s)Probe failed: .*Last 2 probes.*success rate 0%"), }, } @@ -607,6 +608,7 @@ func TestProberRunHandler(t *testing.T) { if err != nil { t.Fatalf("failed to make request: %v", err) } + defer resp.Body.Close() if resp.StatusCode != tt.wantResponseCode { t.Errorf("unexpected response code: got %d, want %d", resp.StatusCode, tt.wantResponseCode) @@ -630,8 +632,8 @@ func TestProberRunHandler(t *testing.T) { } } else { body, _ := io.ReadAll(resp.Body) - if !strings.Contains(string(body), tt.wantPlaintextResponse) { - t.Errorf("unexpected response body: got %q, want to contain %q", body, tt.wantPlaintextResponse) + if !tt.wantPlaintextResponse.MatchString(string(body)) { + t.Errorf("unexpected response body: got %q, want to match %q", body, tt.wantPlaintextResponse) } } }) From ff8900583ceb1876a8453c6d2c03dbd2985a2857 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 8 Sep 2025 15:25:50 +0100 Subject: [PATCH 1277/1708] cmd/tailscale/cli: fix the spelling of "routes" (#17039) Updates #cleanup Signed-off-by: Alex Chan --- cmd/tailscale/cli/set.go | 2 +- cmd/tailscale/cli/up.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index d265090e2..a1c6987e8 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -185,7 +185,7 @@ func runSet(ctx context.Context, args []string) (retErr error) { } } - warnOnAdvertiseRouts(ctx, &maskedPrefs.Prefs) + warnOnAdvertiseRoutes(ctx, &maskedPrefs.Prefs) if err := checkExitNodeRisk(ctx, &maskedPrefs.Prefs, setArgs.acceptedRisks); err != nil { return err } diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 
ebbe3b19e..097af725b 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -486,7 +486,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE fatalf("%s", err) } - warnOnAdvertiseRouts(ctx, prefs) + warnOnAdvertiseRoutes(ctx, prefs) if err := checkExitNodeRisk(ctx, prefs, upArgs.acceptedRisks); err != nil { return err } @@ -1184,7 +1184,7 @@ func resolveAuthKey(ctx context.Context, v, tags string) (string, error) { return authkey, nil } -func warnOnAdvertiseRouts(ctx context.Context, prefs *ipn.Prefs) { +func warnOnAdvertiseRoutes(ctx context.Context, prefs *ipn.Prefs) { if len(prefs.AdvertiseRoutes) > 0 || prefs.AppConnector.Advertise { // TODO(jwhited): compress CheckIPForwarding and CheckUDPGROForwarding // into a single HTTP req. From 14adf5b71783d039c1a8a978eea5ce75f081144f Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 8 Sep 2025 15:27:24 +0100 Subject: [PATCH 1278/1708] utils/expvarx, tstest/integration: mark two tests as known flaky (#17052) * utils/expvarx: mark TestSafeFuncHappyPath as known flaky Updates #15348 Signed-off-by: Alex Chan * tstest/integration: mark TestCollectPanic as known flaky Updates #15865 Signed-off-by: Alex Chan --------- Signed-off-by: Alex Chan --- tstest/integration/integration_test.go | 1 + util/expvarx/expvarx_test.go | 3 +++ 2 files changed, 4 insertions(+) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 7cb251f31..de464108c 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -170,6 +170,7 @@ func TestControlKnobs(t *testing.T) { } func TestCollectPanic(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15865") tstest.Shard(t) tstest.Parallel(t) env := NewTestEnv(t) diff --git a/util/expvarx/expvarx_test.go b/util/expvarx/expvarx_test.go index 74ec152f4..50131dfb3 100644 --- a/util/expvarx/expvarx_test.go +++ b/util/expvarx/expvarx_test.go @@ -10,6 +10,8 @@ import ( "sync/atomic" "testing" "time" + + "tailscale.com/cmd/testwrapper/flakytest" ) func ExampleNewSafeFunc() { @@ -52,6 +54,7 @@ func ExampleNewSafeFunc() { } func TestSafeFuncHappyPath(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15348") var count int f := NewSafeFunc(expvar.Func(func() any { count++ From 1cb855fb3682c9c1f0052bfe298d058fe76a0b03 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 8 Sep 2025 08:46:53 -0700 Subject: [PATCH 1279/1708] util/expvarx: deflake TestSafeFuncHappyPath with synctest I probably could've deflaked this without synctest, but might as well use it now that Go 1.25 has it. 
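For context, a minimal sketch of the testing/synctest behavior the converted test relies on (Go 1.25+): inside the bubble time is virtual, so sleeps advance a deterministic fake clock instead of waiting on the wall clock, which is what removes the flake:

    synctest.Test(t, func(t *testing.T) {
        start := time.Now()
        time.Sleep(5 * time.Second) // returns once the bubble is idle; no real 5s wait
        if got := time.Since(start); got != 5*time.Second {
            t.Fatalf("fake clock advanced %v; want 5s", got)
        }
    })
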
Fixes #15348 Change-Id: I81c9253fcb7eada079f3e943ab5f1e29ba8e8e31 Signed-off-by: Brad Fitzpatrick --- util/expvarx/expvarx_test.go | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/util/expvarx/expvarx_test.go b/util/expvarx/expvarx_test.go index 50131dfb3..9ed2e8f20 100644 --- a/util/expvarx/expvarx_test.go +++ b/util/expvarx/expvarx_test.go @@ -9,9 +9,8 @@ import ( "sync" "sync/atomic" "testing" + "testing/synctest" "time" - - "tailscale.com/cmd/testwrapper/flakytest" ) func ExampleNewSafeFunc() { @@ -54,19 +53,21 @@ func ExampleNewSafeFunc() { } func TestSafeFuncHappyPath(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15348") - var count int - f := NewSafeFunc(expvar.Func(func() any { - count++ - return count - }), time.Millisecond, nil) - - if got, want := f.Value(), 1; got != want { - t.Errorf("got %v, want %v", got, want) - } - if got, want := f.Value(), 2; got != want { - t.Errorf("got %v, want %v", got, want) - } + synctest.Test(t, func(t *testing.T) { + var count int + f := NewSafeFunc(expvar.Func(func() any { + count++ + return count + }), time.Second, nil) + + if got, want := f.Value(), 1; got != want { + t.Errorf("got %v, want %v", got, want) + } + time.Sleep(5 * time.Second) // (fake time in synctest) + if got, want := f.Value(), 2; got != want { + t.Errorf("got %v, want %v", got, want) + } + }) } func TestSafeFuncSlow(t *testing.T) { From 71cb6d4cbd8758197a82449ffa86b3288a35d29c Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 8 Sep 2025 17:51:59 +0100 Subject: [PATCH 1280/1708] cmd/tailscale/cli, derp: use client/local instead of deprecated client/tailscale (#17061) * cmd/tailscale/cli: use client/local instead of deprecated client/tailscale Updates tailscale/corp#22748 Signed-off-by: Alex Chan * derp: use client/local instead of deprecated client/tailscale Updates tailscale/corp#22748 Signed-off-by: Alex Chan --------- Signed-off-by: Alex Chan --- cmd/derper/depaware.txt | 8 +++----- cmd/tailscale/cli/bugreport.go | 4 ++-- cmd/tailscale/cli/cli.go | 5 ++--- cmd/tailscale/cli/debug.go | 4 ++-- cmd/tailscale/cli/ping.go | 4 ++-- cmd/tailscale/cli/serve_legacy.go | 4 ++-- cmd/tailscale/cli/serve_legacy_test.go | 4 ++-- cmd/tailscale/cli/serve_v2.go | 6 +++--- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 9 ++++----- cmd/tsidp/depaware.txt | 2 +- derp/derp_server.go | 3 +-- tsnet/depaware.txt | 2 +- 13 files changed, 26 insertions(+), 31 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 52b82b228..8adb2d338 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -89,9 +89,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version 💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+ - tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/derp - tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ + tailscale.com/client/local from tailscale.com/derp + tailscale.com/client/tailscale/apitype from tailscale.com/client/local tailscale.com/derp from tailscale.com/cmd/derper+ tailscale.com/derp/derpconst from tailscale.com/derp+ tailscale.com/derp/derphttp from tailscale.com/cmd/derper @@ -142,7 +141,7 @@ tailscale.com/cmd/derper dependencies: (generated by 
github.com/tailscale/depawa tailscale.com/types/lazy from tailscale.com/version+ tailscale.com/types/logger from tailscale.com/cmd/derper+ tailscale.com/types/netmap from tailscale.com/ipn - tailscale.com/types/opt from tailscale.com/client/tailscale+ + tailscale.com/types/opt from tailscale.com/envknob+ tailscale.com/types/persist from tailscale.com/ipn tailscale.com/types/preftype from tailscale.com/ipn tailscale.com/types/ptr from tailscale.com/hostinfo+ @@ -160,7 +159,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/dnsname from tailscale.com/hostinfo+ tailscale.com/util/eventbus from tailscale.com/net/netmon+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash - tailscale.com/util/httpm from tailscale.com/client/tailscale tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns tailscale.com/util/mak from tailscale.com/health+ diff --git a/cmd/tailscale/cli/bugreport.go b/cmd/tailscale/cli/bugreport.go index d671f3df6..50e6ffd82 100644 --- a/cmd/tailscale/cli/bugreport.go +++ b/cmd/tailscale/cli/bugreport.go @@ -10,7 +10,7 @@ import ( "fmt" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" ) var bugReportCmd = &ffcli.Command{ @@ -40,7 +40,7 @@ func runBugReport(ctx context.Context, args []string) error { default: return errors.New("unknown arguments") } - opts := tailscale.BugReportOpts{ + opts := local.BugReportOpts{ Note: note, Diagnose: bugReportArgs.diagnose, } diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 5db030888..42f1cb3a5 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -23,7 +23,6 @@ import ( "github.com/mattn/go-isatty" "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/client/local" - "tailscale.com/client/tailscale" "tailscale.com/cmd/tailscale/cli/ffcomplete" "tailscale.com/envknob" "tailscale.com/paths" @@ -113,7 +112,7 @@ func Run(args []string) (err error) { } var warnOnce sync.Once - tailscale.SetVersionMismatchHandler(func(clientVer, serverVer string) { + local.SetVersionMismatchHandler(func(clientVer, serverVer string) { warnOnce.Do(func() { fmt.Fprintf(Stderr, "Warning: client version %q != tailscaled server version %q\n", clientVer, serverVer) }) @@ -164,7 +163,7 @@ func Run(args []string) (err error) { } err = rootCmd.Run(context.Background()) - if tailscale.IsAccessDeniedError(err) && os.Getuid() != 0 && runtime.GOOS != "windows" { + if local.IsAccessDeniedError(err) && os.Getuid() != 0 && runtime.GOOS != "windows" { return fmt.Errorf("%v\n\nUse 'sudo tailscale %s'.\nTo not require root, use 'sudo tailscale set --operator=$USER' once.", err, strings.Join(args, " ")) } if errors.Is(err, flag.ErrHelp) { diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 6fe15b238..4960aeec2 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -30,7 +30,7 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "golang.org/x/net/http/httpproxy" "golang.org/x/net/http2" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/control/controlhttp" "tailscale.com/hostinfo" @@ -1219,7 +1219,7 @@ var debugPortmapArgs struct { } func debugPortmap(ctx context.Context, args []string) error { - opts := &tailscale.DebugPortmapOpts{ + opts := &local.DebugPortmapOpts{ Duration: debugPortmapArgs.duration, Type: debugPortmapArgs.ty, LogHTTP: debugPortmapArgs.logHTTP, 
diff --git a/cmd/tailscale/cli/ping.go b/cmd/tailscale/cli/ping.go index d438cb228..8ece7c93d 100644 --- a/cmd/tailscale/cli/ping.go +++ b/cmd/tailscale/cli/ping.go @@ -16,7 +16,7 @@ import ( "time" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/cmd/tailscale/cli/ffcomplete" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" @@ -128,7 +128,7 @@ func runPing(ctx context.Context, args []string) error { for { n++ ctx, cancel := context.WithTimeout(ctx, pingArgs.timeout) - pr, err := localClient.PingWithOpts(ctx, netip.MustParseAddr(ip), pingType(), tailscale.PingOpts{Size: pingArgs.size}) + pr, err := localClient.PingWithOpts(ctx, netip.MustParseAddr(ip), pingType(), local.PingOpts{Size: pingArgs.size}) cancel() if err != nil { if errors.Is(err, context.DeadlineExceeded) { diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 1a05d0543..3fbddeabf 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -23,7 +23,7 @@ import ( "strings" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" @@ -139,7 +139,7 @@ type localServeClient interface { GetServeConfig(context.Context) (*ipn.ServeConfig, error) SetServeConfig(context.Context, *ipn.ServeConfig) error QueryFeature(ctx context.Context, feature string) (*tailcfg.QueryFeatureResponse, error) - WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*tailscale.IPNBusWatcher, error) + WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*local.IPNBusWatcher, error) IncrementCounter(ctx context.Context, name string, delta int) error GetPrefs(ctx context.Context) (*ipn.Prefs, error) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) diff --git a/cmd/tailscale/cli/serve_legacy_test.go b/cmd/tailscale/cli/serve_legacy_test.go index 1ea76e72c..c509508df 100644 --- a/cmd/tailscale/cli/serve_legacy_test.go +++ b/cmd/tailscale/cli/serve_legacy_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" @@ -925,7 +925,7 @@ func (lc *fakeLocalServeClient) QueryFeature(ctx context.Context, feature string return &tailcfg.QueryFeatureResponse{Complete: true}, nil // fallback to already enabled } -func (lc *fakeLocalServeClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*tailscale.IPNBusWatcher, error) { +func (lc *fakeLocalServeClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*local.IPNBusWatcher, error) { return nil, nil // unused in tests } diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index acefd881f..903036db4 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -24,7 +24,7 @@ import ( "strings" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" @@ -365,7 +365,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { } } - var watcher *tailscale.IPNBusWatcher + var watcher *local.IPNBusWatcher svcName := noService if forService { @@ -426,7 +426,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { } if err := e.lc.SetServeConfig(ctx, parentSC); err != nil { - if 
tailscale.IsPreconditionsFailedError(err) { + if local.IsPreconditionsFailedError(err) { fmt.Fprintln(e.stderr(), "Another client is changing the serve config; please try again.") } return err diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 445320636..a983f1c09 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -85,7 +85,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep 💣 tailscale.com/atomicfile from tailscale.com/cmd/tailscale/cli+ tailscale.com/client/local from tailscale.com/client/tailscale+ L tailscale.com/client/systray from tailscale.com/cmd/tailscale/cli - tailscale.com/client/tailscale from tailscale.com/cmd/tailscale/cli+ + tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/cmd/tailscale/cli tailscale.com/clientupdate from tailscale.com/client/web+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 3d9368143..3ca570772 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -244,9 +244,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ LD tailscale.com/chirp from tailscale.com/cmd/tailscaled - tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/derp - tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ + tailscale.com/client/local from tailscale.com/client/web+ + tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate @@ -388,7 +387,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/netlogtype from tailscale.com/net/connstats+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ - tailscale.com/types/opt from tailscale.com/client/tailscale+ + tailscale.com/types/opt from tailscale.com/control/controlknobs+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ tailscale.com/types/ptr from tailscale.com/control/controlclient+ @@ -410,7 +409,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/groupmember from tailscale.com/client/web+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httphdr from tailscale.com/feature/taildrop - tailscale.com/util/httpm from tailscale.com/client/tailscale+ + tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns+ tailscale.com/util/mak from tailscale.com/control/controlclient+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index efe9456d8..38d2c76c0 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -218,7 +218,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from 
tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/derp+ + tailscale.com/client/tailscale from tailscale.com/tsnet tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ diff --git a/derp/derp_server.go b/derp/derp_server.go index bd67e7eec..f0c635a5a 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -38,7 +38,6 @@ import ( "go4.org/mem" "golang.org/x/sync/errgroup" "tailscale.com/client/local" - "tailscale.com/client/tailscale" "tailscale.com/derp/derpconst" "tailscale.com/disco" "tailscale.com/envknob" @@ -1384,7 +1383,7 @@ func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, inf // tailscaled-based verification: if s.verifyClientsLocalTailscaled { _, err := s.localClient.WhoIsNodeKey(ctx, clientKey) - if err == tailscale.ErrPeerNotFound { + if err == local.ErrPeerNotFound { return fmt.Errorf("peer %v not authorized (not found in local tailscaled)", clientKey) } if err != nil { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 187237e2f..ed61de531 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -214,7 +214,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/derp+ + tailscale.com/client/tailscale from tailscale.com/tsnet tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ LDW tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ From 2da52dce7aba5150b7b9e637b9fb0d7307fed916 Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 8 Sep 2025 15:02:43 +0000 Subject: [PATCH 1281/1708] licenses: update license notices Signed-off-by: License Updater --- licenses/apple.md | 10 +++++----- licenses/windows.md | 12 ++++++------ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/licenses/apple.md b/licenses/apple.md index 81359b270..91ba96698 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -68,13 +68,13 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.40.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.41.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.42.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.43.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.16.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.34.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.33.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.27.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.35.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.34.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.28.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index 5c000cc9f..aff149d4d 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -72,18 +72,18 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.40.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.41.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.26.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.42.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.43.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.16.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.34.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.33.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.27.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.35.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.34.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.28.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.6/LICENSE)) + - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.7/LICENSE)) - [gopkg.in/Knetic/govaluate.v3](https://pkg.go.dev/gopkg.in/Knetic/govaluate.v3) ([MIT](https://github.com/Knetic/govaluate/blob/v3.0.0/LICENSE)) - [gopkg.in/yaml.v3](https://pkg.go.dev/gopkg.in/yaml.v3) ([MIT](https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) From 6f9f190f4d8655a1699c8424a7e0f7860349023d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 8 Sep 2025 11:23:32 -0700 Subject: [PATCH 1282/1708] go.toolchain.rev: bump to Go 
1.25.1 Updates #17064 Change-Id: Ibbca837e0921fe9f82fc931dde8bb51b017e4e48 Signed-off-by: Brad Fitzpatrick --- go.mod | 2 +- go.toolchain.rev | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index e6c480494..6883d2552 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.25.0 +go 1.25.1 require ( filippo.io/mkcert v1.4.4 diff --git a/go.toolchain.rev b/go.toolchain.rev index 9c2417e7c..1fd4f3df2 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -f3339c88ea24212cc3cd49b64ad1045b85db23bf +aa85d1541af0921f830f053f29d91971fa5838f6 From ffc82ad82014b03533e7214a2b259e62801d2191 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 8 Sep 2025 09:33:39 -0700 Subject: [PATCH 1283/1708] util/eventbus: add ts_omit_debugeventbus Updates #17063 Change-Id: Ibc98dd2088f82c829effa71f72f3e2a5abda5038 Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 2 +- util/eventbus/debughttp.go | 2 +- util/eventbus/debughttp_off.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/build_dist.sh b/build_dist.sh index 12f366e06..9514d53b6 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,7 +41,7 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" - tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm,ts_omit_syspolicy" + tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm,ts_omit_syspolicy,ts_omit_debugeventbus" ;; --box) if [ ! -z "${TAGS:-}" ]; then diff --git a/util/eventbus/debughttp.go b/util/eventbus/debughttp.go index a94eaa9cf..617502b93 100644 --- a/util/eventbus/debughttp.go +++ b/util/eventbus/debughttp.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android +//go:build !ios && !android && !ts_omit_debugeventbus package eventbus diff --git a/util/eventbus/debughttp_off.go b/util/eventbus/debughttp_off.go index 85330579c..7d9fb327c 100644 --- a/util/eventbus/debughttp_off.go +++ b/util/eventbus/debughttp_off.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build ios || android +//go:build ios || android || ts_omit_debugeventbus package eventbus From 3e4b0c1516819ea47a90189a4f116a2e44b97e39 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 8 Sep 2025 12:09:29 -0700 Subject: [PATCH 1284/1708] cmd/tailscale, ipn/ipnlocal: add ts_omit_webclient Fixes #17063 Updates #12614 Change-Id: I0a189f6a4d1c4558351e3195839867725774fa96 Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 2 +- cmd/tailscale/cli/cli.go | 3 ++- cmd/tailscale/cli/set.go | 4 ++-- cmd/tailscale/cli/web.go | 41 ++++++++++++++++++++------------- cmd/tailscaled/deps_test.go | 20 ++++++++++++++++ ipn/ipnlocal/web_client.go | 5 ++-- ipn/ipnlocal/web_client_stub.go | 2 +- tsconst/webclient.go | 9 ++++++++ 8 files changed, 63 insertions(+), 23 deletions(-) create mode 100644 tsconst/webclient.go diff --git a/build_dist.sh b/build_dist.sh index 9514d53b6..57231eb70 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,7 +41,7 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" - 
tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm,ts_omit_syspolicy,ts_omit_debugeventbus" + tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm,ts_omit_syspolicy,ts_omit_debugeventbus,ts_omit_webclient" ;; --box) if [ ! -z "${TAGS:-}" ]; then diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 42f1cb3a5..46aa29c71 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -209,6 +209,7 @@ func noDupFlagify(c *ffcli.Command) { var fileCmd func() *ffcli.Command var sysPolicyCmd func() *ffcli.Command +var maybeWebCmd func() *ffcli.Command func newRootCmd() *ffcli.Command { rootfs := newFlagSet("tailscale") @@ -251,7 +252,7 @@ change in the future. funnelCmd(), serveCmd(), versionCmd, - webCmd, + nilOrCall(maybeWebCmd), nilOrCall(fileCmd), bugReportCmd, certCmd, diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index a1c6987e8..c0ce0b1c1 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -15,13 +15,13 @@ import ( "strings" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/web" "tailscale.com/clientupdate" "tailscale.com/cmd/tailscale/cli/ffcomplete" "tailscale.com/ipn" "tailscale.com/net/netutil" "tailscale.com/net/tsaddr" "tailscale.com/safesocket" + "tailscale.com/tsconst" "tailscale.com/types/opt" "tailscale.com/types/ptr" "tailscale.com/types/views" @@ -264,7 +264,7 @@ func runSet(ctx context.Context, args []string) (retErr error) { } if setArgs.runWebClient && len(st.TailscaleIPs) > 0 { - printf("\nWeb interface now running at %s:%d\n", st.TailscaleIPs[0], web.ListenPort) + printf("\nWeb interface now running at %s:%d\n", st.TailscaleIPs[0], tsconst.WebListenPort) } return nil diff --git a/cmd/tailscale/cli/web.go b/cmd/tailscale/cli/web.go index 5e1821dd0..2713f730b 100644 --- a/cmd/tailscale/cli/web.go +++ b/cmd/tailscale/cli/web.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_webclient + package cli import ( @@ -22,14 +24,20 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/client/web" "tailscale.com/ipn" + "tailscale.com/tsconst" ) -var webCmd = &ffcli.Command{ - Name: "web", - ShortUsage: "tailscale web [flags]", - ShortHelp: "Run a web server for controlling Tailscale", +func init() { + maybeWebCmd = webCmd +} + +func webCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "web", + ShortUsage: "tailscale web [flags]", + ShortHelp: "Run a web server for controlling Tailscale", - LongHelp: strings.TrimSpace(` + LongHelp: strings.TrimSpace(` "tailscale web" runs a webserver for controlling the Tailscale daemon. It's primarily intended for use on Synology, QNAP, and other @@ -37,16 +45,17 @@ NAS devices where a web interface is the natural place to control Tailscale, as opposed to a CLI or a native app. 
`), - FlagSet: (func() *flag.FlagSet { - webf := newFlagSet("web") - webf.StringVar(&webArgs.listen, "listen", "localhost:8088", "listen address; use port 0 for automatic") - webf.BoolVar(&webArgs.cgi, "cgi", false, "run as CGI script") - webf.StringVar(&webArgs.prefix, "prefix", "", "URL prefix added to requests (for cgi or reverse proxies)") - webf.BoolVar(&webArgs.readonly, "readonly", false, "run web UI in read-only mode") - webf.StringVar(&webArgs.origin, "origin", "", "origin at which the web UI is served (if behind a reverse proxy or used with cgi)") - return webf - })(), - Exec: runWeb, + FlagSet: (func() *flag.FlagSet { + webf := newFlagSet("web") + webf.StringVar(&webArgs.listen, "listen", "localhost:8088", "listen address; use port 0 for automatic") + webf.BoolVar(&webArgs.cgi, "cgi", false, "run as CGI script") + webf.StringVar(&webArgs.prefix, "prefix", "", "URL prefix added to requests (for cgi or reverse proxies)") + webf.BoolVar(&webArgs.readonly, "readonly", false, "run web UI in read-only mode") + webf.StringVar(&webArgs.origin, "origin", "", "origin at which the web UI is served (if behind a reverse proxy or used with cgi)") + return webf + })(), + Exec: runWeb, + } } var webArgs struct { @@ -101,7 +110,7 @@ func runWeb(ctx context.Context, args []string) error { var startedManagementClient bool // we started the management client if !existingWebClient && !webArgs.readonly { // Also start full client in tailscaled. - log.Printf("starting tailscaled web client at http://%s\n", netip.AddrPortFrom(selfIP, web.ListenPort)) + log.Printf("starting tailscaled web client at http://%s\n", netip.AddrPortFrom(selfIP, tsconst.WebListenPort)) if err := setRunWebClient(ctx, true); err != nil { return fmt.Errorf("starting web client in tailscaled: %w", err) } diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 6d2ea3837..a672e32e2 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -4,6 +4,7 @@ package main import ( + "strings" "testing" "tailscale.com/tstest/deptest" @@ -41,3 +42,22 @@ func TestOmitSyspolicy(t *testing.T) { }, }.Check(t) } + +// Test that we can build a binary without reflect.MethodByName. +// See https://github.com/tailscale/tailscale/issues/17063 +func TestOmitReflectThings(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_include_cli,ts_omit_systray,ts_omit_debugeventbus,ts_omit_webclient", + BadDeps: map[string]string{ + "text/template": "unexpected text/template usage", + "html/template": "unexpected text/template usage", + }, + OnDep: func(dep string) { + if strings.Contains(dep, "systray") { + t.Errorf("unexpected systray dep %q", dep) + } + }, + }.Check(t) +} diff --git a/ipn/ipnlocal/web_client.go b/ipn/ipnlocal/web_client.go index 18145d1bb..7cfb30ca4 100644 --- a/ipn/ipnlocal/web_client.go +++ b/ipn/ipnlocal/web_client.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android +//go:build !ios && !android && !ts_omit_webclient package ipnlocal @@ -22,11 +22,12 @@ import ( "tailscale.com/logtail/backoff" "tailscale.com/net/netutil" "tailscale.com/tailcfg" + "tailscale.com/tsconst" "tailscale.com/types/logger" "tailscale.com/util/mak" ) -const webClientPort = web.ListenPort +const webClientPort = tsconst.WebListenPort // webClient holds state for the web interface for managing this // tailscale instance. 
The web interface is not used by default, diff --git a/ipn/ipnlocal/web_client_stub.go b/ipn/ipnlocal/web_client_stub.go index 31735de25..5f37560cc 100644 --- a/ipn/ipnlocal/web_client_stub.go +++ b/ipn/ipnlocal/web_client_stub.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build ios || android +//go:build ios || android || ts_omit_webclient package ipnlocal diff --git a/tsconst/webclient.go b/tsconst/webclient.go new file mode 100644 index 000000000..d4b3c8db5 --- /dev/null +++ b/tsconst/webclient.go @@ -0,0 +1,9 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tsconst + +// WebListenPort is the static port used for the web client when run inside +// tailscaled. (5252 are the numbers above the letters "TSTS" on a qwerty +// keyboard.) +const WebListenPort = 5252 From f4ae81e015c32918e1198f0f2e2b0dd6332d4c99 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 9 Sep 2025 14:12:08 +0100 Subject: [PATCH 1285/1708] tsnet: remove APIClient() which is deprecated and now unused (#17073) Updates tailscale/corp#22748 Signed-off-by: Alex Chan --- cmd/tsidp/depaware.txt | 7 +++---- tsnet/depaware.txt | 7 +++---- tsnet/tsnet.go | 20 -------------------- 3 files changed, 6 insertions(+), 28 deletions(-) diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 38d2c76c0..cfe44d1dc 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -217,8 +217,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ - tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/tsnet + tailscale.com/client/local from tailscale.com/client/web+ tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ @@ -343,7 +342,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/netlogtype from tailscale.com/net/connstats+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ - tailscale.com/types/opt from tailscale.com/client/tailscale+ + tailscale.com/types/opt from tailscale.com/cmd/tsidp+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ tailscale.com/types/ptr from tailscale.com/control/controlclient+ @@ -364,7 +363,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash - tailscale.com/util/httpm from tailscale.com/client/tailscale+ + tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns+ tailscale.com/util/mak from tailscale.com/appc+ diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index ed61de531..74f3f8c53 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -213,8 +213,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com from tailscale.com/version 
tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ - tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/tsnet + tailscale.com/client/local from tailscale.com/client/web+ tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ LDW tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ @@ -338,7 +337,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/netlogtype from tailscale.com/net/connstats+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ - tailscale.com/types/opt from tailscale.com/client/tailscale+ + tailscale.com/types/opt from tailscale.com/control/controlknobs+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ tailscale.com/types/ptr from tailscale.com/control/controlclient+ @@ -359,7 +358,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash - tailscale.com/util/httpm from tailscale.com/client/tailscale+ + tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns+ tailscale.com/util/mak from tailscale.com/appc+ diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 4cb977c73..359fbc1c5 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -27,7 +27,6 @@ import ( "time" "tailscale.com/client/local" - "tailscale.com/client/tailscale" "tailscale.com/control/controlclient" "tailscale.com/envknob" "tailscale.com/health" @@ -910,25 +909,6 @@ func (s *Server) getUDPHandlerForFlow(src, dst netip.AddrPort) (handler func(net return func(c nettype.ConnPacketConn) { ln.handle(c) }, true } -// APIClient returns a tailscale.Client that can be used to make authenticated -// requests to the Tailscale control server. -// It requires the user to set tailscale.I_Acknowledge_This_API_Is_Unstable. -// -// Deprecated: use AuthenticatedAPITransport with tailscale.com/client/tailscale/v2 instead. -func (s *Server) APIClient() (*tailscale.Client, error) { - if !tailscale.I_Acknowledge_This_API_Is_Unstable { - return nil, errors.New("use of Client without setting I_Acknowledge_This_API_Is_Unstable") - } - if err := s.Start(); err != nil { - return nil, err - } - - c := tailscale.NewClient("-", nil) - c.UserAgent = "tailscale-tsnet" - c.HTTPClient = &http.Client{Transport: s.lb.KeyProvingNoiseRoundTripper()} - return c, nil -} - // I_Acknowledge_This_API_Is_Experimental must be set true to use AuthenticatedAPITransport() // for now. var I_Acknowledge_This_API_Is_Experimental = false From f1ded844540f66c1a426fa54700ee626a0f9e658 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 9 Sep 2025 07:36:55 -0700 Subject: [PATCH 1286/1708] cmd/tailscaled: add disabled debug file to force reflect for binary size experiments This adds a file that's not compiled by default that exists just to make it easier to do binary size checks, probing what a binary would be like if it included reflect methods (as used by html/template, etc). 
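For context, the linker's conservative mode is triggered by any reflect MethodByName lookup whose argument is not a compile-time constant string; a minimal standalone sketch of that pattern follows (illustrative only, not part of this patch — the names greeter/Hello are invented for the example, mirroring the same trick used in the new debug file below):

	// sketch.go - standalone illustration, not part of the Tailscale tree.
	package main

	import (
		"fmt"
		"os"
		"reflect"
	)

	type greeter struct{}

	// Hello is an exported method; once the program performs a MethodByName
	// lookup with a non-constant name, the linker must assume any exported
	// method may be reached and cannot dead-code-eliminate it.
	func (greeter) Hello() string { return "hello" }

	func main() {
		// Derive the method name from a runtime condition so the compiler
		// cannot treat the MethodByName argument as a constant string.
		name := "Hello"
		if len(os.Args) > 10 {
			name = "Other"
		}
		m, ok := reflect.TypeOf(greeter{}).MethodByName(name)
		fmt.Println(m.Name, ok) // prints "Hello true" in the normal case
	}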
As an example, once tailscaled uses reflect.Type.MethodByName(non-const-string) anywhere, the build jumps up by 14.5 MB: $ GOOS=linux GOARCH=amd64 ./tool/go build -tags=ts_include_cli,ts_omit_webclient,ts_omit_systray,ts_omit_debugeventbus -o before ./cmd/tailscaled $ GOOS=linux GOARCH=amd64 ./tool/go build -tags=ts_include_cli,ts_omit_webclient,ts_omit_systray,ts_omit_debugeventbus,ts_debug_forcereflect -o after ./cmd/tailscaled $ ls -l before after -rwxr-xr-x@ 1 bradfitz staff 41011861 Sep 9 07:28 before -rwxr-xr-x@ 1 bradfitz staff 55610948 Sep 9 07:29 after This is particularly pronounced with large deps like the AWS SDK. If you compare using ts_omit_aws: -rwxr-xr-x@ 1 bradfitz staff 38284771 Sep 9 07:40 no-aws-no-reflect -rwxr-xr-x@ 1 bradfitz staff 45546491 Sep 9 07:41 no-aws-with-reflect That means adding AWS to a non-reflect binary adds 2.7 MB but adding AWS to a reflect binary adds 10 MB. Updates #17063 Updates #12614 Change-Id: I18e9b77c9cf33565ce5bba65ac5584fa9433f7fb Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/debug_forcereflect.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 cmd/tailscaled/debug_forcereflect.go diff --git a/cmd/tailscaled/debug_forcereflect.go b/cmd/tailscaled/debug_forcereflect.go new file mode 100644 index 000000000..7378753ce --- /dev/null +++ b/cmd/tailscaled/debug_forcereflect.go @@ -0,0 +1,26 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_debug_forcereflect + +// This file exists for benchmarking binary sizes. When the build tag is +// enabled, it forces use of part of the reflect package that makes the Go +// linker go into conservative retention mode where its deadcode pass can't +// eliminate exported method. + +package main + +import ( + "reflect" + "time" +) + +func init() { + // See Go's src/cmd/compile/internal/walk/expr.go:usemethod for + // why this is isn't a const. + name := []byte("Bar") + if time.Now().Unix()&1 == 0 { + name[0] = 'X' + } + _, _ = reflect.TypeOf(12).MethodByName(string(name)) +} From 77250a301aee83d67c1bbe497391500f7c70e7b4 Mon Sep 17 00:00:00 2001 From: Nick O'Neill Date: Tue, 9 Sep 2025 09:03:01 -0700 Subject: [PATCH 1287/1708] ipn/ipnlocal, types: plumb tailnet display name cap through to network profile (#17045) Updates tailscale/corp#30456 Signed-off-by: Nick O'Neill --- ipn/ipnlocal/local.go | 8 ++++++++ ipn/ipnlocal/node_backend.go | 1 + ipn/prefs.go | 1 + types/netmap/netmap.go | 16 ++++++++++++++++ 4 files changed, 26 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 7592e9b4b..2d917ae54 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1650,12 +1650,18 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control prefsChanged = true } + // If the tailnet's display name has changed, update prefs. + if st.NetMap != nil && st.NetMap.TailnetDisplayName() != b.pm.CurrentProfile().NetworkProfile().DisplayName { + prefsChanged = true + } + // Perform all mutations of prefs based on the netmap here. if prefsChanged { // Prefs will be written out if stale; this is not safe unless locked or cloned. 
if err := b.pm.SetPrefs(prefs.View(), ipn.NetworkProfile{ MagicDNSName: curNetMap.MagicDNSSuffix(), DomainName: curNetMap.DomainName(), + DisplayName: curNetMap.TailnetDisplayName(), }); err != nil { b.logf("Failed to save new controlclient state: %v", err) } @@ -1716,6 +1722,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control if err := b.pm.SetPrefs(p, ipn.NetworkProfile{ MagicDNSName: st.NetMap.MagicDNSSuffix(), DomainName: st.NetMap.DomainName(), + DisplayName: st.NetMap.TailnetDisplayName(), }); err != nil { b.logf("Failed to save new controlclient state: %v", err) } @@ -6185,6 +6192,7 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { if err := b.pm.SetPrefs(prefs.View(), ipn.NetworkProfile{ MagicDNSName: nm.MagicDNSSuffix(), DomainName: nm.DomainName(), + DisplayName: nm.TailnetDisplayName(), }); err != nil { b.logf("failed to save exit node changes: %v", err) } diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index a3889b643..4319ed372 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -168,6 +168,7 @@ func (nb *nodeBackend) NetworkProfile() ipn.NetworkProfile { // These are ok to call with nil netMap. MagicDNSName: nb.netMap.MagicDNSSuffix(), DomainName: nb.netMap.DomainName(), + DisplayName: nb.netMap.TailnetDisplayName(), } } diff --git a/ipn/prefs.go b/ipn/prefs.go index 88c73ead3..7c3c50f73 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -988,6 +988,7 @@ type WindowsUserID string type NetworkProfile struct { MagicDNSName string DomainName string + DisplayName string } // RequiresBackfill returns whether this object does not have all the data diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index 963f80a44..cc6bec1db 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -252,6 +252,22 @@ func (nm *NetworkMap) DomainName() string { return nm.Domain } +// TailnetDisplayName returns the admin-editable name contained in +// NodeAttrTailnetDisplayName. If the capability is not present it +// returns an empty string. +func (nm *NetworkMap) TailnetDisplayName() string { + if nm == nil || !nm.SelfNode.Valid() { + return "" + } + + tailnetDisplayNames, err := tailcfg.UnmarshalNodeCapViewJSON[string](nm.SelfNode.CapMap(), tailcfg.NodeAttrTailnetDisplayName) + if err != nil || len(tailnetDisplayNames) == 0 { + return "" + } + + return tailnetDisplayNames[0] +} + // HasSelfCapability reports whether nm.SelfNode contains capability c. // // It exists to satisify an unused (as of 2025-01-04) interface in the logknob package. From 88d7db33dab4bab8a0ae7beb3838b82898488a87 Mon Sep 17 00:00:00 2001 From: nikiUppal-TS Date: Tue, 9 Sep 2025 16:02:56 -0500 Subject: [PATCH 1288/1708] cmd/tailscale: use tailnet display name on cli (#17079) Updates cli to use tailnet display name Updates tailscale/corp#32108 Signed-off-by: nikiUppal-TS --- cmd/tailscale/cli/switch.go | 18 +++++++++++++----- ipn/prefs.go | 8 ++++++++ 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/cmd/tailscale/cli/switch.go b/cmd/tailscale/cli/switch.go index af8b51326..0677da1b3 100644 --- a/cmd/tailscale/cli/switch.go +++ b/cmd/tailscale/cli/switch.go @@ -24,7 +24,7 @@ var switchCmd = &ffcli.Command{ LongHelp: `"tailscale switch" switches between logged in accounts. You can use the ID that's returned from 'tailnet switch -list' to pick which profile you want to switch to. Alternatively, you -can use the Tailnet or the account names to switch as well. 
+can use the Tailnet, account names, or display names to switch as well. This command is currently in alpha and may change in the future.`, @@ -46,7 +46,7 @@ func init() { seen := make(map[string]bool, 3*len(all)) wordfns := []func(prof ipn.LoginProfile) string{ func(prof ipn.LoginProfile) string { return string(prof.ID) }, - func(prof ipn.LoginProfile) string { return prof.NetworkProfile.DomainName }, + func(prof ipn.LoginProfile) string { return prof.NetworkProfile.DisplayNameOrDefault() }, func(prof ipn.LoginProfile) string { return prof.Name }, } @@ -57,7 +57,7 @@ func init() { continue } seen[word] = true - words = append(words, fmt.Sprintf("%s\tid: %s, tailnet: %s, account: %s", word, prof.ID, prof.NetworkProfile.DomainName, prof.Name)) + words = append(words, fmt.Sprintf("%s\tid: %s, tailnet: %s, account: %s", word, prof.ID, prof.NetworkProfile.DisplayNameOrDefault(), prof.Name)) } } return words, ffcomplete.ShellCompDirectiveNoFileComp, nil @@ -86,7 +86,7 @@ func listProfiles(ctx context.Context) error { } printRow( string(prof.ID), - prof.NetworkProfile.DomainName, + prof.NetworkProfile.DisplayNameOrDefault(), name, ) } @@ -107,7 +107,7 @@ func switchProfile(ctx context.Context, args []string) error { os.Exit(1) } var profID ipn.ProfileID - // Allow matching by ID, Tailnet, or Account + // Allow matching by ID, Tailnet, Account, or Display Name // in that order. for _, p := range all { if p.ID == ipn.ProfileID(args[0]) { @@ -131,6 +131,14 @@ func switchProfile(ctx context.Context, args []string) error { } } } + if profID == "" { + for _, p := range all { + if p.NetworkProfile.DisplayName == args[0] { + profID = p.ID + break + } + } + } if profID == "" { errf("No profile named %q\n", args[0]) os.Exit(1) diff --git a/ipn/prefs.go b/ipn/prefs.go index 7c3c50f73..1efb5d0fe 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -5,6 +5,7 @@ package ipn import ( "bytes" + "cmp" "encoding/json" "errors" "fmt" @@ -1001,6 +1002,13 @@ func (n NetworkProfile) RequiresBackfill() bool { return n == NetworkProfile{} } +// DisplayNameOrDefault will always return a non-empty string. +// If there is a defined display name, it will return that. +// If they did not it will default to their domain name. +func (n NetworkProfile) DisplayNameOrDefault() string { + return cmp.Or(n.DisplayName, n.DomainName) +} + // LoginProfile represents a single login profile as managed // by the ProfileManager. 
type LoginProfile struct { From 09bfee2e06418c48397c988f1bb6d535186b67ca Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 9 Sep 2025 14:54:22 -0700 Subject: [PATCH 1289/1708] disco: add missing message types to MessageSummary (#17081) Updates tailscale/corp#30818 Signed-off-by: Jordan Whited --- disco/disco.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/disco/disco.go b/disco/disco.go index 1689d2a93..f58bc1b8c 100644 --- a/disco/disco.go +++ b/disco/disco.go @@ -287,12 +287,18 @@ func MessageSummary(m Message) string { return fmt.Sprintf("pong tx=%x", m.TxID[:6]) case *CallMeMaybe: return "call-me-maybe" + case *CallMeMaybeVia: + return "call-me-maybe-via" case *BindUDPRelayEndpoint: return "bind-udp-relay-endpoint" case *BindUDPRelayEndpointChallenge: return "bind-udp-relay-endpoint-challenge" case *BindUDPRelayEndpointAnswer: return "bind-udp-relay-endpoint-answer" + case *AllocateUDPRelayEndpointRequest: + return "allocate-udp-relay-endpoint-request" + case *AllocateUDPRelayEndpointResponse: + return "allocate-udp-relay-endpoint-response" default: return fmt.Sprintf("%#v", m) } From 2d9d869d3dbdf485c9d04276a84435b329d2739f Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 9 Sep 2025 15:38:08 -0700 Subject: [PATCH 1290/1708] wgengine/magicsock: fix debug disco printing of alloc resp disco keys (#17087) Updates tailscale/corp#30818 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 695039ea6..1bff7153b 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2411,11 +2411,11 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake msgType, sender.ShortString(), derpNodeSrc.ShortString()) return } else { - c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s, for %d<->%d", + c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s for %v<->%v", c.discoShort, epDisco.short, ep.publicKey.ShortString(), derpStr(src.String()), msgType, - req.ClientDisco[0], req.ClientDisco[1]) + req.ClientDisco[0].ShortString(), req.ClientDisco[1].ShortString()) } if c.filt == nil { From 1ec3d20d10d4cf400b26b938187820f111e912e3 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 10 Sep 2025 13:02:59 +0100 Subject: [PATCH 1291/1708] cmd/k8s-operator: simplify scope of e2e tests (#17076) Removes ACL edits from e2e tests in favour of trying to simplify the tests and separate the actual test logic from the environment setup logic as much as possible. Also aims to fit in with the requirements that will generally be filled anyway for most devs working on the operator; in particular using tags that fit in with our documentation. 
Updates tailscale/corp#32085 Change-Id: I7659246e39ec0b7bcc4ec0a00c6310f25fe6fac2 Signed-off-by: Tom Proctor --- cmd/k8s-operator/e2e/acl.hujson | 33 +++++ cmd/k8s-operator/e2e/ingress_test.go | 70 ++++++---- cmd/k8s-operator/e2e/main_test.go | 202 +++++++++------------------ cmd/k8s-operator/e2e/proxy_test.go | 88 +++--------- 4 files changed, 168 insertions(+), 225 deletions(-) create mode 100644 cmd/k8s-operator/e2e/acl.hujson diff --git a/cmd/k8s-operator/e2e/acl.hujson b/cmd/k8s-operator/e2e/acl.hujson new file mode 100644 index 000000000..1a7b61767 --- /dev/null +++ b/cmd/k8s-operator/e2e/acl.hujson @@ -0,0 +1,33 @@ +// To run the e2e tests against a tailnet, ensure its access controls are a +// superset of the following: +{ + "tagOwners": { + "tag:k8s-operator": [], + "tag:k8s": ["tag:k8s-operator"], + "tag:k8s-recorder": ["tag:k8s-operator"], + }, + "autoApprovers": { + // Could be relaxed if we coordinated with the cluster config, but this + // wide subnet maximises compatibility for most clusters. + "routes": { + "10.0.0.0/8": ["tag:k8s"], + }, + "services": { + "tag:k8s": ["tag:k8s"], + }, + }, + "grants": [ + { + "src": ["tag:k8s"], + "dst": ["tag:k8s", "tag:k8s-operator"], + "ip": ["tcp:80", "tcp:443"], + "app": { + "tailscale.com/cap/kubernetes": [{ + "impersonate": { + "groups": ["ts:e2e-test-proxy"], + }, + }], + }, + }, + ], +} \ No newline at end of file diff --git a/cmd/k8s-operator/e2e/ingress_test.go b/cmd/k8s-operator/e2e/ingress_test.go index 373dd2c7d..23f0711ec 100644 --- a/cmd/k8s-operator/e2e/ingress_test.go +++ b/cmd/k8s-operator/e2e/ingress_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -17,45 +18,63 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/config" kube "tailscale.com/k8s-operator" "tailscale.com/tstest" + "tailscale.com/types/ptr" + "tailscale.com/util/httpm" ) // See [TestMain] for test requirements. 
func TestIngress(t *testing.T) { - if tsClient == nil { - t.Skip("TestIngress requires credentials for a tailscale client") + if apiClient == nil { + t.Skip("TestIngress requires TS_API_CLIENT_SECRET set") } - ctx := context.Background() cfg := config.GetConfigOrDie() cl, err := client.New(cfg, client.Options{}) if err != nil { t.Fatal(err) } // Apply nginx - createAndCleanup(t, ctx, cl, &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "nginx", - Namespace: "default", - Labels: map[string]string{ - "app.kubernetes.io/name": "nginx", + createAndCleanup(t, cl, + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "nginx", + }, }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "nginx", - Image: "nginx", + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "nginx", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "nginx", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, }, }, - }, - }) + }) // Apply service to expose it as ingress svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test-ingress", Namespace: "default", Annotations: map[string]string{ - "tailscale.com/expose": "true", + "tailscale.com/expose": "true", + "tailscale.com/proxy-class": "prod", }, }, Spec: corev1.ServiceSpec{ @@ -71,10 +90,10 @@ func TestIngress(t *testing.T) { }, }, } - createAndCleanup(t, ctx, cl, svc) + createAndCleanup(t, cl, svc) // TODO: instead of timing out only when test times out, cancel context after 60s or so. - if err := wait.PollUntilContextCancel(ctx, time.Millisecond*100, true, func(ctx context.Context) (done bool, err error) { + if err := wait.PollUntilContextCancel(t.Context(), time.Millisecond*100, true, func(ctx context.Context) (done bool, err error) { maybeReadySvc := &corev1.Service{ObjectMeta: objectMeta("default", "test-ingress")} if err := get(ctx, cl, maybeReadySvc); err != nil { return false, err @@ -89,17 +108,20 @@ func TestIngress(t *testing.T) { } var resp *http.Response - if err := tstest.WaitFor(time.Second*60, func() error { + if err := tstest.WaitFor(time.Minute, func() error { // TODO(tomhjp): Get the tailnet DNS name from the associated secret instead. // If we are not the first tailnet node with the requested name, we'll get // a -N suffix. 
- resp, err = tsClient.HTTPClient.Get(fmt.Sprintf("http://%s-%s:80", svc.Namespace, svc.Name)) + req, err := http.NewRequest(httpm.GET, fmt.Sprintf("http://%s-%s:80", svc.Namespace, svc.Name), nil) if err != nil { return err } - return nil + ctx, cancel := context.WithTimeout(t.Context(), time.Second) + defer cancel() + resp, err = tailnetClient.HTTPClient().Do(req.WithContext(ctx)) + return err }); err != nil { - t.Fatalf("error trying to reach service: %v", err) + t.Fatalf("error trying to reach Service: %v", err) } if resp.StatusCode != http.StatusOK { diff --git a/cmd/k8s-operator/e2e/main_test.go b/cmd/k8s-operator/e2e/main_test.go index 5a1364e09..fb5e5c859 100644 --- a/cmd/k8s-operator/e2e/main_test.go +++ b/cmd/k8s-operator/e2e/main_test.go @@ -6,167 +6,89 @@ package e2e import ( "context" "errors" - "fmt" "log" "os" - "slices" "strings" "testing" + "time" - "github.com/go-logr/zapr" - "github.com/tailscale/hujson" - "go.uber.org/zap/zapcore" "golang.org/x/oauth2/clientcredentials" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/log" - kzap "sigs.k8s.io/controller-runtime/pkg/log/zap" "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn/store/mem" + "tailscale.com/tsnet" ) -const ( - e2eManagedComment = "// This is managed by the k8s-operator e2e tests" -) - +// This test suite is currently not run in CI. +// It requires some setup not handled by this code: +// - Kubernetes cluster with local kubeconfig for it (direct connection, no API server proxy) +// - Tailscale operator installed with --set apiServerProxyConfig.mode="true" +// - ACLs from acl.hujson +// - OAuth client secret in TS_API_CLIENT_SECRET env, with at least auth_keys write scope and tag:k8s tag var ( - tsClient *tailscale.Client - testGrants = map[string]string{ - "test-proxy": `{ - "src": ["tag:e2e-test-proxy"], - "dst": ["tag:k8s-operator"], - "app": { - "tailscale.com/cap/kubernetes": [{ - "impersonate": { - "groups": ["ts:e2e-test-proxy"], - }, - }], - }, - }`, - } + apiClient *tailscale.Client // For API calls to control. + tailnetClient *tsnet.Server // For testing real tailnet traffic. ) -// This test suite is currently not run in CI. -// It requires some setup not handled by this code: -// - Kubernetes cluster with tailscale operator installed -// - Current kubeconfig context set to connect to that cluster (directly, no operator proxy) -// - Operator installed with --set apiServerProxyConfig.mode="true" -// - ACLs that define tag:e2e-test-proxy tag. 
TODO(tomhjp): Can maybe replace this prereq onwards with an API key -// - OAuth client ID and secret in TS_API_CLIENT_ID and TS_API_CLIENT_SECRET env -// - OAuth client must have auth_keys and policy_file write for tag:e2e-test-proxy tag func TestMain(m *testing.M) { code, err := runTests(m) if err != nil { - log.Fatal(err) + log.Printf("Error: %v", err) + os.Exit(1) } os.Exit(code) } func runTests(m *testing.M) (int, error) { - zlog := kzap.NewRaw([]kzap.Opts{kzap.UseDevMode(true), kzap.Level(zapcore.DebugLevel)}...).Sugar() - logf.SetLogger(zapr.NewLogger(zlog.Desugar())) + secret := os.Getenv("TS_API_CLIENT_SECRET") + if secret != "" { + secretParts := strings.Split(secret, "-") + if len(secretParts) != 4 { + return 0, errors.New("TS_API_CLIENT_SECRET is not valid") + } + ctx := context.Background() + credentials := clientcredentials.Config{ + ClientID: secretParts[2], + ClientSecret: secret, + TokenURL: "https://login.tailscale.com/api/v2/oauth/token", + Scopes: []string{"auth_keys"}, + } + apiClient = tailscale.NewClient("-", nil) + apiClient.HTTPClient = credentials.Client(ctx) + + caps := tailscale.KeyCapabilities{ + Devices: tailscale.KeyDeviceCapabilities{ + Create: tailscale.KeyDeviceCreateCapabilities{ + Reusable: false, + Preauthorized: true, + Ephemeral: true, + Tags: []string{"tag:k8s"}, + }, + }, + } - if clientID := os.Getenv("TS_API_CLIENT_ID"); clientID != "" { - cleanup, err := setupClientAndACLs() + authKey, authKeyMeta, err := apiClient.CreateKeyWithExpiry(ctx, caps, 10*time.Minute) if err != nil { return 0, err } - defer func() { - err = errors.Join(err, cleanup()) - }() - } - - return m.Run(), nil -} - -func setupClientAndACLs() (cleanup func() error, _ error) { - ctx := context.Background() - credentials := clientcredentials.Config{ - ClientID: os.Getenv("TS_API_CLIENT_ID"), - ClientSecret: os.Getenv("TS_API_CLIENT_SECRET"), - TokenURL: "https://login.tailscale.com/api/v2/oauth/token", - Scopes: []string{"auth_keys", "policy_file"}, - } - tsClient = tailscale.NewClient("-", nil) - tsClient.HTTPClient = credentials.Client(ctx) - - if err := patchACLs(ctx, tsClient, func(acls *hujson.Value) { - for test, grant := range testGrants { - deleteTestGrants(test, acls) - addTestGrant(test, grant, acls) - } - }); err != nil { - return nil, err - } - - return func() error { - return patchACLs(ctx, tsClient, func(acls *hujson.Value) { - for test := range testGrants { - deleteTestGrants(test, acls) - } - }) - }, nil -} - -func patchACLs(ctx context.Context, tsClient *tailscale.Client, patchFn func(*hujson.Value)) error { - acls, err := tsClient.ACLHuJSON(ctx) - if err != nil { - return err - } - hj, err := hujson.Parse([]byte(acls.ACL)) - if err != nil { - return err - } - - patchFn(&hj) - - hj.Format() - acls.ACL = hj.String() - if _, err := tsClient.SetACLHuJSON(ctx, *acls, true); err != nil { - return err - } - - return nil -} + defer apiClient.DeleteKey(context.Background(), authKeyMeta.ID) -func addTestGrant(test, grant string, acls *hujson.Value) error { - v, err := hujson.Parse([]byte(grant)) - if err != nil { - return err - } - - // Add the managed comment to the first line of the grant object contents. 
- v.Value.(*hujson.Object).Members[0].Name.BeforeExtra = hujson.Extra(fmt.Sprintf("%s: %s\n", e2eManagedComment, test)) - - if err := acls.Patch([]byte(fmt.Sprintf(`[{"op": "add", "path": "/grants/-", "value": %s}]`, v.String()))); err != nil { - return err - } - - return nil -} - -func deleteTestGrants(test string, acls *hujson.Value) error { - grants := acls.Find("/grants") - - var patches []string - for i, g := range grants.Value.(*hujson.Array).Elements { - members := g.Value.(*hujson.Object).Members - if len(members) == 0 { - continue + tailnetClient = &tsnet.Server{ + Hostname: "test-proxy", + Ephemeral: true, + Store: &mem.Store{}, + AuthKey: authKey, } - comment := strings.TrimSpace(string(members[0].Name.BeforeExtra)) - if name, found := strings.CutPrefix(comment, e2eManagedComment+": "); found && name == test { - patches = append(patches, fmt.Sprintf(`{"op": "remove", "path": "/grants/%d"}`, i)) + _, err = tailnetClient.Up(ctx) + if err != nil { + return 0, err } + defer tailnetClient.Close() } - // Remove in reverse order so we don't affect the found indices as we mutate. - slices.Reverse(patches) - - if err := acls.Patch([]byte(fmt.Sprintf("[%s]", strings.Join(patches, ",")))); err != nil { - return err - } - - return nil + return m.Run(), nil } func objectMeta(namespace, name string) metav1.ObjectMeta { @@ -176,13 +98,25 @@ func objectMeta(namespace, name string) metav1.ObjectMeta { } } -func createAndCleanup(t *testing.T, ctx context.Context, cl client.Client, obj client.Object) { +func createAndCleanup(t *testing.T, cl client.Client, obj client.Object) { t.Helper() - if err := cl.Create(ctx, obj); err != nil { - t.Fatal(err) + + // Try to create the object first + err := cl.Create(t.Context(), obj) + if err != nil { + if apierrors.IsAlreadyExists(err) { + if updateErr := cl.Update(t.Context(), obj); updateErr != nil { + t.Fatal(updateErr) + } + } else { + t.Fatal(err) + } } + t.Cleanup(func() { - if err := cl.Delete(ctx, obj); err != nil { + // Use context.Background() for cleanup, as t.Context() is cancelled + // just before cleanup functions are called. + if err := cl.Delete(context.Background(), obj); err != nil { t.Errorf("error cleaning up %s %s/%s: %s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName(), err) } }) diff --git a/cmd/k8s-operator/e2e/proxy_test.go b/cmd/k8s-operator/e2e/proxy_test.go index eac983e88..b3010f97e 100644 --- a/cmd/k8s-operator/e2e/proxy_test.go +++ b/cmd/k8s-operator/e2e/proxy_test.go @@ -4,10 +4,8 @@ package e2e import ( - "context" "encoding/json" "fmt" - "strings" "testing" "time" @@ -17,18 +15,16 @@ import ( "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" - "tailscale.com/client/tailscale" - "tailscale.com/tsnet" + "tailscale.com/ipn" "tailscale.com/tstest" ) // See [TestMain] for test requirements. func TestProxy(t *testing.T) { - if tsClient == nil { - t.Skip("TestProxy requires credentials for a tailscale client") + if apiClient == nil { + t.Skip("TestIngress requires TS_API_CLIENT_SECRET set") } - ctx := context.Background() cfg := config.GetConfigOrDie() cl, err := client.New(cfg, client.Options{}) if err != nil { @@ -36,7 +32,7 @@ func TestProxy(t *testing.T) { } // Create role and role binding to allow a group we'll impersonate to do stuff. 
- createAndCleanup(t, ctx, cl, &rbacv1.Role{ + createAndCleanup(t, cl, &rbacv1.Role{ ObjectMeta: objectMeta("tailscale", "read-secrets"), Rules: []rbacv1.PolicyRule{{ APIGroups: []string{""}, @@ -44,7 +40,7 @@ func TestProxy(t *testing.T) { Resources: []string{"secrets"}, }}, }) - createAndCleanup(t, ctx, cl, &rbacv1.RoleBinding{ + createAndCleanup(t, cl, &rbacv1.RoleBinding{ ObjectMeta: objectMeta("tailscale", "read-secrets"), Subjects: []rbacv1.Subject{{ Kind: "Group", @@ -60,16 +56,14 @@ func TestProxy(t *testing.T) { operatorSecret := corev1.Secret{ ObjectMeta: objectMeta("tailscale", "operator"), } - if err := get(ctx, cl, &operatorSecret); err != nil { + if err := get(t.Context(), cl, &operatorSecret); err != nil { t.Fatal(err) } - // Connect to tailnet with test-specific tag so we can use the - // [testGrants] ACLs when connecting to the API server proxy - ts := tsnetServerWithTag(t, ctx, "tag:e2e-test-proxy") + // Join tailnet as a client of the API server proxy. proxyCfg := &rest.Config{ Host: fmt.Sprintf("https://%s:443", hostNameFromOperatorSecret(t, operatorSecret)), - Dial: ts.Dial, + Dial: tailnetClient.Dial, } proxyCl, err := client.New(proxyCfg, client.Options{}) if err != nil { @@ -82,8 +76,8 @@ func TestProxy(t *testing.T) { } // Wait for up to a minute the first time we use the proxy, to give it time // to provision the TLS certs. - if err := tstest.WaitFor(time.Second*60, func() error { - return get(ctx, proxyCl, &allowedSecret) + if err := tstest.WaitFor(time.Minute, func() error { + return get(t.Context(), proxyCl, &allowedSecret) }); err != nil { t.Fatal(err) } @@ -92,65 +86,25 @@ func TestProxy(t *testing.T) { forbiddenSecret := corev1.Secret{ ObjectMeta: objectMeta("default", "operator"), } - if err := get(ctx, proxyCl, &forbiddenSecret); err == nil || !apierrors.IsForbidden(err) { + if err := get(t.Context(), proxyCl, &forbiddenSecret); err == nil || !apierrors.IsForbidden(err) { t.Fatalf("expected forbidden error fetching secret from default namespace: %s", err) } } -func tsnetServerWithTag(t *testing.T, ctx context.Context, tag string) *tsnet.Server { - caps := tailscale.KeyCapabilities{ - Devices: tailscale.KeyDeviceCapabilities{ - Create: tailscale.KeyDeviceCreateCapabilities{ - Reusable: false, - Preauthorized: true, - Ephemeral: true, - Tags: []string{tag}, - }, - }, - } - - authKey, authKeyMeta, err := tsClient.CreateKey(ctx, caps) - if err != nil { - t.Fatal(err) +func hostNameFromOperatorSecret(t *testing.T, s corev1.Secret) string { + t.Helper() + prefsBytes, ok := s.Data[string(s.Data["_current-profile"])] + if !ok { + t.Fatalf("no state in operator Secret data: %#v", s.Data) } - t.Cleanup(func() { - if err := tsClient.DeleteKey(ctx, authKeyMeta.ID); err != nil { - t.Errorf("error deleting auth key: %s", err) - } - }) - ts := &tsnet.Server{ - Hostname: "test-proxy", - Ephemeral: true, - Dir: t.TempDir(), - AuthKey: authKey, - } - _, err = ts.Up(ctx) - if err != nil { + prefs := ipn.Prefs{} + if err := json.Unmarshal(prefsBytes, &prefs); err != nil { t.Fatal(err) } - t.Cleanup(func() { - if err := ts.Close(); err != nil { - t.Errorf("error shutting down tsnet.Server: %s", err) - } - }) - - return ts -} -func hostNameFromOperatorSecret(t *testing.T, s corev1.Secret) string { - profiles := map[string]any{} - if err := json.Unmarshal(s.Data["_profiles"], &profiles); err != nil { - t.Fatal(err) + if prefs.Persist == nil { + t.Fatalf("no hostname in operator Secret data: %#v", s.Data) } - key, ok := strings.CutPrefix(string(s.Data["_current-profile"]), 
"profile-") - if !ok { - t.Fatal(string(s.Data["_current-profile"])) - } - profile, ok := profiles[key] - if !ok { - t.Fatal(profiles) - } - - return ((profile.(map[string]any))["Name"]).(string) + return prefs.Persist.UserProfile.LoginName } From 6feb6f3c753aca44d284a3b1a103692e96c62aee Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 10 Sep 2025 12:36:53 -0700 Subject: [PATCH 1292/1708] wgengine/magicsock: add relayManager event logs (#17091) These are gated behind magicsock component debug logging. Updates tailscale/corp#30818 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 2 +- wgengine/magicsock/relaymanager.go | 78 ++++++++++++++++++++++++++---- 2 files changed, 69 insertions(+), 11 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 1bff7153b..8ab7957ca 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2411,7 +2411,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake msgType, sender.ShortString(), derpNodeSrc.ShortString()) return } else { - c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s for %v<->%v", + c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s disco[0]=%v disco[1]=%v", c.discoShort, epDisco.short, ep.publicKey.ShortString(), derpStr(src.String()), msgType, diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 8a1a4fcf5..4680832d9 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -6,6 +6,7 @@ package magicsock import ( "context" "errors" + "fmt" "net/netip" "sync" "time" @@ -76,8 +77,11 @@ type serverDiscoVNI struct { // relayHandshakeWork serves to track in-progress relay handshake work for a // [udprelay.ServerEndpoint]. This structure is immutable once initialized. type relayHandshakeWork struct { - wlb endpointWithLastBest - se udprelay.ServerEndpoint + wlb endpointWithLastBest + se udprelay.ServerEndpoint + server candidatePeerRelay + + handshakeGen uint32 // handshakeServerEndpoint() always writes to doneCh (len 1) when it // returns. It may end up writing the same event afterward to @@ -91,6 +95,26 @@ type relayHandshakeWork struct { cancel context.CancelFunc } +func (r *relayHandshakeWork) dlogf(format string, args ...any) { + if !r.wlb.ep.c.debugLogging.Load() { + return + } + var relay string + if r.server.nodeKey.IsZero() { + relay = "from-call-me-maybe-via" + } else { + relay = r.server.nodeKey.ShortString() + } + r.wlb.ep.c.logf("%s node=%v relay=%v handshakeGen=%d disco[0]=%v disco[1]=%v", + fmt.Sprintf(format, args...), + r.wlb.ep.publicKey.ShortString(), + relay, + r.handshakeGen, + r.se.ClientDisco[0].ShortString(), + r.se.ClientDisco[1].ShortString(), + ) +} + // newRelayServerEndpointEvent indicates a new [udprelay.ServerEndpoint] has // become known either via allocation with a relay server, or via // [disco.CallMeMaybeVia] reception. This structure is immutable once @@ -257,7 +281,9 @@ type relayDiscoMsgEvent struct { type relayEndpointAllocWork struct { wlb endpointWithLastBest discoKeys key.SortedPairOfDiscoPublic - candidatePeerRelay candidatePeerRelay + candidatePeerRelay candidatePeerRelay // zero value if learned via [disco.CallMeMaybeVia] + + allocGen uint32 // allocateServerEndpoint() always writes to doneCh (len 1) when it // returns. 
It may end up writing the same event afterward to @@ -271,6 +297,20 @@ type relayEndpointAllocWork struct { cancel context.CancelFunc } +func (r *relayEndpointAllocWork) dlogf(format string, args ...any) { + if !r.wlb.ep.c.debugLogging.Load() { + return + } + r.wlb.ep.c.logf("%s node=%v relay=%v allocGen=%d disco[0]=%v disco[1]=%v", + fmt.Sprintf(format, args...), + r.wlb.ep.publicKey.ShortString(), + r.candidatePeerRelay.nodeKey.ShortString(), + r.allocGen, + r.discoKeys.Get()[0].ShortString(), + r.discoKeys.Get()[1].ShortString(), + ) +} + // init initializes [relayManager] if it is not already initialized. func (r *relayManager) init() { r.initOnce.Do(func() { @@ -712,6 +752,7 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay work := &relayHandshakeWork{ wlb: newServerEndpoint.wlb, se: newServerEndpoint.se, + server: newServerEndpoint.server, rxDiscoMsgCh: make(chan relayDiscoMsgEvent), doneCh: make(chan relayEndpointHandshakeWorkDoneEvent, 1), ctx: ctx, @@ -728,8 +769,9 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay if r.handshakeGeneration == 0 { // generation must be nonzero r.handshakeGeneration++ } + work.handshakeGen = r.handshakeGeneration - go r.handshakeServerEndpoint(work, r.handshakeGeneration) + go r.handshakeServerEndpoint(work) } // sendCallMeMaybeVia sends a [disco.CallMeMaybeVia] to ep over DERP. It must be @@ -758,7 +800,7 @@ func (r *relayManager) sendCallMeMaybeVia(ep *endpoint, se udprelay.ServerEndpoi ep.c.sendDiscoMessage(epAddr{ap: derpAddr}, ep.publicKey, epDisco.key, callMeMaybeVia, discoVerboseLog) } -func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generation uint32) { +func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { done := relayEndpointHandshakeWorkDoneEvent{work: work} r.ensureDiscoInfoFor(work) @@ -777,10 +819,13 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat common := disco.BindUDPRelayEndpointCommon{ VNI: work.se.VNI, - Generation: generation, + Generation: work.handshakeGen, RemoteKey: epDisco.key, } + work.dlogf("[v1] magicsock: relayManager: starting handshake addrPorts=%v", + work.se.AddrPorts, + ) sentBindAny := false bind := &disco.BindUDPRelayEndpoint{ BindUDPRelayEndpointCommon: common, @@ -848,6 +893,7 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat for { select { case <-work.ctx.Done(): + work.dlogf("[v1] magicsock: relayManager: handshake canceled") return case msgEvent := <-work.rxDiscoMsgCh: switch msg := msgEvent.msg.(type) { @@ -859,12 +905,14 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat if handshakeState >= disco.BindUDPRelayHandshakeStateAnswerSent { continue } + work.dlogf("[v1] magicsock: relayManager: got handshake challenge from %v", msgEvent.from) txPing(msgEvent.from, &msg.Challenge) handshakeState = disco.BindUDPRelayHandshakeStateAnswerSent case *disco.Ping: if handshakeState < disco.BindUDPRelayHandshakeStateAnswerSent { continue } + work.dlogf("[v1] magicsock: relayManager: got relayed ping from %v", msgEvent.from) // An inbound ping from the remote peer indicates we completed a // handshake with the relay server (our answer msg was // received). Chances are our ping was dropped before the remote @@ -885,6 +933,10 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat // round-trip latency and return. 
done.pongReceivedFrom = msgEvent.from done.latency = time.Since(at) + work.dlogf("[v1] magicsock: relayManager: got relayed pong from %v latency=%v", + msgEvent.from, + done.latency.Round(time.Millisecond), + ) return default: // unexpected message type, silently discard @@ -892,6 +944,7 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat } case <-timer.C: // The handshake timed out. + work.dlogf("[v1] magicsock: relayManager: handshake timed out") return } } @@ -899,7 +952,7 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat const allocateUDPRelayEndpointRequestTimeout = time.Second * 10 -func (r *relayManager) allocateServerEndpoint(work *relayEndpointAllocWork, generation uint32) { +func (r *relayManager) allocateServerEndpoint(work *relayEndpointAllocWork) { done := relayEndpointAllocWorkDoneEvent{work: work} defer func() { @@ -910,7 +963,7 @@ func (r *relayManager) allocateServerEndpoint(work *relayEndpointAllocWork, gene dm := &disco.AllocateUDPRelayEndpointRequest{ ClientDisco: work.discoKeys.Get(), - Generation: generation, + Generation: work.allocGen, } sendAllocReq := func() { @@ -923,6 +976,7 @@ func (r *relayManager) allocateServerEndpoint(work *relayEndpointAllocWork, gene dm, discoVerboseLog, ) + work.dlogf("[v1] magicsock: relayManager: sent alloc request") } go sendAllocReq() @@ -938,16 +992,19 @@ func (r *relayManager) allocateServerEndpoint(work *relayEndpointAllocWork, gene for { select { case <-work.ctx.Done(): + work.dlogf("[v1] magicsock: relayManager: alloc request canceled") return case <-returnAfterTimer.C: + work.dlogf("[v1] magicsock: relayManager: alloc request timed out") return case <-retryAfterTimer.C: go sendAllocReq() case resp := <-work.rxDiscoMsgCh: - if resp.Generation != generation || + if resp.Generation != work.allocGen || !work.discoKeys.Equal(key.NewSortedPairOfDiscoPublic(resp.ClientDisco[0], resp.ClientDisco[1])) { continue } + work.dlogf("[v1] magicsock: relayManager: got alloc response") done.allocated = udprelay.ServerEndpoint{ ServerDisco: resp.ServerDisco, ClientDisco: resp.ClientDisco, @@ -1004,6 +1061,7 @@ func (r *relayManager) allocateAllServersRunLoop(wlb endpointWithLastBest) { } byCandidatePeerRelay[v] = started r.allocGeneration++ - go r.allocateServerEndpoint(started, r.allocGeneration) + started.allocGen = r.allocGeneration + go r.allocateServerEndpoint(started) } } From 32bfd7275234d336b6e2fc22d4e3889ba4f4c3cf Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 10 Sep 2025 16:30:25 -0700 Subject: [PATCH 1293/1708] tstest/integration/testcontrol: propagate CapVer (#17093) To support integration testing of client features that rely on it, e.g. peer relay. Updates tailscale/corp#30903 Signed-off-by: Jordan Whited --- tstest/integration/testcontrol/testcontrol.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 739795bb3..2fbf37de9 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -674,6 +674,7 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. 
AllowedIPs: allowedIPs, Hostinfo: req.Hostinfo.View(), Name: req.Hostinfo.Hostname, + Cap: req.Version, Capabilities: []tailcfg.NodeCapability{ tailcfg.CapabilityHTTPS, tailcfg.NodeAttrFunnel, @@ -811,6 +812,7 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi endpoints := filterInvalidIPv6Endpoints(req.Endpoints) node.Endpoints = endpoints node.DiscoKey = req.DiscoKey + node.Cap = req.Version if req.Hostinfo != nil { node.Hostinfo = req.Hostinfo.View() if ni := node.Hostinfo.NetInfo(); ni.Valid() { From fb9d9ba86e42680cde20c890de8857cbfe40f2c3 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 10 Sep 2025 16:48:40 -0700 Subject: [PATCH 1294/1708] wgengine/magicsock: add TS_DEBUG_NEVER_DIRECT_UDP debug knob (#17094) Updates tailscale/corp#30903 Signed-off-by: Jordan Whited --- wgengine/magicsock/debugknobs.go | 3 +++ wgengine/magicsock/debugknobs_stubs.go | 1 + wgengine/magicsock/endpoint.go | 3 +++ 3 files changed, 7 insertions(+) diff --git a/wgengine/magicsock/debugknobs.go b/wgengine/magicsock/debugknobs.go index f8fd9f040..b0a47ff87 100644 --- a/wgengine/magicsock/debugknobs.go +++ b/wgengine/magicsock/debugknobs.go @@ -62,6 +62,9 @@ var ( // //lint:ignore U1000 used on Linux/Darwin only debugPMTUD = envknob.RegisterBool("TS_DEBUG_PMTUD") + // debugNeverDirectUDP disables the use of direct UDP connections, forcing + // all peer communication over DERP or peer relay. + debugNeverDirectUDP = envknob.RegisterBool("TS_DEBUG_NEVER_DIRECT_UDP") // Hey you! Adding a new debugknob? Make sure to stub it out in the // debugknobs_stubs.go file too. ) diff --git a/wgengine/magicsock/debugknobs_stubs.go b/wgengine/magicsock/debugknobs_stubs.go index 336d7baa1..7dee1d6b0 100644 --- a/wgengine/magicsock/debugknobs_stubs.go +++ b/wgengine/magicsock/debugknobs_stubs.go @@ -31,3 +31,4 @@ func debugRingBufferMaxSizeBytes() int { return 0 } func inTest() bool { return false } func debugPeerMap() bool { return false } func pretendpoints() []netip.AddrPort { return []netip.AddrPort{} } +func debugNeverDirectUDP() bool { return false } diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index b8778b8d8..1f36aabd3 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1286,6 +1286,9 @@ func (de *endpoint) startDiscoPingLocked(ep epAddr, now mono.Time, purpose disco if runtime.GOOS == "js" { return } + if debugNeverDirectUDP() && !ep.vni.IsSet() && ep.ap.Addr() != tailcfg.DerpMagicIPAddr { + return + } epDisco := de.disco.Load() if epDisco == nil { return From 49aa798d18ac070de48aafec65cbd853ba18ed96 Mon Sep 17 00:00:00 2001 From: Will Hannah Date: Thu, 11 Sep 2025 10:56:02 -0700 Subject: [PATCH 1295/1708] VERSION.txt: this is v1.88.0 (#17098) Signed-off-by: Will Hannah --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index f63427167..59be59214 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.87.0 +1.88.0 From 1be9c6b23ed08befba62c3ca44b2e3f98f335a59 Mon Sep 17 00:00:00 2001 From: Will Hannah Date: Thu, 11 Sep 2025 11:19:17 -0700 Subject: [PATCH 1296/1708] VERSION.txt: this is v1.89.0 (#17099) Signed-off-by: Will Hannah --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index 59be59214..636ea711a 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.88.0 +1.89.0 From 921d77062ebfb4b4d26629278abea7ea55cfc942 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 7 Sep 2025 20:25:54 -0700 
Subject: [PATCH 1297/1708] cmd/omitsize: add tool to dump build sizes Updates #12614 Change-Id: I8f85d7275bc8eecedbabe6631b50e1cf70791d2d Signed-off-by: Brad Fitzpatrick --- cmd/omitsize/omitsize.go | 124 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 cmd/omitsize/omitsize.go diff --git a/cmd/omitsize/omitsize.go b/cmd/omitsize/omitsize.go new file mode 100644 index 000000000..d8e1a6540 --- /dev/null +++ b/cmd/omitsize/omitsize.go @@ -0,0 +1,124 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// The omitsize tool prints out how large the Tailscale binaries are with +// different build tags. +package main + +import ( + "crypto/sha256" + "flag" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "slices" + "strconv" + "strings" + + "tailscale.com/util/must" +) + +var ( + cacheDir = flag.String("cachedir", "", "if non-empty, use this directory to store cached size results to speed up subsequent runs. The tool does not consider the git status when deciding whether to use the cache. It's on you to nuke it between runs if the tree changed.") + features = flag.String("features", "", "comma-separated list of features to consider, with or without the ts_omit_ prefix (default: all detected in build_dist.sh)") +) + +func main() { + flag.Parse() + + var all []string + if *features == "" { + sh := must.Get(os.ReadFile("build_dist.sh")) + omitRx := regexp.MustCompile(`\b(ts_omit_\w+)\b`) + all = omitRx.FindAllString(string(sh), -1) + } else { + for v := range strings.SplitSeq(*features, ",") { + if !strings.HasPrefix(v, "ts_omit_") { + v = "ts_omit_" + v + } + all = append(all, v) + } + } + + slices.Sort(all) + all = slices.Compact(all) + + baseD := measure("tailscaled") + baseC := measure("tailscale") + baseBoth := measure("tailscaled", "ts_include_cli") + + fmt.Printf("(a) starting with everything and removing a feature...\n\n") + + fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") + fmt.Printf("%9d %9d %9d\n", baseD, baseC, baseBoth) + + minD := measure("tailscaled", all...) + minC := measure("tailscale", all...) + minBoth := measure("tailscaled", append(slices.Clone(all), "ts_include_cli")...) + fmt.Printf("-%8d -%8d -%8d omit-all\n", baseD-minD, baseC-minC, baseBoth-minBoth) + + for _, t := range all { + sizeD := measure("tailscaled", t) + sizeC := measure("tailscale", t) + sizeBoth := measure("tailscaled", append([]string{t}, "ts_include_cli")...) + saveD := max(baseD-sizeD, 0) + saveC := max(baseC-sizeC, 0) + saveBoth := max(baseBoth-sizeBoth, 0) + fmt.Printf("-%8d -%8d -%8d %s\n", saveD, saveC, saveBoth, t) + } + + fmt.Printf("\n(b) or, starting at minimal and adding one feature back...\n") + fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") + fmt.Printf("%9d %9d %9d omitting everything\n", minD, minC, minBoth) + for _, t := range all { + tags := allExcept(all, t) + sizeD := measure("tailscaled", tags...) + sizeC := measure("tailscale", tags...) + sizeBoth := measure("tailscaled", append(tags, "ts_include_cli")...) + fmt.Printf("+%8d +%8d +%8d .. 
add %s\n", max(sizeD-minD, 0), max(sizeC-minC, 0), max(sizeBoth-minBoth, 0), strings.TrimPrefix(t, "ts_omit_")) + } + +} + +func allExcept(all []string, omit string) []string { + return slices.DeleteFunc(slices.Clone(all), func(s string) bool { return s == omit }) +} + +func measure(bin string, tags ...string) int64 { + tags = slices.Clone(tags) + slices.Sort(tags) + tags = slices.Compact(tags) + comma := strings.Join(tags, ",") + + var cacheFile string + if *cacheDir != "" { + cacheFile = filepath.Join(*cacheDir, fmt.Sprintf("%02x", sha256.Sum256(fmt.Appendf(nil, "%s-%s.size", bin, comma)))) + if v, err := os.ReadFile(cacheFile); err == nil { + if size, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil { + return size + } + } + } + + cmd := exec.Command("go", "build", "-tags", strings.Join(tags, ","), "-o", "tmpbin", "./cmd/"+bin) + log.Printf("# Measuring %v", cmd.Args) + cmd.Env = append(os.Environ(), "CGO_ENABLED=0", "GOOS=linux", "GOARCH=amd64") + out, err := cmd.CombinedOutput() + if err != nil { + log.Fatalf("error measuring %q: %v, %s\n", bin, err, out) + } + fi, err := os.Stat("tmpbin") + if err != nil { + log.Fatal(err) + } + n := fi.Size() + if cacheFile != "" { + if err := os.WriteFile(cacheFile, fmt.Appendf(nil, "%d", n), 0644); err != nil { + log.Fatalf("error writing size to cache: %v\n", err) + } + } + return n +} From 82c5024f036c440ce22c6a2ff9bfe73b2fb991e3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 23 Jun 2025 21:24:37 -0700 Subject: [PATCH 1298/1708] net/netns: fix controlLogf doc Its doc said its signature matched a std signature, but it used Tailscale-specific types. Nowadays it's the caller (func control) that curries the logf/netmon and returns the std-matching signature. Updates #cleanup (while answering a question on Slack) Change-Id: Ic99de41fc6a1c720575a7f33c564d0bcfd9a2c30 Signed-off-by: Brad Fitzpatrick --- net/netns/netns_darwin.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/netns/netns_darwin.go b/net/netns/netns_darwin.go index f2ed16601..1f30f00d2 100644 --- a/net/netns/netns_darwin.go +++ b/net/netns/netns_darwin.go @@ -33,10 +33,8 @@ var bindToInterfaceByRouteEnv = envknob.RegisterBool("TS_BIND_TO_INTERFACE_BY_RO var errInterfaceStateInvalid = errors.New("interface state invalid") -// controlLogf marks c as necessary to dial in a separate network namespace. -// -// It's intentionally the same signature as net.Dialer.Control -// and net.ListenConfig.Control. +// controlLogf binds c to a particular interface as necessary to dial the +// provided (network, address). func controlLogf(logf logger.Logf, netMon *netmon.Monitor, network, address string, c syscall.RawConn) error { if isLocalhost(address) { // Don't bind to an interface for localhost connections. From a1dcf12b671e8668b1bd3eedc7cfcb4381b9d29c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 8 Sep 2025 08:13:49 -0700 Subject: [PATCH 1299/1708] feature/drive: start factoring out Taildrive, add ts_omit_drive build tag As of this commit (per the issue), the Taildrive code remains where it was, but in new files that are protected by the new ts_omit_drive build tag. Future commits will move it. 
Updates #17058 Change-Id: Idf0a51db59e41ae8da6ea2b11d238aefc48b219e Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 2 +- cmd/tailscale/cli/cli.go | 3 +- cmd/tailscale/cli/drive.go | 80 +++++---- cmd/tailscaled/depaware.txt | 1 + cmd/tailscaled/deps_test.go | 16 ++ cmd/tailscaled/tailscaled.go | 45 ++--- cmd/tailscaled/tailscaled_drive.go | 56 ++++++ feature/condregister/maybe_drive.go | 8 + feature/drive/drive.go | 5 + ipn/ipnlocal/drive.go | 163 ++++++++++++++++-- ipn/ipnlocal/drive_tomove.go | 30 ++++ ipn/ipnlocal/local.go | 139 +-------------- ipn/ipnlocal/peerapi.go | 95 ---------- ipn/ipnlocal/peerapi_drive.go | 110 ++++++++++++ ipn/localapi/localapi.go | 123 ------------- ipn/localapi/localapi_drive.go | 141 +++++++++++++++ .../tailscaled_deps_test_darwin.go | 1 + .../tailscaled_deps_test_freebsd.go | 1 + .../integration/tailscaled_deps_test_linux.go | 1 + .../tailscaled_deps_test_openbsd.go | 1 + .../tailscaled_deps_test_windows.go | 1 + 21 files changed, 582 insertions(+), 440 deletions(-) create mode 100644 cmd/tailscaled/tailscaled_drive.go create mode 100644 feature/condregister/maybe_drive.go create mode 100644 feature/drive/drive.go create mode 100644 ipn/ipnlocal/drive_tomove.go create mode 100644 ipn/ipnlocal/peerapi_drive.go create mode 100644 ipn/localapi/localapi_drive.go diff --git a/build_dist.sh b/build_dist.sh index 57231eb70..9dc879b1e 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,7 +41,7 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" - tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm,ts_omit_syspolicy,ts_omit_debugeventbus,ts_omit_webclient" + tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm,ts_omit_syspolicy,ts_omit_debugeventbus,ts_omit_webclient,ts_omit_drive" ;; --box) if [ ! -z "${TAGS:-}" ]; then diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 46aa29c71..39fdce60d 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -210,6 +210,7 @@ func noDupFlagify(c *ffcli.Command) { var fileCmd func() *ffcli.Command var sysPolicyCmd func() *ffcli.Command var maybeWebCmd func() *ffcli.Command +var maybeDriveCmd func() *ffcli.Command func newRootCmd() *ffcli.Command { rootfs := newFlagSet("tailscale") @@ -262,7 +263,7 @@ change in the future. 
updateCmd, whoisCmd, debugCmd(), - driveCmd, + nilOrCall(maybeDriveCmd), idTokenCmd, configureHostCmd(), systrayCmd, diff --git a/cmd/tailscale/cli/drive.go b/cmd/tailscale/cli/drive.go index 929852b4c..67536ace0 100644 --- a/cmd/tailscale/cli/drive.go +++ b/cmd/tailscale/cli/drive.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_drive + package cli import ( @@ -20,43 +22,49 @@ const ( driveListUsage = "tailscale drive list" ) -var driveCmd = &ffcli.Command{ - Name: "drive", - ShortHelp: "Share a directory with your tailnet", - ShortUsage: strings.Join([]string{ - driveShareUsage, - driveRenameUsage, - driveUnshareUsage, - driveListUsage, - }, "\n"), - LongHelp: buildShareLongHelp(), - UsageFunc: usageFuncNoDefaultValues, - Subcommands: []*ffcli.Command{ - { - Name: "share", - ShortUsage: driveShareUsage, - Exec: runDriveShare, - ShortHelp: "[ALPHA] Create or modify a share", - }, - { - Name: "rename", - ShortUsage: driveRenameUsage, - ShortHelp: "[ALPHA] Rename a share", - Exec: runDriveRename, - }, - { - Name: "unshare", - ShortUsage: driveUnshareUsage, - ShortHelp: "[ALPHA] Remove a share", - Exec: runDriveUnshare, - }, - { - Name: "list", - ShortUsage: driveListUsage, - ShortHelp: "[ALPHA] List current shares", - Exec: runDriveList, +func init() { + maybeDriveCmd = driveCmd +} + +func driveCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "drive", + ShortHelp: "Share a directory with your tailnet", + ShortUsage: strings.Join([]string{ + driveShareUsage, + driveRenameUsage, + driveUnshareUsage, + driveListUsage, + }, "\n"), + LongHelp: buildShareLongHelp(), + UsageFunc: usageFuncNoDefaultValues, + Subcommands: []*ffcli.Command{ + { + Name: "share", + ShortUsage: driveShareUsage, + Exec: runDriveShare, + ShortHelp: "[ALPHA] Create or modify a share", + }, + { + Name: "rename", + ShortUsage: driveRenameUsage, + ShortHelp: "[ALPHA] Rename a share", + Exec: runDriveRename, + }, + { + Name: "unshare", + ShortUsage: driveUnshareUsage, + ShortHelp: "[ALPHA] Remove a share", + Exec: runDriveUnshare, + }, + { + Name: "list", + ShortUsage: driveListUsage, + ShortHelp: "[ALPHA] List current shares", + Exec: runDriveList, + }, }, - }, + } } // runDriveShare is the entry point for the "tailscale drive share" command. 
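Note that nilOrCall itself is not added by this hunk; it is assumed to be the
existing cli.go helper that guards a nil command constructor, roughly:

	// Sketch of the assumed helper shape; the real definition lives in cli.go.
	func nilOrCall(f func() *ffcli.Command) *ffcli.Command {
		if f == nil {
			return nil
		}
		return f()
	}

With ts_omit_drive set, drive.go (and its init that assigns maybeDriveCmd) is
not compiled, maybeDriveCmd stays nil, and the drive subcommand drops out of
the CLI entirely.
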
diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 3ca570772..a0842b45b 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -274,6 +274,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature from tailscale.com/feature/wakeonlan+ tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/drive from tailscale.com/feature/condregister tailscale.com/feature/relayserver from tailscale.com/feature/condregister tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ tailscale.com/feature/taildrop from tailscale.com/feature/condregister diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index a672e32e2..5c71a62fd 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -61,3 +61,19 @@ func TestOmitReflectThings(t *testing.T) { }, }.Check(t) } + +func TestOmitDrive(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_drive,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "driveimpl") { + t.Errorf("unexpected dep with ts_omit_drive: %q", dep) + } + if strings.Contains(dep, "webdav") { + t.Errorf("unexpected dep with ts_omit_drive: %q", dep) + } + }, + }.Check(t) +} diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index ddf6d9ef6..890ff7bf8 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -33,8 +33,8 @@ import ( "tailscale.com/client/local" "tailscale.com/cmd/tailscaled/childproc" "tailscale.com/control/controlclient" - "tailscale.com/drive/driveimpl" "tailscale.com/envknob" + "tailscale.com/feature" _ "tailscale.com/feature/condregister" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -153,7 +153,6 @@ var subCommands = map[string]*func([]string) error{ "uninstall-system-daemon": &uninstallSystemDaemon, "debug": &debugModeFunc, "be-child": &beChildFunc, - "serve-taildrive": &serveDriveFunc, } var beCLI func() // non-nil if CLI is linked in with the "ts_include_cli" build tag @@ -480,7 +479,9 @@ func run() (err error) { debugMux = newDebugMux() } - sys.Set(driveimpl.NewFileSystemForRemote(logf)) + if f, ok := hookSetSysDrive.GetOk(); ok { + f(sys, logf) + } if app := envknob.App(); app != "" { hostinfo.SetApp(app) @@ -489,6 +490,11 @@ func run() (err error) { return startIPNServer(context.Background(), logf, pol.PublicID, sys) } +var ( + hookSetSysDrive feature.Hook[func(*tsd.System, logger.Logf)] + hookSetWgEnginConfigDrive feature.Hook[func(*wgengine.Config, logger.Logf)] +) + var sigPipe os.Signal // set by sigpipe.go func startIPNServer(ctx context.Context, logf logger.Logf, logID logid.PublicID, sys *tsd.System) error { @@ -749,7 +755,9 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo SetSubsystem: sys.Set, ControlKnobs: sys.ControlKnobs(), EventBus: sys.Bus.Get(), - DriveForLocal: driveimpl.NewFileSystemForLocal(logf), + } + if f, ok := hookSetWgEnginConfigDrive.GetOk(); ok { + f(&conf, logf) } sys.HealthTracker().SetMetricsRegistry(sys.UserMetricsRegistry()) @@ -943,35 +951,6 @@ func beChild(args []string) error { return f(args[1:]) } -var serveDriveFunc = serveDrive - -// serveDrive serves one or more Taildrives on localhost using the WebDAV -// protocol. 
On UNIX and MacOS tailscaled environment, Taildrive spawns child -// tailscaled processes in serve-taildrive mode in order to access the fliesystem -// as specific (usually unprivileged) users. -// -// serveDrive prints the address on which it's listening to stdout so that the -// parent process knows where to connect to. -func serveDrive(args []string) error { - if len(args) == 0 { - return errors.New("missing shares") - } - if len(args)%2 != 0 { - return errors.New("need pairs") - } - s, err := driveimpl.NewFileServer() - if err != nil { - return fmt.Errorf("unable to start Taildrive file server: %v", err) - } - shares := make(map[string]string) - for i := 0; i < len(args); i += 2 { - shares[args[i]] = args[i+1] - } - s.SetShares(shares) - fmt.Printf("%v\n", s.Addr()) - return s.Serve() -} - // dieOnPipeReadErrorOfFD reads from the pipe named by fd and exit the process // when the pipe becomes readable. We use this in tests as a somewhat more // portable mechanism for the Linux PR_SET_PDEATHSIG, which we wish existed on diff --git a/cmd/tailscaled/tailscaled_drive.go b/cmd/tailscaled/tailscaled_drive.go new file mode 100644 index 000000000..49f35a381 --- /dev/null +++ b/cmd/tailscaled/tailscaled_drive.go @@ -0,0 +1,56 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package main + +import ( + "errors" + "fmt" + + "tailscale.com/drive/driveimpl" + "tailscale.com/tsd" + "tailscale.com/types/logger" + "tailscale.com/wgengine" +) + +func init() { + subCommands["serve-taildrive"] = &serveDriveFunc + + hookSetSysDrive.Set(func(sys *tsd.System, logf logger.Logf) { + sys.Set(driveimpl.NewFileSystemForRemote(logf)) + }) + hookSetWgEnginConfigDrive.Set(func(conf *wgengine.Config, logf logger.Logf) { + conf.DriveForLocal = driveimpl.NewFileSystemForLocal(logf) + }) +} + +var serveDriveFunc = serveDrive + +// serveDrive serves one or more Taildrives on localhost using the WebDAV +// protocol. On UNIX and MacOS tailscaled environment, Taildrive spawns child +// tailscaled processes in serve-taildrive mode in order to access the fliesystem +// as specific (usually unprivileged) users. +// +// serveDrive prints the address on which it's listening to stdout so that the +// parent process knows where to connect to. +func serveDrive(args []string) error { + if len(args) == 0 { + return errors.New("missing shares") + } + if len(args)%2 != 0 { + return errors.New("need pairs") + } + s, err := driveimpl.NewFileServer() + if err != nil { + return fmt.Errorf("unable to start Taildrive file server: %v", err) + } + shares := make(map[string]string) + for i := 0; i < len(args); i += 2 { + shares[args[i]] = args[i+1] + } + s.SetShares(shares) + fmt.Printf("%v\n", s.Addr()) + return s.Serve() +} diff --git a/feature/condregister/maybe_drive.go b/feature/condregister/maybe_drive.go new file mode 100644 index 000000000..cb447ff28 --- /dev/null +++ b/feature/condregister/maybe_drive.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package condregister + +import _ "tailscale.com/feature/drive" diff --git a/feature/drive/drive.go b/feature/drive/drive.go new file mode 100644 index 000000000..3660a2b95 --- /dev/null +++ b/feature/drive/drive.go @@ -0,0 +1,5 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package drive registers the Taildrive (file server) feature. 
+package drive diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index d77481903..7d6dc2427 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -1,38 +1,35 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_drive + package ipnlocal import ( + "errors" "fmt" + "io" + "net/http" + "net/netip" "os" "slices" "tailscale.com/drive" "tailscale.com/ipn" "tailscale.com/tailcfg" + "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/views" + "tailscale.com/util/httpm" ) -const ( - // DriveLocalPort is the port on which the Taildrive listens for location - // connections on quad 100. - DriveLocalPort = 8080 -) - -// DriveSharingEnabled reports whether sharing to remote nodes via Taildrive is -// enabled. This is currently based on checking for the drive:share node -// attribute. -func (b *LocalBackend) DriveSharingEnabled() bool { - return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveShare) +func init() { + hookSetNetMapLockedDrive.Set(setNetMapLockedDrive) } -// DriveAccessEnabled reports whether accessing Taildrive shares on remote nodes -// is enabled. This is currently based on checking for the drive:access node -// attribute. -func (b *LocalBackend) DriveAccessEnabled() bool { - return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveAccess) +func setNetMapLockedDrive(b *LocalBackend, nm *netmap.NetworkMap) { + b.updateDrivePeersLocked(nm) + b.driveNotifyCurrentSharesLocked() } // DriveSetServerAddr tells Taildrive to use the given address for connecting @@ -363,3 +360,137 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem } return driveRemotes } + +// responseBodyWrapper wraps an io.ReadCloser and stores +// the number of bytesRead. +type responseBodyWrapper struct { + io.ReadCloser + logVerbose bool + bytesRx int64 + bytesTx int64 + log logger.Logf + method string + statusCode int + contentType string + fileExtension string + shareNodeKey string + selfNodeKey string + contentLength int64 +} + +// logAccess logs the taildrive: access: log line. If the logger is nil, +// the log will not be written. +func (rbw *responseBodyWrapper) logAccess(err string) { + if rbw.log == nil { + return + } + + // Some operating systems create and copy lots of 0 length hidden files for + // tracking various states. Omit these to keep logs from being too verbose. + if rbw.logVerbose || rbw.contentLength > 0 { + levelPrefix := "" + if rbw.logVerbose { + levelPrefix = "[v1] " + } + rbw.log( + "%staildrive: access: %s from %s to %s: status-code=%d ext=%q content-type=%q content-length=%.f tx=%.f rx=%.f err=%q", + levelPrefix, + rbw.method, + rbw.selfNodeKey, + rbw.shareNodeKey, + rbw.statusCode, + rbw.fileExtension, + rbw.contentType, + roundTraffic(rbw.contentLength), + roundTraffic(rbw.bytesTx), roundTraffic(rbw.bytesRx), err) + } +} + +// Read implements the io.Reader interface. +func (rbw *responseBodyWrapper) Read(b []byte) (int, error) { + n, err := rbw.ReadCloser.Read(b) + rbw.bytesRx += int64(n) + if err != nil && !errors.Is(err, io.EOF) { + rbw.logAccess(err.Error()) + } + + return n, err +} + +// Close implements the io.Close interface. +func (rbw *responseBodyWrapper) Close() error { + err := rbw.ReadCloser.Close() + var errStr string + if err != nil { + errStr = err.Error() + } + rbw.logAccess(errStr) + + return err +} + +// driveTransport is an http.RoundTripper that wraps +// b.Dialer().PeerAPITransport() with metrics tracking. 
+type driveTransport struct { + b *LocalBackend + tr *http.Transport +} + +func (b *LocalBackend) newDriveTransport() *driveTransport { + return &driveTransport{ + b: b, + tr: b.Dialer().PeerAPITransport(), + } +} + +func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + // Some WebDAV clients include origin and refer headers, which peerapi does + // not like. Remove them. + req.Header.Del("origin") + req.Header.Del("referer") + + bw := &requestBodyWrapper{} + if req.Body != nil { + bw.ReadCloser = req.Body + req.Body = bw + } + + defer func() { + contentType := "unknown" + if ct := req.Header.Get("Content-Type"); ct != "" { + contentType = ct + } + + dt.b.mu.Lock() + selfNodeKey := dt.b.currentNode().Self().Key().ShortString() + dt.b.mu.Unlock() + n, _, ok := dt.b.WhoIs("tcp", netip.MustParseAddrPort(req.URL.Host)) + shareNodeKey := "unknown" + if ok { + shareNodeKey = string(n.Key().ShortString()) + } + + rbw := responseBodyWrapper{ + log: dt.b.logf, + logVerbose: req.Method != httpm.GET && req.Method != httpm.PUT, // other requests like PROPFIND are quite chatty, so we log those at verbose level + method: req.Method, + bytesTx: int64(bw.bytesRead), + selfNodeKey: selfNodeKey, + shareNodeKey: shareNodeKey, + contentType: contentType, + contentLength: resp.ContentLength, + fileExtension: parseDriveFileExtensionForLog(req.URL.Path), + statusCode: resp.StatusCode, + ReadCloser: resp.Body, + } + + if resp.StatusCode >= 400 { + // in case of error response, just log immediately + rbw.logAccess("") + } else { + resp.Body = &rbw + } + }() + + return dt.tr.RoundTrip(req) +} diff --git a/ipn/ipnlocal/drive_tomove.go b/ipn/ipnlocal/drive_tomove.go new file mode 100644 index 000000000..290fe0970 --- /dev/null +++ b/ipn/ipnlocal/drive_tomove.go @@ -0,0 +1,30 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// This is the Taildrive stuff that should ideally be registered in init only when +// the ts_omit_drive is not set, but for transition reasons is currently (2025-09-08) +// always defined, as we work to pull it out of LocalBackend. + +package ipnlocal + +import "tailscale.com/tailcfg" + +const ( + // DriveLocalPort is the port on which the Taildrive listens for location + // connections on quad 100. + DriveLocalPort = 8080 +) + +// DriveSharingEnabled reports whether sharing to remote nodes via Taildrive is +// enabled. This is currently based on checking for the drive:share node +// attribute. +func (b *LocalBackend) DriveSharingEnabled() bool { + return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveShare) +} + +// DriveAccessEnabled reports whether accessing Taildrive shares on remote nodes +// is enabled. This is currently based on checking for the drive:access node +// attribute. 
+func (b *LocalBackend) DriveAccessEnabled() bool { + return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveAccess) +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 2d917ae54..8a6d0e013 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -52,6 +52,7 @@ import ( "tailscale.com/drive" "tailscale.com/envknob" "tailscale.com/envknob/featureknob" + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" @@ -100,7 +101,6 @@ import ( "tailscale.com/util/deephash" "tailscale.com/util/dnsname" "tailscale.com/util/goroutines" - "tailscale.com/util/httpm" "tailscale.com/util/mak" "tailscale.com/util/multierr" "tailscale.com/util/osuser" @@ -6326,143 +6326,12 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.metrics.approvedRoutes.Set(approved) } - b.updateDrivePeersLocked(nm) - b.driveNotifyCurrentSharesLocked() -} - -// responseBodyWrapper wraps an io.ReadCloser and stores -// the number of bytesRead. -type responseBodyWrapper struct { - io.ReadCloser - logVerbose bool - bytesRx int64 - bytesTx int64 - log logger.Logf - method string - statusCode int - contentType string - fileExtension string - shareNodeKey string - selfNodeKey string - contentLength int64 -} - -// logAccess logs the taildrive: access: log line. If the logger is nil, -// the log will not be written. -func (rbw *responseBodyWrapper) logAccess(err string) { - if rbw.log == nil { - return - } - - // Some operating systems create and copy lots of 0 length hidden files for - // tracking various states. Omit these to keep logs from being too verbose. - if rbw.logVerbose || rbw.contentLength > 0 { - levelPrefix := "" - if rbw.logVerbose { - levelPrefix = "[v1] " - } - rbw.log( - "%staildrive: access: %s from %s to %s: status-code=%d ext=%q content-type=%q content-length=%.f tx=%.f rx=%.f err=%q", - levelPrefix, - rbw.method, - rbw.selfNodeKey, - rbw.shareNodeKey, - rbw.statusCode, - rbw.fileExtension, - rbw.contentType, - roundTraffic(rbw.contentLength), - roundTraffic(rbw.bytesTx), roundTraffic(rbw.bytesRx), err) - } -} - -// Read implements the io.Reader interface. -func (rbw *responseBodyWrapper) Read(b []byte) (int, error) { - n, err := rbw.ReadCloser.Read(b) - rbw.bytesRx += int64(n) - if err != nil && !errors.Is(err, io.EOF) { - rbw.logAccess(err.Error()) - } - - return n, err -} - -// Close implements the io.Close interface. -func (rbw *responseBodyWrapper) Close() error { - err := rbw.ReadCloser.Close() - var errStr string - if err != nil { - errStr = err.Error() + if f, ok := hookSetNetMapLockedDrive.GetOk(); ok { + f(b, nm) } - rbw.logAccess(errStr) - - return err -} - -// driveTransport is an http.RoundTripper that wraps -// b.Dialer().PeerAPITransport() with metrics tracking. -type driveTransport struct { - b *LocalBackend - tr *http.Transport } -func (b *LocalBackend) newDriveTransport() *driveTransport { - return &driveTransport{ - b: b, - tr: b.Dialer().PeerAPITransport(), - } -} - -func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { - // Some WebDAV clients include origin and refer headers, which peerapi does - // not like. Remove them. 
- req.Header.Del("origin") - req.Header.Del("referer") - - bw := &requestBodyWrapper{} - if req.Body != nil { - bw.ReadCloser = req.Body - req.Body = bw - } - - defer func() { - contentType := "unknown" - if ct := req.Header.Get("Content-Type"); ct != "" { - contentType = ct - } - - dt.b.mu.Lock() - selfNodeKey := dt.b.currentNode().Self().Key().ShortString() - dt.b.mu.Unlock() - n, _, ok := dt.b.WhoIs("tcp", netip.MustParseAddrPort(req.URL.Host)) - shareNodeKey := "unknown" - if ok { - shareNodeKey = string(n.Key().ShortString()) - } - - rbw := responseBodyWrapper{ - log: dt.b.logf, - logVerbose: req.Method != httpm.GET && req.Method != httpm.PUT, // other requests like PROPFIND are quite chatty, so we log those at verbose level - method: req.Method, - bytesTx: int64(bw.bytesRead), - selfNodeKey: selfNodeKey, - shareNodeKey: shareNodeKey, - contentType: contentType, - contentLength: resp.ContentLength, - fileExtension: parseDriveFileExtensionForLog(req.URL.Path), - statusCode: resp.StatusCode, - ReadCloser: resp.Body, - } - - if resp.StatusCode >= 400 { - // in case of error response, just log immediately - rbw.logAccess("") - } else { - resp.Body = &rbw - } - }() - - return dt.tr.RoundTrip(req) -} +var hookSetNetMapLockedDrive feature.Hook[func(*LocalBackend, *netmap.NetworkMap)] // roundTraffic rounds bytes. This is used to preserve user privacy within logs. func roundTraffic(bytes int64) float64 { diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 89554f0ff..23c349087 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -16,7 +16,6 @@ import ( "net/http" "net/netip" "os" - "path/filepath" "runtime" "slices" "strconv" @@ -26,7 +25,6 @@ import ( "golang.org/x/net/dns/dnsmessage" "golang.org/x/net/http/httpguts" - "tailscale.com/drive" "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/hostinfo" @@ -39,14 +37,9 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/views" "tailscale.com/util/clientmetric" - "tailscale.com/util/httpm" "tailscale.com/wgengine/filter" ) -const ( - taildrivePrefix = "/v0/drive" -) - var initListenConfig func(*net.ListenConfig, netip.Addr, *netmon.State, string) error // addH2C is non-nil on platforms where we want to add H2C @@ -369,10 +362,6 @@ func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.handleDNSQuery(w, r) return } - if strings.HasPrefix(r.URL.Path, taildrivePrefix) { - h.handleServeDrive(w, r) - return - } switch r.URL.Path { case "/v0/goroutines": h.handleServeGoroutines(w, r) @@ -1018,90 +1007,6 @@ func (rbw *requestBodyWrapper) Read(b []byte) (int, error) { return n, err } -func (h *peerAPIHandler) handleServeDrive(w http.ResponseWriter, r *http.Request) { - h.logfv1("taildrive: got %s request from %s", r.Method, h.peerNode.Key().ShortString()) - if !h.ps.b.DriveSharingEnabled() { - h.logf("taildrive: not enabled") - http.Error(w, "taildrive not enabled", http.StatusNotFound) - return - } - - capsMap := h.PeerCaps() - driveCaps, ok := capsMap[tailcfg.PeerCapabilityTaildrive] - if !ok { - h.logf("taildrive: not permitted") - http.Error(w, "taildrive not permitted", http.StatusForbidden) - return - } - - rawPerms := make([][]byte, 0, len(driveCaps)) - for _, cap := range driveCaps { - rawPerms = append(rawPerms, []byte(cap)) - } - - p, err := drive.ParsePermissions(rawPerms) - if err != nil { - h.logf("taildrive: error parsing permissions: %v", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - fs, ok := 
h.ps.b.sys.DriveForRemote.GetOK() - if !ok { - h.logf("taildrive: not supported on platform") - http.Error(w, "taildrive not supported on platform", http.StatusNotFound) - return - } - wr := &httpResponseWrapper{ - ResponseWriter: w, - } - bw := &requestBodyWrapper{ - ReadCloser: r.Body, - } - r.Body = bw - - defer func() { - switch wr.statusCode { - case 304: - // 304s are particularly chatty so skip logging. - default: - log := h.logf - if r.Method != httpm.PUT && r.Method != httpm.GET { - log = h.logfv1 - } - contentType := "unknown" - if ct := wr.Header().Get("Content-Type"); ct != "" { - contentType = ct - } - - log("taildrive: share: %s from %s to %s: status-code=%d ext=%q content-type=%q tx=%.f rx=%.f", r.Method, h.peerNode.Key().ShortString(), h.selfNode.Key().ShortString(), wr.statusCode, parseDriveFileExtensionForLog(r.URL.Path), contentType, roundTraffic(wr.contentLength), roundTraffic(bw.bytesRead)) - } - }() - - r.URL.Path = strings.TrimPrefix(r.URL.Path, taildrivePrefix) - fs.ServeHTTPWithPerms(p, wr, r) -} - -// parseDriveFileExtensionForLog parses the file extension, if available. -// If a file extension is not present or parsable, the file extension is -// set to "unknown". If the file extension contains a double quote, it is -// replaced with "removed". -// All whitespace is removed from a parsed file extension. -// File extensions including the leading ., e.g. ".gif". -func parseDriveFileExtensionForLog(path string) string { - fileExt := "unknown" - if fe := filepath.Ext(path); fe != "" { - if strings.Contains(fe, "\"") { - // Do not log include file extensions with quotes within them. - return "removed" - } - // Remove white space from user defined inputs. - fileExt = strings.ReplaceAll(fe, " ", "") - } - - return fileExt -} - // peerAPIURL returns an HTTP URL for the peer's peerapi service, // without a trailing slash. 
// diff --git a/ipn/ipnlocal/peerapi_drive.go b/ipn/ipnlocal/peerapi_drive.go new file mode 100644 index 000000000..8dffacd9a --- /dev/null +++ b/ipn/ipnlocal/peerapi_drive.go @@ -0,0 +1,110 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package ipnlocal + +import ( + "net/http" + "path/filepath" + "strings" + + "tailscale.com/drive" + "tailscale.com/tailcfg" + "tailscale.com/util/httpm" +) + +const ( + taildrivePrefix = "/v0/drive" +) + +func init() { + peerAPIHandlerPrefixes[taildrivePrefix] = handleServeDrive +} + +func handleServeDrive(hi PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + h := hi.(*peerAPIHandler) + + h.logfv1("taildrive: got %s request from %s", r.Method, h.peerNode.Key().ShortString()) + if !h.ps.b.DriveSharingEnabled() { + h.logf("taildrive: not enabled") + http.Error(w, "taildrive not enabled", http.StatusNotFound) + return + } + + capsMap := h.PeerCaps() + driveCaps, ok := capsMap[tailcfg.PeerCapabilityTaildrive] + if !ok { + h.logf("taildrive: not permitted") + http.Error(w, "taildrive not permitted", http.StatusForbidden) + return + } + + rawPerms := make([][]byte, 0, len(driveCaps)) + for _, cap := range driveCaps { + rawPerms = append(rawPerms, []byte(cap)) + } + + p, err := drive.ParsePermissions(rawPerms) + if err != nil { + h.logf("taildrive: error parsing permissions: %v", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + fs, ok := h.ps.b.sys.DriveForRemote.GetOK() + if !ok { + h.logf("taildrive: not supported on platform") + http.Error(w, "taildrive not supported on platform", http.StatusNotFound) + return + } + wr := &httpResponseWrapper{ + ResponseWriter: w, + } + bw := &requestBodyWrapper{ + ReadCloser: r.Body, + } + r.Body = bw + + defer func() { + switch wr.statusCode { + case 304: + // 304s are particularly chatty so skip logging. + default: + log := h.logf + if r.Method != httpm.PUT && r.Method != httpm.GET { + log = h.logfv1 + } + contentType := "unknown" + if ct := wr.Header().Get("Content-Type"); ct != "" { + contentType = ct + } + + log("taildrive: share: %s from %s to %s: status-code=%d ext=%q content-type=%q tx=%.f rx=%.f", r.Method, h.peerNode.Key().ShortString(), h.selfNode.Key().ShortString(), wr.statusCode, parseDriveFileExtensionForLog(r.URL.Path), contentType, roundTraffic(wr.contentLength), roundTraffic(bw.bytesRead)) + } + }() + + r.URL.Path = strings.TrimPrefix(r.URL.Path, taildrivePrefix) + fs.ServeHTTPWithPerms(p, wr, r) +} + +// parseDriveFileExtensionForLog parses the file extension, if available. +// If a file extension is not present or parsable, the file extension is +// set to "unknown". If the file extension contains a double quote, it is +// replaced with "removed". +// All whitespace is removed from a parsed file extension. +// File extensions including the leading ., e.g. ".gif". +func parseDriveFileExtensionForLog(path string) string { + fileExt := "unknown" + if fe := filepath.Ext(path); fe != "" { + if strings.Contains(fe, "\"") { + // Do not log include file extensions with quotes within them. + return "removed" + } + // Remove white space from user defined inputs. 
+ fileExt = strings.ReplaceAll(fe, " ", "") + } + + return fileExt +} diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 2dc75c0d9..2a245be27 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -18,8 +18,6 @@ import ( "net/http" "net/netip" "net/url" - "os" - "path" "reflect" "runtime" "slices" @@ -31,7 +29,6 @@ import ( "golang.org/x/net/dns/dnsmessage" "tailscale.com/client/tailscale/apitype" "tailscale.com/clientupdate" - "tailscale.com/drive" "tailscale.com/envknob" "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" @@ -104,8 +101,6 @@ var handler = map[string]LocalAPIHandler{ "disconnect-control": (*Handler).disconnectControl, "dns-osconfig": (*Handler).serveDNSOSConfig, "dns-query": (*Handler).serveDNSQuery, - "drive/fileserver-address": (*Handler).serveDriveServerAddr, - "drive/shares": (*Handler).serveShares, "goroutines": (*Handler).serveGoroutines, "handle-push-message": (*Handler).serveHandlePushMessage, "id-token": (*Handler).serveIDToken, @@ -2661,124 +2656,6 @@ func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { }) } -// serveDriveServerAddr handles updates of the Taildrive file server address. -func (h *Handler) serveDriveServerAddr(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.PUT { - http.Error(w, "only PUT allowed", http.StatusMethodNotAllowed) - return - } - - b, err := io.ReadAll(r.Body) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - h.b.DriveSetServerAddr(string(b)) - w.WriteHeader(http.StatusCreated) -} - -// serveShares handles the management of Taildrive shares. -// -// PUT - adds or updates an existing share -// DELETE - removes a share -// GET - gets a list of all shares, sorted by name -// POST - renames an existing share -func (h *Handler) serveShares(w http.ResponseWriter, r *http.Request) { - if !h.b.DriveSharingEnabled() { - http.Error(w, `taildrive sharing not enabled, please add the attribute "drive:share" to this node in your ACLs' "nodeAttrs" section`, http.StatusForbidden) - return - } - switch r.Method { - case httpm.PUT: - var share drive.Share - err := json.NewDecoder(r.Body).Decode(&share) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - share.Path = path.Clean(share.Path) - fi, err := os.Stat(share.Path) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - if !fi.IsDir() { - http.Error(w, "not a directory", http.StatusBadRequest) - return - } - if drive.AllowShareAs() { - // share as the connected user - username, err := h.Actor.Username() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - share.As = username - } - err = h.b.DriveSetShare(&share) - if err != nil { - if errors.Is(err, drive.ErrInvalidShareName) { - http.Error(w, "invalid share name", http.StatusBadRequest) - return - } - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusCreated) - case httpm.DELETE: - b, err := io.ReadAll(r.Body) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - err = h.b.DriveRemoveShare(string(b)) - if err != nil { - if os.IsNotExist(err) { - http.Error(w, "share not found", http.StatusNotFound) - return - } - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusNoContent) - case httpm.POST: - var names [2]string - err := json.NewDecoder(r.Body).Decode(&names) - if err != nil { - http.Error(w, 
err.Error(), http.StatusBadRequest) - return - } - err = h.b.DriveRenameShare(names[0], names[1]) - if err != nil { - if os.IsNotExist(err) { - http.Error(w, "share not found", http.StatusNotFound) - return - } - if os.IsExist(err) { - http.Error(w, "share name already used", http.StatusBadRequest) - return - } - if errors.Is(err, drive.ErrInvalidShareName) { - http.Error(w, "invalid share name", http.StatusBadRequest) - return - } - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusNoContent) - case httpm.GET: - shares := h.b.DriveGetShares() - err := json.NewEncoder(w).Encode(shares) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - default: - http.Error(w, "unsupported method", http.StatusMethodNotAllowed) - } -} - // serveSuggestExitNode serves a POST endpoint for returning a suggested exit node. func (h *Handler) serveSuggestExitNode(w http.ResponseWriter, r *http.Request) { if r.Method != httpm.GET { diff --git a/ipn/localapi/localapi_drive.go b/ipn/localapi/localapi_drive.go new file mode 100644 index 000000000..eb765ec2e --- /dev/null +++ b/ipn/localapi/localapi_drive.go @@ -0,0 +1,141 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package localapi + +import ( + "encoding/json" + "errors" + "io" + "net/http" + "os" + "path" + + "tailscale.com/drive" + "tailscale.com/util/httpm" +) + +func init() { + Register("drive/fileserver-address", (*Handler).serveDriveServerAddr) + Register("drive/shares", (*Handler).serveShares) +} + +// serveDriveServerAddr handles updates of the Taildrive file server address. +func (h *Handler) serveDriveServerAddr(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.PUT { + http.Error(w, "only PUT allowed", http.StatusMethodNotAllowed) + return + } + + b, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + h.b.DriveSetServerAddr(string(b)) + w.WriteHeader(http.StatusCreated) +} + +// serveShares handles the management of Taildrive shares. 
+// +// PUT - adds or updates an existing share +// DELETE - removes a share +// GET - gets a list of all shares, sorted by name +// POST - renames an existing share +func (h *Handler) serveShares(w http.ResponseWriter, r *http.Request) { + if !h.b.DriveSharingEnabled() { + http.Error(w, `taildrive sharing not enabled, please add the attribute "drive:share" to this node in your ACLs' "nodeAttrs" section`, http.StatusForbidden) + return + } + switch r.Method { + case httpm.PUT: + var share drive.Share + err := json.NewDecoder(r.Body).Decode(&share) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + share.Path = path.Clean(share.Path) + fi, err := os.Stat(share.Path) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if !fi.IsDir() { + http.Error(w, "not a directory", http.StatusBadRequest) + return + } + if drive.AllowShareAs() { + // share as the connected user + username, err := h.Actor.Username() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + share.As = username + } + err = h.b.DriveSetShare(&share) + if err != nil { + if errors.Is(err, drive.ErrInvalidShareName) { + http.Error(w, "invalid share name", http.StatusBadRequest) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusCreated) + case httpm.DELETE: + b, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + err = h.b.DriveRemoveShare(string(b)) + if err != nil { + if os.IsNotExist(err) { + http.Error(w, "share not found", http.StatusNotFound) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusNoContent) + case httpm.POST: + var names [2]string + err := json.NewDecoder(r.Body).Decode(&names) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + err = h.b.DriveRenameShare(names[0], names[1]) + if err != nil { + if os.IsNotExist(err) { + http.Error(w, "share not found", http.StatusNotFound) + return + } + if os.IsExist(err) { + http.Error(w, "share name already used", http.StatusBadRequest) + return + } + if errors.Is(err, drive.ErrInvalidShareName) { + http.Error(w, "invalid share name", http.StatusBadRequest) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusNoContent) + case httpm.GET: + shares := h.b.DriveGetShares() + err := json.NewEncoder(w).Encode(shares) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + default: + http.Error(w, "unsupported method", http.StatusMethodNotAllowed) + } +} diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index a87a3ec65..b025e3a43 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -17,6 +17,7 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index a87a3ec65..b025e3a43 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -17,6 +17,7 @@ import ( _ "tailscale.com/derp/derphttp" _ 
"tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index a87a3ec65..b025e3a43 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -17,6 +17,7 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index a87a3ec65..b025e3a43 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -17,6 +17,7 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index 54e1bcc04..32f95357d 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -25,6 +25,7 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" From cfb2ca724b5faf8007576014a8350893868f7629 Mon Sep 17 00:00:00 2001 From: David Bond Date: Fri, 12 Sep 2025 12:04:39 +0100 Subject: [PATCH 1300/1708] tsnet: expose logtail's Logf method (#17057) This commit adds a new method to the tsnet.Server type named `Logger` that returns the underlying logtail instance's Logf method. This is intended to be used within the Kubernetes operator to wrap its existing logger in a way such that operator specific logs can also be sent to control for support & debugging purposes. Updates https://github.com/tailscale/corp/issues/32037 Signed-off-by: David Bond --- tsnet/tsnet.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 359fbc1c5..d25da0996 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -492,6 +492,16 @@ func (s *Server) TailscaleIPs() (ip4, ip6 netip.Addr) { return ip4, ip6 } +// Logtailf returns a [logger.Logf] that outputs to Tailscale's logging service and will be only visible to Tailscale's +// support team. Logs written there cannot be retrieved by the user. This method always returns a non-nil value. 
+func (s *Server) Logtailf() logger.Logf { + if s.logtail == nil { + return logger.Discard + } + + return s.logtail.Logf +} + func (s *Server) getAuthKey() string { if v := s.AuthKey; v != "" { return v From 0e3d942e39030e886e19bd0083969a192b340026 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 12 Sep 2025 11:22:36 -0700 Subject: [PATCH 1301/1708] feature/featuretags: move list of omit-able features to a Go package Updates #12614 Change-Id: I4012c33095c6a7ccf80ad36dbab5cedbae5b3d47 Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 4 +- cmd/featuretags/featuretags.go | 73 ++++++++++++++++++++++++++++++ cmd/omitsize/omitsize.go | 11 ++--- feature/featuretags/featuretags.go | 27 +++++++++++ 4 files changed, 107 insertions(+), 8 deletions(-) create mode 100644 cmd/featuretags/featuretags.go create mode 100644 feature/featuretags/featuretags.go diff --git a/build_dist.sh b/build_dist.sh index 9dc879b1e..45d471be0 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -18,7 +18,7 @@ fi eval `CGO_ENABLED=0 GOOS=$($go env GOHOSTOS) GOARCH=$($go env GOHOSTARCH) $go run ./cmd/mkversion` -if [ "$1" = "shellvars" ]; then +if [ "$#" -ge 1 ] && [ "$1" = "shellvars" ]; then cat < Date: Fri, 12 Sep 2025 09:51:00 -0700 Subject: [PATCH 1302/1708] all: add ts_omit_tailnetlock as a start of making it build-time modular Updates #17115 Change-Id: I6b083c0db4c4d359e49eb129d626b7f128f0a9d2 Signed-off-by: Brad Fitzpatrick --- client/local/local.go | 187 ------------ client/local/tailnetlock.go | 204 +++++++++++++ cmd/tailscale/cli/cli.go | 14 +- cmd/tailscale/cli/network-lock.go | 6 + cmd/tailscaled/deps_test.go | 13 + feature/featuretags/featuretags.go | 1 + ipn/ipnlocal/local.go | 48 ---- ipn/ipnlocal/network-lock.go | 49 ++++ ipn/ipnlocal/network-lock_test.go | 2 + ipn/ipnlocal/tailnetlock_disabled.go | 31 ++ ipn/localapi/localapi.go | 394 ------------------------- ipn/localapi/tailnetlock.go | 413 +++++++++++++++++++++++++++ tka/aum.go | 2 + tka/builder.go | 2 + tka/deeplink.go | 2 + tka/disabled_stub.go | 149 ++++++++++ tka/sig.go | 2 + tka/state.go | 2 + tka/state_test.go | 2 + tka/sync.go | 2 + tka/tailchonk.go | 2 + tka/tka.go | 2 + types/netlogtype/netlogtype_test.go | 2 + 23 files changed, 897 insertions(+), 634 deletions(-) create mode 100644 client/local/tailnetlock.go create mode 100644 ipn/ipnlocal/tailnetlock_disabled.go create mode 100644 ipn/localapi/tailnetlock.go create mode 100644 tka/disabled_stub.go diff --git a/client/local/local.go b/client/local/local.go index 0257c7a26..03ca10bb7 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -38,10 +38,8 @@ import ( "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/tailcfg" - "tailscale.com/tka" "tailscale.com/types/dnstype" "tailscale.com/types/key" - "tailscale.com/types/tkatype" "tailscale.com/util/eventbus" ) @@ -1219,183 +1217,6 @@ func (lc *Client) Ping(ctx context.Context, ip netip.Addr, pingtype tailcfg.Ping return lc.PingWithOpts(ctx, ip, pingtype, PingOpts{}) } -// NetworkLockStatus fetches information about the tailnet key authority, if one is configured. -func (lc *Client) NetworkLockStatus(ctx context.Context) (*ipnstate.NetworkLockStatus, error) { - body, err := lc.send(ctx, "GET", "/localapi/v0/tka/status", 200, nil) - if err != nil { - return nil, fmt.Errorf("error: %w", err) - } - return decodeJSON[*ipnstate.NetworkLockStatus](body) -} - -// NetworkLockInit initializes the tailnet key authority. -// -// TODO(tom): Plumb through disablement secrets. 
-func (lc *Client) NetworkLockInit(ctx context.Context, keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) (*ipnstate.NetworkLockStatus, error) { - var b bytes.Buffer - type initRequest struct { - Keys []tka.Key - DisablementValues [][]byte - SupportDisablement []byte - } - - if err := json.NewEncoder(&b).Encode(initRequest{Keys: keys, DisablementValues: disablementValues, SupportDisablement: supportDisablement}); err != nil { - return nil, err - } - - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/init", 200, &b) - if err != nil { - return nil, fmt.Errorf("error: %w", err) - } - return decodeJSON[*ipnstate.NetworkLockStatus](body) -} - -// NetworkLockWrapPreauthKey wraps a pre-auth key with information to -// enable unattended bringup in the locked tailnet. -func (lc *Client) NetworkLockWrapPreauthKey(ctx context.Context, preauthKey string, tkaKey key.NLPrivate) (string, error) { - encodedPrivate, err := tkaKey.MarshalText() - if err != nil { - return "", err - } - - var b bytes.Buffer - type wrapRequest struct { - TSKey string - TKAKey string // key.NLPrivate.MarshalText - } - if err := json.NewEncoder(&b).Encode(wrapRequest{TSKey: preauthKey, TKAKey: string(encodedPrivate)}); err != nil { - return "", err - } - - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/wrap-preauth-key", 200, &b) - if err != nil { - return "", fmt.Errorf("error: %w", err) - } - return string(body), nil -} - -// NetworkLockModify adds and/or removes key(s) to the tailnet key authority. -func (lc *Client) NetworkLockModify(ctx context.Context, addKeys, removeKeys []tka.Key) error { - var b bytes.Buffer - type modifyRequest struct { - AddKeys []tka.Key - RemoveKeys []tka.Key - } - - if err := json.NewEncoder(&b).Encode(modifyRequest{AddKeys: addKeys, RemoveKeys: removeKeys}); err != nil { - return err - } - - if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/modify", 204, &b); err != nil { - return fmt.Errorf("error: %w", err) - } - return nil -} - -// NetworkLockSign signs the specified node-key and transmits that signature to the control plane. -// rotationPublic, if specified, must be an ed25519 public key. -func (lc *Client) NetworkLockSign(ctx context.Context, nodeKey key.NodePublic, rotationPublic []byte) error { - var b bytes.Buffer - type signRequest struct { - NodeKey key.NodePublic - RotationPublic []byte - } - - if err := json.NewEncoder(&b).Encode(signRequest{NodeKey: nodeKey, RotationPublic: rotationPublic}); err != nil { - return err - } - - if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/sign", 200, &b); err != nil { - return fmt.Errorf("error: %w", err) - } - return nil -} - -// NetworkLockAffectedSigs returns all signatures signed by the specified keyID. -func (lc *Client) NetworkLockAffectedSigs(ctx context.Context, keyID tkatype.KeyID) ([]tkatype.MarshaledSignature, error) { - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/affected-sigs", 200, bytes.NewReader(keyID)) - if err != nil { - return nil, fmt.Errorf("error: %w", err) - } - return decodeJSON[[]tkatype.MarshaledSignature](body) -} - -// NetworkLockLog returns up to maxEntries number of changes to network-lock state. 
-func (lc *Client) NetworkLockLog(ctx context.Context, maxEntries int) ([]ipnstate.NetworkLockUpdate, error) { - v := url.Values{} - v.Set("limit", fmt.Sprint(maxEntries)) - body, err := lc.send(ctx, "GET", "/localapi/v0/tka/log?"+v.Encode(), 200, nil) - if err != nil { - return nil, fmt.Errorf("error %w: %s", err, body) - } - return decodeJSON[[]ipnstate.NetworkLockUpdate](body) -} - -// NetworkLockForceLocalDisable forcibly shuts down network lock on this node. -func (lc *Client) NetworkLockForceLocalDisable(ctx context.Context) error { - // This endpoint expects an empty JSON stanza as the payload. - var b bytes.Buffer - if err := json.NewEncoder(&b).Encode(struct{}{}); err != nil { - return err - } - - if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/force-local-disable", 200, &b); err != nil { - return fmt.Errorf("error: %w", err) - } - return nil -} - -// NetworkLockVerifySigningDeeplink verifies the network lock deeplink contained -// in url and returns information extracted from it. -func (lc *Client) NetworkLockVerifySigningDeeplink(ctx context.Context, url string) (*tka.DeeplinkValidationResult, error) { - vr := struct { - URL string - }{url} - - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/verify-deeplink", 200, jsonBody(vr)) - if err != nil { - return nil, fmt.Errorf("sending verify-deeplink: %w", err) - } - - return decodeJSON[*tka.DeeplinkValidationResult](body) -} - -// NetworkLockGenRecoveryAUM generates an AUM for recovering from a tailnet-lock key compromise. -func (lc *Client) NetworkLockGenRecoveryAUM(ctx context.Context, removeKeys []tkatype.KeyID, forkFrom tka.AUMHash) ([]byte, error) { - vr := struct { - Keys []tkatype.KeyID - ForkFrom string - }{removeKeys, forkFrom.String()} - - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/generate-recovery-aum", 200, jsonBody(vr)) - if err != nil { - return nil, fmt.Errorf("sending generate-recovery-aum: %w", err) - } - - return body, nil -} - -// NetworkLockCosignRecoveryAUM co-signs a recovery AUM using the node's tailnet lock key. -func (lc *Client) NetworkLockCosignRecoveryAUM(ctx context.Context, aum tka.AUM) ([]byte, error) { - r := bytes.NewReader(aum.Serialize()) - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/cosign-recovery-aum", 200, r) - if err != nil { - return nil, fmt.Errorf("sending cosign-recovery-aum: %w", err) - } - - return body, nil -} - -// NetworkLockSubmitRecoveryAUM submits a recovery AUM to the control plane. -func (lc *Client) NetworkLockSubmitRecoveryAUM(ctx context.Context, aum tka.AUM) error { - r := bytes.NewReader(aum.Serialize()) - _, err := lc.send(ctx, "POST", "/localapi/v0/tka/submit-recovery-aum", 200, r) - if err != nil { - return fmt.Errorf("sending cosign-recovery-aum: %w", err) - } - return nil -} - // SetServeConfig sets or replaces the serving settings. // If config is nil, settings are cleared and serving is disabled. func (lc *Client) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error { @@ -1421,14 +1242,6 @@ func (lc *Client) DisconnectControl(ctx context.Context) error { return nil } -// NetworkLockDisable shuts down network-lock across the tailnet. -func (lc *Client) NetworkLockDisable(ctx context.Context, secret []byte) error { - if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/disable", 200, bytes.NewReader(secret)); err != nil { - return fmt.Errorf("error: %w", err) - } - return nil -} - // GetServeConfig return the current serve config. // // If the serve config is empty, it returns (nil, nil). 
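The client-side API is unchanged by this move: the NetworkLock* methods keep their names and signatures, and simply live in a file that is compiled out under the ts_omit_tailnetlock build tag. As a minimal sketch (not part of this patch), assuming a default build and a running tailscaled that the zero value of local.Client can reach over its default LocalAPI socket, an existing caller keeps working as before:

package main

import (
	"context"
	"fmt"
	"log"

	"tailscale.com/client/local"
)

func main() {
	// Zero value assumed to dial the local tailscaled's default LocalAPI socket.
	var lc local.Client
	st, err := lc.NetworkLockStatus(context.Background())
	if err != nil {
		log.Fatalf("NetworkLockStatus: %v", err)
	}
	fmt.Printf("tailnet lock enabled: %v\n", st.Enabled)
}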
diff --git a/client/local/tailnetlock.go b/client/local/tailnetlock.go new file mode 100644 index 000000000..9d37d2f35 --- /dev/null +++ b/client/local/tailnetlock.go @@ -0,0 +1,204 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tailnetlock + +package local + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/url" + + "tailscale.com/ipn/ipnstate" + "tailscale.com/tka" + "tailscale.com/types/key" + "tailscale.com/types/tkatype" +) + +// NetworkLockStatus fetches information about the tailnet key authority, if one is configured. +func (lc *Client) NetworkLockStatus(ctx context.Context) (*ipnstate.NetworkLockStatus, error) { + body, err := lc.send(ctx, "GET", "/localapi/v0/tka/status", 200, nil) + if err != nil { + return nil, fmt.Errorf("error: %w", err) + } + return decodeJSON[*ipnstate.NetworkLockStatus](body) +} + +// NetworkLockInit initializes the tailnet key authority. +// +// TODO(tom): Plumb through disablement secrets. +func (lc *Client) NetworkLockInit(ctx context.Context, keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) (*ipnstate.NetworkLockStatus, error) { + var b bytes.Buffer + type initRequest struct { + Keys []tka.Key + DisablementValues [][]byte + SupportDisablement []byte + } + + if err := json.NewEncoder(&b).Encode(initRequest{Keys: keys, DisablementValues: disablementValues, SupportDisablement: supportDisablement}); err != nil { + return nil, err + } + + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/init", 200, &b) + if err != nil { + return nil, fmt.Errorf("error: %w", err) + } + return decodeJSON[*ipnstate.NetworkLockStatus](body) +} + +// NetworkLockWrapPreauthKey wraps a pre-auth key with information to +// enable unattended bringup in the locked tailnet. +func (lc *Client) NetworkLockWrapPreauthKey(ctx context.Context, preauthKey string, tkaKey key.NLPrivate) (string, error) { + encodedPrivate, err := tkaKey.MarshalText() + if err != nil { + return "", err + } + + var b bytes.Buffer + type wrapRequest struct { + TSKey string + TKAKey string // key.NLPrivate.MarshalText + } + if err := json.NewEncoder(&b).Encode(wrapRequest{TSKey: preauthKey, TKAKey: string(encodedPrivate)}); err != nil { + return "", err + } + + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/wrap-preauth-key", 200, &b) + if err != nil { + return "", fmt.Errorf("error: %w", err) + } + return string(body), nil +} + +// NetworkLockModify adds and/or removes key(s) to the tailnet key authority. +func (lc *Client) NetworkLockModify(ctx context.Context, addKeys, removeKeys []tka.Key) error { + var b bytes.Buffer + type modifyRequest struct { + AddKeys []tka.Key + RemoveKeys []tka.Key + } + + if err := json.NewEncoder(&b).Encode(modifyRequest{AddKeys: addKeys, RemoveKeys: removeKeys}); err != nil { + return err + } + + if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/modify", 204, &b); err != nil { + return fmt.Errorf("error: %w", err) + } + return nil +} + +// NetworkLockSign signs the specified node-key and transmits that signature to the control plane. +// rotationPublic, if specified, must be an ed25519 public key. 
+func (lc *Client) NetworkLockSign(ctx context.Context, nodeKey key.NodePublic, rotationPublic []byte) error { + var b bytes.Buffer + type signRequest struct { + NodeKey key.NodePublic + RotationPublic []byte + } + + if err := json.NewEncoder(&b).Encode(signRequest{NodeKey: nodeKey, RotationPublic: rotationPublic}); err != nil { + return err + } + + if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/sign", 200, &b); err != nil { + return fmt.Errorf("error: %w", err) + } + return nil +} + +// NetworkLockAffectedSigs returns all signatures signed by the specified keyID. +func (lc *Client) NetworkLockAffectedSigs(ctx context.Context, keyID tkatype.KeyID) ([]tkatype.MarshaledSignature, error) { + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/affected-sigs", 200, bytes.NewReader(keyID)) + if err != nil { + return nil, fmt.Errorf("error: %w", err) + } + return decodeJSON[[]tkatype.MarshaledSignature](body) +} + +// NetworkLockLog returns up to maxEntries number of changes to network-lock state. +func (lc *Client) NetworkLockLog(ctx context.Context, maxEntries int) ([]ipnstate.NetworkLockUpdate, error) { + v := url.Values{} + v.Set("limit", fmt.Sprint(maxEntries)) + body, err := lc.send(ctx, "GET", "/localapi/v0/tka/log?"+v.Encode(), 200, nil) + if err != nil { + return nil, fmt.Errorf("error %w: %s", err, body) + } + return decodeJSON[[]ipnstate.NetworkLockUpdate](body) +} + +// NetworkLockForceLocalDisable forcibly shuts down network lock on this node. +func (lc *Client) NetworkLockForceLocalDisable(ctx context.Context) error { + // This endpoint expects an empty JSON stanza as the payload. + var b bytes.Buffer + if err := json.NewEncoder(&b).Encode(struct{}{}); err != nil { + return err + } + + if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/force-local-disable", 200, &b); err != nil { + return fmt.Errorf("error: %w", err) + } + return nil +} + +// NetworkLockVerifySigningDeeplink verifies the network lock deeplink contained +// in url and returns information extracted from it. +func (lc *Client) NetworkLockVerifySigningDeeplink(ctx context.Context, url string) (*tka.DeeplinkValidationResult, error) { + vr := struct { + URL string + }{url} + + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/verify-deeplink", 200, jsonBody(vr)) + if err != nil { + return nil, fmt.Errorf("sending verify-deeplink: %w", err) + } + + return decodeJSON[*tka.DeeplinkValidationResult](body) +} + +// NetworkLockGenRecoveryAUM generates an AUM for recovering from a tailnet-lock key compromise. +func (lc *Client) NetworkLockGenRecoveryAUM(ctx context.Context, removeKeys []tkatype.KeyID, forkFrom tka.AUMHash) ([]byte, error) { + vr := struct { + Keys []tkatype.KeyID + ForkFrom string + }{removeKeys, forkFrom.String()} + + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/generate-recovery-aum", 200, jsonBody(vr)) + if err != nil { + return nil, fmt.Errorf("sending generate-recovery-aum: %w", err) + } + + return body, nil +} + +// NetworkLockCosignRecoveryAUM co-signs a recovery AUM using the node's tailnet lock key. +func (lc *Client) NetworkLockCosignRecoveryAUM(ctx context.Context, aum tka.AUM) ([]byte, error) { + r := bytes.NewReader(aum.Serialize()) + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/cosign-recovery-aum", 200, r) + if err != nil { + return nil, fmt.Errorf("sending cosign-recovery-aum: %w", err) + } + + return body, nil +} + +// NetworkLockSubmitRecoveryAUM submits a recovery AUM to the control plane. 
+func (lc *Client) NetworkLockSubmitRecoveryAUM(ctx context.Context, aum tka.AUM) error { + r := bytes.NewReader(aum.Serialize()) + _, err := lc.send(ctx, "POST", "/localapi/v0/tka/submit-recovery-aum", 200, r) + if err != nil { + return fmt.Errorf("sending cosign-recovery-aum: %w", err) + } + return nil +} + +// NetworkLockDisable shuts down network-lock across the tailnet. +func (lc *Client) NetworkLockDisable(ctx context.Context, secret []byte) error { + if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/disable", 200, bytes.NewReader(secret)); err != nil { + return fmt.Errorf("error: %w", err) + } + return nil +} diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 39fdce60d..ef0dc9820 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -207,10 +207,14 @@ func noDupFlagify(c *ffcli.Command) { } } -var fileCmd func() *ffcli.Command -var sysPolicyCmd func() *ffcli.Command -var maybeWebCmd func() *ffcli.Command -var maybeDriveCmd func() *ffcli.Command +var ( + fileCmd, + sysPolicyCmd, + maybeWebCmd, + maybeDriveCmd, + maybeNetlockCmd, + _ func() *ffcli.Command +) func newRootCmd() *ffcli.Command { rootfs := newFlagSet("tailscale") @@ -257,7 +261,7 @@ change in the future. nilOrCall(fileCmd), bugReportCmd, certCmd, - netlockCmd, + nilOrCall(maybeNetlockCmd), licensesCmd, exitNodeCmd(), updateCmd, diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index d19909576..ec3b01ad6 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package cli import ( @@ -27,6 +29,10 @@ import ( "tailscale.com/util/prompt" ) +func init() { + maybeNetlockCmd = func() *ffcli.Command { return netlockCmd } +} + var netlockCmd = &ffcli.Command{ Name: "lock", ShortUsage: "tailscale lock [arguments...]", diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 5c71a62fd..a334eb9b7 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -77,3 +77,16 @@ func TestOmitDrive(t *testing.T) { }, }.Check(t) } + +func TestOmitTailnetLock(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_tailnetlock,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "cbor") { + t.Errorf("unexpected dep with ts_omit_tailnetlock: %q", dep) + } + }, + }.Check(t) +} diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 6f87dab76..00ad0b4c2 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -19,6 +19,7 @@ var Features = map[string]string{ "syspolicy": "System policy configuration (MDM) support", "systray": "Linux system tray", "taildrop": "Taildrop (file sending) support", + "tailnetlock": "Tailnet Lock support", "tap": "Experimental Layer 2 (ethernet) support", "tka": "Tailnet Lock (TKA) support", "tpm": "TPM support", diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8a6d0e013..6108aa830 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -82,7 +82,6 @@ import ( "tailscale.com/posture" "tailscale.com/syncs" "tailscale.com/tailcfg" - "tailscale.com/tka" "tailscale.com/tsd" "tailscale.com/tstime" "tailscale.com/types/appctype" @@ -7179,53 +7178,6 @@ func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { return b.resetForProfileChangeLockedOnEntry(unlock) } -func (b *LocalBackend) initTKALocked() 
error { - cp := b.pm.CurrentProfile() - if cp.ID() == "" { - b.tka = nil - return nil - } - if b.tka != nil { - if b.tka.profile == cp.ID() { - // Already initialized. - return nil - } - // As we're switching profiles, we need to reset the TKA to nil. - b.tka = nil - } - root := b.TailscaleVarRoot() - if root == "" { - b.tka = nil - b.logf("network-lock unavailable; no state directory") - return nil - } - - chonkDir := b.chonkPathLocked() - if _, err := os.Stat(chonkDir); err == nil { - // The directory exists, which means network-lock has been initialized. - storage, err := tka.ChonkDir(chonkDir) - if err != nil { - return fmt.Errorf("opening tailchonk: %v", err) - } - authority, err := tka.Open(storage) - if err != nil { - return fmt.Errorf("initializing tka: %v", err) - } - if err := authority.Compact(storage, tkaCompactionDefaults); err != nil { - b.logf("tka compaction failed: %v", err) - } - - b.tka = &tkaState{ - profile: cp.ID(), - authority: authority, - storage: storage, - } - b.logf("tka initialized at head %x", authority.Head()) - } - - return nil -} - // resetDialPlan resets the dialPlan for this LocalBackend. It will log if // anything is reset. // diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 10f0cc827..499082445 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package ipnlocal import ( @@ -56,6 +58,53 @@ type tkaState struct { filtered []ipnstate.TKAPeer } +func (b *LocalBackend) initTKALocked() error { + cp := b.pm.CurrentProfile() + if cp.ID() == "" { + b.tka = nil + return nil + } + if b.tka != nil { + if b.tka.profile == cp.ID() { + // Already initialized. + return nil + } + // As we're switching profiles, we need to reset the TKA to nil. + b.tka = nil + } + root := b.TailscaleVarRoot() + if root == "" { + b.tka = nil + b.logf("network-lock unavailable; no state directory") + return nil + } + + chonkDir := b.chonkPathLocked() + if _, err := os.Stat(chonkDir); err == nil { + // The directory exists, which means network-lock has been initialized. + storage, err := tka.ChonkDir(chonkDir) + if err != nil { + return fmt.Errorf("opening tailchonk: %v", err) + } + authority, err := tka.Open(storage) + if err != nil { + return fmt.Errorf("initializing tka: %v", err) + } + if err := authority.Compact(storage, tkaCompactionDefaults); err != nil { + b.logf("tka compaction failed: %v", err) + } + + b.tka = &tkaState{ + profile: cp.ID(), + authority: authority, + storage: storage, + } + b.logf("tka initialized at head %x", authority.Head()) + } + + return nil +} + // tkaFilterNetmapLocked checks the signatures on each node key, dropping // nodes from the netmap whose signature does not verify. 
// diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 443539aec..842b75c43 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package ipnlocal import ( diff --git a/ipn/ipnlocal/tailnetlock_disabled.go b/ipn/ipnlocal/tailnetlock_disabled.go new file mode 100644 index 000000000..85cf4bd3f --- /dev/null +++ b/ipn/ipnlocal/tailnetlock_disabled.go @@ -0,0 +1,31 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_tailnetlock + +package ipnlocal + +import ( + "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tka" + "tailscale.com/types/netmap" +) + +type tkaState struct { + authority *tka.Authority +} + +func (b *LocalBackend) initTKALocked() error { + return nil +} + +func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsView) error { + return nil +} + +func (b *LocalBackend) tkaFilterNetmapLocked(nm *netmap.NetworkMap) {} + +func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus { + return &ipnstate.NetworkLockStatus{Enabled: false} +} diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 2a245be27..ac5b0ee7d 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -41,14 +41,12 @@ import ( "tailscale.com/net/netutil" "tailscale.com/net/portmapper" "tailscale.com/tailcfg" - "tailscale.com/tka" "tailscale.com/tstime" "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/ptr" - "tailscale.com/types/tkatype" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" "tailscale.com/util/httpm" @@ -124,19 +122,6 @@ var handler = map[string]LocalAPIHandler{ "start": (*Handler).serveStart, "status": (*Handler).serveStatus, "suggest-exit-node": (*Handler).serveSuggestExitNode, - "tka/affected-sigs": (*Handler).serveTKAAffectedSigs, - "tka/cosign-recovery-aum": (*Handler).serveTKACosignRecoveryAUM, - "tka/disable": (*Handler).serveTKADisable, - "tka/force-local-disable": (*Handler).serveTKALocalDisable, - "tka/generate-recovery-aum": (*Handler).serveTKAGenerateRecoveryAUM, - "tka/init": (*Handler).serveTKAInit, - "tka/log": (*Handler).serveTKALog, - "tka/modify": (*Handler).serveTKAModify, - "tka/sign": (*Handler).serveTKASign, - "tka/status": (*Handler).serveTKAStatus, - "tka/submit-recovery-aum": (*Handler).serveTKASubmitRecoveryAUM, - "tka/verify-deeplink": (*Handler).serveTKAVerifySigningDeeplink, - "tka/wrap-preauth-key": (*Handler).serveTKAWrapPreauthKey, "update/check": (*Handler).serveUpdateCheck, "update/install": (*Handler).serveUpdateInstall, "update/progress": (*Handler).serveUpdateProgress, @@ -1892,25 +1877,6 @@ func (h *Handler) serveUploadClientMetrics(w http.ResponseWriter, r *http.Reques json.NewEncoder(w).Encode(struct{}{}) } -func (h *Handler) serveTKAStatus(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "lock status access denied", http.StatusForbidden) - return - } - if r.Method != httpm.GET { - http.Error(w, "use GET", http.StatusMethodNotAllowed) - return - } - - j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - func (h *Handler) 
serveSetGUIVisible(w http.ResponseWriter, r *http.Request) { if r.Method != httpm.POST { http.Error(w, "use POST", http.StatusMethodNotAllowed) @@ -1958,366 +1924,6 @@ func (h *Handler) serveSetUseExitNodeEnabled(w http.ResponseWriter, r *http.Requ e.Encode(prefs) } -func (h *Handler) serveTKASign(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "lock sign access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type signRequest struct { - NodeKey key.NodePublic - RotationPublic []byte - } - var req signRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockSign(req.NodeKey, req.RotationPublic); err != nil { - http.Error(w, "signing failed: "+err.Error(), http.StatusInternalServerError) - return - } - - w.WriteHeader(http.StatusOK) -} - -func (h *Handler) serveTKAInit(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "lock init access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type initRequest struct { - Keys []tka.Key - DisablementValues [][]byte - SupportDisablement []byte - } - var req initRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if !h.b.NetworkLockAllowed() { - http.Error(w, "Tailnet Lock is not supported on your pricing plan", http.StatusForbidden) - return - } - - if err := h.b.NetworkLockInit(req.Keys, req.DisablementValues, req.SupportDisablement); err != nil { - http.Error(w, "initialization failed: "+err.Error(), http.StatusInternalServerError) - return - } - - j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - -func (h *Handler) serveTKAModify(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "network-lock modify access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type modifyRequest struct { - AddKeys []tka.Key - RemoveKeys []tka.Key - } - var req modifyRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockModify(req.AddKeys, req.RemoveKeys); err != nil { - http.Error(w, "network-lock modify failed: "+err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(204) -} - -func (h *Handler) serveTKAWrapPreauthKey(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "network-lock modify access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type wrapRequest struct { - TSKey string - TKAKey string // key.NLPrivate.MarshalText - } - var req wrapRequest - if err := json.NewDecoder(http.MaxBytesReader(w, r.Body, 12*1024)).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - var priv key.NLPrivate - if err := priv.UnmarshalText([]byte(req.TKAKey)); err != nil { - http.Error(w, "invalid JSON 
body", http.StatusBadRequest) - return - } - - wrappedKey, err := h.b.NetworkLockWrapPreauthKey(req.TSKey, priv) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusOK) - w.Write([]byte(wrappedKey)) -} - -func (h *Handler) serveTKAVerifySigningDeeplink(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "signing deeplink verification access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type verifyRequest struct { - URL string - } - var req verifyRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest) - return - } - - res := h.b.NetworkLockVerifySigningDeeplink(req.URL) - j, err := json.MarshalIndent(res, "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - -func (h *Handler) serveTKADisable(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "network-lock modify access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - body := io.LimitReader(r.Body, 1024*1024) - secret, err := io.ReadAll(body) - if err != nil { - http.Error(w, "reading secret", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockDisable(secret); err != nil { - http.Error(w, "network-lock disable failed: "+err.Error(), http.StatusBadRequest) - return - } - w.WriteHeader(http.StatusOK) -} - -func (h *Handler) serveTKALocalDisable(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "network-lock modify access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - // Require a JSON stanza for the body as an additional CSRF protection. 
- var req struct{} - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockForceLocalDisable(); err != nil { - http.Error(w, "network-lock local disable failed: "+err.Error(), http.StatusBadRequest) - return - } - w.WriteHeader(http.StatusOK) -} - -func (h *Handler) serveTKALog(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.GET { - http.Error(w, "use GET", http.StatusMethodNotAllowed) - return - } - - limit := 50 - if limitStr := r.FormValue("limit"); limitStr != "" { - l, err := strconv.Atoi(limitStr) - if err != nil { - http.Error(w, "parsing 'limit' parameter: "+err.Error(), http.StatusBadRequest) - return - } - limit = int(l) - } - - updates, err := h.b.NetworkLockLog(limit) - if err != nil { - http.Error(w, "reading log failed: "+err.Error(), http.StatusInternalServerError) - return - } - - j, err := json.MarshalIndent(updates, "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - -func (h *Handler) serveTKAAffectedSigs(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - keyID, err := io.ReadAll(http.MaxBytesReader(w, r.Body, 2048)) - if err != nil { - http.Error(w, "reading body", http.StatusBadRequest) - return - } - - sigs, err := h.b.NetworkLockAffectedSigs(keyID) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - j, err := json.MarshalIndent(sigs, "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - -func (h *Handler) serveTKAGenerateRecoveryAUM(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type verifyRequest struct { - Keys []tkatype.KeyID - ForkFrom string - } - var req verifyRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest) - return - } - - var forkFrom tka.AUMHash - if req.ForkFrom != "" { - if err := forkFrom.UnmarshalText([]byte(req.ForkFrom)); err != nil { - http.Error(w, "decoding fork-from: "+err.Error(), http.StatusBadRequest) - return - } - } - - res, err := h.b.NetworkLockGenerateRecoveryAUM(req.Keys, forkFrom) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/octet-stream") - w.Write(res.Serialize()) -} - -func (h *Handler) serveTKACosignRecoveryAUM(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - body := io.LimitReader(r.Body, 1024*1024) - aumBytes, err := io.ReadAll(body) - if err != nil { - http.Error(w, "reading AUM", http.StatusBadRequest) - return - } - var aum tka.AUM - if err := aum.Unserialize(aumBytes); err != nil { - http.Error(w, "decoding AUM", http.StatusBadRequest) - return - } - - res, err := h.b.NetworkLockCosignRecoveryAUM(&aum) - if err != nil { - http.Error(w, err.Error(), 
http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/octet-stream") - w.Write(res.Serialize()) -} - -func (h *Handler) serveTKASubmitRecoveryAUM(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - body := io.LimitReader(r.Body, 1024*1024) - aumBytes, err := io.ReadAll(body) - if err != nil { - http.Error(w, "reading AUM", http.StatusBadRequest) - return - } - var aum tka.AUM - if err := aum.Unserialize(aumBytes); err != nil { - http.Error(w, "decoding AUM", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockSubmitRecoveryAUM(&aum); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusOK) -} - // serveProfiles serves profile switching-related endpoints. Supported methods // and paths are: // - GET /profiles/: list all profiles (JSON-encoded array of ipn.LoginProfiles) diff --git a/ipn/localapi/tailnetlock.go b/ipn/localapi/tailnetlock.go new file mode 100644 index 000000000..797150938 --- /dev/null +++ b/ipn/localapi/tailnetlock.go @@ -0,0 +1,413 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tailnetlock + +package localapi + +import ( + "encoding/json" + "io" + "net/http" + "strconv" + + "tailscale.com/tka" + "tailscale.com/types/key" + "tailscale.com/types/tkatype" + "tailscale.com/util/httpm" +) + +func init() { + handler["tka/affected-sigs"] = (*Handler).serveTKAAffectedSigs + handler["tka/cosign-recovery-aum"] = (*Handler).serveTKACosignRecoveryAUM + handler["tka/disable"] = (*Handler).serveTKADisable + handler["tka/force-local-disable"] = (*Handler).serveTKALocalDisable + handler["tka/generate-recovery-aum"] = (*Handler).serveTKAGenerateRecoveryAUM + handler["tka/init"] = (*Handler).serveTKAInit + handler["tka/log"] = (*Handler).serveTKALog + handler["tka/modify"] = (*Handler).serveTKAModify + handler["tka/sign"] = (*Handler).serveTKASign + handler["tka/status"] = (*Handler).serveTKAStatus + handler["tka/submit-recovery-aum"] = (*Handler).serveTKASubmitRecoveryAUM + handler["tka/verify-deeplink"] = (*Handler).serveTKAVerifySigningDeeplink + handler["tka/wrap-preauth-key"] = (*Handler).serveTKAWrapPreauthKey +} + +func (h *Handler) serveTKAStatus(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "lock status access denied", http.StatusForbidden) + return + } + if r.Method != httpm.GET { + http.Error(w, "use GET", http.StatusMethodNotAllowed) + return + } + + j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKASign(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "lock sign access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type signRequest struct { + NodeKey key.NodePublic + RotationPublic []byte + } + var req signRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockSign(req.NodeKey, req.RotationPublic); err != nil { + http.Error(w, "signing 
failed: "+err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) +} + +func (h *Handler) serveTKAInit(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "lock init access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type initRequest struct { + Keys []tka.Key + DisablementValues [][]byte + SupportDisablement []byte + } + var req initRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if !h.b.NetworkLockAllowed() { + http.Error(w, "Tailnet Lock is not supported on your pricing plan", http.StatusForbidden) + return + } + + if err := h.b.NetworkLockInit(req.Keys, req.DisablementValues, req.SupportDisablement); err != nil { + http.Error(w, "initialization failed: "+err.Error(), http.StatusInternalServerError) + return + } + + j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKAModify(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "network-lock modify access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type modifyRequest struct { + AddKeys []tka.Key + RemoveKeys []tka.Key + } + var req modifyRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockModify(req.AddKeys, req.RemoveKeys); err != nil { + http.Error(w, "network-lock modify failed: "+err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(204) +} + +func (h *Handler) serveTKAWrapPreauthKey(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "network-lock modify access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type wrapRequest struct { + TSKey string + TKAKey string // key.NLPrivate.MarshalText + } + var req wrapRequest + if err := json.NewDecoder(http.MaxBytesReader(w, r.Body, 12*1024)).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + var priv key.NLPrivate + if err := priv.UnmarshalText([]byte(req.TKAKey)); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + wrappedKey, err := h.b.NetworkLockWrapPreauthKey(req.TSKey, priv) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) + w.Write([]byte(wrappedKey)) +} + +func (h *Handler) serveTKAVerifySigningDeeplink(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "signing deeplink verification access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type verifyRequest struct { + URL string + } + var req verifyRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest) + return + } + + res := h.b.NetworkLockVerifySigningDeeplink(req.URL) + j, err := 
json.MarshalIndent(res, "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKADisable(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "network-lock modify access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + body := io.LimitReader(r.Body, 1024*1024) + secret, err := io.ReadAll(body) + if err != nil { + http.Error(w, "reading secret", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockDisable(secret); err != nil { + http.Error(w, "network-lock disable failed: "+err.Error(), http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusOK) +} + +func (h *Handler) serveTKALocalDisable(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "network-lock modify access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + // Require a JSON stanza for the body as an additional CSRF protection. + var req struct{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockForceLocalDisable(); err != nil { + http.Error(w, "network-lock local disable failed: "+err.Error(), http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusOK) +} + +func (h *Handler) serveTKALog(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "use GET", http.StatusMethodNotAllowed) + return + } + + limit := 50 + if limitStr := r.FormValue("limit"); limitStr != "" { + l, err := strconv.Atoi(limitStr) + if err != nil { + http.Error(w, "parsing 'limit' parameter: "+err.Error(), http.StatusBadRequest) + return + } + limit = int(l) + } + + updates, err := h.b.NetworkLockLog(limit) + if err != nil { + http.Error(w, "reading log failed: "+err.Error(), http.StatusInternalServerError) + return + } + + j, err := json.MarshalIndent(updates, "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKAAffectedSigs(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + keyID, err := io.ReadAll(http.MaxBytesReader(w, r.Body, 2048)) + if err != nil { + http.Error(w, "reading body", http.StatusBadRequest) + return + } + + sigs, err := h.b.NetworkLockAffectedSigs(keyID) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + j, err := json.MarshalIndent(sigs, "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKAGenerateRecoveryAUM(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type verifyRequest struct { + Keys []tkatype.KeyID + ForkFrom string + } + var req verifyRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON for 
verifyRequest body", http.StatusBadRequest) + return + } + + var forkFrom tka.AUMHash + if req.ForkFrom != "" { + if err := forkFrom.UnmarshalText([]byte(req.ForkFrom)); err != nil { + http.Error(w, "decoding fork-from: "+err.Error(), http.StatusBadRequest) + return + } + } + + res, err := h.b.NetworkLockGenerateRecoveryAUM(req.Keys, forkFrom) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/octet-stream") + w.Write(res.Serialize()) +} + +func (h *Handler) serveTKACosignRecoveryAUM(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + body := io.LimitReader(r.Body, 1024*1024) + aumBytes, err := io.ReadAll(body) + if err != nil { + http.Error(w, "reading AUM", http.StatusBadRequest) + return + } + var aum tka.AUM + if err := aum.Unserialize(aumBytes); err != nil { + http.Error(w, "decoding AUM", http.StatusBadRequest) + return + } + + res, err := h.b.NetworkLockCosignRecoveryAUM(&aum) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/octet-stream") + w.Write(res.Serialize()) +} + +func (h *Handler) serveTKASubmitRecoveryAUM(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + body := io.LimitReader(r.Body, 1024*1024) + aumBytes, err := io.ReadAll(body) + if err != nil { + http.Error(w, "reading AUM", http.StatusBadRequest) + return + } + var aum tka.AUM + if err := aum.Unserialize(aumBytes); err != nil { + http.Error(w, "decoding AUM", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockSubmitRecoveryAUM(&aum); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) +} diff --git a/tka/aum.go b/tka/aum.go index 07a34b4f6..08d70897e 100644 --- a/tka/aum.go +++ b/tka/aum.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/builder.go b/tka/builder.go index ec38bb6fa..642f39d77 100644 --- a/tka/builder.go +++ b/tka/builder.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/deeplink.go b/tka/deeplink.go index 5cf24fc5c..5570a19d7 100644 --- a/tka/deeplink.go +++ b/tka/deeplink.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/disabled_stub.go b/tka/disabled_stub.go new file mode 100644 index 000000000..15bf12c33 --- /dev/null +++ b/tka/disabled_stub.go @@ -0,0 +1,149 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_tailnetlock + +package tka + +import ( + "crypto/ed25519" + "errors" + + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/types/tkatype" +) + +type Authority struct { + head AUM + oldestAncestor AUM + state State +} + +func (*Authority) Head() AUMHash { return AUMHash{} } + +func (AUMHash) MarshalText() ([]byte, error) { return nil, 
errNoTailnetLock } + +type State struct{} + +// AUMKind describes valid AUM types. +type AUMKind uint8 + +type AUMHash [32]byte + +type AUM struct { + MessageKind AUMKind `cbor:"1,keyasint"` + PrevAUMHash []byte `cbor:"2,keyasint"` + + // Key encodes a public key to be added to the key authority. + // This field is used for AddKey AUMs. + Key *Key `cbor:"3,keyasint,omitempty"` + + // KeyID references a public key which is part of the key authority. + // This field is used for RemoveKey and UpdateKey AUMs. + KeyID tkatype.KeyID `cbor:"4,keyasint,omitempty"` + + // State describes the full state of the key authority. + // This field is used for Checkpoint AUMs. + State *State `cbor:"5,keyasint,omitempty"` + + // Votes and Meta describe properties of a key in the key authority. + // These fields are used for UpdateKey AUMs. + Votes *uint `cbor:"6,keyasint,omitempty"` + Meta map[string]string `cbor:"7,keyasint,omitempty"` + + // Signatures lists the signatures over this AUM. + // CBOR key 23 is the last key which can be encoded as a single byte. + Signatures []tkatype.Signature `cbor:"23,keyasint,omitempty"` +} + +type Chonk interface { + // AUM returns the AUM with the specified digest. + // + // If the AUM does not exist, then os.ErrNotExist is returned. + AUM(hash AUMHash) (AUM, error) + + // ChildAUMs returns all AUMs with a specified previous + // AUM hash. + ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) + + // CommitVerifiedAUMs durably stores the provided AUMs. + // Callers MUST ONLY provide AUMs which are verified (specifically, + // a call to aumVerify() must return a nil error). + // as the implementation assumes that only verified AUMs are stored. + CommitVerifiedAUMs(updates []AUM) error + + // Heads returns AUMs for which there are no children. In other + // words, the latest AUM in all possible chains (the 'leaves'). + Heads() ([]AUM, error) + + // SetLastActiveAncestor is called to record the oldest-known AUM + // that contributed to the current state. This value is used as + // a hint on next startup to determine which chain to pick when computing + // the current state, if there are multiple distinct chains. + SetLastActiveAncestor(hash AUMHash) error + + // LastActiveAncestor returns the oldest-known AUM that was (in a + // previous run) an ancestor of the current state. This is used + // as a hint to pick the correct chain in the event that the Chonk stores + // multiple distinct chains. + LastActiveAncestor() (*AUMHash, error) +} + +// SigKind describes valid NodeKeySignature types. +type SigKind uint8 + +type NodeKeySignature struct { + // SigKind identifies the variety of signature. + SigKind SigKind `cbor:"1,keyasint"` + // Pubkey identifies the key.NodePublic which is being authorized. + // SigCredential signatures do not use this field. + Pubkey []byte `cbor:"2,keyasint,omitempty"` + + // KeyID identifies which key in the tailnet key authority should + // be used to verify this signature. Only set for SigDirect and + // SigCredential signature kinds. + KeyID []byte `cbor:"3,keyasint,omitempty"` + + // Signature is the packed (R, S) ed25519 signature over all other + // fields of the structure. + Signature []byte `cbor:"4,keyasint,omitempty"` + + // Nested describes a NodeKeySignature which authorizes the node-key + // used as Pubkey. Only used for SigRotation signatures. + Nested *NodeKeySignature `cbor:"5,keyasint,omitempty"` + + // WrappingPubkey specifies the ed25519 public key which must be used + // to sign a Signature which embeds this one. 
+ // + // For SigRotation signatures multiple levels deep, intermediate + // signatures may omit this value, in which case the parent WrappingPubkey + // is used. + // + // SigCredential signatures use this field to specify the public key + // they are certifying, following the usual semanticsfor WrappingPubkey. + WrappingPubkey []byte `cbor:"6,keyasint,omitempty"` +} + +type DeeplinkValidationResult struct { +} + +func (h *AUMHash) UnmarshalText(text []byte) error { + return errNoTailnetLock +} + +var errNoTailnetLock = errors.New("tailnet lock is not enabled") + +func DecodeWrappedAuthkey(wrappedAuthKey string, logf logger.Logf) (authKey string, isWrapped bool, sig *NodeKeySignature, priv ed25519.PrivateKey) { + return wrappedAuthKey, false, nil, nil +} + +func ResignNKS(priv key.NLPrivate, nodeKey key.NodePublic, oldNKS tkatype.MarshaledSignature) (tkatype.MarshaledSignature, error) { + return nil, nil +} + +func SignByCredential(privKey []byte, wrapped *NodeKeySignature, nodeKey key.NodePublic) (tkatype.MarshaledSignature, error) { + return nil, nil +} + +func (s NodeKeySignature) String() string { return "" } diff --git a/tka/sig.go b/tka/sig.go index c82f9715c..7b1838d40 100644 --- a/tka/sig.go +++ b/tka/sig.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/state.go b/tka/state.go index 0a459bd9a..0a30c56a0 100644 --- a/tka/state.go +++ b/tka/state.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/state_test.go b/tka/state_test.go index 060bd9350..32b656314 100644 --- a/tka/state_test.go +++ b/tka/state_test.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/sync.go b/tka/sync.go index 6131f54d0..6c2b7cbb8 100644 --- a/tka/sync.go +++ b/tka/sync.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 32d2215de..6c441669a 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/tka.go b/tka/tka.go index ade621bc6..3929ff22a 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + // Package tka (WIP) implements the Tailnet Key Authority. 
package tka diff --git a/types/netlogtype/netlogtype_test.go b/types/netlogtype/netlogtype_test.go index 7f29090c5..403cb9508 100644 --- a/types/netlogtype/netlogtype_test.go +++ b/types/netlogtype/netlogtype_test.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package netlogtype import ( From 0cc1b2ff76560ee4675909272fa37ba6b397744c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 12 Sep 2025 08:10:48 -0700 Subject: [PATCH 1303/1708] cmd/derper: add start of ACE support Updates tailscale/corp#32168 Updates tailscale/corp#32226 Change-Id: Ia46abcaa09dcfd53bf8d4699909537bacf84d57a Signed-off-by: Brad Fitzpatrick --- cmd/derper/ace.go | 50 +++++++++++++++++++++++++++++++++++++++++ cmd/derper/depaware.txt | 1 + cmd/derper/derper.go | 8 +++++++ 3 files changed, 59 insertions(+) create mode 100644 cmd/derper/ace.go diff --git a/cmd/derper/ace.go b/cmd/derper/ace.go new file mode 100644 index 000000000..301b029cc --- /dev/null +++ b/cmd/derper/ace.go @@ -0,0 +1,50 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// TODO: docs about all this + +package main + +import ( + "errors" + "fmt" + "net" + "net/http" + "strings" + + "tailscale.com/derp" + "tailscale.com/net/connectproxy" +) + +// serveConnect handles a CONNECT request for ACE support. +func serveConnect(s *derp.Server, w http.ResponseWriter, r *http.Request) { + if !*flagACEEnabled { + http.Error(w, "CONNECT not enabled", http.StatusForbidden) + return + } + if r.TLS == nil { + // This should already be enforced by the caller of serveConnect, but + // double check. + http.Error(w, "CONNECT requires TLS", http.StatusForbidden) + return + } + + ch := &connectproxy.Handler{ + Check: func(hostPort string) error { + host, port, err := net.SplitHostPort(hostPort) + if err != nil { + return err + } + if port != "443" { + return fmt.Errorf("only port 443 is allowed") + } + // TODO(bradfitz): make policy configurable from flags and/or come + // from local tailscaled nodeAttrs + if !strings.HasSuffix(host, ".tailscale.com") || strings.Contains(host, "derp") { + return errors.New("bad host") + } + return nil + }, + } + ch.ServeHTTP(w, r) +} diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 8adb2d338..61e42ede1 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -105,6 +105,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/metrics from tailscale.com/cmd/derper+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial + tailscale.com/net/connectproxy from tailscale.com/cmd/derper tailscale.com/net/dnscache from tailscale.com/derp/derphttp tailscale.com/net/ktimeout from tailscale.com/cmd/derper tailscale.com/net/netaddr from tailscale.com/ipn+ diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 7ea404beb..b25bf22de 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -91,6 +91,9 @@ var ( tcpUserTimeout = flag.Duration("tcp-user-timeout", 15*time.Second, "TCP user timeout") // tcpWriteTimeout is the timeout for writing to client TCP connections. It does not apply to mesh connections. 
tcpWriteTimeout = flag.Duration("tcp-write-timeout", derp.DefaultTCPWiteTimeout, "TCP write timeout; 0 results in no timeout being set on writes") + + // ACE + flagACEEnabled = flag.Bool("ace", false, "whether to enable embedded ACE server [experimental + in-development as of 2025-09-12; not yet documented]") ) var ( @@ -373,6 +376,11 @@ func main() { tlsRequestVersion.Add(label, 1) tlsActiveVersion.Add(label, 1) defer tlsActiveVersion.Add(label, -1) + + if r.Method == "CONNECT" { + serveConnect(s, w, r) + return + } } mux.ServeHTTP(w, r) From 7d2101f3520f16b86f2ed5e15f23c44d720534e6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 12 Sep 2025 17:09:23 -0700 Subject: [PATCH 1304/1708] cmd/omitsize: add flag to disable the removal table And remove a bogus omit feature from feature/featuretags. Updates #12614 Change-Id: I0a08183fb75c73ae75b6fd4216d134e352dcf5a0 Signed-off-by: Brad Fitzpatrick --- cmd/featuretags/featuretags.go | 22 ++++++++--------- cmd/omitsize/omitsize.go | 39 ++++++++++++++++++------------ feature/featuretags/featuretags.go | 33 +++++++++++++++++++++++-- 3 files changed, 65 insertions(+), 29 deletions(-) diff --git a/cmd/featuretags/featuretags.go b/cmd/featuretags/featuretags.go index c97d66c47..5213fda4c 100644 --- a/cmd/featuretags/featuretags.go +++ b/cmd/featuretags/featuretags.go @@ -35,16 +35,14 @@ func main() { return } - var keep = map[string]bool{} + var keep = map[featuretags.FeatureTag]bool{} for t := range strings.SplitSeq(*add, ",") { if t != "" { - keep[t] = true + keep[featuretags.FeatureTag(t)] = true } } var tags []string - if keep["cli"] { - // The "cli" --add value is special in that it's a build tag - // that adds something, rather than removes something. + if keep[featuretags.CLI] { tags = append(tags, "ts_include_cli") } if *min { @@ -52,22 +50,24 @@ func main() { if f == "" { continue } - if !keep[f] { - tags = append(tags, "ts_omit_"+f) + if !keep[f] && f.IsOmittable() { + tags = append(tags, f.OmitTag()) } } } - for f := range strings.SplitSeq(*remove, ",") { - if f == "" { + for v := range strings.SplitSeq(*remove, ",") { + if v == "" { continue } + f := featuretags.FeatureTag(v) if _, ok := features[f]; !ok { log.Fatalf("unknown feature %q in --remove", f) } - tags = append(tags, "ts_omit_"+f) + tags = append(tags, f.OmitTag()) } + slices.Sort(tags) + tags = slices.Compact(tags) if len(tags) != 0 { fmt.Println(strings.Join(tags, ",")) } - } diff --git a/cmd/omitsize/omitsize.go b/cmd/omitsize/omitsize.go index bce072973..841f3ab9e 100644 --- a/cmd/omitsize/omitsize.go +++ b/cmd/omitsize/omitsize.go @@ -23,6 +23,8 @@ import ( var ( cacheDir = flag.String("cachedir", "", "if non-empty, use this directory to store cached size results to speed up subsequent runs. The tool does not consider the git status when deciding whether to use the cache. 
It's on you to nuke it between runs if the tree changed.") features = flag.String("features", "", "comma-separated list of features to consider, with or without the ts_omit_ prefix") + + showRemovals = flag.Bool("show-removals", false, "if true, show a table of sizes removing one feature at a time from the full set") ) func main() { @@ -31,7 +33,9 @@ func main() { var all []string if *features == "" { for k := range featuretags.Features { - all = append(all, "ts_omit_"+k) + if k.IsOmittable() { + all = append(all, k.OmitTag()) + } } } else { for v := range strings.SplitSeq(*features, ",") { @@ -49,27 +53,30 @@ func main() { baseC := measure("tailscale") baseBoth := measure("tailscaled", "ts_include_cli") - fmt.Printf("(a) starting with everything and removing a feature...\n\n") - - fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") - fmt.Printf("%9d %9d %9d\n", baseD, baseC, baseBoth) - minD := measure("tailscaled", all...) minC := measure("tailscale", all...) minBoth := measure("tailscaled", append(slices.Clone(all), "ts_include_cli")...) - fmt.Printf("-%8d -%8d -%8d omit-all\n", baseD-minD, baseC-minC, baseBoth-minBoth) - for _, t := range all { - sizeD := measure("tailscaled", t) - sizeC := measure("tailscale", t) - sizeBoth := measure("tailscaled", append([]string{t}, "ts_include_cli")...) - saveD := max(baseD-sizeD, 0) - saveC := max(baseC-sizeC, 0) - saveBoth := max(baseBoth-sizeBoth, 0) - fmt.Printf("-%8d -%8d -%8d %s\n", saveD, saveC, saveBoth, t) + if *showRemovals { + fmt.Printf("Starting with everything and removing a feature...\n\n") + + fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") + fmt.Printf("%9d %9d %9d\n", baseD, baseC, baseBoth) + + fmt.Printf("-%8d -%8d -%8d omit-all\n", baseD-minD, baseC-minC, baseBoth-minBoth) + + for _, t := range all { + sizeD := measure("tailscaled", t) + sizeC := measure("tailscale", t) + sizeBoth := measure("tailscaled", append([]string{t}, "ts_include_cli")...) + saveD := max(baseD-sizeD, 0) + saveC := max(baseC-sizeC, 0) + saveBoth := max(baseBoth-sizeBoth, 0) + fmt.Printf("-%8d -%8d -%8d %s\n", saveD, saveC, saveBoth, t) + } } - fmt.Printf("\n(b) or, starting at minimal and adding one feature back...\n") + fmt.Printf("\nStarting at a minimal binary and adding one feature back...\n") fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") fmt.Printf("%9d %9d %9d omitting everything\n", minD, minC, minBoth) for _, t := range all { diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 00ad0b4c2..87bc22fc6 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -4,7 +4,37 @@ // The featuretags package is a registry of all the ts_omit-able build tags. package featuretags -var Features = map[string]string{ +// CLI is a special feature in the [Features] map that works opposite +// from the others: it is opt-in, rather than opt-out, having a different +// build tag format. +const CLI FeatureTag = "cli" + +// FeatureTag names a Tailscale feature that can be selectively added or removed +// via build tags. +type FeatureTag string + +// IsOmittable reports whether this feature tag is one that can be +// omitted via a ts_omit_ build tag. +func (ft FeatureTag) IsOmittable() bool { + switch ft { + case CLI: + return false + } + return true +} + +// OmitTag returns the ts_omit_ build tag for this feature tag. +// It panics if the feature tag is not omitable. 
+func (ft FeatureTag) OmitTag() string { + if !ft.IsOmittable() { + panic("not omitable: " + string(ft)) + } + return "ts_omit_" + string(ft) +} + +// Features are the known Tailscale features that can be selectively included or +// excluded via build tags, and a description of each. +var Features = map[FeatureTag]string{ "aws": "AWS integration", "bird": "Bird BGP integration", "capture": "Packet capture", @@ -21,7 +51,6 @@ var Features = map[string]string{ "taildrop": "Taildrop (file sending) support", "tailnetlock": "Tailnet Lock support", "tap": "Experimental Layer 2 (ethernet) support", - "tka": "Tailnet Lock (TKA) support", "tpm": "TPM support", "wakeonlan": "Wake-on-LAN support", "webclient": "Web client support", From 782c16c5138fb0f83ea80ed1793e3be93791d280 Mon Sep 17 00:00:00 2001 From: David Bond Date: Mon, 15 Sep 2025 12:37:28 +0100 Subject: [PATCH 1305/1708] k8s-operator: reset service status before append (#17120) This commit fixes an issue within the service reconciler where we end up in a constant reconciliation loop. When reconciling, the loadbalancer status is appended to but not reset between each reconciliation, leading to an ever growing slice of duplicate statuses. Fixes https://github.com/tailscale/tailscale/issues/17105 Fixes https://github.com/tailscale/tailscale/issues/17107 Signed-off-by: David Bond --- cmd/k8s-operator/operator_test.go | 4 ++++ cmd/k8s-operator/svc.go | 10 ++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 50f8738ce..5af237342 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -173,6 +173,10 @@ func TestLoadBalancerClass(t *testing.T) { }, }, } + + // Perform an additional reconciliation loop here to ensure resources don't change through side effects. 
Mainly + // to prevent infinite reconciliation + expectReconciled(t, sr, "default", "test") expectEqual(t, fc, want) // Turn the service back into a ClusterIP service, which should make the diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index 51ad1aea3..eec1924e7 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -348,9 +348,10 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga dev := devices[0] logger.Debugf("setting Service LoadBalancer status to %q, %s", dev.hostname, strings.Join(dev.ips, ", ")) - svc.Status.LoadBalancer.Ingress = append(svc.Status.LoadBalancer.Ingress, corev1.LoadBalancerIngress{ - Hostname: dev.hostname, - }) + + ingress := []corev1.LoadBalancerIngress{ + {Hostname: dev.hostname}, + } clusterIPAddr, err := netip.ParseAddr(svc.Spec.ClusterIP) if err != nil { @@ -365,10 +366,11 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga continue } if addr.Is4() == clusterIPAddr.Is4() { // only add addresses of the same family - svc.Status.LoadBalancer.Ingress = append(svc.Status.LoadBalancer.Ingress, corev1.LoadBalancerIngress{IP: ip}) + ingress = append(ingress, corev1.LoadBalancerIngress{IP: ip}) } } + svc.Status.LoadBalancer.Ingress = ingress tsoperator.SetServiceCondition(svc, tsapi.ProxyReady, metav1.ConditionTrue, reasonProxyCreated, reasonProxyCreated, a.clock, logger) return nil } From b816fd71176132ee6e2912f8bdb2e55e9eb181ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Mon, 15 Sep 2025 10:36:17 -0400 Subject: [PATCH 1306/1708] control/controlclient: introduce eventbus messages instead of callbacks (#16956) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a small introduction of the eventbus into controlclient that communicates with mainly ipnlocal. While ipnlocal is a complicated part of the codebase, the subscribers here are from the perspective of ipnlocal already called async. Updates #15160 Signed-off-by: Claus Lensbøl --- control/controlclient/auto.go | 6 +- control/controlclient/client.go | 3 + control/controlclient/controlclient_test.go | 7 + control/controlclient/direct.go | 174 +++++++++++--------- control/controlclient/direct_test.go | 5 + ipn/ipnlocal/expiry.go | 43 ++++- ipn/ipnlocal/expiry_test.go | 10 +- ipn/ipnlocal/local.go | 94 +++++++---- ipn/ipnlocal/local_test.go | 13 +- ipn/ipnlocal/network-lock_test.go | 3 + ipn/ipnlocal/serve_test.go | 29 ++-- ipn/ipnlocal/state_test.go | 31 ++-- ipn/localapi/localapi_test.go | 4 +- tsd/tsd.go | 12 +- util/eventbus/eventbustest/eventbustest.go | 2 +- 15 files changed, 293 insertions(+), 143 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index e6335e54d..7bca6c8d8 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -205,7 +205,6 @@ func NewNoStart(opts Options) (_ *Auto, err error) { } }) return c, nil - } // SetPaused controls whether HTTP activity should be paused. @@ -424,6 +423,11 @@ func (c *Auto) unpausedChanLocked() <-chan bool { return unpaused } +// ClientID returns the ClientID of the direct controlClient +func (c *Auto) ClientID() int64 { + return c.direct.ClientID() +} + // mapRoutineState is the state of Auto.mapRoutine while it's running. 
type mapRoutineState struct { c *Auto diff --git a/control/controlclient/client.go b/control/controlclient/client.go index 8df64f9e8..d0aa129ae 100644 --- a/control/controlclient/client.go +++ b/control/controlclient/client.go @@ -81,6 +81,9 @@ type Client interface { // in a separate http request. It has nothing to do with the rest of // the state machine. UpdateEndpoints(endpoints []tailcfg.Endpoint) + // ClientID returns the ClientID of a client. This ID is meant to + // distinguish one client from another. + ClientID() int64 } // UserVisibleError is an error that should be shown to users. diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index 792c26955..2efc27b5e 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -35,6 +35,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/persist" + "tailscale.com/util/eventbus/eventbustest" ) func fieldsOf(t reflect.Type) (fields []string) { @@ -218,6 +219,8 @@ func TestDirectProxyManual(t *testing.T) { t.Skip("skipping without --live-network-test") } + bus := eventbustest.NewBus(t) + dialer := &tsdial.Dialer{} dialer.SetNetMon(netmon.NewStatic()) @@ -239,6 +242,7 @@ func TestDirectProxyManual(t *testing.T) { }, Dialer: dialer, ControlKnobs: &controlknobs.Knobs{}, + Bus: bus, } d, err := NewDirect(opts) if err != nil { @@ -263,6 +267,8 @@ func TestHTTPSWithProxy(t *testing.T) { testHTTPS(t, true) } func testHTTPS(t *testing.T, withProxy bool) { bakedroots.ResetForTest(t, tlstest.TestRootCA()) + bus := eventbustest.NewBus(t) + controlLn, err := tls.Listen("tcp", "127.0.0.1:0", tlstest.ControlPlane.ServerTLSConfig()) if err != nil { t.Fatal(err) @@ -327,6 +333,7 @@ func testHTTPS(t *testing.T, withProxy bool) { t.Logf("PopBrowserURL: %q", url) }, Dialer: dialer, + Bus: bus, } d, err := NewDirect(opts) if err != nil { diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 47283a673..b9e26cc98 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -14,6 +14,7 @@ import ( "fmt" "io" "log" + "math/rand/v2" "net" "net/http" "net/netip" @@ -52,6 +53,7 @@ import ( "tailscale.com/types/ptr" "tailscale.com/types/tkatype" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/multierr" "tailscale.com/util/singleflight" "tailscale.com/util/syspolicy/pkey" @@ -63,30 +65,31 @@ import ( // Direct is the client that connects to a tailcontrol server for a node. 
type Direct struct { - httpc *http.Client // HTTP client used to talk to tailcontrol - interceptedDial *atomic.Bool // if non-nil, pointer to bool whether ScreenTime intercepted our dial - dialer *tsdial.Dialer - dnsCache *dnscache.Resolver - controlKnobs *controlknobs.Knobs // always non-nil - serverURL string // URL of the tailcontrol server - clock tstime.Clock - logf logger.Logf - netMon *netmon.Monitor // non-nil - health *health.Tracker - discoPubKey key.DiscoPublic - getMachinePrivKey func() (key.MachinePrivate, error) - debugFlags []string - skipIPForwardingCheck bool - pinger Pinger - polc policyclient.Client // always non-nil - popBrowser func(url string) // or nil - c2nHandler http.Handler // or nil - onClientVersion func(*tailcfg.ClientVersion) // or nil - onControlTime func(time.Time) // or nil - onTailnetDefaultAutoUpdate func(bool) // or nil - panicOnUse bool // if true, panic if client is used (for testing) - closedCtx context.Context // alive until Direct.Close is called - closeCtx context.CancelFunc // cancels closedCtx + httpc *http.Client // HTTP client used to talk to tailcontrol + interceptedDial *atomic.Bool // if non-nil, pointer to bool whether ScreenTime intercepted our dial + dialer *tsdial.Dialer + dnsCache *dnscache.Resolver + controlKnobs *controlknobs.Knobs // always non-nil + serverURL string // URL of the tailcontrol server + clock tstime.Clock + logf logger.Logf + netMon *netmon.Monitor // non-nil + health *health.Tracker + discoPubKey key.DiscoPublic + busClient *eventbus.Client + clientVersionPub *eventbus.Publisher[tailcfg.ClientVersion] + autoUpdatePub *eventbus.Publisher[AutoUpdate] + controlTimePub *eventbus.Publisher[ControlTime] + getMachinePrivKey func() (key.MachinePrivate, error) + debugFlags []string + skipIPForwardingCheck bool + pinger Pinger + popBrowser func(url string) // or nil + polc policyclient.Client // always non-nil + c2nHandler http.Handler // or nil + panicOnUse bool // if true, panic if client is used (for testing) + closedCtx context.Context // alive until Direct.Close is called + closeCtx context.CancelFunc // cancels closedCtx dialPlan ControlDialPlanner // can be nil @@ -107,6 +110,8 @@ type Direct struct { tkaHead string lastPingURL string // last PingRequest.URL received, for dup suppression connectionHandleForTest string // sent in MapRequest.ConnectionHandleForTest + + controlClientID int64 // Random ID used to differentiate clients for consumers of messages. 
} // Observer is implemented by users of the control client (such as LocalBackend) @@ -120,26 +125,24 @@ type Observer interface { } type Options struct { - Persist persist.Persist // initial persistent data - GetMachinePrivateKey func() (key.MachinePrivate, error) // returns the machine key to use - ServerURL string // URL of the tailcontrol server - AuthKey string // optional node auth key for auto registration - Clock tstime.Clock - Hostinfo *tailcfg.Hostinfo // non-nil passes ownership, nil means to use default using os.Hostname, etc - DiscoPublicKey key.DiscoPublic - PolicyClient policyclient.Client // or nil for none - Logf logger.Logf - HTTPTestClient *http.Client // optional HTTP client to use (for tests only) - NoiseTestClient *http.Client // optional HTTP client to use for noise RPCs (tests only) - DebugFlags []string // debug settings to send to control - HealthTracker *health.Tracker - PopBrowserURL func(url string) // optional func to open browser - OnClientVersion func(*tailcfg.ClientVersion) // optional func to inform GUI of client version status - OnControlTime func(time.Time) // optional func to notify callers of new time from control - OnTailnetDefaultAutoUpdate func(bool) // optional func to inform GUI of default auto-update setting for the tailnet - Dialer *tsdial.Dialer // non-nil - C2NHandler http.Handler // or nil - ControlKnobs *controlknobs.Knobs // or nil to ignore + Persist persist.Persist // initial persistent data + GetMachinePrivateKey func() (key.MachinePrivate, error) // returns the machine key to use + ServerURL string // URL of the tailcontrol server + AuthKey string // optional node auth key for auto registration + Clock tstime.Clock + Hostinfo *tailcfg.Hostinfo // non-nil passes ownership, nil means to use default using os.Hostname, etc + DiscoPublicKey key.DiscoPublic + PolicyClient policyclient.Client // or nil for none + Logf logger.Logf + HTTPTestClient *http.Client // optional HTTP client to use (for tests only) + NoiseTestClient *http.Client // optional HTTP client to use for noise RPCs (tests only) + DebugFlags []string // debug settings to send to control + HealthTracker *health.Tracker + PopBrowserURL func(url string) // optional func to open browser + Dialer *tsdial.Dialer // non-nil + C2NHandler http.Handler // or nil + ControlKnobs *controlknobs.Knobs // or nil to ignore + Bus *eventbus.Bus // Observer is called when there's a change in status to report // from the control client. 
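Note: with the OnClientVersion/OnControlTime/OnTailnetDefaultAutoUpdate callback fields gone from Options, interested code subscribes to the corresponding messages on the bus passed in Options.Bus. A minimal sketch of such a consumer, using only the types added in this change; the package name, function name, client name, and log output are illustrative, not part of the patch:

    package example

    import (
        "log"

        "tailscale.com/control/controlclient"
        "tailscale.com/tailcfg"
        "tailscale.com/util/eventbus"
    )

    // watchControlEvents drains the topics that used to be delivered via
    // callbacks. It returns when the bus client is closed.
    func watchControlEvents(bus *eventbus.Bus) {
        ec := bus.Client("example.controlwatcher") // illustrative client name
        versions := eventbus.Subscribe[tailcfg.ClientVersion](ec)
        autoUpdates := eventbus.Subscribe[controlclient.AutoUpdate](ec)
        for {
            select {
            case <-versions.Done():
                return // subscribers on one client close together when ec is closed
            case cv := <-versions.Events():
                log.Printf("client version status: %+v", cv)
            case au := <-autoUpdates.Events():
                log.Printf("tailnet default auto-update (client %d): %v", au.ClientID, au.Value)
            }
        }
    }
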
@@ -287,33 +290,32 @@ func NewDirect(opts Options) (*Direct, error) { } c := &Direct{ - httpc: httpc, - interceptedDial: interceptedDial, - controlKnobs: opts.ControlKnobs, - getMachinePrivKey: opts.GetMachinePrivateKey, - serverURL: opts.ServerURL, - clock: opts.Clock, - logf: opts.Logf, - persist: opts.Persist.View(), - authKey: opts.AuthKey, - discoPubKey: opts.DiscoPublicKey, - debugFlags: opts.DebugFlags, - netMon: netMon, - health: opts.HealthTracker, - skipIPForwardingCheck: opts.SkipIPForwardingCheck, - pinger: opts.Pinger, - polc: cmp.Or(opts.PolicyClient, policyclient.Client(policyclient.NoPolicyClient{})), - popBrowser: opts.PopBrowserURL, - onClientVersion: opts.OnClientVersion, - onTailnetDefaultAutoUpdate: opts.OnTailnetDefaultAutoUpdate, - onControlTime: opts.OnControlTime, - c2nHandler: opts.C2NHandler, - dialer: opts.Dialer, - dnsCache: dnsCache, - dialPlan: opts.DialPlan, + httpc: httpc, + interceptedDial: interceptedDial, + controlKnobs: opts.ControlKnobs, + getMachinePrivKey: opts.GetMachinePrivateKey, + serverURL: opts.ServerURL, + clock: opts.Clock, + logf: opts.Logf, + persist: opts.Persist.View(), + authKey: opts.AuthKey, + discoPubKey: opts.DiscoPublicKey, + debugFlags: opts.DebugFlags, + netMon: netMon, + health: opts.HealthTracker, + skipIPForwardingCheck: opts.SkipIPForwardingCheck, + pinger: opts.Pinger, + polc: cmp.Or(opts.PolicyClient, policyclient.Client(policyclient.NoPolicyClient{})), + popBrowser: opts.PopBrowserURL, + c2nHandler: opts.C2NHandler, + dialer: opts.Dialer, + dnsCache: dnsCache, + dialPlan: opts.DialPlan, } c.closedCtx, c.closeCtx = context.WithCancel(context.Background()) + c.controlClientID = rand.Int64() + if opts.Hostinfo == nil { c.SetHostinfo(hostinfo.New()) } else { @@ -331,6 +333,12 @@ func NewDirect(opts Options) (*Direct, error) { if strings.Contains(opts.ServerURL, "controlplane.tailscale.com") && envknob.Bool("TS_PANIC_IF_HIT_MAIN_CONTROL") { c.panicOnUse = true } + + c.busClient = opts.Bus.Client("controlClient.direct") + c.clientVersionPub = eventbus.Publish[tailcfg.ClientVersion](c.busClient) + c.autoUpdatePub = eventbus.Publish[AutoUpdate](c.busClient) + c.controlTimePub = eventbus.Publish[ControlTime](c.busClient) + return c, nil } @@ -340,6 +348,7 @@ func (c *Direct) Close() error { c.mu.Lock() defer c.mu.Unlock() + c.busClient.Close() if c.noiseClient != nil { if err := c.noiseClient.Close(); err != nil { return err @@ -826,6 +835,23 @@ func (c *Direct) SendUpdate(ctx context.Context) error { return c.sendMapRequest(ctx, false, nil) } +// ClientID returns the ControlClientID of the controlClient +func (c *Direct) ClientID() int64 { + return c.controlClientID +} + +// AutoUpdate wraps a bool for naming on the eventbus +type AutoUpdate struct { + ClientID int64 // The ID field is used for consumers to differentiate instances of Direct + Value bool +} + +// ControlTime wraps a [time.Time] for naming on the eventbus +type ControlTime struct { + ClientID int64 // The ID field is used for consumers to differentiate instances of Direct + Value time.Time +} + // If we go more than watchdogTimeout without hearing from the server, // end the long poll. We should be receiving a keep alive ping // every minute. 
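Note: the ClientID carried by AutoUpdate and ControlTime is what lets a subscriber that hears from several Direct instances keep only the events of the one it cares about. A small sketch of such a filter, assuming the wanted ID came from (*Direct).ClientID(); the helper name is illustrative:

    package example

    import (
        "time"

        "tailscale.com/control/controlclient"
    )

    // controlTimeFrom reports the control time carried by ct and whether it
    // was published by the control client identified by wantID.
    func controlTimeFrom(ct controlclient.ControlTime, wantID int64) (time.Time, bool) {
        if ct.ClientID != wantID {
            return time.Time{}, false
        }
        return ct.Value, true
    }
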
@@ -1085,14 +1111,12 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap c.logf("netmap: control says to open URL %v; no popBrowser func", u) } } - if resp.ClientVersion != nil && c.onClientVersion != nil { - c.onClientVersion(resp.ClientVersion) + if resp.ClientVersion != nil { + c.clientVersionPub.Publish(*resp.ClientVersion) } if resp.ControlTime != nil && !resp.ControlTime.IsZero() { c.logf.JSON(1, "controltime", resp.ControlTime.UTC()) - if c.onControlTime != nil { - c.onControlTime(*resp.ControlTime) - } + c.controlTimePub.Publish(ControlTime{c.controlClientID, *resp.ControlTime}) } if resp.KeepAlive { vlogf("netmap: got keep-alive") @@ -1112,9 +1136,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap continue } if au, ok := resp.DefaultAutoUpdate.Get(); ok { - if c.onTailnetDefaultAutoUpdate != nil { - c.onTailnetDefaultAutoUpdate(au) - } + c.autoUpdatePub.Publish(AutoUpdate{c.controlClientID, au}) } metricMapResponseMap.Add(1) diff --git a/control/controlclient/direct_test.go b/control/controlclient/direct_test.go index e2a6f9fa4..bba76d6f0 100644 --- a/control/controlclient/direct_test.go +++ b/control/controlclient/direct_test.go @@ -17,12 +17,14 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/tailcfg" "tailscale.com/types/key" + "tailscale.com/util/eventbus/eventbustest" ) func TestNewDirect(t *testing.T) { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni + bus := eventbustest.NewBus(t) k := key.NewMachine() opts := Options{ @@ -32,6 +34,7 @@ func TestNewDirect(t *testing.T) { return k, nil }, Dialer: tsdial.NewDialer(netmon.NewStatic()), + Bus: bus, } c, err := NewDirect(opts) if err != nil { @@ -99,6 +102,7 @@ func TestTsmpPing(t *testing.T) { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni + bus := eventbustest.NewBus(t) k := key.NewMachine() opts := Options{ @@ -108,6 +112,7 @@ func TestTsmpPing(t *testing.T) { return k, nil }, Dialer: tsdial.NewDialer(netmon.NewStatic()), + Bus: bus, } c, err := NewDirect(opts) diff --git a/ipn/ipnlocal/expiry.go b/ipn/ipnlocal/expiry.go index d11199815..3d20d57b4 100644 --- a/ipn/ipnlocal/expiry.go +++ b/ipn/ipnlocal/expiry.go @@ -6,12 +6,14 @@ package ipnlocal import ( "time" + "tailscale.com/control/controlclient" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus" ) // For extra defense-in-depth, when we're testing expired nodes we check @@ -40,14 +42,46 @@ type expiryManager struct { logf logger.Logf clock tstime.Clock + + eventClient *eventbus.Client + controlTimeSub *eventbus.Subscriber[controlclient.ControlTime] + subsDoneCh chan struct{} // closed when consumeEventbusTopics returns } -func newExpiryManager(logf logger.Logf) *expiryManager { - return &expiryManager{ +func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager { + em := &expiryManager{ previouslyExpired: map[tailcfg.StableNodeID]bool{}, logf: logf, clock: tstime.StdClock{}, } + + em.eventClient = bus.Client("ipnlocal.expiryManager") + em.controlTimeSub = eventbus.Subscribe[controlclient.ControlTime](em.eventClient) + + em.subsDoneCh = make(chan struct{}) + go em.consumeEventbusTopics() + + return em +} + +// consumeEventbusTopics consumes events from all relevant +// [eventbus.Subscriber]'s and passes them to their related handler. 
Events are +// always handled in the order they are received, i.e. the next event is not +// read until the previous event's handler has returned. It returns when the +// [controlclient.ControlTime] subscriber is closed, which is interpreted to be the +// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either +// all open or all closed). +func (em *expiryManager) consumeEventbusTopics() { + defer close(em.subsDoneCh) + + for { + select { + case <-em.controlTimeSub.Done(): + return + case time := <-em.controlTimeSub.Events(): + em.onControlTime(time.Value) + } + } } // onControlTime is called whenever we receive a new timestamp from the control @@ -218,6 +252,11 @@ func (em *expiryManager) nextPeerExpiry(nm *netmap.NetworkMap, localNow time.Tim return nextExpiry } +func (em *expiryManager) close() { + em.eventClient.Close() + <-em.subsDoneCh +} + // ControlNow estimates the current time on the control server, calculated as // localNow + the delta between local and control server clocks as recorded // when the LocalBackend last received a time message from the control server. diff --git a/ipn/ipnlocal/expiry_test.go b/ipn/ipnlocal/expiry_test.go index a2b10fe32..2c646ca72 100644 --- a/ipn/ipnlocal/expiry_test.go +++ b/ipn/ipnlocal/expiry_test.go @@ -14,6 +14,7 @@ import ( "tailscale.com/tstest" "tailscale.com/types/key" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus/eventbustest" ) func TestFlagExpiredPeers(t *testing.T) { @@ -110,7 +111,8 @@ func TestFlagExpiredPeers(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - em := newExpiryManager(t.Logf) + bus := eventbustest.NewBus(t) + em := newExpiryManager(t.Logf, bus) em.clock = tstest.NewClock(tstest.ClockOpts{Start: now}) if tt.controlTime != nil { em.onControlTime(*tt.controlTime) @@ -240,7 +242,8 @@ func TestNextPeerExpiry(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - em := newExpiryManager(t.Logf) + bus := eventbustest.NewBus(t) + em := newExpiryManager(t.Logf, bus) em.clock = tstest.NewClock(tstest.ClockOpts{Start: now}) got := em.nextPeerExpiry(tt.netmap, now) if !got.Equal(tt.want) { @@ -253,7 +256,8 @@ func TestNextPeerExpiry(t *testing.T) { t.Run("ClockSkew", func(t *testing.T) { t.Logf("local time: %q", now.Format(time.RFC3339)) - em := newExpiryManager(t.Logf) + bus := eventbustest.NewBus(t) + em := newExpiryManager(t.Logf, bus) em.clock = tstest.NewClock(tstest.ClockOpts{Start: now}) // The local clock is "running fast"; our clock skew is -2h diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 6108aa830..c98a0810d 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -99,6 +99,7 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/deephash" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/util/goroutines" "tailscale.com/util/mak" "tailscale.com/util/multierr" @@ -202,6 +203,10 @@ type LocalBackend struct { keyLogf logger.Logf // for printing list of peers on change statsLogf logger.Logf // for printing peers stats on change sys *tsd.System + eventClient *eventbus.Client + clientVersionSub *eventbus.Subscriber[tailcfg.ClientVersion] + autoUpdateSub *eventbus.Subscriber[controlclient.AutoUpdate] + subsDoneCh chan struct{} // closed when consumeEventbusTopics returns health *health.Tracker // always non-nil polc policyclient.Client // always non-nil metrics metrics @@ -525,7 +530,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo 
backendLogID: logID, state: ipn.NoState, portpoll: new(portlist.Poller), - em: newExpiryManager(logf), + em: newExpiryManager(logf, sys.Bus.Get()), loginFlags: loginFlags, clock: clock, selfUpdateProgress: make([]ipnstate.UpdateProgress, 0), @@ -533,7 +538,11 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo captiveCtx: captiveCtx, captiveCancel: nil, // so that we start checkCaptivePortalLoop when Running needsCaptiveDetection: make(chan bool), + subsDoneCh: make(chan struct{}), } + b.eventClient = b.Sys().Bus.Get().Client("ipnlocal.LocalBackend") + b.clientVersionSub = eventbus.Subscribe[tailcfg.ClientVersion](b.eventClient) + b.autoUpdateSub = eventbus.Subscribe[controlclient.AutoUpdate](b.eventClient) nb := newNodeBackend(ctx, b.sys.Bus.Get()) b.currentNodeAtomic.Store(nb) nb.ready() @@ -604,9 +613,32 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } } } + go b.consumeEventbusTopics() return b, nil } +// consumeEventbusTopics consumes events from all relevant +// [eventbus.Subscriber]'s and passes them to their related handler. Events are +// always handled in the order they are received, i.e. the next event is not +// read until the previous event's handler has returned. It returns when the +// [tailcfg.ClientVersion] subscriber is closed, which is interpreted to be the +// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either +// all open or all closed). +func (b *LocalBackend) consumeEventbusTopics() { + defer close(b.subsDoneCh) + + for { + select { + case <-b.clientVersionSub.Done(): + return + case clientVersion := <-b.clientVersionSub.Events(): + b.onClientVersion(&clientVersion) + case au := <-b.autoUpdateSub.Events(): + b.onTailnetDefaultAutoUpdate(au.Value) + } + } +} + func (b *LocalBackend) Clock() tstime.Clock { return b.clock } func (b *LocalBackend) Sys() *tsd.System { return b.sys } @@ -1065,6 +1097,17 @@ func (b *LocalBackend) ClearCaptureSink() { // Shutdown halts the backend and all its sub-components. The backend // can no longer be used after Shutdown returns. func (b *LocalBackend) Shutdown() { + // Close the [eventbus.Client] and wait for LocalBackend.consumeEventbusTopics + // to return. Do this before acquiring b.mu: + // 1. LocalBackend.consumeEventbusTopics event handlers also acquire b.mu, + // they can deadlock with c.Shutdown(). + // 2. LocalBackend.consumeEventbusTopics event handlers may not guard against + // undesirable post/in-progress LocalBackend.Shutdown() behaviors. + b.eventClient.Close() + <-b.subsDoneCh + + b.em.close() + b.mu.Lock() if b.shutdownCalled { b.mu.Unlock() @@ -2465,33 +2508,32 @@ func (b *LocalBackend) Start(opts ipn.Options) error { cb() } } + // TODO(apenwarr): The only way to change the ServerURL is to // re-run b.Start, because this is the only place we create a // new controlclient. EditPrefs allows you to overwrite ServerURL, // but it won't take effect until the next Start. 
cc, err := b.getNewControlClientFuncLocked()(controlclient.Options{ - GetMachinePrivateKey: b.createGetMachinePrivateKeyFunc(), - Logf: logger.WithPrefix(b.logf, "control: "), - Persist: *persistv, - ServerURL: serverURL, - AuthKey: opts.AuthKey, - Hostinfo: hostinfo, - HTTPTestClient: httpTestClient, - DiscoPublicKey: discoPublic, - DebugFlags: debugFlags, - HealthTracker: b.health, - PolicyClient: b.sys.PolicyClientOrDefault(), - Pinger: b, - PopBrowserURL: b.tellClientToBrowseToURL, - OnClientVersion: b.onClientVersion, - OnTailnetDefaultAutoUpdate: b.onTailnetDefaultAutoUpdate, - OnControlTime: b.em.onControlTime, - Dialer: b.Dialer(), - Observer: b, - C2NHandler: http.HandlerFunc(b.handleC2N), - DialPlan: &b.dialPlan, // pointer because it can't be copied - ControlKnobs: b.sys.ControlKnobs(), - Shutdown: ccShutdown, + GetMachinePrivateKey: b.createGetMachinePrivateKeyFunc(), + Logf: logger.WithPrefix(b.logf, "control: "), + Persist: *persistv, + ServerURL: serverURL, + AuthKey: opts.AuthKey, + Hostinfo: hostinfo, + HTTPTestClient: httpTestClient, + DiscoPublicKey: discoPublic, + DebugFlags: debugFlags, + HealthTracker: b.health, + PolicyClient: b.sys.PolicyClientOrDefault(), + Pinger: b, + PopBrowserURL: b.tellClientToBrowseToURL, + Dialer: b.Dialer(), + Observer: b, + C2NHandler: http.HandlerFunc(b.handleC2N), + DialPlan: &b.dialPlan, // pointer because it can't be copied + ControlKnobs: b.sys.ControlKnobs(), + Shutdown: ccShutdown, + Bus: b.sys.Bus.Get(), // Don't warn about broken Linux IP forwarding when // netstack is being used. @@ -4482,7 +4524,6 @@ func (b *LocalBackend) changeDisablesExitNodeLocked(prefs ipn.PrefsView, change // but wasn't empty before, then the change disables // exit node usage. return tmpPrefs.ExitNodeID == "" - } // adjustEditPrefsLocked applies additional changes to mp if necessary, @@ -8001,7 +8042,6 @@ func isAllowedAutoExitNodeID(polc policyclient.Client, exitNodeID tailcfg.Stable } if nodes, _ := polc.GetStringArray(pkey.AllowedSuggestedExitNodes, nil); nodes != nil { return slices.Contains(nodes, string(exitNodeID)) - } return true // no policy configured; allow all exit nodes } @@ -8145,9 +8185,7 @@ func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcf return servicesList } -var ( - metricCurrentWatchIPNBus = clientmetric.NewGauge("localbackend_current_watch_ipn_bus") -) +var metricCurrentWatchIPNBus = clientmetric.NewGauge("localbackend_current_watch_ipn_bus") func (b *LocalBackend) stateEncrypted() opt.Bool { switch runtime.GOOS { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 7d1c452f3..261d5c4c2 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -59,6 +59,7 @@ import ( "tailscale.com/types/views" "tailscale.com/util/dnsname" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/set" @@ -455,7 +456,8 @@ func (panicOnUseTransport) RoundTrip(*http.Request) (*http.Response, error) { } func newTestLocalBackend(t testing.TB) *LocalBackend { - return newTestLocalBackendWithSys(t, tsd.NewSystem()) + bus := eventbustest.NewBus(t) + return newTestLocalBackendWithSys(t, tsd.NewSystemWithBus(bus)) } // newTestLocalBackendWithSys creates a new LocalBackend with the given tsd.System. 
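Note: the test changes in this file lean on tsd.NewSystemWithBus (added in the tsd/tsd.go hunk below) so that a test and the backend under test share one event bus. A minimal sketch of that wiring, assuming a helper along these lines; the helper itself is illustrative, not part of this change:

    package example

    import (
        "testing"

        "tailscale.com/tsd"
        "tailscale.com/util/eventbus/eventbustest"
    )

    // newTestSystem builds a tsd.System whose event bus is owned by the test,
    // letting the test subscribe to the same bus the backend publishes on.
    func newTestSystem(t testing.TB) *tsd.System {
        bus := eventbustest.NewBus(t) // shut down automatically when the test ends
        return tsd.NewSystemWithBus(bus)
    }
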
@@ -533,7 +535,6 @@ func TestZeroExitNodeViaLocalAPI(t *testing.T) { ExitNodeID: "", }, }, user) - if err != nil { t.Fatalf("enabling first exit node: %v", err) } @@ -543,7 +544,6 @@ func TestZeroExitNodeViaLocalAPI(t *testing.T) { if got, want := pv.InternalExitNodePrior(), tailcfg.StableNodeID(""); got != want { t.Fatalf("unexpected InternalExitNodePrior %q, want: %q", got, want) } - } func TestSetUseExitNodeEnabled(t *testing.T) { @@ -3619,7 +3619,8 @@ func TestPreferencePolicyInfo(t *testing.T) { prefs := defaultPrefs.AsStruct() pp.set(prefs, tt.initialValue) - sys := tsd.NewSystem() + bus := eventbustest.NewBus(t) + sys := tsd.NewSystemWithBus(bus) sys.PolicyClient.Set(polc) lb := newTestLocalBackendWithSys(t, sys) @@ -5786,7 +5787,8 @@ func TestNotificationTargetMatch(t *testing.T) { type newTestControlFn func(tb testing.TB, opts controlclient.Options) controlclient.Client func newLocalBackendWithTestControl(t *testing.T, enableLogging bool, newControl newTestControlFn) *LocalBackend { - return newLocalBackendWithSysAndTestControl(t, enableLogging, tsd.NewSystem(), newControl) + bus := eventbustest.NewBus(t) + return newLocalBackendWithSysAndTestControl(t, enableLogging, tsd.NewSystemWithBus(bus), newControl) } func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys *tsd.System, newControl newTestControlFn) *LocalBackend { @@ -5945,7 +5947,6 @@ func (w *notificationWatcher) watch(mask ipn.NotifyWatchOpt, wanted []wantedNoti return true }) - }() <-watchAddedCh } diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 842b75c43..93ecd977f 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -35,6 +35,7 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/persist" "tailscale.com/types/tkatype" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/set" ) @@ -49,6 +50,7 @@ func fakeControlClient(t *testing.T, c *http.Client) *controlclient.Auto { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni + bus := eventbustest.NewBus(t) k := key.NewMachine() opts := controlclient.Options{ @@ -61,6 +63,7 @@ func fakeControlClient(t *testing.T, c *http.Client) *controlclient.Auto { NoiseTestClient: c, Observer: observerFunc(func(controlclient.Status) {}), Dialer: tsdial.NewDialer(netmon.NewStatic()), + Bus: bus, } cc, err := controlclient.NewNoStart(opts) diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index e2561cba9..86b56ab4b 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -33,6 +33,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/syspolicy/policyclient" @@ -240,11 +241,15 @@ func TestServeConfigForeground(t *testing.T) { err := b.SetServeConfig(&ipn.ServeConfig{ Foreground: map[string]*ipn.ServeConfig{ - session1: {TCP: map[uint16]*ipn.TCPPortHandler{ - 443: {TCPForward: "http://localhost:3000"}}, + session1: { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {TCPForward: "http://localhost:3000"}, + }, }, - session2: {TCP: map[uint16]*ipn.TCPPortHandler{ - 999: {TCPForward: "http://localhost:4000"}}, + session2: { + TCP: map[uint16]*ipn.TCPPortHandler{ + 999: {TCPForward: "http://localhost:4000"}, + }, }, }, }, "") @@ -267,8 +272,10 @@ func TestServeConfigForeground(t *testing.T) { 5000: {TCPForward: 
"http://localhost:5000"}, }, Foreground: map[string]*ipn.ServeConfig{ - session2: {TCP: map[uint16]*ipn.TCPPortHandler{ - 999: {TCPForward: "http://localhost:4000"}}, + session2: { + TCP: map[uint16]*ipn.TCPPortHandler{ + 999: {TCPForward: "http://localhost:4000"}, + }, }, }, }, "") @@ -491,7 +498,6 @@ func TestServeConfigServices(t *testing.T) { } }) } - } func TestServeConfigETag(t *testing.T) { @@ -659,6 +665,7 @@ func TestServeHTTPProxyPath(t *testing.T) { }) } } + func TestServeHTTPProxyHeaders(t *testing.T) { b := newTestBackend(t) @@ -859,7 +866,6 @@ func Test_reverseProxyConfiguration(t *testing.T) { wantsURL: mustCreateURL(t, "https://example3.com"), }, }) - } func mustCreateURL(t *testing.T, u string) url.URL { @@ -878,7 +884,8 @@ func newTestBackend(t *testing.T, opts ...any) *LocalBackend { logf = logger.WithPrefix(tstest.WhileTestRunningLogger(t), "... ") } - sys := tsd.NewSystem() + bus := eventbustest.NewBus(t) + sys := tsd.NewSystemWithBus(bus) for _, o := range opts { switch v := o.(type) { @@ -952,13 +959,13 @@ func newTestBackend(t *testing.T, opts ...any) *LocalBackend { func TestServeFileOrDirectory(t *testing.T) { td := t.TempDir() writeFile := func(suffix, contents string) { - if err := os.WriteFile(filepath.Join(td, suffix), []byte(contents), 0600); err != nil { + if err := os.WriteFile(filepath.Join(td, suffix), []byte(contents), 0o600); err != nil { t.Fatal(err) } } writeFile("foo", "this is foo") writeFile("bar", "this is bar") - os.MkdirAll(filepath.Join(td, "subdir"), 0700) + os.MkdirAll(filepath.Join(td, "subdir"), 0o700) writeFile("subdir/file-a", "this is A") writeFile("subdir/file-b", "this is B") writeFile("subdir/file-c", "this is C") diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 4097a3773..30538f2c8 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "math/rand/v2" "net/netip" "strings" "sync" @@ -39,6 +40,7 @@ import ( "tailscale.com/types/persist" "tailscale.com/types/preftype" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/wgengine" @@ -113,10 +115,11 @@ func (nt *notifyThrottler) drain(count int) []ipn.Notify { // in the controlclient.Client, so by controlling it, we can check that // the state machine works as expected. 
type mockControl struct { - tb testing.TB - logf logger.Logf - opts controlclient.Options - paused atomic.Bool + tb testing.TB + logf logger.Logf + opts controlclient.Options + paused atomic.Bool + controlClientID int64 mu sync.Mutex persist *persist.Persist @@ -127,12 +130,13 @@ type mockControl struct { func newClient(tb testing.TB, opts controlclient.Options) *mockControl { return &mockControl{ - tb: tb, - authBlocked: true, - logf: opts.Logf, - opts: opts, - shutdown: make(chan struct{}), - persist: opts.Persist.Clone(), + tb: tb, + authBlocked: true, + logf: opts.Logf, + opts: opts, + shutdown: make(chan struct{}), + persist: opts.Persist.Clone(), + controlClientID: rand.Int64(), } } @@ -287,6 +291,10 @@ func (cc *mockControl) UpdateEndpoints(endpoints []tailcfg.Endpoint) { cc.called("UpdateEndpoints") } +func (cc *mockControl) ClientID() int64 { + return cc.controlClientID +} + func (b *LocalBackend) nonInteractiveLoginForStateTest() { b.mu.Lock() if b.cc == nil { @@ -1507,7 +1515,8 @@ func newLocalBackendWithMockEngineAndControl(t *testing.T, enableLogging bool) ( dialer := &tsdial.Dialer{Logf: logf} dialer.SetNetMon(netmon.NewStatic()) - sys := tsd.NewSystem() + bus := eventbustest.NewBus(t) + sys := tsd.NewSystemWithBus(bus) sys.Set(dialer) sys.Set(dialer.NetMon()) diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index 970f798d0..046eb744d 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -35,6 +35,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/slicesx" "tailscale.com/wgengine" ) @@ -158,7 +159,6 @@ func TestWhoIsArgTypes(t *testing.T) { t.Fatalf("backend called with %v; want %v", k, keyStr) } return match() - }, peerCaps: map[netip.Addr]tailcfg.PeerCapMap{ netip.MustParseAddr("100.101.102.103"): map[tailcfg.PeerCapability][]tailcfg.RawMessage{ @@ -336,7 +336,7 @@ func TestServeWatchIPNBus(t *testing.T) { func newTestLocalBackend(t testing.TB) *ipnlocal.LocalBackend { var logf logger.Logf = logger.Discard - sys := tsd.NewSystem() + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) store := new(mem.Store) sys.Set(store) eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) diff --git a/tsd/tsd.go b/tsd/tsd.go index bd333bd31..e4a512e4b 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -80,9 +80,17 @@ type System struct { // NewSystem constructs a new otherwise-empty [System] with a // freshly-constructed event bus populated. -func NewSystem() *System { +func NewSystem() *System { return NewSystemWithBus(eventbus.New()) } + +// NewSystemWithBus constructs a new otherwise-empty [System] with an +// eventbus provided by the caller. The provided bus must not be nil. +// This is mainly intended for testing; for production use call [NewBus]. +func NewSystemWithBus(bus *eventbus.Bus) *System { + if bus == nil { + panic("nil eventbus") + } sys := new(System) - sys.Set(eventbus.New()) + sys.Set(bus) return sys } diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index 98536ae0a..b7375adc4 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -15,7 +15,7 @@ import ( // NewBus constructs an [eventbus.Bus] that will be shut automatically when // its controlling test ends. 
-func NewBus(t *testing.T) *eventbus.Bus { +func NewBus(t testing.TB) *eventbus.Bus { bus := eventbus.New() t.Cleanup(bus.Close) return bus From 082c6a25b02fc4caeea8cfeb1705b00f52dc132e Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 15 Sep 2025 09:04:00 -0700 Subject: [PATCH 1307/1708] client/systray: only send clipboard notification on success Fixes #14430 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/systray/systray.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/systray/systray.go b/client/systray/systray.go index bd7c15972..536cfe182 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -540,9 +540,9 @@ func (menu *Menu) copyTailscaleIP(device *ipnstate.PeerStatus) { err := clipboard.WriteAll(ip) if err != nil { log.Printf("clipboard error: %v", err) + } else { + menu.sendNotification(fmt.Sprintf("Copied Address for %v", name), ip) } - - menu.sendNotification(fmt.Sprintf("Copied Address for %v", name), ip) } // sendNotification sends a desktop notification with the given title and content. From 17ffa8013835d41801f3a18ea957ddab0215d247 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 13 Sep 2025 20:20:08 -0700 Subject: [PATCH 1308/1708] feature/featuretags: add auto-generated constants for all modular features So code (in upcoming PRs) can test for the build tags with consts and get dead code elimination from the compiler+linker. Updates #12614 Change-Id: If6160453ffd01b798f09894141e7631a93385941 Signed-off-by: Brad Fitzpatrick --- cmd/featuretags/featuretags.go | 2 +- feature/featuretags/feature_aws_disabled.go | 13 +++++ feature/featuretags/feature_aws_enabled.go | 13 +++++ feature/featuretags/feature_bird_disabled.go | 13 +++++ feature/featuretags/feature_bird_enabled.go | 13 +++++ .../featuretags/feature_capture_disabled.go | 13 +++++ .../featuretags/feature_capture_enabled.go | 13 +++++ .../feature_completion_disabled.go | 13 +++++ .../featuretags/feature_completion_enabled.go | 13 +++++ .../feature_debugeventbus_disabled.go | 13 +++++ .../feature_debugeventbus_enabled.go | 13 +++++ .../feature_desktop_sessions_disabled.go | 13 +++++ .../feature_desktop_sessions_enabled.go | 13 +++++ feature/featuretags/feature_drive_disabled.go | 13 +++++ feature/featuretags/feature_drive_enabled.go | 13 +++++ feature/featuretags/feature_kube_disabled.go | 13 +++++ feature/featuretags/feature_kube_enabled.go | 13 +++++ .../feature_relayserver_disabled.go | 13 +++++ .../feature_relayserver_enabled.go | 13 +++++ feature/featuretags/feature_serve_disabled.go | 13 +++++ feature/featuretags/feature_serve_enabled.go | 13 +++++ feature/featuretags/feature_ssh_disabled.go | 13 +++++ feature/featuretags/feature_ssh_enabled.go | 13 +++++ .../featuretags/feature_syspolicy_disabled.go | 13 +++++ .../featuretags/feature_syspolicy_enabled.go | 13 +++++ .../featuretags/feature_systray_disabled.go | 13 +++++ .../featuretags/feature_systray_enabled.go | 13 +++++ .../featuretags/feature_taildrop_disabled.go | 13 +++++ .../featuretags/feature_taildrop_enabled.go | 13 +++++ .../feature_tailnetlock_disabled.go | 13 +++++ .../feature_tailnetlock_enabled.go | 13 +++++ feature/featuretags/feature_tap_disabled.go | 13 +++++ feature/featuretags/feature_tap_enabled.go | 13 +++++ feature/featuretags/feature_tpm_disabled.go | 13 +++++ feature/featuretags/feature_tpm_enabled.go | 13 +++++ .../featuretags/feature_wakeonlan_disabled.go | 13 +++++ .../featuretags/feature_wakeonlan_enabled.go | 13 +++++ 
.../featuretags/feature_webclient_disabled.go | 13 +++++ .../featuretags/feature_webclient_enabled.go | 13 +++++ feature/featuretags/featuretags.go | 50 +++++++++++-------- feature/featuretags/gen-featuretags.go | 49 ++++++++++++++++++ 41 files changed, 574 insertions(+), 21 deletions(-) create mode 100644 feature/featuretags/feature_aws_disabled.go create mode 100644 feature/featuretags/feature_aws_enabled.go create mode 100644 feature/featuretags/feature_bird_disabled.go create mode 100644 feature/featuretags/feature_bird_enabled.go create mode 100644 feature/featuretags/feature_capture_disabled.go create mode 100644 feature/featuretags/feature_capture_enabled.go create mode 100644 feature/featuretags/feature_completion_disabled.go create mode 100644 feature/featuretags/feature_completion_enabled.go create mode 100644 feature/featuretags/feature_debugeventbus_disabled.go create mode 100644 feature/featuretags/feature_debugeventbus_enabled.go create mode 100644 feature/featuretags/feature_desktop_sessions_disabled.go create mode 100644 feature/featuretags/feature_desktop_sessions_enabled.go create mode 100644 feature/featuretags/feature_drive_disabled.go create mode 100644 feature/featuretags/feature_drive_enabled.go create mode 100644 feature/featuretags/feature_kube_disabled.go create mode 100644 feature/featuretags/feature_kube_enabled.go create mode 100644 feature/featuretags/feature_relayserver_disabled.go create mode 100644 feature/featuretags/feature_relayserver_enabled.go create mode 100644 feature/featuretags/feature_serve_disabled.go create mode 100644 feature/featuretags/feature_serve_enabled.go create mode 100644 feature/featuretags/feature_ssh_disabled.go create mode 100644 feature/featuretags/feature_ssh_enabled.go create mode 100644 feature/featuretags/feature_syspolicy_disabled.go create mode 100644 feature/featuretags/feature_syspolicy_enabled.go create mode 100644 feature/featuretags/feature_systray_disabled.go create mode 100644 feature/featuretags/feature_systray_enabled.go create mode 100644 feature/featuretags/feature_taildrop_disabled.go create mode 100644 feature/featuretags/feature_taildrop_enabled.go create mode 100644 feature/featuretags/feature_tailnetlock_disabled.go create mode 100644 feature/featuretags/feature_tailnetlock_enabled.go create mode 100644 feature/featuretags/feature_tap_disabled.go create mode 100644 feature/featuretags/feature_tap_enabled.go create mode 100644 feature/featuretags/feature_tpm_disabled.go create mode 100644 feature/featuretags/feature_tpm_enabled.go create mode 100644 feature/featuretags/feature_wakeonlan_disabled.go create mode 100644 feature/featuretags/feature_wakeonlan_enabled.go create mode 100644 feature/featuretags/feature_webclient_disabled.go create mode 100644 feature/featuretags/feature_webclient_enabled.go create mode 100644 feature/featuretags/gen-featuretags.go diff --git a/cmd/featuretags/featuretags.go b/cmd/featuretags/featuretags.go index 5213fda4c..c34adbb3f 100644 --- a/cmd/featuretags/featuretags.go +++ b/cmd/featuretags/featuretags.go @@ -30,7 +30,7 @@ func main() { if *list { for _, f := range slices.Sorted(maps.Keys(features)) { - fmt.Printf("%20s: %s\n", f, features[f]) + fmt.Printf("%20s: %s\n", f, features[f].Desc) } return } diff --git a/feature/featuretags/feature_aws_disabled.go b/feature/featuretags/feature_aws_disabled.go new file mode 100644 index 000000000..045feb269 --- /dev/null +++ b/feature/featuretags/feature_aws_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// 
SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_aws + +package featuretags + +// AWS is whether the binary was built with support for modular feature "AWS integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_aws" build tag. +// It's a const so it can be used for dead code elimination. +const AWS = false diff --git a/feature/featuretags/feature_aws_enabled.go b/feature/featuretags/feature_aws_enabled.go new file mode 100644 index 000000000..d935c9d26 --- /dev/null +++ b/feature/featuretags/feature_aws_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_aws + +package featuretags + +// AWS is whether the binary was built with support for modular feature "AWS integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_aws" build tag. +// It's a const so it can be used for dead code elimination. +const AWS = true diff --git a/feature/featuretags/feature_bird_disabled.go b/feature/featuretags/feature_bird_disabled.go new file mode 100644 index 000000000..986c98458 --- /dev/null +++ b/feature/featuretags/feature_bird_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_bird + +package featuretags + +// Bird is whether the binary was built with support for modular feature "Bird BGP integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_bird" build tag. +// It's a const so it can be used for dead code elimination. +const Bird = false diff --git a/feature/featuretags/feature_bird_enabled.go b/feature/featuretags/feature_bird_enabled.go new file mode 100644 index 000000000..ac9404704 --- /dev/null +++ b/feature/featuretags/feature_bird_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_bird + +package featuretags + +// Bird is whether the binary was built with support for modular feature "Bird BGP integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_bird" build tag. +// It's a const so it can be used for dead code elimination. +const Bird = true diff --git a/feature/featuretags/feature_capture_disabled.go b/feature/featuretags/feature_capture_disabled.go new file mode 100644 index 000000000..cee424542 --- /dev/null +++ b/feature/featuretags/feature_capture_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_capture + +package featuretags + +// Capture is whether the binary was built with support for modular feature "Packet capture". +// Specifically, it's whether the binary was NOT built with the "ts_omit_capture" build tag. +// It's a const so it can be used for dead code elimination. 
+const Capture = false diff --git a/feature/featuretags/feature_capture_enabled.go b/feature/featuretags/feature_capture_enabled.go new file mode 100644 index 000000000..40aabf110 --- /dev/null +++ b/feature/featuretags/feature_capture_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_capture + +package featuretags + +// Capture is whether the binary was built with support for modular feature "Packet capture". +// Specifically, it's whether the binary was NOT built with the "ts_omit_capture" build tag. +// It's a const so it can be used for dead code elimination. +const Capture = true diff --git a/feature/featuretags/feature_completion_disabled.go b/feature/featuretags/feature_completion_disabled.go new file mode 100644 index 000000000..7b3f3cb6d --- /dev/null +++ b/feature/featuretags/feature_completion_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_completion + +package featuretags + +// Completion is whether the binary was built with support for modular feature "CLI shell completion". +// Specifically, it's whether the binary was NOT built with the "ts_omit_completion" build tag. +// It's a const so it can be used for dead code elimination. +const Completion = false diff --git a/feature/featuretags/feature_completion_enabled.go b/feature/featuretags/feature_completion_enabled.go new file mode 100644 index 000000000..b6d5218f2 --- /dev/null +++ b/feature/featuretags/feature_completion_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_completion + +package featuretags + +// Completion is whether the binary was built with support for modular feature "CLI shell completion". +// Specifically, it's whether the binary was NOT built with the "ts_omit_completion" build tag. +// It's a const so it can be used for dead code elimination. +const Completion = true diff --git a/feature/featuretags/feature_debugeventbus_disabled.go b/feature/featuretags/feature_debugeventbus_disabled.go new file mode 100644 index 000000000..c826de691 --- /dev/null +++ b/feature/featuretags/feature_debugeventbus_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_debugeventbus + +package featuretags + +// DebugEventBus is whether the binary was built with support for modular feature "eventbus debug support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debugeventbus" build tag. +// It's a const so it can be used for dead code elimination. +const DebugEventBus = false diff --git a/feature/featuretags/feature_debugeventbus_enabled.go b/feature/featuretags/feature_debugeventbus_enabled.go new file mode 100644 index 000000000..068efa859 --- /dev/null +++ b/feature/featuretags/feature_debugeventbus_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_debugeventbus + +package featuretags + +// DebugEventBus is whether the binary was built with support for modular feature "eventbus debug support". 
+// Specifically, it's whether the binary was NOT built with the "ts_omit_debugeventbus" build tag. +// It's a const so it can be used for dead code elimination. +const DebugEventBus = true diff --git a/feature/featuretags/feature_desktop_sessions_disabled.go b/feature/featuretags/feature_desktop_sessions_disabled.go new file mode 100644 index 000000000..73644d911 --- /dev/null +++ b/feature/featuretags/feature_desktop_sessions_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_desktop_sessions + +package featuretags + +// DesktopSessions is whether the binary was built with support for modular feature "Desktop sessions support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_desktop_sessions" build tag. +// It's a const so it can be used for dead code elimination. +const DesktopSessions = false diff --git a/feature/featuretags/feature_desktop_sessions_enabled.go b/feature/featuretags/feature_desktop_sessions_enabled.go new file mode 100644 index 000000000..93c776a04 --- /dev/null +++ b/feature/featuretags/feature_desktop_sessions_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_desktop_sessions + +package featuretags + +// DesktopSessions is whether the binary was built with support for modular feature "Desktop sessions support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_desktop_sessions" build tag. +// It's a const so it can be used for dead code elimination. +const DesktopSessions = true diff --git a/feature/featuretags/feature_drive_disabled.go b/feature/featuretags/feature_drive_disabled.go new file mode 100644 index 000000000..550ed0bd1 --- /dev/null +++ b/feature/featuretags/feature_drive_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_drive + +package featuretags + +// Drive is whether the binary was built with support for modular feature "Tailscale Drive (file server) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_drive" build tag. +// It's a const so it can be used for dead code elimination. +const Drive = false diff --git a/feature/featuretags/feature_drive_enabled.go b/feature/featuretags/feature_drive_enabled.go new file mode 100644 index 000000000..2ed83b271 --- /dev/null +++ b/feature/featuretags/feature_drive_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_drive + +package featuretags + +// Drive is whether the binary was built with support for modular feature "Tailscale Drive (file server) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_drive" build tag. +// It's a const so it can be used for dead code elimination. 
+const Drive = true diff --git a/feature/featuretags/feature_kube_disabled.go b/feature/featuretags/feature_kube_disabled.go new file mode 100644 index 000000000..3a140e869 --- /dev/null +++ b/feature/featuretags/feature_kube_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_kube + +package featuretags + +// Kube is whether the binary was built with support for modular feature "Kubernetes integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_kube" build tag. +// It's a const so it can be used for dead code elimination. +const Kube = false diff --git a/feature/featuretags/feature_kube_enabled.go b/feature/featuretags/feature_kube_enabled.go new file mode 100644 index 000000000..1dd119a2b --- /dev/null +++ b/feature/featuretags/feature_kube_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_kube + +package featuretags + +// Kube is whether the binary was built with support for modular feature "Kubernetes integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_kube" build tag. +// It's a const so it can be used for dead code elimination. +const Kube = true diff --git a/feature/featuretags/feature_relayserver_disabled.go b/feature/featuretags/feature_relayserver_disabled.go new file mode 100644 index 000000000..e6122ef9c --- /dev/null +++ b/feature/featuretags/feature_relayserver_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_relayserver + +package featuretags + +// RelayServer is whether the binary was built with support for modular feature "Relay server". +// Specifically, it's whether the binary was NOT built with the "ts_omit_relayserver" build tag. +// It's a const so it can be used for dead code elimination. +const RelayServer = false diff --git a/feature/featuretags/feature_relayserver_enabled.go b/feature/featuretags/feature_relayserver_enabled.go new file mode 100644 index 000000000..34ed23a84 --- /dev/null +++ b/feature/featuretags/feature_relayserver_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_relayserver + +package featuretags + +// RelayServer is whether the binary was built with support for modular feature "Relay server". +// Specifically, it's whether the binary was NOT built with the "ts_omit_relayserver" build tag. +// It's a const so it can be used for dead code elimination. +const RelayServer = true diff --git a/feature/featuretags/feature_serve_disabled.go b/feature/featuretags/feature_serve_disabled.go new file mode 100644 index 000000000..a143e951f --- /dev/null +++ b/feature/featuretags/feature_serve_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_serve + +package featuretags + +// Serve is whether the binary was built with support for modular feature "Serve and Funnel support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_serve" build tag. 
+// It's a const so it can be used for dead code elimination. +const Serve = false diff --git a/feature/featuretags/feature_serve_enabled.go b/feature/featuretags/feature_serve_enabled.go new file mode 100644 index 000000000..1d1af0809 --- /dev/null +++ b/feature/featuretags/feature_serve_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_serve + +package featuretags + +// Serve is whether the binary was built with support for modular feature "Serve and Funnel support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_serve" build tag. +// It's a const so it can be used for dead code elimination. +const Serve = true diff --git a/feature/featuretags/feature_ssh_disabled.go b/feature/featuretags/feature_ssh_disabled.go new file mode 100644 index 000000000..c22be2945 --- /dev/null +++ b/feature/featuretags/feature_ssh_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_ssh + +package featuretags + +// SSH is whether the binary was built with support for modular feature "Tailscale SSH support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_ssh" build tag. +// It's a const so it can be used for dead code elimination. +const SSH = false diff --git a/feature/featuretags/feature_ssh_enabled.go b/feature/featuretags/feature_ssh_enabled.go new file mode 100644 index 000000000..52fa10b58 --- /dev/null +++ b/feature/featuretags/feature_ssh_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_ssh + +package featuretags + +// SSH is whether the binary was built with support for modular feature "Tailscale SSH support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_ssh" build tag. +// It's a const so it can be used for dead code elimination. +const SSH = true diff --git a/feature/featuretags/feature_syspolicy_disabled.go b/feature/featuretags/feature_syspolicy_disabled.go new file mode 100644 index 000000000..db73b0261 --- /dev/null +++ b/feature/featuretags/feature_syspolicy_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_syspolicy + +package featuretags + +// SystemPolicy is whether the binary was built with support for modular feature "System policy configuration (MDM) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_syspolicy" build tag. +// It's a const so it can be used for dead code elimination. +const SystemPolicy = false diff --git a/feature/featuretags/feature_syspolicy_enabled.go b/feature/featuretags/feature_syspolicy_enabled.go new file mode 100644 index 000000000..2ad332676 --- /dev/null +++ b/feature/featuretags/feature_syspolicy_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_syspolicy + +package featuretags + +// SystemPolicy is whether the binary was built with support for modular feature "System policy configuration (MDM) support". 
+// Specifically, it's whether the binary was NOT built with the "ts_omit_syspolicy" build tag. +// It's a const so it can be used for dead code elimination. +const SystemPolicy = true diff --git a/feature/featuretags/feature_systray_disabled.go b/feature/featuretags/feature_systray_disabled.go new file mode 100644 index 000000000..a358bbf6f --- /dev/null +++ b/feature/featuretags/feature_systray_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_systray + +package featuretags + +// SysTray is whether the binary was built with support for modular feature "Linux system tray". +// Specifically, it's whether the binary was NOT built with the "ts_omit_systray" build tag. +// It's a const so it can be used for dead code elimination. +const SysTray = false diff --git a/feature/featuretags/feature_systray_enabled.go b/feature/featuretags/feature_systray_enabled.go new file mode 100644 index 000000000..aebf3ad9e --- /dev/null +++ b/feature/featuretags/feature_systray_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_systray + +package featuretags + +// SysTray is whether the binary was built with support for modular feature "Linux system tray". +// Specifically, it's whether the binary was NOT built with the "ts_omit_systray" build tag. +// It's a const so it can be used for dead code elimination. +const SysTray = true diff --git a/feature/featuretags/feature_taildrop_disabled.go b/feature/featuretags/feature_taildrop_disabled.go new file mode 100644 index 000000000..5c95c28b6 --- /dev/null +++ b/feature/featuretags/feature_taildrop_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_taildrop + +package featuretags + +// Taildrop is whether the binary was built with support for modular feature "Taildrop (file sending) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_taildrop" build tag. +// It's a const so it can be used for dead code elimination. +const Taildrop = false diff --git a/feature/featuretags/feature_taildrop_enabled.go b/feature/featuretags/feature_taildrop_enabled.go new file mode 100644 index 000000000..e5212f03a --- /dev/null +++ b/feature/featuretags/feature_taildrop_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_taildrop + +package featuretags + +// Taildrop is whether the binary was built with support for modular feature "Taildrop (file sending) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_taildrop" build tag. +// It's a const so it can be used for dead code elimination. +const Taildrop = true diff --git a/feature/featuretags/feature_tailnetlock_disabled.go b/feature/featuretags/feature_tailnetlock_disabled.go new file mode 100644 index 000000000..2a07233de --- /dev/null +++ b/feature/featuretags/feature_tailnetlock_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. 
+ +//go:build ts_omit_tailnetlock + +package featuretags + +// TailnetLock is whether the binary was built with support for modular feature "Tailnet Lock support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tailnetlock" build tag. +// It's a const so it can be used for dead code elimination. +const TailnetLock = false diff --git a/feature/featuretags/feature_tailnetlock_enabled.go b/feature/featuretags/feature_tailnetlock_enabled.go new file mode 100644 index 000000000..1abf0c3bc --- /dev/null +++ b/feature/featuretags/feature_tailnetlock_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_tailnetlock + +package featuretags + +// TailnetLock is whether the binary was built with support for modular feature "Tailnet Lock support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tailnetlock" build tag. +// It's a const so it can be used for dead code elimination. +const TailnetLock = true diff --git a/feature/featuretags/feature_tap_disabled.go b/feature/featuretags/feature_tap_disabled.go new file mode 100644 index 000000000..d4dfded2b --- /dev/null +++ b/feature/featuretags/feature_tap_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_tap + +package featuretags + +// Tap is whether the binary was built with support for modular feature "Experimental Layer 2 (ethernet) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tap" build tag. +// It's a const so it can be used for dead code elimination. +const Tap = false diff --git a/feature/featuretags/feature_tap_enabled.go b/feature/featuretags/feature_tap_enabled.go new file mode 100644 index 000000000..a6ce1415c --- /dev/null +++ b/feature/featuretags/feature_tap_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_tap + +package featuretags + +// Tap is whether the binary was built with support for modular feature "Experimental Layer 2 (ethernet) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tap" build tag. +// It's a const so it can be used for dead code elimination. +const Tap = true diff --git a/feature/featuretags/feature_tpm_disabled.go b/feature/featuretags/feature_tpm_disabled.go new file mode 100644 index 000000000..15d888cfe --- /dev/null +++ b/feature/featuretags/feature_tpm_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_tpm + +package featuretags + +// TPM is whether the binary was built with support for modular feature "TPM support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tpm" build tag. +// It's a const so it can be used for dead code elimination. 
+const TPM = false diff --git a/feature/featuretags/feature_tpm_enabled.go b/feature/featuretags/feature_tpm_enabled.go new file mode 100644 index 000000000..3525f744c --- /dev/null +++ b/feature/featuretags/feature_tpm_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_tpm + +package featuretags + +// TPM is whether the binary was built with support for modular feature "TPM support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tpm" build tag. +// It's a const so it can be used for dead code elimination. +const TPM = true diff --git a/feature/featuretags/feature_wakeonlan_disabled.go b/feature/featuretags/feature_wakeonlan_disabled.go new file mode 100644 index 000000000..7b2b39c44 --- /dev/null +++ b/feature/featuretags/feature_wakeonlan_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_wakeonlan + +package featuretags + +// WakeOnLAN is whether the binary was built with support for modular feature "Wake-on-LAN support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_wakeonlan" build tag. +// It's a const so it can be used for dead code elimination. +const WakeOnLAN = false diff --git a/feature/featuretags/feature_wakeonlan_enabled.go b/feature/featuretags/feature_wakeonlan_enabled.go new file mode 100644 index 000000000..87eed5abf --- /dev/null +++ b/feature/featuretags/feature_wakeonlan_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_wakeonlan + +package featuretags + +// WakeOnLAN is whether the binary was built with support for modular feature "Wake-on-LAN support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_wakeonlan" build tag. +// It's a const so it can be used for dead code elimination. +const WakeOnLAN = true diff --git a/feature/featuretags/feature_webclient_disabled.go b/feature/featuretags/feature_webclient_disabled.go new file mode 100644 index 000000000..d49cbf8a7 --- /dev/null +++ b/feature/featuretags/feature_webclient_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_webclient + +package featuretags + +// WebClient is whether the binary was built with support for modular feature "Web client support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_webclient" build tag. +// It's a const so it can be used for dead code elimination. +const WebClient = false diff --git a/feature/featuretags/feature_webclient_enabled.go b/feature/featuretags/feature_webclient_enabled.go new file mode 100644 index 000000000..020ff64a0 --- /dev/null +++ b/feature/featuretags/feature_webclient_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_webclient + +package featuretags + +// WebClient is whether the binary was built with support for modular feature "Web client support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_webclient" build tag. 
+// It's a const so it can be used for dead code elimination. +const WebClient = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 87bc22fc6..55945075b 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:generate go run gen-featuretags.go + // The featuretags package is a registry of all the ts_omit-able build tags. package featuretags @@ -32,26 +34,34 @@ func (ft FeatureTag) OmitTag() string { return "ts_omit_" + string(ft) } +// FeatureMeta describes a modular feature that can be conditionally linked into +// the binary. +type FeatureMeta struct { + Sym string // exported Go symbol for boolean const + Desc string // human-readable description +} + // Features are the known Tailscale features that can be selectively included or // excluded via build tags, and a description of each. -var Features = map[FeatureTag]string{ - "aws": "AWS integration", - "bird": "Bird BGP integration", - "capture": "Packet capture", - "cli": "embed the CLI into the tailscaled binary", - "completion": "CLI shell completion", - "debugeventbus": "eventbus debug support", - "desktop_sessions": "Desktop sessions support", - "drive": "Tailscale Drive (file server) support", - "kube": "Kubernetes integration", - "relayserver": "Relay server", - "ssh": "Tailscale SSH support", - "syspolicy": "System policy configuration (MDM) support", - "systray": "Linux system tray", - "taildrop": "Taildrop (file sending) support", - "tailnetlock": "Tailnet Lock support", - "tap": "Experimental Layer 2 (ethernet) support", - "tpm": "TPM support", - "wakeonlan": "Wake-on-LAN support", - "webclient": "Web client support", +var Features = map[FeatureTag]FeatureMeta{ + "aws": {"AWS", "AWS integration"}, + "bird": {"Bird", "Bird BGP integration"}, + "capture": {"Capture", "Packet capture"}, + "cli": {"CLI", "embed the CLI into the tailscaled binary"}, + "completion": {"Completion", "CLI shell completion"}, + "debugeventbus": {"DebugEventBus", "eventbus debug support"}, + "desktop_sessions": {"DesktopSessions", "Desktop sessions support"}, + "drive": {"Drive", "Tailscale Drive (file server) support"}, + "kube": {"Kube", "Kubernetes integration"}, + "relayserver": {"RelayServer", "Relay server"}, + "serve": {"Serve", "Serve and Funnel support"}, + "ssh": {"SSH", "Tailscale SSH support"}, + "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support"}, + "systray": {"SysTray", "Linux system tray"}, + "taildrop": {"Taildrop", "Taildrop (file sending) support"}, + "tailnetlock": {"TailnetLock", "Tailnet Lock support"}, + "tap": {"Tap", "Experimental Layer 2 (ethernet) support"}, + "tpm": {"TPM", "TPM support"}, + "wakeonlan": {"WakeOnLAN", "Wake-on-LAN support"}, + "webclient": {"WebClient", "Web client support"}, } diff --git a/feature/featuretags/gen-featuretags.go b/feature/featuretags/gen-featuretags.go new file mode 100644 index 000000000..27701fb78 --- /dev/null +++ b/feature/featuretags/gen-featuretags.go @@ -0,0 +1,49 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ignore + +// The gen-featuretags.go program generates the feature__enabled.go +// and feature__disabled.go files for each feature tag. 
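With Features now mapping each tag to a FeatureMeta, callers such as cmd/featuretags can print both the Go symbol and the omit tag. A minimal sketch of that pattern (assuming imports of fmt, maps, slices, and tailscale.com/feature/featuretags):

	for _, tag := range slices.Sorted(maps.Keys(featuretags.Features)) {
		meta := featuretags.Features[tag]
		if !tag.IsOmittable() {
			continue // skip tags that cannot be omitted via a build tag
		}
		fmt.Printf("%-20s %-25s %s\n", tag.OmitTag(), meta.Sym, meta.Desc)
	}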
+package main + +import ( + "cmp" + "fmt" + "os" + "strings" + + "tailscale.com/feature/featuretags" + "tailscale.com/util/must" +) + +const header = `// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code g|e|n|e|r|a|t|e|d by gen-featuretags.go; D|O N|OT E|D|I|T. + +` + +func main() { + header := strings.ReplaceAll(header, "|", "") // to avoid this file being marked as generated + for k, m := range featuretags.Features { + if !k.IsOmittable() { + continue + } + sym := cmp.Or(m.Sym, strings.ToUpper(string(k)[:1])+string(k)[1:]) + for _, suf := range []string{"enabled", "disabled"} { + bang := "" + if suf == "enabled" { + bang = "!" // !ts_omit_... + } + must.Do(os.WriteFile("feature_"+string(k)+"_"+suf+".go", + fmt.Appendf(nil, "%s//go:build %s%s\n\npackage featuretags\n\n"+ + "// %s is whether the binary was built with support for modular feature %q.\n"+ + "// Specifically, it's whether the binary was NOT built with the %q build tag.\n"+ + "// It's a const so it can be used for dead code elimination.\n"+ + "const %s = %t\n", + header, bang, k.OmitTag(), sym, m.Desc, k.OmitTag(), sym, suf == "enabled"), 0644)) + + } + } +} From 510830ca7aa2987cce1e76b92efbe5fede6eff8b Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Mon, 15 Sep 2025 10:11:38 -0700 Subject: [PATCH 1309/1708] tailcfg: add HardwareAttestationKey to MapRequest (#17102) Extend the client state management to generate a hardware attestation key if none exists. Extend MapRequest with HardwareAttestationKey{,Signature} fields that optionally contain the public component of the hardware attestation key and a signature of the node's node key using it. This will be used by control to associate hardware attesation keys with node identities on a TOFU basis. Updates tailscale/corp#31269 Signed-off-by: Patrick O'Doherty --- tailcfg/tailcfg.go | 7 +++ types/key/hardware_attestation.go | 97 ++++++++++++++++++++++++++++++- 2 files changed, 102 insertions(+), 2 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 94d0b19d5..6c1357a63 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -1360,6 +1360,13 @@ type MapRequest struct { NodeKey key.NodePublic DiscoKey key.DiscoPublic + // HardwareAttestationKey is the public key of the node's hardware-backed + // identity attestation key, if any. + HardwareAttestationKey key.HardwareAttestationPublic `json:",omitzero"` + // HardwareAttestationKeySignature is the signature of the NodeKey + // serialized using MarshalText using its hardware attestation key, if any. + HardwareAttestationKeySignature []byte `json:",omitempty"` + // Stream is whether the client wants to receive multiple MapResponses over // the same HTTP connection. // diff --git a/types/key/hardware_attestation.go b/types/key/hardware_attestation.go index be2eefb78..ead077a5d 100644 --- a/types/key/hardware_attestation.go +++ b/types/key/hardware_attestation.go @@ -5,12 +5,19 @@ package key import ( "crypto" + "crypto/ecdsa" + "crypto/elliptic" "encoding/json" "fmt" + "io" + + "go4.org/mem" ) var ErrUnsupported = fmt.Errorf("key type not supported on this platform") +const hardwareAttestPublicHexPrefix = "hwattestpub:" + // HardwareAttestationKey describes a hardware-backed key that is used to // identify a node. 
Implementation details will // vary based on the platform in use (SecureEnclave for Apple, TPM for @@ -20,10 +27,96 @@ type HardwareAttestationKey interface { crypto.Signer json.Marshaler json.Unmarshaler + io.Closer + Clone() HardwareAttestationKey +} + +// HardwareAttestationPublicFromPlatformKey creates a HardwareAttestationPublic +// for communicating the public component of the hardware attestation key +// with control and other nodes. +func HardwareAttestationPublicFromPlatformKey(k HardwareAttestationKey) HardwareAttestationPublic { + if k == nil { + return HardwareAttestationPublic{} + } + pub := k.Public() + ecdsaPub, ok := pub.(*ecdsa.PublicKey) + if !ok { + panic("hardware attestation key is not ECDSA") + } + return HardwareAttestationPublic{k: ecdsaPub} +} + +// HardwareAttestationPublic is the public key counterpart to +// HardwareAttestationKey. +type HardwareAttestationPublic struct { + k *ecdsa.PublicKey +} + +func (k HardwareAttestationPublic) Equal(o HardwareAttestationPublic) bool { + if k.k == nil || o.k == nil { + return k.k == o.k + } + return k.k.X.Cmp(o.k.X) == 0 && k.k.Y.Cmp(o.k.Y) == 0 && k.k.Curve == o.k.Curve +} + +// IsZero reports whether k is the zero value. +func (k HardwareAttestationPublic) IsZero() bool { + return k.k == nil +} + +// String returns the hex-encoded public key with a type prefix. +func (k HardwareAttestationPublic) String() string { + bs, err := k.MarshalText() + if err != nil { + panic(err) + } + return string(bs) +} + +// MarshalText implements encoding.TextMarshaler. +func (k HardwareAttestationPublic) MarshalText() ([]byte, error) { + if k.k == nil { + return nil, nil + } + return k.AppendText(nil) +} + +// UnmarshalText implements encoding.TextUnmarshaler. It expects a typed prefix +// followed by a hex encoded representation of k. +func (k *HardwareAttestationPublic) UnmarshalText(b []byte) error { + if len(b) == 0 { + *k = HardwareAttestationPublic{} + return nil + } + + kb := make([]byte, 65) + if err := parseHex(kb, mem.B(b), mem.S(hardwareAttestPublicHexPrefix)); err != nil { + return err + } + + pk, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), kb) + if err != nil { + return err + } + k.k = pk + return nil +} + +func (k HardwareAttestationPublic) AppendText(dst []byte) ([]byte, error) { + b, err := k.k.Bytes() + if err != nil { + return nil, err + } + return appendHexKey(dst, hardwareAttestPublicHexPrefix, b), nil +} + +// Verifier returns the ECDSA public key for verifying signatures made by k. +func (k HardwareAttestationPublic) Verifier() *ecdsa.PublicKey { + return k.k } // emptyHardwareAttestationKey is a function that returns an empty -// HardwareAttestationKey suitable for use with JSON unmarshalling. +// HardwareAttestationKey suitable for use with JSON unmarshaling. var emptyHardwareAttestationKey func() HardwareAttestationKey // createHardwareAttestationKey is a function that creates a new @@ -50,7 +143,7 @@ func RegisterHardwareAttestationKeyFns(emptyFn func() HardwareAttestationKey, cr } // NewEmptyHardwareAttestationKey returns an empty HardwareAttestationKey -// suitable for JSON unmarshalling. +// suitable for JSON unmarshaling. 
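For illustration, a client populating the new MapRequest fields might look roughly like the sketch below. buildMapRequest is a hypothetical helper, and the SHA-256 digest over the node key's text form signed via the plain crypto.Signer interface is an assumption made for the example; this change does not pin down the exact signing scheme control expects. Assumed imports: crypto, crypto/rand, crypto/sha256, tailscale.com/tailcfg, tailscale.com/types/key.

	func buildMapRequest(hwKey key.HardwareAttestationKey, nodeKey key.NodePublic) (*tailcfg.MapRequest, error) {
		nkText, err := nodeKey.MarshalText()
		if err != nil {
			return nil, err
		}
		// Assumption: sign a SHA-256 digest of the node key's text form
		// using the hardware key's crypto.Signer interface.
		digest := sha256.Sum256(nkText)
		sig, err := hwKey.Sign(rand.Reader, digest[:], crypto.SHA256)
		if err != nil {
			return nil, err
		}
		return &tailcfg.MapRequest{
			NodeKey:                         nodeKey,
			HardwareAttestationKey:          key.HardwareAttestationPublicFromPlatformKey(hwKey),
			HardwareAttestationKeySignature: sig,
		}, nil
	}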
func NewEmptyHardwareAttestationKey() (HardwareAttestationKey, error) { if emptyHardwareAttestationKey == nil { return nil, ErrUnsupported From 6fb316f5edceb5d534a22058dc6804263971e269 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 15 Sep 2025 09:52:17 -0700 Subject: [PATCH 1310/1708] feature/buildfeatures: split const bools out of the featuretags package, add Has prefix This renames the package+symbols in the earlier 17ffa8013835d4 to be in their own package ("buildfeatures") and start with the word "Has" like "if buildfeatures.HasFoo {". Updates #12614 Change-Id: I510e5f65993e5b76a0e163e3aa4543755213cbf6 Signed-off-by: Brad Fitzpatrick --- feature/buildfeatures/buildfeatures.go | 10 ++++++++++ .../feature_aws_disabled.go | 8 ++++---- .../feature_aws_enabled.go | 8 ++++---- .../feature_bird_disabled.go | 8 ++++---- .../feature_bird_enabled.go | 8 ++++---- .../feature_capture_disabled.go | 8 ++++---- .../feature_capture_enabled.go | 8 ++++---- .../feature_completion_disabled.go | 8 ++++---- .../feature_completion_enabled.go | 8 ++++---- .../feature_debugeventbus_disabled.go | 8 ++++---- .../feature_debugeventbus_enabled.go | 8 ++++---- .../feature_desktop_sessions_disabled.go | 8 ++++---- .../feature_desktop_sessions_enabled.go | 8 ++++---- .../feature_drive_disabled.go | 8 ++++---- .../feature_drive_enabled.go | 8 ++++---- .../feature_kube_disabled.go | 8 ++++---- .../feature_kube_enabled.go | 8 ++++---- .../feature_relayserver_disabled.go | 8 ++++---- .../feature_relayserver_enabled.go | 8 ++++---- .../feature_serve_disabled.go | 8 ++++---- .../feature_serve_enabled.go | 8 ++++---- .../feature_ssh_disabled.go | 8 ++++---- .../feature_ssh_enabled.go | 8 ++++---- .../feature_syspolicy_disabled.go | 8 ++++---- .../feature_syspolicy_enabled.go | 8 ++++---- .../feature_systray_disabled.go | 8 ++++---- .../feature_systray_enabled.go | 8 ++++---- .../feature_taildrop_disabled.go | 8 ++++---- .../feature_taildrop_enabled.go | 8 ++++---- .../feature_tailnetlock_disabled.go | 8 ++++---- .../feature_tailnetlock_enabled.go | 8 ++++---- .../feature_tap_disabled.go | 8 ++++---- .../feature_tap_enabled.go | 8 ++++---- .../feature_tpm_disabled.go | 8 ++++---- .../feature_tpm_enabled.go | 8 ++++---- .../feature_wakeonlan_disabled.go | 8 ++++---- .../feature_wakeonlan_enabled.go | 8 ++++---- .../feature_webclient_disabled.go | 8 ++++---- .../feature_webclient_enabled.go | 8 ++++---- .../gen-featuretags.go => buildfeatures/gen.go} | 8 ++++---- feature/featuretags/featuretags.go | 2 -- 41 files changed, 166 insertions(+), 158 deletions(-) create mode 100644 feature/buildfeatures/buildfeatures.go rename feature/{featuretags => buildfeatures}/feature_aws_disabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_aws_enabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_bird_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_bird_enabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_capture_disabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_capture_enabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_completion_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_completion_enabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_debugeventbus_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_debugeventbus_enabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_desktop_sessions_disabled.go (56%) rename feature/{featuretags 
=> buildfeatures}/feature_desktop_sessions_enabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_drive_disabled.go (55%) rename feature/{featuretags => buildfeatures}/feature_drive_enabled.go (55%) rename feature/{featuretags => buildfeatures}/feature_kube_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_kube_enabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_relayserver_disabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_relayserver_enabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_serve_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_serve_enabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_ssh_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_ssh_enabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_syspolicy_disabled.go (54%) rename feature/{featuretags => buildfeatures}/feature_syspolicy_enabled.go (54%) rename feature/{featuretags => buildfeatures}/feature_systray_disabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_systray_enabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_taildrop_disabled.go (56%) rename feature/{featuretags => buildfeatures}/feature_taildrop_enabled.go (56%) rename feature/{featuretags => buildfeatures}/feature_tailnetlock_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_tailnetlock_enabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_tap_disabled.go (55%) rename feature/{featuretags => buildfeatures}/feature_tap_enabled.go (55%) rename feature/{featuretags => buildfeatures}/feature_tpm_disabled.go (59%) rename feature/{featuretags => buildfeatures}/feature_tpm_enabled.go (59%) rename feature/{featuretags => buildfeatures}/feature_wakeonlan_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_wakeonlan_enabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_webclient_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_webclient_enabled.go (57%) rename feature/{featuretags/gen-featuretags.go => buildfeatures/gen.go} (80%) diff --git a/feature/buildfeatures/buildfeatures.go b/feature/buildfeatures/buildfeatures.go new file mode 100644 index 000000000..cdb31dc01 --- /dev/null +++ b/feature/buildfeatures/buildfeatures.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:generate go run gen.go + +// The buildfeatures package contains boolean constants indicating which +// features were included in the binary (via build tags), for use in dead code +// elimination when using separate build tag protected files is impractical +// or undesirable. +package buildfeatures diff --git a/feature/featuretags/feature_aws_disabled.go b/feature/buildfeatures/feature_aws_disabled.go similarity index 58% rename from feature/featuretags/feature_aws_disabled.go rename to feature/buildfeatures/feature_aws_disabled.go index 045feb269..66b670c1f 100644 --- a/feature/featuretags/feature_aws_disabled.go +++ b/feature/buildfeatures/feature_aws_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_aws -package featuretags +package buildfeatures -// AWS is whether the binary was built with support for modular feature "AWS integration". 
+// HasAWS is whether the binary was built with support for modular feature "AWS integration". // Specifically, it's whether the binary was NOT built with the "ts_omit_aws" build tag. // It's a const so it can be used for dead code elimination. -const AWS = false +const HasAWS = false diff --git a/feature/featuretags/feature_aws_enabled.go b/feature/buildfeatures/feature_aws_enabled.go similarity index 58% rename from feature/featuretags/feature_aws_enabled.go rename to feature/buildfeatures/feature_aws_enabled.go index d935c9d26..30203b2aa 100644 --- a/feature/featuretags/feature_aws_enabled.go +++ b/feature/buildfeatures/feature_aws_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_aws -package featuretags +package buildfeatures -// AWS is whether the binary was built with support for modular feature "AWS integration". +// HasAWS is whether the binary was built with support for modular feature "AWS integration". // Specifically, it's whether the binary was NOT built with the "ts_omit_aws" build tag. // It's a const so it can be used for dead code elimination. -const AWS = true +const HasAWS = true diff --git a/feature/featuretags/feature_bird_disabled.go b/feature/buildfeatures/feature_bird_disabled.go similarity index 57% rename from feature/featuretags/feature_bird_disabled.go rename to feature/buildfeatures/feature_bird_disabled.go index 986c98458..469aa41f9 100644 --- a/feature/featuretags/feature_bird_disabled.go +++ b/feature/buildfeatures/feature_bird_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_bird -package featuretags +package buildfeatures -// Bird is whether the binary was built with support for modular feature "Bird BGP integration". +// HasBird is whether the binary was built with support for modular feature "Bird BGP integration". // Specifically, it's whether the binary was NOT built with the "ts_omit_bird" build tag. // It's a const so it can be used for dead code elimination. -const Bird = false +const HasBird = false diff --git a/feature/featuretags/feature_bird_enabled.go b/feature/buildfeatures/feature_bird_enabled.go similarity index 58% rename from feature/featuretags/feature_bird_enabled.go rename to feature/buildfeatures/feature_bird_enabled.go index ac9404704..792129f64 100644 --- a/feature/featuretags/feature_bird_enabled.go +++ b/feature/buildfeatures/feature_bird_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_bird -package featuretags +package buildfeatures -// Bird is whether the binary was built with support for modular feature "Bird BGP integration". +// HasBird is whether the binary was built with support for modular feature "Bird BGP integration". // Specifically, it's whether the binary was NOT built with the "ts_omit_bird" build tag. // It's a const so it can be used for dead code elimination. 
-const Bird = true +const HasBird = true diff --git a/feature/featuretags/feature_capture_disabled.go b/feature/buildfeatures/feature_capture_disabled.go similarity index 58% rename from feature/featuretags/feature_capture_disabled.go rename to feature/buildfeatures/feature_capture_disabled.go index cee424542..58535958f 100644 --- a/feature/featuretags/feature_capture_disabled.go +++ b/feature/buildfeatures/feature_capture_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_capture -package featuretags +package buildfeatures -// Capture is whether the binary was built with support for modular feature "Packet capture". +// HasCapture is whether the binary was built with support for modular feature "Packet capture". // Specifically, it's whether the binary was NOT built with the "ts_omit_capture" build tag. // It's a const so it can be used for dead code elimination. -const Capture = false +const HasCapture = false diff --git a/feature/featuretags/feature_capture_enabled.go b/feature/buildfeatures/feature_capture_enabled.go similarity index 58% rename from feature/featuretags/feature_capture_enabled.go rename to feature/buildfeatures/feature_capture_enabled.go index 40aabf110..7120a3d06 100644 --- a/feature/featuretags/feature_capture_enabled.go +++ b/feature/buildfeatures/feature_capture_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_capture -package featuretags +package buildfeatures -// Capture is whether the binary was built with support for modular feature "Packet capture". +// HasCapture is whether the binary was built with support for modular feature "Packet capture". // Specifically, it's whether the binary was NOT built with the "ts_omit_capture" build tag. // It's a const so it can be used for dead code elimination. -const Capture = true +const HasCapture = true diff --git a/feature/featuretags/feature_completion_disabled.go b/feature/buildfeatures/feature_completion_disabled.go similarity index 57% rename from feature/featuretags/feature_completion_disabled.go rename to feature/buildfeatures/feature_completion_disabled.go index 7b3f3cb6d..ea319beb0 100644 --- a/feature/featuretags/feature_completion_disabled.go +++ b/feature/buildfeatures/feature_completion_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_completion -package featuretags +package buildfeatures -// Completion is whether the binary was built with support for modular feature "CLI shell completion". +// HasCompletion is whether the binary was built with support for modular feature "CLI shell completion". // Specifically, it's whether the binary was NOT built with the "ts_omit_completion" build tag. // It's a const so it can be used for dead code elimination. 
-const Completion = false +const HasCompletion = false diff --git a/feature/featuretags/feature_completion_enabled.go b/feature/buildfeatures/feature_completion_enabled.go similarity index 57% rename from feature/featuretags/feature_completion_enabled.go rename to feature/buildfeatures/feature_completion_enabled.go index b6d5218f2..6db41c97b 100644 --- a/feature/featuretags/feature_completion_enabled.go +++ b/feature/buildfeatures/feature_completion_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_completion -package featuretags +package buildfeatures -// Completion is whether the binary was built with support for modular feature "CLI shell completion". +// HasCompletion is whether the binary was built with support for modular feature "CLI shell completion". // Specifically, it's whether the binary was NOT built with the "ts_omit_completion" build tag. // It's a const so it can be used for dead code elimination. -const Completion = true +const HasCompletion = true diff --git a/feature/featuretags/feature_debugeventbus_disabled.go b/feature/buildfeatures/feature_debugeventbus_disabled.go similarity index 57% rename from feature/featuretags/feature_debugeventbus_disabled.go rename to feature/buildfeatures/feature_debugeventbus_disabled.go index c826de691..2eb599934 100644 --- a/feature/featuretags/feature_debugeventbus_disabled.go +++ b/feature/buildfeatures/feature_debugeventbus_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_debugeventbus -package featuretags +package buildfeatures -// DebugEventBus is whether the binary was built with support for modular feature "eventbus debug support". +// HasDebugEventBus is whether the binary was built with support for modular feature "eventbus debug support". // Specifically, it's whether the binary was NOT built with the "ts_omit_debugeventbus" build tag. // It's a const so it can be used for dead code elimination. -const DebugEventBus = false +const HasDebugEventBus = false diff --git a/feature/featuretags/feature_debugeventbus_enabled.go b/feature/buildfeatures/feature_debugeventbus_enabled.go similarity index 57% rename from feature/featuretags/feature_debugeventbus_enabled.go rename to feature/buildfeatures/feature_debugeventbus_enabled.go index 068efa859..df13b6fa2 100644 --- a/feature/featuretags/feature_debugeventbus_enabled.go +++ b/feature/buildfeatures/feature_debugeventbus_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_debugeventbus -package featuretags +package buildfeatures -// DebugEventBus is whether the binary was built with support for modular feature "eventbus debug support". +// HasDebugEventBus is whether the binary was built with support for modular feature "eventbus debug support". // Specifically, it's whether the binary was NOT built with the "ts_omit_debugeventbus" build tag. // It's a const so it can be used for dead code elimination. 
-const DebugEventBus = true +const HasDebugEventBus = true diff --git a/feature/featuretags/feature_desktop_sessions_disabled.go b/feature/buildfeatures/feature_desktop_sessions_disabled.go similarity index 56% rename from feature/featuretags/feature_desktop_sessions_disabled.go rename to feature/buildfeatures/feature_desktop_sessions_disabled.go index 73644d911..1536c886f 100644 --- a/feature/featuretags/feature_desktop_sessions_disabled.go +++ b/feature/buildfeatures/feature_desktop_sessions_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_desktop_sessions -package featuretags +package buildfeatures -// DesktopSessions is whether the binary was built with support for modular feature "Desktop sessions support". +// HasDesktopSessions is whether the binary was built with support for modular feature "Desktop sessions support". // Specifically, it's whether the binary was NOT built with the "ts_omit_desktop_sessions" build tag. // It's a const so it can be used for dead code elimination. -const DesktopSessions = false +const HasDesktopSessions = false diff --git a/feature/featuretags/feature_desktop_sessions_enabled.go b/feature/buildfeatures/feature_desktop_sessions_enabled.go similarity index 57% rename from feature/featuretags/feature_desktop_sessions_enabled.go rename to feature/buildfeatures/feature_desktop_sessions_enabled.go index 93c776a04..84658de95 100644 --- a/feature/featuretags/feature_desktop_sessions_enabled.go +++ b/feature/buildfeatures/feature_desktop_sessions_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_desktop_sessions -package featuretags +package buildfeatures -// DesktopSessions is whether the binary was built with support for modular feature "Desktop sessions support". +// HasDesktopSessions is whether the binary was built with support for modular feature "Desktop sessions support". // Specifically, it's whether the binary was NOT built with the "ts_omit_desktop_sessions" build tag. // It's a const so it can be used for dead code elimination. -const DesktopSessions = true +const HasDesktopSessions = true diff --git a/feature/featuretags/feature_drive_disabled.go b/feature/buildfeatures/feature_drive_disabled.go similarity index 55% rename from feature/featuretags/feature_drive_disabled.go rename to feature/buildfeatures/feature_drive_disabled.go index 550ed0bd1..072026389 100644 --- a/feature/featuretags/feature_drive_disabled.go +++ b/feature/buildfeatures/feature_drive_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_drive -package featuretags +package buildfeatures -// Drive is whether the binary was built with support for modular feature "Tailscale Drive (file server) support". +// HasDrive is whether the binary was built with support for modular feature "Tailscale Drive (file server) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_drive" build tag. // It's a const so it can be used for dead code elimination. 
-const Drive = false +const HasDrive = false diff --git a/feature/featuretags/feature_drive_enabled.go b/feature/buildfeatures/feature_drive_enabled.go similarity index 55% rename from feature/featuretags/feature_drive_enabled.go rename to feature/buildfeatures/feature_drive_enabled.go index 2ed83b271..9f58836a4 100644 --- a/feature/featuretags/feature_drive_enabled.go +++ b/feature/buildfeatures/feature_drive_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_drive -package featuretags +package buildfeatures -// Drive is whether the binary was built with support for modular feature "Tailscale Drive (file server) support". +// HasDrive is whether the binary was built with support for modular feature "Tailscale Drive (file server) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_drive" build tag. // It's a const so it can be used for dead code elimination. -const Drive = true +const HasDrive = true diff --git a/feature/featuretags/feature_kube_disabled.go b/feature/buildfeatures/feature_kube_disabled.go similarity index 57% rename from feature/featuretags/feature_kube_disabled.go rename to feature/buildfeatures/feature_kube_disabled.go index 3a140e869..2b76c57e7 100644 --- a/feature/featuretags/feature_kube_disabled.go +++ b/feature/buildfeatures/feature_kube_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_kube -package featuretags +package buildfeatures -// Kube is whether the binary was built with support for modular feature "Kubernetes integration". +// HasKube is whether the binary was built with support for modular feature "Kubernetes integration". // Specifically, it's whether the binary was NOT built with the "ts_omit_kube" build tag. // It's a const so it can be used for dead code elimination. -const Kube = false +const HasKube = false diff --git a/feature/featuretags/feature_kube_enabled.go b/feature/buildfeatures/feature_kube_enabled.go similarity index 57% rename from feature/featuretags/feature_kube_enabled.go rename to feature/buildfeatures/feature_kube_enabled.go index 1dd119a2b..7abca1759 100644 --- a/feature/featuretags/feature_kube_enabled.go +++ b/feature/buildfeatures/feature_kube_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_kube -package featuretags +package buildfeatures -// Kube is whether the binary was built with support for modular feature "Kubernetes integration". +// HasKube is whether the binary was built with support for modular feature "Kubernetes integration". // Specifically, it's whether the binary was NOT built with the "ts_omit_kube" build tag. // It's a const so it can be used for dead code elimination. 
-const Kube = true +const HasKube = true diff --git a/feature/featuretags/feature_relayserver_disabled.go b/feature/buildfeatures/feature_relayserver_disabled.go similarity index 58% rename from feature/featuretags/feature_relayserver_disabled.go rename to feature/buildfeatures/feature_relayserver_disabled.go index e6122ef9c..08ced8310 100644 --- a/feature/featuretags/feature_relayserver_disabled.go +++ b/feature/buildfeatures/feature_relayserver_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_relayserver -package featuretags +package buildfeatures -// RelayServer is whether the binary was built with support for modular feature "Relay server". +// HasRelayServer is whether the binary was built with support for modular feature "Relay server". // Specifically, it's whether the binary was NOT built with the "ts_omit_relayserver" build tag. // It's a const so it can be used for dead code elimination. -const RelayServer = false +const HasRelayServer = false diff --git a/feature/featuretags/feature_relayserver_enabled.go b/feature/buildfeatures/feature_relayserver_enabled.go similarity index 58% rename from feature/featuretags/feature_relayserver_enabled.go rename to feature/buildfeatures/feature_relayserver_enabled.go index 34ed23a84..6a35f8305 100644 --- a/feature/featuretags/feature_relayserver_enabled.go +++ b/feature/buildfeatures/feature_relayserver_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_relayserver -package featuretags +package buildfeatures -// RelayServer is whether the binary was built with support for modular feature "Relay server". +// HasRelayServer is whether the binary was built with support for modular feature "Relay server". // Specifically, it's whether the binary was NOT built with the "ts_omit_relayserver" build tag. // It's a const so it can be used for dead code elimination. -const RelayServer = true +const HasRelayServer = true diff --git a/feature/featuretags/feature_serve_disabled.go b/feature/buildfeatures/feature_serve_disabled.go similarity index 57% rename from feature/featuretags/feature_serve_disabled.go rename to feature/buildfeatures/feature_serve_disabled.go index a143e951f..6d7971350 100644 --- a/feature/featuretags/feature_serve_disabled.go +++ b/feature/buildfeatures/feature_serve_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_serve -package featuretags +package buildfeatures -// Serve is whether the binary was built with support for modular feature "Serve and Funnel support". +// HasServe is whether the binary was built with support for modular feature "Serve and Funnel support". // Specifically, it's whether the binary was NOT built with the "ts_omit_serve" build tag. // It's a const so it can be used for dead code elimination. 
-const Serve = false +const HasServe = false diff --git a/feature/featuretags/feature_serve_enabled.go b/feature/buildfeatures/feature_serve_enabled.go similarity index 57% rename from feature/featuretags/feature_serve_enabled.go rename to feature/buildfeatures/feature_serve_enabled.go index 1d1af0809..57bf2c6b0 100644 --- a/feature/featuretags/feature_serve_enabled.go +++ b/feature/buildfeatures/feature_serve_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_serve -package featuretags +package buildfeatures -// Serve is whether the binary was built with support for modular feature "Serve and Funnel support". +// HasServe is whether the binary was built with support for modular feature "Serve and Funnel support". // Specifically, it's whether the binary was NOT built with the "ts_omit_serve" build tag. // It's a const so it can be used for dead code elimination. -const Serve = true +const HasServe = true diff --git a/feature/featuretags/feature_ssh_disabled.go b/feature/buildfeatures/feature_ssh_disabled.go similarity index 57% rename from feature/featuretags/feature_ssh_disabled.go rename to feature/buildfeatures/feature_ssh_disabled.go index c22be2945..754f50eb6 100644 --- a/feature/featuretags/feature_ssh_disabled.go +++ b/feature/buildfeatures/feature_ssh_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_ssh -package featuretags +package buildfeatures -// SSH is whether the binary was built with support for modular feature "Tailscale SSH support". +// HasSSH is whether the binary was built with support for modular feature "Tailscale SSH support". // Specifically, it's whether the binary was NOT built with the "ts_omit_ssh" build tag. // It's a const so it can be used for dead code elimination. -const SSH = false +const HasSSH = false diff --git a/feature/featuretags/feature_ssh_enabled.go b/feature/buildfeatures/feature_ssh_enabled.go similarity index 58% rename from feature/featuretags/feature_ssh_enabled.go rename to feature/buildfeatures/feature_ssh_enabled.go index 52fa10b58..dbdc3a89f 100644 --- a/feature/featuretags/feature_ssh_enabled.go +++ b/feature/buildfeatures/feature_ssh_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_ssh -package featuretags +package buildfeatures -// SSH is whether the binary was built with support for modular feature "Tailscale SSH support". +// HasSSH is whether the binary was built with support for modular feature "Tailscale SSH support". // Specifically, it's whether the binary was NOT built with the "ts_omit_ssh" build tag. // It's a const so it can be used for dead code elimination. 
-const SSH = true +const HasSSH = true diff --git a/feature/featuretags/feature_syspolicy_disabled.go b/feature/buildfeatures/feature_syspolicy_disabled.go similarity index 54% rename from feature/featuretags/feature_syspolicy_disabled.go rename to feature/buildfeatures/feature_syspolicy_disabled.go index db73b0261..54d32e32e 100644 --- a/feature/featuretags/feature_syspolicy_disabled.go +++ b/feature/buildfeatures/feature_syspolicy_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_syspolicy -package featuretags +package buildfeatures -// SystemPolicy is whether the binary was built with support for modular feature "System policy configuration (MDM) support". +// HasSystemPolicy is whether the binary was built with support for modular feature "System policy configuration (MDM) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_syspolicy" build tag. // It's a const so it can be used for dead code elimination. -const SystemPolicy = false +const HasSystemPolicy = false diff --git a/feature/featuretags/feature_syspolicy_enabled.go b/feature/buildfeatures/feature_syspolicy_enabled.go similarity index 54% rename from feature/featuretags/feature_syspolicy_enabled.go rename to feature/buildfeatures/feature_syspolicy_enabled.go index 2ad332676..f7c403ae9 100644 --- a/feature/featuretags/feature_syspolicy_enabled.go +++ b/feature/buildfeatures/feature_syspolicy_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_syspolicy -package featuretags +package buildfeatures -// SystemPolicy is whether the binary was built with support for modular feature "System policy configuration (MDM) support". +// HasSystemPolicy is whether the binary was built with support for modular feature "System policy configuration (MDM) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_syspolicy" build tag. // It's a const so it can be used for dead code elimination. -const SystemPolicy = true +const HasSystemPolicy = true diff --git a/feature/featuretags/feature_systray_disabled.go b/feature/buildfeatures/feature_systray_disabled.go similarity index 58% rename from feature/featuretags/feature_systray_disabled.go rename to feature/buildfeatures/feature_systray_disabled.go index a358bbf6f..4ae1edb0a 100644 --- a/feature/featuretags/feature_systray_disabled.go +++ b/feature/buildfeatures/feature_systray_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_systray -package featuretags +package buildfeatures -// SysTray is whether the binary was built with support for modular feature "Linux system tray". +// HasSysTray is whether the binary was built with support for modular feature "Linux system tray". // Specifically, it's whether the binary was NOT built with the "ts_omit_systray" build tag. // It's a const so it can be used for dead code elimination. 
-const SysTray = false +const HasSysTray = false diff --git a/feature/featuretags/feature_systray_enabled.go b/feature/buildfeatures/feature_systray_enabled.go similarity index 58% rename from feature/featuretags/feature_systray_enabled.go rename to feature/buildfeatures/feature_systray_enabled.go index aebf3ad9e..5fd7fd220 100644 --- a/feature/featuretags/feature_systray_enabled.go +++ b/feature/buildfeatures/feature_systray_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_systray -package featuretags +package buildfeatures -// SysTray is whether the binary was built with support for modular feature "Linux system tray". +// HasSysTray is whether the binary was built with support for modular feature "Linux system tray". // Specifically, it's whether the binary was NOT built with the "ts_omit_systray" build tag. // It's a const so it can be used for dead code elimination. -const SysTray = true +const HasSysTray = true diff --git a/feature/featuretags/feature_taildrop_disabled.go b/feature/buildfeatures/feature_taildrop_disabled.go similarity index 56% rename from feature/featuretags/feature_taildrop_disabled.go rename to feature/buildfeatures/feature_taildrop_disabled.go index 5c95c28b6..8ffe90617 100644 --- a/feature/featuretags/feature_taildrop_disabled.go +++ b/feature/buildfeatures/feature_taildrop_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_taildrop -package featuretags +package buildfeatures -// Taildrop is whether the binary was built with support for modular feature "Taildrop (file sending) support". +// HasTaildrop is whether the binary was built with support for modular feature "Taildrop (file sending) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_taildrop" build tag. // It's a const so it can be used for dead code elimination. -const Taildrop = false +const HasTaildrop = false diff --git a/feature/featuretags/feature_taildrop_enabled.go b/feature/buildfeatures/feature_taildrop_enabled.go similarity index 56% rename from feature/featuretags/feature_taildrop_enabled.go rename to feature/buildfeatures/feature_taildrop_enabled.go index e5212f03a..4f55d2801 100644 --- a/feature/featuretags/feature_taildrop_enabled.go +++ b/feature/buildfeatures/feature_taildrop_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_taildrop -package featuretags +package buildfeatures -// Taildrop is whether the binary was built with support for modular feature "Taildrop (file sending) support". +// HasTaildrop is whether the binary was built with support for modular feature "Taildrop (file sending) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_taildrop" build tag. // It's a const so it can be used for dead code elimination. 
-const Taildrop = true +const HasTaildrop = true diff --git a/feature/featuretags/feature_tailnetlock_disabled.go b/feature/buildfeatures/feature_tailnetlock_disabled.go similarity index 57% rename from feature/featuretags/feature_tailnetlock_disabled.go rename to feature/buildfeatures/feature_tailnetlock_disabled.go index 2a07233de..6b5a57f24 100644 --- a/feature/featuretags/feature_tailnetlock_disabled.go +++ b/feature/buildfeatures/feature_tailnetlock_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_tailnetlock -package featuretags +package buildfeatures -// TailnetLock is whether the binary was built with support for modular feature "Tailnet Lock support". +// HasTailnetLock is whether the binary was built with support for modular feature "Tailnet Lock support". // Specifically, it's whether the binary was NOT built with the "ts_omit_tailnetlock" build tag. // It's a const so it can be used for dead code elimination. -const TailnetLock = false +const HasTailnetLock = false diff --git a/feature/featuretags/feature_tailnetlock_enabled.go b/feature/buildfeatures/feature_tailnetlock_enabled.go similarity index 57% rename from feature/featuretags/feature_tailnetlock_enabled.go rename to feature/buildfeatures/feature_tailnetlock_enabled.go index 1abf0c3bc..afedb7faa 100644 --- a/feature/featuretags/feature_tailnetlock_enabled.go +++ b/feature/buildfeatures/feature_tailnetlock_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_tailnetlock -package featuretags +package buildfeatures -// TailnetLock is whether the binary was built with support for modular feature "Tailnet Lock support". +// HasTailnetLock is whether the binary was built with support for modular feature "Tailnet Lock support". // Specifically, it's whether the binary was NOT built with the "ts_omit_tailnetlock" build tag. // It's a const so it can be used for dead code elimination. -const TailnetLock = true +const HasTailnetLock = true diff --git a/feature/featuretags/feature_tap_disabled.go b/feature/buildfeatures/feature_tap_disabled.go similarity index 55% rename from feature/featuretags/feature_tap_disabled.go rename to feature/buildfeatures/feature_tap_disabled.go index d4dfded2b..f0b3eec8d 100644 --- a/feature/featuretags/feature_tap_disabled.go +++ b/feature/buildfeatures/feature_tap_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_tap -package featuretags +package buildfeatures -// Tap is whether the binary was built with support for modular feature "Experimental Layer 2 (ethernet) support". +// HasTap is whether the binary was built with support for modular feature "Experimental Layer 2 (ethernet) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_tap" build tag. // It's a const so it can be used for dead code elimination. 
-const Tap = false +const HasTap = false diff --git a/feature/featuretags/feature_tap_enabled.go b/feature/buildfeatures/feature_tap_enabled.go similarity index 55% rename from feature/featuretags/feature_tap_enabled.go rename to feature/buildfeatures/feature_tap_enabled.go index a6ce1415c..1363c4b44 100644 --- a/feature/featuretags/feature_tap_enabled.go +++ b/feature/buildfeatures/feature_tap_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_tap -package featuretags +package buildfeatures -// Tap is whether the binary was built with support for modular feature "Experimental Layer 2 (ethernet) support". +// HasTap is whether the binary was built with support for modular feature "Experimental Layer 2 (ethernet) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_tap" build tag. // It's a const so it can be used for dead code elimination. -const Tap = true +const HasTap = true diff --git a/feature/featuretags/feature_tpm_disabled.go b/feature/buildfeatures/feature_tpm_disabled.go similarity index 59% rename from feature/featuretags/feature_tpm_disabled.go rename to feature/buildfeatures/feature_tpm_disabled.go index 15d888cfe..b9d55815e 100644 --- a/feature/featuretags/feature_tpm_disabled.go +++ b/feature/buildfeatures/feature_tpm_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_tpm -package featuretags +package buildfeatures -// TPM is whether the binary was built with support for modular feature "TPM support". +// HasTPM is whether the binary was built with support for modular feature "TPM support". // Specifically, it's whether the binary was NOT built with the "ts_omit_tpm" build tag. // It's a const so it can be used for dead code elimination. -const TPM = false +const HasTPM = false diff --git a/feature/featuretags/feature_tpm_enabled.go b/feature/buildfeatures/feature_tpm_enabled.go similarity index 59% rename from feature/featuretags/feature_tpm_enabled.go rename to feature/buildfeatures/feature_tpm_enabled.go index 3525f744c..dcfc8a304 100644 --- a/feature/featuretags/feature_tpm_enabled.go +++ b/feature/buildfeatures/feature_tpm_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_tpm -package featuretags +package buildfeatures -// TPM is whether the binary was built with support for modular feature "TPM support". +// HasTPM is whether the binary was built with support for modular feature "TPM support". // Specifically, it's whether the binary was NOT built with the "ts_omit_tpm" build tag. // It's a const so it can be used for dead code elimination. 
-const TPM = true +const HasTPM = true diff --git a/feature/featuretags/feature_wakeonlan_disabled.go b/feature/buildfeatures/feature_wakeonlan_disabled.go similarity index 57% rename from feature/featuretags/feature_wakeonlan_disabled.go rename to feature/buildfeatures/feature_wakeonlan_disabled.go index 7b2b39c44..816ac661f 100644 --- a/feature/featuretags/feature_wakeonlan_disabled.go +++ b/feature/buildfeatures/feature_wakeonlan_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_wakeonlan -package featuretags +package buildfeatures -// WakeOnLAN is whether the binary was built with support for modular feature "Wake-on-LAN support". +// HasWakeOnLAN is whether the binary was built with support for modular feature "Wake-on-LAN support". // Specifically, it's whether the binary was NOT built with the "ts_omit_wakeonlan" build tag. // It's a const so it can be used for dead code elimination. -const WakeOnLAN = false +const HasWakeOnLAN = false diff --git a/feature/featuretags/feature_wakeonlan_enabled.go b/feature/buildfeatures/feature_wakeonlan_enabled.go similarity index 57% rename from feature/featuretags/feature_wakeonlan_enabled.go rename to feature/buildfeatures/feature_wakeonlan_enabled.go index 87eed5abf..34b3348a1 100644 --- a/feature/featuretags/feature_wakeonlan_enabled.go +++ b/feature/buildfeatures/feature_wakeonlan_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_wakeonlan -package featuretags +package buildfeatures -// WakeOnLAN is whether the binary was built with support for modular feature "Wake-on-LAN support". +// HasWakeOnLAN is whether the binary was built with support for modular feature "Wake-on-LAN support". // Specifically, it's whether the binary was NOT built with the "ts_omit_wakeonlan" build tag. // It's a const so it can be used for dead code elimination. -const WakeOnLAN = true +const HasWakeOnLAN = true diff --git a/feature/featuretags/feature_webclient_disabled.go b/feature/buildfeatures/feature_webclient_disabled.go similarity index 57% rename from feature/featuretags/feature_webclient_disabled.go rename to feature/buildfeatures/feature_webclient_disabled.go index d49cbf8a7..a7b24f4ac 100644 --- a/feature/featuretags/feature_webclient_disabled.go +++ b/feature/buildfeatures/feature_webclient_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_webclient -package featuretags +package buildfeatures -// WebClient is whether the binary was built with support for modular feature "Web client support". +// HasWebClient is whether the binary was built with support for modular feature "Web client support". // Specifically, it's whether the binary was NOT built with the "ts_omit_webclient" build tag. // It's a const so it can be used for dead code elimination. 
-const WebClient = false +const HasWebClient = false diff --git a/feature/featuretags/feature_webclient_enabled.go b/feature/buildfeatures/feature_webclient_enabled.go similarity index 57% rename from feature/featuretags/feature_webclient_enabled.go rename to feature/buildfeatures/feature_webclient_enabled.go index 020ff64a0..e40dad33c 100644 --- a/feature/featuretags/feature_webclient_enabled.go +++ b/feature/buildfeatures/feature_webclient_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_webclient -package featuretags +package buildfeatures -// WebClient is whether the binary was built with support for modular feature "Web client support". +// HasWebClient is whether the binary was built with support for modular feature "Web client support". // Specifically, it's whether the binary was NOT built with the "ts_omit_webclient" build tag. // It's a const so it can be used for dead code elimination. -const WebClient = true +const HasWebClient = true diff --git a/feature/featuretags/gen-featuretags.go b/feature/buildfeatures/gen.go similarity index 80% rename from feature/featuretags/gen-featuretags.go rename to feature/buildfeatures/gen.go index 27701fb78..e967cb8ff 100644 --- a/feature/featuretags/gen-featuretags.go +++ b/feature/buildfeatures/gen.go @@ -3,7 +3,7 @@ //go:build ignore -// The gen-featuretags.go program generates the feature__enabled.go +// The gens.go program generates the feature__enabled.go // and feature__disabled.go files for each feature tag. package main @@ -20,7 +20,7 @@ import ( const header = `// Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code g|e|n|e|r|a|t|e|d by gen-featuretags.go; D|O N|OT E|D|I|T. +// Code g|e|n|e|r|a|t|e|d by gen.go; D|O N|OT E|D|I|T. ` @@ -30,14 +30,14 @@ func main() { if !k.IsOmittable() { continue } - sym := cmp.Or(m.Sym, strings.ToUpper(string(k)[:1])+string(k)[1:]) + sym := "Has" + cmp.Or(m.Sym, strings.ToUpper(string(k)[:1])+string(k)[1:]) for _, suf := range []string{"enabled", "disabled"} { bang := "" if suf == "enabled" { bang = "!" // !ts_omit_... } must.Do(os.WriteFile("feature_"+string(k)+"_"+suf+".go", - fmt.Appendf(nil, "%s//go:build %s%s\n\npackage featuretags\n\n"+ + fmt.Appendf(nil, "%s//go:build %s%s\n\npackage buildfeatures\n\n"+ "// %s is whether the binary was built with support for modular feature %q.\n"+ "// Specifically, it's whether the binary was NOT built with the %q build tag.\n"+ "// It's a const so it can be used for dead code elimination.\n"+ diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 55945075b..6778593fa 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -1,8 +1,6 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:generate go run gen-featuretags.go - // The featuretags package is a registry of all the ts_omit-able build tags. package featuretags From 4bb03609bc95734644855976525d7203bb0da7f6 Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Mon, 15 Sep 2025 11:40:34 -0600 Subject: [PATCH 1311/1708] tool/gocross: ensure child process error codes are propagated on non-Unix The Unix implementation of doExec propagates error codes by virtue of the fact that it does an execve; the replacement binary will return the exit code. 
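As a rough sketch of what that means for the wrapper (not the actual gocross code; err is assumed to come from an exec.Cmd Run call, with the standard errors, os, and os/exec packages imported): simply returning Run's error would make the wrapper exit with status 1 no matter how the child failed, so the child's own exit code has to be extracted and re-used explicitly, roughly:

	var ee *exec.ExitError
	if errors.As(err, &ee) {
		// The child started and exited non-zero: mirror its exit code
		// rather than collapsing every failure to status 1.
		os.Exit(ee.ExitCode())
	}
	// Any remaining non-nil err means the child could not be started at all.
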
On non-Unix, we need to simulate these semantics by checking for an ExitError and, when present, passing that value on to os.Exit. We also add error handling to the doExec call for the benefit of handling any errors where doExec fails before being able to execute the desired binary. Updates https://github.com/tailscale/corp/issues/29940 Signed-off-by: Aaron Klotz --- tool/gocross/exec_other.go | 12 +++++++++++- tool/gocross/gocross.go | 6 +++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/tool/gocross/exec_other.go b/tool/gocross/exec_other.go index 7bce0c099..4dd74f84d 100644 --- a/tool/gocross/exec_other.go +++ b/tool/gocross/exec_other.go @@ -6,6 +6,7 @@ package main import ( + "errors" "os" "os/exec" ) @@ -16,5 +17,14 @@ func doExec(cmd string, args []string, env []string) error { c.Stdin = os.Stdin c.Stdout = os.Stdout c.Stderr = os.Stderr - return c.Run() + err := c.Run() + + // Propagate ExitErrors within this func to give us similar semantics to + // the Unix variant. + var ee *exec.ExitError + if errors.As(err, &ee) { + os.Exit(ee.ExitCode()) + } + + return err } diff --git a/tool/gocross/gocross.go b/tool/gocross/gocross.go index c71012d73..41fab3d58 100644 --- a/tool/gocross/gocross.go +++ b/tool/gocross/gocross.go @@ -114,7 +114,11 @@ func main() { } - doExec(filepath.Join(toolchain, "bin/go"), args, os.Environ()) + // Note that doExec only returns if the exec call failed. + if err := doExec(filepath.Join(toolchain, "bin", "go"), args, os.Environ()); err != nil { + fmt.Fprintf(os.Stderr, "executing process: %v\n", err) + os.Exit(1) + } } //go:embed gocross-wrapper.sh From 09dfd94613ebe181217fabec46a254cbd04f94e5 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 15 Sep 2025 12:10:58 -0700 Subject: [PATCH 1312/1708] cmd/omitsize: fix the --features flag When you say --features=foo,bar, that was supposed to mean to only show features "foo" and "bar" in the table. But it was also being used as the set of all features that are omittable, which was wrong, leading to misleading numbers when --features was non-empty. Updates #12614 Change-Id: Idad2fa67fb49c39454032e84a3dede967890fdf5 Signed-off-by: Brad Fitzpatrick --- cmd/omitsize/omitsize.go | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/cmd/omitsize/omitsize.go b/cmd/omitsize/omitsize.go index 841f3ab9e..a4bce6329 100644 --- a/cmd/omitsize/omitsize.go +++ b/cmd/omitsize/omitsize.go @@ -31,12 +31,16 @@ func main() { flag.Parse() var all []string - if *features == "" { - for k := range featuretags.Features { - if k.IsOmittable() { - all = append(all, k.OmitTag()) - } + var allOmittable []string + + for k := range featuretags.Features { + if k.IsOmittable() { + allOmittable = append(allOmittable, k.OmitTag()) } + } + + if *features == "" { + all = slices.Clone(allOmittable) } else { for v := range strings.SplitSeq(*features, ",") { if !strings.HasPrefix(v, "ts_omit_") { @@ -49,15 +53,15 @@ func main() { slices.Sort(all) all = slices.Compact(all) - baseD := measure("tailscaled") - baseC := measure("tailscale") - baseBoth := measure("tailscaled", "ts_include_cli") - - minD := measure("tailscaled", all...) - minC := measure("tailscale", all...) - minBoth := measure("tailscaled", append(slices.Clone(all), "ts_include_cli")...) + minD := measure("tailscaled", allOmittable...) + minC := measure("tailscale", allOmittable...) + minBoth := measure("tailscaled", append(slices.Clone(allOmittable), "ts_include_cli")...) 
if *showRemovals { + baseD := measure("tailscaled") + baseC := measure("tailscale") + baseBoth := measure("tailscaled", "ts_include_cli") + fmt.Printf("Starting with everything and removing a feature...\n\n") fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") @@ -80,7 +84,7 @@ func main() { fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") fmt.Printf("%9d %9d %9d omitting everything\n", minD, minC, minBoth) for _, t := range all { - tags := allExcept(all, t) + tags := allExcept(allOmittable, t) sizeD := measure("tailscaled", tags...) sizeC := measure("tailscale", tags...) sizeBoth := measure("tailscaled", append(tags, "ts_include_cli")...) From 998a667cd5eb42a1f49374c328007db647405d11 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 15 Sep 2025 15:22:13 -0700 Subject: [PATCH 1313/1708] wgengine/magicsock: don't add DERP addrs to endpointState (#17147) endpointState is used for tracking UDP direct connection candidate addresses. If it contains a DERP addr, then direct connection path discovery will always send a wasteful disco ping over it. Additionally, CLI "tailscale ping" via peer relay will race over DERP, leading to a misleading result if pong arrives via DERP first. Disco pongs arriving via DERP never influence path selection. Disco ping/pong via DERP only serves "tailscale ping" reporting. Updates #17121 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 8ab7957ca..fa1f1f88f 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2539,10 +2539,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN // Remember this route if not present. var dup bool if isDerp { - if ep, ok := c.peerMap.endpointForNodeKey(derpNodeSrc); ok { - if ep.addCandidateEndpoint(src.ap, dm.TxID) { - return - } + if _, ok := c.peerMap.endpointForNodeKey(derpNodeSrc); ok { numNodes = 1 } } else { From 5c24f0ed803a0f60d3a05f148f3e20f99f3d00d7 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 15 Sep 2025 06:53:41 +0100 Subject: [PATCH 1314/1708] wgengine/magicsock: send a valid payload in TestNetworkDownSendErrors This test ostensibly checks whether we record an error metric if a packet is dropped because the network is down, but the network connectivity is irrelevant -- the send error is actually because the arguments to Send() are invalid: RebindingUDPConn.WriteWireGuardBatchTo: [unexpected] offset (0) != Geneve header length (8) This patch changes the test so we try to send a valid packet, and we verify this by sending it once before taking the network down. The new error is: magicsock: network down which is what we're trying to test. We then test sending an invalid payload as a separate test case. 
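Condensed, the reworked test takes this shape (simplified from the diff below; buffs, ep, and offset stand in for the valid payload the test constructs):

	conn.SetNetworkUp(true)
	if err := conn.Send(buffs, ep, offset); err != nil {
		t.Fatalf("sanity check: expected successful Send, got %v", err)
	}
	conn.SetNetworkUp(false)
	if err := conn.Send(buffs, ep, offset); err == nil {
		t.Error("want network-down error, got nil")
	}
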
Updates tailscale/corp#22075 Signed-off-by: Alex Chan --- wgengine/magicsock/magicsock_test.go | 83 +++++++++++++++++++++++----- 1 file changed, 69 insertions(+), 14 deletions(-) diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 5774432d5..bb5922c8c 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -3131,34 +3131,89 @@ func TestMaybeRebindOnError(t *testing.T) { }) } -func TestNetworkDownSendErrors(t *testing.T) { +func newTestConnAndRegistry(t *testing.T) (*Conn, *usermetric.Registry, func()) { + t.Helper() bus := eventbus.New() - defer bus.Close() - netMon := must.Get(netmon.New(bus, t.Logf)) - defer netMon.Close() reg := new(usermetric.Registry) + conn := must.Get(NewConn(Options{ DisablePortMapper: true, Logf: t.Logf, NetMon: netMon, - Metrics: reg, EventBus: bus, + Metrics: reg, })) - defer conn.Close() - conn.SetNetworkUp(false) - if err := conn.Send([][]byte{{00}}, &lazyEndpoint{}, 0); err == nil { - t.Error("expected error, got nil") - } - resp := httptest.NewRecorder() - reg.Handler(resp, new(http.Request)) - if !strings.Contains(resp.Body.String(), `tailscaled_outbound_dropped_packets_total{reason="error"} 1`) { - t.Errorf("expected NetworkDown to increment packet dropped metric; got %q", resp.Body.String()) + return conn, reg, func() { + bus.Close() + netMon.Close() + conn.Close() } } +func TestNetworkSendErrors(t *testing.T) { + t.Run("network-down", func(t *testing.T) { + // TODO(alexc): This test case fails on Windows because it never + // successfully sends the first packet: + // + // expected successful Send, got err: "write udp4 0.0.0.0:57516->127.0.0.1:9999: + // wsasendto: The requested address is not valid in its context." + // + // It would be nice to run this test on Windows, but I was already + // on a side quest and it was unclear if this test has ever worked + // correctly on Windows. 
+ if runtime.GOOS == "windows" { + t.Skipf("skipping on %s", runtime.GOOS) + } + + conn, reg, close := newTestConnAndRegistry(t) + defer close() + + buffs := [][]byte{{00, 00, 00, 00, 00, 00, 00, 00}} + ep := &lazyEndpoint{ + src: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:9999")}, + } + offset := 8 + + // Check this is a valid payload to send when the network is up + conn.SetNetworkUp(true) + if err := conn.Send(buffs, ep, offset); err != nil { + t.Errorf("expected successful Send, got err: %q", err) + } + + // Now we know the payload would be sent if the network is up, + // send it again when the network is down + conn.SetNetworkUp(false) + err := conn.Send(buffs, ep, offset) + if err == nil { + t.Error("expected error, got nil") + } + resp := httptest.NewRecorder() + reg.Handler(resp, new(http.Request)) + if !strings.Contains(resp.Body.String(), `tailscaled_outbound_dropped_packets_total{reason="error"} 1`) { + t.Errorf("expected NetworkDown to increment packet dropped metric; got %q", resp.Body.String()) + } + }) + + t.Run("invalid-payload", func(t *testing.T) { + conn, reg, close := newTestConnAndRegistry(t) + defer close() + + conn.SetNetworkUp(false) + err := conn.Send([][]byte{{00}}, &lazyEndpoint{}, 0) + if err == nil { + t.Error("expected error, got nil") + } + resp := httptest.NewRecorder() + reg.Handler(resp, new(http.Request)) + if !strings.Contains(resp.Body.String(), `tailscaled_outbound_dropped_packets_total{reason="error"} 1`) { + t.Errorf("expected invalid payload to increment packet dropped metric; got %q", resp.Body.String()) + } + }) +} + func Test_packetLooksLike(t *testing.T) { discoPub := key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 30: 30, 31: 31})) nakedDisco := make([]byte, 0, 512) From 8b48f3847d91d9a309b9593dcd17d7fe6aae1291 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 15 Sep 2025 15:49:56 -0700 Subject: [PATCH 1315/1708] net/netmon, wgengine/magicsock: simplify LinkChangeLogLimiter signature Remove the need for the caller to hold on to and call an unregister function. Both two callers (one real, one test) already have a context they can use. Use context.AfterFunc instead. There are no observable side effects from scheduling too late if the goroutine doesn't run sync. Updates #17148 Change-Id: Ie697dae0e797494fa8ef27fbafa193bfe5ceb307 Signed-off-by: Brad Fitzpatrick --- net/netmon/loghelper.go | 12 +++++++----- net/netmon/loghelper_test.go | 19 ++++++++++++++----- wgengine/magicsock/magicsock.go | 12 ++++-------- 3 files changed, 25 insertions(+), 18 deletions(-) diff --git a/net/netmon/loghelper.go b/net/netmon/loghelper.go index 824faeef0..96991644c 100644 --- a/net/netmon/loghelper.go +++ b/net/netmon/loghelper.go @@ -4,6 +4,7 @@ package netmon import ( + "context" "sync" "tailscale.com/types/logger" @@ -12,16 +13,17 @@ import ( // LinkChangeLogLimiter returns a new [logger.Logf] that logs each unique // format string to the underlying logger only once per major LinkChange event. // -// The returned function should be called when the logger is no longer needed, -// to release resources from the Monitor. -func LinkChangeLogLimiter(logf logger.Logf, nm *Monitor) (_ logger.Logf, unregister func()) { +// The logger stops tracking seen format strings when the provided context is +// done. 
+func LinkChangeLogLimiter(ctx context.Context, logf logger.Logf, nm *Monitor) logger.Logf { var formatSeen sync.Map // map[string]bool - unregister = nm.RegisterChangeCallback(func(cd *ChangeDelta) { + unregister := nm.RegisterChangeCallback(func(cd *ChangeDelta) { // If we're in a major change or a time jump, clear the seen map. if cd.Major || cd.TimeJumped { formatSeen.Clear() } }) + context.AfterFunc(ctx, unregister) return func(format string, args ...any) { // We only store 'true' in the map, so if it's present then it @@ -38,5 +40,5 @@ func LinkChangeLogLimiter(logf logger.Logf, nm *Monitor) (_ logger.Logf, unregis } logf(format, args...) - }, unregister + } } diff --git a/net/netmon/loghelper_test.go b/net/netmon/loghelper_test.go index 44aa46783..aeac9f031 100644 --- a/net/netmon/loghelper_test.go +++ b/net/netmon/loghelper_test.go @@ -5,13 +5,17 @@ package netmon import ( "bytes" + "context" "fmt" "testing" + "testing/synctest" "tailscale.com/util/eventbus" ) -func TestLinkChangeLogLimiter(t *testing.T) { +func TestLinkChangeLogLimiter(t *testing.T) { synctest.Test(t, syncTestLinkChangeLogLimiter) } + +func syncTestLinkChangeLogLimiter(t *testing.T) { bus := eventbus.New() defer bus.Close() mon, err := New(bus, t.Logf) @@ -30,8 +34,10 @@ func TestLinkChangeLogLimiter(t *testing.T) { fmt.Fprintf(&logBuffer, format, args...) } - logf, unregister := LinkChangeLogLimiter(logf, mon) - defer unregister() + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + logf = LinkChangeLogLimiter(ctx, logf, mon) // Log once, which should write to our log buffer. logf("hello %s", "world") @@ -72,8 +78,11 @@ func TestLinkChangeLogLimiter(t *testing.T) { t.Errorf("unexpected log buffer contents: %q", got) } - // Unregistering the callback should clear our 'cbs' set. - unregister() + // Canceling the context we passed to LinkChangeLogLimiter should + // unregister the callback from the netmon. + cancel() + synctest.Wait() + mon.mu.Lock() if len(mon.cbs) != 0 { t.Errorf("expected no callbacks, got %v", mon.cbs) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index fa1f1f88f..36402122c 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -209,10 +209,6 @@ type Conn struct { // port mappings from NAT devices. portMapper *portmapper.Client - // portMapperLogfUnregister is the function to call to unregister - // the portmapper log limiter. - portMapperLogfUnregister func() - // derpRecvCh is used by receiveDERP to read DERP messages. // It must have buffer size > 0; see issue 3736. derpRecvCh chan derpReadResult @@ -748,10 +744,13 @@ func NewConn(opts Options) (*Conn, error) { c.subsDoneCh = make(chan struct{}) go c.consumeEventbusTopics() + c.connCtx, c.connCtxCancel = context.WithCancel(context.Background()) + c.donec = c.connCtx.Done() + // Don't log the same log messages possibly every few seconds in our // portmapper. 
portmapperLogf := logger.WithPrefix(c.logf, "portmapper: ") - portmapperLogf, c.portMapperLogfUnregister = netmon.LinkChangeLogLimiter(portmapperLogf, opts.NetMon) + portmapperLogf = netmon.LinkChangeLogLimiter(c.connCtx, portmapperLogf, opts.NetMon) portMapOpts := &portmapper.DebugKnobs{ DisableAll: func() bool { return opts.DisablePortMapper || c.onlyTCP443.Load() }, } @@ -772,8 +771,6 @@ func NewConn(opts Options) (*Conn, error) { return nil, err } - c.connCtx, c.connCtxCancel = context.WithCancel(context.Background()) - c.donec = c.connCtx.Done() c.netChecker = &netcheck.Client{ Logf: logger.WithPrefix(c.logf, "netcheck: "), NetMon: c.netMon, @@ -3330,7 +3327,6 @@ func (c *Conn) Close() error { } c.stopPeriodicReSTUNTimerLocked() c.portMapper.Close() - c.portMapperLogfUnregister() c.peerMap.forEachEndpoint(func(ep *endpoint) { ep.stopAndReset() From 24dd19c9a01235363f20b762fbf3b83a7d488313 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 15 Sep 2025 16:32:12 -0700 Subject: [PATCH 1316/1708] tstest/integration{/testcontrol}: add peer relay integration test (#17103) Updates tailscale/corp#30903 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 26 ++++- tstest/integration/integration.go | 18 ++- tstest/integration/integration_test.go | 103 ++++++++++++++++++ tstest/integration/testcontrol/testcontrol.go | 17 ++- 4 files changed, 155 insertions(+), 9 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index b90a62345..24304e8ec 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -6,9 +6,13 @@ package relayserver import ( + "log" + "net/netip" + "strings" "sync" "tailscale.com/disco" + "tailscale.com/envknob" "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" @@ -115,6 +119,26 @@ func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsV e.handleBusLifetimeLocked() } +// overrideAddrs returns TS_DEBUG_RELAY_SERVER_ADDRS as []netip.Addr, if set. It +// can be between 0 and 3 comma-separated Addrs. TS_DEBUG_RELAY_SERVER_ADDRS is +// not a stable interface, and is subject to change. +var overrideAddrs = sync.OnceValue(func() (ret []netip.Addr) { + all := envknob.String("TS_DEBUG_RELAY_SERVER_ADDRS") + const max = 3 + remain := all + for remain != "" && len(ret) < max { + var s string + s, remain, _ = strings.Cut(remain, ",") + addr, err := netip.ParseAddr(s) + if err != nil { + log.Printf("ignoring invalid Addr %q in TS_DEBUG_RELAY_SERVER_ADDRS %q: %v", s, all, err) + continue + } + ret = append(ret, addr) + } + return +}) + func (e *extension) consumeEventbusTopics(port int) { defer close(e.busDoneCh) @@ -140,7 +164,7 @@ func (e *extension) consumeEventbusTopics(port int) { case req := <-reqSub.Events(): if rs == nil { var err error - rs, err = udprelay.NewServer(e.logf, port, nil) + rs, err = udprelay.NewServer(e.logf, port, overrideAddrs()) if err != nil { e.logf("error initializing server: %v", err) continue diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index 987bb569a..b28ebaba1 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -480,11 +480,13 @@ func (lc *LogCatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) { // TestEnv contains the test environment (set of servers) used by one // or more nodes. 
type TestEnv struct { - t testing.TB - tunMode bool - cli string - daemon string - loopbackPort *int + t testing.TB + tunMode bool + cli string + daemon string + loopbackPort *int + neverDirectUDP bool + relayServerUseLoopback bool LogCatcher *LogCatcher LogCatcherServer *httptest.Server @@ -842,6 +844,12 @@ func (n *TestNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { if n.env.loopbackPort != nil { cmd.Env = append(cmd.Env, "TS_DEBUG_NETSTACK_LOOPBACK_PORT="+strconv.Itoa(*n.env.loopbackPort)) } + if n.env.neverDirectUDP { + cmd.Env = append(cmd.Env, "TS_DEBUG_NEVER_DIRECT_UDP=1") + } + if n.env.relayServerUseLoopback { + cmd.Env = append(cmd.Env, "TS_DEBUG_RELAY_SERVER_ADDRS=::1,127.0.0.1") + } if version.IsRace() { cmd.Env = append(cmd.Env, "GORACE=halt_on_error=1") } diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index de464108c..b282adcf8 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -44,6 +44,7 @@ import ( "tailscale.com/types/opt" "tailscale.com/types/ptr" "tailscale.com/util/must" + "tailscale.com/util/set" ) func TestMain(m *testing.M) { @@ -1530,3 +1531,105 @@ func TestEncryptStateMigration(t *testing.T) { runNode(t, wantPlaintextStateKeys) }) } + +// TestPeerRelayPing creates three nodes with one acting as a peer relay. +// The test succeeds when "tailscale ping" flows through the peer +// relay between all 3 nodes. +func TestPeerRelayPing(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + + env := NewTestEnv(t, ConfigureControl(func(server *testcontrol.Server) { + server.PeerRelayGrants = true + })) + env.neverDirectUDP = true + env.relayServerUseLoopback = true + + n1 := NewTestNode(t, env) + n2 := NewTestNode(t, env) + peerRelay := NewTestNode(t, env) + + allNodes := []*TestNode{n1, n2, peerRelay} + wantPeerRelayServers := make(set.Set[string]) + for _, n := range allNodes { + n.StartDaemon() + n.AwaitResponding() + n.MustUp() + wantPeerRelayServers.Add(n.AwaitIP4().String()) + n.AwaitRunning() + } + + if err := peerRelay.Tailscale("set", "--relay-server-port=0").Run(); err != nil { + t.Fatal(err) + } + + errCh := make(chan error) + for _, a := range allNodes { + go func() { + err := tstest.WaitFor(time.Second*5, func() error { + out, err := a.Tailscale("debug", "peer-relay-servers").CombinedOutput() + if err != nil { + return fmt.Errorf("debug peer-relay-servers failed: %v", err) + } + servers := make([]string, 0) + err = json.Unmarshal(out, &servers) + if err != nil { + return fmt.Errorf("failed to unmarshal debug peer-relay-servers: %v", err) + } + gotPeerRelayServers := make(set.Set[string]) + for _, server := range servers { + gotPeerRelayServers.Add(server) + } + if !gotPeerRelayServers.Equal(wantPeerRelayServers) { + return fmt.Errorf("got peer relay servers: %v want: %v", gotPeerRelayServers, wantPeerRelayServers) + } + return nil + }) + errCh <- err + }() + } + for range allNodes { + err := <-errCh + if err != nil { + t.Fatal(err) + } + } + + pingPairs := make([][2]*TestNode, 0) + for _, a := range allNodes { + for _, z := range allNodes { + if a == z { + continue + } + pingPairs = append(pingPairs, [2]*TestNode{a, z}) + } + } + for _, pair := range pingPairs { + go func() { + a := pair[0] + z := pair[1] + err := tstest.WaitFor(time.Second*10, func() error { + remoteKey := z.MustStatus().Self.PublicKey + if err := a.Tailscale("ping", "--until-direct=false", "--c=1", "--timeout=1s", z.AwaitIP4().String()).Run(); err != nil { + return err + } + remotePeer, ok := 
a.MustStatus().Peer[remoteKey] + if !ok { + return fmt.Errorf("%v->%v remote peer not found", a.MustStatus().Self.ID, z.MustStatus().Self.ID) + } + if len(remotePeer.PeerRelay) == 0 { + return fmt.Errorf("%v->%v not using peer relay, curAddr=%v relay=%v", a.MustStatus().Self.ID, z.MustStatus().Self.ID, remotePeer.CurAddr, remotePeer.Relay) + } + t.Logf("%v->%v using peer relay addr: %v", a.MustStatus().Self.ID, z.MustStatus().Self.ID, remotePeer.PeerRelay) + return nil + }) + errCh <- err + }() + } + for range pingPairs { + err := <-errCh + if err != nil { + t.Fatal(err) + } + } +} diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 2fbf37de9..66d868aca 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -55,6 +55,10 @@ type Server struct { MagicDNSDomain string HandleC2N http.Handler // if non-nil, used for /some-c2n-path/ in tests + // PeerRelayGrants, if true, inserts relay capabilities into the wildcard + // grants rules. + PeerRelayGrants bool + // AllNodesSameUser, if true, makes all created nodes // belong to the same user. AllNodesSameUser bool @@ -931,14 +935,21 @@ var keepAliveMsg = &struct { KeepAlive: true, } -func packetFilterWithIngressCaps() []tailcfg.FilterRule { +func packetFilterWithIngress(addRelayCaps bool) []tailcfg.FilterRule { out := slices.Clone(tailcfg.FilterAllowAll) + caps := []tailcfg.PeerCapability{ + tailcfg.PeerCapabilityIngress, + } + if addRelayCaps { + caps = append(caps, tailcfg.PeerCapabilityRelay) + caps = append(caps, tailcfg.PeerCapabilityRelayTarget) + } out = append(out, tailcfg.FilterRule{ SrcIPs: []string{"*"}, CapGrant: []tailcfg.CapGrant{ { Dsts: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}, - Caps: []tailcfg.PeerCapability{tailcfg.PeerCapabilityIngress}, + Caps: caps, }, }, }) @@ -977,7 +988,7 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, DERPMap: s.DERPMap, Domain: domain, CollectServices: "true", - PacketFilter: packetFilterWithIngressCaps(), + PacketFilter: packetFilterWithIngress(s.PeerRelayGrants), DNSConfig: dns, ControlTime: &t, } From 5ad3bd9f47ab631bf10d6f480e7c22850e7c024f Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Mon, 15 Sep 2025 19:45:07 -0400 Subject: [PATCH 1317/1708] flake.nix: fix go version (#17152) Bump to 1.25.1 to match go.mod Fixes #17150 Signed-off-by: Mike O'Driscoll --- flake.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flake.nix b/flake.nix index 8cb5e078e..8f1fe026d 100644 --- a/flake.nix +++ b/flake.nix @@ -46,8 +46,8 @@ systems, flake-compat, }: let - go125Version = "1.25.0"; - goHash = "sha256-S9AekSlyB7+kUOpA1NWpOxtTGl5DhHOyoG4Y4HciciU="; + go125Version = "1.25.1"; + goHash = "sha256-0BDBCc7pTYDv5oHqtGvepJGskGv0ZYPDLp8NuwvRpZQ="; eachSystem = f: nixpkgs.lib.genAttrs (import systems) (system: f (import nixpkgs { From 5b5ae2b2eea44f30ea4afe78f2176d1b3fcd4809 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 16 Sep 2025 07:44:08 -0700 Subject: [PATCH 1318/1708] util/eventbus: add a Done channel to the Client (#17118) Subscribers already have a Done channel that the caller can use to detect when the subscriber has been closed. Typically this happens when the governing Client closes, which in turn is typically because the Bus closed. 
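For context, a minimal sketch of the consumer pattern this change enables (hypothetical: handle is a placeholder, Done is the method added below, and Client, Subscribe, and Events are the existing eventbus APIs):

	c := bus.Client("example")
	sub := eventbus.Subscribe[EventA](c)
	go func() {
		for {
			select {
			case ev := <-sub.Events():
				handle(ev) // placeholder handler
			case <-c.Done():
				// The client has closed: all of its publishers and
				// subscribers are closed, and no more events are coming.
				return
			}
		}
	}()
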
But clients and subscribers can stop at other times too, and a caller has no good way to tell the difference between "this subscriber closed but the rest are OK" and "the client closed and all these subscribers are finished". We've worked around this in practice by knowing the closure of one subscriber implies the fate of the rest, but we can do better: Add a Done method to the Client that allows us to tell when that has been closed explicitly, after all the publishers and subscribers associated with that client have been closed. This allows the caller to be sure that, by the time that occurs, no further pending events are forthcoming on that client. Updates #15160 Change-Id: Id601a79ba043365ecdb47dd035f1fdadd984f303 Signed-off-by: M. J. Fromberger --- util/eventbus/bus_test.go | 45 ++++++++++++++++++++++++++++++++++++++- util/eventbus/client.go | 13 ++++++++--- 2 files changed, 54 insertions(+), 4 deletions(-) diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index e159b6a12..9fd0e4409 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -27,7 +27,16 @@ func TestBus(t *testing.T) { defer b.Close() c := b.Client("TestSub") - defer c.Close() + cdone := c.Done() + defer func() { + c.Close() + select { + case <-cdone: + t.Log("Client close signal received (OK)") + case <-time.After(time.Second): + t.Error("timed out waiting for client close signal") + } + }() s := eventbus.Subscribe[EventA](c) go func() { @@ -178,6 +187,40 @@ func TestSpam(t *testing.T) { // subsequences of the received slices. } +func TestClient_Done(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client(t.Name()) + s := eventbus.Subscribe[string](c) + + // The client is not Done until closed. + select { + case <-c.Done(): + t.Fatal("Client done before being closed") + default: + // OK + } + + go c.Close() + + // Once closed, the client becomes Done. + select { + case <-c.Done(): + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for Client to be done") + } + + // Thereafter, the subscriber should also be closed. + select { + case <-s.Done(): + // OK + case <-time.After(time.Second): + t.Fatal("timoeout waiting for Subscriber to be done") + } +} + type queueChecker struct { t *testing.T want []any diff --git a/util/eventbus/client.go b/util/eventbus/client.go index a6266a4d8..176b6f2bc 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -21,9 +21,10 @@ type Client struct { bus *Bus publishDebug hook[PublishedEvent] - mu sync.Mutex - pub set.Set[publisher] - sub *subscribeState // Lazily created on first subscribe + mu sync.Mutex + pub set.Set[publisher] + sub *subscribeState // Lazily created on first subscribe + stop stopFlag // signaled on Close } func (c *Client) Name() string { return c.name } @@ -47,8 +48,14 @@ func (c *Client) Close() { for p := range pub { p.Close() } + c.stop.Stop() } +// Done returns a channel that is closed when [Client.Close] is called. +// The channel is closed after all the publishers and subscribers governed by +// the client have been closed. 
+func (c *Client) Done() <-chan struct{} { return c.stop.Done() } + func (c *Client) snapshotSubscribeQueue() []DeliveredEvent { return c.peekSubscribeState().snapshotQueue() } From 4cca9f7c673f0a3b027b28170bd218520875ea4c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 13 Sep 2025 20:20:08 -0700 Subject: [PATCH 1319/1708] all: add ts_omit_serve, start making tailscale serve/funnel be modular tailscaled tailscale combined (linux/amd64) 29853147 17384418 31412596 omitting everything + 621570 + 219277 + 554256 .. add serve Updates #17128 Change-Id: I87c2c6c3d3fc2dc026c3de8ef7000a813b41d31c Signed-off-by: Brad Fitzpatrick --- client/local/local.go | 40 ---- client/local/serve.go | 55 +++++ cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/cli/cli.go | 6 +- cmd/tailscale/cli/funnel.go | 45 ++++ cmd/tailscale/cli/serve_legacy.go | 6 + cmd/tailscale/cli/serve_v2.go | 2 + cmd/tailscale/cli/status.go | 41 +--- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + ipn/ipnlocal/c2n.go | 13 -- ipn/ipnlocal/local.go | 277 ++++--------------------- ipn/ipnlocal/peerapi.go | 69 +------ ipn/ipnlocal/serve.go | 332 ++++++++++++++++++++++++++++++ ipn/ipnlocal/serve_disabled.go | 34 +++ ipn/ipnlocal/serve_test.go | 2 + ipn/localapi/localapi.go | 86 -------- ipn/localapi/serve.go | 108 ++++++++++ tsnet/depaware.txt | 1 + wgengine/netstack/netstack.go | 20 +- 21 files changed, 651 insertions(+), 491 deletions(-) create mode 100644 client/local/serve.go create mode 100644 ipn/ipnlocal/serve_disabled.go create mode 100644 ipn/localapi/serve.go diff --git a/client/local/local.go b/client/local/local.go index 03ca10bb7..32e8208da 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -1217,20 +1217,6 @@ func (lc *Client) Ping(ctx context.Context, ip netip.Addr, pingtype tailcfg.Ping return lc.PingWithOpts(ctx, ip, pingtype, PingOpts{}) } -// SetServeConfig sets or replaces the serving settings. -// If config is nil, settings are cleared and serving is disabled. -func (lc *Client) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error { - h := make(http.Header) - if config != nil { - h.Set("If-Match", config.ETag) - } - _, _, err := lc.sendWithHeaders(ctx, "POST", "/localapi/v0/serve-config", 200, jsonBody(config), h) - if err != nil { - return fmt.Errorf("sending serve config: %w", err) - } - return nil -} - // DisconnectControl shuts down all connections to control, thus making control consider this node inactive. This can be // run on HA subnet router or app connector replicas before shutting them down to ensure peers get told to switch over // to another replica whilst there is still some grace period for the existing connections to terminate. @@ -1242,32 +1228,6 @@ func (lc *Client) DisconnectControl(ctx context.Context) error { return nil } -// GetServeConfig return the current serve config. -// -// If the serve config is empty, it returns (nil, nil). 
-func (lc *Client) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) { - body, h, err := lc.sendWithHeaders(ctx, "GET", "/localapi/v0/serve-config", 200, nil, nil) - if err != nil { - return nil, fmt.Errorf("getting serve config: %w", err) - } - sc, err := getServeConfigFromJSON(body) - if err != nil { - return nil, err - } - if sc == nil { - sc = new(ipn.ServeConfig) - } - sc.ETag = h.Get("Etag") - return sc, nil -} - -func getServeConfigFromJSON(body []byte) (sc *ipn.ServeConfig, err error) { - if err := json.Unmarshal(body, &sc); err != nil { - return nil, err - } - return sc, nil -} - // tailscaledConnectHint gives a little thing about why tailscaled (or // platform equivalent) is not answering localapi connections. // diff --git a/client/local/serve.go b/client/local/serve.go new file mode 100644 index 000000000..51d15e7e5 --- /dev/null +++ b/client/local/serve.go @@ -0,0 +1,55 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_serve + +package local + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "tailscale.com/ipn" +) + +// GetServeConfig return the current serve config. +// +// If the serve config is empty, it returns (nil, nil). +func (lc *Client) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) { + body, h, err := lc.sendWithHeaders(ctx, "GET", "/localapi/v0/serve-config", 200, nil, nil) + if err != nil { + return nil, fmt.Errorf("getting serve config: %w", err) + } + sc, err := getServeConfigFromJSON(body) + if err != nil { + return nil, err + } + if sc == nil { + sc = new(ipn.ServeConfig) + } + sc.ETag = h.Get("Etag") + return sc, nil +} + +func getServeConfigFromJSON(body []byte) (sc *ipn.ServeConfig, err error) { + if err := json.Unmarshal(body, &sc); err != nil { + return nil, err + } + return sc, nil +} + +// SetServeConfig sets or replaces the serving settings. +// If config is nil, settings are cleared and serving is disabled. +func (lc *Client) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error { + h := make(http.Header) + if config != nil { + h.Set("If-Match", config.ETag) + } + _, _, err := lc.sendWithHeaders(ctx, "POST", "/localapi/v0/serve-config", 200, jsonBody(config), h) + if err != nil { + return fmt.Errorf("sending serve config: %w", err) + } + return nil +} diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index d94b5b6cf..87bae60c8 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -798,6 +798,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index ef0dc9820..d039be607 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -213,6 +213,8 @@ var ( maybeWebCmd, maybeDriveCmd, maybeNetlockCmd, + maybeFunnelCmd, + maybeServeCmd, _ func() *ffcli.Command ) @@ -254,8 +256,8 @@ change in the future. 
pingCmd, ncCmd, sshCmd, - funnelCmd(), - serveCmd(), + nilOrCall(maybeFunnelCmd), + nilOrCall(maybeServeCmd), versionCmd, nilOrCall(maybeWebCmd), nilOrCall(fileCmd), diff --git a/cmd/tailscale/cli/funnel.go b/cmd/tailscale/cli/funnel.go index f4a1c6bfd..34b0c74c2 100644 --- a/cmd/tailscale/cli/funnel.go +++ b/cmd/tailscale/cli/funnel.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + package cli import ( @@ -16,6 +18,10 @@ import ( "tailscale.com/tailcfg" ) +func init() { + maybeFunnelCmd = funnelCmd +} + var funnelCmd = func() *ffcli.Command { se := &serveEnv{lc: &localClient} // previously used to serve legacy newFunnelCommand unless useWIPCode is true @@ -174,3 +180,42 @@ func printFunnelWarning(sc *ipn.ServeConfig) { fmt.Fprintf(Stderr, " run: `tailscale serve --help` to see how to configure handlers\n") } } + +func init() { + hookPrintFunnelStatus.Set(printFunnelStatus) +} + +// printFunnelStatus prints the status of the funnel, if it's running. +// It prints nothing if the funnel is not running. +func printFunnelStatus(ctx context.Context) { + sc, err := localClient.GetServeConfig(ctx) + if err != nil { + outln() + printf("# Funnel:\n") + printf("# - Unable to get Funnel status: %v\n", err) + return + } + if !sc.IsFunnelOn() { + return + } + outln() + printf("# Funnel on:\n") + for hp, on := range sc.AllowFunnel { + if !on { // if present, should be on + continue + } + sni, portStr, _ := net.SplitHostPort(string(hp)) + p, _ := strconv.ParseUint(portStr, 10, 16) + isTCP := sc.IsTCPForwardingOnPort(uint16(p), noService) + url := "https://" + if isTCP { + url = "tcp://" + } + url += sni + if isTCP || p != 443 { + url += ":" + portStr + } + printf("# - %s\n", url) + } + outln() +} diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 3fbddeabf..b60e9833b 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + package cli import ( @@ -31,6 +33,10 @@ import ( "tailscale.com/version" ) +func init() { + maybeServeCmd = serveCmd +} + var serveCmd = func() *ffcli.Command { se := &serveEnv{lc: &localClient} // previously used to serve legacy newFunnelCommand unless useWIPCode is true diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 903036db4..058d80649 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + package cli import ( diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index 726606109..97f6708db 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -15,12 +15,12 @@ import ( "net/http" "net/netip" "os" - "strconv" "strings" "github.com/peterbourgon/ff/v3/ffcli" "github.com/toqueteos/webbrowser" "golang.org/x/net/idna" + "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/net/netmon" @@ -238,44 +238,13 @@ func runStatus(ctx context.Context, args []string) error { outln() printHealth() } - printFunnelStatus(ctx) + if f, ok := hookPrintFunnelStatus.GetOk(); ok { + f(ctx) + } return nil } -// printFunnelStatus prints the status of the funnel, if it's running. -// It prints nothing if the funnel is not running. 
-func printFunnelStatus(ctx context.Context) { - sc, err := localClient.GetServeConfig(ctx) - if err != nil { - outln() - printf("# Funnel:\n") - printf("# - Unable to get Funnel status: %v\n", err) - return - } - if !sc.IsFunnelOn() { - return - } - outln() - printf("# Funnel on:\n") - for hp, on := range sc.AllowFunnel { - if !on { // if present, should be on - continue - } - sni, portStr, _ := net.SplitHostPort(string(hp)) - p, _ := strconv.ParseUint(portStr, 10, 16) - isTCP := sc.IsTCPForwardingOnPort(uint16(p), noService) - url := "https://" - if isTCP { - url = "tcp://" - } - url += sni - if isTCP || p != 443 { - url += ":" + portStr - } - printf("# - %s\n", url) - } - outln() -} +var hookPrintFunnelStatus feature.Hook[func(context.Context)] // isRunningOrStarting reports whether st is in state Running or Starting. // It also returns a description of the status suitable to display to a user. diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index a983f1c09..a39363353 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -104,7 +104,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web - tailscale.com/feature from tailscale.com/tsweb + tailscale.com/feature from tailscale.com/tsweb+ tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli tailscale.com/feature/syspolicy from tailscale.com/cmd/tailscale/cli tailscale.com/health from tailscale.com/net/tlsdial+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index a0842b45b..736c268dc 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -272,6 +272,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/feature/wakeonlan+ + tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/drive from tailscale.com/feature/condregister diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index cfe44d1dc..c9cd12d41 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -239,6 +239,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 2c13f0619..b5f50f3bc 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -72,9 +72,6 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ // Linux netfilter. req("POST /netfilter-kind"): handleC2NSetNetfilterKind, - - // VIP services. - req("GET /vip-services"): handleC2NVIPServicesGet, } // RegisterC2N registers a new c2n handler for the given pattern. 
@@ -280,16 +277,6 @@ func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.R w.WriteHeader(http.StatusNoContent) } -func handleC2NVIPServicesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: GET /vip-services received") - var res tailcfg.C2NVIPServicesResponse - res.VIPServices = b.VIPServices() - res.ServicesHash = b.vipServiceHash(res.VIPServices) - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - func handleC2NUpdateGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { b.logf("c2n: GET /update received") diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c98a0810d..6d92e58d0 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -18,7 +18,6 @@ import ( "fmt" "io" "log" - "maps" "math" "math/rand/v2" "net" @@ -53,6 +52,7 @@ import ( "tailscale.com/envknob" "tailscale.com/envknob/featureknob" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" @@ -585,7 +585,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.e.SetJailedFilter(noneFilter) b.setTCPPortsIntercepted(nil) - b.setVIPServicesTCPPortsIntercepted(nil) b.statusChanged = sync.NewCond(&b.statusLock) b.e.SetStatusCallback(b.setWgengineStatus) @@ -3759,46 +3758,6 @@ func generateInterceptVIPServicesTCPPortFunc(svcAddrPorts map[netip.Addr]func(ui } } -// setVIPServicesTCPPortsIntercepted populates b.shouldInterceptVIPServicesTCPPortAtomic with an -// efficient func for ShouldInterceptTCPPort to use, which is called on every incoming packet. -func (b *LocalBackend) setVIPServicesTCPPortsIntercepted(svcPorts map[tailcfg.ServiceName][]uint16) { - b.mu.Lock() - defer b.mu.Unlock() - b.setVIPServicesTCPPortsInterceptedLocked(svcPorts) -} - -func (b *LocalBackend) setVIPServicesTCPPortsInterceptedLocked(svcPorts map[tailcfg.ServiceName][]uint16) { - if len(svcPorts) == 0 { - b.shouldInterceptVIPServicesTCPPortAtomic.Store(func(netip.AddrPort) bool { return false }) - return - } - nm := b.currentNode().NetMap() - if nm == nil { - b.logf("can't set intercept function for Service TCP Ports, netMap is nil") - return - } - vipServiceIPMap := nm.GetVIPServiceIPMap() - if len(vipServiceIPMap) == 0 { - // No approved VIP Services - return - } - - svcAddrPorts := make(map[netip.Addr]func(uint16) bool) - // Only set the intercept function if the service has been assigned a VIP. - for svcName, ports := range svcPorts { - addrs, ok := vipServiceIPMap[svcName] - if !ok { - continue - } - interceptFn := generateInterceptTCPPortFunc(ports) - for _, addr := range addrs { - svcAddrPorts[addr] = interceptFn - } - } - - b.shouldInterceptVIPServicesTCPPortAtomic.Store(generateInterceptVIPServicesTCPPortFunc(svcAddrPorts)) -} - // setAtomicValuesFromPrefsLocked populates sshAtomicBool, containsViaIPFuncAtomic, // shouldInterceptTCPPortAtomic, and exposeRemoteWebClientAtomicBool from the prefs p, // which may be !Valid(). 
@@ -3809,7 +3768,9 @@ func (b *LocalBackend) setAtomicValuesFromPrefsLocked(p ipn.PrefsView) { if !p.Valid() { b.containsViaIPFuncAtomic.Store(ipset.FalseContainsIPFunc()) b.setTCPPortsIntercepted(nil) - b.setVIPServicesTCPPortsInterceptedLocked(nil) + if f, ok := hookServeClearVIPServicesTCPPortsInterceptedLocked.GetOk(); ok { + f(b) + } b.lastServeConfJSON = mem.B(nil) b.serveConfig = ipn.ServeConfigView{} } else { @@ -4738,32 +4699,6 @@ func (b *LocalBackend) checkProfileNameLocked(p *ipn.Prefs) error { return nil } -// wantIngressLocked reports whether this node has ingress configured. This bool -// is sent to the coordination server (in Hostinfo.WireIngress) as an -// optimization hint to know primarily which nodes are NOT using ingress, to -// avoid doing work for regular nodes. -// -// Even if the user's ServeConfig.AllowFunnel map was manually edited in raw -// mode and contains map entries with false values, sending true (from Len > 0) -// is still fine. This is only an optimization hint for the control plane and -// doesn't affect security or correctness. And we also don't expect people to -// modify their ServeConfig in raw mode. -func (b *LocalBackend) wantIngressLocked() bool { - return b.serveConfig.Valid() && b.serveConfig.HasAllowFunnel() -} - -// hasIngressEnabledLocked reports whether the node has any funnel endpoint enabled. This bool is sent to control (in -// Hostinfo.IngressEnabled) to determine whether 'Funnel' badge should be displayed on this node in the admin panel. -func (b *LocalBackend) hasIngressEnabledLocked() bool { - return b.serveConfig.Valid() && b.serveConfig.IsFunnelOn() -} - -// shouldWireInactiveIngressLocked reports whether the node is in a state where funnel is not actively enabled, but it -// seems that it is intended to be used with funnel. -func (b *LocalBackend) shouldWireInactiveIngressLocked() bool { - return b.serveConfig.Valid() && !b.hasIngressEnabledLocked() && b.wantIngressLocked() -} - // setPrefsLockedOnEntry requires b.mu be held to call it, but it // unlocks b.mu when done. newp ownership passes to this function. // It returns a read-only copy of the new prefs. @@ -4907,6 +4842,16 @@ var ( magicDNSIPv6 = tsaddr.TailscaleServiceIPv6() ) +// Hook exclusively for serve. +var ( + hookServeTCPHandlerForVIPService feature.Hook[func(b *LocalBackend, dst netip.AddrPort, src netip.AddrPort) (handler func(c net.Conn) error)] + hookTCPHandlerForServe feature.Hook[func(b *LocalBackend, dport uint16, srcAddr netip.AddrPort, f *funnelFlow) (handler func(net.Conn) error)] + hookServeUpdateServeTCPPortNetMapAddrListenersLocked feature.Hook[func(b *LocalBackend, ports []uint16)] + + hookServeSetTCPPortsInterceptedFromNetmapAndPrefsLocked feature.Hook[func(b *LocalBackend, prefs ipn.PrefsView) (handlePorts []uint16)] + hookServeClearVIPServicesTCPPortsInterceptedLocked feature.Hook[func(*LocalBackend)] +) + // TCPHandlerForDst returns a TCP handler for connections to dst, or nil if // no handler is needed. It also returns a list of TCP socket options to // apply to the socket before calling the handler. @@ -4929,10 +4874,10 @@ func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c } } - // TODO(tailscale/corp#26001): Get handler for VIP services and Local IPs using - // the same function. 
- if handler := b.tcpHandlerForVIPService(dst, src); handler != nil { - return handler, opts + if f, ok := hookServeTCPHandlerForVIPService.GetOk(); ok { + if handler := f(b, dst, src); handler != nil { + return handler, opts + } } // Then handle external connections to the local IP. if !b.isLocalIP(dst.Addr()) { @@ -4958,8 +4903,10 @@ func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c return nil }, opts } - if handler := b.tcpHandlerForServe(dst.Port(), src, nil); handler != nil { - return handler, opts + if f, ok := hookTCPHandlerForServe.GetOk(); ok { + if handler := f(b, dst.Port(), src, nil); handler != nil { + return handler, opts + } } return nil, nil } @@ -6341,7 +6288,9 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { netns.SetDisableBindConnToInterface(nm.HasCap(tailcfg.CapabilityDebugDisableBindConnToInterface)) b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(b.pm.CurrentPrefs()) - b.ipVIPServiceMap = nm.GetIPVIPServiceMap() + if buildfeatures.HasServe { + b.ipVIPServiceMap = nm.GetIPVIPServiceMap() + } if !oldSelf.Equal(nm.SelfNodeOrZero()) { for _, f := range b.extHost.Hooks().OnSelfChange { @@ -6411,55 +6360,12 @@ func (b *LocalBackend) setDebugLogsByCapabilityLocked(nm *netmap.NetworkMap) { } } -// reloadServeConfigLocked reloads the serve config from the store or resets the -// serve config to nil if not logged in. The "changed" parameter, when false, instructs -// the method to only run the reset-logic and not reload the store from memory to ensure -// foreground sessions are not removed if they are not saved on disk. -func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) { - if !b.currentNode().Self().Valid() || !prefs.Valid() || b.pm.CurrentProfile().ID() == "" { - // We're not logged in, so we don't have a profile. - // Don't try to load the serve config. - b.lastServeConfJSON = mem.B(nil) - b.serveConfig = ipn.ServeConfigView{} - return - } - - confKey := ipn.ServeConfigKey(b.pm.CurrentProfile().ID()) - // TODO(maisem,bradfitz): prevent reading the config from disk - // if the profile has not changed. - confj, err := b.store.ReadState(confKey) - if err != nil { - b.lastServeConfJSON = mem.B(nil) - b.serveConfig = ipn.ServeConfigView{} - return - } - if b.lastServeConfJSON.Equal(mem.B(confj)) { - return - } - b.lastServeConfJSON = mem.B(confj) - var conf ipn.ServeConfig - if err := json.Unmarshal(confj, &conf); err != nil { - b.logf("invalid ServeConfig %q in StateStore: %v", confKey, err) - b.serveConfig = ipn.ServeConfigView{} - return - } - - // remove inactive sessions - maps.DeleteFunc(conf.Foreground, func(sessionID string, sc *ipn.ServeConfig) bool { - _, ok := b.notifyWatchers[sessionID] - return !ok - }) - - b.serveConfig = conf.View() -} - // setTCPPortsInterceptedFromNetmapAndPrefsLocked calls setTCPPortsIntercepted with // the ports that tailscaled should handle as a function of b.netMap and b.prefs. // // b.mu must be held. func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn.PrefsView) { handlePorts := make([]uint16, 0, 4) - var vipServicesPorts map[tailcfg.ServiceName][]uint16 if prefs.Valid() && prefs.RunSSH() && envknob.CanSSHD() { handlePorts = append(handlePorts, 22) @@ -6473,42 +6379,14 @@ func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn. 
} } - b.reloadServeConfigLocked(prefs) - if b.serveConfig.Valid() { - servePorts := make([]uint16, 0, 3) - for port := range b.serveConfig.TCPs() { - if port > 0 { - servePorts = append(servePorts, uint16(port)) - } - } - handlePorts = append(handlePorts, servePorts...) - - for svc, cfg := range b.serveConfig.Services().All() { - servicePorts := make([]uint16, 0, 3) - for port := range cfg.TCP().All() { - if port > 0 { - servicePorts = append(servicePorts, uint16(port)) - } - } - if _, ok := vipServicesPorts[svc]; !ok { - mak.Set(&vipServicesPorts, svc, servicePorts) - } else { - mak.Set(&vipServicesPorts, svc, append(vipServicesPorts[svc], servicePorts...)) - } - } - - b.setServeProxyHandlersLocked() - - // don't listen on netmap addresses if we're in userspace mode - if !b.sys.IsNetstack() { - b.updateServeTCPPortNetMapAddrListenersLocked(servePorts) - } + if f, ok := hookServeSetTCPPortsInterceptedFromNetmapAndPrefsLocked.GetOk(); ok { + v := f(b, prefs) + handlePorts = append(handlePorts, v...) } // Update funnel and service hash info in hostinfo and kick off control update if needed. b.updateIngressAndServiceHashLocked(prefs) b.setTCPPortsIntercepted(handlePorts) - b.setVIPServicesTCPPortsInterceptedLocked(vipServicesPorts) } // updateIngressAndServiceHashLocked updates the hostinfo.ServicesHash, hostinfo.WireIngress and @@ -6541,51 +6419,6 @@ func (b *LocalBackend) updateIngressAndServiceHashLocked(prefs ipn.PrefsView) { } } -// setServeProxyHandlersLocked ensures there is an http proxy handler for each -// backend specified in serveConfig. It expects serveConfig to be valid and -// up-to-date, so should be called after reloadServeConfigLocked. -func (b *LocalBackend) setServeProxyHandlersLocked() { - if !b.serveConfig.Valid() { - return - } - var backends map[string]bool - for _, conf := range b.serveConfig.Webs() { - for _, h := range conf.Handlers().All() { - backend := h.Proxy() - if backend == "" { - // Only create proxy handlers for servers with a proxy backend. - continue - } - mak.Set(&backends, backend, true) - if _, ok := b.serveProxyHandlers.Load(backend); ok { - continue - } - - b.logf("serve: creating a new proxy handler for %s", backend) - p, err := b.proxyHandlerForBackend(backend) - if err != nil { - // The backend endpoint (h.Proxy) should have been validated by expandProxyTarget - // in the CLI, so just log the error here. - b.logf("[unexpected] could not create proxy for %v: %s", backend, err) - continue - } - b.serveProxyHandlers.Store(backend, p) - } - } - - // Clean up handlers for proxy backends that are no longer present - // in configuration. - b.serveProxyHandlers.Range(func(key, value any) bool { - backend := key.(string) - if !backends[backend] { - b.logf("serve: closing idle connections to %s", backend) - b.serveProxyHandlers.Delete(backend) - value.(*reverseProxy).close() - } - return true - }) -} - // operatorUserName returns the current pref's OperatorUser's name, or the // empty string if none. func (b *LocalBackend) operatorUserName() string { @@ -7196,7 +7029,14 @@ func (b *LocalBackend) ShouldInterceptTCPPort(port uint16) bool { // ShouldInterceptVIPServiceTCPPort reports whether the given TCP port number // to a VIP service should be intercepted by Tailscaled and handled in-process. 
func (b *LocalBackend) ShouldInterceptVIPServiceTCPPort(ap netip.AddrPort) bool { - return b.shouldInterceptVIPServicesTCPPortAtomic.Load()(ap) + if !buildfeatures.HasServe { + return false + } + f := b.shouldInterceptVIPServicesTCPPortAtomic.Load() + if f == nil { + return false + } + return f(ap) } // SwitchProfile switches to the profile with the given id. @@ -8131,15 +7971,6 @@ func maybeUsernameOf(actor ipnauth.Actor) string { return username } -// VIPServices returns the list of tailnet services that this node -// is serving as a destination for. -// The returned memory is owned by the caller. -func (b *LocalBackend) VIPServices() []*tailcfg.VIPService { - b.mu.Lock() - defer b.mu.Unlock() - return b.vipServicesFromPrefsLocked(b.pm.CurrentPrefs()) -} - func (b *LocalBackend) vipServiceHash(services []*tailcfg.VIPService) string { if len(services) == 0 { return "" @@ -8153,39 +7984,9 @@ func (b *LocalBackend) vipServiceHash(services []*tailcfg.VIPService) string { return hex.EncodeToString(hash[:]) } -func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService { - // keyed by service name - var services map[tailcfg.ServiceName]*tailcfg.VIPService - if b.serveConfig.Valid() { - for svc, config := range b.serveConfig.Services().All() { - mak.Set(&services, svc, &tailcfg.VIPService{ - Name: svc, - Ports: config.ServicePortRange(), - }) - } - } - - for _, s := range prefs.AdvertiseServices().All() { - sn := tailcfg.ServiceName(s) - if services == nil || services[sn] == nil { - mak.Set(&services, sn, &tailcfg.VIPService{ - Name: sn, - }) - } - services[sn].Active = true - } - - servicesList := slicesx.MapValues(services) - // [slicesx.MapValues] provides the values in an indeterminate order, but since we'll - // be hashing a representation of this list later we want it to be in a consistent - // order. - slices.SortFunc(servicesList, func(a, b *tailcfg.VIPService) int { - return strings.Compare(a.Name.String(), b.Name.String()) - }) - return servicesList -} - -var metricCurrentWatchIPNBus = clientmetric.NewGauge("localbackend_current_watch_ipn_bus") +var ( + metricCurrentWatchIPNBus = clientmetric.NewGauge("localbackend_current_watch_ipn_bus") +) func (b *LocalBackend) stateEncrypted() opt.Bool { switch runtime.GOOS { diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 23c349087..886a71291 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -28,7 +28,6 @@ import ( "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/hostinfo" - "tailscale.com/ipn" "tailscale.com/net/netaddr" "tailscale.com/net/netmon" "tailscale.com/net/netutil" @@ -387,10 +386,6 @@ func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { case "/v0/sockstats": h.handleServeSockStats(w, r) return - case "/v0/ingress": - metricIngressCalls.Add(1) - h.handleServeIngress(w, r) - return } if ph, ok := peerAPIHandlers[r.URL.Path]; ok { ph(h, w, r) @@ -413,67 +408,6 @@ This is my Tailscale device. Your device is %v. 
} } -func (h *peerAPIHandler) handleServeIngress(w http.ResponseWriter, r *http.Request) { - // http.Errors only useful if hitting endpoint manually - // otherwise rely on log lines when debugging ingress connections - // as connection is hijacked for bidi and is encrypted tls - if !h.canIngress() { - h.logf("ingress: denied; no ingress cap from %v", h.remoteAddr) - http.Error(w, "denied; no ingress cap", http.StatusForbidden) - return - } - logAndError := func(code int, publicMsg string) { - h.logf("ingress: bad request from %v: %s", h.remoteAddr, publicMsg) - http.Error(w, publicMsg, code) - } - bad := func(publicMsg string) { - logAndError(http.StatusBadRequest, publicMsg) - } - if r.Method != "POST" { - logAndError(http.StatusMethodNotAllowed, "only POST allowed") - return - } - srcAddrStr := r.Header.Get("Tailscale-Ingress-Src") - if srcAddrStr == "" { - bad("Tailscale-Ingress-Src header not set") - return - } - srcAddr, err := netip.ParseAddrPort(srcAddrStr) - if err != nil { - bad("Tailscale-Ingress-Src header invalid; want ip:port") - return - } - target := ipn.HostPort(r.Header.Get("Tailscale-Ingress-Target")) - if target == "" { - bad("Tailscale-Ingress-Target header not set") - return - } - if _, _, err := net.SplitHostPort(string(target)); err != nil { - bad("Tailscale-Ingress-Target header invalid; want host:port") - return - } - - getConnOrReset := func() (net.Conn, bool) { - conn, _, err := w.(http.Hijacker).Hijack() - if err != nil { - h.logf("ingress: failed hijacking conn") - http.Error(w, "failed hijacking conn", http.StatusInternalServerError) - return nil, false - } - io.WriteString(conn, "HTTP/1.1 101 Switching Protocols\r\n\r\n") - return &ipn.FunnelConn{ - Conn: conn, - Src: srcAddr, - Target: target, - }, true - } - sendRST := func() { - http.Error(w, "denied", http.StatusForbidden) - } - - h.ps.b.HandleIngressTCPConn(h.peerNode, target, srcAddr, getConnOrReset, sendRST) -} - func (h *peerAPIHandler) handleServeInterfaces(w http.ResponseWriter, r *http.Request) { if !h.canDebug() { http.Error(w, "denied; no debug access", http.StatusForbidden) @@ -1099,6 +1033,5 @@ var ( metricInvalidRequests = clientmetric.NewCounter("peerapi_invalid_requests") // Non-debug PeerAPI endpoints. - metricDNSCalls = clientmetric.NewCounter("peerapi_dns") - metricIngressCalls = clientmetric.NewCounter("peerapi_ingress") + metricDNSCalls = clientmetric.NewCounter("peerapi_dns") ) diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 36738b881..cbf84fb29 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -1,6 +1,10 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + +// TODO: move this whole file to its own package, out of ipnlocal. 
+ package ipnlocal import ( @@ -12,6 +16,7 @@ import ( "errors" "fmt" "io" + "maps" "mime" "net" "net/http" @@ -28,6 +33,7 @@ import ( "time" "unicode/utf8" + "go4.org/mem" "golang.org/x/net/http2" "tailscale.com/ipn" "tailscale.com/logtail/backoff" @@ -36,11 +42,26 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/lazy" "tailscale.com/types/logger" + "tailscale.com/util/clientmetric" "tailscale.com/util/ctxkey" "tailscale.com/util/mak" + "tailscale.com/util/slicesx" "tailscale.com/version" ) +func init() { + hookServeTCPHandlerForVIPService.Set((*LocalBackend).tcpHandlerForVIPService) + hookTCPHandlerForServe.Set((*LocalBackend).tcpHandlerForServe) + hookServeUpdateServeTCPPortNetMapAddrListenersLocked.Set((*LocalBackend).updateServeTCPPortNetMapAddrListenersLocked) + + hookServeSetTCPPortsInterceptedFromNetmapAndPrefsLocked.Set(serveSetTCPPortsInterceptedFromNetmapAndPrefsLocked) + hookServeClearVIPServicesTCPPortsInterceptedLocked.Set(func(b *LocalBackend) { + b.setVIPServicesTCPPortsInterceptedLocked(nil) + }) + + RegisterC2N("GET /vip-services", handleC2NVIPServicesGet) +} + const ( contentTypeHeader = "Content-Type" grpcBaseContentType = "application/grpc" @@ -222,6 +243,10 @@ func (s *localListener) handleListenersAccept(ln net.Listener) error { // // b.mu must be held. func (b *LocalBackend) updateServeTCPPortNetMapAddrListenersLocked(ports []uint16) { + if b.sys.IsNetstack() { + // don't listen on netmap addresses if we're in userspace mode + return + } // close existing listeners where port // is no longer in incoming ports list for ap, sl := range b.serveListeners { @@ -439,6 +464,38 @@ func (b *LocalBackend) HandleIngressTCPConn(ingressPeer tailcfg.NodeView, target handler(c) } +func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService { + // keyed by service name + var services map[tailcfg.ServiceName]*tailcfg.VIPService + if b.serveConfig.Valid() { + for svc, config := range b.serveConfig.Services().All() { + mak.Set(&services, svc, &tailcfg.VIPService{ + Name: svc, + Ports: config.ServicePortRange(), + }) + } + } + + for _, s := range prefs.AdvertiseServices().All() { + sn := tailcfg.ServiceName(s) + if services == nil || services[sn] == nil { + mak.Set(&services, sn, &tailcfg.VIPService{ + Name: sn, + }) + } + services[sn].Active = true + } + + servicesList := slicesx.MapValues(services) + // [slicesx.MapValues] provides the values in an indeterminate order, but since we'll + // be hashing a representation of this list later we want it to be in a consistent + // order. + slices.SortFunc(servicesList, func(a, b *tailcfg.VIPService) int { + return strings.Compare(a.Name.String(), b.Name.String()) + }) + return servicesList +} + // tcpHandlerForVIPService returns a handler for a TCP connection to a VIP service // that is being served via the ipn.ServeConfig. It returns nil if the destination // address is not a VIP service or if the VIP service does not have a TCP handler set. @@ -1046,3 +1103,278 @@ func (b *LocalBackend) getTLSServeCertForPort(port uint16, forVIPService tailcfg return &cert, nil } } + +// setServeProxyHandlersLocked ensures there is an http proxy handler for each +// backend specified in serveConfig. It expects serveConfig to be valid and +// up-to-date, so should be called after reloadServeConfigLocked. 
+func (b *LocalBackend) setServeProxyHandlersLocked() { + if !b.serveConfig.Valid() { + return + } + var backends map[string]bool + for _, conf := range b.serveConfig.Webs() { + for _, h := range conf.Handlers().All() { + backend := h.Proxy() + if backend == "" { + // Only create proxy handlers for servers with a proxy backend. + continue + } + mak.Set(&backends, backend, true) + if _, ok := b.serveProxyHandlers.Load(backend); ok { + continue + } + + b.logf("serve: creating a new proxy handler for %s", backend) + p, err := b.proxyHandlerForBackend(backend) + if err != nil { + // The backend endpoint (h.Proxy) should have been validated by expandProxyTarget + // in the CLI, so just log the error here. + b.logf("[unexpected] could not create proxy for %v: %s", backend, err) + continue + } + b.serveProxyHandlers.Store(backend, p) + } + } + + // Clean up handlers for proxy backends that are no longer present + // in configuration. + b.serveProxyHandlers.Range(func(key, value any) bool { + backend := key.(string) + if !backends[backend] { + b.logf("serve: closing idle connections to %s", backend) + b.serveProxyHandlers.Delete(backend) + value.(*reverseProxy).close() + } + return true + }) +} + +// VIPServices returns the list of tailnet services that this node +// is serving as a destination for. +// The returned memory is owned by the caller. +func (b *LocalBackend) VIPServices() []*tailcfg.VIPService { + b.mu.Lock() + defer b.mu.Unlock() + return b.vipServicesFromPrefsLocked(b.pm.CurrentPrefs()) +} + +func handleC2NVIPServicesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + b.logf("c2n: GET /vip-services received") + var res tailcfg.C2NVIPServicesResponse + res.VIPServices = b.VIPServices() + res.ServicesHash = b.vipServiceHash(res.VIPServices) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +var metricIngressCalls = clientmetric.NewCounter("peerapi_ingress") + +func init() { + RegisterPeerAPIHandler("/v0/ingress", handleServeIngress) + +} + +func handleServeIngress(ph PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + h := ph.(*peerAPIHandler) + metricIngressCalls.Add(1) + + // http.Errors only useful if hitting endpoint manually + // otherwise rely on log lines when debugging ingress connections + // as connection is hijacked for bidi and is encrypted tls + if !h.canIngress() { + h.logf("ingress: denied; no ingress cap from %v", h.remoteAddr) + http.Error(w, "denied; no ingress cap", http.StatusForbidden) + return + } + logAndError := func(code int, publicMsg string) { + h.logf("ingress: bad request from %v: %s", h.remoteAddr, publicMsg) + http.Error(w, publicMsg, code) + } + bad := func(publicMsg string) { + logAndError(http.StatusBadRequest, publicMsg) + } + if r.Method != "POST" { + logAndError(http.StatusMethodNotAllowed, "only POST allowed") + return + } + srcAddrStr := r.Header.Get("Tailscale-Ingress-Src") + if srcAddrStr == "" { + bad("Tailscale-Ingress-Src header not set") + return + } + srcAddr, err := netip.ParseAddrPort(srcAddrStr) + if err != nil { + bad("Tailscale-Ingress-Src header invalid; want ip:port") + return + } + target := ipn.HostPort(r.Header.Get("Tailscale-Ingress-Target")) + if target == "" { + bad("Tailscale-Ingress-Target header not set") + return + } + if _, _, err := net.SplitHostPort(string(target)); err != nil { + bad("Tailscale-Ingress-Target header invalid; want host:port") + return + } + + getConnOrReset := func() (net.Conn, bool) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != 
nil { + h.logf("ingress: failed hijacking conn") + http.Error(w, "failed hijacking conn", http.StatusInternalServerError) + return nil, false + } + io.WriteString(conn, "HTTP/1.1 101 Switching Protocols\r\n\r\n") + return &ipn.FunnelConn{ + Conn: conn, + Src: srcAddr, + Target: target, + }, true + } + sendRST := func() { + http.Error(w, "denied", http.StatusForbidden) + } + + h.ps.b.HandleIngressTCPConn(h.peerNode, target, srcAddr, getConnOrReset, sendRST) +} + +// wantIngressLocked reports whether this node has ingress configured. This bool +// is sent to the coordination server (in Hostinfo.WireIngress) as an +// optimization hint to know primarily which nodes are NOT using ingress, to +// avoid doing work for regular nodes. +// +// Even if the user's ServeConfig.AllowFunnel map was manually edited in raw +// mode and contains map entries with false values, sending true (from Len > 0) +// is still fine. This is only an optimization hint for the control plane and +// doesn't affect security or correctness. And we also don't expect people to +// modify their ServeConfig in raw mode. +func (b *LocalBackend) wantIngressLocked() bool { + return b.serveConfig.Valid() && b.serveConfig.HasAllowFunnel() +} + +// hasIngressEnabledLocked reports whether the node has any funnel endpoint enabled. This bool is sent to control (in +// Hostinfo.IngressEnabled) to determine whether 'Funnel' badge should be displayed on this node in the admin panel. +func (b *LocalBackend) hasIngressEnabledLocked() bool { + return b.serveConfig.Valid() && b.serveConfig.IsFunnelOn() +} + +// shouldWireInactiveIngressLocked reports whether the node is in a state where funnel is not actively enabled, but it +// seems that it is intended to be used with funnel. +func (b *LocalBackend) shouldWireInactiveIngressLocked() bool { + return b.serveConfig.Valid() && !b.hasIngressEnabledLocked() && b.wantIngressLocked() +} + +func serveSetTCPPortsInterceptedFromNetmapAndPrefsLocked(b *LocalBackend, prefs ipn.PrefsView) (handlePorts []uint16) { + var vipServicesPorts map[tailcfg.ServiceName][]uint16 + + b.reloadServeConfigLocked(prefs) + if b.serveConfig.Valid() { + servePorts := make([]uint16, 0, 3) + for port := range b.serveConfig.TCPs() { + if port > 0 { + servePorts = append(servePorts, uint16(port)) + } + } + handlePorts = append(handlePorts, servePorts...) + + for svc, cfg := range b.serveConfig.Services().All() { + servicePorts := make([]uint16, 0, 3) + for port := range cfg.TCP().All() { + if port > 0 { + servicePorts = append(servicePorts, uint16(port)) + } + } + if _, ok := vipServicesPorts[svc]; !ok { + mak.Set(&vipServicesPorts, svc, servicePorts) + } else { + mak.Set(&vipServicesPorts, svc, append(vipServicesPorts[svc], servicePorts...)) + } + } + + b.setServeProxyHandlersLocked() + + // don't listen on netmap addresses if we're in userspace mode + if !b.sys.IsNetstack() { + b.updateServeTCPPortNetMapAddrListenersLocked(servePorts) + } + } + + b.setVIPServicesTCPPortsInterceptedLocked(vipServicesPorts) + + return handlePorts +} + +// reloadServeConfigLocked reloads the serve config from the store or resets the +// serve config to nil if not logged in. The "changed" parameter, when false, instructs +// the method to only run the reset-logic and not reload the store from memory to ensure +// foreground sessions are not removed if they are not saved on disk. 
+func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) { + if !b.currentNode().Self().Valid() || !prefs.Valid() || b.pm.CurrentProfile().ID() == "" { + // We're not logged in, so we don't have a profile. + // Don't try to load the serve config. + b.lastServeConfJSON = mem.B(nil) + b.serveConfig = ipn.ServeConfigView{} + return + } + + confKey := ipn.ServeConfigKey(b.pm.CurrentProfile().ID()) + // TODO(maisem,bradfitz): prevent reading the config from disk + // if the profile has not changed. + confj, err := b.store.ReadState(confKey) + if err != nil { + b.lastServeConfJSON = mem.B(nil) + b.serveConfig = ipn.ServeConfigView{} + return + } + if b.lastServeConfJSON.Equal(mem.B(confj)) { + return + } + b.lastServeConfJSON = mem.B(confj) + var conf ipn.ServeConfig + if err := json.Unmarshal(confj, &conf); err != nil { + b.logf("invalid ServeConfig %q in StateStore: %v", confKey, err) + b.serveConfig = ipn.ServeConfigView{} + return + } + + // remove inactive sessions + maps.DeleteFunc(conf.Foreground, func(sessionID string, sc *ipn.ServeConfig) bool { + _, ok := b.notifyWatchers[sessionID] + return !ok + }) + + b.serveConfig = conf.View() +} + +func (b *LocalBackend) setVIPServicesTCPPortsInterceptedLocked(svcPorts map[tailcfg.ServiceName][]uint16) { + if len(svcPorts) == 0 { + b.shouldInterceptVIPServicesTCPPortAtomic.Store(func(netip.AddrPort) bool { return false }) + return + } + nm := b.currentNode().NetMap() + if nm == nil { + b.logf("can't set intercept function for Service TCP Ports, netMap is nil") + return + } + vipServiceIPMap := nm.GetVIPServiceIPMap() + if len(vipServiceIPMap) == 0 { + // No approved VIP Services + return + } + + svcAddrPorts := make(map[netip.Addr]func(uint16) bool) + // Only set the intercept function if the service has been assigned a VIP. + for svcName, ports := range svcPorts { + addrs, ok := vipServiceIPMap[svcName] + if !ok { + continue + } + interceptFn := generateInterceptTCPPortFunc(ports) + for _, addr := range addrs { + svcAddrPorts[addr] = interceptFn + } + } + + b.shouldInterceptVIPServicesTCPPortAtomic.Store(generateInterceptVIPServicesTCPPortFunc(svcAddrPorts)) +} diff --git a/ipn/ipnlocal/serve_disabled.go b/ipn/ipnlocal/serve_disabled.go new file mode 100644 index 000000000..a97112941 --- /dev/null +++ b/ipn/ipnlocal/serve_disabled.go @@ -0,0 +1,34 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_serve + +// These are temporary (2025-09-13) stubs for when tailscaled is built with the +// ts_omit_serve build tag, disabling serve. +// +// TODO: move serve to a separate package, out of ipnlocal, and delete this +// file. One step at a time. 
+ +package ipnlocal + +import ( + "tailscale.com/ipn" + "tailscale.com/tailcfg" +) + +const serveEnabled = false + +type localListener = struct{} + +func (b *LocalBackend) DeleteForegroundSession(sessionID string) error { + return nil +} + +type funnelFlow = struct{} + +func (*LocalBackend) hasIngressEnabledLocked() bool { return false } +func (*LocalBackend) shouldWireInactiveIngressLocked() bool { return false } + +func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService { + return nil +} diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index 86b56ab4b..d18ee4db9 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + package ipnlocal import ( diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index ac5b0ee7d..7e54cef85 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -8,8 +8,6 @@ import ( "bytes" "cmp" "context" - "crypto/sha256" - "encoding/hex" "encoding/json" "errors" "fmt" @@ -112,7 +110,6 @@ var handler = map[string]LocalAPIHandler{ "query-feature": (*Handler).serveQueryFeature, "reload-config": (*Handler).reloadConfig, "reset-auth": (*Handler).serveResetAuth, - "serve-config": (*Handler).serveServeConfig, "set-dns": (*Handler).serveSetDNS, "set-expiry-sooner": (*Handler).serveSetExpirySooner, "set-gui-visible": (*Handler).serveSetGUIVisible, @@ -1209,89 +1206,6 @@ func (h *Handler) serveResetAuth(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) } -func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) { - switch r.Method { - case httpm.GET: - if !h.PermitRead { - http.Error(w, "serve config denied", http.StatusForbidden) - return - } - config := h.b.ServeConfig() - bts, err := json.Marshal(config) - if err != nil { - http.Error(w, "error encoding config: "+err.Error(), http.StatusInternalServerError) - return - } - sum := sha256.Sum256(bts) - etag := hex.EncodeToString(sum[:]) - w.Header().Set("Etag", etag) - w.Header().Set("Content-Type", "application/json") - w.Write(bts) - case httpm.POST: - if !h.PermitWrite { - http.Error(w, "serve config denied", http.StatusForbidden) - return - } - configIn := new(ipn.ServeConfig) - if err := json.NewDecoder(r.Body).Decode(configIn); err != nil { - WriteErrorJSON(w, fmt.Errorf("decoding config: %w", err)) - return - } - - // require a local admin when setting a path handler - // TODO: roll-up this Windows-specific check into either PermitWrite - // or a global admin escalation check. - if err := authorizeServeConfigForGOOSAndUserContext(runtime.GOOS, configIn, h); err != nil { - http.Error(w, err.Error(), http.StatusUnauthorized) - return - } - - etag := r.Header.Get("If-Match") - if err := h.b.SetServeConfig(configIn, etag); err != nil { - if errors.Is(err, ipnlocal.ErrETagMismatch) { - http.Error(w, err.Error(), http.StatusPreconditionFailed) - return - } - WriteErrorJSON(w, fmt.Errorf("updating config: %w", err)) - return - } - w.WriteHeader(http.StatusOK) - default: - http.Error(w, "method not allowed", http.StatusMethodNotAllowed) - } -} - -func authorizeServeConfigForGOOSAndUserContext(goos string, configIn *ipn.ServeConfig, h *Handler) error { - switch goos { - case "windows", "linux", "darwin", "illumos", "solaris": - default: - return nil - } - // Only check for local admin on tailscaled-on-mac (based on "sudo" - // permissions). 
On sandboxed variants (MacSys and AppStore), tailscaled - // cannot serve files outside of the sandbox and this check is not - // relevant. - if goos == "darwin" && version.IsSandboxedMacOS() { - return nil - } - if !configIn.HasPathHandler() { - return nil - } - if h.Actor.IsLocalAdmin(h.b.OperatorUserID()) { - return nil - } - switch goos { - case "windows": - return errors.New("must be a Windows local admin to serve a path") - case "linux", "darwin", "illumos", "solaris": - return errors.New("must be root, or be an operator and able to run 'sudo tailscale' to serve a path") - default: - // We filter goos at the start of the func, this default case - // should never happen. - panic("unreachable") - } -} - func (h *Handler) serveCheckIPForwarding(w http.ResponseWriter, r *http.Request) { if !h.PermitRead { http.Error(w, "IP forwarding check access denied", http.StatusForbidden) diff --git a/ipn/localapi/serve.go b/ipn/localapi/serve.go new file mode 100644 index 000000000..56c8b486c --- /dev/null +++ b/ipn/localapi/serve.go @@ -0,0 +1,108 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_serve + +package localapi + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "net/http" + "runtime" + + "tailscale.com/ipn" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/util/httpm" + "tailscale.com/version" +) + +func init() { + Register("serve-config", (*Handler).serveServeConfig) +} + +func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case httpm.GET: + if !h.PermitRead { + http.Error(w, "serve config denied", http.StatusForbidden) + return + } + config := h.b.ServeConfig() + bts, err := json.Marshal(config) + if err != nil { + http.Error(w, "error encoding config: "+err.Error(), http.StatusInternalServerError) + return + } + sum := sha256.Sum256(bts) + etag := hex.EncodeToString(sum[:]) + w.Header().Set("Etag", etag) + w.Header().Set("Content-Type", "application/json") + w.Write(bts) + case httpm.POST: + if !h.PermitWrite { + http.Error(w, "serve config denied", http.StatusForbidden) + return + } + configIn := new(ipn.ServeConfig) + if err := json.NewDecoder(r.Body).Decode(configIn); err != nil { + WriteErrorJSON(w, fmt.Errorf("decoding config: %w", err)) + return + } + + // require a local admin when setting a path handler + // TODO: roll-up this Windows-specific check into either PermitWrite + // or a global admin escalation check. + if err := authorizeServeConfigForGOOSAndUserContext(runtime.GOOS, configIn, h); err != nil { + http.Error(w, err.Error(), http.StatusUnauthorized) + return + } + + etag := r.Header.Get("If-Match") + if err := h.b.SetServeConfig(configIn, etag); err != nil { + if errors.Is(err, ipnlocal.ErrETagMismatch) { + http.Error(w, err.Error(), http.StatusPreconditionFailed) + return + } + WriteErrorJSON(w, fmt.Errorf("updating config: %w", err)) + return + } + w.WriteHeader(http.StatusOK) + default: + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + } +} + +func authorizeServeConfigForGOOSAndUserContext(goos string, configIn *ipn.ServeConfig, h *Handler) error { + switch goos { + case "windows", "linux", "darwin", "illumos", "solaris": + default: + return nil + } + // Only check for local admin on tailscaled-on-mac (based on "sudo" + // permissions). On sandboxed variants (MacSys and AppStore), tailscaled + // cannot serve files outside of the sandbox and this check is not + // relevant. 
+ if goos == "darwin" && version.IsSandboxedMacOS() { + return nil + } + if !configIn.HasPathHandler() { + return nil + } + if h.Actor.IsLocalAdmin(h.b.OperatorUserID()) { + return nil + } + switch goos { + case "windows": + return errors.New("must be a Windows local admin to serve a path") + case "linux", "darwin", "illumos", "solaris": + return errors.New("must be root, or be an operator and able to run 'sudo tailscale' to serve a path") + default: + // We filter goos at the start of the func, this default case + // should never happen. + panic("unreachable") + } +} diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 74f3f8c53..9b93ce8db 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -235,6 +235,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index d97c66946..7381c515a 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -33,6 +33,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/transport/udp" "gvisor.dev/gvisor/pkg/waiter" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn/ipnlocal" "tailscale.com/metrics" "tailscale.com/net/dns" @@ -643,13 +644,15 @@ func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) { var selfNode tailcfg.NodeView var serviceAddrSet set.Set[netip.Addr] if nm != nil { - vipServiceIPMap := nm.GetVIPServiceIPMap() - serviceAddrSet = make(set.Set[netip.Addr], len(vipServiceIPMap)*2) - for _, addrs := range vipServiceIPMap { - serviceAddrSet.AddSlice(addrs) - } ns.atomicIsLocalIPFunc.Store(ipset.NewContainsIPFunc(nm.GetAddresses())) - ns.atomicIsVIPServiceIPFunc.Store(serviceAddrSet.Contains) + if buildfeatures.HasServe { + vipServiceIPMap := nm.GetVIPServiceIPMap() + serviceAddrSet = make(set.Set[netip.Addr], len(vipServiceIPMap)*2) + for _, addrs := range vipServiceIPMap { + serviceAddrSet.AddSlice(addrs) + } + ns.atomicIsVIPServiceIPFunc.Store(serviceAddrSet.Contains) + } selfNode = nm.SelfNode } else { ns.atomicIsLocalIPFunc.Store(ipset.FalseContainsIPFunc()) @@ -1032,6 +1035,9 @@ func (ns *Impl) isLocalIP(ip netip.Addr) bool { // isVIPServiceIP reports whether ip is an IP address that's // assigned to a VIP service. func (ns *Impl) isVIPServiceIP(ip netip.Addr) bool { + if !buildfeatures.HasServe { + return false + } return ns.atomicIsVIPServiceIPFunc.Load()(ip) } @@ -1074,7 +1080,7 @@ func (ns *Impl) shouldProcessInbound(p *packet.Parsed, t *tstun.Wrapper) bool { return true } } - if isService { + if buildfeatures.HasServe && isService { if p.IsEchoRequest() { return true } From 2015ce40814dd175f7d441c83d7517a2128b37e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Tue, 16 Sep 2025 11:25:29 -0400 Subject: [PATCH 1320/1708] health,ipn/ipnlocal: introduce eventbus in heath.Tracker (#17085) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Tracker was using direct callbacks to ipnlocal. This PR moves those to be triggered via the eventbus. 
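In rough terms, the Tracker now publishes a change event on the bus and
ipnlocal subscribes to it, rather than the Tracker invoking a callback
registered by ipnlocal. A minimal sketch of that shape follows; healthChange
and its field are illustrative stand-ins rather than the event type this
patch actually defines, and the client names are arbitrary.

	package main

	import (
		"fmt"

		"tailscale.com/util/eventbus"
	)

	// healthChange is an illustrative stand-in for whatever event the
	// tracker publishes; it is not a real type from this patch.
	type healthChange struct{ InWarningState bool }

	func main() {
		bus := eventbus.New()
		defer bus.Close()

		// Consumer side (ipnlocal), replacing the direct callback:
		cc := bus.Client("ipnlocal")
		sub := eventbus.Subscribe[healthChange](cc)
		done := make(chan struct{})
		go func() {
			defer close(done)
			select {
			case ch := <-sub.Events():
				fmt.Println("health changed:", ch.InWarningState)
			case <-sub.Done():
			}
		}()

		// Producer side (the health Tracker, which now receives the bus
		// via health.NewTracker(bus)):
		pc := bus.Client("health")
		pub := eventbus.Publish[healthChange](pc)
		pub.Publish(healthChange{InWarningState: true})

		<-done
		pc.Close()
		cc.Close()
	}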
Additionally, the eventbus is now closed on exit from tailscaled explicitly, and health is now a SubSystem in tsd. Updates #15160 Signed-off-by: Claus Lensbøl --- cmd/tailscaled/debug.go | 4 +- cmd/tailscaled/tailscaled.go | 14 +- cmd/tsconnect/wasm/wasm_js.go | 2 +- control/controlclient/controlclient_test.go | 4 +- control/controlclient/map_test.go | 5 +- control/controlhttp/http_test.go | 5 +- health/health.go | 46 +++ health/health_test.go | 323 +++++++++++++------- ipn/ipnlocal/extension_host_test.go | 3 +- ipn/ipnlocal/local.go | 17 +- ipn/ipnlocal/local_test.go | 8 +- ipn/ipnlocal/loglines_test.go | 2 +- ipn/ipnlocal/network-lock_test.go | 39 +-- ipn/ipnlocal/peerapi_test.go | 21 +- ipn/ipnlocal/profiles.go | 5 +- ipn/ipnlocal/profiles_test.go | 25 +- ipn/ipnlocal/serve_test.go | 4 +- ipn/ipnlocal/ssh_test.go | 3 +- ipn/ipnlocal/state_test.go | 6 +- ipn/lapitest/backend.go | 2 +- ipn/localapi/localapi_test.go | 2 +- net/dns/manager_tcp_test.go | 5 +- net/dns/manager_test.go | 5 +- net/dns/resolver/forwarder_test.go | 7 +- net/dns/resolver/tsdns_test.go | 9 +- net/tlsdial/tlsdial_test.go | 3 +- ssh/tailssh/tailssh_test.go | 2 +- tsd/tsd.go | 13 +- tsnet/tsnet.go | 6 +- util/eventbus/eventbustest/eventbustest.go | 4 +- wgengine/bench/wg.go | 4 +- wgengine/magicsock/magicsock_test.go | 13 +- wgengine/netstack/netstack_test.go | 4 +- wgengine/router/router_linux_test.go | 2 +- wgengine/userspace_ext_test.go | 4 +- wgengine/userspace_test.go | 17 +- wgengine/watchdog_test.go | 7 +- 37 files changed, 402 insertions(+), 243 deletions(-) diff --git a/cmd/tailscaled/debug.go b/cmd/tailscaled/debug.go index 2f469a0d1..85dd787c1 100644 --- a/cmd/tailscaled/debug.go +++ b/cmd/tailscaled/debug.go @@ -161,7 +161,9 @@ func getURL(ctx context.Context, urlStr string) error { } func checkDerp(ctx context.Context, derpRegion string) (err error) { - ht := new(health.Tracker) + bus := eventbus.New() + defer bus.Close() + ht := health.NewTracker(bus) req, err := http.NewRequestWithContext(ctx, "GET", ipn.DefaultControlURL+"/derpmap/default", nil) if err != nil { return fmt.Errorf("create derp map request: %w", err) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 890ff7bf8..734c8e8e8 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -426,7 +426,7 @@ func run() (err error) { sys.Set(netMon) } - pol := logpolicy.New(logtail.CollectionNode, netMon, sys.HealthTracker(), nil /* use log.Printf */) + pol := logpolicy.New(logtail.CollectionNode, netMon, sys.HealthTracker.Get(), nil /* use log.Printf */) pol.SetVerbosityLevel(args.verbose) logPol = pol defer func() { @@ -461,7 +461,7 @@ func run() (err error) { // Always clean up, even if we're going to run the server. This covers cases // such as when a system was rebooted without shutting down, or tailscaled // crashed, and would for example restore system DNS configuration. - dns.CleanUp(logf, netMon, sys.HealthTracker(), args.tunname) + dns.CleanUp(logf, netMon, sys.HealthTracker.Get(), args.tunname) router.CleanUp(logf, netMon, args.tunname) // If the cleanUp flag was passed, then exit. 
if args.cleanUp { @@ -749,7 +749,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo conf := wgengine.Config{ ListenPort: args.port, NetMon: sys.NetMon.Get(), - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), Dialer: sys.Dialer.Get(), SetSubsystem: sys.Set, @@ -760,7 +760,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo f(&conf, logf) } - sys.HealthTracker().SetMetricsRegistry(sys.UserMetricsRegistry()) + sys.HealthTracker.Get().SetMetricsRegistry(sys.UserMetricsRegistry()) onlyNetstack = name == "userspace-networking" netstackSubnetRouter := onlyNetstack // but mutated later on some platforms @@ -781,7 +781,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo // configuration being unavailable (from the noop // manager). More in Issue 4017. // TODO(bradfitz): add a Synology-specific DNS manager. - conf.DNS, err = dns.NewOSConfigurator(logf, sys.HealthTracker(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), "") // empty interface name + conf.DNS, err = dns.NewOSConfigurator(logf, sys.HealthTracker.Get(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), "") // empty interface name if err != nil { return false, fmt.Errorf("dns.NewOSConfigurator: %w", err) } @@ -809,13 +809,13 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo sys.NetMon.Get().SetTailscaleInterfaceName(devName) } - r, err := router.New(logf, dev, sys.NetMon.Get(), sys.HealthTracker(), sys.Bus.Get()) + r, err := router.New(logf, dev, sys.NetMon.Get(), sys.HealthTracker.Get(), sys.Bus.Get()) if err != nil { dev.Close() return false, fmt.Errorf("creating router: %w", err) } - d, err := dns.NewOSConfigurator(logf, sys.HealthTracker(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), devName) + d, err := dns.NewOSConfigurator(logf, sys.HealthTracker.Get(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), devName) if err != nil { dev.Close() r.Close() diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index 87f814866..ea40dba9c 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -108,7 +108,7 @@ func newIPN(jsConfig js.Value) map[string]any { Dialer: dialer, SetSubsystem: sys.Set, ControlKnobs: sys.ControlKnobs(), - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }) diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index 2efc27b5e..78646d76a 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -236,7 +236,7 @@ func TestDirectProxyManual(t *testing.T) { }, DiscoPublicKey: key.NewDisco().Public(), Logf: t.Logf, - HealthTracker: &health.Tracker{}, + HealthTracker: health.NewTracker(bus), PopBrowserURL: func(url string) { t.Logf("PopBrowserURL: %q", url) }, @@ -328,7 +328,7 @@ func testHTTPS(t *testing.T, withProxy bool) { }, DiscoPublicKey: key.NewDisco().Public(), Logf: t.Logf, - HealthTracker: &health.Tracker{}, + HealthTracker: health.NewTracker(bus), PopBrowserURL: func(url string) { t.Logf("PopBrowserURL: %q", url) }, diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index ff5df8207..59b8988fc 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -28,6 +28,7 @@ import ( "tailscale.com/types/logger" 
"tailscale.com/types/netmap" "tailscale.com/types/ptr" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" ) @@ -1326,7 +1327,7 @@ func TestNetmapDisplayMessage(t *testing.T) { // [netmap.NetworkMap] to a [health.Tracker]. func TestNetmapHealthIntegration(t *testing.T) { ms := newTestMapSession(t, nil) - ht := health.Tracker{} + ht := health.NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() @@ -1371,7 +1372,7 @@ func TestNetmapHealthIntegration(t *testing.T) { // passing the [netmap.NetworkMap] to a [health.Tracker]. func TestNetmapDisplayMessageIntegration(t *testing.T) { ms := newTestMapSession(t, nil) - ht := health.Tracker{} + ht := health.NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() diff --git a/control/controlhttp/http_test.go b/control/controlhttp/http_test.go index daf262023..0b4e117f9 100644 --- a/control/controlhttp/http_test.go +++ b/control/controlhttp/http_test.go @@ -35,6 +35,7 @@ import ( "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/eventbus/eventbustest" ) type httpTestParam struct { @@ -228,7 +229,7 @@ func testControlHTTP(t *testing.T, param httpTestParam) { omitCertErrorLogging: true, testFallbackDelay: fallbackDelay, Clock: clock, - HealthTracker: new(health.Tracker), + HealthTracker: health.NewTracker(eventbustest.NewBus(t)), } if param.httpInDial { @@ -730,7 +731,7 @@ func TestDialPlan(t *testing.T) { omitCertErrorLogging: true, testFallbackDelay: 50 * time.Millisecond, Clock: clock, - HealthTracker: new(health.Tracker), + HealthTracker: health.NewTracker(eventbustest.NewBus(t)), } conn, err := a.dial(ctx) diff --git a/health/health.go b/health/health.go index 058870438..c456b53cb 100644 --- a/health/health.go +++ b/health/health.go @@ -25,6 +25,7 @@ import ( "tailscale.com/tstime" "tailscale.com/types/opt" "tailscale.com/util/cibuild" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/multierr" "tailscale.com/util/set" @@ -76,6 +77,9 @@ type Tracker struct { testClock tstime.Clock // nil means use time.Now / tstime.StdClock{} + eventClient *eventbus.Client + changePub *eventbus.Publisher[Change] + // mu guards everything that follows. mu sync.Mutex @@ -119,6 +123,20 @@ type Tracker struct { metricHealthMessage *metrics.MultiLabelMap[metricHealthMessageLabel] } +// NewTracker contructs a new [Tracker] and attaches the given eventbus. +// NewTracker will panic is no eventbus is given. +func NewTracker(bus *eventbus.Bus) *Tracker { + if bus == nil { + panic("no eventbus set") + } + + cli := bus.Client("health.Tracker") + return &Tracker{ + eventClient: cli, + changePub: eventbus.Publish[Change](cli), + } +} + func (t *Tracker) now() time.Time { if t.testClock != nil { return t.testClock.Now() @@ -418,6 +436,28 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { Warnable: w, UnhealthyState: w.unhealthyState(ws), } + // Publish the change to the event bus. If the change is already visible + // now, publish it immediately; otherwise queue a timer to publish it at + // a future time when it becomes visible. 
+ if w.IsVisible(ws, t.now) { + t.changePub.Publish(change) + } else { + visibleIn := w.TimeToVisible - t.now().Sub(brokenSince) + tc := t.clock().AfterFunc(visibleIn, func() { + t.mu.Lock() + defer t.mu.Unlock() + // Check if the Warnable is still unhealthy, as it could have become healthy between the time + // the timer was set for and the time it was executed. + if t.warnableVal[w] != nil { + t.changePub.Publish(change) + delete(t.pendingVisibleTimers, w) + } + }) + mak.Set(&t.pendingVisibleTimers, w, tc) + } + + // Direct callbacks + // TODO(cmol): Remove once all watchers have been moved to events for _, cb := range t.watchers { // If the Warnable has been unhealthy for more than its TimeToVisible, the callback should be // executed immediately. Otherwise, the callback should be enqueued to run once the Warnable @@ -473,7 +513,9 @@ func (t *Tracker) setHealthyLocked(w *Warnable) { WarnableChanged: true, Warnable: w, } + t.changePub.Publish(change) for _, cb := range t.watchers { + // TODO(cmol): Remove once all watchers have been moved to events cb(change) } } @@ -484,7 +526,11 @@ func (t *Tracker) notifyWatchersControlChangedLocked() { change := Change{ ControlHealthChanged: true, } + if t.changePub != nil { + t.changePub.Publish(change) + } for _, cb := range t.watchers { + // TODO(cmol): Remove once all watchers have been moved to events cb(change) } } diff --git a/health/health_test.go b/health/health_test.go index d66cea06c..c55b0e1f3 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -18,12 +18,34 @@ import ( "tailscale.com/tstest" "tailscale.com/tstime" "tailscale.com/types/opt" + "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/usermetric" "tailscale.com/version" ) +func wantChange(c Change) func(c Change) (bool, error) { + return func(cEv Change) (bool, error) { + if cEv.ControlHealthChanged != c.ControlHealthChanged { + return false, fmt.Errorf("expected ControlHealthChanged %t, got %t", c.ControlHealthChanged, cEv.ControlHealthChanged) + } + if cEv.WarnableChanged != c.WarnableChanged { + return false, fmt.Errorf("expected WarnableChanged %t, got %t", c.WarnableChanged, cEv.WarnableChanged) + } + if c.Warnable != nil && (cEv.Warnable == nil || cEv.Warnable != c.Warnable) { + return false, fmt.Errorf("expected Warnable %+v, got %+v", c.Warnable, cEv.Warnable) + } + + if c.UnhealthyState != nil { + panic("comparison of UnhealthyState is not yet supported") + } + + return true, nil + } +} + func TestAppendWarnableDebugFlags(t *testing.T) { - var tr Tracker + tr := NewTracker(eventbustest.NewBus(t)) for i := range 10 { w := Register(&Warnable{ @@ -68,7 +90,9 @@ func TestNilMethodsDontCrash(t *testing.T) { } func TestSetUnhealthyWithDuplicateThenHealthyAgain(t *testing.T) { - ht := Tracker{} + bus := eventbustest.NewBus(t) + watcher := eventbustest.NewWatcher(t, bus) + ht := NewTracker(bus) if len(ht.Strings()) != 0 { t.Fatalf("before first insertion, len(newTracker.Strings) = %d; want = 0", len(ht.Strings())) } @@ -92,10 +116,20 @@ func TestSetUnhealthyWithDuplicateThenHealthyAgain(t *testing.T) { if !reflect.DeepEqual(ht.Strings(), want) { t.Fatalf("after setting the healthy, newTracker.Strings() = %v; want = %v", ht.Strings(), want) } + + if err := eventbustest.ExpectExactly(watcher, + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + ); err != nil { + 
t.Fatalf("expected events, got %q", err) + } } func TestRemoveAllWarnings(t *testing.T) { - ht := Tracker{} + bus := eventbustest.NewBus(t) + watcher := eventbustest.NewWatcher(t, bus) + ht := NewTracker(bus) if len(ht.Strings()) != 0 { t.Fatalf("before first insertion, len(newTracker.Strings) = %d; want = 0", len(ht.Strings())) } @@ -109,67 +143,105 @@ func TestRemoveAllWarnings(t *testing.T) { if len(ht.Strings()) != 0 { t.Fatalf("after RemoveAll, len(newTracker.Strings) = %d; want = 0", len(ht.Strings())) } + if err := eventbustest.ExpectExactly(watcher, + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + ); err != nil { + t.Fatalf("expected events, got %q", err) + } } // TestWatcher tests that a registered watcher function gets called with the correct // Warnable and non-nil/nil UnhealthyState upon setting a Warnable to unhealthy/healthy. func TestWatcher(t *testing.T) { - ht := Tracker{} - wantText := "Hello world" - becameUnhealthy := make(chan struct{}) - becameHealthy := make(chan struct{}) - - watcherFunc := func(c Change) { - w := c.Warnable - us := c.UnhealthyState - if w != testWarnable { - t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, testWarnable) - } + tests := []struct { + name string + preFunc func(t *testing.T, ht *Tracker, bus *eventbus.Bus, fn func(Change)) + }{ + { + name: "with-callbacks", + preFunc: func(t *testing.T, tht *Tracker, _ *eventbus.Bus, fn func(c Change)) { + t.Cleanup(tht.RegisterWatcher(fn)) + if len(tht.watchers) != 1 { + t.Fatalf("after RegisterWatcher, len(newTracker.watchers) = %d; want = 1", len(tht.watchers)) + } + }, + }, + { + name: "with-eventbus", + preFunc: func(_ *testing.T, _ *Tracker, bus *eventbus.Bus, fn func(c Change)) { + client := bus.Client("healthwatchertestclient") + sub := eventbus.Subscribe[Change](client) + go func() { + for { + select { + case <-sub.Done(): + return + case change := <-sub.Events(): + fn(change) + } + } + }() + }, + }, + } - if us != nil { - if us.Text != wantText { - t.Fatalf("unexpected us.Text: %s, want: %s", us.Text, wantText) - } - if us.Args[ArgError] != wantText { - t.Fatalf("unexpected us.Args[ArgError]: %s, want: %s", us.Args[ArgError], wantText) + for _, tt := range tests { + t.Run(tt.name, func(*testing.T) { + bus := eventbustest.NewBus(t) + ht := NewTracker(bus) + wantText := "Hello world" + becameUnhealthy := make(chan struct{}) + becameHealthy := make(chan struct{}) + + watcherFunc := func(c Change) { + w := c.Warnable + us := c.UnhealthyState + if w != testWarnable { + t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, testWarnable) + } + + if us != nil { + if us.Text != wantText { + t.Fatalf("unexpected us.Text: %q, want: %s", us.Text, wantText) + } + if us.Args[ArgError] != wantText { + t.Fatalf("unexpected us.Args[ArgError]: %q, want: %s", us.Args[ArgError], wantText) + } + becameUnhealthy <- struct{}{} + } else { + becameHealthy <- struct{}{} + } } - becameUnhealthy <- struct{}{} - } else { - becameHealthy <- struct{}{} - } - } - unregisterFunc := ht.RegisterWatcher(watcherFunc) - if len(ht.watchers) != 1 { - t.Fatalf("after RegisterWatcher, len(newTracker.watchers) = %d; want = 1", len(ht.watchers)) - } - ht.SetUnhealthy(testWarnable, Args{ArgError: wantText}) + // Set up test + tt.preFunc(t, ht, bus, watcherFunc) - select { - case <-becameUnhealthy: - // Test passed because the watcher got notified of an unhealthy state - case 
<-becameHealthy: - // Test failed because the watcher got of a healthy state instead of an unhealthy one - t.Fatalf("watcherFunc was called with a healthy state") - case <-time.After(1 * time.Second): - t.Fatalf("watcherFunc didn't get called upon calling SetUnhealthy") - } + // Start running actual test + ht.SetUnhealthy(testWarnable, Args{ArgError: wantText}) - ht.SetHealthy(testWarnable) + select { + case <-becameUnhealthy: + // Test passed because the watcher got notified of an unhealthy state + case <-becameHealthy: + // Test failed because the watcher got of a healthy state instead of an unhealthy one + t.Fatalf("watcherFunc was called with a healthy state") + case <-time.After(5 * time.Second): + t.Fatalf("watcherFunc didn't get called upon calling SetUnhealthy") + } - select { - case <-becameUnhealthy: - // Test failed because the watcher got of an unhealthy state instead of a healthy one - t.Fatalf("watcherFunc was called with an unhealthy state") - case <-becameHealthy: - // Test passed because the watcher got notified of a healthy state - case <-time.After(1 * time.Second): - t.Fatalf("watcherFunc didn't get called upon calling SetUnhealthy") - } + ht.SetHealthy(testWarnable) - unregisterFunc() - if len(ht.watchers) != 0 { - t.Fatalf("after unregisterFunc, len(newTracker.watchers) = %d; want = 0", len(ht.watchers)) + select { + case <-becameUnhealthy: + // Test failed because the watcher got of an unhealthy state instead of a healthy one + t.Fatalf("watcherFunc was called with an unhealthy state") + case <-becameHealthy: + // Test passed because the watcher got notified of a healthy state + case <-time.After(5 * time.Second): + t.Fatalf("watcherFunc didn't get called upon calling SetUnhealthy") + } + }) } } @@ -178,45 +250,81 @@ func TestWatcher(t *testing.T) { // has a TimeToVisible set, which means that a watcher should only be notified of an unhealthy state after // the TimeToVisible duration has passed. 
func TestSetUnhealthyWithTimeToVisible(t *testing.T) { - ht := Tracker{} - mw := Register(&Warnable{ - Code: "test-warnable-3-secs-to-visible", - Title: "Test Warnable with 3 seconds to visible", - Text: StaticMessage("Hello world"), - TimeToVisible: 2 * time.Second, - ImpactsConnectivity: true, - }) - defer unregister(mw) - - becameUnhealthy := make(chan struct{}) - becameHealthy := make(chan struct{}) + tests := []struct { + name string + preFunc func(t *testing.T, ht *Tracker, bus *eventbus.Bus, fn func(Change)) + }{ + { + name: "with-callbacks", + preFunc: func(t *testing.T, tht *Tracker, _ *eventbus.Bus, fn func(c Change)) { + t.Cleanup(tht.RegisterWatcher(fn)) + if len(tht.watchers) != 1 { + t.Fatalf("after RegisterWatcher, len(newTracker.watchers) = %d; want = 1", len(tht.watchers)) + } + }, + }, + { + name: "with-eventbus", + preFunc: func(_ *testing.T, _ *Tracker, bus *eventbus.Bus, fn func(c Change)) { + client := bus.Client("healthwatchertestclient") + sub := eventbus.Subscribe[Change](client) + go func() { + for { + select { + case <-sub.Done(): + return + case change := <-sub.Events(): + fn(change) + } + } + }() + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(*testing.T) { + bus := eventbustest.NewBus(t) + ht := NewTracker(bus) + mw := Register(&Warnable{ + Code: "test-warnable-3-secs-to-visible", + Title: "Test Warnable with 3 seconds to visible", + Text: StaticMessage("Hello world"), + TimeToVisible: 2 * time.Second, + ImpactsConnectivity: true, + }) - watchFunc := func(c Change) { - w := c.Warnable - us := c.UnhealthyState - if w != mw { - t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, w) - } + becameUnhealthy := make(chan struct{}) + becameHealthy := make(chan struct{}) - if us != nil { - becameUnhealthy <- struct{}{} - } else { - becameHealthy <- struct{}{} - } - } + watchFunc := func(c Change) { + w := c.Warnable + us := c.UnhealthyState + if w != mw { + t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, w) + } - ht.RegisterWatcher(watchFunc) - ht.SetUnhealthy(mw, Args{ArgError: "Hello world"}) + if us != nil { + becameUnhealthy <- struct{}{} + } else { + becameHealthy <- struct{}{} + } + } - select { - case <-becameUnhealthy: - // Test failed because the watcher got notified of an unhealthy state - t.Fatalf("watcherFunc was called with an unhealthy state") - case <-becameHealthy: - // Test failed because the watcher got of a healthy state - t.Fatalf("watcherFunc was called with a healthy state") - case <-time.After(1 * time.Second): - // As expected, watcherFunc still had not been called after 1 second + tt.preFunc(t, ht, bus, watchFunc) + ht.SetUnhealthy(mw, Args{ArgError: "Hello world"}) + + select { + case <-becameUnhealthy: + // Test failed because the watcher got notified of an unhealthy state + t.Fatalf("watcherFunc was called with an unhealthy state") + case <-becameHealthy: + // Test failed because the watcher got of a healthy state + t.Fatalf("watcherFunc was called with a healthy state") + case <-time.After(1 * time.Second): + // As expected, watcherFunc still had not been called after 1 second + } + unregister(mw) + }) } } @@ -242,7 +350,7 @@ func TestRegisterWarnablePanicsWithDuplicate(t *testing.T) { // TestCheckDependsOnAppearsInUnhealthyState asserts that the DependsOn field in the UnhealthyState // is populated with the WarnableCode(s) of the Warnable(s) that a warning depends on. 
func TestCheckDependsOnAppearsInUnhealthyState(t *testing.T) { - ht := Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) w1 := Register(&Warnable{ Code: "w1", Text: StaticMessage("W1 Text"), @@ -352,11 +460,11 @@ func TestShowUpdateWarnable(t *testing.T) { } for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - tr := &Tracker{ - checkForUpdates: tt.check, - applyUpdates: tt.apply, - latestVersion: tt.cv, - } + tr := NewTracker(eventbustest.NewBus(t)) + tr.checkForUpdates = tt.check + tr.applyUpdates = tt.apply + tr.latestVersion = tt.cv + gotWarnable, gotShow := tr.showUpdateWarnable() if gotWarnable != tt.wantWarnable { t.Errorf("got warnable: %v, want: %v", gotWarnable, tt.wantWarnable) @@ -401,11 +509,10 @@ func TestHealthMetric(t *testing.T) { } for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - tr := &Tracker{ - checkForUpdates: tt.check, - applyUpdates: tt.apply, - latestVersion: tt.cv, - } + tr := NewTracker(eventbustest.NewBus(t)) + tr.checkForUpdates = tt.check + tr.applyUpdates = tt.apply + tr.latestVersion = tt.cv tr.SetMetricsRegistry(&usermetric.Registry{}) if val := tr.metricHealthMessage.Get(metricHealthMessageLabel{Type: MetricLabelWarning}).String(); val != strconv.Itoa(tt.wantMetricCount) { t.Fatalf("metric value: %q, want: %q", val, strconv.Itoa(tt.wantMetricCount)) @@ -426,9 +533,8 @@ func TestNoDERPHomeWarnable(t *testing.T) { Start: time.Unix(123, 0), FollowRealTime: false, }) - ht := &Tracker{ - testClock: clock, - } + ht := NewTracker(eventbustest.NewBus(t)) + ht.testClock = clock ht.SetIPNState("NeedsLogin", true) // Advance 30 seconds to get past the "recentlyLoggedIn" check. @@ -448,7 +554,7 @@ func TestNoDERPHomeWarnable(t *testing.T) { // but doesn't use tstest.Clock so avoids the deadlock // I hit: https://github.com/tailscale/tailscale/issues/14798 func TestNoDERPHomeWarnableManual(t *testing.T) { - ht := &Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) // Avoid wantRunning: @@ -462,7 +568,7 @@ func TestNoDERPHomeWarnableManual(t *testing.T) { } func TestControlHealth(t *testing.T) { - ht := Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() @@ -620,7 +726,7 @@ func TestControlHealthNotifies(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ht := Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() @@ -643,7 +749,7 @@ func TestControlHealthNotifies(t *testing.T) { } func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { - ht := Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) gotNotified := false @@ -671,7 +777,7 @@ func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { // created from Control health & returned by [Tracker.CurrentState] is different // when the details of the [tailcfg.DisplayMessage] are different. func TestCurrentStateETagControlHealth(t *testing.T) { - ht := Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() @@ -776,9 +882,8 @@ func TestCurrentStateETagControlHealth(t *testing.T) { // when the details of the Warnable are different. 
func TestCurrentStateETagWarnable(t *testing.T) { newTracker := func(clock tstime.Clock) *Tracker { - ht := &Tracker{ - testClock: clock, - } + ht := NewTracker(eventbustest.NewBus(t)) + ht.testClock = clock ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() return ht diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go index 509833ff6..f5c081a5b 100644 --- a/ipn/ipnlocal/extension_host_test.go +++ b/ipn/ipnlocal/extension_host_test.go @@ -32,6 +32,7 @@ import ( "tailscale.com/types/lazy" "tailscale.com/types/logger" "tailscale.com/types/persist" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" ) @@ -847,7 +848,7 @@ func TestBackgroundProfileResolver(t *testing.T) { // Create a new profile manager and add the profiles to it. // We expose the profile manager to the extensions via the read-only [ipnext.ProfileStore] interface. - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) for i, p := range tt.profiles { // Generate a unique ID and key for each profile, // unless the profile already has them set diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 6d92e58d0..4c27bea45 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -206,6 +206,7 @@ type LocalBackend struct { eventClient *eventbus.Client clientVersionSub *eventbus.Subscriber[tailcfg.ClientVersion] autoUpdateSub *eventbus.Subscriber[controlclient.AutoUpdate] + healthChangeSub *eventbus.Subscriber[health.Change] subsDoneCh chan struct{} // closed when consumeEventbusTopics returns health *health.Tracker // always non-nil polc policyclient.Client // always non-nil @@ -216,7 +217,6 @@ type LocalBackend struct { pushDeviceToken syncs.AtomicValue[string] backendLogID logid.PublicID unregisterNetMon func() - unregisterHealthWatch func() unregisterSysPolicyWatch func() portpoll *portlist.Poller // may be nil portpollOnce sync.Once // guards starting readPoller @@ -488,7 +488,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo if loginFlags&controlclient.LocalBackendStartKeyOSNeutral != 0 { goos = "" } - pm, err := newProfileManagerWithGOOS(store, logf, sys.HealthTracker(), goos) + pm, err := newProfileManagerWithGOOS(store, logf, sys.HealthTracker.Get(), goos) if err != nil { return nil, err } @@ -521,7 +521,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo statsLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now), sys: sys, polc: sys.PolicyClientOrDefault(), - health: sys.HealthTracker(), + health: sys.HealthTracker.Get(), metrics: m, e: e, dialer: dialer, @@ -543,6 +543,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.eventClient = b.Sys().Bus.Get().Client("ipnlocal.LocalBackend") b.clientVersionSub = eventbus.Subscribe[tailcfg.ClientVersion](b.eventClient) b.autoUpdateSub = eventbus.Subscribe[controlclient.AutoUpdate](b.eventClient) + b.healthChangeSub = eventbus.Subscribe[health.Change](b.eventClient) nb := newNodeBackend(ctx, b.sys.Bus.Get()) b.currentNodeAtomic.Store(nb) nb.ready() @@ -570,7 +571,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo }() netMon := sys.NetMon.Get() - b.sockstatLogger, err = sockstatlog.NewLogger(logpolicy.LogsDir(logf), logf, logID, netMon, sys.HealthTracker()) + b.sockstatLogger, err = sockstatlog.NewLogger(logpolicy.LogsDir(logf), logf, 
logID, netMon, sys.HealthTracker.Get()) if err != nil { log.Printf("error setting up sockstat logger: %v", err) } @@ -595,8 +596,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.linkChange(&netmon.ChangeDelta{New: netMon.InterfaceState()}) b.unregisterNetMon = netMon.RegisterChangeCallback(b.linkChange) - b.unregisterHealthWatch = b.health.RegisterWatcher(b.onHealthChange) - if tunWrap, ok := b.sys.Tun.GetOK(); ok { tunWrap.PeerAPIPort = b.GetPeerAPIPort } else { @@ -628,12 +627,17 @@ func (b *LocalBackend) consumeEventbusTopics() { for { select { + // TODO(cmol): Move to using b.eventClient.Done() once implemented. + // In the meantime, we rely on the subs not going away until the client is + // closed, closing all its subscribers. case <-b.clientVersionSub.Done(): return case clientVersion := <-b.clientVersionSub.Events(): b.onClientVersion(&clientVersion) case au := <-b.autoUpdateSub.Events(): b.onTailnetDefaultAutoUpdate(au.Value) + case change := <-b.healthChangeSub.Events(): + b.onHealthChange(change) } } } @@ -1162,7 +1166,6 @@ func (b *LocalBackend) Shutdown() { b.stopOfflineAutoUpdate() b.unregisterNetMon() - b.unregisterHealthWatch() b.unregisterSysPolicyWatch() if cc != nil { cc.Shutdown() diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 261d5c4c2..354cf6864 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -470,7 +470,7 @@ func newTestLocalBackendWithSys(t testing.TB, sys *tsd.System) *LocalBackend { t.Log("Added memory store for testing") } if _, ok := sys.Engine.GetOK(); !ok { - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -2897,7 +2897,7 @@ func TestSetExitNodeIDPolicy(t *testing.T) { if test.prefs == nil { test.prefs = ipn.NewPrefs() } - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) pm.prefs = test.prefs.View() b.currentNode().SetNetMap(test.nm) b.pm = pm @@ -3501,7 +3501,7 @@ func TestApplySysPolicy(t *testing.T) { wantPrefs.ControlURL = ipn.DefaultControlURL } - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) pm.prefs = usePrefs.View() b := newTestBackend(t, polc) @@ -5802,7 +5802,7 @@ func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys sys.Set(store) } if _, hasEngine := sys.Engine.GetOK(); !hasEngine { - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } diff --git a/ipn/ipnlocal/loglines_test.go b/ipn/ipnlocal/loglines_test.go index 5bea6cabc..d831aa8b0 100644 --- a/ipn/ipnlocal/loglines_test.go +++ b/ipn/ipnlocal/loglines_test.go @@ -50,7 +50,7 @@ func TestLocalLogLines(t *testing.T) { sys := tsd.NewSystem() store := new(mem.Store) sys.Set(store) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + 
e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatal(err) } diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 93ecd977f..0d3f7db43 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -35,6 +35,7 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/persist" "tailscale.com/types/tkatype" + "tailscale.com/util/eventbus" "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/set" @@ -46,7 +47,7 @@ func (f observerFunc) SetControlClientStatus(_ controlclient.Client, s controlcl f(s) } -func fakeControlClient(t *testing.T, c *http.Client) *controlclient.Auto { +func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *eventbus.Bus) { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni @@ -70,7 +71,7 @@ func fakeControlClient(t *testing.T, c *http.Client) *controlclient.Auto { if err != nil { t.Fatal(err) } - return cc + return cc, bus } func fakeNoiseServer(t *testing.T, handler http.HandlerFunc) (*httptest.Server, *http.Client) { @@ -158,8 +159,8 @@ func TestTKAEnablementFlow(t *testing.T) { defer ts.Close() temp := t.TempDir() - cc := fakeControlClient(t, client) - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + cc, bus := fakeControlClient(t, client) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(bus))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -199,7 +200,7 @@ func TestTKADisablementFlow(t *testing.T) { nlPriv := key.NewNLPrivate() key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -267,7 +268,7 @@ func TestTKADisablementFlow(t *testing.T) { })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -391,7 +392,7 @@ func TestTKASync(t *testing.T) { t.Run(tc.name, func(t *testing.T) { nodePriv := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -518,7 +519,7 @@ func TestTKASync(t *testing.T) { defer ts.Close() // Setup the client. 
- cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -707,7 +708,7 @@ func TestTKADisable(t *testing.T) { disablementSecret := bytes.Repeat([]byte{0xa5}, 32) nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -769,7 +770,7 @@ func TestTKADisable(t *testing.T) { })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -798,7 +799,7 @@ func TestTKASign(t *testing.T) { toSign := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -860,7 +861,7 @@ func TestTKASign(t *testing.T) { } })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -887,7 +888,7 @@ func TestTKAForceDisable(t *testing.T) { nlPriv := key.NewNLPrivate() key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -940,7 +941,7 @@ func TestTKAForceDisable(t *testing.T) { })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) sys := tsd.NewSystem() sys.Set(pm.Store()) @@ -985,7 +986,7 @@ func TestTKAAffectedSigs(t *testing.T) { // toSign := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -1076,7 +1077,7 @@ func TestTKAAffectedSigs(t *testing.T) { } })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -1118,7 +1119,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { cosignPriv := key.NewNLPrivate() compromisedPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -1188,7 +1189,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { } })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -1209,7 +1210,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { // Cosign using the cosigning key. 
{ - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index 5654cf277..db01dd608 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -25,6 +25,7 @@ import ( "tailscale.com/tstest" "tailscale.com/types/logger" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/usermetric" "tailscale.com/wgengine" @@ -194,10 +195,9 @@ func TestPeerAPIReplyToDNSQueries(t *testing.T) { h.isSelf = false h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - sys := tsd.NewSystem() - t.Cleanup(sys.Bus.Get().Close) + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) - ht := new(health.Tracker) + ht := health.NewTracker(sys.Bus.Get()) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) reg := new(usermetric.Registry) eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) @@ -249,10 +249,9 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - sys := tsd.NewSystem() - t.Cleanup(sys.Bus.Get().Close) + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) - ht := new(health.Tracker) + ht := health.NewTracker(sys.Bus.Get()) reg := new(usermetric.Registry) eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) @@ -323,11 +322,10 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - sys := tsd.NewSystem() - t.Cleanup(sys.Bus.Get().Close) + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) rc := &appctest.RouteCollector{} - ht := new(health.Tracker) + ht := health.NewTracker(sys.Bus.Get()) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) reg := new(usermetric.Registry) @@ -392,10 +390,9 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - sys := tsd.NewSystem() - t.Cleanup(sys.Bus.Get().Close) + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) - ht := new(health.Tracker) + ht := health.NewTracker(sys.Bus.Get()) reg := new(usermetric.Registry) rc := &appctest.RouteCollector{} eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 1d312cfa6..6e1db4ff2 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -21,6 +21,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" ) var debug = envknob.RegisterBool("TS_DEBUG_PROFILES") @@ -838,7 +839,9 @@ func (pm *profileManager) CurrentPrefs() ipn.PrefsView { // ReadStartupPrefsForTest reads the startup prefs from disk. It is only used for testing. 
func ReadStartupPrefsForTest(logf logger.Logf, store ipn.StateStore) (ipn.PrefsView, error) { - ht := new(health.Tracker) // in tests, don't care about the health status + bus := eventbus.New() + defer bus.Close() + ht := health.NewTracker(bus) // in tests, don't care about the health status pm, err := newProfileManager(store, logf, ht) if err != nil { return ipn.PrefsView{}, err diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 52b095be1..8dce388bc 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -20,13 +20,14 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/persist" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" ) func TestProfileCurrentUserSwitch(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -63,7 +64,7 @@ func TestProfileCurrentUserSwitch(t *testing.T) { t.Fatalf("CurrentPrefs() = %v, want emptyPrefs", pm.CurrentPrefs().Pretty()) } - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -81,7 +82,7 @@ func TestProfileCurrentUserSwitch(t *testing.T) { func TestProfileList(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -285,7 +286,7 @@ func TestProfileDupe(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -318,7 +319,7 @@ func TestProfileDupe(t *testing.T) { func TestProfileManagement(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -416,7 +417,7 @@ func TestProfileManagement(t *testing.T) { t.Logf("Recreate profile manager from store") // Recreate the profile manager to ensure that it can load the profiles // from the store at startup. - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -432,7 +433,7 @@ func TestProfileManagement(t *testing.T) { t.Logf("Recreate profile manager from store after deleting default profile") // Recreate the profile manager to ensure that it can load the profiles // from the store at startup. 
- pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -474,7 +475,7 @@ func TestProfileManagement(t *testing.T) { t.Fatal("SetPrefs failed to save auto-update setting") } // Re-load profiles to trigger migration for invalid auto-update value. - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -496,7 +497,7 @@ func TestProfileManagementWindows(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "windows") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "windows") if err != nil { t.Fatal(err) } @@ -565,7 +566,7 @@ func TestProfileManagementWindows(t *testing.T) { t.Logf("Recreate profile manager from store, should reset prefs") // Recreate the profile manager to ensure that it can load the profiles // from the store at startup. - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "windows") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "windows") if err != nil { t.Fatal(err) } @@ -588,7 +589,7 @@ func TestProfileManagementWindows(t *testing.T) { } // Recreate the profile manager to ensure that it starts with test profile. - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "windows") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "windows") if err != nil { t.Fatal(err) } @@ -1091,7 +1092,7 @@ func TestProfileStateChangeCallback(t *testing.T) { t.Parallel() store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatalf("newProfileManagerWithGOOS: %v", err) } diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index d18ee4db9..a081ed27b 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -900,7 +900,7 @@ func newTestBackend(t *testing.T, opts ...any) *LocalBackend { e, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ SetSubsystem: sys.Set, - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }) @@ -918,7 +918,7 @@ func newTestBackend(t *testing.T, opts ...any) *LocalBackend { dir := t.TempDir() b.SetVarRoot(dir) - pm := must.Get(newProfileManager(new(mem.Store), logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), logf, health.NewTracker(bus))) pm.currentProfile = (&ipn.LoginProfile{ID: "id0"}).View() b.pm = pm diff --git a/ipn/ipnlocal/ssh_test.go b/ipn/ipnlocal/ssh_test.go index 6e93b34f0..b24cd6732 100644 --- a/ipn/ipnlocal/ssh_test.go +++ b/ipn/ipnlocal/ssh_test.go @@ -13,6 +13,7 @@ import ( "tailscale.com/health" "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" ) @@ -50,7 +51,7 @@ type fakeSSHServer struct { } func TestGetSSHUsernames(t *testing.T) { - pm := must.Get(newProfileManager(new(mem.Store), 
t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) b := &LocalBackend{pm: pm, store: pm.Store()} b.sshServer = fakeSSHServer{} res, err := b.getSSHUsernames(new(tailcfg.C2NSSHUsernamesRequest)) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 30538f2c8..ff21c920c 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -336,7 +336,7 @@ func TestStateMachine(t *testing.T) { sys := tsd.NewSystem() store := new(testStateStorage) sys.Set(store) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -974,7 +974,7 @@ func TestEditPrefsHasNoKeys(t *testing.T) { logf := tstest.WhileTestRunningLogger(t) sys := tsd.NewSystem() sys.Set(new(mem.Store)) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -1525,7 +1525,7 @@ func newLocalBackendWithMockEngineAndControl(t *testing.T, enableLogging bool) ( EventBus: sys.Bus.Get(), NetMon: dialer.NetMon(), Metrics: sys.UserMetricsRegistry(), - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), DisablePortMapper: true, }) if err != nil { diff --git a/ipn/lapitest/backend.go b/ipn/lapitest/backend.go index ddf48fb28..6a83431f3 100644 --- a/ipn/lapitest/backend.go +++ b/ipn/lapitest/backend.go @@ -33,7 +33,7 @@ func newBackend(opts *options) *ipnlocal.LocalBackend { sys.Set(&mem.Store{}) } - e, err := wgengine.NewFakeUserspaceEngine(opts.Logf(), sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(opts.Logf(), sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { opts.tb.Fatalf("NewFakeUserspaceEngine: %v", err) } diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index 046eb744d..fa24717f7 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -339,7 +339,7 @@ func newTestLocalBackend(t testing.TB) *ipnlocal.LocalBackend { sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) store := new(mem.Store) sys.Set(store) - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } diff --git a/net/dns/manager_tcp_test.go b/net/dns/manager_tcp_test.go index f4c42791e..46883a1e7 100644 --- a/net/dns/manager_tcp_test.go +++ b/net/dns/manager_tcp_test.go @@ -20,6 +20,7 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/tstest" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus/eventbustest" ) func mkDNSRequest(domain dnsname.FQDN, tp dns.Type, modify func(*dns.Builder)) []byte { @@ -89,7 +90,7 @@ func TestDNSOverTCP(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, } - m := NewManager(t.Logf, &f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") + m := NewManager(t.Logf, &f, 
health.NewTracker(eventbustest.NewBus(t)), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts( @@ -174,7 +175,7 @@ func TestDNSOverTCP_TooLarge(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, } - m := NewManager(log, &f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") + m := NewManager(log, &f, health.NewTracker(eventbustest.NewBus(t)), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts("andrew.ts.com.", "1.2.3.4"), diff --git a/net/dns/manager_test.go b/net/dns/manager_test.go index 522f9636a..b5a510862 100644 --- a/net/dns/manager_test.go +++ b/net/dns/manager_test.go @@ -19,6 +19,7 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/types/dnstype" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus/eventbustest" ) type fakeOSConfigurator struct { @@ -932,7 +933,7 @@ func TestManager(t *testing.T) { goos = "linux" } knobs := &controlknobs.Knobs{} - m := NewManager(t.Logf, &f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, knobs, goos) + m := NewManager(t.Logf, &f, health.NewTracker(eventbustest.NewBus(t)), tsdial.NewDialer(netmon.NewStatic()), nil, knobs, goos) m.resolver.TestOnlySetHook(f.SetResolver) if err := m.Set(test.in); err != nil { @@ -1038,7 +1039,7 @@ func TestConfigRecompilation(t *testing.T) { SearchDomains: fqdns("foo.ts.net"), } - m := NewManager(t.Logf, f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "darwin") + m := NewManager(t.Logf, f, health.NewTracker(eventbustest.NewBus(t)), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "darwin") var managerConfig *resolver.Config m.resolver.TestOnlySetHook(func(cfg resolver.Config) { diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index f7cda15f6..f77388ca7 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -29,7 +29,7 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/tstest" "tailscale.com/types/dnstype" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" ) func (rr resolverAndDelay) String() string { @@ -455,8 +455,7 @@ func makeLargeResponse(tb testing.TB, domain string) (request, response []byte) func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports ...uint16) ([]byte, error) { logf := tstest.WhileTestRunningLogger(tb) - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(tb) netMon, err := netmon.New(bus, logf) if err != nil { tb.Fatal(err) @@ -465,7 +464,7 @@ func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports var dialer tsdial.Dialer dialer.SetNetMon(netMon) - fwd := newForwarder(logf, netMon, nil, &dialer, new(health.Tracker), nil) + fwd := newForwarder(logf, netMon, nil, &dialer, health.NewTracker(bus), nil) if modify != nil { modify(fwd) } diff --git a/net/dns/resolver/tsdns_test.go b/net/dns/resolver/tsdns_test.go index 4bbfd4d6a..0823ea139 100644 --- a/net/dns/resolver/tsdns_test.go +++ b/net/dns/resolver/tsdns_test.go @@ -31,7 +31,7 @@ import ( "tailscale.com/types/dnstype" "tailscale.com/types/logger" "tailscale.com/util/dnsname" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" ) var ( @@ -356,7 +356,7 @@ func newResolver(t testing.TB) *Resolver { return New(t.Logf, nil, // no link selector tsdial.NewDialer(netmon.NewStatic()), - new(health.Tracker), + 
health.NewTracker(eventbustest.NewBus(t)), nil, // no control knobs ) } @@ -1060,8 +1060,7 @@ func TestForwardLinkSelection(t *testing.T) { // routes differently. specialIP := netaddr.IPv4(1, 2, 3, 4) - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, ".... netmon: ")) if err != nil { @@ -1074,7 +1073,7 @@ func TestForwardLinkSelection(t *testing.T) { return "special" } return "" - }), new(tsdial.Dialer), new(health.Tracker), nil /* no control knobs */) + }), new(tsdial.Dialer), health.NewTracker(bus), nil /* no control knobs */) // Test non-special IP. if got, err := fwd.packetListener(netip.Addr{}); err != nil { diff --git a/net/tlsdial/tlsdial_test.go b/net/tlsdial/tlsdial_test.go index e2c4cdd4f..a288d7653 100644 --- a/net/tlsdial/tlsdial_test.go +++ b/net/tlsdial/tlsdial_test.go @@ -16,6 +16,7 @@ import ( "tailscale.com/health" "tailscale.com/net/bakedroots" + "tailscale.com/util/eventbus/eventbustest" ) func TestFallbackRootWorks(t *testing.T) { @@ -85,7 +86,7 @@ func TestFallbackRootWorks(t *testing.T) { }, DisableKeepAlives: true, // for test cleanup ease } - ht := new(health.Tracker) + ht := health.NewTracker(eventbustest.NewBus(t)) tr.TLSClientConfig = Config(ht, tr.TLSClientConfig) c := &http.Client{Transport: tr} diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 96fb87f49..44b2d68df 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -1062,7 +1062,7 @@ func TestSSHAuthFlow(t *testing.T) { func TestSSH(t *testing.T) { logf := tstest.WhileTestRunningLogger(t) sys := tsd.NewSystem() - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatal(err) } diff --git a/tsd/tsd.go b/tsd/tsd.go index e4a512e4b..263b8de70 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -60,6 +60,7 @@ type System struct { DriveForLocal SubSystem[drive.FileSystemForLocal] DriveForRemote SubSystem[drive.FileSystemForRemote] PolicyClient SubSystem[policyclient.Client] + HealthTracker SubSystem[*health.Tracker] // InitialConfig is initial server config, if any. // It is nil if the node is not in declarative mode. @@ -74,7 +75,6 @@ type System struct { controlKnobs controlknobs.Knobs proxyMap proxymap.Mapper - healthTracker health.Tracker userMetricsRegistry usermetric.Registry } @@ -91,6 +91,10 @@ func NewSystemWithBus(bus *eventbus.Bus) *System { } sys := new(System) sys.Set(bus) + + tracker := health.NewTracker(bus) + sys.Set(tracker) + return sys } @@ -138,6 +142,8 @@ func (s *System) Set(v any) { s.DriveForRemote.Set(v) case policyclient.Client: s.PolicyClient.Set(v) + case *health.Tracker: + s.HealthTracker.Set(v) default: panic(fmt.Sprintf("unknown type %T", v)) } @@ -167,11 +173,6 @@ func (s *System) ProxyMapper() *proxymap.Mapper { return &s.proxyMap } -// HealthTracker returns the system health tracker. -func (s *System) HealthTracker() *health.Tracker { - return &s.healthTracker -} - // UserMetricsRegistry returns the system usermetrics. 
func (s *System) UserMetricsRegistry() *usermetric.Registry { return &s.userMetricsRegistry diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index d25da0996..d9b9b64c1 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -577,7 +577,7 @@ func (s *Server) start() (reterr error) { sys := tsd.NewSystem() s.sys = sys - if err := s.startLogger(&closePool, sys.HealthTracker(), tsLogf); err != nil { + if err := s.startLogger(&closePool, sys.HealthTracker.Get(), tsLogf); err != nil { return err } @@ -595,7 +595,7 @@ func (s *Server) start() (reterr error) { Dialer: s.dialer, SetSubsystem: sys.Set, ControlKnobs: sys.ControlKnobs(), - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), }) if err != nil { @@ -603,7 +603,7 @@ func (s *Server) start() (reterr error) { } closePool.add(s.dialer) sys.Set(eng) - sys.HealthTracker().SetMetricsRegistry(sys.UserMetricsRegistry()) + sys.HealthTracker.Get().SetMetricsRegistry(sys.UserMetricsRegistry()) // TODO(oxtoacart): do we need to support Taildrive on tsnet, and if so, how? ns, err := netstack.Create(tsLogf, sys.Tun.Get(), eng, sys.MagicSock.Get(), s.dialer, sys.DNSManager.Get(), sys.ProxyMapper()) diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index b7375adc4..af725ace1 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -100,7 +100,7 @@ func Expect(tw *Watcher, filters ...any) error { case <-time.After(tw.TimeOut): return fmt.Errorf( "timed out waiting for event, saw %d events, %d was expected", - eventCount, head) + eventCount, len(filters)) case <-tw.chDone: return errors.New("watcher closed while waiting for events") } @@ -138,7 +138,7 @@ func ExpectExactly(tw *Watcher, filters ...any) error { case <-time.After(tw.TimeOut): return fmt.Errorf( "timed out waiting for event, saw %d events, %d was expected", - eventCount, pos) + eventCount, len(filters)) case <-tw.chDone: return errors.New("watcher closed while waiting for events") } diff --git a/wgengine/bench/wg.go b/wgengine/bench/wg.go index 9b195bdb7..4de7677f2 100644 --- a/wgengine/bench/wg.go +++ b/wgengine/bench/wg.go @@ -53,7 +53,7 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. ListenPort: 0, Tun: t1, SetSubsystem: s1.Set, - HealthTracker: s1.HealthTracker(), + HealthTracker: s1.HealthTracker.Get(), }) if err != nil { log.Fatalf("e1 init: %v", err) @@ -80,7 +80,7 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. 
ListenPort: 0, Tun: t2, SetSubsystem: s2.Set, - HealthTracker: s2.HealthTracker(), + HealthTracker: s2.HealthTracker.Get(), }) if err != nil { log.Fatalf("e2 init: %v", err) diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index bb5922c8c..1b885c3f1 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -67,6 +67,7 @@ import ( "tailscale.com/util/cibuild" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/racebuild" "tailscale.com/util/set" @@ -179,14 +180,13 @@ func newMagicStack(t testing.TB, logf logger.Logf, l nettype.PacketListener, der func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap, privateKey key.NodePrivate) *magicStack { t.Helper() - bus := eventbus.New() - t.Cleanup(bus.Close) + bus := eventbustest.NewBus(t) netMon, err := netmon.New(bus, logf) if err != nil { t.Fatalf("netmon.New: %v", err) } - ht := new(health.Tracker) + ht := health.NewTracker(bus) var reg usermetric.Registry epCh := make(chan []tailcfg.Endpoint, 100) // arbitrary @@ -1352,8 +1352,7 @@ func newTestConn(t testing.TB) *Conn { t.Helper() port := pickPort(t) - bus := eventbus.New() - t.Cleanup(bus.Close) + bus := eventbustest.NewBus(t) netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, "... netmon: ")) if err != nil { @@ -1364,7 +1363,7 @@ func newTestConn(t testing.TB) *Conn { conn, err := NewConn(Options{ NetMon: netMon, EventBus: bus, - HealthTracker: new(health.Tracker), + HealthTracker: health.NewTracker(bus), Metrics: new(usermetric.Registry), DisablePortMapper: true, Logf: t.Logf, @@ -3038,7 +3037,7 @@ func TestMaybeSetNearestDERP(t *testing.T) { } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { - ht := new(health.Tracker) + ht := health.NewTracker(eventbustest.NewBus(t)) c := newConn(t.Logf) c.myDerp = tt.old c.derpMap = derpMap diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go index 584b3babc..93022811c 100644 --- a/wgengine/netstack/netstack_test.go +++ b/wgengine/netstack/netstack_test.go @@ -50,7 +50,7 @@ func TestInjectInboundLeak(t *testing.T) { Tun: tunDev, Dialer: dialer, SetSubsystem: sys.Set, - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }) @@ -110,7 +110,7 @@ func makeNetstack(tb testing.TB, config func(*Impl)) *Impl { Tun: tunDev, Dialer: dialer, SetSubsystem: sys.Set, - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }) diff --git a/wgengine/router/router_linux_test.go b/wgengine/router/router_linux_test.go index b6a5a1ac0..3b1eb7db6 100644 --- a/wgengine/router/router_linux_test.go +++ b/wgengine/router/router_linux_test.go @@ -375,7 +375,7 @@ ip route add throw 192.168.0.0/24 table 52` + basic, defer mon.Close() fake := NewFakeOS(t) - ht := new(health.Tracker) + ht := health.NewTracker(bus) router, err := newUserspaceRouterAdvanced(t.Logf, "tailscale0", mon, fake, ht, bus) router.(*linuxRouter).nfr = fake.nfr if err != nil { diff --git a/wgengine/userspace_ext_test.go b/wgengine/userspace_ext_test.go index 5e7d1ce6a..8e7bbb7a9 100644 --- a/wgengine/userspace_ext_test.go +++ b/wgengine/userspace_ext_test.go @@ -21,7 +21,7 @@ func TestIsNetstack(t *testing.T) { tstest.WhileTestRunningLogger(t), 
wgengine.Config{ SetSubsystem: sys.Set, - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }, @@ -73,7 +73,7 @@ func TestIsNetstackRouter(t *testing.T) { } conf := tt.conf conf.SetSubsystem = sys.Set - conf.HealthTracker = sys.HealthTracker() + conf.HealthTracker = sys.HealthTracker.Get() conf.Metrics = sys.UserMetricsRegistry() conf.EventBus = sys.Bus.Get() e, err := wgengine.NewUserspaceEngine(logger.Discard, conf) diff --git a/wgengine/userspace_test.go b/wgengine/userspace_test.go index 87a36c673..89d75b98a 100644 --- a/wgengine/userspace_test.go +++ b/wgengine/userspace_test.go @@ -25,7 +25,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/netmap" "tailscale.com/types/opt" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/usermetric" "tailscale.com/wgengine/router" "tailscale.com/wgengine/wgcfg" @@ -101,10 +101,9 @@ func nodeViews(v []*tailcfg.Node) []tailcfg.NodeView { } func TestUserspaceEngineReconfig(t *testing.T) { - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) - ht := new(health.Tracker) + ht := health.NewTracker(bus) reg := new(usermetric.Registry) e, err := NewFakeUserspaceEngine(t.Logf, 0, ht, reg, bus) if err != nil { @@ -170,12 +169,11 @@ func TestUserspaceEnginePortReconfig(t *testing.T) { var knobs controlknobs.Knobs - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) // Keep making a wgengine until we find an unused port var ue *userspaceEngine - ht := new(health.Tracker) + ht := health.NewTracker(bus) reg := new(usermetric.Registry) for i := range 100 { attempt := uint16(defaultPort + i) @@ -258,9 +256,8 @@ func TestUserspaceEnginePeerMTUReconfig(t *testing.T) { var knobs controlknobs.Knobs - bus := eventbus.New() - defer bus.Close() - ht := new(health.Tracker) + bus := eventbustest.NewBus(t) + ht := health.NewTracker(bus) reg := new(usermetric.Registry) e, err := NewFakeUserspaceEngine(t.Logf, 0, &knobs, ht, reg, bus) if err != nil { diff --git a/wgengine/watchdog_test.go b/wgengine/watchdog_test.go index a54a0d3fa..35fd8f331 100644 --- a/wgengine/watchdog_test.go +++ b/wgengine/watchdog_test.go @@ -9,7 +9,7 @@ import ( "time" "tailscale.com/health" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/usermetric" ) @@ -25,9 +25,8 @@ func TestWatchdog(t *testing.T) { t.Run("default watchdog does not fire", func(t *testing.T) { t.Parallel() - bus := eventbus.New() - defer bus.Close() - ht := new(health.Tracker) + bus := eventbustest.NewBus(t) + ht := health.NewTracker(bus) reg := new(usermetric.Registry) e, err := NewFakeUserspaceEngine(t.Logf, 0, ht, reg, bus) if err != nil { From 84659b1dc6afab63c7fca16b250d1ac1624515b4 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 16 Sep 2025 17:39:21 +0100 Subject: [PATCH 1321/1708] ipn: fix the string representation of an empty ipn.Notify Before: `ipn.Notify}` After: `ipn.Notify{}` Updates #cleanup Signed-off-by: Alex Chan --- ipn/backend.go | 6 +++++- ipn/backend_test.go | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 ipn/backend_test.go diff --git a/ipn/backend.go b/ipn/backend.go index fd4442f71..91cf81ca5 100644 --- a/ipn/backend.go +++ b/ipn/backend.go @@ -205,7 +205,11 @@ func (n Notify) String() string { } s := sb.String() - return s[0:len(s)-1] + "}" + if s == "Notify{" { + return "Notify{}" + } else { + return 
s[0:len(s)-1] + "}" + } } // PartialFile represents an in-progress incoming file transfer. diff --git a/ipn/backend_test.go b/ipn/backend_test.go new file mode 100644 index 000000000..d72b96615 --- /dev/null +++ b/ipn/backend_test.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipn + +import ( + "testing" + + "tailscale.com/health" + "tailscale.com/types/empty" +) + +func TestNotifyString(t *testing.T) { + for _, tt := range []struct { + name string + value Notify + expected string + }{ + { + name: "notify-empty", + value: Notify{}, + expected: "Notify{}", + }, + { + name: "notify-with-login-finished", + value: Notify{LoginFinished: &empty.Message{}}, + expected: "Notify{LoginFinished}", + }, + { + name: "notify-with-multiple-fields", + value: Notify{LoginFinished: &empty.Message{}, Health: &health.State{}}, + expected: "Notify{LoginFinished Health{...}}", + }, + } { + t.Run(tt.name, func(t *testing.T) { + actual := tt.value.String() + if actual != tt.expected { + t.Fatalf("expected=%q, actual=%q", tt.expected, actual) + } + }) + } +} From b63f5d7e7def89c73b3c4e7262b448164faaa5c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Bojanowski?= Date: Sat, 13 Sep 2025 12:58:46 +0200 Subject: [PATCH 1322/1708] logpolicy/logpolicy: use noopPretendSuccessTransport if NoLogsNoSupport envknob is set MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Bojanowski --- logpolicy/logpolicy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 587b421f3..823c118b7 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -868,7 +868,7 @@ type TransportOptions struct { // New returns an HTTP Transport particularly suited to uploading logs // to the given host name. See [DialContext] for details on how it works. func (opts TransportOptions) New() http.RoundTripper { - if testenv.InTest() { + if testenv.InTest() || envknob.NoLogsNoSupport() { return noopPretendSuccessTransport{} } if opts.NetMon == nil { From 8608e421031746187392c838e3008d087aaed4df Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 16 Sep 2025 10:52:39 -0700 Subject: [PATCH 1323/1708] feature,ipn/ipnlocal,wgengine: improve how eventbus shutdown is handled (#17156) Instead of waiting for a designated subscription to close as a canary for the bus being stopped, use the bus Client's own signal for closure added in #17118. Updates #cleanup Change-Id: I384ea39f3f1f6a030a6282356f7b5bdcdf8d7102 Signed-off-by: M. J. Fromberger --- feature/relayserver/relayserver.go | 4 +--- ipn/ipnlocal/expiry.go | 6 ++---- ipn/ipnlocal/local.go | 9 ++------- wgengine/magicsock/magicsock.go | 6 ++---- wgengine/router/router_linux.go | 6 ++---- 5 files changed, 9 insertions(+), 22 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 24304e8ec..d77d7145a 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -157,9 +157,7 @@ func (e *extension) consumeEventbusTopics(port int) { select { case <-e.disconnectFromBusCh: return - case <-reqSub.Done(): - // If reqSub is done, the eventClient has been closed, which is a - // signal to return. 
+ case <-eventClient.Done(): return case req := <-reqSub.Events(): if rs == nil { diff --git a/ipn/ipnlocal/expiry.go b/ipn/ipnlocal/expiry.go index 3d20d57b4..9427f0738 100644 --- a/ipn/ipnlocal/expiry.go +++ b/ipn/ipnlocal/expiry.go @@ -68,15 +68,13 @@ func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager { // [eventbus.Subscriber]'s and passes them to their related handler. Events are // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the -// [controlclient.ControlTime] subscriber is closed, which is interpreted to be the -// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either -// all open or all closed). +// [eventbus.Client] is closed. func (em *expiryManager) consumeEventbusTopics() { defer close(em.subsDoneCh) for { select { - case <-em.controlTimeSub.Done(): + case <-em.eventClient.Done(): return case time := <-em.controlTimeSub.Events(): em.onControlTime(time.Value) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 4c27bea45..5cdfaf549 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -619,18 +619,13 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo // [eventbus.Subscriber]'s and passes them to their related handler. Events are // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the -// [tailcfg.ClientVersion] subscriber is closed, which is interpreted to be the -// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either -// all open or all closed). +// [eventbus.Client] is closed. func (b *LocalBackend) consumeEventbusTopics() { defer close(b.subsDoneCh) for { select { - // TODO(cmol): Move to using b.eventClient.Done() once implemented. - // In the meantime, we rely on the subs not going away until the client is - // closed, closing all its subscribers. - case <-b.clientVersionSub.Done(): + case <-b.eventClient.Done(): return case clientVersion := <-b.clientVersionSub.Events(): b.onClientVersion(&clientVersion) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 36402122c..719cc68a4 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -640,15 +640,13 @@ func newConn(logf logger.Logf) *Conn { // [eventbus.Subscriber]'s and passes them to their related handler. Events are // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the -// [portmapper.Mapping] subscriber is closed, which is interpreted to be the -// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either -// all open or all closed). +// [eventbus.Client] is closed. func (c *Conn) consumeEventbusTopics() { defer close(c.subsDoneCh) for { select { - case <-c.pmSub.Done(): + case <-c.eventClient.Done(): return case <-c.pmSub.Events(): c.onPortMapChanged() diff --git a/wgengine/router/router_linux.go b/wgengine/router/router_linux.go index 2382e87cd..a9edd7f96 100644 --- a/wgengine/router/router_linux.go +++ b/wgengine/router/router_linux.go @@ -158,13 +158,11 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon // [eventbus.Subscriber]'s and passes them to their related handler. Events are // always handled in the order they are received, i.e. 
the next event is not // read until the previous event's handler has returned. It returns when the -// [portmapper.Mapping] subscriber is closed, which is interpreted to be the -// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either -// all open or all closed). +// [eventbus.Client] is closed. func (r *linuxRouter) consumeEventbusTopics() { for { select { - case <-r.ruleDeletedSub.Done(): + case <-r.eventClient.Done(): return case rulesDeleted := <-r.ruleDeletedSub.Events(): r.onIPRuleDeleted(rulesDeleted.Table, rulesDeleted.Priority) From 2b0f59cd3880275d786f8546321a1e02509f060c Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 16 Sep 2025 18:35:55 +0100 Subject: [PATCH 1324/1708] logpolicy: remove the deprecated and now-unused `NewWithConfigPath` Updates #cleanup Signed-off-by: Alex Chan --- logpolicy/logpolicy.go | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 823c118b7..4c90378d0 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -464,18 +464,6 @@ func New(collection string, netMon *netmon.Monitor, health *health.Tracker, logf }.New() } -// Deprecated: Use [Options.New] instead. -func NewWithConfigPath(collection, dir, cmdName string, netMon *netmon.Monitor, health *health.Tracker, logf logger.Logf) *Policy { - return Options{ - Collection: collection, - Dir: dir, - CmdName: cmdName, - NetMon: netMon, - Health: health, - Logf: logf, - }.New() -} - // Options is used to construct a [Policy]. type Options struct { // Collection is a required collection to upload logs under. From 99b3f69126e503dd18a794e24e822f667b330212 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 15 Sep 2025 19:50:21 -0700 Subject: [PATCH 1325/1708] feature/portmapper: make the portmapper & its debugging tools modular Starting at a minimal binary and adding one feature back... tailscaled tailscale combined (linux/amd64) 30073135 17451704 31543692 omitting everything + 480302 + 10258 + 493896 .. add debugportmapper + 475317 + 151943 + 467660 .. add portmapper + 500086 + 162873 + 510511 .. 
add portmapper+debugportmapper Fixes #17148 Change-Id: I90bd0e9d1bd8cbe64fa2e885e9afef8fb5ee74b1 Signed-off-by: Brad Fitzpatrick --- client/local/debugportmapper.go | 84 ++++++++ client/local/local.go | 64 ------ client/tailscale/localclient_aliases.go | 5 - cmd/k8s-operator/depaware.txt | 7 +- cmd/omitsize/omitsize.go | 25 ++- cmd/tailscale/cli/debug-portmap.go | 79 +++++++ cmd/tailscale/cli/debug.go | 56 +---- cmd/tailscale/cli/netcheck.go | 29 ++- cmd/tailscale/depaware.txt | 11 +- cmd/tailscaled/depaware.txt | 8 +- cmd/tailscaled/deps_test.go | 18 ++ cmd/tsidp/depaware.txt | 7 +- .../feature_debugportmapper_disabled.go | 13 ++ .../feature_debugportmapper_enabled.go | 13 ++ .../feature_portmapper_disabled.go | 13 ++ .../feature_portmapper_enabled.go | 13 ++ feature/condregister/condregister.go | 7 + feature/condregister/maybe_debugportmapper.go | 8 + feature/condregister/portmapper/doc.go | 6 + .../portmapper/maybe_portmapper.go | 8 + feature/debugportmapper/debugportmapper.go | 204 ++++++++++++++++++ feature/featuretags/featuretags.go | 2 + feature/portmapper/portmapper.go | 38 ++++ ipn/ipnlocal/local.go | 5 + ipn/ipnlocal/local_test.go | 1 + ipn/localapi/localapi.go | 163 -------------- net/netcheck/netcheck.go | 6 +- net/portmapper/igd_test.go | 8 +- net/portmapper/portmapper.go | 85 ++++---- net/portmapper/portmapper_test.go | 10 +- .../portmappertype/portmappertype.go | 88 ++++++++ net/portmapper/upnp.go | 4 +- net/portmapper/upnp_test.go | 3 +- tsnet/depaware.txt | 7 +- tsnet/tsnet.go | 1 + wgengine/magicsock/magicsock.go | 58 +++-- 36 files changed, 758 insertions(+), 399 deletions(-) create mode 100644 client/local/debugportmapper.go create mode 100644 cmd/tailscale/cli/debug-portmap.go create mode 100644 feature/buildfeatures/feature_debugportmapper_disabled.go create mode 100644 feature/buildfeatures/feature_debugportmapper_enabled.go create mode 100644 feature/buildfeatures/feature_portmapper_disabled.go create mode 100644 feature/buildfeatures/feature_portmapper_enabled.go create mode 100644 feature/condregister/maybe_debugportmapper.go create mode 100644 feature/condregister/portmapper/doc.go create mode 100644 feature/condregister/portmapper/maybe_portmapper.go create mode 100644 feature/debugportmapper/debugportmapper.go create mode 100644 feature/portmapper/portmapper.go create mode 100644 net/portmapper/portmappertype/portmappertype.go diff --git a/client/local/debugportmapper.go b/client/local/debugportmapper.go new file mode 100644 index 000000000..04ed1c109 --- /dev/null +++ b/client/local/debugportmapper.go @@ -0,0 +1,84 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_debugportmapper + +package local + +import ( + "cmp" + "context" + "fmt" + "io" + "net/http" + "net/netip" + "net/url" + "strconv" + "time" + + "tailscale.com/client/tailscale/apitype" +) + +// DebugPortmapOpts contains options for the [Client.DebugPortmap] command. +type DebugPortmapOpts struct { + // Duration is how long the mapping should be created for. It defaults + // to 5 seconds if not set. + Duration time.Duration + + // Type is the kind of portmap to debug. The empty string instructs the + // portmap client to perform all known types. Other valid options are + // "pmp", "pcp", and "upnp". + Type string + + // GatewayAddr specifies the gateway address used during portmapping. + // If set, SelfAddr must also be set. If unset, it will be + // autodetected. 
+ GatewayAddr netip.Addr + + // SelfAddr specifies the gateway address used during portmapping. If + // set, GatewayAddr must also be set. If unset, it will be + // autodetected. + SelfAddr netip.Addr + + // LogHTTP instructs the debug-portmap endpoint to print all HTTP + // requests and responses made to the logs. + LogHTTP bool +} + +// DebugPortmap invokes the debug-portmap endpoint, and returns an +// io.ReadCloser that can be used to read the logs that are printed during this +// process. +// +// opts can be nil; if so, default values will be used. +func (lc *Client) DebugPortmap(ctx context.Context, opts *DebugPortmapOpts) (io.ReadCloser, error) { + vals := make(url.Values) + if opts == nil { + opts = &DebugPortmapOpts{} + } + + vals.Set("duration", cmp.Or(opts.Duration, 5*time.Second).String()) + vals.Set("type", opts.Type) + vals.Set("log_http", strconv.FormatBool(opts.LogHTTP)) + + if opts.GatewayAddr.IsValid() != opts.SelfAddr.IsValid() { + return nil, fmt.Errorf("both GatewayAddr and SelfAddr must be provided if one is") + } else if opts.GatewayAddr.IsValid() { + vals.Set("gateway_and_self", fmt.Sprintf("%s/%s", opts.GatewayAddr, opts.SelfAddr)) + } + + req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-portmap?"+vals.Encode(), nil) + if err != nil { + return nil, err + } + res, err := lc.doLocalRequestNiceError(req) + if err != nil { + return nil, err + } + if res.StatusCode != 200 { + body, _ := io.ReadAll(res.Body) + res.Body.Close() + return nil, fmt.Errorf("HTTP %s: %s", res.Status, body) + } + + return res.Body, nil +} diff --git a/client/local/local.go b/client/local/local.go index 32e8208da..a132e577b 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -591,70 +591,6 @@ func (lc *Client) DebugResultJSON(ctx context.Context, action string) (any, erro return x, nil } -// DebugPortmapOpts contains options for the [Client.DebugPortmap] command. -type DebugPortmapOpts struct { - // Duration is how long the mapping should be created for. It defaults - // to 5 seconds if not set. - Duration time.Duration - - // Type is the kind of portmap to debug. The empty string instructs the - // portmap client to perform all known types. Other valid options are - // "pmp", "pcp", and "upnp". - Type string - - // GatewayAddr specifies the gateway address used during portmapping. - // If set, SelfAddr must also be set. If unset, it will be - // autodetected. - GatewayAddr netip.Addr - - // SelfAddr specifies the gateway address used during portmapping. If - // set, GatewayAddr must also be set. If unset, it will be - // autodetected. - SelfAddr netip.Addr - - // LogHTTP instructs the debug-portmap endpoint to print all HTTP - // requests and responses made to the logs. - LogHTTP bool -} - -// DebugPortmap invokes the debug-portmap endpoint, and returns an -// io.ReadCloser that can be used to read the logs that are printed during this -// process. -// -// opts can be nil; if so, default values will be used. 
-func (lc *Client) DebugPortmap(ctx context.Context, opts *DebugPortmapOpts) (io.ReadCloser, error) { - vals := make(url.Values) - if opts == nil { - opts = &DebugPortmapOpts{} - } - - vals.Set("duration", cmp.Or(opts.Duration, 5*time.Second).String()) - vals.Set("type", opts.Type) - vals.Set("log_http", strconv.FormatBool(opts.LogHTTP)) - - if opts.GatewayAddr.IsValid() != opts.SelfAddr.IsValid() { - return nil, fmt.Errorf("both GatewayAddr and SelfAddr must be provided if one is") - } else if opts.GatewayAddr.IsValid() { - vals.Set("gateway_and_self", fmt.Sprintf("%s/%s", opts.GatewayAddr, opts.SelfAddr)) - } - - req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-portmap?"+vals.Encode(), nil) - if err != nil { - return nil, err - } - res, err := lc.doLocalRequestNiceError(req) - if err != nil { - return nil, err - } - if res.StatusCode != 200 { - body, _ := io.ReadAll(res.Body) - res.Body.Close() - return nil, fmt.Errorf("HTTP %s: %s", res.Status, body) - } - - return res.Body, nil -} - // SetDevStoreKeyValue set a statestore key/value. It's only meant for development. // The schema (including when keys are re-read) is not a stable interface. func (lc *Client) SetDevStoreKeyValue(ctx context.Context, key, value string) error { diff --git a/client/tailscale/localclient_aliases.go b/client/tailscale/localclient_aliases.go index 2b53906b7..58be312b4 100644 --- a/client/tailscale/localclient_aliases.go +++ b/client/tailscale/localclient_aliases.go @@ -32,11 +32,6 @@ type IPNBusWatcher = local.IPNBusWatcher // Deprecated: import [tailscale.com/client/local] instead. type BugReportOpts = local.BugReportOpts -// DebugPortmapOpts is an alias for [tailscale.com/client/local.DebugPortmapOpts]. -// -// Deprecated: import [tailscale.com/client/local] instead. -type DebugPortmapOpts = local.DebugPortmapOpts - // PingOpts is an alias for [tailscale.com/client/local.PingOpts]. // // Deprecated: import [tailscale.com/client/local] instead. 
diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 87bae60c8..faf7b2f83 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -798,7 +798,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ - tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ + tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ @@ -866,7 +868,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/packet from tailscale.com/net/connstats+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ - tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock diff --git a/cmd/omitsize/omitsize.go b/cmd/omitsize/omitsize.go index a4bce6329..5940ba520 100644 --- a/cmd/omitsize/omitsize.go +++ b/cmd/omitsize/omitsize.go @@ -22,9 +22,9 @@ import ( var ( cacheDir = flag.String("cachedir", "", "if non-empty, use this directory to store cached size results to speed up subsequent runs. The tool does not consider the git status when deciding whether to use the cache. It's on you to nuke it between runs if the tree changed.") - features = flag.String("features", "", "comma-separated list of features to consider, with or without the ts_omit_ prefix") + features = flag.String("features", "", "comma-separated list of features to list in the table, with or without the ts_omit_ prefix. It may also contain a '+' sign(s) for ANDing features together. 
If empty, all omittable features are considered one at a time.") - showRemovals = flag.Bool("show-removals", false, "if true, show a table of sizes removing one feature at a time from the full set") + showRemovals = flag.Bool("show-removals", false, "if true, show a table of sizes removing one feature at a time from the full set.") ) func main() { @@ -43,10 +43,14 @@ func main() { all = slices.Clone(allOmittable) } else { for v := range strings.SplitSeq(*features, ",") { - if !strings.HasPrefix(v, "ts_omit_") { - v = "ts_omit_" + v + var withOmit []string + for v := range strings.SplitSeq(v, "+") { + if !strings.HasPrefix(v, "ts_omit_") { + v = "ts_omit_" + v + } + withOmit = append(withOmit, v) } - all = append(all, v) + all = append(all, strings.Join(withOmit, "+")) } } @@ -70,6 +74,9 @@ func main() { fmt.Printf("-%8d -%8d -%8d omit-all\n", baseD-minD, baseC-minC, baseBoth-minBoth) for _, t := range all { + if strings.Contains(t, "+") { + log.Fatalf("TODO: make --show-removals support ANDed features like %q", t) + } sizeD := measure("tailscaled", t) sizeC := measure("tailscale", t) sizeBoth := measure("tailscaled", append([]string{t}, "ts_include_cli")...) @@ -84,17 +91,17 @@ func main() { fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") fmt.Printf("%9d %9d %9d omitting everything\n", minD, minC, minBoth) for _, t := range all { - tags := allExcept(allOmittable, t) + tags := allExcept(allOmittable, strings.Split(t, "+")) sizeD := measure("tailscaled", tags...) sizeC := measure("tailscale", tags...) sizeBoth := measure("tailscaled", append(tags, "ts_include_cli")...) - fmt.Printf("+%8d +%8d +%8d .. add %s\n", max(sizeD-minD, 0), max(sizeC-minC, 0), max(sizeBoth-minBoth, 0), strings.TrimPrefix(t, "ts_omit_")) + fmt.Printf("+%8d +%8d +%8d .. 
add %s\n", max(sizeD-minD, 0), max(sizeC-minC, 0), max(sizeBoth-minBoth, 0), strings.ReplaceAll(t, "ts_omit_", "")) } } -func allExcept(all []string, omit string) []string { - return slices.DeleteFunc(slices.Clone(all), func(s string) bool { return s == omit }) +func allExcept(all, omit []string) []string { + return slices.DeleteFunc(slices.Clone(all), func(s string) bool { return slices.Contains(omit, s) }) } func measure(bin string, tags ...string) int64 { diff --git a/cmd/tailscale/cli/debug-portmap.go b/cmd/tailscale/cli/debug-portmap.go new file mode 100644 index 000000000..d8db1442c --- /dev/null +++ b/cmd/tailscale/cli/debug-portmap.go @@ -0,0 +1,79 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios && !ts_omit_debugportmapper + +package cli + +import ( + "context" + "flag" + "fmt" + "io" + "net/netip" + "os" + "time" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/client/local" +) + +func init() { + debugPortmapCmd = mkDebugPortmapCmd +} + +func mkDebugPortmapCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "portmap", + ShortUsage: "tailscale debug portmap", + Exec: debugPortmap, + ShortHelp: "Run portmap debugging", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("portmap") + fs.DurationVar(&debugPortmapArgs.duration, "duration", 5*time.Second, "timeout for port mapping") + fs.StringVar(&debugPortmapArgs.ty, "type", "", `portmap debug type (one of "", "pmp", "pcp", or "upnp")`) + fs.StringVar(&debugPortmapArgs.gatewayAddr, "gateway-addr", "", `override gateway IP (must also pass --self-addr)`) + fs.StringVar(&debugPortmapArgs.selfAddr, "self-addr", "", `override self IP (must also pass --gateway-addr)`) + fs.BoolVar(&debugPortmapArgs.logHTTP, "log-http", false, `print all HTTP requests and responses to the log`) + return fs + })(), + } +} + +var debugPortmapArgs struct { + duration time.Duration + gatewayAddr string + selfAddr string + ty string + logHTTP bool +} + +func debugPortmap(ctx context.Context, args []string) error { + opts := &local.DebugPortmapOpts{ + Duration: debugPortmapArgs.duration, + Type: debugPortmapArgs.ty, + LogHTTP: debugPortmapArgs.logHTTP, + } + if (debugPortmapArgs.gatewayAddr != "") != (debugPortmapArgs.selfAddr != "") { + return fmt.Errorf("if one of --gateway-addr and --self-addr is provided, the other must be as well") + } + if debugPortmapArgs.gatewayAddr != "" { + var err error + opts.GatewayAddr, err = netip.ParseAddr(debugPortmapArgs.gatewayAddr) + if err != nil { + return fmt.Errorf("invalid --gateway-addr: %w", err) + } + opts.SelfAddr, err = netip.ParseAddr(debugPortmapArgs.selfAddr) + if err != nil { + return fmt.Errorf("invalid --self-addr: %w", err) + } + } + rc, err := localClient.DebugPortmap(ctx, opts) + if err != nil { + return err + } + defer rc.Close() + + _, err = io.Copy(os.Stdout, rc) + return err +} diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 4960aeec2..39c9748ef 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -30,7 +30,6 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "golang.org/x/net/http/httpproxy" "golang.org/x/net/http2" - "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/control/controlhttp" "tailscale.com/hostinfo" @@ -50,6 +49,7 @@ import ( var ( debugCaptureCmd func() *ffcli.Command // or nil + debugPortmapCmd func() *ffcli.Command // or nil ) func debugCmd() *ffcli.Command { @@ -319,21 +319,7 @@ func debugCmd() *ffcli.Command { ShortHelp: "Test a 
DERP configuration", }, ccall(debugCaptureCmd), - { - Name: "portmap", - ShortUsage: "tailscale debug portmap", - Exec: debugPortmap, - ShortHelp: "Run portmap debugging", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("portmap") - fs.DurationVar(&debugPortmapArgs.duration, "duration", 5*time.Second, "timeout for port mapping") - fs.StringVar(&debugPortmapArgs.ty, "type", "", `portmap debug type (one of "", "pmp", "pcp", or "upnp")`) - fs.StringVar(&debugPortmapArgs.gatewayAddr, "gateway-addr", "", `override gateway IP (must also pass --self-addr)`) - fs.StringVar(&debugPortmapArgs.selfAddr, "self-addr", "", `override self IP (must also pass --gateway-addr)`) - fs.BoolVar(&debugPortmapArgs.logHTTP, "log-http", false, `print all HTTP requests and responses to the log`) - return fs - })(), - }, + ccall(debugPortmapCmd), { Name: "peer-endpoint-changes", ShortUsage: "tailscale debug peer-endpoint-changes ", @@ -1210,44 +1196,6 @@ func runSetExpire(ctx context.Context, args []string) error { return localClient.DebugSetExpireIn(ctx, setExpireArgs.in) } -var debugPortmapArgs struct { - duration time.Duration - gatewayAddr string - selfAddr string - ty string - logHTTP bool -} - -func debugPortmap(ctx context.Context, args []string) error { - opts := &local.DebugPortmapOpts{ - Duration: debugPortmapArgs.duration, - Type: debugPortmapArgs.ty, - LogHTTP: debugPortmapArgs.logHTTP, - } - if (debugPortmapArgs.gatewayAddr != "") != (debugPortmapArgs.selfAddr != "") { - return fmt.Errorf("if one of --gateway-addr and --self-addr is provided, the other must be as well") - } - if debugPortmapArgs.gatewayAddr != "" { - var err error - opts.GatewayAddr, err = netip.ParseAddr(debugPortmapArgs.gatewayAddr) - if err != nil { - return fmt.Errorf("invalid --gateway-addr: %w", err) - } - opts.SelfAddr, err = netip.ParseAddr(debugPortmapArgs.selfAddr) - if err != nil { - return fmt.Errorf("invalid --self-addr: %w", err) - } - } - rc, err := localClient.DebugPortmap(ctx, opts) - if err != nil { - return err - } - defer rc.Close() - - _, err = io.Copy(os.Stdout, rc) - return err -} - func runPeerEndpointChanges(ctx context.Context, args []string) error { st, err := localClient.Status(ctx) if err != nil { diff --git a/cmd/tailscale/cli/netcheck.go b/cmd/tailscale/cli/netcheck.go index 0bdab59cb..5ae8db8fa 100644 --- a/cmd/tailscale/cli/netcheck.go +++ b/cmd/tailscale/cli/netcheck.go @@ -17,14 +17,23 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/net/netcheck" "tailscale.com/net/netmon" - "tailscale.com/net/portmapper" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/net/tlsdial" "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/util/eventbus" + + // The "netcheck" command also wants the portmapper linked. + // + // TODO: make that subcommand either hit LocalAPI for that info, or use a + // tailscaled subcommand, to avoid making the CLI also link in the portmapper. + // For now (2025-09-15), keep doing what we've done for the past five years and + // keep linking it here. + _ "tailscale.com/feature/condregister/portmapper" ) var netcheckCmd = &ffcli.Command{ @@ -56,14 +65,13 @@ func runNetcheck(ctx context.Context, args []string) error { return err } - // Ensure that we close the portmapper after running a netcheck; this - // will release any port mappings created. 
- pm := portmapper.NewClient(portmapper.Config{ - Logf: logf, - NetMon: netMon, - EventBus: bus, - }) - defer pm.Close() + var pm portmappertype.Client + if buildfeatures.HasPortMapper { + // Ensure that we close the portmapper after running a netcheck; this + // will release any port mappings created. + pm = portmappertype.HookNewPortMapper.Get()(logf, bus, netMon, nil, nil) + defer pm.Close() + } c := &netcheck.Client{ NetMon: netMon, @@ -210,6 +218,9 @@ func printReport(dm *tailcfg.DERPMap, report *netcheck.Report) error { } func portMapping(r *netcheck.Report) string { + if !buildfeatures.HasPortMapper { + return "binary built without portmapper support" + } if !r.AnyPortMappingChecked() { return "not checked" } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index a39363353..c86af7ea7 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -96,7 +96,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlhttp from tailscale.com/cmd/tailscale/cli tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp - tailscale.com/control/controlknobs from tailscale.com/net/portmapper tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp+ tailscale.com/derp/derphttp from tailscale.com/net/netcheck @@ -105,7 +104,10 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web tailscale.com/feature from tailscale.com/tsweb+ + tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscale/cli tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/portmapper from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/cmd/tailscale/cli tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli @@ -131,7 +133,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlhttp+ tailscale.com/net/ping from tailscale.com/net/netcheck - tailscale.com/net/portmapper from tailscale.com/cmd/tailscale/cli+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/sockstats from tailscale.com/control/controlhttp+ tailscale.com/net/stun from tailscale.com/net/netcheck L tailscale.com/net/tcpinfo from tailscale.com/derp @@ -175,7 +178,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/cmd/tailscale/cli+ - tailscale.com/util/eventbus from tailscale.com/net/portmapper+ + tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/groupmember from tailscale.com/client/web 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ @@ -351,7 +354,7 @@ 
tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/tailscale/goupnp+ + encoding/xml from github.com/godbus/dbus/v5/introspect+ errors from archive/tar+ expvar from tailscale.com/derp+ flag from github.com/peterbourgon/ff/v3+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 736c268dc..d4e1f13bf 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -272,10 +272,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/feature/wakeonlan+ - tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ + tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister + tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister tailscale.com/feature/drive from tailscale.com/feature/condregister + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/relayserver from tailscale.com/feature/condregister tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ tailscale.com/feature/taildrop from tailscale.com/feature/condregister @@ -338,7 +341,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/packet from tailscale.com/net/connstats+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ - tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper+ + tailscale.com/net/portmapper/portmappertype from tailscale.com/feature/portmapper+ tailscale.com/net/proxymux from tailscale.com/cmd/tailscaled tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index a334eb9b7..1609ba633 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -90,3 +90,21 @@ func TestOmitTailnetLock(t *testing.T) { }, }.Check(t) } + +func TestOmitPortmapper(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_portmapper,ts_include_cli,ts_omit_debugportmapper", + OnDep: func(dep string) { + if dep == "tailscale.com/net/portmapper" { + t.Errorf("unexpected dep with ts_omit_portmapper: %q", dep) + return + } + if strings.Contains(dep, "goupnp") || strings.Contains(dep, "/soap") || + strings.Contains(dep, "internetgateway2") { + t.Errorf("unexpected dep with ts_omit_portmapper: %q", dep) + } + }, + }.Check(t) +} diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index c9cd12d41..0aafff8e1 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -239,7 +239,9 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ - 
tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ + tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ @@ -295,7 +297,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ - tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock diff --git a/feature/buildfeatures/feature_debugportmapper_disabled.go b/feature/buildfeatures/feature_debugportmapper_disabled.go new file mode 100644 index 000000000..eff85b8ba --- /dev/null +++ b/feature/buildfeatures/feature_debugportmapper_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_debugportmapper + +package buildfeatures + +// HasDebugPortMapper is whether the binary was built with support for modular feature "portmapper debug support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debugportmapper" build tag. +// It's a const so it can be used for dead code elimination. +const HasDebugPortMapper = false diff --git a/feature/buildfeatures/feature_debugportmapper_enabled.go b/feature/buildfeatures/feature_debugportmapper_enabled.go new file mode 100644 index 000000000..491aa5ed8 --- /dev/null +++ b/feature/buildfeatures/feature_debugportmapper_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_debugportmapper + +package buildfeatures + +// HasDebugPortMapper is whether the binary was built with support for modular feature "portmapper debug support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debugportmapper" build tag. +// It's a const so it can be used for dead code elimination. +const HasDebugPortMapper = true diff --git a/feature/buildfeatures/feature_portmapper_disabled.go b/feature/buildfeatures/feature_portmapper_disabled.go new file mode 100644 index 000000000..212b22d40 --- /dev/null +++ b/feature/buildfeatures/feature_portmapper_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_portmapper + +package buildfeatures + +// HasPortMapper is whether the binary was built with support for modular feature "NAT-PMP/PCP/UPnP port mapping support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_portmapper" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasPortMapper = false diff --git a/feature/buildfeatures/feature_portmapper_enabled.go b/feature/buildfeatures/feature_portmapper_enabled.go new file mode 100644 index 000000000..2f915d277 --- /dev/null +++ b/feature/buildfeatures/feature_portmapper_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_portmapper + +package buildfeatures + +// HasPortMapper is whether the binary was built with support for modular feature "NAT-PMP/PCP/UPnP port mapping support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_portmapper" build tag. +// It's a const so it can be used for dead code elimination. +const HasPortMapper = true diff --git a/feature/condregister/condregister.go b/feature/condregister/condregister.go index f90250951..69e2b071c 100644 --- a/feature/condregister/condregister.go +++ b/feature/condregister/condregister.go @@ -5,3 +5,10 @@ // by build tags. It is one central package that callers can empty import // to ensure all conditional features are registered. package condregister + +// Portmapper is special in that the CLI also needs to link it in, +// so it's pulled out into its own package, rather than using a maybe_*.go +// file in condregister. +import ( + _ "tailscale.com/feature/condregister/portmapper" +) diff --git a/feature/condregister/maybe_debugportmapper.go b/feature/condregister/maybe_debugportmapper.go new file mode 100644 index 000000000..4990d09ea --- /dev/null +++ b/feature/condregister/maybe_debugportmapper.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_debugportmapper + +package condregister + +import _ "tailscale.com/feature/debugportmapper" diff --git a/feature/condregister/portmapper/doc.go b/feature/condregister/portmapper/doc.go new file mode 100644 index 000000000..5c30538c4 --- /dev/null +++ b/feature/condregister/portmapper/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package portmapper registers support for portmapper +// if it's not disabled via the ts_omit_portmapper build tag. +package portmapper diff --git a/feature/condregister/portmapper/maybe_portmapper.go b/feature/condregister/portmapper/maybe_portmapper.go new file mode 100644 index 000000000..c306fd3d5 --- /dev/null +++ b/feature/condregister/portmapper/maybe_portmapper.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_portmapper + +package portmapper + +import _ "tailscale.com/feature/portmapper" diff --git a/feature/debugportmapper/debugportmapper.go b/feature/debugportmapper/debugportmapper.go new file mode 100644 index 000000000..2625086c6 --- /dev/null +++ b/feature/debugportmapper/debugportmapper.go @@ -0,0 +1,204 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package debugportmapper registers support for debugging Tailscale's +// portmapping support. 
+package debugportmapper + +import ( + "context" + "fmt" + "net" + "net/http" + "net/netip" + "strconv" + "strings" + "sync" + "time" + + "tailscale.com/ipn/localapi" + "tailscale.com/net/netmon" + "tailscale.com/net/portmapper" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" +) + +func init() { + localapi.Register("debug-portmap", serveDebugPortmap) +} + +func serveDebugPortmap(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + w.Header().Set("Content-Type", "text/plain") + + dur, err := time.ParseDuration(r.FormValue("duration")) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + gwSelf := r.FormValue("gateway_and_self") + + trueFunc := func() bool { return true } + // Update portmapper debug flags + debugKnobs := &portmapper.DebugKnobs{VerboseLogs: true} + switch r.FormValue("type") { + case "": + case "pmp": + debugKnobs.DisablePCPFunc = trueFunc + debugKnobs.DisableUPnPFunc = trueFunc + case "pcp": + debugKnobs.DisablePMPFunc = trueFunc + debugKnobs.DisableUPnPFunc = trueFunc + case "upnp": + debugKnobs.DisablePCPFunc = trueFunc + debugKnobs.DisablePMPFunc = trueFunc + default: + http.Error(w, "unknown portmap debug type", http.StatusBadRequest) + return + } + if k := h.LocalBackend().ControlKnobs(); k != nil { + if k.DisableUPnP.Load() { + debugKnobs.DisableUPnPFunc = trueFunc + } + } + + if defBool(r.FormValue("log_http"), false) { + debugKnobs.LogHTTP = true + } + + var ( + logLock sync.Mutex + handlerDone bool + ) + logf := func(format string, args ...any) { + if !strings.HasSuffix(format, "\n") { + format = format + "\n" + } + + logLock.Lock() + defer logLock.Unlock() + + // The portmapper can call this log function after the HTTP + // handler returns, which is not allowed and can cause a panic. + // If this happens, ignore the log lines since this typically + // occurs due to a client disconnect. + if handlerDone { + return + } + + // Write and flush each line to the client so that output is streamed + fmt.Fprintf(w, format, args...) 
+ if f, ok := w.(http.Flusher); ok { + f.Flush() + } + } + defer func() { + logLock.Lock() + handlerDone = true + logLock.Unlock() + }() + + ctx, cancel := context.WithTimeout(r.Context(), dur) + defer cancel() + + done := make(chan bool, 1) + + var c *portmapper.Client + c = portmapper.NewClient(portmapper.Config{ + Logf: logger.WithPrefix(logf, "portmapper: "), + NetMon: h.LocalBackend().NetMon(), + DebugKnobs: debugKnobs, + EventBus: h.LocalBackend().EventBus(), + OnChange: func() { + logf("portmapping changed.") + logf("have mapping: %v", c.HaveMapping()) + + if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { + logf("cb: mapping: %v", ext) + select { + case done <- true: + default: + } + return + } + logf("cb: no mapping") + }, + }) + defer c.Close() + + bus := eventbus.New() + defer bus.Close() + netMon, err := netmon.New(bus, logger.WithPrefix(logf, "monitor: ")) + if err != nil { + logf("error creating monitor: %v", err) + return + } + + gatewayAndSelfIP := func() (gw, self netip.Addr, ok bool) { + if a, b, ok := strings.Cut(gwSelf, "/"); ok { + gw = netip.MustParseAddr(a) + self = netip.MustParseAddr(b) + return gw, self, true + } + return netMon.GatewayAndSelfIP() + } + + c.SetGatewayLookupFunc(gatewayAndSelfIP) + + gw, selfIP, ok := gatewayAndSelfIP() + if !ok { + logf("no gateway or self IP; %v", netMon.InterfaceState()) + return + } + logf("gw=%v; self=%v", gw, selfIP) + + uc, err := net.ListenPacket("udp", "0.0.0.0:0") + if err != nil { + return + } + defer uc.Close() + c.SetLocalPort(uint16(uc.LocalAddr().(*net.UDPAddr).Port)) + + res, err := c.Probe(ctx) + if err != nil { + logf("error in Probe: %v", err) + return + } + logf("Probe: %+v", res) + + if !res.PCP && !res.PMP && !res.UPnP { + logf("no portmapping services available") + return + } + + if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { + logf("mapping: %v", ext) + } else { + logf("no mapping") + } + + select { + case <-done: + case <-ctx.Done(): + if r.Context().Err() == nil { + logf("serveDebugPortmap: context done: %v", ctx.Err()) + } else { + h.Logf("serveDebugPortmap: context done: %v", ctx.Err()) + } + } +} + +func defBool(a string, def bool) bool { + if a == "" { + return def + } + v, err := strconv.ParseBool(a) + if err != nil { + return def + } + return v +} diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 6778593fa..2c5f32310 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -48,9 +48,11 @@ var Features = map[FeatureTag]FeatureMeta{ "cli": {"CLI", "embed the CLI into the tailscaled binary"}, "completion": {"Completion", "CLI shell completion"}, "debugeventbus": {"DebugEventBus", "eventbus debug support"}, + "debugportmapper": {"DebugPortMapper", "portmapper debug support"}, "desktop_sessions": {"DesktopSessions", "Desktop sessions support"}, "drive": {"Drive", "Tailscale Drive (file server) support"}, "kube": {"Kube", "Kubernetes integration"}, + "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support"}, "relayserver": {"RelayServer", "Relay server"}, "serve": {"Serve", "Serve and Funnel support"}, "ssh": {"SSH", "Tailscale SSH support"}, diff --git a/feature/portmapper/portmapper.go b/feature/portmapper/portmapper.go new file mode 100644 index 000000000..e7be00ad1 --- /dev/null +++ b/feature/portmapper/portmapper.go @@ -0,0 +1,38 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package portmapper registers support for NAT-PMP, PCP, and UPnP port +// 
mapping protocols to help get direction connections through NATs. +package portmapper + +import ( + "tailscale.com/net/netmon" + "tailscale.com/net/portmapper" + "tailscale.com/net/portmapper/portmappertype" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" +) + +func init() { + portmappertype.HookNewPortMapper.Set(newPortMapper) +} + +func newPortMapper( + logf logger.Logf, + bus *eventbus.Bus, + netMon *netmon.Monitor, + disableUPnPOrNil func() bool, + onlyTCP443OrNil func() bool) portmappertype.Client { + + pm := portmapper.NewClient(portmapper.Config{ + EventBus: bus, + Logf: logf, + NetMon: netMon, + DebugKnobs: &portmapper.DebugKnobs{ + DisableAll: onlyTCP443OrNil, + DisableUPnPFunc: disableUPnPOrNil, + }, + }) + pm.SetGatewayLookupFunc(netMon.GatewayAndSelfIP) + return pm +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5cdfaf549..988c0b538 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6780,6 +6780,11 @@ func (b *LocalBackend) ControlKnobs() *controlknobs.Knobs { return b.sys.ControlKnobs() } +// EventBus returns the node's event bus. +func (b *LocalBackend) EventBus() *eventbus.Bus { + return b.sys.Bus.Get() +} + // MagicConn returns the backend's *magicsock.Conn. func (b *LocalBackend) MagicConn() *magicsock.Conn { return b.sys.MagicSock.Get() diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 354cf6864..0505e068b 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -34,6 +34,7 @@ import ( "tailscale.com/control/controlclient" "tailscale.com/drive" "tailscale.com/drive/driveimpl" + _ "tailscale.com/feature/condregister/portmapper" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 7e54cef85..0c3a0a4ed 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -35,9 +35,7 @@ import ( "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnstate" "tailscale.com/logtail" - "tailscale.com/net/netmon" "tailscale.com/net/netutil" - "tailscale.com/net/portmapper" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/dnstype" @@ -90,7 +88,6 @@ var handler = map[string]LocalAPIHandler{ "debug-packet-filter-matches": (*Handler).serveDebugPacketFilterMatches, "debug-packet-filter-rules": (*Handler).serveDebugPacketFilterRules, "debug-peer-endpoint-changes": (*Handler).serveDebugPeerEndpointChanges, - "debug-portmap": (*Handler).serveDebugPortmap, "derpmap": (*Handler).serveDERPMap, "dev-set-state-store": (*Handler).serveDevSetStateStore, "dial": (*Handler).serveDial, @@ -762,166 +759,6 @@ func (h *Handler) serveDebugPacketFilterMatches(w http.ResponseWriter, r *http.R enc.Encode(nm.PacketFilter) } -func (h *Handler) serveDebugPortmap(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - w.Header().Set("Content-Type", "text/plain") - - dur, err := time.ParseDuration(r.FormValue("duration")) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - gwSelf := r.FormValue("gateway_and_self") - - // Update portmapper debug flags - debugKnobs := &portmapper.DebugKnobs{VerboseLogs: true} - switch r.FormValue("type") { - case "": - case "pmp": - debugKnobs.DisablePCP = true - debugKnobs.DisableUPnP = true - case "pcp": - debugKnobs.DisablePMP = true - debugKnobs.DisableUPnP = true - case "upnp": - debugKnobs.DisablePCP = true - debugKnobs.DisablePMP = true - default: - 
http.Error(w, "unknown portmap debug type", http.StatusBadRequest) - return - } - - if defBool(r.FormValue("log_http"), false) { - debugKnobs.LogHTTP = true - } - - var ( - logLock sync.Mutex - handlerDone bool - ) - logf := func(format string, args ...any) { - if !strings.HasSuffix(format, "\n") { - format = format + "\n" - } - - logLock.Lock() - defer logLock.Unlock() - - // The portmapper can call this log function after the HTTP - // handler returns, which is not allowed and can cause a panic. - // If this happens, ignore the log lines since this typically - // occurs due to a client disconnect. - if handlerDone { - return - } - - // Write and flush each line to the client so that output is streamed - fmt.Fprintf(w, format, args...) - if f, ok := w.(http.Flusher); ok { - f.Flush() - } - } - defer func() { - logLock.Lock() - handlerDone = true - logLock.Unlock() - }() - - ctx, cancel := context.WithTimeout(r.Context(), dur) - defer cancel() - - done := make(chan bool, 1) - - var c *portmapper.Client - c = portmapper.NewClient(portmapper.Config{ - Logf: logger.WithPrefix(logf, "portmapper: "), - NetMon: h.b.NetMon(), - DebugKnobs: debugKnobs, - ControlKnobs: h.b.ControlKnobs(), - EventBus: h.eventBus, - OnChange: func() { - logf("portmapping changed.") - logf("have mapping: %v", c.HaveMapping()) - - if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { - logf("cb: mapping: %v", ext) - select { - case done <- true: - default: - } - return - } - logf("cb: no mapping") - }, - }) - defer c.Close() - - bus := eventbus.New() - defer bus.Close() - netMon, err := netmon.New(bus, logger.WithPrefix(logf, "monitor: ")) - if err != nil { - logf("error creating monitor: %v", err) - return - } - - gatewayAndSelfIP := func() (gw, self netip.Addr, ok bool) { - if a, b, ok := strings.Cut(gwSelf, "/"); ok { - gw = netip.MustParseAddr(a) - self = netip.MustParseAddr(b) - return gw, self, true - } - return netMon.GatewayAndSelfIP() - } - - c.SetGatewayLookupFunc(gatewayAndSelfIP) - - gw, selfIP, ok := gatewayAndSelfIP() - if !ok { - logf("no gateway or self IP; %v", netMon.InterfaceState()) - return - } - logf("gw=%v; self=%v", gw, selfIP) - - uc, err := net.ListenPacket("udp", "0.0.0.0:0") - if err != nil { - return - } - defer uc.Close() - c.SetLocalPort(uint16(uc.LocalAddr().(*net.UDPAddr).Port)) - - res, err := c.Probe(ctx) - if err != nil { - logf("error in Probe: %v", err) - return - } - logf("Probe: %+v", res) - - if !res.PCP && !res.PMP && !res.UPnP { - logf("no portmapping services available") - return - } - - if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { - logf("mapping: %v", ext) - } else { - logf("no mapping") - } - - select { - case <-done: - case <-ctx.Done(): - if r.Context().Err() == nil { - logf("serveDebugPortmap: context done: %v", ctx.Err()) - } else { - h.logf("serveDebugPortmap: context done: %v", ctx.Err()) - } - } -} - // EventError provides the JSON encoding of internal errors from event processing. type EventError struct { Error string diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index cb622a339..ba9a8cb0f 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -33,7 +33,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/net/netns" "tailscale.com/net/ping" - "tailscale.com/net/portmapper" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/net/sockstats" "tailscale.com/net/stun" "tailscale.com/syncs" @@ -215,7 +215,7 @@ type Client struct { // PortMapper, if non-nil, is used for portmap queries. 
// If nil, portmap discovery is not done. - PortMapper *portmapper.Client // lazily initialized on first use + PortMapper portmappertype.Client // UseDNSCache controls whether this client should use a // *dnscache.Resolver to resolve DERP hostnames, when no IP address is @@ -730,7 +730,7 @@ func (rs *reportState) probePortMapServices() { res, err := rs.c.PortMapper.Probe(context.Background()) if err != nil { - if !errors.Is(err, portmapper.ErrGatewayRange) { + if !errors.Is(err, portmappertype.ErrGatewayRange) { // "skipping portmap; gateway range likely lacks support" // is not very useful, and too spammy on cloud systems. // If there are other errors, we want to log those. diff --git a/net/portmapper/igd_test.go b/net/portmapper/igd_test.go index cca87e0b8..77015f5bf 100644 --- a/net/portmapper/igd_test.go +++ b/net/portmapper/igd_test.go @@ -14,7 +14,6 @@ import ( "sync/atomic" "testing" - "tailscale.com/control/controlknobs" "tailscale.com/net/netaddr" "tailscale.com/net/netmon" "tailscale.com/syncs" @@ -273,10 +272,9 @@ func newTestClient(t *testing.T, igd *TestIGD, bus *eventbus.Bus) *Client { } var c *Client c = NewClient(Config{ - Logf: tstest.WhileTestRunningLogger(t), - NetMon: netmon.NewStatic(), - ControlKnobs: new(controlknobs.Knobs), - EventBus: bus, + Logf: tstest.WhileTestRunningLogger(t), + NetMon: netmon.NewStatic(), + EventBus: bus, OnChange: func() { // TODO(creachadair): Remove. t.Logf("port map changed") t.Logf("have mapping: %v", c.HaveMapping()) diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index a1ab86815..024c6dc78 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -8,7 +8,6 @@ package portmapper import ( "context" "encoding/binary" - "errors" "fmt" "io" "net" @@ -20,12 +19,12 @@ import ( "time" "go4.org/mem" - "tailscale.com/control/controlknobs" "tailscale.com/envknob" "tailscale.com/net/netaddr" "tailscale.com/net/neterror" "tailscale.com/net/netmon" "tailscale.com/net/netns" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/net/sockstats" "tailscale.com/syncs" "tailscale.com/types/logger" @@ -34,6 +33,13 @@ import ( "tailscale.com/util/eventbus" ) +var ( + ErrNoPortMappingServices = portmappertype.ErrNoPortMappingServices + ErrGatewayRange = portmappertype.ErrGatewayRange + ErrGatewayIPv6 = portmappertype.ErrGatewayIPv6 + ErrPortMappingDisabled = portmappertype.ErrPortMappingDisabled +) + var disablePortMapperEnv = envknob.RegisterBool("TS_DISABLE_PORTMAPPER") // DebugKnobs contains debug configuration that can be provided when creating a @@ -49,15 +55,33 @@ type DebugKnobs struct { LogHTTP bool // Disable* disables a specific service from mapping. - DisableUPnP bool - DisablePMP bool - DisablePCP bool + // If the funcs are nil or return false, the service is not disabled. + // Use the corresponding accessor methods without the "Func" suffix + // to check whether a service is disabled. + DisableUPnPFunc func() bool + DisablePMPFunc func() bool + DisablePCPFunc func() bool // DisableAll, if non-nil, is a func that reports whether all port // mapping attempts should be disabled. DisableAll func() bool } +// DisableUPnP reports whether UPnP is disabled. +func (k *DebugKnobs) DisableUPnP() bool { + return k != nil && k.DisableUPnPFunc != nil && k.DisableUPnPFunc() +} + +// DisablePMP reports whether NAT-PMP is disabled. +func (k *DebugKnobs) DisablePMP() bool { + return k != nil && k.DisablePMPFunc != nil && k.DisablePMPFunc() +} + +// DisablePCP reports whether PCP is disabled. 
+func (k *DebugKnobs) DisablePCP() bool { + return k != nil && k.DisablePCPFunc != nil && k.DisablePCPFunc() +} + func (k *DebugKnobs) disableAll() bool { if disablePortMapperEnv() { return true @@ -88,11 +112,10 @@ type Client struct { // The following two fields must both be non-nil. // Both are immutable after construction. pubClient *eventbus.Client - updates *eventbus.Publisher[Mapping] + updates *eventbus.Publisher[portmappertype.Mapping] logf logger.Logf netMon *netmon.Monitor // optional; nil means interfaces will be looked up on-demand - controlKnobs *controlknobs.Knobs ipAndGateway func() (gw, ip netip.Addr, ok bool) onChange func() // or nil debug DebugKnobs @@ -130,6 +153,8 @@ type Client struct { mapping mapping // non-nil if we have a mapping } +var _ portmappertype.Client = (*Client)(nil) + func (c *Client) vlogf(format string, args ...any) { if c.debug.VerboseLogs { c.logf(format, args...) @@ -159,7 +184,6 @@ type mapping interface { MappingDebug() string } -// HaveMapping reports whether we have a current valid mapping. func (c *Client) HaveMapping() bool { c.mu.Lock() defer c.mu.Unlock() @@ -223,10 +247,6 @@ type Config struct { // debugging. If nil, a sensible set of defaults will be used. DebugKnobs *DebugKnobs - // ControlKnobs, if non-nil, specifies knobs from the control plane that - // might disable port mapping. - ControlKnobs *controlknobs.Knobs - // OnChange is called to run in a new goroutine whenever the port mapping // status has changed. If nil, no callback is issued. OnChange func() @@ -246,10 +266,9 @@ func NewClient(c Config) *Client { netMon: c.NetMon, ipAndGateway: netmon.LikelyHomeRouterIP, // TODO(bradfitz): move this to method on netMon onChange: c.OnChange, - controlKnobs: c.ControlKnobs, } ret.pubClient = c.EventBus.Client("portmapper") - ret.updates = eventbus.Publish[Mapping](ret.pubClient) + ret.updates = eventbus.Publish[portmappertype.Mapping](ret.pubClient) if ret.logf == nil { ret.logf = logger.Discard } @@ -448,13 +467,6 @@ func IsNoMappingError(err error) bool { return ok } -var ( - ErrNoPortMappingServices = errors.New("no port mapping services were found") - ErrGatewayRange = errors.New("skipping portmap; gateway range likely lacks support") - ErrGatewayIPv6 = errors.New("skipping portmap; no IPv6 support for portmapping") - ErrPortMappingDisabled = errors.New("port mapping is disabled") -) - // GetCachedMappingOrStartCreatingOne quickly returns with our current cached portmapping, if any. // If there's not one, it starts up a background goroutine to create one. // If the background goroutine ends up creating one, the onChange hook registered with the @@ -512,7 +524,7 @@ func (c *Client) createMapping() { // the control flow to eliminate that possibility. Meanwhile, this // mitigates a panic downstream, cf. #16662. } - c.updates.Publish(Mapping{ + c.updates.Publish(portmappertype.Mapping{ External: mapping.External(), Type: mapping.MappingType(), GoodUntil: mapping.GoodUntil(), @@ -524,15 +536,6 @@ func (c *Client) createMapping() { } } -// Mapping is an event recording the allocation of a port mapping. -type Mapping struct { - External netip.AddrPort - Type string - GoodUntil time.Time - - // TODO(creachadair): Record whether we reused an existing mapping? -} - // wildcardIP is used when the previous external IP is not known for PCP port mapping. 
var wildcardIP = netip.MustParseAddr("0.0.0.0") @@ -545,7 +548,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (mapping mapping, exter if c.debug.disableAll() { return nil, netip.AddrPort{}, NoMappingError{ErrPortMappingDisabled} } - if c.debug.DisableUPnP && c.debug.DisablePCP && c.debug.DisablePMP { + if c.debug.DisableUPnP() && c.debug.DisablePCP() && c.debug.DisablePMP() { return nil, netip.AddrPort{}, NoMappingError{ErrNoPortMappingServices} } gw, myIP, ok := c.gatewayAndSelfIP() @@ -624,7 +627,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (mapping mapping, exter prevPort = m.External().Port() } - if c.debug.DisablePCP && c.debug.DisablePMP { + if c.debug.DisablePCP() && c.debug.DisablePMP() { c.mu.Unlock() if external, ok := c.getUPnPPortMapping(ctx, gw, internalAddr, prevPort); ok { return nil, external, nil @@ -675,7 +678,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (mapping mapping, exter pxpAddr := netip.AddrPortFrom(gw, c.pxpPort()) - preferPCP := !c.debug.DisablePCP && (c.debug.DisablePMP || (!haveRecentPMP && haveRecentPCP)) + preferPCP := !c.debug.DisablePCP() && (c.debug.DisablePMP() || (!haveRecentPMP && haveRecentPCP)) // Create a mapping, defaulting to PMP unless only PCP was seen recently. if preferPCP { @@ -860,19 +863,13 @@ func parsePMPResponse(pkt []byte) (res pmpResponse, ok bool) { return res, true } -type ProbeResult struct { - PCP bool - PMP bool - UPnP bool -} - // Probe returns a summary of which port mapping services are // available on the network. // // If a probe has run recently and there haven't been any network changes since, // the returned result might be server from the Client's cache, without // sending any network traffic. -func (c *Client) Probe(ctx context.Context) (res ProbeResult, err error) { +func (c *Client) Probe(ctx context.Context) (res portmappertype.ProbeResult, err error) { if c.debug.disableAll() { return res, ErrPortMappingDisabled } @@ -907,19 +904,19 @@ func (c *Client) Probe(ctx context.Context) (res ProbeResult, err error) { // https://github.com/tailscale/tailscale/issues/1001 if c.sawPMPRecently() { res.PMP = true - } else if !c.debug.DisablePMP { + } else if !c.debug.DisablePMP() { metricPMPSent.Add(1) uc.WriteToUDPAddrPort(pmpReqExternalAddrPacket, pxpAddr) } if c.sawPCPRecently() { res.PCP = true - } else if !c.debug.DisablePCP { + } else if !c.debug.DisablePCP() { metricPCPSent.Add(1) uc.WriteToUDPAddrPort(pcpAnnounceRequest(myIP), pxpAddr) } if c.sawUPnPRecently() { res.UPnP = true - } else if !c.debug.DisableUPnP { + } else if !c.debug.DisableUPnP() { // Strictly speaking, you discover UPnP services by sending an // SSDP query (which uPnPPacket is) to udp/1900 on the SSDP // multicast address, and then get a flood of responses back diff --git a/net/portmapper/portmapper_test.go b/net/portmapper/portmapper_test.go index e66d3c159..a697a3908 100644 --- a/net/portmapper/portmapper_test.go +++ b/net/portmapper/portmapper_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "tailscale.com/control/controlknobs" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/util/eventbus/eventbustest" ) @@ -19,7 +19,7 @@ func TestCreateOrGetMapping(t *testing.T) { if v, _ := strconv.ParseBool(os.Getenv("HIT_NETWORK")); !v { t.Skip("skipping test without HIT_NETWORK=1") } - c := NewClient(Config{Logf: t.Logf, ControlKnobs: new(controlknobs.Knobs)}) + c := NewClient(Config{Logf: t.Logf}) defer c.Close() c.SetLocalPort(1234) for i := range 2 { @@ -35,7 +35,7 @@ func TestClientProbe(t 
*testing.T) { if v, _ := strconv.ParseBool(os.Getenv("HIT_NETWORK")); !v { t.Skip("skipping test without HIT_NETWORK=1") } - c := NewClient(Config{Logf: t.Logf, ControlKnobs: new(controlknobs.Knobs)}) + c := NewClient(Config{Logf: t.Logf}) defer c.Close() for i := range 3 { if i > 0 { @@ -50,7 +50,7 @@ func TestClientProbeThenMap(t *testing.T) { if v, _ := strconv.ParseBool(os.Getenv("HIT_NETWORK")); !v { t.Skip("skipping test without HIT_NETWORK=1") } - c := NewClient(Config{Logf: t.Logf, ControlKnobs: new(controlknobs.Knobs)}) + c := NewClient(Config{Logf: t.Logf}) defer c.Close() c.debug.VerboseLogs = true c.SetLocalPort(1234) @@ -150,7 +150,7 @@ func TestUpdateEvent(t *testing.T) { t.Fatalf("Probe failed: %v", err) } c.GetCachedMappingOrStartCreatingOne() - if err := eventbustest.Expect(tw, eventbustest.Type[Mapping]()); err != nil { + if err := eventbustest.Expect(tw, eventbustest.Type[portmappertype.Mapping]()); err != nil { t.Error(err.Error()) } } diff --git a/net/portmapper/portmappertype/portmappertype.go b/net/portmapper/portmappertype/portmappertype.go new file mode 100644 index 000000000..cc8358a4a --- /dev/null +++ b/net/portmapper/portmappertype/portmappertype.go @@ -0,0 +1,88 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package portmappertype defines the net/portmapper interface, which may or may not be +// linked into the binary. +package portmappertype + +import ( + "context" + "errors" + "net/netip" + "time" + + "tailscale.com/feature" + "tailscale.com/net/netmon" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" +) + +// HookNewPortMapper is a hook to install the portmapper creation function. +// It must be set by an init function when buildfeatures.HasPortmapper is true. +var HookNewPortMapper feature.Hook[func(logf logger.Logf, + bus *eventbus.Bus, + netMon *netmon.Monitor, + disableUPnPOrNil, + onlyTCP443OrNil func() bool) Client] + +var ( + ErrNoPortMappingServices = errors.New("no port mapping services were found") + ErrGatewayRange = errors.New("skipping portmap; gateway range likely lacks support") + ErrGatewayIPv6 = errors.New("skipping portmap; no IPv6 support for portmapping") + ErrPortMappingDisabled = errors.New("port mapping is disabled") +) + +// ProbeResult is the result of a portmapper probe, saying +// which port mapping protocols were discovered. +type ProbeResult struct { + PCP bool + PMP bool + UPnP bool +} + +// Client is the interface implemented by a portmapper client. +type Client interface { + // Probe returns a summary of which port mapping services are available on + // the network. + // + // If a probe has run recently and there haven't been any network changes + // since, the returned result might be server from the Client's cache, + // without sending any network traffic. + Probe(context.Context) (ProbeResult, error) + + // HaveMapping reports whether we have a current valid mapping. + HaveMapping() bool + + // SetGatewayLookupFunc set the func that returns the machine's default + // gateway IP, and the primary IP address for that gateway. It must be + // called before the client is used. If not called, + // interfaces.LikelyHomeRouterIP is used. + SetGatewayLookupFunc(f func() (gw, myIP netip.Addr, ok bool)) + + // NoteNetworkDown should be called when the network has transitioned to a down state. 
+ // It's too late to release port mappings at this point (the user might've just turned off + // their wifi), but we can make sure we invalidate mappings for later when the network + // comes back. + NoteNetworkDown() + + // GetCachedMappingOrStartCreatingOne quickly returns with our current cached portmapping, if any. + // If there's not one, it starts up a background goroutine to create one. + // If the background goroutine ends up creating one, the onChange hook registered with the + // NewClient constructor (if any) will fire. + GetCachedMappingOrStartCreatingOne() (external netip.AddrPort, ok bool) + + // SetLocalPort updates the local port number to which we want to port + // map UDP traffic + SetLocalPort(localPort uint16) + + Close() error +} + +// Mapping is an event recording the allocation of a port mapping. +type Mapping struct { + External netip.AddrPort + Type string + GoodUntil time.Time + + // TODO(creachadair): Record whether we reused an existing mapping? +} diff --git a/net/portmapper/upnp.go b/net/portmapper/upnp.go index 134183135..d65d6e94d 100644 --- a/net/portmapper/upnp.go +++ b/net/portmapper/upnp.go @@ -209,7 +209,7 @@ func addAnyPortMapping( // The meta is the most recently parsed UDP discovery packet response // from the Internet Gateway Device. func getUPnPRootDevice(ctx context.Context, logf logger.Logf, debug DebugKnobs, gw netip.Addr, meta uPnPDiscoResponse) (rootDev *goupnp.RootDevice, loc *url.URL, err error) { - if debug.DisableUPnP { + if debug.DisableUPnP() { return nil, nil, nil } @@ -434,7 +434,7 @@ func (c *Client) getUPnPPortMapping( internal netip.AddrPort, prevPort uint16, ) (external netip.AddrPort, ok bool) { - if disableUPnpEnv() || c.debug.DisableUPnP || (c.controlKnobs != nil && c.controlKnobs.DisableUPnP.Load()) { + if disableUPnpEnv() || c.debug.DisableUPnP() { return netip.AddrPort{}, false } diff --git a/net/portmapper/upnp_test.go b/net/portmapper/upnp_test.go index c07ec0208..a954b2bea 100644 --- a/net/portmapper/upnp_test.go +++ b/net/portmapper/upnp_test.go @@ -18,6 +18,7 @@ import ( "sync/atomic" "testing" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/tstest" ) @@ -1039,7 +1040,7 @@ func (u *upnpServer) handleControl(w http.ResponseWriter, r *http.Request, handl } } -func mustProbeUPnP(tb testing.TB, ctx context.Context, c *Client) ProbeResult { +func mustProbeUPnP(tb testing.TB, ctx context.Context, c *Client) portmappertype.ProbeResult { tb.Helper() res, err := c.Probe(ctx) if err != nil { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 9b93ce8db..b3e2b7f0e 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -235,7 +235,9 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ - tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ + tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ @@ -291,7 +293,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/packet from 
tailscale.com/ipn/ipnlocal+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ - tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index d9b9b64c1..6b083132f 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -29,6 +29,7 @@ import ( "tailscale.com/client/local" "tailscale.com/control/controlclient" "tailscale.com/envknob" + _ "tailscale.com/feature/condregister/portmapper" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 719cc68a4..6eb566076 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -33,6 +33,7 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/disco" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn/ipnstate" @@ -44,7 +45,7 @@ import ( "tailscale.com/net/netns" "tailscale.com/net/packet" "tailscale.com/net/ping" - "tailscale.com/net/portmapper" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/net/sockopts" "tailscale.com/net/sockstats" "tailscale.com/net/stun" @@ -177,7 +178,7 @@ type Conn struct { // These [eventbus.Subscriber] fields are solely accessed by // consumeEventbusTopics once initialized. - pmSub *eventbus.Subscriber[portmapper.Mapping] + pmSub *eventbus.Subscriber[portmappertype.Mapping] filterSub *eventbus.Subscriber[FilterUpdate] nodeViewsSub *eventbus.Subscriber[NodeViewsUpdate] nodeMutsSub *eventbus.Subscriber[NodeMutationsUpdate] @@ -207,7 +208,8 @@ type Conn struct { // portMapper is the NAT-PMP/PCP/UPnP prober/client, for requesting // port mappings from NAT devices. - portMapper *portmapper.Client + // If nil, the portmapper is disabled. + portMapper portmappertype.Client // derpRecvCh is used by receiveDERP to read DERP messages. // It must have buffer size > 0; see issue 3736. @@ -731,7 +733,7 @@ func NewConn(opts Options) (*Conn, error) { // Subscribe calls must return before NewConn otherwise published // events can be missed. - c.pmSub = eventbus.Subscribe[portmapper.Mapping](c.eventClient) + c.pmSub = eventbus.Subscribe[portmappertype.Mapping](c.eventClient) c.filterSub = eventbus.Subscribe[FilterUpdate](c.eventClient) c.nodeViewsSub = eventbus.Subscribe[NodeViewsUpdate](c.eventClient) c.nodeMutsSub = eventbus.Subscribe[NodeMutationsUpdate](c.eventClient) @@ -747,19 +749,21 @@ func NewConn(opts Options) (*Conn, error) { // Don't log the same log messages possibly every few seconds in our // portmapper. 
- portmapperLogf := logger.WithPrefix(c.logf, "portmapper: ") - portmapperLogf = netmon.LinkChangeLogLimiter(c.connCtx, portmapperLogf, opts.NetMon) - portMapOpts := &portmapper.DebugKnobs{ - DisableAll: func() bool { return opts.DisablePortMapper || c.onlyTCP443.Load() }, - } - c.portMapper = portmapper.NewClient(portmapper.Config{ - EventBus: c.eventBus, - Logf: portmapperLogf, - NetMon: opts.NetMon, - DebugKnobs: portMapOpts, - ControlKnobs: opts.ControlKnobs, - }) - c.portMapper.SetGatewayLookupFunc(opts.NetMon.GatewayAndSelfIP) + if buildfeatures.HasPortMapper && !opts.DisablePortMapper { + portmapperLogf := logger.WithPrefix(c.logf, "portmapper: ") + portmapperLogf = netmon.LinkChangeLogLimiter(c.connCtx, portmapperLogf, opts.NetMon) + var disableUPnP func() bool + if c.controlKnobs != nil { + disableUPnP = c.controlKnobs.DisableUPnP.Load + } + newPortMapper, ok := portmappertype.HookNewPortMapper.GetOk() + if ok { + c.portMapper = newPortMapper(portmapperLogf, opts.EventBus, opts.NetMon, disableUPnP, c.onlyTCP443.Load) + } else if !testenv.InTest() { + panic("unexpected: HookNewPortMapper not set") + } + } + c.netMon = opts.NetMon c.health = opts.HealthTracker c.onPortUpdate = opts.OnPortUpdate @@ -1081,7 +1085,9 @@ func (c *Conn) updateNetInfo(ctx context.Context) (*netcheck.Report, error) { UPnP: report.UPnP, PMP: report.PMP, PCP: report.PCP, - HavePortMap: c.portMapper.HaveMapping(), + } + if c.portMapper != nil { + ni.HavePortMap = c.portMapper.HaveMapping() } for rid, d := range report.RegionV4Latency { ni.DERPLatency[fmt.Sprintf("%d-v4", rid)] = d.Seconds() @@ -1248,7 +1254,7 @@ func (c *Conn) DiscoPublicKey() key.DiscoPublic { func (c *Conn) determineEndpoints(ctx context.Context) ([]tailcfg.Endpoint, error) { var havePortmap bool var portmapExt netip.AddrPort - if runtime.GOOS != "js" { + if runtime.GOOS != "js" && c.portMapper != nil { portmapExt, havePortmap = c.portMapper.GetCachedMappingOrStartCreatingOne() } @@ -1288,7 +1294,7 @@ func (c *Conn) determineEndpoints(ctx context.Context) ([]tailcfg.Endpoint, erro } // If we didn't have a portmap earlier, maybe it's done by now. - if !havePortmap { + if !havePortmap && c.portMapper != nil { portmapExt, havePortmap = c.portMapper.GetCachedMappingOrStartCreatingOne() } if havePortmap { @@ -2662,7 +2668,9 @@ func (c *Conn) SetNetworkUp(up bool) { if up { c.startDerpHomeConnectLocked() } else { - c.portMapper.NoteNetworkDown() + if c.portMapper != nil { + c.portMapper.NoteNetworkDown() + } c.closeAllDerpLocked("network-down") } } @@ -3324,7 +3332,9 @@ func (c *Conn) Close() error { c.derpCleanupTimer.Stop() } c.stopPeriodicReSTUNTimerLocked() - c.portMapper.Close() + if c.portMapper != nil { + c.portMapper.Close() + } c.peerMap.forEachEndpoint(func(ep *endpoint) { ep.stopAndReset() @@ -3577,7 +3587,9 @@ func (c *Conn) rebind(curPortFate currentPortFate) error { if err := c.bindSocket(&c.pconn4, "udp4", curPortFate); err != nil { return fmt.Errorf("magicsock: Rebind IPv4 failed: %w", err) } - c.portMapper.SetLocalPort(c.LocalPort()) + if c.portMapper != nil { + c.portMapper.SetLocalPort(c.LocalPort()) + } c.UpdatePMTUD() return nil } From e180fc267b2fab61641bce08d075ad3e52b97a97 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 16 Sep 2025 10:07:50 -0700 Subject: [PATCH 1326/1708] feature/featuretags, all: add ts_omit_acme to disable TLS cert support I'd started to do this in the earlier ts_omit_server PR but decided to split it into this separate PR. 
Updates #17128 Change-Id: Ief8823a78d1f7bbb79e64a5cab30a7d0a5d6ff4b Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 2 +- client/local/cert.go | 151 ++++++++++++++++++ client/local/local.go | 135 ---------------- client/tailscale/cert.go | 34 ++++ client/tailscale/localclient_aliases.go | 22 --- cmd/tailscale/cli/cert.go | 32 ++-- cmd/tailscale/cli/cli.go | 3 +- cmd/tailscale/cli/configure-synology-cert.go | 6 + .../cli/configure-synology-cert_test.go | 2 + cmd/tailscale/cli/configure.go | 8 +- cmd/tailscaled/deps_test.go | 13 ++ .../buildfeatures/feature_acme_disabled.go | 13 ++ feature/buildfeatures/feature_acme_enabled.go | 13 ++ feature/featuretags/featuretags.go | 1 + ipn/ipnlocal/c2n.go | 56 ------- ipn/ipnlocal/cert.go | 59 ++++++- ipn/ipnlocal/{cert_js.go => cert_disabled.go} | 21 ++- ipn/localapi/cert.go | 6 +- ipn/localapi/localapi.go | 1 - 19 files changed, 342 insertions(+), 236 deletions(-) create mode 100644 client/local/cert.go create mode 100644 client/tailscale/cert.go create mode 100644 feature/buildfeatures/feature_acme_disabled.go create mode 100644 feature/buildfeatures/feature_acme_enabled.go rename ipn/ipnlocal/{cert_js.go => cert_disabled.go} (51%) diff --git a/build_dist.sh b/build_dist.sh index 45d471be0..be0d4d47e 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,7 +41,7 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" - tags="${tags:+$tags,},$($go run ./cmd/featuretags --min)" + tags="${tags:+$tags,},$(GOOS= GOARCH= $go run ./cmd/featuretags --min)" ;; --box) if [ ! -z "${TAGS:-}" ]; then diff --git a/client/local/cert.go b/client/local/cert.go new file mode 100644 index 000000000..bfaac7303 --- /dev/null +++ b/client/local/cert.go @@ -0,0 +1,151 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !js && !ts_omit_acme + +package local + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "go4.org/mem" +) + +// SetDNS adds a DNS TXT record for the given domain name, containing +// the provided TXT value. The intended use case is answering +// LetsEncrypt/ACME dns-01 challenges. +// +// The control plane will only permit SetDNS requests with very +// specific names and values. The name should be +// "_acme-challenge." + your node's MagicDNS name. It's expected that +// clients cache the certs from LetsEncrypt (or whichever CA is +// providing them) and only request new ones as needed; the control plane +// rate limits SetDNS requests. +// +// This is a low-level interface; it's expected that most Tailscale +// users use a higher level interface to getting/using TLS +// certificates. +func (lc *Client) SetDNS(ctx context.Context, name, value string) error { + v := url.Values{} + v.Set("name", name) + v.Set("value", value) + _, err := lc.send(ctx, "POST", "/localapi/v0/set-dns?"+v.Encode(), 200, nil) + return err +} + +// CertPair returns a cert and private key for the provided DNS domain. +// +// It returns a cached certificate from disk if it's still valid. +// +// Deprecated: use [Client.CertPair]. +func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { + return defaultClient.CertPair(ctx, domain) +} + +// CertPair returns a cert and private key for the provided DNS domain. +// +// It returns a cached certificate from disk if it's still valid. +// +// API maturity: this is considered a stable API. 
+func (lc *Client) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { + return lc.CertPairWithValidity(ctx, domain, 0) +} + +// CertPairWithValidity returns a cert and private key for the provided DNS +// domain. +// +// It returns a cached certificate from disk if it's still valid. +// When minValidity is non-zero, the returned certificate will be valid for at +// least the given duration, if permitted by the CA. If the certificate is +// valid, but for less than minValidity, it will be synchronously renewed. +// +// API maturity: this is considered a stable API. +func (lc *Client) CertPairWithValidity(ctx context.Context, domain string, minValidity time.Duration) (certPEM, keyPEM []byte, err error) { + res, err := lc.send(ctx, "GET", fmt.Sprintf("/localapi/v0/cert/%s?type=pair&min_validity=%s", domain, minValidity), 200, nil) + if err != nil { + return nil, nil, err + } + // with ?type=pair, the response PEM is first the one private + // key PEM block, then the cert PEM blocks. + i := mem.Index(mem.B(res), mem.S("--\n--")) + if i == -1 { + return nil, nil, fmt.Errorf("unexpected output: no delimiter") + } + i += len("--\n") + keyPEM, certPEM = res[:i], res[i:] + if mem.Contains(mem.B(certPEM), mem.S(" PRIVATE KEY-----")) { + return nil, nil, fmt.Errorf("unexpected output: key in cert") + } + return certPEM, keyPEM, nil +} + +// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi. +// +// It returns a cached certificate from disk if it's still valid. +// +// It's the right signature to use as the value of +// [tls.Config.GetCertificate]. +// +// Deprecated: use [Client.GetCertificate]. +func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { + return defaultClient.GetCertificate(hi) +} + +// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi. +// +// It returns a cached certificate from disk if it's still valid. +// +// It's the right signature to use as the value of +// [tls.Config.GetCertificate]. +// +// API maturity: this is considered a stable API. +func (lc *Client) GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { + if hi == nil || hi.ServerName == "" { + return nil, errors.New("no SNI ServerName") + } + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + name := hi.ServerName + if !strings.Contains(name, ".") { + if v, ok := lc.ExpandSNIName(ctx, name); ok { + name = v + } + } + certPEM, keyPEM, err := lc.CertPair(ctx, name) + if err != nil { + return nil, err + } + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + return nil, err + } + return &cert, nil +} + +// ExpandSNIName expands bare label name into the most likely actual TLS cert name. +// +// Deprecated: use [Client.ExpandSNIName]. +func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { + return defaultClient.ExpandSNIName(ctx, name) +} + +// ExpandSNIName expands bare label name into the most likely actual TLS cert name. +func (lc *Client) ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { + st, err := lc.StatusWithoutPeers(ctx) + if err != nil { + return "", false + } + for _, d := range st.CertDomains { + if len(d) > len(name)+1 && strings.HasPrefix(d, name) && d[len(name)] == '.' 
{ + return d, true + } + } + return "", false +} diff --git a/client/local/local.go b/client/local/local.go index a132e577b..a606fbdf3 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -9,7 +9,6 @@ import ( "bytes" "cmp" "context" - "crypto/tls" "encoding/base64" "encoding/json" "errors" @@ -28,7 +27,6 @@ import ( "sync" "time" - "go4.org/mem" "tailscale.com/client/tailscale/apitype" "tailscale.com/drive" "tailscale.com/envknob" @@ -907,28 +905,6 @@ func (lc *Client) Logout(ctx context.Context) error { return err } -// SetDNS adds a DNS TXT record for the given domain name, containing -// the provided TXT value. The intended use case is answering -// LetsEncrypt/ACME dns-01 challenges. -// -// The control plane will only permit SetDNS requests with very -// specific names and values. The name should be -// "_acme-challenge." + your node's MagicDNS name. It's expected that -// clients cache the certs from LetsEncrypt (or whichever CA is -// providing them) and only request new ones as needed; the control plane -// rate limits SetDNS requests. -// -// This is a low-level interface; it's expected that most Tailscale -// users use a higher level interface to getting/using TLS -// certificates. -func (lc *Client) SetDNS(ctx context.Context, name, value string) error { - v := url.Values{} - v.Set("name", name) - v.Set("value", value) - _, err := lc.send(ctx, "POST", "/localapi/v0/set-dns?"+v.Encode(), 200, nil) - return err -} - // DialTCP connects to the host's port via Tailscale. // // The host may be a base DNS name (resolved from the netmap inside @@ -1009,117 +985,6 @@ func (lc *Client) CurrentDERPMap(ctx context.Context) (*tailcfg.DERPMap, error) return &derpMap, nil } -// CertPair returns a cert and private key for the provided DNS domain. -// -// It returns a cached certificate from disk if it's still valid. -// -// Deprecated: use [Client.CertPair]. -func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { - return defaultClient.CertPair(ctx, domain) -} - -// CertPair returns a cert and private key for the provided DNS domain. -// -// It returns a cached certificate from disk if it's still valid. -// -// API maturity: this is considered a stable API. -func (lc *Client) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { - return lc.CertPairWithValidity(ctx, domain, 0) -} - -// CertPairWithValidity returns a cert and private key for the provided DNS -// domain. -// -// It returns a cached certificate from disk if it's still valid. -// When minValidity is non-zero, the returned certificate will be valid for at -// least the given duration, if permitted by the CA. If the certificate is -// valid, but for less than minValidity, it will be synchronously renewed. -// -// API maturity: this is considered a stable API. -func (lc *Client) CertPairWithValidity(ctx context.Context, domain string, minValidity time.Duration) (certPEM, keyPEM []byte, err error) { - res, err := lc.send(ctx, "GET", fmt.Sprintf("/localapi/v0/cert/%s?type=pair&min_validity=%s", domain, minValidity), 200, nil) - if err != nil { - return nil, nil, err - } - // with ?type=pair, the response PEM is first the one private - // key PEM block, then the cert PEM blocks. 
- i := mem.Index(mem.B(res), mem.S("--\n--")) - if i == -1 { - return nil, nil, fmt.Errorf("unexpected output: no delimiter") - } - i += len("--\n") - keyPEM, certPEM = res[:i], res[i:] - if mem.Contains(mem.B(certPEM), mem.S(" PRIVATE KEY-----")) { - return nil, nil, fmt.Errorf("unexpected output: key in cert") - } - return certPEM, keyPEM, nil -} - -// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi. -// -// It returns a cached certificate from disk if it's still valid. -// -// It's the right signature to use as the value of -// [tls.Config.GetCertificate]. -// -// Deprecated: use [Client.GetCertificate]. -func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { - return defaultClient.GetCertificate(hi) -} - -// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi. -// -// It returns a cached certificate from disk if it's still valid. -// -// It's the right signature to use as the value of -// [tls.Config.GetCertificate]. -// -// API maturity: this is considered a stable API. -func (lc *Client) GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { - if hi == nil || hi.ServerName == "" { - return nil, errors.New("no SNI ServerName") - } - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - name := hi.ServerName - if !strings.Contains(name, ".") { - if v, ok := lc.ExpandSNIName(ctx, name); ok { - name = v - } - } - certPEM, keyPEM, err := lc.CertPair(ctx, name) - if err != nil { - return nil, err - } - cert, err := tls.X509KeyPair(certPEM, keyPEM) - if err != nil { - return nil, err - } - return &cert, nil -} - -// ExpandSNIName expands bare label name into the most likely actual TLS cert name. -// -// Deprecated: use [Client.ExpandSNIName]. -func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { - return defaultClient.ExpandSNIName(ctx, name) -} - -// ExpandSNIName expands bare label name into the most likely actual TLS cert name. -func (lc *Client) ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { - st, err := lc.StatusWithoutPeers(ctx) - if err != nil { - return "", false - } - for _, d := range st.CertDomains { - if len(d) > len(name)+1 && strings.HasPrefix(d, name) && d[len(name)] == '.' { - return d, true - } - } - return "", false -} - // PingOpts contains options for the ping request. // // The zero value is valid, which means to use defaults. diff --git a/client/tailscale/cert.go b/client/tailscale/cert.go new file mode 100644 index 000000000..4f351ab99 --- /dev/null +++ b/client/tailscale/cert.go @@ -0,0 +1,34 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !js && !ts_omit_acme + +package tailscale + +import ( + "context" + "crypto/tls" + + "tailscale.com/client/local" +) + +// GetCertificate is an alias for [tailscale.com/client/local.GetCertificate]. +// +// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.GetCertificate]. +func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { + return local.GetCertificate(hi) +} + +// CertPair is an alias for [tailscale.com/client/local.CertPair]. +// +// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.CertPair]. +func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { + return local.CertPair(ctx, domain) +} + +// ExpandSNIName is an alias for [tailscale.com/client/local.ExpandSNIName]. 
+// +// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.ExpandSNIName]. +func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { + return local.ExpandSNIName(ctx, name) +} diff --git a/client/tailscale/localclient_aliases.go b/client/tailscale/localclient_aliases.go index 58be312b4..e3492e841 100644 --- a/client/tailscale/localclient_aliases.go +++ b/client/tailscale/localclient_aliases.go @@ -5,7 +5,6 @@ package tailscale import ( "context" - "crypto/tls" "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" @@ -37,13 +36,6 @@ type BugReportOpts = local.BugReportOpts // Deprecated: import [tailscale.com/client/local] instead. type PingOpts = local.PingOpts -// GetCertificate is an alias for [tailscale.com/client/local.GetCertificate]. -// -// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.GetCertificate]. -func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { - return local.GetCertificate(hi) -} - // SetVersionMismatchHandler is an alias for [tailscale.com/client/local.SetVersionMismatchHandler]. // // Deprecated: import [tailscale.com/client/local] instead. @@ -85,17 +77,3 @@ func Status(ctx context.Context) (*ipnstate.Status, error) { func StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { return local.StatusWithoutPeers(ctx) } - -// CertPair is an alias for [tailscale.com/client/local.CertPair]. -// -// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.CertPair]. -func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { - return local.CertPair(ctx, domain) -} - -// ExpandSNIName is an alias for [tailscale.com/client/local.ExpandSNIName]. -// -// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.ExpandSNIName]. 
-func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { - return local.ExpandSNIName(ctx, name) -} diff --git a/cmd/tailscale/cli/cert.go b/cmd/tailscale/cli/cert.go index 9c8eca5b7..171eebe1e 100644 --- a/cmd/tailscale/cli/cert.go +++ b/cmd/tailscale/cli/cert.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !js && !ts_omit_acme + package cli import ( @@ -25,19 +27,23 @@ import ( "tailscale.com/version" ) -var certCmd = &ffcli.Command{ - Name: "cert", - Exec: runCert, - ShortHelp: "Get TLS certs", - ShortUsage: "tailscale cert [flags] ", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("cert") - fs.StringVar(&certArgs.certFile, "cert-file", "", "output cert file or \"-\" for stdout; defaults to DOMAIN.crt if --cert-file and --key-file are both unset") - fs.StringVar(&certArgs.keyFile, "key-file", "", "output key file or \"-\" for stdout; defaults to DOMAIN.key if --cert-file and --key-file are both unset") - fs.BoolVar(&certArgs.serve, "serve-demo", false, "if true, serve on port :443 using the cert as a demo, instead of writing out the files to disk") - fs.DurationVar(&certArgs.minValidity, "min-validity", 0, "ensure the certificate is valid for at least this duration; the output certificate is never expired if this flag is unset or 0, but the lifetime may vary; the maximum allowed min-validity depends on the CA") - return fs - })(), +func init() { + maybeCertCmd = func() *ffcli.Command { + return &ffcli.Command{ + Name: "cert", + Exec: runCert, + ShortHelp: "Get TLS certs", + ShortUsage: "tailscale cert [flags] ", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("cert") + fs.StringVar(&certArgs.certFile, "cert-file", "", "output cert file or \"-\" for stdout; defaults to DOMAIN.crt if --cert-file and --key-file are both unset") + fs.StringVar(&certArgs.keyFile, "key-file", "", "output key file or \"-\" for stdout; defaults to DOMAIN.key if --cert-file and --key-file are both unset") + fs.BoolVar(&certArgs.serve, "serve-demo", false, "if true, serve on port :443 using the cert as a demo, instead of writing out the files to disk") + fs.DurationVar(&certArgs.minValidity, "min-validity", 0, "ensure the certificate is valid for at least this duration; the output certificate is never expired if this flag is unset or 0, but the lifetime may vary; the maximum allowed min-validity depends on the CA") + return fs + })(), + } + } } var certArgs struct { diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index d039be607..dfc8f3249 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -215,6 +215,7 @@ var ( maybeNetlockCmd, maybeFunnelCmd, maybeServeCmd, + maybeCertCmd, _ func() *ffcli.Command ) @@ -262,7 +263,7 @@ change in the future. 
nilOrCall(maybeWebCmd), nilOrCall(fileCmd), bugReportCmd, - certCmd, + nilOrCall(maybeCertCmd), nilOrCall(maybeNetlockCmd), licensesCmd, exitNodeCmd(), diff --git a/cmd/tailscale/cli/configure-synology-cert.go b/cmd/tailscale/cli/configure-synology-cert.go index 663d0c879..6ceef33ca 100644 --- a/cmd/tailscale/cli/configure-synology-cert.go +++ b/cmd/tailscale/cli/configure-synology-cert.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !ts_omit_acme + package cli import ( @@ -22,6 +24,10 @@ import ( "tailscale.com/version/distro" ) +func init() { + maybeConfigSynologyCertCmd = synologyConfigureCertCmd +} + func synologyConfigureCertCmd() *ffcli.Command { if runtime.GOOS != "linux" || distro.Get() != distro.Synology { return nil diff --git a/cmd/tailscale/cli/configure-synology-cert_test.go b/cmd/tailscale/cli/configure-synology-cert_test.go index 801285e55..c7da5622f 100644 --- a/cmd/tailscale/cli/configure-synology-cert_test.go +++ b/cmd/tailscale/cli/configure-synology-cert_test.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !ts_omit_acme + package cli import ( diff --git a/cmd/tailscale/cli/configure.go b/cmd/tailscale/cli/configure.go index 0354a1944..20236eb28 100644 --- a/cmd/tailscale/cli/configure.go +++ b/cmd/tailscale/cli/configure.go @@ -10,7 +10,11 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" ) -var maybeJetKVMConfigureCmd func() *ffcli.Command // non-nil only on Linux/arm for JetKVM +var ( + maybeJetKVMConfigureCmd, + maybeConfigSynologyCertCmd, + _ func() *ffcli.Command // non-nil only on Linux/arm for JetKVM +) func configureCmd() *ffcli.Command { return &ffcli.Command{ @@ -28,7 +32,7 @@ services on the host to use Tailscale in more ways. Subcommands: nonNilCmds( configureKubeconfigCmd(), synologyConfigureCmd(), - synologyConfigureCertCmd(), + ccall(maybeConfigSynologyCertCmd), ccall(maybeSysExtCmd), ccall(maybeVPNConfigCmd), ccall(maybeJetKVMConfigureCmd), diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 1609ba633..0d56b55d2 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -108,3 +108,16 @@ func TestOmitPortmapper(t *testing.T) { }, }.Check(t) } + +func TestOmitACME(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_acme,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "/acme") { + t.Errorf("unexpected dep with ts_omit_acme: %q", dep) + } + }, + }.Check(t) +} diff --git a/feature/buildfeatures/feature_acme_disabled.go b/feature/buildfeatures/feature_acme_disabled.go new file mode 100644 index 000000000..0a7f25a82 --- /dev/null +++ b/feature/buildfeatures/feature_acme_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_acme + +package buildfeatures + +// HasACME is whether the binary was built with support for modular feature "ACME TLS certificate management". +// Specifically, it's whether the binary was NOT built with the "ts_omit_acme" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasACME = false diff --git a/feature/buildfeatures/feature_acme_enabled.go b/feature/buildfeatures/feature_acme_enabled.go new file mode 100644 index 000000000..f074bfb4e --- /dev/null +++ b/feature/buildfeatures/feature_acme_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_acme + +package buildfeatures + +// HasACME is whether the binary was built with support for modular feature "ACME TLS certificate management". +// Specifically, it's whether the binary was NOT built with the "ts_omit_acme" build tag. +// It's a const so it can be used for dead code elimination. +const HasACME = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 2c5f32310..fc26dd370 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -42,6 +42,7 @@ type FeatureMeta struct { // Features are the known Tailscale features that can be selectively included or // excluded via build tags, and a description of each. var Features = map[FeatureTag]FeatureMeta{ + "acme": {"ACME", "ACME TLS certificate management"}, "aws": {"AWS", "AWS integration"}, "bird": {"Bird", "Bird BGP integration"}, "capture": {"Capture", "Packet capture"}, diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index b5f50f3bc..0487774db 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -4,9 +4,7 @@ package ipnlocal import ( - "crypto/x509" "encoding/json" - "encoding/pem" "errors" "fmt" "io" @@ -54,9 +52,6 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ req("POST /logtail/flush"): handleC2NLogtailFlush, req("POST /sockstats"): handleC2NSockStats, - // Check TLS certificate status. - req("GET /tls-cert-status"): handleC2NTLSCertStatus, - // SSH req("/ssh/usernames"): handleC2NSSHUsernames, @@ -497,54 +492,3 @@ func regularFileExists(path string) bool { fi, err := os.Stat(path) return err == nil && fi.Mode().IsRegular() } - -// handleC2NTLSCertStatus returns info about the last TLS certificate issued for the -// provided domain. This can be called by the controlplane to clean up DNS TXT -// records when they're no longer needed by LetsEncrypt. -// -// It does not kick off a cert fetch or async refresh. It only reports anything -// that's already sitting on disk, and only reports metadata about the public -// cert (stuff that'd be the in CT logs anyway). 
-func handleC2NTLSCertStatus(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - cs, err := b.getCertStore() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - domain := r.FormValue("domain") - if domain == "" { - http.Error(w, "no 'domain'", http.StatusBadRequest) - return - } - - ret := &tailcfg.C2NTLSCertInfo{} - pair, err := getCertPEMCached(cs, domain, b.clock.Now()) - ret.Valid = err == nil - if err != nil { - ret.Error = err.Error() - if errors.Is(err, errCertExpired) { - ret.Expired = true - } else if errors.Is(err, ipn.ErrStateNotExist) { - ret.Missing = true - ret.Error = "no certificate" - } - } else { - block, _ := pem.Decode(pair.CertPEM) - if block == nil { - ret.Error = "invalid PEM" - ret.Valid = false - } else { - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - ret.Error = fmt.Sprintf("invalid certificate: %v", err) - ret.Valid = false - } else { - ret.NotBefore = cert.NotBefore.UTC().Format(time.RFC3339) - ret.NotAfter = cert.NotAfter.UTC().Format(time.RFC3339) - } - } - } - - writeJSON(w, ret) -} diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index 86052eb8d..bf85affa6 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !js +//go:build !js && !ts_omit_acme package ipnlocal @@ -24,6 +24,7 @@ import ( "log" randv2 "math/rand/v2" "net" + "net/http" "os" "path/filepath" "runtime" @@ -40,6 +41,7 @@ import ( "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" "tailscale.com/net/bakedroots" + "tailscale.com/tailcfg" "tailscale.com/tempfork/acme" "tailscale.com/types/logger" "tailscale.com/util/testenv" @@ -47,6 +49,10 @@ import ( "tailscale.com/version/distro" ) +func init() { + RegisterC2N("GET /tls-cert-status", handleC2NTLSCertStatus) +} + // Process-wide cache. (A new *Handler is created per connection, // effectively per request) var ( @@ -836,3 +842,54 @@ func checkCertDomain(st *ipnstate.Status, domain string) error { } return fmt.Errorf("invalid domain %q; must be one of %q", domain, st.CertDomains) } + +// handleC2NTLSCertStatus returns info about the last TLS certificate issued for the +// provided domain. This can be called by the controlplane to clean up DNS TXT +// records when they're no longer needed by LetsEncrypt. +// +// It does not kick off a cert fetch or async refresh. It only reports anything +// that's already sitting on disk, and only reports metadata about the public +// cert (stuff that'd be the in CT logs anyway). 
+func handleC2NTLSCertStatus(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + cs, err := b.getCertStore() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + domain := r.FormValue("domain") + if domain == "" { + http.Error(w, "no 'domain'", http.StatusBadRequest) + return + } + + ret := &tailcfg.C2NTLSCertInfo{} + pair, err := getCertPEMCached(cs, domain, b.clock.Now()) + ret.Valid = err == nil + if err != nil { + ret.Error = err.Error() + if errors.Is(err, errCertExpired) { + ret.Expired = true + } else if errors.Is(err, ipn.ErrStateNotExist) { + ret.Missing = true + ret.Error = "no certificate" + } + } else { + block, _ := pem.Decode(pair.CertPEM) + if block == nil { + ret.Error = "invalid PEM" + ret.Valid = false + } else { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + ret.Error = fmt.Sprintf("invalid certificate: %v", err) + ret.Valid = false + } else { + ret.NotBefore = cert.NotBefore.UTC().Format(time.RFC3339) + ret.NotAfter = cert.NotAfter.UTC().Format(time.RFC3339) + } + } + } + + writeJSON(w, ret) +} diff --git a/ipn/ipnlocal/cert_js.go b/ipn/ipnlocal/cert_disabled.go similarity index 51% rename from ipn/ipnlocal/cert_js.go rename to ipn/ipnlocal/cert_disabled.go index 6acc57a60..17d446c11 100644 --- a/ipn/ipnlocal/cert_js.go +++ b/ipn/ipnlocal/cert_disabled.go @@ -1,20 +1,30 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build js || ts_omit_acme + package ipnlocal import ( "context" "errors" + "io" + "net/http" "time" ) +func init() { + RegisterC2N("GET /tls-cert-status", handleC2NTLSCertStatusDisabled) +} + +var errNoCerts = errors.New("cert support not compiled in this build") + type TLSCertKeyPair struct { CertPEM, KeyPEM []byte } func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertKeyPair, error) { - return nil, errors.New("not implemented for js/wasm") + return nil, errNoCerts } var errCertExpired = errors.New("cert expired") @@ -22,9 +32,14 @@ var errCertExpired = errors.New("cert expired") type certStore interface{} func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKeyPair, err error) { - return nil, errors.New("not implemented for js/wasm") + return nil, errNoCerts } func (b *LocalBackend) getCertStore() (certStore, error) { - return nil, errors.New("not implemented for js/wasm") + return nil, errNoCerts +} + +func handleC2NTLSCertStatusDisabled(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, `{"Missing":true}`) // a minimal tailcfg.C2NTLSCertInfo } diff --git a/ipn/localapi/cert.go b/ipn/localapi/cert.go index 323406f7b..2313631cc 100644 --- a/ipn/localapi/cert.go +++ b/ipn/localapi/cert.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android && !js +//go:build !ios && !android && !js && !ts_omit_acme package localapi @@ -14,6 +14,10 @@ import ( "tailscale.com/ipn/ipnlocal" ) +func init() { + Register("cert/", (*Handler).serveCert) +} + func (h *Handler) serveCert(w http.ResponseWriter, r *http.Request) { if !h.PermitWrite && !h.PermitCert { http.Error(w, "cert access denied", http.StatusForbidden) diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 0c3a0a4ed..01966f84b 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -67,7 +67,6 @@ type LocalAPIHandler func(*Handler, http.ResponseWriter, 
*http.Request) // then it's a prefix match. var handler = map[string]LocalAPIHandler{ // The prefix match handlers end with a slash: - "cert/": (*Handler).serveCert, "profiles/": (*Handler).serveProfiles, // The other /localapi/v0/NAME handlers are exact matches and contain only NAME From 6db30a10f7a160efeaeeb955e92569c767ca8b2d Mon Sep 17 00:00:00 2001 From: Simon Law Date: Tue, 16 Sep 2025 15:49:03 -0700 Subject: [PATCH 1327/1708] cmd/tailscale: shrink QR codes using half blocks (#17084) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When running `tailscale up --qr`, the QR code is rendered using two full blocks ██ to form a square pixel. This is a problem for people with smaller terminals, because the output is 37 lines high. All modern terminals support half block characters, like ▀ and ▄, which only takes 19 lines and can easily fit in a regular terminal window. For example, https://login.tailscale.com/a/0123456789 is now rendered: ``` user@host:~$ tailscale up --qr █████████████████████████████████████ █████████████████████████████████████ ████ ▄▄▄▄▄ █ ▀▀ █▄▀▀ ▄ █ ▄▄▄▄▄ ████ ████ █ █ █▀ ▄▄▄█▀█▄▀ ▄█ █ █ ████ ████ █▄▄▄█ ██▄ ▄▀▀▄▄ ▀▀ ▀█ █▄▄▄█ ████ ████▄▄▄▄▄▄▄█ ▀▄▀ █▄▀▄▀▄█ █▄▄▄▄▄▄▄████ ████▄█▄ ▀▄▄▄█▀▄█▀ ▀▄ ▄ ▀▀ ▀▀▄█▄ ████ ████▄▀▄▀▄█▄ █ ▄▄▄▄█▀██▀██▄▄█▀█▄▄▀████ ████▄█▀ ▀ ▄█▄▄▀▄▀█ ▄ ▄█▀█▄▀██▄ ▀▀████ █████▀ ▀ ▄▀▀▀▀▄▀▄▀▀ ▄▄ ▄ ▀ █▄ ▄████ ██████ ▄▄█▄▄▄▄▄▀ █ ▄▀▀▄█▀ █ ▄ ▀ █████ ████▄█▄▄ ▄▀ ▀██▀ ▄█▀▀████▄▀█ ██████ █████▄▄▄█▄▄▄▀▀ █▄▄▄▄▄ ▀█ ▄▄▄ ▀▀████ ████ ▄▄▄▄▄ █ ██▄ ▀ █▀█ ▄ █▄█ █▄█████ ████ █ █ █▀ █ ▀█▄▄ █▀ ▄ ▀▄▀▄████ ████ █▄▄▄█ █▄█▀█▄▀██▀██▄ ▀█▄▀▀▄▀▄████ ████▄▄▄▄▄▄▄█▄▄███▄▄▄███▄▄▄██▄██▄█████ █████████████████████████████████████ ▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ ``` To render a QR code with full blocks, like we did in the past, use the new `--qr-format` flag: ``` user@host:~$ tailscale up --qr --qr-format=large ██████████████████████████████████████████████████████████████████████████ ██████████████████████████████████████████████████████████████████████████ ██████████████████████████████████████████████████████████████████████████ ██████████████████████████████████████████████████████████████████████████ ████████ ██ ████ ██ ████ ██ ████████ ████████ ██████████ ██ ████ ██ ██ ██████████ ████████ ████████ ██ ██ ████ ██████ ██ ██ ██ ██ ████████ ████████ ██ ██ ██ ████████ ████ ████ ██ ██ ████████ ████████ ██ ██ ████ ████ ████ ████ ██ ██ ████████ ████████ ██████████ ██████ ██ ████ ██ ██████████ ████████ ████████ ██ ██ ██ ██ ██ ██ ██ ██ ████████ ████████████████████████ ██ ████ ██ ████ ████████████████████████ ████████ ██ ██ ████ ████ ██ ████ ████ ██ ████████ ██████████████ ████████ ████ ██ ██ ██████ ████████ ████████ ██ ██ ██ ██ ██████████████ ██████ ██████████ ██████████ ██ ██████ ██ ██████████ ████ ██████████ ██████ ████████ ████████ ████ ██ ██ ██ ████ ██████ ██████ ████████████ ████████████ ████████ ██ ██ ██ ████ ████ ██████ ████████ ████████████ ██ ████████ ██ ████ ██ ██ ████████ ██████████ ██ ██ ██ ████ ██ ████ ██████████ ████████████ ██ ██ ██ ████ ████ ██ ██ ██████████ ████████████ ████████████████ ██ ██ ████ ██ ██ ██████████ ████████ ██ ██ ████████ ██████████████ ████ ████████████ ████████████████ ██ ████ ████ ██████████ ██ ████████████ ██████████ ██ ████ ██ ████ ████████████ ████████████████████████ ████████████ ██ ██████ ████████ ████████ ██ ████ ██ ██████ ██ ██ ██ ██████████ ████████ ██████████ ██ ██████ ██ ██ ██ ██████ ██████████████ ████████ ██ ██ ████ ██ ████ ████ ██ ██ ████████ ████████ ██ ██ ██ ██ ██████ ██ ██ ██ ██████████ ████████ ██ ██ 
██ ██████ ████████████ ████ ████ ██ ████████ ████████ ██████████ ██████ ████ ████ ██████ ████ ██ ██████████ ████████ ██ ██████ ██████ ████ ████ ██████████ ██████████████████████████████████████████████████████████████████████████ ██████████████████████████████████████████████████████████████████████████ ██████████████████████████████████████████████████████████████████████████ ██████████████████████████████████████████████████████████████████████████ ``` Fixes #17083 Signed-off-by: Simon Law --- cmd/tailscale/cli/up.go | 13 +++++++++++-- cmd/tailscale/cli/up_test.go | 1 + 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 097af725b..c78a63569 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -95,6 +95,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { // When adding new flags, prefer to put them under "tailscale set" instead // of here. Setting preferences via "tailscale up" is deprecated. upf.BoolVar(&upArgs.qr, "qr", false, "show QR code for login URLs") + upf.StringVar(&upArgs.qrFormat, "qr-format", "small", "QR code formatting (small or large)") upf.StringVar(&upArgs.authKeyOrFile, "auth-key", "", `node authorization key; if it begins with "file:", then it's a path to a file containing the authkey`) upf.StringVar(&upArgs.server, "login-server", ipn.DefaultControlURL, "base URL of control server") @@ -164,6 +165,7 @@ func defaultNetfilterMode() string { // added to it. Add new arguments to setArgsT instead. type upArgsT struct { qr bool + qrFormat string reset bool server string acceptRoutes bool @@ -658,7 +660,14 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if err != nil { log.Printf("QR code error: %v", err) } else { - fmt.Fprintf(Stderr, "%s\n", q.ToString(false)) + switch upArgs.qrFormat { + case "large": + fmt.Fprintf(Stderr, "%s\n", q.ToString(false)) + case "small": + fmt.Fprintf(Stderr, "%s\n", q.ToSmallString(false)) + default: + log.Printf("unknown QR code format: %q", upArgs.qrFormat) + } } } } @@ -805,7 +814,7 @@ func addPrefFlagMapping(flagName string, prefNames ...string) { // correspond to an ipn.Pref. func preflessFlag(flagName string) bool { switch flagName { - case "auth-key", "force-reauth", "reset", "qr", "json", "timeout", "accept-risk", "host-routes": + case "auth-key", "force-reauth", "reset", "qr", "qr-format", "json", "timeout", "accept-risk", "host-routes": return true } return false diff --git a/cmd/tailscale/cli/up_test.go b/cmd/tailscale/cli/up_test.go index eb06f84dc..efddb5324 100644 --- a/cmd/tailscale/cli/up_test.go +++ b/cmd/tailscale/cli/up_test.go @@ -35,6 +35,7 @@ var validUpFlags = set.Of( "operator", "report-posture", "qr", + "qr-format", "reset", "shields-up", "snat-subnet-routes", From 697098ed6ccc0f2fd8727fa36a86d952495acf50 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 16 Sep 2025 18:11:28 -0700 Subject: [PATCH 1328/1708] ipn/ipnlocal: fix a case where ts_omit_ssh was still linking in x/crypto/ssh And add a test. 
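For context, a minimal sketch (not part of this patch; the helper names are made up) of why guarding a call site with the buildfeatures.HasSSH const lets the linker drop golang.org/x/crypto/ssh under the ts_omit_ssh build tag:

```go
package example

import "tailscale.com/feature/buildfeatures"

// maybeCollectSSHHostKeys stands in for the hostinfo code path touched by
// this patch. Because buildfeatures.HasSSH is a const (false when built
// with ts_omit_ssh), the compiler can prove the branch is unreachable and
// the linker can discard the SSH implementation and its transitive
// dependencies, including golang.org/x/crypto/ssh.
func maybeCollectSSHHostKeys(runSSH bool) []string {
	if buildfeatures.HasSSH && runSSH {
		return collectSSHHostKeys()
	}
	return nil
}

// collectSSHHostKeys is a hypothetical placeholder for the real host key
// collection code, which is only linked in when HasSSH is true.
func collectSSHHostKeys() []string { return nil }
```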
Updates #12614 Change-Id: Icb1c77f5890def794a4938583725c1a0886b197d Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/deps_test.go | 3 ++- ipn/ipnlocal/local.go | 2 +- ipn/ipnlocal/ssh.go | 2 +- ipn/ipnlocal/ssh_stub.go | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 0d56b55d2..9e6624d9a 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -15,8 +15,9 @@ func TestOmitSSH(t *testing.T) { deptest.DepChecker{ GOOS: "linux", GOARCH: "amd64", - Tags: "ts_omit_ssh", + Tags: "ts_omit_ssh,ts_include_cli", BadDeps: map[string]string{ + "golang.org/x/crypto/ssh": msg, "tailscale.com/ssh/tailssh": msg, "tailscale.com/sessionrecording": msg, "github.com/anmitsu/go-shlex": msg, diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 988c0b538..134094201 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5610,7 +5610,7 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip b.metrics.advertisedRoutes.Set(float64(tsaddr.WithoutExitRoute(prefs.AdvertiseRoutes()).Len())) var sshHostKeys []string - if prefs.RunSSH() && envknob.CanSSHD() { + if buildfeatures.HasSSH && prefs.RunSSH() && envknob.CanSSHD() { // TODO(bradfitz): this is called with b.mu held. Not ideal. // If the filesystem gets wedged or something we could block for // a long time. But probably fine. diff --git a/ipn/ipnlocal/ssh.go b/ipn/ipnlocal/ssh.go index e48b1f2f1..e2c2f5067 100644 --- a/ipn/ipnlocal/ssh.go +++ b/ipn/ipnlocal/ssh.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build (linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9 +//go:build ((linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9) && !ts_omit_ssh package ipnlocal diff --git a/ipn/ipnlocal/ssh_stub.go b/ipn/ipnlocal/ssh_stub.go index d129084e4..6b2e36015 100644 --- a/ipn/ipnlocal/ssh_stub.go +++ b/ipn/ipnlocal/ssh_stub.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build ios || android || (!linux && !darwin && !freebsd && !openbsd && !plan9) +//go:build ts_omit_ssh || ios || android || (!linux && !darwin && !freebsd && !openbsd && !plan9) package ipnlocal From 312582bdbfca47948453b446a055c87a40b416d4 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 17 Sep 2025 11:11:35 +0100 Subject: [PATCH 1329/1708] ssh/tailssh: mark TestSSHRecordingCancelsSessionsOnUploadFailure as flaky Updates https://github.com/tailscale/tailscale/issues/7707 Signed-off-by: Alex Chan --- ssh/tailssh/tailssh_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 44b2d68df..3b6d3c52c 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -36,6 +36,7 @@ import ( gossh "golang.org/x/crypto/ssh" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" + "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/store/mem" "tailscale.com/net/memnet" @@ -489,6 +490,8 @@ func newSSHRule(action *tailcfg.SSHAction) *tailcfg.SSHRule { } func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/7707") + if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS) } From ddc0cd7e1eb289ab1b9d491762b6b5249a960b77 Mon Sep 17 00:00:00 2001 
From: James Sanderson Date: Wed, 10 Sep 2025 17:31:25 +0100 Subject: [PATCH 1330/1708] ipn/ipnlocal: disconnect and block when key expires even when using seamless Updates tailscale/corp#31478 Signed-off-by: James Sanderson --- ipn/ipnlocal/local.go | 6 +- ipn/ipnlocal/state_test.go | 142 ++++++++++++++++++++++++++++++++++++- 2 files changed, 144 insertions(+), 4 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 134094201..a712dc98a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5735,9 +5735,9 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock switch newState { case ipn.NeedsLogin: systemd.Status("Needs login: %s", authURL) - if b.seamlessRenewalEnabled() { - break - } + // always block updates on NeedsLogin even if seamless renewal is enabled, + // to prevent calls to authReconfig from reconfiguring the engine when our + // key has expired and we're waiting to authenticate to use the new key. b.blockEngineUpdates(true) fallthrough case ipn.Stopped, ipn.NoState: diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index ff21c920c..609a51c5b 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -204,6 +204,16 @@ func (cc *mockControl) authenticated(nm *netmap.NetworkMap) { cc.send(nil, "", true, nm) } +func (cc *mockControl) sendAuthURL(nm *netmap.NetworkMap) { + s := controlclient.Status{ + URL: "https://example.com/a/foo", + NetMap: nm, + Persist: cc.persist.View(), + } + s.SetStateForTest(controlclient.StateURLVisitRequired) + cc.opts.Observer.SetControlClientStatus(cc, s) +} + // called records that a particular function name was called. func (cc *mockControl) called(s string) { cc.mu.Lock() @@ -1362,11 +1372,141 @@ func TestEngineReconfigOnStateChange(t *testing.T) { steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { mustDo(t)(lb.Start(ipn.Options{})) mustDo2(t)(lb.EditPrefs(connect)) - cc().authenticated(node3) + cc().authenticated(node1) + cc().send(nil, "", false, &netmap.NetworkMap{ + Expiry: time.Now().Add(-time.Minute), + }) + }, + wantState: ipn.NeedsLogin, + wantCfg: &wgcfg.Config{}, + wantRouterCfg: &router.Config{}, + wantDNSCfg: &dns.Config{}, + }, + { + name: "Start/Connect/Login/InitReauth", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + + // Start the re-auth process: + lb.StartLoginInteractive(context.Background()) + cc().sendAuthURL(node1) + }, + // Without seamless renewal, even starting a reauth tears down everything: + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{}, + wantRouterCfg: &router.Config{}, + wantDNSCfg: &dns.Config{}, + }, + { + name: "Start/Connect/Login/InitReauth/Login", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + + // Start the re-auth process: + lb.StartLoginInteractive(context.Background()) + cc().sendAuthURL(node1) + + // Complete the re-auth process: + cc().authenticated(node1) + }, + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Name: "tailscale", + NodeID: node1.SelfNode.StableID(), + Peers: []wgcfg.Peer{}, + Addresses: node1.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node1.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, 
+ wantDNSCfg: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), + }, + }, + { + name: "Seamless/Start/Connect/Login/InitReauth", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + lb.ControlKnobs().SeamlessKeyRenewal.Store(true) + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + + // Start the re-auth process: + lb.StartLoginInteractive(context.Background()) + cc().sendAuthURL(node1) + }, + // With seamless renewal, starting a reauth should leave everything up: + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Name: "tailscale", + NodeID: node1.SelfNode.StableID(), + Peers: []wgcfg.Peer{}, + Addresses: node1.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node1.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), + }, + }, + { + name: "Seamless/Start/Connect/Login/InitReauth/Login", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + lb.ControlKnobs().SeamlessKeyRenewal.Store(true) + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + + // Start the re-auth process: + lb.StartLoginInteractive(context.Background()) + cc().sendAuthURL(node1) + + // Complete the re-auth process: + cc().authenticated(node1) + }, + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Name: "tailscale", + NodeID: node1.SelfNode.StableID(), + Peers: []wgcfg.Peer{}, + Addresses: node1.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node1.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), + }, + }, + { + name: "Seamless/Start/Connect/Login/Expire", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + lb.ControlKnobs().SeamlessKeyRenewal.Store(true) + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) cc().send(nil, "", false, &netmap.NetworkMap{ Expiry: time.Now().Add(-time.Minute), }) }, + // Even with seamless, if the key we are using expires, we want to disconnect: wantState: ipn.NeedsLogin, wantCfg: &wgcfg.Config{}, wantRouterCfg: &router.Config{}, From db0b9a361c6ae3398b05733086e17b59b27110b6 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 17 Sep 2025 10:46:13 +0100 Subject: [PATCH 1331/1708] net/dns: don't timeout if inotify sends multiple events This fixes a flaky test which has been occasionally timing out in CI. In particular, this test times out if `watchFile` receives multiple notifications from inotify before we cancel the test context. We block processing the second notification, because we've stopped listening to the `callbackDone` channel. This patch changes the test so we only send on the first notification. Testing this locally with `stress` confirms that the test is no longer flaky. 
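As an aside, a minimal sketch (an assumed equivalent, not the code in this patch, which uses an atomic flag) of the send-at-most-once callback pattern the fix relies on:

```go
package example

import "sync"

// newOneShotCallback returns a callback that sends on done only the first
// time it is invoked; later invocations are no-ops, so a second inotify
// event can never block on a channel nobody is reading anymore.
func newOneShotCallback(done chan<- bool) func() {
	var once sync.Once
	return func() {
		once.Do(func() { done <- true })
	}
}
```

In the test, the watcher's callback could then be constructed as `callback := newOneShotCallback(callbackDone)`.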
Fixes #17172 Updates #14699 Signed-off-by: Alex Chan --- net/dns/direct_linux_test.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/net/dns/direct_linux_test.go b/net/dns/direct_linux_test.go index 079d060ed..e8f917b90 100644 --- a/net/dns/direct_linux_test.go +++ b/net/dns/direct_linux_test.go @@ -25,8 +25,13 @@ func TestWatchFile(t *testing.T) { var callbackCalled atomic.Bool callbackDone := make(chan bool) callback := func() { - callbackDone <- true - callbackCalled.Store(true) + // We only send to the channel once to avoid blocking if the + // callback is called multiple times -- this happens occasionally + // if inotify sends multiple events before we cancel the context. + if !callbackCalled.Load() { + callbackDone <- true + callbackCalled.Store(true) + } } var eg errgroup.Group From 48029a897df2643bc47446076b9516e1cf55a03c Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 17 Sep 2025 07:20:34 -0700 Subject: [PATCH 1332/1708] util/eventbus: allow test expectations reporting only an error (#17146) Extend the Expect method of a Watcher to allow filter functions that report only an error value, and which "pass" when the reported error is nil. Updates #15160 Change-Id: I582d804554bd1066a9e499c1f3992d068c9e8148 Signed-off-by: M. J. Fromberger --- util/eventbus/eventbustest/eventbustest.go | 24 ++++++++++++++----- .../eventbustest/eventbustest_test.go | 21 ++++++++++++++++ 2 files changed, 39 insertions(+), 6 deletions(-) diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index af725ace1..d5cfe5395 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -79,6 +79,11 @@ func Type[T any]() func(T) { return func(T) {} } // // The if error != nil, the test helper will return that error immediately. // func(e ExpectedType) (bool, error) // +// // Tests for event type and whatever is defined in the body. +// // If a non-nil error is reported, the test helper will return that error +// // immediately; otherwise the expectation is considered to be met. +// func(e ExpectedType) error +// // If the list of events must match exactly with no extra events, // use [ExpectExactly]. func Expect(tw *Watcher, filters ...any) error { @@ -179,15 +184,22 @@ func eventFilter(f any) filter { return []reflect.Value{reflect.ValueOf(true), reflect.Zero(reflect.TypeFor[error]())} } case 1: - if ft.Out(0) != reflect.TypeFor[bool]() { - panic(fmt.Sprintf("result is %T, want bool", ft.Out(0))) - } - fixup = func(vals []reflect.Value) []reflect.Value { - return append(vals, reflect.Zero(reflect.TypeFor[error]())) + switch ft.Out(0) { + case reflect.TypeFor[bool](): + fixup = func(vals []reflect.Value) []reflect.Value { + return append(vals, reflect.Zero(reflect.TypeFor[error]())) + } + case reflect.TypeFor[error](): + fixup = func(vals []reflect.Value) []reflect.Value { + pass := vals[0].IsZero() + return append([]reflect.Value{reflect.ValueOf(pass)}, vals...) 
+ } + default: + panic(fmt.Sprintf("result is %v, want bool or error", ft.Out(0))) } case 2: if ft.Out(0) != reflect.TypeFor[bool]() || ft.Out(1) != reflect.TypeFor[error]() { - panic(fmt.Sprintf("results are %T, %T; want bool, error", ft.Out(0), ft.Out(1))) + panic(fmt.Sprintf("results are %v, %v; want bool, error", ft.Out(0), ft.Out(1))) } fixup = func(vals []reflect.Value) []reflect.Value { return vals } default: diff --git a/util/eventbus/eventbustest/eventbustest_test.go b/util/eventbus/eventbustest/eventbustest_test.go index fd95973e5..351553cc8 100644 --- a/util/eventbus/eventbustest/eventbustest_test.go +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -54,6 +54,27 @@ func TestExpectFilter(t *testing.T) { }, wantErr: false, }, + { + name: "filter-with-nil-error", + events: []int{1, 2, 3}, + expectFunc: func(event EventFoo) error { + if event.Value > 10 { + return fmt.Errorf("value > 10: %d", event.Value) + } + return nil + }, + }, + { + name: "filter-with-non-nil-error", + events: []int{100, 200, 300}, + expectFunc: func(event EventFoo) error { + if event.Value > 10 { + return fmt.Errorf("value > 10: %d", event.Value) + } + return nil + }, + wantErr: true, + }, { name: "first event has to be func", events: []int{24, 42}, From df362d0a0899e57b7e11e5de397b3688e850847b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Wed, 17 Sep 2025 10:49:41 -0400 Subject: [PATCH 1333/1708] net/netmon: make ChangeDelta event not a pointer (#17112) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes things work slightly better over the eventbus. Also switches ipnlocal to use the event over the eventbus instead of the direct callback. Updates #15160 Signed-off-by: Claus Lensbøl --- ipn/ipnlocal/local.go | 11 +++--- net/netmon/netmon.go | 12 ++---- net/netmon/netmon_test.go | 2 +- wgengine/userspace.go | 78 +++++++++++++++++++++++++-------------- 4 files changed, 61 insertions(+), 42 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index a712dc98a..017349165 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -207,6 +207,7 @@ type LocalBackend struct { clientVersionSub *eventbus.Subscriber[tailcfg.ClientVersion] autoUpdateSub *eventbus.Subscriber[controlclient.AutoUpdate] healthChangeSub *eventbus.Subscriber[health.Change] + changeDeltaSub *eventbus.Subscriber[netmon.ChangeDelta] subsDoneCh chan struct{} // closed when consumeEventbusTopics returns health *health.Tracker // always non-nil polc policyclient.Client // always non-nil @@ -216,7 +217,6 @@ type LocalBackend struct { dialer *tsdial.Dialer // non-nil; TODO(bradfitz): remove; use sys pushDeviceToken syncs.AtomicValue[string] backendLogID logid.PublicID - unregisterNetMon func() unregisterSysPolicyWatch func() portpoll *portlist.Poller // may be nil portpollOnce sync.Once // guards starting readPoller @@ -544,6 +544,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.clientVersionSub = eventbus.Subscribe[tailcfg.ClientVersion](b.eventClient) b.autoUpdateSub = eventbus.Subscribe[controlclient.AutoUpdate](b.eventClient) b.healthChangeSub = eventbus.Subscribe[health.Change](b.eventClient) + b.changeDeltaSub = eventbus.Subscribe[netmon.ChangeDelta](b.eventClient) nb := newNodeBackend(ctx, b.sys.Bus.Get()) b.currentNodeAtomic.Store(nb) nb.ready() @@ -591,10 +592,9 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.e.SetStatusCallback(b.setWgengineStatus) b.prevIfState = 
netMon.InterfaceState() - // Call our linkChange code once with the current state, and - // then also whenever it changes: + // Call our linkChange code once with the current state. + // Following changes are triggered via the eventbus. b.linkChange(&netmon.ChangeDelta{New: netMon.InterfaceState()}) - b.unregisterNetMon = netMon.RegisterChangeCallback(b.linkChange) if tunWrap, ok := b.sys.Tun.GetOK(); ok { tunWrap.PeerAPIPort = b.GetPeerAPIPort @@ -633,6 +633,8 @@ func (b *LocalBackend) consumeEventbusTopics() { b.onTailnetDefaultAutoUpdate(au.Value) case change := <-b.healthChangeSub.Events(): b.onHealthChange(change) + case changeDelta := <-b.changeDeltaSub.Events(): + b.linkChange(&changeDelta) } } } @@ -1160,7 +1162,6 @@ func (b *LocalBackend) Shutdown() { } b.stopOfflineAutoUpdate() - b.unregisterNetMon() b.unregisterSysPolicyWatch() if cc != nil { cc.Shutdown() diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index b97b184d4..fcac9c4ee 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -53,7 +53,7 @@ type osMon interface { type Monitor struct { logf logger.Logf b *eventbus.Client - changed *eventbus.Publisher[*ChangeDelta] + changed *eventbus.Publisher[ChangeDelta] om osMon // nil means not supported on this platform change chan bool // send false to wake poller, true to also force ChangeDeltas be sent @@ -84,9 +84,6 @@ type ChangeFunc func(*ChangeDelta) // ChangeDelta describes the difference between two network states. type ChangeDelta struct { - // Monitor is the network monitor that sent this delta. - Monitor *Monitor - // Old is the old interface state, if known. // It's nil if the old state is unknown. // Do not mutate it. @@ -126,7 +123,7 @@ func New(bus *eventbus.Bus, logf logger.Logf) (*Monitor, error) { stop: make(chan struct{}), lastWall: wallTime(), } - m.changed = eventbus.Publish[*ChangeDelta](m.b) + m.changed = eventbus.Publish[ChangeDelta](m.b) st, err := m.interfaceStateUncached() if err != nil { return nil, err @@ -401,8 +398,7 @@ func (m *Monitor) handlePotentialChange(newState *State, forceCallbacks bool) { return } - delta := &ChangeDelta{ - Monitor: m, + delta := ChangeDelta{ Old: oldState, New: newState, TimeJumped: timeJumped, @@ -437,7 +433,7 @@ func (m *Monitor) handlePotentialChange(newState *State, forceCallbacks bool) { } m.changed.Publish(delta) for _, cb := range m.cbs { - go cb(delta) + go cb(&delta) } } diff --git a/net/netmon/netmon_test.go b/net/netmon/netmon_test.go index b8ec1b75f..5fcdcc6cc 100644 --- a/net/netmon/netmon_test.go +++ b/net/netmon/netmon_test.go @@ -81,7 +81,7 @@ func TestMonitorInjectEventOnBus(t *testing.T) { mon.Start() mon.InjectEvent() - if err := eventbustest.Expect(tw, eventbustest.Type[*ChangeDelta]()); err != nil { + if err := eventbustest.Expect(tw, eventbustest.Type[ChangeDelta]()); err != nil { t.Error(err) } } diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 4a9f32143..42c12c008 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -93,26 +93,28 @@ const networkLoggerUploadTimeout = 5 * time.Second type userspaceEngine struct { // eventBus will eventually become required, but for now may be nil. // TODO(creachadair): Enforce that this is non-nil at construction. 
- eventBus *eventbus.Bus - - logf logger.Logf - wgLogger *wglog.Logger // a wireguard-go logging wrapper - reqCh chan struct{} - waitCh chan struct{} // chan is closed when first Close call completes; contrast with closing bool - timeNow func() mono.Time - tundev *tstun.Wrapper - wgdev *device.Device - router router.Router - dialer *tsdial.Dialer - confListenPort uint16 // original conf.ListenPort - dns *dns.Manager - magicConn *magicsock.Conn - netMon *netmon.Monitor - health *health.Tracker - netMonOwned bool // whether we created netMon (and thus need to close it) - netMonUnregister func() // unsubscribes from changes; used regardless of netMonOwned - birdClient BIRDClient // or nil - controlKnobs *controlknobs.Knobs // or nil + eventBus *eventbus.Bus + eventClient *eventbus.Client + changeDeltaSub *eventbus.Subscriber[netmon.ChangeDelta] + subsDoneCh chan struct{} // closed when consumeEventbusTopics returns + + logf logger.Logf + wgLogger *wglog.Logger // a wireguard-go logging wrapper + reqCh chan struct{} + waitCh chan struct{} // chan is closed when first Close call completes; contrast with closing bool + timeNow func() mono.Time + tundev *tstun.Wrapper + wgdev *device.Device + router router.Router + dialer *tsdial.Dialer + confListenPort uint16 // original conf.ListenPort + dns *dns.Manager + magicConn *magicsock.Conn + netMon *netmon.Monitor + health *health.Tracker + netMonOwned bool // whether we created netMon (and thus need to close it) + birdClient BIRDClient // or nil + controlKnobs *controlknobs.Knobs // or nil testMaybeReconfigHook func() // for tests; if non-nil, fires if maybeReconfigWireguardLocked called @@ -352,7 +354,11 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) controlKnobs: conf.ControlKnobs, reconfigureVPN: conf.ReconfigureVPN, health: conf.HealthTracker, + subsDoneCh: make(chan struct{}), } + e.eventClient = e.eventBus.Client("userspaceEngine") + e.changeDeltaSub = eventbus.Subscribe[netmon.ChangeDelta](e.eventClient) + closePool.addFunc(e.eventClient.Close) if e.birdClient != nil { // Disable the protocol at start time. @@ -385,13 +391,6 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) logf("link state: %+v", e.netMon.InterfaceState()) - unregisterMonWatch := e.netMon.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { - tshttpproxy.InvalidateCache() - e.linkChange(delta) - }) - closePool.addFunc(unregisterMonWatch) - e.netMonUnregister = unregisterMonWatch - endpointsFn := func(endpoints []tailcfg.Endpoint) { e.mu.Lock() e.endpoints = append(e.endpoints[:0], endpoints...) @@ -546,10 +545,31 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } } + go e.consumeEventbusTopics() + e.logf("Engine created.") return e, nil } +// consumeEventbusTopics consumes events from all relevant +// [eventbus.Subscriber]'s and passes them to their related handler. Events are +// always handled in the order they are received, i.e. the next event is not +// read until the previous event's handler has returned. It returns when the +// [eventbus.Client] is closed. +func (e *userspaceEngine) consumeEventbusTopics() { + defer close(e.subsDoneCh) + + for { + select { + case <-e.eventClient.Done(): + return + case changeDelta := <-e.changeDeltaSub.Events(): + tshttpproxy.InvalidateCache() + e.linkChange(&changeDelta) + } + } +} + // echoRespondToAll is an inbound post-filter responding to all echo requests. 
func echoRespondToAll(p *packet.Parsed, t *tstun.Wrapper, gro *gro.GRO) (filter.Response, *gro.GRO) { if p.IsEchoRequest() { @@ -1208,6 +1228,9 @@ func (e *userspaceEngine) RequestStatus() { } func (e *userspaceEngine) Close() { + e.eventClient.Close() + <-e.subsDoneCh + e.mu.Lock() if e.closing { e.mu.Unlock() @@ -1219,7 +1242,6 @@ func (e *userspaceEngine) Close() { r := bufio.NewReader(strings.NewReader("")) e.wgdev.IpcSetOperation(r) e.magicConn.Close() - e.netMonUnregister() if e.netMonOwned { e.netMon.Close() } From 8a4b1eb6a3cf9c3f082c2d725968239084bfeb51 Mon Sep 17 00:00:00 2001 From: Elliot Blackburn Date: Wed, 17 Sep 2025 16:18:25 +0100 Subject: [PATCH 1334/1708] words: add some more (#17177) Updates #words Signed-off-by: Elliot Blackburn --- words/scales.txt | 9 +++++++++ words/tails.txt | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/words/scales.txt b/words/scales.txt index 532734f6d..bb623fb6f 100644 --- a/words/scales.txt +++ b/words/scales.txt @@ -442,3 +442,12 @@ salary fujita caiman cichlid +logarithm +exponential +geological +cosmological +barometric +ph +pain +temperature +wyrm diff --git a/words/tails.txt b/words/tails.txt index 20ff326c1..f5e93bf50 100644 --- a/words/tails.txt +++ b/words/tails.txt @@ -764,3 +764,12 @@ sailfish billfish taimen sargo +story +tale +gecko +wyrm +meteor +ribbon +echo +lemming +worm From 6992f958fc5eb8309f204da953664181256e96ed Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 17 Sep 2025 08:39:29 -0700 Subject: [PATCH 1335/1708] util/eventbus: add an EqualTo helper for testing (#17178) For a common case of events being simple struct types with some exported fields, add a helper to check (reflectively) for equal values using cmp.Diff so that a failed comparison gives a useful diff in the test output. More complex uses will still want to provide their own comparisons; this (intentionally) does not export diff options or other hooks from the cmp package. Updates #15160 Change-Id: I86bee1771cad7debd9e3491aa6713afe6fd577a6 Signed-off-by: M. J. Fromberger --- util/eventbus/eventbustest/eventbustest.go | 14 ++++++++ .../eventbustest/eventbustest_test.go | 35 ++++++++++++++----- 2 files changed, 40 insertions(+), 9 deletions(-) diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index d5cfe5395..c32e71140 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "tailscale.com/util/eventbus" ) @@ -249,3 +250,16 @@ func Inject[T any](inj *Injector, event T) { } pub.(*eventbus.Publisher[T]).Publish(event) } + +// EqualTo returns an event-matching function for use with [Expect] and +// [ExpectExactly] that matches on an event of the given type that is equal to +// want by comparison with [cmp.Diff]. The expectation fails with an error +// message including the diff, if present. 
+func EqualTo[T any](want T) func(T) error { + return func(got T) error { + if diff := cmp.Diff(got, want); diff != "" { + return fmt.Errorf("wrong result (-got, +want):\n%s", diff) + } + return nil + } +} diff --git a/util/eventbus/eventbustest/eventbustest_test.go b/util/eventbus/eventbustest/eventbustest_test.go index 351553cc8..f8b37eefe 100644 --- a/util/eventbus/eventbustest/eventbustest_test.go +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -5,6 +5,7 @@ package eventbustest_test import ( "fmt" + "strings" "testing" "time" @@ -29,19 +30,17 @@ func TestExpectFilter(t *testing.T) { name string events []int expectFunc any - wantErr bool + wantErr string // if non-empty, an error is expected containing this text }{ { name: "single event", events: []int{42}, expectFunc: eventbustest.Type[EventFoo](), - wantErr: false, }, { name: "multiple events, single expectation", events: []int{42, 1, 2, 3, 4, 5}, expectFunc: eventbustest.Type[EventFoo](), - wantErr: false, }, { name: "filter on event with function", @@ -52,7 +51,6 @@ func TestExpectFilter(t *testing.T) { } return false, nil }, - wantErr: false, }, { name: "filter-with-nil-error", @@ -73,7 +71,7 @@ func TestExpectFilter(t *testing.T) { } return nil }, - wantErr: true, + wantErr: "value > 10", }, { name: "first event has to be func", @@ -84,7 +82,18 @@ func TestExpectFilter(t *testing.T) { } return false, nil }, - wantErr: true, + wantErr: "expected 42, got 24", + }, + { + name: "equal-values", + events: []int{23}, + expectFunc: eventbustest.EqualTo(EventFoo{Value: 23}), + }, + { + name: "unequal-values", + events: []int{37}, + expectFunc: eventbustest.EqualTo(EventFoo{Value: 23}), + wantErr: "wrong result (-got, +want)", }, { name: "no events", @@ -92,7 +101,7 @@ func TestExpectFilter(t *testing.T) { expectFunc: func(event EventFoo) (bool, error) { return true, nil }, - wantErr: true, + wantErr: "timed out waiting", }, } @@ -113,8 +122,16 @@ func TestExpectFilter(t *testing.T) { updater.Publish(EventFoo{i}) } - if err := eventbustest.Expect(tw, tt.expectFunc); (err != nil) != tt.wantErr { - t.Errorf("ExpectFilter[EventFoo]: error = %v, wantErr %v", err, tt.wantErr) + if err := eventbustest.Expect(tw, tt.expectFunc); err != nil { + if tt.wantErr == "" { + t.Errorf("Expect[EventFoo]: unexpected error: %v", err) + } else if !strings.Contains(err.Error(), tt.wantErr) { + t.Errorf("Expect[EventFoo]: err = %v, want %q", err, tt.wantErr) + } else { + t.Logf("Got expected error: %v (OK)", err) + } + } else if tt.wantErr != "" { + t.Errorf("Expect[EventFoo]: unexpectedly succeeded, want error %q", tt.wantErr) } }) } From 9d661663f33a0cba3c372278864600f62c39a4b4 Mon Sep 17 00:00:00 2001 From: Remy Guercio Date: Wed, 17 Sep 2025 14:05:22 -0500 Subject: [PATCH 1336/1708] cmd/tsidp: update README with new repo location warning Fixes: #17170 Signed-off-by: Remy Guercio --- cmd/tsidp/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/tsidp/README.md b/cmd/tsidp/README.md index ffc296b87..1635feabf 100644 --- a/cmd/tsidp/README.md +++ b/cmd/tsidp/README.md @@ -1,3 +1,6 @@ +> [!CAUTION] +> Development of tsidp has been moved to [https://github.com/tailscale/tsidp](https://github.com/tailscale/tsidp) and it is no longer maintained here. Please visit the new repository to see the latest updates, file an issue, or contribute. 
+ # `tsidp` - Tailscale OpenID Connect (OIDC) Identity Provider [![status: community project](https://img.shields.io/badge/status-community_project-blue)](https://tailscale.com/kb/1531/community-projects) From bb38bf74144b69130acffdd479db31607a14b339 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 17 Sep 2025 20:22:24 +0100 Subject: [PATCH 1337/1708] docker: bump alpine v3.19 -> 3.22 (#17155) Updates #15328 Change-Id: Ib33baf8756b648176dce461b25169e079cbd5533 Signed-off-by: Tom Proctor --- ALPINE.txt | 2 +- Dockerfile | 6 +++--- Dockerfile.base | 8 ++++---- build_docker.sh | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/ALPINE.txt b/ALPINE.txt index 318956c3d..93a84c380 100644 --- a/ALPINE.txt +++ b/ALPINE.txt @@ -1 +1 @@ -3.19 \ No newline at end of file +3.22 \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index bd0f2840f..c546cf657 100644 --- a/Dockerfile +++ b/Dockerfile @@ -71,10 +71,10 @@ RUN GOARCH=$TARGETARCH go install -ldflags="\ -X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \ -v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot -FROM alpine:3.19 +FROM alpine:3.22 RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables -RUN rm /sbin/iptables && ln -s /sbin/iptables-legacy /sbin/iptables -RUN rm /sbin/ip6tables && ln -s /sbin/ip6tables-legacy /sbin/ip6tables +RUN ln -s /sbin/iptables-legacy /sbin/iptables +RUN ln -s /sbin/ip6tables-legacy /sbin/ip6tables COPY --from=build-env /go/bin/* /usr/local/bin/ # For compat with the previous run.sh, although ideally you should be diff --git a/Dockerfile.base b/Dockerfile.base index b7e79a43c..6c3c8ed08 100644 --- a/Dockerfile.base +++ b/Dockerfile.base @@ -1,12 +1,12 @@ # Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause -FROM alpine:3.19 +FROM alpine:3.22 RUN apk add --no-cache ca-certificates iptables iptables-legacy iproute2 ip6tables iputils -# Alpine 3.19 replaces legacy iptables with nftables based implementation. We +# Alpine 3.19 replaced legacy iptables with nftables based implementation. We # can't be certain that all hosts that run Tailscale containers currently # suppport nftables, so link back to legacy for backwards compatibility reasons. # TODO(irbekrm): add some way how to determine if we still run on nodes that # don't support nftables, so that we can eventually remove these symlinks. -RUN rm /sbin/iptables && ln -s /sbin/iptables-legacy /sbin/iptables -RUN rm /sbin/ip6tables && ln -s /sbin/ip6tables-legacy /sbin/ip6tables +RUN ln -s /sbin/iptables-legacy /sbin/iptables +RUN ln -s /sbin/ip6tables-legacy /sbin/ip6tables diff --git a/build_docker.sh b/build_docker.sh index bdeaa8659..37f00bf53 100755 --- a/build_docker.sh +++ b/build_docker.sh @@ -26,7 +26,7 @@ eval "$(./build_dist.sh shellvars)" DEFAULT_TARGET="client" DEFAULT_TAGS="v${VERSION_SHORT},v${VERSION_MINOR}" -DEFAULT_BASE="tailscale/alpine-base:3.19" +DEFAULT_BASE="tailscale/alpine-base:3.22" # Set a few pre-defined OCI annotations. The source annotation is used by tools such as Renovate that scan the linked # Github repo to find release notes for any new image tags. Note that for official Tailscale images the default # annotations defined here will be overriden by release scripts that call this script. 
From 73c371f78403b9e11259d7241caba2ca4654911b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 17 Sep 2025 12:49:00 -0700 Subject: [PATCH 1338/1708] cmd/derper: permit port 80 in ACE targets Updates tailscale/corp#32168 Updates tailscale/corp#32226 Change-Id: Iddc017b060c76e6eab8f6d0c989a775bcaae3518 Signed-off-by: Brad Fitzpatrick --- cmd/derper/ace.go | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/cmd/derper/ace.go b/cmd/derper/ace.go index 301b029cc..a11539a6e 100644 --- a/cmd/derper/ace.go +++ b/cmd/derper/ace.go @@ -35,8 +35,35 @@ func serveConnect(s *derp.Server, w http.ResponseWriter, r *http.Request) { if err != nil { return err } - if port != "443" { - return fmt.Errorf("only port 443 is allowed") + if port != "443" && port != "80" { + // There are only two types of CONNECT requests the client makes + // via ACE: requests for /key (port 443) and requests to upgrade + // to the bidirectional ts2021 Noise protocol. + // + // The ts2021 layer can bootstrap over port 80 (http) or port + // 443 (https). + // + // Without ACE, we prefer port 80 to avoid unnecessary double + // encryption. But enough places require TLS+port 443 that we do + // support that double encryption path as a fallback. + // + // But ACE adds its own TLS layer (ACE is always CONNECT over + // https). If we don't permit port 80 here as a target, we'd + // have three layers of encryption (TLS + TLS + Noise) which is + // even more silly than two. + // + // So we permit port 80 such that we can only have two layers of + // encryption, varying by the request type: + // + // 1. TLS from client to ACE proxy (CONNECT) + // 2a. TLS from ACE proxy to https://controlplane.tailscale.com/key (port 443) + // 2b. ts2021 Noise from ACE proxy to http://controlplane.tailscale.com/ts2021 (port 80) + // + // But nothing's stopping the client from doing its ts2021 + // upgrade over https anyway and having three layers of + // encryption. But we can at least permit the client to do a + // "CONNECT controlplane.tailscale.com:80 HTTP/1.1" if it wants. + return fmt.Errorf("only ports 443 and 80 are allowed") } // TODO(bradfitz): make policy configurable from flags and/or come // from local tailscaled nodeAttrs From 55d0e6d3a8f2622355d9dde1c71c4932731fb319 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 16 Sep 2025 20:32:28 -0700 Subject: [PATCH 1339/1708] net/dns/recursive: remove recursive DNS resolver It doesn't really pull its weight: it adds 577 KB to the binary and is rarely useful. Also, we now have static IPs and other connectivity paths coming soon enough. 
Updates #5853 Updates #1278 Updates tailscale/corp#32168 Change-Id: If336fed00a9c9ae9745419e6d81f7de6da6f7275 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 6 +- cmd/tailscale/depaware.txt | 12 +- cmd/tailscaled/depaware.txt | 8 +- cmd/tsidp/depaware.txt | 6 +- net/dns/recursive/recursive.go | 622 ----------------------- net/dns/recursive/recursive_test.go | 742 ---------------------------- net/dnsfallback/dnsfallback.go | 155 +----- tsnet/depaware.txt | 6 +- 8 files changed, 15 insertions(+), 1542 deletions(-) delete mode 100644 net/dns/recursive/recursive.go delete mode 100644 net/dns/recursive/recursive_test.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index faf7b2f83..e65977875 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -168,7 +168,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ L github.com/mdlayher/netlink/nltest from github.com/google/nftables L github.com/mdlayher/sdnotify from tailscale.com/util/systemd L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/miekg/dns from tailscale.com/net/dns/recursive 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket github.com/modern-go/concurrent from github.com/json-iterator/go 💣 github.com/modern-go/reflect2 from github.com/json-iterator/go @@ -847,7 +846,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/cmd/k8s-operator+ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ @@ -1026,8 +1024,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from github.com/prometheus-community/pro-bing+ + golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/net/websocket from tailscale.com/k8s-operator/sessionrecording/ws diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index c86af7ea7..ae4a7bd4d 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -48,7 +48,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink - github.com/miekg/dns from tailscale.com/net/dns/recursive 💣 github.com/mitchellh/go-ps from tailscale.com/cmd/tailscale/cli+ github.com/peterbourgon/ff/v3 from github.com/peterbourgon/ff/v3/ffcli+ github.com/peterbourgon/ff/v3/ffcli from tailscale.com/cmd/tailscale/cli+ @@ -121,7 +120,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/metrics from tailscale.com/derp+ 
tailscale.com/net/bakedroots from tailscale.com/net/tlsdial tailscale.com/net/captivedetection from tailscale.com/net/netcheck - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dnscache from tailscale.com/control/controlhttp+ tailscale.com/net/dnsfallback from tailscale.com/control/controlhttp+ tailscale.com/net/netaddr from tailscale.com/ipn+ @@ -192,8 +190,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/quarantine from tailscale.com/cmd/tailscale/cli tailscale.com/util/rands from tailscale.com/tsweb tailscale.com/util/set from tailscale.com/derp+ - tailscale.com/util/singleflight from tailscale.com/net/dnscache+ - tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ + tailscale.com/util/singleflight from tailscale.com/net/dnscache + tailscale.com/util/slicesx from tailscale.com/client/systray+ L tailscale.com/util/stringsx from tailscale.com/client/systray tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ @@ -250,8 +248,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from golang.org/x/net/icmp+ + golang.org/x/net/ipv6 from golang.org/x/net/icmp+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials @@ -337,7 +335,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from github.com/miekg/dns+ + crypto/tls from golang.org/x/net/http2+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index d4e1f13bf..4482ad125 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -149,7 +149,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/mdlayher/netlink/nltest from github.com/google/nftables L github.com/mdlayher/sdnotify from tailscale.com/util/systemd L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/miekg/dns from tailscale.com/net/dns/recursive 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket L github.com/pierrec/lz4/v4 from github.com/u-root/uio/uio L github.com/pierrec/lz4/v4/internal/lz4block from github.com/pierrec/lz4/v4+ @@ -321,7 +320,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ @@ -433,7 +431,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by 
github.com/tailscale/de tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock tailscale.com/util/set from tailscale.com/derp+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ - tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ + tailscale.com/util/slicesx from tailscale.com/appc+ tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ @@ -504,8 +502,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from github.com/prometheus-community/pro-bing+ + golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 0aafff8e1..7db7849b7 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -132,7 +132,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar L github.com/mdlayher/netlink/nltest from github.com/google/nftables L github.com/mdlayher/sdnotify from tailscale.com/util/systemd L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/miekg/dns from tailscale.com/net/dns/recursive 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ @@ -276,7 +275,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ @@ -455,8 +453,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from github.com/prometheus-community/pro-bing+ + golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ diff --git a/net/dns/recursive/recursive.go b/net/dns/recursive/recursive.go deleted file mode 100644 index fd865e37a..000000000 --- a/net/dns/recursive/recursive.go +++ /dev/null @@ -1,622 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package recursive implements a simple recursive 
DNS resolver. -package recursive - -import ( - "context" - "errors" - "fmt" - "net" - "net/netip" - "slices" - "strings" - "time" - - "github.com/miekg/dns" - "tailscale.com/envknob" - "tailscale.com/net/netns" - "tailscale.com/types/logger" - "tailscale.com/util/dnsname" - "tailscale.com/util/mak" - "tailscale.com/util/multierr" - "tailscale.com/util/slicesx" -) - -const ( - // maxDepth is how deep from the root nameservers we'll recurse when - // resolving; passing this limit will instead return an error. - // - // maxDepth must be at least 20 to resolve "console.aws.amazon.com", - // which is a domain with a moderately complicated DNS setup. The - // current value of 30 was chosen semi-arbitrarily to ensure that we - // have about 50% headroom. - maxDepth = 30 - // numStartingServers is the number of root nameservers that we use as - // initial candidates for our recursion. - numStartingServers = 3 - // udpQueryTimeout is the amount of time we wait for a UDP response - // from a nameserver before falling back to a TCP connection. - udpQueryTimeout = 5 * time.Second - - // These constants aren't typed in the DNS package, so we create typed - // versions here to avoid having to do repeated type casts. - qtypeA dns.Type = dns.Type(dns.TypeA) - qtypeAAAA dns.Type = dns.Type(dns.TypeAAAA) -) - -var ( - // ErrMaxDepth is returned when recursive resolving exceeds the maximum - // depth limit for this package. - ErrMaxDepth = fmt.Errorf("exceeded max depth %d when resolving", maxDepth) - - // ErrAuthoritativeNoResponses is the error returned when an - // authoritative nameserver indicates that there are no responses to - // the given query. - ErrAuthoritativeNoResponses = errors.New("authoritative server returned no responses") - - // ErrNoResponses is returned when our resolution process completes - // with no valid responses from any nameserver, but no authoritative - // server explicitly returned NXDOMAIN. 
- ErrNoResponses = errors.New("no responses to query") -) - -var rootServersV4 = []netip.Addr{ - netip.MustParseAddr("198.41.0.4"), // a.root-servers.net - netip.MustParseAddr("170.247.170.2"), // b.root-servers.net - netip.MustParseAddr("192.33.4.12"), // c.root-servers.net - netip.MustParseAddr("199.7.91.13"), // d.root-servers.net - netip.MustParseAddr("192.203.230.10"), // e.root-servers.net - netip.MustParseAddr("192.5.5.241"), // f.root-servers.net - netip.MustParseAddr("192.112.36.4"), // g.root-servers.net - netip.MustParseAddr("198.97.190.53"), // h.root-servers.net - netip.MustParseAddr("192.36.148.17"), // i.root-servers.net - netip.MustParseAddr("192.58.128.30"), // j.root-servers.net - netip.MustParseAddr("193.0.14.129"), // k.root-servers.net - netip.MustParseAddr("199.7.83.42"), // l.root-servers.net - netip.MustParseAddr("202.12.27.33"), // m.root-servers.net -} - -var rootServersV6 = []netip.Addr{ - netip.MustParseAddr("2001:503:ba3e::2:30"), // a.root-servers.net - netip.MustParseAddr("2801:1b8:10::b"), // b.root-servers.net - netip.MustParseAddr("2001:500:2::c"), // c.root-servers.net - netip.MustParseAddr("2001:500:2d::d"), // d.root-servers.net - netip.MustParseAddr("2001:500:a8::e"), // e.root-servers.net - netip.MustParseAddr("2001:500:2f::f"), // f.root-servers.net - netip.MustParseAddr("2001:500:12::d0d"), // g.root-servers.net - netip.MustParseAddr("2001:500:1::53"), // h.root-servers.net - netip.MustParseAddr("2001:7fe::53"), // i.root-servers.net - netip.MustParseAddr("2001:503:c27::2:30"), // j.root-servers.net - netip.MustParseAddr("2001:7fd::1"), // k.root-servers.net - netip.MustParseAddr("2001:500:9f::42"), // l.root-servers.net - netip.MustParseAddr("2001:dc3::35"), // m.root-servers.net -} - -var debug = envknob.RegisterBool("TS_DEBUG_RECURSIVE_DNS") - -// Resolver is a recursive DNS resolver that is designed for looking up A and AAAA records. -type Resolver struct { - // Dialer is used to create outbound connections. If nil, a zero - // net.Dialer will be used instead. - Dialer netns.Dialer - - // Logf is the logging function to use; if none is specified, then logs - // will be dropped. - Logf logger.Logf - - // NoIPv6, if set, will prevent this package from querying for AAAA - // records and will avoid contacting nameservers over IPv6. - NoIPv6 bool - - // Test mocks - testQueryHook func(name dnsname.FQDN, nameserver netip.Addr, protocol string, qtype dns.Type) (*dns.Msg, error) - testExchangeHook func(nameserver netip.Addr, network string, msg *dns.Msg) (*dns.Msg, error) - rootServers []netip.Addr - timeNow func() time.Time - - // Caching - // NOTE(andrew): if we make resolution parallel, this needs a mutex - queryCache map[dnsQuery]dnsMsgWithExpiry - - // Possible future additions: - // - Additional nameservers? From the system maybe? - // - NoIPv4 for IPv4 - // - DNS-over-HTTPS or DNS-over-TLS support -} - -// queryState stores all state during the course of a single query -type queryState struct { - // rootServers are the root nameservers to start from - rootServers []netip.Addr - - // TODO: metrics? 
-} - -type dnsQuery struct { - nameserver netip.Addr - name dnsname.FQDN - qtype dns.Type -} - -func (q dnsQuery) String() string { - return fmt.Sprintf("dnsQuery{nameserver:%q,name:%q,qtype:%v}", q.nameserver.String(), q.name, q.qtype) -} - -type dnsMsgWithExpiry struct { - *dns.Msg - expiresAt time.Time -} - -func (r *Resolver) now() time.Time { - if r.timeNow != nil { - return r.timeNow() - } - return time.Now() -} - -func (r *Resolver) logf(format string, args ...any) { - if r.Logf == nil { - return - } - r.Logf(format, args...) -} - -func (r *Resolver) depthlogf(depth int, format string, args ...any) { - if r.Logf == nil || !debug() { - return - } - prefix := fmt.Sprintf("[%d] %s", depth, strings.Repeat(" ", depth)) - r.Logf(prefix+format, args...) -} - -var defaultDialer net.Dialer - -func (r *Resolver) dialer() netns.Dialer { - if r.Dialer != nil { - return r.Dialer - } - - return &defaultDialer -} - -func (r *Resolver) newState() *queryState { - var rootServers []netip.Addr - if len(r.rootServers) > 0 { - rootServers = r.rootServers - } else { - // Select a random subset of root nameservers to start from, since if - // we don't get responses from those, something else has probably gone - // horribly wrong. - roots4 := slices.Clone(rootServersV4) - slicesx.Shuffle(roots4) - roots4 = roots4[:numStartingServers] - - var roots6 []netip.Addr - if !r.NoIPv6 { - roots6 = slices.Clone(rootServersV6) - slicesx.Shuffle(roots6) - roots6 = roots6[:numStartingServers] - } - - // Interleave the root servers so that we try to contact them over - // IPv4, then IPv6, IPv4, IPv6, etc. - rootServers = slicesx.Interleave(roots4, roots6) - } - - return &queryState{ - rootServers: rootServers, - } -} - -// Resolve will perform a recursive DNS resolution for the provided name, -// starting at a randomly-chosen root DNS server, and return the A and AAAA -// responses as a slice of netip.Addrs along with the minimum TTL for the -// returned records. -func (r *Resolver) Resolve(ctx context.Context, name string) (addrs []netip.Addr, minTTL time.Duration, err error) { - dnsName, err := dnsname.ToFQDN(name) - if err != nil { - return nil, 0, err - } - - qstate := r.newState() - - r.logf("querying IPv4 addresses for: %q", name) - addrs4, minTTL4, err4 := r.resolveRecursiveFromRoot(ctx, qstate, 0, dnsName, qtypeA) - - var ( - addrs6 []netip.Addr - minTTL6 time.Duration - err6 error - ) - if !r.NoIPv6 { - r.logf("querying IPv6 addresses for: %q", name) - addrs6, minTTL6, err6 = r.resolveRecursiveFromRoot(ctx, qstate, 0, dnsName, qtypeAAAA) - } - - if err4 != nil && err6 != nil { - if err4 == err6 { - return nil, 0, err4 - } - - return nil, 0, multierr.New(err4, err6) - } - if err4 != nil { - return addrs6, minTTL6, nil - } else if err6 != nil { - return addrs4, minTTL4, nil - } - - minTTL = minTTL4 - if minTTL6 < minTTL { - minTTL = minTTL6 - } - - addrs = append(addrs4, addrs6...) 
- if len(addrs) == 0 { - return nil, 0, ErrNoResponses - } - - slicesx.Shuffle(addrs) - return addrs, minTTL, nil -} - -func (r *Resolver) resolveRecursiveFromRoot( - ctx context.Context, - qstate *queryState, - depth int, - name dnsname.FQDN, // what we're querying - qtype dns.Type, -) ([]netip.Addr, time.Duration, error) { - r.depthlogf(depth, "resolving %q from root (type: %v)", name, qtype) - - var depthError bool - for _, server := range qstate.rootServers { - addrs, minTTL, err := r.resolveRecursive(ctx, qstate, depth, name, server, qtype) - if err == nil { - return addrs, minTTL, err - } else if errors.Is(err, ErrAuthoritativeNoResponses) { - return nil, 0, ErrAuthoritativeNoResponses - } else if errors.Is(err, ErrMaxDepth) { - depthError = true - } - } - - if depthError { - return nil, 0, ErrMaxDepth - } - return nil, 0, ErrNoResponses -} - -func (r *Resolver) resolveRecursive( - ctx context.Context, - qstate *queryState, - depth int, - name dnsname.FQDN, // what we're querying - nameserver netip.Addr, - qtype dns.Type, -) ([]netip.Addr, time.Duration, error) { - if depth == maxDepth { - r.depthlogf(depth, "not recursing past maximum depth") - return nil, 0, ErrMaxDepth - } - - // Ask this nameserver for an answer. - resp, err := r.queryNameserver(ctx, depth, name, nameserver, qtype) - if err != nil { - return nil, 0, err - } - - // If we get an actual answer from the nameserver, then return it. - var ( - answers []netip.Addr - cnames []dnsname.FQDN - minTTL = 24 * 60 * 60 // 24 hours in seconds - ) - for _, answer := range resp.Answer { - if crec, ok := answer.(*dns.CNAME); ok { - cnameFQDN, err := dnsname.ToFQDN(crec.Target) - if err != nil { - r.logf("bad CNAME %q returned: %v", crec.Target, err) - continue - } - - cnames = append(cnames, cnameFQDN) - continue - } - - addr := addrFromRecord(answer) - if !addr.IsValid() { - r.logf("[unexpected] invalid record in %T answer", answer) - } else if addr.Is4() && qtype != qtypeA { - r.logf("[unexpected] got IPv4 answer but qtype=%v", qtype) - } else if addr.Is6() && qtype != qtypeAAAA { - r.logf("[unexpected] got IPv6 answer but qtype=%v", qtype) - } else { - answers = append(answers, addr) - minTTL = min(minTTL, int(answer.Header().Ttl)) - } - } - - if len(answers) > 0 { - r.depthlogf(depth, "got answers for %q: %v", name, answers) - return answers, time.Duration(minTTL) * time.Second, nil - } - - r.depthlogf(depth, "no answers for %q", name) - - // If we have a non-zero number of CNAMEs, then try resolving those - // (from the root again) and return the first one that succeeds. - // - // TODO: return the union of all responses? - // TODO: parallelism? - if len(cnames) > 0 { - r.depthlogf(depth, "got CNAME responses for %q: %v", name, cnames) - } - var cnameDepthError bool - for _, cname := range cnames { - answers, minTTL, err := r.resolveRecursiveFromRoot(ctx, qstate, depth+1, cname, qtype) - if err == nil { - return answers, minTTL, nil - } else if errors.Is(err, ErrAuthoritativeNoResponses) { - return nil, 0, ErrAuthoritativeNoResponses - } else if errors.Is(err, ErrMaxDepth) { - cnameDepthError = true - } - } - - // If this is an authoritative response, then we know that continuing - // to look further is not going to result in any answers and we should - // bail out. - if resp.MsgHdr.Authoritative { - // If we failed to recurse into a CNAME due to a depth limit, - // propagate that here. 
- if cnameDepthError { - return nil, 0, ErrMaxDepth - } - - r.depthlogf(depth, "got authoritative response with no answers; stopping") - return nil, 0, ErrAuthoritativeNoResponses - } - - r.depthlogf(depth, "got %d NS responses and %d ADDITIONAL responses for %q", len(resp.Ns), len(resp.Extra), name) - - // No CNAMEs and no answers; see if we got any AUTHORITY responses, - // which indicate which nameservers to query next. - var authorities []dnsname.FQDN - for _, rr := range resp.Ns { - ns, ok := rr.(*dns.NS) - if !ok { - continue - } - - nsName, err := dnsname.ToFQDN(ns.Ns) - if err != nil { - r.logf("unexpected bad NS name %q: %v", ns.Ns, err) - continue - } - - authorities = append(authorities, nsName) - } - - // Also check for "glue" records, which are IP addresses provided by - // the DNS server for authority responses; these are required when the - // authority server is a subdomain of what's being resolved. - glueRecords := make(map[dnsname.FQDN][]netip.Addr) - for _, rr := range resp.Extra { - name, err := dnsname.ToFQDN(rr.Header().Name) - if err != nil { - r.logf("unexpected bad Name %q in Extra addr: %v", rr.Header().Name, err) - continue - } - - if addr := addrFromRecord(rr); addr.IsValid() { - glueRecords[name] = append(glueRecords[name], addr) - } else { - r.logf("unexpected bad Extra %T addr", rr) - } - } - - // Try authorities with glue records first, to minimize the number of - // additional DNS queries that we need to make. - authoritiesGlue, authoritiesNoGlue := slicesx.Partition(authorities, func(aa dnsname.FQDN) bool { - return len(glueRecords[aa]) > 0 - }) - - authorityDepthError := false - - r.depthlogf(depth, "authorities with glue records for recursion: %v", authoritiesGlue) - for _, authority := range authoritiesGlue { - for _, nameserver := range glueRecords[authority] { - answers, minTTL, err := r.resolveRecursive(ctx, qstate, depth+1, name, nameserver, qtype) - if err == nil { - return answers, minTTL, nil - } else if errors.Is(err, ErrAuthoritativeNoResponses) { - return nil, 0, ErrAuthoritativeNoResponses - } else if errors.Is(err, ErrMaxDepth) { - authorityDepthError = true - } - } - } - - r.depthlogf(depth, "authorities with no glue records for recursion: %v", authoritiesNoGlue) - for _, authority := range authoritiesNoGlue { - // First, resolve the IP for the authority server from the - // root, querying for both IPv4 and IPv6 addresses regardless - // of what the current question type is. - // - // TODO: check for infinite recursion; it'll get caught by our - // recursion depth, but we want to bail early. - for _, authorityQtype := range []dns.Type{qtypeAAAA, qtypeA} { - answers, _, err := r.resolveRecursiveFromRoot(ctx, qstate, depth+1, authority, authorityQtype) - if err != nil { - r.depthlogf(depth, "error querying authority %q: %v", authority, err) - continue - } - r.depthlogf(depth, "resolved authority %q (type %v) to: %v", authority, authorityQtype, answers) - - // Now, query this authority for the final address. 
- for _, nameserver := range answers { - answers, minTTL, err := r.resolveRecursive(ctx, qstate, depth+1, name, nameserver, qtype) - if err == nil { - return answers, minTTL, nil - } else if errors.Is(err, ErrAuthoritativeNoResponses) { - return nil, 0, ErrAuthoritativeNoResponses - } else if errors.Is(err, ErrMaxDepth) { - authorityDepthError = true - } - } - } - } - - if authorityDepthError { - return nil, 0, ErrMaxDepth - } - return nil, 0, ErrNoResponses -} - -// queryNameserver sends a query for "name" to the nameserver "nameserver" for -// records of type "qtype", trying both UDP and TCP connections as -// appropriate. -func (r *Resolver) queryNameserver( - ctx context.Context, - depth int, - name dnsname.FQDN, // what we're querying - nameserver netip.Addr, // destination of query - qtype dns.Type, -) (*dns.Msg, error) { - // TODO(andrew): we should QNAME minimisation here to avoid sending the - // full name to intermediate/root nameservers. See: - // https://www.rfc-editor.org/rfc/rfc7816 - - // Handle the case where UDP is blocked by adding an explicit timeout - // for the UDP portion of this query. - udpCtx, udpCtxCancel := context.WithTimeout(ctx, udpQueryTimeout) - defer udpCtxCancel() - - msg, err := r.queryNameserverProto(udpCtx, depth, name, nameserver, "udp", qtype) - if err == nil { - return msg, nil - } - - msg, err2 := r.queryNameserverProto(ctx, depth, name, nameserver, "tcp", qtype) - if err2 == nil { - return msg, nil - } - - return nil, multierr.New(err, err2) -} - -// queryNameserverProto sends a query for "name" to the nameserver "nameserver" -// for records of type "qtype" over the provided protocol (either "udp" -// or "tcp"), and returns the DNS response or an error. -func (r *Resolver) queryNameserverProto( - ctx context.Context, - depth int, - name dnsname.FQDN, // what we're querying - nameserver netip.Addr, // destination of query - protocol string, - qtype dns.Type, -) (resp *dns.Msg, err error) { - if r.testQueryHook != nil { - return r.testQueryHook(name, nameserver, protocol, qtype) - } - - now := r.now() - nameserverStr := nameserver.String() - - cacheKey := dnsQuery{ - nameserver: nameserver, - name: name, - qtype: qtype, - } - cacheEntry, ok := r.queryCache[cacheKey] - if ok && cacheEntry.expiresAt.Before(now) { - r.depthlogf(depth, "using cached response from %s about %q (type: %v)", nameserverStr, name, qtype) - return cacheEntry.Msg, nil - } - - var network string - if nameserver.Is4() { - network = protocol + "4" - } else { - network = protocol + "6" - } - - // Prepare a message asking for an appropriately-typed record - // for the name we're querying. - m := new(dns.Msg) - m.SetEdns0(1232, false /* no DNSSEC */) - m.SetQuestion(name.WithTrailingDot(), uint16(qtype)) - - // Allow mocking out the network components with our exchange hook. - if r.testExchangeHook != nil { - resp, err = r.testExchangeHook(nameserver, network, m) - } else { - // Dial the current nameserver using our dialer. - var nconn net.Conn - nconn, err = r.dialer().DialContext(ctx, network, net.JoinHostPort(nameserverStr, "53")) - if err != nil { - return nil, err - } - - var c dns.Client // TODO: share? - conn := &dns.Conn{ - Conn: nconn, - UDPSize: c.UDPSize, - } - - // Send the DNS request to the current nameserver. 
- r.depthlogf(depth, "asking %s over %s about %q (type: %v)", nameserverStr, protocol, name, qtype) - resp, _, err = c.ExchangeWithConnContext(ctx, m, conn) - } - if err != nil { - return nil, err - } - - // If the message was truncated and we're using UDP, re-run with TCP. - if resp.MsgHdr.Truncated && protocol == "udp" { - r.depthlogf(depth, "response message truncated; re-running query with TCP") - resp, err = r.queryNameserverProto(ctx, depth, name, nameserver, "tcp", qtype) - if err != nil { - return nil, err - } - } - - // Find minimum expiry for all records in this message. - var minTTL int - for _, rr := range resp.Answer { - minTTL = min(minTTL, int(rr.Header().Ttl)) - } - for _, rr := range resp.Ns { - minTTL = min(minTTL, int(rr.Header().Ttl)) - } - for _, rr := range resp.Extra { - minTTL = min(minTTL, int(rr.Header().Ttl)) - } - - mak.Set(&r.queryCache, cacheKey, dnsMsgWithExpiry{ - Msg: resp, - expiresAt: now.Add(time.Duration(minTTL) * time.Second), - }) - return resp, nil -} - -func addrFromRecord(rr dns.RR) netip.Addr { - switch v := rr.(type) { - case *dns.A: - ip, ok := netip.AddrFromSlice(v.A) - if !ok || !ip.Is4() { - return netip.Addr{} - } - return ip - case *dns.AAAA: - ip, ok := netip.AddrFromSlice(v.AAAA) - if !ok || !ip.Is6() { - return netip.Addr{} - } - return ip - } - return netip.Addr{} -} diff --git a/net/dns/recursive/recursive_test.go b/net/dns/recursive/recursive_test.go deleted file mode 100644 index d47e4cebf..000000000 --- a/net/dns/recursive/recursive_test.go +++ /dev/null @@ -1,742 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package recursive - -import ( - "context" - "errors" - "flag" - "fmt" - "net" - "net/netip" - "reflect" - "strings" - "testing" - "time" - - "slices" - - "github.com/miekg/dns" - "tailscale.com/envknob" - "tailscale.com/tstest" -) - -const testDomain = "tailscale.com" - -// Recursively resolving the AWS console requires being able to handle CNAMEs, -// glue records, falling back from UDP to TCP for oversize queries, and more; -// it's a great integration test for DNS resolution and they can handle the -// traffic :) -const complicatedTestDomain = "console.aws.amazon.com" - -var flagNetworkAccess = flag.Bool("enable-network-access", false, "run tests that need external network access") - -func init() { - envknob.Setenv("TS_DEBUG_RECURSIVE_DNS", "true") -} - -func newResolver(tb testing.TB) *Resolver { - clock := tstest.NewClock(tstest.ClockOpts{ - Step: 50 * time.Millisecond, - }) - return &Resolver{ - Logf: tb.Logf, - timeNow: clock.Now, - } -} - -func TestResolve(t *testing.T) { - if !*flagNetworkAccess { - t.SkipNow() - } - - ctx := context.Background() - r := newResolver(t) - addrs, minTTL, err := r.Resolve(ctx, testDomain) - if err != nil { - t.Fatal(err) - } - - t.Logf("addrs: %+v", addrs) - t.Logf("minTTL: %v", minTTL) - if len(addrs) < 1 { - t.Fatalf("expected at least one address") - } - - if minTTL <= 10*time.Second || minTTL >= 24*time.Hour { - t.Errorf("invalid minimum TTL: %v", minTTL) - } - - var has4, has6 bool - for _, addr := range addrs { - has4 = has4 || addr.Is4() - has6 = has6 || addr.Is6() - } - - if !has4 { - t.Errorf("expected at least one IPv4 address") - } - if !has6 { - t.Errorf("expected at least one IPv6 address") - } -} - -func TestResolveComplicated(t *testing.T) { - if !*flagNetworkAccess { - t.SkipNow() - } - - ctx := context.Background() - r := newResolver(t) - addrs, minTTL, err := r.Resolve(ctx, complicatedTestDomain) - if err != nil { - 
t.Fatal(err) - } - - t.Logf("addrs: %+v", addrs) - t.Logf("minTTL: %v", minTTL) - if len(addrs) < 1 { - t.Fatalf("expected at least one address") - } - - if minTTL <= 10*time.Second || minTTL >= 24*time.Hour { - t.Errorf("invalid minimum TTL: %v", minTTL) - } -} - -func TestResolveNoIPv6(t *testing.T) { - if !*flagNetworkAccess { - t.SkipNow() - } - - r := newResolver(t) - r.NoIPv6 = true - - addrs, _, err := r.Resolve(context.Background(), testDomain) - if err != nil { - t.Fatal(err) - } - - t.Logf("addrs: %+v", addrs) - if len(addrs) < 1 { - t.Fatalf("expected at least one address") - } - - for _, addr := range addrs { - if addr.Is6() { - t.Errorf("got unexpected IPv6 address: %v", addr) - } - } -} - -func TestResolveFallbackToTCP(t *testing.T) { - var udpCalls, tcpCalls int - hook := func(nameserver netip.Addr, network string, req *dns.Msg) (*dns.Msg, error) { - if strings.HasPrefix(network, "udp") { - t.Logf("got %q query; returning truncated result", network) - udpCalls++ - resp := &dns.Msg{} - resp.SetReply(req) - resp.Truncated = true - return resp, nil - } - - t.Logf("got %q query; returning real result", network) - tcpCalls++ - resp := &dns.Msg{} - resp.SetReply(req) - resp.Answer = append(resp.Answer, &dns.A{ - Hdr: dns.RR_Header{ - Name: req.Question[0].Name, - Rrtype: req.Question[0].Qtype, - Class: dns.ClassINET, - Ttl: 300, - }, - A: net.IPv4(1, 2, 3, 4), - }) - return resp, nil - } - - r := newResolver(t) - r.testExchangeHook = hook - - ctx := context.Background() - resp, err := r.queryNameserverProto(ctx, 0, "tailscale.com", netip.MustParseAddr("9.9.9.9"), "udp", dns.Type(dns.TypeA)) - if err != nil { - t.Fatal(err) - } - - if len(resp.Answer) < 1 { - t.Fatalf("no answers in response: %v", resp) - } - rrA, ok := resp.Answer[0].(*dns.A) - if !ok { - t.Fatalf("invalid RR type: %T", resp.Answer[0]) - } - if !rrA.A.Equal(net.IPv4(1, 2, 3, 4)) { - t.Errorf("wanted A response 1.2.3.4, got: %v", rrA.A) - } - if tcpCalls != 1 { - t.Errorf("got %d, want 1 TCP calls", tcpCalls) - } - if udpCalls != 1 { - t.Errorf("got %d, want 1 UDP calls", udpCalls) - } - - // Verify that we're cached and re-run to fetch from the cache. - if len(r.queryCache) < 1 { - t.Errorf("wanted entries in the query cache") - } - - resp2, err := r.queryNameserverProto(ctx, 0, "tailscale.com", netip.MustParseAddr("9.9.9.9"), "udp", dns.Type(dns.TypeA)) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(resp, resp2) { - t.Errorf("expected equal responses; old=%+v new=%+v", resp, resp2) - } - - // We didn't make any more network requests since we loaded from the cache. 
- if tcpCalls != 1 { - t.Errorf("got %d, want 1 TCP calls", tcpCalls) - } - if udpCalls != 1 { - t.Errorf("got %d, want 1 UDP calls", udpCalls) - } -} - -func dnsIPRR(name string, addr netip.Addr) dns.RR { - if addr.Is4() { - return &dns.A{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 300, - }, - A: net.IP(addr.AsSlice()), - } - } - - return &dns.AAAA{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeAAAA, - Class: dns.ClassINET, - Ttl: 300, - }, - AAAA: net.IP(addr.AsSlice()), - } -} - -func cnameRR(name, target string) dns.RR { - return &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - Ttl: 300, - }, - Target: target, - } -} - -func nsRR(name, target string) dns.RR { - return &dns.NS{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeNS, - Class: dns.ClassINET, - Ttl: 300, - }, - Ns: target, - } -} - -type mockReply struct { - name string - qtype dns.Type - resp *dns.Msg -} - -type replyMock struct { - tb testing.TB - replies map[netip.Addr][]mockReply -} - -func (r *replyMock) exchangeHook(nameserver netip.Addr, network string, req *dns.Msg) (*dns.Msg, error) { - if len(req.Question) != 1 { - r.tb.Fatalf("unsupported multiple or empty question: %v", req.Question) - } - question := req.Question[0] - - replies := r.replies[nameserver] - if len(replies) == 0 { - r.tb.Fatalf("no configured replies for nameserver: %v", nameserver) - } - - for _, reply := range replies { - if reply.name == question.Name && reply.qtype == dns.Type(question.Qtype) { - return reply.resp.Copy(), nil - } - } - - r.tb.Fatalf("no replies found for query %q of type %v to %v", question.Name, question.Qtype, nameserver) - panic("unreachable") -} - -// responses for mocking, shared between the following tests -var ( - rootServerAddr = netip.MustParseAddr("198.41.0.4") // a.root-servers.net. - comNSAddr = netip.MustParseAddr("192.5.6.30") // a.gtld-servers.net. - - // DNS response from the root nameservers for a .com nameserver - comRecord = &dns.Msg{ - Ns: []dns.RR{nsRR("com.", "a.gtld-servers.net.")}, - Extra: []dns.RR{dnsIPRR("a.gtld-servers.net.", comNSAddr)}, - } - - // Random Amazon nameservers that we use in glue records - amazonNS = netip.MustParseAddr("205.251.192.197") - amazonNSv6 = netip.MustParseAddr("2600:9000:5306:1600::1") - - // Nameservers for the tailscale.com domain - tailscaleNameservers = &dns.Msg{ - Ns: []dns.RR{ - nsRR("tailscale.com.", "ns-197.awsdns-24.com."), - nsRR("tailscale.com.", "ns-557.awsdns-05.net."), - nsRR("tailscale.com.", "ns-1558.awsdns-02.co.uk."), - nsRR("tailscale.com.", "ns-1359.awsdns-41.org."), - }, - Extra: []dns.RR{ - dnsIPRR("ns-197.awsdns-24.com.", amazonNS), - }, - } -) - -func TestBasicRecursion(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - // Query to the root server returns the .com server + a glue record - rootServerAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - }, - - // Query to the ".com" server return the nameservers for tailscale.com - comNSAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Query to the actual nameserver works. 
- amazonNS: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{ - dnsIPRR("tailscale.com.", netip.MustParseAddr("13.248.141.131")), - dnsIPRR("tailscale.com.", netip.MustParseAddr("76.223.15.28")), - }, - }}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{ - dnsIPRR("tailscale.com.", netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b")), - dnsIPRR("tailscale.com.", netip.MustParseAddr("2600:9000:a51d:27c1:1530:b9ef:2a6:b9e5")), - }, - }}, - }, - }, - } - - r := newResolver(t) - r.testExchangeHook = mock.exchangeHook - r.rootServers = []netip.Addr{rootServerAddr} - - // Query for tailscale.com, verify we get the right responses - ctx := context.Background() - addrs, minTTL, err := r.Resolve(ctx, "tailscale.com") - if err != nil { - t.Fatal(err) - } - wantAddrs := []netip.Addr{ - netip.MustParseAddr("13.248.141.131"), - netip.MustParseAddr("76.223.15.28"), - netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"), - netip.MustParseAddr("2600:9000:a51d:27c1:1530:b9ef:2a6:b9e5"), - } - slices.SortFunc(addrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - slices.SortFunc(wantAddrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - - if !reflect.DeepEqual(addrs, wantAddrs) { - t.Errorf("got addrs=%+v; want %+v", addrs, wantAddrs) - } - - const wantMinTTL = 5 * time.Minute - if minTTL != wantMinTTL { - t.Errorf("got minTTL=%+v; want %+v", minTTL, wantMinTTL) - } -} - -func TestNoAnswers(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - // Query to the root server returns the .com server + a glue record - rootServerAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - }, - - // Query to the ".com" server return the nameservers for tailscale.com - comNSAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Query to the actual nameserver returns no responses, authoritatively. 
- amazonNS: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{}, - }}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{}, - }}, - }, - }, - } - - r := &Resolver{ - Logf: t.Logf, - testExchangeHook: mock.exchangeHook, - rootServers: []netip.Addr{rootServerAddr}, - } - - // Query for tailscale.com, verify we get the right responses - _, _, err := r.Resolve(context.Background(), "tailscale.com") - if err == nil { - t.Fatalf("got no error, want error") - } - if !errors.Is(err, ErrAuthoritativeNoResponses) { - t.Fatalf("got err=%v, want %v", err, ErrAuthoritativeNoResponses) - } -} - -func TestRecursionCNAME(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - // Query to the root server returns the .com server + a glue record - rootServerAddr: { - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - }, - - // Query to the ".com" server return the nameservers for tailscale.com - comNSAddr: { - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Query to the actual nameserver works. 
- amazonNS: { - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{cnameRR("subdomain.otherdomain.com.", "subdomain.tailscale.com.")}, - }}, - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{cnameRR("subdomain.otherdomain.com.", "subdomain.tailscale.com.")}, - }}, - - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR("tailscale.com.", netip.MustParseAddr("13.248.141.131"))}, - }}, - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR("tailscale.com.", netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"))}, - }}, - }, - }, - } - - r := &Resolver{ - Logf: t.Logf, - testExchangeHook: mock.exchangeHook, - rootServers: []netip.Addr{rootServerAddr}, - } - - // Query for tailscale.com, verify we get the right responses - addrs, minTTL, err := r.Resolve(context.Background(), "subdomain.otherdomain.com") - if err != nil { - t.Fatal(err) - } - wantAddrs := []netip.Addr{ - netip.MustParseAddr("13.248.141.131"), - netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"), - } - slices.SortFunc(addrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - slices.SortFunc(wantAddrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - - if !reflect.DeepEqual(addrs, wantAddrs) { - t.Errorf("got addrs=%+v; want %+v", addrs, wantAddrs) - } - - const wantMinTTL = 5 * time.Minute - if minTTL != wantMinTTL { - t.Errorf("got minTTL=%+v; want %+v", minTTL, wantMinTTL) - } -} - -func TestRecursionNoGlue(t *testing.T) { - coukNS := netip.MustParseAddr("213.248.216.1") - coukRecord := &dns.Msg{ - Ns: []dns.RR{nsRR("com.", "dns1.nic.uk.")}, - Extra: []dns.RR{dnsIPRR("dns1.nic.uk.", coukNS)}, - } - - intermediateNS := netip.MustParseAddr("205.251.193.66") // g-ns-322.awsdns-02.co.uk. - intermediateRecord := &dns.Msg{ - Ns: []dns.RR{nsRR("awsdns-02.co.uk.", "g-ns-322.awsdns-02.co.uk.")}, - Extra: []dns.RR{dnsIPRR("g-ns-322.awsdns-02.co.uk.", intermediateNS)}, - } - - const amazonNameserver = "ns-1558.awsdns-02.co.uk." - tailscaleNameservers := &dns.Msg{ - Ns: []dns.RR{ - nsRR("tailscale.com.", amazonNameserver), - }, - } - - tailscaleResponses := []mockReply{ - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR("tailscale.com.", netip.MustParseAddr("13.248.141.131"))}, - }}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR("tailscale.com.", netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"))}, - }}, - } - - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - rootServerAddr: { - // Query to the root server returns the .com server + a glue record - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - - // Querying the .co.uk nameserver returns the .co.uk nameserver + a glue record. 
- {name: amazonNameserver, qtype: dns.Type(dns.TypeA), resp: coukRecord}, - {name: amazonNameserver, qtype: dns.Type(dns.TypeAAAA), resp: coukRecord}, - }, - - // Queries to the ".com" server return the nameservers - // for tailscale.com, which don't contain a glue - // record. - comNSAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Queries to the ".co.uk" nameserver returns the - // address of the intermediate Amazon nameserver. - coukNS: { - {name: amazonNameserver, qtype: dns.Type(dns.TypeA), resp: intermediateRecord}, - {name: amazonNameserver, qtype: dns.Type(dns.TypeAAAA), resp: intermediateRecord}, - }, - - // Queries to the intermediate nameserver returns an - // answer for the final Amazon nameserver. - intermediateNS: { - {name: amazonNameserver, qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR(amazonNameserver, amazonNS)}, - }}, - {name: amazonNameserver, qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR(amazonNameserver, amazonNSv6)}, - }}, - }, - - // Queries to the actual nameserver work and return - // responses to the query. - amazonNS: tailscaleResponses, - amazonNSv6: tailscaleResponses, - }, - } - - r := newResolver(t) - r.testExchangeHook = mock.exchangeHook - r.rootServers = []netip.Addr{rootServerAddr} - - // Query for tailscale.com, verify we get the right responses - addrs, minTTL, err := r.Resolve(context.Background(), "tailscale.com") - if err != nil { - t.Fatal(err) - } - wantAddrs := []netip.Addr{ - netip.MustParseAddr("13.248.141.131"), - netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"), - } - slices.SortFunc(addrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - slices.SortFunc(wantAddrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - - if !reflect.DeepEqual(addrs, wantAddrs) { - t.Errorf("got addrs=%+v; want %+v", addrs, wantAddrs) - } - - const wantMinTTL = 5 * time.Minute - if minTTL != wantMinTTL { - t.Errorf("got minTTL=%+v; want %+v", minTTL, wantMinTTL) - } -} - -func TestRecursionLimit(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{}, - } - - // Fill out a CNAME chain equal to our recursion limit; we won't get - // this far since each CNAME is more than 1 level "deep", but this - // ensures that we have more than the limit. - for i := range maxDepth + 1 { - curr := fmt.Sprintf("%d-tailscale.com.", i) - - tailscaleNameservers := &dns.Msg{ - Ns: []dns.RR{nsRR(curr, "ns-197.awsdns-24.com.")}, - Extra: []dns.RR{dnsIPRR("ns-197.awsdns-24.com.", amazonNS)}, - } - - // Query to the root server returns the .com server + a glue record - mock.replies[rootServerAddr] = append(mock.replies[rootServerAddr], - mockReply{name: curr, qtype: dns.Type(dns.TypeA), resp: comRecord}, - mockReply{name: curr, qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - ) - - // Query to the ".com" server return the nameservers for NN-tailscale.com - mock.replies[comNSAddr] = append(mock.replies[comNSAddr], - mockReply{name: curr, qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - mockReply{name: curr, qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - ) - - // Queries to the nameserver return a CNAME for the n+1th server. 
- next := fmt.Sprintf("%d-tailscale.com.", i+1) - mock.replies[amazonNS] = append(mock.replies[amazonNS], - mockReply{ - name: curr, - qtype: dns.Type(dns.TypeA), - resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{cnameRR(curr, next)}, - }, - }, - mockReply{ - name: curr, - qtype: dns.Type(dns.TypeAAAA), - resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{cnameRR(curr, next)}, - }, - }, - ) - } - - r := newResolver(t) - r.testExchangeHook = mock.exchangeHook - r.rootServers = []netip.Addr{rootServerAddr} - - // Query for the first node in the chain, 0-tailscale.com, and verify - // we get a max-depth error. - ctx := context.Background() - _, _, err := r.Resolve(ctx, "0-tailscale.com") - if err == nil { - t.Fatal("expected error, got nil") - } else if !errors.Is(err, ErrMaxDepth) { - t.Fatalf("got err=%v, want ErrMaxDepth", err) - } -} - -func TestInvalidResponses(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - // Query to the root server returns the .com server + a glue record - rootServerAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - }, - - // Query to the ".com" server return the nameservers for tailscale.com - comNSAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Query to the actual nameserver returns an invalid IP address - amazonNS: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{&dns.A{ - Hdr: dns.RR_Header{ - Name: "tailscale.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 300, - }, - // Note: this is an IPv6 addr in an IPv4 response - A: net.IP(netip.MustParseAddr("2600:9000:a51d:27c1:1530:b9ef:2a6:b9e5").AsSlice()), - }}, - }}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - // This an IPv4 response to an IPv6 query - Answer: []dns.RR{&dns.A{ - Hdr: dns.RR_Header{ - Name: "tailscale.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 300, - }, - A: net.IP(netip.MustParseAddr("13.248.141.131").AsSlice()), - }}, - }}, - }, - }, - } - - r := &Resolver{ - Logf: t.Logf, - testExchangeHook: mock.exchangeHook, - rootServers: []netip.Addr{rootServerAddr}, - } - - // Query for tailscale.com, verify we get no responses since the - // addresses are invalid. 
- _, _, err := r.Resolve(context.Background(), "tailscale.com") - if err == nil { - t.Fatalf("got no error, want error") - } - if !errors.Is(err, ErrAuthoritativeNoResponses) { - t.Fatalf("got err=%v, want %v", err, ErrAuthoritativeNoResponses) - } -} - -// TODO(andrew): test for more edge cases that aren't currently covered: -// * Nameservers that cross between IPv4 and IPv6 -// * Authoritative no replies after following CNAME -// * Authoritative no replies after following non-glue NS record -// * Error querying non-glue NS record followed by success diff --git a/net/dnsfallback/dnsfallback.go b/net/dnsfallback/dnsfallback.go index 8e53c3b29..9843d46f9 100644 --- a/net/dnsfallback/dnsfallback.go +++ b/net/dnsfallback/dnsfallback.go @@ -22,35 +22,20 @@ import ( "net/url" "os" "reflect" - "slices" "sync/atomic" "time" "tailscale.com/atomicfile" - "tailscale.com/envknob" "tailscale.com/health" - "tailscale.com/net/dns/recursive" "tailscale.com/net/netmon" "tailscale.com/net/netns" "tailscale.com/net/tlsdial" "tailscale.com/net/tshttpproxy" "tailscale.com/tailcfg" "tailscale.com/types/logger" - "tailscale.com/util/clientmetric" - "tailscale.com/util/singleflight" "tailscale.com/util/slicesx" ) -var ( - optRecursiveResolver = envknob.RegisterOptBool("TS_DNSFALLBACK_RECURSIVE_RESOLVER") - disableRecursiveResolver = envknob.RegisterBool("TS_DNSFALLBACK_DISABLE_RECURSIVE_RESOLVER") // legacy pre-1.52 env knob name -) - -type resolveResult struct { - addrs []netip.Addr - minTTL time.Duration -} - // MakeLookupFunc creates a function that can be used to resolve hostnames // (e.g. as a LookupIPFallback from dnscache.Resolver). // The netMon parameter is optional; if non-nil it's used to do faster interface lookups. @@ -68,145 +53,13 @@ type fallbackResolver struct { logf logger.Logf netMon *netmon.Monitor // or nil healthTracker *health.Tracker // or nil - sf singleflight.Group[string, resolveResult] // for tests waitForCompare bool } func (fr *fallbackResolver) Lookup(ctx context.Context, host string) ([]netip.Addr, error) { - // If they've explicitly disabled the recursive resolver with the legacy - // TS_DNSFALLBACK_DISABLE_RECURSIVE_RESOLVER envknob or not set the - // newer TS_DNSFALLBACK_RECURSIVE_RESOLVER to true, then don't use the - // recursive resolver. (tailscale/corp#15261) In the future, we might - // change the default (the opt.Bool being unset) to mean enabled. - if disableRecursiveResolver() || !optRecursiveResolver().EqualBool(true) { - return lookup(ctx, host, fr.logf, fr.healthTracker, fr.netMon) - } - - addrsCh := make(chan []netip.Addr, 1) - - // Run the recursive resolver in the background so we can - // compare the results. For tests, we also allow waiting for the - // comparison to complete; normally, we do this entirely asynchronously - // so as not to block the caller. 
- var done chan struct{} - if fr.waitForCompare { - done = make(chan struct{}) - go func() { - defer close(done) - fr.compareWithRecursive(ctx, addrsCh, host) - }() - } else { - go fr.compareWithRecursive(ctx, addrsCh, host) - } - - addrs, err := lookup(ctx, host, fr.logf, fr.healthTracker, fr.netMon) - if err != nil { - addrsCh <- nil - return nil, err - } - - addrsCh <- slices.Clone(addrs) - if fr.waitForCompare { - select { - case <-done: - case <-ctx.Done(): - } - } - return addrs, nil -} - -// compareWithRecursive is responsible for comparing the DNS resolution -// performed via the "normal" path (bootstrap DNS requests to the DERP servers) -// with DNS resolution performed with our in-process recursive DNS resolver. -// -// It will select on addrsCh to read exactly one set of addrs (returned by the -// "normal" path) and compare against the results returned by the recursive -// resolver. If ctx is canceled, then it will abort. -func (fr *fallbackResolver) compareWithRecursive( - ctx context.Context, - addrsCh <-chan []netip.Addr, - host string, -) { - logf := logger.WithPrefix(fr.logf, "recursive: ") - - // Ensure that we catch panics while we're testing this - // code path; this should never panic, but we don't - // want to take down the process by having the panic - // propagate to the top of the goroutine's stack and - // then terminate. - defer func() { - if r := recover(); r != nil { - logf("bootstrap DNS: recovered panic: %v", r) - metricRecursiveErrors.Add(1) - } - }() - - // Don't resolve the same host multiple times - // concurrently; if we end up in a tight loop, this can - // take up a lot of CPU. - var didRun bool - result, err, _ := fr.sf.Do(host, func() (resolveResult, error) { - didRun = true - resolver := &recursive.Resolver{ - Dialer: netns.NewDialer(logf, fr.netMon), - Logf: logf, - } - addrs, minTTL, err := resolver.Resolve(ctx, host) - if err != nil { - logf("error using recursive resolver: %v", err) - metricRecursiveErrors.Add(1) - return resolveResult{}, err - } - return resolveResult{addrs, minTTL}, nil - }) - - // The singleflight function handled errors; return if - // there was one. Additionally, don't bother doing the - // comparison if we waited on another singleflight - // caller; the results are likely to be the same, so - // rather than spam the logs we can just exit and let - // the singleflight call that did execute do the - // comparison. - // - // Returning here is safe because the addrsCh channel - // is buffered, so the main function won't block even - // if we never read from it. - if err != nil || !didRun { - return - } - - addrs, minTTL := result.addrs, result.minTTL - compareAddr := func(a, b netip.Addr) int { return a.Compare(b) } - slices.SortFunc(addrs, compareAddr) - - // Wait for a response from the main function; try this once before we - // check whether the context is canceled since selects are - // nondeterministic. - var oldAddrs []netip.Addr - select { - case oldAddrs = <-addrsCh: - // All good; continue - default: - // Now block. 
- select { - case oldAddrs = <-addrsCh: - case <-ctx.Done(): - return - } - } - slices.SortFunc(oldAddrs, compareAddr) - - matches := slices.Equal(addrs, oldAddrs) - - logf("bootstrap DNS comparison: matches=%v oldAddrs=%v addrs=%v minTTL=%v", matches, oldAddrs, addrs, minTTL) - - if matches { - metricRecursiveMatches.Add(1) - } else { - metricRecursiveMismatches.Add(1) - } + return lookup(ctx, host, fr.logf, fr.healthTracker, fr.netMon) } func lookup(ctx context.Context, host string, logf logger.Logf, ht *health.Tracker, netMon *netmon.Monitor) ([]netip.Addr, error) { @@ -428,9 +281,3 @@ func SetCachePath(path string, logf logger.Logf) { cachedDERPMap.Store(dm) logf("[v2] dnsfallback: SetCachePath loaded cached DERP map") } - -var ( - metricRecursiveMatches = clientmetric.NewCounter("dnsfallback_recursive_matches") - metricRecursiveMismatches = clientmetric.NewCounter("dnsfallback_recursive_mismatches") - metricRecursiveErrors = clientmetric.NewCounter("dnsfallback_recursive_errors") -) diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index b3e2b7f0e..c115332fa 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -132,7 +132,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L github.com/mdlayher/netlink/nltest from github.com/google/nftables L github.com/mdlayher/sdnotify from tailscale.com/util/systemd LA 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/miekg/dns from tailscale.com/net/dns/recursive LDW 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket DI github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ @@ -272,7 +271,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ @@ -448,8 +446,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ LDW golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from github.com/prometheus-community/pro-bing+ + golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ LDW golang.org/x/net/proxy from tailscale.com/net/netns DI golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ From 5e3e536c2d1faf9109a703f557c96b4144667d53 Mon Sep 17 00:00:00 2001 From: Esteban-Bermudez Date: Thu, 8 May 2025 21:11:33 -0700 Subject: [PATCH 1340/1708] cmd/tailscale/cli: add `remove` subcommand Fixes #12255 Add a new subcommand to `switch` for removing a profile from the local client. This does not delete the profile from the Tailscale account, but removes it from the local machine. This functionality is available on the GUI's, but not yet on the CLI. 
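As an illustration only (the profile name below is hypothetical), the
intended invocation looks like:

    $ tailscale switch remove example-tailnet.ts.net

The argument is matched against the profile ID, the tailnet domain name,
and the account name, in that order; asking to remove the currently
active profile only prints "Already on account ..." and exits without
removing anything.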
Signed-off-by: Esteban-Bermudez --- cmd/tailscale/cli/switch.go | 62 +++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/cmd/tailscale/cli/switch.go b/cmd/tailscale/cli/switch.go index 0677da1b3..a06630f78 100644 --- a/cmd/tailscale/cli/switch.go +++ b/cmd/tailscale/cli/switch.go @@ -34,6 +34,22 @@ This command is currently in alpha and may change in the future.`, return fs }(), Exec: switchProfile, + + // Add remove subcommand + Subcommands: []*ffcli.Command{ + { + Name: "remove", + ShortUsage: "tailscale switch remove ", + ShortHelp: "Remove a Tailscale account", + LongHelp: `"tailscale switch remove" removes a Tailscale account from the +local machine. This does not delete the account itself, but +it will no longer be available for switching to. You can +add it back by logging in again. + +This command is currently in alpha and may change in the future.`, + Exec: removeProfile, + }, + }, } func init() { @@ -186,3 +202,49 @@ func switchProfile(ctx context.Context, args []string) error { } } } + +func removeProfile(ctx context.Context, args []string) error { + if len(args) != 1 { + outln("usage: tailscale switch remove NAME") + os.Exit(1) + } + cp, all, err := localClient.ProfileStatus(ctx) + if err != nil { + errf("Failed to remove account: %v\n", err) + os.Exit(1) + } + + profID, ok := matchProfile(args[0], all) + if !ok { + errf("No profile named %q\n", args[0]) + os.Exit(1) + } + + if profID == cp.ID { + printf("Already on account %q\n", args[0]) + os.Exit(0) + } + + return localClient.DeleteProfile(ctx, profID) +} + +func matchProfile(arg string, all []ipn.LoginProfile) (ipn.ProfileID, bool) { + // Allow matching by ID, Tailnet, or Account + // in that order. + for _, p := range all { + if p.ID == ipn.ProfileID(arg) { + return p.ID, true + } + } + for _, p := range all { + if p.NetworkProfile.DomainName == arg { + return p.ID, true + } + } + for _, p := range all { + if p.Name == arg { + return p.ID, true + } + } + return "", false +} From 1c9aaa444da163bf0597cef09a100a4e7a0221b8 Mon Sep 17 00:00:00 2001 From: Esteban-Bermudez Date: Wed, 25 Jun 2025 12:15:53 -0700 Subject: [PATCH 1341/1708] cmd/tailscale/cli: use helper function for matching profiles This makes the `switch` command use the helper `matchProfile` function that was introduced in the `remove` sub command. Signed-off-by: Esteban-Bermudez --- cmd/tailscale/cli/switch.go | 43 +++++++------------------------------ 1 file changed, 8 insertions(+), 35 deletions(-) diff --git a/cmd/tailscale/cli/switch.go b/cmd/tailscale/cli/switch.go index a06630f78..b315a21e7 100644 --- a/cmd/tailscale/cli/switch.go +++ b/cmd/tailscale/cli/switch.go @@ -122,40 +122,8 @@ func switchProfile(ctx context.Context, args []string) error { errf("Failed to switch to account: %v\n", err) os.Exit(1) } - var profID ipn.ProfileID - // Allow matching by ID, Tailnet, Account, or Display Name - // in that order. 
- for _, p := range all {
- if p.ID == ipn.ProfileID(args[0]) {
- profID = p.ID
- break
- }
- }
- if profID == "" {
- for _, p := range all {
- if p.NetworkProfile.DomainName == args[0] {
- profID = p.ID
- break
- }
- }
- }
- if profID == "" {
- for _, p := range all {
- if p.Name == args[0] {
- profID = p.ID
- break
- }
- }
- }
- if profID == "" {
- for _, p := range all {
- if p.NetworkProfile.DisplayName == args[0] {
- profID = p.ID
- break
- }
- }
- }
- if profID == "" {
+ profID, ok := matchProfile(args[0], all)
+ if !ok {
 errf("No profile named %q\n", args[0])
 os.Exit(1)
 }
@@ -229,7 +197,7 @@ func removeProfile(ctx context.Context, args []string) error {
 }

 func matchProfile(arg string, all []ipn.LoginProfile) (ipn.ProfileID, bool) {
- // Allow matching by ID, Tailnet, or Account
+ // Allow matching by ID, Tailnet, Account, or Display Name
 // in that order.
 for _, p := range all {
 if p.ID == ipn.ProfileID(arg) {
@@ -246,5 +214,10 @@ func matchProfile(arg string, all []ipn.LoginProfile) (ipn.ProfileID, bool) {
 return p.ID, true
 }
 }
+ for _, p := range all {
+ if p.NetworkProfile.DisplayName == arg {
+ return p.ID, true
+ }
+ }
 return "", false
 }

From cd153aa644dd861602e386e71df20a61733b56a8 Mon Sep 17 00:00:00 2001
From: Alex Chan
Date: Thu, 11 Sep 2025 13:11:41 +0100
Subject: [PATCH 1342/1708] control, ipn, tailcfg: enable seamless key renewal by default

Previously, seamless key renewal was an opt-in feature. Customers had to set
a `seamless-key-renewal` node attribute in their policy file.

This patch enables seamless key renewal by default for all clients. It
includes a `disable-seamless-key-renewal` node attribute we can set in
Control, so we can manage the rollout and disable the feature for clients
with known bugs. This new attribute makes the feature opt-out.

Updates tailscale/corp#31479

Signed-off-by: Alex Chan
---
 control/controlknobs/controlknobs.go | 22 +++++++++++++++++++---
 ipn/ipnlocal/local.go | 8 ++++----
 tailcfg/tailcfg.go | 18 +++++++++++++++---
 3 files changed, 38 insertions(+), 10 deletions(-)

diff --git a/control/controlknobs/controlknobs.go b/control/controlknobs/controlknobs.go
index 2578744ca..09c16b8b1 100644
--- a/control/controlknobs/controlknobs.go
+++ b/control/controlknobs/controlknobs.go
@@ -62,8 +62,9 @@ type Knobs struct {
 // netfiltering, unless overridden by the user.
 LinuxForceNfTables atomic.Bool

- // SeamlessKeyRenewal is whether to enable the alpha functionality of
- // renewing node keys without breaking connections.
+ // SeamlessKeyRenewal is whether to renew node keys without breaking connections.
+ // This is enabled by default in 1.90 and later, but we can remotely disable
+ // it from the control plane if there's a problem.
 // http://go/seamless-key-renewal
 SeamlessKeyRenewal atomic.Bool

@@ -128,6 +129,7 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) {
 forceIPTables = has(tailcfg.NodeAttrLinuxMustUseIPTables)
 forceNfTables = has(tailcfg.NodeAttrLinuxMustUseNfTables)
 seamlessKeyRenewal = has(tailcfg.NodeAttrSeamlessKeyRenewal)
+ disableSeamlessKeyRenewal = has(tailcfg.NodeAttrDisableSeamlessKeyRenewal)
 probeUDPLifetime = has(tailcfg.NodeAttrProbeUDPLifetime)
 appCStoreRoutes = has(tailcfg.NodeAttrStoreAppCRoutes)
 userDialUseRoutes = has(tailcfg.NodeAttrUserDialUseRoutes)
@@ -154,7 +156,6 @@
 k.SilentDisco.Store(silentDisco)
 k.LinuxForceIPTables.Store(forceIPTables)
 k.LinuxForceNfTables.Store(forceNfTables)
- k.SeamlessKeyRenewal.Store(seamlessKeyRenewal)
 k.ProbeUDPLifetime.Store(probeUDPLifetime)
 k.AppCStoreRoutes.Store(appCStoreRoutes)
 k.UserDialUseRoutes.Store(userDialUseRoutes)
@@ -162,6 +163,21 @@
 k.DisableLocalDNSOverrideViaNRPT.Store(disableLocalDNSOverrideViaNRPT)
 k.DisableCaptivePortalDetection.Store(disableCaptivePortalDetection)
 k.DisableSkipStatusQueue.Store(disableSkipStatusQueue)
+
+ // If both attributes are present, then "enable" should win. This reflects
+ // the history of seamless key renewal.
+ //
+ // Before 1.90, seamless was a private alpha, opt-in feature. Devices would
+ // only do seamless renewal if customers opted in using the seamless renewal attr.
+ //
+ // In 1.90 and later, seamless is the default behaviour, and devices will use
+ // seamless unless explicitly told not to by control (e.g. if we discover
+ // a bug and want clients to use the prior behaviour).
+ //
+ // If a customer has opted in to the pre-1.90 seamless implementation, we
+ // don't want to switch it off for them -- we only want to switch it off for
+ // devices that haven't opted in.
+ k.SeamlessKeyRenewal.Store(seamlessKeyRenewal || !disableSeamlessKeyRenewal)
 }

 // AsDebugJSON returns k as something that can be marshalled with json.Marshal
diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go
index 017349165..1e102d53e 100644
--- a/ipn/ipnlocal/local.go
+++ b/ipn/ipnlocal/local.go
@@ -7420,10 +7420,10 @@ func (b *LocalBackend) readRouteInfoLocked() (*appc.RouteInfo, error) {
 return ri, nil
 }

-// seamlessRenewalEnabled reports whether seamless key renewals are enabled
-// (i.e. we saw our self node with the SeamlessKeyRenewal attr in a netmap).
-// This enables beta functionality of renewing node keys without breaking
-// connections.
+// seamlessRenewalEnabled reports whether seamless key renewals are enabled.
+//
+// As of 2025-09-11, this is the default behaviour unless nodes receive
+// [tailcfg.NodeAttrDisableSeamlessKeyRenewal] in their netmap.
 func (b *LocalBackend) seamlessRenewalEnabled() bool {
 return b.ControlKnobs().SeamlessKeyRenewal.Load()
 }
diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go
index 6c1357a63..6130df901 100644
--- a/tailcfg/tailcfg.go
+++ b/tailcfg/tailcfg.go
@@ -170,7 +170,8 @@ type CapabilityVersion int
 // - 123: 2025-07-28: fix deadlock regression from cryptokey routing change (issue #16651)
 // - 124: 2025-08-08: removed NodeAttrDisableMagicSockCryptoRouting support, crypto routing is now mandatory
 // - 125: 2025-08-11: dnstype.Resolver adds UseWithExitNode field.
-const CurrentCapabilityVersion CapabilityVersion = 125 +// - 126: 2025-09-17: Client uses seamless key renewal unless disabled by control (tailscale/corp#31479) +const CurrentCapabilityVersion CapabilityVersion = 126 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -2530,8 +2531,19 @@ const ( // This cannot be set simultaneously with NodeAttrLinuxMustUseIPTables. NodeAttrLinuxMustUseNfTables NodeCapability = "linux-netfilter?v=nftables" - // NodeAttrSeamlessKeyRenewal makes clients enable beta functionality - // of renewing node keys without breaking connections. + // NodeAttrDisableSeamlessKeyRenewal disables seamless key renewal, which is + // enabled by default in clients as of 2025-09-17 (1.90 and later). + // + // We will use this attribute to manage the rollout, and disable seamless in + // clients with known bugs. + // http://go/seamless-key-renewal + NodeAttrDisableSeamlessKeyRenewal NodeCapability = "disable-seamless-key-renewal" + + // NodeAttrSeamlessKeyRenewal was used to opt-in to seamless key renewal + // during its private alpha. + // + // Deprecated: NodeAttrSeamlessKeyRenewal is deprecated as of CapabilityVersion 126, + // because seamless key renewal is now enabled by default. NodeAttrSeamlessKeyRenewal NodeCapability = "seamless-key-renewal" // NodeAttrProbeUDPLifetime makes the client probe UDP path lifetime at the From 61751a0c9a80ca83dc55ffacff6d595e7c6c348d Mon Sep 17 00:00:00 2001 From: Erisa A Date: Thu, 18 Sep 2025 15:50:01 +0100 Subject: [PATCH 1343/1708] scripts/installer.sh: add Siemens Industrial OS (#17185) Fixes #17179 Signed-off-by: Erisa A --- scripts/installer.sh | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/scripts/installer.sh b/scripts/installer.sh index 4d968cd2b..b40177005 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -110,6 +110,17 @@ main() { APT_KEY_TYPE="keyring" fi ;; + industrial-os) + OS="debian" + PACKAGETYPE="apt" + if [ "$(printf %.1s "$VERSION_ID")" -lt 5 ]; then + VERSION="buster" + APT_KEY_TYPE="legacy" + else + VERSION="bullseye" + APT_KEY_TYPE="keyring" + fi + ;; parrot|mendel) OS="debian" PACKAGETYPE="apt" From 73bbd7cacaf1990926a24032c04e1fa379d0cf72 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 16 Sep 2025 20:22:14 -0700 Subject: [PATCH 1344/1708] build_dist.sh: add -trimpath Saves 81KB (20320440 to 20238520 bytes for linux/amd64) Updates #1278 Change-Id: Id607480c76220c74c8854ef1a2459aee650ad7b6 Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_dist.sh b/build_dist.sh index be0d4d47e..564e30221 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -57,4 +57,4 @@ while [ "$#" -gt 1 ]; do esac done -exec $go build ${tags:+-tags=$tags} -ldflags "$ldflags" "$@" +exec $go build ${tags:+-tags=$tags} -trimpath -ldflags "$ldflags" "$@" From 70dfdac609396440308a390f58dff0a97f78f0f4 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 18 Sep 2025 09:10:33 -0700 Subject: [PATCH 1345/1708] prober: allow custom tls.Config for TLS probes (#17186) Updates https://github.com/tailscale/corp/issues/28569 Signed-off-by: Andrew Lytvynov --- prober/derp.go | 5 +++-- prober/derp_test.go | 2 +- prober/dns_example_test.go | 3 ++- prober/tls.go | 30 ++++++++++++++++-------------- prober/tls_test.go | 18 ++++++++++++++++-- 5 files changed, 38 insertions(+), 20 deletions(-) diff --git a/prober/derp.go b/prober/derp.go index c7a82317d..52e56fd4e 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -8,6 
+8,7 @@ import ( "cmp" "context" crand "crypto/rand" + "crypto/tls" "encoding/binary" "encoding/json" "errors" @@ -68,7 +69,7 @@ type derpProber struct { ProbeMap ProbeClass // Probe classes for probing individual derpers. - tlsProbeFn func(string) ProbeClass + tlsProbeFn func(string, *tls.Config) ProbeClass udpProbeFn func(string, int) ProbeClass meshProbeFn func(string, string) ProbeClass bwProbeFn func(string, string, int64) ProbeClass @@ -206,7 +207,7 @@ func (d *derpProber) probeMapFn(ctx context.Context) error { if d.probes[n] == nil { log.Printf("adding DERP TLS probe for %s (%s) every %v", server.Name, region.RegionName, d.tlsInterval) derpPort := cmp.Or(server.DERPPort, 443) - d.probes[n] = d.p.Run(n, d.tlsInterval, labels, d.tlsProbeFn(fmt.Sprintf("%s:%d", server.HostName, derpPort))) + d.probes[n] = d.p.Run(n, d.tlsInterval, labels, d.tlsProbeFn(fmt.Sprintf("%s:%d", server.HostName, derpPort), nil)) } } diff --git a/prober/derp_test.go b/prober/derp_test.go index 93b8d760b..1ace9983c 100644 --- a/prober/derp_test.go +++ b/prober/derp_test.go @@ -74,7 +74,7 @@ func TestDerpProber(t *testing.T) { p: p, derpMapURL: srv.URL, tlsInterval: time.Second, - tlsProbeFn: func(_ string) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, + tlsProbeFn: func(_ string, _ *tls.Config) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, udpInterval: time.Second, udpProbeFn: func(_ string, _ int) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, meshInterval: time.Second, diff --git a/prober/dns_example_test.go b/prober/dns_example_test.go index a8326fd72..089816919 100644 --- a/prober/dns_example_test.go +++ b/prober/dns_example_test.go @@ -5,6 +5,7 @@ package prober_test import ( "context" + "crypto/tls" "flag" "fmt" "log" @@ -40,7 +41,7 @@ func ExampleForEachAddr() { // This function is called every time we discover a new IP address to check. makeTLSProbe := func(addr netip.Addr) []*prober.Probe { - pf := prober.TLSWithIP(*hostname, netip.AddrPortFrom(addr, 443)) + pf := prober.TLSWithIP(netip.AddrPortFrom(addr, 443), &tls.Config{ServerName: *hostname}) if *verbose { logger := logger.WithPrefix(log.Printf, fmt.Sprintf("[tls %s]: ", addr)) pf = probeLogWrapper(logger, pf) diff --git a/prober/tls.go b/prober/tls.go index 4fb4aa9c6..777b2b508 100644 --- a/prober/tls.go +++ b/prober/tls.go @@ -9,9 +9,9 @@ import ( "crypto/x509" "fmt" "io" - "net" "net/http" "net/netip" + "slices" "time" "tailscale.com/util/multierr" @@ -28,33 +28,31 @@ const letsEncryptStartedStaplingCRL int64 = 1746576000 // 2025-05-07 00:00:00 UT // The ProbeFunc connects to a hostPort (host:port string), does a TLS // handshake, verifies that the hostname matches the presented certificate, // checks certificate validity time and OCSP revocation status. -func TLS(hostPort string) ProbeClass { +// +// The TLS config is optional and may be nil. +func TLS(hostPort string, config *tls.Config) ProbeClass { return ProbeClass{ Probe: func(ctx context.Context) error { - certDomain, _, err := net.SplitHostPort(hostPort) - if err != nil { - return err - } - return probeTLS(ctx, certDomain, hostPort) + return probeTLS(ctx, config, hostPort) }, Class: "tls", } } -// TLSWithIP is like TLS, but dials the provided dialAddr instead -// of using DNS resolution. The certDomain is the expected name in -// the cert (and the SNI name to send). 
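// Illustrative usage sketch, not part of the patch above: how a caller would
// build TLS probes with the updated signatures, which take an optional
// *tls.Config instead of a certDomain string. The package name, host name,
// and IP below are placeholders, not values from this repository.
package example

import (
	"crypto/tls"
	"net/netip"

	"tailscale.com/prober"
)

func exampleTLSProbes() (plain, pinned prober.ProbeClass) {
	// A nil config means the dialed host name is used for SNI and
	// certificate verification, as before.
	plain = prober.TLS("derp.example.com:443", nil)

	// Dial a fixed IP, but send SNI and verify the certificate for a
	// specific name via the tls.Config.
	pinned = prober.TLSWithIP(
		netip.MustParseAddrPort("203.0.113.7:443"),
		&tls.Config{ServerName: "derp.example.com"},
	)
	return plain, pinned
}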
-func TLSWithIP(certDomain string, dialAddr netip.AddrPort) ProbeClass { +// TLSWithIP is like TLS, but dials the provided dialAddr instead of using DNS +// resolution. Use config.ServerName to send SNI and validate the name in the +// cert. +func TLSWithIP(dialAddr netip.AddrPort, config *tls.Config) ProbeClass { return ProbeClass{ Probe: func(ctx context.Context) error { - return probeTLS(ctx, certDomain, dialAddr.String()) + return probeTLS(ctx, config, dialAddr.String()) }, Class: "tls", } } -func probeTLS(ctx context.Context, certDomain string, dialHostPort string) error { - dialer := &tls.Dialer{Config: &tls.Config{ServerName: certDomain}} +func probeTLS(ctx context.Context, config *tls.Config, dialHostPort string) error { + dialer := &tls.Dialer{Config: config} conn, err := dialer.DialContext(ctx, "tcp", dialHostPort) if err != nil { return fmt.Errorf("connecting to %q: %w", dialHostPort, err) @@ -108,6 +106,10 @@ func validateConnState(ctx context.Context, cs *tls.ConnectionState) (returnerr } if len(leafCert.CRLDistributionPoints) == 0 { + if !slices.Contains(leafCert.Issuer.Organization, "Let's Encrypt") { + // LE certs contain a CRL, but certs from other CAs might not. + return + } if leafCert.NotBefore.Before(time.Unix(letsEncryptStartedStaplingCRL, 0)) { // Certificate might not have a CRL. return diff --git a/prober/tls_test.go b/prober/tls_test.go index f6ca4aeb1..86fba91b9 100644 --- a/prober/tls_test.go +++ b/prober/tls_test.go @@ -83,7 +83,7 @@ func TestTLSConnection(t *testing.T) { srv.StartTLS() defer srv.Close() - err = probeTLS(context.Background(), "fail.example.com", srv.Listener.Addr().String()) + err = probeTLS(context.Background(), &tls.Config{ServerName: "fail.example.com"}, srv.Listener.Addr().String()) // The specific error message here is platform-specific ("certificate is not trusted" // on macOS and "certificate signed by unknown authority" on Linux), so only check // that it contains the word 'certificate'. @@ -269,40 +269,54 @@ func TestCRL(t *testing.T) { name string cert *x509.Certificate crlBytes []byte + issuer pkix.Name wantErr string }{ { "ValidCert", leafCertParsed, emptyRlBytes, + caCert.Issuer, "", }, { "RevokedCert", leafCertParsed, rlBytes, + caCert.Issuer, "has been revoked on", }, { "EmptyCRL", leafCertParsed, emptyRlBytes, + caCert.Issuer, "", }, { - "NoCRL", + "NoCRLLetsEncrypt", leafCertParsed, nil, + pkix.Name{CommonName: "tlsprobe.test", Organization: []string{"Let's Encrypt"}}, "no CRL server presented in leaf cert for", }, + { + "NoCRLOtherCA", + leafCertParsed, + nil, + caCert.Issuer, + "", + }, { "NotBeforeCRLStaplingDate", noCRLStapledParsed, nil, + caCert.Issuer, "", }, } { t.Run(tt.name, func(t *testing.T) { + tt.cert.Issuer = tt.issuer cs := &tls.ConnectionState{PeerCertificates: []*x509.Certificate{tt.cert, caCert}} if tt.crlBytes != nil { crlServer.crlBytes = tt.crlBytes From 4f211ea5c5d40f14a861d3482a6edc75342b627d Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 18 Sep 2025 12:44:06 -0700 Subject: [PATCH 1346/1708] util/eventbus: add a LogAllEvents helper for testing (#17187) When developing (and debugging) tests, it is useful to be able to see all the traffic that transits the event bus during the execution of a test. Updates #15160 Change-Id: I929aee62ccf13bdd4bd07d786924ce9a74acd17a Signed-off-by: M. J. 
Fromberger --- util/eventbus/eventbustest/eventbustest.go | 22 +++++++++++++++++++ .../eventbustest/eventbustest_test.go | 7 ++++++ 2 files changed, 29 insertions(+) diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index c32e71140..0916ae522 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -263,3 +263,25 @@ func EqualTo[T any](want T) func(T) error { return nil } } + +// LogAllEvents logs summaries of all the events routed via the specified bus +// during the execution of the test governed by t. This is intended to support +// development and debugging of tests. +func LogAllEvents(t testing.TB, bus *eventbus.Bus) { + dw := bus.Debugger().WatchBus() + done := make(chan struct{}) + go func() { + defer close(done) + var i int + for { + select { + case <-dw.Done(): + return + case re := <-dw.Events(): + i++ + t.Logf("[eventbus] #%[1]d: %[2]T | %+[2]v", i, re.Event) + } + } + }() + t.Cleanup(func() { dw.Close(); <-done }) +} diff --git a/util/eventbus/eventbustest/eventbustest_test.go b/util/eventbus/eventbustest/eventbustest_test.go index f8b37eefe..f1b21ea8f 100644 --- a/util/eventbus/eventbustest/eventbustest_test.go +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -4,6 +4,7 @@ package eventbustest_test import ( + "flag" "fmt" "strings" "testing" @@ -13,6 +14,8 @@ import ( "tailscale.com/util/eventbus/eventbustest" ) +var doDebug = flag.Bool("debug", false, "Enable debug logging") + type EventFoo struct { Value int } @@ -109,7 +112,11 @@ func TestExpectFilter(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + if *doDebug { + eventbustest.LogAllEvents(t, bus) + } tw := eventbustest.NewWatcher(t, bus) + // TODO(cmol): When synctest is out of experimental, use that instead: // https://go.dev/blog/synctest tw.TimeOut = 10 * time.Millisecond From 78035fb9d2e894b4f307f4a840a7f83aba7e0f2c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 17 Sep 2025 09:03:17 -0700 Subject: [PATCH 1347/1708] feature/featuretags,cmd/omitsize: support feature dependencies This produces the following omitsizes output: Starting with everything and removing a feature... tailscaled tailscale combined (linux/amd64) 27005112 18153656 39727288 - 7696384 - 7282688 -19607552 .. remove * - 167936 - 110592 - 245760 .. remove acme - 1925120 - 0 - 7340032 .. remove aws - 4096 - 0 - 8192 .. remove bird - 20480 - 12288 - 32768 .. remove capture - 0 - 57344 - 61440 .. remove completion - 249856 - 696320 - 692224 .. remove debugeventbus - 12288 - 4096 - 24576 .. remove debugportmapper - 0 - 0 - 0 .. remove desktop_sessions - 815104 - 8192 - 544768 .. remove drive - 65536 - 356352 - 425984 .. remove kube - 233472 - 286720 - 311296 .. remove portmapper (and debugportmapper) - 90112 - 0 - 110592 .. remove relayserver - 655360 - 712704 - 598016 .. remove serve (and webclient) - 937984 - 0 - 950272 .. remove ssh - 708608 - 401408 - 344064 .. remove syspolicy - 0 - 4071424 -11132928 .. remove systray - 159744 - 61440 - 225280 .. remove taildrop - 618496 - 454656 - 757760 .. remove tailnetlock - 122880 - 0 - 131072 .. remove tap - 442368 - 0 - 483328 .. remove tpm - 16384 - 0 - 20480 .. remove wakeonlan - 278528 - 368640 - 286720 .. remove webclient Starting at a minimal binary and adding one feature back... tailscaled tailscale combined (linux/amd64) 19308728 10870968 20119736 omitting everything + 352256 + 454656 + 643072 .. add acme + 2035712 + 0 + 2035712 .. 
add aws + 8192 + 0 + 8192 .. add bird + 20480 + 12288 + 36864 .. add capture + 0 + 57344 + 61440 .. add completion + 262144 + 274432 + 266240 .. add debugeventbus + 344064 + 118784 + 360448 .. add debugportmapper (and portmapper) + 0 + 0 + 0 .. add desktop_sessions + 978944 + 8192 + 991232 .. add drive + 61440 + 364544 + 425984 .. add kube + 331776 + 110592 + 335872 .. add portmapper + 122880 + 0 + 102400 .. add relayserver + 598016 + 155648 + 737280 .. add serve + 1142784 + 0 + 1142784 .. add ssh + 708608 + 860160 + 720896 .. add syspolicy + 0 + 4079616 + 6221824 .. add systray + 180224 + 65536 + 237568 .. add taildrop + 647168 + 393216 + 720896 .. add tailnetlock + 122880 + 0 + 126976 .. add tap + 446464 + 0 + 454656 .. add tpm + 20480 + 0 + 24576 .. add wakeonlan + 1011712 + 1011712 + 1138688 .. add webclient (and serve) Fixes #17139 Change-Id: Ia91be2da00de8481a893243d577d20e988a0920a Signed-off-by: Brad Fitzpatrick --- cmd/featuretags/featuretags.go | 17 ++- cmd/omitsize/omitsize.go | 158 ++++++++++++++++++------ feature/featuretags/featuretags.go | 105 ++++++++++++---- feature/featuretags/featuretags_test.go | 81 ++++++++++++ 4 files changed, 299 insertions(+), 62 deletions(-) create mode 100644 feature/featuretags/featuretags_test.go diff --git a/cmd/featuretags/featuretags.go b/cmd/featuretags/featuretags.go index c34adbb3f..8c8a2ceaf 100644 --- a/cmd/featuretags/featuretags.go +++ b/cmd/featuretags/featuretags.go @@ -14,6 +14,7 @@ import ( "strings" "tailscale.com/feature/featuretags" + "tailscale.com/util/set" ) var ( @@ -38,7 +39,9 @@ func main() { var keep = map[featuretags.FeatureTag]bool{} for t := range strings.SplitSeq(*add, ",") { if t != "" { - keep[featuretags.FeatureTag(t)] = true + for ft := range featuretags.Requires(featuretags.FeatureTag(t)) { + keep[ft] = true + } } } var tags []string @@ -55,6 +58,7 @@ func main() { } } } + removeSet := set.Set[featuretags.FeatureTag]{} for v := range strings.SplitSeq(*remove, ",") { if v == "" { continue @@ -63,7 +67,16 @@ func main() { if _, ok := features[f]; !ok { log.Fatalf("unknown feature %q in --remove", f) } - tags = append(tags, f.OmitTag()) + removeSet.Add(f) + } + for ft := range removeSet { + set := featuretags.RequiredBy(ft) + for dependent := range set { + if !removeSet.Contains(dependent) { + log.Fatalf("cannot remove %q without also removing %q, which depends on it", ft, dependent) + } + } + tags = append(tags, ft.OmitTag()) } slices.Sort(tags) tags = slices.Compact(tags) diff --git a/cmd/omitsize/omitsize.go b/cmd/omitsize/omitsize.go index 5940ba520..35e03d268 100644 --- a/cmd/omitsize/omitsize.go +++ b/cmd/omitsize/omitsize.go @@ -10,56 +10,69 @@ import ( "flag" "fmt" "log" + "maps" "os" "os/exec" "path/filepath" "slices" "strconv" "strings" + "sync" "tailscale.com/feature/featuretags" + "tailscale.com/util/set" ) var ( cacheDir = flag.String("cachedir", "", "if non-empty, use this directory to store cached size results to speed up subsequent runs. The tool does not consider the git status when deciding whether to use the cache. It's on you to nuke it between runs if the tree changed.") - features = flag.String("features", "", "comma-separated list of features to list in the table, with or without the ts_omit_ prefix. It may also contain a '+' sign(s) for ANDing features together. If empty, all omittable features are considered one at a time.") + features = flag.String("features", "", "comma-separated list of features to list in the table, without the ts_omit_ prefix. 
It may also contain a '+' sign(s) for ANDing features together. If empty, all omittable features are considered one at a time.") showRemovals = flag.Bool("show-removals", false, "if true, show a table of sizes removing one feature at a time from the full set.") ) -func main() { - flag.Parse() - - var all []string - var allOmittable []string - +// allOmittable returns the list of all build tags that remove features. +var allOmittable = sync.OnceValue(func() []string { + var ret []string // all build tags that can be omitted for k := range featuretags.Features { if k.IsOmittable() { - allOmittable = append(allOmittable, k.OmitTag()) + ret = append(ret, k.OmitTag()) } } + slices.Sort(ret) + return ret +}) + +func main() { + flag.Parse() + + // rows is a set (usually of size 1) of feature(s) to add/remove, without deps + // included at this point (as dep direction depends on whether we're adding or removing, + // so it's expanded later) + var rows []set.Set[featuretags.FeatureTag] if *features == "" { - all = slices.Clone(allOmittable) + for _, k := range slices.Sorted(maps.Keys(featuretags.Features)) { + if k.IsOmittable() { + rows = append(rows, set.Of(k)) + } + } } else { for v := range strings.SplitSeq(*features, ",") { - var withOmit []string - for v := range strings.SplitSeq(v, "+") { - if !strings.HasPrefix(v, "ts_omit_") { - v = "ts_omit_" + v + s := set.Set[featuretags.FeatureTag]{} + for fts := range strings.SplitSeq(v, "+") { + ft := featuretags.FeatureTag(fts) + if _, ok := featuretags.Features[ft]; !ok { + log.Fatalf("unknown feature %q", v) } - withOmit = append(withOmit, v) + s.Add(ft) } - all = append(all, strings.Join(withOmit, "+")) + rows = append(rows, s) } } - slices.Sort(all) - all = slices.Compact(all) - - minD := measure("tailscaled", allOmittable...) - minC := measure("tailscale", allOmittable...) - minBoth := measure("tailscaled", append(slices.Clone(allOmittable), "ts_include_cli")...) + minD := measure("tailscaled", allOmittable()...) + minC := measure("tailscale", allOmittable()...) + minBoth := measure("tailscaled", append(slices.Clone(allOmittable()), "ts_include_cli")...) if *showRemovals { baseD := measure("tailscaled") @@ -71,33 +84,108 @@ func main() { fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") fmt.Printf("%9d %9d %9d\n", baseD, baseC, baseBoth) - fmt.Printf("-%8d -%8d -%8d omit-all\n", baseD-minD, baseC-minC, baseBoth-minBoth) + fmt.Printf("-%8d -%8d -%8d .. remove *\n", baseD-minD, baseC-minC, baseBoth-minBoth) - for _, t := range all { - if strings.Contains(t, "+") { - log.Fatalf("TODO: make --show-removals support ANDed features like %q", t) - } - sizeD := measure("tailscaled", t) - sizeC := measure("tailscale", t) - sizeBoth := measure("tailscaled", append([]string{t}, "ts_include_cli")...) + for _, s := range rows { + title, tags := computeRemove(s) + sizeD := measure("tailscaled", tags...) + sizeC := measure("tailscale", tags...) + sizeBoth := measure("tailscaled", append(slices.Clone(tags), "ts_include_cli")...) saveD := max(baseD-sizeD, 0) saveC := max(baseC-sizeC, 0) saveBoth := max(baseBoth-sizeBoth, 0) - fmt.Printf("-%8d -%8d -%8d %s\n", saveD, saveC, saveBoth, t) + fmt.Printf("-%8d -%8d -%8d .. 
remove %s\n", saveD, saveC, saveBoth, title) + } } - fmt.Printf("\nStarting at a minimal binary and adding one feature back...\n") + fmt.Printf("\nStarting at a minimal binary and adding one feature back...\n\n") fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") fmt.Printf("%9d %9d %9d omitting everything\n", minD, minC, minBoth) - for _, t := range all { - tags := allExcept(allOmittable, strings.Split(t, "+")) + for _, s := range rows { + title, tags := computeAdd(s) sizeD := measure("tailscaled", tags...) sizeC := measure("tailscale", tags...) sizeBoth := measure("tailscaled", append(tags, "ts_include_cli")...) - fmt.Printf("+%8d +%8d +%8d .. add %s\n", max(sizeD-minD, 0), max(sizeC-minC, 0), max(sizeBoth-minBoth, 0), strings.ReplaceAll(t, "ts_omit_", "")) + + fmt.Printf("+%8d +%8d +%8d .. add %s\n", max(sizeD-minD, 0), max(sizeC-minC, 0), max(sizeBoth-minBoth, 0), title) + } + +} + +// computeAdd returns a human-readable title of a set of features and the build +// tags to use to add that set of features to a minimal binary, including their +// feature dependencies. +func computeAdd(s set.Set[featuretags.FeatureTag]) (title string, tags []string) { + allSet := set.Set[featuretags.FeatureTag]{} // s + all their outbound dependencies + var explicitSorted []string // string versions of s, sorted + for ft := range s { + allSet.AddSet(featuretags.Requires(ft)) + if ft.IsOmittable() { + explicitSorted = append(explicitSorted, string(ft)) + } + } + slices.Sort(explicitSorted) + + var removeTags []string + for ft := range allSet { + if ft.IsOmittable() { + removeTags = append(removeTags, ft.OmitTag()) + } + } + + var titleBuf strings.Builder + titleBuf.WriteString(strings.Join(explicitSorted, "+")) + var and []string + for ft := range allSet { + if !s.Contains(ft) { + and = append(and, string(ft)) + } + } + if len(and) > 0 { + slices.Sort(and) + fmt.Fprintf(&titleBuf, " (and %s)", strings.Join(and, "+")) + } + tags = allExcept(allOmittable(), removeTags) + return titleBuf.String(), tags +} + +// computeRemove returns a human-readable title of a set of features and the build +// tags to use to remove that set of features from a full binary, including removing +// any features that depend on features in the provided set. 
+func computeRemove(s set.Set[featuretags.FeatureTag]) (title string, tags []string) { + allSet := set.Set[featuretags.FeatureTag]{} // s + all their inbound dependencies + var explicitSorted []string // string versions of s, sorted + for ft := range s { + allSet.AddSet(featuretags.RequiredBy(ft)) + if ft.IsOmittable() { + explicitSorted = append(explicitSorted, string(ft)) + } + } + slices.Sort(explicitSorted) + + var removeTags []string + for ft := range allSet { + if ft.IsOmittable() { + removeTags = append(removeTags, ft.OmitTag()) + } + } + + var titleBuf strings.Builder + titleBuf.WriteString(strings.Join(explicitSorted, "+")) + + var and []string + for ft := range allSet { + if !s.Contains(ft) { + and = append(and, string(ft)) + } + } + if len(and) > 0 { + slices.Sort(and) + fmt.Fprintf(&titleBuf, " (and %s)", strings.Join(and, "+")) } + return titleBuf.String(), removeTags } func allExcept(all, omit []string) []string { @@ -120,7 +208,7 @@ func measure(bin string, tags ...string) int64 { } } - cmd := exec.Command("go", "build", "-tags", strings.Join(tags, ","), "-o", "tmpbin", "./cmd/"+bin) + cmd := exec.Command("go", "build", "-trimpath", "-ldflags=-w -s", "-tags", strings.Join(tags, ","), "-o", "tmpbin", "./cmd/"+bin) log.Printf("# Measuring %v", cmd.Args) cmd.Env = append(os.Environ(), "CGO_ENABLED=0", "GOOS=linux", "GOARCH=amd64") out, err := cmd.CombinedOutput() diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index fc26dd370..6afb40893 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -4,6 +4,8 @@ // The featuretags package is a registry of all the ts_omit-able build tags. package featuretags +import "tailscale.com/util/set" + // CLI is a special feature in the [Features] map that works opposite // from the others: it is opt-in, rather than opt-out, having a different // build tag format. @@ -32,37 +34,90 @@ func (ft FeatureTag) OmitTag() string { return "ts_omit_" + string(ft) } +// Requires returns the set of features that must be included to +// use the given feature, including the provided feature itself. +func Requires(ft FeatureTag) set.Set[FeatureTag] { + s := set.Set[FeatureTag]{} + var add func(FeatureTag) + add = func(ft FeatureTag) { + if !ft.IsOmittable() { + return + } + s.Add(ft) + for _, dep := range Features[ft].Deps { + add(dep) + } + } + add(ft) + return s +} + +// RequiredBy is the inverse of Requires: it returns the set of features that +// depend on the given feature (directly or indirectly), including the feature +// itself. +func RequiredBy(ft FeatureTag) set.Set[FeatureTag] { + s := set.Set[FeatureTag]{} + for f := range Features { + if featureDependsOn(f, ft) { + s.Add(f) + } + } + return s +} + +// featureDependsOn reports whether feature a (directly or indirectly) depends on b. +// It returns true if a == b. +func featureDependsOn(a, b FeatureTag) bool { + if a == b { + return true + } + for _, dep := range Features[a].Deps { + if featureDependsOn(dep, b) { + return true + } + } + return false +} + // FeatureMeta describes a modular feature that can be conditionally linked into // the binary. 
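// Illustrative sketch, not part of the patch above: what the new dependency
// helpers report for the wiring in this file, where webclient depends on
// serve. The function name exampleFeatureDeps is hypothetical; it assumes it
// lives in package featuretags (set.Set comes from tailscale.com/util/set).
func exampleFeatureDeps() (adds, removes set.Set[FeatureTag]) {
	// Building a binary with webclient must also link serve.
	adds = Requires("webclient") // {"webclient", "serve"}
	// Removing serve from a full binary also requires removing webclient.
	removes = RequiredBy("serve") // {"serve", "webclient"}
	return adds, removes
}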
type FeatureMeta struct { - Sym string // exported Go symbol for boolean const - Desc string // human-readable description + Sym string // exported Go symbol for boolean const + Desc string // human-readable description + Deps []FeatureTag // other features this feature requires } // Features are the known Tailscale features that can be selectively included or // excluded via build tags, and a description of each. var Features = map[FeatureTag]FeatureMeta{ - "acme": {"ACME", "ACME TLS certificate management"}, - "aws": {"AWS", "AWS integration"}, - "bird": {"Bird", "Bird BGP integration"}, - "capture": {"Capture", "Packet capture"}, - "cli": {"CLI", "embed the CLI into the tailscaled binary"}, - "completion": {"Completion", "CLI shell completion"}, - "debugeventbus": {"DebugEventBus", "eventbus debug support"}, - "debugportmapper": {"DebugPortMapper", "portmapper debug support"}, - "desktop_sessions": {"DesktopSessions", "Desktop sessions support"}, - "drive": {"Drive", "Tailscale Drive (file server) support"}, - "kube": {"Kube", "Kubernetes integration"}, - "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support"}, - "relayserver": {"RelayServer", "Relay server"}, - "serve": {"Serve", "Serve and Funnel support"}, - "ssh": {"SSH", "Tailscale SSH support"}, - "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support"}, - "systray": {"SysTray", "Linux system tray"}, - "taildrop": {"Taildrop", "Taildrop (file sending) support"}, - "tailnetlock": {"TailnetLock", "Tailnet Lock support"}, - "tap": {"Tap", "Experimental Layer 2 (ethernet) support"}, - "tpm": {"TPM", "TPM support"}, - "wakeonlan": {"WakeOnLAN", "Wake-on-LAN support"}, - "webclient": {"WebClient", "Web client support"}, + "acme": {"ACME", "ACME TLS certificate management", nil}, + "aws": {"AWS", "AWS integration", nil}, + "bird": {"Bird", "Bird BGP integration", nil}, + "capture": {"Capture", "Packet capture", nil}, + "cli": {"CLI", "embed the CLI into the tailscaled binary", nil}, + "completion": {"Completion", "CLI shell completion", nil}, + "debugeventbus": {"DebugEventBus", "eventbus debug support", nil}, + "debugportmapper": { + Sym: "DebugPortMapper", + Desc: "portmapper debug support", + Deps: []FeatureTag{"portmapper"}, + }, + "desktop_sessions": {"DesktopSessions", "Desktop sessions support", nil}, + "drive": {"Drive", "Tailscale Drive (file server) support", nil}, + "kube": {"Kube", "Kubernetes integration", nil}, + "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, + "relayserver": {"RelayServer", "Relay server", nil}, + "serve": {"Serve", "Serve and Funnel support", nil}, + "ssh": {"SSH", "Tailscale SSH support", nil}, + "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support", nil}, + "systray": {"SysTray", "Linux system tray", nil}, + "taildrop": {"Taildrop", "Taildrop (file sending) support", nil}, + "tailnetlock": {"TailnetLock", "Tailnet Lock support", nil}, + "tap": {"Tap", "Experimental Layer 2 (ethernet) support", nil}, + "tpm": {"TPM", "TPM support", nil}, + "wakeonlan": {"WakeOnLAN", "Wake-on-LAN support", nil}, + "webclient": { + Sym: "WebClient", Desc: "Web client support", + Deps: []FeatureTag{"serve"}, + }, } diff --git a/feature/featuretags/featuretags_test.go b/feature/featuretags/featuretags_test.go new file mode 100644 index 000000000..4a268c90d --- /dev/null +++ b/feature/featuretags/featuretags_test.go @@ -0,0 +1,81 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package featuretags + 
+import ( + "maps" + "slices" + "testing" + + "tailscale.com/util/set" +) + +func TestRequires(t *testing.T) { + for tag, meta := range Features { + for _, dep := range meta.Deps { + if _, ok := Features[dep]; !ok { + t.Errorf("feature %q has unknown dependency %q", tag, dep) + } + } + + // And indirectly check for cycles. If there were a cycle, + // this would infinitely loop. + deps := Requires(tag) + t.Logf("deps of %q: %v", tag, slices.Sorted(maps.Keys(deps))) + } +} + +func TestDepSet(t *testing.T) { + var setOf = set.Of[FeatureTag] + tests := []struct { + in FeatureTag + want set.Set[FeatureTag] + }{ + { + in: "drive", + want: setOf("drive"), + }, + { + in: "serve", + want: setOf("serve"), + }, + { + in: "webclient", + want: setOf("webclient", "serve"), + }, + } + for _, tt := range tests { + got := Requires(tt.in) + if !maps.Equal(got, tt.want) { + t.Errorf("DepSet(%q) = %v, want %v", tt.in, got, tt.want) + } + } +} + +func TestRequiredBy(t *testing.T) { + var setOf = set.Of[FeatureTag] + tests := []struct { + in FeatureTag + want set.Set[FeatureTag] + }{ + { + in: "drive", + want: setOf("drive"), + }, + { + in: "webclient", + want: setOf("webclient"), + }, + { + in: "serve", + want: setOf("webclient", "serve"), + }, + } + for _, tt := range tests { + got := RequiredBy(tt.in) + if !maps.Equal(got, tt.want) { + t.Errorf("FeaturesWhichDependOn(%q) = %v, want %v", tt.in, got, tt.want) + } + } +} From fc9a74a4055b3b86a1ddcc8a5b816fcab35a1856 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 19 Sep 2025 07:56:45 -0700 Subject: [PATCH 1348/1708] util/eventbus: fix flakes in eventbustest tests (#17198) When tests run in parallel, events from multiple tests on the same bus can intercede with each other. This is working as intended, but for the test cases we want to control exactly what goes through the bus. To fix that, allocate a fresh bus for each subtest. Fixes #17197 Change-Id: I53f285ebed8da82e72a2ed136a61884667ef9a5e Signed-off-by: M. J. 
Fromberger --- util/eventbus/eventbustest/eventbustest_test.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/util/eventbus/eventbustest/eventbustest_test.go b/util/eventbus/eventbustest/eventbustest_test.go index f1b21ea8f..7a6b511c7 100644 --- a/util/eventbus/eventbustest/eventbustest_test.go +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -108,10 +108,11 @@ func TestExpectFilter(t *testing.T) { }, } - bus := eventbustest.NewBus(t) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + bus := eventbustest.NewBus(t) + t.Cleanup(bus.Close) + if *doDebug { eventbustest.LogAllEvents(t, bus) } @@ -241,10 +242,11 @@ func TestExpectEvents(t *testing.T) { }, } - bus := eventbustest.NewBus(t) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + bus := eventbustest.NewBus(t) + t.Cleanup(bus.Close) + tw := eventbustest.NewWatcher(t, bus) // TODO(cmol): When synctest is out of experimental, use that instead: // https://go.dev/blog/synctest @@ -374,10 +376,11 @@ func TestExpectExactlyEventsFilter(t *testing.T) { }, } - bus := eventbustest.NewBus(t) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + bus := eventbustest.NewBus(t) + t.Cleanup(bus.Close) + tw := eventbustest.NewWatcher(t, bus) // TODO(cmol): When synctest is out of experimental, use that instead: // https://go.dev/blog/synctest From 394718a4ca78e376e9f20782d3bb2efa07e5280f Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Tue, 9 Sep 2025 13:31:01 +0100 Subject: [PATCH 1349/1708] tstest/integration: support multiple C2N handlers in testcontrol Instead of a single hard-coded C2N handler, add support for calling arbitrary C2N endpoints via a node roundtripper. Updates tailscale/corp#32095 Signed-off-by: Anton Tolchanov --- tstest/integration/integration_test.go | 56 +++++------- tstest/integration/testcontrol/testcontrol.go | 86 ++++++++++++++++++- 2 files changed, 105 insertions(+), 37 deletions(-) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index b282adcf8..5fef04488 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -596,22 +596,6 @@ func TestC2NPingRequest(t *testing.T) { env := NewTestEnv(t) - gotPing := make(chan bool, 1) - env.Control.HandleC2N = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - t.Errorf("unexpected ping method %q", r.Method) - } - got, err := io.ReadAll(r.Body) - if err != nil { - t.Errorf("ping body read error: %v", err) - } - const want = "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Type: text/plain; charset=utf-8\r\n\r\nabc" - if string(got) != want { - t.Errorf("body error\n got: %q\nwant: %q", got, want) - } - gotPing <- true - }) - n1 := NewTestNode(t, env) n1.StartDaemon() @@ -635,27 +619,33 @@ func TestC2NPingRequest(t *testing.T) { } cancel() - pr := &tailcfg.PingRequest{ - URL: fmt.Sprintf("https://unused/some-c2n-path/ping-%d", try), - Log: true, - Types: "c2n", - Payload: []byte("POST /echo HTTP/1.0\r\nContent-Length: 3\r\n\r\nabc"), + ctx, cancel = context.WithTimeout(t.Context(), 2*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, "POST", "/echo", bytes.NewReader([]byte("abc"))) + if err != nil { + t.Errorf("failed to create request: %v", err) + continue } - if !env.Control.AddPingRequest(nodeKey, pr) { - t.Logf("failed to AddPingRequest") + r, err := env.Control.NodeRoundTripper(nodeKey).RoundTrip(req) + if err != nil { + t.Errorf("RoundTrip 
failed: %v", err) continue } - - // Wait for PingRequest to come back - pingTimeout := time.NewTimer(2 * time.Second) - defer pingTimeout.Stop() - select { - case <-gotPing: - t.Logf("got ping; success") - return - case <-pingTimeout.C: - // Try again. + if r.StatusCode != 200 { + t.Errorf("unexpected status code: %d", r.StatusCode) + continue + } + b, err := io.ReadAll(r.Body) + if err != nil { + t.Errorf("error reading body: %v", err) + continue + } + if string(b) != "abc" { + t.Errorf("body = %q; want %q", b, "abc") + continue } + return } t.Error("all ping attempts failed") } diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 66d868aca..7a371ef76 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -5,6 +5,7 @@ package testcontrol import ( + "bufio" "bytes" "cmp" "context" @@ -30,10 +31,12 @@ import ( "tailscale.com/control/controlhttp/controlhttpserver" "tailscale.com/net/netaddr" "tailscale.com/net/tsaddr" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/ptr" + "tailscale.com/util/httpm" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/rands" @@ -53,7 +56,7 @@ type Server struct { Verbose bool DNSConfig *tailcfg.DNSConfig // nil means no DNS config MagicDNSDomain string - HandleC2N http.Handler // if non-nil, used for /some-c2n-path/ in tests + C2NResponses syncs.Map[string, func(*http.Response)] // token => onResponse func // PeerRelayGrants, if true, inserts relay capabilities into the wildcard // grants rules. @@ -183,6 +186,52 @@ func (s *Server) AddPingRequest(nodeKeyDst key.NodePublic, pr *tailcfg.PingReque return s.addDebugMessage(nodeKeyDst, pr) } +// c2nRoundTripper is an http.RoundTripper that sends requests to a node via C2N. +type c2nRoundTripper struct { + s *Server + n key.NodePublic +} + +func (s *Server) NodeRoundTripper(n key.NodePublic) http.RoundTripper { + return c2nRoundTripper{s, n} +} + +func (rt c2nRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + ctx := req.Context() + resc := make(chan *http.Response, 1) + if err := rt.s.SendC2N(rt.n, req, func(r *http.Response) { resc <- r }); err != nil { + return nil, err + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + case r := <-resc: + return r, nil + } +} + +// SendC2N sends req to node. When the response is received, onRes is called. +func (s *Server) SendC2N(node key.NodePublic, req *http.Request, onRes func(*http.Response)) error { + var buf bytes.Buffer + if err := req.Write(&buf); err != nil { + return err + } + + token := rands.HexString(10) + pr := &tailcfg.PingRequest{ + URL: "https://unused/c2n/" + token, + Log: true, + Types: "c2n", + Payload: buf.Bytes(), + } + s.C2NResponses.Store(token, onRes) + if !s.AddPingRequest(node, pr) { + s.C2NResponses.Delete(token) + return fmt.Errorf("node %v not connected", node) + } + return nil +} + // AddRawMapResponse delivers the raw MapResponse mr to nodeKeyDst. It's meant // for testing incremental map updates. 
// @@ -269,9 +318,7 @@ func (s *Server) initMux() { s.mux.HandleFunc("/key", s.serveKey) s.mux.HandleFunc("/machine/", s.serveMachine) s.mux.HandleFunc("/ts2021", s.serveNoiseUpgrade) - if s.HandleC2N != nil { - s.mux.Handle("/some-c2n-path/", s.HandleC2N) - } + s.mux.HandleFunc("/c2n/", s.serveC2N) } func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -285,6 +332,37 @@ func (s *Server) serveUnhandled(w http.ResponseWriter, r *http.Request) { go panic(fmt.Sprintf("testcontrol.Server received unhandled request: %s", got.Bytes())) } +// serveC2N handles a POST from a node containing a c2n response. +func (s *Server) serveC2N(w http.ResponseWriter, r *http.Request) { + if err := func() error { + if r.Method != httpm.POST { + return fmt.Errorf("POST required") + } + token, ok := strings.CutPrefix(r.URL.Path, "/c2n/") + if !ok { + return fmt.Errorf("invalid path %q", r.URL.Path) + } + + onRes, ok := s.C2NResponses.Load(token) + if !ok { + return fmt.Errorf("unknown c2n token %q", token) + } + s.C2NResponses.Delete(token) + + res, err := http.ReadResponse(bufio.NewReader(r.Body), nil) + if err != nil { + return fmt.Errorf("error reading c2n response: %w", err) + } + onRes(res) + return nil + }(); err != nil { + s.logf("testcontrol: %s", err) + http.Error(w, err.Error(), 500) + return + } + w.WriteHeader(http.StatusNoContent) +} + type peerMachinePublicContextKey struct{} func (s *Server) serveNoiseUpgrade(w http.ResponseWriter, r *http.Request) { From 4a04161828edf8b950b923530e286d74fe6f329f Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Wed, 13 Aug 2025 15:00:35 +0100 Subject: [PATCH 1350/1708] ipn/ipnlocal: add a C2N endpoint for fetching a netmap For debugging purposes, add a new C2N endpoint returning the current netmap. Optionally, coordination server can send a new "candidate" map response, which the client will generate a separate netmap for. Coordination server can later compare two netmaps, detecting unexpected changes to the client state. Updates tailscale/corp#32095 Signed-off-by: Anton Tolchanov --- control/controlclient/direct.go | 21 ++ control/controlclient/map_test.go | 26 +++ ipn/ipnlocal/c2n.go | 65 +++++++ ipn/ipnlocal/c2n_test.go | 183 ++++++++++++++++++ ipn/ipnlocal/local.go | 28 ++- tailcfg/c2ntypes.go | 31 ++- tailcfg/tailcfg.go | 3 +- tstest/integration/integration_test.go | 145 ++++++++++++++ tstest/integration/testcontrol/testcontrol.go | 13 +- 9 files changed, 506 insertions(+), 9 deletions(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index b9e26cc98..ea8661bff 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1160,6 +1160,27 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap return nil } +// NetmapFromMapResponseForDebug returns a NetworkMap from the given MapResponse. +// It is intended for debugging only. 
+func NetmapFromMapResponseForDebug(ctx context.Context, pr persist.PersistView, resp *tailcfg.MapResponse) (*netmap.NetworkMap, error) { + if resp == nil { + return nil, errors.New("nil MapResponse") + } + if resp.Node == nil { + return nil, errors.New("MapResponse lacks Node") + } + + nu := &rememberLastNetmapUpdater{} + sess := newMapSession(pr.PrivateNodeKey(), nu, nil) + defer sess.Close() + + if err := sess.HandleNonKeepAliveMapResponse(ctx, resp); err != nil { + return nil, fmt.Errorf("HandleNonKeepAliveMapResponse: %w", err) + } + + return sess.netmap(), nil +} + func (c *Direct) handleDebugMessage(ctx context.Context, debug *tailcfg.Debug) error { if code := debug.Exit; code != nil { c.logf("exiting process with status %v per controlplane", *code) diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index 59b8988fc..4e8c911e3 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -20,6 +20,7 @@ import ( "go4.org/mem" "tailscale.com/control/controlknobs" "tailscale.com/health" + "tailscale.com/ipn" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/tstime" @@ -27,6 +28,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" + "tailscale.com/types/persist" "tailscale.com/types/ptr" "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" @@ -1419,3 +1421,27 @@ func TestNetmapDisplayMessageIntegration(t *testing.T) { t.Errorf("unexpected message contents (-want +got):\n%s", diff) } } + +func TestNetmapForMapResponseForDebug(t *testing.T) { + mr := &tailcfg.MapResponse{ + Node: &tailcfg.Node{ + ID: 1, + Name: "foo.bar.ts.net.", + }, + Peers: []*tailcfg.Node{ + {ID: 2, Name: "peer1.bar.ts.net.", HomeDERP: 1}, + {ID: 3, Name: "peer2.bar.ts.net.", HomeDERP: 1}, + }, + } + ms := newTestMapSession(t, nil) + nm1 := ms.netmapForResponse(mr) + + prefs := &ipn.Prefs{Persist: &persist.Persist{PrivateNodeKey: ms.privateNodeKey}} + nm2, err := NetmapFromMapResponseForDebug(t.Context(), prefs.View().Persist(), mr) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(nm1, nm2) { + t.Errorf("mismatch\nnm1: %s\nnm2: %s\n", logger.AsJSON(nm1), logger.AsJSON(nm2)) + } +} diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 0487774db..2b48b19fa 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -13,19 +13,23 @@ import ( "os/exec" "path" "path/filepath" + "reflect" "runtime" "strconv" "strings" "time" "tailscale.com/clientupdate" + "tailscale.com/control/controlclient" "tailscale.com/envknob" "tailscale.com/ipn" "tailscale.com/net/sockstats" "tailscale.com/posture" "tailscale.com/tailcfg" + "tailscale.com/types/netmap" "tailscale.com/util/clientmetric" "tailscale.com/util/goroutines" + "tailscale.com/util/httpm" "tailscale.com/util/set" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/ptype" @@ -44,6 +48,7 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ req("/debug/metrics"): handleC2NDebugMetrics, req("/debug/component-logging"): handleC2NDebugComponentLogging, req("/debug/logheap"): handleC2NDebugLogHeap, + req("/debug/netmap"): handleC2NDebugNetMap, // PPROF - We only expose a subset of typical pprof endpoints for security. 
req("/debug/pprof/heap"): handleC2NPprof, @@ -142,6 +147,66 @@ func handleC2NLogtailFlush(b *LocalBackend, w http.ResponseWriter, r *http.Reque } } +func handleC2NDebugNetMap(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + if r.Method != httpm.POST && r.Method != httpm.GET { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + b.logf("c2n: %s /debug/netmap received", r.Method) + + // redactAndMarshal redacts private keys from the given netmap, clears fields + // that should be omitted, and marshals it to JSON. + redactAndMarshal := func(nm *netmap.NetworkMap, omitFields []string) (json.RawMessage, error) { + for _, f := range omitFields { + field := reflect.ValueOf(nm).Elem().FieldByName(f) + if !field.IsValid() { + b.logf("c2n: /debug/netmap: unknown field %q in omitFields", f) + continue + } + field.SetZero() + } + nm, _ = redactNetmapPrivateKeys(nm) + return json.Marshal(nm) + } + + var omitFields []string + resp := &tailcfg.C2NDebugNetmapResponse{} + + if r.Method == httpm.POST { + var req tailcfg.C2NDebugNetmapRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, fmt.Sprintf("failed to decode request body: %v", err), http.StatusBadRequest) + return + } + omitFields = req.OmitFields + + if req.Candidate != nil { + cand, err := controlclient.NetmapFromMapResponseForDebug(ctx, b.unsanitizedPersist(), req.Candidate) + if err != nil { + http.Error(w, fmt.Sprintf("failed to convert candidate MapResponse: %v", err), http.StatusBadRequest) + return + } + candJSON, err := redactAndMarshal(cand, omitFields) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal candidate netmap: %v", err), http.StatusInternalServerError) + return + } + resp.Candidate = candJSON + } + } + + var err error + resp.Current, err = redactAndMarshal(b.currentNode().netMapWithPeers(), omitFields) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal current netmap: %v", err), http.StatusInternalServerError) + return + } + + writeJSON(w, resp) +} + func handleC2NDebugGoroutines(_ *LocalBackend, w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") w.Write(goroutines.ScrubbedGoroutineDump(true)) diff --git a/ipn/ipnlocal/c2n_test.go b/ipn/ipnlocal/c2n_test.go index cc31e284a..04ed8bf5d 100644 --- a/ipn/ipnlocal/c2n_test.go +++ b/ipn/ipnlocal/c2n_test.go @@ -4,9 +4,11 @@ package ipnlocal import ( + "bytes" "cmp" "crypto/x509" "encoding/json" + "fmt" "net/http/httptest" "net/url" "os" @@ -18,8 +20,15 @@ import ( "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/netmap" + "tailscale.com/types/opt" + "tailscale.com/types/views" "tailscale.com/util/must" + + gcmp "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" ) func TestHandleC2NTLSCertStatus(t *testing.T) { @@ -132,3 +141,177 @@ func TestHandleC2NTLSCertStatus(t *testing.T) { } } + +// reflectNonzero returns a non-zero value for a given reflect.Value. 
+func reflectNonzero(t reflect.Type) reflect.Value { + switch t.Kind() { + case reflect.Bool: + return reflect.ValueOf(true) + case reflect.String: + if reflect.TypeFor[opt.Bool]() == t { + return reflect.ValueOf("true").Convert(t) + } + return reflect.ValueOf("foo").Convert(t) + case reflect.Int64: + return reflect.ValueOf(int64(1)).Convert(t) + case reflect.Slice: + return reflect.MakeSlice(t, 1, 1) + case reflect.Ptr: + return reflect.New(t.Elem()) + case reflect.Map: + return reflect.MakeMap(t) + case reflect.Struct: + switch t { + case reflect.TypeFor[key.NodePrivate](): + return reflect.ValueOf(key.NewNode()) + } + } + panic(fmt.Sprintf("unhandled %v", t)) +} + +// setFieldsToRedact sets fields in the given netmap to non-zero values +// according to the fieldMap, which maps field names to whether they +// should be reset (true) or not (false). +func setFieldsToRedact(t *testing.T, nm *netmap.NetworkMap, fieldMap map[string]bool) { + t.Helper() + v := reflect.ValueOf(nm).Elem() + for i := range v.NumField() { + name := v.Type().Field(i).Name + f := v.Field(i) + if !f.CanSet() { + continue + } + shouldReset, ok := fieldMap[name] + if !ok { + t.Errorf("fieldMap missing field %q", name) + } + if shouldReset { + f.Set(reflectNonzero(f.Type())) + } + } +} + +func TestRedactNetmapPrivateKeys(t *testing.T) { + fieldMap := map[string]bool{ + // Private fields (should be redacted): + "PrivateKey": true, + + // Public fields (should not be redacted): + "AllCaps": false, + "CollectServices": false, + "DERPMap": false, + "DNS": false, + "DisplayMessages": false, + "Domain": false, + "DomainAuditLogID": false, + "Expiry": false, + "MachineKey": false, + "Name": false, + "NodeKey": false, + "PacketFilter": false, + "PacketFilterRules": false, + "Peers": false, + "SSHPolicy": false, + "SelfNode": false, + "TKAEnabled": false, + "TKAHead": false, + "UserProfiles": false, + } + + nm := &netmap.NetworkMap{} + setFieldsToRedact(t, nm, fieldMap) + + got, _ := redactNetmapPrivateKeys(nm) + if !reflect.DeepEqual(got, &netmap.NetworkMap{}) { + t.Errorf("redacted netmap is not empty: %+v", got) + } +} + +func TestHandleC2NDebugNetmap(t *testing.T) { + nm := &netmap.NetworkMap{ + Name: "myhost", + SelfNode: (&tailcfg.Node{ + ID: 100, + Name: "myhost", + StableID: "deadbeef", + Key: key.NewNode().Public(), + Hostinfo: (&tailcfg.Hostinfo{Hostname: "myhost"}).View(), + }).View(), + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + ID: 101, + Name: "peer1", + StableID: "deadbeef", + Key: key.NewNode().Public(), + Hostinfo: (&tailcfg.Hostinfo{Hostname: "peer1"}).View(), + }).View(), + }, + PrivateKey: key.NewNode(), + } + withoutPrivateKey := *nm + withoutPrivateKey.PrivateKey = key.NodePrivate{} + + for _, tt := range []struct { + name string + req *tailcfg.C2NDebugNetmapRequest + want *netmap.NetworkMap + }{ + { + name: "simple_get", + want: &withoutPrivateKey, + }, + { + name: "post_no_omit", + req: &tailcfg.C2NDebugNetmapRequest{}, + want: &withoutPrivateKey, + }, + { + name: "post_omit_peers_and_name", + req: &tailcfg.C2NDebugNetmapRequest{OmitFields: []string{"Peers", "Name"}}, + want: &netmap.NetworkMap{ + SelfNode: nm.SelfNode, + }, + }, + { + name: "post_omit_nonexistent_field", + req: &tailcfg.C2NDebugNetmapRequest{OmitFields: []string{"ThisFieldDoesNotExist"}}, + want: &withoutPrivateKey, + }, + } { + t.Run(tt.name, func(t *testing.T) { + b := newTestLocalBackend(t) + b.currentNode().SetNetMap(nm) + + rec := httptest.NewRecorder() + req := httptest.NewRequest("GET", "/debug/netmap", nil) + if tt.req != nil 
{ + b, err := json.Marshal(tt.req) + if err != nil { + t.Fatalf("json.Marshal: %v", err) + } + req = httptest.NewRequest("POST", "/debug/netmap", bytes.NewReader(b)) + } + handleC2NDebugNetMap(b, rec, req) + res := rec.Result() + wantStatus := 200 + if res.StatusCode != wantStatus { + t.Fatalf("status code = %v; want %v. Body: %s", res.Status, wantStatus, rec.Body.Bytes()) + } + var resp tailcfg.C2NDebugNetmapResponse + if err := json.Unmarshal(rec.Body.Bytes(), &resp); err != nil { + t.Fatalf("bad JSON: %v", err) + } + got := &netmap.NetworkMap{} + if err := json.Unmarshal(resp.Current, got); err != nil { + t.Fatalf("bad JSON: %v", err) + } + + if diff := gcmp.Diff(tt.want, got, + gcmp.AllowUnexported(netmap.NetworkMap{}, key.NodePublic{}, views.Slice[tailcfg.FilterRule]{}), + cmpopts.EquateComparable(key.MachinePublic{}), + ); diff != "" { + t.Errorf("netmap mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 1e102d53e..7ac8f0ecb 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1223,6 +1223,13 @@ func (b *LocalBackend) sanitizedPrefsLocked() ipn.PrefsView { return stripKeysFromPrefs(b.pm.CurrentPrefs()) } +// unsanitizedPersist returns the current PersistView, including any private keys. +func (b *LocalBackend) unsanitizedPersist() persist.PersistView { + b.mu.Lock() + defer b.mu.Unlock() + return b.pm.CurrentPrefs().Persist() +} + // Status returns the latest status of the backend and its // sub-components. func (b *LocalBackend) Status() *ipnstate.Status { @@ -3257,21 +3264,34 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A // listener. func filterPrivateKeys(fn func(roNotify *ipn.Notify) (keepGoing bool)) func(*ipn.Notify) bool { return func(n *ipn.Notify) bool { - if n.NetMap == nil || n.NetMap.PrivateKey.IsZero() { + redacted, changed := redactNetmapPrivateKeys(n.NetMap) + if !changed { return fn(n) } // The netmap in n is shared across all watchers, so to mutate it for a // single watcher we have to clone the notify and the netmap. We can // make shallow clones, at least. - nm2 := *n.NetMap n2 := *n - n2.NetMap = &nm2 - n2.NetMap.PrivateKey = key.NodePrivate{} + n2.NetMap = redacted return fn(&n2) } } +// redactNetmapPrivateKeys returns a copy of nm with private keys zeroed out. +// If no change was needed, it returns nm unmodified. +func redactNetmapPrivateKeys(nm *netmap.NetworkMap) (redacted *netmap.NetworkMap, changed bool) { + if nm == nil || nm.PrivateKey.IsZero() { + return nm, false + } + + // The netmap might be shared across watchers, so make at least a shallow + // clone before mutating it. + nm2 := *nm + nm2.PrivateKey = key.NodePrivate{} + return &nm2, true +} + // appendHealthActions returns an IPN listener func that wraps the supplied IPN // listener func and transforms health messages passed to the wrapped listener. // If health messages with PrimaryActions are present, it appends the label & diff --git a/tailcfg/c2ntypes.go b/tailcfg/c2ntypes.go index 66f95785c..d78baef1c 100644 --- a/tailcfg/c2ntypes.go +++ b/tailcfg/c2ntypes.go @@ -5,7 +5,10 @@ package tailcfg -import "net/netip" +import ( + "encoding/json" + "net/netip" +) // C2NSSHUsernamesRequest is the request for the /ssh/usernames. // A GET request without a request body is equivalent to the zero value of this type. @@ -117,3 +120,29 @@ type C2NVIPServicesResponse struct { // changes. This value matches what is reported in latest [Hostinfo.ServicesHash]. 
ServicesHash string } + +// C2NDebugNetmapRequest is the request (from control to node) for the +// /debug/netmap handler. +type C2NDebugNetmapRequest struct { + // Candidate is an optional full MapResponse to be used for generating a candidate + // network map. If unset, only the current network map is returned. + Candidate *MapResponse `json:"candidate,omitzero"` + + // OmitFields is an optional list of netmap fields to omit from the response. + // If unset, no fields are omitted. + OmitFields []string `json:"omitFields,omitzero"` +} + +// C2NDebugNetmapResponse is the response (from node to control) from the +// /debug/netmap handler. It contains the current network map and, if a +// candidate full MapResponse was provided in the request, a candidate network +// map generated from it. +// To avoid import cycles, and reflect the non-stable nature of +// netmap.NetworkMap values, they are returned as json.RawMessage. +type C2NDebugNetmapResponse struct { + // Current is the current network map (netmap.NetworkMap). + Current json.RawMessage `json:"current"` + + // Candidate is a network map produced based on the candidate MapResponse. + Candidate json.RawMessage `json:"candidate,omitzero"` +} diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 6130df901..057e1a54b 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -171,7 +171,8 @@ type CapabilityVersion int // - 124: 2025-08-08: removed NodeAttrDisableMagicSockCryptoRouting support, crypto routing is now mandatory // - 125: 2025-08-11: dnstype.Resolver adds UseWithExitNode field. // - 126: 2025-09-17: Client uses seamless key renewal unless disabled by control (tailscale/corp#31479) -const CurrentCapabilityVersion CapabilityVersion = 126 +// - 127: 2025-09-19: can handle C2N /debug/netmap. +const CurrentCapabilityVersion CapabilityVersion = 127 // ID is an integer ID for a user, node, or login allocated by the // control plane. diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 5fef04488..6e0dc87eb 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -27,6 +27,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "github.com/miekg/dns" "go4.org/mem" "tailscale.com/client/local" @@ -41,6 +42,7 @@ import ( "tailscale.com/tstest" "tailscale.com/tstest/integration/testcontrol" "tailscale.com/types/key" + "tailscale.com/types/netmap" "tailscale.com/types/opt" "tailscale.com/types/ptr" "tailscale.com/util/must" @@ -1623,3 +1625,146 @@ func TestPeerRelayPing(t *testing.T) { } } } + +func TestC2NDebugNetmap(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t) + + var testNodes []*TestNode + var nodes []*tailcfg.Node + for i := range 2 { + n := NewTestNode(t, env) + d := n.StartDaemon() + defer d.MustCleanShutdown(t) + + n.AwaitResponding() + n.MustUp() + n.AwaitRunning() + testNodes = append(testNodes, n) + + controlNodes := env.Control.AllNodes() + if len(controlNodes) != i+1 { + t.Fatalf("expected %d nodes, got %d nodes", i+1, len(controlNodes)) + } + for _, cn := range controlNodes { + if n.MustStatus().Self.PublicKey == cn.Key { + nodes = append(nodes, cn) + break + } + } + } + + // getC2NNetmap fetches the current netmap. If a candidate map response is provided, + // a candidate netmap is also fetched and compared to the current netmap. 
+ getC2NNetmap := func(node key.NodePublic, cand *tailcfg.MapResponse) *netmap.NetworkMap { + t.Helper() + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + var req *http.Request + if cand != nil { + body := must.Get(json.Marshal(&tailcfg.C2NDebugNetmapRequest{Candidate: cand})) + req = must.Get(http.NewRequestWithContext(ctx, "POST", "/debug/netmap", bytes.NewReader(body))) + } else { + req = must.Get(http.NewRequestWithContext(ctx, "GET", "/debug/netmap", nil)) + } + httpResp := must.Get(env.Control.NodeRoundTripper(node).RoundTrip(req)) + defer httpResp.Body.Close() + + if httpResp.StatusCode != 200 { + t.Errorf("unexpected status code: %d", httpResp.StatusCode) + return nil + } + + respBody := must.Get(io.ReadAll(httpResp.Body)) + var resp tailcfg.C2NDebugNetmapResponse + must.Do(json.Unmarshal(respBody, &resp)) + + var current netmap.NetworkMap + must.Do(json.Unmarshal(resp.Current, ¤t)) + + if !current.PrivateKey.IsZero() { + t.Errorf("current netmap has non-zero private key: %v", current.PrivateKey) + } + // Check candidate netmap if we sent a map response. + if cand != nil { + var candidate netmap.NetworkMap + must.Do(json.Unmarshal(resp.Candidate, &candidate)) + if !candidate.PrivateKey.IsZero() { + t.Errorf("candidate netmap has non-zero private key: %v", candidate.PrivateKey) + } + if diff := cmp.Diff(current.SelfNode, candidate.SelfNode); diff != "" { + t.Errorf("SelfNode differs (-current +candidate):\n%s", diff) + } + if diff := cmp.Diff(current.Peers, candidate.Peers); diff != "" { + t.Errorf("Peers differ (-current +candidate):\n%s", diff) + } + } + return ¤t + } + + for _, n := range nodes { + mr := must.Get(env.Control.MapResponse(&tailcfg.MapRequest{NodeKey: n.Key})) + nm := getC2NNetmap(n.Key, mr) + + // Make sure peers do not have "testcap" initially (we'll change this later). + if len(nm.Peers) != 1 || nm.Peers[0].CapMap().Contains("testcap") { + t.Fatalf("expected 1 peer without testcap, got: %v", nm.Peers) + } + + // Make sure nodes think each other are offline initially. + if nm.Peers[0].Online().Get() { + t.Fatalf("expected 1 peer to be offline, got: %v", nm.Peers) + } + } + + // Send a delta update to n0, setting "testcap" on node 1. + env.Control.AddRawMapResponse(nodes[0].Key, &tailcfg.MapResponse{ + PeersChangedPatch: []*tailcfg.PeerChange{{ + NodeID: nodes[1].ID, CapMap: tailcfg.NodeCapMap{"testcap": []tailcfg.RawMessage{}}, + }}, + }) + + // node 0 should see node 1 with "testcap". + must.Do(tstest.WaitFor(5*time.Second, func() error { + st := testNodes[0].MustStatus() + p, ok := st.Peer[nodes[1].Key] + if !ok { + return fmt.Errorf("node 0 (%s) doesn't see node 1 (%s) as peer\n%v", nodes[0].Key, nodes[1].Key, st) + } + if _, ok := p.CapMap["testcap"]; !ok { + return fmt.Errorf("node 0 (%s) sees node 1 (%s) as peer but without testcap\n%v", nodes[0].Key, nodes[1].Key, p) + } + return nil + })) + + // Check that node 0's current netmap has "testcap" for node 1. + nm := getC2NNetmap(nodes[0].Key, nil) + if len(nm.Peers) != 1 || !nm.Peers[0].CapMap().Contains("testcap") { + t.Errorf("current netmap missing testcap: %v", nm.Peers[0].CapMap()) + } + + // Send a delta update to n1, marking node 0 as online. + env.Control.AddRawMapResponse(nodes[1].Key, &tailcfg.MapResponse{ + PeersChangedPatch: []*tailcfg.PeerChange{{ + NodeID: nodes[0].ID, Online: ptr.To(true), + }}, + }) + + // node 1 should see node 0 as online. 
+ must.Do(tstest.WaitFor(5*time.Second, func() error { + st := testNodes[1].MustStatus() + p, ok := st.Peer[nodes[0].Key] + if !ok || !p.Online { + return fmt.Errorf("node 0 (%s) doesn't see node 1 (%s) as an online peer\n%v", nodes[0].Key, nodes[1].Key, st) + } + return nil + })) + + // The netmap from node 1 should show node 0 as online. + nm = getC2NNetmap(nodes[1].Key, nil) + if len(nm.Peers) != 1 || !nm.Peers[0].Online().Get() { + t.Errorf("expected peer to be online; got %+v", nm.Peers[0].AsStruct()) + } +} diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 7a371ef76..7ce7186e7 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -336,7 +336,7 @@ func (s *Server) serveUnhandled(w http.ResponseWriter, r *http.Request) { func (s *Server) serveC2N(w http.ResponseWriter, r *http.Request) { if err := func() error { if r.Method != httpm.POST { - return fmt.Errorf("POST required") + return errors.New("POST required") } token, ok := strings.CutPrefix(r.URL.Path, "/c2n/") if !ok { @@ -1148,18 +1148,25 @@ func (s *Server) canGenerateAutomaticMapResponseFor(nk key.NodePublic) bool { func (s *Server) hasPendingRawMapMessage(nk key.NodePublic) bool { s.mu.Lock() defer s.mu.Unlock() - _, ok := s.msgToSend[nk].(*tailcfg.MapResponse) + _, ok := s.msgToSend[nk] return ok } func (s *Server) takeRawMapMessage(nk key.NodePublic) (mapResJSON []byte, ok bool) { s.mu.Lock() defer s.mu.Unlock() - mr, ok := s.msgToSend[nk].(*tailcfg.MapResponse) + mr, ok := s.msgToSend[nk] if !ok { return nil, false } delete(s.msgToSend, nk) + + // If it's a bare PingRequest, wrap it in a MapResponse. + switch pr := mr.(type) { + case *tailcfg.PingRequest: + mr = &tailcfg.MapResponse{PingRequest: pr} + } + var err error mapResJSON, err = json.Marshal(mr) if err != nil { From 2351cc0d0ec6b21635058e29652c5dcbf362e64b Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Fri, 19 Sep 2025 16:18:14 +0100 Subject: [PATCH 1351/1708] ipn/ipnlocal: make the redactNetmapPrivateKeys test recursive Expand TestRedactNetmapPrivateKeys to cover all sub-structs of NetworkMap and confirm that a) all fields are annotated as private or public, and b) all private fields are getting redacted. Updates tailscale/corp#32095 Signed-off-by: Anton Tolchanov --- ipn/ipnlocal/c2n_test.go | 389 ++++++++++++++++++++++++++++++++------- 1 file changed, 319 insertions(+), 70 deletions(-) diff --git a/ipn/ipnlocal/c2n_test.go b/ipn/ipnlocal/c2n_test.go index 04ed8bf5d..75a57dee5 100644 --- a/ipn/ipnlocal/c2n_test.go +++ b/ipn/ipnlocal/c2n_test.go @@ -8,24 +8,26 @@ import ( "cmp" "crypto/x509" "encoding/json" - "fmt" "net/http/httptest" "net/url" "os" "path/filepath" "reflect" + "strings" "testing" "time" "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" - "tailscale.com/types/opt" "tailscale.com/types/views" "tailscale.com/util/must" + "tailscale.com/util/set" + "tailscale.com/wgengine/filter/filtertype" gcmp "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -142,88 +144,335 @@ func TestHandleC2NTLSCertStatus(t *testing.T) { } -// reflectNonzero returns a non-zero value for a given reflect.Value. 
-func reflectNonzero(t reflect.Type) reflect.Value { - switch t.Kind() { - case reflect.Bool: - return reflect.ValueOf(true) - case reflect.String: - if reflect.TypeFor[opt.Bool]() == t { - return reflect.ValueOf("true").Convert(t) - } - return reflect.ValueOf("foo").Convert(t) - case reflect.Int64: - return reflect.ValueOf(int64(1)).Convert(t) - case reflect.Slice: - return reflect.MakeSlice(t, 1, 1) - case reflect.Ptr: - return reflect.New(t.Elem()) - case reflect.Map: - return reflect.MakeMap(t) - case reflect.Struct: - switch t { - case reflect.TypeFor[key.NodePrivate](): - return reflect.ValueOf(key.NewNode()) +// eachStructField calls cb for each struct field in struct type tp, recursively. +func eachStructField(tp reflect.Type, cb func(reflect.Type, reflect.StructField)) { + if !strings.HasPrefix(tp.PkgPath(), "tailscale.com/") { + // Stop traversing when we reach a non-tailscale type. + return + } + + for i := range tp.NumField() { + cb(tp, tp.Field(i)) + + switch tp.Field(i).Type.Kind() { + case reflect.Struct: + eachStructField(tp.Field(i).Type, cb) + case reflect.Slice, reflect.Array, reflect.Ptr, reflect.Map: + if tp.Field(i).Type.Elem().Kind() == reflect.Struct { + eachStructField(tp.Field(i).Type.Elem(), cb) + } } } - panic(fmt.Sprintf("unhandled %v", t)) } -// setFieldsToRedact sets fields in the given netmap to non-zero values -// according to the fieldMap, which maps field names to whether they -// should be reset (true) or not (false). -func setFieldsToRedact(t *testing.T, nm *netmap.NetworkMap, fieldMap map[string]bool) { - t.Helper() - v := reflect.ValueOf(nm).Elem() +// eachStructValue calls cb for each struct field in the struct value v, recursively. +func eachStructValue(v reflect.Value, cb func(reflect.Type, reflect.StructField, reflect.Value)) { + if v.IsZero() { + return + } + for i := range v.NumField() { - name := v.Type().Field(i).Name - f := v.Field(i) - if !f.CanSet() { - continue - } - shouldReset, ok := fieldMap[name] - if !ok { - t.Errorf("fieldMap missing field %q", name) - } - if shouldReset { - f.Set(reflectNonzero(f.Type())) + cb(v.Type(), v.Type().Field(i), v.Field(i)) + + switch v.Type().Field(i).Type.Kind() { + case reflect.Struct: + eachStructValue(v.Field(i), cb) + case reflect.Slice, reflect.Array, reflect.Ptr, reflect.Map: + if v.Field(i).Type().Elem().Kind() == reflect.Struct { + eachStructValue(v.Field(i).Addr().Elem(), cb) + } } } } +// TestRedactNetmapPrivateKeys tests that redactNetmapPrivateKeys redacts all private keys +// and other private fields from a netmap.NetworkMap, and only those fields. func TestRedactNetmapPrivateKeys(t *testing.T) { - fieldMap := map[string]bool{ - // Private fields (should be redacted): - "PrivateKey": true, + type field struct { + t reflect.Type + f string + } + f := func(t any, f string) field { + return field{reflect.TypeOf(t), f} + } + // fields is a map of all struct fields in netmap.NetworkMap and its + // sub-structs, marking each field as private (true) or public (false). + // If you add a new field to netmap.NetworkMap or its sub-structs, + // you must add it to this list, marking it as private or public. + fields := map[field]bool{ + // Private fields to be redacted. + f(netmap.NetworkMap{}, "PrivateKey"): true, + + // All other fields are public. 
+ f(netmap.NetworkMap{}, "AllCaps"): false, + f(netmap.NetworkMap{}, "CollectServices"): false, + f(netmap.NetworkMap{}, "DERPMap"): false, + f(netmap.NetworkMap{}, "DNS"): false, + f(netmap.NetworkMap{}, "DisplayMessages"): false, + f(netmap.NetworkMap{}, "Domain"): false, + f(netmap.NetworkMap{}, "DomainAuditLogID"): false, + f(netmap.NetworkMap{}, "Expiry"): false, + f(netmap.NetworkMap{}, "MachineKey"): false, + f(netmap.NetworkMap{}, "Name"): false, + f(netmap.NetworkMap{}, "NodeKey"): false, + f(netmap.NetworkMap{}, "PacketFilter"): false, + f(netmap.NetworkMap{}, "PacketFilterRules"): false, + f(netmap.NetworkMap{}, "Peers"): false, + f(netmap.NetworkMap{}, "SSHPolicy"): false, + f(netmap.NetworkMap{}, "SelfNode"): false, + f(netmap.NetworkMap{}, "TKAEnabled"): false, + f(netmap.NetworkMap{}, "TKAHead"): false, + f(netmap.NetworkMap{}, "UserProfiles"): false, + f(filtertype.CapMatch{}, "Cap"): false, + f(filtertype.CapMatch{}, "Dst"): false, + f(filtertype.CapMatch{}, "Values"): false, + f(filtertype.Match{}, "Caps"): false, + f(filtertype.Match{}, "Dsts"): false, + f(filtertype.Match{}, "IPProto"): false, + f(filtertype.Match{}, "SrcCaps"): false, + f(filtertype.Match{}, "Srcs"): false, + f(filtertype.Match{}, "SrcsContains"): false, + f(filtertype.NetPortRange{}, "Net"): false, + f(filtertype.NetPortRange{}, "Ports"): false, + f(filtertype.PortRange{}, "First"): false, + f(filtertype.PortRange{}, "Last"): false, + f(key.DiscoPublic{}, "k"): false, + f(key.MachinePublic{}, "k"): false, + f(key.NodePrivate{}, "_"): false, + f(key.NodePrivate{}, "k"): false, + f(key.NodePublic{}, "k"): false, + f(tailcfg.CapGrant{}, "CapMap"): false, + f(tailcfg.CapGrant{}, "Caps"): false, + f(tailcfg.CapGrant{}, "Dsts"): false, + f(tailcfg.DERPHomeParams{}, "RegionScore"): false, + f(tailcfg.DERPMap{}, "HomeParams"): false, + f(tailcfg.DERPMap{}, "OmitDefaultRegions"): false, + f(tailcfg.DERPMap{}, "Regions"): false, + f(tailcfg.DNSConfig{}, "CertDomains"): false, + f(tailcfg.DNSConfig{}, "Domains"): false, + f(tailcfg.DNSConfig{}, "ExitNodeFilteredSet"): false, + f(tailcfg.DNSConfig{}, "ExtraRecords"): false, + f(tailcfg.DNSConfig{}, "FallbackResolvers"): false, + f(tailcfg.DNSConfig{}, "Nameservers"): false, + f(tailcfg.DNSConfig{}, "Proxied"): false, + f(tailcfg.DNSConfig{}, "Resolvers"): false, + f(tailcfg.DNSConfig{}, "Routes"): false, + f(tailcfg.DNSConfig{}, "TempCorpIssue13969"): false, + f(tailcfg.DNSRecord{}, "Name"): false, + f(tailcfg.DNSRecord{}, "Type"): false, + f(tailcfg.DNSRecord{}, "Value"): false, + f(tailcfg.DisplayMessageAction{}, "Label"): false, + f(tailcfg.DisplayMessageAction{}, "URL"): false, + f(tailcfg.DisplayMessage{}, "ImpactsConnectivity"): false, + f(tailcfg.DisplayMessage{}, "PrimaryAction"): false, + f(tailcfg.DisplayMessage{}, "Severity"): false, + f(tailcfg.DisplayMessage{}, "Text"): false, + f(tailcfg.DisplayMessage{}, "Title"): false, + f(tailcfg.FilterRule{}, "CapGrant"): false, + f(tailcfg.FilterRule{}, "DstPorts"): false, + f(tailcfg.FilterRule{}, "IPProto"): false, + f(tailcfg.FilterRule{}, "SrcBits"): false, + f(tailcfg.FilterRule{}, "SrcIPs"): false, + f(tailcfg.HostinfoView{}, "ж"): false, + f(tailcfg.Hostinfo{}, "AllowsUpdate"): false, + f(tailcfg.Hostinfo{}, "App"): false, + f(tailcfg.Hostinfo{}, "AppConnector"): false, + f(tailcfg.Hostinfo{}, "BackendLogID"): false, + f(tailcfg.Hostinfo{}, "Cloud"): false, + f(tailcfg.Hostinfo{}, "Container"): false, + f(tailcfg.Hostinfo{}, "Desktop"): false, + f(tailcfg.Hostinfo{}, "DeviceModel"): false, + 
f(tailcfg.Hostinfo{}, "Distro"): false, + f(tailcfg.Hostinfo{}, "DistroCodeName"): false, + f(tailcfg.Hostinfo{}, "DistroVersion"): false, + f(tailcfg.Hostinfo{}, "Env"): false, + f(tailcfg.Hostinfo{}, "ExitNodeID"): false, + f(tailcfg.Hostinfo{}, "FrontendLogID"): false, + f(tailcfg.Hostinfo{}, "GoArch"): false, + f(tailcfg.Hostinfo{}, "GoArchVar"): false, + f(tailcfg.Hostinfo{}, "GoVersion"): false, + f(tailcfg.Hostinfo{}, "Hostname"): false, + f(tailcfg.Hostinfo{}, "IPNVersion"): false, + f(tailcfg.Hostinfo{}, "IngressEnabled"): false, + f(tailcfg.Hostinfo{}, "Location"): false, + f(tailcfg.Hostinfo{}, "Machine"): false, + f(tailcfg.Hostinfo{}, "NetInfo"): false, + f(tailcfg.Hostinfo{}, "NoLogsNoSupport"): false, + f(tailcfg.Hostinfo{}, "OS"): false, + f(tailcfg.Hostinfo{}, "OSVersion"): false, + f(tailcfg.Hostinfo{}, "Package"): false, + f(tailcfg.Hostinfo{}, "PushDeviceToken"): false, + f(tailcfg.Hostinfo{}, "RequestTags"): false, + f(tailcfg.Hostinfo{}, "RoutableIPs"): false, + f(tailcfg.Hostinfo{}, "SSH_HostKeys"): false, + f(tailcfg.Hostinfo{}, "Services"): false, + f(tailcfg.Hostinfo{}, "ServicesHash"): false, + f(tailcfg.Hostinfo{}, "ShareeNode"): false, + f(tailcfg.Hostinfo{}, "ShieldsUp"): false, + f(tailcfg.Hostinfo{}, "StateEncrypted"): false, + f(tailcfg.Hostinfo{}, "TPM"): false, + f(tailcfg.Hostinfo{}, "Userspace"): false, + f(tailcfg.Hostinfo{}, "UserspaceRouter"): false, + f(tailcfg.Hostinfo{}, "WireIngress"): false, + f(tailcfg.Hostinfo{}, "WoLMACs"): false, + f(tailcfg.Location{}, "City"): false, + f(tailcfg.Location{}, "CityCode"): false, + f(tailcfg.Location{}, "Country"): false, + f(tailcfg.Location{}, "CountryCode"): false, + f(tailcfg.Location{}, "Latitude"): false, + f(tailcfg.Location{}, "Longitude"): false, + f(tailcfg.Location{}, "Priority"): false, + f(tailcfg.NetInfo{}, "DERPLatency"): false, + f(tailcfg.NetInfo{}, "FirewallMode"): false, + f(tailcfg.NetInfo{}, "HairPinning"): false, + f(tailcfg.NetInfo{}, "HavePortMap"): false, + f(tailcfg.NetInfo{}, "LinkType"): false, + f(tailcfg.NetInfo{}, "MappingVariesByDestIP"): false, + f(tailcfg.NetInfo{}, "OSHasIPv6"): false, + f(tailcfg.NetInfo{}, "PCP"): false, + f(tailcfg.NetInfo{}, "PMP"): false, + f(tailcfg.NetInfo{}, "PreferredDERP"): false, + f(tailcfg.NetInfo{}, "UPnP"): false, + f(tailcfg.NetInfo{}, "WorkingICMPv4"): false, + f(tailcfg.NetInfo{}, "WorkingIPv6"): false, + f(tailcfg.NetInfo{}, "WorkingUDP"): false, + f(tailcfg.NetPortRange{}, "Bits"): false, + f(tailcfg.NetPortRange{}, "IP"): false, + f(tailcfg.NetPortRange{}, "Ports"): false, + f(tailcfg.NetPortRange{}, "_"): false, + f(tailcfg.NodeView{}, "ж"): false, + f(tailcfg.Node{}, "Addresses"): false, + f(tailcfg.Node{}, "AllowedIPs"): false, + f(tailcfg.Node{}, "Cap"): false, + f(tailcfg.Node{}, "CapMap"): false, + f(tailcfg.Node{}, "Capabilities"): false, + f(tailcfg.Node{}, "ComputedName"): false, + f(tailcfg.Node{}, "ComputedNameWithHost"): false, + f(tailcfg.Node{}, "Created"): false, + f(tailcfg.Node{}, "DataPlaneAuditLogID"): false, + f(tailcfg.Node{}, "DiscoKey"): false, + f(tailcfg.Node{}, "Endpoints"): false, + f(tailcfg.Node{}, "ExitNodeDNSResolvers"): false, + f(tailcfg.Node{}, "Expired"): false, + f(tailcfg.Node{}, "HomeDERP"): false, + f(tailcfg.Node{}, "Hostinfo"): false, + f(tailcfg.Node{}, "ID"): false, + f(tailcfg.Node{}, "IsJailed"): false, + f(tailcfg.Node{}, "IsWireGuardOnly"): false, + f(tailcfg.Node{}, "Key"): false, + f(tailcfg.Node{}, "KeyExpiry"): false, + f(tailcfg.Node{}, "KeySignature"): false, + f(tailcfg.Node{}, 
"LastSeen"): false, + f(tailcfg.Node{}, "LegacyDERPString"): false, + f(tailcfg.Node{}, "Machine"): false, + f(tailcfg.Node{}, "MachineAuthorized"): false, + f(tailcfg.Node{}, "Name"): false, + f(tailcfg.Node{}, "Online"): false, + f(tailcfg.Node{}, "PrimaryRoutes"): false, + f(tailcfg.Node{}, "SelfNodeV4MasqAddrForThisPeer"): false, + f(tailcfg.Node{}, "SelfNodeV6MasqAddrForThisPeer"): false, + f(tailcfg.Node{}, "Sharer"): false, + f(tailcfg.Node{}, "StableID"): false, + f(tailcfg.Node{}, "Tags"): false, + f(tailcfg.Node{}, "UnsignedPeerAPIOnly"): false, + f(tailcfg.Node{}, "User"): false, + f(tailcfg.Node{}, "computedHostIfDifferent"): false, + f(tailcfg.PortRange{}, "First"): false, + f(tailcfg.PortRange{}, "Last"): false, + f(tailcfg.SSHPolicy{}, "Rules"): false, + f(tailcfg.Service{}, "Description"): false, + f(tailcfg.Service{}, "Port"): false, + f(tailcfg.Service{}, "Proto"): false, + f(tailcfg.Service{}, "_"): false, + f(tailcfg.TPMInfo{}, "FirmwareVersion"): false, + f(tailcfg.TPMInfo{}, "Manufacturer"): false, + f(tailcfg.TPMInfo{}, "Model"): false, + f(tailcfg.TPMInfo{}, "SpecRevision"): false, + f(tailcfg.TPMInfo{}, "Vendor"): false, + f(tailcfg.UserProfileView{}, "ж"): false, + f(tailcfg.UserProfile{}, "DisplayName"): false, + f(tailcfg.UserProfile{}, "ID"): false, + f(tailcfg.UserProfile{}, "LoginName"): false, + f(tailcfg.UserProfile{}, "ProfilePicURL"): false, + f(views.Slice[ipproto.Proto]{}, "ж"): false, + f(views.Slice[tailcfg.FilterRule]{}, "ж"): false, + } + + t.Run("field_list_is_complete", func(t *testing.T) { + seen := set.Set[field]{} + eachStructField(reflect.TypeOf(netmap.NetworkMap{}), func(rt reflect.Type, sf reflect.StructField) { + f := field{rt, sf.Name} + seen.Add(f) + if _, ok := fields[f]; !ok { + // Fail the test if netmap has a field not in the list. If you see this test + // failure, please add the new field to the fields map above, marking it as private or public. + t.Errorf("netmap field has not been declared as private or public: %v.%v", rt, sf.Name) + } + }) + + for want := range fields { + if !seen.Contains(want) { + // Fail the test if the list has a field not in netmap. If you see this test + // failure, please remove the field from the fields map above. + t.Errorf("field declared that has not been found in netmap: %v.%v", want.t, want.f) + } + } + }) - // Public fields (should not be redacted): - "AllCaps": false, - "CollectServices": false, - "DERPMap": false, - "DNS": false, - "DisplayMessages": false, - "Domain": false, - "DomainAuditLogID": false, - "Expiry": false, - "MachineKey": false, - "Name": false, - "NodeKey": false, - "PacketFilter": false, - "PacketFilterRules": false, - "Peers": false, - "SSHPolicy": false, - "SelfNode": false, - "TKAEnabled": false, - "TKAHead": false, - "UserProfiles": false, + // tests is a list of test cases, each with a non-redacted netmap and the expected redacted netmap. + // If you add a new private field to netmap.NetworkMap or its sub-structs, please add a test case + // here that has that field set in nm, and the expected redacted value in wantRedacted. + tests := []struct { + name string + nm *netmap.NetworkMap + wantRedacted *netmap.NetworkMap + }{ + { + name: "redact_private_key", + nm: &netmap.NetworkMap{ + PrivateKey: key.NewNode(), + }, + wantRedacted: &netmap.NetworkMap{}, + }, } - nm := &netmap.NetworkMap{} - setFieldsToRedact(t, nm, fieldMap) + // confirmedRedacted is a set of all private fields that have been covered by the tests above. 
+ confirmedRedacted := set.Set[field]{} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + // Record which of the private fields are set in the non-redacted netmap. + eachStructValue(reflect.ValueOf(tt.nm).Elem(), func(tt reflect.Type, sf reflect.StructField, v reflect.Value) { + f := field{tt, sf.Name} + if shouldRedact := fields[f]; shouldRedact && !v.IsZero() { + confirmedRedacted.Add(f) + } + }) + + got, _ := redactNetmapPrivateKeys(tt.nm) + if !reflect.DeepEqual(got, tt.wantRedacted) { + t.Errorf("unexpected redacted netmap: %+v", got) + } + + // Check that all private fields in the redacted netmap are zero. + eachStructValue(reflect.ValueOf(got).Elem(), func(tt reflect.Type, sf reflect.StructField, v reflect.Value) { + f := field{tt, sf.Name} + if shouldRedact := fields[f]; shouldRedact && !v.IsZero() { + t.Errorf("field not redacted: %v.%v", tt, sf.Name) + } + }) + }) + } - got, _ := redactNetmapPrivateKeys(nm) - if !reflect.DeepEqual(got, &netmap.NetworkMap{}) { - t.Errorf("redacted netmap is not empty: %+v", got) + // Check that all private fields in netmap.NetworkMap and its sub-structs + // are covered by the tests above. If you see a test failure here, + // please add a test case above that has that field set in nm. + for f, shouldRedact := range fields { + if shouldRedact { + if !confirmedRedacted.Contains(f) { + t.Errorf("field not covered by tests: %v.%v", f.t, f.f) + } + } } } From b9cda4bca5a0c5562021deae1512de8e3a3c2bc4 Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Fri, 19 Sep 2025 12:31:44 -0400 Subject: [PATCH 1352/1708] tsnet,internal/client/tailscale: resolve OAuth into authkeys in tsnet (#17191) * tsnet,internal/client/tailscale: resolve OAuth into authkeys in tsnet Updates #8403. * internal/client/tailscale: omit OAuth library via build tag Updates #12614. 
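As a rough illustration of the tsnet side (the hostname and tag values below are
made up; Hostname, AuthKey, and AdvertiseTags are existing tsnet.Server fields),
an OAuth client secret can now be supplied wherever an auth key is expected and
is resolved into a real auth key when the server starts:

    srv := &tsnet.Server{
        Hostname:      "ci-worker", // illustrative
        AuthKey:       "tskey-client-XXXX?ephemeral=false&preauthorized=true",
        AdvertiseTags: []string{"tag:ci"}, // tags are required when using an OAuth secret
    }
    ln, err := srv.Listen("tcp", ":80") // Listen starts the server on first use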
Signed-off-by: Naman Sood --- cmd/k8s-operator/depaware.txt | 6 +- cmd/tailscale/cli/up.go | 98 ++-------------- cmd/tailscale/depaware.txt | 6 +- cmd/tailscaled/deps_test.go | 13 +++ cmd/tsidp/depaware.txt | 7 ++ .../feature_oauthkey_disabled.go | 13 +++ .../buildfeatures/feature_oauthkey_enabled.go | 13 +++ feature/condregister/oauthkey/doc.go | 10 ++ .../condregister/oauthkey/maybe_oauthkey.go | 8 ++ feature/featuretags/featuretags.go | 1 + feature/oauthkey/oauthkey.go | 108 ++++++++++++++++++ internal/client/tailscale/oauthkeys.go | 20 ++++ tsnet/depaware.txt | 7 ++ tsnet/tsnet.go | 10 ++ 14 files changed, 226 insertions(+), 94 deletions(-) create mode 100644 feature/buildfeatures/feature_oauthkey_disabled.go create mode 100644 feature/buildfeatures/feature_oauthkey_enabled.go create mode 100644 feature/condregister/oauthkey/doc.go create mode 100644 feature/condregister/oauthkey/maybe_oauthkey.go create mode 100644 feature/oauthkey/oauthkey.go create mode 100644 internal/client/tailscale/oauthkeys.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index e65977875..e5eccf2c2 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -798,13 +798,15 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ - tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator + tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ @@ -1030,7 +1032,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/net/websocket from tailscale.com/k8s-operator/sessionrecording/ws golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials+ - golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/k8s-operator + golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/k8s-operator+ golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index c78a63569..12c26b21c 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -12,13 +12,11 @@ import ( "fmt" "log" "net/netip" - "net/url" "os" "os/signal" "reflect" "runtime" "sort" - "strconv" "strings" "syscall" "time" @@ -26,7 +24,7 @@ import ( shellquote "github.com/kballard/go-shellquote" "github.com/peterbourgon/ff/v3/ffcli" qrcode "github.com/skip2/go-qrcode" - "golang.org/x/oauth2/clientcredentials" + _ 
"tailscale.com/feature/condregister/oauthkey" "tailscale.com/health/healthmsg" "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" @@ -566,9 +564,13 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if err != nil { return err } - authKey, err = resolveAuthKey(ctx, authKey, upArgs.advertiseTags) - if err != nil { - return err + // Try to use an OAuth secret to generate an auth key if that functionality + // is available. + if f, ok := tailscale.HookResolveAuthKey.GetOk(); ok { + authKey, err = f(ctx, authKey, strings.Split(upArgs.advertiseTags, ",")) + if err != nil { + return err + } } err = localClient.Start(ctx, ipn.Options{ AuthKey: authKey, @@ -1109,90 +1111,6 @@ func exitNodeIP(p *ipn.Prefs, st *ipnstate.Status) (ip netip.Addr) { return } -// resolveAuthKey either returns v unchanged (in the common case) or, if it -// starts with "tskey-client-" (as Tailscale OAuth secrets do) parses it like -// -// tskey-client-xxxx[?ephemeral=false&bar&preauthorized=BOOL&baseURL=...] -// -// and does the OAuth2 dance to get and return an authkey. The "ephemeral" -// property defaults to true if unspecified. The "preauthorized" defaults to -// false. The "baseURL" defaults to https://api.tailscale.com. -// The passed in tags are required, and must be non-empty. These will be -// set on the authkey generated by the OAuth2 dance. -func resolveAuthKey(ctx context.Context, v, tags string) (string, error) { - if !strings.HasPrefix(v, "tskey-client-") { - return v, nil - } - if tags == "" { - return "", errors.New("oauth authkeys require --advertise-tags") - } - - clientSecret, named, _ := strings.Cut(v, "?") - attrs, err := url.ParseQuery(named) - if err != nil { - return "", err - } - for k := range attrs { - switch k { - case "ephemeral", "preauthorized", "baseURL": - default: - return "", fmt.Errorf("unknown attribute %q", k) - } - } - getBool := func(name string, def bool) (bool, error) { - v := attrs.Get(name) - if v == "" { - return def, nil - } - ret, err := strconv.ParseBool(v) - if err != nil { - return false, fmt.Errorf("invalid attribute boolean attribute %s value %q", name, v) - } - return ret, nil - } - ephemeral, err := getBool("ephemeral", true) - if err != nil { - return "", err - } - preauth, err := getBool("preauthorized", false) - if err != nil { - return "", err - } - - baseURL := "https://api.tailscale.com" - if v := attrs.Get("baseURL"); v != "" { - baseURL = v - } - - credentials := clientcredentials.Config{ - ClientID: "some-client-id", // ignored - ClientSecret: clientSecret, - TokenURL: baseURL + "/api/v2/oauth/token", - } - - tsClient := tailscale.NewClient("-", nil) - tsClient.UserAgent = "tailscale-cli" - tsClient.HTTPClient = credentials.Client(ctx) - tsClient.BaseURL = baseURL - - caps := tailscale.KeyCapabilities{ - Devices: tailscale.KeyDeviceCapabilities{ - Create: tailscale.KeyDeviceCreateCapabilities{ - Reusable: false, - Ephemeral: ephemeral, - Preauthorized: preauth, - Tags: strings.Split(tags, ","), - }, - }, - } - - authkey, _, err := tsClient.CreateKey(ctx, caps) - if err != nil { - return "", err - } - return authkey, nil -} - func warnOnAdvertiseRoutes(ctx context.Context, prefs *ipn.Prefs) { if len(prefs.AdvertiseRoutes) > 0 || prefs.AppConnector.Advertise { // TODO(jwhited): compress CheckIPForwarding and CheckUDPGROForwarding diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index ae4a7bd4d..e25eece59 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -105,13 +105,15 @@ 
tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/feature from tailscale.com/tsweb+ tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscale/cli tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/cmd/tailscale/cli tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli tailscale.com/hostinfo from tailscale.com/client/web+ - tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli + tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli+ tailscale.com/internal/noiseconn from tailscale.com/cmd/tailscale/cli tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ @@ -253,7 +255,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials - golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/tailscale/cli + golang.org/x/oauth2/clientcredentials from tailscale.com/feature/oauthkey golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 9e6624d9a..538cdc115 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -122,3 +122,16 @@ func TestOmitACME(t *testing.T) { }, }.Check(t) } + +func TestOmitOAuthKey(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_oauthkey,ts_include_cli", + OnDep: func(dep string) { + if strings.HasPrefix(dep, "golang.org/x/oauth2") { + t.Errorf("unexpected dep with ts_omit_oauthkey: %q", dep) + } + }, + }.Check(t) +} diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 7db7849b7..df5476a60 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -217,6 +217,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/web+ + tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ @@ -239,12 +240,15 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey 
tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ + tailscale.com/internal/client/tailscale from tailscale.com/tsnet+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ @@ -457,6 +461,9 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ + golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials + golang.org/x/oauth2/clientcredentials from tailscale.com/feature/oauthkey + golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ LD golang.org/x/sys/unix from github.com/google/nftables+ diff --git a/feature/buildfeatures/feature_oauthkey_disabled.go b/feature/buildfeatures/feature_oauthkey_disabled.go new file mode 100644 index 000000000..72ad1723b --- /dev/null +++ b/feature/buildfeatures/feature_oauthkey_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_oauthkey + +package buildfeatures + +// HasOAuthKey is whether the binary was built with support for modular feature "OAuth secret-to-authkey resolution support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_oauthkey" build tag. +// It's a const so it can be used for dead code elimination. +const HasOAuthKey = false diff --git a/feature/buildfeatures/feature_oauthkey_enabled.go b/feature/buildfeatures/feature_oauthkey_enabled.go new file mode 100644 index 000000000..39c52a2b0 --- /dev/null +++ b/feature/buildfeatures/feature_oauthkey_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_oauthkey + +package buildfeatures + +// HasOAuthKey is whether the binary was built with support for modular feature "OAuth secret-to-authkey resolution support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_oauthkey" build tag. +// It's a const so it can be used for dead code elimination. +const HasOAuthKey = true diff --git a/feature/condregister/oauthkey/doc.go b/feature/condregister/oauthkey/doc.go new file mode 100644 index 000000000..4c4ea5e4e --- /dev/null +++ b/feature/condregister/oauthkey/doc.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package oauthkey registers support for OAuth key resolution +// if it's not disabled via the ts_omit_oauthkey build tag. +// Currently (2025-09-19), tailscaled does not need OAuth key +// resolution, only the CLI and tsnet do, so this package is +// pulled out separately to avoid linking OAuth packages into +// tailscaled. 
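// A program that wants this resolution and doesn't already get it
// transitively (the CLI and tsnet both blank-import it) can do the same:
//
//	import _ "tailscale.com/feature/condregister/oauthkey"
//
// which compiles to nothing when built with the ts_omit_oauthkey tag.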
+package oauthkey diff --git a/feature/condregister/oauthkey/maybe_oauthkey.go b/feature/condregister/oauthkey/maybe_oauthkey.go new file mode 100644 index 000000000..be8d04b8e --- /dev/null +++ b/feature/condregister/oauthkey/maybe_oauthkey.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_oauthkey + +package oauthkey + +import _ "tailscale.com/feature/oauthkey" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 6afb40893..325f46a44 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -105,6 +105,7 @@ var Features = map[FeatureTag]FeatureMeta{ "desktop_sessions": {"DesktopSessions", "Desktop sessions support", nil}, "drive": {"Drive", "Tailscale Drive (file server) support", nil}, "kube": {"Kube", "Kubernetes integration", nil}, + "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, "relayserver": {"RelayServer", "Relay server", nil}, "serve": {"Serve", "Serve and Funnel support", nil}, diff --git a/feature/oauthkey/oauthkey.go b/feature/oauthkey/oauthkey.go new file mode 100644 index 000000000..5834c33be --- /dev/null +++ b/feature/oauthkey/oauthkey.go @@ -0,0 +1,108 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package oauthkey registers support for using OAuth client secrets to +// automatically request authkeys for logging in. +package oauthkey + +import ( + "context" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + + "golang.org/x/oauth2/clientcredentials" + "tailscale.com/feature" + "tailscale.com/internal/client/tailscale" +) + +func init() { + feature.Register("oauthkey") + tailscale.HookResolveAuthKey.Set(resolveAuthKey) +} + +// resolveAuthKey either returns v unchanged (in the common case) or, if it +// starts with "tskey-client-" (as Tailscale OAuth secrets do) parses it like +// +// tskey-client-xxxx[?ephemeral=false&bar&preauthorized=BOOL&baseURL=...] +// +// and does the OAuth2 dance to get and return an authkey. The "ephemeral" +// property defaults to true if unspecified. The "preauthorized" defaults to +// false. The "baseURL" defaults to https://api.tailscale.com. +// The passed in tags are required, and must be non-empty. These will be +// set on the authkey generated by the OAuth2 dance. 
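// For example (values illustrative), the secret
//
//	tskey-client-XXXX?preauthorized=true&baseURL=https://api.example.com
//
// together with tags ["tag:ci"] yields a single-use, ephemeral, preauthorized
// authkey tagged tag:ci, minted via api.example.com.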
+func resolveAuthKey(ctx context.Context, v string, tags []string) (string, error) { + if !strings.HasPrefix(v, "tskey-client-") { + return v, nil + } + if len(tags) == 0 { + return "", errors.New("oauth authkeys require --advertise-tags") + } + + clientSecret, named, _ := strings.Cut(v, "?") + attrs, err := url.ParseQuery(named) + if err != nil { + return "", err + } + for k := range attrs { + switch k { + case "ephemeral", "preauthorized", "baseURL": + default: + return "", fmt.Errorf("unknown attribute %q", k) + } + } + getBool := func(name string, def bool) (bool, error) { + v := attrs.Get(name) + if v == "" { + return def, nil + } + ret, err := strconv.ParseBool(v) + if err != nil { + return false, fmt.Errorf("invalid attribute boolean attribute %s value %q", name, v) + } + return ret, nil + } + ephemeral, err := getBool("ephemeral", true) + if err != nil { + return "", err + } + preauth, err := getBool("preauthorized", false) + if err != nil { + return "", err + } + + baseURL := "https://api.tailscale.com" + if v := attrs.Get("baseURL"); v != "" { + baseURL = v + } + + credentials := clientcredentials.Config{ + ClientID: "some-client-id", // ignored + ClientSecret: clientSecret, + TokenURL: baseURL + "/api/v2/oauth/token", + } + + tsClient := tailscale.NewClient("-", nil) + tsClient.UserAgent = "tailscale-cli" + tsClient.HTTPClient = credentials.Client(ctx) + tsClient.BaseURL = baseURL + + caps := tailscale.KeyCapabilities{ + Devices: tailscale.KeyDeviceCapabilities{ + Create: tailscale.KeyDeviceCreateCapabilities{ + Reusable: false, + Ephemeral: ephemeral, + Preauthorized: preauth, + Tags: tags, + }, + }, + } + + authkey, _, err := tsClient.CreateKey(ctx, caps) + if err != nil { + return "", err + } + return authkey, nil +} diff --git a/internal/client/tailscale/oauthkeys.go b/internal/client/tailscale/oauthkeys.go new file mode 100644 index 000000000..21102ce0b --- /dev/null +++ b/internal/client/tailscale/oauthkeys.go @@ -0,0 +1,20 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tailscale + +import ( + "context" + + "tailscale.com/feature" +) + +// HookResolveAuthKey resolves to [oauthkey.ResolveAuthKey] when the +// corresponding feature tag is enabled in the build process. +// +// authKey is a standard device auth key or an OAuth client secret to +// resolve into an auth key. +// tags is the list of tags being advertised by the client (required to be +// provided for the OAuth secret case, and required to be the same as the +// list of tags for which the OAuth secret is allowed to issue auth keys). 
+var HookResolveAuthKey feature.Hook[func(ctx context.Context, authKey string, tags []string) (string, error)] diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index c115332fa..4fd9b7dba 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -213,6 +213,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/web+ + tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ LDW tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ @@ -235,12 +236,15 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ + tailscale.com/internal/client/tailscale from tailscale.com/tsnet+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ @@ -450,6 +454,9 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ LDW golang.org/x/net/proxy from tailscale.com/net/netns DI golang.org/x/net/route from tailscale.com/net/netmon+ + golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials + golang.org/x/oauth2/clientcredentials from tailscale.com/feature/oauthkey + golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ LDAI golang.org/x/sys/unix from github.com/google/nftables+ diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 6b083132f..978819519 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -29,9 +29,11 @@ import ( "tailscale.com/client/local" "tailscale.com/control/controlclient" "tailscale.com/envknob" + _ "tailscale.com/feature/condregister/oauthkey" _ "tailscale.com/feature/condregister/portmapper" "tailscale.com/health" "tailscale.com/hostinfo" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" @@ -680,6 +682,14 @@ func (s *Server) start() (reterr error) { prefs.RunWebClient = s.RunWebClient prefs.AdvertiseTags = s.AdvertiseTags authKey := s.getAuthKey() + // Try to use an OAuth secret to generate an auth key if that functionality + // is available. 
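	// (The hook is populated by feature/oauthkey's init, which is linked in
	// via the blank import of feature/condregister/oauthkey above unless the
	// binary is built with ts_omit_oauthkey; when absent, the auth key is
	// used exactly as provided.)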
+ if f, ok := tailscale.HookResolveAuthKey.GetOk(); ok { + authKey, err = f(s.shutdownCtx, s.getAuthKey(), prefs.AdvertiseTags) + if err != nil { + return fmt.Errorf("resolving auth key: %w", err) + } + } err = lb.Start(ipn.Options{ UpdatePrefs: prefs, AuthKey: authKey, From ecfdd86fc9956631759277d1ddbd78f0456dc365 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 17 Sep 2025 09:44:50 -0700 Subject: [PATCH 1353/1708] net/ace, control/controlhttp: start adding ACE dialing support Updates tailscale/corp#32227 Change-Id: I38afc668f99eb1d6f7632e82554b82922f3ebb9f Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/cli/debug.go | 22 +++++- cmd/tailscale/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + control/controlhttp/client.go | 78 ++++++++++++++------- net/ace/ace.go | 123 ++++++++++++++++++++++++++++++++++ tailcfg/tailcfg.go | 9 ++- tsnet/depaware.txt | 1 + 9 files changed, 211 insertions(+), 26 deletions(-) create mode 100644 net/ace/ace.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index e5eccf2c2..b962f51f2 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -842,6 +842,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 39c9748ef..9e8fa0d7f 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -35,6 +35,7 @@ import ( "tailscale.com/hostinfo" "tailscale.com/internal/noiseconn" "tailscale.com/ipn" + "tailscale.com/net/ace" "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" "tailscale.com/net/tshttpproxy" @@ -287,6 +288,7 @@ func debugCmd() *ffcli.Command { fs.StringVar(&ts2021Args.host, "host", "controlplane.tailscale.com", "hostname of control plane") fs.IntVar(&ts2021Args.version, "version", int(tailcfg.CurrentCapabilityVersion), "protocol version") fs.BoolVar(&ts2021Args.verbose, "verbose", false, "be extra verbose") + fs.StringVar(&ts2021Args.aceHost, "ace", "", "if non-empty, use this ACE server IP/hostname as a candidate path") return fs })(), }, @@ -964,6 +966,7 @@ var ts2021Args struct { host string // "controlplane.tailscale.com" version int // 27 or whatever verbose bool + aceHost string // if non-empty, FQDN of https ACE server to use ("ace.example.com") } func runTS2021(ctx context.Context, args []string) error { @@ -972,6 +975,13 @@ func runTS2021(ctx context.Context, args []string) error { keysURL := "https://" + ts2021Args.host + "/key?v=" + strconv.Itoa(ts2021Args.version) + keyTransport := http.DefaultTransport.(*http.Transport).Clone() + if ts2021Args.aceHost != "" { + log.Printf("using ACE server %q", ts2021Args.aceHost) + keyTransport.Proxy = nil + keyTransport.DialContext = (&ace.Dialer{ACEHost: ts2021Args.aceHost}).Dial + } + if ts2021Args.verbose { u, err := url.Parse(keysURL) if err != nil { @@ -997,7 +1007,7 @@ func runTS2021(ctx context.Context, args []string) error { if err != nil { return err } - res, err := http.DefaultClient.Do(req) + res, err := 
keyTransport.RoundTrip(req) if err != nil { log.Printf("Do: %v", err) return err @@ -1052,6 +1062,16 @@ func runTS2021(ctx context.Context, args []string) error { Logf: logf, NetMon: netMon, } + if ts2021Args.aceHost != "" { + noiseDialer.DialPlan = &tailcfg.ControlDialPlan{ + Candidates: []tailcfg.ControlIPCandidate{ + { + ACEHost: ts2021Args.aceHost, + DialTimeoutSec: 10, + }, + }, + } + } const tries = 2 for i := range tries { err := tryConnect(ctx, keys.PublicKey, noiseDialer) diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index e25eece59..27d7864ae 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -120,6 +120,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web+ tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial tailscale.com/net/captivedetection from tailscale.com/net/netcheck tailscale.com/net/dnscache from tailscale.com/control/controlhttp+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 4482ad125..e4405a689 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -314,6 +314,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail/backoff from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index df5476a60..872dc8f81 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -273,6 +273,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index 1bb60d672..87061c310 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -20,6 +20,7 @@ package controlhttp import ( + "cmp" "context" "crypto/tls" "encoding/base64" @@ -41,6 +42,7 @@ import ( "tailscale.com/control/controlhttp/controlhttpcommon" "tailscale.com/envknob" "tailscale.com/health" + "tailscale.com/net/ace" "tailscale.com/net/dnscache" "tailscale.com/net/dnsfallback" "tailscale.com/net/netutil" @@ -104,7 +106,7 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { // host we know about. 
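	// (With this change a dial plan candidate may name an ACE proxy rather
	// than a direct control IP; in that case the client dials the ACE host
	// over TLS and issues an HTTP CONNECT for the control hostname, via the
	// net/ace Dialer added below.)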
useDialPlan := envknob.BoolDefaultTrue("TS_USE_CONTROL_DIAL_PLAN") if !useDialPlan || a.DialPlan == nil || len(a.DialPlan.Candidates) == 0 { - return a.dialHost(ctx, netip.Addr{}) + return a.dialHost(ctx) } candidates := a.DialPlan.Candidates @@ -125,10 +127,9 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { // Now, for each candidate, kick off a dial in parallel. type dialResult struct { - conn *ClientConn - err error - addr netip.Addr - priority int + conn *ClientConn + err error + cand tailcfg.ControlIPCandidate } resultsCh := make(chan dialResult, len(candidates)) @@ -143,7 +144,7 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { // Always send results back to our channel. defer func() { - resultsCh <- dialResult{conn, err, c.IP, c.Priority} + resultsCh <- dialResult{conn, err, c} if pending.Add(-1) == 0 { close(resultsCh) } @@ -168,9 +169,13 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { ctx, cancel := context.WithTimeout(ctx, time.Duration(c.DialTimeoutSec*float64(time.Second))) defer cancel() + if c.IP.IsValid() { + a.logf("[v2] controlhttp: trying to dial %q @ %v", a.Hostname, c.IP) + } else if c.ACEHost != "" { + a.logf("[v2] controlhttp: trying to dial %q via ACE %q", a.Hostname, c.ACEHost) + } // This will dial, and the defer above sends it back to our parent. - a.logf("[v2] controlhttp: trying to dial %q @ %v", a.Hostname, c.IP) - conn, err = a.dialHost(ctx, c.IP) + conn, err = a.dialHostOpt(ctx, c.IP, c.ACEHost) }(ctx, c) } @@ -183,8 +188,8 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { // TODO(andrew): we could make this better by keeping track of // the highest remaining priority dynamically, instead of just // checking for the highest total - if res.priority == highestPriority && res.conn != nil { - a.logf("[v1] controlhttp: high-priority success dialing %q @ %v from dial plan", a.Hostname, res.addr) + if res.cand.Priority == highestPriority && res.conn != nil { + a.logf("[v1] controlhttp: high-priority success dialing %q @ %v from dial plan", a.Hostname, cmp.Or(res.cand.ACEHost, res.cand.IP.String())) // Drain the channel and any existing connections in // the background. @@ -232,7 +237,7 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { sort.Slice(results, func(i, j int) bool { // NOTE: intentionally inverted so that the highest priority // item comes first - return results[i].priority > results[j].priority + return results[i].cand.Priority > results[j].cand.Priority }) var ( @@ -245,7 +250,7 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { continue } - a.logf("[v1] controlhttp: succeeded dialing %q @ %v from dial plan", a.Hostname, result.addr) + a.logf("[v1] controlhttp: succeeded dialing %q @ %v from dial plan", a.Hostname, cmp.Or(result.cand.ACEHost, result.cand.IP.String())) conn = result.conn results[i].conn = nil // so we don't close it in the defer return conn, nil @@ -259,7 +264,7 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { // If we get here, then we didn't get anywhere with our dial plan; fall back to just using DNS. 
a.logf("controlhttp: failed dialing using DialPlan, falling back to DNS; errs=%s", merr.Error()) - return a.dialHost(ctx, netip.Addr{}) + return a.dialHost(ctx) } // The TS_FORCE_NOISE_443 envknob forces the controlclient noise dialer to @@ -316,10 +321,19 @@ var debugNoiseDial = envknob.RegisterBool("TS_DEBUG_NOISE_DIAL") // dialHost connects to the configured Dialer.Hostname and upgrades the // connection into a controlbase.Conn. +func (a *Dialer) dialHost(ctx context.Context) (*ClientConn, error) { + return a.dialHostOpt(ctx, + netip.Addr{}, // no pre-resolved IP + "", // don't use ACE + ) +} + +// dialHostOpt connects to the configured Dialer.Hostname and upgrades the +// connection into a controlbase.Conn. // // If optAddr is valid, then no DNS is used and the connection will be made to the // provided address. -func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, error) { +func (a *Dialer) dialHostOpt(ctx context.Context, optAddr netip.Addr, optACEHost string) (*ClientConn, error) { // Create one shared context used by both port 80 and port 443 dials. // If port 80 is still in flight when 443 returns, this deferred cancel // will stop the port 80 dial. @@ -341,7 +355,7 @@ func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, Host: net.JoinHostPort(a.Hostname, strDef(a.HTTPSPort, "443")), Path: serverUpgradePath, } - if a.HTTPSPort == NoPort { + if a.HTTPSPort == NoPort || optACEHost != "" { u443 = nil } @@ -353,11 +367,11 @@ func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, ch := make(chan tryURLRes) // must be unbuffered try := func(u *url.URL) { if debugNoiseDial() { - a.logf("trying noise dial (%v, %v) ...", u, optAddr) + a.logf("trying noise dial (%v, %v) ...", u, cmp.Or(optACEHost, optAddr.String())) } - cbConn, err := a.dialURL(ctx, u, optAddr) + cbConn, err := a.dialURL(ctx, u, optAddr, optACEHost) if debugNoiseDial() { - a.logf("noise dial (%v, %v) = (%v, %v)", u, optAddr, cbConn, err) + a.logf("noise dial (%v, %v) = (%v, %v)", u, cmp.Or(optACEHost, optAddr.String()), cbConn, err) } select { case ch <- tryURLRes{u, cbConn, err}: @@ -423,12 +437,12 @@ func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, // // If optAddr is valid, then no DNS is used and the connection will be made to the // provided address. -func (a *Dialer) dialURL(ctx context.Context, u *url.URL, optAddr netip.Addr) (*ClientConn, error) { +func (a *Dialer) dialURL(ctx context.Context, u *url.URL, optAddr netip.Addr, optACEHost string) (*ClientConn, error) { init, cont, err := controlbase.ClientDeferred(a.MachineKey, a.ControlKey, a.ProtocolVersion) if err != nil { return nil, err } - netConn, err := a.tryURLUpgrade(ctx, u, optAddr, init) + netConn, err := a.tryURLUpgrade(ctx, u, optAddr, optACEHost, init) if err != nil { return nil, err } @@ -480,7 +494,7 @@ var macOSScreenTime = health.Register(&health.Warnable{ // the provided address. // // Only the provided ctx is used, not a.ctx. 
-func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Addr, init []byte) (_ net.Conn, retErr error) { +func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Addr, optACEHost string, init []byte) (_ net.Conn, retErr error) { var dns *dnscache.Resolver // If we were provided an address to dial, then create a resolver that just @@ -502,6 +516,14 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad dialer = stdDialer.DialContext } + if optACEHost != "" { + dialer = (&ace.Dialer{ + ACEHost: optACEHost, + ACEHostIP: optAddr, // may be zero + NetDialer: dialer, + }).Dial + } + // On macOS, see if Screen Time is blocking things. if runtime.GOOS == "darwin" { var proxydIntercepted atomic.Bool // intercepted by macOS webfilterproxyd @@ -528,9 +550,17 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad tr := http.DefaultTransport.(*http.Transport).Clone() defer tr.CloseIdleConnections() - tr.Proxy = a.getProxyFunc() - tshttpproxy.SetTransportGetProxyConnectHeader(tr) - tr.DialContext = dnscache.Dialer(dialer, dns) + if optACEHost != "" { + // If using ACE, we don't want to use any HTTP proxy. + // ACE is already a tunnel+proxy. + // TODO(tailscale/corp#32483): use system proxy too? + tr.Proxy = nil + tr.DialContext = dialer + } else { + tr.Proxy = a.getProxyFunc() + tshttpproxy.SetTransportGetProxyConnectHeader(tr) + tr.DialContext = dnscache.Dialer(dialer, dns) + } // Disable HTTP2, since h2 can't do protocol switching. tr.TLSClientConfig.NextProtos = []string{} tr.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{} diff --git a/net/ace/ace.go b/net/ace/ace.go new file mode 100644 index 000000000..1bb64d64d --- /dev/null +++ b/net/ace/ace.go @@ -0,0 +1,123 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package ace implements a Dialer that dials via a Tailscale ACE (CONNECT) +// proxy. +// +// TODO: document this more, when it's more done. As of 2025-09-17, it's in +// development. +package ace + +import ( + "bufio" + "cmp" + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "net/netip" + "sync/atomic" +) + +// Dialer is an HTTP CONNECT proxy dialer to dial the control plane via an ACE +// proxy. +type Dialer struct { + ACEHost string + ACEHostIP netip.Addr // optional; if non-zero, use this IP instead of DNS + ACEPort int // zero means 443 + + NetDialer func(ctx context.Context, network, address string) (net.Conn, error) +} + +func (d *Dialer) netDialer() func(ctx context.Context, network, address string) (net.Conn, error) { + if d.NetDialer != nil { + return d.NetDialer + } + var std net.Dialer + return std.DialContext +} + +func (d *Dialer) acePort() int { return cmp.Or(d.ACEPort, 443) } + +func (d *Dialer) Dial(ctx context.Context, network, address string) (_ net.Conn, err error) { + if network != "tcp" { + return nil, errors.New("only TCP is supported") + } + + var targetHost string + if d.ACEHostIP.IsValid() { + targetHost = d.ACEHostIP.String() + } else { + targetHost = d.ACEHost + } + + cc, err := d.netDialer()(ctx, "tcp", net.JoinHostPort(targetHost, fmt.Sprint(d.acePort()))) + if err != nil { + return nil, err + } + + // Now that we've dialed, we're about to do three potentially blocking + // operations: the TLS handshake, the CONNECT write, and the HTTP response + // read. 
To make our context work over all that, we use a context.AfterFunc + // to start a goroutine that'll tear down the underlying connection if the + // context expires. + // + // To prevent races, we use an atomic.Bool to guard access to the underlying + // connection being either good or bad. Only one goroutine (the success path + // in this goroutine after the ReadResponse or the AfterFunc's failure + // goroutine) will compare-and-swap it from false to true. + var done atomic.Bool + stop := context.AfterFunc(ctx, func() { + if done.CompareAndSwap(false, true) { + cc.Close() + } + }) + defer func() { + if err != nil { + if ctx.Err() != nil { + // Prefer the context error. The other error is likely a side + // effect of the context expiring and our tearing down of the + // underlying connection, and is thus probably something like + // "use of closed network connection", which isn't useful (and + // actually misleading) for the caller. + err = ctx.Err() + } + stop() + cc.Close() + } + }() + + tc := tls.Client(cc, &tls.Config{ServerName: d.ACEHost}) + if err := tc.Handshake(); err != nil { + return nil, err + } + + // TODO(tailscale/corp#32484): send proxy-auth header + if _, err := fmt.Fprintf(tc, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", address, d.ACEHost); err != nil { + return nil, err + } + + br := bufio.NewReader(tc) + connRes, err := http.ReadResponse(br, &http.Request{Method: "CONNECT"}) + if err != nil { + return nil, fmt.Errorf("reading CONNECT response: %w", err) + } + + // Now that we're done with blocking operations, mark the connection + // as good, to prevent the context's AfterFunc from closing it. + if !stop() || !done.CompareAndSwap(false, true) { + // We lost a race and the context expired. + return nil, ctx.Err() + } + + if connRes.StatusCode != http.StatusOK { + return nil, fmt.Errorf("ACE CONNECT response: %s", connRes.Status) + } + + if br.Buffered() > 0 { + return nil, fmt.Errorf("unexpected %d bytes of buffered data after ACE CONNECT", br.Buffered()) + } + return tc, nil +} diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 057e1a54b..88cda044f 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2264,7 +2264,14 @@ type ControlDialPlan struct { // connecting to the control server. type ControlIPCandidate struct { // IP is the address to attempt connecting to. - IP netip.Addr + IP netip.Addr `json:",omitzero"` + + // ACEHost, if non-empty, means that the client should connect to the + // control plane using an HTTPS CONNECT request to the provided hostname. If + // the IP field is also set, then the IP is the IP address of the ACEHost + // (and not the control plane) and DNS should not be used. The target (the + // argument to CONNECT) is always the control plane's hostname, not an IP. + ACEHost string `json:",omitempty"` // DialStartSec is the number of seconds after the beginning of the // connection process to wait before trying this candidate. 
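For orientation, a rough sketch of how a dial-plan candidate carrying ACEHost is meant to be consumed, using only the ace.Dialer fields and Dial signature introduced above; the package, helper name, and arguments are illustrative and not part of this patch:

    package example // illustrative only

    import (
    	"context"
    	"net"

    	"tailscale.com/net/ace"
    	"tailscale.com/tailcfg"
    )

    // dialViaCandidate dials the control plane's host:port through the ACE
    // proxy named by a dial-plan candidate, mirroring dialURL/tryURLUpgrade.
    func dialViaCandidate(ctx context.Context, cand tailcfg.ControlIPCandidate, controlHostPort string) (net.Conn, error) {
    	d := &ace.Dialer{
    		ACEHost:   cand.ACEHost,
    		ACEHostIP: cand.IP, // zero value: resolve ACEHost via DNS
    		// ACEPort left zero, defaulting to 443.
    		// NetDialer left nil, defaulting to a plain net.Dialer.
    	}
    	// The CONNECT target is always the control plane's hostname:port,
    	// never the ACE host itself.
    	return d.Dial(ctx, "tcp", controlHostPort)
    }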
diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 4fd9b7dba..5f7ca2e32 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -269,6 +269,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ From 5e698a81b688c57a7241f69385a5461b53b5aa7f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 17 Sep 2025 09:44:50 -0700 Subject: [PATCH 1354/1708] cmd/tailscaled: make the outbound HTTP/SOCKS5 proxy modular Updates #12614 Change-Id: Icba6f1c0838dce6ee13aa2dc662fb551813262e4 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/deps_test.go | 13 +++ cmd/tailscaled/proxy.go | 94 ++++++++++++++++++- cmd/tailscaled/tailscaled.go | 90 ++++-------------- .../feature_netstack_disabled.go | 13 +++ .../buildfeatures/feature_netstack_enabled.go | 13 +++ .../feature_outboundproxy_disabled.go | 13 +++ .../feature_outboundproxy_enabled.go | 13 +++ feature/featuretags/featuretags.go | 36 ++++--- feature/featuretags/featuretags_test.go | 8 +- 9 files changed, 207 insertions(+), 86 deletions(-) create mode 100644 feature/buildfeatures/feature_netstack_disabled.go create mode 100644 feature/buildfeatures/feature_netstack_enabled.go create mode 100644 feature/buildfeatures/feature_outboundproxy_disabled.go create mode 100644 feature/buildfeatures/feature_outboundproxy_enabled.go diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 538cdc115..50e584fe0 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -135,3 +135,16 @@ func TestOmitOAuthKey(t *testing.T) { }, }.Check(t) } + +func TestOmitOutboundProxy(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_outboundproxy,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "socks5") || strings.Contains(dep, "proxymux") { + t.Errorf("unexpected dep with ts_omit_outboundproxy: %q", dep) + } + }, + }.Check(t) +} diff --git a/cmd/tailscaled/proxy.go b/cmd/tailscaled/proxy.go index a91c62bfa..790b5e18e 100644 --- a/cmd/tailscaled/proxy.go +++ b/cmd/tailscaled/proxy.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build go1.19 +//go:build !ts_omit_outboundproxy // HTTP proxy code @@ -9,13 +9,105 @@ package main import ( "context" + "flag" "io" + "log" "net" "net/http" "net/http/httputil" "strings" + + "tailscale.com/net/proxymux" + "tailscale.com/net/socks5" + "tailscale.com/net/tsdial" + "tailscale.com/net/tshttpproxy" + "tailscale.com/types/logger" ) +func init() { + hookRegisterOutboundProxyFlags.Set(registerOutboundProxyFlags) + hookOutboundProxyListen.Set(outboundProxyListen) +} + +func registerOutboundProxyFlags() { + flag.StringVar(&args.socksAddr, "socks5-server", "", `optional [ip]:port to run a SOCK5 server (e.g. "localhost:1080")`) + flag.StringVar(&args.httpProxyAddr, "outbound-http-proxy-listen", "", `optional [ip]:port to run an outbound HTTP proxy (e.g. "localhost:8080")`) +} + +// outboundProxyListen creates listeners for local SOCKS and HTTP proxies, if +// the respective addresses are not empty. 
args.socksAddr and args.httpProxyAddr +// can be the same, in which case the SOCKS5 Listener will receive connections +// that look like they're speaking SOCKS and httpListener will receive +// everything else. +// +// socksListener and httpListener can be nil, if their respective addrs are +// empty. +// +// The returned func closes over those two (possibly nil) listeners and +// starts the respective servers on the listener when called. +func outboundProxyListen() proxyStartFunc { + socksAddr, httpAddr := args.socksAddr, args.httpProxyAddr + + if socksAddr == httpAddr && socksAddr != "" && !strings.HasSuffix(socksAddr, ":0") { + ln, err := net.Listen("tcp", socksAddr) + if err != nil { + log.Fatalf("proxy listener: %v", err) + } + return mkProxyStartFunc(proxymux.SplitSOCKSAndHTTP(ln)) + } + + var socksListener, httpListener net.Listener + var err error + if socksAddr != "" { + socksListener, err = net.Listen("tcp", socksAddr) + if err != nil { + log.Fatalf("SOCKS5 listener: %v", err) + } + if strings.HasSuffix(socksAddr, ":0") { + // Log kernel-selected port number so integration tests + // can find it portably. + log.Printf("SOCKS5 listening on %v", socksListener.Addr()) + } + } + if httpAddr != "" { + httpListener, err = net.Listen("tcp", httpAddr) + if err != nil { + log.Fatalf("HTTP proxy listener: %v", err) + } + if strings.HasSuffix(httpAddr, ":0") { + // Log kernel-selected port number so integration tests + // can find it portably. + log.Printf("HTTP proxy listening on %v", httpListener.Addr()) + } + } + + return mkProxyStartFunc(socksListener, httpListener) +} + +func mkProxyStartFunc(socksListener, httpListener net.Listener) proxyStartFunc { + return func(logf logger.Logf, dialer *tsdial.Dialer) { + var addrs []string + if httpListener != nil { + hs := &http.Server{Handler: httpProxyHandler(dialer.UserDial)} + go func() { + log.Fatalf("HTTP proxy exited: %v", hs.Serve(httpListener)) + }() + addrs = append(addrs, httpListener.Addr().String()) + } + if socksListener != nil { + ss := &socks5.Server{ + Logf: logger.WithPrefix(logf, "socks5: "), + Dialer: dialer.UserDial, + } + go func() { + log.Fatalf("SOCKS5 server exited: %v", ss.Serve(socksListener)) + }() + addrs = append(addrs, socksListener.Addr().String()) + } + tshttpproxy.SetSelfProxy(addrs...) + } +} + // httpProxyHandler returns an HTTP proxy http.Handler using the // provided backend dialer. func httpProxyHandler(dialer func(ctx context.Context, netw, addr string) (net.Conn, error)) http.Handler { diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 734c8e8e8..9e099f9cb 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -48,10 +48,7 @@ import ( "tailscale.com/net/dnsfallback" "tailscale.com/net/netmon" "tailscale.com/net/netns" - "tailscale.com/net/proxymux" - "tailscale.com/net/socks5" "tailscale.com/net/tsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/net/tstun" "tailscale.com/paths" "tailscale.com/safesocket" @@ -176,6 +173,17 @@ func shouldRunCLI() bool { return false } +// Outbound Proxy hooks +var ( + hookRegisterOutboundProxyFlags feature.Hook[func()] + hookOutboundProxyListen feature.Hook[func() proxyStartFunc] +) + +// proxyStartFunc is the type of the function returned by +// outboundProxyListen, to start the servers on the Listeners +// started by hookOutboundProxyListen. 
+type proxyStartFunc = func(logf logger.Logf, dialer *tsdial.Dialer) + func main() { envknob.PanicIfAnyEnvCheckedInInit() if shouldRunCLI() { @@ -190,8 +198,6 @@ func main() { flag.IntVar(&args.verbose, "verbose", defaultVerbosity(), "log verbosity level; 0 is default, 1 or higher are increasingly verbose") flag.BoolVar(&args.cleanUp, "cleanup", false, "clean up system state and exit") flag.StringVar(&args.debug, "debug", "", "listen address ([ip]:port) of optional debug server") - flag.StringVar(&args.socksAddr, "socks5-server", "", `optional [ip]:port to run a SOCK5 server (e.g. "localhost:1080")`) - flag.StringVar(&args.httpProxyAddr, "outbound-http-proxy-listen", "", `optional [ip]:port to run an outbound HTTP proxy (e.g. "localhost:8080")`) flag.StringVar(&args.tunname, "tun", defaultTunName(), `tunnel interface name; use "userspace-networking" (beta) to not use TUN`) flag.Var(flagtype.PortValue(&args.port, defaultPort()), "port", "UDP port to listen on for WireGuard and peer-to-peer traffic; 0 means automatically select") flag.StringVar(&args.statepath, "state", "", "absolute path of state file; use 'kube:' to use Kubernetes secrets or 'arn:aws:ssm:...' to store in AWS SSM; use 'mem:' to not store state and register as an ephemeral node. If empty and --statedir is provided, the default is /tailscaled.state. Default: "+paths.DefaultTailscaledStateFile()) @@ -202,6 +208,9 @@ func main() { flag.BoolVar(&printVersion, "version", false, "print version information and exit") flag.BoolVar(&args.disableLogs, "no-logs-no-support", false, "disable log uploads; this also disables any technical support") flag.StringVar(&args.confFile, "config", "", "path to config file, or 'vm:user-data' to use the VM's user-data (EC2)") + if f, ok := hookRegisterOutboundProxyFlags.GetOk(); ok { + f() + } if runtime.GOOS == "plan9" && os.Getenv("_NETSHELL_CHILD_") != "" { os.Args = []string{"tailscaled", "be-child", "plan9-netshell"} @@ -595,7 +604,10 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID logPol.Logtail.SetNetMon(sys.NetMon.Get()) } - socksListener, httpProxyListener := mustStartProxyListeners(args.socksAddr, args.httpProxyAddr) + var startProxy proxyStartFunc + if listen, ok := hookOutboundProxyListen.GetOk(); ok { + startProxy = listen() + } dialer := &tsdial.Dialer{Logf: logf} // mutated below (before used) sys.Set(dialer) @@ -646,26 +658,8 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID return udpConn, nil } } - if socksListener != nil || httpProxyListener != nil { - var addrs []string - if httpProxyListener != nil { - hs := &http.Server{Handler: httpProxyHandler(dialer.UserDial)} - go func() { - log.Fatalf("HTTP proxy exited: %v", hs.Serve(httpProxyListener)) - }() - addrs = append(addrs, httpProxyListener.Addr().String()) - } - if socksListener != nil { - ss := &socks5.Server{ - Logf: logger.WithPrefix(logf, "socks5: "), - Dialer: dialer.UserDial, - } - go func() { - log.Fatalf("SOCKS5 server exited: %v", ss.Serve(socksListener)) - }() - addrs = append(addrs, socksListener.Addr().String()) - } - tshttpproxy.SetSelfProxy(addrs...) + if startProxy != nil { + go startProxy(logf, dialer) } opts := ipnServerOpts() @@ -893,50 +887,6 @@ func newNetstack(logf logger.Logf, sys *tsd.System) (*netstack.Impl, error) { return ret, nil } -// mustStartProxyListeners creates listeners for local SOCKS and HTTP -// proxies, if the respective addresses are not empty. 
socksAddr and -// httpAddr can be the same, in which case socksListener will receive -// connections that look like they're speaking SOCKS and httpListener -// will receive everything else. -// -// socksListener and httpListener can be nil, if their respective -// addrs are empty. -func mustStartProxyListeners(socksAddr, httpAddr string) (socksListener, httpListener net.Listener) { - if socksAddr == httpAddr && socksAddr != "" && !strings.HasSuffix(socksAddr, ":0") { - ln, err := net.Listen("tcp", socksAddr) - if err != nil { - log.Fatalf("proxy listener: %v", err) - } - return proxymux.SplitSOCKSAndHTTP(ln) - } - - var err error - if socksAddr != "" { - socksListener, err = net.Listen("tcp", socksAddr) - if err != nil { - log.Fatalf("SOCKS5 listener: %v", err) - } - if strings.HasSuffix(socksAddr, ":0") { - // Log kernel-selected port number so integration tests - // can find it portably. - log.Printf("SOCKS5 listening on %v", socksListener.Addr()) - } - } - if httpAddr != "" { - httpListener, err = net.Listen("tcp", httpAddr) - if err != nil { - log.Fatalf("HTTP proxy listener: %v", err) - } - if strings.HasSuffix(httpAddr, ":0") { - // Log kernel-selected port number so integration tests - // can find it portably. - log.Printf("HTTP proxy listening on %v", httpListener.Addr()) - } - } - - return socksListener, httpListener -} - var beChildFunc = beChild func beChild(args []string) error { diff --git a/feature/buildfeatures/feature_netstack_disabled.go b/feature/buildfeatures/feature_netstack_disabled.go new file mode 100644 index 000000000..7369645a0 --- /dev/null +++ b/feature/buildfeatures/feature_netstack_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_netstack + +package buildfeatures + +// HasNetstack is whether the binary was built with support for modular feature "gVisor netstack (userspace networking) support (TODO; not yet omittable)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_netstack" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetstack = false diff --git a/feature/buildfeatures/feature_netstack_enabled.go b/feature/buildfeatures/feature_netstack_enabled.go new file mode 100644 index 000000000..a7e57098b --- /dev/null +++ b/feature/buildfeatures/feature_netstack_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_netstack + +package buildfeatures + +// HasNetstack is whether the binary was built with support for modular feature "gVisor netstack (userspace networking) support (TODO; not yet omittable)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_netstack" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetstack = true diff --git a/feature/buildfeatures/feature_outboundproxy_disabled.go b/feature/buildfeatures/feature_outboundproxy_disabled.go new file mode 100644 index 000000000..a84c24e6d --- /dev/null +++ b/feature/buildfeatures/feature_outboundproxy_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_outboundproxy + +package buildfeatures + +// HasOutboundProxy is whether the binary was built with support for modular feature "Outbound localhost HTTP/SOCK5 proxy support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_outboundproxy" build tag. +// It's a const so it can be used for dead code elimination. +const HasOutboundProxy = false diff --git a/feature/buildfeatures/feature_outboundproxy_enabled.go b/feature/buildfeatures/feature_outboundproxy_enabled.go new file mode 100644 index 000000000..c306bbeb2 --- /dev/null +++ b/feature/buildfeatures/feature_outboundproxy_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_outboundproxy + +package buildfeatures + +// HasOutboundProxy is whether the binary was built with support for modular feature "Outbound localhost HTTP/SOCK5 proxy support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_outboundproxy" build tag. +// It's a const so it can be used for dead code elimination. +const HasOutboundProxy = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 325f46a44..ec21122db 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -106,17 +106,31 @@ var Features = map[FeatureTag]FeatureMeta{ "drive": {"Drive", "Tailscale Drive (file server) support", nil}, "kube": {"Kube", "Kubernetes integration", nil}, "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, - "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, - "relayserver": {"RelayServer", "Relay server", nil}, - "serve": {"Serve", "Serve and Funnel support", nil}, - "ssh": {"SSH", "Tailscale SSH support", nil}, - "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support", nil}, - "systray": {"SysTray", "Linux system tray", nil}, - "taildrop": {"Taildrop", "Taildrop (file sending) support", nil}, - "tailnetlock": {"TailnetLock", "Tailnet Lock support", nil}, - "tap": {"Tap", "Experimental Layer 2 (ethernet) support", nil}, - "tpm": {"TPM", "TPM support", nil}, - "wakeonlan": {"WakeOnLAN", "Wake-on-LAN support", nil}, + "outboundproxy": { + Sym: "OutboundProxy", + Desc: "Outbound localhost HTTP/SOCK5 proxy support", + Deps: []FeatureTag{"netstack"}, + }, + "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, + "netstack": {"Netstack", "gVisor netstack (userspace networking) support (TODO; not yet omittable)", nil}, + "relayserver": {"RelayServer", "Relay server", nil}, + "serve": { + Sym: "Serve", + Desc: "Serve and Funnel support", + Deps: []FeatureTag{"netstack"}, + }, + "ssh": { + Sym: "SSH", + Desc: "Tailscale SSH support", + Deps: []FeatureTag{"netstack"}, + }, + "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support", nil}, + "systray": {"SysTray", "Linux system tray", nil}, + "taildrop": {"Taildrop", "Taildrop (file sending) support", nil}, + "tailnetlock": {"TailnetLock", "Tailnet Lock support", nil}, + "tap": {"Tap", "Experimental Layer 2 (ethernet) support", nil}, + "tpm": {"TPM", "TPM support", nil}, + "wakeonlan": {"WakeOnLAN", "Wake-on-LAN support", nil}, "webclient": { Sym: "WebClient", Desc: "Web client support", Deps: []FeatureTag{"serve"}, diff --git a/feature/featuretags/featuretags_test.go b/feature/featuretags/featuretags_test.go index 4a268c90d..b1524ce4f 100644 --- 
a/feature/featuretags/featuretags_test.go +++ b/feature/featuretags/featuretags_test.go @@ -11,7 +11,7 @@ import ( "tailscale.com/util/set" ) -func TestRequires(t *testing.T) { +func TestKnownDeps(t *testing.T) { for tag, meta := range Features { for _, dep := range meta.Deps { if _, ok := Features[dep]; !ok { @@ -26,7 +26,7 @@ func TestRequires(t *testing.T) { } } -func TestDepSet(t *testing.T) { +func TestRequires(t *testing.T) { var setOf = set.Of[FeatureTag] tests := []struct { in FeatureTag @@ -38,11 +38,11 @@ func TestDepSet(t *testing.T) { }, { in: "serve", - want: setOf("serve"), + want: setOf("serve", "netstack"), }, { in: "webclient", - want: setOf("webclient", "serve"), + want: setOf("webclient", "serve", "netstack"), }, } for _, tt := range tests { From d559a214189d40a9493e2a2df3f46dc1b08928c0 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 19 Sep 2025 10:34:55 -0700 Subject: [PATCH 1355/1708] util/eventbus/eventbustest: fix typo of test name And another case of the same typo in a comment elsewhere. Updates #cleanup Change-Id: Iaa9d865a1cf83318d4a30263c691451b5d708c9c Signed-off-by: Brad Fitzpatrick --- cmd/containerboot/egressservices.go | 2 +- util/eventbus/eventbustest/examples_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/containerboot/egressservices.go b/cmd/containerboot/egressservices.go index 64ca0a13a..fe835a69e 100644 --- a/cmd/containerboot/egressservices.go +++ b/cmd/containerboot/egressservices.go @@ -570,7 +570,7 @@ func ensureRulesAdded(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner } // ensureRulesDeleted ensures that the given rules are deleted from the firewall -// configuration. For any rules that do not exist, calling this funcion is a +// configuration. For any rules that do not exist, calling this function is a // no-op. func ensureRulesDeleted(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner) error { for svc, rules := range rulesPerSvc { diff --git a/util/eventbus/eventbustest/examples_test.go b/util/eventbus/eventbustest/examples_test.go index 914e29933..bc06e60a9 100644 --- a/util/eventbus/eventbustest/examples_test.go +++ b/util/eventbus/eventbustest/examples_test.go @@ -157,7 +157,7 @@ func TestExample_Expect_WithMultipleFunctions(t *testing.T) { // OK } -func TestExample_ExpectExactly_WithMultipleFuncions(t *testing.T) { +func TestExample_ExpectExactly_WithMultipleFunctions(t *testing.T) { type eventOfInterest struct { value int } From 009d702adfa0fca9f0319f6767f6a3259e484092 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 19 Sep 2025 14:58:37 -0400 Subject: [PATCH 1356/1708] health: remove direct callback and replace with eventbus (#17199) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pulls out the last callback logic and ensures timers are still running. The eventbustest package is updated support the absence of events. 
Updates #15160 Signed-off-by: Claus Lensbøl --- control/controlclient/auto.go | 40 +++++- health/health.go | 120 +++--------------- health/health_test.go | 88 +++++++------ util/eventbus/eventbustest/eventbustest.go | 7 +- .../eventbustest/eventbustest_test.go | 2 +- 5 files changed, 110 insertions(+), 147 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 7bca6c8d8..bbc129c5e 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -23,6 +23,7 @@ import ( "tailscale.com/types/persist" "tailscale.com/types/structs" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/execqueue" ) @@ -122,7 +123,9 @@ type Auto struct { observerQueue execqueue.ExecQueue shutdownFn func() // to be called prior to shutdown or nil - unregisterHealthWatch func() + eventClient *eventbus.Client + healthChangeSub *eventbus.Subscriber[health.Change] + subsDoneCh chan struct{} // close-only channel when eventClient has closed mu sync.Mutex // mutex guards the following fields @@ -192,21 +195,42 @@ func NewNoStart(opts Options) (_ *Auto, err error) { updateDone: make(chan struct{}), observer: opts.Observer, shutdownFn: opts.Shutdown, + subsDoneCh: make(chan struct{}), } + + c.eventClient = opts.Bus.Client("controlClient.Auto") + c.healthChangeSub = eventbus.Subscribe[health.Change](c.eventClient) + c.authCtx, c.authCancel = context.WithCancel(context.Background()) c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, opts.Logf) c.mapCtx, c.mapCancel = context.WithCancel(context.Background()) c.mapCtx = sockstats.WithSockStats(c.mapCtx, sockstats.LabelControlClientAuto, opts.Logf) - c.unregisterHealthWatch = opts.HealthTracker.RegisterWatcher(func(c health.Change) { - if c.WarnableChanged { - direct.ReportWarnableChange(c.Warnable, c.UnhealthyState) - } - }) + go c.consumeEventbusTopics() return c, nil } +// consumeEventbusTopics consumes events from all relevant +// [eventbus.Subscriber]'s and passes them to their related handler. Events are +// always handled in the order they are received, i.e. the next event is not +// read until the previous event's handler has returned. It returns when the +// [eventbus.Client] is closed. +func (c *Auto) consumeEventbusTopics() { + defer close(c.subsDoneCh) + + for { + select { + case <-c.eventClient.Done(): + return + case change := <-c.healthChangeSub.Events(): + if change.WarnableChanged { + c.direct.ReportWarnableChange(change.Warnable, change.UnhealthyState) + } + } + } +} + // SetPaused controls whether HTTP activity should be paused. // // The client can be paused and unpaused repeatedly, unlike Start and Shutdown, which can only be used once. 
@@ -760,6 +784,9 @@ func (c *Auto) UpdateEndpoints(endpoints []tailcfg.Endpoint) { } func (c *Auto) Shutdown() { + c.eventClient.Close() + <-c.subsDoneCh + c.mu.Lock() if c.closed { c.mu.Unlock() @@ -783,7 +810,6 @@ func (c *Auto) Shutdown() { shutdownFn() } - c.unregisterHealthWatch() <-c.authDone <-c.mapDone <-c.updateDone diff --git a/health/health.go b/health/health.go index c456b53cb..3d1c46a3d 100644 --- a/health/health.go +++ b/health/health.go @@ -28,7 +28,6 @@ import ( "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/multierr" - "tailscale.com/util/set" "tailscale.com/util/usermetric" "tailscale.com/version" ) @@ -65,6 +64,21 @@ var receiveNames = []string{ // Tracker tracks the health of various Tailscale subsystems, // comparing each subsystems' state with each other to make sure // they're consistent based on the user's intended state. +// +// If a client [Warnable] becomes unhealthy or its unhealthy state is updated, +// an event will be emitted with WarnableChanged set to true and the Warnable +// and its UnhealthyState: +// +// Change{WarnableChanged: true, Warnable: w, UnhealthyState: us} +// +// If a Warnable becomes healthy, an event will be emitted with +// WarnableChanged set to true, the Warnable set, and UnhealthyState set to nil: +// +// Change{WarnableChanged: true, Warnable: w, UnhealthyState: nil} +// +// If the health messages from the control-plane change, an event will be +// emitted with ControlHealthChanged set to true. Recipients can fetch the set of +// control-plane health messages by calling [Tracker.CurrentState]: type Tracker struct { // MagicSockReceiveFuncs tracks the state of the three // magicsock receive functions: IPv4, IPv6, and DERP. @@ -91,9 +105,8 @@ type Tracker struct { // sysErr maps subsystems to their current error (or nil if the subsystem is healthy) // Deprecated: using Warnables should be preferred - sysErr map[Subsystem]error - watchers set.HandleSet[func(Change)] // opt func to run if error state changes - timer tstime.TimerController + sysErr map[Subsystem]error + timer tstime.TimerController latestVersion *tailcfg.ClientVersion // or nil checkForUpdates bool @@ -131,10 +144,12 @@ func NewTracker(bus *eventbus.Bus) *Tracker { } cli := bus.Client("health.Tracker") - return &Tracker{ + t := &Tracker{ eventClient: cli, changePub: eventbus.Publish[Change](cli), } + t.timer = t.clock().AfterFunc(time.Minute, t.timerSelfCheck) + return t } func (t *Tracker) now() time.Time { @@ -455,33 +470,6 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { }) mak.Set(&t.pendingVisibleTimers, w, tc) } - - // Direct callbacks - // TODO(cmol): Remove once all watchers have been moved to events - for _, cb := range t.watchers { - // If the Warnable has been unhealthy for more than its TimeToVisible, the callback should be - // executed immediately. Otherwise, the callback should be enqueued to run once the Warnable - // becomes visible. - if w.IsVisible(ws, t.now) { - cb(change) - continue - } - - // The time remaining until the Warnable will be visible to the user is the TimeToVisible - // minus the time that has already passed since the Warnable became unhealthy. - visibleIn := w.TimeToVisible - t.now().Sub(brokenSince) - var tc tstime.TimerController = t.clock().AfterFunc(visibleIn, func() { - t.mu.Lock() - defer t.mu.Unlock() - // Check if the Warnable is still unhealthy, as it could have become healthy between the time - // the timer was set for and the time it was executed. 
- if t.warnableVal[w] != nil { - cb(change) - delete(t.pendingVisibleTimers, w) - } - }) - mak.Set(&t.pendingVisibleTimers, w, tc) - } } } @@ -514,10 +502,6 @@ func (t *Tracker) setHealthyLocked(w *Warnable) { Warnable: w, } t.changePub.Publish(change) - for _, cb := range t.watchers { - // TODO(cmol): Remove once all watchers have been moved to events - cb(change) - } } // notifyWatchersControlChangedLocked calls each watcher to signal that control @@ -526,13 +510,7 @@ func (t *Tracker) notifyWatchersControlChangedLocked() { change := Change{ ControlHealthChanged: true, } - if t.changePub != nil { - t.changePub.Publish(change) - } - for _, cb := range t.watchers { - // TODO(cmol): Remove once all watchers have been moved to events - cb(change) - } + t.changePub.Publish(change) } // AppendWarnableDebugFlags appends to base any health items that are currently in failed @@ -577,62 +555,6 @@ type Change struct { UnhealthyState *UnhealthyState } -// RegisterWatcher adds a function that will be called its own goroutine -// whenever the health state of any client [Warnable] or control-plane health -// messages changes. The returned function can be used to unregister the -// callback. -// -// If a client [Warnable] becomes unhealthy or its unhealthy state is updated, -// the callback will be called with WarnableChanged set to true and the Warnable -// and its UnhealthyState: -// -// go cb(Change{WarnableChanged: true, Warnable: w, UnhealthyState: us}) -// -// If a Warnable becomes healthy, the callback will be called with -// WarnableChanged set to true, the Warnable set, and UnhealthyState set to nil: -// -// go cb(Change{WarnableChanged: true, Warnable: w, UnhealthyState: nil}) -// -// If the health messages from the control-plane change, the callback will be -// called with ControlHealthChanged set to true. Recipients can fetch the set of -// control-plane health messages by calling [Tracker.CurrentState]: -// -// go cb(Change{ControlHealthChanged: true}) -func (t *Tracker) RegisterWatcher(cb func(Change)) (unregister func()) { - return t.registerSyncWatcher(func(c Change) { - go cb(c) - }) -} - -// registerSyncWatcher adds a function that will be called whenever the health -// state changes. The provided callback function will be executed synchronously. -// Call RegisterWatcher to register any callbacks that won't return from -// execution immediately. -func (t *Tracker) registerSyncWatcher(cb func(c Change)) (unregister func()) { - if t.nil() { - return func() {} - } - t.initOnce.Do(t.doOnceInit) - t.mu.Lock() - defer t.mu.Unlock() - if t.watchers == nil { - t.watchers = set.HandleSet[func(Change)]{} - } - handle := t.watchers.Add(cb) - if t.timer == nil { - t.timer = t.clock().AfterFunc(time.Minute, t.timerSelfCheck) - } - return func() { - t.mu.Lock() - defer t.mu.Unlock() - delete(t.watchers, handle) - if len(t.watchers) == 0 && t.timer != nil { - t.timer.Stop() - t.timer = nil - } - } -} - // SetRouterHealth sets the state of the wgengine/router.Router. // // Deprecated: Warnables should be preferred over Subsystem errors. 
diff --git a/health/health_test.go b/health/health_test.go index c55b0e1f3..3ada37755 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -4,6 +4,7 @@ package health import ( + "errors" "fmt" "maps" "reflect" @@ -158,15 +159,6 @@ func TestWatcher(t *testing.T) { name string preFunc func(t *testing.T, ht *Tracker, bus *eventbus.Bus, fn func(Change)) }{ - { - name: "with-callbacks", - preFunc: func(t *testing.T, tht *Tracker, _ *eventbus.Bus, fn func(c Change)) { - t.Cleanup(tht.RegisterWatcher(fn)) - if len(tht.watchers) != 1 { - t.Fatalf("after RegisterWatcher, len(newTracker.watchers) = %d; want = 1", len(tht.watchers)) - } - }, - }, { name: "with-eventbus", preFunc: func(_ *testing.T, _ *Tracker, bus *eventbus.Bus, fn func(c Change)) { @@ -254,15 +246,6 @@ func TestSetUnhealthyWithTimeToVisible(t *testing.T) { name string preFunc func(t *testing.T, ht *Tracker, bus *eventbus.Bus, fn func(Change)) }{ - { - name: "with-callbacks", - preFunc: func(t *testing.T, tht *Tracker, _ *eventbus.Bus, fn func(c Change)) { - t.Cleanup(tht.RegisterWatcher(fn)) - if len(tht.watchers) != 1 { - t.Fatalf("after RegisterWatcher, len(newTracker.watchers) = %d; want = 1", len(tht.watchers)) - } - }, - }, { name: "with-eventbus", preFunc: func(_ *testing.T, _ *Tracker, bus *eventbus.Bus, fn func(c Change)) { @@ -668,7 +651,7 @@ func TestControlHealthNotifies(t *testing.T) { name string initialState map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage newState map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage - wantNotify bool + wantEvents []any } tests := []test{ { @@ -679,7 +662,7 @@ func TestControlHealthNotifies(t *testing.T) { newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ "test": {}, }, - wantNotify: false, + wantEvents: []any{}, }, { name: "on-set", @@ -687,7 +670,9 @@ func TestControlHealthNotifies(t *testing.T) { newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ "test": {}, }, - wantNotify: true, + wantEvents: []any{ + eventbustest.Type[Change](), + }, }, { name: "details-change", @@ -701,7 +686,9 @@ func TestControlHealthNotifies(t *testing.T) { Title: "Updated title", }, }, - wantNotify: true, + wantEvents: []any{ + eventbustest.Type[Change](), + }, }, { name: "action-changes", @@ -721,42 +708,54 @@ func TestControlHealthNotifies(t *testing.T) { }, }, }, - wantNotify: true, + wantEvents: []any{ + eventbustest.Type[Change](), + }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ht := NewTracker(eventbustest.NewBus(t)) + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + tw.TimeOut = time.Second + + ht := NewTracker(bus) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() + // Expect events at starup, before doing anything else + if err := eventbustest.ExpectExactly(tw, + eventbustest.Type[Change](), // warming-up + eventbustest.Type[Change](), // is-using-unstable-version + eventbustest.Type[Change](), // not-in-map-poll + ); err != nil { + t.Errorf("startup error: %v", err) + } + + // Only set initial state if we need to if len(test.initialState) != 0 { ht.SetControlHealth(test.initialState) + if err := eventbustest.ExpectExactly(tw, eventbustest.Type[Change]()); err != nil { + t.Errorf("initial state error: %v", err) + } } - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) - ht.SetControlHealth(test.newState) - if gotNotified != test.wantNotify { - t.Errorf("notified: got %v, want %v", gotNotified, test.wantNotify) + if err := eventbustest.ExpectExactly(tw, 
test.wantEvents...); err != nil { + t.Errorf("event error: %v", err) } }) } } func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { - ht := NewTracker(eventbustest.NewBus(t)) + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + tw.TimeOut = 100 * time.Millisecond + ht := NewTracker(bus) ht.SetIPNState("NeedsLogin", true) - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ "control-health": {}, }) @@ -768,8 +767,19 @@ func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { t.Error("got a warning with code 'control-health', want none") } - if gotNotified { - t.Error("watcher got called, want it to not be called") + // An event is emitted when SetIPNState is run above, + // so only fail on the second event. + eventCounter := 0 + expectOne := func(c *Change) error { + eventCounter++ + if eventCounter == 1 { + return nil + } + return errors.New("saw more than 1 event") + } + + if err := eventbustest.Expect(tw, expectOne); err == nil { + t.Error("event got emitted, want it to not be called") } } diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index 0916ae522..3f7bf4553 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -120,7 +120,12 @@ func Expect(tw *Watcher, filters ...any) error { // [Expect]. Use [Expect] if other events are allowed. func ExpectExactly(tw *Watcher, filters ...any) error { if len(filters) == 0 { - return errors.New("no event filters were provided") + select { + case event := <-tw.events: + return fmt.Errorf("saw event type %s, expected none", reflect.TypeOf(event)) + case <-time.After(tw.TimeOut): + return nil + } } eventCount := 0 for pos, next := range filters { diff --git a/util/eventbus/eventbustest/eventbustest_test.go b/util/eventbus/eventbustest/eventbustest_test.go index 7a6b511c7..2d126767d 100644 --- a/util/eventbus/eventbustest/eventbustest_test.go +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -250,7 +250,7 @@ func TestExpectEvents(t *testing.T) { tw := eventbustest.NewWatcher(t, bus) // TODO(cmol): When synctest is out of experimental, use that instead: // https://go.dev/blog/synctest - tw.TimeOut = 10 * time.Millisecond + tw.TimeOut = 100 * time.Millisecond client := bus.Client("testClient") defer client.Close() From ca9d79500615082dc46fffc4b1d93ad66fa6b8eb Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 19 Sep 2025 12:34:06 -0700 Subject: [PATCH 1357/1708] util/eventbus: add a Monitor type to manage subscriber goroutines (#17127) A common pattern in event bus usage is to run a goroutine to service a collection of subscribers on a single bus client. To have an orderly shutdown, however, we need a way to wait for such a goroutine to be finished. This commit adds a Monitor type that makes this pattern easier to wire up: rather than having to track all the subscribers and an extra channel, the component need only track the client and the monitor. For example: cli := bus.Client("example") m := cli.Monitor(func(c *eventbus.Client) { s1 := eventbus.Subscribe[T](cli) s2 := eventbus.Subscribe[U](cli) for { select { case <-c.Done(): return case t := <-s1.Events(): processT(t) case u := <-s2.Events(): processU(u) } } }) To shut down the client and wait for the goroutine, the caller can write: m.Close() which closes cli and waits for the goroutine to finish. 
Or, separately: cli.Close() // do other stuff m.Wait() While the goroutine management is not explicitly tied to subscriptions, it is a common enough pattern that this seems like a useful simplification in use. Updates #15160 Change-Id: I657afda1cfaf03465a9dce1336e9fd518a968bca Signed-off-by: M. J. Fromberger --- util/eventbus/bus_test.go | 73 +++++++++++++++++++++++++++++++++++++++ util/eventbus/monitor.go | 42 ++++++++++++++++++++++ 2 files changed, 115 insertions(+) create mode 100644 util/eventbus/monitor.go diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index 9fd0e4409..7782634ae 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -221,6 +221,79 @@ func TestClient_Done(t *testing.T) { } } +func TestMonitor(t *testing.T) { + t.Run("ZeroWait", func(t *testing.T) { + var zero eventbus.Monitor + + ready := make(chan struct{}) + go func() { zero.Wait(); close(ready) }() + + select { + case <-ready: + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for Wait to return") + } + }) + + t.Run("ZeroClose", func(t *testing.T) { + var zero eventbus.Monitor + + ready := make(chan struct{}) + go func() { zero.Close(); close(ready) }() + + select { + case <-ready: + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for Close to return") + } + }) + + testMon := func(t *testing.T, release func(*eventbus.Client, eventbus.Monitor)) func(t *testing.T) { + t.Helper() + return func(t *testing.T) { + bus := eventbus.New() + cli := bus.Client("test client") + + // The monitored goroutine runs until the client or test subscription ends. + m := cli.Monitor(func(c *eventbus.Client) { + sub := eventbus.Subscribe[string](cli) + select { + case <-c.Done(): + t.Log("client closed") + case <-sub.Done(): + t.Log("subscription closed") + } + }) + + done := make(chan struct{}) + go func() { + defer close(done) + m.Wait() + }() + + // While the goroutine is running, Wait does not complete. + select { + case <-done: + t.Error("monitor is ready before its goroutine is finished") + default: + // OK + } + + release(cli, m) + select { + case <-done: + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for monitor to complete") + } + } + } + t.Run("Close", testMon(t, func(_ *eventbus.Client, m eventbus.Monitor) { m.Close() })) + t.Run("Wait", testMon(t, func(c *eventbus.Client, m eventbus.Monitor) { c.Close(); m.Wait() })) +} + type queueChecker struct { t *testing.T want []any diff --git a/util/eventbus/monitor.go b/util/eventbus/monitor.go new file mode 100644 index 000000000..18cc2a413 --- /dev/null +++ b/util/eventbus/monitor.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +// A Monitor monitors the execution of a goroutine processing events from a +// [Client], allowing the caller to block until it is complete. The zero value +// of m is valid and its Close and Wait methods return immediately. +type Monitor struct { + // These fields are immutable after initialization + cli *Client + done <-chan struct{} +} + +// Close closes the client associated with m and blocks until the processing +// goroutine is complete. +func (m Monitor) Close() { + if m.cli == nil { + return + } + m.cli.Close() + <-m.done +} + +// Wait blocks until the goroutine monitored by m has finished executing, but +// does not close the associated client. It is safe to call Wait repeatedly, +// and from multiple concurrent goroutines. 
+func (m Monitor) Wait() { + if m.done == nil { + return + } + <-m.done +} + +// Monitor executes f in a new goroutine attended by a [Monitor]. The caller +// is responsible for waiting for the goroutine to complete, by calling either +// [Monitor.Close] or [Monitor.Wait]. +func (c *Client) Monitor(f func(*Client)) Monitor { + done := make(chan struct{}) + go func() { defer close(done); f(c) }() + return Monitor{cli: c, done: done} +} From 2b6bc11586b65259ed737d3f77e3879647ac9df3 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 19 Sep 2025 13:20:50 -0700 Subject: [PATCH 1358/1708] wgengine: use eventbus.Client.Monitor to simplify subscriber maintenance (#17203) This commit does not change the order or meaning of any eventbus activity, it only updates the way the plumbing is set up. Updates #15160 Change-Id: I40c23b183c2a6a6ea3feec7767c8e5417019fc07 Signed-off-by: M. J. Fromberger --- wgengine/magicsock/magicsock.go | 88 +++++++++++++++------------------ wgengine/userspace.go | 39 ++++++--------- 2 files changed, 56 insertions(+), 71 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 6eb566076..39a7bb2e6 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -156,7 +156,7 @@ type Conn struct { // struct. Initialized once at construction, then constant. eventBus *eventbus.Bus - eventClient *eventbus.Client + eventSubs eventbus.Monitor logf logger.Logf epFunc func([]tailcfg.Endpoint) derpActiveFunc func() @@ -176,17 +176,10 @@ type Conn struct { connCtxCancel func() // closes connCtx donec <-chan struct{} // connCtx.Done()'s to avoid context.cancelCtx.Done()'s mutex per call - // These [eventbus.Subscriber] fields are solely accessed by - // consumeEventbusTopics once initialized. - pmSub *eventbus.Subscriber[portmappertype.Mapping] - filterSub *eventbus.Subscriber[FilterUpdate] - nodeViewsSub *eventbus.Subscriber[NodeViewsUpdate] - nodeMutsSub *eventbus.Subscriber[NodeMutationsUpdate] - syncSub *eventbus.Subscriber[syncPoint] + // A publisher for synchronization points to ensure correct ordering of + // config changes between magicsock and wireguard. syncPub *eventbus.Publisher[syncPoint] allocRelayEndpointPub *eventbus.Publisher[UDPRelayAllocReq] - allocRelayEndpointSub *eventbus.Subscriber[UDPRelayAllocResp] - subsDoneCh chan struct{} // closed when consumeEventbusTopics returns // pconn4 and pconn6 are the underlying UDP sockets used to // send/receive packets for wireguard and other magicsock @@ -643,26 +636,34 @@ func newConn(logf logger.Logf) *Conn { // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the // [eventbus.Client] is closed. 
-func (c *Conn) consumeEventbusTopics() { - defer close(c.subsDoneCh) - - for { - select { - case <-c.eventClient.Done(): - return - case <-c.pmSub.Events(): - c.onPortMapChanged() - case filterUpdate := <-c.filterSub.Events(): - c.onFilterUpdate(filterUpdate) - case nodeViews := <-c.nodeViewsSub.Events(): - c.onNodeViewsUpdate(nodeViews) - case nodeMuts := <-c.nodeMutsSub.Events(): - c.onNodeMutationsUpdate(nodeMuts) - case syncPoint := <-c.syncSub.Events(): - c.dlogf("magicsock: received sync point after reconfig") - syncPoint.Signal() - case allocResp := <-c.allocRelayEndpointSub.Events(): - c.onUDPRelayAllocResp(allocResp) +func (c *Conn) consumeEventbusTopics(cli *eventbus.Client) func(*eventbus.Client) { + // Subscribe calls must return before NewConn otherwise published + // events can be missed. + pmSub := eventbus.Subscribe[portmappertype.Mapping](cli) + filterSub := eventbus.Subscribe[FilterUpdate](cli) + nodeViewsSub := eventbus.Subscribe[NodeViewsUpdate](cli) + nodeMutsSub := eventbus.Subscribe[NodeMutationsUpdate](cli) + syncSub := eventbus.Subscribe[syncPoint](cli) + allocRelayEndpointSub := eventbus.Subscribe[UDPRelayAllocResp](cli) + return func(cli *eventbus.Client) { + for { + select { + case <-cli.Done(): + return + case <-pmSub.Events(): + c.onPortMapChanged() + case filterUpdate := <-filterSub.Events(): + c.onFilterUpdate(filterUpdate) + case nodeViews := <-nodeViewsSub.Events(): + c.onNodeViewsUpdate(nodeViews) + case nodeMuts := <-nodeMutsSub.Events(): + c.onNodeMutationsUpdate(nodeMuts) + case syncPoint := <-syncSub.Events(): + c.dlogf("magicsock: received sync point after reconfig") + syncPoint.Signal() + case allocResp := <-allocRelayEndpointSub.Events(): + c.onUDPRelayAllocResp(allocResp) + } } } } @@ -729,20 +730,12 @@ func NewConn(opts Options) (*Conn, error) { c.testOnlyPacketListener = opts.TestOnlyPacketListener c.noteRecvActivity = opts.NoteRecvActivity - c.eventClient = c.eventBus.Client("magicsock.Conn") - - // Subscribe calls must return before NewConn otherwise published - // events can be missed. - c.pmSub = eventbus.Subscribe[portmappertype.Mapping](c.eventClient) - c.filterSub = eventbus.Subscribe[FilterUpdate](c.eventClient) - c.nodeViewsSub = eventbus.Subscribe[NodeViewsUpdate](c.eventClient) - c.nodeMutsSub = eventbus.Subscribe[NodeMutationsUpdate](c.eventClient) - c.syncSub = eventbus.Subscribe[syncPoint](c.eventClient) - c.syncPub = eventbus.Publish[syncPoint](c.eventClient) - c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](c.eventClient) - c.allocRelayEndpointSub = eventbus.Subscribe[UDPRelayAllocResp](c.eventClient) - c.subsDoneCh = make(chan struct{}) - go c.consumeEventbusTopics() + // Set up publishers and subscribers. Subscribe calls must return before + // NewConn otherwise published events can be missed. + cli := c.eventBus.Client("magicsock.Conn") + c.syncPub = eventbus.Publish[syncPoint](cli) + c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](cli) + c.eventSubs = cli.Monitor(c.consumeEventbusTopics(cli)) c.connCtx, c.connCtxCancel = context.WithCancel(context.Background()) c.donec = c.connCtx.Done() @@ -3313,14 +3306,13 @@ func (c *connBind) isClosed() bool { // // Only the first close does anything. Any later closes return nil. func (c *Conn) Close() error { - // Close the [eventbus.Client] and wait for Conn.consumeEventbusTopics to - // return. Do this before acquiring c.mu: + // Close the [eventbus.Client] and wait for c.consumeEventbusTopics to + // return before acquiring c.mu: // 1. 
Conn.consumeEventbusTopics event handlers also acquire c.mu, they can // deadlock with c.Close(). // 2. Conn.consumeEventbusTopics event handlers may not guard against // undesirable post/in-progress Conn.Close() behaviors. - c.eventClient.Close() - <-c.subsDoneCh + c.eventSubs.Close() c.mu.Lock() defer c.mu.Unlock() diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 42c12c008..86136d977 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -93,10 +93,8 @@ const networkLoggerUploadTimeout = 5 * time.Second type userspaceEngine struct { // eventBus will eventually become required, but for now may be nil. // TODO(creachadair): Enforce that this is non-nil at construction. - eventBus *eventbus.Bus - eventClient *eventbus.Client - changeDeltaSub *eventbus.Subscriber[netmon.ChangeDelta] - subsDoneCh chan struct{} // closed when consumeEventbusTopics returns + eventBus *eventbus.Bus + eventSubs eventbus.Monitor logf logger.Logf wgLogger *wglog.Logger // a wireguard-go logging wrapper @@ -354,11 +352,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) controlKnobs: conf.ControlKnobs, reconfigureVPN: conf.ReconfigureVPN, health: conf.HealthTracker, - subsDoneCh: make(chan struct{}), } - e.eventClient = e.eventBus.Client("userspaceEngine") - e.changeDeltaSub = eventbus.Subscribe[netmon.ChangeDelta](e.eventClient) - closePool.addFunc(e.eventClient.Close) if e.birdClient != nil { // Disable the protocol at start time. @@ -545,8 +539,8 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } } - go e.consumeEventbusTopics() - + cli := e.eventBus.Client("userspaceEngine") + e.eventSubs = cli.Monitor(e.consumeEventbusTopics(cli)) e.logf("Engine created.") return e, nil } @@ -556,16 +550,17 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the // [eventbus.Client] is closed. -func (e *userspaceEngine) consumeEventbusTopics() { - defer close(e.subsDoneCh) - - for { - select { - case <-e.eventClient.Done(): - return - case changeDelta := <-e.changeDeltaSub.Events(): - tshttpproxy.InvalidateCache() - e.linkChange(&changeDelta) +func (e *userspaceEngine) consumeEventbusTopics(cli *eventbus.Client) func(*eventbus.Client) { + changeDeltaSub := eventbus.Subscribe[netmon.ChangeDelta](cli) + return func(cli *eventbus.Client) { + for { + select { + case <-cli.Done(): + return + case changeDelta := <-changeDeltaSub.Events(): + tshttpproxy.InvalidateCache() + e.linkChange(&changeDelta) + } } } } @@ -1228,9 +1223,7 @@ func (e *userspaceEngine) RequestStatus() { } func (e *userspaceEngine) Close() { - e.eventClient.Close() - <-e.subsDoneCh - + e.eventSubs.Close() e.mu.Lock() if e.closing { e.mu.Unlock() From f9c699812adaa286980aed97811217d884cf37fb Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 19 Sep 2025 14:31:55 -0700 Subject: [PATCH 1359/1708] ipn/ipnlocal: use eventbus.Monitor in expiryManager (#17204) This commit does not change the order or meaning of any eventbus activity, it only updates the way the plumbing is set up. Updates #15160 Change-Id: I0a175e67e867459daaedba0731bf68bd331e5ebc Signed-off-by: M. J. 
Fromberger --- ipn/ipnlocal/expiry.go | 36 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/ipn/ipnlocal/expiry.go b/ipn/ipnlocal/expiry.go index 9427f0738..849e28610 100644 --- a/ipn/ipnlocal/expiry.go +++ b/ipn/ipnlocal/expiry.go @@ -43,9 +43,7 @@ type expiryManager struct { logf logger.Logf clock tstime.Clock - eventClient *eventbus.Client - controlTimeSub *eventbus.Subscriber[controlclient.ControlTime] - subsDoneCh chan struct{} // closed when consumeEventbusTopics returns + eventSubs eventbus.Monitor } func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager { @@ -55,12 +53,8 @@ func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager { clock: tstime.StdClock{}, } - em.eventClient = bus.Client("ipnlocal.expiryManager") - em.controlTimeSub = eventbus.Subscribe[controlclient.ControlTime](em.eventClient) - - em.subsDoneCh = make(chan struct{}) - go em.consumeEventbusTopics() - + cli := bus.Client("ipnlocal.expiryManager") + em.eventSubs = cli.Monitor(em.consumeEventbusTopics(cli)) return em } @@ -69,15 +63,16 @@ func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager { // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the // [eventbus.Client] is closed. -func (em *expiryManager) consumeEventbusTopics() { - defer close(em.subsDoneCh) - - for { - select { - case <-em.eventClient.Done(): - return - case time := <-em.controlTimeSub.Events(): - em.onControlTime(time.Value) +func (em *expiryManager) consumeEventbusTopics(cli *eventbus.Client) func(*eventbus.Client) { + controlTimeSub := eventbus.Subscribe[controlclient.ControlTime](cli) + return func(cli *eventbus.Client) { + for { + select { + case <-cli.Done(): + return + case time := <-controlTimeSub.Events(): + em.onControlTime(time.Value) + } } } } @@ -250,10 +245,7 @@ func (em *expiryManager) nextPeerExpiry(nm *netmap.NetworkMap, localNow time.Tim return nextExpiry } -func (em *expiryManager) close() { - em.eventClient.Close() - <-em.subsDoneCh -} +func (em *expiryManager) close() { em.eventSubs.Close() } // ControlNow estimates the current time on the control server, calculated as // localNow + the delta between local and control server clocks as recorded From 798fddbe5cf21d6f87ee24ce630dfef9420afdb6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 19 Sep 2025 17:15:04 -0700 Subject: [PATCH 1360/1708] feature/linuxdnsfight: move inotify watching of /etc/resolv.conf out to a feature tsnet apps in particular never use the Linux DNS OSManagers, so they don't need DBus, etc. I started to pull that all out into separate features so tsnet doesn't need to bring in DBus, but hit this first. Here you can see that tsnet (and the k8s-operator) no longer pulls in inotify. 
Updates #17206 Change-Id: I7af0f391f60c5e7dbeed7a080346f83262346591 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 2 - cmd/tailscaled/depaware.txt | 3 +- cmd/tsidp/depaware.txt | 2 - .../feature_linuxdnsfight_disabled.go | 13 +++ .../feature_linuxdnsfight_enabled.go | 13 +++ feature/condregister/maybe_linuxdnsfight.go | 8 ++ feature/featuretags/featuretags.go | 1 + feature/linuxdnsfight/linuxdnsfight.go | 51 +++++++++ .../linuxdnsfight/linuxdnsfight_test.go | 4 +- net/dns/direct.go | 68 ++++++++++++ net/dns/direct_linux.go | 104 ------------------ net/dns/direct_notlinux.go | 10 -- tsnet/depaware.txt | 2 - 13 files changed, 159 insertions(+), 122 deletions(-) create mode 100644 feature/buildfeatures/feature_linuxdnsfight_disabled.go create mode 100644 feature/buildfeatures/feature_linuxdnsfight_enabled.go create mode 100644 feature/condregister/maybe_linuxdnsfight.go create mode 100644 feature/linuxdnsfight/linuxdnsfight.go rename net/dns/direct_linux_test.go => feature/linuxdnsfight/linuxdnsfight_test.go (96%) delete mode 100644 net/dns/direct_linux.go delete mode 100644 net/dns/direct_notlinux.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index b962f51f2..442a96611 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -145,8 +145,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ L github.com/google/nftables/xt from github.com/google/nftables/expr+ github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns - L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm github.com/josharian/intern from github.com/mailru/easyjson/jlexer L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index e4405a689..22f80d5d7 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -124,7 +124,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/google/nftables/xt from github.com/google/nftables/expr+ DW github.com/google/uuid from tailscale.com/clientupdate+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns + L 💣 github.com/illarion/gonotify/v3 from tailscale.com/feature/linuxdnsfight L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/feature/tap L github.com/insomniacslk/dhcp/iana from github.com/insomniacslk/dhcp/dhcpv4 @@ -277,6 +277,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister tailscale.com/feature/drive from tailscale.com/feature/condregister + L tailscale.com/feature/linuxdnsfight from tailscale.com/feature/condregister tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/relayserver from tailscale.com/feature/condregister tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ diff --git a/cmd/tsidp/depaware.txt 
b/cmd/tsidp/depaware.txt index 872dc8f81..d92a0b41a 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -114,8 +114,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar L github.com/google/nftables/xt from github.com/google/nftables/expr+ DW github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns - L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink diff --git a/feature/buildfeatures/feature_linuxdnsfight_disabled.go b/feature/buildfeatures/feature_linuxdnsfight_disabled.go new file mode 100644 index 000000000..2e5b50ea0 --- /dev/null +++ b/feature/buildfeatures/feature_linuxdnsfight_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_linuxdnsfight + +package buildfeatures + +// HasLinuxDNSFight is whether the binary was built with support for modular feature "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_linuxdnsfight" build tag. +// It's a const so it can be used for dead code elimination. +const HasLinuxDNSFight = false diff --git a/feature/buildfeatures/feature_linuxdnsfight_enabled.go b/feature/buildfeatures/feature_linuxdnsfight_enabled.go new file mode 100644 index 000000000..b9419fccb --- /dev/null +++ b/feature/buildfeatures/feature_linuxdnsfight_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_linuxdnsfight + +package buildfeatures + +// HasLinuxDNSFight is whether the binary was built with support for modular feature "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_linuxdnsfight" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasLinuxDNSFight = true diff --git a/feature/condregister/maybe_linuxdnsfight.go b/feature/condregister/maybe_linuxdnsfight.go new file mode 100644 index 000000000..0dae62b00 --- /dev/null +++ b/feature/condregister/maybe_linuxdnsfight.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android && !ts_omit_linuxdnsfight + +package condregister + +import _ "tailscale.com/feature/linuxdnsfight" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index ec21122db..269ff1fc1 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -105,6 +105,7 @@ var Features = map[FeatureTag]FeatureMeta{ "desktop_sessions": {"DesktopSessions", "Desktop sessions support", nil}, "drive": {"Drive", "Tailscale Drive (file server) support", nil}, "kube": {"Kube", "Kubernetes integration", nil}, + "linuxdnsfight": {"LinuxDNSFight", "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)", nil}, "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, "outboundproxy": { Sym: "OutboundProxy", diff --git a/feature/linuxdnsfight/linuxdnsfight.go b/feature/linuxdnsfight/linuxdnsfight.go new file mode 100644 index 000000000..02d99a314 --- /dev/null +++ b/feature/linuxdnsfight/linuxdnsfight.go @@ -0,0 +1,51 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android + +// Package linuxdnsfight provides Linux support for detecting DNS fights +// (inotify watching of /etc/resolv.conf). +package linuxdnsfight + +import ( + "context" + "fmt" + + "github.com/illarion/gonotify/v3" + "tailscale.com/net/dns" +) + +func init() { + dns.HookWatchFile.Set(watchFile) +} + +// watchFile sets up an inotify watch for a given directory and +// calls the callback function every time a particular file is changed. +// The filename should be located in the provided directory. +func watchFile(ctx context.Context, dir, filename string, cb func()) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + const events = gonotify.IN_ATTRIB | + gonotify.IN_CLOSE_WRITE | + gonotify.IN_CREATE | + gonotify.IN_DELETE | + gonotify.IN_MODIFY | + gonotify.IN_MOVE + + watcher, err := gonotify.NewDirWatcher(ctx, events, dir) + if err != nil { + return fmt.Errorf("NewDirWatcher: %w", err) + } + + for { + select { + case event := <-watcher.C: + if event.Name == filename { + cb() + } + case <-ctx.Done(): + return ctx.Err() + } + } +} diff --git a/net/dns/direct_linux_test.go b/feature/linuxdnsfight/linuxdnsfight_test.go similarity index 96% rename from net/dns/direct_linux_test.go rename to feature/linuxdnsfight/linuxdnsfight_test.go index e8f917b90..bd3463666 100644 --- a/net/dns/direct_linux_test.go +++ b/feature/linuxdnsfight/linuxdnsfight_test.go @@ -1,7 +1,9 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package dns +//go:build linux && !android + +package linuxdnsfight import ( "context" diff --git a/net/dns/direct.go b/net/dns/direct.go index f23723d9a..59eb06964 100644 --- a/net/dns/direct.go +++ b/net/dns/direct.go @@ -23,6 +23,7 @@ import ( "sync" "time" + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/net/dns/resolvconffile" "tailscale.com/net/tsaddr" @@ -415,6 +416,73 @@ func (m *directManager) GetBaseConfig() (OSConfig, error) { return oscfg, nil } +// HookWatchFile is a hook for watching file changes, for platforms that support it. 
+// The function is called with a directory and filename to watch, and a callback +// to call when the file changes. It returns an error if the watch could not be set up. +var HookWatchFile feature.Hook[func(ctx context.Context, dir, filename string, cb func()) error] + +func (m *directManager) runFileWatcher() { + watchFile, ok := HookWatchFile.GetOk() + if !ok { + return + } + if err := watchFile(m.ctx, "/etc/", resolvConf, m.checkForFileTrample); err != nil { + // This is all best effort for now, so surface warnings to users. + m.logf("dns: inotify: %s", err) + } +} + +var resolvTrampleWarnable = health.Register(&health.Warnable{ + Code: "resolv-conf-overwritten", + Severity: health.SeverityMedium, + Title: "DNS configuration issue", + Text: health.StaticMessage("System DNS config not ideal. /etc/resolv.conf overwritten. See https://tailscale.com/s/dns-fight"), +}) + +// checkForFileTrample checks whether /etc/resolv.conf has been trampled +// by another program on the system. (e.g. a DHCP client) +func (m *directManager) checkForFileTrample() { + m.mu.Lock() + want := m.wantResolvConf + lastWarn := m.lastWarnContents + m.mu.Unlock() + + if want == nil { + return + } + + cur, err := m.fs.ReadFile(resolvConf) + if err != nil { + m.logf("trample: read error: %v", err) + return + } + if bytes.Equal(cur, want) { + m.health.SetHealthy(resolvTrampleWarnable) + if lastWarn != nil { + m.mu.Lock() + m.lastWarnContents = nil + m.mu.Unlock() + m.logf("trample: resolv.conf again matches expected content") + } + return + } + if bytes.Equal(cur, lastWarn) { + // We already logged about this, so not worth doing it again. + return + } + + m.mu.Lock() + m.lastWarnContents = cur + m.mu.Unlock() + + show := cur + if len(show) > 1024 { + show = show[:1024] + } + m.logf("trample: resolv.conf changed from what we expected. did some other program interfere? current contents: %q", show) + m.health.SetUnhealthy(resolvTrampleWarnable, nil) +} + func (m *directManager) Close() error { m.ctxClose() diff --git a/net/dns/direct_linux.go b/net/dns/direct_linux.go deleted file mode 100644 index 0558f0f51..000000000 --- a/net/dns/direct_linux.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux && !android - -package dns - -import ( - "bytes" - "context" - "fmt" - - "github.com/illarion/gonotify/v3" - "tailscale.com/health" -) - -func (m *directManager) runFileWatcher() { - if err := watchFile(m.ctx, "/etc/", resolvConf, m.checkForFileTrample); err != nil { - // This is all best effort for now, so surface warnings to users. - m.logf("dns: inotify: %s", err) - } -} - -// watchFile sets up an inotify watch for a given directory and -// calls the callback function every time a particular file is changed. -// The filename should be located in the provided directory. 
-func watchFile(ctx context.Context, dir, filename string, cb func()) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - const events = gonotify.IN_ATTRIB | - gonotify.IN_CLOSE_WRITE | - gonotify.IN_CREATE | - gonotify.IN_DELETE | - gonotify.IN_MODIFY | - gonotify.IN_MOVE - - watcher, err := gonotify.NewDirWatcher(ctx, events, dir) - if err != nil { - return fmt.Errorf("NewDirWatcher: %w", err) - } - - for { - select { - case event := <-watcher.C: - if event.Name == filename { - cb() - } - case <-ctx.Done(): - return ctx.Err() - } - } -} - -var resolvTrampleWarnable = health.Register(&health.Warnable{ - Code: "resolv-conf-overwritten", - Severity: health.SeverityMedium, - Title: "Linux DNS configuration issue", - Text: health.StaticMessage("Linux DNS config not ideal. /etc/resolv.conf overwritten. See https://tailscale.com/s/dns-fight"), -}) - -// checkForFileTrample checks whether /etc/resolv.conf has been trampled -// by another program on the system. (e.g. a DHCP client) -func (m *directManager) checkForFileTrample() { - m.mu.Lock() - want := m.wantResolvConf - lastWarn := m.lastWarnContents - m.mu.Unlock() - - if want == nil { - return - } - - cur, err := m.fs.ReadFile(resolvConf) - if err != nil { - m.logf("trample: read error: %v", err) - return - } - if bytes.Equal(cur, want) { - m.health.SetHealthy(resolvTrampleWarnable) - if lastWarn != nil { - m.mu.Lock() - m.lastWarnContents = nil - m.mu.Unlock() - m.logf("trample: resolv.conf again matches expected content") - } - return - } - if bytes.Equal(cur, lastWarn) { - // We already logged about this, so not worth doing it again. - return - } - - m.mu.Lock() - m.lastWarnContents = cur - m.mu.Unlock() - - show := cur - if len(show) > 1024 { - show = show[:1024] - } - m.logf("trample: resolv.conf changed from what we expected. did some other program interfere? current contents: %q", show) - m.health.SetUnhealthy(resolvTrampleWarnable, nil) -} diff --git a/net/dns/direct_notlinux.go b/net/dns/direct_notlinux.go deleted file mode 100644 index a73a35e5e..000000000 --- a/net/dns/direct_notlinux.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux && !android && !ios - -package dns - -func (m *directManager) runFileWatcher() { - // Not implemented on other platforms. Maybe it could resort to polling. 
-} diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 5f7ca2e32..de9e69f9c 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -114,8 +114,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L github.com/google/nftables/xt from github.com/google/nftables/expr+ DWI github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns - L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink From d7ec043306ed128e5c5f540e944371a98474f36c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 20 Sep 2025 15:55:33 -0700 Subject: [PATCH 1361/1708] cmd/tailscale/cli: add ts2021 debug flag to set a dial plan Updates tailscale/corp#32534 Change-Id: Ief4ee0a263ea1edbf652b74d8c335c1e5ee209d7 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/debug.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 9e8fa0d7f..b3170d000 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -289,6 +289,7 @@ func debugCmd() *ffcli.Command { fs.IntVar(&ts2021Args.version, "version", int(tailcfg.CurrentCapabilityVersion), "protocol version") fs.BoolVar(&ts2021Args.verbose, "verbose", false, "be extra verbose") fs.StringVar(&ts2021Args.aceHost, "ace", "", "if non-empty, use this ACE server IP/hostname as a candidate path") + fs.StringVar(&ts2021Args.dialPlanJSONFile, "dial-plan", "", "if non-empty, use this JSON file to configure the dial plan") return fs })(), }, @@ -967,6 +968,8 @@ var ts2021Args struct { version int // 27 or whatever verbose bool aceHost string // if non-empty, FQDN of https ACE server to use ("ace.example.com") + + dialPlanJSONFile string // if non-empty, path to JSON file [tailcfg.ControlDialPlan] JSON } func runTS2021(ctx context.Context, args []string) error { @@ -1051,6 +1054,18 @@ func runTS2021(ctx context.Context, args []string) error { return fmt.Errorf("creating netmon: %w", err) } + var dialPlan *tailcfg.ControlDialPlan + if ts2021Args.dialPlanJSONFile != "" { + b, err := os.ReadFile(ts2021Args.dialPlanJSONFile) + if err != nil { + return fmt.Errorf("reading dial plan JSON file: %w", err) + } + dialPlan = new(tailcfg.ControlDialPlan) + if err := json.Unmarshal(b, dialPlan); err != nil { + return fmt.Errorf("unmarshaling dial plan JSON file: %w", err) + } + } + noiseDialer := &controlhttp.Dialer{ Hostname: ts2021Args.host, HTTPPort: "80", @@ -1058,6 +1073,7 @@ func runTS2021(ctx context.Context, args []string) error { MachineKey: machinePrivate, ControlKey: keys.PublicKey, ProtocolVersion: uint16(ts2021Args.version), + DialPlan: dialPlan, Dialer: dialFunc, Logf: logf, NetMon: netMon, From 1b6bc37f2859007dc4ed949b14f1f8531990b3cf Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 20 Sep 2025 16:14:44 -0700 Subject: [PATCH 1362/1708] net/dnscache: fix case where Resolver could return zero IP with single IPv6 address The controlhttp dialer with a ControlDialPlan IPv6 entry was hitting a case where the dnscache Resolver was returning an netip.Addr zero value, where it should've been returning the IPv6 address. 
We then tried to dial "invalid IP:80", which would immediately fail, at least locally. Mostly this was causing spammy logs when debugging other stuff. Updates tailscale/corp#32534 Change-Id: If8b9a20f10c1a6aa8a662c324151d987fe9bd2f8 Signed-off-by: Brad Fitzpatrick --- net/dnscache/dnscache.go | 3 ++ net/dnscache/dnscache_test.go | 58 +++++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+) diff --git a/net/dnscache/dnscache.go b/net/dnscache/dnscache.go index d60e92f0b..94d4bbee7 100644 --- a/net/dnscache/dnscache.go +++ b/net/dnscache/dnscache.go @@ -205,6 +205,9 @@ func (r *Resolver) LookupIP(ctx context.Context, host string) (ip, v6 netip.Addr } allIPs = append(allIPs, naIP) } + if !ip.IsValid() && v6.IsValid() { + ip = v6 + } r.dlogf("returning %d static results", len(allIPs)) return } diff --git a/net/dnscache/dnscache_test.go b/net/dnscache/dnscache_test.go index ef4249b74..58bb6cd7f 100644 --- a/net/dnscache/dnscache_test.go +++ b/net/dnscache/dnscache_test.go @@ -11,6 +11,7 @@ import ( "net" "net/netip" "reflect" + "slices" "testing" "time" @@ -240,3 +241,60 @@ func TestShouldTryBootstrap(t *testing.T) { }) } } + +func TestSingleHostStaticResult(t *testing.T) { + v4 := netip.MustParseAddr("0.0.0.1") + v6 := netip.MustParseAddr("2001::a") + + tests := []struct { + name string + static []netip.Addr + wantIP netip.Addr + wantIP6 netip.Addr + wantAll []netip.Addr + }{ + { + name: "just-v6", + static: []netip.Addr{v6}, + wantIP: v6, + wantIP6: v6, + wantAll: []netip.Addr{v6}, + }, + { + name: "just-v4", + static: []netip.Addr{v4}, + wantIP: v4, + wantIP6: netip.Addr{}, + wantAll: []netip.Addr{v4}, + }, + { + name: "v6-then-v4", + static: []netip.Addr{v6, v4}, + wantIP: v4, + wantIP6: v6, + wantAll: []netip.Addr{v6, v4}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &Resolver{ + SingleHost: "example.com", + SingleHostStaticResult: tt.static, + } + ip, ip6, all, err := r.LookupIP(context.Background(), "example.com") + if err != nil { + t.Fatal(err) + } + if ip != tt.wantIP { + t.Errorf("got ip %v; want %v", ip, tt.wantIP) + } + if ip6 != tt.wantIP6 { + t.Errorf("got ip6 %v; want %v", ip6, tt.wantIP6) + } + if !slices.Equal(all, tt.wantAll) { + t.Errorf("got all %v; want %v", all, tt.wantAll) + } + }) + } +} From db048e905d6636006d06c93da06fad3ff075e97b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 20 Sep 2025 16:48:18 -0700 Subject: [PATCH 1363/1708] control/controlhttp: simplify, fix race dialing, remove priority concept controlhttp has the responsibility of dialing a set of candidate control endpoints in a way that minimizes user facing latency. If one control endpoint is unavailable we promptly dial another, racing across the dimensions of: IPv6, IPv4, port 80, and port 443, over multiple server endpoints. In the case that the top priority endpoint was not available, the prior implementation would hang waiting for other results, so as to try to return the highest priority successful connection to the rest of the client code. This hang would take too long with a large dialplan and sufficient client to endpoint latency as to cause the server to timeout the connection due to inactivity in the intermediate state. Instead of trying to prioritize non-ideal candidate connections, the first successful connection is now used unconditionally, improving user facing latency and avoiding any delays that would encroach on the server-side timeout. The tests are converted to memnet and synctest, running on all platforms. 
Fixes #8442 Fixes tailscale/corp#32534 Co-authored-by: James Tucker Change-Id: I4eb57f046d8b40403220e40eb67a31c41adb3a38 Signed-off-by: Brad Fitzpatrick Signed-off-by: James Tucker --- cmd/tailscale/depaware.txt | 2 +- control/controlhttp/client.go | 180 +++---------- control/controlhttp/constants.go | 1 - control/controlhttp/http_test.go | 447 ++++++++++++++++++------------- 4 files changed, 306 insertions(+), 324 deletions(-) diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 27d7864ae..b9b7db525 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -186,7 +186,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns tailscale.com/util/mak from tailscale.com/cmd/tailscale/cli+ - tailscale.com/util/multierr from tailscale.com/control/controlhttp+ + tailscale.com/util/multierr from tailscale.com/health+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index 87061c310..da9590c48 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -27,14 +27,12 @@ import ( "errors" "fmt" "io" - "math" "net" "net/http" "net/http/httptrace" "net/netip" "net/url" "runtime" - "sort" "sync/atomic" "time" @@ -53,7 +51,6 @@ import ( "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" - "tailscale.com/util/multierr" ) var stdDialer net.Dialer @@ -110,18 +107,8 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { } candidates := a.DialPlan.Candidates - // Otherwise, we try dialing per the plan. Store the highest priority - // in the list, so that if we get a connection to one of those - // candidates we can return quickly. - var highestPriority int = math.MinInt - for _, c := range candidates { - if c.Priority > highestPriority { - highestPriority = c.Priority - } - } - - // This context allows us to cancel in-flight connections if we get a - // highest-priority connection before we're all done. + // Create a context to be canceled as we return, so once we get a good connection, + // we can drop all the other ones. ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -129,142 +116,58 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { type dialResult struct { conn *ClientConn err error - cand tailcfg.ControlIPCandidate - } - resultsCh := make(chan dialResult, len(candidates)) - - var pending atomic.Int32 - pending.Store(int32(len(candidates))) - for _, c := range candidates { - go func(ctx context.Context, c tailcfg.ControlIPCandidate) { - var ( - conn *ClientConn - err error - ) - - // Always send results back to our channel. - defer func() { - resultsCh <- dialResult{conn, err, c} - if pending.Add(-1) == 0 { - close(resultsCh) - } - }() - - // If non-zero, wait the configured start timeout - // before we do anything. 
- if c.DialStartDelaySec > 0 { - a.logf("[v2] controlhttp: waiting %.2f seconds before dialing %q @ %v", c.DialStartDelaySec, a.Hostname, c.IP) - tmr, tmrChannel := a.clock().NewTimer(time.Duration(c.DialStartDelaySec * float64(time.Second))) - defer tmr.Stop() - select { - case <-ctx.Done(): - err = ctx.Err() - return - case <-tmrChannel: - } - } + } + resultsCh := make(chan dialResult) // unbuffered, never closed - // Now, create a sub-context with the given timeout and - // try dialing the provided host. - ctx, cancel := context.WithTimeout(ctx, time.Duration(c.DialTimeoutSec*float64(time.Second))) - defer cancel() + dialCand := func(cand tailcfg.ControlIPCandidate) (*ClientConn, error) { + if cand.ACEHost != "" { + a.logf("[v2] controlhttp: waited %.2f seconds, dialing %q via ACE %s (%s)", cand.DialStartDelaySec, a.Hostname, cand.ACEHost, cmp.Or(cand.IP.String(), "dns")) + } else { + a.logf("[v2] controlhttp: waited %.2f seconds, dialing %q @ %s", cand.DialStartDelaySec, a.Hostname, cand.IP.String()) + } - if c.IP.IsValid() { - a.logf("[v2] controlhttp: trying to dial %q @ %v", a.Hostname, c.IP) - } else if c.ACEHost != "" { - a.logf("[v2] controlhttp: trying to dial %q via ACE %q", a.Hostname, c.ACEHost) - } - // This will dial, and the defer above sends it back to our parent. - conn, err = a.dialHostOpt(ctx, c.IP, c.ACEHost) - }(ctx, c) + ctx, cancel := context.WithTimeout(ctx, time.Duration(cand.DialTimeoutSec*float64(time.Second))) + defer cancel() + return a.dialHostOpt(ctx, cand.IP, cand.ACEHost) } - var results []dialResult - for res := range resultsCh { - // If we get a response that has the highest priority, we don't - // need to wait for any of the other connections to finish; we - // can just return this connection. - // - // TODO(andrew): we could make this better by keeping track of - // the highest remaining priority dynamically, instead of just - // checking for the highest total - if res.cand.Priority == highestPriority && res.conn != nil { - a.logf("[v1] controlhttp: high-priority success dialing %q @ %v from dial plan", a.Hostname, cmp.Or(res.cand.ACEHost, res.cand.IP.String())) - - // Drain the channel and any existing connections in - // the background. + for _, cand := range candidates { + timer := time.AfterFunc(time.Duration(cand.DialStartDelaySec*float64(time.Second)), func() { go func() { - for _, res := range results { - if res.conn != nil { - res.conn.Close() + conn, err := dialCand(cand) + select { + case resultsCh <- dialResult{conn, err}: + if err == nil { + a.logf("[v1] controlhttp: succeeded dialing %q @ %v from dial plan", a.Hostname, cmp.Or(cand.ACEHost, cand.IP.String())) } - } - for res := range resultsCh { - if res.conn != nil { - res.conn.Close() + case <-ctx.Done(): + if conn != nil { + conn.Close() } } - if a.drainFinished != nil { - close(a.drainFinished) - } }() - return res.conn, nil - } - - // This isn't a highest-priority result, so just store it until - // we're done. - results = append(results, res) + }) + defer timer.Stop() } - // After we finish this function, close any remaining open connections. - defer func() { - for _, result := range results { - // Note: below, we nil out the returned connection (if - // any) in the slice so we don't close it. 
- if result.conn != nil { - result.conn.Close() + var errs []error + for { + select { + case res := <-resultsCh: + if res.err == nil { + return res.conn, nil } + errs = append(errs, res.err) + if len(errs) == len(candidates) { + // If we get here, then we didn't get anywhere with our dial plan; fall back to just using DNS. + a.logf("controlhttp: failed dialing using DialPlan, falling back to DNS; errs=%s", errors.Join(errs...)) + return a.dialHost(ctx) + } + case <-ctx.Done(): + a.logf("controlhttp: context aborted dialing") + return nil, ctx.Err() } - - // We don't drain asynchronously after this point, so notify our - // channel when we return. - if a.drainFinished != nil { - close(a.drainFinished) - } - }() - - // Sort by priority, then take the first non-error response. - sort.Slice(results, func(i, j int) bool { - // NOTE: intentionally inverted so that the highest priority - // item comes first - return results[i].cand.Priority > results[j].cand.Priority - }) - - var ( - conn *ClientConn - errs []error - ) - for i, result := range results { - if result.err != nil { - errs = append(errs, result.err) - continue - } - - a.logf("[v1] controlhttp: succeeded dialing %q @ %v from dial plan", a.Hostname, cmp.Or(result.cand.ACEHost, result.cand.IP.String())) - conn = result.conn - results[i].conn = nil // so we don't close it in the defer - return conn, nil } - if ctx.Err() != nil { - a.logf("controlhttp: context aborted dialing") - return nil, ctx.Err() - } - - merr := multierr.New(errs...) - - // If we get here, then we didn't get anywhere with our dial plan; fall back to just using DNS. - a.logf("controlhttp: failed dialing using DialPlan, falling back to DNS; errs=%s", merr.Error()) - return a.dialHost(ctx) } // The TS_FORCE_NOISE_443 envknob forces the controlclient noise dialer to @@ -402,6 +305,9 @@ func (a *Dialer) dialHostOpt(ctx context.Context, optAddr netip.Addr, optACEHost } var err80, err443 error + if forceTLS { + err80 = errors.New("TLS forced: no port 80 dialed") + } for { select { case <-ctx.Done(): diff --git a/control/controlhttp/constants.go b/control/controlhttp/constants.go index 12038fae4..58fed1b76 100644 --- a/control/controlhttp/constants.go +++ b/control/controlhttp/constants.go @@ -98,7 +98,6 @@ type Dialer struct { logPort80Failure atomic.Bool // For tests only - drainFinished chan struct{} omitCertErrorLogging bool testFallbackDelay time.Duration diff --git a/control/controlhttp/http_test.go b/control/controlhttp/http_test.go index 0b4e117f9..6485761ac 100644 --- a/control/controlhttp/http_test.go +++ b/control/controlhttp/http_test.go @@ -15,19 +15,20 @@ import ( "net/http/httputil" "net/netip" "net/url" - "runtime" "slices" "strconv" + "strings" "sync" "testing" + "testing/synctest" "time" "tailscale.com/control/controlbase" "tailscale.com/control/controlhttp/controlhttpcommon" "tailscale.com/control/controlhttp/controlhttpserver" "tailscale.com/health" + "tailscale.com/net/memnet" "tailscale.com/net/netmon" - "tailscale.com/net/netx" "tailscale.com/net/socks5" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" @@ -36,6 +37,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/util/eventbus/eventbustest" + "tailscale.com/util/must" ) type httpTestParam struct { @@ -532,6 +534,28 @@ EKTcWGekdmdDPsHloRNtsiCa697B2O9IFA== } } +// slowListener wraps a memnet listener to delay accept operations +type slowListener struct { + net.Listener + delay time.Duration +} + +func (sl *slowListener) Accept() (net.Conn, error) { + // Add delay 
before accepting connections + timer := time.NewTimer(sl.delay) + defer timer.Stop() + <-timer.C + + return sl.Listener.Accept() +} + +func newSlowListener(inner net.Listener, delay time.Duration) net.Listener { + return &slowListener{ + Listener: inner, + delay: delay, + } +} + func brokenMITMHandler(clock tstime.Clock) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Upgrade", controlhttpcommon.UpgradeHeaderValue) @@ -545,33 +569,102 @@ func brokenMITMHandler(clock tstime.Clock) http.HandlerFunc { } func TestDialPlan(t *testing.T) { - if runtime.GOOS != "linux" { - t.Skip("only works on Linux due to multiple localhost addresses") + testCases := []struct { + name string + plan *tailcfg.ControlDialPlan + want []netip.Addr + allowFallback bool + maxDuration time.Duration + }{ + { + name: "single", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.2"), DialTimeoutSec: 10}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + }, + { + name: "broken-then-good", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.10"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.2"), DialTimeoutSec: 10, DialStartDelaySec: 1}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + }, + { + name: "multiple-candidates-with-broken", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + // Multiple good IPs plus a broken one + // Should succeed with any of the good ones + {IP: netip.MustParseAddr("10.0.0.10"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.2"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.4"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.3"), DialTimeoutSec: 10}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.2"), netip.MustParseAddr("10.0.0.4"), netip.MustParseAddr("10.0.0.3")}, + }, + { + name: "multiple-candidates-race", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.10"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.3"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.2"), DialTimeoutSec: 10}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.3"), netip.MustParseAddr("10.0.0.2")}, + }, + { + name: "fallback", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.10"), DialTimeoutSec: 1}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.1")}, + allowFallback: true, + }, + { + // In tailscale/corp#32534 we discovered that a prior implementation + // of the dial race was waiting for all dials to complete when the + // top priority dial was failing. This delay was long enough that in + // real scenarios the server will close the connection due to + // inactivity, because the client does not send the first inside of + // noise request soon enough. This test is a regression guard + // against that behavior - proving that the dial returns promptly + // even if there is some cause of a slow race. 
+ name: "slow-endpoint-doesnt-block", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.12"), Priority: 5, DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.2"), Priority: 1, DialTimeoutSec: 10}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + maxDuration: 2 * time.Second, // Must complete quickly, not wait for slow endpoint + }, } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + runDialPlanTest(t, tt.plan, tt.want, tt.allowFallback, tt.maxDuration) + }) + }) + } +} + +func runDialPlanTest(t *testing.T, plan *tailcfg.ControlDialPlan, want []netip.Addr, allowFallback bool, maxDuration time.Duration) { client, server := key.NewMachine(), key.NewMachine() const ( testProtocolVersion = 1 + httpPort = "80" + httpsPort = "443" ) - getRandomPort := func() string { - ln, err := net.Listen("tcp", ":0") - if err != nil { - t.Fatalf("net.Listen: %v", err) - } - defer ln.Close() - _, port, err := net.SplitHostPort(ln.Addr().String()) - if err != nil { - t.Fatal(err) - } - return port - } + memNetwork := &memnet.Network{} - // We need consistent ports for each address; these are chosen - // randomly and we hope that they won't conflict during this test. - httpPort := getRandomPort() - httpsPort := getRandomPort() + fallbackAddr := netip.MustParseAddr("10.0.0.1") + goodAddr := netip.MustParseAddr("10.0.0.2") + otherAddr := netip.MustParseAddr("10.0.0.3") + other2Addr := netip.MustParseAddr("10.0.0.4") + brokenAddr := netip.MustParseAddr("10.0.0.10") + slowAddr := netip.MustParseAddr("10.0.0.12") makeHandler := func(t *testing.T, name string, host netip.Addr, wrap func(http.Handler) http.Handler) { done := make(chan struct{}) @@ -592,17 +685,66 @@ func TestDialPlan(t *testing.T) { handler = wrap(handler) } - httpLn, err := net.Listen("tcp", host.String()+":"+httpPort) + httpLn := must.Get(memNetwork.Listen("tcp", host.String()+":"+httpPort)) + httpsLn := must.Get(memNetwork.Listen("tcp", host.String()+":"+httpsPort)) + + httpServer := &http.Server{Handler: handler} + go httpServer.Serve(httpLn) + t.Cleanup(func() { + httpServer.Close() + }) + + httpsServer := &http.Server{ + Handler: handler, + TLSConfig: tlsConfig(t), + ErrorLog: logger.StdLogger(logger.WithPrefix(t.Logf, "http.Server.ErrorLog: ")), + } + go httpsServer.ServeTLS(httpsLn, "", "") + t.Cleanup(func() { + httpsServer.Close() + }) + } + + // Use synctest's controlled time + clock := tstime.StdClock{} + makeHandler(t, "fallback", fallbackAddr, nil) + makeHandler(t, "good", goodAddr, nil) + makeHandler(t, "other", otherAddr, nil) + makeHandler(t, "other2", other2Addr, nil) + makeHandler(t, "broken", brokenAddr, func(h http.Handler) http.Handler { + return brokenMITMHandler(clock) + }) + // Create slow listener that delays accept by 5 seconds + makeSlowHandler := func(t *testing.T, name string, host netip.Addr, delay time.Duration) { + done := make(chan struct{}) + t.Cleanup(func() { + close(done) + }) + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + conn, err := controlhttpserver.AcceptHTTP(context.Background(), w, r, server, nil) + if err != nil { + log.Print(err) + } else { + defer conn.Close() + } + w.Header().Set("X-Handler-Name", name) + <-done + }) + + httpLn, err := memNetwork.Listen("tcp", host.String()+":"+httpPort) if err != nil { t.Fatalf("HTTP listen: %v", err) } - httpsLn, err := net.Listen("tcp", host.String()+":"+httpsPort) + httpsLn, err := 
memNetwork.Listen("tcp", host.String()+":"+httpsPort) if err != nil { t.Fatalf("HTTPS listen: %v", err) } + slowHttpLn := newSlowListener(httpLn, delay) + slowHttpsLn := newSlowListener(httpsLn, delay) + httpServer := &http.Server{Handler: handler} - go httpServer.Serve(httpLn) + go httpServer.Serve(slowHttpLn) t.Cleanup(func() { httpServer.Close() }) @@ -612,213 +754,148 @@ func TestDialPlan(t *testing.T) { TLSConfig: tlsConfig(t), ErrorLog: logger.StdLogger(logger.WithPrefix(t.Logf, "http.Server.ErrorLog: ")), } - go httpsServer.ServeTLS(httpsLn, "", "") + go httpsServer.ServeTLS(slowHttpsLn, "", "") t.Cleanup(func() { httpsServer.Close() }) - return } + makeSlowHandler(t, "slow", slowAddr, 5*time.Second) - fallbackAddr := netip.MustParseAddr("127.0.0.1") - goodAddr := netip.MustParseAddr("127.0.0.2") - otherAddr := netip.MustParseAddr("127.0.0.3") - other2Addr := netip.MustParseAddr("127.0.0.4") - brokenAddr := netip.MustParseAddr("127.0.0.10") + // memnetDialer with connection tracking, so we can catch connection leaks. + dialer := &memnetDialer{ + inner: memNetwork.Dial, + t: t, + } + defer dialer.waitForAllClosedSynctest() - testCases := []struct { - name string - plan *tailcfg.ControlDialPlan - wrap func(http.Handler) http.Handler - want netip.Addr + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() - allowFallback bool - }{ - { - name: "single", - plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - {IP: goodAddr, Priority: 1, DialTimeoutSec: 10}, - }}, - want: goodAddr, - }, - { - name: "broken-then-good", - plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - // Dials the broken one, which fails, and then - // eventually dials the good one and succeeds - {IP: brokenAddr, Priority: 2, DialTimeoutSec: 10}, - {IP: goodAddr, Priority: 1, DialTimeoutSec: 10, DialStartDelaySec: 1}, - }}, - want: goodAddr, - }, - // TODO(#8442): fix this test - // { - // name: "multiple-priority-fast-path", - // plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - // // Dials some good IPs and our bad one (which - // // hangs forever), which then hits the fast - // // path where we bail without waiting. - // {IP: brokenAddr, Priority: 1, DialTimeoutSec: 10}, - // {IP: goodAddr, Priority: 1, DialTimeoutSec: 10}, - // {IP: other2Addr, Priority: 1, DialTimeoutSec: 10}, - // {IP: otherAddr, Priority: 2, DialTimeoutSec: 10}, - // }}, - // want: otherAddr, - // }, - { - name: "multiple-priority-slow-path", - plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - // Our broken address is the highest priority, - // so we don't hit our fast path. - {IP: brokenAddr, Priority: 10, DialTimeoutSec: 10}, - {IP: otherAddr, Priority: 2, DialTimeoutSec: 10}, - {IP: goodAddr, Priority: 1, DialTimeoutSec: 10}, - }}, - want: otherAddr, - }, - { - name: "fallback", - plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - {IP: brokenAddr, Priority: 1, DialTimeoutSec: 1}, - }}, - want: fallbackAddr, - allowFallback: true, - }, + host := "example.com" + if allowFallback { + host = fallbackAddr.String() } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - // TODO(awly): replace this with tstest.NewClock and update the - // test to advance the clock correctly. 
- clock := tstime.StdClock{} - makeHandler(t, "fallback", fallbackAddr, nil) - makeHandler(t, "good", goodAddr, nil) - makeHandler(t, "other", otherAddr, nil) - makeHandler(t, "other2", other2Addr, nil) - makeHandler(t, "broken", brokenAddr, func(h http.Handler) http.Handler { - return brokenMITMHandler(clock) - }) - dialer := closeTrackDialer{ - t: t, - inner: tsdial.NewDialer(netmon.NewStatic()).SystemDial, - conns: make(map[*closeTrackConn]bool), - } - defer dialer.Done() + a := &Dialer{ + Hostname: host, + HTTPPort: httpPort, + HTTPSPort: httpsPort, + MachineKey: client, + ControlKey: server.Public(), + ProtocolVersion: testProtocolVersion, + Dialer: dialer.Dial, + Logf: t.Logf, + DialPlan: plan, + proxyFunc: func(*http.Request) (*url.URL, error) { return nil, nil }, + omitCertErrorLogging: true, + testFallbackDelay: 50 * time.Millisecond, + Clock: clock, + HealthTracker: health.NewTracker(eventbustest.NewBus(t)), + } - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() + start := time.Now() + conn, err := a.dial(ctx) + duration := time.Since(start) - // By default, we intentionally point to something that - // we know won't connect, since we want a fallback to - // DNS to be an error. - host := "example.com" - if tt.allowFallback { - host = "localhost" - } + if err != nil { + t.Fatalf("dialing controlhttp: %v", err) + } + defer conn.Close() - drained := make(chan struct{}) - a := &Dialer{ - Hostname: host, - HTTPPort: httpPort, - HTTPSPort: httpsPort, - MachineKey: client, - ControlKey: server.Public(), - ProtocolVersion: testProtocolVersion, - Dialer: dialer.Dial, - Logf: t.Logf, - DialPlan: tt.plan, - proxyFunc: func(*http.Request) (*url.URL, error) { return nil, nil }, - drainFinished: drained, - omitCertErrorLogging: true, - testFallbackDelay: 50 * time.Millisecond, - Clock: clock, - HealthTracker: health.NewTracker(eventbustest.NewBus(t)), - } + if maxDuration > 0 && duration > maxDuration { + t.Errorf("dial took %v, expected < %v (should not wait for slow endpoints)", duration, maxDuration) + } - conn, err := a.dial(ctx) - if err != nil { - t.Fatalf("dialing controlhttp: %v", err) - } - defer conn.Close() + raddr := conn.RemoteAddr() + raddrStr := raddr.String() - raddr := conn.RemoteAddr().(*net.TCPAddr) + // split on "|" first to remove memnet pipe suffix + addrPart := raddrStr + if idx := strings.Index(raddrStr, "|"); idx >= 0 { + addrPart = raddrStr[:idx] + } - got, ok := netip.AddrFromSlice(raddr.IP) - if !ok { - t.Errorf("invalid remote IP: %v", raddr.IP) - } else if got != tt.want { - t.Errorf("got connection from %q; want %q", got, tt.want) - } else { - t.Logf("successfully connected to %q", raddr.String()) - } + host, _, err2 := net.SplitHostPort(addrPart) + if err2 != nil { + t.Fatalf("failed to parse remote address %q: %v", addrPart, err2) + } - // Wait until our dialer drains so we can verify that - // all connections are closed. 
- <-drained - }) + got, err3 := netip.ParseAddr(host) + if err3 != nil { + t.Errorf("invalid remote IP: %v", host) + } else { + found := slices.Contains(want, got) + if !found { + t.Errorf("got connection from %q; want one of %v", got, want) + } else { + t.Logf("successfully connected to %q", raddr.String()) + } } } -type closeTrackDialer struct { - t testing.TB - inner netx.DialFunc +// memnetDialer wraps memnet.Network.Dial to track connections for testing +type memnetDialer struct { + inner func(ctx context.Context, network, addr string) (net.Conn, error) + t *testing.T mu sync.Mutex - conns map[*closeTrackConn]bool + conns map[net.Conn]string // conn -> remote address for debugging } -func (d *closeTrackDialer) Dial(ctx context.Context, network, addr string) (net.Conn, error) { - c, err := d.inner(ctx, network, addr) +func (d *memnetDialer) Dial(ctx context.Context, network, addr string) (net.Conn, error) { + conn, err := d.inner(ctx, network, addr) if err != nil { return nil, err } - ct := &closeTrackConn{Conn: c, d: d} d.mu.Lock() - d.conns[ct] = true + if d.conns == nil { + d.conns = make(map[net.Conn]string) + } + d.conns[conn] = conn.RemoteAddr().String() + d.t.Logf("tracked connection opened to %s", conn.RemoteAddr()) d.mu.Unlock() - return ct, nil + + return &memnetTrackedConn{Conn: conn, dialer: d}, nil } -func (d *closeTrackDialer) Done() { - // Unfortunately, tsdial.Dialer.SystemDial closes connections - // asynchronously in a goroutine, so we can't assume that everything is - // closed by the time we get here. - // - // Sleep/wait a few times on the assumption that things will close - // "eventually". - const iters = 100 - for i := range iters { +func (d *memnetDialer) waitForAllClosedSynctest() { + const maxWait = 15 * time.Second + const checkInterval = 100 * time.Millisecond + + for range int(maxWait / checkInterval) { d.mu.Lock() - if len(d.conns) == 0 { + remaining := len(d.conns) + if remaining == 0 { d.mu.Unlock() return } + d.mu.Unlock() - // Only error on last iteration - if i != iters-1 { - d.mu.Unlock() - time.Sleep(100 * time.Millisecond) - continue - } + time.Sleep(checkInterval) + } - for conn := range d.conns { - d.t.Errorf("expected close of conn %p; RemoteAddr=%q", conn, conn.RemoteAddr().String()) - } - d.mu.Unlock() + d.mu.Lock() + defer d.mu.Unlock() + for _, addr := range d.conns { + d.t.Errorf("connection to %s was not closed after %v", addr, maxWait) } } -func (d *closeTrackDialer) noteClose(c *closeTrackConn) { +func (d *memnetDialer) noteClose(conn net.Conn) { d.mu.Lock() - delete(d.conns, c) // safe if already deleted + if addr, exists := d.conns[conn]; exists { + d.t.Logf("tracked connection closed to %s", addr) + delete(d.conns, conn) + } d.mu.Unlock() } -type closeTrackConn struct { +type memnetTrackedConn struct { net.Conn - d *closeTrackDialer + dialer *memnetDialer } -func (c *closeTrackConn) Close() error { - c.d.noteClose(c) +func (c *memnetTrackedConn) Close() error { + c.dialer.noteClose(c.Conn) return c.Conn.Close() } From 8ec07b5f7fc31e5d86aa9db4f0c7fe5498d3f9fa Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 21 Sep 2025 08:08:41 -0700 Subject: [PATCH 1364/1708] ipn/ipnauth: don't crash on OpenBSD trying to log username of unknown peer We never implemented the peercred package on OpenBSD (and I just tried again and failed), but we've always documented that the creds pointer can be nil for operating systems where we can't map the unix socket back to its UID. 
On those platforms, we set the default unix socket permissions such that only the admin can open it anyway and we don't have a read-only vs read-write distinction. OpenBSD was always in that camp, where any access to Tailscale's unix socket meant full access. But during some refactoring, we broke OpenBSD in that we started assuming during one logging path (during login) that Creds was non-nil when looking up an ipnauth.Actor's username, which wasn't relevant (it was called from a function "maybeUsernameOf" anyway, which threw away errors). Verified on an OpenBSD VM. We don't have any OpenBSD integration tests yet. Fixes #17209 Updates #17221 Change-Id: I473c5903dfaa645694bcc75e7f5d484f3dd6044d Signed-off-by: Brad Fitzpatrick --- ipn/ipnauth/ipnauth.go | 2 +- ipn/ipnauth/ipnauth_notwindows.go | 7 ++++++- ipn/ipnserver/actor.go | 6 +++++- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/ipn/ipnauth/ipnauth.go b/ipn/ipnauth/ipnauth.go index e6560570c..513daf5b3 100644 --- a/ipn/ipnauth/ipnauth.go +++ b/ipn/ipnauth/ipnauth.go @@ -64,7 +64,7 @@ type ConnIdentity struct { // Fields used when NotWindows: isUnixSock bool // Conn is a *net.UnixConn - creds *peercred.Creds // or nil + creds *peercred.Creds // or nil if peercred.Get was not implemented on this OS // Used on Windows: // TODO(bradfitz): merge these into the peercreds package and diff --git a/ipn/ipnauth/ipnauth_notwindows.go b/ipn/ipnauth/ipnauth_notwindows.go index d9d11bd0a..f5dc07a8c 100644 --- a/ipn/ipnauth/ipnauth_notwindows.go +++ b/ipn/ipnauth/ipnauth_notwindows.go @@ -18,8 +18,13 @@ import ( func GetConnIdentity(_ logger.Logf, c net.Conn) (ci *ConnIdentity, err error) { ci = &ConnIdentity{conn: c, notWindows: true} _, ci.isUnixSock = c.(*net.UnixConn) - if ci.creds, _ = peercred.Get(c); ci.creds != nil { + if ci.creds, err = peercred.Get(c); ci.creds != nil { ci.pid, _ = ci.creds.PID() + } else if err == peercred.ErrNotImplemented { + // peercred.Get is not implemented on this OS (such as OpenBSD) + // Just leave creds as nil, as documented. + } else if err != nil { + return nil, err } return ci, nil } diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 9d86d2c82..924417a33 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -145,7 +145,11 @@ func (a *actor) Username() (string, error) { defer tok.Close() return tok.Username() case "darwin", "linux", "illumos", "solaris", "openbsd": - uid, ok := a.ci.Creds().UserID() + creds := a.ci.Creds() + if creds == nil { + return "", errors.New("peer credentials not implemented on this OS") + } + uid, ok := creds.UserID() if !ok { return "", errors.New("missing user ID") } From 986b4d1b0b22b71126b9fbc32c0563331eb4f4ea Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Mon, 8 Sep 2025 10:36:14 +0200 Subject: [PATCH 1365/1708] control/controlclient: fix tka godoc Updates #cleanup Signed-off-by: Kristoffer Dalby --- control/controlclient/direct.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index ea8661bff..991767e5d 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -398,7 +398,7 @@ func (c *Direct) SetNetInfo(ni *tailcfg.NetInfo) bool { return true } -// SetNetInfo stores a new TKA head value for next update. +// SetTKAHead stores a new TKA head value for next update. // It reports whether the TKA head changed. 
func (c *Direct) SetTKAHead(tkaHead string) bool { c.mu.Lock() From cc1761e8d272f5ddf326d35de8a647c6cbf6a8c7 Mon Sep 17 00:00:00 2001 From: David Bond Date: Mon, 22 Sep 2025 13:55:16 +0100 Subject: [PATCH 1366/1708] cmd/k8s-operator: send operator logs to tailscale (#17110) This commit modifies the k8s operator to wrap its logger using the logtail logger provided via the tsnet server. This causes any logs written by the operator to make their way to Tailscale in the same fashion as wireguard logs to be used by support. This functionality can also be opted-out of entirely using the "TS_NO_LOGS_NO_SUPPORT" environment variable. Updates https://github.com/tailscale/corp/issues/32037 Signed-off-by: David Bond --- cmd/k8s-operator/logger.go | 26 ++++++++++++++++++++++++++ cmd/k8s-operator/operator.go | 9 +++++++++ cmd/k8s-operator/sts.go | 19 +++++++++---------- tsnet/tsnet.go | 8 ++++---- 4 files changed, 48 insertions(+), 14 deletions(-) create mode 100644 cmd/k8s-operator/logger.go diff --git a/cmd/k8s-operator/logger.go b/cmd/k8s-operator/logger.go new file mode 100644 index 000000000..46b1fc0c8 --- /dev/null +++ b/cmd/k8s-operator/logger.go @@ -0,0 +1,26 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "io" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + kzap "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +// wrapZapCore returns a zapcore.Core implementation that splits the core chain using zapcore.NewTee. This causes +// logs to be simultaneously written to both the original core and the provided io.Writer implementation. +func wrapZapCore(core zapcore.Core, writer io.Writer) zapcore.Core { + encoder := &kzap.KubeAwareEncoder{ + Encoder: zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), + } + + // We use a tee logger here so that logs are written to stdout/stderr normally while at the same time being + // sent upstream. + return zapcore.NewTee(core, zapcore.NewCore(encoder, zapcore.AddSync(writer), zap.DebugLevel)) +} diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 76d2df51d..1d988eb03 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -44,6 +44,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager/signals" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/envknob" "tailscale.com/client/local" "tailscale.com/client/tailscale" @@ -133,6 +134,14 @@ func main() { } }() } + + // Operator log uploads can be opted-out using the "TS_NO_LOGS_NO_SUPPORT" environment variable. 
+ if !envknob.NoLogsNoSupport() { + zlog = zlog.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core { + return wrapZapCore(core, s.LogtailWriter()) + })) + } + rOpts := reconcilerOpts{ log: zlog, tsServer: s, diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 9a87d2643..80c9ca806 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -439,12 +439,12 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, logger *z } if orig != nil && !apiequality.Semantic.DeepEqual(latest, orig) { - logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(latestConfig)) + logger.With("config", sanitizeConfig(latestConfig)).Debugf("patching the existing proxy Secret") if err = a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil { return nil, err } } else { - logger.Debugf("creating a new Secret for the proxy with tailscaled config %s", sanitizeConfigBytes(latestConfig)) + logger.With("config", sanitizeConfig(latestConfig)).Debugf("creating a new Secret for the proxy") if err = a.Create(ctx, secret); err != nil { return nil, err } @@ -494,17 +494,16 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, logger *z return secretNames, nil } -// sanitizeConfigBytes returns ipn.ConfigVAlpha in string form with redacted -// auth key. -func sanitizeConfigBytes(c ipn.ConfigVAlpha) string { +// sanitizeConfig returns an ipn.ConfigVAlpha with sensitive fields redacted. Since we pump everything +// into JSON-encoded logs it's easier to read this with a .With method than converting it to a string. +func sanitizeConfig(c ipn.ConfigVAlpha) ipn.ConfigVAlpha { + // Explicitly redact AuthKey because we never want it appearing in logs. Never populate this with the + // actual auth key. if c.AuthKey != nil { c.AuthKey = ptr.To("**redacted**") } - sanitizedBytes, err := json.Marshal(c) - if err != nil { - return "invalid config" - } - return string(sanitizedBytes) + + return c } // DeviceInfo returns the device ID, hostname, IPs and capver for the Tailscale device that acts as an operator proxy. diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 978819519..08f08281a 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -495,14 +495,14 @@ func (s *Server) TailscaleIPs() (ip4, ip6 netip.Addr) { return ip4, ip6 } -// Logtailf returns a [logger.Logf] that outputs to Tailscale's logging service and will be only visible to Tailscale's +// LogtailWriter returns an [io.Writer] that writes to Tailscale's logging service and will be only visible to Tailscale's // support team. Logs written there cannot be retrieved by the user. This method always returns a non-nil value. -func (s *Server) Logtailf() logger.Logf { +func (s *Server) LogtailWriter() io.Writer { if s.logtail == nil { - return logger.Discard + return io.Discard } - return s.logtail.Logf + return s.logtail } func (s *Server) getAuthKey() string { From 6e128498a788e506921059a5c17acc9452195a5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Mon, 22 Sep 2025 09:16:13 -0400 Subject: [PATCH 1367/1708] controlclient/auto: switch eventbus to using a monitor (#17205) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Only changes how the go routine consuming the events starts and stops, not what it does. 
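For reference, the consumer shape that these eventbus.Monitor conversions share looks roughly like this. It is a minimal sketch using the eventbus names that appear in the diffs in this series, not the wiring of any one component:

package main

import (
    "fmt"

    "tailscale.com/util/eventbus"
)

type tick struct{ n int }

// consume builds the event loop: the subscription is created before the
// goroutine starts, so no topic registration races with Monitor.
func consume(cli *eventbus.Client) func(*eventbus.Client) {
    sub := eventbus.Subscribe[tick](cli)
    return func(cli *eventbus.Client) {
        for {
            select {
            case <-cli.Done():
                return
            case t := <-sub.Events():
                fmt.Println("tick", t.n)
            }
        }
    }
}

func main() {
    bus := eventbus.New()
    defer bus.Close()

    cli := bus.Client("consumer")
    mon := cli.Monitor(consume(cli))

    eventbus.Publish[tick](bus.Client("producer")).Publish(tick{n: 1})

    // Close shuts down the client and waits for the monitored goroutine
    // to return, replacing the hand-rolled subsDoneCh plumbing.
    mon.Close()
}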
Updates #15160 Signed-off-by: Claus Lensbøl --- control/controlclient/auto.go | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index bbc129c5e..9a654b679 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -123,9 +123,7 @@ type Auto struct { observerQueue execqueue.ExecQueue shutdownFn func() // to be called prior to shutdown or nil - eventClient *eventbus.Client - healthChangeSub *eventbus.Subscriber[health.Change] - subsDoneCh chan struct{} // close-only channel when eventClient has closed + eventSubs eventbus.Monitor mu sync.Mutex // mutex guards the following fields @@ -195,11 +193,11 @@ func NewNoStart(opts Options) (_ *Auto, err error) { updateDone: make(chan struct{}), observer: opts.Observer, shutdownFn: opts.Shutdown, - subsDoneCh: make(chan struct{}), } - c.eventClient = opts.Bus.Client("controlClient.Auto") - c.healthChangeSub = eventbus.Subscribe[health.Change](c.eventClient) + // Set up eventbus client and subscriber + ec := opts.Bus.Client("controlClient.Auto") + c.eventSubs = ec.Monitor(c.consumeEventbusTopics(ec)) c.authCtx, c.authCancel = context.WithCancel(context.Background()) c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, opts.Logf) @@ -207,7 +205,6 @@ func NewNoStart(opts Options) (_ *Auto, err error) { c.mapCtx, c.mapCancel = context.WithCancel(context.Background()) c.mapCtx = sockstats.WithSockStats(c.mapCtx, sockstats.LabelControlClientAuto, opts.Logf) - go c.consumeEventbusTopics() return c, nil } @@ -216,16 +213,17 @@ func NewNoStart(opts Options) (_ *Auto, err error) { // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the // [eventbus.Client] is closed. -func (c *Auto) consumeEventbusTopics() { - defer close(c.subsDoneCh) - - for { - select { - case <-c.eventClient.Done(): - return - case change := <-c.healthChangeSub.Events(): - if change.WarnableChanged { - c.direct.ReportWarnableChange(change.Warnable, change.UnhealthyState) +func (c *Auto) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { + healthChangeSub := eventbus.Subscribe[health.Change](ec) + return func(cli *eventbus.Client) { + for { + select { + case <-cli.Done(): + return + case change := <-healthChangeSub.Events(): + if change.WarnableChanged { + c.direct.ReportWarnableChange(change.Warnable, change.UnhealthyState) + } } } } @@ -784,8 +782,7 @@ func (c *Auto) UpdateEndpoints(endpoints []tailcfg.Endpoint) { } func (c *Auto) Shutdown() { - c.eventClient.Close() - <-c.subsDoneCh + c.eventSubs.Close() c.mu.Lock() if c.closed { From e59fbaab64ea1ba4f19ba586ca39a77af51bcd83 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 22 Sep 2025 07:07:57 -0700 Subject: [PATCH 1368/1708] util/eventbus: give a nicer error when attempting to use a closed client (#17208) It is a programming error to Publish or Subscribe on a closed Client, but now the way you discover that is by getting a panic from down in the machinery of the bus after the client state has been cleaned up. To provide a more helpful error, let's panic explicitly when that happens and say what went wrong ("the client is closed"), by preventing subscriptions from interleaving with closure of the client. 
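A minimal sketch of the new behavior (mirroring the regression tests
added below):

```
bus := eventbus.New()
c := bus.Client("test client")
c.Close()

// Both now fail eagerly with a descriptive panic instead of a panic
// from deep inside the bus machinery:
eventbus.Subscribe[string](c) // panics: "cannot Subscribe on a closed client"
eventbus.Publish[string](c)   // panics: "cannot Publish on a closed client"
```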
With this change, either an attachment fails outright (because the client is already closed) or completes and then shuts down in good order in the normal course. This does not change the semantics of the client, publishers, or subscribers, it's just making the failure more eager so we can attach explanatory text. Updates #15160 Change-Id: Ia492f4c1dea7535aec2cdcc2e5ea5410ed5218d2 Signed-off-by: M. J. Fromberger --- util/eventbus/bus_test.go | 39 ++++++++++++++++++++++++++++++++++++++- util/eventbus/client.go | 31 ++++++++++++++++++++++++++----- 2 files changed, 64 insertions(+), 6 deletions(-) diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index 7782634ae..67f68cd4a 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -257,8 +257,8 @@ func TestMonitor(t *testing.T) { cli := bus.Client("test client") // The monitored goroutine runs until the client or test subscription ends. + sub := eventbus.Subscribe[string](cli) m := cli.Monitor(func(c *eventbus.Client) { - sub := eventbus.Subscribe[string](cli) select { case <-c.Done(): t.Log("client closed") @@ -294,6 +294,43 @@ func TestMonitor(t *testing.T) { t.Run("Wait", testMon(t, func(c *eventbus.Client, m eventbus.Monitor) { c.Close(); m.Wait() })) } +func TestRegression(t *testing.T) { + bus := eventbus.New() + t.Cleanup(bus.Close) + + t.Run("SubscribeClosed", func(t *testing.T) { + c := bus.Client("test sub client") + c.Close() + + var v any + func() { + defer func() { v = recover() }() + eventbus.Subscribe[string](c) + }() + if v == nil { + t.Fatal("Expected a panic from Subscribe on a closed client") + } else { + t.Logf("Got expected panic: %v", v) + } + }) + + t.Run("PublishClosed", func(t *testing.T) { + c := bus.Client("test pub client") + c.Close() + + var v any + func() { + defer func() { v = recover() }() + eventbus.Publish[string](c) + }() + if v == nil { + t.Fatal("expected a panic from Publish on a closed client") + } else { + t.Logf("Got expected panic: %v", v) + } + }) +} + type queueChecker struct { t *testing.T want []any diff --git a/util/eventbus/client.go b/util/eventbus/client.go index 176b6f2bc..9b4119865 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -51,6 +51,8 @@ func (c *Client) Close() { c.stop.Stop() } +func (c *Client) isClosed() bool { return c.pub == nil && c.sub == nil } + // Done returns a channel that is closed when [Client.Close] is called. // The channel is closed after all the publishers and subscribers governed by // the client have been closed. @@ -83,6 +85,10 @@ func (c *Client) subscribeTypes() []reflect.Type { func (c *Client) subscribeState() *subscribeState { c.mu.Lock() defer c.mu.Unlock() + return c.subscribeStateLocked() +} + +func (c *Client) subscribeStateLocked() *subscribeState { if c.sub == nil { c.sub = newSubscribeState(c) } @@ -92,6 +98,9 @@ func (c *Client) subscribeState() *subscribeState { func (c *Client) addPublisher(pub publisher) { c.mu.Lock() defer c.mu.Unlock() + if c.isClosed() { + panic("cannot Publish on a closed client") + } c.pub.Add(pub) } @@ -117,17 +126,29 @@ func (c *Client) shouldPublish(t reflect.Type) bool { return c.publishDebug.active() || c.bus.shouldPublish(t) } -// Subscribe requests delivery of events of type T through the given -// Queue. Panics if the queue already has a subscriber for T. +// Subscribe requests delivery of events of type T through the given client. +// It panics if c already has a subscriber for type T, or if c is closed. 
func Subscribe[T any](c *Client) *Subscriber[T] { - r := c.subscribeState() + // Hold the client lock throughout the subscription process so that a caller + // attempting to subscribe on a closed client will get a useful diagnostic + // instead of a random panic from inside the subscriber plumbing. + c.mu.Lock() + defer c.mu.Unlock() + + // The caller should not race subscriptions with close, give them a useful + // diagnostic at the call site. + if c.isClosed() { + panic("cannot Subscribe on a closed client") + } + + r := c.subscribeStateLocked() s := newSubscriber[T](r) r.addSubscriber(s) return s } -// Publish returns a publisher for event type T using the given -// client. +// Publish returns a publisher for event type T using the given client. +// It panics if c is closed. func Publish[T any](c *Client) *Publisher[T] { p := newPublisher[T](c) c.addPublisher(p) From 1b5201023fd2a07f9b4f30331daaf3ed39086844 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 22 Sep 2025 08:43:39 -0700 Subject: [PATCH 1369/1708] ipn/ipnlocal: use eventbus.Monitor in LocalBackend (#17225) This commit does not change the order or meaning of any eventbus activity, it only updates the way the plumbing is set up. Updates #15160 Change-Id: I06860ac4e43952a9bb4d85366138c9d9a17fd9cd Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/local.go | 68 ++++++++++++++++++++----------------------- 1 file changed, 32 insertions(+), 36 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 7ac8f0ecb..4af0a3aa6 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -197,18 +197,14 @@ var ( // state machine generates events back out to zero or more components. type LocalBackend struct { // Elements that are thread-safe or constant after construction. 
- ctx context.Context // canceled by [LocalBackend.Shutdown] - ctxCancel context.CancelCauseFunc // cancels ctx - logf logger.Logf // general logging - keyLogf logger.Logf // for printing list of peers on change - statsLogf logger.Logf // for printing peers stats on change - sys *tsd.System - eventClient *eventbus.Client - clientVersionSub *eventbus.Subscriber[tailcfg.ClientVersion] - autoUpdateSub *eventbus.Subscriber[controlclient.AutoUpdate] - healthChangeSub *eventbus.Subscriber[health.Change] - changeDeltaSub *eventbus.Subscriber[netmon.ChangeDelta] - subsDoneCh chan struct{} // closed when consumeEventbusTopics returns + ctx context.Context // canceled by [LocalBackend.Shutdown] + ctxCancel context.CancelCauseFunc // cancels ctx + logf logger.Logf // general logging + keyLogf logger.Logf // for printing list of peers on change + statsLogf logger.Logf // for printing peers stats on change + sys *tsd.System + eventSubs eventbus.Monitor + health *health.Tracker // always non-nil polc policyclient.Client // always non-nil metrics metrics @@ -538,13 +534,10 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo captiveCtx: captiveCtx, captiveCancel: nil, // so that we start checkCaptivePortalLoop when Running needsCaptiveDetection: make(chan bool), - subsDoneCh: make(chan struct{}), } - b.eventClient = b.Sys().Bus.Get().Client("ipnlocal.LocalBackend") - b.clientVersionSub = eventbus.Subscribe[tailcfg.ClientVersion](b.eventClient) - b.autoUpdateSub = eventbus.Subscribe[controlclient.AutoUpdate](b.eventClient) - b.healthChangeSub = eventbus.Subscribe[health.Change](b.eventClient) - b.changeDeltaSub = eventbus.Subscribe[netmon.ChangeDelta](b.eventClient) + ec := b.Sys().Bus.Get().Client("ipnlocal.LocalBackend") + b.eventSubs = ec.Monitor(b.consumeEventbusTopics(ec)) + nb := newNodeBackend(ctx, b.sys.Bus.Get()) b.currentNodeAtomic.Store(nb) nb.ready() @@ -611,7 +604,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } } } - go b.consumeEventbusTopics() return b, nil } @@ -620,21 +612,26 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the // [eventbus.Client] is closed. 
-func (b *LocalBackend) consumeEventbusTopics() { - defer close(b.subsDoneCh) - - for { - select { - case <-b.eventClient.Done(): - return - case clientVersion := <-b.clientVersionSub.Events(): - b.onClientVersion(&clientVersion) - case au := <-b.autoUpdateSub.Events(): - b.onTailnetDefaultAutoUpdate(au.Value) - case change := <-b.healthChangeSub.Events(): - b.onHealthChange(change) - case changeDelta := <-b.changeDeltaSub.Events(): - b.linkChange(&changeDelta) +func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { + clientVersionSub := eventbus.Subscribe[tailcfg.ClientVersion](ec) + autoUpdateSub := eventbus.Subscribe[controlclient.AutoUpdate](ec) + healthChangeSub := eventbus.Subscribe[health.Change](ec) + changeDeltaSub := eventbus.Subscribe[netmon.ChangeDelta](ec) + + return func(ec *eventbus.Client) { + for { + select { + case <-ec.Done(): + return + case clientVersion := <-clientVersionSub.Events(): + b.onClientVersion(&clientVersion) + case au := <-autoUpdateSub.Events(): + b.onTailnetDefaultAutoUpdate(au.Value) + case change := <-healthChangeSub.Events(): + b.onHealthChange(change) + case changeDelta := <-changeDeltaSub.Events(): + b.linkChange(&changeDelta) + } } } } @@ -1103,8 +1100,7 @@ func (b *LocalBackend) Shutdown() { // they can deadlock with c.Shutdown(). // 2. LocalBackend.consumeEventbusTopics event handlers may not guard against // undesirable post/in-progress LocalBackend.Shutdown() behaviors. - b.eventClient.Close() - <-b.subsDoneCh + b.eventSubs.Close() b.em.close() From f67ad67c6f0588ce001ee1034a776e384b1fd1f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Mon, 22 Sep 2025 13:14:55 -0400 Subject: [PATCH 1370/1708] control/controlclient: switch ID to be incrementing instead of random (#17230) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also cleans up a a few comments. Updates #15160 Signed-off-by: Claus Lensbøl --- control/controlclient/direct.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 991767e5d..ffac7e947 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -14,7 +14,6 @@ import ( "fmt" "io" "log" - "math/rand/v2" "net" "net/http" "net/netip" @@ -221,6 +220,8 @@ type NetmapDeltaUpdater interface { UpdateNetmapDelta([]netmap.NodeMutation) (ok bool) } +var nextControlClientID atomic.Int64 + // NewDirect returns a new Direct client. func NewDirect(opts Options) (*Direct, error) { if opts.ServerURL == "" { @@ -314,7 +315,7 @@ func NewDirect(opts Options) (*Direct, error) { } c.closedCtx, c.closeCtx = context.WithCancel(context.Background()) - c.controlClientID = rand.Int64() + c.controlClientID = nextControlClientID.Add(1) if opts.Hostinfo == nil { c.SetHostinfo(hostinfo.New()) @@ -835,21 +836,21 @@ func (c *Direct) SendUpdate(ctx context.Context) error { return c.sendMapRequest(ctx, false, nil) } -// ClientID returns the ControlClientID of the controlClient +// ClientID returns the controlClientID of the controlClient. func (c *Direct) ClientID() int64 { return c.controlClientID } -// AutoUpdate wraps a bool for naming on the eventbus +// AutoUpdate is an eventbus value, reporting the value of tailcfg.MapResponse.DefaultAutoUpdate. 
type AutoUpdate struct { - ClientID int64 // The ID field is used for consumers to differentiate instances of Direct - Value bool + ClientID int64 // The ID field is used for consumers to differentiate instances of Direct. + Value bool // The Value represents DefaultAutoUpdate from [tailcfg.MapResponse]. } -// ControlTime wraps a [time.Time] for naming on the eventbus +// ControlTime is an eventbus value, reporting the value of tailcfg.MapResponse.ControlTime. type ControlTime struct { - ClientID int64 // The ID field is used for consumers to differentiate instances of Direct - Value time.Time + ClientID int64 // The ID field is used for consumers to differentiate instances of Direct. + Value time.Time // The Value represents ControlTime from [tailcfg.MapResponse]. } // If we go more than watchdogTimeout without hearing from the server, From 5e79e497d3682741ce192d245fd193322c03b85a Mon Sep 17 00:00:00 2001 From: Mahyar Mirrashed <59240843+mahyarmirrashed@users.noreply.github.com> Date: Mon, 22 Sep 2025 12:37:27 -0500 Subject: [PATCH 1371/1708] cmd/tailscale/cli: show last seen time on status command (#16588) Add a last seen time on the cli's status command, similar to the web portal. Before: ``` 100.xxx.xxx.xxx tailscale-operator tagged-devices linux offline ``` After: ``` 100.xxx.xxx.xxx tailscale-operator tagged-devices linux offline, last seen 20d ago ``` Fixes #16584 Signed-off-by: Mahyar Mirrashed --- cmd/tailscale/cli/cli.go | 17 +++++++++++++++++ cmd/tailscale/cli/exitnode.go | 6 ++++-- cmd/tailscale/cli/status.go | 6 +++--- 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index dfc8f3249..5206fdd58 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -18,6 +18,7 @@ import ( "strings" "sync" "text/tabwriter" + "time" "github.com/mattn/go-colorable" "github.com/mattn/go-isatty" @@ -538,3 +539,19 @@ func jsonDocsWalk(cmd *ffcli.Command) *commandDoc { } return res } + +func lastSeenFmt(t time.Time) string { + if t.IsZero() { + return "" + } + d := max(time.Since(t), time.Minute) // at least 1 minute + + switch { + case d < time.Hour: + return fmt.Sprintf(", last seen %dm ago", int(d.Minutes())) + case d < 24*time.Hour: + return fmt.Sprintf(", last seen %dh ago", int(d.Hours())) + default: + return fmt.Sprintf(", last seen %dd ago", int(d.Hours()/24)) + } +} diff --git a/cmd/tailscale/cli/exitnode.go b/cmd/tailscale/cli/exitnode.go index b153f096d..b47b9f0bd 100644 --- a/cmd/tailscale/cli/exitnode.go +++ b/cmd/tailscale/cli/exitnode.go @@ -173,11 +173,13 @@ func hasAnyExitNodeSuggestions(peers []*ipnstate.PeerStatus) bool { // a peer. If there is no notable state, a - is returned. 
func peerStatus(peer *ipnstate.PeerStatus) string { if !peer.Active { + lastseen := lastSeenFmt(peer.LastSeen) + if peer.ExitNode { - return "selected but offline" + return "selected but offline" + lastseen } if !peer.Online { - return "offline" + return "offline" + lastseen } } diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index 97f6708db..94e0977fe 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -164,7 +164,7 @@ func runStatus(ctx context.Context, args []string) error { anyTraffic := ps.TxBytes != 0 || ps.RxBytes != 0 var offline string if !ps.Online { - offline = "; offline" + offline = "; offline" + lastSeenFmt(ps.LastSeen) } if !ps.Active { if ps.ExitNode { @@ -174,7 +174,7 @@ func runStatus(ctx context.Context, args []string) error { } else if anyTraffic { f("idle" + offline) } else if !ps.Online { - f("offline") + f("offline" + lastSeenFmt(ps.LastSeen)) } else { f("-") } @@ -193,7 +193,7 @@ func runStatus(ctx context.Context, args []string) error { f("peer-relay %s", ps.PeerRelay) } if !ps.Online { - f("; offline") + f(offline) } } if anyTraffic { From daad5c2b5c6753dd1ffccffeb6e3adb4c4a36fe8 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 22 Sep 2025 10:49:28 -0700 Subject: [PATCH 1372/1708] wgengine/router: use eventbus.Monitor in linuxRouter (#17232) This commit does not change the order or meaning of any eventbus activity, it only updates the way the plumbing is set up. Updates #15160 Change-Id: I61b863f9c05459d530a4c34063a8bad9046c0e27 Signed-off-by: M. J. Fromberger --- wgengine/router/router_linux.go | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/wgengine/router/router_linux.go b/wgengine/router/router_linux.go index a9edd7f96..dc1425708 100644 --- a/wgengine/router/router_linux.go +++ b/wgengine/router/router_linux.go @@ -49,8 +49,7 @@ type linuxRouter struct { tunname string netMon *netmon.Monitor health *health.Tracker - eventClient *eventbus.Client - ruleDeletedSub *eventbus.Subscriber[netmon.RuleDeleted] + eventSubs eventbus.Monitor rulesAddedPub *eventbus.Publisher[AddIPRules] unregNetMon func() addrs map[netip.Prefix]bool @@ -100,7 +99,6 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon tunname: tunname, netfilterMode: netfilterOff, netMon: netMon, - eventClient: bus.Client("router-linux"), health: health, cmd: cmd, @@ -108,9 +106,9 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon ipRuleFixLimiter: rate.NewLimiter(rate.Every(5*time.Second), 10), ipPolicyPrefBase: 5200, } - r.ruleDeletedSub = eventbus.Subscribe[netmon.RuleDeleted](r.eventClient) - r.rulesAddedPub = eventbus.Publish[AddIPRules](r.eventClient) - go r.consumeEventbusTopics() + ec := bus.Client("router-linux") + r.rulesAddedPub = eventbus.Publish[AddIPRules](ec) + r.eventSubs = ec.Monitor(r.consumeEventbusTopics(ec)) if r.useIPCommand() { r.ipRuleAvailable = (cmd.run("ip", "rule") == nil) @@ -159,13 +157,16 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the // [eventbus.Client] is closed. 
-func (r *linuxRouter) consumeEventbusTopics() { - for { - select { - case <-r.eventClient.Done(): - return - case rulesDeleted := <-r.ruleDeletedSub.Events(): - r.onIPRuleDeleted(rulesDeleted.Table, rulesDeleted.Priority) +func (r *linuxRouter) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { + ruleDeletedSub := eventbus.Subscribe[netmon.RuleDeleted](ec) + return func(ec *eventbus.Client) { + for { + select { + case <-ec.Done(): + return + case rs := <-ruleDeletedSub.Events(): + r.onIPRuleDeleted(rs.Table, rs.Priority) + } } } } @@ -362,7 +363,7 @@ func (r *linuxRouter) Close() error { if r.unregNetMon != nil { r.unregNetMon() } - r.eventClient.Close() + r.eventSubs.Close() if err := r.downInterface(); err != nil { return err } From 15b3876c2c4ac98d966a2cfafce3c3411a9ecd40 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 22 Sep 2025 10:50:38 -0700 Subject: [PATCH 1373/1708] client/systray: use new tailnet display name is profile title Updates tailscale/corp#30456 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/systray/systray.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/systray/systray.go b/client/systray/systray.go index 536cfe182..4ac080588 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -339,9 +339,9 @@ func profileTitle(profile ipn.LoginProfile) string { if profile.NetworkProfile.DomainName != "" { if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { // windows and mac don't support multi-line menu - title += " (" + profile.NetworkProfile.DomainName + ")" + title += " (" + profile.NetworkProfile.DisplayNameOrDefault() + ")" } else { - title += "\n" + profile.NetworkProfile.DomainName + title += "\n" + profile.NetworkProfile.DisplayNameOrDefault() } } return title From e582fb9b53e56c39353b665f92eb7a2aeacdbf1d Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 22 Sep 2025 10:48:45 -0700 Subject: [PATCH 1374/1708] client/web: use network profile for displaying tailnet info Also update to use the new DisplayNameOrDefault. Updates tailscale/corp#30456 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/web/web.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/client/web/web.go b/client/web/web.go index 71a015dab..d88239843 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -978,9 +978,18 @@ func (s *Server) serveGetNodeData(w http.ResponseWriter, r *http.Request) { data.ClientVersion = cv } - if st.CurrentTailnet != nil { - data.TailnetName = st.CurrentTailnet.MagicDNSSuffix - data.DomainName = st.CurrentTailnet.Name + profile, _, err := s.lc.ProfileStatus(r.Context()) + if err != nil { + s.logf("error fetching profiles: %v", err) + // If for some reason we can't fetch profiles, + // continue to use st.CurrentTailnet if set. + if st.CurrentTailnet != nil { + data.TailnetName = st.CurrentTailnet.MagicDNSSuffix + data.DomainName = st.CurrentTailnet.Name + } + } else { + data.TailnetName = profile.NetworkProfile.MagicDNSName + data.DomainName = profile.NetworkProfile.DisplayNameOrDefault() } if st.Self.Tags != nil { data.Tags = st.Self.Tags.AsSlice() From e3307fbce137853a0cf77b4feeeafee58f938a05 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Mon, 22 Sep 2025 16:21:21 -0500 Subject: [PATCH 1375/1708] cmd/tailscale: omit the `drive` subcommand in MacOS GUI apps In MacOS GUI apps, users have to select folders to share via the GUI. 
This is both because the GUI app keeps its own record of shares, and because the sandboxed version of the GUI app needs to gain access to the shared folders by having the user pick them in a file selector. The new build tag `ts_mac_gui` allows the MacOS GUI app build to signal that this is a MacOS GUI app, which causes the `drive` subcommand to be omitted so that people do not mistakenly attempt to use it. Updates tailscale/tailscale#17210 Signed-off-by: Percy Wegmann --- cmd/tailscale/cli/drive.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/drive.go b/cmd/tailscale/cli/drive.go index 67536ace0..131f46847 100644 --- a/cmd/tailscale/cli/drive.go +++ b/cmd/tailscale/cli/drive.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ts_omit_drive +//go:build !ts_omit_drive && !ts_mac_gui package cli From 1791f878708ec31ef4622222a5858217e749e777 Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 22 Sep 2025 15:02:38 +0000 Subject: [PATCH 1376/1708] licenses: update license notices Signed-off-by: License Updater --- licenses/apple.md | 15 +++++++-------- licenses/tailscale.md | 2 -- licenses/windows.md | 17 ++++++++--------- 3 files changed, 15 insertions(+), 19 deletions(-) diff --git a/licenses/apple.md b/licenses/apple.md index 91ba96698..6b6d47045 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -33,7 +33,7 @@ See also the dependencies in the [Tailscale CLI][]. - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/cc2cfa0554c3/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) @@ -53,7 +53,6 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) ([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.65/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.22/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) @@ -68,13 +67,13 @@ See also the dependencies in the [Tailscale CLI][]. - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.41.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.42.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.43.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.16.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.35.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.34.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.28.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.44.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.17.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.36.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.35.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.29.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) 
([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 6feb85aaf..b15b93744 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -65,7 +65,6 @@ Some packages may only be included on certain architectures or operating systems - [github.com/mattn/go-colorable](https://pkg.go.dev/github.com/mattn/go-colorable) ([MIT](https://github.com/mattn/go-colorable/blob/v0.1.13/LICENSE)) - [github.com/mattn/go-isatty](https://pkg.go.dev/github.com/mattn/go-isatty) ([MIT](https://github.com/mattn/go-isatty/blob/v0.0.20/LICENSE)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/peterbourgon/ff/v3](https://pkg.go.dev/github.com/peterbourgon/ff/v3) ([Apache-2.0](https://github.com/peterbourgon/ff/blob/v3.4.0/LICENSE)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) @@ -101,7 +100,6 @@ Some packages may only be included on certain architectures or operating systems - [k8s.io/client-go/util/homedir](https://pkg.go.dev/k8s.io/client-go/util/homedir) ([Apache-2.0](https://github.com/kubernetes/client-go/blob/v0.32.0/LICENSE)) - [sigs.k8s.io/yaml](https://pkg.go.dev/sigs.k8s.io/yaml) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE)) - [sigs.k8s.io/yaml/goyaml.v2](https://pkg.go.dev/sigs.k8s.io/yaml/goyaml.v2) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/goyaml.v2/LICENSE)) - - [software.sslmate.com/src/go-pkcs12](https://pkg.go.dev/software.sslmate.com/src/go-pkcs12) ([BSD-3-Clause](https://github.com/SSLMate/go-pkcs12/blob/v0.4.0/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) - [tailscale.com/tempfork/gliderlabs/ssh](https://pkg.go.dev/tailscale.com/tempfork/gliderlabs/ssh) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/tempfork/gliderlabs/ssh/LICENSE)) - [tailscale.com/tempfork/spf13/cobra](https://pkg.go.dev/tailscale.com/tempfork/spf13/cobra) ([Apache-2.0](https://github.com/tailscale/tailscale/blob/HEAD/tempfork/spf13/cobra/LICENSE.txt)) diff --git a/licenses/windows.md b/licenses/windows.md index aff149d4d..37c41ca3f 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -36,7 +36,7 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/cc2cfa0554c3/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/go-cmp/cmp](https://pkg.go.dev/github.com/google/go-cmp/cmp) ([BSD-3-Clause](https://github.com/google/go-cmp/blob/v0.7.0/LICENSE)) @@ -52,7 +52,6 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.0/zstd/internal/xxhash/LICENSE.txt)) - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.65/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/munnerz/goautoneg](https://pkg.go.dev/github.com/munnerz/goautoneg) ([BSD-3-Clause](https://github.com/munnerz/goautoneg/blob/a7dc8b61c822/LICENSE)) - [github.com/nfnt/resize](https://pkg.go.dev/github.com/nfnt/resize) ([ISC](https://github.com/nfnt/resize/blob/83c6a9932646/LICENSE)) @@ -72,15 +71,15 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.41.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.42.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) - - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.26.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.43.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.16.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.35.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.34.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.28.0:LICENSE)) + - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.27.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.44.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.17.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.36.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.35.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.29.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.7/LICENSE)) From 4af15a11482d40caa4b4a2a7db244d385965ced8 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Tue, 23 Sep 2025 13:35:22 -0400 Subject: [PATCH 1377/1708] magicsock: fix deadlock in SetStaticEndpoints (#17247) updates tailscale/corp#32600 A localAPI/cli call to reload-config can end up leaving magicsock's mutex locked. We were missing an unlock for the early exit where there's no change in the static endpoints when the disk-based config is loaded. 
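Sketch of the fixed early-exit path (the one-line change below adds
the Unlock):

```
c.mu.Lock()
if reflect.DeepEqual(c.staticEndpoints.AsSlice(), ep.AsSlice()) {
	c.mu.Unlock() // previously missing, leaving c.mu held on this path
	return
}
```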
This is not likely the root cause of the linked issue - just noted during investigation. Signed-off-by: Jonathan Nobels --- wgengine/magicsock/magicsock.go | 1 + 1 file changed, 1 insertion(+) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 39a7bb2e6..72fff3411 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1015,6 +1015,7 @@ func (c *Conn) setEndpoints(endpoints []tailcfg.Endpoint) (changed bool) { func (c *Conn) SetStaticEndpoints(ep views.Slice[netip.AddrPort]) { c.mu.Lock() if reflect.DeepEqual(c.staticEndpoints.AsSlice(), ep.AsSlice()) { + c.mu.Unlock() return } c.staticEndpoints = ep From 87ccfbd2500cb6078be43bf7fe08e596faa06201 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 23 Sep 2025 14:42:11 -0700 Subject: [PATCH 1378/1708] ipn/ipnlocal: fix eventbus data race Fixes #17252 Change-Id: Id969fca750a48fb43431c53f3e0631bd9bd496d1 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 4af0a3aa6..ce42ae75a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -535,8 +535,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo captiveCancel: nil, // so that we start checkCaptivePortalLoop when Running needsCaptiveDetection: make(chan bool), } - ec := b.Sys().Bus.Get().Client("ipnlocal.LocalBackend") - b.eventSubs = ec.Monitor(b.consumeEventbusTopics(ec)) nb := newNodeBackend(ctx, b.sys.Bus.Get()) b.currentNodeAtomic.Store(nb) @@ -604,6 +602,12 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } } } + + // Start the event bus late, once all the assignments above are done. 
+ // (See previous race in tailscale/tailscale#17252) + ec := b.Sys().Bus.Get().Client("ipnlocal.LocalBackend") + b.eventSubs = ec.Monitor(b.consumeEventbusTopics(ec)) + return b, nil } From 4657cbdb11c632cc95fa35241a2d058665ce2f12 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 23 Sep 2025 15:26:10 -0700 Subject: [PATCH 1379/1708] client, cmd/tailscale/cli, feature/relayserver, net/udprelay: implement tailscale debug peer-relay-sessions (#17239) Fixes tailscale/corp#30035 Signed-off-by: Dylan Bargatze Signed-off-by: Jordan Whited Co-authored-by: Dylan Bargatze --- client/local/local.go | 11 ++++ cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/cli/debug-peer-relay.go | 77 ++++++++++++++++++++++++ cmd/tailscale/cli/debug.go | 6 +- cmd/tailscale/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + feature/relayserver/relayserver.go | 83 ++++++++++++++++++++++++-- net/udprelay/server.go | 45 ++++++++++++++ net/udprelay/status/status.go | 75 +++++++++++++++++++++++ tsnet/depaware.txt | 1 + tstest/integration/integration_test.go | 45 +++++++++++++- 13 files changed, 341 insertions(+), 7 deletions(-) create mode 100644 cmd/tailscale/cli/debug-peer-relay.go create mode 100644 net/udprelay/status/status.go diff --git a/client/local/local.go b/client/local/local.go index a606fbdf3..1be1f2ca7 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -33,6 +33,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/net/netutil" + "tailscale.com/net/udprelay/status" "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/tailcfg" @@ -1184,6 +1185,16 @@ func (lc *Client) DebugSetExpireIn(ctx context.Context, d time.Duration) error { return err } +// DebugPeerRelaySessions returns debug information about the current peer +// relay sessions running through this node. +func (lc *Client) DebugPeerRelaySessions(ctx context.Context) (*status.ServerStatus, error) { + body, err := lc.send(ctx, "GET", "/localapi/v0/debug-peer-relay-sessions", 200, nil) + if err != nil { + return nil, fmt.Errorf("error %w: %s", err, body) + } + return decodeJSON[*status.ServerStatus](body) +} + // StreamDebugCapture streams a pcap-formatted packet capture. 
// // The provided context does not determine the lifetime of the diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 61e42ede1..b0501b588 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -122,6 +122,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/derp/derphttp+ + tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/net/wsconn from tailscale.com/cmd/derper tailscale.com/paths from tailscale.com/client/local 💣 tailscale.com/safesocket from tailscale.com/client/local diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 442a96611..e0fdc27bb 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -883,6 +883,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/cmd/tailscale/cli/debug-peer-relay.go b/cmd/tailscale/cli/debug-peer-relay.go new file mode 100644 index 000000000..bef8b8369 --- /dev/null +++ b/cmd/tailscale/cli/debug-peer-relay.go @@ -0,0 +1,77 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios && !ts_omit_relayserver + +package cli + +import ( + "bytes" + "cmp" + "context" + "fmt" + "net/netip" + "slices" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/net/udprelay/status" +) + +func init() { + debugPeerRelayCmd = mkDebugPeerRelaySessionsCmd +} + +func mkDebugPeerRelaySessionsCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "peer-relay-sessions", + ShortUsage: "tailscale debug peer-relay-sessions", + Exec: runPeerRelaySessions, + ShortHelp: "Print the current set of active peer relay sessions relayed through this node", + } +} + +func runPeerRelaySessions(ctx context.Context, args []string) error { + srv, err := localClient.DebugPeerRelaySessions(ctx) + if err != nil { + return err + } + + var buf bytes.Buffer + f := func(format string, a ...any) { fmt.Fprintf(&buf, format, a...) 
} + + f("Server port: ") + if srv.UDPPort == nil { + f("not configured (you can configure the port with 'tailscale set --relay-server-port=')") + } else { + f("%d", *srv.UDPPort) + } + f("\n") + f("Sessions count: %d\n", len(srv.Sessions)) + if len(srv.Sessions) == 0 { + Stdout.Write(buf.Bytes()) + return nil + } + + fmtSessionDirection := func(a, z status.ClientInfo) string { + fmtEndpoint := func(ap netip.AddrPort) string { + if ap.IsValid() { + return ap.String() + } + return "" + } + return fmt.Sprintf("%s(%s) --> %s(%s), Packets: %d Bytes: %d", + fmtEndpoint(a.Endpoint), a.ShortDisco, + fmtEndpoint(z.Endpoint), z.ShortDisco, + a.PacketsTx, a.BytesTx) + } + + f("\n") + slices.SortFunc(srv.Sessions, func(s1, s2 status.ServerSession) int { return cmp.Compare(s1.VNI, s2.VNI) }) + for _, s := range srv.Sessions { + f("VNI: %d\n", s.VNI) + f(" %s\n", fmtSessionDirection(s.Client1, s.Client2)) + f(" %s\n", fmtSessionDirection(s.Client2, s.Client1)) + } + Stdout.Write(buf.Bytes()) + return nil +} diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index b3170d000..c8a0d57c1 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -49,8 +49,9 @@ import ( ) var ( - debugCaptureCmd func() *ffcli.Command // or nil - debugPortmapCmd func() *ffcli.Command // or nil + debugCaptureCmd func() *ffcli.Command // or nil + debugPortmapCmd func() *ffcli.Command // or nil + debugPeerRelayCmd func() *ffcli.Command // or nil ) func debugCmd() *ffcli.Command { @@ -374,6 +375,7 @@ func debugCmd() *ffcli.Command { return fs })(), }, + ccall(debugPeerRelayCmd), }...), } } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index b9b7db525..deeb9c3a3 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -143,6 +143,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + tailscale.com/net/udprelay/status from tailscale.com/client/local+ tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/syncs from tailscale.com/control/controlhttp+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 22f80d5d7..f85063ddb 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -358,6 +358,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay from tailscale.com/feature/relayserver tailscale.com/net/udprelay/endpoint from tailscale.com/feature/relayserver+ + tailscale.com/net/udprelay/status from tailscale.com/client/local+ tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index d92a0b41a..f6bab6978 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -314,6 +314,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/net/udprelay/status from tailscale.com/client/local 
tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index d77d7145a..91d07484c 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -6,7 +6,10 @@ package relayserver import ( + "encoding/json" + "fmt" "log" + "net/http" "net/netip" "strings" "sync" @@ -16,8 +19,10 @@ import ( "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/localapi" "tailscale.com/net/udprelay" "tailscale.com/net/udprelay/endpoint" + "tailscale.com/net/udprelay/status" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" @@ -33,6 +38,32 @@ const featureName = "relayserver" func init() { feature.Register(featureName) ipnext.RegisterExtension(featureName, newExtension) + localapi.Register("debug-peer-relay-sessions", servePeerRelayDebugSessions) +} + +// servePeerRelayDebugSessions is an HTTP handler for the Local API that +// returns debug/status information for peer relay sessions being relayed by +// this Tailscale node. It writes a JSON-encoded [status.ServerStatus] into the +// HTTP response, or returns an HTTP 405/500 with error text as the body. +func servePeerRelayDebugSessions(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + var e *extension + if ok := h.LocalBackend().FindMatchingExtension(&e); !ok { + http.Error(w, "peer relay server extension unavailable", http.StatusInternalServerError) + return + } + + st := e.serverStatus() + j, err := json.Marshal(st) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal json: %v", err), http.StatusInternalServerError) + return + } + w.Write(j) } // newExtension is an [ipnext.NewExtensionFn] that creates a new relay server @@ -53,16 +84,18 @@ type extension struct { mu sync.Mutex // guards the following fields shutdown bool - port *int // ipn.Prefs.RelayServerPort, nil if disabled - disconnectFromBusCh chan struct{} // non-nil if consumeEventbusTopics is running, closed to signal it to return - busDoneCh chan struct{} // non-nil if consumeEventbusTopics is running, closed when it returns - hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer + port *int // ipn.Prefs.RelayServerPort, nil if disabled + disconnectFromBusCh chan struct{} // non-nil if consumeEventbusTopics is running, closed to signal it to return + busDoneCh chan struct{} // non-nil if consumeEventbusTopics is running, closed when it returns + debugSessionsCh chan chan []status.ServerSession // non-nil if consumeEventbusTopics is running + hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer } // relayServer is the interface of [udprelay.Server]. type relayServer interface { AllocateEndpoint(discoA key.DiscoPublic, discoB key.DiscoPublic) (endpoint.ServerEndpoint, error) Close() error + GetSessions() []status.ServerSession } // Name implements [ipnext.Extension]. 
@@ -93,6 +126,7 @@ func (e *extension) handleBusLifetimeLocked() { port := *e.port e.disconnectFromBusCh = make(chan struct{}) e.busDoneCh = make(chan struct{}) + e.debugSessionsCh = make(chan chan []status.ServerSession) go e.consumeEventbusTopics(port) } @@ -139,6 +173,11 @@ var overrideAddrs = sync.OnceValue(func() (ret []netip.Addr) { return }) +// consumeEventbusTopics serves endpoint allocation requests over the eventbus. +// It also serves [relayServer] debug information on a channel. +// consumeEventbusTopics must never acquire [extension.mu], which can be held by +// other goroutines while waiting to receive on [extension.busDoneCh] or the +// inner [extension.debugSessionsCh] channel. func (e *extension) consumeEventbusTopics(port int) { defer close(e.busDoneCh) @@ -159,6 +198,14 @@ func (e *extension) consumeEventbusTopics(port int) { return case <-eventClient.Done(): return + case respCh := <-e.debugSessionsCh: + if rs == nil { + // Don't initialize the server simply for a debug request. + respCh <- nil + continue + } + sessions := rs.GetSessions() + respCh <- sessions case req := <-reqSub.Events(): if rs == nil { var err error @@ -199,6 +246,7 @@ func (e *extension) disconnectFromBusLocked() { <-e.busDoneCh e.busDoneCh = nil e.disconnectFromBusCh = nil + e.debugSessionsCh = nil } } @@ -210,3 +258,30 @@ func (e *extension) Shutdown() error { e.shutdown = true return nil } + +// serverStatus gathers and returns current peer relay server status information +// for this Tailscale node, and status of each peer relay session this node is +// relaying (if any). +func (e *extension) serverStatus() status.ServerStatus { + e.mu.Lock() + defer e.mu.Unlock() + + st := status.ServerStatus{ + UDPPort: nil, + Sessions: nil, + } + if e.port == nil || e.busDoneCh == nil { + return st + } + st.UDPPort = ptr.To(*e.port) + + ch := make(chan []status.ServerSession) + select { + case e.debugSessionsCh <- ch: + resp := <-ch + st.Sessions = resp + return st + case <-e.busDoneCh: + return st + } +} diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 123813c16..424c7a617 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -31,6 +31,7 @@ import ( "tailscale.com/net/sockopts" "tailscale.com/net/stun" "tailscale.com/net/udprelay/endpoint" + "tailscale.com/net/udprelay/status" "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" @@ -95,6 +96,8 @@ type serverEndpoint struct { boundAddrPorts [2]netip.AddrPort // or zero value if a handshake has never completed for that relay leg lastSeen [2]time.Time // TODO(jwhited): consider using mono.Time challenge [2][disco.BindUDPRelayChallengeLen]byte + packetsRx [2]uint64 // num packets received from/sent by each client after they are bound + bytesRx [2]uint64 // num bytes received from/sent by each client after they are bound lamportID uint64 vni uint32 @@ -223,9 +226,13 @@ func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeade switch { case from == e.boundAddrPorts[0]: e.lastSeen[0] = time.Now() + e.packetsRx[0]++ + e.bytesRx[0] += uint64(len(b)) return b, e.boundAddrPorts[1] case from == e.boundAddrPorts[1]: e.lastSeen[1] = time.Now() + e.packetsRx[1]++ + e.bytesRx[1] += uint64(len(b)) return b, e.boundAddrPorts[0] default: // unrecognized source @@ -782,3 +789,41 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv SteadyStateLifetime: tstime.GoDuration{Duration: s.steadyStateLifetime}, }, nil } + +// extractClientInfo constructs a [status.ClientInfo] 
for one of the two peer +// relay clients involved in this session. +func extractClientInfo(idx int, ep *serverEndpoint) status.ClientInfo { + if idx != 0 && idx != 1 { + panic(fmt.Sprintf("idx passed to extractClientInfo() must be 0 or 1; got %d", idx)) + } + + return status.ClientInfo{ + Endpoint: ep.boundAddrPorts[idx], + ShortDisco: ep.discoPubKeys.Get()[idx].ShortString(), + PacketsTx: ep.packetsRx[idx], + BytesTx: ep.bytesRx[idx], + } +} + +// GetSessions returns a slice of peer relay session statuses, with each +// entry containing detailed info about the server and clients involved in +// each session. This information is intended for debugging/status UX, and +// should not be relied on for any purpose outside of that. +func (s *Server) GetSessions() []status.ServerSession { + s.mu.Lock() + defer s.mu.Unlock() + if s.closed { + return nil + } + var sessions = make([]status.ServerSession, 0, len(s.byDisco)) + for _, se := range s.byDisco { + c1 := extractClientInfo(0, se) + c2 := extractClientInfo(1, se) + sessions = append(sessions, status.ServerSession{ + VNI: se.vni, + Client1: c1, + Client2: c2, + }) + } + return sessions +} diff --git a/net/udprelay/status/status.go b/net/udprelay/status/status.go new file mode 100644 index 000000000..3866efada --- /dev/null +++ b/net/udprelay/status/status.go @@ -0,0 +1,75 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package status contains types relating to the status of peer relay sessions +// between peer relay client nodes via a peer relay server. +package status + +import ( + "net/netip" +) + +// ServerStatus contains the listening UDP port and active sessions (if any) for +// this node's peer relay server at a point in time. +type ServerStatus struct { + // UDPPort is the UDP port number that the peer relay server forwards over, + // as configured by the user with 'tailscale set --relay-server-port='. + // If the port has not been configured, UDPPort will be nil. + UDPPort *int + // Sessions is a slice of detailed status information about each peer + // relay session that this node's peer relay server is involved with. It + // may be empty. + Sessions []ServerSession +} + +// ClientInfo contains status-related information about a single peer relay +// client involved in a single peer relay session. +type ClientInfo struct { + // Endpoint is the [netip.AddrPort] of this peer relay client's underlay + // endpoint participating in the session, or a zero value if the client + // has not completed a handshake. + Endpoint netip.AddrPort + // ShortDisco is a string representation of this peer relay client's disco + // public key. + // + // TODO: disco keys are pretty meaningless to end users, and they are also + // ephemeral. We really need node keys (or translation to first ts addr), + // but those are not fully plumbed into the [udprelay.Server]. Disco keys + // can also be ambiguous to a node key, but we could add node key into a + // [disco.AllocateUDPRelayEndpointRequest] in similar fashion to + // [disco.Ping]. There's also the problem of netmap trimming, where we + // can't verify a node key maps to a disco key. + ShortDisco string + // PacketsTx is the number of packets this peer relay client has sent to + // the other client via the relay server after completing a handshake. This + // is identical to the number of packets that the peer relay server has + // received from this client. 
+ PacketsTx uint64 + // BytesTx is the total overlay bytes this peer relay client has sent to + // the other client via the relay server after completing a handshake. This + // is identical to the total overlay bytes that the peer relay server has + // received from this client. + BytesTx uint64 +} + +// ServerSession contains status information for a single session between two +// peer relay clients, which are relayed via one peer relay server. This is the +// status as seen by the peer relay server; each client node may have a +// different view of the session's current status based on connectivity and +// where the client is in the peer relay endpoint setup (allocation, binding, +// pinging, active). +type ServerSession struct { + // VNI is the Virtual Network Identifier for this peer relay session, which + // comes from the Geneve header and is unique to this session. + VNI uint32 + // Client1 contains status information about one of the two peer relay + // clients involved in this session. Note that 'Client1' does NOT mean this + // was/wasn't the allocating client, or the first client to bind, etc; this + // is just one client of two. + Client1 ClientInfo + // Client2 contains status information about one of the two peer relay + // clients involved in this session. Note that 'Client2' does NOT mean this + // was/wasn't the allocating client, or the second client to bind, etc; this + // is just one client of two. + Client2 ClientInfo +} diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index de9e69f9c..619183a60 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -310,6 +310,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 6e0dc87eb..136004bc8 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -38,6 +38,7 @@ import ( "tailscale.com/ipn" "tailscale.com/net/tsaddr" "tailscale.com/net/tstun" + "tailscale.com/net/udprelay/status" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/tstest/integration/testcontrol" @@ -1526,7 +1527,8 @@ func TestEncryptStateMigration(t *testing.T) { // TestPeerRelayPing creates three nodes with one acting as a peer relay. // The test succeeds when "tailscale ping" flows through the peer -// relay between all 3 nodes. +// relay between all 3 nodes, and "tailscale debug peer-relay-sessions" returns +// expected values. 
func TestPeerRelayPing(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) @@ -1624,6 +1626,47 @@ func TestPeerRelayPing(t *testing.T) { t.Fatal(err) } } + + allControlNodes := env.Control.AllNodes() + wantSessionsForDiscoShorts := make(set.Set[[2]string]) + for i, a := range allControlNodes { + if i == len(allControlNodes)-1 { + break + } + for _, z := range allControlNodes[i+1:] { + wantSessionsForDiscoShorts.Add([2]string{a.DiscoKey.ShortString(), z.DiscoKey.ShortString()}) + } + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + debugSessions, err := peerRelay.LocalClient().DebugPeerRelaySessions(ctx) + cancel() + if err != nil { + t.Fatalf("debug peer-relay-sessions failed: %v", err) + } + if len(debugSessions.Sessions) != len(wantSessionsForDiscoShorts) { + t.Errorf("got %d peer relay sessions, want %d", len(debugSessions.Sessions), len(wantSessionsForDiscoShorts)) + } + for _, session := range debugSessions.Sessions { + if !wantSessionsForDiscoShorts.Contains([2]string{session.Client1.ShortDisco, session.Client2.ShortDisco}) && + !wantSessionsForDiscoShorts.Contains([2]string{session.Client2.ShortDisco, session.Client1.ShortDisco}) { + t.Errorf("peer relay session for disco keys %s<->%s not found in debug peer-relay-sessions: %+v", session.Client1.ShortDisco, session.Client2.ShortDisco, debugSessions.Sessions) + } + for _, client := range []status.ClientInfo{session.Client1, session.Client2} { + if client.BytesTx == 0 { + t.Errorf("unexpected 0 bytes TX counter in peer relay session: %+v", session) + } + if client.PacketsTx == 0 { + t.Errorf("unexpected 0 packets TX counter in peer relay session: %+v", session) + } + if !client.Endpoint.IsValid() { + t.Errorf("unexpected endpoint zero value in peer relay session: %+v", session) + } + if len(client.ShortDisco) == 0 { + t.Errorf("unexpected zero len short disco in peer relay session: %+v", session) + } + } + } } func TestC2NDebugNetmap(t *testing.T) { From 8fe575409f4287880b485d5bfbd05e5ef573c4bb Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 23 Sep 2025 15:49:49 -0700 Subject: [PATCH 1380/1708] feature/featuretags: add build tag to remove captive portal detection This doesn't yet fully pull it out into a feature/captiveportal package. This is the usual first step, moving the code to its own files within the same packages. 
Updates #17254 Change-Id: Idfaec839debf7c96f51ca6520ce36ccf2f8eec92 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/deps_test.go | 13 ++ .../feature_captiveportal_disabled.go | 13 ++ .../feature_captiveportal_enabled.go | 13 ++ feature/featuretags/featuretags.go | 1 + ipn/ipnlocal/captiveportal.go | 186 ++++++++++++++++++ ipn/ipnlocal/local.go | 183 ++--------------- net/netcheck/captiveportal.go | 55 ++++++ net/netcheck/netcheck.go | 40 +--- 9 files changed, 304 insertions(+), 202 deletions(-) create mode 100644 feature/buildfeatures/feature_captiveportal_disabled.go create mode 100644 feature/buildfeatures/feature_captiveportal_enabled.go create mode 100644 ipn/ipnlocal/captiveportal.go create mode 100644 net/netcheck/captiveportal.go diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index deeb9c3a3..abb329806 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -103,7 +103,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web tailscale.com/feature from tailscale.com/tsweb+ - tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscale/cli+ tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/cmd/tailscale/cli diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 50e584fe0..818764b70 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -123,6 +123,19 @@ func TestOmitACME(t *testing.T) { }.Check(t) } +func TestOmitCaptivePortal(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_captiveportal,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "captive") { + t.Errorf("unexpected dep with ts_omit_captiveportal: %q", dep) + } + }, + }.Check(t) +} + func TestOmitOAuthKey(t *testing.T) { deptest.DepChecker{ GOOS: "linux", diff --git a/feature/buildfeatures/feature_captiveportal_disabled.go b/feature/buildfeatures/feature_captiveportal_disabled.go new file mode 100644 index 000000000..367fef81b --- /dev/null +++ b/feature/buildfeatures/feature_captiveportal_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_captiveportal + +package buildfeatures + +// HasCaptivePortal is whether the binary was built with support for modular feature "Captive portal detection". +// Specifically, it's whether the binary was NOT built with the "ts_omit_captiveportal" build tag. +// It's a const so it can be used for dead code elimination. +const HasCaptivePortal = false diff --git a/feature/buildfeatures/feature_captiveportal_enabled.go b/feature/buildfeatures/feature_captiveportal_enabled.go new file mode 100644 index 000000000..bd8e1f6a8 --- /dev/null +++ b/feature/buildfeatures/feature_captiveportal_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_captiveportal + +package buildfeatures + +// HasCaptivePortal is whether the binary was built with support for modular feature "Captive portal detection". +// Specifically, it's whether the binary was NOT built with the "ts_omit_captiveportal" build tag. +// It's a const so it can be used for dead code elimination. +const HasCaptivePortal = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 269ff1fc1..9e6de018c 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -93,6 +93,7 @@ var Features = map[FeatureTag]FeatureMeta{ "acme": {"ACME", "ACME TLS certificate management", nil}, "aws": {"AWS", "AWS integration", nil}, "bird": {"Bird", "Bird BGP integration", nil}, + "captiveportal": {"CaptivePortal", "Captive portal detection", nil}, "capture": {"Capture", "Packet capture", nil}, "cli": {"CLI", "embed the CLI into the tailscaled binary", nil}, "completion": {"Completion", "CLI shell completion", nil}, diff --git a/ipn/ipnlocal/captiveportal.go b/ipn/ipnlocal/captiveportal.go new file mode 100644 index 000000000..14f8b799e --- /dev/null +++ b/ipn/ipnlocal/captiveportal.go @@ -0,0 +1,186 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_captiveportal + +package ipnlocal + +import ( + "context" + "time" + + "tailscale.com/health" + "tailscale.com/net/captivedetection" + "tailscale.com/util/clientmetric" +) + +func init() { + hookCaptivePortalHealthChange.Set(captivePortalHealthChange) + hookCheckCaptivePortalLoop.Set(checkCaptivePortalLoop) +} + +var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detected") + +// captivePortalDetectionInterval is the duration to wait in an unhealthy state with connectivity broken +// before running captive portal detection. +const captivePortalDetectionInterval = 2 * time.Second + +func captivePortalHealthChange(b *LocalBackend, state *health.State) { + isConnectivityImpacted := false + for _, w := range state.Warnings { + // Ignore the captive portal warnable itself. + if w.ImpactsConnectivity && w.WarnableCode != captivePortalWarnable.Code { + isConnectivityImpacted = true + break + } + } + + // captiveCtx can be changed, and is protected with 'mu'; grab that + // before we start our select, below. + // + // It is guaranteed to be non-nil. + b.mu.Lock() + ctx := b.captiveCtx + b.mu.Unlock() + + // If the context is canceled, we don't need to do anything. + if ctx.Err() != nil { + return + } + + if isConnectivityImpacted { + b.logf("health: connectivity impacted; triggering captive portal detection") + + // Ensure that we select on captiveCtx so that we can time out + // triggering captive portal detection if the backend is shutdown. + select { + case b.needsCaptiveDetection <- true: + case <-ctx.Done(): + } + } else { + // If connectivity is not impacted, we know for sure we're not behind a captive portal, + // so drop any warning, and signal that we don't need captive portal detection. + b.health.SetHealthy(captivePortalWarnable) + select { + case b.needsCaptiveDetection <- false: + case <-ctx.Done(): + } + } +} + +// captivePortalWarnable is a Warnable which is set to an unhealthy state when a captive portal is detected. +var captivePortalWarnable = health.Register(&health.Warnable{ + Code: "captive-portal-detected", + Title: "Captive portal detected", + // High severity, because captive portals block all traffic and require user intervention. 
+ Severity: health.SeverityHigh, + Text: health.StaticMessage("This network requires you to log in using your web browser."), + ImpactsConnectivity: true, +}) + +func checkCaptivePortalLoop(b *LocalBackend, ctx context.Context) { + var tmr *time.Timer + + maybeStartTimer := func() { + // If there's an existing timer, nothing to do; just continue + // waiting for it to expire. Otherwise, create a new timer. + if tmr == nil { + tmr = time.NewTimer(captivePortalDetectionInterval) + } + } + maybeStopTimer := func() { + if tmr == nil { + return + } + if !tmr.Stop() { + <-tmr.C + } + tmr = nil + } + + for { + if ctx.Err() != nil { + maybeStopTimer() + return + } + + // First, see if we have a signal on our "healthy" channel, which + // takes priority over an existing timer. Because a select is + // nondeterministic, we explicitly check this channel before + // entering the main select below, so that we're guaranteed to + // stop the timer before starting captive portal detection. + select { + case needsCaptiveDetection := <-b.needsCaptiveDetection: + if needsCaptiveDetection { + maybeStartTimer() + } else { + maybeStopTimer() + } + default: + } + + var timerChan <-chan time.Time + if tmr != nil { + timerChan = tmr.C + } + select { + case <-ctx.Done(): + // All done; stop the timer and then exit. + maybeStopTimer() + return + case <-timerChan: + // Kick off captive portal check + b.performCaptiveDetection() + // nil out timer to force recreation + tmr = nil + case needsCaptiveDetection := <-b.needsCaptiveDetection: + if needsCaptiveDetection { + maybeStartTimer() + } else { + // Healthy; cancel any existing timer + maybeStopTimer() + } + } + } +} + +// shouldRunCaptivePortalDetection reports whether captive portal detection +// should be run. It is enabled by default, but can be disabled via a control +// knob. It is also only run when the user explicitly wants the backend to be +// running. +func (b *LocalBackend) shouldRunCaptivePortalDetection() bool { + b.mu.Lock() + defer b.mu.Unlock() + return !b.ControlKnobs().DisableCaptivePortalDetection.Load() && b.pm.prefs.WantRunning() +} + +// performCaptiveDetection checks if captive portal detection is enabled via controlknob. If so, it runs +// the detection and updates the Warnable accordingly. 
+func (b *LocalBackend) performCaptiveDetection() { + if !b.shouldRunCaptivePortalDetection() { + return + } + + d := captivedetection.NewDetector(b.logf) + b.mu.Lock() // for b.hostinfo + cn := b.currentNode() + dm := cn.DERPMap() + preferredDERP := 0 + if b.hostinfo != nil { + if b.hostinfo.NetInfo != nil { + preferredDERP = b.hostinfo.NetInfo.PreferredDERP + } + } + ctx := b.ctx + netMon := b.NetMon() + b.mu.Unlock() + found := d.Detect(ctx, netMon, dm, preferredDERP) + if found { + if !b.health.IsUnhealthy(captivePortalWarnable) { + metricCaptivePortalDetected.Add(1) + } + b.health.SetUnhealthy(captivePortalWarnable, health.Args{}) + } else { + b.health.SetHealthy(captivePortalWarnable) + } +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ce42ae75a..623a0a3a3 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -64,7 +64,6 @@ import ( "tailscale.com/ipn/policy" "tailscale.com/log/sockstatlog" "tailscale.com/logpolicy" - "tailscale.com/net/captivedetection" "tailscale.com/net/dns" "tailscale.com/net/dnscache" "tailscale.com/net/dnsfallback" @@ -168,8 +167,6 @@ type watchSession struct { cancel context.CancelFunc // to shut down the session } -var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detected") - var ( // errShutdown indicates that the [LocalBackend.Shutdown] was called. errShutdown = errors.New("shutting down") @@ -943,10 +940,6 @@ func (b *LocalBackend) DisconnectControl() { cc.Shutdown() } -// captivePortalDetectionInterval is the duration to wait in an unhealthy state with connectivity broken -// before running captive portal detection. -const captivePortalDetectionInterval = 2 * time.Second - // linkChange is our network monitor callback, called whenever the network changes. func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { b.mu.Lock() @@ -1002,6 +995,12 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { } } +// Captive portal detection hooks. +var ( + hookCaptivePortalHealthChange feature.Hook[func(*LocalBackend, *health.State)] + hookCheckCaptivePortalLoop feature.Hook[func(*LocalBackend, context.Context)] +) + func (b *LocalBackend) onHealthChange(change health.Change) { if change.WarnableChanged { w := change.Warnable @@ -1019,45 +1018,8 @@ func (b *LocalBackend) onHealthChange(change health.Change) { Health: state, }) - isConnectivityImpacted := false - for _, w := range state.Warnings { - // Ignore the captive portal warnable itself. - if w.ImpactsConnectivity && w.WarnableCode != captivePortalWarnable.Code { - isConnectivityImpacted = true - break - } - } - - // captiveCtx can be changed, and is protected with 'mu'; grab that - // before we start our select, below. - // - // It is guaranteed to be non-nil. - b.mu.Lock() - ctx := b.captiveCtx - b.mu.Unlock() - - // If the context is canceled, we don't need to do anything. - if ctx.Err() != nil { - return - } - - if isConnectivityImpacted { - b.logf("health: connectivity impacted; triggering captive portal detection") - - // Ensure that we select on captiveCtx so that we can time out - // triggering captive portal detection if the backend is shutdown. - select { - case b.needsCaptiveDetection <- true: - case <-ctx.Done(): - } - } else { - // If connectivity is not impacted, we know for sure we're not behind a captive portal, - // so drop any warning, and signal that we don't need captive portal detection. 
- b.health.SetHealthy(captivePortalWarnable) - select { - case b.needsCaptiveDetection <- false: - case <-ctx.Done(): - } + if f, ok := hookCaptivePortalHealthChange.GetOk(); ok { + f(b, state) } } @@ -1115,7 +1077,7 @@ func (b *LocalBackend) Shutdown() { } b.shutdownCalled = true - if b.captiveCancel != nil { + if buildfeatures.HasCaptivePortal && b.captiveCancel != nil { b.logf("canceling captive portal context") b.captiveCancel() } @@ -2767,123 +2729,6 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { } } -// captivePortalWarnable is a Warnable which is set to an unhealthy state when a captive portal is detected. -var captivePortalWarnable = health.Register(&health.Warnable{ - Code: "captive-portal-detected", - Title: "Captive portal detected", - // High severity, because captive portals block all traffic and require user intervention. - Severity: health.SeverityHigh, - Text: health.StaticMessage("This network requires you to log in using your web browser."), - ImpactsConnectivity: true, -}) - -func (b *LocalBackend) checkCaptivePortalLoop(ctx context.Context) { - var tmr *time.Timer - - maybeStartTimer := func() { - // If there's an existing timer, nothing to do; just continue - // waiting for it to expire. Otherwise, create a new timer. - if tmr == nil { - tmr = time.NewTimer(captivePortalDetectionInterval) - } - } - maybeStopTimer := func() { - if tmr == nil { - return - } - if !tmr.Stop() { - <-tmr.C - } - tmr = nil - } - - for { - if ctx.Err() != nil { - maybeStopTimer() - return - } - - // First, see if we have a signal on our "healthy" channel, which - // takes priority over an existing timer. Because a select is - // nondeterministic, we explicitly check this channel before - // entering the main select below, so that we're guaranteed to - // stop the timer before starting captive portal detection. - select { - case needsCaptiveDetection := <-b.needsCaptiveDetection: - if needsCaptiveDetection { - maybeStartTimer() - } else { - maybeStopTimer() - } - default: - } - - var timerChan <-chan time.Time - if tmr != nil { - timerChan = tmr.C - } - select { - case <-ctx.Done(): - // All done; stop the timer and then exit. - maybeStopTimer() - return - case <-timerChan: - // Kick off captive portal check - b.performCaptiveDetection() - // nil out timer to force recreation - tmr = nil - case needsCaptiveDetection := <-b.needsCaptiveDetection: - if needsCaptiveDetection { - maybeStartTimer() - } else { - // Healthy; cancel any existing timer - maybeStopTimer() - } - } - } -} - -// performCaptiveDetection checks if captive portal detection is enabled via controlknob. If so, it runs -// the detection and updates the Warnable accordingly. -func (b *LocalBackend) performCaptiveDetection() { - if !b.shouldRunCaptivePortalDetection() { - return - } - - d := captivedetection.NewDetector(b.logf) - b.mu.Lock() // for b.hostinfo - cn := b.currentNode() - dm := cn.DERPMap() - preferredDERP := 0 - if b.hostinfo != nil { - if b.hostinfo.NetInfo != nil { - preferredDERP = b.hostinfo.NetInfo.PreferredDERP - } - } - ctx := b.ctx - netMon := b.NetMon() - b.mu.Unlock() - found := d.Detect(ctx, netMon, dm, preferredDERP) - if found { - if !b.health.IsUnhealthy(captivePortalWarnable) { - metricCaptivePortalDetected.Add(1) - } - b.health.SetUnhealthy(captivePortalWarnable, health.Args{}) - } else { - b.health.SetHealthy(captivePortalWarnable) - } -} - -// shouldRunCaptivePortalDetection reports whether captive portal detection -// should be run. 
It is enabled by default, but can be disabled via a control -// knob. It is also only run when the user explicitly wants the backend to be -// running. -func (b *LocalBackend) shouldRunCaptivePortalDetection() bool { - b.mu.Lock() - defer b.mu.Unlock() - return !b.ControlKnobs().DisableCaptivePortalDetection.Load() && b.pm.prefs.WantRunning() -} - // packetFilterPermitsUnlockedNodes reports any peer in peers with the // UnsignedPeerAPIOnly bool set true has any of its allowed IPs in the packet // filter. @@ -5715,16 +5560,18 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock // Start a captive portal detection loop if none has been // started. Create a new context if none is present, since it // can be shut down if we transition away from Running. - if b.captiveCancel == nil { - b.captiveCtx, b.captiveCancel = context.WithCancel(b.ctx) - b.goTracker.Go(func() { b.checkCaptivePortalLoop(b.captiveCtx) }) + if buildfeatures.HasCaptivePortal { + if b.captiveCancel == nil { + b.captiveCtx, b.captiveCancel = context.WithCancel(b.ctx) + b.goTracker.Go(func() { hookCheckCaptivePortalLoop.Get()(b, b.captiveCtx) }) + } } } else if oldState == ipn.Running { // Transitioning away from running. b.closePeerAPIListenersLocked() // Stop any existing captive portal detection loop. - if b.captiveCancel != nil { + if buildfeatures.HasCaptivePortal && b.captiveCancel != nil { b.captiveCancel() b.captiveCancel = nil diff --git a/net/netcheck/captiveportal.go b/net/netcheck/captiveportal.go new file mode 100644 index 000000000..ad11f19a0 --- /dev/null +++ b/net/netcheck/captiveportal.go @@ -0,0 +1,55 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_captiveportal + +package netcheck + +import ( + "context" + "time" + + "tailscale.com/net/captivedetection" + "tailscale.com/tailcfg" +) + +func init() { + hookStartCaptivePortalDetection.Set(startCaptivePortalDetection) +} + +func startCaptivePortalDetection(ctx context.Context, rs *reportState, dm *tailcfg.DERPMap, preferredDERP int) (done <-chan struct{}, stop func()) { + c := rs.c + + // NOTE(andrew): we can't simply add this goroutine to the + // `NewWaitGroupChan` below, since we don't wait for that + // waitgroup to finish when exiting this function and thus get + // a data race. + ch := make(chan struct{}) + + tmr := time.AfterFunc(c.captivePortalDelay(), func() { + defer close(ch) + d := captivedetection.NewDetector(c.logf) + found := d.Detect(ctx, c.NetMon, dm, preferredDERP) + rs.report.CaptivePortal.Set(found) + }) + + stop = func() { + // Don't cancel our captive portal check if we're + // explicitly doing a verbose netcheck. + if c.Verbose { + return + } + + if tmr.Stop() { + // Stopped successfully; need to close the + // signal channel ourselves. + close(ch) + return + } + + // Did not stop; do nothing and it'll finish by itself + // and close the signal channel. 
+ } + + return ch, stop +} diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index ba9a8cb0f..169133ceb 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -26,8 +26,9 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" - "tailscale.com/net/captivedetection" "tailscale.com/net/dnscache" "tailscale.com/net/neterror" "tailscale.com/net/netmon" @@ -786,6 +787,8 @@ func (c *Client) SetForcePreferredDERP(region int) { c.ForcePreferredDERP = region } +var hookStartCaptivePortalDetection feature.Hook[func(ctx context.Context, rs *reportState, dm *tailcfg.DERPMap, preferredDERP int) (<-chan struct{}, func())] + // GetReport gets a report. The 'opts' argument is optional and can be nil. // Callers are discouraged from passing a ctx with an arbitrary deadline as this // may cause GetReport to return prematurely before all reporting methods have @@ -910,38 +913,9 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe // it's unnecessary. captivePortalDone := syncs.ClosedChan() captivePortalStop := func() {} - if !rs.incremental && !onlySTUN { - // NOTE(andrew): we can't simply add this goroutine to the - // `NewWaitGroupChan` below, since we don't wait for that - // waitgroup to finish when exiting this function and thus get - // a data race. - ch := make(chan struct{}) - captivePortalDone = ch - - tmr := time.AfterFunc(c.captivePortalDelay(), func() { - defer close(ch) - d := captivedetection.NewDetector(c.logf) - found := d.Detect(ctx, c.NetMon, dm, preferredDERP) - rs.report.CaptivePortal.Set(found) - }) - - captivePortalStop = func() { - // Don't cancel our captive portal check if we're - // explicitly doing a verbose netcheck. - if c.Verbose { - return - } - - if tmr.Stop() { - // Stopped successfully; need to close the - // signal channel ourselves. - close(ch) - return - } - - // Did not stop; do nothing and it'll finish by itself - // and close the signal channel. - } + if buildfeatures.HasCaptivePortal && !rs.incremental && !onlySTUN { + start := hookStartCaptivePortalDetection.Get() + captivePortalDone, captivePortalStop = start(ctx, rs, dm, preferredDERP) } wg := syncs.NewWaitGroupChan() From b54cdf9f38b1476de2d519c25eb84b7bedebd613 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 23 Sep 2025 18:15:48 -0700 Subject: [PATCH 1381/1708] all: use buildfeatures.HasCapture const in a handful of places Help out the linker's dead code elimination. Updates #12614 Change-Id: I6c13cb44d3250bf1e3a01ad393c637da4613affb Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 6 ++++++ net/tstun/wrap.go | 4 ++++ wgengine/magicsock/magicsock.go | 3 +++ wgengine/userspace.go | 4 ++++ wgengine/watchdog.go | 4 ++++ 5 files changed, 21 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 623a0a3a3..5c5fb034b 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1026,6 +1026,9 @@ func (b *LocalBackend) onHealthChange(change health.Change) { // GetOrSetCaptureSink returns the current packet capture sink, creating it // with the provided newSink function if it does not already exist. 
func (b *LocalBackend) GetOrSetCaptureSink(newSink func() packet.CaptureSink) packet.CaptureSink { + if !buildfeatures.HasCapture { + return nil + } b.mu.Lock() defer b.mu.Unlock() @@ -1039,6 +1042,9 @@ func (b *LocalBackend) GetOrSetCaptureSink(newSink func() packet.CaptureSink) pa } func (b *LocalBackend) ClearCaptureSink() { + if !buildfeatures.HasCapture { + return + } // Shut down & uninstall the sink if there are no longer // any outputs on it. b.mu.Lock() diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 442184065..4c88c7eef 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -24,6 +24,7 @@ import ( "go4.org/mem" "gvisor.dev/gvisor/pkg/tcpip/stack" "tailscale.com/disco" + "tailscale.com/feature/buildfeatures" tsmetrics "tailscale.com/metrics" "tailscale.com/net/connstats" "tailscale.com/net/packet" @@ -1491,5 +1492,8 @@ var ( ) func (t *Wrapper) InstallCaptureHook(cb packet.CaptureCallback) { + if !buildfeatures.HasCapture { + return + } t.captureHook.Store(cb) } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 72fff3411..0d8a1e53a 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -890,6 +890,9 @@ func deregisterMetrics(m *metrics) { // can be called with a nil argument to uninstall the capture // hook. func (c *Conn) InstallCaptureHook(cb packet.CaptureCallback) { + if !buildfeatures.HasCapture { + return + } c.captureHook.Store(cb) } diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 86136d977..7fb580514 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -23,6 +23,7 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/drive" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/ipn/ipnstate" "tailscale.com/net/dns" @@ -1652,6 +1653,9 @@ var ( ) func (e *userspaceEngine) InstallCaptureHook(cb packet.CaptureCallback) { + if !buildfeatures.HasCapture { + return + } e.tundev.InstallCaptureHook(cb) e.magicConn.InstallCaptureHook(cb) } diff --git a/wgengine/watchdog.go b/wgengine/watchdog.go index 74a191748..13bc48fb0 100644 --- a/wgengine/watchdog.go +++ b/wgengine/watchdog.go @@ -15,6 +15,7 @@ import ( "time" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn/ipnstate" "tailscale.com/net/dns" "tailscale.com/net/packet" @@ -163,6 +164,9 @@ func (e *watchdogEngine) Done() <-chan struct{} { } func (e *watchdogEngine) InstallCaptureHook(cb packet.CaptureCallback) { + if !buildfeatures.HasCapture { + return + } e.wrap.InstallCaptureHook(cb) } From b3e9a128afdbb8229a6b85eea8be4783d9224e47 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 23 Sep 2025 14:11:04 -0700 Subject: [PATCH 1382/1708] net/dns, feature/featuretags: make NetworkManager, systemd-resolved, and DBus modular Saves 360 KB (19951800 => 19591352 on linux/amd64 --extra-small --box binary) Updates #12614 Updates #17206 Change-Id: Iafd5b2536dd735111b447546cba335a7a64379ed Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/deps_test.go | 13 ++ .../buildfeatures/feature_dbus_disabled.go | 13 ++ feature/buildfeatures/feature_dbus_enabled.go | 13 ++ .../feature_networkmanager_disabled.go | 13 ++ .../feature_networkmanager_enabled.go | 13 ++ .../feature_resolved_disabled.go | 13 ++ .../buildfeatures/feature_resolved_enabled.go | 13 ++ feature/featuretags/featuretags.go | 25 ++- net/dns/dbus.go | 59 +++++++ net/dns/manager_linux.go | 151 +++++++----------- net/dns/nm.go | 63 ++++++-- net/dns/resolved.go | 8 +- 12 files 
changed, 284 insertions(+), 113 deletions(-) create mode 100644 feature/buildfeatures/feature_dbus_disabled.go create mode 100644 feature/buildfeatures/feature_dbus_enabled.go create mode 100644 feature/buildfeatures/feature_networkmanager_disabled.go create mode 100644 feature/buildfeatures/feature_networkmanager_enabled.go create mode 100644 feature/buildfeatures/feature_resolved_disabled.go create mode 100644 feature/buildfeatures/feature_resolved_enabled.go create mode 100644 net/dns/dbus.go diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 818764b70..2e797e366 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -161,3 +161,16 @@ func TestOmitOutboundProxy(t *testing.T) { }, }.Check(t) } + +func TestOmitDBus(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_networkmanager,ts_omit_dbus,ts_omit_resolved,ts_omit_systray,ts_omit_ssh,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "dbus") { + t.Errorf("unexpected DBus dep: %q", dep) + } + }, + }.Check(t) +} diff --git a/feature/buildfeatures/feature_dbus_disabled.go b/feature/buildfeatures/feature_dbus_disabled.go new file mode 100644 index 000000000..e6ab89677 --- /dev/null +++ b/feature/buildfeatures/feature_dbus_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_dbus + +package buildfeatures + +// HasDBus is whether the binary was built with support for modular feature "Linux DBus support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_dbus" build tag. +// It's a const so it can be used for dead code elimination. +const HasDBus = false diff --git a/feature/buildfeatures/feature_dbus_enabled.go b/feature/buildfeatures/feature_dbus_enabled.go new file mode 100644 index 000000000..374331cda --- /dev/null +++ b/feature/buildfeatures/feature_dbus_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_dbus + +package buildfeatures + +// HasDBus is whether the binary was built with support for modular feature "Linux DBus support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_dbus" build tag. +// It's a const so it can be used for dead code elimination. +const HasDBus = true diff --git a/feature/buildfeatures/feature_networkmanager_disabled.go b/feature/buildfeatures/feature_networkmanager_disabled.go new file mode 100644 index 000000000..d0ec6f017 --- /dev/null +++ b/feature/buildfeatures/feature_networkmanager_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_networkmanager + +package buildfeatures + +// HasNetworkManager is whether the binary was built with support for modular feature "Linux NetworkManager integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_networkmanager" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasNetworkManager = false diff --git a/feature/buildfeatures/feature_networkmanager_enabled.go b/feature/buildfeatures/feature_networkmanager_enabled.go new file mode 100644 index 000000000..ec284c310 --- /dev/null +++ b/feature/buildfeatures/feature_networkmanager_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_networkmanager + +package buildfeatures + +// HasNetworkManager is whether the binary was built with support for modular feature "Linux NetworkManager integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_networkmanager" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetworkManager = true diff --git a/feature/buildfeatures/feature_resolved_disabled.go b/feature/buildfeatures/feature_resolved_disabled.go new file mode 100644 index 000000000..283dd20c7 --- /dev/null +++ b/feature/buildfeatures/feature_resolved_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_resolved + +package buildfeatures + +// HasResolved is whether the binary was built with support for modular feature "Linux systemd-resolved integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_resolved" build tag. +// It's a const so it can be used for dead code elimination. +const HasResolved = false diff --git a/feature/buildfeatures/feature_resolved_enabled.go b/feature/buildfeatures/feature_resolved_enabled.go new file mode 100644 index 000000000..af1b3b41e --- /dev/null +++ b/feature/buildfeatures/feature_resolved_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_resolved + +package buildfeatures + +// HasResolved is whether the binary was built with support for modular feature "Linux systemd-resolved integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_resolved" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasResolved = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 9e6de018c..6f8c4ac17 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -97,6 +97,7 @@ var Features = map[FeatureTag]FeatureMeta{ "capture": {"Capture", "Packet capture", nil}, "cli": {"CLI", "embed the CLI into the tailscaled binary", nil}, "completion": {"Completion", "CLI shell completion", nil}, + "dbus": {"DBus", "Linux DBus support", nil}, "debugeventbus": {"DebugEventBus", "eventbus debug support", nil}, "debugportmapper": { Sym: "DebugPortMapper", @@ -113,9 +114,19 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Outbound localhost HTTP/SOCK5 proxy support", Deps: []FeatureTag{"netstack"}, }, - "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, - "netstack": {"Netstack", "gVisor netstack (userspace networking) support (TODO; not yet omittable)", nil}, + "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, + "netstack": {"Netstack", "gVisor netstack (userspace networking) support (TODO; not yet omittable)", nil}, + "networkmanager": { + Sym: "NetworkManager", + Desc: "Linux NetworkManager integration", + Deps: []FeatureTag{"dbus"}, + }, "relayserver": {"RelayServer", "Relay server", nil}, + "resolved": { + Sym: "Resolved", + Desc: "Linux systemd-resolved integration", + Deps: []FeatureTag{"dbus"}, + }, "serve": { Sym: "Serve", Desc: "Serve and Funnel support", @@ -124,10 +135,14 @@ var Features = map[FeatureTag]FeatureMeta{ "ssh": { Sym: "SSH", Desc: "Tailscale SSH support", - Deps: []FeatureTag{"netstack"}, + Deps: []FeatureTag{"dbus", "netstack"}, + }, + "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support", nil}, + "systray": { + Sym: "SysTray", + Desc: "Linux system tray", + Deps: []FeatureTag{"dbus"}, }, - "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support", nil}, - "systray": {"SysTray", "Linux system tray", nil}, "taildrop": {"Taildrop", "Taildrop (file sending) support", nil}, "tailnetlock": {"TailnetLock", "Tailnet Lock support", nil}, "tap": {"Tap", "Experimental Layer 2 (ethernet) support", nil}, diff --git a/net/dns/dbus.go b/net/dns/dbus.go new file mode 100644 index 000000000..c53e8b720 --- /dev/null +++ b/net/dns/dbus.go @@ -0,0 +1,59 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android && !ts_omit_dbus + +package dns + +import ( + "context" + "time" + + "github.com/godbus/dbus/v5" +) + +func init() { + optDBusPing.Set(dbusPing) + optDBusReadString.Set(dbusReadString) +} + +func dbusPing(name, objectPath string) error { + conn, err := dbus.SystemBus() + if err != nil { + // DBus probably not running. + return err + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + obj := conn.Object(name, dbus.ObjectPath(objectPath)) + call := obj.CallWithContext(ctx, "org.freedesktop.DBus.Peer.Ping", 0) + return call.Err +} + +// dbusReadString reads a string property from the provided name and object +// path. property must be in "interface.member" notation. +func dbusReadString(name, objectPath, iface, member string) (string, error) { + conn, err := dbus.SystemBus() + if err != nil { + // DBus probably not running. 
+ return "", err + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + obj := conn.Object(name, dbus.ObjectPath(objectPath)) + + var result dbus.Variant + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, iface, member).Store(&result) + if err != nil { + return "", err + } + + if s, ok := result.Value().(string); ok { + return s, nil + } + return result.String(), nil +} diff --git a/net/dns/manager_linux.go b/net/dns/manager_linux.go index 8b66ac3a6..b2f8197ae 100644 --- a/net/dns/manager_linux.go +++ b/net/dns/manager_linux.go @@ -7,7 +7,6 @@ package dns import ( "bytes" - "context" "errors" "fmt" "os" @@ -15,13 +14,12 @@ import ( "sync" "time" - "github.com/godbus/dbus/v5" "tailscale.com/control/controlknobs" + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/net/netaddr" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" - "tailscale.com/util/cmpver" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version/distro" ) @@ -36,6 +34,31 @@ func (kv kv) String() string { var publishOnce sync.Once +// reconfigTimeout is the time interval within which Manager.{Up,Down} should complete. +// +// This is particularly useful because certain conditions can cause indefinite hangs +// (such as improper dbus auth followed by contextless dbus.Object.Call). +// Such operations should be wrapped in a timeout context. +const reconfigTimeout = time.Second + +// Set unless ts_omit_networkmanager +var ( + optNewNMManager feature.Hook[func(ifName string) (OSConfigurator, error)] + optNMIsUsingResolved feature.Hook[func() error] + optNMVersionBetween feature.Hook[func(v1, v2 string) (bool, error)] +) + +// Set unless ts_omit_resolved +var ( + optNewResolvedManager feature.Hook[func(logf logger.Logf, health *health.Tracker, interfaceName string) (OSConfigurator, error)] +) + +// Set unless ts_omit_dbus +var ( + optDBusPing feature.Hook[func(name, objectPath string) error] + optDBusReadString feature.Hook[func(name, objectPath, iface, member string) (string, error)] +) + // NewOSConfigurator created a new OS configurator. // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. @@ -45,13 +68,25 @@ func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient. } env := newOSConfigEnv{ - fs: directFS{}, - dbusPing: dbusPing, - dbusReadString: dbusReadString, - nmIsUsingResolved: nmIsUsingResolved, - nmVersionBetween: nmVersionBetween, - resolvconfStyle: resolvconfStyle, + fs: directFS{}, + resolvconfStyle: resolvconfStyle, } + if f, ok := optDBusPing.GetOk(); ok { + env.dbusPing = f + } else { + env.dbusPing = func(_, _ string) error { return errors.ErrUnsupported } + } + if f, ok := optDBusReadString.GetOk(); ok { + env.dbusReadString = f + } else { + env.dbusReadString = func(_, _, _, _ string) (string, error) { return "", errors.ErrUnsupported } + } + if f, ok := optNMIsUsingResolved.GetOk(); ok { + env.nmIsUsingResolved = f + } else { + env.nmIsUsingResolved = func() error { return errors.ErrUnsupported } + } + env.nmVersionBetween, _ = optNMVersionBetween.GetOk() // GetOk to not panic if nil; unused if optNMIsUsingResolved returns an error mode, err := dnsMode(logf, health, env) if err != nil { return nil, err @@ -66,17 +101,24 @@ func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient. 
case "direct": return newDirectManagerOnFS(logf, health, env.fs), nil case "systemd-resolved": - return newResolvedManager(logf, health, interfaceName) + if f, ok := optNewResolvedManager.GetOk(); ok { + return f(logf, health, interfaceName) + } + return nil, fmt.Errorf("tailscaled was built without DNS %q support", mode) case "network-manager": - return newNMManager(interfaceName) + if f, ok := optNewNMManager.GetOk(); ok { + return f(interfaceName) + } + return nil, fmt.Errorf("tailscaled was built without DNS %q support", mode) case "debian-resolvconf": return newDebianResolvconfManager(logf) case "openresolv": return newOpenresolvManager(logf) default: logf("[unexpected] detected unknown DNS mode %q, using direct manager as last resort", mode) - return newDirectManagerOnFS(logf, health, env.fs), nil } + + return newDirectManagerOnFS(logf, health, env.fs), nil } // newOSConfigEnv are the funcs newOSConfigurator needs, pulled out for testing. @@ -292,50 +334,6 @@ func dnsMode(logf logger.Logf, health *health.Tracker, env newOSConfigEnv) (ret } } -func nmVersionBetween(first, last string) (bool, error) { - conn, err := dbus.SystemBus() - if err != nil { - // DBus probably not running. - return false, err - } - - nm := conn.Object("org.freedesktop.NetworkManager", dbus.ObjectPath("/org/freedesktop/NetworkManager")) - v, err := nm.GetProperty("org.freedesktop.NetworkManager.Version") - if err != nil { - return false, err - } - - version, ok := v.Value().(string) - if !ok { - return false, fmt.Errorf("unexpected type %T for NM version", v.Value()) - } - - outside := cmpver.Compare(version, first) < 0 || cmpver.Compare(version, last) > 0 - return !outside, nil -} - -func nmIsUsingResolved() error { - conn, err := dbus.SystemBus() - if err != nil { - // DBus probably not running. - return err - } - - nm := conn.Object("org.freedesktop.NetworkManager", dbus.ObjectPath("/org/freedesktop/NetworkManager/DnsManager")) - v, err := nm.GetProperty("org.freedesktop.NetworkManager.DnsManager.Mode") - if err != nil { - return fmt.Errorf("getting NM mode: %w", err) - } - mode, ok := v.Value().(string) - if !ok { - return fmt.Errorf("unexpected type %T for NM DNS mode", v.Value()) - } - if mode != "systemd-resolved" { - return errors.New("NetworkManager is not using systemd-resolved for DNS") - } - return nil -} - // resolvedIsActuallyResolver reports whether the system is using // systemd-resolved as the resolver. There are two different ways to // use systemd-resolved: @@ -396,44 +394,3 @@ func isLibnssResolveUsed(env newOSConfigEnv) error { } return fmt.Errorf("libnss_resolve not used") } - -func dbusPing(name, objectPath string) error { - conn, err := dbus.SystemBus() - if err != nil { - // DBus probably not running. - return err - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - obj := conn.Object(name, dbus.ObjectPath(objectPath)) - call := obj.CallWithContext(ctx, "org.freedesktop.DBus.Peer.Ping", 0) - return call.Err -} - -// dbusReadString reads a string property from the provided name and object -// path. property must be in "interface.member" notation. -func dbusReadString(name, objectPath, iface, member string) (string, error) { - conn, err := dbus.SystemBus() - if err != nil { - // DBus probably not running. 
- return "", err - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - obj := conn.Object(name, dbus.ObjectPath(objectPath)) - - var result dbus.Variant - err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, iface, member).Store(&result) - if err != nil { - return "", err - } - - if s, ok := result.Value().(string); ok { - return s, nil - } - return result.String(), nil -} diff --git a/net/dns/nm.go b/net/dns/nm.go index 97557e33a..a88d29b37 100644 --- a/net/dns/nm.go +++ b/net/dns/nm.go @@ -1,13 +1,14 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !android +//go:build linux && !android && !ts_omit_networkmanager package dns import ( "context" "encoding/binary" + "errors" "fmt" "net" "net/netip" @@ -16,6 +17,7 @@ import ( "github.com/godbus/dbus/v5" "tailscale.com/net/tsaddr" + "tailscale.com/util/cmpver" "tailscale.com/util/dnsname" ) @@ -25,13 +27,6 @@ const ( lowerPriority = int32(200) // lower than all builtin auto priorities ) -// reconfigTimeout is the time interval within which Manager.{Up,Down} should complete. -// -// This is particularly useful because certain conditions can cause indefinite hangs -// (such as improper dbus auth followed by contextless dbus.Object.Call). -// Such operations should be wrapped in a timeout context. -const reconfigTimeout = time.Second - // nmManager uses the NetworkManager DBus API. type nmManager struct { interfaceName string @@ -39,7 +34,13 @@ type nmManager struct { dnsManager dbus.BusObject } -func newNMManager(interfaceName string) (*nmManager, error) { +func init() { + optNewNMManager.Set(newNMManager) + optNMIsUsingResolved.Set(nmIsUsingResolved) + optNMVersionBetween.Set(nmVersionBetween) +} + +func newNMManager(interfaceName string) (OSConfigurator, error) { conn, err := dbus.SystemBus() if err != nil { return nil, err @@ -389,3 +390,47 @@ func (m *nmManager) Close() error { // settings when the tailscale interface goes away. return nil } + +func nmVersionBetween(first, last string) (bool, error) { + conn, err := dbus.SystemBus() + if err != nil { + // DBus probably not running. + return false, err + } + + nm := conn.Object("org.freedesktop.NetworkManager", dbus.ObjectPath("/org/freedesktop/NetworkManager")) + v, err := nm.GetProperty("org.freedesktop.NetworkManager.Version") + if err != nil { + return false, err + } + + version, ok := v.Value().(string) + if !ok { + return false, fmt.Errorf("unexpected type %T for NM version", v.Value()) + } + + outside := cmpver.Compare(version, first) < 0 || cmpver.Compare(version, last) > 0 + return !outside, nil +} + +func nmIsUsingResolved() error { + conn, err := dbus.SystemBus() + if err != nil { + // DBus probably not running. 
+ return err + } + + nm := conn.Object("org.freedesktop.NetworkManager", dbus.ObjectPath("/org/freedesktop/NetworkManager/DnsManager")) + v, err := nm.GetProperty("org.freedesktop.NetworkManager.DnsManager.Mode") + if err != nil { + return fmt.Errorf("getting NM mode: %w", err) + } + mode, ok := v.Value().(string) + if !ok { + return fmt.Errorf("unexpected type %T for NM DNS mode", v.Value()) + } + if mode != "systemd-resolved" { + return errors.New("NetworkManager is not using systemd-resolved for DNS") + } + return nil +} diff --git a/net/dns/resolved.go b/net/dns/resolved.go index 4f58f3f9c..5d9130f05 100644 --- a/net/dns/resolved.go +++ b/net/dns/resolved.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !android +//go:build linux && !android && !ts_omit_resolved package dns @@ -70,7 +70,11 @@ type resolvedManager struct { configCR chan changeRequest // tracks OSConfigs changes and error responses } -func newResolvedManager(logf logger.Logf, health *health.Tracker, interfaceName string) (*resolvedManager, error) { +func init() { + optNewResolvedManager.Set(newResolvedManager) +} + +func newResolvedManager(logf logger.Logf, health *health.Tracker, interfaceName string) (OSConfigurator, error) { iface, err := net.InterfaceByName(interfaceName) if err != nil { return nil, err From df747f1c1b24057de03844ba0561e41123de7c27 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 24 Sep 2025 09:14:41 -0700 Subject: [PATCH 1383/1708] util/eventbus: add a Done method to the Monitor type (#17263) Some systems need to tell whether the monitored goroutine has finished alongside other channel operations (notably in this case the relay server, but there seem likely to be others similarly situated). Updates #15160 Change-Id: I5f0f3fae827b07f9b7102a3b08f60cda9737fe28 Signed-off-by: M. J. Fromberger --- util/eventbus/bus_test.go | 27 +++++++++++++++++++++++++-- util/eventbus/monitor.go | 14 +++++++++++++- 2 files changed, 38 insertions(+), 3 deletions(-) diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index 67f68cd4a..f9e7ee3dd 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -236,6 +236,17 @@ func TestMonitor(t *testing.T) { } }) + t.Run("ZeroDone", func(t *testing.T) { + var zero eventbus.Monitor + + select { + case <-zero.Done(): + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for zero monitor to be done") + } + }) + t.Run("ZeroClose", func(t *testing.T) { var zero eventbus.Monitor @@ -276,7 +287,13 @@ func TestMonitor(t *testing.T) { // While the goroutine is running, Wait does not complete. 
select { case <-done: - t.Error("monitor is ready before its goroutine is finished") + t.Error("monitor is ready before its goroutine is finished (Wait)") + default: + // OK + } + select { + case <-m.Done(): + t.Error("monitor is ready before its goroutine is finished (Done)") default: // OK } @@ -286,7 +303,13 @@ func TestMonitor(t *testing.T) { case <-done: // OK case <-time.After(time.Second): - t.Fatal("timeout waiting for monitor to complete") + t.Fatal("timeout waiting for monitor to complete (Wait)") + } + select { + case <-m.Done(): + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for monitor to complete (Done)") } } } diff --git a/util/eventbus/monitor.go b/util/eventbus/monitor.go index 18cc2a413..db6fe1be4 100644 --- a/util/eventbus/monitor.go +++ b/util/eventbus/monitor.go @@ -3,9 +3,12 @@ package eventbus +import "tailscale.com/syncs" + // A Monitor monitors the execution of a goroutine processing events from a // [Client], allowing the caller to block until it is complete. The zero value -// of m is valid and its Close and Wait methods return immediately. +// of m is valid; its Close and Wait methods return immediately, and its Done +// method returns an already-closed channel. type Monitor struct { // These fields are immutable after initialization cli *Client @@ -32,6 +35,15 @@ func (m Monitor) Wait() { <-m.done } +// Done returns a channel that is closed when the monitored goroutine has +// finished executing. +func (m Monitor) Done() <-chan struct{} { + if m.done == nil { + return syncs.ClosedChan() + } + return m.done +} + // Monitor executes f in a new goroutine attended by a [Monitor]. The caller // is responsible for waiting for the goroutine to complete, by calling either // [Monitor.Close] or [Monitor.Wait]. From 21dc5f4e212e15f48f15fceb8ec487f8be54989f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 23 Sep 2025 17:07:48 -0700 Subject: [PATCH 1384/1708] derp/derpserver: split off derp.Server out of derp into its own package This exports a number of things from the derp (generic + client) package to be used by the new derpserver package, as now used by cmd/derper. And then enough other misc changes to lock in that cmd/tailscaled can be configured to not bring in tailscale.com/client/local. 
(The webclient in particular, even when disabled, was bringing it in, so that's now fixed) Fixes #17257 Change-Id: I88b6c7958643fb54f386dd900bddf73d2d4d96d5 Signed-off-by: Brad Fitzpatrick --- cmd/derper/ace.go | 4 +- cmd/derper/cert_test.go | 6 +- cmd/derper/depaware.txt | 15 +- cmd/derper/derper.go | 17 +- cmd/derper/derper_test.go | 10 +- cmd/derper/mesh.go | 5 +- cmd/derper/websocket.go | 4 +- cmd/k8s-operator/depaware.txt | 11 +- cmd/tailscale/depaware.txt | 14 +- cmd/tailscaled/depaware.txt | 13 +- cmd/tailscaled/deps_test.go | 11 + cmd/tailscaled/tailscaled.go | 10 +- cmd/tailscaled/webclient.go | 21 + cmd/tsidp/depaware.txt | 11 +- derp/client_test.go | 235 ++++ derp/derp.go | 105 +- derp/derp_client.go | 80 +- derp/derp_test.go | 1051 +---------------- derp/derphttp/derphttp_client.go | 2 +- derp/derphttp/derphttp_test.go | 94 +- derp/derphttp/export_test.go | 24 + derp/{ => derpserver}/derp_server.go | 159 ++- derp/{ => derpserver}/derp_server_default.go | 2 +- derp/{ => derpserver}/derp_server_linux.go | 2 +- derp/derpserver/derpserver_test.go | 782 ++++++++++++ .../handler.go} | 14 +- derp/{ => derpserver}/testdata/example_ss.txt | 0 derp/export_test.go | 10 + ipn/ipnlocal/web_client_stub.go | 4 +- net/captivedetection/captivedetection_test.go | 4 +- prober/derp_test.go | 5 +- tsnet/depaware.txt | 11 +- tstest/integration/integration.go | 7 +- tstest/natlab/vnet/vnet.go | 11 +- wgengine/magicsock/magicsock_test.go | 7 +- 35 files changed, 1442 insertions(+), 1319 deletions(-) create mode 100644 cmd/tailscaled/webclient.go create mode 100644 derp/client_test.go create mode 100644 derp/derphttp/export_test.go rename derp/{ => derpserver}/derp_server.go (94%) rename derp/{ => derpserver}/derp_server_default.go (91%) rename derp/{ => derpserver}/derp_server_linux.go (99%) create mode 100644 derp/derpserver/derpserver_test.go rename derp/{derphttp/derphttp_server.go => derpserver/handler.go} (86%) rename derp/{ => derpserver}/testdata/example_ss.txt (100%) create mode 100644 derp/export_test.go diff --git a/cmd/derper/ace.go b/cmd/derper/ace.go index a11539a6e..56fb68c33 100644 --- a/cmd/derper/ace.go +++ b/cmd/derper/ace.go @@ -12,12 +12,12 @@ import ( "net/http" "strings" - "tailscale.com/derp" + "tailscale.com/derp/derpserver" "tailscale.com/net/connectproxy" ) // serveConnect handles a CONNECT request for ACE support. 
-func serveConnect(s *derp.Server, w http.ResponseWriter, r *http.Request) { +func serveConnect(s *derpserver.Server, w http.ResponseWriter, r *http.Request) { if !*flagACEEnabled { http.Error(w, "CONNECT not enabled", http.StatusForbidden) return diff --git a/cmd/derper/cert_test.go b/cmd/derper/cert_test.go index 31fd4ea44..1ef932e7f 100644 --- a/cmd/derper/cert_test.go +++ b/cmd/derper/cert_test.go @@ -22,8 +22,8 @@ import ( "testing" "time" - "tailscale.com/derp" "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/tailcfg" "tailscale.com/types/key" @@ -131,9 +131,9 @@ func TestPinnedCertRawIP(t *testing.T) { } defer ln.Close() - ds := derp.NewServer(key.NewNode(), t.Logf) + ds := derpserver.NewServer(key.NewNode(), t.Logf) - derpHandler := derphttp.Handler(ds) + derpHandler := derpserver.Handler(ds) mux := http.NewServeMux() mux.Handle("/derp", derpHandler) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index b0501b588..d19ea1f17 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -89,12 +89,13 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version 💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+ - tailscale.com/client/local from tailscale.com/derp + tailscale.com/client/local from tailscale.com/derp/derpserver tailscale.com/client/tailscale/apitype from tailscale.com/client/local tailscale.com/derp from tailscale.com/cmd/derper+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/cmd/derper - tailscale.com/disco from tailscale.com/derp + tailscale.com/derp/derpserver from tailscale.com/cmd/derper + tailscale.com/disco from tailscale.com/derp/derpserver tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/feature from tailscale.com/tsweb @@ -117,7 +118,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/net/sockstats from tailscale.com/derp/derphttp tailscale.com/net/stun from tailscale.com/net/stunserver tailscale.com/net/stunserver from tailscale.com/cmd/derper - L tailscale.com/net/tcpinfo from tailscale.com/derp + L tailscale.com/net/tcpinfo from tailscale.com/derp/derpserver tailscale.com/net/tlsdial from tailscale.com/derp/derphttp tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ @@ -132,7 +133,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa W tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tstime from tailscale.com/derp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate - tailscale.com/tstime/rate from tailscale.com/derp + tailscale.com/tstime/rate from tailscale.com/derp/derpserver tailscale.com/tsweb from tailscale.com/cmd/derper+ tailscale.com/tsweb/promvarz from tailscale.com/cmd/derper tailscale.com/tsweb/varz from tailscale.com/tsweb+ @@ -167,7 +168,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/multierr from tailscale.com/health+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/rands from tailscale.com/tsweb - tailscale.com/util/set from tailscale.com/derp+ + 
tailscale.com/util/set from tailscale.com/derp/derpserver+ tailscale.com/util/singleflight from tailscale.com/net/dnscache tailscale.com/util/slicesx from tailscale.com/cmd/derper+ tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting @@ -180,7 +181,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/vizerror from tailscale.com/tailcfg+ W 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ - tailscale.com/version from tailscale.com/derp+ + tailscale.com/version from tailscale.com/cmd/derper+ tailscale.com/version/distro from tailscale.com/envknob+ tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap golang.org/x/crypto/acme from golang.org/x/crypto/acme/autocert diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index b25bf22de..eed94bd68 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -40,8 +40,7 @@ import ( "github.com/tailscale/setec/client/setec" "golang.org/x/time/rate" "tailscale.com/atomicfile" - "tailscale.com/derp" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/metrics" "tailscale.com/net/ktimeout" "tailscale.com/net/stunserver" @@ -90,7 +89,7 @@ var ( // tcpUserTimeout is intentionally short, so that hung connections are cleaned up promptly. DERPs should be nearby users. tcpUserTimeout = flag.Duration("tcp-user-timeout", 15*time.Second, "TCP user timeout") // tcpWriteTimeout is the timeout for writing to client TCP connections. It does not apply to mesh connections. - tcpWriteTimeout = flag.Duration("tcp-write-timeout", derp.DefaultTCPWiteTimeout, "TCP write timeout; 0 results in no timeout being set on writes") + tcpWriteTimeout = flag.Duration("tcp-write-timeout", derpserver.DefaultTCPWiteTimeout, "TCP write timeout; 0 results in no timeout being set on writes") // ACE flagACEEnabled = flag.Bool("ace", false, "whether to enable embedded ACE server [experimental + in-development as of 2025-09-12; not yet documented]") @@ -189,7 +188,7 @@ func main() { serveTLS := tsweb.IsProd443(*addr) || *certMode == "manual" - s := derp.NewServer(cfg.PrivateKey, log.Printf) + s := derpserver.NewServer(cfg.PrivateKey, log.Printf) s.SetVerifyClient(*verifyClients) s.SetTailscaledSocketPath(*socket) s.SetVerifyClientURL(*verifyClientURL) @@ -256,7 +255,7 @@ func main() { mux := http.NewServeMux() if *runDERP { - derpHandler := derphttp.Handler(s) + derpHandler := derpserver.Handler(s) derpHandler = addWebSocketSupport(s, derpHandler) mux.Handle("/derp", derpHandler) } else { @@ -267,8 +266,8 @@ func main() { // These two endpoints are the same. Different versions of the clients // have assumes different paths over time so we support both. 
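The derper.go hunks above and below swap the server construction and HTTP wiring from the derp/derphttp packages to the new derpserver package. As a rough orientation, a minimal wiring sketch (not taken from this patch; the key, logger, and listen address are placeholders) using only the derpserver calls that appear in these hunks might look like:

package main

import (
	"log"
	"net/http"

	"tailscale.com/derp/derpserver"
	"tailscale.com/types/key"
)

func main() {
	// derper itself loads cfg.PrivateKey; a fresh node key stands in here.
	s := derpserver.NewServer(key.NewNode(), log.Printf)

	mux := http.NewServeMux()
	mux.Handle("/derp", derpserver.Handler(s))             // DERP-over-HTTP endpoint
	mux.HandleFunc("/derp/probe", derpserver.ProbeHandler) // probe and latency-check share a handler
	mux.HandleFunc("/derp/latency-check", derpserver.ProbeHandler)

	log.Fatal(http.ListenAndServe(":3340", mux)) // placeholder address
}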
- mux.HandleFunc("/derp/probe", derphttp.ProbeHandler) - mux.HandleFunc("/derp/latency-check", derphttp.ProbeHandler) + mux.HandleFunc("/derp/probe", derpserver.ProbeHandler) + mux.HandleFunc("/derp/latency-check", derpserver.ProbeHandler) go refreshBootstrapDNSLoop() mux.HandleFunc("/bootstrap-dns", tsweb.BrowserHeaderHandlerFunc(handleBootstrapDNS)) @@ -280,7 +279,7 @@ func main() { tsweb.AddBrowserHeaders(w) io.WriteString(w, "User-agent: *\nDisallow: /\n") })) - mux.Handle("/generate_204", http.HandlerFunc(derphttp.ServeNoContent)) + mux.Handle("/generate_204", http.HandlerFunc(derpserver.ServeNoContent)) debug := tsweb.Debugger(mux) debug.KV("TLS hostname", *hostname) debug.KV("Mesh key", s.HasMeshKey()) @@ -388,7 +387,7 @@ func main() { if *httpPort > -1 { go func() { port80mux := http.NewServeMux() - port80mux.HandleFunc("/generate_204", derphttp.ServeNoContent) + port80mux.HandleFunc("/generate_204", derpserver.ServeNoContent) port80mux.Handle("/", certManager.HTTPHandler(tsweb.Port80Handler{Main: mux})) port80srv := &http.Server{ Addr: net.JoinHostPort(listenHost, fmt.Sprintf("%d", *httpPort)), diff --git a/cmd/derper/derper_test.go b/cmd/derper/derper_test.go index 6dce1fcdf..d27f8cb20 100644 --- a/cmd/derper/derper_test.go +++ b/cmd/derper/derper_test.go @@ -11,7 +11,7 @@ import ( "strings" "testing" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/tstest/deptest" ) @@ -78,20 +78,20 @@ func TestNoContent(t *testing.T) { t.Run(tt.name, func(t *testing.T) { req, _ := http.NewRequest("GET", "https://localhost/generate_204", nil) if tt.input != "" { - req.Header.Set(derphttp.NoContentChallengeHeader, tt.input) + req.Header.Set(derpserver.NoContentChallengeHeader, tt.input) } w := httptest.NewRecorder() - derphttp.ServeNoContent(w, req) + derpserver.ServeNoContent(w, req) resp := w.Result() if tt.want == "" { - if h, found := resp.Header[derphttp.NoContentResponseHeader]; found { + if h, found := resp.Header[derpserver.NoContentResponseHeader]; found { t.Errorf("got %+v; expected no response header", h) } return } - if got := resp.Header.Get(derphttp.NoContentResponseHeader); got != tt.want { + if got := resp.Header.Get(derpserver.NoContentResponseHeader); got != tt.want { t.Errorf("got %q; want %q", got, tt.want) } }) diff --git a/cmd/derper/mesh.go b/cmd/derper/mesh.go index cbb2fa59a..909b5f2ca 100644 --- a/cmd/derper/mesh.go +++ b/cmd/derper/mesh.go @@ -13,11 +13,12 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/types/logger" ) -func startMesh(s *derp.Server) error { +func startMesh(s *derpserver.Server) error { if *meshWith == "" { return nil } @@ -32,7 +33,7 @@ func startMesh(s *derp.Server) error { return nil } -func startMeshWithHost(s *derp.Server, hostTuple string) error { +func startMeshWithHost(s *derpserver.Server, hostTuple string) error { var host string var dialHost string hostParts := strings.Split(hostTuple, "/") diff --git a/cmd/derper/websocket.go b/cmd/derper/websocket.go index 05f40deb8..82fd30bed 100644 --- a/cmd/derper/websocket.go +++ b/cmd/derper/websocket.go @@ -11,14 +11,14 @@ import ( "strings" "github.com/coder/websocket" - "tailscale.com/derp" + "tailscale.com/derp/derpserver" "tailscale.com/net/wsconn" ) var counterWebSocketAccepts = expvar.NewInt("derp_websocket_accepts") // addWebSocketSupport returns a Handle wrapping base that adds WebSocket server support. 
-func addWebSocketSupport(s *derp.Server, base http.Handler) http.Handler { +func addWebSocketSupport(s *derpserver.Server, base http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { up := strings.ToLower(r.Header.Get("Upgrade")) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index e0fdc27bb..2281d3819 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -784,9 +784,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ - tailscale.com/disco from tailscale.com/derp+ + tailscale.com/disco from tailscale.com/net/tstun+ tailscale.com/doctor from tailscale.com/ipn/ipnlocal tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal @@ -839,7 +839,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock @@ -875,7 +875,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ @@ -902,7 +901,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/tsnet from tailscale.com/cmd/k8s-operator+ tailscale.com/tstime from tailscale.com/cmd/k8s-operator+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ - tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal @@ -1217,7 +1216,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ math/big from crypto/dsa+ math/bits from compress/flate+ math/rand from github.com/google/go-cmp/cmp+ - math/rand/v2 from tailscale.com/derp+ + math/rand/v2 from crypto/ecdsa+ mime from github.com/prometheus/common/expfmt+ mime/multipart from github.com/go-openapi/swag+ mime/quotedprintable from mime/multipart diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index abb329806..4bd4e6bca 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -96,9 +96,8 @@ tailscale.com/cmd/tailscale 
dependencies: (generated by github.com/tailscale/dep tailscale.com/control/controlhttp from tailscale.com/cmd/tailscale/cli tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/net/netcheck - tailscale.com/disco from tailscale.com/derp tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web @@ -119,7 +118,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial tailscale.com/net/captivedetection from tailscale.com/net/netcheck @@ -138,7 +137,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/sockstats from tailscale.com/control/controlhttp+ tailscale.com/net/stun from tailscale.com/net/netcheck - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ @@ -153,7 +151,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tstime from tailscale.com/control/controlhttp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate - tailscale.com/tstime/rate from tailscale.com/cmd/tailscale/cli+ + tailscale.com/tstime/rate from tailscale.com/cmd/tailscale/cli tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ tailscale.com/types/dnstype from tailscale.com/tailcfg+ @@ -193,7 +191,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli tailscale.com/util/quarantine from tailscale.com/cmd/tailscale/cli tailscale.com/util/rands from tailscale.com/tsweb - tailscale.com/util/set from tailscale.com/derp+ + tailscale.com/util/set from tailscale.com/ipn+ tailscale.com/util/singleflight from tailscale.com/net/dnscache tailscale.com/util/slicesx from tailscale.com/client/systray+ L tailscale.com/util/stringsx from tailscale.com/client/systray @@ -358,7 +356,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep encoding/pem from crypto/tls+ encoding/xml from github.com/godbus/dbus/v5/introspect+ errors from archive/tar+ - expvar from tailscale.com/derp+ + expvar from tailscale.com/health+ flag from github.com/peterbourgon/ff/v3+ fmt from archive/tar+ hash from compress/zlib+ @@ -431,7 +429,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep math/big from crypto/dsa+ math/bits from compress/flate+ math/rand from github.com/mdlayher/netlink+ - math/rand/v2 from tailscale.com/derp+ + math/rand/v2 from 
crypto/ecdsa+ mime from golang.org/x/oauth2/internal+ mime/multipart from net/http mime/quotedprintable from mime/multipart diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index f85063ddb..70be690ee 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -256,9 +256,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ - tailscale.com/disco from tailscale.com/derp+ + tailscale.com/disco from tailscale.com/feature/relayserver+ tailscale.com/doctor from tailscale.com/ipn/ipnlocal tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal @@ -314,7 +314,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/backoff from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ @@ -349,7 +349,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/socks5 from tailscale.com/cmd/tailscaled tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ @@ -378,7 +377,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tsd from tailscale.com/cmd/tailscaled+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ - tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal @@ -432,7 +431,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock - tailscale.com/util/set from tailscale.com/derp+ + tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy @@ -613,7 +612,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/pem from crypto/tls+ encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ 
errors from archive/tar+ - expvar from tailscale.com/derp+ + expvar from tailscale.com/cmd/tailscaled+ flag from tailscale.com/cmd/tailscaled+ fmt from archive/tar+ hash from compress/zlib+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 2e797e366..35975b57c 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -44,6 +44,17 @@ func TestOmitSyspolicy(t *testing.T) { }.Check(t) } +func TestOmitLocalClient(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_webclient,ts_omit_relayserver,ts_omit_oauthkey,ts_omit_acme", + BadDeps: map[string]string{ + "tailscale.com/client/local": "unexpected", + }, + }.Check(t) +} + // Test that we can build a binary without reflect.MethodByName. // See https://github.com/tailscale/tailscale/issues/17063 func TestOmitReflectThings(t *testing.T) { diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 9e099f9cb..0c6e6d22f 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -30,7 +30,6 @@ import ( "syscall" "time" - "tailscale.com/client/local" "tailscale.com/cmd/tailscaled/childproc" "tailscale.com/control/controlclient" "tailscale.com/envknob" @@ -685,16 +684,17 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID if root := lb.TailscaleVarRoot(); root != "" { dnsfallback.SetCachePath(filepath.Join(root, "derpmap.cached.json"), logf) } - lb.ConfigureWebClient(&local.Client{ - Socket: args.socketpath, - UseSocketOnly: args.socketpath != paths.DefaultTailscaledSocket(), - }) + if f, ok := hookConfigureWebClient.GetOk(); ok { + f(lb) + } if err := ns.Start(lb); err != nil { log.Fatalf("failed to start netstack: %v", err) } return lb, nil } +var hookConfigureWebClient feature.Hook[func(*ipnlocal.LocalBackend)] + // createEngine tries to the wgengine.Engine based on the order of tunnels // specified in the command line flags. 
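The hookConfigureWebClient change above, together with the new build-tag-gated cmd/tailscaled/webclient.go file in the following hunk, moves the web client setup behind an optional registration hook so that ts_omit_webclient builds drop the tailscale.com/client/local dependency. The sketch below is a simplified, hypothetical illustration of that shape; the Hook type here is a stand-in for tailscale.com/feature.Hook and only mirrors the Set and GetOk calls visible in the diff.

// file: main.go
package main

import "fmt"

// Hook is a minimal stand-in for the generic hook type used in the patch.
type Hook[T any] struct {
	v  T
	ok bool
}

func (h *Hook[T]) Set(v T)          { h.v, h.ok = v, true }
func (h *Hook[T]) GetOk() (T, bool) { return h.v, h.ok }

// Backend is a placeholder for ipnlocal.LocalBackend.
type Backend struct{ webClient bool }

// hookConfigureWebClient is populated by an init func in a file that is
// compiled only when the feature's omit tag is not set.
var hookConfigureWebClient Hook[func(*Backend)]

func main() {
	lb := &Backend{}
	if f, ok := hookConfigureWebClient.GetOk(); ok {
		f(lb) // web client feature compiled in
	}
	fmt.Println("web client enabled:", lb.webClient)
}

// file: webclient.go
//go:build !omit_webclient

package main

func init() {
	hookConfigureWebClient.Set(func(b *Backend) { b.webClient = true })
}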
// diff --git a/cmd/tailscaled/webclient.go b/cmd/tailscaled/webclient.go new file mode 100644 index 000000000..672ba7126 --- /dev/null +++ b/cmd/tailscaled/webclient.go @@ -0,0 +1,21 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_webclient + +package main + +import ( + "tailscale.com/client/local" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/paths" +) + +func init() { + hookConfigureWebClient.Set(func(lb *ipnlocal.LocalBackend) { + lb.ConfigureWebClient(&local.Client{ + Socket: args.socketpath, + UseSocketOnly: args.socketpath != paths.DefaultTailscaledSocket(), + }) + }) +} diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index f6bab6978..4fd7c8020 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -226,9 +226,9 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ - tailscale.com/disco from tailscale.com/derp+ + tailscale.com/disco from tailscale.com/net/tstun+ tailscale.com/doctor from tailscale.com/ipn/ipnlocal tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal @@ -270,7 +270,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock @@ -306,7 +306,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ @@ -332,7 +331,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/tsnet from tailscale.com/cmd/tsidp tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ - tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal @@ -566,7 +565,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar encoding/pem from crypto/tls+ encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ errors from archive/tar+ - expvar from tailscale.com/derp+ + expvar from tailscale.com/health+ flag from 
tailscale.com/cmd/tsidp+ fmt from archive/tar+ hash from compress/zlib+ diff --git a/derp/client_test.go b/derp/client_test.go new file mode 100644 index 000000000..a731ad197 --- /dev/null +++ b/derp/client_test.go @@ -0,0 +1,235 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package derp + +import ( + "bufio" + "bytes" + "io" + "net" + "reflect" + "sync" + "testing" + "time" + + "tailscale.com/tstest" + "tailscale.com/types/key" +) + +type dummyNetConn struct { + net.Conn +} + +func (dummyNetConn) SetReadDeadline(time.Time) error { return nil } + +func TestClientRecv(t *testing.T) { + tests := []struct { + name string + input []byte + want any + }{ + { + name: "ping", + input: []byte{ + byte(FramePing), 0, 0, 0, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + }, + want: PingMessage{1, 2, 3, 4, 5, 6, 7, 8}, + }, + { + name: "pong", + input: []byte{ + byte(FramePong), 0, 0, 0, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + }, + want: PongMessage{1, 2, 3, 4, 5, 6, 7, 8}, + }, + { + name: "health_bad", + input: []byte{ + byte(FrameHealth), 0, 0, 0, 3, + byte('B'), byte('A'), byte('D'), + }, + want: HealthMessage{Problem: "BAD"}, + }, + { + name: "health_ok", + input: []byte{ + byte(FrameHealth), 0, 0, 0, 0, + }, + want: HealthMessage{}, + }, + { + name: "server_restarting", + input: []byte{ + byte(FrameRestarting), 0, 0, 0, 8, + 0, 0, 0, 1, + 0, 0, 0, 2, + }, + want: ServerRestartingMessage{ + ReconnectIn: 1 * time.Millisecond, + TryFor: 2 * time.Millisecond, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Client{ + nc: dummyNetConn{}, + br: bufio.NewReader(bytes.NewReader(tt.input)), + logf: t.Logf, + clock: &tstest.Clock{}, + } + got, err := c.Recv() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("got %#v; want %#v", got, tt.want) + } + }) + } +} + +func TestClientSendPing(t *testing.T) { + var buf bytes.Buffer + c := &Client{ + bw: bufio.NewWriter(&buf), + } + if err := c.SendPing([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { + t.Fatal(err) + } + want := []byte{ + byte(FramePing), 0, 0, 0, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + } + if !bytes.Equal(buf.Bytes(), want) { + t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", buf.Bytes(), want) + } +} + +func TestClientSendPong(t *testing.T) { + var buf bytes.Buffer + c := &Client{ + bw: bufio.NewWriter(&buf), + } + if err := c.SendPong([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { + t.Fatal(err) + } + want := []byte{ + byte(FramePong), 0, 0, 0, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + } + if !bytes.Equal(buf.Bytes(), want) { + t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", buf.Bytes(), want) + } +} + +func BenchmarkWriteUint32(b *testing.B) { + w := bufio.NewWriter(io.Discard) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + writeUint32(w, 0x0ba3a) + } +} + +type nopRead struct{} + +func (r nopRead) Read(p []byte) (int, error) { + return len(p), nil +} + +var sinkU32 uint32 + +func BenchmarkReadUint32(b *testing.B) { + r := bufio.NewReader(nopRead{}) + var err error + b.ReportAllocs() + b.ResetTimer() + for range b.N { + sinkU32, err = readUint32(r) + if err != nil { + b.Fatal(err) + } + } +} + +type countWriter struct { + mu sync.Mutex + writes int + bytes int64 +} + +func (w *countWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + w.writes++ + w.bytes += int64(len(p)) + return len(p), nil +} + +func (w *countWriter) Stats() (writes int, bytes int64) { + w.mu.Lock() + defer w.mu.Unlock() + return w.writes, 
w.bytes +} + +func (w *countWriter) ResetStats() { + w.mu.Lock() + defer w.mu.Unlock() + w.writes, w.bytes = 0, 0 +} + +func TestClientSendRateLimiting(t *testing.T) { + cw := new(countWriter) + c := &Client{ + bw: bufio.NewWriter(cw), + clock: &tstest.Clock{}, + } + c.setSendRateLimiter(ServerInfoMessage{}) + + pkt := make([]byte, 1000) + if err := c.send(key.NodePublic{}, pkt); err != nil { + t.Fatal(err) + } + writes1, bytes1 := cw.Stats() + if writes1 != 1 { + t.Errorf("writes = %v, want 1", writes1) + } + + // Flood should all succeed. + cw.ResetStats() + for range 1000 { + if err := c.send(key.NodePublic{}, pkt); err != nil { + t.Fatal(err) + } + } + writes1K, bytes1K := cw.Stats() + if writes1K != 1000 { + t.Logf("writes = %v; want 1000", writes1K) + } + if got, want := bytes1K, bytes1*1000; got != want { + t.Logf("bytes = %v; want %v", got, want) + } + + // Set a rate limiter + cw.ResetStats() + c.setSendRateLimiter(ServerInfoMessage{ + TokenBucketBytesPerSecond: 1, + TokenBucketBytesBurst: int(bytes1 * 2), + }) + for range 1000 { + if err := c.send(key.NodePublic{}, pkt); err != nil { + t.Fatal(err) + } + } + writesLimited, bytesLimited := cw.Stats() + if writesLimited == 0 || writesLimited == writes1K { + t.Errorf("limited conn's write count = %v; want non-zero, less than 1k", writesLimited) + } + if bytesLimited < bytes1*2 || bytesLimited >= bytes1K { + t.Errorf("limited conn's bytes count = %v; want >=%v, <%v", bytesLimited, bytes1K*2, bytes1K) + } +} diff --git a/derp/derp.go b/derp/derp.go index 24c1ca65c..e19a99b00 100644 --- a/derp/derp.go +++ b/derp/derp.go @@ -27,15 +27,15 @@ import ( // including its on-wire framing overhead) const MaxPacketSize = 64 << 10 -// magic is the DERP magic number, sent in the frameServerKey frame +// Magic is the DERP Magic number, sent in the frameServerKey frame // upon initial connection. -const magic = "DERP🔑" // 8 bytes: 0x44 45 52 50 f0 9f 94 91 +const Magic = "DERP🔑" // 8 bytes: 0x44 45 52 50 f0 9f 94 91 const ( - nonceLen = 24 - frameHeaderLen = 1 + 4 // frameType byte + 4 byte length - keyLen = 32 - maxInfoLen = 1 << 20 + NonceLen = 24 + FrameHeaderLen = 1 + 4 // frameType byte + 4 byte length + KeyLen = 32 + MaxInfoLen = 1 << 20 ) // KeepAlive is the minimum frequency at which the DERP server sends @@ -48,10 +48,10 @@ const KeepAlive = 60 * time.Second // - version 2: received packets have src addrs in frameRecvPacket at beginning const ProtocolVersion = 2 -// frameType is the one byte frame type at the beginning of the frame +// FrameType is the one byte frame type at the beginning of the frame // header. The second field is a big-endian uint32 describing the // length of the remaining frame (not including the initial 5 bytes). 
-type frameType byte +type FrameType byte /* Protocol flow: @@ -69,14 +69,14 @@ Steady state: * server then sends frameRecvPacket to recipient */ const ( - frameServerKey = frameType(0x01) // 8B magic + 32B public key + (0+ bytes future use) - frameClientInfo = frameType(0x02) // 32B pub key + 24B nonce + naclbox(json) - frameServerInfo = frameType(0x03) // 24B nonce + naclbox(json) - frameSendPacket = frameType(0x04) // 32B dest pub key + packet bytes - frameForwardPacket = frameType(0x0a) // 32B src pub key + 32B dst pub key + packet bytes - frameRecvPacket = frameType(0x05) // v0/1: packet bytes, v2: 32B src pub key + packet bytes - frameKeepAlive = frameType(0x06) // no payload, no-op (to be replaced with ping/pong) - frameNotePreferred = frameType(0x07) // 1 byte payload: 0x01 or 0x00 for whether this is client's home node + FrameServerKey = FrameType(0x01) // 8B magic + 32B public key + (0+ bytes future use) + FrameClientInfo = FrameType(0x02) // 32B pub key + 24B nonce + naclbox(json) + FrameServerInfo = FrameType(0x03) // 24B nonce + naclbox(json) + FrameSendPacket = FrameType(0x04) // 32B dest pub key + packet bytes + FrameForwardPacket = FrameType(0x0a) // 32B src pub key + 32B dst pub key + packet bytes + FrameRecvPacket = FrameType(0x05) // v0/1: packet bytes, v2: 32B src pub key + packet bytes + FrameKeepAlive = FrameType(0x06) // no payload, no-op (to be replaced with ping/pong) + FrameNotePreferred = FrameType(0x07) // 1 byte payload: 0x01 or 0x00 for whether this is client's home node // framePeerGone is sent from server to client to signal that // a previous sender is no longer connected. That is, if A @@ -85,7 +85,7 @@ const ( // exists on that connection to get back to A. It is also sent // if A tries to send a CallMeMaybe to B and the server has no // record of B - framePeerGone = frameType(0x08) // 32B pub key of peer that's gone + 1 byte reason + FramePeerGone = FrameType(0x08) // 32B pub key of peer that's gone + 1 byte reason // framePeerPresent is like framePeerGone, but for other members of the DERP // region when they're meshed up together. @@ -96,7 +96,7 @@ const ( // remaining after that, it's a PeerPresentFlags byte. // While current servers send 41 bytes, old servers will send fewer, and newer // servers might send more. - framePeerPresent = frameType(0x09) + FramePeerPresent = FrameType(0x09) // frameWatchConns is how one DERP node in a regional mesh // subscribes to the others in the region. @@ -104,30 +104,30 @@ const ( // is closed. Otherwise, the client is initially flooded with // framePeerPresent for all connected nodes, and then a stream of // framePeerPresent & framePeerGone has peers connect and disconnect. - frameWatchConns = frameType(0x10) + FrameWatchConns = FrameType(0x10) // frameClosePeer is a privileged frame type (requires the // mesh key for now) that closes the provided peer's // connection. (To be used for cluster load balancing // purposes, when clients end up on a non-ideal node) - frameClosePeer = frameType(0x11) // 32B pub key of peer to close. + FrameClosePeer = FrameType(0x11) // 32B pub key of peer to close. 
- framePing = frameType(0x12) // 8 byte ping payload, to be echoed back in framePong - framePong = frameType(0x13) // 8 byte payload, the contents of the ping being replied to + FramePing = FrameType(0x12) // 8 byte ping payload, to be echoed back in framePong + FramePong = FrameType(0x13) // 8 byte payload, the contents of the ping being replied to // frameHealth is sent from server to client to tell the client // if their connection is unhealthy somehow. Currently the only unhealthy state // is whether the connection is detected as a duplicate. // The entire frame body is the text of the error message. An empty message // clears the error state. - frameHealth = frameType(0x14) + FrameHealth = FrameType(0x14) // frameRestarting is sent from server to client for the // server to declare that it's restarting. Payload is two big // endian uint32 durations in milliseconds: when to reconnect, // and how long to try total. See ServerRestartingMessage docs for // more details on how the client should interpret them. - frameRestarting = frameType(0x15) + FrameRestarting = FrameType(0x15) ) // PeerGoneReasonType is a one byte reason code explaining why a @@ -154,6 +154,18 @@ const ( PeerPresentNotIdeal = 1 << 3 // client said derp server is not its Region.Nodes[0] ideal node ) +// IdealNodeHeader is the HTTP request header sent on DERP HTTP client requests +// to indicate that they're connecting to their ideal (Region.Nodes[0]) node. +// The HTTP header value is the name of the node they wish they were connected +// to. This is an optional header. +const IdealNodeHeader = "Ideal-Node" + +// FastStartHeader is the header (with value "1") that signals to the HTTP +// server that the DERP HTTP client does not want the HTTP 101 response +// headers and it will begin writing & reading the DERP protocol immediately +// following its HTTP request. +const FastStartHeader = "Derp-Fast-Start" + var bin = binary.BigEndian func writeUint32(bw *bufio.Writer, v uint32) error { @@ -186,15 +198,24 @@ func readUint32(br *bufio.Reader) (uint32, error) { return bin.Uint32(b[:]), nil } -func readFrameTypeHeader(br *bufio.Reader, wantType frameType) (frameLen uint32, err error) { - gotType, frameLen, err := readFrameHeader(br) +// ReadFrameTypeHeader reads a frame header from br and +// verifies that the frame type matches wantType. +// +// If it does, it returns the frame length (not including +// the 5 byte header) and a nil error. +// +// If it doesn't, it returns an error and a zero length. +func ReadFrameTypeHeader(br *bufio.Reader, wantType FrameType) (frameLen uint32, err error) { + gotType, frameLen, err := ReadFrameHeader(br) if err == nil && wantType != gotType { err = fmt.Errorf("bad frame type 0x%X, want 0x%X", gotType, wantType) } return frameLen, err } -func readFrameHeader(br *bufio.Reader) (t frameType, frameLen uint32, err error) { +// ReadFrameHeader reads the header of a DERP frame, +// reading 5 bytes from br. 
+func ReadFrameHeader(br *bufio.Reader) (t FrameType, frameLen uint32, err error) { tb, err := br.ReadByte() if err != nil { return 0, 0, err @@ -203,7 +224,7 @@ func readFrameHeader(br *bufio.Reader) (t frameType, frameLen uint32, err error) if err != nil { return 0, 0, err } - return frameType(tb), frameLen, nil + return FrameType(tb), frameLen, nil } // readFrame reads a frame header and then reads its payload into @@ -216,8 +237,8 @@ func readFrameHeader(br *bufio.Reader) (t frameType, frameLen uint32, err error) // bytes are read, err will be io.ErrShortBuffer, and frameLen and t // will both be set. That is, callers need to explicitly handle when // they get more data than expected. -func readFrame(br *bufio.Reader, maxSize uint32, b []byte) (t frameType, frameLen uint32, err error) { - t, frameLen, err = readFrameHeader(br) +func readFrame(br *bufio.Reader, maxSize uint32, b []byte) (t FrameType, frameLen uint32, err error) { + t, frameLen, err = ReadFrameHeader(br) if err != nil { return 0, 0, err } @@ -239,19 +260,26 @@ func readFrame(br *bufio.Reader, maxSize uint32, b []byte) (t frameType, frameLe return t, frameLen, err } -func writeFrameHeader(bw *bufio.Writer, t frameType, frameLen uint32) error { +// WriteFrameHeader writes a frame header to bw. +// +// The frame header is 5 bytes: a one byte frame type +// followed by a big-endian uint32 length of the +// remaining frame (not including the 5 byte header). +// +// It does not flush bw. +func WriteFrameHeader(bw *bufio.Writer, t FrameType, frameLen uint32) error { if err := bw.WriteByte(byte(t)); err != nil { return err } return writeUint32(bw, frameLen) } -// writeFrame writes a complete frame & flushes it. -func writeFrame(bw *bufio.Writer, t frameType, b []byte) error { +// WriteFrame writes a complete frame & flushes it. +func WriteFrame(bw *bufio.Writer, t FrameType, b []byte) error { if len(b) > 10<<20 { return errors.New("unreasonably large frame write") } - if err := writeFrameHeader(bw, t, uint32(len(b))); err != nil { + if err := WriteFrameHeader(bw, t, uint32(len(b))); err != nil { return err } if _, err := bw.Write(b); err != nil { @@ -270,3 +298,12 @@ type Conn interface { SetReadDeadline(time.Time) error SetWriteDeadline(time.Time) error } + +// ServerInfo is the message sent from the server to clients during +// the connection setup. 
+type ServerInfo struct { + Version int `json:"version,omitempty"` + + TokenBucketBytesPerSecond int `json:",omitempty"` + TokenBucketBytesBurst int `json:",omitempty"` +} diff --git a/derp/derp_client.go b/derp/derp_client.go index 69f35db1e..d28905cd2 100644 --- a/derp/derp_client.go +++ b/derp/derp_client.go @@ -133,17 +133,17 @@ func (c *Client) recvServerKey() error { if err != nil { return err } - if flen < uint32(len(buf)) || t != frameServerKey || string(buf[:len(magic)]) != magic { + if flen < uint32(len(buf)) || t != FrameServerKey || string(buf[:len(Magic)]) != Magic { return errors.New("invalid server greeting") } - c.serverKey = key.NodePublicFromRaw32(mem.B(buf[len(magic):])) + c.serverKey = key.NodePublicFromRaw32(mem.B(buf[len(Magic):])) return nil } -func (c *Client) parseServerInfo(b []byte) (*serverInfo, error) { - const maxLength = nonceLen + maxInfoLen +func (c *Client) parseServerInfo(b []byte) (*ServerInfo, error) { + const maxLength = NonceLen + MaxInfoLen fl := len(b) - if fl < nonceLen { + if fl < NonceLen { return nil, fmt.Errorf("short serverInfo frame") } if fl > maxLength { @@ -153,14 +153,16 @@ func (c *Client) parseServerInfo(b []byte) (*serverInfo, error) { if !ok { return nil, fmt.Errorf("failed to open naclbox from server key %s", c.serverKey) } - info := new(serverInfo) + info := new(ServerInfo) if err := json.Unmarshal(msg, info); err != nil { return nil, fmt.Errorf("invalid JSON: %v", err) } return info, nil } -type clientInfo struct { +// ClientInfo is the information a DERP client sends to the server +// about itself when it connects. +type ClientInfo struct { // MeshKey optionally specifies a pre-shared key used by // trusted clients. It's required to subscribe to the // connection list & forward packets. It's empty for regular @@ -180,7 +182,7 @@ type clientInfo struct { } // Equal reports if two clientInfo values are equal. -func (c *clientInfo) Equal(other *clientInfo) bool { +func (c *ClientInfo) Equal(other *ClientInfo) bool { if c == nil || other == nil { return c == other } @@ -191,7 +193,7 @@ func (c *clientInfo) Equal(other *clientInfo) bool { } func (c *Client) sendClientKey() error { - msg, err := json.Marshal(clientInfo{ + msg, err := json.Marshal(ClientInfo{ Version: ProtocolVersion, MeshKey: c.meshKey, CanAckPings: c.canAckPings, @@ -202,10 +204,10 @@ func (c *Client) sendClientKey() error { } msgbox := c.privateKey.SealTo(c.serverKey, msg) - buf := make([]byte, 0, keyLen+len(msgbox)) + buf := make([]byte, 0, KeyLen+len(msgbox)) buf = c.publicKey.AppendTo(buf) buf = append(buf, msgbox...) - return writeFrame(c.bw, frameClientInfo, buf) + return WriteFrame(c.bw, FrameClientInfo, buf) } // ServerPublicKey returns the server's public key. 
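The derp.go hunks above export the wire helpers (Magic, the FrameType constants, WriteFrameHeader, WriteFrame, ReadFrameHeader, ReadFrameTypeHeader) so the server implementation can live in the separate derpserver package. A small, hypothetical round trip over an in-memory buffer, using only the signatures shown in those hunks (not code from the patch), would look like:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"log"

	"tailscale.com/derp"
)

func main() {
	var buf bytes.Buffer
	bw := bufio.NewWriter(&buf)

	// WriteFrame writes the 5-byte header (frame type + big-endian length),
	// then the payload, and flushes.
	payload := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	if err := derp.WriteFrame(bw, derp.FramePing, payload); err != nil {
		log.Fatal(err)
	}

	br := bufio.NewReader(&buf)
	t, n, err := derp.ReadFrameHeader(br)
	if err != nil {
		log.Fatal(err)
	}
	body := make([]byte, n)
	if _, err := io.ReadFull(br, body); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("frame type=0x%X len=%d body=% x\n", t, n, body)
}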
@@ -230,12 +232,12 @@ func (c *Client) send(dstKey key.NodePublic, pkt []byte) (ret error) { c.wmu.Lock() defer c.wmu.Unlock() if c.rate != nil { - pktLen := frameHeaderLen + key.NodePublicRawLen + len(pkt) + pktLen := FrameHeaderLen + key.NodePublicRawLen + len(pkt) if !c.rate.AllowN(c.clock.Now(), pktLen) { return nil // drop } } - if err := writeFrameHeader(c.bw, frameSendPacket, uint32(key.NodePublicRawLen+len(pkt))); err != nil { + if err := WriteFrameHeader(c.bw, FrameSendPacket, uint32(key.NodePublicRawLen+len(pkt))); err != nil { return err } if _, err := c.bw.Write(dstKey.AppendTo(nil)); err != nil { @@ -264,7 +266,7 @@ func (c *Client) ForwardPacket(srcKey, dstKey key.NodePublic, pkt []byte) (err e timer := c.clock.AfterFunc(5*time.Second, c.writeTimeoutFired) defer timer.Stop() - if err := writeFrameHeader(c.bw, frameForwardPacket, uint32(keyLen*2+len(pkt))); err != nil { + if err := WriteFrameHeader(c.bw, FrameForwardPacket, uint32(KeyLen*2+len(pkt))); err != nil { return err } if _, err := c.bw.Write(srcKey.AppendTo(nil)); err != nil { @@ -282,17 +284,17 @@ func (c *Client) ForwardPacket(srcKey, dstKey key.NodePublic, pkt []byte) (err e func (c *Client) writeTimeoutFired() { c.nc.Close() } func (c *Client) SendPing(data [8]byte) error { - return c.sendPingOrPong(framePing, data) + return c.sendPingOrPong(FramePing, data) } func (c *Client) SendPong(data [8]byte) error { - return c.sendPingOrPong(framePong, data) + return c.sendPingOrPong(FramePong, data) } -func (c *Client) sendPingOrPong(typ frameType, data [8]byte) error { +func (c *Client) sendPingOrPong(typ FrameType, data [8]byte) error { c.wmu.Lock() defer c.wmu.Unlock() - if err := writeFrameHeader(c.bw, typ, 8); err != nil { + if err := WriteFrameHeader(c.bw, typ, 8); err != nil { return err } if _, err := c.bw.Write(data[:]); err != nil { @@ -314,7 +316,7 @@ func (c *Client) NotePreferred(preferred bool) (err error) { c.wmu.Lock() defer c.wmu.Unlock() - if err := writeFrameHeader(c.bw, frameNotePreferred, 1); err != nil { + if err := WriteFrameHeader(c.bw, FrameNotePreferred, 1); err != nil { return err } var b byte = 0x00 @@ -332,7 +334,7 @@ func (c *Client) NotePreferred(preferred bool) (err error) { func (c *Client) WatchConnectionChanges() error { c.wmu.Lock() defer c.wmu.Unlock() - if err := writeFrameHeader(c.bw, frameWatchConns, 0); err != nil { + if err := WriteFrameHeader(c.bw, FrameWatchConns, 0); err != nil { return err } return c.bw.Flush() @@ -343,7 +345,7 @@ func (c *Client) WatchConnectionChanges() error { func (c *Client) ClosePeer(target key.NodePublic) error { c.wmu.Lock() defer c.wmu.Unlock() - return writeFrame(c.bw, frameClosePeer, target.AppendTo(nil)) + return WriteFrame(c.bw, FrameClosePeer, target.AppendTo(nil)) } // ReceivedMessage represents a type returned by Client.Recv. Unless @@ -502,7 +504,7 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro c.peeked = 0 } - t, n, err := readFrameHeader(c.br) + t, n, err := ReadFrameHeader(c.br) if err != nil { return nil, err } @@ -533,7 +535,7 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro switch t { default: continue - case frameServerInfo: + case FrameServerInfo: // Server sends this at start-up. Currently unused. 
// Just has a JSON message saying "version: 2", // but the protocol seems extensible enough as-is without @@ -550,29 +552,29 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro } c.setSendRateLimiter(sm) return sm, nil - case frameKeepAlive: + case FrameKeepAlive: // A one-way keep-alive message that doesn't require an acknowledgement. // This predated framePing/framePong. return KeepAliveMessage{}, nil - case framePeerGone: - if n < keyLen { + case FramePeerGone: + if n < KeyLen { c.logf("[unexpected] dropping short peerGone frame from DERP server") continue } // Backward compatibility for the older peerGone without reason byte reason := PeerGoneReasonDisconnected - if n > keyLen { - reason = PeerGoneReasonType(b[keyLen]) + if n > KeyLen { + reason = PeerGoneReasonType(b[KeyLen]) } pg := PeerGoneMessage{ - Peer: key.NodePublicFromRaw32(mem.B(b[:keyLen])), + Peer: key.NodePublicFromRaw32(mem.B(b[:KeyLen])), Reason: reason, } return pg, nil - case framePeerPresent: + case FramePeerPresent: remain := b - chunk, remain, ok := cutLeadingN(remain, keyLen) + chunk, remain, ok := cutLeadingN(remain, KeyLen) if !ok { c.logf("[unexpected] dropping short peerPresent frame from DERP server") continue @@ -600,17 +602,17 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro msg.Flags = PeerPresentFlags(chunk[0]) return msg, nil - case frameRecvPacket: + case FrameRecvPacket: var rp ReceivedPacket - if n < keyLen { + if n < KeyLen { c.logf("[unexpected] dropping short packet from DERP server") continue } - rp.Source = key.NodePublicFromRaw32(mem.B(b[:keyLen])) - rp.Data = b[keyLen:n] + rp.Source = key.NodePublicFromRaw32(mem.B(b[:KeyLen])) + rp.Data = b[KeyLen:n] return rp, nil - case framePing: + case FramePing: var pm PingMessage if n < 8 { c.logf("[unexpected] dropping short ping frame") @@ -619,7 +621,7 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro copy(pm[:], b[:]) return pm, nil - case framePong: + case FramePong: var pm PongMessage if n < 8 { c.logf("[unexpected] dropping short ping frame") @@ -628,10 +630,10 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro copy(pm[:], b[:]) return pm, nil - case frameHealth: + case FrameHealth: return HealthMessage{Problem: string(b[:])}, nil - case frameRestarting: + case FrameRestarting: var m ServerRestartingMessage if n < 8 { c.logf("[unexpected] dropping short server restarting frame") diff --git a/derp/derp_test.go b/derp/derp_test.go index 9d07e159b..e765f7b54 100644 --- a/derp/derp_test.go +++ b/derp/derp_test.go @@ -1,59 +1,56 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package derp +package derp_test import ( "bufio" "bytes" - "cmp" "context" - "crypto/x509" - "encoding/asn1" "encoding/json" "errors" "expvar" "fmt" "io" - "log" "net" - "os" - "reflect" - "strconv" "strings" "sync" "testing" "time" - qt "github.com/frankban/quicktest" - "go4.org/mem" - "golang.org/x/time/rate" - "tailscale.com/derp/derpconst" + "tailscale.com/derp" + "tailscale.com/derp/derpserver" "tailscale.com/disco" + "tailscale.com/metrics" "tailscale.com/net/memnet" - "tailscale.com/tstest" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/util/must" ) +type ( + ClientInfo = derp.ClientInfo + Conn = derp.Conn + Client = derp.Client +) + func TestClientInfoUnmarshal(t *testing.T) { for i, in := range map[string]struct { json string - want *clientInfo + want *ClientInfo wantErr string }{ 
"empty": { json: `{}`, - want: &clientInfo{}, + want: &ClientInfo{}, }, "valid": { json: `{"Version":5,"MeshKey":"6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8"}`, - want: &clientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, + want: &ClientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, }, "validLowerMeshKey": { json: `{"version":5,"meshKey":"6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8"}`, - want: &clientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, + want: &ClientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, }, "invalidMeshKeyToShort": { json: `{"version":5,"meshKey":"abcdefg"}`, @@ -66,7 +63,7 @@ func TestClientInfoUnmarshal(t *testing.T) { } { t.Run(i, func(t *testing.T) { t.Parallel() - var got clientInfo + var got ClientInfo err := json.Unmarshal([]byte(in.json), &got) if in.wantErr != "" { if err == nil || !strings.Contains(err.Error(), in.wantErr) { @@ -86,7 +83,7 @@ func TestClientInfoUnmarshal(t *testing.T) { func TestSendRecv(t *testing.T) { serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, t.Logf) + s := derpserver.NewServer(serverPrivateKey, t.Logf) defer s.Close() const numClients = 3 @@ -132,7 +129,7 @@ func TestSendRecv(t *testing.T) { key := clientPrivateKeys[i] brw := bufio.NewReadWriter(bufio.NewReader(cout), bufio.NewWriter(cout)) - c, err := NewClient(key, cout, brw, t.Logf) + c, err := derp.NewClient(key, cout, brw, t.Logf) if err != nil { t.Fatalf("client %d: %v", i, err) } @@ -159,16 +156,16 @@ func TestSendRecv(t *testing.T) { default: t.Errorf("unexpected message type %T", m) continue - case PeerGoneMessage: + case derp.PeerGoneMessage: switch m.Reason { - case PeerGoneReasonDisconnected: + case derp.PeerGoneReasonDisconnected: peerGoneCountDisconnected.Add(1) - case PeerGoneReasonNotHere: + case derp.PeerGoneReasonNotHere: peerGoneCountNotHere.Add(1) default: t.Errorf("unexpected PeerGone reason %v", m.Reason) } - case ReceivedPacket: + case derp.ReceivedPacket: if m.Source.IsZero() { t.Errorf("zero Source address in ReceivedPacket") } @@ -198,12 +195,15 @@ func TestSendRecv(t *testing.T) { } } + serverMetrics := s.ExpVar().(*metrics.Set) + wantActive := func(total, home int64) { t.Helper() dl := time.Now().Add(5 * time.Second) var gotTotal, gotHome int64 for time.Now().Before(dl) { - gotTotal, gotHome = s.curClients.Value(), s.curHomeClients.Value() + gotTotal = serverMetrics.Get("gauge_current_connections").(*expvar.Int).Value() + gotHome = serverMetrics.Get("gauge_current_home_connections").(*expvar.Int).Value() if gotTotal == total && gotHome == home { return } @@ -305,7 +305,7 @@ func TestSendRecv(t *testing.T) { func TestSendFreeze(t *testing.T) { serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, t.Logf) + s := derpserver.NewServer(serverPrivateKey, t.Logf) defer s.Close() s.WriteTimeout = 100 * time.Millisecond @@ -323,7 +323,7 @@ func TestSendFreeze(t *testing.T) { go s.Accept(ctx, c1, bufio.NewReadWriter(bufio.NewReader(c1), bufio.NewWriter(c1)), name) brw := bufio.NewReadWriter(bufio.NewReader(c2), bufio.NewWriter(c2)) - c, err := NewClient(k, c2, brw, t.Logf) + c, err := derp.NewClient(k, c2, brw, t.Logf) if err != nil { t.Fatal(err) } @@ -374,7 +374,7 @@ func 
TestSendFreeze(t *testing.T) { default: errCh <- fmt.Errorf("%s: unexpected message type %T", name, m) return - case ReceivedPacket: + case derp.ReceivedPacket: if m.Source.IsZero() { errCh <- fmt.Errorf("%s: zero Source address in ReceivedPacket", name) return @@ -504,7 +504,7 @@ func TestSendFreeze(t *testing.T) { } type testServer struct { - s *Server + s *derpserver.Server ln net.Listener logf logger.Logf @@ -549,7 +549,7 @@ const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789a func newTestServer(t *testing.T, ctx context.Context) *testServer { t.Helper() logf := logger.WithPrefix(t.Logf, "derp-server: ") - s := NewServer(key.NewNode(), logf) + s := derpserver.NewServer(key.NewNode(), logf) s.SetMeshKey(testMeshKey) ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { @@ -614,7 +614,7 @@ func newTestClient(t *testing.T, ts *testServer, name string, newClient func(net func newRegularClient(t *testing.T, ts *testServer, name string) *testClient { return newTestClient(t, ts, name, func(nc net.Conn, priv key.NodePrivate, logf logger.Logf) (*Client, error) { brw := bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)) - c, err := NewClient(priv, nc, brw, logf) + c, err := derp.NewClient(priv, nc, brw, logf) if err != nil { return nil, err } @@ -631,7 +631,7 @@ func newTestWatcher(t *testing.T, ts *testServer, name string) *testClient { return nil, err } brw := bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)) - c, err := NewClient(priv, nc, brw, logf, MeshKey(mk)) + c, err := derp.NewClient(priv, nc, brw, logf, derp.MeshKey(mk)) if err != nil { return nil, err } @@ -651,12 +651,12 @@ func (tc *testClient) wantPresent(t *testing.T, peers ...key.NodePublic) { } for { - m, err := tc.c.recvTimeout(time.Second) + m, err := tc.c.RecvTimeoutForTest(time.Second) if err != nil { t.Fatal(err) } switch m := m.(type) { - case PeerPresentMessage: + case derp.PeerPresentMessage: got := m.Key if !want[got] { t.Fatalf("got peer present for %v; want present for %v", tc.ts.keyName(got), logger.ArgWriter(func(bw *bufio.Writer) { @@ -667,7 +667,7 @@ func (tc *testClient) wantPresent(t *testing.T, peers ...key.NodePublic) { } t.Logf("got present with IP %v, flags=%v", m.IPPort, m.Flags) switch m.Flags { - case PeerPresentIsMeshPeer, PeerPresentIsRegular: + case derp.PeerPresentIsMeshPeer, derp.PeerPresentIsRegular: // Okay default: t.Errorf("unexpected PeerPresentIsMeshPeer flags %v", m.Flags) @@ -684,19 +684,19 @@ func (tc *testClient) wantPresent(t *testing.T, peers ...key.NodePublic) { func (tc *testClient) wantGone(t *testing.T, peer key.NodePublic) { t.Helper() - m, err := tc.c.recvTimeout(time.Second) + m, err := tc.c.RecvTimeoutForTest(time.Second) if err != nil { t.Fatal(err) } switch m := m.(type) { - case PeerGoneMessage: + case derp.PeerGoneMessage: got := key.NodePublic(m.Peer) if peer != got { t.Errorf("got gone message for %v; want gone for %v", tc.ts.keyName(got), tc.ts.keyName(peer)) } reason := m.Reason - if reason != PeerGoneReasonDisconnected { - t.Errorf("got gone message for reason %v; wanted %v", reason, PeerGoneReasonDisconnected) + if reason != derp.PeerGoneReasonDisconnected { + t.Errorf("got gone message for reason %v; wanted %v", reason, derp.PeerGoneReasonDisconnected) } default: t.Fatalf("unexpected message type %T", m) @@ -754,863 +754,15 @@ func TestWatch(t *testing.T) { w3.wantGone(t, c1.pub) } -type testFwd int - -func (testFwd) ForwardPacket(key.NodePublic, key.NodePublic, []byte) error { - panic("not called in tests") -} 
-func (testFwd) String() string { - panic("not called in tests") -} - -func pubAll(b byte) (ret key.NodePublic) { - var bs [32]byte - for i := range bs { - bs[i] = b - } - return key.NodePublicFromRaw32(mem.B(bs[:])) -} - -func TestForwarderRegistration(t *testing.T) { - s := &Server{ - clients: make(map[key.NodePublic]*clientSet), - clientsMesh: map[key.NodePublic]PacketForwarder{}, - } - want := func(want map[key.NodePublic]PacketForwarder) { - t.Helper() - if got := s.clientsMesh; !reflect.DeepEqual(got, want) { - t.Fatalf("mismatch\n got: %v\nwant: %v\n", got, want) - } - } - wantCounter := func(c *expvar.Int, want int) { - t.Helper() - if got := c.Value(); got != int64(want) { - t.Errorf("counter = %v; want %v", got, want) - } - } - singleClient := func(c *sclient) *clientSet { - cs := &clientSet{} - cs.activeClient.Store(c) - return cs - } - - u1 := pubAll(1) - u2 := pubAll(2) - u3 := pubAll(3) - - s.AddPacketForwarder(u1, testFwd(1)) - s.AddPacketForwarder(u2, testFwd(2)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(1), - u2: testFwd(2), - }) - - // Verify a remove of non-registered forwarder is no-op. - s.RemovePacketForwarder(u2, testFwd(999)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(1), - u2: testFwd(2), - }) - - // Verify a remove of non-registered user is no-op. - s.RemovePacketForwarder(u3, testFwd(1)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(1), - u2: testFwd(2), - }) - - // Actual removal. - s.RemovePacketForwarder(u2, testFwd(2)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(1), - }) - - // Adding a dup for a user. - wantCounter(&s.multiForwarderCreated, 0) - s.AddPacketForwarder(u1, testFwd(100)) - s.AddPacketForwarder(u1, testFwd(100)) // dup to trigger dup path - want(map[key.NodePublic]PacketForwarder{ - u1: newMultiForwarder(testFwd(1), testFwd(100)), - }) - wantCounter(&s.multiForwarderCreated, 1) - - // Removing a forwarder in a multi set that doesn't exist; does nothing. - s.RemovePacketForwarder(u1, testFwd(55)) - want(map[key.NodePublic]PacketForwarder{ - u1: newMultiForwarder(testFwd(1), testFwd(100)), - }) - - // Removing a forwarder in a multi set that does exist should collapse it away - // from being a multiForwarder. - wantCounter(&s.multiForwarderDeleted, 0) - s.RemovePacketForwarder(u1, testFwd(1)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(100), - }) - wantCounter(&s.multiForwarderDeleted, 1) - - // Removing an entry for a client that's still connected locally should result - // in a nil forwarder. - u1c := &sclient{ - key: u1, - logf: logger.Discard, - } - s.clients[u1] = singleClient(u1c) - s.RemovePacketForwarder(u1, testFwd(100)) - want(map[key.NodePublic]PacketForwarder{ - u1: nil, - }) - - // But once that client disconnects, it should go away. - s.unregisterClient(u1c) - want(map[key.NodePublic]PacketForwarder{}) - - // But if it already has a forwarder, it's not removed. - s.AddPacketForwarder(u1, testFwd(2)) - s.unregisterClient(u1c) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(2), - }) - - // Now pretend u1 was already connected locally (so clientsMesh[u1] is nil), and then we heard - // that they're also connected to a peer of ours. That shouldn't transition the forwarder - // from nil to the new one, not a multiForwarder. 
- s.clients[u1] = singleClient(u1c) - s.clientsMesh[u1] = nil - want(map[key.NodePublic]PacketForwarder{ - u1: nil, - }) - s.AddPacketForwarder(u1, testFwd(3)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(3), - }) -} - -type channelFwd struct { - // id is to ensure that different instances that reference the - // same channel are not equal, as they are used as keys in the - // multiForwarder map. - id int - c chan []byte -} - -func (f channelFwd) String() string { return "" } -func (f channelFwd) ForwardPacket(_ key.NodePublic, _ key.NodePublic, packet []byte) error { - f.c <- packet - return nil -} - -func TestMultiForwarder(t *testing.T) { - received := 0 - var wg sync.WaitGroup - ch := make(chan []byte) - ctx, cancel := context.WithCancel(context.Background()) - - s := &Server{ - clients: make(map[key.NodePublic]*clientSet), - clientsMesh: map[key.NodePublic]PacketForwarder{}, - } - u := pubAll(1) - s.AddPacketForwarder(u, channelFwd{1, ch}) - - wg.Add(2) - go func() { - defer wg.Done() - for { - select { - case <-ch: - received += 1 - case <-ctx.Done(): - return - } - } - }() - go func() { - defer wg.Done() - for { - s.AddPacketForwarder(u, channelFwd{2, ch}) - s.AddPacketForwarder(u, channelFwd{3, ch}) - s.RemovePacketForwarder(u, channelFwd{2, ch}) - s.RemovePacketForwarder(u, channelFwd{1, ch}) - s.AddPacketForwarder(u, channelFwd{1, ch}) - s.RemovePacketForwarder(u, channelFwd{3, ch}) - if ctx.Err() != nil { - return - } - } - }() - - // Number of messages is chosen arbitrarily, just for this loop to - // run long enough concurrently with {Add,Remove}PacketForwarder loop above. - numMsgs := 5000 - var fwd PacketForwarder - for i := range numMsgs { - s.mu.Lock() - fwd = s.clientsMesh[u] - s.mu.Unlock() - fwd.ForwardPacket(u, u, []byte(strconv.Itoa(i))) - } - - cancel() - wg.Wait() - if received != numMsgs { - t.Errorf("expected %d messages to be forwarded; got %d", numMsgs, received) - } -} -func TestMetaCert(t *testing.T) { - priv := key.NewNode() - pub := priv.Public() - s := NewServer(priv, t.Logf) - - certBytes := s.MetaCert() - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - log.Fatal(err) - } - if fmt.Sprint(cert.SerialNumber) != fmt.Sprint(ProtocolVersion) { - t.Errorf("serial = %v; want %v", cert.SerialNumber, ProtocolVersion) - } - if g, w := cert.Subject.CommonName, derpconst.MetaCertCommonNamePrefix+pub.UntypedHexString(); g != w { - t.Errorf("CommonName = %q; want %q", g, w) - } - if n := len(cert.Extensions); n != 1 { - t.Fatalf("got %d extensions; want 1", n) - } - - // oidExtensionBasicConstraints is the Basic Constraints ID copied - // from the x509 package. 
- oidExtensionBasicConstraints := asn1.ObjectIdentifier{2, 5, 29, 19} - - if id := cert.Extensions[0].Id; !id.Equal(oidExtensionBasicConstraints) { - t.Errorf("extension ID = %v; want %v", id, oidExtensionBasicConstraints) - } -} - -type dummyNetConn struct { - net.Conn -} - -func (dummyNetConn) SetReadDeadline(time.Time) error { return nil } - -func TestClientRecv(t *testing.T) { - tests := []struct { - name string - input []byte - want any - }{ - { - name: "ping", - input: []byte{ - byte(framePing), 0, 0, 0, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - }, - want: PingMessage{1, 2, 3, 4, 5, 6, 7, 8}, - }, - { - name: "pong", - input: []byte{ - byte(framePong), 0, 0, 0, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - }, - want: PongMessage{1, 2, 3, 4, 5, 6, 7, 8}, - }, - { - name: "health_bad", - input: []byte{ - byte(frameHealth), 0, 0, 0, 3, - byte('B'), byte('A'), byte('D'), - }, - want: HealthMessage{Problem: "BAD"}, - }, - { - name: "health_ok", - input: []byte{ - byte(frameHealth), 0, 0, 0, 0, - }, - want: HealthMessage{}, - }, - { - name: "server_restarting", - input: []byte{ - byte(frameRestarting), 0, 0, 0, 8, - 0, 0, 0, 1, - 0, 0, 0, 2, - }, - want: ServerRestartingMessage{ - ReconnectIn: 1 * time.Millisecond, - TryFor: 2 * time.Millisecond, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &Client{ - nc: dummyNetConn{}, - br: bufio.NewReader(bytes.NewReader(tt.input)), - logf: t.Logf, - clock: &tstest.Clock{}, - } - got, err := c.Recv() - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("got %#v; want %#v", got, tt.want) - } - }) - } -} - -func TestClientSendPing(t *testing.T) { - var buf bytes.Buffer - c := &Client{ - bw: bufio.NewWriter(&buf), - } - if err := c.SendPing([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { - t.Fatal(err) - } - want := []byte{ - byte(framePing), 0, 0, 0, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - } - if !bytes.Equal(buf.Bytes(), want) { - t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", buf.Bytes(), want) - } -} - -func TestClientSendPong(t *testing.T) { - var buf bytes.Buffer - c := &Client{ - bw: bufio.NewWriter(&buf), - } - if err := c.SendPong([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { - t.Fatal(err) - } - want := []byte{ - byte(framePong), 0, 0, 0, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - } - if !bytes.Equal(buf.Bytes(), want) { - t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", buf.Bytes(), want) - } -} - -func TestServerDupClients(t *testing.T) { - serverPriv := key.NewNode() - var s *Server - - clientPriv := key.NewNode() - clientPub := clientPriv.Public() - - var c1, c2, c3 *sclient - var clientName map[*sclient]string - - // run starts a new test case and resets clients back to their zero values. 
- run := func(name string, dupPolicy dupPolicy, f func(t *testing.T)) { - s = NewServer(serverPriv, t.Logf) - s.dupPolicy = dupPolicy - c1 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c1: ")} - c2 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c2: ")} - c3 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c3: ")} - clientName = map[*sclient]string{ - c1: "c1", - c2: "c2", - c3: "c3", - } - t.Run(name, f) - } - runBothWays := func(name string, f func(t *testing.T)) { - run(name+"_disablefighters", disableFighters, f) - run(name+"_lastwriteractive", lastWriterIsActive, f) - } - wantSingleClient := func(t *testing.T, want *sclient) { - t.Helper() - got, ok := s.clients[want.key] - if !ok { - t.Error("no clients for key") - return - } - if got.dup != nil { - t.Errorf("unexpected dup set for single client") - } - cur := got.activeClient.Load() - if cur != want { - t.Errorf("active client = %q; want %q", clientName[cur], clientName[want]) - } - if cur != nil { - if cur.isDup.Load() { - t.Errorf("unexpected isDup on singleClient") - } - if cur.isDisabled.Load() { - t.Errorf("unexpected isDisabled on singleClient") - } - } - } - wantNoClient := func(t *testing.T) { - t.Helper() - _, ok := s.clients[clientPub] - if !ok { - // Good - return - } - t.Errorf("got client; want empty") - } - wantDupSet := func(t *testing.T) *dupClientSet { - t.Helper() - cs, ok := s.clients[clientPub] - if !ok { - t.Fatal("no set for key; want dup set") - return nil - } - if cs.dup != nil { - return cs.dup - } - t.Fatalf("no dup set for key; want dup set") - return nil - } - wantActive := func(t *testing.T, want *sclient) { - t.Helper() - set, ok := s.clients[clientPub] - if !ok { - t.Error("no set for key") - return - } - got := set.activeClient.Load() - if got != want { - t.Errorf("active client = %q; want %q", clientName[got], clientName[want]) - } - } - checkDup := func(t *testing.T, c *sclient, want bool) { - t.Helper() - if got := c.isDup.Load(); got != want { - t.Errorf("client %q isDup = %v; want %v", clientName[c], got, want) - } - } - checkDisabled := func(t *testing.T, c *sclient, want bool) { - t.Helper() - if got := c.isDisabled.Load(); got != want { - t.Errorf("client %q isDisabled = %v; want %v", clientName[c], got, want) - } - } - wantDupConns := func(t *testing.T, want int) { - t.Helper() - if got := s.dupClientConns.Value(); got != int64(want) { - t.Errorf("dupClientConns = %v; want %v", got, want) - } - } - wantDupKeys := func(t *testing.T, want int) { - t.Helper() - if got := s.dupClientKeys.Value(); got != int64(want) { - t.Errorf("dupClientKeys = %v; want %v", got, want) - } - } - - // Common case: a single client comes and goes, with no dups. - runBothWays("one_comes_and_goes", func(t *testing.T) { - wantNoClient(t) - s.registerClient(c1) - wantSingleClient(t, c1) - s.unregisterClient(c1) - wantNoClient(t) - }) - - // A still somewhat common case: a single client was - // connected and then their wifi dies or laptop closes - // or they switch networks and connect from a - // different network. They have two connections but - // it's not very bad. Only their new one is - // active. The last one, being dead, doesn't send and - // thus the new one doesn't get disabled. 
- runBothWays("small_overlap_replacement", func(t *testing.T) { - wantNoClient(t) - s.registerClient(c1) - wantSingleClient(t, c1) - wantActive(t, c1) - wantDupKeys(t, 0) - wantDupKeys(t, 0) - - s.registerClient(c2) // wifi dies; c2 replacement connects - wantDupSet(t) - wantDupConns(t, 2) - wantDupKeys(t, 1) - checkDup(t, c1, true) - checkDup(t, c2, true) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - wantActive(t, c2) // sends go to the replacement - - s.unregisterClient(c1) // c1 finally times out - wantSingleClient(t, c2) - checkDup(t, c2, false) // c2 is longer a dup - wantActive(t, c2) - wantDupConns(t, 0) - wantDupKeys(t, 0) - }) - - // Key cloning situation with concurrent clients, both trying - // to write. - run("concurrent_dups_get_disabled", disableFighters, func(t *testing.T) { - wantNoClient(t) - s.registerClient(c1) - wantSingleClient(t, c1) - wantActive(t, c1) - s.registerClient(c2) - wantDupSet(t) - wantDupKeys(t, 1) - wantDupConns(t, 2) - wantActive(t, c2) - checkDup(t, c1, true) - checkDup(t, c2, true) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - - s.noteClientActivity(c2) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - s.noteClientActivity(c1) - checkDisabled(t, c1, true) - checkDisabled(t, c2, true) - wantActive(t, nil) - - s.registerClient(c3) - wantActive(t, c3) - checkDisabled(t, c3, false) - wantDupKeys(t, 1) - wantDupConns(t, 3) - - s.unregisterClient(c3) - wantActive(t, nil) - wantDupKeys(t, 1) - wantDupConns(t, 2) - - s.unregisterClient(c2) - wantSingleClient(t, c1) - wantDupKeys(t, 0) - wantDupConns(t, 0) - }) - - // Key cloning with an A->B->C->A series instead. - run("concurrent_dups_three_parties", disableFighters, func(t *testing.T) { - wantNoClient(t) - s.registerClient(c1) - s.registerClient(c2) - s.registerClient(c3) - s.noteClientActivity(c1) - checkDisabled(t, c1, true) - checkDisabled(t, c2, true) - checkDisabled(t, c3, true) - wantActive(t, nil) - }) - - run("activity_promotes_primary_when_nil", disableFighters, func(t *testing.T) { - wantNoClient(t) - - // Last registered client is the active one... - s.registerClient(c1) - wantActive(t, c1) - s.registerClient(c2) - wantActive(t, c2) - s.registerClient(c3) - s.noteClientActivity(c2) - wantActive(t, c3) - - // But if the last one goes away, the one with the - // most recent activity wins. - s.unregisterClient(c3) - wantActive(t, c2) - }) - - run("concurrent_dups_three_parties_last_writer", lastWriterIsActive, func(t *testing.T) { - wantNoClient(t) - - s.registerClient(c1) - wantActive(t, c1) - s.registerClient(c2) - wantActive(t, c2) - - s.noteClientActivity(c1) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - wantActive(t, c1) - - s.noteClientActivity(c2) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - wantActive(t, c2) - - s.unregisterClient(c2) - checkDisabled(t, c1, false) - wantActive(t, c1) - }) -} - -func TestLimiter(t *testing.T) { - rl := rate.NewLimiter(rate.Every(time.Minute), 100) - for i := range 200 { - r := rl.Reserve() - d := r.Delay() - t.Logf("i=%d, allow=%v, d=%v", i, r.OK(), d) - } -} - -// BenchmarkConcurrentStreams exercises mutex contention on a -// single Server instance with multiple concurrent client flows. 
-func BenchmarkConcurrentStreams(b *testing.B) { - serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, logger.Discard) - defer s.Close() - - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - b.Fatal(err) - } - defer ln.Close() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - go func() { - for ctx.Err() == nil { - connIn, err := ln.Accept() - if err != nil { - if ctx.Err() != nil { - return - } - b.Error(err) - return - } - - brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) - go s.Accept(ctx, connIn, brwServer, "test-client") - } - }() - - newClient := func(t testing.TB) *Client { - t.Helper() - connOut, err := net.Dial("tcp", ln.Addr().String()) - if err != nil { - b.Fatal(err) - } - t.Cleanup(func() { connOut.Close() }) - - k := key.NewNode() - - brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) - client, err := NewClient(k, connOut, brw, logger.Discard) - if err != nil { - b.Fatalf("client: %v", err) - } - return client - } - - b.RunParallel(func(pb *testing.PB) { - c1, c2 := newClient(b), newClient(b) - const packetSize = 100 - msg := make([]byte, packetSize) - for pb.Next() { - if err := c1.Send(c2.PublicKey(), msg); err != nil { - b.Fatal(err) - } - _, err := c2.Recv() - if err != nil { - return - } - } - }) -} - -func BenchmarkSendRecv(b *testing.B) { - for _, size := range []int{10, 100, 1000, 10000} { - b.Run(fmt.Sprintf("msgsize=%d", size), func(b *testing.B) { benchmarkSendRecvSize(b, size) }) - } -} - -func benchmarkSendRecvSize(b *testing.B, packetSize int) { - serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, logger.Discard) - defer s.Close() - - k := key.NewNode() - clientKey := k.Public() - - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - b.Fatal(err) - } - defer ln.Close() - - connOut, err := net.Dial("tcp", ln.Addr().String()) - if err != nil { - b.Fatal(err) - } - defer connOut.Close() - - connIn, err := ln.Accept() - if err != nil { - b.Fatal(err) - } - defer connIn.Close() - - brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go s.Accept(ctx, connIn, brwServer, "test-client") - - brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) - client, err := NewClient(k, connOut, brw, logger.Discard) - if err != nil { - b.Fatalf("client: %v", err) - } - - go func() { - for { - _, err := client.Recv() - if err != nil { - return - } - } - }() - - msg := make([]byte, packetSize) - b.SetBytes(int64(len(msg))) - b.ReportAllocs() - b.ResetTimer() - for range b.N { - if err := client.Send(clientKey, msg); err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkWriteUint32(b *testing.B) { - w := bufio.NewWriter(io.Discard) - b.ReportAllocs() - b.ResetTimer() - for range b.N { - writeUint32(w, 0x0ba3a) - } -} - -type nopRead struct{} - -func (r nopRead) Read(p []byte) (int, error) { - return len(p), nil -} - -var sinkU32 uint32 - -func BenchmarkReadUint32(b *testing.B) { - r := bufio.NewReader(nopRead{}) - var err error - b.ReportAllocs() - b.ResetTimer() - for range b.N { - sinkU32, err = readUint32(r) - if err != nil { - b.Fatal(err) - } - } -} - func waitConnect(t testing.TB, c *Client) { t.Helper() if m, err := c.Recv(); err != nil { t.Fatalf("client first Recv: %v", err) - } else if v, ok := m.(ServerInfoMessage); !ok { + } else if v, ok := m.(derp.ServerInfoMessage); !ok { 
t.Fatalf("client first Recv was unexpected type %T", v) } } -func TestParseSSOutput(t *testing.T) { - contents, err := os.ReadFile("testdata/example_ss.txt") - if err != nil { - t.Errorf("os.ReadFile(example_ss.txt) failed: %v", err) - } - seen := parseSSOutput(string(contents)) - if len(seen) == 0 { - t.Errorf("parseSSOutput expected non-empty map") - } -} - -type countWriter struct { - mu sync.Mutex - writes int - bytes int64 -} - -func (w *countWriter) Write(p []byte) (n int, err error) { - w.mu.Lock() - defer w.mu.Unlock() - w.writes++ - w.bytes += int64(len(p)) - return len(p), nil -} - -func (w *countWriter) Stats() (writes int, bytes int64) { - w.mu.Lock() - defer w.mu.Unlock() - return w.writes, w.bytes -} - -func (w *countWriter) ResetStats() { - w.mu.Lock() - defer w.mu.Unlock() - w.writes, w.bytes = 0, 0 -} - -func TestClientSendRateLimiting(t *testing.T) { - cw := new(countWriter) - c := &Client{ - bw: bufio.NewWriter(cw), - clock: &tstest.Clock{}, - } - c.setSendRateLimiter(ServerInfoMessage{}) - - pkt := make([]byte, 1000) - if err := c.send(key.NodePublic{}, pkt); err != nil { - t.Fatal(err) - } - writes1, bytes1 := cw.Stats() - if writes1 != 1 { - t.Errorf("writes = %v, want 1", writes1) - } - - // Flood should all succeed. - cw.ResetStats() - for range 1000 { - if err := c.send(key.NodePublic{}, pkt); err != nil { - t.Fatal(err) - } - } - writes1K, bytes1K := cw.Stats() - if writes1K != 1000 { - t.Logf("writes = %v; want 1000", writes1K) - } - if got, want := bytes1K, bytes1*1000; got != want { - t.Logf("bytes = %v; want %v", got, want) - } - - // Set a rate limiter - cw.ResetStats() - c.setSendRateLimiter(ServerInfoMessage{ - TokenBucketBytesPerSecond: 1, - TokenBucketBytesBurst: int(bytes1 * 2), - }) - for range 1000 { - if err := c.send(key.NodePublic{}, pkt); err != nil { - t.Fatal(err) - } - } - writesLimited, bytesLimited := cw.Stats() - if writesLimited == 0 || writesLimited == writes1K { - t.Errorf("limited conn's write count = %v; want non-zero, less than 1k", writesLimited) - } - if bytesLimited < bytes1*2 || bytesLimited >= bytes1K { - t.Errorf("limited conn's bytes count = %v; want >=%v, <%v", bytesLimited, bytes1K*2, bytes1K) - } -} - func TestServerRepliesToPing(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1627,12 +779,12 @@ func TestServerRepliesToPing(t *testing.T) { } for { - m, err := tc.c.recvTimeout(time.Second) + m, err := tc.c.RecvTimeoutForTest(time.Second) if err != nil { t.Fatal(err) } switch m := m.(type) { - case PongMessage: + case derp.PongMessage: if ([8]byte(m)) != data { t.Fatalf("got pong %2x; want %2x", [8]byte(m), data) } @@ -1640,122 +792,3 @@ func TestServerRepliesToPing(t *testing.T) { } } } - -func TestGetPerClientSendQueueDepth(t *testing.T) { - c := qt.New(t) - envKey := "TS_DEBUG_DERP_PER_CLIENT_SEND_QUEUE_DEPTH" - - testCases := []struct { - envVal string - want int - }{ - // Empty case, envknob treats empty as missing also. 
- { - "", defaultPerClientSendQueueDepth, - }, - { - "64", 64, - }, - } - - for _, tc := range testCases { - t.Run(cmp.Or(tc.envVal, "empty"), func(t *testing.T) { - t.Setenv(envKey, tc.envVal) - val := getPerClientSendQueueDepth() - c.Assert(val, qt.Equals, tc.want) - }) - } -} - -func TestSetMeshKey(t *testing.T) { - for name, tt := range map[string]struct { - key string - want key.DERPMesh - wantErr bool - }{ - "clobber": { - key: testMeshKey, - wantErr: false, - }, - "invalid": { - key: "badf00d", - wantErr: true, - }, - } { - t.Run(name, func(t *testing.T) { - s := &Server{} - - err := s.SetMeshKey(tt.key) - if tt.wantErr { - if err == nil { - t.Fatalf("expected err") - } - return - } - if err != nil { - t.Fatalf("unexpected err: %v", err) - } - - want, err := key.ParseDERPMesh(tt.key) - if err != nil { - t.Fatal(err) - } - if !s.meshKey.Equal(want) { - t.Fatalf("got %v, want %v", s.meshKey, want) - } - }) - } -} - -func TestIsMeshPeer(t *testing.T) { - s := &Server{} - err := s.SetMeshKey(testMeshKey) - if err != nil { - t.Fatal(err) - } - for name, tt := range map[string]struct { - want bool - meshKey string - wantAllocs float64 - }{ - "nil": { - want: false, - wantAllocs: 0, - }, - "mismatch": { - meshKey: "6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8", - want: false, - wantAllocs: 1, - }, - "match": { - meshKey: testMeshKey, - want: true, - wantAllocs: 0, - }, - } { - t.Run(name, func(t *testing.T) { - var got bool - var mKey key.DERPMesh - if tt.meshKey != "" { - mKey, err = key.ParseDERPMesh(tt.meshKey) - if err != nil { - t.Fatalf("ParseDERPMesh(%q) failed: %v", tt.meshKey, err) - } - } - - info := clientInfo{ - MeshKey: mKey, - } - allocs := testing.AllocsPerRun(1, func() { - got = s.isMeshPeer(&info) - }) - if got != tt.want { - t.Fatalf("got %t, want %t: info = %#v", got, tt.want, info) - } - - if allocs != tt.wantAllocs && tt.want { - t.Errorf("%f allocations, want %f", allocs, tt.wantAllocs) - } - }) - } -} diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index 704b8175d..57f008a1a 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -522,7 +522,7 @@ func (c *Client) connect(ctx context.Context, caller string) (client *derp.Clien // just to get routed into the server's HTTP Handler so it // can Hijack the request, but we signal with a special header // that we don't want to deal with its HTTP response. 
- req.Header.Set(fastStartHeader, "1") // suppresses the server's HTTP response + req.Header.Set(derp.FastStartHeader, "1") // suppresses the server's HTTP response if err := req.Write(brw); err != nil { return nil, 0, err } diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index 6e8e0bd21..dd7cbcd24 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package derphttp +package derphttp_test import ( "bytes" @@ -21,9 +21,12 @@ import ( "time" "tailscale.com/derp" + "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/net/netx" "tailscale.com/tailcfg" + "tailscale.com/tstest" "tailscale.com/types/key" ) @@ -41,12 +44,12 @@ func TestSendRecv(t *testing.T) { clientKeys = append(clientKeys, priv.Public()) } - s := derp.NewServer(serverPrivateKey, t.Logf) + s := derpserver.NewServer(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: Handler(s), + Handler: derpserver.Handler(s), } ln, err := net.Listen("tcp4", "localhost:0") @@ -65,7 +68,7 @@ func TestSendRecv(t *testing.T) { } }() - var clients []*Client + var clients []*derphttp.Client var recvChs []chan []byte done := make(chan struct{}) var wg sync.WaitGroup @@ -78,7 +81,7 @@ func TestSendRecv(t *testing.T) { }() for i := range numClients { key := clientPrivateKeys[i] - c, err := NewClient(key, serverURL, t.Logf, netMon) + c, err := derphttp.NewClient(key, serverURL, t.Logf, netMon) if err != nil { t.Fatalf("client %d: %v", i, err) } @@ -158,7 +161,7 @@ func TestSendRecv(t *testing.T) { recvNothing(1) } -func waitConnect(t testing.TB, c *Client) { +func waitConnect(t testing.TB, c *derphttp.Client) { t.Helper() if m, err := c.Recv(); err != nil { t.Fatalf("client first Recv: %v", err) @@ -169,12 +172,12 @@ func waitConnect(t testing.TB, c *Client) { func TestPing(t *testing.T) { serverPrivateKey := key.NewNode() - s := derp.NewServer(serverPrivateKey, t.Logf) + s := derpserver.NewServer(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: Handler(s), + Handler: derpserver.Handler(s), } ln, err := net.Listen("tcp4", "localhost:0") @@ -193,7 +196,7 @@ func TestPing(t *testing.T) { } }() - c, err := NewClient(key.NewNode(), serverURL, t.Logf, netmon.NewStatic()) + c, err := derphttp.NewClient(key.NewNode(), serverURL, t.Logf, netmon.NewStatic()) if err != nil { t.Fatalf("NewClient: %v", err) } @@ -221,11 +224,11 @@ func TestPing(t *testing.T) { const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" -func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derp.Server) { - s = derp.NewServer(k, t.Logf) +func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derpserver.Server) { + s = derpserver.NewServer(k, t.Logf) httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: Handler(s), + Handler: derpserver.Handler(s), } ln, err := net.Listen("tcp4", "localhost:0") @@ -247,8 +250,8 @@ func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derp.S return } -func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string) (c *Client) { - c, err := NewClient(watcherPrivateKey, 
serverToWatchURL, t.Logf, netmon.NewStatic()) +func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string) (c *derphttp.Client) { + c, err := derphttp.NewClient(watcherPrivateKey, serverToWatchURL, t.Logf, netmon.NewStatic()) if err != nil { t.Fatal(err) } @@ -260,30 +263,16 @@ func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToW return } -// breakConnection breaks the connection, which should trigger a reconnect. -func (c *Client) breakConnection(brokenClient *derp.Client) { - c.mu.Lock() - defer c.mu.Unlock() - if c.client != brokenClient { - return - } - if c.netConn != nil { - c.netConn.Close() - c.netConn = nil - } - c.client = nil -} - // Test that a watcher connection successfully reconnects and processes peer // updates after a different thread breaks and reconnects the connection, while // the watcher is waiting on recv(). func TestBreakWatcherConnRecv(t *testing.T) { + // TODO(bradfitz): use synctest + memnet instead + // Set the wait time before a retry after connection failure to be much lower. // This needs to be early in the test, for defer to run right at the end after // the DERP client has finished. - origRetryInterval := retryInterval - retryInterval = 50 * time.Millisecond - defer func() { retryInterval = origRetryInterval }() + tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond) var wg sync.WaitGroup // Make the watcher server @@ -301,11 +290,11 @@ func TestBreakWatcherConnRecv(t *testing.T) { defer watcher.Close() ctx, cancel := context.WithCancel(context.Background()) + defer cancel() watcherChan := make(chan int, 1) defer close(watcherChan) errChan := make(chan error, 1) - defer close(errChan) // Start the watcher thread (which connects to the watched server) wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343 @@ -320,7 +309,10 @@ func TestBreakWatcherConnRecv(t *testing.T) { } remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } notifyErr := func(err error) { - errChan <- err + select { + case errChan <- err: + case <-ctx.Done(): + } } watcher.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr) @@ -345,7 +337,7 @@ func TestBreakWatcherConnRecv(t *testing.T) { t.Fatalf("watcher did not process the peer update") } timer.Reset(5 * time.Second) - watcher.breakConnection(watcher.client) + watcher.BreakConnection(watcher) // re-establish connection by sending a packet watcher.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) } @@ -357,12 +349,12 @@ func TestBreakWatcherConnRecv(t *testing.T) { // updates after a different thread breaks and reconnects the connection, while // the watcher is not waiting on recv(). func TestBreakWatcherConn(t *testing.T) { + // TODO(bradfitz): use synctest + memnet instead + // Set the wait time before a retry after connection failure to be much lower. // This needs to be early in the test, for defer to run right at the end after // the DERP client has finished. 
- origRetryInterval := retryInterval - retryInterval = 50 * time.Millisecond - defer func() { retryInterval = origRetryInterval }() + tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond) var wg sync.WaitGroup // Make the watcher server @@ -428,7 +420,7 @@ func TestBreakWatcherConn(t *testing.T) { case <-timer.C: t.Fatalf("watcher did not process the peer update") } - watcher1.breakConnection(watcher1.client) + watcher1.BreakConnection(watcher1) // re-establish connection by sending a packet watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) // signal that the breaker is done @@ -446,7 +438,7 @@ func noopRemove(derp.PeerGoneMessage) {} func noopNotifyError(error) {} func TestRunWatchConnectionLoopServeConnect(t *testing.T) { - defer func() { testHookWatchLookConnectResult = nil }() + defer derphttp.SetTestHookWatchLookConnectResult(nil) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -461,7 +453,7 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) { defer watcher.Close() // Test connecting to ourselves, and that we get hung up on. - testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool { + derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool { t.Helper() if err != nil { t.Fatalf("error connecting to server: %v", err) @@ -470,12 +462,12 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) { t.Error("wanted self-connect; wasn't") } return false - } + }) watcher.RunWatchConnectionLoop(ctx, pub, t.Logf, noopAdd, noopRemove, noopNotifyError) // Test connecting to the server with a zero value for ignoreServerKey, // so we should always connect. - testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool { + derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool { t.Helper() if err != nil { t.Fatalf("error connecting to server: %v", err) @@ -484,16 +476,14 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) { t.Error("wanted normal connect; got self connect") } return false - } + }) watcher.RunWatchConnectionLoop(ctx, key.NodePublic{}, t.Logf, noopAdd, noopRemove, noopNotifyError) } // verify that the LocalAddr method doesn't acquire the mutex. 
// See https://github.com/tailscale/tailscale/issues/11519 func TestLocalAddrNoMutex(t *testing.T) { - var c Client - c.mu.Lock() - defer c.mu.Unlock() // not needed in test but for symmetry + var c derphttp.Client _, err := c.LocalAddr() if got, want := fmt.Sprint(err), "client not connected"; got != want { @@ -502,7 +492,7 @@ func TestLocalAddrNoMutex(t *testing.T) { } func TestProbe(t *testing.T) { - h := Handler(nil) + h := derpserver.Handler(nil) tests := []struct { path string @@ -523,7 +513,7 @@ func TestProbe(t *testing.T) { } func TestNotifyError(t *testing.T) { - defer func() { testHookWatchLookConnectResult = nil }() + defer derphttp.SetTestHookWatchLookConnectResult(nil) ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() @@ -541,7 +531,7 @@ func TestNotifyError(t *testing.T) { })) defer watcher.Close() - testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool { + derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool { t.Helper() if err == nil { t.Fatal("expected error connecting to server, got nil") @@ -550,7 +540,7 @@ func TestNotifyError(t *testing.T) { t.Error("wanted normal connect; got self connect") } return false - } + }) errChan := make(chan error, 1) notifyError := func(err error) { @@ -587,7 +577,7 @@ func TestManualDial(t *testing.T) { region := slices.Sorted(maps.Keys(dm.Regions))[0] netMon := netmon.NewStatic() - rc := NewRegionClient(key.NewNode(), t.Logf, netMon, func() *tailcfg.DERPRegion { + rc := derphttp.NewRegionClient(key.NewNode(), t.Logf, netMon, func() *tailcfg.DERPRegion { return dm.Regions[region] }) defer rc.Close() @@ -625,7 +615,7 @@ func TestURLDial(t *testing.T) { } } netMon := netmon.NewStatic() - c, err := NewClient(key.NewNode(), "https://"+hostname+"/", t.Logf, netMon) + c, err := derphttp.NewClient(key.NewNode(), "https://"+hostname+"/", t.Logf, netMon) defer c.Close() if err := c.Connect(context.Background()); err != nil { diff --git a/derp/derphttp/export_test.go b/derp/derphttp/export_test.go new file mode 100644 index 000000000..59d8324dc --- /dev/null +++ b/derp/derphttp/export_test.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package derphttp + +func SetTestHookWatchLookConnectResult(f func(connectError error, wasSelfConnect bool) (keepRunning bool)) { + testHookWatchLookConnectResult = f +} + +// breakConnection breaks the connection, which should trigger a reconnect. +func (c *Client) BreakConnection(brokenClient *Client) { + c.mu.Lock() + defer c.mu.Unlock() + if c.client != brokenClient.client { + return + } + if c.netConn != nil { + c.netConn.Close() + c.netConn = nil + } + c.client = nil +} + +var RetryInterval = &retryInterval diff --git a/derp/derp_server.go b/derp/derpserver/derp_server.go similarity index 94% rename from derp/derp_server.go rename to derp/derpserver/derp_server.go index f0c635a5a..917ef147c 100644 --- a/derp/derp_server.go +++ b/derp/derpserver/derp_server.go @@ -1,7 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package derp +// Package derpserver implements a DERP server. 
+package derpserver // TODO(crawshaw): with predefined serverKey in clients and HMAC on packets we could skip TLS @@ -38,6 +39,7 @@ import ( "go4.org/mem" "golang.org/x/sync/errgroup" "tailscale.com/client/local" + "tailscale.com/derp" "tailscale.com/derp/derpconst" "tailscale.com/disco" "tailscale.com/envknob" @@ -55,19 +57,15 @@ import ( "tailscale.com/version" ) +type Conn = derp.Conn + // verboseDropKeys is the set of destination public keys that should // verbosely log whenever DERP drops a packet. var verboseDropKeys = map[key.NodePublic]bool{} -// IdealNodeHeader is the HTTP request header sent on DERP HTTP client requests -// to indicate that they're connecting to their ideal (Region.Nodes[0]) node. -// The HTTP header value is the name of the node they wish they were connected -// to. This is an optional header. -const IdealNodeHeader = "Ideal-Node" - // IdealNodeContextKey is the context key used to pass the IdealNodeHeader value // from the HTTP handler to the DERP server's Accept method. -var IdealNodeContextKey = ctxkey.New[string]("ideal-node", "") +var IdealNodeContextKey = ctxkey.New("ideal-node", "") func init() { keys := envknob.String("TS_DEBUG_VERBOSE_DROPS") @@ -620,7 +618,7 @@ func (s *Server) initMetacert() { log.Fatal(err) } tmpl := &x509.Certificate{ - SerialNumber: big.NewInt(ProtocolVersion), + SerialNumber: big.NewInt(derp.ProtocolVersion), Subject: pkix.Name{ CommonName: derpconst.MetaCertCommonNamePrefix + s.publicKey.UntypedHexString(), }, @@ -724,7 +722,7 @@ func (s *Server) registerClient(c *sclient) { // presence changed. // // s.mu must be held. -func (s *Server) broadcastPeerStateChangeLocked(peer key.NodePublic, ipPort netip.AddrPort, flags PeerPresentFlags, present bool) { +func (s *Server) broadcastPeerStateChangeLocked(peer key.NodePublic, ipPort netip.AddrPort, flags derp.PeerPresentFlags, present bool) { for w := range s.watchers { w.peerStateChange = append(w.peerStateChange, peerConnState{ peer: peer, @@ -868,7 +866,7 @@ func (s *Server) notePeerGoneFromRegionLocked(key key.NodePublic) { // requestPeerGoneWriteLimited sends a request to write a "peer gone" // frame, but only in reply to a disco packet, and only if we haven't // sent one recently. 
-func (c *sclient) requestPeerGoneWriteLimited(peer key.NodePublic, contents []byte, reason PeerGoneReasonType) { +func (c *sclient) requestPeerGoneWriteLimited(peer key.NodePublic, contents []byte, reason derp.PeerGoneReasonType) { if disco.LooksLikeDiscoWrapper(contents) != true { return } @@ -1010,7 +1008,7 @@ func (c *sclient) run(ctx context.Context) error { c.startStatsLoop(sendCtx) for { - ft, fl, err := readFrameHeader(c.br) + ft, fl, err := derp.ReadFrameHeader(c.br) c.debugLogf("read frame type %d len %d err %v", ft, fl, err) if err != nil { if errors.Is(err, io.EOF) { @@ -1025,17 +1023,17 @@ func (c *sclient) run(ctx context.Context) error { } c.s.noteClientActivity(c) switch ft { - case frameNotePreferred: + case derp.FrameNotePreferred: err = c.handleFrameNotePreferred(ft, fl) - case frameSendPacket: + case derp.FrameSendPacket: err = c.handleFrameSendPacket(ft, fl) - case frameForwardPacket: + case derp.FrameForwardPacket: err = c.handleFrameForwardPacket(ft, fl) - case frameWatchConns: + case derp.FrameWatchConns: err = c.handleFrameWatchConns(ft, fl) - case frameClosePeer: + case derp.FrameClosePeer: err = c.handleFrameClosePeer(ft, fl) - case framePing: + case derp.FramePing: err = c.handleFramePing(ft, fl) default: err = c.handleUnknownFrame(ft, fl) @@ -1046,12 +1044,12 @@ func (c *sclient) run(ctx context.Context) error { } } -func (c *sclient) handleUnknownFrame(ft frameType, fl uint32) error { +func (c *sclient) handleUnknownFrame(ft derp.FrameType, fl uint32) error { _, err := io.CopyN(io.Discard, c.br, int64(fl)) return err } -func (c *sclient) handleFrameNotePreferred(ft frameType, fl uint32) error { +func (c *sclient) handleFrameNotePreferred(ft derp.FrameType, fl uint32) error { if fl != 1 { return fmt.Errorf("frameNotePreferred wrong size") } @@ -1063,7 +1061,7 @@ func (c *sclient) handleFrameNotePreferred(ft frameType, fl uint32) error { return nil } -func (c *sclient) handleFrameWatchConns(ft frameType, fl uint32) error { +func (c *sclient) handleFrameWatchConns(ft derp.FrameType, fl uint32) error { if fl != 0 { return fmt.Errorf("handleFrameWatchConns wrong size") } @@ -1074,9 +1072,9 @@ func (c *sclient) handleFrameWatchConns(ft frameType, fl uint32) error { return nil } -func (c *sclient) handleFramePing(ft frameType, fl uint32) error { +func (c *sclient) handleFramePing(ft derp.FrameType, fl uint32) error { c.s.gotPing.Add(1) - var m PingMessage + var m derp.PingMessage if fl < uint32(len(m)) { return fmt.Errorf("short ping: %v", fl) } @@ -1101,8 +1099,8 @@ func (c *sclient) handleFramePing(ft frameType, fl uint32) error { return err } -func (c *sclient) handleFrameClosePeer(ft frameType, fl uint32) error { - if fl != keyLen { +func (c *sclient) handleFrameClosePeer(ft derp.FrameType, fl uint32) error { + if fl != derp.KeyLen { return fmt.Errorf("handleFrameClosePeer wrong size") } if !c.canMesh { @@ -1135,7 +1133,7 @@ func (c *sclient) handleFrameClosePeer(ft frameType, fl uint32) error { // handleFrameForwardPacket reads a "forward packet" frame from the client // (which must be a trusted client, a peer in our mesh). 
-func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error { +func (c *sclient) handleFrameForwardPacket(ft derp.FrameType, fl uint32) error { if !c.canMesh { return fmt.Errorf("insufficient permissions") } @@ -1162,7 +1160,7 @@ func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error { if dstLen > 1 { reason = dropReasonDupClient } else { - c.requestPeerGoneWriteLimited(dstKey, contents, PeerGoneReasonNotHere) + c.requestPeerGoneWriteLimited(dstKey, contents, derp.PeerGoneReasonNotHere) } s.recordDrop(contents, srcKey, dstKey, reason) return nil @@ -1178,7 +1176,7 @@ func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error { } // handleFrameSendPacket reads a "send packet" frame from the client. -func (c *sclient) handleFrameSendPacket(ft frameType, fl uint32) error { +func (c *sclient) handleFrameSendPacket(ft derp.FrameType, fl uint32) error { s := c.s dstKey, contents, err := s.recvPacket(c.br, fl) @@ -1215,7 +1213,7 @@ func (c *sclient) handleFrameSendPacket(ft frameType, fl uint32) error { if dstLen > 1 { reason = dropReasonDupClient } else { - c.requestPeerGoneWriteLimited(dstKey, contents, PeerGoneReasonNotHere) + c.requestPeerGoneWriteLimited(dstKey, contents, derp.PeerGoneReasonNotHere) } s.recordDrop(contents, c.key, dstKey, reason) c.debugLogf("SendPacket for %s, dropping with reason=%s", dstKey.ShortString(), reason) @@ -1325,13 +1323,13 @@ func (c *sclient) sendPkt(dst *sclient, p pkt) error { // notified (in a new goroutine) whenever a peer has disconnected from all DERP // nodes in the current region. func (c *sclient) onPeerGoneFromRegion(peer key.NodePublic) { - c.requestPeerGoneWrite(peer, PeerGoneReasonDisconnected) + c.requestPeerGoneWrite(peer, derp.PeerGoneReasonDisconnected) } // requestPeerGoneWrite sends a request to write a "peer gone" frame // with an explanation of why it is gone. It blocks until either the // write request is scheduled, or the client has closed. -func (c *sclient) requestPeerGoneWrite(peer key.NodePublic, reason PeerGoneReasonType) { +func (c *sclient) requestPeerGoneWrite(peer key.NodePublic, reason derp.PeerGoneReasonType) { select { case c.peerGone <- peerGoneMsg{ peer: peer, @@ -1358,7 +1356,7 @@ func (c *sclient) requestMeshUpdate() { // isMeshPeer reports whether the client is a trusted mesh peer // node in the DERP region. -func (s *Server) isMeshPeer(info *clientInfo) bool { +func (s *Server) isMeshPeer(info *derp.ClientInfo) bool { // Compare mesh keys in constant time to prevent timing attacks. // Since mesh keys are a fixed length, we don’t need to be concerned // about timing attacks on client mesh keys that are the wrong length. @@ -1372,7 +1370,7 @@ func (s *Server) isMeshPeer(info *clientInfo) bool { // verifyClient checks whether the client is allowed to connect to the derper, // depending on how & whether the server's been configured to verify. -func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, info *clientInfo, clientIP netip.Addr) error { +func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, info *derp.ClientInfo, clientIP netip.Addr) error { if s.isMeshPeer(info) { // Trusted mesh peer. No need to verify further. 
In fact, verifying // further wouldn't work: it's not part of the tailnet so tailscaled and @@ -1436,10 +1434,10 @@ func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, inf } func (s *Server) sendServerKey(lw *lazyBufioWriter) error { - buf := make([]byte, 0, len(magic)+key.NodePublicRawLen) - buf = append(buf, magic...) + buf := make([]byte, 0, len(derp.Magic)+key.NodePublicRawLen) + buf = append(buf, derp.Magic...) buf = s.publicKey.AppendTo(buf) - err := writeFrame(lw.bw(), frameServerKey, buf) + err := derp.WriteFrame(lw.bw(), derp.FrameServerKey, buf) lw.Flush() // redundant (no-op) flush to release bufio.Writer return err } @@ -1504,21 +1502,16 @@ func (s *Server) noteClientActivity(c *sclient) { dup.sendHistory = append(dup.sendHistory, c) } -type serverInfo struct { - Version int `json:"version,omitempty"` - - TokenBucketBytesPerSecond int `json:",omitempty"` - TokenBucketBytesBurst int `json:",omitempty"` -} +type ServerInfo = derp.ServerInfo func (s *Server) sendServerInfo(bw *lazyBufioWriter, clientKey key.NodePublic) error { - msg, err := json.Marshal(serverInfo{Version: ProtocolVersion}) + msg, err := json.Marshal(ServerInfo{Version: derp.ProtocolVersion}) if err != nil { return err } msgbox := s.privateKey.SealTo(clientKey, msg) - if err := writeFrameHeader(bw.bw(), frameServerInfo, uint32(len(msgbox))); err != nil { + if err := derp.WriteFrameHeader(bw.bw(), derp.FrameServerInfo, uint32(len(msgbox))); err != nil { return err } if _, err := bw.Write(msgbox); err != nil { @@ -1530,12 +1523,12 @@ func (s *Server) sendServerInfo(bw *lazyBufioWriter, clientKey key.NodePublic) e // recvClientKey reads the frameClientInfo frame from the client (its // proof of identity) upon its initial connection. It should be // considered especially untrusted at this point. 
-func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info *clientInfo, err error) { - fl, err := readFrameTypeHeader(br, frameClientInfo) +func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info *derp.ClientInfo, err error) { + fl, err := derp.ReadFrameTypeHeader(br, derp.FrameClientInfo) if err != nil { return zpub, nil, err } - const minLen = keyLen + nonceLen + const minLen = derp.KeyLen + derp.NonceLen if fl < minLen { return zpub, nil, errors.New("short client info") } @@ -1547,7 +1540,7 @@ func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info if err := clientKey.ReadRawWithoutAllocating(br); err != nil { return zpub, nil, err } - msgLen := int(fl - keyLen) + msgLen := int(fl - derp.KeyLen) msgbox := make([]byte, msgLen) if _, err := io.ReadFull(br, msgbox); err != nil { return zpub, nil, fmt.Errorf("msgbox: %v", err) @@ -1556,7 +1549,7 @@ func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info if !ok { return zpub, nil, fmt.Errorf("msgbox: cannot open len=%d with client key %s", msgLen, clientKey) } - info = new(clientInfo) + info = new(derp.ClientInfo) if err := json.Unmarshal(msg, info); err != nil { return zpub, nil, fmt.Errorf("msg: %v", err) } @@ -1564,15 +1557,15 @@ func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info } func (s *Server) recvPacket(br *bufio.Reader, frameLen uint32) (dstKey key.NodePublic, contents []byte, err error) { - if frameLen < keyLen { + if frameLen < derp.KeyLen { return zpub, nil, errors.New("short send packet frame") } if err := dstKey.ReadRawWithoutAllocating(br); err != nil { return zpub, nil, err } - packetLen := frameLen - keyLen - if packetLen > MaxPacketSize { - return zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, MaxPacketSize) + packetLen := frameLen - derp.KeyLen + if packetLen > derp.MaxPacketSize { + return zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, derp.MaxPacketSize) } contents = make([]byte, packetLen) if _, err := io.ReadFull(br, contents); err != nil { @@ -1592,7 +1585,7 @@ func (s *Server) recvPacket(br *bufio.Reader, frameLen uint32) (dstKey key.NodeP var zpub key.NodePublic func (s *Server) recvForwardPacket(br *bufio.Reader, frameLen uint32) (srcKey, dstKey key.NodePublic, contents []byte, err error) { - if frameLen < keyLen*2 { + if frameLen < derp.KeyLen*2 { return zpub, zpub, nil, errors.New("short send packet frame") } if err := srcKey.ReadRawWithoutAllocating(br); err != nil { @@ -1601,9 +1594,9 @@ func (s *Server) recvForwardPacket(br *bufio.Reader, frameLen uint32) (srcKey, d if err := dstKey.ReadRawWithoutAllocating(br); err != nil { return zpub, zpub, nil, err } - packetLen := frameLen - keyLen*2 - if packetLen > MaxPacketSize { - return zpub, zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, MaxPacketSize) + packetLen := frameLen - derp.KeyLen*2 + if packetLen > derp.MaxPacketSize { + return zpub, zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, derp.MaxPacketSize) } contents = make([]byte, packetLen) if _, err := io.ReadFull(br, contents); err != nil { @@ -1628,7 +1621,7 @@ type sclient struct { s *Server nc Conn key key.NodePublic - info clientInfo + info derp.ClientInfo logf logger.Logf done <-chan struct{} // closed when connection closes remoteIPPort netip.AddrPort // zero if remoteAddr is not ip:port. 
@@ -1666,19 +1659,19 @@ type sclient struct { peerGoneLim *rate.Limiter } -func (c *sclient) presentFlags() PeerPresentFlags { - var f PeerPresentFlags +func (c *sclient) presentFlags() derp.PeerPresentFlags { + var f derp.PeerPresentFlags if c.info.IsProber { - f |= PeerPresentIsProber + f |= derp.PeerPresentIsProber } if c.canMesh { - f |= PeerPresentIsMeshPeer + f |= derp.PeerPresentIsMeshPeer } if c.isNotIdealConn { - f |= PeerPresentNotIdeal + f |= derp.PeerPresentNotIdeal } if f == 0 { - return PeerPresentIsRegular + return derp.PeerPresentIsRegular } return f } @@ -1688,7 +1681,7 @@ func (c *sclient) presentFlags() PeerPresentFlags { type peerConnState struct { ipPort netip.AddrPort // if present, the peer's IP:port peer key.NodePublic - flags PeerPresentFlags + flags derp.PeerPresentFlags present bool } @@ -1709,7 +1702,7 @@ type pkt struct { // peerGoneMsg is a request to write a peerGone frame to an sclient type peerGoneMsg struct { peer key.NodePublic - reason PeerGoneReasonType + reason derp.PeerGoneReasonType } func (c *sclient) setPreferred(v bool) { @@ -1788,7 +1781,7 @@ func (c *sclient) sendLoop(ctx context.Context) error { defer c.onSendLoopDone() jitter := rand.N(5 * time.Second) - keepAliveTick, keepAliveTickChannel := c.s.clock.NewTicker(KeepAlive + jitter) + keepAliveTick, keepAliveTickChannel := c.s.clock.NewTicker(derp.KeepAlive + jitter) defer keepAliveTick.Stop() var werr error // last write error @@ -1887,14 +1880,14 @@ func (c *sclient) setWriteDeadline() { // sendKeepAlive sends a keep-alive frame, without flushing. func (c *sclient) sendKeepAlive() error { c.setWriteDeadline() - return writeFrameHeader(c.bw.bw(), frameKeepAlive, 0) + return derp.WriteFrameHeader(c.bw.bw(), derp.FrameKeepAlive, 0) } // sendPong sends a pong reply, without flushing. func (c *sclient) sendPong(data [8]byte) error { c.s.sentPong.Add(1) c.setWriteDeadline() - if err := writeFrameHeader(c.bw.bw(), framePong, uint32(len(data))); err != nil { + if err := derp.WriteFrameHeader(c.bw.bw(), derp.FramePong, uint32(len(data))); err != nil { return err } _, err := c.bw.Write(data[:]) @@ -1902,23 +1895,23 @@ func (c *sclient) sendPong(data [8]byte) error { } const ( - peerGoneFrameLen = keyLen + 1 - peerPresentFrameLen = keyLen + 16 + 2 + 1 // 16 byte IP + 2 byte port + 1 byte flags + peerGoneFrameLen = derp.KeyLen + 1 + peerPresentFrameLen = derp.KeyLen + 16 + 2 + 1 // 16 byte IP + 2 byte port + 1 byte flags ) // sendPeerGone sends a peerGone frame, without flushing. -func (c *sclient) sendPeerGone(peer key.NodePublic, reason PeerGoneReasonType) error { +func (c *sclient) sendPeerGone(peer key.NodePublic, reason derp.PeerGoneReasonType) error { switch reason { - case PeerGoneReasonDisconnected: + case derp.PeerGoneReasonDisconnected: c.s.peerGoneDisconnectedFrames.Add(1) - case PeerGoneReasonNotHere: + case derp.PeerGoneReasonNotHere: c.s.peerGoneNotHereFrames.Add(1) } c.setWriteDeadline() data := make([]byte, 0, peerGoneFrameLen) data = peer.AppendTo(data) data = append(data, byte(reason)) - if err := writeFrameHeader(c.bw.bw(), framePeerGone, uint32(len(data))); err != nil { + if err := derp.WriteFrameHeader(c.bw.bw(), derp.FramePeerGone, uint32(len(data))); err != nil { return err } @@ -1927,17 +1920,17 @@ func (c *sclient) sendPeerGone(peer key.NodePublic, reason PeerGoneReasonType) e } // sendPeerPresent sends a peerPresent frame, without flushing. 
-func (c *sclient) sendPeerPresent(peer key.NodePublic, ipPort netip.AddrPort, flags PeerPresentFlags) error { +func (c *sclient) sendPeerPresent(peer key.NodePublic, ipPort netip.AddrPort, flags derp.PeerPresentFlags) error { c.setWriteDeadline() - if err := writeFrameHeader(c.bw.bw(), framePeerPresent, peerPresentFrameLen); err != nil { + if err := derp.WriteFrameHeader(c.bw.bw(), derp.FramePeerPresent, peerPresentFrameLen); err != nil { return err } payload := make([]byte, peerPresentFrameLen) _ = peer.AppendTo(payload[:0]) a16 := ipPort.Addr().As16() - copy(payload[keyLen:], a16[:]) - binary.BigEndian.PutUint16(payload[keyLen+16:], ipPort.Port()) - payload[keyLen+18] = byte(flags) + copy(payload[derp.KeyLen:], a16[:]) + binary.BigEndian.PutUint16(payload[derp.KeyLen+16:], ipPort.Port()) + payload[derp.KeyLen+18] = byte(flags) _, err := c.bw.Write(payload) return err } @@ -1975,7 +1968,7 @@ func (c *sclient) sendMeshUpdates() error { if pcs.present { err = c.sendPeerPresent(pcs.peer, pcs.ipPort, pcs.flags) } else { - err = c.sendPeerGone(pcs.peer, PeerGoneReasonDisconnected) + err = c.sendPeerGone(pcs.peer, derp.PeerGoneReasonDisconnected) } if err != nil { return err @@ -2010,7 +2003,7 @@ func (c *sclient) sendPacket(srcKey key.NodePublic, contents []byte) (err error) pktLen += key.NodePublicRawLen c.noteSendFromSrc(srcKey) } - if err = writeFrameHeader(c.bw.bw(), frameRecvPacket, uint32(pktLen)); err != nil { + if err = derp.WriteFrameHeader(c.bw.bw(), derp.FrameRecvPacket, uint32(pktLen)); err != nil { return err } if withKey { @@ -2286,7 +2279,7 @@ func (s *Server) checkVerifyClientsLocalTailscaled() error { if err != nil { return fmt.Errorf("localClient.Status: %w", err) } - info := &clientInfo{ + info := &derp.ClientInfo{ IsProber: true, } clientIP := netip.IPv6Loopback() diff --git a/derp/derp_server_default.go b/derp/derpserver/derp_server_default.go similarity index 91% rename from derp/derp_server_default.go rename to derp/derpserver/derp_server_default.go index 014cfffd6..874e590d3 100644 --- a/derp/derp_server_default.go +++ b/derp/derpserver/derp_server_default.go @@ -3,7 +3,7 @@ //go:build !linux || android -package derp +package derpserver import "context" diff --git a/derp/derp_server_linux.go b/derp/derpserver/derp_server_linux.go similarity index 99% rename from derp/derp_server_linux.go rename to derp/derpserver/derp_server_linux.go index 5a40e114e..768e6a2ab 100644 --- a/derp/derp_server_linux.go +++ b/derp/derpserver/derp_server_linux.go @@ -3,7 +3,7 @@ //go:build linux && !android -package derp +package derpserver import ( "context" diff --git a/derp/derpserver/derpserver_test.go b/derp/derpserver/derpserver_test.go new file mode 100644 index 000000000..3f0ba2ec0 --- /dev/null +++ b/derp/derpserver/derpserver_test.go @@ -0,0 +1,782 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package derpserver + +import ( + "bufio" + "cmp" + "context" + "crypto/x509" + "encoding/asn1" + "expvar" + "fmt" + "log" + "net" + "os" + "reflect" + "strconv" + "sync" + "testing" + "time" + + qt "github.com/frankban/quicktest" + "go4.org/mem" + "golang.org/x/time/rate" + "tailscale.com/derp" + "tailscale.com/derp/derpconst" + "tailscale.com/types/key" + "tailscale.com/types/logger" +) + +const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" + +func TestSetMeshKey(t *testing.T) { + for name, tt := range map[string]struct { + key string + want key.DERPMesh + wantErr bool + }{ + "clobber": { + key: testMeshKey, + 
wantErr: false, + }, + "invalid": { + key: "badf00d", + wantErr: true, + }, + } { + t.Run(name, func(t *testing.T) { + s := &Server{} + + err := s.SetMeshKey(tt.key) + if tt.wantErr { + if err == nil { + t.Fatalf("expected err") + } + return + } + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + + want, err := key.ParseDERPMesh(tt.key) + if err != nil { + t.Fatal(err) + } + if !s.meshKey.Equal(want) { + t.Fatalf("got %v, want %v", s.meshKey, want) + } + }) + } +} + +func TestIsMeshPeer(t *testing.T) { + s := &Server{} + err := s.SetMeshKey(testMeshKey) + if err != nil { + t.Fatal(err) + } + for name, tt := range map[string]struct { + want bool + meshKey string + wantAllocs float64 + }{ + "nil": { + want: false, + wantAllocs: 0, + }, + "mismatch": { + meshKey: "6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8", + want: false, + wantAllocs: 1, + }, + "match": { + meshKey: testMeshKey, + want: true, + wantAllocs: 0, + }, + } { + t.Run(name, func(t *testing.T) { + var got bool + var mKey key.DERPMesh + if tt.meshKey != "" { + mKey, err = key.ParseDERPMesh(tt.meshKey) + if err != nil { + t.Fatalf("ParseDERPMesh(%q) failed: %v", tt.meshKey, err) + } + } + + info := derp.ClientInfo{ + MeshKey: mKey, + } + allocs := testing.AllocsPerRun(1, func() { + got = s.isMeshPeer(&info) + }) + if got != tt.want { + t.Fatalf("got %t, want %t: info = %#v", got, tt.want, info) + } + + if allocs != tt.wantAllocs && tt.want { + t.Errorf("%f allocations, want %f", allocs, tt.wantAllocs) + } + }) + } +} + +type testFwd int + +func (testFwd) ForwardPacket(key.NodePublic, key.NodePublic, []byte) error { + panic("not called in tests") +} +func (testFwd) String() string { + panic("not called in tests") +} + +func pubAll(b byte) (ret key.NodePublic) { + var bs [32]byte + for i := range bs { + bs[i] = b + } + return key.NodePublicFromRaw32(mem.B(bs[:])) +} + +func TestForwarderRegistration(t *testing.T) { + s := &Server{ + clients: make(map[key.NodePublic]*clientSet), + clientsMesh: map[key.NodePublic]PacketForwarder{}, + } + want := func(want map[key.NodePublic]PacketForwarder) { + t.Helper() + if got := s.clientsMesh; !reflect.DeepEqual(got, want) { + t.Fatalf("mismatch\n got: %v\nwant: %v\n", got, want) + } + } + wantCounter := func(c *expvar.Int, want int) { + t.Helper() + if got := c.Value(); got != int64(want) { + t.Errorf("counter = %v; want %v", got, want) + } + } + singleClient := func(c *sclient) *clientSet { + cs := &clientSet{} + cs.activeClient.Store(c) + return cs + } + + u1 := pubAll(1) + u2 := pubAll(2) + u3 := pubAll(3) + + s.AddPacketForwarder(u1, testFwd(1)) + s.AddPacketForwarder(u2, testFwd(2)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(1), + u2: testFwd(2), + }) + + // Verify a remove of non-registered forwarder is no-op. + s.RemovePacketForwarder(u2, testFwd(999)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(1), + u2: testFwd(2), + }) + + // Verify a remove of non-registered user is no-op. + s.RemovePacketForwarder(u3, testFwd(1)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(1), + u2: testFwd(2), + }) + + // Actual removal. + s.RemovePacketForwarder(u2, testFwd(2)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(1), + }) + + // Adding a dup for a user. 
+ wantCounter(&s.multiForwarderCreated, 0) + s.AddPacketForwarder(u1, testFwd(100)) + s.AddPacketForwarder(u1, testFwd(100)) // dup to trigger dup path + want(map[key.NodePublic]PacketForwarder{ + u1: newMultiForwarder(testFwd(1), testFwd(100)), + }) + wantCounter(&s.multiForwarderCreated, 1) + + // Removing a forwarder in a multi set that doesn't exist; does nothing. + s.RemovePacketForwarder(u1, testFwd(55)) + want(map[key.NodePublic]PacketForwarder{ + u1: newMultiForwarder(testFwd(1), testFwd(100)), + }) + + // Removing a forwarder in a multi set that does exist should collapse it away + // from being a multiForwarder. + wantCounter(&s.multiForwarderDeleted, 0) + s.RemovePacketForwarder(u1, testFwd(1)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(100), + }) + wantCounter(&s.multiForwarderDeleted, 1) + + // Removing an entry for a client that's still connected locally should result + // in a nil forwarder. + u1c := &sclient{ + key: u1, + logf: logger.Discard, + } + s.clients[u1] = singleClient(u1c) + s.RemovePacketForwarder(u1, testFwd(100)) + want(map[key.NodePublic]PacketForwarder{ + u1: nil, + }) + + // But once that client disconnects, it should go away. + s.unregisterClient(u1c) + want(map[key.NodePublic]PacketForwarder{}) + + // But if it already has a forwarder, it's not removed. + s.AddPacketForwarder(u1, testFwd(2)) + s.unregisterClient(u1c) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(2), + }) + + // Now pretend u1 was already connected locally (so clientsMesh[u1] is nil), and then we heard + // that they're also connected to a peer of ours. That shouldn't transition the forwarder + // from nil to the new one, not a multiForwarder. + s.clients[u1] = singleClient(u1c) + s.clientsMesh[u1] = nil + want(map[key.NodePublic]PacketForwarder{ + u1: nil, + }) + s.AddPacketForwarder(u1, testFwd(3)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(3), + }) +} + +type channelFwd struct { + // id is to ensure that different instances that reference the + // same channel are not equal, as they are used as keys in the + // multiForwarder map. + id int + c chan []byte +} + +func (f channelFwd) String() string { return "" } +func (f channelFwd) ForwardPacket(_ key.NodePublic, _ key.NodePublic, packet []byte) error { + f.c <- packet + return nil +} + +func TestMultiForwarder(t *testing.T) { + received := 0 + var wg sync.WaitGroup + ch := make(chan []byte) + ctx, cancel := context.WithCancel(context.Background()) + + s := &Server{ + clients: make(map[key.NodePublic]*clientSet), + clientsMesh: map[key.NodePublic]PacketForwarder{}, + } + u := pubAll(1) + s.AddPacketForwarder(u, channelFwd{1, ch}) + + wg.Add(2) + go func() { + defer wg.Done() + for { + select { + case <-ch: + received += 1 + case <-ctx.Done(): + return + } + } + }() + go func() { + defer wg.Done() + for { + s.AddPacketForwarder(u, channelFwd{2, ch}) + s.AddPacketForwarder(u, channelFwd{3, ch}) + s.RemovePacketForwarder(u, channelFwd{2, ch}) + s.RemovePacketForwarder(u, channelFwd{1, ch}) + s.AddPacketForwarder(u, channelFwd{1, ch}) + s.RemovePacketForwarder(u, channelFwd{3, ch}) + if ctx.Err() != nil { + return + } + } + }() + + // Number of messages is chosen arbitrarily, just for this loop to + // run long enough concurrently with {Add,Remove}PacketForwarder loop above. 
+ numMsgs := 5000 + var fwd PacketForwarder + for i := range numMsgs { + s.mu.Lock() + fwd = s.clientsMesh[u] + s.mu.Unlock() + fwd.ForwardPacket(u, u, []byte(strconv.Itoa(i))) + } + + cancel() + wg.Wait() + if received != numMsgs { + t.Errorf("expected %d messages to be forwarded; got %d", numMsgs, received) + } +} +func TestMetaCert(t *testing.T) { + priv := key.NewNode() + pub := priv.Public() + s := NewServer(priv, t.Logf) + + certBytes := s.MetaCert() + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + log.Fatal(err) + } + if fmt.Sprint(cert.SerialNumber) != fmt.Sprint(derp.ProtocolVersion) { + t.Errorf("serial = %v; want %v", cert.SerialNumber, derp.ProtocolVersion) + } + if g, w := cert.Subject.CommonName, derpconst.MetaCertCommonNamePrefix+pub.UntypedHexString(); g != w { + t.Errorf("CommonName = %q; want %q", g, w) + } + if n := len(cert.Extensions); n != 1 { + t.Fatalf("got %d extensions; want 1", n) + } + + // oidExtensionBasicConstraints is the Basic Constraints ID copied + // from the x509 package. + oidExtensionBasicConstraints := asn1.ObjectIdentifier{2, 5, 29, 19} + + if id := cert.Extensions[0].Id; !id.Equal(oidExtensionBasicConstraints) { + t.Errorf("extension ID = %v; want %v", id, oidExtensionBasicConstraints) + } +} + +func TestServerDupClients(t *testing.T) { + serverPriv := key.NewNode() + var s *Server + + clientPriv := key.NewNode() + clientPub := clientPriv.Public() + + var c1, c2, c3 *sclient + var clientName map[*sclient]string + + // run starts a new test case and resets clients back to their zero values. + run := func(name string, dupPolicy dupPolicy, f func(t *testing.T)) { + s = NewServer(serverPriv, t.Logf) + s.dupPolicy = dupPolicy + c1 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c1: ")} + c2 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c2: ")} + c3 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c3: ")} + clientName = map[*sclient]string{ + c1: "c1", + c2: "c2", + c3: "c3", + } + t.Run(name, f) + } + runBothWays := func(name string, f func(t *testing.T)) { + run(name+"_disablefighters", disableFighters, f) + run(name+"_lastwriteractive", lastWriterIsActive, f) + } + wantSingleClient := func(t *testing.T, want *sclient) { + t.Helper() + got, ok := s.clients[want.key] + if !ok { + t.Error("no clients for key") + return + } + if got.dup != nil { + t.Errorf("unexpected dup set for single client") + } + cur := got.activeClient.Load() + if cur != want { + t.Errorf("active client = %q; want %q", clientName[cur], clientName[want]) + } + if cur != nil { + if cur.isDup.Load() { + t.Errorf("unexpected isDup on singleClient") + } + if cur.isDisabled.Load() { + t.Errorf("unexpected isDisabled on singleClient") + } + } + } + wantNoClient := func(t *testing.T) { + t.Helper() + _, ok := s.clients[clientPub] + if !ok { + // Good + return + } + t.Errorf("got client; want empty") + } + wantDupSet := func(t *testing.T) *dupClientSet { + t.Helper() + cs, ok := s.clients[clientPub] + if !ok { + t.Fatal("no set for key; want dup set") + return nil + } + if cs.dup != nil { + return cs.dup + } + t.Fatalf("no dup set for key; want dup set") + return nil + } + wantActive := func(t *testing.T, want *sclient) { + t.Helper() + set, ok := s.clients[clientPub] + if !ok { + t.Error("no set for key") + return + } + got := set.activeClient.Load() + if got != want { + t.Errorf("active client = %q; want %q", clientName[got], clientName[want]) + } + } + checkDup := func(t *testing.T, c *sclient, want bool) { + t.Helper() + if got := 
c.isDup.Load(); got != want { + t.Errorf("client %q isDup = %v; want %v", clientName[c], got, want) + } + } + checkDisabled := func(t *testing.T, c *sclient, want bool) { + t.Helper() + if got := c.isDisabled.Load(); got != want { + t.Errorf("client %q isDisabled = %v; want %v", clientName[c], got, want) + } + } + wantDupConns := func(t *testing.T, want int) { + t.Helper() + if got := s.dupClientConns.Value(); got != int64(want) { + t.Errorf("dupClientConns = %v; want %v", got, want) + } + } + wantDupKeys := func(t *testing.T, want int) { + t.Helper() + if got := s.dupClientKeys.Value(); got != int64(want) { + t.Errorf("dupClientKeys = %v; want %v", got, want) + } + } + + // Common case: a single client comes and goes, with no dups. + runBothWays("one_comes_and_goes", func(t *testing.T) { + wantNoClient(t) + s.registerClient(c1) + wantSingleClient(t, c1) + s.unregisterClient(c1) + wantNoClient(t) + }) + + // A still somewhat common case: a single client was + // connected and then their wifi dies or laptop closes + // or they switch networks and connect from a + // different network. They have two connections but + // it's not very bad. Only their new one is + // active. The last one, being dead, doesn't send and + // thus the new one doesn't get disabled. + runBothWays("small_overlap_replacement", func(t *testing.T) { + wantNoClient(t) + s.registerClient(c1) + wantSingleClient(t, c1) + wantActive(t, c1) + wantDupKeys(t, 0) + wantDupKeys(t, 0) + + s.registerClient(c2) // wifi dies; c2 replacement connects + wantDupSet(t) + wantDupConns(t, 2) + wantDupKeys(t, 1) + checkDup(t, c1, true) + checkDup(t, c2, true) + checkDisabled(t, c1, false) + checkDisabled(t, c2, false) + wantActive(t, c2) // sends go to the replacement + + s.unregisterClient(c1) // c1 finally times out + wantSingleClient(t, c2) + checkDup(t, c2, false) // c2 is longer a dup + wantActive(t, c2) + wantDupConns(t, 0) + wantDupKeys(t, 0) + }) + + // Key cloning situation with concurrent clients, both trying + // to write. + run("concurrent_dups_get_disabled", disableFighters, func(t *testing.T) { + wantNoClient(t) + s.registerClient(c1) + wantSingleClient(t, c1) + wantActive(t, c1) + s.registerClient(c2) + wantDupSet(t) + wantDupKeys(t, 1) + wantDupConns(t, 2) + wantActive(t, c2) + checkDup(t, c1, true) + checkDup(t, c2, true) + checkDisabled(t, c1, false) + checkDisabled(t, c2, false) + + s.noteClientActivity(c2) + checkDisabled(t, c1, false) + checkDisabled(t, c2, false) + s.noteClientActivity(c1) + checkDisabled(t, c1, true) + checkDisabled(t, c2, true) + wantActive(t, nil) + + s.registerClient(c3) + wantActive(t, c3) + checkDisabled(t, c3, false) + wantDupKeys(t, 1) + wantDupConns(t, 3) + + s.unregisterClient(c3) + wantActive(t, nil) + wantDupKeys(t, 1) + wantDupConns(t, 2) + + s.unregisterClient(c2) + wantSingleClient(t, c1) + wantDupKeys(t, 0) + wantDupConns(t, 0) + }) + + // Key cloning with an A->B->C->A series instead. + run("concurrent_dups_three_parties", disableFighters, func(t *testing.T) { + wantNoClient(t) + s.registerClient(c1) + s.registerClient(c2) + s.registerClient(c3) + s.noteClientActivity(c1) + checkDisabled(t, c1, true) + checkDisabled(t, c2, true) + checkDisabled(t, c3, true) + wantActive(t, nil) + }) + + run("activity_promotes_primary_when_nil", disableFighters, func(t *testing.T) { + wantNoClient(t) + + // Last registered client is the active one... 
+ s.registerClient(c1) + wantActive(t, c1) + s.registerClient(c2) + wantActive(t, c2) + s.registerClient(c3) + s.noteClientActivity(c2) + wantActive(t, c3) + + // But if the last one goes away, the one with the + // most recent activity wins. + s.unregisterClient(c3) + wantActive(t, c2) + }) + + run("concurrent_dups_three_parties_last_writer", lastWriterIsActive, func(t *testing.T) { + wantNoClient(t) + + s.registerClient(c1) + wantActive(t, c1) + s.registerClient(c2) + wantActive(t, c2) + + s.noteClientActivity(c1) + checkDisabled(t, c1, false) + checkDisabled(t, c2, false) + wantActive(t, c1) + + s.noteClientActivity(c2) + checkDisabled(t, c1, false) + checkDisabled(t, c2, false) + wantActive(t, c2) + + s.unregisterClient(c2) + checkDisabled(t, c1, false) + wantActive(t, c1) + }) +} + +func TestLimiter(t *testing.T) { + rl := rate.NewLimiter(rate.Every(time.Minute), 100) + for i := range 200 { + r := rl.Reserve() + d := r.Delay() + t.Logf("i=%d, allow=%v, d=%v", i, r.OK(), d) + } +} + +// BenchmarkConcurrentStreams exercises mutex contention on a +// single Server instance with multiple concurrent client flows. +func BenchmarkConcurrentStreams(b *testing.B) { + serverPrivateKey := key.NewNode() + s := NewServer(serverPrivateKey, logger.Discard) + defer s.Close() + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + b.Fatal(err) + } + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + for ctx.Err() == nil { + connIn, err := ln.Accept() + if err != nil { + if ctx.Err() != nil { + return + } + b.Error(err) + return + } + + brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) + go s.Accept(ctx, connIn, brwServer, "test-client") + } + }() + + newClient := func(t testing.TB) *derp.Client { + t.Helper() + connOut, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + b.Fatal(err) + } + t.Cleanup(func() { connOut.Close() }) + + k := key.NewNode() + + brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) + client, err := derp.NewClient(k, connOut, brw, logger.Discard) + if err != nil { + b.Fatalf("client: %v", err) + } + return client + } + + b.RunParallel(func(pb *testing.PB) { + c1, c2 := newClient(b), newClient(b) + const packetSize = 100 + msg := make([]byte, packetSize) + for pb.Next() { + if err := c1.Send(c2.PublicKey(), msg); err != nil { + b.Fatal(err) + } + _, err := c2.Recv() + if err != nil { + return + } + } + }) +} + +func BenchmarkSendRecv(b *testing.B) { + for _, size := range []int{10, 100, 1000, 10000} { + b.Run(fmt.Sprintf("msgsize=%d", size), func(b *testing.B) { benchmarkSendRecvSize(b, size) }) + } +} + +func benchmarkSendRecvSize(b *testing.B, packetSize int) { + serverPrivateKey := key.NewNode() + s := NewServer(serverPrivateKey, logger.Discard) + defer s.Close() + + k := key.NewNode() + clientKey := k.Public() + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + b.Fatal(err) + } + defer ln.Close() + + connOut, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + b.Fatal(err) + } + defer connOut.Close() + + connIn, err := ln.Accept() + if err != nil { + b.Fatal(err) + } + defer connIn.Close() + + brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go s.Accept(ctx, connIn, brwServer, "test-client") + + brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) + client, err := 
derp.NewClient(k, connOut, brw, logger.Discard) + if err != nil { + b.Fatalf("client: %v", err) + } + + go func() { + for { + _, err := client.Recv() + if err != nil { + return + } + } + }() + + msg := make([]byte, packetSize) + b.SetBytes(int64(len(msg))) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + if err := client.Send(clientKey, msg); err != nil { + b.Fatal(err) + } + } +} + +func TestParseSSOutput(t *testing.T) { + contents, err := os.ReadFile("testdata/example_ss.txt") + if err != nil { + t.Errorf("os.ReadFile(example_ss.txt) failed: %v", err) + } + seen := parseSSOutput(string(contents)) + if len(seen) == 0 { + t.Errorf("parseSSOutput expected non-empty map") + } +} + +func TestGetPerClientSendQueueDepth(t *testing.T) { + c := qt.New(t) + envKey := "TS_DEBUG_DERP_PER_CLIENT_SEND_QUEUE_DEPTH" + + testCases := []struct { + envVal string + want int + }{ + // Empty case, envknob treats empty as missing also. + { + "", defaultPerClientSendQueueDepth, + }, + { + "64", 64, + }, + } + + for _, tc := range testCases { + t.Run(cmp.Or(tc.envVal, "empty"), func(t *testing.T) { + t.Setenv(envKey, tc.envVal) + val := getPerClientSendQueueDepth() + c.Assert(val, qt.Equals, tc.want) + }) + } +} diff --git a/derp/derphttp/derphttp_server.go b/derp/derpserver/handler.go similarity index 86% rename from derp/derphttp/derphttp_server.go rename to derp/derpserver/handler.go index 50aba774a..7cd6aa2fd 100644 --- a/derp/derphttp/derphttp_server.go +++ b/derp/derpserver/handler.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package derphttp +package derpserver import ( "fmt" @@ -12,14 +12,8 @@ import ( "tailscale.com/derp" ) -// fastStartHeader is the header (with value "1") that signals to the HTTP -// server that the DERP HTTP client does not want the HTTP 101 response -// headers and it will begin writing & reading the DERP protocol immediately -// following its HTTP request. -const fastStartHeader = "Derp-Fast-Start" - // Handler returns an http.Handler to be mounted at /derp, serving s. 
-func Handler(s *derp.Server) http.Handler { +func Handler(s *Server) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -42,7 +36,7 @@ func Handler(s *derp.Server) http.Handler { return } - fastStart := r.Header.Get(fastStartHeader) == "1" + fastStart := r.Header.Get(derp.FastStartHeader) == "1" h, ok := w.(http.Hijacker) if !ok { @@ -69,7 +63,7 @@ func Handler(s *derp.Server) http.Handler { } if v := r.Header.Get(derp.IdealNodeHeader); v != "" { - ctx = derp.IdealNodeContextKey.WithValue(ctx, v) + ctx = IdealNodeContextKey.WithValue(ctx, v) } s.Accept(ctx, netConn, conn, netConn.RemoteAddr().String()) diff --git a/derp/testdata/example_ss.txt b/derp/derpserver/testdata/example_ss.txt similarity index 100% rename from derp/testdata/example_ss.txt rename to derp/derpserver/testdata/example_ss.txt diff --git a/derp/export_test.go b/derp/export_test.go new file mode 100644 index 000000000..677a4932d --- /dev/null +++ b/derp/export_test.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package derp + +import "time" + +func (c *Client) RecvTimeoutForTest(timeout time.Duration) (m ReceivedMessage, err error) { + return c.recvTimeout(timeout) +} diff --git a/ipn/ipnlocal/web_client_stub.go b/ipn/ipnlocal/web_client_stub.go index 5f37560cc..787867b4f 100644 --- a/ipn/ipnlocal/web_client_stub.go +++ b/ipn/ipnlocal/web_client_stub.go @@ -8,15 +8,13 @@ package ipnlocal import ( "errors" "net" - - "tailscale.com/client/local" ) const webClientPort = 5252 type webClient struct{} -func (b *LocalBackend) ConfigureWebClient(lc *local.Client) {} +func (b *LocalBackend) ConfigureWebClient(any) {} func (b *LocalBackend) webClientGetOrInit() error { return errors.New("not implemented") diff --git a/net/captivedetection/captivedetection_test.go b/net/captivedetection/captivedetection_test.go index 064a86c8c..0778e07df 100644 --- a/net/captivedetection/captivedetection_test.go +++ b/net/captivedetection/captivedetection_test.go @@ -15,7 +15,7 @@ import ( "testing" "time" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/syncs" "tailscale.com/tstest/nettest" @@ -136,7 +136,7 @@ func TestAgainstDERPHandler(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - s := httptest.NewServer(http.HandlerFunc(derphttp.ServeNoContent)) + s := httptest.NewServer(http.HandlerFunc(derpserver.ServeNoContent)) defer s.Close() e := Endpoint{ URL: must.Get(url.Parse(s.URL + "/generate_204")), diff --git a/prober/derp_test.go b/prober/derp_test.go index 1ace9983c..92bcb0a61 100644 --- a/prober/derp_test.go +++ b/prober/derp_test.go @@ -16,6 +16,7 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/tailcfg" "tailscale.com/types/key" @@ -145,12 +146,12 @@ func TestDerpProber(t *testing.T) { func TestRunDerpProbeNodePair(t *testing.T) { // os.Setenv("DERP_DEBUG_LOGS", "true") serverPrivateKey := key.NewNode() - s := derp.NewServer(serverPrivateKey, t.Logf) + s := derpserver.NewServer(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: derphttp.Handler(s), + Handler: derpserver.Handler(s), } ln, err := net.Listen("tcp4", "localhost:0") if err != nil { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 619183a60..795e4367f 100644 --- 
a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -222,9 +222,9 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ - tailscale.com/disco from tailscale.com/derp+ + tailscale.com/disco from tailscale.com/net/tstun+ tailscale.com/doctor from tailscale.com/ipn/ipnlocal tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal @@ -266,7 +266,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock @@ -302,7 +302,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ @@ -327,7 +326,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/tsd from tailscale.com/ipn/ipnext+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ - tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal @@ -559,7 +558,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) encoding/pem from crypto/tls+ encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ errors from archive/tar+ - expvar from tailscale.com/derp+ + expvar from tailscale.com/health+ flag from tailscale.com/util/testenv fmt from archive/tar+ hash from compress/zlib+ diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index b28ebaba1..56643f5d4 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -34,8 +34,7 @@ import ( "go4.org/mem" "tailscale.com/client/local" - "tailscale.com/derp" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/ipn" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnstate" @@ -297,14 +296,14 @@ func exe() string { func RunDERPAndSTUN(t testing.TB, logf logger.Logf, ipAddress string) (derpMap *tailcfg.DERPMap) { t.Helper() - d := derp.NewServer(key.NewNode(), logf) + d := derpserver.NewServer(key.NewNode(), logf) ln, err := 
net.Listen("tcp", net.JoinHostPort(ipAddress, "0")) if err != nil { t.Fatal(err) } - httpsrv := httptest.NewUnstartedServer(derphttp.Handler(d)) + httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d)) httpsrv.Listener.Close() httpsrv.Listener = ln httpsrv.Config.ErrorLog = logger.StdLogger(logf) diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index 1fa170d87..27ee51726 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -51,8 +51,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" "gvisor.dev/gvisor/pkg/waiter" "tailscale.com/client/local" - "tailscale.com/derp" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netutil" "tailscale.com/net/netx" "tailscale.com/net/stun" @@ -601,7 +600,7 @@ func (n *node) String() string { } type derpServer struct { - srv *derp.Server + srv *derpserver.Server handler http.Handler tlsConfig *tls.Config } @@ -612,12 +611,12 @@ func newDERPServer() *derpServer { ts.Close() ds := &derpServer{ - srv: derp.NewServer(key.NewNode(), logger.Discard), + srv: derpserver.NewServer(key.NewNode(), logger.Discard), tlsConfig: ts.TLS, // self-signed; test client configure to not check } var mux http.ServeMux - mux.Handle("/derp", derphttp.Handler(ds.srv)) - mux.HandleFunc("/generate_204", derphttp.ServeNoContent) + mux.Handle("/derp", derpserver.Handler(ds.srv)) + mux.HandleFunc("/generate_204", derpserver.ServeNoContent) ds.handler = &mux return ds diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 1b885c3f1..de24a5f60 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -39,8 +39,7 @@ import ( "golang.org/x/net/ipv4" "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/control/controlknobs" - "tailscale.com/derp" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/disco" "tailscale.com/envknob" "tailscale.com/health" @@ -112,9 +111,9 @@ func (c *Conn) WaitReady(t testing.TB) { } func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) { - d := derp.NewServer(key.NewNode(), logf) + d := derpserver.NewServer(key.NewNode(), logf) - httpsrv := httptest.NewUnstartedServer(derphttp.Handler(d)) + httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d)) httpsrv.Config.ErrorLog = logger.StdLogger(logf) httpsrv.Config.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) httpsrv.StartTLS() From db02a4664547a7d1d82650e28bd84e5eb4b243ef Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Wed, 24 Sep 2025 10:20:41 -0700 Subject: [PATCH 1385/1708] types/key: Update HardwareAttestationPublic representation (#17233) Sidestep cmd/viewer incompatibility hiccups with HardwareAttestationPublic type due to its *ecdsa.PublicKey inner member by serializing the key to a byte slice instead. 
Updates tailscale/corp#31269 Signed-off-by: Patrick O'Doherty --- types/key/hardware_attestation.go | 55 +++++++++++++++++++++---------- 1 file changed, 37 insertions(+), 18 deletions(-) diff --git a/types/key/hardware_attestation.go b/types/key/hardware_attestation.go index ead077a5d..ac3914ab2 100644 --- a/types/key/hardware_attestation.go +++ b/types/key/hardware_attestation.go @@ -7,6 +7,7 @@ import ( "crypto" "crypto/ecdsa" "crypto/elliptic" + "crypto/subtle" "encoding/json" "fmt" "io" @@ -18,11 +19,13 @@ var ErrUnsupported = fmt.Errorf("key type not supported on this platform") const hardwareAttestPublicHexPrefix = "hwattestpub:" +const pubkeyLength = 65 // uncompressed P-256 + // HardwareAttestationKey describes a hardware-backed key that is used to // identify a node. Implementation details will // vary based on the platform in use (SecureEnclave for Apple, TPM for // Windows/Linux, Android Hardware-backed Keystore). -// This key can only be marshalled and unmarshalled on the same machine. +// This key can only be marshalled and unmarshaled on the same machine. type HardwareAttestationKey interface { crypto.Signer json.Marshaler @@ -43,25 +46,41 @@ func HardwareAttestationPublicFromPlatformKey(k HardwareAttestationKey) Hardware if !ok { panic("hardware attestation key is not ECDSA") } - return HardwareAttestationPublic{k: ecdsaPub} + bytes, err := ecdsaPub.Bytes() + if err != nil { + panic(err) + } + if len(bytes) != pubkeyLength { + panic("hardware attestation key is not uncompressed ECDSA P-256") + } + var ecdsaPubArr [pubkeyLength]byte + copy(ecdsaPubArr[:], bytes) + return HardwareAttestationPublic{k: ecdsaPubArr} } // HardwareAttestationPublic is the public key counterpart to // HardwareAttestationKey. type HardwareAttestationPublic struct { - k *ecdsa.PublicKey + k [pubkeyLength]byte } -func (k HardwareAttestationPublic) Equal(o HardwareAttestationPublic) bool { - if k.k == nil || o.k == nil { - return k.k == o.k +func (k *HardwareAttestationPublic) Clone() *HardwareAttestationPublic { + if k == nil { + return nil } - return k.k.X.Cmp(o.k.X) == 0 && k.k.Y.Cmp(o.k.Y) == 0 && k.k.Curve == o.k.Curve + var out HardwareAttestationPublic + copy(out.k[:], k.k[:]) + return &out +} + +func (k HardwareAttestationPublic) Equal(o HardwareAttestationPublic) bool { + return subtle.ConstantTimeCompare(k.k[:], o.k[:]) == 1 } // IsZero reports whether k is the zero value. func (k HardwareAttestationPublic) IsZero() bool { - return k.k == nil + var zero [pubkeyLength]byte + return k.k == zero } // String returns the hex-encoded public key with a type prefix. @@ -75,7 +94,7 @@ func (k HardwareAttestationPublic) String() string { // MarshalText implements encoding.TextMarshaler. 
func (k HardwareAttestationPublic) MarshalText() ([]byte, error) { - if k.k == nil { + if k.IsZero() { return nil, nil } return k.AppendText(nil) @@ -89,30 +108,30 @@ func (k *HardwareAttestationPublic) UnmarshalText(b []byte) error { return nil } - kb := make([]byte, 65) + kb := make([]byte, pubkeyLength) if err := parseHex(kb, mem.B(b), mem.S(hardwareAttestPublicHexPrefix)); err != nil { return err } - pk, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), kb) + _, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), kb) if err != nil { return err } - k.k = pk + copy(k.k[:], kb) return nil } func (k HardwareAttestationPublic) AppendText(dst []byte) ([]byte, error) { - b, err := k.k.Bytes() - if err != nil { - return nil, err - } - return appendHexKey(dst, hardwareAttestPublicHexPrefix, b), nil + return appendHexKey(dst, hardwareAttestPublicHexPrefix, k.k[:]), nil } // Verifier returns the ECDSA public key for verifying signatures made by k. func (k HardwareAttestationPublic) Verifier() *ecdsa.PublicKey { - return k.k + pk, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), k.k[:]) + if err != nil { + panic(err) + } + return pk } // emptyHardwareAttestationKey is a function that returns an empty From 34242df51b413351a1caec2213d7e9ca41dd75ed Mon Sep 17 00:00:00 2001 From: Simon Law Date: Wed, 24 Sep 2025 10:38:30 -0700 Subject: [PATCH 1386/1708] derp/derpserver: clean up extraction of derp.Server (#17264) PR #17258 extracted `derp.Server` into `derp/derpserver.Server`. This followup patch adds the following cleanups: 1. Rename `derp_server*.go` files to `derpserver*.go` to match the package name. 2. Rename the `derpserver.NewServer` constructor to `derpserver.New` to reduce stuttering. 3. Remove the unnecessary `derpserver.Conn` type alias. 
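For illustration only, a caller after this cleanup looks roughly like the
sketch below; it is assembled from the call-site updates in this diff
(derpserver.New, derpserver.Handler) rather than any new API:

    package main

    import (
        "log"
        "net/http"

        "tailscale.com/derp/derpserver"
        "tailscale.com/types/key"
    )

    func main() {
        // Was derpserver.NewServer(...) before this cleanup.
        s := derpserver.New(key.NewNode(), log.Printf)
        defer s.Close()

        // derpserver.Handler hijacks the HTTP connection and hands it to
        // s.Accept, which now takes a derp.Conn directly; the derpserver.Conn
        // alias is gone.
        srv := &http.Server{Handler: derpserver.Handler(s)}
        log.Fatal(srv.ListenAndServe())
    }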
Updates #17257 Updates #cleanup Signed-off-by: Simon Law --- cmd/derper/cert_test.go | 2 +- cmd/derper/derper.go | 2 +- derp/derp_test.go | 6 +++--- derp/derphttp/derphttp_test.go | 6 +++--- .../derpserver/{derp_server.go => derpserver.go} | 16 +++++++--------- ...p_server_default.go => derpserver_default.go} | 0 ...{derp_server_linux.go => derpserver_linux.go} | 0 derp/derpserver/derpserver_test.go | 8 ++++---- prober/derp_test.go | 2 +- tstest/integration/integration.go | 2 +- tstest/natlab/vnet/vnet.go | 2 +- wgengine/magicsock/magicsock_test.go | 2 +- 12 files changed, 23 insertions(+), 25 deletions(-) rename derp/derpserver/{derp_server.go => derpserver.go} (99%) rename derp/derpserver/{derp_server_default.go => derpserver_default.go} (100%) rename derp/derpserver/{derp_server_linux.go => derpserver_linux.go} (100%) diff --git a/cmd/derper/cert_test.go b/cmd/derper/cert_test.go index 1ef932e7f..c8a3229e9 100644 --- a/cmd/derper/cert_test.go +++ b/cmd/derper/cert_test.go @@ -131,7 +131,7 @@ func TestPinnedCertRawIP(t *testing.T) { } defer ln.Close() - ds := derpserver.NewServer(key.NewNode(), t.Logf) + ds := derpserver.New(key.NewNode(), t.Logf) derpHandler := derpserver.Handler(ds) mux := http.NewServeMux() diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index eed94bd68..857d7def3 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -188,7 +188,7 @@ func main() { serveTLS := tsweb.IsProd443(*addr) || *certMode == "manual" - s := derpserver.NewServer(cfg.PrivateKey, log.Printf) + s := derpserver.New(cfg.PrivateKey, log.Printf) s.SetVerifyClient(*verifyClients) s.SetTailscaledSocketPath(*socket) s.SetVerifyClientURL(*verifyClientURL) diff --git a/derp/derp_test.go b/derp/derp_test.go index e765f7b54..52793f90f 100644 --- a/derp/derp_test.go +++ b/derp/derp_test.go @@ -83,7 +83,7 @@ func TestClientInfoUnmarshal(t *testing.T) { func TestSendRecv(t *testing.T) { serverPrivateKey := key.NewNode() - s := derpserver.NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() const numClients = 3 @@ -305,7 +305,7 @@ func TestSendRecv(t *testing.T) { func TestSendFreeze(t *testing.T) { serverPrivateKey := key.NewNode() - s := derpserver.NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() s.WriteTimeout = 100 * time.Millisecond @@ -549,7 +549,7 @@ const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789a func newTestServer(t *testing.T, ctx context.Context) *testServer { t.Helper() logf := logger.WithPrefix(t.Logf, "derp-server: ") - s := derpserver.NewServer(key.NewNode(), logf) + s := derpserver.New(key.NewNode(), logf) s.SetMeshKey(testMeshKey) ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index dd7cbcd24..36c11f4fc 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -44,7 +44,7 @@ func TestSendRecv(t *testing.T) { clientKeys = append(clientKeys, priv.Public()) } - s := derpserver.NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ @@ -172,7 +172,7 @@ func waitConnect(t testing.TB, c *derphttp.Client) { func TestPing(t *testing.T) { serverPrivateKey := key.NewNode() - s := derpserver.NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ @@ -225,7 +225,7 @@ func TestPing(t *testing.T) { const 
testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derpserver.Server) { - s = derpserver.NewServer(k, t.Logf) + s = derpserver.New(k, t.Logf) httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), Handler: derpserver.Handler(s), diff --git a/derp/derpserver/derp_server.go b/derp/derpserver/derpserver.go similarity index 99% rename from derp/derpserver/derp_server.go rename to derp/derpserver/derpserver.go index 917ef147c..31cf9363a 100644 --- a/derp/derpserver/derp_server.go +++ b/derp/derpserver/derpserver.go @@ -57,8 +57,6 @@ import ( "tailscale.com/version" ) -type Conn = derp.Conn - // verboseDropKeys is the set of destination public keys that should // verbosely log whenever DERP drops a packet. var verboseDropKeys = map[key.NodePublic]bool{} @@ -181,7 +179,7 @@ type Server struct { mu sync.Mutex closed bool - netConns map[Conn]chan struct{} // chan is closed when conn closes + netConns map[derp.Conn]chan struct{} // chan is closed when conn closes clients map[key.NodePublic]*clientSet watchers set.Set[*sclient] // mesh peers // clientsMesh tracks all clients in the cluster, both locally @@ -354,9 +352,9 @@ var bytesDropped = metrics.NewMultiLabelMap[dropReasonKindLabels]( "DERP bytes dropped by reason and by kind", ) -// NewServer returns a new DERP server. It doesn't listen on its own. +// New returns a new DERP server. It doesn't listen on its own. // Connections are given to it via Server.Accept. -func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { +func New(privateKey key.NodePrivate, logf logger.Logf) *Server { var ms runtime.MemStats runtime.ReadMemStats(&ms) @@ -369,7 +367,7 @@ func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { packetsRecvByKind: metrics.LabelMap{Label: "kind"}, clients: map[key.NodePublic]*clientSet{}, clientsMesh: map[key.NodePublic]PacketForwarder{}, - netConns: map[Conn]chan struct{}{}, + netConns: map[derp.Conn]chan struct{}{}, memSys0: ms.Sys, watchers: set.Set[*sclient]{}, peerGoneWatchers: map[key.NodePublic]set.HandleSet[func(key.NodePublic)]{}, @@ -570,7 +568,7 @@ func (s *Server) IsClientConnectedForTest(k key.NodePublic) bool { // on its own. // // Accept closes nc. -func (s *Server) Accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, remoteAddr string) { +func (s *Server) Accept(ctx context.Context, nc derp.Conn, brw *bufio.ReadWriter, remoteAddr string) { closed := make(chan struct{}) s.mu.Lock() @@ -910,7 +908,7 @@ func (s *Server) addWatcher(c *sclient) { go c.requestMeshUpdate() } -func (s *Server) accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, remoteAddr string, connNum int64) error { +func (s *Server) accept(ctx context.Context, nc derp.Conn, brw *bufio.ReadWriter, remoteAddr string, connNum int64) error { br := brw.Reader nc.SetDeadline(time.Now().Add(10 * time.Second)) bw := &lazyBufioWriter{w: nc, lbw: brw.Writer} @@ -1619,7 +1617,7 @@ type sclient struct { // Static after construction. 
connNum int64 // process-wide unique counter, incremented each Accept s *Server - nc Conn + nc derp.Conn key key.NodePublic info derp.ClientInfo logf logger.Logf diff --git a/derp/derpserver/derp_server_default.go b/derp/derpserver/derpserver_default.go similarity index 100% rename from derp/derpserver/derp_server_default.go rename to derp/derpserver/derpserver_default.go diff --git a/derp/derpserver/derp_server_linux.go b/derp/derpserver/derpserver_linux.go similarity index 100% rename from derp/derpserver/derp_server_linux.go rename to derp/derpserver/derpserver_linux.go diff --git a/derp/derpserver/derpserver_test.go b/derp/derpserver/derpserver_test.go index 3f0ba2ec0..2db5f25bc 100644 --- a/derp/derpserver/derpserver_test.go +++ b/derp/derpserver/derpserver_test.go @@ -330,7 +330,7 @@ func TestMultiForwarder(t *testing.T) { func TestMetaCert(t *testing.T) { priv := key.NewNode() pub := priv.Public() - s := NewServer(priv, t.Logf) + s := New(priv, t.Logf) certBytes := s.MetaCert() cert, err := x509.ParseCertificate(certBytes) @@ -368,7 +368,7 @@ func TestServerDupClients(t *testing.T) { // run starts a new test case and resets clients back to their zero values. run := func(name string, dupPolicy dupPolicy, f func(t *testing.T)) { - s = NewServer(serverPriv, t.Logf) + s = New(serverPriv, t.Logf) s.dupPolicy = dupPolicy c1 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c1: ")} c2 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c2: ")} @@ -618,7 +618,7 @@ func TestLimiter(t *testing.T) { // single Server instance with multiple concurrent client flows. func BenchmarkConcurrentStreams(b *testing.B) { serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, logger.Discard) + s := New(serverPrivateKey, logger.Discard) defer s.Close() ln, err := net.Listen("tcp", "127.0.0.1:0") @@ -688,7 +688,7 @@ func BenchmarkSendRecv(b *testing.B) { func benchmarkSendRecvSize(b *testing.B, packetSize int) { serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, logger.Discard) + s := New(serverPrivateKey, logger.Discard) defer s.Close() k := key.NewNode() diff --git a/prober/derp_test.go b/prober/derp_test.go index 92bcb0a61..08a65d697 100644 --- a/prober/derp_test.go +++ b/prober/derp_test.go @@ -146,7 +146,7 @@ func TestDerpProber(t *testing.T) { func TestRunDerpProbeNodePair(t *testing.T) { // os.Setenv("DERP_DEBUG_LOGS", "true") serverPrivateKey := key.NewNode() - s := derpserver.NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index 56643f5d4..3788f6149 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -296,7 +296,7 @@ func exe() string { func RunDERPAndSTUN(t testing.TB, logf logger.Logf, ipAddress string) (derpMap *tailcfg.DERPMap) { t.Helper() - d := derpserver.NewServer(key.NewNode(), logf) + d := derpserver.New(key.NewNode(), logf) ln, err := net.Listen("tcp", net.JoinHostPort(ipAddress, "0")) if err != nil { diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index 27ee51726..49d47f029 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -611,7 +611,7 @@ func newDERPServer() *derpServer { ts.Close() ds := &derpServer{ - srv: derpserver.NewServer(key.NewNode(), logger.Discard), + srv: derpserver.New(key.NewNode(), logger.Discard), tlsConfig: ts.TLS, // self-signed; test client configure to not check } var mux 
http.ServeMux diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index de24a5f60..c6be9129d 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -111,7 +111,7 @@ func (c *Conn) WaitReady(t testing.TB) { } func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) { - d := derpserver.NewServer(key.NewNode(), logf) + d := derpserver.New(key.NewNode(), logf) httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d)) httpsrv.Config.ErrorLog = logger.StdLogger(logf) From 0bd4f4729b150cbbca6364affb3073064d3d522a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 24 Sep 2025 15:14:21 -0700 Subject: [PATCH 1387/1708] ipn/ipnlocal: rename misnamed DisablePortMapperForTest to DisablePortPollerForTest I think this was originally a brain-o in 9380e2dfc61a720d. It's disabling the port _poller_, listing what open ports (i.e. services) are open, not PMP/PCP/UPnP port mapping. While there, drop in some more testenv.AssertInTest() in a few places. Updates #cleanup Change-Id: Ia6f755ad3544f855883b8a7bdcfc066e8649547b Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 11 ++++++----- ipn/ipnlocal/local_test.go | 2 +- ipn/ipnlocal/state_test.go | 2 +- ipn/lapitest/backend.go | 2 +- 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5c5fb034b..ef8fcab40 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1503,9 +1503,7 @@ func (b *LocalBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { } func (b *LocalBackend) GetFilterForTest() *filter.Filter { - if !testenv.InTest() { - panic("GetFilterForTest called outside of test") - } + testenv.AssertInTest() nb := b.currentNode() return nb.filterAtomic.Load() } @@ -2304,9 +2302,10 @@ func (b *LocalBackend) SetControlClientGetterForTesting(newControlClient func(co b.ccGen = newControlClient } -// DisablePortMapperForTest disables the portmapper for tests. +// DisablePortPollerForTest disables the port list poller for tests. // It must be called before Start. -func (b *LocalBackend) DisablePortMapperForTest() { +func (b *LocalBackend) DisablePortPollerForTest() { + testenv.AssertInTest() b.mu.Lock() defer b.mu.Unlock() b.portpoll = nil @@ -2315,6 +2314,7 @@ func (b *LocalBackend) DisablePortMapperForTest() { // PeersForTest returns all the current peers, sorted by Node.ID, // for integration tests in another repo. func (b *LocalBackend) PeersForTest() []tailcfg.NodeView { + testenv.AssertInTest() return b.currentNode().PeersForTest() } @@ -4030,6 +4030,7 @@ func (b *LocalBackend) resolveBestProfileLocked() (_ ipn.LoginProfileView, isBac // It is used for testing only, and will be removed along with the rest of the // "current user" functionality as we progress on the multi-user improvements (tailscale/corp#18342). 
func (b *LocalBackend) CurrentUserForTest() (ipn.WindowsUserID, ipnauth.Actor) { + testenv.AssertInTest() b.mu.Lock() defer b.mu.Unlock() return b.pm.CurrentUserID(), b.currentUser diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 0505e068b..56d65767b 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -5816,7 +5816,7 @@ func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys t.Fatalf("NewLocalBackend: %v", err) } t.Cleanup(b.Shutdown) - b.DisablePortMapperForTest() + b.DisablePortPollerForTest() b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { return newControl(t, opts), nil diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 609a51c5b..1a32f3156 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -358,7 +358,7 @@ func TestStateMachine(t *testing.T) { t.Fatalf("NewLocalBackend: %v", err) } t.Cleanup(b.Shutdown) - b.DisablePortMapperForTest() + b.DisablePortPollerForTest() var cc, previousCC *mockControl b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { diff --git a/ipn/lapitest/backend.go b/ipn/lapitest/backend.go index 6a83431f3..725ffa4de 100644 --- a/ipn/lapitest/backend.go +++ b/ipn/lapitest/backend.go @@ -45,7 +45,7 @@ func newBackend(opts *options) *ipnlocal.LocalBackend { tb.Fatalf("NewLocalBackend: %v", err) } tb.Cleanup(b.Shutdown) - b.DisablePortMapperForTest() + b.DisablePortPollerForTest() b.SetControlClientGetterForTesting(opts.MakeControlClient) return b } From 70400cb75f9738b7ee5bb260a8dddefbb929b4f4 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Wed, 24 Sep 2025 15:45:41 +0100 Subject: [PATCH 1388/1708] cmd/tailscale/cli: reduce strength of lose-ssh risk warning Ideally we would remove this warning entirely, as it is now possible to reauthenticate without losing connectivty. However, it is still possible to lose SSH connectivity if the user changes the ownership of the machine when they do a force-reauth, and we have no way of knowing if they are going to do that before they do it. For now, let's just reduce the strength of the warning to warn them that they "may" lose their connection, rather than they "will". 
Updates tailscale/corp#32429 Signed-off-by: James Sanderson --- cmd/tailscale/cli/up.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 12c26b21c..96b561bee 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -385,7 +385,7 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus } if env.upArgs.forceReauth && isSSHOverTailscale() { - if err := presentRiskToUser(riskLoseSSH, `You are connected over Tailscale; this action will result in your SSH session disconnecting.`, env.upArgs.acceptedRisks); err != nil { + if err := presentRiskToUser(riskLoseSSH, `You are connected over Tailscale; this action may result in your SSH session disconnecting.`, env.upArgs.acceptedRisks); err != nil { return false, nil, err } } From e0a77cf41a52066dd42058828799c12320d4b9cf Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 15 Sep 2025 11:44:00 +0100 Subject: [PATCH 1389/1708] tstest/integration: expand the tests for `tailscale up` Expand the integration tests to cover a wider range of scenarios, including: * Before and after a successful initial login * Auth URLs and auth keys * With and without the `--force-reauth` flag * With and without seamless key renewal These tests expose a race condition when using `--force-reauth` on an already-logged in device. The command completes too quickly, preventing the auth URL from being displayed. This issue is identified and will be fixed in a separate commit. Updates #17108 Signed-off-by: Alex Chan --- tstest/integration/integration_test.go | 195 ++++++++++++++---- tstest/integration/testcontrol/testcontrol.go | 43 +++- 2 files changed, 193 insertions(+), 45 deletions(-) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 136004bc8..f65ae1659 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -23,6 +23,7 @@ import ( "regexp" "runtime" "strconv" + "strings" "sync/atomic" "testing" "time" @@ -267,52 +268,168 @@ func TestStateSavedOnStart(t *testing.T) { } func TestOneNodeUpAuth(t *testing.T) { - tstest.Shard(t) - tstest.Parallel(t) - env := NewTestEnv(t, ConfigureControl(func(control *testcontrol.Server) { - control.RequireAuth = true - })) + for _, tt := range []struct { + name string + args []string + // + // What auth key should we use for control? + authKey string + // + // Is tailscaled already logged in before we run this `up` command? + alreadyLoggedIn bool + // + // Do we need to log in again with a new /auth/ URL? + needsNewAuthURL bool + }{ + { + name: "up", + args: []string{"up"}, + needsNewAuthURL: true, + }, + { + name: "up-with-force-reauth", + args: []string{"up", "--force-reauth"}, + needsNewAuthURL: true, + }, + { + name: "up-with-auth-key", + args: []string{"up", "--auth-key=opensesame"}, + authKey: "opensesame", + needsNewAuthURL: false, + }, + { + name: "up-with-force-reauth-and-auth-key", + args: []string{"up", "--force-reauth", "--auth-key=opensesame"}, + authKey: "opensesame", + needsNewAuthURL: false, + }, + { + name: "up-after-login", + args: []string{"up"}, + alreadyLoggedIn: true, + needsNewAuthURL: false, + }, + // TODO(alexc): This test is failing because of a bug in `tailscale up` where + // it waits for ipn to enter the "Running" state. If we're already logged in + // and running, this completes immediately, before we've had a chance to show + // the user the auth URL. 
+ // { + // name: "up-with-force-reauth-after-login", + // args: []string{"up", "--force-reauth"}, + // alreadyLoggedIn: true, + // needsNewAuthURL: true, + // }, + { + name: "up-with-auth-key-after-login", + args: []string{"up", "--auth-key=opensesame"}, + authKey: "opensesame", + alreadyLoggedIn: true, + needsNewAuthURL: false, + }, + { + name: "up-with-force-reauth-and-auth-key-after-login", + args: []string{"up", "--force-reauth", "--auth-key=opensesame"}, + authKey: "opensesame", + alreadyLoggedIn: true, + needsNewAuthURL: false, + }, + } { + tstest.Shard(t) + + for _, useSeamlessKeyRenewal := range []bool{true, false} { + tt := tt // subtests are run in parallel, rebind tt + t.Run(fmt.Sprintf("%s-seamless-%t", tt.name, useSeamlessKeyRenewal), func(t *testing.T) { + tstest.Parallel(t) + + env := NewTestEnv(t, ConfigureControl( + func(control *testcontrol.Server) { + if tt.authKey != "" { + control.RequireAuthKey = tt.authKey + } else { + control.RequireAuth = true + } + + control.AllNodesSameUser = true + + if useSeamlessKeyRenewal { + control.DefaultNodeCapabilities = &tailcfg.NodeCapMap{ + tailcfg.NodeAttrSeamlessKeyRenewal: []tailcfg.RawMessage{}, + } + } + }, + )) + + n1 := NewTestNode(t, env) + d1 := n1.StartDaemon() + defer d1.MustCleanShutdown(t) + + cmdArgs := append(tt.args, "--login-server="+env.ControlURL()) + + // This handler looks for /auth/ URLs in the stdout from "tailscale up", + // and if it sees them, completes the auth process. + // + // It counts how many auth URLs it's seen. + var authCountAtomic atomic.Int32 + authURLHandler := &authURLParserWriter{fn: func(urlStr string) error { + t.Logf("saw auth URL %q", urlStr) + if env.Control.CompleteAuth(urlStr) { + if authCountAtomic.Add(1) > 1 { + err := errors.New("completed multiple auth URLs") + t.Error(err) + return err + } + t.Logf("completed login to %s", urlStr) + return nil + } else { + err := fmt.Errorf("Failed to complete initial login to %q", urlStr) + t.Fatal(err) + return err + } + }} + + // If we should be logged in at the start of the test case, go ahead + // and run the login command. + // + // Otherwise, just wait for tailscaled to be listening. + if tt.alreadyLoggedIn { + t.Logf("Running initial login: %s", strings.Join(cmdArgs, " ")) + cmd := n1.Tailscale(cmdArgs...) + cmd.Stdout = authURLHandler + cmd.Stderr = cmd.Stdout + if err := cmd.Run(); err != nil { + t.Fatalf("up: %v", err) + } + authCountAtomic.Store(0) + n1.AwaitRunning() + } else { + n1.AwaitListening() + } - n1 := NewTestNode(t, env) - d1 := n1.StartDaemon() + st := n1.MustStatus() + t.Logf("Status: %s", st.BackendState) - n1.AwaitListening() + t.Logf("Running command: %s", strings.Join(cmdArgs, " ")) + cmd := n1.Tailscale(cmdArgs...) 
+ cmd.Stdout = authURLHandler + cmd.Stderr = cmd.Stdout - st := n1.MustStatus() - t.Logf("Status: %s", st.BackendState) + if err := cmd.Run(); err != nil { + t.Fatalf("up: %v", err) + } + t.Logf("Got IP: %v", n1.AwaitIP4()) - t.Logf("Running up --login-server=%s ...", env.ControlURL()) + n1.AwaitRunning() - cmd := n1.Tailscale("up", "--login-server="+env.ControlURL()) - var authCountAtomic atomic.Int32 - cmd.Stdout = &authURLParserWriter{fn: func(urlStr string) error { - t.Logf("saw auth URL %q", urlStr) - if env.Control.CompleteAuth(urlStr) { - if authCountAtomic.Add(1) > 1 { - err := errors.New("completed multple auth URLs") - t.Error(err) - return err - } - t.Logf("completed auth path %s", urlStr) - return nil + var expectedAuthUrls int32 + if tt.needsNewAuthURL { + expectedAuthUrls = 1 + } + if n := authCountAtomic.Load(); n != expectedAuthUrls { + t.Errorf("Auth URLs completed = %d; want %d", n, expectedAuthUrls) + } + }) } - err := fmt.Errorf("Failed to complete auth path to %q", urlStr) - t.Error(err) - return err - }} - cmd.Stderr = cmd.Stdout - if err := cmd.Run(); err != nil { - t.Fatalf("up: %v", err) - } - t.Logf("Got IP: %v", n1.AwaitIP4()) - - n1.AwaitRunning() - - if n := authCountAtomic.Load(); n != 1 { - t.Errorf("Auth URLs completed = %d; want 1", n) } - - d1.MustCleanShutdown(t) } func TestConfigFileAuthKey(t *testing.T) { diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 7ce7186e7..1d3b99f7a 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -66,6 +66,9 @@ type Server struct { // belong to the same user. AllNodesSameUser bool + // DefaultNodeCapabilities overrides the capability map sent to each client. + DefaultNodeCapabilities *tailcfg.NodeCapMap + // ExplicitBaseURL or HTTPTestServer must be set. ExplicitBaseURL string // e.g. "http://127.0.0.1:1234" with no trailing URL HTTPTestServer *httptest.Server // if non-nil, used to get BaseURL @@ -726,6 +729,25 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. // some follow-ups? For now all are successes. } + // The in-memory list of nodes, users, and logins is keyed by + // the node key. If the node key changes, update all the data stores + // to use the new node key. + s.mu.Lock() + if _, oldNodeKeyOk := s.nodes[req.OldNodeKey]; oldNodeKeyOk { + if _, newNodeKeyOk := s.nodes[req.NodeKey]; !newNodeKeyOk { + s.nodes[req.OldNodeKey].Key = req.NodeKey + s.nodes[req.NodeKey] = s.nodes[req.OldNodeKey] + + s.users[req.NodeKey] = s.users[req.OldNodeKey] + s.logins[req.NodeKey] = s.logins[req.OldNodeKey] + + delete(s.nodes, req.OldNodeKey) + delete(s.users, req.OldNodeKey) + delete(s.logins, req.OldNodeKey) + } + } + s.mu.Unlock() + nk := req.NodeKey user, login := s.getUser(nk) @@ -745,6 +767,19 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. 
v4Prefix, v6Prefix, } + + var capMap tailcfg.NodeCapMap + if s.DefaultNodeCapabilities != nil { + capMap = *s.DefaultNodeCapabilities + } else { + capMap = tailcfg.NodeCapMap{ + tailcfg.CapabilityHTTPS: []tailcfg.RawMessage{}, + tailcfg.NodeAttrFunnel: []tailcfg.RawMessage{}, + tailcfg.CapabilityFileSharing: []tailcfg.RawMessage{}, + tailcfg.CapabilityFunnelPorts + "?ports=8080,443": []tailcfg.RawMessage{}, + } + } + node := &tailcfg.Node{ ID: tailcfg.NodeID(nodeID), StableID: tailcfg.StableNodeID(fmt.Sprintf("TESTCTRL%08x", int(nodeID))), @@ -757,12 +792,8 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. Hostinfo: req.Hostinfo.View(), Name: req.Hostinfo.Hostname, Cap: req.Version, - Capabilities: []tailcfg.NodeCapability{ - tailcfg.CapabilityHTTPS, - tailcfg.NodeAttrFunnel, - tailcfg.CapabilityFileSharing, - tailcfg.CapabilityFunnelPorts + "?ports=8080,443", - }, + CapMap: capMap, + Capabilities: slices.Collect(maps.Keys(capMap)), } s.nodes[nk] = node } From 0b27871860b1203e1c7c471bfecee6cb119c862f Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Thu, 25 Sep 2025 11:26:43 -0400 Subject: [PATCH 1390/1708] k8s-operator: add IPv6 support for DNS records (#16691) This change adds full IPv6 support to the Kubernetes operator's DNS functionality, enabling dual-stack and IPv6-only cluster support. Fixes #16633 Signed-off-by: Raj Singh --- cmd/k8s-nameserver/main.go | 122 +++++++++---- cmd/k8s-nameserver/main_test.go | 68 ++++++++ .../deploy/crds/tailscale.com_dnsconfigs.yaml | 1 - .../deploy/manifests/operator.yaml | 1 - cmd/k8s-operator/dnsrecords.go | 138 ++++++++++----- cmd/k8s-operator/dnsrecords_test.go | 165 +++++++++++++++++- k8s-operator/api.md | 1 - .../apis/v1alpha1/types_tsdnsconfig.go | 1 - k8s-operator/utils.go | 5 + 9 files changed, 408 insertions(+), 94 deletions(-) diff --git a/cmd/k8s-nameserver/main.go b/cmd/k8s-nameserver/main.go index ca4b44935..84e65452d 100644 --- a/cmd/k8s-nameserver/main.go +++ b/cmd/k8s-nameserver/main.go @@ -31,6 +31,9 @@ const ( tsNetDomain = "ts.net" // addr is the the address that the UDP and TCP listeners will listen on. addr = ":1053" + // defaultTTL is the default TTL for DNS records in seconds. + // Set to 0 to disable caching. Can be increased when usage patterns are better understood. + defaultTTL = 0 // The following constants are specific to the nameserver configuration // provided by a mounted Kubernetes Configmap. The Configmap mounted at @@ -39,9 +42,9 @@ const ( kubeletMountedConfigLn = "..data" ) -// nameserver is a simple nameserver that responds to DNS queries for A records +// nameserver is a simple nameserver that responds to DNS queries for A and AAAA records // for ts.net domain names over UDP or TCP. It serves DNS responses from -// in-memory IPv4 host records. It is intended to be deployed on Kubernetes with +// in-memory IPv4 and IPv6 host records. It is intended to be deployed on Kubernetes with // a ConfigMap mounted at /config that should contain the host records. It // dynamically reconfigures its in-memory mappings as the contents of the // mounted ConfigMap changes. @@ -56,10 +59,13 @@ type nameserver struct { // in-memory records. configWatcher <-chan string - mu sync.Mutex // protects following + mu sync.RWMutex // protects following // ip4 are the in-memory hostname -> IP4 mappings that the nameserver // uses to respond to A record queries. ip4 map[dnsname.FQDN][]net.IP + // ip6 are the in-memory hostname -> IP6 mappings that the nameserver + // uses to respond to AAAA record queries. 
+ ip6 map[dnsname.FQDN][]net.IP } func main() { @@ -98,16 +104,13 @@ func main() { tcpSig <- s // stop the TCP listener } -// handleFunc is a DNS query handler that can respond to A record queries from +// handleFunc is a DNS query handler that can respond to A and AAAA record queries from // the nameserver's in-memory records. -// - If an A record query is received and the -// nameserver's in-memory records contain records for the queried domain name, -// return a success response. -// - If an A record query is received, but the -// nameserver's in-memory records do not contain records for the queried domain name, -// return NXDOMAIN. -// - If an A record query is received, but the queried domain name is not valid, return Format Error. -// - If a query is received for any other record type than A, return Not Implemented. +// - For A queries: returns IPv4 addresses if available, NXDOMAIN if the name doesn't exist +// - For AAAA queries: returns IPv6 addresses if available, NOERROR with no data if only +// IPv4 exists (per RFC 4074), or NXDOMAIN if the name doesn't exist at all +// - For invalid domain names: returns Format Error +// - For other record types: returns Not Implemented func (n *nameserver) handleFunc() func(w dns.ResponseWriter, r *dns.Msg) { h := func(w dns.ResponseWriter, r *dns.Msg) { m := new(dns.Msg) @@ -135,35 +138,19 @@ func (n *nameserver) handleFunc() func(w dns.ResponseWriter, r *dns.Msg) { m.RecursionAvailable = false ips := n.lookupIP4(fqdn) - if ips == nil || len(ips) == 0 { + if len(ips) == 0 { // As we are the authoritative nameserver for MagicDNS // names, if we do not have a record for this MagicDNS // name, it does not exist. m = m.SetRcode(r, dns.RcodeNameError) return } - // TODO (irbekrm): TTL is currently set to 0, meaning - // that cluster workloads will not cache the DNS - // records. Revisit this in future when we understand - // the usage patterns better- is it putting too much - // load on kube DNS server or is this fine? for _, ip := range ips { - rr := &dns.A{Hdr: dns.RR_Header{Name: q, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0}, A: ip} + rr := &dns.A{Hdr: dns.RR_Header{Name: q, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: defaultTTL}, A: ip} m.SetRcode(r, dns.RcodeSuccess) m.Answer = append(m.Answer, rr) } case dns.TypeAAAA: - // TODO (irbekrm): add IPv6 support. - // The nameserver currently does not support IPv6 - // (records are not being created for IPv6 Pod addresses). - // However, we can expect that some callers will - // nevertheless send AAAA queries. - // We have to return NOERROR if a query is received for - // an AAAA record for a DNS name that we have an A - // record for- else the caller might not follow with an - // A record query. - // https://github.com/tailscale/tailscale/issues/12321 - // https://datatracker.ietf.org/doc/html/rfc4074 q := r.Question[0].Name fqdn, err := dnsname.ToFQDN(q) if err != nil { @@ -174,14 +161,27 @@ func (n *nameserver) handleFunc() func(w dns.ResponseWriter, r *dns.Msg) { // single source of truth for MagicDNS names by // non-tailnet Kubernetes workloads. m.Authoritative = true - ips := n.lookupIP4(fqdn) - if len(ips) == 0 { + m.RecursionAvailable = false + + ips := n.lookupIP6(fqdn) + // Also check if we have IPv4 records to determine correct response code. + // If the name exists (has A records) but no AAAA records, we return NOERROR + // per RFC 4074. If the name doesn't exist at all, we return NXDOMAIN. 
+ ip4s := n.lookupIP4(fqdn) + + if len(ips) == 0 && len(ip4s) == 0 { // As we are the authoritative nameserver for MagicDNS - // names, if we do not have a record for this MagicDNS + // names, if we do not have any record for this MagicDNS // name, it does not exist. m = m.SetRcode(r, dns.RcodeNameError) return } + + // Return IPv6 addresses if available + for _, ip := range ips { + rr := &dns.AAAA{Hdr: dns.RR_Header{Name: q, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: defaultTTL}, AAAA: ip} + m.Answer = append(m.Answer, rr) + } m.SetRcode(r, dns.RcodeSuccess) default: log.Printf("[unexpected] nameserver received a query for an unsupported record type: %s", r.Question[0].String()) @@ -231,10 +231,11 @@ func (n *nameserver) resetRecords() error { log.Printf("error reading nameserver's configuration: %v", err) return err } - if dnsCfgBytes == nil || len(dnsCfgBytes) < 1 { + if len(dnsCfgBytes) == 0 { log.Print("nameserver's configuration is empty, any in-memory records will be unset") n.mu.Lock() n.ip4 = make(map[dnsname.FQDN][]net.IP) + n.ip6 = make(map[dnsname.FQDN][]net.IP) n.mu.Unlock() return nil } @@ -249,30 +250,63 @@ func (n *nameserver) resetRecords() error { } ip4 := make(map[dnsname.FQDN][]net.IP) + ip6 := make(map[dnsname.FQDN][]net.IP) defer func() { n.mu.Lock() defer n.mu.Unlock() n.ip4 = ip4 + n.ip6 = ip6 }() - if len(dnsCfg.IP4) == 0 { + if len(dnsCfg.IP4) == 0 && len(dnsCfg.IP6) == 0 { log.Print("nameserver's configuration contains no records, any in-memory records will be unset") return nil } + // Process IPv4 records for fqdn, ips := range dnsCfg.IP4 { fqdn, err := dnsname.ToFQDN(fqdn) if err != nil { log.Printf("invalid nameserver's configuration: %s is not a valid FQDN: %v; skipping this record", fqdn, err) continue // one invalid hostname should not break the whole nameserver } + var validIPs []net.IP for _, ipS := range ips { ip := net.ParseIP(ipS).To4() if ip == nil { // To4 returns nil if IP is not a IPv4 address log.Printf("invalid nameserver's configuration: %v does not appear to be an IPv4 address; skipping this record", ipS) continue // one invalid IP address should not break the whole nameserver } - ip4[fqdn] = []net.IP{ip} + validIPs = append(validIPs, ip) + } + if len(validIPs) > 0 { + ip4[fqdn] = validIPs + } + } + + // Process IPv6 records + for fqdn, ips := range dnsCfg.IP6 { + fqdn, err := dnsname.ToFQDN(fqdn) + if err != nil { + log.Printf("invalid nameserver's configuration: %s is not a valid FQDN: %v; skipping this record", fqdn, err) + continue // one invalid hostname should not break the whole nameserver + } + var validIPs []net.IP + for _, ipS := range ips { + ip := net.ParseIP(ipS) + if ip == nil { + log.Printf("invalid nameserver's configuration: %v does not appear to be a valid IP address; skipping this record", ipS) + continue + } + // Check if it's a valid IPv6 address + if ip.To4() != nil { + log.Printf("invalid nameserver's configuration: %v appears to be IPv4 but was in IPv6 records; skipping this record", ipS) + continue + } + validIPs = append(validIPs, ip.To16()) + } + if len(validIPs) > 0 { + ip6[fqdn] = validIPs } } return nil @@ -372,8 +406,20 @@ func (n *nameserver) lookupIP4(fqdn dnsname.FQDN) []net.IP { if n.ip4 == nil { return nil } - n.mu.Lock() - defer n.mu.Unlock() + n.mu.RLock() + defer n.mu.RUnlock() f := n.ip4[fqdn] return f } + +// lookupIP6 returns any IPv6 addresses for the given FQDN from nameserver's +// in-memory records. 
+func (n *nameserver) lookupIP6(fqdn dnsname.FQDN) []net.IP { + if n.ip6 == nil { + return nil + } + n.mu.RLock() + defer n.mu.RUnlock() + f := n.ip6[fqdn] + return f +} diff --git a/cmd/k8s-nameserver/main_test.go b/cmd/k8s-nameserver/main_test.go index d9a33c4fa..bca010048 100644 --- a/cmd/k8s-nameserver/main_test.go +++ b/cmd/k8s-nameserver/main_test.go @@ -19,6 +19,7 @@ func TestNameserver(t *testing.T) { tests := []struct { name string ip4 map[dnsname.FQDN][]net.IP + ip6 map[dnsname.FQDN][]net.IP query *dns.Msg wantResp *dns.Msg }{ @@ -112,6 +113,49 @@ func TestNameserver(t *testing.T) { Authoritative: true, }}, }, + { + name: "AAAA record query with IPv6 record", + ip6: map[dnsname.FQDN][]net.IP{dnsname.FQDN("foo.bar.com."): {net.ParseIP("2001:db8::1")}}, + query: &dns.Msg{ + Question: []dns.Question{{Name: "foo.bar.com", Qtype: dns.TypeAAAA}}, + MsgHdr: dns.MsgHdr{Id: 1, RecursionDesired: true}, + }, + wantResp: &dns.Msg{ + Answer: []dns.RR{&dns.AAAA{Hdr: dns.RR_Header{ + Name: "foo.bar.com", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0}, + AAAA: net.ParseIP("2001:db8::1")}}, + Question: []dns.Question{{Name: "foo.bar.com", Qtype: dns.TypeAAAA}}, + MsgHdr: dns.MsgHdr{ + Id: 1, + Rcode: dns.RcodeSuccess, + RecursionAvailable: false, + RecursionDesired: true, + Response: true, + Opcode: dns.OpcodeQuery, + Authoritative: true, + }}, + }, + { + name: "Dual-stack: both A and AAAA records exist", + ip4: map[dnsname.FQDN][]net.IP{dnsname.FQDN("dual.bar.com."): {{10, 0, 0, 1}}}, + ip6: map[dnsname.FQDN][]net.IP{dnsname.FQDN("dual.bar.com."): {net.ParseIP("2001:db8::1")}}, + query: &dns.Msg{ + Question: []dns.Question{{Name: "dual.bar.com", Qtype: dns.TypeAAAA}}, + MsgHdr: dns.MsgHdr{Id: 1}, + }, + wantResp: &dns.Msg{ + Answer: []dns.RR{&dns.AAAA{Hdr: dns.RR_Header{ + Name: "dual.bar.com", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0}, + AAAA: net.ParseIP("2001:db8::1")}}, + Question: []dns.Question{{Name: "dual.bar.com", Qtype: dns.TypeAAAA}}, + MsgHdr: dns.MsgHdr{ + Id: 1, + Rcode: dns.RcodeSuccess, + Response: true, + Opcode: dns.OpcodeQuery, + Authoritative: true, + }}, + }, { name: "CNAME record query", ip4: map[dnsname.FQDN][]net.IP{dnsname.FQDN("foo.bar.com."): {{1, 2, 3, 4}}}, @@ -133,6 +177,7 @@ func TestNameserver(t *testing.T) { t.Run(tt.name, func(t *testing.T) { ns := &nameserver{ ip4: tt.ip4, + ip6: tt.ip6, } handler := ns.handleFunc() fakeRespW := &fakeResponseWriter{} @@ -149,43 +194,63 @@ func TestResetRecords(t *testing.T) { name string config []byte hasIp4 map[dnsname.FQDN][]net.IP + hasIp6 map[dnsname.FQDN][]net.IP wantsIp4 map[dnsname.FQDN][]net.IP + wantsIp6 map[dnsname.FQDN][]net.IP wantsErr bool }{ { name: "previously empty nameserver.ip4 gets set", config: []byte(`{"version": "v1alpha1", "ip4": {"foo.bar.com": ["1.2.3.4"]}}`), wantsIp4: map[dnsname.FQDN][]net.IP{"foo.bar.com.": {{1, 2, 3, 4}}}, + wantsIp6: make(map[dnsname.FQDN][]net.IP), }, { name: "nameserver.ip4 gets reset", hasIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, config: []byte(`{"version": "v1alpha1", "ip4": {"foo.bar.com": ["1.2.3.4"]}}`), wantsIp4: map[dnsname.FQDN][]net.IP{"foo.bar.com.": {{1, 2, 3, 4}}}, + wantsIp6: make(map[dnsname.FQDN][]net.IP), }, { name: "configuration with incompatible version", hasIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, config: []byte(`{"version": "v1beta1", "ip4": {"foo.bar.com": ["1.2.3.4"]}}`), wantsIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, + wantsIp6: nil, wantsErr: true, }, { name: 
"nameserver.ip4 gets reset to empty config when no configuration is provided", hasIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, wantsIp4: make(map[dnsname.FQDN][]net.IP), + wantsIp6: make(map[dnsname.FQDN][]net.IP), }, { name: "nameserver.ip4 gets reset to empty config when the provided configuration is empty", hasIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, config: []byte(`{"version": "v1alpha1", "ip4": {}}`), wantsIp4: make(map[dnsname.FQDN][]net.IP), + wantsIp6: make(map[dnsname.FQDN][]net.IP), + }, + { + name: "nameserver.ip6 gets set", + config: []byte(`{"version": "v1alpha1", "ip6": {"foo.bar.com": ["2001:db8::1"]}}`), + wantsIp4: make(map[dnsname.FQDN][]net.IP), + wantsIp6: map[dnsname.FQDN][]net.IP{"foo.bar.com.": {net.ParseIP("2001:db8::1")}}, + }, + { + name: "dual-stack configuration", + config: []byte(`{"version": "v1alpha1", "ip4": {"dual.bar.com": ["10.0.0.1"]}, "ip6": {"dual.bar.com": ["2001:db8::1"]}}`), + wantsIp4: map[dnsname.FQDN][]net.IP{"dual.bar.com.": {{10, 0, 0, 1}}}, + wantsIp6: map[dnsname.FQDN][]net.IP{"dual.bar.com.": {net.ParseIP("2001:db8::1")}}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ns := &nameserver{ ip4: tt.hasIp4, + ip6: tt.hasIp6, configReader: func() ([]byte, error) { return tt.config, nil }, } if err := ns.resetRecords(); err == nil == tt.wantsErr { @@ -194,6 +259,9 @@ func TestResetRecords(t *testing.T) { if diff := cmp.Diff(ns.ip4, tt.wantsIp4); diff != "" { t.Fatalf("unexpected nameserver.ip4 contents (-got +want): \n%s", diff) } + if diff := cmp.Diff(ns.ip6, tt.wantsIp6); diff != "" { + t.Fatalf("unexpected nameserver.ip6 contents (-got +want): \n%s", diff) + } }) } } diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml index bffad47f9..b047e11a7 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml @@ -52,7 +52,6 @@ spec: using its MagicDNS name, you must also annotate the Ingress resource with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to ensure that the proxy created for the Ingress listens on its Pod IP address. - NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. type: object required: - spec diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 766d7f0d6..8b3c206c8 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -390,7 +390,6 @@ spec: using its MagicDNS name, you must also annotate the Ingress resource with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to ensure that the proxy created for the Ingress listens on its Pod IP address. - NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. properties: apiVersion: description: |- diff --git a/cmd/k8s-operator/dnsrecords.go b/cmd/k8s-operator/dnsrecords.go index 54c1584c6..1a9395aa0 100644 --- a/cmd/k8s-operator/dnsrecords.go +++ b/cmd/k8s-operator/dnsrecords.go @@ -40,10 +40,10 @@ const ( // dnsRecordsReconciler knows how to update dnsrecords ConfigMap with DNS // records. // The records that it creates are: -// - For tailscale Ingress, a mapping of the Ingress's MagicDNSName to the IP address of -// the ingress proxy Pod. 
+// - For tailscale Ingress, a mapping of the Ingress's MagicDNSName to the IP addresses +// (both IPv4 and IPv6) of the ingress proxy Pod. // - For egress proxies configured via tailscale.com/tailnet-fqdn annotation, a -// mapping of the tailnet FQDN to the IP address of the egress proxy Pod. +// mapping of the tailnet FQDN to the IP addresses (both IPv4 and IPv6) of the egress proxy Pod. // // Records will only be created if there is exactly one ready // tailscale.com/v1alpha1.DNSConfig instance in the cluster (so that we know @@ -122,16 +122,16 @@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile. // For Ingress, the record is a mapping between the MagicDNSName of the Ingress, retrieved from // ingress.status.loadBalancer.ingress.hostname field and the proxy Pod IP addresses // retrieved from the EndpointSlice associated with this Service, i.e -// Records{IP4: : <[IPs of the ingress proxy Pods]>} +// Records{IP4: {: <[IPv4 addresses]>}, IP6: {: <[IPv6 addresses]>}} // // For egress, the record is a mapping between tailscale.com/tailnet-fqdn // annotation and the proxy Pod IP addresses, retrieved from the EndpointSlice // associated with this Service, i.e -// Records{IP4: {: <[IPs of the egress proxy Pods]>} +// Records{IP4: {: <[IPv4 addresses]>}, IP6: {: <[IPv6 addresses]>}} // // For ProxyGroup egress, the record is a mapping between tailscale.com/magic-dnsname -// annotation and the ClusterIP Service IP (which provides portmapping), i.e -// Records{IP4: {: <[ClusterIP Service IP]>} +// annotation and the ClusterIP Service IPs (which provides portmapping), i.e +// Records{IP4: {: <[IPv4 ClusterIPs]>}, IP6: {: <[IPv6 ClusterIPs]>}} // // If records need to be created for this proxy, maybeProvision will also: // - update the Service with a tailscale.com/magic-dnsname annotation @@ -178,17 +178,22 @@ func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, proxySvc } // Get the IP addresses for the DNS record - ips, err := dnsRR.getTargetIPs(ctx, proxySvc, logger) + ip4s, ip6s, err := dnsRR.getTargetIPs(ctx, proxySvc, logger) if err != nil { return fmt.Errorf("error getting target IPs: %w", err) } - if len(ips) == 0 { + if len(ip4s) == 0 && len(ip6s) == 0 { logger.Debugf("No target IP addresses available yet. We will reconcile again once they are available.") return nil } updateFunc := func(rec *operatorutils.Records) { - mak.Set(&rec.IP4, fqdn, ips) + if len(ip4s) > 0 { + mak.Set(&rec.IP4, fqdn, ip4s) + } + if len(ip6s) > 0 { + mak.Set(&rec.IP6, fqdn, ip6s) + } } if err = dnsRR.updateDNSConfig(ctx, updateFunc); err != nil { return fmt.Errorf("error updating DNS records: %w", err) @@ -212,42 +217,45 @@ func epIsReady(ep *discoveryv1.Endpoint) bool { // has been removed from the Service. If the record is not found in the // ConfigMap, the ConfigMap does not exist, or the Service does not have // tailscale.com/magic-dnsname annotation, just remove the finalizer. 
-func (h *dnsRecordsReconciler) maybeCleanup(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) error { +func (dnsRR *dnsRecordsReconciler) maybeCleanup(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) error { ix := slices.Index(proxySvc.Finalizers, dnsRecordsRecocilerFinalizer) if ix == -1 { logger.Debugf("no finalizer, nothing to do") return nil } cm := &corev1.ConfigMap{} - err := h.Client.Get(ctx, types.NamespacedName{Name: operatorutils.DNSRecordsCMName, Namespace: h.tsNamespace}, cm) + err := dnsRR.Client.Get(ctx, types.NamespacedName{Name: operatorutils.DNSRecordsCMName, Namespace: dnsRR.tsNamespace}, cm) if apierrors.IsNotFound(err) { logger.Debug("'dnsrecords' ConfigMap not found") - return h.removeProxySvcFinalizer(ctx, proxySvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } if err != nil { return fmt.Errorf("error retrieving 'dnsrecords' ConfigMap: %w", err) } if cm.Data == nil { logger.Debug("'dnsrecords' ConfigMap contains no records") - return h.removeProxySvcFinalizer(ctx, proxySvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } _, ok := cm.Data[operatorutils.DNSRecordsCMKey] if !ok { logger.Debug("'dnsrecords' ConfigMap contains no records") - return h.removeProxySvcFinalizer(ctx, proxySvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } - fqdn, _ := proxySvc.GetAnnotations()[annotationTSMagicDNSName] + fqdn := proxySvc.GetAnnotations()[annotationTSMagicDNSName] if fqdn == "" { - return h.removeProxySvcFinalizer(ctx, proxySvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } logger.Infof("removing DNS record for MagicDNS name %s", fqdn) updateFunc := func(rec *operatorutils.Records) { delete(rec.IP4, fqdn) + if rec.IP6 != nil { + delete(rec.IP6, fqdn) + } } - if err = h.updateDNSConfig(ctx, updateFunc); err != nil { + if err = dnsRR.updateDNSConfig(ctx, updateFunc); err != nil { return fmt.Errorf("error updating DNS config: %w", err) } - return h.removeProxySvcFinalizer(ctx, proxySvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } func (dnsRR *dnsRecordsReconciler) removeProxySvcFinalizer(ctx context.Context, proxySvc *corev1.Service) error { @@ -383,72 +391,106 @@ func (dnsRR *dnsRecordsReconciler) parentSvcTargetsFQDN(ctx context.Context, svc return parentSvc.Annotations[AnnotationTailnetTargetFQDN] != "" } -// getTargetIPs returns the IP addresses that should be used for DNS records +// getTargetIPs returns the IPv4 and IPv6 addresses that should be used for DNS records // for the given proxy Service. -func (dnsRR *dnsRecordsReconciler) getTargetIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { +func (dnsRR *dnsRecordsReconciler) getTargetIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, []string, error) { if dnsRR.isProxyGroupEgressService(proxySvc) { return dnsRR.getClusterIPServiceIPs(proxySvc, logger) } return dnsRR.getPodIPs(ctx, proxySvc, logger) } -// getClusterIPServiceIPs returns the ClusterIP of a ProxyGroup egress Service. -func (dnsRR *dnsRecordsReconciler) getClusterIPServiceIPs(proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { +// getClusterIPServiceIPs returns the ClusterIPs of a ProxyGroup egress Service. +// It separates IPv4 and IPv6 addresses for dual-stack services. 
+func (dnsRR *dnsRecordsReconciler) getClusterIPServiceIPs(proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, []string, error) { + // Handle services with no ClusterIP if proxySvc.Spec.ClusterIP == "" || proxySvc.Spec.ClusterIP == "None" { logger.Debugf("ProxyGroup egress ClusterIP Service does not have a ClusterIP yet.") - return nil, nil + return nil, nil, nil + } + + var ip4s, ip6s []string + + // Check all ClusterIPs for dual-stack support + clusterIPs := proxySvc.Spec.ClusterIPs + if len(clusterIPs) == 0 && proxySvc.Spec.ClusterIP != "" { + // Fallback to single ClusterIP for backward compatibility + clusterIPs = []string{proxySvc.Spec.ClusterIP} } - // Validate that ClusterIP is a valid IPv4 address - if !net.IsIPv4String(proxySvc.Spec.ClusterIP) { - logger.Debugf("ClusterIP %s is not a valid IPv4 address", proxySvc.Spec.ClusterIP) - return nil, fmt.Errorf("ClusterIP %s is not a valid IPv4 address", proxySvc.Spec.ClusterIP) + + for _, ip := range clusterIPs { + if net.IsIPv4String(ip) { + ip4s = append(ip4s, ip) + logger.Debugf("Using IPv4 ClusterIP %s for ProxyGroup egress DNS record", ip) + } else if net.IsIPv6String(ip) { + ip6s = append(ip6s, ip) + logger.Debugf("Using IPv6 ClusterIP %s for ProxyGroup egress DNS record", ip) + } else { + logger.Debugf("ClusterIP %s is not a valid IP address", ip) + } } - logger.Debugf("Using ClusterIP Service IP %s for ProxyGroup egress DNS record", proxySvc.Spec.ClusterIP) - return []string{proxySvc.Spec.ClusterIP}, nil + + if len(ip4s) == 0 && len(ip6s) == 0 { + return nil, nil, fmt.Errorf("no valid ClusterIPs found") + } + + return ip4s, ip6s, nil } -// getPodIPs returns Pod IP addresses from EndpointSlices for non-ProxyGroup Services. -func (dnsRR *dnsRecordsReconciler) getPodIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { +// getPodIPs returns Pod IPv4 and IPv6 addresses from EndpointSlices for non-ProxyGroup Services. +func (dnsRR *dnsRecordsReconciler) getPodIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, []string, error) { // Get the Pod IP addresses for the proxy from the EndpointSlices for // the headless Service. The Service can have multiple EndpointSlices // associated with it, for example in dual-stack clusters. labels := map[string]string{discoveryv1.LabelServiceName: proxySvc.Name} // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership var eps = new(discoveryv1.EndpointSliceList) if err := dnsRR.List(ctx, eps, client.InNamespace(dnsRR.tsNamespace), client.MatchingLabels(labels)); err != nil { - return nil, fmt.Errorf("error listing EndpointSlices for the proxy's Service: %w", err) + return nil, nil, fmt.Errorf("error listing EndpointSlices for the proxy's Service: %w", err) } if len(eps.Items) == 0 { logger.Debugf("proxy's Service EndpointSlice does not yet exist.") - return nil, nil + return nil, nil, nil } // Each EndpointSlice for a Service can have a list of endpoints that each // can have multiple addresses - these are the IP addresses of any Pods - // selected by that Service. Pick all the IPv4 addresses. + // selected by that Service. Separate IPv4 and IPv6 addresses. // It is also possible that multiple EndpointSlices have overlapping addresses. 
// https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#duplicate-endpoints - ips := make(set.Set[string], 0) + ip4s := make(set.Set[string], 0) + ip6s := make(set.Set[string], 0) for _, slice := range eps.Items { - if slice.AddressType != discoveryv1.AddressTypeIPv4 { - logger.Infof("EndpointSlice is for AddressType %s, currently only IPv4 address type is supported", slice.AddressType) - continue - } for _, ep := range slice.Endpoints { if !epIsReady(&ep) { logger.Debugf("Endpoint with addresses %v appears not ready to receive traffic %v", ep.Addresses, ep.Conditions.String()) continue } for _, ip := range ep.Addresses { - if !net.IsIPv4String(ip) { - logger.Infof("EndpointSlice contains IP address %q that is not IPv4, ignoring. Currently only IPv4 is supported", ip) - } else { - ips.Add(ip) + switch slice.AddressType { + case discoveryv1.AddressTypeIPv4: + if net.IsIPv4String(ip) { + ip4s.Add(ip) + } else { + logger.Debugf("EndpointSlice with AddressType IPv4 contains non-IPv4 address %q, ignoring", ip) + } + case discoveryv1.AddressTypeIPv6: + if net.IsIPv6String(ip) { + // Strip zone ID if present (e.g., fe80::1%eth0 -> fe80::1) + if idx := strings.IndexByte(ip, '%'); idx != -1 { + ip = ip[:idx] + } + ip6s.Add(ip) + } else { + logger.Debugf("EndpointSlice with AddressType IPv6 contains non-IPv6 address %q, ignoring", ip) + } + default: + logger.Debugf("EndpointSlice is for unsupported AddressType %s, skipping", slice.AddressType) } } } } - if ips.Len() == 0 { - logger.Debugf("EndpointSlice for the Service contains no IPv4 addresses.") - return nil, nil + if ip4s.Len() == 0 && ip6s.Len() == 0 { + logger.Debugf("EndpointSlice for the Service contains no IP addresses.") + return nil, nil, nil } - return ips.Slice(), nil + return ip4s.Slice(), ip6s.Slice(), nil } diff --git a/cmd/k8s-operator/dnsrecords_test.go b/cmd/k8s-operator/dnsrecords_test.go index 51dfb9049..13898078f 100644 --- a/cmd/k8s-operator/dnsrecords_test.go +++ b/cmd/k8s-operator/dnsrecords_test.go @@ -99,8 +99,9 @@ func TestDNSRecordsReconciler(t *testing.T) { mustCreate(t, fc, epv6) expectReconciled(t, dnsRR, "tailscale", "egress-fqdn") // dns-records-reconciler reconcile the headless Service // ConfigMap should now have a record for foo.bar.ts.net -> 10.8.8.7 - wantHosts := map[string][]string{"foo.bar.ts.net": {"10.9.8.7"}} // IPv6 endpoint is currently ignored - expectHostsRecords(t, fc, wantHosts) + wantHosts := map[string][]string{"foo.bar.ts.net": {"10.9.8.7"}} + wantHostsIPv6 := map[string][]string{"foo.bar.ts.net": {"2600:1900:4011:161:0:d:0:d"}} + expectHostsRecordsWithIPv6(t, fc, wantHosts, wantHostsIPv6) // 2. 
DNS record is updated if tailscale.com/tailnet-fqdn annotation's // value changes @@ -271,17 +272,148 @@ func TestDNSRecordsReconcilerErrorCases(t *testing.T) { // Test invalid IP format testSvc.Spec.ClusterIP = "invalid-ip" - _, err = dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) + _, _, err = dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) if err == nil { t.Error("expected error for invalid IP format") } // Test valid IP testSvc.Spec.ClusterIP = "10.0.100.50" - _, err = dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) + ip4s, ip6s, err := dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) if err != nil { t.Errorf("unexpected error for valid IP: %v", err) } + if len(ip4s) != 1 || ip4s[0] != "10.0.100.50" { + t.Errorf("expected IPv4 address 10.0.100.50, got %v", ip4s) + } + if len(ip6s) != 0 { + t.Errorf("expected no IPv6 addresses, got %v", ip6s) + } +} + +func TestDNSRecordsReconcilerDualStack(t *testing.T) { + // Test dual-stack (IPv4 and IPv6) scenarios + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + // Preconfigure cluster with DNSConfig + dnsCfg := &tsapi.DNSConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + TypeMeta: metav1.TypeMeta{Kind: "DNSConfig"}, + Spec: tsapi.DNSConfigSpec{Nameserver: &tsapi.Nameserver{}}, + } + dnsCfg.Status.Conditions = append(dnsCfg.Status.Conditions, metav1.Condition{ + Type: string(tsapi.NameserverReady), + Status: metav1.ConditionTrue, + }) + + // Create dual-stack ingress + ing := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dual-stack-ingress", + Namespace: "test", + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + }, + Status: networkingv1.IngressStatus{ + LoadBalancer: networkingv1.IngressLoadBalancerStatus{ + Ingress: []networkingv1.IngressLoadBalancerIngress{ + {Hostname: "dual-stack.example.ts.net"}, + }, + }, + }, + } + + headlessSvc := headlessSvcForParent(ing, "ingress") + headlessSvc.Name = "ts-dual-stack-ingress" + headlessSvc.SetLabels(map[string]string{ + kubetypes.LabelManaged: "true", + LabelParentName: "dual-stack-ingress", + LabelParentNamespace: "test", + LabelParentType: "ingress", + }) + + // Create both IPv4 and IPv6 endpoints + epv4 := endpointSliceForService(headlessSvc, "10.1.2.3", discoveryv1.AddressTypeIPv4) + epv6 := endpointSliceForService(headlessSvc, "2001:db8::1", discoveryv1.AddressTypeIPv6) + + dnsRRDualStack := &dnsRecordsReconciler{ + tsNamespace: "tailscale", + logger: zl.Sugar(), + } + + // Create the dnsrecords ConfigMap + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: operatorutils.DNSRecordsCMName, + Namespace: "tailscale", + }, + } + + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(dnsCfg, ing, headlessSvc, epv4, epv6, cm). + WithStatusSubresource(dnsCfg). 
+ Build() + + dnsRRDualStack.Client = fc + + // Test dual-stack service records + expectReconciled(t, dnsRRDualStack, "tailscale", "ts-dual-stack-ingress") + + wantIPv4 := map[string][]string{"dual-stack.example.ts.net": {"10.1.2.3"}} + wantIPv6 := map[string][]string{"dual-stack.example.ts.net": {"2001:db8::1"}} + expectHostsRecordsWithIPv6(t, fc, wantIPv4, wantIPv6) + + // Test ProxyGroup with dual-stack ClusterIPs + // First create parent service + parentEgressSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pg-service", + Namespace: "tailscale", + Annotations: map[string]string{ + AnnotationTailnetTargetFQDN: "pg-service.example.ts.net", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "unused", + }, + } + + proxyGroupSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ts-proxygroup-dualstack", + Namespace: "tailscale", + Labels: map[string]string{ + kubetypes.LabelManaged: "true", + labelProxyGroup: "test-pg", + labelSvcType: typeEgress, + LabelParentName: "pg-service", + LabelParentNamespace: "tailscale", + LabelParentType: "svc", + }, + Annotations: map[string]string{ + annotationTSMagicDNSName: "pg-service.example.ts.net", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + ClusterIP: "10.96.0.100", + ClusterIPs: []string{"10.96.0.100", "2001:db8::100"}, + }, + } + + mustCreate(t, fc, parentEgressSvc) + mustCreate(t, fc, proxyGroupSvc) + expectReconciled(t, dnsRRDualStack, "tailscale", "ts-proxygroup-dualstack") + + wantIPv4["pg-service.example.ts.net"] = []string{"10.96.0.100"} + wantIPv6["pg-service.example.ts.net"] = []string{"2001:db8::100"} + expectHostsRecordsWithIPv6(t, fc, wantIPv4, wantIPv6) } func headlessSvcForParent(o client.Object, typ string) *corev1.Service { @@ -344,3 +476,28 @@ func expectHostsRecords(t *testing.T, cl client.Client, wantsHosts map[string][] t.Fatalf("unexpected dns config (-got +want):\n%s", diff) } } + +func expectHostsRecordsWithIPv6(t *testing.T, cl client.Client, wantsHostsIPv4, wantsHostsIPv6 map[string][]string) { + t.Helper() + cm := new(corev1.ConfigMap) + if err := cl.Get(context.Background(), types.NamespacedName{Name: "dnsrecords", Namespace: "tailscale"}, cm); err != nil { + t.Fatalf("getting dnsconfig ConfigMap: %v", err) + } + if cm.Data == nil { + t.Fatal("dnsconfig ConfigMap has no data") + } + dnsConfigString, ok := cm.Data[operatorutils.DNSRecordsCMKey] + if !ok { + t.Fatal("dnsconfig ConfigMap does not contain dnsconfig") + } + dnsConfig := &operatorutils.Records{} + if err := json.Unmarshal([]byte(dnsConfigString), dnsConfig); err != nil { + t.Fatalf("unmarshaling dnsconfig: %v", err) + } + if diff := cmp.Diff(dnsConfig.IP4, wantsHostsIPv4); diff != "" { + t.Fatalf("unexpected IPv4 dns config (-got +want):\n%s", diff) + } + if diff := cmp.Diff(dnsConfig.IP6, wantsHostsIPv6); diff != "" { + t.Fatalf("unexpected IPv6 dns config (-got +want):\n%s", diff) + } +} diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 79c8469e1..180231bfa 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -213,7 +213,6 @@ NB: if you want cluster workloads to be able to refer to Tailscale Ingress using its MagicDNS name, you must also annotate the Ingress resource with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to ensure that the proxy created for the Ingress listens on its Pod IP address. -NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. 
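The operator side (dnsrecords.go above) and the nameserver side (cmd/k8s-nameserver) meet in the JSON payload stored in the dnsrecords ConfigMap, whose schema gains an IP6 map in the utils.go hunk further down. The following is a minimal illustrative sketch, not part of the patch: it mirrors the Records type with a local stand-in struct and prints what a dual-stack entry would look like, reusing the made-up hostname and addresses from the tests above.

package main

import (
	"encoding/json"
	"fmt"
)

// records mirrors the operator's Records type from k8s-operator/utils.go
// (version, ip4, and the new optional ip6 map); it is redefined locally
// here purely for illustration.
type records struct {
	Version string              `json:"version"`
	IP4     map[string][]string `json:"ip4"`
	IP6     map[string][]string `json:"ip6,omitempty"`
}

func main() {
	r := records{
		Version: "v1alpha1",
		IP4:     map[string][]string{"dual-stack.example.ts.net": {"10.1.2.3"}},
		IP6:     map[string][]string{"dual-stack.example.ts.net": {"2001:db8::1"}},
	}
	out, _ := json.MarshalIndent(r, "", "  ")
	// The nameserver reads this same JSON from the mounted ConfigMap and
	// answers A queries from ip4 and AAAA queries from ip6.
	fmt.Println(string(out))
}

Because ip6 is tagged omitempty, an IPv4-only cluster keeps producing exactly the ConfigMap payload it produced before this change.
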
diff --git a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go index 0e26ee647..0b0f1eb5c 100644 --- a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go +++ b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go @@ -45,7 +45,6 @@ var DNSConfigKind = "DNSConfig" // using its MagicDNS name, you must also annotate the Ingress resource with // tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to // ensure that the proxy created for the Ingress listens on its Pod IP address. -// NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. type DNSConfig struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/k8s-operator/utils.go b/k8s-operator/utils.go index 420d7e49c..2acbf338d 100644 --- a/k8s-operator/utils.go +++ b/k8s-operator/utils.go @@ -27,6 +27,11 @@ type Records struct { Version string `json:"version"` // IP4 contains a mapping of DNS names to IPv4 address(es). IP4 map[string][]string `json:"ip4"` + // IP6 contains a mapping of DNS names to IPv6 address(es). + // This field is optional and will be omitted from JSON if empty. + // It enables dual-stack DNS support in Kubernetes clusters. + // +optional + IP6 map[string][]string `json:"ip6,omitempty"` } // TailscaledConfigFileName returns a tailscaled config file name in From a40f23ad4a851d20abb6d339db3b82b8c6567a26 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 25 Sep 2025 09:39:27 -0700 Subject: [PATCH 1391/1708] util/eventbus: flesh out docs a bit Updates #cleanup Change-Id: Ia6b0e4b0426be1dd10a777aff0a81d4dd6b69b01 Signed-off-by: Brad Fitzpatrick --- util/eventbus/bus.go | 2 +- util/eventbus/client.go | 2 +- util/eventbus/publish.go | 4 ++++ util/eventbus/subscribe.go | 6 +++++- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index e5bf7329a..d1507d8e6 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -77,7 +77,7 @@ func (b *Bus) Debugger() *Debugger { return &Debugger{b} } -// Close closes the bus. Implicitly closes all clients, publishers and +// Close closes the bus. It implicitly closes all clients, publishers and // subscribers attached to the bus. // // Close blocks until the bus is fully shut down. The bus is diff --git a/util/eventbus/client.go b/util/eventbus/client.go index 9b4119865..7c0268886 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -29,7 +29,7 @@ type Client struct { func (c *Client) Name() string { return c.name } -// Close closes the client. Implicitly closes all publishers and +// Close closes the client. It implicitly closes all publishers and // subscribers obtained from this client. func (c *Client) Close() { var ( diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go index 4a4bdfb7e..348bb9dff 100644 --- a/util/eventbus/publish.go +++ b/util/eventbus/publish.go @@ -27,6 +27,10 @@ func newPublisher[T any](c *Client) *Publisher[T] { // Close closes the publisher. // // Calls to Publish after Close silently do nothing. +// +// If the Bus or Client from which the Publisher was created is closed, +// the Publisher is implicitly closed and does not need to be closed +// separately. func (p *Publisher[T]) Close() { // Just unblocks any active calls to Publish, no other // synchronization needed. 
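The doc additions in this commit all describe the same lifecycle rule: closing a Bus or Client implicitly closes everything created from it. A compressed sketch of that lifecycle follows. It is not taken from the patch: eventbus.New and eventbus.Subscribe are assumed counterparts of the Publish constructor that appears later in this series, while Publish, Events, Done, and the Close methods are the ones documented in these hunks.

package main

import (
	"fmt"

	"tailscale.com/util/eventbus"
)

// portEvent is an arbitrary event type used only for this sketch.
type portEvent struct{ Port int }

func main() {
	bus := eventbus.New() // assumed constructor
	// Closing the bus implicitly closes the clients, publisher, and
	// subscriber below; none of them need a separate Close call.
	defer bus.Close()

	pub := eventbus.Publish[portEvent](bus.Client("sketch-pub"))
	sub := eventbus.Subscribe[portEvent](bus.Client("sketch-sub")) // assumed

	pub.Publish(portEvent{Port: 443})

	select {
	case ev := <-sub.Events():
		fmt.Println("saw port", ev.Port)
	case <-sub.Done():
		// Subscriber was closed (directly, or implicitly via its Client
		// or the Bus) before an event arrived.
	}
}
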
diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index ee534781a..ef155e621 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -158,7 +158,7 @@ func (q *subscribeState) subscriberFor(val any) subscriber { return q.outputs[reflect.TypeOf(val)] } -// Close closes the subscribeState. Implicitly closes all Subscribers +// Close closes the subscribeState. It implicitly closes all Subscribers // linked to this state, and any pending events are discarded. func (s *subscribeState) close() { s.dispatcher.StopAndWait() @@ -244,6 +244,10 @@ func (s *Subscriber[T]) Done() <-chan struct{} { // Close closes the Subscriber, indicating the caller no longer wishes // to receive this event type. After Close, receives on // [Subscriber.Events] block for ever. +// +// If the Bus from which the Subscriber was created is closed, +// the Subscriber is implicitly closed and does not need to be closed +// separately. func (s *Subscriber[T]) Close() { s.stop.Stop() // unblock receivers s.unregister() From c49ed5dd5a1ec27aa04ee87731f3e69f7b7c77fe Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 25 Sep 2025 11:54:41 -0700 Subject: [PATCH 1392/1708] feature/tpm: implement key.HardwareAttestationKey (#17256) Updates #15830 Signed-off-by: Andrew Lytvynov --- feature/tpm/attestation.go | 264 ++++++++++++++++++++++++++++++++ feature/tpm/attestation_test.go | 98 ++++++++++++ feature/tpm/tpm.go | 5 + 3 files changed, 367 insertions(+) create mode 100644 feature/tpm/attestation.go create mode 100644 feature/tpm/attestation_test.go diff --git a/feature/tpm/attestation.go b/feature/tpm/attestation.go new file mode 100644 index 000000000..4b3018569 --- /dev/null +++ b/feature/tpm/attestation.go @@ -0,0 +1,264 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tpm + +import ( + "crypto" + "encoding/json" + "errors" + "fmt" + "io" + "log" + + "github.com/google/go-tpm/tpm2" + "github.com/google/go-tpm/tpm2/transport" + "golang.org/x/crypto/cryptobyte" + "golang.org/x/crypto/cryptobyte/asn1" + "tailscale.com/types/key" +) + +type attestationKey struct { + tpm transport.TPMCloser + // private and public parts of the TPM key as returned from tpm2.Create. + // These are used for serialization. + tpmPrivate tpm2.TPM2BPrivate + tpmPublic tpm2.TPM2BPublic + // handle of the loaded TPM key. + handle *tpm2.NamedHandle + // pub is the parsed *ecdsa.PublicKey. + pub crypto.PublicKey +} + +func newAttestationKey() (ak *attestationKey, retErr error) { + tpm, err := open() + if err != nil { + return nil, key.ErrUnsupported + } + defer func() { + if retErr != nil { + tpm.Close() + } + }() + ak = &attestationKey{tpm: tpm} + + // Create a key under the storage hierarchy. 
+ if err := withSRK(log.Printf, ak.tpm, func(srk tpm2.AuthHandle) error { + resp, err := tpm2.Create{ + ParentHandle: tpm2.NamedHandle{ + Handle: srk.Handle, + Name: srk.Name, + }, + InPublic: tpm2.New2B( + tpm2.TPMTPublic{ + Type: tpm2.TPMAlgECC, + NameAlg: tpm2.TPMAlgSHA256, + ObjectAttributes: tpm2.TPMAObject{ + SensitiveDataOrigin: true, + UserWithAuth: true, + AdminWithPolicy: true, + NoDA: true, + FixedTPM: true, + FixedParent: true, + SignEncrypt: true, + }, + Parameters: tpm2.NewTPMUPublicParms( + tpm2.TPMAlgECC, + &tpm2.TPMSECCParms{ + CurveID: tpm2.TPMECCNistP256, + Scheme: tpm2.TPMTECCScheme{ + Scheme: tpm2.TPMAlgECDSA, + Details: tpm2.NewTPMUAsymScheme( + tpm2.TPMAlgECDSA, + &tpm2.TPMSSigSchemeECDSA{ + // Unfortunately, TPMs don't let us use + // TPMAlgNull here to make the hash + // algorithm dynamic higher in the + // stack. We have to hardcode it here. + HashAlg: tpm2.TPMAlgSHA256, + }, + ), + }, + }, + ), + }, + ), + }.Execute(ak.tpm) + if err != nil { + return fmt.Errorf("tpm2.Create: %w", err) + } + ak.tpmPrivate = resp.OutPrivate + ak.tpmPublic = resp.OutPublic + return nil + }); err != nil { + return nil, err + } + return ak, ak.load() +} + +func (ak *attestationKey) loaded() bool { + return ak.tpm != nil && ak.handle != nil && ak.pub != nil +} + +// load the key into the TPM from its public/private components. Must be called +// before Sign or Public. +func (ak *attestationKey) load() error { + if ak.loaded() { + return nil + } + if len(ak.tpmPrivate.Buffer) == 0 || len(ak.tpmPublic.Bytes()) == 0 { + return fmt.Errorf("attestationKey.load called without tpmPrivate or tpmPublic") + } + return withSRK(log.Printf, ak.tpm, func(srk tpm2.AuthHandle) error { + resp, err := tpm2.Load{ + ParentHandle: tpm2.NamedHandle{ + Handle: srk.Handle, + Name: srk.Name, + }, + InPrivate: ak.tpmPrivate, + InPublic: ak.tpmPublic, + }.Execute(ak.tpm) + if err != nil { + return fmt.Errorf("tpm2.Load: %w", err) + } + + ak.handle = &tpm2.NamedHandle{ + Handle: resp.ObjectHandle, + Name: resp.Name, + } + pub, err := ak.tpmPublic.Contents() + if err != nil { + return err + } + ak.pub, err = tpm2.Pub(*pub) + return err + }) +} + +// attestationKeySerialized is the JSON-serialized representation of +// attestationKey. +type attestationKeySerialized struct { + TPMPrivate []byte `json:"tpmPrivate"` + TPMPublic []byte `json:"tpmPublic"` +} + +func (ak *attestationKey) MarshalJSON() ([]byte, error) { + return json.Marshal(attestationKeySerialized{ + TPMPublic: ak.tpmPublic.Bytes(), + TPMPrivate: ak.tpmPrivate.Buffer, + }) +} + +func (ak *attestationKey) UnmarshalJSON(data []byte) (retErr error) { + var aks attestationKeySerialized + if err := json.Unmarshal(data, &aks); err != nil { + return err + } + + ak.tpmPrivate = tpm2.TPM2BPrivate{Buffer: aks.TPMPrivate} + ak.tpmPublic = tpm2.BytesAs2B[tpm2.TPMTPublic, *tpm2.TPMTPublic](aks.TPMPublic) + + tpm, err := open() + if err != nil { + return key.ErrUnsupported + } + defer func() { + if retErr != nil { + tpm.Close() + } + }() + ak.tpm = tpm + + return ak.load() +} + +func (ak *attestationKey) Public() crypto.PublicKey { + return ak.pub +} + +func (ak *attestationKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { + if !ak.loaded() { + return nil, errors.New("tpm2 attestation key is not loaded during Sign") + } + // Unfortunately, TPMs don't let us make keys with dynamic hash algorithms. + // The hash algorithm is fixed at key creation time (tpm2.Create). 
+ if opts != crypto.SHA256 { + return nil, fmt.Errorf("tpm2 key is restricted to SHA256, have %q", opts) + } + resp, err := tpm2.Sign{ + KeyHandle: ak.handle, + Digest: tpm2.TPM2BDigest{ + Buffer: digest, + }, + InScheme: tpm2.TPMTSigScheme{ + Scheme: tpm2.TPMAlgECDSA, + Details: tpm2.NewTPMUSigScheme( + tpm2.TPMAlgECDSA, + &tpm2.TPMSSchemeHash{ + HashAlg: tpm2.TPMAlgSHA256, + }, + ), + }, + Validation: tpm2.TPMTTKHashCheck{ + Tag: tpm2.TPMSTHashCheck, + }, + }.Execute(ak.tpm) + if err != nil { + return nil, fmt.Errorf("tpm2.Sign: %w", err) + } + sig, err := resp.Signature.Signature.ECDSA() + if err != nil { + return nil, err + } + return encodeSignature(sig.SignatureR.Buffer, sig.SignatureS.Buffer) +} + +// Copied from crypto/ecdsa. +func encodeSignature(r, s []byte) ([]byte, error) { + var b cryptobyte.Builder + b.AddASN1(asn1.SEQUENCE, func(b *cryptobyte.Builder) { + addASN1IntBytes(b, r) + addASN1IntBytes(b, s) + }) + return b.Bytes() +} + +// addASN1IntBytes encodes in ASN.1 a positive integer represented as +// a big-endian byte slice with zero or more leading zeroes. +func addASN1IntBytes(b *cryptobyte.Builder, bytes []byte) { + for len(bytes) > 0 && bytes[0] == 0 { + bytes = bytes[1:] + } + if len(bytes) == 0 { + b.SetError(errors.New("invalid integer")) + return + } + b.AddASN1(asn1.INTEGER, func(c *cryptobyte.Builder) { + if bytes[0]&0x80 != 0 { + c.AddUint8(0) + } + c.AddBytes(bytes) + }) +} + +func (ak *attestationKey) Close() error { + var errs []error + if ak.handle != nil && ak.tpm != nil { + _, err := tpm2.FlushContext{FlushHandle: ak.handle.Handle}.Execute(ak.tpm) + errs = append(errs, err) + } + if ak.tpm != nil { + errs = append(errs, ak.tpm.Close()) + } + return errors.Join(errs...) +} + +func (ak *attestationKey) Clone() key.HardwareAttestationKey { + return &attestationKey{ + tpm: ak.tpm, + tpmPrivate: ak.tpmPrivate, + tpmPublic: ak.tpmPublic, + handle: ak.handle, + pub: ak.pub, + } +} diff --git a/feature/tpm/attestation_test.go b/feature/tpm/attestation_test.go new file mode 100644 index 000000000..ead88c955 --- /dev/null +++ b/feature/tpm/attestation_test.go @@ -0,0 +1,98 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tpm + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/sha256" + "encoding/json" + "testing" +) + +func TestAttestationKeySign(t *testing.T) { + skipWithoutTPM(t) + ak, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak.Close(); err != nil { + t.Errorf("ak.Close: %v", err) + } + }) + + data := []byte("secrets") + digest := sha256.Sum256(data) + + // Check signature/validation round trip. + sig, err := ak.Sign(rand.Reader, digest[:], crypto.SHA256) + if err != nil { + t.Fatal(err) + } + if !ecdsa.VerifyASN1(ak.Public().(*ecdsa.PublicKey), digest[:], sig) { + t.Errorf("ecdsa.VerifyASN1 failed") + } + + // Create a different key. + ak2, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak2.Close(); err != nil { + t.Errorf("ak2.Close: %v", err) + } + }) + + // Make sure that the keys are distinct via their public keys and the + // signatures they produce. 
+ if ak.Public().(*ecdsa.PublicKey).Equal(ak2.Public()) { + t.Errorf("public keys of distinct attestation keys are the same") + } + sig2, err := ak2.Sign(rand.Reader, digest[:], crypto.SHA256) + if err != nil { + t.Fatal(err) + } + if bytes.Equal(sig, sig2) { + t.Errorf("signatures from distinct attestation keys are the same") + } +} + +func TestAttestationKeyUnmarshal(t *testing.T) { + skipWithoutTPM(t) + ak, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak.Close(); err != nil { + t.Errorf("ak.Close: %v", err) + } + }) + + buf, err := ak.MarshalJSON() + if err != nil { + t.Fatal(err) + } + var ak2 attestationKey + if err := json.Unmarshal(buf, &ak2); err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak2.Close(); err != nil { + t.Errorf("ak2.Close: %v", err) + } + }) + + if !ak2.loaded() { + t.Error("unmarshalled key is not loaded") + } + + if !ak.Public().(*ecdsa.PublicKey).Equal(ak2.Public()) { + t.Error("unmarshalled public key is not the same as the original public key") + } +} diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 0260cca58..019224738 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -28,6 +28,7 @@ import ( "tailscale.com/ipn/store" "tailscale.com/paths" "tailscale.com/tailcfg" + "tailscale.com/types/key" "tailscale.com/types/logger" ) @@ -39,6 +40,10 @@ func init() { hi.TPM = infoOnce() }) store.Register(store.TPMPrefix, newStore) + key.RegisterHardwareAttestationKeyFns( + func() key.HardwareAttestationKey { return &attestationKey{} }, + func() (key.HardwareAttestationKey, error) { return newAttestationKey() }, + ) } func info() *tailcfg.TPMInfo { From bbc5107d7d68ec0a736a568a1d4229c08c4c8202 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 25 Sep 2025 14:07:09 -0500 Subject: [PATCH 1393/1708] ipn/ipnlocal: do not reset extHost on (*LocalBackend).Shutdown We made changes to ipnext callback registration/unregistration/invocation in #15780 that made resetting b.exthost to a nil, no-op host in (*LocalBackend).Shutdown() unnecessary. But resetting it is also racy: b.exthost must be safe for concurrent use with or without b.mu held, so it shouldn't be written after NewLocalBackend returns. This PR removes it. Fixes #17279 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ef8fcab40..b36f54705 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1118,8 +1118,6 @@ func (b *LocalBackend) Shutdown() { if b.notifyCancel != nil { b.notifyCancel() } - extHost := b.extHost - b.extHost = nil b.mu.Unlock() b.webClientShutdown() @@ -1136,7 +1134,7 @@ func (b *LocalBackend) Shutdown() { } b.ctxCancel(errShutdown) b.currentNode().shutdown(errShutdown) - extHost.Shutdown() + b.extHost.Shutdown() b.e.Close() <-b.e.Done() b.awaitNoGoroutinesInTest() From 45d635cc98d1ef89eb3bd2a79b2c21d9c0968198 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 24 Sep 2025 15:12:24 -0700 Subject: [PATCH 1394/1708] feature/portlist: pull portlist service porting into extension, use eventbus And yay: tsnet (and thus k8s-operator etc) no longer depends on portlist! And LocalBackend is smaller. Removes 50 KB from the minimal binary. 
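The consuming side lives in ipn/ipnlocal (the local.go and node_backend.go changes in the diffstat below); presumably LocalBackend now receives these PortlistServices events over the bus instead of running the poller itself. A hedged sketch of such a consumer, with eventbus.Subscribe assumed as the counterpart of the Publish call in the new feature/portlist/portlist.go:

package example

import (
	"tailscale.com/ipn/ipnlocal"
	"tailscale.com/util/eventbus"
)

// watchPortlist is an illustrative consumer of the ipnlocal.PortlistServices
// values that the portlist extension publishes. It is not taken from the
// patch; the real consumer is LocalBackend, and eventbus.Subscribe is an
// assumed API (only the Publish side appears in this series).
func watchPortlist(c *eventbus.Client, onChange func(ipnlocal.PortlistServices)) {
	sub := eventbus.Subscribe[ipnlocal.PortlistServices](c)
	go func() {
		for {
			select {
			case svcs := <-sub.Events():
				// e.g. fold the advertised services into Hostinfo.Services
				// and send the updated Hostinfo to control.
				onChange(svcs)
			case <-sub.Done():
				return
			}
		}
	}()
}

Either way, once the ShouldUploadServices hook reports false (shields up, or the tailnet has service collection disabled), the extension stops publishing, so the consumer simply sees no further updates.
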
Updates #12614 Change-Id: Iee04057053dc39305303e8bd1d9599db8368d926 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 5 +- cmd/tailscaled/depaware.txt | 5 +- cmd/tailscaled/deps_test.go | 13 ++ cmd/tsidp/depaware.txt | 5 +- .../feature_portlist_disabled.go | 13 ++ .../buildfeatures/feature_portlist_enabled.go | 13 ++ feature/condregister/maybe_portlist.go | 8 + feature/featuretags/featuretags.go | 1 + feature/portlist/portlist.go | 157 ++++++++++++++++++ feature/taildrop/ext.go | 1 + ipn/ipnext/ipnext.go | 9 + ipn/ipnlocal/local.go | 131 +++++---------- ipn/ipnlocal/local_test.go | 1 - ipn/ipnlocal/node_backend.go | 6 + ipn/ipnlocal/state_test.go | 1 - ipn/lapitest/backend.go | 1 - tsnet/depaware.txt | 5 +- tsnet/tsnet_test.go | 13 ++ 18 files changed, 277 insertions(+), 111 deletions(-) create mode 100644 feature/buildfeatures/feature_portlist_disabled.go create mode 100644 feature/buildfeatures/feature_portlist_enabled.go create mode 100644 feature/condregister/maybe_portlist.go create mode 100644 feature/portlist/portlist.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 2281d3819..ea0e08b19 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -813,7 +813,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet - tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store tailscale.com/ipn/store/kubestore from tailscale.com/cmd/k8s-operator+ @@ -861,7 +860,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/netknob from tailscale.com/logpolicy+ 💣 tailscale.com/net/netmon from tailscale.com/control/controlclient+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ - W 💣 tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ tailscale.com/net/packet from tailscale.com/net/connstats+ @@ -885,7 +883,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ @@ -931,7 +928,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/cmpver from tailscale.com/clientupdate+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ + L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/tsd+ tailscale.com/util/execqueue from tailscale.com/appc+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 70be690ee..acd8e0459 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -278,6 +278,7 @@ 
tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister tailscale.com/feature/drive from tailscale.com/feature/condregister L tailscale.com/feature/linuxdnsfight from tailscale.com/feature/condregister + tailscale.com/feature/portlist from tailscale.com/feature/condregister tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/relayserver from tailscale.com/feature/condregister tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ @@ -299,7 +300,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver+ - tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/policy from tailscale.com/feature/portlist tailscale.com/ipn/store from tailscale.com/cmd/tailscaled+ L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store @@ -360,7 +361,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/udprelay/status from tailscale.com/client/local+ tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/portlist from tailscale.com/feature/portlist tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 35975b57c..24a393124 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -185,3 +185,16 @@ func TestOmitDBus(t *testing.T) { }, }.Check(t) } + +func TestOmitPortlist(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_portlist,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "portlist") { + t.Errorf("unexpected dep: %q", dep) + } + }, + }.Check(t) +} diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 4fd7c8020..69904c976 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -255,7 +255,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet - tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store @@ -292,7 +291,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/netknob from tailscale.com/logpolicy+ 💣 tailscale.com/net/netmon from tailscale.com/control/controlclient+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ - W 💣 tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ @@ -316,7 +314,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by 
github.com/tailscale/depawar tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ @@ -361,7 +358,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/cmpver from tailscale.com/clientupdate+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ + L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/ipn/localapi+ tailscale.com/util/execqueue from tailscale.com/appc+ diff --git a/feature/buildfeatures/feature_portlist_disabled.go b/feature/buildfeatures/feature_portlist_disabled.go new file mode 100644 index 000000000..934061fd8 --- /dev/null +++ b/feature/buildfeatures/feature_portlist_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_portlist + +package buildfeatures + +// HasPortList is whether the binary was built with support for modular feature "Optionally advertise listening service ports". +// Specifically, it's whether the binary was NOT built with the "ts_omit_portlist" build tag. +// It's a const so it can be used for dead code elimination. +const HasPortList = false diff --git a/feature/buildfeatures/feature_portlist_enabled.go b/feature/buildfeatures/feature_portlist_enabled.go new file mode 100644 index 000000000..c1dc1c163 --- /dev/null +++ b/feature/buildfeatures/feature_portlist_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_portlist + +package buildfeatures + +// HasPortList is whether the binary was built with support for modular feature "Optionally advertise listening service ports". +// Specifically, it's whether the binary was NOT built with the "ts_omit_portlist" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasPortList = true diff --git a/feature/condregister/maybe_portlist.go b/feature/condregister/maybe_portlist.go new file mode 100644 index 000000000..1be56f177 --- /dev/null +++ b/feature/condregister/maybe_portlist.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_portlist + +package condregister + +import _ "tailscale.com/feature/portlist" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 6f8c4ac17..d1752a80c 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -114,6 +114,7 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Outbound localhost HTTP/SOCK5 proxy support", Deps: []FeatureTag{"netstack"}, }, + "portlist": {"PortList", "Optionally advertise listening service ports", nil}, "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, "netstack": {"Netstack", "gVisor netstack (userspace networking) support (TODO; not yet omittable)", nil}, "networkmanager": { diff --git a/feature/portlist/portlist.go b/feature/portlist/portlist.go new file mode 100644 index 000000000..7d69796ff --- /dev/null +++ b/feature/portlist/portlist.go @@ -0,0 +1,157 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package portlist contains code to poll the local system for open ports +// and report them to the control plane, if enabled on the tailnet. +package portlist + +import ( + "context" + "sync/atomic" + + "tailscale.com/envknob" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/policy" + "tailscale.com/portlist" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" + "tailscale.com/version" +) + +func init() { + ipnext.RegisterExtension("portlist", newExtension) +} + +func newExtension(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { + busClient := sb.Sys().Bus.Get().Client("portlist") + e := &Extension{ + sb: sb, + busClient: busClient, + logf: logger.WithPrefix(logf, "portlist: "), + pub: eventbus.Publish[ipnlocal.PortlistServices](busClient), + pollerDone: make(chan struct{}), + wakePoller: make(chan struct{}), + } + e.ctx, e.ctxCancel = context.WithCancel(context.Background()) + return e, nil +} + +// Extension implements the portlist extension. +type Extension struct { + ctx context.Context + ctxCancel context.CancelFunc + pollerDone chan struct{} // close-only chan when poller goroutine exits + wakePoller chan struct{} // best effort chan to wake poller from sleep + busClient *eventbus.Client + pub *eventbus.Publisher[ipnlocal.PortlistServices] + logf logger.Logf + sb ipnext.SafeBackend + host ipnext.Host // from Init + + shieldsUp atomic.Bool + shouldUploadServicesAtomic atomic.Bool +} + +func (e *Extension) Name() string { return "portlist" } +func (e *Extension) Shutdown() error { + e.ctxCancel() + e.busClient.Close() + <-e.pollerDone + return nil +} + +func (e *Extension) Init(h ipnext.Host) error { + if !envknob.BoolDefaultTrue("TS_PORTLIST") { + return ipnext.SkipExtension + } + + e.host = h + h.Hooks().ShouldUploadServices.Set(e.shouldUploadServicesAtomic.Load) + h.Hooks().ProfileStateChange.Add(e.onChangeProfile) + h.Hooks().OnSelfChange.Add(e.onSelfChange) + + // TODO(nickkhyl): remove this after the profileManager refactoring. + // See tailscale/tailscale#15974. + // This same workaround appears in feature/taildrop/ext.go. 
+ profile, prefs := h.Profiles().CurrentProfileState() + e.onChangeProfile(profile, prefs, false) + + go e.runPollLoop() + return nil +} + +func (e *Extension) onSelfChange(tailcfg.NodeView) { + e.updateShouldUploadServices() +} + +func (e *Extension) onChangeProfile(_ ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { + e.shieldsUp.Store(prefs.ShieldsUp()) + e.updateShouldUploadServices() +} + +func (e *Extension) updateShouldUploadServices() { + v := !e.shieldsUp.Load() && e.host.NodeBackend().CollectServices() + if e.shouldUploadServicesAtomic.CompareAndSwap(!v, v) && v { + // Upon transition from false to true (enabling service reporting), try + // to wake the poller to do an immediate poll if it's sleeping. + // It's not a big deal if we miss waking it. It'll get to it soon enough. + select { + case e.wakePoller <- struct{}{}: + default: + } + } +} + +// runPollLoop is a goroutine that periodically checks the open +// ports and publishes them if they've changed. +func (e *Extension) runPollLoop() { + defer close(e.pollerDone) + + var poller portlist.Poller + + ticker, tickerChannel := e.sb.Clock().NewTicker(portlist.PollInterval()) + defer ticker.Stop() + for { + select { + case <-tickerChannel: + case <-e.wakePoller: + case <-e.ctx.Done(): + return + } + + if !e.shouldUploadServicesAtomic.Load() { + continue + } + + ports, changed, err := poller.Poll() + if err != nil { + e.logf("Poll: %v", err) + // TODO: this is kinda weird that we just return here and never try + // again. Maybe that was because all errors are assumed to be + // permission errors and thus permanent? Audit varioys OS + // implementation and check error types, and then make this check + // for permanent vs temporary errors and keep looping with a backoff + // for temporary errors? But for now we just give up, like we always + // have. + return + } + if !changed { + continue + } + sl := []tailcfg.Service{} + for _, p := range ports { + s := tailcfg.Service{ + Proto: tailcfg.ServiceProto(p.Proto), + Port: p.Port, + Description: p.Process, + } + if policy.IsInterestingService(s, version.OS()) { + sl = append(sl, s) + } + } + e.pub.Publish(ipnlocal.PortlistServices(sl)) + } +} diff --git a/feature/taildrop/ext.go b/feature/taildrop/ext.go index f8f45b53f..6bdb375cc 100644 --- a/feature/taildrop/ext.go +++ b/feature/taildrop/ext.go @@ -105,6 +105,7 @@ func (e *Extension) Init(h ipnext.Host) error { // TODO(nickkhyl): remove this after the profileManager refactoring. // See tailscale/tailscale#15974. + // This same workaround appears in feature/portlist/portlist.go. profile, prefs := h.Profiles().CurrentProfileState() e.onChangeProfile(profile, prefs, false) return nil diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index 066763ba4..4ff37dc8e 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -372,6 +372,10 @@ type Hooks struct { // SetPeerStatus is called to mutate PeerStatus. // Callers must only use NodeBackend to read data. SetPeerStatus feature.Hooks[func(*ipnstate.PeerStatus, tailcfg.NodeView, NodeBackend)] + + // ShouldUploadServices reports whether this node should include services + // in Hostinfo from the portlist extension. + ShouldUploadServices feature.Hook[func() bool] } // NodeBackend is an interface to query the current node and its peers. @@ -398,4 +402,9 @@ type NodeBackend interface { // It effectively just reports whether PeerAPIBase(node) is non-empty, but // potentially more efficiently. 
PeerHasPeerAPI(tailcfg.NodeView) bool + + // CollectServices reports whether the control plane is telling this + // node that the portlist service collection is desirable, should it + // choose to report them. + CollectServices() bool } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index b36f54705..62a3a2131 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -61,7 +61,6 @@ import ( "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnext" "tailscale.com/ipn/ipnstate" - "tailscale.com/ipn/policy" "tailscale.com/log/sockstatlog" "tailscale.com/logpolicy" "tailscale.com/net/dns" @@ -77,7 +76,6 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" "tailscale.com/paths" - "tailscale.com/portlist" "tailscale.com/posture" "tailscale.com/syncs" "tailscale.com/tailcfg" @@ -211,12 +209,10 @@ type LocalBackend struct { pushDeviceToken syncs.AtomicValue[string] backendLogID logid.PublicID unregisterSysPolicyWatch func() - portpoll *portlist.Poller // may be nil - portpollOnce sync.Once // guards starting readPoller - varRoot string // or empty if SetVarRoot never called - logFlushFunc func() // or nil if SetLogFlusher wasn't called - em *expiryManager // non-nil; TODO(nickkhyl): move to nodeBackend - sshAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeBackend + varRoot string // or empty if SetVarRoot never called + logFlushFunc func() // or nil if SetLogFlusher wasn't called + em *expiryManager // non-nil; TODO(nickkhyl): move to nodeBackend + sshAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeBackend // webClientAtomicBool controls whether the web client is running. This should // be true unless the disable-web-client node attribute has been set. webClientAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeBackend @@ -522,7 +518,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo pm: pm, backendLogID: logID, state: ipn.NoState, - portpoll: new(portlist.Poller), em: newExpiryManager(logf, sys.Bus.Get()), loginFlags: loginFlags, clock: clock, @@ -619,6 +614,12 @@ func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus healthChangeSub := eventbus.Subscribe[health.Change](ec) changeDeltaSub := eventbus.Subscribe[netmon.ChangeDelta](ec) + var portlist <-chan PortlistServices + if buildfeatures.HasPortList { + portlistSub := eventbus.Subscribe[PortlistServices](ec) + portlist = portlistSub.Events() + } + return func(ec *eventbus.Client) { for { select { @@ -632,6 +633,10 @@ func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus b.onHealthChange(change) case changeDelta := <-changeDeltaSub.Events(): b.linkChange(&changeDelta) + case pl := <-portlist: + if buildfeatures.HasPortList { // redundant, but explicit for linker deadcode and humans + b.setPortlistServices(pl) + } } } } @@ -2300,15 +2305,6 @@ func (b *LocalBackend) SetControlClientGetterForTesting(newControlClient func(co b.ccGen = newControlClient } -// DisablePortPollerForTest disables the port list poller for tests. -// It must be called before Start. -func (b *LocalBackend) DisablePortPollerForTest() { - testenv.AssertInTest() - b.mu.Lock() - defer b.mu.Unlock() - b.portpoll = nil -} - // PeersForTest returns all the current peers, sorted by Node.ID, // for integration tests in another repo. 
func (b *LocalBackend) PeersForTest() []tailcfg.NodeView { @@ -2457,12 +2453,6 @@ func (b *LocalBackend) Start(opts ipn.Options) error { persistv = new(persist.Persist) } - if b.portpoll != nil { - b.portpollOnce.Do(func() { - b.goTracker.Go(b.readPoller) - }) - } - discoPublic := b.MagicConn().DiscoPublicKey() var err error @@ -2906,57 +2896,6 @@ func shrinkDefaultRoute(route netip.Prefix, localInterfaceRoutes *netipx.IPSet, return b.IPSet() } -// readPoller is a goroutine that receives service lists from -// b.portpoll and propagates them into the controlclient's HostInfo. -func (b *LocalBackend) readPoller() { - if !envknob.BoolDefaultTrue("TS_PORTLIST") { - return - } - - ticker, tickerChannel := b.clock.NewTicker(portlist.PollInterval()) - defer ticker.Stop() - for { - select { - case <-tickerChannel: - case <-b.ctx.Done(): - return - } - - if !b.shouldUploadServices() { - continue - } - - ports, changed, err := b.portpoll.Poll() - if err != nil { - b.logf("error polling for open ports: %v", err) - return - } - if !changed { - continue - } - sl := []tailcfg.Service{} - for _, p := range ports { - s := tailcfg.Service{ - Proto: tailcfg.ServiceProto(p.Proto), - Port: p.Port, - Description: p.Process, - } - if policy.IsInterestingService(s, version.OS()) { - sl = append(sl, s) - } - } - - b.mu.Lock() - if b.hostinfo == nil { - b.hostinfo = new(tailcfg.Hostinfo) - } - b.hostinfo.Services = sl - b.mu.Unlock() - - b.doSetHostinfoFilterServices() - } -} - // GetPushDeviceToken returns the push notification device token. func (b *LocalBackend) GetPushDeviceToken() string { return b.pushDeviceToken.Load() @@ -3853,23 +3792,6 @@ func (b *LocalBackend) parseWgStatusLocked(s *wgengine.Status) (ret ipn.EngineSt return ret } -// shouldUploadServices reports whether this node should include services -// in Hostinfo. When the user preferences currently request "shields up" -// mode, all inbound connections are refused, so services are not reported. -// Otherwise, shouldUploadServices respects NetMap.CollectServices. -// TODO(nickkhyl): move this into [nodeBackend]? -func (b *LocalBackend) shouldUploadServices() bool { - b.mu.Lock() - defer b.mu.Unlock() - - p := b.pm.CurrentPrefs() - nm := b.currentNode().NetMap() - if !p.Valid() || nm == nil { - return false // default to safest setting - } - return !p.ShieldsUp() && nm.CollectServices -} - // SetCurrentUser is used to implement support for multi-user systems (only // Windows 2022-11-25). On such systems, the actor is used to determine which // user's state should be used. The current user is maintained by active @@ -4812,6 +4734,25 @@ func (b *LocalBackend) peerAPIServicesLocked() (ret []tailcfg.Service) { return ret } +// PortlistServices is an eventbus topic for the portlist extension +// to advertise the running services on the host. +type PortlistServices []tailcfg.Service + +func (b *LocalBackend) setPortlistServices(sl []tailcfg.Service) { + if !buildfeatures.HasPortList { // redundant, but explicit for linker deadcode and humans + return + } + + b.mu.Lock() + if b.hostinfo == nil { + b.hostinfo = new(tailcfg.Hostinfo) + } + b.hostinfo.Services = sl + b.mu.Unlock() + + b.doSetHostinfoFilterServices() +} + // doSetHostinfoFilterServices calls SetHostinfo on the controlclient, // possibly after mangling the given hostinfo. // @@ -4837,13 +4778,15 @@ func (b *LocalBackend) doSetHostinfoFilterServices() { // TODO(maisem,bradfitz): store hostinfo as a view, not as a mutable struct. 
hi := *b.hostinfo // shallow copy - unlock.UnlockEarly() // Make a shallow copy of hostinfo so we can mutate // at the Service field. - if !b.shouldUploadServices() { + if f, ok := b.extHost.Hooks().ShouldUploadServices.GetOk(); !ok || !f() { hi.Services = []tailcfg.Service{} } + + unlock.UnlockEarly() + // Don't mutate hi.Service's underlying array. Append to // the slice with no free capacity. c := len(hi.Services) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 56d65767b..fd78c3418 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -5816,7 +5816,6 @@ func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys t.Fatalf("NewLocalBackend: %v", err) } t.Cleanup(b.Shutdown) - b.DisablePortPollerForTest() b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { return newControl(t, opts), nil diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 4319ed372..a6e4b51f1 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -258,6 +258,12 @@ func (nb *nodeBackend) PeersForTest() []tailcfg.NodeView { return ret } +func (nb *nodeBackend) CollectServices() bool { + nb.mu.Lock() + defer nb.mu.Unlock() + return nb.netMap != nil && nb.netMap.CollectServices +} + // AppendMatchingPeers returns base with all peers that match pred appended. // // It acquires b.mu to read the netmap but releases it before calling pred. diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 1a32f3156..9c0aa66a9 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -358,7 +358,6 @@ func TestStateMachine(t *testing.T) { t.Fatalf("NewLocalBackend: %v", err) } t.Cleanup(b.Shutdown) - b.DisablePortPollerForTest() var cc, previousCC *mockControl b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { diff --git a/ipn/lapitest/backend.go b/ipn/lapitest/backend.go index 725ffa4de..7a1c276a7 100644 --- a/ipn/lapitest/backend.go +++ b/ipn/lapitest/backend.go @@ -45,7 +45,6 @@ func newBackend(opts *options) *ipnlocal.LocalBackend { tb.Fatalf("NewLocalBackend: %v", err) } tb.Cleanup(b.Shutdown) - b.DisablePortPollerForTest() b.SetControlClientGetterForTesting(opts.MakeControlClient) return b } diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 795e4367f..ece4345d5 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -251,7 +251,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet - tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store @@ -288,7 +287,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/netknob from tailscale.com/logpolicy+ 💣 tailscale.com/net/netmon from tailscale.com/control/controlclient+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ - W 💣 tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ @@ -312,7 +310,6 @@ tailscale.com/tsnet 
dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ @@ -356,7 +353,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/cmpver from tailscale.com/clientupdate+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - LA 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ + LA 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/ipn/localapi+ tailscale.com/util/execqueue from tailscale.com/appc+ diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index d00628453..1e22681fc 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -43,6 +43,7 @@ import ( "tailscale.com/net/netns" "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/tstest/deptest" "tailscale.com/tstest/integration" "tailscale.com/tstest/integration/testcontrol" "tailscale.com/types/key" @@ -1302,3 +1303,15 @@ func mustDirect(t *testing.T, logf logger.Logf, lc1, lc2 *local.Client) { } t.Error("magicsock did not find a direct path from lc1 to lc2") } + +func TestDeps(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + OnDep: func(dep string) { + if strings.Contains(dep, "portlist") { + t.Errorf("unexpected dep: %q", dep) + } + }, + }.Check(t) +} From 892f8a9582156514a2bc6c3b447d3e972f4d94ff Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 24 Sep 2025 18:37:42 -0500 Subject: [PATCH 1395/1708] various: allow tailscaled shutdown via LocalAPI A customer wants to allow their employees to restart tailscaled at will, when access rights and MDM policy allow it, as a way to fully reset client state and re-create the tunnel in case of connectivity issues. On Windows, the main tailscaled process runs as a child of a service process. The service restarts the child when it exits (or crashes) until the service itself is stopped. Regular (non-admin) users can't stop the service, and allowing them to do so isn't ideal, especially in managed or multi-user environments. In this PR, we add a LocalAPI endpoint that instructs ipnserver.Server, and by extension the tailscaled process, to shut down. The service then restarts the child tailscaled. Shutting down tailscaled requires LocalAPI write access and an enabled policy setting. 
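As a usage sketch (editorial; not part of this patch), the new endpoint can be exercised through the client/local helper added below. The snippet assumes the zero-value local.Client targets the local tailscaled and that the AllowTailscaledRestart device policy is enabled; otherwise the request is rejected with 403 Forbidden.

    package main

    import (
        "context"
        "log"
        "time"

        "tailscale.com/client/local"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()

        var lc local.Client // assumption: the zero value talks to the local tailscaled
        // POSTs /localapi/v0/shutdown; fails unless the caller has LocalAPI write
        // access and the AllowTailscaledRestart policy permits it.
        if err := lc.ShutdownTailscaled(ctx); err != nil {
            log.Fatalf("shutdown request failed: %v", err)
        }
        log.Printf("tailscaled accepted the shutdown request")
    }

On Windows the service process then restarts tailscaled automatically (see babysitProc in cmd/tailscaled/tailscaled_windows.go); on other platforms restarting it is the caller's responsibility.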
Updates tailscale/corp#32674 Updates tailscale/corp#32675 Signed-off-by: Nick Khyl --- client/local/local.go | 6 ++++ cmd/tailscaled/tailscaled.go | 2 +- cmd/tsconnect/wasm/wasm_js.go | 2 +- ipn/ipnserver/server.go | 13 ++++++-- ipn/ipnserver/server_test.go | 60 +++++++++++++++++++++++++++++++++++ ipn/lapitest/server.go | 2 +- ipn/localapi/localapi.go | 37 +++++++++++++++++++++ util/syspolicy/pkey/pkey.go | 7 ++++ util/syspolicy/policy_keys.go | 1 + 9 files changed, 125 insertions(+), 5 deletions(-) diff --git a/client/local/local.go b/client/local/local.go index 1be1f2ca7..246112c37 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -1368,3 +1368,9 @@ func (lc *Client) SuggestExitNode(ctx context.Context) (apitype.ExitNodeSuggesti } return decodeJSON[apitype.ExitNodeSuggestionResponse](body) } + +// ShutdownTailscaled requests a graceful shutdown of tailscaled. +func (lc *Client) ShutdownTailscaled(ctx context.Context) error { + _, err := lc.send(ctx, "POST", "/localapi/v0/shutdown", 200, nil) + return err +} diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 0c6e6d22f..636627539 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -546,7 +546,7 @@ func startIPNServer(ctx context.Context, logf logger.Logf, logID logid.PublicID, } }() - srv := ipnserver.New(logf, logID, sys.NetMon.Get()) + srv := ipnserver.New(logf, logID, sys.Bus.Get(), sys.NetMon.Get()) if debugMux != nil { debugMux.HandleFunc("/debug/ipn", srv.ServeHTMLStatus) } diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index ea40dba9c..fbf7968a0 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -138,7 +138,7 @@ func newIPN(jsConfig js.Value) map[string]any { sys.Tun.Get().Start() logid := lpc.PublicID - srv := ipnserver.New(logf, logid, sys.NetMon.Get()) + srv := ipnserver.New(logf, logid, sys.Bus.Get(), sys.NetMon.Get()) lb, err := ipnlocal.NewLocalBackend(logf, logid, sys, controlclient.LoginEphemeral) if err != nil { log.Fatalf("ipnlocal.NewLocalBackend: %v", err) diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index fdbd82b0b..7e864959b 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -29,6 +29,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/set" "tailscale.com/util/systemd" @@ -40,6 +41,7 @@ import ( type Server struct { lb atomic.Pointer[ipnlocal.LocalBackend] logf logger.Logf + bus *eventbus.Bus netMon *netmon.Monitor // must be non-nil backendLogID logid.PublicID @@ -446,13 +448,14 @@ func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (o // // At some point, either before or after Run, the Server's SetLocalBackend // method must also be called before Server can do anything useful. 
-func New(logf logger.Logf, logID logid.PublicID, netMon *netmon.Monitor) *Server { +func New(logf logger.Logf, logID logid.PublicID, bus *eventbus.Bus, netMon *netmon.Monitor) *Server { if netMon == nil { panic("nil netMon") } return &Server{ backendLogID: logID, logf: logf, + bus: bus, netMon: netMon, } } @@ -494,10 +497,16 @@ func (s *Server) Run(ctx context.Context, ln net.Listener) error { runDone := make(chan struct{}) defer close(runDone) - // When the context is closed or when we return, whichever is first, close our listener + ec := s.bus.Client("ipnserver.Server") + defer ec.Close() + shutdownSub := eventbus.Subscribe[localapi.Shutdown](ec) + + // When the context is closed, a [localapi.Shutdown] event is received, + // or when we return, whichever is first, close our listener // and all open connections. go func() { select { + case <-shutdownSub.Events(): case <-ctx.Done(): case <-runDone: } diff --git a/ipn/ipnserver/server_test.go b/ipn/ipnserver/server_test.go index 903cb6b73..713db9e50 100644 --- a/ipn/ipnserver/server_test.go +++ b/ipn/ipnserver/server_test.go @@ -5,6 +5,7 @@ package ipnserver_test import ( "context" + "errors" "runtime" "strconv" "sync" @@ -14,7 +15,10 @@ import ( "tailscale.com/envknob" "tailscale.com/ipn" "tailscale.com/ipn/lapitest" + "tailscale.com/tsd" "tailscale.com/types/ptr" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policytest" ) func TestUserConnectDisconnectNonWindows(t *testing.T) { @@ -253,6 +257,62 @@ func TestBlockWhileIdentityInUse(t *testing.T) { } } +func TestShutdownViaLocalAPI(t *testing.T) { + t.Parallel() + + errAccessDeniedByPolicy := errors.New("Access denied: shutdown access denied by policy") + + tests := []struct { + name string + allowTailscaledRestart *bool + wantErr error + }{ + { + name: "AllowTailscaledRestart/NotConfigured", + allowTailscaledRestart: nil, + wantErr: errAccessDeniedByPolicy, + }, + { + name: "AllowTailscaledRestart/False", + allowTailscaledRestart: ptr.To(false), + wantErr: errAccessDeniedByPolicy, + }, + { + name: "AllowTailscaledRestart/True", + allowTailscaledRestart: ptr.To(true), + wantErr: nil, // shutdown should be allowed + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + sys := tsd.NewSystem() + + var pol policytest.Config + if tt.allowTailscaledRestart != nil { + pol.Set(pkey.AllowTailscaledRestart, *tt.allowTailscaledRestart) + } + sys.Set(pol) + + server := lapitest.NewServer(t, lapitest.WithSys(sys)) + lc := server.ClientWithName("User") + + err := lc.ShutdownTailscaled(t.Context()) + checkError(t, err, tt.wantErr) + }) + } +} + +func checkError(tb testing.TB, got, want error) { + tb.Helper() + if (want == nil) != (got == nil) || + (want != nil && got != nil && want.Error() != got.Error() && !errors.Is(got, want)) { + tb.Fatalf("gotErr: %v; wantErr: %v", got, want) + } +} + func setGOOSForTest(tb testing.TB, goos string) { tb.Helper() envknob.Setenv("TS_DEBUG_FAKE_GOOS", goos) diff --git a/ipn/lapitest/server.go b/ipn/lapitest/server.go index d477dc182..457a338ab 100644 --- a/ipn/lapitest/server.go +++ b/ipn/lapitest/server.go @@ -236,7 +236,7 @@ func (s *Server) Close() { func newUnstartedIPNServer(opts *options) *ipnserver.Server { opts.TB().Helper() lb := opts.Backend() - server := ipnserver.New(opts.Logf(), logid.PublicID{}, lb.NetMon()) + server := ipnserver.New(opts.Logf(), logid.PublicID{}, lb.EventBus(), lb.NetMon()) server.SetLocalBackend(lb) return server } diff --git a/ipn/localapi/localapi.go 
b/ipn/localapi/localapi.go index 01966f84b..a83a2e17e 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -49,6 +49,7 @@ import ( "tailscale.com/util/mak" "tailscale.com/util/osdiag" "tailscale.com/util/rands" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/version" "tailscale.com/wgengine/magicsock" ) @@ -112,6 +113,7 @@ var handler = map[string]LocalAPIHandler{ "set-push-device-token": (*Handler).serveSetPushDeviceToken, "set-udp-gro-forwarding": (*Handler).serveSetUDPGROForwarding, "set-use-exit-node-enabled": (*Handler).serveSetUseExitNodeEnabled, + "shutdown": (*Handler).serveShutdown, "start": (*Handler).serveStart, "status": (*Handler).serveStatus, "suggest-exit-node": (*Handler).serveSuggestExitNode, @@ -2026,3 +2028,38 @@ func (h *Handler) serveSuggestExitNode(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(res) } + +// Shutdown is an eventbus value published when tailscaled shutdown +// is requested via LocalAPI. Its only consumer is [ipnserver.Server]. +type Shutdown struct{} + +// serveShutdown shuts down tailscaled. It requires write access +// and the [pkey.AllowTailscaledRestart] policy to be enabled. +// See tailscale/corp#32674. +func (h *Handler) serveShutdown(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.POST { + http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) + return + } + + if !h.PermitWrite { + http.Error(w, "shutdown access denied", http.StatusForbidden) + return + } + + polc := h.b.Sys().PolicyClientOrDefault() + if permitShutdown, _ := polc.GetBoolean(pkey.AllowTailscaledRestart, false); !permitShutdown { + http.Error(w, "shutdown access denied by policy", http.StatusForbidden) + return + } + + ec := h.eventBus.Client("localapi.Handler") + defer ec.Close() + + w.WriteHeader(http.StatusOK) + if f, ok := w.(http.Flusher); ok { + f.Flush() + } + + eventbus.Publish[Shutdown](ec).Publish(Shutdown{}) +} diff --git a/util/syspolicy/pkey/pkey.go b/util/syspolicy/pkey/pkey.go index cfef9e17a..1ef969d72 100644 --- a/util/syspolicy/pkey/pkey.go +++ b/util/syspolicy/pkey/pkey.go @@ -47,6 +47,13 @@ const ( // An empty string or a zero duration disables automatic reconnection. ReconnectAfter Key = "ReconnectAfter" + // AllowTailscaledRestart is a boolean key that controls whether users with write access + // to the LocalAPI are allowed to shutdown tailscaled with the intention of restarting it. + // On Windows, tailscaled will be restarted automatically by the service process + // (see babysitProc in cmd/tailscaled/tailscaled_windows.go). + // On other platforms, it is the client's responsibility to restart tailscaled. + AllowTailscaledRestart Key = "AllowTailscaledRestart" + // ExitNodeID is the exit node's node id. default ""; if blank, no exit node is forced. // Exit node ID takes precedence over exit node IP. // To find the node ID, go to /api.md#device. 
diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index ef2ac430d..ae902e8c4 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -17,6 +17,7 @@ var implicitDefinitions = []*setting.Definition{ // Device policy settings (can only be configured on a per-device basis): setting.NewDefinition(pkey.AllowedSuggestedExitNodes, setting.DeviceSetting, setting.StringListValue), setting.NewDefinition(pkey.AllowExitNodeOverride, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.AllowTailscaledRestart, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(pkey.AlwaysOn, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(pkey.AlwaysOnOverrideWithReason, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(pkey.ApplyUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), From 9154bc10f09c0a72b6807fbb6b91e2e2690bac48 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 26 Sep 2025 07:31:35 +0100 Subject: [PATCH 1396/1708] tstest/integration: skip this test rather than commenting it out Updates #17108 Signed-off-by: Alex Chan --- tstest/integration/integration_test.go | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index f65ae1659..6e5022edb 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -309,16 +309,12 @@ func TestOneNodeUpAuth(t *testing.T) { alreadyLoggedIn: true, needsNewAuthURL: false, }, - // TODO(alexc): This test is failing because of a bug in `tailscale up` where - // it waits for ipn to enter the "Running" state. If we're already logged in - // and running, this completes immediately, before we've had a chance to show - // the user the auth URL. - // { - // name: "up-with-force-reauth-after-login", - // args: []string{"up", "--force-reauth"}, - // alreadyLoggedIn: true, - // needsNewAuthURL: true, - // }, + { + name: "up-with-force-reauth-after-login", + args: []string{"up", "--force-reauth"}, + alreadyLoggedIn: true, + needsNewAuthURL: true, + }, { name: "up-with-auth-key-after-login", args: []string{"up", "--auth-key=opensesame"}, @@ -341,6 +337,14 @@ func TestOneNodeUpAuth(t *testing.T) { t.Run(fmt.Sprintf("%s-seamless-%t", tt.name, useSeamlessKeyRenewal), func(t *testing.T) { tstest.Parallel(t) + // TODO(alexc): This test is failing because of a bug in `tailscale up` where + // it waits for ipn to enter the "Running" state. If we're already logged in + // and running, this completes immediately, before we've had a chance to show + // the user the auth URL. 
+ if tt.name == "up-with-force-reauth-after-login" { + t.Skip() + } + env := NewTestEnv(t, ConfigureControl( func(control *testcontrol.Server) { if tt.authKey != "" { From 260fe38ad8f7a0dfeb74872979e9e1729a211d65 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 25 Sep 2025 20:48:41 -0700 Subject: [PATCH 1397/1708] Makefile, cmd/tailscaled: add minimal tailscale+cli binary depaware Updates #12614 Change-Id: I593ed30f620556c6503d80c0ccbbe242567fd5cf Signed-off-by: Brad Fitzpatrick --- Makefile | 6 +- cmd/tailscaled/depaware-minbox.txt | 498 +++++++++++++++++++++++++++++ 2 files changed, 503 insertions(+), 1 deletion(-) create mode 100644 cmd/tailscaled/depaware-minbox.txt diff --git a/Makefile b/Makefile index 532bded94..95959fcf0 100644 --- a/Makefile +++ b/Makefile @@ -25,8 +25,10 @@ updatedeps: ## Update depaware deps tailscale.com/cmd/k8s-operator \ tailscale.com/cmd/stund \ tailscale.com/cmd/tsidp - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update -goos=linux,darwin,windows,android,ios --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --goos=linux,darwin,windows,android,ios --internal \ tailscale.com/tsnet + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --internal \ + tailscale.com/cmd/tailscaled depaware: ## Run depaware checks # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" @@ -40,6 +42,8 @@ depaware: ## Run depaware checks tailscale.com/cmd/tsidp PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --goos=linux,darwin,windows,android,ios --internal \ tailscale.com/tsnet + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --internal \ + tailscale.com/cmd/tailscaled buildwindows: ## Build tailscale CLI for windows/amd64 GOOS=windows GOARCH=amd64 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt new file mode 100644 index 000000000..0c3e08c15 --- /dev/null +++ b/cmd/tailscaled/depaware-minbox.txt @@ -0,0 +1,498 @@ +tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) + + filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus + filippo.io/edwards25519/field from filippo.io/edwards25519 + github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw + github.com/digitalocean/go-smbios/smbios from tailscale.com/posture + github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/go-json-experiment/json from tailscale.com/drive+ + github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ + 
github.com/golang/groupcache/lru from tailscale.com/net/dnscache + github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ + github.com/google/nftables from tailscale.com/util/linuxfw + 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt + 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ + github.com/google/nftables/expr from github.com/google/nftables+ + github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ + github.com/google/nftables/xt from github.com/google/nftables/expr+ + github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ + 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon + github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink + github.com/klauspost/compress from github.com/klauspost/compress/zstd + github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 + github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd + github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd + github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe + github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd + github.com/mdlayher/genetlink from tailscale.com/net/tstun + 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ + github.com/mdlayher/netlink/nltest from github.com/google/nftables + github.com/mdlayher/sdnotify from tailscale.com/util/systemd + 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ + github.com/mitchellh/go-ps from tailscale.com/safesocket + 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + github.com/tailscale/hujson from tailscale.com/ipn/conffile + 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ + 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink + github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ + 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ + github.com/tailscale/wireguard-go/ipc from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/ratelimiter from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/replay from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ + github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device + 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ + github.com/vishvananda/netns from github.com/tailscale/netlink+ + 💣 go4.org/mem from tailscale.com/control/controlbase+ + go4.org/netipx from tailscale.com/ipn/ipnlocal+ + gvisor.dev/gvisor/pkg/atomicbitops from gvisor.dev/gvisor/pkg/buffer+ + gvisor.dev/gvisor/pkg/bits from gvisor.dev/gvisor/pkg/buffer + 💣 gvisor.dev/gvisor/pkg/buffer from gvisor.dev/gvisor/pkg/tcpip+ + gvisor.dev/gvisor/pkg/context from gvisor.dev/gvisor/pkg/refs + 💣 gvisor.dev/gvisor/pkg/gohacks from gvisor.dev/gvisor/pkg/state/wire+ + gvisor.dev/gvisor/pkg/linewriter from gvisor.dev/gvisor/pkg/log + gvisor.dev/gvisor/pkg/log from gvisor.dev/gvisor/pkg/context+ + gvisor.dev/gvisor/pkg/rand from 
gvisor.dev/gvisor/pkg/tcpip+ + gvisor.dev/gvisor/pkg/refs from gvisor.dev/gvisor/pkg/buffer+ + 💣 gvisor.dev/gvisor/pkg/sleep from gvisor.dev/gvisor/pkg/tcpip/transport/tcp + 💣 gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ + gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state + 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ + 💣 gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack + gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/tcpip/adapters/gonet from tailscale.com/wgengine/netstack + 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ + gvisor.dev/gvisor/pkg/tcpip/hash/jenkins from gvisor.dev/gvisor/pkg/tcpip/stack+ + gvisor.dev/gvisor/pkg/tcpip/header from gvisor.dev/gvisor/pkg/tcpip/header/parse+ + gvisor.dev/gvisor/pkg/tcpip/header/parse from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ + gvisor.dev/gvisor/pkg/tcpip/internal/tcp from gvisor.dev/gvisor/pkg/tcpip/transport/tcp + gvisor.dev/gvisor/pkg/tcpip/network/hash from gvisor.dev/gvisor/pkg/tcpip/network/ipv4 + gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ + gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ + gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ + gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ + gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ + 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/tcpip/stack/gro from tailscale.com/wgengine/netstack/gro + gvisor.dev/gvisor/pkg/tcpip/transport from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ + gvisor.dev/gvisor/pkg/tcpip/transport/icmp from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip/transport/internal/network from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ + gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop from gvisor.dev/gvisor/pkg/tcpip/transport/raw + gvisor.dev/gvisor/pkg/tcpip/transport/packet from gvisor.dev/gvisor/pkg/tcpip/transport/raw + gvisor.dev/gvisor/pkg/tcpip/transport/raw from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ + 💣 gvisor.dev/gvisor/pkg/tcpip/transport/tcp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack from gvisor.dev/gvisor/pkg/tcpip/stack + gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ + tailscale.com from tailscale.com/version + tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/atomicfile from tailscale.com/ipn+ + tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ + tailscale.com/clientupdate from tailscale.com/ipn/ipnlocal+ + tailscale.com/clientupdate/distsign from tailscale.com/clientupdate + tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled + tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ + tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ + tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp/controlhttpcommon from 
tailscale.com/control/controlhttp + tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/derp from tailscale.com/derp/derphttp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ + tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ + tailscale.com/disco from tailscale.com/net/tstun+ + tailscale.com/doctor from tailscale.com/ipn/ipnlocal + tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal + tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal + tailscale.com/drive from tailscale.com/ipn+ + tailscale.com/envknob from tailscale.com/cmd/tailscaled+ + tailscale.com/envknob/featureknob from tailscale.com/ipn/ipnlocal + tailscale.com/feature from tailscale.com/cmd/tailscaled+ + tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ + tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister + tailscale.com/health from tailscale.com/cmd/tailscaled+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ + tailscale.com/hostinfo from tailscale.com/clientupdate+ + tailscale.com/internal/noiseconn from tailscale.com/control/controlclient + tailscale.com/ipn from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled + tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+ + tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver + tailscale.com/ipn/store from tailscale.com/cmd/tailscaled + tailscale.com/ipn/store/mem from tailscale.com/ipn/store + tailscale.com/kube/kubetypes from tailscale.com/envknob + tailscale.com/log/filelogger from tailscale.com/logpolicy + tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal + tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ + tailscale.com/logtail from tailscale.com/cmd/tailscaled+ + tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ + tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ + tailscale.com/metrics from tailscale.com/health+ + tailscale.com/net/ace from tailscale.com/control/controlhttp + tailscale.com/net/bakedroots from tailscale.com/net/tlsdial + 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock + tailscale.com/net/connstats from tailscale.com/net/tstun+ + tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ + tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ + tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ + tailscale.com/net/dns/resolver from tailscale.com/net/dns+ + tailscale.com/net/dnscache from tailscale.com/control/controlclient+ + tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ + tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/netaddr from tailscale.com/ipn+ + tailscale.com/net/netcheck from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/neterror from tailscale.com/net/batching+ + tailscale.com/net/netkernelconf from tailscale.com/ipn/ipnlocal + tailscale.com/net/netknob from tailscale.com/logpolicy+ + tailscale.com/net/netmon from 
tailscale.com/cmd/tailscaled+ + tailscale.com/net/netns from tailscale.com/cmd/tailscaled+ + tailscale.com/net/netutil from tailscale.com/control/controlclient+ + tailscale.com/net/netx from tailscale.com/control/controlclient+ + tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/packet/checksum from tailscale.com/net/tstun + tailscale.com/net/ping from tailscale.com/net/netcheck+ + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ + tailscale.com/net/routetable from tailscale.com/doctor/routetable + tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock + tailscale.com/net/sockstats from tailscale.com/control/controlclient+ + tailscale.com/net/stun from tailscale.com/ipn/localapi+ + tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ + tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial + tailscale.com/net/tsaddr from tailscale.com/ipn+ + tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ + tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ + tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/omit from tailscale.com/ipn/conffile + tailscale.com/paths from tailscale.com/cmd/tailscaled+ + tailscale.com/posture from tailscale.com/ipn/ipnlocal + tailscale.com/proxymap from tailscale.com/tsd+ + tailscale.com/safesocket from tailscale.com/cmd/tailscaled+ + tailscale.com/syncs from tailscale.com/cmd/tailscaled+ + tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ + tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock + tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tka from tailscale.com/control/controlclient+ + tailscale.com/tsd from tailscale.com/cmd/tailscaled+ + tailscale.com/tstime from tailscale.com/control/controlclient+ + tailscale.com/tstime/mono from tailscale.com/net/tstun+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter + tailscale.com/tsweb from tailscale.com/util/eventbus + tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ + tailscale.com/types/empty from tailscale.com/ipn+ + tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled + tailscale.com/types/ipproto from tailscale.com/ipn+ + tailscale.com/types/key from tailscale.com/cmd/tailscaled+ + tailscale.com/types/lazy from tailscale.com/clientupdate+ + tailscale.com/types/logger from tailscale.com/appc+ + tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ + tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netmap from tailscale.com/control/controlclient+ + tailscale.com/types/nettype from tailscale.com/ipn/localapi+ + tailscale.com/types/opt from tailscale.com/control/controlknobs+ + tailscale.com/types/persist from tailscale.com/control/controlclient+ + tailscale.com/types/preftype from tailscale.com/ipn+ + tailscale.com/types/ptr from tailscale.com/control/controlclient+ + tailscale.com/types/result from tailscale.com/util/lineiter + tailscale.com/types/structs from tailscale.com/control/controlclient+ + tailscale.com/types/tkatype from tailscale.com/control/controlclient+ + tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/cibuild from 
tailscale.com/health + tailscale.com/util/clientmetric from tailscale.com/appc+ + tailscale.com/util/cloudenv from tailscale.com/hostinfo+ + tailscale.com/util/cmpver from tailscale.com/clientupdate + tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ + 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/util/dirwalk from tailscale.com/metrics + tailscale.com/util/dnsname from tailscale.com/appc+ + tailscale.com/util/eventbus from tailscale.com/cmd/tailscaled+ + tailscale.com/util/execqueue from tailscale.com/appc+ + tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal + tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth + 💣 tailscale.com/util/hashx from tailscale.com/util/deephash + tailscale.com/util/httpm from tailscale.com/clientupdate/distsign+ + tailscale.com/util/lineiter from tailscale.com/hostinfo+ + tailscale.com/util/linuxfw from tailscale.com/net/netns+ + tailscale.com/util/mak from tailscale.com/appc+ + tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ + tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/nocasemaps from tailscale.com/types/ipproto + tailscale.com/util/osdiag from tailscale.com/ipn/localapi + tailscale.com/util/osshare from tailscale.com/cmd/tailscaled + tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/race from tailscale.com/net/dns/resolver + tailscale.com/util/racebuild from tailscale.com/logpolicy + tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock + tailscale.com/util/set from tailscale.com/control/controlclient+ + tailscale.com/util/singleflight from tailscale.com/control/controlclient+ + tailscale.com/util/slicesx from tailscale.com/appc+ + tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/ptype from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/systemd from tailscale.com/control/controlclient+ + tailscale.com/util/testenv from tailscale.com/control/controlclient+ + tailscale.com/util/truncate from tailscale.com/logtail + tailscale.com/util/usermetric from tailscale.com/health+ + tailscale.com/util/vizerror from tailscale.com/tailcfg+ + tailscale.com/util/winutil from tailscale.com/ipn/ipnauth + tailscale.com/util/zstdframe from tailscale.com/control/controlclient+ + tailscale.com/version from tailscale.com/clientupdate+ + tailscale.com/version/distro from tailscale.com/clientupdate+ + tailscale.com/wgengine from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/filter from tailscale.com/control/controlclient+ + tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+ + 💣 tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/netlog from tailscale.com/wgengine + tailscale.com/wgengine/netstack from tailscale.com/cmd/tailscaled + tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ + tailscale.com/wgengine/router from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ + tailscale.com/wgengine/wglog from tailscale.com/wgengine + golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box + golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ + 
golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 + golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/hkdf from tailscale.com/control/controlbase + golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/nacl/box from tailscale.com/types/key + golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box + golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device + golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + golang.org/x/exp/constraints from tailscale.com/doctor/permissions+ + golang.org/x/exp/maps from tailscale.com/ipn/store/mem + golang.org/x/net/bpf from github.com/mdlayher/genetlink+ + golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/http/httpguts from golang.org/x/net/http2+ + golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ + golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2/hpack from golang.org/x/net/http2+ + golang.org/x/net/icmp from tailscale.com/net/ping + golang.org/x/net/idna from golang.org/x/net/http/httpguts+ + golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 + golang.org/x/net/internal/iana from golang.org/x/net/icmp+ + golang.org/x/net/internal/socket from golang.org/x/net/icmp+ + golang.org/x/net/internal/socks from golang.org/x/net/proxy + golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ + golang.org/x/net/ipv6 from github.com/tailscale/wireguard-go/conn+ + golang.org/x/net/proxy from tailscale.com/net/netns + golang.org/x/sync/errgroup from github.com/mdlayher/socket+ + golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ + golang.org/x/sys/unix from github.com/google/nftables+ + golang.org/x/term from tailscale.com/logpolicy + golang.org/x/text/secure/bidirule from golang.org/x/net/idna + golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ + golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ + golang.org/x/text/unicode/norm from golang.org/x/net/idna + golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ + archive/tar from tailscale.com/clientupdate + bufio from compress/flate+ + bytes from archive/tar+ + cmp from encoding/json+ + compress/flate from compress/gzip + compress/gzip from golang.org/x/net/http2+ + container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp + container/list from crypto/tls+ + context from crypto/tls+ + crypto from crypto/ecdh+ + crypto/aes from crypto/internal/hpke+ + crypto/cipher from crypto/aes+ + crypto/des from crypto/tls+ + crypto/dsa from crypto/x509 + crypto/ecdh from crypto/ecdsa+ + crypto/ecdsa from crypto/tls+ + crypto/ed25519 from crypto/tls+ + crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ + crypto/hmac from crypto/tls+ + crypto/internal/boring from crypto/aes+ + crypto/internal/boring/bbig from crypto/ecdsa+ + crypto/internal/boring/sig from crypto/internal/boring + crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/fips140 from crypto/fips140+ + crypto/internal/fips140/aes from crypto/aes+ + crypto/internal/fips140/aes/gcm from 
crypto/cipher+ + crypto/internal/fips140/alias from crypto/cipher+ + crypto/internal/fips140/bigmod from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/check from crypto/fips140+ + crypto/internal/fips140/drbg from crypto/internal/fips140/aes/gcm+ + crypto/internal/fips140/ecdh from crypto/ecdh + crypto/internal/fips140/ecdsa from crypto/ecdsa + crypto/internal/fips140/ed25519 from crypto/ed25519 + crypto/internal/fips140/edwards25519 from crypto/internal/fips140/ed25519 + crypto/internal/fips140/edwards25519/field from crypto/ecdh+ + crypto/internal/fips140/hkdf from crypto/hkdf+ + crypto/internal/fips140/hmac from crypto/hmac+ + crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/nistec from crypto/ecdsa+ + crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec + crypto/internal/fips140/rsa from crypto/rsa + crypto/internal/fips140/sha256 from crypto/internal/fips140/check+ + crypto/internal/fips140/sha3 from crypto/internal/fips140/hmac+ + crypto/internal/fips140/sha512 from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ + crypto/internal/fips140/tls12 from crypto/tls + crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ + crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140hash from crypto/ecdsa+ + crypto/internal/fips140only from crypto/cipher+ + crypto/internal/hpke from crypto/tls + crypto/internal/impl from crypto/internal/fips140/aes+ + crypto/internal/randutil from crypto/dsa+ + crypto/internal/sysrand from crypto/internal/entropy+ + crypto/md5 from crypto/tls+ + crypto/rand from crypto/ed25519+ + crypto/rc4 from crypto/tls + crypto/rsa from crypto/tls+ + crypto/sha1 from crypto/tls+ + crypto/sha256 from crypto/tls+ + crypto/sha3 from crypto/internal/fips140hash + crypto/sha512 from crypto/ecdsa+ + crypto/subtle from crypto/cipher+ + crypto/tls from golang.org/x/net/http2+ + crypto/tls/internal/fips140tls from crypto/tls + crypto/x509 from crypto/tls+ + crypto/x509/pkix from crypto/x509 + embed from tailscale.com+ + encoding from encoding/json+ + encoding/asn1 from crypto/x509+ + encoding/base32 from github.com/go-json-experiment/json + encoding/base64 from encoding/json+ + encoding/binary from compress/gzip+ + encoding/hex from crypto/x509+ + encoding/json from expvar+ + encoding/pem from crypto/tls+ + errors from archive/tar+ + expvar from tailscale.com/cmd/tailscaled+ + flag from tailscale.com/cmd/tailscaled+ + fmt from archive/tar+ + hash from crypto+ + hash/crc32 from compress/gzip+ + hash/maphash from go4.org/mem + html from net/http/pprof+ + internal/abi from hash/maphash+ + internal/asan from internal/runtime/maps+ + internal/bisect from internal/godebug + internal/bytealg from bytes+ + internal/byteorder from crypto/cipher+ + internal/chacha8rand from math/rand/v2+ + internal/coverage/rtcov from runtime + internal/cpu from crypto/internal/fips140deps/cpu+ + internal/filepathlite from os+ + internal/fmtsort from fmt + internal/goarch from crypto/internal/fips140deps/cpu+ + internal/godebug from archive/tar+ + internal/godebugs from internal/godebug+ + internal/goexperiment from hash/maphash+ + internal/goos from crypto/x509+ + internal/itoa from internal/poll+ + internal/msan from internal/runtime/maps+ + internal/nettrace from net+ + internal/oserror 
from io/fs+ + internal/poll from net+ + internal/profile from net/http/pprof + internal/profilerecord from runtime+ + internal/race from internal/runtime/maps+ + internal/reflectlite from context+ + internal/runtime/atomic from internal/runtime/exithook+ + internal/runtime/cgroup from runtime + internal/runtime/exithook from runtime + internal/runtime/gc from runtime + internal/runtime/maps from reflect+ + internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ + internal/runtime/sys from crypto/subtle+ + internal/runtime/syscall from internal/runtime/cgroup+ + internal/singleflight from net + internal/stringslite from embed+ + internal/sync from sync+ + internal/synctest from sync + internal/syscall/execenv from os+ + internal/syscall/unix from crypto/internal/sysrand+ + internal/testlog from os + internal/trace/tracev2 from runtime+ + internal/unsafeheader from internal/reflectlite+ + io from archive/tar+ + io/fs from archive/tar+ + io/ioutil from github.com/digitalocean/go-smbios/smbios+ + iter from bytes+ + log from expvar+ + log/internal from log + maps from archive/tar+ + math from archive/tar+ + math/big from crypto/dsa+ + math/bits from bytes+ + math/rand from github.com/mdlayher/netlink+ + math/rand/v2 from crypto/ecdsa+ + mime from mime/multipart+ + mime/multipart from net/http + mime/quotedprintable from mime/multipart + net from crypto/tls+ + net/http from expvar+ + net/http/httptrace from golang.org/x/net/http2+ + net/http/internal from net/http + net/http/internal/ascii from net/http + net/http/internal/httpcommon from net/http + net/http/pprof from tailscale.com/cmd/tailscaled+ + net/netip from crypto/x509+ + net/textproto from golang.org/x/net/http/httpguts+ + net/url from crypto/x509+ + os from crypto/internal/sysrand+ + os/exec from github.com/coreos/go-iptables/iptables+ + os/signal from tailscale.com/cmd/tailscaled + os/user from archive/tar+ + path from archive/tar+ + path/filepath from archive/tar+ + reflect from archive/tar+ + regexp from github.com/coreos/go-iptables/iptables+ + regexp/syntax from regexp + runtime from archive/tar+ + runtime/debug from github.com/klauspost/compress/zstd+ + runtime/pprof from net/http/pprof+ + runtime/trace from net/http/pprof + slices from archive/tar+ + sort from compress/flate+ + strconv from archive/tar+ + strings from archive/tar+ + sync from archive/tar+ + sync/atomic from context+ + syscall from archive/tar+ + text/tabwriter from runtime/pprof + time from archive/tar+ + unicode from bytes+ + unicode/utf16 from crypto/x509+ + unicode/utf8 from bufio+ + unique from net/netip + unsafe from bytes+ + weak from crypto/internal/fips140cache+ From c011369de2fb4b3cc2ce505402cba968b875f767 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 15 Sep 2025 23:20:38 +0100 Subject: [PATCH 1398/1708] cmd/tailscale/cli: start WatchIPNBus before initial Start This partially reverts f3d2fd2. When that patch was written, the goroutine that responds to IPN notifications could call `StartLoginInteractive`, creating a race condition that led to flaky integration tests. We no longer call `StartLoginInteractive` in that goroutine, so the race is now impossible. Moving the `WatchIPNBus` call earlier ensures the CLI gets all necessary IPN notifications, preventing a reauth from hanging. 
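The underlying hazard is a plain subscribe-before-publish ordering problem: any notification emitted between Start()/StartLoginInteractive() and a later WatchIPNBus call is simply never seen by the watcher. The following is a minimal, self-contained Go sketch of that pattern under toy assumptions; bus, subscribe, and start are illustrative stand-ins, not Tailscale APIs.

package main

import "fmt"

// bus is a toy stand-in for the IPN notification bus: subscribers that
// exist when an event is published receive it; later subscribers do not.
type bus struct{ subs []chan string }

func (b *bus) subscribe() chan string {
	ch := make(chan string, 8)
	b.subs = append(b.subs, ch)
	return ch
}

func (b *bus) publish(ev string) {
	for _, ch := range b.subs {
		ch <- ev
	}
}

// start simulates Start()/StartLoginInteractive() publishing an auth URL
// as a side effect of kicking off a login.
func start(b *bus) { b.publish("auth URL ready") }

func main() {
	// Subscribe first, then trigger the action: the notification arrives.
	b := &bus{}
	w := b.subscribe()
	start(b)
	fmt.Println("subscribe before start:", <-w)

	// Trigger first, then subscribe: the notification was published to
	// nobody, which is the missed-auth-URL hang described above.
	b2 := &bus{}
	start(b2)
	late := b2.subscribe()
	select {
	case ev := <-late:
		fmt.Println("unexpected:", ev)
	default:
		fmt.Println("subscribe after start: notification never seen")
	}
}

Subscribing early is safe here because the watcher is torn down with defer watcher.Close() regardless of which code path runs afterwards.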
Updates tailscale/corp#31476 Signed-off-by: Alex Chan --- cmd/tailscale/cli/up.go | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 96b561bee..0a15c8fb7 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -540,8 +540,18 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } }() - running := make(chan bool, 1) // gets value once in state ipn.Running - watchErr := make(chan error, 1) + // Start watching the IPN bus before we call Start() or StartLoginInteractive(), + // or we could miss IPN notifications. + // + // In particular, if we're doing a force-reauth, we could miss the + // notification with the auth URL we should print for the user. The + // initial state could contain the auth URL, but only if IPN is in the + // NeedsLogin state -- sometimes it's in Starting, and we don't get the URL. + watcher, err := localClient.WatchIPNBus(watchCtx, ipn.NotifyInitialState) + if err != nil { + return err + } + defer watcher.Close() // Special case: bare "tailscale up" means to just start // running, if there's ever been a login. @@ -587,11 +597,8 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } } - watcher, err := localClient.WatchIPNBus(watchCtx, ipn.NotifyInitialState) - if err != nil { - return err - } - defer watcher.Close() + running := make(chan bool, 1) + watchErr := make(chan error, 1) go func() { var printed bool // whether we've yet printed anything to stdout or stderr From 41a2aaf1da9be6c939058bdd32e253ab35373c42 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 16 Sep 2025 11:22:47 +0100 Subject: [PATCH 1399/1708] cmd/tailscale/cli: fix race condition in `up --force-reauth` This commit fixes a race condition where `tailscale up --force-reauth` would exit prematurely on an already-logged in device. Previously, the CLI would wait for IPN to report the "Running" state and then exit. However, this could happen before the new auth URL was printed, leading to two distinct issues: * **Without seamless key renewal:** The CLI could exit immediately after the `StartLoginInteractive` call, before IPN has time to switch into the "Starting" state or send a new auth URL back to the CLI. * **With seamless key renewal:** IPN stays in the "Running" state throughout the process, so the CLI exits immediately without performing any reauthentication. The fix is to change the CLI's exit condition. Instead of waiting for the "Running" state, if we're doing a `--force-reauth` we now wait to see the node key change, which is a more reliable indicator that a successful authentication has occurred. Updates tailscale/corp#31476 Updates tailscale/tailscale#17108 Signed-off-by: Alex Chan --- cmd/tailscale/cli/up.go | 67 ++++++++++++++++---------- tstest/integration/integration_test.go | 8 --- 2 files changed, 42 insertions(+), 33 deletions(-) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 0a15c8fb7..3c0883ec8 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -446,6 +446,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE return fixTailscaledConnectError(err) } origAuthURL := st.AuthURL + origNodeKey := st.Self.PublicKey // printAuthURL reports whether we should print out the // provided auth URL from an IPN notify. 
@@ -597,13 +598,24 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } } - running := make(chan bool, 1) + upComplete := make(chan bool, 1) watchErr := make(chan error, 1) go func() { var printed bool // whether we've yet printed anything to stdout or stderr var lastURLPrinted string + // If we're doing a force-reauth, we need to get two notifications: + // + // 1. IPN is running + // 2. The node key has changed + // + // These two notifications arrive separately, and trying to combine them + // has caused unexpected issues elsewhere in `tailscale up`. For now, we + // track them separately. + ipnIsRunning := false + waitingForKeyChange := upArgs.forceReauth + for { n, err := watcher.Next() if err != nil { @@ -614,29 +626,34 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE msg := *n.ErrMessage fatalf("backend error: %v\n", msg) } + if s := n.State; s != nil && *s == ipn.NeedsMachineAuth { + printed = true + if env.upArgs.json { + printUpDoneJSON(ipn.NeedsMachineAuth, "") + } else { + fmt.Fprintf(Stderr, "\nTo approve your machine, visit (as admin):\n\n\t%s\n\n", prefs.AdminPageURL(policyclient.Get())) + } + } if s := n.State; s != nil { - switch *s { - case ipn.NeedsMachineAuth: - printed = true - if env.upArgs.json { - printUpDoneJSON(ipn.NeedsMachineAuth, "") - } else { - fmt.Fprintf(Stderr, "\nTo approve your machine, visit (as admin):\n\n\t%s\n\n", prefs.AdminPageURL(policyclient.Get())) - } - case ipn.Running: - // Done full authentication process - if env.upArgs.json { - printUpDoneJSON(ipn.Running, "") - } else if printed { - // Only need to print an update if we printed the "please click" message earlier. - fmt.Fprintf(Stderr, "Success.\n") - } - select { - case running <- true: - default: - } - cancelWatch() + ipnIsRunning = *s == ipn.Running + } + if n.NetMap != nil && n.NetMap.NodeKey != origNodeKey { + waitingForKeyChange = false + } + if ipnIsRunning && !waitingForKeyChange { + // Done full authentication process + if env.upArgs.json { + printUpDoneJSON(ipn.Running, "") + } else if printed { + // Only need to print an update if we printed the "please click" message earlier. + fmt.Fprintf(Stderr, "Success.\n") + } + select { + case upComplete <- true: + default: } + cancelWatch() + return } if url := n.BrowseToURL; url != nil { authURL := *url @@ -698,18 +715,18 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE timeoutCh = timeoutTimer.C } select { - case <-running: + case <-upComplete: return nil case <-watchCtx.Done(): select { - case <-running: + case <-upComplete: return nil default: } return watchCtx.Err() case err := <-watchErr: select { - case <-running: + case <-upComplete: return nil default: } diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 6e5022edb..fde4ff35a 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -337,14 +337,6 @@ func TestOneNodeUpAuth(t *testing.T) { t.Run(fmt.Sprintf("%s-seamless-%t", tt.name, useSeamlessKeyRenewal), func(t *testing.T) { tstest.Parallel(t) - // TODO(alexc): This test is failing because of a bug in `tailscale up` where - // it waits for ipn to enter the "Running" state. If we're already logged in - // and running, this completes immediately, before we've had a chance to show - // the user the auth URL. 
-			if tt.name == "up-with-force-reauth-after-login" {
-				t.Skip()
-			}
-
 			env := NewTestEnv(t, ConfigureControl(
 				func(control *testcontrol.Server) {
 					if tt.authKey != "" {

From 8b3e88cd094c745f6e57f8ca53edb16792d3fee2 Mon Sep 17 00:00:00 2001
From: James Tucker
Date: Fri, 26 Sep 2025 08:06:39 -0700
Subject: [PATCH 1400/1708] wgengine/magicsock: fix rebind debouncing (#17282)

On platforms that hit EPIPE at a high frequency this results in
non-working connections, for example when Apple decides to forcefully
close UDP sockets due to an unsolicited packet rejection in the
firewall.

Too-frequent rebinds cause a failure to solicit the endpoints
triggering the rebinds, which would normally happen via CallMeMaybe.

Updates #14551
Updates tailscale/corp#25648

Signed-off-by: James Tucker
---
 wgengine/magicsock/magicsock.go      |  1 +
 wgengine/magicsock/magicsock_test.go | 40 ++++++++++++++++++++--------
 2 files changed, 30 insertions(+), 11 deletions(-)

diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go
index 0d8a1e53a..e3cf249c5 100644
--- a/wgengine/magicsock/magicsock.go
+++ b/wgengine/magicsock/magicsock.go
@@ -1563,6 +1563,7 @@ func (c *Conn) maybeRebindOnError(err error) {
 
 		if c.lastErrRebind.Load().Before(time.Now().Add(-5 * time.Second)) {
 			c.logf("magicsock: performing rebind due to %q", reason)
+			c.lastErrRebind.Store(time.Now())
 			c.Rebind()
 			go c.ReSTUN(reason)
 		} else {
diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go
index c6be9129d..1f533ddef 100644
--- a/wgengine/magicsock/magicsock_test.go
+++ b/wgengine/magicsock/magicsock_test.go
@@ -27,6 +27,7 @@ import (
 	"sync/atomic"
 	"syscall"
 	"testing"
+	"testing/synctest"
 	"time"
 	"unsafe"
 
@@ -3114,18 +3115,35 @@ func TestMaybeRebindOnError(t *testing.T) {
 	}
 
 	t.Run("no-frequent-rebind", func(t *testing.T) {
-		if runtime.GOOS != "plan9" {
-			err := fmt.Errorf("outer err: %w", syscall.EPERM)
-			conn := newTestConn(t)
-			defer conn.Close()
-			conn.lastErrRebind.Store(time.Now().Add(-1 * time.Second))
-			before := metricRebindCalls.Value()
-			conn.maybeRebindOnError(err)
-			after := metricRebindCalls.Value()
-			if before != after {
-				t.Errorf("should not rebind within 5 seconds of last")
+		synctest.Test(t, func(t *testing.T) {
+			if runtime.GOOS != "plan9" {
+				err := fmt.Errorf("outer err: %w", syscall.EPERM)
+				conn := newTestConn(t)
+				defer conn.Close()
+				lastRebindTime := time.Now().Add(-1 * time.Second)
+				conn.lastErrRebind.Store(lastRebindTime)
+				before := metricRebindCalls.Value()
+				conn.maybeRebindOnError(err)
+				after := metricRebindCalls.Value()
+				if before != after {
+					t.Errorf("should not rebind within 5 seconds of last")
+				}
+
+				// ensure that rebinds are performed and store an updated last
+				// rebind time.
+ time.Sleep(6 * time.Second) + + conn.maybeRebindOnError(err) + newTime := conn.lastErrRebind.Load() + if newTime == lastRebindTime { + t.Errorf("expected a rebind to occur") + } + if newTime.Sub(lastRebindTime) < 5*time.Second { + t.Errorf("expected at least 5 seconds between %s and %s", lastRebindTime, newTime) + } } - } + + }) }) } From 002ecb78d0c76d2e25bd7fb0b773f37c7c19dcb4 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 26 Sep 2025 07:35:54 +0100 Subject: [PATCH 1401/1708] all: don't rebind variables in for loops See https://tip.golang.org/wiki/LoopvarExperiment#does-this-mean-i-dont-have-to-write-x--x-in-my-loops-anymore Updates https://github.com/tailscale/tailscale/issues/11058 Signed-off-by: Alex Chan --- cmd/tailscale/cli/ffcomplete/internal/complete_test.go | 1 - net/netcheck/netcheck.go | 1 - tstest/archtest/qemu_test.go | 1 - tstest/clock_test.go | 9 --------- tstest/integration/integration_test.go | 1 - wgengine/magicsock/derp.go | 1 - wgengine/netstack/netstack.go | 4 ---- 7 files changed, 18 deletions(-) diff --git a/cmd/tailscale/cli/ffcomplete/internal/complete_test.go b/cmd/tailscale/cli/ffcomplete/internal/complete_test.go index 7e36b1bcd..c216bdeec 100644 --- a/cmd/tailscale/cli/ffcomplete/internal/complete_test.go +++ b/cmd/tailscale/cli/ffcomplete/internal/complete_test.go @@ -196,7 +196,6 @@ func TestComplete(t *testing.T) { // Run the tests. for _, test := range tests { - test := test name := strings.Join(test.args, "␣") if test.showFlags { name += "+flags" diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 169133ceb..726221675 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -1073,7 +1073,6 @@ func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *report continue } wg.Add(1) - rg := rg go func() { defer wg.Done() node := rg.Nodes[0] diff --git a/tstest/archtest/qemu_test.go b/tstest/archtest/qemu_test.go index 8b59ae5d9..68ec38851 100644 --- a/tstest/archtest/qemu_test.go +++ b/tstest/archtest/qemu_test.go @@ -33,7 +33,6 @@ func TestInQemu(t *testing.T) { } inCI := cibuild.On() for _, arch := range arches { - arch := arch t.Run(arch.Goarch, func(t *testing.T) { t.Parallel() qemuUser := "qemu-" + arch.Qarch diff --git a/tstest/clock_test.go b/tstest/clock_test.go index d5816564a..2ebaf752a 100644 --- a/tstest/clock_test.go +++ b/tstest/clock_test.go @@ -56,7 +56,6 @@ func TestClockWithDefinedStartTime(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ @@ -118,7 +117,6 @@ func TestClockWithDefaultStartTime(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ @@ -277,7 +275,6 @@ func TestClockSetStep(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ @@ -426,7 +423,6 @@ func TestClockAdvance(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ @@ -876,7 +872,6 @@ func TestSingleTicker(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() var realTimeClockForTestClock tstime.Clock @@ -1377,7 +1372,6 @@ func TestSingleTimer(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() var realTimeClockForTestClock tstime.Clock @@ -1911,7 +1905,6 @@ func TestClockFollowRealTime(t 
*testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() realTimeClock := NewClock(tt.realTimeClockOpts) @@ -2364,7 +2357,6 @@ func TestAfterFunc(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() var realTimeClockForTestClock tstime.Clock @@ -2468,7 +2460,6 @@ func TestSince(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index fde4ff35a..5e9f15798 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -333,7 +333,6 @@ func TestOneNodeUpAuth(t *testing.T) { tstest.Shard(t) for _, useSeamlessKeyRenewal := range []bool{true, false} { - tt := tt // subtests are run in parallel, rebind tt t.Run(fmt.Sprintf("%s-seamless-%t", tt.name, useSeamlessKeyRenewal), func(t *testing.T) { tstest.Parallel(t) diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index b5fc36bb8..0d419841c 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -836,7 +836,6 @@ func (c *Conn) maybeCloseDERPsOnRebind(okayLocalIPs []netip.Prefix) { c.closeOrReconnectDERPLocked(regionID, "rebind-default-route-change") continue } - regionID := regionID dc := ad.c go func() { ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 7381c515a..94dbb6359 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -1898,7 +1898,6 @@ func (ns *Impl) ExpVar() expvar.Var { {"option_unknown_received", ipStats.OptionUnknownReceived}, } for _, metric := range ipMetrics { - metric := metric m.Set("counter_ip_"+metric.name, expvar.Func(func() any { return readStatCounter(metric.field) })) @@ -1925,7 +1924,6 @@ func (ns *Impl) ExpVar() expvar.Var { {"errors", fwdStats.Errors}, } for _, metric := range fwdMetrics { - metric := metric m.Set("counter_ip_forward_"+metric.name, expvar.Func(func() any { return readStatCounter(metric.field) })) @@ -1969,7 +1967,6 @@ func (ns *Impl) ExpVar() expvar.Var { {"forward_max_in_flight_drop", tcpStats.ForwardMaxInFlightDrop}, } for _, metric := range tcpMetrics { - metric := metric m.Set("counter_tcp_"+metric.name, expvar.Func(func() any { return readStatCounter(metric.field) })) @@ -1996,7 +1993,6 @@ func (ns *Impl) ExpVar() expvar.Var { {"checksum_errors", udpStats.ChecksumErrors}, } for _, metric := range udpMetrics { - metric := metric m.Set("counter_udp_"+metric.name, expvar.Func(func() any { return readStatCounter(metric.field) })) From f2b8d37436d047e444efa6d728961664f0d5009b Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 26 Sep 2025 08:39:02 -0700 Subject: [PATCH 1402/1708] feature/tpm: only register HardwareAttestationKey on linux/windows (#17293) We can only register one key implementation per process. When running on macOS or Android, trying to register a separate key implementation from feature/tpm causes a panic. 
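The constraint being worked around is a register-once hook: only one hardware attestation key implementation may claim the slot per process, and a second registration panics. Guarding the TPM registration by GOOS keeps it from colliding with the platform's own key implementation on macOS and Android. Below is a self-contained sketch of that guard pattern under toy assumptions; registry, registerTPM, and registerPlatformNative are illustrative names, not the real key package API.

package main

import (
	"fmt"
	"runtime"
)

// registry accepts exactly one implementation per process; a second
// registration panics, mirroring the register-once behavior described above.
type registry struct{ newKey func() string }

func (r *registry) register(f func() string) {
	if r.newKey != nil {
		panic("hardware attestation key implementation already registered")
	}
	r.newKey = f
}

var keys registry

// registerTPM stands in for feature/tpm's init: it only claims the slot on
// platforms where the TPM-backed implementation should be the default.
func registerTPM() {
	if runtime.GOOS == "linux" || runtime.GOOS == "windows" {
		keys.register(func() string { return "tpm-backed key" })
	}
}

// registerPlatformNative stands in for the platform's own key implementation,
// registered by other code on macOS and Android builds.
func registerPlatformNative() {
	keys.register(func() string { return "platform-native key" })
}

func main() {
	registerTPM() // a no-op on darwin and android, so the next call cannot panic
	if runtime.GOOS != "linux" && runtime.GOOS != "windows" {
		registerPlatformNative()
	}
	if keys.newKey != nil {
		fmt.Println("using:", keys.newKey())
	} else {
		fmt.Println("no hardware attestation key registered")
	}
}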
Updates #15830 Signed-off-by: Andrew Lytvynov --- feature/tpm/tpm.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 019224738..e4c2b29e9 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -14,6 +14,7 @@ import ( "log" "os" "path/filepath" + "runtime" "slices" "strings" "sync" @@ -40,10 +41,12 @@ func init() { hi.TPM = infoOnce() }) store.Register(store.TPMPrefix, newStore) - key.RegisterHardwareAttestationKeyFns( - func() key.HardwareAttestationKey { return &attestationKey{} }, - func() (key.HardwareAttestationKey, error) { return newAttestationKey() }, - ) + if runtime.GOOS == "linux" || runtime.GOOS == "windows" { + key.RegisterHardwareAttestationKeyFns( + func() key.HardwareAttestationKey { return &attestationKey{} }, + func() (key.HardwareAttestationKey, error) { return newAttestationKey() }, + ) + } } func info() *tailcfg.TPMInfo { From b3ae1cb0ccb73a0951cccdf4096e417c2739d455 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 25 Sep 2025 13:19:11 -0700 Subject: [PATCH 1403/1708] wgengine/netstack/gro: permit building without GRO This only saves ~32KB in the minimal linux/amd64 binary, but it's a step towards permitting not depending on gvisor for small builds. Updates #17283 Change-Id: Iae8da5e9465127de354dbcaf25e794a6832d891b Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-minbox.txt | 1 - cmd/tailscaled/deps_test.go | 11 +++++++++++ feature/buildfeatures/feature_gro_disabled.go | 13 +++++++++++++ feature/buildfeatures/feature_gro_enabled.go | 13 +++++++++++++ feature/featuretags/featuretags.go | 1 + wgengine/netstack/gro/gro_default.go | 2 +- .../netstack/gro/{gro_ios.go => gro_disabled.go} | 13 +++++++++---- wgengine/netstack/link_endpoint.go | 3 ++- wgengine/netstack/netstack.go | 2 +- 9 files changed, 51 insertions(+), 8 deletions(-) create mode 100644 feature/buildfeatures/feature_gro_disabled.go create mode 100644 feature/buildfeatures/feature_gro_enabled.go rename wgengine/netstack/gro/{gro_ios.go => gro_disabled.go} (59%) diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 0c3e08c15..f5d2831b6 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -84,7 +84,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ - gvisor.dev/gvisor/pkg/tcpip/stack/gro from tailscale.com/wgengine/netstack/gro gvisor.dev/gvisor/pkg/tcpip/transport from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ gvisor.dev/gvisor/pkg/tcpip/transport/icmp from tailscale.com/wgengine/netstack gvisor.dev/gvisor/pkg/tcpip/transport/internal/network from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 24a393124..92c6a872c 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -198,3 +198,14 @@ func TestOmitPortlist(t *testing.T) { }, }.Check(t) } + +func TestOmitGRO(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_gro,ts_include_cli", + BadDeps: map[string]string{ + "gvisor.dev/gvisor/pkg/tcpip/stack/gro": "unexpected dep with ts_omit_gro", + }, + }.Check(t) +} diff --git a/feature/buildfeatures/feature_gro_disabled.go 
b/feature/buildfeatures/feature_gro_disabled.go new file mode 100644 index 000000000..ffbd0da2e --- /dev/null +++ b/feature/buildfeatures/feature_gro_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_gro + +package buildfeatures + +// HasGRO is whether the binary was built with support for modular feature "Generic Receive Offload support (performance)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_gro" build tag. +// It's a const so it can be used for dead code elimination. +const HasGRO = false diff --git a/feature/buildfeatures/feature_gro_enabled.go b/feature/buildfeatures/feature_gro_enabled.go new file mode 100644 index 000000000..e2c8024e0 --- /dev/null +++ b/feature/buildfeatures/feature_gro_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_gro + +package buildfeatures + +// HasGRO is whether the binary was built with support for modular feature "Generic Receive Offload support (performance)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_gro" build tag. +// It's a const so it can be used for dead code elimination. +const HasGRO = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index d1752a80c..1a1fcf272 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -106,6 +106,7 @@ var Features = map[FeatureTag]FeatureMeta{ }, "desktop_sessions": {"DesktopSessions", "Desktop sessions support", nil}, "drive": {"Drive", "Tailscale Drive (file server) support", nil}, + "gro": {"GRO", "Generic Receive Offload support (performance)", nil}, "kube": {"Kube", "Kubernetes integration", nil}, "linuxdnsfight": {"LinuxDNSFight", "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)", nil}, "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, diff --git a/wgengine/netstack/gro/gro_default.go b/wgengine/netstack/gro/gro_default.go index f92ee15ec..c70e19f7c 100644 --- a/wgengine/netstack/gro/gro_default.go +++ b/wgengine/netstack/gro/gro_default.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios +//go:build !ios && !ts_omit_gro package gro diff --git a/wgengine/netstack/gro/gro_ios.go b/wgengine/netstack/gro/gro_disabled.go similarity index 59% rename from wgengine/netstack/gro/gro_ios.go rename to wgengine/netstack/gro/gro_disabled.go index 627b42d7e..d7ffbd913 100644 --- a/wgengine/netstack/gro/gro_ios.go +++ b/wgengine/netstack/gro/gro_disabled.go @@ -1,22 +1,27 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build ios +//go:build ios || ts_omit_gro package gro import ( - "gvisor.dev/gvisor/pkg/tcpip/stack" + "runtime" + "tailscale.com/net/packet" ) type GRO struct{} func NewGRO() *GRO { - panic("unsupported on iOS") + if runtime.GOOS == "ios" { + panic("unsupported on iOS") + } + panic("GRO disabled in build") + } -func (g *GRO) SetDispatcher(_ stack.NetworkDispatcher) {} +func (g *GRO) SetDispatcher(any) {} func (g *GRO) Enqueue(_ *packet.Parsed) {} diff --git a/wgengine/netstack/link_endpoint.go b/wgengine/netstack/link_endpoint.go index 39da64b55..50e8d755a 100644 --- a/wgengine/netstack/link_endpoint.go +++ b/wgengine/netstack/link_endpoint.go @@ 
-10,6 +10,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/stack" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/packet" "tailscale.com/types/ipproto" "tailscale.com/wgengine/netstack/gro" @@ -133,7 +134,7 @@ func newLinkEndpoint(size int, mtu uint32, linkAddr tcpip.LinkAddress, supported // If gro allocates a *gro.GRO it will have l's stack.NetworkDispatcher set via // SetDispatcher(). func (l *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO { - if l.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP { + if !buildfeatures.HasGRO || l.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP { // IPv6 may have extension headers preceding a TCP header, but we trade // for a fast path and assume p cannot be coalesced in such a case. l.injectInbound(p) diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 94dbb6359..0e2712c67 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -344,7 +344,7 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi } supportedGSOKind := stack.GSONotSupported supportedGROKind := groNotSupported - if runtime.GOOS == "linux" { + if runtime.GOOS == "linux" && buildfeatures.HasGRO { // TODO(jwhited): add Windows support https://github.com/tailscale/corp/issues/21874 supportedGROKind = tcpGROSupported supportedGSOKind = stack.HostGSOSupported From f715ee2be97db4cbb976aaae5d8d9ea530be531b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 25 Sep 2025 18:48:14 -0700 Subject: [PATCH 1404/1708] cmd/tailscaled: start implementing ts_omit_netstack Baby steps. This permits building without much of gvisor, but not all of it. Updates #17283 Change-Id: I8433146e259918cc901fe86b4ea29be22075b32c Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-minbox.txt | 36 ++------- cmd/tailscaled/netstack.go | 75 ++++++++++++++++++ cmd/tailscaled/tailscaled.go | 76 +++++-------------- feature/featuretags/featuretags.go | 12 ++- tsd/tsd.go | 4 + .../tailscaled_deps_test_darwin.go | 1 + .../tailscaled_deps_test_freebsd.go | 1 + .../integration/tailscaled_deps_test_linux.go | 1 + .../tailscaled_deps_test_openbsd.go | 1 + .../tailscaled_deps_test_windows.go | 1 + wgengine/netstack/gro/gro.go | 2 + wgengine/netstack/gro/netstack_disabled.go | 10 +++ wgengine/netstack/link_endpoint.go | 2 +- wgengine/netstack/netstack.go | 9 ++- 14 files changed, 140 insertions(+), 91 deletions(-) create mode 100644 cmd/tailscaled/netstack.go create mode 100644 wgengine/netstack/gro/netstack_disabled.go diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index f5d2831b6..3699ac4e7 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -14,7 +14,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ + github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header github.com/google/nftables from tailscale.com/util/linuxfw 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ @@ -63,36 +63,18 @@ tailscale.com/cmd/tailscaled 
dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/log from gvisor.dev/gvisor/pkg/context+ gvisor.dev/gvisor/pkg/rand from gvisor.dev/gvisor/pkg/tcpip+ gvisor.dev/gvisor/pkg/refs from gvisor.dev/gvisor/pkg/buffer+ - 💣 gvisor.dev/gvisor/pkg/sleep from gvisor.dev/gvisor/pkg/tcpip/transport/tcp 💣 gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ 💣 gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack - gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ - gvisor.dev/gvisor/pkg/tcpip/adapters/gonet from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ - gvisor.dev/gvisor/pkg/tcpip/hash/jenkins from gvisor.dev/gvisor/pkg/tcpip/stack+ - gvisor.dev/gvisor/pkg/tcpip/header from gvisor.dev/gvisor/pkg/tcpip/header/parse+ - gvisor.dev/gvisor/pkg/tcpip/header/parse from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ - gvisor.dev/gvisor/pkg/tcpip/internal/tcp from gvisor.dev/gvisor/pkg/tcpip/transport/tcp - gvisor.dev/gvisor/pkg/tcpip/network/hash from gvisor.dev/gvisor/pkg/tcpip/network/ipv4 - gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ - gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ - gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ - gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/wgengine/netstack - gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack - gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ + gvisor.dev/gvisor/pkg/tcpip/hash/jenkins from gvisor.dev/gvisor/pkg/tcpip/stack + gvisor.dev/gvisor/pkg/tcpip/header from gvisor.dev/gvisor/pkg/tcpip/ports+ + gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ - 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ - gvisor.dev/gvisor/pkg/tcpip/transport from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ - gvisor.dev/gvisor/pkg/tcpip/transport/icmp from tailscale.com/wgengine/netstack - gvisor.dev/gvisor/pkg/tcpip/transport/internal/network from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ - gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop from gvisor.dev/gvisor/pkg/tcpip/transport/raw - gvisor.dev/gvisor/pkg/tcpip/transport/packet from gvisor.dev/gvisor/pkg/tcpip/transport/raw - gvisor.dev/gvisor/pkg/tcpip/transport/raw from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ - 💣 gvisor.dev/gvisor/pkg/tcpip/transport/tcp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + 💣 gvisor.dev/gvisor/pkg/tcpip/stack from tailscale.com/net/tstun gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack from gvisor.dev/gvisor/pkg/tcpip/stack - gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal @@ -182,7 +164,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from 
tailscale.com/cmd/tailscaled+ tailscale.com/posture from tailscale.com/ipn/ipnlocal - tailscale.com/proxymap from tailscale.com/tsd+ + tailscale.com/proxymap from tailscale.com/tsd tailscale.com/safesocket from tailscale.com/cmd/tailscaled+ tailscale.com/syncs from tailscale.com/cmd/tailscaled+ tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ @@ -263,7 +245,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+ 💣 tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+ tailscale.com/wgengine/netlog from tailscale.com/wgengine - tailscale.com/wgengine/netstack from tailscale.com/cmd/tailscaled tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ tailscale.com/wgengine/router from tailscale.com/cmd/tailscaled+ tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ @@ -317,7 +298,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de cmp from encoding/json+ compress/flate from compress/gzip compress/gzip from golang.org/x/net/http2+ - container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ @@ -393,7 +373,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/json from expvar+ encoding/pem from crypto/tls+ errors from archive/tar+ - expvar from tailscale.com/cmd/tailscaled+ + expvar from tailscale.com/health+ flag from tailscale.com/cmd/tailscaled+ fmt from archive/tar+ hash from crypto+ diff --git a/cmd/tailscaled/netstack.go b/cmd/tailscaled/netstack.go new file mode 100644 index 000000000..c0b34ed41 --- /dev/null +++ b/cmd/tailscaled/netstack.go @@ -0,0 +1,75 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netstack + +package main + +import ( + "context" + "expvar" + "net" + "net/netip" + + "tailscale.com/tsd" + "tailscale.com/types/logger" + "tailscale.com/wgengine/netstack" +) + +func init() { + hookNewNetstack.Set(newNetstack) +} + +func newNetstack(logf logger.Logf, sys *tsd.System, onlyNetstack bool) (tsd.NetstackImpl, error) { + ns, err := netstack.Create(logf, + sys.Tun.Get(), + sys.Engine.Get(), + sys.MagicSock.Get(), + sys.Dialer.Get(), + sys.DNSManager.Get(), + sys.ProxyMapper(), + ) + if err != nil { + return nil, err + } + // Only register debug info if we have a debug mux + if debugMux != nil { + expvar.Publish("netstack", ns.ExpVar()) + } + + sys.Set(ns) + ns.ProcessLocalIPs = onlyNetstack + ns.ProcessSubnets = onlyNetstack || handleSubnetsInNetstack() + + dialer := sys.Dialer.Get() // must be set by caller already + + if onlyNetstack { + e := sys.Engine.Get() + dialer.UseNetstackForIP = func(ip netip.Addr) bool { + _, ok := e.PeerForIP(ip) + return ok + } + dialer.NetstackDialTCP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { + // Note: don't just return ns.DialContextTCP or we'll return + // *gonet.TCPConn(nil) instead of a nil interface which trips up + // callers. + tcpConn, err := ns.DialContextTCP(ctx, dst) + if err != nil { + return nil, err + } + return tcpConn, nil + } + dialer.NetstackDialUDP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { + // Note: don't just return ns.DialContextUDP or we'll return + // *gonet.UDPConn(nil) instead of a nil interface which trips up + // callers. 
+ udpConn, err := ns.DialContextUDP(ctx, dst) + if err != nil { + return nil, err + } + return udpConn, nil + } + } + + return ns, nil +} diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 636627539..d01af199c 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -13,14 +13,12 @@ package main // import "tailscale.com/cmd/tailscaled" import ( "context" "errors" - "expvar" "flag" "fmt" "log" "net" "net/http" "net/http/pprof" - "net/netip" "os" "os/signal" "path/filepath" @@ -34,6 +32,7 @@ import ( "tailscale.com/control/controlclient" "tailscale.com/envknob" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -65,7 +64,6 @@ import ( "tailscale.com/version" "tailscale.com/version/distro" "tailscale.com/wgengine" - "tailscale.com/wgengine/netstack" "tailscale.com/wgengine/router" ) @@ -598,6 +596,10 @@ func startIPNServer(ctx context.Context, logf logger.Logf, logID logid.PublicID, return nil } +var ( + hookNewNetstack feature.Hook[func(_ logger.Logf, _ *tsd.System, onlyNetstack bool) (tsd.NetstackImpl, error)] +) + func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID, sys *tsd.System) (_ *ipnlocal.LocalBackend, retErr error) { if logPol != nil { logPol.Logtail.SetNetMon(sys.NetMon.Get()) @@ -615,6 +617,9 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID if err != nil { return nil, fmt.Errorf("createEngine: %w", err) } + if onlyNetstack && !buildfeatures.HasNetstack { + return nil, errors.New("userspace-networking support is not compiled in to this binary") + } if debugMux != nil { if ms, ok := sys.MagicSock.GetOK(); ok { debugMux.HandleFunc("/debug/magicsock", ms.ServeHTTPDebug) @@ -622,41 +627,14 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID go runDebugServer(logf, debugMux, args.debug) } - ns, err := newNetstack(logf, sys) - if err != nil { - return nil, fmt.Errorf("newNetstack: %w", err) - } - sys.Set(ns) - ns.ProcessLocalIPs = onlyNetstack - ns.ProcessSubnets = onlyNetstack || handleSubnetsInNetstack() - - if onlyNetstack { - e := sys.Engine.Get() - dialer.UseNetstackForIP = func(ip netip.Addr) bool { - _, ok := e.PeerForIP(ip) - return ok - } - dialer.NetstackDialTCP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { - // Note: don't just return ns.DialContextTCP or we'll return - // *gonet.TCPConn(nil) instead of a nil interface which trips up - // callers. - tcpConn, err := ns.DialContextTCP(ctx, dst) - if err != nil { - return nil, err - } - return tcpConn, nil - } - dialer.NetstackDialUDP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { - // Note: don't just return ns.DialContextUDP or we'll return - // *gonet.UDPConn(nil) instead of a nil interface which trips up - // callers. 
- udpConn, err := ns.DialContextUDP(ctx, dst) - if err != nil { - return nil, err - } - return udpConn, nil + var ns tsd.NetstackImpl // or nil if not linked in + if newNetstack, ok := hookNewNetstack.GetOk(); ok { + ns, err = newNetstack(logf, sys, onlyNetstack) + if err != nil { + return nil, fmt.Errorf("newNetstack: %w", err) } } + if startProxy != nil { go startProxy(logf, dialer) } @@ -687,8 +665,11 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID if f, ok := hookConfigureWebClient.GetOk(); ok { f(lb) } - if err := ns.Start(lb); err != nil { - log.Fatalf("failed to start netstack: %v", err) + + if ns != nil { + if err := ns.Start(lb); err != nil { + log.Fatalf("failed to start netstack: %v", err) + } } return lb, nil } @@ -868,25 +849,6 @@ func runDebugServer(logf logger.Logf, mux *http.ServeMux, addr string) { } } -func newNetstack(logf logger.Logf, sys *tsd.System) (*netstack.Impl, error) { - ret, err := netstack.Create(logf, - sys.Tun.Get(), - sys.Engine.Get(), - sys.MagicSock.Get(), - sys.Dialer.Get(), - sys.DNSManager.Get(), - sys.ProxyMapper(), - ) - if err != nil { - return nil, err - } - // Only register debug info if we have a debug mux - if debugMux != nil { - expvar.Publish("netstack", ret.ExpVar()) - } - return ret, nil -} - var beChildFunc = beChild func beChild(args []string) error { diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 1a1fcf272..1db377277 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -106,10 +106,14 @@ var Features = map[FeatureTag]FeatureMeta{ }, "desktop_sessions": {"DesktopSessions", "Desktop sessions support", nil}, "drive": {"Drive", "Tailscale Drive (file server) support", nil}, - "gro": {"GRO", "Generic Receive Offload support (performance)", nil}, - "kube": {"Kube", "Kubernetes integration", nil}, - "linuxdnsfight": {"LinuxDNSFight", "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)", nil}, - "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, + "gro": { + Sym: "GRO", + Desc: "Generic Receive Offload support (performance)", + Deps: []FeatureTag{"netstack"}, + }, + "kube": {"Kube", "Kubernetes integration", nil}, + "linuxdnsfight": {"LinuxDNSFight", "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)", nil}, + "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, "outboundproxy": { Sym: "OutboundProxy", Desc: "Outbound localhost HTTP/SOCK5 proxy support", diff --git a/tsd/tsd.go b/tsd/tsd.go index 263b8de70..8223254da 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -98,10 +98,14 @@ func NewSystemWithBus(bus *eventbus.Bus) *System { return sys } +// LocalBackend is a fake name for *ipnlocal.LocalBackend to avoid an import cycle. +type LocalBackend = any + // NetstackImpl is the interface that *netstack.Impl implements. // It's an interface for circular dependency reasons: netstack.Impl // references LocalBackend, and LocalBackend has a tsd.System. 
type NetstackImpl interface { + Start(LocalBackend) error UpdateNetstackIPs(*netmap.NetworkMap) } diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index b025e3a43..7a26300e5 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -18,6 +18,7 @@ import ( _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index b025e3a43..7a26300e5 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -18,6 +18,7 @@ import ( _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index b025e3a43..7a26300e5 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -18,6 +18,7 @@ import ( _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index b025e3a43..7a26300e5 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -18,6 +18,7 @@ import ( _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index 32f95357d..08c8c27ff 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -26,6 +26,7 @@ import ( _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/wgengine/netstack/gro/gro.go b/wgengine/netstack/gro/gro.go index 654d17056..c8e5e56e1 100644 --- a/wgengine/netstack/gro/gro.go +++ b/wgengine/netstack/gro/gro.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_netstack + // Package gro implements GRO for the receive (write) path into gVisor. 
package gro diff --git a/wgengine/netstack/gro/netstack_disabled.go b/wgengine/netstack/gro/netstack_disabled.go new file mode 100644 index 000000000..a0f56fa44 --- /dev/null +++ b/wgengine/netstack/gro/netstack_disabled.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_netstack + +package gro + +func RXChecksumOffload(any) any { + panic("unreachable") +} diff --git a/wgengine/netstack/link_endpoint.go b/wgengine/netstack/link_endpoint.go index 50e8d755a..260b3196a 100644 --- a/wgengine/netstack/link_endpoint.go +++ b/wgengine/netstack/link_endpoint.go @@ -187,7 +187,7 @@ func (l *linkEndpoint) injectInbound(p *packet.Parsed) { l.mu.RLock() d := l.dispatcher l.mu.RUnlock() - if d == nil { + if d == nil || !buildfeatures.HasNetstack { return } pkt := gro.RXChecksumOffload(p) diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 0e2712c67..c2b5d8a32 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -578,9 +578,16 @@ func (ns *Impl) decrementInFlightTCPForward(tei stack.TransportEndpointID, remot } } +// LocalBackend is a fake name for *ipnlocal.LocalBackend to avoid an import cycle. +type LocalBackend = any + // Start sets up all the handlers so netstack can start working. Implements // wgengine.FakeImpl. -func (ns *Impl) Start(lb *ipnlocal.LocalBackend) error { +func (ns *Impl) Start(b LocalBackend) error { + if b == nil { + panic("nil LocalBackend interface") + } + lb := b.(*ipnlocal.LocalBackend) if lb == nil { panic("nil LocalBackend") } From 0b994ef2fe398dd9c827a2418d48f224b5d63303 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 25 Sep 2025 18:15:58 -0500 Subject: [PATCH 1405/1708] docs/windows/policy: add ADMX policy definition for AllowTailscaledRestart Updates tailscale/corp#32675 Signed-off-by: Nick Khyl --- docs/windows/policy/en-US/tailscale.adml | 9 +++++++++ docs/windows/policy/tailscale.admx | 14 ++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index 2e143d49c..58e13be19 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -20,6 +20,7 @@ Tailscale version 1.82.0 and later Tailscale version 1.84.0 and later Tailscale version 1.86.0 and later + Tailscale version 1.90.0 and later Tailscale UI customization Settings @@ -121,6 +122,14 @@ If you enable this policy setting, you can specify how long Tailscale will wait If you disable or don't configure this policy setting, Tailscale will only reconnect if a user chooses to or if required by a different policy setting. 
Refer to https://pkg.go.dev/time#ParseDuration for information about the supported duration strings.]]> + Allow users to restart tailscaled + Allow Local Network Access when an Exit Node is in use + + + @@ -187,6 +191,16 @@ + + + + + + + + + + From 09a33b926292036c2bf4bb7754ac69fb727c1c15 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 11:15:05 -0700 Subject: [PATCH 1406/1708] net/tstun: support ts_omit_netstack Updates #17283 Change-Id: I1134bb15b3e39a3fa26c0621512aae9181de2210 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-minbox.txt | 13 ++---- net/tstun/netstack_disabled.go | 69 ++++++++++++++++++++++++++++++ net/tstun/netstack_enabled.go | 22 ++++++++++ net/tstun/wrap.go | 34 ++++++++++----- 4 files changed, 119 insertions(+), 19 deletions(-) create mode 100644 net/tstun/netstack_disabled.go create mode 100644 net/tstun/netstack_enabled.go diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 3699ac4e7..0ec45d465 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -61,20 +61,15 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 gvisor.dev/gvisor/pkg/gohacks from gvisor.dev/gvisor/pkg/state/wire+ gvisor.dev/gvisor/pkg/linewriter from gvisor.dev/gvisor/pkg/log gvisor.dev/gvisor/pkg/log from gvisor.dev/gvisor/pkg/context+ - gvisor.dev/gvisor/pkg/rand from gvisor.dev/gvisor/pkg/tcpip+ - gvisor.dev/gvisor/pkg/refs from gvisor.dev/gvisor/pkg/buffer+ + gvisor.dev/gvisor/pkg/rand from gvisor.dev/gvisor/pkg/tcpip + gvisor.dev/gvisor/pkg/refs from gvisor.dev/gvisor/pkg/buffer 💣 gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ - 💣 gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ - gvisor.dev/gvisor/pkg/tcpip/hash/jenkins from gvisor.dev/gvisor/pkg/tcpip/stack - gvisor.dev/gvisor/pkg/tcpip/header from gvisor.dev/gvisor/pkg/tcpip/ports+ - gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack - gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ - 💣 gvisor.dev/gvisor/pkg/tcpip/stack from tailscale.com/net/tstun - gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack from gvisor.dev/gvisor/pkg/tcpip/stack + gvisor.dev/gvisor/pkg/tcpip/header from tailscale.com/net/packet/checksum+ + gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal diff --git a/net/tstun/netstack_disabled.go b/net/tstun/netstack_disabled.go new file mode 100644 index 000000000..c1266b305 --- /dev/null +++ b/net/tstun/netstack_disabled.go @@ -0,0 +1,69 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_netstack + +package tstun + +type netstack_PacketBuffer struct { + GSOOptions netstack_GSO +} + +func (*netstack_PacketBuffer) DecRef() { panic("unreachable") } +func (*netstack_PacketBuffer) Size() int { panic("unreachable") } + +type netstack_GSOType int + +const ( + netstack_GSONone netstack_GSOType = iota + netstack_GSOTCPv4 + netstack_GSOTCPv6 + netstack_GSOGvisor +) + +type netstack_GSO 
struct { + // Type is one of GSONone, GSOTCPv4, etc. + Type netstack_GSOType + // NeedsCsum is set if the checksum offload is enabled. + NeedsCsum bool + // CsumOffset is offset after that to place checksum. + CsumOffset uint16 + + // Mss is maximum segment size. + MSS uint16 + // L3Len is L3 (IP) header length. + L3HdrLen uint16 + + // MaxSize is maximum GSO packet size. + MaxSize uint32 +} + +func (p *netstack_PacketBuffer) NetworkHeader() slicer { + panic("unreachable") +} + +func (p *netstack_PacketBuffer) TransportHeader() slicer { + panic("unreachable") +} + +func (p *netstack_PacketBuffer) ToBuffer() netstack_Buffer { panic("unreachable") } + +func (p *netstack_PacketBuffer) Data() asRanger { + panic("unreachable") +} + +type asRanger struct{} + +func (asRanger) AsRange() toSlicer { panic("unreachable") } + +type toSlicer struct{} + +func (toSlicer) ToSlice() []byte { panic("unreachable") } + +type slicer struct{} + +func (s slicer) Slice() []byte { panic("unreachable") } + +type netstack_Buffer struct{} + +func (netstack_Buffer) Flatten() []byte { panic("unreachable") } diff --git a/net/tstun/netstack_enabled.go b/net/tstun/netstack_enabled.go new file mode 100644 index 000000000..8fc1a2e20 --- /dev/null +++ b/net/tstun/netstack_enabled.go @@ -0,0 +1,22 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netstack + +package tstun + +import ( + "gvisor.dev/gvisor/pkg/tcpip/stack" +) + +type ( + netstack_PacketBuffer = stack.PacketBuffer + netstack_GSO = stack.GSO +) + +const ( + netstack_GSONone = stack.GSONone + netstack_GSOTCPv4 = stack.GSOTCPv4 + netstack_GSOTCPv6 = stack.GSOTCPv6 + netstack_GSOGvisor = stack.GSOGvisor +) diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 4c88c7eef..c94844c90 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -22,7 +22,6 @@ import ( "github.com/tailscale/wireguard-go/device" "github.com/tailscale/wireguard-go/tun" "go4.org/mem" - "gvisor.dev/gvisor/pkg/tcpip/stack" "tailscale.com/disco" "tailscale.com/feature/buildfeatures" tsmetrics "tailscale.com/metrics" @@ -229,7 +228,7 @@ func registerMetrics(reg *usermetric.Registry) *metrics { type tunInjectedRead struct { // Only one of packet or data should be set, and are read in that order of // precedence. - packet *stack.PacketBuffer + packet *netstack_PacketBuffer data []byte } @@ -999,7 +998,10 @@ const ( minTCPHeaderSize = 20 ) -func stackGSOToTunGSO(pkt []byte, gso stack.GSO) (tun.GSOOptions, error) { +func stackGSOToTunGSO(pkt []byte, gso netstack_GSO) (tun.GSOOptions, error) { + if !buildfeatures.HasNetstack { + panic("unreachable") + } options := tun.GSOOptions{ CsumStart: gso.L3HdrLen, CsumOffset: gso.CsumOffset, @@ -1007,12 +1009,12 @@ func stackGSOToTunGSO(pkt []byte, gso stack.GSO) (tun.GSOOptions, error) { NeedsCsum: gso.NeedsCsum, } switch gso.Type { - case stack.GSONone: + case netstack_GSONone: options.GSOType = tun.GSONone return options, nil - case stack.GSOTCPv4: + case netstack_GSOTCPv4: options.GSOType = tun.GSOTCPv4 - case stack.GSOTCPv6: + case netstack_GSOTCPv6: options.GSOType = tun.GSOTCPv6 default: return tun.GSOOptions{}, fmt.Errorf("unsupported gVisor GSOType: %v", gso.Type) @@ -1035,7 +1037,10 @@ func stackGSOToTunGSO(pkt []byte, gso stack.GSO) (tun.GSOOptions, error) { // both before and after partial checksum updates where later checksum // offloading still expects a partial checksum. // TODO(jwhited): plumb partial checksum awareness into net/packet/checksum. 
-func invertGSOChecksum(pkt []byte, gso stack.GSO) { +func invertGSOChecksum(pkt []byte, gso netstack_GSO) { + if !buildfeatures.HasNetstack { + panic("unreachable") + } if gso.NeedsCsum != true { return } @@ -1049,10 +1054,13 @@ func invertGSOChecksum(pkt []byte, gso stack.GSO) { // injectedRead handles injected reads, which bypass filters. func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []int, offset int) (n int, err error) { - var gso stack.GSO + var gso netstack_GSO pkt := outBuffs[0][offset:] if res.packet != nil { + if !buildfeatures.HasNetstack { + panic("unreachable") + } bufN := copy(pkt, res.packet.NetworkHeader().Slice()) bufN += copy(pkt[bufN:], res.packet.TransportHeader().Slice()) bufN += copy(pkt[bufN:], res.packet.Data().AsRange().ToSlice()) @@ -1298,7 +1306,10 @@ func (t *Wrapper) SetJailedFilter(filt *filter.Filter) { // // This path is typically used to deliver synthesized packets to the // host networking stack. -func (t *Wrapper) InjectInboundPacketBuffer(pkt *stack.PacketBuffer, buffs [][]byte, sizes []int) error { +func (t *Wrapper) InjectInboundPacketBuffer(pkt *netstack_PacketBuffer, buffs [][]byte, sizes []int) error { + if !buildfeatures.HasNetstack { + panic("unreachable") + } buf := buffs[0][PacketStartOffset:] bufN := copy(buf, pkt.NetworkHeader().Slice()) @@ -1437,7 +1448,10 @@ func (t *Wrapper) InjectOutbound(pkt []byte) error { // InjectOutboundPacketBuffer logically behaves as InjectOutbound. It takes ownership of one // reference count on the packet, and the packet may be mutated. The packet refcount will be // decremented after the injected buffer has been read. -func (t *Wrapper) InjectOutboundPacketBuffer(pkt *stack.PacketBuffer) error { +func (t *Wrapper) InjectOutboundPacketBuffer(pkt *netstack_PacketBuffer) error { + if !buildfeatures.HasNetstack { + panic("unreachable") + } size := pkt.Size() if size > MaxPacketSize { pkt.DecRef() From e7a79ef5f17a623bf804480b2a118a2487348560 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 12:29:36 -0700 Subject: [PATCH 1407/1708] tstest/integration: deflake TestC2NDebugNetmap, disable service collection Fixes #17298 Change-Id: I83459fa1dad583c32395a80548510bc7ec035c41 Signed-off-by: Brad Fitzpatrick --- tstest/integration/integration_test.go | 4 +++- tstest/integration/testcontrol/testcontrol.go | 7 ++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 5e9f15798..92f7441b0 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -1784,7 +1784,9 @@ func TestPeerRelayPing(t *testing.T) { func TestC2NDebugNetmap(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := NewTestEnv(t) + env := NewTestEnv(t, ConfigureControl(func(s *testcontrol.Server) { + s.CollectServices = "false" + })) var testNodes []*TestNode var nodes []*tailcfg.Node diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 1d3b99f7a..2c6ac1d6d 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -35,6 +35,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/opt" "tailscale.com/types/ptr" "tailscale.com/util/httpm" "tailscale.com/util/mak" @@ -69,6 +70,10 @@ type Server struct { // DefaultNodeCapabilities overrides the capability map sent to each client. 
DefaultNodeCapabilities *tailcfg.NodeCapMap + // CollectServices, if non-empty, sets whether the control server asks + // for service updates. If empty, the default is "true". + CollectServices opt.Bool + // ExplicitBaseURL or HTTPTestServer must be set. ExplicitBaseURL string // e.g. "http://127.0.0.1:1234" with no trailing URL HTTPTestServer *httptest.Server // if non-nil, used to get BaseURL @@ -1096,7 +1101,7 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, Node: node, DERPMap: s.DERPMap, Domain: domain, - CollectServices: "true", + CollectServices: cmp.Or(s.CollectServices, "true"), PacketFilter: packetFilterWithIngress(s.PeerRelayGrants), DNSConfig: dns, ControlTime: &t, From e766adf71f86fcd31651a8e1f89272a0ca50bc01 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 12:15:02 -0700 Subject: [PATCH 1408/1708] net/tstun: use ts_omit_gro in another place I missed earlier I didn't notice this GRO code during b3ae1cb0ccb73a0951c. Updates #17283 Change-Id: I95c06c19e489097fc8d61180dc57ae4b8a69c58c Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-minbox.txt | 2 +- net/tstun/wrap_linux.go | 2 ++ net/tstun/wrap_noop.go | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 0ec45d465..c57d8a94b 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -68,7 +68,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ - gvisor.dev/gvisor/pkg/tcpip/header from tailscale.com/net/packet/checksum+ + gvisor.dev/gvisor/pkg/tcpip/header from tailscale.com/net/packet/checksum gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version diff --git a/net/tstun/wrap_linux.go b/net/tstun/wrap_linux.go index 136ddfe1e..7498f107b 100644 --- a/net/tstun/wrap_linux.go +++ b/net/tstun/wrap_linux.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !ts_omit_gro + package tstun import ( diff --git a/net/tstun/wrap_noop.go b/net/tstun/wrap_noop.go index c743072ca..8ad04bafe 100644 --- a/net/tstun/wrap_noop.go +++ b/net/tstun/wrap_noop.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux +//go:build !linux || ts_omit_gro package tstun From afe909664b0529a25760395feaaa7f3fc0a0cfd1 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 12:38:49 -0700 Subject: [PATCH 1409/1708] types/opt: de-weird the API a bit with new True and False consts Updates #cleanup Change-Id: I15d8d840877d43e2b884d42354b4eb156094df7d Signed-off-by: Brad Fitzpatrick --- tstest/integration/integration_test.go | 2 +- tstest/integration/testcontrol/testcontrol.go | 2 +- types/opt/bool.go | 40 +++++++++++++------ 3 files changed, 30 insertions(+), 14 deletions(-) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 92f7441b0..fa148abbe 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -1785,7 +1785,7 @@ func TestC2NDebugNetmap(t *testing.T) { tstest.Shard(t) 
tstest.Parallel(t) env := NewTestEnv(t, ConfigureControl(func(s *testcontrol.Server) { - s.CollectServices = "false" + s.CollectServices = opt.False })) var testNodes []*TestNode diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 2c6ac1d6d..ac7804918 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -1101,7 +1101,7 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, Node: node, DERPMap: s.DERPMap, Domain: domain, - CollectServices: cmp.Or(s.CollectServices, "true"), + CollectServices: cmp.Or(s.CollectServices, opt.True), PacketFilter: packetFilterWithIngress(s.PeerRelayGrants), DNSConfig: dns, ControlTime: &t, diff --git a/types/opt/bool.go b/types/opt/bool.go index 0a3ee67ad..e2fd6a054 100644 --- a/types/opt/bool.go +++ b/types/opt/bool.go @@ -18,6 +18,22 @@ import ( // field without it being dropped. type Bool string +const ( + // True is the encoding of an explicit true. + True = Bool("true") + + // False is the encoding of an explicit false. + False = Bool("false") + + // ExplicitlyUnset is the encoding used by a null + // JSON value. It is a synonym for the empty string. + ExplicitlyUnset = Bool("unset") + + // Empty means the Bool is unset and it's neither + // true nor false. + Empty = Bool("") +) + // NewBool constructs a new Bool value equal to b. The returned Bool is set, // unless Set("") or Clear() methods are called. func NewBool(b bool) Bool { @@ -50,16 +66,16 @@ func (b *Bool) Scan(src any) error { switch src := src.(type) { case bool: if src { - *b = "true" + *b = True } else { - *b = "false" + *b = False } return nil case int64: if src == 0 { - *b = "false" + *b = False } else { - *b = "true" + *b = True } return nil default: @@ -75,18 +91,18 @@ func (b Bool) EqualBool(v bool) bool { } var ( - trueBytes = []byte("true") - falseBytes = []byte("false") + trueBytes = []byte(True) + falseBytes = []byte(False) nullBytes = []byte("null") ) func (b Bool) MarshalJSON() ([]byte, error) { switch b { - case "true": + case True: return trueBytes, nil - case "false": + case False: return falseBytes, nil - case "", "unset": + case Empty, ExplicitlyUnset: return nullBytes, nil } return nil, fmt.Errorf("invalid opt.Bool value %q", string(b)) @@ -95,11 +111,11 @@ func (b Bool) MarshalJSON() ([]byte, error) { func (b *Bool) UnmarshalJSON(j []byte) error { switch string(j) { case "true": - *b = "true" + *b = True case "false": - *b = "false" + *b = False case "null": - *b = "unset" + *b = ExplicitlyUnset default: return fmt.Errorf("invalid opt.Bool value %q", j) } From c95fdb0f8a94f53566637af6b0cdef2ef554b2d8 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 11:57:33 -0700 Subject: [PATCH 1410/1708] net/packet/checksum: copy the gvisor checksum, remove the dep As part of making Tailscale's gvisor dependency optional for small builds, this was one of the last places left that depended on gvisor. Just copy the couple functions were were using. 
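(Aside, not part of the patch: the copied helpers rely on RFC 1071's incremental-update identity, C' = C + (m' - m), where subtracting m in ones'-complement arithmetic is adding ^m. A minimal standalone Go sketch can check that a one-word edit updated incrementally matches a full recompute; the helper names and sample bytes below are made up for illustration only.)

// checksum_sketch.go — illustrative only; not from this patch series.
package main

import (
	"encoding/binary"
	"fmt"
)

// onesComplementSum folds buf into the non-inverted 16-bit ones'-complement
// sum that the incremental helpers operate on.
func onesComplementSum(buf []byte) uint16 {
	var sum uint32
	for i := 0; i+1 < len(buf); i += 2 {
		sum += uint32(binary.BigEndian.Uint16(buf[i:]))
	}
	for sum>>16 != 0 {
		sum = (sum & 0xffff) + (sum >> 16)
	}
	return uint16(sum)
}

// combine adds two 16-bit values with end-around carry (like checksumCombine).
func combine(a, b uint16) uint16 {
	v := uint32(a) + uint32(b)
	return uint16(v + v>>16)
}

func main() {
	buf := []byte{0x45, 0x00, 0x00, 0x54, 0xde, 0xad, 0xbe, 0xef}
	oldSum := onesComplementSum(buf)

	// Rewrite the 2-byte-aligned word at offset 4 (0xdead -> 0x1234).
	oldWord := binary.BigEndian.Uint16(buf[4:])
	newWord := uint16(0x1234)
	binary.BigEndian.PutUint16(buf[4:], newWord)

	// RFC 1071 (4): C' = C + (m' - m), with -m expressed as ^m.
	incremental := combine(oldSum, combine(newWord, ^oldWord))
	full := onesComplementSum(buf)
	fmt.Printf("incremental=%#04x full=%#04x match=%v\n", incremental, full, incremental == full)
}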
Updates #17283 Change-Id: Id2bc07ba12039afe4c8a3f0b68f4d76d1863bbfe Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-minbox.txt | 9 +-- net/packet/checksum/checksum.go | 122 ++++++++++++++++++++++++++--- 2 files changed, 113 insertions(+), 18 deletions(-) diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index c57d8a94b..3a7469c0f 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -14,7 +14,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header github.com/google/nftables from tailscale.com/util/linuxfw 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ @@ -56,7 +55,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de go4.org/netipx from tailscale.com/ipn/ipnlocal+ gvisor.dev/gvisor/pkg/atomicbitops from gvisor.dev/gvisor/pkg/buffer+ gvisor.dev/gvisor/pkg/bits from gvisor.dev/gvisor/pkg/buffer - 💣 gvisor.dev/gvisor/pkg/buffer from gvisor.dev/gvisor/pkg/tcpip+ + 💣 gvisor.dev/gvisor/pkg/buffer from gvisor.dev/gvisor/pkg/tcpip gvisor.dev/gvisor/pkg/context from gvisor.dev/gvisor/pkg/refs 💣 gvisor.dev/gvisor/pkg/gohacks from gvisor.dev/gvisor/pkg/state/wire+ gvisor.dev/gvisor/pkg/linewriter from gvisor.dev/gvisor/pkg/log @@ -66,10 +65,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ - gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/header+ - 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ - gvisor.dev/gvisor/pkg/tcpip/header from tailscale.com/net/packet/checksum - gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header + gvisor.dev/gvisor/pkg/tcpip from tailscale.com/ipn/ipnlocal + 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal diff --git a/net/packet/checksum/checksum.go b/net/packet/checksum/checksum.go index 547ea3a35..4b5b82174 100644 --- a/net/packet/checksum/checksum.go +++ b/net/packet/checksum/checksum.go @@ -8,8 +8,6 @@ import ( "encoding/binary" "net/netip" - "gvisor.dev/gvisor/pkg/tcpip" - "gvisor.dev/gvisor/pkg/tcpip/header" "tailscale.com/net/packet" "tailscale.com/types/ipproto" ) @@ -88,13 +86,13 @@ func updateV4PacketChecksums(p *packet.Parsed, old, new netip.Addr) { tr := p.Transport() switch p.IPProto { case ipproto.UDP, ipproto.DCCP: - if len(tr) < header.UDPMinimumSize { + if len(tr) < minUDPSize { // Not enough space for a UDP header. return } updateV4Checksum(tr[6:8], o4[:], n4[:]) case ipproto.TCP: - if len(tr) < header.TCPMinimumSize { + if len(tr) < minTCPSize { // Not enough space for a TCP header. 
return } @@ -112,34 +110,60 @@ func updateV4PacketChecksums(p *packet.Parsed, old, new netip.Addr) { } } +const ( + minUDPSize = 8 + minTCPSize = 20 + minICMPv6Size = 8 + minIPv6Header = 40 + + offsetICMPv6Checksum = 2 + offsetUDPChecksum = 6 + offsetTCPChecksum = 16 +) + // updateV6PacketChecksums updates the checksums in the packet buffer. // p is modified in place. // If p.IPProto is unknown, no checksums are updated. func updateV6PacketChecksums(p *packet.Parsed, old, new netip.Addr) { - if len(p.Buffer()) < 40 { + if len(p.Buffer()) < minIPv6Header { // Not enough space for an IPv6 header. return } - o6, n6 := tcpip.AddrFrom16Slice(old.AsSlice()), tcpip.AddrFrom16Slice(new.AsSlice()) + o6, n6 := old.As16(), new.As16() // Now update the transport layer checksums, where applicable. tr := p.Transport() switch p.IPProto { case ipproto.ICMPv6: - if len(tr) < header.ICMPv6MinimumSize { + if len(tr) < minICMPv6Size { return } - header.ICMPv6(tr).UpdateChecksumPseudoHeaderAddress(o6, n6) + + ss := tr[offsetICMPv6Checksum:] + xsum := binary.BigEndian.Uint16(ss) + binary.BigEndian.PutUint16(ss, + ^checksumUpdate2ByteAlignedAddress(^xsum, o6, n6)) + case ipproto.UDP, ipproto.DCCP: - if len(tr) < header.UDPMinimumSize { + if len(tr) < minUDPSize { return } - header.UDP(tr).UpdateChecksumPseudoHeaderAddress(o6, n6, true) + ss := tr[offsetUDPChecksum:] + xsum := binary.BigEndian.Uint16(ss) + xsum = ^xsum + xsum = checksumUpdate2ByteAlignedAddress(xsum, o6, n6) + xsum = ^xsum + binary.BigEndian.PutUint16(ss, xsum) case ipproto.TCP: - if len(tr) < header.TCPMinimumSize { + if len(tr) < minTCPSize { return } - header.TCP(tr).UpdateChecksumPseudoHeaderAddress(o6, n6, true) + ss := tr[offsetTCPChecksum:] + xsum := binary.BigEndian.Uint16(ss) + xsum = ^xsum + xsum = checksumUpdate2ByteAlignedAddress(xsum, o6, n6) + xsum = ^xsum + binary.BigEndian.PutUint16(ss, xsum) case ipproto.SCTP: // No transport layer update required. } @@ -195,3 +219,77 @@ func updateV4Checksum(oldSum, old, new []byte) { hcPrime := ^uint16(cPrime) binary.BigEndian.PutUint16(oldSum, hcPrime) } + +// checksumUpdate2ByteAlignedAddress updates an address in a calculated +// checksum. +// +// The addresses must have the same length and must contain an even number +// of bytes. The address MUST begin at a 2-byte boundary in the original buffer. +// +// This implementation is copied from gVisor, but updated to use [16]byte. +func checksumUpdate2ByteAlignedAddress(xsum uint16, old, new [16]byte) uint16 { + const uint16Bytes = 2 + + oldAddr := old[:] + newAddr := new[:] + + // As per RFC 1071 page 4, + // (4) Incremental Update + // + // ... + // + // To update the checksum, simply add the differences of the + // sixteen bit integers that have been changed. To see why this + // works, observe that every 16-bit integer has an additive inverse + // and that addition is associative. From this it follows that + // given the original value m, the new value m', and the old + // checksum C, the new checksum C' is: + // + // C' = C + (-m) + m' = C + (m' - m) + for len(oldAddr) != 0 { + // Convert the 2 byte sequences to uint16 values then apply the increment + // update. + xsum = checksumUpdate2ByteAlignedUint16(xsum, (uint16(oldAddr[0])<<8)+uint16(oldAddr[1]), (uint16(newAddr[0])<<8)+uint16(newAddr[1])) + oldAddr = oldAddr[uint16Bytes:] + newAddr = newAddr[uint16Bytes:] + } + + return xsum +} + +// checksumUpdate2ByteAlignedUint16 updates a uint16 value in a calculated +// checksum. 
+// +// The value MUST begin at a 2-byte boundary in the original buffer. +// +// This implementation is copied from gVisor. +func checksumUpdate2ByteAlignedUint16(xsum, old, new uint16) uint16 { + // As per RFC 1071 page 4, + // (4) Incremental Update + // + // ... + // + // To update the checksum, simply add the differences of the + // sixteen bit integers that have been changed. To see why this + // works, observe that every 16-bit integer has an additive inverse + // and that addition is associative. From this it follows that + // given the original value m, the new value m', and the old + // checksum C, the new checksum C' is: + // + // C' = C + (-m) + m' = C + (m' - m) + if old == new { + return xsum + } + return checksumCombine(xsum, checksumCombine(new, ^old)) +} + +// checksumCombine combines the two uint16 to form their checksum. This is done +// by adding them and the carry. +// +// Note that checksum a must have been computed on an even number of bytes. +// +// This implementation is copied from gVisor. +func checksumCombine(a, b uint16) uint16 { + v := uint32(a) + uint32(b) + return uint16(v + v>>16) +} From eaecc0be544a592473b55fd32d46dcae7fb68b19 Mon Sep 17 00:00:00 2001 From: Mahyar Mirrashed <59240843+mahyarmirrashed@users.noreply.github.com> Date: Fri, 26 Sep 2025 15:42:16 -0500 Subject: [PATCH 1411/1708] cmd/tailscale/cli: use tabwriter for tailscale status (#16596) Fixes #17238 Signed-off-by: Mahyar Mirrashed --- cmd/tailscale/cli/status.go | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index 94e0977fe..89b18335b 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -4,7 +4,6 @@ package cli import ( - "bytes" "cmp" "context" "encoding/json" @@ -16,6 +15,7 @@ import ( "net/netip" "os" "strings" + "text/tabwriter" "github.com/peterbourgon/ff/v3/ffcli" "github.com/toqueteos/webbrowser" @@ -56,6 +56,7 @@ https://github.com/tailscale/tailscale/blob/main/ipn/ipnstate/ipnstate.go fs.BoolVar(&statusArgs.peers, "peers", true, "show status of peers") fs.StringVar(&statusArgs.listen, "listen", "127.0.0.1:8384", "listen address for web mode; use port 0 for automatic") fs.BoolVar(&statusArgs.browser, "browser", true, "Open a browser in web mode") + fs.BoolVar(&statusArgs.header, "header", false, "show column headers in table format") return fs })(), } @@ -68,6 +69,7 @@ var statusArgs struct { active bool // in CLI mode, filter output to only peers with active sessions self bool // in CLI mode, show status of local machine peers bool // in CLI mode, show status of peer machines + header bool // in CLI mode, show column headers in table format } const mullvadTCD = "mullvad.ts.net." @@ -151,10 +153,15 @@ func runStatus(ctx context.Context, args []string) error { os.Exit(1) } - var buf bytes.Buffer - f := func(format string, a ...any) { fmt.Fprintf(&buf, format, a...) } + w := tabwriter.NewWriter(Stdout, 0, 0, 2, ' ', 0) + f := func(format string, a ...any) { fmt.Fprintf(w, format, a...) 
} + if statusArgs.header { + fmt.Fprintln(w, "IP\tHostname\tOwner\tOS\tStatus\t") + fmt.Fprintln(w, "--\t--------\t-----\t--\t------\t") + } + printPS := func(ps *ipnstate.PeerStatus) { - f("%-15s %-20s %-12s %-7s ", + f("%s\t%s\t%s\t%s\t", firstIPString(ps.TailscaleIPs), dnsOrQuoteHostname(st, ps), ownerLogin(st, ps), @@ -199,7 +206,7 @@ func runStatus(ctx context.Context, args []string) error { if anyTraffic { f(", tx %d rx %d", ps.TxBytes, ps.RxBytes) } - f("\n") + f("\t\n") } if statusArgs.self && st.Self != nil { @@ -229,7 +236,8 @@ func runStatus(ctx context.Context, args []string) error { printPS(ps) } } - Stdout.Write(buf.Bytes()) + w.Flush() + if locBasedExitNode { outln() printf("# To see the full list of exit nodes, including location-based exit nodes, run `tailscale exit-node list` \n") From 87ee0f4e982cbb252d03d31beec251dad9c8ba1c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 13:05:20 -0700 Subject: [PATCH 1412/1708] ipn/ipnlocal: move last unconditional gvisor import, complete ts_omit_netstack support Fixes #17283 Change-Id: Ia84d269683e4a68d7d10562561204934eeaf53bb Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-minbox.txt | 17 +---- cmd/tailscaled/deps_test.go | 13 ++++ .../feature_netstack_disabled.go | 2 +- .../buildfeatures/feature_netstack_enabled.go | 2 +- feature/featuretags/featuretags.go | 2 +- ipn/ipnlocal/local.go | 60 --------------- ipn/ipnlocal/netstack.go | 74 +++++++++++++++++++ 7 files changed, 91 insertions(+), 79 deletions(-) create mode 100644 ipn/ipnlocal/netstack.go diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 3a7469c0f..144871c9b 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -53,21 +53,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/vishvananda/netns from github.com/tailscale/netlink+ 💣 go4.org/mem from tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ - gvisor.dev/gvisor/pkg/atomicbitops from gvisor.dev/gvisor/pkg/buffer+ - gvisor.dev/gvisor/pkg/bits from gvisor.dev/gvisor/pkg/buffer - 💣 gvisor.dev/gvisor/pkg/buffer from gvisor.dev/gvisor/pkg/tcpip - gvisor.dev/gvisor/pkg/context from gvisor.dev/gvisor/pkg/refs - 💣 gvisor.dev/gvisor/pkg/gohacks from gvisor.dev/gvisor/pkg/state/wire+ - gvisor.dev/gvisor/pkg/linewriter from gvisor.dev/gvisor/pkg/log - gvisor.dev/gvisor/pkg/log from gvisor.dev/gvisor/pkg/context+ - gvisor.dev/gvisor/pkg/rand from gvisor.dev/gvisor/pkg/tcpip - gvisor.dev/gvisor/pkg/refs from gvisor.dev/gvisor/pkg/buffer - 💣 gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ - gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state - 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ - gvisor.dev/gvisor/pkg/tcpip from tailscale.com/ipn/ipnlocal - 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer - gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal tailscale.com/atomicfile from tailscale.com/ipn+ @@ -283,7 +268,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna - golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ + golang.org/x/time/rate 
from tailscale.com/derp+ archive/tar from tailscale.com/clientupdate bufio from compress/flate+ bytes from archive/tar+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 92c6a872c..a41a08f9d 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -186,6 +186,19 @@ func TestOmitDBus(t *testing.T) { }.Check(t) } +func TestNetstack(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_gro,ts_omit_netstack,ts_omit_outboundproxy,ts_omit_serve,ts_omit_ssh,ts_omit_webclient,ts_omit_tap", + OnDep: func(dep string) { + if strings.Contains(dep, "gvisor") { + t.Errorf("unexpected gvisor dep: %q", dep) + } + }, + }.Check(t) +} + func TestOmitPortlist(t *testing.T) { deptest.DepChecker{ GOOS: "linux", diff --git a/feature/buildfeatures/feature_netstack_disabled.go b/feature/buildfeatures/feature_netstack_disabled.go index 7369645a0..acb6e8e76 100644 --- a/feature/buildfeatures/feature_netstack_disabled.go +++ b/feature/buildfeatures/feature_netstack_disabled.go @@ -7,7 +7,7 @@ package buildfeatures -// HasNetstack is whether the binary was built with support for modular feature "gVisor netstack (userspace networking) support (TODO; not yet omittable)". +// HasNetstack is whether the binary was built with support for modular feature "gVisor netstack (userspace networking) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_netstack" build tag. // It's a const so it can be used for dead code elimination. const HasNetstack = false diff --git a/feature/buildfeatures/feature_netstack_enabled.go b/feature/buildfeatures/feature_netstack_enabled.go index a7e57098b..04f671185 100644 --- a/feature/buildfeatures/feature_netstack_enabled.go +++ b/feature/buildfeatures/feature_netstack_enabled.go @@ -7,7 +7,7 @@ package buildfeatures -// HasNetstack is whether the binary was built with support for modular feature "gVisor netstack (userspace networking) support (TODO; not yet omittable)". +// HasNetstack is whether the binary was built with support for modular feature "gVisor netstack (userspace networking) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_netstack" build tag. // It's a const so it can be used for dead code elimination. 
const HasNetstack = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 1db377277..25426c973 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -121,7 +121,7 @@ var Features = map[FeatureTag]FeatureMeta{ }, "portlist": {"PortList", "Optionally advertise listening service ports", nil}, "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, - "netstack": {"Netstack", "gVisor netstack (userspace networking) support (TODO; not yet omittable)", nil}, + "netstack": {"Netstack", "gVisor netstack (userspace networking) support", nil}, "networkmanager": { Sym: "NetworkManager", Desc: "Linux NetworkManager integration", diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 62a3a2131..4b8032e9c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -38,7 +38,6 @@ import ( "go4.org/mem" "go4.org/netipx" "golang.org/x/net/dns/dnsmessage" - "gvisor.dev/gvisor/pkg/tcpip" "tailscale.com/appc" "tailscale.com/client/tailscale/apitype" "tailscale.com/clientupdate" @@ -4643,65 +4642,6 @@ var ( hookServeClearVIPServicesTCPPortsInterceptedLocked feature.Hook[func(*LocalBackend)] ) -// TCPHandlerForDst returns a TCP handler for connections to dst, or nil if -// no handler is needed. It also returns a list of TCP socket options to -// apply to the socket before calling the handler. -// TCPHandlerForDst is called both for connections to our node's local IP -// as well as to the service IP (quad 100). -func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c net.Conn) error, opts []tcpip.SettableSocketOption) { - // First handle internal connections to the service IP - hittingServiceIP := dst.Addr() == magicDNSIP || dst.Addr() == magicDNSIPv6 - if hittingServiceIP { - switch dst.Port() { - case 80: - // TODO(mpminardi): do we want to show an error message if the web client - // has been disabled instead of the more "basic" web UI? - if b.ShouldRunWebClient() { - return b.handleWebClientConn, opts - } - return b.HandleQuad100Port80Conn, opts - case DriveLocalPort: - return b.handleDriveConn, opts - } - } - - if f, ok := hookServeTCPHandlerForVIPService.GetOk(); ok { - if handler := f(b, dst, src); handler != nil { - return handler, opts - } - } - // Then handle external connections to the local IP. - if !b.isLocalIP(dst.Addr()) { - return nil, nil - } - if dst.Port() == 22 && b.ShouldRunSSH() { - // Use a higher keepalive idle time for SSH connections, as they are - // typically long lived and idle connections are more likely to be - // intentional. Ideally we would turn this off entirely, but we can't - // tell the difference between a long lived connection that is idle - // vs a connection that is dead because the peer has gone away. - // We pick 72h as that is typically sufficient for a long weekend. - opts = append(opts, ptr.To(tcpip.KeepaliveIdleOption(72*time.Hour))) - return b.handleSSHConn, opts - } - // TODO(will,sonia): allow customizing web client port ? 
- if dst.Port() == webClientPort && b.ShouldExposeRemoteWebClient() { - return b.handleWebClientConn, opts - } - if port, ok := b.GetPeerAPIPort(dst.Addr()); ok && dst.Port() == port { - return func(c net.Conn) error { - b.handlePeerAPIConn(src, dst, c) - return nil - }, opts - } - if f, ok := hookTCPHandlerForServe.GetOk(); ok { - if handler := f(b, dst.Port(), src, nil); handler != nil { - return handler, opts - } - } - return nil, nil -} - func (b *LocalBackend) handleDriveConn(conn net.Conn) error { fs, ok := b.sys.DriveForLocal.GetOK() if !ok || !b.DriveAccessEnabled() { diff --git a/ipn/ipnlocal/netstack.go b/ipn/ipnlocal/netstack.go new file mode 100644 index 000000000..f7ffd0305 --- /dev/null +++ b/ipn/ipnlocal/netstack.go @@ -0,0 +1,74 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netstack + +package ipnlocal + +import ( + "net" + "net/netip" + "time" + + "gvisor.dev/gvisor/pkg/tcpip" + "tailscale.com/types/ptr" +) + +// TCPHandlerForDst returns a TCP handler for connections to dst, or nil if +// no handler is needed. It also returns a list of TCP socket options to +// apply to the socket before calling the handler. +// TCPHandlerForDst is called both for connections to our node's local IP +// as well as to the service IP (quad 100). +func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c net.Conn) error, opts []tcpip.SettableSocketOption) { + // First handle internal connections to the service IP + hittingServiceIP := dst.Addr() == magicDNSIP || dst.Addr() == magicDNSIPv6 + if hittingServiceIP { + switch dst.Port() { + case 80: + // TODO(mpminardi): do we want to show an error message if the web client + // has been disabled instead of the more "basic" web UI? + if b.ShouldRunWebClient() { + return b.handleWebClientConn, opts + } + return b.HandleQuad100Port80Conn, opts + case DriveLocalPort: + return b.handleDriveConn, opts + } + } + + if f, ok := hookServeTCPHandlerForVIPService.GetOk(); ok { + if handler := f(b, dst, src); handler != nil { + return handler, opts + } + } + // Then handle external connections to the local IP. + if !b.isLocalIP(dst.Addr()) { + return nil, nil + } + if dst.Port() == 22 && b.ShouldRunSSH() { + // Use a higher keepalive idle time for SSH connections, as they are + // typically long lived and idle connections are more likely to be + // intentional. Ideally we would turn this off entirely, but we can't + // tell the difference between a long lived connection that is idle + // vs a connection that is dead because the peer has gone away. + // We pick 72h as that is typically sufficient for a long weekend. + opts = append(opts, ptr.To(tcpip.KeepaliveIdleOption(72*time.Hour))) + return b.handleSSHConn, opts + } + // TODO(will,sonia): allow customizing web client port ? 
+ if dst.Port() == webClientPort && b.ShouldExposeRemoteWebClient() { + return b.handleWebClientConn, opts + } + if port, ok := b.GetPeerAPIPort(dst.Addr()); ok && dst.Port() == port { + return func(c net.Conn) error { + b.handlePeerAPIConn(src, dst, c) + return nil + }, opts + } + if f, ok := hookTCPHandlerForServe.GetOk(); ok { + if handler := f(b, dst.Port(), src, nil); handler != nil { + return handler, opts + } + } + return nil, nil +} From 832e94607e47258d36c07d6786d4ac12b170e63b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 13:33:08 -0700 Subject: [PATCH 1413/1708] doctor: add ts_omit_doctor support Updates #12614 Change-Id: I84c166c4b99ca75d70abe4087e5ff3f7d90d4bcc Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 9 +- cmd/tailscaled/depaware-minbox.txt | 11 +-- cmd/tailscaled/depaware.txt | 9 +- cmd/tsidp/depaware.txt | 9 +- .../buildfeatures/feature_doctor_disabled.go | 13 +++ .../buildfeatures/feature_doctor_enabled.go | 13 +++ feature/condregister/maybe_doctor.go | 8 ++ feature/doctor/doctor.go | 95 +++++++++++++++++++ feature/featuretags/featuretags.go | 1 + feature/taildrop/peerapi_test.go | 2 + ipn/ipnlocal/local.go | 56 +---------- ipn/ipnlocal/peerapi.go | 24 +---- ipn/localapi/localapi.go | 4 +- tsnet/depaware.txt | 9 +- 14 files changed, 154 insertions(+), 109 deletions(-) create mode 100644 feature/buildfeatures/feature_doctor_disabled.go create mode 100644 feature/buildfeatures/feature_doctor_enabled.go create mode 100644 feature/condregister/maybe_doctor.go create mode 100644 feature/doctor/doctor.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index ea0e08b19..2adbd5f5d 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -185,7 +185,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs LD github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs - L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + L 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf github.com/spf13/pflag from k8s.io/client-go/tools/clientcmd W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket @@ -200,7 +200,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile+ - L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ + L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web @@ -787,10 +787,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ tailscale.com/disco from tailscale.com/net/tstun+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal 
- tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ @@ -868,7 +864,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/portmapper from tailscale.com/feature/portmapper tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet - tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 144871c9b..08d7d59c6 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -37,9 +37,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/mdlayher/sdnotify from tailscale.com/util/systemd 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ github.com/mitchellh/go-ps from tailscale.com/safesocket - 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf github.com/tailscale/hujson from tailscale.com/ipn/conffile - 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ + 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ @@ -69,10 +69,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ tailscale.com/disco from tailscale.com/net/tstun+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal tailscale.com/drive from tailscale.com/ipn+ tailscale.com/envknob from tailscale.com/cmd/tailscaled+ tailscale.com/envknob/featureknob from tailscale.com/ipn/ipnlocal @@ -127,7 +123,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ - tailscale.com/net/routetable from tailscale.com/doctor/routetable tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ @@ -242,7 +237,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - golang.org/x/exp/constraints from tailscale.com/doctor/permissions+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from 
tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from net+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index acd8e0459..579af5c0d 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -259,10 +259,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ tailscale.com/disco from tailscale.com/feature/relayserver+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal + tailscale.com/doctor from tailscale.com/feature/doctor + tailscale.com/doctor/ethtool from tailscale.com/feature/doctor + 💣 tailscale.com/doctor/permissions from tailscale.com/feature/doctor + tailscale.com/doctor/routetable from tailscale.com/feature/doctor tailscale.com/drive from tailscale.com/client/local+ tailscale.com/drive/driveimpl from tailscale.com/cmd/tailscaled tailscale.com/drive/driveimpl/compositedav from tailscale.com/drive/driveimpl @@ -276,6 +276,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister + tailscale.com/feature/doctor from tailscale.com/feature/condregister tailscale.com/feature/drive from tailscale.com/feature/condregister L tailscale.com/feature/linuxdnsfight from tailscale.com/feature/condregister tailscale.com/feature/portlist from tailscale.com/feature/condregister diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 69904c976..270edd371 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -132,7 +132,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack - L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + L 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket W 💣 github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio @@ -146,7 +146,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile - L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ + L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web @@ -229,10 +229,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ 
tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ tailscale.com/disco from tailscale.com/net/tstun+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ @@ -299,7 +295,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/portmapper from tailscale.com/feature/portmapper tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet - tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ diff --git a/feature/buildfeatures/feature_doctor_disabled.go b/feature/buildfeatures/feature_doctor_disabled.go new file mode 100644 index 000000000..8c15e951e --- /dev/null +++ b/feature/buildfeatures/feature_doctor_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_doctor + +package buildfeatures + +// HasDoctor is whether the binary was built with support for modular feature "Diagnose possible issues with Tailscale and its host environment". +// Specifically, it's whether the binary was NOT built with the "ts_omit_doctor" build tag. +// It's a const so it can be used for dead code elimination. +const HasDoctor = false diff --git a/feature/buildfeatures/feature_doctor_enabled.go b/feature/buildfeatures/feature_doctor_enabled.go new file mode 100644 index 000000000..a8a0bb7d2 --- /dev/null +++ b/feature/buildfeatures/feature_doctor_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_doctor + +package buildfeatures + +// HasDoctor is whether the binary was built with support for modular feature "Diagnose possible issues with Tailscale and its host environment". +// Specifically, it's whether the binary was NOT built with the "ts_omit_doctor" build tag. +// It's a const so it can be used for dead code elimination. +const HasDoctor = true diff --git a/feature/condregister/maybe_doctor.go b/feature/condregister/maybe_doctor.go new file mode 100644 index 000000000..3dc9ffa53 --- /dev/null +++ b/feature/condregister/maybe_doctor.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_doctor + +package condregister + +import _ "tailscale.com/feature/doctor" diff --git a/feature/doctor/doctor.go b/feature/doctor/doctor.go new file mode 100644 index 000000000..875b57d14 --- /dev/null +++ b/feature/doctor/doctor.go @@ -0,0 +1,95 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// The doctor package registers the "doctor" problem diagnosis support into the +// rest of Tailscale. 
+package doctor + +import ( + "context" + "fmt" + "html" + "net/http" + "time" + + "tailscale.com/doctor" + "tailscale.com/doctor/ethtool" + "tailscale.com/doctor/permissions" + "tailscale.com/doctor/routetable" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/net/tsaddr" + "tailscale.com/types/logger" +) + +func init() { + ipnlocal.HookDoctor.Set(visitDoctor) + ipnlocal.RegisterPeerAPIHandler("/v0/doctor", handleServeDoctor) +} + +func handleServeDoctor(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + if !h.CanDebug() { + http.Error(w, "denied; no debug access", http.StatusForbidden) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + fmt.Fprintln(w, "
<h1>Doctor Output</h1>")
+
+	fmt.Fprintln(w, "<pre>")
+
+	b := h.LocalBackend()
+	visitDoctor(r.Context(), b, func(format string, args ...any) {
+		line := fmt.Sprintf(format, args...)
+		fmt.Fprintln(w, html.EscapeString(line))
+	})
+
+	fmt.Fprintln(w, "</pre>
                      ") +} + +func visitDoctor(ctx context.Context, b *ipnlocal.LocalBackend, logf logger.Logf) { + // We can write logs too fast for logtail to handle, even when + // opting-out of rate limits. Limit ourselves to at most one message + // per 20ms and a burst of 60 log lines, which should be fast enough to + // not block for too long but slow enough that we can upload all lines. + logf = logger.SlowLoggerWithClock(ctx, logf, 20*time.Millisecond, 60, b.Clock().Now) + + var checks []doctor.Check + checks = append(checks, + permissions.Check{}, + routetable.Check{}, + ethtool.Check{}, + ) + + // Print a log message if any of the global DNS resolvers are Tailscale + // IPs; this can interfere with our ability to connect to the Tailscale + // controlplane. + checks = append(checks, doctor.CheckFunc("dns-resolvers", func(_ context.Context, logf logger.Logf) error { + nm := b.NetMap() + if nm == nil { + return nil + } + + for i, resolver := range nm.DNS.Resolvers { + ipp, ok := resolver.IPPort() + if ok && tsaddr.IsTailscaleIP(ipp.Addr()) { + logf("resolver %d is a Tailscale address: %v", i, resolver) + } + } + for i, resolver := range nm.DNS.FallbackResolvers { + ipp, ok := resolver.IPPort() + if ok && tsaddr.IsTailscaleIP(ipp.Addr()) { + logf("fallback resolver %d is a Tailscale address: %v", i, resolver) + } + } + return nil + })) + + // TODO(andrew): more + + numChecks := len(checks) + checks = append(checks, doctor.CheckFunc("numchecks", func(_ context.Context, log logger.Logf) error { + log("%d checks", numChecks) + return nil + })) + + doctor.RunChecks(ctx, logf, checks...) +} diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 25426c973..2edecef58 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -105,6 +105,7 @@ var Features = map[FeatureTag]FeatureMeta{ Deps: []FeatureTag{"portmapper"}, }, "desktop_sessions": {"DesktopSessions", "Desktop sessions support", nil}, + "doctor": {"Doctor", "Diagnose possible issues with Tailscale and its host environment", nil}, "drive": {"Drive", "Tailscale Drive (file server) support", nil}, "gro": { Sym: "GRO", diff --git a/feature/taildrop/peerapi_test.go b/feature/taildrop/peerapi_test.go index 633997354..254d8794e 100644 --- a/feature/taildrop/peerapi_test.go +++ b/feature/taildrop/peerapi_test.go @@ -33,11 +33,13 @@ type peerAPIHandler struct { isSelf bool // whether peerNode is owned by same user as this node selfNode tailcfg.NodeView // this node; always non-nil peerNode tailcfg.NodeView // peerNode is who's making the request + canDebug bool // whether peerNode can debug this node (goroutines, metrics, magicsock internal state, etc) } func (h *peerAPIHandler) IsSelfUntagged() bool { return !h.selfNode.IsTagged() && !h.peerNode.IsTagged() && h.isSelf } +func (h *peerAPIHandler) CanDebug() bool { return h.canDebug } func (h *peerAPIHandler) Peer() tailcfg.NodeView { return h.peerNode } func (h *peerAPIHandler) Self() tailcfg.NodeView { return h.selfNode } func (h *peerAPIHandler) RemoteAddr() netip.AddrPort { return h.remoteAddr } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 4b8032e9c..dd0a2f9f1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -43,10 +43,6 @@ import ( "tailscale.com/clientupdate" "tailscale.com/control/controlclient" "tailscale.com/control/controlknobs" - "tailscale.com/doctor" - "tailscale.com/doctor/ethtool" - "tailscale.com/doctor/permissions" - "tailscale.com/doctor/routetable" 
"tailscale.com/drive" "tailscale.com/envknob" "tailscale.com/envknob/featureknob" @@ -6706,56 +6702,8 @@ func (b *LocalBackend) handleQuad100Port80Conn(w http.ResponseWriter, r *http.Re io.WriteString(w, "
                    \n") } -func (b *LocalBackend) Doctor(ctx context.Context, logf logger.Logf) { - // We can write logs too fast for logtail to handle, even when - // opting-out of rate limits. Limit ourselves to at most one message - // per 20ms and a burst of 60 log lines, which should be fast enough to - // not block for too long but slow enough that we can upload all lines. - logf = logger.SlowLoggerWithClock(ctx, logf, 20*time.Millisecond, 60, b.clock.Now) - - var checks []doctor.Check - checks = append(checks, - permissions.Check{}, - routetable.Check{}, - ethtool.Check{}, - ) - - // Print a log message if any of the global DNS resolvers are Tailscale - // IPs; this can interfere with our ability to connect to the Tailscale - // controlplane. - checks = append(checks, doctor.CheckFunc("dns-resolvers", func(_ context.Context, logf logger.Logf) error { - b.mu.Lock() - nm := b.NetMap() - b.mu.Unlock() - if nm == nil { - return nil - } - - for i, resolver := range nm.DNS.Resolvers { - ipp, ok := resolver.IPPort() - if ok && tsaddr.IsTailscaleIP(ipp.Addr()) { - logf("resolver %d is a Tailscale address: %v", i, resolver) - } - } - for i, resolver := range nm.DNS.FallbackResolvers { - ipp, ok := resolver.IPPort() - if ok && tsaddr.IsTailscaleIP(ipp.Addr()) { - logf("fallback resolver %d is a Tailscale address: %v", i, resolver) - } - } - return nil - })) - - // TODO(andrew): more - - numChecks := len(checks) - checks = append(checks, doctor.CheckFunc("numchecks", func(_ context.Context, log logger.Logf) error { - log("%d checks", numChecks) - return nil - })) - - doctor.RunChecks(ctx, logf, checks...) -} +// HookDoctor is an optional hook for the "doctor" problem diagnosis feature. +var HookDoctor feature.Hook[func(context.Context, *LocalBackend, logger.Logf)] // SetDevStateStore updates the LocalBackend's state storage to the provided values. // diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 886a71291..9d2b49a38 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -217,6 +217,7 @@ type peerAPIHandler struct { type PeerAPIHandler interface { Peer() tailcfg.NodeView PeerCaps() tailcfg.PeerCapMap + CanDebug() bool // can remote node can debug this node (internal state, etc) Self() tailcfg.NodeView LocalBackend() *LocalBackend IsSelfUntagged() bool // whether the peer is untagged and the same as this user @@ -380,9 +381,6 @@ func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { case "/v0/interfaces": h.handleServeInterfaces(w, r) return - case "/v0/doctor": - h.handleServeDoctor(w, r) - return case "/v0/sockstats": h.handleServeSockStats(w, r) return @@ -455,24 +453,6 @@ func (h *peerAPIHandler) handleServeInterfaces(w http.ResponseWriter, r *http.Re fmt.Fprintln(w, "") } -func (h *peerAPIHandler) handleServeDoctor(w http.ResponseWriter, r *http.Request) { - if !h.canDebug() { - http.Error(w, "denied; no debug access", http.StatusForbidden) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - fmt.Fprintln(w, "
<h1>Doctor Output</h1>")
-
-	fmt.Fprintln(w, "<pre>")
-
-	h.ps.b.Doctor(r.Context(), func(format string, args ...any) {
-		line := fmt.Sprintf(format, args...)
-		fmt.Fprintln(w, html.EscapeString(line))
-	})
-
-	fmt.Fprintln(w, "</pre>
                    ") -} - func (h *peerAPIHandler) handleServeSockStats(w http.ResponseWriter, r *http.Request) { if !h.canDebug() { http.Error(w, "denied; no debug access", http.StatusForbidden) @@ -571,6 +551,8 @@ func (h *peerAPIHandler) handleServeSockStats(w http.ResponseWriter, r *http.Req fmt.Fprintln(w, "") } +func (h *peerAPIHandler) CanDebug() bool { return h.canDebug() } + // canDebug reports whether h can debug this node (goroutines, metrics, // magicsock internal state, etc). func (h *peerAPIHandler) canDebug() bool { diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index a83a2e17e..e8801e1ba 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -402,7 +402,9 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) { } if defBool(r.URL.Query().Get("diagnose"), false) { - h.b.Doctor(r.Context(), logger.WithPrefix(h.logf, "diag: ")) + if f, ok := ipnlocal.HookDoctor.GetOk(); ok { + f(r.Context(), h.b, logger.WithPrefix(h.logf, "diag: ")) + } } w.Header().Set("Content-Type", "text/plain") fmt.Fprintln(w, startMarker) diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index ece4345d5..c196cc14d 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -132,7 +132,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) LA 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ LDW 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket DI github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack - L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + L 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket W 💣 github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio @@ -146,7 +146,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp LDW github.com/tailscale/hujson from tailscale.com/ipn/conffile - L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ + L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth LDW github.com/tailscale/web-client-prebuilt from tailscale.com/client/web @@ -225,10 +225,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ tailscale.com/disco from tailscale.com/net/tstun+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ @@ -295,7 +291,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/portmapper from tailscale.com/feature/portmapper tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet - 
tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ From 9ae8155bab4e5bfafec0ebe90931704cda1d69c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 26 Sep 2025 17:30:24 -0400 Subject: [PATCH 1414/1708] cmol/pprof health (#17303) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit health: ensure timers are cleaned up Updates tailscale/corp#32696 Signed-off-by: Claus Lensbøl --- health/health.go | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/health/health.go b/health/health.go index 3d1c46a3d..d60762e31 100644 --- a/health/health.go +++ b/health/health.go @@ -143,15 +143,30 @@ func NewTracker(bus *eventbus.Bus) *Tracker { panic("no eventbus set") } - cli := bus.Client("health.Tracker") + ec := bus.Client("health.Tracker") t := &Tracker{ - eventClient: cli, - changePub: eventbus.Publish[Change](cli), + eventClient: ec, + changePub: eventbus.Publish[Change](ec), } t.timer = t.clock().AfterFunc(time.Minute, t.timerSelfCheck) + + ec.Monitor(t.awaitEventClientDone) + return t } +func (t *Tracker) awaitEventClientDone(ec *eventbus.Client) { + <-ec.Done() + t.mu.Lock() + defer t.mu.Unlock() + + for _, timer := range t.pendingVisibleTimers { + timer.Stop() + } + t.timer.Stop() + clear(t.pendingVisibleTimers) +} + func (t *Tracker) now() time.Time { if t.testClock != nil { return t.testClock.Now() From e9dae5441e4fb877554ecc8b274a5c008f736755 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 18:21:50 -0700 Subject: [PATCH 1415/1708] tka: use ts_omit_tailnetlock in another spot, for ed25519consensus I noticed this while modularizing clientupdate. With this in first, moving clientupdate to be modular removes a bunch more stuff from the minimal build + tsnet. 
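The new tka/verify.go (built under !ts_omit_tailnetlock) and tka/verify_disabled.go (built under ts_omit_tailnetlock) below carry the real and stubbed signature verification respectively, so a build that drops Tailnet Lock would presumably be produced with something like:

    go build -tags ts_omit_tailnetlock ./cmd/tailscaled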
Updates #17115 Change-Id: I44bd055fca65808633fd3a848b0bbc09b00ad4fa Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-minbox.txt | 2 +- tka/key.go | 22 ------------------ tka/tka.go | 2 +- tka/verify.go | 36 ++++++++++++++++++++++++++++++ tka/verify_disabled.go | 18 +++++++++++++++ 5 files changed, 56 insertions(+), 24 deletions(-) create mode 100644 tka/verify.go create mode 100644 tka/verify_disabled.go diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 08d7d59c6..cf4a9b039 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -20,7 +20,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/google/nftables/expr from github.com/google/nftables+ github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ github.com/google/nftables/xt from github.com/google/nftables/expr+ - github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ + github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd diff --git a/tka/key.go b/tka/key.go index 07736795d..dca1b4416 100644 --- a/tka/key.go +++ b/tka/key.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" - "github.com/hdevalence/ed25519consensus" "tailscale.com/types/tkatype" ) @@ -136,24 +135,3 @@ func (k Key) StaticValidate() error { } return nil } - -// Verify returns a nil error if the signature is valid over the -// provided AUM BLAKE2s digest, using the given key. -func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error { - // NOTE(tom): Even if we can compute the public from the KeyID, - // its possible for the KeyID to be attacker-controlled - // so we should use the public contained in the state machine. - switch key.Kind { - case Key25519: - if len(key.Public) != ed25519.PublicKeySize { - return fmt.Errorf("ed25519 key has wrong length: %d", len(key.Public)) - } - if ed25519consensus.Verify(ed25519.PublicKey(key.Public), aumDigest[:], s.Signature) { - return nil - } - return errors.New("invalid signature") - - default: - return fmt.Errorf("unhandled key type: %v", key.Kind) - } -} diff --git a/tka/tka.go b/tka/tka.go index 3929ff22a..234c87fe1 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -3,7 +3,7 @@ //go:build !ts_omit_tailnetlock -// Package tka (WIP) implements the Tailnet Key Authority. +// Package tka implements the Tailnet Key Authority (TKA) for Tailnet Lock. package tka import ( diff --git a/tka/verify.go b/tka/verify.go new file mode 100644 index 000000000..e4e22e551 --- /dev/null +++ b/tka/verify.go @@ -0,0 +1,36 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tailnetlock + +package tka + +import ( + "crypto/ed25519" + "errors" + "fmt" + + "github.com/hdevalence/ed25519consensus" + "tailscale.com/types/tkatype" +) + +// signatureVerify returns a nil error if the signature is valid over the +// provided AUM BLAKE2s digest, using the given key. +func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error { + // NOTE(tom): Even if we can compute the public from the KeyID, + // its possible for the KeyID to be attacker-controlled + // so we should use the public contained in the state machine. 
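	// (ed25519consensus, rather than crypto/ed25519, is presumably used here so
	// that signature validity follows the consensus-friendly ZIP-215 rules and
	// every node reaches the same verdict on whether an AUM signature verifies.)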
+ switch key.Kind { + case Key25519: + if len(key.Public) != ed25519.PublicKeySize { + return fmt.Errorf("ed25519 key has wrong length: %d", len(key.Public)) + } + if ed25519consensus.Verify(ed25519.PublicKey(key.Public), aumDigest[:], s.Signature) { + return nil + } + return errors.New("invalid signature") + + default: + return fmt.Errorf("unhandled key type: %v", key.Kind) + } +} diff --git a/tka/verify_disabled.go b/tka/verify_disabled.go new file mode 100644 index 000000000..ba72f93e2 --- /dev/null +++ b/tka/verify_disabled.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_tailnetlock + +package tka + +import ( + "errors" + + "tailscale.com/types/tkatype" +) + +// signatureVerify returns a nil error if the signature is valid over the +// provided AUM BLAKE2s digest, using the given key. +func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error { + return errors.New("tailnetlock disabled in build") +} From d01a0adfa6c9bbf435bd8b5042e203c46fde6a18 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 21:17:07 -0700 Subject: [PATCH 1416/1708] types/dnstype: delete unused func, move other one to its sole caller The dnstype package is used by tailcfg, which tries to be light and leafy. But it brings in dnstype. So dnstype shouldn't bring in x/net/dns/dnsmessage. Updates #12614 Change-Id: I043637a7ce7fed097e648001f13ca1927a781def Signed-off-by: Brad Fitzpatrick --- cmd/stund/depaware.txt | 2 +- ipn/localapi/localapi.go | 40 ++++++++++++- types/dnstype/messagetypes-string.go | 84 ---------------------------- 3 files changed, 39 insertions(+), 87 deletions(-) delete mode 100644 types/dnstype/messagetypes-string.go diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index c8a18eb07..97cf14cf0 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -97,7 +97,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from tailscale.com/tsweb/varz - golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/dns/dnsmessage from net golang.org/x/net/http/httpguts from net/http+ golang.org/x/net/http/httpproxy from net/http golang.org/x/net/http2/hpack from net/http+ diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index e8801e1ba..e628e677b 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -38,7 +38,6 @@ import ( "tailscale.com/net/netutil" "tailscale.com/tailcfg" "tailscale.com/tstime" - "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" @@ -1995,7 +1994,7 @@ func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { queryType := q.Get("type") qt := dnsmessage.TypeA if queryType != "" { - t, err := dnstype.DNSMessageTypeForString(queryType) + t, err := dnsMessageTypeForString(queryType) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return @@ -2016,6 +2015,43 @@ func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { }) } +// dnsMessageTypeForString returns the dnsmessage.Type for the given string. +// For example, DNSMessageTypeForString("A") returns dnsmessage.TypeA. 
+func dnsMessageTypeForString(s string) (t dnsmessage.Type, err error) { + s = strings.TrimSpace(strings.ToUpper(s)) + switch s { + case "AAAA": + return dnsmessage.TypeAAAA, nil + case "ALL": + return dnsmessage.TypeALL, nil + case "A": + return dnsmessage.TypeA, nil + case "CNAME": + return dnsmessage.TypeCNAME, nil + case "HINFO": + return dnsmessage.TypeHINFO, nil + case "MINFO": + return dnsmessage.TypeMINFO, nil + case "MX": + return dnsmessage.TypeMX, nil + case "NS": + return dnsmessage.TypeNS, nil + case "OPT": + return dnsmessage.TypeOPT, nil + case "PTR": + return dnsmessage.TypePTR, nil + case "SOA": + return dnsmessage.TypeSOA, nil + case "SRV": + return dnsmessage.TypeSRV, nil + case "TXT": + return dnsmessage.TypeTXT, nil + case "WKS": + return dnsmessage.TypeWKS, nil + } + return 0, errors.New("unknown DNS message type: " + s) +} + // serveSuggestExitNode serves a POST endpoint for returning a suggested exit node. func (h *Handler) serveSuggestExitNode(w http.ResponseWriter, r *http.Request) { if r.Method != httpm.GET { diff --git a/types/dnstype/messagetypes-string.go b/types/dnstype/messagetypes-string.go deleted file mode 100644 index 34abea1ba..000000000 --- a/types/dnstype/messagetypes-string.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package dnstype - -import ( - "errors" - "strings" - - "golang.org/x/net/dns/dnsmessage" -) - -// StringForType returns the string representation of a dnsmessage.Type. -// For example, StringForType(dnsmessage.TypeA) returns "A". -func StringForDNSMessageType(t dnsmessage.Type) string { - switch t { - case dnsmessage.TypeAAAA: - return "AAAA" - case dnsmessage.TypeALL: - return "ALL" - case dnsmessage.TypeA: - return "A" - case dnsmessage.TypeCNAME: - return "CNAME" - case dnsmessage.TypeHINFO: - return "HINFO" - case dnsmessage.TypeMINFO: - return "MINFO" - case dnsmessage.TypeMX: - return "MX" - case dnsmessage.TypeNS: - return "NS" - case dnsmessage.TypeOPT: - return "OPT" - case dnsmessage.TypePTR: - return "PTR" - case dnsmessage.TypeSOA: - return "SOA" - case dnsmessage.TypeSRV: - return "SRV" - case dnsmessage.TypeTXT: - return "TXT" - case dnsmessage.TypeWKS: - return "WKS" - } - return "UNKNOWN" -} - -// DNSMessageTypeForString returns the dnsmessage.Type for the given string. -// For example, DNSMessageTypeForString("A") returns dnsmessage.TypeA. 
-func DNSMessageTypeForString(s string) (t dnsmessage.Type, err error) { - s = strings.TrimSpace(strings.ToUpper(s)) - switch s { - case "AAAA": - return dnsmessage.TypeAAAA, nil - case "ALL": - return dnsmessage.TypeALL, nil - case "A": - return dnsmessage.TypeA, nil - case "CNAME": - return dnsmessage.TypeCNAME, nil - case "HINFO": - return dnsmessage.TypeHINFO, nil - case "MINFO": - return dnsmessage.TypeMINFO, nil - case "MX": - return dnsmessage.TypeMX, nil - case "NS": - return dnsmessage.TypeNS, nil - case "OPT": - return dnsmessage.TypeOPT, nil - case "PTR": - return dnsmessage.TypePTR, nil - case "SOA": - return dnsmessage.TypeSOA, nil - case "SRV": - return dnsmessage.TypeSRV, nil - case "TXT": - return dnsmessage.TypeTXT, nil - case "WKS": - return dnsmessage.TypeWKS, nil - } - return 0, errors.New("unknown DNS message type: " + s) -} From 7df7e01d0f3b2015283a4a5045924c47fe0dd1dd Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Sat, 27 Sep 2025 10:23:58 +0300 Subject: [PATCH 1417/1708] tstest/integration/vms,.github/workflows: bump Ubuntu and NixOS for VM tests + cleanup (#16098) This PR cleans up a bunch of things in ./tstest/integration/vms: - Bumps version of Ubuntu that's actually run from CI 20.04 -> 24.04 - Removes Ubuntu 18.04 test - Bumps NixOS 21.05 -> 25.05 Updates#cleanup Signed-off-by: Irbe Krumina --- .github/workflows/test.yml | 2 +- tstest/integration/vms/README.md | 23 +---- tstest/integration/vms/distros.hujson | 18 ++-- tstest/integration/vms/nixos_test.go | 5 +- .../vms/opensuse_leap_15_1_test.go | 85 ------------------- tstest/integration/vms/regex_flag.go | 29 ------- tstest/integration/vms/regex_flag_test.go | 21 ----- tstest/integration/vms/top_level_test.go | 18 +--- tstest/integration/vms/vms_test.go | 19 ----- 9 files changed, 15 insertions(+), 205 deletions(-) delete mode 100644 tstest/integration/vms/opensuse_leap_15_1_test.go delete mode 100644 tstest/integration/vms/regex_flag.go delete mode 100644 tstest/integration/vms/regex_flag_test.go diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 17e08ae9d..c3aa4f1bc 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -328,7 +328,7 @@ jobs: enableCrossOsArchive: true - name: Run VM tests working-directory: src - run: ./tool/go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2004 + run: ./tool/go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2404 env: HOME: "/var/lib/ghrunner/home" TMPDIR: "/tmp" diff --git a/tstest/integration/vms/README.md b/tstest/integration/vms/README.md index 519c3d000..a68ed0514 100644 --- a/tstest/integration/vms/README.md +++ b/tstest/integration/vms/README.md @@ -1,7 +1,6 @@ # End-to-End VM-based Integration Testing -This test spins up a bunch of common linux distributions and then tries to get -them to connect to a +These tests spin up a Tailscale client in a Linux VM and try to connect it to [`testcontrol`](https://pkg.go.dev/tailscale.com/tstest/integration/testcontrol) server. @@ -55,26 +54,6 @@ If you pass the `-no-s3` flag to `go test`, the S3 step will be skipped in favor of downloading the images directly from upstream sources, which may cause the test to fail in odd places. -### Distribution Picking - -This test runs on a large number of distributions. By default it tries to run -everything, which may or may not be ideal for you. 
If you only want to test a -subset of distributions, you can use the `--distro-regex` flag to match a subset -of distributions using a [regular expression](https://golang.org/pkg/regexp/) -such as like this: - -```console -$ go test -run-vm-tests -distro-regex centos -``` - -This would run all tests on all versions of CentOS. - -```console -$ go test -run-vm-tests -distro-regex '(debian|ubuntu)' -``` - -This would run all tests on all versions of Debian and Ubuntu. - ### Ram Limiting This test uses a lot of memory. In order to avoid making machines run out of diff --git a/tstest/integration/vms/distros.hujson b/tstest/integration/vms/distros.hujson index 049091ed5..2c90f9a2f 100644 --- a/tstest/integration/vms/distros.hujson +++ b/tstest/integration/vms/distros.hujson @@ -12,24 +12,16 @@ // /var/log/cloud-init-output.log for what you messed up. [ { - "Name": "ubuntu-18-04", - "URL": "https://cloud-images.ubuntu.com/releases/bionic/release-20210817/ubuntu-18.04-server-cloudimg-amd64.img", - "SHA256Sum": "1ee1039f0b91c8367351413b5b5f56026aaf302fd5f66f17f8215132d6e946d2", + "Name": "ubuntu-24-04", + "URL": "https://cloud-images.ubuntu.com/noble/20250523/noble-server-cloudimg-amd64.img", + "SHA256Sum": "0e865619967706765cdc8179fb9929202417ab3a0719d77d8c8942d38aa9611b", "MemoryMegs": 512, "PackageManager": "apt", "InitSystem": "systemd" }, { - "Name": "ubuntu-20-04", - "URL": "https://cloud-images.ubuntu.com/releases/focal/release-20210819/ubuntu-20.04-server-cloudimg-amd64.img", - "SHA256Sum": "99e25e6e344e3a50a081235e825937238a3d51b099969e107ef66f0d3a1f955e", - "MemoryMegs": 512, - "PackageManager": "apt", - "InitSystem": "systemd" - }, - { - "Name": "nixos-21-11", - "URL": "channel:nixos-21.11", + "Name": "nixos-25-05", + "URL": "channel:nixos-25.05", "SHA256Sum": "lolfakesha", "MemoryMegs": 512, "PackageManager": "nix", diff --git a/tstest/integration/vms/nixos_test.go b/tstest/integration/vms/nixos_test.go index c2998ff3c..02b040fed 100644 --- a/tstest/integration/vms/nixos_test.go +++ b/tstest/integration/vms/nixos_test.go @@ -97,7 +97,7 @@ let # Wrap tailscaled with the ip and iptables commands. wrapProgram $out/bin/tailscaled --prefix PATH : ${ - lib.makeBinPath [ iproute iptables ] + lib.makeBinPath [ iproute2 iptables ] } # Install systemd unit. @@ -127,6 +127,9 @@ in { # yolo, this vm can sudo freely. security.sudo.wheelNeedsPassword = false; + # nix considers squid insecure, but this is fine for a test. + nixpkgs.config.permittedInsecurePackages = [ "squid-7.0.1" ]; + # Enable cloud-init so we can set VM hostnames and the like the same as other # distros. This will also take care of SSH keys. It's pretty handy. services.cloud-init = { diff --git a/tstest/integration/vms/opensuse_leap_15_1_test.go b/tstest/integration/vms/opensuse_leap_15_1_test.go deleted file mode 100644 index 7d3ac579e..000000000 --- a/tstest/integration/vms/opensuse_leap_15_1_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !windows && !plan9 - -package vms - -import ( - "encoding/json" - "os" - "path/filepath" - "testing" - - "github.com/google/uuid" -) - -/* - The images that we use for OpenSUSE Leap 15.1 have an issue that makes the - nocloud backend[1] for cloud-init just not work. As a distro-specific - workaround, we're gonna pretend to be OpenStack. - - TODO(Xe): delete once we no longer need to support OpenSUSE Leap 15.1. 
- - [1]: https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html -*/ - -type openSUSELeap151MetaData struct { - Zone string `json:"availability_zone"` // nova - Hostname string `json:"hostname"` // opensuse-leap-15-1 - LaunchIndex string `json:"launch_index"` // 0 - Meta openSUSELeap151MetaDataMeta `json:"meta"` // some openstack metadata we don't need to care about - Name string `json:"name"` // opensuse-leap-15-1 - UUID string `json:"uuid"` // e9c664cd-b116-433b-aa61-7ff420163dcd -} - -type openSUSELeap151MetaDataMeta struct { - Role string `json:"role"` // server - DSMode string `json:"dsmode"` // local - Essential string `json:"essential"` // essential -} - -func hackOpenSUSE151UserData(t *testing.T, d Distro, dir string) bool { - if d.Name != "opensuse-leap-15-1" { - return false - } - - t.Log("doing OpenSUSE Leap 15.1 hack") - osDir := filepath.Join(dir, "openstack", "latest") - err := os.MkdirAll(osDir, 0755) - if err != nil { - t.Fatalf("can't make metadata home: %v", err) - } - - metadata, err := json.Marshal(openSUSELeap151MetaData{ - Zone: "nova", - Hostname: d.Name, - LaunchIndex: "0", - Meta: openSUSELeap151MetaDataMeta{ - Role: "server", - DSMode: "local", - Essential: "false", - }, - Name: d.Name, - UUID: uuid.New().String(), - }) - if err != nil { - t.Fatalf("can't encode metadata: %v", err) - } - err = os.WriteFile(filepath.Join(osDir, "meta_data.json"), metadata, 0666) - if err != nil { - t.Fatalf("can't write to meta_data.json: %v", err) - } - - data, err := os.ReadFile(filepath.Join(dir, "user-data")) - if err != nil { - t.Fatalf("can't read user_data: %v", err) - } - - err = os.WriteFile(filepath.Join(osDir, "user_data"), data, 0666) - if err != nil { - t.Fatalf("can't create output user_data: %v", err) - } - - return true -} diff --git a/tstest/integration/vms/regex_flag.go b/tstest/integration/vms/regex_flag.go deleted file mode 100644 index 02e399ecd..000000000 --- a/tstest/integration/vms/regex_flag.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package vms - -import "regexp" - -type regexValue struct { - r *regexp.Regexp -} - -func (r *regexValue) String() string { - if r.r == nil { - return "" - } - - return r.r.String() -} - -func (r *regexValue) Set(val string) error { - if rex, err := regexp.Compile(val); err != nil { - return err - } else { - r.r = rex - return nil - } -} - -func (r regexValue) Unwrap() *regexp.Regexp { return r.r } diff --git a/tstest/integration/vms/regex_flag_test.go b/tstest/integration/vms/regex_flag_test.go deleted file mode 100644 index 0f4e5f8f7..000000000 --- a/tstest/integration/vms/regex_flag_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package vms - -import ( - "flag" - "testing" -) - -func TestRegexFlag(t *testing.T) { - var v regexValue - fs := flag.NewFlagSet(t.Name(), flag.PanicOnError) - fs.Var(&v, "regex", "regex to parse") - - const want = `.*` - fs.Parse([]string{"-regex", want}) - if v.Unwrap().String() != want { - t.Fatalf("got wrong regex: %q, wanted: %q", v.Unwrap().String(), want) - } -} diff --git a/tstest/integration/vms/top_level_test.go b/tstest/integration/vms/top_level_test.go index c107fd89c..5db237b6e 100644 --- a/tstest/integration/vms/top_level_test.go +++ b/tstest/integration/vms/top_level_test.go @@ -14,17 +14,13 @@ import ( expect "github.com/tailscale/goexpect" ) -func TestRunUbuntu1804(t *testing.T) { +func TestRunUbuntu2404(t 
*testing.T) { testOneDistribution(t, 0, Distros[0]) } -func TestRunUbuntu2004(t *testing.T) { - testOneDistribution(t, 1, Distros[1]) -} - -func TestRunNixos2111(t *testing.T) { +func TestRunNixos2505(t *testing.T) { t.Parallel() - testOneDistribution(t, 2, Distros[2]) + testOneDistribution(t, 1, Distros[1]) } // TestMITMProxy is a smoke test for derphttp through a MITM proxy. @@ -39,13 +35,7 @@ func TestRunNixos2111(t *testing.T) { func TestMITMProxy(t *testing.T) { t.Parallel() setupTests(t) - distro := Distros[2] // nixos-21.11 - - if distroRex.Unwrap().MatchString(distro.Name) { - t.Logf("%s matches %s", distro.Name, distroRex.Unwrap()) - } else { - t.Skip("regex not matched") - } + distro := Distros[1] // nixos-25.05 ctx, done := context.WithCancel(context.Background()) t.Cleanup(done) diff --git a/tstest/integration/vms/vms_test.go b/tstest/integration/vms/vms_test.go index f71f2bdbf..0bab3ba5d 100644 --- a/tstest/integration/vms/vms_test.go +++ b/tstest/integration/vms/vms_test.go @@ -15,7 +15,6 @@ import ( "os" "os/exec" "path/filepath" - "regexp" "strconv" "strings" "sync" @@ -43,11 +42,6 @@ var ( useVNC = flag.Bool("use-vnc", false, "if set, display guest vms over VNC") verboseLogcatcher = flag.Bool("verbose-logcatcher", true, "if set, print logcatcher to t.Logf") verboseQemu = flag.Bool("verbose-qemu", true, "if set, print qemu console to t.Logf") - distroRex = func() *regexValue { - result := ®exValue{r: regexp.MustCompile(`.*`)} - flag.Var(result, "distro-regex", "The regex that matches what distros should be run") - return result - }() ) func TestDownloadImages(t *testing.T) { @@ -59,9 +53,6 @@ func TestDownloadImages(t *testing.T) { distro := d t.Run(distro.Name, func(t *testing.T) { t.Parallel() - if !distroRex.Unwrap().MatchString(distro.Name) { - t.Skipf("distro name %q doesn't match regex: %s", distro.Name, distroRex) - } if strings.HasPrefix(distro.Name, "nixos") { t.Skip("NixOS is built on the fly, no need to download it") } @@ -175,10 +166,6 @@ func mkSeed(t *testing.T, d Distro, sshKey, hostURL, tdir string, port int) { filepath.Join(dir, "user-data"), } - if hackOpenSUSE151UserData(t, d, dir) { - args = append(args, filepath.Join(dir, "openstack")) - } - run(t, tdir, "genisoimage", args...) 
} @@ -247,12 +234,6 @@ var ramsem struct { func testOneDistribution(t *testing.T, n int, distro Distro) { setupTests(t) - if distroRex.Unwrap().MatchString(distro.Name) { - t.Logf("%s matches %s", distro.Name, distroRex.Unwrap()) - } else { - t.Skip("regex not matched") - } - ctx, done := context.WithCancel(context.Background()) t.Cleanup(done) From dd615c8fdd6c225ae9da777a47dbbecf08478472 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 16:19:12 -0700 Subject: [PATCH 1418/1708] util/linuxfw, feature/buildfeatures: add ts_omit_iptables to make IPTables optional Updates #12614 Change-Id: Ic0eba982aa8468a55c63e1b763345f032a55b4e2 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 3 +- cmd/tailscaled/depaware-minbox.txt | 5 +- .../feature_iptables_disabled.go | 13 ++ .../buildfeatures/feature_iptables_enabled.go | 13 ++ feature/featuretags/featuretags.go | 1 + ipn/ipn_view.go | 2 + ipn/prefs.go | 2 + util/linuxfw/detector.go | 37 +++- util/linuxfw/fake.go | 2 +- util/linuxfw/iptables.go | 165 +++++++++++++++++- util/linuxfw/iptables_disabled.go | 20 +++ util/linuxfw/iptables_for_svcs_test.go | 14 +- util/linuxfw/iptables_runner.go | 157 +---------------- util/linuxfw/iptables_runner_test.go | 12 +- util/linuxfw/linuxfw.go | 11 ++ util/linuxfw/linuxfw_unsupported.go | 40 ----- util/linuxfw/nftables.go | 4 + wgengine/router/router.go | 2 +- 18 files changed, 282 insertions(+), 221 deletions(-) create mode 100644 feature/buildfeatures/feature_iptables_disabled.go create mode 100644 feature/buildfeatures/feature_iptables_enabled.go create mode 100644 util/linuxfw/iptables_disabled.go delete mode 100644 util/linuxfw/linuxfw_unsupported.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index d19ea1f17..a68d67b6d 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -98,7 +98,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/disco from tailscale.com/derp/derpserver tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ - tailscale.com/feature from tailscale.com/tsweb + tailscale.com/feature from tailscale.com/tsweb+ + L tailscale.com/feature/buildfeatures from tailscale.com/util/linuxfw tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/hostinfo from tailscale.com/net/netmon+ tailscale.com/ipn from tailscale.com/client/local diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index cf4a9b039..3b6643566 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -2,7 +2,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 - github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ @@ -420,13 +419,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/coreos/go-iptables/iptables+ + os/exec from tailscale.com/clientupdate+ os/signal from tailscale.com/cmd/tailscaled os/user from archive/tar+ path from archive/tar+ path/filepath from 
archive/tar+ reflect from archive/tar+ - regexp from github.com/coreos/go-iptables/iptables+ + regexp from internal/profile+ regexp/syntax from regexp runtime from archive/tar+ runtime/debug from github.com/klauspost/compress/zstd+ diff --git a/feature/buildfeatures/feature_iptables_disabled.go b/feature/buildfeatures/feature_iptables_disabled.go new file mode 100644 index 000000000..8cda5be5d --- /dev/null +++ b/feature/buildfeatures/feature_iptables_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_iptables + +package buildfeatures + +// HasIPTables is whether the binary was built with support for modular feature "Linux iptables support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_iptables" build tag. +// It's a const so it can be used for dead code elimination. +const HasIPTables = false diff --git a/feature/buildfeatures/feature_iptables_enabled.go b/feature/buildfeatures/feature_iptables_enabled.go new file mode 100644 index 000000000..44d98473f --- /dev/null +++ b/feature/buildfeatures/feature_iptables_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_iptables + +package buildfeatures + +// HasIPTables is whether the binary was built with support for modular feature "Linux iptables support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_iptables" build tag. +// It's a const so it can be used for dead code elimination. +const HasIPTables = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 2edecef58..40a5ac3f5 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -112,6 +112,7 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Generic Receive Offload support (performance)", Deps: []FeatureTag{"netstack"}, }, + "iptables": {"IPTables", "Linux iptables support", nil}, "kube": {"Kube", "Kubernetes integration", nil}, "linuxdnsfight": {"LinuxDNSFight", "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)", nil}, "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 170dc409b..1c7639f6f 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -421,6 +421,8 @@ func (v PrefsView) PostureChecking() bool { return v.ж.PostureChecking } // NetfilterKind specifies what netfilter implementation to use. // +// It can be "iptables", "nftables", or "" to auto-detect. +// // Linux-only. func (v PrefsView) NetfilterKind() string { return v.ж.NetfilterKind } diff --git a/ipn/prefs.go b/ipn/prefs.go index 1efb5d0fe..a2149950d 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -264,6 +264,8 @@ type Prefs struct { // NetfilterKind specifies what netfilter implementation to use. // + // It can be "iptables", "nftables", or "" to auto-detect. + // // Linux-only. 
NetfilterKind string diff --git a/util/linuxfw/detector.go b/util/linuxfw/detector.go index fffa523af..644126131 100644 --- a/util/linuxfw/detector.go +++ b/util/linuxfw/detector.go @@ -10,6 +10,8 @@ import ( "os/exec" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/types/logger" "tailscale.com/version/distro" @@ -42,10 +44,12 @@ func detectFirewallMode(logf logger.Logf, prefHint string) FirewallMode { var det linuxFWDetector if mode == "" { // We have no preference, so check if `iptables` is even available. - _, err := det.iptDetect() - if err != nil && errors.Is(err, exec.ErrNotFound) { - logf("iptables not found: %v; falling back to nftables", err) - mode = "nftables" + if buildfeatures.HasIPTables { + _, err := det.iptDetect() + if err != nil && errors.Is(err, exec.ErrNotFound) { + logf("iptables not found: %v; falling back to nftables", err) + mode = "nftables" + } } } @@ -59,11 +63,16 @@ func detectFirewallMode(logf logger.Logf, prefHint string) FirewallMode { return FirewallModeNfTables case "iptables": hostinfo.SetFirewallMode("ipt-forced") - default: + return FirewallModeIPTables + } + if buildfeatures.HasIPTables { logf("default choosing iptables") hostinfo.SetFirewallMode("ipt-default") + return FirewallModeIPTables } - return FirewallModeIPTables + logf("default choosing nftables") + hostinfo.SetFirewallMode("nft-default") + return FirewallModeNfTables } // tableDetector abstracts helpers to detect the firewall mode. @@ -80,19 +89,33 @@ func (l linuxFWDetector) iptDetect() (int, error) { return detectIptables() } +var hookDetectNetfilter feature.Hook[func() (int, error)] + +// ErrUnsupported is the error returned from all functions on non-Linux +// platforms. +var ErrUnsupported = errors.New("linuxfw:unsupported") + // nftDetect returns the number of nftables rules in the current namespace. func (l linuxFWDetector) nftDetect() (int, error) { - return detectNetfilter() + if f, ok := hookDetectNetfilter.GetOk(); ok { + return f() + } + return 0, ErrUnsupported } // pickFirewallModeFromInstalledRules returns the firewall mode to use based on // the environment and the system's capabilities. func pickFirewallModeFromInstalledRules(logf logger.Logf, det tableDetector) FirewallMode { + if !buildfeatures.HasIPTables { + hostinfo.SetFirewallMode("nft-noipt") + return FirewallModeNfTables + } if distro.Get() == distro.Gokrazy { // Reduce startup logging on gokrazy. There's no way to do iptables on // gokrazy anyway. 
return FirewallModeNfTables } + iptAva, nftAva := true, true iptRuleCount, err := det.iptDetect() if err != nil { diff --git a/util/linuxfw/fake.go b/util/linuxfw/fake.go index 63a728d55..d01849a2e 100644 --- a/util/linuxfw/fake.go +++ b/util/linuxfw/fake.go @@ -128,7 +128,7 @@ func (n *fakeIPTables) DeleteChain(table, chain string) error { } } -func NewFakeIPTablesRunner() *iptablesRunner { +func NewFakeIPTablesRunner() NetfilterRunner { ipt4 := newFakeIPTables() v6Available := false var ipt6 iptablesInterface diff --git a/util/linuxfw/iptables.go b/util/linuxfw/iptables.go index 234fa526c..73da92086 100644 --- a/util/linuxfw/iptables.go +++ b/util/linuxfw/iptables.go @@ -1,21 +1,34 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && (arm64 || amd64) && !ts_omit_iptables + // TODO(#8502): add support for more architectures -//go:build linux && (arm64 || amd64) package linuxfw import ( + "bytes" + "errors" "fmt" + "os" "os/exec" "strings" "unicode" + "github.com/coreos/go-iptables/iptables" "tailscale.com/types/logger" "tailscale.com/util/multierr" + "tailscale.com/version/distro" ) +func init() { + isNotExistError = func(err error) bool { + var e *iptables.Error + return errors.As(err, &e) && e.IsNotExist() + } +} + // DebugNetfilter prints debug information about iptables rules to the // provided log function. func DebugIptables(logf logger.Logf) error { @@ -71,3 +84,153 @@ func detectIptables() (int, error) { // return the count of non-default rules return count, nil } + +// newIPTablesRunner constructs a NetfilterRunner that programs iptables rules. +// If the underlying iptables library fails to initialize, that error is +// returned. The runner probes for IPv6 support once at initialization time and +// if not found, no IPv6 rules will be modified for the lifetime of the runner. +func newIPTablesRunner(logf logger.Logf) (*iptablesRunner, error) { + ipt4, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) + if err != nil { + return nil, err + } + + supportsV6, supportsV6NAT, supportsV6Filter := false, false, false + v6err := CheckIPv6(logf) + ip6terr := checkIP6TablesExists() + var ipt6 *iptables.IPTables + switch { + case v6err != nil: + logf("disabling tunneled IPv6 due to system IPv6 config: %v", v6err) + case ip6terr != nil: + logf("disabling tunneled IPv6 due to missing ip6tables: %v", ip6terr) + default: + supportsV6 = true + ipt6, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) + if err != nil { + return nil, err + } + supportsV6Filter = checkSupportsV6Filter(ipt6, logf) + supportsV6NAT = checkSupportsV6NAT(ipt6, logf) + logf("netfilter running in iptables mode v6 = %v, v6filter = %v, v6nat = %v", supportsV6, supportsV6Filter, supportsV6NAT) + } + return &iptablesRunner{ + ipt4: ipt4, + ipt6: ipt6, + v6Available: supportsV6, + v6NATAvailable: supportsV6NAT, + v6FilterAvailable: supportsV6Filter}, nil +} + +// checkSupportsV6Filter returns whether the system has a "filter" table in the +// IPv6 tables. Some container environments such as GitHub codespaces have +// limited local IPv6 support, and containers containing ip6tables, but do not +// have kernel support for IPv6 filtering. +// We will not set ip6tables rules in these instances. 
+func checkSupportsV6Filter(ipt *iptables.IPTables, logf logger.Logf) bool { + if ipt == nil { + return false + } + _, filterListErr := ipt.ListChains("filter") + if filterListErr == nil { + return true + } + logf("ip6tables filtering is not supported on this host: %v", filterListErr) + return false +} + +// checkSupportsV6NAT returns whether the system has a "nat" table in the +// IPv6 netfilter stack. +// +// The nat table was added after the initial release of ipv6 +// netfilter, so some older distros ship a kernel that can't NAT IPv6 +// traffic. +// ipt must be initialized for IPv6. +func checkSupportsV6NAT(ipt *iptables.IPTables, logf logger.Logf) bool { + if ipt == nil || ipt.Proto() != iptables.ProtocolIPv6 { + return false + } + _, natListErr := ipt.ListChains("nat") + if natListErr == nil { + return true + } + + // TODO (irbekrm): the following two checks were added before the check + // above that verifies that nat chains can be listed. It is a + // container-friendly check (see + // https://github.com/tailscale/tailscale/issues/11344), but also should + // be good enough on its own in other environments. If we never observe + // it falsely succeed, let's remove the other two checks. + + bs, err := os.ReadFile("/proc/net/ip6_tables_names") + if err != nil { + return false + } + if bytes.Contains(bs, []byte("nat\n")) { + logf("[unexpected] listing nat chains failed, but /proc/net/ip6_tables_name reports a nat table existing") + return true + } + if exec.Command("modprobe", "ip6table_nat").Run() == nil { + logf("[unexpected] listing nat chains failed, but modprobe ip6table_nat succeeded") + return true + } + return false +} + +func init() { + hookIPTablesCleanup.Set(ipTablesCleanUp) +} + +// ipTablesCleanUp removes all Tailscale added iptables rules. +// Any errors that occur are logged to the provided logf. +func ipTablesCleanUp(logf logger.Logf) { + switch distro.Get() { + case distro.Gokrazy, distro.JetKVM: + // These use nftables and don't have the "iptables" command. + // Avoid log spam on cleanup. (#12277) + return + } + err := clearRules(iptables.ProtocolIPv4, logf) + if err != nil { + logf("linuxfw: clear iptables: %v", err) + } + + err = clearRules(iptables.ProtocolIPv6, logf) + if err != nil { + logf("linuxfw: clear ip6tables: %v", err) + } +} + +// clearRules clears all the iptables rules created by Tailscale +// for the given protocol. If error occurs, it's logged but not returned. +func clearRules(proto iptables.Protocol, logf logger.Logf) error { + ipt, err := iptables.NewWithProtocol(proto) + if err != nil { + return err + } + + var errs []error + + if err := delTSHook(ipt, "filter", "INPUT", logf); err != nil { + errs = append(errs, err) + } + if err := delTSHook(ipt, "filter", "FORWARD", logf); err != nil { + errs = append(errs, err) + } + if err := delTSHook(ipt, "nat", "POSTROUTING", logf); err != nil { + errs = append(errs, err) + } + + if err := delChain(ipt, "filter", "ts-input"); err != nil { + errs = append(errs, err) + } + if err := delChain(ipt, "filter", "ts-forward"); err != nil { + errs = append(errs, err) + } + + if err := delChain(ipt, "nat", "ts-postrouting"); err != nil { + errs = append(errs, err) + } + + return multierr.New(errs...) 
+} diff --git a/util/linuxfw/iptables_disabled.go b/util/linuxfw/iptables_disabled.go new file mode 100644 index 000000000..8736f8399 --- /dev/null +++ b/util/linuxfw/iptables_disabled.go @@ -0,0 +1,20 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build (linux && !(arm64 || amd64)) || ts_omit_iptables + +package linuxfw + +import ( + "errors" + + "tailscale.com/types/logger" +) + +func detectIptables() (int, error) { + return 0, nil +} + +func newIPTablesRunner(logf logger.Logf) (*iptablesRunner, error) { + return nil, errors.New("iptables disabled in build") +} diff --git a/util/linuxfw/iptables_for_svcs_test.go b/util/linuxfw/iptables_for_svcs_test.go index c3c1b1f65..0e56d70ba 100644 --- a/util/linuxfw/iptables_for_svcs_test.go +++ b/util/linuxfw/iptables_for_svcs_test.go @@ -10,6 +10,10 @@ import ( "testing" ) +func newFakeIPTablesRunner() *iptablesRunner { + return NewFakeIPTablesRunner().(*iptablesRunner) +} + func Test_iptablesRunner_EnsurePortMapRuleForSvc(t *testing.T) { v4Addr := netip.MustParseAddr("10.0.0.4") v6Addr := netip.MustParseAddr("fd7a:115c:a1e0::701:b62a") @@ -45,7 +49,7 @@ func Test_iptablesRunner_EnsurePortMapRuleForSvc(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() table := iptr.getIPTByAddr(tt.targetIP) for _, ruleset := range tt.precreateSvcRules { mustPrecreatePortMapRule(t, ruleset, table) @@ -103,7 +107,7 @@ func Test_iptablesRunner_DeletePortMapRuleForSvc(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() table := iptr.getIPTByAddr(tt.targetIP) for _, ruleset := range tt.precreateSvcRules { mustPrecreatePortMapRule(t, ruleset, table) @@ -127,7 +131,7 @@ func Test_iptablesRunner_DeleteSvc(t *testing.T) { v4Addr := netip.MustParseAddr("10.0.0.4") v6Addr := netip.MustParseAddr("fd7a:115c:a1e0::701:b62a") testPM := PortMap{Protocol: "tcp", MatchPort: 4003, TargetPort: 80} - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() // create two rules that will consitute svc1 s1R1 := argsForPortMapRule("svc1", "tailscale0", v4Addr, testPM) @@ -189,7 +193,7 @@ func Test_iptablesRunner_EnsureDNATRuleForSvc(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() table := iptr.getIPTByAddr(tt.targetIP) for _, ruleset := range tt.precreateSvcRules { mustPrecreateDNATRule(t, ruleset, table) @@ -248,7 +252,7 @@ func Test_iptablesRunner_DeleteDNATRuleForSvc(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() table := iptr.getIPTByAddr(tt.targetIP) for _, ruleset := range tt.precreateSvcRules { mustPrecreateDNATRule(t, ruleset, table) diff --git a/util/linuxfw/iptables_runner.go b/util/linuxfw/iptables_runner.go index 78844065a..76b4cdd6f 100644 --- a/util/linuxfw/iptables_runner.go +++ b/util/linuxfw/iptables_runner.go @@ -6,31 +6,22 @@ package linuxfw import ( - "bytes" - "errors" "fmt" "log" "net/netip" - "os" "os/exec" "slices" "strconv" "strings" - "github.com/coreos/go-iptables/iptables" "tailscale.com/net/tsaddr" "tailscale.com/types/logger" - "tailscale.com/util/multierr" - "tailscale.com/version/distro" ) // isNotExistError needs to be overridden in tests that rely on distinguishing // this error, because we don't have a good 
way how to create a new // iptables.Error of that type. -var isNotExistError = func(err error) bool { - var e *iptables.Error - return errors.As(err, &e) && e.IsNotExist() -} +var isNotExistError = func(err error) bool { return false } type iptablesInterface interface { // Adding this interface for testing purposes so we can mock out @@ -62,98 +53,6 @@ func checkIP6TablesExists() error { return nil } -// newIPTablesRunner constructs a NetfilterRunner that programs iptables rules. -// If the underlying iptables library fails to initialize, that error is -// returned. The runner probes for IPv6 support once at initialization time and -// if not found, no IPv6 rules will be modified for the lifetime of the runner. -func newIPTablesRunner(logf logger.Logf) (*iptablesRunner, error) { - ipt4, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) - if err != nil { - return nil, err - } - - supportsV6, supportsV6NAT, supportsV6Filter := false, false, false - v6err := CheckIPv6(logf) - ip6terr := checkIP6TablesExists() - var ipt6 *iptables.IPTables - switch { - case v6err != nil: - logf("disabling tunneled IPv6 due to system IPv6 config: %v", v6err) - case ip6terr != nil: - logf("disabling tunneled IPv6 due to missing ip6tables: %v", ip6terr) - default: - supportsV6 = true - ipt6, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) - if err != nil { - return nil, err - } - supportsV6Filter = checkSupportsV6Filter(ipt6, logf) - supportsV6NAT = checkSupportsV6NAT(ipt6, logf) - logf("netfilter running in iptables mode v6 = %v, v6filter = %v, v6nat = %v", supportsV6, supportsV6Filter, supportsV6NAT) - } - return &iptablesRunner{ - ipt4: ipt4, - ipt6: ipt6, - v6Available: supportsV6, - v6NATAvailable: supportsV6NAT, - v6FilterAvailable: supportsV6Filter}, nil -} - -// checkSupportsV6Filter returns whether the system has a "filter" table in the -// IPv6 tables. Some container environments such as GitHub codespaces have -// limited local IPv6 support, and containers containing ip6tables, but do not -// have kernel support for IPv6 filtering. -// We will not set ip6tables rules in these instances. -func checkSupportsV6Filter(ipt *iptables.IPTables, logf logger.Logf) bool { - if ipt == nil { - return false - } - _, filterListErr := ipt.ListChains("filter") - if filterListErr == nil { - return true - } - logf("ip6tables filtering is not supported on this host: %v", filterListErr) - return false -} - -// checkSupportsV6NAT returns whether the system has a "nat" table in the -// IPv6 netfilter stack. -// -// The nat table was added after the initial release of ipv6 -// netfilter, so some older distros ship a kernel that can't NAT IPv6 -// traffic. -// ipt must be initialized for IPv6. -func checkSupportsV6NAT(ipt *iptables.IPTables, logf logger.Logf) bool { - if ipt == nil || ipt.Proto() != iptables.ProtocolIPv6 { - return false - } - _, natListErr := ipt.ListChains("nat") - if natListErr == nil { - return true - } - - // TODO (irbekrm): the following two checks were added before the check - // above that verifies that nat chains can be listed. It is a - // container-friendly check (see - // https://github.com/tailscale/tailscale/issues/11344), but also should - // be good enough on its own in other environments. If we never observe - // it falsely succeed, let's remove the other two checks. 
- - bs, err := os.ReadFile("/proc/net/ip6_tables_names") - if err != nil { - return false - } - if bytes.Contains(bs, []byte("nat\n")) { - logf("[unexpected] listing nat chains failed, but /proc/net/ip6_tables_name reports a nat table existing") - return true - } - if exec.Command("modprobe", "ip6table_nat").Run() == nil { - logf("[unexpected] listing nat chains failed, but modprobe ip6table_nat succeeded") - return true - } - return false -} - // HasIPV6 reports true if the system supports IPv6. func (i *iptablesRunner) HasIPV6() bool { return i.v6Available @@ -685,26 +584,6 @@ func (i *iptablesRunner) DelMagicsockPortRule(port uint16, network string) error return nil } -// IPTablesCleanUp removes all Tailscale added iptables rules. -// Any errors that occur are logged to the provided logf. -func IPTablesCleanUp(logf logger.Logf) { - switch distro.Get() { - case distro.Gokrazy, distro.JetKVM: - // These use nftables and don't have the "iptables" command. - // Avoid log spam on cleanup. (#12277) - return - } - err := clearRules(iptables.ProtocolIPv4, logf) - if err != nil { - logf("linuxfw: clear iptables: %v", err) - } - - err = clearRules(iptables.ProtocolIPv6, logf) - if err != nil { - logf("linuxfw: clear ip6tables: %v", err) - } -} - // delTSHook deletes hook in a chain that jumps to a ts-chain. If the hook does not // exist, it's a no-op since the desired state is already achieved but we log the // error because error code from the iptables module resists unwrapping. @@ -733,40 +612,6 @@ func delChain(ipt iptablesInterface, table, chain string) error { return nil } -// clearRules clears all the iptables rules created by Tailscale -// for the given protocol. If error occurs, it's logged but not returned. -func clearRules(proto iptables.Protocol, logf logger.Logf) error { - ipt, err := iptables.NewWithProtocol(proto) - if err != nil { - return err - } - - var errs []error - - if err := delTSHook(ipt, "filter", "INPUT", logf); err != nil { - errs = append(errs, err) - } - if err := delTSHook(ipt, "filter", "FORWARD", logf); err != nil { - errs = append(errs, err) - } - if err := delTSHook(ipt, "nat", "POSTROUTING", logf); err != nil { - errs = append(errs, err) - } - - if err := delChain(ipt, "filter", "ts-input"); err != nil { - errs = append(errs, err) - } - if err := delChain(ipt, "filter", "ts-forward"); err != nil { - errs = append(errs, err) - } - - if err := delChain(ipt, "nat", "ts-postrouting"); err != nil { - errs = append(errs, err) - } - - return multierr.New(errs...) -} - // argsFromPostRoutingRule accepts a rule as returned by iptables.List and, if it is a rule from POSTROUTING chain, // returns the args part, else returns the original rule. func argsFromPostRoutingRule(r string) string { diff --git a/util/linuxfw/iptables_runner_test.go b/util/linuxfw/iptables_runner_test.go index 56f13c78a..451b8aab4 100644 --- a/util/linuxfw/iptables_runner_test.go +++ b/util/linuxfw/iptables_runner_test.go @@ -20,7 +20,7 @@ func init() { } func TestAddAndDeleteChains(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() err := iptr.AddChains() if err != nil { t.Fatal(err) @@ -59,7 +59,7 @@ func TestAddAndDeleteChains(t *testing.T) { } func TestAddAndDeleteHooks(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() // don't need to test what happens if the chains don't exist, because // this is handled by fake iptables, in realife iptables would return error. 
if err := iptr.AddChains(); err != nil { @@ -113,7 +113,7 @@ func TestAddAndDeleteHooks(t *testing.T) { } func TestAddAndDeleteBase(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() tunname := "tun0" if err := iptr.AddChains(); err != nil { t.Fatal(err) @@ -176,7 +176,7 @@ func TestAddAndDeleteBase(t *testing.T) { } func TestAddAndDelLoopbackRule(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() // We don't need to test for malformed addresses, AddLoopbackRule // takes in a netip.Addr, which is already valid. fakeAddrV4 := netip.MustParseAddr("192.168.0.2") @@ -247,7 +247,7 @@ func TestAddAndDelLoopbackRule(t *testing.T) { } func TestAddAndDelSNATRule(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() if err := iptr.AddChains(); err != nil { t.Fatal(err) @@ -292,7 +292,7 @@ func TestAddAndDelSNATRule(t *testing.T) { func TestEnsureSNATForDst_ipt(t *testing.T) { ip1, ip2, ip3 := netip.MustParseAddr("100.99.99.99"), netip.MustParseAddr("100.88.88.88"), netip.MustParseAddr("100.77.77.77") - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() // 1. A new rule gets added mustCreateSNATRule_ipt(t, iptr, ip1, ip2) diff --git a/util/linuxfw/linuxfw.go b/util/linuxfw/linuxfw.go index be520e7a4..4aa0f8782 100644 --- a/util/linuxfw/linuxfw.go +++ b/util/linuxfw/linuxfw.go @@ -14,6 +14,7 @@ import ( "strings" "github.com/tailscale/netlink" + "tailscale.com/feature" "tailscale.com/types/logger" ) @@ -180,3 +181,13 @@ func CheckIPRuleSupportsV6(logf logger.Logf) error { defer netlink.RuleDel(rule) return netlink.RuleAdd(rule) } + +var hookIPTablesCleanup feature.Hook[func(logger.Logf)] + +// IPTablesCleanUp removes all Tailscale added iptables rules. +// Any errors that occur are logged to the provided logf. +func IPTablesCleanUp(logf logger.Logf) { + if f, ok := hookIPTablesCleanup.GetOk(); ok { + f(logf) + } +} diff --git a/util/linuxfw/linuxfw_unsupported.go b/util/linuxfw/linuxfw_unsupported.go deleted file mode 100644 index 7bfb4fd01..000000000 --- a/util/linuxfw/linuxfw_unsupported.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// NOTE: linux_{arm64, amd64} are the only two currently supported archs due to missing -// support in upstream dependencies. - -// TODO(#8502): add support for more architectures -//go:build linux && !(arm64 || amd64) - -package linuxfw - -import ( - "errors" - - "tailscale.com/types/logger" -) - -// ErrUnsupported is the error returned from all functions on non-Linux -// platforms. -var ErrUnsupported = errors.New("linuxfw:unsupported") - -// DebugNetfilter is not supported on non-Linux platforms. -func DebugNetfilter(logf logger.Logf) error { - return ErrUnsupported -} - -// DetectNetfilter is not supported on non-Linux platforms. -func detectNetfilter() (int, error) { - return 0, ErrUnsupported -} - -// DebugIptables is not supported on non-Linux platforms. -func debugIptables(logf logger.Logf) error { - return ErrUnsupported -} - -// DetectIptables is not supported on non-Linux platforms. 
-func detectIptables() (int, error) { - return 0, ErrUnsupported -} diff --git a/util/linuxfw/nftables.go b/util/linuxfw/nftables.go index e8b267b5e..94ce51a14 100644 --- a/util/linuxfw/nftables.go +++ b/util/linuxfw/nftables.go @@ -103,6 +103,10 @@ func DebugNetfilter(logf logger.Logf) error { return nil } +func init() { + hookDetectNetfilter.Set(detectNetfilter) +} + // detectNetfilter returns the number of nftables rules present in the system. func detectNetfilter() (int, error) { // Frist try creating a dummy postrouting chain. Emperically, we have diff --git a/wgengine/router/router.go b/wgengine/router/router.go index 25d1c08a2..edd7d14cb 100644 --- a/wgengine/router/router.go +++ b/wgengine/router/router.go @@ -94,7 +94,7 @@ type Config struct { SNATSubnetRoutes bool // SNAT traffic to local subnets StatefulFiltering bool // Apply stateful filtering to inbound connections NetfilterMode preftype.NetfilterMode // how much to manage netfilter rules - NetfilterKind string // what kind of netfilter to use (nftables, iptables) + NetfilterKind string // what kind of netfilter to use ("nftables", "iptables", or "" to auto-detect) } func (a *Config) Equal(b *Config) bool { From f19409482d8c58f4b9478597aa09417289a79d71 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Tue, 23 Sep 2025 11:41:29 -0700 Subject: [PATCH 1419/1708] logtail: delete AppendTextOrJSONLocked This was accidentally added in #11671 for testing. Nothing uses it. Updates tailscale/corp#21363 Signed-off-by: Joe Tsai --- logtail/logtail.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/logtail/logtail.go b/logtail/logtail.go index b355addd2..6c4bbccc5 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -708,11 +708,6 @@ func appendTruncatedString(dst, src []byte, n int) []byte { return dst } -func (l *Logger) AppendTextOrJSONLocked(dst, src []byte) []byte { - l.clock = tstime.StdClock{} - return l.appendTextOrJSONLocked(dst, src, 0) -} - // appendTextOrJSONLocked appends a raw text message or a raw JSON object // in the Tailscale JSON log format. func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { From 475b520aa2d1cced66f6134712991944068287c9 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 27 Sep 2025 13:07:05 -0700 Subject: [PATCH 1420/1708] tsconst, util/linuxfw, wgengine/router: move Linux fw consts to tsconst Now cmd/derper doesn't depend on iptables, nftables, and netlink code :) But this is really just a cleanup step I noticed on the way to making tsnet applications able to not link all the OS router code which they don't use. 
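The shape of the move is roughly the following (a sketch only; the identifier names are illustrative and not necessarily the ones tsconst/linuxfw.go uses, though the chain-name strings match the ones the iptables cleanup code earlier in this series deletes):

    // tsconst/linuxfw.go (sketch)
    package tsconst

    // Names of the netfilter chains Tailscale installs on Linux, kept in a
    // leaf package so wgengine/router and util/linuxfw can share them without
    // the CLI or derper pulling in netlink/nftables/iptables code.
    const (
        ChainNameInput       = "ts-input"
        ChainNameForward     = "ts-forward"
        ChainNamePostrouting = "ts-postrouting"
    )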
Updates #17313 Change-Id: Ic7b4e04e3a9639fd198e9dbeb0f7bae22a4a47a9 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 27 +++++------------ cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/depaware.txt | 20 +++---------- cmd/tailscaled/depaware-minbox.txt | 3 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- net/netns/netns_linux.go | 4 +-- tsconst/linuxfw.go | 43 ++++++++++++++++++++++++++++ tsconst/{interface.go => tsconst.go} | 0 tsnet/depaware.txt | 2 +- util/linuxfw/iptables_runner.go | 12 ++++---- util/linuxfw/iptables_runner_test.go | 7 +++-- util/linuxfw/linuxfw.go | 26 ++++++----------- wgengine/router/router_linux.go | 13 +++++---- wgengine/router/router_linux_test.go | 13 +++++---- 15 files changed, 94 insertions(+), 82 deletions(-) create mode 100644 tsconst/linuxfw.go rename tsconst/{interface.go => tsconst.go} (100%) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index a68d67b6d..7d322aa31 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -11,7 +11,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes from tailscale.com/util/winutil+ github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/go-json-experiment/json from tailscale.com/types/opt+ @@ -21,18 +20,11 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - L github.com/google/nftables from tailscale.com/util/linuxfw - L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ github.com/hdevalence/ed25519consensus from tailscale.com/tka L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink - L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket github.com/munnerz/goautoneg from github.com/prometheus/common/expfmt @@ -49,11 +41,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw - L 💣 
github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/setec/client/setec from tailscale.com/cmd/derper github.com/tailscale/setec/types/api from github.com/tailscale/setec/client/setec - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/net/tsaddr @@ -98,8 +87,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/disco from tailscale.com/derp/derpserver tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ - tailscale.com/feature from tailscale.com/tsweb+ - L tailscale.com/feature/buildfeatures from tailscale.com/util/linuxfw + tailscale.com/feature from tailscale.com/tsweb tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/hostinfo from tailscale.com/net/netmon+ tailscale.com/ipn from tailscale.com/client/local @@ -131,7 +119,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/syncs from tailscale.com/cmd/derper+ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tka from tailscale.com/client/local+ - W tailscale.com/tsconst from tailscale.com/net/netmon+ + LW tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tstime from tailscale.com/derp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate tailscale.com/tstime/rate from tailscale.com/derp/derpserver @@ -164,7 +152,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/eventbus from tailscale.com/net/netmon+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns tailscale.com/util/mak from tailscale.com/health+ tailscale.com/util/multierr from tailscale.com/health+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto @@ -214,7 +201,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sync/singleflight from github.com/tailscale/setec/client/setec golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ - LD golang.org/x/sys/unix from github.com/google/nftables+ + LD golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ @@ -363,7 +350,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ - L io/ioutil from github.com/mitchellh/go-ps+ + L io/ioutil from github.com/mitchellh/go-ps iter from maps+ log from expvar+ log/internal from log @@ -387,13 +374,13 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/coreos/go-iptables/iptables+ + os/exec from golang.zx2c4.com/wireguard/windows/tunnel/winipcfg+ os/signal from tailscale.com/cmd/derper W os/user from tailscale.com/util/winutil path from github.com/prometheus/client_golang/prometheus/internal+ path/filepath from crypto/x509+ 
reflect from crypto/x509+ - regexp from github.com/coreos/go-iptables/iptables+ + regexp from github.com/prometheus/client_golang/prometheus/internal+ regexp/syntax from regexp runtime from crypto/internal/fips140+ runtime/debug from github.com/prometheus/client_golang/prometheus+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 2adbd5f5d..ba644eb03 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -932,7 +932,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns+ + L tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 4bd4e6bca..47e5ca48e 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -14,7 +14,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/pe+ W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/winutil/authenticode L github.com/fogleman/gg from tailscale.com/client/systray @@ -31,12 +30,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L github.com/golang/freetype/raster from github.com/fogleman/gg+ L github.com/golang/freetype/truetype from github.com/fogleman/gg github.com/golang/groupcache/lru from tailscale.com/net/dnscache - L github.com/google/nftables from tailscale.com/util/linuxfw - L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ DW github.com/google/uuid from tailscale.com/clientupdate+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon @@ -44,9 +37,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/kballard/go-shellquote from tailscale.com/cmd/tailscale/cli 💣 github.com/mattn/go-colorable from tailscale.com/cmd/tailscale/cli 💣 github.com/mattn/go-isatty from tailscale.com/cmd/tailscale/cli+ - L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink 💣 github.com/mitchellh/go-ps from tailscale.com/cmd/tailscale/cli+ github.com/peterbourgon/ff/v3 from 
github.com/peterbourgon/ff/v3/ffcli+ @@ -66,11 +58,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp - L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw - L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/web-client-prebuilt from tailscale.com/client/web github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli+ - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/net/tsaddr @@ -183,7 +172,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns tailscale.com/util/mak from tailscale.com/cmd/tailscale/cli+ tailscale.com/util/multierr from tailscale.com/health+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ @@ -259,7 +247,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ - LD golang.org/x/sys/unix from github.com/google/nftables+ + LD golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ @@ -446,13 +434,13 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/coreos/go-iptables/iptables+ + os/exec from github.com/atotto/clipboard+ os/signal from tailscale.com/cmd/tailscale/cli+ os/user from archive/tar+ path from archive/tar+ path/filepath from archive/tar+ reflect from archive/tar+ - regexp from github.com/coreos/go-iptables/iptables+ + regexp from github.com/tailscale/goupnp/httpu+ regexp/syntax from regexp runtime from archive/tar+ runtime/debug from tailscale.com+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 3b6643566..b0cc9d9c1 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -142,6 +142,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tempfork/httprec from tailscale.com/control/controlclient tailscale.com/tka from tailscale.com/control/controlclient+ + tailscale.com/tsconst from tailscale.com/net/netns+ tailscale.com/tsd from tailscale.com/cmd/tailscaled+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ @@ -184,7 +185,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from 
tailscale.com/clientupdate/distsign+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - tailscale.com/util/linuxfw from tailscale.com/net/netns+ + tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 579af5c0d..7fdac984c 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -419,7 +419,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/httphdr from tailscale.com/feature/taildrop tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns+ + L tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/control/controlclient+ tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 270edd371..bff8df411 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -362,7 +362,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns+ + L tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ diff --git a/net/netns/netns_linux.go b/net/netns/netns_linux.go index aaf6dab4a..609f524b5 100644 --- a/net/netns/netns_linux.go +++ b/net/netns/netns_linux.go @@ -15,8 +15,8 @@ import ( "golang.org/x/sys/unix" "tailscale.com/envknob" "tailscale.com/net/netmon" + "tailscale.com/tsconst" "tailscale.com/types/logger" - "tailscale.com/util/linuxfw" ) // socketMarkWorksOnce is the sync.Once & cached value for useSocketMark. @@ -111,7 +111,7 @@ func controlC(network, address string, c syscall.RawConn) error { } func setBypassMark(fd uintptr) error { - if err := unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_MARK, linuxfw.TailscaleBypassMarkNum); err != nil { + if err := unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_MARK, tsconst.LinuxBypassMarkNum); err != nil { return fmt.Errorf("setting SO_MARK bypass: %w", err) } return nil diff --git a/tsconst/linuxfw.go b/tsconst/linuxfw.go new file mode 100644 index 000000000..ce571e402 --- /dev/null +++ b/tsconst/linuxfw.go @@ -0,0 +1,43 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tsconst + +// Linux firewall constants used by Tailscale. + +// The following bits are added to packet marks for Tailscale use. +// +// We tried to pick bits sufficiently out of the way that it's +// unlikely to collide with existing uses. We have 4 bytes of mark +// bits to play with. We leave the lower byte alone on the assumption +// that sysadmins would use those. Kubernetes uses a few bits in the +// second byte, so we steer clear of that too. 
+// +// Empirically, most of the documentation on packet marks on the +// internet gives the impression that the marks are 16 bits +// wide. Based on this, we theorize that the upper two bytes are +// relatively unused in the wild, and so we consume bits 16:23 (the +// third byte). +// +// The constants are in the iptables/iproute2 string format for +// matching and setting the bits, so they can be directly embedded in +// commands. +const ( + // The mask for reading/writing the 'firewall mask' bits on a packet. + // See the comment on the const block on why we only use the third byte. + // + // We claim bits 16:23 entirely. For now we only use the lower four + // bits, leaving the higher 4 bits for future use. + LinuxFwmarkMask = "0xff0000" + LinuxFwmarkMaskNum = 0xff0000 + + // Packet is from Tailscale and to a subnet route destination, so + // is allowed to be routed through this machine. + LinuxSubnetRouteMark = "0x40000" + LinuxSubnetRouteMarkNum = 0x40000 + + // Packet was originated by tailscaled itself, and must not be + // routed over the Tailscale network. + LinuxBypassMark = "0x80000" + LinuxBypassMarkNum = 0x80000 +) diff --git a/tsconst/interface.go b/tsconst/tsconst.go similarity index 100% rename from tsconst/interface.go rename to tsconst/tsconst.go diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index c196cc14d..71789b7b6 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -357,7 +357,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns+ + L tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ diff --git a/util/linuxfw/iptables_runner.go b/util/linuxfw/iptables_runner.go index 76b4cdd6f..4443a9071 100644 --- a/util/linuxfw/iptables_runner.go +++ b/util/linuxfw/iptables_runner.go @@ -246,11 +246,11 @@ func (i *iptablesRunner) addBase4(tunname string) error { // POSTROUTING. So instead, we match on the inbound interface in // filter/FORWARD, and set a packet mark that nat/POSTROUTING can // use to effectively run that same test again. 
- args = []string{"-i", tunname, "-j", "MARK", "--set-mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask} + args = []string{"-i", tunname, "-j", "MARK", "--set-mark", subnetRouteMark + "/" + fwmarkMask} if err := i.ipt4.Append("filter", "ts-forward", args...); err != nil { return fmt.Errorf("adding %v in v4/filter/ts-forward: %w", args, err) } - args = []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "ACCEPT"} + args = []string{"-m", "mark", "--mark", subnetRouteMark + "/" + fwmarkMask, "-j", "ACCEPT"} if err := i.ipt4.Append("filter", "ts-forward", args...); err != nil { return fmt.Errorf("adding %v in v4/filter/ts-forward: %w", args, err) } @@ -352,11 +352,11 @@ func (i *iptablesRunner) addBase6(tunname string) error { return fmt.Errorf("adding %v in v6/filter/ts-input: %w", args, err) } - args = []string{"-i", tunname, "-j", "MARK", "--set-mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask} + args = []string{"-i", tunname, "-j", "MARK", "--set-mark", subnetRouteMark + "/" + fwmarkMask} if err := i.ipt6.Append("filter", "ts-forward", args...); err != nil { return fmt.Errorf("adding %v in v6/filter/ts-forward: %w", args, err) } - args = []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "ACCEPT"} + args = []string{"-m", "mark", "--mark", subnetRouteMark + "/" + fwmarkMask, "-j", "ACCEPT"} if err := i.ipt6.Append("filter", "ts-forward", args...); err != nil { return fmt.Errorf("adding %v in v6/filter/ts-forward: %w", args, err) } @@ -445,7 +445,7 @@ func (i *iptablesRunner) DelHooks(logf logger.Logf) error { // AddSNATRule adds a netfilter rule to SNAT traffic destined for // local subnets. func (i *iptablesRunner) AddSNATRule() error { - args := []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "MASQUERADE"} + args := []string{"-m", "mark", "--mark", subnetRouteMark + "/" + fwmarkMask, "-j", "MASQUERADE"} for _, ipt := range i.getNATTables() { if err := ipt.Append("nat", "ts-postrouting", args...); err != nil { return fmt.Errorf("adding %v in nat/ts-postrouting: %w", args, err) @@ -457,7 +457,7 @@ func (i *iptablesRunner) AddSNATRule() error { // DelSNATRule removes the netfilter rule to SNAT traffic destined for // local subnets. An error is returned if the rule does not exist. 
func (i *iptablesRunner) DelSNATRule() error { - args := []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "MASQUERADE"} + args := []string{"-m", "mark", "--mark", subnetRouteMark + "/" + fwmarkMask, "-j", "MASQUERADE"} for _, ipt := range i.getNATTables() { if err := ipt.Delete("nat", "ts-postrouting", args...); err != nil { return fmt.Errorf("deleting %v in nat/ts-postrouting: %w", args, err) diff --git a/util/linuxfw/iptables_runner_test.go b/util/linuxfw/iptables_runner_test.go index 451b8aab4..ce905aef3 100644 --- a/util/linuxfw/iptables_runner_test.go +++ b/util/linuxfw/iptables_runner_test.go @@ -11,6 +11,7 @@ import ( "testing" "tailscale.com/net/tsaddr" + "tailscale.com/tsconst" ) var testIsNotExistErr = "exitcode:1" @@ -132,8 +133,8 @@ func TestAddAndDeleteBase(t *testing.T) { tsRulesCommon := []fakeRule{ // table/chain/rule {"filter", "ts-input", []string{"-i", tunname, "-j", "ACCEPT"}}, - {"filter", "ts-forward", []string{"-i", tunname, "-j", "MARK", "--set-mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask}}, - {"filter", "ts-forward", []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "ACCEPT"}}, + {"filter", "ts-forward", []string{"-i", tunname, "-j", "MARK", "--set-mark", tsconst.LinuxSubnetRouteMark + "/" + tsconst.LinuxFwmarkMask}}, + {"filter", "ts-forward", []string{"-m", "mark", "--mark", tsconst.LinuxSubnetRouteMark + "/" + tsconst.LinuxFwmarkMask, "-j", "ACCEPT"}}, {"filter", "ts-forward", []string{"-o", tunname, "-j", "ACCEPT"}}, } @@ -254,7 +255,7 @@ func TestAddAndDelSNATRule(t *testing.T) { } rule := fakeRule{ // table/chain/rule - "nat", "ts-postrouting", []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "MASQUERADE"}, + "nat", "ts-postrouting", []string{"-m", "mark", "--mark", tsconst.LinuxSubnetRouteMark + "/" + tsconst.LinuxFwmarkMask, "-j", "MASQUERADE"}, } // Add SNAT rule diff --git a/util/linuxfw/linuxfw.go b/util/linuxfw/linuxfw.go index 4aa0f8782..ec73aacee 100644 --- a/util/linuxfw/linuxfw.go +++ b/util/linuxfw/linuxfw.go @@ -15,6 +15,7 @@ import ( "github.com/tailscale/netlink" "tailscale.com/feature" + "tailscale.com/tsconst" "tailscale.com/types/logger" ) @@ -70,23 +71,12 @@ const ( // matching and setting the bits, so they can be directly embedded in // commands. const ( - // The mask for reading/writing the 'firewall mask' bits on a packet. - // See the comment on the const block on why we only use the third byte. - // - // We claim bits 16:23 entirely. For now we only use the lower four - // bits, leaving the higher 4 bits for future use. - TailscaleFwmarkMask = "0xff0000" - TailscaleFwmarkMaskNum = 0xff0000 - - // Packet is from Tailscale and to a subnet route destination, so - // is allowed to be routed through this machine. - TailscaleSubnetRouteMark = "0x40000" - TailscaleSubnetRouteMarkNum = 0x40000 - - // Packet was originated by tailscaled itself, and must not be - // routed over the Tailscale network. - TailscaleBypassMark = "0x80000" - TailscaleBypassMarkNum = 0x80000 + fwmarkMask = tsconst.LinuxFwmarkMask + fwmarkMaskNum = tsconst.LinuxFwmarkMaskNum + subnetRouteMark = tsconst.LinuxSubnetRouteMark + subnetRouteMarkNum = tsconst.LinuxSubnetRouteMarkNum + bypassMark = tsconst.LinuxBypassMark + bypassMarkNum = tsconst.LinuxBypassMarkNum ) // getTailscaleFwmarkMaskNeg returns the negation of TailscaleFwmarkMask in bytes. 
@@ -170,7 +160,7 @@ func CheckIPRuleSupportsV6(logf logger.Logf) error { // Try to actually create & delete one as a test. rule := netlink.NewRule() rule.Priority = 1234 - rule.Mark = TailscaleBypassMarkNum + rule.Mark = bypassMarkNum rule.Table = 52 rule.Family = netlink.FAMILY_V6 // First delete the rule unconditionally, and don't check for diff --git a/wgengine/router/router_linux.go b/wgengine/router/router_linux.go index dc1425708..75ff64f40 100644 --- a/wgengine/router/router_linux.go +++ b/wgengine/router/router_linux.go @@ -26,6 +26,7 @@ import ( "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/net/netmon" + "tailscale.com/tsconst" "tailscale.com/types/logger" "tailscale.com/types/opt" "tailscale.com/types/preftype" @@ -1238,14 +1239,14 @@ var baseIPRules = []netlink.Rule{ // main routing table. { Priority: 10, - Mark: linuxfw.TailscaleBypassMarkNum, + Mark: tsconst.LinuxBypassMarkNum, Table: mainRouteTable.Num, }, // ...and then we try the 'default' table, for correctness, // even though it's been empty on every Linux system I've ever seen. { Priority: 30, - Mark: linuxfw.TailscaleBypassMarkNum, + Mark: tsconst.LinuxBypassMarkNum, Table: defaultRouteTable.Num, }, // If neither of those matched (no default route on this system?) @@ -1253,7 +1254,7 @@ var baseIPRules = []netlink.Rule{ // to the tailscale routes, because that would create routing loops. { Priority: 50, - Mark: linuxfw.TailscaleBypassMarkNum, + Mark: tsconst.LinuxBypassMarkNum, Type: unix.RTN_UNREACHABLE, }, // If we get to this point, capture all packets and send them @@ -1283,7 +1284,7 @@ var ubntIPRules = []netlink.Rule{ { Priority: 70, Invert: true, - Mark: linuxfw.TailscaleBypassMarkNum, + Mark: tsconst.LinuxBypassMarkNum, Table: tailscaleRouteTable.Num, }, } @@ -1311,7 +1312,7 @@ func (r *linuxRouter) justAddIPRules() error { // Note: r is a value type here; safe to mutate it. ru.Family = family.netlinkInt() if ru.Mark != 0 { - ru.Mask = linuxfw.TailscaleFwmarkMaskNum + ru.Mask = tsconst.LinuxFwmarkMaskNum } ru.Goto = -1 ru.SuppressIfgroup = -1 @@ -1344,7 +1345,7 @@ func (r *linuxRouter) addIPRulesWithIPCommand() error { } if rule.Mark != 0 { if r.fwmaskWorks() { - args = append(args, "fwmark", fmt.Sprintf("0x%x/%s", rule.Mark, linuxfw.TailscaleFwmarkMask)) + args = append(args, "fwmark", fmt.Sprintf("0x%x/%s", rule.Mark, tsconst.LinuxFwmarkMask)) } else { args = append(args, "fwmark", fmt.Sprintf("0x%x", rule.Mark)) } diff --git a/wgengine/router/router_linux_test.go b/wgengine/router/router_linux_test.go index 3b1eb7db6..b7f3a8ba1 100644 --- a/wgengine/router/router_linux_test.go +++ b/wgengine/router/router_linux_test.go @@ -25,6 +25,7 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" + "tailscale.com/tsconst" "tailscale.com/tstest" "tailscale.com/types/logger" "tailscale.com/util/eventbus" @@ -572,8 +573,8 @@ func (n *fakeIPTablesRunner) addBase4(tunname string) error { newRules := []struct{ chain, rule string }{ {"filter/ts-input", fmt.Sprintf("! -i %s -s %s -j RETURN", tunname, tsaddr.ChromeOSVMRange().String())}, {"filter/ts-input", fmt.Sprintf("! 
-i %s -s %s -j DROP", tunname, tsaddr.CGNATRange().String())}, - {"filter/ts-forward", fmt.Sprintf("-i %s -j MARK --set-mark %s/%s", tunname, linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask)}, - {"filter/ts-forward", fmt.Sprintf("-m mark --mark %s/%s -j ACCEPT", linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask)}, + {"filter/ts-forward", fmt.Sprintf("-i %s -j MARK --set-mark %s/%s", tunname, tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask)}, + {"filter/ts-forward", fmt.Sprintf("-m mark --mark %s/%s -j ACCEPT", tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask)}, {"filter/ts-forward", fmt.Sprintf("-o %s -s %s -j DROP", tunname, tsaddr.CGNATRange().String())}, {"filter/ts-forward", fmt.Sprintf("-o %s -j ACCEPT", tunname)}, } @@ -588,8 +589,8 @@ func (n *fakeIPTablesRunner) addBase4(tunname string) error { func (n *fakeIPTablesRunner) addBase6(tunname string) error { curIPT := n.ipt6 newRules := []struct{ chain, rule string }{ - {"filter/ts-forward", fmt.Sprintf("-i %s -j MARK --set-mark %s/%s", tunname, linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask)}, - {"filter/ts-forward", fmt.Sprintf("-m mark --mark %s/%s -j ACCEPT", linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask)}, + {"filter/ts-forward", fmt.Sprintf("-i %s -j MARK --set-mark %s/%s", tunname, tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask)}, + {"filter/ts-forward", fmt.Sprintf("-m mark --mark %s/%s -j ACCEPT", tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask)}, {"filter/ts-forward", fmt.Sprintf("-o %s -j ACCEPT", tunname)}, } for _, rule := range newRules { @@ -673,7 +674,7 @@ func (n *fakeIPTablesRunner) DelBase() error { } func (n *fakeIPTablesRunner) AddSNATRule() error { - newRule := fmt.Sprintf("-m mark --mark %s/%s -j MASQUERADE", linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask) + newRule := fmt.Sprintf("-m mark --mark %s/%s -j MASQUERADE", tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask) for _, ipt := range []map[string][]string{n.ipt4, n.ipt6} { if err := appendRule(n, ipt, "nat/ts-postrouting", newRule); err != nil { return err @@ -683,7 +684,7 @@ func (n *fakeIPTablesRunner) AddSNATRule() error { } func (n *fakeIPTablesRunner) DelSNATRule() error { - delRule := fmt.Sprintf("-m mark --mark %s/%s -j MASQUERADE", linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask) + delRule := fmt.Sprintf("-m mark --mark %s/%s -j MASQUERADE", tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask) for _, ipt := range []map[string][]string{n.ipt4, n.ipt6} { if err := deleteRule(n, ipt, "nat/ts-postrouting", delRule); err != nil { return err From 01e645fae1d3e97d1b43a78ad9b6e5cf5d390c74 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 28 Sep 2025 11:03:46 -0700 Subject: [PATCH 1421/1708] util/backoff: rename logtail/backoff package to util/backoff It has nothing to do with logtail and is confusing named like that. 
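Call sites only need to update their import path; behavior is unchanged. A minimal usage sketch (the retry loop and names are illustrative, assuming the package's existing NewBackoff/BackOff API):

    import (
        "context"
        "time"

        "tailscale.com/types/logger"
        "tailscale.com/util/backoff" // was tailscale.com/logtail/backoff
    )

    func retryUntilCanceled(ctx context.Context, logf logger.Logf, do func() error) {
        bo := backoff.NewBackoff("retry", logf, 30*time.Second)
        for ctx.Err() == nil {
            err := do()
            bo.BackOff(ctx, err) // sleeps (with backoff and jitter) on error; returns immediately on nil
        }
    }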
Updates #cleanup Updates #17323 Change-Id: Idd34587ba186a2416725f72ffc4c5778b0b9db4a Signed-off-by: Brad Fitzpatrick --- cmd/containerboot/kube.go | 2 +- cmd/k8s-operator/depaware.txt | 2 +- cmd/k8s-operator/egress-pod-readiness.go | 2 +- cmd/stunstamp/stunstamp.go | 2 +- cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tailscaled/install_windows.go | 2 +- cmd/tailscaled/tailscaled_windows.go | 2 +- cmd/tsidp/depaware.txt | 2 +- control/controlclient/auto.go | 2 +- feature/taildrop/retrieve.go | 2 +- ipn/ipnlocal/serve.go | 2 +- ipn/ipnlocal/web_client.go | 2 +- net/dns/resolved.go | 2 +- prober/prober.go | 2 +- ssh/tailssh/tailssh.go | 2 +- tsnet/depaware.txt | 2 +- tstest/integration/tailscaled_deps_test_windows.go | 2 +- tstest/tstest.go | 2 +- {logtail => util}/backoff/backoff.go | 0 wgengine/magicsock/derp.go | 2 +- wgengine/router/router_windows.go | 2 +- 22 files changed, 21 insertions(+), 21 deletions(-) rename {logtail => util}/backoff/backoff.go (100%) diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go index 4873ae13f..e566fa483 100644 --- a/cmd/containerboot/kube.go +++ b/cmd/containerboot/kube.go @@ -23,9 +23,9 @@ import ( "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" "tailscale.com/kube/kubetypes" - "tailscale.com/logtail/backoff" "tailscale.com/tailcfg" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/set" ) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index ba644eb03..7140e57b1 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -832,7 +832,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ - tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp @@ -917,6 +916,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from tailscale.com/cmd/k8s-operator+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/cmd/k8s-operator+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ diff --git a/cmd/k8s-operator/egress-pod-readiness.go b/cmd/k8s-operator/egress-pod-readiness.go index 05cf1aa1a..f3a812ecb 100644 --- a/cmd/k8s-operator/egress-pod-readiness.go +++ b/cmd/k8s-operator/egress-pod-readiness.go @@ -25,8 +25,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" - "tailscale.com/logtail/backoff" "tailscale.com/tstime" + "tailscale.com/util/backoff" "tailscale.com/util/httpm" ) diff --git a/cmd/stunstamp/stunstamp.go b/cmd/stunstamp/stunstamp.go index c3842e2e8..71ed50569 100644 --- a/cmd/stunstamp/stunstamp.go +++ b/cmd/stunstamp/stunstamp.go @@ -34,10 +34,10 @@ import ( "github.com/golang/snappy" "github.com/prometheus/prometheus/prompb" "github.com/tcnksm/go-httpstat" - "tailscale.com/logtail/backoff" "tailscale.com/net/stun" 
"tailscale.com/net/tcpinfo" "tailscale.com/tailcfg" + "tailscale.com/util/backoff" ) var ( diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index b0cc9d9c1..c68834963 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -94,7 +94,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ - tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp @@ -170,6 +169,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/control/controlclient+ tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from tailscale.com/control/controlclient+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 7fdac984c..6d1791052 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -314,7 +314,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ - tailscale.com/logtail/backoff from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp @@ -403,6 +402,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/tka+ tailscale.com/types/views from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/backoff from tailscale.com/cmd/tailscaled+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/control/controlclient+ tailscale.com/util/cloudenv from tailscale.com/net/dns/resolver+ diff --git a/cmd/tailscaled/install_windows.go b/cmd/tailscaled/install_windows.go index e98a6461e..6013660f5 100644 --- a/cmd/tailscaled/install_windows.go +++ b/cmd/tailscaled/install_windows.go @@ -16,8 +16,8 @@ import ( "golang.org/x/sys/windows/svc" "golang.org/x/sys/windows/svc/mgr" "tailscale.com/cmd/tailscaled/tailscaledhooks" - "tailscale.com/logtail/backoff" "tailscale.com/types/logger" + "tailscale.com/util/backoff" ) func init() { diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 3a2edcac5..14f31968b 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -47,13 +47,13 @@ import ( _ "tailscale.com/ipn/auditlog" _ "tailscale.com/ipn/desktop" "tailscale.com/logpolicy" - "tailscale.com/logtail/backoff" "tailscale.com/net/dns" "tailscale.com/net/netmon" "tailscale.com/net/tstun" "tailscale.com/tsd" "tailscale.com/types/logger" "tailscale.com/types/logid" + 
"tailscale.com/util/backoff" "tailscale.com/util/osdiag" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index bff8df411..21ea91b46 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -263,7 +263,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ - tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp @@ -347,6 +346,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from tailscale.com/control/controlclient+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 9a654b679..f5495f854 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -13,7 +13,6 @@ import ( "time" "tailscale.com/health" - "tailscale.com/logtail/backoff" "tailscale.com/net/sockstats" "tailscale.com/tailcfg" "tailscale.com/tstime" @@ -22,6 +21,7 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/persist" "tailscale.com/types/structs" + "tailscale.com/util/backoff" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" "tailscale.com/util/execqueue" diff --git a/feature/taildrop/retrieve.go b/feature/taildrop/retrieve.go index b048a1b3b..e767bac32 100644 --- a/feature/taildrop/retrieve.go +++ b/feature/taildrop/retrieve.go @@ -14,7 +14,7 @@ import ( "time" "tailscale.com/client/tailscale/apitype" - "tailscale.com/logtail/backoff" + "tailscale.com/util/backoff" "tailscale.com/util/set" ) diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index cbf84fb29..dc4142404 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -36,12 +36,12 @@ import ( "go4.org/mem" "golang.org/x/net/http2" "tailscale.com/ipn" - "tailscale.com/logtail/backoff" "tailscale.com/net/netutil" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/lazy" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/clientmetric" "tailscale.com/util/ctxkey" "tailscale.com/util/mak" diff --git a/ipn/ipnlocal/web_client.go b/ipn/ipnlocal/web_client.go index 7cfb30ca4..a3c9387e4 100644 --- a/ipn/ipnlocal/web_client.go +++ b/ipn/ipnlocal/web_client.go @@ -19,11 +19,11 @@ import ( "tailscale.com/client/local" "tailscale.com/client/web" - "tailscale.com/logtail/backoff" "tailscale.com/net/netutil" "tailscale.com/tailcfg" "tailscale.com/tsconst" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/mak" ) diff --git a/net/dns/resolved.go b/net/dns/resolved.go index 5d9130f05..d8f63c9d6 100644 --- a/net/dns/resolved.go +++ b/net/dns/resolved.go @@ -15,8 +15,8 @@ import ( "github.com/godbus/dbus/v5" "golang.org/x/sys/unix" "tailscale.com/health" - "tailscale.com/logtail/backoff" "tailscale.com/types/logger" 
+ "tailscale.com/util/backoff" "tailscale.com/util/dnsname" ) diff --git a/prober/prober.go b/prober/prober.go index af0e19934..9073a9502 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -317,7 +317,7 @@ func (p *Probe) loop() { p.run() // Wait and then retry if probe fails. We use the inverse of the // configured negative interval as our sleep period. - // TODO(percy):implement exponential backoff, possibly using logtail/backoff. + // TODO(percy):implement exponential backoff, possibly using util/backoff. select { case <-time.After(-1 * p.interval): p.run() diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index b249a1063..7d12ab45f 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -32,7 +32,6 @@ import ( gossh "golang.org/x/crypto/ssh" "tailscale.com/envknob" "tailscale.com/ipn/ipnlocal" - "tailscale.com/logtail/backoff" "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" "tailscale.com/sessionrecording" @@ -41,6 +40,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" + "tailscale.com/util/backoff" "tailscale.com/util/clientmetric" "tailscale.com/util/httpm" "tailscale.com/util/mak" diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 71789b7b6..6c7dc6b55 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -259,7 +259,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ - tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp @@ -342,6 +341,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from tailscale.com/control/controlclient+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index 08c8c27ff..a5a0a428f 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -39,7 +39,6 @@ import ( _ "tailscale.com/ipn/store" _ "tailscale.com/logpolicy" _ "tailscale.com/logtail" - _ "tailscale.com/logtail/backoff" _ "tailscale.com/net/dns" _ "tailscale.com/net/dnsfallback" _ "tailscale.com/net/netmon" @@ -59,6 +58,7 @@ import ( _ "tailscale.com/types/key" _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" + _ "tailscale.com/util/backoff" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" diff --git a/tstest/tstest.go b/tstest/tstest.go index 2d0d1351e..169450686 100644 --- a/tstest/tstest.go +++ b/tstest/tstest.go @@ -14,8 +14,8 @@ import ( "time" "tailscale.com/envknob" - "tailscale.com/logtail/backoff" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/cibuild" ) diff --git a/logtail/backoff/backoff.go b/util/backoff/backoff.go similarity index 100% rename from 
logtail/backoff/backoff.go rename to util/backoff/backoff.go diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index 0d419841c..d33745892 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -19,7 +19,6 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/health" - "tailscale.com/logtail/backoff" "tailscale.com/net/dnscache" "tailscale.com/net/netcheck" "tailscale.com/net/tsaddr" @@ -28,6 +27,7 @@ import ( "tailscale.com/tstime/mono" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/mak" "tailscale.com/util/rands" "tailscale.com/util/testenv" diff --git a/wgengine/router/router_windows.go b/wgengine/router/router_windows.go index 32d05110d..edd258cb3 100644 --- a/wgengine/router/router_windows.go +++ b/wgengine/router/router_windows.go @@ -23,10 +23,10 @@ import ( "golang.org/x/sys/windows" "golang.zx2c4.com/wireguard/windows/tunnel/winipcfg" "tailscale.com/health" - "tailscale.com/logtail/backoff" "tailscale.com/net/dns" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/eventbus" ) From a32102f7412bc3fda4ac773c13b208c2743c2b54 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 28 Sep 2025 12:46:45 -0700 Subject: [PATCH 1422/1708] smallzstd: delete unused package As of the earlier 85febda86db1, our new preferred zstd API of choice is zstdframe. Updates #cleanup Updates tailscale/corp#18514 Change-Id: I5a6164d3162bf2513c3673b6d1e34cfae84cb104 Signed-off-by: Brad Fitzpatrick --- smallzstd/testdata | 14 ----- smallzstd/zstd.go | 78 ------------------------- smallzstd/zstd_test.go | 130 ----------------------------------------- 3 files changed, 222 deletions(-) delete mode 100644 smallzstd/testdata delete mode 100644 smallzstd/zstd.go delete mode 100644 smallzstd/zstd_test.go diff --git a/smallzstd/testdata b/smallzstd/testdata deleted file mode 100644 index 76640fdc5..000000000 --- a/smallzstd/testdata +++ /dev/null @@ -1,14 +0,0 @@ -{"logtail":{"client_time":"2020-07-01T14:49:40.196597018-07:00","server_time":"2020-07-01T21:49:40.198371511Z"},"text":"9.8M/25.6M magicsock: starting endpoint update (periodic)\n"} -{"logtail":{"client_time":"2020-07-01T14:49:40.345925455-07:00","server_time":"2020-07-01T21:49:40.347904717Z"},"text":"9.9M/25.6M netcheck: udp=true v6=false mapvarydest=false hair=false v4a=202.188.7.1:41641 derp=2 derpdist=1v4:7ms,2v4:3ms,4v4:18ms\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347155742-07:00","server_time":"2020-07-01T21:49:43.34828658Z"},"text":"9.9M/25.6M control: map response long-poll timed out!\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347539333-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"9.9M/25.6M control: PollNetMap: context canceled\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347767812-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"10.0M/25.6M control: sendStatus: mapRoutine1: state:authenticated\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347817165-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"10.0M/25.6M blockEngineUpdates(false)\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347989028-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"10.0M/25.6M wgcfg: [SViTM] skipping subnet route\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.349997554-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"9.3M/25.6M Received error: PollNetMap: context canceled\n"} 
-{"logtail":{"client_time":"2020-07-01T14:49:43.350072606-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"9.3M/25.6M control: mapRoutine: backoff: 30136 msec\n"} -{"logtail":{"client_time":"2020-07-01T14:49:47.998364646-07:00","server_time":"2020-07-01T21:49:47.999333754Z"},"text":"9.5M/25.6M [W1NbE] - [UcppE] Send handshake init [127.3.3.40:1, 6.1.1.6:37388*, 10.3.2.6:41641]\n"} -{"logtail":{"client_time":"2020-07-01T14:49:47.99881914-07:00","server_time":"2020-07-01T21:49:48.009859543Z"},"text":"9.6M/25.6M magicsock: adding connection to derp-1 for [W1NbE]\n"} -{"logtail":{"client_time":"2020-07-01T14:49:47.998904932-07:00","server_time":"2020-07-01T21:49:48.009859543Z"},"text":"9.6M/25.6M magicsock: 2 active derp conns: derp-1=cr0s,wr0s derp-2=cr16h0m0s,wr14h38m0s\n"} -{"logtail":{"client_time":"2020-07-01T14:49:47.999045606-07:00","server_time":"2020-07-01T21:49:48.009859543Z"},"text":"9.6M/25.6M derphttp.Client.Recv: connecting to derp-1 (nyc)\n"} -{"logtail":{"client_time":"2020-07-01T14:49:48.091104119-07:00","server_time":"2020-07-01T21:49:48.09280535Z"},"text":"9.6M/25.6M magicsock: rx [W1NbE] from 6.1.1.6:37388 (1/3), set as new priority\n"} diff --git a/smallzstd/zstd.go b/smallzstd/zstd.go deleted file mode 100644 index 1d8085422..000000000 --- a/smallzstd/zstd.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package smallzstd produces zstd encoders and decoders optimized for -// low memory usage, at the expense of compression efficiency. -// -// This package is optimized primarily for the memory cost of -// compressing and decompressing data. We reduce this cost in two -// major ways: disable parallelism within the library (i.e. don't use -// multiple CPU cores to decompress), and drop the compression window -// down from the defaults of 4-16MiB, to 8kiB. -// -// Decompressors cost 2x the window size in RAM to run, so by using an -// 8kiB window, we can run ~1000 more decompressors per unit of memory -// than with the defaults. -// -// Depending on context, the benefit is either being able to run more -// decoders (e.g. in our logs processing system), or having a lower -// memory footprint when using compression in network protocols -// (e.g. in tailscaled, which should have a minimal RAM cost). -package smallzstd - -import ( - "io" - - "github.com/klauspost/compress/zstd" -) - -// WindowSize is the window size used for zstd compression. Decoder -// memory usage scales linearly with WindowSize. -const WindowSize = 8 << 10 // 8kiB - -// NewDecoder returns a zstd.Decoder configured for low memory usage, -// at the expense of decompression performance. -func NewDecoder(r io.Reader, options ...zstd.DOption) (*zstd.Decoder, error) { - defaults := []zstd.DOption{ - // Default is GOMAXPROCS, which costs many KiB in stacks. - zstd.WithDecoderConcurrency(1), - // Default is to allocate more upfront for performance. We - // prefer lower memory use and a bit of GC load. - zstd.WithDecoderLowmem(true), - // You might expect to see zstd.WithDecoderMaxMemory - // here. However, it's not terribly safe to use if you're - // doing stateless decoding, because it sets the maximum - // amount of memory the decompressed data can occupy, rather - // than the window size of the zstd stream. This means a very - // compressible piece of data might violate the max memory - // limit here, even if the window size (and thus total memory - // required to decompress the data) is small. 
- // - // As a result, we don't set a decoder limit here, and rely on - // the encoder below producing "cheap" streams. Callers are - // welcome to set their own max memory setting, if - // contextually there is a clearly correct value (e.g. it's - // known from the upper layer protocol that the decoded data - // can never be more than 1MiB). - } - - return zstd.NewReader(r, append(defaults, options...)...) -} - -// NewEncoder returns a zstd.Encoder configured for low memory usage, -// both during compression and at decompression time, at the expense -// of performance and compression efficiency. -func NewEncoder(w io.Writer, options ...zstd.EOption) (*zstd.Encoder, error) { - defaults := []zstd.EOption{ - // Default is GOMAXPROCS, which costs many KiB in stacks. - zstd.WithEncoderConcurrency(1), - // Default is several MiB, which bloats both encoders and - // their corresponding decoders. - zstd.WithWindowSize(WindowSize), - // Encode zero-length inputs in a way that the `zstd` utility - // can read, because interoperability is handy. - zstd.WithZeroFrames(true), - } - - return zstd.NewWriter(w, append(defaults, options...)...) -} diff --git a/smallzstd/zstd_test.go b/smallzstd/zstd_test.go deleted file mode 100644 index d1225bfac..000000000 --- a/smallzstd/zstd_test.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package smallzstd - -import ( - "os" - "testing" - - "github.com/klauspost/compress/zstd" -) - -func BenchmarkSmallEncoder(b *testing.B) { - benchEncoder(b, func() (*zstd.Encoder, error) { return NewEncoder(nil) }) -} - -func BenchmarkSmallEncoderWithBuild(b *testing.B) { - benchEncoderWithConstruction(b, func() (*zstd.Encoder, error) { return NewEncoder(nil) }) -} - -func BenchmarkStockEncoder(b *testing.B) { - benchEncoder(b, func() (*zstd.Encoder, error) { return zstd.NewWriter(nil) }) -} - -func BenchmarkStockEncoderWithBuild(b *testing.B) { - benchEncoderWithConstruction(b, func() (*zstd.Encoder, error) { return zstd.NewWriter(nil) }) -} - -func BenchmarkSmallDecoder(b *testing.B) { - benchDecoder(b, func() (*zstd.Decoder, error) { return NewDecoder(nil) }) -} - -func BenchmarkSmallDecoderWithBuild(b *testing.B) { - benchDecoderWithConstruction(b, func() (*zstd.Decoder, error) { return NewDecoder(nil) }) -} - -func BenchmarkStockDecoder(b *testing.B) { - benchDecoder(b, func() (*zstd.Decoder, error) { return zstd.NewReader(nil) }) -} - -func BenchmarkStockDecoderWithBuild(b *testing.B) { - benchDecoderWithConstruction(b, func() (*zstd.Decoder, error) { return zstd.NewReader(nil) }) -} - -func benchEncoder(b *testing.B, mk func() (*zstd.Encoder, error)) { - b.ReportAllocs() - - in := testdata(b) - out := make([]byte, 0, 10<<10) // 10kiB - - e, err := mk() - if err != nil { - b.Fatalf("making encoder: %v", err) - } - - b.ResetTimer() - for range b.N { - e.EncodeAll(in, out) - } -} - -func benchEncoderWithConstruction(b *testing.B, mk func() (*zstd.Encoder, error)) { - b.ReportAllocs() - - in := testdata(b) - out := make([]byte, 0, 10<<10) // 10kiB - - b.ResetTimer() - for range b.N { - e, err := mk() - if err != nil { - b.Fatalf("making encoder: %v", err) - } - - e.EncodeAll(in, out) - } -} - -func benchDecoder(b *testing.B, mk func() (*zstd.Decoder, error)) { - b.ReportAllocs() - - in := compressedTestdata(b) - out := make([]byte, 0, 10<<10) - - d, err := mk() - if err != nil { - b.Fatalf("creating decoder: %v", err) - } - - b.ResetTimer() - for range b.N { - d.DecodeAll(in, out) - } -} - -func 
benchDecoderWithConstruction(b *testing.B, mk func() (*zstd.Decoder, error)) { - b.ReportAllocs() - - in := compressedTestdata(b) - out := make([]byte, 0, 10<<10) - - b.ResetTimer() - for range b.N { - d, err := mk() - if err != nil { - b.Fatalf("creating decoder: %v", err) - } - - d.DecodeAll(in, out) - } -} - -func testdata(b *testing.B) []byte { - b.Helper() - in, err := os.ReadFile("testdata") - if err != nil { - b.Fatalf("reading testdata: %v", err) - } - return in -} - -func compressedTestdata(b *testing.B) []byte { - b.Helper() - uncomp := testdata(b) - e, err := NewEncoder(nil) - if err != nil { - b.Fatalf("creating encoder: %v", err) - } - return e.EncodeAll(uncomp, nil) -} From e466488a2a68176569a98f59e0ace8c9896b6b92 Mon Sep 17 00:00:00 2001 From: David Bond Date: Mon, 29 Sep 2025 12:38:15 +0100 Subject: [PATCH 1423/1708] cmd/k8s-operator: add replica support to nameserver (#17246) This commit modifies the `DNSConfig` custom resource to allow specifying a replica count when deploying a nameserver. This allows deploying nameservers in a HA configuration. Updates https://github.com/tailscale/corp/issues/32589 Signed-off-by: David Bond --- .../deploy/crds/tailscale.com_dnsconfigs.yaml | 5 +++++ cmd/k8s-operator/deploy/manifests/operator.yaml | 5 +++++ cmd/k8s-operator/nameserver.go | 12 ++++++++++-- cmd/k8s-operator/nameserver_test.go | 3 +++ k8s-operator/api.md | 1 + k8s-operator/apis/v1alpha1/types_tsdnsconfig.go | 4 ++++ k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go | 5 +++++ 7 files changed, 33 insertions(+), 2 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml index b047e11a7..43ebaecec 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml @@ -100,6 +100,11 @@ spec: tag: description: Tag defaults to unstable. type: string + replicas: + description: Replicas specifies how many Pods to create. Defaults to 1. + type: integer + format: int32 + minimum: 0 service: description: Service configuration. type: object diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 8b3c206c8..9c19554aa 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -431,6 +431,11 @@ spec: description: Tag defaults to unstable. type: string type: object + replicas: + description: Replicas specifies how many Pods to create. Defaults to 1. + format: int32 + minimum: 0 + type: integer service: description: Service configuration. 
properties: diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index 983a28c91..3618642e1 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -30,6 +30,7 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tstime" + "tailscale.com/types/ptr" "tailscale.com/util/clientmetric" "tailscale.com/util/set" ) @@ -130,7 +131,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ return setStatus(&dnsCfg, metav1.ConditionFalse, reasonNameserverCreationFailed, msg) } } - if err := a.maybeProvision(ctx, &dnsCfg, logger); err != nil { + if err = a.maybeProvision(ctx, &dnsCfg); err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { logger.Infof("optimistic lock error, retrying: %s", err) return reconcile.Result{}, nil @@ -167,7 +168,7 @@ func nameserverResourceLabels(name, namespace string) map[string]string { return labels } -func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsapi.DNSConfig, logger *zap.SugaredLogger) error { +func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsapi.DNSConfig) error { labels := nameserverResourceLabels(tsDNSCfg.Name, a.tsNamespace) dCfg := &deployConfig{ ownerRefs: []metav1.OwnerReference{*metav1.NewControllerRef(tsDNSCfg, tsapi.SchemeGroupVersion.WithKind("DNSConfig"))}, @@ -175,6 +176,11 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa labels: labels, imageRepo: defaultNameserverImageRepo, imageTag: defaultNameserverImageTag, + replicas: 1, + } + + if tsDNSCfg.Spec.Nameserver.Replicas != nil { + dCfg.replicas = *tsDNSCfg.Spec.Nameserver.Replicas } if tsDNSCfg.Spec.Nameserver.Image != nil && tsDNSCfg.Spec.Nameserver.Image.Repo != "" { dCfg.imageRepo = tsDNSCfg.Spec.Nameserver.Image.Repo @@ -211,6 +217,7 @@ type deployable struct { } type deployConfig struct { + replicas int32 imageRepo string imageTag string labels map[string]string @@ -236,6 +243,7 @@ var ( if err := yaml.Unmarshal(deployYaml, &d); err != nil { return fmt.Errorf("error unmarshalling Deployment yaml: %w", err) } + d.Spec.Replicas = ptr.To(cfg.replicas) d.Spec.Template.Spec.Containers[0].Image = fmt.Sprintf("%s:%s", cfg.imageRepo, cfg.imageTag) d.ObjectMeta.Namespace = cfg.namespace d.ObjectMeta.Labels = cfg.labels diff --git a/cmd/k8s-operator/nameserver_test.go b/cmd/k8s-operator/nameserver_test.go index 55a998ac3..88e48b753 100644 --- a/cmd/k8s-operator/nameserver_test.go +++ b/cmd/k8s-operator/nameserver_test.go @@ -22,6 +22,7 @@ import ( operatorutils "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/tstest" + "tailscale.com/types/ptr" "tailscale.com/util/mak" ) @@ -33,6 +34,7 @@ func TestNameserverReconciler(t *testing.T) { }, Spec: tsapi.DNSConfigSpec{ Nameserver: &tsapi.Nameserver{ + Replicas: ptr.To[int32](3), Image: &tsapi.NameserverImage{ Repo: "test", Tag: "v0.0.1", @@ -74,6 +76,7 @@ func TestNameserverReconciler(t *testing.T) { } wantsDeploy.OwnerReferences = []metav1.OwnerReference{*ownerReference} wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.1" + wantsDeploy.Spec.Replicas = ptr.To[int32](3) wantsDeploy.Namespace = tsNamespace wantsDeploy.ObjectMeta.Labels = nameserverLabels expectEqual(t, fc, wantsDeploy) diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 180231bfa..b1c56c068 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -443,6 +443,7 @@ _Appears in:_ | --- 
| --- | --- | --- | | `image` _[NameserverImage](#nameserverimage)_ | Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. | | | | `service` _[NameserverService](#nameserverservice)_ | Service configuration. | | | +| `replicas` _integer_ | Replicas specifies how many Pods to create. Defaults to 1. | | Minimum: 0
                    | #### NameserverImage diff --git a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go index 0b0f1eb5c..4d8d569f6 100644 --- a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go +++ b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go @@ -84,6 +84,10 @@ type Nameserver struct { // Service configuration. // +optional Service *NameserverService `json:"service,omitempty"` + // Replicas specifies how many Pods to create. Defaults to 1. + // +optional + // +kubebuilder:validation:Minimum=0 + Replicas *int32 `json:"replicas,omitempty"` } type NameserverImage struct { diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index d7a90ad0f..3fd64c28e 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -422,6 +422,11 @@ func (in *Nameserver) DeepCopyInto(out *Nameserver) { *out = new(NameserverService) **out = **in } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Nameserver. From 11b770fbc90c8b46d4b575ce5d087a3ee8d28fa9 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 28 Sep 2025 10:57:22 -0700 Subject: [PATCH 1424/1708] feature/logtail: pull logtail + netlog out to modular features Removes 434 KB from the minimal Linux binary, or ~3%. Primarily this comes from not linking in the zstd encoding code. Fixes #17323 Change-Id: I0a90de307dfa1ad7422db7aa8b1b46c782bfaaf7 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-minbox.txt | 5 +- cmd/tailscaled/tailscaled.go | 30 +++++---- cmd/tailscaled/tailscaled_windows.go | 10 ++- .../buildfeatures/feature_logtail_disabled.go | 13 ++++ .../buildfeatures/feature_logtail_enabled.go | 13 ++++ .../buildfeatures/feature_netlog_disabled.go | 13 ++++ .../buildfeatures/feature_netlog_enabled.go | 13 ++++ feature/featuretags/featuretags.go | 13 +++- ipn/ipnlocal/local.go | 4 +- ipn/localapi/localapi.go | 10 +++ log/sockstatlog/logger.go | 3 +- logpolicy/logpolicy.go | 4 +- logtail/buffer.go | 2 + logtail/config.go | 65 +++++++++++++++++++ logtail/logtail.go | 54 +-------------- logtail/logtail_omit.go | 44 +++++++++++++ wgengine/netlog/{logger.go => netlog.go} | 2 + wgengine/netlog/netlog_omit.go | 13 ++++ wgengine/userspace.go | 6 +- 19 files changed, 240 insertions(+), 77 deletions(-) create mode 100644 feature/buildfeatures/feature_logtail_disabled.go create mode 100644 feature/buildfeatures/feature_logtail_enabled.go create mode 100644 feature/buildfeatures/feature_netlog_disabled.go create mode 100644 feature/buildfeatures/feature_netlog_enabled.go create mode 100644 logtail/config.go create mode 100644 logtail/logtail_omit.go rename wgengine/netlog/{logger.go => netlog.go} (99%) create mode 100644 wgengine/netlog/netlog_omit.go diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index c68834963..ad2bedf66 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -158,7 +158,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogtype from 
tailscale.com/net/connstats tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ @@ -205,11 +205,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/ptype from tailscale.com/ipn/ipnlocal+ tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ - tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ tailscale.com/util/winutil from tailscale.com/ipn/ipnauth - tailscale.com/util/zstdframe from tailscale.com/control/controlclient+ + tailscale.com/util/zstdframe from tailscale.com/control/controlclient tailscale.com/version from tailscale.com/clientupdate+ tailscale.com/version/distro from tailscale.com/clientupdate+ tailscale.com/wgengine from tailscale.com/cmd/tailscaled+ diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index d01af199c..2b0eec482 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -402,7 +402,7 @@ func ipnServerOpts() (o serverOptions) { return o } -var logPol *logpolicy.Policy +var logPol *logpolicy.Policy // or nil if not used var debugMux *http.ServeMux func run() (err error) { @@ -432,15 +432,19 @@ func run() (err error) { sys.Set(netMon) } - pol := logpolicy.New(logtail.CollectionNode, netMon, sys.HealthTracker.Get(), nil /* use log.Printf */) - pol.SetVerbosityLevel(args.verbose) - logPol = pol - defer func() { - // Finish uploading logs after closing everything else. - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - pol.Shutdown(ctx) - }() + var publicLogID logid.PublicID + if buildfeatures.HasLogTail { + pol := logpolicy.New(logtail.CollectionNode, netMon, sys.HealthTracker.Get(), nil /* use log.Printf */) + pol.SetVerbosityLevel(args.verbose) + publicLogID = pol.PublicID + logPol = pol + defer func() { + // Finish uploading logs after closing everything else. + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + pol.Shutdown(ctx) + }() + } if err := envknob.ApplyDiskConfigError(); err != nil { log.Printf("Error reading environment config: %v", err) @@ -449,7 +453,7 @@ func run() (err error) { if isWinSvc { // Run the IPN server from the Windows service manager. log.Printf("Running service...") - if err := runWindowsService(pol); err != nil { + if err := runWindowsService(logPol); err != nil { log.Printf("runservice: %v", err) } log.Printf("Service ended.") @@ -493,7 +497,7 @@ func run() (err error) { hostinfo.SetApp(app) } - return startIPNServer(context.Background(), logf, pol.PublicID, sys) + return startIPNServer(context.Background(), logf, publicLogID, sys) } var ( @@ -503,6 +507,7 @@ var ( var sigPipe os.Signal // set by sigpipe.go +// logID may be the zero value if logging is not in use. func startIPNServer(ctx context.Context, logf logger.Logf, logID logid.PublicID, sys *tsd.System) error { ln, err := safesocket.Listen(args.socketpath) if err != nil { @@ -600,6 +605,7 @@ var ( hookNewNetstack feature.Hook[func(_ logger.Logf, _ *tsd.System, onlyNetstack bool) (tsd.NetstackImpl, error)] ) +// logID may be the zero value if logging is not in use. 
func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID, sys *tsd.System) (_ *ipnlocal.LocalBackend, retErr error) { if logPol != nil { logPol.Logtail.SetNetMon(sys.NetMon.Get()) diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 14f31968b..3019bbaf9 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -149,6 +149,8 @@ var syslogf logger.Logf = logger.Discard // // At this point we're still the parent process that // Windows started. +// +// pol may be nil. func runWindowsService(pol *logpolicy.Policy) error { go func() { logger.Logf(log.Printf).JSON(1, "SupportInfo", osdiag.SupportInfo(osdiag.LogSupportInfoReasonStartup)) @@ -169,7 +171,7 @@ func runWindowsService(pol *logpolicy.Policy) error { } type ipnService struct { - Policy *logpolicy.Policy + Policy *logpolicy.Policy // or nil if logging not in use } // Called by Windows to execute the windows service. @@ -186,7 +188,11 @@ func (service *ipnService) Execute(args []string, r <-chan svc.ChangeRequest, ch doneCh := make(chan struct{}) go func() { defer close(doneCh) - args := []string{"/subproc", service.Policy.PublicID.String()} + publicID := "none" + if service.Policy != nil { + publicID = service.Policy.PublicID.String() + } + args := []string{"/subproc", publicID} // Make a logger without a date prefix, as filelogger // and logtail both already add their own. All we really want // from the log package is the automatic newline. diff --git a/feature/buildfeatures/feature_logtail_disabled.go b/feature/buildfeatures/feature_logtail_disabled.go new file mode 100644 index 000000000..140092a2e --- /dev/null +++ b/feature/buildfeatures/feature_logtail_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_logtail + +package buildfeatures + +// HasLogTail is whether the binary was built with support for modular feature "upload logs to log.tailscale.com (debug logs for bug reports and also by network flow logs if enabled)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_logtail" build tag. +// It's a const so it can be used for dead code elimination. +const HasLogTail = false diff --git a/feature/buildfeatures/feature_logtail_enabled.go b/feature/buildfeatures/feature_logtail_enabled.go new file mode 100644 index 000000000..6e777216b --- /dev/null +++ b/feature/buildfeatures/feature_logtail_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_logtail + +package buildfeatures + +// HasLogTail is whether the binary was built with support for modular feature "upload logs to log.tailscale.com (debug logs for bug reports and also by network flow logs if enabled)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_logtail" build tag. +// It's a const so it can be used for dead code elimination. +const HasLogTail = true diff --git a/feature/buildfeatures/feature_netlog_disabled.go b/feature/buildfeatures/feature_netlog_disabled.go new file mode 100644 index 000000000..60367a126 --- /dev/null +++ b/feature/buildfeatures/feature_netlog_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_netlog + +package buildfeatures + +// HasNetLog is whether the binary was built with support for modular feature "Network flow logging support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_netlog" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetLog = false diff --git a/feature/buildfeatures/feature_netlog_enabled.go b/feature/buildfeatures/feature_netlog_enabled.go new file mode 100644 index 000000000..f9d2abad3 --- /dev/null +++ b/feature/buildfeatures/feature_netlog_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_netlog + +package buildfeatures + +// HasNetLog is whether the binary was built with support for modular feature "Network flow logging support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_netlog" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetLog = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 40a5ac3f5..cd0db6e17 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -115,7 +115,11 @@ var Features = map[FeatureTag]FeatureMeta{ "iptables": {"IPTables", "Linux iptables support", nil}, "kube": {"Kube", "Kubernetes integration", nil}, "linuxdnsfight": {"LinuxDNSFight", "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)", nil}, - "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, + "logtail": { + Sym: "LogTail", + Desc: "upload logs to log.tailscale.com (debug logs for bug reports and also by network flow logs if enabled)", + }, + "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, "outboundproxy": { Sym: "OutboundProxy", Desc: "Outbound localhost HTTP/SOCK5 proxy support", @@ -123,7 +127,12 @@ var Features = map[FeatureTag]FeatureMeta{ }, "portlist": {"PortList", "Optionally advertise listening service ports", nil}, "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, - "netstack": {"Netstack", "gVisor netstack (userspace networking) support", nil}, + "netlog": { + Sym: "NetLog", + Desc: "Network flow logging support", + Deps: []FeatureTag{"logtail"}, + }, + "netstack": {"Netstack", "gVisor netstack (userspace networking) support", nil}, "networkmanager": { Sym: "NetworkManager", Desc: "Linux NetworkManager integration", diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index dd0a2f9f1..e07f7041c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -202,7 +202,7 @@ type LocalBackend struct { store ipn.StateStore // non-nil; TODO(bradfitz): remove; use sys dialer *tsdial.Dialer // non-nil; TODO(bradfitz): remove; use sys pushDeviceToken syncs.AtomicValue[string] - backendLogID logid.PublicID + backendLogID logid.PublicID // or zero value if logging not in use unregisterSysPolicyWatch func() varRoot string // or empty if SetVarRoot never called logFlushFunc func() // or nil if SetLogFlusher wasn't called @@ -456,6 +456,8 @@ type clientGen func(controlclient.Options) (controlclient.Client, error) // but is not actually running. // // If dialer is nil, a new one is made. +// +// The logID may be the zero value if logging is not in use. 
func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, loginFlags controlclient.LoginFlags) (_ *LocalBackend, err error) { e := sys.Engine.Get() store := sys.StateStore.Get() diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index e628e677b..e0c06b7dc 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -28,6 +28,7 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/clientupdate" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -575,6 +576,15 @@ func (h *Handler) serveGoroutines(w http.ResponseWriter, r *http.Request) { func (h *Handler) serveLogTap(w http.ResponseWriter, r *http.Request) { ctx := r.Context() + if !buildfeatures.HasLogTail { + // TODO(bradfitz): separate out logtail tap functionality from upload + // functionality to make this possible? But seems unlikely people would + // want just this. They could "tail -f" or "journalctl -f" their logs + // themselves. + http.Error(w, "logtap not supported in this build", http.StatusNotImplemented) + return + } + // Require write access (~root) as the logs could contain something // sensitive. if !h.PermitWrite { diff --git a/log/sockstatlog/logger.go b/log/sockstatlog/logger.go index 3cc27c22d..4f8909725 100644 --- a/log/sockstatlog/logger.go +++ b/log/sockstatlog/logger.go @@ -17,6 +17,7 @@ import ( "sync/atomic" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/logpolicy" "tailscale.com/logtail" @@ -97,7 +98,7 @@ func SockstatLogID(logID logid.PublicID) logid.PrivateID { // The netMon parameter is optional. It should be specified in environments where // Tailscaled is manipulating the routing table. func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *netmon.Monitor, health *health.Tracker) (*Logger, error) { - if !sockstats.IsAvailable { + if !sockstats.IsAvailable || !buildfeatures.HasLogTail { return nil, nil } if netMon == nil { diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 4c90378d0..c802d481f 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -31,6 +31,7 @@ import ( "golang.org/x/term" "tailscale.com/atomicfile" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/log/filelogger" @@ -106,6 +107,7 @@ type Policy struct { // Logtail is the logger. Logtail *logtail.Logger // PublicID is the logger's instance identifier. + // It may be the zero value if logging is not in use. PublicID logid.PublicID // Logf is where to write informational messages about this Logger. Logf logger.Logf @@ -682,7 +684,7 @@ func (opts Options) init(disableLogging bool) (*logtail.Config, *Policy) { // New returns a new log policy (a logger and its instance ID). 
func (opts Options) New() *Policy { - disableLogging := envknob.NoLogsNoSupport() || testenv.InTest() || runtime.GOOS == "plan9" + disableLogging := envknob.NoLogsNoSupport() || testenv.InTest() || runtime.GOOS == "plan9" || !buildfeatures.HasLogTail _, policy := opts.init(disableLogging) return policy } diff --git a/logtail/buffer.go b/logtail/buffer.go index c9f2e1ad0..d14d8fbf6 100644 --- a/logtail/buffer.go +++ b/logtail/buffer.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_logtail + package logtail import ( diff --git a/logtail/config.go b/logtail/config.go new file mode 100644 index 000000000..a6c068c0c --- /dev/null +++ b/logtail/config.go @@ -0,0 +1,65 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package logtail + +import ( + "io" + "net/http" + "time" + + "tailscale.com/tstime" + "tailscale.com/types/logid" +) + +// DefaultHost is the default host name to upload logs to when +// Config.BaseURL isn't provided. +const DefaultHost = "log.tailscale.com" + +const defaultFlushDelay = 2 * time.Second + +const ( + // CollectionNode is the name of a logtail Config.Collection + // for tailscaled (or equivalent: IPNExtension, Android app). + CollectionNode = "tailnode.log.tailscale.io" +) + +type Config struct { + Collection string // collection name, a domain name + PrivateID logid.PrivateID // private ID for the primary log stream + CopyPrivateID logid.PrivateID // private ID for a log stream that is a superset of this log stream + BaseURL string // if empty defaults to "https://log.tailscale.com" + HTTPC *http.Client // if empty defaults to http.DefaultClient + SkipClientTime bool // if true, client_time is not written to logs + LowMemory bool // if true, logtail minimizes memory use + Clock tstime.Clock // if set, Clock.Now substitutes uses of time.Now + Stderr io.Writer // if set, logs are sent here instead of os.Stderr + StderrLevel int // max verbosity level to write to stderr; 0 means the non-verbose messages only + Buffer Buffer // temp storage, if nil a MemoryBuffer + CompressLogs bool // whether to compress the log uploads + MaxUploadSize int // maximum upload size; 0 means using the default + + // MetricsDelta, if non-nil, is a func that returns an encoding + // delta in clientmetrics to upload alongside existing logs. + // It can return either an empty string (for nothing) or a string + // that's safe to embed in a JSON string literal without further escaping. + MetricsDelta func() string + + // FlushDelayFn, if non-nil is a func that returns how long to wait to + // accumulate logs before uploading them. 0 or negative means to upload + // immediately. + // + // If nil, a default value is used. (currently 2 seconds) + FlushDelayFn func() time.Duration + + // IncludeProcID, if true, results in an ephemeral process identifier being + // included in logs. The ID is random and not guaranteed to be globally + // unique, but it can be used to distinguish between different instances + // running with same PrivateID. + IncludeProcID bool + + // IncludeProcSequence, if true, results in an ephemeral sequence number + // being included in the logs. The sequence number is incremented for each + // log message sent, but is not persisted across process restarts. 
+ IncludeProcSequence bool +} diff --git a/logtail/logtail.go b/logtail/logtail.go index 6c4bbccc5..948c5a460 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_logtail + // Package logtail sends logs to log.tailscale.com. package logtail @@ -51,58 +53,6 @@ const lowMemRatio = 4 // but not too large to be a notable waste of memory if retained forever. const bufferSize = 4 << 10 -// DefaultHost is the default host name to upload logs to when -// Config.BaseURL isn't provided. -const DefaultHost = "log.tailscale.com" - -const defaultFlushDelay = 2 * time.Second - -const ( - // CollectionNode is the name of a logtail Config.Collection - // for tailscaled (or equivalent: IPNExtension, Android app). - CollectionNode = "tailnode.log.tailscale.io" -) - -type Config struct { - Collection string // collection name, a domain name - PrivateID logid.PrivateID // private ID for the primary log stream - CopyPrivateID logid.PrivateID // private ID for a log stream that is a superset of this log stream - BaseURL string // if empty defaults to "https://log.tailscale.com" - HTTPC *http.Client // if empty defaults to http.DefaultClient - SkipClientTime bool // if true, client_time is not written to logs - LowMemory bool // if true, logtail minimizes memory use - Clock tstime.Clock // if set, Clock.Now substitutes uses of time.Now - Stderr io.Writer // if set, logs are sent here instead of os.Stderr - StderrLevel int // max verbosity level to write to stderr; 0 means the non-verbose messages only - Buffer Buffer // temp storage, if nil a MemoryBuffer - CompressLogs bool // whether to compress the log uploads - MaxUploadSize int // maximum upload size; 0 means using the default - - // MetricsDelta, if non-nil, is a func that returns an encoding - // delta in clientmetrics to upload alongside existing logs. - // It can return either an empty string (for nothing) or a string - // that's safe to embed in a JSON string literal without further escaping. - MetricsDelta func() string - - // FlushDelayFn, if non-nil is a func that returns how long to wait to - // accumulate logs before uploading them. 0 or negative means to upload - // immediately. - // - // If nil, a default value is used. (currently 2 seconds) - FlushDelayFn func() time.Duration - - // IncludeProcID, if true, results in an ephemeral process identifier being - // included in logs. The ID is random and not guaranteed to be globally - // unique, but it can be used to distinguish between different instances - // running with same PrivateID. - IncludeProcID bool - - // IncludeProcSequence, if true, results in an ephemeral sequence number - // being included in the logs. The sequence number is incremented for each - // log message sent, but is not persisted across process restarts. - IncludeProcSequence bool -} - func NewLogger(cfg Config, logf tslogger.Logf) *Logger { if cfg.BaseURL == "" { cfg.BaseURL = "https://" + DefaultHost diff --git a/logtail/logtail_omit.go b/logtail/logtail_omit.go new file mode 100644 index 000000000..814fd3be9 --- /dev/null +++ b/logtail/logtail_omit.go @@ -0,0 +1,44 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_logtail + +package logtail + +import ( + "context" + + tslogger "tailscale.com/types/logger" + "tailscale.com/types/logid" +) + +// Noop implementations of everything when ts_omit_logtail is set. 
+ +type Logger struct{} + +type Buffer any + +func Disable() {} + +func NewLogger(cfg Config, logf tslogger.Logf) *Logger { + return &Logger{} +} + +func (*Logger) Write(p []byte) (n int, err error) { + return len(p), nil +} + +func (*Logger) Logf(format string, args ...any) {} +func (*Logger) Shutdown(ctx context.Context) error { return nil } +func (*Logger) SetVerbosityLevel(level int) {} + +func (l *Logger) SetSockstatsLabel(label any) {} + +func (l *Logger) PrivateID() logid.PrivateID { return logid.PrivateID{} } +func (l *Logger) StartFlush() {} + +func RegisterLogTap(dst chan<- string) (unregister func()) { + return func() {} +} + +func (*Logger) SetNetMon(any) {} diff --git a/wgengine/netlog/logger.go b/wgengine/netlog/netlog.go similarity index 99% rename from wgengine/netlog/logger.go rename to wgengine/netlog/netlog.go index 3a696b246..8fd225c90 100644 --- a/wgengine/netlog/logger.go +++ b/wgengine/netlog/netlog.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_netlog && !ts_omit_logtail + // Package netlog provides a logger that monitors a TUN device and // periodically records any traffic into a log stream. package netlog diff --git a/wgengine/netlog/netlog_omit.go b/wgengine/netlog/netlog_omit.go new file mode 100644 index 000000000..43209df91 --- /dev/null +++ b/wgengine/netlog/netlog_omit.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_netlog || ts_omit_logtail + +package netlog + +type Logger struct{} + +func (*Logger) Startup(...any) error { return nil } +func (*Logger) Running() bool { return false } +func (*Logger) Shutdown(any) error { return nil } +func (*Logger) ReconfigRoutes(any) {} diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 7fb580514..158a6d06f 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -962,7 +962,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, netLogIDsWasValid := !oldLogIDs.NodeID.IsZero() && !oldLogIDs.DomainID.IsZero() netLogIDsChanged := netLogIDsNowValid && netLogIDsWasValid && newLogIDs != oldLogIDs netLogRunning := netLogIDsNowValid && !routerCfg.Equal(&router.Config{}) - if envknob.NoLogsNoSupport() { + if !buildfeatures.HasNetLog || envknob.NoLogsNoSupport() { netLogRunning = false } @@ -1017,7 +1017,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, // Shutdown the network logger because the IDs changed. // Let it be started back up by subsequent logic. - if netLogIDsChanged && e.networkLogger.Running() { + if buildfeatures.HasNetLog && netLogIDsChanged && e.networkLogger.Running() { e.logf("wgengine: Reconfig: shutting down network logger") ctx, cancel := context.WithTimeout(context.Background(), networkLoggerUploadTimeout) defer cancel() @@ -1028,7 +1028,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, // Startup the network logger. // Do this before configuring the router so that we capture initial packets. 
- if netLogRunning && !e.networkLogger.Running() { + if buildfeatures.HasNetLog && netLogRunning && !e.networkLogger.Running() { nid := cfg.NetworkLogging.NodeID tid := cfg.NetworkLogging.DomainID logExitFlowEnabled := cfg.NetworkLogging.LogExitFlowEnabled From 7bcab4ab2841883251edfbc4523704ef176ca3a6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 28 Sep 2025 19:03:26 -0700 Subject: [PATCH 1425/1708] feature/featuretags: make CLI connection error diagnostics modular Updates #12614 Change-Id: I09b8944166ee00910b402bcd5725cd7969e2c82c Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 2 +- cmd/tailscale/cli/cli.go | 10 ++++++++++ cmd/tailscale/cli/diag.go | 10 +++++++--- cmd/tailscale/cli/diag_other.go | 15 --------------- cmd/tailscaled/depaware-minbox.txt | 1 - .../buildfeatures/feature_cliconndiag_disabled.go | 13 +++++++++++++ .../buildfeatures/feature_cliconndiag_enabled.go | 13 +++++++++++++ feature/featuretags/featuretags.go | 1 + safesocket/safesocket.go | 8 ++++++-- safesocket/safesocket_ps.go | 6 +++--- 10 files changed, 54 insertions(+), 25 deletions(-) delete mode 100644 cmd/tailscale/cli/diag_other.go create mode 100644 feature/buildfeatures/feature_cliconndiag_disabled.go create mode 100644 feature/buildfeatures/feature_cliconndiag_enabled.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 7d322aa31..4a7a4b34d 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -87,7 +87,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/disco from tailscale.com/derp/derpserver tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ - tailscale.com/feature from tailscale.com/tsweb + tailscale.com/feature from tailscale.com/tsweb+ tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/hostinfo from tailscale.com/net/netmon+ tailscale.com/ipn from tailscale.com/client/local diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 5206fdd58..389dc916a 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -26,6 +26,7 @@ import ( "tailscale.com/client/local" "tailscale.com/cmd/tailscale/cli/ffcomplete" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/paths" "tailscale.com/util/slicesx" "tailscale.com/version/distro" @@ -555,3 +556,12 @@ func lastSeenFmt(t time.Time) string { return fmt.Sprintf(", last seen %dd ago", int(d.Hours()/24)) } } + +var hookFixTailscaledConnectError feature.Hook[func(error) error] // for cliconndiag + +func fixTailscaledConnectError(origErr error) error { + if f, ok := hookFixTailscaledConnectError.GetOk(); ok { + return f(origErr) + } + return origErr +} diff --git a/cmd/tailscale/cli/diag.go b/cmd/tailscale/cli/diag.go index ebf26985f..3b2aa504b 100644 --- a/cmd/tailscale/cli/diag.go +++ b/cmd/tailscale/cli/diag.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || windows || darwin +//go:build (linux || windows || darwin) && !ts_omit_cliconndiag package cli @@ -16,11 +16,15 @@ import ( "tailscale.com/version/distro" ) -// fixTailscaledConnectError is called when the local tailscaled has +func init() { + hookFixTailscaledConnectError.Set(fixTailscaledConnectErrorImpl) +} + +// fixTailscaledConnectErrorImpl is called when the local tailscaled has // been determined unreachable due to the provided origErr value. 
It // returns either the same error or a better one to help the user // understand why tailscaled isn't running for their platform. -func fixTailscaledConnectError(origErr error) error { +func fixTailscaledConnectErrorImpl(origErr error) error { procs, err := ps.Processes() if err != nil { return fmt.Errorf("failed to connect to local Tailscaled process and failed to enumerate processes while looking for it") diff --git a/cmd/tailscale/cli/diag_other.go b/cmd/tailscale/cli/diag_other.go deleted file mode 100644 index ece10cc79..000000000 --- a/cmd/tailscale/cli/diag_other.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux && !windows && !darwin - -package cli - -import "fmt" - -// The github.com/mitchellh/go-ps package doesn't work on all platforms, -// so just don't diagnose connect failures. - -func fixTailscaledConnectError(origErr error) error { - return fmt.Errorf("failed to connect to local tailscaled process (is it running?); got: %w", origErr) -} diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index ad2bedf66..5c2cbefc2 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -35,7 +35,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/mdlayher/netlink/nltest from github.com/google/nftables github.com/mdlayher/sdnotify from tailscale.com/util/systemd 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/mitchellh/go-ps from tailscale.com/safesocket 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf github.com/tailscale/hujson from tailscale.com/ipn/conffile 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ diff --git a/feature/buildfeatures/feature_cliconndiag_disabled.go b/feature/buildfeatures/feature_cliconndiag_disabled.go new file mode 100644 index 000000000..06d8c7935 --- /dev/null +++ b/feature/buildfeatures/feature_cliconndiag_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_cliconndiag + +package buildfeatures + +// HasCLIConnDiag is whether the binary was built with support for modular feature "CLI connection error diagnostics". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cliconndiag" build tag. +// It's a const so it can be used for dead code elimination. +const HasCLIConnDiag = false diff --git a/feature/buildfeatures/feature_cliconndiag_enabled.go b/feature/buildfeatures/feature_cliconndiag_enabled.go new file mode 100644 index 000000000..d6125ef08 --- /dev/null +++ b/feature/buildfeatures/feature_cliconndiag_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_cliconndiag + +package buildfeatures + +// HasCLIConnDiag is whether the binary was built with support for modular feature "CLI connection error diagnostics". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cliconndiag" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasCLIConnDiag = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index cd0db6e17..3e4a6043a 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -96,6 +96,7 @@ var Features = map[FeatureTag]FeatureMeta{ "captiveportal": {"CaptivePortal", "Captive portal detection", nil}, "capture": {"Capture", "Packet capture", nil}, "cli": {"CLI", "embed the CLI into the tailscaled binary", nil}, + "cliconndiag": {"CLIConnDiag", "CLI connection error diagnostics", nil}, "completion": {"Completion", "CLI shell completion", nil}, "dbus": {"DBus", "Linux DBus support", nil}, "debugeventbus": {"DebugEventBus", "eventbus debug support", nil}, diff --git a/safesocket/safesocket.go b/safesocket/safesocket.go index 721b694dc..ea79edab0 100644 --- a/safesocket/safesocket.go +++ b/safesocket/safesocket.go @@ -11,6 +11,8 @@ import ( "net" "runtime" "time" + + "tailscale.com/feature" ) type closeable interface { @@ -31,7 +33,8 @@ func ConnCloseWrite(c net.Conn) error { } var processStartTime = time.Now() -var tailscaledProcExists = func() bool { return false } // set by safesocket_ps.go + +var tailscaledProcExists feature.Hook[func() bool] // tailscaledStillStarting reports whether tailscaled is probably // still starting up. That is, it reports whether the caller should @@ -50,7 +53,8 @@ func tailscaledStillStarting() bool { if d > 5*time.Second { return false } - return tailscaledProcExists() + f, ok := tailscaledProcExists.GetOk() + return ok && f() } // ConnectContext connects to tailscaled using a unix socket or named pipe. diff --git a/safesocket/safesocket_ps.go b/safesocket/safesocket_ps.go index 48a8dd483..d3f409df5 100644 --- a/safesocket/safesocket_ps.go +++ b/safesocket/safesocket_ps.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build (linux && !android) || windows || (darwin && !ios) || freebsd +//go:build ((linux && !android) || windows || (darwin && !ios) || freebsd) && !ts_omit_cliconndiag package safesocket @@ -12,7 +12,7 @@ import ( ) func init() { - tailscaledProcExists = func() bool { + tailscaledProcExists.Set(func() bool { procs, err := ps.Processes() if err != nil { return false @@ -30,5 +30,5 @@ func init() { } } return false - } + }) } From 976389c0f73de5048191cca329bfef4886fc5f21 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 27 Sep 2025 19:28:35 -0700 Subject: [PATCH 1426/1708] feature/sdnotify: move util/systemd to a modular feature Updates #12614 Change-Id: I08e714c83b455df7f538cc99cafe940db936b480 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 2 -- cmd/stund/depaware.txt | 1 + cmd/tailscaled/depaware-minbox.txt | 2 -- cmd/tailscaled/depaware.txt | 4 +-- cmd/tsidp/depaware.txt | 2 -- control/controlclient/direct.go | 6 ++-- .../feature_sdnotify_disabled.go | 13 ++++++++ .../buildfeatures/feature_sdnotify_enabled.go | 13 ++++++++ feature/condregister/maybe_sdnotify.go | 8 +++++ feature/featuretags/featuretags.go | 4 +++ feature/sdnotify.go | 32 +++++++++++++++++++ .../doc.go => feature/sdnotify/sdnotify.go | 4 +-- .../sdnotify/sdnotify_linux.go | 16 +++++++--- ipn/ipnlocal/local.go | 7 ++-- ipn/ipnserver/server.go | 6 ++-- tsnet/depaware.txt | 2 -- util/systemd/systemd_nonlinux.go | 9 ------ 18 files changed, 98 insertions(+), 34 deletions(-) create mode 100644 feature/buildfeatures/feature_sdnotify_disabled.go create mode 100644 feature/buildfeatures/feature_sdnotify_enabled.go 
create mode 100644 feature/condregister/maybe_sdnotify.go create mode 100644 feature/sdnotify.go rename util/systemd/doc.go => feature/sdnotify/sdnotify.go (81%) rename util/systemd/systemd_linux.go => feature/sdnotify/sdnotify_linux.go (84%) delete mode 100644 util/systemd/systemd_nonlinux.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 4a7a4b34d..258ff4686 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -88,6 +88,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/feature from tailscale.com/tsweb+ + tailscale.com/feature/buildfeatures from tailscale.com/feature tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/hostinfo from tailscale.com/net/netmon+ tailscale.com/ipn from tailscale.com/client/local diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 7140e57b1..7a66f25e9 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -164,7 +164,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables - L github.com/mdlayher/sdnotify from tailscale.com/util/systemd L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket github.com/modern-go/concurrent from github.com/json-iterator/go @@ -957,7 +956,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 97cf14cf0..20f58ef25 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -51,6 +51,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com from tailscale.com/version tailscale.com/envknob from tailscale.com/tsweb+ tailscale.com/feature from tailscale.com/tsweb + tailscale.com/feature/buildfeatures from tailscale.com/feature tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/metrics from tailscale.com/net/stunserver+ tailscale.com/net/netaddr from tailscale.com/net/tsaddr diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 5c2cbefc2..0498971b3 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -33,7 +33,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 github.com/mdlayher/netlink from github.com/google/nftables+ 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ github.com/mdlayher/netlink/nltest from github.com/google/nftables - github.com/mdlayher/sdnotify from tailscale.com/util/systemd 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/safchain/ethtool from 
tailscale.com/net/netkernelconf github.com/tailscale/hujson from tailscale.com/ipn/conffile @@ -202,7 +201,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ tailscale.com/util/syspolicy/policyclient from tailscale.com/cmd/tailscaled+ tailscale.com/util/syspolicy/ptype from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 6d1791052..68a29b46e 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -147,7 +147,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables - L github.com/mdlayher/sdnotify from tailscale.com/util/systemd + L github.com/mdlayher/sdnotify from tailscale.com/feature/sdnotify L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket L github.com/pierrec/lz4/v4 from github.com/u-root/uio/uio @@ -282,6 +282,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/portlist from tailscale.com/feature/condregister tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/relayserver from tailscale.com/feature/condregister + L tailscale.com/feature/sdnotify from tailscale.com/feature/condregister tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ tailscale.com/feature/taildrop from tailscale.com/feature/condregister L tailscale.com/feature/tap from tailscale.com/feature/condregister @@ -446,7 +447,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/ipn/ipnlocal+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 21ea91b46..b68336d9d 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -128,7 +128,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables - L github.com/mdlayher/sdnotify from tailscale.com/util/systemd L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack @@ -387,7 +386,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/localapi+ 
tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index ffac7e947..6d18e306f 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -29,6 +29,7 @@ import ( "go4.org/mem" "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn/ipnstate" @@ -57,7 +58,6 @@ import ( "tailscale.com/util/singleflight" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" - "tailscale.com/util/systemd" "tailscale.com/util/testenv" "tailscale.com/util/zstdframe" ) @@ -543,7 +543,9 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new } else { if expired { c.logf("Old key expired -> regen=true") - systemd.Status("key expired; run 'tailscale up' to authenticate") + if f, ok := feature.HookSystemdStatus.GetOk(); ok { + f("key expired; run 'tailscale up' to authenticate") + } regen = true } if (opt.Flags & LoginInteractive) != 0 { diff --git a/feature/buildfeatures/feature_sdnotify_disabled.go b/feature/buildfeatures/feature_sdnotify_disabled.go new file mode 100644 index 000000000..7efa2d22f --- /dev/null +++ b/feature/buildfeatures/feature_sdnotify_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_sdnotify + +package buildfeatures + +// HasSDNotify is whether the binary was built with support for modular feature "systemd notification support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_sdnotify" build tag. +// It's a const so it can be used for dead code elimination. +const HasSDNotify = false diff --git a/feature/buildfeatures/feature_sdnotify_enabled.go b/feature/buildfeatures/feature_sdnotify_enabled.go new file mode 100644 index 000000000..40fec9755 --- /dev/null +++ b/feature/buildfeatures/feature_sdnotify_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_sdnotify + +package buildfeatures + +// HasSDNotify is whether the binary was built with support for modular feature "systemd notification support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_sdnotify" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasSDNotify = true diff --git a/feature/condregister/maybe_sdnotify.go b/feature/condregister/maybe_sdnotify.go new file mode 100644 index 000000000..647996f88 --- /dev/null +++ b/feature/condregister/maybe_sdnotify.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !ts_omit_sdnotify + +package condregister + +import _ "tailscale.com/feature/sdnotify" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 3e4a6043a..c566eb949 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -145,6 +145,10 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Linux systemd-resolved integration", Deps: []FeatureTag{"dbus"}, }, + "sdnotify": { + Sym: "SDNotify", + Desc: "systemd notification support", + }, "serve": { Sym: "Serve", Desc: "Serve and Funnel support", diff --git a/feature/sdnotify.go b/feature/sdnotify.go new file mode 100644 index 000000000..e785dc1ac --- /dev/null +++ b/feature/sdnotify.go @@ -0,0 +1,32 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package feature + +import ( + "runtime" + + "tailscale.com/feature/buildfeatures" +) + +// HookSystemdReady sends a readiness to systemd. This will unblock service +// dependents from starting. +var HookSystemdReady Hook[func()] + +// HookSystemdStatus holds a func that will send a single line status update to +// systemd so that information shows up in systemctl output. +var HookSystemdStatus Hook[func(format string, args ...any)] + +// SystemdStatus sends a single line status update to systemd so that +// information shows up in systemctl output. +// +// It does nothing on non-Linux systems or if the binary was built without +// the sdnotify feature. +func SystemdStatus(format string, args ...any) { + if runtime.GOOS != "linux" || !buildfeatures.HasSDNotify { + return + } + if f, ok := HookSystemdStatus.GetOk(); ok { + f(format, args...) + } +} diff --git a/util/systemd/doc.go b/feature/sdnotify/sdnotify.go similarity index 81% rename from util/systemd/doc.go rename to feature/sdnotify/sdnotify.go index 0c28e1823..d13aa63f2 100644 --- a/util/systemd/doc.go +++ b/feature/sdnotify/sdnotify.go @@ -2,7 +2,7 @@ // SPDX-License-Identifier: BSD-3-Clause /* -Package systemd contains a minimal wrapper around systemd-notify to enable +Package sdnotify contains a minimal wrapper around systemd-notify to enable applications to signal readiness and status to systemd. This package will only have effect on Linux systems running Tailscale in a @@ -10,4 +10,4 @@ systemd unit with the Type=notify flag set. On other operating systems (or when running in a Linux distro without being run from inside systemd) this package will become a no-op. 
*/ -package systemd +package sdnotify diff --git a/util/systemd/systemd_linux.go b/feature/sdnotify/sdnotify_linux.go similarity index 84% rename from util/systemd/systemd_linux.go rename to feature/sdnotify/sdnotify_linux.go index fdfd1bba0..b005f1bdb 100644 --- a/util/systemd/systemd_linux.go +++ b/feature/sdnotify/sdnotify_linux.go @@ -3,7 +3,7 @@ //go:build linux && !android -package systemd +package sdnotify import ( "errors" @@ -12,8 +12,14 @@ import ( "sync" "github.com/mdlayher/sdnotify" + "tailscale.com/feature" ) +func init() { + feature.HookSystemdReady.Set(ready) + feature.HookSystemdStatus.Set(status) +} + var getNotifyOnce struct { sync.Once v *sdnotify.Notifier @@ -46,15 +52,15 @@ func notifier() *sdnotify.Notifier { return getNotifyOnce.v } -// Ready signals readiness to systemd. This will unblock service dependents from starting. -func Ready() { +// ready signals readiness to systemd. This will unblock service dependents from starting. +func ready() { err := notifier().Notify(sdnotify.Ready) if err != nil { readyOnce.logf("systemd: error notifying: %v", err) } } -// Status sends a single line status update to systemd so that information shows up +// status sends a single line status update to systemd so that information shows up // in systemctl output. For example: // // $ systemctl status tailscale @@ -69,7 +75,7 @@ func Ready() { // CPU: 2min 38.469s // CGroup: /system.slice/tailscale.service // └─26741 /nix/store/sv6cj4mw2jajm9xkbwj07k29dj30lh0n-tailscale-date.20200727/bin/tailscaled --port 41641 -func Status(format string, args ...any) { +func status(format string, args ...any) { err := notifier().Notify(sdnotify.Statusf(format, args...)) if err != nil { statusOnce.logf("systemd: error notifying: %v", err) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index e07f7041c..f84a023f8 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -102,7 +102,6 @@ import ( "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/ptype" - "tailscale.com/util/systemd" "tailscale.com/util/testenv" "tailscale.com/util/usermetric" "tailscale.com/version" @@ -5488,7 +5487,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock switch newState { case ipn.NeedsLogin: - systemd.Status("Needs login: %s", authURL) + feature.SystemdStatus("Needs login: %s", authURL) // always block updates on NeedsLogin even if seamless renewal is enabled, // to prevent calls to authReconfig from reconfiguring the engine when our // key has expired and we're waiting to authenticate to use the new key. 
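The change above routes systemd status updates through feature.SystemdStatus, which looks up an optional hook and silently does nothing when no implementation was registered at init time. Below is a self-contained sketch of that optional-hook pattern; the Hook type here is a simplified stand-in written for illustration, not the real tailscale.com/feature.Hook.

package main

import "fmt"

// Hook is a minimal stand-in for the optional-hook pattern: a slot that a
// build-tag-guarded file may fill from its init func.
type Hook[T any] struct {
	f   T
	set bool
}

// Set registers an implementation.
func (h *Hook[T]) Set(f T) { h.f, h.set = f, true }

// GetOk returns the registered implementation, if any.
func (h *Hook[T]) GetOk() (T, bool) { return h.f, h.set }

// hookStatus would be populated only when the feature is compiled in.
var hookStatus Hook[func(format string, args ...any)]

// status is the caller-facing wrapper: a no-op unless a hook was registered.
func status(format string, args ...any) {
	if f, ok := hookStatus.GetOk(); ok {
		f(format, args...)
	}
}

func main() {
	status("dropped: nothing registered yet")
	hookStatus.Set(func(format string, args ...any) { fmt.Printf(format+"\n", args...) })
	status("delivered: %s", "hook registered")
}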
@@ -5503,7 +5502,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock } if newState == ipn.Stopped && authURL == "" { - systemd.Status("Stopped; run 'tailscale up' to log in") + feature.SystemdStatus("Stopped; run 'tailscale up' to log in") } case ipn.Starting, ipn.NeedsMachineAuth: b.authReconfig() @@ -5515,7 +5514,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock for _, p := range addrs.All() { addrStrs = append(addrStrs, p.Addr().String()) } - systemd.Status("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) + feature.SystemdStatus("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) default: b.logf("[unexpected] unknown newState %#v", newState) } diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index 7e864959b..6c382a57e 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -23,6 +23,7 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/localapi" @@ -32,7 +33,6 @@ import ( "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/set" - "tailscale.com/util/systemd" "tailscale.com/util/testenv" ) @@ -513,7 +513,9 @@ func (s *Server) Run(ctx context.Context, ln net.Listener) error { ln.Close() }() - systemd.Ready() + if ready, ok := feature.HookSystemdReady.GetOk(); ok { + ready() + } hs := &http.Server{ Handler: http.HandlerFunc(s.serveHTTP), diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 6c7dc6b55..97256508a 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -128,7 +128,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables - L github.com/mdlayher/sdnotify from tailscale.com/util/systemd LA 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ LDW 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket DI github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack @@ -382,7 +381,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/localapi+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ diff --git a/util/systemd/systemd_nonlinux.go b/util/systemd/systemd_nonlinux.go deleted file mode 100644 index 5d7772bb3..000000000 --- a/util/systemd/systemd_nonlinux.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux || android - -package systemd - -func Ready() {} -func Status(string, ...any) {} From 65d6c80695b27b57a45572caad0f96d8f374f327 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Wed, 24 Sep 2025 15:02:57 -0700 Subject: [PATCH 1427/1708] cmd/tailscale/cli,client,ipn: add appc-routes cli command Allow the user to access information about routes an app connector has learned, such as how many routes for each domain. 
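For illustration, a hypothetical session with the new command; the domains, counts, and column spacing below are invented, and only the command name and flags come from this patch:

    $ tailscale appc-routes
    api.example.com 12
    example.com      3

    $ tailscale appc-routes -n
    15

The default output lists each learned domain with the number of routes learned for it, sorted by route count. -n prints only the total number of routes the node currently advertises, --map dumps the learned domain-to-routes map as JSON, and --all also includes any extra routes configured via policy.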
Fixes tailscale/corp#32624 Signed-off-by: Fran Bull --- client/local/local.go | 9 ++ cmd/derper/depaware.txt | 2 + cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/cli/appcroutes.go | 153 +++++++++++++++++++++++++++++ cmd/tailscale/cli/cli.go | 1 + cmd/tailscale/depaware.txt | 2 + cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- ipn/ipnlocal/local.go | 9 ++ ipn/localapi/localapi.go | 20 ++++ tsnet/depaware.txt | 2 +- 12 files changed, 201 insertions(+), 5 deletions(-) create mode 100644 cmd/tailscale/cli/appcroutes.go diff --git a/client/local/local.go b/client/local/local.go index 246112c37..a3717ad77 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -27,6 +27,7 @@ import ( "sync" "time" + "tailscale.com/appc" "tailscale.com/client/tailscale/apitype" "tailscale.com/drive" "tailscale.com/envknob" @@ -1374,3 +1375,11 @@ func (lc *Client) ShutdownTailscaled(ctx context.Context) error { _, err := lc.send(ctx, "POST", "/localapi/v0/shutdown", 200, nil) return err } + +func (lc *Client) GetAppConnectorRouteInfo(ctx context.Context) (appc.RouteInfo, error) { + body, err := lc.get200(ctx, "/localapi/v0/appc-route-info") + if err != nil { + return appc.RouteInfo{}, err + } + return decodeJSON[appc.RouteInfo](body) +} diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 258ff4686..08aa374d6 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -77,6 +77,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa google.golang.org/protobuf/runtime/protoimpl from github.com/prometheus/client_model/go+ google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version + tailscale.com/appc from tailscale.com/client/local 💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+ tailscale.com/client/local from tailscale.com/derp/derpserver tailscale.com/client/tailscale/apitype from tailscale.com/client/local @@ -151,6 +152,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/hostinfo+ tailscale.com/util/eventbus from tailscale.com/net/netmon+ + tailscale.com/util/execqueue from tailscale.com/appc 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/health+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 7a66f25e9..d81abf550 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -769,7 +769,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ sigs.k8s.io/yaml from k8s.io/apimachinery/pkg/runtime/serializer/json+ sigs.k8s.io/yaml/goyaml.v2 from sigs.k8s.io/yaml+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/appc from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/tailscale+ tailscale.com/client/tailscale from tailscale.com/cmd/k8s-operator+ diff --git a/cmd/tailscale/cli/appcroutes.go b/cmd/tailscale/cli/appcroutes.go new file mode 100644 index 000000000..83443f56c --- /dev/null +++ b/cmd/tailscale/cli/appcroutes.go @@ -0,0 +1,153 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + 
+package cli + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "slices" + "strings" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/appc" +) + +var appcRoutesArgs struct { + all bool + domainMap bool + n bool +} + +var appcRoutesCmd = &ffcli.Command{ + Name: "appc-routes", + ShortUsage: "tailscale appc-routes", + Exec: runAppcRoutesInfo, + ShortHelp: "Print the current app connector routes", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("appc-routes") + fs.BoolVar(&appcRoutesArgs.all, "all", false, "Print learned domains and routes and extra policy configured routes.") + fs.BoolVar(&appcRoutesArgs.domainMap, "map", false, "Print the map of learned domains: [routes].") + fs.BoolVar(&appcRoutesArgs.n, "n", false, "Print the total number of routes this node advertises.") + return fs + })(), + LongHelp: strings.TrimSpace(` +The 'tailscale appc-routes' command prints the current App Connector route status. + +By default this command prints the domains configured in the app connector configuration and how many routes have been +learned for each domain. + +--all prints the routes learned from the domains configured in the app connector configuration; and any extra routes provided +in the the policy app connector 'routes' field. + +--map prints the routes learned from the domains configured in the app connector configuration. + +-n prints the total number of routes advertised by this device, whether learned, set in the policy, or set locally. + +For more information about App Connectors, refer to +https://tailscale.com/kb/1281/app-connectors +`), +} + +func getAllOutput(ri *appc.RouteInfo) (string, error) { + domains, err := json.MarshalIndent(ri.Domains, " ", " ") + if err != nil { + return "", err + } + control, err := json.MarshalIndent(ri.Control, " ", " ") + if err != nil { + return "", err + } + s := fmt.Sprintf(`Learned Routes +============== +%s + +Routes from Policy +================== +%s +`, domains, control) + return s, nil +} + +type domainCount struct { + domain string + count int +} + +func getSummarizeLearnedOutput(ri *appc.RouteInfo) string { + x := make([]domainCount, len(ri.Domains)) + i := 0 + maxDomainWidth := 0 + for k, v := range ri.Domains { + if len(k) > maxDomainWidth { + maxDomainWidth = len(k) + } + x[i] = domainCount{domain: k, count: len(v)} + i++ + } + slices.SortFunc(x, func(i, j domainCount) int { + if i.count > j.count { + return -1 + } + if i.count < j.count { + return 1 + } + if i.domain > j.domain { + return 1 + } + if i.domain < j.domain { + return -1 + } + return 0 + }) + s := "" + fmtString := fmt.Sprintf("%%-%ds %%d\n", maxDomainWidth) // eg "%-10s %d\n" + for _, dc := range x { + s += fmt.Sprintf(fmtString, dc.domain, dc.count) + } + return s +} + +func runAppcRoutesInfo(ctx context.Context, args []string) error { + prefs, err := localClient.GetPrefs(ctx) + if err != nil { + return err + } + if !prefs.AppConnector.Advertise { + fmt.Println("not a connector") + return nil + } + + if appcRoutesArgs.n { + fmt.Println(len(prefs.AdvertiseRoutes)) + return nil + } + + routeInfo, err := localClient.GetAppConnectorRouteInfo(ctx) + if err != nil { + return err + } + + if appcRoutesArgs.domainMap { + domains, err := json.Marshal(routeInfo.Domains) + if err != nil { + return err + } + fmt.Println(string(domains)) + return nil + } + + if appcRoutesArgs.all { + s, err := getAllOutput(&routeInfo) + if err != nil { + return err + } + fmt.Println(s) + return nil + } + + fmt.Print(getSummarizeLearnedOutput(&routeInfo)) + return nil +} diff 
--git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 389dc916a..5ebc23a5b 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -276,6 +276,7 @@ change in the future. idTokenCmd, configureHostCmd(), systrayCmd, + appcRoutesCmd, ), FlagSet: rootfs, Exec: func(ctx context.Context, args []string) error { diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 47e5ca48e..2d724a900 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -70,6 +70,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep software.sslmate.com/src/go-pkcs12 from tailscale.com/cmd/tailscale/cli software.sslmate.com/src/go-pkcs12/internal/rc2 from software.sslmate.com/src/go-pkcs12 tailscale.com from tailscale.com/version + tailscale.com/appc from tailscale.com/client/local+ 💣 tailscale.com/atomicfile from tailscale.com/cmd/tailscale/cli+ tailscale.com/client/local from tailscale.com/client/tailscale+ L tailscale.com/client/systray from tailscale.com/cmd/tailscale/cli @@ -168,6 +169,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/cmd/tailscale/cli+ tailscale.com/util/eventbus from tailscale.com/client/local+ + tailscale.com/util/execqueue from tailscale.com/appc tailscale.com/util/groupmember from tailscale.com/client/web 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 0498971b3..aefa78c42 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -51,7 +51,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 go4.org/mem from tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/appc from tailscale.com/ipn/ipnlocal+ tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ tailscale.com/clientupdate from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 68a29b46e..dde1e0681 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -240,7 +240,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/appc from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/atomicfile from tailscale.com/ipn+ LD tailscale.com/chirp from tailscale.com/cmd/tailscaled tailscale.com/client/local from tailscale.com/client/web+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index b68336d9d..71c274794 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -211,7 +211,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal 
+ tailscale.com/appc from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/web+ tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index f84a023f8..a95aef0f2 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7124,6 +7124,15 @@ func (b *LocalBackend) readRouteInfoLocked() (*appc.RouteInfo, error) { return ri, nil } +// ReadRouteInfo returns the app connector route information that is +// stored in prefs to be consistent across restarts. It should be up +// to date with the RouteInfo in memory being used by appc. +func (b *LocalBackend) ReadRouteInfo() (*appc.RouteInfo, error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.readRouteInfoLocked() +} + // seamlessRenewalEnabled reports whether seamless key renewals are enabled. // // As of 2025-09-11, this is the default behaviour unless nodes receive diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index e0c06b7dc..caebbe0cc 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -25,6 +25,7 @@ import ( "time" "golang.org/x/net/dns/dnsmessage" + "tailscale.com/appc" "tailscale.com/client/tailscale/apitype" "tailscale.com/clientupdate" "tailscale.com/envknob" @@ -73,6 +74,7 @@ var handler = map[string]LocalAPIHandler{ // The other /localapi/v0/NAME handlers are exact matches and contain only NAME // without a trailing slash: "alpha-set-device-attrs": (*Handler).serveSetDeviceAttrs, // see tailscale/corp#24690 + "appc-route-info": (*Handler).serveGetAppcRouteInfo, "bugreport": (*Handler).serveBugReport, "check-ip-forwarding": (*Handler).serveCheckIPForwarding, "check-prefs": (*Handler).serveCheckPrefs, @@ -2111,3 +2113,21 @@ func (h *Handler) serveShutdown(w http.ResponseWriter, r *http.Request) { eventbus.Publish[Shutdown](ec).Publish(Shutdown{}) } + +func (h *Handler) serveGetAppcRouteInfo(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) + return + } + res, err := h.b.ReadRouteInfo() + if err != nil { + if errors.Is(err, ipn.ErrStateNotExist) { + res = &appc.RouteInfo{} + } else { + WriteErrorJSON(w, err) + return + } + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 97256508a..47c6b033c 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -207,7 +207,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/appc from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/web+ tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale From 39e35379d41fc78871362bf9dea2111a92744e21 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 27 Sep 2025 15:18:25 -0700 Subject: [PATCH 1428/1708] wgengine/router{,/osrouter}: split OS router implementations into subpackage So wgengine/router is just the docs + entrypoint + types, and then underscore importing wgengine/router/osrouter registers the constructors with the wgengine/router package. 
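A minimal sketch of the registration pattern (condensed from the per-OS files added below):

    // In an OS-specific implementation under wgengine/router/osrouter:
    func init() {
        router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) {
            return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health, opts.Bus)
        })
    }

    // In binaries that want OS routing (done here via feature/condregister):
    import _ "tailscale.com/wgengine/router/osrouter"

If nothing registers the hook, router.New now reports an error rather than constructing
an OS router.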
Then tsnet can not pull those in. Updates #17313 Change-Id: If313226f6987d709ea9193c8f16a909326ceefe7 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 17 +------ cmd/tailscaled/depaware-minbox.txt | 21 ++------ cmd/tailscaled/depaware.txt | 5 +- cmd/tsidp/depaware.txt | 19 +------ .../feature_osrouter_disabled.go | 13 +++++ .../buildfeatures/feature_osrouter_enabled.go | 13 +++++ feature/condregister/maybe_osrouter.go | 8 +++ feature/featuretags/featuretags.go | 10 ++++ license_test.go | 2 +- tsnet/depaware.txt | 19 +------ wgengine/router/consolidating_router_test.go | 3 +- .../router/{ => osrouter}/ifconfig_windows.go | 5 +- .../{ => osrouter}/ifconfig_windows_test.go | 2 +- wgengine/router/osrouter/osrouter.go | 15 ++++++ wgengine/router/osrouter/osrouter_test.go | 15 ++++++ .../router/{ => osrouter}/router_freebsd.go | 17 +++---- .../router/{ => osrouter}/router_linux.go | 20 ++++++-- .../{ => osrouter}/router_linux_test.go | 5 +- .../router/{ => osrouter}/router_openbsd.go | 18 +++++-- .../router/{ => osrouter}/router_plan9.go | 21 +++++--- .../{ => osrouter}/router_userspace_bsd.go | 13 +++-- .../router/{ => osrouter}/router_windows.go | 17 ++++--- .../{ => osrouter}/router_windows_test.go | 2 +- wgengine/router/{ => osrouter}/runner.go | 2 +- wgengine/router/router.go | 49 ++++++++++++++++--- wgengine/router/router_android.go | 30 ------------ wgengine/router/router_darwin.go | 20 -------- wgengine/router/router_default.go | 25 ---------- wgengine/router/router_test.go | 9 ---- 29 files changed, 208 insertions(+), 207 deletions(-) create mode 100644 feature/buildfeatures/feature_osrouter_disabled.go create mode 100644 feature/buildfeatures/feature_osrouter_enabled.go create mode 100644 feature/condregister/maybe_osrouter.go rename wgengine/router/{ => osrouter}/ifconfig_windows.go (99%) rename wgengine/router/{ => osrouter}/ifconfig_windows_test.go (99%) create mode 100644 wgengine/router/osrouter/osrouter.go create mode 100644 wgengine/router/osrouter/osrouter_test.go rename wgengine/router/{ => osrouter}/router_freebsd.go (54%) rename wgengine/router/{ => osrouter}/router_linux.go (98%) rename wgengine/router/{ => osrouter}/router_linux_test.go (99%) rename wgengine/router/{ => osrouter}/router_openbsd.go (93%) rename wgengine/router/{ => osrouter}/router_plan9.go (89%) rename wgengine/router/{ => osrouter}/router_userspace_bsd.go (93%) rename wgengine/router/{ => osrouter}/router_windows.go (97%) rename wgengine/router/{ => osrouter}/router_windows_test.go (95%) rename wgengine/router/{ => osrouter}/runner.go (99%) delete mode 100644 wgengine/router/router_android.go delete mode 100644 wgengine/router/router_darwin.go delete mode 100644 wgengine/router/router_default.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index d81abf550..223baa43c 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -86,7 +86,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw 💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ @@ -113,8 +112,6 @@ 
tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/go-logr/logr from github.com/go-logr/logr/slogr+ github.com/go-logr/logr/slogr from github.com/go-logr/zapr github.com/go-logr/zapr from sigs.k8s.io/controller-runtime/pkg/log/zap+ - W 💣 github.com/go-ole/go-ole from github.com/go-ole/go-ole/oleutil+ - W 💣 github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet github.com/go-openapi/jsonpointer from github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference from k8s.io/kube-openapi/pkg/internal+ github.com/go-openapi/jsonreference/internal from github.com/go-openapi/jsonreference @@ -137,12 +134,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 github.com/google/go-cmp/cmp/internal/value from github.com/google/go-cmp/cmp github.com/google/gofuzz from k8s.io/apimachinery/pkg/apis/meta/v1+ github.com/google/gofuzz/bytesource from github.com/google/gofuzz - L github.com/google/nftables from tailscale.com/util/linuxfw - L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm @@ -161,9 +152,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 github.com/mailru/easyjson/jlexer from github.com/go-openapi/swag github.com/mailru/easyjson/jwriter from github.com/go-openapi/swag L github.com/mdlayher/genetlink from tailscale.com/net/tstun - L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket github.com/modern-go/concurrent from github.com/json-iterator/go @@ -199,8 +189,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile+ - L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ - L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ @@ -213,7 +201,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from 
github.com/fxamacker/cbor/v2 go.uber.org/multierr from go.uber.org/zap+ go.uber.org/zap from github.com/go-logr/zapr+ @@ -931,7 +918,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ @@ -980,7 +966,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine - W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index aefa78c42..6cc3733a9 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -13,12 +13,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/google/nftables from tailscale.com/util/linuxfw - 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - github.com/google/nftables/expr from github.com/google/nftables+ - github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - github.com/google/nftables/xt from github.com/google/nftables/expr+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink @@ -30,14 +24,11 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd github.com/mdlayher/genetlink from tailscale.com/net/tstun - 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - github.com/mdlayher/netlink/nltest from github.com/google/nftables 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf github.com/tailscale/hujson from tailscale.com/ipn/conffile - 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ - 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ @@ -47,7 
+38,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - github.com/vishvananda/netns from github.com/tailscale/netlink+ 💣 go4.org/mem from tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ tailscale.com from tailscale.com/version @@ -139,7 +129,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tempfork/httprec from tailscale.com/control/controlclient tailscale.com/tka from tailscale.com/control/controlclient+ - tailscale.com/tsconst from tailscale.com/net/netns+ + tailscale.com/tsconst from tailscale.com/net/netns tailscale.com/tsd from tailscale.com/cmd/tailscaled+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ @@ -183,7 +173,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/clientupdate/distsign+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ @@ -253,13 +242,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/proxy from tailscale.com/net/netns golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ - golang.org/x/sys/unix from github.com/google/nftables+ + golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ golang.org/x/term from tailscale.com/logpolicy golang.org/x/text/secure/bidirule from golang.org/x/net/idna golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna - golang.org/x/time/rate from tailscale.com/derp+ + golang.org/x/time/rate from tailscale.com/derp archive/tar from tailscale.com/clientupdate bufio from compress/flate+ bytes from archive/tar+ @@ -392,7 +381,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ - io/ioutil from github.com/digitalocean/go-smbios/smbios+ + io/ioutil from github.com/digitalocean/go-smbios/smbios iter from bytes+ log from expvar+ log/internal from log diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index dde1e0681..4051000a6 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -420,7 +420,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/httphdr from tailscale.com/feature/taildrop tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/wgengine/router + L tailscale.com/util/linuxfw from tailscale.com/wgengine/router/osrouter tailscale.com/util/mak from 
tailscale.com/control/controlclient+ tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ @@ -468,11 +468,12 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/wgengine/netstack from tailscale.com/cmd/tailscaled tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ tailscale.com/wgengine/router from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/router/osrouter from tailscale.com/feature/condregister tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine - W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router + W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router/osrouter golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 71c274794..dfb6553bd 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -84,7 +84,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc @@ -101,17 +100,9 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ - W 💣 github.com/go-ole/go-ole from github.com/go-ole/go-ole/oleutil+ - W 💣 github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ - L github.com/google/nftables from tailscale.com/util/linuxfw - L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ DW github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm @@ -125,9 +116,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd L 
github.com/mdlayher/genetlink from tailscale.com/net/tstun - L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack @@ -145,8 +135,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile - L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ - L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ @@ -159,7 +147,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ @@ -361,7 +348,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ @@ -410,7 +396,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine - W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ @@ -455,7 +440,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ - LD golang.org/x/sys/unix from github.com/google/nftables+ + LD golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ diff --git a/feature/buildfeatures/feature_osrouter_disabled.go 
b/feature/buildfeatures/feature_osrouter_disabled.go new file mode 100644 index 000000000..ccd7192bb --- /dev/null +++ b/feature/buildfeatures/feature_osrouter_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_osrouter + +package buildfeatures + +// HasOSRouter is whether the binary was built with support for modular feature "Configure the operating system's network stack, IPs, and routing tables". +// Specifically, it's whether the binary was NOT built with the "ts_omit_osrouter" build tag. +// It's a const so it can be used for dead code elimination. +const HasOSRouter = false diff --git a/feature/buildfeatures/feature_osrouter_enabled.go b/feature/buildfeatures/feature_osrouter_enabled.go new file mode 100644 index 000000000..a5dacc596 --- /dev/null +++ b/feature/buildfeatures/feature_osrouter_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_osrouter + +package buildfeatures + +// HasOSRouter is whether the binary was built with support for modular feature "Configure the operating system's network stack, IPs, and routing tables". +// Specifically, it's whether the binary was NOT built with the "ts_omit_osrouter" build tag. +// It's a const so it can be used for dead code elimination. +const HasOSRouter = true diff --git a/feature/condregister/maybe_osrouter.go b/feature/condregister/maybe_osrouter.go new file mode 100644 index 000000000..7ab85add2 --- /dev/null +++ b/feature/condregister/maybe_osrouter.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_osrouter + +package condregister + +import _ "tailscale.com/wgengine/router/osrouter" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index c566eb949..c41764741 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -126,6 +126,16 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Outbound localhost HTTP/SOCK5 proxy support", Deps: []FeatureTag{"netstack"}, }, + "osrouter": { + Sym: "OSRouter", + Desc: "Configure the operating system's network stack, IPs, and routing tables", + // TODO(bradfitz): if this is omitted, and netstack is too, then tailscaled needs + // external config to be useful. Some people may want that, and we should support it, + // but it's rare. Maybe there should be a way to declare here that this "Provides" + // another feature (and netstack can too), and then if those required features provided + // by some other feature are missing, then it's an error by default unless you accept + // that it's okay to proceed without that meta feature. 
+ }, "portlist": {"PortList", "Optionally advertise listening service ports", nil}, "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, "netlog": { diff --git a/license_test.go b/license_test.go index ec452a6e3..9b62c48ed 100644 --- a/license_test.go +++ b/license_test.go @@ -34,7 +34,7 @@ func TestLicenseHeaders(t *testing.T) { // WireGuard copyright "cmd/tailscale/cli/authenticode_windows.go", - "wgengine/router/ifconfig_windows.go", + "wgengine/router/osrouter/ifconfig_windows.go", // noiseexplorer.com copyright "control/controlbase/noiseexplorer_test.go", diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 47c6b033c..bda491f37 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -84,7 +84,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) LDW github.com/coder/websocket/internal/errd from github.com/coder/websocket LDW github.com/coder/websocket/internal/util from github.com/coder/websocket LDW github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc @@ -101,17 +100,9 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ - W 💣 github.com/go-ole/go-ole from github.com/go-ole/go-ole/oleutil+ - W 💣 github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ - L github.com/google/nftables from tailscale.com/util/linuxfw - L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ DWI github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm @@ -125,9 +116,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd L github.com/mdlayher/genetlink from tailscale.com/net/tstun - L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables LA 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ LDW 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket DI github.com/prometheus-community/pro-bing from 
tailscale.com/wgengine/netstack @@ -145,8 +135,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp LDW github.com/tailscale/hujson from tailscale.com/ipn/conffile - L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ - L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth LDW github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ @@ -159,7 +147,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ @@ -356,7 +343,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ @@ -405,7 +391,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine - W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ @@ -448,7 +433,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ - LDAI golang.org/x/sys/unix from github.com/google/nftables+ + LDAI golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ diff --git a/wgengine/router/consolidating_router_test.go b/wgengine/router/consolidating_router_test.go index 871682d13..ba2e4d07a 100644 --- a/wgengine/router/consolidating_router_test.go +++ b/wgengine/router/consolidating_router_test.go @@ -4,7 +4,6 @@ package router import ( - "log" "net/netip" "testing" @@ -56,7 +55,7 @@ func TestConsolidateRoutes(t *testing.T) { }, } - cr := &consolidatingRouter{logf: log.Printf} + cr := &consolidatingRouter{logf: t.Logf} for _, test := range tests { t.Run(test.name, func(t *testing.T) { got := cr.consolidateRoutes(test.cfg) diff --git 
a/wgengine/router/ifconfig_windows.go b/wgengine/router/osrouter/ifconfig_windows.go similarity index 99% rename from wgengine/router/ifconfig_windows.go rename to wgengine/router/osrouter/ifconfig_windows.go index 40e9dc6e0..78ac8d45f 100644 --- a/wgengine/router/ifconfig_windows.go +++ b/wgengine/router/osrouter/ifconfig_windows.go @@ -3,7 +3,7 @@ * Copyright (C) 2019 WireGuard LLC. All Rights Reserved. */ -package router +package osrouter import ( "errors" @@ -19,6 +19,7 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/net/tstun" "tailscale.com/util/multierr" + "tailscale.com/wgengine/router" "tailscale.com/wgengine/winnet" ole "github.com/go-ole/go-ole" @@ -246,7 +247,7 @@ var networkCategoryWarnable = health.Register(&health.Warnable{ MapDebugFlag: "warn-network-category-unhealthy", }) -func configureInterface(cfg *Config, tun *tun.NativeTun, ht *health.Tracker) (retErr error) { +func configureInterface(cfg *router.Config, tun *tun.NativeTun, ht *health.Tracker) (retErr error) { var mtu = tstun.DefaultTUNMTU() luid := winipcfg.LUID(tun.LUID()) iface, err := interfaceFromLUID(luid, diff --git a/wgengine/router/ifconfig_windows_test.go b/wgengine/router/osrouter/ifconfig_windows_test.go similarity index 99% rename from wgengine/router/ifconfig_windows_test.go rename to wgengine/router/osrouter/ifconfig_windows_test.go index 11b98d1d7..b858ef4f6 100644 --- a/wgengine/router/ifconfig_windows_test.go +++ b/wgengine/router/osrouter/ifconfig_windows_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "fmt" diff --git a/wgengine/router/osrouter/osrouter.go b/wgengine/router/osrouter/osrouter.go new file mode 100644 index 000000000..281454b06 --- /dev/null +++ b/wgengine/router/osrouter/osrouter.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package osrouter contains OS-specific router implementations. +// This package has no API; it exists purely to import +// for the side effect of it registering itself with the wgengine/router +// package. +package osrouter + +import "tailscale.com/wgengine/router" + +// shutdownConfig is a routing configuration that removes all router +// state from the OS. It's the config used when callers pass in a nil +// Config. 
+var shutdownConfig router.Config diff --git a/wgengine/router/osrouter/osrouter_test.go b/wgengine/router/osrouter/osrouter_test.go new file mode 100644 index 000000000..d0cb3db69 --- /dev/null +++ b/wgengine/router/osrouter/osrouter_test.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package osrouter + +import "net/netip" + +//lint:ignore U1000 used in Windows/Linux tests only +func mustCIDRs(ss ...string) []netip.Prefix { + var ret []netip.Prefix + for _, s := range ss { + ret = append(ret, netip.MustParsePrefix(s)) + } + return ret +} diff --git a/wgengine/router/router_freebsd.go b/wgengine/router/osrouter/router_freebsd.go similarity index 54% rename from wgengine/router/router_freebsd.go rename to wgengine/router/osrouter/router_freebsd.go index ce4753d7d..a142e7a84 100644 --- a/wgengine/router/router_freebsd.go +++ b/wgengine/router/osrouter/router_freebsd.go @@ -1,23 +1,18 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" - "tailscale.com/util/eventbus" + "tailscale.com/wgengine/router" ) -// For now this router only supports the userspace WireGuard implementations. -// -// Work is currently underway for an in-kernel FreeBSD implementation of wireguard -// https://svnweb.freebsd.org/base?view=revision&revision=357986 - -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { - return newUserspaceBSDRouter(logf, tundev, netMon, health) +func init() { + router.HookCleanUp.Set(func(logf logger.Logf, netMon *netmon.Monitor, ifName string) { + cleanUp(logf, ifName) + }) } func cleanUp(logf logger.Logf, interfaceName string) { diff --git a/wgengine/router/router_linux.go b/wgengine/router/osrouter/router_linux.go similarity index 98% rename from wgengine/router/router_linux.go rename to wgengine/router/osrouter/router_linux.go index 75ff64f40..478935483 100644 --- a/wgengine/router/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -3,7 +3,7 @@ //go:build !android -package router +package osrouter import ( "errors" @@ -34,8 +34,18 @@ import ( "tailscale.com/util/linuxfw" "tailscale.com/util/multierr" "tailscale.com/version/distro" + "tailscale.com/wgengine/router" ) +func init() { + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health, opts.Bus) + }) + router.HookCleanUp.Set(func(logf logger.Logf, netMon *netmon.Monitor, ifName string) { + cleanUp(logf, ifName) + }) +} + var getDistroFunc = distro.Get const ( @@ -81,7 +91,7 @@ type linuxRouter struct { magicsockPortV6 uint16 } -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { tunname, err := tunDev.Name() if err != nil { return nil, err @@ -94,7 +104,7 @@ func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Moni return newUserspaceRouterAdvanced(logf, tunname, netMon, cmd, health, bus) } -func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon.Monitor, cmd commandRunner, health 
*health.Tracker, bus *eventbus.Bus) (Router, error) { +func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon.Monitor, cmd commandRunner, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { r := &linuxRouter{ logf: logf, tunname: tunname, @@ -401,7 +411,7 @@ func (r *linuxRouter) setupNetfilter(kind string) error { } // Set implements the Router interface. -func (r *linuxRouter) Set(cfg *Config) error { +func (r *linuxRouter) Set(cfg *router.Config) error { var errs []error if cfg == nil { cfg = &shutdownConfig @@ -488,7 +498,7 @@ var dockerStatefulFilteringWarnable = health.Register(&health.Warnable{ Text: health.StaticMessage("Stateful filtering is enabled and Docker was detected; this may prevent Docker containers on this host from resolving DNS and connecting to Tailscale nodes. See https://tailscale.com/s/stateful-docker"), }) -func (r *linuxRouter) updateStatefulFilteringWithDockerWarning(cfg *Config) { +func (r *linuxRouter) updateStatefulFilteringWithDockerWarning(cfg *router.Config) { // If stateful filtering is disabled, clear the warning. if !r.statefulFiltering { r.health.SetHealthy(dockerStatefulFilteringWarnable) diff --git a/wgengine/router/router_linux_test.go b/wgengine/router/osrouter/router_linux_test.go similarity index 99% rename from wgengine/router/router_linux_test.go rename to wgengine/router/osrouter/router_linux_test.go index b7f3a8ba1..39210ddef 100644 --- a/wgengine/router/router_linux_test.go +++ b/wgengine/router/osrouter/router_linux_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "errors" @@ -32,8 +32,11 @@ import ( "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/linuxfw" "tailscale.com/version/distro" + "tailscale.com/wgengine/router" ) +type Config = router.Config + func TestRouterStates(t *testing.T) { basic := ` ip rule add -4 pref 5210 fwmark 0x80000/0xff0000 table main diff --git a/wgengine/router/router_openbsd.go b/wgengine/router/osrouter/router_openbsd.go similarity index 93% rename from wgengine/router/router_openbsd.go rename to wgengine/router/osrouter/router_openbsd.go index f91878b4c..8f3599309 100644 --- a/wgengine/router/router_openbsd.go +++ b/wgengine/router/osrouter/router_openbsd.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "errors" @@ -17,10 +17,18 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/eventbus" "tailscale.com/util/set" + "tailscale.com/wgengine/router" ) -// For now this router only supports the WireGuard userspace implementation. -// There is an experimental kernel version in the works for OpenBSD: +func init() { + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health, opts.Bus) + }) + router.HookCleanUp.Set(func(logf logger.Logf, netMon *netmon.Monitor, ifName string) { + cleanUp(logf, ifName) + }) +} + // https://git.zx2c4.com/wireguard-openbsd. 
type openbsdRouter struct { @@ -32,7 +40,7 @@ type openbsdRouter struct { routes set.Set[netip.Prefix] } -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { tunname, err := tundev.Name() if err != nil { return nil, err @@ -68,7 +76,7 @@ func inet(p netip.Prefix) string { return "inet" } -func (r *openbsdRouter) Set(cfg *Config) error { +func (r *openbsdRouter) Set(cfg *router.Config) error { if cfg == nil { cfg = &shutdownConfig } diff --git a/wgengine/router/router_plan9.go b/wgengine/router/osrouter/router_plan9.go similarity index 89% rename from wgengine/router/router_plan9.go rename to wgengine/router/osrouter/router_plan9.go index fd6850ade..5872aa7fc 100644 --- a/wgengine/router/router_plan9.go +++ b/wgengine/router/osrouter/router_plan9.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "bufio" @@ -15,10 +15,19 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" - "tailscale.com/util/eventbus" + "tailscale.com/wgengine/router" ) -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { +func init() { + router.HookCleanUp.Set(func(logf logger.Logf, netMon *netmon.Monitor, ifName string) { + cleanAllTailscaleRoutes(logf) + }) + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon) + }) +} + +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor) (router.Router, error) { r := &plan9Router{ logf: logf, tundev: tundev, @@ -39,7 +48,7 @@ func (r *plan9Router) Up() error { return nil } -func (r *plan9Router) Set(cfg *Config) error { +func (r *plan9Router) Set(cfg *router.Config) error { if cfg == nil { cleanAllTailscaleRoutes(r.logf) return nil @@ -118,10 +127,6 @@ func (r *plan9Router) Close() error { return nil } -func cleanUp(logf logger.Logf, _ string) { - cleanAllTailscaleRoutes(logf) -} - func cleanAllTailscaleRoutes(logf logger.Logf) { routes, err := os.OpenFile("/net/iproute", os.O_RDWR, 0) if err != nil { diff --git a/wgengine/router/router_userspace_bsd.go b/wgengine/router/osrouter/router_userspace_bsd.go similarity index 93% rename from wgengine/router/router_userspace_bsd.go rename to wgengine/router/osrouter/router_userspace_bsd.go index 0b7e4f36a..cdaf3adea 100644 --- a/wgengine/router/router_userspace_bsd.go +++ b/wgengine/router/osrouter/router_userspace_bsd.go @@ -3,7 +3,7 @@ //go:build darwin || freebsd -package router +package osrouter import ( "fmt" @@ -19,8 +19,15 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/types/logger" "tailscale.com/version" + "tailscale.com/wgengine/router" ) +func init() { + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceBSDRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health) + }) +} + type userspaceBSDRouter struct { logf logger.Logf netMon *netmon.Monitor @@ -30,7 +37,7 @@ type userspaceBSDRouter struct { routes map[netip.Prefix]bool } -func newUserspaceBSDRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func 
newUserspaceBSDRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (router.Router, error) { tunname, err := tundev.Name() if err != nil { return nil, err @@ -99,7 +106,7 @@ func inet(p netip.Prefix) string { return "inet" } -func (r *userspaceBSDRouter) Set(cfg *Config) (reterr error) { +func (r *userspaceBSDRouter) Set(cfg *router.Config) (reterr error) { if cfg == nil { cfg = &shutdownConfig } diff --git a/wgengine/router/router_windows.go b/wgengine/router/osrouter/router_windows.go similarity index 97% rename from wgengine/router/router_windows.go rename to wgengine/router/osrouter/router_windows.go index edd258cb3..05bf210e8 100644 --- a/wgengine/router/router_windows.go +++ b/wgengine/router/osrouter/router_windows.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "bufio" @@ -28,8 +28,15 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/backoff" "tailscale.com/util/eventbus" + "tailscale.com/wgengine/router" ) +func init() { + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health, opts.Bus) + }) +} + type winRouter struct { logf func(fmt string, args ...any) netMon *netmon.Monitor // may be nil @@ -39,7 +46,7 @@ type winRouter struct { firewall *firewallTweaker } -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { nativeTun := tundev.(*tun.NativeTun) luid := winipcfg.LUID(nativeTun.LUID()) guid, err := luid.GUID() @@ -73,7 +80,7 @@ func (r *winRouter) Up() error { return nil } -func (r *winRouter) Set(cfg *Config) error { +func (r *winRouter) Set(cfg *router.Config) error { if cfg == nil { cfg = &shutdownConfig } @@ -124,10 +131,6 @@ func (r *winRouter) Close() error { return nil } -func cleanUp(logf logger.Logf, interfaceName string) { - // Nothing to do here. -} - // firewallTweaker changes the Windows firewall. Normally this wouldn't be so complicated, // but it can be REALLY SLOW to change the Windows firewall for reasons not understood. // Like 4 minutes slow. But usually it's tens of milliseconds. 
diff --git a/wgengine/router/router_windows_test.go b/wgengine/router/osrouter/router_windows_test.go similarity index 95% rename from wgengine/router/router_windows_test.go rename to wgengine/router/osrouter/router_windows_test.go index 9989ddbc7..119b6a778 100644 --- a/wgengine/router/router_windows_test.go +++ b/wgengine/router/osrouter/router_windows_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "path/filepath" diff --git a/wgengine/router/runner.go b/wgengine/router/osrouter/runner.go similarity index 99% rename from wgengine/router/runner.go rename to wgengine/router/osrouter/runner.go index 8fa068e33..7afb7fdc2 100644 --- a/wgengine/router/runner.go +++ b/wgengine/router/osrouter/runner.go @@ -3,7 +3,7 @@ //go:build linux -package router +package osrouter import ( "errors" diff --git a/wgengine/router/router.go b/wgengine/router/router.go index edd7d14cb..7723138f4 100644 --- a/wgengine/router/router.go +++ b/wgengine/router/router.go @@ -6,10 +6,15 @@ package router import ( + "errors" + "fmt" "net/netip" "reflect" + "runtime" "github.com/tailscale/wireguard-go/tun" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" @@ -41,6 +46,22 @@ type Router interface { Close() error } +// NewOpts are the options passed to the NewUserspaceRouter hook. +type NewOpts struct { + Logf logger.Logf // required + Tun tun.Device // required + NetMon *netmon.Monitor // optional + Health *health.Tracker // required (but TODO: support optional later) + Bus *eventbus.Bus // required +} + +// HookNewUserspaceRouter is the registration point for router implementations +// to register a constructor for userspace routers. It's meant for implementations +// in wgengine/router/osrouter. +// +// If no implementation is registered, [New] will return an error. +var HookNewUserspaceRouter feature.Hook[func(NewOpts) (Router, error)] + // New returns a new Router for the current platform, using the // provided tun device. // @@ -50,14 +71,33 @@ func New(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus, ) (Router, error) { logf = logger.WithPrefix(logf, "router: ") - return newUserspaceRouter(logf, tundev, netMon, health, bus) + if f, ok := HookNewUserspaceRouter.GetOk(); ok { + return f(NewOpts{ + Logf: logf, + Tun: tundev, + NetMon: netMon, + Health: health, + Bus: bus, + }) + } + if !buildfeatures.HasOSRouter { + return nil, errors.New("router: tailscaled was built without OSRouter support") + } + return nil, fmt.Errorf("unsupported OS %q", runtime.GOOS) } +// HookCleanUp is the optional registration point for router implementations +// to register a cleanup function for [CleanUp] to use. It's meant for +// implementations in wgengine/router/osrouter. +var HookCleanUp feature.Hook[func(_ logger.Logf, _ *netmon.Monitor, ifName string)] + // CleanUp restores the system network configuration to its original state // in case the Tailscale daemon terminated without closing the router. // No other state needs to be instantiated before this runs. 
func CleanUp(logf logger.Logf, netMon *netmon.Monitor, interfaceName string) { - cleanUp(logf, interfaceName) + if f, ok := HookCleanUp.GetOk(); ok { + f(logf, netMon, interfaceName) + } } // Config is the subset of Tailscale configuration that is relevant to @@ -106,8 +146,3 @@ func (a *Config) Equal(b *Config) bool { } return reflect.DeepEqual(a, b) } - -// shutdownConfig is a routing configuration that removes all router -// state from the OS. It's the config used when callers pass in a nil -// Config. -var shutdownConfig = Config{} diff --git a/wgengine/router/router_android.go b/wgengine/router/router_android.go deleted file mode 100644 index de680606f..000000000 --- a/wgengine/router/router_android.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build android - -package router - -import ( - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/health" - "tailscale.com/net/netmon" - "tailscale.com/types/logger" - "tailscale.com/util/eventbus" -) - -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, _ *eventbus.Bus) (Router, error) { - // Note, this codepath is _not_ used when building the android app - // from github.com/tailscale/tailscale-android. The android app - // constructs its own wgengine with a custom router implementation - // that plugs into Android networking APIs. - // - // In practice, the only place this fake router gets used is when - // you build a tsnet app for android, in which case we don't want - // to touch the OS network stack and a no-op router is correct. - return NewFake(logf), nil -} - -func cleanUp(logf logger.Logf, interfaceName string) { - // Nothing to do here. -} diff --git a/wgengine/router/router_darwin.go b/wgengine/router/router_darwin.go deleted file mode 100644 index ebb2615a0..000000000 --- a/wgengine/router/router_darwin.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package router - -import ( - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/health" - "tailscale.com/net/netmon" - "tailscale.com/types/logger" - "tailscale.com/util/eventbus" -) - -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { - return newUserspaceBSDRouter(logf, tundev, netMon, health) -} - -func cleanUp(logger.Logf, string) { - // Nothing to do. -} diff --git a/wgengine/router/router_default.go b/wgengine/router/router_default.go deleted file mode 100644 index 190575973..000000000 --- a/wgengine/router/router_default.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !windows && !linux && !darwin && !openbsd && !freebsd && !plan9 - -package router - -import ( - "fmt" - "runtime" - - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/health" - "tailscale.com/net/netmon" - "tailscale.com/types/logger" - "tailscale.com/util/eventbus" -) - -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, _ *eventbus.Bus) (Router, error) { - return nil, fmt.Errorf("unsupported OS %q", runtime.GOOS) -} - -func cleanUp(logf logger.Logf, interfaceName string) { - // Nothing to do here. 
-} diff --git a/wgengine/router/router_test.go b/wgengine/router/router_test.go index 8842173d7..fd17b8c5d 100644 --- a/wgengine/router/router_test.go +++ b/wgengine/router/router_test.go @@ -11,15 +11,6 @@ import ( "tailscale.com/types/preftype" ) -//lint:ignore U1000 used in Windows/Linux tests only -func mustCIDRs(ss ...string) []netip.Prefix { - var ret []netip.Prefix - for _, s := range ss { - ret = append(ret, netip.MustParsePrefix(s)) - } - return ret -} - func TestConfigEqual(t *testing.T) { testedFields := []string{ "LocalAddrs", "Routes", "LocalRoutes", "NewMTU", From 72bc7334fbcba0c03d3f926167f4c3149a4bb36d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 13:57:04 -0700 Subject: [PATCH 1429/1708] net/speedtest: mark flaky test, and skip it by default as it's slow Updates #17338 Change-Id: I1f3dbc154ba274f615cc77d2aa76f6ff9d40137c Signed-off-by: Brad Fitzpatrick --- net/speedtest/speedtest_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/net/speedtest/speedtest_test.go b/net/speedtest/speedtest_test.go index 55dcbeea1..69fdb6b56 100644 --- a/net/speedtest/speedtest_test.go +++ b/net/speedtest/speedtest_test.go @@ -4,12 +4,22 @@ package speedtest import ( + "flag" "net" "testing" "time" + + "tailscale.com/cmd/testwrapper/flakytest" ) +var manualTest = flag.Bool("do-speedtest", false, "if true, run the speedtest TestDownload test. Otherwise skip it because it's slow and flaky; see https://github.com/tailscale/tailscale/issues/17338") + func TestDownload(t *testing.T) { + if !*manualTest { + t.Skip("skipping slow test without --do-speedtest") + } + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17338") + // start a listener and find the port where the server will be listening. l, err := net.Listen("tcp", ":0") if err != nil { From 1aaa1648c4e7fd5a690c17d87cf056816ebe4553 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 29 Sep 2025 17:44:59 +0100 Subject: [PATCH 1430/1708] README: update the version of Go in the README Updates #17064 Signed-off-by: Alex Chan --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2c9713a6f..70b92d411 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ not open source. ## Building -We always require the latest Go release, currently Go 1.23. (While we build +We always require the latest Go release, currently Go 1.25. (While we build releases with our [Go fork](https://github.com/tailscale/go/), its use is not required.) From bdb69d1b1fc4ee08cfb13b5d0b7bab79e162bd4e Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 14:03:32 -0700 Subject: [PATCH 1431/1708] net/dns/resolver: fix data race in test Fixes #17339 Change-Id: I486d2a0e0931d701923c1e0f8efbda99510ab19b Signed-off-by: Brad Fitzpatrick --- net/dns/resolver/forwarder.go | 20 ++++++++++-------- net/dns/resolver/forwarder_test.go | 34 +++++++++--------------------- 2 files changed, 21 insertions(+), 33 deletions(-) diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index c87fbd504..105229fb8 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -217,11 +217,12 @@ type resolverAndDelay struct { // forwarder forwards DNS packets to a number of upstream nameservers. 
type forwarder struct { - logf logger.Logf - netMon *netmon.Monitor // always non-nil - linkSel ForwardLinkSelector // TODO(bradfitz): remove this when tsdial.Dialer absorbs it - dialer *tsdial.Dialer - health *health.Tracker // always non-nil + logf logger.Logf + netMon *netmon.Monitor // always non-nil + linkSel ForwardLinkSelector // TODO(bradfitz): remove this when tsdial.Dialer absorbs it + dialer *tsdial.Dialer + health *health.Tracker // always non-nil + verboseFwd bool // if true, log all DNS forwarding controlKnobs *controlknobs.Knobs // or nil @@ -258,6 +259,7 @@ func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkS dialer: dialer, health: health, controlKnobs: knobs, + verboseFwd: verboseDNSForward(), } f.ctx, f.ctxCancel = context.WithCancel(context.Background()) return f @@ -515,7 +517,7 @@ var ( // // send expects the reply to have the same txid as txidOut. func (f *forwarder) send(ctx context.Context, fq *forwardQuery, rr resolverAndDelay) (ret []byte, err error) { - if verboseDNSForward() { + if f.verboseFwd { id := forwarderCount.Add(1) domain, typ, _ := nameFromQuery(fq.packet) f.logf("forwarder.send(%q, %d, %v, %d) [%d] ...", rr.name.Addr, fq.txid, typ, len(domain), id) @@ -978,7 +980,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo } defer fq.closeOnCtxDone.Close() - if verboseDNSForward() { + if f.verboseFwd { domainSha256 := sha256.Sum256([]byte(domain)) domainSig := base64.RawStdEncoding.EncodeToString(domainSha256[:3]) f.logf("request(%d, %v, %d, %s) %d...", fq.txid, typ, len(domain), domainSig, len(fq.packet)) @@ -1023,7 +1025,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo metricDNSFwdErrorContext.Add(1) return fmt.Errorf("waiting to send response: %w", ctx.Err()) case responseChan <- packet{v, query.family, query.addr}: - if verboseDNSForward() { + if f.verboseFwd { f.logf("response(%d, %v, %d) = %d, nil", fq.txid, typ, len(domain), len(v)) } metricDNSFwdSuccess.Add(1) @@ -1053,7 +1055,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo } f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: strings.Join(resolverAddrs, ",")}) case responseChan <- res: - if verboseDNSForward() { + if f.verboseFwd { f.logf("forwarder response(%d, %v, %d) = %d, %v", fq.txid, typ, len(domain), len(res.bs), firstErr) } return nil diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index f77388ca7..b5cc7d018 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -12,7 +12,6 @@ import ( "io" "net" "net/netip" - "os" "reflect" "slices" "strings" @@ -23,7 +22,6 @@ import ( dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" - "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" @@ -400,13 +398,6 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on return } -func enableDebug(tb testing.TB) { - const debugKnob = "TS_DEBUG_DNS_FORWARD_SEND" - oldVal := os.Getenv(debugKnob) - envknob.Setenv(debugKnob, "true") - tb.Cleanup(func() { envknob.Setenv(debugKnob, oldVal) }) -} - func makeLargeResponse(tb testing.TB, domain string) (request, response []byte) { name := dns.MustNewName(domain) @@ -554,9 +545,11 @@ func mustRunTestQuery(tb testing.TB, request []byte, modify func(*forwarder), po return resp } -func TestForwarderTCPFallback(t *testing.T) { - enableDebug(t) +func 
beVerbose(f *forwarder) { + f.verboseFwd = true +} +func TestForwarderTCPFallback(t *testing.T) { const domain = "large-dns-response.tailscale.com." // Make a response that's very large, containing a bunch of localhost addresses. @@ -576,7 +569,7 @@ func TestForwarderTCPFallback(t *testing.T) { } }) - resp := mustRunTestQuery(t, request, nil, port) + resp := mustRunTestQuery(t, request, beVerbose, port) if !bytes.Equal(resp, largeResponse) { t.Errorf("invalid response\ngot: %+v\nwant: %+v", resp, largeResponse) } @@ -592,8 +585,6 @@ func TestForwarderTCPFallback(t *testing.T) { // Test to ensure that if the UDP listener is unresponsive, we always make a // TCP request even if we never get a response. func TestForwarderTCPFallbackTimeout(t *testing.T) { - enableDebug(t) - const domain = "large-dns-response.tailscale.com." // Make a response that's very large, containing a bunch of localhost addresses. @@ -614,7 +605,7 @@ func TestForwarderTCPFallbackTimeout(t *testing.T) { } }) - resp := mustRunTestQuery(t, request, nil, port) + resp := mustRunTestQuery(t, request, beVerbose, port) if !bytes.Equal(resp, largeResponse) { t.Errorf("invalid response\ngot: %+v\nwant: %+v", resp, largeResponse) } @@ -624,8 +615,6 @@ func TestForwarderTCPFallbackTimeout(t *testing.T) { } func TestForwarderTCPFallbackDisabled(t *testing.T) { - enableDebug(t) - const domain = "large-dns-response.tailscale.com." // Make a response that's very large, containing a bunch of localhost addresses. @@ -646,6 +635,7 @@ func TestForwarderTCPFallbackDisabled(t *testing.T) { }) resp := mustRunTestQuery(t, request, func(fwd *forwarder) { + fwd.verboseFwd = true // Disable retries for this test. fwd.controlKnobs = &controlknobs.Knobs{} fwd.controlKnobs.DisableDNSForwarderTCPRetries.Store(true) @@ -668,8 +658,6 @@ func TestForwarderTCPFallbackDisabled(t *testing.T) { // Test to ensure that we propagate DNS errors func TestForwarderTCPFallbackError(t *testing.T) { - enableDebug(t) - const domain = "error-response.tailscale.com." // Our response is a SERVFAIL @@ -686,7 +674,7 @@ func TestForwarderTCPFallbackError(t *testing.T) { } }) - resp, err := runTestQuery(t, request, nil, port) + resp, err := runTestQuery(t, request, beVerbose, port) if !sawRequest.Load() { t.Error("did not see DNS request") } @@ -706,8 +694,6 @@ func TestForwarderTCPFallbackError(t *testing.T) { // Test to ensure that if we have more than one resolver, and at least one of them // returns a successful response, we propagate it. func TestForwarderWithManyResolvers(t *testing.T) { - enableDebug(t) - const domain = "example.com." request := makeTestRequest(t, domain) @@ -810,7 +796,7 @@ func TestForwarderWithManyResolvers(t *testing.T) { for i := range tt.responses { ports[i] = runDNSServer(t, nil, tt.responses[i], func(isTCP bool, gotRequest []byte) {}) } - gotResponse, err := runTestQuery(t, request, nil, ports...) + gotResponse, err := runTestQuery(t, request, beVerbose, ports...) 
if err != nil { t.Fatalf("wanted nil, got %v", err) } @@ -869,7 +855,7 @@ func TestNXDOMAINIncludesQuestion(t *testing.T) { port := runDNSServer(t, nil, response, func(isTCP bool, gotRequest []byte) { }) - res, err := runTestQuery(t, request, nil, port) + res, err := runTestQuery(t, request, beVerbose, port) if err != nil { t.Fatal(err) } From 54e50230a10dbbf1a251589b683291df780783d9 Mon Sep 17 00:00:00 2001 From: Brian Palmer Date: Mon, 29 Sep 2025 16:30:23 -0600 Subject: [PATCH 1432/1708] net/memnet: allow listener address reuse (#17342) Listen address reuse is allowed as soon as the previous listener is closed. There is no attempt made to emulate more complex address reuse logic. Updates tailscale/corp#28078 Change-Id: I56be1c4848e7b3f9fc97fd4ef13a2de9dcfab0f2 Signed-off-by: Brian Palmer --- net/memnet/listener.go | 6 ++++++ net/memnet/memnet.go | 5 +++++ net/memnet/memnet_test.go | 23 +++++++++++++++++++++++ 3 files changed, 34 insertions(+) create mode 100644 net/memnet/memnet_test.go diff --git a/net/memnet/listener.go b/net/memnet/listener.go index d84a2e443..202026e16 100644 --- a/net/memnet/listener.go +++ b/net/memnet/listener.go @@ -22,6 +22,7 @@ type Listener struct { ch chan Conn closeOnce sync.Once closed chan struct{} + onClose func() // or nil // NewConn, if non-nil, is called to create a new pair of connections // when dialing. If nil, NewConn is used. @@ -44,9 +45,14 @@ func (l *Listener) Addr() net.Addr { // Close closes the pipe listener. func (l *Listener) Close() error { + var cleanup func() l.closeOnce.Do(func() { + cleanup = l.onClose close(l.closed) }) + if cleanup != nil { + cleanup() + } return nil } diff --git a/net/memnet/memnet.go b/net/memnet/memnet.go index 7c2435684..1e43df2da 100644 --- a/net/memnet/memnet.go +++ b/net/memnet/memnet.go @@ -61,6 +61,11 @@ func (m *Network) Listen(network, address string) (net.Listener, error) { } ln := Listen(key) m.lns[key] = ln + ln.onClose = func() { + m.mu.Lock() + delete(m.lns, key) + m.mu.Unlock() + } return ln, nil } } diff --git a/net/memnet/memnet_test.go b/net/memnet/memnet_test.go new file mode 100644 index 000000000..38086cec0 --- /dev/null +++ b/net/memnet/memnet_test.go @@ -0,0 +1,23 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package memnet + +import "testing" + +func TestListenAddressReuse(t *testing.T) { + var nw Network + ln1, err := nw.Listen("tcp", "127.0.0.1:80") + if err != nil { + t.Fatalf("listen failed: %v", err) + } + if _, err := nw.Listen("tcp", "127.0.0.1:80"); err == nil { + t.Errorf("listen on in-use address succeeded") + } + if err := ln1.Close(); err != nil { + t.Fatalf("close failed: %v", err) + } + if _, err := nw.Listen("tcp", "127.0.0.1:80"); err != nil { + t.Errorf("listen on same address after close failed: %v", err) + } +} From 69c79cb9f3f9e2fe9ce4333c9a034591709e469b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 15:26:24 -0700 Subject: [PATCH 1433/1708] ipn/store, feature/condregister: move AWS + Kube store registration to condregister Otherwise they're uselessly imported by tsnet applications, even though they do nothing. tsnet applications wanting to use these already had to explicitly import them and use kubestore.New or awsstore.New and assign those to their tsnet.Server.Store fields. 
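For illustration, a minimal tsnet program wired up that way might look like the following sketch (the Secret name, hostname, and listener address are placeholders; kubestore.New's logf/secret-name signature is taken from the registration shim in this patch, and tsnet.Server.Store is the field the message above refers to):

package main

import (
	"log"

	"tailscale.com/ipn/store/kubestore"
	"tailscale.com/tsnet"
)

func main() {
	// Build the state store explicitly; tsnet no longer picks up the kube/AWS
	// store factories as a side effect of importing ipn/store.
	st, err := kubestore.New(log.Printf, "my-app-state") // Secret name is a placeholder
	if err != nil {
		log.Fatal(err)
	}
	s := &tsnet.Server{
		Hostname: "my-app", // placeholder
		Store:    st,
	}
	defer s.Close()
	ln, err := s.Listen("tcp", ":80")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	// ... serve on ln as usual ...
}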
Updates #12614 Change-Id: I358e3923686ddf43a85e6923c3828ba2198991d4 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 97 ++--------------- cmd/tailscaled/depaware.txt | 4 +- cmd/tsidp/depaware.txt | 102 ++---------------- .../condregister/maybe_store_aws.go | 5 +- .../condregister/maybe_store_kube.go | 5 +- tsnet/depaware.txt | 102 ++---------------- 6 files changed, 40 insertions(+), 275 deletions(-) rename ipn/store/store_aws.go => feature/condregister/maybe_store_aws.go (76%) rename ipn/store/store_kube.go => feature/condregister/maybe_store_kube.go (74%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 223baa43c..3aa0a496c 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -5,81 +5,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ - L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ - L github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 - L github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ - L github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds - L github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds - L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ - L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/configsources from 
github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ - L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ - L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds - L github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 - L github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws - L github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm - L github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ - L github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ - L github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ - L github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ - L github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ - L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts - L 
github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ - L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http - L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus github.com/coder/websocket from tailscale.com/util/eventbus @@ -136,7 +61,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/google/gofuzz/bytesource from github.com/google/gofuzz github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm github.com/josharian/intern from github.com/mailru/easyjson/jlexer L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink @@ -796,8 +720,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - tailscale.com/ipn/store/kubestore from tailscale.com/cmd/k8s-operator+ + tailscale.com/ipn/store/kubestore from tailscale.com/cmd/k8s-operator tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ tailscale.com/k8s-operator from tailscale.com/cmd/k8s-operator tailscale.com/k8s-operator/api-proxy from tailscale.com/cmd/k8s-operator @@ -1026,7 +949,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ bytes from archive/tar+ cmp from github.com/gaissmai/bart+ compress/flate from compress/gzip+ - compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ + compress/gzip from github.com/emicklei/go-restful/v3+ compress/zlib from debug/pe+ container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp+ container/list from crypto/tls+ @@ -1091,7 +1014,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ + crypto/tls from github.com/prometheus-community/pro-bing+ crypto/tls/internal/fips140tls 
from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 @@ -1110,7 +1033,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ + encoding/xml from github.com/emicklei/go-restful/v3+ errors from archive/tar+ expvar from github.com/prometheus/client_golang/prometheus+ flag from github.com/spf13/pflag+ @@ -1179,7 +1102,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ - io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + io/ioutil from github.com/digitalocean/go-smbios/smbios+ iter from go/ast+ log from expvar+ log/internal from log+ @@ -1198,25 +1121,25 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ net from crypto/tls+ net/http from expvar+ net/http/httptrace from github.com/prometheus-community/pro-bing+ - net/http/httputil from github.com/aws/smithy-go/transport/http+ + net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http net/http/pprof from sigs.k8s.io/controller-runtime/pkg/manager+ net/netip from github.com/gaissmai/bart+ - net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/aws/aws-sdk-go-v2/credentials/processcreds+ + os/exec from github.com/godbus/dbus/v5+ os/signal from sigs.k8s.io/controller-runtime/pkg/manager/signals os/user from archive/tar+ path from archive/tar+ path/filepath from archive/tar+ reflect from archive/tar+ - regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints+ + regexp from github.com/davecgh/go-spew/spew+ regexp/syntax from regexp runtime from archive/tar+ - runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ + runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/metrics from github.com/prometheus/client_golang/prometheus+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 4051000a6..90cba0734 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -304,8 +304,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver+ tailscale.com/ipn/policy from tailscale.com/feature/portlist tailscale.com/ipn/store from tailscale.com/cmd/tailscaled+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store + L tailscale.com/ipn/store/awsstore from tailscale.com/feature/condregister + L tailscale.com/ipn/store/kubestore from tailscale.com/feature/condregister tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ L tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index dfb6553bd..fb97296bc 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -5,81 +5,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar W 💣 
github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ - L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ - L github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 - L github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ - L github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds - L github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds - L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ - L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ - L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ - L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdkio from 
github.com/aws/aws-sdk-go-v2/credentials/processcreds - L github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 - L github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws - L github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm - L github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ - L github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ - L github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ - L github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ - L github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ - L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ - L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/tracing from 
github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http - L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm github.com/coder/websocket from tailscale.com/util/eventbus github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket @@ -105,7 +30,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ DW github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -238,12 +162,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ - L tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ - L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore - tailscale.com/kube/kubetypes from tailscale.com/envknob+ + tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web tailscale.com/log/filelogger from tailscale.com/logpolicy tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal @@ -456,7 +376,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar bytes from archive/tar+ cmp from encoding/json+ compress/flate from compress/gzip+ - compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ + compress/gzip from golang.org/x/net/http2+ W compress/zlib from debug/pe container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp container/list from crypto/tls+ @@ -521,7 +441,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ + crypto/tls from github.com/prometheus-community/pro-bing+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 @@ -538,7 +458,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ + encoding/xml from github.com/tailscale/goupnp+ errors from archive/tar+ expvar from tailscale.com/health+ flag from tailscale.com/cmd/tsidp+ @@ -598,7 +518,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ - io/ioutil 
from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + io/ioutil from github.com/digitalocean/go-smbios/smbios+ iter from bytes+ log from expvar+ log/internal from log @@ -613,26 +533,26 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ - net/http/httptrace from github.com/aws/smithy-go/transport/http+ - net/http/httputil from github.com/aws/smithy-go/transport/http+ + net/http/httptrace from github.com/prometheus-community/pro-bing+ + net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/ipn/localapi+ net/netip from crypto/x509+ - net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/aws/aws-sdk-go-v2/credentials/processcreds+ + os/exec from github.com/godbus/dbus/v5+ os/signal from tailscale.com/cmd/tsidp os/user from archive/tar+ path from archive/tar+ path/filepath from archive/tar+ reflect from archive/tar+ - regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints+ + regexp from github.com/tailscale/goupnp/httpu+ regexp/syntax from regexp runtime from archive/tar+ - runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ + runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof slices from archive/tar+ diff --git a/ipn/store/store_aws.go b/feature/condregister/maybe_store_aws.go similarity index 76% rename from ipn/store/store_aws.go rename to feature/condregister/maybe_store_aws.go index 834b657d3..48ef06ecf 100644 --- a/ipn/store/store_aws.go +++ b/feature/condregister/maybe_store_aws.go @@ -3,16 +3,17 @@ //go:build (ts_aws || (linux && (arm64 || amd64) && !android)) && !ts_omit_aws -package store +package condregister import ( "tailscale.com/ipn" + "tailscale.com/ipn/store" "tailscale.com/ipn/store/awsstore" "tailscale.com/types/logger" ) func init() { - Register("arn:", func(logf logger.Logf, arg string) (ipn.StateStore, error) { + store.Register("arn:", func(logf logger.Logf, arg string) (ipn.StateStore, error) { ssmARN, opts, err := awsstore.ParseARNAndOpts(arg) if err != nil { return nil, err diff --git a/ipn/store/store_kube.go b/feature/condregister/maybe_store_kube.go similarity index 74% rename from ipn/store/store_kube.go rename to feature/condregister/maybe_store_kube.go index 7eac75c19..0aa2c1692 100644 --- a/ipn/store/store_kube.go +++ b/feature/condregister/maybe_store_kube.go @@ -3,18 +3,19 @@ //go:build (ts_kube || (linux && (arm64 || amd64) && !android)) && !ts_omit_kube -package store +package condregister import ( "strings" "tailscale.com/ipn" + "tailscale.com/ipn/store" "tailscale.com/ipn/store/kubestore" "tailscale.com/types/logger" ) func init() { - Register("kube:", func(logf logger.Logf, path string) (ipn.StateStore, error) { + store.Register("kube:", func(logf logger.Logf, path string) (ipn.StateStore, error) { secretName := strings.TrimPrefix(path, "kube:") return kubestore.New(logf, secretName) }) diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index bda491f37..2e8ca0f0a 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -5,81 +5,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) W 💣 github.com/alexbrainman/sspi 
from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ - L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ - L github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 - L github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ - L github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds - L github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds - L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ - L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ - L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ - L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdkio from 
github.com/aws/aws-sdk-go-v2/credentials/processcreds - L github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 - L github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws - L github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm - L github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ - L github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ - L github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ - L github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ - L github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ - L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ - L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/tracing from 
github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http - L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm LDW github.com/coder/websocket from tailscale.com/util/eventbus LDW github.com/coder/websocket/internal/errd from github.com/coder/websocket LDW github.com/coder/websocket/internal/util from github.com/coder/websocket @@ -105,7 +30,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ DWI github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -234,12 +158,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ - L tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ - L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore - tailscale.com/kube/kubetypes from tailscale.com/envknob+ + tailscale.com/kube/kubetypes from tailscale.com/envknob LDW tailscale.com/licenses from tailscale.com/client/web tailscale.com/log/filelogger from tailscale.com/logpolicy tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal @@ -449,7 +369,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) bytes from archive/tar+ cmp from encoding/json+ compress/flate from compress/gzip+ - compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ + compress/gzip from golang.org/x/net/http2+ W compress/zlib from debug/pe container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp container/list from crypto/tls+ @@ -514,7 +434,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ + crypto/tls from github.com/prometheus-community/pro-bing+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ DI crypto/x509/internal/macos from crypto/x509 @@ -531,7 +451,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ + encoding/xml from github.com/tailscale/goupnp+ errors from archive/tar+ expvar from tailscale.com/health+ flag from tailscale.com/util/testenv @@ -591,7 +511,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ - 
io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + io/ioutil from github.com/digitalocean/go-smbios/smbios+ iter from bytes+ log from expvar+ log/internal from log @@ -606,25 +526,25 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ - net/http/httptrace from github.com/aws/smithy-go/transport/http+ - net/http/httputil from github.com/aws/smithy-go/transport/http+ + net/http/httptrace from github.com/prometheus-community/pro-bing+ + net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/ipn/localapi+ net/netip from crypto/x509+ - net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/aws/aws-sdk-go-v2/credentials/processcreds+ + os/exec from github.com/godbus/dbus/v5+ os/user from archive/tar+ path from archive/tar+ path/filepath from archive/tar+ reflect from archive/tar+ - regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints+ + regexp from github.com/tailscale/goupnp/httpu+ regexp/syntax from regexp runtime from archive/tar+ - runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ + runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof slices from archive/tar+ From 038cdb4640275e44fd8cf5a95f23d5d5b4987ba3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 16:41:26 -0700 Subject: [PATCH 1434/1708] feature/clientupdate: move clientupdate to a modular feature, disabled for tsnet Updates #12614 Change-Id: I5f685dec84a5396b7c2b66f2788ae3d286e1ddc6 Signed-off-by: Brad Fitzpatrick --- client/web/web.go | 4 +- clientupdate/clientupdate.go | 9 +- cmd/k8s-operator/depaware.txt | 55 +- cmd/tailscale/cli/set.go | 23 +- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware-minbox.txt | 61 +- cmd/tailscaled/depaware.txt | 3 +- cmd/tsidp/depaware.txt | 67 ++- .../feature_clientupdate_disabled.go | 13 + .../feature_clientupdate_enabled.go | 13 + feature/clientupdate/clientupdate.go | 530 ++++++++++++++++++ feature/condregister/maybe_clientupdate.go | 8 + feature/featuretags/featuretags.go | 1 + feature/hooks.go | 17 + ipn/ipnlocal/autoupdate.go | 65 --- ipn/ipnlocal/autoupdate_disabled.go | 18 - ipn/ipnlocal/c2n.go | 190 ------- ipn/ipnlocal/local.go | 155 +---- ipn/ipnlocal/local_test.go | 6 +- ipn/ipnlocal/profiles.go | 4 +- ipn/ipnlocal/profiles_test.go | 5 +- ipn/localapi/localapi.go | 37 +- tsnet/depaware.txt | 67 ++- tstest/integration/integration_test.go | 5 +- 24 files changed, 749 insertions(+), 609 deletions(-) create mode 100644 feature/buildfeatures/feature_clientupdate_disabled.go create mode 100644 feature/buildfeatures/feature_clientupdate_enabled.go create mode 100644 feature/clientupdate/clientupdate.go create mode 100644 feature/condregister/maybe_clientupdate.go create mode 100644 feature/hooks.go delete mode 100644 ipn/ipnlocal/autoupdate.go delete mode 100644 ipn/ipnlocal/autoupdate_disabled.go diff --git a/client/web/web.go b/client/web/web.go index d88239843..2421403c1 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -24,9 +24,9 @@ import ( "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" - 
"tailscale.com/clientupdate" "tailscale.com/envknob" "tailscale.com/envknob/featureknob" + "tailscale.com/feature" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" @@ -1049,7 +1049,7 @@ func availableFeatures() map[string]bool { "advertise-routes": true, // available on all platforms "use-exit-node": featureknob.CanUseExitNode() == nil, "ssh": featureknob.CanRunTailscaleSSH() == nil, - "auto-update": version.IsUnstableBuild() && clientupdate.CanAutoUpdate(), + "auto-update": version.IsUnstableBuild() && feature.CanAutoUpdate(), } return features } diff --git a/clientupdate/clientupdate.go b/clientupdate/clientupdate.go index ffd3fb03b..84b289615 100644 --- a/clientupdate/clientupdate.go +++ b/clientupdate/clientupdate.go @@ -27,6 +27,7 @@ import ( "strconv" "strings" + "tailscale.com/feature" "tailscale.com/hostinfo" "tailscale.com/types/lazy" "tailscale.com/types/logger" @@ -252,9 +253,13 @@ func (up *Updater) getUpdateFunction() (fn updateFunction, canAutoUpdate bool) { var canAutoUpdateCache lazy.SyncValue[bool] -// CanAutoUpdate reports whether auto-updating via the clientupdate package +func init() { + feature.HookCanAutoUpdate.Set(canAutoUpdate) +} + +// canAutoUpdate reports whether auto-updating via the clientupdate package // is supported for the current os/distro. -func CanAutoUpdate() bool { return canAutoUpdateCache.Get(canAutoUpdateUncached) } +func canAutoUpdate() bool { return canAutoUpdateCache.Get(canAutoUpdateUncached) } func canAutoUpdateUncached() bool { if version.IsMacSysExt() { diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 3aa0a496c..a85f5731b 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -12,7 +12,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket 💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump - W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ + W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com @@ -60,7 +60,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/google/gofuzz from k8s.io/apimachinery/pkg/apis/meta/v1+ github.com/google/gofuzz/bytesource from github.com/google/gofuzz github.com/google/uuid from github.com/prometheus-community/pro-bing+ - github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ + github.com/hdevalence/ed25519consensus from tailscale.com/tka github.com/josharian/intern from github.com/mailru/easyjson/jlexer L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink @@ -686,8 +686,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/client/tailscale from tailscale.com/cmd/k8s-operator+ tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal - tailscale.com/clientupdate from tailscale.com/client/web+ - LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate 
tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnlocal+ tailscale.com/control/controlhttp from tailscale.com/control/controlclient @@ -780,7 +778,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/control/controlclient+ tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/net/udprelay/status from tailscale.com/client/local @@ -829,7 +827,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/cmd/k8s-operator+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - tailscale.com/util/cmpver from tailscale.com/clientupdate+ + LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics @@ -843,7 +841,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ - tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag @@ -869,8 +867,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ - 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ - W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ + 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ + W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/util/osdiag W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns+ W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ @@ -907,7 +905,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh - golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+ golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ @@ -944,13 +942,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/text/unicode/bidi from 
golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ - archive/tar from tailscale.com/clientupdate bufio from compress/flate+ - bytes from archive/tar+ + bytes from bufio+ cmp from github.com/gaissmai/bart+ compress/flate from compress/gzip+ compress/gzip from github.com/emicklei/go-restful/v3+ - compress/zlib from debug/pe+ + compress/zlib from github.com/emicklei/go-restful/v3+ container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp+ container/list from crypto/tls+ context from crypto/tls+ @@ -1034,10 +1031,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ encoding/json from expvar+ encoding/pem from crypto/tls+ encoding/xml from github.com/emicklei/go-restful/v3+ - errors from archive/tar+ + errors from bufio+ expvar from github.com/prometheus/client_golang/prometheus+ flag from github.com/spf13/pflag+ - fmt from archive/tar+ + fmt from compress/flate+ go/ast from go/doc+ go/build/constraint from go/parser go/doc from k8s.io/apimachinery/pkg/runtime @@ -1063,7 +1060,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/filepathlite from os+ internal/fmtsort from fmt+ internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from archive/tar+ + internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ @@ -1100,8 +1097,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/testlog from os internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ - io from archive/tar+ - io/fs from archive/tar+ + io from bufio+ + io/fs from crypto/x509+ io/ioutil from github.com/digitalocean/go-smbios/smbios+ iter from go/ast+ log from expvar+ @@ -1110,7 +1107,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ log/slog/internal from log/slog log/slog/internal/buffer from log/slog maps from sigs.k8s.io/controller-runtime/pkg/predicate+ - math from archive/tar+ + math from compress/flate+ math/big from crypto/dsa+ math/bits from compress/flate+ math/rand from github.com/google/go-cmp/cmp+ @@ -1132,29 +1129,29 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ os from crypto/internal/sysrand+ os/exec from github.com/godbus/dbus/v5+ os/signal from sigs.k8s.io/controller-runtime/pkg/manager/signals - os/user from archive/tar+ - path from archive/tar+ - path/filepath from archive/tar+ - reflect from archive/tar+ + os/user from github.com/godbus/dbus/v5+ + path from debug/dwarf+ + path/filepath from crypto/x509+ + reflect from crypto/x509+ regexp from github.com/davecgh/go-spew/spew+ regexp/syntax from regexp - runtime from archive/tar+ + runtime from crypto/internal/fips140+ runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/metrics from github.com/prometheus/client_golang/prometheus+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof slices from encoding/base32+ sort from compress/flate+ - strconv from archive/tar+ - strings from archive/tar+ + strconv from compress/flate+ + strings from bufio+ W structs from internal/syscall/windows - sync from archive/tar+ + sync from compress/flate+ sync/atomic from context+ - syscall from archive/tar+ + syscall from crypto/internal/sysrand+ text/tabwriter from k8s.io/apimachinery/pkg/util/diff+ text/template from 
html/template text/template/parse from html/template+ - time from archive/tar+ + time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index c0ce0b1c1..1807ada13 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -15,8 +15,8 @@ import ( "strings" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/clientupdate" "tailscale.com/cmd/tailscale/cli/ffcomplete" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/net/netutil" "tailscale.com/net/tsaddr" @@ -226,21 +226,14 @@ func runSet(ctx context.Context, args []string) (retErr error) { return err } } - if maskedPrefs.AutoUpdateSet.ApplySet { - if !clientupdate.CanAutoUpdate() { - return errors.New("automatic updates are not supported on this platform") + if maskedPrefs.AutoUpdateSet.ApplySet && buildfeatures.HasClientUpdate && version.IsMacSysExt() { + apply := "0" + if maskedPrefs.AutoUpdate.Apply.EqualBool(true) { + apply = "1" } - // On macsys, tailscaled will set the Sparkle auto-update setting. It - // does not use clientupdate. - if version.IsMacSysExt() { - apply := "0" - if maskedPrefs.AutoUpdate.Apply.EqualBool(true) { - apply = "1" - } - out, err := exec.Command("defaults", "write", "io.tailscale.ipn.macsys", "SUAutomaticallyUpdate", apply).CombinedOutput() - if err != nil { - return fmt.Errorf("failed to enable automatic updates: %v, %q", err, out) - } + out, err := exec.Command("defaults", "write", "io.tailscale.ipn.macsys", "SUAutomaticallyUpdate", apply).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to enable automatic updates: %v, %q", err, out) } } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 2d724a900..2df600702 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -77,7 +77,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/cmd/tailscale/cli - tailscale.com/clientupdate from tailscale.com/client/web+ + tailscale.com/clientupdate from tailscale.com/cmd/tailscale/cli LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/cmd/tailscale/cli from tailscale.com/cmd/tailscale tailscale.com/cmd/tailscale/cli/ffcomplete from tailscale.com/cmd/tailscale/cli diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 6cc3733a9..42d8f9181 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -1,7 +1,5 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) - filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus - filippo.io/edwards25519/field from filippo.io/edwards25519 github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ @@ -13,7 +11,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/hdevalence/ed25519consensus 
from tailscale.com/clientupdate/distsign 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -44,8 +41,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/appc from tailscale.com/ipn/ipnlocal+ tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ - tailscale.com/clientupdate from tailscale.com/ipn/ipnlocal+ - tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ @@ -65,7 +60,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/cmd/tailscaled+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ - tailscale.com/hostinfo from tailscale.com/clientupdate+ + tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ @@ -116,7 +111,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ - tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + tailscale.com/net/tshttpproxy from tailscale.com/cmd/tailscaled+ tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/omit from tailscale.com/ipn/conffile @@ -142,7 +137,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled tailscale.com/types/ipproto from tailscale.com/ipn+ tailscale.com/types/key from tailscale.com/cmd/tailscaled+ - tailscale.com/types/lazy from tailscale.com/clientupdate+ + tailscale.com/types/lazy from tailscale.com/hostinfo+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext @@ -161,7 +156,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - tailscale.com/util/cmpver from tailscale.com/clientupdate tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/util/dirwalk from tailscale.com/metrics @@ -171,11 +165,11 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth 💣 tailscale.com/util/hashx from tailscale.com/util/deephash - tailscale.com/util/httpm from tailscale.com/clientupdate/distsign+ + tailscale.com/util/httpm 
from tailscale.com/ipn/ipnlocal+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ - tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/osdiag from tailscale.com/ipn/localapi tailscale.com/util/osshare from tailscale.com/cmd/tailscaled @@ -195,8 +189,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/vizerror from tailscale.com/tailcfg+ tailscale.com/util/winutil from tailscale.com/ipn/ipnauth tailscale.com/util/zstdframe from tailscale.com/control/controlclient - tailscale.com/version from tailscale.com/clientupdate+ - tailscale.com/version/distro from tailscale.com/clientupdate+ + tailscale.com/version from tailscale.com/cmd/tailscaled+ + tailscale.com/version/distro from tailscale.com/cmd/tailscaled+ tailscale.com/wgengine from tailscale.com/cmd/tailscaled+ tailscale.com/wgengine/filter from tailscale.com/control/controlclient+ tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+ @@ -249,9 +243,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/derp - archive/tar from tailscale.com/clientupdate bufio from compress/flate+ - bytes from archive/tar+ + bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip compress/gzip from golang.org/x/net/http2+ @@ -329,10 +322,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - errors from archive/tar+ + errors from bufio+ expvar from tailscale.com/health+ flag from tailscale.com/cmd/tailscaled+ - fmt from archive/tar+ + fmt from compress/flate+ hash from crypto+ hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem @@ -348,7 +341,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/filepathlite from os+ internal/fmtsort from fmt internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from archive/tar+ + internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ @@ -379,14 +372,14 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/testlog from os internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ - io from archive/tar+ - io/fs from archive/tar+ + io from bufio+ + io/fs from crypto/x509+ io/ioutil from github.com/digitalocean/go-smbios/smbios iter from bytes+ log from expvar+ log/internal from log - maps from archive/tar+ - math from archive/tar+ + maps from crypto/x509+ + math from compress/flate+ math/big from crypto/dsa+ math/bits from bytes+ math/rand from github.com/mdlayher/netlink+ @@ -405,27 +398,27 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from tailscale.com/clientupdate+ + os/exec from tailscale.com/hostinfo+ os/signal from tailscale.com/cmd/tailscaled - 
os/user from archive/tar+ - path from archive/tar+ - path/filepath from archive/tar+ - reflect from archive/tar+ + os/user from tailscale.com/ipn/ipnauth+ + path from io/fs+ + path/filepath from crypto/x509+ + reflect from crypto/x509+ regexp from internal/profile+ regexp/syntax from regexp - runtime from archive/tar+ + runtime from crypto/internal/fips140+ runtime/debug from github.com/klauspost/compress/zstd+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof - slices from archive/tar+ + slices from crypto/tls+ sort from compress/flate+ - strconv from archive/tar+ - strings from archive/tar+ - sync from archive/tar+ + strconv from compress/flate+ + strings from bufio+ + sync from compress/flate+ sync/atomic from context+ - syscall from archive/tar+ + syscall from crypto/internal/sysrand+ text/tabwriter from runtime/pprof - time from archive/tar+ + time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 90cba0734..a3bac20aa 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -246,7 +246,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/client/local from tailscale.com/client/web+ tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal - tailscale.com/clientupdate from tailscale.com/client/web+ + tailscale.com/clientupdate from tailscale.com/feature/clientupdate LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled+ tailscale.com/cmd/tailscaled/tailscaledhooks from tailscale.com/cmd/tailscaled+ @@ -273,6 +273,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature from tailscale.com/feature/wakeonlan+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/capture from tailscale.com/feature/condregister + tailscale.com/feature/clientupdate from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index fb97296bc..8a78af493 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -9,7 +9,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ + W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com @@ -28,8 +28,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ - DW 
github.com/google/uuid from github.com/prometheus-community/pro-bing+ - github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ + D github.com/google/uuid from github.com/prometheus-community/pro-bing + github.com/hdevalence/ed25519consensus from tailscale.com/tka L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -128,8 +128,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal - tailscale.com/clientupdate from tailscale.com/client/web+ - LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnext+ tailscale.com/control/controlhttp from tailscale.com/control/controlclient @@ -208,7 +206,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/control/controlclient+ tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/net/udprelay/status from tailscale.com/client/local @@ -237,7 +235,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/ipproto from tailscale.com/ipn+ tailscale.com/types/key from tailscale.com/client/local+ - tailscale.com/types/lazy from tailscale.com/clientupdate+ + tailscale.com/types/lazy from tailscale.com/cmd/tsidp+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext @@ -256,12 +254,12 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - tailscale.com/util/cmpver from tailscale.com/clientupdate+ + LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ - tailscale.com/util/eventbus from tailscale.com/ipn/localapi+ + tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ @@ -270,7 +268,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ 
- tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/must from tailscale.com/cmd/tsidp+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag @@ -296,8 +294,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ - 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ - W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ + 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ + W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/util/osdiag W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns+ W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ @@ -336,7 +334,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh - golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from net+ @@ -371,9 +369,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ - archive/tar from tailscale.com/clientupdate bufio from compress/flate+ - bytes from archive/tar+ + bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip+ compress/gzip from golang.org/x/net/http2+ @@ -446,7 +443,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509+ - DW database/sql/driver from github.com/google/uuid + D database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from github.com/tailscale/web-client-prebuilt+ @@ -459,11 +456,11 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar encoding/json from expvar+ encoding/pem from crypto/tls+ encoding/xml from github.com/tailscale/goupnp+ - errors from archive/tar+ + errors from bufio+ expvar from tailscale.com/health+ flag from tailscale.com/cmd/tsidp+ - fmt from archive/tar+ - hash from compress/zlib+ + fmt from compress/flate+ + hash from crypto+ W hash/adler32 from compress/zlib hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem @@ -480,7 +477,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/filepathlite from os+ internal/fmtsort from fmt+ internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from archive/tar+ + internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ @@ -516,14 
+513,14 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/testlog from os internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ - io from archive/tar+ - io/fs from archive/tar+ + io from bufio+ + io/fs from crypto/x509+ io/ioutil from github.com/digitalocean/go-smbios/smbios+ iter from bytes+ log from expvar+ log/internal from log - maps from archive/tar+ - math from archive/tar+ + maps from crypto/x509+ + math from compress/flate+ math/big from crypto/dsa+ math/bits from bytes+ math/rand from github.com/fxamacker/cbor/v2+ @@ -545,28 +542,28 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar os from crypto/internal/sysrand+ os/exec from github.com/godbus/dbus/v5+ os/signal from tailscale.com/cmd/tsidp - os/user from archive/tar+ - path from archive/tar+ - path/filepath from archive/tar+ - reflect from archive/tar+ + os/user from github.com/godbus/dbus/v5+ + path from debug/dwarf+ + path/filepath from crypto/x509+ + reflect from crypto/x509+ regexp from github.com/tailscale/goupnp/httpu+ regexp/syntax from regexp - runtime from archive/tar+ + runtime from crypto/internal/fips140+ runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof - slices from archive/tar+ + slices from crypto/tls+ sort from compress/flate+ - strconv from archive/tar+ - strings from archive/tar+ + strconv from compress/flate+ + strings from bufio+ W structs from internal/syscall/windows - sync from archive/tar+ + sync from compress/flate+ sync/atomic from context+ - syscall from archive/tar+ + syscall from crypto/internal/sysrand+ text/tabwriter from runtime/pprof text/template from html/template text/template/parse from html/template+ - time from archive/tar+ + time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ diff --git a/feature/buildfeatures/feature_clientupdate_disabled.go b/feature/buildfeatures/feature_clientupdate_disabled.go new file mode 100644 index 000000000..165c9cc9a --- /dev/null +++ b/feature/buildfeatures/feature_clientupdate_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_clientupdate + +package buildfeatures + +// HasClientUpdate is whether the binary was built with support for modular feature "Client auto-update support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_clientupdate" build tag. +// It's a const so it can be used for dead code elimination. +const HasClientUpdate = false diff --git a/feature/buildfeatures/feature_clientupdate_enabled.go b/feature/buildfeatures/feature_clientupdate_enabled.go new file mode 100644 index 000000000..3c3c7878c --- /dev/null +++ b/feature/buildfeatures/feature_clientupdate_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_clientupdate + +package buildfeatures + +// HasClientUpdate is whether the binary was built with support for modular feature "Client auto-update support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_clientupdate" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasClientUpdate = true diff --git a/feature/clientupdate/clientupdate.go b/feature/clientupdate/clientupdate.go new file mode 100644 index 000000000..45fd21129 --- /dev/null +++ b/feature/clientupdate/clientupdate.go @@ -0,0 +1,530 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package clientupdate enables the client update feature. +package clientupdate + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "tailscale.com/clientupdate" + "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/ipnstate" + "tailscale.com/ipn/localapi" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + "tailscale.com/util/httpm" + "tailscale.com/version" + "tailscale.com/version/distro" +) + +func init() { + ipnext.RegisterExtension("clientupdate", newExt) + + // C2N + ipnlocal.RegisterC2N("GET /update", handleC2NUpdateGet) + ipnlocal.RegisterC2N("POST /update", handleC2NUpdatePost) + + // LocalAPI: + localapi.Register("update/install", serveUpdateInstall) + localapi.Register("update/progress", serveUpdateProgress) +} + +func newExt(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { + return &extension{ + logf: logf, + sb: sb, + + lastSelfUpdateState: ipnstate.UpdateFinished, + }, nil +} + +type extension struct { + logf logger.Logf + sb ipnext.SafeBackend + + mu sync.Mutex + + // c2nUpdateStatus is the status of c2n-triggered client update. + c2nUpdateStatus updateStatus + prefs ipn.PrefsView + state ipn.State + + lastSelfUpdateState ipnstate.SelfUpdateStatus + selfUpdateProgress []ipnstate.UpdateProgress + + // offlineAutoUpdateCancel stops offline auto-updates when called. It + // should be used via stopOfflineAutoUpdate and + // maybeStartOfflineAutoUpdate. It is nil when offline auto-updates are + // not running. + // + //lint:ignore U1000 only used in Linux and Windows builds in autoupdate.go + offlineAutoUpdateCancel func() +} + +func (e *extension) Name() string { return "clientupdate" } + +func (e *extension) Init(h ipnext.Host) error { + + h.Hooks().ProfileStateChange.Add(e.onChangeProfile) + h.Hooks().BackendStateChange.Add(e.onBackendStateChange) + + // TODO(nickkhyl): remove this after the profileManager refactoring. + // See tailscale/tailscale#15974. + // This same workaround appears in feature/portlist/portlist.go. 
+ profile, prefs := h.Profiles().CurrentProfileState() + e.onChangeProfile(profile, prefs, false) + + return nil +} + +func (e *extension) Shutdown() error { + e.stopOfflineAutoUpdate() + return nil +} + +func (e *extension) onBackendStateChange(newState ipn.State) { + e.mu.Lock() + defer e.mu.Unlock() + e.state = newState + e.updateOfflineAutoUpdateLocked() +} + +func (e *extension) onChangeProfile(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { + e.mu.Lock() + defer e.mu.Unlock() + e.prefs = prefs + e.updateOfflineAutoUpdateLocked() +} + +func (e *extension) updateOfflineAutoUpdateLocked() { + want := e.prefs.Valid() && e.prefs.AutoUpdate().Apply.EqualBool(true) && + e.state != ipn.Running && e.state != ipn.Starting + + cur := e.offlineAutoUpdateCancel != nil + + if want && !cur { + e.maybeStartOfflineAutoUpdateLocked(e.prefs) + } else if !want && cur { + e.stopOfflineAutoUpdateLocked() + } +} + +type updateStatus struct { + started bool +} + +func (e *extension) clearSelfUpdateProgress() { + e.mu.Lock() + defer e.mu.Unlock() + e.selfUpdateProgress = make([]ipnstate.UpdateProgress, 0) + e.lastSelfUpdateState = ipnstate.UpdateFinished +} + +func (e *extension) GetSelfUpdateProgress() []ipnstate.UpdateProgress { + e.mu.Lock() + defer e.mu.Unlock() + res := make([]ipnstate.UpdateProgress, len(e.selfUpdateProgress)) + copy(res, e.selfUpdateProgress) + return res +} + +func (e *extension) DoSelfUpdate() { + e.mu.Lock() + updateState := e.lastSelfUpdateState + e.mu.Unlock() + // don't start an update if one is already in progress + if updateState == ipnstate.UpdateInProgress { + return + } + e.clearSelfUpdateProgress() + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateInProgress, "")) + up, err := clientupdate.NewUpdater(clientupdate.Arguments{ + Logf: func(format string, args ...any) { + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateInProgress, fmt.Sprintf(format, args...))) + }, + }) + if err != nil { + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFailed, err.Error())) + } + err = up.Update() + if err != nil { + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFailed, err.Error())) + } else { + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFinished, "tailscaled did not restart; please restart Tailscale manually.")) + } +} + +// serveUpdateInstall sends a request to the LocalBackend to start a Tailscale +// self-update. A successful response does not indicate whether the update +// succeeded, only that the request was accepted. Clients should use +// serveUpdateProgress after pinging this endpoint to check how the update is +// going. +func serveUpdateInstall(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.POST { + http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) + return + } + + b := h.LocalBackend() + ext, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "clientupdate extension not found", http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusAccepted) + + go ext.DoSelfUpdate() +} + +// serveUpdateProgress returns the status of an in-progress Tailscale self-update. +// This is provided as a slice of ipnstate.UpdateProgress structs with various +// log messages in order from oldest to newest. If an update is not in progress, +// the returned slice will be empty. 
+func serveUpdateProgress(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) + return + } + + b := h.LocalBackend() + ext, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "clientupdate extension not found", http.StatusInternalServerError) + return + } + + ups := ext.GetSelfUpdateProgress() + + json.NewEncoder(w).Encode(ups) +} + +func (e *extension) pushSelfUpdateProgress(up ipnstate.UpdateProgress) { + e.mu.Lock() + defer e.mu.Unlock() + e.selfUpdateProgress = append(e.selfUpdateProgress, up) + e.lastSelfUpdateState = up.Status +} + +func handleC2NUpdateGet(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + e, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "clientupdate extension not found", http.StatusInternalServerError) + return + } + + e.logf("c2n: GET /update received") + + res := e.newC2NUpdateResponse() + res.Started = e.c2nUpdateStarted() + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +func handleC2NUpdatePost(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + e, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "clientupdate extension not found", http.StatusInternalServerError) + return + } + e.logf("c2n: POST /update received") + res := e.newC2NUpdateResponse() + defer func() { + if res.Err != "" { + e.logf("c2n: POST /update failed: %s", res.Err) + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) + }() + + if !res.Enabled { + res.Err = "not enabled" + return + } + if !res.Supported { + res.Err = "not supported" + return + } + + // Do not update if we have active inbound SSH connections. Control can set + // force=true query parameter to override this. + if r.FormValue("force") != "true" && b.ActiveSSHConns() > 0 { + res.Err = "not updating due to active SSH connections" + return + } + + if err := e.startAutoUpdate("c2n"); err != nil { + res.Err = err.Error() + return + } + res.Started = true +} + +func (e *extension) newC2NUpdateResponse() tailcfg.C2NUpdateResponse { + e.mu.Lock() + defer e.mu.Unlock() + + // If NewUpdater does not return an error, we can update the installation. + // + // Note that we create the Updater solely to check for errors; we do not + // invoke it here. For this purpose, it is ok to pass it a zero Arguments. + var upPref ipn.AutoUpdatePrefs + if e.prefs.Valid() { + upPref = e.prefs.AutoUpdate() + } + return tailcfg.C2NUpdateResponse{ + Enabled: envknob.AllowsRemoteUpdate() || upPref.Apply.EqualBool(true), + Supported: feature.CanAutoUpdate() && !version.IsMacSysExt(), + } +} + +func (e *extension) c2nUpdateStarted() bool { + e.mu.Lock() + defer e.mu.Unlock() + return e.c2nUpdateStatus.started +} + +func (e *extension) setC2NUpdateStarted(v bool) { + e.mu.Lock() + defer e.mu.Unlock() + e.c2nUpdateStatus.started = v +} + +func (e *extension) trySetC2NUpdateStarted() bool { + e.mu.Lock() + defer e.mu.Unlock() + if e.c2nUpdateStatus.started { + return false + } + e.c2nUpdateStatus.started = true + return true +} + +// findCmdTailscale looks for the cmd/tailscale that corresponds to the +// currently running cmd/tailscaled. It's up to the caller to verify that the +// two match, but this function does its best to find the right one. Notably, it +// doesn't use $PATH for security reasons. 
+func findCmdTailscale() (string, error) { + self, err := os.Executable() + if err != nil { + return "", err + } + var ts string + switch runtime.GOOS { + case "linux": + if self == "/usr/sbin/tailscaled" || self == "/usr/bin/tailscaled" { + ts = "/usr/bin/tailscale" + } + if self == "/usr/local/sbin/tailscaled" || self == "/usr/local/bin/tailscaled" { + ts = "/usr/local/bin/tailscale" + } + switch distro.Get() { + case distro.QNAP: + // The volume under /share/ where qpkg are installed is not + // predictable. But the rest of the path is. + ok, err := filepath.Match("/share/*/.qpkg/Tailscale/tailscaled", self) + if err == nil && ok { + ts = filepath.Join(filepath.Dir(self), "tailscale") + } + case distro.Unraid: + if self == "/usr/local/emhttp/plugins/tailscale/bin/tailscaled" { + ts = "/usr/local/emhttp/plugins/tailscale/bin/tailscale" + } + } + case "windows": + ts = filepath.Join(filepath.Dir(self), "tailscale.exe") + case "freebsd", "openbsd": + if self == "/usr/local/bin/tailscaled" { + ts = "/usr/local/bin/tailscale" + } + default: + return "", fmt.Errorf("unsupported OS %v", runtime.GOOS) + } + if ts != "" && regularFileExists(ts) { + return ts, nil + } + return "", errors.New("tailscale executable not found in expected place") +} + +func tailscaleUpdateCmd(cmdTS string) *exec.Cmd { + defaultCmd := exec.Command(cmdTS, "update", "--yes") + if runtime.GOOS != "linux" { + return defaultCmd + } + if _, err := exec.LookPath("systemd-run"); err != nil { + return defaultCmd + } + + // When systemd-run is available, use it to run the update command. This + // creates a new temporary unit separate from the tailscaled unit. When + // tailscaled is restarted during the update, systemd won't kill this + // temporary update unit, which could cause unexpected breakage. + // + // We want to use a few optional flags: + // * --wait, to block the update command until completion (added in systemd 232) + // * --pipe, to collect stdout/stderr (added in systemd 235) + // * --collect, to clean up failed runs from memory (added in systemd 236) + // + // We need to check the version of systemd to figure out if those flags are + // available. + // + // The output will look like: + // + // systemd 255 (255.7-1-arch) + // +PAM +AUDIT ... other feature flags ... + systemdVerOut, err := exec.Command("systemd-run", "--version").Output() + if err != nil { + return defaultCmd + } + parts := strings.Fields(string(systemdVerOut)) + if len(parts) < 2 || parts[0] != "systemd" { + return defaultCmd + } + systemdVer, err := strconv.Atoi(parts[1]) + if err != nil { + return defaultCmd + } + if systemdVer >= 236 { + return exec.Command("systemd-run", "--wait", "--pipe", "--collect", cmdTS, "update", "--yes") + } else if systemdVer >= 235 { + return exec.Command("systemd-run", "--wait", "--pipe", cmdTS, "update", "--yes") + } else if systemdVer >= 232 { + return exec.Command("systemd-run", "--wait", cmdTS, "update", "--yes") + } else { + return exec.Command("systemd-run", cmdTS, "update", "--yes") + } +} + +func regularFileExists(path string) bool { + fi, err := os.Stat(path) + return err == nil && fi.Mode().IsRegular() +} + +// startAutoUpdate triggers an auto-update attempt. The actual update happens +// asynchronously. If another update is in progress, an error is returned. +func (e *extension) startAutoUpdate(logPrefix string) (retErr error) { + // Check if update was already started, and mark as started. 
+ if !e.trySetC2NUpdateStarted() { + return errors.New("update already started") + } + defer func() { + // Clear the started flag if something failed. + if retErr != nil { + e.setC2NUpdateStarted(false) + } + }() + + cmdTS, err := findCmdTailscale() + if err != nil { + return fmt.Errorf("failed to find cmd/tailscale binary: %w", err) + } + var ver struct { + Long string `json:"long"` + } + out, err := exec.Command(cmdTS, "version", "--json").Output() + if err != nil { + return fmt.Errorf("failed to find cmd/tailscale binary: %w", err) + } + if err := json.Unmarshal(out, &ver); err != nil { + return fmt.Errorf("invalid JSON from cmd/tailscale version --json: %w", err) + } + if ver.Long != version.Long() { + return fmt.Errorf("cmd/tailscale version %q does not match tailscaled version %q", ver.Long, version.Long()) + } + + cmd := tailscaleUpdateCmd(cmdTS) + buf := new(bytes.Buffer) + cmd.Stdout = buf + cmd.Stderr = buf + e.logf("%s: running %q", logPrefix, strings.Join(cmd.Args, " ")) + if err := cmd.Start(); err != nil { + return fmt.Errorf("failed to start cmd/tailscale update: %w", err) + } + + go func() { + if err := cmd.Wait(); err != nil { + e.logf("%s: update command failed: %v, output: %s", logPrefix, err, buf) + } else { + e.logf("%s: update attempt complete", logPrefix) + } + e.setC2NUpdateStarted(false) + }() + return nil +} + +func (e *extension) stopOfflineAutoUpdate() { + e.mu.Lock() + defer e.mu.Unlock() + e.stopOfflineAutoUpdateLocked() +} + +func (e *extension) stopOfflineAutoUpdateLocked() { + if e.offlineAutoUpdateCancel == nil { + return + } + e.logf("offline auto-update: stopping update checks") + e.offlineAutoUpdateCancel() + e.offlineAutoUpdateCancel = nil +} + +// e.mu must be held +func (e *extension) maybeStartOfflineAutoUpdateLocked(prefs ipn.PrefsView) { + if !prefs.Valid() || !prefs.AutoUpdate().Apply.EqualBool(true) { + return + } + // AutoUpdate.Apply field in prefs can only be true for platforms that + // support auto-updates. But check it here again, just in case. + if !feature.CanAutoUpdate() { + return + } + // On macsys, auto-updates are managed by Sparkle. + if version.IsMacSysExt() { + return + } + + if e.offlineAutoUpdateCancel != nil { + // Already running. 
+ return + } + ctx, cancel := context.WithCancel(context.Background()) + e.offlineAutoUpdateCancel = cancel + + e.logf("offline auto-update: starting update checks") + go e.offlineAutoUpdate(ctx) +} + +const offlineAutoUpdateCheckPeriod = time.Hour + +func (e *extension) offlineAutoUpdate(ctx context.Context) { + t := time.NewTicker(offlineAutoUpdateCheckPeriod) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return + case <-t.C: + } + if err := e.startAutoUpdate("offline auto-update"); err != nil { + e.logf("offline auto-update: failed: %v", err) + } + } +} diff --git a/feature/condregister/maybe_clientupdate.go b/feature/condregister/maybe_clientupdate.go new file mode 100644 index 000000000..bc694f970 --- /dev/null +++ b/feature/condregister/maybe_clientupdate.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_clientupdate + +package condregister + +import _ "tailscale.com/feature/clientupdate" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index c41764741..289536099 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -97,6 +97,7 @@ var Features = map[FeatureTag]FeatureMeta{ "capture": {"Capture", "Packet capture", nil}, "cli": {"CLI", "embed the CLI into the tailscaled binary", nil}, "cliconndiag": {"CLIConnDiag", "CLI connection error diagnostics", nil}, + "clientupdate": {"ClientUpdate", "Client auto-update support", nil}, "completion": {"Completion", "CLI shell completion", nil}, "dbus": {"DBus", "Linux DBus support", nil}, "debugeventbus": {"DebugEventBus", "eventbus debug support", nil}, diff --git a/feature/hooks.go b/feature/hooks.go new file mode 100644 index 000000000..fc3971dda --- /dev/null +++ b/feature/hooks.go @@ -0,0 +1,17 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package feature + +// HookCanAutoUpdate is a hook for the clientupdate package +// to conditionally initialize. +var HookCanAutoUpdate Hook[func() bool] + +// CanAutoUpdate reports whether the current binary is built with auto-update +// support and, if so, whether the current platform supports it. +func CanAutoUpdate() bool { + if f, ok := HookCanAutoUpdate.GetOk(); ok { + return f() + } + return false +} diff --git a/ipn/ipnlocal/autoupdate.go b/ipn/ipnlocal/autoupdate.go deleted file mode 100644 index b7d217a10..000000000 --- a/ipn/ipnlocal/autoupdate.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux || windows - -package ipnlocal - -import ( - "context" - "time" - - "tailscale.com/clientupdate" - "tailscale.com/ipn" - "tailscale.com/version" -) - -func (b *LocalBackend) stopOfflineAutoUpdate() { - if b.offlineAutoUpdateCancel != nil { - b.logf("offline auto-update: stopping update checks") - b.offlineAutoUpdateCancel() - b.offlineAutoUpdateCancel = nil - } -} - -func (b *LocalBackend) maybeStartOfflineAutoUpdate(prefs ipn.PrefsView) { - if !prefs.AutoUpdate().Apply.EqualBool(true) { - return - } - // AutoUpdate.Apply field in prefs can only be true for platforms that - // support auto-updates. But check it here again, just in case. - if !clientupdate.CanAutoUpdate() { - return - } - // On macsys, auto-updates are managed by Sparkle. - if version.IsMacSysExt() { - return - } - - if b.offlineAutoUpdateCancel != nil { - // Already running. 
- return - } - ctx, cancel := context.WithCancel(context.Background()) - b.offlineAutoUpdateCancel = cancel - - b.logf("offline auto-update: starting update checks") - go b.offlineAutoUpdate(ctx) -} - -const offlineAutoUpdateCheckPeriod = time.Hour - -func (b *LocalBackend) offlineAutoUpdate(ctx context.Context) { - t := time.NewTicker(offlineAutoUpdateCheckPeriod) - defer t.Stop() - for { - select { - case <-ctx.Done(): - return - case <-t.C: - } - if err := b.startAutoUpdate("offline auto-update"); err != nil { - b.logf("offline auto-update: failed: %v", err) - } - } -} diff --git a/ipn/ipnlocal/autoupdate_disabled.go b/ipn/ipnlocal/autoupdate_disabled.go deleted file mode 100644 index 88ed68c95..000000000 --- a/ipn/ipnlocal/autoupdate_disabled.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !(linux || windows) - -package ipnlocal - -import ( - "tailscale.com/ipn" -) - -func (b *LocalBackend) stopOfflineAutoUpdate() { - // Not supported on this platform. -} - -func (b *LocalBackend) maybeStartOfflineAutoUpdate(prefs ipn.PrefsView) { - // Not supported on this platform. -} diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 2b48b19fa..cbc4cae78 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -5,23 +5,16 @@ package ipnlocal import ( "encoding/json" - "errors" "fmt" "io" "net/http" - "os" - "os/exec" "path" - "path/filepath" "reflect" - "runtime" "strconv" "strings" "time" - "tailscale.com/clientupdate" "tailscale.com/control/controlclient" - "tailscale.com/envknob" "tailscale.com/ipn" "tailscale.com/net/sockstats" "tailscale.com/posture" @@ -34,7 +27,6 @@ import ( "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/ptype" "tailscale.com/version" - "tailscale.com/version/distro" ) // c2nHandlers maps an HTTP method and URI path (without query parameters) to @@ -60,10 +52,6 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ // SSH req("/ssh/usernames"): handleC2NSSHUsernames, - // Auto-updates. - req("GET /update"): handleC2NUpdateGet, - req("POST /update"): handleC2NUpdatePost, - // Device posture. req("GET /posture/identity"): handleC2NPostureIdentityGet, @@ -337,50 +325,6 @@ func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.R w.WriteHeader(http.StatusNoContent) } -func handleC2NUpdateGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: GET /update received") - - res := b.newC2NUpdateResponse() - res.Started = b.c2nUpdateStarted() - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - -func handleC2NUpdatePost(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: POST /update received") - res := b.newC2NUpdateResponse() - defer func() { - if res.Err != "" { - b.logf("c2n: POST /update failed: %s", res.Err) - } - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) - }() - - if !res.Enabled { - res.Err = "not enabled" - return - } - if !res.Supported { - res.Err = "not supported" - return - } - - // Do not update if we have active inbound SSH connections. Control can set - // force=true query parameter to override this. 
- if r.FormValue("force") != "true" && b.sshServer != nil && b.sshServer.NumActiveConns() > 0 { - res.Err = "not updating due to active SSH connections" - return - } - - if err := b.startAutoUpdate("c2n"); err != nil { - res.Err = err.Error() - return - } - res.Started = true -} - func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { b.logf("c2n: GET /posture/identity received") @@ -423,137 +367,3 @@ func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(res) } - -func (b *LocalBackend) newC2NUpdateResponse() tailcfg.C2NUpdateResponse { - // If NewUpdater does not return an error, we can update the installation. - // - // Note that we create the Updater solely to check for errors; we do not - // invoke it here. For this purpose, it is ok to pass it a zero Arguments. - prefs := b.Prefs().AutoUpdate() - return tailcfg.C2NUpdateResponse{ - Enabled: envknob.AllowsRemoteUpdate() || prefs.Apply.EqualBool(true), - Supported: clientupdate.CanAutoUpdate() && !version.IsMacSysExt(), - } -} - -func (b *LocalBackend) c2nUpdateStarted() bool { - b.mu.Lock() - defer b.mu.Unlock() - return b.c2nUpdateStatus.started -} - -func (b *LocalBackend) setC2NUpdateStarted(v bool) { - b.mu.Lock() - defer b.mu.Unlock() - b.c2nUpdateStatus.started = v -} - -func (b *LocalBackend) trySetC2NUpdateStarted() bool { - b.mu.Lock() - defer b.mu.Unlock() - if b.c2nUpdateStatus.started { - return false - } - b.c2nUpdateStatus.started = true - return true -} - -// findCmdTailscale looks for the cmd/tailscale that corresponds to the -// currently running cmd/tailscaled. It's up to the caller to verify that the -// two match, but this function does its best to find the right one. Notably, it -// doesn't use $PATH for security reasons. -func findCmdTailscale() (string, error) { - self, err := os.Executable() - if err != nil { - return "", err - } - var ts string - switch runtime.GOOS { - case "linux": - if self == "/usr/sbin/tailscaled" || self == "/usr/bin/tailscaled" { - ts = "/usr/bin/tailscale" - } - if self == "/usr/local/sbin/tailscaled" || self == "/usr/local/bin/tailscaled" { - ts = "/usr/local/bin/tailscale" - } - switch distro.Get() { - case distro.QNAP: - // The volume under /share/ where qpkg are installed is not - // predictable. But the rest of the path is. - ok, err := filepath.Match("/share/*/.qpkg/Tailscale/tailscaled", self) - if err == nil && ok { - ts = filepath.Join(filepath.Dir(self), "tailscale") - } - case distro.Unraid: - if self == "/usr/local/emhttp/plugins/tailscale/bin/tailscaled" { - ts = "/usr/local/emhttp/plugins/tailscale/bin/tailscale" - } - } - case "windows": - ts = filepath.Join(filepath.Dir(self), "tailscale.exe") - case "freebsd", "openbsd": - if self == "/usr/local/bin/tailscaled" { - ts = "/usr/local/bin/tailscale" - } - default: - return "", fmt.Errorf("unsupported OS %v", runtime.GOOS) - } - if ts != "" && regularFileExists(ts) { - return ts, nil - } - return "", errors.New("tailscale executable not found in expected place") -} - -func tailscaleUpdateCmd(cmdTS string) *exec.Cmd { - defaultCmd := exec.Command(cmdTS, "update", "--yes") - if runtime.GOOS != "linux" { - return defaultCmd - } - if _, err := exec.LookPath("systemd-run"); err != nil { - return defaultCmd - } - - // When systemd-run is available, use it to run the update command. This - // creates a new temporary unit separate from the tailscaled unit. 
When - // tailscaled is restarted during the update, systemd won't kill this - // temporary update unit, which could cause unexpected breakage. - // - // We want to use a few optional flags: - // * --wait, to block the update command until completion (added in systemd 232) - // * --pipe, to collect stdout/stderr (added in systemd 235) - // * --collect, to clean up failed runs from memory (added in systemd 236) - // - // We need to check the version of systemd to figure out if those flags are - // available. - // - // The output will look like: - // - // systemd 255 (255.7-1-arch) - // +PAM +AUDIT ... other feature flags ... - systemdVerOut, err := exec.Command("systemd-run", "--version").Output() - if err != nil { - return defaultCmd - } - parts := strings.Fields(string(systemdVerOut)) - if len(parts) < 2 || parts[0] != "systemd" { - return defaultCmd - } - systemdVer, err := strconv.Atoi(parts[1]) - if err != nil { - return defaultCmd - } - if systemdVer >= 236 { - return exec.Command("systemd-run", "--wait", "--pipe", "--collect", cmdTS, "update", "--yes") - } else if systemdVer >= 235 { - return exec.Command("systemd-run", "--wait", "--pipe", cmdTS, "update", "--yes") - } else if systemdVer >= 232 { - return exec.Command("systemd-run", "--wait", cmdTS, "update", "--yes") - } else { - return exec.Command("systemd-run", cmdTS, "update", "--yes") - } -} - -func regularFileExists(path string) bool { - fi, err := os.Stat(path) - return err == nil && fi.Mode().IsRegular() -} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index a95aef0f2..72fc8808c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6,7 +6,6 @@ package ipnlocal import ( - "bytes" "cmp" "context" "crypto/sha256" @@ -25,7 +24,6 @@ import ( "net/netip" "net/url" "os" - "os/exec" "reflect" "runtime" "slices" @@ -40,7 +38,6 @@ import ( "golang.org/x/net/dns/dnsmessage" "tailscale.com/appc" "tailscale.com/client/tailscale/apitype" - "tailscale.com/clientupdate" "tailscale.com/control/controlclient" "tailscale.com/control/controlknobs" "tailscale.com/drive" @@ -302,22 +299,11 @@ type LocalBackend struct { notifyWatchers map[string]*watchSession // by session ID lastStatusTime time.Time // status.AsOf value of the last processed status update componentLogUntil map[string]componentLogState - // c2nUpdateStatus is the status of c2n-triggered client update. - c2nUpdateStatus updateStatus - currentUser ipnauth.Actor + currentUser ipnauth.Actor - selfUpdateProgress []ipnstate.UpdateProgress - lastSelfUpdateState ipnstate.SelfUpdateStatus // capForcedNetfilter is the netfilter that control instructs Linux clients // to use, unless overridden locally. capForcedNetfilter string // TODO(nickkhyl): move to nodeBackend - // offlineAutoUpdateCancel stops offline auto-updates when called. It - // should be used via stopOfflineAutoUpdate and - // maybeStartOfflineAutoUpdate. It is nil when offline auto-updates are - // note running. - // - //lint:ignore U1000 only used in Linux and Windows builds in autoupdate.go - offlineAutoUpdateCancel func() // ServeConfig fields. (also guarded by mu) lastServeConfJSON mem.RO // last JSON that was parsed into serveConfig @@ -433,10 +419,6 @@ func (b *LocalBackend) NetMon() *netmon.Monitor { return b.sys.NetMon.Get() } -type updateStatus struct { - started bool -} - type metrics struct { // advertisedRoutes is a metric that reports the number of network routes that are advertised by the local node. 
// This informs the user of how many routes are being advertised by the local node, excluding exit routes. @@ -517,8 +499,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo em: newExpiryManager(logf, sys.Bus.Get()), loginFlags: loginFlags, clock: clock, - selfUpdateProgress: make([]ipnstate.UpdateProgress, 0), - lastSelfUpdateState: ipnstate.UpdateFinished, captiveCtx: captiveCtx, captiveCancel: nil, // so that we start checkCaptivePortalLoop when Running needsCaptiveDetection: make(chan bool), @@ -1127,7 +1107,6 @@ func (b *LocalBackend) Shutdown() { defer cancel() b.sockstatLogger.Shutdown(ctx) } - b.stopOfflineAutoUpdate() b.unregisterSysPolicyWatch() if cc != nil { @@ -3412,7 +3391,7 @@ func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { // can still manually enable auto-updates on this node. return } - if clientupdate.CanAutoUpdate() { + if buildfeatures.HasClientUpdate && feature.CanAutoUpdate() { b.logf("using tailnet default auto-update setting: %v", au) prefsClone := prefs.AsStruct() prefsClone.AutoUpdate.Apply = opt.NewBool(au) @@ -4100,7 +4079,12 @@ func (b *LocalBackend) checkFunnelEnabledLocked(p *ipn.Prefs) error { } func (b *LocalBackend) checkAutoUpdatePrefsLocked(p *ipn.Prefs) error { - if p.AutoUpdate.Apply.EqualBool(true) && !clientupdate.CanAutoUpdate() { + if !buildfeatures.HasClientUpdate { + if p.AutoUpdate.Apply.EqualBool(true) { + return errors.New("Auto-update support is disabled in this build") + } + } + if p.AutoUpdate.Apply.EqualBool(true) && !feature.CanAutoUpdate() { return errors.New("Auto-updates are not supported on this platform.") } return nil @@ -4552,14 +4536,6 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) b.resetAlwaysOnOverrideLocked() } - if newp.AutoUpdate.Apply.EqualBool(true) { - if b.state != ipn.Running { - b.maybeStartOfflineAutoUpdate(newp.View()) - } - } else { - b.stopOfflineAutoUpdate() - } - unlock.UnlockEarly() if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged { @@ -5467,12 +5443,6 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock } b.pauseOrResumeControlClientLocked() - if newState == ipn.Running { - b.stopOfflineAutoUpdate() - } else { - b.maybeStartOfflineAutoUpdate(prefs) - } - unlock.UnlockEarly() // prefs may change irrespective of state; WantRunning should be explicitly @@ -6611,6 +6581,15 @@ func (b *LocalBackend) DoNoiseRequest(req *http.Request) (*http.Response, error) return cc.DoNoiseRequest(req) } +// ActiveSSHConns returns the number of active SSH connections, +// or 0 if SSH is not linked into the binary or available on the platform. 
+func (b *LocalBackend) ActiveSSHConns() int { + if b.sshServer == nil { + return 0 + } + return b.sshServer.NumActiveConns() +} + func (b *LocalBackend) sshServerOrInit() (_ SSHServer, err error) { b.mu.Lock() defer b.mu.Unlock() @@ -6941,54 +6920,6 @@ func (b *LocalBackend) DebugBreakDERPConns() error { return b.MagicConn().DebugBreakDERPConns() } -func (b *LocalBackend) pushSelfUpdateProgress(up ipnstate.UpdateProgress) { - b.mu.Lock() - defer b.mu.Unlock() - b.selfUpdateProgress = append(b.selfUpdateProgress, up) - b.lastSelfUpdateState = up.Status -} - -func (b *LocalBackend) clearSelfUpdateProgress() { - b.mu.Lock() - defer b.mu.Unlock() - b.selfUpdateProgress = make([]ipnstate.UpdateProgress, 0) - b.lastSelfUpdateState = ipnstate.UpdateFinished -} - -func (b *LocalBackend) GetSelfUpdateProgress() []ipnstate.UpdateProgress { - b.mu.Lock() - defer b.mu.Unlock() - res := make([]ipnstate.UpdateProgress, len(b.selfUpdateProgress)) - copy(res, b.selfUpdateProgress) - return res -} - -func (b *LocalBackend) DoSelfUpdate() { - b.mu.Lock() - updateState := b.lastSelfUpdateState - b.mu.Unlock() - // don't start an update if one is already in progress - if updateState == ipnstate.UpdateInProgress { - return - } - b.clearSelfUpdateProgress() - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateInProgress, "")) - up, err := clientupdate.NewUpdater(clientupdate.Arguments{ - Logf: func(format string, args ...any) { - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateInProgress, fmt.Sprintf(format, args...))) - }, - }) - if err != nil { - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFailed, err.Error())) - } - err = up.Update() - if err != nil { - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFailed, err.Error())) - } else { - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFinished, "tailscaled did not restart; please restart Tailscale manually.")) - } -} - // ObserveDNSResponse passes a DNS response from the PeerAPI DNS server to the // App Connector to enable route discovery. func (b *LocalBackend) ObserveDNSResponse(res []byte) error { @@ -7603,58 +7534,6 @@ func isAllowedAutoExitNodeID(polc policyclient.Client, exitNodeID tailcfg.Stable return true // no policy configured; allow all exit nodes } -// startAutoUpdate triggers an auto-update attempt. The actual update happens -// asynchronously. If another update is in progress, an error is returned. -func (b *LocalBackend) startAutoUpdate(logPrefix string) (retErr error) { - // Check if update was already started, and mark as started. - if !b.trySetC2NUpdateStarted() { - return errors.New("update already started") - } - defer func() { - // Clear the started flag if something failed. 
- if retErr != nil { - b.setC2NUpdateStarted(false) - } - }() - - cmdTS, err := findCmdTailscale() - if err != nil { - return fmt.Errorf("failed to find cmd/tailscale binary: %w", err) - } - var ver struct { - Long string `json:"long"` - } - out, err := exec.Command(cmdTS, "version", "--json").Output() - if err != nil { - return fmt.Errorf("failed to find cmd/tailscale binary: %w", err) - } - if err := json.Unmarshal(out, &ver); err != nil { - return fmt.Errorf("invalid JSON from cmd/tailscale version --json: %w", err) - } - if ver.Long != version.Long() { - return fmt.Errorf("cmd/tailscale version %q does not match tailscaled version %q", ver.Long, version.Long()) - } - - cmd := tailscaleUpdateCmd(cmdTS) - buf := new(bytes.Buffer) - cmd.Stdout = buf - cmd.Stderr = buf - b.logf("%s: running %q", logPrefix, strings.Join(cmd.Args, " ")) - if err := cmd.Start(); err != nil { - return fmt.Errorf("failed to start cmd/tailscale update: %w", err) - } - - go func() { - if err := cmd.Wait(); err != nil { - b.logf("%s: update command failed: %v, output: %s", logPrefix, err, buf) - } else { - b.logf("%s: update attempt complete", logPrefix) - } - b.setC2NUpdateStarted(false) - }() - return nil -} - // srcIPHasCapForFilter is called by the packet filter when evaluating firewall // rules that require a source IP to have a certain node capability. // diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index fd78c3418..70923efde 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -30,10 +30,10 @@ import ( "golang.org/x/net/dns/dnsmessage" "tailscale.com/appc" "tailscale.com/appc/appctest" - "tailscale.com/clientupdate" "tailscale.com/control/controlclient" "tailscale.com/drive" "tailscale.com/drive/driveimpl" + "tailscale.com/feature" _ "tailscale.com/feature/condregister/portmapper" "tailscale.com/health" "tailscale.com/hostinfo" @@ -3710,7 +3710,7 @@ func TestOnTailnetDefaultAutoUpdate(t *testing.T) { // On platforms that don't support auto-update we can never // transition to auto-updates being enabled. The value should // remain unchanged after onTailnetDefaultAutoUpdate. - if !clientupdate.CanAutoUpdate() { + if !feature.CanAutoUpdate() { want = tt.before } if got := b.pm.CurrentPrefs().AutoUpdate().Apply; got != want { @@ -5455,7 +5455,7 @@ func TestEnableAutoUpdates(t *testing.T) { }) // Enabling may fail, depending on which environment we are running this // test in. - wantErr := !clientupdate.CanAutoUpdate() + wantErr := !feature.CanAutoUpdate() gotErr := err != nil if gotErr != wantErr { t.Fatalf("enabling auto-updates: got error: %v (%v); want error: %v", gotErr, err, wantErr) diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 6e1db4ff2..67e71aa70 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -13,8 +13,8 @@ import ( "slices" "strings" - "tailscale.com/clientupdate" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" @@ -674,7 +674,7 @@ func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error // cause any EditPrefs calls to fail (other than disabling auto-updates). // // Reset AutoUpdate.Apply if we detect such invalid prefs. 
- if savedPrefs.AutoUpdate.Apply.EqualBool(true) && !clientupdate.CanAutoUpdate() { + if savedPrefs.AutoUpdate.Apply.EqualBool(true) && !feature.CanAutoUpdate() { savedPrefs.AutoUpdate.Apply.Clear() } diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 8dce388bc..60c92ff8d 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -12,7 +12,8 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "tailscale.com/clientupdate" + _ "tailscale.com/clientupdate" // for feature registration side effects + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" @@ -464,7 +465,7 @@ func TestProfileManagement(t *testing.T) { wantCurProfile = "user@2.example.com" checkProfiles(t) - if !clientupdate.CanAutoUpdate() { + if !feature.CanAutoUpdate() { t.Logf("Save an invalid AutoUpdate pref value") prefs := pm.CurrentPrefs().AsStruct() prefs.AutoUpdate.Apply.Set(true) diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index caebbe0cc..ab556702d 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -27,8 +27,8 @@ import ( "golang.org/x/net/dns/dnsmessage" "tailscale.com/appc" "tailscale.com/client/tailscale/apitype" - "tailscale.com/clientupdate" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/feature/buildfeatures" "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" @@ -120,8 +120,6 @@ var handler = map[string]LocalAPIHandler{ "status": (*Handler).serveStatus, "suggest-exit-node": (*Handler).serveSuggestExitNode, "update/check": (*Handler).serveUpdateCheck, - "update/install": (*Handler).serveUpdateInstall, - "update/progress": (*Handler).serveUpdateProgress, "upload-client-metrics": (*Handler).serveUploadClientMetrics, "usermetrics": (*Handler).serveUserMetrics, "watch-ipn-bus": (*Handler).serveWatchIPNBus, @@ -1897,7 +1895,7 @@ func (h *Handler) serveUpdateCheck(w http.ResponseWriter, r *http.Request) { return } - if !clientupdate.CanAutoUpdate() { + if !feature.CanAutoUpdate() { // if we don't support auto-update, just say that we're up to date json.NewEncoder(w).Encode(tailcfg.ClientVersion{RunningLatest: true}) return @@ -1915,37 +1913,6 @@ func (h *Handler) serveUpdateCheck(w http.ResponseWriter, r *http.Request) { json.NewEncoder(w).Encode(cv) } -// serveUpdateInstall sends a request to the LocalBackend to start a Tailscale -// self-update. A successful response does not indicate whether the update -// succeeded, only that the request was accepted. Clients should use -// serveUpdateProgress after pinging this endpoint to check how the update is -// going. -func (h *Handler) serveUpdateInstall(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.POST { - http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) - return - } - - w.WriteHeader(http.StatusAccepted) - - go h.b.DoSelfUpdate() -} - -// serveUpdateProgress returns the status of an in-progress Tailscale self-update. -// This is provided as a slice of ipnstate.UpdateProgress structs with various -// log messages in order from oldest to newest. If an update is not in progress, -// the returned slice will be empty. 
-func (h *Handler) serveUpdateProgress(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.GET { - http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) - return - } - - ups := h.b.GetSelfUpdateProgress() - - json.NewEncoder(w).Encode(ups) -} - // serveDNSOSConfig serves the current system DNS configuration as a JSON object, if // supported by the OS. func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 2e8ca0f0a..ba509e268 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -9,7 +9,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) LDW github.com/coder/websocket/internal/errd from github.com/coder/websocket LDW github.com/coder/websocket/internal/util from github.com/coder/websocket LDW github.com/coder/websocket/internal/xsync from github.com/coder/websocket - W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ + W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com @@ -28,8 +28,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ - DWI github.com/google/uuid from github.com/prometheus-community/pro-bing+ - github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ + DI github.com/google/uuid from github.com/prometheus-community/pro-bing + github.com/hdevalence/ed25519consensus from tailscale.com/tka L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -124,8 +124,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ LDW tailscale.com/client/web from tailscale.com/ipn/ipnlocal - tailscale.com/clientupdate from tailscale.com/client/web+ - LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnext+ tailscale.com/control/controlhttp from tailscale.com/control/controlclient @@ -204,7 +202,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/control/controlclient+ tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/net/udprelay/status from tailscale.com/client/local @@ -232,7 +230,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/ipproto 
from tailscale.com/ipn+ tailscale.com/types/key from tailscale.com/client/local+ - tailscale.com/types/lazy from tailscale.com/clientupdate+ + tailscale.com/types/lazy from tailscale.com/hostinfo+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext @@ -251,12 +249,12 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - tailscale.com/util/cmpver from tailscale.com/clientupdate+ + LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ LA 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ - tailscale.com/util/eventbus from tailscale.com/ipn/localapi+ + tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ @@ -265,7 +263,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ - tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag @@ -291,8 +289,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ - 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ - W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ + 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ + W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/util/osdiag W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns+ W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ @@ -329,7 +327,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh - golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from net+ @@ -364,9 +362,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ - archive/tar from tailscale.com/clientupdate bufio 
from compress/flate+ - bytes from archive/tar+ + bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip+ compress/gzip from golang.org/x/net/http2+ @@ -439,7 +436,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/x509 from crypto/tls+ DI crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509+ - DWI database/sql/driver from github.com/google/uuid + DI database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from github.com/tailscale/web-client-prebuilt+ @@ -452,11 +449,11 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) encoding/json from expvar+ encoding/pem from crypto/tls+ encoding/xml from github.com/tailscale/goupnp+ - errors from archive/tar+ + errors from bufio+ expvar from tailscale.com/health+ flag from tailscale.com/util/testenv - fmt from archive/tar+ - hash from compress/zlib+ + fmt from compress/flate+ + hash from crypto+ W hash/adler32 from compress/zlib hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem @@ -473,7 +470,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/filepathlite from os+ internal/fmtsort from fmt+ internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from archive/tar+ + internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ @@ -509,14 +506,14 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/testlog from os internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ - io from archive/tar+ - io/fs from archive/tar+ + io from bufio+ + io/fs from crypto/x509+ io/ioutil from github.com/digitalocean/go-smbios/smbios+ iter from bytes+ log from expvar+ log/internal from log - maps from archive/tar+ - math from archive/tar+ + maps from crypto/x509+ + math from compress/flate+ math/big from crypto/dsa+ math/bits from bytes+ math/rand from github.com/fxamacker/cbor/v2+ @@ -537,28 +534,28 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) net/url from crypto/x509+ os from crypto/internal/sysrand+ os/exec from github.com/godbus/dbus/v5+ - os/user from archive/tar+ - path from archive/tar+ - path/filepath from archive/tar+ - reflect from archive/tar+ + os/user from github.com/godbus/dbus/v5+ + path from debug/dwarf+ + path/filepath from crypto/x509+ + reflect from crypto/x509+ regexp from github.com/tailscale/goupnp/httpu+ regexp/syntax from regexp - runtime from archive/tar+ + runtime from crypto/internal/fips140+ runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof - slices from archive/tar+ + slices from crypto/tls+ sort from compress/flate+ - strconv from archive/tar+ - strings from archive/tar+ + strconv from compress/flate+ + strings from bufio+ W structs from internal/syscall/windows - sync from archive/tar+ + sync from compress/flate+ sync/atomic from context+ - syscall from archive/tar+ + syscall from crypto/internal/sysrand+ text/tabwriter from runtime/pprof LDW text/template from html/template LDW text/template/parse from html/template+ - time from archive/tar+ + time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ diff --git a/tstest/integration/integration_test.go 
b/tstest/integration/integration_test.go index fa148abbe..c274c31a9 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -33,8 +33,9 @@ import ( "go4.org/mem" "tailscale.com/client/local" "tailscale.com/client/tailscale" - "tailscale.com/clientupdate" "tailscale.com/cmd/testwrapper/flakytest" + "tailscale.com/feature" + _ "tailscale.com/feature/clientupdate" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/net/tsaddr" @@ -1125,7 +1126,7 @@ func TestLogoutRemovesAllPeers(t *testing.T) { } func TestAutoUpdateDefaults(t *testing.T) { - if !clientupdate.CanAutoUpdate() { + if !feature.CanAutoUpdate() { t.Skip("auto-updates not supported on this platform") } tstest.Shard(t) From ba76578447a033f0b8033a90405cf9e0643ff12a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 15:50:17 -0700 Subject: [PATCH 1435/1708] ipn/ipnlocal, feature/posture: pull posture out into a modular feature Updates #12614 Change-Id: I9d08a1330b9c55e1a23e7979a707e11d8e090d79 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 4 +- cmd/tailscaled/depaware-minbox.txt | 3 - cmd/tailscaled/depaware.txt | 3 +- cmd/tsidp/depaware.txt | 4 +- .../buildfeatures/feature_posture_disabled.go | 13 ++ .../buildfeatures/feature_posture_enabled.go | 13 ++ feature/condregister/maybe_posture.go | 8 ++ feature/featuretags/featuretags.go | 1 + feature/posture/posture.go | 114 ++++++++++++++++++ ipn/ipnlocal/c2n.go | 49 -------- ipn/ipnlocal/local.go | 29 +---- tsnet/depaware.txt | 4 +- 12 files changed, 157 insertions(+), 88 deletions(-) create mode 100644 feature/buildfeatures/feature_posture_disabled.go create mode 100644 feature/buildfeatures/feature_posture_enabled.go create mode 100644 feature/condregister/maybe_posture.go create mode 100644 feature/posture/posture.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index a85f5731b..1fd3c7630 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -17,7 +17,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/osdiag+ - LW 💣 github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/distribution/reference from tailscale.com/cmd/k8s-operator github.com/emicklei/go-restful/v3 from k8s.io/kube-openapi/pkg/common github.com/emicklei/go-restful/v3/log from github.com/emicklei/go-restful/v3 @@ -784,7 +783,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/sessionrecording from tailscale.com/k8s-operator/sessionrecording+ @@ -1099,7 +1097,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ - io/ioutil from github.com/digitalocean/go-smbios/smbios+ + io/ioutil from github.com/godbus/dbus/v5+ iter from go/ast+ log from expvar+ log/internal from log+ diff --git 
a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 42d8f9181..595296229 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -1,6 +1,5 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) - github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart @@ -116,7 +115,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/cmd/tailscaled+ - tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd tailscale.com/safesocket from tailscale.com/cmd/tailscaled+ tailscale.com/syncs from tailscale.com/cmd/tailscaled+ @@ -374,7 +372,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ - io/ioutil from github.com/digitalocean/go-smbios/smbios iter from bytes+ log from expvar+ log/internal from log diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index a3bac20aa..a5ae214a0 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -282,6 +282,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L tailscale.com/feature/linuxdnsfight from tailscale.com/feature/condregister tailscale.com/feature/portlist from tailscale.com/feature/condregister tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper + tailscale.com/feature/posture from tailscale.com/feature/condregister tailscale.com/feature/relayserver from tailscale.com/feature/condregister L tailscale.com/feature/sdnotify from tailscale.com/feature/condregister tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ @@ -364,7 +365,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/feature/portlist - tailscale.com/posture from tailscale.com/ipn/ipnlocal + tailscale.com/posture from tailscale.com/feature/posture tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ LD tailscale.com/sessionrecording from tailscale.com/ssh/tailssh diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 8a78af493..b6e794f8c 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -14,7 +14,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/osdiag+ - LW 💣 github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ @@ -212,7 +211,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar 
tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/syncs from tailscale.com/control/controlhttp+ @@ -515,7 +513,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ - io/ioutil from github.com/digitalocean/go-smbios/smbios+ + io/ioutil from github.com/godbus/dbus/v5+ iter from bytes+ log from expvar+ log/internal from log diff --git a/feature/buildfeatures/feature_posture_disabled.go b/feature/buildfeatures/feature_posture_disabled.go new file mode 100644 index 000000000..a78b1a957 --- /dev/null +++ b/feature/buildfeatures/feature_posture_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_posture + +package buildfeatures + +// HasPosture is whether the binary was built with support for modular feature "Device posture checking support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_posture" build tag. +// It's a const so it can be used for dead code elimination. +const HasPosture = false diff --git a/feature/buildfeatures/feature_posture_enabled.go b/feature/buildfeatures/feature_posture_enabled.go new file mode 100644 index 000000000..dcd9595f9 --- /dev/null +++ b/feature/buildfeatures/feature_posture_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_posture + +package buildfeatures + +// HasPosture is whether the binary was built with support for modular feature "Device posture checking support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_posture" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasPosture = true diff --git a/feature/condregister/maybe_posture.go b/feature/condregister/maybe_posture.go new file mode 100644 index 000000000..6f14c2713 --- /dev/null +++ b/feature/condregister/maybe_posture.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_posture + +package condregister + +import _ "tailscale.com/feature/posture" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 289536099..22b93e0a1 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -139,6 +139,7 @@ var Features = map[FeatureTag]FeatureMeta{ }, "portlist": {"PortList", "Optionally advertise listening service ports", nil}, "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, + "posture": {"Posture", "Device posture checking support", nil}, "netlog": { Sym: "NetLog", Desc: "Network flow logging support", diff --git a/feature/posture/posture.go b/feature/posture/posture.go new file mode 100644 index 000000000..8e1945d7d --- /dev/null +++ b/feature/posture/posture.go @@ -0,0 +1,114 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package posture registers support for device posture checking, +// reporting machine-specific information to the control plane +// when enabled by the user and tailnet. +package posture + +import ( + "encoding/json" + "net/http" + + "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/posture" + "tailscale.com/syncs" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" +) + +func init() { + ipnext.RegisterExtension("posture", newExtension) + ipnlocal.RegisterC2N("GET /posture/identity", handleC2NPostureIdentityGet) +} + +func newExtension(logf logger.Logf, b ipnext.SafeBackend) (ipnext.Extension, error) { + e := &extension{ + logf: logger.WithPrefix(logf, "posture: "), + } + return e, nil +} + +type extension struct { + logf logger.Logf + + // lastKnownHardwareAddrs is a list of the previous known hardware addrs. + // Previously known hwaddrs are kept to work around an issue on Windows + // where all addresses might disappear. + // http://go/corp/25168 + lastKnownHardwareAddrs syncs.AtomicValue[[]string] +} + +func (e *extension) Name() string { return "posture" } +func (e *extension) Init(h ipnext.Host) error { return nil } +func (e *extension) Shutdown() error { return nil } + +func handleC2NPostureIdentityGet(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + e, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "posture extension not available", http.StatusInternalServerError) + return + } + e.logf("c2n: GET /posture/identity received") + + res := tailcfg.C2NPostureIdentityResponse{} + + // Only collect posture identity if enabled on the client, + // this will first check syspolicy, MDM settings like Registry + // on Windows or defaults on macOS. If they are not set, it falls + // back to the cli-flag, `--posture-checking`. 
+ choice, err := b.PolicyClient().GetPreferenceOption(pkey.PostureChecking, ptype.ShowChoiceByPolicy) + if err != nil { + e.logf( + "c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s", + b.Prefs().PostureChecking(), + err, + ) + } + + if choice.ShouldEnable(b.Prefs().PostureChecking()) { + res.SerialNumbers, err = posture.GetSerialNumbers(b.PolicyClient(), e.logf) + if err != nil { + e.logf("c2n: GetSerialNumbers returned error: %v", err) + } + + // TODO(tailscale/corp#21371, 2024-07-10): once this has landed in a stable release + // and looks good in client metrics, remove this parameter and always report MAC + // addresses. + if r.FormValue("hwaddrs") == "true" { + res.IfaceHardwareAddrs, err = e.getHardwareAddrs() + if err != nil { + e.logf("c2n: GetHardwareAddrs returned error: %v", err) + } + } + } else { + res.PostureDisabled = true + } + + e.logf("c2n: posture identity disabled=%v reported %d serials %d hwaddrs", res.PostureDisabled, len(res.SerialNumbers), len(res.IfaceHardwareAddrs)) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +// getHardwareAddrs returns the hardware addresses for the machine. If the list +// of hardware addresses is empty, it will return the previously known hardware +// addresses. Both the current, and previously known hardware addresses might be +// empty. +func (e *extension) getHardwareAddrs() ([]string, error) { + addrs, err := posture.GetHardwareAddrs() + if err != nil { + return nil, err + } + + if len(addrs) == 0 { + e.logf("getHardwareAddrs: got empty list of hwaddrs, returning previous list") + return e.lastKnownHardwareAddrs.Load(), nil + } + + e.lastKnownHardwareAddrs.Store(addrs) + return addrs, nil +} diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index cbc4cae78..38c65fee8 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -17,15 +17,12 @@ import ( "tailscale.com/control/controlclient" "tailscale.com/ipn" "tailscale.com/net/sockstats" - "tailscale.com/posture" "tailscale.com/tailcfg" "tailscale.com/types/netmap" "tailscale.com/util/clientmetric" "tailscale.com/util/goroutines" "tailscale.com/util/httpm" "tailscale.com/util/set" - "tailscale.com/util/syspolicy/pkey" - "tailscale.com/util/syspolicy/ptype" "tailscale.com/version" ) @@ -52,9 +49,6 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ // SSH req("/ssh/usernames"): handleC2NSSHUsernames, - // Device posture. - req("GET /posture/identity"): handleC2NPostureIdentityGet, - // App Connectors. req("GET /appconnector/routes"): handleC2NAppConnectorDomainRoutesGet, @@ -324,46 +318,3 @@ func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.R w.WriteHeader(http.StatusNoContent) } - -func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: GET /posture/identity received") - - res := tailcfg.C2NPostureIdentityResponse{} - - // Only collect posture identity if enabled on the client, - // this will first check syspolicy, MDM settings like Registry - // on Windows or defaults on macOS. If they are not set, it falls - // back to the cli-flag, `--posture-checking`. 
- choice, err := b.polc.GetPreferenceOption(pkey.PostureChecking, ptype.ShowChoiceByPolicy) - if err != nil { - b.logf( - "c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s", - b.Prefs().PostureChecking(), - err, - ) - } - - if choice.ShouldEnable(b.Prefs().PostureChecking()) { - res.SerialNumbers, err = posture.GetSerialNumbers(b.polc, b.logf) - if err != nil { - b.logf("c2n: GetSerialNumbers returned error: %v", err) - } - - // TODO(tailscale/corp#21371, 2024-07-10): once this has landed in a stable release - // and looks good in client metrics, remove this parameter and always report MAC - // addresses. - if r.FormValue("hwaddrs") == "true" { - res.IfaceHardwareAddrs, err = b.getHardwareAddrs() - if err != nil { - b.logf("c2n: GetHardwareAddrs returned error: %v", err) - } - } - } else { - res.PostureDisabled = true - } - - b.logf("c2n: posture identity disabled=%v reported %d serials %d hwaddrs", res.PostureDisabled, len(res.SerialNumbers), len(res.IfaceHardwareAddrs)) - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 72fc8808c..c9fff50c3 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -68,7 +68,6 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" "tailscale.com/paths" - "tailscale.com/posture" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tsd" @@ -344,12 +343,6 @@ type LocalBackend struct { // notified about. lastNotifiedDriveShares *views.SliceView[*drive.Share, drive.ShareView] - // lastKnownHardwareAddrs is a list of the previous known hardware addrs. - // Previously known hwaddrs are kept to work around an issue on Windows - // where all addresses might disappear. - // http://go/corp/25168 - lastKnownHardwareAddrs syncs.AtomicValue[[]string] - // lastSuggestedExitNode stores the last suggested exit node suggestion to // avoid unnecessary churn between multiple equally-good options. lastSuggestedExitNode tailcfg.StableNodeID @@ -419,6 +412,9 @@ func (b *LocalBackend) NetMon() *netmon.Monitor { return b.sys.NetMon.Get() } +// PolicyClient returns the policy client for the backend. +func (b *LocalBackend) PolicyClient() policyclient.Client { return b.polc } + type metrics struct { // advertisedRoutes is a metric that reports the number of network routes that are advertised by the local node. // This informs the user of how many routes are being advertised by the local node, excluding exit routes. @@ -6757,25 +6753,6 @@ func (b *LocalBackend) resetDialPlan() { } } -// getHardwareAddrs returns the hardware addresses for the machine. If the list -// of hardware addresses is empty, it will return the previously known hardware -// addresses. Both the current, and previously known hardware addresses might be -// empty. -func (b *LocalBackend) getHardwareAddrs() ([]string, error) { - addrs, err := posture.GetHardwareAddrs() - if err != nil { - return nil, err - } - - if len(addrs) == 0 { - b.logf("getHardwareAddrs: got empty list of hwaddrs, returning previous list") - return b.lastKnownHardwareAddrs.Load(), nil - } - - b.lastKnownHardwareAddrs.Store(addrs) - return addrs, nil -} - // resetForProfileChangeLockedOnEntry resets the backend for a profile change. // // b.mu must held on entry. It is released on exit. 
diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index ba509e268..0644a0692 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -14,7 +14,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/osdiag+ - LW 💣 github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ @@ -208,7 +207,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/syncs from tailscale.com/control/controlhttp+ @@ -508,7 +506,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ - io/ioutil from github.com/digitalocean/go-smbios/smbios+ + io/ioutil from github.com/godbus/dbus/v5+ iter from bytes+ log from expvar+ log/internal from log From 9aa16bf97b977e10b83900473bfd2dd8c3f043e8 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 16:28:06 -0700 Subject: [PATCH 1436/1708] feature/featuretags, Makefile: fix bug with CLI build tag and depaware, add variant When I added dependency support to featuretag, I broke the handling of the non-omit build tags (as used by the "box" support for bundling the CLI into tailscaled). That then affected depaware. The depaware-minbox.txt this whole time recently has not included the CLI. So fix that, and also add a new depaware variant that's only the daemon, without the CLI. 
Updates #12614 Updates #17139 Change-Id: I4a4591942aa8c66ad8e3242052e3d9baa42902ca Signed-off-by: Brad Fitzpatrick --- Makefile | 4 + cmd/tailscaled/depaware-min.txt | 424 ++++++++++++++++++++++++ cmd/tailscaled/depaware-minbox.txt | 55 ++- feature/featuretags/featuretags.go | 3 - feature/featuretags/featuretags_test.go | 4 + 5 files changed, 477 insertions(+), 13 deletions(-) create mode 100644 cmd/tailscaled/depaware-min.txt diff --git a/Makefile b/Makefile index 95959fcf0..05b984348 100644 --- a/Makefile +++ b/Makefile @@ -29,6 +29,8 @@ updatedeps: ## Update depaware deps tailscale.com/tsnet PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --internal \ tailscale.com/cmd/tailscaled + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --internal \ + tailscale.com/cmd/tailscaled depaware: ## Run depaware checks # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" @@ -44,6 +46,8 @@ depaware: ## Run depaware checks tailscale.com/tsnet PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --internal \ tailscale.com/cmd/tailscaled + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --internal \ + tailscale.com/cmd/tailscaled buildwindows: ## Build tailscale CLI for windows/amd64 GOOS=windows GOARCH=amd64 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt new file mode 100644 index 000000000..ee66d7700 --- /dev/null +++ b/cmd/tailscaled/depaware-min.txt @@ -0,0 +1,424 @@ +tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) + + github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/go-json-experiment/json from tailscale.com/drive+ + github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ + github.com/golang/groupcache/lru from tailscale.com/net/dnscache + 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon + github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink + github.com/klauspost/compress from github.com/klauspost/compress/zstd + github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 + github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd + github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd + 
github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe + github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd + github.com/mdlayher/genetlink from tailscale.com/net/tstun + 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ + 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ + 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ + 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf + github.com/tailscale/hujson from tailscale.com/ipn/conffile + github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ + 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ + github.com/tailscale/wireguard-go/ipc from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/ratelimiter from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/replay from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ + github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device + 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ + 💣 go4.org/mem from tailscale.com/control/controlbase+ + go4.org/netipx from tailscale.com/ipn/ipnlocal+ + tailscale.com from tailscale.com/version + tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/atomicfile from tailscale.com/ipn+ + tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ + tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled + tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ + tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ + tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp + tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/derp from tailscale.com/derp/derphttp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ + tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ + tailscale.com/disco from tailscale.com/net/tstun+ + tailscale.com/drive from tailscale.com/ipn+ + tailscale.com/envknob from tailscale.com/cmd/tailscaled+ + tailscale.com/envknob/featureknob from tailscale.com/ipn/ipnlocal + tailscale.com/feature from tailscale.com/cmd/tailscaled+ + tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscaled+ + tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister + tailscale.com/health from tailscale.com/cmd/tailscaled+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ + tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ + tailscale.com/internal/noiseconn from tailscale.com/control/controlclient + tailscale.com/ipn from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled + tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+ + 
tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver + tailscale.com/ipn/store from tailscale.com/cmd/tailscaled + tailscale.com/ipn/store/mem from tailscale.com/ipn/store + tailscale.com/kube/kubetypes from tailscale.com/envknob + tailscale.com/log/filelogger from tailscale.com/logpolicy + tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal + tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ + tailscale.com/logtail from tailscale.com/cmd/tailscaled+ + tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ + tailscale.com/metrics from tailscale.com/health+ + tailscale.com/net/ace from tailscale.com/control/controlhttp + tailscale.com/net/bakedroots from tailscale.com/net/tlsdial + 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock + tailscale.com/net/connstats from tailscale.com/net/tstun+ + tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ + tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ + tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ + tailscale.com/net/dns/resolver from tailscale.com/net/dns+ + tailscale.com/net/dnscache from tailscale.com/control/controlclient+ + tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ + tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/netaddr from tailscale.com/ipn+ + tailscale.com/net/netcheck from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/neterror from tailscale.com/net/batching+ + tailscale.com/net/netkernelconf from tailscale.com/ipn/ipnlocal + tailscale.com/net/netknob from tailscale.com/logpolicy+ + tailscale.com/net/netmon from tailscale.com/cmd/tailscaled+ + tailscale.com/net/netns from tailscale.com/cmd/tailscaled+ + tailscale.com/net/netutil from tailscale.com/control/controlclient+ + tailscale.com/net/netx from tailscale.com/control/controlclient+ + tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/packet/checksum from tailscale.com/net/tstun + tailscale.com/net/ping from tailscale.com/net/netcheck+ + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ + tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock + tailscale.com/net/sockstats from tailscale.com/control/controlclient+ + tailscale.com/net/stun from tailscale.com/ipn/localapi+ + tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ + tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial + tailscale.com/net/tsaddr from tailscale.com/ipn+ + tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ + tailscale.com/net/tshttpproxy from tailscale.com/cmd/tailscaled+ + tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ + tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/omit from tailscale.com/ipn/conffile + tailscale.com/paths from tailscale.com/cmd/tailscaled+ + tailscale.com/proxymap from tailscale.com/tsd + tailscale.com/safesocket from tailscale.com/cmd/tailscaled+ + tailscale.com/syncs from tailscale.com/cmd/tailscaled+ + tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ + tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock + tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tka from tailscale.com/control/controlclient+ + tailscale.com/tsconst from tailscale.com/net/netns + tailscale.com/tsd from tailscale.com/cmd/tailscaled+ + tailscale.com/tstime from 
tailscale.com/control/controlclient+ + tailscale.com/tstime/mono from tailscale.com/net/tstun+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter + tailscale.com/tsweb from tailscale.com/util/eventbus + tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ + tailscale.com/types/empty from tailscale.com/ipn+ + tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled + tailscale.com/types/ipproto from tailscale.com/ipn+ + tailscale.com/types/key from tailscale.com/cmd/tailscaled+ + tailscale.com/types/lazy from tailscale.com/hostinfo+ + tailscale.com/types/logger from tailscale.com/appc+ + tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ + tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogtype from tailscale.com/net/connstats + tailscale.com/types/netmap from tailscale.com/control/controlclient+ + tailscale.com/types/nettype from tailscale.com/ipn/localapi+ + tailscale.com/types/opt from tailscale.com/control/controlknobs+ + tailscale.com/types/persist from tailscale.com/control/controlclient+ + tailscale.com/types/preftype from tailscale.com/ipn+ + tailscale.com/types/ptr from tailscale.com/control/controlclient+ + tailscale.com/types/result from tailscale.com/util/lineiter + tailscale.com/types/structs from tailscale.com/control/controlclient+ + tailscale.com/types/tkatype from tailscale.com/control/controlclient+ + tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/clientmetric from tailscale.com/appc+ + tailscale.com/util/cloudenv from tailscale.com/hostinfo+ + tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ + 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/util/dirwalk from tailscale.com/metrics + tailscale.com/util/dnsname from tailscale.com/appc+ + tailscale.com/util/eventbus from tailscale.com/cmd/tailscaled+ + tailscale.com/util/execqueue from tailscale.com/appc+ + tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal + tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth + 💣 tailscale.com/util/hashx from tailscale.com/util/deephash + tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/lineiter from tailscale.com/hostinfo+ + tailscale.com/util/mak from tailscale.com/appc+ + tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ + tailscale.com/util/must from tailscale.com/logpolicy+ + tailscale.com/util/nocasemaps from tailscale.com/types/ipproto + tailscale.com/util/osdiag from tailscale.com/ipn/localapi + tailscale.com/util/osshare from tailscale.com/cmd/tailscaled + tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/race from tailscale.com/net/dns/resolver + tailscale.com/util/racebuild from tailscale.com/logpolicy + tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock + tailscale.com/util/set from tailscale.com/control/controlclient+ + tailscale.com/util/singleflight from tailscale.com/control/controlclient+ + tailscale.com/util/slicesx from tailscale.com/appc+ + tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/cmd/tailscaled+ + 
tailscale.com/util/syspolicy/ptype from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/testenv from tailscale.com/control/controlclient+ + tailscale.com/util/usermetric from tailscale.com/health+ + tailscale.com/util/vizerror from tailscale.com/tailcfg+ + tailscale.com/util/winutil from tailscale.com/ipn/ipnauth + tailscale.com/util/zstdframe from tailscale.com/control/controlclient + tailscale.com/version from tailscale.com/cmd/tailscaled+ + tailscale.com/version/distro from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/filter from tailscale.com/control/controlclient+ + tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+ + 💣 tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/netlog from tailscale.com/wgengine + tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ + tailscale.com/wgengine/router from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ + tailscale.com/wgengine/wglog from tailscale.com/wgengine + golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box + golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 + golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/hkdf from tailscale.com/control/controlbase + golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/nacl/box from tailscale.com/types/key + golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box + golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device + golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ + golang.org/x/exp/maps from tailscale.com/ipn/store/mem + golang.org/x/net/bpf from github.com/mdlayher/genetlink+ + golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/http/httpguts from golang.org/x/net/http2+ + golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ + golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2/hpack from golang.org/x/net/http2+ + golang.org/x/net/icmp from tailscale.com/net/ping + golang.org/x/net/idna from golang.org/x/net/http/httpguts+ + golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 + golang.org/x/net/internal/iana from golang.org/x/net/icmp+ + golang.org/x/net/internal/socket from golang.org/x/net/icmp+ + golang.org/x/net/internal/socks from golang.org/x/net/proxy + golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ + golang.org/x/net/ipv6 from github.com/tailscale/wireguard-go/conn+ + golang.org/x/net/proxy from tailscale.com/net/netns + golang.org/x/sync/errgroup from github.com/mdlayher/socket+ + golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ + golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ + golang.org/x/term from tailscale.com/logpolicy + 
golang.org/x/text/secure/bidirule from golang.org/x/net/idna + golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ + golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ + golang.org/x/text/unicode/norm from golang.org/x/net/idna + golang.org/x/time/rate from tailscale.com/derp + bufio from compress/flate+ + bytes from bufio+ + cmp from encoding/json+ + compress/flate from compress/gzip + compress/gzip from golang.org/x/net/http2+ + container/list from crypto/tls+ + context from crypto/tls+ + crypto from crypto/ecdh+ + crypto/aes from crypto/internal/hpke+ + crypto/cipher from crypto/aes+ + crypto/des from crypto/tls+ + crypto/dsa from crypto/x509 + crypto/ecdh from crypto/ecdsa+ + crypto/ecdsa from crypto/tls+ + crypto/ed25519 from crypto/tls+ + crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ + crypto/hmac from crypto/tls+ + crypto/internal/boring from crypto/aes+ + crypto/internal/boring/bbig from crypto/ecdsa+ + crypto/internal/boring/sig from crypto/internal/boring + crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/fips140 from crypto/fips140+ + crypto/internal/fips140/aes from crypto/aes+ + crypto/internal/fips140/aes/gcm from crypto/cipher+ + crypto/internal/fips140/alias from crypto/cipher+ + crypto/internal/fips140/bigmod from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/check from crypto/fips140+ + crypto/internal/fips140/drbg from crypto/internal/fips140/aes/gcm+ + crypto/internal/fips140/ecdh from crypto/ecdh + crypto/internal/fips140/ecdsa from crypto/ecdsa + crypto/internal/fips140/ed25519 from crypto/ed25519 + crypto/internal/fips140/edwards25519 from crypto/internal/fips140/ed25519 + crypto/internal/fips140/edwards25519/field from crypto/ecdh+ + crypto/internal/fips140/hkdf from crypto/hkdf+ + crypto/internal/fips140/hmac from crypto/hmac+ + crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/nistec from crypto/ecdsa+ + crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec + crypto/internal/fips140/rsa from crypto/rsa + crypto/internal/fips140/sha256 from crypto/internal/fips140/check+ + crypto/internal/fips140/sha3 from crypto/internal/fips140/hmac+ + crypto/internal/fips140/sha512 from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ + crypto/internal/fips140/tls12 from crypto/tls + crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ + crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140hash from crypto/ecdsa+ + crypto/internal/fips140only from crypto/cipher+ + crypto/internal/hpke from crypto/tls + crypto/internal/impl from crypto/internal/fips140/aes+ + crypto/internal/randutil from crypto/dsa+ + crypto/internal/sysrand from crypto/internal/entropy+ + crypto/md5 from crypto/tls+ + crypto/rand from crypto/ed25519+ + crypto/rc4 from crypto/tls + crypto/rsa from crypto/tls+ + crypto/sha1 from crypto/tls+ + crypto/sha256 from crypto/tls+ + crypto/sha3 from crypto/internal/fips140hash + crypto/sha512 from crypto/ecdsa+ + crypto/subtle from crypto/cipher+ + crypto/tls from golang.org/x/net/http2+ + crypto/tls/internal/fips140tls from crypto/tls + crypto/x509 from crypto/tls+ + crypto/x509/pkix from crypto/x509 + embed from 
tailscale.com+ + encoding from encoding/json+ + encoding/asn1 from crypto/x509+ + encoding/base32 from github.com/go-json-experiment/json + encoding/base64 from encoding/json+ + encoding/binary from compress/gzip+ + encoding/hex from crypto/x509+ + encoding/json from expvar+ + encoding/pem from crypto/tls+ + errors from bufio+ + expvar from tailscale.com/health+ + flag from tailscale.com/cmd/tailscaled+ + fmt from compress/flate+ + hash from crypto+ + hash/crc32 from compress/gzip+ + hash/maphash from go4.org/mem + html from net/http/pprof+ + internal/abi from hash/maphash+ + internal/asan from internal/runtime/maps+ + internal/bisect from internal/godebug + internal/bytealg from bytes+ + internal/byteorder from crypto/cipher+ + internal/chacha8rand from math/rand/v2+ + internal/coverage/rtcov from runtime + internal/cpu from crypto/internal/fips140deps/cpu+ + internal/filepathlite from os+ + internal/fmtsort from fmt + internal/goarch from crypto/internal/fips140deps/cpu+ + internal/godebug from crypto/internal/fips140deps/godebug+ + internal/godebugs from internal/godebug+ + internal/goexperiment from hash/maphash+ + internal/goos from crypto/x509+ + internal/itoa from internal/poll+ + internal/msan from internal/runtime/maps+ + internal/nettrace from net+ + internal/oserror from io/fs+ + internal/poll from net+ + internal/profile from net/http/pprof + internal/profilerecord from runtime+ + internal/race from internal/runtime/maps+ + internal/reflectlite from context+ + internal/runtime/atomic from internal/runtime/exithook+ + internal/runtime/cgroup from runtime + internal/runtime/exithook from runtime + internal/runtime/gc from runtime + internal/runtime/maps from reflect+ + internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ + internal/runtime/sys from crypto/subtle+ + internal/runtime/syscall from internal/runtime/cgroup+ + internal/singleflight from net + internal/stringslite from embed+ + internal/sync from sync+ + internal/synctest from sync + internal/syscall/execenv from os+ + internal/syscall/unix from crypto/internal/sysrand+ + internal/testlog from os + internal/trace/tracev2 from runtime+ + internal/unsafeheader from internal/reflectlite+ + io from bufio+ + io/fs from crypto/x509+ + iter from bytes+ + log from expvar+ + log/internal from log + maps from crypto/x509+ + math from compress/flate+ + math/big from crypto/dsa+ + math/bits from bytes+ + math/rand from github.com/mdlayher/netlink+ + math/rand/v2 from crypto/ecdsa+ + mime from mime/multipart+ + mime/multipart from net/http + mime/quotedprintable from mime/multipart + net from crypto/tls+ + net/http from expvar+ + net/http/httptrace from golang.org/x/net/http2+ + net/http/internal from net/http + net/http/internal/ascii from net/http + net/http/internal/httpcommon from net/http + net/http/pprof from tailscale.com/cmd/tailscaled+ + net/netip from crypto/x509+ + net/textproto from golang.org/x/net/http/httpguts+ + net/url from crypto/x509+ + os from crypto/internal/sysrand+ + os/exec from tailscale.com/hostinfo+ + os/signal from tailscale.com/cmd/tailscaled + os/user from tailscale.com/ipn/ipnauth+ + path from io/fs+ + path/filepath from crypto/x509+ + reflect from crypto/x509+ + regexp from internal/profile+ + regexp/syntax from regexp + runtime from crypto/internal/fips140+ + runtime/debug from github.com/klauspost/compress/zstd+ + runtime/pprof from net/http/pprof+ + runtime/trace from net/http/pprof + slices from crypto/tls+ + sort from compress/flate+ + strconv 
from compress/flate+ + strings from bufio+ + sync from compress/flate+ + sync/atomic from context+ + syscall from crypto/internal/sysrand+ + text/tabwriter from runtime/pprof + time from compress/gzip+ + unicode from bytes+ + unicode/utf16 from crypto/x509+ + unicode/utf8 from bufio+ + unique from net/netip + unsafe from bytes+ + weak from crypto/internal/fips140cache+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 595296229..86e75660a 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -1,5 +1,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) + filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus + filippo.io/edwards25519/field from filippo.io/edwards25519 github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart @@ -10,8 +12,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache + github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink + github.com/kballard/go-shellquote from tailscale.com/cmd/tailscale/cli github.com/klauspost/compress from github.com/klauspost/compress/zstd github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd @@ -19,11 +23,19 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd + github.com/mattn/go-colorable from tailscale.com/cmd/tailscale/cli + github.com/mattn/go-isatty from github.com/mattn/go-colorable+ github.com/mdlayher/genetlink from tailscale.com/net/tstun 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ + github.com/peterbourgon/ff/v3 from github.com/peterbourgon/ff/v3/ffcli+ + github.com/peterbourgon/ff/v3/ffcli from tailscale.com/cmd/tailscale/cli+ + github.com/peterbourgon/ff/v3/internal from github.com/peterbourgon/ff/v3 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf + github.com/skip2/go-qrcode from tailscale.com/cmd/tailscale/cli + github.com/skip2/go-qrcode/bitset from github.com/skip2/go-qrcode+ + github.com/skip2/go-qrcode/reedsolomon from github.com/skip2/go-qrcode github.com/tailscale/hujson from tailscale.com/ipn/conffile github.com/tailscale/peercred from tailscale.com/ipn/ipnauth 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ @@ -34,16 +46,24 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ 
github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ + github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli 💣 go4.org/mem from tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal+ tailscale.com/atomicfile from tailscale.com/ipn+ + tailscale.com/client/local from tailscale.com/client/tailscale+ + tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ + tailscale.com/clientupdate from tailscale.com/cmd/tailscale/cli + tailscale.com/clientupdate/distsign from tailscale.com/clientupdate + tailscale.com/cmd/tailscale/cli from tailscale.com/cmd/tailscaled + tailscale.com/cmd/tailscale/cli/ffcomplete from tailscale.com/cmd/tailscale/cli + tailscale.com/cmd/tailscale/cli/ffcomplete/internal from tailscale.com/cmd/tailscale/cli/ffcomplete tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/controlclient+ tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ @@ -56,11 +76,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature from tailscale.com/cmd/tailscaled+ tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled - tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister + tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister+ tailscale.com/health from tailscale.com/cmd/tailscaled+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient + tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli + tailscale.com/internal/noiseconn from tailscale.com/control/controlclient+ tailscale.com/ipn from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ @@ -72,13 +94,14 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn/store from tailscale.com/cmd/tailscaled tailscale.com/ipn/store/mem from tailscale.com/ipn/store tailscale.com/kube/kubetypes from tailscale.com/envknob + tailscale.com/licenses from tailscale.com/cmd/tailscale/cli tailscale.com/log/filelogger from tailscale.com/logpolicy tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/health+ - tailscale.com/net/ace from 
tailscale.com/control/controlhttp + tailscale.com/net/ace from tailscale.com/control/controlhttp+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/connstats from tailscale.com/net/tstun+ @@ -113,6 +136,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/tshttpproxy from tailscale.com/cmd/tailscaled+ tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/cmd/tailscaled+ tailscale.com/proxymap from tailscale.com/tsd @@ -121,8 +145,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/spf13/cobra from tailscale.com/cmd/tailscale/cli/ffcomplete+ tailscale.com/tka from tailscale.com/control/controlclient+ - tailscale.com/tsconst from tailscale.com/net/netns + tailscale.com/tsconst from tailscale.com/net/netns+ tailscale.com/tsd from tailscale.com/cmd/tailscaled+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ @@ -154,6 +179,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ + tailscale.com/util/cmpver from tailscale.com/clientupdate tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/util/dirwalk from tailscale.com/metrics @@ -172,6 +198,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/osdiag from tailscale.com/ipn/localapi tailscale.com/util/osshare from tailscale.com/cmd/tailscaled tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ @@ -241,11 +268,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/derp + archive/tar from tailscale.com/clientupdate bufio from compress/flate+ bytes from bufio+ cmp from encoding/json+ - compress/flate from compress/gzip + compress/flate from compress/gzip+ compress/gzip from golang.org/x/net/http2+ + compress/zlib from image/png container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ @@ -325,9 +354,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de flag from tailscale.com/cmd/tailscaled+ fmt from compress/flate+ hash from crypto+ + hash/adler32 from compress/zlib hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem html from net/http/pprof+ + image from github.com/skip2/go-qrcode+ + image/color from 
github.com/skip2/go-qrcode+ + image/png from github.com/skip2/go-qrcode internal/abi from hash/maphash+ internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug @@ -372,6 +405,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ + io/ioutil from github.com/skip2/go-qrcode iter from bytes+ log from expvar+ log/internal from log @@ -387,8 +421,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net from crypto/tls+ net/http from expvar+ net/http/httptrace from golang.org/x/net/http2+ - net/http/internal from net/http - net/http/internal/ascii from net/http + net/http/httputil from tailscale.com/cmd/tailscale/cli + net/http/internal from net/http+ + net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/cmd/tailscaled+ net/netip from crypto/x509+ @@ -396,7 +431,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net/url from crypto/x509+ os from crypto/internal/sysrand+ os/exec from tailscale.com/hostinfo+ - os/signal from tailscale.com/cmd/tailscaled + os/signal from tailscale.com/cmd/tailscaled+ os/user from tailscale.com/ipn/ipnauth+ path from io/fs+ path/filepath from crypto/x509+ @@ -414,7 +449,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de sync from compress/flate+ sync/atomic from context+ syscall from crypto/internal/sysrand+ - text/tabwriter from runtime/pprof + text/tabwriter from runtime/pprof+ time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 22b93e0a1..709d96edd 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -40,9 +40,6 @@ func Requires(ft FeatureTag) set.Set[FeatureTag] { s := set.Set[FeatureTag]{} var add func(FeatureTag) add = func(ft FeatureTag) { - if !ft.IsOmittable() { - return - } s.Add(ft) for _, dep := range Features[ft].Deps { add(dep) diff --git a/feature/featuretags/featuretags_test.go b/feature/featuretags/featuretags_test.go index b1524ce4f..893ab0e6a 100644 --- a/feature/featuretags/featuretags_test.go +++ b/feature/featuretags/featuretags_test.go @@ -36,6 +36,10 @@ func TestRequires(t *testing.T) { in: "drive", want: setOf("drive"), }, + { + in: "cli", + want: setOf("cli"), + }, { in: "serve", want: setOf("serve", "netstack"), From a45473c4c58832073761c4619d1c912e2a49c7fa Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Tue, 30 Sep 2025 05:33:50 -0400 Subject: [PATCH 1437/1708] cmd/k8s-operator: add DNS policy and config support to ProxyClass (#16887) DNS configuration support to ProxyClass, allowing users to customize DNS resolution for Tailscale proxy pods. 
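For example, a ProxyClass using the new fields might look like the following sketch (the metadata name, nameserver address, and search domain are illustrative placeholders; the field paths follow the CRD additions in this change, under spec.statefulSet.pod):

    apiVersion: tailscale.com/v1alpha1
    kind: ProxyClass
    metadata:
      name: dns-example        # placeholder name
    spec:
      statefulSet:
        pod:
          dnsPolicy: ClusterFirstWithHostNet
          dnsConfig:
            nameservers:
              - 1.1.1.1        # example resolver
            searches:
              - example.com    # example search domain
            options:
              - name: ndots
                value: "2"
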
Fixes #16886 Signed-off-by: Raj Singh --- .../crds/tailscale.com_proxyclasses.yaml | 56 +++++++++++++++++++ .../deploy/manifests/operator.yaml | 56 +++++++++++++++++++ cmd/k8s-operator/sts.go | 6 ++ cmd/k8s-operator/sts_test.go | 13 +++++ k8s-operator/api.md | 2 + .../apis/v1alpha1/types_proxyclass.go | 11 ++++ .../apis/v1alpha1/zz_generated.deepcopy.go | 10 ++++ 7 files changed, 154 insertions(+) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index cb9e0b991..516e75f48 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -1046,6 +1046,62 @@ spec: type: object additionalProperties: type: string + dnsConfig: + description: |- + DNSConfig defines DNS parameters for the proxy Pod in addition to those generated from DNSPolicy. + When DNSPolicy is set to "None", DNSConfig must be specified. + https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + type: object + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + type: array + items: + type: string + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + type: array + items: + description: PodDNSConfigOption defines DNS resolver options of a pod. + type: object + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's value. + type: string + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + type: array + items: + type: string + x-kubernetes-list-type: atomic + dnsPolicy: + description: |- + DNSPolicy defines how DNS will be configured for the proxy Pod. + By default the Tailscale Kubernetes Operator does not set a DNS policy (uses cluster default). + https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + type: string + enum: + - ClusterFirstWithHostNet + - ClusterFirst + - Default + - None imagePullSecrets: description: |- Proxy Pod's image pull Secrets. diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 9c19554aa..520d17eae 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -1574,6 +1574,62 @@ spec: Annotations must be valid Kubernetes annotations. https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set type: object + dnsConfig: + description: |- + DNSConfig defines DNS parameters for the proxy Pod in addition to those generated from DNSPolicy. + When DNSPolicy is set to "None", DNSConfig must be specified. + https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. 
+ This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + DNSPolicy defines how DNS will be configured for the proxy Pod. + By default the Tailscale Kubernetes Operator does not set a DNS policy (uses cluster default). + https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + enum: + - ClusterFirstWithHostNet + - ClusterFirst + - Default + - None + type: string imagePullSecrets: description: |- Proxy Pod's image pull Secrets. diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 80c9ca806..6300341b7 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -906,6 +906,12 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, ss.Spec.Template.Spec.Tolerations = wantsPod.Tolerations ss.Spec.Template.Spec.PriorityClassName = wantsPod.PriorityClassName ss.Spec.Template.Spec.TopologySpreadConstraints = wantsPod.TopologySpreadConstraints + if wantsPod.DNSPolicy != nil { + ss.Spec.Template.Spec.DNSPolicy = *wantsPod.DNSPolicy + } + if wantsPod.DNSConfig != nil { + ss.Spec.Template.Spec.DNSConfig = wantsPod.DNSConfig + } // Update containers. 
updateContainer := func(overlay *tsapi.Container, base corev1.Container) corev1.Container { diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index e2cb2962f..ea28e77a1 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -87,6 +87,15 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { }, }, }, + DNSPolicy: ptr.To(corev1.DNSClusterFirstWithHostNet), + DNSConfig: &corev1.PodDNSConfig{ + Nameservers: []string{"1.1.1.1", "8.8.8.8"}, + Searches: []string{"example.com", "test.local"}, + Options: []corev1.PodDNSConfigOption{ + {Name: "ndots", Value: ptr.To("2")}, + {Name: "edns0"}, + }, + }, TailscaleContainer: &tsapi.Container{ SecurityContext: &corev1.SecurityContext{ Privileged: ptr.To(true), @@ -200,6 +209,8 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Spec.InitContainers[0].Image = "ghcr.io/my-repo/tailscale:v0.01testsomething" wantSS.Spec.Template.Spec.InitContainers[0].ImagePullPolicy = "IfNotPresent" wantSS.Spec.Template.Spec.PriorityClassName = proxyClassAllOpts.Spec.StatefulSet.Pod.PriorityClassName + wantSS.Spec.Template.Spec.DNSPolicy = corev1.DNSClusterFirstWithHostNet + wantSS.Spec.Template.Spec.DNSConfig = proxyClassAllOpts.Spec.StatefulSet.Pod.DNSConfig gotSS := applyProxyClassToStatefulSet(proxyClassAllOpts, nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { @@ -239,6 +250,8 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Spec.Containers[0].ImagePullPolicy = "IfNotPresent" wantSS.Spec.Template.Spec.Containers[0].Image = "ghcr.io/my-repo/tailscale:v0.01testsomething" wantSS.Spec.Template.Spec.PriorityClassName = proxyClassAllOpts.Spec.StatefulSet.Pod.PriorityClassName + wantSS.Spec.Template.Spec.DNSPolicy = corev1.DNSClusterFirstWithHostNet + wantSS.Spec.Template.Spec.DNSConfig = proxyClassAllOpts.Spec.StatefulSet.Pod.DNSConfig gotSS = applyProxyClassToStatefulSet(proxyClassAllOpts, userspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { t.Errorf("Unexpected result applying ProxyClass with all options to a StatefulSet for a userspace proxy (-got +want):\n%s", diff) diff --git a/k8s-operator/api.md b/k8s-operator/api.md index b1c56c068..d75a21e37 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -537,6 +537,8 @@ _Appears in:_ | `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | Proxy Pod's tolerations.
                    By default Tailscale Kubernetes operator does not apply any
                    tolerations.
                    https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | | `topologySpreadConstraints` _[TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#topologyspreadconstraint-v1-core) array_ | Proxy Pod's topology spread constraints.
                    By default Tailscale Kubernetes operator does not apply any topology spread constraints.
                    https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ | | | | `priorityClassName` _string_ | PriorityClassName for the proxy Pod.
                    By default Tailscale Kubernetes operator does not apply any priority class.
                    https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | +| `dnsPolicy` _[DNSPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#dnspolicy-v1-core)_ | DNSPolicy defines how DNS will be configured for the proxy Pod.
                    By default the Tailscale Kubernetes Operator does not set a DNS policy (uses cluster default).
                    https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy | | Enum: [ClusterFirstWithHostNet ClusterFirst Default None]
                    | +| `dnsConfig` _[PodDNSConfig](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#poddnsconfig-v1-core)_ | DNSConfig defines DNS parameters for the proxy Pod in addition to those generated from DNSPolicy.
                    When DNSPolicy is set to "None", DNSConfig must be specified.
                    https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config | | | #### PortRange diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index ea4e6a27c..4026f9084 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -303,6 +303,17 @@ type Pod struct { // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling // +optional PriorityClassName string `json:"priorityClassName,omitempty"` + // DNSPolicy defines how DNS will be configured for the proxy Pod. + // By default the Tailscale Kubernetes Operator does not set a DNS policy (uses cluster default). + // https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + // +kubebuilder:validation:Enum=ClusterFirstWithHostNet;ClusterFirst;Default;None + // +optional + DNSPolicy *corev1.DNSPolicy `json:"dnsPolicy,omitempty"` + // DNSConfig defines DNS parameters for the proxy Pod in addition to those generated from DNSPolicy. + // When DNSPolicy is set to "None", DNSConfig must be specified. + // https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + // +optional + DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"` } // +kubebuilder:validation:XValidation:rule="!(has(self.serviceMonitor) && self.serviceMonitor.enable && !self.enable)",message="ServiceMonitor can only be enabled if metrics are enabled" diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index 3fd64c28e..5684fd5f8 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -574,6 +574,16 @@ func (in *Pod) DeepCopyInto(out *Pod) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DNSPolicy != nil { + in, out := &in.DNSPolicy, &out.DNSPolicy + *out = new(corev1.DNSPolicy) + **out = **in + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(corev1.PodDNSConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod. From bcd79b161acbf90dfcfe71cbde847a320a41b7fe Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 22:10:28 -0700 Subject: [PATCH 1438/1708] feature/featuretags: add option to turn off DNS Saves 328 KB (2.5%) off the minimal binary. For IoT devices that don't need MagicDNS (e.g. they don't make outbound connections), this provides a knob to disable all the DNS functionality. Rather than a massive refactor today, this uses constant false values as a deadcode sledgehammer, guided by shotizam to find the largest DNS functions which survived deadcode. A future refactor could make it so that the net/dns/resolver and publicdns packages don't even show up in the import graph (along with their imports) but really it's already pretty good looking with just these consts, so it's not at the top of my list to refactor it more soon. Also do the same in a few places with the ACME (cert) functionality, as I saw those while searching for DNS stuff. 
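As a usage sketch (assuming the standard Go -tags mechanism; the generated feature_dns_disabled.go below is gated on this tag, and a fully minimal tag set is whatever ./cmd/featuretags --min emits, as wired into the Makefile's depaware targets), a DNS-free tailscaled could be built with something like:

    ./tool/go build -tags=ts_omit_dns tailscale.com/cmd/tailscaled
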
Updates #12614 Change-Id: I8e459f595c2fde68ca16503ff61c8ab339871f97 Signed-off-by: Brad Fitzpatrick --- control/controlclient/direct.go | 4 +++ feature/buildfeatures/feature_dns_disabled.go | 13 +++++++++ feature/buildfeatures/feature_dns_enabled.go | 13 +++++++++ feature/feature.go | 7 ++++- feature/featuretags/featuretags.go | 4 +++ ipn/ipnlocal/local.go | 9 ++++++ ipn/ipnlocal/node_backend.go | 4 +++ ipn/ipnlocal/peerapi.go | 10 ++++++- ipn/localapi/localapi.go | 8 +++++ net/dns/manager.go | 29 ++++++++++++++++++- net/dns/manager_linux.go | 3 +- net/dns/osconfig.go | 5 ++++ net/dns/publicdns/publicdns.go | 5 ++++ net/dns/resolver/debug.go | 4 +++ net/dns/resolver/forwarder.go | 7 +++++ net/dns/resolver/tsdns.go | 23 +++++++++++++++ 16 files changed, 144 insertions(+), 4 deletions(-) create mode 100644 feature/buildfeatures/feature_dns_disabled.go create mode 100644 feature/buildfeatures/feature_dns_enabled.go diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 6d18e306f..31f41eac8 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -30,6 +30,7 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/envknob" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn/ipnstate" @@ -1580,6 +1581,9 @@ func (c *Direct) setDNSNoise(ctx context.Context, req *tailcfg.SetDNSRequest) er // SetDNS sends the SetDNSRequest request to the control plane server, // requesting a DNS record be created or updated. func (c *Direct) SetDNS(ctx context.Context, req *tailcfg.SetDNSRequest) (err error) { + if !buildfeatures.HasACME { + return feature.ErrUnavailable + } metricSetDNS.Add(1) defer func() { if err != nil { diff --git a/feature/buildfeatures/feature_dns_disabled.go b/feature/buildfeatures/feature_dns_disabled.go new file mode 100644 index 000000000..30d7379cb --- /dev/null +++ b/feature/buildfeatures/feature_dns_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_dns + +package buildfeatures + +// HasDNS is whether the binary was built with support for modular feature "MagicDNS and system DNS configuration support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_dns" build tag. +// It's a const so it can be used for dead code elimination. +const HasDNS = false diff --git a/feature/buildfeatures/feature_dns_enabled.go b/feature/buildfeatures/feature_dns_enabled.go new file mode 100644 index 000000000..962f2596b --- /dev/null +++ b/feature/buildfeatures/feature_dns_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_dns + +package buildfeatures + +// HasDNS is whether the binary was built with support for modular feature "MagicDNS and system DNS configuration support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_dns" build tag. +// It's a const so it can be used for dead code elimination. +const HasDNS = true diff --git a/feature/feature.go b/feature/feature.go index 5976d7f5a..70f05d192 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -4,7 +4,12 @@ // Package feature tracks which features are linked into the binary. 
package feature -import "reflect" +import ( + "errors" + "reflect" +) + +var ErrUnavailable = errors.New("feature not included in this build") var in = map[string]bool{} diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 709d96edd..5c5352657 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -137,6 +137,10 @@ var Features = map[FeatureTag]FeatureMeta{ "portlist": {"PortList", "Optionally advertise listening service ports", nil}, "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, "posture": {"Posture", "Device posture checking support", nil}, + "dns": { + Sym: "DNS", + Desc: "MagicDNS and system DNS configuration support", + }, "netlog": { Sym: "NetLog", Desc: "Network flow logging support", diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c9fff50c3..3b55fd324 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -729,6 +729,9 @@ func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Tim // GetDNSOSConfig returns the base OS DNS configuration, as seen by the DNS manager. func (b *LocalBackend) GetDNSOSConfig() (dns.OSConfig, error) { + if !buildfeatures.HasDNS { + panic("unreachable") + } manager, ok := b.sys.DNSManager.GetOK() if !ok { return dns.OSConfig{}, errors.New("DNS manager not available") @@ -740,6 +743,9 @@ func (b *LocalBackend) GetDNSOSConfig() (dns.OSConfig, error) { // the raw DNS response and the resolvers that are were able to handle the query (the internal forwarder // may race multiple resolvers). func (b *LocalBackend) QueryDNS(name string, queryType dnsmessage.Type) (res []byte, resolvers []*dnstype.Resolver, err error) { + if !buildfeatures.HasDNS { + return nil, nil, feature.ErrUnavailable + } manager, ok := b.sys.DNSManager.GetOK() if !ok { return nil, nil, errors.New("DNS manager not available") @@ -6189,6 +6195,9 @@ func (b *LocalBackend) TestOnlyPublicKeys() (machineKey key.MachinePublic, nodeK // This is the low-level interface. Other layers will provide more // friendly options to get HTTPS certs. func (b *LocalBackend) SetDNS(ctx context.Context, name, value string) error { + if !buildfeatures.HasACME { + return feature.ErrUnavailable + } req := &tailcfg.SetDNSRequest{ Version: 1, // TODO(bradfitz,maisem): use tailcfg.CurrentCapabilityVersion when using the Noise transport Type: "TXT", diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index a6e4b51f1..b1ce9e07c 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -12,6 +12,7 @@ import ( "sync/atomic" "go4.org/netipx" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/net/dns" "tailscale.com/net/tsaddr" @@ -630,6 +631,9 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. if nm == nil { return nil } + if !buildfeatures.HasDNS { + return &dns.Config{} + } // If the current node's key is expired, then we don't program any DNS // configuration into the operating system. 
This ensures that if the diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 9d2b49a38..fb0d80d18 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -26,6 +26,7 @@ import ( "golang.org/x/net/dns/dnsmessage" "golang.org/x/net/http/httpguts" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/net/netaddr" @@ -636,6 +637,10 @@ func (h *peerAPIHandler) handleServeMetrics(w http.ResponseWriter, r *http.Reque } func (h *peerAPIHandler) handleServeDNSFwd(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDNS { + http.NotFound(w, r) + return + } if !h.canDebug() { http.Error(w, "denied; no debug access", http.StatusForbidden) return @@ -649,6 +654,9 @@ func (h *peerAPIHandler) handleServeDNSFwd(w http.ResponseWriter, r *http.Reques } func (h *peerAPIHandler) replyToDNSQueries() bool { + if !buildfeatures.HasDNS { + return false + } if h.isSelf { // If the peer is owned by the same user, just allow it // without further checks. @@ -700,7 +708,7 @@ func (h *peerAPIHandler) replyToDNSQueries() bool { // handleDNSQuery implements a DoH server (RFC 8484) over the peerapi. // It's not over HTTPS as the spec dictates, but rather HTTP-over-WireGuard. func (h *peerAPIHandler) handleDNSQuery(w http.ResponseWriter, r *http.Request) { - if h.ps.resolver == nil { + if !buildfeatures.HasDNS || h.ps.resolver == nil { http.Error(w, "DNS not wired up", http.StatusNotImplemented) return } diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index ab556702d..404516942 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -1916,6 +1916,10 @@ func (h *Handler) serveUpdateCheck(w http.ResponseWriter, r *http.Request) { // serveDNSOSConfig serves the current system DNS configuration as a JSON object, if // supported by the OS. func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDNS { + http.NotFound(w, r) + return + } if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return @@ -1959,6 +1963,10 @@ func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { // // The response if successful is a DNSQueryResponse JSON object. func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDNS { + http.NotFound(w, r) + return + } if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return diff --git a/net/dns/manager.go b/net/dns/manager.go index 4a5c4925c..edf156ece 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -20,6 +20,7 @@ import ( "time" "tailscale.com/control/controlknobs" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dns/resolver" "tailscale.com/net/netmon" @@ -71,6 +72,9 @@ type Manager struct { // // knobs may be nil. func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, dialer *tsdial.Dialer, linkSel resolver.ForwardLinkSelector, knobs *controlknobs.Knobs, goos string) *Manager { + if !buildfeatures.HasDNS { + return nil + } if dialer == nil { panic("nil Dialer") } @@ -97,7 +101,12 @@ func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, } // Resolver returns the Manager's DNS Resolver. 
-func (m *Manager) Resolver() *resolver.Resolver { return m.resolver } +func (m *Manager) Resolver() *resolver.Resolver { + if !buildfeatures.HasDNS { + return nil + } + return m.resolver +} // RecompileDNSConfig recompiles the last attempted DNS configuration, which has // the side effect of re-querying the OS's interface nameservers. This should be used @@ -111,6 +120,9 @@ func (m *Manager) Resolver() *resolver.Resolver { return m.resolver } // // It returns [ErrNoDNSConfig] if [Manager.Set] has never been called. func (m *Manager) RecompileDNSConfig() error { + if !buildfeatures.HasDNS { + return nil + } m.mu.Lock() defer m.mu.Unlock() if m.config != nil { @@ -120,6 +132,9 @@ func (m *Manager) RecompileDNSConfig() error { } func (m *Manager) Set(cfg Config) error { + if !buildfeatures.HasDNS { + return nil + } m.mu.Lock() defer m.mu.Unlock() return m.setLocked(cfg) @@ -127,6 +142,9 @@ func (m *Manager) Set(cfg Config) error { // GetBaseConfig returns the current base OS DNS configuration as provided by the OSConfigurator. func (m *Manager) GetBaseConfig() (OSConfig, error) { + if !buildfeatures.HasDNS { + panic("unreachable") + } return m.os.GetBaseConfig() } @@ -559,6 +577,9 @@ func (m *Manager) HandleTCPConn(conn net.Conn, srcAddr netip.AddrPort) { } func (m *Manager) Down() error { + if !buildfeatures.HasDNS { + return nil + } m.ctxCancel() if err := m.os.Close(); err != nil { return err @@ -568,6 +589,9 @@ func (m *Manager) Down() error { } func (m *Manager) FlushCaches() error { + if !buildfeatures.HasDNS { + return nil + } return flushCaches() } @@ -577,6 +601,9 @@ func (m *Manager) FlushCaches() error { // // health must not be nil func CleanUp(logf logger.Logf, netMon *netmon.Monitor, health *health.Tracker, interfaceName string) { + if !buildfeatures.HasDNS { + return + } oscfg, err := NewOSConfigurator(logf, health, policyclient.Get(), nil, interfaceName) if err != nil { logf("creating dns cleanup: %v", err) diff --git a/net/dns/manager_linux.go b/net/dns/manager_linux.go index b2f8197ae..4304df261 100644 --- a/net/dns/manager_linux.go +++ b/net/dns/manager_linux.go @@ -16,6 +16,7 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/netaddr" "tailscale.com/types/logger" @@ -63,7 +64,7 @@ var ( // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. 
func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, interfaceName string) (ret OSConfigurator, err error) { - if distro.Get() == distro.JetKVM { + if !buildfeatures.HasDNS || distro.Get() == distro.JetKVM { return NewNoopManager() } diff --git a/net/dns/osconfig.go b/net/dns/osconfig.go index 842c5ac60..af4c0f01f 100644 --- a/net/dns/osconfig.go +++ b/net/dns/osconfig.go @@ -11,6 +11,7 @@ import ( "slices" "strings" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/logger" "tailscale.com/util/dnsname" ) @@ -158,6 +159,10 @@ func (a OSConfig) Equal(b OSConfig) bool { // Fixes https://github.com/tailscale/tailscale/issues/5669 func (a OSConfig) Format(f fmt.State, verb rune) { logger.ArgWriter(func(w *bufio.Writer) { + if !buildfeatures.HasDNS { + w.WriteString(`{DNS-unlinked}`) + return + } w.WriteString(`{Nameservers:[`) for i, ns := range a.Nameservers { if i != 0 { diff --git a/net/dns/publicdns/publicdns.go b/net/dns/publicdns/publicdns.go index 0dbd3ab82..b8a7f8809 100644 --- a/net/dns/publicdns/publicdns.go +++ b/net/dns/publicdns/publicdns.go @@ -17,6 +17,8 @@ import ( "strconv" "strings" "sync" + + "tailscale.com/feature/buildfeatures" ) // dohOfIP maps from public DNS IPs to their DoH base URL. @@ -163,6 +165,9 @@ const ( // populate is called once to initialize the knownDoH and dohIPsOfBase maps. func populate() { + if !buildfeatures.HasDNS { + return + } // Cloudflare // https://developers.cloudflare.com/1.1.1.1/ip-addresses/ addDoH("1.1.1.1", "https://cloudflare-dns.com/dns-query") diff --git a/net/dns/resolver/debug.go b/net/dns/resolver/debug.go index da195d49d..0f9b106bb 100644 --- a/net/dns/resolver/debug.go +++ b/net/dns/resolver/debug.go @@ -12,10 +12,14 @@ import ( "sync/atomic" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" ) func init() { + if !buildfeatures.HasDNS { + return + } health.RegisterDebugHandler("dnsfwd", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { n, _ := strconv.Atoi(r.FormValue("n")) if n <= 0 { diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index 105229fb8..a7a8932e8 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -27,6 +27,7 @@ import ( dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dns/publicdns" "tailscale.com/net/dnscache" @@ -249,6 +250,9 @@ type forwarder struct { } func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, health *health.Tracker, knobs *controlknobs.Knobs) *forwarder { + if !buildfeatures.HasDNS { + return nil + } if netMon == nil { panic("nil netMon") } @@ -750,6 +754,9 @@ var optDNSForwardUseRoutes = envknob.RegisterOptBool("TS_DEBUG_DNS_FORWARD_USE_R // // See tailscale/tailscale#12027. 
func ShouldUseRoutes(knobs *controlknobs.Knobs) bool { + if !buildfeatures.HasDNS { + return false + } switch runtime.GOOS { case "android", "ios": // On mobile platforms with lower memory limits (e.g., 50MB on iOS), diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go index 33fa9c3c0..93cbf3839 100644 --- a/net/dns/resolver/tsdns.go +++ b/net/dns/resolver/tsdns.go @@ -25,6 +25,8 @@ import ( dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dns/resolvconffile" "tailscale.com/net/netaddr" @@ -254,6 +256,9 @@ func New(logf logger.Logf, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, h func (r *Resolver) TestOnlySetHook(hook func(Config)) { r.saveConfigForTests = hook } func (r *Resolver) SetConfig(cfg Config) error { + if !buildfeatures.HasDNS { + return nil + } if r.saveConfigForTests != nil { r.saveConfigForTests(cfg) } @@ -279,6 +284,9 @@ func (r *Resolver) SetConfig(cfg Config) error { // Close shuts down the resolver and ensures poll goroutines have exited. // The Resolver cannot be used again after Close is called. func (r *Resolver) Close() { + if !buildfeatures.HasDNS { + return + } select { case <-r.closed: return @@ -296,6 +304,9 @@ func (r *Resolver) Close() { const dnsQueryTimeout = 10 * time.Second func (r *Resolver) Query(ctx context.Context, bs []byte, family string, from netip.AddrPort) ([]byte, error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } metricDNSQueryLocal.Add(1) select { case <-r.closed: @@ -323,6 +334,9 @@ func (r *Resolver) Query(ctx context.Context, bs []byte, family string, from net // GetUpstreamResolvers returns the resolvers that would be used to resolve // the given FQDN. func (r *Resolver) GetUpstreamResolvers(name dnsname.FQDN) []*dnstype.Resolver { + if !buildfeatures.HasDNS { + return nil + } return r.forwarder.GetUpstreamResolvers(name) } @@ -351,6 +365,9 @@ func parseExitNodeQuery(q []byte) *response { // and a nil error. // TODO: figure out if we even need an error result. func (r *Resolver) HandlePeerDNSQuery(ctx context.Context, q []byte, from netip.AddrPort, allowName func(name string) bool) (res []byte, err error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } metricDNSExitProxyQuery.Add(1) ch := make(chan packet, 1) @@ -427,6 +444,9 @@ var debugExitNodeDNSNetPkg = envknob.RegisterBool("TS_DEBUG_EXIT_NODE_DNS_NET_PK // response contains the pre-serialized response, which notably // includes the original question and its header. func handleExitNodeDNSQueryWithNetPkg(ctx context.Context, logf logger.Logf, resolver *net.Resolver, resp *response) (res []byte, err error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } logf = logger.WithPrefix(logf, "exitNodeDNSQueryWithNetPkg: ") if resp.Question.Class != dns.ClassINET { return nil, errors.New("unsupported class") @@ -1247,6 +1267,9 @@ func (r *Resolver) respondReverse(query []byte, name dnsname.FQDN, resp *respons // respond returns a DNS response to query if it can be resolved locally. // Otherwise, it returns errNotOurName. 
func (r *Resolver) respond(query []byte) ([]byte, error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } parser := dnsParserPool.Get().(*dnsParser) defer dnsParserPool.Put(parser)

From 3f5c560fd4566481379766ccf2d950c0c965b854 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 17:42:38 -0700 Subject: [PATCH 1439/1708] ipn/ipnlocal: drop h2c package, use net/http's support

In Dec 2021 in d3d503d9977ed I had grand plans to make exit node DNS cheaper by using HTTP/2 over PeerAPI, at least on some platforms. I only did server-side support though and never made it to the client.

In the ~4 years since, some things have happened:

* Go 1.24 got support for http.Protocols (https://pkg.go.dev/net/http#Protocols) and doing UnencryptedHTTP2 ("HTTP2 with prior knowledge")
* The old h2c upgrade mechanism was deprecated; see https://github.com/golang/go/issues/63565 and https://github.com/golang/go/issues/67816
* Go plans to deprecate x/net/http2 and move everything to the standard library.

So this drops our use of the x/net/http2/h2c package and instead enables h2c (on all platforms now) using the standard library.

This does mean we lose the deprecated h2c Upgrade support, but that's fine. If/when we do the h2c client support for ExitDNS, we'll have to probe the peer to see whether it supports it. Or have it reply with a header saying that future requests can use h2c. (It's tempting to use capver, but maybe people will disable that support anyway, so we should discover it at runtime instead.)

Also do the same in the sessionrecording package.

Updates #17305

Change-Id: If323f5ef32486effb18ed836888aa05c0efb701e Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 3 +-- cmd/tailscaled/depaware-min.txt | 3 +-- cmd/tailscaled/depaware-minbox.txt | 3 +-- cmd/tailscaled/depaware.txt | 3 +-- cmd/tsidp/depaware.txt | 3 +-- ipn/ipnlocal/peerapi.go | 12 ++++-------- ipn/ipnlocal/peerapi_h2c.go | 20 -------------------- sessionrecording/connect.go | 11 +++++------ tsnet/depaware.txt | 3 +-- 9 files changed, 15 insertions(+), 46 deletions(-) delete mode 100644 ipn/ipnlocal/peerapi_h2c.go

diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 1fd3c7630..b2fe54d6a 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -910,8 +910,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2 from k8s.io/apimachinery/pkg/util/net+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index ee66d7700..83fb32b21 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -220,8 +220,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2 from 
tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping golang.org/x/net/idna from golang.org/x/net/http/httpguts+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 86e75660a..f9429c860 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -247,8 +247,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2 from tailscale.com/cmd/tailscale/cli+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping golang.org/x/net/idna from golang.org/x/net/http/httpguts+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index a5ae214a0..24c619a2c 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -500,8 +500,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2 from tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index b6e794f8c..d933f3249 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -338,8 +338,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2 from tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index fb0d80d18..bd542e0f0 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -42,10 +42,6 @@ import ( var initListenConfig func(*net.ListenConfig, netip.Addr, *netmon.State, string) error -// addH2C is non-nil on platforms where we want to add H2C -// ("cleartext" HTTP/2) support to the peerAPI. -var addH2C func(*http.Server) - // peerDNSQueryHandler is implemented by tsdns.Resolver. 
type peerDNSQueryHandler interface { HandlePeerDNSQuery(context.Context, []byte, netip.AddrPort, func(name string) bool) (res []byte, err error) @@ -195,11 +191,11 @@ func (pln *peerAPIListener) ServeConn(src netip.AddrPort, c net.Conn) { peerUser: peerUser, } httpServer := &http.Server{ - Handler: h, - } - if addH2C != nil { - addH2C(httpServer) + Handler: h, + Protocols: new(http.Protocols), } + httpServer.Protocols.SetHTTP1(true) + httpServer.Protocols.SetUnencryptedHTTP2(true) // over WireGuard; "unencrypted" means no TLS go httpServer.Serve(netutil.NewOneConnListener(c, nil)) } diff --git a/ipn/ipnlocal/peerapi_h2c.go b/ipn/ipnlocal/peerapi_h2c.go deleted file mode 100644 index fbfa86398..000000000 --- a/ipn/ipnlocal/peerapi_h2c.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !ios && !android && !js - -package ipnlocal - -import ( - "net/http" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/h2c" -) - -func init() { - addH2C = func(s *http.Server) { - h2s := &http2.Server{} - s.Handler = h2c.NewHandler(s.Handler, h2s) - } -} diff --git a/sessionrecording/connect.go b/sessionrecording/connect.go index dc697d071..ccb7e5fd9 100644 --- a/sessionrecording/connect.go +++ b/sessionrecording/connect.go @@ -7,7 +7,6 @@ package sessionrecording import ( "context" - "crypto/tls" "encoding/json" "errors" "fmt" @@ -19,7 +18,6 @@ import ( "sync/atomic" "time" - "golang.org/x/net/http2" "tailscale.com/net/netx" "tailscale.com/tailcfg" "tailscale.com/util/httpm" @@ -312,14 +310,15 @@ func clientHTTP1(dialCtx context.Context, dial netx.DialFunc) *http.Client { // requests (HTTP/2 over plaintext). Unfortunately the same client does not // work for HTTP/1 so we need to split these up. func clientHTTP2(dialCtx context.Context, dial netx.DialFunc) *http.Client { + var p http.Protocols + p.SetUnencryptedHTTP2(true) return &http.Client{ - Transport: &http2.Transport{ - // Allow "http://" scheme in URLs. - AllowHTTP: true, + Transport: &http.Transport{ + Protocols: &p, // Pretend like we're using TLS, but actually use the provided // DialFunc underneath. This is necessary to convince the transport // to actually dial. 
- DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) { + DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) { perAttemptCtx, cancel := context.WithTimeout(ctx, perDialAttemptTimeout) defer cancel() go func() { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 0644a0692..6e627f6f7 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -331,8 +331,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - LDW golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2 from tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ From 2c956e30bea76678e7c2ec1204f2be398a64e94d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 17:57:04 -0700 Subject: [PATCH 1440/1708] ipn/ipnlocal: proxy h2c grpc using net/http.Transport instead of x/net/http2 (Kinda related: #17351) Updates #17305 Change-Id: I47df2612732a5713577164e74652bc9fa3cd14b3 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/serve.go | 22 +++++----- ipn/ipnlocal/serve_test.go | 88 +++++++++++++++++++++++++++++++++++++- 2 files changed, 98 insertions(+), 12 deletions(-) diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index dc4142404..3c967fd1e 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -34,7 +34,6 @@ import ( "unicode/utf8" "go4.org/mem" - "golang.org/x/net/http2" "tailscale.com/ipn" "tailscale.com/net/netutil" "tailscale.com/syncs" @@ -761,8 +760,8 @@ type reverseProxy struct { insecure bool backend string lb *LocalBackend - httpTransport lazy.SyncValue[*http.Transport] // transport for non-h2c backends - h2cTransport lazy.SyncValue[*http2.Transport] // transport for h2c backends + httpTransport lazy.SyncValue[*http.Transport] // transport for non-h2c backends + h2cTransport lazy.SyncValue[*http.Transport] // transport for h2c backends // closed tracks whether proxy is closed/currently closing. closed atomic.Bool } @@ -770,9 +769,7 @@ type reverseProxy struct { // close ensures that any open backend connections get closed. func (rp *reverseProxy) close() { rp.closed.Store(true) - if h2cT := rp.h2cTransport.Get(func() *http2.Transport { - return nil - }); h2cT != nil { + if h2cT := rp.h2cTransport.Get(func() *http.Transport { return nil }); h2cT != nil { h2cT.CloseIdleConnections() } if httpTransport := rp.httpTransport.Get(func() *http.Transport { @@ -843,14 +840,17 @@ func (rp *reverseProxy) getTransport() *http.Transport { // getH2CTransport returns the Transport used for GRPC requests to the backend. // The Transport gets created lazily, at most once. 
-func (rp *reverseProxy) getH2CTransport() *http2.Transport { - return rp.h2cTransport.Get(func() *http2.Transport { - return &http2.Transport{ - AllowHTTP: true, - DialTLSContext: func(ctx context.Context, network string, addr string, _ *tls.Config) (net.Conn, error) { +func (rp *reverseProxy) getH2CTransport() http.RoundTripper { + return rp.h2cTransport.Get(func() *http.Transport { + var p http.Protocols + p.SetUnencryptedHTTP2(true) + tr := &http.Transport{ + Protocols: &p, + DialTLSContext: func(ctx context.Context, network string, addr string) (net.Conn, error) { return rp.lb.dialer.SystemDial(ctx, "tcp", rp.url.Host) }, } + return tr }) } diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index a081ed27b..b4461d12f 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -15,6 +15,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "net/http" "net/http/httptest" "net/netip" @@ -881,7 +882,7 @@ func mustCreateURL(t *testing.T, u string) url.URL { func newTestBackend(t *testing.T, opts ...any) *LocalBackend { var logf logger.Logf = logger.Discard - const debug = true + const debug = false if debug { logf = logger.WithPrefix(tstest.WhileTestRunningLogger(t), "... ") } @@ -1085,3 +1086,88 @@ func TestEncTailscaleHeaderValue(t *testing.T) { } } } + +func TestServeGRPCProxy(t *testing.T) { + const msg = "some-response\n" + backend := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Path-Was", r.RequestURI) + w.Header().Set("Proto-Was", r.Proto) + io.WriteString(w, msg) + })) + backend.EnableHTTP2 = true + backend.Config.Protocols = new(http.Protocols) + backend.Config.Protocols.SetHTTP1(true) + backend.Config.Protocols.SetUnencryptedHTTP2(true) + backend.Start() + defer backend.Close() + + backendURL := must.Get(url.Parse(backend.URL)) + + lb := newTestBackend(t) + rp := &reverseProxy{ + logf: t.Logf, + url: backendURL, + backend: backend.URL, + lb: lb, + } + + req := func(method, urlStr string, opt ...any) *http.Request { + req := httptest.NewRequest(method, urlStr, nil) + for _, o := range opt { + switch v := o.(type) { + case int: + req.ProtoMajor = v + case string: + req.Header.Set("Content-Type", v) + default: + panic(fmt.Sprintf("unsupported option type %T", v)) + } + } + return req + } + + tests := []struct { + name string + req *http.Request + wantPath string + wantProto string + wantBody string + }{ + { + name: "non-gRPC", + req: req("GET", "http://foo/bar"), + wantPath: "/bar", + wantProto: "HTTP/1.1", + }, + { + name: "gRPC-but-not-http2", + req: req("GET", "http://foo/bar", "application/grpc"), + wantPath: "/bar", + wantProto: "HTTP/1.1", + }, + { + name: "gRPC--http2", + req: req("GET", "http://foo/bar", 2, "application/grpc"), + wantPath: "/bar", + wantProto: "HTTP/2.0", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rec := httptest.NewRecorder() + rp.ServeHTTP(rec, tt.req) + + res := rec.Result() + got := must.Get(io.ReadAll(res.Body)) + if got, want := res.Header.Get("Path-Was"), tt.wantPath; want != got { + t.Errorf("Path-Was %q, want %q", got, want) + } + if got, want := res.Header.Get("Proto-Was"), tt.wantProto; want != got { + t.Errorf("Proto-Was %q, want %q", got, want) + } + if string(got) != msg { + t.Errorf("got body %q, want %q", got, msg) + } + }) + } +} From 1803226945e2503bdd446a5054fc920853328c27 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 08:30:13 -0700 Subject: [PATCH 1441/1708] net/tstun: fix typo in doc 
Updates #cleanup Change-Id: Icaca974237cf678f3e036b1dfdd2f2e5082483db Signed-off-by: Brad Fitzpatrick --- net/tstun/tun.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tstun/tun.go b/net/tstun/tun.go index bfdaddf58..2891e9af4 100644 --- a/net/tstun/tun.go +++ b/net/tstun/tun.go @@ -21,7 +21,7 @@ import ( "tailscale.com/types/logger" ) -// CrateTAP is the hook set by feature/tap. +// CreateTAP is the hook set by feature/tap. var CreateTAP feature.Hook[func(logf logger.Logf, tapName, bridgeName string) (tun.Device, error)] // modprobeTunHook is a Linux-specific hook to run "/sbin/modprobe tun". From 9b997c8f2f96454f5771c4ec4c835e2334f93bb6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 08:21:02 -0700 Subject: [PATCH 1442/1708] feature/tpm: don't log to stderr in tests Fixes #17336 Change-Id: I7d2be4e8acf59116c57ce26049a6a5baa8f32436 Signed-off-by: Brad Fitzpatrick --- feature/tpm/tpm.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index e4c2b29e9..b700637e6 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -23,6 +23,7 @@ import ( "github.com/google/go-tpm/tpm2/transport" "golang.org/x/crypto/nacl/secretbox" "tailscale.com/atomicfile" + "tailscale.com/envknob" "tailscale.com/feature" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -31,6 +32,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/testenv" ) var infoOnce = sync.OnceValue(info) @@ -49,13 +51,20 @@ func init() { } } +var verboseTPM = envknob.RegisterBool("TS_DEBUG_TPM") + func info() *tailcfg.TPMInfo { + logf := logger.Discard + if !testenv.InTest() || verboseTPM() { + logf = log.New(log.Default().Writer(), "TPM: ", 0).Printf + } + tpm, err := open() if err != nil { - log.Printf("TPM: error opening: %v", err) + logf("error opening: %v", err) return nil } - log.Printf("TPM: successfully opened") + logf("successfully opened") defer tpm.Close() info := new(tailcfg.TPMInfo) @@ -84,12 +93,12 @@ func info() *tailcfg.TPMInfo { PropertyCount: 1, }.Execute(tpm) if err != nil { - log.Printf("TPM: GetCapability %v: %v", cap.prop, err) + logf("GetCapability %v: %v", cap.prop, err) continue } props, err := resp.CapabilityData.Data.TPMProperties() if err != nil { - log.Printf("TPM: GetCapability %v: %v", cap.prop, err) + logf("GetCapability %v: %v", cap.prop, err) continue } if len(props.TPMProperty) == 0 { @@ -97,6 +106,7 @@ func info() *tailcfg.TPMInfo { } cap.apply(info, props.TPMProperty[0].Value) } + logf("successfully read all properties") return info } From 442a3a779d29f78ba03cbd61509824f21c90cc59 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 09:12:42 -0700 Subject: [PATCH 1443/1708] feature, net/tshttpproxy: pull out support for using proxies as a feature Saves 139 KB. Also Synology support, which I saw had its own large-ish proxy parsing support on Linux, but support for proxies without Synology proxy support is reasonable, so I pulled that out as its own thing. 
Updates #12614 Change-Id: I22de285a3def7be77fdcf23e2bec7c83c9655593 Signed-off-by: Brad Fitzpatrick --- client/web/web.go | 5 ++++ clientupdate/distsign/distsign.go | 4 +-- cmd/derper/depaware.txt | 11 +++----- cmd/k8s-operator/depaware.txt | 4 ++- cmd/tailscale/cli/configure-synology-cert.go | 2 +- cmd/tailscale/cli/debug.go | 14 ++++------- cmd/tailscale/depaware.txt | 4 ++- cmd/tailscaled/debug.go | 19 ++++++++++---- cmd/tailscaled/depaware-min.txt | 4 +-- cmd/tailscaled/depaware-minbox.txt | 4 +-- cmd/tailscaled/depaware.txt | 4 ++- cmd/tailscaled/deps_test.go | 13 ++++++++++ cmd/tailscaled/proxy.go | 6 +++-- cmd/tsidp/depaware.txt | 4 ++- control/controlclient/direct.go | 9 ++++--- control/controlhttp/client.go | 13 +++++++--- derp/derphttp/derphttp_client.go | 23 +++++++++++------ .../feature_outboundproxy_disabled.go | 2 +- .../feature_outboundproxy_enabled.go | 2 +- .../feature_synology_disabled.go | 13 ++++++++++ .../buildfeatures/feature_synology_enabled.go | 13 ++++++++++ .../feature_useproxy_disabled.go | 13 ++++++++++ .../buildfeatures/feature_useproxy_enabled.go | 13 ++++++++++ feature/condregister/condregister.go | 10 +++++--- feature/condregister/useproxy/doc.go | 6 +++++ feature/condregister/useproxy/useproxy.go | 8 ++++++ feature/feature.go | 8 +++++- feature/featuretags/featuretags.go | 12 +++++++-- feature/hooks.go | 25 +++++++++++++++++++ feature/useproxy/useproxy.go | 18 +++++++++++++ ipn/ipnlocal/cert.go | 3 ++- logpolicy/logpolicy.go | 10 +++++--- net/dnsfallback/dnsfallback.go | 4 +-- net/netmon/interfaces_windows.go | 8 +++++- net/netmon/state.go | 11 +++++--- net/tshttpproxy/tshttpproxy_linux.go | 3 ++- tsnet/depaware.txt | 4 ++- tsnet/tsnet.go | 1 + .../tailscaled_deps_test_darwin.go | 1 - .../tailscaled_deps_test_freebsd.go | 1 - .../integration/tailscaled_deps_test_linux.go | 1 - .../tailscaled_deps_test_openbsd.go | 1 - .../tailscaled_deps_test_windows.go | 1 - wgengine/netstack/netstack_userping.go | 5 ++-- wgengine/userspace.go | 6 +++-- 45 files changed, 267 insertions(+), 79 deletions(-) create mode 100644 feature/buildfeatures/feature_synology_disabled.go create mode 100644 feature/buildfeatures/feature_synology_enabled.go create mode 100644 feature/buildfeatures/feature_useproxy_disabled.go create mode 100644 feature/buildfeatures/feature_useproxy_enabled.go create mode 100644 feature/condregister/useproxy/doc.go create mode 100644 feature/condregister/useproxy/useproxy.go create mode 100644 feature/useproxy/useproxy.go diff --git a/client/web/web.go b/client/web/web.go index 2421403c1..dbd3d5df0 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -27,6 +27,7 @@ import ( "tailscale.com/envknob" "tailscale.com/envknob/featureknob" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" @@ -496,6 +497,10 @@ func (s *Server) authorizeRequest(w http.ResponseWriter, r *http.Request) (ok bo // Client using system-specific auth. switch distro.Get() { case distro.Synology: + if !buildfeatures.HasSynology { + // Synology support not built in. 
+ return false + } authorized, _ := authorizeSynology(r) return authorized case distro.QNAP: diff --git a/clientupdate/distsign/distsign.go b/clientupdate/distsign/distsign.go index eba4b9267..270ee4c1f 100644 --- a/clientupdate/distsign/distsign.go +++ b/clientupdate/distsign/distsign.go @@ -55,7 +55,7 @@ import ( "github.com/hdevalence/ed25519consensus" "golang.org/x/crypto/blake2s" - "tailscale.com/net/tshttpproxy" + "tailscale.com/feature" "tailscale.com/types/logger" "tailscale.com/util/httpm" "tailscale.com/util/must" @@ -330,7 +330,7 @@ func fetch(url string, limit int64) ([]byte, error) { // limit bytes. On success, the returned value is a BLAKE2s hash of the file. func (c *Client) download(ctx context.Context, url, dst string, limit int64) ([]byte, int64, error) { tr := http.DefaultTransport.(*http.Transport).Clone() - tr.Proxy = tshttpproxy.ProxyFromEnvironment + tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() defer tr.CloseIdleConnections() hc := &http.Client{Transport: tr} diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 08aa374d6..8c122105f 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -2,16 +2,13 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 - W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ - W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate - W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus github.com/coder/websocket from tailscale.com/cmd/derper+ github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - W 💣 github.com/dblohm7/wingoes from tailscale.com/util/winutil+ + W 💣 github.com/dblohm7/wingoes from tailscale.com/util/winutil github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ @@ -89,7 +86,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/feature from tailscale.com/tsweb+ - tailscale.com/feature/buildfeatures from tailscale.com/feature + tailscale.com/feature/buildfeatures from tailscale.com/feature+ tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/hostinfo from tailscale.com/net/netmon+ tailscale.com/ipn from tailscale.com/client/local @@ -113,7 +110,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/net/tlsdial from tailscale.com/derp/derphttp tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/derp/derphttp+ tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/net/wsconn from tailscale.com/cmd/derper tailscale.com/paths from tailscale.com/client/local @@ -146,7 +142,6 @@ tailscale.com/cmd/derper dependencies: 
(generated by github.com/tailscale/depawa tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/net/netmon+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - W tailscale.com/util/cmpver from tailscale.com/net/tshttpproxy tailscale.com/util/ctxkey from tailscale.com/tsweb+ 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics @@ -195,7 +190,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa L golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from net/http+ - golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http/httpproxy from net/http golang.org/x/net/http2/hpack from net/http+ golang.org/x/net/idna from golang.org/x/crypto/acme/autocert+ golang.org/x/net/internal/socks from golang.org/x/net/proxy diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index b2fe54d6a..f8ae3d261 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -701,9 +701,11 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ @@ -777,7 +779,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/control/controlclient+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/net/udprelay/status from tailscale.com/client/local diff --git a/cmd/tailscale/cli/configure-synology-cert.go b/cmd/tailscale/cli/configure-synology-cert.go index 6ceef33ca..b5168ef92 100644 --- a/cmd/tailscale/cli/configure-synology-cert.go +++ b/cmd/tailscale/cli/configure-synology-cert.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !ts_omit_acme +//go:build linux && !ts_omit_acme && !ts_omit_synology package cli diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index c8a0d57c1..8d0357716 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -28,17 +28,17 @@ import ( "time" "github.com/peterbourgon/ff/v3/ffcli" - "golang.org/x/net/http/httpproxy" "golang.org/x/net/http2" "tailscale.com/client/tailscale/apitype" "tailscale.com/control/controlhttp" + "tailscale.com/feature" + _ "tailscale.com/feature/condregister/useproxy" 
"tailscale.com/hostinfo" "tailscale.com/internal/noiseconn" "tailscale.com/ipn" "tailscale.com/net/ace" "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" - "tailscale.com/net/tshttpproxy" "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/tailcfg" @@ -992,14 +992,10 @@ func runTS2021(ctx context.Context, args []string) error { if err != nil { return err } - envConf := httpproxy.FromEnvironment() - if *envConf == (httpproxy.Config{}) { - log.Printf("HTTP proxy env: (none)") - } else { - log.Printf("HTTP proxy env: %+v", envConf) + if proxyFromEnv, ok := feature.HookProxyFromEnvironment.GetOk(); ok { + proxy, err := proxyFromEnv(&http.Request{URL: u}) + log.Printf("tshttpproxy.ProxyFromEnvironment = (%v, %v)", proxy, err) } - proxy, err := tshttpproxy.ProxyFromEnvironment(&http.Request{URL: u}) - log.Printf("tshttpproxy.ProxyFromEnvironment = (%v, %v)", proxy, err) } machinePrivate := key.NewMachine() var dialer net.Dialer diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 2df600702..9fb7b63ed 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -96,9 +96,11 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/useproxy from tailscale.com/cmd/tailscale/cli tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli tailscale.com/hostinfo from tailscale.com/client/web+ @@ -130,7 +132,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/tlsdial from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/udprelay/status from tailscale.com/client/local+ tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ diff --git a/cmd/tailscaled/debug.go b/cmd/tailscaled/debug.go index 85dd787c1..ebcbe54e0 100644 --- a/cmd/tailscaled/debug.go +++ b/cmd/tailscaled/debug.go @@ -21,10 +21,11 @@ import ( "time" "tailscale.com/derp/derphttp" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/net/netmon" - "tailscale.com/net/tshttpproxy" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/util/eventbus" @@ -124,9 +125,14 @@ func getURL(ctx context.Context, urlStr string) error { if err != nil { return fmt.Errorf("http.NewRequestWithContext: %v", err) } - proxyURL, err := tshttpproxy.ProxyFromEnvironment(req) - if err != nil { - return fmt.Errorf("tshttpproxy.ProxyFromEnvironment: %v", err) + var proxyURL *url.URL + if buildfeatures.HasUseProxy { + if proxyFromEnv, ok := feature.HookProxyFromEnvironment.GetOk(); ok { + 
proxyURL, err = proxyFromEnv(req) + if err != nil { + return fmt.Errorf("tshttpproxy.ProxyFromEnvironment: %v", err) + } + } } log.Printf("proxy: %v", proxyURL) tr := &http.Transport{ @@ -135,7 +141,10 @@ func getURL(ctx context.Context, urlStr string) error { DisableKeepAlives: true, } if proxyURL != nil { - auth, err := tshttpproxy.GetAuthHeader(proxyURL) + var auth string + if f, ok := feature.HookProxyGetAuthHeader.GetOk(); ok { + auth, err = f(proxyURL) + } if err == nil && auth != "" { tr.ProxyConnectHeader.Set("Proxy-Authorization", auth) } diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 83fb32b21..7e994300b 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -57,6 +57,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscaled+ tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister + tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/cmd/tailscaled+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ @@ -110,7 +111,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ - tailscale.com/net/tshttpproxy from tailscale.com/cmd/tailscaled+ tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/omit from tailscale.com/ipn/conffile @@ -219,7 +219,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http/httpproxy from net/http golang.org/x/net/http2 from tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index f9429c860..d7f88c32c 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -78,6 +78,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister+ + tailscale.com/feature/condregister/useproxy from tailscale.com/cmd/tailscale/cli+ tailscale.com/health from tailscale.com/cmd/tailscaled+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ @@ -133,7 +134,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ - tailscale.com/net/tshttpproxy from tailscale.com/cmd/tailscaled+ 
tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/net/udprelay/status from tailscale.com/client/local @@ -246,7 +246,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http/httpproxy from net/http golang.org/x/net/http2 from tailscale.com/cmd/tailscale/cli+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 24c619a2c..b1bb83d92 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -276,6 +276,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/clientupdate from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister + tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister tailscale.com/feature/doctor from tailscale.com/feature/condregister tailscale.com/feature/drive from tailscale.com/feature/condregister @@ -289,6 +290,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/taildrop from tailscale.com/feature/condregister L tailscale.com/feature/tap from tailscale.com/feature/condregister tailscale.com/feature/tpm from tailscale.com/feature/condregister + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ @@ -357,7 +359,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay from tailscale.com/feature/relayserver tailscale.com/net/udprelay/endpoint from tailscale.com/feature/relayserver+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index a41a08f9d..89d9db796 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -222,3 +222,16 @@ func TestOmitGRO(t *testing.T) { }, }.Check(t) } + +func TestOmitUseProxy(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_useproxy,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "tshttproxy") { + t.Errorf("unexpected dep: %q", dep) + } + }, + }.Check(t) +} diff --git a/cmd/tailscaled/proxy.go b/cmd/tailscaled/proxy.go index 790b5e18e..85c3d91f9 100644 --- a/cmd/tailscaled/proxy.go +++ b/cmd/tailscaled/proxy.go @@ -17,10 +17,10 @@ import ( "net/http/httputil" "strings" + "tailscale.com/feature" "tailscale.com/net/proxymux" 
"tailscale.com/net/socks5" "tailscale.com/net/tsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/types/logger" ) @@ -104,7 +104,9 @@ func mkProxyStartFunc(socksListener, httpListener net.Listener) proxyStartFunc { }() addrs = append(addrs, socksListener.Addr().String()) } - tshttpproxy.SetSelfProxy(addrs...) + if set, ok := feature.HookProxySetSelfProxy.GetOk(); ok { + set(addrs...) + } } } diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index d933f3249..033ff6570 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -143,9 +143,11 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ @@ -205,7 +207,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/control/controlclient+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/net/udprelay/status from tailscale.com/client/local diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 31f41eac8..3a40aa6fd 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -42,7 +42,6 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/tlsdial" "tailscale.com/net/tsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/tailcfg" "tailscale.com/tempfork/httprec" "tailscale.com/tka" @@ -275,8 +274,12 @@ func NewDirect(opts Options) (*Direct, error) { var interceptedDial *atomic.Bool if httpc == nil { tr := http.DefaultTransport.(*http.Transport).Clone() - tr.Proxy = tshttpproxy.ProxyFromEnvironment - tshttpproxy.SetTransportGetProxyConnectHeader(tr) + if buildfeatures.HasUseProxy { + tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() + if f, ok := feature.HookProxySetTransportGetProxyConnectHeader.GetOk(); ok { + f(tr) + } + } tr.TLSClientConfig = tlsdial.Config(opts.HealthTracker, tr.TLSClientConfig) var dialFunc netx.DialFunc dialFunc, interceptedDial = makeScreenTimeDetectingDialFunc(opts.Dialer.SystemDial) diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index da9590c48..f1ee7a6f9 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -39,6 +39,8 @@ import ( "tailscale.com/control/controlbase" "tailscale.com/control/controlhttp/controlhttpcommon" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/ace" 
"tailscale.com/net/dnscache" @@ -47,7 +49,6 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/sockstats" "tailscale.com/net/tlsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" @@ -81,7 +82,7 @@ func (a *Dialer) getProxyFunc() func(*http.Request) (*url.URL, error) { if a.proxyFunc != nil { return a.proxyFunc } - return tshttpproxy.ProxyFromEnvironment + return feature.HookProxyFromEnvironment.GetOrNil() } // httpsFallbackDelay is how long we'll wait for a.HTTPPort to work before @@ -463,8 +464,12 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad tr.Proxy = nil tr.DialContext = dialer } else { - tr.Proxy = a.getProxyFunc() - tshttpproxy.SetTransportGetProxyConnectHeader(tr) + if buildfeatures.HasUseProxy { + tr.Proxy = a.getProxyFunc() + if set, ok := feature.HookProxySetTransportGetProxyConnectHeader.GetOk(); ok { + set(tr) + } + } tr.DialContext = dnscache.Dialer(dialer, dns) } // Disable HTTP2, since h2 can't do protocol switching. diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index 57f008a1a..db56c4a44 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -32,6 +32,8 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derpconst" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dnscache" "tailscale.com/net/netmon" @@ -39,7 +41,6 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/sockstats" "tailscale.com/net/tlsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" @@ -734,8 +735,12 @@ func (c *Client) dialNode(ctx context.Context, n *tailcfg.DERPNode) (net.Conn, e Path: "/", // unused }, } - if proxyURL, err := tshttpproxy.ProxyFromEnvironment(proxyReq); err == nil && proxyURL != nil { - return c.dialNodeUsingProxy(ctx, n, proxyURL) + if buildfeatures.HasUseProxy { + if proxyFromEnv, ok := feature.HookProxyFromEnvironment.GetOk(); ok { + if proxyURL, err := proxyFromEnv(proxyReq); err == nil && proxyURL != nil { + return c.dialNodeUsingProxy(ctx, n, proxyURL) + } + } } type res struct { @@ -865,10 +870,14 @@ func (c *Client) dialNodeUsingProxy(ctx context.Context, n *tailcfg.DERPNode, pr target := net.JoinHostPort(n.HostName, "443") var authHeader string - if v, err := tshttpproxy.GetAuthHeader(pu); err != nil { - c.logf("derphttp: error getting proxy auth header for %v: %v", proxyURL, err) - } else if v != "" { - authHeader = fmt.Sprintf("Proxy-Authorization: %s\r\n", v) + if buildfeatures.HasUseProxy { + if getAuthHeader, ok := feature.HookProxyGetAuthHeader.GetOk(); ok { + if v, err := getAuthHeader(pu); err != nil { + c.logf("derphttp: error getting proxy auth header for %v: %v", proxyURL, err) + } else if v != "" { + authHeader = fmt.Sprintf("Proxy-Authorization: %s\r\n", v) + } + } } if _, err := fmt.Fprintf(proxyConn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n%s\r\n", target, target, authHeader); err != nil { diff --git a/feature/buildfeatures/feature_outboundproxy_disabled.go b/feature/buildfeatures/feature_outboundproxy_disabled.go index a84c24e6d..bf74db060 100644 --- a/feature/buildfeatures/feature_outboundproxy_disabled.go +++ b/feature/buildfeatures/feature_outboundproxy_disabled.go @@ -7,7 +7,7 @@ package buildfeatures -// HasOutboundProxy is whether the binary was built with support for modular feature "Outbound localhost HTTP/SOCK5 proxy support". 
+// HasOutboundProxy is whether the binary was built with support for modular feature "Support running an outbound localhost HTTP/SOCK5 proxy support that sends traffic over Tailscale". // Specifically, it's whether the binary was NOT built with the "ts_omit_outboundproxy" build tag. // It's a const so it can be used for dead code elimination. const HasOutboundProxy = false diff --git a/feature/buildfeatures/feature_outboundproxy_enabled.go b/feature/buildfeatures/feature_outboundproxy_enabled.go index c306bbeb2..53bb99d5c 100644 --- a/feature/buildfeatures/feature_outboundproxy_enabled.go +++ b/feature/buildfeatures/feature_outboundproxy_enabled.go @@ -7,7 +7,7 @@ package buildfeatures -// HasOutboundProxy is whether the binary was built with support for modular feature "Outbound localhost HTTP/SOCK5 proxy support". +// HasOutboundProxy is whether the binary was built with support for modular feature "Support running an outbound localhost HTTP/SOCK5 proxy support that sends traffic over Tailscale". // Specifically, it's whether the binary was NOT built with the "ts_omit_outboundproxy" build tag. // It's a const so it can be used for dead code elimination. const HasOutboundProxy = true diff --git a/feature/buildfeatures/feature_synology_disabled.go b/feature/buildfeatures/feature_synology_disabled.go new file mode 100644 index 000000000..0cdf084c3 --- /dev/null +++ b/feature/buildfeatures/feature_synology_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_synology + +package buildfeatures + +// HasSynology is whether the binary was built with support for modular feature "Synology NAS integration (applies to Linux builds only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_synology" build tag. +// It's a const so it can be used for dead code elimination. +const HasSynology = false diff --git a/feature/buildfeatures/feature_synology_enabled.go b/feature/buildfeatures/feature_synology_enabled.go new file mode 100644 index 000000000..dde4123b6 --- /dev/null +++ b/feature/buildfeatures/feature_synology_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_synology + +package buildfeatures + +// HasSynology is whether the binary was built with support for modular feature "Synology NAS integration (applies to Linux builds only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_synology" build tag. +// It's a const so it can be used for dead code elimination. +const HasSynology = true diff --git a/feature/buildfeatures/feature_useproxy_disabled.go b/feature/buildfeatures/feature_useproxy_disabled.go new file mode 100644 index 000000000..9f29a9820 --- /dev/null +++ b/feature/buildfeatures/feature_useproxy_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_useproxy + +package buildfeatures + +// HasUseProxy is whether the binary was built with support for modular feature "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useproxy" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasUseProxy = false diff --git a/feature/buildfeatures/feature_useproxy_enabled.go b/feature/buildfeatures/feature_useproxy_enabled.go new file mode 100644 index 000000000..9195f2fdc --- /dev/null +++ b/feature/buildfeatures/feature_useproxy_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_useproxy + +package buildfeatures + +// HasUseProxy is whether the binary was built with support for modular feature "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useproxy" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseProxy = true diff --git a/feature/condregister/condregister.go b/feature/condregister/condregister.go index 69e2b071c..654483d1d 100644 --- a/feature/condregister/condregister.go +++ b/feature/condregister/condregister.go @@ -6,9 +6,13 @@ // to ensure all conditional features are registered. package condregister -// Portmapper is special in that the CLI also needs to link it in, -// so it's pulled out into its own package, rather than using a maybe_*.go -// file in condregister. import ( + // Portmapper is special in that the CLI also needs to link it in, + // so it's pulled out into its own package, rather than using a maybe_*.go + // file in condregister. _ "tailscale.com/feature/condregister/portmapper" + + // HTTP proxy support is also needed by the CLI, and tsnet, so it's its + // own package too. + _ "tailscale.com/feature/condregister/useproxy" ) diff --git a/feature/condregister/useproxy/doc.go b/feature/condregister/useproxy/doc.go new file mode 100644 index 000000000..1e8abb358 --- /dev/null +++ b/feature/condregister/useproxy/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package useproxy registers support for using proxies +// if it's not disabled via the ts_omit_useproxy build tag. +package useproxy diff --git a/feature/condregister/useproxy/useproxy.go b/feature/condregister/useproxy/useproxy.go new file mode 100644 index 000000000..bda6e49c0 --- /dev/null +++ b/feature/condregister/useproxy/useproxy.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_useproxy + +package useproxy + +import _ "tailscale.com/feature/useproxy" diff --git a/feature/feature.go b/feature/feature.go index 70f05d192..0d383b398 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -50,7 +50,8 @@ func (h *Hook[Func]) Set(f Func) { } // Get returns the hook function, or panics if it hasn't been set. -// Use IsSet to check if it's been set. +// Use IsSet to check if it's been set, or use GetOrNil if you're +// okay with a nil return value. func (h *Hook[Func]) Get() Func { if !h.ok { panic("Get on unset feature hook, without IsSet") @@ -64,6 +65,11 @@ func (h *Hook[Func]) GetOk() (f Func, ok bool) { return h.f, h.ok } +// GetOrNil returns the hook function or nil if it hasn't been set. +func (h *Hook[Func]) GetOrNil() Func { + return h.f +} + // Hooks is a slice of funcs. 
// // As opposed to a single Hook, this is meant to be used when diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 5c5352657..b85d1b9dc 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -121,7 +121,7 @@ var Features = map[FeatureTag]FeatureMeta{ "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, "outboundproxy": { Sym: "OutboundProxy", - Desc: "Outbound localhost HTTP/SOCK5 proxy support", + Desc: "Support running an outbound localhost HTTP/SOCK5 proxy support that sends traffic over Tailscale", Deps: []FeatureTag{"netstack"}, }, "osrouter": { @@ -172,6 +172,10 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Tailscale SSH support", Deps: []FeatureTag{"dbus", "netstack"}, }, + "synology": { + Sym: "Synology", + Desc: "Synology NAS integration (applies to Linux builds only)", + }, "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support", nil}, "systray": { Sym: "SysTray", @@ -182,7 +186,11 @@ var Features = map[FeatureTag]FeatureMeta{ "tailnetlock": {"TailnetLock", "Tailnet Lock support", nil}, "tap": {"Tap", "Experimental Layer 2 (ethernet) support", nil}, "tpm": {"TPM", "TPM support", nil}, - "wakeonlan": {"WakeOnLAN", "Wake-on-LAN support", nil}, + "useproxy": { + Sym: "UseProxy", + Desc: "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.", + }, + "wakeonlan": {"WakeOnLAN", "Wake-on-LAN support", nil}, "webclient": { Sym: "WebClient", Desc: "Web client support", Deps: []FeatureTag{"serve"}, diff --git a/feature/hooks.go b/feature/hooks.go index fc3971dda..bc42bd8d9 100644 --- a/feature/hooks.go +++ b/feature/hooks.go @@ -3,6 +3,11 @@ package feature +import ( + "net/http" + "net/url" +) + // HookCanAutoUpdate is a hook for the clientupdate package // to conditionally initialize. var HookCanAutoUpdate Hook[func() bool] @@ -15,3 +20,23 @@ func CanAutoUpdate() bool { } return false } + +// HookProxyFromEnvironment is a hook for feature/useproxy to register +// a function to use as http.ProxyFromEnvironment. +var HookProxyFromEnvironment Hook[func(*http.Request) (*url.URL, error)] + +// HookProxyInvalidateCache is a hook for feature/useproxy to register +// [tshttpproxy.InvalidateCache]. +var HookProxyInvalidateCache Hook[func()] + +// HookProxyGetAuthHeader is a hook for feature/useproxy to register +// [tshttpproxy.GetAuthHeader]. +var HookProxyGetAuthHeader Hook[func(*url.URL) (string, error)] + +// HookProxySetSelfProxy is a hook for feature/useproxy to register +// [tshttpproxy.SetSelfProxy]. +var HookProxySetSelfProxy Hook[func(...string)] + +// HookProxySetTransportGetProxyConnectHeader is a hook for feature/useproxy to register +// [tshttpproxy.SetTransportGetProxyConnectHeader]. +var HookProxySetTransportGetProxyConnectHeader Hook[func(*http.Transport)] diff --git a/feature/useproxy/useproxy.go b/feature/useproxy/useproxy.go new file mode 100644 index 000000000..a18e60577 --- /dev/null +++ b/feature/useproxy/useproxy.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package useproxy registers support for using system proxies. 
+package useproxy + +import ( + "tailscale.com/feature" + "tailscale.com/net/tshttpproxy" +) + +func init() { + feature.HookProxyFromEnvironment.Set(tshttpproxy.ProxyFromEnvironment) + feature.HookProxyInvalidateCache.Set(tshttpproxy.InvalidateCache) + feature.HookProxyGetAuthHeader.Set(tshttpproxy.GetAuthHeader) + feature.HookProxySetSelfProxy.Set(tshttpproxy.SetSelfProxy) + feature.HookProxySetTransportGetProxyConnectHeader.Set(tshttpproxy.SetTransportGetProxyConnectHeader) +} diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index bf85affa6..ab49976c8 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -35,6 +35,7 @@ import ( "tailscale.com/atomicfile" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" @@ -73,7 +74,7 @@ func (b *LocalBackend) certDir() (string, error) { // As a workaround for Synology DSM6 not having a "var" directory, use the // app's "etc" directory (on a small partition) to hold certs at least. // See https://github.com/tailscale/tailscale/issues/4060#issuecomment-1186592251 - if d == "" && runtime.GOOS == "linux" && distro.Get() == distro.Synology && distro.DSMVersion() == 6 { + if buildfeatures.HasSynology && d == "" && runtime.GOOS == "linux" && distro.Get() == distro.Synology && distro.DSMVersion() == 6 { d = "/var/packages/Tailscale/etc" // base; we append "certs" below } if d == "" { diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index c802d481f..c1f3e553a 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -31,6 +31,7 @@ import ( "golang.org/x/term" "tailscale.com/atomicfile" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" @@ -44,7 +45,6 @@ import ( "tailscale.com/net/netns" "tailscale.com/net/netx" "tailscale.com/net/tlsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/types/logger" @@ -870,8 +870,12 @@ func (opts TransportOptions) New() http.RoundTripper { tr.TLSClientConfig = opts.TLSClientConfig.Clone() } - tr.Proxy = tshttpproxy.ProxyFromEnvironment - tshttpproxy.SetTransportGetProxyConnectHeader(tr) + if buildfeatures.HasUseProxy { + tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() + if set, ok := feature.HookProxySetTransportGetProxyConnectHeader.GetOk(); ok { + set(tr) + } + } // We do our own zstd compression on uploads, and responses never contain any payload, // so don't send "Accept-Encoding: gzip" to save a few bytes on the wire, since there diff --git a/net/dnsfallback/dnsfallback.go b/net/dnsfallback/dnsfallback.go index 9843d46f9..74b625970 100644 --- a/net/dnsfallback/dnsfallback.go +++ b/net/dnsfallback/dnsfallback.go @@ -26,11 +26,11 @@ import ( "time" "tailscale.com/atomicfile" + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/net/netns" "tailscale.com/net/tlsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/util/slicesx" @@ -135,7 +135,7 @@ func bootstrapDNSMap(ctx context.Context, serverName string, serverIP netip.Addr dialer := netns.NewDialer(logf, netMon) tr := http.DefaultTransport.(*http.Transport).Clone() tr.DisableKeepAlives = true // This transport is meant to be used once. 
- tr.Proxy = tshttpproxy.ProxyFromEnvironment + tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() tr.DialContext = func(ctx context.Context, netw, addr string) (net.Conn, error) { return dialer.DialContext(ctx, "tcp", net.JoinHostPort(serverIP.String(), "443")) } diff --git a/net/netmon/interfaces_windows.go b/net/netmon/interfaces_windows.go index 00b686e59..d6625ead3 100644 --- a/net/netmon/interfaces_windows.go +++ b/net/netmon/interfaces_windows.go @@ -13,6 +13,7 @@ import ( "golang.org/x/sys/windows" "golang.zx2c4.com/wireguard/windows/tunnel/winipcfg" + "tailscale.com/feature/buildfeatures" "tailscale.com/tsconst" ) @@ -22,7 +23,9 @@ const ( func init() { likelyHomeRouterIP = likelyHomeRouterIPWindows - getPAC = getPACWindows + if buildfeatures.HasUseProxy { + getPAC = getPACWindows + } } func likelyHomeRouterIPWindows() (ret netip.Addr, _ netip.Addr, ok bool) { @@ -244,6 +247,9 @@ const ( ) func getPACWindows() string { + if !buildfeatures.HasUseProxy { + return "" + } var res *uint16 r, _, e := detectAutoProxyConfigURL.Call( winHTTP_AUTO_DETECT_TYPE_DHCP|winHTTP_AUTO_DETECT_TYPE_DNS_A, diff --git a/net/netmon/state.go b/net/netmon/state.go index bd0960768..cdb427d47 100644 --- a/net/netmon/state.go +++ b/net/netmon/state.go @@ -15,10 +15,11 @@ import ( "strings" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/net/netaddr" "tailscale.com/net/tsaddr" - "tailscale.com/net/tshttpproxy" "tailscale.com/util/mak" ) @@ -501,13 +502,15 @@ func getState(optTSInterfaceName string) (*State, error) { } } - if s.AnyInterfaceUp() { + if buildfeatures.HasUseProxy && s.AnyInterfaceUp() { req, err := http.NewRequest("GET", LoginEndpointForProxyDetermination, nil) if err != nil { return nil, err } - if u, err := tshttpproxy.ProxyFromEnvironment(req); err == nil && u != nil { - s.HTTPProxy = u.String() + if proxyFromEnv, ok := feature.HookProxyFromEnvironment.GetOk(); ok { + if u, err := proxyFromEnv(req); err == nil && u != nil { + s.HTTPProxy = u.String() + } } if getPAC != nil { s.PAC = getPAC() diff --git a/net/tshttpproxy/tshttpproxy_linux.go b/net/tshttpproxy/tshttpproxy_linux.go index b241c256d..7e086e492 100644 --- a/net/tshttpproxy/tshttpproxy_linux.go +++ b/net/tshttpproxy/tshttpproxy_linux.go @@ -9,6 +9,7 @@ import ( "net/http" "net/url" + "tailscale.com/feature/buildfeatures" "tailscale.com/version/distro" ) @@ -17,7 +18,7 @@ func init() { } func linuxSysProxyFromEnv(req *http.Request) (*url.URL, error) { - if distro.Get() == distro.Synology { + if buildfeatures.HasSynology && distro.Get() == distro.Synology { return synologyProxyFromConfigCached(req) } return nil, nil diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 6e627f6f7..858bb6d64 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -139,9 +139,11 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health 
from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ @@ -201,7 +203,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/control/controlclient+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/net/udprelay/status from tailscale.com/client/local diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 08f08281a..42e4198a0 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -31,6 +31,7 @@ import ( "tailscale.com/envknob" _ "tailscale.com/feature/condregister/oauthkey" _ "tailscale.com/feature/condregister/portmapper" + _ "tailscale.com/feature/condregister/useproxy" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/internal/client/tailscale" diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index 7a26300e5..72615330d 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -36,7 +36,6 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index 7a26300e5..72615330d 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -36,7 +36,6 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index 7a26300e5..72615330d 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -36,7 +36,6 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index 7a26300e5..72615330d 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -36,7 +36,6 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index a5a0a428f..c2761d019 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -46,7 +46,6 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" 
- _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" diff --git a/wgengine/netstack/netstack_userping.go b/wgengine/netstack/netstack_userping.go index ee635bd87..b35a6eca9 100644 --- a/wgengine/netstack/netstack_userping.go +++ b/wgengine/netstack/netstack_userping.go @@ -13,6 +13,7 @@ import ( "runtime" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/version/distro" ) @@ -20,7 +21,7 @@ import ( // CAP_NET_RAW from tailscaled's binary. var setAmbientCapsRaw func(*exec.Cmd) -var isSynology = runtime.GOOS == "linux" && distro.Get() == distro.Synology +var isSynology = runtime.GOOS == "linux" && buildfeatures.HasSynology && distro.Get() == distro.Synology // sendOutboundUserPing sends a non-privileged ICMP (or ICMPv6) ping to dstIP with the given timeout. func (ns *Impl) sendOutboundUserPing(dstIP netip.Addr, timeout time.Duration) error { @@ -61,7 +62,7 @@ func (ns *Impl) sendOutboundUserPing(dstIP netip.Addr, timeout time.Duration) er ping = "/bin/ping" } cmd := exec.Command(ping, "-c", "1", "-W", "3", dstIP.String()) - if isSynology && os.Getuid() != 0 { + if buildfeatures.HasSynology && isSynology && os.Getuid() != 0 { // On DSM7 we run as non-root and need to pass // CAP_NET_RAW if our binary has it. setAmbientCapsRaw(cmd) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 158a6d06f..049abcf17 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -23,6 +23,7 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/drive" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/ipn/ipnstate" @@ -35,7 +36,6 @@ import ( "tailscale.com/net/sockstats" "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/net/tstun" "tailscale.com/syncs" "tailscale.com/tailcfg" @@ -559,7 +559,9 @@ func (e *userspaceEngine) consumeEventbusTopics(cli *eventbus.Client) func(*even case <-cli.Done(): return case changeDelta := <-changeDeltaSub.Events(): - tshttpproxy.InvalidateCache() + if f, ok := feature.HookProxyInvalidateCache.GetOk(); ok { + f() + } e.linkChange(&changeDelta) } } From b9cdef18c04b48a52235af4eadcd9a3193cafb3c Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 12 Sep 2025 12:33:46 -0700 Subject: [PATCH 1444/1708] util/prompt: add a default and take default in non-interactive cases The Tailscale CLI is the primary configuration interface and as such it is used in scripts, container setups, and many other places that do not have a terminal available and should not be made to respond to prompts. The default is set to false where the "risky" API is being used by the CLI and true otherwise, this means that the `--yes` flags are only required under interactive runs and scripts do not need to be concerned with prompts or extra flags. 
Updates #19445 Signed-off-by: James Tucker --- cmd/tailscale/cli/network-lock.go | 2 +- cmd/tailscale/cli/risks.go | 2 +- cmd/tailscale/cli/serve_v2.go | 2 +- cmd/tailscale/cli/update.go | 2 +- util/prompt/prompt.go | 19 +++++++++++++++++-- 5 files changed, 21 insertions(+), 6 deletions(-) diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index ec3b01ad6..9b2f6fbdb 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -384,7 +384,7 @@ Removal of a signing key(s) without resigning nodes (--re-sign=false) will cause any nodes signed by the the given key(s) to be locked out of the Tailscale network. Proceed with caution. `) - if !prompt.YesNo("Are you sure you want to remove the signing key(s)?") { + if !prompt.YesNo("Are you sure you want to remove the signing key(s)?", true) { fmt.Printf("aborting removal of signing key(s)\n") os.Exit(0) } diff --git a/cmd/tailscale/cli/risks.go b/cmd/tailscale/cli/risks.go index dfde87f64..d4572842b 100644 --- a/cmd/tailscale/cli/risks.go +++ b/cmd/tailscale/cli/risks.go @@ -66,7 +66,7 @@ func presentRiskToUser(riskType, riskMessage, acceptedRisks string) error { outln(riskMessage) printf("To skip this warning, use --accept-risk=%s\n", riskType) - if prompt.YesNo("Continue?") { + if prompt.YesNo("Continue?", false) { return nil } diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 058d80649..8831db2a9 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -1086,7 +1086,7 @@ func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, dnsName string, srvPort u if len(mounts) > 1 { msg := fmt.Sprintf("Are you sure you want to delete %d handlers under port %s?", len(mounts), portStr) - if !e.yes && !prompt.YesNo(msg) { + if !e.yes && !prompt.YesNo(msg, true) { return nil } } diff --git a/cmd/tailscale/cli/update.go b/cmd/tailscale/cli/update.go index 7c0269f6a..7eb0dccac 100644 --- a/cmd/tailscale/cli/update.go +++ b/cmd/tailscale/cli/update.go @@ -87,5 +87,5 @@ func confirmUpdate(ver string) bool { } msg := fmt.Sprintf("This will update Tailscale from %v to %v. Continue?", version.Short(), ver) - return prompt.YesNo(msg) + return prompt.YesNo(msg, true) } diff --git a/util/prompt/prompt.go b/util/prompt/prompt.go index 4e589ceb3..a6d86fb48 100644 --- a/util/prompt/prompt.go +++ b/util/prompt/prompt.go @@ -6,19 +6,34 @@ package prompt import ( "fmt" + "os" "strings" + + "github.com/mattn/go-isatty" ) // YesNo takes a question and prompts the user to answer the // question with a yes or no. It appends a [y/n] to the message. -func YesNo(msg string) bool { - fmt.Print(msg + " [y/n] ") +// +// If there is no TTY on both Stdin and Stdout, assume that we're in a script +// and return the dflt result. +func YesNo(msg string, dflt bool) bool { + if !(isatty.IsTerminal(os.Stdin.Fd()) && isatty.IsTerminal(os.Stdout.Fd())) { + return dflt + } + if dflt { + fmt.Print(msg + " [Y/n] ") + } else { + fmt.Print(msg + " [y/N] ") + } var resp string fmt.Scanln(&resp) resp = strings.ToLower(resp) switch resp { case "y", "yes", "sure": return true + case "": + return dflt } return false } From bbb16e4e72c58d43aa1ee356cb974d669a0a02fe Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 10:55:42 -0700 Subject: [PATCH 1445/1708] drive: don't use regexp package in leaf types package Even with ts_omit_drive, the drive package is currently still imported for some types. So it should be light. 
But it was depending on the "regexp" packge, which I'd like to remove from our minimal builds. Updates #12614 Change-Id: I5bf85d8eb15a739793723b1da11c370d3fcd2f32 Signed-off-by: Brad Fitzpatrick --- drive/remote.go | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/drive/remote.go b/drive/remote.go index 9aeead710..2c6fba894 100644 --- a/drive/remote.go +++ b/drive/remote.go @@ -9,7 +9,6 @@ import ( "bytes" "errors" "net/http" - "regexp" "strings" ) @@ -21,10 +20,6 @@ var ( ErrInvalidShareName = errors.New("Share names may only contain the letters a-z, underscore _, parentheses (), or spaces") ) -var ( - shareNameRegex = regexp.MustCompile(`^[a-z0-9_\(\) ]+$`) -) - // AllowShareAs reports whether sharing files as a specific user is allowed. func AllowShareAs() bool { return !DisallowShareAs && doAllowShareAs() @@ -125,9 +120,26 @@ func NormalizeShareName(name string) (string, error) { // Trim whitespace name = strings.TrimSpace(name) - if !shareNameRegex.MatchString(name) { + if !validShareName(name) { return "", ErrInvalidShareName } return name, nil } + +func validShareName(name string) bool { + if name == "" { + return false + } + for _, r := range name { + if 'a' <= r && r <= 'z' || '0' <= r && r <= '9' { + continue + } + switch r { + case '_', ' ', '(', ')': + continue + } + return false + } + return true +} From ee034d48fccbedf0fff24f065cf59e3410441f03 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 09:53:55 -0700 Subject: [PATCH 1446/1708] feature/featuretags: add a catch-all "Debug" feature flag Saves 168 KB. Updates #12614 Change-Id: Iaab3ae3efc6ddc7da39629ef13e5ec44976952ba Signed-off-by: Brad Fitzpatrick --- client/local/local.go | 11 + cmd/tailscaled/depaware-min.txt | 4 +- cmd/tailscaled/depaware-minbox.txt | 4 +- control/controlclient/direct.go | 2 +- .../buildfeatures/feature_debug_disabled.go | 13 + .../buildfeatures/feature_debug_enabled.go | 13 + feature/featuretags/featuretags.go | 1 + ipn/ipnlocal/c2n.go | 22 + ipn/ipnlocal/local.go | 20 +- ipn/localapi/debug.go | 465 ++++++++++++++++++ ipn/localapi/debugderp.go | 2 + ipn/localapi/localapi.go | 436 +--------------- ipn/localapi/syspolicy_api.go | 2 +- ipn/localapi/tailnetlock.go | 26 +- wgengine/magicsock/debughttp.go | 7 + 15 files changed, 573 insertions(+), 455 deletions(-) create mode 100644 feature/buildfeatures/feature_debug_disabled.go create mode 100644 feature/buildfeatures/feature_debug_enabled.go create mode 100644 ipn/localapi/debug.go diff --git a/client/local/local.go b/client/local/local.go index a3717ad77..8da8f57e5 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -31,6 +31,8 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/drive" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/net/netutil" @@ -608,6 +610,9 @@ func (lc *Client) SetDevStoreKeyValue(ctx context.Context, key, value string) er // the provided duration. If the duration is in the past, the debug logging // is disabled. 
func (lc *Client) SetComponentDebugLogging(ctx context.Context, component string, d time.Duration) error { + if !buildfeatures.HasDebug { + return feature.ErrUnavailable + } body, err := lc.send(ctx, "POST", fmt.Sprintf("/localapi/v0/component-debug-logging?component=%s&secs=%d", url.QueryEscape(component), int64(d.Seconds())), 200, nil) @@ -862,6 +867,9 @@ func (lc *Client) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Pref // GetDNSOSConfig returns the system DNS configuration for the current device. // That is, it returns the DNS configuration that the system would use if Tailscale weren't being used. func (lc *Client) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig, error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } body, err := lc.get200(ctx, "/localapi/v0/dns-osconfig") if err != nil { return nil, err @@ -877,6 +885,9 @@ func (lc *Client) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig, err // It returns the raw DNS response bytes and the resolvers that were used to answer the query // (often just one, but can be more if we raced multiple resolvers). func (lc *Client) QueryDNS(ctx context.Context, name string, queryType string) (bytes []byte, resolvers []*dnstype.Resolver, err error) { + if !buildfeatures.HasDNS { + return nil, nil, feature.ErrUnavailable + } body, err := lc.get200(ctx, fmt.Sprintf("/localapi/v0/dns-query?name=%s&type=%s", url.QueryEscape(name), queryType)) if err != nil { return nil, nil, err diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 7e994300b..0fe1538fd 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -106,7 +106,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/sockstats from tailscale.com/control/controlclient+ - tailscale.com/net/stun from tailscale.com/ipn/localapi+ + tailscale.com/net/stun from tailscale.com/net/netcheck+ tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ @@ -141,7 +141,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/mapx from tailscale.com/ipn/ipnext tailscale.com/types/netlogtype from tailscale.com/net/connstats tailscale.com/types/netmap from tailscale.com/control/controlclient+ - tailscale.com/types/nettype from tailscale.com/ipn/localapi+ + tailscale.com/types/nettype from tailscale.com/net/batching+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index d7f88c32c..1932e9791 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -129,7 +129,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/sockstats from tailscale.com/control/controlclient+ - tailscale.com/net/stun from tailscale.com/ipn/localapi+ + tailscale.com/net/stun from tailscale.com/net/netcheck+ 
tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ @@ -166,7 +166,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/mapx from tailscale.com/ipn/ipnext tailscale.com/types/netlogtype from tailscale.com/net/connstats tailscale.com/types/netmap from tailscale.com/control/controlclient+ - tailscale.com/types/nettype from tailscale.com/ipn/localapi+ + tailscale.com/types/nettype from tailscale.com/net/batching+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 3a40aa6fd..54f2de1c9 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1193,7 +1193,7 @@ func (c *Direct) handleDebugMessage(ctx context.Context, debug *tailcfg.Debug) e c.logf("exiting process with status %v per controlplane", *code) os.Exit(*code) } - if debug.DisableLogTail { + if buildfeatures.HasLogTail && debug.DisableLogTail { logtail.Disable() envknob.SetNoLogsNoSupport() } diff --git a/feature/buildfeatures/feature_debug_disabled.go b/feature/buildfeatures/feature_debug_disabled.go new file mode 100644 index 000000000..eb048c082 --- /dev/null +++ b/feature/buildfeatures/feature_debug_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_debug + +package buildfeatures + +// HasDebug is whether the binary was built with support for modular feature "various debug support, for things that don't have or need their own more specific feature". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debug" build tag. +// It's a const so it can be used for dead code elimination. +const HasDebug = false diff --git a/feature/buildfeatures/feature_debug_enabled.go b/feature/buildfeatures/feature_debug_enabled.go new file mode 100644 index 000000000..12a2700a4 --- /dev/null +++ b/feature/buildfeatures/feature_debug_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_debug + +package buildfeatures + +// HasDebug is whether the binary was built with support for modular feature "various debug support, for things that don't have or need their own more specific feature". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debug" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasDebug = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index b85d1b9dc..7cfc79f65 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -97,6 +97,7 @@ var Features = map[FeatureTag]FeatureMeta{ "clientupdate": {"ClientUpdate", "Client auto-update support", nil}, "completion": {"Completion", "CLI shell completion", nil}, "dbus": {"DBus", "Linux DBus support", nil}, + "debug": {"Debug", "various debug support, for things that don't have or need their own more specific feature", nil}, "debugeventbus": {"DebugEventBus", "eventbus debug support", nil}, "debugportmapper": { Sym: "DebugPortMapper", diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 38c65fee8..f064628fc 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -15,6 +15,8 @@ import ( "time" "tailscale.com/control/controlclient" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/net/sockstats" "tailscale.com/tailcfg" @@ -130,6 +132,10 @@ func handleC2NLogtailFlush(b *LocalBackend, w http.ResponseWriter, r *http.Reque } func handleC2NDebugNetMap(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } ctx := r.Context() if r.Method != httpm.POST && r.Method != httpm.GET { http.Error(w, "method not allowed", http.StatusMethodNotAllowed) @@ -190,20 +196,36 @@ func handleC2NDebugNetMap(b *LocalBackend, w http.ResponseWriter, r *http.Reques } func handleC2NDebugGoroutines(_ *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } w.Header().Set("Content-Type", "text/plain") w.Write(goroutines.ScrubbedGoroutineDump(true)) } func handleC2NDebugPrefs(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } writeJSON(w, b.Prefs()) } func handleC2NDebugMetrics(_ *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } w.Header().Set("Content-Type", "text/plain") clientmetric.WritePrometheusExpositionFormat(w) } func handleC2NDebugComponentLogging(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } component := r.FormValue("component") secs, _ := strconv.Atoi(r.FormValue("secs")) if secs == 0 { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 3b55fd324..c3d7d3fb8 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -557,12 +557,14 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.logf("[unexpected] failed to wire up PeerAPI port for engine %T", e) } - for _, component := range ipn.DebuggableComponents { - key := componentStateKey(component) - if ut, err := ipn.ReadStoreInt(pm.Store(), key); err == nil { - if until := time.Unix(ut, 0); until.After(b.clock.Now()) { - // conditional to avoid log spam at start when off - b.SetComponentDebugLogging(component, until) + if buildfeatures.HasDebug { + for _, component := range ipn.DebuggableComponents { + key := componentStateKey(component) + if ut, err := ipn.ReadStoreInt(pm.Store(), key); 
err == nil { + if until := time.Unix(ut, 0); until.After(b.clock.Now()) { + // conditional to avoid log spam at start when off + b.SetComponentDebugLogging(component, until) + } } } } @@ -666,6 +668,9 @@ func componentStateKey(component string) ipn.StateKey { // - magicsock // - sockstats func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Time) error { + if !buildfeatures.HasDebug { + return feature.ErrUnavailable + } b.mu.Lock() defer b.mu.Unlock() @@ -790,6 +795,9 @@ func (b *LocalBackend) QueryDNS(name string, queryType dnsmessage.Type) (res []b // enabled until, or the zero time if component's time is not currently // enabled. func (b *LocalBackend) GetComponentDebugLogging(component string) time.Time { + if !buildfeatures.HasDebug { + return time.Time{} + } b.mu.Lock() defer b.mu.Unlock() diff --git a/ipn/localapi/debug.go b/ipn/localapi/debug.go new file mode 100644 index 000000000..b3b919d31 --- /dev/null +++ b/ipn/localapi/debug.go @@ -0,0 +1,465 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_debug + +package localapi + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/netip" + "reflect" + "slices" + "strconv" + "sync" + "time" + + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" + "tailscale.com/ipn" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" + "tailscale.com/util/httpm" +) + +func init() { + Register("component-debug-logging", (*Handler).serveComponentDebugLogging) + Register("debug", (*Handler).serveDebug) + Register("dev-set-state-store", (*Handler).serveDevSetStateStore) + Register("debug-bus-events", (*Handler).serveDebugBusEvents) + Register("debug-bus-graph", (*Handler).serveEventBusGraph) + Register("debug-derp-region", (*Handler).serveDebugDERPRegion) + Register("debug-dial-types", (*Handler).serveDebugDialTypes) + Register("debug-log", (*Handler).serveDebugLog) + Register("debug-packet-filter-matches", (*Handler).serveDebugPacketFilterMatches) + Register("debug-packet-filter-rules", (*Handler).serveDebugPacketFilterRules) + Register("debug-peer-endpoint-changes", (*Handler).serveDebugPeerEndpointChanges) +} + +func (h *Handler) serveDebugPeerEndpointChanges(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "status access denied", http.StatusForbidden) + return + } + + ipStr := r.FormValue("ip") + if ipStr == "" { + http.Error(w, "missing 'ip' parameter", http.StatusBadRequest) + return + } + ip, err := netip.ParseAddr(ipStr) + if err != nil { + http.Error(w, "invalid IP", http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/json") + chs, err := h.b.GetPeerEndpointChanges(r.Context(), ip) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + e := json.NewEncoder(w) + e.SetIndent("", "\t") + e.Encode(chs) +} + +func (h *Handler) serveComponentDebugLogging(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + component := r.FormValue("component") + secs, _ := strconv.Atoi(r.FormValue("secs")) + err := h.b.SetComponentDebugLogging(component, h.clock.Now().Add(time.Duration(secs)*time.Second)) + var res struct { + Error string + } + if err != nil { + res.Error = err.Error() + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +func (h *Handler) serveDebugDialTypes(w http.ResponseWriter, r 
*http.Request) { + if !h.PermitWrite { + http.Error(w, "debug-dial-types access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) + return + } + + ip := r.FormValue("ip") + port := r.FormValue("port") + network := r.FormValue("network") + + addr := ip + ":" + port + if _, err := netip.ParseAddrPort(addr); err != nil { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "invalid address %q: %v", addr, err) + return + } + + ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) + defer cancel() + + var bareDialer net.Dialer + + dialer := h.b.Dialer() + + var peerDialer net.Dialer + peerDialer.Control = dialer.PeerDialControlFunc() + + // Kick off a dial with each available dialer in parallel. + dialers := []struct { + name string + dial func(context.Context, string, string) (net.Conn, error) + }{ + {"SystemDial", dialer.SystemDial}, + {"UserDial", dialer.UserDial}, + {"PeerDial", peerDialer.DialContext}, + {"BareDial", bareDialer.DialContext}, + } + type result struct { + name string + conn net.Conn + err error + } + results := make(chan result, len(dialers)) + + var wg sync.WaitGroup + for _, dialer := range dialers { + dialer := dialer // loop capture + + wg.Add(1) + go func() { + defer wg.Done() + conn, err := dialer.dial(ctx, network, addr) + results <- result{dialer.name, conn, err} + }() + } + + wg.Wait() + for range len(dialers) { + res := <-results + fmt.Fprintf(w, "[%s] connected=%v err=%v\n", res.name, res.conn != nil, res.err) + if res.conn != nil { + res.conn.Close() + } + } +} + +func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, "debug not supported in this build", http.StatusNotImplemented) + return + } + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "POST required", http.StatusMethodNotAllowed) + return + } + // The action is normally in a POST form parameter, but + // some actions (like "notify") want a full JSON body, so + // permit some to have their action in a header. 
+ var action string + switch v := r.Header.Get("Debug-Action"); v { + case "notify": + action = v + default: + action = r.FormValue("action") + } + var err error + switch action { + case "derp-set-homeless": + h.b.MagicConn().SetHomeless(true) + case "derp-unset-homeless": + h.b.MagicConn().SetHomeless(false) + case "rebind": + err = h.b.DebugRebind() + case "restun": + err = h.b.DebugReSTUN() + case "notify": + var n ipn.Notify + err = json.NewDecoder(r.Body).Decode(&n) + if err != nil { + break + } + h.b.DebugNotify(n) + case "notify-last-netmap": + h.b.DebugNotifyLastNetMap() + case "break-tcp-conns": + err = h.b.DebugBreakTCPConns() + case "break-derp-conns": + err = h.b.DebugBreakDERPConns() + case "force-netmap-update": + h.b.DebugForceNetmapUpdate() + case "control-knobs": + k := h.b.ControlKnobs() + w.Header().Set("Content-Type", "application/json") + err = json.NewEncoder(w).Encode(k.AsDebugJSON()) + if err == nil { + return + } + case "pick-new-derp": + err = h.b.DebugPickNewDERP() + case "force-prefer-derp": + var n int + err = json.NewDecoder(r.Body).Decode(&n) + if err != nil { + break + } + h.b.DebugForcePreferDERP(n) + case "peer-relay-servers": + servers := h.b.DebugPeerRelayServers().Slice() + slices.SortFunc(servers, func(a, b netip.Addr) int { + return a.Compare(b) + }) + err = json.NewEncoder(w).Encode(servers) + if err == nil { + return + } + case "": + err = fmt.Errorf("missing parameter 'action'") + default: + err = fmt.Errorf("unknown action %q", action) + } + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "text/plain") + io.WriteString(w, "done\n") +} + +func (h *Handler) serveDevSetStateStore(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "POST required", http.StatusMethodNotAllowed) + return + } + if err := h.b.SetDevStateStore(r.FormValue("key"), r.FormValue("value")); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "text/plain") + io.WriteString(w, "done\n") +} + +func (h *Handler) serveDebugPacketFilterRules(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + nm := h.b.NetMap() + if nm == nil { + http.Error(w, "no netmap", http.StatusNotFound) + return + } + w.Header().Set("Content-Type", "application/json") + + enc := json.NewEncoder(w) + enc.SetIndent("", "\t") + enc.Encode(nm.PacketFilterRules) +} + +func (h *Handler) serveDebugPacketFilterMatches(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + nm := h.b.NetMap() + if nm == nil { + http.Error(w, "no netmap", http.StatusNotFound) + return + } + w.Header().Set("Content-Type", "application/json") + + enc := json.NewEncoder(w) + enc.SetIndent("", "\t") + enc.Encode(nm.PacketFilter) +} + +// debugEventError provides the JSON encoding of internal errors from event processing. +type debugEventError struct { + Error string +} + +// serveDebugBusEvents taps into the tailscaled/utils/eventbus and streams +// events to the client. +func (h *Handler) serveDebugBusEvents(w http.ResponseWriter, r *http.Request) { + // Require write access (~root) as the logs could contain something + // sensitive. 
+ if !h.PermitWrite { + http.Error(w, "event bus access denied", http.StatusForbidden) + return + } + if r.Method != httpm.GET { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + bus, ok := h.LocalBackend().Sys().Bus.GetOK() + if !ok { + http.Error(w, "event bus not running", http.StatusNoContent) + return + } + + f, ok := w.(http.Flusher) + if !ok { + http.Error(w, "streaming unsupported", http.StatusInternalServerError) + return + } + + io.WriteString(w, `{"Event":"[event listener connected]\n"}`+"\n") + f.Flush() + + mon := bus.Debugger().WatchBus() + defer mon.Close() + + i := 0 + for { + select { + case <-r.Context().Done(): + fmt.Fprintf(w, `{"Event":"[event listener closed]\n"}`) + return + case <-mon.Done(): + return + case event := <-mon.Events(): + data := eventbus.DebugEvent{ + Count: i, + Type: reflect.TypeOf(event.Event).String(), + Event: event.Event, + From: event.From.Name(), + } + for _, client := range event.To { + data.To = append(data.To, client.Name()) + } + + if msg, err := json.Marshal(data); err != nil { + data.Event = debugEventError{Error: fmt.Sprintf( + "failed to marshal JSON for %T", event.Event, + )} + if errMsg, err := json.Marshal(data); err != nil { + fmt.Fprintf(w, + `{"Count": %d, "Event":"[ERROR] failed to marshal JSON for %T\n"}`, + i, event.Event) + } else { + w.Write(errMsg) + } + } else { + w.Write(msg) + } + f.Flush() + i++ + } + } +} + +// serveEventBusGraph taps into the event bus and dumps out the active graph of +// publishers and subscribers. It does not represent anything about the messages +// exchanged. +func (h *Handler) serveEventBusGraph(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + bus, ok := h.LocalBackend().Sys().Bus.GetOK() + if !ok { + http.Error(w, "event bus not running", http.StatusPreconditionFailed) + return + } + + debugger := bus.Debugger() + clients := debugger.Clients() + + graph := map[string]eventbus.DebugTopic{} + + for _, client := range clients { + for _, pub := range debugger.PublishTypes(client) { + topic, ok := graph[pub.Name()] + if !ok { + topic = eventbus.DebugTopic{Name: pub.Name()} + } + topic.Publisher = client.Name() + graph[pub.Name()] = topic + } + for _, sub := range debugger.SubscribeTypes(client) { + topic, ok := graph[sub.Name()] + if !ok { + topic = eventbus.DebugTopic{Name: sub.Name()} + } + topic.Subscribers = append(topic.Subscribers, client.Name()) + graph[sub.Name()] = topic + } + } + + // The top level map is not really needed for the client, convert to a list. 
+ topics := eventbus.DebugTopics{} + for _, v := range graph { + topics.Topics = append(topics.Topics, v) + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(topics) +} + +func (h *Handler) serveDebugLog(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasLogTail { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } + if !h.PermitRead { + http.Error(w, "debug-log access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) + return + } + defer h.b.TryFlushLogs() // kick off upload after we're done logging + + type logRequestJSON struct { + Lines []string + Prefix string + } + + var logRequest logRequestJSON + if err := json.NewDecoder(r.Body).Decode(&logRequest); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + prefix := logRequest.Prefix + if prefix == "" { + prefix = "debug-log" + } + logf := logger.WithPrefix(h.logf, prefix+": ") + + // We can write logs too fast for logtail to handle, even when + // opting-out of rate limits. Limit ourselves to at most one message + // per 20ms and a burst of 60 log lines, which should be fast enough to + // not block for too long but slow enough that we can upload all lines. + logf = logger.SlowLoggerWithClock(r.Context(), logf, 20*time.Millisecond, 60, h.clock.Now) + + for _, line := range logRequest.Lines { + logf("%s", line) + } + + w.WriteHeader(http.StatusNoContent) +} diff --git a/ipn/localapi/debugderp.go b/ipn/localapi/debugderp.go index 017b90692..3edbc0856 100644 --- a/ipn/localapi/debugderp.go +++ b/ipn/localapi/debugderp.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_debug + package localapi import ( diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 404516942..3948b4293 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -7,7 +7,6 @@ package localapi import ( "bytes" "cmp" - "context" "encoding/json" "errors" "fmt" @@ -16,7 +15,6 @@ import ( "net/http" "net/netip" "net/url" - "reflect" "runtime" "slices" "strconv" @@ -80,18 +78,7 @@ var handler = map[string]LocalAPIHandler{ "check-prefs": (*Handler).serveCheckPrefs, "check-reverse-path-filtering": (*Handler).serveCheckReversePathFiltering, "check-udp-gro-forwarding": (*Handler).serveCheckUDPGROForwarding, - "component-debug-logging": (*Handler).serveComponentDebugLogging, - "debug": (*Handler).serveDebug, - "debug-bus-events": (*Handler).serveDebugBusEvents, - "debug-bus-graph": (*Handler).serveEventBusGraph, - "debug-derp-region": (*Handler).serveDebugDERPRegion, - "debug-dial-types": (*Handler).serveDebugDialTypes, - "debug-log": (*Handler).serveDebugLog, - "debug-packet-filter-matches": (*Handler).serveDebugPacketFilterMatches, - "debug-packet-filter-rules": (*Handler).serveDebugPacketFilterRules, - "debug-peer-endpoint-changes": (*Handler).serveDebugPeerEndpointChanges, "derpmap": (*Handler).serveDERPMap, - "dev-set-state-store": (*Handler).serveDevSetStateStore, "dial": (*Handler).serveDial, "disconnect-control": (*Handler).disconnectControl, "dns-osconfig": (*Handler).serveDNSOSConfig, @@ -638,352 +625,6 @@ func (h *Handler) serveUserMetrics(w http.ResponseWriter, r *http.Request) { h.b.UserMetricsRegistry().Handler(w, r) } -func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", 
http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "POST required", http.StatusMethodNotAllowed) - return - } - // The action is normally in a POST form parameter, but - // some actions (like "notify") want a full JSON body, so - // permit some to have their action in a header. - var action string - switch v := r.Header.Get("Debug-Action"); v { - case "notify": - action = v - default: - action = r.FormValue("action") - } - var err error - switch action { - case "derp-set-homeless": - h.b.MagicConn().SetHomeless(true) - case "derp-unset-homeless": - h.b.MagicConn().SetHomeless(false) - case "rebind": - err = h.b.DebugRebind() - case "restun": - err = h.b.DebugReSTUN() - case "notify": - var n ipn.Notify - err = json.NewDecoder(r.Body).Decode(&n) - if err != nil { - break - } - h.b.DebugNotify(n) - case "notify-last-netmap": - h.b.DebugNotifyLastNetMap() - case "break-tcp-conns": - err = h.b.DebugBreakTCPConns() - case "break-derp-conns": - err = h.b.DebugBreakDERPConns() - case "force-netmap-update": - h.b.DebugForceNetmapUpdate() - case "control-knobs": - k := h.b.ControlKnobs() - w.Header().Set("Content-Type", "application/json") - err = json.NewEncoder(w).Encode(k.AsDebugJSON()) - if err == nil { - return - } - case "pick-new-derp": - err = h.b.DebugPickNewDERP() - case "force-prefer-derp": - var n int - err = json.NewDecoder(r.Body).Decode(&n) - if err != nil { - break - } - h.b.DebugForcePreferDERP(n) - case "peer-relay-servers": - servers := h.b.DebugPeerRelayServers().Slice() - slices.SortFunc(servers, func(a, b netip.Addr) int { - return a.Compare(b) - }) - err = json.NewEncoder(w).Encode(servers) - if err == nil { - return - } - case "": - err = fmt.Errorf("missing parameter 'action'") - default: - err = fmt.Errorf("unknown action %q", action) - } - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - w.Header().Set("Content-Type", "text/plain") - io.WriteString(w, "done\n") -} - -func (h *Handler) serveDevSetStateStore(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "POST required", http.StatusMethodNotAllowed) - return - } - if err := h.b.SetDevStateStore(r.FormValue("key"), r.FormValue("value")); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "text/plain") - io.WriteString(w, "done\n") -} - -func (h *Handler) serveDebugPacketFilterRules(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - nm := h.b.NetMap() - if nm == nil { - http.Error(w, "no netmap", http.StatusNotFound) - return - } - w.Header().Set("Content-Type", "application/json") - - enc := json.NewEncoder(w) - enc.SetIndent("", "\t") - enc.Encode(nm.PacketFilterRules) -} - -func (h *Handler) serveDebugPacketFilterMatches(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - nm := h.b.NetMap() - if nm == nil { - http.Error(w, "no netmap", http.StatusNotFound) - return - } - w.Header().Set("Content-Type", "application/json") - - enc := json.NewEncoder(w) - enc.SetIndent("", "\t") - enc.Encode(nm.PacketFilter) -} - -// EventError provides the JSON encoding of internal errors from event processing. 
-type EventError struct { - Error string -} - -// serveDebugBusEvents taps into the tailscaled/utils/eventbus and streams -// events to the client. -func (h *Handler) serveDebugBusEvents(w http.ResponseWriter, r *http.Request) { - // Require write access (~root) as the logs could contain something - // sensitive. - if !h.PermitWrite { - http.Error(w, "event bus access denied", http.StatusForbidden) - return - } - if r.Method != httpm.GET { - http.Error(w, "GET required", http.StatusMethodNotAllowed) - return - } - - bus, ok := h.LocalBackend().Sys().Bus.GetOK() - if !ok { - http.Error(w, "event bus not running", http.StatusNoContent) - return - } - - f, ok := w.(http.Flusher) - if !ok { - http.Error(w, "streaming unsupported", http.StatusInternalServerError) - return - } - - io.WriteString(w, `{"Event":"[event listener connected]\n"}`+"\n") - f.Flush() - - mon := bus.Debugger().WatchBus() - defer mon.Close() - - i := 0 - for { - select { - case <-r.Context().Done(): - fmt.Fprintf(w, `{"Event":"[event listener closed]\n"}`) - return - case <-mon.Done(): - return - case event := <-mon.Events(): - data := eventbus.DebugEvent{ - Count: i, - Type: reflect.TypeOf(event.Event).String(), - Event: event.Event, - From: event.From.Name(), - } - for _, client := range event.To { - data.To = append(data.To, client.Name()) - } - - if msg, err := json.Marshal(data); err != nil { - data.Event = EventError{Error: fmt.Sprintf( - "failed to marshal JSON for %T", event.Event, - )} - if errMsg, err := json.Marshal(data); err != nil { - fmt.Fprintf(w, - `{"Count": %d, "Event":"[ERROR] failed to marshal JSON for %T\n"}`, - i, event.Event) - } else { - w.Write(errMsg) - } - } else { - w.Write(msg) - } - f.Flush() - i++ - } - } -} - -// serveEventBusGraph taps into the event bus and dumps out the active graph of -// publishers and subscribers. It does not represent anything about the messages -// exchanged. -func (h *Handler) serveEventBusGraph(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.GET { - http.Error(w, "GET required", http.StatusMethodNotAllowed) - return - } - - bus, ok := h.LocalBackend().Sys().Bus.GetOK() - if !ok { - http.Error(w, "event bus not running", http.StatusPreconditionFailed) - return - } - - debugger := bus.Debugger() - clients := debugger.Clients() - - graph := map[string]eventbus.DebugTopic{} - - for _, client := range clients { - for _, pub := range debugger.PublishTypes(client) { - topic, ok := graph[pub.Name()] - if !ok { - topic = eventbus.DebugTopic{Name: pub.Name()} - } - topic.Publisher = client.Name() - graph[pub.Name()] = topic - } - for _, sub := range debugger.SubscribeTypes(client) { - topic, ok := graph[sub.Name()] - if !ok { - topic = eventbus.DebugTopic{Name: sub.Name()} - } - topic.Subscribers = append(topic.Subscribers, client.Name()) - graph[sub.Name()] = topic - } - } - - // The top level map is not really needed for the client, convert to a list. 
- topics := eventbus.DebugTopics{} - for _, v := range graph { - topics.Topics = append(topics.Topics, v) - } - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(topics) -} - -func (h *Handler) serveComponentDebugLogging(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - component := r.FormValue("component") - secs, _ := strconv.Atoi(r.FormValue("secs")) - err := h.b.SetComponentDebugLogging(component, h.clock.Now().Add(time.Duration(secs)*time.Second)) - var res struct { - Error string - } - if err != nil { - res.Error = err.Error() - } - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - -func (h *Handler) serveDebugDialTypes(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug-dial-types access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) - return - } - - ip := r.FormValue("ip") - port := r.FormValue("port") - network := r.FormValue("network") - - addr := ip + ":" + port - if _, err := netip.ParseAddrPort(addr); err != nil { - w.WriteHeader(http.StatusBadRequest) - fmt.Fprintf(w, "invalid address %q: %v", addr, err) - return - } - - ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) - defer cancel() - - var bareDialer net.Dialer - - dialer := h.b.Dialer() - - var peerDialer net.Dialer - peerDialer.Control = dialer.PeerDialControlFunc() - - // Kick off a dial with each available dialer in parallel. - dialers := []struct { - name string - dial func(context.Context, string, string) (net.Conn, error) - }{ - {"SystemDial", dialer.SystemDial}, - {"UserDial", dialer.UserDial}, - {"PeerDial", peerDialer.DialContext}, - {"BareDial", bareDialer.DialContext}, - } - type result struct { - name string - conn net.Conn - err error - } - results := make(chan result, len(dialers)) - - var wg sync.WaitGroup - for _, dialer := range dialers { - dialer := dialer // loop capture - - wg.Add(1) - go func() { - defer wg.Done() - conn, err := dialer.dial(ctx, network, addr) - results <- result{dialer.name, conn, err} - }() - } - - wg.Wait() - for range len(dialers) { - res := <-results - fmt.Fprintf(w, "[%s] connected=%v err=%v\n", res.name, res.conn != nil, res.err) - if res.conn != nil { - res.conn.Close() - } - } -} - // servePprofFunc is the implementation of Handler.servePprof, after auth, // for platforms where we want to link it in. 
var servePprofFunc func(http.ResponseWriter, *http.Request) @@ -1116,6 +757,10 @@ func (h *Handler) serveCheckUDPGROForwarding(w http.ResponseWriter, r *http.Requ } func (h *Handler) serveSetUDPGROForwarding(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasGRO { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if !h.PermitWrite { http.Error(w, "UDP GRO forwarding set access denied", http.StatusForbidden) return @@ -1149,34 +794,6 @@ func (h *Handler) serveStatus(w http.ResponseWriter, r *http.Request) { e.Encode(st) } -func (h *Handler) serveDebugPeerEndpointChanges(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "status access denied", http.StatusForbidden) - return - } - - ipStr := r.FormValue("ip") - if ipStr == "" { - http.Error(w, "missing 'ip' parameter", http.StatusBadRequest) - return - } - ip, err := netip.ParseAddr(ipStr) - if err != nil { - http.Error(w, "invalid IP", http.StatusBadRequest) - return - } - w.Header().Set("Content-Type", "application/json") - chs, err := h.b.GetPeerEndpointChanges(r.Context(), ip) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - e := json.NewEncoder(w) - e.SetIndent("", "\t") - e.Encode(chs) -} - // InUseOtherUserIPNStream reports whether r is a request for the watch-ipn-bus // handler. If so, it writes an ipn.Notify InUseOtherUser message to the user // and returns true. Otherwise it returns false, in which case it doesn't write @@ -1842,47 +1459,6 @@ func defBool(a string, def bool) bool { return v } -func (h *Handler) serveDebugLog(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "debug-log access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) - return - } - defer h.b.TryFlushLogs() // kick off upload after we're done logging - - type logRequestJSON struct { - Lines []string - Prefix string - } - - var logRequest logRequestJSON - if err := json.NewDecoder(r.Body).Decode(&logRequest); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - prefix := logRequest.Prefix - if prefix == "" { - prefix = "debug-log" - } - logf := logger.WithPrefix(h.logf, prefix+": ") - - // We can write logs too fast for logtail to handle, even when - // opting-out of rate limits. Limit ourselves to at most one message - // per 20ms and a burst of 60 log lines, which should be fast enough to - // not block for too long but slow enough that we can upload all lines. - logf = logger.SlowLoggerWithClock(r.Context(), logf, 20*time.Millisecond, 60, h.clock.Now) - - for _, line := range logRequest.Lines { - logf("%s", line) - } - - w.WriteHeader(http.StatusNoContent) -} - // serveUpdateCheck returns the ClientVersion from Status, which contains // information on whether an update is available, and if so, what version, // *if* we support auto-updates on this platform. If we don't, this endpoint @@ -1917,7 +1493,7 @@ func (h *Handler) serveUpdateCheck(w http.ResponseWriter, r *http.Request) { // supported by the OS. 
func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { if !buildfeatures.HasDNS { - http.NotFound(w, r) + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) return } if r.Method != httpm.GET { @@ -1964,7 +1540,7 @@ func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { // The response if successful is a DNSQueryResponse JSON object. func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { if !buildfeatures.HasDNS { - http.NotFound(w, r) + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) return } if r.Method != httpm.GET { diff --git a/ipn/localapi/syspolicy_api.go b/ipn/localapi/syspolicy_api.go index a438d352b..edb82e042 100644 --- a/ipn/localapi/syspolicy_api.go +++ b/ipn/localapi/syspolicy_api.go @@ -17,7 +17,7 @@ import ( ) func init() { - handler["policy/"] = (*Handler).servePolicy + Register("policy/", (*Handler).servePolicy) } func (h *Handler) servePolicy(w http.ResponseWriter, r *http.Request) { diff --git a/ipn/localapi/tailnetlock.go b/ipn/localapi/tailnetlock.go index 797150938..4baadb733 100644 --- a/ipn/localapi/tailnetlock.go +++ b/ipn/localapi/tailnetlock.go @@ -18,19 +18,19 @@ import ( ) func init() { - handler["tka/affected-sigs"] = (*Handler).serveTKAAffectedSigs - handler["tka/cosign-recovery-aum"] = (*Handler).serveTKACosignRecoveryAUM - handler["tka/disable"] = (*Handler).serveTKADisable - handler["tka/force-local-disable"] = (*Handler).serveTKALocalDisable - handler["tka/generate-recovery-aum"] = (*Handler).serveTKAGenerateRecoveryAUM - handler["tka/init"] = (*Handler).serveTKAInit - handler["tka/log"] = (*Handler).serveTKALog - handler["tka/modify"] = (*Handler).serveTKAModify - handler["tka/sign"] = (*Handler).serveTKASign - handler["tka/status"] = (*Handler).serveTKAStatus - handler["tka/submit-recovery-aum"] = (*Handler).serveTKASubmitRecoveryAUM - handler["tka/verify-deeplink"] = (*Handler).serveTKAVerifySigningDeeplink - handler["tka/wrap-preauth-key"] = (*Handler).serveTKAWrapPreauthKey + Register("tka/affected-sigs", (*Handler).serveTKAAffectedSigs) + Register("tka/cosign-recovery-aum", (*Handler).serveTKACosignRecoveryAUM) + Register("tka/disable", (*Handler).serveTKADisable) + Register("tka/force-local-disable", (*Handler).serveTKALocalDisable) + Register("tka/generate-recovery-aum", (*Handler).serveTKAGenerateRecoveryAUM) + Register("tka/init", (*Handler).serveTKAInit) + Register("tka/log", (*Handler).serveTKALog) + Register("tka/modify", (*Handler).serveTKAModify) + Register("tka/sign", (*Handler).serveTKASign) + Register("tka/status", (*Handler).serveTKAStatus) + Register("tka/submit-recovery-aum", (*Handler).serveTKASubmitRecoveryAUM) + Register("tka/verify-deeplink", (*Handler).serveTKAVerifySigningDeeplink) + Register("tka/wrap-preauth-key", (*Handler).serveTKAWrapPreauthKey) } func (h *Handler) serveTKAStatus(w http.ResponseWriter, r *http.Request) { diff --git a/wgengine/magicsock/debughttp.go b/wgengine/magicsock/debughttp.go index a0159d21e..9aecab74b 100644 --- a/wgengine/magicsock/debughttp.go +++ b/wgengine/magicsock/debughttp.go @@ -13,6 +13,8 @@ import ( "strings" "time" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/tailcfg" "tailscale.com/tstime/mono" "tailscale.com/types/key" @@ -24,6 +26,11 @@ import ( // /debug/magicsock) or via peerapi to a peer that's owned by the same // user (so they can e.g. inspect their phones). 
func (c *Conn) ServeHTTPDebug(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } + c.mu.Lock() defer c.mu.Unlock() From be6cfa00cb5090c0922949bf9d543688a49131d6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 11:25:10 -0700 Subject: [PATCH 1447/1708] util/eventbus: when ts_omit_debugeventbus is set, don't import tsweb I'm trying to remove the "regexp" and "regexp/syntax" packages from our minimal builds. But tsweb pulls in regexp (via net/http/pprof etc) and util/eventbus was importing the tsweb for no reason. Updates #12614 Change-Id: Ifa8c371ece348f1dbf80d6b251381f3ed39d5fbd Signed-off-by: Brad Fitzpatrick --- util/eventbus/debughttp_off.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/util/eventbus/debughttp_off.go b/util/eventbus/debughttp_off.go index 7d9fb327c..ed491f1f2 100644 --- a/util/eventbus/debughttp_off.go +++ b/util/eventbus/debughttp_off.go @@ -5,9 +5,7 @@ package eventbus -import "tailscale.com/tsweb" - -func registerHTTPDebugger(d *Debugger, td *tsweb.DebugHandler) { +func registerHTTPDebugger(d *Debugger, tsWebDebugHandler any) { // The event bus debugging UI uses html/template, which uses // reflection for method lookups. This forces the compiler to // retain a lot more code and information to make dynamic method From 840c7668e2e5eb5d3fa72913afc56544a3038fdd Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Tue, 30 Sep 2025 11:53:07 -0700 Subject: [PATCH 1448/1708] types/key: add IsZero method to HardwareAttestationKey (#17370) We will need this for unmarshaling node prefs: use the zero HardwareAttestationKey implementation when parsing and later check `IsZero` to see if anything was loaded. Updates #15830 Signed-off-by: Andrew Lytvynov --- feature/tpm/attestation.go | 2 ++ types/key/hardware_attestation.go | 1 + 2 files changed, 3 insertions(+) diff --git a/feature/tpm/attestation.go b/feature/tpm/attestation.go index 4b3018569..92617f995 100644 --- a/feature/tpm/attestation.go +++ b/feature/tpm/attestation.go @@ -262,3 +262,5 @@ func (ak *attestationKey) Clone() key.HardwareAttestationKey { pub: ak.pub, } } + +func (ak *attestationKey) IsZero() bool { return !ak.loaded() } diff --git a/types/key/hardware_attestation.go b/types/key/hardware_attestation.go index ac3914ab2..9d4a21ee4 100644 --- a/types/key/hardware_attestation.go +++ b/types/key/hardware_attestation.go @@ -32,6 +32,7 @@ type HardwareAttestationKey interface { json.Unmarshaler io.Closer Clone() HardwareAttestationKey + IsZero() bool } // HardwareAttestationPublicFromPlatformKey creates a HardwareAttestationPublic From 9386a101d88521325c460c4e56b092a801c07d1d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 11:54:16 -0700 Subject: [PATCH 1449/1708] cmd/tailscaled, ipn/localapi, util/eventbus: don't link in regexp when debug is omitted Saves 442 KB. Lock it with a new min test. 
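The mechanism is the same one used for the other omittable features in this
series: the always-compiled code declares a typed feature.Hook, and the
optional file (built only without ts_omit_debug) fills it in from an init
func. A rough sketch of that shape, with simplified names rather than the
exact tailscaled code:

    // main.go (always compiled)
    package main

    import (
        "net/http"

        "tailscale.com/feature"
        "tailscale.com/feature/buildfeatures"
    )

    // hookNewDebugMux is populated by the optional debug code, if linked in.
    var hookNewDebugMux feature.Hook[func() *http.ServeMux]

    func maybeDebugMux() *http.ServeMux {
        // HasDebug is a const, so with ts_omit_debug this branch (and
        // everything only it references) is provably dead.
        if !buildfeatures.HasDebug {
            return nil
        }
        if f, ok := hookNewDebugMux.GetOk(); ok {
            return f()
        }
        return nil
    }

    // debug.go (compiled only without ts_omit_debug)
    //go:build !ts_omit_debug

    package main

    import (
        "net/http"
        "net/http/pprof"
    )

    func init() {
        hookNewDebugMux.Set(func() *http.ServeMux {
            mux := http.NewServeMux()
            mux.HandleFunc("/debug/pprof/", pprof.Index)
            return mux
        })
    }

The actual change below registers the "debug" subcommand and the full debug
mux this way, so net/http/pprof (and, through it, regexp) is only linked when
the debug feature is included.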
Updates #12614 Change-Id: Ia7bf6f797b6cbf08ea65419ade2f359d390f8e91 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/debug.go | 21 ++++++++++-- cmd/tailscaled/depaware-min.txt | 20 ++++------- cmd/tailscaled/depaware-minbox.txt | 20 +++++------ cmd/tailscaled/deps_test.go | 55 +++++++++++++++++++++++------- cmd/tailscaled/tailscaled.go | 28 ++++++--------- ipn/localapi/pprof.go | 2 +- tsnet/depaware.txt | 8 ++--- util/eventbus/debug-demo/main.go | 4 +++ util/eventbus/debug.go | 4 --- util/eventbus/debughttp.go | 2 +- util/eventbus/debughttp_off.go | 14 ++------ 11 files changed, 100 insertions(+), 78 deletions(-) diff --git a/cmd/tailscaled/debug.go b/cmd/tailscaled/debug.go index ebcbe54e0..96f98d9d6 100644 --- a/cmd/tailscaled/debug.go +++ b/cmd/tailscaled/debug.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build go1.19 +//go:build !ts_omit_debug package main @@ -16,6 +16,7 @@ import ( "log" "net/http" "net/http/httptrace" + "net/http/pprof" "net/url" "os" "time" @@ -39,7 +40,23 @@ var debugArgs struct { portmap bool } -var debugModeFunc = debugMode // so it can be addressable +func init() { + debugModeFunc := debugMode // to be addressable + subCommands["debug"] = &debugModeFunc + + hookNewDebugMux.Set(newDebugMux) +} + +func newDebugMux() *http.ServeMux { + mux := http.NewServeMux() + mux.HandleFunc("/debug/metrics", servePrometheusMetrics) + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + return mux +} func debugMode(args []string) error { fs := flag.NewFlagSet("debug", flag.ExitOnError) diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 0fe1538fd..b779e8c1b 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -48,7 +48,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ - tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ + tailscale.com/derp/derphttp from tailscale.com/net/netcheck+ tailscale.com/disco from tailscale.com/net/tstun+ tailscale.com/drive from tailscale.com/ipn+ tailscale.com/envknob from tailscale.com/cmd/tailscaled+ @@ -58,7 +58,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister - tailscale.com/health from tailscale.com/cmd/tailscaled+ + tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient @@ -127,14 +127,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/wgengine/filter - tailscale.com/tsweb from tailscale.com/util/eventbus 
tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled tailscale.com/types/ipproto from tailscale.com/ipn+ - tailscale.com/types/key from tailscale.com/cmd/tailscaled+ + tailscale.com/types/key from tailscale.com/control/controlbase+ tailscale.com/types/lazy from tailscale.com/hostinfo+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ @@ -158,7 +157,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ - tailscale.com/util/eventbus from tailscale.com/cmd/tailscaled+ + tailscale.com/util/eventbus from tailscale.com/control/controlclient+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth @@ -326,7 +325,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de hash from crypto+ hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem - html from net/http/pprof+ + html from tailscale.com/ipn/ipnlocal+ internal/abi from hash/maphash+ internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug @@ -347,7 +346,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ - internal/profile from net/http/pprof internal/profilerecord from runtime+ internal/race from internal/runtime/maps+ internal/reflectlite from context+ @@ -367,7 +365,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/syscall/execenv from os+ internal/syscall/unix from crypto/internal/sysrand+ internal/testlog from os - internal/trace/tracev2 from runtime+ + internal/trace/tracev2 from runtime internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ @@ -389,7 +387,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net/http/internal from net/http net/http/internal/ascii from net/http net/http/internal/httpcommon from net/http - net/http/pprof from tailscale.com/cmd/tailscaled+ net/netip from crypto/x509+ net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ @@ -400,12 +397,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de path from io/fs+ path/filepath from crypto/x509+ reflect from crypto/x509+ - regexp from internal/profile+ - regexp/syntax from regexp runtime from crypto/internal/fips140+ runtime/debug from github.com/klauspost/compress/zstd+ - runtime/pprof from net/http/pprof+ - runtime/trace from net/http/pprof + runtime/pprof from tailscale.com/ipn/ipnlocal+ slices from crypto/tls+ sort from compress/flate+ strconv from compress/flate+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 1932e9791..20e1c791b 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -68,7 +68,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/control/controlknobs from 
tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ - tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ + tailscale.com/derp/derphttp from tailscale.com/net/netcheck+ tailscale.com/disco from tailscale.com/net/tstun+ tailscale.com/drive from tailscale.com/ipn+ tailscale.com/envknob from tailscale.com/cmd/tailscaled+ @@ -79,7 +79,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister+ tailscale.com/feature/condregister/useproxy from tailscale.com/cmd/tailscale/cli+ - tailscale.com/health from tailscale.com/cmd/tailscaled+ + tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli @@ -152,14 +152,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/wgengine/filter - tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled tailscale.com/types/ipproto from tailscale.com/ipn+ - tailscale.com/types/key from tailscale.com/cmd/tailscaled+ + tailscale.com/types/key from tailscale.com/client/local+ tailscale.com/types/lazy from tailscale.com/hostinfo+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ @@ -184,7 +183,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ - tailscale.com/util/eventbus from tailscale.com/cmd/tailscaled+ + tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth @@ -356,7 +355,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de hash/adler32 from compress/zlib hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem - html from net/http/pprof+ + html from tailscale.com/ipn/ipnlocal+ image from github.com/skip2/go-qrcode+ image/color from github.com/skip2/go-qrcode+ image/png from github.com/skip2/go-qrcode @@ -380,7 +379,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ - internal/profile from net/http/pprof internal/profilerecord from runtime+ internal/race from internal/runtime/maps+ internal/reflectlite from context+ @@ -400,7 +398,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/syscall/execenv from os+ internal/syscall/unix from 
crypto/internal/sysrand+ internal/testlog from os - internal/trace/tracev2 from runtime+ + internal/trace/tracev2 from runtime internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ @@ -424,7 +422,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net/http/internal from net/http+ net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http - net/http/pprof from tailscale.com/cmd/tailscaled+ net/netip from crypto/x509+ net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ @@ -435,12 +432,11 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de path from io/fs+ path/filepath from crypto/x509+ reflect from crypto/x509+ - regexp from internal/profile+ + regexp from tailscale.com/clientupdate regexp/syntax from regexp runtime from crypto/internal/fips140+ runtime/debug from github.com/klauspost/compress/zstd+ - runtime/pprof from net/http/pprof+ - runtime/trace from net/http/pprof + runtime/pprof from tailscale.com/ipn/ipnlocal+ slices from crypto/tls+ sort from compress/flate+ strconv from compress/flate+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 89d9db796..fd5d31836 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -4,9 +4,12 @@ package main import ( + "maps" + "slices" "strings" "testing" + "tailscale.com/feature/featuretags" "tailscale.com/tstest/deptest" ) @@ -90,19 +93,6 @@ func TestOmitDrive(t *testing.T) { }.Check(t) } -func TestOmitTailnetLock(t *testing.T) { - deptest.DepChecker{ - GOOS: "linux", - GOARCH: "amd64", - Tags: "ts_omit_tailnetlock,ts_include_cli", - OnDep: func(dep string) { - if strings.Contains(dep, "cbor") { - t.Errorf("unexpected dep with ts_omit_tailnetlock: %q", dep) - } - }, - }.Check(t) -} - func TestOmitPortmapper(t *testing.T) { deptest.DepChecker{ GOOS: "linux", @@ -235,3 +225,42 @@ func TestOmitUseProxy(t *testing.T) { }, }.Check(t) } + +func minTags() string { + var tags []string + for _, f := range slices.Sorted(maps.Keys(featuretags.Features)) { + if f.IsOmittable() { + tags = append(tags, f.OmitTag()) + } + } + return strings.Join(tags, ",") +} + +func TestMinTailscaledNoCLI(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: minTags(), + OnDep: func(dep string) { + if strings.Contains(dep, "regexp") { + t.Errorf("unexpected dep: %q", dep) + } + if strings.Contains(dep, "cbor") { + t.Errorf("unexpected dep: %q", dep) + } + }, + }.Check(t) +} + +func TestMinTailscaledWithCLI(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: minTags() + ",ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "cbor") { + t.Errorf("unexpected dep: %q", dep) + } + }, + }.Check(t) +} diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 2b0eec482..48eefbea7 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -18,7 +18,6 @@ import ( "log" "net" "net/http" - "net/http/pprof" "os" "os/signal" "path/filepath" @@ -145,7 +144,6 @@ var ( var subCommands = map[string]*func([]string) error{ "install-system-daemon": &installSystemDaemon, "uninstall-system-daemon": &uninstallSystemDaemon, - "debug": &debugModeFunc, "be-child": &beChildFunc, } @@ -194,7 +192,9 @@ func main() { printVersion := false flag.IntVar(&args.verbose, "verbose", defaultVerbosity(), "log verbosity level; 0 is default, 1 or higher are increasingly verbose") flag.BoolVar(&args.cleanUp, 
"cleanup", false, "clean up system state and exit") - flag.StringVar(&args.debug, "debug", "", "listen address ([ip]:port) of optional debug server") + if buildfeatures.HasDebug { + flag.StringVar(&args.debug, "debug", "", "listen address ([ip]:port) of optional debug server") + } flag.StringVar(&args.tunname, "tun", defaultTunName(), `tunnel interface name; use "userspace-networking" (beta) to not use TUN`) flag.Var(flagtype.PortValue(&args.port, defaultPort()), "port", "UDP port to listen on for WireGuard and peer-to-peer traffic; 0 means automatically select") flag.StringVar(&args.statepath, "state", "", "absolute path of state file; use 'kube:' to use Kubernetes secrets or 'arn:aws:ssm:...' to store in AWS SSM; use 'mem:' to not store state and register as an ephemeral node. If empty and --statedir is provided, the default is /tailscaled.state. Default: "+paths.DefaultTailscaledStateFile()) @@ -485,8 +485,8 @@ func run() (err error) { log.Printf("error in synology migration: %v", err) } - if args.debug != "" { - debugMux = newDebugMux() + if buildfeatures.HasDebug && args.debug != "" { + debugMux = hookNewDebugMux.Get()() } if f, ok := hookSetSysDrive.GetOk(); ok { @@ -550,7 +550,7 @@ func startIPNServer(ctx context.Context, logf logger.Logf, logID logid.PublicID, }() srv := ipnserver.New(logf, logID, sys.Bus.Get(), sys.NetMon.Get()) - if debugMux != nil { + if buildfeatures.HasDebug && debugMux != nil { debugMux.HandleFunc("/debug/ipn", srv.ServeHTMLStatus) } var lbErr syncs.AtomicValue[error] @@ -626,7 +626,7 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID if onlyNetstack && !buildfeatures.HasNetstack { return nil, errors.New("userspace-networking support is not compiled in to this binary") } - if debugMux != nil { + if buildfeatures.HasDebug && debugMux != nil { if ms, ok := sys.MagicSock.GetOK(); ok { debugMux.HandleFunc("/debug/magicsock", ms.ServeHTTPDebug) } @@ -820,16 +820,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo return onlyNetstack, nil } -func newDebugMux() *http.ServeMux { - mux := http.NewServeMux() - mux.HandleFunc("/debug/metrics", servePrometheusMetrics) - mux.HandleFunc("/debug/pprof/", pprof.Index) - mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) - mux.HandleFunc("/debug/pprof/profile", pprof.Profile) - mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) - mux.HandleFunc("/debug/pprof/trace", pprof.Trace) - return mux -} +var hookNewDebugMux feature.Hook[func() *http.ServeMux] func servePrometheusMetrics(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") @@ -838,6 +829,9 @@ func servePrometheusMetrics(w http.ResponseWriter, r *http.Request) { } func runDebugServer(logf logger.Logf, mux *http.ServeMux, addr string) { + if !buildfeatures.HasDebug { + return + } ln, err := net.Listen("tcp", addr) if err != nil { log.Fatalf("debug server: %v", err) diff --git a/ipn/localapi/pprof.go b/ipn/localapi/pprof.go index 8c9429b31..9476f721f 100644 --- a/ipn/localapi/pprof.go +++ b/ipn/localapi/pprof.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android && !js +//go:build !ios && !android && !js && !ts_omit_debug // We don't include it on mobile where we're more memory constrained and // there's no CLI to get at the results anyway. 
diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 858bb6d64..037e6c264 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -222,7 +222,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/wgengine/filter - tailscale.com/tsweb from tailscale.com/util/eventbus + LDW tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal tailscale.com/types/bools from tailscale.com/tsnet @@ -478,7 +478,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ - internal/profile from net/http/pprof + LDW internal/profile from net/http/pprof internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ @@ -527,7 +527,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) net/http/internal from net/http+ net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http - net/http/pprof from tailscale.com/ipn/localapi+ + LDW net/http/pprof from tailscale.com/ipn/localapi+ net/netip from crypto/x509+ net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ @@ -542,7 +542,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) runtime from crypto/internal/fips140+ runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/pprof from net/http/pprof+ - runtime/trace from net/http/pprof + LDW runtime/trace from net/http/pprof slices from crypto/tls+ sort from compress/flate+ strconv from compress/flate+ diff --git a/util/eventbus/debug-demo/main.go b/util/eventbus/debug-demo/main.go index a6d232d88..71894d2ea 100644 --- a/util/eventbus/debug-demo/main.go +++ b/util/eventbus/debug-demo/main.go @@ -14,12 +14,16 @@ import ( "net/netip" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/tsweb" "tailscale.com/types/key" "tailscale.com/util/eventbus" ) func main() { + if !buildfeatures.HasDebugEventBus { + log.Fatalf("debug-demo requires the \"debugeventbus\" feature enabled") + } b := eventbus.New() c := b.Client("RouteMonitor") go testPub[RouteAdded](c, 5*time.Second) diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index a055f078f..6d5463bec 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -10,8 +10,6 @@ import ( "slices" "sync" "sync/atomic" - - "tailscale.com/tsweb" ) // A Debugger offers access to a bus's privileged introspection and @@ -137,8 +135,6 @@ func (d *Debugger) SubscribeTypes(client *Client) []reflect.Type { return client.subscribeTypes() } -func (d *Debugger) RegisterHTTP(td *tsweb.DebugHandler) { registerHTTPDebugger(d, td) } - // A hook collects hook functions that can be run as a group. 
type hook[T any] struct { sync.Mutex diff --git a/util/eventbus/debughttp.go b/util/eventbus/debughttp.go index 617502b93..9e03676d0 100644 --- a/util/eventbus/debughttp.go +++ b/util/eventbus/debughttp.go @@ -29,7 +29,7 @@ type httpDebugger struct { *Debugger } -func registerHTTPDebugger(d *Debugger, td *tsweb.DebugHandler) { +func (d *Debugger) RegisterHTTP(td *tsweb.DebugHandler) { dh := httpDebugger{d} td.Handle("bus", "Event bus", dh) td.HandleSilent("bus/monitor", http.HandlerFunc(dh.serveMonitor)) diff --git a/util/eventbus/debughttp_off.go b/util/eventbus/debughttp_off.go index ed491f1f2..332525262 100644 --- a/util/eventbus/debughttp_off.go +++ b/util/eventbus/debughttp_off.go @@ -5,14 +5,6 @@ package eventbus -func registerHTTPDebugger(d *Debugger, tsWebDebugHandler any) { - // The event bus debugging UI uses html/template, which uses - // reflection for method lookups. This forces the compiler to - // retain a lot more code and information to make dynamic method - // dispatch work, which is unacceptable bloat for the iOS build. - // We also disable it on Android while we're at it, as nobody - // is debugging Tailscale internals on Android. - // - // TODO: https://github.com/tailscale/tailscale/issues/15297 to - // bring the debug UI back to iOS somehow. -} +type tswebDebugHandler = any // actually *tsweb.DebugHandler; any to avoid import tsweb with ts_omit_debugeventbus + +func (*Debugger) RegisterHTTP(td tswebDebugHandler) {} From 6c6a1d834122b2fe54a3f781cff12698d70e71e8 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 13:11:48 -0700 Subject: [PATCH 1450/1708] feature/appconnectors: start making it modular Saves 45 KB. Updates #12614 Change-Id: Iaeb73e69633878ce0a0f58c986024784bbe218f1 Signed-off-by: Brad Fitzpatrick --- appc/appconnector.go | 120 ---------------- appc/observe.go | 132 ++++++++++++++++++ appc/observe_disabled.go | 8 ++ cmd/tailscaled/depaware-min.txt | 2 +- cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 1 + feature/appconnectors/appconnectors.go | 39 ++++++ .../feature_appconnectors_disabled.go | 13 ++ .../feature_appconnectors_enabled.go | 13 ++ feature/condregister/maybe_appconnectors.go | 8 ++ feature/featuretags/featuretags.go | 1 + ipn/ipnlocal/c2n.go | 24 ---- ipn/ipnlocal/local.go | 32 ++++- ipn/ipnlocal/peerapi.go | 2 +- ipn/localapi/localapi.go | 23 ++- 15 files changed, 263 insertions(+), 157 deletions(-) create mode 100644 appc/observe.go create mode 100644 appc/observe_disabled.go create mode 100644 feature/appconnectors/appconnectors.go create mode 100644 feature/buildfeatures/feature_appconnectors_disabled.go create mode 100644 feature/buildfeatures/feature_appconnectors_enabled.go create mode 100644 feature/condregister/maybe_appconnectors.go diff --git a/appc/appconnector.go b/appc/appconnector.go index 89c6c9aeb..8d7dd54e8 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -18,13 +18,11 @@ import ( "sync" "time" - "golang.org/x/net/dns/dnsmessage" "tailscale.com/types/logger" "tailscale.com/types/views" "tailscale.com/util/clientmetric" "tailscale.com/util/dnsname" "tailscale.com/util/execqueue" - "tailscale.com/util/mak" "tailscale.com/util/slicesx" ) @@ -372,124 +370,6 @@ func (e *AppConnector) DomainRoutes() map[string][]netip.Addr { return drCopy } -// ObserveDNSResponse is a callback invoked by the DNS resolver when a DNS -// response is being returned over the PeerAPI. 
The response is parsed and -// matched against the configured domains, if matched the routeAdvertiser is -// advised to advertise the discovered route. -func (e *AppConnector) ObserveDNSResponse(res []byte) error { - var p dnsmessage.Parser - if _, err := p.Start(res); err != nil { - return err - } - if err := p.SkipAllQuestions(); err != nil { - return err - } - - // cnameChain tracks a chain of CNAMEs for a given query in order to reverse - // a CNAME chain back to the original query for flattening. The keys are - // CNAME record targets, and the value is the name the record answers, so - // for www.example.com CNAME example.com, the map would contain - // ["example.com"] = "www.example.com". - var cnameChain map[string]string - - // addressRecords is a list of address records found in the response. - var addressRecords map[string][]netip.Addr - - for { - h, err := p.AnswerHeader() - if err == dnsmessage.ErrSectionDone { - break - } - if err != nil { - return err - } - - if h.Class != dnsmessage.ClassINET { - if err := p.SkipAnswer(); err != nil { - return err - } - continue - } - - switch h.Type { - case dnsmessage.TypeCNAME, dnsmessage.TypeA, dnsmessage.TypeAAAA: - default: - if err := p.SkipAnswer(); err != nil { - return err - } - continue - - } - - domain := strings.TrimSuffix(strings.ToLower(h.Name.String()), ".") - if len(domain) == 0 { - continue - } - - if h.Type == dnsmessage.TypeCNAME { - res, err := p.CNAMEResource() - if err != nil { - return err - } - cname := strings.TrimSuffix(strings.ToLower(res.CNAME.String()), ".") - if len(cname) == 0 { - continue - } - mak.Set(&cnameChain, cname, domain) - continue - } - - switch h.Type { - case dnsmessage.TypeA: - r, err := p.AResource() - if err != nil { - return err - } - addr := netip.AddrFrom4(r.A) - mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) - case dnsmessage.TypeAAAA: - r, err := p.AAAAResource() - if err != nil { - return err - } - addr := netip.AddrFrom16(r.AAAA) - mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) - default: - if err := p.SkipAnswer(); err != nil { - return err - } - continue - } - } - - e.mu.Lock() - defer e.mu.Unlock() - - for domain, addrs := range addressRecords { - domain, isRouted := e.findRoutedDomainLocked(domain, cnameChain) - - // domain and none of the CNAMEs in the chain are routed - if !isRouted { - continue - } - - // advertise each address we have learned for the routed domain, that - // was not already known. - var toAdvertise []netip.Prefix - for _, addr := range addrs { - if !e.isAddrKnownLocked(domain, addr) { - toAdvertise = append(toAdvertise, netip.PrefixFrom(addr, addr.BitLen())) - } - } - - if len(toAdvertise) > 0 { - e.logf("[v2] observed new routes for %s: %s", domain, toAdvertise) - e.scheduleAdvertisement(domain, toAdvertise...) 
- } - } - return nil -} - // starting from the given domain that resolved to an address, find it, or any // of the domains in the CNAME chain toward resolving it, that are routed // domains, returning the routed domain name and a bool indicating whether a diff --git a/appc/observe.go b/appc/observe.go new file mode 100644 index 000000000..06dc04f9d --- /dev/null +++ b/appc/observe.go @@ -0,0 +1,132 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_appconnectors + +package appc + +import ( + "net/netip" + "strings" + + "golang.org/x/net/dns/dnsmessage" + "tailscale.com/util/mak" +) + +// ObserveDNSResponse is a callback invoked by the DNS resolver when a DNS +// response is being returned over the PeerAPI. The response is parsed and +// matched against the configured domains, if matched the routeAdvertiser is +// advised to advertise the discovered route. +func (e *AppConnector) ObserveDNSResponse(res []byte) error { + var p dnsmessage.Parser + if _, err := p.Start(res); err != nil { + return err + } + if err := p.SkipAllQuestions(); err != nil { + return err + } + + // cnameChain tracks a chain of CNAMEs for a given query in order to reverse + // a CNAME chain back to the original query for flattening. The keys are + // CNAME record targets, and the value is the name the record answers, so + // for www.example.com CNAME example.com, the map would contain + // ["example.com"] = "www.example.com". + var cnameChain map[string]string + + // addressRecords is a list of address records found in the response. + var addressRecords map[string][]netip.Addr + + for { + h, err := p.AnswerHeader() + if err == dnsmessage.ErrSectionDone { + break + } + if err != nil { + return err + } + + if h.Class != dnsmessage.ClassINET { + if err := p.SkipAnswer(); err != nil { + return err + } + continue + } + + switch h.Type { + case dnsmessage.TypeCNAME, dnsmessage.TypeA, dnsmessage.TypeAAAA: + default: + if err := p.SkipAnswer(); err != nil { + return err + } + continue + + } + + domain := strings.TrimSuffix(strings.ToLower(h.Name.String()), ".") + if len(domain) == 0 { + continue + } + + if h.Type == dnsmessage.TypeCNAME { + res, err := p.CNAMEResource() + if err != nil { + return err + } + cname := strings.TrimSuffix(strings.ToLower(res.CNAME.String()), ".") + if len(cname) == 0 { + continue + } + mak.Set(&cnameChain, cname, domain) + continue + } + + switch h.Type { + case dnsmessage.TypeA: + r, err := p.AResource() + if err != nil { + return err + } + addr := netip.AddrFrom4(r.A) + mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) + case dnsmessage.TypeAAAA: + r, err := p.AAAAResource() + if err != nil { + return err + } + addr := netip.AddrFrom16(r.AAAA) + mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) + default: + if err := p.SkipAnswer(); err != nil { + return err + } + continue + } + } + + e.mu.Lock() + defer e.mu.Unlock() + + for domain, addrs := range addressRecords { + domain, isRouted := e.findRoutedDomainLocked(domain, cnameChain) + + // domain and none of the CNAMEs in the chain are routed + if !isRouted { + continue + } + + // advertise each address we have learned for the routed domain, that + // was not already known. 
+ var toAdvertise []netip.Prefix + for _, addr := range addrs { + if !e.isAddrKnownLocked(domain, addr) { + toAdvertise = append(toAdvertise, netip.PrefixFrom(addr, addr.BitLen())) + } + } + + if len(toAdvertise) > 0 { + e.logf("[v2] observed new routes for %s: %s", domain, toAdvertise) + e.scheduleAdvertisement(domain, toAdvertise...) + } + } + return nil +} diff --git a/appc/observe_disabled.go b/appc/observe_disabled.go new file mode 100644 index 000000000..45aa285ea --- /dev/null +++ b/appc/observe_disabled.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_appconnectors + +package appc + +func (e *AppConnector) ObserveDNSResponse(res []byte) error { return nil } diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index b779e8c1b..f37dde001 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -164,7 +164,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - tailscale.com/util/mak from tailscale.com/appc+ + tailscale.com/util/mak from tailscale.com/control/controlclient+ tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 20e1c791b..7e12a9c36 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -190,7 +190,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - tailscale.com/util/mak from tailscale.com/appc+ + tailscale.com/util/mak from tailscale.com/control/controlclient+ tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index b1bb83d92..9dde241ca 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -271,6 +271,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/feature/wakeonlan+ + tailscale.com/feature/appconnectors from tailscale.com/feature/condregister tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/clientupdate from tailscale.com/feature/condregister diff --git a/feature/appconnectors/appconnectors.go b/feature/appconnectors/appconnectors.go new file mode 100644 index 000000000..28f5ccde3 --- /dev/null +++ b/feature/appconnectors/appconnectors.go @@ -0,0 +1,39 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package appconnectors registers support for Tailscale App Connectors. 
+package appconnectors + +import ( + "encoding/json" + "net/http" + + "tailscale.com/ipn/ipnlocal" + "tailscale.com/tailcfg" +) + +func init() { + ipnlocal.RegisterC2N("GET /appconnector/routes", handleC2NAppConnectorDomainRoutesGet) +} + +// handleC2NAppConnectorDomainRoutesGet handles returning the domains +// that the app connector is responsible for, as well as the resolved +// IP addresses for each domain. If the node is not configured as +// an app connector, an empty map is returned. +func handleC2NAppConnectorDomainRoutesGet(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + logf := b.Logger() + logf("c2n: GET /appconnector/routes received") + + var res tailcfg.C2NAppConnectorDomainRoutesResponse + appConnector := b.AppConnector() + if appConnector == nil { + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) + return + } + + res.Domains = appConnector.DomainRoutes() + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} diff --git a/feature/buildfeatures/feature_appconnectors_disabled.go b/feature/buildfeatures/feature_appconnectors_disabled.go new file mode 100644 index 000000000..64ea8f86b --- /dev/null +++ b/feature/buildfeatures/feature_appconnectors_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_appconnectors + +package buildfeatures + +// HasAppConnectors is whether the binary was built with support for modular feature "App Connectors support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_appconnectors" build tag. +// It's a const so it can be used for dead code elimination. +const HasAppConnectors = false diff --git a/feature/buildfeatures/feature_appconnectors_enabled.go b/feature/buildfeatures/feature_appconnectors_enabled.go new file mode 100644 index 000000000..e00eaffa3 --- /dev/null +++ b/feature/buildfeatures/feature_appconnectors_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_appconnectors + +package buildfeatures + +// HasAppConnectors is whether the binary was built with support for modular feature "App Connectors support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_appconnectors" build tag. +// It's a const so it can be used for dead code elimination. +const HasAppConnectors = true diff --git a/feature/condregister/maybe_appconnectors.go b/feature/condregister/maybe_appconnectors.go new file mode 100644 index 000000000..70112d781 --- /dev/null +++ b/feature/condregister/maybe_appconnectors.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_appconnectors + +package condregister + +import _ "tailscale.com/feature/appconnectors" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 7cfc79f65..daf4c71eb 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -88,6 +88,7 @@ type FeatureMeta struct { // excluded via build tags, and a description of each. 
var Features = map[FeatureTag]FeatureMeta{ "acme": {"ACME", "ACME TLS certificate management", nil}, + "appconnectors": {"AppConnectors", "App Connectors support", nil}, "aws": {"AWS", "AWS integration", nil}, "bird": {"Bird", "Bird BGP integration", nil}, "captiveportal": {"CaptivePortal", "Captive portal detection", nil}, diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index f064628fc..ae9e67126 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -51,9 +51,6 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ // SSH req("/ssh/usernames"): handleC2NSSHUsernames, - // App Connectors. - req("GET /appconnector/routes"): handleC2NAppConnectorDomainRoutesGet, - // Linux netfilter. req("POST /netfilter-kind"): handleC2NSetNetfilterKind, } @@ -294,27 +291,6 @@ func handleC2NSockStats(b *LocalBackend, w http.ResponseWriter, r *http.Request) fmt.Fprintf(w, "debug info: %v\n", sockstats.DebugInfo()) } -// handleC2NAppConnectorDomainRoutesGet handles returning the domains -// that the app connector is responsible for, as well as the resolved -// IP addresses for each domain. If the node is not configured as -// an app connector, an empty map is returned. -func handleC2NAppConnectorDomainRoutesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: GET /appconnector/routes received") - - var res tailcfg.C2NAppConnectorDomainRoutesResponse - appConnector := b.AppConnector() - if appConnector == nil { - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) - return - } - - res.Domains = appConnector.DomainRoutes() - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.Request) { b.logf("c2n: POST /netfilter-kind received") diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c3d7d3fb8..5897614d0 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -398,9 +398,10 @@ type LocalBackend struct { } // HealthTracker returns the health tracker for the backend. -func (b *LocalBackend) HealthTracker() *health.Tracker { - return b.health -} +func (b *LocalBackend) HealthTracker() *health.Tracker { return b.health } + +// Logger returns the logger for the backend. +func (b *LocalBackend) Logger() logger.Logf { return b.logf } // UserMetricsRegistry returns the usermetrics registry for the backend func (b *LocalBackend) UserMetricsRegistry() *usermetric.Registry { @@ -4154,6 +4155,9 @@ func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.P // MaybeClearAppConnector clears the routes from any AppConnector if // AdvertiseRoutes has been set in the MaskedPrefs. func (b *LocalBackend) MaybeClearAppConnector(mp *ipn.MaskedPrefs) error { + if !buildfeatures.HasAppConnectors { + return nil + } var err error if ac := b.AppConnector(); ac != nil && mp.AdvertiseRoutesSet { err = ac.ClearRoutes() @@ -4770,6 +4774,9 @@ func (b *LocalBackend) blockEngineUpdates(block bool) { // current network map and preferences. // b.mu must be held. 
func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs ipn.PrefsView) { + if !buildfeatures.HasAppConnectors { + return + } const appConnectorCapName = "tailscale.com/app-connectors" defer func() { if b.hostinfo != nil { @@ -4943,7 +4950,9 @@ func (b *LocalBackend) authReconfig() { b.logf("[v1] authReconfig: ra=%v dns=%v 0x%02x: %v", prefs.RouteAll(), prefs.CorpDNS(), flags, err) b.initPeerAPIListener() - b.readvertiseAppConnectorRoutes() + if buildfeatures.HasAppConnectors { + b.readvertiseAppConnectorRoutes() + } } // shouldUseOneCGNATRoute reports whether we should prefer to make one big @@ -6363,6 +6372,9 @@ func (b *LocalBackend) OfferingExitNode() bool { // OfferingAppConnector reports whether b is currently offering app // connector services. func (b *LocalBackend) OfferingAppConnector() bool { + if !buildfeatures.HasAppConnectors { + return false + } b.mu.Lock() defer b.mu.Unlock() return b.appConnector != nil @@ -6372,6 +6384,9 @@ func (b *LocalBackend) OfferingAppConnector() bool { // // TODO(nickkhyl): move app connectors to [nodeBackend], or perhaps a feature package? func (b *LocalBackend) AppConnector() *appc.AppConnector { + if !buildfeatures.HasAppConnectors { + return nil + } b.mu.Lock() defer b.mu.Unlock() return b.appConnector @@ -6917,6 +6932,9 @@ func (b *LocalBackend) DebugBreakDERPConns() error { // ObserveDNSResponse passes a DNS response from the PeerAPI DNS server to the // App Connector to enable route discovery. func (b *LocalBackend) ObserveDNSResponse(res []byte) error { + if !buildfeatures.HasAppConnectors { + return nil + } var appConnector *appc.AppConnector b.mu.Lock() if b.appConnector == nil { @@ -7020,6 +7038,9 @@ func namespaceKeyForCurrentProfile(pm *profileManager, key ipn.StateKey) ipn.Sta const routeInfoStateStoreKey ipn.StateKey = "_routeInfo" func (b *LocalBackend) storeRouteInfo(ri *appc.RouteInfo) error { + if !buildfeatures.HasAppConnectors { + return feature.ErrUnavailable + } b.mu.Lock() defer b.mu.Unlock() if b.pm.CurrentProfile().ID() == "" { @@ -7034,6 +7055,9 @@ func (b *LocalBackend) storeRouteInfo(ri *appc.RouteInfo) error { } func (b *LocalBackend) readRouteInfoLocked() (*appc.RouteInfo, error) { + if !buildfeatures.HasAppConnectors { + return nil, feature.ErrUnavailable + } if b.pm.CurrentProfile().ID() == "" { return &appc.RouteInfo{}, nil } diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index bd542e0f0..4f99525f9 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -745,7 +745,7 @@ func (h *peerAPIHandler) handleDNSQuery(w http.ResponseWriter, r *http.Request) // TODO(raggi): consider pushing the integration down into the resolver // instead to avoid re-parsing the DNS response for improved performance in // the future. 
- if h.ps.b.OfferingAppConnector() { + if buildfeatures.HasAppConnectors && h.ps.b.OfferingAppConnector() { if err := h.ps.b.ObserveDNSResponse(res); err != nil { h.logf("ObserveDNSResponse error: %v", err) // This is not fatal, we probably just failed to parse the upstream diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 3948b4293..b07df8b02 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -72,7 +72,6 @@ var handler = map[string]LocalAPIHandler{ // The other /localapi/v0/NAME handlers are exact matches and contain only NAME // without a trailing slash: "alpha-set-device-attrs": (*Handler).serveSetDeviceAttrs, // see tailscale/corp#24690 - "appc-route-info": (*Handler).serveGetAppcRouteInfo, "bugreport": (*Handler).serveBugReport, "check-ip-forwarding": (*Handler).serveCheckIPForwarding, "check-prefs": (*Handler).serveCheckPrefs, @@ -113,6 +112,12 @@ var handler = map[string]LocalAPIHandler{ "whois": (*Handler).serveWhoIs, } +func init() { + if buildfeatures.HasAppConnectors { + Register("appc-route-info", (*Handler).serveGetAppcRouteInfo) + } +} + // Register registers a new LocalAPI handler for the given name. func Register(name string, fn LocalAPIHandler) { if _, ok := handler[name]; ok { @@ -934,11 +939,13 @@ func (h *Handler) servePrefs(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return } - if err := h.b.MaybeClearAppConnector(mp); err != nil { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusInternalServerError) - json.NewEncoder(w).Encode(resJSON{Error: err.Error()}) - return + if buildfeatures.HasAppConnectors { + if err := h.b.MaybeClearAppConnector(mp); err != nil { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusInternalServerError) + json.NewEncoder(w).Encode(resJSON{Error: err.Error()}) + return + } } var err error prefs, err = h.b.EditPrefsAs(mp, h.Actor) @@ -1666,6 +1673,10 @@ func (h *Handler) serveShutdown(w http.ResponseWriter, r *http.Request) { } func (h *Handler) serveGetAppcRouteInfo(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasAppConnectors { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return From f7afb9b6cadd6f8fbfe8243b20fd11e4f4e49c32 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 12:46:43 -0700 Subject: [PATCH 1451/1708] feature/featuretags, ipn/conffile: make HuJSON support in config files optional Saves 33 KB. 
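The user-visible consequence: a binary built with ts_omit_hujsonconf accepts
only strictly valid JSON config files, so HuJSON niceties such as comments
and trailing commas stop parsing. An illustrative contrast (the "version"
value is a placeholder, not the real schema version):

    // Accepted only when HuJSON support is compiled in:
    {
        // which config schema this file uses
        "version": "...",
    }

    // Accepted either way (strict JSON):
    {"version": "..."}

The Load path below falls back to treating the raw bytes as standard JSON
whenever the HuJSON standardizer isn't linked in.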
Updates #12614 Change-Id: Ie701c230e0765281f409f29ed263910b9be9cc77 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-min.txt | 1 - cmd/tailscaled/depaware-minbox.txt | 1 - cmd/tailscaled/deps_test.go | 10 ++++++++-- .../feature_hujsonconf_disabled.go | 13 +++++++++++++ .../feature_hujsonconf_enabled.go | 13 +++++++++++++ feature/featuretags/featuretags.go | 1 + ipn/conffile/cloudconf.go | 5 +++++ ipn/conffile/conffile.go | 19 +++++++++++-------- ipn/conffile/conffile_hujson.go | 2 +- 9 files changed, 52 insertions(+), 13 deletions(-) create mode 100644 feature/buildfeatures/feature_hujsonconf_disabled.go create mode 100644 feature/buildfeatures/feature_hujsonconf_enabled.go diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index f37dde001..bada798db 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -24,7 +24,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf - github.com/tailscale/hujson from tailscale.com/ipn/conffile github.com/tailscale/peercred from tailscale.com/ipn/ipnauth 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 7e12a9c36..ef0d2a8ee 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -36,7 +36,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/skip2/go-qrcode from tailscale.com/cmd/tailscale/cli github.com/skip2/go-qrcode/bitset from github.com/skip2/go-qrcode+ github.com/skip2/go-qrcode/reedsolomon from github.com/skip2/go-qrcode - github.com/tailscale/hujson from tailscale.com/ipn/conffile github.com/tailscale/peercred from tailscale.com/ipn/ipnauth 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index fd5d31836..a9f125e19 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -253,13 +253,19 @@ func TestMinTailscaledNoCLI(t *testing.T) { } func TestMinTailscaledWithCLI(t *testing.T) { + badSubstrs := []string{ + "cbor", + "hujson", + } deptest.DepChecker{ GOOS: "linux", GOARCH: "amd64", Tags: minTags() + ",ts_include_cli", OnDep: func(dep string) { - if strings.Contains(dep, "cbor") { - t.Errorf("unexpected dep: %q", dep) + for _, bad := range badSubstrs { + if strings.Contains(dep, bad) { + t.Errorf("unexpected dep: %q", dep) + } } }, }.Check(t) diff --git a/feature/buildfeatures/feature_hujsonconf_disabled.go b/feature/buildfeatures/feature_hujsonconf_disabled.go new file mode 100644 index 000000000..cee076bc2 --- /dev/null +++ b/feature/buildfeatures/feature_hujsonconf_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_hujsonconf + +package buildfeatures + +// HasHuJSONConf is whether the binary was built with support for modular feature "HuJSON config file support". 
+// Specifically, it's whether the binary was NOT built with the "ts_omit_hujsonconf" build tag. +// It's a const so it can be used for dead code elimination. +const HasHuJSONConf = false diff --git a/feature/buildfeatures/feature_hujsonconf_enabled.go b/feature/buildfeatures/feature_hujsonconf_enabled.go new file mode 100644 index 000000000..aefeeace5 --- /dev/null +++ b/feature/buildfeatures/feature_hujsonconf_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_hujsonconf + +package buildfeatures + +// HasHuJSONConf is whether the binary was built with support for modular feature "HuJSON config file support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_hujsonconf" build tag. +// It's a const so it can be used for dead code elimination. +const HasHuJSONConf = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index daf4c71eb..347ccdec0 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -113,6 +113,7 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Generic Receive Offload support (performance)", Deps: []FeatureTag{"netstack"}, }, + "hujsonconf": {"HuJSONConf", "HuJSON config file support", nil}, "iptables": {"IPTables", "Linux iptables support", nil}, "kube": {"Kube", "Kubernetes integration", nil}, "linuxdnsfight": {"LinuxDNSFight", "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)", nil}, diff --git a/ipn/conffile/cloudconf.go b/ipn/conffile/cloudconf.go index 650611cf1..4475a2d7b 100644 --- a/ipn/conffile/cloudconf.go +++ b/ipn/conffile/cloudconf.go @@ -10,6 +10,8 @@ import ( "net/http" "strings" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/omit" ) @@ -35,6 +37,9 @@ func getEC2MetadataToken() (string, error) { } func readVMUserData() ([]byte, error) { + if !buildfeatures.HasAWS { + return nil, feature.ErrUnavailable + } // TODO(bradfitz): support GCP, Azure, Proxmox/cloud-init // (NoCloud/ConfigDrive ISO), etc. 
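The guard added above is the same idiom used throughout this series for
compiled-out features: plain functions return feature.ErrUnavailable, and
HTTP handlers translate it into a 501. A minimal self-contained sketch of the
two shapes (illustrative function names only):

    package example

    import (
        "net/http"

        "tailscale.com/feature"
        "tailscale.com/feature/buildfeatures"
    )

    func readCloudUserData() ([]byte, error) {
        if !buildfeatures.HasAWS {
            return nil, feature.ErrUnavailable
        }
        // ... real work when the feature is compiled in ...
        return nil, nil
    }

    func serveSomething(w http.ResponseWriter, r *http.Request) {
        if !buildfeatures.HasDNS {
            http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented)
            return
        }
        // ... real handler when the feature is compiled in ...
    }

Because HasAWS and HasDNS are consts, the compiler deletes the dead branch
and the linker can drop whatever only that branch referenced.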
diff --git a/ipn/conffile/conffile.go b/ipn/conffile/conffile.go index a2bafb8b7..3a2aeffb3 100644 --- a/ipn/conffile/conffile.go +++ b/ipn/conffile/conffile.go @@ -8,11 +8,11 @@ package conffile import ( "bytes" "encoding/json" - "errors" "fmt" "os" "runtime" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" ) @@ -51,10 +51,6 @@ func Load(path string) (*Config, error) { // compile-time for deadcode elimination return nil, fmt.Errorf("config file loading not supported on %q", runtime.GOOS) } - if hujsonStandardize == nil { - // Build tags are wrong in conffile_hujson.go - return nil, errors.New("[unexpected] config file loading not wired up") - } var c Config c.Path = path var err error @@ -68,14 +64,21 @@ func Load(path string) (*Config, error) { if err != nil { return nil, err } - c.Std, err = hujsonStandardize(c.Raw) - if err != nil { - return nil, fmt.Errorf("error parsing config file %s HuJSON/JSON: %w", path, err) + if buildfeatures.HasHuJSONConf && hujsonStandardize != nil { + c.Std, err = hujsonStandardize(c.Raw) + if err != nil { + return nil, fmt.Errorf("error parsing config file %s HuJSON/JSON: %w", path, err) + } + } else { + c.Std = c.Raw // config file must be valid JSON with ts_omit_hujsonconf } var ver struct { Version string `json:"version"` } if err := json.Unmarshal(c.Std, &ver); err != nil { + if !buildfeatures.HasHuJSONConf { + return nil, fmt.Errorf("error parsing config file %s, which must be valid standard JSON: %w", path, err) + } return nil, fmt.Errorf("error parsing config file %s: %w", path, err) } switch ver.Version { diff --git a/ipn/conffile/conffile_hujson.go b/ipn/conffile/conffile_hujson.go index 6825a0638..1e967f1bd 100644 --- a/ipn/conffile/conffile_hujson.go +++ b/ipn/conffile/conffile_hujson.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android +//go:build !ios && !android && !ts_omit_hujsonconf package conffile From 5b09913d640c2ab31c2c9c82d32b04a2c83ff2f7 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 13:31:38 -0700 Subject: [PATCH 1452/1708] ipn/ipnlocal, engine: avoid runtime/pprof with two usages of ts_omit_debug Saves 258 KB. 
Updates #12614 Change-Id: I37c2f7f916480e3534883f338de4c64d08f7ef2b Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-min.txt | 4 +--- cmd/tailscaled/depaware-minbox.txt | 5 ++--- cmd/tailscaled/deps_test.go | 1 + ipn/ipnlocal/c2n_pprof.go | 2 +- wgengine/watchdog.go | 2 +- wgengine/watchdog_js.go | 17 ----------------- wgengine/watchdog_omit.go | 8 ++++++++ 7 files changed, 14 insertions(+), 25 deletions(-) delete mode 100644 wgengine/watchdog_js.go create mode 100644 wgengine/watchdog_omit.go diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index bada798db..3a6d0e7fd 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -345,7 +345,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ - internal/profilerecord from runtime+ + internal/profilerecord from runtime internal/race from internal/runtime/maps+ internal/reflectlite from context+ internal/runtime/atomic from internal/runtime/exithook+ @@ -398,7 +398,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de reflect from crypto/x509+ runtime from crypto/internal/fips140+ runtime/debug from github.com/klauspost/compress/zstd+ - runtime/pprof from tailscale.com/ipn/ipnlocal+ slices from crypto/tls+ sort from compress/flate+ strconv from compress/flate+ @@ -406,7 +405,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de sync from compress/flate+ sync/atomic from context+ syscall from crypto/internal/sysrand+ - text/tabwriter from runtime/pprof time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index ef0d2a8ee..0dd36447f 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -378,7 +378,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ - internal/profilerecord from runtime+ + internal/profilerecord from runtime internal/race from internal/runtime/maps+ internal/reflectlite from context+ internal/runtime/atomic from internal/runtime/exithook+ @@ -435,7 +435,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de regexp/syntax from regexp runtime from crypto/internal/fips140+ runtime/debug from github.com/klauspost/compress/zstd+ - runtime/pprof from tailscale.com/ipn/ipnlocal+ slices from crypto/tls+ sort from compress/flate+ strconv from compress/flate+ @@ -443,7 +442,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de sync from compress/flate+ sync/atomic from context+ syscall from crypto/internal/sysrand+ - text/tabwriter from runtime/pprof+ + text/tabwriter from github.com/peterbourgon/ff/v3/ffcli+ time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index a9f125e19..521eb3ced 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -256,6 +256,7 @@ func TestMinTailscaledWithCLI(t *testing.T) { badSubstrs := []string{ "cbor", "hujson", + "pprof", } deptest.DepChecker{ GOOS: "linux", diff --git a/ipn/ipnlocal/c2n_pprof.go b/ipn/ipnlocal/c2n_pprof.go index b4bc35790..13237cc4f 100644 --- a/ipn/ipnlocal/c2n_pprof.go +++ b/ipn/ipnlocal/c2n_pprof.go @@ -1,7 +1,7 
@@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !js && !wasm +//go:build !js && !wasm && !ts_omit_debug package ipnlocal diff --git a/wgengine/watchdog.go b/wgengine/watchdog.go index 13bc48fb0..0500e6f7f 100644 --- a/wgengine/watchdog.go +++ b/wgengine/watchdog.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !js +//go:build !js && !ts_omit_debug package wgengine diff --git a/wgengine/watchdog_js.go b/wgengine/watchdog_js.go deleted file mode 100644 index 872ce36d5..000000000 --- a/wgengine/watchdog_js.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build js - -package wgengine - -import "tailscale.com/net/dns/resolver" - -type watchdogEngine struct { - Engine - wrap Engine -} - -func (e *watchdogEngine) GetResolver() (r *resolver.Resolver, ok bool) { - return nil, false -} diff --git a/wgengine/watchdog_omit.go b/wgengine/watchdog_omit.go new file mode 100644 index 000000000..1d175b41a --- /dev/null +++ b/wgengine/watchdog_omit.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build js || ts_omit_debug + +package wgengine + +func NewWatchdog(e Engine) Engine { return e } From 9781b7c25cbaae314f3ca95741d20c6125a89531 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 30 Sep 2025 20:45:12 -0500 Subject: [PATCH 1453/1708] ipn/ipnlocal: plumb logf into nodeBackend Updates #cleanup Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 8 ++++---- ipn/ipnlocal/local_test.go | 4 ++-- ipn/ipnlocal/node_backend.go | 9 ++++++--- ipn/ipnlocal/node_backend_test.go | 11 ++++++----- 4 files changed, 18 insertions(+), 14 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5897614d0..c091e0c61 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -501,7 +501,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo needsCaptiveDetection: make(chan bool), } - nb := newNodeBackend(ctx, b.sys.Bus.Get()) + nb := newNodeBackend(ctx, b.logf, b.sys.Bus.Get()) b.currentNodeAtomic.Store(nb) nb.ready() @@ -629,7 +629,7 @@ func (b *LocalBackend) currentNode() *nodeBackend { if v := b.currentNodeAtomic.Load(); v != nil || !testenv.InTest() { return v } - v := newNodeBackend(cmp.Or(b.ctx, context.Background()), b.sys.Bus.Get()) + v := newNodeBackend(cmp.Or(b.ctx, context.Background()), b.logf, b.sys.Bus.Get()) if b.currentNodeAtomic.CompareAndSwap(nil, v) { v.ready() } @@ -4890,7 +4890,7 @@ func (b *LocalBackend) authReconfig() { hasPAC := b.prevIfState.HasPAC() disableSubnetsIfPAC := cn.SelfHasCap(tailcfg.NodeAttrDisableSubnetsIfPAC) dohURL, dohURLOK := cn.exitNodeCanProxyDNS(prefs.ExitNodeID()) - dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, b.logf, version.OS()) + dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, version.OS()) // If the current node is an app connector, ensure the app connector machine is started b.reconfigAppConnectorLocked(nm, prefs) closing := b.shutdownCalled @@ -6797,7 +6797,7 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err // down, so no need to do any work. 
return nil } - newNode := newNodeBackend(b.ctx, b.sys.Bus.Get()) + newNode := newNodeBackend(b.ctx, b.logf, b.sys.Bus.Get()) if oldNode := b.currentNodeAtomic.Swap(newNode); oldNode != nil { oldNode.shutdown(errNodeContextChanged) } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 70923efde..a984d66bf 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4904,7 +4904,7 @@ func TestSuggestExitNode(t *testing.T) { allowList = set.SetOf(tt.allowPolicy) } - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) defer nb.shutdown(errShutdown) nb.SetNetMap(tt.netMap) @@ -5357,7 +5357,7 @@ func TestSuggestExitNodeTrafficSteering(t *testing.T) { tt.netMap.AllCaps = set.SetOf(slices.Collect(caps)) } - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) defer nb.shutdown(errShutdown) nb.SetNetMap(tt.netMap) diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index b1ce9e07c..95bf350ce 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -65,6 +65,8 @@ import ( // Even if they're tied to the local node, instead of moving them here, we should extract the entire feature // into a separate package and have it install proper hooks. type nodeBackend struct { + logf logger.Logf + ctx context.Context // canceled by [nodeBackend.shutdown] ctxCancel context.CancelCauseFunc // cancels ctx @@ -104,9 +106,10 @@ type nodeBackend struct { nodeByAddr map[netip.Addr]tailcfg.NodeID } -func newNodeBackend(ctx context.Context, bus *eventbus.Bus) *nodeBackend { +func newNodeBackend(ctx context.Context, logf logger.Logf, bus *eventbus.Bus) *nodeBackend { ctx, ctxCancel := context.WithCancelCause(ctx) nb := &nodeBackend{ + logf: logf, ctx: ctx, ctxCancel: ctxCancel, eventClient: bus.Client("ipnlocal.nodeBackend"), @@ -520,10 +523,10 @@ func (nb *nodeBackend) setFilter(f *filter.Filter) { nb.filterPub.Publish(magicsock.FilterUpdate{Filter: f}) } -func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { +func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, versionOS string) *dns.Config { nb.mu.Lock() defer nb.mu.Unlock() - return dnsConfigForNetmap(nb.netMap, nb.peers, prefs, selfExpired, logf, versionOS) + return dnsConfigForNetmap(nb.netMap, nb.peers, prefs, selfExpired, nb.logf, versionOS) } func (nb *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { diff --git a/ipn/ipnlocal/node_backend_test.go b/ipn/ipnlocal/node_backend_test.go index dc67d327c..b305837fd 100644 --- a/ipn/ipnlocal/node_backend_test.go +++ b/ipn/ipnlocal/node_backend_test.go @@ -9,11 +9,12 @@ import ( "testing" "time" + "tailscale.com/tstest" "tailscale.com/util/eventbus" ) func TestNodeBackendReadiness(t *testing.T) { - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) // The node backend is not ready until [nodeBackend.ready] is called, // and [nodeBackend.Wait] should fail with [context.DeadlineExceeded]. 
@@ -44,7 +45,7 @@ func TestNodeBackendReadiness(t *testing.T) { } func TestNodeBackendShutdown(t *testing.T) { - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) shutdownCause := errors.New("test shutdown") @@ -82,7 +83,7 @@ func TestNodeBackendShutdown(t *testing.T) { } func TestNodeBackendReadyAfterShutdown(t *testing.T) { - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) shutdownCause := errors.New("test shutdown") nb.shutdown(shutdownCause) @@ -94,7 +95,7 @@ func TestNodeBackendReadyAfterShutdown(t *testing.T) { func TestNodeBackendParentContextCancellation(t *testing.T) { ctx, cancelCtx := context.WithCancel(context.Background()) - nb := newNodeBackend(ctx, eventbus.New()) + nb := newNodeBackend(ctx, tstest.WhileTestRunningLogger(t), eventbus.New()) cancelCtx() @@ -111,7 +112,7 @@ func TestNodeBackendParentContextCancellation(t *testing.T) { } func TestNodeBackendConcurrentReadyAndShutdown(t *testing.T) { - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) // Calling [nodeBackend.ready] and [nodeBackend.shutdown] concurrently // should not cause issues, and [nodeBackend.Wait] should unblock, From af1114e896fd16378dbf8f0584b0d55ebd46930b Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Wed, 1 Oct 2025 12:24:21 +0100 Subject: [PATCH 1454/1708] cmd/k8s-proxy: importing feature/condregister on cmd/k8s-proxy (#17383) https://github.com/tailscale/tailscale/pull/17346 moved the kube and aws arn store initializations to feature/condregister, under the assumption that anything using it would use kubestore.New. Unfortunately, cmd/k8s-proxy makes use of store.New, which compares the `:` supplied in the provided `path string` argument against known stores. If it doesn't find it, it falls back to using a FileStore. Since cmd/k8s-proxy uses store.New to try and initialize a kube store in some cases (without importing feature/condregister), it silently creates a FileStore and that leads to misleading errors further along in execution. This fixes the issue by importing condregister, so that a kube store is successfully initialized. Updates #12614 Signed-off-by: chaosinthecrd --- cmd/k8s-proxy/k8s-proxy.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index 7a7707214..57a2632e2 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -31,6 +31,7 @@ import ( "k8s.io/utils/strings/slices" "tailscale.com/client/local" "tailscale.com/cmd/k8s-proxy/internal/config" + _ "tailscale.com/feature/condregister" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/store" From ebc370e517a4221a092c1c2a33cc7b749c651aa0 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Wed, 1 Oct 2025 14:44:15 +0100 Subject: [PATCH 1455/1708] ipn/ipnlocal: fail test if more notifies are put than expected The `put` callback runs on a different goroutine to the test, so calling t.Fatalf in put had no effect. `drain` is always called when checking what was put and is called from the test goroutine, so that's a good place to fail the test if the channel was too full.
Updates #17363 Signed-off-by: James Sanderson --- ipn/ipnlocal/state_test.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 9c0aa66a9..347aaf8b8 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -59,8 +59,9 @@ type notifyThrottler struct { // ch gets replaced frequently. Lock the mutex before getting or // setting it, but not while waiting on it. - mu sync.Mutex - ch chan ipn.Notify + mu sync.Mutex + ch chan ipn.Notify + putErr error // set by put if the channel is full } // expect tells the throttler to expect count upcoming notifications. @@ -81,7 +82,11 @@ func (nt *notifyThrottler) put(n ipn.Notify) { case ch <- n: return default: - nt.t.Fatalf("put: channel full: %v", n) + err := fmt.Errorf("put: channel full: %v", n) + nt.t.Log(err) + nt.mu.Lock() + nt.putErr = err + nt.mu.Unlock() } } @@ -91,8 +96,13 @@ func (nt *notifyThrottler) drain(count int) []ipn.Notify { nt.t.Helper() nt.mu.Lock() ch := nt.ch + putErr := nt.putErr nt.mu.Unlock() + if putErr != nil { + nt.t.Fatalf("drain: previous call to put errored: %s", putErr) + } + nn := []ipn.Notify{} for i := range count { select { From 91fa51ca153e39e0bfaf2cb580a2071065230b97 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 1 Oct 2025 07:30:39 -0700 Subject: [PATCH 1456/1708] ipn/store, feature/condregister: permit callers to empty import optional ipn stores This permits other programs (in other repos) to conditionally import ipn/store/awsstore and/or ipn/store/kubestore and have them register themselves, rather than feature/condregister doing it. Updates tailscale/corp#32922 Change-Id: I2936229ce37fd2acf9be5bf5254d4a262d090ec1 Signed-off-by: Brad Fitzpatrick --- feature/condregister/maybe_store_aws.go | 17 +---------------- feature/condregister/maybe_store_kube.go | 16 +--------------- ipn/store/awsstore/store_aws.go | 13 ++++++++++++- ipn/store/awsstore/store_aws_test.go | 2 +- ipn/store/kubestore/store_kube.go | 8 ++++++++ 5 files changed, 23 insertions(+), 33 deletions(-) diff --git a/feature/condregister/maybe_store_aws.go b/feature/condregister/maybe_store_aws.go index 48ef06ecf..8358b49f0 100644 --- a/feature/condregister/maybe_store_aws.go +++ b/feature/condregister/maybe_store_aws.go @@ -5,19 +5,4 @@ package condregister -import ( - "tailscale.com/ipn" - "tailscale.com/ipn/store" - "tailscale.com/ipn/store/awsstore" - "tailscale.com/types/logger" -) - -func init() { - store.Register("arn:", func(logf logger.Logf, arg string) (ipn.StateStore, error) { - ssmARN, opts, err := awsstore.ParseARNAndOpts(arg) - if err != nil { - return nil, err - } - return awsstore.New(logf, ssmARN, opts...)
- }) -} +import _ "tailscale.com/ipn/store/awsstore" diff --git a/feature/condregister/maybe_store_kube.go b/feature/condregister/maybe_store_kube.go index 0aa2c1692..bb795b05e 100644 --- a/feature/condregister/maybe_store_kube.go +++ b/feature/condregister/maybe_store_kube.go @@ -5,18 +5,4 @@ package condregister -import ( - "strings" - - "tailscale.com/ipn" - "tailscale.com/ipn/store" - "tailscale.com/ipn/store/kubestore" - "tailscale.com/types/logger" -) - -func init() { - store.Register("kube:", func(logf logger.Logf, path string) (ipn.StateStore, error) { - secretName := strings.TrimPrefix(path, "kube:") - return kubestore.New(logf, secretName) - }) -} +import _ "tailscale.com/ipn/store/kubestore" diff --git a/ipn/store/awsstore/store_aws.go b/ipn/store/awsstore/store_aws.go index 40bbbf037..78b72d0bc 100644 --- a/ipn/store/awsstore/store_aws.go +++ b/ipn/store/awsstore/store_aws.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !ts_omit_aws +//go:build !ts_omit_aws // Package awsstore contains an ipn.StateStore implementation using AWS SSM. package awsstore @@ -20,10 +20,21 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ssm" ssmTypes "github.com/aws/aws-sdk-go-v2/service/ssm/types" "tailscale.com/ipn" + "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" "tailscale.com/types/logger" ) +func init() { + store.Register("arn:", func(logf logger.Logf, arg string) (ipn.StateStore, error) { + ssmARN, opts, err := ParseARNAndOpts(arg) + if err != nil { + return nil, err + } + return New(logf, ssmARN, opts...) + }) +} + const ( parameterNameRxStr = `^parameter(/.*)` ) diff --git a/ipn/store/awsstore/store_aws_test.go b/ipn/store/awsstore/store_aws_test.go index 3382635a7..3cc23e48d 100644 --- a/ipn/store/awsstore/store_aws_test.go +++ b/ipn/store/awsstore/store_aws_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !ts_omit_aws +//go:build !ts_omit_aws package awsstore diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 5b25471c7..f48237c05 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -16,6 +16,7 @@ import ( "tailscale.com/envknob" "tailscale.com/ipn" + "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" @@ -25,6 +26,13 @@ import ( "tailscale.com/util/mak" ) +func init() { + store.Register("kube:", func(logf logger.Logf, path string) (ipn.StateStore, error) { + secretName := strings.TrimPrefix(path, "kube:") + return New(logf, secretName) + }) +} + const ( // timeout is the timeout for a single state update that includes calls to the API server to write or read a // state Secret and emit an Event. 
From c2f37c891c6c6c37c1320ad7edf77f94292c4fb5 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 19:47:50 -0700 Subject: [PATCH 1457/1708] all: use Go 1.20's errors.Join instead of our multierr package Updates #7123 Change-Id: Ie9be6814831f661ad5636afcd51d063a0d7a907d Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 1 - cmd/k8s-operator/depaware.txt | 1 - cmd/tailscale/depaware.txt | 1 - cmd/tailscaled/depaware-min.txt | 1 - cmd/tailscaled/depaware-minbox.txt | 1 - cmd/tailscaled/depaware.txt | 2 +- cmd/tailscaled/deps_test.go | 1 + cmd/tailscaled/tailscaled.go | 3 +-- cmd/tsidp/depaware.txt | 1 - control/controlclient/direct.go | 3 +-- control/controlclient/noise.go | 7 +++---- derp/xdp/xdp_linux.go | 3 +-- feature/tap/tap_linux.go | 4 ++-- health/health.go | 7 +++---- ipn/ipnlocal/local.go | 5 ++--- k8s-operator/sessionrecording/hijacker.go | 7 +++---- k8s-operator/sessionrecording/ws/conn.go | 3 +-- kube/kubeclient/client.go | 4 ++-- net/netcheck/standalone.go | 3 +-- net/ping/ping.go | 8 ++++---- prober/tls.go | 5 ++--- release/dist/dist.go | 3 +-- sessionrecording/connect.go | 3 +-- tsnet/depaware.txt | 1 - tstest/integration/tailscaled_deps_test_darwin.go | 1 - tstest/integration/tailscaled_deps_test_freebsd.go | 1 - tstest/integration/tailscaled_deps_test_linux.go | 1 - tstest/integration/tailscaled_deps_test_openbsd.go | 1 - tstest/integration/tailscaled_deps_test_windows.go | 1 - util/linuxfw/iptables.go | 5 ++--- util/winutil/restartmgr_windows.go | 3 +-- wgengine/netlog/netlog.go | 4 ++-- wgengine/router/osrouter/ifconfig_windows.go | 3 +-- wgengine/router/osrouter/router_linux.go | 3 +-- wgengine/wgcfg/device.go | 6 +++--- 35 files changed, 40 insertions(+), 67 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 8c122105f..7f0252148 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -151,7 +151,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/health+ - tailscale.com/util/multierr from tailscale.com/health+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/rands from tailscale.com/tsweb tailscale.com/util/set from tailscale.com/derp/derpserver+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index f8ae3d261..e225cebf9 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -840,7 +840,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/appc+ - tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 9fb7b63ed..cfa073a71 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -177,7 +177,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/cmd/tailscale/cli+ - 
tailscale.com/util/multierr from tailscale.com/health+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 3a6d0e7fd..22f360ac5 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -164,7 +164,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/control/controlclient+ - tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/osdiag from tailscale.com/ipn/localapi diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 0dd36447f..4b80f4a56 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -190,7 +190,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/control/controlclient+ - tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/osdiag from tailscale.com/ipn/localapi diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 9dde241ca..5e92438e7 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -427,7 +427,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/wgengine/router/osrouter tailscale.com/util/mak from tailscale.com/control/controlclient+ - tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ + tailscale.com/util/multierr from tailscale.com/feature/taildrop tailscale.com/util/must from tailscale.com/clientupdate/distsign+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/cmd/tailscaled+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 521eb3ced..c364a9306 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -257,6 +257,7 @@ func TestMinTailscaledWithCLI(t *testing.T) { "cbor", "hujson", "pprof", + "multierr", // https://github.com/tailscale/tailscale/pull/17379 } deptest.DepChecker{ GOOS: "linux", diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 48eefbea7..8de473b7c 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -56,7 +56,6 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/util/clientmetric" - "tailscale.com/util/multierr" "tailscale.com/util/osshare" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" @@ -701,7 +700,7 @@ func createEngine(logf logger.Logf, sys *tsd.System) (onlyNetstack bool, err err logf("wgengine.NewUserspaceEngine(tun %q) error: %v", name, err) errs = append(errs, err) } - return false, multierr.New(errs...) + return false, errors.Join(errs...) 
} // handleSubnetsInNetstack reports whether netstack should handle subnet routers diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 033ff6570..9ced6f966 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -267,7 +267,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/appc+ - tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/cmd/tsidp+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 54f2de1c9..199e1479b 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -54,7 +54,6 @@ import ( "tailscale.com/types/tkatype" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" - "tailscale.com/util/multierr" "tailscale.com/util/singleflight" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" @@ -1307,7 +1306,7 @@ func loadServerPubKeys(ctx context.Context, httpc *http.Client, serverURL string out = tailcfg.OverTLSPublicKeyResponse{} k, err := key.ParseMachinePublicUntyped(mem.B(b)) if err != nil { - return nil, multierr.New(jsonErr, err) + return nil, errors.Join(jsonErr, err) } out.LegacyPublicKey = k return &out, nil diff --git a/control/controlclient/noise.go b/control/controlclient/noise.go index 4bd8cfc25..a0f344664 100644 --- a/control/controlclient/noise.go +++ b/control/controlclient/noise.go @@ -28,7 +28,6 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/util/mak" - "tailscale.com/util/multierr" "tailscale.com/util/singleflight" ) @@ -295,13 +294,13 @@ func (nc *NoiseClient) Close() error { nc.connPool = nil nc.mu.Unlock() - var errors []error + var errs []error for _, c := range conns { if err := c.Close(); err != nil { - errors = append(errors, err) + errs = append(errs, err) } } - return multierr.New(errors...) + return errors.Join(errs...) } // dial opens a new connection to tailcontrol, fetching the server noise key diff --git a/derp/xdp/xdp_linux.go b/derp/xdp/xdp_linux.go index 3ebe0a052..309d9ee9a 100644 --- a/derp/xdp/xdp_linux.go +++ b/derp/xdp/xdp_linux.go @@ -14,7 +14,6 @@ import ( "github.com/cilium/ebpf" "github.com/cilium/ebpf/link" "github.com/prometheus/client_golang/prometheus" - "tailscale.com/util/multierr" ) //go:generate go run github.com/cilium/ebpf/cmd/bpf2go -type config -type counters_key -type counter_key_af -type counter_key_packets_bytes_action -type counter_key_prog_end bpf xdp.c -- -I headers @@ -110,7 +109,7 @@ func (s *STUNServer) Close() error { errs = append(errs, s.link.Close()) } errs = append(errs, s.objs.Close()) - return multierr.New(errs...) + return errors.Join(errs...) } type stunServerMetrics struct { diff --git a/feature/tap/tap_linux.go b/feature/tap/tap_linux.go index 58ac00593..53dcabc36 100644 --- a/feature/tap/tap_linux.go +++ b/feature/tap/tap_linux.go @@ -6,6 +6,7 @@ package tap import ( "bytes" + "errors" "fmt" "net" "net/netip" @@ -29,7 +30,6 @@ import ( "tailscale.com/syncs" "tailscale.com/types/ipproto" "tailscale.com/types/logger" - "tailscale.com/util/multierr" ) // TODO: this was randomly generated once. Maybe do it per process start? 
But @@ -482,7 +482,7 @@ func (t *tapDevice) Write(buffs [][]byte, offset int) (int, error) { wrote++ } } - return wrote, multierr.New(errs...) + return wrote, errors.Join(errs...) } func (t *tapDevice) MTU() (int, error) { diff --git a/health/health.go b/health/health.go index d60762e31..c41256614 100644 --- a/health/health.go +++ b/health/health.go @@ -27,7 +27,6 @@ import ( "tailscale.com/util/cibuild" "tailscale.com/util/eventbus" "tailscale.com/util/mak" - "tailscale.com/util/multierr" "tailscale.com/util/usermetric" "tailscale.com/version" ) @@ -992,8 +991,8 @@ func (t *Tracker) selfCheckLocked() { // OverallError returns a summary of the health state. // -// If there are multiple problems, the error will be of type -// multierr.Error. +// If there are multiple problems, the error will be joined using +// [errors.Join]. func (t *Tracker) OverallError() error { if t.nil() { return nil @@ -1071,7 +1070,7 @@ func (t *Tracker) errorsLocked() []error { // This function is here for legacy compatibility purposes and is deprecated. func (t *Tracker) multiErrLocked() error { errs := t.errorsLocked() - return multierr.New(errs...) + return errors.Join(errs...) } var fakeErrForTesting = envknob.RegisterString("TS_DEBUG_FAKE_HEALTH_ERROR") diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c091e0c61..f214c5def 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -90,7 +90,6 @@ import ( "tailscale.com/util/eventbus" "tailscale.com/util/goroutines" "tailscale.com/util/mak" - "tailscale.com/util/multierr" "tailscale.com/util/osuser" "tailscale.com/util/rands" "tailscale.com/util/set" @@ -3981,7 +3980,7 @@ func (b *LocalBackend) checkPrefsLocked(p *ipn.Prefs) error { if err := b.checkAutoUpdatePrefsLocked(p); err != nil { errs = append(errs, err) } - return multierr.New(errs...) + return errors.Join(errs...) } func (b *LocalBackend) checkSSHPrefsLocked(p *ipn.Prefs) error { @@ -4225,7 +4224,7 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn } } - return multierr.New(errs...) + return errors.Join(errs...) } // changeDisablesExitNodeLocked reports whether applying the change diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index 789a9fdb9..ebd77641b 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -11,6 +11,7 @@ import ( "bufio" "bytes" "context" + "errors" "fmt" "io" "net" @@ -19,7 +20,6 @@ import ( "net/netip" "strings" - "github.com/pkg/errors" "go.uber.org/zap" "tailscale.com/client/tailscale/apitype" "tailscale.com/k8s-operator/sessionrecording/spdy" @@ -31,7 +31,6 @@ import ( "tailscale.com/tsnet" "tailscale.com/tstime" "tailscale.com/util/clientmetric" - "tailscale.com/util/multierr" ) const ( @@ -166,7 +165,7 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, } msg = msg + "; failure mode is 'fail closed'; closing connection." 
if err := closeConnWithWarning(conn, msg); err != nil { - return nil, multierr.New(errors.New(msg), err) + return nil, errors.Join(errors.New(msg), err) } return nil, errors.New(msg) } else { @@ -245,7 +244,7 @@ func closeConnWithWarning(conn net.Conn, msg string) error { b := io.NopCloser(bytes.NewBuffer([]byte(msg))) resp := http.Response{Status: http.StatusText(http.StatusForbidden), StatusCode: http.StatusForbidden, Body: b} if err := resp.Write(conn); err != nil { - return multierr.New(fmt.Errorf("error writing msg %q to conn: %v", msg, err), conn.Close()) + return errors.Join(fmt.Errorf("error writing msg %q to conn: %v", msg, err), conn.Close()) } return conn.Close() } diff --git a/k8s-operator/sessionrecording/ws/conn.go b/k8s-operator/sessionrecording/ws/conn.go index a34379658..a618f85fb 100644 --- a/k8s-operator/sessionrecording/ws/conn.go +++ b/k8s-operator/sessionrecording/ws/conn.go @@ -21,7 +21,6 @@ import ( "k8s.io/apimachinery/pkg/util/remotecommand" "tailscale.com/k8s-operator/sessionrecording/tsrecorder" "tailscale.com/sessionrecording" - "tailscale.com/util/multierr" ) // New wraps the provided network connection and returns a connection whose reads and writes will get triggered as data is received on the hijacked connection. @@ -316,7 +315,7 @@ func (c *conn) Close() error { c.closed = true connCloseErr := c.Conn.Close() recCloseErr := c.rec.Close() - return multierr.New(connCloseErr, recCloseErr) + return errors.Join(connCloseErr, recCloseErr) } // writeBufHasIncompleteFragment returns true if the latest data message diff --git a/kube/kubeclient/client.go b/kube/kubeclient/client.go index 332b21106..0ed960f4d 100644 --- a/kube/kubeclient/client.go +++ b/kube/kubeclient/client.go @@ -15,6 +15,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/json" + "errors" "fmt" "io" "log" @@ -29,7 +30,6 @@ import ( "tailscale.com/kube/kubeapi" "tailscale.com/tstime" - "tailscale.com/util/multierr" ) const ( @@ -397,7 +397,7 @@ func (c *client) CheckSecretPermissions(ctx context.Context, secretName string) } } if len(errs) > 0 { - return false, false, multierr.New(errs...) + return false, false, errors.Join(errs...) } canPatch, err = c.checkPermission(ctx, "patch", TypeSecrets, secretName) if err != nil { diff --git a/net/netcheck/standalone.go b/net/netcheck/standalone.go index c72d7005f..b4523a832 100644 --- a/net/netcheck/standalone.go +++ b/net/netcheck/standalone.go @@ -13,7 +13,6 @@ import ( "tailscale.com/net/stun" "tailscale.com/types/logger" "tailscale.com/types/nettype" - "tailscale.com/util/multierr" ) // Standalone creates the necessary UDP sockets on the given bindAddr and starts @@ -62,7 +61,7 @@ func (c *Client) Standalone(ctx context.Context, bindAddr string) error { // If both v4 and v6 failed, report an error, otherwise let one succeed. if len(errs) == 2 { - return multierr.New(errs...) + return errors.Join(errs...) 
} return nil } diff --git a/net/ping/ping.go b/net/ping/ping.go index 01f3dcf2c..1ff3862dc 100644 --- a/net/ping/ping.go +++ b/net/ping/ping.go @@ -10,6 +10,7 @@ import ( "context" "crypto/rand" "encoding/binary" + "errors" "fmt" "io" "log" @@ -24,7 +25,6 @@ import ( "golang.org/x/net/ipv6" "tailscale.com/types/logger" "tailscale.com/util/mak" - "tailscale.com/util/multierr" ) const ( @@ -157,17 +157,17 @@ func (p *Pinger) Close() error { p.conns = nil p.mu.Unlock() - var errors []error + var errs []error for _, c := range conns { if err := c.Close(); err != nil { - errors = append(errors, err) + errs = append(errs, err) } } p.wg.Wait() p.cleanupOutstanding() - return multierr.New(errors...) + return errors.Join(errs...) } func (p *Pinger) run(ctx context.Context, conn net.PacketConn, typ string) { diff --git a/prober/tls.go b/prober/tls.go index 777b2b508..3ce535435 100644 --- a/prober/tls.go +++ b/prober/tls.go @@ -7,14 +7,13 @@ import ( "context" "crypto/tls" "crypto/x509" + "errors" "fmt" "io" "net/http" "net/netip" "slices" "time" - - "tailscale.com/util/multierr" ) const expiresSoon = 7 * 24 * time.Hour // 7 days from now @@ -69,7 +68,7 @@ func probeTLS(ctx context.Context, config *tls.Config, dialHostPort string) erro func validateConnState(ctx context.Context, cs *tls.ConnectionState) (returnerr error) { var errs []error defer func() { - returnerr = multierr.New(errs...) + returnerr = errors.Join(errs...) }() latestAllowedExpiration := time.Now().Add(expiresSoon) diff --git a/release/dist/dist.go b/release/dist/dist.go index 802d9041b..6fb010299 100644 --- a/release/dist/dist.go +++ b/release/dist/dist.go @@ -20,7 +20,6 @@ import ( "sync" "time" - "tailscale.com/util/multierr" "tailscale.com/version/mkversion" ) @@ -176,7 +175,7 @@ func (b *Build) Build(targets []Target) (files []string, err error) { } sort.Strings(files) - return files, multierr.New(errs...) + return files, errors.Join(errs...) } // Once runs fn if Once hasn't been called with name before. diff --git a/sessionrecording/connect.go b/sessionrecording/connect.go index ccb7e5fd9..a470969d8 100644 --- a/sessionrecording/connect.go +++ b/sessionrecording/connect.go @@ -21,7 +21,6 @@ import ( "tailscale.com/net/netx" "tailscale.com/tailcfg" "tailscale.com/util/httpm" - "tailscale.com/util/multierr" ) const ( @@ -91,7 +90,7 @@ func ConnectToRecorder(ctx context.Context, recs []netip.AddrPort, dial netx.Dia } return pw, attempts, errChan, nil } - return nil, attempts, nil, multierr.New(errs...) + return nil, attempts, nil, errors.Join(errs...) 
} // supportsV2 checks whether a recorder instance supports the /v2/record diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 037e6c264..b5f524088 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -262,7 +262,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/appc+ - tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index 72615330d..217188f75 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -50,7 +50,6 @@ import ( _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/util/syspolicy/policyclient" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index 72615330d..217188f75 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -50,7 +50,6 @@ import ( _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/util/syspolicy/policyclient" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index 72615330d..217188f75 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -50,7 +50,6 @@ import ( _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/util/syspolicy/policyclient" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index 72615330d..217188f75 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -50,7 +50,6 @@ import ( _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/util/syspolicy/policyclient" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index c2761d019..f3cd5e75b 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -60,7 +60,6 @@ import ( _ "tailscale.com/util/backoff" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osdiag" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy/pkey" diff --git a/util/linuxfw/iptables.go b/util/linuxfw/iptables.go index 73da92086..5bd7c528b 100644 --- a/util/linuxfw/iptables.go +++ b/util/linuxfw/iptables.go @@ 
-18,7 +18,6 @@ import ( "github.com/coreos/go-iptables/iptables" "tailscale.com/types/logger" - "tailscale.com/util/multierr" "tailscale.com/version/distro" ) @@ -67,7 +66,7 @@ func detectIptables() (int, error) { default: return 0, FWModeNotSupportedError{ Mode: FirewallModeIPTables, - Err: fmt.Errorf("iptables command run fail: %w", multierr.New(err, ip6err)), + Err: fmt.Errorf("iptables command run fail: %w", errors.Join(err, ip6err)), } } @@ -232,5 +231,5 @@ func clearRules(proto iptables.Protocol, logf logger.Logf) error { errs = append(errs, err) } - return multierr.New(errs...) + return errors.Join(errs...) } diff --git a/util/winutil/restartmgr_windows.go b/util/winutil/restartmgr_windows.go index a52e2fee9..6f549de55 100644 --- a/util/winutil/restartmgr_windows.go +++ b/util/winutil/restartmgr_windows.go @@ -19,7 +19,6 @@ import ( "github.com/dblohm7/wingoes" "golang.org/x/sys/windows" "tailscale.com/types/logger" - "tailscale.com/util/multierr" ) var ( @@ -538,7 +537,7 @@ func (rps RestartableProcesses) Terminate(logf logger.Logf, exitCode uint32, tim } if len(errs) != 0 { - return multierr.New(errs...) + return errors.Join(errs...) } return nil } diff --git a/wgengine/netlog/netlog.go b/wgengine/netlog/netlog.go index 8fd225c90..34b78a2b5 100644 --- a/wgengine/netlog/netlog.go +++ b/wgengine/netlog/netlog.go @@ -10,6 +10,7 @@ package netlog import ( "context" "encoding/json" + "errors" "fmt" "io" "log" @@ -28,7 +29,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/logid" "tailscale.com/types/netlogtype" - "tailscale.com/util/multierr" "tailscale.com/wgengine/router" ) @@ -272,5 +272,5 @@ func (nl *Logger) Shutdown(ctx context.Context) error { nl.addrs = nil nl.prefixes = nil - return multierr.New(err1, err2) + return errors.Join(err1, err2) } diff --git a/wgengine/router/osrouter/ifconfig_windows.go b/wgengine/router/osrouter/ifconfig_windows.go index 78ac8d45f..cb87ad5f2 100644 --- a/wgengine/router/osrouter/ifconfig_windows.go +++ b/wgengine/router/osrouter/ifconfig_windows.go @@ -18,7 +18,6 @@ import ( "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" "tailscale.com/net/tstun" - "tailscale.com/util/multierr" "tailscale.com/wgengine/router" "tailscale.com/wgengine/winnet" @@ -831,5 +830,5 @@ func syncRoutes(ifc *winipcfg.IPAdapterAddresses, want []*routeData, dontDelete } } - return multierr.New(errs...) + return errors.Join(errs...) } diff --git a/wgengine/router/osrouter/router_linux.go b/wgengine/router/osrouter/router_linux.go index 478935483..1f825b917 100644 --- a/wgengine/router/osrouter/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -32,7 +32,6 @@ import ( "tailscale.com/types/preftype" "tailscale.com/util/eventbus" "tailscale.com/util/linuxfw" - "tailscale.com/util/multierr" "tailscale.com/version/distro" "tailscale.com/wgengine/router" ) @@ -488,7 +487,7 @@ func (r *linuxRouter) Set(cfg *router.Config) error { r.enableIPForwarding() } - return multierr.New(errs...) + return errors.Join(errs...) } var dockerStatefulFilteringWarnable = health.Register(&health.Warnable{ diff --git a/wgengine/wgcfg/device.go b/wgengine/wgcfg/device.go index 80fa159e3..ee7eb91c9 100644 --- a/wgengine/wgcfg/device.go +++ b/wgengine/wgcfg/device.go @@ -4,6 +4,7 @@ package wgcfg import ( + "errors" "io" "sort" @@ -11,7 +12,6 @@ import ( "github.com/tailscale/wireguard-go/device" "github.com/tailscale/wireguard-go/tun" "tailscale.com/types/logger" - "tailscale.com/util/multierr" ) // NewDevice returns a wireguard-go Device configured for Tailscale use. 
@@ -31,7 +31,7 @@ func DeviceConfig(d *device.Device) (*Config, error) { cfg, fromErr := FromUAPI(r) r.Close() getErr := <-errc - err := multierr.New(getErr, fromErr) + err := errors.Join(getErr, fromErr) if err != nil { return nil, err } @@ -64,5 +64,5 @@ func ReconfigDevice(d *device.Device, cfg *Config, logf logger.Logf) (err error) toErr := cfg.ToUAPI(logf, w, prev) w.Close() setErr := <-errc - return multierr.New(setErr, toErr) + return errors.Join(setErr, toErr) } From 05a4c8e8392c216db5a4e951ffccc95e8a72d152 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 20:53:47 -0700 Subject: [PATCH 1458/1708] tsnet: remove AuthenticatedAPITransport (API-over-noise) support It never launched and I've lost hope of it launching and it's in my way now, so I guess it's time to say goodbye. Updates tailscale/corp#4383 Updates #17305 Change-Id: I2eb551d49f2fb062979cc307f284df4b3dfa5956 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/debug.go | 9 ------ control/controlclient/auto.go | 10 ------ control/controlclient/direct.go | 14 -------- control/controlclient/noise.go | 23 ------------- internal/noiseconn/conn.go | 16 --------- ipn/ipnlocal/local.go | 57 --------------------------------- tsnet/tsnet.go | 35 -------------------- 7 files changed, 164 deletions(-) diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 8d0357716..7e800dbc5 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -1128,15 +1128,6 @@ func tryConnect(ctx context.Context, controlPublic key.MachinePublic, noiseDiale } defer nc.Close() - // Reserve a RoundTrip for the whoami request. - ok, _, err := nc.ReserveNewRequest(ctx) - if err != nil { - return fmt.Errorf("ReserveNewRequest: %w", err) - } - if !ok { - return errors.New("ReserveNewRequest failed") - } - // Make a /whoami request to the server to verify that we can actually // communicate over the newly-established connection. whoamiURL := "http://" + ts2021Args.host + "/machine/whoami" diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index f5495f854..224838d56 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -845,13 +845,3 @@ func (c *Auto) SetDNS(ctx context.Context, req *tailcfg.SetDNSRequest) error { func (c *Auto) DoNoiseRequest(req *http.Request) (*http.Response, error) { return c.direct.DoNoiseRequest(req) } - -// GetSingleUseNoiseRoundTripper returns a RoundTripper that can be only be used -// once (and must be used once) to make a single HTTP request over the noise -// channel to the coordination server. -// -// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise -// payload, if any. -func (c *Auto) GetSingleUseNoiseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) { - return c.direct.GetSingleUseNoiseRoundTripper(ctx) -} diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 199e1479b..ed84d63ff 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1606,20 +1606,6 @@ func (c *Direct) DoNoiseRequest(req *http.Request) (*http.Response, error) { return nc.Do(req) } -// GetSingleUseNoiseRoundTripper returns a RoundTripper that can be only be used -// once (and must be used once) to make a single HTTP request over the noise -// channel to the coordination server. -// -// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise -// payload, if any. 
-func (c *Direct) GetSingleUseNoiseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) { - nc, err := c.getNoiseClient() - if err != nil { - return nil, nil, err - } - return nc.GetSingleUseRoundTripper(ctx) -} - // doPingerPing sends a Ping to pr.IP using pinger, and sends an http request back to // pr.URL with ping response data. func doPingerPing(logf logger.Logf, c *http.Client, pr *tailcfg.PingRequest, pinger Pinger, pingType tailcfg.PingType) { diff --git a/control/controlclient/noise.go b/control/controlclient/noise.go index a0f344664..c001de0cd 100644 --- a/control/controlclient/noise.go +++ b/control/controlclient/noise.go @@ -181,29 +181,6 @@ func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) { return np, nil } -// GetSingleUseRoundTripper returns a RoundTripper that can be only be used once -// (and must be used once) to make a single HTTP request over the noise channel -// to the coordination server. -// -// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise -// payload, if any. -func (nc *NoiseClient) GetSingleUseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) { - for tries := 0; tries < 3; tries++ { - conn, err := nc.getConn(ctx) - if err != nil { - return nil, nil, err - } - ok, earlyPayloadMaybeNil, err := conn.ReserveNewRequest(ctx) - if err != nil { - return nil, nil, err - } - if ok { - return conn, earlyPayloadMaybeNil, nil - } - } - return nil, nil, errors.New("[unexpected] failed to reserve a request on a connection") -} - // contextErr is an error that wraps another error and is used to indicate that // the error was because a context expired. type contextErr struct { diff --git a/internal/noiseconn/conn.go b/internal/noiseconn/conn.go index 7476b7ecc..29fd1a283 100644 --- a/internal/noiseconn/conn.go +++ b/internal/noiseconn/conn.go @@ -84,22 +84,6 @@ func (c *Conn) GetEarlyPayload(ctx context.Context) (*tailcfg.EarlyNoise, error) } } -// ReserveNewRequest will reserve a new concurrent request on the connection. -// -// It returns whether the reservation was successful, and any early Noise -// payload if present. If a reservation was not successful, it will return -// false and nil for the early payload. -func (c *Conn) ReserveNewRequest(ctx context.Context) (bool, *tailcfg.EarlyNoise, error) { - earlyPayloadMaybeNil, err := c.GetEarlyPayload(ctx) - if err != nil { - return false, nil, err - } - if c.h2cc.ReserveNewRequest() { - return true, earlyPayloadMaybeNil, nil - } - return false, nil, nil -} - // CanTakeNewRequest reports whether the underlying HTTP/2 connection can take // a new request, meaning it has not been closed or received or sent a GOAWAY. 
func (c *Conn) CanTakeNewRequest() bool { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index f214c5def..09f317f0f 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -9,7 +9,6 @@ import ( "cmp" "context" "crypto/sha256" - "encoding/base64" "encoding/binary" "encoding/hex" "encoding/json" @@ -6540,62 +6539,6 @@ func (b *LocalBackend) MagicConn() *magicsock.Conn { return b.sys.MagicSock.Get() } -type keyProvingNoiseRoundTripper struct { - b *LocalBackend -} - -func (n keyProvingNoiseRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - b := n.b - - var priv key.NodePrivate - - b.mu.Lock() - cc := b.ccAuto - if nm := b.NetMap(); nm != nil { - priv = nm.PrivateKey - } - b.mu.Unlock() - if cc == nil { - return nil, errors.New("no client") - } - if priv.IsZero() { - return nil, errors.New("no netmap or private key") - } - rt, ep, err := cc.GetSingleUseNoiseRoundTripper(req.Context()) - if err != nil { - return nil, err - } - if ep == nil || ep.NodeKeyChallenge.IsZero() { - go rt.RoundTrip(new(http.Request)) // return our reservation with a bogus request - return nil, errors.New("this coordination server does not support API calls over the Noise channel") - } - - // QueryEscape the node key since it has a colon in it. - nk := url.QueryEscape(priv.Public().String()) - req.SetBasicAuth(nk, "") - - // genNodeProofHeaderValue returns the Tailscale-Node-Proof header's value to prove - // to chalPub that we control claimedPrivate. - genNodeProofHeaderValue := func(claimedPrivate key.NodePrivate, chalPub key.ChallengePublic) string { - // TODO(bradfitz): cache this somewhere? - box := claimedPrivate.SealToChallenge(chalPub, []byte(chalPub.String())) - return claimedPrivate.Public().String() + " " + base64.StdEncoding.EncodeToString(box) - } - - // And prove we have the private key corresponding to the public key sent - // tin the basic auth username. - req.Header.Set("Tailscale-Node-Proof", genNodeProofHeaderValue(priv, ep.NodeKeyChallenge)) - - return rt.RoundTrip(req) -} - -// KeyProvingNoiseRoundTripper returns an http.RoundTripper that uses the LocalBackend's -// DoNoiseRequest method and mutates the request to add an authorization header -// to prove the client's nodekey. -func (b *LocalBackend) KeyProvingNoiseRoundTripper() http.RoundTripper { - return keyProvingNoiseRoundTripper{b} -} - // DoNoiseRequest sends a request to URL over the control plane // Noise connection. func (b *LocalBackend) DoNoiseRequest(req *http.Request) (*http.Response, error) { diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 42e4198a0..d14f1f16c 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -931,41 +931,6 @@ func (s *Server) getUDPHandlerForFlow(src, dst netip.AddrPort) (handler func(net return func(c nettype.ConnPacketConn) { ln.handle(c) }, true } -// I_Acknowledge_This_API_Is_Experimental must be set true to use AuthenticatedAPITransport() -// for now. -var I_Acknowledge_This_API_Is_Experimental = false - -// AuthenticatedAPITransport provides an HTTP transport that can be used with -// the control server API without needing additional authentication details. It -// authenticates using the current client's nodekey. -// -// It requires the user to set I_Acknowledge_This_API_Is_Experimental. -// -// For example: -// -// import "net/http" -// import "tailscale.com/client/tailscale/v2" -// import "tailscale.com/tsnet" -// -// var s *tsnet.Server -// ... -// rt, err := s.AuthenticatedAPITransport() -// // handler err ... 
-//	var client tailscale.Client{HTTP: http.Client{
-//		Timeout:   1*time.Minute,
-//		UserAgent: "your-useragent-here",
-//		Transport: rt,
-//	}}
-func (s *Server) AuthenticatedAPITransport() (http.RoundTripper, error) {
-	if !I_Acknowledge_This_API_Is_Experimental {
-		return nil, errors.New("use of AuthenticatedAPITransport without setting I_Acknowledge_This_API_Is_Experimental")
-	}
-	if err := s.Start(); err != nil {
-		return nil, err
-	}
-	return s.lb.KeyProvingNoiseRoundTripper(), nil
-}
-
 // Listen announces only on the Tailscale network.
 // It will start the server if it has not been started yet.
 //

From 6f7ce5eb5dc8540b6430557e48107ff5b46b385c Mon Sep 17 00:00:00 2001
From: "M. J. Fromberger"
Date: Wed, 1 Oct 2025 11:39:01 -0700
Subject: [PATCH 1459/1708] appc: factor app connector arguments into a Config
 type (#17389)

Replace the positional arguments to NewAppConnector with a Config
struct. Update the existing uses. Other than the API change, there are
no functional changes in this commit.

Updates #15160
Updates #17192

Change-Id: Ibf37f021372155a4db8aaf738f4b4f2c746bf623
Signed-off-by: M. J. Fromberger
---
 appc/appconnector.go         | 35 +++++++++++----
 appc/appconnector_test.go    | 87 ++++++++++++++++++++++++++++--------
 ipn/ipnlocal/local.go        |  7 ++-
 ipn/ipnlocal/local_test.go   | 15 +++++--
 ipn/ipnlocal/peerapi_test.go | 27 ++++++++---
 5 files changed, 133 insertions(+), 38 deletions(-)

diff --git a/appc/appconnector.go b/appc/appconnector.go
index 8d7dd54e8..8c1d49d22 100644
--- a/appc/appconnector.go
+++ b/appc/appconnector.go
@@ -162,17 +162,36 @@ type AppConnector struct {
 	writeRateDay    *rateLogger
 }
 
+// Config carries the settings for an [AppConnector].
+type Config struct {
+	// Logf is the logger to which debug logs from the connector will be sent.
+	// It must be non-nil.
+	Logf logger.Logf
+
+	// RouteAdvertiser allows the connector to update the set of advertised routes.
+	// It must be non-nil.
+	RouteAdvertiser RouteAdvertiser
+
+	// RouteInfo, if non-nil, is used as the initial set of routes for the
+	// connector. If nil, the connector starts empty.
+	RouteInfo *RouteInfo
+
+	// StoreRoutesFunc, if non-nil, is called when the connector's routes
+	// change, to allow the routes to be persisted.
+	StoreRoutesFunc func(*RouteInfo) error
+}
+
 // NewAppConnector creates a new AppConnector.
-func NewAppConnector(logf logger.Logf, routeAdvertiser RouteAdvertiser, routeInfo *RouteInfo, storeRoutesFunc func(*RouteInfo) error) *AppConnector { +func NewAppConnector(c Config) *AppConnector { ac := &AppConnector{ - logf: logger.WithPrefix(logf, "appc: "), - routeAdvertiser: routeAdvertiser, - storeRoutesFunc: storeRoutesFunc, + logf: logger.WithPrefix(c.Logf, "appc: "), + routeAdvertiser: c.RouteAdvertiser, + storeRoutesFunc: c.StoreRoutesFunc, } - if routeInfo != nil { - ac.domains = routeInfo.Domains - ac.wildcards = routeInfo.Wildcards - ac.controlRoutes = routeInfo.Control + if c.RouteInfo != nil { + ac.domains = c.RouteInfo.Domains + ac.wildcards = c.RouteInfo.Wildcards + ac.controlRoutes = c.RouteInfo.Control } ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, l int64) { ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, l) diff --git a/appc/appconnector_test.go b/appc/appconnector_test.go index c13835f39..12a39f040 100644 --- a/appc/appconnector_test.go +++ b/appc/appconnector_test.go @@ -28,9 +28,14 @@ func TestUpdateDomains(t *testing.T) { ctx := context.Background() var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, &appctest.RouteCollector{}, &RouteInfo{}, fakeStoreRoutes) + a = NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: &appctest.RouteCollector{}, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, &appctest.RouteCollector{}, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: &appctest.RouteCollector{}}) } a.UpdateDomains([]string{"example.com"}) @@ -63,9 +68,13 @@ func TestUpdateRoutes(t *testing.T) { rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a = NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, rc, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) } a.updateDomains([]string{"*.example.com"}) @@ -112,9 +121,14 @@ func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a = NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, rc, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) } mak.Set(&a.domains, "example.com", []netip.Addr{netip.MustParseAddr("192.0.2.1")}) rc.SetRoutes([]netip.Prefix{netip.MustParsePrefix("192.0.2.1/32")}) @@ -133,9 +147,14 @@ func TestDomainRoutes(t *testing.T) { rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a = NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, rc, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) } a.updateDomains([]string{"example.com"}) if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { @@ -159,9 +178,14 @@ func TestObserveDNSResponse(t *testing.T) { rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, 
fakeStoreRoutes) + a = NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, rc, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) } // a has no domains configured, so it should not advertise any routes @@ -248,9 +272,14 @@ func TestWildcardDomains(t *testing.T) { rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a = NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, rc, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) } a.updateDomains([]string{"*.example.com"}) @@ -408,9 +437,14 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a = NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, rc, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) } // nothing has yet been advertised assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -453,9 +487,14 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a = NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, rc, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) } assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -508,9 +547,14 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a = NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, rc, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) } assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -649,7 +693,12 @@ func TestMetricBucketsAreSorted(t *testing.T) { func TestUpdateRoutesDeadlock(t *testing.T) { ctx := context.Background() rc := &appctest.RouteCollector{} - a := NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a := NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) advertiseCalled := new(atomic.Bool) unadvertiseCalled := new(atomic.Bool) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 09f317f0f..5e738572f 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4802,7 +4802,12 @@ func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs i } storeFunc = b.storeRouteInfo } - b.appConnector = appc.NewAppConnector(b.logf, b, ri, storeFunc) + b.appConnector = appc.NewAppConnector(appc.Config{ + Logf: b.logf, + RouteAdvertiser: b, + RouteInfo: ri, + StoreRoutesFunc: storeFunc, + }) } if nm == nil { return diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index a984d66bf..571f472cc 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2309,9 +2309,11 @@ func TestOfferingAppConnector(t 
*testing.T) { t.Fatal("unexpected offering app connector") } if shouldStore { - b.appConnector = appc.NewAppConnector(t.Logf, nil, &appc.RouteInfo{}, fakeStoreRoutes) + b.appConnector = appc.NewAppConnector(appc.Config{ + Logf: t.Logf, RouteInfo: &appc.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, + }) } else { - b.appConnector = appc.NewAppConnector(t.Logf, nil, nil, nil) + b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf}) } if !b.OfferingAppConnector() { t.Fatal("unexpected not offering app connector") @@ -2370,9 +2372,14 @@ func TestObserveDNSResponse(t *testing.T) { rc := &appctest.RouteCollector{} if shouldStore { - b.appConnector = appc.NewAppConnector(t.Logf, rc, &appc.RouteInfo{}, fakeStoreRoutes) + b.appConnector = appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &appc.RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - b.appConnector = appc.NewAppConnector(t.Logf, rc, nil, nil) + b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf, RouteAdvertiser: rc}) } b.appConnector.UpdateDomains([]string{"example.com"}) b.appConnector.Wait(context.Background()) diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index db01dd608..a6a5f6ff5 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -257,9 +257,14 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) var a *appc.AppConnector if shouldStore { - a = appc.NewAppConnector(t.Logf, &appctest.RouteCollector{}, &appc.RouteInfo{}, fakeStoreRoutes) + a = appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + RouteAdvertiser: &appctest.RouteCollector{}, + RouteInfo: &appc.RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = appc.NewAppConnector(t.Logf, &appctest.RouteCollector{}, nil, nil) + a = appc.NewAppConnector(appc.Config{Logf: t.Logf, RouteAdvertiser: &appctest.RouteCollector{}}) } sys.Set(pm.Store()) sys.Set(eng) @@ -332,9 +337,14 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) var a *appc.AppConnector if shouldStore { - a = appc.NewAppConnector(t.Logf, rc, &appc.RouteInfo{}, fakeStoreRoutes) + a = appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &appc.RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = appc.NewAppConnector(t.Logf, rc, nil, nil) + a = appc.NewAppConnector(appc.Config{Logf: t.Logf, RouteAdvertiser: rc}) } sys.Set(pm.Store()) sys.Set(eng) @@ -399,9 +409,14 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) var a *appc.AppConnector if shouldStore { - a = appc.NewAppConnector(t.Logf, rc, &appc.RouteInfo{}, fakeStoreRoutes) + a = appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &appc.RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = appc.NewAppConnector(t.Logf, rc, nil, nil) + a = appc.NewAppConnector(appc.Config{Logf: t.Logf, RouteAdvertiser: rc}) } sys.Set(pm.Store()) sys.Set(eng) From ce752b8a88214a2d45477aa8b77384175ebbdf18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Wed, 1 Oct 2025 14:59:38 -0400 Subject: [PATCH 1460/1708] net/netmon: remove usage of direct callbacks from netmon (#17292) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The callback 
itself is not removed as it is used in other repos, making it simpler for those to slowly transition to the eventbus. Updates #15160 Signed-off-by: Claus Lensbøl --- cmd/tailscaled/debug.go | 33 ++++++++++++---- cmd/tailscaled/tailscaled.go | 11 +++++- cmd/tsconnect/wasm/wasm_js.go | 2 +- control/controlclient/controlclient_test.go | 2 + control/controlclient/direct_test.go | 8 +++- control/controlclient/noise_test.go | 3 ++ control/controlhttp/http_test.go | 7 +++- ipn/ipnlocal/local.go | 2 +- ipn/ipnlocal/local_test.go | 8 +++- ipn/ipnlocal/network-lock_test.go | 4 +- ipn/ipnlocal/state_test.go | 1 + log/sockstatlog/logger.go | 4 +- log/sockstatlog/logger_test.go | 2 +- logpolicy/logpolicy.go | 7 ++++ logtail/config.go | 2 + logtail/logtail.go | 31 +++++++++++++++ logtail/logtail_test.go | 7 +++- net/dns/manager.go | 4 +- net/dns/manager_tcp_test.go | 10 ++++- net/dns/manager_test.go | 10 ++++- net/dns/resolver/forwarder_test.go | 2 +- net/dns/resolver/tsdns_test.go | 7 +++- net/netmon/loghelper.go | 22 +++++++++-- net/netmon/loghelper_test.go | 21 ++++------ net/tsdial/tsdial.go | 43 +++++++++++++++++++++ tsnet/tsnet.go | 2 + wgengine/netlog/netlog.go | 4 +- wgengine/userspace.go | 6 ++- 28 files changed, 217 insertions(+), 48 deletions(-) diff --git a/cmd/tailscaled/debug.go b/cmd/tailscaled/debug.go index 96f98d9d6..bcc34fb0d 100644 --- a/cmd/tailscaled/debug.go +++ b/cmd/tailscaled/debug.go @@ -104,14 +104,10 @@ func runMonitor(ctx context.Context, loop bool) error { } defer mon.Close() - mon.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { - if !delta.Major { - log.Printf("Network monitor fired; not a major change") - return - } - log.Printf("Network monitor fired. New state:") - dump(delta.New) - }) + eventClient := b.Client("debug.runMonitor") + m := eventClient.Monitor(changeDeltaWatcher(eventClient, ctx, dump)) + defer m.Close() + if loop { log.Printf("Starting link change monitor; initial state:") } @@ -124,6 +120,27 @@ func runMonitor(ctx context.Context, loop bool) error { select {} } +func changeDeltaWatcher(ec *eventbus.Client, ctx context.Context, dump func(st *netmon.State)) func(*eventbus.Client) { + changeSub := eventbus.Subscribe[netmon.ChangeDelta](ec) + return func(ec *eventbus.Client) { + for { + select { + case <-ctx.Done(): + return + case <-ec.Done(): + return + case delta := <-changeSub.Events(): + if !delta.Major { + log.Printf("Network monitor fired; not a major change") + return + } + log.Printf("Network monitor fired. New state:") + dump(delta.New) + } + } + } +} + func getURL(ctx context.Context, urlStr string) error { if urlStr == "login" { urlStr = "https://login.tailscale.com" diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 8de473b7c..27fec05a3 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -433,7 +433,13 @@ func run() (err error) { var publicLogID logid.PublicID if buildfeatures.HasLogTail { - pol := logpolicy.New(logtail.CollectionNode, netMon, sys.HealthTracker.Get(), nil /* use log.Printf */) + + pol := logpolicy.Options{ + Collection: logtail.CollectionNode, + NetMon: netMon, + Health: sys.HealthTracker.Get(), + Bus: sys.Bus.Get(), + }.New() pol.SetVerbosityLevel(args.verbose) publicLogID = pol.PublicID logPol = pol @@ -470,7 +476,7 @@ func run() (err error) { // Always clean up, even if we're going to run the server. This covers cases // such as when a system was rebooted without shutting down, or tailscaled // crashed, and would for example restore system DNS configuration. 
- dns.CleanUp(logf, netMon, sys.HealthTracker.Get(), args.tunname) + dns.CleanUp(logf, netMon, sys.Bus.Get(), sys.HealthTracker.Get(), args.tunname) router.CleanUp(logf, netMon, args.tunname) // If the cleanUp flag was passed, then exit. if args.cleanUp { @@ -616,6 +622,7 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID } dialer := &tsdial.Dialer{Logf: logf} // mutated below (before used) + dialer.SetBus(sys.Bus.Get()) sys.Set(dialer) onlyNetstack, err := createEngine(logf, sys) diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index fbf7968a0..2e81fa4a8 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -104,6 +104,7 @@ func newIPN(jsConfig js.Value) map[string]any { sys := tsd.NewSystem() sys.Set(store) dialer := &tsdial.Dialer{Logf: logf} + dialer.SetBus(sys.Bus.Get()) eng, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ Dialer: dialer, SetSubsystem: sys.Set, @@ -463,7 +464,6 @@ func (s *jsSSHSession) Run() { cols = s.pendingResizeCols } err = session.RequestPty("xterm", rows, cols, ssh.TerminalModes{}) - if err != nil { writeError("Pseudo Terminal", err) return diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index 78646d76a..3914d10ef 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -223,6 +223,7 @@ func TestDirectProxyManual(t *testing.T) { dialer := &tsdial.Dialer{} dialer.SetNetMon(netmon.NewStatic()) + dialer.SetBus(bus) opts := Options{ Persist: persist.Persist{}, @@ -300,6 +301,7 @@ func testHTTPS(t *testing.T, withProxy bool) { dialer := &tsdial.Dialer{} dialer.SetNetMon(netmon.NewStatic()) + dialer.SetBus(bus) dialer.SetSystemDialerForTest(func(ctx context.Context, network, addr string) (net.Conn, error) { host, _, err := net.SplitHostPort(addr) if err != nil { diff --git a/control/controlclient/direct_test.go b/control/controlclient/direct_test.go index bba76d6f0..dd93dc7b3 100644 --- a/control/controlclient/direct_test.go +++ b/control/controlclient/direct_test.go @@ -27,13 +27,15 @@ func TestNewDirect(t *testing.T) { bus := eventbustest.NewBus(t) k := key.NewMachine() + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) opts := Options{ ServerURL: "https://example.com", Hostinfo: hi, GetMachinePrivateKey: func() (key.MachinePrivate, error) { return k, nil }, - Dialer: tsdial.NewDialer(netmon.NewStatic()), + Dialer: dialer, Bus: bus, } c, err := NewDirect(opts) @@ -105,13 +107,15 @@ func TestTsmpPing(t *testing.T) { bus := eventbustest.NewBus(t) k := key.NewMachine() + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) opts := Options{ ServerURL: "https://example.com", Hostinfo: hi, GetMachinePrivateKey: func() (key.MachinePrivate, error) { return k, nil }, - Dialer: tsdial.NewDialer(netmon.NewStatic()), + Dialer: dialer, Bus: bus, } diff --git a/control/controlclient/noise_test.go b/control/controlclient/noise_test.go index 4904016f2..d9c71cf27 100644 --- a/control/controlclient/noise_test.go +++ b/control/controlclient/noise_test.go @@ -22,6 +22,7 @@ import ( "tailscale.com/tstest/nettest" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/eventbus/eventbustest" ) // maxAllowedNoiseVersion is the highest we expect the Tailscale @@ -175,6 +176,7 @@ func (tt noiseClientTest) run(t *testing.T) { serverPrivate := key.NewMachine() clientPrivate := key.NewMachine() chalPrivate := key.NewChallenge() + bus := 
eventbustest.NewBus(t) const msg = "Hello, client" h2 := &http2.Server{} @@ -194,6 +196,7 @@ func (tt noiseClientTest) run(t *testing.T) { defer hs.Close() dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) if nettest.PreferMemNetwork() { dialer.SetSystemDialerForTest(nw.Dial) } diff --git a/control/controlhttp/http_test.go b/control/controlhttp/http_test.go index 6485761ac..648b9e5ed 100644 --- a/control/controlhttp/http_test.go +++ b/control/controlhttp/http_test.go @@ -149,6 +149,8 @@ func testControlHTTP(t *testing.T, param httpTestParam) { proxy := param.proxy client, server := key.NewMachine(), key.NewMachine() + bus := eventbustest.NewBus(t) + const testProtocolVersion = 1 const earlyWriteMsg = "Hello, world!" sch := make(chan serverResult, 1) @@ -218,6 +220,7 @@ func testControlHTTP(t *testing.T, param httpTestParam) { netMon := netmon.NewStatic() dialer := tsdial.NewDialer(netMon) + dialer.SetBus(bus) a := &Dialer{ Hostname: "localhost", HTTPPort: strconv.Itoa(httpLn.Addr().(*net.TCPAddr).Port), @@ -775,7 +778,7 @@ func runDialPlanTest(t *testing.T, plan *tailcfg.ControlDialPlan, want []netip.A if allowFallback { host = fallbackAddr.String() } - + bus := eventbustest.NewBus(t) a := &Dialer{ Hostname: host, HTTPPort: httpPort, @@ -790,7 +793,7 @@ func runDialPlanTest(t *testing.T, plan *tailcfg.ControlDialPlan, want []netip.A omitCertErrorLogging: true, testFallbackDelay: 50 * time.Millisecond, Clock: clock, - HealthTracker: health.NewTracker(eventbustest.NewBus(t)), + HealthTracker: health.NewTracker(bus), } start := time.Now() diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5e738572f..af5a40550 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -526,7 +526,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo }() netMon := sys.NetMon.Get() - b.sockstatLogger, err = sockstatlog.NewLogger(logpolicy.LogsDir(logf), logf, logID, netMon, sys.HealthTracker.Get()) + b.sockstatLogger, err = sockstatlog.NewLogger(logpolicy.LogsDir(logf), logf, logID, netMon, sys.HealthTracker.Get(), sys.Bus.Get()) if err != nil { log.Printf("error setting up sockstat logger: %v", err) } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 571f472cc..ec65c67ee 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -480,7 +480,9 @@ func newTestLocalBackendWithSys(t testing.TB, sys *tsd.System) *LocalBackend { t.Log("Added fake userspace engine for testing") } if _, ok := sys.Dialer.GetOK(); !ok { - sys.Set(tsdial.NewDialer(netmon.NewStatic())) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(sys.Bus.Get()) + sys.Set(dialer) t.Log("Added static dialer for testing") } lb, err := NewLocalBackend(logf, logid.PublicID{}, sys, 0) @@ -3108,12 +3110,14 @@ func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { b.hostinfo = hi k := key.NewMachine() var cc *mockControl + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(sys.Bus.Get()) opts := controlclient.Options{ ServerURL: "https://example.com", GetMachinePrivateKey: func() (key.MachinePrivate, error) { return k, nil }, - Dialer: tsdial.NewDialer(netmon.NewStatic()), + Dialer: dialer, Logf: b.logf, PolicyClient: polc, } diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 0d3f7db43..c7c4c905f 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -54,6 +54,8 @@ func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *even bus 
:= eventbustest.NewBus(t) k := key.NewMachine() + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) opts := controlclient.Options{ ServerURL: "https://example.com", Hostinfo: hi, @@ -63,7 +65,7 @@ func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *even HTTPTestClient: c, NoiseTestClient: c, Observer: observerFunc(func(controlclient.Status) {}), - Dialer: tsdial.NewDialer(netmon.NewStatic()), + Dialer: dialer, Bus: bus, } diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 347aaf8b8..a387af035 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1668,6 +1668,7 @@ func newLocalBackendWithMockEngineAndControl(t *testing.T, enableLogging bool) ( sys := tsd.NewSystemWithBus(bus) sys.Set(dialer) sys.Set(dialer.NetMon()) + dialer.SetBus(bus) magicConn, err := magicsock.NewConn(magicsock.Options{ Logf: logf, diff --git a/log/sockstatlog/logger.go b/log/sockstatlog/logger.go index 4f8909725..e0744de0f 100644 --- a/log/sockstatlog/logger.go +++ b/log/sockstatlog/logger.go @@ -26,6 +26,7 @@ import ( "tailscale.com/net/sockstats" "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" ) @@ -97,7 +98,7 @@ func SockstatLogID(logID logid.PublicID) logid.PrivateID { // // The netMon parameter is optional. It should be specified in environments where // Tailscaled is manipulating the routing table. -func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *netmon.Monitor, health *health.Tracker) (*Logger, error) { +func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (*Logger, error) { if !sockstats.IsAvailable || !buildfeatures.HasLogTail { return nil, nil } @@ -127,6 +128,7 @@ func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *ne PrivateID: SockstatLogID(logID), Collection: "sockstats.log.tailscale.io", Buffer: filch, + Bus: bus, CompressLogs: true, FlushDelayFn: func() time.Duration { // set flush delay to 100 years so it never flushes automatically diff --git a/log/sockstatlog/logger_test.go b/log/sockstatlog/logger_test.go index 31fb17e46..e5c2feb29 100644 --- a/log/sockstatlog/logger_test.go +++ b/log/sockstatlog/logger_test.go @@ -24,7 +24,7 @@ func TestResourceCleanup(t *testing.T) { if err != nil { t.Fatal(err) } - lg, err := NewLogger(td, logger.Discard, id.Public(), nil, nil) + lg, err := NewLogger(td, logger.Discard, id.Public(), nil, nil, nil) if err != nil { t.Fatal(err) } diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index c1f3e553a..9c7e62ab0 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -50,6 +50,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/must" "tailscale.com/util/racebuild" "tailscale.com/util/syspolicy/pkey" @@ -489,6 +490,11 @@ type Options struct { // If non-nil, it's used to construct the default HTTP client. Health *health.Tracker + // Bus is an optional parameter for communication on the eventbus. + // If non-nil, it's passed to logtail for use in interface monitoring. + // TODO(cmol): Make this non-optional when it's plumbed in by the clients. + Bus *eventbus.Bus + // Logf is an optional logger to use. // If nil, [log.Printf] will be used instead. 
Logf logger.Logf @@ -615,6 +621,7 @@ func (opts Options) init(disableLogging bool) (*logtail.Config, *Policy) { Stderr: logWriter{console}, CompressLogs: true, MaxUploadSize: opts.MaxUploadSize, + Bus: opts.Bus, } if opts.Collection == logtail.CollectionNode { conf.MetricsDelta = clientmetric.EncodeLogTailMetricsDelta diff --git a/logtail/config.go b/logtail/config.go index a6c068c0c..bf47dd8aa 100644 --- a/logtail/config.go +++ b/logtail/config.go @@ -10,6 +10,7 @@ import ( "tailscale.com/tstime" "tailscale.com/types/logid" + "tailscale.com/util/eventbus" ) // DefaultHost is the default host name to upload logs to when @@ -34,6 +35,7 @@ type Config struct { LowMemory bool // if true, logtail minimizes memory use Clock tstime.Clock // if set, Clock.Now substitutes uses of time.Now Stderr io.Writer // if set, logs are sent here instead of os.Stderr + Bus *eventbus.Bus // if set, uses the eventbus for awaitInternetUp instead of callback StderrLevel int // max verbosity level to write to stderr; 0 means the non-verbose messages only Buffer Buffer // temp storage, if nil a MemoryBuffer CompressLogs bool // whether to compress the log uploads diff --git a/logtail/logtail.go b/logtail/logtail.go index 948c5a460..675422890 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -32,6 +32,7 @@ import ( "tailscale.com/tstime" tslogger "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/eventbus" "tailscale.com/util/set" "tailscale.com/util/truncate" "tailscale.com/util/zstdframe" @@ -120,6 +121,10 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger { shutdownStart: make(chan struct{}), shutdownDone: make(chan struct{}), } + + if cfg.Bus != nil { + l.eventClient = cfg.Bus.Client("logtail.Logger") + } l.SetSockstatsLabel(sockstats.LabelLogtailLogger) l.compressLogs = cfg.CompressLogs @@ -156,6 +161,7 @@ type Logger struct { privateID logid.PrivateID httpDoCalls atomic.Int32 sockstatsLabel atomicSocktatsLabel + eventClient *eventbus.Client procID uint32 includeProcSequence bool @@ -221,6 +227,9 @@ func (l *Logger) Shutdown(ctx context.Context) error { l.httpc.CloseIdleConnections() }() + if l.eventClient != nil { + l.eventClient.Close() + } l.shutdownStartMu.Lock() select { case <-l.shutdownStart: @@ -417,6 +426,10 @@ func (l *Logger) internetUp() bool { } func (l *Logger) awaitInternetUp(ctx context.Context) { + if l.eventClient != nil { + l.awaitInternetUpBus(ctx) + return + } upc := make(chan bool, 1) defer l.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { if delta.New.AnyInterfaceUp() { @@ -436,6 +449,24 @@ func (l *Logger) awaitInternetUp(ctx context.Context) { } } +func (l *Logger) awaitInternetUpBus(ctx context.Context) { + if l.internetUp() { + return + } + sub := eventbus.Subscribe[netmon.ChangeDelta](l.eventClient) + defer sub.Close() + select { + case delta := <-sub.Events(): + if delta.New.AnyInterfaceUp() { + fmt.Fprintf(l.stderr, "logtail: internet back up\n") + return + } + fmt.Fprintf(l.stderr, "logtail: network changed, but is not up") + case <-ctx.Done(): + return + } +} + // upload uploads body to the log server. // origlen indicates the pre-compression body length. // origlen of -1 indicates that the body is not compressed. 
diff --git a/logtail/logtail_test.go b/logtail/logtail_test.go index b8c46c448..a92f88b4b 100644 --- a/logtail/logtail_test.go +++ b/logtail/logtail_test.go @@ -17,6 +17,7 @@ import ( "github.com/go-json-experiment/json/jsontext" "tailscale.com/tstest" "tailscale.com/tstime" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" ) @@ -30,6 +31,7 @@ func TestFastShutdown(t *testing.T) { l := NewLogger(Config{ BaseURL: testServ.URL, + Bus: eventbustest.NewBus(t), }, t.Logf) err := l.Shutdown(ctx) if err != nil { @@ -62,7 +64,10 @@ func NewLogtailTestHarness(t *testing.T) (*LogtailTestServer, *Logger) { t.Cleanup(ts.srv.Close) - l := NewLogger(Config{BaseURL: ts.srv.URL}, t.Logf) + l := NewLogger(Config{ + BaseURL: ts.srv.URL, + Bus: eventbustest.NewBus(t), + }, t.Logf) // There is always an initial "logtail started" message body := <-ts.uploaded diff --git a/net/dns/manager.go b/net/dns/manager.go index edf156ece..de99fe646 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -30,6 +30,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/util/slicesx" "tailscale.com/util/syspolicy/policyclient" ) @@ -600,7 +601,7 @@ func (m *Manager) FlushCaches() error { // No other state needs to be instantiated before this runs. // // health must not be nil -func CleanUp(logf logger.Logf, netMon *netmon.Monitor, health *health.Tracker, interfaceName string) { +func CleanUp(logf logger.Logf, netMon *netmon.Monitor, bus *eventbus.Bus, health *health.Tracker, interfaceName string) { if !buildfeatures.HasDNS { return } @@ -611,6 +612,7 @@ func CleanUp(logf logger.Logf, netMon *netmon.Monitor, health *health.Tracker, i } d := &tsdial.Dialer{Logf: logf} d.SetNetMon(netMon) + d.SetBus(bus) dns := NewManager(logf, oscfg, health, d, nil, nil, runtime.GOOS) if err := dns.Down(); err != nil { logf("dns down: %v", err) diff --git a/net/dns/manager_tcp_test.go b/net/dns/manager_tcp_test.go index 46883a1e7..dcdc88c7a 100644 --- a/net/dns/manager_tcp_test.go +++ b/net/dns/manager_tcp_test.go @@ -90,7 +90,10 @@ func TestDNSOverTCP(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, } - m := NewManager(t.Logf, &f, health.NewTracker(eventbustest.NewBus(t)), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(t.Logf, &f, health.NewTracker(bus), dialer, nil, nil, "") m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts( @@ -175,7 +178,10 @@ func TestDNSOverTCP_TooLarge(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, } - m := NewManager(log, &f, health.NewTracker(eventbustest.NewBus(t)), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(log, &f, health.NewTracker(bus), dialer, nil, nil, "") m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts("andrew.ts.com.", "1.2.3.4"), diff --git a/net/dns/manager_test.go b/net/dns/manager_test.go index b5a510862..92b660007 100644 --- a/net/dns/manager_test.go +++ b/net/dns/manager_test.go @@ -933,7 +933,10 @@ func TestManager(t *testing.T) { goos = "linux" } knobs := &controlknobs.Knobs{} - m := NewManager(t.Logf, &f, health.NewTracker(eventbustest.NewBus(t)), tsdial.NewDialer(netmon.NewStatic()), nil, knobs, goos) + bus := eventbustest.NewBus(t) + dialer := 
tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(t.Logf, &f, health.NewTracker(bus), dialer, nil, knobs, goos) m.resolver.TestOnlySetHook(f.SetResolver) if err := m.Set(test.in); err != nil { @@ -1039,7 +1042,10 @@ func TestConfigRecompilation(t *testing.T) { SearchDomains: fqdns("foo.ts.net"), } - m := NewManager(t.Logf, f, health.NewTracker(eventbustest.NewBus(t)), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "darwin") + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(t.Logf, f, health.NewTracker(bus), dialer, nil, nil, "darwin") var managerConfig *resolver.Config m.resolver.TestOnlySetHook(func(cfg resolver.Config) { diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index b5cc7d018..ec491c581 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -122,7 +122,6 @@ func TestResolversWithDelays(t *testing.T) { } }) } - } func TestGetRCode(t *testing.T) { @@ -454,6 +453,7 @@ func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports var dialer tsdial.Dialer dialer.SetNetMon(netMon) + dialer.SetBus(bus) fwd := newForwarder(logf, netMon, nil, &dialer, health.NewTracker(bus), nil) if modify != nil { diff --git a/net/dns/resolver/tsdns_test.go b/net/dns/resolver/tsdns_test.go index 0823ea139..f0dbb48b3 100644 --- a/net/dns/resolver/tsdns_test.go +++ b/net/dns/resolver/tsdns_test.go @@ -353,10 +353,13 @@ func TestRDNSNameToIPv6(t *testing.T) { } func newResolver(t testing.TB) *Resolver { + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) return New(t.Logf, nil, // no link selector - tsdial.NewDialer(netmon.NewStatic()), - health.NewTracker(eventbustest.NewBus(t)), + dialer, + health.NewTracker(bus), nil, // no control knobs ) } diff --git a/net/netmon/loghelper.go b/net/netmon/loghelper.go index 96991644c..2e28e8cda 100644 --- a/net/netmon/loghelper.go +++ b/net/netmon/loghelper.go @@ -8,6 +8,7 @@ import ( "sync" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) // LinkChangeLogLimiter returns a new [logger.Logf] that logs each unique @@ -17,13 +18,12 @@ import ( // done. func LinkChangeLogLimiter(ctx context.Context, logf logger.Logf, nm *Monitor) logger.Logf { var formatSeen sync.Map // map[string]bool - unregister := nm.RegisterChangeCallback(func(cd *ChangeDelta) { + nm.b.Monitor(nm.changeDeltaWatcher(nm.b, ctx, func(cd ChangeDelta) { // If we're in a major change or a time jump, clear the seen map. if cd.Major || cd.TimeJumped { formatSeen.Clear() } - }) - context.AfterFunc(ctx, unregister) + })) return func(format string, args ...any) { // We only store 'true' in the map, so if it's present then it @@ -42,3 +42,19 @@ func LinkChangeLogLimiter(ctx context.Context, logf logger.Logf, nm *Monitor) lo logf(format, args...) 
} } + +func (nm *Monitor) changeDeltaWatcher(ec *eventbus.Client, ctx context.Context, fn func(ChangeDelta)) func(*eventbus.Client) { + sub := eventbus.Subscribe[ChangeDelta](ec) + return func(ec *eventbus.Client) { + for { + select { + case <-ctx.Done(): + return + case <-sub.Done(): + return + case change := <-sub.Events(): + fn(change) + } + } + } +} diff --git a/net/netmon/loghelper_test.go b/net/netmon/loghelper_test.go index aeac9f031..ca3b1284c 100644 --- a/net/netmon/loghelper_test.go +++ b/net/netmon/loghelper_test.go @@ -11,6 +11,7 @@ import ( "testing/synctest" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" ) func TestLinkChangeLogLimiter(t *testing.T) { synctest.Test(t, syncTestLinkChangeLogLimiter) } @@ -61,21 +62,15 @@ func syncTestLinkChangeLogLimiter(t *testing.T) { // string cache and allow the next log to write to our log buffer. // // InjectEvent doesn't work because it's not a major event, so we - // instead reach into the netmon and grab the callback, and then call - // it ourselves. - mon.mu.Lock() - var cb func(*ChangeDelta) - for _, c := range mon.cbs { - cb = c - break - } - mon.mu.Unlock() - - cb(&ChangeDelta{Major: true}) + // instead inject the event ourselves. + injector := eventbustest.NewInjector(t, bus) + eventbustest.Inject(injector, ChangeDelta{Major: true}) + synctest.Wait() logf("hello %s", "world") - if got := logBuffer.String(); got != "hello world\nother message\nhello world\n" { - t.Errorf("unexpected log buffer contents: %q", got) + want := "hello world\nother message\nhello world\n" + if got := logBuffer.String(); got != want { + t.Errorf("unexpected log buffer contents, got: %q, want, %q", got, want) } // Canceling the context we passed to LinkChangeLogLimiter should diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index e4e4e9e8b..bec196a2e 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -28,6 +28,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/testenv" "tailscale.com/version" @@ -86,6 +87,8 @@ type Dialer struct { dnsCache *dnscache.MessageCache // nil until first non-empty SetExitDNSDoH nextSysConnID int activeSysConns map[int]net.Conn // active connections not yet closed + eventClient *eventbus.Client + eventBusSubs eventbus.Monitor } // sysConn wraps a net.Conn that was created using d.SystemDial. @@ -158,6 +161,9 @@ func (d *Dialer) SetRoutes(routes, localRoutes []netip.Prefix) { } func (d *Dialer) Close() error { + if d.eventClient != nil { + d.eventBusSubs.Close() + } d.mu.Lock() defer d.mu.Unlock() d.closed = true @@ -186,6 +192,14 @@ func (d *Dialer) SetNetMon(netMon *netmon.Monitor) { d.netMonUnregister = nil } d.netMon = netMon + // Having multiple watchers could lead to problems, + // so remove the eventClient if it exists. + // This should really not happen, but better checking for it than not. + // TODO(cmol): Should this just be a panic? + if d.eventClient != nil { + d.eventBusSubs.Close() + d.eventClient = nil + } d.netMonUnregister = d.netMon.RegisterChangeCallback(d.linkChanged) } @@ -197,6 +211,35 @@ func (d *Dialer) NetMon() *netmon.Monitor { return d.netMon } +func (d *Dialer) SetBus(bus *eventbus.Bus) { + d.mu.Lock() + defer d.mu.Unlock() + if d.eventClient != nil { + panic("eventbus has already been set") + } + // Having multiple watchers could lead to problems, + // so unregister the callback if it exists. 
+ if d.netMonUnregister != nil { + d.netMonUnregister() + } + d.eventClient = bus.Client("tsdial.Dialer") + d.eventBusSubs = d.eventClient.Monitor(d.linkChangeWatcher(d.eventClient)) +} + +func (d *Dialer) linkChangeWatcher(ec *eventbus.Client) func(*eventbus.Client) { + linkChangeSub := eventbus.Subscribe[netmon.ChangeDelta](ec) + return func(ec *eventbus.Client) { + for { + select { + case <-ec.Done(): + return + case cd := <-linkChangeSub.Events(): + d.linkChanged(&cd) + } + } + } +} + var ( metricLinkChangeConnClosed = clientmetric.NewCounter("tsdial_linkchange_closes") metricChangeDeltaNoDefaultRoute = clientmetric.NewCounter("tsdial_changedelta_no_default_route") diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index d14f1f16c..890193d0b 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -592,6 +592,7 @@ func (s *Server) start() (reterr error) { closePool.add(s.netMon) s.dialer = &tsdial.Dialer{Logf: tsLogf} // mutated below (before used) + s.dialer.SetBus(sys.Bus.Get()) eng, err := wgengine.NewUserspaceEngine(tsLogf, wgengine.Config{ EventBus: sys.Bus.Get(), ListenPort: s.Port, @@ -767,6 +768,7 @@ func (s *Server) startLogger(closePool *closeOnErrorPool, health *health.Tracker Stderr: io.Discard, // log everything to Buffer Buffer: s.logbuffer, CompressLogs: true, + Bus: s.sys.Bus.Get(), HTTPC: &http.Client{Transport: logpolicy.NewLogtailTransport(logtail.DefaultHost, s.netMon, health, tsLogf)}, MetricsDelta: clientmetric.EncodeLogTailMetricsDelta, } diff --git a/wgengine/netlog/netlog.go b/wgengine/netlog/netlog.go index 34b78a2b5..b7281e542 100644 --- a/wgengine/netlog/netlog.go +++ b/wgengine/netlog/netlog.go @@ -29,6 +29,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/logid" "tailscale.com/types/netlogtype" + "tailscale.com/util/eventbus" "tailscale.com/wgengine/router" ) @@ -95,7 +96,7 @@ var testClient *http.Client // The IP protocol and source port are always zero. // The sock is used to populated the PhysicalTraffic field in Message. // The netMon parameter is optional; if non-nil it's used to do faster interface lookups. 
-func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID logid.PrivateID, tun, sock Device, netMon *netmon.Monitor, health *health.Tracker, logExitFlowEnabledEnabled bool) error { +func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID logid.PrivateID, tun, sock Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus, logExitFlowEnabledEnabled bool) error { nl.mu.Lock() defer nl.mu.Unlock() if nl.logger != nil { @@ -112,6 +113,7 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo Collection: "tailtraffic.log.tailscale.io", PrivateID: nodeLogID, CopyPrivateID: domainLogID, + Bus: bus, Stderr: io.Discard, CompressLogs: true, HTTPC: httpc, diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 049abcf17..30486f7a9 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -312,6 +312,9 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } if conf.Dialer == nil { conf.Dialer = &tsdial.Dialer{Logf: logf} + if conf.EventBus != nil { + conf.Dialer.SetBus(conf.EventBus) + } } var tsTUNDev *tstun.Wrapper @@ -379,6 +382,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) tunName, _ := conf.Tun.Name() conf.Dialer.SetTUNName(tunName) conf.Dialer.SetNetMon(e.netMon) + conf.Dialer.SetBus(e.eventBus) e.dns = dns.NewManager(logf, conf.DNS, e.health, conf.Dialer, fwdDNSLinkSelector{e, tunName}, conf.ControlKnobs, runtime.GOOS) // TODO: there's probably a better place for this @@ -1035,7 +1039,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, tid := cfg.NetworkLogging.DomainID logExitFlowEnabled := cfg.NetworkLogging.LogExitFlowEnabled e.logf("wgengine: Reconfig: starting up network logger (node:%s tailnet:%s)", nid.Public(), tid.Public()) - if err := e.networkLogger.Startup(cfg.NodeID, nid, tid, e.tundev, e.magicConn, e.netMon, e.health, logExitFlowEnabled); err != nil { + if err := e.networkLogger.Startup(cfg.NodeID, nid, tid, e.tundev, e.magicConn, e.netMon, e.health, e.eventBus, logExitFlowEnabled); err != nil { e.logf("wgengine: Reconfig: error starting up network logger: %v", err) } e.networkLogger.ReconfigRoutes(routerCfg) From 67f108126930a019e2318a43d0ddd30c0c80fd13 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 1 Oct 2025 12:00:32 -0700 Subject: [PATCH 1461/1708] appc,ipn/ipnlocal: add a required event bus to the AppConnector type (#17390) Require the presence of the bus, but do not use it yet. Check for required fields and update tests and production use to plumb the necessary arguments. Updates #15160 Updates #17192 Change-Id: I8cefd2fdb314ca9945317d3320bd5ea6a92e8dcb Signed-off-by: M. J. Fromberger --- appc/appconnector.go | 17 ++++++++++ appc/appconnector_test.go | 60 ++++++++++++++++++++++++------------ ipn/ipnlocal/local.go | 1 + ipn/ipnlocal/local_test.go | 10 ++++-- ipn/ipnlocal/peerapi_test.go | 13 ++++++-- 5 files changed, 75 insertions(+), 26 deletions(-) diff --git a/appc/appconnector.go b/appc/appconnector.go index 8c1d49d22..c86bf2d0f 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -22,6 +22,7 @@ import ( "tailscale.com/types/views" "tailscale.com/util/clientmetric" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/util/execqueue" "tailscale.com/util/slicesx" ) @@ -136,7 +137,9 @@ type RouteInfo struct { // routes not yet served by the AppConnector the local node configuration is // updated to advertise the new route. 
type AppConnector struct { + // These fields are immutable after initialization. logf logger.Logf + eventBus *eventbus.Bus routeAdvertiser RouteAdvertiser // storeRoutesFunc will be called to persist routes if it is not nil. @@ -168,6 +171,10 @@ type Config struct { // It must be non-nil. Logf logger.Logf + // EventBus receives events when the collection of routes maintained by the + // connector is updated. It must be non-nil. + EventBus *eventbus.Bus + // RouteAdvertiser allows the connector to update the set of advertised routes. // It must be non-nil. RouteAdvertiser RouteAdvertiser @@ -183,8 +190,18 @@ type Config struct { // NewAppConnector creates a new AppConnector. func NewAppConnector(c Config) *AppConnector { + switch { + case c.Logf == nil: + panic("missing logger") + case c.EventBus == nil: + panic("missing event bus") + case c.RouteAdvertiser == nil: + panic("missing route advertiser") + } + ac := &AppConnector{ logf: logger.WithPrefix(c.Logf, "appc: "), + eventBus: c.EventBus, routeAdvertiser: c.RouteAdvertiser, storeRoutesFunc: c.StoreRoutesFunc, } diff --git a/appc/appconnector_test.go b/appc/appconnector_test.go index 12a39f040..c23908c28 100644 --- a/appc/appconnector_test.go +++ b/appc/appconnector_test.go @@ -4,7 +4,6 @@ package appc import ( - "context" "net/netip" "reflect" "slices" @@ -16,6 +15,7 @@ import ( "tailscale.com/appc/appctest" "tailscale.com/tstest" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/slicesx" @@ -24,18 +24,20 @@ import ( func fakeStoreRoutes(*RouteInfo) error { return nil } func TestUpdateDomains(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() var a *AppConnector if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: &appctest.RouteCollector{}, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: &appctest.RouteCollector{}}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: &appctest.RouteCollector{}}) } a.UpdateDomains([]string{"example.com"}) @@ -63,18 +65,20 @@ func TestUpdateDomains(t *testing.T) { } func TestUpdateRoutes(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } a.updateDomains([]string{"*.example.com"}) @@ -116,19 +120,21 @@ func TestUpdateRoutes(t *testing.T) { } func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { - ctx := context.Background() + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } 
mak.Set(&a.domains, "example.com", []netip.Addr{netip.MustParseAddr("192.0.2.1")}) rc.SetRoutes([]netip.Prefix{netip.MustParsePrefix("192.0.2.1/32")}) @@ -143,24 +149,26 @@ func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { } func TestDomainRoutes(t *testing.T) { + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } a.updateDomains([]string{"example.com"}) if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { t.Errorf("ObserveDNSResponse: %v", err) } - a.Wait(context.Background()) + a.Wait(t.Context()) want := map[string][]netip.Addr{ "example.com": {netip.MustParseAddr("192.0.0.8")}, @@ -173,19 +181,21 @@ func TestDomainRoutes(t *testing.T) { } func TestObserveDNSResponse(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } // a has no domains configured, so it should not advertise any routes @@ -267,19 +277,21 @@ func TestObserveDNSResponse(t *testing.T) { } func TestWildcardDomains(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } a.updateDomains([]string{"*.example.com"}) @@ -422,8 +434,9 @@ func prefixes(in ...string) []netip.Prefix { } func TestUpdateRouteRouteRemoval(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -439,12 +452,13 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } // nothing has yet been advertised assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -472,8 +486,9 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { } func TestUpdateDomainRouteRemoval(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes 
[]netip.Prefix) { @@ -489,12 +504,13 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -532,8 +548,9 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { } func TestUpdateWildcardRouteRemoval(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -549,12 +566,13 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -691,10 +709,12 @@ func TestMetricBucketsAreSorted(t *testing.T) { // back into AppConnector via authReconfig. If everything is called // synchronously, this results in a deadlock on AppConnector.mu. func TestUpdateRoutesDeadlock(t *testing.T) { - ctx := context.Background() + ctx := t.Context() + bus := eventbustest.NewBus(t) rc := &appctest.RouteCollector{} a := NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index af5a40550..e8952216b 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4804,6 +4804,7 @@ func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs i } b.appConnector = appc.NewAppConnector(appc.Config{ Logf: b.logf, + EventBus: b.sys.Bus.Get(), RouteAdvertiser: b, RouteInfo: ri, StoreRoutesFunc: storeFunc, diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index ec65c67ee..6737266be 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2307,15 +2307,17 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { func TestOfferingAppConnector(t *testing.T) { for _, shouldStore := range []bool{false, true} { b := newTestBackend(t) + bus := b.sys.Bus.Get() if b.OfferingAppConnector() { t.Fatal("unexpected offering app connector") } + rc := &appctest.RouteCollector{} if shouldStore { b.appConnector = appc.NewAppConnector(appc.Config{ - Logf: t.Logf, RouteInfo: &appc.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, + Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, RouteInfo: &appc.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf}) + b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } if !b.OfferingAppConnector() { t.Fatal("unexpected not offering app connector") @@ -2366,6 +2368,7 @@ func TestRouterAdvertiserIgnoresContainedRoutes(t *testing.T) { func TestObserveDNSResponse(t *testing.T) { for _, shouldStore := range []bool{false, true} { b := newTestBackend(t) + bus := b.sys.Bus.Get() // ensure no error when no app connector is configured if err := 
b.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { @@ -2376,12 +2379,13 @@ func TestObserveDNSResponse(t *testing.T) { if shouldStore { b.appConnector = appc.NewAppConnector(appc.Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &appc.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf, RouteAdvertiser: rc}) + b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } b.appConnector.UpdateDomains([]string{"example.com"}) b.appConnector.Wait(context.Background()) diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index a6a5f6ff5..43b3c49fc 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -259,12 +259,17 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { if shouldStore { a = appc.NewAppConnector(appc.Config{ Logf: t.Logf, + EventBus: sys.Bus.Get(), RouteAdvertiser: &appctest.RouteCollector{}, RouteInfo: &appc.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = appc.NewAppConnector(appc.Config{Logf: t.Logf, RouteAdvertiser: &appctest.RouteCollector{}}) + a = appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: sys.Bus.Get(), + RouteAdvertiser: &appctest.RouteCollector{}, + }) } sys.Set(pm.Store()) sys.Set(eng) @@ -339,12 +344,13 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { if shouldStore { a = appc.NewAppConnector(appc.Config{ Logf: t.Logf, + EventBus: sys.Bus.Get(), RouteAdvertiser: rc, RouteInfo: &appc.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = appc.NewAppConnector(appc.Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: sys.Bus.Get(), RouteAdvertiser: rc}) } sys.Set(pm.Store()) sys.Set(eng) @@ -411,12 +417,13 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { if shouldStore { a = appc.NewAppConnector(appc.Config{ Logf: t.Logf, + EventBus: sys.Bus.Get(), RouteAdvertiser: rc, RouteInfo: &appc.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = appc.NewAppConnector(appc.Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: sys.Bus.Get(), RouteAdvertiser: rc}) } sys.Set(pm.Store()) sys.Set(eng) From 801aac59db732b7c6adafc882add0f3c71a8e48a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 1 Oct 2025 11:41:42 -0700 Subject: [PATCH 1462/1708] Makefile, cmd/*/depaware.txt: split out vendor packages explicitly depaware was merging golang.org/x/foo and std's vendor/golang.org/x/foo packages (which could both be in the binary!), leading to confusing output, especially when I was working on eliminating duplicate packages imported under different names. This makes the depaware output longer and grosser, but doesn't hide reality from us. 
Updates #17305 Change-Id: I21cc3418014e127f6c1a81caf4e84213ce84ab57 Signed-off-by: Brad Fitzpatrick --- Makefile | 16 +++++++------- cmd/derper/depaware.txt | 33 ++++++++++++++++++---------- cmd/k8s-operator/depaware.txt | 24 +++++++++++++++----- cmd/stund/depaware.txt | 35 ++++++++++++++++-------------- cmd/tailscale/depaware.txt | 28 ++++++++++++++++++------ cmd/tailscaled/depaware-min.txt | 23 +++++++++++++++----- cmd/tailscaled/depaware-minbox.txt | 23 +++++++++++++++----- cmd/tailscaled/depaware.txt | 26 +++++++++++++++++----- cmd/tsidp/depaware.txt | 24 +++++++++++++++----- flake.nix | 2 +- go.mod | 2 +- go.mod.sri | 2 +- go.sum | 4 ++-- shell.nix | 2 +- tsnet/depaware.txt | 24 +++++++++++++++----- 15 files changed, 189 insertions(+), 79 deletions(-) diff --git a/Makefile b/Makefile index 05b984348..b78ef0469 100644 --- a/Makefile +++ b/Makefile @@ -18,35 +18,35 @@ lint: ## Run golangci-lint updatedeps: ## Update depaware deps # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" # it finds in its $$PATH is the right one. - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --vendor --internal \ tailscale.com/cmd/tailscaled \ tailscale.com/cmd/tailscale \ tailscale.com/cmd/derper \ tailscale.com/cmd/k8s-operator \ tailscale.com/cmd/stund \ tailscale.com/cmd/tsidp - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --goos=linux,darwin,windows,android,ios --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --goos=linux,darwin,windows,android,ios --vendor --internal \ tailscale.com/tsnet - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --vendor --internal \ tailscale.com/cmd/tailscaled - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --vendor --internal \ tailscale.com/cmd/tailscaled depaware: ## Run depaware checks # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" # it finds in its $$PATH is the right one. 
- PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --vendor --internal \ tailscale.com/cmd/tailscaled \ tailscale.com/cmd/tailscale \ tailscale.com/cmd/derper \ tailscale.com/cmd/k8s-operator \ tailscale.com/cmd/stund \ tailscale.com/cmd/tsidp - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --goos=linux,darwin,windows,android,ios --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --goos=linux,darwin,windows,android,ios --vendor --internal \ tailscale.com/tsnet - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --vendor --internal \ tailscale.com/cmd/tailscaled - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --vendor --internal \ tailscale.com/cmd/tailscaled buildwindows: ## Build tailscale CLI for windows/amd64 diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 7f0252148..0628afd63 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -174,24 +174,17 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from tailscale.com/tka - golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ - golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/internal/alias from golang.org/x/crypto/nacl/secretbox + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from tailscale.com/util/winutil+ golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting L golang.org/x/net/bpf from github.com/mdlayher/netlink+ - golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from net/http+ - golang.org/x/net/http/httpproxy from net/http - golang.org/x/net/http2/hpack from net/http+ - golang.org/x/net/idna from golang.org/x/crypto/acme/autocert+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ + golang.org/x/net/idna from golang.org/x/crypto/acme/autocert golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/proxy from 
tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ @@ -208,6 +201,22 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/cmd/derper+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ cmp from slices+ @@ -368,7 +377,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ - net/textproto from golang.org/x/net/http/httpguts+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ os/exec from golang.zx2c4.com/wireguard/windows/tunnel/winipcfg+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index e225cebf9..89b50edc2 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -891,9 +891,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/ssh+ - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -908,9 +906,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+ golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/http2 from k8s.io/apimachinery/pkg/util/net+ 
golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ @@ -940,6 +938,22 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ cmp from github.com/gaissmai/bart+ diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 20f58ef25..a5e4b9ba3 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -87,29 +87,32 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/version from tailscale.com/envknob+ tailscale.com/version/distro from tailscale.com/envknob golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box - golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ - golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/internal/alias from golang.org/x/crypto/nacl/secretbox + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from tailscale.com/tsweb/varz - golang.org/x/net/dns/dnsmessage from net - golang.org/x/net/http/httpguts from net/http+ - golang.org/x/net/http/httpproxy from net/http - golang.org/x/net/http2/hpack from net/http+ - golang.org/x/net/idna from golang.org/x/net/http/httpguts+ golang.org/x/sys/cpu from golang.org/x/crypto/blake2b+ LD golang.org/x/sys/unix from github.com/prometheus/procfs+ W golang.org/x/sys/windows from github.com/prometheus/client_golang/prometheus - golang.org/x/text/secure/bidirule from golang.org/x/net/idna - golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ - golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ - golang.org/x/text/unicode/norm 
from golang.org/x/net/idna + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ cmp from slices+ @@ -268,7 +271,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ - net/textproto from golang.org/x/net/http/httpguts+ + net/textproto from mime/multipart+ net/url from crypto/x509+ os from crypto/internal/sysrand+ os/signal from tailscale.com/cmd/stund diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index cfa073a71..80bb40c26 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -211,9 +211,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from tailscale.com/clientupdate/distsign+ golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from tailscale.com/control/controlbase golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -230,11 +228,11 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L golang.org/x/image/math/f64 from github.com/fogleman/gg+ L golang.org/x/image/math/fixed from github.com/fogleman/gg+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ - golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from net/http+ - golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ + golang.org/x/net/http/httpguts from golang.org/x/net/http2+ + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/http2 from tailscale.com/cmd/tailscale/cli+ - golang.org/x/net/http2/hpack from net/http+ + golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping golang.org/x/net/idna from golang.org/x/net/http/httpguts+ golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 @@ -260,6 +258,22 @@ tailscale.com/cmd/tailscale dependencies: (generated by 
github.com/tailscale/dep golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/cmd/tailscale/cli+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna archive/tar from tailscale.com/clientupdate bufio from compress/flate+ bytes from archive/tar+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 22f360ac5..f3a6eb12c 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -200,9 +200,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -214,9 +212,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/dns/dnsmessage from tailscale.com/ipn/ipnlocal+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http golang.org/x/net/http2 from tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping @@ -237,6 +234,22 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/derp + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + 
vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ cmp from encoding/json+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 4b80f4a56..1b2fff01f 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -227,9 +227,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -241,9 +239,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/dns/dnsmessage from tailscale.com/cmd/tailscale/cli+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http golang.org/x/net/http2 from tailscale.com/cmd/tailscale/cli+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping @@ -264,6 +261,22 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/derp + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ 
+ vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna archive/tar from tailscale.com/clientupdate bufio from compress/flate+ bytes from bufio+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 5e92438e7..12c06f611 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -484,9 +484,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/cryptobyte from tailscale.com/feature/tpm + golang.org/x/crypto/cryptobyte/asn1 from golang.org/x/crypto/cryptobyte+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -500,9 +500,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/http2 from tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping+ @@ -530,6 +530,22 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from 
vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna archive/tar from tailscale.com/clientupdate bufio from compress/flate+ bytes from archive/tar+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 9ced6f966..76254c6cd 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -318,9 +318,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ - golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/ed25519 from gopkg.in/square/go-jose.v2 golang.org/x/crypto/hkdf from tailscale.com/control/controlbase @@ -336,9 +334,9 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/http2 from tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ @@ -367,6 +365,22 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ cmp from encoding/json+ diff --git a/flake.nix b/flake.nix index 8f1fe026d..e8ef03853 100644 --- a/flake.nix +++ b/flake.nix @@ -148,5 +148,5 
@@ }); }; } -# nix-direnv cache busting line: sha256-8aE6dWMkTLdWRD9WnLVSzpOQQh61voEnjZAJHtbGCSs= +# nix-direnv cache busting line: sha256-jsmQ0S1Uh1cU/kr0onYLJY9VYcFx297QZjQALM3wX10= diff --git a/go.mod b/go.mod index 6883d2552..bce634431 100644 --- a/go.mod +++ b/go.mod @@ -79,7 +79,7 @@ require ( github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e github.com/studio-b12/gowebdav v0.9.0 github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e - github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b + github.com/tailscale/depaware v0.0.0-20251001183927-9c2ad255ef3f github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 diff --git a/go.mod.sri b/go.mod.sri index 781799de5..a1d81c1a9 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-8aE6dWMkTLdWRD9WnLVSzpOQQh61voEnjZAJHtbGCSs= +sha256-jsmQ0S1Uh1cU/kr0onYLJY9VYcFx297QZjQALM3wX10= diff --git a/go.sum b/go.sum index 72ddb730f..5e2205575 100644 --- a/go.sum +++ b/go.sum @@ -972,8 +972,8 @@ github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplB github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP5LWHEY//SWsYkSO3RWRZo4OSWagh3YD2vQ= github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e/go.mod h1:XrBNfAFN+pwoWuksbFS9Ccxnopa15zJGgXRFN90l3K4= -github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b h1:ewWb4cA+YO9/3X+v5UhdV+eKFsNBOPcGRh39Glshx/4= -github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8= +github.com/tailscale/depaware v0.0.0-20251001183927-9c2ad255ef3f h1:PDPGJtm9PFBLNudHGwkfUGp/FWvP+kXXJ0D1pB35F40= +github.com/tailscale/depaware v0.0.0-20251001183927-9c2ad255ef3f/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8= github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8Jj4P4c1a3CtQyMaTVCznlkLZI++hok4= github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg= github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 h1:/V2rCMMWcsjYaYO2MeovLw+ClP63OtXgCF2Y1eb8+Ns= diff --git a/shell.nix b/shell.nix index 883d71bef..1891111b2 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-8aE6dWMkTLdWRD9WnLVSzpOQQh61voEnjZAJHtbGCSs= +# nix-direnv cache busting line: sha256-jsmQ0S1Uh1cU/kr0onYLJY9VYcFx297QZjQALM3wX10= diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index b5f524088..4dffb5000 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -313,9 +313,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ - golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/hkdf from 
tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -329,9 +327,9 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/http2 from tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ @@ -360,6 +358,22 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ cmp from encoding/json+ From 78af49dd1acb287aa6a50c0ee8012c9f4b3d1783 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 1 Oct 2025 15:07:55 -0700 Subject: [PATCH 1463/1708] control/ts2021: rename from internal/noiseconn in prep for controlclient split A following change will split the controlclient.NoiseClient type out, away from the rest of the controlclient package, which is relatively dependency heavy. A question was where to move it, and whether to make a new (a fifth!) package in the ts2021 dependency chain. @creachadair and I brainstormed and decided to merge internal/noiseconn and controlclient.NoiseClient into one package, with names ts2021.Conn and ts2021.Client. For ease of reviewing the subsequent PR, this is the first step that just renames the internal/noiseconn package to control/ts2021.
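For callers this is a mechanical import and package-qualifier change; a minimal before/after sketch (mirroring the cmd/tailscale/cli/debug.go hunk below, with the same arguments) looks like:

    // before
    import "tailscale.com/internal/noiseconn"
    nc, err := noiseconn.New(conn.Conn, h2Transport, 0, nil)

    // after
    import "tailscale.com/control/ts2021"
    nc, err := ts2021.New(conn.Conn, h2Transport, 0, nil)

The constructor signature is unchanged; only the import path and package name move.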
Updates #17305 Change-Id: Ib5ea162dc1d336c1d805bdd9548d1702dd6e1468 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/cli/debug.go | 4 ++-- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 2 +- cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- control/controlclient/noise.go | 18 +++++++++--------- control/controlclient/noise_test.go | 4 ++-- {internal/noiseconn => control/ts2021}/conn.go | 10 ++++------ tsnet/depaware.txt | 2 +- 11 files changed, 24 insertions(+), 26 deletions(-) rename {internal/noiseconn => control/ts2021}/conn.go (95%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 89b50edc2..41a6c39e3 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -690,6 +690,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/control/controlhttp from tailscale.com/control/controlclient tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ @@ -710,7 +711,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 7e800dbc5..224070842 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -31,10 +31,10 @@ import ( "golang.org/x/net/http2" "tailscale.com/client/tailscale/apitype" "tailscale.com/control/controlhttp" + "tailscale.com/control/ts2021" "tailscale.com/feature" _ "tailscale.com/feature/condregister/useproxy" "tailscale.com/hostinfo" - "tailscale.com/internal/noiseconn" "tailscale.com/ipn" "tailscale.com/net/ace" "tailscale.com/net/netmon" @@ -1122,7 +1122,7 @@ func tryConnect(ctx context.Context, controlPublic key.MachinePublic, noiseDiale } // Now, create a Noise conn over the existing conn. 
- nc, err := noiseconn.New(conn.Conn, h2Transport, 0, nil) + nc, err := ts2021.New(conn.Conn, h2Transport, 0, nil) if err != nil { return fmt.Errorf("noiseconn.New: %w", err) } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 80bb40c26..3e100d4a7 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -85,6 +85,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlhttp from tailscale.com/cmd/tailscale/cli tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp + tailscale.com/control/ts2021 from tailscale.com/cmd/tailscale/cli tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/net/netcheck @@ -105,7 +106,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli+ - tailscale.com/internal/noiseconn from tailscale.com/cmd/tailscale/cli tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/kube/kubetypes from tailscale.com/envknob diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index f3a6eb12c..87138e4dd 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -45,6 +45,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/control/controlhttp from tailscale.com/control/controlclient tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/net/netcheck+ @@ -60,7 +61,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 1b2fff01f..0fd7286e7 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -65,6 +65,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/control/controlhttp from tailscale.com/control/controlclient+ tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/cmd/tailscale/cli+ tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from 
tailscale.com/net/netcheck+ @@ -82,7 +83,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient+ tailscale.com/ipn from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 12c06f611..26f27e986 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -255,6 +255,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/control/controlhttp from tailscale.com/control/controlclient tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ @@ -296,7 +297,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ W tailscale.com/ipn/auditlog from tailscale.com/cmd/tailscaled tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 76254c6cd..b423e0bb0 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -132,6 +132,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/control/controlhttp from tailscale.com/control/controlclient tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ @@ -152,7 +153,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/client/tailscale from tailscale.com/tsnet+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ diff --git a/control/controlclient/noise.go b/control/controlclient/noise.go index c001de0cd..1daa07620 100644 --- a/control/controlclient/noise.go +++ b/control/controlclient/noise.go @@ -18,8 +18,8 @@ import ( "golang.org/x/net/http2" "tailscale.com/control/controlhttp" + "tailscale.com/control/ts2021" "tailscale.com/health" - "tailscale.com/internal/noiseconn" "tailscale.com/net/dnscache" 
"tailscale.com/net/netmon" "tailscale.com/net/tsdial" @@ -50,7 +50,7 @@ type NoiseClient struct { // sfDial ensures that two concurrent requests for a noise connection only // produce one shared one between the two callers. - sfDial singleflight.Group[struct{}, *noiseconn.Conn] + sfDial singleflight.Group[struct{}, *ts2021.Conn] dialer *tsdial.Dialer dnsCache *dnscache.Resolver @@ -72,9 +72,9 @@ type NoiseClient struct { // mu only protects the following variables. mu sync.Mutex closed bool - last *noiseconn.Conn // or nil + last *ts2021.Conn // or nil nextID int - connPool map[int]*noiseconn.Conn // active connections not yet closed; see noiseconn.Conn.Close + connPool map[int]*ts2021.Conn // active connections not yet closed; see ts2021.Conn.Close } // NoiseOpts contains options for the NewNoiseClient function. All fields are @@ -195,12 +195,12 @@ func (e contextErr) Unwrap() error { return e.err } -// getConn returns a noiseconn.Conn that can be used to make requests to the +// getConn returns a ts2021.Conn that can be used to make requests to the // coordination server. It may return a cached connection or create a new one. // Dials are singleflighted, so concurrent calls to getConn may only dial once. // As such, context values may not be respected as there are no guarantees that // the context passed to getConn is the same as the context passed to dial. -func (nc *NoiseClient) getConn(ctx context.Context) (*noiseconn.Conn, error) { +func (nc *NoiseClient) getConn(ctx context.Context) (*ts2021.Conn, error) { nc.mu.Lock() if last := nc.last; last != nil && last.CanTakeNewRequest() { nc.mu.Unlock() @@ -214,7 +214,7 @@ func (nc *NoiseClient) getConn(ctx context.Context) (*noiseconn.Conn, error) { // canceled. Instead, we have to additionally check that the context // which was canceled is our context and retry if our context is still // valid. - conn, err, _ := nc.sfDial.Do(struct{}{}, func() (*noiseconn.Conn, error) { + conn, err, _ := nc.sfDial.Do(struct{}{}, func() (*ts2021.Conn, error) { c, err := nc.dial(ctx) if err != nil { if ctx.Err() != nil { @@ -282,7 +282,7 @@ func (nc *NoiseClient) Close() error { // dial opens a new connection to tailcontrol, fetching the server noise key // if not cached. 
-func (nc *NoiseClient) dial(ctx context.Context) (*noiseconn.Conn, error) { +func (nc *NoiseClient) dial(ctx context.Context) (*ts2021.Conn, error) { nc.mu.Lock() connID := nc.nextID nc.nextID++ @@ -352,7 +352,7 @@ func (nc *NoiseClient) dial(ctx context.Context) (*noiseconn.Conn, error) { return nil, err } - ncc, err := noiseconn.New(clientConn.Conn, nc.h2t, connID, nc.connClosed) + ncc, err := ts2021.New(clientConn.Conn, nc.h2t, connID, nc.connClosed) if err != nil { return nil, err } diff --git a/control/controlclient/noise_test.go b/control/controlclient/noise_test.go index d9c71cf27..0022bdf88 100644 --- a/control/controlclient/noise_test.go +++ b/control/controlclient/noise_test.go @@ -15,7 +15,7 @@ import ( "golang.org/x/net/http2" "tailscale.com/control/controlhttp/controlhttpserver" - "tailscale.com/internal/noiseconn" + "tailscale.com/control/ts2021" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" @@ -310,7 +310,7 @@ func (up *Upgrader) ServeHTTP(w http.ResponseWriter, r *http.Request) { // https://httpwg.org/specs/rfc7540.html#rfc.section.4.1 (Especially not // an HTTP/2 settings frame, which isn't of type 'T') var notH2Frame [5]byte - copy(notH2Frame[:], noiseconn.EarlyPayloadMagic) + copy(notH2Frame[:], ts2021.EarlyPayloadMagic) var lenBuf [4]byte binary.BigEndian.PutUint32(lenBuf[:], uint32(len(earlyJSON))) // These writes are all buffered by caller, so fine to do them diff --git a/internal/noiseconn/conn.go b/control/ts2021/conn.go similarity index 95% rename from internal/noiseconn/conn.go rename to control/ts2021/conn.go index 29fd1a283..99b1f24cb 100644 --- a/internal/noiseconn/conn.go +++ b/control/ts2021/conn.go @@ -1,12 +1,10 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Package noiseconn contains an internal-only wrapper around controlbase.Conn -// that properly handles the early payload sent by the server before the HTTP/2 -// session begins. -// -// See the documentation on the Conn type for more details. -package noiseconn +// Package ts2021 handles the details of the Tailscale 2021 control protocol +// that are after (above) the Noise layer. In particular, the +// "tailcfg.EarlyNoise" message and the subsequent HTTP/2 connection. 
+package ts2021 import ( "bytes" diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 4dffb5000..3cf1d06e9 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -128,6 +128,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/control/controlhttp from tailscale.com/control/controlclient tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ @@ -148,7 +149,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/client/tailscale from tailscale.com/tsnet+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ From cca70ddbfc2727a2f38d9d178b52efcca842a256 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 1 Oct 2025 20:18:58 -0700 Subject: [PATCH 1464/1708] cmd/tailscaled: default --encrypt-state to true if TPM is available (#17376) Whenever running on a platform that has a TPM (and tailscaled can access it), default to encrypting the state. The user can still explicitly set this flag to disable encryption. Updates https://github.com/tailscale/corp/issues/32909 Signed-off-by: Andrew Lytvynov --- cmd/tailscaled/flag.go | 31 +++++++++++++++++++++++++++++++ cmd/tailscaled/tailscaled.go | 13 ++++++++----- feature/hooks.go | 12 ++++++++++++ feature/tpm/tpm.go | 10 ++++++++++ feature/tpm/tpm_test.go | 9 --------- ipn/ipnlocal/local.go | 6 +----- util/syspolicy/pkey/pkey.go | 4 +++- 7 files changed, 65 insertions(+), 20 deletions(-) create mode 100644 cmd/tailscaled/flag.go diff --git a/cmd/tailscaled/flag.go b/cmd/tailscaled/flag.go new file mode 100644 index 000000000..f640aceed --- /dev/null +++ b/cmd/tailscaled/flag.go @@ -0,0 +1,31 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import "strconv" + +// boolFlag is a flag.Value that tracks whether it was ever set. 
+type boolFlag struct { + set bool + v bool +} + +func (b *boolFlag) String() string { + if b == nil || !b.set { + return "unset" + } + return strconv.FormatBool(b.v) +} + +func (b *boolFlag) Set(s string) error { + v, err := strconv.ParseBool(s) + if err != nil { + return err + } + b.v = v + b.set = true + return nil +} + +func (b *boolFlag) IsBoolFlag() bool { return true } diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 27fec05a3..c3a4c8b05 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -120,7 +120,7 @@ var args struct { debug string port uint16 statepath string - encryptState bool + encryptState boolFlag statedir string socketpath string birdSocketPath string @@ -197,7 +197,7 @@ func main() { flag.StringVar(&args.tunname, "tun", defaultTunName(), `tunnel interface name; use "userspace-networking" (beta) to not use TUN`) flag.Var(flagtype.PortValue(&args.port, defaultPort()), "port", "UDP port to listen on for WireGuard and peer-to-peer traffic; 0 means automatically select") flag.StringVar(&args.statepath, "state", "", "absolute path of state file; use 'kube:' to use Kubernetes secrets or 'arn:aws:ssm:...' to store in AWS SSM; use 'mem:' to not store state and register as an ephemeral node. If empty and --statedir is provided, the default is /tailscaled.state. Default: "+paths.DefaultTailscaledStateFile()) - flag.BoolVar(&args.encryptState, "encrypt-state", defaultEncryptState(), "encrypt the state file on disk; uses TPM on Linux and Windows, on all other platforms this flag is not supported") + flag.Var(&args.encryptState, "encrypt-state", `encrypt the state file on disk; when not set encryption will be enabled if supported on this platform; uses TPM on Linux and Windows, on all other platforms this flag is not supported`) flag.StringVar(&args.statedir, "statedir", "", "path to directory for storage of config state, TLS certs, temporary incoming Taildrop files, etc. If empty, it's derived from --state when possible.") flag.StringVar(&args.socketpath, "socket", paths.DefaultTailscaledSocket(), "path of the service unix socket") flag.StringVar(&args.birdSocketPath, "bird-socket", "", "path of the bird unix socket") @@ -275,7 +275,10 @@ func main() { } } - if args.encryptState { + if !args.encryptState.set { + args.encryptState.v = defaultEncryptState() + } + if args.encryptState.v { if runtime.GOOS != "linux" && runtime.GOOS != "windows" { log.SetFlags(0) log.Fatalf("--encrypt-state is not supported on %s", runtime.GOOS) @@ -351,7 +354,7 @@ func statePathOrDefault() string { if path == "" && args.statedir != "" { path = filepath.Join(args.statedir, "tailscaled.state") } - if path != "" && !store.HasKnownProviderPrefix(path) && args.encryptState { + if path != "" && !store.HasKnownProviderPrefix(path) && args.encryptState.v { path = store.TPMPrefix + path } return path @@ -909,6 +912,6 @@ func defaultEncryptState() bool { // (plan9/FreeBSD/etc). return false } - v, _ := policyclient.Get().GetBoolean(pkey.EncryptState, false) + v, _ := policyclient.Get().GetBoolean(pkey.EncryptState, feature.TPMAvailable()) return v } diff --git a/feature/hooks.go b/feature/hooks.go index bc42bd8d9..2eade1ead 100644 --- a/feature/hooks.go +++ b/feature/hooks.go @@ -40,3 +40,15 @@ var HookProxySetSelfProxy Hook[func(...string)] // HookProxySetTransportGetProxyConnectHeader is a hook for feature/useproxy to register // [tshttpproxy.SetTransportGetProxyConnectHeader]. 
var HookProxySetTransportGetProxyConnectHeader Hook[func(*http.Transport)] + +// HookTPMAvailable is a hook that reports whether a TPM device is supported +// and available. +var HookTPMAvailable Hook[func() bool] + +// TPMAvailable reports whether a TPM device is supported and available. +func TPMAvailable() bool { + if f, ok := HookTPMAvailable.GetOk(); ok { + return f() + } + return false +} diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index b700637e6..b67cb4e3b 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -39,6 +39,7 @@ var infoOnce = sync.OnceValue(info) func init() { feature.Register("tpm") + feature.HookTPMAvailable.Set(tpmSupported) hostinfo.RegisterHostinfoNewHook(func(hi *tailcfg.Hostinfo) { hi.TPM = infoOnce() }) @@ -51,6 +52,15 @@ func init() { } } +func tpmSupported() bool { + tpm, err := open() + if err != nil { + return false + } + tpm.Close() + return true +} + var verboseTPM = envknob.RegisterBool("TS_DEBUG_TPM") func info() *tailcfg.TPMInfo { diff --git a/feature/tpm/tpm_test.go b/feature/tpm/tpm_test.go index f4497f8c7..5401fd5c3 100644 --- a/feature/tpm/tpm_test.go +++ b/feature/tpm/tpm_test.go @@ -277,15 +277,6 @@ func TestMigrateStateToTPM(t *testing.T) { } } -func tpmSupported() bool { - tpm, err := open() - if err != nil { - return false - } - tpm.Close() - return true -} - type mockTPMSealProvider struct { path string data map[ipn.StateKey][]byte diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index e8952216b..965768660 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7559,11 +7559,7 @@ func (b *LocalBackend) stateEncrypted() opt.Bool { case version.IsMacAppStore(): return opt.NewBool(true) case version.IsMacSysExt(): - // MacSys still stores its state in plaintext on disk in addition to - // the Keychain. A future release will clean up the on-disk state - // files. - // TODO(#15830): always return true here once MacSys is fully migrated. - sp, _ := b.polc.GetBoolean(pkey.EncryptState, false) + sp, _ := b.polc.GetBoolean(pkey.EncryptState, true) return opt.NewBool(sp) default: // Probably self-compiled tailscaled, we don't use the Keychain diff --git a/util/syspolicy/pkey/pkey.go b/util/syspolicy/pkey/pkey.go index 1ef969d72..79b4af1e6 100644 --- a/util/syspolicy/pkey/pkey.go +++ b/util/syspolicy/pkey/pkey.go @@ -136,7 +136,9 @@ const ( FlushDNSOnSessionUnlock Key = "FlushDNSOnSessionUnlock" // EncryptState is a boolean setting that specifies whether to encrypt the - // tailscaled state file with a TPM device. + // tailscaled state file. + // Windows and Linux use a TPM device, Apple uses the Keychain. + // It's a noop on other platforms. EncryptState Key = "EncryptState" // PostureChecking indicates if posture checking is enabled and the client shall gather From 7dfa26778e7ca36a34e7d50c0f80fb60f6f54540 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 30 Sep 2025 09:02:56 +0100 Subject: [PATCH 1465/1708] derp/derphttp: de-flake DERP HTTP clients tests with memnet and synctest Using memnet and synctest removes flakiness caused by real networking and subtle timing differences. Additionally, remove the `t.Logf` call inside the server's shutdown goroutine that was causing a false positive data race detection. The race detector is flagging a double write during this `t.Logf` call. This is a common pattern, noted in golang/go#40343 and elsehwere in this file, where using `t.Logf` after a test has finished can interact poorly with the test runner. 
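For illustration only (a minimal sketch, not code from this repository), the hazardous shape is a server goroutine that may still log after the test function has returned:

	go func() {
		if err := httpsrv.Serve(ln); err != nil {
			t.Logf("serve: %v", err) // can fire after the test returns, racing with the test runner
		}
	}()

The safe pattern is to return silently on expected shutdown errors (as done here by checking net.ErrClosed) or to block the test's return on a sync.WaitGroup that the goroutine completes before its final log call.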
This is a long-standing issue which became more common after rewriting this test to use memnet and synctest. Fixed #17355 Signed-off-by: Alex Chan --- derp/derphttp/derphttp_test.go | 312 +++++++++++++++++---------------- 1 file changed, 158 insertions(+), 154 deletions(-) diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index 36c11f4fc..76681d498 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -8,6 +8,7 @@ import ( "context" "crypto/tls" "encoding/json" + "errors" "flag" "fmt" "maps" @@ -18,11 +19,13 @@ import ( "strings" "sync" "testing" + "testing/synctest" "time" "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/derp/derpserver" + "tailscale.com/net/memnet" "tailscale.com/net/netmon" "tailscale.com/net/netx" "tailscale.com/tailcfg" @@ -224,24 +227,21 @@ func TestPing(t *testing.T) { const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" -func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derpserver.Server) { +func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derpserver.Server, ln *memnet.Listener) { s = derpserver.New(k, t.Logf) httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), Handler: derpserver.Handler(s), } - ln, err := net.Listen("tcp4", "localhost:0") - if err != nil { - t.Fatal(err) - } + ln = memnet.Listen("localhost:0") + serverURL = "http://" + ln.Addr().String() s.SetMeshKey(testMeshKey) go func() { if err := httpsrv.Serve(ln); err != nil { - if err == http.ErrServerClosed { - t.Logf("server closed") + if errors.Is(err, net.ErrClosed) { return } panic(err) @@ -250,7 +250,7 @@ func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derpse return } -func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string) (c *derphttp.Client) { +func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string, ln *memnet.Listener) (c *derphttp.Client) { c, err := derphttp.NewClient(watcherPrivateKey, serverToWatchURL, t.Logf, netmon.NewStatic()) if err != nil { t.Fatal(err) @@ -260,6 +260,7 @@ func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToW t.Fatal(err) } c.MeshKey = k + c.SetURLDialer(ln.Dial) return } @@ -267,170 +268,171 @@ func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToW // updates after a different thread breaks and reconnects the connection, while // the watcher is waiting on recv(). func TestBreakWatcherConnRecv(t *testing.T) { - // TODO(bradfitz): use synctest + memnet instead - - // Set the wait time before a retry after connection failure to be much lower. - // This needs to be early in the test, for defer to run right at the end after - // the DERP client has finished. - tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond) - - var wg sync.WaitGroup - // Make the watcher server - serverPrivateKey1 := key.NewNode() - _, s1 := newTestServer(t, serverPrivateKey1) - defer s1.Close() - - // Make the watched server - serverPrivateKey2 := key.NewNode() - serverURL2, s2 := newTestServer(t, serverPrivateKey2) - defer s2.Close() - - // Make the watcher (but it is not connected yet) - watcher := newWatcherClient(t, serverPrivateKey1, serverURL2) - defer watcher.Close() + synctest.Test(t, func(t *testing.T) { + // Set the wait time before a retry after connection failure to be much lower. 
+ // This needs to be early in the test, for defer to run right at the end after + // the DERP client has finished. + tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond) + + var wg sync.WaitGroup + // Make the watcher server + serverPrivateKey1 := key.NewNode() + _, s1, ln1 := newTestServer(t, serverPrivateKey1) + defer s1.Close() + defer ln1.Close() + + // Make the watched server + serverPrivateKey2 := key.NewNode() + serverURL2, s2, ln2 := newTestServer(t, serverPrivateKey2) + defer s2.Close() + defer ln2.Close() + + // Make the watcher (but it is not connected yet) + watcher := newWatcherClient(t, serverPrivateKey1, serverURL2, ln2) + defer watcher.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + watcherChan := make(chan int, 1) + defer close(watcherChan) + errChan := make(chan error, 1) + + // Start the watcher thread (which connects to the watched server) + wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343 + go func() { + defer wg.Done() + var peers int + add := func(m derp.PeerPresentMessage) { + t.Logf("add: %v", m.Key.ShortString()) + peers++ + // Signal that the watcher has run + watcherChan <- peers + } + remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } + notifyErr := func(err error) { + select { + case errChan <- err: + case <-ctx.Done(): + } + } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + watcher.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr) + }() - watcherChan := make(chan int, 1) - defer close(watcherChan) - errChan := make(chan error, 1) + synctest.Wait() - // Start the watcher thread (which connects to the watched server) - wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343 - go func() { - defer wg.Done() - var peers int - add := func(m derp.PeerPresentMessage) { - t.Logf("add: %v", m.Key.ShortString()) - peers++ - // Signal that the watcher has run - watcherChan <- peers - } - remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } - notifyErr := func(err error) { + // Wait for the watcher to run, then break the connection and check if it + // reconnected and received peer updates. + for range 10 { select { - case errChan <- err: - case <-ctx.Done(): + case peers := <-watcherChan: + if peers != 1 { + t.Fatalf("wrong number of peers added during watcher connection: have %d, want 1", peers) + } + case err := <-errChan: + if err.Error() != "derp.Recv: EOF" { + t.Fatalf("expected notifyError connection error to be EOF, got %v", err) + } } - } - - watcher.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr) - }() - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() + synctest.Wait() - // Wait for the watcher to run, then break the connection and check if it - // reconnected and received peer updates. 
- for range 10 { - select { - case peers := <-watcherChan: - if peers != 1 { - t.Fatalf("wrong number of peers added during watcher connection: have %d, want 1", peers) - } - case err := <-errChan: - if !strings.Contains(err.Error(), "use of closed network connection") { - t.Fatalf("expected notifyError connection error to contain 'use of closed network connection', got %v", err) - } - case <-timer.C: - t.Fatalf("watcher did not process the peer update") + watcher.BreakConnection(watcher) + // re-establish connection by sending a packet + watcher.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) } - timer.Reset(5 * time.Second) - watcher.BreakConnection(watcher) - // re-establish connection by sending a packet - watcher.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) - } - cancel() // Cancel the context to stop the watcher loop. - wg.Wait() + cancel() // Cancel the context to stop the watcher loop. + wg.Wait() + }) } // Test that a watcher connection successfully reconnects and processes peer // updates after a different thread breaks and reconnects the connection, while // the watcher is not waiting on recv(). func TestBreakWatcherConn(t *testing.T) { - // TODO(bradfitz): use synctest + memnet instead - - // Set the wait time before a retry after connection failure to be much lower. - // This needs to be early in the test, for defer to run right at the end after - // the DERP client has finished. - tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond) - - var wg sync.WaitGroup - // Make the watcher server - serverPrivateKey1 := key.NewNode() - _, s1 := newTestServer(t, serverPrivateKey1) - defer s1.Close() - - // Make the watched server - serverPrivateKey2 := key.NewNode() - serverURL2, s2 := newTestServer(t, serverPrivateKey2) - defer s2.Close() - - // Make the watcher (but it is not connected yet) - watcher1 := newWatcherClient(t, serverPrivateKey1, serverURL2) - defer watcher1.Close() + synctest.Test(t, func(t *testing.T) { + // Set the wait time before a retry after connection failure to be much lower. + // This needs to be early in the test, for defer to run right at the end after + // the DERP client has finished. + tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond) + + var wg sync.WaitGroup + // Make the watcher server + serverPrivateKey1 := key.NewNode() + _, s1, ln1 := newTestServer(t, serverPrivateKey1) + defer s1.Close() + defer ln1.Close() + + // Make the watched server + serverPrivateKey2 := key.NewNode() + serverURL2, s2, ln2 := newTestServer(t, serverPrivateKey2) + defer s2.Close() + defer ln2.Close() + + // Make the watcher (but it is not connected yet) + watcher1 := newWatcherClient(t, serverPrivateKey1, serverURL2, ln2) + defer watcher1.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + watcherChan := make(chan int, 1) + breakerChan := make(chan bool, 1) + errorChan := make(chan error, 1) + + // Start the watcher thread (which connects to the watched server) + wg.Add(1) // To avoid using t.Logf after the test ends. 
See https://golang.org/issue/40343 + go func() { + defer wg.Done() + var peers int + add := func(m derp.PeerPresentMessage) { + t.Logf("add: %v", m.Key.ShortString()) + peers++ + // Signal that the watcher has run + watcherChan <- peers + select { + case <-ctx.Done(): + return + // Wait for breaker to run + case <-breakerChan: + } + } + remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } + notifyError := func(err error) { + errorChan <- err + } - ctx, cancel := context.WithCancel(context.Background()) + watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyError) + }() - watcherChan := make(chan int, 1) - breakerChan := make(chan bool, 1) - errorChan := make(chan error, 1) + synctest.Wait() - // Start the watcher thread (which connects to the watched server) - wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343 - go func() { - defer wg.Done() - var peers int - add := func(m derp.PeerPresentMessage) { - t.Logf("add: %v", m.Key.ShortString()) - peers++ - // Signal that the watcher has run - watcherChan <- peers + // Wait for the watcher to run, then break the connection and check if it + // reconnected and received peer updates. + for range 10 { select { - case <-ctx.Done(): - return - // Wait for breaker to run - case <-breakerChan: + case peers := <-watcherChan: + if peers != 1 { + t.Fatalf("wrong number of peers added during watcher connection have %d, want 1", peers) + } + case err := <-errorChan: + if !errors.Is(err, net.ErrClosed) { + t.Fatalf("expected notifyError connection error to fail with ErrClosed, got %v", err) + } } - } - remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } - notifyError := func(err error) { - errorChan <- err - } - - watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyError) - }() - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() + synctest.Wait() - // Wait for the watcher to run, then break the connection and check if it - // reconnected and received peer updates. 
- for range 10 { - select { - case peers := <-watcherChan: - if peers != 1 { - t.Fatalf("wrong number of peers added during watcher connection have %d, want 1", peers) - } - case err := <-errorChan: - if !strings.Contains(err.Error(), "use of closed network connection") { - t.Fatalf("expected notifyError connection error to contain 'use of closed network connection', got %v", err) - } - case <-timer.C: - t.Fatalf("watcher did not process the peer update") + watcher1.BreakConnection(watcher1) + // re-establish connection by sending a packet + watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) + // signal that the breaker is done + breakerChan <- true } - watcher1.BreakConnection(watcher1) - // re-establish connection by sending a packet - watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) - // signal that the breaker is done - breakerChan <- true - - timer.Reset(5 * time.Second) - } - watcher1.Close() - cancel() - wg.Wait() + watcher1.Close() + cancel() + wg.Wait() + }) } func noopAdd(derp.PeerPresentMessage) {} @@ -444,12 +446,13 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) { defer cancel() priv := key.NewNode() - serverURL, s := newTestServer(t, priv) + serverURL, s, ln := newTestServer(t, priv) defer s.Close() + defer ln.Close() pub := priv.Public() - watcher := newWatcherClient(t, priv, serverURL) + watcher := newWatcherClient(t, priv, serverURL, ln) defer watcher.Close() // Test connecting to ourselves, and that we get hung up on. @@ -518,13 +521,14 @@ func TestNotifyError(t *testing.T) { defer cancel() priv := key.NewNode() - serverURL, s := newTestServer(t, priv) + serverURL, s, ln := newTestServer(t, priv) defer s.Close() + defer ln.Close() pub := priv.Public() // Test early error notification when c.connect fails. - watcher := newWatcherClient(t, priv, serverURL) + watcher := newWatcherClient(t, priv, serverURL, ln) watcher.SetURLDialer(netx.DialFunc(func(ctx context.Context, network, addr string) (net.Conn, error) { t.Helper() return nil, fmt.Errorf("test error: %s", addr) From 16e0abe0311b8fe6417b5225c2d608951ebf1a85 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 2 Oct 2025 13:29:03 +0100 Subject: [PATCH 1466/1708] build_docker.sh: support including extra files (#17405) mkctr already has support for including extra files in the built container image. Wire up a new optional environment variable to thread that through to mkctr. The operator e2e tests will use this to bake additional trusted CAs into the test image without significantly departing from the normal build or deployment process for our containers. Updates tailscale/corp#32085 Change-Id: Ica94ed270da13782c4f5524fdc949f9218f79477 Signed-off-by: Tom Proctor --- build_docker.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/build_docker.sh b/build_docker.sh index 37f00bf53..4552f8d8e 100755 --- a/build_docker.sh +++ b/build_docker.sh @@ -38,6 +38,7 @@ TARGET="${TARGET:-${DEFAULT_TARGET}}" TAGS="${TAGS:-${DEFAULT_TAGS}}" BASE="${BASE:-${DEFAULT_BASE}}" PLATFORM="${PLATFORM:-}" # default to all platforms +FILES="${FILES:-}" # default to no extra files # OCI annotations that will be added to the image. 
# https://github.com/opencontainers/image-spec/blob/main/annotations.md ANNOTATIONS="${ANNOTATIONS:-${DEFAULT_ANNOTATIONS}}" @@ -62,6 +63,7 @@ case "$TARGET" in --push="${PUSH}" \ --target="${PLATFORM}" \ --annotations="${ANNOTATIONS}" \ + --files="${FILES}" \ /usr/local/bin/containerboot ;; k8s-operator) @@ -80,6 +82,7 @@ case "$TARGET" in --push="${PUSH}" \ --target="${PLATFORM}" \ --annotations="${ANNOTATIONS}" \ + --files="${FILES}" \ /usr/local/bin/operator ;; k8s-nameserver) @@ -98,6 +101,7 @@ case "$TARGET" in --push="${PUSH}" \ --target="${PLATFORM}" \ --annotations="${ANNOTATIONS}" \ + --files="${FILES}" \ /usr/local/bin/k8s-nameserver ;; tsidp) @@ -116,6 +120,7 @@ case "$TARGET" in --push="${PUSH}" \ --target="${PLATFORM}" \ --annotations="${ANNOTATIONS}" \ + --files="${FILES}" \ /usr/local/bin/tsidp ;; k8s-proxy) @@ -134,6 +139,7 @@ case "$TARGET" in --push="${PUSH}" \ --target="${PLATFORM}" \ --annotations="${ANNOTATIONS}" \ + --files="${FILES}" \ /usr/local/bin/k8s-proxy ;; *) From aa5b2ce83be402eca9fa3862d257072274261229 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 2 Oct 2025 13:30:00 +0100 Subject: [PATCH 1467/1708] cmd/k8s-operator: add .gitignore for generated chart CRDs (#17406) Add a .gitignore for the chart version of the CRDs that we never commit, because the static manifest CRD files are the canonical version. This makes it easier to deploy the CRDs via the helm chart in a way that reflects the production workflow without making the git checkout "dirty". Given that the chart CRDs are ignored, we can also now safely generate them for the kube-generate-all Makefile target without being a nuisance to the state of the git checkout. Added a slightly more robust repo root detection to the generation logic to make sure the command works from the context of both the Makefile and the image builder command we run for releases in corp. Updates tailscale/corp#32085 Change-Id: Id44a4707c183bfaf95a160911ec7a42ffb1a1287 Signed-off-by: Tom Proctor --- cmd/k8s-operator/deploy/chart/templates/.gitignore | 10 ++++++++++ cmd/k8s-operator/generate/main.go | 9 +++++++-- cmd/k8s-operator/operator.go | 3 +++ 3 files changed, 20 insertions(+), 2 deletions(-) create mode 100644 cmd/k8s-operator/deploy/chart/templates/.gitignore diff --git a/cmd/k8s-operator/deploy/chart/templates/.gitignore b/cmd/k8s-operator/deploy/chart/templates/.gitignore new file mode 100644 index 000000000..ae7c682d9 --- /dev/null +++ b/cmd/k8s-operator/deploy/chart/templates/.gitignore @@ -0,0 +1,10 @@ +# Don't add helm chart CRDs to git. Canonical CRD files live in +# cmd/k8s-operator/deploy/crds. 
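+# They are still produced by the kube-generate-all Makefile target; git simply
+# ignores the generated output here.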
+# +# Generate for local usage with: +# go run tailscale.com/cmd/k8s-operator/generate helmcrd +/connector.yaml +/dnsconfig.yaml +/proxyclass.yaml +/proxygroup.yaml +/recorder.yaml diff --git a/cmd/k8s-operator/generate/main.go b/cmd/k8s-operator/generate/main.go index 25435a47c..6904f1df0 100644 --- a/cmd/k8s-operator/generate/main.go +++ b/cmd/k8s-operator/generate/main.go @@ -41,11 +41,16 @@ func main() { if len(os.Args) < 2 { log.Fatalf("usage ./generate [staticmanifests|helmcrd]") } - repoRoot := "../../" + gitOut, err := exec.Command("git", "rev-parse", "--show-toplevel").CombinedOutput() + if err != nil { + log.Fatalf("error determining git root: %v: %s", err, gitOut) + } + + repoRoot := strings.TrimSpace(string(gitOut)) switch os.Args[1] { case "helmcrd": // insert CRDs to Helm templates behind a installCRDs=true conditional check log.Print("Adding CRDs to Helm templates") - if err := generate("./"); err != nil { + if err := generate(repoRoot); err != nil { log.Fatalf("error adding CRDs to Helm templates: %v", err) } return diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 1d988eb03..89c8ff3e2 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -67,6 +67,9 @@ import ( // Generate static manifests for deploying Tailscale operator on Kubernetes from the operator's Helm chart. //go:generate go run tailscale.com/cmd/k8s-operator/generate staticmanifests +// Generate the helm chart's CRDs (which are ignored from git). +//go:generate go run tailscale.com/cmd/k8s-operator/generate helmcrd + // Generate CRD API docs. //go:generate go run github.com/elastic/crd-ref-docs --renderer=markdown --source-path=../../k8s-operator/apis/ --config=../../k8s-operator/api-docs-config.yaml --output-path=../../k8s-operator/api.md From c45f8813b4651f3486955104a9ea5bd1075733a2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 14:47:40 -0700 Subject: [PATCH 1468/1708] feature/featuretags, all: add build features, use existing ones in more places Saves 270 KB. 
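Each feature gets a pair of generated files in feature/buildfeatures that define a HasX constant under opposite ts_omit_X build tags, so code guarded by the constant is removed by dead code elimination. As an illustrative invocation (not a new build target), a trimmed-down daemon can be built with, for example:

	go build -tags ts_omit_cloud,ts_omit_bakedroots,ts_omit_listenrawdisco,ts_omit_unixsocketidentity ./cmd/tailscaled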
Updates #12614 Change-Id: I4c3fe06d32c49edb3a4bb0758a8617d83f291cf5 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 5 +- cmd/tailscaled/depaware-minbox.txt | 5 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tailscaled/deps_test.go | 16 +++- cmd/tsidp/depaware.txt | 2 +- .../feature_bakedroots_disabled.go | 13 +++ .../feature_bakedroots_enabled.go | 13 +++ .../buildfeatures/feature_cloud_disabled.go | 13 +++ .../buildfeatures/feature_cloud_enabled.go | 13 +++ .../feature_listenrawdisco_disabled.go | 13 +++ .../feature_listenrawdisco_enabled.go | 13 +++ .../feature_unixsocketidentity_disabled.go | 13 +++ .../feature_unixsocketidentity_enabled.go | 13 +++ feature/featuretags/featuretags.go | 74 ++++++++------- ipn/ipnauth/ipnauth.go | 20 ++-- .../ipnauth_omit_unixsocketidentity.go | 25 +++++ ...th_notwindows.go => ipnauth_unix_creds.go} | 2 +- ipn/ipnlocal/c2n.go | 53 +++++++---- ipn/ipnlocal/local.go | 7 +- ipn/ipnlocal/peerapi.go | 48 +++++----- ipn/ipnserver/actor.go | 6 ++ ipn/ipnserver/proxyconnect.go | 6 ++ ipn/ipnserver/server.go | 17 ++++ net/netns/socks.go | 2 +- net/tlsdial/blockblame/blockblame.go | 94 +++++++++++-------- net/tlsdial/tlsdial.go | 37 ++++---- safesocket/safesocket.go | 8 +- tsnet/depaware.txt | 2 +- util/clientmetric/clientmetric.go | 22 +++-- util/cloudenv/cloudenv.go | 7 ++ wgengine/magicsock/cloudinfo.go | 7 ++ wgengine/magicsock/magicsock_default.go | 2 +- wgengine/magicsock/magicsock_linux.go | 2 + wgengine/userspace.go | 2 +- 35 files changed, 410 insertions(+), 169 deletions(-) create mode 100644 feature/buildfeatures/feature_bakedroots_disabled.go create mode 100644 feature/buildfeatures/feature_bakedroots_enabled.go create mode 100644 feature/buildfeatures/feature_cloud_disabled.go create mode 100644 feature/buildfeatures/feature_cloud_enabled.go create mode 100644 feature/buildfeatures/feature_listenrawdisco_disabled.go create mode 100644 feature/buildfeatures/feature_listenrawdisco_enabled.go create mode 100644 feature/buildfeatures/feature_unixsocketidentity_disabled.go create mode 100644 feature/buildfeatures/feature_unixsocketidentity_enabled.go create mode 100644 ipn/ipnauth/ipnauth_omit_unixsocketidentity.go rename ipn/ipnauth/{ipnauth_notwindows.go => ipnauth_unix_creds.go} (95%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 41a6c39e3..aac465a30 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -112,7 +112,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile+ - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + LD github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ W 💣 github.com/tailscale/wireguard-go/conn/winrio from github.com/tailscale/wireguard-go/conn diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 87138e4dd..accaab8f0 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -22,9 +22,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/mdlayher/genetlink from tailscale.com/net/tstun 💣 github.com/mdlayher/netlink from 
github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ + 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ github.com/tailscale/wireguard-go/ipc from github.com/tailscale/wireguard-go/device @@ -221,10 +220,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ - golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ golang.org/x/net/ipv6 from github.com/tailscale/wireguard-go/conn+ - golang.org/x/net/proxy from tailscale.com/net/netns golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 0fd7286e7..f558c4c0b 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -28,7 +28,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/mdlayher/genetlink from tailscale.com/net/tstun 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ + 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink github.com/peterbourgon/ff/v3 from github.com/peterbourgon/ff/v3/ffcli+ github.com/peterbourgon/ff/v3/ffcli from tailscale.com/cmd/tailscale/cli+ github.com/peterbourgon/ff/v3/internal from github.com/peterbourgon/ff/v3 @@ -36,7 +36,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/skip2/go-qrcode from tailscale.com/cmd/tailscale/cli github.com/skip2/go-qrcode/bitset from github.com/skip2/go-qrcode+ github.com/skip2/go-qrcode/reedsolomon from github.com/skip2/go-qrcode - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ github.com/tailscale/wireguard-go/ipc from github.com/tailscale/wireguard-go/device @@ -248,10 +247,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ - golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ golang.org/x/net/ipv6 from github.com/tailscale/wireguard-go/conn+ - golang.org/x/net/proxy from tailscale.com/net/netns golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ diff --git 
a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 26f27e986..7e6dff7df 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -174,7 +174,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/tailscale/hujson from tailscale.com/ipn/conffile L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + LD github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web W 💣 github.com/tailscale/wf from tailscale.com/wf 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index c364a9306..1ec1998d7 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -237,16 +237,22 @@ func minTags() string { } func TestMinTailscaledNoCLI(t *testing.T) { + badSubstrs := []string{ + "cbor", + "regexp", + "golang.org/x/net/proxy", + "internal/socks", + "github.com/tailscale/peercred", + } deptest.DepChecker{ GOOS: "linux", GOARCH: "amd64", Tags: minTags(), OnDep: func(dep string) { - if strings.Contains(dep, "regexp") { - t.Errorf("unexpected dep: %q", dep) - } - if strings.Contains(dep, "cbor") { - t.Errorf("unexpected dep: %q", dep) + for _, bad := range badSubstrs { + if strings.Contains(dep, bad) { + t.Errorf("unexpected dep: %q", dep) + } } }, }.Check(t) diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index b423e0bb0..f39f4fbf0 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -58,7 +58,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + LD github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ W 💣 github.com/tailscale/wireguard-go/conn/winrio from github.com/tailscale/wireguard-go/conn diff --git a/feature/buildfeatures/feature_bakedroots_disabled.go b/feature/buildfeatures/feature_bakedroots_disabled.go new file mode 100644 index 000000000..f203bc1b0 --- /dev/null +++ b/feature/buildfeatures/feature_bakedroots_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_bakedroots + +package buildfeatures + +// HasBakedRoots is whether the binary was built with support for modular feature "Embed CA (LetsEncrypt) x509 roots to use as fallback". +// Specifically, it's whether the binary was NOT built with the "ts_omit_bakedroots" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasBakedRoots = false diff --git a/feature/buildfeatures/feature_bakedroots_enabled.go b/feature/buildfeatures/feature_bakedroots_enabled.go new file mode 100644 index 000000000..69cf2c34c --- /dev/null +++ b/feature/buildfeatures/feature_bakedroots_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_bakedroots + +package buildfeatures + +// HasBakedRoots is whether the binary was built with support for modular feature "Embed CA (LetsEncrypt) x509 roots to use as fallback". +// Specifically, it's whether the binary was NOT built with the "ts_omit_bakedroots" build tag. +// It's a const so it can be used for dead code elimination. +const HasBakedRoots = true diff --git a/feature/buildfeatures/feature_cloud_disabled.go b/feature/buildfeatures/feature_cloud_disabled.go new file mode 100644 index 000000000..3b877a9c6 --- /dev/null +++ b/feature/buildfeatures/feature_cloud_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_cloud + +package buildfeatures + +// HasCloud is whether the binary was built with support for modular feature "detect cloud environment to learn instances IPs and DNS servers". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cloud" build tag. +// It's a const so it can be used for dead code elimination. +const HasCloud = false diff --git a/feature/buildfeatures/feature_cloud_enabled.go b/feature/buildfeatures/feature_cloud_enabled.go new file mode 100644 index 000000000..8fd748de5 --- /dev/null +++ b/feature/buildfeatures/feature_cloud_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_cloud + +package buildfeatures + +// HasCloud is whether the binary was built with support for modular feature "detect cloud environment to learn instances IPs and DNS servers". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cloud" build tag. +// It's a const so it can be used for dead code elimination. +const HasCloud = true diff --git a/feature/buildfeatures/feature_listenrawdisco_disabled.go b/feature/buildfeatures/feature_listenrawdisco_disabled.go new file mode 100644 index 000000000..291178063 --- /dev/null +++ b/feature/buildfeatures/feature_listenrawdisco_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_listenrawdisco + +package buildfeatures + +// HasListenRawDisco is whether the binary was built with support for modular feature "Use raw sockets for more robust disco (NAT traversal) message receiving (Linux only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_listenrawdisco" build tag. +// It's a const so it can be used for dead code elimination. +const HasListenRawDisco = false diff --git a/feature/buildfeatures/feature_listenrawdisco_enabled.go b/feature/buildfeatures/feature_listenrawdisco_enabled.go new file mode 100644 index 000000000..4a4f85ae3 --- /dev/null +++ b/feature/buildfeatures/feature_listenrawdisco_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_listenrawdisco + +package buildfeatures + +// HasListenRawDisco is whether the binary was built with support for modular feature "Use raw sockets for more robust disco (NAT traversal) message receiving (Linux only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_listenrawdisco" build tag. +// It's a const so it can be used for dead code elimination. +const HasListenRawDisco = true diff --git a/feature/buildfeatures/feature_unixsocketidentity_disabled.go b/feature/buildfeatures/feature_unixsocketidentity_disabled.go new file mode 100644 index 000000000..d64e48b82 --- /dev/null +++ b/feature/buildfeatures/feature_unixsocketidentity_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_unixsocketidentity + +package buildfeatures + +// HasUnixSocketIdentity is whether the binary was built with support for modular feature "differentiate between users accessing the LocalAPI over unix sockets (if omitted, all users have full access)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_unixsocketidentity" build tag. +// It's a const so it can be used for dead code elimination. +const HasUnixSocketIdentity = false diff --git a/feature/buildfeatures/feature_unixsocketidentity_enabled.go b/feature/buildfeatures/feature_unixsocketidentity_enabled.go new file mode 100644 index 000000000..463ac2ced --- /dev/null +++ b/feature/buildfeatures/feature_unixsocketidentity_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_unixsocketidentity + +package buildfeatures + +// HasUnixSocketIdentity is whether the binary was built with support for modular feature "differentiate between users accessing the LocalAPI over unix sockets (if omitted, all users have full access)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_unixsocketidentity" build tag. +// It's a const so it can be used for dead code elimination. +const HasUnixSocketIdentity = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 347ccdec0..e9d566a86 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -87,41 +87,47 @@ type FeatureMeta struct { // Features are the known Tailscale features that can be selectively included or // excluded via build tags, and a description of each. 
var Features = map[FeatureTag]FeatureMeta{ - "acme": {"ACME", "ACME TLS certificate management", nil}, - "appconnectors": {"AppConnectors", "App Connectors support", nil}, - "aws": {"AWS", "AWS integration", nil}, - "bird": {"Bird", "Bird BGP integration", nil}, - "captiveportal": {"CaptivePortal", "Captive portal detection", nil}, - "capture": {"Capture", "Packet capture", nil}, - "cli": {"CLI", "embed the CLI into the tailscaled binary", nil}, - "cliconndiag": {"CLIConnDiag", "CLI connection error diagnostics", nil}, - "clientupdate": {"ClientUpdate", "Client auto-update support", nil}, - "completion": {"Completion", "CLI shell completion", nil}, - "dbus": {"DBus", "Linux DBus support", nil}, - "debug": {"Debug", "various debug support, for things that don't have or need their own more specific feature", nil}, - "debugeventbus": {"DebugEventBus", "eventbus debug support", nil}, + "acme": {Sym: "ACME", Desc: "ACME TLS certificate management"}, + "appconnectors": {Sym: "AppConnectors", Desc: "App Connectors support"}, + "aws": {Sym: "AWS", Desc: "AWS integration"}, + "bakedroots": {Sym: "BakedRoots", Desc: "Embed CA (LetsEncrypt) x509 roots to use as fallback"}, + "bird": {Sym: "Bird", Desc: "Bird BGP integration"}, + "captiveportal": {Sym: "CaptivePortal", Desc: "Captive portal detection"}, + "capture": {Sym: "Capture", Desc: "Packet capture"}, + "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, + "cli": {Sym: "CLI", Desc: "embed the CLI into the tailscaled binary"}, + "cliconndiag": {Sym: "CLIConnDiag", Desc: "CLI connection error diagnostics"}, + "clientupdate": {Sym: "ClientUpdate", Desc: "Client auto-update support"}, + "completion": {Sym: "Completion", Desc: "CLI shell completion"}, + "dbus": {Sym: "DBus", Desc: "Linux DBus support"}, + "debug": {Sym: "Debug", Desc: "various debug support, for things that don't have or need their own more specific feature"}, + "debugeventbus": {Sym: "DebugEventBus", Desc: "eventbus debug support"}, "debugportmapper": { Sym: "DebugPortMapper", Desc: "portmapper debug support", Deps: []FeatureTag{"portmapper"}, }, - "desktop_sessions": {"DesktopSessions", "Desktop sessions support", nil}, - "doctor": {"Doctor", "Diagnose possible issues with Tailscale and its host environment", nil}, - "drive": {"Drive", "Tailscale Drive (file server) support", nil}, + "desktop_sessions": {Sym: "DesktopSessions", Desc: "Desktop sessions support"}, + "doctor": {Sym: "Doctor", Desc: "Diagnose possible issues with Tailscale and its host environment"}, + "drive": {Sym: "Drive", Desc: "Tailscale Drive (file server) support"}, "gro": { Sym: "GRO", Desc: "Generic Receive Offload support (performance)", Deps: []FeatureTag{"netstack"}, }, - "hujsonconf": {"HuJSONConf", "HuJSON config file support", nil}, - "iptables": {"IPTables", "Linux iptables support", nil}, - "kube": {"Kube", "Kubernetes integration", nil}, - "linuxdnsfight": {"LinuxDNSFight", "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)", nil}, + "hujsonconf": {Sym: "HuJSONConf", Desc: "HuJSON config file support"}, + "iptables": {Sym: "IPTables", Desc: "Linux iptables support"}, + "kube": {Sym: "Kube", Desc: "Kubernetes integration"}, + "linuxdnsfight": {Sym: "LinuxDNSFight", Desc: "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)"}, + "listenrawdisco": { + Sym: "ListenRawDisco", + Desc: "Use raw sockets for more robust disco (NAT traversal) message receiving (Linux only)", + }, "logtail": { Sym: "LogTail", 
Desc: "upload logs to log.tailscale.com (debug logs for bug reports and also by network flow logs if enabled)", }, - "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, + "oauthkey": {Sym: "OAuthKey", Desc: "OAuth secret-to-authkey resolution support"}, "outboundproxy": { Sym: "OutboundProxy", Desc: "Support running an outbound localhost HTTP/SOCK5 proxy support that sends traffic over Tailscale", @@ -137,9 +143,9 @@ var Features = map[FeatureTag]FeatureMeta{ // by some other feature are missing, then it's an error by default unless you accept // that it's okay to proceed without that meta feature. }, - "portlist": {"PortList", "Optionally advertise listening service ports", nil}, - "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, - "posture": {"Posture", "Device posture checking support", nil}, + "portlist": {Sym: "PortList", Desc: "Optionally advertise listening service ports"}, + "portmapper": {Sym: "PortMapper", Desc: "NAT-PMP/PCP/UPnP port mapping support"}, + "posture": {Sym: "Posture", Desc: "Device posture checking support"}, "dns": { Sym: "DNS", Desc: "MagicDNS and system DNS configuration support", @@ -149,13 +155,13 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Network flow logging support", Deps: []FeatureTag{"logtail"}, }, - "netstack": {"Netstack", "gVisor netstack (userspace networking) support", nil}, + "netstack": {Sym: "Netstack", Desc: "gVisor netstack (userspace networking) support"}, "networkmanager": { Sym: "NetworkManager", Desc: "Linux NetworkManager integration", Deps: []FeatureTag{"dbus"}, }, - "relayserver": {"RelayServer", "Relay server", nil}, + "relayserver": {Sym: "RelayServer", Desc: "Relay server"}, "resolved": { Sym: "Resolved", Desc: "Linux systemd-resolved integration", @@ -179,21 +185,25 @@ var Features = map[FeatureTag]FeatureMeta{ Sym: "Synology", Desc: "Synology NAS integration (applies to Linux builds only)", }, - "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support", nil}, + "syspolicy": {Sym: "SystemPolicy", Desc: "System policy configuration (MDM) support"}, "systray": { Sym: "SysTray", Desc: "Linux system tray", Deps: []FeatureTag{"dbus"}, }, - "taildrop": {"Taildrop", "Taildrop (file sending) support", nil}, - "tailnetlock": {"TailnetLock", "Tailnet Lock support", nil}, - "tap": {"Tap", "Experimental Layer 2 (ethernet) support", nil}, - "tpm": {"TPM", "TPM support", nil}, + "taildrop": {Sym: "Taildrop", Desc: "Taildrop (file sending) support"}, + "tailnetlock": {Sym: "TailnetLock", Desc: "Tailnet Lock support"}, + "tap": {Sym: "Tap", Desc: "Experimental Layer 2 (ethernet) support"}, + "tpm": {Sym: "TPM", Desc: "TPM support"}, + "unixsocketidentity": { + Sym: "UnixSocketIdentity", + Desc: "differentiate between users accessing the LocalAPI over unix sockets (if omitted, all users have full access)", + }, "useproxy": { Sym: "UseProxy", Desc: "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.", }, - "wakeonlan": {"WakeOnLAN", "Wake-on-LAN support", nil}, + "wakeonlan": {Sym: "WakeOnLAN", Desc: "Wake-on-LAN support"}, "webclient": { Sym: "WebClient", Desc: "Web client support", Deps: []FeatureTag{"serve"}, diff --git a/ipn/ipnauth/ipnauth.go b/ipn/ipnauth/ipnauth.go index 513daf5b3..1395a39ae 100644 --- a/ipn/ipnauth/ipnauth.go +++ b/ipn/ipnauth/ipnauth.go @@ -14,7 +14,6 @@ import ( "runtime" "strconv" - "github.com/tailscale/peercred" "tailscale.com/envknob" "tailscale.com/ipn" "tailscale.com/safesocket" @@ 
-63,8 +62,8 @@ type ConnIdentity struct { notWindows bool // runtime.GOOS != "windows" // Fields used when NotWindows: - isUnixSock bool // Conn is a *net.UnixConn - creds *peercred.Creds // or nil if peercred.Get was not implemented on this OS + isUnixSock bool // Conn is a *net.UnixConn + creds PeerCreds // or nil if peercred.Get was not implemented on this OS // Used on Windows: // TODO(bradfitz): merge these into the peercreds package and @@ -97,9 +96,18 @@ func (ci *ConnIdentity) WindowsUserID() ipn.WindowsUserID { return "" } -func (ci *ConnIdentity) Pid() int { return ci.pid } -func (ci *ConnIdentity) IsUnixSock() bool { return ci.isUnixSock } -func (ci *ConnIdentity) Creds() *peercred.Creds { return ci.creds } +func (ci *ConnIdentity) Pid() int { return ci.pid } +func (ci *ConnIdentity) IsUnixSock() bool { return ci.isUnixSock } +func (ci *ConnIdentity) Creds() PeerCreds { return ci.creds } + +// PeerCreds is the interface for a github.com/tailscale/peercred.Creds, +// if linked into the binary. +// +// (It's not used on some platforms, or if ts_omit_unixsocketidentity is set.) +type PeerCreds interface { + UserID() (uid string, ok bool) + PID() (pid int, ok bool) +} var metricIssue869Workaround = clientmetric.NewCounter("issue_869_workaround") diff --git a/ipn/ipnauth/ipnauth_omit_unixsocketidentity.go b/ipn/ipnauth/ipnauth_omit_unixsocketidentity.go new file mode 100644 index 000000000..defe7d89c --- /dev/null +++ b/ipn/ipnauth/ipnauth_omit_unixsocketidentity.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !windows && ts_omit_unixsocketidentity + +package ipnauth + +import ( + "net" + + "tailscale.com/types/logger" +) + +// GetConnIdentity extracts the identity information from the connection +// based on the user who owns the other end of the connection. +// and couldn't. The returned connIdentity has NotWindows set to true. +func GetConnIdentity(_ logger.Logf, c net.Conn) (ci *ConnIdentity, err error) { + return &ConnIdentity{conn: c, notWindows: true}, nil +} + +// WindowsToken is unsupported when GOOS != windows and always returns +// ErrNotImplemented. +func (ci *ConnIdentity) WindowsToken() (WindowsToken, error) { + return nil, ErrNotImplemented +} diff --git a/ipn/ipnauth/ipnauth_notwindows.go b/ipn/ipnauth/ipnauth_unix_creds.go similarity index 95% rename from ipn/ipnauth/ipnauth_notwindows.go rename to ipn/ipnauth/ipnauth_unix_creds.go index f5dc07a8c..8ce2ac8a4 100644 --- a/ipn/ipnauth/ipnauth_notwindows.go +++ b/ipn/ipnauth/ipnauth_unix_creds.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !windows +//go:build !windows && !ts_omit_unixsocketidentity package ipnauth diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index ae9e67126..e2dfecec2 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -10,6 +10,7 @@ import ( "net/http" "path" "reflect" + "runtime" "strconv" "strings" "time" @@ -33,26 +34,34 @@ import ( // exists for that, a map entry with an empty method is used as a fallback. var c2nHandlers = map[methodAndPath]c2nHandler{ // Debug. 
- req("/echo"): handleC2NEcho, - req("/debug/goroutines"): handleC2NDebugGoroutines, - req("/debug/prefs"): handleC2NDebugPrefs, - req("/debug/metrics"): handleC2NDebugMetrics, - req("/debug/component-logging"): handleC2NDebugComponentLogging, - req("/debug/logheap"): handleC2NDebugLogHeap, - req("/debug/netmap"): handleC2NDebugNetMap, - - // PPROF - We only expose a subset of typical pprof endpoints for security. - req("/debug/pprof/heap"): handleC2NPprof, - req("/debug/pprof/allocs"): handleC2NPprof, - - req("POST /logtail/flush"): handleC2NLogtailFlush, - req("POST /sockstats"): handleC2NSockStats, - - // SSH - req("/ssh/usernames"): handleC2NSSHUsernames, - - // Linux netfilter. - req("POST /netfilter-kind"): handleC2NSetNetfilterKind, + req("/echo"): handleC2NEcho, +} + +func init() { + if buildfeatures.HasSSH { + RegisterC2N("/ssh/usernames", handleC2NSSHUsernames) + } + if buildfeatures.HasLogTail { + RegisterC2N("POST /logtail/flush", handleC2NLogtailFlush) + } + if buildfeatures.HasDebug { + RegisterC2N("POST /sockstats", handleC2NSockStats) + + // pprof: + // we only expose a subset of typical pprof endpoints for security. + RegisterC2N("/debug/pprof/heap", handleC2NPprof) + RegisterC2N("/debug/pprof/allocs", handleC2NPprof) + + RegisterC2N("/debug/goroutines", handleC2NDebugGoroutines) + RegisterC2N("/debug/prefs", handleC2NDebugPrefs) + RegisterC2N("/debug/metrics", handleC2NDebugMetrics) + RegisterC2N("/debug/component-logging", handleC2NDebugComponentLogging) + RegisterC2N("/debug/logheap", handleC2NDebugLogHeap) + RegisterC2N("/debug/netmap", handleC2NDebugNetMap) + } + if runtime.GOOS == "linux" && buildfeatures.HasOSRouter { + RegisterC2N("POST /netfilter-kind", handleC2NSetNetfilterKind) + } } // RegisterC2N registers a new c2n handler for the given pattern. @@ -265,6 +274,10 @@ func handleC2NPprof(b *LocalBackend, w http.ResponseWriter, r *http.Request) { } func handleC2NSSHUsernames(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasSSH { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } var req tailcfg.C2NSSHUsernamesRequest if r.Method == "POST" { if err := json.NewDecoder(r.Body).Decode(&req); err != nil { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 965768660..9e2fbb999 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1442,7 +1442,7 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi cn := b.currentNode() nid, ok := cn.NodeByAddr(ipp.Addr()) - if !ok { + if !ok && buildfeatures.HasNetstack { var ip netip.Addr if ipp.Port() != 0 { var protos []string @@ -5015,6 +5015,9 @@ func (b *LocalBackend) SetVarRoot(dir string) { // // It should only be called before the LocalBackend is used. func (b *LocalBackend) SetLogFlusher(flushFunc func()) { + if !buildfeatures.HasLogTail { + return + } b.logFlushFunc = flushFunc } @@ -5023,7 +5026,7 @@ func (b *LocalBackend) SetLogFlusher(flushFunc func()) { // // TryFlushLogs should not block. 
func (b *LocalBackend) TryFlushLogs() bool { - if b.logFlushFunc == nil { + if !buildfeatures.HasLogTail || b.logFlushFunc == nil { return false } b.logFlushFunc() diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 4f99525f9..9ad3e3c36 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -354,33 +354,35 @@ func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } } - if strings.HasPrefix(r.URL.Path, "/dns-query") { + if buildfeatures.HasDNS && strings.HasPrefix(r.URL.Path, "/dns-query") { metricDNSCalls.Add(1) h.handleDNSQuery(w, r) return } - switch r.URL.Path { - case "/v0/goroutines": - h.handleServeGoroutines(w, r) - return - case "/v0/env": - h.handleServeEnv(w, r) - return - case "/v0/metrics": - h.handleServeMetrics(w, r) - return - case "/v0/magicsock": - h.handleServeMagicsock(w, r) - return - case "/v0/dnsfwd": - h.handleServeDNSFwd(w, r) - return - case "/v0/interfaces": - h.handleServeInterfaces(w, r) - return - case "/v0/sockstats": - h.handleServeSockStats(w, r) - return + if buildfeatures.HasDebug { + switch r.URL.Path { + case "/v0/goroutines": + h.handleServeGoroutines(w, r) + return + case "/v0/env": + h.handleServeEnv(w, r) + return + case "/v0/metrics": + h.handleServeMetrics(w, r) + return + case "/v0/magicsock": + h.handleServeMagicsock(w, r) + return + case "/v0/dnsfwd": + h.handleServeDNSFwd(w, r) + return + case "/v0/interfaces": + h.handleServeInterfaces(w, r) + return + case "/v0/sockstats": + h.handleServeSockStats(w, r) + return + } } if ph, ok := peerAPIHandlers[r.URL.Path]; ok { ph(h, w, r) diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 924417a33..628e3c37c 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -12,6 +12,7 @@ import ( "runtime" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/types/logger" @@ -237,6 +238,11 @@ func connIsLocalAdmin(logf logger.Logf, ci *ipnauth.ConnIdentity, operatorUID st // Linux. fallthrough case "linux": + if !buildfeatures.HasUnixSocketIdentity { + // Everybody is an admin if support for unix socket identities + // is omitted for the build. + return true + } uid, ok := ci.Creds().UserID() if !ok { return false diff --git a/ipn/ipnserver/proxyconnect.go b/ipn/ipnserver/proxyconnect.go index 030c4efe4..7d41273bd 100644 --- a/ipn/ipnserver/proxyconnect.go +++ b/ipn/ipnserver/proxyconnect.go @@ -10,6 +10,8 @@ import ( "net" "net/http" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/logpolicy" ) @@ -23,6 +25,10 @@ import ( // precludes that from working and instead the GUI fails to dial out. // So, go through tailscaled (with a CONNECT request) instead. 
func (s *Server) handleProxyConnectConn(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasOutboundProxy { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } ctx := r.Context() if r.Method != "CONNECT" { panic("[unexpected] miswired") diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index 6c382a57e..d473252e1 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -15,6 +15,7 @@ import ( "net" "net/http" "os/user" + "runtime" "strconv" "strings" "sync" @@ -24,6 +25,7 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/envknob" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/localapi" @@ -120,6 +122,10 @@ func (s *Server) awaitBackend(ctx context.Context) (_ *ipnlocal.LocalBackend, ok // This is primarily for the Windows GUI, because wintun can take awhile to // come up. See https://github.com/tailscale/tailscale/issues/6522. func (s *Server) serveServerStatus(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug && runtime.GOOS != "windows" { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotFound) + return + } ctx := r.Context() w.Header().Set("Content-Type", "application/json") @@ -382,6 +388,9 @@ func isAllDigit(s string) bool { // connection. It's intended to give your non-root webserver access // (www-data, caddy, nginx, etc) to certs. func (a *actor) CanFetchCerts() bool { + if !buildfeatures.HasACME { + return false + } if a.ci.IsUnixSock() && a.ci.Creds() != nil { connUID, ok := a.ci.Creds().UserID() if ok && connUID == userIDFromString(envknob.String("TS_PERMIT_CERT_UID")) { @@ -398,6 +407,10 @@ func (a *actor) CanFetchCerts() bool { // // onDone must be called when the HTTP request is done. func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (onDone func(), err error) { + if runtime.GOOS != "windows" && !buildfeatures.HasUnixSocketIdentity { + return func() {}, nil + } + if actor == nil { return nil, errors.New("internal error: nil actor") } @@ -538,6 +551,10 @@ func (s *Server) Run(ctx context.Context, ln net.Listener) error { // Windows and via $DEBUG_LISTENER/debug/ipn when tailscaled's --debug flag // is used to run a debug server. func (s *Server) ServeHTMLStatus(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotFound) + return + } lb := s.lb.Load() if lb == nil { http.Error(w, "no LocalBackend", http.StatusServiceUnavailable) diff --git a/net/netns/socks.go b/net/netns/socks.go index ee8dfa20e..9a137db7f 100644 --- a/net/netns/socks.go +++ b/net/netns/socks.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !js && !android +//go:build !ios && !js && !android && !ts_omit_useproxy package netns diff --git a/net/tlsdial/blockblame/blockblame.go b/net/tlsdial/blockblame/blockblame.go index 57dc7a6e6..5b48dc009 100644 --- a/net/tlsdial/blockblame/blockblame.go +++ b/net/tlsdial/blockblame/blockblame.go @@ -9,13 +9,19 @@ package blockblame import ( "crypto/x509" "strings" + "sync" + + "tailscale.com/feature/buildfeatures" ) // VerifyCertificate checks if the given certificate c is issued by a firewall manufacturer // that is known to block Tailscale connections. It returns true and the Manufacturer of // the equipment if it is, or false and nil if it is not. 
func VerifyCertificate(c *x509.Certificate) (m *Manufacturer, ok bool) { - for _, m := range Manufacturers { + if !buildfeatures.HasDebug { + return nil, false + } + for _, m := range manufacturers() { if m.match != nil && m.match(c) { return m, true } @@ -33,46 +39,56 @@ type Manufacturer struct { match matchFunc } -var Manufacturers = []*Manufacturer{ - { - Name: "Aruba Networks", - match: issuerContains("Aruba"), - }, - { - Name: "Cisco", - match: issuerContains("Cisco"), - }, - { - Name: "Fortinet", - match: matchAny( - issuerContains("Fortinet"), - certEmail("support@fortinet.com"), - ), - }, - { - Name: "Huawei", - match: certEmail("mobile@huawei.com"), - }, - { - Name: "Palo Alto Networks", - match: matchAny( - issuerContains("Palo Alto Networks"), - issuerContains("PAN-FW"), - ), - }, - { - Name: "Sophos", - match: issuerContains("Sophos"), - }, - { - Name: "Ubiquiti", - match: matchAny( - issuerContains("UniFi"), - issuerContains("Ubiquiti"), - ), - }, +func manufacturers() []*Manufacturer { + manufacturersOnce.Do(func() { + manufacturersList = []*Manufacturer{ + { + Name: "Aruba Networks", + match: issuerContains("Aruba"), + }, + { + Name: "Cisco", + match: issuerContains("Cisco"), + }, + { + Name: "Fortinet", + match: matchAny( + issuerContains("Fortinet"), + certEmail("support@fortinet.com"), + ), + }, + { + Name: "Huawei", + match: certEmail("mobile@huawei.com"), + }, + { + Name: "Palo Alto Networks", + match: matchAny( + issuerContains("Palo Alto Networks"), + issuerContains("PAN-FW"), + ), + }, + { + Name: "Sophos", + match: issuerContains("Sophos"), + }, + { + Name: "Ubiquiti", + match: matchAny( + issuerContains("UniFi"), + issuerContains("Ubiquiti"), + ), + }, + } + }) + return manufacturersList } +var ( + manufacturersOnce sync.Once + manufacturersList []*Manufacturer +) + type matchFunc func(*x509.Certificate) bool func issuerContains(s string) matchFunc { diff --git a/net/tlsdial/tlsdial.go b/net/tlsdial/tlsdial.go index 80f3bfc06..ee4771d8d 100644 --- a/net/tlsdial/tlsdial.go +++ b/net/tlsdial/tlsdial.go @@ -28,6 +28,7 @@ import ( "tailscale.com/derp/derpconst" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/net/bakedroots" @@ -36,12 +37,6 @@ import ( var counterFallbackOK int32 // atomic -// If SSLKEYLOGFILE is set, it's a file to which we write our TLS private keys -// in a way that WireShark can read. -// -// See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format -var sslKeyLogFile = os.Getenv("SSLKEYLOGFILE") - var debug = envknob.RegisterBool("TS_DEBUG_TLS_DIAL") // tlsdialWarningPrinted tracks whether we've printed a warning about a given @@ -80,13 +75,19 @@ func Config(ht *health.Tracker, base *tls.Config) *tls.Config { // the real TCP connection) because host is the ultimate hostname, but this // tls.Config is used for both the proxy and the ultimate target. - if n := sslKeyLogFile; n != "" { - f, err := os.OpenFile(n, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) - if err != nil { - log.Fatal(err) + if buildfeatures.HasDebug { + // If SSLKEYLOGFILE is set, it's a file to which we write our TLS private keys + // in a way that WireShark can read. 
+ // + // See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format + if n := os.Getenv("SSLKEYLOGFILE"); n != "" { + f, err := os.OpenFile(n, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) + if err != nil { + log.Fatal(err) + } + log.Printf("WARNING: writing to SSLKEYLOGFILE %v", n) + conf.KeyLogWriter = f } - log.Printf("WARNING: writing to SSLKEYLOGFILE %v", n) - conf.KeyLogWriter = f } if conf.InsecureSkipVerify { @@ -164,10 +165,12 @@ func Config(ht *health.Tracker, base *tls.Config) *tls.Config { if debug() { log.Printf("tlsdial(sys %q): %v", dialedHost, errSys) } + if !buildfeatures.HasBakedRoots || (errSys == nil && !debug()) { + return errSys + } - // Always verify with our baked-in Let's Encrypt certificate, - // so we can log an informational message. This is useful for - // detecting SSL MiTM. + // If we have baked-in LetsEncrypt roots and we either failed above, or + // debug logging is enabled, also verify with LetsEncrypt. opts.Roots = bakedroots.Get() _, bakedErr := cs.PeerCertificates[0].Verify(opts) if debug() { @@ -239,8 +242,8 @@ func SetConfigExpectedCert(c *tls.Config, certDNSName string) { if debug() { log.Printf("tlsdial(sys %q/%q): %v", c.ServerName, certDNSName, errSys) } - if errSys == nil { - return nil + if !buildfeatures.HasBakedRoots || errSys == nil { + return errSys } opts.Roots = bakedroots.Get() _, err := certs[0].Verify(opts) diff --git a/safesocket/safesocket.go b/safesocket/safesocket.go index ea79edab0..287cdca59 100644 --- a/safesocket/safesocket.go +++ b/safesocket/safesocket.go @@ -13,6 +13,7 @@ import ( "time" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" ) type closeable interface { @@ -108,7 +109,12 @@ func LocalTCPPortAndToken() (port int, token string, err error) { // PlatformUsesPeerCreds reports whether the current platform uses peer credentials // to authenticate connections. -func PlatformUsesPeerCreds() bool { return GOOSUsesPeerCreds(runtime.GOOS) } +func PlatformUsesPeerCreds() bool { + if !buildfeatures.HasUnixSocketIdentity { + return false + } + return GOOSUsesPeerCreds(runtime.GOOS) +} // GOOSUsesPeerCreds is like PlatformUsesPeerCreds but takes a // runtime.GOOS value instead of using the current one. 
diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 3cf1d06e9..1f9609745 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -58,7 +58,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp LDW github.com/tailscale/hujson from tailscale.com/ipn/conffile - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + LDAI github.com/tailscale/peercred from tailscale.com/ipn/ipnauth LDW github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ W 💣 github.com/tailscale/wireguard-go/conn/winrio from github.com/tailscale/wireguard-go/conn diff --git a/util/clientmetric/clientmetric.go b/util/clientmetric/clientmetric.go index 5c1116019..2243ec3de 100644 --- a/util/clientmetric/clientmetric.go +++ b/util/clientmetric/clientmetric.go @@ -18,6 +18,7 @@ import ( "sync/atomic" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/util/set" ) @@ -130,15 +131,17 @@ func (m *Metric) Publish() { metrics[m.name] = m sortedDirty = true - if m.f != nil { - lastLogVal = append(lastLogVal, scanEntry{f: m.f}) - } else { - if len(valFreeList) == 0 { - valFreeList = make([]int64, 256) + if buildfeatures.HasLogTail { + if m.f != nil { + lastLogVal = append(lastLogVal, scanEntry{f: m.f}) + } else { + if len(valFreeList) == 0 { + valFreeList = make([]int64, 256) + } + m.v = &valFreeList[0] + valFreeList = valFreeList[1:] + lastLogVal = append(lastLogVal, scanEntry{v: m.v}) } - m.v = &valFreeList[0] - valFreeList = valFreeList[1:] - lastLogVal = append(lastLogVal, scanEntry{v: m.v}) } m.regIdx = len(unsorted) @@ -319,6 +322,9 @@ const ( // - increment a metric: (decrements if negative) // 'I' + hex(varint(wireid)) + hex(varint(value)) func EncodeLogTailMetricsDelta() string { + if !buildfeatures.HasLogTail { + return "" + } mu.Lock() defer mu.Unlock() diff --git a/util/cloudenv/cloudenv.go b/util/cloudenv/cloudenv.go index be60ca007..f55f7dfb0 100644 --- a/util/cloudenv/cloudenv.go +++ b/util/cloudenv/cloudenv.go @@ -16,6 +16,7 @@ import ( "strings" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/syncs" "tailscale.com/types/lazy" ) @@ -51,6 +52,9 @@ const ( // ResolverIP returns the cloud host's recursive DNS server or the // empty string if not available. func (c Cloud) ResolverIP() string { + if !buildfeatures.HasCloud { + return "" + } switch c { case GCP: return GoogleMetadataAndDNSIP @@ -92,6 +96,9 @@ var cloudAtomic syncs.AtomicValue[Cloud] // Get returns the current cloud, or the empty string if unknown. func Get() Cloud { + if !buildfeatures.HasCloud { + return "" + } if c, ok := cloudAtomic.LoadOk(); ok { return c } diff --git a/wgengine/magicsock/cloudinfo.go b/wgengine/magicsock/cloudinfo.go index 1de369631..0db56b3f6 100644 --- a/wgengine/magicsock/cloudinfo.go +++ b/wgengine/magicsock/cloudinfo.go @@ -17,6 +17,7 @@ import ( "strings" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/logger" "tailscale.com/util/cloudenv" ) @@ -34,6 +35,9 @@ type cloudInfo struct { } func newCloudInfo(logf logger.Logf) *cloudInfo { + if !buildfeatures.HasCloud { + return nil + } tr := &http.Transport{ DisableKeepAlives: true, Dial: (&net.Dialer{ @@ -53,6 +57,9 @@ func newCloudInfo(logf logger.Logf) *cloudInfo { // if the tailscaled process is running in a known cloud and there are any such // IPs present. 
func (ci *cloudInfo) GetPublicIPs(ctx context.Context) ([]netip.Addr, error) { + if !buildfeatures.HasCloud { + return nil, nil + } switch ci.cloud { case cloudenv.AWS: ret, err := ci.getAWS(ctx) diff --git a/wgengine/magicsock/magicsock_default.go b/wgengine/magicsock/magicsock_default.go index 1c315034a..88759d3ac 100644 --- a/wgengine/magicsock/magicsock_default.go +++ b/wgengine/magicsock/magicsock_default.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux +//go:build !linux || ts_omit_listenrawdisco package magicsock diff --git a/wgengine/magicsock/magicsock_linux.go b/wgengine/magicsock/magicsock_linux.go index cad0e9b5e..f37e19165 100644 --- a/wgengine/magicsock/magicsock_linux.go +++ b/wgengine/magicsock/magicsock_linux.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !ts_omit_listenrawdisco + package magicsock import ( diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 30486f7a9..735181ec7 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -435,7 +435,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } e.tundev.PreFilterPacketOutboundToWireGuardEngineIntercept = e.handleLocalPackets - if envknob.BoolDefaultTrue("TS_DEBUG_CONNECT_FAILURES") { + if buildfeatures.HasDebug && envknob.BoolDefaultTrue("TS_DEBUG_CONNECT_FAILURES") { if e.tundev.PreFilterPacketInboundFromWireGuard != nil { return nil, errors.New("unexpected PreFilterIn already set") } From 1d93bdce20ddd2887651e4c2324dd4e113cd864a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 1 Oct 2025 08:53:48 -0700 Subject: [PATCH 1469/1708] control/controlclient: remove x/net/http2, use net/http Saves 352 KB, removing one of our two HTTP/2 implementations linked into the binary. 
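Concretely, the client now gets HTTP/2 from net/http by handing the Transport an already-established Noise connection and enabling HTTP/2 directly on it. A minimal sketch of that mechanism follows; it is not the code in this change, it assumes Go 1.24's http.Protocols API, and dialNoise is a hypothetical stand-in for the ts2021 Noise dial:

    package sketch

    import (
        "context"
        "net"
        "net/http"
    )

    // newH2Client returns an *http.Client whose Transport speaks HTTP/2 over
    // whatever net.Conn dialNoise returns, with no golang.org/x/net/http2 import.
    func newH2Client(dialNoise func(context.Context) (net.Conn, error)) *http.Client {
        tr := &http.Transport{
            Protocols:       new(http.Protocols), // zero value: no protocols enabled yet
            MaxConnsPerHost: 1,
        }
        // The Noise layer already encrypts the stream, so from net/http's point
        // of view this is "unencrypted" HTTP/2 over an opaque net.Conn.
        tr.Protocols.SetUnencryptedHTTP2(true)
        tr.DialTLSContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
            return dialNoise(ctx)
        }
        return &http.Client{Transport: tr}
    }

The traffic stays encrypted; SetUnencryptedHTTP2 only tells net/http not to layer TLS on top of the connection it is given.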
Fixes #17305 Updates #15015 Change-Id: I53a04b1f2687dca73c8541949465038b69aa6ade Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/cli/debug.go | 87 ++-- cmd/tailscale/depaware.txt | 20 +- cmd/tailscale/tailscale_test.go | 1 - cmd/tailscaled/depaware-min.txt | 15 +- cmd/tailscaled/depaware-minbox.txt | 15 +- cmd/tailscaled/depaware.txt | 9 +- cmd/tailscaled/deps_test.go | 3 + cmd/tsidp/depaware.txt | 9 +- control/controlclient/direct.go | 37 +- control/controlclient/noise.go | 394 ------------------ control/controlhttp/constants.go | 4 +- control/ts2021/client.go | 289 +++++++++++++ .../noise_test.go => ts2021/client_test.go} | 93 +++-- control/ts2021/conn.go | 57 +-- net/tsdial/tsdial.go | 9 +- tsnet/depaware.txt | 9 +- 17 files changed, 467 insertions(+), 586 deletions(-) delete mode 100644 control/controlclient/noise.go create mode 100644 control/ts2021/client.go rename control/{controlclient/noise_test.go => ts2021/client_test.go} (80%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index aac465a30..eae1354a1 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -687,7 +687,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnlocal+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/control/ts2021 from tailscale.com/control/controlclient diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 224070842..2836ae298 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -17,6 +17,7 @@ import ( "log" "net" "net/http" + "net/http/httptrace" "net/http/httputil" "net/netip" "net/url" @@ -28,17 +29,18 @@ import ( "time" "github.com/peterbourgon/ff/v3/ffcli" - "golang.org/x/net/http2" "tailscale.com/client/tailscale/apitype" - "tailscale.com/control/controlhttp" "tailscale.com/control/ts2021" "tailscale.com/feature" _ "tailscale.com/feature/condregister/useproxy" + "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/net/ace" + "tailscale.com/net/dnscache" "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" + "tailscale.com/net/tsdial" "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/tailcfg" @@ -1062,22 +1064,8 @@ func runTS2021(ctx context.Context, args []string) error { if err := json.Unmarshal(b, dialPlan); err != nil { return fmt.Errorf("unmarshaling dial plan JSON file: %w", err) } - } - - noiseDialer := &controlhttp.Dialer{ - Hostname: ts2021Args.host, - HTTPPort: "80", - HTTPSPort: "443", - MachineKey: machinePrivate, - ControlKey: keys.PublicKey, - ProtocolVersion: uint16(ts2021Args.version), - DialPlan: dialPlan, - Dialer: dialFunc, - Logf: logf, - NetMon: netMon, - } - if ts2021Args.aceHost != "" { - noiseDialer.DialPlan = &tailcfg.ControlDialPlan{ + } else if ts2021Args.aceHost != "" { + dialPlan = &tailcfg.ControlDialPlan{ Candidates: []tailcfg.ControlIPCandidate{ { ACEHost: ts2021Args.aceHost, @@ -1086,9 +1074,25 @@ func runTS2021(ctx context.Context, args []string) error { }, } } + + opts := 
ts2021.ClientOpts{ + ServerURL: "https://" + ts2021Args.host, + DialPlan: func() *tailcfg.ControlDialPlan { + return dialPlan + }, + Logf: logf, + NetMon: netMon, + PrivKey: machinePrivate, + ServerPubKey: keys.PublicKey, + Dialer: tsdial.NewFromFuncForDebug(logf, dialFunc), + DNSCache: &dnscache.Resolver{}, + HealthTracker: &health.Tracker{}, + } + + // TODO: ProtocolVersion: uint16(ts2021Args.version), const tries = 2 for i := range tries { - err := tryConnect(ctx, keys.PublicKey, noiseDialer) + err := tryConnect(ctx, keys.PublicKey, opts) if err != nil { log.Printf("error on attempt %d/%d: %v", i+1, tries, err) continue @@ -1098,44 +1102,37 @@ func runTS2021(ctx context.Context, args []string) error { return nil } -func tryConnect(ctx context.Context, controlPublic key.MachinePublic, noiseDialer *controlhttp.Dialer) error { - conn, err := noiseDialer.Dial(ctx) - log.Printf("controlhttp.Dial = %p, %v", conn, err) - if err != nil { - return err - } - log.Printf("did noise handshake") - - gotPeer := conn.Peer() - if gotPeer != controlPublic { - log.Printf("peer = %v, want %v", gotPeer, controlPublic) - return errors.New("key mismatch") - } +func tryConnect(ctx context.Context, controlPublic key.MachinePublic, opts ts2021.ClientOpts) error { - log.Printf("final underlying conn: %v / %v", conn.LocalAddr(), conn.RemoteAddr()) - - h2Transport, err := http2.ConfigureTransports(&http.Transport{ - IdleConnTimeout: time.Second, + ctx = httptrace.WithClientTrace(ctx, &httptrace.ClientTrace{ + GotConn: func(ci httptrace.GotConnInfo) { + log.Printf("GotConn: %T", ci.Conn) + ncc, ok := ci.Conn.(*ts2021.Conn) + if !ok { + return + } + log.Printf("did noise handshake") + log.Printf("final underlying conn: %v / %v", ncc.LocalAddr(), ncc.RemoteAddr()) + gotPeer := ncc.Peer() + if gotPeer != controlPublic { + log.Fatalf("peer = %v, want %v", gotPeer, controlPublic) + } + }, }) - if err != nil { - return fmt.Errorf("http2.ConfigureTransports: %w", err) - } - // Now, create a Noise conn over the existing conn. - nc, err := ts2021.New(conn.Conn, h2Transport, 0, nil) + nc, err := ts2021.NewClient(opts) if err != nil { - return fmt.Errorf("noiseconn.New: %w", err) + return fmt.Errorf("NewNoiseClient: %w", err) } - defer nc.Close() // Make a /whoami request to the server to verify that we can actually // communicate over the newly-established connection. 
- whoamiURL := "http://" + ts2021Args.host + "/machine/whoami" + whoamiURL := "https://" + ts2021Args.host + "/machine/whoami" req, err := http.NewRequestWithContext(ctx, "GET", whoamiURL, nil) if err != nil { return err } - resp, err := nc.RoundTrip(req) + resp, err := nc.Do(req) if err != nil { return fmt.Errorf("RoundTrip whoami request: %w", err) } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 3e100d4a7..6facd19f9 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -18,6 +18,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/winutil/authenticode L github.com/fogleman/gg from tailscale.com/client/systray github.com/fxamacker/cbor/v2 from tailscale.com/tka + github.com/gaissmai/bart from tailscale.com/net/tsdial + github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ @@ -83,7 +86,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/cmd/tailscale/cli/ffcomplete from tailscale.com/cmd/tailscale/cli tailscale.com/cmd/tailscale/cli/ffcomplete/internal from tailscale.com/cmd/tailscale/cli/ffcomplete tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ - tailscale.com/control/controlhttp from tailscale.com/cmd/tailscale/cli + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/ts2021 from tailscale.com/cmd/tailscale/cli tailscale.com/derp from tailscale.com/derp/derphttp+ @@ -119,7 +122,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/netaddr from tailscale.com/ipn+ tailscale.com/net/netcheck from tailscale.com/cmd/tailscale/cli tailscale.com/net/neterror from tailscale.com/net/netcheck+ - tailscale.com/net/netknob from tailscale.com/net/netns + tailscale.com/net/netknob from tailscale.com/net/netns+ 💣 tailscale.com/net/netmon from tailscale.com/cmd/tailscale/cli+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ tailscale.com/net/netutil from tailscale.com/client/local+ @@ -132,6 +135,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/tlsdial from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ + tailscale.com/net/tsdial from tailscale.com/cmd/tailscale/cli+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/udprelay/status from tailscale.com/client/local+ tailscale.com/paths from tailscale.com/client/local+ @@ -229,13 +233,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L golang.org/x/image/math/fixed from github.com/fogleman/gg+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy - golang.org/x/net/http2 from tailscale.com/cmd/tailscale/cli+ - 
golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping - golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 + golang.org/x/net/idna from golang.org/x/net/http/httpproxy+ golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy @@ -342,7 +342,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from golang.org/x/net/http2+ + crypto/tls from net/http+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 @@ -441,14 +441,14 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep net from crypto/tls+ net/http from expvar+ net/http/cgi from tailscale.com/cmd/tailscale/cli - net/http/httptrace from golang.org/x/net/http2+ + net/http/httptrace from net/http+ net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ - net/textproto from golang.org/x/net/http/httpguts+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ os/exec from github.com/atotto/clipboard+ diff --git a/cmd/tailscale/tailscale_test.go b/cmd/tailscale/tailscale_test.go index dc477fb6e..a7a3c2323 100644 --- a/cmd/tailscale/tailscale_test.go +++ b/cmd/tailscale/tailscale_test.go @@ -19,7 +19,6 @@ func TestDeps(t *testing.T) { "gvisor.dev/gvisor/pkg/tcpip/header": "https://github.com/tailscale/tailscale/issues/9756", "tailscale.com/wgengine/filter": "brings in bart, etc", "github.com/bits-and-blooms/bitset": "unneeded in CLI", - "github.com/gaissmai/bart": "unneeded in CLI", "tailscale.com/net/ipset": "unneeded in CLI", }, }.Check(t) diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index accaab8f0..be13c7b68 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -41,7 +41,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/control/ts2021 from tailscale.com/control/controlclient @@ -212,12 +212,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/ipn/ipnlocal+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http2 from tailscale.com/control/controlclient+ - golang.org/x/net/http2/hpack from golang.org/x/net/http2+ + golang.org/x/net/http/httpguts from 
tailscale.com/ipn/ipnlocal+ golang.org/x/net/icmp from tailscale.com/net/ping - golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 + golang.org/x/net/idna from golang.org/x/net/http/httpguts golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ @@ -251,7 +248,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip - compress/gzip from golang.org/x/net/http2+ + compress/gzip from net/http container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ @@ -313,7 +310,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from golang.org/x/net/http2+ + crypto/tls from net/http+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ crypto/x509/pkix from crypto/x509 @@ -391,7 +388,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ - net/http/httptrace from golang.org/x/net/http2+ + net/http/httptrace from net/http+ net/http/internal from net/http net/http/internal/ascii from net/http net/http/internal/httpcommon from net/http diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index f558c4c0b..a91aa8afd 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -61,10 +61,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient+ + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ - tailscale.com/control/ts2021 from tailscale.com/cmd/tailscale/cli+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/net/netcheck+ @@ -239,12 +239,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/cmd/tailscale/cli+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http2 from tailscale.com/cmd/tailscale/cli+ - golang.org/x/net/http2/hpack from golang.org/x/net/http2+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ golang.org/x/net/icmp from tailscale.com/net/ping golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ 
golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ @@ -279,7 +276,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip+ - compress/gzip from golang.org/x/net/http2+ + compress/gzip from net/http+ compress/zlib from image/png container/list from crypto/tls+ context from crypto/tls+ @@ -342,7 +339,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from golang.org/x/net/http2+ + crypto/tls from net/http+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ crypto/x509/pkix from crypto/x509 @@ -425,7 +422,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ - net/http/httptrace from golang.org/x/net/http2+ + net/http/httptrace from net/http+ net/http/httputil from tailscale.com/cmd/tailscale/cli net/http/internal from net/http+ net/http/internal/ascii from net/http+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 7e6dff7df..00c1a0ac4 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -252,7 +252,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/cmd/tailscaled/tailscaledhooks from tailscale.com/cmd/tailscaled+ tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/control/ts2021 from tailscale.com/control/controlclient @@ -501,13 +501,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy - golang.org/x/net/http2 from tailscale.com/control/controlclient+ - golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy @@ -551,7 +548,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de bytes from archive/tar+ cmp from slices+ compress/flate from compress/gzip+ - compress/gzip from golang.org/x/net/http2+ + compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ W compress/zlib from debug/pe container/heap from github.com/jellydator/ttlcache/v3+ container/list from crypto/tls+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 1ec1998d7..c54f014f6 
100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -276,5 +276,8 @@ func TestMinTailscaledWithCLI(t *testing.T) { } } }, + BadDeps: map[string]string{ + "golang.org/x/net/http2": "unexpected x/net/http2 dep; tailscale/tailscale#17305", + }, }.Check(t) } diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index f39f4fbf0..4ddc5eda1 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -129,7 +129,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnext+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/control/ts2021 from tailscale.com/control/controlclient @@ -335,13 +335,10 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy - golang.org/x/net/http2 from tailscale.com/control/controlclient+ - golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy @@ -385,7 +382,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip+ - compress/gzip from golang.org/x/net/http2+ + compress/gzip from internal/profile+ W compress/zlib from debug/pe container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp container/list from crypto/tls+ diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index ed84d63ff..a3f908da4 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -28,6 +28,7 @@ import ( "go4.org/mem" "tailscale.com/control/controlknobs" + "tailscale.com/control/ts2021" "tailscale.com/envknob" "tailscale.com/feature" "tailscale.com/feature/buildfeatures" @@ -95,8 +96,8 @@ type Direct struct { serverLegacyKey key.MachinePublic // original ("legacy") nacl crypto_box-based public key; only used for signRegisterRequest on Windows now serverNoiseKey key.MachinePublic - sfGroup singleflight.Group[struct{}, *NoiseClient] // protects noiseClient creation. - noiseClient *NoiseClient + sfGroup singleflight.Group[struct{}, *ts2021.Client] // protects noiseClient creation. 
+ noiseClient *ts2021.Client persist persist.PersistView authKey string @@ -329,7 +330,7 @@ func NewDirect(opts Options) (*Direct, error) { } } if opts.NoiseTestClient != nil { - c.noiseClient = &NoiseClient{ + c.noiseClient = &ts2021.Client{ Client: opts.NoiseTestClient, } c.serverNoiseKey = key.NewMachine().Public() // prevent early error before hitting test client @@ -359,9 +360,7 @@ func (c *Direct) Close() error { } } c.noiseClient = nil - if tr, ok := c.httpc.Transport.(*http.Transport); ok { - tr.CloseIdleConnections() - } + c.httpc.CloseIdleConnections() return nil } @@ -703,8 +702,8 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new if err != nil { return regen, opt.URL, nil, err } - addLBHeader(req, request.OldNodeKey) - addLBHeader(req, request.NodeKey) + ts2021.AddLBHeader(req, request.OldNodeKey) + ts2021.AddLBHeader(req, request.NodeKey) res, err := httpc.Do(req) if err != nil { @@ -1012,7 +1011,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap if err != nil { return err } - addLBHeader(req, nodeKey) + ts2021.AddLBHeader(req, nodeKey) res, err := httpc.Do(req) if err != nil { @@ -1507,7 +1506,7 @@ func sleepAsRequested(ctx context.Context, logf logger.Logf, d time.Duration, cl } // getNoiseClient returns the noise client, creating one if one doesn't exist. -func (c *Direct) getNoiseClient() (*NoiseClient, error) { +func (c *Direct) getNoiseClient() (*ts2021.Client, error) { c.mu.Lock() serverNoiseKey := c.serverNoiseKey nc := c.noiseClient @@ -1522,13 +1521,13 @@ func (c *Direct) getNoiseClient() (*NoiseClient, error) { if c.dialPlan != nil { dp = c.dialPlan.Load } - nc, err, _ := c.sfGroup.Do(struct{}{}, func() (*NoiseClient, error) { + nc, err, _ := c.sfGroup.Do(struct{}{}, func() (*ts2021.Client, error) { k, err := c.getMachinePrivKey() if err != nil { return nil, err } c.logf("[v1] creating new noise client") - nc, err := NewNoiseClient(NoiseOpts{ + nc, err := ts2021.NewClient(ts2021.ClientOpts{ PrivKey: k, ServerPubKey: serverNoiseKey, ServerURL: c.serverURL, @@ -1562,7 +1561,7 @@ func (c *Direct) setDNSNoise(ctx context.Context, req *tailcfg.SetDNSRequest) er if err != nil { return err } - res, err := nc.post(ctx, "/machine/set-dns", newReq.NodeKey, &newReq) + res, err := nc.Post(ctx, "/machine/set-dns", newReq.NodeKey, &newReq) if err != nil { return err } @@ -1696,7 +1695,7 @@ func (c *Direct) ReportWarnableChange(w *health.Warnable, us *health.UnhealthySt // Best effort, no logging: ctx, cancel := context.WithTimeout(c.closedCtx, 5*time.Second) defer cancel() - res, err := np.post(ctx, "/machine/update-health", nodeKey, req) + res, err := np.Post(ctx, "/machine/update-health", nodeKey, req) if err != nil { return } @@ -1741,7 +1740,7 @@ func (c *Direct) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpdate) e ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - res, err := nc.doWithBody(ctx, "PATCH", "/machine/set-device-attr", nodeKey, req) + res, err := nc.DoWithBody(ctx, "PATCH", "/machine/set-device-attr", nodeKey, req) if err != nil { return err } @@ -1782,7 +1781,7 @@ func (c *Direct) sendAuditLog(ctx context.Context, auditLog tailcfg.AuditLogRequ panic("tainted client") } - res, err := nc.post(ctx, "/machine/audit-log", nodeKey, req) + res, err := nc.Post(ctx, "/machine/audit-log", nodeKey, req) if err != nil { return fmt.Errorf("%w: %w", errHTTPPostFailure, err) } @@ -1794,12 +1793,6 @@ func (c *Direct) sendAuditLog(ctx context.Context, auditLog 
tailcfg.AuditLogRequ return nil } -func addLBHeader(req *http.Request, nodeKey key.NodePublic) { - if !nodeKey.IsZero() { - req.Header.Add(tailcfg.LBHeader, nodeKey.String()) - } -} - // makeScreenTimeDetectingDialFunc returns dialFunc, optionally wrapped (on // Apple systems) with a func that sets the returned atomic.Bool for whether // Screen Time seemed to intercept the connection. diff --git a/control/controlclient/noise.go b/control/controlclient/noise.go deleted file mode 100644 index 1daa07620..000000000 --- a/control/controlclient/noise.go +++ /dev/null @@ -1,394 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package controlclient - -import ( - "bytes" - "cmp" - "context" - "encoding/json" - "errors" - "math" - "net/http" - "net/netip" - "net/url" - "sync" - "time" - - "golang.org/x/net/http2" - "tailscale.com/control/controlhttp" - "tailscale.com/control/ts2021" - "tailscale.com/health" - "tailscale.com/net/dnscache" - "tailscale.com/net/netmon" - "tailscale.com/net/tsdial" - "tailscale.com/tailcfg" - "tailscale.com/tstime" - "tailscale.com/types/key" - "tailscale.com/types/logger" - "tailscale.com/util/mak" - "tailscale.com/util/singleflight" -) - -// NoiseClient provides a http.Client to connect to tailcontrol over -// the ts2021 protocol. -type NoiseClient struct { - // Client is an HTTP client to talk to the coordination server. - // It automatically makes a new Noise connection as needed. - // It does not support node key proofs. To do that, call - // noiseClient.getConn instead to make a connection. - *http.Client - - // h2t is the HTTP/2 transport we use a bit to create new - // *http2.ClientConns. We don't use its connection pool and we don't use its - // dialing. We use it for exactly one reason: its idle timeout that can only - // be configured via the HTTP/1 config. And then we call NewClientConn (with - // an existing Noise connection) on the http2.Transport which sets up an - // http2.ClientConn using that idle timeout from an http1.Transport. - h2t *http2.Transport - - // sfDial ensures that two concurrent requests for a noise connection only - // produce one shared one between the two callers. - sfDial singleflight.Group[struct{}, *ts2021.Conn] - - dialer *tsdial.Dialer - dnsCache *dnscache.Resolver - privKey key.MachinePrivate - serverPubKey key.MachinePublic - host string // the host part of serverURL - httpPort string // the default port to dial - httpsPort string // the fallback Noise-over-https port or empty if none - - // dialPlan optionally returns a ControlDialPlan previously received - // from the control server; either the function or the return value can - // be nil. - dialPlan func() *tailcfg.ControlDialPlan - - logf logger.Logf - netMon *netmon.Monitor - health *health.Tracker - - // mu only protects the following variables. - mu sync.Mutex - closed bool - last *ts2021.Conn // or nil - nextID int - connPool map[int]*ts2021.Conn // active connections not yet closed; see ts2021.Conn.Close -} - -// NoiseOpts contains options for the NewNoiseClient function. All fields are -// required unless otherwise specified. -type NoiseOpts struct { - // PrivKey is this node's private key. - PrivKey key.MachinePrivate - // ServerPubKey is the public key of the server. - ServerPubKey key.MachinePublic - // ServerURL is the URL of the server to connect to. - ServerURL string - // Dialer's SystemDial function is used to connect to the server. 
- Dialer *tsdial.Dialer - // DNSCache is the caching Resolver to use to connect to the server. - // - // This field can be nil. - DNSCache *dnscache.Resolver - // Logf is the log function to use. This field can be nil. - Logf logger.Logf - // NetMon is the network monitor that, if set, will be used to get the - // network interface state. This field can be nil; if so, the current - // state will be looked up dynamically. - NetMon *netmon.Monitor - // HealthTracker, if non-nil, is the health tracker to use. - HealthTracker *health.Tracker - // DialPlan, if set, is a function that should return an explicit plan - // on how to connect to the server. - DialPlan func() *tailcfg.ControlDialPlan -} - -// NewNoiseClient returns a new noiseClient for the provided server and machine key. -// serverURL is of the form https://: (no trailing slash). -// -// netMon may be nil, if non-nil it's used to do faster interface lookups. -// dialPlan may be nil -func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) { - logf := opts.Logf - u, err := url.Parse(opts.ServerURL) - if err != nil { - return nil, err - } - - if u.Scheme != "http" && u.Scheme != "https" { - return nil, errors.New("invalid ServerURL scheme, must be http or https") - } - - var httpPort string - var httpsPort string - addr, _ := netip.ParseAddr(u.Hostname()) - isPrivateHost := addr.IsPrivate() || addr.IsLoopback() || u.Hostname() == "localhost" - if port := u.Port(); port != "" { - // If there is an explicit port specified, entirely rely on the scheme, - // unless it's http with a private host in which case we never try using HTTPS. - if u.Scheme == "https" { - httpPort = "" - httpsPort = port - } else if u.Scheme == "http" { - httpPort = port - httpsPort = "443" - if isPrivateHost { - logf("setting empty HTTPS port with http scheme and private host %s", u.Hostname()) - httpsPort = "" - } - } - } else if u.Scheme == "http" && isPrivateHost { - // Whenever the scheme is http and the hostname is an IP address, do not set the HTTPS port, - // as there cannot be a TLS certificate issued for an IP, unless it's a public IP. - httpPort = "80" - httpsPort = "" - } else { - // Otherwise, use the standard ports - httpPort = "80" - httpsPort = "443" - } - - np := &NoiseClient{ - serverPubKey: opts.ServerPubKey, - privKey: opts.PrivKey, - host: u.Hostname(), - httpPort: httpPort, - httpsPort: httpsPort, - dialer: opts.Dialer, - dnsCache: opts.DNSCache, - dialPlan: opts.DialPlan, - logf: opts.Logf, - netMon: opts.NetMon, - health: opts.HealthTracker, - } - - // Create the HTTP/2 Transport using a net/http.Transport - // (which only does HTTP/1) because it's the only way to - // configure certain properties on the http2.Transport. But we - // never actually use the net/http.Transport for any HTTP/1 - // requests. - h2Transport, err := http2.ConfigureTransports(&http.Transport{ - IdleConnTimeout: time.Minute, - }) - if err != nil { - return nil, err - } - np.h2t = h2Transport - - np.Client = &http.Client{Transport: np} - return np, nil -} - -// contextErr is an error that wraps another error and is used to indicate that -// the error was because a context expired. -type contextErr struct { - err error -} - -func (e contextErr) Error() string { - return e.err.Error() -} - -func (e contextErr) Unwrap() error { - return e.err -} - -// getConn returns a ts2021.Conn that can be used to make requests to the -// coordination server. It may return a cached connection or create a new one. 
-// Dials are singleflighted, so concurrent calls to getConn may only dial once. -// As such, context values may not be respected as there are no guarantees that -// the context passed to getConn is the same as the context passed to dial. -func (nc *NoiseClient) getConn(ctx context.Context) (*ts2021.Conn, error) { - nc.mu.Lock() - if last := nc.last; last != nil && last.CanTakeNewRequest() { - nc.mu.Unlock() - return last, nil - } - nc.mu.Unlock() - - for { - // We singeflight the dial to avoid making multiple connections, however - // that means that we can't simply cancel the dial if the context is - // canceled. Instead, we have to additionally check that the context - // which was canceled is our context and retry if our context is still - // valid. - conn, err, _ := nc.sfDial.Do(struct{}{}, func() (*ts2021.Conn, error) { - c, err := nc.dial(ctx) - if err != nil { - if ctx.Err() != nil { - return nil, contextErr{ctx.Err()} - } - return nil, err - } - return c, nil - }) - var ce contextErr - if err == nil || !errors.As(err, &ce) { - return conn, err - } - if ctx.Err() == nil { - // The dial failed because of a context error, but our context - // is still valid. Retry. - continue - } - // The dial failed because our context was canceled. Return the - // underlying error. - return nil, ce.Unwrap() - } -} - -func (nc *NoiseClient) RoundTrip(req *http.Request) (*http.Response, error) { - ctx := req.Context() - conn, err := nc.getConn(ctx) - if err != nil { - return nil, err - } - return conn.RoundTrip(req) -} - -// connClosed removes the connection with the provided ID from the pool -// of active connections. -func (nc *NoiseClient) connClosed(id int) { - nc.mu.Lock() - defer nc.mu.Unlock() - conn := nc.connPool[id] - if conn != nil { - delete(nc.connPool, id) - if nc.last == conn { - nc.last = nil - } - } -} - -// Close closes all the underlying noise connections. -// It is a no-op and returns nil if the connection is already closed. -func (nc *NoiseClient) Close() error { - nc.mu.Lock() - nc.closed = true - conns := nc.connPool - nc.connPool = nil - nc.mu.Unlock() - - var errs []error - for _, c := range conns { - if err := c.Close(); err != nil { - errs = append(errs, err) - } - } - return errors.Join(errs...) -} - -// dial opens a new connection to tailcontrol, fetching the server noise key -// if not cached. -func (nc *NoiseClient) dial(ctx context.Context) (*ts2021.Conn, error) { - nc.mu.Lock() - connID := nc.nextID - nc.nextID++ - nc.mu.Unlock() - - if tailcfg.CurrentCapabilityVersion > math.MaxUint16 { - // Panic, because a test should have started failing several - // thousand version numbers before getting to this point. - panic("capability version is too high to fit in the wire protocol") - } - - var dialPlan *tailcfg.ControlDialPlan - if nc.dialPlan != nil { - dialPlan = nc.dialPlan() - } - - // If we have a dial plan, then set our timeout as slightly longer than - // the maximum amount of time contained therein; we assume that - // explicit instructions on timeouts are more useful than a single - // hard-coded timeout. - // - // The default value of 5 is chosen so that, when there's no dial plan, - // we retain the previous behaviour of 10 seconds end-to-end timeout. - timeoutSec := 5.0 - if dialPlan != nil { - for _, c := range dialPlan.Candidates { - if v := c.DialStartDelaySec + c.DialTimeoutSec; v > timeoutSec { - timeoutSec = v - } - } - } - - // After we establish a connection, we need some time to actually - // upgrade it into a Noise connection. 
With a ballpark worst-case RTT - // of 1000ms, give ourselves an extra 5 seconds to complete the - // handshake. - timeoutSec += 5 - - // Be extremely defensive and ensure that the timeout is in the range - // [5, 60] seconds (e.g. if we accidentally get a negative number). - if timeoutSec > 60 { - timeoutSec = 60 - } else if timeoutSec < 5 { - timeoutSec = 5 - } - - timeout := time.Duration(timeoutSec * float64(time.Second)) - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - clientConn, err := (&controlhttp.Dialer{ - Hostname: nc.host, - HTTPPort: nc.httpPort, - HTTPSPort: cmp.Or(nc.httpsPort, controlhttp.NoPort), - MachineKey: nc.privKey, - ControlKey: nc.serverPubKey, - ProtocolVersion: uint16(tailcfg.CurrentCapabilityVersion), - Dialer: nc.dialer.SystemDial, - DNSCache: nc.dnsCache, - DialPlan: dialPlan, - Logf: nc.logf, - NetMon: nc.netMon, - HealthTracker: nc.health, - Clock: tstime.StdClock{}, - }).Dial(ctx) - if err != nil { - return nil, err - } - - ncc, err := ts2021.New(clientConn.Conn, nc.h2t, connID, nc.connClosed) - if err != nil { - return nil, err - } - - nc.mu.Lock() - if nc.closed { - nc.mu.Unlock() - ncc.Close() // Needs to be called without holding the lock. - return nil, errors.New("noise client closed") - } - defer nc.mu.Unlock() - mak.Set(&nc.connPool, connID, ncc) - nc.last = ncc - return ncc, nil -} - -// post does a POST to the control server at the given path, JSON-encoding body. -// The provided nodeKey is an optional load balancing hint. -func (nc *NoiseClient) post(ctx context.Context, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { - return nc.doWithBody(ctx, "POST", path, nodeKey, body) -} - -func (nc *NoiseClient) doWithBody(ctx context.Context, method, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { - jbody, err := json.Marshal(body) - if err != nil { - return nil, err - } - req, err := http.NewRequestWithContext(ctx, method, "https://"+nc.host+path, bytes.NewReader(jbody)) - if err != nil { - return nil, err - } - addLBHeader(req, nodeKey) - req.Header.Set("Content-Type", "application/json") - conn, err := nc.getConn(ctx) - if err != nil { - return nil, err - } - return conn.RoundTrip(req) -} diff --git a/control/controlhttp/constants.go b/control/controlhttp/constants.go index 58fed1b76..359410ae9 100644 --- a/control/controlhttp/constants.go +++ b/control/controlhttp/constants.go @@ -78,8 +78,8 @@ type Dialer struct { // dropped. Logf logger.Logf - // NetMon is the [netmon.Monitor] to use for this Dialer. It must be - // non-nil. + // NetMon is the [netmon.Monitor] to use for this Dialer. + // It is optional. NetMon *netmon.Monitor // HealthTracker, if non-nil, is the health tracker to use. diff --git a/control/ts2021/client.go b/control/ts2021/client.go new file mode 100644 index 000000000..9a9a3ded8 --- /dev/null +++ b/control/ts2021/client.go @@ -0,0 +1,289 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ts2021 + +import ( + "bytes" + "cmp" + "context" + "encoding/json" + "errors" + "fmt" + "log" + "math" + "net" + "net/http" + "net/netip" + "net/url" + "sync" + "time" + + "tailscale.com/control/controlhttp" + "tailscale.com/health" + "tailscale.com/net/dnscache" + "tailscale.com/net/netmon" + "tailscale.com/net/tsdial" + "tailscale.com/tailcfg" + "tailscale.com/tstime" + "tailscale.com/types/key" + "tailscale.com/types/logger" +) + +// Client provides a http.Client to connect to tailcontrol over +// the ts2021 protocol. 
+type Client struct { + // Client is an HTTP client to talk to the coordination server. + // It automatically makes a new Noise connection as needed. + *http.Client + + logf logger.Logf // non-nil + opts ClientOpts + host string // the host part of serverURL + httpPort string // the default port to dial + httpsPort string // the fallback Noise-over-https port or empty if none + + // mu protects the following + mu sync.Mutex + closed bool +} + +// ClientOpts contains options for the [NewClient] function. All fields are +// required unless otherwise specified. +type ClientOpts struct { + // ServerURL is the URL of the server to connect to. + // It is of the form https://<host>:<port> (no trailing slash). + ServerURL string + + // PrivKey is this node's private key. + PrivKey key.MachinePrivate + + // ServerPubKey is the public key of the server. + ServerPubKey key.MachinePublic + + // Dialer's SystemDial function is used to connect to the server. + Dialer *tsdial.Dialer + + // Optional fields follow + + // Logf is the log function to use. + // If nil, log.Printf is used. + Logf logger.Logf + + // NetMon is the network monitor that will be used to get the + // network interface state. This field can be nil; if so, the current + // state will be looked up dynamically. + NetMon *netmon.Monitor + + // DNSCache is the caching Resolver to use to connect to the server. + // + // This field can be nil. + DNSCache *dnscache.Resolver + + // HealthTracker, if non-nil, is the health tracker to use. + HealthTracker *health.Tracker + + // DialPlan, if set, is a function that should return an explicit plan + // on how to connect to the server. + DialPlan func() *tailcfg.ControlDialPlan + + // ProtocolVersion, if non-zero, specifies an alternate + // protocol version to use instead of the default + // of [tailcfg.CurrentCapabilityVersion]. + ProtocolVersion uint16 +} + +// NewClient returns a new Client for the provided server and machine key. +// +// opts.NetMon may be nil; if non-nil, it's used to do faster interface lookups. +// opts.DialPlan may be nil. +func NewClient(opts ClientOpts) (*Client, error) { + logf := opts.Logf + if logf == nil { + logf = log.Printf + } + if opts.ServerURL == "" { + return nil, errors.New("ServerURL is required") + } + if opts.PrivKey.IsZero() { + return nil, errors.New("PrivKey is required") + } + if opts.ServerPubKey.IsZero() { + return nil, errors.New("ServerPubKey is required") + } + if opts.Dialer == nil { + return nil, errors.New("Dialer is required") + } + + u, err := url.Parse(opts.ServerURL) + if err != nil { + return nil, fmt.Errorf("invalid ClientOpts.ServerURL: %w", err) + } + if u.Scheme != "http" && u.Scheme != "https" { + return nil, errors.New("invalid ServerURL scheme, must be http or https") + } + + httpPort, httpsPort := "80", "443" + addr, _ := netip.ParseAddr(u.Hostname()) + isPrivateHost := addr.IsPrivate() || addr.IsLoopback() || u.Hostname() == "localhost" + if port := u.Port(); port != "" { + // If there is an explicit port specified, entirely rely on the scheme, + // unless it's http with a private host in which case we never try using HTTPS.
+ if u.Scheme == "https" { + httpPort = "" + httpsPort = port + } else if u.Scheme == "http" { + httpPort = port + httpsPort = "443" + if isPrivateHost { + logf("setting empty HTTPS port with http scheme and private host %s", u.Hostname()) + httpsPort = "" + } + } + } else if u.Scheme == "http" && isPrivateHost { + // Whenever the scheme is http and the hostname is an IP address, do not set the HTTPS port, + // as there cannot be a TLS certificate issued for an IP, unless it's a public IP. + httpPort = "80" + httpsPort = "" + } + + np := &Client{ + opts: opts, + host: u.Hostname(), + httpPort: httpPort, + httpsPort: httpsPort, + logf: logf, + } + + tr := &http.Transport{ + Protocols: new(http.Protocols), + MaxConnsPerHost: 1, + } + // We force only HTTP/2 for this transport, which is what the control server + // speaks inside the ts2021 Noise encryption. But Go doesn't know about that, + // so we use "SetUnencryptedHTTP2" even though it's actually encrypted. + tr.Protocols.SetUnencryptedHTTP2(true) + tr.DialTLSContext = func(ctx context.Context, network, addr string) (net.Conn, error) { + return np.dial(ctx) + } + + np.Client = &http.Client{Transport: tr} + return np, nil +} + +// Close closes all the underlying noise connections. +// It is a no-op and returns nil if the connection is already closed. +func (nc *Client) Close() error { + nc.mu.Lock() + defer nc.mu.Unlock() + nc.closed = true + nc.Client.CloseIdleConnections() + return nil +} + +// dial opens a new connection to tailcontrol, fetching the server noise key +// if not cached. +func (nc *Client) dial(ctx context.Context) (*Conn, error) { + if tailcfg.CurrentCapabilityVersion > math.MaxUint16 { + // Panic, because a test should have started failing several + // thousand version numbers before getting to this point. + panic("capability version is too high to fit in the wire protocol") + } + + var dialPlan *tailcfg.ControlDialPlan + if nc.opts.DialPlan != nil { + dialPlan = nc.opts.DialPlan() + } + + // If we have a dial plan, then set our timeout as slightly longer than + // the maximum amount of time contained therein; we assume that + // explicit instructions on timeouts are more useful than a single + // hard-coded timeout. + // + // The default value of 5 is chosen so that, when there's no dial plan, + // we retain the previous behaviour of 10 seconds end-to-end timeout. + timeoutSec := 5.0 + if dialPlan != nil { + for _, c := range dialPlan.Candidates { + if v := c.DialStartDelaySec + c.DialTimeoutSec; v > timeoutSec { + timeoutSec = v + } + } + } + + // After we establish a connection, we need some time to actually + // upgrade it into a Noise connection. With a ballpark worst-case RTT + // of 1000ms, give ourselves an extra 5 seconds to complete the + // handshake. + timeoutSec += 5 + + // Be extremely defensive and ensure that the timeout is in the range + // [5, 60] seconds (e.g. if we accidentally get a negative number). 
+ if timeoutSec > 60 { + timeoutSec = 60 + } else if timeoutSec < 5 { + timeoutSec = 5 + } + + timeout := time.Duration(timeoutSec * float64(time.Second)) + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + chd := &controlhttp.Dialer{ + Hostname: nc.host, + HTTPPort: nc.httpPort, + HTTPSPort: cmp.Or(nc.httpsPort, controlhttp.NoPort), + MachineKey: nc.opts.PrivKey, + ControlKey: nc.opts.ServerPubKey, + ProtocolVersion: cmp.Or(nc.opts.ProtocolVersion, uint16(tailcfg.CurrentCapabilityVersion)), + Dialer: nc.opts.Dialer.SystemDial, + DNSCache: nc.opts.DNSCache, + DialPlan: dialPlan, + Logf: nc.logf, + NetMon: nc.opts.NetMon, + HealthTracker: nc.opts.HealthTracker, + Clock: tstime.StdClock{}, + } + clientConn, err := chd.Dial(ctx) + if err != nil { + return nil, err + } + + ncc := NewConn(clientConn.Conn) + + nc.mu.Lock() + if nc.closed { + nc.mu.Unlock() + ncc.Close() // Needs to be called without holding the lock. + return nil, errors.New("noise client closed") + } + defer nc.mu.Unlock() + return ncc, nil +} + +// post does a POST to the control server at the given path, JSON-encoding body. +// The provided nodeKey is an optional load balancing hint. +func (nc *Client) Post(ctx context.Context, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { + return nc.DoWithBody(ctx, "POST", path, nodeKey, body) +} + +func (nc *Client) DoWithBody(ctx context.Context, method, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { + jbody, err := json.Marshal(body) + if err != nil { + return nil, err + } + req, err := http.NewRequestWithContext(ctx, method, "https://"+nc.host+path, bytes.NewReader(jbody)) + if err != nil { + return nil, err + } + AddLBHeader(req, nodeKey) + req.Header.Set("Content-Type", "application/json") + return nc.Do(req) +} + +// AddLBHeader adds the load balancer header to req if nodeKey is non-zero. 
+func AddLBHeader(req *http.Request, nodeKey key.NodePublic) { + if !nodeKey.IsZero() { + req.Header.Add(tailcfg.LBHeader, nodeKey.String()) + } +} diff --git a/control/controlclient/noise_test.go b/control/ts2021/client_test.go similarity index 80% rename from control/controlclient/noise_test.go rename to control/ts2021/client_test.go index 0022bdf88..72fa1f442 100644 --- a/control/controlclient/noise_test.go +++ b/control/ts2021/client_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package controlclient +package ts2021 import ( "context" @@ -10,19 +10,20 @@ import ( "io" "math" "net/http" + "net/http/httptrace" + "sync/atomic" "testing" "time" "golang.org/x/net/http2" "tailscale.com/control/controlhttp/controlhttpserver" - "tailscale.com/control/ts2021" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" "tailscale.com/tstest/nettest" "tailscale.com/types/key" "tailscale.com/types/logger" - "tailscale.com/util/eventbus/eventbustest" + "tailscale.com/util/must" ) // maxAllowedNoiseVersion is the highest we expect the Tailscale @@ -55,14 +56,23 @@ func TestNoiseClientHTTP2Upgrade_earlyPayload(t *testing.T) { }.run(t) } -func makeClientWithURL(t *testing.T, url string) *NoiseClient { - nc, err := NewNoiseClient(NoiseOpts{ - Logf: t.Logf, - ServerURL: url, +var ( + testPrivKey = key.NewMachine() + testServerPub = key.NewMachine().Public() +) + +func makeClientWithURL(t *testing.T, url string) *Client { + nc, err := NewClient(ClientOpts{ + Logf: t.Logf, + PrivKey: testPrivKey, + ServerPubKey: testServerPub, + ServerURL: url, + Dialer: tsdial.NewDialer(netmon.NewStatic()), }) if err != nil { t.Fatal(err) } + t.Cleanup(func() { nc.Close() }) return nc } @@ -176,7 +186,6 @@ func (tt noiseClientTest) run(t *testing.T) { serverPrivate := key.NewMachine() clientPrivate := key.NewMachine() chalPrivate := key.NewChallenge() - bus := eventbustest.NewBus(t) const msg = "Hello, client" h2 := &http2.Server{} @@ -196,12 +205,11 @@ func (tt noiseClientTest) run(t *testing.T) { defer hs.Close() dialer := tsdial.NewDialer(netmon.NewStatic()) - dialer.SetBus(bus) if nettest.PreferMemNetwork() { dialer.SetSystemDialerForTest(nw.Dial) } - nc, err := NewNoiseClient(NoiseOpts{ + nc, err := NewClient(ClientOpts{ PrivKey: clientPrivate, ServerPubKey: serverPrivate.Public(), ServerURL: hs.URL, @@ -212,28 +220,39 @@ func (tt noiseClientTest) run(t *testing.T) { t.Fatal(err) } - // Get a conn and verify it read its early payload before the http/2 - // handshake. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - c, err := nc.getConn(ctx) - if err != nil { - t.Fatal(err) - } - payload, err := c.GetEarlyPayload(ctx) - if err != nil { - t.Fatal("timed out waiting for didReadHeaderCh") - } + var sawConn atomic.Bool + trace := httptrace.WithClientTrace(t.Context(), &httptrace.ClientTrace{ + GotConn: func(ci httptrace.GotConnInfo) { + ncc, ok := ci.Conn.(*Conn) + if !ok { + // This trace hook sees two dials: the lower-level controlhttp upgrade's + // dial (a tsdial.sysConn), and then the *ts2021.Conn we want. + // Ignore the first one. 
+ return + } + sawConn.Store(true) - gotNonNil := payload != nil - if gotNonNil != tt.sendEarlyPayload { - t.Errorf("sendEarlyPayload = %v but got earlyPayload = %T", tt.sendEarlyPayload, payload) - } - if payload != nil { - if payload.NodeKeyChallenge != chalPrivate.Public() { - t.Errorf("earlyPayload.NodeKeyChallenge = %v; want %v", payload.NodeKeyChallenge, chalPrivate.Public()) - } - } + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + payload, err := ncc.GetEarlyPayload(ctx) + if err != nil { + t.Errorf("GetEarlyPayload: %v", err) + return + } + + gotNonNil := payload != nil + if gotNonNil != tt.sendEarlyPayload { + t.Errorf("sendEarlyPayload = %v but got earlyPayload = %T", tt.sendEarlyPayload, payload) + } + if payload != nil { + if payload.NodeKeyChallenge != chalPrivate.Public() { + t.Errorf("earlyPayload.NodeKeyChallenge = %v; want %v", payload.NodeKeyChallenge, chalPrivate.Public()) + } + } + }, + }) + req := must.Get(http.NewRequestWithContext(trace, "GET", "https://unused.example/", nil)) checkRes := func(t *testing.T, res *http.Response) { t.Helper() @@ -247,15 +266,19 @@ func (tt noiseClientTest) run(t *testing.T) { } } - // And verify we can do HTTP/2 against that conn. - res, err := (&http.Client{Transport: c}).Get("https://unused.example/") + // Verify we can do HTTP/2 against that conn. + res, err := nc.Do(req) if err != nil { t.Fatal(err) } checkRes(t, res) + if !sawConn.Load() { + t.Error("ClientTrace.GotConn never saw the *ts2021.Conn") + } + // And try using the high-level nc.post API as well. - res, err = nc.post(context.Background(), "/", key.NodePublic{}, nil) + res, err = nc.Post(context.Background(), "/", key.NodePublic{}, nil) if err != nil { t.Fatal(err) } @@ -310,7 +333,7 @@ func (up *Upgrader) ServeHTTP(w http.ResponseWriter, r *http.Request) { // https://httpwg.org/specs/rfc7540.html#rfc.section.4.1 (Especially not // an HTTP/2 settings frame, which isn't of type 'T') var notH2Frame [5]byte - copy(notH2Frame[:], ts2021.EarlyPayloadMagic) + copy(notH2Frame[:], EarlyPayloadMagic) var lenBuf [4]byte binary.BigEndian.PutUint32(lenBuf[:], uint32(len(earlyJSON))) // These writes are all buffered by caller, so fine to do them diff --git a/control/ts2021/conn.go b/control/ts2021/conn.go index 99b1f24cb..ecf184d3c 100644 --- a/control/ts2021/conn.go +++ b/control/ts2021/conn.go @@ -13,10 +13,8 @@ import ( "encoding/json" "errors" "io" - "net/http" "sync" - "golang.org/x/net/http2" "tailscale.com/control/controlbase" "tailscale.com/tailcfg" ) @@ -27,11 +25,11 @@ import ( // the pool when the connection is closed, properly handles an optional "early // payload" that's sent prior to beginning the HTTP/2 session, and provides a // way to return a connection to a pool when the connection is closed. +// +// Use [NewConn] to build a new Conn if you want [Conn.GetEarlyPayload] to work. +// Otherwise making a Conn directly, only setting Conn, is fine. type Conn struct { *controlbase.Conn - id int - onClose func(int) - h2cc *http2.ClientConn readHeaderOnce sync.Once // guards init of reader field reader io.Reader // (effectively Conn.Reader after header) @@ -40,31 +38,18 @@ type Conn struct { earlyPayloadErr error } -// New creates a new Conn that wraps the given controlbase.Conn. +// NewConn creates a new Conn that wraps the given controlbase.Conn. // // h2t is the HTTP/2 transport to use for the connection; a new // http2.ClientConn will be created that reads from the returned Conn. // // connID should be a unique ID for this connection. 
When the Conn is closed, // the onClose function will be called with the connID if it is non-nil. -func New(conn *controlbase.Conn, h2t *http2.Transport, connID int, onClose func(int)) (*Conn, error) { - ncc := &Conn{ +func NewConn(conn *controlbase.Conn) *Conn { + return &Conn{ Conn: conn, - id: connID, - onClose: onClose, earlyPayloadReady: make(chan struct{}), } - h2cc, err := h2t.NewClientConn(ncc) - if err != nil { - return nil, err - } - ncc.h2cc = h2cc - return ncc, nil -} - -// RoundTrip implements the http.RoundTripper interface. -func (c *Conn) RoundTrip(r *http.Request) (*http.Response, error) { - return c.h2cc.RoundTrip(r) } // GetEarlyPayload waits for the early Noise payload to arrive. @@ -74,6 +59,15 @@ func (c *Conn) RoundTrip(r *http.Request) (*http.Response, error) { // early Noise payload is ready (if any) and will return the same result for // the lifetime of the Conn. func (c *Conn) GetEarlyPayload(ctx context.Context) (*tailcfg.EarlyNoise, error) { + if c.earlyPayloadReady == nil { + return nil, errors.New("Conn was not created with NewConn; early payload not supported") + } + select { + case <-c.earlyPayloadReady: + return c.earlyPayload, c.earlyPayloadErr + default: + go c.readHeaderOnce.Do(c.readHeader) + } select { case <-c.earlyPayloadReady: return c.earlyPayload, c.earlyPayloadErr @@ -82,12 +76,6 @@ func (c *Conn) GetEarlyPayload(ctx context.Context) (*tailcfg.EarlyNoise, error) } } -// CanTakeNewRequest reports whether the underlying HTTP/2 connection can take -// a new request, meaning it has not been closed or received or sent a GOAWAY. -func (c *Conn) CanTakeNewRequest() bool { - return c.h2cc.CanTakeNewRequest() -} - // The first 9 bytes from the server to client over Noise are either an HTTP/2 // settings frame (a normal HTTP/2 setup) or, as we added later, an "early payload" // header that's also 9 bytes long: 5 bytes (EarlyPayloadMagic) followed by 4 bytes @@ -122,7 +110,9 @@ func (c *Conn) Read(p []byte) (n int, err error) { // c.earlyPayload, closing c.earlyPayloadReady, and initializing c.reader for // future reads. func (c *Conn) readHeader() { - defer close(c.earlyPayloadReady) + if c.earlyPayloadReady != nil { + defer close(c.earlyPayloadReady) + } setErr := func(err error) { c.reader = returnErrReader{err} @@ -156,14 +146,3 @@ func (c *Conn) readHeader() { } c.reader = c.Conn } - -// Close closes the connection. -func (c *Conn) Close() error { - if err := c.Conn.Close(); err != nil { - return err - } - if c.onClose != nil { - c.onClose(c.id) - } - return nil -} diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index bec196a2e..87b58f2a0 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -45,6 +45,13 @@ func NewDialer(netMon *netmon.Monitor) *Dialer { return d } +// NewFromFuncForDebug is like NewDialer but takes a netx.DialFunc +// and no netMon. It's meant exclusively for the "tailscale debug ts2021" +// debug command, and perhaps tests. +func NewFromFuncForDebug(logf logger.Logf, dial netx.DialFunc) *Dialer { + return &Dialer{sysDialForTest: dial, Logf: logf} +} + // Dialer dials out of tailscaled, while taking care of details while // handling the dozens of edge cases depending on the server mode // (TUN, netstack), the OS network sandboxing style (macOS/iOS @@ -420,7 +427,7 @@ func (d *Dialer) SetSystemDialerForTest(fn netx.DialFunc) { // Control and (in the future, as of 2022-04-27) DERPs.. 
func (d *Dialer) SystemDial(ctx context.Context, network, addr string) (net.Conn, error) { d.mu.Lock() - if d.netMon == nil { + if d.netMon == nil && d.sysDialForTest == nil { d.mu.Unlock() if testenv.InTest() { panic("SystemDial requires a netmon.Monitor; call SetNetMon first") diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 1f9609745..a0d9f9ebb 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -125,7 +125,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) LDW tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnext+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/control/ts2021 from tailscale.com/control/controlclient @@ -328,13 +328,10 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy - golang.org/x/net/http2 from tailscale.com/control/controlclient+ - golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ LDW golang.org/x/net/internal/socks from golang.org/x/net/proxy @@ -378,7 +375,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip+ - compress/gzip from golang.org/x/net/http2+ + compress/gzip from internal/profile+ W compress/zlib from debug/pe container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp container/list from crypto/tls+ From 3c32f87624ca2cbe384dc4b7a2e3b1925c672e5d Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 2 Oct 2025 09:18:55 -0700 Subject: [PATCH 1470/1708] feature/relayserver: use eventbus.Monitor to simplify lifecycle management (#17234) Instead of using separate channels to manage the lifecycle of the eventbus client, use the recently-added eventbus.Monitor, which handles signaling the processing loop to stop and waiting for it to complete. This allows us to simplify some of the setup and cleanup code in the relay server. Updates #15160 Change-Id: Ia1a47ce2e5a31bc8f546dca4c56c3141a40d67af Signed-off-by: M. J. 
Fromberger --- feature/relayserver/relayserver.go | 135 +++++++++++------------- feature/relayserver/relayserver_test.go | 12 +-- 2 files changed, 70 insertions(+), 77 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 91d07484c..95bf29a11 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -82,11 +82,11 @@ type extension struct { logf logger.Logf bus *eventbus.Bus - mu sync.Mutex // guards the following fields - shutdown bool + mu sync.Mutex // guards the following fields + shutdown bool + port *int // ipn.Prefs.RelayServerPort, nil if disabled - disconnectFromBusCh chan struct{} // non-nil if consumeEventbusTopics is running, closed to signal it to return - busDoneCh chan struct{} // non-nil if consumeEventbusTopics is running, closed when it returns + eventSubs *eventbus.Monitor // nil if not connected to eventbus debugSessionsCh chan chan []status.ServerSession // non-nil if consumeEventbusTopics is running hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer } @@ -119,15 +119,13 @@ func (e *extension) handleBusLifetimeLocked() { if !busShouldBeRunning { e.disconnectFromBusLocked() return - } - if e.busDoneCh != nil { + } else if e.eventSubs != nil { return // already running } - port := *e.port - e.disconnectFromBusCh = make(chan struct{}) - e.busDoneCh = make(chan struct{}) + + ec := e.bus.Client("relayserver.extension") e.debugSessionsCh = make(chan chan []status.ServerSession) - go e.consumeEventbusTopics(port) + e.eventSubs = ptr.To(ec.Monitor(e.consumeEventbusTopics(ec, *e.port))) } func (e *extension) selfNodeViewChanged(nodeView tailcfg.NodeView) { @@ -175,77 +173,72 @@ var overrideAddrs = sync.OnceValue(func() (ret []netip.Addr) { // consumeEventbusTopics serves endpoint allocation requests over the eventbus. // It also serves [relayServer] debug information on a channel. -// consumeEventbusTopics must never acquire [extension.mu], which can be held by -// other goroutines while waiting to receive on [extension.busDoneCh] or the +// consumeEventbusTopics must never acquire [extension.mu], which can be held +// by other goroutines while waiting to receive on [extension.eventSubs] or the // inner [extension.debugSessionsCh] channel. -func (e *extension) consumeEventbusTopics(port int) { - defer close(e.busDoneCh) +func (e *extension) consumeEventbusTopics(ec *eventbus.Client, port int) func(*eventbus.Client) { + reqSub := eventbus.Subscribe[magicsock.UDPRelayAllocReq](ec) + respPub := eventbus.Publish[magicsock.UDPRelayAllocResp](ec) + debugSessionsCh := e.debugSessionsCh - eventClient := e.bus.Client("relayserver.extension") - reqSub := eventbus.Subscribe[magicsock.UDPRelayAllocReq](eventClient) - respPub := eventbus.Publish[magicsock.UDPRelayAllocResp](eventClient) - defer eventClient.Close() - - var rs relayServer // lazily initialized - defer func() { - if rs != nil { - rs.Close() - } - }() - for { - select { - case <-e.disconnectFromBusCh: - return - case <-eventClient.Done(): - return - case respCh := <-e.debugSessionsCh: - if rs == nil { - // Don't initialize the server simply for a debug request. 
- respCh <- nil - continue + return func(ec *eventbus.Client) { + var rs relayServer // lazily initialized + defer func() { + if rs != nil { + rs.Close() } - sessions := rs.GetSessions() - respCh <- sessions - case req := <-reqSub.Events(): - if rs == nil { - var err error - rs, err = udprelay.NewServer(e.logf, port, overrideAddrs()) + }() + for { + select { + case <-ec.Done(): + return + case respCh := <-debugSessionsCh: + if rs == nil { + // Don't initialize the server simply for a debug request. + respCh <- nil + continue + } + sessions := rs.GetSessions() + respCh <- sessions + case req := <-reqSub.Events(): + if rs == nil { + var err error + rs, err = udprelay.NewServer(e.logf, port, overrideAddrs()) + if err != nil { + e.logf("error initializing server: %v", err) + continue + } + } + se, err := rs.AllocateEndpoint(req.Message.ClientDisco[0], req.Message.ClientDisco[1]) if err != nil { - e.logf("error initializing server: %v", err) + e.logf("error allocating endpoint: %v", err) continue } - } - se, err := rs.AllocateEndpoint(req.Message.ClientDisco[0], req.Message.ClientDisco[1]) - if err != nil { - e.logf("error allocating endpoint: %v", err) - continue - } - respPub.Publish(magicsock.UDPRelayAllocResp{ - ReqRxFromNodeKey: req.RxFromNodeKey, - ReqRxFromDiscoKey: req.RxFromDiscoKey, - Message: &disco.AllocateUDPRelayEndpointResponse{ - Generation: req.Message.Generation, - UDPRelayEndpoint: disco.UDPRelayEndpoint{ - ServerDisco: se.ServerDisco, - ClientDisco: se.ClientDisco, - LamportID: se.LamportID, - VNI: se.VNI, - BindLifetime: se.BindLifetime.Duration, - SteadyStateLifetime: se.SteadyStateLifetime.Duration, - AddrPorts: se.AddrPorts, + respPub.Publish(magicsock.UDPRelayAllocResp{ + ReqRxFromNodeKey: req.RxFromNodeKey, + ReqRxFromDiscoKey: req.RxFromDiscoKey, + Message: &disco.AllocateUDPRelayEndpointResponse{ + Generation: req.Message.Generation, + UDPRelayEndpoint: disco.UDPRelayEndpoint{ + ServerDisco: se.ServerDisco, + ClientDisco: se.ClientDisco, + LamportID: se.LamportID, + VNI: se.VNI, + BindLifetime: se.BindLifetime.Duration, + SteadyStateLifetime: se.SteadyStateLifetime.Duration, + AddrPorts: se.AddrPorts, + }, }, - }, - }) + }) + } } } } func (e *extension) disconnectFromBusLocked() { - if e.busDoneCh != nil { - close(e.disconnectFromBusCh) - <-e.busDoneCh - e.busDoneCh = nil - e.disconnectFromBusCh = nil + if e.eventSubs != nil { + e.eventSubs.Close() + e.eventSubs = nil e.debugSessionsCh = nil } } @@ -270,7 +263,7 @@ func (e *extension) serverStatus() status.ServerStatus { UDPPort: nil, Sessions: nil, } - if e.port == nil || e.busDoneCh == nil { + if e.port == nil || e.eventSubs == nil { return st } st.UDPPort = ptr.To(*e.port) @@ -281,7 +274,7 @@ func (e *extension) serverStatus() status.ServerStatus { resp := <-ch st.Sessions = resp return st - case <-e.busDoneCh: + case <-e.eventSubs.Done(): return st } } diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index d3fc36a83..89c004dc7 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -101,8 +101,8 @@ func Test_extension_profileStateChanged(t *testing.T) { } defer e.disconnectFromBusLocked() e.profileStateChanged(ipn.LoginProfileView{}, tt.args.prefs, tt.args.sameNode) - if tt.wantBusRunning != (e.busDoneCh != nil) { - t.Errorf("wantBusRunning: %v != (e.busDoneCh != nil): %v", tt.wantBusRunning, e.busDoneCh != nil) + if tt.wantBusRunning != (e.eventSubs != nil) { + t.Errorf("wantBusRunning: %v != (e.eventSubs != nil): %v", 
tt.wantBusRunning, e.eventSubs != nil) } if (tt.wantPort == nil) != (e.port == nil) { t.Errorf("(tt.wantPort == nil): %v != (e.port == nil): %v", tt.wantPort == nil, e.port == nil) @@ -118,7 +118,7 @@ func Test_extension_handleBusLifetimeLocked(t *testing.T) { name string shutdown bool port *int - busDoneCh chan struct{} + eventSubs *eventbus.Monitor hasNodeAttrDisableRelayServer bool wantBusRunning bool }{ @@ -157,13 +157,13 @@ func Test_extension_handleBusLifetimeLocked(t *testing.T) { bus: eventbus.New(), shutdown: tt.shutdown, port: tt.port, - busDoneCh: tt.busDoneCh, + eventSubs: tt.eventSubs, hasNodeAttrDisableRelayServer: tt.hasNodeAttrDisableRelayServer, } e.handleBusLifetimeLocked() defer e.disconnectFromBusLocked() - if tt.wantBusRunning != (e.busDoneCh != nil) { - t.Errorf("wantBusRunning: %v != (e.busDoneCh != nil): %v", tt.wantBusRunning, e.busDoneCh != nil) + if tt.wantBusRunning != (e.eventSubs != nil) { + t.Errorf("wantBusRunning: %v != (e.eventSubs != nil): %v", tt.wantBusRunning, e.eventSubs != nil) } }) } From 127a9672079213bdcf8d4f92c53e3442e231745b Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 2 Oct 2025 09:31:42 -0700 Subject: [PATCH 1471/1708] appc,*: publish events for route updates and storage (#17392) Add and wire up event publishers for these two event types in the AppConnector. Nothing currently subscribes to them, so this is harmless. Subscribers for these events will be added in a near-future commit. As part of this, move the appc.RouteInfo type to the types/appctype package. It does not contain any package-specific details from appc. Beside it, add appctype.RouteUpdate to carry route update event state, likewise not specific to appc. Update all usage of the appc.* types throughout to use appctype.* instead, and update depaware files to reflect these changes. Add a Close method to the AppConnector to make sure the client gets cleaned up when the connector is dropped (we re-create connectors). Update the unit tests in the appc package to also check the events published alongside calls to the RouteAdvertiser. For now the tests still rely on the RouteAdvertiser for correctness; this is OK for now as the two methods are always performed together. In the near future, we need to rework the tests so not require that, but that will require building some more test fixtures that we can handle separately. Updates #15160 Updates #17192 Change-Id: I184670ba2fb920e0d2cb2be7c6816259bca77afe Signed-off-by: M. J. 
Fromberger --- appc/appconnector.go | 70 +++++++---- appc/appconnector_test.go | 189 +++++++++++++++++++++++++++-- client/local/local.go | 8 +- cmd/derper/depaware.txt | 7 +- cmd/k8s-operator/depaware.txt | 4 +- cmd/tailscale/cli/appcroutes.go | 6 +- cmd/tailscale/depaware.txt | 5 +- cmd/tailscaled/depaware-min.txt | 4 +- cmd/tailscaled/depaware-minbox.txt | 4 +- cmd/tailscaled/depaware.txt | 4 +- cmd/tsidp/depaware.txt | 4 +- ipn/ipnlocal/local.go | 20 +-- ipn/ipnlocal/local_test.go | 13 +- ipn/ipnlocal/peerapi_test.go | 7 +- ipn/localapi/localapi.go | 4 +- tsnet/depaware.txt | 4 +- types/appctype/appconnector.go | 20 +++ 17 files changed, 294 insertions(+), 79 deletions(-) diff --git a/appc/appconnector.go b/appc/appconnector.go index c86bf2d0f..291884065 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -12,12 +12,14 @@ package appc import ( "context" "fmt" + "maps" "net/netip" "slices" "strings" "sync" "time" + "tailscale.com/types/appctype" "tailscale.com/types/logger" "tailscale.com/types/views" "tailscale.com/util/clientmetric" @@ -114,19 +116,6 @@ func metricStoreRoutes(rate, nRoutes int64) { recordMetric(nRoutes, metricStoreRoutesNBuckets, metricStoreRoutesN) } -// RouteInfo is a data structure used to persist the in memory state of an AppConnector -// so that we can know, even after a restart, which routes came from ACLs and which were -// learned from domains. -type RouteInfo struct { - // Control is the routes from the 'routes' section of an app connector acl. - Control []netip.Prefix `json:",omitempty"` - // Domains are the routes discovered by observing DNS lookups for configured domains. - Domains map[string][]netip.Addr `json:",omitempty"` - // Wildcards are the configured DNS lookup domains to observe. When a DNS query matches Wildcards, - // its result is added to Domains. - Wildcards []string `json:",omitempty"` -} - // AppConnector is an implementation of an AppConnector that performs // its function as a subsystem inside of a tailscale node. At the control plane // side App Connector routing is configured in terms of domains rather than IP @@ -141,9 +130,12 @@ type AppConnector struct { logf logger.Logf eventBus *eventbus.Bus routeAdvertiser RouteAdvertiser + pubClient *eventbus.Client + updatePub *eventbus.Publisher[appctype.RouteUpdate] + storePub *eventbus.Publisher[appctype.RouteInfo] // storeRoutesFunc will be called to persist routes if it is not nil. - storeRoutesFunc func(*RouteInfo) error + storeRoutesFunc func(*appctype.RouteInfo) error // mu guards the fields that follow mu sync.Mutex @@ -181,11 +173,11 @@ type Config struct { // RouteInfo, if non-nil, use used as the initial set of routes for the // connector. If nil, the connector starts empty. - RouteInfo *RouteInfo + RouteInfo *appctype.RouteInfo // StoreRoutesFunc, if non-nil, is called when the connector's routes // change, to allow the routes to be persisted. - StoreRoutesFunc func(*RouteInfo) error + StoreRoutesFunc func(*appctype.RouteInfo) error } // NewAppConnector creates a new AppConnector. 
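Nothing subscribes to the new appctype.RouteUpdate events yet; per the commit message, subscribers arrive in a later change. As a rough sketch only (not part of this patch), a consumer could look roughly like the following, reusing the eventbus.Client, Subscribe, Events, and Done APIs already seen elsewhere in this series; the function name, client name, and log handling are illustrative assumptions, and the snippet assumes imports of tailscale.com/util/eventbus, tailscale.com/types/appctype, and tailscale.com/types/logger.

// Sketch: a hypothetical subscriber for appctype.RouteUpdate events.
func watchRouteUpdates(bus *eventbus.Bus, logf logger.Logf) (stop func()) {
	ec := bus.Client("example.routeWatcher") // illustrative client name
	sub := eventbus.Subscribe[appctype.RouteUpdate](ec)
	go func() {
		for {
			select {
			case <-ec.Done():
				return // client closed; stop consuming events
			case u := <-sub.Events():
				logf("route update: advertise=%v unadvertise=%v", u.Advertise, u.Unadvertise)
			}
		}
	}()
	return func() { ec.Close() } // closing the client ends the goroutine
}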
@@ -198,10 +190,14 @@ func NewAppConnector(c Config) *AppConnector { case c.RouteAdvertiser == nil: panic("missing route advertiser") } + ec := c.EventBus.Client("appc.AppConnector") ac := &AppConnector{ logf: logger.WithPrefix(c.Logf, "appc: "), eventBus: c.EventBus, + pubClient: ec, + updatePub: eventbus.Publish[appctype.RouteUpdate](ec), + storePub: eventbus.Publish[appctype.RouteInfo](ec), routeAdvertiser: c.RouteAdvertiser, storeRoutesFunc: c.StoreRoutesFunc, } @@ -228,6 +224,14 @@ func (e *AppConnector) ShouldStoreRoutes() bool { // storeRoutesLocked takes the current state of the AppConnector and persists it func (e *AppConnector) storeRoutesLocked() error { + if e.storePub.ShouldPublish() { + e.storePub.Publish(appctype.RouteInfo{ + // Clone here, as the subscriber will handle these outside our lock. + Control: slices.Clone(e.controlRoutes), + Domains: maps.Clone(e.domains), + Wildcards: slices.Clone(e.wildcards), + }) + } if !e.ShouldStoreRoutes() { return nil } @@ -240,7 +244,8 @@ func (e *AppConnector) storeRoutesLocked() error { e.writeRateMinute.update(numRoutes) e.writeRateDay.update(numRoutes) - return e.storeRoutesFunc(&RouteInfo{ + // TODO(creachdair): Remove this once it's delivered over the event bus. + return e.storeRoutesFunc(&appctype.RouteInfo{ Control: e.controlRoutes, Domains: e.domains, Wildcards: e.wildcards, @@ -283,6 +288,18 @@ func (e *AppConnector) Wait(ctx context.Context) { e.queue.Wait(ctx) } +// Close closes the connector and cleans up resources associated with it. +// It is safe (and a noop) to call Close on nil. +func (e *AppConnector) Close() { + if e == nil { + return + } + e.mu.Lock() + defer e.mu.Unlock() + e.queue.Shutdown() // TODO(creachadair): Should we wait for it too? + e.pubClient.Close() +} + func (e *AppConnector) updateDomains(domains []string) { e.mu.Lock() defer e.mu.Unlock() @@ -323,11 +340,15 @@ func (e *AppConnector) updateDomains(domains []string) { toRemove = append(toRemove, netip.PrefixFrom(a, a.BitLen())) } } - e.queue.Add(func() { - if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { - e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slicesx.MapKeys(oldDomains), toRemove, err) - } - }) + + if len(toRemove) != 0 { + e.queue.Add(func() { + if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { + e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slicesx.MapKeys(oldDomains), toRemove, err) + } + }) + e.updatePub.Publish(appctype.RouteUpdate{Unadvertise: toRemove}) + } } e.logf("handling domains: %v and wildcards: %v", slicesx.MapKeys(e.domains), e.wildcards) @@ -377,6 +398,10 @@ nextRoute: e.logf("failed to unadvertise routes: %v: %v", toRemove, err) } }) + e.updatePub.Publish(appctype.RouteUpdate{ + Advertise: routes, + Unadvertise: toRemove, + }) e.controlRoutes = routes if err := e.storeRoutesLocked(); err != nil { @@ -464,6 +489,7 @@ func (e *AppConnector) scheduleAdvertisement(domain string, routes ...netip.Pref e.logf("failed to advertise routes for %s: %v: %v", domain, routes, err) return } + e.updatePub.Publish(appctype.RouteUpdate{Advertise: routes}) e.mu.Lock() defer e.mu.Unlock() diff --git a/appc/appconnector_test.go b/appc/appconnector_test.go index c23908c28..91f0185d0 100644 --- a/appc/appconnector_test.go +++ b/appc/appconnector_test.go @@ -4,6 +4,8 @@ package appc import ( + stdcmp "cmp" + "fmt" "net/netip" "reflect" "slices" @@ -11,9 +13,12 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + 
"github.com/google/go-cmp/cmp/cmpopts" "golang.org/x/net/dns/dnsmessage" "tailscale.com/appc/appctest" "tailscale.com/tstest" + "tailscale.com/types/appctype" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" @@ -21,7 +26,7 @@ import ( "tailscale.com/util/slicesx" ) -func fakeStoreRoutes(*RouteInfo) error { return nil } +func fakeStoreRoutes(*appctype.RouteInfo) error { return nil } func TestUpdateDomains(t *testing.T) { ctx := t.Context() @@ -33,14 +38,15 @@ func TestUpdateDomains(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: &appctest.RouteCollector{}, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: &appctest.RouteCollector{}}) } - a.UpdateDomains([]string{"example.com"}) + t.Cleanup(a.Close) + a.UpdateDomains([]string{"example.com"}) a.Wait(ctx) if got, want := a.Domains().AsSlice(), []string{"example.com"}; !slices.Equal(got, want) { t.Errorf("got %v; want %v", got, want) @@ -68,6 +74,7 @@ func TestUpdateRoutes(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { @@ -75,11 +82,14 @@ func TestUpdateRoutes(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, + RouteInfo: &appctype.RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } + t.Cleanup(a.Close) + a.updateDomains([]string{"*.example.com"}) // This route should be collapsed into the range @@ -116,6 +126,20 @@ func TestUpdateRoutes(t *testing.T) { if !slices.EqualFunc(rc.RemovedRoutes(), wantRemoved, prefixEqual) { t.Fatalf("unexpected removed routes: %v", rc.RemovedRoutes()) } + + if err := eventbustest.Expect(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.2.1/32")}), + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.1/32")}), + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{ + Advertise: prefixes("192.0.0.1/32", "192.0.2.0/24"), + Unadvertise: prefixes("192.0.2.1/32"), + }), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } @@ -123,6 +147,7 @@ func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { @@ -130,12 +155,14 @@ func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } + t.Cleanup(a.Close) + mak.Set(&a.domains, "example.com", []netip.Addr{netip.MustParseAddr("192.0.2.1")}) rc.SetRoutes([]netip.Prefix{netip.MustParsePrefix("192.0.2.1/32")}) routes := []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24")} @@ -145,12 +172,23 @@ func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { if !slices.EqualFunc(routes, rc.Routes(), prefixEqual) { t.Fatalf("got %v, want %v", rc.Routes(), routes) } + + if err := 
eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{ + Advertise: prefixes("192.0.2.0/24"), + Unadvertise: prefixes("192.0.2.1/32"), + }), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } func TestDomainRoutes(t *testing.T) { bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { @@ -158,12 +196,13 @@ func TestDomainRoutes(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } + t.Cleanup(a.Close) a.updateDomains([]string{"example.com"}) if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { t.Errorf("ObserveDNSResponse: %v", err) @@ -177,6 +216,13 @@ func TestDomainRoutes(t *testing.T) { if got := a.DomainRoutes(); !reflect.DeepEqual(got, want) { t.Fatalf("DomainRoutes: got %v, want %v", got, want) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.8/32")}), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } @@ -184,6 +230,7 @@ func TestObserveDNSResponse(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { @@ -191,12 +238,13 @@ func TestObserveDNSResponse(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } + t.Cleanup(a.Close) // a has no domains configured, so it should not advertise any routes if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { @@ -273,6 +321,22 @@ func TestObserveDNSResponse(t *testing.T) { if !slices.Contains(a.domains["example.com"], netip.MustParseAddr("192.0.2.1")) { t.Errorf("missing %v from %v", "192.0.2.1", a.domains["exmaple.com"]) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.8/32")}), // from initial DNS response, via example.com + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.9/32")}), // from CNAME response + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.10/32")}), // from CNAME response, mid-chain + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("2001:db8::1/128")}), // v6 DNS response + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.2.0/24")}), // additional prefix + eventbustest.Type[appctype.RouteInfo](), + // N.B. 
no update for 192.0.2.1 as it is already covered + ); err != nil { + t.Error(err) + } } } @@ -280,6 +344,7 @@ func TestWildcardDomains(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { @@ -287,12 +352,13 @@ func TestWildcardDomains(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } + t.Cleanup(a.Close) a.updateDomains([]string{"*.example.com"}) if err := a.ObserveDNSResponse(dnsResponse("foo.example.com.", "192.0.0.8")); err != nil { @@ -319,6 +385,13 @@ func TestWildcardDomains(t *testing.T) { if len(a.wildcards) != 1 { t.Errorf("expected only one wildcard domain, got %v", a.wildcards) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.8/32")}), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } @@ -437,6 +510,7 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -454,12 +528,14 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } + t.Cleanup(a.Close) + // nothing has yet been advertised assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -482,6 +558,13 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { wantRemovedRoutes = prefixes("1.2.3.2/32") } assertRoutes("removal", wantRoutes, wantRemovedRoutes) + + if err := eventbustest.Expect(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.1/32", "1.2.3.2/32")}), // no duplicates here + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } @@ -489,6 +572,7 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -506,12 +590,14 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } + t.Cleanup(a.Close) + assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) a.UpdateDomainsAndRoutes([]string{"a.example.com", "b.example.com"}, []netip.Prefix{}) @@ -544,6 +630,22 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { wantRemovedRoutes = prefixes("1.2.3.3/32", "1.2.3.4/32") } assertRoutes("removal", wantRoutes, wantRemovedRoutes) + + wantEvents := []any{ + // Each DNS record observed triggers an update. 
+ eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.1/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.2/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.3/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.4/32")}), + } + if shouldStore { + wantEvents = append(wantEvents, eqUpdate(appctype.RouteUpdate{ + Unadvertise: prefixes("1.2.3.3/32", "1.2.3.4/32"), + })) + } + if err := eventbustest.Expect(w, wantEvents...); err != nil { + t.Error(err) + } } } @@ -551,6 +653,7 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -568,12 +671,14 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } + t.Cleanup(a.Close) + assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) a.UpdateDomainsAndRoutes([]string{"a.example.com", "*.b.example.com"}, []netip.Prefix{}) @@ -606,6 +711,22 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { wantRemovedRoutes = prefixes("1.2.3.3/32", "1.2.3.4/32") } assertRoutes("removal", wantRoutes, wantRemovedRoutes) + + wantEvents := []any{ + // Each DNS record observed triggers an update. + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.1/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.2/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.3/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.4/32")}), + } + if shouldStore { + wantEvents = append(wantEvents, eqUpdate(appctype.RouteUpdate{ + Unadvertise: prefixes("1.2.3.3/32", "1.2.3.4/32"), + })) + } + if err := eventbustest.Expect(w, wantEvents...); err != nil { + t.Error(err) + } } } @@ -708,17 +829,23 @@ func TestMetricBucketsAreSorted(t *testing.T) { // routeAdvertiser, calls to Advertise/UnadvertiseRoutes can end up calling // back into AppConnector via authReconfig. If everything is called // synchronously, this results in a deadlock on AppConnector.mu. +// +// TODO(creachadair, 2025-09-18): Remove this along with the advertiser +// interface once the LocalBackend is switched to use the event bus and the +// tests have been updated not to need it. 
func TestUpdateRoutesDeadlock(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} a := NewAppConnector(Config{ Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) + t.Cleanup(a.Close) advertiseCalled := new(atomic.Bool) unadvertiseCalled := new(atomic.Bool) @@ -762,4 +889,42 @@ func TestUpdateRoutesDeadlock(t *testing.T) { if want := []netip.Prefix{netip.MustParsePrefix("127.0.0.1/32")}; !slices.Equal(slices.Compact(rc.Routes()), want) { t.Fatalf("got %v, want %v", rc.Routes(), want) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("127.0.0.1/32", "127.0.0.2/32")}), + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("127.0.0.1/32"), Unadvertise: prefixes("127.0.0.2/32")}), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } +} + +type textUpdate struct { + Advertise []string + Unadvertise []string +} + +func routeUpdateToText(u appctype.RouteUpdate) textUpdate { + var out textUpdate + for _, p := range u.Advertise { + out.Advertise = append(out.Advertise, p.String()) + } + for _, p := range u.Unadvertise { + out.Unadvertise = append(out.Unadvertise, p.String()) + } + return out +} + +// eqUpdate generates an eventbus test filter that matches a appctype.RouteUpdate +// message equal to want, or reports an error giving a human-readable diff. +func eqUpdate(want appctype.RouteUpdate) func(appctype.RouteUpdate) error { + return func(got appctype.RouteUpdate) error { + if diff := cmp.Diff(routeUpdateToText(got), routeUpdateToText(want), + cmpopts.SortSlices(stdcmp.Less[string]), + ); diff != "" { + return fmt.Errorf("wrong update (-got, +want):\n%s", diff) + } + return nil + } } diff --git a/client/local/local.go b/client/local/local.go index 8da8f57e5..9faeadca3 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -27,7 +27,6 @@ import ( "sync" "time" - "tailscale.com/appc" "tailscale.com/client/tailscale/apitype" "tailscale.com/drive" "tailscale.com/envknob" @@ -40,6 +39,7 @@ import ( "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/tailcfg" + "tailscale.com/types/appctype" "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/util/eventbus" @@ -1387,10 +1387,10 @@ func (lc *Client) ShutdownTailscaled(ctx context.Context) error { return err } -func (lc *Client) GetAppConnectorRouteInfo(ctx context.Context) (appc.RouteInfo, error) { +func (lc *Client) GetAppConnectorRouteInfo(ctx context.Context) (appctype.RouteInfo, error) { body, err := lc.get200(ctx, "/localapi/v0/appc-route-info") if err != nil { - return appc.RouteInfo{}, err + return appctype.RouteInfo{}, err } - return decodeJSON[appc.RouteInfo](body) + return decodeJSON[appctype.RouteInfo](body) } diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 0628afd63..278d54b1f 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -74,7 +74,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa google.golang.org/protobuf/runtime/protoimpl from github.com/prometheus/client_model/go+ google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/client/local 💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+ 
tailscale.com/client/local from tailscale.com/derp/derpserver tailscale.com/client/tailscale/apitype from tailscale.com/client/local @@ -124,6 +123,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/tsweb from tailscale.com/cmd/derper+ tailscale.com/tsweb/promvarz from tailscale.com/cmd/derper tailscale.com/tsweb/varz from tailscale.com/tsweb+ + tailscale.com/types/appctype from tailscale.com/client/local tailscale.com/types/dnstype from tailscale.com/tailcfg+ tailscale.com/types/empty from tailscale.com/ipn tailscale.com/types/ipproto from tailscale.com/tailcfg+ @@ -140,14 +140,13 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/ipn+ tailscale.com/util/cibuild from tailscale.com/health - tailscale.com/util/clientmetric from tailscale.com/net/netmon+ + tailscale.com/util/clientmetric from tailscale.com/net/netmon tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/ctxkey from tailscale.com/tsweb+ 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/hostinfo+ tailscale.com/util/eventbus from tailscale.com/net/netmon+ - tailscale.com/util/execqueue from tailscale.com/appc 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/health+ @@ -183,7 +182,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/exp/constraints from tailscale.com/util/winutil+ golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting L golang.org/x/net/bpf from github.com/mdlayher/netlink+ - golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ + golang.org/x/net/dns/dnsmessage from tailscale.com/net/dnscache golang.org/x/net/idna from golang.org/x/crypto/acme/autocert golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/proxy from tailscale.com/net/netns diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index eae1354a1..be9ac3a08 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -679,7 +679,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ sigs.k8s.io/yaml from k8s.io/apimachinery/pkg/runtime/serializer/json+ sigs.k8s.io/yaml/goyaml.v2 from sigs.k8s.io/yaml+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/tailscale+ tailscale.com/client/tailscale from tailscale.com/cmd/k8s-operator+ @@ -802,7 +802,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/bools from tailscale.com/tsnet tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from tailscale.com/ipn+ diff --git a/cmd/tailscale/cli/appcroutes.go b/cmd/tailscale/cli/appcroutes.go 
index 83443f56c..4a1ba87e3 100644 --- a/cmd/tailscale/cli/appcroutes.go +++ b/cmd/tailscale/cli/appcroutes.go @@ -12,7 +12,7 @@ import ( "strings" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/appc" + "tailscale.com/types/appctype" ) var appcRoutesArgs struct { @@ -51,7 +51,7 @@ https://tailscale.com/kb/1281/app-connectors `), } -func getAllOutput(ri *appc.RouteInfo) (string, error) { +func getAllOutput(ri *appctype.RouteInfo) (string, error) { domains, err := json.MarshalIndent(ri.Domains, " ", " ") if err != nil { return "", err @@ -76,7 +76,7 @@ type domainCount struct { count int } -func getSummarizeLearnedOutput(ri *appc.RouteInfo) string { +func getSummarizeLearnedOutput(ri *appctype.RouteInfo) string { x := make([]domainCount, len(ri.Domains)) i := 0 maxDomainWidth := 0 diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 6facd19f9..7b32fc2b4 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -73,7 +73,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep software.sslmate.com/src/go-pkcs12 from tailscale.com/cmd/tailscale/cli software.sslmate.com/src/go-pkcs12/internal/rc2 from software.sslmate.com/src/go-pkcs12 tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/client/local+ 💣 tailscale.com/atomicfile from tailscale.com/cmd/tailscale/cli+ tailscale.com/client/local from tailscale.com/client/tailscale+ L tailscale.com/client/systray from tailscale.com/cmd/tailscale/cli @@ -150,6 +149,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/tstime/rate from tailscale.com/cmd/tailscale/cli tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ + tailscale.com/types/appctype from tailscale.com/client/local+ tailscale.com/types/dnstype from tailscale.com/tailcfg+ tailscale.com/types/empty from tailscale.com/ipn tailscale.com/types/ipproto from tailscale.com/ipn+ @@ -175,7 +175,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/cmd/tailscale/cli+ tailscale.com/util/eventbus from tailscale.com/client/local+ - tailscale.com/util/execqueue from tailscale.com/appc tailscale.com/util/groupmember from tailscale.com/client/web 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ @@ -232,7 +231,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L golang.org/x/image/math/f64 from github.com/fogleman/gg+ L golang.org/x/image/math/fixed from github.com/fogleman/gg+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ - golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ + golang.org/x/net/dns/dnsmessage from tailscale.com/cmd/tailscale/cli+ golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/icmp from tailscale.com/net/ping golang.org/x/net/idna from golang.org/x/net/http/httpproxy+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index be13c7b68..ba35ecd4a 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -35,7 +35,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 go4.org/mem from tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ tailscale.com from tailscale.com/version 
- tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/appc from tailscale.com/ipn/ipnlocal tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled @@ -126,7 +126,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index a91aa8afd..e98c0da48 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -48,7 +48,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 go4.org/mem from tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/appc from tailscale.com/ipn/ipnlocal tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/tailscale+ tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale @@ -151,7 +151,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 00c1a0ac4..21e333af7 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -240,7 +240,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ LD tailscale.com/chirp from tailscale.com/cmd/tailscaled tailscale.com/client/local from tailscale.com/client/web+ @@ -387,7 +387,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled diff --git 
a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 4ddc5eda1..dfd338410 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -121,7 +121,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/web+ tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale @@ -229,7 +229,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/bools from tailscale.com/tsnet tailscale.com/types/dnstype from tailscale.com/client/local+ tailscale.com/types/empty from tailscale.com/ipn+ diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9e2fbb999..2af78b2be 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1108,6 +1108,7 @@ func (b *LocalBackend) Shutdown() { if b.notifyCancel != nil { b.notifyCancel() } + b.appConnector.Close() b.mu.Unlock() b.webClientShutdown() @@ -4783,25 +4784,28 @@ func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs i }() if !prefs.AppConnector().Advertise { + b.appConnector.Close() // clean up a previous connector (safe on nil) b.appConnector = nil return } shouldAppCStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() if b.appConnector == nil || b.appConnector.ShouldStoreRoutes() != shouldAppCStoreRoutes { - var ri *appc.RouteInfo - var storeFunc func(*appc.RouteInfo) error + var ri *appctype.RouteInfo + var storeFunc func(*appctype.RouteInfo) error if shouldAppCStoreRoutes { var err error ri, err = b.readRouteInfoLocked() if err != nil { - ri = &appc.RouteInfo{} + ri = &appctype.RouteInfo{} if err != ipn.ErrStateNotExist { b.logf("Unsuccessful Read RouteInfo: ", err) } } storeFunc = b.storeRouteInfo } + + b.appConnector.Close() // clean up a previous connector (safe on nil) b.appConnector = appc.NewAppConnector(appc.Config{ Logf: b.logf, EventBus: b.sys.Bus.Get(), @@ -6988,7 +6992,7 @@ func namespaceKeyForCurrentProfile(pm *profileManager, key ipn.StateKey) ipn.Sta const routeInfoStateStoreKey ipn.StateKey = "_routeInfo" -func (b *LocalBackend) storeRouteInfo(ri *appc.RouteInfo) error { +func (b *LocalBackend) storeRouteInfo(ri *appctype.RouteInfo) error { if !buildfeatures.HasAppConnectors { return feature.ErrUnavailable } @@ -7005,16 +7009,16 @@ func (b *LocalBackend) storeRouteInfo(ri *appc.RouteInfo) error { return b.pm.WriteState(key, bs) } -func (b *LocalBackend) readRouteInfoLocked() (*appc.RouteInfo, error) { +func (b *LocalBackend) readRouteInfoLocked() (*appctype.RouteInfo, error) { if !buildfeatures.HasAppConnectors { return nil, feature.ErrUnavailable } if b.pm.CurrentProfile().ID() == "" { - return &appc.RouteInfo{}, nil + return &appctype.RouteInfo{}, nil } key := namespaceKeyForCurrentProfile(b.pm, routeInfoStateStoreKey) bs, err := b.pm.Store().ReadState(key) - ri := &appc.RouteInfo{} + ri := 
&appctype.RouteInfo{} if err != nil { return nil, err } @@ -7027,7 +7031,7 @@ func (b *LocalBackend) readRouteInfoLocked() (*appc.RouteInfo, error) { // ReadRouteInfo returns the app connector route information that is // stored in prefs to be consistent across restarts. It should be up // to date with the RouteInfo in memory being used by appc. -func (b *LocalBackend) ReadRouteInfo() (*appc.RouteInfo, error) { +func (b *LocalBackend) ReadRouteInfo() (*appctype.RouteInfo, error) { b.mu.Lock() defer b.mu.Unlock() return b.readRouteInfoLocked() diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 6737266be..a662793db 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -49,6 +49,7 @@ import ( "tailscale.com/tsd" "tailscale.com/tstest" "tailscale.com/tstest/deptest" + "tailscale.com/types/appctype" "tailscale.com/types/dnstype" "tailscale.com/types/ipproto" "tailscale.com/types/key" @@ -74,7 +75,7 @@ import ( "tailscale.com/wgengine/wgcfg" ) -func fakeStoreRoutes(*appc.RouteInfo) error { return nil } +func fakeStoreRoutes(*appctype.RouteInfo) error { return nil } func inRemove(ip netip.Addr) bool { for _, pfx := range removeFromDefaultRoute { @@ -2314,7 +2315,7 @@ func TestOfferingAppConnector(t *testing.T) { rc := &appctest.RouteCollector{} if shouldStore { b.appConnector = appc.NewAppConnector(appc.Config{ - Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, RouteInfo: &appc.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, + Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) @@ -2381,7 +2382,7 @@ func TestObserveDNSResponse(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &appc.RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { @@ -2548,7 +2549,7 @@ func TestBackfillAppConnectorRoutes(t *testing.T) { // Store the test IP in profile data, but not in Prefs.AdvertiseRoutes. 
b.ControlKnobs().AppCStoreRoutes.Store(true) - if err := b.storeRouteInfo(&appc.RouteInfo{ + if err := b.storeRouteInfo(&appctype.RouteInfo{ Domains: map[string][]netip.Addr{ "example.com": {ip}, }, @@ -5501,10 +5502,10 @@ func TestReadWriteRouteInfo(t *testing.T) { b.pm.currentProfile = prof1.View() // set up routeInfo - ri1 := &appc.RouteInfo{} + ri1 := &appctype.RouteInfo{} ri1.Wildcards = []string{"1"} - ri2 := &appc.RouteInfo{} + ri2 := &appctype.RouteInfo{} ri2.Wildcards = []string{"2"} // read before write diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index 43b3c49fc..a16d55b8c 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -23,6 +23,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tsd" "tailscale.com/tstest" + "tailscale.com/types/appctype" "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/util/eventbus/eventbustest" @@ -261,7 +262,7 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { Logf: t.Logf, EventBus: sys.Bus.Get(), RouteAdvertiser: &appctest.RouteCollector{}, - RouteInfo: &appc.RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { @@ -346,7 +347,7 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { Logf: t.Logf, EventBus: sys.Bus.Get(), RouteAdvertiser: rc, - RouteInfo: &appc.RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { @@ -419,7 +420,7 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { Logf: t.Logf, EventBus: sys.Bus.Get(), RouteAdvertiser: rc, - RouteInfo: &appc.RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index b07df8b02..723081e62 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -23,7 +23,6 @@ import ( "time" "golang.org/x/net/dns/dnsmessage" - "tailscale.com/appc" "tailscale.com/client/tailscale/apitype" "tailscale.com/envknob" "tailscale.com/feature" @@ -38,6 +37,7 @@ import ( "tailscale.com/net/netutil" "tailscale.com/tailcfg" "tailscale.com/tstime" + "tailscale.com/types/appctype" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" @@ -1684,7 +1684,7 @@ func (h *Handler) serveGetAppcRouteInfo(w http.ResponseWriter, r *http.Request) res, err := h.b.ReadRouteInfo() if err != nil { if errors.Is(err, ipn.ErrStateNotExist) { - res = &appc.RouteInfo{} + res = &appctype.RouteInfo{} } else { WriteErrorJSON(w, err) return diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index a0d9f9ebb..739d0b33b 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -117,7 +117,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/web+ tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale @@ -224,7 +224,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/tstime/rate from tailscale.com/wgengine/filter LDW tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from 
tailscale.com/tsweb+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/bools from tailscale.com/tsnet tailscale.com/types/dnstype from tailscale.com/client/local+ tailscale.com/types/empty from tailscale.com/ipn+ diff --git a/types/appctype/appconnector.go b/types/appctype/appconnector.go index f4ced65a4..567ab755f 100644 --- a/types/appctype/appconnector.go +++ b/types/appctype/appconnector.go @@ -73,3 +73,23 @@ type AppConnectorAttr struct { // tag of the form tag:. Connectors []string `json:"connectors,omitempty"` } + +// RouteInfo is a data structure used to persist the in memory state of an AppConnector +// so that we can know, even after a restart, which routes came from ACLs and which were +// learned from domains. +type RouteInfo struct { + // Control is the routes from the 'routes' section of an app connector acl. + Control []netip.Prefix `json:",omitempty"` + // Domains are the routes discovered by observing DNS lookups for configured domains. + Domains map[string][]netip.Addr `json:",omitempty"` + // Wildcards are the configured DNS lookup domains to observe. When a DNS query matches Wildcards, + // its result is added to Domains. + Wildcards []string `json:",omitempty"` +} + +// RouteUpdate records a set of routes that should be advertised and a set of +// routes that should be unadvertised in event bus updates. +type RouteUpdate struct { + Advertise []netip.Prefix + Unadvertise []netip.Prefix +} From 3ae7a351b4b2e9f33ca9f63dbc4128065de0e22d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 1 Oct 2025 18:34:11 -0700 Subject: [PATCH 1472/1708] feature/featuretags: make clientmetrics optional Saves 57 KB Updates #12614 Change-Id: If7eebec12b3cb30ae6264171d36a258c04b05a70 Signed-off-by: Brad Fitzpatrick --- .../feature_clientmetrics_disabled.go | 13 ++++++++++ .../feature_clientmetrics_enabled.go | 13 ++++++++++ feature/featuretags/featuretags.go | 1 + ipn/localapi/localapi.go | 5 ++++ util/clientmetric/clientmetric.go | 2 ++ util/clientmetric/omit.go | 24 +++++++++++++++++++ 6 files changed, 58 insertions(+) create mode 100644 feature/buildfeatures/feature_clientmetrics_disabled.go create mode 100644 feature/buildfeatures/feature_clientmetrics_enabled.go create mode 100644 util/clientmetric/omit.go diff --git a/feature/buildfeatures/feature_clientmetrics_disabled.go b/feature/buildfeatures/feature_clientmetrics_disabled.go new file mode 100644 index 000000000..721908bb0 --- /dev/null +++ b/feature/buildfeatures/feature_clientmetrics_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_clientmetrics + +package buildfeatures + +// HasClientMetrics is whether the binary was built with support for modular feature "Client metrics support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_clientmetrics" build tag. +// It's a const so it can be used for dead code elimination. +const HasClientMetrics = false diff --git a/feature/buildfeatures/feature_clientmetrics_enabled.go b/feature/buildfeatures/feature_clientmetrics_enabled.go new file mode 100644 index 000000000..deaeb6e69 --- /dev/null +++ b/feature/buildfeatures/feature_clientmetrics_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_clientmetrics + +package buildfeatures + +// HasClientMetrics is whether the binary was built with support for modular feature "Client metrics support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_clientmetrics" build tag. +// It's a const so it can be used for dead code elimination. +const HasClientMetrics = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index e9d566a86..5792a1927 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -97,6 +97,7 @@ var Features = map[FeatureTag]FeatureMeta{ "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, "cli": {Sym: "CLI", Desc: "embed the CLI into the tailscaled binary"}, "cliconndiag": {Sym: "CLIConnDiag", Desc: "CLI connection error diagnostics"}, + "clientmetrics": {Sym: "ClientMetrics", Desc: "Client metrics support"}, "clientupdate": {Sym: "ClientUpdate", Desc: "Client auto-update support"}, "completion": {Sym: "Completion", Desc: "CLI shell completion"}, "dbus": {Sym: "DBus", Desc: "Linux DBus support"}, diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 723081e62..7f6452ad3 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -1218,6 +1218,11 @@ func (h *Handler) serveHandlePushMessage(w http.ResponseWriter, r *http.Request) } func (h *Handler) serveUploadClientMetrics(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasClientMetrics { + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(struct{}{}) + return + } if r.Method != httpm.POST { http.Error(w, "unsupported method", http.StatusMethodNotAllowed) return diff --git a/util/clientmetric/clientmetric.go b/util/clientmetric/clientmetric.go index 2243ec3de..65223e6a9 100644 --- a/util/clientmetric/clientmetric.go +++ b/util/clientmetric/clientmetric.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_clientmetrics + // Package clientmetric provides client-side metrics whose values // get occasionally logged. package clientmetric diff --git a/util/clientmetric/omit.go b/util/clientmetric/omit.go new file mode 100644 index 000000000..5349fc724 --- /dev/null +++ b/util/clientmetric/omit.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_clientmetrics + +package clientmetric + +type Metric struct{} + +func (*Metric) Add(int64) {} +func (*Metric) Set(int64) {} +func (*Metric) Value() int64 { return 0 } +func (*Metric) Register(expvarInt any) {} +func (*Metric) UnregisterAll() {} + +func HasPublished(string) bool { panic("unreachable") } +func EncodeLogTailMetricsDelta() string { return "" } +func WritePrometheusExpositionFormat(any) {} + +var zeroMetric Metric + +func NewCounter(string) *Metric { return &zeroMetric } +func NewGauge(string) *Metric { return &zeroMetric } +func NewAggregateCounter(string) *Metric { return &zeroMetric } From 2cd518a8b651b0018a7fed84df45cc82e62987db Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 15:19:27 -0700 Subject: [PATCH 1473/1708] control/controlclient: optimize zstd decode of KeepAlive messages Maybe it matters? At least globally across all nodes? 
Fixes #17343 Change-Id: I3f61758ea37de527e16602ec1a6e453d913b3195 Signed-off-by: Brad Fitzpatrick --- control/controlclient/direct.go | 18 +++++++++++++-- control/controlclient/map.go | 3 +++ control/controlclient/map_test.go | 38 +++++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 2 deletions(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index a3f908da4..069affbd1 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1091,7 +1091,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap vlogf("netmap: read body after %v", time.Since(t0).Round(time.Millisecond)) var resp tailcfg.MapResponse - if err := c.decodeMsg(msg, &resp); err != nil { + if err := sess.decodeMsg(msg, &resp); err != nil { vlogf("netmap: decode error: %v", err) return err } @@ -1240,12 +1240,23 @@ func decode(res *http.Response, v any) error { var jsonEscapedZero = []byte(`\u0000`) +const justKeepAliveStr = `{"KeepAlive":true}` + // decodeMsg is responsible for uncompressing msg and unmarshaling into v. -func (c *Direct) decodeMsg(compressedMsg []byte, v any) error { +func (sess *mapSession) decodeMsg(compressedMsg []byte, v *tailcfg.MapResponse) error { + // Fast path for common case of keep-alive message. + // See tailscale/tailscale#17343. + if sess.keepAliveZ != nil && bytes.Equal(compressedMsg, sess.keepAliveZ) { + v.KeepAlive = true + return nil + } + b, err := zstdframe.AppendDecode(nil, compressedMsg) if err != nil { return err } + sess.ztdDecodesForTest++ + if DevKnob.DumpNetMaps() { var buf bytes.Buffer json.Indent(&buf, b, "", " ") @@ -1258,6 +1269,9 @@ func (c *Direct) decodeMsg(compressedMsg []byte, v any) error { if err := json.Unmarshal(b, v); err != nil { return fmt.Errorf("response: %v", err) } + if v.KeepAlive && string(b) == justKeepAliveStr { + sess.keepAliveZ = compressedMsg + } return nil } diff --git a/control/controlclient/map.go b/control/controlclient/map.go index 22cea5aca..eafdb2d56 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -57,6 +57,9 @@ type mapSession struct { altClock tstime.Clock // if nil, regular time is used cancel context.CancelFunc // always non-nil, shuts down caller's base long poll context + keepAliveZ []byte // if non-nil, the learned zstd encoding of the just-KeepAlive message for this session + ztdDecodesForTest int // for testing + // sessionAliveCtx is a Background-based context that's alive for the // duration of the mapSession that we own the lifetime of. It's closed by // sessionAliveCtxClose. 
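As a minimal standalone sketch of the fast path this commit adds (illustrative names decodeMapResponse and cachedKeepAliveZ, not the real controlclient identifiers; only zstdframe.AppendDecode and tailcfg.MapResponse are taken from the diff above):

package example

import (
	"bytes"
	"encoding/json"

	"tailscale.com/tailcfg"
	"tailscale.com/util/zstdframe"
)

// justKeepAliveJSON is the exact JSON body the control server sends for a
// bare keep-alive message, per the patch above.
const justKeepAliveJSON = `{"KeepAlive":true}`

// cachedKeepAliveZ remembers the compressed bytes of the last keep-alive-only
// message, so an identical frame can be recognized with a byte comparison
// instead of a zstd decode plus JSON unmarshal.
var cachedKeepAliveZ []byte

func decodeMapResponse(compressed []byte, v *tailcfg.MapResponse) error {
	if cachedKeepAliveZ != nil && bytes.Equal(compressed, cachedKeepAliveZ) {
		v.KeepAlive = true // fast path: skip decompression and unmarshaling entirely
		return nil
	}
	b, err := zstdframe.AppendDecode(nil, compressed)
	if err != nil {
		return err
	}
	if err := json.Unmarshal(b, v); err != nil {
		return err
	}
	// Learn this server's encoding of the bare keep-alive message so the
	// next identical frame takes the fast path.
	if v.KeepAlive && string(b) == justKeepAliveJSON {
		cachedKeepAliveZ = compressed
	}
	return nil
}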
diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index 4e8c911e3..2be4b6ad7 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -4,6 +4,7 @@ package controlclient import ( + "bytes" "context" "encoding/json" "fmt" @@ -33,6 +34,7 @@ import ( "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" + "tailscale.com/util/zstdframe" ) func eps(s ...string) []netip.AddrPort { @@ -1445,3 +1447,39 @@ func TestNetmapForMapResponseForDebug(t *testing.T) { t.Errorf("mismatch\nnm1: %s\nnm2: %s\n", logger.AsJSON(nm1), logger.AsJSON(nm2)) } } + +func TestLearnZstdOfKeepAlive(t *testing.T) { + keepAliveMsgZstd := (func() []byte { + msg := must.Get(json.Marshal(tailcfg.MapResponse{ + KeepAlive: true, + })) + return zstdframe.AppendEncode(nil, msg, zstdframe.FastestCompression) + })() + + sess := newTestMapSession(t, nil) + + // The first time we see a zstd keep-alive message, we learn how + // the server encodes that. + var mr tailcfg.MapResponse + must.Do(sess.decodeMsg(keepAliveMsgZstd, &mr)) + if !mr.KeepAlive { + t.Fatal("mr.KeepAlive false; want true") + } + if !bytes.Equal(sess.keepAliveZ, keepAliveMsgZstd) { + t.Fatalf("sess.keepAlive = %q; want %q", sess.keepAliveZ, keepAliveMsgZstd) + } + if got, want := sess.ztdDecodesForTest, 1; got != want { + t.Fatalf("got %d zstd decodes; want %d", got, want) + } + + // The second time on the session where we see that message, we + // decode it without needing to decompress. + var mr2 tailcfg.MapResponse + must.Do(sess.decodeMsg(keepAliveMsgZstd, &mr2)) + if !mr2.KeepAlive { + t.Fatal("mr2.KeepAlive false; want true") + } + if got, want := sess.ztdDecodesForTest, 1; got != want { + t.Fatalf("got %d zstd decodes; want %d", got, want) + } +} From a208cb9fd5ac7a3e8a7ca37daf0c1560ee84e35f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 1 Oct 2025 19:18:46 -0700 Subject: [PATCH 1474/1708] feature/featuretags: add features for c2n, peerapi, advertise/use routes/exit nodes Saves 262 KB so far. I'm sure I missed some places, but shotizam says these were the low hanging fruit. 
Updates #12614 Change-Id: Ia31c01b454f627e6d0470229aae4e19d615e45e3 Signed-off-by: Brad Fitzpatrick --- control/controlclient/direct.go | 3 + .../feature_advertiseexitnode_disabled.go | 13 ++ .../feature_advertiseexitnode_enabled.go | 13 ++ .../feature_advertiseroutes_disabled.go | 13 ++ .../feature_advertiseroutes_enabled.go | 13 ++ feature/buildfeatures/feature_c2n_disabled.go | 13 ++ feature/buildfeatures/feature_c2n_enabled.go | 13 ++ .../feature_peerapiclient_disabled.go | 13 ++ .../feature_peerapiclient_enabled.go | 13 ++ .../feature_peerapiserver_disabled.go | 13 ++ .../feature_peerapiserver_enabled.go | 13 ++ .../feature_useexitnode_disabled.go | 13 ++ .../feature_useexitnode_enabled.go | 13 ++ .../feature_useroutes_disabled.go | 13 ++ .../feature_useroutes_enabled.go | 13 ++ feature/featuretags/featuretags.go | 75 ++++++++- ipn/ipnlocal/c2n.go | 16 +- ipn/ipnlocal/local.go | 157 +++++++++++++----- ipn/ipnlocal/node_backend.go | 29 ++-- ipn/ipnlocal/peerapi.go | 14 ++ ipn/ipnlocal/prefs_metrics.go | 4 + ipn/localapi/localapi.go | 24 ++- ipn/prefs.go | 4 + net/dns/resolver/forwarder.go | 4 + net/netmon/interfaces_linux.go | 4 + net/netmon/netmon.go | 4 + net/netmon/state.go | 3 + net/portmapper/portmapper.go | 12 +- net/tsdial/tsdial.go | 13 +- 29 files changed, 469 insertions(+), 79 deletions(-) create mode 100644 feature/buildfeatures/feature_advertiseexitnode_disabled.go create mode 100644 feature/buildfeatures/feature_advertiseexitnode_enabled.go create mode 100644 feature/buildfeatures/feature_advertiseroutes_disabled.go create mode 100644 feature/buildfeatures/feature_advertiseroutes_enabled.go create mode 100644 feature/buildfeatures/feature_c2n_disabled.go create mode 100644 feature/buildfeatures/feature_c2n_enabled.go create mode 100644 feature/buildfeatures/feature_peerapiclient_disabled.go create mode 100644 feature/buildfeatures/feature_peerapiclient_enabled.go create mode 100644 feature/buildfeatures/feature_peerapiserver_disabled.go create mode 100644 feature/buildfeatures/feature_peerapiserver_enabled.go create mode 100644 feature/buildfeatures/feature_useexitnode_disabled.go create mode 100644 feature/buildfeatures/feature_useexitnode_enabled.go create mode 100644 feature/buildfeatures/feature_useroutes_disabled.go create mode 100644 feature/buildfeatures/feature_useroutes_enabled.go diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 069affbd1..c77e93e1c 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1409,6 +1409,9 @@ func (c *Direct) answerPing(pr *tailcfg.PingRequest) { answerHeadPing(c.logf, httpc, pr) return case "c2n": + if !buildfeatures.HasC2N { + return + } if !useNoise && !envknob.Bool("TS_DEBUG_PERMIT_HTTP_C2N") { c.logf("refusing to answer c2n ping without noise") return diff --git a/feature/buildfeatures/feature_advertiseexitnode_disabled.go b/feature/buildfeatures/feature_advertiseexitnode_disabled.go new file mode 100644 index 000000000..d4fdcec22 --- /dev/null +++ b/feature/buildfeatures/feature_advertiseexitnode_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_advertiseexitnode + +package buildfeatures + +// HasAdvertiseExitNode is whether the binary was built with support for modular feature "Run an exit node". +// Specifically, it's whether the binary was NOT built with the "ts_omit_advertiseexitnode" build tag. 
+// It's a const so it can be used for dead code elimination. +const HasAdvertiseExitNode = false diff --git a/feature/buildfeatures/feature_advertiseexitnode_enabled.go b/feature/buildfeatures/feature_advertiseexitnode_enabled.go new file mode 100644 index 000000000..28246143e --- /dev/null +++ b/feature/buildfeatures/feature_advertiseexitnode_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_advertiseexitnode + +package buildfeatures + +// HasAdvertiseExitNode is whether the binary was built with support for modular feature "Run an exit node". +// Specifically, it's whether the binary was NOT built with the "ts_omit_advertiseexitnode" build tag. +// It's a const so it can be used for dead code elimination. +const HasAdvertiseExitNode = true diff --git a/feature/buildfeatures/feature_advertiseroutes_disabled.go b/feature/buildfeatures/feature_advertiseroutes_disabled.go new file mode 100644 index 000000000..59042720f --- /dev/null +++ b/feature/buildfeatures/feature_advertiseroutes_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_advertiseroutes + +package buildfeatures + +// HasAdvertiseRoutes is whether the binary was built with support for modular feature "Advertise routes for other nodes to use". +// Specifically, it's whether the binary was NOT built with the "ts_omit_advertiseroutes" build tag. +// It's a const so it can be used for dead code elimination. +const HasAdvertiseRoutes = false diff --git a/feature/buildfeatures/feature_advertiseroutes_enabled.go b/feature/buildfeatures/feature_advertiseroutes_enabled.go new file mode 100644 index 000000000..118fcd55d --- /dev/null +++ b/feature/buildfeatures/feature_advertiseroutes_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_advertiseroutes + +package buildfeatures + +// HasAdvertiseRoutes is whether the binary was built with support for modular feature "Advertise routes for other nodes to use". +// Specifically, it's whether the binary was NOT built with the "ts_omit_advertiseroutes" build tag. +// It's a const so it can be used for dead code elimination. +const HasAdvertiseRoutes = true diff --git a/feature/buildfeatures/feature_c2n_disabled.go b/feature/buildfeatures/feature_c2n_disabled.go new file mode 100644 index 000000000..bc37e9e7b --- /dev/null +++ b/feature/buildfeatures/feature_c2n_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_c2n + +package buildfeatures + +// HasC2N is whether the binary was built with support for modular feature "Control-to-node (C2N) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_c2n" build tag. +// It's a const so it can be used for dead code elimination. +const HasC2N = false diff --git a/feature/buildfeatures/feature_c2n_enabled.go b/feature/buildfeatures/feature_c2n_enabled.go new file mode 100644 index 000000000..5950e7157 --- /dev/null +++ b/feature/buildfeatures/feature_c2n_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_c2n + +package buildfeatures + +// HasC2N is whether the binary was built with support for modular feature "Control-to-node (C2N) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_c2n" build tag. +// It's a const so it can be used for dead code elimination. +const HasC2N = true diff --git a/feature/buildfeatures/feature_peerapiclient_disabled.go b/feature/buildfeatures/feature_peerapiclient_disabled.go new file mode 100644 index 000000000..83cc2bdfe --- /dev/null +++ b/feature/buildfeatures/feature_peerapiclient_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_peerapiclient + +package buildfeatures + +// HasPeerAPIClient is whether the binary was built with support for modular feature "PeerAPI client support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_peerapiclient" build tag. +// It's a const so it can be used for dead code elimination. +const HasPeerAPIClient = false diff --git a/feature/buildfeatures/feature_peerapiclient_enabled.go b/feature/buildfeatures/feature_peerapiclient_enabled.go new file mode 100644 index 000000000..0bd3f50a8 --- /dev/null +++ b/feature/buildfeatures/feature_peerapiclient_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_peerapiclient + +package buildfeatures + +// HasPeerAPIClient is whether the binary was built with support for modular feature "PeerAPI client support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_peerapiclient" build tag. +// It's a const so it can be used for dead code elimination. +const HasPeerAPIClient = true diff --git a/feature/buildfeatures/feature_peerapiserver_disabled.go b/feature/buildfeatures/feature_peerapiserver_disabled.go new file mode 100644 index 000000000..4a4f32b8a --- /dev/null +++ b/feature/buildfeatures/feature_peerapiserver_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_peerapiserver + +package buildfeatures + +// HasPeerAPIServer is whether the binary was built with support for modular feature "PeerAPI server support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_peerapiserver" build tag. +// It's a const so it can be used for dead code elimination. +const HasPeerAPIServer = false diff --git a/feature/buildfeatures/feature_peerapiserver_enabled.go b/feature/buildfeatures/feature_peerapiserver_enabled.go new file mode 100644 index 000000000..17d0547b8 --- /dev/null +++ b/feature/buildfeatures/feature_peerapiserver_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_peerapiserver + +package buildfeatures + +// HasPeerAPIServer is whether the binary was built with support for modular feature "PeerAPI server support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_peerapiserver" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasPeerAPIServer = true diff --git a/feature/buildfeatures/feature_useexitnode_disabled.go b/feature/buildfeatures/feature_useexitnode_disabled.go new file mode 100644 index 000000000..51bec8046 --- /dev/null +++ b/feature/buildfeatures/feature_useexitnode_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_useexitnode + +package buildfeatures + +// HasUseExitNode is whether the binary was built with support for modular feature "Use exit nodes". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useexitnode" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseExitNode = false diff --git a/feature/buildfeatures/feature_useexitnode_enabled.go b/feature/buildfeatures/feature_useexitnode_enabled.go new file mode 100644 index 000000000..f7ab414de --- /dev/null +++ b/feature/buildfeatures/feature_useexitnode_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_useexitnode + +package buildfeatures + +// HasUseExitNode is whether the binary was built with support for modular feature "Use exit nodes". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useexitnode" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseExitNode = true diff --git a/feature/buildfeatures/feature_useroutes_disabled.go b/feature/buildfeatures/feature_useroutes_disabled.go new file mode 100644 index 000000000..ecf9d022b --- /dev/null +++ b/feature/buildfeatures/feature_useroutes_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_useroutes + +package buildfeatures + +// HasUseRoutes is whether the binary was built with support for modular feature "Use routes advertised by other nodes". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useroutes" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseRoutes = false diff --git a/feature/buildfeatures/feature_useroutes_enabled.go b/feature/buildfeatures/feature_useroutes_enabled.go new file mode 100644 index 000000000..c0a59322e --- /dev/null +++ b/feature/buildfeatures/feature_useroutes_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_useroutes + +package buildfeatures + +// HasUseRoutes is whether the binary was built with support for modular feature "Use routes advertised by other nodes". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useroutes" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseRoutes = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 5792a1927..db7f2d272 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -82,6 +82,12 @@ type FeatureMeta struct { Sym string // exported Go symbol for boolean const Desc string // human-readable description Deps []FeatureTag // other features this feature requires + + // ImplementationDetail is whether the feature is an internal implementation + // detail. 
That is, it's not something a user would care about having or not +	// having, but we'd like to be able to omit from builds if no other +	// user-visible features depend on it. +	ImplementationDetail bool } // Features are the known Tailscale features that can be selectively included or @@ -90,17 +96,45 @@ var Features = map[FeatureTag]FeatureMeta{ "acme": {Sym: "ACME", Desc: "ACME TLS certificate management"}, "appconnectors": {Sym: "AppConnectors", Desc: "App Connectors support"}, "aws": {Sym: "AWS", Desc: "AWS integration"}, - "bakedroots": {Sym: "BakedRoots", Desc: "Embed CA (LetsEncrypt) x509 roots to use as fallback"}, - "bird": {Sym: "Bird", Desc: "Bird BGP integration"}, + "advertiseexitnode": { + Sym: "AdvertiseExitNode", + Desc: "Run an exit node", + Deps: []FeatureTag{ + "peerapiserver", // to run the ExitDNS server + "advertiseroutes", + }, + }, + "advertiseroutes": { + Sym: "AdvertiseRoutes", + Desc: "Advertise routes for other nodes to use", + Deps: []FeatureTag{ + "c2n", // for control plane to probe health for HA subnet router leader election + }, + }, + "bakedroots": {Sym: "BakedRoots", Desc: "Embed CA (LetsEncrypt) x509 roots to use as fallback"}, + "bird": {Sym: "Bird", Desc: "Bird BGP integration"}, + "c2n": { + Sym: "C2N", + Desc: "Control-to-node (C2N) support", + ImplementationDetail: true, + }, "captiveportal": {Sym: "CaptivePortal", Desc: "Captive portal detection"}, "capture": {Sym: "Capture", Desc: "Packet capture"}, - "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, "cli": {Sym: "CLI", Desc: "embed the CLI into the tailscaled binary"}, "cliconndiag": {Sym: "CLIConnDiag", Desc: "CLI connection error diagnostics"}, "clientmetrics": {Sym: "ClientMetrics", Desc: "Client metrics support"}, - "clientupdate": {Sym: "ClientUpdate", Desc: "Client auto-update support"}, - "completion": {Sym: "Completion", Desc: "CLI shell completion"}, - "dbus": {Sym: "DBus", Desc: "Linux DBus support"}, + "clientupdate": { + Sym: "ClientUpdate", + Desc: "Client auto-update support", + Deps: []FeatureTag{"c2n"}, + }, + "completion": {Sym: "Completion", Desc: "CLI shell completion"}, + "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, + "dbus": { + Sym: "DBus", + Desc: "Linux DBus support", + ImplementationDetail: true, + }, "debug": {Sym: "Debug", Desc: "various debug support, for things that don't have or need their own more specific feature"}, "debugeventbus": {Sym: "DebugEventBus", Desc: "eventbus debug support"}, "debugportmapper": { @@ -144,6 +178,16 @@ var Features = map[FeatureTag]FeatureMeta{ // by some other feature are missing, then it's an error by default unless you accept // that it's okay to proceed without that meta feature.
}, + "peerapiclient": { + Sym: "PeerAPIClient", + Desc: "PeerAPI client support", + ImplementationDetail: true, + }, + "peerapiserver": { + Sym: "PeerAPIServer", + Desc: "PeerAPI server support", + ImplementationDetail: true, + }, "portlist": {Sym: "PortList", Desc: "Optionally advertise listening service ports"}, "portmapper": {Sym: "PortMapper", Desc: "NAT-PMP/PCP/UPnP port mapping support"}, "posture": {Sym: "Posture", Desc: "Device posture checking support"}, @@ -180,7 +224,7 @@ var Features = map[FeatureTag]FeatureMeta{ "ssh": { Sym: "SSH", Desc: "Tailscale SSH support", - Deps: []FeatureTag{"dbus", "netstack"}, + Deps: []FeatureTag{"c2n", "dbus", "netstack"}, }, "synology": { Sym: "Synology", @@ -192,7 +236,13 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Linux system tray", Deps: []FeatureTag{"dbus"}, }, - "taildrop": {Sym: "Taildrop", Desc: "Taildrop (file sending) support"}, + "taildrop": { + Sym: "Taildrop", + Desc: "Taildrop (file sending) support", + Deps: []FeatureTag{ + "peerapiclient", "peerapiserver", // assume Taildrop is both sides for now + }, + }, "tailnetlock": {Sym: "TailnetLock", Desc: "Tailnet Lock support"}, "tap": {Sym: "Tap", Desc: "Experimental Layer 2 (ethernet) support"}, "tpm": {Sym: "TPM", Desc: "TPM support"}, @@ -200,6 +250,15 @@ var Features = map[FeatureTag]FeatureMeta{ Sym: "UnixSocketIdentity", Desc: "differentiate between users accessing the LocalAPI over unix sockets (if omitted, all users have full access)", }, + "useroutes": { + Sym: "UseRoutes", + Desc: "Use routes advertised by other nodes", + }, + "useexitnode": { + Sym: "UseExitNode", + Desc: "Use exit nodes", + Deps: []FeatureTag{"peerapiclient", "useroutes"}, + }, "useproxy": { Sym: "UseProxy", Desc: "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.", diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index e2dfecec2..4b5b581aa 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -32,12 +32,17 @@ import ( // c2nHandlers maps an HTTP method and URI path (without query parameters) to // its handler. The exact method+path match is preferred, but if no entry // exists for that, a map entry with an empty method is used as a fallback. -var c2nHandlers = map[methodAndPath]c2nHandler{ - // Debug. - req("/echo"): handleC2NEcho, -} +var c2nHandlers map[methodAndPath]c2nHandler func init() { + c2nHandlers = map[methodAndPath]c2nHandler{} + if buildfeatures.HasC2N { + // Echo is the basic "ping" handler as used by the control plane to probe + // whether a node is reachable. In particular, it's important for + // high-availability subnet routers for the control plane to probe which of + // several candidate nodes is reachable and actually alive. + RegisterC2N("/echo", handleC2NEcho) + } if buildfeatures.HasSSH { RegisterC2N("/ssh/usernames", handleC2NSSHUsernames) } @@ -69,6 +74,9 @@ func init() { // A pattern is like "GET /foo" (specific to an HTTP method) or "/foo" (all // methods). It panics if the pattern is already registered. 
func RegisterC2N(pattern string, h func(*LocalBackend, http.ResponseWriter, *http.Request)) { + if !buildfeatures.HasC2N { + return + } k := req(pattern) if _, ok := c2nHandlers[k]; ok { panic(fmt.Sprintf("c2n: duplicate handler for %q", pattern)) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 2af78b2be..38f98f8fb 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -550,10 +550,12 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo // Following changes are triggered via the eventbus. b.linkChange(&netmon.ChangeDelta{New: netMon.InterfaceState()}) - if tunWrap, ok := b.sys.Tun.GetOK(); ok { - tunWrap.PeerAPIPort = b.GetPeerAPIPort - } else { - b.logf("[unexpected] failed to wire up PeerAPI port for engine %T", e) + if buildfeatures.HasPeerAPIServer { + if tunWrap, ok := b.sys.Tun.GetOK(); ok { + tunWrap.PeerAPIPort = b.GetPeerAPIPort + } else { + b.logf("[unexpected] failed to wire up PeerAPI port for engine %T", e) + } } if buildfeatures.HasDebug { @@ -972,15 +974,17 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { b.updateFilterLocked(prefs) updateExitNodeUsageWarning(prefs, delta.New, b.health) - cn := b.currentNode() - nm := cn.NetMap() - if peerAPIListenAsync && nm != nil && b.state == ipn.Running { - want := nm.GetAddresses().Len() - have := len(b.peerAPIListeners) - b.logf("[v1] linkChange: have %d peerAPIListeners, want %d", have, want) - if have < want { - b.logf("linkChange: peerAPIListeners too low; trying again") - b.goTracker.Go(b.initPeerAPIListener) + if buildfeatures.HasPeerAPIServer { + cn := b.currentNode() + nm := cn.NetMap() + if peerAPIListenAsync && nm != nil && b.state == ipn.Running { + want := nm.GetAddresses().Len() + have := len(b.peerAPIListeners) + b.logf("[v1] linkChange: have %d peerAPIListeners, want %d", have, want) + if have < want { + b.logf("linkChange: peerAPIListeners too low; trying again") + b.goTracker.Go(b.initPeerAPIListener) + } } } } @@ -1368,7 +1372,7 @@ func peerStatusFromNode(ps *ipnstate.PeerStatus, n tailcfg.NodeView) { ps.PublicKey = n.Key() ps.ID = n.StableID() ps.Created = n.Created() - ps.ExitNodeOption = tsaddr.ContainsExitRoutes(n.AllowedIPs()) + ps.ExitNodeOption = buildfeatures.HasUseExitNode && tsaddr.ContainsExitRoutes(n.AllowedIPs()) if n.Tags().Len() != 0 { v := n.Tags() ps.Tags = &v @@ -1897,6 +1901,9 @@ func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { // // b.mu must be held. func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { + if !buildfeatures.HasUseExitNode { + return false + } if exitNodeIDStr, _ := b.polc.GetString(pkey.ExitNodeID, ""); exitNodeIDStr != "" { exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) @@ -2002,7 +2009,7 @@ func (b *LocalBackend) sysPolicyChanged(policy policyclient.PolicyChange) { b.mu.Unlock() } - if policy.HasChanged(pkey.AllowedSuggestedExitNodes) { + if buildfeatures.HasUseExitNode && policy.HasChanged(pkey.AllowedSuggestedExitNodes) { b.refreshAllowedSuggestions() // Re-evaluate exit node suggestion now that the policy setting has changed. if _, err := b.SuggestExitNode(); err != nil && !errors.Is(err, ErrNoPreferredDERP) { @@ -2073,6 +2080,9 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo // mustationsAreWorthyOfRecalculatingSuggestedExitNode reports whether any mutation type in muts is // worthy of recalculating the suggested exit node. 
func mutationsAreWorthyOfRecalculatingSuggestedExitNode(muts []netmap.NodeMutation, cn *nodeBackend, sid tailcfg.StableNodeID) bool { + if !buildfeatures.HasUseExitNode { + return false + } for _, m := range muts { n, ok := cn.NodeByID(m.NodeIDBeingMutated()) if !ok { @@ -2126,6 +2136,9 @@ func mutationsAreWorthyOfTellingIPNBus(muts []netmap.NodeMutation) bool { // // b.mu must be held. func (b *LocalBackend) resolveAutoExitNodeLocked(prefs *ipn.Prefs) (prefsChanged bool) { + if !buildfeatures.HasUseExitNode { + return false + } // As of 2025-07-08, the only supported auto exit node expression is [ipn.AnyExitNode]. // // However, to maintain forward compatibility with future auto exit node expressions, @@ -2170,6 +2183,9 @@ func (b *LocalBackend) resolveAutoExitNodeLocked(prefs *ipn.Prefs) (prefsChanged // // b.mu must be held. func (b *LocalBackend) resolveExitNodeIPLocked(prefs *ipn.Prefs) (prefsChanged bool) { + if !buildfeatures.HasUseExitNode { + return false + } // If we have a desired IP on file, try to find the corresponding node. if !prefs.ExitNodeIP.IsValid() { return false @@ -2455,6 +2471,11 @@ func (b *LocalBackend) Start(opts ipn.Options) error { } } + var c2nHandler http.Handler + if buildfeatures.HasC2N { + c2nHandler = http.HandlerFunc(b.handleC2N) + } + // TODO(apenwarr): The only way to change the ServerURL is to // re-run b.Start, because this is the only place we create a // new controlclient. EditPrefs allows you to overwrite ServerURL, @@ -2475,7 +2496,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { PopBrowserURL: b.tellClientToBrowseToURL, Dialer: b.Dialer(), Observer: b, - C2NHandler: http.HandlerFunc(b.handleC2N), + C2NHandler: c2nHandler, DialPlan: &b.dialPlan, // pointer because it can't be copied ControlKnobs: b.sys.ControlKnobs(), Shutdown: ccShutdown, @@ -2623,31 +2644,33 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { } } if prefs.Valid() { - for _, r := range prefs.AdvertiseRoutes().All() { - if r.Bits() == 0 { - // When offering a default route to the world, we - // filter out locally reachable LANs, so that the - // default route effectively appears to be a "guest - // wifi": you get internet access, but to additionally - // get LAN access the LAN(s) need to be offered - // explicitly as well. - localInterfaceRoutes, hostIPs, err := interfaceRoutes() - if err != nil { - b.logf("getting local interface routes: %v", err) - continue - } - s, err := shrinkDefaultRoute(r, localInterfaceRoutes, hostIPs) - if err != nil { - b.logf("computing default route filter: %v", err) - continue + if buildfeatures.HasAdvertiseRoutes { + for _, r := range prefs.AdvertiseRoutes().All() { + if r.Bits() == 0 { + // When offering a default route to the world, we + // filter out locally reachable LANs, so that the + // default route effectively appears to be a "guest + // wifi": you get internet access, but to additionally + // get LAN access the LAN(s) need to be offered + // explicitly as well. + localInterfaceRoutes, hostIPs, err := interfaceRoutes() + if err != nil { + b.logf("getting local interface routes: %v", err) + continue + } + s, err := shrinkDefaultRoute(r, localInterfaceRoutes, hostIPs) + if err != nil { + b.logf("computing default route filter: %v", err) + continue + } + localNetsB.AddSet(s) + } else { + localNetsB.AddPrefix(r) + // When advertising a non-default route, we assume + // this is a corporate subnet that should be present + // in the audit logs. 
+ logNetsB.AddPrefix(r) } - localNetsB.AddSet(s) - } else { - localNetsB.AddPrefix(r) - // When advertising a non-default route, we assume - // this is a corporate subnet that should be present - // in the audit logs. - logNetsB.AddPrefix(r) } } @@ -2658,7 +2681,7 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { // The correct filter rules are synthesized by the coordination server // and sent down, but the address needs to be part of the 'local net' for the // filter package to even bother checking the filter rules, so we set them here. - if prefs.AppConnector().Advertise { + if buildfeatures.HasAppConnectors && prefs.AppConnector().Advertise { localNetsB.Add(netip.MustParseAddr("0.0.0.0")) localNetsB.Add(netip.MustParseAddr("::0")) } @@ -3712,6 +3735,9 @@ func (b *LocalBackend) Ping(ctx context.Context, ip netip.Addr, pingType tailcfg } func (b *LocalBackend) pingPeerAPI(ctx context.Context, ip netip.Addr) (peer tailcfg.NodeView, peerBase string, err error) { + if !buildfeatures.HasPeerAPIClient { + return peer, peerBase, feature.ErrUnavailable + } var zero tailcfg.NodeView ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() @@ -4051,6 +4077,9 @@ var exitNodeMisconfigurationWarnable = health.Register(&health.Warnable{ // updateExitNodeUsageWarning updates a warnable meant to notify users of // configuration issues that could break exit node usage. func updateExitNodeUsageWarning(p ipn.PrefsView, state *netmon.State, healthTracker *health.Tracker) { + if !buildfeatures.HasUseExitNode { + return + } var msg string if p.ExitNodeIP().IsValid() || p.ExitNodeID() != "" { warn, _ := netutil.CheckReversePathFiltering(state) @@ -4070,6 +4099,9 @@ func (b *LocalBackend) checkExitNodePrefsLocked(p *ipn.Prefs) error { if !tryingToUseExitNode { return nil } + if !buildfeatures.HasUseExitNode { + return feature.ErrUnavailable + } if err := featureknob.CanUseExitNode(); err != nil { return err @@ -4110,6 +4142,9 @@ func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.P defer unlock() p0 := b.pm.CurrentPrefs() + if !buildfeatures.HasUseExitNode { + return p0, nil + } if v && p0.ExitNodeID() != "" { // Already on. return p0, nil @@ -4240,6 +4275,9 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn // // b.mu must be held. func (b *LocalBackend) changeDisablesExitNodeLocked(prefs ipn.PrefsView, change *ipn.MaskedPrefs) bool { + if !buildfeatures.HasUseExitNode { + return false + } if !change.AutoExitNodeSet && !change.ExitNodeIDSet && !change.ExitNodeIPSet { // The change does not affect exit node usage. return false @@ -4577,6 +4615,9 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) // GetPeerAPIPort returns the port number for the peerapi server // running on the provided IP. 
func (b *LocalBackend) GetPeerAPIPort(ip netip.Addr) (port uint16, ok bool) { + if !buildfeatures.HasPeerAPIServer { + return 0, false + } b.mu.Lock() defer b.mu.Unlock() for _, pln := range b.peerAPIListeners { @@ -4936,10 +4977,12 @@ func (b *LocalBackend) authReconfig() { // Keep the dialer updated about whether we're supposed to use // an exit node's DNS server (so SOCKS5/HTTP outgoing dials // can use it for name resolution) - if dohURLOK { - b.dialer.SetExitDNSDoH(dohURL) - } else { - b.dialer.SetExitDNSDoH("") + if buildfeatures.HasUseExitNode { + if dohURLOK { + b.dialer.SetExitDNSDoH(dohURL) + } else { + b.dialer.SetExitDNSDoH("") + } } cfg, err := nmcfg.WGCfg(nm, b.logf, flags, prefs.ExitNodeID()) @@ -5064,6 +5107,9 @@ func (b *LocalBackend) TailscaleVarRoot() string { // // b.mu must be held. func (b *LocalBackend) closePeerAPIListenersLocked() { + if !buildfeatures.HasPeerAPIServer { + return + } b.peerAPIServer = nil for _, pln := range b.peerAPIListeners { pln.Close() @@ -5079,6 +5125,9 @@ func (b *LocalBackend) closePeerAPIListenersLocked() { const peerAPIListenAsync = runtime.GOOS == "windows" || runtime.GOOS == "android" func (b *LocalBackend) initPeerAPIListener() { + if !buildfeatures.HasPeerAPIServer { + return + } b.logf("[v1] initPeerAPIListener: entered") b.mu.Lock() defer b.mu.Unlock() @@ -5903,6 +5952,9 @@ func (b *LocalBackend) setNetInfo(ni *tailcfg.NetInfo) { // RefreshExitNode determines which exit node to use based on the current // prefs and netmap and switches to it if needed. func (b *LocalBackend) RefreshExitNode() { + if !buildfeatures.HasUseExitNode { + return + } if b.resolveExitNode() { b.authReconfig() } @@ -5918,6 +5970,9 @@ func (b *LocalBackend) RefreshExitNode() { // // b.mu must not be held. func (b *LocalBackend) resolveExitNode() (changed bool) { + if !buildfeatures.HasUseExitNode { + return false + } b.mu.Lock() defer b.mu.Unlock() @@ -6468,6 +6523,9 @@ func (b *LocalBackend) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpd // // If exitNodeID is the zero valid, it returns "", false. func exitNodeCanProxyDNS(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg.NodeView, exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { + if !buildfeatures.HasUseExitNode { + return "", false + } if exitNodeID.IsZero() { return "", false } @@ -7084,6 +7142,9 @@ var ErrNoPreferredDERP = errors.New("no preferred DERP, try again later") // // b.mu.lock() must be held. func (b *LocalBackend) suggestExitNodeLocked() (response apitype.ExitNodeSuggestionResponse, err error) { + if !buildfeatures.HasUseExitNode { + return response, feature.ErrUnavailable + } lastReport := b.MagicConn().GetLastNetcheckReport(b.ctx) prevSuggestion := b.lastSuggestedExitNode @@ -7101,6 +7162,9 @@ func (b *LocalBackend) suggestExitNodeLocked() (response apitype.ExitNodeSuggest } func (b *LocalBackend) SuggestExitNode() (response apitype.ExitNodeSuggestionResponse, err error) { + if !buildfeatures.HasUseExitNode { + return response, feature.ErrUnavailable + } b.mu.Lock() defer b.mu.Unlock() return b.suggestExitNodeLocked() @@ -7117,6 +7181,9 @@ func (b *LocalBackend) getAllowedSuggestions() set.Set[tailcfg.StableNodeID] { // refreshAllowedSuggestions rebuilds the set of permitted exit nodes // from the current [pkey.AllowedSuggestedExitNodes] value. 
func (b *LocalBackend) refreshAllowedSuggestions() { + if !buildfeatures.HasUseExitNode { + return + } b.allowedSuggestedExitNodesMu.Lock() defer b.allowedSuggestedExitNodesMu.Unlock() b.allowedSuggestedExitNodes = fillAllowedSuggestions(b.polc) diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 95bf350ce..22e965fa6 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -530,6 +530,9 @@ func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, } func (nb *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { + if !buildfeatures.HasUseExitNode { + return "", false + } nb.mu.Lock() defer nb.mu.Unlock() return exitNodeCanProxyDNS(nb.netMap, nb.peers, exitNodeID) @@ -769,18 +772,20 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. // If we're using an exit node and that exit node is new enough (1.19.x+) // to run a DoH DNS proxy, then send all our DNS traffic through it, // unless we find resolvers with UseWithExitNode set, in which case we use that. - if dohURL, ok := exitNodeCanProxyDNS(nm, peers, prefs.ExitNodeID()); ok { - filtered := useWithExitNodeResolvers(nm.DNS.Resolvers) - if len(filtered) > 0 { - addDefault(filtered) - } else { - // If no default global resolvers with the override - // are configured, configure the exit node's resolver. - addDefault([]*dnstype.Resolver{{Addr: dohURL}}) - } + if buildfeatures.HasUseExitNode { + if dohURL, ok := exitNodeCanProxyDNS(nm, peers, prefs.ExitNodeID()); ok { + filtered := useWithExitNodeResolvers(nm.DNS.Resolvers) + if len(filtered) > 0 { + addDefault(filtered) + } else { + // If no default global resolvers with the override + // are configured, configure the exit node's resolver. + addDefault([]*dnstype.Resolver{{Addr: dohURL}}) + } - addSplitDNSRoutes(useWithExitNodeRoutes(nm.DNS.Routes)) - return dcfg + addSplitDNSRoutes(useWithExitNodeRoutes(nm.DNS.Routes)) + return dcfg + } } // If the user has set default resolvers ("override local DNS"), prefer to @@ -788,7 +793,7 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. // node resolvers, use those as the default. if len(nm.DNS.Resolvers) > 0 { addDefault(nm.DNS.Resolvers) - } else { + } else if buildfeatures.HasUseExitNode { if resolvers, ok := wireguardExitNodeDNSResolvers(nm, peers, prefs.ExitNodeID()); ok { addDefault(resolvers) } diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 9ad3e3c36..a045086d4 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -26,6 +26,7 @@ import ( "golang.org/x/net/dns/dnsmessage" "golang.org/x/net/http/httpguts" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" @@ -131,6 +132,9 @@ type peerAPIListener struct { } func (pln *peerAPIListener) Close() error { + if !buildfeatures.HasPeerAPIServer { + return nil + } if pln.ln != nil { return pln.ln.Close() } @@ -138,6 +142,9 @@ func (pln *peerAPIListener) Close() error { } func (pln *peerAPIListener) serve() { + if !buildfeatures.HasPeerAPIServer { + return + } if pln.ln == nil { return } @@ -319,6 +326,9 @@ func peerAPIRequestShouldGetSecurityHeaders(r *http.Request) bool { // // It panics if the path is already registered. 
func RegisterPeerAPIHandler(path string, f func(PeerAPIHandler, http.ResponseWriter, *http.Request)) { + if !buildfeatures.HasPeerAPIServer { + return + } if _, ok := peerAPIHandlers[path]; ok { panic(fmt.Sprintf("duplicate PeerAPI handler %q", path)) } @@ -337,6 +347,10 @@ var ( ) func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasPeerAPIServer { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if err := h.validatePeerAPIRequest(r); err != nil { metricInvalidRequests.Add(1) h.logf("invalid request from %v: %v", h.remoteAddr, err) diff --git a/ipn/ipnlocal/prefs_metrics.go b/ipn/ipnlocal/prefs_metrics.go index fa768ba3c..34c5f5504 100644 --- a/ipn/ipnlocal/prefs_metrics.go +++ b/ipn/ipnlocal/prefs_metrics.go @@ -6,6 +6,7 @@ package ipnlocal import ( "errors" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/tailcfg" "tailscale.com/util/clientmetric" @@ -85,6 +86,9 @@ func (e *prefsMetricsEditEvent) record() error { // false otherwise. The caller is responsible for ensuring that the id belongs to // an exit node. func (e *prefsMetricsEditEvent) exitNodeType(id tailcfg.StableNodeID) (props []exitNodeProperty, isNode bool) { + if !buildfeatures.HasUseExitNode { + return nil, false + } var peer tailcfg.NodeView if peer, isNode = e.node.PeerByStableID(id); isNode { diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 7f6452ad3..d7cd42c75 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -72,7 +72,6 @@ var handler = map[string]LocalAPIHandler{ // The other /localapi/v0/NAME handlers are exact matches and contain only NAME // without a trailing slash: "alpha-set-device-attrs": (*Handler).serveSetDeviceAttrs, // see tailscale/corp#24690 - "bugreport": (*Handler).serveBugReport, "check-ip-forwarding": (*Handler).serveCheckIPForwarding, "check-prefs": (*Handler).serveCheckPrefs, "check-reverse-path-filtering": (*Handler).serveCheckReversePathFiltering, @@ -90,21 +89,17 @@ var handler = map[string]LocalAPIHandler{ "logtap": (*Handler).serveLogTap, "metrics": (*Handler).serveMetrics, "ping": (*Handler).servePing, - "pprof": (*Handler).servePprof, "prefs": (*Handler).servePrefs, "query-feature": (*Handler).serveQueryFeature, "reload-config": (*Handler).reloadConfig, "reset-auth": (*Handler).serveResetAuth, - "set-dns": (*Handler).serveSetDNS, "set-expiry-sooner": (*Handler).serveSetExpirySooner, "set-gui-visible": (*Handler).serveSetGUIVisible, "set-push-device-token": (*Handler).serveSetPushDeviceToken, "set-udp-gro-forwarding": (*Handler).serveSetUDPGROForwarding, - "set-use-exit-node-enabled": (*Handler).serveSetUseExitNodeEnabled, "shutdown": (*Handler).serveShutdown, "start": (*Handler).serveStart, "status": (*Handler).serveStatus, - "suggest-exit-node": (*Handler).serveSuggestExitNode, "update/check": (*Handler).serveUpdateCheck, "upload-client-metrics": (*Handler).serveUploadClientMetrics, "usermetrics": (*Handler).serveUserMetrics, @@ -116,6 +111,17 @@ func init() { if buildfeatures.HasAppConnectors { Register("appc-route-info", (*Handler).serveGetAppcRouteInfo) } + if buildfeatures.HasUseExitNode { + Register("suggest-exit-node", (*Handler).serveSuggestExitNode) + Register("set-use-exit-node-enabled", (*Handler).serveSetUseExitNodeEnabled) + } + if buildfeatures.HasACME { + Register("set-dns", (*Handler).serveSetDNS) + } + if buildfeatures.HasDebug { + Register("bugreport", (*Handler).serveBugReport) + Register("pprof", (*Handler).servePprof) 
+ } } // Register registers a new LocalAPI handler for the given name. @@ -1291,6 +1297,10 @@ func (h *Handler) serveSetGUIVisible(w http.ResponseWriter, r *http.Request) { } func (h *Handler) serveSetUseExitNodeEnabled(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasUseExitNode { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if r.Method != httpm.POST { http.Error(w, "use POST", http.StatusMethodNotAllowed) return @@ -1629,6 +1639,10 @@ func dnsMessageTypeForString(s string) (t dnsmessage.Type, err error) { // serveSuggestExitNode serves a POST endpoint for returning a suggested exit node. func (h *Handler) serveSuggestExitNode(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasUseExitNode { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return diff --git a/ipn/prefs.go b/ipn/prefs.go index a2149950d..8a5b17af6 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -20,6 +20,7 @@ import ( "tailscale.com/atomicfile" "tailscale.com/drive" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn/ipnstate" "tailscale.com/net/netaddr" "tailscale.com/net/tsaddr" @@ -787,6 +788,9 @@ func (p *Prefs) AdvertisesExitNode() bool { // SetAdvertiseExitNode mutates p (if non-nil) to add or remove the two // /0 exit node routes. func (p *Prefs) SetAdvertiseExitNode(runExit bool) { + if !buildfeatures.HasAdvertiseExitNode { + return + } if p == nil { return } diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index a7a8932e8..86f0f5b8c 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -27,6 +27,7 @@ import ( dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dns/publicdns" @@ -530,6 +531,9 @@ func (f *forwarder) send(ctx context.Context, fq *forwardQuery, rr resolverAndDe }() } if strings.HasPrefix(rr.name.Addr, "http://") { + if !buildfeatures.HasPeerAPIClient { + return nil, feature.ErrUnavailable + } return f.sendDoH(ctx, rr.name.Addr, f.dialer.PeerAPIHTTPClient(), fq.packet) } if strings.HasPrefix(rr.name.Addr, "https://") { diff --git a/net/netmon/interfaces_linux.go b/net/netmon/interfaces_linux.go index d0fb15aba..a9b93c0a1 100644 --- a/net/netmon/interfaces_linux.go +++ b/net/netmon/interfaces_linux.go @@ -22,6 +22,7 @@ import ( "github.com/mdlayher/netlink" "go4.org/mem" "golang.org/x/sys/unix" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/netaddr" "tailscale.com/util/lineiter" ) @@ -41,6 +42,9 @@ ens18 00000000 0100000A 0003 0 0 0 00000000 ens18 0000000A 00000000 0001 0 0 0 0000FFFF 0 0 0 */ func likelyHomeRouterIPLinux() (ret netip.Addr, myIP netip.Addr, ok bool) { + if !buildfeatures.HasPortMapper { + return + } if procNetRouteErr.Load() { // If we failed to read /proc/net/route previously, don't keep trying. 
return ret, myIP, false diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index fcac9c4ee..f7d1b1107 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -14,6 +14,7 @@ import ( "sync" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" @@ -181,6 +182,9 @@ func (m *Monitor) SetTailscaleInterfaceName(ifName string) { // It's the same as interfaces.LikelyHomeRouterIP, but it caches the // result until the monitor detects a network change. func (m *Monitor) GatewayAndSelfIP() (gw, myIP netip.Addr, ok bool) { + if !buildfeatures.HasPortMapper { + return + } if m.static { return } diff --git a/net/netmon/state.go b/net/netmon/state.go index cdb427d47..73497e93f 100644 --- a/net/netmon/state.go +++ b/net/netmon/state.go @@ -573,6 +573,9 @@ var disableLikelyHomeRouterIPSelf = envknob.RegisterBool("TS_DEBUG_DISABLE_LIKEL // the LAN using that gateway. // This is used as the destination for UPnP, NAT-PMP, PCP, etc queries. func LikelyHomeRouterIP() (gateway, myIP netip.Addr, ok bool) { + if !buildfeatures.HasPortMapper { + return + } // If we don't have a way to get the home router IP, then we can't do // anything; just return. if likelyHomeRouterIP == nil { diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index 024c6dc78..9368d1c4e 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -20,6 +20,7 @@ import ( "go4.org/mem" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/netaddr" "tailscale.com/net/neterror" "tailscale.com/net/netmon" @@ -262,10 +263,13 @@ func NewClient(c Config) *Client { panic("nil EventBus") } ret := &Client{ - logf: c.Logf, - netMon: c.NetMon, - ipAndGateway: netmon.LikelyHomeRouterIP, // TODO(bradfitz): move this to method on netMon - onChange: c.OnChange, + logf: c.Logf, + netMon: c.NetMon, + onChange: c.OnChange, + } + if buildfeatures.HasPortMapper { + // TODO(bradfitz): move this to method on netMon + ret.ipAndGateway = netmon.LikelyHomeRouterIP } ret.pubClient = c.EventBus.Client("portmapper") ret.updates = eventbus.Publish[portmappertype.Mapping](ret.pubClient) diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index 87b58f2a0..a0e2a11a4 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -19,6 +19,8 @@ import ( "time" "github.com/gaissmai/bart" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/dnscache" "tailscale.com/net/netknob" "tailscale.com/net/netmon" @@ -135,6 +137,9 @@ func (d *Dialer) TUNName() string { // // For example, "http://100.68.82.120:47830/dns-query". func (d *Dialer) SetExitDNSDoH(doh string) { + if !buildfeatures.HasUseExitNode { + return + } d.mu.Lock() defer d.mu.Unlock() if d.exitDNSDoHBase == doh { @@ -372,7 +377,7 @@ func (d *Dialer) userDialResolve(ctx context.Context, network, addr string) (net } var r net.Resolver - if exitDNSDoH != "" { + if buildfeatures.HasUseExitNode && buildfeatures.HasPeerAPIClient && exitDNSDoH != "" { r.PreferGo = true r.Dial = func(ctx context.Context, network, address string) (net.Conn, error) { return &dohConn{ @@ -509,6 +514,9 @@ func (d *Dialer) UserDial(ctx context.Context, network, addr string) (net.Conn, // network must a "tcp" type, and addr must be an ip:port. Name resolution // is not supported. 
func (d *Dialer) dialPeerAPI(ctx context.Context, network, addr string) (net.Conn, error) { + if !buildfeatures.HasPeerAPIClient { + return nil, feature.ErrUnavailable + } switch network { case "tcp", "tcp6", "tcp4": default: @@ -551,6 +559,9 @@ func (d *Dialer) getPeerDialer() *net.Dialer { // The returned Client must not be mutated; it's owned by the Dialer // and shared by callers. func (d *Dialer) PeerAPIHTTPClient() *http.Client { + if !buildfeatures.HasPeerAPIClient { + panic("unreachable") + } d.peerClientOnce.Do(func() { t := http.DefaultTransport.(*http.Transport).Clone() t.Dial = nil From 24e38eb7294a057776a9942185460456ca1ebf95 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 2 Oct 2025 12:01:59 -0700 Subject: [PATCH 1475/1708] control/controlclient,health,ipn/ipnlocal,health: fix deadlock by deleting health reporting A recent change (009d702adfa0fc) introduced a deadlock where the /machine/update-health network request to report the client's health status update to the control plane was moved to being synchronous within the eventbus's pump machinery. I started to instead make the health reporting be async, but then we realized in the three years since we added that, it's barely been used and doesn't pay for itself, for how many HTTP requests it makes. Instead, delete it all and replace it with a c2n handler, which provides much more helpful information. Fixes tailscale/corp#32952 Change-Id: I9e8a5458269ebfdda1c752d7bbb8af2780d71b04 Signed-off-by: Brad Fitzpatrick --- control/controlclient/auto.go | 31 ------------------------- control/controlclient/direct.go | 41 --------------------------------- health/state.go | 3 +++ ipn/ipnlocal/c2n.go | 10 ++++++++ tailcfg/tailcfg.go | 6 ++++- 5 files changed, 18 insertions(+), 73 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 224838d56..9f5bf38ae 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -12,7 +12,6 @@ import ( "sync/atomic" "time" - "tailscale.com/health" "tailscale.com/net/sockstats" "tailscale.com/tailcfg" "tailscale.com/tstime" @@ -23,7 +22,6 @@ import ( "tailscale.com/types/structs" "tailscale.com/util/backoff" "tailscale.com/util/clientmetric" - "tailscale.com/util/eventbus" "tailscale.com/util/execqueue" ) @@ -123,8 +121,6 @@ type Auto struct { observerQueue execqueue.ExecQueue shutdownFn func() // to be called prior to shutdown or nil - eventSubs eventbus.Monitor - mu sync.Mutex // mutex guards the following fields wantLoggedIn bool // whether the user wants to be logged in per last method call @@ -195,10 +191,6 @@ func NewNoStart(opts Options) (_ *Auto, err error) { shutdownFn: opts.Shutdown, } - // Set up eventbus client and subscriber - ec := opts.Bus.Client("controlClient.Auto") - c.eventSubs = ec.Monitor(c.consumeEventbusTopics(ec)) - c.authCtx, c.authCancel = context.WithCancel(context.Background()) c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, opts.Logf) @@ -208,27 +200,6 @@ func NewNoStart(opts Options) (_ *Auto, err error) { return c, nil } -// consumeEventbusTopics consumes events from all relevant -// [eventbus.Subscriber]'s and passes them to their related handler. Events are -// always handled in the order they are received, i.e. the next event is not -// read until the previous event's handler has returned. It returns when the -// [eventbus.Client] is closed. 
-func (c *Auto) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { - healthChangeSub := eventbus.Subscribe[health.Change](ec) - return func(cli *eventbus.Client) { - for { - select { - case <-cli.Done(): - return - case change := <-healthChangeSub.Events(): - if change.WarnableChanged { - c.direct.ReportWarnableChange(change.Warnable, change.UnhealthyState) - } - } - } - } -} - // SetPaused controls whether HTTP activity should be paused. // // The client can be paused and unpaused repeatedly, unlike Start and Shutdown, which can only be used once. @@ -782,8 +753,6 @@ func (c *Auto) UpdateEndpoints(endpoints []tailcfg.Endpoint) { } func (c *Auto) Shutdown() { - c.eventSubs.Close() - c.mu.Lock() if c.closed { c.mu.Unlock() diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index c77e93e1c..de577bea4 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1678,47 +1678,6 @@ func postPingResult(start time.Time, logf logger.Logf, c *http.Client, pr *tailc return nil } -// ReportWarnableChange reports to the control plane a change to this node's -// health. w must be non-nil. us can be nil to indicate a healthy state for w. -func (c *Direct) ReportWarnableChange(w *health.Warnable, us *health.UnhealthyState) { - if w == health.NetworkStatusWarnable || w == health.IPNStateWarnable || w == health.LoginStateWarnable { - // We don't report these. These include things like the network is down - // (in which case we can't report anyway) or the user wanted things - // stopped, as opposed to the more unexpected failure types in the other - // subsystems. - return - } - np, err := c.getNoiseClient() - if err != nil { - // Don't report errors to control if the server doesn't support noise. - return - } - nodeKey, ok := c.GetPersist().PublicNodeKeyOK() - if !ok { - return - } - if c.panicOnUse { - panic("tainted client") - } - // TODO(angott): at some point, update `Subsys` in the request to be `Warnable` - req := &tailcfg.HealthChangeRequest{ - Subsys: string(w.Code), - NodeKey: nodeKey, - } - if us != nil { - req.Error = us.Text - } - - // Best effort, no logging: - ctx, cancel := context.WithTimeout(c.closedCtx, 5*time.Second) - defer cancel() - res, err := np.Post(ctx, "/machine/update-health", nodeKey, req) - if err != nil { - return - } - res.Body.Close() -} - // SetDeviceAttrs does a synchronous call to the control plane to update // the node's attributes. // diff --git a/health/state.go b/health/state.go index 116518629..2efff92b1 100644 --- a/health/state.go +++ b/health/state.go @@ -14,6 +14,9 @@ import ( // State contains the health status of the backend, and is // provided to the client UI via LocalAPI through ipn.Notify. +// +// It is also exposed via c2n for debugging purposes, so try +// not to change its structure too gratuitously. type State struct { // Each key-value pair in Warnings represents a Warnable that is currently // unhealthy. If a Warnable is healthy, it will not be present in this map. 
diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 4b5b581aa..0c228060f 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -18,6 +18,7 @@ import ( "tailscale.com/control/controlclient" "tailscale.com/feature" "tailscale.com/feature/buildfeatures" + "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/net/sockstats" "tailscale.com/tailcfg" @@ -63,6 +64,7 @@ func init() { RegisterC2N("/debug/component-logging", handleC2NDebugComponentLogging) RegisterC2N("/debug/logheap", handleC2NDebugLogHeap) RegisterC2N("/debug/netmap", handleC2NDebugNetMap) + RegisterC2N("/debug/health", handleC2NDebugHealth) } if runtime.GOOS == "linux" && buildfeatures.HasOSRouter { RegisterC2N("POST /netfilter-kind", handleC2NSetNetfilterKind) @@ -145,6 +147,14 @@ func handleC2NLogtailFlush(b *LocalBackend, w http.ResponseWriter, r *http.Reque } } +func handleC2NDebugHealth(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + var st *health.State + if buildfeatures.HasDebug && b.health != nil { + st = b.health.CurrentState() + } + writeJSON(w, st) +} + func handleC2NDebugNetMap(b *LocalBackend, w http.ResponseWriter, r *http.Request) { if !buildfeatures.HasDebug { http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 88cda044f..01ecc96b3 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -172,7 +172,8 @@ type CapabilityVersion int // - 125: 2025-08-11: dnstype.Resolver adds UseWithExitNode field. // - 126: 2025-09-17: Client uses seamless key renewal unless disabled by control (tailscale/corp#31479) // - 127: 2025-09-19: can handle C2N /debug/netmap. -const CurrentCapabilityVersion CapabilityVersion = 127 +// - 128: 2025-10-02: can handle C2N /debug/health. +const CurrentCapabilityVersion CapabilityVersion = 128 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -2734,6 +2735,9 @@ type SetDNSResponse struct{} // node health changes to: // // POST https:///machine/update-health. +// +// As of 2025-10-02, we stopped sending this to the control plane proactively. +// It was never useful enough with its current design and needs more thought. type HealthChangeRequest struct { Subsys string // a health.Subsystem value in string form Error string // or empty if cleared From cd523eae52c220ed8731cee349efd77c1aa4a5fe Mon Sep 17 00:00:00 2001 From: Simon Law Date: Thu, 2 Oct 2025 16:01:55 -0700 Subject: [PATCH 1476/1708] ipn/ipnlocal: introduce the concept of client-side-reachability (#17367) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The control plane will sometimes determine that a node is not online, while the node is still able to connect to its peers. This patch doesn’t solve this problem, but it does mitigate it. This PR introduces the `client-side-reachability` node attribute that switches the node to completely ignore the online signal from control. In the future, the client itself should collect reachability data from active Wireguard flows and Tailscale pings. 
Updates #17366 Updates tailscale/corp#30379 Updates tailscale/corp#32686 Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 12 +++++- ipn/ipnlocal/node_backend.go | 34 ++++++++++++++++ ipn/ipnlocal/node_backend_test.go | 68 +++++++++++++++++++++++++++++++ tailcfg/tailcfg.go | 6 +++ 4 files changed, 118 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 38f98f8fb..199ee7248 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7240,6 +7240,10 @@ func suggestExitNode(report *netcheck.Report, nb *nodeBackend, prevSuggestion ta // the lowest latency to this device. For peers without a DERP home, we look for // geographic proximity to this device's DERP home. func suggestExitNodeUsingDERP(report *netcheck.Report, nb *nodeBackend, prevSuggestion tailcfg.StableNodeID, selectRegion selectRegionFunc, selectNode selectNodeFunc, allowList set.Set[tailcfg.StableNodeID]) (res apitype.ExitNodeSuggestionResponse, err error) { + // TODO(sfllaw): Context needs to be plumbed down here to support + // reachability testing. + ctx := context.TODO() + netMap := nb.NetMap() if report == nil || report.PreferredDERP == 0 || netMap == nil || netMap.DERPMap == nil { return res, ErrNoPreferredDERP @@ -7248,7 +7252,7 @@ func suggestExitNodeUsingDERP(report *netcheck.Report, nb *nodeBackend, prevSugg // since the netmap doesn't include delta updates (e.g., home DERP or Online // status changes) from the control plane since the last full update. candidates := nb.AppendMatchingPeers(nil, func(peer tailcfg.NodeView) bool { - if !peer.Valid() || !peer.Online().Get() { + if !peer.Valid() || !nb.PeerIsReachable(ctx, peer) { return false } if allowList != nil && !allowList.Contains(peer.StableID()) { @@ -7367,6 +7371,10 @@ var ErrNoNetMap = errors.New("no network map, try again later") // the node’s [tailcfg.Location]. To be eligible for consideration, the node // must have NodeAttrSuggestExitNode in its CapMap. func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, allowed set.Set[tailcfg.StableNodeID]) (apitype.ExitNodeSuggestionResponse, error) { + // TODO(sfllaw): Context needs to be plumbed down here to support + // reachability testing. + ctx := context.TODO() + nm := nb.NetMap() if nm == nil { return apitype.ExitNodeSuggestionResponse{}, ErrNoNetMap @@ -7386,7 +7394,7 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, allowed set.Set[tailcf if !p.Valid() { return false } - if !p.Online().Get() { + if !nb.PeerIsReachable(ctx, p) { return false } if allowed != nil && !allowed.Contains(p.StableID()) { diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 22e965fa6..3408d4cbb 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -362,6 +362,40 @@ func (nb *nodeBackend) PeerAPIBase(p tailcfg.NodeView) string { return peerAPIBase(nm, p) } +// PeerIsReachable reports whether the current node can reach p. If the ctx is +// done, this function may return a result based on stale reachability data. +func (nb *nodeBackend) PeerIsReachable(ctx context.Context, p tailcfg.NodeView) bool { + if !nb.SelfHasCap(tailcfg.NodeAttrClientSideReachability) { + // Legacy behavior is to always trust the control plane, which + // isn’t always correct because the peer could be slow to check + // in so that control marks it as offline. + // See tailscale/corp#32686. 
+ return p.Online().Get() + } + + nb.mu.Lock() + nm := nb.netMap + nb.mu.Unlock() + + if self := nm.SelfNode; self.Valid() && self.ID() == p.ID() { + // This node can always reach itself. + return true + } + return nb.peerIsReachable(ctx, p) +} + +func (nb *nodeBackend) peerIsReachable(ctx context.Context, p tailcfg.NodeView) bool { + // TODO(sfllaw): The following does not actually test for client-side + // reachability. This would require a mechanism that tracks whether the + // current node can actually reach this peer, either because they are + // already communicating or because they can ping each other. + // + // Instead, it makes the client ignore p.Online completely. + // + // See tailscale/corp#32686. + return true +} + func nodeIP(n tailcfg.NodeView, pred func(netip.Addr) bool) netip.Addr { for _, pfx := range n.Addresses().All() { if pfx.IsSingleIP() && pred(pfx.Addr()) { diff --git a/ipn/ipnlocal/node_backend_test.go b/ipn/ipnlocal/node_backend_test.go index b305837fd..f6698bd4b 100644 --- a/ipn/ipnlocal/node_backend_test.go +++ b/ipn/ipnlocal/node_backend_test.go @@ -9,7 +9,10 @@ import ( "testing" "time" + "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/types/netmap" + "tailscale.com/types/ptr" "tailscale.com/util/eventbus" ) @@ -122,3 +125,68 @@ func TestNodeBackendConcurrentReadyAndShutdown(t *testing.T) { nb.Wait(context.Background()) } + +func TestNodeBackendReachability(t *testing.T) { + for _, tc := range []struct { + name string + + // Cap sets [tailcfg.NodeAttrClientSideReachability] on the self + // node. + // + // When disabled, the client relies on the control plane sending + // an accurate peer.Online flag. When enabled, the client + // ignores peer.Online and determines whether it can reach the + // peer node. + cap bool + + peer tailcfg.Node + want bool + }{ + { + name: "disabled/offline", + cap: false, + peer: tailcfg.Node{ + Online: ptr.To(false), + }, + want: false, + }, + { + name: "disabled/online", + cap: false, + peer: tailcfg.Node{ + Online: ptr.To(true), + }, + want: true, + }, + { + name: "enabled/offline", + cap: true, + peer: tailcfg.Node{ + Online: ptr.To(false), + }, + want: true, + }, + { + name: "enabled/online", + cap: true, + peer: tailcfg.Node{ + Online: ptr.To(true), + }, + want: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) + nb.netMap = &netmap.NetworkMap{} + if tc.cap { + nb.netMap.AllCaps.Make() + nb.netMap.AllCaps.Add(tailcfg.NodeAttrClientSideReachability) + } + + got := nb.PeerIsReachable(t.Context(), tc.peer.View()) + if got != tc.want { + t.Errorf("got %v, want %v", got, tc.want) + } + }) + } +} diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 01ecc96b3..96e7fbbd9 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2692,6 +2692,12 @@ const ( // numbers, apostrophe, spaces, and hyphens. This may not be true for the default. // Values can look like "foo.com" or "Foo's Test Tailnet - Staging". NodeAttrTailnetDisplayName NodeCapability = "tailnet-display-name" + + // NodeAttrClientSideReachability configures the node to determine + // reachability itself when choosing connectors. When absent, the + // default behavior is to trust the control plane when it claims that a + // node is no longer online, but that is not a reliable signal. + NodeAttrClientSideReachability = "client-side-reachability" ) // SetDNSRequest is a request to add a DNS record. 
From 206d98e84be6cc309f3fbe9eb34844f0c7883a28 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 2 Oct 2025 18:29:54 -0700 Subject: [PATCH 1477/1708] control/controlclient: restore aggressive Direct.Close teardown In the earlier http2 package migration (1d93bdce20ddd2, #17394) I had removed Direct.Close's tracking of the connPool, thinking it wasn't necessary. Some tests (in another repo) are strict and like it to tear down the world and wait, to check for leaked goroutines. And they caught this letting some goroutines idle past Close, even if they'd eventually close down on their own. This restores the connPool accounting and the aggressife close. Updates #17305 Updates #17394 Change-Id: I5fed283a179ff7c3e2be104836bbe58b05130cc7 Signed-off-by: Brad Fitzpatrick --- control/controlclient/direct.go | 4 ++-- control/ts2021/client.go | 32 +++++++++++++++++++++++++++----- control/ts2021/conn.go | 14 ++++++++++++-- util/set/handle.go | 16 ++++++++++++---- 4 files changed, 53 insertions(+), 13 deletions(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index de577bea4..482affe33 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -64,7 +64,7 @@ import ( // Direct is the client that connects to a tailcontrol server for a node. type Direct struct { - httpc *http.Client // HTTP client used to talk to tailcontrol + httpc *http.Client // HTTP client used to do TLS requests to control (just https://controlplane.tailscale.com/key?v=123) interceptedDial *atomic.Bool // if non-nil, pointer to bool whether ScreenTime intercepted our dial dialer *tsdial.Dialer dnsCache *dnscache.Resolver @@ -97,7 +97,7 @@ type Direct struct { serverNoiseKey key.MachinePublic sfGroup singleflight.Group[struct{}, *ts2021.Client] // protects noiseClient creation. - noiseClient *ts2021.Client + noiseClient *ts2021.Client // also protected by mu persist persist.PersistView authKey string diff --git a/control/ts2021/client.go b/control/ts2021/client.go index 9a9a3ded8..e0b82b89c 100644 --- a/control/ts2021/client.go +++ b/control/ts2021/client.go @@ -28,6 +28,8 @@ import ( "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/mak" + "tailscale.com/util/set" ) // Client provides a http.Client to connect to tailcontrol over @@ -44,8 +46,9 @@ type Client struct { httpsPort string // the fallback Noise-over-https port or empty if none // mu protects the following - mu sync.Mutex - closed bool + mu sync.Mutex + closed bool + connPool set.HandleSet[*Conn] // all live connections } // ClientOpts contains options for the [NewClient] function. All fields are @@ -175,9 +178,15 @@ func NewClient(opts ClientOpts) (*Client, error) { // It is a no-op and returns nil if the connection is already closed. func (nc *Client) Close() error { nc.mu.Lock() - defer nc.mu.Unlock() + live := nc.connPool nc.closed = true + nc.mu.Unlock() + + for _, c := range live { + c.Close() + } nc.Client.CloseIdleConnections() + return nil } @@ -249,18 +258,31 @@ func (nc *Client) dial(ctx context.Context) (*Conn, error) { return nil, err } - ncc := NewConn(clientConn.Conn) - nc.mu.Lock() + + handle := set.NewHandle() + ncc := NewConn(clientConn.Conn, func() { nc.noteConnClosed(handle) }) + mak.Set(&nc.connPool, handle, ncc) + if nc.closed { nc.mu.Unlock() ncc.Close() // Needs to be called without holding the lock. 
return nil, errors.New("noise client closed") } + defer nc.mu.Unlock() return ncc, nil } +// noteConnClosed notes that the *Conn with the given handle has closed and +// should be removed from the live connPool (which is usually of size 0 or 1, +// except perhaps briefly 2 during a network failure and reconnect). +func (nc *Client) noteConnClosed(handle set.Handle) { + nc.mu.Lock() + defer nc.mu.Unlock() + nc.connPool.Delete(handle) +} + // post does a POST to the control server at the given path, JSON-encoding body. // The provided nodeKey is an optional load balancing hint. func (nc *Client) Post(ctx context.Context, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { diff --git a/control/ts2021/conn.go b/control/ts2021/conn.go index ecf184d3c..52d663272 100644 --- a/control/ts2021/conn.go +++ b/control/ts2021/conn.go @@ -31,6 +31,7 @@ import ( type Conn struct { *controlbase.Conn + onClose func() // or nil readHeaderOnce sync.Once // guards init of reader field reader io.Reader // (effectively Conn.Reader after header) earlyPayloadReady chan struct{} // closed after earlyPayload is set (including set to nil) @@ -44,11 +45,12 @@ type Conn struct { // http2.ClientConn will be created that reads from the returned Conn. // // connID should be a unique ID for this connection. When the Conn is closed, -// the onClose function will be called with the connID if it is non-nil. -func NewConn(conn *controlbase.Conn) *Conn { +// the onClose function will be called if it is non-nil. +func NewConn(conn *controlbase.Conn, onClose func()) *Conn { return &Conn{ Conn: conn, earlyPayloadReady: make(chan struct{}), + onClose: sync.OnceFunc(onClose), } } @@ -103,6 +105,14 @@ func (c *Conn) Read(p []byte) (n int, err error) { return c.reader.Read(p) } +// Close closes the connection. +func (c *Conn) Close() error { + if c.onClose != nil { + defer c.onClose() + } + return c.Conn.Close() +} + // readHeader reads the optional "early payload" from the server that arrives // after the Noise handshake but before the HTTP/2 session begins. // diff --git a/util/set/handle.go b/util/set/handle.go index 471ceeba2..9c6b6dab0 100644 --- a/util/set/handle.go +++ b/util/set/handle.go @@ -9,20 +9,28 @@ package set type HandleSet[T any] map[Handle]T // Handle is an opaque comparable value that's used as the map key in a -// HandleSet. The only way to get one is to call HandleSet.Add. +// HandleSet. type Handle struct { v *byte } +// NewHandle returns a new handle value. +func NewHandle() Handle { + return Handle{new(byte)} +} + // Add adds the element (map value) e to the set. // -// It returns the handle (map key) with which e can be removed, using a map -// delete. +// It returns a new handle (map key) with which e can be removed, using a map +// delete or the [HandleSet.Delete] method. func (s *HandleSet[T]) Add(e T) Handle { - h := Handle{new(byte)} + h := NewHandle() if *s == nil { *s = make(HandleSet[T]) } (*s)[h] = e return h } + +// Delete removes the element with handle h from the set. +func (s HandleSet[T]) Delete(h Handle) { delete(s, h) } From 304dabce17cbde7698568c8144159c2b4f8ad9b1 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 3 Oct 2025 14:08:50 +0100 Subject: [PATCH 1478/1708] ipn/ipnauth: fix a null pointer panic in GetConnIdentity When running integration tests on macOS, we get a panic from a nil pointer dereference when calling `ci.creds.PID()`. 
This panic occurs because the `ci.creds != nil` check is insufficient after a recent refactoring (c45f881) that changed `ci.creds` from a pointer to the `PeerCreds` interface. Now `ci.creds` always compares as non-nil, so we enter this block even when the underlying value is nil. The integration tests fail on macOS when `peercred.Get()` returns the error `unix.GetsockoptInt: socket is not connected`. This error isn't new, and the previous code was ignoring it correctly. Since we trust that `peercred` returns either a usable value or an error, checking for a nil error is a sufficient and correct gate to prevent the method call and avoid the panic. Fixes #17421 Signed-off-by: Alex Chan --- ipn/ipnauth/ipnauth_unix_creds.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ipn/ipnauth/ipnauth_unix_creds.go b/ipn/ipnauth/ipnauth_unix_creds.go index 8ce2ac8a4..89a9ceaa9 100644 --- a/ipn/ipnauth/ipnauth_unix_creds.go +++ b/ipn/ipnauth/ipnauth_unix_creds.go @@ -18,12 +18,13 @@ import ( func GetConnIdentity(_ logger.Logf, c net.Conn) (ci *ConnIdentity, err error) { ci = &ConnIdentity{conn: c, notWindows: true} _, ci.isUnixSock = c.(*net.UnixConn) - if ci.creds, err = peercred.Get(c); ci.creds != nil { + if creds, err := peercred.Get(c); err == nil { + ci.creds = creds ci.pid, _ = ci.creds.PID() } else if err == peercred.ErrNotImplemented { // peercred.Get is not implemented on this OS (such as OpenBSD) // Just leave creds as nil, as documented. - } else if err != nil { + } else { return nil, err } return ci, nil From 8d4ea55cc126a0ca3f7aacb78bc9cdd1b3924d48 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Fri, 3 Oct 2025 17:19:38 +0100 Subject: [PATCH 1479/1708] cmd/k8s-proxy: switching to using ipn/store/kubestore (#17402) kubestore init function has now been moved to a more explicit path of ipn/store/kubestore meaning we can now avoid the generic import of feature/condregister. Updates #12614 Signed-off-by: chaosinthecrd --- cmd/k8s-proxy/k8s-proxy.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index 57a2632e2..9b2bb6749 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -31,10 +31,12 @@ import ( "k8s.io/utils/strings/slices" "tailscale.com/client/local" "tailscale.com/cmd/k8s-proxy/internal/config" - _ "tailscale.com/feature/condregister" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/store" + + // we need to import this package so that the `kube:` ipn store gets registered + _ "tailscale.com/ipn/store/kubestore" apiproxy "tailscale.com/k8s-operator/api-proxy" "tailscale.com/kube/certs" healthz "tailscale.com/kube/health" From 59a39841c371ff03f8a52b7d7a6b0b2207b83d4f Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 3 Oct 2025 12:48:22 +0100 Subject: [PATCH 1480/1708] tstest/integration: mark TestClientSideJailing as flaky Updates #17419 Signed-off-by: Alex Chan --- tstest/integration/integration_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index c274c31a9..481de57fd 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -828,6 +828,7 @@ func TestOneNodeUpWindowsStyle(t *testing.T) { // jailed node cannot initiate connections to the other node however the other // node can initiate connections to the jailed node. 
func TestClientSideJailing(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17419") tstest.Shard(t) tstest.Parallel(t) env := NewTestEnv(t) From f42be719de9ef38d1dc22ea48f590a01a227bfe5 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 2 Oct 2025 22:04:12 -0700 Subject: [PATCH 1481/1708] all: use buildfeature constants in a few more places Saves 21 KB. Updates #12614 Change-Id: I0cd3e735937b0f5c0fcc9f09a24476b1c4ac9a15 Signed-off-by: Brad Fitzpatrick --- cmd/stund/depaware.txt | 2 +- envknob/envknob.go | 18 +++++++++++-- ipn/ipnauth/ipnauth.go | 8 ++++++ ipn/ipnauth/policy.go | 4 +++ ipn/ipnlocal/local.go | 60 ++++++++++++++++++++++++------------------ 5 files changed, 63 insertions(+), 29 deletions(-) diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index a5e4b9ba3..5eadfc0d1 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -51,7 +51,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com from tailscale.com/version tailscale.com/envknob from tailscale.com/tsweb+ tailscale.com/feature from tailscale.com/tsweb - tailscale.com/feature/buildfeatures from tailscale.com/feature + tailscale.com/feature/buildfeatures from tailscale.com/feature+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/metrics from tailscale.com/net/stunserver+ tailscale.com/net/netaddr from tailscale.com/net/tsaddr diff --git a/envknob/envknob.go b/envknob/envknob.go index e581eb27e..9dea8f74d 100644 --- a/envknob/envknob.go +++ b/envknob/envknob.go @@ -32,6 +32,7 @@ import ( "sync/atomic" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/kube/kubetypes" "tailscale.com/types/opt" "tailscale.com/version" @@ -463,7 +464,12 @@ var allowRemoteUpdate = RegisterBool("TS_ALLOW_ADMIN_CONSOLE_REMOTE_UPDATE") // AllowsRemoteUpdate reports whether this node has opted-in to letting the // Tailscale control plane initiate a Tailscale update (e.g. on behalf of an // admin on the admin console). -func AllowsRemoteUpdate() bool { return allowRemoteUpdate() } +func AllowsRemoteUpdate() bool { + if !buildfeatures.HasClientUpdate { + return false + } + return allowRemoteUpdate() +} // SetNoLogsNoSupport enables no-logs-no-support mode. func SetNoLogsNoSupport() { @@ -474,6 +480,9 @@ func SetNoLogsNoSupport() { var notInInit atomic.Bool func assertNotInInit() { + if !buildfeatures.HasDebug { + return + } if notInInit.Load() { return } @@ -533,6 +542,11 @@ func ApplyDiskConfigError() error { return applyDiskConfigErr } // for App Store builds // - /etc/tailscale/tailscaled-env.txt for tailscaled-on-macOS (homebrew, etc) func ApplyDiskConfig() (err error) { + if runtime.GOOS == "linux" && !(buildfeatures.HasDebug || buildfeatures.HasSynology) { + // This function does nothing on Linux, unless you're + // using TS_DEBUG_ENV_FILE or are on Synology. 
+ return nil + } var f *os.File defer func() { if err != nil { @@ -593,7 +607,7 @@ func getPlatformEnvFiles() []string { filepath.Join(os.Getenv("ProgramData"), "Tailscale", "tailscaled-env.txt"), } case "linux": - if distro.Get() == distro.Synology { + if buildfeatures.HasSynology && distro.Get() == distro.Synology { return []string{"/etc/tailscale/tailscaled-env.txt"} } case "darwin": diff --git a/ipn/ipnauth/ipnauth.go b/ipn/ipnauth/ipnauth.go index 1395a39ae..497f30f8c 100644 --- a/ipn/ipnauth/ipnauth.go +++ b/ipn/ipnauth/ipnauth.go @@ -15,6 +15,7 @@ import ( "strconv" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/safesocket" "tailscale.com/types/logger" @@ -77,6 +78,13 @@ type ConnIdentity struct { // It's suitable for passing to LookupUserFromID (os/user.LookupId) on any // operating system. func (ci *ConnIdentity) WindowsUserID() ipn.WindowsUserID { + if !buildfeatures.HasDebug && runtime.GOOS != "windows" { + // This function is only implemented on non-Windows for simulating + // Windows in tests. But that test (per comments below) is broken + // anyway. So disable this testing path in non-debug builds + // and just do the thing that optimizes away. + return "" + } if envknob.GOOS() != "windows" { return "" } diff --git a/ipn/ipnauth/policy.go b/ipn/ipnauth/policy.go index 42366dbd9..eeee32435 100644 --- a/ipn/ipnauth/policy.go +++ b/ipn/ipnauth/policy.go @@ -8,6 +8,7 @@ import ( "fmt" "tailscale.com/client/tailscale/apitype" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/tailcfg" "tailscale.com/util/syspolicy/pkey" @@ -51,6 +52,9 @@ func (a actorWithPolicyChecks) CheckProfileAccess(profile ipn.LoginProfileView, // TODO(nickkhyl): unexport it when we move [ipn.Actor] implementations from [ipnserver] // and corp to this package. func CheckDisconnectPolicy(actor Actor, profile ipn.LoginProfileView, reason string, auditFn AuditLogFunc) error { + if !buildfeatures.HasSystemPolicy { + return nil + } if alwaysOn, _ := policyclient.Get().GetBoolean(pkey.AlwaysOn, false); !alwaysOn { return nil } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 199ee7248..7488a06a9 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1135,7 +1135,7 @@ func (b *LocalBackend) Shutdown() { } func (b *LocalBackend) awaitNoGoroutinesInTest() { - if !testenv.InTest() { + if !buildfeatures.HasDebug || !testenv.InTest() { return } ctx, cancel := context.WithTimeout(context.Background(), 8*time.Second) @@ -1836,6 +1836,9 @@ var preferencePolicies = []preferencePolicyInfo{ // // b.mu must be held. func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { + if !buildfeatures.HasSystemPolicy { + return false + } if controlURL, err := b.polc.GetString(pkey.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { prefs.ControlURL = controlURL anyChange = true @@ -5328,7 +5331,7 @@ func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneC NetfilterKind: netfilterKind, } - if distro.Get() == distro.Synology { + if buildfeatures.HasSynology && distro.Get() == distro.Synology { // Issue 1995: we don't use iptables on Synology. rs.NetfilterMode = preftype.NetfilterOff } @@ -5339,7 +5342,7 @@ func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneC // likely to break some functionality, but if the user expressed a // preference for routing remotely, we want to avoid leaking // traffic at the expense of functionality. 
- if prefs.ExitNodeID() != "" || prefs.ExitNodeIP().IsValid() { + if buildfeatures.HasUseExitNode && (prefs.ExitNodeID() != "" || prefs.ExitNodeIP().IsValid()) { var default4, default6 bool for _, route := range rs.Routes { switch route { @@ -5411,7 +5414,7 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip hi.RoutableIPs = prefs.AdvertiseRoutes().AsSlice() hi.RequestTags = prefs.AdvertiseTags().AsSlice() hi.ShieldsUp = prefs.ShieldsUp() - hi.AllowsUpdate = envknob.AllowsRemoteUpdate() || prefs.AutoUpdate().Apply.EqualBool(true) + hi.AllowsUpdate = buildfeatures.HasClientUpdate && (envknob.AllowsRemoteUpdate() || prefs.AutoUpdate().Apply.EqualBool(true)) b.metrics.advertisedRoutes.Set(float64(tsaddr.WithoutExitRoute(prefs.AdvertiseRoutes()).Len())) @@ -6076,18 +6079,22 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.health.SetControlHealth(nil) } - if nm.HasCap(tailcfg.NodeAttrLinuxMustUseIPTables) { - b.capForcedNetfilter = "iptables" - } else if nm.HasCap(tailcfg.NodeAttrLinuxMustUseNfTables) { - b.capForcedNetfilter = "nftables" - } else { - b.capForcedNetfilter = "" // empty string means client can auto-detect + if runtime.GOOS == "linux" && buildfeatures.HasOSRouter { + if nm.HasCap(tailcfg.NodeAttrLinuxMustUseIPTables) { + b.capForcedNetfilter = "iptables" + } else if nm.HasCap(tailcfg.NodeAttrLinuxMustUseNfTables) { + b.capForcedNetfilter = "nftables" + } else { + b.capForcedNetfilter = "" // empty string means client can auto-detect + } } b.MagicConn().SetSilentDisco(b.ControlKnobs().SilentDisco.Load()) b.MagicConn().SetProbeUDPLifetime(b.ControlKnobs().ProbeUDPLifetime.Load()) - b.setDebugLogsByCapabilityLocked(nm) + if buildfeatures.HasDebug { + b.setDebugLogsByCapabilityLocked(nm) + } // See the netns package for documentation on what this capability does. netns.SetBindToInterfaceByRoute(nm.HasCap(tailcfg.CapabilityBindToInterfaceByRoute)) @@ -6104,25 +6111,26 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { } } - if nm == nil { - // If there is no netmap, the client is going into a "turned off" - // state so reset the metrics. - b.metrics.approvedRoutes.Set(0) - return - } - - if nm.SelfNode.Valid() { - var approved float64 - for _, route := range nm.SelfNode.AllowedIPs().All() { - if !views.SliceContains(nm.SelfNode.Addresses(), route) && !tsaddr.IsExitRoute(route) { - approved++ + if buildfeatures.HasAdvertiseRoutes { + if nm == nil { + // If there is no netmap, the client is going into a "turned off" + // state so reset the metrics. + b.metrics.approvedRoutes.Set(0) + } else if nm.SelfNode.Valid() { + var approved float64 + for _, route := range nm.SelfNode.AllowedIPs().All() { + if !views.SliceContains(nm.SelfNode.Addresses(), route) && !tsaddr.IsExitRoute(route) { + approved++ + } } + b.metrics.approvedRoutes.Set(approved) } - b.metrics.approvedRoutes.Set(approved) } - if f, ok := hookSetNetMapLockedDrive.GetOk(); ok { - f(b, nm) + if buildfeatures.HasDrive && nm != nil { + if f, ok := hookSetNetMapLockedDrive.GetOk(); ok { + f(b, nm) + } } } From 9c3aec58badd142c2f8442aaaf38a7ae167ecae0 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Fri, 3 Oct 2025 16:29:50 -0700 Subject: [PATCH 1482/1708] ipn/ipnlocal: remove junk from suggestExitNodeUsingTrafficSteering (#17436) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch removes some code that didn’t get removed before merging the changes in #16580. 
Updates #cleanup Updates #16551 Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 7488a06a9..8cdb49876 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7397,7 +7397,6 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, allowed set.Set[tailcf panic("missing traffic-steering capability") } - var force tailcfg.NodeView nodes := nb.AppendMatchingPeers(nil, func(p tailcfg.NodeView) bool { if !p.Valid() { return false @@ -7416,9 +7415,6 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, allowed set.Set[tailcf } return true }) - if force.Valid() { - nodes = append(nodes[:0], force) - } scores := make(map[tailcfg.NodeID]int, len(nodes)) score := func(n tailcfg.NodeView) int { From 447cbdd1d0515858acf2fee0c20e8bbc7ac6359e Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 3 Oct 2025 11:05:37 -0700 Subject: [PATCH 1483/1708] health: make it omittable Saves 86 KB. And stop depending on expvar and usermetrics when disabled, in prep to removing all the expvar/metrics/tsweb stuff. Updates #12614 Change-Id: I35d2479ddd1d39b615bab32b1fa940ae8cbf9b11 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 6 +- cmd/tailscaled/depaware-minbox.txt | 6 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- .../buildfeatures/feature_health_disabled.go | 13 + .../buildfeatures/feature_health_enabled.go | 13 + feature/featuretags/featuretags.go | 1 + health/health.go | 86 ++-- health/health_test.go | 13 +- health/state.go | 3 +- health/usermetrics.go | 52 +++ health/usermetrics_omit.go | 8 + health/warnings.go | 388 ++++++++++-------- tsnet/depaware.txt | 2 +- wgengine/magicsock/magicsock.go | 2 +- 17 files changed, 370 insertions(+), 231 deletions(-) create mode 100644 feature/buildfeatures/feature_health_disabled.go create mode 100644 feature/buildfeatures/feature_health_enabled.go create mode 100644 health/usermetrics.go create mode 100644 health/usermetrics_omit.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index be9ac3a08..97eebf1d5 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -741,7 +741,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/health+ + tailscale.com/metrics from tailscale.com/net/tstun+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 7b32fc2b4..81d5f3e0d 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -112,7 +112,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web+ - tailscale.com/metrics from tailscale.com/health+ + tailscale.com/metrics from tailscale.com/tsweb+ tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 
tailscale.com/net/captivedetection from tailscale.com/net/netcheck diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index ba35ecd4a..25594b124 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -76,7 +76,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/health+ + tailscale.com/metrics from tailscale.com/net/tstun+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock @@ -179,7 +179,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/policyclient from tailscale.com/cmd/tailscaled+ tailscale.com/util/syspolicy/ptype from tailscale.com/ipn/ipnlocal+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ - tailscale.com/util/usermetric from tailscale.com/health+ + tailscale.com/util/usermetric from tailscale.com/ipn/ipnlocal+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ tailscale.com/util/winutil from tailscale.com/ipn/ipnauth tailscale.com/util/zstdframe from tailscale.com/control/controlclient @@ -324,7 +324,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/json from expvar+ encoding/pem from crypto/tls+ errors from bufio+ - expvar from tailscale.com/health+ + expvar from tailscale.com/metrics+ flag from tailscale.com/cmd/tailscaled+ fmt from compress/flate+ hash from crypto+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index e98c0da48..3829737e6 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -99,7 +99,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/health+ + tailscale.com/metrics from tailscale.com/net/tstun+ tailscale.com/net/ace from tailscale.com/control/controlhttp+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock @@ -206,7 +206,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/policyclient from tailscale.com/cmd/tailscaled+ tailscale.com/util/syspolicy/ptype from tailscale.com/ipn/ipnlocal+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ - tailscale.com/util/usermetric from tailscale.com/health+ + tailscale.com/util/usermetric from tailscale.com/ipn/ipnlocal+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ tailscale.com/util/winutil from tailscale.com/ipn/ipnauth tailscale.com/util/zstdframe from tailscale.com/control/controlclient @@ -353,7 +353,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/json from expvar+ encoding/pem from crypto/tls+ errors from bufio+ - expvar from tailscale.com/health+ + expvar from tailscale.com/metrics+ flag from tailscale.com/cmd/tailscaled+ fmt from compress/flate+ hash from crypto+ diff --git a/cmd/tailscaled/depaware.txt 
b/cmd/tailscaled/depaware.txt index 21e333af7..5f40d9417 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -321,7 +321,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/health+ + tailscale.com/metrics from tailscale.com/net/tstun+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index dfd338410..2563cb2fa 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -169,7 +169,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/health+ + tailscale.com/metrics from tailscale.com/net/tstun+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock diff --git a/feature/buildfeatures/feature_health_disabled.go b/feature/buildfeatures/feature_health_disabled.go new file mode 100644 index 000000000..2f2bcf240 --- /dev/null +++ b/feature/buildfeatures/feature_health_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_health + +package buildfeatures + +// HasHealth is whether the binary was built with support for modular feature "Health checking support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_health" build tag. +// It's a const so it can be used for dead code elimination. +const HasHealth = false diff --git a/feature/buildfeatures/feature_health_enabled.go b/feature/buildfeatures/feature_health_enabled.go new file mode 100644 index 000000000..00ce3684e --- /dev/null +++ b/feature/buildfeatures/feature_health_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_health + +package buildfeatures + +// HasHealth is whether the binary was built with support for modular feature "Health checking support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_health" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasHealth = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index db7f2d272..041b68ec5 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -150,6 +150,7 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Generic Receive Offload support (performance)", Deps: []FeatureTag{"netstack"}, }, + "health": {Sym: "Health", Desc: "Health checking support"}, "hujsonconf": {Sym: "HuJSONConf", Desc: "HuJSON config file support"}, "iptables": {Sym: "IPTables", Desc: "Linux iptables support"}, "kube": {Sym: "Kube", Desc: "Kubernetes integration"}, diff --git a/health/health.go b/health/health.go index c41256614..cbfa599c5 100644 --- a/health/health.go +++ b/health/health.go @@ -8,7 +8,6 @@ package health import ( "context" "errors" - "expvar" "fmt" "maps" "net/http" @@ -20,14 +19,13 @@ import ( "time" "tailscale.com/envknob" - "tailscale.com/metrics" + "tailscale.com/feature/buildfeatures" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/opt" "tailscale.com/util/cibuild" "tailscale.com/util/eventbus" "tailscale.com/util/mak" - "tailscale.com/util/usermetric" "tailscale.com/version" ) @@ -132,12 +130,15 @@ type Tracker struct { lastLoginErr error localLogConfigErr error tlsConnectionErrors map[string]error // map[ServerName]error - metricHealthMessage *metrics.MultiLabelMap[metricHealthMessageLabel] + metricHealthMessage any // nil or *metrics.MultiLabelMap[metricHealthMessageLabel] } // NewTracker contructs a new [Tracker] and attaches the given eventbus. // NewTracker will panic is no eventbus is given. func NewTracker(bus *eventbus.Bus) *Tracker { + if !buildfeatures.HasHealth { + return &Tracker{} + } if bus == nil { panic("no eventbus set") } @@ -221,6 +222,9 @@ const legacyErrorArgKey = "LegacyError" // temporarily (2024-06-14) while we migrate the old health infrastructure based // on Subsystems to the new Warnables architecture. func (s Subsystem) Warnable() *Warnable { + if !buildfeatures.HasHealth { + return &noopWarnable + } w, ok := subsystemsWarnables[s] if !ok { panic(fmt.Sprintf("health: no Warnable for Subsystem %q", s)) @@ -230,10 +234,15 @@ func (s Subsystem) Warnable() *Warnable { var registeredWarnables = map[WarnableCode]*Warnable{} +var noopWarnable Warnable + // Register registers a new Warnable with the health package and returns it. // Register panics if the Warnable was already registered, because Warnables // should be unique across the program. func Register(w *Warnable) *Warnable { + if !buildfeatures.HasHealth { + return &noopWarnable + } if registeredWarnables[w.Code] != nil { panic(fmt.Sprintf("health: a Warnable with code %q was already registered", w.Code)) } @@ -245,6 +254,9 @@ func Register(w *Warnable) *Warnable { // unregister removes a Warnable from the health package. It should only be used // for testing purposes. func unregister(w *Warnable) { + if !buildfeatures.HasHealth { + return + } if registeredWarnables[w.Code] == nil { panic(fmt.Sprintf("health: attempting to unregister Warnable %q that was not registered", w.Code)) } @@ -317,6 +329,9 @@ func StaticMessage(s string) func(Args) string { // some lost Tracker plumbing, we want to capture stack trace // samples when it occurs. 
func (t *Tracker) nil() bool { + if !buildfeatures.HasHealth { + return true + } if t != nil { return false } @@ -385,37 +400,10 @@ func (w *Warnable) IsVisible(ws *warningState, clockNow func() time.Time) bool { return clockNow().Sub(ws.BrokenSince) >= w.TimeToVisible } -// SetMetricsRegistry sets up the metrics for the Tracker. It takes -// a usermetric.Registry and registers the metrics there. -func (t *Tracker) SetMetricsRegistry(reg *usermetric.Registry) { - if reg == nil || t.metricHealthMessage != nil { - return - } - - t.metricHealthMessage = usermetric.NewMultiLabelMapWithRegistry[metricHealthMessageLabel]( - reg, - "tailscaled_health_messages", - "gauge", - "Number of health messages broken down by type.", - ) - - t.metricHealthMessage.Set(metricHealthMessageLabel{ - Type: MetricLabelWarning, - }, expvar.Func(func() any { - if t.nil() { - return 0 - } - t.mu.Lock() - defer t.mu.Unlock() - t.updateBuiltinWarnablesLocked() - return int64(len(t.stringsLocked())) - })) -} - // IsUnhealthy reports whether the current state is unhealthy because the given // warnable is set. func (t *Tracker) IsUnhealthy(w *Warnable) bool { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return false } t.mu.Lock() @@ -429,7 +417,7 @@ func (t *Tracker) IsUnhealthy(w *Warnable) bool { // SetUnhealthy takes ownership of args. The args can be nil if no additional information is // needed for the unhealthy state. func (t *Tracker) SetUnhealthy(w *Warnable, args Args) { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return } t.mu.Lock() @@ -438,7 +426,7 @@ func (t *Tracker) SetUnhealthy(w *Warnable, args Args) { } func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { - if w == nil { + if !buildfeatures.HasHealth || w == nil { return } @@ -489,7 +477,7 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { // SetHealthy removes any warningState for the given Warnable. func (t *Tracker) SetHealthy(w *Warnable) { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return } t.mu.Lock() @@ -498,7 +486,7 @@ func (t *Tracker) SetHealthy(w *Warnable) { } func (t *Tracker) setHealthyLocked(w *Warnable) { - if t.warnableVal[w] == nil { + if !buildfeatures.HasHealth || t.warnableVal[w] == nil { // Nothing to remove return } @@ -1009,7 +997,7 @@ func (t *Tracker) OverallError() error { // each Warning to show a localized version of them instead. This function is // here for legacy compatibility purposes and is deprecated. func (t *Tracker) Strings() []string { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return nil } t.mu.Lock() @@ -1018,6 +1006,9 @@ func (t *Tracker) Strings() []string { } func (t *Tracker) stringsLocked() []string { + if !buildfeatures.HasHealth { + return nil + } result := []string{} for w, ws := range t.warnableVal { if !w.IsVisible(ws, t.now) { @@ -1078,6 +1069,9 @@ var fakeErrForTesting = envknob.RegisterString("TS_DEBUG_FAKE_HEALTH_ERROR") // updateBuiltinWarnablesLocked performs a number of checks on the state of the backend, // and adds/removes Warnings from the Tracker as needed. 
func (t *Tracker) updateBuiltinWarnablesLocked() { + if !buildfeatures.HasHealth { + return + } t.updateWarmingUpWarnableLocked() if w, show := t.showUpdateWarnable(); show { @@ -1316,11 +1310,17 @@ func (s *ReceiveFuncStats) Name() string { } func (s *ReceiveFuncStats) Enter() { + if !buildfeatures.HasHealth { + return + } s.numCalls.Add(1) s.inCall.Store(true) } func (s *ReceiveFuncStats) Exit() { + if !buildfeatures.HasHealth { + return + } s.inCall.Store(false) } @@ -1329,7 +1329,7 @@ func (s *ReceiveFuncStats) Exit() { // // If t is nil, it returns nil. func (t *Tracker) ReceiveFuncStats(which ReceiveFunc) *ReceiveFuncStats { - if t == nil { + if !buildfeatures.HasHealth || t == nil { return nil } t.initOnce.Do(t.doOnceInit) @@ -1337,6 +1337,9 @@ func (t *Tracker) ReceiveFuncStats(which ReceiveFunc) *ReceiveFuncStats { } func (t *Tracker) doOnceInit() { + if !buildfeatures.HasHealth { + return + } for i := range t.MagicSockReceiveFuncs { f := &t.MagicSockReceiveFuncs[i] f.name = (ReceiveFunc(i)).String() @@ -1385,10 +1388,3 @@ func (t *Tracker) LastNoiseDialWasRecent() bool { t.lastNoiseDial = now return dur < 2*time.Minute } - -const MetricLabelWarning = "warning" - -type metricHealthMessageLabel struct { - // TODO: break down by warnable.severity as well? - Type string -} diff --git a/health/health_test.go b/health/health_test.go index 3ada37755..3b5ebbb38 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -15,6 +15,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "tailscale.com/metrics" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/tstime" @@ -497,7 +498,11 @@ func TestHealthMetric(t *testing.T) { tr.applyUpdates = tt.apply tr.latestVersion = tt.cv tr.SetMetricsRegistry(&usermetric.Registry{}) - if val := tr.metricHealthMessage.Get(metricHealthMessageLabel{Type: MetricLabelWarning}).String(); val != strconv.Itoa(tt.wantMetricCount) { + m, ok := tr.metricHealthMessage.(*metrics.MultiLabelMap[metricHealthMessageLabel]) + if !ok { + t.Fatal("metricHealthMessage has wrong type or is nil") + } + if val := m.Get(metricHealthMessageLabel{Type: MetricLabelWarning}).String(); val != strconv.Itoa(tt.wantMetricCount) { t.Fatalf("metric value: %q, want: %q", val, strconv.Itoa(tt.wantMetricCount)) } for _, w := range tr.CurrentState().Warnings { @@ -634,7 +639,11 @@ func TestControlHealth(t *testing.T) { var r usermetric.Registry ht.SetMetricsRegistry(&r) - got := ht.metricHealthMessage.Get(metricHealthMessageLabel{ + m, ok := ht.metricHealthMessage.(*metrics.MultiLabelMap[metricHealthMessageLabel]) + if !ok { + t.Fatal("metricHealthMessage has wrong type or is nil") + } + got := m.Get(metricHealthMessageLabel{ Type: MetricLabelWarning, }).String() want := strconv.Itoa( diff --git a/health/state.go b/health/state.go index 2efff92b1..e6d937b6a 100644 --- a/health/state.go +++ b/health/state.go @@ -9,6 +9,7 @@ import ( "encoding/json" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/tailcfg" ) @@ -120,7 +121,7 @@ func (w *Warnable) unhealthyState(ws *warningState) *UnhealthyState { // The returned State is a snapshot of shared memory, and the caller should not // mutate the returned value. 
func (t *Tracker) CurrentState() *State { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return &State{} } diff --git a/health/usermetrics.go b/health/usermetrics.go new file mode 100644 index 000000000..110c57b57 --- /dev/null +++ b/health/usermetrics.go @@ -0,0 +1,52 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_health && !ts_omit_usermetrics + +package health + +import ( + "expvar" + + "tailscale.com/feature/buildfeatures" + "tailscale.com/util/usermetric" +) + +const MetricLabelWarning = "warning" + +type metricHealthMessageLabel struct { + // TODO: break down by warnable.severity as well? + Type string +} + +// SetMetricsRegistry sets up the metrics for the Tracker. It takes +// a usermetric.Registry and registers the metrics there. +func (t *Tracker) SetMetricsRegistry(reg *usermetric.Registry) { + if !buildfeatures.HasHealth { + return + } + + if reg == nil || t.metricHealthMessage != nil { + return + } + + m := usermetric.NewMultiLabelMapWithRegistry[metricHealthMessageLabel]( + reg, + "tailscaled_health_messages", + "gauge", + "Number of health messages broken down by type.", + ) + + m.Set(metricHealthMessageLabel{ + Type: MetricLabelWarning, + }, expvar.Func(func() any { + if t.nil() { + return 0 + } + t.mu.Lock() + defer t.mu.Unlock() + t.updateBuiltinWarnablesLocked() + return int64(len(t.stringsLocked())) + })) + t.metricHealthMessage = m +} diff --git a/health/usermetrics_omit.go b/health/usermetrics_omit.go new file mode 100644 index 000000000..9d5e35b86 --- /dev/null +++ b/health/usermetrics_omit.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_health || ts_omit_usermetrics + +package health + +func (t *Tracker) SetMetricsRegistry(any) {} diff --git a/health/warnings.go b/health/warnings.go index 3997e66b3..26577130d 100644 --- a/health/warnings.go +++ b/health/warnings.go @@ -8,234 +8,278 @@ import ( "runtime" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/version" ) +func condRegister(f func() *Warnable) *Warnable { + if !buildfeatures.HasHealth { + return nil + } + return f() +} + /** This file contains definitions for the Warnables maintained within this `health` package. */ // updateAvailableWarnable is a Warnable that warns the user that an update is available. -var updateAvailableWarnable = Register(&Warnable{ - Code: "update-available", - Title: "Update available", - Severity: SeverityLow, - Text: func(args Args) string { - if version.IsMacAppStore() || version.IsAppleTV() || version.IsMacSys() || version.IsWindowsGUI() || runtime.GOOS == "android" { - return fmt.Sprintf("An update from version %s to %s is available.", args[ArgCurrentVersion], args[ArgAvailableVersion]) - } else { - return fmt.Sprintf("An update from version %s to %s is available. Run `tailscale update` or `tailscale set --auto-update` to update now.", args[ArgCurrentVersion], args[ArgAvailableVersion]) - } - }, +var updateAvailableWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "update-available", + Title: "Update available", + Severity: SeverityLow, + Text: func(args Args) string { + if version.IsMacAppStore() || version.IsAppleTV() || version.IsMacSys() || version.IsWindowsGUI() || runtime.GOOS == "android" { + return fmt.Sprintf("An update from version %s to %s is available.", args[ArgCurrentVersion], args[ArgAvailableVersion]) + } else { + return fmt.Sprintf("An update from version %s to %s is available. 
Run `tailscale update` or `tailscale set --auto-update` to update now.", args[ArgCurrentVersion], args[ArgAvailableVersion]) + } + }, + } }) // securityUpdateAvailableWarnable is a Warnable that warns the user that an important security update is available. -var securityUpdateAvailableWarnable = Register(&Warnable{ - Code: "security-update-available", - Title: "Security update available", - Severity: SeverityMedium, - Text: func(args Args) string { - if version.IsMacAppStore() || version.IsAppleTV() || version.IsMacSys() || version.IsWindowsGUI() || runtime.GOOS == "android" { - return fmt.Sprintf("A security update from version %s to %s is available.", args[ArgCurrentVersion], args[ArgAvailableVersion]) - } else { - return fmt.Sprintf("A security update from version %s to %s is available. Run `tailscale update` or `tailscale set --auto-update` to update now.", args[ArgCurrentVersion], args[ArgAvailableVersion]) - } - }, +var securityUpdateAvailableWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "security-update-available", + Title: "Security update available", + Severity: SeverityMedium, + Text: func(args Args) string { + if version.IsMacAppStore() || version.IsAppleTV() || version.IsMacSys() || version.IsWindowsGUI() || runtime.GOOS == "android" { + return fmt.Sprintf("A security update from version %s to %s is available.", args[ArgCurrentVersion], args[ArgAvailableVersion]) + } else { + return fmt.Sprintf("A security update from version %s to %s is available. Run `tailscale update` or `tailscale set --auto-update` to update now.", args[ArgCurrentVersion], args[ArgAvailableVersion]) + } + }, + } }) // unstableWarnable is a Warnable that warns the user that they are using an unstable version of Tailscale // so they won't be surprised by all the issues that may arise. -var unstableWarnable = Register(&Warnable{ - Code: "is-using-unstable-version", - Title: "Using an unstable version", - Severity: SeverityLow, - Text: StaticMessage("This is an unstable version of Tailscale meant for testing and development purposes. Please report any issues to Tailscale."), +var unstableWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "is-using-unstable-version", + Title: "Using an unstable version", + Severity: SeverityLow, + Text: StaticMessage("This is an unstable version of Tailscale meant for testing and development purposes. Please report any issues to Tailscale."), + } }) // NetworkStatusWarnable is a Warnable that warns the user that the network is down. -var NetworkStatusWarnable = Register(&Warnable{ - Code: "network-status", - Title: "Network down", - Severity: SeverityMedium, - Text: StaticMessage("Tailscale cannot connect because the network is down. Check your Internet connection."), - ImpactsConnectivity: true, - TimeToVisible: 5 * time.Second, +var NetworkStatusWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "network-status", + Title: "Network down", + Severity: SeverityMedium, + Text: StaticMessage("Tailscale cannot connect because the network is down. Check your Internet connection."), + ImpactsConnectivity: true, + TimeToVisible: 5 * time.Second, + } }) // IPNStateWarnable is a Warnable that warns the user that Tailscale is stopped. 
-var IPNStateWarnable = Register(&Warnable{ - Code: "wantrunning-false", - Title: "Tailscale off", - Severity: SeverityLow, - Text: StaticMessage("Tailscale is stopped."), +var IPNStateWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "wantrunning-false", + Title: "Tailscale off", + Severity: SeverityLow, + Text: StaticMessage("Tailscale is stopped."), + } }) // localLogWarnable is a Warnable that warns the user that the local log is misconfigured. -var localLogWarnable = Register(&Warnable{ - Code: "local-log-config-error", - Title: "Local log misconfiguration", - Severity: SeverityLow, - Text: func(args Args) string { - return fmt.Sprintf("The local log is misconfigured: %v", args[ArgError]) - }, +var localLogWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "local-log-config-error", + Title: "Local log misconfiguration", + Severity: SeverityLow, + Text: func(args Args) string { + return fmt.Sprintf("The local log is misconfigured: %v", args[ArgError]) + }, + } }) // LoginStateWarnable is a Warnable that warns the user that they are logged out, // and provides the last login error if available. -var LoginStateWarnable = Register(&Warnable{ - Code: "login-state", - Title: "Logged out", - Severity: SeverityMedium, - Text: func(args Args) string { - if args[ArgError] != "" { - return fmt.Sprintf("You are logged out. The last login error was: %v", args[ArgError]) - } else { - return "You are logged out." - } - }, - DependsOn: []*Warnable{IPNStateWarnable}, +var LoginStateWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "login-state", + Title: "Logged out", + Severity: SeverityMedium, + Text: func(args Args) string { + if args[ArgError] != "" { + return fmt.Sprintf("You are logged out. The last login error was: %v", args[ArgError]) + } else { + return "You are logged out." + } + }, + DependsOn: []*Warnable{IPNStateWarnable}, + } }) // notInMapPollWarnable is a Warnable that warns the user that we are using a stale network map. -var notInMapPollWarnable = Register(&Warnable{ - Code: "not-in-map-poll", - Title: "Out of sync", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, - Text: StaticMessage("Unable to connect to the Tailscale coordination server to synchronize the state of your tailnet. Peer reachability might degrade over time."), - // 8 minutes reflects a maximum maintenance window for the coordination server. - TimeToVisible: 8 * time.Minute, +var notInMapPollWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "not-in-map-poll", + Title: "Out of sync", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, + Text: StaticMessage("Unable to connect to the Tailscale coordination server to synchronize the state of your tailnet. Peer reachability might degrade over time."), + // 8 minutes reflects a maximum maintenance window for the coordination server. + TimeToVisible: 8 * time.Minute, + } }) // noDERPHomeWarnable is a Warnable that warns the user that Tailscale doesn't have a home DERP. -var noDERPHomeWarnable = Register(&Warnable{ - Code: "no-derp-home", - Title: "No home relay server", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable}, - Text: StaticMessage("Tailscale could not connect to any relay server. 
Check your Internet connection."), - ImpactsConnectivity: true, - TimeToVisible: 10 * time.Second, +var noDERPHomeWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "no-derp-home", + Title: "No home relay server", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable}, + Text: StaticMessage("Tailscale could not connect to any relay server. Check your Internet connection."), + ImpactsConnectivity: true, + TimeToVisible: 10 * time.Second, + } }) // noDERPConnectionWarnable is a Warnable that warns the user that Tailscale couldn't connect to a specific DERP server. -var noDERPConnectionWarnable = Register(&Warnable{ - Code: "no-derp-connection", - Title: "Relay server unavailable", - Severity: SeverityMedium, - DependsOn: []*Warnable{ - NetworkStatusWarnable, - - // Technically noDERPConnectionWarnable could be used to warn about - // failure to connect to a specific DERP server (e.g. your home is derp1 - // but you're trying to connect to a peer's derp4 and are unable) but as - // of 2024-09-25 we only use this for connecting to your home DERP, so - // we depend on noDERPHomeWarnable which is the ability to figure out - // what your DERP home even is. - noDERPHomeWarnable, - }, - Text: func(args Args) string { - if n := args[ArgDERPRegionName]; n != "" { - return fmt.Sprintf("Tailscale could not connect to the '%s' relay server. Your Internet connection might be down, or the server might be temporarily unavailable.", n) - } else { - return fmt.Sprintf("Tailscale could not connect to the relay server with ID '%s'. Your Internet connection might be down, or the server might be temporarily unavailable.", args[ArgDERPRegionID]) - } - }, - ImpactsConnectivity: true, - TimeToVisible: 10 * time.Second, +var noDERPConnectionWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "no-derp-connection", + Title: "Relay server unavailable", + Severity: SeverityMedium, + DependsOn: []*Warnable{ + NetworkStatusWarnable, + + // Technically noDERPConnectionWarnable could be used to warn about + // failure to connect to a specific DERP server (e.g. your home is derp1 + // but you're trying to connect to a peer's derp4 and are unable) but as + // of 2024-09-25 we only use this for connecting to your home DERP, so + // we depend on noDERPHomeWarnable which is the ability to figure out + // what your DERP home even is. + noDERPHomeWarnable, + }, + Text: func(args Args) string { + if n := args[ArgDERPRegionName]; n != "" { + return fmt.Sprintf("Tailscale could not connect to the '%s' relay server. Your Internet connection might be down, or the server might be temporarily unavailable.", n) + } else { + return fmt.Sprintf("Tailscale could not connect to the relay server with ID '%s'. Your Internet connection might be down, or the server might be temporarily unavailable.", args[ArgDERPRegionID]) + } + }, + ImpactsConnectivity: true, + TimeToVisible: 10 * time.Second, + } }) // derpTimeoutWarnable is a Warnable that warns the user that Tailscale hasn't // heard from the home DERP region for a while. 
-var derpTimeoutWarnable = Register(&Warnable{ - Code: "derp-timed-out", - Title: "Relay server timed out", - Severity: SeverityMedium, - DependsOn: []*Warnable{ - NetworkStatusWarnable, - noDERPConnectionWarnable, // don't warn about it being stalled if we're not connected - noDERPHomeWarnable, // same reason as noDERPConnectionWarnable's dependency - }, - Text: func(args Args) string { - if n := args[ArgDERPRegionName]; n != "" { - return fmt.Sprintf("Tailscale hasn't heard from the '%s' relay server in %v. The server might be temporarily unavailable, or your Internet connection might be down.", n, args[ArgDuration]) - } else { - return fmt.Sprintf("Tailscale hasn't heard from the home relay server (region ID '%v') in %v. The server might be temporarily unavailable, or your Internet connection might be down.", args[ArgDERPRegionID], args[ArgDuration]) - } - }, +var derpTimeoutWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "derp-timed-out", + Title: "Relay server timed out", + Severity: SeverityMedium, + DependsOn: []*Warnable{ + NetworkStatusWarnable, + noDERPConnectionWarnable, // don't warn about it being stalled if we're not connected + noDERPHomeWarnable, // same reason as noDERPConnectionWarnable's dependency + }, + Text: func(args Args) string { + if n := args[ArgDERPRegionName]; n != "" { + return fmt.Sprintf("Tailscale hasn't heard from the '%s' relay server in %v. The server might be temporarily unavailable, or your Internet connection might be down.", n, args[ArgDuration]) + } else { + return fmt.Sprintf("Tailscale hasn't heard from the home relay server (region ID '%v') in %v. The server might be temporarily unavailable, or your Internet connection might be down.", args[ArgDERPRegionID], args[ArgDuration]) + } + }, + } }) // derpRegionErrorWarnable is a Warnable that warns the user that a DERP region is reporting an issue. -var derpRegionErrorWarnable = Register(&Warnable{ - Code: "derp-region-error", - Title: "Relay server error", - Severity: SeverityLow, - DependsOn: []*Warnable{NetworkStatusWarnable}, - Text: func(args Args) string { - return fmt.Sprintf("The relay server #%v is reporting an issue: %v", args[ArgDERPRegionID], args[ArgError]) - }, +var derpRegionErrorWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "derp-region-error", + Title: "Relay server error", + Severity: SeverityLow, + DependsOn: []*Warnable{NetworkStatusWarnable}, + Text: func(args Args) string { + return fmt.Sprintf("The relay server #%v is reporting an issue: %v", args[ArgDERPRegionID], args[ArgError]) + }, + } }) // noUDP4BindWarnable is a Warnable that warns the user that Tailscale couldn't listen for incoming UDP connections. -var noUDP4BindWarnable = Register(&Warnable{ - Code: "no-udp4-bind", - Title: "NAT traversal setup failure", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, - Text: StaticMessage("Tailscale couldn't listen for incoming UDP connections."), - ImpactsConnectivity: true, +var noUDP4BindWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "no-udp4-bind", + Title: "NAT traversal setup failure", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, + Text: StaticMessage("Tailscale couldn't listen for incoming UDP connections."), + ImpactsConnectivity: true, + } }) // mapResponseTimeoutWarnable is a Warnable that warns the user that Tailscale hasn't received a network map from the coordination server in a while. 
-var mapResponseTimeoutWarnable = Register(&Warnable{ - Code: "mapresponse-timeout", - Title: "Network map response timeout", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, - Text: func(args Args) string { - return fmt.Sprintf("Tailscale hasn't received a network map from the coordination server in %s.", args[ArgDuration]) - }, +var mapResponseTimeoutWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "mapresponse-timeout", + Title: "Network map response timeout", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, + Text: func(args Args) string { + return fmt.Sprintf("Tailscale hasn't received a network map from the coordination server in %s.", args[ArgDuration]) + }, + } }) // tlsConnectionFailedWarnable is a Warnable that warns the user that Tailscale could not establish an encrypted connection with a server. -var tlsConnectionFailedWarnable = Register(&Warnable{ - Code: "tls-connection-failed", - Title: "Encrypted connection failed", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable}, - Text: func(args Args) string { - return fmt.Sprintf("Tailscale could not establish an encrypted connection with '%q': %v", args[ArgServerName], args[ArgError]) - }, +var tlsConnectionFailedWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "tls-connection-failed", + Title: "Encrypted connection failed", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable}, + Text: func(args Args) string { + return fmt.Sprintf("Tailscale could not establish an encrypted connection with '%q': %v", args[ArgServerName], args[ArgError]) + }, + } }) // magicsockReceiveFuncWarnable is a Warnable that warns the user that one of the Magicsock functions is not running. -var magicsockReceiveFuncWarnable = Register(&Warnable{ - Code: "magicsock-receive-func-error", - Title: "MagicSock function not running", - Severity: SeverityMedium, - Text: func(args Args) string { - return fmt.Sprintf("The MagicSock function %s is not running. You might experience connectivity issues.", args[ArgMagicsockFunctionName]) - }, +var magicsockReceiveFuncWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "magicsock-receive-func-error", + Title: "MagicSock function not running", + Severity: SeverityMedium, + Text: func(args Args) string { + return fmt.Sprintf("The MagicSock function %s is not running. You might experience connectivity issues.", args[ArgMagicsockFunctionName]) + }, + } }) // testWarnable is a Warnable that is used within this package for testing purposes only. -var testWarnable = Register(&Warnable{ - Code: "test-warnable", - Title: "Test warnable", - Severity: SeverityLow, - Text: func(args Args) string { - return args[ArgError] - }, +var testWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "test-warnable", + Title: "Test warnable", + Severity: SeverityLow, + Text: func(args Args) string { + return args[ArgError] + }, + } }) // applyDiskConfigWarnable is a Warnable that warns the user that there was an error applying the envknob config stored on disk. 
-var applyDiskConfigWarnable = Register(&Warnable{ - Code: "apply-disk-config", - Title: "Could not apply configuration", - Severity: SeverityMedium, - Text: func(args Args) string { - return fmt.Sprintf("An error occurred applying the Tailscale envknob configuration stored on disk: %v", args[ArgError]) - }, +var applyDiskConfigWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "apply-disk-config", + Title: "Could not apply configuration", + Severity: SeverityMedium, + Text: func(args Args) string { + return fmt.Sprintf("An error occurred applying the Tailscale envknob configuration stored on disk: %v", args[ArgError]) + }, + } }) // warmingUpWarnableDuration is the duration for which the warmingUpWarnable is reported by the backend after the user @@ -245,9 +289,11 @@ const warmingUpWarnableDuration = 5 * time.Second // warmingUpWarnable is a Warnable that is reported by the backend when it is starting up, for a maximum time of // warmingUpWarnableDuration. The GUIs use the presence of this Warnable to prevent showing any other warnings until // the backend is fully started. -var warmingUpWarnable = Register(&Warnable{ - Code: "warming-up", - Title: "Tailscale is starting", - Severity: SeverityLow, - Text: StaticMessage("Tailscale is starting. Please wait."), +var warmingUpWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "warming-up", + Title: "Tailscale is starting", + Severity: SeverityLow, + Text: StaticMessage("Tailscale is starting. Please wait."), + } }) diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 739d0b33b..f8d7bf7a8 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -165,7 +165,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/health+ + tailscale.com/metrics from tailscale.com/net/tstun+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index e3cf249c5..b6cb7b336 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1710,7 +1710,7 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu var epCache epAddrEndpointCache return func(buffs [][]byte, sizes []int, eps []conn.Endpoint) (_ int, retErr error) { - if healthItem != nil { + if buildfeatures.HasHealth && healthItem != nil { healthItem.Enter() defer healthItem.Exit() defer func() { From 141eb64d3fe2d00c47ca6a77372e84d265e15edd Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 3 Oct 2025 13:31:49 -0700 Subject: [PATCH 1484/1708] wgengine/router/osrouter: fix data race in magicsock port update callback As found by @cmol in #17423. 
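The fix below replaces the plain uint16 port fields with atomic values, since the
magicsock port-update callback and the netfilter setup path touch them from
different goroutines. Go's sync/atomic has no Uint16, so the port is widened to an
atomic.Uint32. A minimal, illustrative sketch of the same pattern (made-up names,
not the actual router code) looks like this:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // port holds a uint16 but lives in an atomic.Uint32 (the smallest atomic
    // integer type), so concurrent Store/Load calls do not race.
    var port atomic.Uint32

    func updatePort(p uint16) { port.Store(uint32(p)) } // e.g. from the update callback
    func currentPort() uint16 { return uint16(port.Load()) }

    func main() {
        updatePort(41641)
        fmt.Println(currentPort()) // prints 41641; safe to read from any goroutine
    }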
Updates #17423 Change-Id: I1492501f74ca7b57a8c5278ea6cb87a56a4086b9 Signed-off-by: Brad Fitzpatrick --- wgengine/router/osrouter/router_linux.go | 36 +++++++++++++----------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/wgengine/router/osrouter/router_linux.go b/wgengine/router/osrouter/router_linux.go index 1f825b917..cf1a9f027 100644 --- a/wgengine/router/osrouter/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -86,8 +86,8 @@ type linuxRouter struct { cmd commandRunner nfr linuxfw.NetfilterRunner - magicsockPortV4 uint16 - magicsockPortV6 uint16 + magicsockPortV4 atomic.Uint32 // actually a uint16 + magicsockPortV6 atomic.Uint32 // actually a uint16 } func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { @@ -546,7 +546,7 @@ func (r *linuxRouter) UpdateMagicsockPort(port uint16, network string) error { } } - var magicsockPort *uint16 + var magicsockPort *atomic.Uint32 switch network { case "udp4": magicsockPort = &r.magicsockPortV4 @@ -566,27 +566,29 @@ func (r *linuxRouter) UpdateMagicsockPort(port uint16, network string) error { // set the port, we'll make the firewall rule when netfilter turns back on if r.netfilterMode == netfilterOff { - *magicsockPort = port + magicsockPort.Store(uint32(port)) return nil } - if *magicsockPort == port { + cur := magicsockPort.Load() + + if cur == uint32(port) { return nil } - if *magicsockPort != 0 { - if err := r.nfr.DelMagicsockPortRule(*magicsockPort, network); err != nil { + if cur != 0 { + if err := r.nfr.DelMagicsockPortRule(uint16(cur), network); err != nil { return fmt.Errorf("del magicsock port rule: %w", err) } } if port != 0 { - if err := r.nfr.AddMagicsockPortRule(*magicsockPort, network); err != nil { + if err := r.nfr.AddMagicsockPortRule(uint16(port), network); err != nil { return fmt.Errorf("add magicsock port rule: %w", err) } } - *magicsockPort = port + magicsockPort.Store(uint32(port)) return nil } @@ -658,13 +660,13 @@ func (r *linuxRouter) setNetfilterMode(mode preftype.NetfilterMode) error { if err := r.nfr.AddBase(r.tunname); err != nil { return err } - if r.magicsockPortV4 != 0 { - if err := r.nfr.AddMagicsockPortRule(r.magicsockPortV4, "udp4"); err != nil { + if mport := uint16(r.magicsockPortV4.Load()); mport != 0 { + if err := r.nfr.AddMagicsockPortRule(mport, "udp4"); err != nil { return fmt.Errorf("could not add magicsock port rule v4: %w", err) } } - if r.magicsockPortV6 != 0 && r.getV6FilteringAvailable() { - if err := r.nfr.AddMagicsockPortRule(r.magicsockPortV6, "udp6"); err != nil { + if mport := uint16(r.magicsockPortV6.Load()); mport != 0 && r.getV6FilteringAvailable() { + if err := r.nfr.AddMagicsockPortRule(mport, "udp6"); err != nil { return fmt.Errorf("could not add magicsock port rule v6: %w", err) } } @@ -698,13 +700,13 @@ func (r *linuxRouter) setNetfilterMode(mode preftype.NetfilterMode) error { if err := r.nfr.AddBase(r.tunname); err != nil { return err } - if r.magicsockPortV4 != 0 { - if err := r.nfr.AddMagicsockPortRule(r.magicsockPortV4, "udp4"); err != nil { + if mport := uint16(r.magicsockPortV4.Load()); mport != 0 { + if err := r.nfr.AddMagicsockPortRule(mport, "udp4"); err != nil { return fmt.Errorf("could not add magicsock port rule v4: %w", err) } } - if r.magicsockPortV6 != 0 && r.getV6FilteringAvailable() { - if err := r.nfr.AddMagicsockPortRule(r.magicsockPortV6, "udp6"); err != nil { + if mport := uint16(r.magicsockPortV6.Load()); mport != 0 && 
r.getV6FilteringAvailable() { + if err := r.nfr.AddMagicsockPortRule(mport, "udp6"); err != nil { return fmt.Errorf("could not add magicsock port rule v6: %w", err) } } From 223ced84b571df1e2045d3977459374bc43f5515 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 3 Oct 2025 17:32:17 -0700 Subject: [PATCH 1485/1708] feature/ace: make ACE modular Updates #12614 Change-Id: Iaee75d8831c4ba5c9705d7877bb78044424c6da1 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 1 - cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 1 - cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 5 ++-- cmd/tsidp/depaware.txt | 1 - control/controlhttp/client.go | 16 +++++++----- feature/ace/ace.go | 25 +++++++++++++++++++ feature/buildfeatures/feature_ace_disabled.go | 13 ++++++++++ feature/buildfeatures/feature_ace_enabled.go | 13 ++++++++++ feature/condregister/maybe_ace.go | 8 ++++++ feature/featuretags/featuretags.go | 1 + net/ace/ace.go | 2 ++ tsnet/depaware.txt | 1 - 14 files changed, 77 insertions(+), 14 deletions(-) create mode 100644 feature/ace/ace.go create mode 100644 feature/buildfeatures/feature_ace_disabled.go create mode 100644 feature/buildfeatures/feature_ace_enabled.go create mode 100644 feature/condregister/maybe_ace.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 97eebf1d5..85d912fab 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -742,7 +742,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/net/tstun+ - tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 81d5f3e0d..b0b4359e4 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -113,7 +113,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web+ tailscale.com/metrics from tailscale.com/tsweb+ - tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli+ + tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli tailscale.com/net/bakedroots from tailscale.com/net/tlsdial tailscale.com/net/captivedetection from tailscale.com/net/netcheck tailscale.com/net/dnscache from tailscale.com/control/controlhttp+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 25594b124..626fe5acd 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -77,7 +77,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/net/tstun+ - tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/connstats from tailscale.com/net/tstun+ diff --git a/cmd/tailscaled/depaware-minbox.txt 
b/cmd/tailscaled/depaware-minbox.txt index 3829737e6..37909089d 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -100,7 +100,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/net/tstun+ - tailscale.com/net/ace from tailscale.com/control/controlhttp+ + tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/connstats from tailscale.com/net/tstun+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 5f40d9417..ff3cc5c10 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -252,7 +252,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/cmd/tailscaled/tailscaledhooks from tailscale.com/cmd/tailscaled+ tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ - tailscale.com/control/controlhttp from tailscale.com/control/ts2021 + tailscale.com/control/controlhttp from tailscale.com/control/ts2021+ tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/control/ts2021 from tailscale.com/control/controlclient @@ -272,6 +272,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/feature/wakeonlan+ + tailscale.com/feature/ace from tailscale.com/feature/condregister tailscale.com/feature/appconnectors from tailscale.com/feature/condregister tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/capture from tailscale.com/feature/condregister @@ -322,7 +323,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/net/tstun+ - tailscale.com/net/ace from tailscale.com/control/controlhttp + tailscale.com/net/ace from tailscale.com/feature/ace tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 2563cb2fa..47c8086c5 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -170,7 +170,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/net/tstun+ - tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index 
f1ee7a6f9..06a2131fd 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -42,7 +42,6 @@ import ( "tailscale.com/feature" "tailscale.com/feature/buildfeatures" "tailscale.com/health" - "tailscale.com/net/ace" "tailscale.com/net/dnscache" "tailscale.com/net/dnsfallback" "tailscale.com/net/netutil" @@ -395,6 +394,8 @@ var macOSScreenTime = health.Register(&health.Warnable{ ImpactsConnectivity: true, }) +var HookMakeACEDialer feature.Hook[func(dialer netx.DialFunc, aceHost string, optIP netip.Addr) netx.DialFunc] + // tryURLUpgrade connects to u, and tries to upgrade it to a net.Conn. // // If optAddr is valid, then no DNS is used and the connection will be made to @@ -424,11 +425,14 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad } if optACEHost != "" { - dialer = (&ace.Dialer{ - ACEHost: optACEHost, - ACEHostIP: optAddr, // may be zero - NetDialer: dialer, - }).Dial + if !buildfeatures.HasACE { + return nil, feature.ErrUnavailable + } + f, ok := HookMakeACEDialer.GetOk() + if !ok { + return nil, feature.ErrUnavailable + } + dialer = f(dialer, optACEHost, optAddr) } // On macOS, see if Screen Time is blocking things. diff --git a/feature/ace/ace.go b/feature/ace/ace.go new file mode 100644 index 000000000..b6d36543c --- /dev/null +++ b/feature/ace/ace.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package ace registers support for Alternate Connectivity Endpoints (ACE). +package ace + +import ( + "net/netip" + + "tailscale.com/control/controlhttp" + "tailscale.com/net/ace" + "tailscale.com/net/netx" +) + +func init() { + controlhttp.HookMakeACEDialer.Set(mkDialer) +} + +func mkDialer(dialer netx.DialFunc, aceHost string, optIP netip.Addr) netx.DialFunc { + return (&ace.Dialer{ + ACEHost: aceHost, + ACEHostIP: optIP, // may be zero + NetDialer: dialer, + }).Dial +} diff --git a/feature/buildfeatures/feature_ace_disabled.go b/feature/buildfeatures/feature_ace_disabled.go new file mode 100644 index 000000000..b4808d497 --- /dev/null +++ b/feature/buildfeatures/feature_ace_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_ace + +package buildfeatures + +// HasACE is whether the binary was built with support for modular feature "Alternate Connectivity Endpoints". +// Specifically, it's whether the binary was NOT built with the "ts_omit_ace" build tag. +// It's a const so it can be used for dead code elimination. +const HasACE = false diff --git a/feature/buildfeatures/feature_ace_enabled.go b/feature/buildfeatures/feature_ace_enabled.go new file mode 100644 index 000000000..4812f9a61 --- /dev/null +++ b/feature/buildfeatures/feature_ace_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_ace + +package buildfeatures + +// HasACE is whether the binary was built with support for modular feature "Alternate Connectivity Endpoints". +// Specifically, it's whether the binary was NOT built with the "ts_omit_ace" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasACE = true diff --git a/feature/condregister/maybe_ace.go b/feature/condregister/maybe_ace.go new file mode 100644 index 000000000..070231711 --- /dev/null +++ b/feature/condregister/maybe_ace.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_ace + +package condregister + +import _ "tailscale.com/feature/ace" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 041b68ec5..5884d48d5 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -93,6 +93,7 @@ type FeatureMeta struct { // Features are the known Tailscale features that can be selectively included or // excluded via build tags, and a description of each. var Features = map[FeatureTag]FeatureMeta{ + "ace": {Sym: "ACE", Desc: "Alternate Connectivity Endpoints"}, "acme": {Sym: "ACME", Desc: "ACME TLS certificate management"}, "appconnectors": {Sym: "AppConnectors", Desc: "App Connectors support"}, "aws": {Sym: "AWS", Desc: "AWS integration"}, diff --git a/net/ace/ace.go b/net/ace/ace.go index 1bb64d64d..47e780313 100644 --- a/net/ace/ace.go +++ b/net/ace/ace.go @@ -28,6 +28,8 @@ type Dialer struct { ACEHostIP netip.Addr // optional; if non-zero, use this IP instead of DNS ACEPort int // zero means 443 + // NetDialer optionally specifies the underlying dialer to use to reach the + // ACEHost. If nil, net.Dialer.DialContext is used. NetDialer func(ctx context.Context, network, address string) (net.Conn, error) } diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index f8d7bf7a8..5df2c41b9 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -166,7 +166,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/net/tstun+ - tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ From db65f3fcf87035c64b810a01bba60745e48b2444 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 3 Oct 2025 17:42:40 -0700 Subject: [PATCH 1486/1708] ipn/ipnlocal: use buildfeature consts in a few more places Updates #12614 Change-Id: I561d434d9829172a3d7f6933399237924ff80490 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 15 +++++++++++---- tailcfg/tailcfg.go | 4 ++++ 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8cdb49876..9a47b7cb1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5416,7 +5416,9 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip hi.ShieldsUp = prefs.ShieldsUp() hi.AllowsUpdate = buildfeatures.HasClientUpdate && (envknob.AllowsRemoteUpdate() || prefs.AutoUpdate().Apply.EqualBool(true)) - b.metrics.advertisedRoutes.Set(float64(tsaddr.WithoutExitRoute(prefs.AdvertiseRoutes()).Len())) + if buildfeatures.HasAdvertiseRoutes { + b.metrics.advertisedRoutes.Set(float64(tsaddr.WithoutExitRoute(prefs.AdvertiseRoutes()).Len())) + } var sshHostKeys []string if buildfeatures.HasSSH && prefs.RunSSH() && envknob.CanSSHD() { @@ -5445,7 +5447,10 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // DNS records are needed, so we can save 
bandwidth and not send // WireIngress. hi.WireIngress = b.shouldWireInactiveIngressLocked() - hi.AppConnector.Set(prefs.AppConnector().Advertise) + + if buildfeatures.HasAppConnectors { + hi.AppConnector.Set(prefs.AppConnector().Advertise) + } // The [tailcfg.Hostinfo.ExitNodeID] field tells control which exit node // was selected, if any. @@ -5461,8 +5466,10 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // ExitNodeID here; [LocalBackend.ResolveExitNode] will be called once // the netmap and/or net report have been received to both pick the exit // node and notify control of the change. - if sid := prefs.ExitNodeID(); sid != unresolvedExitNodeID { - hi.ExitNodeID = prefs.ExitNodeID() + if buildfeatures.HasUseExitNode { + if sid := prefs.ExitNodeID(); sid != unresolvedExitNodeID { + hi.ExitNodeID = prefs.ExitNodeID() + } } } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 96e7fbbd9..da53b777e 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -20,6 +20,7 @@ import ( "strings" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/opt" @@ -1088,6 +1089,9 @@ func (ni *NetInfo) String() string { } func (ni *NetInfo) portMapSummary() string { + if !buildfeatures.HasPortMapper { + return "x" + } if !ni.HavePortMap && ni.UPnP == "" && ni.PMP == "" && ni.PCP == "" { return "?" } From 2e381557b856f4a8969e6a4b3f1104b77830c3e7 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 4 Oct 2025 09:18:57 -0700 Subject: [PATCH 1487/1708] feature/c2n: move answerC2N code + deps out of control/controlclient c2n was already a conditional feature, but it didn't have a feature/c2n directory before (rather, it was using consts + DCE). This adds it, and moves some code, which removes the httprec dependency. Also, remove some unnecessary code from our httprec fork. 
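The mechanism is the usual hook-based conditional registration: controlclient
declares an empty feature.Hook, feature/c2n fills it in from an init func, and the
calling code only invokes the hook if it was set, so the whole feature can be
omitted from a build. A self-contained sketch of that shape (toy hook type and
illustrative names; the real one is feature.Hook in tailscale.com/feature):

    package main

    import "fmt"

    // hook is a toy stand-in for a registration slot: a feature package fills
    // it in at init time, and core code checks it before use so the feature
    // can be left out of a build entirely.
    type hook[T any] struct {
        f  T
        ok bool
    }

    func (h *hook[T]) Set(f T)          { h.f, h.ok = f, true }
    func (h *hook[T]) GetOk() (T, bool) { return h.f, h.ok }

    // Declared in the core package; stays empty unless the feature links in.
    var answerPing hook[func(payload string)]

    // Registered from the feature package's init().
    func init() { answerPing.Set(func(p string) { fmt.Println("answered:", p) }) }

    func main() {
        if f, ok := answerPing.GetOk(); ok {
            f("c2n request")
        } else {
            fmt.Println("c2n support not compiled in")
        }
    }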
Updates #12614 Change-Id: I2fbe538e09794c517038e35a694a363312c426a2 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 3 +- cmd/tailscaled/depaware-min.txt | 3 +- cmd/tailscaled/depaware-minbox.txt | 3 +- cmd/tailscaled/depaware.txt | 5 ++- cmd/tsidp/depaware.txt | 5 ++- control/controlclient/direct.go | 58 +++---------------------- feature/c2n/c2n.go | 70 ++++++++++++++++++++++++++++++ feature/condregister/maybe_c2n.go | 8 ++++ tempfork/httprec/httprec.go | 38 ---------------- tsnet/depaware.txt | 5 ++- tsnet/tsnet.go | 1 + 11 files changed, 99 insertions(+), 100 deletions(-) create mode 100644 feature/c2n/c2n.go create mode 100644 feature/condregister/maybe_c2n.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 85d912fab..6e2a83e3c 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -700,6 +700,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/c2n from tailscale.com/tsnet tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet @@ -791,7 +792,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/httprec from tailscale.com/feature/c2n tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tsd from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 626fe5acd..9210b4377 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -117,7 +117,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/syncs from tailscale.com/cmd/tailscaled+ tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient tailscale.com/tka from tailscale.com/control/controlclient+ tailscale.com/tsconst from tailscale.com/net/netns tailscale.com/tsd from tailscale.com/cmd/tailscaled+ @@ -211,7 +210,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/ipn/ipnlocal+ - golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/icmp from tailscale.com/net/ping golang.org/x/net/idna from golang.org/x/net/http/httpguts golang.org/x/net/internal/iana from golang.org/x/net/icmp+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 37909089d..b183609f3 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -142,7 +142,6 @@ tailscale.com/cmd/tailscaled 
dependencies: (generated by github.com/tailscale/de tailscale.com/syncs from tailscale.com/cmd/tailscaled+ tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient tailscale.com/tempfork/spf13/cobra from tailscale.com/cmd/tailscale/cli/ffcomplete+ tailscale.com/tka from tailscale.com/control/controlclient+ tailscale.com/tsconst from tailscale.com/net/netns+ @@ -239,7 +238,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/cmd/tailscale/cli+ - golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/icmp from tailscale.com/net/ping golang.org/x/net/idna from golang.org/x/net/http/httpguts+ golang.org/x/net/internal/iana from golang.org/x/net/icmp+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index ff3cc5c10..3e0930fcb 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -275,6 +275,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/ace from tailscale.com/feature/condregister tailscale.com/feature/appconnectors from tailscale.com/feature/condregister tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/c2n from tailscale.com/feature/condregister tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/clientupdate from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled @@ -379,7 +380,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal LD tailscale.com/tempfork/gliderlabs/ssh from tailscale.com/ssh/tailssh tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/httprec from tailscale.com/feature/c2n tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tsd from tailscale.com/cmd/tailscaled+ @@ -502,7 +503,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ - golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/icmp from tailscale.com/net/ping+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 47c8086c5..2a87061e4 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -142,6 +142,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/c2n from tailscale.com/tsnet 
tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet @@ -218,7 +219,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/httprec from tailscale.com/feature/c2n tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/ipn/ipnlocal+ tailscale.com/tsd from tailscale.com/ipn/ipnext+ @@ -334,7 +335,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ - golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 482affe33..922355369 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -4,7 +4,6 @@ package controlclient import ( - "bufio" "bytes" "cmp" "context" @@ -44,7 +43,6 @@ import ( "tailscale.com/net/tlsdial" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" - "tailscale.com/tempfork/httprec" "tailscale.com/tka" "tailscale.com/tstime" "tailscale.com/types/key" @@ -1389,6 +1387,10 @@ func (c *Direct) isUniquePingRequest(pr *tailcfg.PingRequest) bool { return true } +// HookAnswerC2NPing is where feature/c2n conditionally registers support +// for handling C2N (control-to-node) HTTP requests. 
+var HookAnswerC2NPing feature.Hook[func(logger.Logf, http.Handler, *http.Client, *tailcfg.PingRequest)] + func (c *Direct) answerPing(pr *tailcfg.PingRequest) { httpc := c.httpc useNoise := pr.URLIsNoise || pr.Types == "c2n" @@ -1416,7 +1418,9 @@ func (c *Direct) answerPing(pr *tailcfg.PingRequest) { c.logf("refusing to answer c2n ping without noise") return } - answerC2NPing(c.logf, c.c2nHandler, httpc, pr) + if f, ok := HookAnswerC2NPing.GetOk(); ok { + f(c.logf, c.c2nHandler, httpc, pr) + } return } for _, t := range strings.Split(pr.Types, ",") { @@ -1451,54 +1455,6 @@ func answerHeadPing(logf logger.Logf, c *http.Client, pr *tailcfg.PingRequest) { } } -func answerC2NPing(logf logger.Logf, c2nHandler http.Handler, c *http.Client, pr *tailcfg.PingRequest) { - if c2nHandler == nil { - logf("answerC2NPing: c2nHandler not defined") - return - } - hreq, err := http.ReadRequest(bufio.NewReader(bytes.NewReader(pr.Payload))) - if err != nil { - logf("answerC2NPing: ReadRequest: %v", err) - return - } - if pr.Log { - logf("answerC2NPing: got c2n request for %v ...", hreq.RequestURI) - } - handlerTimeout := time.Minute - if v := hreq.Header.Get("C2n-Handler-Timeout"); v != "" { - handlerTimeout, _ = time.ParseDuration(v) - } - handlerCtx, cancel := context.WithTimeout(context.Background(), handlerTimeout) - defer cancel() - hreq = hreq.WithContext(handlerCtx) - rec := httprec.NewRecorder() - c2nHandler.ServeHTTP(rec, hreq) - cancel() - - c2nResBuf := new(bytes.Buffer) - rec.Result().Write(c2nResBuf) - - replyCtx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - req, err := http.NewRequestWithContext(replyCtx, "POST", pr.URL, c2nResBuf) - if err != nil { - logf("answerC2NPing: NewRequestWithContext: %v", err) - return - } - if pr.Log { - logf("answerC2NPing: sending POST ping to %v ...", pr.URL) - } - t0 := clock.Now() - _, err = c.Do(req) - d := time.Since(t0).Round(time.Millisecond) - if err != nil { - logf("answerC2NPing error: %v to %v (after %v)", err, pr.URL, d) - } else if pr.Log { - logf("answerC2NPing complete to %v (after %v)", pr.URL, d) - } -} - // sleepAsRequest implements the sleep for a tailcfg.Debug message requesting // that the client sleep. The complication is that while we're sleeping (if for // a long time), we need to periodically reset the watchdog timer before it diff --git a/feature/c2n/c2n.go b/feature/c2n/c2n.go new file mode 100644 index 000000000..ae942e31d --- /dev/null +++ b/feature/c2n/c2n.go @@ -0,0 +1,70 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package c2n registers support for C2N (Control-to-Node) communications. 
+package c2n + +import ( + "bufio" + "bytes" + "context" + "net/http" + "time" + + "tailscale.com/control/controlclient" + "tailscale.com/tailcfg" + "tailscale.com/tempfork/httprec" + "tailscale.com/types/logger" +) + +func init() { + controlclient.HookAnswerC2NPing.Set(answerC2NPing) +} + +func answerC2NPing(logf logger.Logf, c2nHandler http.Handler, c *http.Client, pr *tailcfg.PingRequest) { + if c2nHandler == nil { + logf("answerC2NPing: c2nHandler not defined") + return + } + hreq, err := http.ReadRequest(bufio.NewReader(bytes.NewReader(pr.Payload))) + if err != nil { + logf("answerC2NPing: ReadRequest: %v", err) + return + } + if pr.Log { + logf("answerC2NPing: got c2n request for %v ...", hreq.RequestURI) + } + handlerTimeout := time.Minute + if v := hreq.Header.Get("C2n-Handler-Timeout"); v != "" { + handlerTimeout, _ = time.ParseDuration(v) + } + handlerCtx, cancel := context.WithTimeout(context.Background(), handlerTimeout) + defer cancel() + hreq = hreq.WithContext(handlerCtx) + rec := httprec.NewRecorder() + c2nHandler.ServeHTTP(rec, hreq) + cancel() + + c2nResBuf := new(bytes.Buffer) + rec.Result().Write(c2nResBuf) + + replyCtx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + req, err := http.NewRequestWithContext(replyCtx, "POST", pr.URL, c2nResBuf) + if err != nil { + logf("answerC2NPing: NewRequestWithContext: %v", err) + return + } + if pr.Log { + logf("answerC2NPing: sending POST ping to %v ...", pr.URL) + } + t0 := time.Now() + _, err = c.Do(req) + d := time.Since(t0).Round(time.Millisecond) + if err != nil { + logf("answerC2NPing error: %v to %v (after %v)", err, pr.URL, d) + } else if pr.Log { + logf("answerC2NPing complete to %v (after %v)", pr.URL, d) + } +} diff --git a/feature/condregister/maybe_c2n.go b/feature/condregister/maybe_c2n.go new file mode 100644 index 000000000..c222af533 --- /dev/null +++ b/feature/condregister/maybe_c2n.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_c2n + +package condregister + +import _ "tailscale.com/feature/c2n" diff --git a/tempfork/httprec/httprec.go b/tempfork/httprec/httprec.go index 13786aaf6..07ca673fe 100644 --- a/tempfork/httprec/httprec.go +++ b/tempfork/httprec/httprec.go @@ -14,9 +14,6 @@ import ( "net/http" "net/textproto" "strconv" - "strings" - - "golang.org/x/net/http/httpguts" ) // ResponseRecorder is an implementation of [http.ResponseWriter] that @@ -59,10 +56,6 @@ func NewRecorder() *ResponseRecorder { } } -// DefaultRemoteAddr is the default remote address to return in RemoteAddr if -// an explicit DefaultRemoteAddr isn't set on [ResponseRecorder]. -const DefaultRemoteAddr = "1.2.3.4" - // Header implements [http.ResponseWriter]. It returns the response // headers to mutate within a handler. To test the headers that were // written after a handler completes, use the [ResponseRecorder.Result] method and see @@ -206,37 +199,6 @@ func (rw *ResponseRecorder) Result() *http.Response { res.Body = http.NoBody } res.ContentLength = parseContentLength(res.Header.Get("Content-Length")) - - if trailers, ok := rw.snapHeader["Trailer"]; ok { - res.Trailer = make(http.Header, len(trailers)) - for _, k := range trailers { - for _, k := range strings.Split(k, ",") { - k = http.CanonicalHeaderKey(textproto.TrimString(k)) - if !httpguts.ValidTrailerHeader(k) { - // Ignore since forbidden by RFC 7230, section 4.1.2. 
- continue - } - vv, ok := rw.HeaderMap[k] - if !ok { - continue - } - vv2 := make([]string, len(vv)) - copy(vv2, vv) - res.Trailer[k] = vv2 - } - } - } - for k, vv := range rw.HeaderMap { - if !strings.HasPrefix(k, http.TrailerPrefix) { - continue - } - if res.Trailer == nil { - res.Trailer = make(http.Header) - } - for _, v := range vv { - res.Trailer.Add(strings.TrimPrefix(k, http.TrailerPrefix), v) - } - } return res } diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 5df2c41b9..6e6ea4050 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -138,6 +138,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/c2n from tailscale.com/tsnet tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet @@ -214,7 +215,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/httprec from tailscale.com/feature/c2n tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/ipn/ipnlocal+ tailscale.com/tsd from tailscale.com/ipn/ipnext+ @@ -327,7 +328,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ - golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 890193d0b..2944f6359 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -29,6 +29,7 @@ import ( "tailscale.com/client/local" "tailscale.com/control/controlclient" "tailscale.com/envknob" + _ "tailscale.com/feature/c2n" _ "tailscale.com/feature/condregister/oauthkey" _ "tailscale.com/feature/condregister/portmapper" _ "tailscale.com/feature/condregister/useproxy" From 3c7e35167133003531d217e9597fd9e6477fc3d3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 4 Oct 2025 11:43:17 -0700 Subject: [PATCH 1488/1708] net/connstats: make it modular (omittable) Saves only 12 KB, but notably removes some deps on packages that future changes can then eliminate entirely. 
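The mechanism follows the other modular features: a generated buildfeatures.HasConnStats constant (true unless the binary is built with the ts_omit_connstats tag) guards each hot-path use, and a small stub in net/connstats/stats_omit.go keeps callers compiling when the real implementation is omitted. A sketch of the guard pattern, as applied in net/tstun/wrap.go below:

    if buildfeatures.HasConnStats {
        // With ts_omit_connstats, HasConnStats is a false constant, so the
        // linker can discard this branch and the connstats code behind it.
        if stats := t.stats.Load(); stats != nil {
            stats.UpdateTxVirtual(p.Buffer())
        }
    }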
Updates #12614 Change-Id: Ibf830d3ee08f621d0a2011b1d4cd175427ef50df Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-min.txt | 3 +-- cmd/tailscaled/depaware-minbox.txt | 3 +-- cmd/tailscaled/deps_test.go | 1 + .../feature_connstats_disabled.go | 13 ++++++++++ .../feature_connstats_enabled.go | 13 ++++++++++ feature/featuretags/featuretags.go | 6 ++++- net/connstats/stats.go | 2 ++ net/connstats/stats_omit.go | 24 +++++++++++++++++ net/tstun/wrap.go | 26 ++++++++++++------- wgengine/magicsock/magicsock.go | 10 ++++--- wgengine/netlog/netlog.go | 21 +++++++++------ 11 files changed, 97 insertions(+), 25 deletions(-) create mode 100644 feature/buildfeatures/feature_connstats_disabled.go create mode 100644 feature/buildfeatures/feature_connstats_enabled.go create mode 100644 net/connstats/stats_omit.go diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 9210b4377..6ed602dc1 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -134,7 +134,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext - tailscale.com/types/netlogtype from tailscale.com/net/connstats tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/net/batching+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ @@ -217,7 +216,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ golang.org/x/net/ipv6 from github.com/tailscale/wireguard-go/conn+ - golang.org/x/sync/errgroup from github.com/mdlayher/socket+ + golang.org/x/sync/errgroup from github.com/mdlayher/socket golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ golang.org/x/term from tailscale.com/logpolicy diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index b183609f3..70fed796f 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -160,7 +160,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext - tailscale.com/types/netlogtype from tailscale.com/net/connstats tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/net/batching+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ @@ -245,7 +244,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ golang.org/x/net/ipv6 from github.com/tailscale/wireguard-go/conn+ - golang.org/x/sync/errgroup from github.com/mdlayher/socket+ + golang.org/x/sync/errgroup from github.com/mdlayher/socket golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ golang.org/x/term from tailscale.com/logpolicy diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go 
index c54f014f6..2dd140f23 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -243,6 +243,7 @@ func TestMinTailscaledNoCLI(t *testing.T) { "golang.org/x/net/proxy", "internal/socks", "github.com/tailscale/peercred", + "tailscale.com/types/netlogtype", } deptest.DepChecker{ GOOS: "linux", diff --git a/feature/buildfeatures/feature_connstats_disabled.go b/feature/buildfeatures/feature_connstats_disabled.go new file mode 100644 index 000000000..d9aac0e80 --- /dev/null +++ b/feature/buildfeatures/feature_connstats_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_connstats + +package buildfeatures + +// HasConnStats is whether the binary was built with support for modular feature "Track per-packet connection statistics". +// Specifically, it's whether the binary was NOT built with the "ts_omit_connstats" build tag. +// It's a const so it can be used for dead code elimination. +const HasConnStats = false diff --git a/feature/buildfeatures/feature_connstats_enabled.go b/feature/buildfeatures/feature_connstats_enabled.go new file mode 100644 index 000000000..c0451ce1e --- /dev/null +++ b/feature/buildfeatures/feature_connstats_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_connstats + +package buildfeatures + +// HasConnStats is whether the binary was built with support for modular feature "Track per-packet connection statistics". +// Specifically, it's whether the binary was NOT built with the "ts_omit_connstats" build tag. +// It's a const so it can be used for dead code elimination. +const HasConnStats = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 5884d48d5..4ae4e1b86 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -130,7 +130,11 @@ var Features = map[FeatureTag]FeatureMeta{ Deps: []FeatureTag{"c2n"}, }, "completion": {Sym: "Completion", Desc: "CLI shell completion"}, - "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, + "connstats": { + Sym: "ConnStats", + Desc: "Track per-packet connection statistics", + }, + "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, "dbus": { Sym: "DBus", Desc: "Linux DBus support", diff --git a/net/connstats/stats.go b/net/connstats/stats.go index 4e6d8e109..44b276254 100644 --- a/net/connstats/stats.go +++ b/net/connstats/stats.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_connstats + // Package connstats maintains statistics about connections // flowing through a TUN device (which operate at the IP layer). 
package connstats diff --git a/net/connstats/stats_omit.go b/net/connstats/stats_omit.go new file mode 100644 index 000000000..15d16c9e4 --- /dev/null +++ b/net/connstats/stats_omit.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_connstats + +package connstats + +import ( + "context" + "net/netip" + "time" +) + +type Statistics struct{} + +func NewStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end time.Time, virtual, physical any)) *Statistics { + return &Statistics{} +} + +func (s *Statistics) UpdateTxVirtual(b []byte) {} +func (s *Statistics) UpdateRxVirtual(b []byte) {} +func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) {} +func (s *Statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) {} +func (s *Statistics) Shutdown(context.Context) error { return nil } diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index c94844c90..a6d88075d 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -973,8 +973,10 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { panic(fmt.Sprintf("short copy: %d != %d", n, len(data)-res.dataOffset)) } sizes[buffsPos] = n - if stats := t.stats.Load(); stats != nil { - stats.UpdateTxVirtual(p.Buffer()) + if buildfeatures.HasConnStats { + if stats := t.stats.Load(); stats != nil { + stats.UpdateTxVirtual(p.Buffer()) + } } buffsPos++ } @@ -1098,9 +1100,11 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i n, err = tun.GSOSplit(pkt, gsoOptions, outBuffs, sizes, offset) } - if stats := t.stats.Load(); stats != nil { - for i := 0; i < n; i++ { - stats.UpdateTxVirtual(outBuffs[i][offset : offset+sizes[i]]) + if buildfeatures.HasConnStats { + if stats := t.stats.Load(); stats != nil { + for i := 0; i < n; i++ { + stats.UpdateTxVirtual(outBuffs[i][offset : offset+sizes[i]]) + } } } @@ -1266,9 +1270,11 @@ func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) { } func (t *Wrapper) tdevWrite(buffs [][]byte, offset int) (int, error) { - if stats := t.stats.Load(); stats != nil { - for i := range buffs { - stats.UpdateRxVirtual((buffs)[i][offset:]) + if buildfeatures.HasConnStats { + if stats := t.stats.Load(); stats != nil { + for i := range buffs { + stats.UpdateRxVirtual((buffs)[i][offset:]) + } } } return t.tdev.Write(buffs, offset) @@ -1490,7 +1496,9 @@ func (t *Wrapper) Unwrap() tun.Device { // SetStatistics specifies a per-connection statistics aggregator. // Nil may be specified to disable statistics gathering. 
func (t *Wrapper) SetStatistics(stats *connstats.Statistics) { - t.stats.Store(stats) + if buildfeatures.HasConnStats { + t.stats.Store(stats) + } } var ( diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index b6cb7b336..76fbfb3b4 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1865,8 +1865,10 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach now := mono.Now() ep.lastRecvUDPAny.StoreAtomic(now) connNoted := ep.noteRecvActivity(src, now) - if stats := c.stats.Load(); stats != nil { - stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, geneveInclusivePacketLen) + if buildfeatures.HasConnStats { + if stats := c.stats.Load(); stats != nil { + stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, geneveInclusivePacketLen) + } } if src.vni.IsSet() && (connNoted || looksLikeInitiationMsg(b)) { // connNoted is periodic, but we also want to verify if the peer is who @@ -3743,7 +3745,9 @@ func (c *Conn) UpdateStatus(sb *ipnstate.StatusBuilder) { // SetStatistics specifies a per-connection statistics aggregator. // Nil may be specified to disable statistics gathering. func (c *Conn) SetStatistics(stats *connstats.Statistics) { - c.stats.Store(stats) + if buildfeatures.HasConnStats { + c.stats.Store(stats) + } } // SetHomeless sets whether magicsock should idle harder and not have a DERP diff --git a/wgengine/netlog/netlog.go b/wgengine/netlog/netlog.go index b7281e542..7e1938d27 100644 --- a/wgengine/netlog/netlog.go +++ b/wgengine/netlog/netlog.go @@ -19,6 +19,7 @@ import ( "sync" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/logpolicy" "tailscale.com/logtail" @@ -130,20 +131,24 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo // can upload to the Tailscale log service, so stay below this limit. const maxLogSize = 256 << 10 const maxConns = (maxLogSize - netlogtype.MaxMessageJSONSize) / netlogtype.MaxConnectionCountsJSONSize - nl.stats = connstats.NewStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { - nl.mu.Lock() - addrs := nl.addrs - prefixes := nl.prefixes - nl.mu.Unlock() - recordStatistics(nl.logger, nodeID, start, end, virtual, physical, addrs, prefixes, logExitFlowEnabledEnabled) - }) + if buildfeatures.HasConnStats { + nl.stats = connstats.NewStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { + nl.mu.Lock() + addrs := nl.addrs + prefixes := nl.prefixes + nl.mu.Unlock() + recordStatistics(nl.logger, nodeID, start, end, virtual, physical, addrs, prefixes, logExitFlowEnabledEnabled) + }) + } // Register the connection tracker into the TUN device. if tun == nil { tun = noopDevice{} } nl.tun = tun - nl.tun.SetStatistics(nl.stats) + if buildfeatures.HasConnStats { + nl.tun.SetStatistics(nl.stats) + } // Register the connection tracker into magicsock. if sock == nil { From 3aa8b6d683bdf59a383719a8fff2adbcc85d0fb1 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Sat, 4 Oct 2025 15:05:41 -0700 Subject: [PATCH 1489/1708] wgengine/magicsock: remove misleading unexpected log message (#17445) Switching to a Geneve-encapsulated (peer relay) path in endpoint.handlePongConnLocked is expected around port rebinds, which end up clearing endpoint.bestAddr. 
Fixes tailscale/corp#33036 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 1f36aabd3..f4c8b1469 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1768,11 +1768,6 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd // we don't clear direct UDP paths on disco ping timeout (see // discoPingTimeout). if betterAddr(thisPong, de.bestAddr) { - if src.vni.IsSet() { - // This would be unexpected. Switching to a Geneve-encapsulated - // path should only happen in de.relayEndpointReady(). - de.c.logf("[unexpected] switching to Geneve-encapsulated path %v from %v", thisPong, de.bestAddr) - } de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v tx=%x", de.publicKey.ShortString(), de.discoShort(), sp.to, thisPong.wireMTU, m.TxID[:6]) de.debugUpdates.Add(EndpointChange{ When: time.Now(), From e44e28efcd95596c0a86270c177ef912119bf851 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Sat, 4 Oct 2025 20:27:57 -0700 Subject: [PATCH 1490/1708] wgengine/magicsock: fix relayManager deadlock (#17449) Updates tailscale/corp#32978 Signed-off-by: Jordan Whited --- wgengine/magicsock/relaymanager.go | 5 +++- wgengine/magicsock/relaymanager_test.go | 39 +++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 4680832d9..a9dca70ae 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -758,7 +758,10 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay ctx: ctx, cancel: cancel, } - if byServerDisco == nil { + // We must look up byServerDisco again. The previous value may have been + // deleted from the outer map when cleaning up duplicate work. 
+ byServerDisco, ok = r.handshakeWorkByServerDiscoByEndpoint[newServerEndpoint.wlb.ep] + if !ok { byServerDisco = make(map[key.DiscoPublic]*relayHandshakeWork) r.handshakeWorkByServerDiscoByEndpoint[newServerEndpoint.wlb.ep] = byServerDisco } diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index e4891f567..6ae21b8fb 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -7,6 +7,7 @@ import ( "testing" "tailscale.com/disco" + udprelay "tailscale.com/net/udprelay/endpoint" "tailscale.com/types/key" "tailscale.com/util/set" ) @@ -78,3 +79,41 @@ func TestRelayManagerGetServers(t *testing.T) { t.Errorf("got %v != want %v", got, servers) } } + +// Test for http://go/corp/32978 +func TestRelayManager_handleNewServerEndpointRunLoop(t *testing.T) { + rm := relayManager{} + rm.init() + <-rm.runLoopStoppedCh // prevent runLoop() from starting, we will inject/handle events in the test + ep := &endpoint{} + conn := newConn(t.Logf) + ep.c = conn + serverDisco := key.NewDisco().Public() + rm.handleNewServerEndpointRunLoop(newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ + ep: ep, + }, + se: udprelay.ServerEndpoint{ + ServerDisco: serverDisco, + LamportID: 1, + VNI: 1, + }, + }) + rm.handleNewServerEndpointRunLoop(newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ + ep: ep, + }, + se: udprelay.ServerEndpoint{ + ServerDisco: serverDisco, + LamportID: 2, + VNI: 2, + }, + }) + rm.stopWorkRunLoop(ep) + if len(rm.handshakeWorkByServerDiscoByEndpoint) != 0 || + len(rm.handshakeWorkByServerDiscoVNI) != 0 || + len(rm.handshakeWorkAwaitingPong) != 0 || + len(rm.addrPortVNIToHandshakeWork) != 0 { + t.Fatal("stranded relayHandshakeWork state") + } +} From 6820ec5bbb3e1b1c3a3fa77324d2fb55e6efee95 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 4 Oct 2025 14:17:19 -0700 Subject: [PATCH 1491/1708] wgengine: stop importing flowtrack when unused Updates #12614 Change-Id: I42b5c4d623d356af4bee5bbdabaaf0f6822f2bf4 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 2 +- cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- net/packet/tsmp.go | 5 ----- tsnet/depaware.txt | 2 +- wgengine/pendopen.go | 12 ++++++++++-- wgengine/pendopen_omit.go | 24 ++++++++++++++++++++++++ wgengine/userspace.go | 3 +-- 10 files changed, 41 insertions(+), 15 deletions(-) create mode 100644 wgengine/pendopen_omit.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 6e2a83e3c..6ecbd3df8 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -753,7 +753,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/control/controlclient+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/memnet from tailscale.com/tsnet tailscale.com/net/netaddr from tailscale.com/ipn+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 6ed602dc1..6d7a11623 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -86,7 +86,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by 
github.com/tailscale/de tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine/filter tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/netaddr from tailscale.com/ipn+ tailscale.com/net/netcheck from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 70fed796f..5a71cebd4 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -110,7 +110,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine/filter tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/netaddr from tailscale.com/ipn+ tailscale.com/net/netcheck from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 3e0930fcb..1bd165c17 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -335,7 +335,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/netaddr from tailscale.com/ipn+ tailscale.com/net/netcheck from tailscale.com/wgengine/magicsock+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 2a87061e4..f348a394f 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -181,7 +181,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/control/controlclient+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/memnet from tailscale.com/tsnet tailscale.com/net/netaddr from tailscale.com/ipn+ diff --git a/net/packet/tsmp.go b/net/packet/tsmp.go index d78d10d36..0ea321e84 100644 --- a/net/packet/tsmp.go +++ b/net/packet/tsmp.go @@ -15,7 +15,6 @@ import ( "fmt" "net/netip" - "tailscale.com/net/flowtrack" "tailscale.com/types/ipproto" ) @@ -58,10 +57,6 @@ type TailscaleRejectedHeader struct { const rejectFlagBitMaybeBroken = 0x1 -func (rh TailscaleRejectedHeader) Flow() flowtrack.Tuple { - return flowtrack.MakeTuple(rh.Proto, rh.Src, rh.Dst) -} - func (rh TailscaleRejectedHeader) String() string { return fmt.Sprintf("TSMP-reject-flow{%s %s > %s}: %s", rh.Proto, rh.Src, rh.Dst, rh.Reason) } diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 6e6ea4050..014ea109c 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -177,7 +177,7 @@ 
tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/control/controlclient+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/memnet from tailscale.com/tsnet tailscale.com/net/netaddr from tailscale.com/ipn+ diff --git a/wgengine/pendopen.go b/wgengine/pendopen.go index 28d1f4f9d..7eaf43e52 100644 --- a/wgengine/pendopen.go +++ b/wgengine/pendopen.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_debug + package wgengine import ( @@ -20,6 +22,8 @@ import ( "tailscale.com/wgengine/filter" ) +type flowtrackTuple = flowtrack.Tuple + const tcpTimeoutBeforeDebug = 5 * time.Second type pendingOpenFlow struct { @@ -56,6 +60,10 @@ func (e *userspaceEngine) noteFlowProblemFromPeer(f flowtrack.Tuple, problem pac of.problem = problem } +func tsRejectFlow(rh packet.TailscaleRejectedHeader) flowtrack.Tuple { + return flowtrack.MakeTuple(rh.Proto, rh.Src, rh.Dst) +} + func (e *userspaceEngine) trackOpenPreFilterIn(pp *packet.Parsed, t *tstun.Wrapper) (res filter.Response) { res = filter.Accept // always @@ -66,8 +74,8 @@ func (e *userspaceEngine) trackOpenPreFilterIn(pp *packet.Parsed, t *tstun.Wrapp return } if rh.MaybeBroken { - e.noteFlowProblemFromPeer(rh.Flow(), rh.Reason) - } else if f := rh.Flow(); e.removeFlow(f) { + e.noteFlowProblemFromPeer(tsRejectFlow(rh), rh.Reason) + } else if f := tsRejectFlow(rh); e.removeFlow(f) { e.logf("open-conn-track: flow %v %v > %v rejected due to %v", rh.Proto, rh.Src, rh.Dst, rh.Reason) } return diff --git a/wgengine/pendopen_omit.go b/wgengine/pendopen_omit.go new file mode 100644 index 000000000..013425d35 --- /dev/null +++ b/wgengine/pendopen_omit.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_debug + +package wgengine + +import ( + "tailscale.com/net/packet" + "tailscale.com/net/tstun" + "tailscale.com/wgengine/filter" +) + +type flowtrackTuple = struct{} + +type pendingOpenFlow struct{} + +func (*userspaceEngine) trackOpenPreFilterIn(pp *packet.Parsed, t *tstun.Wrapper) (res filter.Response) { + panic("unreachable") +} + +func (*userspaceEngine) trackOpenPostFilterOut(pp *packet.Parsed, t *tstun.Wrapper) (res filter.Response) { + panic("unreachable") +} diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 735181ec7..be0fe5031 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -29,7 +29,6 @@ import ( "tailscale.com/ipn/ipnstate" "tailscale.com/net/dns" "tailscale.com/net/dns/resolver" - "tailscale.com/net/flowtrack" "tailscale.com/net/ipset" "tailscale.com/net/netmon" "tailscale.com/net/packet" @@ -147,7 +146,7 @@ type userspaceEngine struct { statusCallback StatusCallback peerSequence []key.NodePublic endpoints []tailcfg.Endpoint - pendOpen map[flowtrack.Tuple]*pendingOpenFlow // see pendopen.go + pendOpen map[flowtrackTuple]*pendingOpenFlow // see pendopen.go // pongCallback is the map of response handlers waiting for disco or TSMP // pong callbacks. The map key is a random slice of bytes. 
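A side note on the pendopen split in the patch above: it relies on a Go type alias so the userspaceEngine.pendOpen field compiles under either build tag, without the wgengine package importing net/flowtrack when the debug feature is omitted. A sketch of the two variants, using the names from the diff:

    // In pendopen.go (built unless ts_omit_debug is set): the real key type.
    type flowtrackTuple = flowtrack.Tuple

    // In pendopen_omit.go (built with ts_omit_debug): a zero-size stand-in, so
    // pendOpen map[flowtrackTuple]*pendingOpenFlow still compiles even though
    // the tracking hooks in that build are stubs that are never reached.
    type flowtrackTuple = struct{}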
From f80c7e7c23e3201c62c3bb132ba66e87d9f06e6c Mon Sep 17 00:00:00 2001 From: kscooo Date: Thu, 2 Oct 2025 11:25:17 +0800 Subject: [PATCH 1492/1708] net/wsconn: clarify package comment Explain that this file stays forked from coder/websocket until we can depend on an upstream release for the helper. Updates #cleanup Signed-off-by: kscooo --- net/wsconn/wsconn.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/wsconn/wsconn.go b/net/wsconn/wsconn.go index 22b511ea8..3c83ffd8c 100644 --- a/net/wsconn/wsconn.go +++ b/net/wsconn/wsconn.go @@ -2,9 +2,7 @@ // SPDX-License-Identifier: BSD-3-Clause // Package wsconn contains an adapter type that turns -// a websocket connection into a net.Conn. It a temporary fork of the -// netconn.go file from the github.com/coder/websocket package while we wait for -// https://github.com/nhooyr/websocket/pull/350 to be merged. +// a websocket connection into a net.Conn. package wsconn import ( From cf520a33713e96b6ab9d2dfd50eee84e2f63d8b4 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 4 Oct 2025 14:04:36 -0700 Subject: [PATCH 1493/1708] feature/featuretags: add LazyWG modular feature Due to iOS memory limitations in 2020 (see https://tailscale.com/blog/go-linker, etc) and wireguard-go using multiple goroutines per peer, commit 16a9cfe2f4ce7d introduced some convoluted pathways through Tailscale to look at packets before they're delivered to wireguard-go and lazily reconfigure wireguard on the fly before delivering a packet, only telling wireguard about peers that are active. We eventually want to remove that code and integrate wireguard-go's configuration with Tailscale's existing netmap tracking. To make it easier to find that code later, this makes it modular. It saves 12 KB (of disk) to turn it off (at the expense of lots of RAM), but that's not really the point. The point is rather making it obvious (via the new constants) where this code even is. Updates #12614 Change-Id: I113b040f3e35f7d861c457eaa710d35f47cee1cb Signed-off-by: Brad Fitzpatrick --- .../buildfeatures/feature_lazywg_disabled.go | 13 +++++ .../buildfeatures/feature_lazywg_enabled.go | 13 +++++ feature/featuretags/featuretags.go | 1 + net/tstun/wrap.go | 22 +++++--- wgengine/magicsock/magicsock.go | 3 +- wgengine/userspace.go | 52 ++++++++++++------- 6 files changed, 76 insertions(+), 28 deletions(-) create mode 100644 feature/buildfeatures/feature_lazywg_disabled.go create mode 100644 feature/buildfeatures/feature_lazywg_enabled.go diff --git a/feature/buildfeatures/feature_lazywg_disabled.go b/feature/buildfeatures/feature_lazywg_disabled.go new file mode 100644 index 000000000..ce81d80ba --- /dev/null +++ b/feature/buildfeatures/feature_lazywg_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_lazywg + +package buildfeatures + +// HasLazyWG is whether the binary was built with support for modular feature "Lazy WireGuard configuration for memory-constrained devices with large netmaps". +// Specifically, it's whether the binary was NOT built with the "ts_omit_lazywg" build tag. +// It's a const so it can be used for dead code elimination.
+const HasLazyWG = false diff --git a/feature/buildfeatures/feature_lazywg_enabled.go b/feature/buildfeatures/feature_lazywg_enabled.go new file mode 100644 index 000000000..259357f7f --- /dev/null +++ b/feature/buildfeatures/feature_lazywg_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_lazywg + +package buildfeatures + +// HasLazyWG is whether the binary was built with support for modular feature "Lazy WireGuard configuration for memory-constrained devices with large netmaps". +// Specifically, it's whether the binary was NOT built with the "ts_omit_lazywg" build tag. +// It's a const so it can be used for dead code elimination. +const HasLazyWG = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 4ae4e1b86..429431ec6 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -159,6 +159,7 @@ var Features = map[FeatureTag]FeatureMeta{ "hujsonconf": {Sym: "HuJSONConf", Desc: "HuJSON config file support"}, "iptables": {Sym: "IPTables", Desc: "Linux iptables support"}, "kube": {Sym: "Kube", Desc: "Kubernetes integration"}, + "lazywg": {Sym: "LazyWG", Desc: "Lazy WireGuard configuration for memory-constrained devices with large netmaps"}, "linuxdnsfight": {Sym: "LinuxDNSFight", Desc: "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)"}, "listenrawdisco": { Sym: "ListenRawDisco", diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index a6d88075d..7f25784a0 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -312,7 +312,9 @@ func (t *Wrapper) now() time.Time { // // The map ownership passes to the Wrapper. It must be non-nil. func (t *Wrapper) SetDestIPActivityFuncs(m map[netip.Addr]func()) { - t.destIPActivity.Store(m) + if buildfeatures.HasLazyWG { + t.destIPActivity.Store(m) + } } // SetDiscoKey sets the current discovery key. @@ -948,12 +950,14 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { for _, data := range res.data { p.Decode(data[res.dataOffset:]) - if m := t.destIPActivity.Load(); m != nil { - if fn := m[p.Dst.Addr()]; fn != nil { - fn() + if buildfeatures.HasLazyWG { + if m := t.destIPActivity.Load(); m != nil { + if fn := m[p.Dst.Addr()]; fn != nil { + fn() + } } } - if captHook != nil { + if buildfeatures.HasCapture && captHook != nil { captHook(packet.FromLocal, t.now(), p.Buffer(), p.CaptureMeta) } if !t.disableFilter { @@ -1085,9 +1089,11 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i pc.snat(p) invertGSOChecksum(pkt, gso) - if m := t.destIPActivity.Load(); m != nil { - if fn := m[p.Dst.Addr()]; fn != nil { - fn() + if buildfeatures.HasLazyWG { + if m := t.destIPActivity.Load(); m != nil { + if fn := m[p.Dst.Addr()]; fn != nil { + fn() + } } } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 76fbfb3b4..81ca49d3d 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -468,7 +468,8 @@ type Options struct { // NoteRecvActivity, if provided, is a func for magicsock to call // whenever it receives a packet from a a peer if it's been more // than ~10 seconds since the last one. 
(10 seconds is somewhat - // arbitrary; the sole user just doesn't need or want it called on + // arbitrary; the sole user, lazy WireGuard configuration, + // just doesn't need or want it called on // every packet, just every minute or two for WireGuard timeouts, // and 10 seconds seems like a good trade-off between often enough // and not too often.) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index be0fe5031..c88ab78a1 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -404,19 +404,21 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } } magicsockOpts := magicsock.Options{ - EventBus: e.eventBus, - Logf: logf, - Port: conf.ListenPort, - EndpointsFunc: endpointsFn, - DERPActiveFunc: e.RequestStatus, - IdleFunc: e.tundev.IdleDuration, - NoteRecvActivity: e.noteRecvActivity, - NetMon: e.netMon, - HealthTracker: e.health, - Metrics: conf.Metrics, - ControlKnobs: conf.ControlKnobs, - OnPortUpdate: onPortUpdate, - PeerByKeyFunc: e.PeerByKey, + EventBus: e.eventBus, + Logf: logf, + Port: conf.ListenPort, + EndpointsFunc: endpointsFn, + DERPActiveFunc: e.RequestStatus, + IdleFunc: e.tundev.IdleDuration, + NetMon: e.netMon, + HealthTracker: e.health, + Metrics: conf.Metrics, + ControlKnobs: conf.ControlKnobs, + OnPortUpdate: onPortUpdate, + PeerByKeyFunc: e.PeerByKey, + } + if buildfeatures.HasLazyWG { + magicsockOpts.NoteRecvActivity = e.noteRecvActivity } var err error @@ -748,15 +750,22 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node // the past 5 minutes. That's more than WireGuard's key // rotation time anyway so it's no harm if we remove it // later if it's been inactive. - activeCutoff := e.timeNow().Add(-lazyPeerIdleThreshold) + var activeCutoff mono.Time + if buildfeatures.HasLazyWG { + activeCutoff = e.timeNow().Add(-lazyPeerIdleThreshold) + } // Not all peers can be trimmed from the network map (see // isTrimmablePeer). For those that are trimmable, keep track of // their NodeKey and Tailscale IPs. These are the ones we'll need // to install tracking hooks for to watch their send/receive // activity. - trackNodes := make([]key.NodePublic, 0, len(full.Peers)) - trackIPs := make([]netip.Addr, 0, len(full.Peers)) + var trackNodes []key.NodePublic + var trackIPs []netip.Addr + if buildfeatures.HasLazyWG { + trackNodes = make([]key.NodePublic, 0, len(full.Peers)) + trackIPs = make([]netip.Addr, 0, len(full.Peers)) + } // Don't re-alloc the map; the Go compiler optimizes map clears as of // Go 1.11, so we can re-use the existing + allocated map. @@ -770,7 +779,7 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node for i := range full.Peers { p := &full.Peers[i] nk := p.PublicKey - if !e.isTrimmablePeer(p, len(full.Peers)) { + if !buildfeatures.HasLazyWG || !e.isTrimmablePeer(p, len(full.Peers)) { min.Peers = append(min.Peers, *p) if discoChanged[nk] { needRemoveStep = true @@ -803,7 +812,9 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node return nil } - e.updateActivityMapsLocked(trackNodes, trackIPs) + if buildfeatures.HasLazyWG { + e.updateActivityMapsLocked(trackNodes, trackIPs) + } if needRemoveStep { minner := min @@ -839,6 +850,9 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node // // e.wgLock must be held. 
func (e *userspaceEngine) updateActivityMapsLocked(trackNodes []key.NodePublic, trackIPs []netip.Addr) { + if !buildfeatures.HasLazyWG { + return + } // Generate the new map of which nodekeys we want to track // receive times for. mr := map[key.NodePublic]mono.Time{} // TODO: only recreate this if set of keys changed @@ -943,7 +957,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, peerMTUEnable := e.magicConn.ShouldPMTUD() isSubnetRouter := false - if e.birdClient != nil && nm != nil && nm.SelfNode.Valid() { + if buildfeatures.HasBird && e.birdClient != nil && nm != nil && nm.SelfNode.Valid() { isSubnetRouter = hasOverlap(nm.SelfNode.PrimaryRoutes(), nm.SelfNode.Hostinfo().RoutableIPs()) e.logf("[v1] Reconfig: hasOverlap(%v, %v) = %v; isSubnetRouter=%v lastIsSubnetRouter=%v", nm.SelfNode.PrimaryRoutes(), nm.SelfNode.Hostinfo().RoutableIPs(), From f208bf8cb11e792e6c8411990995939117265016 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Oct 2025 08:23:11 -0700 Subject: [PATCH 1494/1708] types/lazy: document difference from sync.OnceValue Updates #8419 Updates github.com/golang#62202 Change-Id: I0c082c4258fb7a95a17054f270dc32019bcc7581 Signed-off-by: Brad Fitzpatrick --- types/lazy/lazy.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/types/lazy/lazy.go b/types/lazy/lazy.go index f5d7be494..f537758fa 100644 --- a/types/lazy/lazy.go +++ b/types/lazy/lazy.go @@ -23,6 +23,9 @@ var nilErrPtr = ptr.To[error](nil) // Recursive use of a SyncValue from its own fill function will deadlock. // // SyncValue is safe for concurrent use. +// +// Unlike [sync.OnceValue], the linker can do better dead code elimination +// with SyncValue. See https://github.com/golang/go/issues/62202. type SyncValue[T any] struct { once sync.Once v T From 6db895774426688c1d11c6d2d6365970532ad2c8 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 6 Oct 2025 09:13:03 +0100 Subject: [PATCH 1495/1708] tstest/integration: mark TestPeerRelayPing as flaky Updates #17251 Signed-off-by: Alex Chan --- tstest/integration/integration_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 481de57fd..f7c133f5c 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -1644,6 +1644,7 @@ func TestEncryptStateMigration(t *testing.T) { // relay between all 3 nodes, and "tailscale debug peer-relay-sessions" returns // expected values. func TestPeerRelayPing(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17251") tstest.Shard(t) tstest.Parallel(t) From 44e1d735c32f20eeba4573db65f16d009feb63c5 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 6 Oct 2025 09:41:52 -0700 Subject: [PATCH 1496/1708] tailcfg: bump CapVer for magicsock deadlock fix (#17450) The fix that was applied in e44e28efcd95596c0a86270c177ef912119bf851. Updates tailscale/corp#32978 Signed-off-by: Jordan Whited --- tailcfg/tailcfg.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index da53b777e..7484c7466 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -174,7 +174,8 @@ type CapabilityVersion int // - 126: 2025-09-17: Client uses seamless key renewal unless disabled by control (tailscale/corp#31479) // - 127: 2025-09-19: can handle C2N /debug/netmap. // - 128: 2025-10-02: can handle C2N /debug/health. 
-const CurrentCapabilityVersion CapabilityVersion = 128 +// - 129: 2025-10-04: Fixed sleep/wake deadlock in magicsock when using peer relay (PR #17449) +const CurrentCapabilityVersion CapabilityVersion = 129 // ID is an integer ID for a user, node, or login allocated by the // control plane. From 541a4ed5b483087c11fb190c443ff1510fb8932f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Oct 2025 09:03:10 -0700 Subject: [PATCH 1497/1708] all: use buildfeatures consts in a few more places Saves ~25 KB. Updates #12614 Change-Id: I7b976e57819a0d2692824d779c8cc98033df0d30 Signed-off-by: Brad Fitzpatrick --- client/local/local.go | 3 ++ cmd/tailscale/cli/up.go | 4 ++- control/controlclient/direct.go | 2 +- ipn/ipnlocal/local.go | 22 +++++++++--- ipn/localapi/localapi.go | 10 ++++-- ipn/prefs.go | 64 +++++++++++++++++++-------------- 6 files changed, 70 insertions(+), 35 deletions(-) diff --git a/client/local/local.go b/client/local/local.go index 9faeadca3..a4a871dd8 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -752,6 +752,9 @@ func (lc *Client) PushFile(ctx context.Context, target tailcfg.StableNodeID, siz // machine is properly configured to forward IP packets as a subnet router // or exit node. func (lc *Client) CheckIPForwarding(ctx context.Context) error { + if !buildfeatures.HasAdvertiseRoutes { + return nil + } body, err := lc.get200(ctx, "/localapi/v0/check-ip-forwarding") if err != nil { return err diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 3c0883ec8..90c9c23af 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -24,6 +24,7 @@ import ( shellquote "github.com/kballard/go-shellquote" "github.com/peterbourgon/ff/v3/ffcli" qrcode "github.com/skip2/go-qrcode" + "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister/oauthkey" "tailscale.com/health/healthmsg" "tailscale.com/internal/client/tailscale" @@ -1136,7 +1137,8 @@ func exitNodeIP(p *ipn.Prefs, st *ipnstate.Status) (ip netip.Addr) { } func warnOnAdvertiseRoutes(ctx context.Context, prefs *ipn.Prefs) { - if len(prefs.AdvertiseRoutes) > 0 || prefs.AppConnector.Advertise { + if buildfeatures.HasAdvertiseRoutes && len(prefs.AdvertiseRoutes) > 0 || + buildfeatures.HasAppConnectors && prefs.AppConnector.Advertise { // TODO(jwhited): compress CheckIPForwarding and CheckUDPGROForwarding // into a single HTTP req. 
if err := localClient.CheckIPForwarding(ctx); err != nil { diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 922355369..5f26e2ba1 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -945,7 +945,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap ConnectionHandleForTest: connectionHandleForTest, } var extraDebugFlags []string - if hi != nil && c.netMon != nil && !c.skipIPForwardingCheck && + if buildfeatures.HasAdvertiseRoutes && hi != nil && c.netMon != nil && !c.skipIPForwardingCheck && ipForwardingBroken(hi.RoutableIPs, c.netMon.InterfaceState()) { extraDebugFlags = append(extraDebugFlags, "warn-ip-forwarding-off") } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9a47b7cb1..b0a8d9985 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -586,7 +586,12 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { clientVersionSub := eventbus.Subscribe[tailcfg.ClientVersion](ec) autoUpdateSub := eventbus.Subscribe[controlclient.AutoUpdate](ec) - healthChangeSub := eventbus.Subscribe[health.Change](ec) + + var healthChange <-chan health.Change + if buildfeatures.HasHealth { + healthChangeSub := eventbus.Subscribe[health.Change](ec) + healthChange = healthChangeSub.Events() + } changeDeltaSub := eventbus.Subscribe[netmon.ChangeDelta](ec) var portlist <-chan PortlistServices @@ -604,7 +609,7 @@ func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus b.onClientVersion(&clientVersion) case au := <-autoUpdateSub.Events(): b.onTailnetDefaultAutoUpdate(au.Value) - case change := <-healthChangeSub.Events(): + case change := <-healthChange: b.onHealthChange(change) case changeDelta := <-changeDeltaSub.Events(): b.linkChange(&changeDelta) @@ -996,6 +1001,9 @@ var ( ) func (b *LocalBackend) onHealthChange(change health.Change) { + if !buildfeatures.HasHealth { + return + } if change.WarnableChanged { w := change.Warnable us := change.UnhealthyState @@ -6025,10 +6033,10 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { // // b.mu must be held. func (b *LocalBackend) reconcilePrefsLocked(prefs *ipn.Prefs) (changed bool) { - if b.applySysPolicyLocked(prefs) { + if buildfeatures.HasSystemPolicy && b.applySysPolicyLocked(prefs) { changed = true } - if b.resolveExitNodeInPrefsLocked(prefs) { + if buildfeatures.HasUseExitNode && b.resolveExitNodeInPrefsLocked(prefs) { changed = true } if changed { @@ -6043,6 +6051,9 @@ func (b *LocalBackend) reconcilePrefsLocked(prefs *ipn.Prefs) (changed bool) { // // b.mu must be held. 
func (b *LocalBackend) resolveExitNodeInPrefsLocked(prefs *ipn.Prefs) (changed bool) { + if !buildfeatures.HasUseExitNode { + return false + } if b.resolveAutoExitNodeLocked(prefs) { changed = true } @@ -6338,6 +6349,9 @@ func peerAPIPorts(peer tailcfg.NodeView) (p4, p6 uint16) { } func (b *LocalBackend) CheckIPForwarding() error { + if !buildfeatures.HasAdvertiseRoutes { + return nil + } if b.sys.IsNetstackRouter() { return nil } diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index d7cd42c75..74ff96f9f 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -72,15 +72,12 @@ var handler = map[string]LocalAPIHandler{ // The other /localapi/v0/NAME handlers are exact matches and contain only NAME // without a trailing slash: "alpha-set-device-attrs": (*Handler).serveSetDeviceAttrs, // see tailscale/corp#24690 - "check-ip-forwarding": (*Handler).serveCheckIPForwarding, "check-prefs": (*Handler).serveCheckPrefs, "check-reverse-path-filtering": (*Handler).serveCheckReversePathFiltering, "check-udp-gro-forwarding": (*Handler).serveCheckUDPGROForwarding, "derpmap": (*Handler).serveDERPMap, "dial": (*Handler).serveDial, "disconnect-control": (*Handler).disconnectControl, - "dns-osconfig": (*Handler).serveDNSOSConfig, - "dns-query": (*Handler).serveDNSQuery, "goroutines": (*Handler).serveGoroutines, "handle-push-message": (*Handler).serveHandlePushMessage, "id-token": (*Handler).serveIDToken, @@ -111,6 +108,9 @@ func init() { if buildfeatures.HasAppConnectors { Register("appc-route-info", (*Handler).serveGetAppcRouteInfo) } + if buildfeatures.HasAdvertiseRoutes { + Register("check-ip-forwarding", (*Handler).serveCheckIPForwarding) + } if buildfeatures.HasUseExitNode { Register("suggest-exit-node", (*Handler).serveSuggestExitNode) Register("set-use-exit-node-enabled", (*Handler).serveSetUseExitNodeEnabled) @@ -122,6 +122,10 @@ func init() { Register("bugreport", (*Handler).serveBugReport) Register("pprof", (*Handler).servePprof) } + if buildfeatures.HasDNS { + Register("dns-osconfig", (*Handler).serveDNSOSConfig) + Register("dns-query", (*Handler).serveDNSQuery) + } } // Register registers a new LocalAPI handler for the given name. 
diff --git a/ipn/prefs.go b/ipn/prefs.go index 8a5b17af6..4a0680bba 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -532,12 +532,16 @@ func (p *Prefs) Pretty() string { return p.pretty(runtime.GOOS) } func (p *Prefs) pretty(goos string) string { var sb strings.Builder sb.WriteString("Prefs{") - fmt.Fprintf(&sb, "ra=%v ", p.RouteAll) - fmt.Fprintf(&sb, "dns=%v want=%v ", p.CorpDNS, p.WantRunning) - if p.RunSSH { + if buildfeatures.HasUseRoutes { + fmt.Fprintf(&sb, "ra=%v ", p.RouteAll) + } + if buildfeatures.HasDNS { + fmt.Fprintf(&sb, "dns=%v want=%v ", p.CorpDNS, p.WantRunning) + } + if buildfeatures.HasSSH && p.RunSSH { sb.WriteString("ssh=true ") } - if p.RunWebClient { + if buildfeatures.HasWebClient && p.RunWebClient { sb.WriteString("webclient=true ") } if p.LoggedOut { @@ -552,26 +556,30 @@ func (p *Prefs) pretty(goos string) string { if p.ShieldsUp { sb.WriteString("shields=true ") } - if p.ExitNodeIP.IsValid() { - fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeIP, p.ExitNodeAllowLANAccess) - } else if !p.ExitNodeID.IsZero() { - fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeID, p.ExitNodeAllowLANAccess) - } - if p.AutoExitNode.IsSet() { - fmt.Fprintf(&sb, "auto=%v ", p.AutoExitNode) - } - if len(p.AdvertiseRoutes) > 0 || goos == "linux" { - fmt.Fprintf(&sb, "routes=%v ", p.AdvertiseRoutes) - } - if len(p.AdvertiseRoutes) > 0 || p.NoSNAT { - fmt.Fprintf(&sb, "snat=%v ", !p.NoSNAT) + if buildfeatures.HasUseExitNode { + if p.ExitNodeIP.IsValid() { + fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeIP, p.ExitNodeAllowLANAccess) + } else if !p.ExitNodeID.IsZero() { + fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeID, p.ExitNodeAllowLANAccess) + } + if p.AutoExitNode.IsSet() { + fmt.Fprintf(&sb, "auto=%v ", p.AutoExitNode) + } } - if len(p.AdvertiseRoutes) > 0 || p.NoStatefulFiltering.EqualBool(true) { - // Only print if we're advertising any routes, or the user has - // turned off stateful filtering (NoStatefulFiltering=true ⇒ - // StatefulFiltering=false). - bb, _ := p.NoStatefulFiltering.Get() - fmt.Fprintf(&sb, "statefulFiltering=%v ", !bb) + if buildfeatures.HasAdvertiseRoutes { + if len(p.AdvertiseRoutes) > 0 || goos == "linux" { + fmt.Fprintf(&sb, "routes=%v ", p.AdvertiseRoutes) + } + if len(p.AdvertiseRoutes) > 0 || p.NoSNAT { + fmt.Fprintf(&sb, "snat=%v ", !p.NoSNAT) + } + if len(p.AdvertiseRoutes) > 0 || p.NoStatefulFiltering.EqualBool(true) { + // Only print if we're advertising any routes, or the user has + // turned off stateful filtering (NoStatefulFiltering=true ⇒ + // StatefulFiltering=false). 
+ bb, _ := p.NoStatefulFiltering.Get() + fmt.Fprintf(&sb, "statefulFiltering=%v ", !bb) + } } if len(p.AdvertiseTags) > 0 { fmt.Fprintf(&sb, "tags=%s ", strings.Join(p.AdvertiseTags, ",")) @@ -594,9 +602,13 @@ func (p *Prefs) pretty(goos string) string { if p.NetfilterKind != "" { fmt.Fprintf(&sb, "netfilterKind=%s ", p.NetfilterKind) } - sb.WriteString(p.AutoUpdate.Pretty()) - sb.WriteString(p.AppConnector.Pretty()) - if p.RelayServerPort != nil { + if buildfeatures.HasClientUpdate { + sb.WriteString(p.AutoUpdate.Pretty()) + } + if buildfeatures.HasAppConnectors { + sb.WriteString(p.AppConnector.Pretty()) + } + if buildfeatures.HasRelayServer && p.RelayServerPort != nil { fmt.Fprintf(&sb, "relayServerPort=%d ", *p.RelayServerPort) } if p.Persist != nil { From 525f9921fe680f52c67a4d2c5b51c332d77bfe51 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Oct 2025 08:05:35 -0700 Subject: [PATCH 1498/1708] cmd/testwrapper/flakytest: use t.Attr annotation on flaky tests Updates #17460 Change-Id: I7381e9a6dd73514c73deb6b863749eef1a87efdc Signed-off-by: Brad Fitzpatrick --- cmd/testwrapper/flakytest/flakytest.go | 11 ++++++++++- cmd/testwrapper/flakytest/flakytest_test.go | 3 ++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/cmd/testwrapper/flakytest/flakytest.go b/cmd/testwrapper/flakytest/flakytest.go index 6302900cb..856cb28ef 100644 --- a/cmd/testwrapper/flakytest/flakytest.go +++ b/cmd/testwrapper/flakytest/flakytest.go @@ -27,7 +27,7 @@ const FlakyTestLogMessage = "flakytest: this is a known flaky test" // starting at 1. const FlakeAttemptEnv = "TS_TESTWRAPPER_ATTEMPT" -var issueRegexp = regexp.MustCompile(`\Ahttps://github\.com/tailscale/[a-zA-Z0-9_.-]+/issues/\d+\z`) +var issueRegexp = regexp.MustCompile(`\Ahttps://github\.com/[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+/issues/\d+\z`) var ( rootFlakesMu sync.Mutex @@ -49,6 +49,15 @@ func Mark(t testing.TB, issue string) { // spamming people running tests without the wrapper) fmt.Fprintf(os.Stderr, "%s: %s\n", FlakyTestLogMessage, issue) } + t.Attr("flaky-test-issue-url", issue) + + // The Attr method above also emits human-readable output, so this t.Logf + // is somewhat redundant, but we keep it for compatibility with + // old test runs, so cmd/testwrapper doesn't need to be modified. + // TODO(bradfitz): switch testwrapper to look for Action "attr" + // instead: + // "Action":"attr","Package":"tailscale.com/cmd/testwrapper/flakytest","Test":"TestMarked_Root","Key":"flaky-test-issue-url","Value":"https://github.com/tailscale/tailscale/issues/0"} + // And then remove this Logf a month or so after that. t.Logf("flakytest: issue tracking this flaky test: %s", issue) // Record the root test name as flakey. 
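
The TODO above suggests having cmd/testwrapper key off the new "attr" action in `go test -json` output instead of scanning log text. A rough sketch of what that consumer could look like; the field names are taken from the example JSON in the comment above, not from the actual testwrapper code:

package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
)

// testEvent mirrors the subset of the test2json record we care about.
type testEvent struct {
	Action  string `json:"Action"`
	Package string `json:"Package"`
	Test    string `json:"Test"`
	Key     string `json:"Key,omitempty"`
	Value   string `json:"Value,omitempty"`
}

func main() {
	// Read `go test -json ./...` output from stdin and report which tests
	// declared a flaky-test issue URL via t.Attr.
	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		var ev testEvent
		if err := json.Unmarshal(sc.Bytes(), &ev); err != nil {
			continue // skip non-JSON lines such as build output
		}
		if ev.Action == "attr" && ev.Key == "flaky-test-issue-url" {
			fmt.Printf("%s.%s is a known flaky test: %s\n", ev.Package, ev.Test, ev.Value)
		}
	}
}
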
diff --git a/cmd/testwrapper/flakytest/flakytest_test.go b/cmd/testwrapper/flakytest/flakytest_test.go index 64cbfd9a3..9b744de13 100644 --- a/cmd/testwrapper/flakytest/flakytest_test.go +++ b/cmd/testwrapper/flakytest/flakytest_test.go @@ -14,7 +14,8 @@ func TestIssueFormat(t *testing.T) { want bool }{ {"https://github.com/tailscale/cOrp/issues/1234", true}, - {"https://github.com/otherproject/corp/issues/1234", false}, + {"https://github.com/otherproject/corp/issues/1234", true}, + {"https://not.huyb/tailscale/corp/issues/1234", false}, {"https://github.com/tailscale/corp/issues/", false}, } for _, testCase := range testCases { From ea8e991d69c02ce8c9b65fda70ac56a4707416bf Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 6 Oct 2025 16:43:27 +0000 Subject: [PATCH 1499/1708] licenses: update license notices Signed-off-by: License Updater --- licenses/android.md | 1 - licenses/apple.md | 2 +- licenses/tailscale.md | 1 - licenses/windows.md | 27 ++------------------------- 4 files changed, 3 insertions(+), 28 deletions(-) diff --git a/licenses/android.md b/licenses/android.md index 881f3ed3d..f578c17cb 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -24,7 +24,6 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index 6b6d47045..4c50e9559 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -68,7 +68,7 @@ See also the dependencies in the [Tailscale CLI][]. 
- [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.42.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/df929982:LICENSE)) - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.44.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.17.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.36.0:LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index b15b93744..0ef5bcf61 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -73,7 +73,6 @@ Some packages may only be included on certain architectures or operating systems - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/certstore](https://pkg.go.dev/github.com/tailscale/certstore) ([MIT](https://github.com/tailscale/certstore/blob/d3fa0460f47e/LICENSE.md)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - [github.com/tailscale/web-client-prebuilt](https://pkg.go.dev/github.com/tailscale/web-client-prebuilt) ([BSD-3-Clause](https://github.com/tailscale/web-client-prebuilt/blob/d4cd19a26976/LICENSE)) - [github.com/tailscale/wf](https://pkg.go.dev/github.com/tailscale/wf) ([BSD-3-Clause](https://github.com/tailscale/wf/blob/6fbb0a674ee6/LICENSE)) - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/1d0488a3d7da/LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index 37c41ca3f..f6704cf32 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -10,29 +10,11 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) - - [github.com/alexbrainman/sspi](https://pkg.go.dev/github.com/alexbrainman/sspi) ([BSD-3-Clause](https://github.com/alexbrainman/sspi/blob/1a75b4708caa/LICENSE)) - [github.com/apenwarr/fixconsole](https://pkg.go.dev/github.com/apenwarr/fixconsole) ([Apache-2.0](https://github.com/apenwarr/fixconsole/blob/5a9f6489cc29/LICENSE)) - [github.com/apenwarr/w32](https://pkg.go.dev/github.com/apenwarr/w32) ([BSD-3-Clause](https://github.com/apenwarr/w32/blob/aa00fece76ab/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.31/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.31/internal/endpoints/v2/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/internal/sync/singleflight/LICENSE)) - - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.45.0/service/ssm/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - [github.com/beorn7/perks/quantile](https://pkg.go.dev/github.com/beorn7/perks/quantile) ([MIT](https://github.com/beorn7/perks/blob/v1.0.1/LICENSE)) - [github.com/cespare/xxhash/v2](https://pkg.go.dev/github.com/cespare/xxhash/v2) ([MIT](https://github.com/cespare/xxhash/blob/v2.3.0/LICENSE.txt)) - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) @@ -40,12 +22,10 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/go-cmp/cmp](https://pkg.go.dev/github.com/google/go-cmp/cmp) ([BSD-3-Clause](https://github.com/google/go-cmp/blob/v0.7.0/LICENSE)) - - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - [github.com/gregjones/httpcache](https://pkg.go.dev/github.com/gregjones/httpcache) ([MIT](https://github.com/gregjones/httpcache/blob/901d90724c79/LICENSE.txt)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - [github.com/jsimonetti/rtnetlink](https://pkg.go.dev/github.com/jsimonetti/rtnetlink) ([MIT](https://github.com/jsimonetti/rtnetlink/blob/v1.4.1/LICENSE.md)) - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.0/LICENSE)) - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.0/internal/snapref/LICENSE)) @@ -62,24 +42,21 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/992244df8c5a/LICENSE)) - - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/6376defdac3f/LICENSE)) - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/f4da2b8ee071/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) - - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.5/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.42.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/df929982:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) - - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.27.0:LICENSE)) + - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.28.0:LICENSE)) - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.44.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.17.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.36.0:LICENSE)) - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.35.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.29.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) 
([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.7/LICENSE)) From d816454a88f3f0276294c8f5150ba7b7e0471552 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Oct 2025 12:02:16 -0700 Subject: [PATCH 1500/1708] feature/featuretags: make usermetrics modular Saves ~102 KB from the min build. Updates #12614 Change-Id: Ie1d4f439321267b9f98046593cb289ee3c4d6249 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/debug.go | 8 +++++ cmd/tailscaled/depaware-min.txt | 7 ++--- cmd/tailscaled/depaware-minbox.txt | 7 ++--- cmd/tailscaled/depaware.txt | 2 +- cmd/tailscaled/deps_test.go | 3 ++ cmd/tailscaled/tailscaled.go | 8 ----- cmd/tsidp/depaware.txt | 2 +- .../feature_usermetrics_disabled.go | 13 +++++++++ .../feature_usermetrics_enabled.go | 13 +++++++++ feature/featuretags/featuretags.go | 4 +++ ipn/localapi/localapi.go | 4 ++- net/tstun/wrap.go | 5 ++-- tsnet/depaware.txt | 2 +- util/usermetric/metrics.go | 13 +++++---- util/usermetric/omit.go | 29 +++++++++++++++++++ util/usermetric/usermetric.go | 6 ++++ 17 files changed, 97 insertions(+), 31 deletions(-) create mode 100644 feature/buildfeatures/feature_usermetrics_disabled.go create mode 100644 feature/buildfeatures/feature_usermetrics_enabled.go create mode 100644 util/usermetric/omit.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 6ecbd3df8..2c4cd9e85 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -742,7 +742,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/net/tstun+ + tailscale.com/metrics from tailscale.com/tsweb+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/debug.go b/cmd/tailscaled/debug.go index bcc34fb0d..b16cb28e0 100644 --- a/cmd/tailscaled/debug.go +++ b/cmd/tailscaled/debug.go @@ -28,7 +28,9 @@ import ( "tailscale.com/ipn" "tailscale.com/net/netmon" "tailscale.com/tailcfg" + "tailscale.com/tsweb/varz" "tailscale.com/types/key" + "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" ) @@ -58,6 +60,12 @@ func newDebugMux() *http.ServeMux { return mux } +func servePrometheusMetrics(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + varz.Handler(w, r) + clientmetric.WritePrometheusExpositionFormat(w) +} + func debugMode(args []string) error { fs := flag.NewFlagSet("debug", flag.ExitOnError) fs.BoolVar(&debugArgs.ifconfig, "ifconfig", false, "If true, print network interface state") diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 6d7a11623..ed7ddee2a 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -76,7 +76,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics 
from tailscale.com/net/tstun+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/connstats from tailscale.com/net/tstun+ @@ -123,7 +122,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/wgengine/filter - tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ tailscale.com/types/empty from tailscale.com/ipn+ @@ -150,7 +148,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/control/controlclient+ tailscale.com/util/execqueue from tailscale.com/appc+ @@ -205,7 +202,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ + golang.org/x/exp/constraints from tailscale.com/util/set golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/ipn/ipnlocal+ @@ -321,7 +318,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/json from expvar+ encoding/pem from crypto/tls+ errors from bufio+ - expvar from tailscale.com/metrics+ + expvar from tailscale.com/wgengine/magicsock flag from tailscale.com/cmd/tailscaled+ fmt from compress/flate+ hash from crypto+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 5a71cebd4..93a884c1e 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -99,7 +99,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/net/tstun+ tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock @@ -149,7 +148,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/wgengine/filter - tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ tailscale.com/types/empty from tailscale.com/ipn+ @@ -177,7 +175,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by 
github.com/tailscale/de tailscale.com/util/cmpver from tailscale.com/clientupdate tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/execqueue from tailscale.com/appc+ @@ -233,7 +230,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ + golang.org/x/exp/constraints from tailscale.com/util/set golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/cmd/tailscale/cli+ @@ -351,7 +348,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/json from expvar+ encoding/pem from crypto/tls+ errors from bufio+ - expvar from tailscale.com/metrics+ + expvar from tailscale.com/wgengine/magicsock flag from tailscale.com/cmd/tailscaled+ fmt from compress/flate+ hash from crypto+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 1bd165c17..7ef5c2ede 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -323,7 +323,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/net/tstun+ + tailscale.com/metrics from tailscale.com/tsweb+ tailscale.com/net/ace from tailscale.com/feature/ace tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 2dd140f23..a66706db2 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -265,6 +265,9 @@ func TestMinTailscaledWithCLI(t *testing.T) { "hujson", "pprof", "multierr", // https://github.com/tailscale/tailscale/pull/17379 + "tailscale.com/metrics", + "tailscale.com/tsweb/varz", + "dirwalk", } deptest.DepChecker{ GOOS: "linux", diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index c3a4c8b05..62df4067d 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -51,11 +51,9 @@ import ( "tailscale.com/safesocket" "tailscale.com/syncs" "tailscale.com/tsd" - "tailscale.com/tsweb/varz" "tailscale.com/types/flagtype" "tailscale.com/types/logger" "tailscale.com/types/logid" - "tailscale.com/util/clientmetric" "tailscale.com/util/osshare" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" @@ -831,12 +829,6 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo var hookNewDebugMux feature.Hook[func() *http.ServeMux] -func servePrometheusMetrics(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain") - varz.Handler(w, r) - clientmetric.WritePrometheusExpositionFormat(w) -} - func runDebugServer(logf logger.Logf, mux *http.ServeMux, addr string) { if 
!buildfeatures.HasDebug { return diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index f348a394f..fb7c59ebc 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -170,7 +170,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/net/tstun+ + tailscale.com/metrics from tailscale.com/tsweb+ tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/feature/buildfeatures/feature_usermetrics_disabled.go b/feature/buildfeatures/feature_usermetrics_disabled.go new file mode 100644 index 000000000..092c89c3b --- /dev/null +++ b/feature/buildfeatures/feature_usermetrics_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_usermetrics + +package buildfeatures + +// HasUserMetrics is whether the binary was built with support for modular feature "Usermetrics (documented, stable) metrics support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_usermetrics" build tag. +// It's a const so it can be used for dead code elimination. +const HasUserMetrics = false diff --git a/feature/buildfeatures/feature_usermetrics_enabled.go b/feature/buildfeatures/feature_usermetrics_enabled.go new file mode 100644 index 000000000..813e3c347 --- /dev/null +++ b/feature/buildfeatures/feature_usermetrics_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_usermetrics + +package buildfeatures + +// HasUserMetrics is whether the binary was built with support for modular feature "Usermetrics (documented, stable) metrics support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_usermetrics" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasUserMetrics = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 429431ec6..a751f65fb 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -270,6 +270,10 @@ var Features = map[FeatureTag]FeatureMeta{ Sym: "UseProxy", Desc: "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.", }, + "usermetrics": { + Sym: "UserMetrics", + Desc: "Usermetrics (documented, stable) metrics support", + }, "wakeonlan": {Sym: "WakeOnLAN", Desc: "Wake-on-LAN support"}, "webclient": { Sym: "WebClient", Desc: "Web client support", diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 74ff96f9f..fb2c964e7 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -99,7 +99,6 @@ var handler = map[string]LocalAPIHandler{ "status": (*Handler).serveStatus, "update/check": (*Handler).serveUpdateCheck, "upload-client-metrics": (*Handler).serveUploadClientMetrics, - "usermetrics": (*Handler).serveUserMetrics, "watch-ipn-bus": (*Handler).serveWatchIPNBus, "whois": (*Handler).serveWhoIs, } @@ -126,6 +125,9 @@ func init() { Register("dns-osconfig", (*Handler).serveDNSOSConfig) Register("dns-query", (*Handler).serveDNSQuery) } + if buildfeatures.HasUserMetrics { + Register("usermetrics", (*Handler).serveUserMetrics) + } } // Register registers a new LocalAPI handler for the given name. diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 7f25784a0..fb93ca21e 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -24,7 +24,6 @@ import ( "go4.org/mem" "tailscale.com/disco" "tailscale.com/feature/buildfeatures" - tsmetrics "tailscale.com/metrics" "tailscale.com/net/connstats" "tailscale.com/net/packet" "tailscale.com/net/packet/checksum" @@ -213,8 +212,8 @@ type Wrapper struct { } type metrics struct { - inboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[usermetric.DropLabels] - outboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[usermetric.DropLabels] + inboundDroppedPacketsTotal *usermetric.MultiLabelMap[usermetric.DropLabels] + outboundDroppedPacketsTotal *usermetric.MultiLabelMap[usermetric.DropLabels] } func registerMetrics(reg *usermetric.Registry) *metrics { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 014ea109c..4c3d8018f 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -166,7 +166,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/net/tstun+ + tailscale.com/metrics from tailscale.com/tsweb+ tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/util/usermetric/metrics.go b/util/usermetric/metrics.go index 044b4d65f..be425fb87 100644 --- a/util/usermetric/metrics.go +++ b/util/usermetric/metrics.go @@ -10,15 +10,15 @@ package usermetric import ( "sync" - "tailscale.com/metrics" + "tailscale.com/feature/buildfeatures" ) // Metrics contains user-facing metrics that are used by multiple packages. 
type Metrics struct { initOnce sync.Once - droppedPacketsInbound *metrics.MultiLabelMap[DropLabels] - droppedPacketsOutbound *metrics.MultiLabelMap[DropLabels] + droppedPacketsInbound *MultiLabelMap[DropLabels] + droppedPacketsOutbound *MultiLabelMap[DropLabels] } // DropReason is the reason why a packet was dropped. @@ -55,6 +55,9 @@ type DropLabels struct { // initOnce initializes the common metrics. func (r *Registry) initOnce() { + if !buildfeatures.HasUserMetrics { + return + } r.m.initOnce.Do(func() { r.m.droppedPacketsInbound = NewMultiLabelMapWithRegistry[DropLabels]( r, @@ -73,13 +76,13 @@ func (r *Registry) initOnce() { // DroppedPacketsOutbound returns the outbound dropped packet metric, creating it // if necessary. -func (r *Registry) DroppedPacketsOutbound() *metrics.MultiLabelMap[DropLabels] { +func (r *Registry) DroppedPacketsOutbound() *MultiLabelMap[DropLabels] { r.initOnce() return r.m.droppedPacketsOutbound } // DroppedPacketsInbound returns the inbound dropped packet metric. -func (r *Registry) DroppedPacketsInbound() *metrics.MultiLabelMap[DropLabels] { +func (r *Registry) DroppedPacketsInbound() *MultiLabelMap[DropLabels] { r.initOnce() return r.m.droppedPacketsInbound } diff --git a/util/usermetric/omit.go b/util/usermetric/omit.go new file mode 100644 index 000000000..0611990ab --- /dev/null +++ b/util/usermetric/omit.go @@ -0,0 +1,29 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_usermetrics + +package usermetric + +type Registry struct { + m Metrics +} + +func (*Registry) NewGauge(name, help string) *Gauge { return nil } + +type MultiLabelMap[T comparable] = noopMap[T] + +type noopMap[T comparable] struct{} + +type Gauge struct{} + +func (*Gauge) Set(float64) {} + +func NewMultiLabelMapWithRegistry[T comparable](m *Registry, name string, promType, helpText string) *MultiLabelMap[T] { + return nil +} + +func (*noopMap[T]) Add(T, int64) {} +func (*noopMap[T]) Set(T, any) {} + +func (r *Registry) Handler(any, any) {} // no-op HTTP handler diff --git a/util/usermetric/usermetric.go b/util/usermetric/usermetric.go index 74e9447a6..1805a5dbe 100644 --- a/util/usermetric/usermetric.go +++ b/util/usermetric/usermetric.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_usermetrics + // Package usermetric provides a container and handler // for user-facing metrics. package usermetric @@ -25,6 +27,10 @@ type Registry struct { m Metrics } +// MultiLabelMap is an alias for metrics.MultiLabelMap in the common case, +// or an alias to a lighter type when usermetrics are omitted from the build. +type MultiLabelMap[T comparable] = metrics.MultiLabelMap[T] + // NewMultiLabelMapWithRegistry creates and register a new // MultiLabelMap[T] variable with the given name and returns it. // The variable is registered with the userfacing metrics package. From 7407f404d996594de43a546cfabbd40d776a4e22 Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Mon, 6 Oct 2025 22:48:43 +0100 Subject: [PATCH 1501/1708] ipn/ipnlocal: fix setAuthURL / setWgengineStatus race condition (#17408) If we received a wg engine status while processing an auth URL, there was a race condition where the authURL could be reset to "" immediately after we set it. To fix this we need to check that we are moving from a non-Running state to a Running state rather than always resetting the URL when we "move" into a Running state even if that is the current state. 
We also need to make sure that we do not return from stopEngineAndWait until the engine is stopped: before, we would return as soon as we received any engine status update, but that might have been an update already in-flight before we asked the engine to stop. Now we wait until we see an update that is indicative of a stopped engine, or we see that the engine is unblocked again, which indicates that the engine stopped and then started again while we were waiting before we checked the state. Updates #17388 Signed-off-by: James Sanderson Co-authored-by: Nick Khyl --- ipn/ipnlocal/local.go | 64 +++++++---- ipn/ipnlocal/local_test.go | 9 ++ ipn/ipnlocal/state_test.go | 229 +++++++++++++++++++++++++++++++++++++ 3 files changed, 278 insertions(+), 24 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index b0a8d9985..c560fdae1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -313,9 +313,8 @@ type LocalBackend struct { serveListeners map[netip.AddrPort]*localListener // listeners for local serve traffic serveProxyHandlers sync.Map // string (HTTPHandler.Proxy) => *reverseProxy - // statusLock must be held before calling statusChanged.Wait() or + // mu must be held before calling statusChanged.Wait() or // statusChanged.Broadcast(). - statusLock sync.Mutex statusChanged *sync.Cond // dialPlan is any dial plan that we've received from the control @@ -542,7 +541,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.setTCPPortsIntercepted(nil) - b.statusChanged = sync.NewCond(&b.statusLock) + b.statusChanged = sync.NewCond(&b.mu) b.e.SetStatusCallback(b.setWgengineStatus) b.prevIfState = netMon.InterfaceState() @@ -2265,14 +2264,15 @@ func (b *LocalBackend) setWgengineStatus(s *wgengine.Status, err error) { b.send(ipn.Notify{Engine: &es}) } +// broadcastStatusChanged must not be called with b.mu held. func (b *LocalBackend) broadcastStatusChanged() { // The sync.Cond docs say: "It is allowed but not required for the caller to hold c.L during the call." - // In this particular case, we must acquire b.statusLock. Otherwise we might broadcast before + // In this particular case, we must acquire b.mu. Otherwise we might broadcast before // the waiter (in requestEngineStatusAndWait) starts to wait, in which case // the waiter can get stuck indefinitely. See PR 2865. - b.statusLock.Lock() + b.mu.Lock() b.statusChanged.Broadcast() - b.statusLock.Unlock() + b.mu.Unlock() } // SetNotifyCallback sets the function to call when the backend has something to @@ -3343,11 +3343,12 @@ func (b *LocalBackend) popBrowserAuthNow(url string, keyExpired bool, recipient if !b.seamlessRenewalEnabled() || keyExpired { b.blockEngineUpdates(true) b.stopEngineAndWait() + + if b.State() == ipn.Running { + b.enterState(ipn.Starting) + } } b.tellRecipientToBrowseToURL(url, toNotificationTarget(recipient)) - if b.State() == ipn.Running { - b.enterState(ipn.Starting) - } } // validPopBrowserURL reports whether urlStr is a valid value for a @@ -5513,7 +5514,13 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock activeLogin := b.activeLogin authURL := b.authURL if newState == ipn.Running { - b.resetAuthURLLocked() + // TODO(zofrex): Is this needed? As of 2025-10-03 it doesn't seem to be + // necessary when logging in or authenticating. When do we need to reset it + // here, rather than the other places it is reset? We should test if it is + // necessary and add unit tests to cover those cases, or remove it. 
+ if oldState != ipn.Running { + b.resetAuthURLLocked() + } // Start a captive portal detection loop if none has been // started. Create a new context if none is present, since it @@ -5750,29 +5757,38 @@ func (u unlockOnce) UnlockEarly() { } // stopEngineAndWait deconfigures the local network data plane, and -// waits for it to deliver a status update before returning. -// -// TODO(danderson): this may be racy. We could unblock upon receiving -// a status update that predates the "I've shut down" update. +// waits for it to deliver a status update indicating it has stopped +// before returning. func (b *LocalBackend) stopEngineAndWait() { b.logf("stopEngineAndWait...") b.e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) - b.requestEngineStatusAndWait() + b.requestEngineStatusAndWaitForStopped() b.logf("stopEngineAndWait: done.") } -// Requests the wgengine status, and does not return until the status -// was delivered (to the usual callback). -func (b *LocalBackend) requestEngineStatusAndWait() { - b.logf("requestEngineStatusAndWait") +// Requests the wgengine status, and does not return until a status was +// delivered (to the usual callback) that indicates the engine is stopped. +func (b *LocalBackend) requestEngineStatusAndWaitForStopped() { + b.logf("requestEngineStatusAndWaitForStopped") - b.statusLock.Lock() - defer b.statusLock.Unlock() + b.mu.Lock() + defer b.mu.Unlock() b.goTracker.Go(b.e.RequestStatus) - b.logf("requestEngineStatusAndWait: waiting...") - b.statusChanged.Wait() // temporarily releases lock while waiting - b.logf("requestEngineStatusAndWait: got status update.") + b.logf("requestEngineStatusAndWaitForStopped: waiting...") + for { + b.statusChanged.Wait() // temporarily releases lock while waiting + + if !b.blocked { + b.logf("requestEngineStatusAndWaitForStopped: engine is no longer blocked, must have stopped and started again, not safe to wait.") + break + } + if b.engineStatus.NumLive == 0 && b.engineStatus.LiveDERPs == 0 { + b.logf("requestEngineStatusAndWaitForStopped: engine is stopped.") + break + } + b.logf("requestEngineStatusAndWaitForStopped: engine is still running. Waiting...") + } } // setControlClientLocked sets the control client to cc, diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index a662793db..bc8bd2a67 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1505,6 +1505,15 @@ func wantExitNodeIDNotify(want tailcfg.StableNodeID) wantedNotification { } } +func wantStateNotify(want ipn.State) wantedNotification { + return wantedNotification{ + name: "State=" + want.String(), + cond: func(_ testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool { + return n.State != nil && *n.State == want + }, + } +} + func TestInternalAndExternalInterfaces(t *testing.T) { type interfacePrefix struct { i netmon.Interface diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index a387af035..d773f7227 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1561,6 +1561,235 @@ func TestEngineReconfigOnStateChange(t *testing.T) { } } +// TestStateMachineURLRace tests that wgengine updates arriving in the middle of +// processing an auth URL doesn't result in the auth URL being cleared. 
+func TestStateMachineURLRace(t *testing.T) { + runTestStateMachineURLRace(t, false) +} + +func TestStateMachineURLRaceSeamless(t *testing.T) { + runTestStateMachineURLRace(t, true) +} + +func runTestStateMachineURLRace(t *testing.T, seamless bool) { + var cc *mockControl + b := newLocalBackendWithTestControl(t, true, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + cc = newClient(t, opts) + return cc + }) + + nw := newNotificationWatcher(t, b, &ipnauth.TestActor{}) + + t.Logf("Start") + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.NeedsLogin)}) + b.Start(ipn.Options{ + UpdatePrefs: &ipn.Prefs{ + WantRunning: true, + ControlURL: "https://localhost:1/", + }, + }) + nw.check() + + t.Logf("LoginFinished") + cc.persist.UserProfile.LoginName = "user1" + cc.persist.NodeID = "node1" + + if seamless { + b.sys.ControlKnobs().SeamlessKeyRenewal.Store(true) + } + + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.Starting)}) + cc.send(nil, "", true, &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), + }) + nw.check() + + t.Logf("Running") + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.Running)}) + b.setWgengineStatus(&wgengine.Status{AsOf: time.Now(), DERPs: 1}, nil) + nw.check() + + t.Logf("Re-auth (StartLoginInteractive)") + b.StartLoginInteractive(t.Context()) + + stop := make(chan struct{}) + stopSpamming := sync.OnceFunc(func() { + stop <- struct{}{} + }) + // if seamless renewal is enabled, the engine won't be disabled, and we won't + // ever call stopSpamming, so make sure it does get called + defer stopSpamming() + + // Intercept updates between the engine and localBackend, so that we can see + // when the "stopped" update comes in and ensure we stop sending our "we're + // up" updates after that point. + b.e.SetStatusCallback(func(s *wgengine.Status, err error) { + // This is not one of our fake status updates, this is generated from the + // engine in response to LocalBackend calling RequestStatus. Stop spamming + // our fake statuses. + // + // TODO(zofrex): This is fragile, it works right now but would break if the + // calling pattern of RequestStatus changes. We should ensure that we keep + // sending "we're up" statuses right until Reconfig is called with + // zero-valued configs, and after that point only send "stopped" statuses. + stopSpamming() + + // Once stopSpamming returns we are guaranteed to not send any more updates, + // so we can now send the real update (indicating shutdown) and be certain + // it will be received after any fake updates we sent. This is possibly a + // stronger guarantee than we get from the real engine? + b.setWgengineStatus(s, err) + }) + + // time needs to be >= last time for the status to be accepted, send all our + // spam with the same stale time so that when a real update comes in it will + // definitely be accepted. + time := b.lastStatusTime + + // Flood localBackend with a lot of wgengine status updates, so if there are + // any race conditions in the multiple locks/unlocks that happen as we process + // the received auth URL, we will hit them. 
+ go func() { + t.Logf("sending lots of fake wgengine status updates") + for { + select { + case <-stop: + t.Logf("stopping fake wgengine status updates") + return + default: + b.setWgengineStatus(&wgengine.Status{AsOf: time, DERPs: 1}, nil) + } + } + }() + + t.Logf("Re-auth (receive URL)") + url1 := "https://localhost:1/1" + cc.send(nil, url1, false, nil) + + // Don't need to wait on anything else - once .send completes, authURL should + // be set, and once .send has completed, any opportunities for a WG engine + // status update to trample it have ended as well. + if b.authURL == "" { + t.Fatalf("expected authURL to be set") + } +} + +func TestWGEngineDownThenUpRace(t *testing.T) { + var cc *mockControl + b := newLocalBackendWithTestControl(t, true, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + cc = newClient(t, opts) + return cc + }) + + nw := newNotificationWatcher(t, b, &ipnauth.TestActor{}) + + t.Logf("Start") + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.NeedsLogin)}) + b.Start(ipn.Options{ + UpdatePrefs: &ipn.Prefs{ + WantRunning: true, + ControlURL: "https://localhost:1/", + }, + }) + nw.check() + + t.Logf("LoginFinished") + cc.persist.UserProfile.LoginName = "user1" + cc.persist.NodeID = "node1" + + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.Starting)}) + cc.send(nil, "", true, &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), + }) + nw.check() + + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.Running)}) + b.setWgengineStatus(&wgengine.Status{AsOf: time.Now(), DERPs: 1}, nil) + nw.check() + + t.Logf("Re-auth (StartLoginInteractive)") + b.StartLoginInteractive(t.Context()) + + var timeLock sync.RWMutex + timestamp := b.lastStatusTime + + engineShutdown := make(chan struct{}) + gotShutdown := sync.OnceFunc(func() { + t.Logf("engineShutdown") + engineShutdown <- struct{}{} + }) + + b.e.SetStatusCallback(func(s *wgengine.Status, err error) { + timeLock.Lock() + if s.AsOf.After(timestamp) { + timestamp = s.AsOf + } + timeLock.Unlock() + + if err != nil || (s.DERPs == 0 && len(s.Peers) == 0) { + gotShutdown() + } else { + b.setWgengineStatus(s, err) + } + }) + + t.Logf("Re-auth (receive URL)") + url1 := "https://localhost:1/1" + + done := make(chan struct{}) + var wg sync.WaitGroup + + wg.Go(func() { + t.Log("cc.send starting") + cc.send(nil, url1, false, nil) // will block until engine stops + t.Log("cc.send returned") + }) + + <-engineShutdown // will get called once cc.send is blocked + gotShutdown = sync.OnceFunc(func() { + t.Logf("engineShutdown") + engineShutdown <- struct{}{} + }) + + wg.Go(func() { + t.Log("StartLoginInteractive starting") + b.StartLoginInteractive(t.Context()) // will also block until engine stops + t.Log("StartLoginInteractive returned") + }) + + <-engineShutdown // will get called once StartLoginInteractive is blocked + + st := controlclient.Status{} + st.SetStateForTest(controlclient.StateAuthenticated) + b.SetControlClientStatus(cc, st) + + timeLock.RLock() + b.setWgengineStatus(&wgengine.Status{AsOf: timestamp}, nil) // engine is down event finally arrives + b.setWgengineStatus(&wgengine.Status{AsOf: timestamp, DERPs: 1}, nil) // engine is back up + timeLock.RUnlock() + + go func() { + wg.Wait() + done <- struct{}{} + }() + + t.Log("waiting for .send and .StartLoginInteractive to return") + + select { + case <-done: + case <-time.After(10 * time.Second): + t.Fatalf("timed out waiting") + } + + t.Log("both returned") +} + func buildNetmapWithPeers(self 
tailcfg.NodeView, peers ...tailcfg.NodeView) *netmap.NetworkMap { const ( firstAutoUserID = tailcfg.UserID(10000) From e0f222b686ca4e542c6d83075f08a7e34dd69d34 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 6 Oct 2025 15:04:17 -0700 Subject: [PATCH 1502/1708] appc,ipn/ipnlocal: receive AppConnector updates via the event bus (#17411) Add subscribers for AppConnector events Make the RouteAdvertiser interface optional We cannot yet remove it because the tests still depend on it to verify correctness. We will need to separately update the test fixtures to remove that dependency. Publish RouteInfo via the event bus, so we do not need a callback to do that. Replace it with a flag that indicates whether to treat the route info the connector has as "definitive" for filtering purposes. Update the tests to simplify the construction of AppConnector values now that a store callback is no longer required. Also fix a couple of pre-existing racy tests that were hidden by not being concurrent in the same way production is. Updates #15160 Updates #17192 Change-Id: Id39525c0f02184e88feaf0d8a3c05504850e47ee Signed-off-by: M. J. Fromberger --- appc/appconnector.go | 111 +++++++++++------------ appc/appconnector_test.go | 166 ++++++++++++----------------------- ipn/ipnlocal/local.go | 61 ++++++++----- ipn/ipnlocal/local_test.go | 88 +++++++++++++------ ipn/ipnlocal/peerapi_test.go | 79 ++++++++--------- 5 files changed, 238 insertions(+), 267 deletions(-) diff --git a/appc/appconnector.go b/appc/appconnector.go index 291884065..e7b5032f0 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -134,8 +134,9 @@ type AppConnector struct { updatePub *eventbus.Publisher[appctype.RouteUpdate] storePub *eventbus.Publisher[appctype.RouteInfo] - // storeRoutesFunc will be called to persist routes if it is not nil. - storeRoutesFunc func(*appctype.RouteInfo) error + // hasStoredRoutes records whether the connector was initialized with + // persisted route information. + hasStoredRoutes bool // mu guards the fields that follow mu sync.Mutex @@ -168,16 +169,14 @@ type Config struct { EventBus *eventbus.Bus // RouteAdvertiser allows the connector to update the set of advertised routes. - // It must be non-nil. RouteAdvertiser RouteAdvertiser // RouteInfo, if non-nil, use used as the initial set of routes for the // connector. If nil, the connector starts empty. RouteInfo *appctype.RouteInfo - // StoreRoutesFunc, if non-nil, is called when the connector's routes - // change, to allow the routes to be persisted. - StoreRoutesFunc func(*appctype.RouteInfo) error + // HasStoredRoutes indicates that the connector should assume stored routes. + HasStoredRoutes bool } // NewAppConnector creates a new AppConnector. 
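
The Config change above replaces the synchronous StoreRoutesFunc callback with event-bus publication plus a HasStoredRoutes flag: the connector publishes a cloned route snapshot and whatever persists it subscribes, so storage no longer happens via a callback held by the connector. A simplified channel-based sketch of that decoupling; this is not the tailscale.com/util/eventbus API, just the shape of the idea:

package main

import (
	"fmt"
	"sync"
)

// routeInfo is a stand-in for appctype.RouteInfo.
type routeInfo struct {
	Domains map[string][]string
}

// connector publishes snapshots instead of invoking a store callback.
type connector struct {
	mu      sync.Mutex
	domains map[string][]string
	updates chan routeInfo // stand-in for an event-bus publisher
}

func (c *connector) addRoute(domain, addr string) {
	c.mu.Lock()
	if c.domains == nil {
		c.domains = make(map[string][]string)
	}
	c.domains[domain] = append(c.domains[domain], addr)
	// Build a clone under the lock; the subscriber handles it outside our lock.
	snap := routeInfo{Domains: make(map[string][]string, len(c.domains))}
	for d, as := range c.domains {
		snap.Domains[d] = append([]string(nil), as...)
	}
	c.mu.Unlock()
	c.updates <- snap
}

func main() {
	c := &connector{updates: make(chan routeInfo, 8)}

	// The "persister" subscribes to snapshots and stores them; in the real
	// code this is the backend listening on the event bus.
	done := make(chan struct{})
	go func() {
		defer close(done)
		for snap := range c.updates {
			fmt.Printf("persisting %d domains\n", len(snap.Domains))
		}
	}()

	c.addRoute("example.com", "192.0.2.1")
	c.addRoute("example.com", "192.0.2.2")
	close(c.updates)
	<-done
}
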
@@ -187,8 +186,6 @@ func NewAppConnector(c Config) *AppConnector { panic("missing logger") case c.EventBus == nil: panic("missing event bus") - case c.RouteAdvertiser == nil: - panic("missing route advertiser") } ec := c.EventBus.Client("appc.AppConnector") @@ -199,7 +196,7 @@ func NewAppConnector(c Config) *AppConnector { updatePub: eventbus.Publish[appctype.RouteUpdate](ec), storePub: eventbus.Publish[appctype.RouteInfo](ec), routeAdvertiser: c.RouteAdvertiser, - storeRoutesFunc: c.StoreRoutesFunc, + hasStoredRoutes: c.HasStoredRoutes, } if c.RouteInfo != nil { ac.domains = c.RouteInfo.Domains @@ -218,13 +215,19 @@ func NewAppConnector(c Config) *AppConnector { // ShouldStoreRoutes returns true if the appconnector was created with the controlknob on // and is storing its discovered routes persistently. -func (e *AppConnector) ShouldStoreRoutes() bool { - return e.storeRoutesFunc != nil -} +func (e *AppConnector) ShouldStoreRoutes() bool { return e.hasStoredRoutes } // storeRoutesLocked takes the current state of the AppConnector and persists it -func (e *AppConnector) storeRoutesLocked() error { +func (e *AppConnector) storeRoutesLocked() { if e.storePub.ShouldPublish() { + // log write rate and write size + numRoutes := int64(len(e.controlRoutes)) + for _, rs := range e.domains { + numRoutes += int64(len(rs)) + } + e.writeRateMinute.update(numRoutes) + e.writeRateDay.update(numRoutes) + e.storePub.Publish(appctype.RouteInfo{ // Clone here, as the subscriber will handle these outside our lock. Control: slices.Clone(e.controlRoutes), @@ -232,24 +235,6 @@ func (e *AppConnector) storeRoutesLocked() error { Wildcards: slices.Clone(e.wildcards), }) } - if !e.ShouldStoreRoutes() { - return nil - } - - // log write rate and write size - numRoutes := int64(len(e.controlRoutes)) - for _, rs := range e.domains { - numRoutes += int64(len(rs)) - } - e.writeRateMinute.update(numRoutes) - e.writeRateDay.update(numRoutes) - - // TODO(creachdair): Remove this once it's delivered over the event bus. - return e.storeRoutesFunc(&appctype.RouteInfo{ - Control: e.controlRoutes, - Domains: e.domains, - Wildcards: e.wildcards, - }) } // ClearRoutes removes all route state from the AppConnector. @@ -259,7 +244,8 @@ func (e *AppConnector) ClearRoutes() error { e.controlRoutes = nil e.domains = nil e.wildcards = nil - return e.storeRoutesLocked() + e.storeRoutesLocked() + return nil } // UpdateDomainsAndRoutes starts an asynchronous update of the configuration @@ -331,9 +317,9 @@ func (e *AppConnector) updateDomains(domains []string) { } } - // Everything left in oldDomains is a domain we're no longer tracking - // and if we are storing route info we can unadvertise the routes - if e.ShouldStoreRoutes() { + // Everything left in oldDomains is a domain we're no longer tracking and we + // can unadvertise the routes. 
+ if e.hasStoredRoutes { toRemove := []netip.Prefix{} for _, addrs := range oldDomains { for _, a := range addrs { @@ -342,11 +328,13 @@ func (e *AppConnector) updateDomains(domains []string) { } if len(toRemove) != 0 { - e.queue.Add(func() { - if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { - e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slicesx.MapKeys(oldDomains), toRemove, err) - } - }) + if ra := e.routeAdvertiser; ra != nil { + e.queue.Add(func() { + if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { + e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slicesx.MapKeys(oldDomains), toRemove, err) + } + }) + } e.updatePub.Publish(appctype.RouteUpdate{Unadvertise: toRemove}) } } @@ -369,11 +357,10 @@ func (e *AppConnector) updateRoutes(routes []netip.Prefix) { var toRemove []netip.Prefix - // If we're storing routes and know e.controlRoutes is a good - // representation of what should be in AdvertisedRoutes we can stop - // advertising routes that used to be in e.controlRoutes but are not - // in routes. - if e.ShouldStoreRoutes() { + // If we know e.controlRoutes is a good representation of what should be in + // AdvertisedRoutes we can stop advertising routes that used to be in + // e.controlRoutes but are not in routes. + if e.hasStoredRoutes { toRemove = routesWithout(e.controlRoutes, routes) } @@ -390,23 +377,23 @@ nextRoute: } } - e.queue.Add(func() { - if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { - e.logf("failed to advertise routes: %v: %v", routes, err) - } - if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { - e.logf("failed to unadvertise routes: %v: %v", toRemove, err) - } - }) + if e.routeAdvertiser != nil { + e.queue.Add(func() { + if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { + e.logf("failed to advertise routes: %v: %v", routes, err) + } + if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { + e.logf("failed to unadvertise routes: %v: %v", toRemove, err) + } + }) + } e.updatePub.Publish(appctype.RouteUpdate{ Advertise: routes, Unadvertise: toRemove, }) e.controlRoutes = routes - if err := e.storeRoutesLocked(); err != nil { - e.logf("failed to store route info: %v", err) - } + e.storeRoutesLocked() } // Domains returns the currently configured domain list. @@ -485,9 +472,11 @@ func (e *AppConnector) isAddrKnownLocked(domain string, addr netip.Addr) bool { // associated with the given domain. 
func (e *AppConnector) scheduleAdvertisement(domain string, routes ...netip.Prefix) { e.queue.Add(func() { - if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { - e.logf("failed to advertise routes for %s: %v: %v", domain, routes, err) - return + if e.routeAdvertiser != nil { + if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { + e.logf("failed to advertise routes for %s: %v: %v", domain, routes, err) + return + } } e.updatePub.Publish(appctype.RouteUpdate{Advertise: routes}) e.mu.Lock() @@ -503,9 +492,7 @@ func (e *AppConnector) scheduleAdvertisement(domain string, routes ...netip.Pref e.logf("[v2] advertised route for %v: %v", domain, addr) } } - if err := e.storeRoutesLocked(); err != nil { - e.logf("failed to store route info: %v", err) - } + e.storeRoutesLocked() }) } diff --git a/appc/appconnector_test.go b/appc/appconnector_test.go index 91f0185d0..5c362d6fd 100644 --- a/appc/appconnector_test.go +++ b/appc/appconnector_test.go @@ -26,24 +26,15 @@ import ( "tailscale.com/util/slicesx" ) -func fakeStoreRoutes(*appctype.RouteInfo) error { return nil } - func TestUpdateDomains(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: &appctest.RouteCollector{}, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: &appctest.RouteCollector{}}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) a.UpdateDomains([]string{"example.com"}) @@ -76,18 +67,12 @@ func TestUpdateRoutes(t *testing.T) { for _, shouldStore := range []bool{false, true} { w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) a.updateDomains([]string{"*.example.com"}) @@ -149,18 +134,12 @@ func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { for _, shouldStore := range []bool{false, true} { w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) mak.Set(&a.domains, "example.com", []netip.Addr{netip.MustParseAddr("192.0.2.1")}) @@ -190,18 +169,12 @@ func TestDomainRoutes(t *testing.T) { for _, shouldStore := range []bool{false, true} { w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = 
NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) a.updateDomains([]string{"example.com"}) if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { @@ -232,18 +205,12 @@ func TestObserveDNSResponse(t *testing.T) { for _, shouldStore := range []bool{false, true} { w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) // a has no domains configured, so it should not advertise any routes @@ -346,18 +313,12 @@ func TestWildcardDomains(t *testing.T) { for _, shouldStore := range []bool{false, true} { w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) a.updateDomains([]string{"*.example.com"}) @@ -522,18 +483,12 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { } } - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) // nothing has yet been advertised @@ -584,18 +539,12 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { } } - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -665,18 +614,12 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { } } - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -842,8 +785,7 @@ func TestUpdateRoutesDeadlock(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - 
RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, + HasStoredRoutes: true, }) t.Cleanup(a.Close) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c560fdae1..bf6fab8ce 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -592,6 +592,8 @@ func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus healthChange = healthChangeSub.Events() } changeDeltaSub := eventbus.Subscribe[netmon.ChangeDelta](ec) + routeUpdateSub := eventbus.Subscribe[appctype.RouteUpdate](ec) + storeRoutesSub := eventbus.Subscribe[appctype.RouteInfo](ec) var portlist <-chan PortlistServices if buildfeatures.HasPortList { @@ -612,10 +614,31 @@ func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus b.onHealthChange(change) case changeDelta := <-changeDeltaSub.Events(): b.linkChange(&changeDelta) + case pl := <-portlist: if buildfeatures.HasPortList { // redundant, but explicit for linker deadcode and humans b.setPortlistServices(pl) } + case ru := <-routeUpdateSub.Events(): + // TODO(creachadair, 2025-10-02): It is currently possible for updates produced under + // one profile to arrive and be applied after a switch to another profile. + // We need to find a way to ensure that changes to the backend state are applied + // consistently in the presnce of profile changes, which currently may not happen in + // a single atomic step. See: https://github.com/tailscale/tailscale/issues/17414 + if err := b.AdvertiseRoute(ru.Advertise...); err != nil { + b.logf("appc: failed to advertise routes: %v: %v", ru.Advertise, err) + } + if err := b.UnadvertiseRoute(ru.Unadvertise...); err != nil { + b.logf("appc: failed to unadvertise routes: %v: %v", ru.Unadvertise, err) + } + case ri := <-storeRoutesSub.Events(): + // Whether or not routes should be stored can change over time. + shouldStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() + if shouldStoreRoutes { + if err := b.storeRouteInfo(ri); err != nil { + b.logf("appc: failed to store route info: %v", err) + } + } } } } @@ -4836,35 +4859,27 @@ func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs i } }() + // App connectors have been disabled. if !prefs.AppConnector().Advertise { b.appConnector.Close() // clean up a previous connector (safe on nil) b.appConnector = nil return } - shouldAppCStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() - if b.appConnector == nil || b.appConnector.ShouldStoreRoutes() != shouldAppCStoreRoutes { - var ri *appctype.RouteInfo - var storeFunc func(*appctype.RouteInfo) error - if shouldAppCStoreRoutes { - var err error - ri, err = b.readRouteInfoLocked() - if err != nil { - ri = &appctype.RouteInfo{} - if err != ipn.ErrStateNotExist { - b.logf("Unsuccessful Read RouteInfo: ", err) - } - } - storeFunc = b.storeRouteInfo + // We don't (yet) have an app connector configured, or the configured + // connector has a different route persistence setting. 
+ shouldStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() + if b.appConnector == nil || (shouldStoreRoutes != b.appConnector.ShouldStoreRoutes()) { + ri, err := b.readRouteInfoLocked() + if err != nil && err != ipn.ErrStateNotExist { + b.logf("Unsuccessful Read RouteInfo: %v", err) } - b.appConnector.Close() // clean up a previous connector (safe on nil) b.appConnector = appc.NewAppConnector(appc.Config{ Logf: b.logf, EventBus: b.sys.Bus.Get(), - RouteAdvertiser: b, RouteInfo: ri, - StoreRoutesFunc: storeFunc, + HasStoredRoutes: shouldStoreRoutes, }) } if nm == nil { @@ -7008,9 +7023,9 @@ func (b *LocalBackend) ObserveDNSResponse(res []byte) error { // ErrDisallowedAutoRoute is returned by AdvertiseRoute when a route that is not allowed is requested. var ErrDisallowedAutoRoute = errors.New("route is not allowed") -// AdvertiseRoute implements the appc.RouteAdvertiser interface. It sets a new -// route advertisement if one is not already present in the existing routes. -// If the route is disallowed, ErrDisallowedAutoRoute is returned. +// AdvertiseRoute implements the appctype.RouteAdvertiser interface. It sets a +// new route advertisement if one is not already present in the existing +// routes. If the route is disallowed, ErrDisallowedAutoRoute is returned. func (b *LocalBackend) AdvertiseRoute(ipps ...netip.Prefix) error { finalRoutes := b.Prefs().AdvertiseRoutes().AsSlice() var newRoutes []netip.Prefix @@ -7066,8 +7081,8 @@ func coveredRouteRangeNoDefault(finalRoutes []netip.Prefix, ipp netip.Prefix) bo return false } -// UnadvertiseRoute implements the appc.RouteAdvertiser interface. It removes -// a route advertisement if one is present in the existing routes. +// UnadvertiseRoute implements the appctype.RouteAdvertiser interface. It +// removes a route advertisement if one is present in the existing routes. 
func (b *LocalBackend) UnadvertiseRoute(toRemove ...netip.Prefix) error { currentRoutes := b.Prefs().AdvertiseRoutes().AsSlice() finalRoutes := currentRoutes[:0] @@ -7095,7 +7110,7 @@ func namespaceKeyForCurrentProfile(pm *profileManager, key ipn.StateKey) ipn.Sta const routeInfoStateStoreKey ipn.StateKey = "_routeInfo" -func (b *LocalBackend) storeRouteInfo(ri *appctype.RouteInfo) error { +func (b *LocalBackend) storeRouteInfo(ri appctype.RouteInfo) error { if !buildfeatures.HasAppConnectors { return feature.ErrUnavailable } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index bc8bd2a67..168f76268 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -75,8 +75,6 @@ import ( "tailscale.com/wgengine/wgcfg" ) -func fakeStoreRoutes(*appctype.RouteInfo) error { return nil } - func inRemove(ip netip.Addr) bool { for _, pfx := range removeFromDefaultRoute { if pfx.Contains(ip) { @@ -2321,14 +2319,9 @@ func TestOfferingAppConnector(t *testing.T) { if b.OfferingAppConnector() { t.Fatal("unexpected offering app connector") } - rc := &appctest.RouteCollector{} - if shouldStore { - b.appConnector = appc.NewAppConnector(appc.Config{ - Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + b.appConnector = appc.NewAppConnector(appc.Config{ + Logf: t.Logf, EventBus: bus, HasStoredRoutes: shouldStore, + }) if !b.OfferingAppConnector() { t.Fatal("unexpected not offering app connector") } @@ -2379,6 +2372,7 @@ func TestObserveDNSResponse(t *testing.T) { for _, shouldStore := range []bool{false, true} { b := newTestBackend(t) bus := b.sys.Bus.Get() + w := eventbustest.NewWatcher(t, bus) // ensure no error when no app connector is configured if err := b.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { @@ -2386,28 +2380,30 @@ func TestObserveDNSResponse(t *testing.T) { } rc := &appctest.RouteCollector{} - if shouldStore { - b.appConnector = appc.NewAppConnector(appc.Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } - b.appConnector.UpdateDomains([]string{"example.com"}) - b.appConnector.Wait(context.Background()) + a := appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + a.UpdateDomains([]string{"example.com"}) + a.Wait(t.Context()) + b.appConnector = a if err := b.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { t.Errorf("ObserveDNSResponse: %v", err) } - b.appConnector.Wait(context.Background()) + a.Wait(t.Context()) wantRoutes := []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")} if !slices.Equal(rc.Routes(), wantRoutes) { t.Fatalf("got routes %v, want %v", rc.Routes(), wantRoutes) } + + if err := eventbustest.Expect(w, + eqUpdate(appctype.RouteUpdate{Advertise: mustPrefix("192.0.0.8/32")}), + ); err != nil { + t.Error(err) + } } } @@ -2558,7 +2554,7 @@ func TestBackfillAppConnectorRoutes(t *testing.T) { // Store the test IP in profile data, but not in Prefs.AdvertiseRoutes. 
b.ControlKnobs().AppCStoreRoutes.Store(true) - if err := b.storeRouteInfo(&appctype.RouteInfo{ + if err := b.storeRouteInfo(appctype.RouteInfo{ Domains: map[string][]netip.Addr{ "example.com": {ip}, }, @@ -5511,10 +5507,10 @@ func TestReadWriteRouteInfo(t *testing.T) { b.pm.currentProfile = prof1.View() // set up routeInfo - ri1 := &appctype.RouteInfo{} + ri1 := appctype.RouteInfo{} ri1.Wildcards = []string{"1"} - ri2 := &appctype.RouteInfo{} + ri2 := appctype.RouteInfo{} ri2.Wildcards = []string{"2"} // read before write @@ -7066,3 +7062,41 @@ func toStrings[T ~string](in []T) []string { } return out } + +type textUpdate struct { + Advertise []string + Unadvertise []string +} + +func routeUpdateToText(u appctype.RouteUpdate) textUpdate { + var out textUpdate + for _, p := range u.Advertise { + out.Advertise = append(out.Advertise, p.String()) + } + for _, p := range u.Unadvertise { + out.Unadvertise = append(out.Unadvertise, p.String()) + } + return out +} + +func mustPrefix(ss ...string) (out []netip.Prefix) { + for _, s := range ss { + out = append(out, netip.MustParsePrefix(s)) + } + return +} + +// eqUpdate generates an eventbus test filter that matches an appctype.RouteUpdate +// message equal to want, or reports an error giving a human-readable diff. +// +// TODO(creachadair): This is copied from the appc test package, but we can't +// put it into the appctest package because the appc tests depend on it and +// that makes a cycle. Clean up those tests and put this somewhere common. +func eqUpdate(want appctype.RouteUpdate) func(appctype.RouteUpdate) error { + return func(got appctype.RouteUpdate) error { + if diff := cmp.Diff(routeUpdateToText(got), routeUpdateToText(want)); diff != "" { + return fmt.Errorf("wrong update (-got, +want):\n%s", diff) + } + return nil + } +} diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index a16d55b8c..7c2e677a4 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -256,22 +256,12 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { reg := new(usermetric.Registry) eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) - var a *appc.AppConnector - if shouldStore { - a = appc.NewAppConnector(appc.Config{ - Logf: t.Logf, - EventBus: sys.Bus.Get(), - RouteAdvertiser: &appctest.RouteCollector{}, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = appc.NewAppConnector(appc.Config{ - Logf: t.Logf, - EventBus: sys.Bus.Get(), - RouteAdvertiser: &appctest.RouteCollector{}, - }) - } + a := appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: sys.Bus.Get(), + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) sys.Set(pm.Store()) sys.Set(eng) @@ -329,11 +319,11 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { for _, shouldStore := range []bool{false, true} { - ctx := context.Background() var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) + bw := eventbustest.NewWatcher(t, sys.Bus.Get()) rc := &appctest.RouteCollector{} ht := health.NewTracker(sys.Bus.Get()) @@ -341,18 +331,13 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { reg := new(usermetric.Registry) eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) - var a *appc.AppConnector - if shouldStore { - a = 
appc.NewAppConnector(appc.Config{ - Logf: t.Logf, - EventBus: sys.Bus.Get(), - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: sys.Bus.Get(), RouteAdvertiser: rc}) - } + a := appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: sys.Bus.Get(), + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) sys.Set(pm.Store()) sys.Set(eng) @@ -362,7 +347,7 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { h.ps = &peerAPIServer{b: b} h.ps.b.appConnector.UpdateDomains([]string{"example.com"}) - h.ps.b.appConnector.Wait(ctx) + a.Wait(t.Context()) h.ps.resolver = &fakeResolver{build: func(b *dnsmessage.Builder) { b.AResource( @@ -392,12 +377,18 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { if w.Code != http.StatusOK { t.Errorf("unexpected status code: %v", w.Code) } - h.ps.b.appConnector.Wait(ctx) + a.Wait(t.Context()) wantRoutes := []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")} if !slices.Equal(rc.Routes(), wantRoutes) { t.Errorf("got %v; want %v", rc.Routes(), wantRoutes) } + + if err := eventbustest.Expect(bw, + eqUpdate(appctype.RouteUpdate{Advertise: mustPrefix("192.0.0.8/32")}), + ); err != nil { + t.Error(err) + } } } @@ -408,24 +399,20 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) + bw := eventbustest.NewWatcher(t, sys.Bus.Get()) ht := health.NewTracker(sys.Bus.Get()) reg := new(usermetric.Registry) rc := &appctest.RouteCollector{} eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) - var a *appc.AppConnector - if shouldStore { - a = appc.NewAppConnector(appc.Config{ - Logf: t.Logf, - EventBus: sys.Bus.Get(), - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: sys.Bus.Get(), RouteAdvertiser: rc}) - } + a := appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: sys.Bus.Get(), + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) sys.Set(pm.Store()) sys.Set(eng) @@ -482,6 +469,12 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { if !slices.Equal(rc.Routes(), wantRoutes) { t.Errorf("got %v; want %v", rc.Routes(), wantRoutes) } + + if err := eventbustest.Expect(bw, + eqUpdate(appctype.RouteUpdate{Advertise: mustPrefix("192.0.0.8/32")}), + ); err != nil { + t.Error(err) + } } } From 192f8d28042d69634ab17e2a7f9bab0fc5c13688 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 6 Oct 2025 15:43:42 -0700 Subject: [PATCH 1503/1708] wgengine/magicsock: add more handleNewServerEndpointRunLoop tests (#17469) Updates tailscale/corp#32978 Signed-off-by: Jordan Whited --- wgengine/magicsock/relaymanager_test.go | 195 ++++++++++++++++++++---- 1 file changed, 166 insertions(+), 29 deletions(-) diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index 6ae21b8fb..d40081839 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -80,40 +80,177 @@ func TestRelayManagerGetServers(t *testing.T) { } } -// Test for http://go/corp/32978 func TestRelayManager_handleNewServerEndpointRunLoop(t 
*testing.T) { - rm := relayManager{} - rm.init() - <-rm.runLoopStoppedCh // prevent runLoop() from starting, we will inject/handle events in the test - ep := &endpoint{} + wantHandshakeWorkCount := func(t *testing.T, rm *relayManager, n int) { + t.Helper() + byServerDiscoByEndpoint := 0 + for _, v := range rm.handshakeWorkByServerDiscoByEndpoint { + byServerDiscoByEndpoint += len(v) + } + byServerDiscoVNI := len(rm.handshakeWorkByServerDiscoVNI) + if byServerDiscoByEndpoint != n || + byServerDiscoVNI != n || + byServerDiscoByEndpoint != byServerDiscoVNI { + t.Fatalf("want handshake work count %d byServerDiscoByEndpoint=%d byServerDiscoVNI=%d", + n, + byServerDiscoByEndpoint, + byServerDiscoVNI, + ) + } + } + conn := newConn(t.Logf) - ep.c = conn - serverDisco := key.NewDisco().Public() - rm.handleNewServerEndpointRunLoop(newRelayServerEndpointEvent{ - wlb: endpointWithLastBest{ - ep: ep, + epA := &endpoint{c: conn} + epB := &endpoint{c: conn} + serverDiscoA := key.NewDisco().Public() + serverDiscoB := key.NewDisco().Public() + + serverAendpointALamport1VNI1 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 1, VNI: 1}, + } + serverAendpointALamport1VNI1LastBestMatching := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA, lastBestIsTrusted: true, lastBest: addrQuality{relayServerDisco: serverDiscoA}}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 1, VNI: 1}, + } + serverAendpointALamport2VNI1 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 2, VNI: 1}, + } + serverAendpointALamport2VNI2 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 2, VNI: 2}, + } + serverAendpointBLamport1VNI2 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epB}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 1, VNI: 2}, + } + serverBendpointALamport1VNI1 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoB, LamportID: 1, VNI: 1}, + } + + tests := []struct { + name string + events []newRelayServerEndpointEvent + want []newRelayServerEndpointEvent + }{ + { + // Test for http://go/corp/32978 + name: "eq server+ep neq VNI higher lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1, + serverAendpointALamport2VNI2, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + }, + }, + { + name: "eq server+ep neq VNI lower lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + serverAendpointALamport1VNI1, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + }, + }, + { + name: "eq server+vni neq ep lower lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + serverAendpointBLamport1VNI2, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + }, + }, + { + name: "eq server+vni neq ep higher lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointBLamport1VNI2, + serverAendpointALamport2VNI2, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + }, + }, + { + name: "eq server+endpoint+vni higher lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1, + serverAendpointALamport2VNI1, + }, + want: 
[]newRelayServerEndpointEvent{ + serverAendpointALamport2VNI1, + }, }, - se: udprelay.ServerEndpoint{ - ServerDisco: serverDisco, - LamportID: 1, - VNI: 1, + { + name: "eq server+endpoint+vni lower lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI1, + serverAendpointALamport1VNI1, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI1, + }, }, - }) - rm.handleNewServerEndpointRunLoop(newRelayServerEndpointEvent{ - wlb: endpointWithLastBest{ - ep: ep, + { + name: "eq endpoint+vni+lamport neq server", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1, + serverBendpointALamport1VNI1, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1, + serverBendpointALamport1VNI1, + }, }, - se: udprelay.ServerEndpoint{ - ServerDisco: serverDisco, - LamportID: 2, - VNI: 2, + { + name: "trusted last best with matching server", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1LastBestMatching, + }, + want: []newRelayServerEndpointEvent{}, }, - }) - rm.stopWorkRunLoop(ep) - if len(rm.handshakeWorkByServerDiscoByEndpoint) != 0 || - len(rm.handshakeWorkByServerDiscoVNI) != 0 || - len(rm.handshakeWorkAwaitingPong) != 0 || - len(rm.addrPortVNIToHandshakeWork) != 0 { - t.Fatal("stranded relayHandshakeWork state") + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rm := &relayManager{} + rm.init() + <-rm.runLoopStoppedCh // prevent runLoop() from starting + + // feed events + for _, event := range tt.events { + rm.handleNewServerEndpointRunLoop(event) + } + + // validate state + wantHandshakeWorkCount(t, rm, len(tt.want)) + for _, want := range tt.want { + byServerDisco, ok := rm.handshakeWorkByServerDiscoByEndpoint[want.wlb.ep] + if !ok { + t.Fatal("work not found by endpoint") + } + workByServerDiscoByEndpoint, ok := byServerDisco[want.se.ServerDisco] + if !ok { + t.Fatal("work not found by server disco by endpoint") + } + workByServerDiscoVNI, ok := rm.handshakeWorkByServerDiscoVNI[serverDiscoVNI{want.se.ServerDisco, want.se.VNI}] + if !ok { + t.Fatal("work not found by server disco + VNI") + } + if workByServerDiscoByEndpoint != workByServerDiscoVNI { + t.Fatal("workByServerDiscoByEndpoint != workByServerDiscoVNI") + } + } + + // cleanup + for _, event := range tt.events { + rm.stopWorkRunLoop(event.wlb.ep) + } + wantHandshakeWorkCount(t, rm, 0) + }) } } From 059f53e67a3fbee151da70638a517ed4d511a749 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Oct 2025 13:10:58 -0700 Subject: [PATCH 1504/1708] feature/condlite/expvar: add expvar stub package when metrics not needed Saves ~53 KB from the min build. 
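For illustration only (not part of the patch), a minimal sketch of the caller side of this pattern, with hypothetical names rxPackets and noteRxPacket. In full builds the alias below resolves to the standard library expvar.Int; under the omit tags used by the stub package added in this patch it becomes a plain int64 whose Add is a no-op, so the real expvar package and its net/http and encoding/json dependencies drop out of the binary.

package example

import "tailscale.com/feature/condlite/expvar"

// rxPackets is a hypothetical counter. Its type is either the standard
// library expvar.Int (metrics enabled) or a no-op int64 stub (metrics
// omitted), depending on build tags.
var rxPackets expvar.Int

// noteRxPacket bumps the counter; in omit builds Add does nothing, so the
// linker can discard the real expvar package entirely.
func noteRxPacket() {
	rxPackets.Add(1)
}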
Updates #12614 Change-Id: I73f9544a9feea06027c6ebdd222d712ada851299 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscaled/depaware-min.txt | 8 ++++---- cmd/tailscaled/depaware-minbox.txt | 8 ++++---- cmd/tailscaled/depaware.txt | 1 + cmd/tailscaled/deps_test.go | 1 + cmd/tsidp/depaware.txt | 1 + feature/condlite/expvar/expvar.go | 12 ++++++++++++ feature/condlite/expvar/omit.go | 11 +++++++++++ tsnet/depaware.txt | 1 + wgengine/magicsock/magicsock.go | 3 +-- 10 files changed, 37 insertions(+), 10 deletions(-) create mode 100644 feature/condlite/expvar/expvar.go create mode 100644 feature/condlite/expvar/omit.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 2c4cd9e85..e06782674 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -701,6 +701,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/c2n from tailscale.com/tsnet + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index ed7ddee2a..2cf0f1561 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -54,6 +54,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/envknob/featureknob from tailscale.com/ipn/ipnlocal tailscale.com/feature from tailscale.com/cmd/tailscaled+ tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscaled+ + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister @@ -315,10 +316,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/base64 from encoding/json+ encoding/binary from compress/gzip+ encoding/hex from crypto/x509+ - encoding/json from expvar+ + encoding/json from github.com/gaissmai/bart+ encoding/pem from crypto/tls+ errors from bufio+ - expvar from tailscale.com/wgengine/magicsock flag from tailscale.com/cmd/tailscaled+ fmt from compress/flate+ hash from crypto+ @@ -369,7 +369,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de io from bufio+ io/fs from crypto/x509+ iter from bytes+ - log from expvar+ + log from github.com/klauspost/compress/zstd+ log/internal from log maps from crypto/x509+ math from compress/flate+ @@ -381,7 +381,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de mime/multipart from net/http mime/quotedprintable from mime/multipart net from crypto/tls+ - net/http from expvar+ + net/http from tailscale.com/cmd/tailscaled+ net/http/httptrace from net/http+ net/http/internal from net/http net/http/internal/ascii from net/http diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 93a884c1e..483a32c71 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -74,6 +74,7 @@ 
tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/envknob/featureknob from tailscale.com/ipn/ipnlocal tailscale.com/feature from tailscale.com/cmd/tailscaled+ tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister+ @@ -345,10 +346,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/base64 from encoding/json+ encoding/binary from compress/gzip+ encoding/hex from crypto/x509+ - encoding/json from expvar+ + encoding/json from github.com/gaissmai/bart+ encoding/pem from crypto/tls+ errors from bufio+ - expvar from tailscale.com/wgengine/magicsock flag from tailscale.com/cmd/tailscaled+ fmt from compress/flate+ hash from crypto+ @@ -404,7 +404,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de io/fs from crypto/x509+ io/ioutil from github.com/skip2/go-qrcode iter from bytes+ - log from expvar+ + log from github.com/klauspost/compress/zstd+ log/internal from log maps from crypto/x509+ math from compress/flate+ @@ -416,7 +416,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de mime/multipart from net/http mime/quotedprintable from mime/multipart net from crypto/tls+ - net/http from expvar+ + net/http from net/http/httputil+ net/http/httptrace from net/http+ net/http/httputil from tailscale.com/cmd/tailscale/cli net/http/internal from net/http+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 7ef5c2ede..d58cebec2 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -278,6 +278,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/c2n from tailscale.com/feature/condregister tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/clientupdate from tailscale.com/feature/condregister + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index a66706db2..3c3115f42 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -282,6 +282,7 @@ func TestMinTailscaledWithCLI(t *testing.T) { }, BadDeps: map[string]string{ "golang.org/x/net/http2": "unexpected x/net/http2 dep; tailscale/tailscale#17305", + "expvar": "unexpected expvar dep", }, }.Check(t) } diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index fb7c59ebc..ba7bc46cd 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -143,6 +143,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/c2n from tailscale.com/tsnet + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet 
tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet diff --git a/feature/condlite/expvar/expvar.go b/feature/condlite/expvar/expvar.go new file mode 100644 index 000000000..edc16ac77 --- /dev/null +++ b/feature/condlite/expvar/expvar.go @@ -0,0 +1,12 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !(ts_omit_debug && ts_omit_clientmetrics && ts_omit_usermetrics) + +// Package expvar contains type aliases for expvar types, to allow conditionally +// excluding the package from builds. +package expvar + +import "expvar" + +type Int = expvar.Int diff --git a/feature/condlite/expvar/omit.go b/feature/condlite/expvar/omit.go new file mode 100644 index 000000000..a21d94deb --- /dev/null +++ b/feature/condlite/expvar/omit.go @@ -0,0 +1,11 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_debug && ts_omit_clientmetrics && ts_omit_usermetrics + +// excluding the package from builds. +package expvar + +type Int int64 + +func (*Int) Add(int64) {} diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 4c3d8018f..e6e986f92 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -139,6 +139,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/c2n from tailscale.com/tsnet + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 81ca49d3d..112085053 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -11,7 +11,6 @@ import ( "context" "encoding/binary" "errors" - "expvar" "fmt" "io" "net" @@ -29,11 +28,11 @@ import ( "github.com/tailscale/wireguard-go/device" "go4.org/mem" "golang.org/x/net/ipv6" - "tailscale.com/control/controlknobs" "tailscale.com/disco" "tailscale.com/envknob" "tailscale.com/feature/buildfeatures" + "tailscale.com/feature/condlite/expvar" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn/ipnstate" From 0415a56b6c91435eeeef83cc2d6bea91990ac861 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 6 Oct 2025 20:59:47 -0700 Subject: [PATCH 1505/1708] ipn/ipnlocal: fix another racy test (#17472) Some of the test cases access fields of the backend that are supposed to be locked while the test is running, which can trigger the race detector. I fixed a few of these in #17411, but I missed these two cases. Updates #15160 Updates #17192 Change-Id: I45664d5e34320ecdccd2844e0f8b228145aaf603 Signed-off-by: M. J. 
Fromberger --- ipn/ipnlocal/peerapi_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index 7c2e677a4..3c9f57f1f 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -422,7 +422,7 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { h.ps = &peerAPIServer{b: b} h.ps.b.appConnector.UpdateDomains([]string{"www.example.com"}) - h.ps.b.appConnector.Wait(ctx) + a.Wait(ctx) h.ps.resolver = &fakeResolver{build: func(b *dnsmessage.Builder) { b.CNAMEResource( @@ -463,7 +463,7 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { if w.Code != http.StatusOK { t.Errorf("unexpected status code: %v", w.Code) } - h.ps.b.appConnector.Wait(ctx) + a.Wait(ctx) wantRoutes := []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")} if !slices.Equal(rc.Routes(), wantRoutes) { From 10cb59fa879b1e21daf30f8809efe774a27418fa Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Oct 2025 16:55:18 -0700 Subject: [PATCH 1506/1708] build_dist.sh: keep --extra-small making a usable build, add --min Historically, and until recently, --extra-small produced a usable build. When I recently made osrouter be modular in 39e35379d41fc788 (which is useful in, say, tsnet builds) after also making netstack modular, that meant --min now lacked both netstack support for routing and system support for routing, making no way to get packets into wireguard. That's not a nice default to users. (we've documented build_dist.sh in our KB) Restore --extra-small to making a usable build, and add --min for benchmarking purposes. Updates #12614 Change-Id: I649e41e324a36a0ca94953229c9914046b5dc497 Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/build_dist.sh b/build_dist.sh index 564e30221..c05644711 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,6 +41,14 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" + tags="${tags:+$tags,},$(GOOS= GOARCH= $go run ./cmd/featuretags --min --add=osrouter)" + ;; + --min) + # --min is like --extra-small but even smaller, removing all features, + # even if it results in a useless binary (e.g. removing both netstack + + # osrouter). It exists for benchmarking purposes only. + shift + ldflags="$ldflags -w -s" tags="${tags:+$tags,},$(GOOS= GOARCH= $go run ./cmd/featuretags --min)" ;; --box) From 28b1b4c3c19225dcda6e44fda964c96a9fe1f9b2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Oct 2025 16:48:41 -0700 Subject: [PATCH 1507/1708] cmd/tailscaled: guard some flag work with buildfeatures checks Updates #12614 Change-Id: Iec6f15d33a6500e7b0b7e8f5c098f7c00334460f Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/tailscaled.go | 64 ++++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 29 deletions(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 62df4067d..a46457fac 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -79,13 +79,11 @@ func defaultTunName() string { case "aix", "solaris", "illumos": return "userspace-networking" case "linux": - switch distro.Get() { - case distro.Synology: + if buildfeatures.HasSynology && buildfeatures.HasNetstack && distro.Get() == distro.Synology { // Try TUN, but fall back to userspace networking if needed. 
// See https://github.com/tailscale/tailscale-synology/issues/35 return "tailscale0,userspace-networking" } - } return "tailscale0" } @@ -195,10 +193,14 @@ func main() { flag.StringVar(&args.tunname, "tun", defaultTunName(), `tunnel interface name; use "userspace-networking" (beta) to not use TUN`) flag.Var(flagtype.PortValue(&args.port, defaultPort()), "port", "UDP port to listen on for WireGuard and peer-to-peer traffic; 0 means automatically select") flag.StringVar(&args.statepath, "state", "", "absolute path of state file; use 'kube:' to use Kubernetes secrets or 'arn:aws:ssm:...' to store in AWS SSM; use 'mem:' to not store state and register as an ephemeral node. If empty and --statedir is provided, the default is /tailscaled.state. Default: "+paths.DefaultTailscaledStateFile()) - flag.Var(&args.encryptState, "encrypt-state", `encrypt the state file on disk; when not set encryption will be enabled if supported on this platform; uses TPM on Linux and Windows, on all other platforms this flag is not supported`) + if buildfeatures.HasTPM { + flag.Var(&args.encryptState, "encrypt-state", `encrypt the state file on disk; when not set encryption will be enabled if supported on this platform; uses TPM on Linux and Windows, on all other platforms this flag is not supported`) + } flag.StringVar(&args.statedir, "statedir", "", "path to directory for storage of config state, TLS certs, temporary incoming Taildrop files, etc. If empty, it's derived from --state when possible.") flag.StringVar(&args.socketpath, "socket", paths.DefaultTailscaledSocket(), "path of the service unix socket") - flag.StringVar(&args.birdSocketPath, "bird-socket", "", "path of the bird unix socket") + if buildfeatures.HasBird { + flag.StringVar(&args.birdSocketPath, "bird-socket", "", "path of the bird unix socket") + } flag.BoolVar(&printVersion, "version", false, "print version information and exit") flag.BoolVar(&args.disableLogs, "no-logs-no-support", false, "disable log uploads; this also disables any technical support") flag.StringVar(&args.confFile, "config", "", "path to config file, or 'vm:user-data' to use the VM's user-data (EC2)") @@ -252,7 +254,7 @@ func main() { log.Fatalf("--socket is required") } - if args.birdSocketPath != "" && createBIRDClient == nil { + if buildfeatures.HasBird && args.birdSocketPath != "" && createBIRDClient == nil { log.SetFlags(0) log.Fatalf("--bird-socket is not supported on %s", runtime.GOOS) } @@ -273,28 +275,30 @@ func main() { } } - if !args.encryptState.set { - args.encryptState.v = defaultEncryptState() - } - if args.encryptState.v { - if runtime.GOOS != "linux" && runtime.GOOS != "windows" { - log.SetFlags(0) - log.Fatalf("--encrypt-state is not supported on %s", runtime.GOOS) + if buildfeatures.HasTPM { + if !args.encryptState.set { + args.encryptState.v = defaultEncryptState() } - // Check if we have TPM support in this build. - if !store.HasKnownProviderPrefix(store.TPMPrefix + "/") { - log.SetFlags(0) - log.Fatal("--encrypt-state is not supported in this build of tailscaled") - } - // Check if we have TPM access. - if !hostinfo.New().TPM.Present() { - log.SetFlags(0) - log.Fatal("--encrypt-state is not supported on this device or a TPM is not accessible") - } - // Check for conflicting prefix in --state, like arn: or kube:. 
- if args.statepath != "" && store.HasKnownProviderPrefix(args.statepath) { - log.SetFlags(0) - log.Fatal("--encrypt-state can only be used with --state set to a local file path") + if args.encryptState.v { + if runtime.GOOS != "linux" && runtime.GOOS != "windows" { + log.SetFlags(0) + log.Fatalf("--encrypt-state is not supported on %s", runtime.GOOS) + } + // Check if we have TPM support in this build. + if !store.HasKnownProviderPrefix(store.TPMPrefix + "/") { + log.SetFlags(0) + log.Fatal("--encrypt-state is not supported in this build of tailscaled") + } + // Check if we have TPM access. + if !hostinfo.New().TPM.Present() { + log.SetFlags(0) + log.Fatal("--encrypt-state is not supported on this device or a TPM is not accessible") + } + // Check for conflicting prefix in --state, like arn: or kube:. + if args.statepath != "" && store.HasKnownProviderPrefix(args.statepath) { + log.SetFlags(0) + log.Fatal("--encrypt-state can only be used with --state set to a local file path") + } } } @@ -308,8 +312,10 @@ func main() { err := run() - // Remove file sharing from Windows shell (noop in non-windows) - osshare.SetFileSharingEnabled(false, logger.Discard) + if buildfeatures.HasTaildrop { + // Remove file sharing from Windows shell (noop in non-windows) + osshare.SetFileSharingEnabled(false, logger.Discard) + } if err != nil { log.Fatal(err) From 316afe7d02babc24001b23ccfefd28eaa26adb7c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 4 Oct 2025 17:40:09 -0700 Subject: [PATCH 1508/1708] util/checkchange: stop using deephash everywhere Saves 45 KB from the min build, no longer pulling in deephash or util/hashx, both with unsafe code. It can actually be more efficient to not use deephash, as you don't have to walk all bytes of all fields recursively to answer that two things are not equal. Instead, you can just return false at the first difference you see. And then with views (as we use ~everywhere nowadays), the cloning the old value isn't expensive, as it's just a pointer under the hood. 
Updates #12614 Change-Id: I7b08616b8a09b3ade454bb5e0ac5672086fe8aec Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 3 +- cmd/tailscaled/depaware-min.txt | 3 +- cmd/tailscaled/depaware-minbox.txt | 3 +- cmd/tailscaled/depaware.txt | 3 +- cmd/tailscaled/deps_test.go | 4 ++ cmd/tsidp/depaware.txt | 3 +- ipn/ipnlocal/local.go | 74 +++++++++++++++++------- net/dns/config.go | 21 +++++++ tailcfg/tailcfg.go | 2 +- tailcfg/tailcfg_clone.go | 37 +++++++++++- tailcfg/tailcfg_view.go | 93 +++++++++++++++++++++++++++++- tsnet/depaware.txt | 3 +- util/checkchange/checkchange.go | 25 ++++++++ wgengine/router/router.go | 13 +++++ wgengine/userspace.go | 56 ++++++++++++------ wgengine/wgcfg/config.go | 33 +++++++++++ wgengine/wgcfg/config_test.go | 41 +++++++++++++ 17 files changed, 367 insertions(+), 50 deletions(-) create mode 100644 util/checkchange/checkchange.go create mode 100644 wgengine/wgcfg/config_test.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index e06782674..d1a63a188 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -825,12 +825,13 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/cmd/k8s-operator+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/cmd/k8s-operator+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ - 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/tsd+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 2cf0f1561..1ef3568d1 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -144,17 +144,16 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/tkatype from tailscale.com/control/controlclient+ tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ - 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/control/controlclient+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth - 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/control/controlclient+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 
483a32c71..a7f5d2e0e 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -170,18 +170,17 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/tkatype from tailscale.com/control/controlclient+ tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/cmpver from tailscale.com/clientupdate tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ - 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth - 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/control/controlclient+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index d58cebec2..541e9f3fc 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -412,12 +412,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/tkatype from tailscale.com/tka+ tailscale.com/types/views from tailscale.com/ipn/ipnlocal+ tailscale.com/util/backoff from tailscale.com/cmd/tailscaled+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/control/controlclient+ tailscale.com/util/cloudenv from tailscale.com/net/dns/resolver+ tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/ipn/ipnlocal+ - 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/tsd+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 3c3115f42..0711bafba 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -244,6 +244,8 @@ func TestMinTailscaledNoCLI(t *testing.T) { "internal/socks", "github.com/tailscale/peercred", "tailscale.com/types/netlogtype", + "deephash", + "util/hashx", } deptest.DepChecker{ GOOS: "linux", @@ -268,6 +270,8 @@ func TestMinTailscaledWithCLI(t *testing.T) { "tailscale.com/metrics", "tailscale.com/tsweb/varz", "dirwalk", + "deephash", + "util/hashx", } deptest.DepChecker{ GOOS: "linux", diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index ba7bc46cd..eb2086947 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -252,12 +252,13 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/checkchange from 
tailscale.com/ipn/ipnlocal+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ - 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/client/local+ diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index bf6fab8ce..c8b49de75 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -83,8 +83,8 @@ import ( "tailscale.com/types/preftype" "tailscale.com/types/ptr" "tailscale.com/types/views" + "tailscale.com/util/checkchange" "tailscale.com/util/clientmetric" - "tailscale.com/util/deephash" "tailscale.com/util/dnsname" "tailscale.com/util/eventbus" "tailscale.com/util/goroutines" @@ -262,13 +262,13 @@ type LocalBackend struct { // of [LocalBackend]'s own state that is not tied to the node context. currentNodeAtomic atomic.Pointer[nodeBackend] - conf *conffile.Config // latest parsed config, or nil if not in declarative mode - pm *profileManager // mu guards access - filterHash deephash.Sum // TODO(nickkhyl): move to nodeBackend - httpTestClient *http.Client // for controlclient. nil by default, used by tests. - ccGen clientGen // function for producing controlclient; lazily populated - sshServer SSHServer // or nil, initialized lazily. - appConnector *appc.AppConnector // or nil, initialized when configured. + conf *conffile.Config // latest parsed config, or nil if not in declarative mode + pm *profileManager // mu guards access + lastFilterInputs *filterInputs + httpTestClient *http.Client // for controlclient. nil by default, used by tests. + ccGen clientGen // function for producing controlclient; lazily populated + sshServer SSHServer // or nil, initialized lazily. + appConnector *appc.AppConnector // or nil, initialized when configured. // notifyCancel cancels notifications to the current SetNotifyCallback. notifyCancel context.CancelFunc cc controlclient.Client // TODO(nickkhyl): move to nodeBackend @@ -2626,6 +2626,36 @@ var invalidPacketFilterWarnable = health.Register(&health.Warnable{ Text: health.StaticMessage("The coordination server sent an invalid packet filter permitting traffic to unlocked nodes; rejecting all packets for safety"), }) +// filterInputs holds the inputs to the packet filter. +// +// Any field changes or additions here should be accompanied by a change to +// [filterInputs.Equal] and [filterInputs.Clone] if necessary. (e.g. non-view +// and non-value fields) +type filterInputs struct { + HaveNetmap bool + Addrs views.Slice[netip.Prefix] + FilterMatch views.Slice[filter.Match] + LocalNets views.Slice[netipx.IPRange] + LogNets views.Slice[netipx.IPRange] + ShieldsUp bool + SSHPolicy tailcfg.SSHPolicyView +} + +func (fi *filterInputs) Equal(o *filterInputs) bool { + if fi == nil || o == nil { + return fi == o + } + return reflect.DeepEqual(fi, o) +} + +func (fi *filterInputs) Clone() *filterInputs { + if fi == nil { + return nil + } + v := *fi // all fields are shallow copyable + return &v +} + // updateFilterLocked updates the packet filter in wgengine based on the // given netMap and user preferences. 
// @@ -2722,20 +2752,20 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { } localNets, _ := localNetsB.IPSet() logNets, _ := logNetsB.IPSet() - var sshPol tailcfg.SSHPolicy - if haveNetmap && netMap.SSHPolicy != nil { - sshPol = *netMap.SSHPolicy - } - - changed := deephash.Update(&b.filterHash, &struct { - HaveNetmap bool - Addrs views.Slice[netip.Prefix] - FilterMatch []filter.Match - LocalNets []netipx.IPRange - LogNets []netipx.IPRange - ShieldsUp bool - SSHPolicy tailcfg.SSHPolicy - }{haveNetmap, addrs, packetFilter, localNets.Ranges(), logNets.Ranges(), shieldsUp, sshPol}) + var sshPol tailcfg.SSHPolicyView + if buildfeatures.HasSSH && haveNetmap && netMap.SSHPolicy != nil { + sshPol = netMap.SSHPolicy.View() + } + + changed := checkchange.Update(&b.lastFilterInputs, &filterInputs{ + HaveNetmap: haveNetmap, + Addrs: addrs, + FilterMatch: views.SliceOf(packetFilter), + LocalNets: views.SliceOf(localNets.Ranges()), + LogNets: views.SliceOf(logNets.Ranges()), + ShieldsUp: shieldsUp, + SSHPolicy: sshPol, + }) if !changed { return } diff --git a/net/dns/config.go b/net/dns/config.go index b2c7c4285..22caf6ef5 100644 --- a/net/dns/config.go +++ b/net/dns/config.go @@ -8,6 +8,7 @@ import ( "bufio" "fmt" "net/netip" + "reflect" "slices" "sort" @@ -188,3 +189,23 @@ func sameResolverNames(a, b []*dnstype.Resolver) bool { } return true } + +func (c *Config) Clone() *Config { + if c == nil { + return nil + } + return &Config{ + DefaultResolvers: slices.Clone(c.DefaultResolvers), + Routes: make(map[dnsname.FQDN][]*dnstype.Resolver, len(c.Routes)), + SearchDomains: slices.Clone(c.SearchDomains), + Hosts: make(map[dnsname.FQDN][]netip.Addr, len(c.Hosts)), + OnlyIPv6: c.OnlyIPv6, + } +} + +func (c *Config) Equal(o *Config) bool { + if c == nil || o == nil { + return c == o + } + return reflect.DeepEqual(c, o) +} diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 7484c7466..3edc9aef0 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -5,7 +5,7 @@ // the node and the coordination server. package tailcfg -//go:generate go run tailscale.com/cmd/viewer --type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService --clonefunc +//go:generate go run tailscale.com/cmd/viewer --type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService,SSHPolicy --clonefunc import ( "bytes" diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 95f8905b8..9aa767388 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -651,9 +651,35 @@ var _VIPServiceCloneNeedsRegeneration = VIPService(struct { Active bool }{}) +// Clone makes a deep copy of SSHPolicy. +// The result aliases no memory with the original. +func (src *SSHPolicy) Clone() *SSHPolicy { + if src == nil { + return nil + } + dst := new(SSHPolicy) + *dst = *src + if src.Rules != nil { + dst.Rules = make([]*SSHRule, len(src.Rules)) + for i := range dst.Rules { + if src.Rules[i] == nil { + dst.Rules[i] = nil + } else { + dst.Rules[i] = src.Rules[i].Clone() + } + } + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. 
+var _SSHPolicyCloneNeedsRegeneration = SSHPolicy(struct { + Rules []*SSHRule +}{}) + // Clone duplicates src into dst and reports whether it succeeded. // To succeed, must be of types <*T, *T> or <*T, **T>, -// where T is one of User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService. +// where T is one of User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService,SSHPolicy. func Clone(dst, src any) bool { switch src := src.(type) { case *User: @@ -836,6 +862,15 @@ func Clone(dst, src any) bool { *dst = src.Clone() return true } + case *SSHPolicy: + switch dst := dst.(type) { + case *SSHPolicy: + *dst = *src.Clone() + return true + case **SSHPolicy: + *dst = src.Clone() + return true + } } return false } diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index e44d0bbef..88dd90096 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -21,7 +21,7 @@ import ( "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService +//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService,SSHPolicy // View returns a read-only view of User. func (p *User) View() UserView { @@ -2604,3 +2604,94 @@ var _VIPServiceViewNeedsRegeneration = VIPService(struct { Ports []ProtoPortRange Active bool }{}) + +// View returns a read-only view of SSHPolicy. +func (p *SSHPolicy) View() SSHPolicyView { + return SSHPolicyView{ж: p} +} + +// SSHPolicyView provides a read-only view over SSHPolicy. +// +// Its methods should only be called if `Valid()` returns true. +type SSHPolicyView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *SSHPolicy +} + +// Valid reports whether v's underlying value is non-nil. +func (v SSHPolicyView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v SSHPolicyView) AsStruct() *SSHPolicy { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +// MarshalJSON implements [jsonv1.Marshaler]. +func (v SSHPolicyView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v SSHPolicyView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
+func (v *SSHPolicyView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x SSHPolicy + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *SSHPolicyView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x SSHPolicy + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// Rules are the rules to process for an incoming SSH connection. The first +// matching rule takes its action and stops processing further rules. +// +// When an incoming connection first starts, all rules are evaluated in +// "none" auth mode, where the client hasn't even been asked to send a +// public key. All SSHRule.Principals requiring a public key won't match. If +// a rule matches on the first pass and its Action is reject, the +// authentication fails with that action's rejection message, if any. +// +// If the first pass rule evaluation matches nothing without matching an +// Action with Reject set, the rules are considered to see whether public +// keys might still result in a match. If not, "none" auth is terminated +// before proceeding to public key mode. If so, the client is asked to try +// public key authentication and the rules are evaluated again for each of +// the client's present keys. +func (v SSHPolicyView) Rules() views.SliceView[*SSHRule, SSHRuleView] { + return views.SliceOfViews[*SSHRule, SSHRuleView](v.ж.Rules) +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _SSHPolicyViewNeedsRegeneration = SSHPolicy(struct { + Rules []*SSHRule +}{}) diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index e6e986f92..9dd8f0d65 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -247,12 +247,13 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ - 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting LA 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/client/local+ diff --git a/util/checkchange/checkchange.go b/util/checkchange/checkchange.go new file mode 100644 index 000000000..4d18730f1 --- /dev/null +++ b/util/checkchange/checkchange.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package checkchange defines a utility for determining whether a value +// has changed since the last time it was checked. +package checkchange + +// EqualCloner is an interface for types that can be compared for equality +// and can be cloned. 
+type EqualCloner[T any] interface { + Equal(T) bool + Clone() T +} + +// Update sets *old to a clone of new if they are not equal, returning whether +// they were different. +// +// It only modifies *old if they are different. old must be non-nil. +func Update[T EqualCloner[T]](old *T, new T) (changed bool) { + if new.Equal(*old) { + return false + } + *old = new.Clone() + return true +} diff --git a/wgengine/router/router.go b/wgengine/router/router.go index 7723138f4..df65e697d 100644 --- a/wgengine/router/router.go +++ b/wgengine/router/router.go @@ -11,6 +11,7 @@ import ( "net/netip" "reflect" "runtime" + "slices" "github.com/tailscale/wireguard-go/tun" "tailscale.com/feature" @@ -146,3 +147,15 @@ func (a *Config) Equal(b *Config) bool { } return reflect.DeepEqual(a, b) } + +func (c *Config) Clone() *Config { + if c == nil { + return nil + } + c2 := *c + c2.LocalAddrs = slices.Clone(c.LocalAddrs) + c2.Routes = slices.Clone(c.Routes) + c2.LocalRoutes = slices.Clone(c.LocalRoutes) + c2.SubnetRoutes = slices.Clone(c.SubnetRoutes) + return &c2 +} diff --git a/wgengine/userspace.go b/wgengine/userspace.go index c88ab78a1..e971f0e39 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -10,8 +10,10 @@ import ( "errors" "fmt" "io" + "maps" "math" "net/netip" + "reflect" "runtime" "slices" "strings" @@ -45,8 +47,8 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/views" + "tailscale.com/util/checkchange" "tailscale.com/util/clientmetric" - "tailscale.com/util/deephash" "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/set" @@ -128,9 +130,9 @@ type userspaceEngine struct { wgLock sync.Mutex // serializes all wgdev operations; see lock order comment below lastCfgFull wgcfg.Config lastNMinPeers int - lastRouterSig deephash.Sum // of router.Config - lastEngineSigFull deephash.Sum // of full wireguard config - lastEngineSigTrim deephash.Sum // of trimmed wireguard config + lastRouter *router.Config + lastEngineFull *wgcfg.Config // of full wireguard config, not trimmed + lastEngineInputs *maybeReconfigInputs lastDNSConfig *dns.Config lastIsSubnetRouter bool // was the node a primary subnet router in the last run. recvActivityAt map[key.NodePublic]mono.Time @@ -725,6 +727,29 @@ func (e *userspaceEngine) isActiveSinceLocked(nk key.NodePublic, ip netip.Addr, return timePtr.LoadAtomic().After(t) } +// maybeReconfigInputs holds the inputs to the maybeReconfigWireguardLocked +// function. If these things don't change between calls, there's nothing to do. +type maybeReconfigInputs struct { + WGConfig *wgcfg.Config + TrimmedNodes map[key.NodePublic]bool + TrackNodes views.Slice[key.NodePublic] + TrackIPs views.Slice[netip.Addr] +} + +func (i *maybeReconfigInputs) Equal(o *maybeReconfigInputs) bool { + return reflect.DeepEqual(i, o) +} + +func (i *maybeReconfigInputs) Clone() *maybeReconfigInputs { + if i == nil { + return nil + } + v := *i + v.WGConfig = i.WGConfig.Clone() + v.TrimmedNodes = maps.Clone(i.TrimmedNodes) + return &v +} + // discoChanged are the set of peers whose disco keys have changed, implying they've restarted. // If a peer is in this set and was previously in the live wireguard config, // it needs to be first removed and then re-added to flush out its wireguard session key. 
@@ -803,12 +828,12 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node } e.lastNMinPeers = len(min.Peers) - if changed := deephash.Update(&e.lastEngineSigTrim, &struct { - WGConfig *wgcfg.Config - TrimmedNodes map[key.NodePublic]bool - TrackNodes []key.NodePublic - TrackIPs []netip.Addr - }{&min, e.trimmedNodes, trackNodes, trackIPs}); !changed { + if changed := checkchange.Update(&e.lastEngineInputs, &maybeReconfigInputs{ + WGConfig: &min, + TrimmedNodes: e.trimmedNodes, + TrackNodes: views.SliceOf(trackNodes), + TrackIPs: views.SliceOf(trackIPs), + }); !changed { return nil } @@ -937,7 +962,6 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, e.wgLock.Lock() defer e.wgLock.Unlock() e.tundev.SetWGConfig(cfg) - e.lastDNSConfig = dnsCfg peerSet := make(set.Set[key.NodePublic], len(cfg.Peers)) e.mu.Lock() @@ -965,14 +989,12 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, } isSubnetRouterChanged := isSubnetRouter != e.lastIsSubnetRouter - engineChanged := deephash.Update(&e.lastEngineSigFull, cfg) - routerChanged := deephash.Update(&e.lastRouterSig, &struct { - RouterConfig *router.Config - DNSConfig *dns.Config - }{routerCfg, dnsCfg}) + engineChanged := checkchange.Update(&e.lastEngineFull, cfg) + dnsChanged := checkchange.Update(&e.lastDNSConfig, dnsCfg) + routerChanged := checkchange.Update(&e.lastRouter, routerCfg) listenPortChanged := listenPort != e.magicConn.LocalPort() peerMTUChanged := peerMTUEnable != e.magicConn.PeerMTUEnabled() - if !engineChanged && !routerChanged && !listenPortChanged && !isSubnetRouterChanged && !peerMTUChanged { + if !engineChanged && !routerChanged && !dnsChanged && !listenPortChanged && !isSubnetRouterChanged && !peerMTUChanged { return ErrNoChanges } newLogIDs := cfg.NetworkLogging diff --git a/wgengine/wgcfg/config.go b/wgengine/wgcfg/config.go index 154dc0a30..926964a4b 100644 --- a/wgengine/wgcfg/config.go +++ b/wgengine/wgcfg/config.go @@ -6,6 +6,7 @@ package wgcfg import ( "net/netip" + "slices" "tailscale.com/tailcfg" "tailscale.com/types/key" @@ -35,6 +36,20 @@ type Config struct { } } +func (c *Config) Equal(o *Config) bool { + if c == nil || o == nil { + return c == o + } + return c.Name == o.Name && + c.NodeID == o.NodeID && + c.PrivateKey.Equal(o.PrivateKey) && + c.MTU == o.MTU && + c.NetworkLogging == o.NetworkLogging && + slices.Equal(c.Addresses, o.Addresses) && + slices.Equal(c.DNS, o.DNS) && + slices.EqualFunc(c.Peers, o.Peers, Peer.Equal) +} + type Peer struct { PublicKey key.NodePublic DiscoKey key.DiscoPublic // present only so we can handle restarts within wgengine, not passed to WireGuard @@ -50,6 +65,24 @@ type Peer struct { WGEndpoint key.NodePublic } +func addrPtrEq(a, b *netip.Addr) bool { + if a == nil || b == nil { + return a == b + } + return *a == *b +} + +func (p Peer) Equal(o Peer) bool { + return p.PublicKey == o.PublicKey && + p.DiscoKey == o.DiscoKey && + slices.Equal(p.AllowedIPs, o.AllowedIPs) && + p.IsJailed == o.IsJailed && + p.PersistentKeepalive == o.PersistentKeepalive && + addrPtrEq(p.V4MasqAddr, o.V4MasqAddr) && + addrPtrEq(p.V6MasqAddr, o.V6MasqAddr) && + p.WGEndpoint == o.WGEndpoint +} + // PeerWithKey returns the Peer with key k and reports whether it was found. 
func (config Config) PeerWithKey(k key.NodePublic) (Peer, bool) { for _, p := range config.Peers { diff --git a/wgengine/wgcfg/config_test.go b/wgengine/wgcfg/config_test.go new file mode 100644 index 000000000..5ac3b7cd5 --- /dev/null +++ b/wgengine/wgcfg/config_test.go @@ -0,0 +1,41 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package wgcfg + +import ( + "reflect" + "testing" +) + +// Tests that [Config.Equal] tests all fields of [Config], even ones +// that might get added in the future. +func TestConfigEqual(t *testing.T) { + rt := reflect.TypeFor[Config]() + for i := range rt.NumField() { + sf := rt.Field(i) + switch sf.Name { + case "Name", "NodeID", "PrivateKey", "MTU", "Addresses", "DNS", "Peers", + "NetworkLogging": + // These are compared in [Config.Equal]. + default: + t.Errorf("Have you added field %q to Config.Equal? Do so if not, and then update TestConfigEqual", sf.Name) + } + } +} + +// Tests that [Peer.Equal] tests all fields of [Peer], even ones +// that might get added in the future. +func TestPeerEqual(t *testing.T) { + rt := reflect.TypeFor[Peer]() + for i := range rt.NumField() { + sf := rt.Field(i) + switch sf.Name { + case "PublicKey", "DiscoKey", "AllowedIPs", "IsJailed", + "PersistentKeepalive", "V4MasqAddr", "V6MasqAddr", "WGEndpoint": + // These are compared in [Peer.Equal]. + default: + t.Errorf("Have you added field %q to Peer.Equal? Do so if not, and then update TestPeerEqual", sf.Name) + } + } +} From eabc62a9ddc45646bf55f20928832b6c4e4ad2d8 Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Tue, 7 Oct 2025 11:52:41 +0100 Subject: [PATCH 1509/1708] ipn/ipnlocal: don't send LoginFinished unless auth was in progress (#17266) Before we introduced seamless, the "blocked" state was used to track: * Whether a login was required for connectivity, and therefore we should keep the engine deconfigured until that happened * Whether authentication was in progress "blocked" would stop authReconfig from running. We want this when a login is required: if your key has expired we want to deconfigure the engine and keep it down, so that you don't keep using exit nodes (which won't work because your key has expired). Taking the engine down while auth was in progress was undesirable, so we don't do that with seamless renewal. However, not entering the "blocked" state meant that we needed to change the logic for when to send LoginFinished on the IPN bus after seeing StateAuthenticated from the controlclient. Initially we changed the "if blocked" check to "if blocked or seamless is enabled" which was correct in other places. In this place however, it introduced a bug: we are sending LoginFinished every time we see StateAuthenticated, which happens even on a down & up, or a profile switch. This in turn made it harder for UI clients to track when authentication is complete. Instead we should only send it out if we were blocked (i.e. seamless is disabled, or our key expired) or an auth was in progress. 
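As a rough sketch of the intended logic (illustration only, not part of the diff; authWasInProgress is the new local introduced below, derived from whether an auth URL was outstanding when the control status arrived):

	wasBlocked := b.blocked
	authWasInProgress := b.authURL != ""
	// ... key-expiry and engine-unblock handling elided ...
	if st.LoginFinished() && (wasBlocked || authWasInProgress) {
		// Only notify the IPN bus of LoginFinished when we were actually
		// waiting on an auth: either the engine was blocked, or an
		// interactive login was in flight.
	}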
Updates tailscale/corp#31476 Updates tailscale/corp#32645 Fixes #17363 Signed-off-by: James Sanderson --- ipn/ipnlocal/local.go | 3 ++- ipn/ipnlocal/state_test.go | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c8b49de75..c07cc42a1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1600,6 +1600,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control } wasBlocked := b.blocked + authWasInProgress := b.authURL != "" keyExpiryExtended := false if st.NetMap != nil { wasExpired := b.keyExpired @@ -1617,7 +1618,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.blockEngineUpdates(false) } - if st.LoginFinished() && (wasBlocked || b.seamlessRenewalEnabled()) { + if st.LoginFinished() && (wasBlocked || authWasInProgress) { if wasBlocked { // Auth completed, unblock the engine b.blockEngineUpdates(false) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index d773f7227..a4b9ba1f4 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -348,6 +348,14 @@ func (b *LocalBackend) nonInteractiveLoginForStateTest() { // predictable, but maybe a bit less thorough. This is more of an overall // state machine test than a test of the wgengine+magicsock integration. func TestStateMachine(t *testing.T) { + runTestStateMachine(t, false) +} + +func TestStateMachineSeamless(t *testing.T) { + runTestStateMachine(t, true) +} + +func runTestStateMachine(t *testing.T, seamless bool) { envknob.Setenv("TAILSCALE_USE_WIP_CODE", "1") defer envknob.Setenv("TAILSCALE_USE_WIP_CODE", "") c := qt.New(t) @@ -545,6 +553,13 @@ func TestStateMachine(t *testing.T) { notifies.expect(3) cc.persist.UserProfile.LoginName = "user1" cc.persist.NodeID = "node1" + + // even if seamless is being enabled by default rather than by policy, this is + // the point where it will first get enabled. + if seamless { + sys.ControlKnobs().SeamlessKeyRenewal.Store(true) + } + cc.send(nil, "", true, &netmap.NetworkMap{}) { nn := notifies.drain(3) From 63f7a400a8fbe89eaa9b2ba559a4300df842fcc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Tue, 7 Oct 2025 09:30:27 -0400 Subject: [PATCH 1510/1708] wgengine/{magicsock,userspace,router}: move portupdates to the eventbus (#17423) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also pull out interface method only needed in Linux. Instead of having userspace do the call into the router, just let the router pick up the change itself. 
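Roughly, the new flow looks like the sketch below (names follow the diff; locking, parsing, and surrounding control flow elided):

	// magicsock, after rebinding to a new local port:
	c.portUpdatePub.Publish(router.PortUpdate{
		UDPPort:         uint16(gotPort),
		EndpointNetwork: network, // "udp4" or "udp6"
	})

	// Linux osrouter, in its eventbus receive loop:
	case pu := <-portUpdateSub.Events():
		if err := r.updateMagicsockPort(pu.UDPPort, pu.EndpointNetwork); err != nil {
			r.logf("updateMagicsockPort(port=%v, network=%s) failed: %v",
				pu.UDPPort, pu.EndpointNetwork, err)
		}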
Updates #15160 Signed-off-by: Claus Lensbøl --- wgengine/magicsock/magicsock.go | 19 ++- wgengine/router/callback.go | 7 - wgengine/router/osrouter/router_linux.go | 122 ++++++++++-------- wgengine/router/osrouter/router_openbsd.go | 7 - wgengine/router/osrouter/router_plan9.go | 7 - .../router/osrouter/router_userspace_bsd.go | 7 - wgengine/router/osrouter/router_windows.go | 7 - wgengine/router/router.go | 16 +-- wgengine/router/router_fake.go | 5 - wgengine/userspace.go | 8 -- 10 files changed, 83 insertions(+), 122 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 112085053..c7d07c277 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -67,6 +67,7 @@ import ( "tailscale.com/util/testenv" "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/router" "tailscale.com/wgengine/wgint" ) @@ -179,6 +180,7 @@ type Conn struct { // config changes between magicsock and wireguard. syncPub *eventbus.Publisher[syncPoint] allocRelayEndpointPub *eventbus.Publisher[UDPRelayAllocReq] + portUpdatePub *eventbus.Publisher[router.PortUpdate] // pconn4 and pconn6 are the underlying UDP sockets used to // send/receive packets for wireguard and other magicsock @@ -393,10 +395,6 @@ type Conn struct { // wgPinger is the WireGuard only pinger used for latency measurements. wgPinger lazy.SyncValue[*ping.Pinger] - // onPortUpdate is called with the new port when magicsock rebinds to - // a new port. - onPortUpdate func(port uint16, network string) - // getPeerByKey optionally specifies a function to look up a peer's // wireguard state by its public key. If nil, it's not used. getPeerByKey func(key.NodePublic) (_ wgint.Peer, ok bool) @@ -492,10 +490,6 @@ type Options struct { // If nil, they're ignored and not updated. ControlKnobs *controlknobs.Knobs - // OnPortUpdate is called with the new port when magicsock rebinds to - // a new port. - OnPortUpdate func(port uint16, network string) - // PeerByKeyFunc optionally specifies a function to look up a peer's // WireGuard state by its public key. If nil, it's not used. // In regular use, this will be wgengine.(*userspaceEngine).PeerByKey. 
@@ -735,6 +729,7 @@ func NewConn(opts Options) (*Conn, error) { cli := c.eventBus.Client("magicsock.Conn") c.syncPub = eventbus.Publish[syncPoint](cli) c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](cli) + c.portUpdatePub = eventbus.Publish[router.PortUpdate](cli) c.eventSubs = cli.Monitor(c.consumeEventbusTopics(cli)) c.connCtx, c.connCtxCancel = context.WithCancel(context.Background()) @@ -759,7 +754,6 @@ func NewConn(opts Options) (*Conn, error) { c.netMon = opts.NetMon c.health = opts.HealthTracker - c.onPortUpdate = opts.OnPortUpdate c.getPeerByKey = opts.PeerByKeyFunc if err := c.rebind(keepCurrentPort); err != nil { @@ -3533,7 +3527,7 @@ func (c *Conn) bindSocket(ruc *RebindingUDPConn, network string, curPortFate cur c.logf("magicsock: unable to bind %v port %d: %v", network, port, err) continue } - if c.onPortUpdate != nil { + if c.portUpdatePub.ShouldPublish() { _, gotPortStr, err := net.SplitHostPort(pconn.LocalAddr().String()) if err != nil { c.logf("could not parse port from %s: %w", pconn.LocalAddr().String(), err) @@ -3542,7 +3536,10 @@ func (c *Conn) bindSocket(ruc *RebindingUDPConn, network string, curPortFate cur if err != nil { c.logf("could not parse port from %s: %w", gotPort, err) } else { - c.onPortUpdate(uint16(gotPort), network) + c.portUpdatePub.Publish(router.PortUpdate{ + UDPPort: uint16(gotPort), + EndpointNetwork: network, + }) } } } diff --git a/wgengine/router/callback.go b/wgengine/router/callback.go index 1d9091277..c1838539b 100644 --- a/wgengine/router/callback.go +++ b/wgengine/router/callback.go @@ -56,13 +56,6 @@ func (r *CallbackRouter) Set(rcfg *Config) error { return r.SetBoth(r.rcfg, r.dcfg) } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. -func (r *CallbackRouter) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - // SetDNS implements dns.OSConfigurator. func (r *CallbackRouter) SetDNS(dcfg dns.OSConfig) error { r.mu.Lock() diff --git a/wgengine/router/osrouter/router_linux.go b/wgengine/router/osrouter/router_linux.go index cf1a9f027..835a9050f 100644 --- a/wgengine/router/osrouter/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -14,6 +14,7 @@ import ( "os/exec" "strconv" "strings" + "sync" "sync/atomic" "syscall" "time" @@ -54,21 +55,14 @@ const ( ) type linuxRouter struct { - closed atomic.Bool - logf func(fmt string, args ...any) - tunname string - netMon *netmon.Monitor - health *health.Tracker - eventSubs eventbus.Monitor - rulesAddedPub *eventbus.Publisher[AddIPRules] - unregNetMon func() - addrs map[netip.Prefix]bool - routes map[netip.Prefix]bool - localRoutes map[netip.Prefix]bool - snatSubnetRoutes bool - statefulFiltering bool - netfilterMode preftype.NetfilterMode - netfilterKind string + closed atomic.Bool + logf func(fmt string, args ...any) + tunname string + netMon *netmon.Monitor + health *health.Tracker + eventSubs eventbus.Monitor + rulesAddedPub *eventbus.Publisher[AddIPRules] + unregNetMon func() // ruleRestorePending is whether a timer has been started to // restore deleted ip rules. 
@@ -86,8 +80,16 @@ type linuxRouter struct { cmd commandRunner nfr linuxfw.NetfilterRunner - magicsockPortV4 atomic.Uint32 // actually a uint16 - magicsockPortV6 atomic.Uint32 // actually a uint16 + mu sync.Mutex + addrs map[netip.Prefix]bool + routes map[netip.Prefix]bool + localRoutes map[netip.Prefix]bool + snatSubnetRoutes bool + statefulFiltering bool + netfilterMode preftype.NetfilterMode + netfilterKind string + magicsockPortV4 uint16 + magicsockPortV6 uint16 } func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { @@ -169,6 +171,7 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon // [eventbus.Client] is closed. func (r *linuxRouter) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { ruleDeletedSub := eventbus.Subscribe[netmon.RuleDeleted](ec) + portUpdateSub := eventbus.Subscribe[router.PortUpdate](ec) return func(ec *eventbus.Client) { for { select { @@ -176,6 +179,11 @@ func (r *linuxRouter) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus. return case rs := <-ruleDeletedSub.Events(): r.onIPRuleDeleted(rs.Table, rs.Priority) + case pu := <-portUpdateSub.Events(): + r.logf("portUpdate(port=%v, network=%s)", pu.UDPPort, pu.EndpointNetwork) + if err := r.updateMagicsockPort(pu.UDPPort, pu.EndpointNetwork); err != nil { + r.logf("updateMagicsockPort(port=%v, network=%s) failed: %v", pu.UDPPort, pu.EndpointNetwork, err) + } } } } @@ -355,7 +363,9 @@ func (r *linuxRouter) onIPRuleDeleted(table uint8, priority uint32) { } func (r *linuxRouter) Up() error { - if err := r.setNetfilterMode(netfilterOff); err != nil { + r.mu.Lock() + defer r.mu.Unlock() + if err := r.setNetfilterModeLocked(netfilterOff); err != nil { return fmt.Errorf("setting netfilter mode: %w", err) } if err := r.addIPRules(); err != nil { @@ -369,6 +379,8 @@ func (r *linuxRouter) Up() error { } func (r *linuxRouter) Close() error { + r.mu.Lock() + defer r.mu.Unlock() r.closed.Store(true) if r.unregNetMon != nil { r.unregNetMon() @@ -380,7 +392,7 @@ func (r *linuxRouter) Close() error { if err := r.delIPRules(); err != nil { return err } - if err := r.setNetfilterMode(netfilterOff); err != nil { + if err := r.setNetfilterModeLocked(netfilterOff); err != nil { return err } if err := r.delRoutes(); err != nil { @@ -394,10 +406,10 @@ func (r *linuxRouter) Close() error { return nil } -// setupNetfilter initializes the NetfilterRunner in r.nfr. It expects r.nfr +// setupNetfilterLocked initializes the NetfilterRunner in r.nfr. It expects r.nfr // to be nil, or the current netfilter to be set to netfilterOff. // kind should be either a linuxfw.FirewallMode, or the empty string for auto. -func (r *linuxRouter) setupNetfilter(kind string) error { +func (r *linuxRouter) setupNetfilterLocked(kind string) error { r.netfilterKind = kind var err error @@ -411,24 +423,26 @@ func (r *linuxRouter) setupNetfilter(kind string) error { // Set implements the Router interface. 
func (r *linuxRouter) Set(cfg *router.Config) error { + r.mu.Lock() + defer r.mu.Unlock() var errs []error if cfg == nil { cfg = &shutdownConfig } if cfg.NetfilterKind != r.netfilterKind { - if err := r.setNetfilterMode(netfilterOff); err != nil { + if err := r.setNetfilterModeLocked(netfilterOff); err != nil { err = fmt.Errorf("could not disable existing netfilter: %w", err) errs = append(errs, err) } else { r.nfr = nil - if err := r.setupNetfilter(cfg.NetfilterKind); err != nil { + if err := r.setupNetfilterLocked(cfg.NetfilterKind); err != nil { errs = append(errs, err) } } } - if err := r.setNetfilterMode(cfg.NetfilterMode); err != nil { + if err := r.setNetfilterModeLocked(cfg.NetfilterMode); err != nil { errs = append(errs, err) } @@ -470,11 +484,11 @@ func (r *linuxRouter) Set(cfg *router.Config) error { case cfg.StatefulFiltering == r.statefulFiltering: // state already correct, nothing to do. case cfg.StatefulFiltering: - if err := r.addStatefulRule(); err != nil { + if err := r.addStatefulRuleLocked(); err != nil { errs = append(errs, err) } default: - if err := r.delStatefulRule(); err != nil { + if err := r.delStatefulRuleLocked(); err != nil { errs = append(errs, err) } } @@ -538,15 +552,17 @@ func (r *linuxRouter) updateStatefulFilteringWithDockerWarning(cfg *router.Confi r.health.SetHealthy(dockerStatefulFilteringWarnable) } -// UpdateMagicsockPort implements the Router interface. -func (r *linuxRouter) UpdateMagicsockPort(port uint16, network string) error { +// updateMagicsockPort implements the Router interface. +func (r *linuxRouter) updateMagicsockPort(port uint16, network string) error { + r.mu.Lock() + defer r.mu.Unlock() if r.nfr == nil { - if err := r.setupNetfilter(r.netfilterKind); err != nil { + if err := r.setupNetfilterLocked(r.netfilterKind); err != nil { return fmt.Errorf("could not setup netfilter: %w", err) } } - var magicsockPort *atomic.Uint32 + var magicsockPort *uint16 switch network { case "udp4": magicsockPort = &r.magicsockPortV4 @@ -566,45 +582,41 @@ func (r *linuxRouter) UpdateMagicsockPort(port uint16, network string) error { // set the port, we'll make the firewall rule when netfilter turns back on if r.netfilterMode == netfilterOff { - magicsockPort.Store(uint32(port)) + *magicsockPort = port return nil } - cur := magicsockPort.Load() - - if cur == uint32(port) { + if *magicsockPort == port { return nil } - if cur != 0 { - if err := r.nfr.DelMagicsockPortRule(uint16(cur), network); err != nil { + if *magicsockPort != 0 { + if err := r.nfr.DelMagicsockPortRule(*magicsockPort, network); err != nil { return fmt.Errorf("del magicsock port rule: %w", err) } } if port != 0 { - if err := r.nfr.AddMagicsockPortRule(uint16(port), network); err != nil { + if err := r.nfr.AddMagicsockPortRule(*magicsockPort, network); err != nil { return fmt.Errorf("add magicsock port rule: %w", err) } } - magicsockPort.Store(uint32(port)) + *magicsockPort = port return nil } -// setNetfilterMode switches the router to the given netfilter +// setNetfilterModeLocked switches the router to the given netfilter // mode. Netfilter state is created or deleted appropriately to // reflect the new mode, and r.snatSubnetRoutes is updated to reflect // the current state of subnet SNATing. 
-func (r *linuxRouter) setNetfilterMode(mode preftype.NetfilterMode) error { +func (r *linuxRouter) setNetfilterModeLocked(mode preftype.NetfilterMode) error { if !platformCanNetfilter() { mode = netfilterOff } if r.nfr == nil { - var err error - r.nfr, err = linuxfw.New(r.logf, r.netfilterKind) - if err != nil { + if err := r.setupNetfilterLocked(r.netfilterKind); err != nil { return err } } @@ -660,13 +672,13 @@ func (r *linuxRouter) setNetfilterMode(mode preftype.NetfilterMode) error { if err := r.nfr.AddBase(r.tunname); err != nil { return err } - if mport := uint16(r.magicsockPortV4.Load()); mport != 0 { - if err := r.nfr.AddMagicsockPortRule(mport, "udp4"); err != nil { + if r.magicsockPortV4 != 0 { + if err := r.nfr.AddMagicsockPortRule(r.magicsockPortV4, "udp4"); err != nil { return fmt.Errorf("could not add magicsock port rule v4: %w", err) } } - if mport := uint16(r.magicsockPortV6.Load()); mport != 0 && r.getV6FilteringAvailable() { - if err := r.nfr.AddMagicsockPortRule(mport, "udp6"); err != nil { + if r.magicsockPortV6 != 0 && r.getV6FilteringAvailable() { + if err := r.nfr.AddMagicsockPortRule(r.magicsockPortV6, "udp6"); err != nil { return fmt.Errorf("could not add magicsock port rule v6: %w", err) } } @@ -700,13 +712,13 @@ func (r *linuxRouter) setNetfilterMode(mode preftype.NetfilterMode) error { if err := r.nfr.AddBase(r.tunname); err != nil { return err } - if mport := uint16(r.magicsockPortV4.Load()); mport != 0 { - if err := r.nfr.AddMagicsockPortRule(mport, "udp4"); err != nil { + if r.magicsockPortV4 != 0 { + if err := r.nfr.AddMagicsockPortRule(r.magicsockPortV4, "udp4"); err != nil { return fmt.Errorf("could not add magicsock port rule v4: %w", err) } } - if mport := uint16(r.magicsockPortV6.Load()); mport != 0 && r.getV6FilteringAvailable() { - if err := r.nfr.AddMagicsockPortRule(mport, "udp6"); err != nil { + if r.magicsockPortV6 != 0 && r.getV6FilteringAvailable() { + if err := r.nfr.AddMagicsockPortRule(r.magicsockPortV6, "udp6"); err != nil { return fmt.Errorf("could not add magicsock port rule v6: %w", err) } } @@ -1483,9 +1495,9 @@ func (r *linuxRouter) delSNATRule() error { return nil } -// addStatefulRule adds a netfilter rule to perform stateful filtering from +// addStatefulRuleLocked adds a netfilter rule to perform stateful filtering from // subnets onto the tailnet. -func (r *linuxRouter) addStatefulRule() error { +func (r *linuxRouter) addStatefulRuleLocked() error { if r.netfilterMode == netfilterOff { return nil } @@ -1493,9 +1505,9 @@ func (r *linuxRouter) addStatefulRule() error { return r.nfr.AddStatefulRule(r.tunname) } -// delStatefulRule removes the netfilter rule to perform stateful filtering +// delStatefulRuleLocked removes the netfilter rule to perform stateful filtering // from subnets onto the tailnet. -func (r *linuxRouter) delStatefulRule() error { +func (r *linuxRouter) delStatefulRuleLocked() error { if r.netfilterMode == netfilterOff { return nil } diff --git a/wgengine/router/osrouter/router_openbsd.go b/wgengine/router/osrouter/router_openbsd.go index 8f3599309..55b485f0e 100644 --- a/wgengine/router/osrouter/router_openbsd.go +++ b/wgengine/router/osrouter/router_openbsd.go @@ -238,13 +238,6 @@ func (r *openbsdRouter) Set(cfg *router.Config) error { return errq } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. 
-func (r *openbsdRouter) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - func (r *openbsdRouter) Close() error { cleanUp(r.logf, r.tunname) return nil diff --git a/wgengine/router/osrouter/router_plan9.go b/wgengine/router/osrouter/router_plan9.go index 5872aa7fc..a5b461a6f 100644 --- a/wgengine/router/osrouter/router_plan9.go +++ b/wgengine/router/osrouter/router_plan9.go @@ -115,13 +115,6 @@ func (r *plan9Router) Set(cfg *router.Config) error { return nil } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. -func (r *plan9Router) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - func (r *plan9Router) Close() error { // TODO(bradfitz): unbind return nil diff --git a/wgengine/router/osrouter/router_userspace_bsd.go b/wgengine/router/osrouter/router_userspace_bsd.go index cdaf3adea..70ef2b6bf 100644 --- a/wgengine/router/osrouter/router_userspace_bsd.go +++ b/wgengine/router/osrouter/router_userspace_bsd.go @@ -206,13 +206,6 @@ func (r *userspaceBSDRouter) Set(cfg *router.Config) (reterr error) { return reterr } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. -func (r *userspaceBSDRouter) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - func (r *userspaceBSDRouter) Close() error { return nil } diff --git a/wgengine/router/osrouter/router_windows.go b/wgengine/router/osrouter/router_windows.go index 05bf210e8..a1acbe3b6 100644 --- a/wgengine/router/osrouter/router_windows.go +++ b/wgengine/router/osrouter/router_windows.go @@ -114,13 +114,6 @@ func hasDefaultRoute(routes []netip.Prefix) bool { return false } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. -func (r *winRouter) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - func (r *winRouter) Close() error { r.firewall.clear() diff --git a/wgengine/router/router.go b/wgengine/router/router.go index df65e697d..04cc89887 100644 --- a/wgengine/router/router.go +++ b/wgengine/router/router.go @@ -35,14 +35,6 @@ type Router interface { // implementation should handle gracefully. Set(*Config) error - // UpdateMagicsockPort tells the OS network stack what port magicsock - // is currently listening on, so it can be threaded through firewalls - // and such. This is distinct from Set() since magicsock may rebind - // ports independently from the Config changing. - // - // network should be either "udp4" or "udp6". - UpdateMagicsockPort(port uint16, network string) error - // Close closes the router. Close() error } @@ -56,6 +48,14 @@ type NewOpts struct { Bus *eventbus.Bus // required } +// PortUpdate is an eventbus value, reporting the port and address family +// magicsock is currently listening on, so it can be threaded through firewalls +// and such. +type PortUpdate struct { + UDPPort uint16 + EndpointNetwork string // either "udp4" or "udp6". +} + // HookNewUserspaceRouter is the registration point for router implementations // to register a constructor for userspace routers. It's meant for implementations // in wgengine/router/osrouter. 
diff --git a/wgengine/router/router_fake.go b/wgengine/router/router_fake.go index 549867eca..db35fc9ee 100644 --- a/wgengine/router/router_fake.go +++ b/wgengine/router/router_fake.go @@ -27,11 +27,6 @@ func (r fakeRouter) Set(cfg *Config) error { return nil } -func (r fakeRouter) UpdateMagicsockPort(_ uint16, _ string) error { - r.logf("[v1] warning: fakeRouter.UpdateMagicsockPort: not implemented.") - return nil -} - func (r fakeRouter) Close() error { r.logf("[v1] warning: fakeRouter.Close: not implemented.") return nil diff --git a/wgengine/userspace.go b/wgengine/userspace.go index e971f0e39..b8a136da7 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -398,13 +398,6 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) e.RequestStatus() } - onPortUpdate := func(port uint16, network string) { - e.logf("onPortUpdate(port=%v, network=%s)", port, network) - - if err := e.router.UpdateMagicsockPort(port, network); err != nil { - e.logf("UpdateMagicsockPort(port=%v, network=%s) failed: %v", port, network, err) - } - } magicsockOpts := magicsock.Options{ EventBus: e.eventBus, Logf: logf, @@ -416,7 +409,6 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) HealthTracker: e.health, Metrics: conf.Metrics, ControlKnobs: conf.ControlKnobs, - OnPortUpdate: onPortUpdate, PeerByKeyFunc: e.PeerByKey, } if buildfeatures.HasLazyWG { From 232b928974500c3b5617a47f3a8a7cf911d9e194 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Oct 2025 22:08:38 -0700 Subject: [PATCH 1511/1708] feature/linkspeed: move cosmetic tstun netlink code out to modular feature Part of making all netlink monitoring code optional. Updates #17311 (how I got started down this path) Updates #12614 Change-Id: Ic80d8a7a44dc261c4b8678b3c2241c3b3778370d Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 3 +-- cmd/tailscaled/depaware-min.txt | 3 +-- cmd/tailscaled/depaware-minbox.txt | 3 +-- cmd/tailscaled/depaware.txt | 3 ++- cmd/tailscaled/deps_test.go | 5 +++-- cmd/tsidp/depaware.txt | 3 +-- .../buildfeatures/feature_linkspeed_disabled.go | 13 +++++++++++++ feature/buildfeatures/feature_linkspeed_enabled.go | 13 +++++++++++++ feature/condregister/maybe_linkspeed.go | 8 ++++++++ feature/featuretags/featuretags.go | 4 ++++ feature/linkspeed/doc.go | 6 ++++++ .../linkspeed/linkspeed_linux.go | 9 +++++++-- net/tstun/linkattrs_notlinux.go | 12 ------------ net/tstun/tun.go | 14 +++++++++++--- tsnet/depaware.txt | 3 +-- 15 files changed, 72 insertions(+), 30 deletions(-) create mode 100644 feature/buildfeatures/feature_linkspeed_disabled.go create mode 100644 feature/buildfeatures/feature_linkspeed_enabled.go create mode 100644 feature/condregister/maybe_linkspeed.go create mode 100644 feature/linkspeed/doc.go rename net/tstun/linkattrs_linux.go => feature/linkspeed/linkspeed_linux.go (91%) delete mode 100644 net/tstun/linkattrs_notlinux.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index d1a63a188..9851cf9af 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -74,7 +74,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/mailru/easyjson/buffer from github.com/mailru/easyjson/jwriter 💣 github.com/mailru/easyjson/jlexer from github.com/go-openapi/swag github.com/mailru/easyjson/jwriter from github.com/go-openapi/swag - L github.com/mdlayher/genetlink from tailscale.com/net/tstun L 💣 github.com/mdlayher/netlink from 
github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ @@ -907,7 +906,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+ golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+ - golang.org/x/net/bpf from github.com/mdlayher/genetlink+ + golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 1ef3568d1..30974287c 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -19,7 +19,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd - github.com/mdlayher/genetlink from tailscale.com/net/tstun 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink @@ -204,7 +203,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from tailscale.com/util/set golang.org/x/exp/maps from tailscale.com/ipn/store/mem - golang.org/x/net/bpf from github.com/mdlayher/genetlink+ + golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/ipn/ipnlocal+ golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/icmp from tailscale.com/net/ping diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index a7f5d2e0e..32c84d744 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -25,7 +25,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd github.com/mattn/go-colorable from tailscale.com/cmd/tailscale/cli github.com/mattn/go-isatty from github.com/mattn/go-colorable+ - github.com/mdlayher/genetlink from tailscale.com/net/tstun 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink @@ -232,7 +231,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from tailscale.com/util/set golang.org/x/exp/maps from tailscale.com/ipn/store/mem - golang.org/x/net/bpf from github.com/mdlayher/genetlink+ + golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/cmd/tailscale/cli+ golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/icmp from tailscale.com/net/ping diff --git a/cmd/tailscaled/depaware.txt 
b/cmd/tailscaled/depaware.txt index 541e9f3fc..60bf623e2 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -143,7 +143,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd github.com/kortschak/wol from tailscale.com/feature/wakeonlan LD github.com/kr/fs from github.com/pkg/sftp - L github.com/mdlayher/genetlink from tailscale.com/net/tstun + L github.com/mdlayher/genetlink from tailscale.com/feature/linkspeed L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables @@ -285,6 +285,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister tailscale.com/feature/doctor from tailscale.com/feature/condregister tailscale.com/feature/drive from tailscale.com/feature/condregister + L tailscale.com/feature/linkspeed from tailscale.com/feature/condregister L tailscale.com/feature/linuxdnsfight from tailscale.com/feature/condregister tailscale.com/feature/portlist from tailscale.com/feature/condregister tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 0711bafba..b98c53eb5 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -285,8 +285,9 @@ func TestMinTailscaledWithCLI(t *testing.T) { } }, BadDeps: map[string]string{ - "golang.org/x/net/http2": "unexpected x/net/http2 dep; tailscale/tailscale#17305", - "expvar": "unexpected expvar dep", + "golang.org/x/net/http2": "unexpected x/net/http2 dep; tailscale/tailscale#17305", + "expvar": "unexpected expvar dep", + "github.com/mdlayher/genetlink": "unexpected genetlink dep", }, }.Check(t) } diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index eb2086947..0ae8761e5 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -38,7 +38,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd - L github.com/mdlayher/genetlink from tailscale.com/net/tstun L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ @@ -335,7 +334,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ - golang.org/x/net/bpf from github.com/mdlayher/genetlink+ + golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy diff --git a/feature/buildfeatures/feature_linkspeed_disabled.go b/feature/buildfeatures/feature_linkspeed_disabled.go new file mode 100644 index 000000000..19e254a74 --- /dev/null +++ 
b/feature/buildfeatures/feature_linkspeed_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_linkspeed + +package buildfeatures + +// HasLinkSpeed is whether the binary was built with support for modular feature "Set link speed on TUN device for better OS integration (Linux only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_linkspeed" build tag. +// It's a const so it can be used for dead code elimination. +const HasLinkSpeed = false diff --git a/feature/buildfeatures/feature_linkspeed_enabled.go b/feature/buildfeatures/feature_linkspeed_enabled.go new file mode 100644 index 000000000..939858a16 --- /dev/null +++ b/feature/buildfeatures/feature_linkspeed_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_linkspeed + +package buildfeatures + +// HasLinkSpeed is whether the binary was built with support for modular feature "Set link speed on TUN device for better OS integration (Linux only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_linkspeed" build tag. +// It's a const so it can be used for dead code elimination. +const HasLinkSpeed = true diff --git a/feature/condregister/maybe_linkspeed.go b/feature/condregister/maybe_linkspeed.go new file mode 100644 index 000000000..46064b39a --- /dev/null +++ b/feature/condregister/maybe_linkspeed.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android && !ts_omit_linkspeed + +package condregister + +import _ "tailscale.com/feature/linkspeed" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index a751f65fb..9c87586db 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -161,6 +161,10 @@ var Features = map[FeatureTag]FeatureMeta{ "kube": {Sym: "Kube", Desc: "Kubernetes integration"}, "lazywg": {Sym: "LazyWG", Desc: "Lazy WireGuard configuration for memory-constrained devices with large netmaps"}, "linuxdnsfight": {Sym: "LinuxDNSFight", Desc: "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)"}, + "linkspeed": { + Sym: "LinkSpeed", + Desc: "Set link speed on TUN device for better OS integration (Linux only)", + }, "listenrawdisco": { Sym: "ListenRawDisco", Desc: "Use raw sockets for more robust disco (NAT traversal) message receiving (Linux only)", diff --git a/feature/linkspeed/doc.go b/feature/linkspeed/doc.go new file mode 100644 index 000000000..2d2fcf092 --- /dev/null +++ b/feature/linkspeed/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package linkspeed registers support for setting the TUN link speed on Linux, +// to better integrate with system monitoring tools. 
+package linkspeed diff --git a/net/tstun/linkattrs_linux.go b/feature/linkspeed/linkspeed_linux.go similarity index 91% rename from net/tstun/linkattrs_linux.go rename to feature/linkspeed/linkspeed_linux.go index 320385ba6..90e33d4c9 100644 --- a/net/tstun/linkattrs_linux.go +++ b/feature/linkspeed/linkspeed_linux.go @@ -1,17 +1,22 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !android +//go:build linux && !android -package tstun +package linkspeed import ( "github.com/mdlayher/genetlink" "github.com/mdlayher/netlink" "github.com/tailscale/wireguard-go/tun" "golang.org/x/sys/unix" + "tailscale.com/net/tstun" ) +func init() { + tstun.HookSetLinkAttrs.Set(setLinkAttrs) +} + // setLinkSpeed sets the advertised link speed of the TUN interface. func setLinkSpeed(iface tun.Device, mbps int) error { name, err := iface.Name() diff --git a/net/tstun/linkattrs_notlinux.go b/net/tstun/linkattrs_notlinux.go deleted file mode 100644 index 77d227934..000000000 --- a/net/tstun/linkattrs_notlinux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux || android - -package tstun - -import "github.com/tailscale/wireguard-go/tun" - -func setLinkAttrs(iface tun.Device) error { - return nil -} diff --git a/net/tstun/tun.go b/net/tstun/tun.go index 2891e9af4..19b0a53f5 100644 --- a/net/tstun/tun.go +++ b/net/tstun/tun.go @@ -18,12 +18,16 @@ import ( "github.com/tailscale/wireguard-go/tun" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/logger" ) -// CreateTAP is the hook set by feature/tap. +// CreateTAP is the hook maybe set by feature/tap. var CreateTAP feature.Hook[func(logf logger.Logf, tapName, bridgeName string) (tun.Device, error)] +// HookSetLinkAttrs is the hook maybe set by feature/linkspeed. +var HookSetLinkAttrs feature.Hook[func(tun.Device) error] + // modprobeTunHook is a Linux-specific hook to run "/sbin/modprobe tun". 
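For illustration only (not part of the patch): the pieces of this modular feature are spread across several files above, so here is a condensed sketch of how they fit together. The names HookSetLinkAttrs, buildfeatures.HasLinkSpeed, and the hook's Set/GetOk methods are taken from this diff; the surrounding glue is schematic, not a verbatim quote of the code.

    // net/tstun (always compiled): declare the hook, and guard its use with
    // the generated const so builds using the ts_omit_linkspeed tag can drop
    // the code entirely.
    var HookSetLinkAttrs feature.Hook[func(tun.Device) error]

    if buildfeatures.HasLinkSpeed {
        if f, ok := HookSetLinkAttrs.GetOk(); ok {
            if err := f(dev); err != nil {
                logf("setting link attributes: %v", err)
            }
        }
    }

    // feature/linkspeed (compiled only for linux && !android && !ts_omit_linkspeed,
    // pulled in via feature/condregister): register the implementation at init time.
    func init() {
        tstun.HookSetLinkAttrs.Set(setLinkAttrs)
    }

Because HasLinkSpeed is a const, a build that omits the feature pays neither binary size nor init-time cost for the genetlink dependency, which is what the depaware.txt changes above reflect.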
var modprobeTunHook feature.Hook[func() error] @@ -78,8 +82,12 @@ func New(logf logger.Logf, tunName string) (tun.Device, string, error) { dev.Close() return nil, "", err } - if err := setLinkAttrs(dev); err != nil { - logf("setting link attributes: %v", err) + if buildfeatures.HasLinkSpeed { + if f, ok := HookSetLinkAttrs.GetOk(); ok { + if err := f(dev); err != nil { + logf("setting link attributes: %v", err) + } + } } name, err := interfaceName(dev) if err != nil { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 9dd8f0d65..339d18877 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -38,7 +38,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd - L github.com/mdlayher/genetlink from tailscale.com/net/tstun L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ LA 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ @@ -328,7 +327,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ - golang.org/x/net/bpf from github.com/mdlayher/genetlink+ + golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy From a9334576ea233d873938bf8240e8373642efd488 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 7 Oct 2025 12:24:58 +0100 Subject: [PATCH 1512/1708] ipn/ipnlocal: use named arguments for `mockControl.send()` Updates #cleanup Signed-off-by: Alex Chan --- ipn/ipnlocal/local_test.go | 10 +++--- ipn/ipnlocal/state_test.go | 74 +++++++++++++++++++++----------------- 2 files changed, 46 insertions(+), 38 deletions(-) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 168f76268..c8367d14d 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -6145,7 +6145,7 @@ func TestLoginNotifications(t *testing.T) { t.Fatal(err) } - lb.cc.(*mockControl).send(nil, loginURL, false, nil) + lb.cc.(*mockControl).send(sendOpt{url: loginURL}) var wg sync.WaitGroup wg.Add(len(sessions)) @@ -6810,7 +6810,7 @@ func TestSrcCapPacketFilter(t *testing.T) { must.Do(k.UnmarshalText([]byte("nodekey:5c8f86d5fc70d924e55f02446165a5dae8f822994ad26bcf4b08fd841f9bf261"))) controlClient := lb.cc.(*mockControl) - controlClient.send(nil, "", false, &netmap.NetworkMap{ + controlClient.send(sendOpt{nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ Addresses: []netip.Prefix{netip.MustParsePrefix("1.1.1.1/32")}, }).View(), @@ -6839,7 +6839,7 @@ func TestSrcCapPacketFilter(t *testing.T) { }, }}, }}, - }) + }}) f := lb.GetFilterForTest() res := f.Check(netip.MustParseAddr("2.2.2.2"), netip.MustParseAddr("1.1.1.1"), 22, ipproto.TCP) @@ -7015,10 +7015,10 @@ func TestDisplayMessageIPNBus(t *testing.T) { cc := lb.cc.(*mockControl) // Assert that we are logged in and authorized, and also send our DisplayMessages - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: 
&netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), DisplayMessages: msgs, - }) + }}) // Tell the health tracker that we are in a map poll because // mockControl doesn't tell it diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index a4b9ba1f4..fca01f105 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -182,9 +182,17 @@ func (cc *mockControl) populateKeys() (newKeys bool) { return newKeys } +type sendOpt struct { + err error + url string + loginFinished bool + nm *netmap.NetworkMap +} + // send publishes a controlclient.Status notification upstream. // (In our tests here, upstream is the ipnlocal.Local instance.) -func (cc *mockControl) send(err error, url string, loginFinished bool, nm *netmap.NetworkMap) { +func (cc *mockControl) send(opts sendOpt) { + err, url, loginFinished, nm := opts.err, opts.url, opts.loginFinished, opts.nm if loginFinished { cc.mu.Lock() cc.authBlocked = false @@ -211,7 +219,7 @@ func (cc *mockControl) authenticated(nm *netmap.NetworkMap) { cc.persist.UserProfile = *selfUser.AsStruct() } cc.persist.NodeID = nm.SelfNode.StableID() - cc.send(nil, "", true, nm) + cc.send(sendOpt{loginFinished: true, nm: nm}) } func (cc *mockControl) sendAuthURL(nm *netmap.NetworkMap) { @@ -480,7 +488,7 @@ func runTestStateMachine(t *testing.T, seamless bool) { }, }) url1 := "https://localhost:1/1" - cc.send(nil, url1, false, nil) + cc.send(sendOpt{url: url1}) { cc.assertCalls() @@ -533,7 +541,7 @@ func runTestStateMachine(t *testing.T, seamless bool) { t.Logf("\n\nLogin2 (url response)") notifies.expect(1) url2 := "https://localhost:1/2" - cc.send(nil, url2, false, nil) + cc.send(sendOpt{url: url2}) { cc.assertCalls() @@ -560,7 +568,7 @@ func runTestStateMachine(t *testing.T, seamless bool) { sys.ControlKnobs().SeamlessKeyRenewal.Store(true) } - cc.send(nil, "", true, &netmap.NetworkMap{}) + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{}}) { nn := notifies.drain(3) // Arguably it makes sense to unpause now, since the machine @@ -589,9 +597,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { // but the current code is brittle. // (ie. I suspect it would be better to change false->true in send() // below, and do the same in the real controlclient.) - cc.send(nil, "", false, &netmap.NetworkMap{ + cc.send(sendOpt{nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { nn := notifies.drain(1) cc.assertCalls() @@ -752,7 +760,7 @@ func runTestStateMachine(t *testing.T, seamless bool) { // an interactive login URL to visit. notifies.expect(2) url3 := "https://localhost:1/3" - cc.send(nil, url3, false, nil) + cc.send(sendOpt{url: url3}) { nn := notifies.drain(2) cc.assertCalls("Login") @@ -763,9 +771,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { notifies.expect(3) cc.persist.UserProfile.LoginName = "user2" cc.persist.NodeID = "node2" - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) t.Logf("\n\nLoginFinished3") { nn := notifies.drain(3) @@ -833,9 +841,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { // the control server at all when stopped). 
t.Logf("\n\nStart4 -> netmap") notifies.expect(0) - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { notifies.drain(0) cc.assertCalls("pause") @@ -880,7 +888,7 @@ func runTestStateMachine(t *testing.T, seamless bool) { notifies.expect(1) b.StartLoginInteractive(context.Background()) url4 := "https://localhost:1/4" - cc.send(nil, url4, false, nil) + cc.send(sendOpt{url: url4}) { nn := notifies.drain(1) // It might seem like WantRunning should switch to true here, @@ -902,9 +910,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { notifies.expect(3) cc.persist.UserProfile.LoginName = "user3" cc.persist.NodeID = "node3" - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { nn := notifies.drain(3) // BUG: pause() being called here is a bad sign. @@ -950,9 +958,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { // Control server accepts our valid key from before. t.Logf("\n\nLoginFinished5") notifies.expect(0) - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { notifies.drain(0) cc.assertCalls() @@ -965,10 +973,10 @@ func runTestStateMachine(t *testing.T, seamless bool) { } t.Logf("\n\nExpireKey") notifies.expect(1) - cc.send(nil, "", false, &netmap.NetworkMap{ + cc.send(sendOpt{nm: &netmap.NetworkMap{ Expiry: time.Now().Add(-time.Minute), SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { nn := notifies.drain(1) cc.assertCalls() @@ -980,10 +988,10 @@ func runTestStateMachine(t *testing.T, seamless bool) { t.Logf("\n\nExtendKey") notifies.expect(1) - cc.send(nil, "", false, &netmap.NetworkMap{ + cc.send(sendOpt{nm: &netmap.NetworkMap{ Expiry: time.Now().Add(time.Minute), SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { nn := notifies.drain(1) cc.assertCalls() @@ -1118,9 +1126,9 @@ func TestWGEngineStatusRace(t *testing.T) { wantState(ipn.NeedsLogin) // Assert that we are logged in and authorized. - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) wantState(ipn.Starting) // Simulate multiple concurrent callbacks from wgengine. 
@@ -1397,9 +1405,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { mustDo(t)(lb.Start(ipn.Options{})) mustDo2(t)(lb.EditPrefs(connect)) cc().authenticated(node1) - cc().send(nil, "", false, &netmap.NetworkMap{ + cc().send(sendOpt{nm: &netmap.NetworkMap{ Expiry: time.Now().Add(-time.Minute), - }) + }}) }, wantState: ipn.NeedsLogin, wantCfg: &wgcfg.Config{}, @@ -1526,9 +1534,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { mustDo(t)(lb.Start(ipn.Options{})) mustDo2(t)(lb.EditPrefs(connect)) cc().authenticated(node1) - cc().send(nil, "", false, &netmap.NetworkMap{ + cc().send(sendOpt{nm: &netmap.NetworkMap{ Expiry: time.Now().Add(-time.Minute), - }) + }}) }, // Even with seamless, if the key we are using expires, we want to disconnect: wantState: ipn.NeedsLogin, @@ -1616,9 +1624,9 @@ func runTestStateMachineURLRace(t *testing.T, seamless bool) { nw.watch(0, []wantedNotification{ wantStateNotify(ipn.Starting)}) - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) nw.check() t.Logf("Running") @@ -1682,7 +1690,7 @@ func runTestStateMachineURLRace(t *testing.T, seamless bool) { t.Logf("Re-auth (receive URL)") url1 := "https://localhost:1/1" - cc.send(nil, url1, false, nil) + cc.send(sendOpt{url: url1}) // Don't need to wait on anything else - once .send completes, authURL should // be set, and once .send has completed, any opportunities for a WG engine @@ -1718,9 +1726,9 @@ func TestWGEngineDownThenUpRace(t *testing.T) { nw.watch(0, []wantedNotification{ wantStateNotify(ipn.Starting)}) - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) nw.check() nw.watch(0, []wantedNotification{ @@ -1762,7 +1770,7 @@ func TestWGEngineDownThenUpRace(t *testing.T) { wg.Go(func() { t.Log("cc.send starting") - cc.send(nil, url1, false, nil) // will block until engine stops + cc.send(sendOpt{url: url1}) // will block until engine stops t.Log("cc.send returned") }) From 5c1e26b42fa60db7eb7b87ce50d9b7e0befce008 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 7 Oct 2025 07:34:29 -0700 Subject: [PATCH 1513/1708] ipn/localapi: dead code eliminate unreachable/useless LocalAPI handlers when disabled Saves ~94 KB from the min build. Updates #12614 Change-Id: I3b0b8a47f80b9fd3b1038c2834b60afa55bf02c2 Signed-off-by: Brad Fitzpatrick --- client/local/local.go | 3 ++ ipn/ipnlocal/local.go | 5 +- ipn/localapi/localapi.go | 111 +++++++++++++++++++++------------------ 3 files changed, 66 insertions(+), 53 deletions(-) diff --git a/client/local/local.go b/client/local/local.go index a4a871dd8..582c7b848 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -381,6 +381,9 @@ func (lc *Client) UserMetrics(ctx context.Context) ([]byte, error) { // // IncrementCounter does not support gauge metrics or negative delta values. 
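For illustration only (not part of the patch): the ~94 KB saving in this commit comes from the buildfeatures values being compile-time constants rather than runtime flags. When a feature's const is false, guarded Register calls never run and guarded function bodies are provably unreachable, so the linker drops the handlers and everything only they reference. A schematic sketch of the pattern; the helper name at the end is hypothetical and the const really lives in the buildfeatures package:

    const HasClientMetrics = false // generated; flipped by the feature's ts_omit build tag

    func (lc *Client) IncrementCounter(ctx context.Context, name string, delta int) error {
        if !HasClientMetrics {
            return nil // always taken when the const is false...
        }
        // ...so this call and its transitive dependencies become dead code
        // and are eliminated from the binary.
        return lc.postCounterUpdate(ctx, name, delta)
    }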
func (lc *Client) IncrementCounter(ctx context.Context, name string, delta int) error { + if !buildfeatures.HasClientMetrics { + return nil + } type metricUpdate struct { Name string `json:"name"` Type string `json:"type"` diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c07cc42a1..6f991ffae 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4621,7 +4621,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) b.updateFilterLocked(newp.View()) - if oldp.ShouldSSHBeRunning() && !newp.ShouldSSHBeRunning() { + if buildfeatures.HasSSH && oldp.ShouldSSHBeRunning() && !newp.ShouldSSHBeRunning() { if b.sshServer != nil { b.goTracker.Go(b.sshServer.Shutdown) b.sshServer = nil @@ -5917,6 +5917,9 @@ func (b *LocalBackend) setWebClientAtomicBoolLocked(nm *netmap.NetworkMap) { // // b.mu must be held. func (b *LocalBackend) setExposeRemoteWebClientAtomicBoolLocked(prefs ipn.PrefsView) { + if !buildfeatures.HasWebClient { + return + } shouldExpose := prefs.Valid() && prefs.RunWebClient() b.exposeRemoteWebClientAtomicBool.Store(shouldExpose) } diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index fb2c964e7..32dc2963f 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -71,36 +71,20 @@ var handler = map[string]LocalAPIHandler{ // The other /localapi/v0/NAME handlers are exact matches and contain only NAME // without a trailing slash: - "alpha-set-device-attrs": (*Handler).serveSetDeviceAttrs, // see tailscale/corp#24690 - "check-prefs": (*Handler).serveCheckPrefs, - "check-reverse-path-filtering": (*Handler).serveCheckReversePathFiltering, - "check-udp-gro-forwarding": (*Handler).serveCheckUDPGROForwarding, - "derpmap": (*Handler).serveDERPMap, - "dial": (*Handler).serveDial, - "disconnect-control": (*Handler).disconnectControl, - "goroutines": (*Handler).serveGoroutines, - "handle-push-message": (*Handler).serveHandlePushMessage, - "id-token": (*Handler).serveIDToken, - "login-interactive": (*Handler).serveLoginInteractive, - "logout": (*Handler).serveLogout, - "logtap": (*Handler).serveLogTap, - "metrics": (*Handler).serveMetrics, - "ping": (*Handler).servePing, - "prefs": (*Handler).servePrefs, - "query-feature": (*Handler).serveQueryFeature, - "reload-config": (*Handler).reloadConfig, - "reset-auth": (*Handler).serveResetAuth, - "set-expiry-sooner": (*Handler).serveSetExpirySooner, - "set-gui-visible": (*Handler).serveSetGUIVisible, - "set-push-device-token": (*Handler).serveSetPushDeviceToken, - "set-udp-gro-forwarding": (*Handler).serveSetUDPGROForwarding, - "shutdown": (*Handler).serveShutdown, - "start": (*Handler).serveStart, - "status": (*Handler).serveStatus, - "update/check": (*Handler).serveUpdateCheck, - "upload-client-metrics": (*Handler).serveUploadClientMetrics, - "watch-ipn-bus": (*Handler).serveWatchIPNBus, - "whois": (*Handler).serveWhoIs, + "check-prefs": (*Handler).serveCheckPrefs, + "derpmap": (*Handler).serveDERPMap, + "goroutines": (*Handler).serveGoroutines, + "login-interactive": (*Handler).serveLoginInteractive, + "logout": (*Handler).serveLogout, + "ping": (*Handler).servePing, + "prefs": (*Handler).servePrefs, + "reload-config": (*Handler).reloadConfig, + "reset-auth": (*Handler).serveResetAuth, + "set-expiry-sooner": (*Handler).serveSetExpirySooner, + "shutdown": (*Handler).serveShutdown, + "start": (*Handler).serveStart, + "status": (*Handler).serveStatus, + "whois": (*Handler).serveWhoIs, } func init() { @@ -109,6 +93,17 @@ func init() { } if buildfeatures.HasAdvertiseRoutes 
{ Register("check-ip-forwarding", (*Handler).serveCheckIPForwarding) + Register("check-udp-gro-forwarding", (*Handler).serveCheckUDPGROForwarding) + Register("set-udp-gro-forwarding", (*Handler).serveSetUDPGROForwarding) + } + if buildfeatures.HasUseExitNode && runtime.GOOS == "linux" { + Register("check-reverse-path-filtering", (*Handler).serveCheckReversePathFiltering) + } + if buildfeatures.HasClientMetrics { + Register("upload-client-metrics", (*Handler).serveUploadClientMetrics) + } + if buildfeatures.HasClientUpdate { + Register("update/check", (*Handler).serveUpdateCheck) } if buildfeatures.HasUseExitNode { Register("suggest-exit-node", (*Handler).serveSuggestExitNode) @@ -121,6 +116,9 @@ func init() { Register("bugreport", (*Handler).serveBugReport) Register("pprof", (*Handler).servePprof) } + if buildfeatures.HasDebug || buildfeatures.HasServe { + Register("watch-ipn-bus", (*Handler).serveWatchIPNBus) + } if buildfeatures.HasDNS { Register("dns-osconfig", (*Handler).serveDNSOSConfig) Register("dns-query", (*Handler).serveDNSQuery) @@ -128,6 +126,36 @@ func init() { if buildfeatures.HasUserMetrics { Register("usermetrics", (*Handler).serveUserMetrics) } + if buildfeatures.HasServe { + Register("query-feature", (*Handler).serveQueryFeature) + } + if buildfeatures.HasOutboundProxy || buildfeatures.HasSSH { + Register("dial", (*Handler).serveDial) + } + if buildfeatures.HasClientMetrics || buildfeatures.HasDebug { + Register("metrics", (*Handler).serveMetrics) + } + if buildfeatures.HasDebug || buildfeatures.HasAdvertiseRoutes { + Register("disconnect-control", (*Handler).disconnectControl) + } + // Alpha/experimental/debug features. These should be moved to + // their own features if/when they graduate. + if buildfeatures.HasDebug { + Register("id-token", (*Handler).serveIDToken) + Register("alpha-set-device-attrs", (*Handler).serveSetDeviceAttrs) // see tailscale/corp#24690 + Register("handle-push-message", (*Handler).serveHandlePushMessage) + Register("set-push-device-token", (*Handler).serveSetPushDeviceToken) + } + if buildfeatures.HasDebug || runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + Register("set-gui-visible", (*Handler).serveSetGUIVisible) + } + if buildfeatures.HasLogTail { + // TODO(bradfitz): separate out logtail tap functionality from upload + // functionality to make this possible? But seems unlikely people would + // want just this. They could "tail -f" or "journalctl -f" their logs + // themselves. + Register("logtap", (*Handler).serveLogTap) + } } // Register registers a new LocalAPI handler for the given name. @@ -580,15 +608,6 @@ func (h *Handler) serveGoroutines(w http.ResponseWriter, r *http.Request) { func (h *Handler) serveLogTap(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - if !buildfeatures.HasLogTail { - // TODO(bradfitz): separate out logtail tap functionality from upload - // functionality to make this possible? But seems unlikely people would - // want just this. They could "tail -f" or "journalctl -f" their logs - // themselves. - http.Error(w, "logtap not supported in this build", http.StatusNotImplemented) - return - } - // Require write access (~root) as the logs could contain something // sensitive. if !h.PermitWrite { @@ -662,7 +681,7 @@ func (h *Handler) servePprof(w http.ResponseWriter, r *http.Request) { // disconnectControl is the handler for local API /disconnect-control endpoint that shuts down control client, so that // node no longer communicates with control. 
Doing this makes control consider this node inactive. This can be used -// before shutting down a replica of HA subnet router or app connector deployments to ensure that control tells the +// before shutting down a replica of HA subnet router or app connector deployments to ensure that control tells the // peers to switch over to another replica whilst still maintaining th existing peer connections. func (h *Handler) disconnectControl(w http.ResponseWriter, r *http.Request) { if !h.PermitWrite { @@ -1230,11 +1249,6 @@ func (h *Handler) serveHandlePushMessage(w http.ResponseWriter, r *http.Request) } func (h *Handler) serveUploadClientMetrics(w http.ResponseWriter, r *http.Request) { - if !buildfeatures.HasClientMetrics { - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(struct{}{}) - return - } if r.Method != httpm.POST { http.Error(w, "unsupported method", http.StatusMethodNotAllowed) return @@ -1498,13 +1512,6 @@ func (h *Handler) serveUpdateCheck(w http.ResponseWriter, r *http.Request) { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return } - - if !feature.CanAutoUpdate() { - // if we don't support auto-update, just say that we're up to date - json.NewEncoder(w).Encode(tailcfg.ClientVersion{RunningLatest: true}) - return - } - cv := h.b.StatusWithoutPeers().ClientVersion // ipnstate.Status documentation notes that ClientVersion may be nil on some // platforms where this information is unavailable. In that case, return a From 98a0ccc18aa3e5894b1219f6f4322d400f37fa8d Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 7 Oct 2025 19:32:22 +0100 Subject: [PATCH 1514/1708] cmd/tailscaled: default state encryption off for incompatible args (#17480) Since #17376, containerboot crashes on startup in k8s because state encryption is enabled by default without first checking that it's compatible with the selected state store. Make sure we only default state encryption to enabled if it's not going to immediately clash with other bits of tailscaled config. Updates tailscale/corp#32909 Change-Id: I76c586772750d6da188cc97b647c6e0c1a8734f0 Signed-off-by: Tom Proctor --- cmd/tailscaled/tailscaled.go | 66 +++++++++++++++++++++--------------- 1 file changed, 38 insertions(+), 28 deletions(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index a46457fac..92c44f4c1 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -276,30 +276,7 @@ func main() { } if buildfeatures.HasTPM { - if !args.encryptState.set { - args.encryptState.v = defaultEncryptState() - } - if args.encryptState.v { - if runtime.GOOS != "linux" && runtime.GOOS != "windows" { - log.SetFlags(0) - log.Fatalf("--encrypt-state is not supported on %s", runtime.GOOS) - } - // Check if we have TPM support in this build. - if !store.HasKnownProviderPrefix(store.TPMPrefix + "/") { - log.SetFlags(0) - log.Fatal("--encrypt-state is not supported in this build of tailscaled") - } - // Check if we have TPM access. - if !hostinfo.New().TPM.Present() { - log.SetFlags(0) - log.Fatal("--encrypt-state is not supported on this device or a TPM is not accessible") - } - // Check for conflicting prefix in --state, like arn: or kube:. 
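For illustration only (not part of the patch): the handleTPMFlags logic below depends on being able to tell an explicit --encrypt-state from an unset one, which is why args.encryptState carries both a value and a "was it set" marker. The real flag type in tailscaled may differ; a minimal flag.Value along these lines would behave that way:

    // optBool records both the parsed value and whether the flag was given at all.
    type optBool struct {
        v   bool
        set bool
    }

    func (b *optBool) String() string { return strconv.FormatBool(b.v) }

    func (b *optBool) Set(s string) error {
        v, err := strconv.ParseBool(s)
        if err != nil {
            return err
        }
        b.v, b.set = v, true
        return nil
    }

    // IsBoolFlag lets the user write just --encrypt-state with no value.
    func (b *optBool) IsBoolFlag() bool { return true }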
- if args.statepath != "" && store.HasKnownProviderPrefix(args.statepath) { - log.SetFlags(0) - log.Fatal("--encrypt-state can only be used with --state set to a local file path") - } - } + handleTPMFlags() } if args.disableLogs { @@ -902,14 +879,47 @@ func applyIntegrationTestEnvKnob() { } } -func defaultEncryptState() bool { +// handleTPMFlags validates the --encrypt-state flag if set, and defaults +// state encryption on if it's supported and compatible with other settings. +func handleTPMFlags() { + switch { + case args.encryptState.v: + // Explicitly enabled, validate. + if err := canEncryptState(); err != nil { + log.SetFlags(0) + log.Fatal(err) + } + case !args.encryptState.set: + policyEncrypt, _ := policyclient.Get().GetBoolean(pkey.EncryptState, feature.TPMAvailable()) + if !policyEncrypt { + // Default disabled, no need to validate. + return + } + // Default enabled if available. + if err := canEncryptState(); err == nil { + args.encryptState.v = true + } + } +} + +// canEncryptState returns an error if state encryption can't be enabled, +// either due to availability or compatibility with other settings. +func canEncryptState() error { if runtime.GOOS != "windows" && runtime.GOOS != "linux" { // TPM encryption is only configurable on Windows and Linux. Other // platforms either use system APIs and are not configurable // (Android/Apple), or don't support any form of encryption yet // (plan9/FreeBSD/etc). - return false + return fmt.Errorf("--encrypt-state is not supported on %s", runtime.GOOS) + } + // Check if we have TPM access. + if !feature.TPMAvailable() { + return errors.New("--encrypt-state is not supported on this device or a TPM is not accessible") + } + // Check for conflicting prefix in --state, like arn: or kube:. + if args.statepath != "" && store.HasKnownProviderPrefix(args.statepath) { + return errors.New("--encrypt-state can only be used with --state set to a local file path") } - v, _ := policyclient.Get().GetBoolean(pkey.EncryptState, feature.TPMAvailable()) - return v + + return nil } From ad6cf2f8f369ae54652a0808cda872ca558ab429 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 7 Oct 2025 16:43:22 -0700 Subject: [PATCH 1515/1708] util/eventbus: add a function-based subscriber type (#17432) Originally proposed by @bradfitz in #17413. In practice, a lot of subscribers have only one event type of interest, or a small number of mostly independent ones. In that case, the overhead of running and maintaining a goroutine to select on multiple channels winds up being more noisy than we'd like for the user of the API. For this common case, add a new SubscriberFunc[T] type that delivers events to a callback owned by the subscriber, directly on the goroutine belonging to the client itself. This frees the consumer from the need to maintain their own goroutine to pull events from the channel, and to watch for closure of the subscriber. Before: s := eventbus.Subscribe[T](eventClient) go func() { for { select { case <-s.Done(): return case e := <-s.Events(): doSomethingWith(e) } } }() // ... s.Close() After: func doSomethingWithT(e T) { ... } s := eventbus.SubscribeFunc(eventClient, doSomethingWithT) // ... s.Close() Moreover, unless the caller wants to explicitly stop the subscriber separately from its governing client, it need not capture the SubscriberFunc value at all. 
One downside of this approach is that a slow or deadlocked callback could block client's service routine and thus stall all other subscriptions on that client, However, this can already happen more broadly if a subscriber fails to service its delivery channel in a timely manner, it just feeds back more immediately. Updates #17487 Change-Id: I64592d786005177aa9fd445c263178ed415784d5 Signed-off-by: M. J. Fromberger --- util/eventbus/bus_test.go | 247 +++++++++++++++++++++++++++---------- util/eventbus/client.go | 23 ++++ util/eventbus/subscribe.go | 79 +++++++++--- 3 files changed, 270 insertions(+), 79 deletions(-) diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index f9e7ee3dd..de292cf1a 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "testing" + "testing/synctest" "time" "github.com/creachadair/taskgroup" @@ -64,6 +65,55 @@ func TestBus(t *testing.T) { } } +func TestSubscriberFunc(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client("TestClient") + + exp := expectEvents(t, EventA{12345}) + eventbus.SubscribeFunc[EventA](c, func(e EventA) { exp.Got(e) }) + + p := eventbus.Publish[EventA](c) + p.Publish(EventA{12345}) + + synctest.Wait() + c.Close() + + if !exp.Empty() { + t.Errorf("unexpected extra events: %+v", exp.want) + } + }) + + t.Run("SubscriberPublishes", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client("TestClient") + pa := eventbus.Publish[EventA](c) + pb := eventbus.Publish[EventB](c) + exp := expectEvents(t, EventA{127}, EventB{128}) + eventbus.SubscribeFunc[EventA](c, func(e EventA) { + exp.Got(e) + pb.Publish(EventB{Counter: e.Counter + 1}) + }) + eventbus.SubscribeFunc[EventB](c, func(e EventB) { + exp.Got(e) + }) + + pa.Publish(EventA{127}) + + synctest.Wait() + c.Close() + if !exp.Empty() { + t.Errorf("unepxected extra events: %+v", exp.want) + } + }) + }) +} + func TestBusMultipleConsumers(t *testing.T) { b := eventbus.New() defer b.Close() @@ -111,80 +161,149 @@ func TestBusMultipleConsumers(t *testing.T) { } } -func TestSpam(t *testing.T) { - b := eventbus.New() - defer b.Close() +func TestClientMixedSubscribers(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client("TestClient") + + var gotA EventA + s1 := eventbus.Subscribe[EventA](c) - const ( - publishers = 100 - eventsPerPublisher = 20 - wantEvents = publishers * eventsPerPublisher - subscribers = 100 - ) - - var g taskgroup.Group - - received := make([][]EventA, subscribers) - for i := range subscribers { - c := b.Client(fmt.Sprintf("Subscriber%d", i)) - defer c.Close() - s := eventbus.Subscribe[EventA](c) - g.Go(func() error { - for range wantEvents { + var gotB EventB + eventbus.SubscribeFunc[EventB](c, func(e EventB) { + t.Logf("func sub received %[1]T %+[1]v", e) + gotB = e + }) + + go func() { + for { select { - case evt := <-s.Events(): - received[i] = append(received[i], evt) - case <-s.Done(): - t.Errorf("queue done before expected number of events received") - return errors.New("queue prematurely closed") - case <-time.After(5 * time.Second): - t.Errorf("timed out waiting for expected bus event after %d events", len(received[i])) - return errors.New("timeout") + case <-s1.Done(): + return + case e := <-s1.Events(): + t.Logf("chan sub received %[1]T %+[1]v", e) + gotA = e } } - return nil - }) - } + }() + + p1 := 
eventbus.Publish[EventA](c) + p2 := eventbus.Publish[EventB](c) + + go p1.Publish(EventA{12345}) + go p2.Publish(EventB{67890}) - published := make([][]EventA, publishers) - for i := range publishers { - g.Run(func() { + synctest.Wait() + c.Close() + synctest.Wait() + + if diff := cmp.Diff(gotB, EventB{67890}); diff != "" { + t.Errorf("Chan sub (-got, +want):\n%s", diff) + } + if diff := cmp.Diff(gotA, EventA{12345}); diff != "" { + t.Errorf("Func sub (-got, +want):\n%s", diff) + } + }) +} + +func TestSpam(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + const ( + publishers = 100 + eventsPerPublisher = 20 + wantEvents = publishers * eventsPerPublisher + subscribers = 100 + ) + + var g taskgroup.Group + + // A bunch of subscribers receiving on channels. + chanReceived := make([][]EventA, subscribers) + for i := range subscribers { + c := b.Client(fmt.Sprintf("Subscriber%d", i)) + defer c.Close() + + s := eventbus.Subscribe[EventA](c) + g.Go(func() error { + for range wantEvents { + select { + case evt := <-s.Events(): + chanReceived[i] = append(chanReceived[i], evt) + case <-s.Done(): + t.Errorf("queue done before expected number of events received") + return errors.New("queue prematurely closed") + case <-time.After(5 * time.Second): + t.Logf("timed out waiting for expected bus event after %d events", len(chanReceived[i])) + return errors.New("timeout") + } + } + return nil + }) + } + + // A bunch of subscribers receiving via a func. + funcReceived := make([][]EventA, subscribers) + for i := range subscribers { + c := b.Client(fmt.Sprintf("SubscriberFunc%d", i)) + defer c.Close() + eventbus.SubscribeFunc(c, func(e EventA) { + funcReceived[i] = append(funcReceived[i], e) + }) + } + + published := make([][]EventA, publishers) + for i := range publishers { c := b.Client(fmt.Sprintf("Publisher%d", i)) p := eventbus.Publish[EventA](c) - for j := range eventsPerPublisher { - evt := EventA{i*eventsPerPublisher + j} - p.Publish(evt) - published[i] = append(published[i], evt) - } - }) - } + g.Run(func() { + defer c.Close() + for j := range eventsPerPublisher { + evt := EventA{i*eventsPerPublisher + j} + p.Publish(evt) + published[i] = append(published[i], evt) + } + }) + } - if err := g.Wait(); err != nil { - t.Fatal(err) - } - var last []EventA - for i, got := range received { - if len(got) != wantEvents { - // Receiving goroutine already reported an error, we just need - // to fail early within the main test goroutine. 
- t.FailNow() + if err := g.Wait(); err != nil { + t.Fatal(err) } - if last == nil { - continue + synctest.Wait() + + tests := []struct { + name string + recv [][]EventA + }{ + {"Subscriber", chanReceived}, + {"SubscriberFunc", funcReceived}, } - if diff := cmp.Diff(got, last); diff != "" { - t.Errorf("Subscriber %d did not see the same events as %d (-got+want):\n%s", i, i-1, diff) + for _, tc := range tests { + for i, got := range tc.recv { + if len(got) != wantEvents { + t.Errorf("%s %d: got %d events, want %d", tc.name, i, len(got), wantEvents) + } + if i == 0 { + continue + } + if diff := cmp.Diff(got, tc.recv[i-1]); diff != "" { + t.Errorf("%s %d did not see the same events as %d (-got+want):\n%s", tc.name, i, i-1, diff) + } + } } - last = got - } - for i, sent := range published { - if got := len(sent); got != eventsPerPublisher { - t.Fatalf("Publisher %d sent %d events, want %d", i, got, eventsPerPublisher) + for i, sent := range published { + if got := len(sent); got != eventsPerPublisher { + t.Fatalf("Publisher %d sent %d events, want %d", i, got, eventsPerPublisher) + } } - } - // TODO: check that the published sequences are proper - // subsequences of the received slices. + // TODO: check that the published sequences are proper + // subsequences of the received slices. + }) } func TestClient_Done(t *testing.T) { @@ -366,10 +485,12 @@ func expectEvents(t *testing.T, want ...any) *queueChecker { func (q *queueChecker) Got(v any) { q.t.Helper() if q.Empty() { - q.t.Fatalf("queue got unexpected %v", v) + q.t.Errorf("queue got unexpected %v", v) + return } if v != q.want[0] { - q.t.Fatalf("queue got %#v, want %#v", v, q.want[0]) + q.t.Errorf("queue got %#v, want %#v", v, q.want[0]) + return } q.want = q.want[1:] } diff --git a/util/eventbus/client.go b/util/eventbus/client.go index 7c0268886..9e3f3ee76 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -147,6 +147,29 @@ func Subscribe[T any](c *Client) *Subscriber[T] { return s } +// SubscribeFunc is like [Subscribe], but calls the provided func for each +// event of type T. +// +// A SubscriberFunc calls f synchronously from the client's goroutine. +// This means the callback must not block for an extended period of time, +// as this will block the subscriber and slow event processing for all +// subscriptions on c. +func SubscribeFunc[T any](c *Client, f func(T)) *SubscriberFunc[T] { + c.mu.Lock() + defer c.mu.Unlock() + + // The caller should not race subscriptions with close, give them a useful + // diagnostic at the call site. + if c.isClosed() { + panic("cannot SubscribeFunc on a closed client") + } + + r := c.subscribeStateLocked() + s := newSubscriberFunc[T](r, f) + r.addSubscriber(s) + return s +} + // Publish returns a publisher for event type T using the given client. // It panics if c is closed. func Publish[T any](c *Client) *Publisher[T] { diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index ef155e621..56da413ef 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -61,45 +61,45 @@ func newSubscribeState(c *Client) *subscribeState { return ret } -func (q *subscribeState) pump(ctx context.Context) { +func (s *subscribeState) pump(ctx context.Context) { var vals queue[DeliveredEvent] acceptCh := func() chan DeliveredEvent { if vals.Full() { return nil } - return q.write + return s.write } for { if !vals.Empty() { val := vals.Peek() - sub := q.subscriberFor(val.Event) + sub := s.subscriberFor(val.Event) if sub == nil { // Raced with unsubscribe. 
vals.Drop() continue } - if !sub.dispatch(ctx, &vals, acceptCh, q.snapshot) { + if !sub.dispatch(ctx, &vals, acceptCh, s.snapshot) { return } - if q.debug.active() { - q.debug.run(DeliveredEvent{ + if s.debug.active() { + s.debug.run(DeliveredEvent{ Event: val.Event, From: val.From, - To: q.client, + To: s.client, }) } } else { // Keep the cases in this select in sync with - // Subscriber.dispatch below. The only difference should be - // that this select doesn't deliver queued values to - // anyone, and unconditionally accepts new values. + // Subscriber.dispatch and SubscriberFunc.dispatch below. + // The only difference should be that this select doesn't deliver + // queued values to anyone, and unconditionally accepts new values. select { - case val := <-q.write: + case val := <-s.write: vals.Add(val) case <-ctx.Done(): return - case ch := <-q.snapshot: + case ch := <-s.snapshot: ch <- vals.Snapshot() } } @@ -152,10 +152,10 @@ func (s *subscribeState) deleteSubscriber(t reflect.Type) { s.client.deleteSubscriber(t, s) } -func (q *subscribeState) subscriberFor(val any) subscriber { - q.outputsMu.Lock() - defer q.outputsMu.Unlock() - return q.outputs[reflect.TypeOf(val)] +func (s *subscribeState) subscriberFor(val any) subscriber { + s.outputsMu.Lock() + defer s.outputsMu.Unlock() + return s.outputs[reflect.TypeOf(val)] } // Close closes the subscribeState. It implicitly closes all Subscribers @@ -177,6 +177,7 @@ func (s *subscribeState) closed() <-chan struct{} { } // A Subscriber delivers one type of event from a [Client]. +// Events are sent to the [Subscriber.Events] channel. type Subscriber[T any] struct { stop stopFlag read chan T @@ -252,3 +253,49 @@ func (s *Subscriber[T]) Close() { s.stop.Stop() // unblock receivers s.unregister() } + +// A SubscriberFunc delivers one type of event from a [Client]. +// Events are forwarded synchronously to a function provided at construction. +type SubscriberFunc[T any] struct { + stop stopFlag + read func(T) + unregister func() +} + +func newSubscriberFunc[T any](r *subscribeState, f func(T)) *SubscriberFunc[T] { + return &SubscriberFunc[T]{ + read: f, + unregister: func() { r.deleteSubscriber(reflect.TypeFor[T]()) }, + } +} + +// Close closes the SubscriberFunc, indicating the caller no longer wishes to +// receive this event type. After Close, no further events will be passed to +// the callback. +// +// If the [Bus] from which s was created is closed, s is implicitly closed and +// does not need to be closed separately. +func (s *SubscriberFunc[T]) Close() { s.stop.Stop(); s.unregister() } + +// subscribeType implements part of the subscriber interface. +func (s *SubscriberFunc[T]) subscribeType() reflect.Type { return reflect.TypeFor[T]() } + +// dispatch implements part of the subscriber interface. +func (s *SubscriberFunc[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent], acceptCh func() chan DeliveredEvent, snapshot chan chan []DeliveredEvent) bool { + // Keep the cases in this select in sync with subscribeState.pump + // above. The only different should be that this select + // delivers a value by calling s.read. 
+ select { + case val := <-acceptCh(): + vals.Add(val) + case <-ctx.Done(): + return false + case ch := <-snapshot: + ch <- vals.Snapshot() + default: + } + t := vals.Peek().Event.(T) + s.read(t) + vals.Drop() + return true +} From f25e47cdeb61cfb7c4f1187aafd33add6d1c31a4 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Wed, 8 Oct 2025 10:01:25 -0400 Subject: [PATCH 1516/1708] flake.nix: use tailscale go fork (#17486) Move our nix flake to use Tailscale's go toolchain instead of upstream go. Fixes #17494 Signed-off-by: Mike O'Driscoll --- flake.nix | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/flake.nix b/flake.nix index e8ef03853..7b97c8a13 100644 --- a/flake.nix +++ b/flake.nix @@ -46,8 +46,9 @@ systems, flake-compat, }: let - go125Version = "1.25.1"; - goHash = "sha256-0BDBCc7pTYDv5oHqtGvepJGskGv0ZYPDLp8NuwvRpZQ="; + goVersion = "1.25.1"; + toolChainRev = nixpkgs.lib.fileContents ./go.toolchain.rev; + gitHash = "sha256-1OCmJ7sZL6G/6wO2+lnW4uYPCIdbXhscD5qSTIPoxDk="; eachSystem = f: nixpkgs.lib.genAttrs (import systems) (system: f (import nixpkgs { @@ -55,10 +56,12 @@ overlays = [ (final: prev: { go_1_25 = prev.go_1_25.overrideAttrs { - version = go125Version; - src = prev.fetchurl { - url = "https://go.dev/dl/go${go125Version}.src.tar.gz"; - hash = goHash; + version = goVersion; + src = prev.fetchFromGitHub { + owner = "tailscale"; + repo = "go"; + rev = toolChainRev; + hash = gitHash; }; }; }) From cd2a3425cb54a66f1531229d99c9af840e0a5807 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Wed, 8 Oct 2025 15:15:12 +0100 Subject: [PATCH 1517/1708] cmd/tsrecorder: adds sending api level logging to tsrecorder (#16960) Updates #17141 Signed-off-by: chaosinthecrd --- cmd/k8s-operator/depaware.txt | 32 +- cmd/k8s-operator/sts.go | 2 +- flake.nix | 2 +- go.mod | 2 + go.mod.sri | 2 +- go.sum | 4 + k8s-operator/api-proxy/proxy.go | 143 ++++- k8s-operator/api-proxy/proxy_events_test.go | 548 ++++++++++++++++++++ sessionrecording/connect.go | 91 ++++ sessionrecording/connect_test.go | 102 +++- sessionrecording/event.go | 104 ++++ sessionrecording/header.go | 1 - shell.nix | 2 +- 13 files changed, 1014 insertions(+), 21 deletions(-) create mode 100644 k8s-operator/api-proxy/proxy_events_test.go create mode 100644 sessionrecording/event.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 9851cf9af..da43ac177 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -6,6 +6,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus + github.com/blang/semver/v4 from k8s.io/component-base/metrics 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus github.com/coder/websocket from tailscale.com/util/eventbus github.com/coder/websocket/internal/errd from github.com/coder/websocket @@ -60,6 +61,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/google/gofuzz/bytesource from github.com/google/gofuzz github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/tka + W 💣 github.com/inconshreveable/mousetrap from github.com/spf13/cobra github.com/josharian/intern from github.com/mailru/easyjson/jlexer 
L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink @@ -87,17 +89,18 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil from github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header from github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil 💣 github.com/prometheus/client_golang/prometheus from github.com/prometheus/client_golang/prometheus/collectors+ - github.com/prometheus/client_golang/prometheus/collectors from sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics + github.com/prometheus/client_golang/prometheus/collectors from sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics+ github.com/prometheus/client_golang/prometheus/internal from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/client_golang/prometheus/promhttp from sigs.k8s.io/controller-runtime/pkg/metrics/server+ github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+ - LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus + LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus+ LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs LD github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs L 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf - github.com/spf13/pflag from k8s.io/client-go/tools/clientcmd + github.com/spf13/cobra from k8s.io/component-base/cli/flag + github.com/spf13/pflag from k8s.io/client-go/tools/clientcmd+ W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket W 💣 github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio @@ -124,6 +127,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 + go.opentelemetry.io/otel/attribute from go.opentelemetry.io/otel/trace + go.opentelemetry.io/otel/codes from go.opentelemetry.io/otel/trace + 💣 go.opentelemetry.io/otel/internal from go.opentelemetry.io/otel/attribute + go.opentelemetry.io/otel/internal/attribute from go.opentelemetry.io/otel/attribute + go.opentelemetry.io/otel/trace from k8s.io/component-base/metrics + go.opentelemetry.io/otel/trace/embedded from go.opentelemetry.io/otel/trace go.uber.org/multierr from go.uber.org/zap+ go.uber.org/zap from github.com/go-logr/zapr+ go.uber.org/zap/buffer from go.uber.org/zap/internal/bufferpool+ @@ -283,8 +292,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/apimachinery/pkg/api/meta/testrestmapper from k8s.io/client-go/testing k8s.io/apimachinery/pkg/api/resource from k8s.io/api/autoscaling/v1+ k8s.io/apimachinery/pkg/api/validation from k8s.io/apimachinery/pkg/util/managedfields/internal+ + k8s.io/apimachinery/pkg/api/validation/path 
from k8s.io/apiserver/pkg/endpoints/request 💣 k8s.io/apimachinery/pkg/apis/meta/internalversion from k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme+ - k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme from k8s.io/client-go/metadata + k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme from k8s.io/client-go/metadata+ k8s.io/apimachinery/pkg/apis/meta/internalversion/validation from k8s.io/client-go/util/watchlist 💣 k8s.io/apimachinery/pkg/apis/meta/v1 from k8s.io/api/admission/v1+ k8s.io/apimachinery/pkg/apis/meta/v1/unstructured from k8s.io/apimachinery/pkg/runtime/serializer/versioning+ @@ -327,13 +337,18 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/apimachinery/pkg/util/uuid from sigs.k8s.io/controller-runtime/pkg/internal/controller+ k8s.io/apimachinery/pkg/util/validation from k8s.io/apimachinery/pkg/api/validation+ k8s.io/apimachinery/pkg/util/validation/field from k8s.io/apimachinery/pkg/api/errors+ + k8s.io/apimachinery/pkg/util/version from k8s.io/apiserver/pkg/features+ k8s.io/apimachinery/pkg/util/wait from k8s.io/client-go/tools/cache+ k8s.io/apimachinery/pkg/util/yaml from k8s.io/apimachinery/pkg/runtime/serializer/json k8s.io/apimachinery/pkg/version from k8s.io/client-go/discovery+ k8s.io/apimachinery/pkg/watch from k8s.io/apimachinery/pkg/apis/meta/v1+ k8s.io/apimachinery/third_party/forked/golang/json from k8s.io/apimachinery/pkg/util/strategicpatch k8s.io/apimachinery/third_party/forked/golang/reflect from k8s.io/apimachinery/pkg/conversion + k8s.io/apiserver/pkg/authentication/user from k8s.io/apiserver/pkg/endpoints/request + k8s.io/apiserver/pkg/endpoints/request from tailscale.com/k8s-operator/api-proxy + k8s.io/apiserver/pkg/features from k8s.io/apiserver/pkg/endpoints/request k8s.io/apiserver/pkg/storage/names from tailscale.com/cmd/k8s-operator + k8s.io/apiserver/pkg/util/feature from k8s.io/apiserver/pkg/endpoints/request+ k8s.io/client-go/applyconfigurations/admissionregistration/v1 from k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1+ k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 from k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 from k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 @@ -603,6 +618,13 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/util/keyutil from k8s.io/client-go/util/cert k8s.io/client-go/util/watchlist from k8s.io/client-go/dynamic+ k8s.io/client-go/util/workqueue from k8s.io/client-go/transport+ + k8s.io/component-base/cli/flag from k8s.io/component-base/featuregate + k8s.io/component-base/featuregate from k8s.io/apiserver/pkg/features+ + k8s.io/component-base/metrics from k8s.io/component-base/metrics/legacyregistry+ + k8s.io/component-base/metrics/legacyregistry from k8s.io/component-base/metrics/prometheus/feature + k8s.io/component-base/metrics/prometheus/feature from k8s.io/component-base/featuregate + k8s.io/component-base/metrics/prometheusextension from k8s.io/component-base/metrics + k8s.io/component-base/version from k8s.io/component-base/featuregate+ k8s.io/klog/v2 from k8s.io/apimachinery/pkg/api/meta+ k8s.io/klog/v2/internal/buffer from k8s.io/klog/v2 k8s.io/klog/v2/internal/clock from k8s.io/klog/v2 @@ -1162,7 +1184,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ sync/atomic from context+ syscall from crypto/internal/sysrand+ 
text/tabwriter from k8s.io/apimachinery/pkg/util/diff+ - text/template from html/template + text/template from html/template+ text/template/parse from html/template+ time from compress/gzip+ unicode from bytes+ diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 6300341b7..c52ffce85 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -63,7 +63,7 @@ const ( AnnotationHostname = "tailscale.com/hostname" annotationTailnetTargetIPOld = "tailscale.com/ts-tailnet-target-ip" AnnotationTailnetTargetIP = "tailscale.com/tailnet-ip" - //MagicDNS name of tailnet node. + // MagicDNS name of tailnet node. AnnotationTailnetTargetFQDN = "tailscale.com/tailnet-fqdn" AnnotationProxyGroup = "tailscale.com/proxy-group" diff --git a/flake.nix b/flake.nix index 7b97c8a13..9481248f0 100644 --- a/flake.nix +++ b/flake.nix @@ -151,5 +151,5 @@ }); }; } -# nix-direnv cache busting line: sha256-jsmQ0S1Uh1cU/kr0onYLJY9VYcFx297QZjQALM3wX10= +# nix-direnv cache busting line: sha256-rV3C2Vi48FCifGt58OdEO4+Av0HRIs8sUJVvp/gEBLw= diff --git a/go.mod b/go.mod index bce634431..965a447b9 100644 --- a/go.mod +++ b/go.mod @@ -136,6 +136,7 @@ require ( github.com/alecthomas/go-check-sumtype v0.1.4 // indirect github.com/alexkohler/nakedret/v2 v2.0.4 // indirect github.com/armon/go-metrics v0.4.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/boltdb/bolt v1.3.1 // indirect github.com/bombsimon/wsl/v4 v4.2.1 // indirect github.com/butuzov/mirror v1.1.0 // indirect @@ -186,6 +187,7 @@ require ( go.uber.org/automaxprocs v1.5.3 // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + k8s.io/component-base v0.32.0 // indirect ) require ( diff --git a/go.mod.sri b/go.mod.sri index a1d81c1a9..f94054422 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-jsmQ0S1Uh1cU/kr0onYLJY9VYcFx297QZjQALM3wX10= +sha256-rV3C2Vi48FCifGt58OdEO4+Av0HRIs8sUJVvp/gEBLw= diff --git a/go.sum b/go.sum index 5e2205575..bc386d1fd 100644 --- a/go.sum +++ b/go.sum @@ -178,6 +178,8 @@ github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJ github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb h1:m935MPodAbYS46DG4pJSv7WO+VECIWUQ7OJYSoTrMh4= github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= @@ -1546,6 +1548,8 @@ k8s.io/apiserver v0.32.0 h1:VJ89ZvQZ8p1sLeiWdRJpRD6oLozNZD2+qVSLi+ft5Qs= k8s.io/apiserver v0.32.0/go.mod h1:HFh+dM1/BE/Hm4bS4nTXHVfN6Z6tFIZPi649n83b4Ag= k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= +k8s.io/component-base v0.32.0 h1:d6cWHZkCiiep41ObYQS6IcgzOUQUNpywm39KVYaUqzU= +k8s.io/component-base v0.32.0/go.mod h1:JLG2W5TUxUu5uDyKiH2R/7NnxJo1HlPoRIIbVLkK5eM= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= 
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index a0f2f930b..fdb798152 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -6,10 +6,13 @@ package apiproxy import ( + "bytes" "context" "crypto/tls" + "encoding/json" "errors" "fmt" + "io" "net" "net/http" "net/http/httputil" @@ -19,13 +22,16 @@ import ( "time" "go.uber.org/zap" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/client-go/rest" "k8s.io/client-go/transport" "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" - "tailscale.com/k8s-operator/sessionrecording" ksr "tailscale.com/k8s-operator/sessionrecording" "tailscale.com/kube/kubetypes" + "tailscale.com/net/netx" + "tailscale.com/sessionrecording" "tailscale.com/tailcfg" "tailscale.com/tsnet" "tailscale.com/util/clientmetric" @@ -83,12 +89,13 @@ func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsn } ap := &APIServerProxy{ - log: zlog, - lc: lc, - authMode: mode == kubetypes.APIServerProxyModeAuth, - https: https, - upstreamURL: u, - ts: ts, + log: zlog, + lc: lc, + authMode: mode == kubetypes.APIServerProxyModeAuth, + https: https, + upstreamURL: u, + ts: ts, + sendEventFunc: sessionrecording.SendEvent, } ap.rp = &httputil.ReverseProxy{ Rewrite: func(pr *httputil.ProxyRequest) { @@ -183,6 +190,8 @@ type APIServerProxy struct { ts *tsnet.Server hs *http.Server upstreamURL *url.URL + + sendEventFunc func(ap netip.AddrPort, event io.Reader, dial netx.DialFunc) error } // serveDefault is the default handler for Kubernetes API server requests. @@ -192,7 +201,16 @@ func (ap *APIServerProxy) serveDefault(w http.ResponseWriter, r *http.Request) { ap.authError(w, err) return } + + if err = ap.recordRequestAsEvent(r, who); err != nil { + msg := fmt.Sprintf("error recording Kubernetes API request: %v", err) + ap.log.Errorf(msg) + http.Error(w, msg, http.StatusBadGateway) + return + } + counterNumRequestsProxied.Add(1) + ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } @@ -220,7 +238,7 @@ func (ap *APIServerProxy) serveAttachWS(w http.ResponseWriter, r *http.Request) ap.sessionForProto(w, r, ksr.AttachSessionType, ksr.WSProtocol) } -func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request, sessionType sessionrecording.SessionType, proto ksr.Protocol) { +func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request, sessionType ksr.SessionType, proto ksr.Protocol) { const ( podNameKey = "pod" namespaceNameKey = "namespace" @@ -232,6 +250,14 @@ func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request ap.authError(w, err) return } + + if err = ap.recordRequestAsEvent(r, who); err != nil { + msg := fmt.Sprintf("error recording Kubernetes API request: %v", err) + ap.log.Errorf(msg) + http.Error(w, msg, http.StatusBadGateway) + return + } + counterNumRequestsProxied.Add(1) failOpen, addrs, err := determineRecorderConfig(who) if err != nil { @@ -283,6 +309,107 @@ func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request ap.rp.ServeHTTP(h, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } +func (ap *APIServerProxy) recordRequestAsEvent(req *http.Request, who *apitype.WhoIsResponse) error { + failOpen, addrs, err := determineRecorderConfig(who) + if err != nil { + return fmt.Errorf("error trying to determine whether the kubernetes 
api request needs to be recorded: %w", err) + } + if len(addrs) == 0 { + if failOpen { + return nil + } else { + return fmt.Errorf("forbidden: kubernetes api request must be recorded, but no recorders are available") + } + } + + factory := &request.RequestInfoFactory{ + APIPrefixes: sets.NewString("api", "apis"), + GrouplessAPIPrefixes: sets.NewString("api"), + } + + reqInfo, err := factory.NewRequestInfo(req) + if err != nil { + return fmt.Errorf("error parsing request %s %s: %w", req.Method, req.URL.Path, err) + } + + kubeReqInfo := sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: reqInfo.IsResourceRequest, + Path: reqInfo.Path, + Verb: reqInfo.Verb, + APIPrefix: reqInfo.APIPrefix, + APIGroup: reqInfo.APIGroup, + APIVersion: reqInfo.APIVersion, + Namespace: reqInfo.Namespace, + Resource: reqInfo.Resource, + Subresource: reqInfo.Subresource, + Name: reqInfo.Name, + Parts: reqInfo.Parts, + FieldSelector: reqInfo.FieldSelector, + LabelSelector: reqInfo.LabelSelector, + } + event := &sessionrecording.Event{ + Timestamp: time.Now().Unix(), + Kubernetes: kubeReqInfo, + Type: sessionrecording.KubernetesAPIEventType, + UserAgent: req.UserAgent(), + Request: sessionrecording.Request{ + Method: req.Method, + Path: req.URL.String(), + QueryParameters: req.URL.Query(), + }, + Source: sessionrecording.Source{ + NodeID: who.Node.StableID, + Node: strings.TrimSuffix(who.Node.Name, "."), + }, + } + + if !who.Node.IsTagged() { + event.Source.NodeUser = who.UserProfile.LoginName + event.Source.NodeUserID = who.UserProfile.ID + } else { + event.Source.NodeTags = who.Node.Tags + } + + bodyBytes, err := io.ReadAll(req.Body) + if err != nil { + return fmt.Errorf("failed to read body: %w", err) + } + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + event.Request.Body = bodyBytes + + var errs []error + // TODO: ChaosInTheCRD ensure that if there are multiple addrs timing out we don't experience slowdown on client waiting for response. + fail := true + for _, addr := range addrs { + data := new(bytes.Buffer) + if err := json.NewEncoder(data).Encode(event); err != nil { + return fmt.Errorf("error marshaling request event: %w", err) + } + + if err := ap.sendEventFunc(addr, data, ap.ts.Dial); err != nil { + if apiSupportErr, ok := err.(sessionrecording.EventAPINotSupportedErr); ok { + ap.log.Warnf(apiSupportErr.Error()) + fail = false + } else { + err := fmt.Errorf("error sending event to recorder with address %q: %v", addr.String(), err) + errs = append(errs, err) + } + } else { + return nil + } + } + + merr := errors.Join(errs...) + if fail && failOpen { + msg := fmt.Sprintf("[unexpected] failed to send event to recorders with errors: %s", merr.Error()) + msg = msg + "; failure mode is 'fail open'; continuing request without recording." 
+ ap.log.Warn(msg) + return nil + } + + return merr +} + func (ap *APIServerProxy) addImpersonationHeadersAsRequired(r *http.Request) { r.URL.Scheme = ap.upstreamURL.Scheme r.URL.Host = ap.upstreamURL.Host diff --git a/k8s-operator/api-proxy/proxy_events_test.go b/k8s-operator/api-proxy/proxy_events_test.go new file mode 100644 index 000000000..230927dc0 --- /dev/null +++ b/k8s-operator/api-proxy/proxy_events_test.go @@ -0,0 +1,548 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package apiproxy + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "net/http" + "net/http/httptest" + "net/netip" + "net/url" + "reflect" + "testing" + + "go.uber.org/zap" + "tailscale.com/client/tailscale/apitype" + "tailscale.com/net/netx" + "tailscale.com/sessionrecording" + "tailscale.com/tailcfg" + "tailscale.com/tsnet" +) + +type fakeSender struct { + sent map[netip.AddrPort][]byte + err error + calls int +} + +func (s *fakeSender) Send(ap netip.AddrPort, event io.Reader, dial netx.DialFunc) error { + s.calls++ + if s.err != nil { + return s.err + } + if s.sent == nil { + s.sent = make(map[netip.AddrPort][]byte) + } + data, _ := io.ReadAll(event) + s.sent[ap] = data + return nil +} + +func (s *fakeSender) Reset() { + s.sent = nil + s.err = nil + s.calls = 0 +} + +func TestRecordRequestAsEvent(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + sender := &fakeSender{} + ap := &APIServerProxy{ + log: zl.Sugar(), + ts: &tsnet.Server{}, + sendEventFunc: sender.Send, + } + + defaultWho := &apitype.WhoIsResponse{ + Node: &tailcfg.Node{ + StableID: "stable-id", + Name: "node.ts.net.", + }, + UserProfile: &tailcfg.UserProfile{ + ID: 1, + LoginName: "user@example.com", + }, + CapMap: tailcfg.PeerCapMap{ + tailcfg.PeerCapabilityKubernetes: []tailcfg.RawMessage{ + tailcfg.RawMessage(`{"recorderAddrs":["127.0.0.1:1234"]}`), + tailcfg.RawMessage(`{"enforceRecorder": true}`), + }, + }, + } + + defaultSource := sessionrecording.Source{ + Node: "node.ts.net", + NodeID: "stable-id", + NodeUser: "user@example.com", + NodeUserID: 1, + } + + tests := []struct { + name string + req func() *http.Request + who *apitype.WhoIsResponse + setupSender func() + wantErr bool + wantEvent *sessionrecording.Event + wantNumCalls int + }{ + { + name: "request-with-dot-in-name", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/namespaces/default/pods/foo.bar", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/namespaces/default/pods/foo.bar", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/namespaces/default/pods/foo.bar", + Verb: "get", + APIPrefix: "api", + APIVersion: "v1", + Namespace: "default", + Resource: "pods", + Name: "foo.bar", + Parts: []string{"pods", "foo.bar"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-dash-in-name", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/namespaces/default/pods/foo-bar", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: 
"/api/v1/namespaces/default/pods/foo-bar", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/namespaces/default/pods/foo-bar", + Verb: "get", + APIPrefix: "api", + APIVersion: "v1", + Namespace: "default", + Resource: "pods", + Name: "foo-bar", + Parts: []string{"pods", "foo-bar"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-query-parameter", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods?watch=true", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods?watch=true", + Body: nil, + QueryParameters: url.Values{"watch": []string{"true"}}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "watch", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-label-selector", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods?labelSelector=app%3Dfoo", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods?labelSelector=app%3Dfoo", + Body: nil, + QueryParameters: url.Values{"labelSelector": []string{"app=foo"}}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + LabelSelector: "app=foo", + }, + Source: defaultSource, + }, + }, + { + name: "request-with-field-selector", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods?fieldSelector=status.phase%3DRunning", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods?fieldSelector=status.phase%3DRunning", + Body: nil, + QueryParameters: url.Values{"fieldSelector": []string{"status.phase=Running"}}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + FieldSelector: "status.phase=Running", + }, + Source: defaultSource, + }, + }, + { + name: "request-for-non-existent-resource", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/foo", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/foo", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/foo", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "foo", + Parts: []string{"foo"}, + }, + Source: defaultSource, + }, + }, + { + name: "basic-request", + req: func() *http.Request { + return 
httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + }, + Source: defaultSource, + }, + }, + { + name: "multiple-recorders", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: &apitype.WhoIsResponse{ + Node: defaultWho.Node, + UserProfile: defaultWho.UserProfile, + CapMap: tailcfg.PeerCapMap{ + tailcfg.PeerCapabilityKubernetes: []tailcfg.RawMessage{ + tailcfg.RawMessage(`{"recorderAddrs":["127.0.0.1:1234", "127.0.0.1:5678"]}`), + }, + }, + }, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + }, + { + name: "request-with-body", + req: func() *http.Request { + req := httptest.NewRequest("POST", "/api/v1/pods", bytes.NewBufferString(`{"foo":"bar"}`)) + req.Header.Set("Content-Type", "application/json") + return req + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "POST", + Path: "/api/v1/pods", + Body: json.RawMessage(`{"foo":"bar"}`), + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "create", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + }, + Source: defaultSource, + }, + }, + { + name: "tagged-node", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: &apitype.WhoIsResponse{ + Node: &tailcfg.Node{ + StableID: "stable-id", + Name: "node.ts.net.", + Tags: []string{"tag:foo"}, + }, + UserProfile: &tailcfg.UserProfile{}, + CapMap: defaultWho.CapMap, + }, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + }, + Source: sessionrecording.Source{ + Node: "node.ts.net", + NodeID: "stable-id", + NodeTags: []string{"tag:foo"}, + }, + }, + }, + { + name: "no-recorders", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: &apitype.WhoIsResponse{ + Node: defaultWho.Node, + UserProfile: defaultWho.UserProfile, + CapMap: tailcfg.PeerCapMap{}, + }, + setupSender: func() { sender.Reset() }, + wantNumCalls: 0, + }, + { + name: "error-sending", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: defaultWho, + setupSender: func() { + sender.Reset() + sender.err = errors.New("send error") + }, + wantErr: true, + wantNumCalls: 1, + }, + { + name: "request-for-crd", + req: func() *http.Request { + return httptest.NewRequest("GET", "/apis/custom.example.com/v1/myresources", nil) + }, + 
who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/apis/custom.example.com/v1/myresources", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/apis/custom.example.com/v1/myresources", + Verb: "list", + APIPrefix: "apis", + APIGroup: "custom.example.com", + APIVersion: "v1", + Resource: "myresources", + Parts: []string{"myresources"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-proxy-verb", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/namespaces/default/pods/foo/proxy", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/namespaces/default/pods/foo/proxy", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/namespaces/default/pods/foo/proxy", + Verb: "get", + APIPrefix: "api", + APIVersion: "v1", + Namespace: "default", + Resource: "pods", + Subresource: "proxy", + Name: "foo", + Parts: []string{"pods", "foo", "proxy"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-complex-path", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/namespaces/default/services/foo:8080/proxy-subpath/more/segments", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/namespaces/default/services/foo:8080/proxy-subpath/more/segments", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/namespaces/default/services/foo:8080/proxy-subpath/more/segments", + Verb: "get", + APIPrefix: "api", + APIVersion: "v1", + Namespace: "default", + Resource: "services", + Subresource: "proxy-subpath", + Name: "foo:8080", + Parts: []string{"services", "foo:8080", "proxy-subpath", "more", "segments"}, + }, + Source: defaultSource, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.setupSender() + + req := tt.req() + err := ap.recordRequestAsEvent(req, tt.who) + + if (err != nil) != tt.wantErr { + t.Fatalf("recordRequestAsEvent() error = %v, wantErr %v", err, tt.wantErr) + } + + if sender.calls != tt.wantNumCalls { + t.Fatalf("expected %d calls to sender, got %d", tt.wantNumCalls, sender.calls) + } + + if tt.wantEvent != nil { + for _, sentData := range sender.sent { + var got sessionrecording.Event + if err := json.Unmarshal(sentData, &got); err != nil { + t.Fatalf("failed to unmarshal sent event: %v", err) + } + + got.Timestamp = 0 + tt.wantEvent.Timestamp = got.Timestamp + + got.UserAgent = "" + tt.wantEvent.UserAgent = "" + + if !bytes.Equal(got.Request.Body, tt.wantEvent.Request.Body) { + t.Errorf("sent event body does not match wanted event body.\nGot: %s\nWant: %s", string(got.Request.Body), string(tt.wantEvent.Request.Body)) + } + got.Request.Body = nil + tt.wantEvent.Request.Body = nil + + if !reflect.DeepEqual(&got, tt.wantEvent) { + t.Errorf("sent event 
does not match wanted event.\nGot: %#v\nWant: %#v", &got, tt.wantEvent) + } + } + } + }) + } +} diff --git a/sessionrecording/connect.go b/sessionrecording/connect.go index a470969d8..8abf9dd7e 100644 --- a/sessionrecording/connect.go +++ b/sessionrecording/connect.go @@ -110,6 +110,97 @@ func supportsV2(ctx context.Context, hc *http.Client, ap netip.AddrPort) bool { return resp.StatusCode == http.StatusOK && resp.ProtoMajor > 1 } +// supportsEvent checks whether a recorder instance supports the /v2/event +// endpoint. +func supportsEvent(ctx context.Context, hc *http.Client, ap netip.AddrPort) (bool, error) { + ctx, cancel := context.WithTimeout(ctx, http2ProbeTimeout) + defer cancel() + req, err := http.NewRequestWithContext(ctx, httpm.HEAD, fmt.Sprintf("http://%s/v2/event", ap), nil) + if err != nil { + return false, err + } + resp, err := hc.Do(req) + if err != nil { + return false, err + } + + defer resp.Body.Close() + + if resp.StatusCode == http.StatusOK { + return true, nil + } + + if resp.StatusCode != http.StatusNotFound { + body, err := io.ReadAll(resp.Body) + if err != nil { + // Handle the case where reading the body itself fails + return false, fmt.Errorf("server returned non-OK status: %s, and failed to read body: %w", resp.Status, err) + } + + return false, fmt.Errorf("server returned non-OK status: %d: %s", resp.StatusCode, string(body)) + } + + return false, nil +} + +const addressNotSupportEventv2 = `recorder at address %q does not support "/v2/event" endpoint` + +type EventAPINotSupportedErr struct { + ap netip.AddrPort +} + +func (e EventAPINotSupportedErr) Error() string { + return fmt.Sprintf(addressNotSupportEventv2, e.ap) +} + +// SendEvent sends an event the tsrecorders /v2/event endpoint. +func SendEvent(ap netip.AddrPort, event io.Reader, dial netx.DialFunc) (retErr error) { + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + if retErr != nil { + cancel() + } + }() + + client := clientHTTP1(ctx, dial) + + supported, err := supportsEvent(ctx, client, ap) + if err != nil { + return fmt.Errorf("error checking support for `/v2/event` endpoint: %w", err) + } + + if !supported { + return EventAPINotSupportedErr{ + ap: ap, + } + } + + req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("http://%s/v2/event", ap.String()), event) + if err != nil { + return fmt.Errorf("error creating request: %w", err) + } + + req.Header.Set("Content-Type", "application/json") + + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("error sending request: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + // Handle the case where reading the body itself fails + return fmt.Errorf("server returned non-OK status: %s, and failed to read body: %w", resp.Status, err) + } + + return fmt.Errorf("server returned non-OK status: %d: %s", resp.StatusCode, string(body)) + } + + return nil +} + // connectV1 connects to the legacy /record endpoint on the recorder. It is // used for backwards-compatibility with older tsrecorder instances. 
// diff --git a/sessionrecording/connect_test.go b/sessionrecording/connect_test.go index c0fcf6d40..cacf061d7 100644 --- a/sessionrecording/connect_test.go +++ b/sessionrecording/connect_test.go @@ -9,11 +9,13 @@ import ( "crypto/rand" "crypto/sha256" "encoding/json" + "fmt" "io" "net" "net/http" "net/http/httptest" "net/netip" + "strings" "testing" "time" @@ -148,9 +150,9 @@ func TestConnectToRecorder(t *testing.T) { // Wire up h2c-compatible HTTP/2 server. This is optional // because the v1 recorder didn't support HTTP/2 and we try to // mimic that. - h2s := &http2.Server{} - srv.Config.Handler = h2c.NewHandler(mux, h2s) - if err := http2.ConfigureServer(srv.Config, h2s); err != nil { + s := &http2.Server{} + srv.Config.Handler = h2c.NewHandler(mux, s) + if err := http2.ConfigureServer(srv.Config, s); err != nil { t.Errorf("configuring HTTP/2 support in server: %v", err) } } @@ -187,3 +189,97 @@ func TestConnectToRecorder(t *testing.T) { }) } } + +func TestSendEvent(t *testing.T) { + t.Run("supported", func(t *testing.T) { + eventBody := `{"foo":"bar"}` + eventRecieved := make(chan []byte, 1) + mux := http.NewServeMux() + mux.HandleFunc("HEAD /v2/event", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + mux.HandleFunc("POST /v2/event", func(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(r.Body) + if err != nil { + t.Error(err) + } + eventRecieved <- body + w.WriteHeader(http.StatusOK) + }) + + srv := httptest.NewUnstartedServer(mux) + s := &http2.Server{} + srv.Config.Handler = h2c.NewHandler(mux, s) + if err := http2.ConfigureServer(srv.Config, s); err != nil { + t.Fatalf("configuring HTTP/2 support in server: %v", err) + } + srv.Start() + t.Cleanup(srv.Close) + + d := new(net.Dialer) + addr := netip.MustParseAddrPort(srv.Listener.Addr().String()) + err := SendEvent(addr, bytes.NewBufferString(eventBody), d.DialContext) + if err != nil { + t.Fatalf("SendEvent: %v", err) + } + + if recv := string(<-eventRecieved); recv != eventBody { + t.Errorf("mismatch in event body, sent %q, received %q", eventBody, recv) + } + }) + + t.Run("not_supported", func(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("HEAD /v2/event", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + }) + + srv := httptest.NewUnstartedServer(mux) + s := &http2.Server{} + srv.Config.Handler = h2c.NewHandler(mux, s) + if err := http2.ConfigureServer(srv.Config, s); err != nil { + t.Fatalf("configuring HTTP/2 support in server: %v", err) + } + srv.Start() + t.Cleanup(srv.Close) + + d := new(net.Dialer) + addr := netip.MustParseAddrPort(srv.Listener.Addr().String()) + err := SendEvent(addr, nil, d.DialContext) + if err == nil { + t.Fatal("expected an error, got nil") + } + if !strings.Contains(err.Error(), fmt.Sprintf(addressNotSupportEventv2, srv.Listener.Addr().String())) { + t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("server_error", func(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("HEAD /v2/event", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + mux.HandleFunc("POST /v2/event", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + }) + + srv := httptest.NewUnstartedServer(mux) + s := &http2.Server{} + srv.Config.Handler = h2c.NewHandler(mux, s) + if err := http2.ConfigureServer(srv.Config, s); err != nil { + t.Fatalf("configuring HTTP/2 support in server: %v", err) + } + srv.Start() + t.Cleanup(srv.Close) + + d := 
new(net.Dialer) + addr := netip.MustParseAddrPort(srv.Listener.Addr().String()) + err := SendEvent(addr, nil, d.DialContext) + if err == nil { + t.Fatal("expected an error, got nil") + } + if !strings.Contains(err.Error(), "server returned non-OK status") { + t.Fatalf("unexpected error: %v", err) + } + }) +} diff --git a/sessionrecording/event.go b/sessionrecording/event.go new file mode 100644 index 000000000..41d8f2d58 --- /dev/null +++ b/sessionrecording/event.go @@ -0,0 +1,104 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package sessionrecording + +import ( + "net/url" + + "tailscale.com/tailcfg" +) + +const ( + KubernetesAPIEventType = "kubernetes-api-request" +) + +// Event represents the top-level structure of a tsrecorder event. +type Event struct { + // Type specifies the kind of event being recorded (e.g., "kubernetes-api-request"). + Type string `json:"type"` + + // ID is a reference of the path that this event is stored at in tsrecorder + ID string `json:"id"` + + // Timestamp is the time when the event was recorded represented as a unix timestamp. + Timestamp int64 `json:"timestamp"` + + // UserAgent is the UerAgent specified in the request, which helps identify + // the client software that initiated the request. + UserAgent string `json:"userAgent"` + + // Request holds details of the HTTP request. + Request Request `json:"request"` + + // Kubernetes contains Kubernetes-specific information about the request (if + // the type is `kubernetes-api-request`) + Kubernetes KubernetesRequestInfo `json:"kubernetes"` + + // Source provides details about the client that initiated the request. + Source Source `json:"source"` +} + +// copied from https://github.com/kubernetes/kubernetes/blob/11ade2f7dd264c2f52a4a1342458abbbaa3cb2b1/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go#L44 +// KubernetesRequestInfo contains Kubernetes specific information in the request (if the type is `kubernetes-api-request`) +type KubernetesRequestInfo struct { + // IsResourceRequest indicates whether or not the request is for an API resource or subresource + IsResourceRequest bool + // Path is the URL path of the request + Path string + // Verb is the kube verb associated with the request for API requests, not the http verb. This includes things like list and watch. + // for non-resource requests, this is the lowercase http verb + Verb string + + APIPrefix string + APIGroup string + APIVersion string + + Namespace string + // Resource is the name of the resource being requested. This is not the kind. For example: pods + Resource string + // Subresource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind. + // For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" + // (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding". + Subresource string + // Name is empty for some verbs, but if the request directly indicates a name (not in body content) then this field is filled in. + Name string + // Parts are the path parts for the request, always starting with /{resource}/{name} + Parts []string + + // FieldSelector contains the unparsed field selector from a request. 
It is only present if the apiserver + // honors field selectors for the verb this request is associated with. + FieldSelector string + // LabelSelector contains the unparsed field selector from a request. It is only present if the apiserver + // honors field selectors for the verb this request is associated with. + LabelSelector string +} + +type Source struct { + // Node is the FQDN of the node originating the connection. + // It is also the MagicDNS name for the node. + // It does not have a trailing dot. + // e.g. "host.tail-scale.ts.net" + Node string `json:"node"` + + // NodeID is the node ID of the node originating the connection. + NodeID tailcfg.StableNodeID `json:"nodeID"` + + // Tailscale-specific fields: + // NodeTags is the list of tags on the node originating the connection (if any). + NodeTags []string `json:"nodeTags,omitempty"` + + // NodeUserID is the user ID of the node originating the connection (if not tagged). + NodeUserID tailcfg.UserID `json:"nodeUserID,omitempty"` // if not tagged + + // NodeUser is the LoginName of the node originating the connection (if not tagged). + NodeUser string `json:"nodeUser,omitempty"` +} + +// Request holds information about a request. +type Request struct { + Method string `json:"method"` + Path string `json:"path"` + Body []byte `json:"body"` + QueryParameters url.Values `json:"queryParameters"` +} diff --git a/sessionrecording/header.go b/sessionrecording/header.go index 545bf06bd..220852216 100644 --- a/sessionrecording/header.go +++ b/sessionrecording/header.go @@ -62,7 +62,6 @@ type CastHeader struct { ConnectionID string `json:"connectionID"` // Fields that are only set for Kubernetes API server proxy session recordings: - Kubernetes *Kubernetes `json:"kubernetes,omitempty"` } diff --git a/shell.nix b/shell.nix index 1891111b2..ec345998a 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-jsmQ0S1Uh1cU/kr0onYLJY9VYcFx297QZjQALM3wX10= +# nix-direnv cache busting line: sha256-rV3C2Vi48FCifGt58OdEO4+Av0HRIs8sUJVvp/gEBLw= From 0586d5d40d0f3804a94a0a074b539fa81e547118 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Wed, 8 Oct 2025 15:15:42 +0100 Subject: [PATCH 1518/1708] k8s-operator/sessionrecording: gives the connection to the recorder from the hijacker a dedicated context (#17403) The hijacker on k8s-proxy's reverse proxy is used to stream recordings to tsrecorder as they pass through the proxy to the kubernetes api server. The connection to the recorder was using the client's (e.g., kubectl) context, rather than a dedicated one. This was causing the recording stream to get cut off in scenarios where the client cancelled the context before streaming could be completed. By using a dedicated context, we can continue streaming even if the client cancels the context (for example if the client request completes). 
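As a minimal sketch of that pattern (simplified and elided, not the hijacker's exact code; the real change is in the diff below), the setup function creates its own context and ties its lifetime only to the recording goroutine:

    func (h *Hijacker) setUpRecording(conn net.Conn) (_ net.Conn, retErr error) {
        // Deliberately not derived from h.req.Context(): the recording
        // stream must be able to outlive the client's request.
        ctx, cancel := context.WithCancel(context.Background())
        defer func() {
            if retErr != nil {
                cancel() // setup failed; release the context right away
            }
        }()
        // ... connect to the recorder using ctx and wrap conn ...
        go func() {
            defer cancel() // release the context once streaming finishes
            // ... stream the session to tsrecorder ...
        }()
        return conn, nil
    }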
Fixes #17404 Signed-off-by: chaosinthecrd --- k8s-operator/sessionrecording/hijacker.go | 13 +++++++++++-- k8s-operator/sessionrecording/hijacker_test.go | 2 +- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index ebd77641b..2d6c94710 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -122,7 +122,7 @@ func (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { return nil, nil, fmt.Errorf("error hijacking connection: %w", err) } - conn, err := h.setUpRecording(h.req.Context(), reqConn) + conn, err := h.setUpRecording(reqConn) if err != nil { return nil, nil, fmt.Errorf("error setting up session recording: %w", err) } @@ -133,7 +133,7 @@ func (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { // spdyHijacker.addrs. Returns conn from provided opts, wrapped in recording // logic. If connecting to the recorder fails or an error is received during the // session and spdyHijacker.failOpen is false, connection will be closed. -func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, error) { +func (h *Hijacker) setUpRecording(conn net.Conn) (_ net.Conn, retErr error) { const ( // https://docs.asciinema.org/manual/asciicast/v2/ asciicastv2 = 2 @@ -147,6 +147,14 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, errChan <-chan error ) h.log.Infof("kubectl %s session will be recorded, recorders: %v, fail open policy: %t", h.sessionType, h.addrs, h.failOpen) + // NOTE: (ChaosInTheCRD) we want to use a dedicated context here, rather than the context from the request, + // otherwise the context can be cancelled by the client (kubectl) while we are still streaming to tsrecorder. + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + if retErr != nil { + cancel() + } + }() qp := h.req.URL.Query() container := strings.Join(qp[containerKey], "") var recorderAddr net.Addr @@ -213,6 +221,7 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, } go func() { + defer cancel() var err error select { case <-ctx.Done(): diff --git a/k8s-operator/sessionrecording/hijacker_test.go b/k8s-operator/sessionrecording/hijacker_test.go index cac6f55c7..fb45820a7 100644 --- a/k8s-operator/sessionrecording/hijacker_test.go +++ b/k8s-operator/sessionrecording/hijacker_test.go @@ -95,7 +95,7 @@ func Test_Hijacker(t *testing.T) { proto: tt.proto, } ctx := context.Background() - _, err := h.setUpRecording(ctx, tc) + _, err := h.setUpRecording(tc) if (err != nil) != tt.wantsSetupErr { t.Errorf("spdyHijacker.setupRecording() error = %v, wantErr %v", err, tt.wantsSetupErr) return From 2d1014ead197a25350ab6e45efeaab3077244776 Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Wed, 8 Oct 2025 15:34:50 +0100 Subject: [PATCH 1519/1708] ipn/ipnlocal: fix data race on captiveCtx in enterStateLockedOnEntry (#17495) Updates #17491 Signed-off-by: James Sanderson --- ipn/ipnlocal/local.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 6f991ffae..e04ef9e6c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5573,8 +5573,9 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock // can be shut down if we transition away from Running. 
if buildfeatures.HasCaptivePortal { if b.captiveCancel == nil { - b.captiveCtx, b.captiveCancel = context.WithCancel(b.ctx) - b.goTracker.Go(func() { hookCheckCaptivePortalLoop.Get()(b, b.captiveCtx) }) + captiveCtx, captiveCancel := context.WithCancel(b.ctx) + b.captiveCtx, b.captiveCancel = captiveCtx, captiveCancel + b.goTracker.Go(func() { hookCheckCaptivePortalLoop.Get()(b, captiveCtx) }) } } } else if oldState == ipn.Running { From 2a3d67e9b78a7f8d9a2f20ebcc8658f409fe4d1a Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 7 Oct 2025 16:50:34 -0700 Subject: [PATCH 1520/1708] wgengine: use eventbus.SubscribeFunc in userspaceEngine Updates #15160 Updates #17487 Change-Id: Id852098c4f9c2fdeab9151b0b8c14dceff73b99d Signed-off-by: M. J. Fromberger --- wgengine/userspace.go | 39 +++++++++++---------------------------- 1 file changed, 11 insertions(+), 28 deletions(-) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index b8a136da7..fa2379288 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -94,9 +94,8 @@ const networkLoggerUploadTimeout = 5 * time.Second type userspaceEngine struct { // eventBus will eventually become required, but for now may be nil. - // TODO(creachadair): Enforce that this is non-nil at construction. - eventBus *eventbus.Bus - eventSubs eventbus.Monitor + eventBus *eventbus.Bus + eventClient *eventbus.Client logf logger.Logf wgLogger *wglog.Logger // a wireguard-go logging wrapper @@ -539,34 +538,18 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } } - cli := e.eventBus.Client("userspaceEngine") - e.eventSubs = cli.Monitor(e.consumeEventbusTopics(cli)) + ec := e.eventBus.Client("userspaceEngine") + eventbus.SubscribeFunc(ec, func(cd netmon.ChangeDelta) { + if f, ok := feature.HookProxyInvalidateCache.GetOk(); ok { + f() + } + e.linkChange(&cd) + }) + e.eventClient = ec e.logf("Engine created.") return e, nil } -// consumeEventbusTopics consumes events from all relevant -// [eventbus.Subscriber]'s and passes them to their related handler. Events are -// always handled in the order they are received, i.e. the next event is not -// read until the previous event's handler has returned. It returns when the -// [eventbus.Client] is closed. -func (e *userspaceEngine) consumeEventbusTopics(cli *eventbus.Client) func(*eventbus.Client) { - changeDeltaSub := eventbus.Subscribe[netmon.ChangeDelta](cli) - return func(cli *eventbus.Client) { - for { - select { - case <-cli.Done(): - return - case changeDelta := <-changeDeltaSub.Events(): - if f, ok := feature.HookProxyInvalidateCache.GetOk(); ok { - f() - } - e.linkChange(&changeDelta) - } - } - } -} - // echoRespondToAll is an inbound post-filter responding to all echo requests. func echoRespondToAll(p *packet.Parsed, t *tstun.Wrapper, gro *gro.GRO) (filter.Response, *gro.GRO) { if p.IsEchoRequest() { @@ -1257,7 +1240,7 @@ func (e *userspaceEngine) RequestStatus() { } func (e *userspaceEngine) Close() { - e.eventSubs.Close() + e.eventClient.Close() e.mu.Lock() if e.closing { e.mu.Unlock() From 583373057741016248bc0ce21adab2e48b1b7391 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 7 Oct 2025 16:55:07 -0700 Subject: [PATCH 1521/1708] wgengine/router: use eventbus.SubscribeFunc in linuxRouter Updates #15160 Updates #17487 Change-Id: Ib798e2321e55a078c8bd37f366fe4e73054e4520 Signed-off-by: M. J. 
Fromberger --- wgengine/router/osrouter/router_linux.go | 40 +++++++----------------- 1 file changed, 12 insertions(+), 28 deletions(-) diff --git a/wgengine/router/osrouter/router_linux.go b/wgengine/router/osrouter/router_linux.go index 835a9050f..58bd0513a 100644 --- a/wgengine/router/osrouter/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -60,7 +60,7 @@ type linuxRouter struct { tunname string netMon *netmon.Monitor health *health.Tracker - eventSubs eventbus.Monitor + eventClient *eventbus.Client rulesAddedPub *eventbus.Publisher[AddIPRules] unregNetMon func() @@ -120,7 +120,16 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon } ec := bus.Client("router-linux") r.rulesAddedPub = eventbus.Publish[AddIPRules](ec) - r.eventSubs = ec.Monitor(r.consumeEventbusTopics(ec)) + eventbus.SubscribeFunc(ec, func(rs netmon.RuleDeleted) { + r.onIPRuleDeleted(rs.Table, rs.Priority) + }) + eventbus.SubscribeFunc(ec, func(pu router.PortUpdate) { + r.logf("portUpdate(port=%v, network=%s)", pu.UDPPort, pu.EndpointNetwork) + if err := r.updateMagicsockPort(pu.UDPPort, pu.EndpointNetwork); err != nil { + r.logf("updateMagicsockPort(port=%v, network=%s) failed: %v", pu.UDPPort, pu.EndpointNetwork, err) + } + }) + r.eventClient = ec if r.useIPCommand() { r.ipRuleAvailable = (cmd.run("ip", "rule") == nil) @@ -164,31 +173,6 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon return r, nil } -// consumeEventbusTopics consumes events from all [Conn]-relevant -// [eventbus.Subscriber]'s and passes them to their related handler. Events are -// always handled in the order they are received, i.e. the next event is not -// read until the previous event's handler has returned. It returns when the -// [eventbus.Client] is closed. -func (r *linuxRouter) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { - ruleDeletedSub := eventbus.Subscribe[netmon.RuleDeleted](ec) - portUpdateSub := eventbus.Subscribe[router.PortUpdate](ec) - return func(ec *eventbus.Client) { - for { - select { - case <-ec.Done(): - return - case rs := <-ruleDeletedSub.Events(): - r.onIPRuleDeleted(rs.Table, rs.Priority) - case pu := <-portUpdateSub.Events(): - r.logf("portUpdate(port=%v, network=%s)", pu.UDPPort, pu.EndpointNetwork) - if err := r.updateMagicsockPort(pu.UDPPort, pu.EndpointNetwork); err != nil { - r.logf("updateMagicsockPort(port=%v, network=%s) failed: %v", pu.UDPPort, pu.EndpointNetwork, err) - } - } - } - } -} - // ipCmdSupportsFwmask returns true if the system 'ip' binary supports using a // fwmark stanza with a mask specified. To our knowledge, everything except busybox // pre-1.33 supports this. @@ -385,7 +369,7 @@ func (r *linuxRouter) Close() error { if r.unregNetMon != nil { r.unregNetMon() } - r.eventSubs.Close() + r.eventClient.Close() if err := r.downInterface(); err != nil { return err } From 241ea1c98bdfc6e28497340aa57ff46b7604ed68 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 7 Oct 2025 17:03:39 -0700 Subject: [PATCH 1522/1708] wgengine/magicsock: use eventbus.SubscribeFunc in Conn Updates #15160 Updates #17487 Change-Id: Ic9eb8d82b21d9dc38cb3c681b87101dfbc95af16 Signed-off-by: M. J. 
Fromberger --- wgengine/magicsock/magicsock.go | 71 ++++++++++----------------------- 1 file changed, 21 insertions(+), 50 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index c7d07c277..492dff2ce 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -156,7 +156,7 @@ type Conn struct { // struct. Initialized once at construction, then constant. eventBus *eventbus.Bus - eventSubs eventbus.Monitor + eventClient *eventbus.Client logf logger.Logf epFunc func([]tailcfg.Endpoint) derpActiveFunc func() @@ -625,43 +625,6 @@ func newConn(logf logger.Logf) *Conn { return c } -// consumeEventbusTopics consumes events from all [Conn]-relevant -// [eventbus.Subscriber]'s and passes them to their related handler. Events are -// always handled in the order they are received, i.e. the next event is not -// read until the previous event's handler has returned. It returns when the -// [eventbus.Client] is closed. -func (c *Conn) consumeEventbusTopics(cli *eventbus.Client) func(*eventbus.Client) { - // Subscribe calls must return before NewConn otherwise published - // events can be missed. - pmSub := eventbus.Subscribe[portmappertype.Mapping](cli) - filterSub := eventbus.Subscribe[FilterUpdate](cli) - nodeViewsSub := eventbus.Subscribe[NodeViewsUpdate](cli) - nodeMutsSub := eventbus.Subscribe[NodeMutationsUpdate](cli) - syncSub := eventbus.Subscribe[syncPoint](cli) - allocRelayEndpointSub := eventbus.Subscribe[UDPRelayAllocResp](cli) - return func(cli *eventbus.Client) { - for { - select { - case <-cli.Done(): - return - case <-pmSub.Events(): - c.onPortMapChanged() - case filterUpdate := <-filterSub.Events(): - c.onFilterUpdate(filterUpdate) - case nodeViews := <-nodeViewsSub.Events(): - c.onNodeViewsUpdate(nodeViews) - case nodeMuts := <-nodeMutsSub.Events(): - c.onNodeMutationsUpdate(nodeMuts) - case syncPoint := <-syncSub.Events(): - c.dlogf("magicsock: received sync point after reconfig") - syncPoint.Signal() - case allocResp := <-allocRelayEndpointSub.Events(): - c.onUDPRelayAllocResp(allocResp) - } - } - } -} - func (c *Conn) onUDPRelayAllocResp(allocResp UDPRelayAllocResp) { c.mu.Lock() defer c.mu.Unlock() @@ -726,11 +689,20 @@ func NewConn(opts Options) (*Conn, error) { // Set up publishers and subscribers. Subscribe calls must return before // NewConn otherwise published events can be missed. - cli := c.eventBus.Client("magicsock.Conn") - c.syncPub = eventbus.Publish[syncPoint](cli) - c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](cli) - c.portUpdatePub = eventbus.Publish[router.PortUpdate](cli) - c.eventSubs = cli.Monitor(c.consumeEventbusTopics(cli)) + ec := c.eventBus.Client("magicsock.Conn") + c.eventClient = ec + c.syncPub = eventbus.Publish[syncPoint](ec) + c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](ec) + c.portUpdatePub = eventbus.Publish[router.PortUpdate](ec) + eventbus.SubscribeFunc(ec, c.onPortMapChanged) + eventbus.SubscribeFunc(ec, c.onFilterUpdate) + eventbus.SubscribeFunc(ec, c.onNodeViewsUpdate) + eventbus.SubscribeFunc(ec, c.onNodeMutationsUpdate) + eventbus.SubscribeFunc(ec, func(sp syncPoint) { + c.dlogf("magicsock: received sync point after reconfig") + sp.Signal() + }) + eventbus.SubscribeFunc(ec, c.onUDPRelayAllocResp) c.connCtx, c.connCtxCancel = context.WithCancel(context.Background()) c.donec = c.connCtx.Done() @@ -3307,13 +3279,12 @@ func (c *connBind) isClosed() bool { // // Only the first close does anything. Any later closes return nil. 
func (c *Conn) Close() error { - // Close the [eventbus.Client] and wait for c.consumeEventbusTopics to + // Close the [eventbus.Client] to wait for subscribers to // return before acquiring c.mu: - // 1. Conn.consumeEventbusTopics event handlers also acquire c.mu, they can - // deadlock with c.Close(). - // 2. Conn.consumeEventbusTopics event handlers may not guard against - // undesirable post/in-progress Conn.Close() behaviors. - c.eventSubs.Close() + // 1. Event handlers also acquire c.mu, they can deadlock with c.Close(). + // 2. Event handlers may not guard against undesirable post/in-progress + // Conn.Close() behaviors. + c.eventClient.Close() c.mu.Lock() defer c.mu.Unlock() @@ -3410,7 +3381,7 @@ func (c *Conn) shouldDoPeriodicReSTUNLocked() bool { return true } -func (c *Conn) onPortMapChanged() { c.ReSTUN("portmap-changed") } +func (c *Conn) onPortMapChanged(portmappertype.Mapping) { c.ReSTUN("portmap-changed") } // ReSTUN triggers an address discovery. // The provided why string is for debug logging only. From 109cb50d5fd50127652349abe997347bfad52c32 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 7 Oct 2025 17:10:53 -0700 Subject: [PATCH 1523/1708] ipn/ipnlocal: use eventbus.SubscribeFunc in expiryManager Updates #15160 Updates #17487 Change-Id: I8721e3ac1af505630edca7c5cb50695b0aad832a Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/expiry.go | 29 ++++++----------------------- 1 file changed, 6 insertions(+), 23 deletions(-) diff --git a/ipn/ipnlocal/expiry.go b/ipn/ipnlocal/expiry.go index 849e28610..8ea63d21a 100644 --- a/ipn/ipnlocal/expiry.go +++ b/ipn/ipnlocal/expiry.go @@ -43,7 +43,7 @@ type expiryManager struct { logf logger.Logf clock tstime.Clock - eventSubs eventbus.Monitor + eventClient *eventbus.Client } func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager { @@ -53,30 +53,13 @@ func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager { clock: tstime.StdClock{}, } - cli := bus.Client("ipnlocal.expiryManager") - em.eventSubs = cli.Monitor(em.consumeEventbusTopics(cli)) + em.eventClient = bus.Client("ipnlocal.expiryManager") + eventbus.SubscribeFunc(em.eventClient, func(ct controlclient.ControlTime) { + em.onControlTime(ct.Value) + }) return em } -// consumeEventbusTopics consumes events from all relevant -// [eventbus.Subscriber]'s and passes them to their related handler. Events are -// always handled in the order they are received, i.e. the next event is not -// read until the previous event's handler has returned. It returns when the -// [eventbus.Client] is closed. -func (em *expiryManager) consumeEventbusTopics(cli *eventbus.Client) func(*eventbus.Client) { - controlTimeSub := eventbus.Subscribe[controlclient.ControlTime](cli) - return func(cli *eventbus.Client) { - for { - select { - case <-cli.Done(): - return - case time := <-controlTimeSub.Events(): - em.onControlTime(time.Value) - } - } - } -} - // onControlTime is called whenever we receive a new timestamp from the control // server to store the delta. 
func (em *expiryManager) onControlTime(t time.Time) { @@ -245,7 +228,7 @@ func (em *expiryManager) nextPeerExpiry(nm *netmap.NetworkMap, localNow time.Tim return nextExpiry } -func (em *expiryManager) close() { em.eventSubs.Close() } +func (em *expiryManager) close() { em.eventClient.Close() } // ControlNow estimates the current time on the control server, calculated as // localNow + the delta between local and control server clocks as recorded From 9556a0c6da5b5e8186477711c2003a07e5831fda Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 8 Oct 2025 07:50:56 -0700 Subject: [PATCH 1524/1708] control/ts2021: fix data race during concurrent Close and conn ending Fixes tailscale/corp#33125 Change-Id: I9911f5059d5ebe42ecf7db9becb2326cca240765 Signed-off-by: Brad Fitzpatrick --- control/ts2021/client.go | 1 + 1 file changed, 1 insertion(+) diff --git a/control/ts2021/client.go b/control/ts2021/client.go index e0b82b89c..ca10b1d1b 100644 --- a/control/ts2021/client.go +++ b/control/ts2021/client.go @@ -180,6 +180,7 @@ func (nc *Client) Close() error { nc.mu.Lock() live := nc.connPool nc.closed = true + nc.connPool = nil // stop noteConnClosed from mutating it as we loop over it (in live) below nc.mu.Unlock() for _, c := range live { From 57bd875856652e1cc6a6c2ab63ee252dfd6b4980 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Wed, 8 Oct 2025 11:36:38 -0400 Subject: [PATCH 1525/1708] control/controlclient: add missing comment (#17498) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updates #cleanup Signed-off-by: Claus Lensbøl --- control/controlclient/direct.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 5f26e2ba1..61886482d 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -139,7 +139,7 @@ type Options struct { Dialer *tsdial.Dialer // non-nil C2NHandler http.Handler // or nil ControlKnobs *controlknobs.Knobs // or nil to ignore - Bus *eventbus.Bus + Bus *eventbus.Bus // non-nil, for setting up publishers // Observer is called when there's a change in status to report // from the control client. 
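The eventbus.SubscribeFunc migrations above (userspaceEngine, linuxRouter, magicsock.Conn, expiryManager) all reduce to the same shape: the hand-written consumeEventbusTopics select loop over eventbus.Subscribe channels is replaced by one eventbus.SubscribeFunc registration per event type, and shutdown closes the eventbus.Client directly rather than an eventbus.Monitor. A rough sketch of the resulting pattern, where SomeEvent and handle are placeholders rather than real names in the tree:

    ec := bus.Client("mycomponent")
    eventbus.SubscribeFunc(ec, func(ev SomeEvent) {
        handle(ev) // runs for each published SomeEvent
    })
    // ... later, on shutdown:
    ec.Close() // stops delivery and waits for subscribers to return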
From 9a72513fa49b98c906b6d3e1935a12bffd3f53a4 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 8 Oct 2025 07:38:10 -0700 Subject: [PATCH 1526/1708] go.toolchain.rev: bump Go to 1.25.2 Updates tailscale/go#135 Change-Id: I89cfb49b998b2fd0264f8d5f4a61af839cd06626 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 2 +- cmd/stund/depaware.txt | 1 + cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 1 + cmd/tailscaled/depaware-minbox.txt | 1 + cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- go.mod | 2 +- go.toolchain.rev | 2 +- tsnet/depaware.txt | 2 +- 11 files changed, 11 insertions(+), 7 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 278d54b1f..2fa1fed45 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -341,6 +341,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ + internal/saferio from encoding/asn1 internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index da43ac177..d4fdb87fc 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -1119,7 +1119,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 5eadfc0d1..8cd2e49be 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -237,6 +237,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ + internal/saferio from encoding/asn1 internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index b0b4359e4..8c2fb0e92 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -409,7 +409,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 30974287c..fe50dface 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -355,6 +355,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ internal/runtime/syscall from internal/runtime/cgroup+ + internal/saferio from encoding/asn1 internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 
32c84d744..a4999825e 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -389,6 +389,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ internal/runtime/syscall from internal/runtime/cgroup+ + internal/saferio from encoding/asn1 internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 60bf623e2..c7d571f1e 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -680,7 +680,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 0ae8761e5..894b4a078 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -510,7 +510,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/go.mod b/go.mod index 965a447b9..0c6d33fa0 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.25.1 +go 1.25.2 require ( filippo.io/mkcert v1.4.4 diff --git a/go.toolchain.rev b/go.toolchain.rev index 1fd4f3df2..d5de79558 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -aa85d1541af0921f830f053f29d91971fa5838f6 +a80a86e575c5b7b23b78540e947335d22f74d274 diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 339d18877..d602c7b2f 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -503,7 +503,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ LA internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ From 4543ea5c8a2f9c9e45ddc2beb4d0635bd99cd079 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 8 Oct 2025 09:53:32 -0700 Subject: [PATCH 1527/1708] wgengine/magicsock: start peer relay path discovery sooner (#17485) This commit also shuffles the hasPeerRelayServers atomic load to happen sooner, reducing the cost for clients with no peer relay servers. Updates tailscale/corp#33099 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 38 +++++++++++++++++----------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index f4c8b1469..7deafb752 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -879,14 +879,6 @@ func (de *endpoint) setHeartbeatDisabled(v bool) { // discoverUDPRelayPathsLocked starts UDP relay path discovery. 
func (de *endpoint) discoverUDPRelayPathsLocked(now mono.Time) { - if !de.c.hasPeerRelayServers.Load() { - // Changes in this value between its access and the logic following - // are fine, we will eventually do the "right" thing during future path - // discovery. The worst case is we suppress path discovery for the - // current cycle, or we unnecessarily call into [relayManager] and do - // some wasted work. - return - } de.lastUDPRelayPathDiscovery = now lastBest := de.bestAddr lastBestIsTrusted := mono.Now().Before(de.trustBestAddrUntil) @@ -899,6 +891,14 @@ func (de *endpoint) wantUDPRelayPathDiscoveryLocked(now mono.Time) bool { if runtime.GOOS == "js" { return false } + if !de.c.hasPeerRelayServers.Load() { + // Changes in this value between its access and a call to + // [endpoint.discoverUDPRelayPathsLocked] are fine, we will eventually + // do the "right" thing during future path discovery. The worst case is + // we suppress path discovery for the current cycle, or we unnecessarily + // call into [relayManager] and do some wasted work. + return false + } if !de.relayCapable { return false } @@ -1013,14 +1013,18 @@ func (de *endpoint) discoPing(res *ipnstate.PingResult, size int, cb func(*ipnst // order to also try all candidate direct paths. fallthrough default: - // Ping all candidate direct paths. This work overlaps with what - // [de.heartbeat] will periodically fire when it calls - // [de.sendDiscoPingsLocked], but a user-initiated [pingCLI] is a - // "do it now" operation that should not be subject to + // Ping all candidate direct paths and start peer relay path discovery, + // if appropriate. This work overlaps with what [de.heartbeat] will + // periodically fire when it calls [de.sendDiscoPingsLocked] and + // [de.discoveryUDPRelayPathsLocked], but a user-initiated [pingCLI] is + // a "do it now" operation that should not be subject to // [heartbeatInterval] tick or [discoPingInterval] rate-limiting. for ep := range de.endpointState { de.startDiscoPingLocked(epAddr{ap: ep}, now, pingCLI, size, resCB) } + if de.wantUDPRelayPathDiscoveryLocked(now) { + de.discoverUDPRelayPathsLocked(now) + } } } @@ -1046,14 +1050,10 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { } } else if !udpAddr.isDirect() || now.After(de.trustBestAddrUntil) { de.sendDiscoPingsLocked(now, true) + if de.wantUDPRelayPathDiscoveryLocked(now) { + de.discoverUDPRelayPathsLocked(now) + } } - // TODO(jwhited): consider triggering UDP relay path discovery here under - // certain conditions. We currently only trigger it in heartbeat(), which - // is both good and bad. It's good because the first heartbeat() tick is 3s - // after the first packet, which gives us time to discover a UDP direct - // path and potentially avoid what would be wasted UDP relay path discovery - // work. It's bad because we might not discover a UDP direct path, and we - // incur a 3s delay before we try to discover a UDP relay path. de.noteTxActivityExtTriggerLocked(now) de.lastSendAny = now de.mu.Unlock() From 06f12186d9f4672ac0a0a493e29a260ca47afda6 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 6 Oct 2025 17:17:52 +0100 Subject: [PATCH 1528/1708] tstest/integration: test `tailscale up` when device approval is required This patch extends the integration tests for `tailscale up` to include tailnets where new devices need to be approved. It doesn't change the CLI, because it's mostly working correctly already -- these tests are just to prevent future regressions. 
I've added support for `MachineAuthorized` to mock control, and I've refactored `TestOneNodeUpAuth` to be more flexible. It now takes a sequence of steps to run and asserts whether we got a login URL and/or machine approval URL after each step. Updates tailscale/corp#31476 Updates #17361 Signed-off-by: Alex Chan --- tstest/integration/integration.go | 26 +- tstest/integration/integration_test.go | 267 ++++++++++++------ tstest/integration/testcontrol/testcontrol.go | 34 ++- 3 files changed, 224 insertions(+), 103 deletions(-) diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index 3788f6149..374dffebe 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -1099,20 +1099,40 @@ func (tt *trafficTrap) ServeHTTP(w http.ResponseWriter, r *http.Request) { type authURLParserWriter struct { buf bytes.Buffer - fn func(urlStr string) error + // Handle login URLs, and count how many times they were seen + authURLFn func(urlStr string) error + // Handle machine approval URLs, and count how many times they were seen. + deviceApprovalURLFn func(urlStr string) error } +// Note: auth URLs from testcontrol look slightly different to real auth URLs, +// e.g. http://127.0.0.1:60456/auth/96af2ff7e04ae1499a9a var authURLRx = regexp.MustCompile(`(https?://\S+/auth/\S+)`) +// Looks for any device approval URL, which is any URL ending with `/admin` +// e.g. http://127.0.0.1:60456/admin +var deviceApprovalURLRx = regexp.MustCompile(`(https?://\S+/admin)[^\S]`) + func (w *authURLParserWriter) Write(p []byte) (n int, err error) { n, err = w.buf.Write(p) + + defer w.buf.Reset() // so it's not matched again + m := authURLRx.FindSubmatch(w.buf.Bytes()) if m != nil { urlStr := string(m[1]) - w.buf.Reset() // so it's not matched again - if err := w.fn(urlStr); err != nil { + if err := w.authURLFn(urlStr); err != nil { return 0, err } } + + m = deviceApprovalURLRx.FindSubmatch(w.buf.Bytes()) + if m != nil && w.deviceApprovalURLFn != nil { + urlStr := string(m[1]) + if err := w.deviceApprovalURLFn(urlStr); err != nil { + return 0, err + } + } + return n, err } diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index f7c133f5c..46b5c4fc7 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -268,7 +268,65 @@ func TestStateSavedOnStart(t *testing.T) { d1.MustCleanShutdown(t) } +// This handler receives auth URLs, and logs into control. +// +// It counts how many URLs it sees, and will fail the test if it +// sees multiple login URLs. +func completeLogin(t *testing.T, control *testcontrol.Server, counter *atomic.Int32) func(string) error { + return func(urlStr string) error { + t.Logf("saw auth URL %q", urlStr) + if control.CompleteAuth(urlStr) { + if counter.Add(1) > 1 { + err := errors.New("completed multiple auth URLs") + t.Error(err) + return err + } + t.Logf("completed login to %s", urlStr) + return nil + } else { + err := fmt.Errorf("failed to complete initial login to %q", urlStr) + t.Fatal(err) + return err + } + } +} + +// This handler receives device approval URLs, and approves the device. +// +// It counts how many URLs it sees, and will fail the test if it +// sees multiple device approval URLs. 
+func completeDeviceApproval(t *testing.T, node *TestNode, counter *atomic.Int32) func(string) error { + return func(urlStr string) error { + control := node.env.Control + nodeKey := node.MustStatus().Self.PublicKey + t.Logf("saw device approval URL %q", urlStr) + if control.CompleteDeviceApproval(&nodeKey) { + if counter.Add(1) > 1 { + err := errors.New("completed multiple device approval URLs") + t.Error(err) + return err + } + t.Log("completed device approval") + return nil + } else { + err := errors.New("failed to complete device approval") + t.Fatal(err) + return err + } + } +} + func TestOneNodeUpAuth(t *testing.T) { + type step struct { + args []string + // + // Do we expect to log in again with a new /auth/ URL? + wantAuthURL bool + // + // Do we expect to need a device approval URL? + wantDeviceApprovalURL bool + } + for _, tt := range []struct { name string args []string @@ -276,65 +334,112 @@ func TestOneNodeUpAuth(t *testing.T) { // What auth key should we use for control? authKey string // - // Is tailscaled already logged in before we run this `up` command? - alreadyLoggedIn bool + // Do we require device approval in the tailnet? + requireDeviceApproval bool // - // Do we need to log in again with a new /auth/ URL? - needsNewAuthURL bool + // What CLI commands should we run in this test? + steps []step }{ { - name: "up", - args: []string{"up"}, - needsNewAuthURL: true, + name: "up", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true}, + }, + }, + { + name: "up-with-machine-auth", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true, wantDeviceApprovalURL: true}, + }, + requireDeviceApproval: true, + }, + { + name: "up-with-force-reauth", + steps: []step{ + {args: []string{"up", "--force-reauth"}, wantAuthURL: true}, + }, + }, + { + name: "up-with-auth-key", + authKey: "opensesame", + steps: []step{ + {args: []string{"up", "--auth-key=opensesame"}}, + }, }, { - name: "up-with-force-reauth", - args: []string{"up", "--force-reauth"}, - needsNewAuthURL: true, + name: "up-with-auth-key-with-machine-auth", + authKey: "opensesame", + steps: []step{ + { + args: []string{"up", "--auth-key=opensesame"}, + wantAuthURL: false, + wantDeviceApprovalURL: true, + }, + }, + requireDeviceApproval: true, }, { - name: "up-with-auth-key", - args: []string{"up", "--auth-key=opensesame"}, - authKey: "opensesame", - needsNewAuthURL: false, + name: "up-with-force-reauth-and-auth-key", + authKey: "opensesame", + steps: []step{ + {args: []string{"up", "--force-reauth", "--auth-key=opensesame"}}, + }, }, { - name: "up-with-force-reauth-and-auth-key", - args: []string{"up", "--force-reauth", "--auth-key=opensesame"}, - authKey: "opensesame", - needsNewAuthURL: false, + name: "up-after-login", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true}, + {args: []string{"up"}, wantAuthURL: false}, + }, }, { - name: "up-after-login", - args: []string{"up"}, - alreadyLoggedIn: true, - needsNewAuthURL: false, + name: "up-after-login-with-machine-approval", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true, wantDeviceApprovalURL: true}, + {args: []string{"up"}, wantAuthURL: false, wantDeviceApprovalURL: false}, + }, + requireDeviceApproval: true, }, { - name: "up-with-force-reauth-after-login", - args: []string{"up", "--force-reauth"}, - alreadyLoggedIn: true, - needsNewAuthURL: true, + name: "up-with-force-reauth-after-login", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true}, + {args: []string{"up", "--force-reauth"}, wantAuthURL: true}, + }, }, { - name: 
"up-with-auth-key-after-login", - args: []string{"up", "--auth-key=opensesame"}, - authKey: "opensesame", - alreadyLoggedIn: true, - needsNewAuthURL: false, + name: "up-with-force-reauth-after-login-with-machine-approval", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true, wantDeviceApprovalURL: true}, + {args: []string{"up", "--force-reauth"}, wantAuthURL: true, wantDeviceApprovalURL: false}, + }, + requireDeviceApproval: true, }, { - name: "up-with-force-reauth-and-auth-key-after-login", - args: []string{"up", "--force-reauth", "--auth-key=opensesame"}, - authKey: "opensesame", - alreadyLoggedIn: true, - needsNewAuthURL: false, + name: "up-with-auth-key-after-login", + authKey: "opensesame", + steps: []step{ + {args: []string{"up", "--auth-key=opensesame"}}, + {args: []string{"up", "--auth-key=opensesame"}}, + }, + }, + { + name: "up-with-force-reauth-and-auth-key-after-login", + authKey: "opensesame", + steps: []step{ + {args: []string{"up", "--auth-key=opensesame"}}, + {args: []string{"up", "--force-reauth", "--auth-key=opensesame"}}, + }, }, } { tstest.Shard(t) for _, useSeamlessKeyRenewal := range []bool{true, false} { - t.Run(fmt.Sprintf("%s-seamless-%t", tt.name, useSeamlessKeyRenewal), func(t *testing.T) { + name := tt.name + if useSeamlessKeyRenewal { + name += "-with-seamless" + } + t.Run(name, func(t *testing.T) { tstest.Parallel(t) env := NewTestEnv(t, ConfigureControl( @@ -345,6 +450,10 @@ func TestOneNodeUpAuth(t *testing.T) { control.RequireAuth = true } + if tt.requireDeviceApproval { + control.RequireMachineAuth = true + } + control.AllNodesSameUser = true if useSeamlessKeyRenewal { @@ -359,69 +468,45 @@ func TestOneNodeUpAuth(t *testing.T) { d1 := n1.StartDaemon() defer d1.MustCleanShutdown(t) - cmdArgs := append(tt.args, "--login-server="+env.ControlURL()) - - // This handler looks for /auth/ URLs in the stdout from "tailscale up", - // and if it sees them, completes the auth process. - // - // It counts how many auth URLs it's seen. - var authCountAtomic atomic.Int32 - authURLHandler := &authURLParserWriter{fn: func(urlStr string) error { - t.Logf("saw auth URL %q", urlStr) - if env.Control.CompleteAuth(urlStr) { - if authCountAtomic.Add(1) > 1 { - err := errors.New("completed multiple auth URLs") - t.Error(err) - return err - } - t.Logf("completed login to %s", urlStr) - return nil - } else { - err := fmt.Errorf("Failed to complete initial login to %q", urlStr) - t.Fatal(err) - return err + for i, step := range tt.steps { + t.Logf("Running step %d", i) + cmdArgs := append(step.args, "--login-server="+env.ControlURL()) + + t.Logf("Running command: %s", strings.Join(cmdArgs, " ")) + + var authURLCount atomic.Int32 + var deviceApprovalURLCount atomic.Int32 + + handler := &authURLParserWriter{ + authURLFn: completeLogin(t, env.Control, &authURLCount), + deviceApprovalURLFn: completeDeviceApproval(t, n1, &deviceApprovalURLCount), } - }} - - // If we should be logged in at the start of the test case, go ahead - // and run the login command. - // - // Otherwise, just wait for tailscaled to be listening. - if tt.alreadyLoggedIn { - t.Logf("Running initial login: %s", strings.Join(cmdArgs, " ")) + cmd := n1.Tailscale(cmdArgs...) 
- cmd.Stdout = authURLHandler + cmd.Stdout = handler + cmd.Stdout = handler cmd.Stderr = cmd.Stdout if err := cmd.Run(); err != nil { t.Fatalf("up: %v", err) } - authCountAtomic.Store(0) - n1.AwaitRunning() - } else { - n1.AwaitListening() - } - - st := n1.MustStatus() - t.Logf("Status: %s", st.BackendState) - t.Logf("Running command: %s", strings.Join(cmdArgs, " ")) - cmd := n1.Tailscale(cmdArgs...) - cmd.Stdout = authURLHandler - cmd.Stderr = cmd.Stdout - - if err := cmd.Run(); err != nil { - t.Fatalf("up: %v", err) - } - t.Logf("Got IP: %v", n1.AwaitIP4()) + n1.AwaitRunning() - n1.AwaitRunning() + var wantAuthURLCount int32 + if step.wantAuthURL { + wantAuthURLCount = 1 + } + if n := authURLCount.Load(); n != wantAuthURLCount { + t.Errorf("Auth URLs completed = %d; want %d", n, wantAuthURLCount) + } - var expectedAuthUrls int32 - if tt.needsNewAuthURL { - expectedAuthUrls = 1 - } - if n := authCountAtomic.Load(); n != expectedAuthUrls { - t.Errorf("Auth URLs completed = %d; want %d", n, expectedAuthUrls) + var wantDeviceApprovalURLCount int32 + if step.wantDeviceApprovalURL { + wantDeviceApprovalURLCount = 1 + } + if n := deviceApprovalURLCount.Load(); n != wantDeviceApprovalURLCount { + t.Errorf("Device approval URLs completed = %d; want %d", n, wantDeviceApprovalURLCount) + } } }) } diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index ac7804918..58ca956ce 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -50,14 +50,15 @@ const msgLimit = 1 << 20 // encrypted message length limit // Server is a control plane server. Its zero value is ready for use. // Everything is stored in-memory in one tailnet. type Server struct { - Logf logger.Logf // nil means to use the log package - DERPMap *tailcfg.DERPMap // nil means to use prod DERP map - RequireAuth bool - RequireAuthKey string // required authkey for all nodes - Verbose bool - DNSConfig *tailcfg.DNSConfig // nil means no DNS config - MagicDNSDomain string - C2NResponses syncs.Map[string, func(*http.Response)] // token => onResponse func + Logf logger.Logf // nil means to use the log package + DERPMap *tailcfg.DERPMap // nil means to use prod DERP map + RequireAuth bool + RequireAuthKey string // required authkey for all nodes + RequireMachineAuth bool + Verbose bool + DNSConfig *tailcfg.DNSConfig // nil means no DNS config + MagicDNSDomain string + C2NResponses syncs.Map[string, func(*http.Response)] // token => onResponse func // PeerRelayGrants, if true, inserts relay capabilities into the wildcard // grants rules. @@ -686,6 +687,21 @@ func (s *Server) CompleteAuth(authPathOrURL string) bool { return true } +func (s *Server) CompleteDeviceApproval(nodeKey *key.NodePublic) bool { + s.mu.Lock() + defer s.mu.Unlock() + + node, ok := s.nodes[*nodeKey] + if !ok { + return false + } + + sendUpdate(s.updates[node.ID], updateSelfChanged) + + node.MachineAuthorized = true + return true +} + func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key.MachinePublic) { msg, err := io.ReadAll(io.LimitReader(r.Body, msgLimit)) r.Body.Close() @@ -761,7 +777,7 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. 
s.nodes = map[key.NodePublic]*tailcfg.Node{} } _, ok := s.nodes[nk] - machineAuthorized := true // TODO: add Server.RequireMachineAuth + machineAuthorized := !s.RequireMachineAuth if !ok { nodeID := len(s.nodes) + 1 From bb6bd465702d930af0a86acac1a38d1e9c669d97 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 6 Oct 2025 18:36:52 +0100 Subject: [PATCH 1529/1708] tstest/integration: log all the output printed by `tailscale up` Updates tailscale/corp#31476 Updates #17361 Signed-off-by: Alex Chan --- tstest/integration/integration.go | 3 +++ tstest/integration/integration_test.go | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index 374dffebe..6700205cf 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -1098,6 +1098,7 @@ func (tt *trafficTrap) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type authURLParserWriter struct { + t *testing.T buf bytes.Buffer // Handle login URLs, and count how many times they were seen authURLFn func(urlStr string) error @@ -1114,6 +1115,8 @@ var authURLRx = regexp.MustCompile(`(https?://\S+/auth/\S+)`) var deviceApprovalURLRx = regexp.MustCompile(`(https?://\S+/admin)[^\S]`) func (w *authURLParserWriter) Write(p []byte) (n int, err error) { + w.t.Helper() + w.t.Logf("received bytes: %s", string(p)) n, err = w.buf.Write(p) defer w.buf.Reset() // so it's not matched again diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 46b5c4fc7..29a036cd6 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -477,7 +477,7 @@ func TestOneNodeUpAuth(t *testing.T) { var authURLCount atomic.Int32 var deviceApprovalURLCount atomic.Int32 - handler := &authURLParserWriter{ + handler := &authURLParserWriter{t: t, authURLFn: completeLogin(t, env.Control, &authURLCount), deviceApprovalURLFn: completeDeviceApproval(t, n1, &deviceApprovalURLCount), } From b7fe1cea9f17a05d5076c17b95c967013aa1c3d6 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 6 Oct 2025 17:17:52 +0100 Subject: [PATCH 1530/1708] cmd/tailscale/cli: only print authURLs and device approval URLs once This patch fixes several issues related to printing login and device approval URLs, especially when `tailscale up` is interrupted: 1. Only print a login URL that will cause `tailscale up` to complete. Don't print expired URLs or URLs from previous login attempts. 2. Print the device approval URL if you run `tailscale up` after previously completing a login, but before approving the device. 3. Use the correct control URL for device approval if you run a bare `tailscale up` after previously completing a login, but before approving the device. 4. Don't print the device approval URL more than once (or at least, not consecutively). Updates tailscale/corp#31476 Updates #17361 ## How these fixes work This patch went through a lot of trial and error, and there may still be bugs! These notes capture the different scenarios and considerations as we wrote it, which are also captured by integration tests. 1. We were getting stale login URLs from the initial IPN state notification. When the IPN watcher was moved to before Start() in c011369, we mistakenly continued to request the initial state. This is only necessary if you start watching after you call Start(), because you may have missed some notifications. By getting the initial state before calling Start(), we'd get a stale login URL. 
If you clicked that URL, you could complete the login in the control server (if it wasn't expired), but your instance of `tailscale up` would hang, because it's listening for login updates from a different login URL. In this patch, we no longer request the initial state, and so we don't print a stale URL. 2. Once you skip the initial state from IPN, the following sequence: * Run `tailscale up` * Log into a tailnet with device approval * ^C after the device approval URL is printed, but without approving * Run `tailscale up` again means that nothing would ever be printed. `tailscale up` would send tailscaled the pref `WantRunning: true`, but that was already the case so nothing changes. You never get any IPN notifications, and in particular you never get a state change to `NeedsMachineAuth`. This means we'd never print the device approval URL. In this patch, we add a hard-coded rule that if you're doing a simple up (which won't trigger any other IPN notifications) and you start in the `NeedsMachineAuth` state, we print the device approval message without waiting for an IPN notification. 3. Consider the following sequence: * Run `tailscale up --login-server=` * Log into a tailnet with device approval * ^C after the device approval URL is printed, but without approving * Run `tailscale up` again We'd print the device approval URL for the default control server, rather than the real control server, because we were using the `prefs` from the CLI arguments (which are all the defaults) rather than the `curPrefs` (which contain the custom login server). In this patch, we use the `prefs` if the user has specified any settings (and other code will ensure this is a complete set of settings) or `curPrefs` if it's a simple `tailscale up`. 4. Consider the following sequence: you've logged in, but not completed device approval, and you run `down` and `up` in quick succession. * `up`: sees state=NeedsMachineAuth * `up`: sends `{wantRunning: true}`, prints out the device approval URL * `down`: changes state to Stopped * `up`: changes state to Starting * tailscaled: changes state to NeedsMachineAuth * `up`: gets an IPN notification with the state change, and prints a second device approval URL Either URL works, but this is annoying for the user. In this patch, we track whether the last printed URL was the device approval URL, and if so, we skip printing it a second time. Signed-off-by: Alex Chan --- cmd/tailscale/cli/up.go | 48 ++++- tstest/integration/integration_test.go | 181 +++++++++++++++++- tstest/integration/testcontrol/testcontrol.go | 10 +- 3 files changed, 226 insertions(+), 13 deletions(-) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 90c9c23af..07e008aab 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -357,6 +357,13 @@ func netfilterModeFromFlag(v string) (_ preftype.NetfilterMode, warning string, // It returns simpleUp if we're running a simple "tailscale up" to // transition to running from a previously-logged-in but down state, // without changing any settings. +// +// Note this can also mutate prefs to add implicit preferences for the +// user operator. +// +// TODO(alexc): the name of this function is confusing, and perhaps a +// sign that it's doing too much. Consider refactoring this so it's just +// telling the caller what to do next, but not changing anything itself. 
func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, justEditMP *ipn.MaskedPrefs, err error) { if !env.upArgs.reset { applyImplicitPrefs(prefs, curPrefs, env) @@ -497,6 +504,8 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if err != nil { return err } + effectivePrefs := curPrefs + if cmd == "up" { // "tailscale up" should not be able to change the // profile name. @@ -546,10 +555,8 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE // or we could miss IPN notifications. // // In particular, if we're doing a force-reauth, we could miss the - // notification with the auth URL we should print for the user. The - // initial state could contain the auth URL, but only if IPN is in the - // NeedsLogin state -- sometimes it's in Starting, and we don't get the URL. - watcher, err := localClient.WatchIPNBus(watchCtx, ipn.NotifyInitialState) + // notification with the auth URL we should print for the user. + watcher, err := localClient.WatchIPNBus(watchCtx, 0) if err != nil { return err } @@ -591,6 +598,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if err != nil { return err } + effectivePrefs = prefs if upArgs.forceReauth || !st.HaveNodeKey { err := localClient.StartLoginInteractive(ctx) if err != nil { @@ -604,7 +612,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE go func() { var printed bool // whether we've yet printed anything to stdout or stderr - var lastURLPrinted string + lastURLPrinted := "" // If we're doing a force-reauth, we need to get two notifications: // @@ -617,6 +625,15 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE ipnIsRunning := false waitingForKeyChange := upArgs.forceReauth + // If we're doing a simple up (i.e. `tailscale up`, no flags) and + // the initial state is NeedsMachineAuth, then we never receive a + // state notification from ipn, so we print the device approval URL + // immediately. + if simpleUp && st.BackendState == ipn.NeedsMachineAuth.String() { + printed = true + printDeviceApprovalInfo(env.upArgs.json, effectivePrefs, &lastURLPrinted) + } + for { n, err := watcher.Next() if err != nil { @@ -629,11 +646,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } if s := n.State; s != nil && *s == ipn.NeedsMachineAuth { printed = true - if env.upArgs.json { - printUpDoneJSON(ipn.NeedsMachineAuth, "") - } else { - fmt.Fprintf(Stderr, "\nTo approve your machine, visit (as admin):\n\n\t%s\n\n", prefs.AdminPageURL(policyclient.Get())) - } + printDeviceApprovalInfo(env.upArgs.json, effectivePrefs, &lastURLPrinted) } if s := n.State; s != nil { ipnIsRunning = *s == ipn.Running @@ -737,6 +750,21 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } } +func printDeviceApprovalInfo(printJson bool, prefs *ipn.Prefs, lastURLPrinted *string) { + if printJson { + printUpDoneJSON(ipn.NeedsMachineAuth, "") + } else { + deviceApprovalURL := prefs.AdminPageURL(policyclient.Get()) + + if lastURLPrinted != nil && deviceApprovalURL == *lastURLPrinted { + return + } + + *lastURLPrinted = deviceApprovalURL + errf("\nTo approve your machine, visit (as admin):\n\n\t%s\n\n", deviceApprovalURL) + } +} + // upWorthWarning reports whether the health check message s is worth warning // about during "tailscale up". Many of the health checks are noisy or confusing // or very ephemeral and happen especially briefly at startup. 
diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 29a036cd6..2e85bc8be 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -294,13 +294,14 @@ func completeLogin(t *testing.T, control *testcontrol.Server, counter *atomic.In // This handler receives device approval URLs, and approves the device. // // It counts how many URLs it sees, and will fail the test if it -// sees multiple device approval URLs. +// sees multiple device approval URLs, or if you try to approve a device +// with the wrong control server. func completeDeviceApproval(t *testing.T, node *TestNode, counter *atomic.Int32) func(string) error { return func(urlStr string) error { control := node.env.Control nodeKey := node.MustStatus().Self.PublicKey t.Logf("saw device approval URL %q", urlStr) - if control.CompleteDeviceApproval(&nodeKey) { + if control.CompleteDeviceApproval(node.env.ControlURL(), urlStr, &nodeKey) { if counter.Add(1) > 1 { err := errors.New("completed multiple device approval URLs") t.Error(err) @@ -513,6 +514,182 @@ func TestOneNodeUpAuth(t *testing.T) { } } +// Returns true if the error returned by [exec.Run] fails with a non-zero +// exit code, false otherwise. +func isNonZeroExitCode(err error) bool { + if err == nil { + return false + } + + exitError, ok := err.(*exec.ExitError) + if !ok { + return false + } + + return exitError.ExitCode() != 0 +} + +// If we interrupt `tailscale up` and then run it again, we should only +// print a single auth URL. +func TestOneNodeUpInterruptedAuth(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + + env := NewTestEnv(t, ConfigureControl( + func(control *testcontrol.Server) { + control.RequireAuth = true + control.AllNodesSameUser = true + }, + )) + + n := NewTestNode(t, env) + d := n.StartDaemon() + defer d.MustCleanShutdown(t) + + cmdArgs := []string{"up", "--login-server=" + env.ControlURL()} + + // The first time we run the command, we wait for an auth URL to be + // printed, and then we cancel the command -- equivalent to ^C. + // + // At this point, we've connected to control to get an auth URL, + // and printed it in the CLI, but not clicked it. + t.Logf("Running command for the first time: %s", strings.Join(cmdArgs, " ")) + cmd1 := n.Tailscale(cmdArgs...) + + // This handler watches for auth URLs in stdout, then cancels the + // running `tailscale up` CLI command. + cmd1.Stdout = &authURLParserWriter{t: t, authURLFn: func(urlStr string) error { + t.Logf("saw auth URL %q", urlStr) + cmd1.Process.Kill() + return nil + }} + cmd1.Stderr = cmd1.Stdout + + if err := cmd1.Run(); !isNonZeroExitCode(err) { + t.Fatalf("Command did not fail with non-zero exit code: %q", err) + } + + // Because we didn't click the auth URL, we should still be in NeedsLogin. + n.AwaitBackendState("NeedsLogin") + + // The second time we run the command, we click the first auth URL we see + // and check that we log in correctly. + // + // In #17361, there was a bug where we'd print two auth URLs, and you could + // click either auth URL and log in to control, but logging in through the + // first URL would leave `tailscale up` hanging. + // + // Using `authURLHandler` ensures we only print the new, correct auth URL. + // + // If we print both URLs, it will throw an error because it only expects + // to log in with one auth URL. + // + // If we only print the stale auth URL, the test will timeout because + // `tailscale up` will never return. 
+ t.Logf("Running command for the second time: %s", strings.Join(cmdArgs, " ")) + + var authURLCount atomic.Int32 + + cmd2 := n.Tailscale(cmdArgs...) + cmd2.Stdout = &authURLParserWriter{ + t: t, authURLFn: completeLogin(t, env.Control, &authURLCount), + } + cmd2.Stderr = cmd2.Stdout + + if err := cmd2.Run(); err != nil { + t.Fatalf("up: %v", err) + } + + if urls := authURLCount.Load(); urls != 1 { + t.Errorf("Auth URLs completed = %d; want %d", urls, 1) + } + + n.AwaitRunning() +} + +// If we interrupt `tailscale up` and login successfully, but don't +// complete the device approval, we should see the device approval URL +// when we run `tailscale up` a second time. +func TestOneNodeUpInterruptedDeviceApproval(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + + env := NewTestEnv(t, ConfigureControl( + func(control *testcontrol.Server) { + control.RequireAuth = true + control.RequireMachineAuth = true + control.AllNodesSameUser = true + }, + )) + + n := NewTestNode(t, env) + d := n.StartDaemon() + defer d.MustCleanShutdown(t) + + // The first time we run the command, we: + // + // * set a custom login URL + // * wait for an auth URL to be printed + // * click it to complete the login process + // * wait for a device approval URL to be printed + // * cancel the command, equivalent to ^C + // + // At this point, we've logged in to control, but our node isn't + // approved to connect to the tailnet. + cmd1Args := []string{"up", "--login-server=" + env.ControlURL()} + t.Logf("Running command: %s", strings.Join(cmd1Args, " ")) + cmd1 := n.Tailscale(cmd1Args...) + + handler1 := &authURLParserWriter{t: t, + authURLFn: completeLogin(t, env.Control, &atomic.Int32{}), + deviceApprovalURLFn: func(urlStr string) error { + t.Logf("saw device approval URL %q", urlStr) + cmd1.Process.Kill() + return nil + }, + } + cmd1.Stdout = handler1 + cmd1.Stderr = cmd1.Stdout + + if err := cmd1.Run(); !isNonZeroExitCode(err) { + t.Fatalf("Command did not fail with non-zero exit code: %q", err) + } + + // Because we logged in but we didn't complete the device approval, we + // should be in state NeedsMachineAuth. + n.AwaitBackendState("NeedsMachineAuth") + + // The second time we run the command, we expect not to get an auth URL + // and go straight to the device approval URL. We don't need to pass the + // login server, because `tailscale up` should remember our control URL. + cmd2Args := []string{"up"} + t.Logf("Running command: %s", strings.Join(cmd2Args, " ")) + + var deviceApprovalURLCount atomic.Int32 + + cmd2 := n.Tailscale(cmd2Args...) 
+ cmd2.Stdout = &authURLParserWriter{t: t, + authURLFn: func(urlStr string) error { + t.Fatalf("got unexpected auth URL: %q", urlStr) + cmd2.Process.Kill() + return nil + }, + deviceApprovalURLFn: completeDeviceApproval(t, n, &deviceApprovalURLCount), + } + cmd2.Stderr = cmd2.Stdout + + if err := cmd2.Run(); err != nil { + t.Fatalf("up: %v", err) + } + + wantDeviceApprovalURLCount := int32(1) + if n := deviceApprovalURLCount.Load(); n != wantDeviceApprovalURLCount { + t.Errorf("Device approval URLs completed = %d; want %d", n, wantDeviceApprovalURLCount) + } + + n.AwaitRunning() +} + func TestConfigFileAuthKey(t *testing.T) { tstest.SkipOnUnshardedCI(t) tstest.Shard(t) diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 58ca956ce..f9a33705b 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -687,7 +687,11 @@ func (s *Server) CompleteAuth(authPathOrURL string) bool { return true } -func (s *Server) CompleteDeviceApproval(nodeKey *key.NodePublic) bool { +// Complete the device approval for this node. +// +// This function returns false if the node does not exist, or you try to +// approve a device against a different control server. +func (s *Server) CompleteDeviceApproval(controlUrl string, urlStr string, nodeKey *key.NodePublic) bool { s.mu.Lock() defer s.mu.Unlock() @@ -696,6 +700,10 @@ func (s *Server) CompleteDeviceApproval(nodeKey *key.NodePublic) bool { return false } + if urlStr != controlUrl+"/admin" { + return false + } + sendUpdate(s.updates[node.ID], updateSelfChanged) node.MachineAuthorized = true From 7edb5b7d4394e322298b7c6f86ce73215224b5bc Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Wed, 8 Oct 2025 14:37:47 -0400 Subject: [PATCH 1531/1708] flake.nix: update Nix to use tailscale/go 1.25.2 (#17500) Update Nix flake to use go 1.25.2 Create the hash from the toolchain rev file automatically from update-flake.sh Updates tailscale/go#135 Signed-off-by: Mike O'Driscoll --- flake.nix | 6 +++--- go.toolchain.rev.sri | 1 + go.toolchain.version | 1 + pull-toolchain.sh | 6 +++++- update-flake.sh | 8 ++++++++ 5 files changed, 18 insertions(+), 4 deletions(-) create mode 100644 go.toolchain.rev.sri create mode 100644 go.toolchain.version diff --git a/flake.nix b/flake.nix index 9481248f0..726757f7a 100644 --- a/flake.nix +++ b/flake.nix @@ -46,9 +46,9 @@ systems, flake-compat, }: let - goVersion = "1.25.1"; + goVersion = nixpkgs.lib.fileContents ./go.toolchain.version; toolChainRev = nixpkgs.lib.fileContents ./go.toolchain.rev; - gitHash = "sha256-1OCmJ7sZL6G/6wO2+lnW4uYPCIdbXhscD5qSTIPoxDk="; + gitHash = nixpkgs.lib.fileContents ./go.toolchain.rev.sri; eachSystem = f: nixpkgs.lib.genAttrs (import systems) (system: f (import nixpkgs { @@ -61,7 +61,7 @@ owner = "tailscale"; repo = "go"; rev = toolChainRev; - hash = gitHash; + sha256 = gitHash; }; }; }) diff --git a/go.toolchain.rev.sri b/go.toolchain.rev.sri new file mode 100644 index 000000000..9cbf36b93 --- /dev/null +++ b/go.toolchain.rev.sri @@ -0,0 +1 @@ +sha256-1OCmJ7sZL6G/6wO2+lnW4uYPCIdbXhscD5qSTIPoxDk= diff --git a/go.toolchain.version b/go.toolchain.version new file mode 100644 index 000000000..61b813d5e --- /dev/null +++ b/go.toolchain.version @@ -0,0 +1 @@ +1.25.2 diff --git a/pull-toolchain.sh b/pull-toolchain.sh index f5a19e7d7..eb8febf6b 100755 --- a/pull-toolchain.sh +++ b/pull-toolchain.sh @@ -11,6 +11,10 @@ if [ "$upstream" != "$current" ]; then echo "$upstream" >go.toolchain.rev fi -if [ -n 
"$(git diff-index --name-only HEAD -- go.toolchain.rev)" ]; then +./tool/go version 2>/dev/null | awk '{print $3}' | sed 's/^go//' > go.toolchain.version + +./update-flake.sh + +if [ -n "$(git diff-index --name-only HEAD -- go.toolchain.rev go.toolchain.rev.sri go.toolchain.version)" ]; then echo "pull-toolchain.sh: changes imported. Use git commit to make them permanent." >&2 fi diff --git a/update-flake.sh b/update-flake.sh index 4561183b8..c22572b86 100755 --- a/update-flake.sh +++ b/update-flake.sh @@ -10,6 +10,14 @@ rm -rf "$OUT" ./tool/go run tailscale.com/cmd/nardump --sri "$OUT" >go.mod.sri rm -rf "$OUT" +GOOUT=$(mktemp -d -t gocross-XXXXXX) +GOREV=$(xargs < ./go.toolchain.rev) +TARBALL="$GOOUT/go-$GOREV.tar.gz" +curl -Ls -o "$TARBALL" "https://github.com/tailscale/go/archive/$GOREV.tar.gz" +tar -xzf "$TARBALL" -C "$GOOUT" +./tool/go run tailscale.com/cmd/nardump --sri "$GOOUT/go-$GOREV" > go.toolchain.rev.sri +rm -rf "$GOOUT" + # nix-direnv only watches the top-level nix file for changes. As a # result, when we change a referenced SRI file, we have to cause some # change to shell.nix and flake.nix as well, so that nix-direnv From f270c3158a3d568ffbe5387b3cf0cbed042b67d3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 8 Oct 2025 09:27:41 -0700 Subject: [PATCH 1532/1708] net/dns, ipn/ipnlocal: fix regressions from change moving away from deephash I got sidetracked apparently and never finished writing this Clone code in 316afe7d02babc (#17448). (It really should use views instead.) And then I missed one of the users of "routerChanged" that was broken up into "routerChanged" vs "dnsChanged". This broke integration tests elsewhere. Fixes #17506 Change-Id: I533bf0fcf3da9ac6eb4a6cdef03b8df2c1fb4c8e Signed-off-by: Brad Fitzpatrick --- net/dns/config.go | 11 +++++- net/dns/config_test.go | 66 +++++++++++++++++++++++++++++++++ util/checkchange/checkchange.go | 2 +- wgengine/userspace.go | 22 +++++++++-- 4 files changed, 94 insertions(+), 7 deletions(-) create mode 100644 net/dns/config_test.go diff --git a/net/dns/config.go b/net/dns/config.go index 22caf6ef5..6c170f19b 100644 --- a/net/dns/config.go +++ b/net/dns/config.go @@ -7,6 +7,7 @@ package dns import ( "bufio" "fmt" + "maps" "net/netip" "reflect" "slices" @@ -190,15 +191,21 @@ func sameResolverNames(a, b []*dnstype.Resolver) bool { return true } +// Clone makes a shallow clone of c. +// +// The returned Config still references slices and maps from c. +// +// TODO(bradfitz): use cmd/{viewer,cloner} for these and make the +// caller use views instead. 
func (c *Config) Clone() *Config { if c == nil { return nil } return &Config{ DefaultResolvers: slices.Clone(c.DefaultResolvers), - Routes: make(map[dnsname.FQDN][]*dnstype.Resolver, len(c.Routes)), + Routes: maps.Clone(c.Routes), SearchDomains: slices.Clone(c.SearchDomains), - Hosts: make(map[dnsname.FQDN][]netip.Addr, len(c.Hosts)), + Hosts: maps.Clone(c.Hosts), OnlyIPv6: c.OnlyIPv6, } } diff --git a/net/dns/config_test.go b/net/dns/config_test.go new file mode 100644 index 000000000..684dea6bc --- /dev/null +++ b/net/dns/config_test.go @@ -0,0 +1,66 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package dns + +import ( + "net/netip" + "reflect" + "testing" + + "tailscale.com/types/dnstype" + "tailscale.com/util/dnsname" +) + +func TestConfigClone(t *testing.T) { + tests := []struct { + name string + conf *Config + }{ + { + name: "nil", + conf: nil, + }, + { + name: "empty", + conf: &Config{}, + }, + { + name: "full", + conf: &Config{ + DefaultResolvers: []*dnstype.Resolver{ + { + Addr: "abc", + BootstrapResolution: []netip.Addr{netip.MustParseAddr("1.2.3.4")}, + UseWithExitNode: true, + }, + }, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{ + "foo.bar.": { + { + Addr: "abc", + BootstrapResolution: []netip.Addr{netip.MustParseAddr("1.2.3.4")}, + UseWithExitNode: true, + }, + }, + }, + SearchDomains: []dnsname.FQDN{"bar.baz."}, + Hosts: map[dnsname.FQDN][]netip.Addr{ + "host.bar.": {netip.MustParseAddr("5.6.7.8")}, + }, + OnlyIPv6: true, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.conf.Clone() + if !reflect.DeepEqual(got, tt.conf) { + t.Error("Cloned result is not reflect.DeepEqual") + } + if !got.Equal(tt.conf) { + t.Error("Cloned result is not Equal") + } + }) + } +} diff --git a/util/checkchange/checkchange.go b/util/checkchange/checkchange.go index 4d18730f1..8ba64720d 100644 --- a/util/checkchange/checkchange.go +++ b/util/checkchange/checkchange.go @@ -17,7 +17,7 @@ type EqualCloner[T any] interface { // // It only modifies *old if they are different. old must be non-nil. func Update[T EqualCloner[T]](old *T, new T) (changed bool) { - if new.Equal(*old) { + if (*old).Equal(new) { return false } *old = new.Clone() diff --git a/wgengine/userspace.go b/wgengine/userspace.go index fa2379288..9f42dae2a 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -965,8 +965,9 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, isSubnetRouterChanged := isSubnetRouter != e.lastIsSubnetRouter engineChanged := checkchange.Update(&e.lastEngineFull, cfg) - dnsChanged := checkchange.Update(&e.lastDNSConfig, dnsCfg) + dnsChanged := buildfeatures.HasDNS && checkchange.Update(&e.lastDNSConfig, dnsCfg) routerChanged := checkchange.Update(&e.lastRouter, routerCfg) + listenPortChanged := listenPort != e.magicConn.LocalPort() peerMTUChanged := peerMTUEnable != e.magicConn.PeerMTUEnabled() if !engineChanged && !routerChanged && !dnsChanged && !listenPortChanged && !isSubnetRouterChanged && !peerMTUChanged { @@ -987,7 +988,9 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, // instead have ipnlocal populate a map of DNS IP => linkName and // put that in the *dns.Config instead, and plumb it down to the // dns.Manager. Maybe also with isLocalAddr above. 
- e.isDNSIPOverTailscale.Store(ipset.NewContainsIPFunc(views.SliceOf(dnsIPsOverTailscale(dnsCfg, routerCfg)))) + if buildfeatures.HasDNS { + e.isDNSIPOverTailscale.Store(ipset.NewContainsIPFunc(views.SliceOf(dnsIPsOverTailscale(dnsCfg, routerCfg)))) + } // See if any peers have changed disco keys, which means they've restarted. // If so, we need to update the wireguard-go/device.Device in two phases: @@ -1063,7 +1066,18 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, if err != nil { return err } + } + // We've historically re-set DNS even after just a router change. While + // refactoring in tailscale/tailscale#17448 and and + // tailscale/tailscale#17499, I'm erring on the side of keeping that + // historical quirk for now (2025-10-08), lest it's load bearing in + // unexpected ways + // + // TODO(bradfitz): try to do the "configuring DNS" part below only if + // dnsChanged, not routerChanged. The "resolver.ShouldUseRoutes" part + // probably needs to keep happening for both. + if buildfeatures.HasDNS && (routerChanged || dnsChanged) { if resolver.ShouldUseRoutes(e.controlKnobs) { e.logf("wgengine: Reconfig: user dialer") e.dialer.SetRoutes(routerCfg.Routes, routerCfg.LocalRoutes) @@ -1075,7 +1089,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, // DNS managers refuse to apply settings if the device has no // assigned address. e.logf("wgengine: Reconfig: configuring DNS") - err = e.dns.Set(*dnsCfg) + err := e.dns.Set(*dnsCfg) e.health.SetDNSHealth(err) if err != nil { return err @@ -1097,7 +1111,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, } } - if isSubnetRouterChanged && e.birdClient != nil { + if buildfeatures.HasBird && isSubnetRouterChanged && e.birdClient != nil { e.logf("wgengine: Reconfig: configuring BIRD") var err error if isSubnetRouter { From 91239327100db0bc588530d5a44172add767f195 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 8 Oct 2025 18:16:15 -0700 Subject: [PATCH 1533/1708] net/dns, wgengine: use viewer/cloner for Config Per earlier TODO. Updates #17506 Change-Id: I21fe851c4bcced98fcee844cb428ca9c2f6b0588 Signed-off-by: Brad Fitzpatrick --- net/dns/config.go | 22 +------ net/dns/config_test.go | 66 -------------------- net/dns/dns_clone.go | 74 ++++++++++++++++++++++ net/dns/dns_view.go | 138 +++++++++++++++++++++++++++++++++++++++++ wgengine/userspace.go | 13 ++-- 5 files changed, 222 insertions(+), 91 deletions(-) delete mode 100644 net/dns/config_test.go create mode 100644 net/dns/dns_clone.go create mode 100644 net/dns/dns_view.go diff --git a/net/dns/config.go b/net/dns/config.go index 6c170f19b..2425b304d 100644 --- a/net/dns/config.go +++ b/net/dns/config.go @@ -1,13 +1,14 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:generate go run tailscale.com/cmd/viewer --type=Config --clonefunc + // Package dns contains code to configure and manage DNS settings. package dns import ( "bufio" "fmt" - "maps" "net/netip" "reflect" "slices" @@ -191,25 +192,6 @@ func sameResolverNames(a, b []*dnstype.Resolver) bool { return true } -// Clone makes a shallow clone of c. -// -// The returned Config still references slices and maps from c. -// -// TODO(bradfitz): use cmd/{viewer,cloner} for these and make the -// caller use views instead. 
-func (c *Config) Clone() *Config { - if c == nil { - return nil - } - return &Config{ - DefaultResolvers: slices.Clone(c.DefaultResolvers), - Routes: maps.Clone(c.Routes), - SearchDomains: slices.Clone(c.SearchDomains), - Hosts: maps.Clone(c.Hosts), - OnlyIPv6: c.OnlyIPv6, - } -} - func (c *Config) Equal(o *Config) bool { if c == nil || o == nil { return c == o diff --git a/net/dns/config_test.go b/net/dns/config_test.go deleted file mode 100644 index 684dea6bc..000000000 --- a/net/dns/config_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package dns - -import ( - "net/netip" - "reflect" - "testing" - - "tailscale.com/types/dnstype" - "tailscale.com/util/dnsname" -) - -func TestConfigClone(t *testing.T) { - tests := []struct { - name string - conf *Config - }{ - { - name: "nil", - conf: nil, - }, - { - name: "empty", - conf: &Config{}, - }, - { - name: "full", - conf: &Config{ - DefaultResolvers: []*dnstype.Resolver{ - { - Addr: "abc", - BootstrapResolution: []netip.Addr{netip.MustParseAddr("1.2.3.4")}, - UseWithExitNode: true, - }, - }, - Routes: map[dnsname.FQDN][]*dnstype.Resolver{ - "foo.bar.": { - { - Addr: "abc", - BootstrapResolution: []netip.Addr{netip.MustParseAddr("1.2.3.4")}, - UseWithExitNode: true, - }, - }, - }, - SearchDomains: []dnsname.FQDN{"bar.baz."}, - Hosts: map[dnsname.FQDN][]netip.Addr{ - "host.bar.": {netip.MustParseAddr("5.6.7.8")}, - }, - OnlyIPv6: true, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := tt.conf.Clone() - if !reflect.DeepEqual(got, tt.conf) { - t.Error("Cloned result is not reflect.DeepEqual") - } - if !got.Equal(tt.conf) { - t.Error("Cloned result is not Equal") - } - }) - } -} diff --git a/net/dns/dns_clone.go b/net/dns/dns_clone.go new file mode 100644 index 000000000..807bfce23 --- /dev/null +++ b/net/dns/dns_clone.go @@ -0,0 +1,74 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. + +package dns + +import ( + "net/netip" + + "tailscale.com/types/dnstype" + "tailscale.com/util/dnsname" +) + +// Clone makes a deep copy of Config. +// The result aliases no memory with the original. +func (src *Config) Clone() *Config { + if src == nil { + return nil + } + dst := new(Config) + *dst = *src + if src.DefaultResolvers != nil { + dst.DefaultResolvers = make([]*dnstype.Resolver, len(src.DefaultResolvers)) + for i := range dst.DefaultResolvers { + if src.DefaultResolvers[i] == nil { + dst.DefaultResolvers[i] = nil + } else { + dst.DefaultResolvers[i] = src.DefaultResolvers[i].Clone() + } + } + } + if dst.Routes != nil { + dst.Routes = map[dnsname.FQDN][]*dnstype.Resolver{} + for k := range src.Routes { + dst.Routes[k] = append([]*dnstype.Resolver{}, src.Routes[k]...) + } + } + dst.SearchDomains = append(src.SearchDomains[:0:0], src.SearchDomains...) + if dst.Hosts != nil { + dst.Hosts = map[dnsname.FQDN][]netip.Addr{} + for k := range src.Hosts { + dst.Hosts[k] = append([]netip.Addr{}, src.Hosts[k]...) + } + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _ConfigCloneNeedsRegeneration = Config(struct { + DefaultResolvers []*dnstype.Resolver + Routes map[dnsname.FQDN][]*dnstype.Resolver + SearchDomains []dnsname.FQDN + Hosts map[dnsname.FQDN][]netip.Addr + OnlyIPv6 bool +}{}) + +// Clone duplicates src into dst and reports whether it succeeded. 
+// To succeed, must be of types <*T, *T> or <*T, **T>, +// where T is one of Config. +func Clone(dst, src any) bool { + switch src := src.(type) { + case *Config: + switch dst := dst.(type) { + case *Config: + *dst = *src.Clone() + return true + case **Config: + *dst = src.Clone() + return true + } + } + return false +} diff --git a/net/dns/dns_view.go b/net/dns/dns_view.go new file mode 100644 index 000000000..c7ce376cb --- /dev/null +++ b/net/dns/dns_view.go @@ -0,0 +1,138 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by tailscale/cmd/viewer; DO NOT EDIT. + +package dns + +import ( + jsonv1 "encoding/json" + "errors" + "net/netip" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "tailscale.com/types/dnstype" + "tailscale.com/types/views" + "tailscale.com/util/dnsname" +) + +//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=Config + +// View returns a read-only view of Config. +func (p *Config) View() ConfigView { + return ConfigView{ж: p} +} + +// ConfigView provides a read-only view over Config. +// +// Its methods should only be called if `Valid()` returns true. +type ConfigView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *Config +} + +// Valid reports whether v's underlying value is non-nil. +func (v ConfigView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v ConfigView) AsStruct() *Config { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ConfigView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ConfigView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +func (v *ConfigView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x Config + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *ConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Config + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// DefaultResolvers are the DNS resolvers to use for DNS names +// which aren't covered by more specific per-domain routes below. +// If empty, the OS's default resolvers (the ones that predate +// Tailscale altering the configuration) are used. +func (v ConfigView) DefaultResolvers() views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { + return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](v.ж.DefaultResolvers) +} + +// Routes maps a DNS suffix to the resolvers that should be used +// for queries that fall within that suffix. +// If a query doesn't match any entry in Routes, the +// DefaultResolvers are used. +// A Routes entry with no resolvers means the route should be +// authoritatively answered using the contents of Hosts. 
+func (v ConfigView) Routes() views.MapFn[dnsname.FQDN, []*dnstype.Resolver, views.SliceView[*dnstype.Resolver, dnstype.ResolverView]] { + return views.MapFnOf(v.ж.Routes, func(t []*dnstype.Resolver) views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { + return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](t) + }) +} + +// SearchDomains are DNS suffixes to try when expanding +// single-label queries. +func (v ConfigView) SearchDomains() views.Slice[dnsname.FQDN] { + return views.SliceOf(v.ж.SearchDomains) +} + +// Hosts maps DNS FQDNs to their IPs, which can be a mix of IPv4 +// and IPv6. +// Queries matching entries in Hosts are resolved locally by +// 100.100.100.100 without leaving the machine. +// Adding an entry to Hosts merely creates the record. If you want +// it to resolve, you also need to add appropriate routes to +// Routes. +func (v ConfigView) Hosts() views.MapSlice[dnsname.FQDN, netip.Addr] { + return views.MapSliceOf(v.ж.Hosts) +} + +// OnlyIPv6, if true, uses the IPv6 service IP (for MagicDNS) +// instead of the IPv4 version (100.100.100.100). +func (v ConfigView) OnlyIPv6() bool { return v.ж.OnlyIPv6 } +func (v ConfigView) Equal(v2 ConfigView) bool { return v.ж.Equal(v2.ж) } + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _ConfigViewNeedsRegeneration = Config(struct { + DefaultResolvers []*dnstype.Resolver + Routes map[dnsname.FQDN][]*dnstype.Resolver + SearchDomains []dnsname.FQDN + Hosts map[dnsname.FQDN][]netip.Addr + OnlyIPv6 bool +}{}) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 9f42dae2a..d1ca21f4d 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -132,8 +132,8 @@ type userspaceEngine struct { lastRouter *router.Config lastEngineFull *wgcfg.Config // of full wireguard config, not trimmed lastEngineInputs *maybeReconfigInputs - lastDNSConfig *dns.Config - lastIsSubnetRouter bool // was the node a primary subnet router in the last run. + lastDNSConfig dns.ConfigView // or invalid if none + lastIsSubnetRouter bool // was the node a primary subnet router in the last run. 
recvActivityAt map[key.NodePublic]mono.Time trimmedNodes map[key.NodePublic]bool // set of node keys of peers currently excluded from wireguard config sentActivityAt map[netip.Addr]*mono.Time // value is accessed atomically @@ -965,8 +965,11 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, isSubnetRouterChanged := isSubnetRouter != e.lastIsSubnetRouter engineChanged := checkchange.Update(&e.lastEngineFull, cfg) - dnsChanged := buildfeatures.HasDNS && checkchange.Update(&e.lastDNSConfig, dnsCfg) routerChanged := checkchange.Update(&e.lastRouter, routerCfg) + dnsChanged := buildfeatures.HasDNS && !e.lastDNSConfig.Equal(dnsCfg.View()) + if dnsChanged { + e.lastDNSConfig = dnsCfg.View() + } listenPortChanged := listenPort != e.magicConn.LocalPort() peerMTUChanged := peerMTUEnable != e.magicConn.PeerMTUEnabled() @@ -1322,8 +1325,8 @@ func (e *userspaceEngine) linkChange(delta *netmon.ChangeDelta) { e.wgLock.Lock() dnsCfg := e.lastDNSConfig e.wgLock.Unlock() - if dnsCfg != nil { - if err := e.dns.Set(*dnsCfg); err != nil { + if dnsCfg.Valid() { + if err := e.dns.Set(*dnsCfg.AsStruct()); err != nil { e.logf("wgengine: error setting DNS config after major link change: %v", err) } else if err := e.reconfigureVPNIfNecessary(); err != nil { e.logf("wgengine: error reconfiguring VPN after major link change: %v", err) From 0f4dec928e8f690a8cd36e7bd399228e129a2e7d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 8 Oct 2025 18:42:13 -0700 Subject: [PATCH 1534/1708] feature/featuretags: make bird depend on advertiseroutes Updates #cleanup Change-Id: I87082919064a5652c0d976cadd6d159787bb224a Signed-off-by: Brad Fitzpatrick --- feature/featuretags/featuretags.go | 6 +++++- wgengine/userspace.go | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 9c87586db..c944d65eb 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -113,7 +113,11 @@ var Features = map[FeatureTag]FeatureMeta{ }, }, "bakedroots": {Sym: "BakedRoots", Desc: "Embed CA (LetsEncrypt) x509 roots to use as fallback"}, - "bird": {Sym: "Bird", Desc: "Bird BGP integration"}, + "bird": { + Sym: "Bird", + Desc: "Bird BGP integration", + Deps: []FeatureTag{"advertiseroutes"}, + }, "c2n": { Sym: "C2N", Desc: "Control-to-node (C2N) support", diff --git a/wgengine/userspace.go b/wgengine/userspace.go index d1ca21f4d..8856a3eaf 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -962,7 +962,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, nm.SelfNode.PrimaryRoutes(), nm.SelfNode.Hostinfo().RoutableIPs(), isSubnetRouter, isSubnetRouter, e.lastIsSubnetRouter) } - isSubnetRouterChanged := isSubnetRouter != e.lastIsSubnetRouter + isSubnetRouterChanged := buildfeatures.HasAdvertiseRoutes && isSubnetRouter != e.lastIsSubnetRouter engineChanged := checkchange.Update(&e.lastEngineFull, cfg) routerChanged := checkchange.Update(&e.lastRouter, routerCfg) From e2233b794247bf20d022d0ebefa99ad39bbad591 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 9 Oct 2025 11:45:03 -0700 Subject: [PATCH 1535/1708] feature/relayserver: init server at config time instead of request time (#17484) The lazy init led to confusion and a belief that was something was wrong. It's reasonable to expect the daemon to listen on the port at the time it's configured. 
Updates tailscale/corp#33094 Signed-off-by: Jordan Whited --- cmd/tailscaled/depaware.txt | 2 +- feature/relayserver/relayserver.go | 19 +++++++------------ feature/relayserver/relayserver_test.go | 3 +++ net/udprelay/server.go | 1 + 4 files changed, 12 insertions(+), 13 deletions(-) diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index c7d571f1e..6ca10f80c 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -367,7 +367,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay from tailscale.com/feature/relayserver - tailscale.com/net/udprelay/endpoint from tailscale.com/feature/relayserver+ + tailscale.com/net/udprelay/endpoint from tailscale.com/net/udprelay+ tailscale.com/net/udprelay/status from tailscale.com/client/local+ tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 95bf29a11..df2fb4cb7 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -21,10 +21,8 @@ import ( "tailscale.com/ipn/ipnext" "tailscale.com/ipn/localapi" "tailscale.com/net/udprelay" - "tailscale.com/net/udprelay/endpoint" "tailscale.com/net/udprelay/status" "tailscale.com/tailcfg" - "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/ptr" "tailscale.com/util/eventbus" @@ -91,13 +89,6 @@ type extension struct { hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer } -// relayServer is the interface of [udprelay.Server]. -type relayServer interface { - AllocateEndpoint(discoA key.DiscoPublic, discoB key.DiscoPublic) (endpoint.ServerEndpoint, error) - Close() error - GetSessions() []status.ServerSession -} - // Name implements [ipnext.Extension]. func (e *extension) Name() string { return featureName @@ -182,7 +173,11 @@ func (e *extension) consumeEventbusTopics(ec *eventbus.Client, port int) func(*e debugSessionsCh := e.debugSessionsCh return func(ec *eventbus.Client) { - var rs relayServer // lazily initialized + rs, err := udprelay.NewServer(e.logf, port, overrideAddrs()) + if err != nil { + e.logf("error initializing server: %v", err) + } + defer func() { if rs != nil { rs.Close() @@ -194,7 +189,6 @@ func (e *extension) consumeEventbusTopics(ec *eventbus.Client, port int) func(*e return case respCh := <-debugSessionsCh: if rs == nil { - // Don't initialize the server simply for a debug request. respCh <- nil continue } @@ -202,7 +196,8 @@ func (e *extension) consumeEventbusTopics(ec *eventbus.Client, port int) func(*e respCh <- sessions case req := <-reqSub.Events(): if rs == nil { - var err error + // The server may have previously failed to initialize if + // the configured port was in use, try again. 
rs, err = udprelay.NewServer(e.logf, port, overrideAddrs()) if err != nil { e.logf("error initializing server: %v", err) diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index 89c004dc7..65c503524 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -8,6 +8,7 @@ import ( "tailscale.com/ipn" "tailscale.com/tsd" + "tailscale.com/types/logger" "tailscale.com/types/ptr" "tailscale.com/util/eventbus" ) @@ -96,6 +97,7 @@ func Test_extension_profileStateChanged(t *testing.T) { sys := tsd.NewSystem() bus := sys.Bus.Get() e := &extension{ + logf: logger.Discard, port: tt.fields.port, bus: bus, } @@ -154,6 +156,7 @@ func Test_extension_handleBusLifetimeLocked(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { e := &extension{ + logf: logger.Discard, bus: eventbus.New(), shutdown: tt.shutdown, port: tt.port, diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 424c7a617..83831dd69 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -536,6 +536,7 @@ func (s *Server) listenOn(port int) error { s.uc6 = bc s.uc6Port = uint16(portUint) } + s.logf("listening on %s:%d", network, portUint) } return nil } From d72370a6eb6e9d78b56a84a8f59d9e6f276ab85c Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 9 Oct 2025 15:09:07 -0700 Subject: [PATCH 1536/1708] wgengine/magicsock: remove unused arg in deregisterMetrics (#17513) Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 492dff2ce..082639866 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -840,7 +840,7 @@ func registerMetrics(reg *usermetric.Registry) *metrics { // deregisterMetrics unregisters the underlying usermetrics expvar counters // from clientmetrics. 
-func deregisterMetrics(m *metrics) { +func deregisterMetrics() { metricRecvDataPacketsIPv4.UnregisterAll() metricRecvDataPacketsIPv6.UnregisterAll() metricRecvDataPacketsDERP.UnregisterAll() @@ -3329,7 +3329,7 @@ func (c *Conn) Close() error { pinger.Close() } - deregisterMetrics(c.metrics) + deregisterMetrics() return nil } From adf308a06407754c94fd71f7497c63178294ba6d Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 9 Oct 2025 15:18:31 -0700 Subject: [PATCH 1537/1708] wgengine/magicsock: add clientmetrics for RX bytes by af & conn type (#17512) Updates tailscale/corp#33206 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 17 +++++++++++++++++ wgengine/magicsock/magicsock_test.go | 2 ++ 2 files changed, 19 insertions(+) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 082639866..873c76a09 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -803,6 +803,11 @@ func registerMetrics(reg *usermetric.Registry) *metrics { metricRecvDataPacketsDERP.Register(&m.inboundPacketsDERPTotal) metricRecvDataPacketsPeerRelayIPv4.Register(&m.inboundPacketsPeerRelayIPv4Total) metricRecvDataPacketsPeerRelayIPv6.Register(&m.inboundPacketsPeerRelayIPv6Total) + metricRecvDataBytesIPv4.Register(&m.inboundBytesIPv4Total) + metricRecvDataBytesIPv6.Register(&m.inboundBytesIPv6Total) + metricRecvDataBytesDERP.Register(&m.inboundBytesDERPTotal) + metricRecvDataBytesPeerRelayIPv4.Register(&m.inboundBytesPeerRelayIPv4Total) + metricRecvDataBytesPeerRelayIPv6.Register(&m.inboundBytesPeerRelayIPv6Total) metricSendUDP.Register(&m.outboundPacketsIPv4Total) metricSendUDP.Register(&m.outboundPacketsIPv6Total) metricSendDERP.Register(&m.outboundPacketsDERPTotal) @@ -846,6 +851,11 @@ func deregisterMetrics() { metricRecvDataPacketsDERP.UnregisterAll() metricRecvDataPacketsPeerRelayIPv4.UnregisterAll() metricRecvDataPacketsPeerRelayIPv6.UnregisterAll() + metricRecvDataBytesIPv4.UnregisterAll() + metricRecvDataBytesIPv6.UnregisterAll() + metricRecvDataBytesDERP.UnregisterAll() + metricRecvDataBytesPeerRelayIPv4.UnregisterAll() + metricRecvDataBytesPeerRelayIPv6.UnregisterAll() metricSendUDP.UnregisterAll() metricSendDERP.UnregisterAll() metricSendPeerRelay.UnregisterAll() @@ -3935,6 +3945,13 @@ var ( metricRecvDataPacketsPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv4") metricRecvDataPacketsPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv6") + // Data bytes (non-disco) + metricRecvDataBytesDERP = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_derp") + metricRecvDataBytesIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_ipv4") + metricRecvDataBytesIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_ipv6") + metricRecvDataBytesPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_peer_relay_ipv4") + metricRecvDataBytesPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_peer_relay_ipv6") + // Disco packets metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp") metricSendDiscoDERP = clientmetric.NewCounter("magicsock_disco_send_derp") diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 1f533ddef..3468798c1 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -1302,6 +1302,8 @@ func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { 
c.Assert(metricSendUDP.Value(), qt.Equals, metricIPv4TxPackets*2) c.Assert(metricRecvDataPacketsIPv4.Value(), qt.Equals, metricIPv4RxPackets*2) c.Assert(metricRecvDataPacketsDERP.Value(), qt.Equals, metricDERPRxPackets*2) + c.Assert(metricRecvDataBytesIPv4.Value(), qt.Equals, metricIPv4RxBytes*2) + c.Assert(metricRecvDataBytesDERP.Value(), qt.Equals, metricDERPRxBytes*2) } // tests that having a endpoint.String prevents wireguard-go's From 16a05c76803e0b7d72c555812209e73480fc1582 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 9 Oct 2025 16:03:37 -0700 Subject: [PATCH 1538/1708] wgengine/magicsock: fix docs for send clientmetrics (#17514) Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 873c76a09..844a607cf 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3929,13 +3929,20 @@ var ( metricSendDERPErrorClosed = clientmetric.NewCounter("magicsock_send_derp_error_closed") metricSendDERPErrorQueue = clientmetric.NewCounter("magicsock_send_derp_error_queue") metricSendDERPDropped = clientmetric.NewCounter("magicsock_send_derp_dropped") - metricSendUDP = clientmetric.NewAggregateCounter("magicsock_send_udp") metricSendUDPError = clientmetric.NewCounter("magicsock_send_udp_error") - metricSendPeerRelay = clientmetric.NewAggregateCounter("magicsock_send_peer_relay") metricSendPeerRelayError = clientmetric.NewCounter("magicsock_send_peer_relay_error") - metricSendDERP = clientmetric.NewAggregateCounter("magicsock_send_derp") metricSendDERPError = clientmetric.NewCounter("magicsock_send_derp_error") + // Sends (data) + // + // Note: Prior to v1.78 metricSendUDP & metricSendDERP counted sends of data + // AND disco packets. They were updated in v1.78 to only count data packets. + // metricSendPeerRelay was added in v1.86 and has always counted only data + // packets. + metricSendUDP = clientmetric.NewAggregateCounter("magicsock_send_udp") + metricSendPeerRelay = clientmetric.NewAggregateCounter("magicsock_send_peer_relay") + metricSendDERP = clientmetric.NewAggregateCounter("magicsock_send_derp") + // Data packets (non-disco) metricSendData = clientmetric.NewCounter("magicsock_send_data") metricSendDataNetworkDown = clientmetric.NewCounter("magicsock_send_data_network_down") From 154d36f73d305e147b2410263a2899fb54646909 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 9 Oct 2025 23:58:03 -0700 Subject: [PATCH 1539/1708] wgengine/magicsock: do not apply node view updates to a closed Conn (#17517) Fixes #17516 Change-Id: Iae2dab42d6f7bc618478d360a1005537c1fa1bbd Signed-off-by: M. J. 
Fromberger --- wgengine/magicsock/magicsock.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 844a607cf..b17aa11ae 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2958,8 +2958,13 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { filt := c.filt self := c.self peers := c.peers + isClosed := c.closed c.mu.Unlock() // release c.mu before potentially calling c.updateRelayServersSet which is O(m * n) + if isClosed { + return // nothing to do here, the conn is closed and the update is no longer relevant + } + if peersChanged || relayClientChanged { if !relayClientEnabled { c.relayManager.handleRelayServersSet(nil) From 072e6a39f49faa4d209fcbb328fe2fb8d38f9e7f Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Fri, 10 Oct 2025 11:22:33 +0200 Subject: [PATCH 1540/1708] tsweb/varz: add support for ShardedInt metrics Fixes tailscale/corp#33236 Signed-off-by: Anton Tolchanov --- cmd/stund/depaware.txt | 2 +- tsweb/varz/varz.go | 4 ++++ tsweb/varz/varz_test.go | 15 +++++++++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 8cd2e49be..be3e0e0cf 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -58,7 +58,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/stun from tailscale.com/net/stunserver tailscale.com/net/stunserver from tailscale.com/cmd/stund tailscale.com/net/tsaddr from tailscale.com/tsweb - tailscale.com/syncs from tailscale.com/metrics + tailscale.com/syncs from tailscale.com/metrics+ tailscale.com/tailcfg from tailscale.com/version tailscale.com/tsweb from tailscale.com/cmd/stund+ tailscale.com/tsweb/promvarz from tailscale.com/cmd/stund diff --git a/tsweb/varz/varz.go b/tsweb/varz/varz.go index aca2878b7..b1c66b859 100644 --- a/tsweb/varz/varz.go +++ b/tsweb/varz/varz.go @@ -25,6 +25,7 @@ import ( "golang.org/x/exp/constraints" "tailscale.com/metrics" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/version" ) @@ -136,6 +137,9 @@ func writePromExpVar(w io.Writer, prefix string, kv expvar.KeyValue) { case *expvar.Int: fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, cmp.Or(typ, "counter"), name, v.Value()) return + case *syncs.ShardedInt: + fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, cmp.Or(typ, "counter"), name, v.Value()) + return case *expvar.Float: fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, cmp.Or(typ, "gauge"), name, v.Value()) return diff --git a/tsweb/varz/varz_test.go b/tsweb/varz/varz_test.go index f7a9d8801..5bbacbe35 100644 --- a/tsweb/varz/varz_test.go +++ b/tsweb/varz/varz_test.go @@ -13,6 +13,7 @@ import ( "testing" "tailscale.com/metrics" + "tailscale.com/syncs" "tailscale.com/tstest" "tailscale.com/util/racebuild" "tailscale.com/version" @@ -283,6 +284,20 @@ foo_foo_a 1 foo_foo_b 1 `) + "\n", }, + { + "metrics_sharded_int", + "counter_api_status_code", + func() *syncs.ShardedInt { + m := syncs.NewShardedInt() + m.Add(40) + m.Add(2) + return m + }(), + strings.TrimSpace(` +# TYPE api_status_code counter +api_status_code 42 + `) + "\n", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From 08eae9affda8ca75993e216bf5da9fe80ce0d358 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Fri, 10 Oct 2025 11:27:55 +0100 Subject: [PATCH 1541/1708] sessionrecording: add destination to struct for tsrecorder (#17520) when tsrecorder receives events, it populates this field with 
information about the node the request was sent to. Updates #17141 Signed-off-by: chaosinthecrd --- sessionrecording/event.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/sessionrecording/event.go b/sessionrecording/event.go index 41d8f2d58..8f8172cc4 100644 --- a/sessionrecording/event.go +++ b/sessionrecording/event.go @@ -37,6 +37,9 @@ type Event struct { // Source provides details about the client that initiated the request. Source Source `json:"source"` + + // Destination provides details about the node receiving the request. + Destination Destination `json:"destination"` } // copied from https://github.com/kubernetes/kubernetes/blob/11ade2f7dd264c2f52a4a1342458abbbaa3cb2b1/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go#L44 @@ -95,6 +98,17 @@ type Source struct { NodeUser string `json:"nodeUser,omitempty"` } +type Destination struct { + // Node is the FQDN of the node receiving the connection. + // It is also the MagicDNS name for the node. + // It does not have a trailing dot. + // e.g. "host.tail-scale.ts.net" + Node string `json:"node"` + + // NodeID is the node ID of the node receiving the connection. + NodeID tailcfg.StableNodeID `json:"nodeID"` +} + // Request holds information about a request. type Request struct { Method string `json:"method"` From f157f3288d3f35ac348c237b514d7e7b092393ac Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Fri, 10 Oct 2025 11:02:35 -0400 Subject: [PATCH 1542/1708] cmd/tailscale/cli,ipn/conffile: add declarative config mode for Services (#17435) This commit adds the subcommands `get-config` and `set-config` to Serve, which can be used to read the current Tailscale Services configuration in a standard syntax and provide a configuration to declaratively apply with that same syntax. Both commands must be provided with either `--service=svc:service` for one service, or `--all` for all services. When writing a config, `--set-config --all` will overwrite all existing Services configuration, and `--set-config --service=svc:service` will overwrite all configuration for that particular Service. Incremental changes are not supported. Fixes tailscale/corp#30983. cmd/tailscale/cli: hide serve "get-config"/"set-config" commands for now tailscale/corp#33152 tracks unhiding them when docs exist. 
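As a rough, non-normative sketch, the conffile.LoadServicesConfig helper added in this patch can also be consumed programmatically; the file path and service name below are hypothetical:

    package main

    import (
        "fmt"
        "log"

        "tailscale.com/ipn/conffile"
    )

    func main() {
        // Parse a single-service config file. Passing "" as the second
        // argument parses a multi-service ("--all" style) file instead.
        scf, err := conffile.LoadServicesConfig("web.json", "svc:web")
        if err != nil {
            log.Fatalf("load config: %v", err)
        }
        for name, svc := range scf.Services {
            for ppr, target := range svc.Endpoints {
                fmt.Printf("%s: %s -> %s://%s:%s\n",
                    name, ppr, target.Protocol, target.Destination, target.DestinationPorts)
            }
        }
    }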
Signed-off-by: Naman Sood --- cmd/tailscale/cli/serve_legacy.go | 1 + cmd/tailscale/cli/serve_v2.go | 300 +++++++++++++++++++++++++++++- cmd/tailscale/depaware.txt | 3 + ipn/conffile/serveconf.go | 239 ++++++++++++++++++++++++ tailcfg/proto_port_range.go | 16 +- tailcfg/tailcfg.go | 10 + 6 files changed, 556 insertions(+), 13 deletions(-) create mode 100644 ipn/conffile/serveconf.go diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index b60e9833b..95808fdf2 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -172,6 +172,7 @@ type serveEnv struct { yes bool // update without prompt service tailcfg.ServiceName // service name tun bool // redirect traffic to OS for service + allServices bool // apply config file to all services lc localServeClient // localClient interface, specific to serve diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 8831db2a9..9b0af2cad 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -28,10 +28,13 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/client/local" "tailscale.com/ipn" + "tailscale.com/ipn/conffile" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" + "tailscale.com/types/ipproto" "tailscale.com/util/mak" "tailscale.com/util/prompt" + "tailscale.com/util/set" "tailscale.com/util/slicesx" "tailscale.com/version" ) @@ -128,6 +131,22 @@ const ( serveTypeTUN ) +func serveTypeFromConfString(sp conffile.ServiceProtocol) (st serveType, ok bool) { + switch sp { + case conffile.ProtoHTTP: + return serveTypeHTTP, true + case conffile.ProtoHTTPS, conffile.ProtoHTTPSInsecure, conffile.ProtoFile: + return serveTypeHTTPS, true + case conffile.ProtoTCP: + return serveTypeTCP, true + case conffile.ProtoTLSTerminatedTCP: + return serveTypeTLSTerminatedTCP, true + case conffile.ProtoTUN: + return serveTypeTUN, true + } + return -1, false +} + const noService tailcfg.ServiceName = "" var infoMap = map[serveMode]commandInfo{ @@ -232,6 +251,33 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { "`tailscale serve drain `). This is not needed if you are using `tailscale serve` to initialize a service.", Exec: e.runServeAdvertise, }, + { + Name: "get-config", + ShortUsage: fmt.Sprintf("tailscale %s get-config [--service=] [--all]", info.Name), + ShortHelp: "Get service configuration to save to a file", + LongHelp: hidden + "Get the configuration for services that this node is currently hosting in a\n" + + "format that can later be provided to set-config. This can be used to declaratively set\n" + + "configuration for a service host.", + Exec: e.runServeGetConfig, + FlagSet: e.newFlags("serve-get-config", func(fs *flag.FlagSet) { + fs.BoolVar(&e.allServices, "all", false, "read config from all services") + fs.Var(&serviceNameFlag{Value: &e.service}, "service", "read config from a particular service") + }), + }, + { + Name: "set-config", + ShortUsage: fmt.Sprintf("tailscale %s set-config [--service=] [--all]", info.Name), + ShortHelp: "Define service configuration from a file", + LongHelp: hidden + "Read the provided configuration file and use it to declaratively set the configuration\n" + + "for either a single service, or for all services that this node is hosting. If --service is specified,\n" + + "all endpoint handlers for that service are overwritten. 
If --all is specified, all endpoint handlers for\n" + + "all services are overwritten.", + Exec: e.runServeSetConfig, + FlagSet: e.newFlags("serve-set-config", func(fs *flag.FlagSet) { + fs.BoolVar(&e.allServices, "all", false, "apply config to all services") + fs.Var(&serviceNameFlag{Value: &e.service}, "service", "apply config to a particular service") + }), + }, }, } } @@ -540,7 +586,7 @@ func (e *serveEnv) runServeClear(ctx context.Context, args []string) error { func (e *serveEnv) runServeAdvertise(ctx context.Context, args []string) error { if len(args) == 0 { - return fmt.Errorf("error: missing service name argument") + return errors.New("error: missing service name argument") } if len(args) != 1 { fmt.Fprintf(Stderr, "error: invalid number of arguments\n\n") @@ -553,6 +599,258 @@ func (e *serveEnv) runServeAdvertise(ctx context.Context, args []string) error { return e.addServiceToPrefs(ctx, svc) } +func (e *serveEnv) runServeGetConfig(ctx context.Context, args []string) (err error) { + forSingleService := e.service.Validate() == nil + sc, err := e.lc.GetServeConfig(ctx) + if err != nil { + return err + } + + prefs, err := e.lc.GetPrefs(ctx) + if err != nil { + return err + } + advertised := set.SetOf(prefs.AdvertiseServices) + + st, err := e.getLocalClientStatusWithoutPeers(ctx) + if err != nil { + return err + } + magicDNSSuffix := st.CurrentTailnet.MagicDNSSuffix + + handleService := func(svcName tailcfg.ServiceName, serviceConfig *ipn.ServiceConfig) (*conffile.ServiceDetailsFile, error) { + var sdf conffile.ServiceDetailsFile + // Leave unset for true case since that's the default. + if !advertised.Contains(svcName.String()) { + sdf.Advertised.Set(false) + } + + if serviceConfig.Tun { + mak.Set(&sdf.Endpoints, &tailcfg.ProtoPortRange{Ports: tailcfg.PortRangeAny}, &conffile.Target{ + Protocol: conffile.ProtoTUN, + Destination: "", + DestinationPorts: tailcfg.PortRange{}, + }) + } + + for port, config := range serviceConfig.TCP { + sniName := fmt.Sprintf("%s.%s", svcName.WithoutPrefix(), magicDNSSuffix) + ppr := tailcfg.ProtoPortRange{Proto: int(ipproto.TCP), Ports: tailcfg.PortRange{First: port, Last: port}} + if config.TCPForward != "" { + var proto conffile.ServiceProtocol + if config.TerminateTLS != "" { + proto = conffile.ProtoTLSTerminatedTCP + } else { + proto = conffile.ProtoTCP + } + destHost, destPortStr, err := net.SplitHostPort(config.TCPForward) + if err != nil { + return nil, fmt.Errorf("parse TCPForward=%q: %w", config.TCPForward, err) + } + destPort, err := strconv.ParseUint(destPortStr, 10, 16) + if err != nil { + return nil, fmt.Errorf("parse port %q: %w", destPortStr, err) + } + mak.Set(&sdf.Endpoints, &ppr, &conffile.Target{ + Protocol: proto, + Destination: destHost, + DestinationPorts: tailcfg.PortRange{First: uint16(destPort), Last: uint16(destPort)}, + }) + } else if config.HTTP || config.HTTPS { + webKey := ipn.HostPort(net.JoinHostPort(sniName, strconv.FormatUint(uint64(port), 10))) + handlers, ok := serviceConfig.Web[webKey] + if !ok { + return nil, fmt.Errorf("service %q: HTTP/HTTPS is set but no handlers in config", svcName) + } + defaultHandler, ok := handlers.Handlers["/"] + if !ok { + return nil, fmt.Errorf("service %q: root handler not set", svcName) + } + if defaultHandler.Path != "" { + mak.Set(&sdf.Endpoints, &ppr, &conffile.Target{ + Protocol: conffile.ProtoFile, + Destination: defaultHandler.Path, + DestinationPorts: tailcfg.PortRange{}, + }) + } else if defaultHandler.Proxy != "" { + proto, rest, ok := strings.Cut(defaultHandler.Proxy, "://") 
+ if !ok { + return nil, fmt.Errorf("service %q: invalid proxy handler %q", svcName, defaultHandler.Proxy) + } + host, portStr, err := net.SplitHostPort(rest) + if err != nil { + return nil, fmt.Errorf("service %q: invalid proxy handler %q: %w", svcName, defaultHandler.Proxy, err) + } + + port, err := strconv.ParseUint(portStr, 10, 16) + if err != nil { + return nil, fmt.Errorf("service %q: parse port %q: %w", svcName, portStr, err) + } + + mak.Set(&sdf.Endpoints, &ppr, &conffile.Target{ + Protocol: conffile.ServiceProtocol(proto), + Destination: host, + DestinationPorts: tailcfg.PortRange{First: uint16(port), Last: uint16(port)}, + }) + } + } + } + + return &sdf, nil + } + + var j []byte + + if e.allServices && forSingleService { + return errors.New("cannot specify both --all and --service") + } else if e.allServices { + var scf conffile.ServicesConfigFile + scf.Version = "0.0.1" + for svcName, serviceConfig := range sc.Services { + sdf, err := handleService(svcName, serviceConfig) + if err != nil { + return err + } + mak.Set(&scf.Services, svcName, sdf) + } + j, err = json.MarshalIndent(scf, "", " ") + if err != nil { + return err + } + } else if forSingleService { + serviceConfig, ok := sc.Services[e.service] + if !ok { + j = []byte("{}") + } else { + sdf, err := handleService(e.service, serviceConfig) + if err != nil { + return err + } + sdf.Version = "0.0.1" + j, err = json.MarshalIndent(sdf, "", " ") + if err != nil { + return err + } + } + } else { + return errors.New("must specify either --service=svc: or --all") + } + + j = append(j, '\n') + _, err = e.stdout().Write(j) + return err +} + +func (e *serveEnv) runServeSetConfig(ctx context.Context, args []string) (err error) { + if len(args) != 1 { + return errors.New("must specify filename") + } + forSingleService := e.service.Validate() == nil + + var scf *conffile.ServicesConfigFile + if e.allServices && forSingleService { + return errors.New("cannot specify both --all and --service") + } else if e.allServices { + scf, err = conffile.LoadServicesConfig(args[0], "") + } else if forSingleService { + scf, err = conffile.LoadServicesConfig(args[0], e.service.String()) + } else { + return errors.New("must specify either --service=svc: or --all") + } + if err != nil { + return fmt.Errorf("could not read config from file %q: %w", args[0], err) + } + + st, err := e.getLocalClientStatusWithoutPeers(ctx) + if err != nil { + return fmt.Errorf("getting client status: %w", err) + } + magicDNSSuffix := st.CurrentTailnet.MagicDNSSuffix + sc, err := e.lc.GetServeConfig(ctx) + if err != nil { + return fmt.Errorf("getting current serve config: %w", err) + } + + // Clear all existing config. + if forSingleService { + if sc.Services != nil { + if sc.Services[e.service] != nil { + delete(sc.Services, e.service) + } + } + } else { + sc.Services = map[tailcfg.ServiceName]*ipn.ServiceConfig{} + } + advertisedServices := set.Set[string]{} + + for name, details := range scf.Services { + for ppr, ep := range details.Endpoints { + if ep.Protocol == conffile.ProtoTUN { + err := e.setServe(sc, name.String(), serveTypeTUN, 0, "", "", false, magicDNSSuffix) + if err != nil { + return err + } + // TUN mode is exclusive. 
+ break + } + + if ppr.Proto != int(ipproto.TCP) { + return fmt.Errorf("service %q: source ports must be TCP", name) + } + serveType, _ := serveTypeFromConfString(ep.Protocol) + for port := ppr.Ports.First; port <= ppr.Ports.Last; port++ { + var target string + if ep.Protocol == conffile.ProtoFile { + target = ep.Destination + } else { + // map source port range 1-1 to destination port range + destPort := ep.DestinationPorts.First + (port - ppr.Ports.First) + portStr := fmt.Sprint(destPort) + target = fmt.Sprintf("%s://%s", ep.Protocol, net.JoinHostPort(ep.Destination, portStr)) + } + err := e.setServe(sc, name.String(), serveType, port, "/", target, false, magicDNSSuffix) + if err != nil { + return fmt.Errorf("service %q: %w", name, err) + } + } + } + if v, set := details.Advertised.Get(); !set || v { + advertisedServices.Add(name.String()) + } + } + + var changed bool + var servicesList []string + if e.allServices { + servicesList = advertisedServices.Slice() + changed = true + } else if advertisedServices.Contains(e.service.String()) { + // If allServices wasn't set, the only service that could have been + // advertised is the one that was provided as a flag. + prefs, err := e.lc.GetPrefs(ctx) + if err != nil { + return err + } + if !slices.Contains(prefs.AdvertiseServices, e.service.String()) { + servicesList = append(prefs.AdvertiseServices, e.service.String()) + changed = true + } + } + if changed { + _, err = e.lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: servicesList, + }, + }) + if err != nil { + return err + } + } + + return e.lc.SetServeConfig(ctx, sc) +} + const backgroundExistsMsg = "background configuration already exists, use `tailscale %s --%s=%d off` to remove the existing configuration" // validateConfig checks if the serve config is valid to serve the type wanted on the port. 
diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 8c2fb0e92..0d3a006a1 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -61,6 +61,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp + github.com/tailscale/hujson from tailscale.com/ipn/conffile github.com/tailscale/web-client-prebuilt from tailscale.com/client/web github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 @@ -109,6 +110,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli+ tailscale.com/ipn from tailscale.com/client/local+ + tailscale.com/ipn/conffile from tailscale.com/cmd/tailscale/cli tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web+ @@ -137,6 +139,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/tsdial from tailscale.com/cmd/tailscale/cli+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/udprelay/status from tailscale.com/client/local+ + tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/syncs from tailscale.com/control/controlhttp+ diff --git a/ipn/conffile/serveconf.go b/ipn/conffile/serveconf.go new file mode 100644 index 000000000..bb63c1ac5 --- /dev/null +++ b/ipn/conffile/serveconf.go @@ -0,0 +1,239 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_serve + +package conffile + +import ( + "errors" + "fmt" + "net" + "os" + "path" + "strings" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "tailscale.com/tailcfg" + "tailscale.com/types/opt" + "tailscale.com/util/mak" +) + +// ServicesConfigFile is the config file format for services configuration. +type ServicesConfigFile struct { + // Version is always "0.0.1" and always present. + Version string `json:"version"` + + Services map[tailcfg.ServiceName]*ServiceDetailsFile `json:"services,omitzero"` +} + +// ServiceDetailsFile is the config syntax for an individual Tailscale Service. +type ServiceDetailsFile struct { + // Version is always "0.0.1", set if and only if this is not inside a + // [ServiceConfigFile]. + Version string `json:"version,omitzero"` + + // Endpoints are sets of reverse proxy mappings from ProtoPortRanges on a + // Service to Targets (proto+destination+port) on remote destinations (or + // localhost). + // For example, "tcp:443" -> "tcp://localhost:8000" is an endpoint definition + // mapping traffic on the TCP port 443 of the Service to port 8080 on localhost. + // The Proto in the key must be populated. + // As a special case, if the only mapping provided is "*" -> "TUN", that + // enables TUN/L3 mode, where packets are delivered to the Tailscale network + // interface with the understanding that the user will deal with them manually. 
+ Endpoints map[*tailcfg.ProtoPortRange]*Target `json:"endpoints"` + + // Advertised is a flag that tells control whether or not the client thinks + // it is ready to host a particular Tailscale Service. If unset, it is + // assumed to be true. + Advertised opt.Bool `json:"advertised,omitzero"` +} + +// ServiceProtocol is the protocol of a Target. +type ServiceProtocol string + +const ( + ProtoHTTP ServiceProtocol = "http" + ProtoHTTPS ServiceProtocol = "https" + ProtoHTTPSInsecure ServiceProtocol = "https+insecure" + ProtoTCP ServiceProtocol = "tcp" + ProtoTLSTerminatedTCP ServiceProtocol = "tls-terminated-tcp" + ProtoFile ServiceProtocol = "file" + ProtoTUN ServiceProtocol = "TUN" +) + +// Target is a destination for traffic to go to when it arrives at a Tailscale +// Service host. +type Target struct { + // The protocol over which to communicate with the Destination. + // Protocol == ProtoTUN is a special case, activating "TUN mode" where + // packets are delivered to the Tailscale TUN interface and then manually + // handled by the user. + Protocol ServiceProtocol + + // If Protocol is ProtoFile, then Destination is a file path. + // If Protocol is ProtoTUN, then Destination is empty. + // Otherwise, it is a host. + Destination string + + // If Protocol is not ProtoFile or ProtoTUN, then DestinationPorts is the + // set of ports on which to connect to the host referred to by Destination. + DestinationPorts tailcfg.PortRange +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +func (t *Target) UnmarshalJSON(buf []byte) error { + return jsonv2.Unmarshal(buf, t) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (t *Target) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + var str string + if err := jsonv2.UnmarshalDecode(dec, &str); err != nil { + return err + } + + // The TUN case does not look like a standard :// arrangement, + // so handled separately. 
+ if str == "TUN" { + t.Protocol = ProtoTUN + t.Destination = "" + t.DestinationPorts = tailcfg.PortRangeAny + return nil + } + + proto, rest, found := strings.Cut(str, "://") + if !found { + return errors.New("handler not of form ://") + } + + switch ServiceProtocol(proto) { + case ProtoFile: + target := path.Clean(rest) + t.Protocol = ProtoFile + t.Destination = target + t.DestinationPorts = tailcfg.PortRange{} + case ProtoHTTP, ProtoHTTPS, ProtoHTTPSInsecure, ProtoTCP, ProtoTLSTerminatedTCP: + host, portRange, err := tailcfg.ParseHostPortRange(rest) + if err != nil { + return err + } + t.Protocol = ServiceProtocol(proto) + t.Destination = host + t.DestinationPorts = portRange + default: + return errors.New("unsupported protocol") + } + + return nil +} + +func (t *Target) MarshalText() ([]byte, error) { + var out string + switch t.Protocol { + case ProtoFile: + out = fmt.Sprintf("%s://%s", t.Protocol, t.Destination) + case ProtoTUN: + out = "TUN" + case ProtoHTTP, ProtoHTTPS, ProtoHTTPSInsecure, ProtoTCP, ProtoTLSTerminatedTCP: + out = fmt.Sprintf("%s://%s", t.Protocol, net.JoinHostPort(t.Destination, t.DestinationPorts.String())) + default: + return nil, errors.New("unsupported protocol") + } + return []byte(out), nil +} + +func LoadServicesConfig(filename string, forService string) (*ServicesConfigFile, error) { + data, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + var json []byte + if hujsonStandardize != nil { + json, err = hujsonStandardize(data) + if err != nil { + return nil, err + } + } else { + json = data + } + var ver struct { + Version string `json:"version"` + } + if err = jsonv2.Unmarshal(json, &ver); err != nil { + return nil, fmt.Errorf("could not parse config file version: %w", err) + } + switch ver.Version { + case "": + return nil, errors.New("config file must have \"version\" field") + case "0.0.1": + return loadConfigV0(json, forService) + } + return nil, fmt.Errorf("unsupported config file version %q", ver.Version) +} + +func loadConfigV0(json []byte, forService string) (*ServicesConfigFile, error) { + var scf ServicesConfigFile + if svcName := tailcfg.AsServiceName(forService); svcName != "" { + var sdf ServiceDetailsFile + err := jsonv2.Unmarshal(json, &sdf, jsonv2.RejectUnknownMembers(true)) + if err != nil { + return nil, err + } + mak.Set(&scf.Services, svcName, &sdf) + + } else { + err := jsonv2.Unmarshal(json, &scf, jsonv2.RejectUnknownMembers(true)) + if err != nil { + return nil, err + } + } + for svcName, svc := range scf.Services { + if forService == "" && svc.Version != "" { + return nil, errors.New("services cannot be versioned separately from config file") + } + if err := svcName.Validate(); err != nil { + return nil, err + } + if svc.Endpoints == nil { + return nil, fmt.Errorf("service %q: missing \"endpoints\" field", svcName) + } + var sourcePorts []tailcfg.PortRange + foundTUN := false + foundNonTUN := false + for ppr, target := range svc.Endpoints { + if target.Protocol == "TUN" { + if ppr.Proto != 0 || ppr.Ports != tailcfg.PortRangeAny { + return nil, fmt.Errorf("service %q: destination \"TUN\" can only be used with source \"*\"", svcName) + } + foundTUN = true + } else { + if ppr.Ports.Last-ppr.Ports.First != target.DestinationPorts.Last-target.DestinationPorts.First { + return nil, fmt.Errorf("service %q: source and destination port ranges must be of equal size", svcName.String()) + } + foundNonTUN = true + } + if foundTUN && foundNonTUN { + return nil, fmt.Errorf("service %q: cannot mix TUN mode with non-TUN mode", 
svcName) + } + if pr := findOverlappingRange(sourcePorts, ppr.Ports); pr != nil { + return nil, fmt.Errorf("service %q: source port ranges %q and %q overlap", svcName, pr.String(), ppr.Ports.String()) + } + sourcePorts = append(sourcePorts, ppr.Ports) + } + } + return &scf, nil +} + +// findOverlappingRange finds and returns a reference to a [tailcfg.PortRange] +// in haystack that overlaps with needle. It returns nil if it doesn't find one. +func findOverlappingRange(haystack []tailcfg.PortRange, needle tailcfg.PortRange) *tailcfg.PortRange { + for _, pr := range haystack { + if pr.Contains(needle.First) || pr.Contains(needle.Last) || needle.Contains(pr.First) || needle.Contains(pr.Last) { + return &pr + } + } + return nil +} diff --git a/tailcfg/proto_port_range.go b/tailcfg/proto_port_range.go index f65c58804..03505dbd1 100644 --- a/tailcfg/proto_port_range.go +++ b/tailcfg/proto_port_range.go @@ -5,7 +5,6 @@ package tailcfg import ( "errors" - "fmt" "strconv" "strings" @@ -70,14 +69,7 @@ func (ppr ProtoPortRange) String() string { buf.Write(text) buf.Write([]byte(":")) } - pr := ppr.Ports - if pr.First == pr.Last { - fmt.Fprintf(&buf, "%d", pr.First) - } else if pr == PortRangeAny { - buf.WriteByte('*') - } else { - fmt.Fprintf(&buf, "%d-%d", pr.First, pr.Last) - } + buf.WriteString(ppr.Ports.String()) return buf.String() } @@ -104,7 +96,7 @@ func parseProtoPortRange(ipProtoPort string) (*ProtoPortRange, error) { if !strings.Contains(ipProtoPort, ":") { ipProtoPort = "*:" + ipProtoPort } - protoStr, portRange, err := parseHostPortRange(ipProtoPort) + protoStr, portRange, err := ParseHostPortRange(ipProtoPort) if err != nil { return nil, err } @@ -126,9 +118,9 @@ func parseProtoPortRange(ipProtoPort string) (*ProtoPortRange, error) { return ppr, nil } -// parseHostPortRange parses hostport as HOST:PORTS where HOST is +// ParseHostPortRange parses hostport as HOST:PORTS where HOST is // returned unchanged and PORTS is is either "*" or PORTLOW-PORTHIGH ranges. -func parseHostPortRange(hostport string) (host string, ports PortRange, err error) { +func ParseHostPortRange(hostport string) (host string, ports PortRange, err error) { hostport = strings.ToLower(hostport) colon := strings.LastIndexByte(hostport, ':') if colon < 0 { diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 3edc9aef0..b2c1a402c 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -17,6 +17,7 @@ import ( "net/netip" "reflect" "slices" + "strconv" "strings" "time" @@ -1478,6 +1479,15 @@ func (pr PortRange) Contains(port uint16) bool { var PortRangeAny = PortRange{0, 65535} +func (pr PortRange) String() string { + if pr.First == pr.Last { + return strconv.FormatUint(uint64(pr.First), 10) + } else if pr == PortRangeAny { + return "*" + } + return fmt.Sprintf("%d-%d", pr.First, pr.Last) +} + // NetPortRange represents a range of ports that's allowed for one or more IPs. type NetPortRange struct { _ structs.Incomparable From 0a33aae823eb5604f7698ce1dad99605eaed97c2 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 10 Oct 2025 09:03:38 -0700 Subject: [PATCH 1543/1708] util/eventbus: run subscriber functions in a goroutine (#17510) With a channel subscriber, the subscription processing always occurs on another goroutine. The SubscriberFunc (prior to this commit) runs its callbacks on the client's own goroutine. This changes the semantics, though: In addition to more directly pushing back on the publisher, a publisher and subscriber can deadlock in a SubscriberFunc but succeed on a Subscriber. 
They should behave equivalently regardless which interface they use. Arguably the caller should deal with this by creating its own goroutine if it needs to. However, that loses much of the benefit of the SubscriberFunc API, as it will need to manage the lifecycle of that goroutine. So, for practical ergonomics, let's make the SubscriberFunc do this management on the user's behalf. (We discussed doing this in #17432, but decided not to do it yet). We can optimize this approach further, if we need to, without changing the API. Updates #17487 Change-Id: I19ea9e8f246f7b406711f5a16518ef7ff21a1ac9 Signed-off-by: M. J. Fromberger --- util/eventbus/subscribe.go | 38 ++++++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 56da413ef..c35c7e7f0 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -214,7 +214,7 @@ func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent t := vals.Peek().Event.(T) for { // Keep the cases in this select in sync with subscribeState.pump - // above. The only different should be that this select + // above. The only difference should be that this select // delivers a value on s.read. select { case s.read <- t: @@ -282,20 +282,30 @@ func (s *SubscriberFunc[T]) subscribeType() reflect.Type { return reflect.TypeFo // dispatch implements part of the subscriber interface. func (s *SubscriberFunc[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent], acceptCh func() chan DeliveredEvent, snapshot chan chan []DeliveredEvent) bool { + t := vals.Peek().Event.(T) + callDone := make(chan struct{}) + go s.runCallback(t, callDone) // Keep the cases in this select in sync with subscribeState.pump - // above. The only different should be that this select + // above. The only difference should be that this select // delivers a value by calling s.read. - select { - case val := <-acceptCh(): - vals.Add(val) - case <-ctx.Done(): - return false - case ch := <-snapshot: - ch <- vals.Snapshot() - default: + for { + select { + case <-callDone: + vals.Drop() + return true + case val := <-acceptCh(): + vals.Add(val) + case <-ctx.Done(): + return false + case ch := <-snapshot: + ch <- vals.Snapshot() + } } - t := vals.Peek().Event.(T) - s.read(t) - vals.Drop() - return true +} + +// runCallback invokes the callback on v and closes ch when it returns. +// This should be run in a goroutine. 
+func (s *SubscriberFunc[T]) runCallback(v T, ch chan struct{}) { + defer close(ch) + s.read(v) } From af15ee9c5f1018a202d4c38043f2686ae3233a91 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 10 Oct 2025 09:28:27 -0700 Subject: [PATCH 1544/1708] wgengine/magicsock: add clientmetrics for TX bytes/packets by af & conn type (#17515) Updates tailscale/corp#33206 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 30 ++++++++++++++++++++++++++++ wgengine/magicsock/magicsock_test.go | 4 ++++ 2 files changed, 34 insertions(+) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index b17aa11ae..f855936ce 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -808,6 +808,16 @@ func registerMetrics(reg *usermetric.Registry) *metrics { metricRecvDataBytesDERP.Register(&m.inboundBytesDERPTotal) metricRecvDataBytesPeerRelayIPv4.Register(&m.inboundBytesPeerRelayIPv4Total) metricRecvDataBytesPeerRelayIPv6.Register(&m.inboundBytesPeerRelayIPv6Total) + metricSendDataPacketsIPv4.Register(&m.outboundPacketsIPv4Total) + metricSendDataPacketsIPv6.Register(&m.outboundPacketsIPv6Total) + metricSendDataPacketsDERP.Register(&m.outboundPacketsDERPTotal) + metricSendDataPacketsPeerRelayIPv4.Register(&m.outboundPacketsPeerRelayIPv4Total) + metricSendDataPacketsPeerRelayIPv6.Register(&m.outboundPacketsPeerRelayIPv6Total) + metricSendDataBytesIPv4.Register(&m.outboundBytesIPv4Total) + metricSendDataBytesIPv6.Register(&m.outboundBytesIPv6Total) + metricSendDataBytesDERP.Register(&m.outboundBytesDERPTotal) + metricSendDataBytesPeerRelayIPv4.Register(&m.outboundBytesPeerRelayIPv4Total) + metricSendDataBytesPeerRelayIPv6.Register(&m.outboundBytesPeerRelayIPv6Total) metricSendUDP.Register(&m.outboundPacketsIPv4Total) metricSendUDP.Register(&m.outboundPacketsIPv6Total) metricSendDERP.Register(&m.outboundPacketsDERPTotal) @@ -856,6 +866,16 @@ func deregisterMetrics() { metricRecvDataBytesDERP.UnregisterAll() metricRecvDataBytesPeerRelayIPv4.UnregisterAll() metricRecvDataBytesPeerRelayIPv6.UnregisterAll() + metricSendDataPacketsIPv4.UnregisterAll() + metricSendDataPacketsIPv6.UnregisterAll() + metricSendDataPacketsDERP.UnregisterAll() + metricSendDataPacketsPeerRelayIPv4.UnregisterAll() + metricSendDataPacketsPeerRelayIPv6.UnregisterAll() + metricSendDataBytesIPv4.UnregisterAll() + metricSendDataBytesIPv6.UnregisterAll() + metricSendDataBytesDERP.UnregisterAll() + metricSendDataBytesPeerRelayIPv4.UnregisterAll() + metricSendDataBytesPeerRelayIPv6.UnregisterAll() metricSendUDP.UnregisterAll() metricSendDERP.UnregisterAll() metricSendPeerRelay.UnregisterAll() @@ -3956,6 +3976,11 @@ var ( metricRecvDataPacketsIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv6") metricRecvDataPacketsPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv4") metricRecvDataPacketsPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv6") + metricSendDataPacketsDERP = clientmetric.NewAggregateCounter("magicsock_send_data_derp") + metricSendDataPacketsIPv4 = clientmetric.NewAggregateCounter("magicsock_send_data_ipv4") + metricSendDataPacketsIPv6 = clientmetric.NewAggregateCounter("magicsock_send_data_ipv6") + metricSendDataPacketsPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_send_data_peer_relay_ipv4") + metricSendDataPacketsPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_send_data_peer_relay_ipv6") // Data bytes (non-disco) metricRecvDataBytesDERP = 
clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_derp") @@ -3963,6 +3988,11 @@ var ( metricRecvDataBytesIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_ipv6") metricRecvDataBytesPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_peer_relay_ipv4") metricRecvDataBytesPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_peer_relay_ipv6") + metricSendDataBytesDERP = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_derp") + metricSendDataBytesIPv4 = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_ipv4") + metricSendDataBytesIPv6 = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_ipv6") + metricSendDataBytesPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_peer_relay_ipv4") + metricSendDataBytesPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_peer_relay_ipv6") // Disco packets metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp") diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 3468798c1..d1d62a26e 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -1300,6 +1300,10 @@ func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { // the metrics by 2 to get the expected value. // TODO(kradalby): https://github.com/tailscale/tailscale/issues/13420 c.Assert(metricSendUDP.Value(), qt.Equals, metricIPv4TxPackets*2) + c.Assert(metricSendDataPacketsIPv4.Value(), qt.Equals, metricIPv4TxPackets*2) + c.Assert(metricSendDataPacketsDERP.Value(), qt.Equals, metricDERPTxPackets*2) + c.Assert(metricSendDataBytesIPv4.Value(), qt.Equals, metricIPv4TxBytes*2) + c.Assert(metricSendDataBytesDERP.Value(), qt.Equals, metricDERPTxBytes*2) c.Assert(metricRecvDataPacketsIPv4.Value(), qt.Equals, metricIPv4RxPackets*2) c.Assert(metricRecvDataPacketsDERP.Value(), qt.Equals, metricDERPRxPackets*2) c.Assert(metricRecvDataBytesIPv4.Value(), qt.Equals, metricIPv4RxBytes*2) From 8e98ecb5f7cf2dc8c36b482030bea0c45ab1d123 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Fri, 10 Oct 2025 12:34:27 -0400 Subject: [PATCH 1545/1708] net/netmon: handle net.IPAddr types during interface address parsing (#17523) updates tailscale/tailscale#16836 Android's altNetInterfaces implementation now returns net.IPAddr types which netmon wasn't handling. 
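For illustration, a minimal standalone sketch (not part of this patch) of the conversion the fix performs: a *net.IPAddr carries a bare IP with no mask, so it is mapped to a single-address prefix, /32 for a 4-byte IPv4 address and /128 for IPv6:

    package main

    import (
        "fmt"
        "net"
        "net/netip"
    )

    func main() {
        addrs := []net.Addr{
            &net.IPAddr{IP: net.IP{5, 6, 7, 8}},
            &net.IPAddr{IP: net.ParseIP("2001:db8::2")},
        }
        for _, a := range addrs {
            v, ok := a.(*net.IPAddr)
            if !ok {
                continue
            }
            if ip, ok := netip.AddrFromSlice(v.IP); ok {
                // Prints 5.6.7.8/32 and 2001:db8::2/128.
                fmt.Println(netip.PrefixFrom(ip, ip.BitLen()))
            }
        }
    }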
Signed-off-by: Jonathan Nobels --- net/netmon/netmon_test.go | 40 +++++++++++++++++++++++++++++++++++++++ net/netmon/state.go | 8 ++++++++ 2 files changed, 48 insertions(+) diff --git a/net/netmon/netmon_test.go b/net/netmon/netmon_test.go index 5fcdcc6cc..358dc0373 100644 --- a/net/netmon/netmon_test.go +++ b/net/netmon/netmon_test.go @@ -7,6 +7,7 @@ import ( "flag" "net" "net/netip" + "reflect" "sync/atomic" "testing" "time" @@ -267,6 +268,45 @@ func TestIsMajorChangeFrom(t *testing.T) { }) } } +func TestForeachInterface(t *testing.T) { + tests := []struct { + name string + addrs []net.Addr + want []string + }{ + { + name: "Mixed_IPv4_and_IPv6", + addrs: []net.Addr{ + &net.IPNet{IP: net.IPv4(1, 2, 3, 4), Mask: net.CIDRMask(24, 32)}, + &net.IPAddr{IP: net.IP{5, 6, 7, 8}, Zone: ""}, + &net.IPNet{IP: net.ParseIP("2001:db8::1"), Mask: net.CIDRMask(64, 128)}, + &net.IPAddr{IP: net.ParseIP("2001:db8::2"), Zone: ""}, + }, + want: []string{"1.2.3.4", "5.6.7.8", "2001:db8::1", "2001:db8::2"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var got []string + ifaces := InterfaceList{ + { + Interface: &net.Interface{Name: "eth0"}, + AltAddrs: tt.addrs, + }, + } + ifaces.ForeachInterface(func(iface Interface, prefixes []netip.Prefix) { + for _, prefix := range prefixes { + ip := prefix.Addr() + got = append(got, ip.String()) + } + }) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("got %q, want %q", got, tt.want) + } + }) + } +} type testOSMon struct { osMon diff --git a/net/netmon/state.go b/net/netmon/state.go index 73497e93f..27e3524e8 100644 --- a/net/netmon/state.go +++ b/net/netmon/state.go @@ -183,6 +183,10 @@ func (ifaces InterfaceList) ForeachInterfaceAddress(fn func(Interface, netip.Pre if pfx, ok := netaddr.FromStdIPNet(v); ok { fn(iface, pfx) } + case *net.IPAddr: + if ip, ok := netip.AddrFromSlice(v.IP); ok { + fn(iface, netip.PrefixFrom(ip, ip.BitLen())) + } } } } @@ -215,6 +219,10 @@ func (ifaces InterfaceList) ForeachInterface(fn func(Interface, []netip.Prefix)) if pfx, ok := netaddr.FromStdIPNet(v); ok { pfxs = append(pfxs, pfx) } + case *net.IPAddr: + if ip, ok := netip.AddrFromSlice(v.IP); ok { + pfxs = append(pfxs, netip.PrefixFrom(ip, ip.BitLen())) + } } } sort.Slice(pfxs, func(i, j int) bool { From a2dc517d7d4f571a5fe67c906d5ab885baf62f49 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Fri, 10 Oct 2025 10:08:24 -0700 Subject: [PATCH 1546/1708] all: specify explicit JSON format for time.Duration (#17307) The default representation of time.Duration has different JSON representation between v1 and v2. Apply an explicit format flag that uses the v1 representation so that this behavior does not change if serialized with v2. Updates tailscale/corp#791 Signed-off-by: Joe Tsai --- net/speedtest/speedtest.go | 2 +- tailcfg/tailcfg.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/speedtest/speedtest.go b/net/speedtest/speedtest.go index 7ab0881cc..a462dbeec 100644 --- a/net/speedtest/speedtest.go +++ b/net/speedtest/speedtest.go @@ -24,7 +24,7 @@ const ( // conduct the test. 
type config struct { Version int `json:"version"` - TestDuration time.Duration `json:"time"` + TestDuration time.Duration `json:"time,format:nano"` Direction Direction `json:"direction"` } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index b2c1a402c..e9f97bdc4 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2903,7 +2903,7 @@ type SSHAction struct { // SessionDuration, if non-zero, is how long the session can stay open // before being forcefully terminated. - SessionDuration time.Duration `json:"sessionDuration,omitempty"` + SessionDuration time.Duration `json:"sessionDuration,omitempty,format:nano"` // AllowAgentForwarding, if true, allows accepted connections to forward // the ssh agent if requested. From e45557afc0e46c9148a4e509e639b4024cf6f197 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Fri, 10 Oct 2025 10:28:36 -0700 Subject: [PATCH 1547/1708] types/persist: add AttestationKey (#17281) Extend Persist with AttestationKey to record a hardware-backed attestation key for the node's identity. Add a flag to tailscaled to allow users to control the use of hardware-backed keys to bind node identity to individual machines. Updates tailscale/corp#31269 Change-Id: Idcf40d730a448d85f07f1bebf387f086d4c58be3 Signed-off-by: Patrick O'Doherty --- cmd/cloner/cloner.go | 7 +++- cmd/cloner/cloner_test.go | 49 ++++++++++++++++++++++++ cmd/cloner/clonerex/clonerex.go | 25 +++++++++++- cmd/cloner/clonerex/clonerex_clone.go | 30 ++++++++++++++- cmd/derper/depaware.txt | 2 +- cmd/stund/depaware.txt | 5 ++- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/tailscaled.go | 55 +++++++++++++++++++-------- control/controlclient/direct.go | 24 ++++++++++++ feature/hooks.go | 19 +++++++++ feature/tpm/attestation.go | 15 +++++++- feature/tpm/tpm.go | 2 + ipn/ipnlocal/hwattest.go | 48 +++++++++++++++++++++++ ipn/ipnlocal/local.go | 38 +++++++++++++++--- ipn/ipnlocal/local_test.go | 21 ++++++++++ ipn/ipnlocal/profiles.go | 16 ++++++-- ipn/ipnlocal/profiles_test.go | 1 + ipn/prefs.go | 1 + ipn/prefs_test.go | 2 +- tailcfg/tailcfg.go | 11 ++++-- types/persist/persist.go | 18 ++++++++- types/persist/persist_clone.go | 4 ++ types/persist/persist_test.go | 2 +- types/persist/persist_view.go | 10 +++-- util/syspolicy/pkey/pkey.go | 4 ++ util/syspolicy/policy_keys.go | 1 + 26 files changed, 370 insertions(+), 42 deletions(-) create mode 100644 ipn/ipnlocal/hwattest.go diff --git a/cmd/cloner/cloner.go b/cmd/cloner/cloner.go index 15a808141..544d00518 100644 --- a/cmd/cloner/cloner.go +++ b/cmd/cloner/cloner.go @@ -121,7 +121,12 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { continue } if !hasBasicUnderlying(ft) { - writef("dst.%s = *src.%s.Clone()", fname, fname) + // don't dereference if the underlying type is an interface + if _, isInterface := ft.Underlying().(*types.Interface); isInterface { + writef("if src.%s != nil { dst.%s = src.%s.Clone() }", fname, fname, fname) + } else { + writef("dst.%s = *src.%s.Clone()", fname, fname) + } continue } } diff --git a/cmd/cloner/cloner_test.go b/cmd/cloner/cloner_test.go index cf1063714..3556c14bc 100644 --- a/cmd/cloner/cloner_test.go +++ b/cmd/cloner/cloner_test.go @@ -59,3 +59,52 @@ func TestSliceContainer(t *testing.T) { }) } } + +func TestInterfaceContainer(t *testing.T) { + examples := []struct { + name string + in *clonerex.InterfaceContainer + }{ + { + name: "nil", + in: nil, + }, + { + name: "zero", + in: &clonerex.InterfaceContainer{}, + }, + { + name: "with_interface", + in: &clonerex.InterfaceContainer{ + 
Interface: &clonerex.CloneableImpl{Value: 42}, + }, + }, + { + name: "with_nil_interface", + in: &clonerex.InterfaceContainer{ + Interface: nil, + }, + }, + } + + for _, ex := range examples { + t.Run(ex.name, func(t *testing.T) { + out := ex.in.Clone() + if !reflect.DeepEqual(ex.in, out) { + t.Errorf("Clone() = %v, want %v", out, ex.in) + } + + // Verify no aliasing: modifying the clone should not affect the original + if ex.in != nil && ex.in.Interface != nil { + if impl, ok := out.Interface.(*clonerex.CloneableImpl); ok { + impl.Value = 999 + if origImpl, ok := ex.in.Interface.(*clonerex.CloneableImpl); ok { + if origImpl.Value == 999 { + t.Errorf("Clone() aliased memory with original") + } + } + } + } + }) + } +} diff --git a/cmd/cloner/clonerex/clonerex.go b/cmd/cloner/clonerex/clonerex.go index 96bf8a0bd..6463f9144 100644 --- a/cmd/cloner/clonerex/clonerex.go +++ b/cmd/cloner/clonerex/clonerex.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type SliceContainer +//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type SliceContainer,InterfaceContainer // Package clonerex is an example package for the cloner tool. package clonerex @@ -9,3 +9,26 @@ package clonerex type SliceContainer struct { Slice []*int } + +// Cloneable is an interface with a Clone method. +type Cloneable interface { + Clone() Cloneable +} + +// CloneableImpl is a concrete type that implements Cloneable. +type CloneableImpl struct { + Value int +} + +func (c *CloneableImpl) Clone() Cloneable { + if c == nil { + return nil + } + return &CloneableImpl{Value: c.Value} +} + +// InterfaceContainer has a pointer to an interface field, which tests +// the special handling for interface types in the cloner. +type InterfaceContainer struct { + Interface Cloneable +} diff --git a/cmd/cloner/clonerex/clonerex_clone.go b/cmd/cloner/clonerex/clonerex_clone.go index e334a4e3a..533d7e723 100644 --- a/cmd/cloner/clonerex/clonerex_clone.go +++ b/cmd/cloner/clonerex/clonerex_clone.go @@ -35,9 +35,28 @@ var _SliceContainerCloneNeedsRegeneration = SliceContainer(struct { Slice []*int }{}) +// Clone makes a deep copy of InterfaceContainer. +// The result aliases no memory with the original. +func (src *InterfaceContainer) Clone() *InterfaceContainer { + if src == nil { + return nil + } + dst := new(InterfaceContainer) + *dst = *src + if src.Interface != nil { + dst.Interface = src.Interface.Clone() + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _InterfaceContainerCloneNeedsRegeneration = InterfaceContainer(struct { + Interface Cloneable +}{}) + // Clone duplicates src into dst and reports whether it succeeded. // To succeed, must be of types <*T, *T> or <*T, **T>, -// where T is one of SliceContainer. +// where T is one of SliceContainer,InterfaceContainer. 
func Clone(dst, src any) bool { switch src := src.(type) { case *SliceContainer: @@ -49,6 +68,15 @@ func Clone(dst, src any) bool { *dst = src.Clone() return true } + case *InterfaceContainer: + switch dst := dst.(type) { + case *InterfaceContainer: + *dst = *src.Clone() + return true + case **InterfaceContainer: + *dst = src.Clone() + return true + } } return false } diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 2fa1fed45..b8dd28e6b 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -132,7 +132,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/types/logger from tailscale.com/cmd/derper+ tailscale.com/types/netmap from tailscale.com/ipn tailscale.com/types/opt from tailscale.com/envknob+ - tailscale.com/types/persist from tailscale.com/ipn + tailscale.com/types/persist from tailscale.com/ipn+ tailscale.com/types/preftype from tailscale.com/ipn tailscale.com/types/ptr from tailscale.com/hostinfo+ tailscale.com/types/result from tailscale.com/util/lineiter diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index be3e0e0cf..bd8eebb7b 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -59,16 +59,17 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/stunserver from tailscale.com/cmd/stund tailscale.com/net/tsaddr from tailscale.com/tsweb tailscale.com/syncs from tailscale.com/metrics+ - tailscale.com/tailcfg from tailscale.com/version + tailscale.com/tailcfg from tailscale.com/version+ tailscale.com/tsweb from tailscale.com/cmd/stund+ tailscale.com/tsweb/promvarz from tailscale.com/cmd/stund tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/dnstype from tailscale.com/tailcfg tailscale.com/types/ipproto from tailscale.com/tailcfg - tailscale.com/types/key from tailscale.com/tailcfg + tailscale.com/types/key from tailscale.com/tailcfg+ tailscale.com/types/lazy from tailscale.com/version+ tailscale.com/types/logger from tailscale.com/tsweb+ tailscale.com/types/opt from tailscale.com/envknob+ + tailscale.com/types/persist from tailscale.com/feature tailscale.com/types/ptr from tailscale.com/tailcfg+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/tailcfg+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 0d3a006a1..d5b7b059f 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -162,7 +162,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/types/netmap from tailscale.com/ipn+ tailscale.com/types/nettype from tailscale.com/net/netcheck+ tailscale.com/types/opt from tailscale.com/client/tailscale+ - tailscale.com/types/persist from tailscale.com/ipn + tailscale.com/types/persist from tailscale.com/ipn+ tailscale.com/types/preftype from tailscale.com/cmd/tailscale/cli+ tailscale.com/types/ptr from tailscale.com/hostinfo+ tailscale.com/types/result from tailscale.com/util/lineiter diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 92c44f4c1..f14cdcff0 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -52,6 +52,7 @@ import ( "tailscale.com/syncs" "tailscale.com/tsd" "tailscale.com/types/flagtype" + "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/util/osshare" @@ -111,19 +112,20 @@ var args struct { // or comma-separated list thereof. 
tunname string - cleanUp bool - confFile string // empty, file path, or "vm:user-data" - debug string - port uint16 - statepath string - encryptState boolFlag - statedir string - socketpath string - birdSocketPath string - verbose int - socksAddr string // listen address for SOCKS5 server - httpProxyAddr string // listen address for HTTP proxy server - disableLogs bool + cleanUp bool + confFile string // empty, file path, or "vm:user-data" + debug string + port uint16 + statepath string + encryptState boolFlag + statedir string + socketpath string + birdSocketPath string + verbose int + socksAddr string // listen address for SOCKS5 server + httpProxyAddr string // listen address for HTTP proxy server + disableLogs bool + hardwareAttestation boolFlag } var ( @@ -204,6 +206,9 @@ func main() { flag.BoolVar(&printVersion, "version", false, "print version information and exit") flag.BoolVar(&args.disableLogs, "no-logs-no-support", false, "disable log uploads; this also disables any technical support") flag.StringVar(&args.confFile, "config", "", "path to config file, or 'vm:user-data' to use the VM's user-data (EC2)") + if buildfeatures.HasTPM { + flag.Var(&args.hardwareAttestation, "hardware-attestation", "use hardware-backed keys to bind node identity to this device when supported by the OS and hardware. Uses TPM 2.0 on Linux and Windows; SecureEnclave on macOS and iOS; and Keystore on Android") + } if f, ok := hookRegisterOutboundProxyFlags.GetOk(); ok { f() } @@ -667,6 +672,9 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID log.Fatalf("failed to start netstack: %v", err) } } + if buildfeatures.HasTPM && args.hardwareAttestation.v { + lb.SetHardwareAttested() + } return lb, nil } @@ -879,9 +887,26 @@ func applyIntegrationTestEnvKnob() { } } -// handleTPMFlags validates the --encrypt-state flag if set, and defaults -// state encryption on if it's supported and compatible with other settings. +// handleTPMFlags validates the --encrypt-state and --hardware-attestation flags +// if set, and defaults both to on if supported and compatible with other +// settings. func handleTPMFlags() { + switch { + case args.hardwareAttestation.v: + if _, err := key.NewEmptyHardwareAttestationKey(); err == key.ErrUnsupported { + log.SetFlags(0) + log.Fatalf("--hardware-attestation is not supported on this platform or in this build of tailscaled") + } + case !args.hardwareAttestation.set: + policyHWAttestation, _ := policyclient.Get().GetBoolean(pkey.HardwareAttestation, feature.HardwareAttestationAvailable()) + if !policyHWAttestation { + break + } + if feature.TPMAvailable() { + args.hardwareAttestation.v = true + } + } + switch { case args.encryptState.v: // Explicitly enabled, validate. 
diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 61886482d..63a12b249 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -7,6 +7,8 @@ import ( "bytes" "cmp" "context" + "crypto" + "crypto/sha256" "encoding/binary" "encoding/json" "errors" @@ -604,6 +606,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new if persist.NetworkLockKey.IsZero() { persist.NetworkLockKey = key.NewNLPrivate() } + nlPub := persist.NetworkLockKey.Public() if tryingNewKey.IsZero() { @@ -944,6 +947,27 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap TKAHead: tkaHead, ConnectionHandleForTest: connectionHandleForTest, } + + // If we have a hardware attestation key, sign the node key with it and send + // the key & signature in the map request. + if buildfeatures.HasTPM { + if k := persist.AsStruct().AttestationKey; k != nil && !k.IsZero() { + hwPub := key.HardwareAttestationPublicFromPlatformKey(k) + request.HardwareAttestationKey = hwPub + + t := c.clock.Now() + msg := fmt.Sprintf("%d|%s", t.Unix(), nodeKey.String()) + digest := sha256.Sum256([]byte(msg)) + sig, err := k.Sign(nil, digest[:], crypto.SHA256) + if err != nil { + c.logf("failed to sign node key with hardware attestation key: %v", err) + } else { + request.HardwareAttestationKeySignature = sig + request.HardwareAttestationKeySignatureTimestamp = t + } + } + } + var extraDebugFlags []string if buildfeatures.HasAdvertiseRoutes && hi != nil && c.netMon != nil && !c.skipIPForwardingCheck && ipForwardingBroken(hi.RoutableIPs, c.netMon.InterfaceState()) { diff --git a/feature/hooks.go b/feature/hooks.go index 2eade1ead..a3c6c0395 100644 --- a/feature/hooks.go +++ b/feature/hooks.go @@ -6,6 +6,9 @@ package feature import ( "net/http" "net/url" + + "tailscale.com/types/logger" + "tailscale.com/types/persist" ) // HookCanAutoUpdate is a hook for the clientupdate package @@ -45,6 +48,8 @@ var HookProxySetTransportGetProxyConnectHeader Hook[func(*http.Transport)] // and available. var HookTPMAvailable Hook[func() bool] +var HookGenerateAttestationKeyIfEmpty Hook[func(p *persist.Persist, logf logger.Logf) (bool, error)] + // TPMAvailable reports whether a TPM device is supported and available. func TPMAvailable() bool { if f, ok := HookTPMAvailable.GetOk(); ok { @@ -52,3 +57,17 @@ func TPMAvailable() bool { } return false } + +// HookHardwareAttestationAvailable is a hook that reports whether hardware +// attestation is supported and available. +var HookHardwareAttestationAvailable Hook[func() bool] + +// HardwareAttestationAvailable reports whether hardware attestation is +// supported and available (TPM on Windows/Linux, Secure Enclave on macOS|iOS, +// KeyStore on Android) +func HardwareAttestationAvailable() bool { + if f, ok := HookHardwareAttestationAvailable.GetOk(); ok { + return f() + } + return false +} diff --git a/feature/tpm/attestation.go b/feature/tpm/attestation.go index 92617f995..5fbda3b17 100644 --- a/feature/tpm/attestation.go +++ b/feature/tpm/attestation.go @@ -142,13 +142,18 @@ type attestationKeySerialized struct { TPMPublic []byte `json:"tpmPublic"` } +// MarshalJSON implements json.Marshaler. func (ak *attestationKey) MarshalJSON() ([]byte, error) { + if ak == nil || ak.IsZero() { + return []byte("null"), nil + } return json.Marshal(attestationKeySerialized{ TPMPublic: ak.tpmPublic.Bytes(), TPMPrivate: ak.tpmPrivate.Buffer, }) } +// UnmarshalJSON implements json.Unmarshaler. 
func (ak *attestationKey) UnmarshalJSON(data []byte) (retErr error) { var aks attestationKeySerialized if err := json.Unmarshal(data, &aks); err != nil { @@ -254,6 +259,9 @@ func (ak *attestationKey) Close() error { } func (ak *attestationKey) Clone() key.HardwareAttestationKey { + if ak == nil { + return nil + } return &attestationKey{ tpm: ak.tpm, tpmPrivate: ak.tpmPrivate, @@ -263,4 +271,9 @@ func (ak *attestationKey) Clone() key.HardwareAttestationKey { } } -func (ak *attestationKey) IsZero() bool { return !ak.loaded() } +func (ak *attestationKey) IsZero() bool { + if ak == nil { + return true + } + return !ak.loaded() +} diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index b67cb4e3b..dd37b0506 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -40,6 +40,8 @@ var infoOnce = sync.OnceValue(info) func init() { feature.Register("tpm") feature.HookTPMAvailable.Set(tpmSupported) + feature.HookHardwareAttestationAvailable.Set(tpmSupported) + hostinfo.RegisterHostinfoNewHook(func(hi *tailcfg.Hostinfo) { hi.TPM = infoOnce() }) diff --git a/ipn/ipnlocal/hwattest.go b/ipn/ipnlocal/hwattest.go new file mode 100644 index 000000000..2c93cad4c --- /dev/null +++ b/ipn/ipnlocal/hwattest.go @@ -0,0 +1,48 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tpm + +package ipnlocal + +import ( + "errors" + + "tailscale.com/feature" + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/types/persist" +) + +func init() { + feature.HookGenerateAttestationKeyIfEmpty.Set(generateAttestationKeyIfEmpty) +} + +// generateAttestationKeyIfEmpty generates a new hardware attestation key if +// none exists. It returns true if a new key was generated and stored in +// p.AttestationKey. +func generateAttestationKeyIfEmpty(p *persist.Persist, logf logger.Logf) (bool, error) { + // attempt to generate a new hardware attestation key if none exists + var ak key.HardwareAttestationKey + if p != nil { + ak = p.AttestationKey + } + + if ak == nil || ak.IsZero() { + var err error + ak, err = key.NewHardwareAttestationKey() + if err != nil { + if !errors.Is(err, key.ErrUnsupported) { + logf("failed to create hardware attestation key: %v", err) + } + } else if ak != nil { + logf("using new hardware attestation key: %v", ak.Public()) + if p == nil { + p = &persist.Persist{} + } + p.AttestationKey = ak + return true, nil + } + } + return false, nil +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index e04ef9e6c..8cc74c41e 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -392,6 +392,23 @@ type LocalBackend struct { // // See tailscale/corp#29969. overrideExitNodePolicy bool + + // hardwareAttested is whether backend should use a hardware-backed key to + // bind the node identity to this device. + hardwareAttested atomic.Bool +} + +// SetHardwareAttested enables hardware attestation key signatures in map +// requests, if supported on this platform. SetHardwareAttested should be called +// before Start. +func (b *LocalBackend) SetHardwareAttested() { + b.hardwareAttested.Store(true) +} + +// HardwareAttested reports whether hardware-backed attestation keys should be +// used to bind the node's identity to this device. +func (b *LocalBackend) HardwareAttested() bool { + return b.hardwareAttested.Load() } // HealthTracker returns the health tracker for the backend. 
@@ -2455,10 +2472,23 @@ func (b *LocalBackend) Start(opts ipn.Options) error { if b.reconcilePrefsLocked(newPrefs) { prefsChanged = true } + + // neither UpdatePrefs or reconciliation should change Persist + newPrefs.Persist = b.pm.CurrentPrefs().Persist().AsStruct() + + if buildfeatures.HasTPM { + if genKey, ok := feature.HookGenerateAttestationKeyIfEmpty.GetOk(); ok { + newKey, err := genKey(newPrefs.Persist, b.logf) + if err != nil { + b.logf("failed to populate attestation key from TPM: %v", err) + } + if newKey { + prefsChanged = true + } + } + } + if prefsChanged { - // Neither opts.UpdatePrefs nor prefs reconciliation - // is allowed to modify Persist; retain the old value. - newPrefs.Persist = b.pm.CurrentPrefs().Persist().AsStruct() if err := b.pm.SetPrefs(newPrefs.View(), cn.NetworkProfile()); err != nil { b.logf("failed to save updated and reconciled prefs: %v", err) } @@ -2491,8 +2521,6 @@ func (b *LocalBackend) Start(opts ipn.Options) error { discoPublic := b.MagicConn().DiscoPublicKey() - var err error - isNetstack := b.sys.IsNetstackRouter() debugFlags := controlDebugFlags if isNetstack { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index c8367d14d..33ecb688c 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -7030,6 +7030,27 @@ func TestDisplayMessageIPNBus(t *testing.T) { } } +func TestHardwareAttested(t *testing.T) { + b := new(LocalBackend) + + // default false + if got := b.HardwareAttested(); got != false { + t.Errorf("HardwareAttested() = %v, want false", got) + } + + // set true + b.SetHardwareAttested() + if got := b.HardwareAttested(); got != true { + t.Errorf("HardwareAttested() = %v, want true after SetHardwareAttested()", got) + } + + // repeat calls are safe; still true + b.SetHardwareAttested() + if got := b.HardwareAttested(); got != true { + t.Errorf("HardwareAttested() = %v, want true after second SetHardwareAttested()", got) + } +} + func TestDeps(t *testing.T) { deptest.DepChecker{ OnImport: func(pkg string) { diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 67e71aa70..9c2176378 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -19,7 +19,9 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnext" "tailscale.com/tailcfg" + "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/persist" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" ) @@ -645,8 +647,8 @@ func (pm *profileManager) setProfileAsUserDefault(profile ipn.LoginProfileView) return pm.WriteState(k, []byte(profile.Key())) } -func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error) { - bs, err := pm.store.ReadState(key) +func (pm *profileManager) loadSavedPrefs(k ipn.StateKey) (ipn.PrefsView, error) { + bs, err := pm.store.ReadState(k) if err == ipn.ErrStateNotExist || len(bs) == 0 { return defaultPrefs, nil } @@ -654,10 +656,18 @@ func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error return ipn.PrefsView{}, err } savedPrefs := ipn.NewPrefs() + + // if supported by the platform, create an empty hardware attestation key to use when deserializing + // to avoid type exceptions from json.Unmarshaling into an interface{}. 
+ hw, _ := key.NewEmptyHardwareAttestationKey() + savedPrefs.Persist = &persist.Persist{ + AttestationKey: hw, + } + if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil { return ipn.PrefsView{}, fmt.Errorf("parsing saved prefs: %v", err) } - pm.logf("using backend prefs for %q: %v", key, savedPrefs.Pretty()) + pm.logf("using backend prefs for %q: %v", k, savedPrefs.Pretty()) // Ignore any old stored preferences for https://login.tailscale.com // as the control server that would override the new default of diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 60c92ff8d..deeab2ade 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -151,6 +151,7 @@ func TestProfileDupe(t *testing.T) { ID: tailcfg.UserID(user), LoginName: fmt.Sprintf("user%d@example.com", user), }, + AttestationKey: nil, } } user1Node1 := newPersist(1, 1) diff --git a/ipn/prefs.go b/ipn/prefs.go index 4a0680bba..81dd1c1c3 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -709,6 +709,7 @@ func NewPrefs() *Prefs { // Provide default values for options which might be missing // from the json data for any reason. The json can still // override them to false. + p := &Prefs{ // ControlURL is explicitly not set to signal that // it's not yet configured, which relaxes the CLI "up" diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 3339a631c..233616409 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -501,7 +501,7 @@ func TestPrefsPretty(t *testing.T) { }, }, "linux", - `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u=""}}`, + `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u="" ak=-}}`, }, { Prefs{ diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index e9f97bdc4..ea4a9d1fa 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -176,7 +176,8 @@ type CapabilityVersion int // - 127: 2025-09-19: can handle C2N /debug/netmap. // - 128: 2025-10-02: can handle C2N /debug/health. // - 129: 2025-10-04: Fixed sleep/wake deadlock in magicsock when using peer relay (PR #17449) -const CurrentCapabilityVersion CapabilityVersion = 129 +// - 130: 2025-10-06: client can send key.HardwareAttestationPublic and key.HardwareAttestationKeySignature in MapRequest +const CurrentCapabilityVersion CapabilityVersion = 130 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -1372,9 +1373,13 @@ type MapRequest struct { // HardwareAttestationKey is the public key of the node's hardware-backed // identity attestation key, if any. HardwareAttestationKey key.HardwareAttestationPublic `json:",omitzero"` - // HardwareAttestationKeySignature is the signature of the NodeKey - // serialized using MarshalText using its hardware attestation key, if any. + // HardwareAttestationKeySignature is the signature of + // "$UNIX_TIMESTAMP|$NODE_KEY" using its hardware attestation key, if any. HardwareAttestationKeySignature []byte `json:",omitempty"` + // HardwareAttestationKeySignatureTimestamp is the time at which the + // HardwareAttestationKeySignature was created, if any. This UNIX timestamp + // value is prepended to the node key when signing. + HardwareAttestationKeySignatureTimestamp time.Time `json:",omitzero"` // Stream is whether the client wants to receive multiple MapResponses over // the same HTTP connection. 
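For reference, the "$UNIX_TIMESTAMP|$NODE_KEY" payload documented above can be rebuilt independently of the client code. The sketch below is illustrative only: the package and helper names are hypothetical and not part of this patch; it simply mirrors the fmt.Sprintf/sha256 construction shown in the direct.go hunk earlier. Verifying the resulting signature depends on the concrete key type behind key.HardwareAttestationPublic and is not shown here.

package hwattestexample // hypothetical package, for illustration only

import (
	"crypto/sha256"
	"fmt"
	"time"

	"tailscale.com/types/key"
)

// attestationDigest rebuilds the SHA-256 digest over "$UNIX_TIMESTAMP|$NODE_KEY"
// that the client signs and sends as MapRequest.HardwareAttestationKeySignature.
func attestationDigest(ts time.Time, nodeKey key.NodePublic) [sha256.Size]byte {
	msg := fmt.Sprintf("%d|%s", ts.Unix(), nodeKey.String())
	return sha256.Sum256([]byte(msg))
}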
diff --git a/types/persist/persist.go b/types/persist/persist.go index d888a6afb..4b62c79dd 100644 --- a/types/persist/persist.go +++ b/types/persist/persist.go @@ -26,6 +26,7 @@ type Persist struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey `json:",omitempty"` // DisallowedTKAStateIDs stores the tka.State.StateID values which // this node will not operate network lock on. This is used to @@ -84,11 +85,20 @@ func (p *Persist) Equals(p2 *Persist) bool { return false } + var pub, p2Pub key.HardwareAttestationPublic + if p.AttestationKey != nil && !p.AttestationKey.IsZero() { + pub = key.HardwareAttestationPublicFromPlatformKey(p.AttestationKey) + } + if p2.AttestationKey != nil && !p2.AttestationKey.IsZero() { + p2Pub = key.HardwareAttestationPublicFromPlatformKey(p2.AttestationKey) + } + return p.PrivateNodeKey.Equal(p2.PrivateNodeKey) && p.OldPrivateNodeKey.Equal(p2.OldPrivateNodeKey) && p.UserProfile.Equal(&p2.UserProfile) && p.NetworkLockKey.Equal(p2.NetworkLockKey) && p.NodeID == p2.NodeID && + pub.Equal(p2Pub) && reflect.DeepEqual(nilIfEmpty(p.DisallowedTKAStateIDs), nilIfEmpty(p2.DisallowedTKAStateIDs)) } @@ -96,12 +106,16 @@ func (p *Persist) Pretty() string { var ( ok, nk key.NodePublic ) + akString := "-" if !p.OldPrivateNodeKey.IsZero() { ok = p.OldPrivateNodeKey.Public() } if !p.PrivateNodeKey.IsZero() { nk = p.PublicNodeKey() } - return fmt.Sprintf("Persist{o=%v, n=%v u=%#v}", - ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName) + if p.AttestationKey != nil && !p.AttestationKey.IsZero() { + akString = fmt.Sprintf("%v", p.AttestationKey.Public()) + } + return fmt.Sprintf("Persist{o=%v, n=%v u=%#v ak=%s}", + ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName, akString) } diff --git a/types/persist/persist_clone.go b/types/persist/persist_clone.go index 680419ff2..9dbe7e0f6 100644 --- a/types/persist/persist_clone.go +++ b/types/persist/persist_clone.go @@ -19,6 +19,9 @@ func (src *Persist) Clone() *Persist { } dst := new(Persist) *dst = *src + if src.AttestationKey != nil { + dst.AttestationKey = src.AttestationKey.Clone() + } dst.DisallowedTKAStateIDs = append(src.DisallowedTKAStateIDs[:0:0], src.DisallowedTKAStateIDs...) 
return dst } @@ -31,5 +34,6 @@ var _PersistCloneNeedsRegeneration = Persist(struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey DisallowedTKAStateIDs []string }{}) diff --git a/types/persist/persist_test.go b/types/persist/persist_test.go index dbf2a6d8c..713114b74 100644 --- a/types/persist/persist_test.go +++ b/types/persist/persist_test.go @@ -21,7 +21,7 @@ func fieldsOf(t reflect.Type) (fields []string) { } func TestPersistEqual(t *testing.T) { - persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "DisallowedTKAStateIDs"} + persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "AttestationKey", "DisallowedTKAStateIDs"} if have := fieldsOf(reflect.TypeFor[Persist]()); !reflect.DeepEqual(have, persistHandles) { t.Errorf("Persist.Equal check might be out of sync\nfields: %q\nhandled: %q\n", have, persistHandles) diff --git a/types/persist/persist_view.go b/types/persist/persist_view.go index 7d1507468..dbf8294ef 100644 --- a/types/persist/persist_view.go +++ b/types/persist/persist_view.go @@ -89,10 +89,11 @@ func (v *PersistView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { func (v PersistView) PrivateNodeKey() key.NodePrivate { return v.ж.PrivateNodeKey } // needed to request key rotation -func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } -func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } -func (v PersistView) NetworkLockKey() key.NLPrivate { return v.ж.NetworkLockKey } -func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } +func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } +func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } +func (v PersistView) NetworkLockKey() key.NLPrivate { return v.ж.NetworkLockKey } +func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } +func (v PersistView) AttestationKey() tailcfg.StableNodeID { panic("unsupported") } // DisallowedTKAStateIDs stores the tka.State.StateID values which // this node will not operate network lock on. This is used to @@ -110,5 +111,6 @@ var _PersistViewNeedsRegeneration = Persist(struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey DisallowedTKAStateIDs []string }{}) diff --git a/util/syspolicy/pkey/pkey.go b/util/syspolicy/pkey/pkey.go index 79b4af1e6..e450625cd 100644 --- a/util/syspolicy/pkey/pkey.go +++ b/util/syspolicy/pkey/pkey.go @@ -141,6 +141,10 @@ const ( // It's a noop on other platforms. EncryptState Key = "EncryptState" + // HardwareAttestation is a boolean key that controls whether to use a + // hardware-backed key to bind the node identity to this device. + HardwareAttestation Key = "HardwareAttestation" + // PostureChecking indicates if posture checking is enabled and the client shall gather // posture data. // Key is a string value that specifies an option: "always", "never", "user-decides". 
diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index ae902e8c4..3a54f9dde 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -43,6 +43,7 @@ var implicitDefinitions = []*setting.Definition{ setting.NewDefinition(pkey.PostureChecking, setting.DeviceSetting, setting.PreferenceOptionValue), setting.NewDefinition(pkey.ReconnectAfter, setting.DeviceSetting, setting.DurationValue), setting.NewDefinition(pkey.Tailnet, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.HardwareAttestation, setting.DeviceSetting, setting.BooleanValue), // User policy settings (can be configured on a user- or device-basis): setting.NewDefinition(pkey.AdminConsoleVisibility, setting.UserSetting, setting.VisibilityValue), From 7c49cab1a6e6b2d05ab7133c07f6154d6b87f9ca Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Mon, 29 Sep 2025 11:44:23 -0600 Subject: [PATCH 1548/1708] clientupdate, util/osshare, util/winutil, version: improve Windows GUI filename resolution and WinUI build awareness On Windows arm64 we are going to need to ship two different GUI builds; one for Win10 (GOARCH=386) and one for Win11 (GOARCH=amd64, tags += winui). Due to quirks in MSI packaging, they cannot both share the same filename. This requires some fixes in places where we have hardcoded "tailscale-ipn" as the GUI filename. We also do some cleanup in clientupdate to ensure that autoupdates will continue to work correctly with the temporary "-winui" package variant. Fixes #17480 Updates https://github.com/tailscale/corp/issues/29940 Signed-off-by: Aaron Klotz --- clientupdate/clientupdate_windows.go | 32 +++++++++++----- util/osshare/filesharingstatus_windows.go | 46 ++++++++++++++--------- util/winutil/winutil_windows.go | 25 ++++++++++++ version/cmdname.go | 12 +++--- version/exename.go | 25 ++++++++++++ version/prop.go | 4 +- version/version_internal_test.go | 35 +++++++++++++++++ 7 files changed, 146 insertions(+), 33 deletions(-) create mode 100644 version/exename.go diff --git a/clientupdate/clientupdate_windows.go b/clientupdate/clientupdate_windows.go index b79d447ad..5faeda6dd 100644 --- a/clientupdate/clientupdate_windows.go +++ b/clientupdate/clientupdate_windows.go @@ -30,11 +30,6 @@ const ( // tailscale.exe process from running before the msiexec process runs and // tries to overwrite ourselves. winMSIEnv = "TS_UPDATE_WIN_MSI" - // winExePathEnv is the environment variable that is set along with - // winMSIEnv and carries the full path of the calling tailscale.exe binary. - // It is used to re-launch the GUI process (tailscale-ipn.exe) after - // install is complete. - winExePathEnv = "TS_UPDATE_WIN_EXE_PATH" // winVersionEnv is the environment variable that is set along with // winMSIEnv and carries the version of tailscale that is being installed. // It is used for logging purposes. 
@@ -78,6 +73,17 @@ func verifyAuthenticode(path string) error { return authenticode.Verify(path, certSubjectTailscale) } +func isTSGUIPresent() bool { + us, err := os.Executable() + if err != nil { + return false + } + + tsgui := filepath.Join(filepath.Dir(us), "tsgui.dll") + _, err = os.Stat(tsgui) + return err == nil +} + func (up *Updater) updateWindows() error { if msi := os.Getenv(winMSIEnv); msi != "" { // stdout/stderr from this part of the install could be lost since the @@ -131,7 +137,15 @@ you can run the command prompt as Administrator one of these ways: return err } up.cleanupOldDownloads(filepath.Join(msiDir, "*.msi")) - pkgsPath := fmt.Sprintf("%s/tailscale-setup-%s-%s.msi", up.Track, ver, arch) + + qualifiers := []string{ver, arch} + // TODO(aaron): Temporary hack so autoupdate still works on winui builds; + // remove when we enable winui by default on the unstable track. + if isTSGUIPresent() { + qualifiers = append(qualifiers, "winui") + } + + pkgsPath := fmt.Sprintf("%s/tailscale-setup-%s.msi", up.Track, strings.Join(qualifiers, "-")) msiTarget := filepath.Join(msiDir, path.Base(pkgsPath)) if err := up.downloadURLToFile(pkgsPath, msiTarget); err != nil { return err @@ -145,7 +159,7 @@ you can run the command prompt as Administrator one of these ways: up.Logf("making tailscale.exe copy to switch to...") up.cleanupOldDownloads(filepath.Join(os.TempDir(), updaterPrefix+"-*.exe")) - selfOrig, selfCopy, err := makeSelfCopy() + _, selfCopy, err := makeSelfCopy() if err != nil { return err } @@ -153,7 +167,7 @@ you can run the command prompt as Administrator one of these ways: up.Logf("running tailscale.exe copy for final install...") cmd := exec.Command(selfCopy, "update") - cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winExePathEnv+"="+selfOrig, winVersionEnv+"="+ver) + cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winVersionEnv+"="+ver) cmd.Stdout = up.Stderr cmd.Stderr = up.Stderr cmd.Stdin = os.Stdin @@ -189,7 +203,7 @@ func (up *Updater) installMSI(msi string) error { case windows.ERROR_SUCCESS_REBOOT_REQUIRED: // In most cases, updating Tailscale should not require a reboot. // If it does, it might be because we failed to close the GUI - // and the installer couldn't replace tailscale-ipn.exe. + // and the installer couldn't replace its executable. // The old GUI will continue to run until the next reboot. // Not ideal, but also not a retryable error. 
up.Logf("[unexpected] reboot required") diff --git a/util/osshare/filesharingstatus_windows.go b/util/osshare/filesharingstatus_windows.go index 999fc1cf7..c125de159 100644 --- a/util/osshare/filesharingstatus_windows.go +++ b/util/osshare/filesharingstatus_windows.go @@ -9,30 +9,31 @@ import ( "fmt" "os" "path/filepath" - "sync" + "runtime" "golang.org/x/sys/windows/registry" + "tailscale.com/types/lazy" "tailscale.com/types/logger" + "tailscale.com/util/winutil" ) const ( sendFileShellKey = `*\shell\tailscale` ) -var ipnExePath struct { - sync.Mutex - cache string // absolute path of tailscale-ipn.exe, populated lazily on first use -} +var ipnExePath lazy.SyncValue[string] // absolute path of the GUI executable func getIpnExePath(logf logger.Logf) string { - ipnExePath.Lock() - defer ipnExePath.Unlock() - - if ipnExePath.cache != "" { - return ipnExePath.cache + exe, err := winutil.GUIPathFromReg() + if err == nil { + return exe } - // Find the absolute path of tailscale-ipn.exe assuming that it's in the same + return findGUIInSameDirAsThisExe(logf) +} + +func findGUIInSameDirAsThisExe(logf logger.Logf) string { + // Find the absolute path of the GUI, assuming that it's in the same // directory as this executable (tailscaled.exe). p, err := os.Executable() if err != nil { @@ -43,14 +44,23 @@ func getIpnExePath(logf logger.Logf) string { logf("filepath.EvalSymlinks error: %v", err) return "" } - p = filepath.Join(filepath.Dir(p), "tailscale-ipn.exe") if p, err = filepath.Abs(p); err != nil { logf("filepath.Abs error: %v", err) return "" } - ipnExePath.cache = p - - return p + d := filepath.Dir(p) + candidates := []string{"tailscale-ipn.exe"} + if runtime.GOARCH == "arm64" { + // This name may be used on Windows 10 ARM64. + candidates = append(candidates, "tailscale-gui-386.exe") + } + for _, c := range candidates { + testPath := filepath.Join(d, c) + if _, err := os.Stat(testPath); err == nil { + return testPath + } + } + return "" } // SetFileSharingEnabled adds/removes "Send with Tailscale" from the Windows shell menu. @@ -64,7 +74,9 @@ func SetFileSharingEnabled(enabled bool, logf logger.Logf) { } func enableFileSharing(logf logger.Logf) { - path := getIpnExePath(logf) + path := ipnExePath.Get(func() string { + return getIpnExePath(logf) + }) if path == "" { return } @@ -79,7 +91,7 @@ func enableFileSharing(logf logger.Logf) { logf("k.SetStringValue error: %v", err) return } - if err := k.SetStringValue("Icon", path+",0"); err != nil { + if err := k.SetStringValue("Icon", path+",1"); err != nil { logf("k.SetStringValue error: %v", err) return } diff --git a/util/winutil/winutil_windows.go b/util/winutil/winutil_windows.go index 5dde9a347..c935b210e 100644 --- a/util/winutil/winutil_windows.go +++ b/util/winutil/winutil_windows.go @@ -8,8 +8,10 @@ import ( "fmt" "log" "math" + "os" "os/exec" "os/user" + "path/filepath" "reflect" "runtime" "strings" @@ -33,6 +35,10 @@ var ErrNoShell = errors.New("no Shell process is present") // ErrNoValue is returned when the value doesn't exist in the registry. var ErrNoValue = registry.ErrNotExist +// ErrBadRegValueFormat is returned when a string value does not match the +// expected format. +var ErrBadRegValueFormat = errors.New("registry value formatted incorrectly") + // GetDesktopPID searches the PID of the process that's running the // currently active desktop. Returns ErrNoShell if the shell is not present. // Usually the PID will be for explorer.exe. 
@@ -947,3 +953,22 @@ func IsDomainName(name string) (bool, error) { return isDomainName(name16) } + +// GUIPathFromReg obtains the path to the client GUI executable from the +// registry value that was written during installation. +func GUIPathFromReg() (string, error) { + regPath, err := GetRegString("GUIPath") + if err != nil { + return "", err + } + + if !filepath.IsAbs(regPath) { + return "", ErrBadRegValueFormat + } + + if _, err := os.Stat(regPath); err != nil { + return "", err + } + + return regPath, nil +} diff --git a/version/cmdname.go b/version/cmdname.go index 51e065438..c38544ce1 100644 --- a/version/cmdname.go +++ b/version/cmdname.go @@ -12,7 +12,7 @@ import ( "io" "os" "path" - "path/filepath" + "runtime" "strings" ) @@ -30,7 +30,7 @@ func CmdName() string { func cmdName(exe string) string { // fallbackName, the lowercase basename of the executable, is what we return if // we can't find the Go module metadata embedded in the file. - fallbackName := filepath.Base(strings.TrimSuffix(strings.ToLower(exe), ".exe")) + fallbackName := prepExeNameForCmp(exe, runtime.GOARCH) var ret string info, err := findModuleInfo(exe) @@ -45,10 +45,10 @@ func cmdName(exe string) string { break } } - if strings.HasPrefix(ret, "wg") && fallbackName == "tailscale-ipn" { - // The tailscale-ipn.exe binary for internal build system packaging reasons - // has a path of "tailscale.io/win/wg64", "tailscale.io/win/wg32", etc. - // Ignore that name and use "tailscale-ipn" instead. + if runtime.GOOS == "windows" && strings.HasPrefix(ret, "gui") && checkPreppedExeNameForGUI(fallbackName) { + // The GUI binary for internal build system packaging reasons + // has a path of "tailscale.io/win/gui". + // Ignore that name and use fallbackName instead. return fallbackName } if ret == "" { diff --git a/version/exename.go b/version/exename.go new file mode 100644 index 000000000..d5047c203 --- /dev/null +++ b/version/exename.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package version + +import ( + "path/filepath" + "strings" +) + +// prepExeNameForCmp strips any extension and arch suffix from exe, and +// lowercases it. +func prepExeNameForCmp(exe, arch string) string { + baseNoExt := strings.ToLower(strings.TrimSuffix(filepath.Base(exe), filepath.Ext(exe))) + archSuffix := "-" + arch + return strings.TrimSuffix(baseNoExt, archSuffix) +} + +func checkPreppedExeNameForGUI(preppedExeName string) bool { + return preppedExeName == "tailscale-ipn" || preppedExeName == "tailscale-gui" +} + +func isGUIExeName(exe, arch string) bool { + return checkPreppedExeNameForGUI(prepExeNameForCmp(exe, arch)) +} diff --git a/version/prop.go b/version/prop.go index 9327e6fe6..0d6a5c00d 100644 --- a/version/prop.go +++ b/version/prop.go @@ -159,7 +159,9 @@ func IsWindowsGUI() bool { if err != nil { return false } - return strings.EqualFold(exe, "tailscale-ipn.exe") || strings.EqualFold(exe, "tailscale-ipn") + // It is okay to use GOARCH here because we're checking whether our + // _own_ process is the GUI. 
+ return isGUIExeName(exe, runtime.GOARCH) }) } diff --git a/version/version_internal_test.go b/version/version_internal_test.go index 19aeab442..b3b848276 100644 --- a/version/version_internal_test.go +++ b/version/version_internal_test.go @@ -25,3 +25,38 @@ func TestIsValidLongWithTwoRepos(t *testing.T) { } } } + +func TestPrepExeNameForCmp(t *testing.T) { + cases := []struct { + exe string + want string + }{ + { + "tailscale-ipn.exe", + "tailscale-ipn", + }, + { + "tailscale-gui-amd64.exe", + "tailscale-gui", + }, + { + "tailscale-gui-amd64", + "tailscale-gui", + }, + { + "tailscale-ipn", + "tailscale-ipn", + }, + { + "TaIlScAlE-iPn.ExE", + "tailscale-ipn", + }, + } + + for _, c := range cases { + got := prepExeNameForCmp(c.exe, "amd64") + if got != c.want { + t.Errorf("prepExeNameForCmp(%q) = %q; want %q", c.exe, got, c.want) + } + } +} From d8a6d0183c35db1b8e7bf35d887772244c71e806 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Fri, 10 Oct 2025 12:24:52 -0700 Subject: [PATCH 1549/1708] ipn/ipnlocal: strip AttestationKey in redacted prefs view (#17527) Updates tailscale/corp#31269 Signed-off-by: Patrick O'Doherty --- ipn/ipnlocal/local.go | 1 + 1 file changed, 1 insertion(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8cc74c41e..36e4ad8a5 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1216,6 +1216,7 @@ func stripKeysFromPrefs(p ipn.PrefsView) ipn.PrefsView { p2.Persist.PrivateNodeKey = key.NodePrivate{} p2.Persist.OldPrivateNodeKey = key.NodePrivate{} p2.Persist.NetworkLockKey = key.NLPrivate{} + p2.Persist.AttestationKey = nil return p2.View() } From 005e264b5456f90d52920c2d396f307c645e1cbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 10 Oct 2025 15:33:30 -0400 Subject: [PATCH 1550/1708] util/eventbus/eventbustest: add support for synctest instead of timers (#17522) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before synctest, timers was needed to allow the events to flow into the test bus. There is still a timer, but this one is not derived from the test deadline and it is mostly arbitrary as synctest will render it practically non-existent. With this approach, tests that do not need to test for the absence of events do not rely on synctest. 
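For illustration, a minimal sketch of that simpler case, using only the eventbustest and eventbus APIs exercised elsewhere in this patch (the test name is made up): a test that only asserts that an event was seen can publish and call Expect directly, with no synctest at all.

func TestSeesEventWithoutSynctest(t *testing.T) {
	type eventOfInterest struct{}

	bus := eventbustest.NewBus(t)
	tw := eventbustest.NewWatcher(t, bus)

	client := bus.Client("testClient")
	eventbus.Publish[eventOfInterest](client).Publish(eventOfInterest{})

	// Expect blocks until the event arrives (or the watcher's internal
	// timeout fires), so no synctest.Wait is needed for the happy path.
	if err := eventbustest.Expect(tw, eventbustest.Type[eventOfInterest]()); err != nil {
		t.Error(err)
	}
}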
Updates #15160 Signed-off-by: Claus Lensbøl --- health/health_test.go | 117 ++++++++------ net/netmon/netmon_test.go | 2 +- util/eventbus/eventbustest/doc.go | 14 ++ util/eventbus/eventbustest/eventbustest.go | 35 ++-- .../eventbustest/eventbustest_test.go | 150 +++++++++--------- util/eventbus/eventbustest/examples_test.go | 59 +++++++ 6 files changed, 231 insertions(+), 146 deletions(-) diff --git a/health/health_test.go b/health/health_test.go index 3b5ebbb38..607071776 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -5,12 +5,14 @@ package health import ( "errors" + "flag" "fmt" "maps" "reflect" "slices" "strconv" "testing" + "testing/synctest" "time" "github.com/google/go-cmp/cmp" @@ -26,6 +28,8 @@ import ( "tailscale.com/version" ) +var doDebug = flag.Bool("debug", false, "Enable debug logging") + func wantChange(c Change) func(c Change) (bool, error) { return func(cEv Change) (bool, error) { if cEv.ControlHealthChanged != c.ControlHealthChanged { @@ -724,72 +728,83 @@ func TestControlHealthNotifies(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - bus := eventbustest.NewBus(t) - tw := eventbustest.NewWatcher(t, bus) - tw.TimeOut = time.Second - - ht := NewTracker(bus) - ht.SetIPNState("NeedsLogin", true) - ht.GotStreamedMapResponse() - - // Expect events at starup, before doing anything else - if err := eventbustest.ExpectExactly(tw, - eventbustest.Type[Change](), // warming-up - eventbustest.Type[Change](), // is-using-unstable-version - eventbustest.Type[Change](), // not-in-map-poll - ); err != nil { - t.Errorf("startup error: %v", err) - } + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) + if *doDebug { + eventbustest.LogAllEvents(t, bus) + } + tw := eventbustest.NewWatcher(t, bus) + + ht := NewTracker(bus) + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + + // Expect events at starup, before doing anything else + synctest.Wait() + if err := eventbustest.ExpectExactly(tw, + eventbustest.Type[Change](), // warming-up + eventbustest.Type[Change](), // is-using-unstable-version + eventbustest.Type[Change](), // not-in-map-poll + ); err != nil { + t.Errorf("startup error: %v", err) + } - // Only set initial state if we need to - if len(test.initialState) != 0 { - ht.SetControlHealth(test.initialState) - if err := eventbustest.ExpectExactly(tw, eventbustest.Type[Change]()); err != nil { - t.Errorf("initial state error: %v", err) + // Only set initial state if we need to + if len(test.initialState) != 0 { + ht.SetControlHealth(test.initialState) + synctest.Wait() + if err := eventbustest.ExpectExactly(tw, eventbustest.Type[Change]()); err != nil { + t.Errorf("initial state error: %v", err) + } } - } - ht.SetControlHealth(test.newState) + ht.SetControlHealth(test.newState) + // Close the bus early to avoid timers triggering more events. 
+ bus.Close() - if err := eventbustest.ExpectExactly(tw, test.wantEvents...); err != nil { - t.Errorf("event error: %v", err) - } + synctest.Wait() + if err := eventbustest.ExpectExactly(tw, test.wantEvents...); err != nil { + t.Errorf("event error: %v", err) + } + }) }) } } func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { - bus := eventbustest.NewBus(t) - tw := eventbustest.NewWatcher(t, bus) - tw.TimeOut = 100 * time.Millisecond - ht := NewTracker(bus) - ht.SetIPNState("NeedsLogin", true) + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + ht := NewTracker(bus) + ht.SetIPNState("NeedsLogin", true) - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "control-health": {}, - }) + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "control-health": {}, + }) - state := ht.CurrentState() - _, ok := state.Warnings["control-health"] + state := ht.CurrentState() + _, ok := state.Warnings["control-health"] - if ok { - t.Error("got a warning with code 'control-health', want none") - } + if ok { + t.Error("got a warning with code 'control-health', want none") + } - // An event is emitted when SetIPNState is run above, - // so only fail on the second event. - eventCounter := 0 - expectOne := func(c *Change) error { - eventCounter++ - if eventCounter == 1 { - return nil + // An event is emitted when SetIPNState is run above, + // so only fail on the second event. + eventCounter := 0 + expectOne := func(c *Change) error { + eventCounter++ + if eventCounter == 1 { + return nil + } + return errors.New("saw more than 1 event") } - return errors.New("saw more than 1 event") - } - if err := eventbustest.Expect(tw, expectOne); err == nil { - t.Error("event got emitted, want it to not be called") - } + synctest.Wait() + if err := eventbustest.Expect(tw, expectOne); err == nil { + t.Error("event got emitted, want it to not be called") + } + }) } // TestCurrentStateETagControlHealth tests that the ETag on an [UnhealthyState] diff --git a/net/netmon/netmon_test.go b/net/netmon/netmon_test.go index 358dc0373..6a87cedb8 100644 --- a/net/netmon/netmon_test.go +++ b/net/netmon/netmon_test.go @@ -144,7 +144,7 @@ func TestMonitorMode(t *testing.T) { <-done t.Logf("%v callbacks", n) case "eventbus": - tw.TimeOut = *monitorDuration + time.AfterFunc(*monitorDuration, bus.Close) n := 0 mon.Start() eventbustest.Expect(tw, func(event *ChangeDelta) (bool, error) { diff --git a/util/eventbus/eventbustest/doc.go b/util/eventbus/eventbustest/doc.go index 9e39504a8..1e9928b9d 100644 --- a/util/eventbus/eventbustest/doc.go +++ b/util/eventbus/eventbustest/doc.go @@ -39,6 +39,20 @@ // checks that the stream contains exactly the given events in the given order, // and no others. // +// To test for the absence of events, use [ExpectExactly] without any +// expected events, along side [testing/synctest] to avoid waiting for timers +// to ensure that no events are produced. This will look like: +// +// synctest.Test(t, func(t *testing.T) { +// bus := eventbustest.NewBus(t) +// tw := eventbustest.NewWatcher(t, bus) +// somethingThatShouldNotEmitsSomeEvent() +// synctest.Wait() +// if err := eventbustest.ExpectExactly(tw); err != nil { +// t.Errorf("Expected no events or errors, got %v", err) +// } +// }) +// // See the [usage examples]. 
// // [usage examples]: https://github.com/tailscale/tailscale/blob/main/util/eventbus/eventbustest/examples_test.go diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index 3f7bf4553..fd8a15081 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -27,13 +27,9 @@ func NewBus(t testing.TB) *eventbus.Bus { // [Expect] and [ExpectExactly], to verify that the desired events were captured. func NewWatcher(t *testing.T, bus *eventbus.Bus) *Watcher { tw := &Watcher{ - mon: bus.Debugger().WatchBus(), - TimeOut: 5 * time.Second, - chDone: make(chan bool, 1), - events: make(chan any, 100), - } - if deadline, ok := t.Deadline(); ok { - tw.TimeOut = deadline.Sub(time.Now()) + mon: bus.Debugger().WatchBus(), + chDone: make(chan bool, 1), + events: make(chan any, 100), } t.Cleanup(tw.done) go tw.watch() @@ -41,16 +37,15 @@ func NewWatcher(t *testing.T, bus *eventbus.Bus) *Watcher { } // Watcher monitors and holds events for test expectations. +// The Watcher works with [synctest], and some scenarios does require the use of +// [synctest]. This is amongst others true if you are testing for the absence of +// events. +// +// For usage examples, see the documentation in the top of the package. type Watcher struct { mon *eventbus.Subscriber[eventbus.RoutedEvent] events chan any chDone chan bool - // TimeOut defines when the Expect* functions should stop looking for events - // coming from the Watcher. The value is set by [NewWatcher] and defaults to - // the deadline passed in by [testing.T]. If looking to verify the absence - // of an event, the TimeOut can be set to a lower value after creating the - // Watcher. - TimeOut time.Duration } // Type is a helper representing the expectation to see an event of type T, without @@ -103,7 +98,8 @@ func Expect(tw *Watcher, filters ...any) error { } else if ok { head++ } - case <-time.After(tw.TimeOut): + // Use synctest when you want an error here. + case <-time.After(100 * time.Second): // "indefinitely", to advance a synctest clock return fmt.Errorf( "timed out waiting for event, saw %d events, %d was expected", eventCount, len(filters)) @@ -118,12 +114,16 @@ func Expect(tw *Watcher, filters ...any) error { // in a given order, returning an error if the events does not match the given list // exactly. The given events are represented by a function as described in // [Expect]. Use [Expect] if other events are allowed. +// +// If you are expecting ExpectExactly to fail because of a missing event, or if +// you are testing for the absence of events, call [synctest.Wait] after +// actions that would publish an event, but before calling ExpectExactly. 
func ExpectExactly(tw *Watcher, filters ...any) error { if len(filters) == 0 { select { case event := <-tw.events: return fmt.Errorf("saw event type %s, expected none", reflect.TypeOf(event)) - case <-time.After(tw.TimeOut): + case <-time.After(100 * time.Second): // "indefinitely", to advance a synctest clock return nil } } @@ -146,7 +146,7 @@ func ExpectExactly(tw *Watcher, filters ...any) error { return fmt.Errorf( "expected test ok for type %s, at index %d", argType, pos) } - case <-time.After(tw.TimeOut): + case <-time.After(100 * time.Second): // "indefinitely", to advance a synctest clock return fmt.Errorf( "timed out waiting for event, saw %d events, %d was expected", eventCount, len(filters)) @@ -162,6 +162,9 @@ func (tw *Watcher) watch() { select { case event := <-tw.mon.Events(): tw.events <- event.Event + case <-tw.mon.Done(): + tw.done() + return case <-tw.chDone: tw.mon.Close() return diff --git a/util/eventbus/eventbustest/eventbustest_test.go b/util/eventbus/eventbustest/eventbustest_test.go index 2d126767d..ac454023c 100644 --- a/util/eventbus/eventbustest/eventbustest_test.go +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -8,7 +8,7 @@ import ( "fmt" "strings" "testing" - "time" + "testing/synctest" "tailscale.com/util/eventbus" "tailscale.com/util/eventbus/eventbustest" @@ -110,37 +110,35 @@ func TestExpectFilter(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - bus := eventbustest.NewBus(t) - t.Cleanup(bus.Close) + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) - if *doDebug { - eventbustest.LogAllEvents(t, bus) - } - tw := eventbustest.NewWatcher(t, bus) + if *doDebug { + eventbustest.LogAllEvents(t, bus) + } + tw := eventbustest.NewWatcher(t, bus) - // TODO(cmol): When synctest is out of experimental, use that instead: - // https://go.dev/blog/synctest - tw.TimeOut = 10 * time.Millisecond + client := bus.Client("testClient") + updater := eventbus.Publish[EventFoo](client) - client := bus.Client("testClient") - defer client.Close() - updater := eventbus.Publish[EventFoo](client) + for _, i := range tt.events { + updater.Publish(EventFoo{i}) + } - for _, i := range tt.events { - updater.Publish(EventFoo{i}) - } + synctest.Wait() - if err := eventbustest.Expect(tw, tt.expectFunc); err != nil { - if tt.wantErr == "" { - t.Errorf("Expect[EventFoo]: unexpected error: %v", err) - } else if !strings.Contains(err.Error(), tt.wantErr) { - t.Errorf("Expect[EventFoo]: err = %v, want %q", err, tt.wantErr) - } else { - t.Logf("Got expected error: %v (OK)", err) + if err := eventbustest.Expect(tw, tt.expectFunc); err != nil { + if tt.wantErr == "" { + t.Errorf("Expect[EventFoo]: unexpected error: %v", err) + } else if !strings.Contains(err.Error(), tt.wantErr) { + t.Errorf("Expect[EventFoo]: err = %v, want %q", err, tt.wantErr) + } else { + t.Logf("Got expected error: %v (OK)", err) + } + } else if tt.wantErr != "" { + t.Errorf("Expect[EventFoo]: unexpectedly succeeded, want error %q", tt.wantErr) } - } else if tt.wantErr != "" { - t.Errorf("Expect[EventFoo]: unexpectedly succeeded, want error %q", tt.wantErr) - } + }) }) } } @@ -244,37 +242,35 @@ func TestExpectEvents(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - bus := eventbustest.NewBus(t) - t.Cleanup(bus.Close) + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) - tw := eventbustest.NewWatcher(t, bus) - // TODO(cmol): When synctest is out of experimental, use that instead: - // https://go.dev/blog/synctest - 
tw.TimeOut = 100 * time.Millisecond + tw := eventbustest.NewWatcher(t, bus) - client := bus.Client("testClient") - defer client.Close() - updaterFoo := eventbus.Publish[EventFoo](client) - updaterBar := eventbus.Publish[EventBar](client) - updaterBaz := eventbus.Publish[EventBaz](client) + client := bus.Client("testClient") + updaterFoo := eventbus.Publish[EventFoo](client) + updaterBar := eventbus.Publish[EventBar](client) + updaterBaz := eventbus.Publish[EventBaz](client) - for _, ev := range tt.events { - switch ev.(type) { - case EventFoo: - evCast := ev.(EventFoo) - updaterFoo.Publish(evCast) - case EventBar: - evCast := ev.(EventBar) - updaterBar.Publish(evCast) - case EventBaz: - evCast := ev.(EventBaz) - updaterBaz.Publish(evCast) + for _, ev := range tt.events { + switch ev := ev.(type) { + case EventFoo: + evCast := ev + updaterFoo.Publish(evCast) + case EventBar: + evCast := ev + updaterBar.Publish(evCast) + case EventBaz: + evCast := ev + updaterBaz.Publish(evCast) + } } - } - if err := eventbustest.Expect(tw, tt.expectEvents...); (err != nil) != tt.wantErr { - t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) - } + synctest.Wait() + if err := eventbustest.Expect(tw, tt.expectEvents...); (err != nil) != tt.wantErr { + t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) + } + }) }) } } @@ -378,37 +374,35 @@ func TestExpectExactlyEventsFilter(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - bus := eventbustest.NewBus(t) - t.Cleanup(bus.Close) + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) - tw := eventbustest.NewWatcher(t, bus) - // TODO(cmol): When synctest is out of experimental, use that instead: - // https://go.dev/blog/synctest - tw.TimeOut = 10 * time.Millisecond + tw := eventbustest.NewWatcher(t, bus) - client := bus.Client("testClient") - defer client.Close() - updaterFoo := eventbus.Publish[EventFoo](client) - updaterBar := eventbus.Publish[EventBar](client) - updaterBaz := eventbus.Publish[EventBaz](client) + client := bus.Client("testClient") + updaterFoo := eventbus.Publish[EventFoo](client) + updaterBar := eventbus.Publish[EventBar](client) + updaterBaz := eventbus.Publish[EventBaz](client) - for _, ev := range tt.events { - switch ev.(type) { - case EventFoo: - evCast := ev.(EventFoo) - updaterFoo.Publish(evCast) - case EventBar: - evCast := ev.(EventBar) - updaterBar.Publish(evCast) - case EventBaz: - evCast := ev.(EventBaz) - updaterBaz.Publish(evCast) + for _, ev := range tt.events { + switch ev := ev.(type) { + case EventFoo: + evCast := ev + updaterFoo.Publish(evCast) + case EventBar: + evCast := ev + updaterBar.Publish(evCast) + case EventBaz: + evCast := ev + updaterBaz.Publish(evCast) + } } - } - if err := eventbustest.ExpectExactly(tw, tt.expectEvents...); (err != nil) != tt.wantErr { - t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) - } + synctest.Wait() + if err := eventbustest.ExpectExactly(tw, tt.expectEvents...); (err != nil) != tt.wantErr { + t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) + } + }) }) } } diff --git a/util/eventbus/eventbustest/examples_test.go b/util/eventbus/eventbustest/examples_test.go index bc06e60a9..c84811317 100644 --- a/util/eventbus/eventbustest/examples_test.go +++ b/util/eventbus/eventbustest/examples_test.go @@ -5,6 +5,8 @@ package eventbustest_test import ( "testing" + "testing/synctest" + "time" "tailscale.com/util/eventbus" "tailscale.com/util/eventbus/eventbustest" @@ -199,3 +201,60 @@ func 
TestExample_ExpectExactly_WithMultipleFunctions(t *testing.T) { // Output: // expected event type eventbustest.eventOfCuriosity, saw eventbustest.eventOfNoConcern, at index 1 } + +func TestExample_ExpectExactly_NoEvents(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + go func() { + // Do some work that does not produce an event + time.Sleep(10 * time.Second) + t.Log("Not producing events") + }() + + // Wait for all other routines to be stale before continuing to ensure that + // there is nothing running that would produce an event at a later time. + synctest.Wait() + + if err := eventbustest.ExpectExactly(tw); err != nil { + t.Error(err.Error()) + } else { + t.Log("OK") + } + // Output: + // OK + }) +} + +func TestExample_ExpectExactly_OneEventExpectingTwo(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + type eventOfInterest struct{} + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + client := bus.Client("testClient") + updater := eventbus.Publish[eventOfInterest](client) + + go func() { + // Do some work that does not produce an event + time.Sleep(10 * time.Second) + updater.Publish(eventOfInterest{}) + }() + + // Wait for all other routines to be stale before continuing to ensure that + // there is nothing running that would produce an event at a later time. + synctest.Wait() + + if err := eventbustest.ExpectExactly(tw, + eventbustest.Type[eventOfInterest](), + eventbustest.Type[eventOfInterest](), + ); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // timed out waiting for event, saw 1 events, 2 was expected + }) +} From 1a93a8a704b4f07a66d5086bfc1b7dfb1a3c6406 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 5 Oct 2025 12:43:54 -0700 Subject: [PATCH 1551/1708] feature/tpm: quiet log output a bit I was debugging a customer issue and saw in their 1.88.3 logs: TPM: error opening: stat /dev/tpm0: no such file or directory That's unnecessary output. The lack of TPM will be reported by them having a nil Hostinfo.TPM, which is plenty elsewhere in logs. Let's only write out an "error opening" line if it's an interesting error. (perhaps permissions, or EIO, etc) Updates #cleanup Change-Id: I3f987f6bf1d3ada03473ca3eef555e9cfafc7677 Signed-off-by: Brad Fitzpatrick --- feature/tpm/tpm.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index dd37b0506..6acb600ec 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -73,10 +73,16 @@ func info() *tailcfg.TPMInfo { tpm, err := open() if err != nil { - logf("error opening: %v", err) + if !os.IsNotExist(err) || verboseTPM() { + // Only log if it's an interesting error, not just "no TPM", + // as is very common, especially in VMs. 
+ logf("error opening: %v", err) + } return nil } - logf("successfully opened") + if verboseTPM() { + logf("successfully opened") + } defer tpm.Close() info := new(tailcfg.TPMInfo) From 743e5ac6960ef331e93d901faf58b7f4fea296f0 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 15 Oct 2025 09:13:06 -0700 Subject: [PATCH 1552/1708] cmd/tailscale: surface relay-server-port set flag (#17528) Fixes tailscale/corp#31186 Signed-off-by: Jordan Whited --- cmd/tailscale/cli/set.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 1807ada13..43f8bbbc3 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -85,7 +85,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf.BoolVar(&setArgs.updateApply, "auto-update", false, "automatically update to the latest available version") setf.BoolVar(&setArgs.reportPosture, "report-posture", false, "allow management plane to gather device posture information") setf.BoolVar(&setArgs.runWebClient, "webclient", false, "expose the web interface for managing this node over Tailscale at port 5252") - setf.StringVar(&setArgs.relayServerPort, "relay-server-port", "", hidden+"UDP port number (0 will pick a random unused port) for the relay server to bind to, on all interfaces, or empty string to disable relay server functionality") + setf.StringVar(&setArgs.relayServerPort, "relay-server-port", "", "UDP port number (0 will pick a random unused port) for the relay server to bind to, on all interfaces, or empty string to disable relay server functionality") ffcomplete.Flag(setf, "exit-node", func(args []string) ([]string, ffcomplete.ShellCompDirective, error) { st, err := localClient.Status(context.Background()) From 6d897c4ab4de855d33a57745d392146886c1e60f Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 15 Oct 2025 14:04:45 -0700 Subject: [PATCH 1553/1708] types/netlogtype: remove CBOR representation (#17545) Remove CBOR representation since it was never used. We should support CBOR in the future, but for remove it for now so that it is less work to add more fields. Also, rely on just omitzero for JSON now that it is supported in Go 1.24. Updates tailscale/corp#33352 Signed-off-by: Joe Tsai --- types/netlogtype/netlogtype.go | 43 ++++++++++------------------- types/netlogtype/netlogtype_test.go | 7 ----- 2 files changed, 14 insertions(+), 36 deletions(-) diff --git a/types/netlogtype/netlogtype.go b/types/netlogtype/netlogtype.go index f2fa2bda9..0f552611e 100644 --- a/types/netlogtype/netlogtype.go +++ b/types/netlogtype/netlogtype.go @@ -12,20 +12,17 @@ import ( "tailscale.com/types/ipproto" ) -// TODO(joetsai): Remove "omitempty" if "omitzero" is ever supported in both -// the v1 and v2 "json" packages. - // Message is the log message that captures network traffic. 
type Message struct { - NodeID tailcfg.StableNodeID `json:"nodeId" cbor:"0,keyasint"` // e.g., "n123456CNTRL" + NodeID tailcfg.StableNodeID `json:"nodeId"` // e.g., "n123456CNTRL" - Start time.Time `json:"start" cbor:"12,keyasint"` // inclusive - End time.Time `json:"end" cbor:"13,keyasint"` // inclusive + Start time.Time `json:"start"` // inclusive + End time.Time `json:"end"` // inclusive - VirtualTraffic []ConnectionCounts `json:"virtualTraffic,omitempty" cbor:"14,keyasint,omitempty"` - SubnetTraffic []ConnectionCounts `json:"subnetTraffic,omitempty" cbor:"15,keyasint,omitempty"` - ExitTraffic []ConnectionCounts `json:"exitTraffic,omitempty" cbor:"16,keyasint,omitempty"` - PhysicalTraffic []ConnectionCounts `json:"physicalTraffic,omitempty" cbor:"17,keyasint,omitempty"` + VirtualTraffic []ConnectionCounts `json:"virtualTraffic,omitempty"` + SubnetTraffic []ConnectionCounts `json:"subnetTraffic,omitempty"` + ExitTraffic []ConnectionCounts `json:"exitTraffic,omitempty"` + PhysicalTraffic []ConnectionCounts `json:"physicalTraffic,omitempty"` } const ( @@ -51,18 +48,6 @@ const ( // this object is nested within an array. // It assumes that netip.Addr never has IPv6 zones. MaxConnectionCountsJSONSize = len(maxJSONConnCounts) - - maxCBORConnCounts = "\xbf" + maxCBORConn + maxCBORCounts + "\xff" - maxCBORConn = "\x00" + maxCBORProto + "\x01" + maxCBORAddrPort + "\x02" + maxCBORAddrPort - maxCBORProto = "\x18\xff" - maxCBORAddrPort = "\x52\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" - maxCBORCounts = "\x0c" + maxCBORCount + "\x0d" + maxCBORCount + "\x0e" + maxCBORCount + "\x0f" + maxCBORCount - maxCBORCount = "\x1b\xff\xff\xff\xff\xff\xff\xff\xff" - - // MaxConnectionCountsCBORSize is the maximum size of a ConnectionCounts - // when it is serialized as CBOR. - // It assumes that netip.Addr never has IPv6 zones. - MaxConnectionCountsCBORSize = len(maxCBORConnCounts) ) // ConnectionCounts is a flattened struct of both a connection and counts. @@ -73,19 +58,19 @@ type ConnectionCounts struct { // Connection is a 5-tuple of proto, source and destination IP and port. type Connection struct { - Proto ipproto.Proto `json:"proto,omitzero,omitempty" cbor:"0,keyasint,omitempty"` - Src netip.AddrPort `json:"src,omitzero,omitempty" cbor:"1,keyasint,omitempty"` - Dst netip.AddrPort `json:"dst,omitzero,omitempty" cbor:"2,keyasint,omitempty"` + Proto ipproto.Proto `json:"proto,omitzero"` + Src netip.AddrPort `json:"src,omitzero"` + Dst netip.AddrPort `json:"dst,omitzero"` } func (c Connection) IsZero() bool { return c == Connection{} } // Counts are statistics about a particular connection. 
type Counts struct { - TxPackets uint64 `json:"txPkts,omitzero,omitempty" cbor:"12,keyasint,omitempty"` - TxBytes uint64 `json:"txBytes,omitzero,omitempty" cbor:"13,keyasint,omitempty"` - RxPackets uint64 `json:"rxPkts,omitzero,omitempty" cbor:"14,keyasint,omitempty"` - RxBytes uint64 `json:"rxBytes,omitzero,omitempty" cbor:"15,keyasint,omitempty"` + TxPackets uint64 `json:"txPkts,omitzero"` + TxBytes uint64 `json:"txBytes,omitzero"` + RxPackets uint64 `json:"rxPkts,omitzero"` + RxBytes uint64 `json:"rxBytes,omitzero"` } func (c Counts) IsZero() bool { return c == Counts{} } diff --git a/types/netlogtype/netlogtype_test.go b/types/netlogtype/netlogtype_test.go index 403cb9508..00f89b228 100644 --- a/types/netlogtype/netlogtype_test.go +++ b/types/netlogtype/netlogtype_test.go @@ -11,7 +11,6 @@ import ( "net/netip" "testing" - "github.com/fxamacker/cbor/v2" "github.com/google/go-cmp/cmp" "tailscale.com/util/must" ) @@ -32,10 +31,4 @@ func TestMaxSize(t *testing.T) { if string(outJSON) != maxJSONConnCounts { t.Errorf("JSON mismatch (-got +want):\n%s", cmp.Diff(string(outJSON), maxJSONConnCounts)) } - - outCBOR := must.Get(cbor.Marshal(cc)) - maxCBORConnCountsAlt := "\xa7" + maxCBORConnCounts[1:len(maxCBORConnCounts)-1] // may use a definite encoding of map - if string(outCBOR) != maxCBORConnCounts && string(outCBOR) != maxCBORConnCountsAlt { - t.Errorf("CBOR mismatch (-got +want):\n%s", cmp.Diff(string(outCBOR), maxCBORConnCounts)) - } } From e75f13bd93bd154e4e3e6c62c69ccae68863f2b7 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 15 Oct 2025 14:57:32 -0700 Subject: [PATCH 1554/1708] net/connstats: prepare to remove package (#17554) The connstats package was an unnecessary layer of indirection. It was separated out of wgengine/netlog so that net/tstun and wgengine/magicsock wouldn't need a dependency on the concrete implementation of network flow logging. Instead, we simply register a callback for counting connections. This PR does the bare minimum work to prepare tstun and magicsock to only care about that callback. A future PR will delete connstats and merge it into netlog.
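To make the new shape concrete, here is a minimal sketch (illustrative only, not code in this patch; the standalone program and the addresses are made up, while CountsByConnection, netlogfunc.ConnectionCounter, and SetConnectionCounter are the names introduced in the diffs below). CountsByConnection.Add has the ConnectionCounter signature, so the same value a test registers via (*tstun.Wrapper).SetConnectionCounter or (*magicsock.Conn).SetConnectionCounter can also be called by hand:

    package main

    import (
        "fmt"
        "net/netip"

        "tailscale.com/types/ipproto"
        "tailscale.com/types/netlogtype"
    )

    func main() {
        // The zero value is ready to use; Add allocates its map lazily.
        var counts netlogtype.CountsByConnection
        src := netip.MustParseAddrPort("100.64.0.1:1234") // made-up addresses
        dst := netip.MustParseAddrPort("100.64.0.2:443")
        counts.Add(ipproto.TCP, src, dst, 1, 1400, false) // one 1400-byte packet sent
        counts.Add(ipproto.TCP, src, dst, 1, 60, true)    // one 60-byte packet received
        fmt.Println(counts.Clone())
    }

The updated tests below follow the same pattern, registering counts.Add where they previously used connstats.Statistics and TestExtract.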
Updates tailscale/corp#33352 Signed-off-by: Joe Tsai --- cmd/k8s-operator/depaware.txt | 3 +- cmd/tailscaled/depaware-min.txt | 2 +- cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 3 +- cmd/tsidp/depaware.txt | 3 +- net/connstats/stats.go | 38 ++++++++++---------- net/tstun/wrap.go | 34 +++++++++++------- net/tstun/wrap_test.go | 15 ++++---- tsnet/depaware.txt | 3 +- types/netlogfunc/netlogfunc.go | 15 ++++++++ types/netlogtype/netlogtype.go | 42 ++++++++++++++++++++++ wgengine/magicsock/derp.go | 4 +-- wgengine/magicsock/endpoint.go | 8 ++--- wgengine/magicsock/magicsock.go | 16 ++++----- wgengine/magicsock/magicsock_test.go | 52 +++++++++++++--------------- wgengine/netlog/netlog.go | 43 +++++++++-------------- 16 files changed, 170 insertions(+), 113 deletions(-) create mode 100644 types/netlogfunc/netlogfunc.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index d4fdb87fc..8a8397f28 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -768,7 +768,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/net/tstun+ + tailscale.com/net/connstats from tailscale.com/wgengine/netlog tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/cmd/k8s-operator+ @@ -834,6 +834,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ tailscale.com/types/netlogtype from tailscale.com/net/connstats+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index fe50dface..96e18db43 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -78,7 +78,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock - tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ @@ -132,6 +131,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/net/batching+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index a4999825e..d46180e2d 100644 --- 
a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -102,7 +102,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock - tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ @@ -158,6 +157,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/net/batching+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 6ca10f80c..eed40845c 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -330,7 +330,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/net/tstun+ + tailscale.com/net/connstats from tailscale.com/wgengine/netlog tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ @@ -401,6 +401,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ tailscale.com/types/netlogtype from tailscale.com/net/connstats+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 894b4a078..1b6bb6d63 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -174,7 +174,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/net/tstun+ + tailscale.com/net/connstats from tailscale.com/wgengine/netlog tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ @@ -239,6 +239,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ 
tailscale.com/types/netlogtype from tailscale.com/net/connstats+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ diff --git a/net/connstats/stats.go b/net/connstats/stats.go index 44b276254..206181b27 100644 --- a/net/connstats/stats.go +++ b/net/connstats/stats.go @@ -16,6 +16,7 @@ import ( "golang.org/x/sync/errgroup" "tailscale.com/net/packet" "tailscale.com/net/tsaddr" + "tailscale.com/types/ipproto" "tailscale.com/types/netlogtype" ) @@ -85,14 +86,18 @@ func NewStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end t // The source and destination of the packet directly correspond with // the source and destination in netlogtype.Connection. func (s *Statistics) UpdateTxVirtual(b []byte) { - s.updateVirtual(b, false) + var p packet.Parsed + p.Decode(b) + s.UpdateVirtual(p.IPProto, p.Src, p.Dst, 1, len(b), false) } // UpdateRxVirtual updates the counters for a received IP packet. // The source and destination of the packet are inverted with respect to // the source and destination in netlogtype.Connection. func (s *Statistics) UpdateRxVirtual(b []byte) { - s.updateVirtual(b, true) + var p packet.Parsed + p.Decode(b) + s.UpdateVirtual(p.IPProto, p.Dst, p.Src, 1, len(b), true) } var ( @@ -100,23 +105,18 @@ var ( tailscaleServiceIPv6 = tsaddr.TailscaleServiceIPv6() ) -func (s *Statistics) updateVirtual(b []byte, receive bool) { - var p packet.Parsed - p.Decode(b) - conn := netlogtype.Connection{Proto: p.IPProto, Src: p.Src, Dst: p.Dst} - if receive { - conn.Src, conn.Dst = conn.Dst, conn.Src - } - +func (s *Statistics) UpdateVirtual(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { // Network logging is defined as traffic between two Tailscale nodes. // Traffic with the internal Tailscale service is not with another node // and should not be logged. It also happens to be a high volume // amount of discrete traffic flows (e.g., DNS lookups). - switch conn.Dst.Addr() { + switch dst.Addr() { case tailscaleServiceIPv4, tailscaleServiceIPv6: return } + conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} + s.mu.Lock() defer s.mu.Unlock() cnts, found := s.virtual[conn] @@ -124,11 +124,11 @@ func (s *Statistics) updateVirtual(b []byte, receive bool) { return } if receive { - cnts.RxPackets++ - cnts.RxBytes += uint64(len(b)) + cnts.RxPackets += uint64(packets) + cnts.RxBytes += uint64(bytes) } else { - cnts.TxPackets++ - cnts.TxBytes += uint64(len(b)) + cnts.TxPackets += uint64(packets) + cnts.TxBytes += uint64(bytes) } s.virtual[conn] = cnts } @@ -138,7 +138,7 @@ func (s *Statistics) updateVirtual(b []byte, receive bool) { // The dst is a remote IP address and port that corresponds // with some physical peer backing the Tailscale IP address. func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { - s.updatePhysical(src, dst, packets, bytes, false) + s.UpdatePhysical(0, netip.AddrPortFrom(src, 0), dst, packets, bytes, false) } // UpdateRxPhysical updates the counters for zero or more received wireguard packets. @@ -146,11 +146,11 @@ func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packet // The dst is a remote IP address and port that corresponds // with some physical peer backing the Tailscale IP address. 
func (s *Statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { - s.updatePhysical(src, dst, packets, bytes, true) + s.UpdatePhysical(0, netip.AddrPortFrom(src, 0), dst, packets, bytes, true) } -func (s *Statistics) updatePhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int, receive bool) { - conn := netlogtype.Connection{Src: netip.AddrPortFrom(src, 0), Dst: dst} +func (s *Statistics) UpdatePhysical(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { + conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} s.mu.Lock() defer s.mu.Unlock() diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index fb93ca21e..dfbab7812 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -24,7 +24,6 @@ import ( "go4.org/mem" "tailscale.com/disco" "tailscale.com/feature/buildfeatures" - "tailscale.com/net/connstats" "tailscale.com/net/packet" "tailscale.com/net/packet/checksum" "tailscale.com/net/tsaddr" @@ -33,6 +32,7 @@ import ( "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/netlogfunc" "tailscale.com/util/clientmetric" "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter" @@ -203,8 +203,8 @@ type Wrapper struct { // disableTSMPRejected disables TSMP rejected responses. For tests. disableTSMPRejected bool - // stats maintains per-connection counters. - stats atomic.Pointer[connstats.Statistics] + // connCounter maintains per-connection counters. + connCounter syncs.AtomicValue[netlogfunc.ConnectionCounter] captureHook syncs.AtomicValue[packet.CaptureCallback] @@ -977,8 +977,8 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { } sizes[buffsPos] = n if buildfeatures.HasConnStats { - if stats := t.stats.Load(); stats != nil { - stats.UpdateTxVirtual(p.Buffer()) + if update := t.connCounter.Load(); update != nil { + updateConnCounter(update, p.Buffer(), false) } } buffsPos++ @@ -1106,9 +1106,9 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i } if buildfeatures.HasConnStats { - if stats := t.stats.Load(); stats != nil { + if update := t.connCounter.Load(); update != nil { for i := 0; i < n; i++ { - stats.UpdateTxVirtual(outBuffs[i][offset : offset+sizes[i]]) + updateConnCounter(update, outBuffs[i][offset:offset+sizes[i]], false) } } } @@ -1276,9 +1276,9 @@ func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) { func (t *Wrapper) tdevWrite(buffs [][]byte, offset int) (int, error) { if buildfeatures.HasConnStats { - if stats := t.stats.Load(); stats != nil { + if update := t.connCounter.Load(); update != nil { for i := range buffs { - stats.UpdateRxVirtual((buffs)[i][offset:]) + updateConnCounter(update, buffs[i][offset:], true) } } } @@ -1498,11 +1498,11 @@ func (t *Wrapper) Unwrap() tun.Device { return t.tdev } -// SetStatistics specifies a per-connection statistics aggregator. +// SetConnectionCounter specifies a per-connection statistics aggregator. // Nil may be specified to disable statistics gathering. 
-func (t *Wrapper) SetStatistics(stats *connstats.Statistics) { +func (t *Wrapper) SetConnectionCounter(fn netlogfunc.ConnectionCounter) { if buildfeatures.HasConnStats { - t.stats.Store(stats) + t.connCounter.Store(fn) } } @@ -1524,3 +1524,13 @@ func (t *Wrapper) InstallCaptureHook(cb packet.CaptureCallback) { } t.captureHook.Store(cb) } + +func updateConnCounter(update netlogfunc.ConnectionCounter, b []byte, receive bool) { + var p packet.Parsed + p.Decode(b) + if receive { + update(p.IPProto, p.Dst, p.Src, 1, len(b), true) + } else { + update(p.IPProto, p.Src, p.Dst, 1, len(b), false) + } +} diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index 223ee34f4..a66888191 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -5,7 +5,6 @@ package tstun import ( "bytes" - "context" "encoding/binary" "encoding/hex" "expvar" @@ -27,7 +26,6 @@ import ( "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/tcpip/stack" "tailscale.com/disco" - "tailscale.com/net/connstats" "tailscale.com/net/netaddr" "tailscale.com/net/packet" "tailscale.com/tstest" @@ -370,9 +368,8 @@ func TestFilter(t *testing.T) { }() var buf [MaxPacketSize]byte - stats := connstats.NewStatistics(0, 0, nil) - defer stats.Shutdown(context.Background()) - tun.SetStatistics(stats) + var stats netlogtype.CountsByConnection + tun.SetConnectionCounter(stats.Add) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var n int @@ -380,9 +377,10 @@ func TestFilter(t *testing.T) { var filtered bool sizes := make([]int, 1) - tunStats, _ := stats.TestExtract() + tunStats := stats.Clone() + stats.Reset() if len(tunStats) > 0 { - t.Errorf("connstats.Statistics.Extract = %v, want {}", stats) + t.Errorf("connstats.Statistics.Extract = %v, want {}", tunStats) } if tt.dir == in { @@ -415,7 +413,8 @@ func TestFilter(t *testing.T) { } } - got, _ := stats.TestExtract() + got := stats.Clone() + stats.Reset() want := map[netlogtype.Connection]netlogtype.Counts{} var wasUDP bool if !tt.drop { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index d602c7b2f..893e52f2c 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -170,7 +170,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/net/tstun+ + tailscale.com/net/connstats from tailscale.com/wgengine/netlog tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ @@ -234,6 +234,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ tailscale.com/types/netlogtype from tailscale.com/net/connstats+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ diff --git a/types/netlogfunc/netlogfunc.go b/types/netlogfunc/netlogfunc.go new file mode 100644 index 000000000..6185fcb71 --- /dev/null +++ b/types/netlogfunc/netlogfunc.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package 
netlogfunc defines types for network logging. +package netlogfunc + +import ( + "net/netip" + + "tailscale.com/types/ipproto" +) + +// ConnectionCounter is a function for counting packets and bytes +// for a particular connection. +type ConnectionCounter func(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, recv bool) diff --git a/types/netlogtype/netlogtype.go b/types/netlogtype/netlogtype.go index 0f552611e..a29ea6f03 100644 --- a/types/netlogtype/netlogtype.go +++ b/types/netlogtype/netlogtype.go @@ -5,7 +5,9 @@ package netlogtype import ( + "maps" "net/netip" + "sync" "time" "tailscale.com/tailcfg" @@ -83,3 +85,43 @@ func (c1 Counts) Add(c2 Counts) Counts { c1.RxBytes += c2.RxBytes return c1 } + +// CountsByConnection is a count of packets and bytes for each connection. +// All methods are safe for concurrent calls. +type CountsByConnection struct { + mu sync.Mutex + m map[Connection]Counts +} + +// Add adds packets and bytes for the specified connection. +func (c *CountsByConnection) Add(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, recv bool) { + conn := Connection{Proto: proto, Src: src, Dst: dst} + c.mu.Lock() + defer c.mu.Unlock() + if c.m == nil { + c.m = make(map[Connection]Counts) + } + cnts := c.m[conn] + if recv { + cnts.RxPackets += uint64(packets) + cnts.RxBytes += uint64(bytes) + } else { + cnts.TxPackets += uint64(packets) + cnts.TxBytes += uint64(bytes) + } + c.m[conn] = cnts +} + +// Clone deep copies the map. +func (c *CountsByConnection) Clone() map[Connection]Counts { + c.mu.Lock() + defer c.mu.Unlock() + return maps.Clone(c.m) +} + +// Reset clear the map. +func (c *CountsByConnection) Reset() { + c.mu.Lock() + defer c.mu.Unlock() + clear(c.m) +} diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index d33745892..37a4f1a64 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -717,8 +717,8 @@ func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *en } ep.noteRecvActivity(srcAddr, mono.Now()) - if stats := c.stats.Load(); stats != nil { - stats.UpdateRxPhysical(ep.nodeAddr, srcAddr.ap, 1, dm.n) + if update := c.connCounter.Load(); update != nil { + update(0, netip.AddrPortFrom(ep.nodeAddr, 0), srcAddr.ap, 1, dm.n, true) } c.metrics.inboundPacketsDERPTotal.Add(1) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 7deafb752..2010775a1 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1105,8 +1105,8 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { } // TODO(raggi): needs updating for accuracy, as in error conditions we may have partial sends. 
- if stats := de.c.stats.Load(); err == nil && stats != nil { - stats.UpdateTxPhysical(de.nodeAddr, udpAddr.ap, len(buffs), txBytes) + if update := de.c.connCounter.Load(); err == nil && update != nil { + update(0, netip.AddrPortFrom(de.nodeAddr, 0), udpAddr.ap, len(buffs), txBytes, false) } } if derpAddr.IsValid() { @@ -1123,8 +1123,8 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { } } - if stats := de.c.stats.Load(); stats != nil { - stats.UpdateTxPhysical(de.nodeAddr, derpAddr, len(buffs), txBytes) + if update := de.c.connCounter.Load(); update != nil { + update(0, netip.AddrPortFrom(de.nodeAddr, 0), derpAddr, len(buffs), txBytes, false) } if allOk { return nil diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index f855936ce..61fc50d12 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -37,7 +37,6 @@ import ( "tailscale.com/hostinfo" "tailscale.com/ipn/ipnstate" "tailscale.com/net/batching" - "tailscale.com/net/connstats" "tailscale.com/net/netcheck" "tailscale.com/net/neterror" "tailscale.com/net/netmon" @@ -56,6 +55,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/lazy" "tailscale.com/types/logger" + "tailscale.com/types/netlogfunc" "tailscale.com/types/netmap" "tailscale.com/types/nettype" "tailscale.com/types/views" @@ -261,8 +261,8 @@ type Conn struct { //lint:ignore U1000 used on Linux/Darwin only peerMTUEnabled atomic.Bool - // stats maintains per-connection counters. - stats atomic.Pointer[connstats.Statistics] + // connCounter maintains per-connection counters. + connCounter syncs.AtomicValue[netlogfunc.ConnectionCounter] // captureHook, if non-nil, is the pcap logging callback when capturing. captureHook syncs.AtomicValue[packet.CaptureCallback] @@ -1862,8 +1862,8 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach ep.lastRecvUDPAny.StoreAtomic(now) connNoted := ep.noteRecvActivity(src, now) if buildfeatures.HasConnStats { - if stats := c.stats.Load(); stats != nil { - stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, geneveInclusivePacketLen) + if update := c.connCounter.Load(); update != nil { + update(0, netip.AddrPortFrom(ep.nodeAddr, 0), ipp, 1, geneveInclusivePacketLen, true) } } if src.vni.IsSet() && (connNoted || looksLikeInitiationMsg(b)) { @@ -3745,11 +3745,11 @@ func (c *Conn) UpdateStatus(sb *ipnstate.StatusBuilder) { }) } -// SetStatistics specifies a per-connection statistics aggregator. +// SetConnectionCounter specifies a per-connection statistics aggregator. // Nil may be specified to disable statistics gathering. 
-func (c *Conn) SetStatistics(stats *connstats.Statistics) { +func (c *Conn) SetConnectionCounter(fn netlogfunc.ConnectionCounter) { if buildfeatures.HasConnStats { - c.stats.Store(stats) + c.connCounter.Store(fn) } } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index d1d62a26e..60620b141 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -32,6 +32,7 @@ import ( "unsafe" qt "github.com/frankban/quicktest" + "github.com/google/go-cmp/cmp" wgconn "github.com/tailscale/wireguard-go/conn" "github.com/tailscale/wireguard-go/device" "github.com/tailscale/wireguard-go/tun/tuntest" @@ -45,7 +46,6 @@ import ( "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/ipn/ipnstate" - "tailscale.com/net/connstats" "tailscale.com/net/netaddr" "tailscale.com/net/netcheck" "tailscale.com/net/netmon" @@ -158,14 +158,14 @@ func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, st // happiness. type magicStack struct { privateKey key.NodePrivate - epCh chan []tailcfg.Endpoint // endpoint updates produced by this peer - stats *connstats.Statistics // per-connection statistics - conn *Conn // the magicsock itself - tun *tuntest.ChannelTUN // TUN device to send/receive packets - tsTun *tstun.Wrapper // wrapped tun that implements filtering and wgengine hooks - dev *device.Device // the wireguard-go Device that connects the previous things - wgLogger *wglog.Logger // wireguard-go log wrapper - netMon *netmon.Monitor // always non-nil + epCh chan []tailcfg.Endpoint // endpoint updates produced by this peer + counts netlogtype.CountsByConnection // per-connection statistics + conn *Conn // the magicsock itself + tun *tuntest.ChannelTUN // TUN device to send/receive packets + tsTun *tstun.Wrapper // wrapped tun that implements filtering and wgengine hooks + dev *device.Device // the wireguard-go Device that connects the previous things + wgLogger *wglog.Logger // wireguard-go log wrapper + netMon *netmon.Monitor // always non-nil metrics *usermetric.Registry } @@ -1143,22 +1143,19 @@ func testTwoDevicePing(t *testing.T, d *devices) { } } - m1.stats = connstats.NewStatistics(0, 0, nil) - defer m1.stats.Shutdown(context.Background()) - m1.conn.SetStatistics(m1.stats) - m2.stats = connstats.NewStatistics(0, 0, nil) - defer m2.stats.Shutdown(context.Background()) - m2.conn.SetStatistics(m2.stats) + m1.conn.SetConnectionCounter(m1.counts.Add) + m2.conn.SetConnectionCounter(m2.counts.Add) checkStats := func(t *testing.T, m *magicStack, wantConns []netlogtype.Connection) { - _, stats := m.stats.TestExtract() + defer m.counts.Reset() + counts := m.counts.Clone() for _, conn := range wantConns { - if _, ok := stats[conn]; ok { + if _, ok := counts[conn]; ok { return } } t.Helper() - t.Errorf("missing any connection to %s from %s", wantConns, slicesx.MapKeys(stats)) + t.Errorf("missing any connection to %s from %s", wantConns, slicesx.MapKeys(counts)) } addrPort := netip.MustParseAddrPort @@ -1221,9 +1218,9 @@ func testTwoDevicePing(t *testing.T, d *devices) { setT(t) defer setT(outerT) m1.conn.resetMetricsForTest() - m1.stats.TestExtract() + m1.counts.Reset() m2.conn.resetMetricsForTest() - m2.stats.TestExtract() + m2.counts.Reset() t.Logf("Metrics before: %s\n", m1.metrics.String()) ping1(t) ping2(t) @@ -1249,8 +1246,6 @@ func (c *Conn) resetMetricsForTest() { } func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { - _, phys := ms.stats.TestExtract() - physIPv4RxBytes := int64(0) 
physIPv4TxBytes := int64(0) physDERPRxBytes := int64(0) @@ -1259,7 +1254,7 @@ func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { physIPv4TxPackets := int64(0) physDERPRxPackets := int64(0) physDERPTxPackets := int64(0) - for conn, count := range phys { + for conn, count := range ms.counts.Clone() { t.Logf("physconn src: %s, dst: %s", conn.Src.String(), conn.Dst.String()) if conn.Dst.String() == "127.3.3.40:1" { physDERPRxBytes += int64(count.RxBytes) @@ -1273,6 +1268,7 @@ func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { physIPv4TxPackets += int64(count.TxPackets) } } + ms.counts.Reset() metricIPv4RxBytes := ms.conn.metrics.inboundBytesIPv4Total.Value() metricIPv4RxPackets := ms.conn.metrics.inboundPacketsIPv4Total.Value() @@ -3986,7 +3982,8 @@ func TestConn_receiveIP(t *testing.T) { c.noteRecvActivity = func(public key.NodePublic) { noteRecvActivityCalled = true } - c.SetStatistics(connstats.NewStatistics(0, 0, nil)) + var counts netlogtype.CountsByConnection + c.SetConnectionCounter(counts.Add) if tt.insertWantEndpointTypeInPeerMap { var insertEPIntoPeerMap *endpoint @@ -4059,9 +4056,8 @@ func TestConn_receiveIP(t *testing.T) { } // Verify physical rx stats - stats := c.stats.Load() - _, gotPhy := stats.TestExtract() wantNonzeroRxStats := false + gotPhy := counts.Clone() switch ep := tt.wantEndpointType.(type) { case *lazyEndpoint: if ep.maybeEP != nil { @@ -4081,8 +4077,8 @@ func TestConn_receiveIP(t *testing.T) { RxBytes: wantRxBytes, }, } - if !reflect.DeepEqual(gotPhy, wantPhy) { - t.Errorf("receiveIP() got physical conn stats = %v, want %v", gotPhy, wantPhy) + if d := cmp.Diff(gotPhy, wantPhy); d != "" { + t.Errorf("receiveIP() stats mismatch (-got +want):\n%s", d) } } else { if len(gotPhy) != 0 { diff --git a/wgengine/netlog/netlog.go b/wgengine/netlog/netlog.go index 7e1938d27..a04fd2126 100644 --- a/wgengine/netlog/netlog.go +++ b/wgengine/netlog/netlog.go @@ -8,6 +8,7 @@ package netlog import ( + "cmp" "context" "encoding/json" "errors" @@ -19,7 +20,6 @@ import ( "sync" "time" - "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/logpolicy" "tailscale.com/logtail" @@ -29,6 +29,7 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" "tailscale.com/types/logid" + "tailscale.com/types/netlogfunc" "tailscale.com/types/netlogtype" "tailscale.com/util/eventbus" "tailscale.com/wgengine/router" @@ -40,12 +41,12 @@ const pollPeriod = 5 * time.Second // Device is an abstraction over a tunnel device or a magic socket. // Both *tstun.Wrapper and *magicsock.Conn implement this interface. type Device interface { - SetStatistics(*connstats.Statistics) + SetConnectionCounter(netlogfunc.ConnectionCounter) } type noopDevice struct{} -func (noopDevice) SetStatistics(*connstats.Statistics) {} +func (noopDevice) SetConnectionCounter(netlogfunc.ConnectionCounter) {} // Logger logs statistics about every connection. // At present, it only logs connections within a tailscale network. @@ -131,31 +132,21 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo // can upload to the Tailscale log service, so stay below this limit. 
const maxLogSize = 256 << 10 const maxConns = (maxLogSize - netlogtype.MaxMessageJSONSize) / netlogtype.MaxConnectionCountsJSONSize - if buildfeatures.HasConnStats { - nl.stats = connstats.NewStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { - nl.mu.Lock() - addrs := nl.addrs - prefixes := nl.prefixes - nl.mu.Unlock() - recordStatistics(nl.logger, nodeID, start, end, virtual, physical, addrs, prefixes, logExitFlowEnabledEnabled) - }) - } + nl.stats = connstats.NewStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { + nl.mu.Lock() + addrs := nl.addrs + prefixes := nl.prefixes + nl.mu.Unlock() + recordStatistics(nl.logger, nodeID, start, end, virtual, physical, addrs, prefixes, logExitFlowEnabledEnabled) + }) // Register the connection tracker into the TUN device. - if tun == nil { - tun = noopDevice{} - } - nl.tun = tun - if buildfeatures.HasConnStats { - nl.tun.SetStatistics(nl.stats) - } + nl.tun = cmp.Or[Device](tun, noopDevice{}) + nl.tun.SetConnectionCounter(nl.stats.UpdateVirtual) // Register the connection tracker into magicsock. - if sock == nil { - sock = noopDevice{} - } - nl.sock = sock - nl.sock.SetStatistics(nl.stats) + nl.sock = cmp.Or[Device](sock, noopDevice{}) + nl.sock.SetConnectionCounter(nl.stats.UpdatePhysical) return nil } @@ -265,8 +256,8 @@ func (nl *Logger) Shutdown(ctx context.Context) error { // Shutdown in reverse order of Startup. // Do not hold lock while shutting down since this may flush one last time. nl.mu.Unlock() - nl.sock.SetStatistics(nil) - nl.tun.SetStatistics(nil) + nl.sock.SetConnectionCounter(nil) + nl.tun.SetConnectionCounter(nil) err1 := nl.stats.Shutdown(ctx) err2 := nl.logger.Shutdown(ctx) nl.mu.Lock() From e804b6435818527884112870f17ad32a673b2f2d Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Thu, 16 Oct 2025 00:07:29 -0700 Subject: [PATCH 1555/1708] wgengine/netlog: merge connstats into package (#17557) Merge the connstats package into the netlog package and unexport all of its declarations. Remove the buildfeatures.HasConnStats and use HasNetLog instead. 
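As a rough sketch of where that leaves the package boundary (illustrative only; fakeDevice, its traffic values, and the main function are invented for the example, while Device, SetConnectionCounter, and netlogfunc.ConnectionCounter come from the diffs in this series): the counter hooks in tstun and magicsock are now gated on buildfeatures.HasNetLog rather than the removed HasConnStats, and netlog only asks a device for the single registration method.

    package main

    import (
        "fmt"
        "net/netip"

        "tailscale.com/types/ipproto"
        "tailscale.com/types/netlogfunc"
    )

    // fakeDevice stands in for a *tstun.Wrapper or *magicsock.Conn: either one
    // satisfies netlog's Device interface purely by implementing SetConnectionCounter.
    type fakeDevice struct{ counter netlogfunc.ConnectionCounter }

    func (d *fakeDevice) SetConnectionCounter(fn netlogfunc.ConnectionCounter) { d.counter = fn }

    func main() {
        d := &fakeDevice{}
        d.SetConnectionCounter(func(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, recv bool) {
            fmt.Printf("%v %v -> %v: %d pkts, %d bytes (recv=%v)\n", proto, src, dst, packets, bytes, recv)
        })
        // Passing nil would disable counting again, mirroring what netlog does on Shutdown.
        d.counter(ipproto.UDP, netip.MustParseAddrPort("100.64.0.1:41641"), netip.MustParseAddrPort("100.64.0.2:41641"), 2, 300, false)
    }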
Updates tailscale/corp#33352 Signed-off-by: Joe Tsai --- cmd/k8s-operator/depaware.txt | 5 +-- cmd/tailscaled/depaware.txt | 5 +-- cmd/tsidp/depaware.txt | 3 +- .../feature_connstats_disabled.go | 13 ------- .../feature_connstats_enabled.go | 13 ------- feature/featuretags/featuretags.go | 6 +-- net/connstats/stats_omit.go | 24 ------------ net/tstun/wrap.go | 8 ++-- net/tstun/wrap_test.go | 2 +- tsnet/depaware.txt | 3 +- wgengine/magicsock/magicsock.go | 4 +- wgengine/netlog/netlog.go | 9 ++--- {net/connstats => wgengine/netlog}/stats.go | 38 +++++++++---------- .../netlog}/stats_test.go | 14 +++---- 14 files changed, 43 insertions(+), 104 deletions(-) delete mode 100644 feature/buildfeatures/feature_connstats_disabled.go delete mode 100644 feature/buildfeatures/feature_connstats_enabled.go delete mode 100644 net/connstats/stats_omit.go rename {net/connstats => wgengine/netlog}/stats.go (85%) rename {net/connstats => wgengine/netlog}/stats_test.go (95%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 8a8397f28..6cffda2dd 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -768,7 +768,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/wgengine/netlog tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/cmd/k8s-operator+ @@ -787,7 +786,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ - tailscale.com/net/packet from tailscale.com/net/connstats+ + tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ tailscale.com/net/portmapper from tailscale.com/feature/portmapper @@ -835,7 +834,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogtype from tailscale.com/wgengine/netlog tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ tailscale.com/types/opt from tailscale.com/client/tailscale+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index eed40845c..e92d41b98 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -330,7 +330,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/wgengine/netlog tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ 
tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ @@ -349,7 +348,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de W 💣 tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ - tailscale.com/net/packet from tailscale.com/net/connstats+ + tailscale.com/net/packet from tailscale.com/feature/capture+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ tailscale.com/net/portmapper from tailscale.com/feature/portmapper+ @@ -402,7 +401,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogtype from tailscale.com/wgengine/netlog tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 1b6bb6d63..a2a473a50 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -174,7 +174,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/wgengine/netlog tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ @@ -240,7 +239,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogtype from tailscale.com/wgengine/netlog tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ tailscale.com/types/opt from tailscale.com/cmd/tsidp+ diff --git a/feature/buildfeatures/feature_connstats_disabled.go b/feature/buildfeatures/feature_connstats_disabled.go deleted file mode 100644 index d9aac0e80..000000000 --- a/feature/buildfeatures/feature_connstats_disabled.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Code generated by gen.go; DO NOT EDIT. - -//go:build ts_omit_connstats - -package buildfeatures - -// HasConnStats is whether the binary was built with support for modular feature "Track per-packet connection statistics". -// Specifically, it's whether the binary was NOT built with the "ts_omit_connstats" build tag. -// It's a const so it can be used for dead code elimination. 
-const HasConnStats = false diff --git a/feature/buildfeatures/feature_connstats_enabled.go b/feature/buildfeatures/feature_connstats_enabled.go deleted file mode 100644 index c0451ce1e..000000000 --- a/feature/buildfeatures/feature_connstats_enabled.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Code generated by gen.go; DO NOT EDIT. - -//go:build !ts_omit_connstats - -package buildfeatures - -// HasConnStats is whether the binary was built with support for modular feature "Track per-packet connection statistics". -// Specifically, it's whether the binary was NOT built with the "ts_omit_connstats" build tag. -// It's a const so it can be used for dead code elimination. -const HasConnStats = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index c944d65eb..9c85dbaa0 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -134,11 +134,7 @@ var Features = map[FeatureTag]FeatureMeta{ Deps: []FeatureTag{"c2n"}, }, "completion": {Sym: "Completion", Desc: "CLI shell completion"}, - "connstats": { - Sym: "ConnStats", - Desc: "Track per-packet connection statistics", - }, - "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, + "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, "dbus": { Sym: "DBus", Desc: "Linux DBus support", diff --git a/net/connstats/stats_omit.go b/net/connstats/stats_omit.go deleted file mode 100644 index 15d16c9e4..000000000 --- a/net/connstats/stats_omit.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build ts_omit_connstats - -package connstats - -import ( - "context" - "net/netip" - "time" -) - -type Statistics struct{} - -func NewStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end time.Time, virtual, physical any)) *Statistics { - return &Statistics{} -} - -func (s *Statistics) UpdateTxVirtual(b []byte) {} -func (s *Statistics) UpdateRxVirtual(b []byte) {} -func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) {} -func (s *Statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) {} -func (s *Statistics) Shutdown(context.Context) error { return nil } diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index dfbab7812..70cc7118a 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -976,7 +976,7 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { panic(fmt.Sprintf("short copy: %d != %d", n, len(data)-res.dataOffset)) } sizes[buffsPos] = n - if buildfeatures.HasConnStats { + if buildfeatures.HasNetLog { if update := t.connCounter.Load(); update != nil { updateConnCounter(update, p.Buffer(), false) } @@ -1105,7 +1105,7 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i n, err = tun.GSOSplit(pkt, gsoOptions, outBuffs, sizes, offset) } - if buildfeatures.HasConnStats { + if buildfeatures.HasNetLog { if update := t.connCounter.Load(); update != nil { for i := 0; i < n; i++ { updateConnCounter(update, outBuffs[i][offset:offset+sizes[i]], false) @@ -1275,7 +1275,7 @@ func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) { } func (t *Wrapper) tdevWrite(buffs [][]byte, offset int) (int, error) { - if buildfeatures.HasConnStats { + if buildfeatures.HasNetLog { if update := t.connCounter.Load(); update 
!= nil { for i := range buffs { updateConnCounter(update, buffs[i][offset:], true) @@ -1501,7 +1501,7 @@ func (t *Wrapper) Unwrap() tun.Device { // SetConnectionCounter specifies a per-connection statistics aggregator. // Nil may be specified to disable statistics gathering. func (t *Wrapper) SetConnectionCounter(fn netlogfunc.ConnectionCounter) { - if buildfeatures.HasConnStats { + if buildfeatures.HasNetLog { t.connCounter.Store(fn) } } diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index a66888191..75cf5afb2 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -380,7 +380,7 @@ func TestFilter(t *testing.T) { tunStats := stats.Clone() stats.Reset() if len(tunStats) > 0 { - t.Errorf("connstats.Statistics.Extract = %v, want {}", tunStats) + t.Errorf("netlogtype.CountsByConnection = %v, want {}", tunStats) } if tt.dir == in { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 893e52f2c..cd734e995 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -170,7 +170,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/wgengine/netlog tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ @@ -235,7 +234,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogtype from tailscale.com/wgengine/netlog tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 61fc50d12..e3c2d478e 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1861,7 +1861,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach now := mono.Now() ep.lastRecvUDPAny.StoreAtomic(now) connNoted := ep.noteRecvActivity(src, now) - if buildfeatures.HasConnStats { + if buildfeatures.HasNetLog { if update := c.connCounter.Load(); update != nil { update(0, netip.AddrPortFrom(ep.nodeAddr, 0), ipp, 1, geneveInclusivePacketLen, true) } @@ -3748,7 +3748,7 @@ func (c *Conn) UpdateStatus(sb *ipnstate.StatusBuilder) { // SetConnectionCounter specifies a per-connection statistics aggregator. // Nil may be specified to disable statistics gathering. 
func (c *Conn) SetConnectionCounter(fn netlogfunc.ConnectionCounter) { - if buildfeatures.HasConnStats { + if buildfeatures.HasNetLog { c.connCounter.Store(fn) } } diff --git a/wgengine/netlog/netlog.go b/wgengine/netlog/netlog.go index a04fd2126..2984df994 100644 --- a/wgengine/netlog/netlog.go +++ b/wgengine/netlog/netlog.go @@ -23,7 +23,6 @@ import ( "tailscale.com/health" "tailscale.com/logpolicy" "tailscale.com/logtail" - "tailscale.com/net/connstats" "tailscale.com/net/netmon" "tailscale.com/net/sockstats" "tailscale.com/net/tsaddr" @@ -56,7 +55,7 @@ type Logger struct { mu sync.Mutex // protects all fields below logger *logtail.Logger - stats *connstats.Statistics + stats *statistics tun Device sock Device @@ -132,7 +131,7 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo // can upload to the Tailscale log service, so stay below this limit. const maxLogSize = 256 << 10 const maxConns = (maxLogSize - netlogtype.MaxMessageJSONSize) / netlogtype.MaxConnectionCountsJSONSize - nl.stats = connstats.NewStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { + nl.stats = newStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { nl.mu.Lock() addrs := nl.addrs prefixes := nl.prefixes @@ -151,7 +150,7 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo return nil } -func recordStatistics(logger *logtail.Logger, nodeID tailcfg.StableNodeID, start, end time.Time, connstats, sockStats map[netlogtype.Connection]netlogtype.Counts, addrs map[netip.Addr]bool, prefixes map[netip.Prefix]bool, logExitFlowEnabled bool) { +func recordStatistics(logger *logtail.Logger, nodeID tailcfg.StableNodeID, start, end time.Time, connStats, sockStats map[netlogtype.Connection]netlogtype.Counts, addrs map[netip.Addr]bool, prefixes map[netip.Prefix]bool, logExitFlowEnabled bool) { m := netlogtype.Message{NodeID: nodeID, Start: start.UTC(), End: end.UTC()} classifyAddr := func(a netip.Addr) (isTailscale, withinRoute bool) { @@ -170,7 +169,7 @@ func recordStatistics(logger *logtail.Logger, nodeID tailcfg.StableNodeID, start } exitTraffic := make(map[netlogtype.Connection]netlogtype.Counts) - for conn, cnts := range connstats { + for conn, cnts := range connStats { srcIsTailscaleIP, srcWithinSubnet := classifyAddr(conn.Src.Addr()) dstIsTailscaleIP, dstWithinSubnet := classifyAddr(conn.Dst.Addr()) switch { diff --git a/net/connstats/stats.go b/wgengine/netlog/stats.go similarity index 85% rename from net/connstats/stats.go rename to wgengine/netlog/stats.go index 206181b27..c06068803 100644 --- a/net/connstats/stats.go +++ b/wgengine/netlog/stats.go @@ -1,11 +1,9 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ts_omit_connstats +//go:build !ts_omit_netlog && !ts_omit_logtail -// Package connstats maintains statistics about connections -// flowing through a TUN device (which operate at the IP layer). -package connstats +package netlog import ( "context" @@ -20,10 +18,10 @@ import ( "tailscale.com/types/netlogtype" ) -// Statistics maintains counters for every connection. +// statistics maintains counters for every connection. // All methods are safe for concurrent use. // The zero value is ready for use. 
-type Statistics struct { +type statistics struct { maxConns int // immutable once set mu sync.Mutex @@ -42,13 +40,13 @@ type connCnts struct { physical map[netlogtype.Connection]netlogtype.Counts } -// NewStatistics creates a data structure for tracking connection statistics +// newStatistics creates a data structure for tracking connection statistics // that periodically dumps the virtual and physical connection counts // depending on whether the maxPeriod or maxConns is exceeded. // The dump function is called from a single goroutine. // Shutdown must be called to cleanup resources. -func NewStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts)) *Statistics { - s := &Statistics{maxConns: maxConns} +func newStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts)) *statistics { + s := &statistics{maxConns: maxConns} s.connCntsCh = make(chan connCnts, 256) s.shutdownCtx, s.shutdown = context.WithCancel(context.Background()) s.group.Go(func() error { @@ -85,7 +83,7 @@ func NewStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end t // UpdateTxVirtual updates the counters for a transmitted IP packet // The source and destination of the packet directly correspond with // the source and destination in netlogtype.Connection. -func (s *Statistics) UpdateTxVirtual(b []byte) { +func (s *statistics) UpdateTxVirtual(b []byte) { var p packet.Parsed p.Decode(b) s.UpdateVirtual(p.IPProto, p.Src, p.Dst, 1, len(b), false) @@ -94,7 +92,7 @@ func (s *Statistics) UpdateTxVirtual(b []byte) { // UpdateRxVirtual updates the counters for a received IP packet. // The source and destination of the packet are inverted with respect to // the source and destination in netlogtype.Connection. -func (s *Statistics) UpdateRxVirtual(b []byte) { +func (s *statistics) UpdateRxVirtual(b []byte) { var p packet.Parsed p.Decode(b) s.UpdateVirtual(p.IPProto, p.Dst, p.Src, 1, len(b), true) @@ -105,7 +103,7 @@ var ( tailscaleServiceIPv6 = tsaddr.TailscaleServiceIPv6() ) -func (s *Statistics) UpdateVirtual(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { +func (s *statistics) UpdateVirtual(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { // Network logging is defined as traffic between two Tailscale nodes. // Traffic with the internal Tailscale service is not with another node // and should not be logged. It also happens to be a high volume @@ -137,7 +135,7 @@ func (s *Statistics) UpdateVirtual(proto ipproto.Proto, src, dst netip.AddrPort, // The src is always a Tailscale IP address, representing some remote peer. // The dst is a remote IP address and port that corresponds // with some physical peer backing the Tailscale IP address. -func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { +func (s *statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { s.UpdatePhysical(0, netip.AddrPortFrom(src, 0), dst, packets, bytes, false) } @@ -145,11 +143,11 @@ func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packet // The src is always a Tailscale IP address, representing some remote peer. // The dst is a remote IP address and port that corresponds // with some physical peer backing the Tailscale IP address. 
-func (s *Statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { +func (s *statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { s.UpdatePhysical(0, netip.AddrPortFrom(src, 0), dst, packets, bytes, true) } -func (s *Statistics) UpdatePhysical(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { +func (s *statistics) UpdatePhysical(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} s.mu.Lock() @@ -170,7 +168,7 @@ func (s *Statistics) UpdatePhysical(proto ipproto.Proto, src, dst netip.AddrPort // preInsertConn updates the maps to handle insertion of a new connection. // It reports false if insertion is not allowed (i.e., after shutdown). -func (s *Statistics) preInsertConn() bool { +func (s *statistics) preInsertConn() bool { // Check whether insertion of a new connection will exceed maxConns. if len(s.virtual)+len(s.physical) == s.maxConns && s.maxConns > 0 { // Extract the current statistics and send it to the serializer. @@ -192,13 +190,13 @@ func (s *Statistics) preInsertConn() bool { return s.shutdownCtx.Err() == nil } -func (s *Statistics) extract() connCnts { +func (s *statistics) extract() connCnts { s.mu.Lock() defer s.mu.Unlock() return s.extractLocked() } -func (s *Statistics) extractLocked() connCnts { +func (s *statistics) extractLocked() connCnts { if len(s.virtual)+len(s.physical) == 0 { return connCnts{} } @@ -210,7 +208,7 @@ func (s *Statistics) extractLocked() connCnts { // TestExtract synchronously extracts the current network statistics map // and resets the counters. This should only be used for testing purposes. -func (s *Statistics) TestExtract() (virtual, physical map[netlogtype.Connection]netlogtype.Counts) { +func (s *statistics) TestExtract() (virtual, physical map[netlogtype.Connection]netlogtype.Counts) { cc := s.extract() return cc.virtual, cc.physical } @@ -218,7 +216,7 @@ func (s *Statistics) TestExtract() (virtual, physical map[netlogtype.Connection] // Shutdown performs a final flush of statistics. // Statistics for any subsequent calls to Update will be dropped. // It is safe to call Shutdown concurrently and repeatedly. 
-func (s *Statistics) Shutdown(context.Context) error { +func (s *statistics) Shutdown(context.Context) error { s.shutdown() return s.group.Wait() } diff --git a/net/connstats/stats_test.go b/wgengine/netlog/stats_test.go similarity index 95% rename from net/connstats/stats_test.go rename to wgengine/netlog/stats_test.go index ae0bca8a5..6cf7eb998 100644 --- a/net/connstats/stats_test.go +++ b/wgengine/netlog/stats_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package connstats +package netlog import ( "context" @@ -54,7 +54,7 @@ func TestInterval(t *testing.T) { const maxConns = 2048 gotDump := make(chan struct{}, 1) - stats := NewStatistics(maxPeriod, maxConns, func(_, _ time.Time, _, _ map[netlogtype.Connection]netlogtype.Counts) { + stats := newStatistics(maxPeriod, maxConns, func(_, _ time.Time, _, _ map[netlogtype.Connection]netlogtype.Counts) { select { case gotDump <- struct{}{}: default: @@ -86,7 +86,7 @@ func TestConcurrent(t *testing.T) { const maxPeriod = 10 * time.Millisecond const maxConns = 10 virtualAggregate := make(map[netlogtype.Connection]netlogtype.Counts) - stats := NewStatistics(maxPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { + stats := newStatistics(maxPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { c.Assert(start.IsZero(), qt.IsFalse) c.Assert(end.IsZero(), qt.IsFalse) c.Assert(end.Before(start), qt.IsFalse) @@ -170,7 +170,7 @@ func Benchmark(b *testing.B) { b.ResetTimer() b.ReportAllocs() for range b.N { - s := NewStatistics(0, 0, nil) + s := newStatistics(0, 0, nil) for j := 0; j < 1e3; j++ { s.UpdateTxVirtual(p) } @@ -181,7 +181,7 @@ func Benchmark(b *testing.B) { b.ResetTimer() b.ReportAllocs() for range b.N { - s := NewStatistics(0, 0, nil) + s := newStatistics(0, 0, nil) for j := 0; j < 1e3; j++ { binary.BigEndian.PutUint32(p[20:], uint32(j)) // unique port combination s.UpdateTxVirtual(p) @@ -193,7 +193,7 @@ func Benchmark(b *testing.B) { b.ResetTimer() b.ReportAllocs() for range b.N { - s := NewStatistics(0, 0, nil) + s := newStatistics(0, 0, nil) var group sync.WaitGroup for j := 0; j < runtime.NumCPU(); j++ { group.Add(1) @@ -215,7 +215,7 @@ func Benchmark(b *testing.B) { b.ResetTimer() b.ReportAllocs() for range b.N { - s := NewStatistics(0, 0, nil) + s := newStatistics(0, 0, nil) var group sync.WaitGroup for j := 0; j < runtime.NumCPU(); j++ { group.Add(1) From 419fba40e02c693cc02c0416d4d837a47d69e7a8 Mon Sep 17 00:00:00 2001 From: David Bond Date: Thu, 16 Oct 2025 10:11:34 +0100 Subject: [PATCH 1556/1708] k8s-operator/api-proxy: put kube api server events behind environment variable (#17550) This commit modifies the k8s-operator's api proxy implementation to only enable forwarding of api requests to tsrecorder when an environment variable is set. This new environment variable is named `TS_EXPERIMENTAL_KUBE_API_EVENTS`. 
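For illustration, the gating added by this change boils down to the following minimal sketch. It reuses the envknob.Bool helper that the patch itself calls; the proxy type and constructor here are simplified stand-ins, not the operator's real APIServerProxy wiring:

    package main

    import (
        "fmt"

        "tailscale.com/envknob"
    )

    // apiServerProxy is a stripped-down stand-in for the operator's API proxy,
    // showing only the event-gating behaviour this change introduces.
    type apiServerProxy struct {
        // eventsEnabled is read once at construction time from the
        // TS_EXPERIMENTAL_KUBE_API_EVENTS environment variable.
        eventsEnabled bool
    }

    func newAPIServerProxy() *apiServerProxy {
        return &apiServerProxy{
            eventsEnabled: envknob.Bool("TS_EXPERIMENTAL_KUBE_API_EVENTS"),
        }
    }

    // recordRequestAsEvent sketches the early return added by the patch:
    // when the env var is unset, requests are never forwarded to tsrecorder.
    func (p *apiServerProxy) recordRequestAsEvent() error {
        if !p.eventsEnabled {
            return nil // feature disabled; skip recording entirely
        }
        // ... resolve recorder config and send the event (elided) ...
        return nil
    }

    func main() {
        p := newAPIServerProxy()
        fmt.Println("events enabled:", p.eventsEnabled)
    }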
Updates https://github.com/tailscale/corp/issues/32448 Signed-off-by: David Bond --- k8s-operator/api-proxy/proxy.go | 9 +++++++++ k8s-operator/api-proxy/proxy_events_test.go | 1 + 2 files changed, 10 insertions(+) diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index fdb798152..762a52f1f 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -28,6 +28,7 @@ import ( "k8s.io/client-go/transport" "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" + "tailscale.com/envknob" ksr "tailscale.com/k8s-operator/sessionrecording" "tailscale.com/kube/kubetypes" "tailscale.com/net/netx" @@ -96,6 +97,7 @@ func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsn upstreamURL: u, ts: ts, sendEventFunc: sessionrecording.SendEvent, + eventsEnabled: envknob.Bool("TS_EXPERIMENTAL_KUBE_API_EVENTS"), } ap.rp = &httputil.ReverseProxy{ Rewrite: func(pr *httputil.ProxyRequest) { @@ -192,6 +194,9 @@ type APIServerProxy struct { upstreamURL *url.URL sendEventFunc func(ap netip.AddrPort, event io.Reader, dial netx.DialFunc) error + + // Flag used to enable sending API requests as events to tsrecorder. + eventsEnabled bool } // serveDefault is the default handler for Kubernetes API server requests. @@ -310,6 +315,10 @@ func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request } func (ap *APIServerProxy) recordRequestAsEvent(req *http.Request, who *apitype.WhoIsResponse) error { + if !ap.eventsEnabled { + return nil + } + failOpen, addrs, err := determineRecorderConfig(who) if err != nil { return fmt.Errorf("error trying to determine whether the kubernetes api request needs to be recorded: %w", err) diff --git a/k8s-operator/api-proxy/proxy_events_test.go b/k8s-operator/api-proxy/proxy_events_test.go index 230927dc0..8bcf48436 100644 --- a/k8s-operator/api-proxy/proxy_events_test.go +++ b/k8s-operator/api-proxy/proxy_events_test.go @@ -61,6 +61,7 @@ func TestRecordRequestAsEvent(t *testing.T) { log: zl.Sugar(), ts: &tsnet.Server{}, sendEventFunc: sender.Send, + eventsEnabled: true, } defaultWho := &apitype.WhoIsResponse{ From 0ce88aa3433022bb96f3c2a97f5bfd7d2940d205 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Thu, 16 Oct 2025 11:13:41 +0100 Subject: [PATCH 1557/1708] all: use a consistent capitalisation for "Tailnet Lock" Updates https://github.com/tailscale/corp/issues/13108 Signed-off-by: Alex Chan --- cmd/tailscale/cli/network-lock.go | 8 ++++---- docs/windows/policy/en-US/tailscale.adml | 2 +- ipn/localapi/localapi.go | 2 +- tka/sig_test.go | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index 9b2f6fbdb..f355f99b9 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -225,18 +225,18 @@ func runNetworkLockStatus(ctx context.Context, args []string) error { } if st.Enabled { - fmt.Println("Tailnet lock is ENABLED.") + fmt.Println("Tailnet Lock is ENABLED.") } else { - fmt.Println("Tailnet lock is NOT enabled.") + fmt.Println("Tailnet Lock is NOT enabled.") } fmt.Println() if st.Enabled && st.NodeKey != nil && !st.PublicKey.IsZero() { if st.NodeKeySigned { - fmt.Println("This node is accessible under tailnet lock. Node signature:") + fmt.Println("This node is accessible under Tailnet Lock. 
Node signature:") fmt.Println(st.NodeKeySignature.String()) } else { - fmt.Println("This node is LOCKED OUT by tailnet-lock, and action is required to establish connectivity.") + fmt.Println("This node is LOCKED OUT by Tailnet Lock, and action is required to establish connectivity.") fmt.Printf("Run the following command on a node with a trusted key:\n\ttailscale lock sign %v %s\n", st.NodeKey, st.PublicKey.CLIString()) } fmt.Println() diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index 58e13be19..a0be5e831 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -61,7 +61,7 @@ Managing authentication keys via Group Policy and MDM solutions poses significan While MDM solutions tend to offer better control over who can access the policy setting values, they can still be compromised. Additionally, with both Group Policy and MDM solutions, the auth key is always readable by all users who have access to the device where this policy setting applies, as well as by all applications running on the device. A compromised auth key can potentially be used by a malicious actor to gain or elevate access to the target network. -Only consider this option after carefully reviewing the organization's security posture. For example, ensure you configure the auth keys specifically for the tag of the device and that access control policies only grant necessary access between the tailnet and the tagged device. Additionally, consider using short-lived auth keys, one-time auth keys (with one GPO/MDM configuration per device), Device Approval, and/or Tailnet lock to minimize risk. If you suspect an auth key has been compromised, revoke the auth key immediately. +Only consider this option after carefully reviewing the organization's security posture. For example, ensure you configure the auth keys specifically for the tag of the device and that access control policies only grant necessary access between the tailnet and the tagged device. Additionally, consider using short-lived auth keys, one-time auth keys (with one GPO/MDM configuration per device), Device Approval, and/or Tailnet Lock to minimize risk. If you suspect an auth key has been compromised, revoke the auth key immediately. If you enable this policy setting and specify an auth key, it will be used to authenticate the device unless the device is already logged in or an auth key is explicitly specified via the CLI. diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 32dc2963f..9e7c16891 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -424,7 +424,7 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) { // OS-specific details h.logf.JSON(1, "UserBugReportOS", osdiag.SupportInfo(osdiag.LogSupportInfoReasonBugReport)) - // Tailnet lock details + // Tailnet Lock details st := h.b.NetworkLockStatus() if st.Enabled { h.logf.JSON(1, "UserBugReportTailnetLockStatus", st) diff --git a/tka/sig_test.go b/tka/sig_test.go index d64575e7c..99c25f8e5 100644 --- a/tka/sig_test.go +++ b/tka/sig_test.go @@ -507,7 +507,7 @@ func TestDecodeWrappedAuthkey(t *testing.T) { } func TestResignNKS(t *testing.T) { - // Tailnet lock keypair of a signing node. + // Tailnet Lock keypair of a signing node. 
authPub, authPriv := testingKey25519(t, 1) authKey := Key{Kind: Key25519, Public: authPub, Votes: 2} From c3acf25d6217f6cb7b1eb74afaf2860293abf377 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 17 Oct 2025 11:27:57 +0100 Subject: [PATCH 1558/1708] tka: remove an unused Mem.Orphans() method This method was added in cca25f6 in the initial in-memory implementation of Chonk, but it's not part of the Chonk interface and isn't implemented or used anywhere else. Let's get rid of it. Updates https://github.com/tailscale/corp/issues/33465 Signed-off-by: Alex Chan --- tka/tailchonk.go | 13 ------------- tka/tailchonk_test.go | 27 --------------------------- 2 files changed, 40 deletions(-) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 6c441669a..bebc6cec9 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -127,19 +127,6 @@ func (c *Mem) AUM(hash AUMHash) (AUM, error) { return aum, nil } -// Orphans returns all AUMs which do not have a parent. -func (c *Mem) Orphans() ([]AUM, error) { - c.l.RLock() - defer c.l.RUnlock() - out := make([]AUM, 0, 6) - for _, a := range c.aums { - if _, ok := a.Parent(); !ok { - out = append(out, a) - } - } - return out, nil -} - // ChildAUMs returns all AUMs with a specified previous // AUM hash. func (c *Mem) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) { diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index 86d5642a3..376de323c 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -73,33 +73,6 @@ func TestTailchonk_AUMMissing(t *testing.T) { } } -func TestTailchonkMem_Orphans(t *testing.T) { - chonk := Mem{} - - parentHash := randHash(t, 1) - orphan := AUM{MessageKind: AUMNoOp} - aums := []AUM{ - orphan, - // A parent is specified, so we shouldnt see it in GetOrphans() - { - MessageKind: AUMRemoveKey, - KeyID: []byte{3, 4}, - PrevAUMHash: parentHash[:], - }, - } - if err := chonk.CommitVerifiedAUMs(aums); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - - stored, err := chonk.Orphans() - if err != nil { - t.Fatalf("Orphans failed: %v", err) - } - if diff := cmp.Diff([]AUM{orphan}, stored); diff != "" { - t.Errorf("stored AUM differs (-want, +got):\n%s", diff) - } -} - func TestTailchonk_ReadChainFromHead(t *testing.T) { for _, chonk := range []Chonk{&Mem{}, &FS{base: t.TempDir()}} { From 55a43c3736a7a7029eec214da8b2ab5788679906 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 17 Oct 2025 10:53:12 +0100 Subject: [PATCH 1559/1708] tka: don't look up parent/child information from purged AUMs We soft-delete AUMs when they're purged, but when we call `ChildAUMs()`, we look up soft-deleted AUMs to find the `Children` field. This patch changes the behaviour of `ChildAUMs()` so it only looks at not-deleted AUMs. This means we don't need to record child information on AUMs any more, which is a minor space saving for any newly-recorded AUMs. 
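Roughly, the new lookup is a linear scan over live (non-purged) AUMs rather than a read of a stored Children list. The sketch below uses simplified types standing in for tka.AUM and tka.AUMHash; it is illustrative only, not the actual tka code:

    package main

    import (
        "bytes"
        "fmt"
    )

    // aum is a simplified stand-in for tka.AUM, carrying only the fields
    // the new ChildAUMs lookup cares about.
    type aum struct {
        hash       []byte
        prevHash   []byte // parent hash; empty for a genesis AUM
        purgedUnix int64  // non-zero once the AUM has been soft-deleted
    }

    // childAUMs sketches the new behaviour: scan every live AUM and keep
    // those whose parent hash matches, skipping purged AUMs entirely.
    func childAUMs(all []aum, parent []byte) []aum {
        var out []aum
        for _, a := range all {
            if a.purgedUnix > 0 {
                continue // soft-deleted: neither returned nor consulted
            }
            if bytes.Equal(a.prevHash, parent) {
                out = append(out, a)
            }
        }
        return out
    }

    func main() {
        parent := []byte{1}
        store := []aum{
            {hash: []byte{1}},                                     // the parent itself
            {hash: []byte{2}, prevHash: []byte{1}},                // live child
            {hash: []byte{3}, prevHash: []byte{1}, purgedUnix: 9}, // purged child: not returned
        }
        fmt.Println("live children:", len(childAUMs(store, parent))) // prints 1
    }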
Updates https://github.com/tailscale/tailscale/issues/17566 Updates https://github.com/tailscale/corp/issues/27166 Signed-off-by: Alex Chan --- tka/tailchonk.go | 105 ++++++++++++++++++++++-------------------- tka/tailchonk_test.go | 63 +++++++++++++++++++++---- 2 files changed, 108 insertions(+), 60 deletions(-) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index bebc6cec9..cb683c273 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -11,6 +11,7 @@ import ( "fmt" "os" "path/filepath" + "slices" "sync" "time" @@ -206,10 +207,14 @@ func ChonkDir(dir string) (*FS, error) { // CBOR was chosen because we are already using it and it serializes // much smaller than JSON for AUMs. The 'keyasint' thing isn't essential // but again it saves a bunch of bytes. +// +// We have removed the following fields from fsHashInfo, but they may be +// present in data stored in existing deployments. Do not reuse these values, +// to avoid getting unexpected values from legacy data: +// - cbor:1, Children type fsHashInfo struct { - Children []AUMHash `cbor:"1,keyasint"` - AUM *AUM `cbor:"2,keyasint"` - CreatedUnix int64 `cbor:"3,keyasint,omitempty"` + AUM *AUM `cbor:"2,keyasint"` + CreatedUnix int64 `cbor:"3,keyasint,omitempty"` // PurgedUnix is set when the AUM is deleted. The value is // the unix epoch at the time it was deleted. @@ -285,32 +290,15 @@ func (c *FS) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) { c.mu.RLock() defer c.mu.RUnlock() - info, err := c.get(prevAUMHash) - if err != nil { - if os.IsNotExist(err) { - // not knowing about this hash is not an error - return nil, nil - } - return nil, err - } - // NOTE(tom): We don't check PurgedUnix here because 'purged' - // only applies to that specific AUM (i.e. info.AUM) and not to - // any information about children stored against that hash. + var out []AUM - out := make([]AUM, len(info.Children)) - for i, h := range info.Children { - c, err := c.get(h) - if err != nil { - // We expect any AUM recorded as a child on its parent to exist. - return nil, fmt.Errorf("reading child %d of %x: %v", i, h, err) - } - if c.AUM == nil || c.PurgedUnix > 0 { - return nil, fmt.Errorf("child %d of %x: AUM not stored", i, h) + err := c.scanHashes(func(info *fsHashInfo) { + if info.AUM != nil && bytes.Equal(info.AUM.PrevAUMHash, prevAUMHash[:]) { + out = append(out, *info.AUM) } - out[i] = *c.AUM - } + }) - return out, nil + return out, err } func (c *FS) get(h AUMHash) (*fsHashInfo, error) { @@ -346,13 +334,45 @@ func (c *FS) Heads() ([]AUM, error) { c.mu.RLock() defer c.mu.RUnlock() + // Scan the complete list of AUMs, and build a list of all parent hashes. + // This tells us which AUMs have children. + var parentHashes []AUMHash + + allAUMs, err := c.AllAUMs() + if err != nil { + return nil, err + } + + for _, h := range allAUMs { + aum, err := c.AUM(h) + if err != nil { + return nil, err + } + parent, hasParent := aum.Parent() + if !hasParent { + continue + } + if !slices.Contains(parentHashes, parent) { + parentHashes = append(parentHashes, parent) + } + } + + // Now scan a second time, and only include AUMs which weren't marked as + // the parent of any other AUM. out := make([]AUM, 0, 6) // 6 is arbitrary. 
- err := c.scanHashes(func(info *fsHashInfo) { - if len(info.Children) == 0 && info.AUM != nil && info.PurgedUnix == 0 { - out = append(out, *info.AUM) + + for _, h := range allAUMs { + if slices.Contains(parentHashes, h) { + continue } - }) - return out, err + aum, err := c.AUM(h) + if err != nil { + return nil, err + } + out = append(out, aum) + } + + return out, nil } // AllAUMs returns all AUMs stored in the chonk. @@ -362,7 +382,7 @@ func (c *FS) AllAUMs() ([]AUMHash, error) { out := make([]AUMHash, 0, 6) // 6 is arbitrary. err := c.scanHashes(func(info *fsHashInfo) { - if info.AUM != nil && info.PurgedUnix == 0 { + if info.AUM != nil { out = append(out, info.AUM.Hash()) } }) @@ -391,6 +411,9 @@ func (c *FS) scanHashes(eachHashInfo func(*fsHashInfo)) error { if err != nil { return fmt.Errorf("reading %x: %v", h, err) } + if info.PurgedUnix > 0 { + continue + } eachHashInfo(info) } @@ -445,24 +468,6 @@ func (c *FS) CommitVerifiedAUMs(updates []AUM) error { for i, aum := range updates { h := aum.Hash() - // We keep track of children against their parent so that - // ChildAUMs() do not need to scan all AUMs. - parent, hasParent := aum.Parent() - if hasParent { - err := c.commit(parent, func(info *fsHashInfo) { - // Only add it if its not already there. - for i := range info.Children { - if info.Children[i] == h { - return - } - } - info.Children = append(info.Children, h) - }) - if err != nil { - return fmt.Errorf("committing update[%d] to parent %x: %v", i, parent, err) - } - } - err := c.commit(h, func(info *fsHashInfo) { info.PurgedUnix = 0 // just in-case it was set for some reason info.AUM = &aum diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index 376de323c..cf6ea203b 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -15,6 +15,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "golang.org/x/crypto/blake2s" + "tailscale.com/util/must" ) // randHash derives a fake blake2s hash from the test name @@ -144,9 +145,6 @@ func TestTailchonkFS_Commit(t *testing.T) { if _, err := os.Stat(filepath.Join(dir, base)); err != nil { t.Errorf("stat of AUM file failed: %v", err) } - if _, err := os.Stat(filepath.Join(chonk.base, "M7", "M7LL2NDB4NKCZIUPVS6RDM2GUOIMW6EEAFVBWMVCPUANQJPHT3SQ")); err != nil { - t.Errorf("stat of AUM parent failed: %v", err) - } info, err := chonk.get(aum.Hash()) if err != nil { @@ -199,6 +197,14 @@ func TestTailchonkFS_PurgeAUMs(t *testing.T) { } } +func hashesLess(x, y AUMHash) bool { + return bytes.Compare(x[:], y[:]) < 0 +} + +func aumHashesLess(x, y AUM) bool { + return hashesLess(x.Hash(), y.Hash()) +} + func TestTailchonkFS_AllAUMs(t *testing.T) { chonk := &FS{base: t.TempDir()} genesis := AUM{MessageKind: AUMRemoveKey, KeyID: []byte{1, 2}} @@ -220,14 +226,54 @@ func TestTailchonkFS_AllAUMs(t *testing.T) { if err != nil { t.Fatal(err) } - hashesLess := func(a, b AUMHash) bool { - return bytes.Compare(a[:], b[:]) < 0 - } if diff := cmp.Diff([]AUMHash{genesis.Hash(), intermediate.Hash(), leaf.Hash()}, hashes, cmpopts.SortSlices(hashesLess)); diff != "" { t.Fatalf("AllAUMs() output differs (-want, +got):\n%s", diff) } } +func TestTailchonkFS_ChildAUMsOfPurgedAUM(t *testing.T) { + chonk := &FS{base: t.TempDir()} + parent := AUM{MessageKind: AUMRemoveKey, KeyID: []byte{0, 0}} + + parentHash := parent.Hash() + + child1 := AUM{MessageKind: AUMAddKey, KeyID: []byte{1, 1}, PrevAUMHash: parentHash[:]} + child2 := AUM{MessageKind: AUMAddKey, KeyID: []byte{2, 2}, PrevAUMHash: parentHash[:]} + child3 := 
AUM{MessageKind: AUMAddKey, KeyID: []byte{3, 3}, PrevAUMHash: parentHash[:]} + + child2Hash := child2.Hash() + grandchild2A := AUM{MessageKind: AUMAddKey, KeyID: []byte{2, 2, 2, 2}, PrevAUMHash: child2Hash[:]} + grandchild2B := AUM{MessageKind: AUMAddKey, KeyID: []byte{2, 2, 2, 2, 2}, PrevAUMHash: child2Hash[:]} + + commitSet := []AUM{parent, child1, child2, child3, grandchild2A, grandchild2B} + + if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + + // Check the set of hashes is correct + childHashes := must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]AUM{child1, child2, child3}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } + + // Purge the parent AUM, and check the set of child AUMs is unchanged + chonk.PurgeAUMs([]AUMHash{parent.Hash()}) + + childHashes = must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]AUM{child1, child2, child3}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } + + // Now purge one of the child AUMs, and check it no longer appears as a child of the parent + chonk.PurgeAUMs([]AUMHash{child3.Hash()}) + + childHashes = must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]AUM{child1, child2}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } +} + func TestMarkActiveChain(t *testing.T) { type aumTemplate struct { AUM AUM @@ -585,10 +631,7 @@ func (c *compactingChonkFake) CommitTime(hash AUMHash) (time.Time, error) { } func (c *compactingChonkFake) PurgeAUMs(hashes []AUMHash) error { - lessHashes := func(a, b AUMHash) bool { - return bytes.Compare(a[:], b[:]) < 0 - } - if diff := cmp.Diff(c.wantDelete, hashes, cmpopts.SortSlices(lessHashes)); diff != "" { + if diff := cmp.Diff(c.wantDelete, hashes, cmpopts.SortSlices(hashesLess)); diff != "" { c.t.Errorf("deletion set differs (-want, +got):\n%s", diff) } return nil From 8d119f62eebd6c3782f366d225df8b5f352f3daa Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 17 Oct 2025 11:13:14 +0100 Subject: [PATCH 1560/1708] wgengine/magicsock: minor tidies in Test_endpoint_maybeProbeUDPLifetimeLocked * Remove a couple of single-letter `l` variables * Use named struct parameters in the test cases for readability * Delete `wantAfterInactivityForFn` parameter when it returns the default zero Updates #cleanup Signed-off-by: Alex Chan --- wgengine/magicsock/endpoint_test.go | 171 +++++++++++++--------------- 1 file changed, 77 insertions(+), 94 deletions(-) diff --git a/wgengine/magicsock/endpoint_test.go b/wgengine/magicsock/endpoint_test.go index 666d86231..df1c93406 100644 --- a/wgengine/magicsock/endpoint_test.go +++ b/wgengine/magicsock/endpoint_test.go @@ -173,130 +173,110 @@ func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { wantMaybe bool }{ { - "nil probeUDPLifetime", - higher, - &lower, - func() *probeUDPLifetime { + name: "nil probeUDPLifetime", + localDisco: higher, + remoteDisco: &lower, + probeUDPLifetimeFn: func() *probeUDPLifetime { return nil }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 - }, - false, + bestAddr: addr, }, { - "local higher disco key", - higher, - &lower, - newProbeUDPLifetime, - addr, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 - }, - false, + name: "local higher disco key", + localDisco: higher, + 
remoteDisco: &lower, + probeUDPLifetimeFn: newProbeUDPLifetime, + bestAddr: addr, }, { - "remote no disco key", - higher, - nil, - newProbeUDPLifetime, - addr, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 - }, - false, + name: "remote no disco key", + localDisco: higher, + remoteDisco: nil, + probeUDPLifetimeFn: newProbeUDPLifetime, + bestAddr: addr, }, { - "invalid bestAddr", - lower, - &higher, - newProbeUDPLifetime, - addrQuality{}, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 - }, - false, + name: "invalid bestAddr", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: newProbeUDPLifetime, + bestAddr: addrQuality{}, }, { - "cycle started too recently", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = false - l.cycleStartedAt = time.Now() - return l - }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 + name: "cycle started too recently", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = false + lt.cycleStartedAt = time.Now() + return lt }, - false, + bestAddr: addr, }, { - "maybe cliff 0 cycle not active", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = false - l.cycleStartedAt = time.Now().Add(-l.config.CycleCanStartEvery).Add(-time.Second) - return l + name: "maybe cliff 0 cycle not active", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = false + lt.cycleStartedAt = time.Now().Add(-lt.config.CycleCanStartEvery).Add(-time.Second) + return lt }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { + bestAddr: addr, + wantAfterInactivityForFn: func(lifetime *probeUDPLifetime) time.Duration { return lifetime.config.Cliffs[0] - udpLifetimeProbeCliffSlack }, - true, + wantMaybe: true, }, { - "maybe cliff 0", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = true - l.currentCliff = 0 - return l + name: "maybe cliff 0", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = true + lt.currentCliff = 0 + return lt }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { + bestAddr: addr, + wantAfterInactivityForFn: func(lifetime *probeUDPLifetime) time.Duration { return lifetime.config.Cliffs[0] - udpLifetimeProbeCliffSlack }, - true, + wantMaybe: true, }, { - "maybe cliff 1", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = true - l.currentCliff = 1 - return l + name: "maybe cliff 1", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = true + lt.currentCliff = 1 + return lt }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { + bestAddr: addr, + wantAfterInactivityForFn: func(lifetime *probeUDPLifetime) time.Duration { return lifetime.config.Cliffs[1] - udpLifetimeProbeCliffSlack }, - true, + wantMaybe: true, }, { - "maybe cliff 2", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = true - l.currentCliff = 2 - return l + name: "maybe cliff 2", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = true + lt.currentCliff = 2 + 
return lt }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { + bestAddr: addr, + wantAfterInactivityForFn: func(lifetime *probeUDPLifetime) time.Duration { return lifetime.config.Cliffs[2] - udpLifetimeProbeCliffSlack }, - true, + wantMaybe: true, }, } for _, tt := range tests { @@ -316,7 +296,10 @@ func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { p := tt.probeUDPLifetimeFn() de.probeUDPLifetime = p gotAfterInactivityFor, gotMaybe := de.maybeProbeUDPLifetimeLocked() - wantAfterInactivityFor := tt.wantAfterInactivityForFn(p) + var wantAfterInactivityFor time.Duration + if tt.wantAfterInactivityForFn != nil { + wantAfterInactivityFor = tt.wantAfterInactivityForFn(p) + } if gotAfterInactivityFor != wantAfterInactivityFor { t.Errorf("maybeProbeUDPLifetimeLocked() gotAfterInactivityFor = %v, want %v", gotAfterInactivityFor, wantAfterInactivityFor) } From 6493206ac7f67ef4261018a3fb64122571fb5297 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 17 Oct 2025 10:00:42 -0700 Subject: [PATCH 1561/1708] .github/workflows: pin nix-related github actions (#17574) Updates #cleanup Signed-off-by: Andrew Lytvynov --- .github/workflows/flakehub-publish-tagged.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/flakehub-publish-tagged.yml b/.github/workflows/flakehub-publish-tagged.yml index 9ff12c6a3..50bb8b9f7 100644 --- a/.github/workflows/flakehub-publish-tagged.yml +++ b/.github/workflows/flakehub-publish-tagged.yml @@ -20,8 +20,8 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: ref: "${{ (inputs.tag != null) && format('refs/tags/{0}', inputs.tag) || '' }}" - - uses: "DeterminateSystems/nix-installer-action@main" - - uses: "DeterminateSystems/flakehub-push@main" + - uses: DeterminateSystems/nix-installer-action@786fff0690178f1234e4e1fe9b536e94f5433196 # v20 + - uses: DeterminateSystems/flakehub-push@71f57208810a5d299fc6545350981de98fdbc860 # v6 with: visibility: "public" tag: "${{ inputs.tag }}" From 9083ef1ac4ca9de0d17a5da1c6a4cb5a22dc5b8e Mon Sep 17 00:00:00 2001 From: David Bond Date: Fri, 17 Oct 2025 18:32:30 +0100 Subject: [PATCH 1562/1708] cmd/k8s-operator: allow pod tolerations on nameservers (#17260) This commit modifies the `DNSConfig` custom resource to allow specifying [tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) on the nameserver pods. This will allow users to dictate where their nameserver pods are located within their clusters. Fixes: https://github.com/tailscale/tailscale/issues/17092 Signed-off-by: David Bond --- .../deploy/crds/tailscale.com_dnsconfigs.yaml | 43 +++++++++++++++++++ .../deploy/manifests/operator.yaml | 43 +++++++++++++++++++ cmd/k8s-operator/nameserver.go | 19 +++++--- cmd/k8s-operator/nameserver_test.go | 19 ++++++++ k8s-operator/api.md | 17 ++++++++ .../apis/v1alpha1/types_tsdnsconfig.go | 10 +++++ .../apis/v1alpha1/zz_generated.deepcopy.go | 27 ++++++++++++ 7 files changed, 171 insertions(+), 7 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml index 43ebaecec..a819aa651 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml @@ -100,6 +100,49 @@ spec: tag: description: Tag defaults to unstable. type: string + pod: + description: Pod configuration. 
+ type: object + properties: + tolerations: + description: If specified, applies tolerations to the pods deployed by the DNSConfig resource. + type: array + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + type: object + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string replicas: description: Replicas specifies how many Pods to create. Defaults to 1. type: integer diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 520d17eae..c7c5ef0a7 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -431,6 +431,49 @@ spec: description: Tag defaults to unstable. type: string type: object + pod: + description: Pod configuration. + properties: + tolerations: + description: If specified, applies tolerations to the pods deployed by the DNSConfig resource. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. 
+ format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object replicas: description: Replicas specifies how many Pods to create. Defaults to 1. format: int32 diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index 3618642e1..5de1c47ba 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -191,6 +191,9 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa if tsDNSCfg.Spec.Nameserver.Service != nil { dCfg.clusterIP = tsDNSCfg.Spec.Nameserver.Service.ClusterIP } + if tsDNSCfg.Spec.Nameserver.Pod != nil { + dCfg.tolerations = tsDNSCfg.Spec.Nameserver.Pod.Tolerations + } for _, deployable := range []deployable{saDeployable, deployDeployable, svcDeployable, cmDeployable} { if err := deployable.updateObj(ctx, dCfg, a.Client); err != nil { @@ -217,13 +220,14 @@ type deployable struct { } type deployConfig struct { - replicas int32 - imageRepo string - imageTag string - labels map[string]string - ownerRefs []metav1.OwnerReference - namespace string - clusterIP string + replicas int32 + imageRepo string + imageTag string + labels map[string]string + ownerRefs []metav1.OwnerReference + namespace string + clusterIP string + tolerations []corev1.Toleration } var ( @@ -248,6 +252,7 @@ var ( d.ObjectMeta.Namespace = cfg.namespace d.ObjectMeta.Labels = cfg.labels d.ObjectMeta.OwnerReferences = cfg.ownerRefs + d.Spec.Template.Spec.Tolerations = cfg.tolerations updateF := func(oldD *appsv1.Deployment) { oldD.Spec = d.Spec } diff --git a/cmd/k8s-operator/nameserver_test.go b/cmd/k8s-operator/nameserver_test.go index 88e48b753..6da52d8a2 100644 --- a/cmd/k8s-operator/nameserver_test.go +++ b/cmd/k8s-operator/nameserver_test.go @@ -42,6 +42,16 @@ func TestNameserverReconciler(t *testing.T) { Service: &tsapi.NameserverService{ ClusterIP: "5.4.3.2", }, + Pod: &tsapi.NameserverPod{ + Tolerations: []corev1.Toleration{ + { + Key: "some-key", + Operator: corev1.TolerationOpEqual, + Value: "some-value", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, }, }, } @@ -79,6 +89,15 @@ func TestNameserverReconciler(t *testing.T) { wantsDeploy.Spec.Replicas = ptr.To[int32](3) wantsDeploy.Namespace = tsNamespace wantsDeploy.ObjectMeta.Labels = nameserverLabels + wantsDeploy.Spec.Template.Spec.Tolerations = []corev1.Toleration{ + { + Key: "some-key", + Operator: corev1.TolerationOpEqual, + Value: "some-value", + Effect: corev1.TaintEffectNoSchedule, + }, + } + expectEqual(t, fc, wantsDeploy) }) diff --git a/k8s-operator/api.md b/k8s-operator/api.md index d75a21e37..979d199cb 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -443,6 +443,7 @@ _Appears in:_ | --- | --- | --- | --- | | `image` _[NameserverImage](#nameserverimage)_ | Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. | | | | `service` _[NameserverService](#nameserverservice)_ | Service configuration. | | | +| `pod` _[NameserverPod](#nameserverpod)_ | Pod configuration. | | | | `replicas` _integer_ | Replicas specifies how many Pods to create. Defaults to 1. | | Minimum: 0
                    | @@ -463,6 +464,22 @@ _Appears in:_ | `tag` _string_ | Tag defaults to unstable. | | | +#### NameserverPod + + + + + + + +_Appears in:_ +- [Nameserver](#nameserver) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | If specified, applies tolerations to the pods deployed by the DNSConfig resource. | | | + + #### NameserverService diff --git a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go index 4d8d569f6..7991003b8 100644 --- a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go +++ b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go @@ -6,6 +6,7 @@ package v1alpha1 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -84,6 +85,9 @@ type Nameserver struct { // Service configuration. // +optional Service *NameserverService `json:"service,omitempty"` + // Pod configuration. + // +optional + Pod *NameserverPod `json:"pod,omitempty"` // Replicas specifies how many Pods to create. Defaults to 1. // +optional // +kubebuilder:validation:Minimum=0 @@ -105,6 +109,12 @@ type NameserverService struct { ClusterIP string `json:"clusterIP,omitempty"` } +type NameserverPod struct { + // If specified, applies tolerations to the pods deployed by the DNSConfig resource. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + type DNSConfigStatus struct { // +listType=map // +listMapKey=type diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index 5684fd5f8..7492f1e54 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -422,6 +422,11 @@ func (in *Nameserver) DeepCopyInto(out *Nameserver) { *out = new(NameserverService) **out = **in } + if in.Pod != nil { + in, out := &in.Pod, &out.Pod + *out = new(NameserverPod) + (*in).DeepCopyInto(*out) + } if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas *out = new(int32) @@ -454,6 +459,28 @@ func (in *NameserverImage) DeepCopy() *NameserverImage { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NameserverPod) DeepCopyInto(out *NameserverPod) { + *out = *in + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameserverPod. +func (in *NameserverPod) DeepCopy() *NameserverPod { + if in == nil { + return nil + } + out := new(NameserverPod) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NameserverService) DeepCopyInto(out *NameserverService) { *out = *in From 54cee33baec6a2beeaa4aee2c771a8d9312fd8ac Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 17 Oct 2025 09:25:10 -0700 Subject: [PATCH 1563/1708] go.toolchain.rev: update to Go 1.25.3 Updates tailscale/go#140 Updates tailscale/go#142 Updates tailscale/go#138 Change-Id: Id25b6fa4e31eee243fec17667f14cdc48243c59e Signed-off-by: Brad Fitzpatrick --- go.mod | 2 +- go.toolchain.rev | 2 +- go.toolchain.rev.sri | 2 +- go.toolchain.version | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 0c6d33fa0..3c281fa7a 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.25.2 +go 1.25.3 require ( filippo.io/mkcert v1.4.4 diff --git a/go.toolchain.rev b/go.toolchain.rev index d5de79558..9ea6b37dc 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -a80a86e575c5b7b23b78540e947335d22f74d274 +5c01b77ad0d27a8bd4ef89ef7e713fd7043c5a91 diff --git a/go.toolchain.rev.sri b/go.toolchain.rev.sri index 9cbf36b93..a62a52599 100644 --- a/go.toolchain.rev.sri +++ b/go.toolchain.rev.sri @@ -1 +1 @@ -sha256-1OCmJ7sZL6G/6wO2+lnW4uYPCIdbXhscD5qSTIPoxDk= +sha256-2TYziJLJrFOW2FehhahKficnDACJEwjuvVYyeQZbrcc= diff --git a/go.toolchain.version b/go.toolchain.version index 61b813d5e..5bb76b575 100644 --- a/go.toolchain.version +++ b/go.toolchain.version @@ -1 +1 @@ -1.25.2 +1.25.3 From 6a73c0bdf5539971840e19c75113d8414f22a9c8 Mon Sep 17 00:00:00 2001 From: Max Coulombe Date: Fri, 17 Oct 2025 18:05:32 -0400 Subject: [PATCH 1564/1708] cmd/tailscale/cli,feature: add support for identity federation (#17529) Add new arguments to `tailscale up` so authkeys can be generated dynamically via identity federation. Updates #9192 Signed-off-by: mcoulombe --- cmd/tailscale/cli/up.go | 50 +++++- cmd/tailscale/cli/up_test.go | 3 + cmd/tailscale/depaware.txt | 4 +- cmd/tailscaled/depaware-minbox.txt | 1 + cmd/tailscaled/deps_test.go | 6 +- .../feature_identity_federation_disabled.go | 13 ++ .../feature_identity_federation_enabled.go | 13 ++ .../condregister/identityfederation/doc.go | 7 + .../maybe_identityfederation.go | 8 + feature/featuretags/featuretags.go | 13 +- .../identityfederation/identityfederation.go | 127 +++++++++++++ .../identityfederation_test.go | 167 ++++++++++++++++++ .../client/tailscale/identityfederation.go | 19 ++ internal/client/tailscale/tailscale.go | 3 + 14 files changed, 420 insertions(+), 14 deletions(-) create mode 100644 feature/buildfeatures/feature_identity_federation_disabled.go create mode 100644 feature/buildfeatures/feature_identity_federation_enabled.go create mode 100644 feature/condregister/identityfederation/doc.go create mode 100644 feature/condregister/identityfederation/maybe_identityfederation.go create mode 100644 feature/identityfederation/identityfederation.go create mode 100644 feature/identityfederation/identityfederation_test.go create mode 100644 internal/client/tailscale/identityfederation.go diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 07e008aab..91a6b6087 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -25,6 +25,7 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" qrcode "github.com/skip2/go-qrcode" "tailscale.com/feature/buildfeatures" + _ "tailscale.com/feature/condregister/identityfederation" _ "tailscale.com/feature/condregister/oauthkey" "tailscale.com/health/healthmsg" "tailscale.com/internal/client/tailscale" @@ -96,6 +97,9 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd 
string) *flag.FlagSet { upf.BoolVar(&upArgs.qr, "qr", false, "show QR code for login URLs") upf.StringVar(&upArgs.qrFormat, "qr-format", "small", "QR code formatting (small or large)") upf.StringVar(&upArgs.authKeyOrFile, "auth-key", "", `node authorization key; if it begins with "file:", then it's a path to a file containing the authkey`) + upf.StringVar(&upArgs.clientID, "client-id", "", "Client ID used to generate authkeys via workload identity federation") + upf.StringVar(&upArgs.clientSecretOrFile, "client-secret", "", `Client Secret used to generate authkeys via OAuth; if it begins with "file:", then it's a path to a file containing the secret`) + upf.StringVar(&upArgs.idTokenOrFile, "id-token", "", `ID token from the identity provider to exchange with the control server for workload identity federation; if it begins with "file:", then it's a path to a file containing the token`) upf.StringVar(&upArgs.server, "login-server", ipn.DefaultControlURL, "base URL of control server") upf.BoolVar(&upArgs.acceptRoutes, "accept-routes", acceptRouteDefault(goos), "accept routes advertised by other Tailscale nodes") @@ -184,6 +188,9 @@ type upArgsT struct { statefulFiltering bool netfilterMode string authKeyOrFile string // "secret" or "file:/path/to/secret" + clientID string + clientSecretOrFile string // "secret" or "file:/path/to/secret" + idTokenOrFile string // "secret" or "file:/path/to/secret" hostname string opUser string json bool @@ -193,8 +200,9 @@ type upArgsT struct { postureChecking bool } -func (a upArgsT) getAuthKey() (string, error) { - v := a.authKeyOrFile +// resolveValueFromFile returns the value as-is, or if it starts with "file:", +// reads and returns the trimmed contents of the file. +func resolveValueFromFile(v string) (string, error) { if file, ok := strings.CutPrefix(v, "file:"); ok { b, err := os.ReadFile(file) if err != nil { @@ -205,6 +213,18 @@ func (a upArgsT) getAuthKey() (string, error) { return v, nil } +func (a upArgsT) getAuthKey() (string, error) { + return resolveValueFromFile(a.authKeyOrFile) +} + +func (a upArgsT) getClientSecret() (string, error) { + return resolveValueFromFile(a.clientSecretOrFile) +} + +func (a upArgsT) getIDToken() (string, error) { + return resolveValueFromFile(a.idTokenOrFile) +} + var upArgsGlobal upArgsT // Fields output when `tailscale up --json` is used. Two JSON blocks will be output. @@ -586,11 +606,33 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE // Try to use an OAuth secret to generate an auth key if that functionality // is available. if f, ok := tailscale.HookResolveAuthKey.GetOk(); ok { - authKey, err = f(ctx, authKey, strings.Split(upArgs.advertiseTags, ",")) + clientSecret := authKey // the authkey argument accepts client secrets, if both arguments are provided authkey has precedence + if clientSecret == "" { + clientSecret, err = upArgs.getClientSecret() + if err != nil { + return err + } + } + + authKey, err = f(ctx, clientSecret, strings.Split(upArgs.advertiseTags, ",")) + if err != nil { + return err + } + } + // Try to resolve the auth key via workload identity federation if that functionality + // is available and no auth key is yet determined. 
+ if f, ok := tailscale.HookResolveAuthKeyViaWIF.GetOk(); ok && authKey == "" { + idToken, err := upArgs.getIDToken() + if err != nil { + return err + } + + authKey, err = f(ctx, prefs.ControlURL, upArgs.clientID, idToken, strings.Split(upArgs.advertiseTags, ",")) if err != nil { return err } } + err = localClient.Start(ctx, ipn.Options{ AuthKey: authKey, UpdatePrefs: prefs, @@ -869,7 +911,7 @@ func addPrefFlagMapping(flagName string, prefNames ...string) { // correspond to an ipn.Pref. func preflessFlag(flagName string) bool { switch flagName { - case "auth-key", "force-reauth", "reset", "qr", "qr-format", "json", "timeout", "accept-risk", "host-routes": + case "auth-key", "force-reauth", "reset", "qr", "qr-format", "json", "timeout", "accept-risk", "host-routes", "client-id", "client-secret", "id-token": return true } return false diff --git a/cmd/tailscale/cli/up_test.go b/cmd/tailscale/cli/up_test.go index efddb5324..fe2f1b555 100644 --- a/cmd/tailscale/cli/up_test.go +++ b/cmd/tailscale/cli/up_test.go @@ -43,6 +43,9 @@ var validUpFlags = set.Of( "stateful-filtering", "timeout", "unattended", + "client-id", + "client-secret", + "id-token", ) // TestUpFlagSetIsFrozen complains when new flags are added to tailscale up. diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index d5b7b059f..b249639bc 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -98,9 +98,11 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/feature from tailscale.com/tsweb+ tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscale/cli+ tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/identityfederation from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/useproxy from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/identityfederation from tailscale.com/feature/condregister/identityfederation tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/cmd/tailscale/cli @@ -245,7 +247,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/net/ipv6 from golang.org/x/net/icmp+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ - golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials + golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials+ golang.org/x/oauth2/clientcredentials from tailscale.com/feature/oauthkey golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index d46180e2d..9633e7398 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -75,6 +75,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/condregister/identityfederation from 
tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister+ tailscale.com/feature/condregister/useproxy from tailscale.com/cmd/tailscale/cli+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index b98c53eb5..64d1beca7 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -137,14 +137,14 @@ func TestOmitCaptivePortal(t *testing.T) { }.Check(t) } -func TestOmitOAuthKey(t *testing.T) { +func TestOmitAuth(t *testing.T) { deptest.DepChecker{ GOOS: "linux", GOARCH: "amd64", - Tags: "ts_omit_oauthkey,ts_include_cli", + Tags: "ts_omit_oauthkey,ts_omit_identityfederation,ts_include_cli", OnDep: func(dep string) { if strings.HasPrefix(dep, "golang.org/x/oauth2") { - t.Errorf("unexpected dep with ts_omit_oauthkey: %q", dep) + t.Errorf("unexpected oauth2 dep: %q", dep) } }, }.Check(t) diff --git a/feature/buildfeatures/feature_identity_federation_disabled.go b/feature/buildfeatures/feature_identity_federation_disabled.go new file mode 100644 index 000000000..c7b16f729 --- /dev/null +++ b/feature/buildfeatures/feature_identity_federation_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_identity_federation + +package buildfeatures + +// HasIdentityFederation is whether the binary was built with support for modular feature "Identity token exchange for auth key support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_identity_federation" build tag. +// It's a const so it can be used for dead code elimination. +const HasIdentityFederation = false diff --git a/feature/buildfeatures/feature_identity_federation_enabled.go b/feature/buildfeatures/feature_identity_federation_enabled.go new file mode 100644 index 000000000..1f7cf1742 --- /dev/null +++ b/feature/buildfeatures/feature_identity_federation_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_identity_federation + +package buildfeatures + +// HasIdentityFederation is whether the binary was built with support for modular feature "Identity token exchange for auth key support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_identity_federation" build tag. +// It's a const so it can be used for dead code elimination. +const HasIdentityFederation = true diff --git a/feature/condregister/identityfederation/doc.go b/feature/condregister/identityfederation/doc.go new file mode 100644 index 000000000..503b2c8f1 --- /dev/null +++ b/feature/condregister/identityfederation/doc.go @@ -0,0 +1,7 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package identityfederation registers support for authkey resolution +// via identity federation if it's not disabled by the +// ts_omit_identityfederation build tag. 
+package identityfederation diff --git a/feature/condregister/identityfederation/maybe_identityfederation.go b/feature/condregister/identityfederation/maybe_identityfederation.go new file mode 100644 index 000000000..b1db42fc3 --- /dev/null +++ b/feature/condregister/identityfederation/maybe_identityfederation.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_identityfederation + +package identityfederation + +import _ "tailscale.com/feature/identityfederation" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 9c85dbaa0..c93e8b15b 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -155,12 +155,13 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Generic Receive Offload support (performance)", Deps: []FeatureTag{"netstack"}, }, - "health": {Sym: "Health", Desc: "Health checking support"}, - "hujsonconf": {Sym: "HuJSONConf", Desc: "HuJSON config file support"}, - "iptables": {Sym: "IPTables", Desc: "Linux iptables support"}, - "kube": {Sym: "Kube", Desc: "Kubernetes integration"}, - "lazywg": {Sym: "LazyWG", Desc: "Lazy WireGuard configuration for memory-constrained devices with large netmaps"}, - "linuxdnsfight": {Sym: "LinuxDNSFight", Desc: "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)"}, + "health": {Sym: "Health", Desc: "Health checking support"}, + "hujsonconf": {Sym: "HuJSONConf", Desc: "HuJSON config file support"}, + "identityfederation": {Sym: "IdentityFederation", Desc: "Auth key generation via identity federation support"}, + "iptables": {Sym: "IPTables", Desc: "Linux iptables support"}, + "kube": {Sym: "Kube", Desc: "Kubernetes integration"}, + "lazywg": {Sym: "LazyWG", Desc: "Lazy WireGuard configuration for memory-constrained devices with large netmaps"}, + "linuxdnsfight": {Sym: "LinuxDNSFight", Desc: "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)"}, "linkspeed": { Sym: "LinkSpeed", Desc: "Set link speed on TUN device for better OS integration (Linux only)", diff --git a/feature/identityfederation/identityfederation.go b/feature/identityfederation/identityfederation.go new file mode 100644 index 000000000..a4470fc27 --- /dev/null +++ b/feature/identityfederation/identityfederation.go @@ -0,0 +1,127 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package identityfederation registers support for using ID tokens to +// automatically request authkeys for logging in. +package identityfederation + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/oauth2" + "tailscale.com/feature" + "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn" +) + +func init() { + feature.Register("identityfederation") + tailscale.HookResolveAuthKeyViaWIF.Set(resolveAuthKey) +} + +// resolveAuthKey uses OIDC identity federation to exchange the provided ID token and client ID for an authkey. 
+func resolveAuthKey(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) { + if clientID == "" { + return "", nil // Short-circuit, no client ID means not using identity federation + } + + if idToken == "" { + return "", errors.New("federated identity authkeys require --id-token") + } + if len(tags) == 0 { + return "", errors.New("federated identity authkeys require --advertise-tags") + } + if baseURL == "" { + baseURL = ipn.DefaultControlURL + } + + ephemeral, preauth, err := parseOptionalAttributes(clientID) + if err != nil { + return "", fmt.Errorf("failed to parse optional config attributes: %w", err) + } + + accessToken, err := exchangeJWTForToken(ctx, baseURL, clientID, idToken) + if err != nil { + return "", fmt.Errorf("failed to exchange JWT for access token: %w", err) + } + if accessToken == "" { + return "", errors.New("received empty access token from Tailscale") + } + + tsClient := tailscale.NewClient("-", tailscale.APIKey(accessToken)) + tsClient.UserAgent = "tailscale-cli-identity-federation" + tsClient.BaseURL = baseURL + + authkey, _, err := tsClient.CreateKey(ctx, tailscale.KeyCapabilities{ + Devices: tailscale.KeyDeviceCapabilities{ + Create: tailscale.KeyDeviceCreateCapabilities{ + Reusable: false, + Ephemeral: ephemeral, + Preauthorized: preauth, + Tags: tags, + }, + }, + }) + if err != nil { + return "", fmt.Errorf("unexpected error while creating authkey: %w", err) + } + if authkey == "" { + return "", errors.New("received empty authkey from control server") + } + + return authkey, nil +} + +func parseOptionalAttributes(clientID string) (ephemeral bool, preauthorized bool, err error) { + _, attrs, found := strings.Cut(clientID, "?") + if !found { + return true, false, nil + } + + parsed, err := url.ParseQuery(attrs) + if err != nil { + return false, false, fmt.Errorf("failed to parse optional config attributes: %w", err) + } + + for k := range parsed { + switch k { + case "ephemeral": + ephemeral, err = strconv.ParseBool(parsed.Get(k)) + case "preauthorized": + preauthorized, err = strconv.ParseBool(parsed.Get(k)) + default: + return false, false, fmt.Errorf("unknown optional config attribute %q", k) + } + } + + return ephemeral, preauthorized, err +} + +// exchangeJWTForToken exchanges a JWT for a Tailscale access token. 
+func exchangeJWTForToken(ctx context.Context, baseURL, clientID, idToken string) (string, error) { + httpClient := &http.Client{Timeout: 10 * time.Second} + ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient) + + token, err := (&oauth2.Config{ + Endpoint: oauth2.Endpoint{ + TokenURL: fmt.Sprintf("%s/api/v2/oauth/token-exchange", baseURL), + }, + }).Exchange(ctx, "", oauth2.SetAuthURLParam("client_id", clientID), oauth2.SetAuthURLParam("jwt", idToken)) + if err != nil { + // Try to extract more detailed error message + var retrieveErr *oauth2.RetrieveError + if errors.As(err, &retrieveErr) { + return "", fmt.Errorf("token exchange failed with status %d: %s", retrieveErr.Response.StatusCode, string(retrieveErr.Body)) + } + return "", fmt.Errorf("unexpected token exchange request error: %w", err) + } + + return token.AccessToken, nil +} diff --git a/feature/identityfederation/identityfederation_test.go b/feature/identityfederation/identityfederation_test.go new file mode 100644 index 000000000..7b75852a8 --- /dev/null +++ b/feature/identityfederation/identityfederation_test.go @@ -0,0 +1,167 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package identityfederation + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestResolveAuthKey(t *testing.T) { + tests := []struct { + name string + clientID string + idToken string + tags []string + wantAuthKey string + wantErr string + }{ + { + name: "success", + clientID: "client-123", + idToken: "token", + tags: []string{"tag:test"}, + wantAuthKey: "tskey-auth-xyz", + wantErr: "", + }, + { + name: "missing client id short-circuits without error", + clientID: "", + idToken: "token", + tags: []string{"tag:test"}, + wantAuthKey: "", + wantErr: "", + }, + { + name: "missing id token", + clientID: "client-123", + idToken: "", + tags: []string{"tag:test"}, + wantErr: "federated identity authkeys require --id-token", + }, + { + name: "missing tags", + clientID: "client-123", + idToken: "token", + tags: []string{}, + wantErr: "federated identity authkeys require --advertise-tags", + }, + { + name: "invalid client id attributes", + clientID: "client-123?invalid=value", + idToken: "token", + tags: []string{"tag:test"}, + wantErr: `failed to parse optional config attributes: unknown optional config attribute "invalid"`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srv := mockedControlServer(t) + defer srv.Close() + + authKey, err := resolveAuthKey(context.Background(), srv.URL, tt.clientID, tt.idToken, tt.tags) + if tt.wantErr != "" { + if err == nil { + t.Errorf("resolveAuthKey() error = nil, want %q", tt.wantErr) + return + } + if err.Error() != tt.wantErr { + t.Errorf("resolveAuthKey() error = %q, want %q", err.Error(), tt.wantErr) + } + } else if err != nil { + t.Fatalf("resolveAuthKey() unexpected error = %v", err) + } + if authKey != tt.wantAuthKey { + t.Errorf("resolveAuthKey() = %q, want %q", authKey, tt.wantAuthKey) + } + }) + } +} + +func TestParseOptionalAttributes(t *testing.T) { + tests := []struct { + name string + clientID string + wantEphemeral bool + wantPreauth bool + wantErr string + }{ + { + name: "default values", + clientID: "client-123", + wantEphemeral: true, + wantPreauth: false, + wantErr: "", + }, + { + name: "custom values", + clientID: "client-123?ephemeral=false&preauthorized=true", + wantEphemeral: false, + wantPreauth: true, + wantErr: "", + }, + { + name: "unknown attribute", + clientID: 
"client-123?unknown=value", + wantEphemeral: false, + wantPreauth: false, + wantErr: `unknown optional config attribute "unknown"`, + }, + { + name: "invalid value", + clientID: "client-123?ephemeral=invalid", + wantEphemeral: false, + wantPreauth: false, + wantErr: `strconv.ParseBool: parsing "invalid": invalid syntax`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ephemeral, preauth, err := parseOptionalAttributes(tt.clientID) + if tt.wantErr != "" { + if err == nil { + t.Errorf("parseOptionalAttributes() error = nil, want %q", tt.wantErr) + return + } + if err.Error() != tt.wantErr { + t.Errorf("parseOptionalAttributes() error = %q, want %q", err.Error(), tt.wantErr) + } + } else { + if err != nil { + t.Errorf("parseOptionalAttributes() error = %v, want nil", err) + return + } + } + if ephemeral != tt.wantEphemeral { + t.Errorf("parseOptionalAttributes() ephemeral = %v, want %v", ephemeral, tt.wantEphemeral) + } + if preauth != tt.wantPreauth { + t.Errorf("parseOptionalAttributes() preauth = %v, want %v", preauth, tt.wantPreauth) + } + }) + } +} + +func mockedControlServer(t *testing.T) *httptest.Server { + t.Helper() + + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + case strings.Contains(r.URL.Path, "/oauth/token-exchange"): + // OAuth2 library sends the token exchange request + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":"access-123","token_type":"Bearer","expires_in":3600}`)) + case strings.Contains(r.URL.Path, "/api/v2/tailnet") && strings.Contains(r.URL.Path, "/keys"): + // Tailscale client creates the authkey + w.Write([]byte(`{"key":"tskey-auth-xyz","created":"2024-01-01T00:00:00Z"}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) +} diff --git a/internal/client/tailscale/identityfederation.go b/internal/client/tailscale/identityfederation.go new file mode 100644 index 000000000..e1fe3559c --- /dev/null +++ b/internal/client/tailscale/identityfederation.go @@ -0,0 +1,19 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tailscale + +import ( + "context" + + "tailscale.com/feature" +) + +// HookResolveAuthKeyViaWIF resolves to [identityfederation.ResolveAuthKey] when the +// corresponding feature tag is enabled in the build process. +// +// baseURL is the URL of the control server used for token exchange and authkey generation. +// clientID is the federated client ID used for token exchange, the format is / +// idToken is the Identity token from the identity provider +// tags is the list of tags to be associated with the auth key +var HookResolveAuthKeyViaWIF feature.Hook[func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error)] diff --git a/internal/client/tailscale/tailscale.go b/internal/client/tailscale/tailscale.go index cba7228bb..0e603bf79 100644 --- a/internal/client/tailscale/tailscale.go +++ b/internal/client/tailscale/tailscale.go @@ -25,6 +25,9 @@ func init() { // AuthMethod is an alias to tailscale.com/client/tailscale. type AuthMethod = tsclient.AuthMethod +// APIKey is an alias to tailscale.com/client/tailscale. +type APIKey = tsclient.APIKey + // Device is an alias to tailscale.com/client/tailscale. 
type Device = tsclient.Device From c961d580912d25f48f1b916b9b2bc08f394b994d Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 20 Oct 2025 11:23:35 +0100 Subject: [PATCH 1565/1708] cmd/tailscale: improve the error message for `lock log` with no lock Previously, running `tailscale lock log` in a tailnet without Tailnet Lock enabled would return a potentially confusing error: $ tailscale lock log 2025/10/20 11:07:09 failed to connect to local Tailscale service; is Tailscale running? It would return this error even if Tailscale was running. This patch fixes the error to be: $ tailscale lock log Tailnet Lock is not enabled Fixes #17586 Signed-off-by: Alex Chan --- cmd/tailscale/cli/network-lock.go | 8 ++++++ tstest/integration/integration_test.go | 40 ++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index f355f99b9..a15d9ab88 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -690,6 +690,14 @@ func nlDescribeUpdate(update ipnstate.NetworkLockUpdate, color bool) (string, er } func runNetworkLockLog(ctx context.Context, args []string) error { + st, err := localClient.NetworkLockStatus(ctx) + if err != nil { + return fixTailscaledConnectError(err) + } + if !st.Enabled { + return errors.New("Tailnet Lock is not enabled") + } + updates, err := localClient.NetworkLockLog(ctx, nlLogArgs.limit) if err != nil { return fixTailscaledConnectError(err) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 2e85bc8be..234bb8c6e 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -2190,3 +2190,43 @@ func TestC2NDebugNetmap(t *testing.T) { t.Errorf("expected peer to be online; got %+v", nm.Peers[0].AsStruct()) } } + +func TestNetworkLock(t *testing.T) { + + // If you run `tailscale lock log` on a node where Tailnet Lock isn't + // enabled, you get an error explaining that. + t.Run("log-when-not-enabled", func(t *testing.T) { + tstest.Shard(t) + t.Parallel() + + env := NewTestEnv(t) + n1 := NewTestNode(t, env) + d1 := n1.StartDaemon() + defer d1.MustCleanShutdown(t) + + n1.MustUp() + n1.AwaitRunning() + + cmdArgs := []string{"lock", "log"} + t.Logf("Running command: %s", strings.Join(cmdArgs, " ")) + + var outBuf, errBuf bytes.Buffer + + cmd := n1.Tailscale(cmdArgs...) + cmd.Stdout = &outBuf + cmd.Stderr = &errBuf + + if err := cmd.Run(); !isNonZeroExitCode(err) { + t.Fatalf("command did not fail with non-zero exit code: %q", err) + } + + if outBuf.String() != "" { + t.Fatalf("stdout: want '', got %q", outBuf.String()) + } + + wantErr := "Tailnet Lock is not enabled\n" + if errBuf.String() != wantErr { + t.Fatalf("stderr: want %q, got %q", wantErr, errBuf.String()) + } + }) +} From 4673992b96603fbc1de370af7a6b3a1a68205d0b Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 17 Oct 2025 15:06:55 +0100 Subject: [PATCH 1566/1708] tka: created a shared testing library for Chonk This patch creates a set of tests that should be true for all implementations of Chonk and CompactableChonk, which we can share with the SQLite implementation in corp. It includes all the existing tests, plus a test for LastActiveAncestor which was in corp but not in oss. 
Updates https://github.com/tailscale/corp/issues/33465 Signed-off-by: Alex Chan --- tka/tailchonk_test.go | 213 ++---------------------- tstest/chonktest/chonktest.go | 256 +++++++++++++++++++++++++++++ tstest/chonktest/tailchonk_test.go | 53 ++++++ 3 files changed, 322 insertions(+), 200 deletions(-) create mode 100644 tstest/chonktest/chonktest.go create mode 100644 tstest/chonktest/tailchonk_test.go diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index cf6ea203b..086865980 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -5,7 +5,6 @@ package tka import ( "bytes" - "fmt" "os" "path/filepath" "sync" @@ -18,6 +17,13 @@ import ( "tailscale.com/util/must" ) +// This package has implementation-specific tests for Mem and FS. +// +// We also have tests for the Chonk interface in `chonktest`, which exercises +// both Mem and FS. Those tests are in a separate package so they can be shared +// with other repos; we don't call the shared test helpers from this package +// to avoid creating a circular dependency. + // randHash derives a fake blake2s hash from the test name // and the given seed. func randHash(t *testing.T, seed int64) [blake2s.Size]byte { @@ -31,103 +37,8 @@ func TestImplementsChonk(t *testing.T) { t.Logf("chonks: %v", impls) } -func TestTailchonk_ChildAUMs(t *testing.T) { - for _, chonk := range []Chonk{&Mem{}, &FS{base: t.TempDir()}} { - t.Run(fmt.Sprintf("%T", chonk), func(t *testing.T) { - parentHash := randHash(t, 1) - data := []AUM{ - { - MessageKind: AUMRemoveKey, - KeyID: []byte{1, 2}, - PrevAUMHash: parentHash[:], - }, - { - MessageKind: AUMRemoveKey, - KeyID: []byte{3, 4}, - PrevAUMHash: parentHash[:], - }, - } - - if err := chonk.CommitVerifiedAUMs(data); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - stored, err := chonk.ChildAUMs(parentHash) - if err != nil { - t.Fatalf("ChildAUMs failed: %v", err) - } - if diff := cmp.Diff(data, stored); diff != "" { - t.Errorf("stored AUM differs (-want, +got):\n%s", diff) - } - }) - } -} - -func TestTailchonk_AUMMissing(t *testing.T) { - for _, chonk := range []Chonk{&Mem{}, &FS{base: t.TempDir()}} { - t.Run(fmt.Sprintf("%T", chonk), func(t *testing.T) { - var notExists AUMHash - notExists[:][0] = 42 - if _, err := chonk.AUM(notExists); err != os.ErrNotExist { - t.Errorf("chonk.AUM(notExists).err = %v, want %v", err, os.ErrNotExist) - } - }) - } -} - -func TestTailchonk_ReadChainFromHead(t *testing.T) { - for _, chonk := range []Chonk{&Mem{}, &FS{base: t.TempDir()}} { - - t.Run(fmt.Sprintf("%T", chonk), func(t *testing.T) { - genesis := AUM{MessageKind: AUMRemoveKey, KeyID: []byte{1, 2}} - gHash := genesis.Hash() - intermediate := AUM{PrevAUMHash: gHash[:]} - iHash := intermediate.Hash() - leaf := AUM{PrevAUMHash: iHash[:]} - - commitSet := []AUM{ - genesis, - intermediate, - leaf, - } - if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - // t.Logf("genesis hash = %X", genesis.Hash()) - // t.Logf("intermediate hash = %X", intermediate.Hash()) - // t.Logf("leaf hash = %X", leaf.Hash()) - - // Read the chain from the leaf backwards. 
- gotLeafs, err := chonk.Heads() - if err != nil { - t.Fatalf("Heads failed: %v", err) - } - if diff := cmp.Diff([]AUM{leaf}, gotLeafs); diff != "" { - t.Fatalf("leaf AUM differs (-want, +got):\n%s", diff) - } - - parent, _ := gotLeafs[0].Parent() - gotIntermediate, err := chonk.AUM(parent) - if err != nil { - t.Fatalf("AUM() failed: %v", err) - } - if diff := cmp.Diff(intermediate, gotIntermediate); diff != "" { - t.Errorf("intermediate AUM differs (-want, +got):\n%s", diff) - } - - parent, _ = gotIntermediate.Parent() - gotGenesis, err := chonk.AUM(parent) - if err != nil { - t.Fatalf("AUM() failed: %v", err) - } - if diff := cmp.Diff(genesis, gotGenesis); diff != "" { - t.Errorf("genesis AUM differs (-want, +got):\n%s", diff) - } - }) - } -} - func TestTailchonkFS_Commit(t *testing.T) { - chonk := &FS{base: t.TempDir()} + chonk := must.Get(ChonkDir(t.TempDir())) parentHash := randHash(t, 1) aum := AUM{MessageKind: AUMNoOp, PrevAUMHash: parentHash[:]} @@ -156,7 +67,7 @@ func TestTailchonkFS_Commit(t *testing.T) { } func TestTailchonkFS_CommitTime(t *testing.T) { - chonk := &FS{base: t.TempDir()} + chonk := must.Get(ChonkDir(t.TempDir())) parentHash := randHash(t, 1) aum := AUM{MessageKind: AUMNoOp, PrevAUMHash: parentHash[:]} @@ -172,108 +83,6 @@ func TestTailchonkFS_CommitTime(t *testing.T) { } } -func TestTailchonkFS_PurgeAUMs(t *testing.T) { - chonk := &FS{base: t.TempDir()} - parentHash := randHash(t, 1) - aum := AUM{MessageKind: AUMNoOp, PrevAUMHash: parentHash[:]} - - if err := chonk.CommitVerifiedAUMs([]AUM{aum}); err != nil { - t.Fatal(err) - } - if err := chonk.PurgeAUMs([]AUMHash{aum.Hash()}); err != nil { - t.Fatal(err) - } - - if _, err := chonk.AUM(aum.Hash()); err != os.ErrNotExist { - t.Errorf("AUM() on purged AUM returned err = %v, want ErrNotExist", err) - } - - info, err := chonk.get(aum.Hash()) - if err != nil { - t.Fatal(err) - } - if info.PurgedUnix == 0 { - t.Errorf("recently-created AUM PurgedUnix = %d, want non-zero", info.PurgedUnix) - } -} - -func hashesLess(x, y AUMHash) bool { - return bytes.Compare(x[:], y[:]) < 0 -} - -func aumHashesLess(x, y AUM) bool { - return hashesLess(x.Hash(), y.Hash()) -} - -func TestTailchonkFS_AllAUMs(t *testing.T) { - chonk := &FS{base: t.TempDir()} - genesis := AUM{MessageKind: AUMRemoveKey, KeyID: []byte{1, 2}} - gHash := genesis.Hash() - intermediate := AUM{PrevAUMHash: gHash[:]} - iHash := intermediate.Hash() - leaf := AUM{PrevAUMHash: iHash[:]} - - commitSet := []AUM{ - genesis, - intermediate, - leaf, - } - if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - - hashes, err := chonk.AllAUMs() - if err != nil { - t.Fatal(err) - } - if diff := cmp.Diff([]AUMHash{genesis.Hash(), intermediate.Hash(), leaf.Hash()}, hashes, cmpopts.SortSlices(hashesLess)); diff != "" { - t.Fatalf("AllAUMs() output differs (-want, +got):\n%s", diff) - } -} - -func TestTailchonkFS_ChildAUMsOfPurgedAUM(t *testing.T) { - chonk := &FS{base: t.TempDir()} - parent := AUM{MessageKind: AUMRemoveKey, KeyID: []byte{0, 0}} - - parentHash := parent.Hash() - - child1 := AUM{MessageKind: AUMAddKey, KeyID: []byte{1, 1}, PrevAUMHash: parentHash[:]} - child2 := AUM{MessageKind: AUMAddKey, KeyID: []byte{2, 2}, PrevAUMHash: parentHash[:]} - child3 := AUM{MessageKind: AUMAddKey, KeyID: []byte{3, 3}, PrevAUMHash: parentHash[:]} - - child2Hash := child2.Hash() - grandchild2A := AUM{MessageKind: AUMAddKey, KeyID: []byte{2, 2, 2, 2}, PrevAUMHash: child2Hash[:]} - grandchild2B := AUM{MessageKind: AUMAddKey, 
KeyID: []byte{2, 2, 2, 2, 2}, PrevAUMHash: child2Hash[:]} - - commitSet := []AUM{parent, child1, child2, child3, grandchild2A, grandchild2B} - - if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - - // Check the set of hashes is correct - childHashes := must.Get(chonk.ChildAUMs(parentHash)) - if diff := cmp.Diff([]AUM{child1, child2, child3}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { - t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) - } - - // Purge the parent AUM, and check the set of child AUMs is unchanged - chonk.PurgeAUMs([]AUMHash{parent.Hash()}) - - childHashes = must.Get(chonk.ChildAUMs(parentHash)) - if diff := cmp.Diff([]AUM{child1, child2, child3}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { - t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) - } - - // Now purge one of the child AUMs, and check it no longer appears as a child of the parent - chonk.PurgeAUMs([]AUMHash{child3.Hash()}) - - childHashes = must.Get(chonk.ChildAUMs(parentHash)) - if diff := cmp.Diff([]AUM{child1, child2}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { - t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) - } -} - func TestMarkActiveChain(t *testing.T) { type aumTemplate struct { AUM AUM @@ -630,6 +439,10 @@ func (c *compactingChonkFake) CommitTime(hash AUMHash) (time.Time, error) { return c.aumAge[hash], nil } +func hashesLess(x, y AUMHash) bool { + return bytes.Compare(x[:], y[:]) < 0 +} + func (c *compactingChonkFake) PurgeAUMs(hashes []AUMHash) error { if diff := cmp.Diff(c.wantDelete, hashes, cmpopts.SortSlices(hashesLess)); diff != "" { c.t.Errorf("deletion set differs (-want, +got):\n%s", diff) diff --git a/tstest/chonktest/chonktest.go b/tstest/chonktest/chonktest.go new file mode 100644 index 000000000..bfe394b28 --- /dev/null +++ b/tstest/chonktest/chonktest.go @@ -0,0 +1,256 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package chonktest contains a shared set of tests for the Chonk +// interface used to store AUM messages in Tailnet Lock, which we can +// share between different implementations. +package chonktest + +import ( + "bytes" + "encoding/binary" + "math/rand" + "os" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "golang.org/x/crypto/blake2s" + "tailscale.com/tka" + "tailscale.com/util/must" +) + +// returns a random source based on the test name + extraSeed. +func testingRand(t *testing.T, extraSeed int64) *rand.Rand { + var seed int64 + if err := binary.Read(bytes.NewBuffer([]byte(t.Name())), binary.LittleEndian, &seed); err != nil { + panic(err) + } + return rand.New(rand.NewSource(seed + extraSeed)) +} + +// randHash derives a fake blake2s hash from the test name +// and the given seed. +func randHash(t *testing.T, seed int64) [blake2s.Size]byte { + var out [blake2s.Size]byte + testingRand(t, seed).Read(out[:]) + return out +} + +func hashesLess(x, y tka.AUMHash) bool { + return bytes.Compare(x[:], y[:]) < 0 +} + +func aumHashesLess(x, y tka.AUM) bool { + return hashesLess(x.Hash(), y.Hash()) +} + +// RunChonkTests is a set of tests for the behaviour of a Chonk. +// +// Any implementation of Chonk should pass these tests, so we know all +// Chonks behave in the same way. If you want to test behaviour that's +// specific to one implementation, write a separate test. 
+func RunChonkTests(t *testing.T, newChonk func(*testing.T) tka.Chonk) { + t.Run("ChildAUMs", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + parentHash := randHash(t, 1) + data := []tka.AUM{ + { + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{1, 2}, + PrevAUMHash: parentHash[:], + }, + { + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{3, 4}, + PrevAUMHash: parentHash[:], + }, + } + + if err := chonk.CommitVerifiedAUMs(data); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + stored, err := chonk.ChildAUMs(parentHash) + if err != nil { + t.Fatalf("ChildAUMs failed: %v", err) + } + if diff := cmp.Diff(data, stored, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Errorf("stored AUM differs (-want, +got):\n%s", diff) + } + }) + + t.Run("AUMMissing", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + var notExists tka.AUMHash + notExists[:][0] = 42 + if _, err := chonk.AUM(notExists); err != os.ErrNotExist { + t.Errorf("chonk.AUM(notExists).err = %v, want %v", err, os.ErrNotExist) + } + }) + + t.Run("ReadChainFromHead", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + genesis := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{1, 2}} + gHash := genesis.Hash() + intermediate := tka.AUM{PrevAUMHash: gHash[:]} + iHash := intermediate.Hash() + leaf := tka.AUM{PrevAUMHash: iHash[:]} + + commitSet := []tka.AUM{ + genesis, + intermediate, + leaf, + } + if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + t.Logf("genesis hash = %X", genesis.Hash()) + t.Logf("intermediate hash = %X", intermediate.Hash()) + t.Logf("leaf hash = %X", leaf.Hash()) + + // Read the chain from the leaf backwards. + gotLeafs, err := chonk.Heads() + if err != nil { + t.Fatalf("Heads failed: %v", err) + } + if diff := cmp.Diff([]tka.AUM{leaf}, gotLeafs); diff != "" { + t.Fatalf("leaf AUM differs (-want, +got):\n%s", diff) + } + + parent, _ := gotLeafs[0].Parent() + gotIntermediate, err := chonk.AUM(parent) + if err != nil { + t.Fatalf("AUM() failed: %v", err) + } + if diff := cmp.Diff(intermediate, gotIntermediate); diff != "" { + t.Errorf("intermediate AUM differs (-want, +got):\n%s", diff) + } + + parent, _ = gotIntermediate.Parent() + gotGenesis, err := chonk.AUM(parent) + if err != nil { + t.Fatalf("AUM() failed: %v", err) + } + if diff := cmp.Diff(genesis, gotGenesis); diff != "" { + t.Errorf("genesis AUM differs (-want, +got):\n%s", diff) + } + }) + + t.Run("LastActiveAncestor", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + + aum := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{1, 2}} + hash := aum.Hash() + + if err := chonk.SetLastActiveAncestor(hash); err != nil { + t.Fatal(err) + } + got, err := chonk.LastActiveAncestor() + if err != nil { + t.Fatal(err) + } + if got == nil || hash.String() != got.String() { + t.Errorf("LastActiveAncestor=%s, want %s", got, hash) + } + }) +} + +// RunCompactableChonkTests is a set of tests for the behaviour of a +// CompactableChonk. +// +// Any implementation of CompactableChonk should pass these tests, so we +// know all CompactableChonk behave in the same way. If you want to test +// behaviour that's specific to one implementation, write a separate test. 
+func RunCompactableChonkTests(t *testing.T, newChonk func(t *testing.T) tka.CompactableChonk) { + t.Run("PurgeAUMs", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + parentHash := randHash(t, 1) + aum := tka.AUM{MessageKind: tka.AUMNoOp, PrevAUMHash: parentHash[:]} + + if err := chonk.CommitVerifiedAUMs([]tka.AUM{aum}); err != nil { + t.Fatal(err) + } + if err := chonk.PurgeAUMs([]tka.AUMHash{aum.Hash()}); err != nil { + t.Fatal(err) + } + + if _, err := chonk.AUM(aum.Hash()); err != os.ErrNotExist { + t.Errorf("AUM() on purged AUM returned err = %v, want ErrNotExist", err) + } + }) + + t.Run("AllAUMs", func(t *testing.T) { + chonk := newChonk(t) + genesis := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{1, 2}} + gHash := genesis.Hash() + intermediate := tka.AUM{PrevAUMHash: gHash[:]} + iHash := intermediate.Hash() + leaf := tka.AUM{PrevAUMHash: iHash[:]} + + commitSet := []tka.AUM{ + genesis, + intermediate, + leaf, + } + if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + + hashes, err := chonk.AllAUMs() + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff([]tka.AUMHash{genesis.Hash(), intermediate.Hash(), leaf.Hash()}, hashes, cmpopts.SortSlices(hashesLess)); diff != "" { + t.Fatalf("AllAUMs() output differs (-want, +got):\n%s", diff) + } + }) + + t.Run("ChildAUMsOfPurgedAUM", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + parent := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{0, 0}} + + parentHash := parent.Hash() + + child1 := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{1, 1}, PrevAUMHash: parentHash[:]} + child2 := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{2, 2}, PrevAUMHash: parentHash[:]} + child3 := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{3, 3}, PrevAUMHash: parentHash[:]} + + child2Hash := child2.Hash() + grandchild2A := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{2, 2, 2, 2}, PrevAUMHash: child2Hash[:]} + grandchild2B := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{2, 2, 2, 2, 2}, PrevAUMHash: child2Hash[:]} + + commitSet := []tka.AUM{parent, child1, child2, child3, grandchild2A, grandchild2B} + + if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + + // Check the set of hashes is correct + childHashes := must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]tka.AUM{child1, child2, child3}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } + + // Purge the parent AUM, and check the set of child AUMs is unchanged + chonk.PurgeAUMs([]tka.AUMHash{parent.Hash()}) + + childHashes = must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]tka.AUM{child1, child2, child3}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } + + // Now purge one of the child AUMs, and check it no longer appears as a child of the parent + chonk.PurgeAUMs([]tka.AUMHash{child3.Hash()}) + + childHashes = must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]tka.AUM{child1, child2}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } + }) +} diff --git a/tstest/chonktest/tailchonk_test.go b/tstest/chonktest/tailchonk_test.go new file mode 100644 index 000000000..ce6b04324 --- /dev/null +++ b/tstest/chonktest/tailchonk_test.go @@ -0,0 +1,53 @@ +// 
Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package chonktest + +import ( + "testing" + + "tailscale.com/tka" + "tailscale.com/util/must" +) + +func TestImplementsChonk(t *testing.T) { + for _, tt := range []struct { + name string + newChonk func(t *testing.T) tka.Chonk + }{ + { + name: "Mem", + newChonk: func(t *testing.T) tka.Chonk { + return &tka.Mem{} + }, + }, + { + name: "FS", + newChonk: func(t *testing.T) tka.Chonk { + return must.Get(tka.ChonkDir(t.TempDir())) + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + RunChonkTests(t, tt.newChonk) + }) + } +} + +func TestImplementsCompactableChonk(t *testing.T) { + for _, tt := range []struct { + name string + newChonk func(t *testing.T) tka.CompactableChonk + }{ + { + name: "FS", + newChonk: func(t *testing.T) tka.CompactableChonk { + return must.Get(tka.ChonkDir(t.TempDir())) + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + RunCompactableChonkTests(t, tt.newChonk) + }) + } +} From 4e1c270f9016040da064d474db4fca299cdea7ea Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 20 Oct 2025 15:03:03 +0000 Subject: [PATCH 1567/1708] licenses: update license notices Signed-off-by: License Updater --- licenses/windows.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/licenses/windows.md b/licenses/windows.md index f6704cf32..b284aa136 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -42,7 +42,7 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/992244df8c5a/LICENSE)) - - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/6376defdac3f/LICENSE)) + - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/963e260a8227/LICENSE)) - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/f4da2b8ee071/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) From bf47d8e72ba672fd0f2bcc5888b01876be80e138 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 20 Oct 2025 11:04:07 -0500 Subject: [PATCH 1568/1708] VERSION.txt: this is v1.91.0 Signed-off-by: Nick Khyl --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index 636ea711a..6979a6c06 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.89.0 +1.91.0 From 3dde233cd3aed75f610b63ea33ab1baa9198c81b Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 20 Oct 2025 12:22:16 -0700 Subject: [PATCH 1569/1708] ipn/ipnlocal: use eventbus.SubscribeFunc in LocalBackend (#17524) This does not change which subscriptions are made, it only swaps them to use the SubscribeFunc API instead of Subscribe. 
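As a rough sketch of the swap, using the netmon.ChangeDelta topic from the
diff below as the example (illustrative only; the full set of handlers and
any signature changes are in the diff itself):

    // Before: subscribe to a topic, then drain its channel in a select loop.
    sub := eventbus.Subscribe[netmon.ChangeDelta](ec)
    for {
        select {
        case <-ec.Done():
            return
        case cd := <-sub.Events():
            b.linkChange(&cd)
        }
    }

    // After: register a per-topic callback and let the bus deliver events.
    eventbus.SubscribeFunc(ec, func(cd netmon.ChangeDelta) { b.linkChange(&cd) })
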
Updates #15160 Updates #17487 Change-Id: Id56027836c96942206200567a118f8bcf9c07f64 Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/local.go | 119 ++++++++++++++++-------------------------- 1 file changed, 44 insertions(+), 75 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 36e4ad8a5..ee3059de4 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -180,13 +180,13 @@ var ( // state machine generates events back out to zero or more components. type LocalBackend struct { // Elements that are thread-safe or constant after construction. - ctx context.Context // canceled by [LocalBackend.Shutdown] - ctxCancel context.CancelCauseFunc // cancels ctx - logf logger.Logf // general logging - keyLogf logger.Logf // for printing list of peers on change - statsLogf logger.Logf // for printing peers stats on change - sys *tsd.System - eventSubs eventbus.Monitor + ctx context.Context // canceled by [LocalBackend.Shutdown] + ctxCancel context.CancelCauseFunc // cancels ctx + logf logger.Logf // general logging + keyLogf logger.Logf // for printing list of peers on change + statsLogf logger.Logf // for printing peers stats on change + sys *tsd.System + eventClient *eventbus.Client health *health.Tracker // always non-nil polc policyclient.Client // always non-nil @@ -589,74 +589,44 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo // Start the event bus late, once all the assignments above are done. // (See previous race in tailscale/tailscale#17252) ec := b.Sys().Bus.Get().Client("ipnlocal.LocalBackend") - b.eventSubs = ec.Monitor(b.consumeEventbusTopics(ec)) + b.eventClient = ec + eventbus.SubscribeFunc(ec, b.onClientVersion) + eventbus.SubscribeFunc(ec, func(au controlclient.AutoUpdate) { + b.onTailnetDefaultAutoUpdate(au.Value) + }) + eventbus.SubscribeFunc(ec, func(cd netmon.ChangeDelta) { b.linkChange(&cd) }) + if buildfeatures.HasHealth { + eventbus.SubscribeFunc(ec, b.onHealthChange) + } + if buildfeatures.HasPortList { + eventbus.SubscribeFunc(ec, b.setPortlistServices) + } + eventbus.SubscribeFunc(ec, b.onAppConnectorRouteUpdate) + eventbus.SubscribeFunc(ec, b.onAppConnectorStoreRoutes) return b, nil } -// consumeEventbusTopics consumes events from all relevant -// [eventbus.Subscriber]'s and passes them to their related handler. Events are -// always handled in the order they are received, i.e. the next event is not -// read until the previous event's handler has returned. It returns when the -// [eventbus.Client] is closed. -func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { - clientVersionSub := eventbus.Subscribe[tailcfg.ClientVersion](ec) - autoUpdateSub := eventbus.Subscribe[controlclient.AutoUpdate](ec) - - var healthChange <-chan health.Change - if buildfeatures.HasHealth { - healthChangeSub := eventbus.Subscribe[health.Change](ec) - healthChange = healthChangeSub.Events() +func (b *LocalBackend) onAppConnectorRouteUpdate(ru appctype.RouteUpdate) { + // TODO(creachadair, 2025-10-02): It is currently possible for updates produced under + // one profile to arrive and be applied after a switch to another profile. + // We need to find a way to ensure that changes to the backend state are applied + // consistently in the presnce of profile changes, which currently may not happen in + // a single atomic step. 
See: https://github.com/tailscale/tailscale/issues/17414 + if err := b.AdvertiseRoute(ru.Advertise...); err != nil { + b.logf("appc: failed to advertise routes: %v: %v", ru.Advertise, err) } - changeDeltaSub := eventbus.Subscribe[netmon.ChangeDelta](ec) - routeUpdateSub := eventbus.Subscribe[appctype.RouteUpdate](ec) - storeRoutesSub := eventbus.Subscribe[appctype.RouteInfo](ec) - - var portlist <-chan PortlistServices - if buildfeatures.HasPortList { - portlistSub := eventbus.Subscribe[PortlistServices](ec) - portlist = portlistSub.Events() + if err := b.UnadvertiseRoute(ru.Unadvertise...); err != nil { + b.logf("appc: failed to unadvertise routes: %v: %v", ru.Unadvertise, err) } +} - return func(ec *eventbus.Client) { - for { - select { - case <-ec.Done(): - return - case clientVersion := <-clientVersionSub.Events(): - b.onClientVersion(&clientVersion) - case au := <-autoUpdateSub.Events(): - b.onTailnetDefaultAutoUpdate(au.Value) - case change := <-healthChange: - b.onHealthChange(change) - case changeDelta := <-changeDeltaSub.Events(): - b.linkChange(&changeDelta) - - case pl := <-portlist: - if buildfeatures.HasPortList { // redundant, but explicit for linker deadcode and humans - b.setPortlistServices(pl) - } - case ru := <-routeUpdateSub.Events(): - // TODO(creachadair, 2025-10-02): It is currently possible for updates produced under - // one profile to arrive and be applied after a switch to another profile. - // We need to find a way to ensure that changes to the backend state are applied - // consistently in the presnce of profile changes, which currently may not happen in - // a single atomic step. See: https://github.com/tailscale/tailscale/issues/17414 - if err := b.AdvertiseRoute(ru.Advertise...); err != nil { - b.logf("appc: failed to advertise routes: %v: %v", ru.Advertise, err) - } - if err := b.UnadvertiseRoute(ru.Unadvertise...); err != nil { - b.logf("appc: failed to unadvertise routes: %v: %v", ru.Unadvertise, err) - } - case ri := <-storeRoutesSub.Events(): - // Whether or not routes should be stored can change over time. - shouldStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() - if shouldStoreRoutes { - if err := b.storeRouteInfo(ri); err != nil { - b.logf("appc: failed to store route info: %v", err) - } - } - } +func (b *LocalBackend) onAppConnectorStoreRoutes(ri appctype.RouteInfo) { + // Whether or not routes should be stored can change over time. + shouldStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() + if shouldStoreRoutes { + if err := b.storeRouteInfo(ri); err != nil { + b.logf("appc: failed to store route info: %v", err) } } } @@ -1107,13 +1077,12 @@ func (b *LocalBackend) ClearCaptureSink() { // Shutdown halts the backend and all its sub-components. The backend // can no longer be used after Shutdown returns. func (b *LocalBackend) Shutdown() { - // Close the [eventbus.Client] and wait for LocalBackend.consumeEventbusTopics - // to return. Do this before acquiring b.mu: - // 1. LocalBackend.consumeEventbusTopics event handlers also acquire b.mu, - // they can deadlock with c.Shutdown(). - // 2. LocalBackend.consumeEventbusTopics event handlers may not guard against - // undesirable post/in-progress LocalBackend.Shutdown() behaviors. - b.eventSubs.Close() + // Close the [eventbus.Client] to wait for subscribers to + // return before acquiring b.mu: + // 1. Event handlers also acquire b.mu, they can deadlock with c.Shutdown(). + // 2. Event handlers may not guard against undesirable post/in-progress + // LocalBackend.Shutdown() behaviors. 
+ b.eventClient.Close() b.em.close() From ab435ce3a6164033d976a6b8ab7d4bc1b83d3acb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Mon, 20 Oct 2025 15:24:39 -0400 Subject: [PATCH 1570/1708] client/systray: warn users launching the application with sudo (#17595) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If users start the application with sudo, DBUS is likely not available or will not have the correct endpoints. We want to warn users when doing this. Closes #17593 Signed-off-by: Claus Lensbøl --- client/systray/systray.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/client/systray/systray.go b/client/systray/systray.go index 4ac080588..518b2e989 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -158,6 +158,18 @@ func init() { // onReady is called by the systray package when the menu is ready to be built. func (menu *Menu) onReady() { log.Printf("starting") + if os.Getuid() == 0 || os.Getuid() != os.Geteuid() || os.Getenv("SUDO_USER") != "" || os.Getenv("DOAS_USER") != "" { + fmt.Fprintln(os.Stderr, ` +It appears that you might be running the systray with sudo/doas. +This can lead to issues with D-Bus, and should be avoided. + +The systray application should be run with the same user as your desktop session. +This usually means that you should run the application like: + +tailscale systray + +See https://tailscale.com/kb/1597/linux-systray for more information.`) + } setAppIcon(disconnected) menu.rebuild() From 675b1c6d542f71eee5dd20808a7e1aebce945580 Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Mon, 20 Oct 2025 15:36:31 -0500 Subject: [PATCH 1571/1708] cmd/tailscale/cli: error when advertising a Service from an untagged node (#17577) Service hosts must be tagged nodes, meaning it is only valid to advertise a Service from a machine which has at least one ACL tag. Fixes tailscale/corp#33197 Signed-off-by: Harry Harpham --- cmd/tailscale/cli/serve_legacy_test.go | 6 ++- cmd/tailscale/cli/serve_v2.go | 4 ++ cmd/tailscale/cli/serve_v2_test.go | 58 +++++++++++++++++++++++--- 3 files changed, 62 insertions(+), 6 deletions(-) diff --git a/cmd/tailscale/cli/serve_legacy_test.go b/cmd/tailscale/cli/serve_legacy_test.go index c509508df..1d3854b0b 100644 --- a/cmd/tailscale/cli/serve_legacy_test.go +++ b/cmd/tailscale/cli/serve_legacy_test.go @@ -860,6 +860,7 @@ type fakeLocalServeClient struct { setCount int // counts calls to SetServeConfig queryFeatureResponse *mockQueryFeatureResponse // mock response to QueryFeature calls prefs *ipn.Prefs // fake preferences, used to test GetPrefs and SetPrefs + statusWithoutPeers *ipnstate.Status // nil for fakeStatus } // fakeStatus is a fake ipnstate.Status value for tests. 
@@ -880,7 +881,10 @@ var fakeStatus = &ipnstate.Status{ } func (lc *fakeLocalServeClient) StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { - return fakeStatus, nil + if lc.statusWithoutPeers == nil { + return fakeStatus, nil + } + return lc.statusWithoutPeers, nil } func (lc *fakeLocalServeClient) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) { diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 9b0af2cad..ca0497f8d 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -420,6 +420,10 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { svcName = e.service dnsName = e.service.String() } + tagged := st.Self.Tags != nil && st.Self.Tags.Len() > 0 + if forService && !tagged && !turnOff { + return errors.New("service hosts must be tagged nodes") + } if !forService && srvType == serveTypeTUN { return errors.New("tun mode is only supported for services") } diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 1deeaf3ea..f9653253a 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -22,6 +22,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" + "tailscale.com/types/views" ) func TestServeDevConfigMutations(t *testing.T) { @@ -33,10 +34,11 @@ func TestServeDevConfigMutations(t *testing.T) { } // group is a group of steps that share the same - // config mutation, but always starts from an empty config + // config mutation type group struct { - name string - steps []step + name string + steps []step + initialState fakeLocalServeClient // use the zero value for empty config } // creaet a temporary directory for path-based destinations @@ -814,17 +816,58 @@ func TestServeDevConfigMutations(t *testing.T) { }, }, }, + { + name: "advertise_service", + initialState: fakeLocalServeClient{ + statusWithoutPeers: &ipnstate.Status{ + BackendState: ipn.Running.String(), + Self: &ipnstate.PeerStatus{ + DNSName: "foo.test.ts.net", + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrFunnel: nil, + tailcfg.CapabilityFunnelPorts + "?ports=443,8443": nil, + }, + Tags: ptrToReadOnlySlice([]string{"some-tag"}), + }, + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + }, + steps: []step{{ + command: cmd("serve --service=svc:foo --http=80 text:foo"), + want: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:80": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Text: "foo"}, + }}, + }, + }, + }, + }, + }}, + }, + { + name: "advertise_service_from_untagged_node", + steps: []step{{ + command: cmd("serve --service=svc:foo --http=80 text:foo"), + wantErr: anyErr(), + }}, + }, } for _, group := range groups { t.Run(group.name, func(t *testing.T) { - lc := &fakeLocalServeClient{} + lc := group.initialState for i, st := range group.steps { var stderr bytes.Buffer var stdout bytes.Buffer var flagOut bytes.Buffer e := &serveEnv{ - lc: lc, + lc: &lc, testFlagOut: &flagOut, testStdout: &stdout, testStderr: &stderr, @@ -2249,3 +2292,8 @@ func exactErrMsg(want error) func(error) string { return fmt.Sprintf("\ngot: %v\nwant: %v\n", got, want) } } + +func ptrToReadOnlySlice[T any](s []T) *views.Slice[T] { + vs := views.SliceOf(s) + return &vs +} From 3944809a118153b83aa0a606e515e20b6fe6190b Mon Sep 17 00:00:00 2001 From: Alex Chan 
Date: Tue, 21 Oct 2025 09:52:23 +0100 Subject: [PATCH 1572/1708] .github/workflows: pin the google/oss-fuzz GitHub Actions Updates https://github.com/tailscale/corp/issues/31017 Signed-off-by: Alex Chan --- .github/workflows/test.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c3aa4f1bc..b6d41e937 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -613,7 +613,9 @@ jobs: steps: - name: build fuzzers id: build - uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master + # As of 21 October 2025, this repo doesn't tag releases, so this commit + # hash is just the tip of master. + uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@1242ccb5b6352601e73c00f189ac2ae397242264 # continue-on-error makes steps.build.conclusion be 'success' even if # steps.build.outcome is 'failure'. This means this step does not # contribute to the job's overall pass/fail evaluation. @@ -643,7 +645,9 @@ jobs: # report a failure because TS_FUZZ_CURRENTLY_BROKEN is set to the wrong # value. if: steps.build.outcome == 'success' - uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master + # As of 21 October 2025, this repo doesn't tag releases, so this commit + # hash is just the tip of master. + uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@1242ccb5b6352601e73c00f189ac2ae397242264 with: oss-fuzz-project-name: 'tailscale' fuzz-seconds: 150 From 2b448f0696006b76d1d4cfa227472d0153782445 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 20 Oct 2025 18:12:25 +0100 Subject: [PATCH 1573/1708] ipn, tka: improve the logging around TKA sync and AUM errors * When we do the TKA sync, log whether TKA is enabled and whether we want it to be enabled. This would help us see if a node is making bootstrap errors. * When we fail to look up an AUM locally, log the ID of the AUM rather than a generic "file does not exist" error. These AUM IDs are cryptographic hashes of the TKA state, which itself just contains public keys and signatures. These IDs aren't sensitive and logging them is safe. 
Signed-off-by: Alex Chan Updates https://github.com/tailscale/corp/issues/33594 --- ipn/ipnlocal/network-lock.go | 11 ++++++----- tka/builder.go | 2 +- tka/tailchonk.go | 10 +++++----- tka/tka.go | 10 +++++----- 4 files changed, 17 insertions(+), 16 deletions(-) diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 499082445..6acb9fe1d 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -288,8 +288,11 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie return nil } - if b.tka != nil || nm.TKAEnabled { - b.logf("tkaSyncIfNeeded: enabled=%v, head=%v", nm.TKAEnabled, nm.TKAHead) + isEnabled := b.tka != nil + wantEnabled := nm.TKAEnabled + + if isEnabled || wantEnabled { + b.logf("tkaSyncIfNeeded: isEnabled=%t, wantEnabled=%t, head=%v", isEnabled, wantEnabled, nm.TKAHead) } ourNodeKey, ok := prefs.Persist().PublicNodeKeyOK() @@ -297,8 +300,6 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie return errors.New("tkaSyncIfNeeded: no node key in prefs") } - isEnabled := b.tka != nil - wantEnabled := nm.TKAEnabled didJustEnable := false if isEnabled != wantEnabled { var ourHead tka.AUMHash @@ -948,7 +949,7 @@ func (b *LocalBackend) NetworkLockLog(maxEntries int) ([]ipnstate.NetworkLockUpd if err == os.ErrNotExist { break } - return out, fmt.Errorf("reading AUM: %w", err) + return out, fmt.Errorf("reading AUM (%v): %w", cursor, err) } update := ipnstate.NetworkLockUpdate{ diff --git a/tka/builder.go b/tka/builder.go index 642f39d77..199cec06d 100644 --- a/tka/builder.go +++ b/tka/builder.go @@ -136,7 +136,7 @@ func (b *UpdateBuilder) Finalize(storage Chonk) ([]AUM, error) { needCheckpoint = false break } - return nil, fmt.Errorf("reading AUM: %v", err) + return nil, fmt.Errorf("reading AUM (%v): %v", cursor, err) } if aum.MessageKind == AUMCheckpoint { diff --git a/tka/tailchonk.go b/tka/tailchonk.go index cb683c273..d01c5826e 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -596,7 +596,7 @@ func markActiveChain(storage Chonk, verdict map[AUMHash]retainState, minChain in // We've reached the end of the chain we have stored. return h, nil } - return AUMHash{}, fmt.Errorf("reading active chain (retainStateActive) (%d): %w", i, err) + return AUMHash{}, fmt.Errorf("reading active chain (retainStateActive) (%d, %v): %w", i, parent, err) } } @@ -616,7 +616,7 @@ func markActiveChain(storage Chonk, verdict map[AUMHash]retainState, minChain in return AUMHash{}, errors.New("reached genesis AUM without finding an appropriate lastActiveAncestor") } if next, err = storage.AUM(parent); err != nil { - return AUMHash{}, fmt.Errorf("searching for compaction target: %w", err) + return AUMHash{}, fmt.Errorf("searching for compaction target (%v): %w", parent, err) } } @@ -632,7 +632,7 @@ func markActiveChain(storage Chonk, verdict map[AUMHash]retainState, minChain in // We've reached the end of the chain we have stored. 
break } - return AUMHash{}, fmt.Errorf("reading active chain (retainStateCandidate): %w", err) + return AUMHash{}, fmt.Errorf("reading active chain (retainStateCandidate, %v): %w", parent, err) } } @@ -744,7 +744,7 @@ func markAncestorIntersectionAUMs(storage Chonk, verdict map[AUMHash]retainState if didAdjustCandidateAncestor { var next AUM if next, err = storage.AUM(candidateAncestor); err != nil { - return AUMHash{}, fmt.Errorf("searching for compaction target: %w", err) + return AUMHash{}, fmt.Errorf("searching for compaction target (%v): %w", candidateAncestor, err) } for { @@ -760,7 +760,7 @@ func markAncestorIntersectionAUMs(storage Chonk, verdict map[AUMHash]retainState return AUMHash{}, errors.New("reached genesis AUM without finding an appropriate candidateAncestor") } if next, err = storage.AUM(parent); err != nil { - return AUMHash{}, fmt.Errorf("searching for compaction target: %w", err) + return AUMHash{}, fmt.Errorf("searching for compaction target (%v): %w", parent, err) } } } diff --git a/tka/tka.go b/tka/tka.go index 234c87fe1..c37c39754 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -102,14 +102,14 @@ func computeChainCandidates(storage Chonk, lastKnownOldest *AUMHash, maxIter int iterAgain = false for j := range candidates { - parent, hasParent := candidates[j].Oldest.Parent() + parentHash, hasParent := candidates[j].Oldest.Parent() if hasParent { - parent, err := storage.AUM(parent) + parent, err := storage.AUM(parentHash) if err != nil { if err == os.ErrNotExist { continue } - return nil, fmt.Errorf("reading parent: %v", err) + return nil, fmt.Errorf("reading parent %s: %v", parentHash, err) } candidates[j].Oldest = parent if lastKnownOldest != nil && *lastKnownOldest == parent.Hash() { @@ -210,7 +210,7 @@ func fastForwardWithAdvancer( } nextAUM, err := storage.AUM(*startState.LastAUMHash) if err != nil { - return AUM{}, State{}, fmt.Errorf("reading next: %v", err) + return AUM{}, State{}, fmt.Errorf("reading next (%v): %v", *startState.LastAUMHash, err) } curs := nextAUM @@ -297,7 +297,7 @@ func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error) // If we got here, the current state is dependent on the previous. // Keep iterating backwards till thats not the case. if curs, err = storage.AUM(parent); err != nil { - return State{}, fmt.Errorf("reading parent: %v", err) + return State{}, fmt.Errorf("reading parent (%v): %v", parent, err) } } From 23359dc72706b7d9f32dcd428f22f5e4fdbfc4b7 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 21 Oct 2025 11:07:33 +0100 Subject: [PATCH 1574/1708] tka: don't try to read AUMs which are partway through being written Fixes https://github.com/tailscale/tailscale/issues/17600 Signed-off-by: Alex Chan --- tka/tailchonk.go | 10 +++++++++- tka/tailchonk_test.go | 44 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index d01c5826e..7750b0622 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -9,6 +9,7 @@ import ( "bytes" "errors" "fmt" + "log" "os" "path/filepath" "slices" @@ -403,9 +404,16 @@ func (c *FS) scanHashes(eachHashInfo func(*fsHashInfo)) error { return fmt.Errorf("reading prefix dir: %v", err) } for _, file := range files { + // Ignore files whose names aren't valid AUM hashes, which may be + // temporary files which are partway through being written, or other + // files added by the OS (like .DS_Store) which we can ignore. 
+ // TODO(alexc): it might be useful to append a suffix like `.aum` to + // filenames, so we can more easily distinguish between AUMs and + // arbitrary other files. var h AUMHash if err := h.UnmarshalText([]byte(file.Name())); err != nil { - return fmt.Errorf("invalid aum file: %s: %w", file.Name(), err) + log.Printf("ignoring unexpected non-AUM: %s: %v", file.Name(), err) + continue } info, err := c.get(h) if err != nil { diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index 086865980..1a6bad459 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -7,6 +7,7 @@ import ( "bytes" "os" "path/filepath" + "slices" "sync" "testing" "time" @@ -83,6 +84,49 @@ func TestTailchonkFS_CommitTime(t *testing.T) { } } +// If we were interrupted while writing a temporary file, AllAUMs() +// should ignore it when scanning the AUM directory. +func TestTailchonkFS_IgnoreTempFile(t *testing.T) { + base := t.TempDir() + chonk := must.Get(ChonkDir(base)) + parentHash := randHash(t, 1) + aum := AUM{MessageKind: AUMNoOp, PrevAUMHash: parentHash[:]} + must.Do(chonk.CommitVerifiedAUMs([]AUM{aum})) + + writeAUMFile := func(filename, contents string) { + t.Helper() + if err := os.MkdirAll(filepath.Join(base, filename[0:2]), os.ModePerm); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(base, filename[0:2], filename), []byte(contents), 0600); err != nil { + t.Fatal(err) + } + } + + // Check that calling AllAUMs() returns the single committed AUM + got, err := chonk.AllAUMs() + if err != nil { + t.Fatalf("AllAUMs() failed: %v", err) + } + want := []AUMHash{aum.Hash()} + if !slices.Equal(got, want) { + t.Fatalf("AllAUMs() is wrong: got %v, want %v", got, want) + } + + // Write some temporary files which are named like partially-committed AUMs, + // then check that AllAUMs() only returns the single committed AUM. 
+ writeAUMFile("AUM1234.tmp", "incomplete AUM\n") + writeAUMFile("AUM1234.tmp_123", "second incomplete AUM\n") + + got, err = chonk.AllAUMs() + if err != nil { + t.Fatalf("AllAUMs() failed: %v", err) + } + if !slices.Equal(got, want) { + t.Fatalf("AllAUMs() is wrong: got %v, want %v", got, want) + } +} + func TestMarkActiveChain(t *testing.T) { type aumTemplate struct { AUM AUM From c59c859f7d3fa1ace1427421026f5f1b6efb9b6f Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 21 Oct 2025 14:01:40 +0100 Subject: [PATCH 1575/1708] tsconsensus: mark several of these tests as known flaky Updates https://github.com/tailscale/tailscale/issues/15627 Signed-off-by: Alex Chan --- tsconsensus/tsconsensus_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tsconsensus/tsconsensus_test.go b/tsconsensus/tsconsensus_test.go index 3b51a093f..17f3d881f 100644 --- a/tsconsensus/tsconsensus_test.go +++ b/tsconsensus/tsconsensus_test.go @@ -251,6 +251,7 @@ func warnLogConfig() Config { } func TestStart(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) control, controlURL := startControl(t) ctx := context.Background() @@ -371,6 +372,7 @@ func createConsensusCluster(t testing.TB, ctx context.Context, clusterTag string } func TestApply(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" @@ -435,6 +437,7 @@ func assertCommandsWorkOnAnyNode(t testing.TB, participants []*participant) { } func TestConfig(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" @@ -474,6 +477,7 @@ func TestConfig(t *testing.T) { } func TestFollowerFailover(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" @@ -545,6 +549,7 @@ func TestFollowerFailover(t *testing.T) { } func TestRejoin(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" From c2d62d25c657c62785a0e8d06a598932fe48e6c6 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Tue, 21 Oct 2025 16:31:54 -0700 Subject: [PATCH 1576/1708] CODE_OF_CONDUCT: convert to semantic line breaks This reformats the existing text to have line breaks at sentences. This commit contains no textual changes to the code of conduct, but is done to make any subsequent changes easier to review. (sembr.org) Also apply prettier formatting for consistency. Updates #cleanup Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- CODE_OF_CONDUCT.md | 149 ++++++++++++++++----------------------------- 1 file changed, 51 insertions(+), 98 deletions(-) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index a5877cb11..51ffb60ab 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -2,146 +2,99 @@ ## Our Pledge -We are committed to creating an open, welcoming, diverse, inclusive, -healthy and respectful community. +We are committed to creating an open, welcoming, diverse, inclusive, healthy and respectful community. ## Our Standards -Examples of behavior that contributes to a positive environment for our -community include: -* Demonstrating empathy and kindness toward other people. -* Being respectful of differing opinions, viewpoints, and experiences. 
-* Giving and gracefully accepting constructive feedback. -* Accepting responsibility and apologizing to those affected by our - mistakes, and learning from the experience. -* Focusing on what is best not just for us as individuals, but for the - overall community. +Examples of behavior that contributes to a positive environment for our community include: + +- Demonstrating empathy and kindness toward other people. +- Being respectful of differing opinions, viewpoints, and experiences. +- Giving and gracefully accepting constructive feedback. +- Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience. +- Focusing on what is best not just for us as individuals, but for the overall community. Examples of unacceptable behavior include without limitation: -* The use of sexualized language or imagery, and sexual attention or - advances of any kind. -* The use of violent, intimidating or bullying language or imagery. -* Trolling, insulting or derogatory comments, and personal or - political attacks. -* Public or private harassment. -* Publishing others' private information, such as a physical or email - address, without their explicit permission. -* Spamming community channels and members, such as sending repeat messages, - low-effort content, or automated messages. -* Phishing or any similar activity; -* Distributing or promoting malware; -* Other conduct which could reasonably be considered inappropriate in a - professional setting. - -Please also see the Tailscale Acceptable Use Policy, available at -[tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). - -# Reporting Incidents - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported to Tailscale directly via info@tailscale.com, or to -the community leaders or moderators via DM or similar. + +- The use of sexualized language or imagery, and sexual attention or advances of any kind. +- The use of violent, intimidating or bullying language or imagery. +- Trolling, insulting or derogatory comments, and personal or political attacks. +- Public or private harassment. +- Publishing others' private information, such as a physical or email address, without their explicit permission. +- Spamming community channels and members, such as sending repeat messages, low-effort content, or automated messages. +- Phishing or any similar activity; +- Distributing or promoting malware; +- Other conduct which could reasonably be considered inappropriate in a professional setting. + +Please also see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). + +## Reporting Incidents + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to Tailscale directly via , or to the community leaders or moderators via DM or similar. All complaints will be reviewed and investigated promptly and fairly. We will respect the privacy and safety of the reporter of any issues. -Please note that this community is not moderated by staff 24/7, and we -do not have, and do not undertake, any obligation to prescreen, monitor, -edit, or remove any content or data, or to actively seek facts or -circumstances indicating illegal activity. While we strive to keep the -community safe and welcoming, moderation may not be immediate at all hours. 
+Please note that this community is not moderated by staff 24/7, and we do not have, and do not undertake, any obligation to prescreen, monitor, edit, or remove any content or data, or to actively seek facts or circumstances indicating illegal activity. +While we strive to keep the community safe and welcoming, moderation may not be immediate at all hours. If you encounter any issues, report them using the appropriate channels. ## Enforcement -Community leaders and moderators are responsible for clarifying and -enforcing our standards of acceptable behavior and will take appropriate -and fair corrective action in response to any behavior that they deem -inappropriate, threatening, offensive, or harmful. +Community leaders and moderators are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. -Community leaders and moderators have the right and responsibility to remove, -edit, or reject comments, commits, code, wiki edits, issues, and other -contributions that are not aligned to this Community Code of Conduct. -Tailscale retains full discretion to take action (or not) in response -to a violation of these guidelines with or without notice or liability -to you. We will interpret our policies and resolve disputes in favor of -protecting users, customers, the public, our community and our company, -as a whole. +Community leaders and moderators have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Community Code of Conduct. +Tailscale retains full discretion to take action (or not) in response to a violation of these guidelines with or without notice or liability to you. +We will interpret our policies and resolve disputes in favor of protecting users, customers, the public, our community and our company, as a whole. ## Enforcement Guidelines -Community leaders will follow these Community Impact Guidelines in -determining the consequences for any action they deem in violation of -this Code of Conduct: +Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction -Community Impact: Use of inappropriate language or other behavior -deemed unprofessional or unwelcome in the community. +Community Impact: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. -Consequence: A private, written warning from community leaders, -providing clarity around the nature of the violation and an -explanation of why the behavior was inappropriate. A public apology -may be requested. +Consequence: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. +A public apology may be requested. ### 2. Warning -Community Impact: A violation through a single incident or series -of actions. +Community Impact: A violation through a single incident or series of actions. -Consequence: A warning with consequences for continued -behavior. No interaction with the people involved, including -unsolicited interaction with those enforcing this Community Code of Conduct, -for a specified period of time. 
This includes avoiding interactions in -community spaces as well as external channels like social -media. Violating these terms may lead to a temporary or permanent ban. +Consequence: A warning with consequences for continued behavior. +No interaction with the people involved, including unsolicited interaction with those enforcing this Community Code of Conduct, for a specified period of time. +This includes avoiding interactions in community spaces as well as external channels like social media. +Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban -Community Impact: A serious violation of community standards, -including sustained inappropriate behavior. +Community Impact: A serious violation of community standards, including sustained inappropriate behavior. -Consequence: A temporary ban from any sort of interaction or -public communication with the community for a specified period of -time. No public or private interaction with the people involved, -including unsolicited interaction with those enforcing the Code of Conduct, -is allowed during this period. Violating these terms may lead to a permanent ban. +Consequence: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. +No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban -Community Impact: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of -an individual, or aggression toward or disparagement of -classes of individuals. +Community Impact: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. -Consequence: A permanent ban from any sort of public interaction -within the community. +Consequence: A permanent ban from any sort of public interaction within the community. ## Acceptable Use Policy -Violation of this Community Code of Conduct may also violate the -Tailscale Acceptable Use Policy, which may result in suspension or -termination of your Tailscale account. For more information, please -see the Tailscale Acceptable Use Policy, available at -[tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). +Violation of this Community Code of Conduct may also violate the Tailscale Acceptable Use Policy, which may result in suspension or termination of your Tailscale account. +For more information, please see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). ## Privacy -Please see the Tailscale [Privacy Policy](http://tailscale.com/privacy-policy) -for more information about how Tailscale collects, uses, discloses and protects -information. +Please see the Tailscale [Privacy Policy](http://tailscale.com/privacy-policy) for more information about how Tailscale collects, uses, discloses and protects information. ## Attribution -This Code of Conduct is adapted from the [Contributor -Covenant][homepage], version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at . 
-Community Impact Guidelines were inspired by [Mozilla's code of -conduct enforcement ladder](https://github.com/mozilla/diversity). +Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). [homepage]: https://www.contributor-covenant.org -For answers to common questions about this code of conduct, see the -FAQ at https://www.contributor-covenant.org/faq. Translations are -available at https://www.contributor-covenant.org/translations. - +For answers to common questions about this code of conduct, see the FAQ at . +Translations are available at . From afaa23c3b4c5fcbb7a62d42831a5b7e55e30eeac Mon Sep 17 00:00:00 2001 From: Will Norris Date: Tue, 21 Oct 2025 16:44:22 -0700 Subject: [PATCH 1577/1708] CODE_OF_CONDUCT: update document title Updates #cleanup Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- CODE_OF_CONDUCT.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 51ffb60ab..ef68d6768 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,4 +1,4 @@ -# Contributor Covenant Code of Conduct +# Tailscale Community Code of Conduct ## Our Pledge @@ -86,7 +86,7 @@ For more information, please see the Tailscale Acceptable Use Policy, available ## Privacy -Please see the Tailscale [Privacy Policy](http://tailscale.com/privacy-policy) for more information about how Tailscale collects, uses, discloses and protects information. +Please see the Tailscale [Privacy Policy](https://tailscale.com/privacy-policy) for more information about how Tailscale collects, uses, discloses and protects information. ## Attribution From 36ad24b20fcfa0b625516e6d5501972e640193bf Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Thu, 23 Oct 2025 14:56:56 -0700 Subject: [PATCH 1578/1708] feature/tpm: check TPM family data for compatibility (#17624) Check that the TPM we have opened is advertised as a 2.0 family device before using it for state sealing / hardware attestation. 
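
For reference, here is a minimal, self-contained sketch of how the family indicator
property maps to the "2.0" string this change compares against. It assumes the TPM 2.0
spec encoding of TPM_PT_FAMILY_INDICATOR (four ASCII octets packed most-significant-byte
first, NUL padded; 0x322E3000 for the 2.0 family); the helper name below is illustrative
and not part of this patch, which routes the value through its existing
property-to-string handling into tailcfg.TPMInfo.FamilyIndicator.

    package main

    import (
        "bytes"
        "fmt"
    )

    // familyString decodes a raw TPM_PT_FAMILY_INDICATOR property value into
    // the spec-family string, e.g. "2.0". Illustrative name, not Tailscale code.
    func familyString(v uint32) string {
        b := []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}
        return string(bytes.TrimRight(b, "\x00"))
    }

    func main() {
        fmt.Println(familyString(0x322E3000)) // prints "2.0"
    }
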
Updates #17622 Signed-off-by: Patrick O'Doherty --- feature/tpm/tpm.go | 8 ++++---- feature/tpm/tpm_test.go | 13 +++++++++++++ ipn/ipnlocal/c2n_test.go | 1 + tailcfg/tailcfg.go | 4 ++++ 4 files changed, 22 insertions(+), 4 deletions(-) diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 6acb600ec..64a702bd9 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -55,12 +55,11 @@ func init() { } func tpmSupported() bool { - tpm, err := open() - if err != nil { + hi := infoOnce() + if hi == nil { return false } - tpm.Close() - return true + return hi.FamilyIndicator == "2.0" } var verboseTPM = envknob.RegisterBool("TS_DEBUG_TPM") @@ -104,6 +103,7 @@ func info() *tailcfg.TPMInfo { {tpm2.TPMPTVendorTPMType, func(info *tailcfg.TPMInfo, value uint32) { info.Model = int(value) }}, {tpm2.TPMPTFirmwareVersion1, func(info *tailcfg.TPMInfo, value uint32) { info.FirmwareVersion += uint64(value) << 32 }}, {tpm2.TPMPTFirmwareVersion2, func(info *tailcfg.TPMInfo, value uint32) { info.FirmwareVersion += uint64(value) }}, + {tpm2.TPMPTFamilyIndicator, toStr(&info.FamilyIndicator)}, } { resp, err := tpm2.GetCapability{ Capability: tpm2.TPMCapTPMProperties, diff --git a/feature/tpm/tpm_test.go b/feature/tpm/tpm_test.go index 5401fd5c3..5c0fbafb6 100644 --- a/feature/tpm/tpm_test.go +++ b/feature/tpm/tpm_test.go @@ -133,6 +133,19 @@ func TestStore(t *testing.T) { }) } +func BenchmarkInfo(b *testing.B) { + b.StopTimer() + skipWithoutTPM(b) + b.StartTimer() + for i := 0; i < b.N; i++ { + hi := info() + if hi == nil { + b.Fatalf("tpm info error") + } + } + b.StopTimer() +} + func BenchmarkStore(b *testing.B) { skipWithoutTPM(b) b.StopTimer() diff --git a/ipn/ipnlocal/c2n_test.go b/ipn/ipnlocal/c2n_test.go index 75a57dee5..95cd5fa69 100644 --- a/ipn/ipnlocal/c2n_test.go +++ b/ipn/ipnlocal/c2n_test.go @@ -384,6 +384,7 @@ func TestRedactNetmapPrivateKeys(t *testing.T) { f(tailcfg.Service{}, "Port"): false, f(tailcfg.Service{}, "Proto"): false, f(tailcfg.Service{}, "_"): false, + f(tailcfg.TPMInfo{}, "FamilyIndicator"): false, f(tailcfg.TPMInfo{}, "FirmwareVersion"): false, f(tailcfg.TPMInfo{}, "Manufacturer"): false, f(tailcfg.TPMInfo{}, "Model"): false, diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index ea4a9d1fa..a95d0559c 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -928,6 +928,10 @@ type TPMInfo struct { // https://trustedcomputinggroup.org/resource/tpm-library-specification/. // Before revision 184, TCG used the "01.83" format for revision 183. SpecRevision int `json:",omitempty"` + + // FamilyIndicator is the TPM spec family, like "2.0". + // Read from TPM_PT_FAMILY_INDICATOR. + FamilyIndicator string `json:",omitempty"` } // Present reports whether a TPM device is present on this machine. From 672b1f0e76c074fbf922bc409f8bd1fdfc8057f3 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Thu, 23 Oct 2025 16:48:58 -0700 Subject: [PATCH 1579/1708] feature/tpm: use withSRK to probe TPM availability (#17627) On some platforms e.g. ChromeOS the owner hierarchy might not always be available to us. To avoid stale sealing exceptions later we probe to confirm it's working rather than rely solely on family indicator status. 
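
The probe follows a general "probe by doing" pattern: open the device and exercise one
representative operation before reporting it as supported, rather than trusting
advertised capability data alone. The sketch below shows that pattern in isolation; all
names and signatures are illustrative and are not the open/withSRK helpers used in this
package.

    package main

    import (
        "errors"
        "fmt"
    )

    // usable reports whether a device is really usable by opening it and
    // running one throwaway operation against it. Illustrative only.
    func usable(open func() (cleanup func(), err error), exercise func() error) bool {
        cleanup, err := open()
        if err != nil {
            return false
        }
        defer cleanup()
        return exercise() == nil
    }

    func main() {
        // A device that opens fine but whose owner hierarchy is unavailable
        // (the ChromeOS case described above) is reported as not usable.
        ok := usable(
            func() (func(), error) { return func() {}, nil },
            func() error { return errors.New("owner hierarchy unavailable") },
        )
        fmt.Println(ok) // false
    }
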
Updates #17622 Signed-off-by: Patrick O'Doherty --- feature/tpm/tpm.go | 17 ++++++++++++++++- feature/tpm/tpm_test.go | 12 ++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 64a702bd9..4b27a241f 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -59,7 +59,22 @@ func tpmSupported() bool { if hi == nil { return false } - return hi.FamilyIndicator == "2.0" + if hi.FamilyIndicator != "2.0" { + return false + } + + tpm, err := open() + if err != nil { + return false + } + defer tpm.Close() + + if err := withSRK(logger.Discard, tpm, func(srk tpm2.AuthHandle) error { + return nil + }); err != nil { + return false + } + return true } var verboseTPM = envknob.RegisterBool("TS_DEBUG_TPM") diff --git a/feature/tpm/tpm_test.go b/feature/tpm/tpm_test.go index 5c0fbafb6..afce570fc 100644 --- a/feature/tpm/tpm_test.go +++ b/feature/tpm/tpm_test.go @@ -146,6 +146,18 @@ func BenchmarkInfo(b *testing.B) { b.StopTimer() } +func BenchmarkTPMSupported(b *testing.B) { + b.StopTimer() + skipWithoutTPM(b) + b.StartTimer() + for i := 0; i < b.N; i++ { + if !tpmSupported() { + b.Fatalf("tpmSupported returned false") + } + } + b.StopTimer() +} + func BenchmarkStore(b *testing.B) { skipWithoutTPM(b) b.StopTimer() From 8576a802caabffd5c5e94d614acc8dc954f0a443 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 23 Oct 2025 17:59:25 -0700 Subject: [PATCH 1580/1708] util/linuxfw: fix 32-bit arm regression with iptables This fixes a regression from dd615c8fdd that moved the newIPTablesRunner constructor from a any-Linux-GOARCH file to one that was only amd64 and arm64, thus breaking iptables on other platforms (notably 32-bit "arm", as seen on older Pis running Buster with iptables) Tested by hand on a Raspberry Pi 2 w/ Buster + iptables for now, for lack of automated 32-bit arm tests at the moment. But filed #17629. Fixes #17623 Updates #17629 Change-Id: Iac1a3d78f35d8428821b46f0fed3f3717891c1bd Signed-off-by: Brad Fitzpatrick --- util/linuxfw/iptables.go | 4 +--- util/linuxfw/iptables_disabled.go | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/util/linuxfw/iptables.go b/util/linuxfw/iptables.go index 5bd7c528b..76c5400be 100644 --- a/util/linuxfw/iptables.go +++ b/util/linuxfw/iptables.go @@ -1,9 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && (arm64 || amd64) && !ts_omit_iptables - -// TODO(#8502): add support for more architectures +//go:build linux && !ts_omit_iptables package linuxfw diff --git a/util/linuxfw/iptables_disabled.go b/util/linuxfw/iptables_disabled.go index 8736f8399..538e33647 100644 --- a/util/linuxfw/iptables_disabled.go +++ b/util/linuxfw/iptables_disabled.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build (linux && !(arm64 || amd64)) || ts_omit_iptables +//go:build linux && ts_omit_iptables package linuxfw From d47c697748ec2cf0d3ca663811b094ec617529cd Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 21 Oct 2025 12:45:03 +0100 Subject: [PATCH 1581/1708] ipn/ipnlocal: skip TKA bootstrap request if Tailnet Lock is unavailable If you run tailscaled without passing a `--statedir`, Tailnet Lock is unavailable -- we don't have a folder to store the AUMs in. 
This causes a lot of unnecessary requests to bootstrap TKA, because every time the node receives a NetMap with some TKA state, it tries to bootstrap, fetches the bootstrap TKA state from the control plane, then fails with the error: TKA sync error: bootstrap: network-lock is not supported in this configuration, try setting --statedir We can't prevent the error, but we can skip the control plane request that immediately gets dropped on the floor. In local testing, a new node joining a tailnet caused *three* control plane requests which were unused. Updates tailscale/corp#19441 Signed-off-by: Alex Chan --- ipn/ipnlocal/network-lock.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 6acb9fe1d..f26c81011 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -288,6 +288,10 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie return nil } + if err := b.CanSupportNetworkLock(); err != nil { + return err + } + isEnabled := b.tka != nil wantEnabled := nm.TKAEnabled From 7418583e4735ac31bce0d4ba657e488a09ca488a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 24 Oct 2025 12:08:35 -0400 Subject: [PATCH 1582/1708] health: compare warnable codes to avoid errors on release branch (#17637) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This compares the warnings we actually care about and skips the unstable warnings and the changes with no warnings. Fixes #17635 Signed-off-by: Claus Lensbøl --- cmd/derper/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 2 +- health/health_test.go | 35 ++++++++++++++++++++++++----- health/warnings.go | 39 +++++++++++++++++---------------- tsconst/health.go | 26 ++++++++++++++++++++++ 5 files changed, 77 insertions(+), 27 deletions(-) create mode 100644 tsconst/health.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index b8dd28e6b..01c278fbd 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -116,7 +116,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/syncs from tailscale.com/cmd/derper+ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tka from tailscale.com/client/local+ - LW tailscale.com/tsconst from tailscale.com/net/netmon+ + tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tstime from tailscale.com/derp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate tailscale.com/tstime/rate from tailscale.com/derp/derpserver diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 96e18db43..224026f25 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -116,7 +116,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tka from tailscale.com/control/controlclient+ - tailscale.com/tsconst from tailscale.com/net/netns + tailscale.com/tsconst from tailscale.com/net/netns+ tailscale.com/tsd from tailscale.com/cmd/tailscaled+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ diff --git a/health/health_test.go b/health/health_test.go index 607071776..af7d06c8f 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -19,6 +19,7 @@ import ( 
"github.com/google/go-cmp/cmp/cmpopts" "tailscale.com/metrics" "tailscale.com/tailcfg" + "tailscale.com/tsconst" "tailscale.com/tstest" "tailscale.com/tstime" "tailscale.com/types/opt" @@ -739,21 +740,27 @@ func TestControlHealthNotifies(t *testing.T) { ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() - // Expect events at starup, before doing anything else + // Expect events at starup, before doing anything else, skip unstable + // event and no warning event as they show up at different times. synctest.Wait() - if err := eventbustest.ExpectExactly(tw, - eventbustest.Type[Change](), // warming-up - eventbustest.Type[Change](), // is-using-unstable-version - eventbustest.Type[Change](), // not-in-map-poll + if err := eventbustest.Expect(tw, + CompareWarnableCode(t, tsconst.HealthWarnableWarmingUp), + CompareWarnableCode(t, tsconst.HealthWarnableNotInMapPoll), + CompareWarnableCode(t, tsconst.HealthWarnableWarmingUp), ); err != nil { t.Errorf("startup error: %v", err) } // Only set initial state if we need to if len(test.initialState) != 0 { + t.Log("Setting initial state") ht.SetControlHealth(test.initialState) synctest.Wait() - if err := eventbustest.ExpectExactly(tw, eventbustest.Type[Change]()); err != nil { + if err := eventbustest.Expect(tw, + CompareWarnableCode(t, tsconst.HealthWarnableMagicsockReceiveFuncError), + // Skip event with no warnable + CompareWarnableCode(t, tsconst.HealthWarnableNoDERPHome), + ); err != nil { t.Errorf("initial state error: %v", err) } } @@ -771,6 +778,22 @@ func TestControlHealthNotifies(t *testing.T) { } } +func CompareWarnableCode(t *testing.T, code string) func(Change) bool { + t.Helper() + return func(c Change) bool { + t.Helper() + if c.Warnable != nil { + t.Logf("Warnable code: %s", c.Warnable.Code) + if string(c.Warnable.Code) == code { + return true + } + } else { + t.Log("No Warnable") + } + return false + } +} + func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { synctest.Test(t, func(t *testing.T) { bus := eventbustest.NewBus(t) diff --git a/health/warnings.go b/health/warnings.go index 26577130d..a9c4b34a0 100644 --- a/health/warnings.go +++ b/health/warnings.go @@ -9,6 +9,7 @@ import ( "time" "tailscale.com/feature/buildfeatures" + "tailscale.com/tsconst" "tailscale.com/version" ) @@ -26,7 +27,7 @@ This file contains definitions for the Warnables maintained within this `health` // updateAvailableWarnable is a Warnable that warns the user that an update is available. var updateAvailableWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "update-available", + Code: tsconst.HealthWarnableUpdateAvailable, Title: "Update available", Severity: SeverityLow, Text: func(args Args) string { @@ -42,7 +43,7 @@ var updateAvailableWarnable = condRegister(func() *Warnable { // securityUpdateAvailableWarnable is a Warnable that warns the user that an important security update is available. var securityUpdateAvailableWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "security-update-available", + Code: tsconst.HealthWarnableSecurityUpdateAvailable, Title: "Security update available", Severity: SeverityMedium, Text: func(args Args) string { @@ -59,7 +60,7 @@ var securityUpdateAvailableWarnable = condRegister(func() *Warnable { // so they won't be surprised by all the issues that may arise. 
var unstableWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "is-using-unstable-version", + Code: tsconst.HealthWarnableIsUsingUnstableVersion, Title: "Using an unstable version", Severity: SeverityLow, Text: StaticMessage("This is an unstable version of Tailscale meant for testing and development purposes. Please report any issues to Tailscale."), @@ -69,7 +70,7 @@ var unstableWarnable = condRegister(func() *Warnable { // NetworkStatusWarnable is a Warnable that warns the user that the network is down. var NetworkStatusWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "network-status", + Code: tsconst.HealthWarnableNetworkStatus, Title: "Network down", Severity: SeverityMedium, Text: StaticMessage("Tailscale cannot connect because the network is down. Check your Internet connection."), @@ -81,7 +82,7 @@ var NetworkStatusWarnable = condRegister(func() *Warnable { // IPNStateWarnable is a Warnable that warns the user that Tailscale is stopped. var IPNStateWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "wantrunning-false", + Code: tsconst.HealthWarnableWantRunningFalse, Title: "Tailscale off", Severity: SeverityLow, Text: StaticMessage("Tailscale is stopped."), @@ -91,7 +92,7 @@ var IPNStateWarnable = condRegister(func() *Warnable { // localLogWarnable is a Warnable that warns the user that the local log is misconfigured. var localLogWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "local-log-config-error", + Code: tsconst.HealthWarnableLocalLogConfigError, Title: "Local log misconfiguration", Severity: SeverityLow, Text: func(args Args) string { @@ -104,7 +105,7 @@ var localLogWarnable = condRegister(func() *Warnable { // and provides the last login error if available. var LoginStateWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "login-state", + Code: tsconst.HealthWarnableLoginState, Title: "Logged out", Severity: SeverityMedium, Text: func(args Args) string { @@ -121,7 +122,7 @@ var LoginStateWarnable = condRegister(func() *Warnable { // notInMapPollWarnable is a Warnable that warns the user that we are using a stale network map. var notInMapPollWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "not-in-map-poll", + Code: tsconst.HealthWarnableNotInMapPoll, Title: "Out of sync", Severity: SeverityMedium, DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, @@ -134,7 +135,7 @@ var notInMapPollWarnable = condRegister(func() *Warnable { // noDERPHomeWarnable is a Warnable that warns the user that Tailscale doesn't have a home DERP. var noDERPHomeWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "no-derp-home", + Code: tsconst.HealthWarnableNoDERPHome, Title: "No home relay server", Severity: SeverityMedium, DependsOn: []*Warnable{NetworkStatusWarnable}, @@ -147,7 +148,7 @@ var noDERPHomeWarnable = condRegister(func() *Warnable { // noDERPConnectionWarnable is a Warnable that warns the user that Tailscale couldn't connect to a specific DERP server. var noDERPConnectionWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "no-derp-connection", + Code: tsconst.HealthWarnableNoDERPConnection, Title: "Relay server unavailable", Severity: SeverityMedium, DependsOn: []*Warnable{ @@ -177,7 +178,7 @@ var noDERPConnectionWarnable = condRegister(func() *Warnable { // heard from the home DERP region for a while. 
var derpTimeoutWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "derp-timed-out", + Code: tsconst.HealthWarnableDERPTimedOut, Title: "Relay server timed out", Severity: SeverityMedium, DependsOn: []*Warnable{ @@ -198,7 +199,7 @@ var derpTimeoutWarnable = condRegister(func() *Warnable { // derpRegionErrorWarnable is a Warnable that warns the user that a DERP region is reporting an issue. var derpRegionErrorWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "derp-region-error", + Code: tsconst.HealthWarnableDERPRegionError, Title: "Relay server error", Severity: SeverityLow, DependsOn: []*Warnable{NetworkStatusWarnable}, @@ -211,7 +212,7 @@ var derpRegionErrorWarnable = condRegister(func() *Warnable { // noUDP4BindWarnable is a Warnable that warns the user that Tailscale couldn't listen for incoming UDP connections. var noUDP4BindWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "no-udp4-bind", + Code: tsconst.HealthWarnableNoUDP4Bind, Title: "NAT traversal setup failure", Severity: SeverityMedium, DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, @@ -223,7 +224,7 @@ var noUDP4BindWarnable = condRegister(func() *Warnable { // mapResponseTimeoutWarnable is a Warnable that warns the user that Tailscale hasn't received a network map from the coordination server in a while. var mapResponseTimeoutWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "mapresponse-timeout", + Code: tsconst.HealthWarnableMapResponseTimeout, Title: "Network map response timeout", Severity: SeverityMedium, DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, @@ -236,7 +237,7 @@ var mapResponseTimeoutWarnable = condRegister(func() *Warnable { // tlsConnectionFailedWarnable is a Warnable that warns the user that Tailscale could not establish an encrypted connection with a server. var tlsConnectionFailedWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "tls-connection-failed", + Code: tsconst.HealthWarnableTLSConnectionFailed, Title: "Encrypted connection failed", Severity: SeverityMedium, DependsOn: []*Warnable{NetworkStatusWarnable}, @@ -249,7 +250,7 @@ var tlsConnectionFailedWarnable = condRegister(func() *Warnable { // magicsockReceiveFuncWarnable is a Warnable that warns the user that one of the Magicsock functions is not running. var magicsockReceiveFuncWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "magicsock-receive-func-error", + Code: tsconst.HealthWarnableMagicsockReceiveFuncError, Title: "MagicSock function not running", Severity: SeverityMedium, Text: func(args Args) string { @@ -261,7 +262,7 @@ var magicsockReceiveFuncWarnable = condRegister(func() *Warnable { // testWarnable is a Warnable that is used within this package for testing purposes only. var testWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "test-warnable", + Code: tsconst.HealthWarnableTestWarnable, Title: "Test warnable", Severity: SeverityLow, Text: func(args Args) string { @@ -273,7 +274,7 @@ var testWarnable = condRegister(func() *Warnable { // applyDiskConfigWarnable is a Warnable that warns the user that there was an error applying the envknob config stored on disk. 
var applyDiskConfigWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "apply-disk-config", + Code: tsconst.HealthWarnableApplyDiskConfig, Title: "Could not apply configuration", Severity: SeverityMedium, Text: func(args Args) string { @@ -291,7 +292,7 @@ const warmingUpWarnableDuration = 5 * time.Second // the backend is fully started. var warmingUpWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "warming-up", + Code: tsconst.HealthWarnableWarmingUp, Title: "Tailscale is starting", Severity: SeverityLow, Text: StaticMessage("Tailscale is starting. Please wait."), diff --git a/tsconst/health.go b/tsconst/health.go new file mode 100644 index 000000000..5db9b1fc2 --- /dev/null +++ b/tsconst/health.go @@ -0,0 +1,26 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tsconst + +const ( + HealthWarnableUpdateAvailable = "update-available" + HealthWarnableSecurityUpdateAvailable = "security-update-available" + HealthWarnableIsUsingUnstableVersion = "is-using-unstable-version" + HealthWarnableNetworkStatus = "network-status" + HealthWarnableWantRunningFalse = "wantrunning-false" + HealthWarnableLocalLogConfigError = "local-log-config-error" + HealthWarnableLoginState = "login-state" + HealthWarnableNotInMapPoll = "not-in-map-poll" + HealthWarnableNoDERPHome = "no-derp-home" + HealthWarnableNoDERPConnection = "no-derp-connection" + HealthWarnableDERPTimedOut = "derp-timed-out" + HealthWarnableDERPRegionError = "derp-region-error" + HealthWarnableNoUDP4Bind = "no-udp4-bind" + HealthWarnableMapResponseTimeout = "mapresponse-timeout" + HealthWarnableTLSConnectionFailed = "tls-connection-failed" + HealthWarnableMagicsockReceiveFuncError = "magicsock-receive-func-error" + HealthWarnableTestWarnable = "test-warnable" + HealthWarnableApplyDiskConfig = "apply-disk-config" + HealthWarnableWarmingUp = "warming-up" +) From fd0e541e5d72aecddcb3e989c33b9aef23c7be96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 24 Oct 2025 15:00:55 -0400 Subject: [PATCH 1583/1708] net/tsdial: do not panic if setting the same eventbus twice (#17640) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updates #17638 Signed-off-by: Claus Lensbøl --- net/tsdial/tsdial.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index a0e2a11a4..c7483a125 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -96,6 +96,7 @@ type Dialer struct { dnsCache *dnscache.MessageCache // nil until first non-empty SetExitDNSDoH nextSysConnID int activeSysConns map[int]net.Conn // active connections not yet closed + bus *eventbus.Bus // only used for comparison with already set bus. eventClient *eventbus.Client eventBusSubs eventbus.Monitor } @@ -226,14 +227,17 @@ func (d *Dialer) NetMon() *netmon.Monitor { func (d *Dialer) SetBus(bus *eventbus.Bus) { d.mu.Lock() defer d.mu.Unlock() - if d.eventClient != nil { - panic("eventbus has already been set") + if d.bus == bus { + return + } else if d.bus != nil { + panic("different eventbus has already been set") } // Having multiple watchers could lead to problems, // so unregister the callback if it exists. if d.netMonUnregister != nil { d.netMonUnregister() } + d.bus = bus d.eventClient = bus.Client("tsdial.Dialer") d.eventBusSubs = d.eventClient.Monitor(d.linkChangeWatcher(d.eventClient)) } From 4346615d77a6de16854c6e78f9d49375d6424e6e Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Fri, 24 Oct 2025 14:08:47 -0700 Subject: [PATCH 1584/1708] logtail: avoid racing eventbus subscriptions with Shutdown (#17639) When the eventbus is enabled, set up the subscription for change deltas at the beginning when the client is created, rather than waiting for the first awaitInternetUp check. Otherwise, it is possible for a check to race with the client close in Shutdown, which triggers a panic. Updates #17638 Change-Id: I461c07939eca46699072b14b1814ecf28eec750c Signed-off-by: M. J. Fromberger --- logtail/logtail.go | 39 +++++++++++++++++++-------------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/logtail/logtail.go b/logtail/logtail.go index 675422890..52823fedf 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -124,6 +124,7 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger { if cfg.Bus != nil { l.eventClient = cfg.Bus.Client("logtail.Logger") + l.changeDeltaSub = eventbus.Subscribe[netmon.ChangeDelta](l.eventClient) } l.SetSockstatsLabel(sockstats.LabelLogtailLogger) l.compressLogs = cfg.CompressLogs @@ -162,6 +163,7 @@ type Logger struct { httpDoCalls atomic.Int32 sockstatsLabel atomicSocktatsLabel eventClient *eventbus.Client + changeDeltaSub *eventbus.Subscriber[netmon.ChangeDelta] procID uint32 includeProcSequence bool @@ -427,8 +429,23 @@ func (l *Logger) internetUp() bool { func (l *Logger) awaitInternetUp(ctx context.Context) { if l.eventClient != nil { - l.awaitInternetUpBus(ctx) - return + for { + if l.internetUp() { + return + } + select { + case <-ctx.Done(): + return // give up + case <-l.changeDeltaSub.Done(): + return // give up (closing down) + case delta := <-l.changeDeltaSub.Events(): + if delta.New.AnyInterfaceUp() || l.internetUp() { + fmt.Fprintf(l.stderr, "logtail: internet back up\n") + return + } + fmt.Fprintf(l.stderr, "logtail: network changed, but is not up") + } + } } upc := make(chan bool, 1) defer l.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { @@ -449,24 +466,6 @@ func (l *Logger) awaitInternetUp(ctx context.Context) { } } -func (l *Logger) awaitInternetUpBus(ctx context.Context) { - if l.internetUp() { - return - } - sub := eventbus.Subscribe[netmon.ChangeDelta](l.eventClient) - defer sub.Close() - select { - case delta := <-sub.Events(): - if delta.New.AnyInterfaceUp() { - fmt.Fprintf(l.stderr, "logtail: internet back up\n") - return - } - fmt.Fprintf(l.stderr, "logtail: network changed, but is not up") - case <-ctx.Done(): - return - } -} - // upload uploads body to the log server. // origlen indicates the pre-compression body length. // origlen of -1 indicates that the body is not compressed. From a760cbe33f4bed64b63c6118808d02b2771ff785 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Mon, 27 Oct 2025 13:18:13 -0700 Subject: [PATCH 1585/1708] control/controlclient: back out HW key attestation (#17664) Temporarily back out the TPM-based hw attestation code while we debug Windows exceptions. 
Updates tailscale/corp#31269 Signed-off-by: Patrick O'Doherty --- control/controlclient/direct.go | 22 --------------- ipn/ipnlocal/hwattest.go | 48 --------------------------------- ipn/ipnlocal/local.go | 1 - ipn/ipnlocal/profiles.go | 10 ------- ipn/ipnlocal/profiles_test.go | 1 - ipn/prefs_test.go | 2 +- types/persist/persist.go | 18 ++----------- types/persist/persist_clone.go | 4 --- types/persist/persist_test.go | 2 +- types/persist/persist_view.go | 10 +++---- 10 files changed, 8 insertions(+), 110 deletions(-) delete mode 100644 ipn/ipnlocal/hwattest.go diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 63a12b249..fe7cc235b 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -7,8 +7,6 @@ import ( "bytes" "cmp" "context" - "crypto" - "crypto/sha256" "encoding/binary" "encoding/json" "errors" @@ -948,26 +946,6 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap ConnectionHandleForTest: connectionHandleForTest, } - // If we have a hardware attestation key, sign the node key with it and send - // the key & signature in the map request. - if buildfeatures.HasTPM { - if k := persist.AsStruct().AttestationKey; k != nil && !k.IsZero() { - hwPub := key.HardwareAttestationPublicFromPlatformKey(k) - request.HardwareAttestationKey = hwPub - - t := c.clock.Now() - msg := fmt.Sprintf("%d|%s", t.Unix(), nodeKey.String()) - digest := sha256.Sum256([]byte(msg)) - sig, err := k.Sign(nil, digest[:], crypto.SHA256) - if err != nil { - c.logf("failed to sign node key with hardware attestation key: %v", err) - } else { - request.HardwareAttestationKeySignature = sig - request.HardwareAttestationKeySignatureTimestamp = t - } - } - } - var extraDebugFlags []string if buildfeatures.HasAdvertiseRoutes && hi != nil && c.netMon != nil && !c.skipIPForwardingCheck && ipForwardingBroken(hi.RoutableIPs, c.netMon.InterfaceState()) { diff --git a/ipn/ipnlocal/hwattest.go b/ipn/ipnlocal/hwattest.go deleted file mode 100644 index 2c93cad4c..000000000 --- a/ipn/ipnlocal/hwattest.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !ts_omit_tpm - -package ipnlocal - -import ( - "errors" - - "tailscale.com/feature" - "tailscale.com/types/key" - "tailscale.com/types/logger" - "tailscale.com/types/persist" -) - -func init() { - feature.HookGenerateAttestationKeyIfEmpty.Set(generateAttestationKeyIfEmpty) -} - -// generateAttestationKeyIfEmpty generates a new hardware attestation key if -// none exists. It returns true if a new key was generated and stored in -// p.AttestationKey. 
-func generateAttestationKeyIfEmpty(p *persist.Persist, logf logger.Logf) (bool, error) { - // attempt to generate a new hardware attestation key if none exists - var ak key.HardwareAttestationKey - if p != nil { - ak = p.AttestationKey - } - - if ak == nil || ak.IsZero() { - var err error - ak, err = key.NewHardwareAttestationKey() - if err != nil { - if !errors.Is(err, key.ErrUnsupported) { - logf("failed to create hardware attestation key: %v", err) - } - } else if ak != nil { - logf("using new hardware attestation key: %v", ak.Public()) - if p == nil { - p = &persist.Persist{} - } - p.AttestationKey = ak - return true, nil - } - } - return false, nil -} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ee3059de4..7b2257cca 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1185,7 +1185,6 @@ func stripKeysFromPrefs(p ipn.PrefsView) ipn.PrefsView { p2.Persist.PrivateNodeKey = key.NodePrivate{} p2.Persist.OldPrivateNodeKey = key.NodePrivate{} p2.Persist.NetworkLockKey = key.NLPrivate{} - p2.Persist.AttestationKey = nil return p2.View() } diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 9c2176378..3e80cdaa9 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -19,9 +19,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnext" "tailscale.com/tailcfg" - "tailscale.com/types/key" "tailscale.com/types/logger" - "tailscale.com/types/persist" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" ) @@ -656,14 +654,6 @@ func (pm *profileManager) loadSavedPrefs(k ipn.StateKey) (ipn.PrefsView, error) return ipn.PrefsView{}, err } savedPrefs := ipn.NewPrefs() - - // if supported by the platform, create an empty hardware attestation key to use when deserializing - // to avoid type exceptions from json.Unmarshaling into an interface{}. - hw, _ := key.NewEmptyHardwareAttestationKey() - savedPrefs.Persist = &persist.Persist{ - AttestationKey: hw, - } - if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil { return ipn.PrefsView{}, fmt.Errorf("parsing saved prefs: %v", err) } diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index deeab2ade..60c92ff8d 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -151,7 +151,6 @@ func TestProfileDupe(t *testing.T) { ID: tailcfg.UserID(user), LoginName: fmt.Sprintf("user%d@example.com", user), }, - AttestationKey: nil, } } user1Node1 := newPersist(1, 1) diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 233616409..3339a631c 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -501,7 +501,7 @@ func TestPrefsPretty(t *testing.T) { }, }, "linux", - `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u="" ak=-}}`, + `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u=""}}`, }, { Prefs{ diff --git a/types/persist/persist.go b/types/persist/persist.go index 4b62c79dd..d888a6afb 100644 --- a/types/persist/persist.go +++ b/types/persist/persist.go @@ -26,7 +26,6 @@ type Persist struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID - AttestationKey key.HardwareAttestationKey `json:",omitempty"` // DisallowedTKAStateIDs stores the tka.State.StateID values which // this node will not operate network lock on. 
This is used to @@ -85,20 +84,11 @@ func (p *Persist) Equals(p2 *Persist) bool { return false } - var pub, p2Pub key.HardwareAttestationPublic - if p.AttestationKey != nil && !p.AttestationKey.IsZero() { - pub = key.HardwareAttestationPublicFromPlatformKey(p.AttestationKey) - } - if p2.AttestationKey != nil && !p2.AttestationKey.IsZero() { - p2Pub = key.HardwareAttestationPublicFromPlatformKey(p2.AttestationKey) - } - return p.PrivateNodeKey.Equal(p2.PrivateNodeKey) && p.OldPrivateNodeKey.Equal(p2.OldPrivateNodeKey) && p.UserProfile.Equal(&p2.UserProfile) && p.NetworkLockKey.Equal(p2.NetworkLockKey) && p.NodeID == p2.NodeID && - pub.Equal(p2Pub) && reflect.DeepEqual(nilIfEmpty(p.DisallowedTKAStateIDs), nilIfEmpty(p2.DisallowedTKAStateIDs)) } @@ -106,16 +96,12 @@ func (p *Persist) Pretty() string { var ( ok, nk key.NodePublic ) - akString := "-" if !p.OldPrivateNodeKey.IsZero() { ok = p.OldPrivateNodeKey.Public() } if !p.PrivateNodeKey.IsZero() { nk = p.PublicNodeKey() } - if p.AttestationKey != nil && !p.AttestationKey.IsZero() { - akString = fmt.Sprintf("%v", p.AttestationKey.Public()) - } - return fmt.Sprintf("Persist{o=%v, n=%v u=%#v ak=%s}", - ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName, akString) + return fmt.Sprintf("Persist{o=%v, n=%v u=%#v}", + ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName) } diff --git a/types/persist/persist_clone.go b/types/persist/persist_clone.go index 9dbe7e0f6..680419ff2 100644 --- a/types/persist/persist_clone.go +++ b/types/persist/persist_clone.go @@ -19,9 +19,6 @@ func (src *Persist) Clone() *Persist { } dst := new(Persist) *dst = *src - if src.AttestationKey != nil { - dst.AttestationKey = src.AttestationKey.Clone() - } dst.DisallowedTKAStateIDs = append(src.DisallowedTKAStateIDs[:0:0], src.DisallowedTKAStateIDs...) 
return dst } @@ -34,6 +31,5 @@ var _PersistCloneNeedsRegeneration = Persist(struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID - AttestationKey key.HardwareAttestationKey DisallowedTKAStateIDs []string }{}) diff --git a/types/persist/persist_test.go b/types/persist/persist_test.go index 713114b74..dbf2a6d8c 100644 --- a/types/persist/persist_test.go +++ b/types/persist/persist_test.go @@ -21,7 +21,7 @@ func fieldsOf(t reflect.Type) (fields []string) { } func TestPersistEqual(t *testing.T) { - persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "AttestationKey", "DisallowedTKAStateIDs"} + persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "DisallowedTKAStateIDs"} if have := fieldsOf(reflect.TypeFor[Persist]()); !reflect.DeepEqual(have, persistHandles) { t.Errorf("Persist.Equal check might be out of sync\nfields: %q\nhandled: %q\n", have, persistHandles) diff --git a/types/persist/persist_view.go b/types/persist/persist_view.go index dbf8294ef..7d1507468 100644 --- a/types/persist/persist_view.go +++ b/types/persist/persist_view.go @@ -89,11 +89,10 @@ func (v *PersistView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { func (v PersistView) PrivateNodeKey() key.NodePrivate { return v.ж.PrivateNodeKey } // needed to request key rotation -func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } -func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } -func (v PersistView) NetworkLockKey() key.NLPrivate { return v.ж.NetworkLockKey } -func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } -func (v PersistView) AttestationKey() tailcfg.StableNodeID { panic("unsupported") } +func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } +func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } +func (v PersistView) NetworkLockKey() key.NLPrivate { return v.ж.NetworkLockKey } +func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } // DisallowedTKAStateIDs stores the tka.State.StateID values which // this node will not operate network lock on. 
This is used to @@ -111,6 +110,5 @@ var _PersistViewNeedsRegeneration = Persist(struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID - AttestationKey key.HardwareAttestationKey DisallowedTKAStateIDs []string }{}) From 34e992f59db2feed0c5cd857d4829ea5ef5e0298 Mon Sep 17 00:00:00 2001 From: Max Coulombe Date: Mon, 27 Oct 2025 16:33:03 -0400 Subject: [PATCH 1586/1708] feature/identityfederation: strip query params on clientID (#17666) Updates #9192 Signed-off-by: mcoulombe --- .../identityfederation/identityfederation.go | 19 +++++++++++-------- .../identityfederation_test.go | 10 +++++++++- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/feature/identityfederation/identityfederation.go b/feature/identityfederation/identityfederation.go index a4470fc27..ab1b65f12 100644 --- a/feature/identityfederation/identityfederation.go +++ b/feature/identityfederation/identityfederation.go @@ -42,12 +42,12 @@ func resolveAuthKey(ctx context.Context, baseURL, clientID, idToken string, tags baseURL = ipn.DefaultControlURL } - ephemeral, preauth, err := parseOptionalAttributes(clientID) + strippedID, ephemeral, preauth, err := parseOptionalAttributes(clientID) if err != nil { return "", fmt.Errorf("failed to parse optional config attributes: %w", err) } - accessToken, err := exchangeJWTForToken(ctx, baseURL, clientID, idToken) + accessToken, err := exchangeJWTForToken(ctx, baseURL, strippedID, idToken) if err != nil { return "", fmt.Errorf("failed to exchange JWT for access token: %w", err) } @@ -79,15 +79,15 @@ func resolveAuthKey(ctx context.Context, baseURL, clientID, idToken string, tags return authkey, nil } -func parseOptionalAttributes(clientID string) (ephemeral bool, preauthorized bool, err error) { - _, attrs, found := strings.Cut(clientID, "?") +func parseOptionalAttributes(clientID string) (strippedID string, ephemeral bool, preauthorized bool, err error) { + strippedID, attrs, found := strings.Cut(clientID, "?") if !found { - return true, false, nil + return clientID, true, false, nil } parsed, err := url.ParseQuery(attrs) if err != nil { - return false, false, fmt.Errorf("failed to parse optional config attributes: %w", err) + return "", false, false, fmt.Errorf("failed to parse optional config attributes: %w", err) } for k := range parsed { @@ -97,11 +97,14 @@ func parseOptionalAttributes(clientID string) (ephemeral bool, preauthorized boo case "preauthorized": preauthorized, err = strconv.ParseBool(parsed.Get(k)) default: - return false, false, fmt.Errorf("unknown optional config attribute %q", k) + return "", false, false, fmt.Errorf("unknown optional config attribute %q", k) } } + if err != nil { + return "", false, false, err + } - return ephemeral, preauthorized, err + return strippedID, ephemeral, preauthorized, nil } // exchangeJWTForToken exchanges a JWT for a Tailscale access token. 
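
To make the new behavior concrete, here is a stand-alone sketch of the clientID
splitting this patch introduces: everything before "?" is the OAuth client ID used for
the token exchange, and the query attributes only shape the generated auth key. The
snippet re-derives the same result with the standard library for illustration; it is not
the package's parseOptionalAttributes itself.

    package main

    import (
        "fmt"
        "net/url"
        "strconv"
        "strings"
    )

    func main() {
        // Example clientID carrying optional attributes, as accepted by the CLI.
        clientID := "client-123?ephemeral=false&preauthorized=true"

        // Split off the attributes so only the bare ID reaches the token exchange.
        id, attrs, _ := strings.Cut(clientID, "?")
        q, _ := url.ParseQuery(attrs)
        ephemeral, _ := strconv.ParseBool(q.Get("ephemeral"))
        preauthorized, _ := strconv.ParseBool(q.Get("preauthorized"))

        fmt.Println(id, ephemeral, preauthorized) // client-123 false true
    }
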
diff --git a/feature/identityfederation/identityfederation_test.go b/feature/identityfederation/identityfederation_test.go index 7b75852a8..a673a4298 100644 --- a/feature/identityfederation/identityfederation_test.go +++ b/feature/identityfederation/identityfederation_test.go @@ -87,6 +87,7 @@ func TestParseOptionalAttributes(t *testing.T) { tests := []struct { name string clientID string + wantClientID string wantEphemeral bool wantPreauth bool wantErr string @@ -94,6 +95,7 @@ func TestParseOptionalAttributes(t *testing.T) { { name: "default values", clientID: "client-123", + wantClientID: "client-123", wantEphemeral: true, wantPreauth: false, wantErr: "", @@ -101,6 +103,7 @@ func TestParseOptionalAttributes(t *testing.T) { { name: "custom values", clientID: "client-123?ephemeral=false&preauthorized=true", + wantClientID: "client-123", wantEphemeral: false, wantPreauth: true, wantErr: "", @@ -108,6 +111,7 @@ func TestParseOptionalAttributes(t *testing.T) { { name: "unknown attribute", clientID: "client-123?unknown=value", + wantClientID: "", wantEphemeral: false, wantPreauth: false, wantErr: `unknown optional config attribute "unknown"`, @@ -115,6 +119,7 @@ func TestParseOptionalAttributes(t *testing.T) { { name: "invalid value", clientID: "client-123?ephemeral=invalid", + wantClientID: "", wantEphemeral: false, wantPreauth: false, wantErr: `strconv.ParseBool: parsing "invalid": invalid syntax`, @@ -123,7 +128,7 @@ func TestParseOptionalAttributes(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ephemeral, preauth, err := parseOptionalAttributes(tt.clientID) + strippedID, ephemeral, preauth, err := parseOptionalAttributes(tt.clientID) if tt.wantErr != "" { if err == nil { t.Errorf("parseOptionalAttributes() error = nil, want %q", tt.wantErr) @@ -138,6 +143,9 @@ func TestParseOptionalAttributes(t *testing.T) { return } } + if strippedID != tt.wantClientID { + t.Errorf("parseOptionalAttributes() strippedID = %v, want %v", strippedID, tt.wantClientID) + } if ephemeral != tt.wantEphemeral { t.Errorf("parseOptionalAttributes() ephemeral = %v, want %v", ephemeral, tt.wantEphemeral) } From f4e2720821d4975de8a1964b9274db3f19da48d2 Mon Sep 17 00:00:00 2001 From: srwareham Date: Mon, 27 Oct 2025 15:20:57 -0700 Subject: [PATCH 1587/1708] cmd/tailscale/cli: move JetKVM scripts to /userdata/init.d for persistence (#17610) Updates #16524 Updates jetkvm/rv1106-system#34 Signed-off-by: srwareham --- cmd/tailscale/cli/configure-jetkvm.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/tailscale/cli/configure-jetkvm.go b/cmd/tailscale/cli/configure-jetkvm.go index a8e0a7cb5..c80bf6736 100644 --- a/cmd/tailscale/cli/configure-jetkvm.go +++ b/cmd/tailscale/cli/configure-jetkvm.go @@ -48,9 +48,12 @@ func runConfigureJetKVM(ctx context.Context, args []string) error { if runtime.GOOS != "linux" || distro.Get() != distro.JetKVM { return errors.New("only implemented on JetKVM") } - err := os.WriteFile("/etc/init.d/S22tailscale", bytes.TrimLeft([]byte(` + if err := os.MkdirAll("/userdata/init.d", 0755); err != nil { + return errors.New("unable to create /userdata/init.d") + } + err := os.WriteFile("/userdata/init.d/S22tailscale", bytes.TrimLeft([]byte(` #!/bin/sh -# /etc/init.d/S22tailscale +# /userdata/init.d/S22tailscale # Start/stop tailscaled case "$1" in From 576aacd459406f3b8d76a1978825b24aa2c56291 Mon Sep 17 00:00:00 2001 From: Gesa Stupperich Date: Fri, 3 Oct 2025 17:52:41 +0100 Subject: [PATCH 1588/1708] ipn/ipnlocal/serve: add grant headers 
Updates tailscale/corp/#28372 Signed-off-by: Gesa Stupperich --- cmd/tailscale/cli/serve_legacy.go | 24 +-- cmd/tailscale/cli/serve_v2.go | 36 ++++- cmd/tailscale/cli/serve_v2_test.go | 49 +++++- ipn/ipn_clone.go | 10 +- ipn/ipn_view.go | 12 +- ipn/ipnlocal/serve.go | 68 +++++++- ipn/ipnlocal/serve_test.go | 242 +++++++++++++++++++++++++++++ ipn/serve.go | 2 + 8 files changed, 416 insertions(+), 27 deletions(-) diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 95808fdf2..95e518998 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -162,20 +162,20 @@ type serveEnv struct { json bool // output JSON (status only for now) // v2 specific flags - bg bgBoolFlag // background mode - setPath string // serve path - https uint // HTTP port - http uint // HTTP port - tcp uint // TCP port - tlsTerminatedTCP uint // a TLS terminated TCP port - subcmd serveMode // subcommand - yes bool // update without prompt - service tailcfg.ServiceName // service name - tun bool // redirect traffic to OS for service - allServices bool // apply config file to all services + bg bgBoolFlag // background mode + setPath string // serve path + https uint // HTTP port + http uint // HTTP port + tcp uint // TCP port + tlsTerminatedTCP uint // a TLS terminated TCP port + subcmd serveMode // subcommand + yes bool // update without prompt + service tailcfg.ServiceName // service name + tun bool // redirect traffic to OS for service + allServices bool // apply config file to all services + userCaps []tailcfg.PeerCapability // user capabilities to forward lc localServeClient // localClient interface, specific to serve - // optional stuff for tests: testFlagOut io.Writer testStdout io.Writer diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index ca0497f8d..4921bf31f 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -96,6 +96,28 @@ func (b *bgBoolFlag) String() string { return strconv.FormatBool(b.Value) } +type userCapsFlag struct { + Value *[]tailcfg.PeerCapability +} + +// Set appends s to the list of userCaps. +func (u *userCapsFlag) Set(s string) error { + if s == "" { + return nil + } + *u.Value = append(*u.Value, tailcfg.PeerCapability(s)) + return nil +} + +// String returns the string representation of the userCaps slice. +func (u *userCapsFlag) String() string { + s := make([]string, len(*u.Value)) + for i, v := range *u.Value { + s[i] = string(v) + } + return strings.Join(s, ",") +} + var serveHelpCommon = strings.TrimSpace(` can be a file, directory, text, or most commonly the location to a service running on the local machine. 
The location to the location service can be expressed as a port number (e.g., 3000), @@ -199,6 +221,7 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { fs.UintVar(&e.https, "https", 0, "Expose an HTTPS server at the specified port (default mode)") if subcmd == serve { fs.UintVar(&e.http, "http", 0, "Expose an HTTP server at the specified port") + fs.Var(&userCapsFlag{Value: &e.userCaps}, "usercaps", "User capability to forward to the server (can be specified multiple times)") } fs.UintVar(&e.tcp, "tcp", 0, "Expose a TCP forwarder to forward raw TCP packets at the specified port") fs.UintVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", 0, "Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port") @@ -469,7 +492,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { if len(args) > 0 { target = args[0] } - err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix) + err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix, e.userCaps) msg = e.messageForPort(sc, st, dnsName, srvType, srvPort) } if err != nil { @@ -790,7 +813,7 @@ func (e *serveEnv) runServeSetConfig(ctx context.Context, args []string) (err er for name, details := range scf.Services { for ppr, ep := range details.Endpoints { if ep.Protocol == conffile.ProtoTUN { - err := e.setServe(sc, name.String(), serveTypeTUN, 0, "", "", false, magicDNSSuffix) + err := e.setServe(sc, name.String(), serveTypeTUN, 0, "", "", false, magicDNSSuffix, nil) if err != nil { return err } @@ -812,7 +835,7 @@ func (e *serveEnv) runServeSetConfig(ctx context.Context, args []string) (err er portStr := fmt.Sprint(destPort) target = fmt.Sprintf("%s://%s", ep.Protocol, net.JoinHostPort(ep.Destination, portStr)) } - err := e.setServe(sc, name.String(), serveType, port, "/", target, false, magicDNSSuffix) + err := e.setServe(sc, name.String(), serveType, port, "/", target, false, magicDNSSuffix, nil) if err != nil { return fmt.Errorf("service %q: %w", name, err) } @@ -915,12 +938,12 @@ func serveFromPortHandler(tcp *ipn.TCPPortHandler) serveType { } } -func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool, mds string) error { +func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool, mds string, caps []tailcfg.PeerCapability) error { // update serve config based on the type switch srvType { case serveTypeHTTPS, serveTypeHTTP: useTLS := srvType == serveTypeHTTPS - err := e.applyWebServe(sc, dnsName, srvPort, useTLS, mount, target, mds) + err := e.applyWebServe(sc, dnsName, srvPort, useTLS, mount, target, mds, caps) if err != nil { return fmt.Errorf("failed apply web serve: %w", err) } @@ -1084,7 +1107,7 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN return output.String() } -func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target string, mds string) error { +func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target, mds string, caps []tailcfg.PeerCapability) error { h := new(ipn.HTTPHandler) switch { case strings.HasPrefix(target, "text:"): @@ -1118,6 +1141,7 @@ func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort ui return err } h.Proxy = t + h.UserCaps = caps } // TODO: validation needs to 
check nested foreground configs diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index f9653253a..d039c52cc 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -857,6 +857,53 @@ func TestServeDevConfigMutations(t *testing.T) { wantErr: anyErr(), }}, }, + { + name: "forward_grant_header", + steps: []step{ + { + command: cmd("serve --bg --usercaps=example.com/cap/foo 3000"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": { + Proxy: "http://127.0.0.1:3000", + UserCaps: []tailcfg.PeerCapability{"example.com/cap/foo"}, + }, + }}, + }, + }, + }, + { + command: cmd("serve --bg --usercaps=example.com/cap/foo --usercaps=example.com/cap/bar 3000"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": { + Proxy: "http://127.0.0.1:3000", + UserCaps: []tailcfg.PeerCapability{"example.com/cap/foo", "example.com/cap/bar"}, + }, + }}, + }, + }, + }, + { + command: cmd("serve --bg --usercaps=example.com/cap/bar 3000"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": { + Proxy: "http://127.0.0.1:3000", + UserCaps: []tailcfg.PeerCapability{"example.com/cap/bar"}, + }, + }}, + }, + }, + }, + }, + }, } for _, group := range groups { @@ -2009,7 +2056,7 @@ func TestSetServe(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel, magicDNSSuffix) + err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel, magicDNSSuffix, nil) if err != nil && !tt.expectErr { t.Fatalf("got error: %v; did not expect error.", err) } diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 3d67efc6f..54511094b 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -232,14 +232,16 @@ func (src *HTTPHandler) Clone() *HTTPHandler { } dst := new(HTTPHandler) *dst = *src + dst.UserCaps = append(src.UserCaps[:0:0], src.UserCaps...) return dst } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _HTTPHandlerCloneNeedsRegeneration = HTTPHandler(struct { - Path string - Proxy string - Text string + Path string + Proxy string + Text string + UserCaps []tailcfg.PeerCapability }{}) // Clone makes a deep copy of WebServerConfig. @@ -256,7 +258,7 @@ func (src *WebServerConfig) Clone() *WebServerConfig { if v == nil { dst.Handlers[k] = nil } else { - dst.Handlers[k] = ptr.To(*v) + dst.Handlers[k] = v.Clone() } } } diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 1c7639f6f..a87b6c42e 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -891,11 +891,17 @@ func (v HTTPHandlerView) Proxy() string { return v.ж.Proxy } // plaintext to serve (primarily for testing) func (v HTTPHandlerView) Text() string { return v.ж.Text } +// peer capabilities to forward in grant header, e.g. 
example.com/cap/mon +func (v HTTPHandlerView) UserCaps() views.Slice[tailcfg.PeerCapability] { + return views.SliceOf(v.ж.UserCaps) +} + // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _HTTPHandlerViewNeedsRegeneration = HTTPHandler(struct { - Path string - Proxy string - Text string + Path string + Proxy string + Text string + UserCaps []tailcfg.PeerCapability }{}) // View returns a read-only view of WebServerConfig. diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 3c967fd1e..799161a76 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -40,6 +40,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/lazy" "tailscale.com/types/logger" + "tailscale.com/types/views" "tailscale.com/util/backoff" "tailscale.com/util/clientmetric" "tailscale.com/util/ctxkey" @@ -64,6 +65,7 @@ func init() { const ( contentTypeHeader = "Content-Type" grpcBaseContentType = "application/grpc" + grantHeaderMaxSize = 15360 // 15 KiB ) // ErrETagMismatch signals that the given @@ -79,7 +81,8 @@ type serveHTTPContext struct { DestPort uint16 // provides funnel-specific context, nil if not funneled - Funnel *funnelFlow + Funnel *funnelFlow + PeerCapsFilter views.Slice[tailcfg.PeerCapability] } // funnelFlow represents a funneled connection initiated via IngressPeer @@ -803,6 +806,7 @@ func (rp *reverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { r.Out.Host = r.In.Host addProxyForwardedHeaders(r) rp.lb.addTailscaleIdentityHeaders(r) + rp.lb.addTailscaleGrantHeader(r) }} // There is no way to autodetect h2c as per RFC 9113 @@ -927,6 +931,62 @@ func encTailscaleHeaderValue(v string) string { return mime.QEncoding.Encode("utf-8", v) } +func (b *LocalBackend) addTailscaleGrantHeader(r *httputil.ProxyRequest) { + r.Out.Header.Del("Tailscale-User-Capabilities") + + c, ok := serveHTTPContextKey.ValueOk(r.Out.Context()) + if !ok || c.Funnel != nil { + return + } + filter := c.PeerCapsFilter + if filter.IsNil() { + return + } + peerCaps := b.PeerCaps(c.SrcAddr.Addr()) + if peerCaps == nil { + return + } + + peerCapsFiltered := make(map[tailcfg.PeerCapability][]tailcfg.RawMessage, filter.Len()) + for _, cap := range filter.AsSlice() { + if peerCaps.HasCapability(cap) { + peerCapsFiltered[cap] = peerCaps[cap] + } + } + + serialized, truncated, err := serializeUpToNBytes(peerCapsFiltered, grantHeaderMaxSize) + if err != nil { + b.logf("serve: failed to serialize PeerCapMap: %v", err) + return + } + if truncated { + b.logf("serve: serialized PeerCapMap exceeds %d bytes, forwarding truncated PeerCapMap", grantHeaderMaxSize) + } + + r.Out.Header.Set("Tailscale-User-Capabilities", encTailscaleHeaderValue(serialized)) +} + +// serializeUpToNBytes serializes capMap. It arbitrarily truncates entries from the capMap +// if the size of the serialized capMap would exceed N bytes. +func serializeUpToNBytes(capMap tailcfg.PeerCapMap, N int) (string, bool, error) { + numBytes := 0 + capped := false + result := tailcfg.PeerCapMap{} + for k, v := range capMap { + numBytes += len(k) + len(v) + if numBytes > N { + capped = true + break + } + result[k] = v + } + marshalled, err := json.Marshal(result) + if err != nil { + return "", false, err + } + return string(marshalled), capped, nil +} + // serveWebHandler is an http.HandlerFunc that maps incoming requests to the // correct *http. 
func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) { @@ -950,6 +1010,12 @@ func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) { http.Error(w, "unknown proxy destination", http.StatusInternalServerError) return } + // Inject user capabilities to forward into the request context + c, ok := serveHTTPContextKey.ValueOk(r.Context()) + if !ok { + return + } + c.PeerCapsFilter = h.UserCaps() h := p.(http.Handler) // Trim the mount point from the URL path before proxying. (#6571) if r.URL.Path != "/" { diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index b4461d12f..5d880e185 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -16,6 +16,7 @@ import ( "errors" "fmt" "io" + "mime" "net/http" "net/http/httptest" "net/netip" @@ -27,6 +28,7 @@ import ( "testing" "time" + "tailscale.com/control/controlclient" "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" @@ -41,6 +43,7 @@ import ( "tailscale.com/util/must" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/wgengine" + "tailscale.com/wgengine/filter" ) func TestExpandProxyArg(t *testing.T) { @@ -768,6 +771,156 @@ func TestServeHTTPProxyHeaders(t *testing.T) { } } +func TestServeHTTPProxyGrantHeader(t *testing.T) { + b := newTestBackend(t) + + nm := b.NetMap() + matches, err := filter.MatchesFromFilterRules([]tailcfg.FilterRule{ + { + SrcIPs: []string{"100.150.151.152"}, + CapGrant: []tailcfg.CapGrant{{ + Dsts: []netip.Prefix{ + netip.MustParsePrefix("100.150.151.151/32"), + }, + CapMap: tailcfg.PeerCapMap{ + "example.com/cap/interesting": []tailcfg.RawMessage{ + `{"role": "🐿"}`, + }, + }, + }}, + }, + { + SrcIPs: []string{"100.150.151.153"}, + CapGrant: []tailcfg.CapGrant{{ + Dsts: []netip.Prefix{ + netip.MustParsePrefix("100.150.151.151/32"), + }, + CapMap: tailcfg.PeerCapMap{ + "example.com/cap/boring": []tailcfg.RawMessage{ + `{"role": "Viewer"}`, + }, + "example.com/cap/irrelevant": []tailcfg.RawMessage{ + `{"role": "Editor"}`, + }, + }, + }}, + }, + }) + if err != nil { + t.Fatal(err) + } + nm.PacketFilter = matches + b.SetControlClientStatus(nil, controlclient.Status{NetMap: nm}) + + // Start test serve endpoint. + testServ := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + // Piping all the headers through the response writer + // so we can check their values in tests below. 
+ for key, val := range r.Header { + w.Header().Add(key, strings.Join(val, ",")) + } + }, + )) + defer testServ.Close() + + conf := &ipn.ServeConfig{ + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "example.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": { + Proxy: testServ.URL, + UserCaps: []tailcfg.PeerCapability{"example.com/cap/interesting", "example.com/cap/boring"}, + }, + }}, + }, + } + if err := b.SetServeConfig(conf, ""); err != nil { + t.Fatal(err) + } + + type headerCheck struct { + header string + want string + } + + tests := []struct { + name string + srcIP string + wantHeaders []headerCheck + }{ + { + name: "request-from-user-within-tailnet", + srcIP: "100.150.151.152", + wantHeaders: []headerCheck{ + {"X-Forwarded-Proto", "https"}, + {"X-Forwarded-For", "100.150.151.152"}, + {"Tailscale-User-Login", "someone@example.com"}, + {"Tailscale-User-Name", "Some One"}, + {"Tailscale-User-Profile-Pic", "https://example.com/photo.jpg"}, + {"Tailscale-Headers-Info", "https://tailscale.com/s/serve-headers"}, + {"Tailscale-User-Capabilities", `{"example.com/cap/interesting":[{"role":"🐿"}]}`}, + }, + }, + { + name: "request-from-tagged-node-within-tailnet", + srcIP: "100.150.151.153", + wantHeaders: []headerCheck{ + {"X-Forwarded-Proto", "https"}, + {"X-Forwarded-For", "100.150.151.153"}, + {"Tailscale-User-Login", ""}, + {"Tailscale-User-Name", ""}, + {"Tailscale-User-Profile-Pic", ""}, + {"Tailscale-Headers-Info", ""}, + {"Tailscale-User-Capabilities", `{"example.com/cap/boring":[{"role":"Viewer"}]}`}, + }, + }, + { + name: "request-from-outside-tailnet", + srcIP: "100.160.161.162", + wantHeaders: []headerCheck{ + {"X-Forwarded-Proto", "https"}, + {"X-Forwarded-For", "100.160.161.162"}, + {"Tailscale-User-Login", ""}, + {"Tailscale-User-Name", ""}, + {"Tailscale-User-Profile-Pic", ""}, + {"Tailscale-Headers-Info", ""}, + {"Tailscale-User-Capabilities", ""}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := &http.Request{ + URL: &url.URL{Path: "/"}, + TLS: &tls.ConnectionState{ServerName: "example.ts.net"}, + } + req = req.WithContext(serveHTTPContextKey.WithValue(req.Context(), &serveHTTPContext{ + DestPort: 443, + SrcAddr: netip.MustParseAddrPort(tt.srcIP + ":1234"), // random src port for tests + })) + + w := httptest.NewRecorder() + b.serveWebHandler(w, req) + + // Verify the headers. The contract with users is that identity and grant headers containing non-ASCII + // UTF-8 characters will be Q-encoded. 
+ h := w.Result().Header + dec := new(mime.WordDecoder) + for _, c := range tt.wantHeaders { + maybeEncoded := h.Get(c.header) + got, err := dec.DecodeHeader(maybeEncoded) + if err != nil { + t.Fatalf("invalid %q header; failed to decode: %v", maybeEncoded, err) + } + if got != c.want { + t.Errorf("invalid %q header; want=%q, got=%q", c.header, c.want, got) + } + } + }) + } +} + func Test_reverseProxyConfiguration(t *testing.T) { b := newTestBackend(t) type test struct { @@ -926,6 +1079,9 @@ func newTestBackend(t *testing.T, opts ...any) *LocalBackend { b.currentNode().SetNetMap(&netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ Name: "example.ts.net", + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.150.151.151/32"), + }, }).View(), UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ tailcfg.UserID(1): (&tailcfg.UserProfile{ @@ -1171,3 +1327,89 @@ func TestServeGRPCProxy(t *testing.T) { }) } } + +func TestSerialisePeerCapMap(t *testing.T) { + var tests = []struct { + name string + capMap tailcfg.PeerCapMap + maxNumBytes int + wantOneOfSerialized []string + wantTruncated bool + }{ + { + name: "empty cap map", + capMap: tailcfg.PeerCapMap{}, + maxNumBytes: 50, + wantOneOfSerialized: []string{"{}"}, + wantTruncated: false, + }, + { + name: "cap map with one capability", + capMap: tailcfg.PeerCapMap{ + "tailscale.com/cap/kubernetes": []tailcfg.RawMessage{ + `{"impersonate": {"groups": ["tailnet-readers"]}}`, + }, + }, + maxNumBytes: 50, + wantOneOfSerialized: []string{ + `{"tailscale.com/cap/kubernetes":[{"impersonate":{"groups":["tailnet-readers"]}}]}`, + }, + wantTruncated: false, + }, + { + name: "cap map with two capabilities", + capMap: tailcfg.PeerCapMap{ + "foo.com/cap/something": []tailcfg.RawMessage{ + `{"role": "Admin"}`, + }, + "bar.com/cap/other-thing": []tailcfg.RawMessage{ + `{"role": "Viewer"}`, + }, + }, + maxNumBytes: 50, + // Both cap map entries will be included, but they could appear in any order. + wantOneOfSerialized: []string{ + `{"foo.com/cap/something":[{"role":"Admin"}],"bar.com/cap/other-thing":[{"role":"Viewer"}]}`, + `{"bar.com/cap/other-thing":[{"role":"Viewer"}],"foo.com/cap/something":[{"role":"Admin"}]}`, + }, + wantTruncated: false, + }, + { + name: "cap map that should be truncated to stay within size limits", + capMap: tailcfg.PeerCapMap{ + "foo.com/cap/something": []tailcfg.RawMessage{ + `{"role": "Admin"}`, + }, + "bar.com/cap/other-thing": []tailcfg.RawMessage{ + `{"role": "Viewer"}`, + }, + }, + maxNumBytes: 40, + // Only one cap map entry will be included, but we don't know which one. 
+ wantOneOfSerialized: []string{ + `{"foo.com/cap/something":[{"role":"Admin"}]}`, + `{"bar.com/cap/other-thing":[{"role":"Viewer"}]}`, + }, + wantTruncated: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotSerialized, gotCapped, err := serializeUpToNBytes(tt.capMap, tt.maxNumBytes) + + if err != nil { + t.Fatal(err) + } + if gotCapped != tt.wantTruncated { + t.Errorf("got %t, want %t", gotCapped, tt.wantTruncated) + } + for _, wantSerialized := range tt.wantOneOfSerialized { + if gotSerialized == wantSerialized { + return + } + } + t.Errorf("want one of %v, got %q", tt.wantOneOfSerialized, gotSerialized) + }) + } +} diff --git a/ipn/serve.go b/ipn/serve.go index a0f1334d7..c4a0997d2 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -160,6 +160,8 @@ type HTTPHandler struct { Text string `json:",omitempty"` // plaintext to serve (primarily for testing) + UserCaps []tailcfg.PeerCapability `json:",omitempty"` // peer capabilities to forward in grant header, e.g. example.com/cap/mon + // TODO(bradfitz): bool to not enumerate directories? TTL on mapping for // temporary ones? Error codes? Redirects? } From d6fa899eba4978b73c6113318363f570524e55e4 Mon Sep 17 00:00:00 2001 From: Gesa Stupperich Date: Wed, 15 Oct 2025 12:59:10 +0100 Subject: [PATCH 1589/1708] ipn/ipnlocal/serve: remove grant header truncation logic Given that we filter based on the usercaps argument now, truncation should not be necessary anymore. Updates tailscale/corp/#28372 Signed-off-by: Gesa Stupperich --- cmd/tailscale/cli/serve_legacy.go | 2 +- cmd/tailscale/cli/serve_v2.go | 16 ++--- cmd/tailscale/cli/serve_v2_test.go | 18 +++--- ipn/ipn_clone.go | 10 ++-- ipn/ipn_view.go | 12 ++-- ipn/ipnlocal/serve.go | 37 ++---------- ipn/ipnlocal/serve_test.go | 96 ++---------------------------- ipn/serve.go | 2 +- 8 files changed, 41 insertions(+), 152 deletions(-) diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 95e518998..5c2d8eefa 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -173,7 +173,7 @@ type serveEnv struct { service tailcfg.ServiceName // service name tun bool // redirect traffic to OS for service allServices bool // apply config file to all services - userCaps []tailcfg.PeerCapability // user capabilities to forward + acceptAppCaps []tailcfg.PeerCapability // app capabilities to forward lc localServeClient // localClient interface, specific to serve // optional stuff for tests: diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 4921bf31f..f822753ac 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -96,12 +96,12 @@ func (b *bgBoolFlag) String() string { return strconv.FormatBool(b.Value) } -type userCapsFlag struct { +type acceptAppCapsFlag struct { Value *[]tailcfg.PeerCapability } -// Set appends s to the list of userCaps. -func (u *userCapsFlag) Set(s string) error { +// Set appends s to the list of appCaps to accept. +func (u *acceptAppCapsFlag) Set(s string) error { if s == "" { return nil } @@ -109,8 +109,8 @@ func (u *userCapsFlag) Set(s string) error { return nil } -// String returns the string representation of the userCaps slice. -func (u *userCapsFlag) String() string { +// String returns the string representation of the slice of appCaps to accept. 
+func (u *acceptAppCapsFlag) String() string { s := make([]string, len(*u.Value)) for i, v := range *u.Value { s[i] = string(v) @@ -221,7 +221,7 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { fs.UintVar(&e.https, "https", 0, "Expose an HTTPS server at the specified port (default mode)") if subcmd == serve { fs.UintVar(&e.http, "http", 0, "Expose an HTTP server at the specified port") - fs.Var(&userCapsFlag{Value: &e.userCaps}, "usercaps", "User capability to forward to the server (can be specified multiple times)") + fs.Var(&acceptAppCapsFlag{Value: &e.acceptAppCaps}, "accept-app-caps", "App capability to forward to the server (can be specified multiple times)") } fs.UintVar(&e.tcp, "tcp", 0, "Expose a TCP forwarder to forward raw TCP packets at the specified port") fs.UintVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", 0, "Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port") @@ -492,7 +492,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { if len(args) > 0 { target = args[0] } - err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix, e.userCaps) + err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix, e.acceptAppCaps) msg = e.messageForPort(sc, st, dnsName, srvType, srvPort) } if err != nil { @@ -1141,7 +1141,7 @@ func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort ui return err } h.Proxy = t - h.UserCaps = caps + h.AcceptAppCaps = caps } // TODO: validation needs to check nested foreground configs diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index d039c52cc..473acea61 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -861,42 +861,42 @@ func TestServeDevConfigMutations(t *testing.T) { name: "forward_grant_header", steps: []step{ { - command: cmd("serve --bg --usercaps=example.com/cap/foo 3000"), + command: cmd("serve --bg --accept-app-caps=example.com/cap/foo 3000"), want: &ipn.ServeConfig{ TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ "/": { - Proxy: "http://127.0.0.1:3000", - UserCaps: []tailcfg.PeerCapability{"example.com/cap/foo"}, + Proxy: "http://127.0.0.1:3000", + AcceptAppCaps: []tailcfg.PeerCapability{"example.com/cap/foo"}, }, }}, }, }, }, { - command: cmd("serve --bg --usercaps=example.com/cap/foo --usercaps=example.com/cap/bar 3000"), + command: cmd("serve --bg --accept-app-caps=example.com/cap/foo --accept-app-caps=example.com/cap/bar 3000"), want: &ipn.ServeConfig{ TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ "/": { - Proxy: "http://127.0.0.1:3000", - UserCaps: []tailcfg.PeerCapability{"example.com/cap/foo", "example.com/cap/bar"}, + Proxy: "http://127.0.0.1:3000", + AcceptAppCaps: []tailcfg.PeerCapability{"example.com/cap/foo", "example.com/cap/bar"}, }, }}, }, }, }, { - command: cmd("serve --bg --usercaps=example.com/cap/bar 3000"), + command: cmd("serve --bg --accept-app-caps=example.com/cap/bar 3000"), want: &ipn.ServeConfig{ TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ "/": { - Proxy: "http://127.0.0.1:3000", - UserCaps: []tailcfg.PeerCapability{"example.com/cap/bar"}, + Proxy: 
"http://127.0.0.1:3000", + AcceptAppCaps: []tailcfg.PeerCapability{"example.com/cap/bar"}, }, }}, }, diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 54511094b..8a0a3c833 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -232,16 +232,16 @@ func (src *HTTPHandler) Clone() *HTTPHandler { } dst := new(HTTPHandler) *dst = *src - dst.UserCaps = append(src.UserCaps[:0:0], src.UserCaps...) + dst.AcceptAppCaps = append(src.AcceptAppCaps[:0:0], src.AcceptAppCaps...) return dst } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _HTTPHandlerCloneNeedsRegeneration = HTTPHandler(struct { - Path string - Proxy string - Text string - UserCaps []tailcfg.PeerCapability + Path string + Proxy string + Text string + AcceptAppCaps []tailcfg.PeerCapability }{}) // Clone makes a deep copy of WebServerConfig. diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index a87b6c42e..61d0dec23 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -892,16 +892,16 @@ func (v HTTPHandlerView) Proxy() string { return v.ж.Proxy } func (v HTTPHandlerView) Text() string { return v.ж.Text } // peer capabilities to forward in grant header, e.g. example.com/cap/mon -func (v HTTPHandlerView) UserCaps() views.Slice[tailcfg.PeerCapability] { - return views.SliceOf(v.ж.UserCaps) +func (v HTTPHandlerView) AcceptAppCaps() views.Slice[tailcfg.PeerCapability] { + return views.SliceOf(v.ж.AcceptAppCaps) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _HTTPHandlerViewNeedsRegeneration = HTTPHandler(struct { - Path string - Proxy string - Text string - UserCaps []tailcfg.PeerCapability + Path string + Proxy string + Text string + AcceptAppCaps []tailcfg.PeerCapability }{}) // View returns a read-only view of WebServerConfig. diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 799161a76..5971476de 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -65,7 +65,6 @@ func init() { const ( contentTypeHeader = "Content-Type" grpcBaseContentType = "application/grpc" - grantHeaderMaxSize = 15360 // 15 KiB ) // ErrETagMismatch signals that the given @@ -932,7 +931,7 @@ func encTailscaleHeaderValue(v string) string { } func (b *LocalBackend) addTailscaleGrantHeader(r *httputil.ProxyRequest) { - r.Out.Header.Del("Tailscale-User-Capabilities") + r.Out.Header.Del("Tailscale-App-Capabilities") c, ok := serveHTTPContextKey.ValueOk(r.Out.Context()) if !ok || c.Funnel != nil { @@ -954,37 +953,13 @@ func (b *LocalBackend) addTailscaleGrantHeader(r *httputil.ProxyRequest) { } } - serialized, truncated, err := serializeUpToNBytes(peerCapsFiltered, grantHeaderMaxSize) + peerCapsSerialized, err := json.Marshal(peerCapsFiltered) if err != nil { - b.logf("serve: failed to serialize PeerCapMap: %v", err) + b.logf("serve: failed to serialize filtered PeerCapMap: %v", err) return } - if truncated { - b.logf("serve: serialized PeerCapMap exceeds %d bytes, forwarding truncated PeerCapMap", grantHeaderMaxSize) - } - - r.Out.Header.Set("Tailscale-User-Capabilities", encTailscaleHeaderValue(serialized)) -} -// serializeUpToNBytes serializes capMap. It arbitrarily truncates entries from the capMap -// if the size of the serialized capMap would exceed N bytes. 
-func serializeUpToNBytes(capMap tailcfg.PeerCapMap, N int) (string, bool, error) { - numBytes := 0 - capped := false - result := tailcfg.PeerCapMap{} - for k, v := range capMap { - numBytes += len(k) + len(v) - if numBytes > N { - capped = true - break - } - result[k] = v - } - marshalled, err := json.Marshal(result) - if err != nil { - return "", false, err - } - return string(marshalled), capped, nil + r.Out.Header.Set("Tailscale-App-Capabilities", encTailscaleHeaderValue(string(peerCapsSerialized))) } // serveWebHandler is an http.HandlerFunc that maps incoming requests to the @@ -1010,12 +985,12 @@ func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) { http.Error(w, "unknown proxy destination", http.StatusInternalServerError) return } - // Inject user capabilities to forward into the request context + // Inject app capabilities to forward into the request context c, ok := serveHTTPContextKey.ValueOk(r.Context()) if !ok { return } - c.PeerCapsFilter = h.UserCaps() + c.PeerCapsFilter = h.AcceptAppCaps() h := p.(http.Handler) // Trim the mount point from the URL path before proxying. (#6571) if r.URL.Path != "/" { diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index 5d880e185..a72c50c1f 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -828,8 +828,8 @@ func TestServeHTTPProxyGrantHeader(t *testing.T) { Web: map[ipn.HostPort]*ipn.WebServerConfig{ "example.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ "/": { - Proxy: testServ.URL, - UserCaps: []tailcfg.PeerCapability{"example.com/cap/interesting", "example.com/cap/boring"}, + Proxy: testServ.URL, + AcceptAppCaps: []tailcfg.PeerCapability{"example.com/cap/interesting", "example.com/cap/boring"}, }, }}, }, @@ -858,7 +858,7 @@ func TestServeHTTPProxyGrantHeader(t *testing.T) { {"Tailscale-User-Name", "Some One"}, {"Tailscale-User-Profile-Pic", "https://example.com/photo.jpg"}, {"Tailscale-Headers-Info", "https://tailscale.com/s/serve-headers"}, - {"Tailscale-User-Capabilities", `{"example.com/cap/interesting":[{"role":"🐿"}]}`}, + {"Tailscale-App-Capabilities", `{"example.com/cap/interesting":[{"role":"🐿"}]}`}, }, }, { @@ -871,7 +871,7 @@ func TestServeHTTPProxyGrantHeader(t *testing.T) { {"Tailscale-User-Name", ""}, {"Tailscale-User-Profile-Pic", ""}, {"Tailscale-Headers-Info", ""}, - {"Tailscale-User-Capabilities", `{"example.com/cap/boring":[{"role":"Viewer"}]}`}, + {"Tailscale-App-Capabilities", `{"example.com/cap/boring":[{"role":"Viewer"}]}`}, }, }, { @@ -884,7 +884,7 @@ func TestServeHTTPProxyGrantHeader(t *testing.T) { {"Tailscale-User-Name", ""}, {"Tailscale-User-Profile-Pic", ""}, {"Tailscale-Headers-Info", ""}, - {"Tailscale-User-Capabilities", ""}, + {"Tailscale-App-Capabilities", ""}, }, }, } @@ -1327,89 +1327,3 @@ func TestServeGRPCProxy(t *testing.T) { }) } } - -func TestSerialisePeerCapMap(t *testing.T) { - var tests = []struct { - name string - capMap tailcfg.PeerCapMap - maxNumBytes int - wantOneOfSerialized []string - wantTruncated bool - }{ - { - name: "empty cap map", - capMap: tailcfg.PeerCapMap{}, - maxNumBytes: 50, - wantOneOfSerialized: []string{"{}"}, - wantTruncated: false, - }, - { - name: "cap map with one capability", - capMap: tailcfg.PeerCapMap{ - "tailscale.com/cap/kubernetes": []tailcfg.RawMessage{ - `{"impersonate": {"groups": ["tailnet-readers"]}}`, - }, - }, - maxNumBytes: 50, - wantOneOfSerialized: []string{ - `{"tailscale.com/cap/kubernetes":[{"impersonate":{"groups":["tailnet-readers"]}}]}`, - }, - wantTruncated: false, - }, 
- { - name: "cap map with two capabilities", - capMap: tailcfg.PeerCapMap{ - "foo.com/cap/something": []tailcfg.RawMessage{ - `{"role": "Admin"}`, - }, - "bar.com/cap/other-thing": []tailcfg.RawMessage{ - `{"role": "Viewer"}`, - }, - }, - maxNumBytes: 50, - // Both cap map entries will be included, but they could appear in any order. - wantOneOfSerialized: []string{ - `{"foo.com/cap/something":[{"role":"Admin"}],"bar.com/cap/other-thing":[{"role":"Viewer"}]}`, - `{"bar.com/cap/other-thing":[{"role":"Viewer"}],"foo.com/cap/something":[{"role":"Admin"}]}`, - }, - wantTruncated: false, - }, - { - name: "cap map that should be truncated to stay within size limits", - capMap: tailcfg.PeerCapMap{ - "foo.com/cap/something": []tailcfg.RawMessage{ - `{"role": "Admin"}`, - }, - "bar.com/cap/other-thing": []tailcfg.RawMessage{ - `{"role": "Viewer"}`, - }, - }, - maxNumBytes: 40, - // Only one cap map entry will be included, but we don't know which one. - wantOneOfSerialized: []string{ - `{"foo.com/cap/something":[{"role":"Admin"}]}`, - `{"bar.com/cap/other-thing":[{"role":"Viewer"}]}`, - }, - wantTruncated: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotSerialized, gotCapped, err := serializeUpToNBytes(tt.capMap, tt.maxNumBytes) - - if err != nil { - t.Fatal(err) - } - if gotCapped != tt.wantTruncated { - t.Errorf("got %t, want %t", gotCapped, tt.wantTruncated) - } - for _, wantSerialized := range tt.wantOneOfSerialized { - if gotSerialized == wantSerialized { - return - } - } - t.Errorf("want one of %v, got %q", tt.wantOneOfSerialized, gotSerialized) - }) - } -} diff --git a/ipn/serve.go b/ipn/serve.go index c4a0997d2..3f674d9ed 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -160,7 +160,7 @@ type HTTPHandler struct { Text string `json:",omitempty"` // plaintext to serve (primarily for testing) - UserCaps []tailcfg.PeerCapability `json:",omitempty"` // peer capabilities to forward in grant header, e.g. example.com/cap/mon + AcceptAppCaps []tailcfg.PeerCapability `json:",omitempty"` // peer capabilities to forward in grant header, e.g. example.com/cap/mon // TODO(bradfitz): bool to not enumerate directories? TTL on mapping for // temporary ones? Error codes? Redirects? From d2e4a20f265b55216e1c0adcf9b4ba95c965d9f8 Mon Sep 17 00:00:00 2001 From: Gesa Stupperich Date: Wed, 22 Oct 2025 09:41:19 +0100 Subject: [PATCH 1590/1708] ipn/ipnlocal/serve: error when PeerCaps serialisation fails Also consolidates variable and header naming and amends the CLI behavior * multiple app-caps have to be specified as comma-separated list * simple regex-based validation of app capability names is carried out during flag parsing Signed-off-by: Gesa Stupperich --- cmd/tailscale/cli/serve_v2.go | 18 ++++- cmd/tailscale/cli/serve_v2_test.go | 113 ++++++++++++++++++++++++++++- ipn/ipnlocal/serve.go | 40 +++++----- 3 files changed, 150 insertions(+), 21 deletions(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index f822753ac..30adcb8e7 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -20,6 +20,7 @@ import ( "os/signal" "path" "path/filepath" + "regexp" "slices" "sort" "strconv" @@ -100,12 +101,25 @@ type acceptAppCapsFlag struct { Value *[]tailcfg.PeerCapability } +// An application capability name has the form {domain}/{name}. +// Both parts must use the (simplified) FQDN label character set. +// The "name" can contain forward slashes. 
+// \pL = Unicode Letter, \pN = Unicode Number, - = Hyphen +var validAppCap = regexp.MustCompile(`^([\pL\pN-]+\.)+[\pL\pN-]+\/[\pL\pN-/]+$`) + // Set appends s to the list of appCaps to accept. func (u *acceptAppCapsFlag) Set(s string) error { if s == "" { return nil } - *u.Value = append(*u.Value, tailcfg.PeerCapability(s)) + appCaps := strings.Split(s, ",") + for _, appCap := range appCaps { + appCap = strings.TrimSpace(appCap) + if !validAppCap.MatchString(appCap) { + return fmt.Errorf("%q does not match the form {domain}/{name}, where domain must be a fully qualified domain name", s) + } + *u.Value = append(*u.Value, tailcfg.PeerCapability(appCap)) + } return nil } @@ -221,7 +235,7 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { fs.UintVar(&e.https, "https", 0, "Expose an HTTPS server at the specified port (default mode)") if subcmd == serve { fs.UintVar(&e.http, "http", 0, "Expose an HTTP server at the specified port") - fs.Var(&acceptAppCapsFlag{Value: &e.acceptAppCaps}, "accept-app-caps", "App capability to forward to the server (can be specified multiple times)") + fs.Var(&acceptAppCapsFlag{Value: &e.acceptAppCaps}, "accept-app-caps", "App capabilities to forward to the server (specify multiple capabilities with a comma-separated list)") } fs.UintVar(&e.tcp, "tcp", 0, "Expose a TCP forwarder to forward raw TCP packets at the specified port") fs.UintVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", 0, "Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port") diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 473acea61..dfa17f1fa 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -875,7 +875,7 @@ func TestServeDevConfigMutations(t *testing.T) { }, }, { - command: cmd("serve --bg --accept-app-caps=example.com/cap/foo --accept-app-caps=example.com/cap/bar 3000"), + command: cmd("serve --bg --accept-app-caps=example.com/cap/foo,example.com/cap/bar 3000"), want: &ipn.ServeConfig{ TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ @@ -904,6 +904,15 @@ func TestServeDevConfigMutations(t *testing.T) { }, }, }, + { + name: "invalid_accept_caps_invalid_app_cap", + steps: []step{ + { + command: cmd("serve --bg --accept-app-caps=example/cap/foo 3000"), // should be {domain.tld}/{name} + wantErr: anyErr(), + }, + }, + }, } for _, group := range groups { @@ -1220,6 +1229,108 @@ func TestSrcTypeFromFlags(t *testing.T) { } } +func TestAcceptSetAppCapsFlag(t *testing.T) { + testCases := []struct { + name string + inputs []string + expectErr bool + expectedValue []tailcfg.PeerCapability + }{ + { + name: "valid_simple", + inputs: []string{"example.com/name"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"example.com/name"}, + }, + { + name: "valid_unicode", + inputs: []string{"bücher.de/something"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"bücher.de/something"}, + }, + { + name: "more_valid_unicode", + inputs: []string{"example.tw/某某某"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"example.tw/某某某"}, + }, + { + name: "valid_path_slashes", + inputs: []string{"domain.com/path/to/name"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"domain.com/path/to/name"}, + }, + { + name: "valid_multiple_sets", + inputs: []string{"one.com/foo", "two.com/bar"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"one.com/foo", "two.com/bar"}, + }, + { + 
name: "valid_empty_string", + inputs: []string{""}, + expectErr: false, + expectedValue: nil, // Empty string should be a no-op and not append anything. + }, + { + name: "invalid_path_chars", + inputs: []string{"domain.com/path_with_underscore"}, + expectErr: true, + expectedValue: nil, // Slice should remain empty. + }, + { + name: "valid_subdomain", + inputs: []string{"sub.domain.com/name"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"sub.domain.com/name"}, + }, + { + name: "invalid_no_path", + inputs: []string{"domain.com/"}, + expectErr: true, + expectedValue: nil, + }, + { + name: "invalid_no_domain", + inputs: []string{"/path/only"}, + expectErr: true, + expectedValue: nil, + }, + { + name: "some_invalid_some_valid", + inputs: []string{"one.com/foo", "bad/bar", "two.com/baz"}, + expectErr: true, + expectedValue: []tailcfg.PeerCapability{"one.com/foo"}, // Parsing will stop after first error + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var v []tailcfg.PeerCapability + flag := &acceptAppCapsFlag{Value: &v} + + var err error + for _, s := range tc.inputs { + err = flag.Set(s) + if err != nil { + break + } + } + + if tc.expectErr && err == nil { + t.Errorf("expected an error, but got none") + } + if !tc.expectErr && err != nil { + t.Errorf("did not expect an error, but got: %v", err) + } + + if !reflect.DeepEqual(tc.expectedValue, v) { + t.Errorf("unexpected value, got: %q, want: %q", v, tc.expectedValue) + } + }) + } +} + func TestCleanURLPath(t *testing.T) { tests := []struct { input string diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 5971476de..eb2c932c0 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -80,8 +80,9 @@ type serveHTTPContext struct { DestPort uint16 // provides funnel-specific context, nil if not funneled - Funnel *funnelFlow - PeerCapsFilter views.Slice[tailcfg.PeerCapability] + Funnel *funnelFlow + // AppCapabilities lists all PeerCapabilities that should be forwarded by serve + AppCapabilities views.Slice[tailcfg.PeerCapability] } // funnelFlow represents a funneled connection initiated via IngressPeer @@ -805,10 +806,11 @@ func (rp *reverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { r.Out.Host = r.In.Host addProxyForwardedHeaders(r) rp.lb.addTailscaleIdentityHeaders(r) - rp.lb.addTailscaleGrantHeader(r) - }} - - // There is no way to autodetect h2c as per RFC 9113 + if err := rp.lb.addAppCapabilitiesHeader(r); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + }} // There is no way to autodetect h2c as per RFC 9113 // https://datatracker.ietf.org/doc/html/rfc9113#name-starting-http-2. // However, we assume that http:// proxy prefix in combination with the // protoccol being HTTP/2 is sufficient to detect h2c for our needs. 
Only use this for @@ -930,24 +932,25 @@ func encTailscaleHeaderValue(v string) string { return mime.QEncoding.Encode("utf-8", v) } -func (b *LocalBackend) addTailscaleGrantHeader(r *httputil.ProxyRequest) { - r.Out.Header.Del("Tailscale-App-Capabilities") +func (b *LocalBackend) addAppCapabilitiesHeader(r *httputil.ProxyRequest) error { + const appCapabilitiesHeaderName = "Tailscale-App-Capabilities" + r.Out.Header.Del(appCapabilitiesHeaderName) c, ok := serveHTTPContextKey.ValueOk(r.Out.Context()) if !ok || c.Funnel != nil { - return + return nil } - filter := c.PeerCapsFilter - if filter.IsNil() { - return + acceptCaps := c.AppCapabilities + if acceptCaps.IsNil() { + return nil } peerCaps := b.PeerCaps(c.SrcAddr.Addr()) if peerCaps == nil { - return + return nil } - peerCapsFiltered := make(map[tailcfg.PeerCapability][]tailcfg.RawMessage, filter.Len()) - for _, cap := range filter.AsSlice() { + peerCapsFiltered := make(map[tailcfg.PeerCapability][]tailcfg.RawMessage, acceptCaps.Len()) + for _, cap := range acceptCaps.AsSlice() { if peerCaps.HasCapability(cap) { peerCapsFiltered[cap] = peerCaps[cap] } @@ -956,10 +959,11 @@ func (b *LocalBackend) addTailscaleGrantHeader(r *httputil.ProxyRequest) { peerCapsSerialized, err := json.Marshal(peerCapsFiltered) if err != nil { b.logf("serve: failed to serialize filtered PeerCapMap: %v", err) - return + return fmt.Errorf("unable to process app capabilities") } - r.Out.Header.Set("Tailscale-App-Capabilities", encTailscaleHeaderValue(string(peerCapsSerialized))) + r.Out.Header.Set(appCapabilitiesHeaderName, encTailscaleHeaderValue(string(peerCapsSerialized))) + return nil } // serveWebHandler is an http.HandlerFunc that maps incoming requests to the @@ -990,7 +994,7 @@ func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) { if !ok { return } - c.PeerCapsFilter = h.AcceptAppCaps() + c.AppCapabilities = h.AcceptAppCaps() h := p.(http.Handler) // Trim the mount point from the URL path before proxying. (#6571) if r.URL.Path != "/" { From 02681732d12274e3a1d09708bbc0eabc5681fc34 Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Tue, 28 Oct 2025 09:33:03 -0600 Subject: [PATCH 1591/1708] .github: drop branches filter with single asterisk from workflows (#17682) Drop usage of the branches filter with a single asterisk as this matches against zero or more characters but not a forward slash, resulting in PRs to branch names with forwards slashes in them not having these workflow run against them as expected. 
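As a quick illustration of the slash-bounded glob behavior (Go's path.Match is used here only as an analogy; GitHub Actions has its own filter matching):

package main

import (
	"fmt"
	"path"
)

func main() {
	// "*" in path.Match matches any sequence of non-separator characters,
	// so a branch name containing "/" does not match, mirroring the
	// workflow branches filter behavior described above.
	for _, branch := range []string{"main", "feature/foo"} {
		ok, err := path.Match("*", branch)
		fmt.Printf("%s -> matched=%v err=%v\n", branch, ok, err)
	}
	// Output: main -> matched=true err=<nil>
	//         feature/foo -> matched=false err=<nil>
}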
Updates https://github.com/tailscale/corp/issues/33523 Signed-off-by: Mario Minardi --- .github/workflows/docker-file-build.yml | 2 -- .github/workflows/installer.yml | 2 -- .github/workflows/request-dataplane-review.yml | 2 -- .github/workflows/webclient.yml | 2 -- 4 files changed, 8 deletions(-) diff --git a/.github/workflows/docker-file-build.yml b/.github/workflows/docker-file-build.yml index 04611e172..c61680a34 100644 --- a/.github/workflows/docker-file-build.yml +++ b/.github/workflows/docker-file-build.yml @@ -4,8 +4,6 @@ on: branches: - main pull_request: - branches: - - "*" jobs: deploy: runs-on: ubuntu-latest diff --git a/.github/workflows/installer.yml b/.github/workflows/installer.yml index 6144864fd..bafa9925a 100644 --- a/.github/workflows/installer.yml +++ b/.github/workflows/installer.yml @@ -10,8 +10,6 @@ on: - scripts/installer.sh - .github/workflows/installer.yml pull_request: - branches: - - "*" paths: - scripts/installer.sh - .github/workflows/installer.yml diff --git a/.github/workflows/request-dataplane-review.yml b/.github/workflows/request-dataplane-review.yml index 4a86b0541..7ae5668c3 100644 --- a/.github/workflows/request-dataplane-review.yml +++ b/.github/workflows/request-dataplane-review.yml @@ -2,8 +2,6 @@ name: request-dataplane-review on: pull_request: - branches: - - "*" paths: - ".github/workflows/request-dataplane-review.yml" - "**/*derp*" diff --git a/.github/workflows/webclient.yml b/.github/workflows/webclient.yml index e64137f2b..bcec1f52d 100644 --- a/.github/workflows/webclient.yml +++ b/.github/workflows/webclient.yml @@ -3,8 +3,6 @@ on: workflow_dispatch: # For now, only run on requests, not the main branches. pull_request: - branches: - - "*" paths: - "client/web/**" - ".github/workflows/webclient.yml" From db5815fb978db0873752618d4531ee2ac9f5f83d Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 28 Oct 2025 08:45:22 -0700 Subject: [PATCH 1592/1708] Revert "logtail: avoid racing eventbus subscriptions with Shutdown (#17639)" (#17684) This reverts commit 4346615d77a6de16854c6e78f9d49375d6424e6e. We averted the shutdown race, but will need to service the subscriber even when we are not waiting for a change so that we do not delay the bus as a whole. Updates #17638 Change-Id: I5488466ed83f5ad1141c95267f5ae54878a24657 Signed-off-by: M. J. 
Fromberger --- logtail/logtail.go | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/logtail/logtail.go b/logtail/logtail.go index 52823fedf..675422890 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -124,7 +124,6 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger { if cfg.Bus != nil { l.eventClient = cfg.Bus.Client("logtail.Logger") - l.changeDeltaSub = eventbus.Subscribe[netmon.ChangeDelta](l.eventClient) } l.SetSockstatsLabel(sockstats.LabelLogtailLogger) l.compressLogs = cfg.CompressLogs @@ -163,7 +162,6 @@ type Logger struct { httpDoCalls atomic.Int32 sockstatsLabel atomicSocktatsLabel eventClient *eventbus.Client - changeDeltaSub *eventbus.Subscriber[netmon.ChangeDelta] procID uint32 includeProcSequence bool @@ -429,23 +427,8 @@ func (l *Logger) internetUp() bool { func (l *Logger) awaitInternetUp(ctx context.Context) { if l.eventClient != nil { - for { - if l.internetUp() { - return - } - select { - case <-ctx.Done(): - return // give up - case <-l.changeDeltaSub.Done(): - return // give up (closing down) - case delta := <-l.changeDeltaSub.Events(): - if delta.New.AnyInterfaceUp() || l.internetUp() { - fmt.Fprintf(l.stderr, "logtail: internet back up\n") - return - } - fmt.Fprintf(l.stderr, "logtail: network changed, but is not up") - } - } + l.awaitInternetUpBus(ctx) + return } upc := make(chan bool, 1) defer l.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { @@ -466,6 +449,24 @@ func (l *Logger) awaitInternetUp(ctx context.Context) { } } +func (l *Logger) awaitInternetUpBus(ctx context.Context) { + if l.internetUp() { + return + } + sub := eventbus.Subscribe[netmon.ChangeDelta](l.eventClient) + defer sub.Close() + select { + case delta := <-sub.Events(): + if delta.New.AnyInterfaceUp() { + fmt.Fprintf(l.stderr, "logtail: internet back up\n") + return + } + fmt.Fprintf(l.stderr, "logtail: network changed, but is not up") + case <-ctx.Done(): + return + } +} + // upload uploads body to the log server. // origlen indicates the pre-compression body length. // origlen of -1 indicates that the body is not compressed. From 0a5ba8280f74c2577b7c91665aad37dc88ce6c99 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Tue, 28 Oct 2025 08:46:11 -0700 Subject: [PATCH 1593/1708] CODE_OF_CONDUCT.md: update code of conduct Updates #cleanup Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- CODE_OF_CONDUCT.md | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index ef68d6768..348483df5 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -3,6 +3,7 @@ ## Our Pledge We are committed to creating an open, welcoming, diverse, inclusive, healthy and respectful community. +Unacceptable, harmful and inappropriate behavior will not be tolerated. ## Our Standards @@ -16,15 +17,18 @@ Examples of behavior that contributes to a positive environment for our communit Examples of unacceptable behavior include without limitation: -- The use of sexualized language or imagery, and sexual attention or advances of any kind. -- The use of violent, intimidating or bullying language or imagery. -- Trolling, insulting or derogatory comments, and personal or political attacks. +- The use of language, imagery or emojis (collectively "content") that is racist, sexist, homophobic, transphobic, or otherwise harassing or discriminatory based on any protected characteristic. 
+- The use of sexualized content and sexual attention or advances of any kind. +- The use of violent, intimidating or bullying content. +- Trolling, concern trolling, insulting or derogatory comments, and personal or political attacks. - Public or private harassment. -- Publishing others' private information, such as a physical or email address, without their explicit permission. +- Publishing others' personal information, such as a photo, physical address, email address, online profile information, or other personal information, without their explicit permission or with the intent to bully or harass the other person. +- Posting deep fake or other AI generated content about or involving another person without the explicit permission. - Spamming community channels and members, such as sending repeat messages, low-effort content, or automated messages. -- Phishing or any similar activity; -- Distributing or promoting malware; -- Other conduct which could reasonably be considered inappropriate in a professional setting. +- Phishing or any similar activity. +- Distributing or promoting malware. +- The use of any coded or suggestive content to hide or provoke otherwise unacceptable behavior. +- Other conduct which could reasonably be considered harmful, illegal, or inappropriate in a professional setting. Please also see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). @@ -38,7 +42,7 @@ Please note that this community is not moderated by staff 24/7, and we do not ha While we strive to keep the community safe and welcoming, moderation may not be immediate at all hours. If you encounter any issues, report them using the appropriate channels. -## Enforcement +## Enforcement Guidelines Community leaders and moderators are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. @@ -46,9 +50,8 @@ Community leaders and moderators have the right and responsibility to remove, ed Tailscale retains full discretion to take action (or not) in response to a violation of these guidelines with or without notice or liability to you. We will interpret our policies and resolve disputes in favor of protecting users, customers, the public, our community and our company, as a whole. -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: +Community leaders will follow these community enforcement guidelines in determining the consequences for any action they deem in violation of this Code of Conduct, +and retain full discretion to apply the enforcement guidelines as necessary depending on the circumstances: ### 1. Correction From edb11e0e60ce702ebe62e7bfca345f167ac5efad Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 28 Oct 2025 08:34:34 -0700 Subject: [PATCH 1594/1708] wgengine/magicsock: fix js/wasm crash regression loading non-existent portmapper Thanks for the report, @Need-an-AwP! 
Fixes #17681 Updates #9394 Change-Id: I2e0b722ef9b460bd7e79499192d1a315504ca84c Signed-off-by: Brad Fitzpatrick --- client/local/local.go | 13 +++++++++++++ client/tailscale/apitype/apitype.go | 10 ++++++++++ feature/feature.go | 6 ++++++ feature/portmapper/portmapper.go | 2 ++ ipn/localapi/debug.go | 10 ++++++++++ tstest/integration/integration_test.go | 22 ++++++++++++++++++++++ wgengine/magicsock/magicsock.go | 8 ++++++-- 7 files changed, 69 insertions(+), 2 deletions(-) diff --git a/client/local/local.go b/client/local/local.go index 582c7b848..2382a1225 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -596,6 +596,19 @@ func (lc *Client) DebugResultJSON(ctx context.Context, action string) (any, erro return x, nil } +// QueryOptionalFeatures queries the optional features supported by the Tailscale daemon. +func (lc *Client) QueryOptionalFeatures(ctx context.Context) (*apitype.OptionalFeatures, error) { + body, err := lc.send(ctx, "POST", "/localapi/v0/debug-optional-features", 200, nil) + if err != nil { + return nil, fmt.Errorf("error %w: %s", err, body) + } + var x apitype.OptionalFeatures + if err := json.Unmarshal(body, &x); err != nil { + return nil, err + } + return &x, nil +} + // SetDevStoreKeyValue set a statestore key/value. It's only meant for development. // The schema (including when keys are re-read) is not a stable interface. func (lc *Client) SetDevStoreKeyValue(ctx context.Context, key, value string) error { diff --git a/client/tailscale/apitype/apitype.go b/client/tailscale/apitype/apitype.go index 58cdcecc7..6d239d082 100644 --- a/client/tailscale/apitype/apitype.go +++ b/client/tailscale/apitype/apitype.go @@ -94,3 +94,13 @@ type DNSQueryResponse struct { // Resolvers is the list of resolvers that the forwarder deemed able to resolve the query. Resolvers []*dnstype.Resolver } + +// OptionalFeatures describes which optional features are enabled in the build. +type OptionalFeatures struct { + // Features is the map of optional feature names to whether they are + // enabled. + // + // Disabled features may be absent from the map. (That is, false values + // are not guaranteed to be present.) + Features map[string]bool +} diff --git a/feature/feature.go b/feature/feature.go index 0d383b398..110b104da 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -13,6 +13,12 @@ var ErrUnavailable = errors.New("feature not included in this build") var in = map[string]bool{} +// Registered reports the set of registered features. +// +// The returned map should not be modified by the caller, +// not accessed concurrently with calls to Register. +func Registered() map[string]bool { return in } + // Register notes that the named feature is linked into the binary. 
func Register(name string) { if _, ok := in[name]; ok { diff --git a/feature/portmapper/portmapper.go b/feature/portmapper/portmapper.go index e7be00ad1..d1b903cb6 100644 --- a/feature/portmapper/portmapper.go +++ b/feature/portmapper/portmapper.go @@ -6,6 +6,7 @@ package portmapper import ( + "tailscale.com/feature" "tailscale.com/net/netmon" "tailscale.com/net/portmapper" "tailscale.com/net/portmapper/portmappertype" @@ -14,6 +15,7 @@ import ( ) func init() { + feature.Register("portmapper") portmappertype.HookNewPortMapper.Set(newPortMapper) } diff --git a/ipn/localapi/debug.go b/ipn/localapi/debug.go index b3b919d31..8aca7f009 100644 --- a/ipn/localapi/debug.go +++ b/ipn/localapi/debug.go @@ -19,6 +19,7 @@ import ( "sync" "time" + "tailscale.com/client/tailscale/apitype" "tailscale.com/feature" "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" @@ -39,6 +40,7 @@ func init() { Register("debug-packet-filter-matches", (*Handler).serveDebugPacketFilterMatches) Register("debug-packet-filter-rules", (*Handler).serveDebugPacketFilterRules) Register("debug-peer-endpoint-changes", (*Handler).serveDebugPeerEndpointChanges) + Register("debug-optional-features", (*Handler).serveDebugOptionalFeatures) } func (h *Handler) serveDebugPeerEndpointChanges(w http.ResponseWriter, r *http.Request) { @@ -463,3 +465,11 @@ func (h *Handler) serveDebugLog(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) } + +func (h *Handler) serveDebugOptionalFeatures(w http.ResponseWriter, r *http.Request) { + of := &apitype.OptionalFeatures{ + Features: feature.Registered(), + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(of) +} diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 234bb8c6e..64f49c7b8 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -175,6 +175,28 @@ func TestControlKnobs(t *testing.T) { } } +func TestExpectedFeaturesLinked(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t) + n1 := NewTestNode(t, env) + + d1 := n1.StartDaemon() + n1.AwaitResponding() + lc := n1.LocalClient() + got, err := lc.QueryOptionalFeatures(t.Context()) + if err != nil { + t.Fatal(err) + } + if !got.Features["portmapper"] { + t.Errorf("optional feature portmapper unexpectedly not found: got %v", got.Features) + } + + d1.MustCleanShutdown(t) + + t.Logf("number of HTTP logcatcher requests: %v", env.LogCatcher.numRequests()) +} + func TestCollectPanic(t *testing.T) { flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15865") tstest.Shard(t) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index e3c2d478e..658478901 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -719,9 +719,13 @@ func NewConn(opts Options) (*Conn, error) { newPortMapper, ok := portmappertype.HookNewPortMapper.GetOk() if ok { c.portMapper = newPortMapper(portmapperLogf, opts.EventBus, opts.NetMon, disableUPnP, c.onlyTCP443.Load) - } else if !testenv.InTest() { - panic("unexpected: HookNewPortMapper not set") } + // If !ok, the HookNewPortMapper hook is not set (so feature/portmapper + // isn't linked), but the build tag to explicitly omit the portmapper + // isn't set either. 
This should only happen to js/wasm builds, where + // the portmapper is a no-op even if linked (but it's no longer linked, + // since the move to feature/portmapper), or if people are wiring up + // their own Tailscale build from pieces. } c.netMon = opts.NetMon From 09a2a1048d83ba098c40e05fc01a0c7128e80866 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 28 Oct 2025 14:20:19 -0700 Subject: [PATCH 1595/1708] derp: fix an unchecked error in a test (#17694) Found by staticcheck, the test was calling derphttp.NewClient but not checking its error result before doing other things to it. Updates #cleanup Change-Id: I4ade35a7de7c473571f176e747866bc0ab5774db Signed-off-by: M. J. Fromberger --- derp/derphttp/derphttp_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index 76681d498..5208481ed 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -620,6 +620,9 @@ func TestURLDial(t *testing.T) { } netMon := netmon.NewStatic() c, err := derphttp.NewClient(key.NewNode(), "https://"+hostname+"/", t.Logf, netMon) + if err != nil { + t.Errorf("NewClient: %v", err) + } defer c.Close() if err := c.Connect(context.Background()); err != nil { From fcb614a53e8e2d5bb76279639d1962e1cb24983a Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Tue, 28 Oct 2025 14:48:02 -0700 Subject: [PATCH 1596/1708] cmd/jsonimports: add static analyzer for consistent "json" imports (#17669) This migrates an internal tool to open source so that we can run it on the tailscale.com module as well. We add the "util/safediff" also as a dependency of the tool. This PR does not yet set up a CI to run this analyzer. Updates tailscale/corp#791 Signed-off-by: Joe Tsai --- cmd/jsonimports/format.go | 175 +++++++++++++++++++++ cmd/jsonimports/format_test.go | 162 +++++++++++++++++++ cmd/jsonimports/jsonimports.go | 124 +++++++++++++++ util/safediff/diff.go | 280 +++++++++++++++++++++++++++++++++ util/safediff/diff_test.go | 196 +++++++++++++++++++++++ 5 files changed, 937 insertions(+) create mode 100644 cmd/jsonimports/format.go create mode 100644 cmd/jsonimports/format_test.go create mode 100644 cmd/jsonimports/jsonimports.go create mode 100644 util/safediff/diff.go create mode 100644 util/safediff/diff_test.go diff --git a/cmd/jsonimports/format.go b/cmd/jsonimports/format.go new file mode 100644 index 000000000..6dbd17558 --- /dev/null +++ b/cmd/jsonimports/format.go @@ -0,0 +1,175 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "bytes" + "go/ast" + "go/format" + "go/parser" + "go/token" + "go/types" + "path" + "slices" + "strconv" + "strings" + + "tailscale.com/util/must" +) + +// mustFormatFile formats a Go source file and adjust "json" imports. +// It panics if there are any parsing errors. +// +// - "encoding/json" is imported under the name "jsonv1" or "jsonv1std" +// - "encoding/json/v2" is rewritten to import "github.com/go-json-experiment/json" instead +// - "encoding/json/jsontext" is rewritten to import "github.com/go-json-experiment/json/jsontext" instead +// - "github.com/go-json-experiment/json" is imported under the name "jsonv2" +// - "github.com/go-json-experiment/json/v1" is imported under the name "jsonv1" +// +// If no changes to the file is made, it returns input. 
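+//
+// For example, mirroring the first case in format_test.go, imports such as
+//
+//	import (
+//		"encoding/json"
+//		jsonv2exp "github.com/go-json-experiment/json"
+//	)
+//
+// are rewritten (along with their call sites) to
+//
+//	import (
+//		jsonv1 "encoding/json"
+//		jsonv2 "github.com/go-json-experiment/json"
+//	)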
+func mustFormatFile(in []byte) (out []byte) { + fset := token.NewFileSet() + f := must.Get(parser.ParseFile(fset, "", in, parser.ParseComments)) + + // Check for the existence of "json" imports. + jsonImports := make(map[string][]*ast.ImportSpec) + for _, imp := range f.Imports { + switch pkgPath := must.Get(strconv.Unquote(imp.Path.Value)); pkgPath { + case + "encoding/json", + "encoding/json/v2", + "encoding/json/jsontext", + "github.com/go-json-experiment/json", + "github.com/go-json-experiment/json/v1", + "github.com/go-json-experiment/json/jsontext": + jsonImports[pkgPath] = append(jsonImports[pkgPath], imp) + } + } + if len(jsonImports) == 0 { + return in + } + + // Best-effort local type-check of the file + // to resolve local declarations to detect shadowed variables. + typeInfo := &types.Info{Uses: make(map[*ast.Ident]types.Object)} + (&types.Config{ + Error: func(err error) {}, + }).Check("", fset, []*ast.File{f}, typeInfo) + + // Rewrite imports to instead use "github.com/go-json-experiment/json". + // This ensures that code continues to build even if + // goexperiment.jsonv2 is *not* specified. + // As of https://github.com/go-json-experiment/json/pull/186, + // imports to "github.com/go-json-experiment/json" are identical + // to the standard library if built with goexperiment.jsonv2. + for fromPath, toPath := range map[string]string{ + "encoding/json/v2": "github.com/go-json-experiment/json", + "encoding/json/jsontext": "github.com/go-json-experiment/json/jsontext", + } { + for _, imp := range jsonImports[fromPath] { + imp.Path.Value = strconv.Quote(toPath) + jsonImports[toPath] = append(jsonImports[toPath], imp) + } + delete(jsonImports, fromPath) + } + + // While in a transitory state, where both v1 and v2 json imports + // may exist in our codebase, always explicitly import with + // either jsonv1 or jsonv2 in the package name to avoid ambiguities + // when looking at a particular Marshal or Unmarshal call site. + renames := make(map[string]string) // mapping of old names to new names + deletes := make(map[*ast.ImportSpec]bool) // set of imports to delete + for pkgPath, imps := range jsonImports { + var newName string + switch pkgPath { + case "encoding/json": + newName = "jsonv1" + // If "github.com/go-json-experiment/json/v1" is also imported, + // then use jsonv1std for "encoding/json" to avoid a conflict. + if len(jsonImports["github.com/go-json-experiment/json/v1"]) > 0 { + newName += "std" + } + case "github.com/go-json-experiment/json": + newName = "jsonv2" + case "github.com/go-json-experiment/json/v1": + newName = "jsonv1" + } + + // Rename the import if different than expected. + if oldName := importName(imps[0]); oldName != newName && newName != "" { + renames[oldName] = newName + pos := imps[0].Pos() // preserve original positioning + imps[0].Name = ast.NewIdent(newName) + imps[0].Name.NamePos = pos + } + + // For all redundant imports, use the first imported name. 
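+		// Duplicate imports of the same path are deleted below and their
+		// references renamed to the surviving import name (see the
+		// redundant-import case in format_test.go).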
+ for _, imp := range imps[1:] { + renames[importName(imp)] = importName(imps[0]) + deletes[imp] = true + } + } + if len(deletes) > 0 { + f.Imports = slices.DeleteFunc(f.Imports, func(imp *ast.ImportSpec) bool { + return deletes[imp] + }) + for _, decl := range f.Decls { + if genDecl, ok := decl.(*ast.GenDecl); ok && genDecl.Tok == token.IMPORT { + genDecl.Specs = slices.DeleteFunc(genDecl.Specs, func(spec ast.Spec) bool { + return deletes[spec.(*ast.ImportSpec)] + }) + } + } + } + if len(renames) > 0 { + ast.Walk(astVisitor(func(n ast.Node) bool { + if sel, ok := n.(*ast.SelectorExpr); ok { + if id, ok := sel.X.(*ast.Ident); ok { + // Just because the selector looks like "json.Marshal" + // does not mean that it is referencing the "json" package. + // There could be a local "json" declaration that shadows + // the package import. Check partial type information + // to see if there was a local declaration. + if obj, ok := typeInfo.Uses[id]; ok { + if _, ok := obj.(*types.PkgName); !ok { + return true + } + } + + if newName, ok := renames[id.String()]; ok { + id.Name = newName + } + } + } + return true + }), f) + } + + bb := new(bytes.Buffer) + must.Do(format.Node(bb, fset, f)) + return must.Get(format.Source(bb.Bytes())) +} + +// importName is the local package name used for an import. +// If no explicit local name is used, then it uses string parsing +// to derive the package name from the path, relying on the convention +// that the package name is the base name of the package path. +func importName(imp *ast.ImportSpec) string { + if imp.Name != nil { + return imp.Name.String() + } + pkgPath, _ := strconv.Unquote(imp.Path.Value) + pkgPath = strings.TrimRight(pkgPath, "/v0123456789") // exclude version directories + return path.Base(pkgPath) +} + +// astVisitor is a function that implements [ast.Visitor]. 
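+// Returning false from the function stops traversal into the children of
+// the visited node.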
+type astVisitor func(ast.Node) bool + +func (f astVisitor) Visit(node ast.Node) ast.Visitor { + if !f(node) { + return nil + } + return f +} diff --git a/cmd/jsonimports/format_test.go b/cmd/jsonimports/format_test.go new file mode 100644 index 000000000..28654eb45 --- /dev/null +++ b/cmd/jsonimports/format_test.go @@ -0,0 +1,162 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "go/format" + "testing" + + "tailscale.com/util/must" + "tailscale.com/util/safediff" +) + +func TestFormatFile(t *testing.T) { + tests := []struct{ in, want string }{{ + in: `package foobar + + import ( + "encoding/json" + jsonv2exp "github.com/go-json-experiment/json" + ) + + func main() { + json.Marshal() + jsonv2exp.Marshal() + { + var json T // deliberately shadow "json" package name + json.Marshal() // should not be re-written + } + } + `, + want: `package foobar + + import ( + jsonv1 "encoding/json" + jsonv2 "github.com/go-json-experiment/json" + ) + + func main() { + jsonv1.Marshal() + jsonv2.Marshal() + { + var json T // deliberately shadow "json" package name + json.Marshal() // should not be re-written + } + } + `, + }, { + in: `package foobar + + import ( + "github.com/go-json-experiment/json" + jsonv2exp "github.com/go-json-experiment/json" + ) + + func main() { + json.Marshal() + jsonv2exp.Marshal() + } + `, + want: `package foobar + import ( + jsonv2 "github.com/go-json-experiment/json" + ) + func main() { + jsonv2.Marshal() + jsonv2.Marshal() + } + `, + }, { + in: `package foobar + import "github.com/go-json-experiment/json/v1" + func main() { + json.Marshal() + } + `, + want: `package foobar + import jsonv1 "github.com/go-json-experiment/json/v1" + func main() { + jsonv1.Marshal() + } + `, + }, { + in: `package foobar + import ( + "encoding/json" + jsonv1in2 "github.com/go-json-experiment/json/v1" + ) + func main() { + json.Marshal() + jsonv1in2.Marshal() + } + `, + want: `package foobar + import ( + jsonv1std "encoding/json" + jsonv1 "github.com/go-json-experiment/json/v1" + ) + func main() { + jsonv1std.Marshal() + jsonv1.Marshal() + } + `, + }, { + in: `package foobar + import ( + "encoding/json" + jsonv1in2 "github.com/go-json-experiment/json/v1" + ) + func main() { + json.Marshal() + jsonv1in2.Marshal() + } + `, + want: `package foobar + import ( + jsonv1std "encoding/json" + jsonv1 "github.com/go-json-experiment/json/v1" + ) + func main() { + jsonv1std.Marshal() + jsonv1.Marshal() + } + `, + }, { + in: `package foobar + import ( + "encoding/json" + j2 "encoding/json/v2" + "encoding/json/jsontext" + ) + func main() { + json.Marshal() + j2.Marshal() + jsontext.NewEncoder + } + `, + want: `package foobar + import ( + jsonv1 "encoding/json" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + ) + func main() { + jsonv1.Marshal() + jsonv2.Marshal() + jsontext.NewEncoder + } + `, + }} + for _, tt := range tests { + got := string(must.Get(format.Source([]byte(tt.in)))) + got = string(mustFormatFile([]byte(got))) + want := string(must.Get(format.Source([]byte(tt.want)))) + if got != want { + diff, _ := safediff.Lines(got, want, -1) + t.Errorf("mismatch (-got +want)\n%s", diff) + t.Error(got) + t.Error(want) + } + } +} diff --git a/cmd/jsonimports/jsonimports.go b/cmd/jsonimports/jsonimports.go new file mode 100644 index 000000000..4be2e10cb --- /dev/null +++ b/cmd/jsonimports/jsonimports.go @@ -0,0 +1,124 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause 
+ +// The jsonimports tool formats all Go source files in the repository +// to enforce that "json" imports are consistent. +// +// With Go 1.25, the "encoding/json/v2" and "encoding/json/jsontext" +// packages are now available under goexperiment.jsonv2. +// This leads to possible confusion over the following: +// +// - "encoding/json" +// - "encoding/json/v2" +// - "encoding/json/jsontext" +// - "github.com/go-json-experiment/json/v1" +// - "github.com/go-json-experiment/json" +// - "github.com/go-json-experiment/json/jsontext" +// +// In order to enforce consistent usage, we apply the following rules: +// +// - Until the Go standard library formally accepts "encoding/json/v2" +// and "encoding/json/jsontext" into the standard library +// (i.e., they are no longer considered experimental), +// we forbid any code from directly importing those packages. +// Go code should instead import "github.com/go-json-experiment/json" +// and "github.com/go-json-experiment/json/jsontext". +// The latter packages contain aliases to the standard library +// if built on Go 1.25 with the goexperiment.jsonv2 tag specified. +// +// - Imports of "encoding/json" or "github.com/go-json-experiment/json/v1" +// must be explicitly imported under the package name "jsonv1". +// If both packages need to be imported, then the former should +// be imported under the package name "jsonv1std". +// +// - Imports of "github.com/go-json-experiment/json" +// must be explicitly imported under the package name "jsonv2". +// +// The latter two rules exist to provide clarity when reading code. +// Without them, it is unclear whether "json.Marshal" refers to v1 or v2. +// With them, however, it is clear that "jsonv1.Marshal" is calling v1 and +// that "jsonv2.Marshal" is calling v2. +// +// TODO(@joetsai): At this present moment, there is no guidance given on +// whether to use v1 or v2 for newly written Go source code. +// I will write a document in the near future providing more guidance. +// Feel free to continue using v1 "encoding/json" as you are accustomed to. +package main + +import ( + "bytes" + "flag" + "fmt" + "os" + "os/exec" + "runtime" + "strings" + "sync" + + "tailscale.com/syncs" + "tailscale.com/util/must" + "tailscale.com/util/safediff" +) + +func main() { + update := flag.Bool("update", false, "update all Go source files") + flag.Parse() + + // Change working directory to Git repository root. + repoRoot := strings.TrimSuffix(string(must.Get(exec.Command( + "git", "rev-parse", "--show-toplevel", + ).Output())), "\n") + must.Do(os.Chdir(repoRoot)) + + // Iterate over all indexed files in the Git repository. + var printMu sync.Mutex + var group sync.WaitGroup + sema := syncs.NewSemaphore(runtime.NumCPU()) + var numDiffs int + files := string(must.Get(exec.Command("git", "ls-files").Output())) + for file := range strings.Lines(files) { + sema.Acquire() + group.Go(func() { + defer sema.Release() + + // Ignore non-Go source files. + file = strings.TrimSuffix(file, "\n") + if !strings.HasSuffix(file, ".go") { + return + } + + // Format all "json" imports in the Go source file. + srcIn := must.Get(os.ReadFile(file)) + srcOut := mustFormatFile(srcIn) + + // Print differences with each formatted file. + if !bytes.Equal(srcIn, srcOut) { + numDiffs++ + + printMu.Lock() + fmt.Println(file) + lines, _ := safediff.Lines(string(srcIn), string(srcOut), -1) + for line := range strings.Lines(lines) { + fmt.Print("\t", line) + } + fmt.Println() + printMu.Unlock() + + // If -update is specified, write out the changes. 
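+				// Without -update, the diff above is only reported and main
+				// exits non-zero once all files have been processed.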
+ if *update { + mode := must.Get(os.Stat(file)).Mode() + must.Do(os.WriteFile(file, srcOut, mode)) + } + } + }) + } + group.Wait() + + // Report whether any differences were detected. + if numDiffs > 0 && !*update { + fmt.Printf(`%d files with "json" imports that need formatting`+"\n", numDiffs) + fmt.Println("Please run:") + fmt.Println("\t./tool/go run tailscale.com/cmd/jsonimports -update") + os.Exit(1) + } +} diff --git a/util/safediff/diff.go b/util/safediff/diff.go new file mode 100644 index 000000000..cf8add94b --- /dev/null +++ b/util/safediff/diff.go @@ -0,0 +1,280 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package safediff computes the difference between two lists. +// +// It is guaranteed to run in O(n), but may not produce an optimal diff. +// Most diffing algorithms produce optimal diffs but run in O(n²). +// It is safe to pass in untrusted input. +package safediff + +import ( + "bytes" + "fmt" + "math" + "strings" + "unicode" + + "github.com/google/go-cmp/cmp" +) + +var diffTest = false + +// Lines constructs a humanly readable line-by-line diff from x to y. +// The output (if multiple lines) is guaranteed to be no larger than maxSize, +// by truncating the output if necessary. A negative maxSize enforces no limit. +// +// Example diff: +// +// … 440 identical lines +// "ssh": [ +// … 35 identical lines +// { +// - "src": ["maisem@tailscale.com"], +// - "dst": ["tag:maisem-test"], +// - "users": ["maisem", "root"], +// - "action": "check", +// - // "recorder": ["100.12.34.56:80"], +// + "src": ["maisem@tailscale.com"], +// + "dst": ["tag:maisem-test"], +// + "users": ["maisem", "root"], +// + "action": "check", +// + "recorder": ["node:recorder-2"], +// }, +// … 77 identical lines +// ], +// … 345 identical lines +// +// Meaning of each line prefix: +// +// - '…' precedes a summary statement +// - ' ' precedes an identical line printed for context +// - '-' precedes a line removed from x +// - '+' precedes a line inserted from y +// +// The diffing algorithm runs in O(n) and is safe to use with untrusted inputs. +func Lines(x, y string, maxSize int) (out string, truncated bool) { + // Convert x and y into a slice of lines and compute the edit-script. + xs := strings.Split(x, "\n") + ys := strings.Split(y, "\n") + es := diffStrings(xs, ys) + + // Modify the edit-script to support printing identical lines of context. + const identicalContext edit = '*' // special edit code to indicate printed line + var xi, yi int // index into xs or ys + isIdentical := func(e edit) bool { return e == identical || e == identicalContext } + indentOf := func(s string) string { return s[:len(s)-len(strings.TrimLeftFunc(s, unicode.IsSpace))] } + for i, e := range es { + if isIdentical(e) { + // Print current line if adjacent symbols are non-identical. + switch { + case i-1 >= 0 && !isIdentical(es[i-1]): + es[i] = identicalContext + case i+1 < len(es) && !isIdentical(es[i+1]): + es[i] = identicalContext + } + } else { + // Print any preceding or succeeding lines, + // where the leading indent is a prefix of the current indent. + // Indentation often indicates a parent-child relationship + // in structured source code. 
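+			// addParents walks outward from the changed line in the given
+			// direction, marking each identical line for printing whenever its
+			// indentation is a strict, non-empty prefix of the current child's
+			// indentation (tightening the child indent as it goes).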
+ addParents := func(ss []string, si, direction int) { + childIndent := indentOf(ss[si]) + for j := direction; i+j >= 0 && i+j < len(es) && isIdentical(es[i+j]); j += direction { + parentIndent := indentOf(ss[si+j]) + if strings.HasPrefix(childIndent, parentIndent) && len(parentIndent) < len(childIndent) && parentIndent != "" { + es[i+j] = identicalContext + childIndent = parentIndent + } + } + } + switch e { + case removed, modified: // arbitrarily use the x value for modified values + addParents(xs, xi, -1) + addParents(xs, xi, +1) + case inserted: + addParents(ys, yi, -1) + addParents(ys, yi, +1) + } + } + if e != inserted { + xi++ + } + if e != removed { + yi++ + } + } + + // Show the line for a single hidden identical line, + // since it occupies the same vertical height. + for i, e := range es { + if e == identical { + prevNotIdentical := i-1 < 0 || es[i-1] != identical + nextNotIdentical := i+1 >= len(es) || es[i+1] != identical + if prevNotIdentical && nextNotIdentical { + es[i] = identicalContext + } + } + } + + // Adjust the maxSize, reserving space for the final summary. + if maxSize < 0 { + maxSize = math.MaxInt + } + maxSize -= len(stats{len(xs) + len(ys), len(xs), len(ys)}.appendText(nil)) + + // mayAppendLine appends a line if it does not exceed maxSize. + // Otherwise, it just updates prevStats. + var buf []byte + var prevStats stats + mayAppendLine := func(edit edit, line string) { + // Append the stats (if non-zero) and the line text. + // The stats reports the number of preceding identical lines. + if !truncated { + bufLen := len(buf) // original length (in case we exceed maxSize) + if !prevStats.isZero() { + buf = prevStats.appendText(buf) + prevStats = stats{} // just printed, so clear the stats + } + buf = fmt.Appendf(buf, "%c %s\n", edit, line) + truncated = len(buf) > maxSize + if !truncated { + return + } + buf = buf[:bufLen] // restore original buffer contents + } + + // Output is truncated, so just update the statistics. + switch edit { + case identical: + prevStats.numIdentical++ + case removed: + prevStats.numRemoved++ + case inserted: + prevStats.numInserted++ + } + } + + // Process the entire edit script. 
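+	// Each iteration consumes the leading run of a single edit code, so
+	// contiguous removals, insertions, or hidden identical lines are
+	// handled as one group.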
+ for len(es) > 0 { + num := len(es) - len(bytes.TrimLeft(es, string(es[:1]))) + switch es[0] { + case identical: + prevStats.numIdentical += num + xs, ys = xs[num:], ys[num:] + case identicalContext: + for n := len(xs) - num; len(xs) > n; xs, ys = xs[1:], ys[1:] { + mayAppendLine(identical, xs[0]) // implies xs[0] == ys[0] + } + case modified: + for n := len(xs) - num; len(xs) > n; xs = xs[1:] { + mayAppendLine(removed, xs[0]) + } + for n := len(ys) - num; len(ys) > n; ys = ys[1:] { + mayAppendLine(inserted, ys[0]) + } + case removed: + for n := len(xs) - num; len(xs) > n; xs = xs[1:] { + mayAppendLine(removed, xs[0]) + } + case inserted: + for n := len(ys) - num; len(ys) > n; ys = ys[1:] { + mayAppendLine(inserted, ys[0]) + } + } + es = es[num:] + } + if len(xs)+len(ys)+len(es) > 0 { + panic("BUG: slices not fully consumed") + } + + if !prevStats.isZero() { + buf = prevStats.appendText(buf) // may exceed maxSize + } + return string(buf), truncated +} + +type stats struct{ numIdentical, numRemoved, numInserted int } + +func (s stats) isZero() bool { return s.numIdentical+s.numRemoved+s.numInserted == 0 } + +func (s stats) appendText(b []byte) []byte { + switch { + case s.numIdentical > 0 && s.numRemoved > 0 && s.numInserted > 0: + return fmt.Appendf(b, "… %d identical, %d removed, and %d inserted lines\n", s.numIdentical, s.numRemoved, s.numInserted) + case s.numIdentical > 0 && s.numRemoved > 0: + return fmt.Appendf(b, "… %d identical and %d removed lines\n", s.numIdentical, s.numRemoved) + case s.numIdentical > 0 && s.numInserted > 0: + return fmt.Appendf(b, "… %d identical and %d inserted lines\n", s.numIdentical, s.numInserted) + case s.numRemoved > 0 && s.numInserted > 0: + return fmt.Appendf(b, "… %d removed and %d inserted lines\n", s.numRemoved, s.numInserted) + case s.numIdentical > 0: + return fmt.Appendf(b, "… %d identical lines\n", s.numIdentical) + case s.numRemoved > 0: + return fmt.Appendf(b, "… %d removed lines\n", s.numRemoved) + case s.numInserted > 0: + return fmt.Appendf(b, "… %d inserted lines\n", s.numInserted) + default: + return fmt.Appendf(b, "…\n") + } +} + +// diffStrings computes an edit-script of two slices of strings. +// +// This calls cmp.Equal to access the "github.com/go-cmp/cmp/internal/diff" +// implementation, which has an O(N) diffing algorithm. It is not guaranteed +// to produce an optimal edit-script, but protects our runtime against +// adversarial inputs that would wreck the optimal O(N²) algorithm used by +// most diffing packages available in open-source. +// +// TODO(https://go.dev/issue/58893): Use "golang.org/x/tools/diff" instead? +func diffStrings(xs, ys []string) []edit { + d := new(diffRecorder) + cmp.Equal(xs, ys, cmp.Reporter(d)) + if diffTest { + numRemoved := bytes.Count(d.script, []byte{removed}) + numInserted := bytes.Count(d.script, []byte{inserted}) + if len(xs) != len(d.script)-numInserted || len(ys) != len(d.script)-numRemoved { + panic("BUG: edit-script is inconsistent") + } + } + return d.script +} + +type edit = byte + +const ( + identical edit = ' ' // equal symbol in both x and y + modified edit = '~' // modified symbol in both x and y + removed edit = '-' // removed symbol from x + inserted edit = '+' // inserted symbol from y +) + +// diffRecorder reproduces an edit-script, essentially recording +// the edit-script from "github.com/google/go-cmp/cmp/internal/diff". +// This implements the cmp.Reporter interface. 
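+//
+// cmp.Equal invokes PushStep/Report/PopStep for every compared element;
+// diffRecorder only acts on results reported at a slice index, appending
+// one edit code per input line.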
+type diffRecorder struct { + last cmp.PathStep + script []edit +} + +func (d *diffRecorder) PushStep(ps cmp.PathStep) { d.last = ps } + +func (d *diffRecorder) Report(rs cmp.Result) { + if si, ok := d.last.(cmp.SliceIndex); ok { + if rs.Equal() { + d.script = append(d.script, identical) + } else { + switch xi, yi := si.SplitKeys(); { + case xi >= 0 && yi >= 0: + d.script = append(d.script, modified) + case xi >= 0: + d.script = append(d.script, removed) + case yi >= 0: + d.script = append(d.script, inserted) + } + } + } +} + +func (d *diffRecorder) PopStep() { d.last = nil } diff --git a/util/safediff/diff_test.go b/util/safediff/diff_test.go new file mode 100644 index 000000000..e580bd922 --- /dev/null +++ b/util/safediff/diff_test.go @@ -0,0 +1,196 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package safediff + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func init() { diffTest = true } + +func TestLines(t *testing.T) { + // The diffs shown below technically depend on the stability of cmp, + // but that should be fine for sufficiently simple diffs like these. + // If the output does change, that would suggest a significant regression + // in the optimality of cmp's diffing algorithm. + + x := `{ + "firstName": "John", + "lastName": "Smith", + "isAlive": true, + "age": 27, + "address": { + "streetAddress": "21 2nd Street", + "city": "New York", + "state": "NY", + "postalCode": "10021-3100" + }, + "phoneNumbers": [{ + "type": "home", + "number": "212 555-1234" + }, { + "type": "office", + "number": "646 555-4567" + }], + "children": [ + "Catherine", + "Thomas", + "Trevor" + ], + "spouse": null +}` + y := x + y = strings.ReplaceAll(y, `"New York"`, `"Los Angeles"`) + y = strings.ReplaceAll(y, `"NY"`, `"CA"`) + y = strings.ReplaceAll(y, `"646 555-4567"`, `"315 252-8888"`) + + wantDiff := ` +… 5 identical lines + "address": { + "streetAddress": "21 2nd Street", +- "city": "New York", +- "state": "NY", ++ "city": "Los Angeles", ++ "state": "CA", + "postalCode": "10021-3100" + }, +… 3 identical lines + }, { + "type": "office", +- "number": "646 555-4567" ++ "number": "315 252-8888" + }], +… 7 identical lines +`[1:] + gotDiff, gotTrunc := Lines(x, y, -1) + if d := cmp.Diff(gotDiff, wantDiff); d != "" { + t.Errorf("Lines mismatch (-got +want):\n%s\ngot:\n%s\nwant:\n%s", d, gotDiff, wantDiff) + } else if gotTrunc == true { + t.Errorf("Lines: output unexpectedly truncated") + } + + wantDiff = ` +… 5 identical lines + "address": { + "streetAddress": "21 2nd Street", +- "city": "New York", +- "state": "NY", ++ "city": "Los Angeles", +… 15 identical, 1 removed, and 2 inserted lines +`[1:] + gotDiff, gotTrunc = Lines(x, y, 200) + if d := cmp.Diff(gotDiff, wantDiff); d != "" { + t.Errorf("Lines mismatch (-got +want):\n%s\ngot:\n%s\nwant:\n%s", d, gotDiff, wantDiff) + } else if gotTrunc == false { + t.Errorf("Lines: output unexpectedly not truncated") + } + + wantDiff = "… 17 identical, 3 removed, and 3 inserted lines\n" + gotDiff, gotTrunc = Lines(x, y, 0) + if d := cmp.Diff(gotDiff, wantDiff); d != "" { + t.Errorf("Lines mismatch (-got +want):\n%s\ngot:\n%s\nwant:\n%s", d, gotDiff, wantDiff) + } else if gotTrunc == false { + t.Errorf("Lines: output unexpectedly not truncated") + } + + x = `{ + "unrelated": [ + "unrelated", + ], + "related": { + "unrelated": [ + "unrelated", + ], + "related": { + "unrelated": [ + "unrelated", + ], + "related": { + "related": "changed", + }, + "unrelated": [ + "unrelated", + ], + }, + 
"unrelated": [ + "unrelated", + ], + }, + "unrelated": [ + "unrelated", + ], +}` + y = strings.ReplaceAll(x, "changed", "CHANGED") + + wantDiff = ` +… 4 identical lines + "related": { +… 3 identical lines + "related": { +… 3 identical lines + "related": { +- "related": "changed", ++ "related": "CHANGED", + }, +… 3 identical lines + }, +… 3 identical lines + }, +… 4 identical lines +`[1:] + gotDiff, gotTrunc = Lines(x, y, -1) + if d := cmp.Diff(gotDiff, wantDiff); d != "" { + t.Errorf("Lines mismatch (-got +want):\n%s\ngot:\n%s\nwant:\n%s", d, gotDiff, wantDiff) + } else if gotTrunc == true { + t.Errorf("Lines: output unexpectedly truncated") + } + + x = `{ + "ACLs": [ + { + "Action": "accept", + "Users": ["group:all"], + "Ports": ["tag:tmemes:80"], + }, + ], +}` + y = strings.ReplaceAll(x, "tag:tmemes:80", "tag:tmemes:80,8383") + wantDiff = ` + { + "ACLs": [ + { + "Action": "accept", + "Users": ["group:all"], +- "Ports": ["tag:tmemes:80"], ++ "Ports": ["tag:tmemes:80,8383"], + }, + ], + } +`[1:] + gotDiff, gotTrunc = Lines(x, y, -1) + if d := cmp.Diff(gotDiff, wantDiff); d != "" { + t.Errorf("Lines mismatch (-got +want):\n%s\ngot:\n%s\nwant:\n%s", d, gotDiff, wantDiff) + } else if gotTrunc == true { + t.Errorf("Lines: output unexpectedly truncated") + } +} + +func FuzzDiff(f *testing.F) { + f.Fuzz(func(t *testing.T, x, y string, maxSize int) { + const maxInput = 1e3 + if len(x) > maxInput { + x = x[:maxInput] + } + if len(y) > maxInput { + y = y[:maxInput] + } + diff, _ := Lines(x, y, maxSize) // make sure this does not panic + if strings.Count(diff, "\n") > 1 && maxSize >= 0 && len(diff) > maxSize { + t.Fatal("maxSize exceeded") + } + }) +} From 478342a642af49278237e74b994484c107b780d2 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Tue, 28 Oct 2025 14:48:37 -0700 Subject: [PATCH 1597/1708] wgengine/netlog: embed node information in network flow logs (#17668) This rewrites the netlog package to support embedding node information in network flow logs. Some bit of complexity comes in trying to pre-compute the expected size of the log message after JSON serialization to ensure that we can respect maximum body limits in log uploading. We also fix a bug in tstun, where we were recording the IP address after SNAT, which was resulting in non-sensible connection flows being logged. 
Updates tailscale/corp#33352 Signed-off-by: Joe Tsai --- net/tstun/wrap.go | 10 +- types/netlogtype/netlogtype.go | 47 +++- wgengine/netlog/netlog.go | 461 ++++++++++++++++++++++++--------- wgengine/netlog/netlog_omit.go | 9 +- wgengine/netlog/netlog_test.go | 236 +++++++++++++++++ wgengine/netlog/record.go | 196 ++++++++++++++ wgengine/netlog/record_test.go | 255 ++++++++++++++++++ wgengine/netlog/stats.go | 222 ---------------- wgengine/netlog/stats_test.go | 235 ----------------- wgengine/userspace.go | 5 +- 10 files changed, 1085 insertions(+), 591 deletions(-) create mode 100644 wgengine/netlog/netlog_test.go create mode 100644 wgengine/netlog/record.go create mode 100644 wgengine/netlog/record_test.go delete mode 100644 wgengine/netlog/stats.go delete mode 100644 wgengine/netlog/stats_test.go diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 70cc7118a..db4f689bf 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -967,6 +967,11 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { continue } } + if buildfeatures.HasNetLog { + if update := t.connCounter.Load(); update != nil { + updateConnCounter(update, p.Buffer(), false) + } + } // Make sure to do SNAT after filtering, so that any flow tracking in // the filter sees the original source address. See #12133. @@ -976,11 +981,6 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { panic(fmt.Sprintf("short copy: %d != %d", n, len(data)-res.dataOffset)) } sizes[buffsPos] = n - if buildfeatures.HasNetLog { - if update := t.connCounter.Load(); update != nil { - updateConnCounter(update, p.Buffer(), false) - } - } buffsPos++ } if buffsGRO != nil { diff --git a/types/netlogtype/netlogtype.go b/types/netlogtype/netlogtype.go index a29ea6f03..86d645b35 100644 --- a/types/netlogtype/netlogtype.go +++ b/types/netlogtype/netlogtype.go @@ -21,6 +21,9 @@ type Message struct { Start time.Time `json:"start"` // inclusive End time.Time `json:"end"` // inclusive + SrcNode Node `json:"srcNode,omitzero"` + DstNodes []Node `json:"dstNodes,omitempty"` + VirtualTraffic []ConnectionCounts `json:"virtualTraffic,omitempty"` SubnetTraffic []ConnectionCounts `json:"subnetTraffic,omitempty"` ExitTraffic []ConnectionCounts `json:"exitTraffic,omitempty"` @@ -28,14 +31,30 @@ type Message struct { } const ( - messageJSON = `{"nodeId":"n0123456789abcdefCNTRL",` + maxJSONTimeRange + `,` + minJSONTraffic + `}` + messageJSON = `{"nodeId":` + maxJSONStableID + `,` + minJSONNodes + `,` + maxJSONTimeRange + `,` + minJSONTraffic + `}` + maxJSONStableID = `"n0123456789abcdefCNTRL"` + minJSONNodes = `"srcNode":{},"dstNodes":[]` maxJSONTimeRange = `"start":` + maxJSONRFC3339 + `,"end":` + maxJSONRFC3339 maxJSONRFC3339 = `"0001-01-01T00:00:00.000000000Z"` minJSONTraffic = `"virtualTraffic":{},"subnetTraffic":{},"exitTraffic":{},"physicalTraffic":{}` - // MaxMessageJSONSize is the overhead size of Message when it is - // serialized as JSON assuming that each traffic map is populated. - MaxMessageJSONSize = len(messageJSON) + // MinMessageJSONSize is the overhead size of Message when it is + // serialized as JSON assuming that each field is minimally populated. + // Each [Node] occupies at least [MinNodeJSONSize]. + // Each [ConnectionCounts] occupies at most [MaxConnectionCountsJSONSize]. 
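+	// A rough upper bound for a fully populated message is therefore
+	// MinMessageJSONSize plus MinNodeJSONSize (and the node's string
+	// fields) per distinct node, plus MaxConnectionCountsJSONSize per
+	// connection.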
+ MinMessageJSONSize = len(messageJSON) + + nodeJSON = `{"nodeId":` + maxJSONStableID + `,"name":"","addresses":` + maxJSONAddrs + `,"user":"","tags":[]}` + maxJSONAddrV4 = `"255.255.255.255"` + maxJSONAddrV6 = `"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"` + maxJSONAddrs = `[` + maxJSONAddrV4 + `,` + maxJSONAddrV6 + `]` + + // MinNodeJSONSize is the overhead size of Node when it is + // serialized as JSON assuming that each field is minimally populated. + // It does not account for bytes occupied by + // [Node.Name], [Node.User], or [Node.Tags]. The [Node.Addresses] + // is assumed to contain a pair of IPv4 and IPv6 address. + MinNodeJSONSize = len(nodeJSON) maxJSONConnCounts = `{` + maxJSONConn + `,` + maxJSONCounts + `}` maxJSONConn = `"proto":` + maxJSONProto + `,"src":` + maxJSONAddrPort + `,"dst":` + maxJSONAddrPort @@ -52,6 +71,26 @@ const ( MaxConnectionCountsJSONSize = len(maxJSONConnCounts) ) +// Node is information about a node. +type Node struct { + // NodeID is the stable ID of the node. + NodeID tailcfg.StableNodeID `json:"nodeId"` + + // Name is the fully-qualified name of the node. + Name string `json:"name,omitzero"` // e.g., "carbonite.example.ts.net" + + // Addresses are the Tailscale IP addresses of the node. + Addresses []netip.Addr `json:"addresses,omitempty"` + + // User is the user that owns the node. + // It is not populated if the node is tagged. + User string `json:"user,omitzero"` // e.g., "johndoe@example.com" + + // Tags are the tags of the node. + // It is not populated if the node is owned by a user. + Tags []string `json:"tags,omitempty"` // e.g., ["tag:prod","tag:logs"] +} + // ConnectionCounts is a flattened struct of both a connection and counts. type ConnectionCounts struct { Connection diff --git a/wgengine/netlog/netlog.go b/wgengine/netlog/netlog.go index 2984df994..9809d1ce6 100644 --- a/wgengine/netlog/netlog.go +++ b/wgengine/netlog/netlog.go @@ -10,8 +10,6 @@ package netlog import ( "cmp" "context" - "encoding/json" - "errors" "fmt" "io" "log" @@ -26,12 +24,18 @@ import ( "tailscale.com/net/netmon" "tailscale.com/net/sockstats" "tailscale.com/net/tsaddr" - "tailscale.com/tailcfg" + "tailscale.com/types/ipproto" + "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/netlogfunc" "tailscale.com/types/netlogtype" + "tailscale.com/types/netmap" "tailscale.com/util/eventbus" + "tailscale.com/util/set" "tailscale.com/wgengine/router" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" ) // pollPeriod specifies how often to poll for network traffic. @@ -49,25 +53,38 @@ func (noopDevice) SetConnectionCounter(netlogfunc.ConnectionCounter) {} // Logger logs statistics about every connection. // At present, it only logs connections within a tailscale network. -// Exit node traffic is not logged for privacy reasons. +// By default, exit node traffic is not logged for privacy reasons +// unless the Tailnet administrator opts-into explicit logging. // The zero value is ready for use. type Logger struct { - mu sync.Mutex // protects all fields below + mu sync.Mutex // protects all fields below + logf logger.Logf + + // shutdownLocked shuts down the logger. + // The mutex must be held when calling. 
+ shutdownLocked func(context.Context) error - logger *logtail.Logger - stats *statistics - tun Device - sock Device + record record // the current record of network connection flows + recordLen int // upper bound on JSON length of record + recordsChan chan record // set to nil when shutdown + flushTimer *time.Timer // fires when record should flush to recordsChan - addrs map[netip.Addr]bool - prefixes map[netip.Prefix]bool + // Information about Tailscale nodes. + // These are read-only once updated by ReconfigNetworkMap. + selfNode nodeUser + allNodes map[netip.Addr]nodeUser // includes selfNode; nodeUser values are always valid + + // Information about routes. + // These are read-only once updated by ReconfigRoutes. + routeAddrs set.Set[netip.Addr] + routePrefixes []netip.Prefix } // Running reports whether the logger is running. func (nl *Logger) Running() bool { nl.mu.Lock() defer nl.mu.Unlock() - return nl.logger != nil + return nl.shutdownLocked != nil } var testClient *http.Client @@ -75,9 +92,9 @@ var testClient *http.Client // Startup starts an asynchronous network logger that monitors // statistics for the provided tun and/or sock device. // -// The tun Device captures packets within the tailscale network, -// where at least one address is a tailscale IP address. -// The source is always from the perspective of the current node. +// The tun [Device] captures packets within the tailscale network, +// where at least one address is usually a tailscale IP address. +// The source is usually from the perspective of the current node. // If one of the other endpoint is not a tailscale IP address, // then it suggests the use of a subnet router or exit node. // For example, when using a subnet router, the source address is @@ -89,28 +106,33 @@ var testClient *http.Client // In this case, the node acting as a subnet router is acting on behalf // of some remote endpoint within the subnet range. // The tun is used to populate the VirtualTraffic, SubnetTraffic, -// and ExitTraffic fields in Message. +// and ExitTraffic fields in [netlogtype.Message]. // -// The sock Device captures packets at the magicsock layer. +// The sock [Device] captures packets at the magicsock layer. // The source is always a tailscale IP address and the destination // is a non-tailscale IP address to contact for that particular tailscale node. // The IP protocol and source port are always zero. -// The sock is used to populated the PhysicalTraffic field in Message. +// The sock is used to populated the PhysicalTraffic field in [netlogtype.Message]. +// // The netMon parameter is optional; if non-nil it's used to do faster interface lookups. -func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID logid.PrivateID, tun, sock Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus, logExitFlowEnabledEnabled bool) error { +func (nl *Logger) Startup(logf logger.Logf, nm *netmap.NetworkMap, nodeLogID, domainLogID logid.PrivateID, tun, sock Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus, logExitFlowEnabledEnabled bool) error { nl.mu.Lock() defer nl.mu.Unlock() - if nl.logger != nil { - return fmt.Errorf("network logger already running for %v", nl.logger.PrivateID().Public()) + + if nl.shutdownLocked != nil { + return fmt.Errorf("network logger already running") } + nl.selfNode, nl.allNodes = makeNodeMaps(nm) // Startup a log stream to Tailscale's logging service. 
- logf := log.Printf + if logf == nil { + logf = log.Printf + } httpc := &http.Client{Transport: logpolicy.NewLogtailTransport(logtail.DefaultHost, netMon, health, logf)} if testClient != nil { httpc = testClient } - nl.logger = logtail.NewLogger(logtail.Config{ + logger := logtail.NewLogger(logtail.Config{ Collection: "tailtraffic.log.tailscale.io", PrivateID: nodeLogID, CopyPrivateID: domainLogID, @@ -124,108 +146,311 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo IncludeProcID: true, IncludeProcSequence: true, }, logf) - nl.logger.SetSockstatsLabel(sockstats.LabelNetlogLogger) - - // Startup a data structure to track per-connection statistics. - // There is a maximum size for individual log messages that logtail - // can upload to the Tailscale log service, so stay below this limit. - const maxLogSize = 256 << 10 - const maxConns = (maxLogSize - netlogtype.MaxMessageJSONSize) / netlogtype.MaxConnectionCountsJSONSize - nl.stats = newStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { - nl.mu.Lock() - addrs := nl.addrs - prefixes := nl.prefixes - nl.mu.Unlock() - recordStatistics(nl.logger, nodeID, start, end, virtual, physical, addrs, prefixes, logExitFlowEnabledEnabled) - }) + logger.SetSockstatsLabel(sockstats.LabelNetlogLogger) // Register the connection tracker into the TUN device. - nl.tun = cmp.Or[Device](tun, noopDevice{}) - nl.tun.SetConnectionCounter(nl.stats.UpdateVirtual) + tun = cmp.Or[Device](tun, noopDevice{}) + tun.SetConnectionCounter(nl.updateVirtConn) // Register the connection tracker into magicsock. - nl.sock = cmp.Or[Device](sock, noopDevice{}) - nl.sock.SetConnectionCounter(nl.stats.UpdatePhysical) + sock = cmp.Or[Device](sock, noopDevice{}) + sock.SetConnectionCounter(nl.updatePhysConn) + + // Startup a goroutine to record log messages. + // This is done asynchronously so that the cost of serializing + // the network flow log message never stalls processing of packets. + nl.record = record{} + nl.recordLen = 0 + nl.recordsChan = make(chan record, 100) + recorderDone := make(chan struct{}) + go func(recordsChan chan record) { + defer close(recorderDone) + for rec := range recordsChan { + msg := rec.toMessage(false, !logExitFlowEnabledEnabled) + if b, err := jsonv2.Marshal(msg, jsontext.AllowInvalidUTF8(true)); err != nil { + if nl.logf != nil { + nl.logf("netlog: json.Marshal error: %v", err) + } + } else { + logger.Logf("%s", b) + } + } + }(nl.recordsChan) + + // Register the mechanism for shutting down. + nl.shutdownLocked = func(ctx context.Context) error { + tun.SetConnectionCounter(nil) + sock.SetConnectionCounter(nil) + + // Flush and process all pending records. + nl.flushRecordLocked() + close(nl.recordsChan) + nl.recordsChan = nil + <-recorderDone + recorderDone = nil + + // Try to upload all pending records. + err := logger.Shutdown(ctx) + + // Purge state. 
+ nl.shutdownLocked = nil + nl.selfNode = nodeUser{} + nl.allNodes = nil + nl.routeAddrs = nil + nl.routePrefixes = nil + + return err + } return nil } -func recordStatistics(logger *logtail.Logger, nodeID tailcfg.StableNodeID, start, end time.Time, connStats, sockStats map[netlogtype.Connection]netlogtype.Counts, addrs map[netip.Addr]bool, prefixes map[netip.Prefix]bool, logExitFlowEnabled bool) { - m := netlogtype.Message{NodeID: nodeID, Start: start.UTC(), End: end.UTC()} - - classifyAddr := func(a netip.Addr) (isTailscale, withinRoute bool) { - // NOTE: There could be mis-classifications where an address is treated - // as a Tailscale IP address because the subnet range overlaps with - // the subnet range that Tailscale IP addresses are allocated from. - // This should never happen for IPv6, but could happen for IPv4. - withinRoute = addrs[a] - for p := range prefixes { - if p.Contains(a) && p.Bits() > 0 { - withinRoute = true - break - } - } - return withinRoute && tsaddr.IsTailscaleIP(a), withinRoute && !tsaddr.IsTailscaleIP(a) +var ( + tailscaleServiceIPv4 = tsaddr.TailscaleServiceIP() + tailscaleServiceIPv6 = tsaddr.TailscaleServiceIPv6() +) + +func (nl *Logger) updateVirtConn(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, recv bool) { + // Network logging is defined as traffic between two Tailscale nodes. + // Traffic with the internal Tailscale service is not with another node + // and should not be logged. It also happens to be a high volume + // amount of discrete traffic flows (e.g., DNS lookups). + switch dst.Addr() { + case tailscaleServiceIPv4, tailscaleServiceIPv6: + return } - exitTraffic := make(map[netlogtype.Connection]netlogtype.Counts) - for conn, cnts := range connStats { - srcIsTailscaleIP, srcWithinSubnet := classifyAddr(conn.Src.Addr()) - dstIsTailscaleIP, dstWithinSubnet := classifyAddr(conn.Dst.Addr()) - switch { - case srcIsTailscaleIP && dstIsTailscaleIP: - m.VirtualTraffic = append(m.VirtualTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) - case srcWithinSubnet || dstWithinSubnet: - m.SubnetTraffic = append(m.SubnetTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) - default: - const anonymize = true - if anonymize && !logExitFlowEnabled { - // Only preserve the address if it is a Tailscale IP address. - srcOrig, dstOrig := conn.Src, conn.Dst - conn = netlogtype.Connection{} // scrub everything by default - if srcIsTailscaleIP { - conn.Src = netip.AddrPortFrom(srcOrig.Addr(), 0) - } - if dstIsTailscaleIP { - conn.Dst = netip.AddrPortFrom(dstOrig.Addr(), 0) - } - } - exitTraffic[conn] = exitTraffic[conn].Add(cnts) + nl.mu.Lock() + defer nl.mu.Unlock() + + // Lookup the connection and increment the counts. + nl.initRecordLocked() + conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} + cnts, found := nl.record.virtConns[conn] + if !found { + cnts.connType = nl.addNewVirtConnLocked(conn) + } + if recv { + cnts.RxPackets += uint64(packets) + cnts.RxBytes += uint64(bytes) + } else { + cnts.TxPackets += uint64(packets) + cnts.TxBytes += uint64(bytes) + } + nl.record.virtConns[conn] = cnts +} + +// addNewVirtConnLocked adds the first insertion of a physical connection. +// The [Logger.mu] must be held. +func (nl *Logger) addNewVirtConnLocked(c netlogtype.Connection) connType { + // Check whether this is the first insertion of the src and dst node. + // If so, compute the additional JSON bytes that would be added + // to the record for the node information. 
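+	// A node's metadata is charged against the record size only once;
+	// later flows involving the same node add just the per-connection
+	// overhead.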
+ var srcNodeLen, dstNodeLen int + srcNode, srcSeen := nl.record.seenNodes[c.Src.Addr()] + if !srcSeen { + srcNode = nl.allNodes[c.Src.Addr()] + if srcNode.Valid() { + srcNodeLen = srcNode.jsonLen() } } - for conn, cnts := range exitTraffic { - m.ExitTraffic = append(m.ExitTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) + dstNode, dstSeen := nl.record.seenNodes[c.Dst.Addr()] + if !dstSeen { + dstNode = nl.allNodes[c.Dst.Addr()] + if dstNode.Valid() { + dstNodeLen = dstNode.jsonLen() + } + } + + // Check whether the additional [netlogtype.ConnectionCounts] + // and [netlogtype.Node] information would exceed [maxLogSize]. + if nl.recordLen+netlogtype.MaxConnectionCountsJSONSize+srcNodeLen+dstNodeLen > maxLogSize { + nl.flushRecordLocked() + nl.initRecordLocked() + } + + // Insert newly seen src and/or dst nodes. + if !srcSeen && srcNode.Valid() { + nl.record.seenNodes[c.Src.Addr()] = srcNode } - for conn, cnts := range sockStats { - m.PhysicalTraffic = append(m.PhysicalTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) + if !dstSeen && dstNode.Valid() { + nl.record.seenNodes[c.Dst.Addr()] = dstNode } + nl.recordLen += netlogtype.MaxConnectionCountsJSONSize + srcNodeLen + dstNodeLen - if len(m.VirtualTraffic)+len(m.SubnetTraffic)+len(m.ExitTraffic)+len(m.PhysicalTraffic) > 0 { - if b, err := json.Marshal(m); err != nil { - logger.Logf("json.Marshal error: %v", err) + // Classify the traffic type. + var srcIsSelfNode bool + if nl.selfNode.Valid() { + srcIsSelfNode = nl.selfNode.Addresses().ContainsFunc(func(p netip.Prefix) bool { + return c.Src.Addr() == p.Addr() && p.IsSingleIP() + }) + } + switch { + case srcIsSelfNode && dstNode.Valid(): + return virtualTraffic + case srcIsSelfNode: + // TODO: Should we swap src for the node serving as the proxy? + // It is relatively useless always using the self IP address. + if nl.withinRoutesLocked(c.Dst.Addr()) { + return subnetTraffic // a client using another subnet router } else { - logger.Logf("%s", b) + return exitTraffic // a client using exit an exit node } + case dstNode.Valid(): + if nl.withinRoutesLocked(c.Src.Addr()) { + return subnetTraffic // serving as a subnet router + } else { + return exitTraffic // serving as an exit node + } + default: + return unknownTraffic + } +} + +func (nl *Logger) updatePhysConn(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, recv bool) { + nl.mu.Lock() + defer nl.mu.Unlock() + + // Lookup the connection and increment the counts. + nl.initRecordLocked() + conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} + cnts, found := nl.record.physConns[conn] + if !found { + nl.addNewPhysConnLocked(conn) + } + if recv { + cnts.RxPackets += uint64(packets) + cnts.RxBytes += uint64(bytes) + } else { + cnts.TxPackets += uint64(packets) + cnts.TxBytes += uint64(bytes) } + nl.record.physConns[conn] = cnts } -func makeRouteMaps(cfg *router.Config) (addrs map[netip.Addr]bool, prefixes map[netip.Prefix]bool) { - addrs = make(map[netip.Addr]bool) - for _, p := range cfg.LocalAddrs { - if p.IsSingleIP() { - addrs[p.Addr()] = true +// addNewPhysConnLocked adds the first insertion of a physical connection. +// The [Logger.mu] must be held. +func (nl *Logger) addNewPhysConnLocked(c netlogtype.Connection) { + // Check whether this is the first insertion of the src node. 
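+	// For physical traffic the src is always a Tailscale address and the
+	// dst is not (see the Startup documentation), so only the src node can
+	// contribute node metadata to the record.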
+ var srcNodeLen int + srcNode, srcSeen := nl.record.seenNodes[c.Src.Addr()] + if !srcSeen { + srcNode = nl.allNodes[c.Src.Addr()] + if srcNode.Valid() { + srcNodeLen = srcNode.jsonLen() } } - prefixes = make(map[netip.Prefix]bool) + + // Check whether the additional [netlogtype.ConnectionCounts] + // and [netlogtype.Node] information would exceed [maxLogSize]. + if nl.recordLen+netlogtype.MaxConnectionCountsJSONSize+srcNodeLen > maxLogSize { + nl.flushRecordLocked() + nl.initRecordLocked() + } + + // Insert newly seen src and/or dst nodes. + if !srcSeen && srcNode.Valid() { + nl.record.seenNodes[c.Src.Addr()] = srcNode + } + nl.recordLen += netlogtype.MaxConnectionCountsJSONSize + srcNodeLen +} + +// initRecordLocked initialize the current record if uninitialized. +// The [Logger.mu] must be held. +func (nl *Logger) initRecordLocked() { + if nl.recordLen != 0 { + return + } + nl.record = record{ + selfNode: nl.selfNode, + start: time.Now().UTC(), + seenNodes: make(map[netip.Addr]nodeUser), + virtConns: make(map[netlogtype.Connection]countsType), + physConns: make(map[netlogtype.Connection]netlogtype.Counts), + } + nl.recordLen = netlogtype.MinMessageJSONSize + nl.selfNode.jsonLen() + + // Start a time to auto-flush the record. + // Avoid tickers since continually waking up a goroutine + // is expensive on battery powered devices. + nl.flushTimer = time.AfterFunc(pollPeriod, func() { + nl.mu.Lock() + defer nl.mu.Unlock() + if !nl.record.start.IsZero() && time.Since(nl.record.start) > pollPeriod/2 { + nl.flushRecordLocked() + } + }) +} + +// flushRecordLocked flushes the current record if initialized. +// The [Logger.mu] must be held. +func (nl *Logger) flushRecordLocked() { + if nl.recordLen == 0 { + return + } + nl.record.end = time.Now().UTC() + if nl.recordsChan != nil { + select { + case nl.recordsChan <- nl.record: + default: + if nl.logf != nil { + nl.logf("netlog: dropped record due to processing backlog") + } + } + } + if nl.flushTimer != nil { + nl.flushTimer.Stop() + nl.flushTimer = nil + } + nl.record = record{} + nl.recordLen = 0 +} + +func makeNodeMaps(nm *netmap.NetworkMap) (selfNode nodeUser, allNodes map[netip.Addr]nodeUser) { + if nm == nil { + return + } + allNodes = make(map[netip.Addr]nodeUser) + if nm.SelfNode.Valid() { + selfNode = nodeUser{nm.SelfNode, nm.UserProfiles[nm.SelfNode.User()]} + for _, addr := range nm.SelfNode.Addresses().All() { + if addr.IsSingleIP() { + allNodes[addr.Addr()] = selfNode + } + } + } + for _, peer := range nm.Peers { + if peer.Valid() { + for _, addr := range peer.Addresses().All() { + if addr.IsSingleIP() { + allNodes[addr.Addr()] = nodeUser{peer, nm.UserProfiles[peer.User()]} + } + } + } + } + return selfNode, allNodes +} + +// ReconfigNetworkMap configures the network logger with an updated netmap. 
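+// It is safe to call while the logger is running; subsequent node lookups
+// use the updated information.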
+func (nl *Logger) ReconfigNetworkMap(nm *netmap.NetworkMap) { + selfNode, allNodes := makeNodeMaps(nm) // avoid holding lock while making maps + nl.mu.Lock() + nl.selfNode, nl.allNodes = selfNode, allNodes + nl.mu.Unlock() +} + +func makeRouteMaps(cfg *router.Config) (addrs set.Set[netip.Addr], prefixes []netip.Prefix) { + addrs = make(set.Set[netip.Addr]) insertPrefixes := func(rs []netip.Prefix) { for _, p := range rs { if p.IsSingleIP() { - addrs[p.Addr()] = true + addrs.Add(p.Addr()) } else { - prefixes[p] = true + prefixes = append(prefixes, p) } } } + insertPrefixes(cfg.LocalAddrs) insertPrefixes(cfg.Routes) insertPrefixes(cfg.SubnetRoutes) return addrs, prefixes @@ -235,11 +460,25 @@ func makeRouteMaps(cfg *router.Config) (addrs map[netip.Addr]bool, prefixes map[ // The cfg is used to classify the types of connections captured by // the tun Device passed to Startup. func (nl *Logger) ReconfigRoutes(cfg *router.Config) { + addrs, prefixes := makeRouteMaps(cfg) // avoid holding lock while making maps nl.mu.Lock() - defer nl.mu.Unlock() - // TODO(joetsai): There is a race where deleted routes are not known at - // the time of extraction. We need to keep old routes around for a bit. - nl.addrs, nl.prefixes = makeRouteMaps(cfg) + nl.routeAddrs, nl.routePrefixes = addrs, prefixes + nl.mu.Unlock() +} + +// withinRoutesLocked reports whether a is within the configured routes, +// which should only contain Tailscale addresses and subnet routes. +// The [Logger.mu] must be held. +func (nl *Logger) withinRoutesLocked(a netip.Addr) bool { + if nl.routeAddrs.Contains(a) { + return true + } + for _, p := range nl.routePrefixes { + if p.Contains(a) && p.Bits() > 0 { + return true + } + } + return false } // Shutdown shuts down the network logger. @@ -248,26 +487,8 @@ func (nl *Logger) ReconfigRoutes(cfg *router.Config) { func (nl *Logger) Shutdown(ctx context.Context) error { nl.mu.Lock() defer nl.mu.Unlock() - if nl.logger == nil { + if nl.shutdownLocked == nil { return nil } - - // Shutdown in reverse order of Startup. - // Do not hold lock while shutting down since this may flush one last time. - nl.mu.Unlock() - nl.sock.SetConnectionCounter(nil) - nl.tun.SetConnectionCounter(nil) - err1 := nl.stats.Shutdown(ctx) - err2 := nl.logger.Shutdown(ctx) - nl.mu.Lock() - - // Purge state. 
- nl.logger = nil - nl.stats = nil - nl.tun = nil - nl.sock = nil - nl.addrs = nil - nl.prefixes = nil - - return errors.Join(err1, err2) + return nl.shutdownLocked(ctx) } diff --git a/wgengine/netlog/netlog_omit.go b/wgengine/netlog/netlog_omit.go index 43209df91..03610a1ef 100644 --- a/wgengine/netlog/netlog_omit.go +++ b/wgengine/netlog/netlog_omit.go @@ -7,7 +7,8 @@ package netlog type Logger struct{} -func (*Logger) Startup(...any) error { return nil } -func (*Logger) Running() bool { return false } -func (*Logger) Shutdown(any) error { return nil } -func (*Logger) ReconfigRoutes(any) {} +func (*Logger) Startup(...any) error { return nil } +func (*Logger) Running() bool { return false } +func (*Logger) Shutdown(any) error { return nil } +func (*Logger) ReconfigNetworkMap(any) {} +func (*Logger) ReconfigRoutes(any) {} diff --git a/wgengine/netlog/netlog_test.go b/wgengine/netlog/netlog_test.go new file mode 100644 index 000000000..ed9f672bf --- /dev/null +++ b/wgengine/netlog/netlog_test.go @@ -0,0 +1,236 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netlog && !ts_omit_logtail + +package netlog + +import ( + "encoding/binary" + "math/rand/v2" + "net/netip" + "sync" + "testing" + "testing/synctest" + "time" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "tailscale.com/tailcfg" + "tailscale.com/types/bools" + "tailscale.com/types/ipproto" + "tailscale.com/types/netlogtype" + "tailscale.com/types/netmap" + "tailscale.com/wgengine/router" +) + +func TestEmbedNodeInfo(t *testing.T) { + // Initialize the logger with a particular view of the netmap. + var logger Logger + logger.ReconfigNetworkMap(&netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + StableID: "n123456CNTL", + ID: 123456, + Name: "test.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.3")}, + Tags: []string{"tag:foo", "tag:bar"}, + }).View(), + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + StableID: "n123457CNTL", + ID: 123457, + Name: "peer1.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.4")}, + Tags: []string{"tag:peer"}, + }).View(), + (&tailcfg.Node{ + StableID: "n123458CNTL", + ID: 123458, + Name: "peer2.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.5")}, + User: 54321, + }).View(), + }, + UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ + 54321: (&tailcfg.UserProfile{ID: 54321, LoginName: "peer@example.com"}).View(), + }, + }) + logger.ReconfigRoutes(&router.Config{ + SubnetRoutes: []netip.Prefix{ + prefix("172.16.1.1/16"), + prefix("192.168.1.1/24"), + }, + }) + + // Update the counters for a few connections. 
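+	// Each connection below is updated 10 times in each direction from
+	// concurrent goroutines, so the expected counts are 10x the per-call
+	// packet and byte values.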
+ var group sync.WaitGroup + defer group.Wait() + conns := []struct { + virt bool + proto ipproto.Proto + src, dst netip.AddrPort + txP, txB, rxP, rxB int + }{ + {true, 0x6, addrPort("100.1.2.3:80"), addrPort("100.1.2.4:1812"), 88, 278, 34, 887}, + {true, 0x6, addrPort("100.1.2.3:443"), addrPort("100.1.2.5:1742"), 96, 635, 23, 790}, + {true, 0x6, addrPort("100.1.2.3:443"), addrPort("100.1.2.6:1175"), 48, 94, 86, 618}, // unknown peer (in Tailscale IP space, but not a known peer) + {true, 0x6, addrPort("100.1.2.3:80"), addrPort("192.168.1.241:713"), 43, 154, 66, 883}, + {true, 0x6, addrPort("100.1.2.3:80"), addrPort("192.168.2.241:713"), 43, 154, 66, 883}, // not in the subnet, must be exit traffic + {true, 0x6, addrPort("100.1.2.3:80"), addrPort("172.16.5.18:713"), 7, 243, 40, 59}, + {true, 0x6, addrPort("100.1.2.3:80"), addrPort("172.20.5.18:713"), 61, 753, 42, 492}, // not in the subnet, must be exit traffic + {true, 0x6, addrPort("192.168.1.241:713"), addrPort("100.1.2.3:80"), 43, 154, 66, 883}, + {true, 0x6, addrPort("192.168.2.241:713"), addrPort("100.1.2.3:80"), 43, 154, 66, 883}, // not in the subnet, must be exit traffic + {true, 0x6, addrPort("172.16.5.18:713"), addrPort("100.1.2.3:80"), 7, 243, 40, 59}, + {true, 0x6, addrPort("172.20.5.18:713"), addrPort("100.1.2.3:80"), 61, 753, 42, 492}, // not in the subnet, must be exit traffic + {true, 0x6, addrPort("14.255.192.128:39230"), addrPort("243.42.106.193:48206"), 81, 791, 79, 316}, // unknown connection + {false, 0x6, addrPort("100.1.2.4:0"), addrPort("35.92.180.165:9743"), 63, 136, 61, 409}, // physical traffic with peer1 + {false, 0x6, addrPort("100.1.2.5:0"), addrPort("131.19.35.17:9743"), 88, 452, 2, 716}, // physical traffic with peer2 + } + for range 10 { + for _, conn := range conns { + update := bools.IfElse(conn.virt, logger.updateVirtConn, logger.updatePhysConn) + group.Go(func() { update(conn.proto, conn.src, conn.dst, conn.txP, conn.txB, false) }) + group.Go(func() { update(conn.proto, conn.src, conn.dst, conn.rxP, conn.rxB, true) }) + } + } + group.Wait() + + // Verify that the counters match. 
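+	// The expected totals below are 10x the per-update values above, since the
+	// update loop ran ten times over the same set of connections.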
+ got := logger.record.toMessage(false, false) + got.Start = time.Time{} // avoid flakiness + want := netlogtype.Message{ + NodeID: "n123456CNTL", + SrcNode: netlogtype.Node{ + NodeID: "n123456CNTL", + Name: "test.tail123456.ts.net", + Addresses: []netip.Addr{addr("100.1.2.3")}, + Tags: []string{"tag:bar", "tag:foo"}, + }, + DstNodes: []netlogtype.Node{{ + NodeID: "n123457CNTL", + Name: "peer1.tail123456.ts.net", + Addresses: []netip.Addr{addr("100.1.2.4")}, + Tags: []string{"tag:peer"}, + }, { + NodeID: "n123458CNTL", + Name: "peer2.tail123456.ts.net", + Addresses: []netip.Addr{addr("100.1.2.5")}, + User: "peer@example.com", + }}, + VirtualTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "100.1.2.3:80", "100.1.2.4:1812"), Counts: counts(880, 2780, 340, 8870)}, + {Connection: conn(0x6, "100.1.2.3:443", "100.1.2.5:1742"), Counts: counts(960, 6350, 230, 7900)}, + }, + SubnetTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "100.1.2.3:80", "172.16.5.18:713"), Counts: counts(70, 2430, 400, 590)}, + {Connection: conn(0x6, "100.1.2.3:80", "192.168.1.241:713"), Counts: counts(430, 1540, 660, 8830)}, + {Connection: conn(0x6, "172.16.5.18:713", "100.1.2.3:80"), Counts: counts(70, 2430, 400, 590)}, + {Connection: conn(0x6, "192.168.1.241:713", "100.1.2.3:80"), Counts: counts(430, 1540, 660, 8830)}, + }, + ExitTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "14.255.192.128:39230", "243.42.106.193:48206"), Counts: counts(810, 7910, 790, 3160)}, + {Connection: conn(0x6, "100.1.2.3:80", "172.20.5.18:713"), Counts: counts(610, 7530, 420, 4920)}, + {Connection: conn(0x6, "100.1.2.3:80", "192.168.2.241:713"), Counts: counts(430, 1540, 660, 8830)}, + {Connection: conn(0x6, "100.1.2.3:443", "100.1.2.6:1175"), Counts: counts(480, 940, 860, 6180)}, + {Connection: conn(0x6, "172.20.5.18:713", "100.1.2.3:80"), Counts: counts(610, 7530, 420, 4920)}, + {Connection: conn(0x6, "192.168.2.241:713", "100.1.2.3:80"), Counts: counts(430, 1540, 660, 8830)}, + }, + PhysicalTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "100.1.2.4:0", "35.92.180.165:9743"), Counts: counts(630, 1360, 610, 4090)}, + {Connection: conn(0x6, "100.1.2.5:0", "131.19.35.17:9743"), Counts: counts(880, 4520, 20, 7160)}, + }, + } + if d := cmp.Diff(got, want, cmpopts.EquateComparable(netip.Addr{}, netip.AddrPort{})); d != "" { + t.Errorf("Message (-got +want):\n%s", d) + } +} + +func TestUpdateRace(t *testing.T) { + var logger Logger + logger.recordsChan = make(chan record, 1) + go func(recordsChan chan record) { + for range recordsChan { + } + }(logger.recordsChan) + + var group sync.WaitGroup + defer group.Wait() + for i := range 1000 { + group.Go(func() { + src, dst := randAddrPort(), randAddrPort() + for j := range 1000 { + if i%2 == 0 { + logger.updateVirtConn(0x1, src, dst, rand.IntN(10), rand.IntN(1000), j%2 == 0) + } else { + logger.updatePhysConn(0x1, src, dst, rand.IntN(10), rand.IntN(1000), j%2 == 0) + } + } + }) + group.Go(func() { + for range 1000 { + logger.ReconfigNetworkMap(new(netmap.NetworkMap)) + } + }) + group.Go(func() { + for range 1000 { + logger.ReconfigRoutes(new(router.Config)) + } + }) + } + + group.Wait() + logger.mu.Lock() + close(logger.recordsChan) + logger.mu.Unlock() +} + +func randAddrPort() netip.AddrPort { + var b [4]uint8 + binary.LittleEndian.PutUint32(b[:], rand.Uint32()) + return netip.AddrPortFrom(netip.AddrFrom4(b), uint16(rand.Uint32())) +} + +func TestAutoFlushMaxConns(t *testing.T) { + var logger Logger + logger.recordsChan = make(chan 
record, 1)
+	for i := 0; len(logger.recordsChan) == 0; i++ {
+		logger.updateVirtConn(0, netip.AddrPortFrom(netip.Addr{}, uint16(i)), netip.AddrPort{}, 1, 1, false)
+	}
+	b, _ := jsonv2.Marshal(logger.recordsChan)
+	if len(b) > maxLogSize {
+		t.Errorf("len(Message) = %v, want <= %d", len(b), maxLogSize)
+	}
+}
+
+func TestAutoFlushTimeout(t *testing.T) {
+	var logger Logger
+	logger.recordsChan = make(chan record, 1)
+	synctest.Test(t, func(t *testing.T) {
+		logger.updateVirtConn(0, netip.AddrPort{}, netip.AddrPort{}, 1, 1, false)
+		time.Sleep(pollPeriod)
+	})
+	rec := <-logger.recordsChan
+	if d := rec.end.Sub(rec.start); d != pollPeriod {
+		t.Errorf("window = %v, want %v", d, pollPeriod)
+	}
+	if len(rec.virtConns) != 1 {
+		t.Errorf("len(virtConns) = %d, want 1", len(rec.virtConns))
+	}
+}
+
+func BenchmarkUpdateSameConn(b *testing.B) {
+	var logger Logger
+	b.ReportAllocs()
+	for range b.N {
+		logger.updateVirtConn(0, netip.AddrPort{}, netip.AddrPort{}, 1, 1, false)
+	}
+}
+
+func BenchmarkUpdateNewConns(b *testing.B) {
+	var logger Logger
+	b.ReportAllocs()
+	for i := range b.N {
+		logger.updateVirtConn(0, netip.AddrPortFrom(netip.Addr{}, uint16(i)), netip.AddrPort{}, 1, 1, false)
+	}
+}
diff --git a/wgengine/netlog/record.go b/wgengine/netlog/record.go
new file mode 100644
index 000000000..b8db26fc5
--- /dev/null
+++ b/wgengine/netlog/record.go
@@ -0,0 +1,196 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !ts_omit_netlog && !ts_omit_logtail
+
+package netlog
+
+import (
+	"cmp"
+	"net/netip"
+	"slices"
+	"time"
+	"unicode/utf8"
+
+	"tailscale.com/tailcfg"
+	"tailscale.com/types/netlogtype"
+	"tailscale.com/util/set"
+)
+
+// maxLogSize is the maximum number of bytes for a log message.
+const maxLogSize = 256 << 10
+
+// record is the in-memory representation of a [netlogtype.Message].
+// It uses maps to efficiently look up addresses and connections.
+// In contrast, [netlogtype.Message] is designed to be JSON serializable,
+// where complex key types are not well supported in JSON objects.
+type record struct {
+	selfNode nodeUser
+
+	start time.Time
+	end   time.Time
+
+	seenNodes map[netip.Addr]nodeUser
+
+	virtConns map[netlogtype.Connection]countsType
+	physConns map[netlogtype.Connection]netlogtype.Counts
+}
+
+// nodeUser is a node with additional user profile information.
+type nodeUser struct {
+	tailcfg.NodeView
+	user tailcfg.UserProfileView // UserProfileView for NodeView.User
+}
+
+// countsType pairs a [netlogtype.Counts] with classification information about the connection.
+type countsType struct {
+	netlogtype.Counts
+	connType connType
+}
+
+type connType uint8
+
+const (
+	unknownTraffic connType = iota
+	virtualTraffic
+	subnetTraffic
+	exitTraffic
+)
+
+// toMessage converts a [record] into a [netlogtype.Message].
+func (r record) toMessage(excludeNodeInfo, anonymizeExitTraffic bool) netlogtype.Message {
+	if !r.selfNode.Valid() {
+		return netlogtype.Message{}
+	}
+
+	m := netlogtype.Message{
+		NodeID: r.selfNode.StableID(),
+		Start:  r.start.UTC(),
+		End:    r.end.UTC(),
+	}
+
+	// Convert node fields.
+	if !excludeNodeInfo {
+		m.SrcNode = r.selfNode.toNode()
+		seenIDs := set.Of(r.selfNode.ID())
+		for _, node := range r.seenNodes {
+			if _, ok := seenIDs[node.ID()]; !ok && node.Valid() {
+				m.DstNodes = append(m.DstNodes, node.toNode())
+				seenIDs.Add(node.ID())
+			}
+		}
+		slices.SortFunc(m.DstNodes, func(x, y netlogtype.Node) int {
+			return cmp.Compare(x.NodeID, y.NodeID)
+		})
+	}
+
+	// Convert traffic fields.
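+	// Exit traffic (the default case below) may be anonymized: the protocol and
+	// port numbers are scrubbed, addresses that do not belong to a known node
+	// are dropped entirely, and the scrubbed flows are aggregated together.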
+ anonymizedExitTraffic := make(map[netlogtype.Connection]netlogtype.Counts) + for conn, cnts := range r.virtConns { + switch cnts.connType { + case virtualTraffic: + m.VirtualTraffic = append(m.VirtualTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts.Counts}) + case subnetTraffic: + m.SubnetTraffic = append(m.SubnetTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts.Counts}) + default: + if anonymizeExitTraffic { + conn = netlogtype.Connection{ // scrub the IP protocol type + Src: netip.AddrPortFrom(conn.Src.Addr(), 0), // scrub the port number + Dst: netip.AddrPortFrom(conn.Dst.Addr(), 0), // scrub the port number + } + if !r.seenNodes[conn.Src.Addr()].Valid() { + conn.Src = netip.AddrPort{} // not a Tailscale node, so scrub the address + } + if !r.seenNodes[conn.Dst.Addr()].Valid() { + conn.Dst = netip.AddrPort{} // not a Tailscale node, so scrub the address + } + anonymizedExitTraffic[conn] = anonymizedExitTraffic[conn].Add(cnts.Counts) + continue + } + m.ExitTraffic = append(m.ExitTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts.Counts}) + } + } + for conn, cnts := range anonymizedExitTraffic { + m.ExitTraffic = append(m.ExitTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) + } + for conn, cnts := range r.physConns { + m.PhysicalTraffic = append(m.PhysicalTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) + } + + // Sort the connections for deterministic results. + slices.SortFunc(m.VirtualTraffic, compareConnCnts) + slices.SortFunc(m.SubnetTraffic, compareConnCnts) + slices.SortFunc(m.ExitTraffic, compareConnCnts) + slices.SortFunc(m.PhysicalTraffic, compareConnCnts) + + return m +} + +func compareConnCnts(x, y netlogtype.ConnectionCounts) int { + return cmp.Or( + netip.AddrPort.Compare(x.Src, y.Src), + netip.AddrPort.Compare(x.Dst, y.Dst), + cmp.Compare(x.Proto, y.Proto)) +} + +// jsonLen computes an upper-bound on the size of the JSON representation. +func (nu nodeUser) jsonLen() int { + if !nu.Valid() { + return len(`{"nodeId":""}`) + } + n := netlogtype.MinNodeJSONSize + jsonQuotedLen(nu.Name()) + if nu.Tags().Len() > 0 { + for _, tag := range nu.Tags().All() { + n += jsonQuotedLen(tag) + len(",") + } + } else if nu.user.Valid() && nu.user.ID() == nu.User() { + n += jsonQuotedLen(nu.user.LoginName()) + } + return n +} + +// toNode converts the [nodeUser] into a [netlogtype.Node]. +func (nu nodeUser) toNode() netlogtype.Node { + if !nu.Valid() { + return netlogtype.Node{} + } + n := netlogtype.Node{NodeID: nu.StableID(), Name: nu.Name()} + var ipv4, ipv6 netip.Addr + for _, addr := range nu.Addresses().All() { + switch { + case addr.IsSingleIP() && addr.Addr().Is4(): + ipv4 = addr.Addr() + case addr.IsSingleIP() && addr.Addr().Is6(): + ipv6 = addr.Addr() + } + } + n.Addresses = []netip.Addr{ipv4, ipv6} + n.Addresses = slices.DeleteFunc(n.Addresses, func(a netip.Addr) bool { return !a.IsValid() }) + if nu.Tags().Len() > 0 { + n.Tags = nu.Tags().AsSlice() + slices.Sort(n.Tags) + n.Tags = slices.Compact(n.Tags) + } else if nu.user.Valid() && nu.user.ID() == nu.User() { + n.User = nu.user.LoginName() + } + return n +} + +// jsonQuotedLen computes the length of the JSON serialization of s +// according to [jsontext.AppendQuote]. 
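+// jsonLen relies on it to bound the serialized size of node metadata
+// without actually marshaling anything.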
+func jsonQuotedLen(s string) int { + n := len(`"`) + len(s) + len(`"`) + for i, r := range s { + switch { + case r == '\b', r == '\t', r == '\n', r == '\f', r == '\r', r == '"', r == '\\': + n += len(`\X`) - 1 + case r < ' ': + n += len(`\uXXXX`) - 1 + case r == utf8.RuneError: + if _, m := utf8.DecodeRuneInString(s[i:]); m == 1 { // exactly an invalid byte + n += len("�") - 1 + } + } + } + return n +} diff --git a/wgengine/netlog/record_test.go b/wgengine/netlog/record_test.go new file mode 100644 index 000000000..d3ab8b86c --- /dev/null +++ b/wgengine/netlog/record_test.go @@ -0,0 +1,255 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netlog && !ts_omit_logtail + +package netlog + +import ( + "net/netip" + "testing" + "time" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "tailscale.com/tailcfg" + "tailscale.com/types/ipproto" + "tailscale.com/types/netlogtype" + "tailscale.com/util/must" +) + +func addr(s string) netip.Addr { + if s == "" { + return netip.Addr{} + } + return must.Get(netip.ParseAddr(s)) +} +func addrPort(s string) netip.AddrPort { + if s == "" { + return netip.AddrPort{} + } + return must.Get(netip.ParseAddrPort(s)) +} +func prefix(s string) netip.Prefix { + if p, err := netip.ParsePrefix(s); err == nil { + return p + } + a := addr(s) + return netip.PrefixFrom(a, a.BitLen()) +} + +func conn(proto ipproto.Proto, src, dst string) netlogtype.Connection { + return netlogtype.Connection{Proto: proto, Src: addrPort(src), Dst: addrPort(dst)} +} + +func counts(txP, txB, rxP, rxB uint64) netlogtype.Counts { + return netlogtype.Counts{TxPackets: txP, TxBytes: txB, RxPackets: rxP, RxBytes: rxB} +} + +func TestToMessage(t *testing.T) { + rec := record{ + selfNode: nodeUser{NodeView: (&tailcfg.Node{ + ID: 123456, + StableID: "n123456CNTL", + Name: "src.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.3")}, + Tags: []string{"tag:src"}, + }).View()}, + start: time.Now(), + end: time.Now().Add(5 * time.Second), + + seenNodes: map[netip.Addr]nodeUser{ + addr("100.1.2.4"): {NodeView: (&tailcfg.Node{ + ID: 123457, + StableID: "n123457CNTL", + Name: "dst1.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.4")}, + Tags: []string{"tag:dst1"}, + }).View()}, + addr("100.1.2.5"): {NodeView: (&tailcfg.Node{ + ID: 123458, + StableID: "n123458CNTL", + Name: "dst2.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.5")}, + Tags: []string{"tag:dst2"}, + }).View()}, + }, + + virtConns: map[netlogtype.Connection]countsType{ + conn(0x1, "100.1.2.3:1234", "100.1.2.4:80"): {Counts: counts(12, 34, 56, 78), connType: virtualTraffic}, + conn(0x1, "100.1.2.3:1234", "100.1.2.5:80"): {Counts: counts(23, 45, 78, 790), connType: virtualTraffic}, + conn(0x6, "172.16.1.1:80", "100.1.2.4:1234"): {Counts: counts(91, 54, 723, 621), connType: subnetTraffic}, + conn(0x6, "172.16.1.2:443", "100.1.2.5:1234"): {Counts: counts(42, 813, 3, 1823), connType: subnetTraffic}, + conn(0x6, "172.16.1.3:80", "100.1.2.6:1234"): {Counts: counts(34, 52, 78, 790), connType: subnetTraffic}, + conn(0x6, "100.1.2.3:1234", "12.34.56.78:80"): {Counts: counts(11, 110, 10, 100), connType: exitTraffic}, + conn(0x6, "100.1.2.4:1234", "23.34.56.78:80"): {Counts: counts(423, 1, 6, 123), connType: exitTraffic}, + conn(0x6, "100.1.2.4:1234", "23.34.56.78:443"): {Counts: counts(22, 220, 20, 200), connType: exitTraffic}, + 
conn(0x6, "100.1.2.5:1234", "45.34.56.78:80"): {Counts: counts(33, 330, 30, 300), connType: exitTraffic}, + conn(0x6, "100.1.2.6:1234", "67.34.56.78:80"): {Counts: counts(44, 440, 40, 400), connType: exitTraffic}, + conn(0x6, "42.54.72.42:555", "18.42.7.1:777"): {Counts: counts(44, 440, 40, 400)}, + }, + + physConns: map[netlogtype.Connection]netlogtype.Counts{ + conn(0, "100.1.2.4:0", "4.3.2.1:1234"): counts(12, 34, 56, 78), + conn(0, "100.1.2.5:0", "4.3.2.10:1234"): counts(78, 56, 34, 12), + }, + } + rec.seenNodes[rec.selfNode.toNode().Addresses[0]] = rec.selfNode + + got := rec.toMessage(false, false) + want := netlogtype.Message{ + NodeID: rec.selfNode.StableID(), + Start: rec.start, + End: rec.end, + SrcNode: rec.selfNode.toNode(), + DstNodes: []netlogtype.Node{ + rec.seenNodes[addr("100.1.2.4")].toNode(), + rec.seenNodes[addr("100.1.2.5")].toNode(), + }, + VirtualTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x1, "100.1.2.3:1234", "100.1.2.4:80"), Counts: counts(12, 34, 56, 78)}, + {Connection: conn(0x1, "100.1.2.3:1234", "100.1.2.5:80"), Counts: counts(23, 45, 78, 790)}, + }, + SubnetTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "172.16.1.1:80", "100.1.2.4:1234"), Counts: counts(91, 54, 723, 621)}, + {Connection: conn(0x6, "172.16.1.2:443", "100.1.2.5:1234"), Counts: counts(42, 813, 3, 1823)}, + {Connection: conn(0x6, "172.16.1.3:80", "100.1.2.6:1234"), Counts: counts(34, 52, 78, 790)}, + }, + ExitTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "42.54.72.42:555", "18.42.7.1:777"), Counts: counts(44, 440, 40, 400)}, + {Connection: conn(0x6, "100.1.2.3:1234", "12.34.56.78:80"), Counts: counts(11, 110, 10, 100)}, + {Connection: conn(0x6, "100.1.2.4:1234", "23.34.56.78:80"), Counts: counts(423, 1, 6, 123)}, + {Connection: conn(0x6, "100.1.2.4:1234", "23.34.56.78:443"), Counts: counts(22, 220, 20, 200)}, + {Connection: conn(0x6, "100.1.2.5:1234", "45.34.56.78:80"), Counts: counts(33, 330, 30, 300)}, + {Connection: conn(0x6, "100.1.2.6:1234", "67.34.56.78:80"), Counts: counts(44, 440, 40, 400)}, + }, + PhysicalTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0, "100.1.2.4:0", "4.3.2.1:1234"), Counts: counts(12, 34, 56, 78)}, + {Connection: conn(0, "100.1.2.5:0", "4.3.2.10:1234"), Counts: counts(78, 56, 34, 12)}, + }, + } + if d := cmp.Diff(got, want, cmpopts.EquateComparable(netip.Addr{}, netip.AddrPort{})); d != "" { + t.Errorf("toMessage(false, false) mismatch (-got +want):\n%s", d) + } + + got = rec.toMessage(true, false) + want.SrcNode = netlogtype.Node{} + want.DstNodes = nil + if d := cmp.Diff(got, want, cmpopts.EquateComparable(netip.Addr{}, netip.AddrPort{})); d != "" { + t.Errorf("toMessage(true, false) mismatch (-got +want):\n%s", d) + } + + got = rec.toMessage(true, true) + want.ExitTraffic = []netlogtype.ConnectionCounts{ + {Connection: conn(0, "", ""), Counts: counts(44+44, 440+440, 40+40, 400+400)}, + {Connection: conn(0, "100.1.2.3:0", ""), Counts: counts(11, 110, 10, 100)}, + {Connection: conn(0, "100.1.2.4:0", ""), Counts: counts(423+22, 1+220, 6+20, 123+200)}, + {Connection: conn(0, "100.1.2.5:0", ""), Counts: counts(33, 330, 30, 300)}, + } + if d := cmp.Diff(got, want, cmpopts.EquateComparable(netip.Addr{}, netip.AddrPort{})); d != "" { + t.Errorf("toMessage(true, true) mismatch (-got +want):\n%s", d) + } +} + +func TestToNode(t *testing.T) { + tests := []struct { + node *tailcfg.Node + user *tailcfg.UserProfile + want netlogtype.Node + }{ + {}, + { + node: &tailcfg.Node{ + StableID: "n123456CNTL", + Name: 
"test.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.3")}, + Tags: []string{"tag:dupe", "tag:test", "tag:dupe"}, + User: 12345, // should be ignored + }, + want: netlogtype.Node{ + NodeID: "n123456CNTL", + Name: "test.tail123456.ts.net", + Addresses: []netip.Addr{addr("100.1.2.3")}, + Tags: []string{"tag:dupe", "tag:test"}, + }, + }, + { + node: &tailcfg.Node{ + StableID: "n123456CNTL", + Addresses: []netip.Prefix{prefix("100.1.2.3")}, + User: 12345, + }, + want: netlogtype.Node{ + NodeID: "n123456CNTL", + Addresses: []netip.Addr{addr("100.1.2.3")}, + }, + }, + { + node: &tailcfg.Node{ + StableID: "n123456CNTL", + Addresses: []netip.Prefix{prefix("100.1.2.3")}, + User: 12345, + }, + user: &tailcfg.UserProfile{ + ID: 12345, + LoginName: "user@domain", + }, + want: netlogtype.Node{ + NodeID: "n123456CNTL", + Addresses: []netip.Addr{addr("100.1.2.3")}, + User: "user@domain", + }, + }, + } + for _, tt := range tests { + nu := nodeUser{tt.node.View(), tt.user.View()} + got := nu.toNode() + b := must.Get(jsonv2.Marshal(got)) + if len(b) > nu.jsonLen() { + t.Errorf("jsonLen = %v, want >= %d", nu.jsonLen(), len(b)) + } + if d := cmp.Diff(got, tt.want, cmpopts.EquateComparable(netip.Addr{})); d != "" { + t.Errorf("toNode mismatch (-got +want):\n%s", d) + } + } +} + +func FuzzQuotedLen(f *testing.F) { + for _, s := range quotedLenTestdata { + f.Add(s) + } + f.Fuzz(func(t *testing.T, s string) { + testQuotedLen(t, s) + }) +} + +func TestQuotedLen(t *testing.T) { + for _, s := range quotedLenTestdata { + testQuotedLen(t, s) + } +} + +var quotedLenTestdata = []string{ + "", // empty string + func() string { + b := make([]byte, 128) + for i := range b { + b[i] = byte(i) + } + return string(b) + }(), // all ASCII + "�", // replacement rune + "\xff", // invalid UTF-8 + "ʕ◔ϖ◔ʔ", // Unicode gopher +} + +func testQuotedLen(t *testing.T, in string) { + got := jsonQuotedLen(in) + b, _ := jsontext.AppendQuote(nil, in) + want := len(b) + if got != want { + t.Errorf("jsonQuotedLen(%q) = %v, want %v", in, got, want) + } +} diff --git a/wgengine/netlog/stats.go b/wgengine/netlog/stats.go deleted file mode 100644 index c06068803..000000000 --- a/wgengine/netlog/stats.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !ts_omit_netlog && !ts_omit_logtail - -package netlog - -import ( - "context" - "net/netip" - "sync" - "time" - - "golang.org/x/sync/errgroup" - "tailscale.com/net/packet" - "tailscale.com/net/tsaddr" - "tailscale.com/types/ipproto" - "tailscale.com/types/netlogtype" -) - -// statistics maintains counters for every connection. -// All methods are safe for concurrent use. -// The zero value is ready for use. -type statistics struct { - maxConns int // immutable once set - - mu sync.Mutex - connCnts - - connCntsCh chan connCnts - shutdownCtx context.Context - shutdown context.CancelFunc - group errgroup.Group -} - -type connCnts struct { - start time.Time - end time.Time - virtual map[netlogtype.Connection]netlogtype.Counts - physical map[netlogtype.Connection]netlogtype.Counts -} - -// newStatistics creates a data structure for tracking connection statistics -// that periodically dumps the virtual and physical connection counts -// depending on whether the maxPeriod or maxConns is exceeded. -// The dump function is called from a single goroutine. -// Shutdown must be called to cleanup resources. 
-func newStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts)) *statistics { - s := &statistics{maxConns: maxConns} - s.connCntsCh = make(chan connCnts, 256) - s.shutdownCtx, s.shutdown = context.WithCancel(context.Background()) - s.group.Go(func() error { - // TODO(joetsai): Using a ticker is problematic on mobile platforms - // where waking up a process every maxPeriod when there is no activity - // is a drain on battery life. Switch this instead to instead use - // a time.Timer that is triggered upon network activity. - ticker := new(time.Ticker) - if maxPeriod > 0 { - ticker = time.NewTicker(maxPeriod) - defer ticker.Stop() - } - - for { - var cc connCnts - select { - case cc = <-s.connCntsCh: - case <-ticker.C: - cc = s.extract() - case <-s.shutdownCtx.Done(): - cc = s.extract() - } - if len(cc.virtual)+len(cc.physical) > 0 && dump != nil { - dump(cc.start, cc.end, cc.virtual, cc.physical) - } - if s.shutdownCtx.Err() != nil { - return nil - } - } - }) - return s -} - -// UpdateTxVirtual updates the counters for a transmitted IP packet -// The source and destination of the packet directly correspond with -// the source and destination in netlogtype.Connection. -func (s *statistics) UpdateTxVirtual(b []byte) { - var p packet.Parsed - p.Decode(b) - s.UpdateVirtual(p.IPProto, p.Src, p.Dst, 1, len(b), false) -} - -// UpdateRxVirtual updates the counters for a received IP packet. -// The source and destination of the packet are inverted with respect to -// the source and destination in netlogtype.Connection. -func (s *statistics) UpdateRxVirtual(b []byte) { - var p packet.Parsed - p.Decode(b) - s.UpdateVirtual(p.IPProto, p.Dst, p.Src, 1, len(b), true) -} - -var ( - tailscaleServiceIPv4 = tsaddr.TailscaleServiceIP() - tailscaleServiceIPv6 = tsaddr.TailscaleServiceIPv6() -) - -func (s *statistics) UpdateVirtual(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { - // Network logging is defined as traffic between two Tailscale nodes. - // Traffic with the internal Tailscale service is not with another node - // and should not be logged. It also happens to be a high volume - // amount of discrete traffic flows (e.g., DNS lookups). - switch dst.Addr() { - case tailscaleServiceIPv4, tailscaleServiceIPv6: - return - } - - conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} - - s.mu.Lock() - defer s.mu.Unlock() - cnts, found := s.virtual[conn] - if !found && !s.preInsertConn() { - return - } - if receive { - cnts.RxPackets += uint64(packets) - cnts.RxBytes += uint64(bytes) - } else { - cnts.TxPackets += uint64(packets) - cnts.TxBytes += uint64(bytes) - } - s.virtual[conn] = cnts -} - -// UpdateTxPhysical updates the counters for zero or more transmitted wireguard packets. -// The src is always a Tailscale IP address, representing some remote peer. -// The dst is a remote IP address and port that corresponds -// with some physical peer backing the Tailscale IP address. -func (s *statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { - s.UpdatePhysical(0, netip.AddrPortFrom(src, 0), dst, packets, bytes, false) -} - -// UpdateRxPhysical updates the counters for zero or more received wireguard packets. -// The src is always a Tailscale IP address, representing some remote peer. -// The dst is a remote IP address and port that corresponds -// with some physical peer backing the Tailscale IP address. 
-func (s *statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { - s.UpdatePhysical(0, netip.AddrPortFrom(src, 0), dst, packets, bytes, true) -} - -func (s *statistics) UpdatePhysical(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { - conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} - - s.mu.Lock() - defer s.mu.Unlock() - cnts, found := s.physical[conn] - if !found && !s.preInsertConn() { - return - } - if receive { - cnts.RxPackets += uint64(packets) - cnts.RxBytes += uint64(bytes) - } else { - cnts.TxPackets += uint64(packets) - cnts.TxBytes += uint64(bytes) - } - s.physical[conn] = cnts -} - -// preInsertConn updates the maps to handle insertion of a new connection. -// It reports false if insertion is not allowed (i.e., after shutdown). -func (s *statistics) preInsertConn() bool { - // Check whether insertion of a new connection will exceed maxConns. - if len(s.virtual)+len(s.physical) == s.maxConns && s.maxConns > 0 { - // Extract the current statistics and send it to the serializer. - // Avoid blocking the network packet handling path. - select { - case s.connCntsCh <- s.extractLocked(): - default: - // TODO(joetsai): Log that we are dropping an entire connCounts. - } - } - - // Initialize the maps if nil. - if s.virtual == nil && s.physical == nil { - s.start = time.Now().UTC() - s.virtual = make(map[netlogtype.Connection]netlogtype.Counts) - s.physical = make(map[netlogtype.Connection]netlogtype.Counts) - } - - return s.shutdownCtx.Err() == nil -} - -func (s *statistics) extract() connCnts { - s.mu.Lock() - defer s.mu.Unlock() - return s.extractLocked() -} - -func (s *statistics) extractLocked() connCnts { - if len(s.virtual)+len(s.physical) == 0 { - return connCnts{} - } - s.end = time.Now().UTC() - cc := s.connCnts - s.connCnts = connCnts{} - return cc -} - -// TestExtract synchronously extracts the current network statistics map -// and resets the counters. This should only be used for testing purposes. -func (s *statistics) TestExtract() (virtual, physical map[netlogtype.Connection]netlogtype.Counts) { - cc := s.extract() - return cc.virtual, cc.physical -} - -// Shutdown performs a final flush of statistics. -// Statistics for any subsequent calls to Update will be dropped. -// It is safe to call Shutdown concurrently and repeatedly. -func (s *statistics) Shutdown(context.Context) error { - s.shutdown() - return s.group.Wait() -} diff --git a/wgengine/netlog/stats_test.go b/wgengine/netlog/stats_test.go deleted file mode 100644 index 6cf7eb998..000000000 --- a/wgengine/netlog/stats_test.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package netlog - -import ( - "context" - "encoding/binary" - "fmt" - "math/rand" - "net/netip" - "runtime" - "sync" - "testing" - "time" - - qt "github.com/frankban/quicktest" - "tailscale.com/cmd/testwrapper/flakytest" - "tailscale.com/types/ipproto" - "tailscale.com/types/netlogtype" -) - -func testPacketV4(proto ipproto.Proto, srcAddr, dstAddr [4]byte, srcPort, dstPort, size uint16) (out []byte) { - var ipHdr [20]byte - ipHdr[0] = 4<<4 | 5 - binary.BigEndian.PutUint16(ipHdr[2:], size) - ipHdr[9] = byte(proto) - *(*[4]byte)(ipHdr[12:]) = srcAddr - *(*[4]byte)(ipHdr[16:]) = dstAddr - out = append(out, ipHdr[:]...) 
- switch proto { - case ipproto.TCP: - var tcpHdr [20]byte - binary.BigEndian.PutUint16(tcpHdr[0:], srcPort) - binary.BigEndian.PutUint16(tcpHdr[2:], dstPort) - out = append(out, tcpHdr[:]...) - case ipproto.UDP: - var udpHdr [8]byte - binary.BigEndian.PutUint16(udpHdr[0:], srcPort) - binary.BigEndian.PutUint16(udpHdr[2:], dstPort) - out = append(out, udpHdr[:]...) - default: - panic(fmt.Sprintf("unknown proto: %d", proto)) - } - return append(out, make([]byte, int(size)-len(out))...) -} - -// TestInterval ensures that we receive at least one call to `dump` using only -// maxPeriod. -func TestInterval(t *testing.T) { - c := qt.New(t) - - const maxPeriod = 10 * time.Millisecond - const maxConns = 2048 - - gotDump := make(chan struct{}, 1) - stats := newStatistics(maxPeriod, maxConns, func(_, _ time.Time, _, _ map[netlogtype.Connection]netlogtype.Counts) { - select { - case gotDump <- struct{}{}: - default: - } - }) - defer stats.Shutdown(context.Background()) - - srcAddr := netip.AddrFrom4([4]byte{192, 168, 0, byte(rand.Intn(16))}) - dstAddr := netip.AddrFrom4([4]byte{192, 168, 0, byte(rand.Intn(16))}) - srcPort := uint16(rand.Intn(16)) - dstPort := uint16(rand.Intn(16)) - size := uint16(64 + rand.Intn(1024)) - p := testPacketV4(ipproto.TCP, srcAddr.As4(), dstAddr.As4(), srcPort, dstPort, size) - stats.UpdateRxVirtual(p) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - select { - case <-ctx.Done(): - c.Fatal("didn't receive dump within context deadline") - case <-gotDump: - } -} - -func TestConcurrent(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/7030") - c := qt.New(t) - - const maxPeriod = 10 * time.Millisecond - const maxConns = 10 - virtualAggregate := make(map[netlogtype.Connection]netlogtype.Counts) - stats := newStatistics(maxPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { - c.Assert(start.IsZero(), qt.IsFalse) - c.Assert(end.IsZero(), qt.IsFalse) - c.Assert(end.Before(start), qt.IsFalse) - c.Assert(len(virtual) > 0 && len(virtual) <= maxConns, qt.IsTrue) - c.Assert(len(physical) == 0, qt.IsTrue) - for conn, cnts := range virtual { - virtualAggregate[conn] = virtualAggregate[conn].Add(cnts) - } - }) - defer stats.Shutdown(context.Background()) - var wants []map[netlogtype.Connection]netlogtype.Counts - gots := make([]map[netlogtype.Connection]netlogtype.Counts, runtime.NumCPU()) - var group sync.WaitGroup - for i := range gots { - group.Add(1) - go func(i int) { - defer group.Done() - gots[i] = make(map[netlogtype.Connection]netlogtype.Counts) - rn := rand.New(rand.NewSource(time.Now().UnixNano())) - var p []byte - var t netlogtype.Connection - for j := 0; j < 1000; j++ { - delay := rn.Intn(10000) - if p == nil || rn.Intn(64) == 0 { - proto := ipproto.TCP - if rn.Intn(2) == 0 { - proto = ipproto.UDP - } - srcAddr := netip.AddrFrom4([4]byte{192, 168, 0, byte(rand.Intn(16))}) - dstAddr := netip.AddrFrom4([4]byte{192, 168, 0, byte(rand.Intn(16))}) - srcPort := uint16(rand.Intn(16)) - dstPort := uint16(rand.Intn(16)) - size := uint16(64 + rand.Intn(1024)) - p = testPacketV4(proto, srcAddr.As4(), dstAddr.As4(), srcPort, dstPort, size) - t = netlogtype.Connection{Proto: proto, Src: netip.AddrPortFrom(srcAddr, srcPort), Dst: netip.AddrPortFrom(dstAddr, dstPort)} - } - t2 := t - receive := rn.Intn(2) == 0 - if receive { - t2.Src, t2.Dst = t2.Dst, t2.Src - } - - cnts := gots[i][t2] - if receive { - stats.UpdateRxVirtual(p) - cnts.RxPackets++ - 
cnts.RxBytes += uint64(len(p)) - } else { - cnts.TxPackets++ - cnts.TxBytes += uint64(len(p)) - stats.UpdateTxVirtual(p) - } - gots[i][t2] = cnts - time.Sleep(time.Duration(rn.Intn(1 + delay))) - } - }(i) - } - group.Wait() - c.Assert(stats.Shutdown(context.Background()), qt.IsNil) - wants = append(wants, virtualAggregate) - - got := make(map[netlogtype.Connection]netlogtype.Counts) - want := make(map[netlogtype.Connection]netlogtype.Counts) - mergeMaps(got, gots...) - mergeMaps(want, wants...) - c.Assert(got, qt.DeepEquals, want) -} - -func mergeMaps(dst map[netlogtype.Connection]netlogtype.Counts, srcs ...map[netlogtype.Connection]netlogtype.Counts) { - for _, src := range srcs { - for conn, cnts := range src { - dst[conn] = dst[conn].Add(cnts) - } - } -} - -func Benchmark(b *testing.B) { - // TODO: Test IPv6 packets? - b.Run("SingleRoutine/SameConn", func(b *testing.B) { - p := testPacketV4(ipproto.UDP, [4]byte{192, 168, 0, 1}, [4]byte{192, 168, 0, 2}, 123, 456, 789) - b.ResetTimer() - b.ReportAllocs() - for range b.N { - s := newStatistics(0, 0, nil) - for j := 0; j < 1e3; j++ { - s.UpdateTxVirtual(p) - } - } - }) - b.Run("SingleRoutine/UniqueConns", func(b *testing.B) { - p := testPacketV4(ipproto.UDP, [4]byte{}, [4]byte{}, 0, 0, 789) - b.ResetTimer() - b.ReportAllocs() - for range b.N { - s := newStatistics(0, 0, nil) - for j := 0; j < 1e3; j++ { - binary.BigEndian.PutUint32(p[20:], uint32(j)) // unique port combination - s.UpdateTxVirtual(p) - } - } - }) - b.Run("MultiRoutine/SameConn", func(b *testing.B) { - p := testPacketV4(ipproto.UDP, [4]byte{192, 168, 0, 1}, [4]byte{192, 168, 0, 2}, 123, 456, 789) - b.ResetTimer() - b.ReportAllocs() - for range b.N { - s := newStatistics(0, 0, nil) - var group sync.WaitGroup - for j := 0; j < runtime.NumCPU(); j++ { - group.Add(1) - go func() { - defer group.Done() - for k := 0; k < 1e3; k++ { - s.UpdateTxVirtual(p) - } - }() - } - group.Wait() - } - }) - b.Run("MultiRoutine/UniqueConns", func(b *testing.B) { - ps := make([][]byte, runtime.NumCPU()) - for i := range ps { - ps[i] = testPacketV4(ipproto.UDP, [4]byte{192, 168, 0, 1}, [4]byte{192, 168, 0, 2}, 0, 0, 789) - } - b.ResetTimer() - b.ReportAllocs() - for range b.N { - s := newStatistics(0, 0, nil) - var group sync.WaitGroup - for j := 0; j < runtime.NumCPU(); j++ { - group.Add(1) - go func(j int) { - defer group.Done() - p := ps[j] - j *= 1e3 - for k := 0; k < 1e3; k++ { - binary.BigEndian.PutUint32(p[20:], uint32(j+k)) // unique port combination - s.UpdateTxVirtual(p) - } - }(j) - } - group.Wait() - } - }) -} diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 8856a3eaf..619df655c 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -1055,7 +1055,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, tid := cfg.NetworkLogging.DomainID logExitFlowEnabled := cfg.NetworkLogging.LogExitFlowEnabled e.logf("wgengine: Reconfig: starting up network logger (node:%s tailnet:%s)", nid.Public(), tid.Public()) - if err := e.networkLogger.Startup(cfg.NodeID, nid, tid, e.tundev, e.magicConn, e.netMon, e.health, e.eventBus, logExitFlowEnabled); err != nil { + if err := e.networkLogger.Startup(e.logf, nm, nid, tid, e.tundev, e.magicConn, e.netMon, e.health, e.eventBus, logExitFlowEnabled); err != nil { e.logf("wgengine: Reconfig: error starting up network logger: %v", err) } e.networkLogger.ReconfigRoutes(routerCfg) @@ -1352,6 +1352,9 @@ func (e *userspaceEngine) SetNetworkMap(nm *netmap.NetworkMap) { e.mu.Lock() e.netMap = nm e.mu.Unlock() + if 
e.networkLogger.Running() { + e.networkLogger.ReconfigNetworkMap(nm) + } } func (e *userspaceEngine) UpdateStatus(sb *ipnstate.StatusBuilder) { From 9ac8105fda6de829b81280666dacaa1b9c093fe1 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Tue, 28 Oct 2025 14:53:59 -0700 Subject: [PATCH 1598/1708] cmd/jsontags: add static analyzer for incompatible `json` struct tags (#17670) This migrates an internal tool to open source so that we can run it on the tailscale.com module as well. This PR does not yet set up a CI to run this analyzer. Updates tailscale/corp#791 Signed-off-by: Joe Tsai --- cmd/jsontags/analyzer.go | 201 +++++++++++++++++++++++++++++++++++++++ cmd/jsontags/iszero.go | 75 +++++++++++++++ cmd/jsontags/report.go | 135 ++++++++++++++++++++++++++ 3 files changed, 411 insertions(+) create mode 100644 cmd/jsontags/analyzer.go create mode 100644 cmd/jsontags/iszero.go create mode 100644 cmd/jsontags/report.go diff --git a/cmd/jsontags/analyzer.go b/cmd/jsontags/analyzer.go new file mode 100644 index 000000000..d799b66cb --- /dev/null +++ b/cmd/jsontags/analyzer.go @@ -0,0 +1,201 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package jsontags checks for incompatible usage of JSON struct tags. +package jsontags + +import ( + "go/ast" + "go/types" + "reflect" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "jsonvet", + Doc: "check for incompatible usages of JSON struct tags", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (any, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + // TODO: Report byte arrays fields without an explicit `format` tag option. + + inspect.Preorder([]ast.Node{(*ast.StructType)(nil)}, func(n ast.Node) { + structType, ok := pass.TypesInfo.Types[n.(*ast.StructType)].Type.(*types.Struct) + if !ok { + return // type information may be incomplete + } + for i := range structType.NumFields() { + fieldVar := structType.Field(i) + tag := reflect.StructTag(structType.Tag(i)).Get("json") + if tag == "" { + continue + } + var seenName, hasFormat bool + for opt := range strings.SplitSeq(tag, ",") { + if !seenName { + seenName = true + continue + } + switch opt { + case "omitempty": + // For bools, ints, uints, floats, strings, and interfaces, + // it is always safe to migrate from `omitempty` to `omitzero` + // so long as the type does not have an IsZero method or + // the IsZero method is identical to reflect.Value.IsZero. + // + // For pointers, it is only safe to migrate from `omitempty` to `omitzero` + // so long as the type does not have an IsZero method, regardless of + // whether the IsZero method is identical to reflect.Value.IsZero. + // + // For pointers, `omitempty` behaves identically on both v1 and v2 + // so long as the type does not implement a Marshal method that + // might serialize as an empty JSON value (i.e., null, "", [], or {}). 
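+				// A minimal illustration of the cases distinguished below
+				// (assumed field names, not from this codebase):
+				//
+				//	Name string    `json:",omitempty"` // same behavior in v1 and v2; left alone
+				//	Port int       `json:",omitempty"` // omitted in v1 when 0; never omitted by v2's omitempty
+				//	When time.Time `json:",omitempty"` // never omitted by v1's omitempty; reported as unsupported in v1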
+ hasIsZero := hasIsZeroMethod(fieldVar.Type()) && !hasPureIsZeroMethod(fieldVar.Type()) + underType := fieldVar.Type().Underlying() + basic, isBasic := underType.(*types.Basic) + array, isArrayKind := underType.(*types.Array) + _, isMapKind := underType.(*types.Map) + _, isSliceKind := underType.(*types.Slice) + _, isPointerKind := underType.(*types.Pointer) + _, isInterfaceKind := underType.(*types.Interface) + supportedInV1 := isNumericKind(underType) || + isBasic && basic.Kind() == types.Bool || + isBasic && basic.Kind() == types.String || + isArrayKind && array.Len() == 0 || + isMapKind || isSliceKind || isPointerKind || isInterfaceKind + notSupportedInV2 := isNumericKind(underType) || + isBasic && basic.Kind() == types.Bool + switch { + case isMapKind, isSliceKind: + // This operates the same under both v1 and v2 so long as + // the map or slice type does not implement Marshal + // that could emit an empty JSON value for cases + // other than when the map or slice are empty. + // This is very rare. + case isString(fieldVar.Type()): + // This operates the same under both v1 and v2. + // These are safe to migrate to `omitzero`, + // but doing so is probably unnecessary churn. + // Note that this is only for a unnamed string type. + case !supportedInV1: + // This never worked in v1. Switching to `omitzero` + // may lead to unexpected behavior changes. + report(pass, structType, fieldVar, OmitEmptyUnsupportedInV1) + case notSupportedInV2: + // This does not work in v2. Switching to `omitzero` + // may lead to unexpected behavior changes. + report(pass, structType, fieldVar, OmitEmptyUnsupportedInV2) + case !hasIsZero: + // These are safe to migrate to `omitzero` such that + // it behaves identically under v1 and v2. + report(pass, structType, fieldVar, OmitEmptyShouldBeOmitZero) + case isPointerKind: + // This operates the same under both v1 and v2 so long as + // the pointer type does not implement Marshal that + // could emit an empty JSON value. + // For example, time.Time is safe since the zero value + // never marshals as an empty JSON string. + default: + // This is a non-pointer type with an IsZero method. + // If IsZero is not identical to reflect.Value.IsZero, + // omission may behave slightly differently when using + // `omitzero` instead of `omitempty`. + // Thus the finding uses the word "should". + report(pass, structType, fieldVar, OmitEmptyShouldBeOmitZeroButHasIsZero) + } + case "string": + if !isNumericKind(fieldVar.Type()) { + report(pass, structType, fieldVar, StringOnNonNumericKind) + } + default: + key, _, ok := strings.Cut(opt, ":") + hasFormat = key == "format" && ok + } + } + if !hasFormat && isTimeDuration(mayPointerElem(fieldVar.Type())) { + report(pass, structType, fieldVar, FormatMissingOnTimeDuration) + } + } + }) + return nil, nil +} + +// hasIsZeroMethod reports whether t has an IsZero method. +func hasIsZeroMethod(t types.Type) bool { + for method := range types.NewMethodSet(t).Methods() { + if fn, ok := method.Type().(*types.Signature); ok && method.Obj().Name() == "IsZero" { + if fn.Params().Len() == 0 && fn.Results().Len() == 1 && isBool(fn.Results().At(0).Type()) { + return true + } + } + } + return false +} + +// isBool reports whether t is a bool type. +func isBool(t types.Type) bool { + basic, ok := t.(*types.Basic) + return ok && basic.Kind() == types.Bool +} + +// isString reports whether t is a string type. 
+func isString(t types.Type) bool { + basic, ok := t.(*types.Basic) + return ok && basic.Kind() == types.String +} + +// isTimeDuration reports whether t is a time.Duration type. +func isTimeDuration(t types.Type) bool { + return isNamed(t, "time", "Duration") +} + +// mayPointerElem returns the pointed-at type if t is a pointer, +// otherwise it returns t as-is. +func mayPointerElem(t types.Type) types.Type { + if pointer, ok := t.(*types.Pointer); ok { + return pointer.Elem() + } + return t +} + +// isNamed reports t is a named typed of the given path and name. +func isNamed(t types.Type, path, name string) bool { + gotPath, gotName := typeName(t) + return gotPath == path && gotName == name +} + +// typeName reports the pkgPath and name of the type. +// It recursively follows type aliases to get the underlying named type. +func typeName(t types.Type) (pkgPath, name string) { + if named, ok := types.Unalias(t).(*types.Named); ok { + obj := named.Obj() + if pkg := obj.Pkg(); pkg != nil { + return pkg.Path(), obj.Name() + } + return "", obj.Name() + } + return "", "" +} + +// isNumericKind reports whether t is a numeric kind. +func isNumericKind(t types.Type) bool { + if basic, ok := t.Underlying().(*types.Basic); ok { + switch basic.Kind() { + case types.Int, types.Int8, types.Int16, types.Int32, types.Int64: + case types.Uint, types.Uint8, types.Uint16, types.Uint32, types.Uint64, types.Uintptr: + case types.Float32, types.Float64: + default: + return false + } + return true + } + return false +} diff --git a/cmd/jsontags/iszero.go b/cmd/jsontags/iszero.go new file mode 100644 index 000000000..77520d72c --- /dev/null +++ b/cmd/jsontags/iszero.go @@ -0,0 +1,75 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package jsontags + +import ( + "go/types" + "reflect" + + "tailscale.com/util/set" +) + +var _ = reflect.Value.IsZero // refer for hot-linking purposes + +var pureIsZeroMethods map[string]set.Set[string] + +// hasPureIsZeroMethod reports whether the IsZero method is truly +// identical to [reflect.Value.IsZero]. +func hasPureIsZeroMethod(t types.Type) bool { + // TODO: Detect this automatically by checking the method AST? + path, name := typeName(t) + return pureIsZeroMethods[path].Contains(name) +} + +// PureIsZeroMethodsInTailscaleModule is a list of known IsZero methods +// in the "tailscale.com" module that are pure. +var PureIsZeroMethodsInTailscaleModule = map[string]set.Set[string]{ + "tailscale.com/net/packet": set.Of( + "TailscaleRejectReason", + ), + "tailscale.com/tailcfg": set.Of( + "UserID", + "LoginID", + "NodeID", + "StableNodeID", + ), + "tailscale.com/tka": set.Of( + "AUMHash", + ), + "tailscale.com/types/geo": set.Of( + "Point", + ), + "tailscale.com/tstime/mono": set.Of( + "Time", + ), + "tailscale.com/types/key": set.Of( + "NLPrivate", + "NLPublic", + "DERPMesh", + "MachinePrivate", + "MachinePublic", + "ControlPrivate", + "DiscoPrivate", + "DiscoPublic", + "DiscoShared", + "HardwareAttestationPublic", + "ChallengePublic", + "NodePrivate", + "NodePublic", + ), + "tailscale.com/types/netlogtype": set.Of( + "Connection", + "Counts", + ), +} + +// RegisterPureIsZeroMethods specifies a list of pure IsZero methods +// where it is identical to calling [reflect.Value.IsZero] on the receiver. +// This is not strictly necessary, but allows for more accurate +// detection of improper use of `json` tags. +// +// This must be called at init and the input must not be mutated. 
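+//
+// For example, an analyzer binary might wire this up at init
+// (hypothetical usage, not part of this change):
+//
+//	func init() {
+//		jsontags.RegisterPureIsZeroMethods(jsontags.PureIsZeroMethodsInTailscaleModule)
+//	}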
+func RegisterPureIsZeroMethods(methods map[string]set.Set[string]) { + pureIsZeroMethods = methods +} diff --git a/cmd/jsontags/report.go b/cmd/jsontags/report.go new file mode 100644 index 000000000..f05788b61 --- /dev/null +++ b/cmd/jsontags/report.go @@ -0,0 +1,135 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package jsontags + +import ( + "fmt" + "go/types" + "os" + "strings" + + _ "embed" + + "golang.org/x/tools/go/analysis" + "tailscale.com/util/set" +) + +var jsontagsAllowlist map[ReportKind]set.Set[string] + +// ParseAllowlist parses an allowlist of reports to ignore, +// which is a newline-delimited list of tuples separated by a tab, +// where each tuple is a [ReportKind] and a fully-qualified field name. +// +// For example: +// +// OmitEmptyUnsupportedInV1 tailscale.com/path/to/package.StructType.FieldName +// OmitEmptyUnsupportedInV1 tailscale.com/path/to/package.*.FieldName +// +// The struct type name may be "*" for anonymous struct types such +// as those declared within a function or as a type literal in a variable. +func ParseAllowlist(b []byte) map[ReportKind]set.Set[string] { + var allowlist map[ReportKind]set.Set[string] + for line := range strings.SplitSeq(string(b), "\n") { + kind, field, _ := strings.Cut(strings.TrimSpace(line), "\t") + if allowlist == nil { + allowlist = make(map[ReportKind]set.Set[string]) + } + fields := allowlist[ReportKind(kind)] + if fields == nil { + fields = make(set.Set[string]) + } + fields.Add(field) + allowlist[ReportKind(kind)] = fields + } + return allowlist +} + +// RegisterAllowlist registers an allowlist of reports to ignore, +// which is represented by a set of fully-qualified field names +// for each [ReportKind]. +// +// For example: +// +// { +// "OmitEmptyUnsupportedInV1": set.Of( +// "tailscale.com/path/to/package.StructType.FieldName", +// "tailscale.com/path/to/package.*.FieldName", +// ), +// } +// +// The struct type name may be "*" for anonymous struct types such +// as those declared within a function or as a type literal in a variable. +// +// This must be called at init and the input must not be mutated. 
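+//
+// For example, a caller might embed an allowlist file and register it at init
+// (the file name is hypothetical):
+//
+//	//go:embed allowlist.txt
+//	var allowlistData []byte
+//
+//	func init() {
+//		jsontags.RegisterAllowlist(jsontags.ParseAllowlist(allowlistData))
+//	}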
+func RegisterAllowlist(allowlist map[ReportKind]set.Set[string]) {
+	jsontagsAllowlist = allowlist
+}
+
+type ReportKind string
+
+const (
+	OmitEmptyUnsupportedInV1              ReportKind = "OmitEmptyUnsupportedInV1"
+	OmitEmptyUnsupportedInV2              ReportKind = "OmitEmptyUnsupportedInV2"
+	OmitEmptyShouldBeOmitZero             ReportKind = "OmitEmptyShouldBeOmitZero"
+	OmitEmptyShouldBeOmitZeroButHasIsZero ReportKind = "OmitEmptyShouldBeOmitZeroButHasIsZero"
+	StringOnNonNumericKind                ReportKind = "StringOnNonNumericKind"
+	FormatMissingOnTimeDuration           ReportKind = "FormatMissingOnTimeDuration"
+)
+
+func (k ReportKind) message() string {
+	switch k {
+	case OmitEmptyUnsupportedInV1:
+		return "uses `omitempty` on an unsupported type in json/v1; should probably use `omitzero` instead"
+	case OmitEmptyUnsupportedInV2:
+		return "uses `omitempty` on an unsupported type in json/v2; should probably use `omitzero` instead"
+	case OmitEmptyShouldBeOmitZero:
+		return "should use `omitzero` instead of `omitempty`"
+	case OmitEmptyShouldBeOmitZeroButHasIsZero:
+		return "should probably use `omitzero` instead of `omitempty`"
+	case StringOnNonNumericKind:
+		return "must not use `string` on non-numeric types"
+	case FormatMissingOnTimeDuration:
+		return "must use an explicit `format` tag (e.g., `format:nano`) on a time.Duration type; see https://go.dev/issue/71631"
+	default:
+		return string(k)
+	}
+}
+
+func report(pass *analysis.Pass, structType *types.Struct, fieldVar *types.Var, k ReportKind) {
+	// Lookup the full name of the struct type.
+	var fullName string
+	for _, name := range pass.Pkg.Scope().Names() {
+		if obj := pass.Pkg.Scope().Lookup(name); obj != nil {
+			if named, ok := obj.(*types.TypeName); ok {
+				if types.Identical(named.Type().Underlying(), structType) {
+					fullName = fmt.Sprintf("%v.%v.%v", named.Pkg().Path(), named.Name(), fieldVar.Name())
+					break
+				}
+			}
+		}
+	}
+	if fullName == "" {
+		// Full name could not be found since this is probably an anonymous type
+		// or locally declared within a function scope.
+		// Use just the package path and field name instead.
+		// This is imprecise, but better than nothing.
+		fullName = fmt.Sprintf("%s.*.%s", fieldVar.Pkg().Path(), fieldVar.Name())
+	}
+	if jsontagsAllowlist[k].Contains(fullName) {
+		return
+	}
+
+	const appendAllowlist = ""
+	if appendAllowlist != "" {
+		if f, err := os.OpenFile(appendAllowlist, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0664); err == nil {
+			fmt.Fprintf(f, "%v\t%v\n", k, fullName)
+			f.Close()
+		}
+	}
+
+	pass.Report(analysis.Diagnostic{
+		Pos:     fieldVar.Pos(),
+		Message: fmt.Sprintf("field %q %s", fieldVar.Name(), k.message()),
+	})
+}

From 3c19addc21c55c83ea1f4180789784c91f2bf348 Mon Sep 17 00:00:00 2001
From: Alex Chan
Date: Wed, 29 Oct 2025 11:00:17 +0000
Subject: [PATCH 1599/1708] tka: rename a mutex to `mu` instead of single-letter `l`

See http://go/no-ell

Updates tailscale/corp#33846

Signed-off-by: Alex Chan
Change-Id: I88ecd9db847e04237c1feab9dfcede5ca1050cc5
---
 tka/tailchonk.go      | 26 +++++++++++++-------------
 tka/tailchonk_test.go |  2 +-
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/tka/tailchonk.go b/tka/tailchonk.go
index 7750b0622..90f99966c 100644
--- a/tka/tailchonk.go
+++ b/tka/tailchonk.go
@@ -82,7 +82,7 @@ type CompactableChonk interface {
 //
 // Mem implements the Chonk interface.
type Mem struct { - l sync.RWMutex + mu sync.RWMutex aums map[AUMHash]AUM parentIndex map[AUMHash][]AUMHash @@ -90,23 +90,23 @@ type Mem struct { } func (c *Mem) SetLastActiveAncestor(hash AUMHash) error { - c.l.Lock() - defer c.l.Unlock() + c.mu.Lock() + defer c.mu.Unlock() c.lastActiveAncestor = &hash return nil } func (c *Mem) LastActiveAncestor() (*AUMHash, error) { - c.l.RLock() - defer c.l.RUnlock() + c.mu.RLock() + defer c.mu.RUnlock() return c.lastActiveAncestor, nil } // Heads returns AUMs for which there are no children. In other // words, the latest AUM in all chains (the 'leaf'). func (c *Mem) Heads() ([]AUM, error) { - c.l.RLock() - defer c.l.RUnlock() + c.mu.RLock() + defer c.mu.RUnlock() out := make([]AUM, 0, 6) // An AUM is a 'head' if there are no nodes for which it is the parent. @@ -120,8 +120,8 @@ func (c *Mem) Heads() ([]AUM, error) { // AUM returns the AUM with the specified digest. func (c *Mem) AUM(hash AUMHash) (AUM, error) { - c.l.RLock() - defer c.l.RUnlock() + c.mu.RLock() + defer c.mu.RUnlock() aum, ok := c.aums[hash] if !ok { return AUM{}, os.ErrNotExist @@ -132,8 +132,8 @@ func (c *Mem) AUM(hash AUMHash) (AUM, error) { // ChildAUMs returns all AUMs with a specified previous // AUM hash. func (c *Mem) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) { - c.l.RLock() - defer c.l.RUnlock() + c.mu.RLock() + defer c.mu.RUnlock() out := make([]AUM, 0, 6) for _, entry := range c.parentIndex[prevAUMHash] { out = append(out, c.aums[entry]) @@ -147,8 +147,8 @@ func (c *Mem) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) { // as the rest of the TKA implementation assumes that only // verified AUMs are stored. func (c *Mem) CommitVerifiedAUMs(updates []AUM) error { - c.l.Lock() - defer c.l.Unlock() + c.mu.Lock() + defer c.mu.Unlock() if c.aums == nil { c.parentIndex = make(map[AUMHash][]AUMHash, 64) c.aums = make(map[AUMHash]AUM, 64) diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index 1a6bad459..7816d2dc1 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -496,7 +496,7 @@ func (c *compactingChonkFake) PurgeAUMs(hashes []AUMHash) error { // Avoid go vet complaining about copying a lock value func cloneMem(src, dst *Mem) { - dst.l = sync.RWMutex{} + dst.mu = sync.RWMutex{} dst.aums = src.aums dst.parentIndex = src.parentIndex dst.lastActiveAncestor = src.lastActiveAncestor From 06b092388e4efb2226a264a03df14b778505278c Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 29 Oct 2025 08:37:19 -0700 Subject: [PATCH 1600/1708] ipn/ipnlocal: do not stall event processing for appc route updates (#17663) A follow-up to #17411. Put AppConnector events into a task queue, as they may take some time to process. Ensure that the queue is stopped at shutdown so that cleanup will remain orderly. Because events are delivered on a separate goroutine, slow processing of an event does not cause an immediate problem; however, a subscriber that blocks for a long time will push back on the bus as a whole. See https://godoc.org/tailscale.com/util/eventbus#hdr-Expected_subscriber_behavior for more discussion. Updates #17192 Updates #15160 Change-Id: Ib313cc68aec273daf2b1ad79538266c81ef063e3 Signed-off-by: M. J. 
Fromberger --- ipn/ipnlocal/local.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 7b2257cca..df278a325 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -87,6 +87,7 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/dnsname" "tailscale.com/util/eventbus" + "tailscale.com/util/execqueue" "tailscale.com/util/goroutines" "tailscale.com/util/mak" "tailscale.com/util/osuser" @@ -187,6 +188,7 @@ type LocalBackend struct { statsLogf logger.Logf // for printing peers stats on change sys *tsd.System eventClient *eventbus.Client + appcTask execqueue.ExecQueue // handles updates from appc health *health.Tracker // always non-nil polc policyclient.Client // always non-nil @@ -613,12 +615,14 @@ func (b *LocalBackend) onAppConnectorRouteUpdate(ru appctype.RouteUpdate) { // We need to find a way to ensure that changes to the backend state are applied // consistently in the presnce of profile changes, which currently may not happen in // a single atomic step. See: https://github.com/tailscale/tailscale/issues/17414 - if err := b.AdvertiseRoute(ru.Advertise...); err != nil { - b.logf("appc: failed to advertise routes: %v: %v", ru.Advertise, err) - } - if err := b.UnadvertiseRoute(ru.Unadvertise...); err != nil { - b.logf("appc: failed to unadvertise routes: %v: %v", ru.Unadvertise, err) - } + b.appcTask.Add(func() { + if err := b.AdvertiseRoute(ru.Advertise...); err != nil { + b.logf("appc: failed to advertise routes: %v: %v", ru.Advertise, err) + } + if err := b.UnadvertiseRoute(ru.Unadvertise...); err != nil { + b.logf("appc: failed to unadvertise routes: %v: %v", ru.Unadvertise, err) + } + }) } func (b *LocalBackend) onAppConnectorStoreRoutes(ri appctype.RouteInfo) { @@ -1082,6 +1086,7 @@ func (b *LocalBackend) Shutdown() { // 1. Event handlers also acquire b.mu, they can deadlock with c.Shutdown(). // 2. Event handlers may not guard against undesirable post/in-progress // LocalBackend.Shutdown() behaviors. + b.appcTask.Shutdown() b.eventClient.Close() b.em.close() From da90e3d8f25d7ef53758688a04a354c9d38d9edc Mon Sep 17 00:00:00 2001 From: Fernando Serboncini Date: Wed, 29 Oct 2025 12:41:44 -0400 Subject: [PATCH 1601/1708] cmd/k8s-operator: rename 'l' variables (#17700) Single letter 'l' variables can eventually become confusing when they're rendered in some fonts that make them similar to 1 or I. 
Updates #cleanup Signed-off-by: Fernando Serboncini --- cmd/k8s-operator/operator.go | 4 ++-- cmd/k8s-operator/proxygroup.go | 4 ++-- cmd/k8s-operator/proxygroup_test.go | 36 ++++++++++++++--------------- cmd/k8s-operator/sts_test.go | 10 ++++---- cmd/k8s-operator/tsrecorder.go | 4 ++-- cmd/k8s-operator/tsrecorder_test.go | 2 +- 6 files changed, 30 insertions(+), 30 deletions(-) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 89c8ff3e2..cc97b1be2 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -636,7 +636,7 @@ func runReconcilers(opts reconcilerOpts) { recorder: eventRecorder, tsNamespace: opts.tailscaleNamespace, Client: mgr.GetClient(), - l: opts.log.Named("recorder-reconciler"), + log: opts.log.Named("recorder-reconciler"), clock: tstime.DefaultClock{}, tsClient: opts.tsClient, loginServer: opts.loginServer, @@ -691,7 +691,7 @@ func runReconcilers(opts reconcilerOpts) { Complete(&ProxyGroupReconciler{ recorder: eventRecorder, Client: mgr.GetClient(), - l: opts.log.Named("proxygroup-reconciler"), + log: opts.log.Named("proxygroup-reconciler"), clock: tstime.DefaultClock{}, tsClient: opts.tsClient, diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index debeb5c6b..946e017a2 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -80,7 +80,7 @@ var ( // ProxyGroupReconciler ensures cluster resources for a ProxyGroup definition. type ProxyGroupReconciler struct { client.Client - l *zap.SugaredLogger + log *zap.SugaredLogger recorder record.EventRecorder clock tstime.Clock tsClient tsClient @@ -101,7 +101,7 @@ type ProxyGroupReconciler struct { } func (r *ProxyGroupReconciler) logger(name string) *zap.SugaredLogger { - return r.l.With("ProxyGroup", name) + return r.log.With("ProxyGroup", name) } func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index d763cf922..2bcc9fb7a 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -670,7 +670,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { t.Logf("created node %q with data", n.name) } - reconciler.l = zl.Sugar().With("TestName", tt.name).With("Reconcile", i) + reconciler.log = zl.Sugar().With("TestName", tt.name).With("Reconcile", i) pg.Spec.Replicas = r.replicas pc.Spec.StaticEndpoints = r.staticEndpointConfig @@ -784,7 +784,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { Client: fc, tsClient: tsClient, recorder: fr, - l: zl.Sugar().With("TestName", tt.name).With("Reconcile", "cleanup"), + log: zl.Sugar().With("TestName", tt.name).With("Reconcile", "cleanup"), clock: cl, } @@ -845,7 +845,7 @@ func TestProxyGroup(t *testing.T) { Client: fc, tsClient: tsClient, recorder: fr, - l: zl.Sugar(), + log: zl.Sugar(), clock: cl, } crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} @@ -1049,7 +1049,7 @@ func TestProxyGroupTypes(t *testing.T) { tsNamespace: tsNamespace, tsProxyImage: testProxyImage, Client: fc, - l: zl.Sugar(), + log: zl.Sugar(), tsClient: &fakeTSClient{}, clock: tstest.NewClock(tstest.ClockOpts{}), } @@ -1289,24 +1289,24 @@ func TestKubeAPIServerStatusConditionFlow(t *testing.T) { tsNamespace: tsNamespace, tsProxyImage: testProxyImage, Client: fc, - l: zap.Must(zap.NewDevelopment()).Sugar(), + log: zap.Must(zap.NewDevelopment()).Sugar(), tsClient: 
&fakeTSClient{}, clock: tstest.NewClock(tstest.ClockOpts{}), } expectReconciled(t, r, "", pg.Name) pg.ObjectMeta.Finalizers = append(pg.ObjectMeta.Finalizers, FinalizerName) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "", 0, r.clock, r.l) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.l) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "", 0, r.clock, r.log) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.log) expectEqual(t, fc, pg, omitPGStatusConditionMessages) // Set kube-apiserver valid. mustUpdateStatus(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { - tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.l) + tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.log) }) expectReconciled(t, r, "", pg.Name) - tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.l) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.l) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.log) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.log) expectEqual(t, fc, pg, omitPGStatusConditionMessages) // Set available. @@ -1318,17 +1318,17 @@ func TestKubeAPIServerStatusConditionFlow(t *testing.T) { TailnetIPs: []string{"1.2.3.4", "::1"}, }, } - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "", 0, r.clock, r.l) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.l) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "", 0, r.clock, r.log) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.log) expectEqual(t, fc, pg, omitPGStatusConditionMessages) // Set kube-apiserver configured. 
mustUpdateStatus(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { - tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.l) + tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.log) }) expectReconciled(t, r, "", pg.Name) - tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.l) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, "", 1, r.clock, r.l) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.log) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, "", 1, r.clock, r.log) expectEqual(t, fc, pg, omitPGStatusConditionMessages) } @@ -1342,7 +1342,7 @@ func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) { tsNamespace: tsNamespace, tsProxyImage: testProxyImage, Client: fc, - l: zap.Must(zap.NewDevelopment()).Sugar(), + log: zap.Must(zap.NewDevelopment()).Sugar(), tsClient: &fakeTSClient{}, clock: tstest.NewClock(tstest.ClockOpts{}), } @@ -1427,7 +1427,7 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { tsNamespace: tsNamespace, tsProxyImage: testProxyImage, Client: fc, - l: zap.Must(zap.NewDevelopment()).Sugar(), + log: zap.Must(zap.NewDevelopment()).Sugar(), tsClient: &fakeTSClient{}, clock: tstest.NewClock(tstest.ClockOpts{}), } @@ -1902,7 +1902,7 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) { defaultProxyClass: tt.defaultProxyClass, Client: fc, tsClient: &fakeTSClient{}, - l: zl.Sugar(), + log: zl.Sugar(), clock: cl, } diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index ea28e77a1..afe54ed98 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -71,11 +71,11 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { SecurityContext: &corev1.PodSecurityContext{ RunAsUser: ptr.To(int64(0)), }, - ImagePullSecrets: []corev1.LocalObjectReference{{Name: "docker-creds"}}, - NodeName: "some-node", - NodeSelector: map[string]string{"beta.kubernetes.io/os": "linux"}, - Affinity: &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{}}}, - Tolerations: []corev1.Toleration{{Key: "", Operator: "Exists"}}, + ImagePullSecrets: []corev1.LocalObjectReference{{Name: "docker-creds"}}, + NodeName: "some-node", + NodeSelector: map[string]string{"beta.kubernetes.io/os": "linux"}, + Affinity: &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{}}}, + Tolerations: []corev1.Toleration{{Key: "", Operator: "Exists"}}, PriorityClassName: "high-priority", TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ { diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index ec95ecf40..c922f78fe 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -54,7 +54,7 @@ var gaugeRecorderResources = clientmetric.NewGauge(kubetypes.MetricRecorderCount // Recorder CRs. 
type RecorderReconciler struct { client.Client - l *zap.SugaredLogger + log *zap.SugaredLogger recorder record.EventRecorder clock tstime.Clock tsNamespace string @@ -66,7 +66,7 @@ type RecorderReconciler struct { } func (r *RecorderReconciler) logger(name string) *zap.SugaredLogger { - return r.l.With("Recorder", name) + return r.log.With("Recorder", name) } func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { diff --git a/cmd/k8s-operator/tsrecorder_test.go b/cmd/k8s-operator/tsrecorder_test.go index 990bd6819..184af2344 100644 --- a/cmd/k8s-operator/tsrecorder_test.go +++ b/cmd/k8s-operator/tsrecorder_test.go @@ -52,7 +52,7 @@ func TestRecorder(t *testing.T) { Client: fc, tsClient: tsClient, recorder: fr, - l: zl.Sugar(), + log: zl.Sugar(), clock: cl, loginServer: tsLoginServer, } From 74f1d8bd87931ded9540d7108afa36e28308be2e Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Wed, 29 Oct 2025 11:58:10 -0500 Subject: [PATCH 1602/1708] cmd/tailscale/cli: unhide serve get-config and serve set-config (#17598) Fixes tailscale/corp#33152 Signed-off-by: Harry Harpham --- cmd/tailscale/cli/serve_v2.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 30adcb8e7..ad143cfdc 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -292,7 +292,7 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { Name: "get-config", ShortUsage: fmt.Sprintf("tailscale %s get-config [--service=] [--all]", info.Name), ShortHelp: "Get service configuration to save to a file", - LongHelp: hidden + "Get the configuration for services that this node is currently hosting in a\n" + + LongHelp: "Get the configuration for services that this node is currently hosting in a\n" + "format that can later be provided to set-config. This can be used to declaratively set\n" + "configuration for a service host.", Exec: e.runServeGetConfig, @@ -305,10 +305,11 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { Name: "set-config", ShortUsage: fmt.Sprintf("tailscale %s set-config [--service=] [--all]", info.Name), ShortHelp: "Define service configuration from a file", - LongHelp: hidden + "Read the provided configuration file and use it to declaratively set the configuration\n" + + LongHelp: "Read the provided configuration file and use it to declaratively set the configuration\n" + "for either a single service, or for all services that this node is hosting. If --service is specified,\n" + "all endpoint handlers for that service are overwritten. 
If --all is specified, all endpoint handlers for\n" + - "all services are overwritten.", + "all services are overwritten.\n\n" + + "For information on the file format, see tailscale.com/kb/1589/tailscale-services-configuration-file", Exec: e.runServeSetConfig, FlagSet: e.newFlags("serve-set-config", func(fs *flag.FlagSet) { fs.BoolVar(&e.allServices, "all", false, "apply config to all services") From d5a40c01ab5bc5e33ef2b0ec4bea3cbd38050f48 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 29 Oct 2025 13:21:23 -0700 Subject: [PATCH 1603/1708] cmd/k8s-operator/generate: skip tests if no network or Helm is down Updates helm/helm#31434 Change-Id: I5eb20e97ff543f883d5646c9324f50f54180851d Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/generate/main.go | 2 +- cmd/k8s-operator/generate/main_test.go | 26 +++++++++++++++++++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/cmd/k8s-operator/generate/main.go b/cmd/k8s-operator/generate/main.go index 6904f1df0..5fd5d551b 100644 --- a/cmd/k8s-operator/generate/main.go +++ b/cmd/k8s-operator/generate/main.go @@ -144,7 +144,7 @@ func generate(baseDir string) error { if _, err := file.Write([]byte(helmConditionalEnd)); err != nil { return fmt.Errorf("error writing helm if-statement end: %w", err) } - return nil + return file.Close() } for _, crd := range []struct { crdPath, templatePath string diff --git a/cmd/k8s-operator/generate/main_test.go b/cmd/k8s-operator/generate/main_test.go index c7956dcdb..5ea7fec80 100644 --- a/cmd/k8s-operator/generate/main_test.go +++ b/cmd/k8s-operator/generate/main_test.go @@ -7,26 +7,50 @@ package main import ( "bytes" + "context" + "net" "os" "os/exec" "path/filepath" "strings" "testing" + "time" + + "tailscale.com/tstest/nettest" + "tailscale.com/util/cibuild" ) func Test_generate(t *testing.T) { + nettest.SkipIfNoNetwork(t) + + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + if _, err := net.DefaultResolver.LookupIPAddr(ctx, "get.helm.sh"); err != nil { + // https://github.com/helm/helm/issues/31434 + t.Skipf("get.helm.sh seems down or unreachable; skipping test") + } + base, err := os.Getwd() base = filepath.Join(base, "../../../") if err != nil { t.Fatalf("error getting current working directory: %v", err) } defer cleanup(base) + + helmCLIPath := filepath.Join(base, "tool/helm") + if out, err := exec.Command(helmCLIPath, "version").CombinedOutput(); err != nil && cibuild.On() { + // It's not just DNS. Azure is generating bogus certs within GitHub Actions at least for + // helm. So try to run it and see if we can even fetch it. + // + // https://github.com/helm/helm/issues/31434 + t.Skipf("error fetching helm; skipping test in CI: %v, %s", err, out) + } + if err := generate(base); err != nil { t.Fatalf("CRD template generation: %v", err) } tempDir := t.TempDir() - helmCLIPath := filepath.Join(base, "tool/helm") helmChartTemplatesPath := filepath.Join(base, "cmd/k8s-operator/deploy/chart") helmPackageCmd := exec.Command(helmCLIPath, "package", helmChartTemplatesPath, "--destination", tempDir, "--version", "0.0.1") helmPackageCmd.Stderr = os.Stderr From 89962546471472823f4fce7877ca7f906c07ecb0 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 29 Oct 2025 13:02:29 -0700 Subject: [PATCH 1604/1708] sessionrecording: fix regression in recent http2 package change In 3f5c560fd45664813 I changed to use std net/http's HTTP/2 support, instead of pulling in x/net/http2. 
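For context, a minimal sketch of how the std library can speak unencrypted HTTP/2 through a caller-supplied dialer (this is not the repo's code; it assumes Go 1.24+'s net/http Protocols API, and the newH2CClient name is illustrative — the real wiring is in the diff below):

    import (
        "context"
        "net"
        "net/http"
    )

    // newH2CClient returns an *http.Client that speaks HTTP/2 over plain TCP
    // (h2c) and routes every dial through the provided dialer rather than the
    // default net.Dialer.
    func newH2CClient(dial func(ctx context.Context, network, addr string) (net.Conn, error)) *http.Client {
        var p http.Protocols
        p.SetUnencryptedHTTP2(true) // HTTP/2 without TLS
        return &http.Client{
            Transport: &http.Transport{
                Protocols:   &p,
                DialContext: dial, // plaintext dials, so DialContext rather than DialTLSContext
            },
        }
    }
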
But I forgot to update DialTLSContext to DialContext, which meant it was falling back to using the std net.Dialer for its dials, instead of the passed-in one. The tests only passed because they were using localhost addresses, so the std net.Dialer worked. But in prod, where a tsnet Dialer would be needed, it didn't work, and would time out for 10 seconds before resorting to the old protocol. So this fixes the tests to use an isolated in-memory network to prevent that class of problem in the future. With the test change, the old code fails and the new code passes. Thanks to @jasonodonnell for debugging! Updates #17304 Updates 3f5c560fd45664813 Change-Id: I3602bafd07dc6548e2c62985af9ac0afb3a0e967 Signed-off-by: Brad Fitzpatrick --- sessionrecording/connect.go | 5 +---- sessionrecording/connect_test.go | 14 ++++++++++---- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/sessionrecording/connect.go b/sessionrecording/connect.go index 8abf9dd7e..9d20b41f9 100644 --- a/sessionrecording/connect.go +++ b/sessionrecording/connect.go @@ -405,10 +405,7 @@ func clientHTTP2(dialCtx context.Context, dial netx.DialFunc) *http.Client { return &http.Client{ Transport: &http.Transport{ Protocols: &p, - // Pretend like we're using TLS, but actually use the provided - // DialFunc underneath. This is necessary to convince the transport - // to actually dial. - DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { perAttemptCtx, cancel := context.WithTimeout(ctx, perDialAttemptTimeout) defer cancel() go func() { diff --git a/sessionrecording/connect_test.go b/sessionrecording/connect_test.go index cacf061d7..e834828f5 100644 --- a/sessionrecording/connect_test.go +++ b/sessionrecording/connect_test.go @@ -21,6 +21,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" + "tailscale.com/net/memnet" ) func TestConnectToRecorder(t *testing.T) { @@ -145,7 +146,14 @@ func TestConnectToRecorder(t *testing.T) { t.Run(tt.desc, func(t *testing.T) { mux, uploadHash := tt.setup(t) - srv := httptest.NewUnstartedServer(mux) + memNet := &memnet.Network{} + ln := memNet.NewLocalTCPListener() + + srv := &httptest.Server{ + Config: &http.Server{Handler: mux}, + Listener: ln, + } + if tt.http2 { // Wire up h2c-compatible HTTP/2 server. This is optional // because the v1 recorder didn't support HTTP/2 and we try to @@ -159,10 +167,8 @@ func TestConnectToRecorder(t *testing.T) { srv.Start() t.Cleanup(srv.Close) - d := new(net.Dialer) - ctx := context.Background() - w, _, errc, err := ConnectToRecorder(ctx, []netip.AddrPort{netip.MustParseAddrPort(srv.Listener.Addr().String())}, d.DialContext) + w, _, errc, err := ConnectToRecorder(ctx, []netip.AddrPort{netip.MustParseAddrPort(ln.Addr().String())}, memNet.Dial) if err != nil { t.Fatalf("ConnectToRecorder: %v", err) } From 05d2dcaf49ab0dbbc6fd726e851c7c5bc2139dfa Mon Sep 17 00:00:00 2001 From: Erisa A Date: Wed, 29 Oct 2025 21:15:46 +0000 Subject: [PATCH 1605/1708] words: remove a fish (#17704) Some combinations are problematic in non-fish contexts. 
Updates #words Signed-off-by: Erisa A --- words/tails.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/words/tails.txt b/words/tails.txt index f5e93bf50..b0119a756 100644 --- a/words/tails.txt +++ b/words/tails.txt @@ -755,7 +755,6 @@ pipefish seahorse flounder tilapia -chub dorado shad lionfish From d68513b0dbe352ca03165be35ebc5edd626ac83e Mon Sep 17 00:00:00 2001 From: Fernando Serboncini Date: Wed, 29 Oct 2025 21:27:59 -0400 Subject: [PATCH 1606/1708] ipn: add support for HTTP Redirects (#17594) Adds a new Redirect field to HTTPHandler for serving HTTP redirects from the Tailscale serve config. The redirect URL supports template variables ${HOST} and ${REQUEST_URI} that are resolved per request. By default, it redirects using HTTP Status 302 (Found). For another redirect status, like 301 - Moved Permanently, pass the HTTP status code followed by ':' on Redirect, like: "301:https://tailscale.com" Updates #11252 Updates #11330 Signed-off-by: Fernando Serboncini --- ipn/ipn_clone.go | 1 + ipn/ipn_view.go | 10 +++ ipn/ipnlocal/serve.go | 20 ++++++ ipn/ipnlocal/serve_test.go | 127 +++++++++++++++++++++++++++++++++++++ ipn/serve.go | 11 +++- 5 files changed, 168 insertions(+), 1 deletion(-) diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 8a0a3c833..3d2670947 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -242,6 +242,7 @@ var _HTTPHandlerCloneNeedsRegeneration = HTTPHandler(struct { Proxy string Text string AcceptAppCaps []tailcfg.PeerCapability + Redirect string }{}) // Clone makes a deep copy of WebServerConfig. diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 61d0dec23..ba5477a6d 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -896,12 +896,22 @@ func (v HTTPHandlerView) AcceptAppCaps() views.Slice[tailcfg.PeerCapability] { return views.SliceOf(v.ж.AcceptAppCaps) } +// Redirect, if not empty, is the target URL to redirect requests to. +// By default, we redirect with HTTP 302 (Found) status. +// If Redirect starts with ':', then we use that status instead. +// +// The target URL supports the following expansion variables: +// - ${HOST}: replaced with the request's Host header value +// - ${REQUEST_URI}: replaced with the request's full URI (path and query string) +func (v HTTPHandlerView) Redirect() string { return v.ж.Redirect } + // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _HTTPHandlerViewNeedsRegeneration = HTTPHandler(struct { Path string Proxy string Text string AcceptAppCaps []tailcfg.PeerCapability + Redirect string }{}) // View returns a read-only view of WebServerConfig. diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index eb2c932c0..554761ed7 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -966,6 +966,19 @@ func (b *LocalBackend) addAppCapabilitiesHeader(r *httputil.ProxyRequest) error return nil } +// parseRedirectWithCode parses a redirect string that may optionally start with +// a HTTP redirect status code ("3xx:"). +// Returns the status code and the final redirect URL. +// If no code prefix is found, returns http.StatusFound (302). +func parseRedirectWithCode(redirect string) (code int, url string) { + if len(redirect) >= 4 && redirect[3] == ':' { + if statusCode, err := strconv.Atoi(redirect[:3]); err == nil && statusCode >= 300 && statusCode <= 399 { + return statusCode, redirect[4:] + } + } + return http.StatusFound, redirect +} + // serveWebHandler is an http.HandlerFunc that maps incoming requests to the // correct *http. 
func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) { @@ -979,6 +992,13 @@ func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) { io.WriteString(w, s) return } + if v := h.Redirect(); v != "" { + code, v := parseRedirectWithCode(v) + v = strings.ReplaceAll(v, "${HOST}", r.Host) + v = strings.ReplaceAll(v, "${REQUEST_URI}", r.RequestURI) + http.Redirect(w, r, v, code) + return + } if v := h.Path(); v != "" { b.serveFileOrDirectory(w, r, v, mountPoint) return diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index a72c50c1f..c3e5b2ff9 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -72,6 +72,41 @@ func TestExpandProxyArg(t *testing.T) { } } +func TestParseRedirectWithRedirectCode(t *testing.T) { + tests := []struct { + in string + wantCode int + wantURL string + }{ + {"301:https://example.com", 301, "https://example.com"}, + {"302:https://example.com", 302, "https://example.com"}, + {"303:/path", 303, "/path"}, + {"307:https://example.com/path?query=1", 307, "https://example.com/path?query=1"}, + {"308:https://example.com", 308, "https://example.com"}, + + {"https://example.com", 302, "https://example.com"}, + {"/path", 302, "/path"}, + {"http://example.com", 302, "http://example.com"}, + {"git://example.com", 302, "git://example.com"}, + + {"200:https://example.com", 302, "200:https://example.com"}, + {"404:https://example.com", 302, "404:https://example.com"}, + {"500:https://example.com", 302, "500:https://example.com"}, + {"30:https://example.com", 302, "30:https://example.com"}, + {"3:https://example.com", 302, "3:https://example.com"}, + {"3012:https://example.com", 302, "3012:https://example.com"}, + {"abc:https://example.com", 302, "abc:https://example.com"}, + {"301", 302, "301"}, + } + for _, tt := range tests { + gotCode, gotURL := parseRedirectWithCode(tt.in) + if gotCode != tt.wantCode || gotURL != tt.wantURL { + t.Errorf("parseRedirectWithCode(%q) = (%d, %q), want (%d, %q)", + tt.in, gotCode, gotURL, tt.wantCode, tt.wantURL) + } + } +} + func TestGetServeHandler(t *testing.T) { const serverName = "example.ts.net" conf1 := &ipn.ServeConfig{ @@ -1327,3 +1362,95 @@ func TestServeGRPCProxy(t *testing.T) { }) } } + +func TestServeHTTPRedirect(t *testing.T) { + b := newTestBackend(t) + + tests := []struct { + host string + path string + redirect string + reqURI string + wantCode int + wantLoc string + }{ + { + host: "hardcoded-root", + path: "/", + redirect: "https://example.com/", + reqURI: "/old", + wantCode: http.StatusFound, // 302 is the default + wantLoc: "https://example.com/", + }, + { + host: "template-host-and-uri", + path: "/", + redirect: "https://${HOST}${REQUEST_URI}", + reqURI: "/path?foo=bar", + wantCode: http.StatusFound, // 302 is the default + wantLoc: "https://template-host-and-uri/path?foo=bar", + }, + { + host: "custom-301", + path: "/", + redirect: "301:https://example.com/", + reqURI: "/old", + wantCode: http.StatusMovedPermanently, // 301 + wantLoc: "https://example.com/", + }, + { + host: "custom-307", + path: "/", + redirect: "307:https://example.com/new", + reqURI: "/old", + wantCode: http.StatusTemporaryRedirect, // 307 + wantLoc: "https://example.com/new", + }, + { + host: "custom-308", + path: "/", + redirect: "308:https://example.com/permanent", + reqURI: "/old", + wantCode: http.StatusPermanentRedirect, // 308 + wantLoc: "https://example.com/permanent", + }, + } + + for _, tt := range tests { + t.Run(tt.host, func(t *testing.T) { + conf := 
&ipn.ServeConfig{ + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + ipn.HostPort(tt.host + ":80"): { + Handlers: map[string]*ipn.HTTPHandler{ + tt.path: {Redirect: tt.redirect}, + }, + }, + }, + } + if err := b.SetServeConfig(conf, ""); err != nil { + t.Fatal(err) + } + + req := &http.Request{ + Host: tt.host, + URL: &url.URL{Path: tt.path}, + RequestURI: tt.reqURI, + TLS: &tls.ConnectionState{ServerName: tt.host}, + } + req = req.WithContext(serveHTTPContextKey.WithValue(req.Context(), &serveHTTPContext{ + DestPort: 80, + SrcAddr: netip.MustParseAddrPort("1.2.3.4:1234"), + })) + + w := httptest.NewRecorder() + b.serveWebHandler(w, req) + + if w.Code != tt.wantCode { + t.Errorf("got status %d, want %d", w.Code, tt.wantCode) + } + if got := w.Header().Get("Location"); got != tt.wantLoc { + t.Errorf("got Location %q, want %q", got, tt.wantLoc) + } + }) + } +} diff --git a/ipn/serve.go b/ipn/serve.go index 3f674d9ed..2ac37a141 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -162,8 +162,17 @@ type HTTPHandler struct { AcceptAppCaps []tailcfg.PeerCapability `json:",omitempty"` // peer capabilities to forward in grant header, e.g. example.com/cap/mon + // Redirect, if not empty, is the target URL to redirect requests to. + // By default, we redirect with HTTP 302 (Found) status. + // If Redirect starts with ':', then we use that status instead. + // + // The target URL supports the following expansion variables: + // - ${HOST}: replaced with the request's Host header value + // - ${REQUEST_URI}: replaced with the request's full URI (path and query string) + Redirect string `json:",omitempty"` + // TODO(bradfitz): bool to not enumerate directories? TTL on mapping for - // temporary ones? Error codes? Redirects? + // temporary ones? Error codes? } // WebHandlerExists reports whether if the ServeConfig Web handler exists for From 95426b79a9b102c9224cebbdab170033b65ddd08 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 30 Oct 2025 08:18:32 -0700 Subject: [PATCH 1607/1708] logtail: avoid racing eventbus subscriptions with shutdown (#17695) In #17639 we moved the subscription into NewLogger to ensure we would not race subscribing with shutdown of the eventbus client. Doing so fixed that problem, but exposed another: As we were only servicing events occasionally when waiting for the network to come up, we could leave the eventbus to stall in cases where a number of network deltas arrived later and weren't processed. To address that, let's separate the concerns: As before, we'll Subscribe early to avoid conflicts with shutdown; but instead of using the subscriber directly to determine readiness, we'll keep track of the last-known network state in a selectable condition that the subscriber updates for us. When we want to wait, we'll wait on that condition (or until our context ends), ensuring all the events get processed in a timely manner. Updates #17638 Updates #15160 Change-Id: I28339a372be4ab24be46e2834a218874c33a0d2d Signed-off-by: M. J. 
Fromberger --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + flake.nix | 2 +- go.mod | 3 ++- go.mod.sri | 2 +- go.sum | 10 ++++--- logtail/logtail.go | 50 ++++++++++++++++++++--------------- shell.nix | 2 +- tsnet/depaware.txt | 1 + 10 files changed, 43 insertions(+), 30 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 6cffda2dd..8d1f7fa06 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -12,6 +12,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket + github.com/creachadair/msync/trigger from tailscale.com/logtail 💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index e92d41b98..c1708711a 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -86,6 +86,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw + github.com/creachadair/msync/trigger from tailscale.com/logtail LD 💣 github.com/creack/pty from tailscale.com/ssh/tailssh W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/cmd/tailscaled+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index a2a473a50..80c8e04a8 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -9,6 +9,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket + github.com/creachadair/msync/trigger from tailscale.com/logtail W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc diff --git a/flake.nix b/flake.nix index 726757f7a..da4c87a0b 100644 --- a/flake.nix +++ b/flake.nix @@ -151,5 +151,5 @@ }); }; } -# nix-direnv cache busting line: sha256-rV3C2Vi48FCifGt58OdEO4+Av0HRIs8sUJVvp/gEBLw= +# nix-direnv cache busting line: sha256-pZCy1KHUe7f7cjm816OwA+bjGrSRnSTxkvCmB4cmWqw= diff --git a/go.mod b/go.mod index 3c281fa7a..12f7946b8 100644 --- a/go.mod +++ b/go.mod @@ -21,6 +21,7 @@ require ( github.com/coder/websocket v1.8.12 github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf + github.com/creachadair/msync v0.7.1 github.com/creachadair/taskgroup v0.13.2 github.com/creack/pty v1.1.23 github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa @@ -114,7 +115,7 @@ require ( golang.zx2c4.com/wireguard/windows v0.5.3 gopkg.in/square/go-jose.v2 v2.6.0 gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 - 
honnef.co/go/tools v0.5.1 + honnef.co/go/tools v0.6.1 k8s.io/api v0.32.0 k8s.io/apimachinery v0.32.0 k8s.io/apiserver v0.32.0 diff --git a/go.mod.sri b/go.mod.sri index f94054422..c9f537473 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-rV3C2Vi48FCifGt58OdEO4+Av0HRIs8sUJVvp/gEBLw= +sha256-pZCy1KHUe7f7cjm816OwA+bjGrSRnSTxkvCmB4cmWqw= diff --git a/go.sum b/go.sum index bc386d1fd..eea0d6c7d 100644 --- a/go.sum +++ b/go.sum @@ -244,8 +244,10 @@ github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/creachadair/mds v0.17.1 h1:lXQbTGKmb3nE3aK6OEp29L1gCx6B5ynzlQ6c1KOBurc= -github.com/creachadair/mds v0.17.1/go.mod h1:4b//mUiL8YldH6TImXjmW45myzTLNS1LLjOmrk888eg= +github.com/creachadair/mds v0.25.9 h1:080Hr8laN2h+l3NeVCGMBpXtIPnl9mz8e4HLraGPqtA= +github.com/creachadair/mds v0.25.9/go.mod h1:4hatI3hRM+qhzuAmqPRFvaBM8mONkS7nsLxkcuTYUIs= +github.com/creachadair/msync v0.7.1 h1:SeZmuEBXQPe5GqV/C94ER7QIZPwtvFbeQiykzt/7uho= +github.com/creachadair/msync v0.7.1/go.mod h1:8CcFlLsSujfHE5wWm19uUBLHIPDAUr6LXDwneVMO008= github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc= github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -1534,8 +1536,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I= -honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs= +honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= +honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= diff --git a/logtail/logtail.go b/logtail/logtail.go index 675422890..6ff4dd04f 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -25,6 +25,7 @@ import ( "sync/atomic" "time" + "github.com/creachadair/msync/trigger" "github.com/go-json-experiment/json/jsontext" "tailscale.com/envknob" "tailscale.com/net/netmon" @@ -124,6 +125,8 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger { if cfg.Bus != nil { l.eventClient = cfg.Bus.Client("logtail.Logger") + // Subscribe to change deltas from NetMon to detect when the network comes up. 
+ eventbus.SubscribeFunc(l.eventClient, l.onChangeDelta) } l.SetSockstatsLabel(sockstats.LabelLogtailLogger) l.compressLogs = cfg.CompressLogs @@ -162,6 +165,7 @@ type Logger struct { httpDoCalls atomic.Int32 sockstatsLabel atomicSocktatsLabel eventClient *eventbus.Client + networkIsUp trigger.Cond // set/reset by netmon.ChangeDelta events procID uint32 includeProcSequence bool @@ -418,16 +422,36 @@ func (l *Logger) uploading(ctx context.Context) { } func (l *Logger) internetUp() bool { - if l.netMonitor == nil { - // No way to tell, so assume it is. + select { + case <-l.networkIsUp.Ready(): return true + default: + if l.netMonitor == nil { + return true // No way to tell, so assume it is. + } + return l.netMonitor.InterfaceState().AnyInterfaceUp() + } +} + +// onChangeDelta is an eventbus subscriber function that handles +// [netmon.ChangeDelta] events to detect whether the Internet is expected to be +// reachable. +func (l *Logger) onChangeDelta(delta *netmon.ChangeDelta) { + if delta.New.AnyInterfaceUp() { + fmt.Fprintf(l.stderr, "logtail: internet back up\n") + l.networkIsUp.Set() + } else { + fmt.Fprintf(l.stderr, "logtail: network changed, but is not up\n") + l.networkIsUp.Reset() } - return l.netMonitor.InterfaceState().AnyInterfaceUp() } func (l *Logger) awaitInternetUp(ctx context.Context) { if l.eventClient != nil { - l.awaitInternetUpBus(ctx) + select { + case <-l.networkIsUp.Ready(): + case <-ctx.Done(): + } return } upc := make(chan bool, 1) @@ -449,24 +473,6 @@ func (l *Logger) awaitInternetUp(ctx context.Context) { } } -func (l *Logger) awaitInternetUpBus(ctx context.Context) { - if l.internetUp() { - return - } - sub := eventbus.Subscribe[netmon.ChangeDelta](l.eventClient) - defer sub.Close() - select { - case delta := <-sub.Events(): - if delta.New.AnyInterfaceUp() { - fmt.Fprintf(l.stderr, "logtail: internet back up\n") - return - } - fmt.Fprintf(l.stderr, "logtail: network changed, but is not up") - case <-ctx.Done(): - return - } -} - // upload uploads body to the log server. // origlen indicates the pre-compression body length. // origlen of -1 indicates that the body is not compressed. diff --git a/shell.nix b/shell.nix index ec345998a..99cfbd243 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-rV3C2Vi48FCifGt58OdEO4+Av0HRIs8sUJVvp/gEBLw= +# nix-direnv cache busting line: sha256-pZCy1KHUe7f7cjm816OwA+bjGrSRnSTxkvCmB4cmWqw= diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index cd734e995..ef0fe0667 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -9,6 +9,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) LDW github.com/coder/websocket/internal/errd from github.com/coder/websocket LDW github.com/coder/websocket/internal/util from github.com/coder/websocket LDW github.com/coder/websocket/internal/xsync from github.com/coder/websocket + github.com/creachadair/msync/trigger from tailscale.com/logtail W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc From adee8b9180cbdc0bd352ffbf11a7dba3b4e6b946 Mon Sep 17 00:00:00 2001 From: Gesa Stupperich Date: Thu, 30 Oct 2025 13:57:39 +0000 Subject: [PATCH 1608/1708] cmd/tailscale/cli/serve_v2: improve validation error Specify the app apability that failed the test, instead of the entire comma-separated list. 
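As a rough sketch of the behavior this describes (the actual change is the one-line diff below; the regexp and the parseAppCaps helper here are illustrative stand-ins), each comma-separated item is validated on its own, so the error can quote just the capability that failed:

    import (
        "fmt"
        "regexp"
        "strings"
    )

    // validAppCap is a stand-in for the real pattern enforcing {domain}/{name}.
    var validAppCap = regexp.MustCompile(`^[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)+(/[a-zA-Z0-9-]+)+$`)

    func parseAppCaps(s string) ([]string, error) {
        var out []string
        for _, appCap := range strings.Split(s, ",") {
            appCap = strings.TrimSpace(appCap)
            if appCap == "" {
                continue // empty input is a no-op
            }
            if !validAppCap.MatchString(appCap) {
                // Quote only the offending item, not the entire flag value.
                return nil, fmt.Errorf("%q does not match the form {domain}/{name}", appCap)
            }
            out = append(out, appCap)
        }
        return out, nil
    }
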
Fixes #cleanup Signed-off-by: Gesa Stupperich --- cmd/tailscale/cli/serve_v2.go | 2 +- cmd/tailscale/cli/serve_v2_test.go | 62 +++++++++++++++++++----------- 2 files changed, 40 insertions(+), 24 deletions(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index ad143cfdc..74458a950 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -116,7 +116,7 @@ func (u *acceptAppCapsFlag) Set(s string) error { for _, appCap := range appCaps { appCap = strings.TrimSpace(appCap) if !validAppCap.MatchString(appCap) { - return fmt.Errorf("%q does not match the form {domain}/{name}, where domain must be a fully qualified domain name", s) + return fmt.Errorf("%q does not match the form {domain}/{name}, where domain must be a fully qualified domain name", appCap) } *u.Value = append(*u.Value, tailcfg.PeerCapability(appCap)) } diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index dfa17f1fa..7f7f2c37c 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -12,6 +12,7 @@ import ( "os" "path/filepath" "reflect" + "regexp" "slices" "strconv" "strings" @@ -908,8 +909,13 @@ func TestServeDevConfigMutations(t *testing.T) { name: "invalid_accept_caps_invalid_app_cap", steps: []step{ { - command: cmd("serve --bg --accept-app-caps=example/cap/foo 3000"), // should be {domain.tld}/{name} - wantErr: anyErr(), + command: cmd("serve --bg --accept-app-caps=example.com/cap/fine,NOTFINE 3000"), // should be {domain.tld}/{name} + wantErr: func(err error) (badErrMsg string) { + if err == nil || !strings.Contains(err.Error(), fmt.Sprintf("%q does not match", "NOTFINE")) { + return fmt.Sprintf("wanted validation error that quotes the non-matching capability (and nothing more) but got %q", err.Error()) + } + return "" + }, }, }, }, @@ -1231,10 +1237,11 @@ func TestSrcTypeFromFlags(t *testing.T) { func TestAcceptSetAppCapsFlag(t *testing.T) { testCases := []struct { - name string - inputs []string - expectErr bool - expectedValue []tailcfg.PeerCapability + name string + inputs []string + expectErr bool + expectErrToMatch *regexp.Regexp + expectedValue []tailcfg.PeerCapability }{ { name: "valid_simple", @@ -1262,7 +1269,7 @@ func TestAcceptSetAppCapsFlag(t *testing.T) { }, { name: "valid_multiple_sets", - inputs: []string{"one.com/foo", "two.com/bar"}, + inputs: []string{"one.com/foo,two.com/bar"}, expectErr: false, expectedValue: []tailcfg.PeerCapability{"one.com/foo", "two.com/bar"}, }, @@ -1273,10 +1280,11 @@ func TestAcceptSetAppCapsFlag(t *testing.T) { expectedValue: nil, // Empty string should be a no-op and not append anything. }, { - name: "invalid_path_chars", - inputs: []string{"domain.com/path_with_underscore"}, - expectErr: true, - expectedValue: nil, // Slice should remain empty. + name: "invalid_path_chars", + inputs: []string{"domain.com/path_with_underscore"}, + expectErr: true, + expectErrToMatch: regexp.MustCompile(`"domain.com/path_with_underscore"`), + expectedValue: nil, // Slice should remain empty. 
}, { name: "valid_subdomain", @@ -1285,22 +1293,25 @@ func TestAcceptSetAppCapsFlag(t *testing.T) { expectedValue: []tailcfg.PeerCapability{"sub.domain.com/name"}, }, { - name: "invalid_no_path", - inputs: []string{"domain.com/"}, - expectErr: true, - expectedValue: nil, + name: "invalid_no_path", + inputs: []string{"domain.com/"}, + expectErr: true, + expectErrToMatch: regexp.MustCompile(`"domain.com/"`), + expectedValue: nil, }, { - name: "invalid_no_domain", - inputs: []string{"/path/only"}, - expectErr: true, - expectedValue: nil, + name: "invalid_no_domain", + inputs: []string{"/path/only"}, + expectErr: true, + expectErrToMatch: regexp.MustCompile(`"/path/only"`), + expectedValue: nil, }, { - name: "some_invalid_some_valid", - inputs: []string{"one.com/foo", "bad/bar", "two.com/baz"}, - expectErr: true, - expectedValue: []tailcfg.PeerCapability{"one.com/foo"}, // Parsing will stop after first error + name: "some_invalid_some_valid", + inputs: []string{"one.com/foo,bad/bar,two.com/baz"}, + expectErr: true, + expectErrToMatch: regexp.MustCompile(`"bad/bar"`), + expectedValue: []tailcfg.PeerCapability{"one.com/foo"}, // Parsing will stop after first error }, } @@ -1320,6 +1331,11 @@ func TestAcceptSetAppCapsFlag(t *testing.T) { if tc.expectErr && err == nil { t.Errorf("expected an error, but got none") } + if tc.expectErrToMatch != nil { + if !tc.expectErrToMatch.MatchString(err.Error()) { + t.Errorf("expected error to match %q, but was %q", tc.expectErrToMatch, err) + } + } if !tc.expectErr && err != nil { t.Errorf("did not expect an error, but got: %v", err) } From b6c6960e40a79bc8869b004edc7d17df06a46dec Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Thu, 30 Oct 2025 17:12:08 +0000 Subject: [PATCH 1609/1708] control/controlclient: remove unused reference to mapCtx (#17614) Updates #cleanup Signed-off-by: James Sanderson --- control/controlclient/auto.go | 3 +-- util/backoff/backoff.go | 6 ++++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 9f5bf38ae..52255e89f 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -433,7 +433,6 @@ func (mrs mapRoutineState) UpdateFullNetmap(nm *netmap.NetworkMap) { c := mrs.c c.mu.Lock() - ctx := c.mapCtx c.inMapPoll = true if c.loggedIn { c.state = StateSynchronized @@ -447,7 +446,7 @@ func (mrs mapRoutineState) UpdateFullNetmap(nm *netmap.NetworkMap) { c.sendStatus("mapRoutine-got-netmap", nil, "", nm) } // Reset the backoff timer if we got a netmap. - mrs.bo.BackOff(ctx, nil) + mrs.bo.Reset() } func (mrs mapRoutineState) UpdateNetmapDelta(muts []netmap.NodeMutation) bool { diff --git a/util/backoff/backoff.go b/util/backoff/backoff.go index c6aeae998..95089fc24 100644 --- a/util/backoff/backoff.go +++ b/util/backoff/backoff.go @@ -78,3 +78,9 @@ func (b *Backoff) BackOff(ctx context.Context, err error) { case <-tChannel: } } + +// Reset resets the backoff schedule, equivalent to calling BackOff with a nil +// error. +func (b *Backoff) Reset() { + b.n = 0 +} From f522b9dbb77bc82be6fc46cacc94148f3bafdf66 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 30 Oct 2025 10:32:30 -0700 Subject: [PATCH 1610/1708] feature/tpm: protect all TPM handle operations with a mutex (#17708) In particular on Windows, the `transport.TPMCloser` we get is not safe for concurrent use. This is especially noticeable because `tpm.attestationKey.Clone` uses the same open handle as the original key. 
So wrap the operations on ak.tpm with a mutex and make a deep copy with a new connection in Clone. Updates #15830 Updates #17662 Updates #17644 Signed-off-by: Andrew Lytvynov --- feature/tpm/attestation.go | 40 +++++++++++++++++--- feature/tpm/attestation_test.go | 66 +++++++++++++++++++++++++++++++++ 2 files changed, 100 insertions(+), 6 deletions(-) diff --git a/feature/tpm/attestation.go b/feature/tpm/attestation.go index 5fbda3b17..597d4a649 100644 --- a/feature/tpm/attestation.go +++ b/feature/tpm/attestation.go @@ -10,6 +10,7 @@ import ( "fmt" "io" "log" + "sync" "github.com/google/go-tpm/tpm2" "github.com/google/go-tpm/tpm2/transport" @@ -19,7 +20,8 @@ import ( ) type attestationKey struct { - tpm transport.TPMCloser + tpmMu sync.Mutex + tpm transport.TPMCloser // private and public parts of the TPM key as returned from tpm2.Create. // These are used for serialization. tpmPrivate tpm2.TPM2BPrivate @@ -144,7 +146,7 @@ type attestationKeySerialized struct { // MarshalJSON implements json.Marshaler. func (ak *attestationKey) MarshalJSON() ([]byte, error) { - if ak == nil || ak.IsZero() { + if ak == nil || len(ak.tpmPublic.Bytes()) == 0 || len(ak.tpmPrivate.Buffer) == 0 { return []byte("null"), nil } return json.Marshal(attestationKeySerialized{ @@ -163,6 +165,13 @@ func (ak *attestationKey) UnmarshalJSON(data []byte) (retErr error) { ak.tpmPrivate = tpm2.TPM2BPrivate{Buffer: aks.TPMPrivate} ak.tpmPublic = tpm2.BytesAs2B[tpm2.TPMTPublic, *tpm2.TPMTPublic](aks.TPMPublic) + ak.tpmMu.Lock() + defer ak.tpmMu.Unlock() + if ak.tpm != nil { + ak.tpm.Close() + ak.tpm = nil + } + tpm, err := open() if err != nil { return key.ErrUnsupported @@ -182,6 +191,9 @@ func (ak *attestationKey) Public() crypto.PublicKey { } func (ak *attestationKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { + ak.tpmMu.Lock() + defer ak.tpmMu.Unlock() + if !ak.loaded() { return nil, errors.New("tpm2 attestation key is not loaded during Sign") } @@ -247,6 +259,9 @@ func addASN1IntBytes(b *cryptobyte.Builder, bytes []byte) { } func (ak *attestationKey) Close() error { + ak.tpmMu.Lock() + defer ak.tpmMu.Unlock() + var errs []error if ak.handle != nil && ak.tpm != nil { _, err := tpm2.FlushContext{FlushHandle: ak.handle.Handle}.Execute(ak.tpm) @@ -262,18 +277,31 @@ func (ak *attestationKey) Clone() key.HardwareAttestationKey { if ak == nil { return nil } - return &attestationKey{ - tpm: ak.tpm, + + tpm, err := open() + if err != nil { + log.Printf("[unexpected] failed to open a TPM connection in feature/tpm.attestationKey.Clone: %v", err) + return nil + } + akc := &attestationKey{ + tpm: tpm, tpmPrivate: ak.tpmPrivate, tpmPublic: ak.tpmPublic, - handle: ak.handle, - pub: ak.pub, } + if err := akc.load(); err != nil { + log.Printf("[unexpected] failed to load TPM key in feature/tpm.attestationKey.Clone: %v", err) + tpm.Close() + return nil + } + return akc } func (ak *attestationKey) IsZero() bool { if ak == nil { return true } + + ak.tpmMu.Lock() + defer ak.tpmMu.Unlock() return !ak.loaded() } diff --git a/feature/tpm/attestation_test.go b/feature/tpm/attestation_test.go index ead88c955..e7ff72987 100644 --- a/feature/tpm/attestation_test.go +++ b/feature/tpm/attestation_test.go @@ -10,6 +10,8 @@ import ( "crypto/rand" "crypto/sha256" "encoding/json" + "runtime" + "sync" "testing" ) @@ -62,6 +64,37 @@ func TestAttestationKeySign(t *testing.T) { } } +func TestAttestationKeySignConcurrent(t *testing.T) { + skipWithoutTPM(t) + ak, err := newAttestationKey() + if err != nil { + 
t.Fatal(err) + } + t.Cleanup(func() { + if err := ak.Close(); err != nil { + t.Errorf("ak.Close: %v", err) + } + }) + + data := []byte("secrets") + digest := sha256.Sum256(data) + + wg := sync.WaitGroup{} + for range runtime.GOMAXPROCS(-1) { + wg.Go(func() { + // Check signature/validation round trip. + sig, err := ak.Sign(rand.Reader, digest[:], crypto.SHA256) + if err != nil { + t.Fatal(err) + } + if !ecdsa.VerifyASN1(ak.Public().(*ecdsa.PublicKey), digest[:], sig) { + t.Errorf("ecdsa.VerifyASN1 failed") + } + }) + } + wg.Wait() +} + func TestAttestationKeyUnmarshal(t *testing.T) { skipWithoutTPM(t) ak, err := newAttestationKey() @@ -96,3 +129,36 @@ func TestAttestationKeyUnmarshal(t *testing.T) { t.Error("unmarshalled public key is not the same as the original public key") } } + +func TestAttestationKeyClone(t *testing.T) { + skipWithoutTPM(t) + ak, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + + ak2 := ak.Clone() + if ak2 == nil { + t.Fatal("Clone failed") + } + t.Cleanup(func() { + if err := ak2.Close(); err != nil { + t.Errorf("ak2.Close: %v", err) + } + }) + // Close the original key, ak2 should remain open and usable. + if err := ak.Close(); err != nil { + t.Fatal(err) + } + + data := []byte("secrets") + digest := sha256.Sum256(data) + // Check signature/validation round trip using cloned key. + sig, err := ak2.Sign(rand.Reader, digest[:], crypto.SHA256) + if err != nil { + t.Fatal(err) + } + if !ecdsa.VerifyASN1(ak2.Public().(*ecdsa.PublicKey), digest[:], sig) { + t.Errorf("ecdsa.VerifyASN1 failed") + } +} From 061e6266cf4e9c9a0f06b0d60d4d7840f6b7678d Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 30 Oct 2025 14:40:57 -0700 Subject: [PATCH 1611/1708] util/eventbus: allow logging of slow subscribers (#17705) Add options to the eventbus.Bus to plumb in a logger. Route that logger in to the subscriber machinery, and trigger a log message to it when a subscriber fails to respond to its delivered events for 5s or more. The log message includes the package, filename, and line number of the call site that created the subscription. Add tests that verify this works. Updates #17680 Change-Id: I0546516476b1e13e6a9cf79f19db2fe55e56c698 Signed-off-by: M. J. 
Fromberger --- flake.nix | 2 +- go.mod | 2 +- go.mod.sri | 2 +- go.sum | 4 +-- shell.nix | 2 +- util/eventbus/bus.go | 35 ++++++++++++++++-- util/eventbus/bus_test.go | 73 ++++++++++++++++++++++++++++++++++++++ util/eventbus/client.go | 7 ++-- util/eventbus/debug.go | 36 +++++++++++++++++++ util/eventbus/subscribe.go | 35 ++++++++++++++++-- 10 files changed, 185 insertions(+), 13 deletions(-) diff --git a/flake.nix b/flake.nix index da4c87a0b..e50f39638 100644 --- a/flake.nix +++ b/flake.nix @@ -151,5 +151,5 @@ }); }; } -# nix-direnv cache busting line: sha256-pZCy1KHUe7f7cjm816OwA+bjGrSRnSTxkvCmB4cmWqw= +# nix-direnv cache busting line: sha256-D0znIEcy9d822snZbdNCNLoN47cOP1F2SKmfwSFRvXw= diff --git a/go.mod b/go.mod index 12f7946b8..836810fc0 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/golang/snappy v0.0.4 github.com/golangci/golangci-lint v1.57.1 - github.com/google/go-cmp v0.6.0 + github.com/google/go-cmp v0.7.0 github.com/google/go-containerregistry v0.20.3 github.com/google/go-tpm v0.9.4 github.com/google/gopacket v1.1.19 diff --git a/go.mod.sri b/go.mod.sri index c9f537473..108423f4e 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-pZCy1KHUe7f7cjm816OwA+bjGrSRnSTxkvCmB4cmWqw= +sha256-D0znIEcy9d822snZbdNCNLoN47cOP1F2SKmfwSFRvXw= diff --git a/go.sum b/go.sum index eea0d6c7d..a0d9461ec 100644 --- a/go.sum +++ b/go.sum @@ -492,8 +492,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= github.com/google/go-github/v66 v66.0.0 h1:ADJsaXj9UotwdgK8/iFZtv7MLc8E8WBl62WLd/D/9+M= diff --git a/shell.nix b/shell.nix index 99cfbd243..6b579b455 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-pZCy1KHUe7f7cjm816OwA+bjGrSRnSTxkvCmB4cmWqw= +# nix-direnv cache busting line: sha256-D0znIEcy9d822snZbdNCNLoN47cOP1F2SKmfwSFRvXw= diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index d1507d8e6..b1639136a 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -5,10 +5,12 @@ package eventbus import ( "context" + "log" "reflect" "slices" "sync" + "tailscale.com/types/logger" "tailscale.com/util/set" ) @@ -30,6 +32,7 @@ type Bus struct { write chan PublishedEvent snapshot chan chan []PublishedEvent routeDebug hook[RoutedEvent] + logf logger.Logf topicsMu sync.Mutex topics map[reflect.Type][]*subscribeState @@ -40,19 +43,42 @@ type Bus struct { clients set.Set[*Client] } -// New returns a new bus. Use [Publish] to make event publishers, -// and [Subscribe] and [SubscribeFunc] to make event subscribers. -func New() *Bus { +// New returns a new bus with default options. 
It is equivalent to +// calling [NewWithOptions] with zero [BusOptions]. +func New() *Bus { return NewWithOptions(BusOptions{}) } + +// NewWithOptions returns a new [Bus] with the specified [BusOptions]. +// Use [Bus.Client] to construct clients on the bus. +// Use [Publish] to make event publishers. +// Use [Subscribe] and [SubscribeFunc] to make event subscribers. +func NewWithOptions(opts BusOptions) *Bus { ret := &Bus{ write: make(chan PublishedEvent), snapshot: make(chan chan []PublishedEvent), topics: map[reflect.Type][]*subscribeState{}, clients: set.Set[*Client]{}, + logf: opts.logger(), } ret.router = runWorker(ret.pump) return ret } +// BusOptions are optional parameters for a [Bus]. A zero value is ready for +// use and provides defaults as described. +type BusOptions struct { + // Logf, if non-nil, is used for debug logs emitted by the bus and clients, + // publishers, and subscribers under its care. If it is nil, logs are sent + // to [log.Printf]. + Logf logger.Logf +} + +func (o BusOptions) logger() logger.Logf { + if o.Logf == nil { + return log.Printf + } + return o.Logf +} + // Client returns a new client with no subscriptions. Use [Subscribe] // to receive events, and [Publish] to emit events. // @@ -166,6 +192,9 @@ func (b *Bus) pump(ctx context.Context) { } } +// logger returns a [logger.Logf] to which logs related to bus activity should be written. +func (b *Bus) logger() logger.Logf { return b.logf } + func (b *Bus) dest(t reflect.Type) []*subscribeState { b.topicsMu.Lock() defer b.topicsMu.Unlock() diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index de292cf1a..1e0cd8abf 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -4,8 +4,11 @@ package eventbus_test import ( + "bytes" "errors" "fmt" + "log" + "regexp" "testing" "testing/synctest" "time" @@ -436,6 +439,76 @@ func TestMonitor(t *testing.T) { t.Run("Wait", testMon(t, func(c *eventbus.Client, m eventbus.Monitor) { c.Close(); m.Wait() })) } +func TestSlowSubs(t *testing.T) { + swapLogBuf := func(t *testing.T) *bytes.Buffer { + logBuf := new(bytes.Buffer) + save := log.Writer() + log.SetOutput(logBuf) + t.Cleanup(func() { log.SetOutput(save) }) + return logBuf + } + + t.Run("Subscriber", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + buf := swapLogBuf(t) + + b := eventbus.New() + defer b.Close() + + pc := b.Client("pub") + p := eventbus.Publish[EventA](pc) + + sc := b.Client("sub") + s := eventbus.Subscribe[EventA](sc) + + go func() { + time.Sleep(6 * time.Second) // trigger the slow check at 5s. + t.Logf("Subscriber accepted %v", <-s.Events()) + }() + + p.Publish(EventA{12345}) + + time.Sleep(7 * time.Second) // advance time... + synctest.Wait() // subscriber is done + + want := regexp.MustCompile(`^.* tailscale.com/util/eventbus_test bus_test.go:\d+: ` + + `subscriber for eventbus_test.EventA is slow.*`) + if got := buf.String(); !want.MatchString(got) { + t.Errorf("Wrong log output\ngot: %q\nwant: %s", got, want) + } + }) + }) + + t.Run("SubscriberFunc", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + buf := swapLogBuf(t) + + b := eventbus.New() + defer b.Close() + + pc := b.Client("pub") + p := eventbus.Publish[EventB](pc) + + sc := b.Client("sub") + eventbus.SubscribeFunc[EventB](sc, func(e EventB) { + time.Sleep(6 * time.Second) // trigger the slow check at 5s. + t.Logf("SubscriberFunc processed %v", e) + }) + + p.Publish(EventB{67890}) + + time.Sleep(7 * time.Second) // advance time... 
+ synctest.Wait() // subscriber is done + + want := regexp.MustCompile(`^.* tailscale.com/util/eventbus_test bus_test.go:\d+: ` + + `subscriber for eventbus_test.EventB is slow.*`) + if got := buf.String(); !want.MatchString(got) { + t.Errorf("Wrong log output\ngot: %q\nwant: %s", got, want) + } + }) + }) +} + func TestRegression(t *testing.T) { bus := eventbus.New() t.Cleanup(bus.Close) diff --git a/util/eventbus/client.go b/util/eventbus/client.go index 9e3f3ee76..c119c67a9 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -7,6 +7,7 @@ import ( "reflect" "sync" + "tailscale.com/types/logger" "tailscale.com/util/set" ) @@ -29,6 +30,8 @@ type Client struct { func (c *Client) Name() string { return c.name } +func (c *Client) logger() logger.Logf { return c.bus.logger() } + // Close closes the client. It implicitly closes all publishers and // subscribers obtained from this client. func (c *Client) Close() { @@ -142,7 +145,7 @@ func Subscribe[T any](c *Client) *Subscriber[T] { } r := c.subscribeStateLocked() - s := newSubscriber[T](r) + s := newSubscriber[T](r, logfForCaller(c.logger())) r.addSubscriber(s) return s } @@ -165,7 +168,7 @@ func SubscribeFunc[T any](c *Client, f func(T)) *SubscriberFunc[T] { } r := c.subscribeStateLocked() - s := newSubscriberFunc[T](r, f) + s := newSubscriberFunc[T](r, f, logfForCaller(c.logger())) r.addSubscriber(s) return s } diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index 6d5463bec..2f2c9589a 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -6,12 +6,22 @@ package eventbus import ( "cmp" "fmt" + "path/filepath" "reflect" + "runtime" "slices" + "strings" "sync" "sync/atomic" + "time" + + "tailscale.com/types/logger" ) +// slowSubscriberTimeout is a timeout after which a subscriber that does not +// accept a pending event will be flagged as being slow. +const slowSubscriberTimeout = 5 * time.Second + // A Debugger offers access to a bus's privileged introspection and // debugging facilities. // @@ -204,3 +214,29 @@ type DebugTopic struct { Publisher string Subscribers []string } + +// logfForCaller returns a [logger.Logf] that prefixes its output with the +// package, filename, and line number of the caller's caller. +// If logf == nil, it returns [logger.Discard]. +// If the caller location could not be determined, it returns logf unmodified. 
+func logfForCaller(logf logger.Logf) logger.Logf { + if logf == nil { + return logger.Discard + } + pc, fpath, line, _ := runtime.Caller(2) // +1 for my caller, +1 for theirs + if f := runtime.FuncForPC(pc); f != nil { + return logger.WithPrefix(logf, fmt.Sprintf("%s %s:%d: ", funcPackageName(f.Name()), filepath.Base(fpath), line)) + } + return logf +} + +func funcPackageName(funcName string) string { + ls := max(strings.LastIndex(funcName, "/"), 0) + for { + i := strings.LastIndex(funcName, ".") + if i <= ls { + return funcName + } + funcName = funcName[:i] + } +} diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index c35c7e7f0..0b821b3f5 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -8,6 +8,9 @@ import ( "fmt" "reflect" "sync" + "time" + + "tailscale.com/types/logger" ) type DeliveredEvent struct { @@ -182,12 +185,18 @@ type Subscriber[T any] struct { stop stopFlag read chan T unregister func() + logf logger.Logf + slow *time.Timer // used to detect slow subscriber service } -func newSubscriber[T any](r *subscribeState) *Subscriber[T] { +func newSubscriber[T any](r *subscribeState, logf logger.Logf) *Subscriber[T] { + slow := time.NewTimer(0) + slow.Stop() // reset in dispatch return &Subscriber[T]{ read: make(chan T), unregister: func() { r.deleteSubscriber(reflect.TypeFor[T]()) }, + logf: logf, + slow: slow, } } @@ -212,6 +221,11 @@ func (s *Subscriber[T]) monitor(debugEvent T) { func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent], acceptCh func() chan DeliveredEvent, snapshot chan chan []DeliveredEvent) bool { t := vals.Peek().Event.(T) + + start := time.Now() + s.slow.Reset(slowSubscriberTimeout) + defer s.slow.Stop() + for { // Keep the cases in this select in sync with subscribeState.pump // above. The only difference should be that this select @@ -226,6 +240,9 @@ func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent return false case ch := <-snapshot: ch <- vals.Snapshot() + case <-s.slow.C: + s.logf("subscriber for %T is slow (%v elapsed)", t, time.Since(start)) + s.slow.Reset(slowSubscriberTimeout) } } } @@ -260,12 +277,18 @@ type SubscriberFunc[T any] struct { stop stopFlag read func(T) unregister func() + logf logger.Logf + slow *time.Timer // used to detect slow subscriber service } -func newSubscriberFunc[T any](r *subscribeState, f func(T)) *SubscriberFunc[T] { +func newSubscriberFunc[T any](r *subscribeState, f func(T), logf logger.Logf) *SubscriberFunc[T] { + slow := time.NewTimer(0) + slow.Stop() // reset in dispatch return &SubscriberFunc[T]{ read: f, unregister: func() { r.deleteSubscriber(reflect.TypeFor[T]()) }, + logf: logf, + slow: slow, } } @@ -285,6 +308,11 @@ func (s *SubscriberFunc[T]) dispatch(ctx context.Context, vals *queue[DeliveredE t := vals.Peek().Event.(T) callDone := make(chan struct{}) go s.runCallback(t, callDone) + + start := time.Now() + s.slow.Reset(slowSubscriberTimeout) + defer s.slow.Stop() + // Keep the cases in this select in sync with subscribeState.pump // above. The only difference should be that this select // delivers a value by calling s.read. @@ -299,6 +327,9 @@ func (s *SubscriberFunc[T]) dispatch(ctx context.Context, vals *queue[DeliveredE return false case ch := <-snapshot: ch <- vals.Snapshot() + case <-s.slow.C: + s.logf("subscriber for %T is slow (%v elapsed)", t, time.Since(start)) + s.slow.Reset(slowSubscriberTimeout) } } } From 4c856078e4912a3f3a6d1e31d0db03e423685f47 Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Fri, 31 Oct 2025 09:58:09 -0700 Subject: [PATCH 1612/1708] util/eventbus: block for the subscriber during SubscribeFunc close (#17642) Prior to this change a SubscriberFunc treated the call to the subscriber's function as the completion of delivery. But that means when we are closing the subscriber, that callback could continue to execute for some time after the close returns. For channel-based subscribers that works OK because the close takes effect before the subscriber ever sees the event. To make the two subscriber types symmetric, we should also wait for the callback to finish before returning. This ensures that a Close of the client means the same thing with both kinds of subscriber. Updates #17638 Change-Id: I82fd31bcaa4e92fab07981ac0e57e6e3a7d9d60b Signed-off-by: M. J. Fromberger --- util/eventbus/bus_test.go | 71 +++++++++++++++++++++++++++++++++----- util/eventbus/subscribe.go | 7 ++++ 2 files changed, 70 insertions(+), 8 deletions(-) diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index 1e0cd8abf..61728fbfd 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -89,6 +89,61 @@ func TestSubscriberFunc(t *testing.T) { } }) + t.Run("CloseWait", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client(t.Name()) + + eventbus.SubscribeFunc[EventA](c, func(e EventA) { + time.Sleep(2 * time.Second) + }) + + p := eventbus.Publish[EventA](c) + p.Publish(EventA{12345}) + + synctest.Wait() // subscriber has the event + c.Close() + + // If close does not wait for the subscriber, the test will fail + // because an active goroutine remains in the bubble. + }) + }) + + t.Run("CloseWait/Belated", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + buf := swapLogBuf(t) + + b := eventbus.New() + defer b.Close() + + c := b.Client(t.Name()) + + // This subscriber stalls for a long time, so that when we try to + // close the client it gives up and returns in the timeout condition. + eventbus.SubscribeFunc[EventA](c, func(e EventA) { + time.Sleep(time.Minute) // notably, longer than the wait period + }) + + p := eventbus.Publish[EventA](c) + p.Publish(EventA{12345}) + + synctest.Wait() // subscriber has the event + c.Close() + + // Verify that the logger recorded that Close gave up on the slowpoke. + want := regexp.MustCompile(`^.* tailscale.com/util/eventbus_test bus_test.go:\d+: ` + + `giving up on subscriber for eventbus_test.EventA after \d+s at close.*`) + if got := buf.String(); !want.MatchString(got) { + t.Errorf("Wrong log output\ngot: %q\nwant %s", got, want) + } + + // Wait for the subscriber to actually finish to clean up the goroutine. 
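// Editorial aside, not part of the patch: the shutdown behaviour this change
// describes, reduced to plain Go. On close, the dispatcher now waits for an
// in-flight callback to finish, but only up to a bound, and logs if it gives
// up. All names and durations below are illustrative, not from the repo.
package main

import (
	"log"
	"time"
)

func main() {
	callDone := make(chan struct{})
	go func() { // stand-in for the SubscriberFunc callback goroutine
		time.Sleep(50 * time.Millisecond)
		close(callDone)
	}()

	// Bounded wait, in the spirit of the 5*slowSubscriberTimeout used above.
	giveUp := time.NewTimer(1 * time.Second)
	defer giveUp.Stop()
	select {
	case <-callDone:
		log.Printf("callback finished; Close can return with no work in flight")
	case <-giveUp.C:
		log.Printf("giving up on subscriber callback at close")
	}
}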
+ time.Sleep(2 * time.Minute) + }) + }) + t.Run("SubscriberPublishes", func(t *testing.T) { synctest.Test(t, func(t *testing.T) { b := eventbus.New() @@ -440,14 +495,6 @@ func TestMonitor(t *testing.T) { } func TestSlowSubs(t *testing.T) { - swapLogBuf := func(t *testing.T) *bytes.Buffer { - logBuf := new(bytes.Buffer) - save := log.Writer() - log.SetOutput(logBuf) - t.Cleanup(func() { log.SetOutput(save) }) - return logBuf - } - t.Run("Subscriber", func(t *testing.T) { synctest.Test(t, func(t *testing.T) { buf := swapLogBuf(t) @@ -571,3 +618,11 @@ func (q *queueChecker) Got(v any) { func (q *queueChecker) Empty() bool { return len(q.want) == 0 } + +func swapLogBuf(t *testing.T) *bytes.Buffer { + logBuf := new(bytes.Buffer) + save := log.Writer() + log.SetOutput(logBuf) + t.Cleanup(func() { log.SetOutput(save) }) + return logBuf +} diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 0b821b3f5..03d577f27 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -324,6 +324,13 @@ func (s *SubscriberFunc[T]) dispatch(ctx context.Context, vals *queue[DeliveredE case val := <-acceptCh(): vals.Add(val) case <-ctx.Done(): + // Wait for the callback to be complete, but not forever. + s.slow.Reset(5 * slowSubscriberTimeout) + select { + case <-s.slow.C: + s.logf("giving up on subscriber for %T after %v at close", t, time.Since(start)) + case <-callDone: + } return false case ch := <-snapshot: ch <- vals.Snapshot() From db7dcd516f7da6792cd4fa44b97bc510102941c5 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 31 Oct 2025 14:28:39 -0700 Subject: [PATCH 1613/1708] Revert "control/controlclient: back out HW key attestation (#17664)" (#17732) This reverts commit a760cbe33f4bed64b63c6118808d02b2771ff785. Signed-off-by: Andrew Lytvynov --- control/controlclient/direct.go | 22 +++++++++++++++ ipn/ipnlocal/hwattest.go | 48 +++++++++++++++++++++++++++++++++ ipn/ipnlocal/local.go | 1 + ipn/ipnlocal/profiles.go | 10 +++++++ ipn/ipnlocal/profiles_test.go | 1 + ipn/prefs_test.go | 2 +- types/persist/persist.go | 18 +++++++++++-- types/persist/persist_clone.go | 4 +++ types/persist/persist_test.go | 2 +- types/persist/persist_view.go | 10 ++++--- 10 files changed, 110 insertions(+), 8 deletions(-) create mode 100644 ipn/ipnlocal/hwattest.go diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index fe7cc235b..63a12b249 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -7,6 +7,8 @@ import ( "bytes" "cmp" "context" + "crypto" + "crypto/sha256" "encoding/binary" "encoding/json" "errors" @@ -946,6 +948,26 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap ConnectionHandleForTest: connectionHandleForTest, } + // If we have a hardware attestation key, sign the node key with it and send + // the key & signature in the map request. 
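// Editorial aside, not part of the patch: the restored signing step above in
// isolation. The real code signs with the hardware attestation key; here a
// standard-library ECDSA key stands in purely to show the
// "<unix-seconds>|<node-key>" message format and the SHA-256 digest handed to
// crypto.Signer.Sign. The node key literal below is made up.
package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"time"
)

func main() {
	signer, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	nodeKey := "nodekey:0123456789abcdef" // illustrative stand-in for key.NodePublic.String()
	msg := fmt.Sprintf("%d|%s", time.Now().Unix(), nodeKey)
	digest := sha256.Sum256([]byte(msg))
	sig, err := signer.Sign(rand.Reader, digest[:], crypto.SHA256)
	if err != nil {
		panic(err)
	}
	fmt.Printf("signed %q into a %d-byte signature\n", msg, len(sig))
}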
+ if buildfeatures.HasTPM { + if k := persist.AsStruct().AttestationKey; k != nil && !k.IsZero() { + hwPub := key.HardwareAttestationPublicFromPlatformKey(k) + request.HardwareAttestationKey = hwPub + + t := c.clock.Now() + msg := fmt.Sprintf("%d|%s", t.Unix(), nodeKey.String()) + digest := sha256.Sum256([]byte(msg)) + sig, err := k.Sign(nil, digest[:], crypto.SHA256) + if err != nil { + c.logf("failed to sign node key with hardware attestation key: %v", err) + } else { + request.HardwareAttestationKeySignature = sig + request.HardwareAttestationKeySignatureTimestamp = t + } + } + } + var extraDebugFlags []string if buildfeatures.HasAdvertiseRoutes && hi != nil && c.netMon != nil && !c.skipIPForwardingCheck && ipForwardingBroken(hi.RoutableIPs, c.netMon.InterfaceState()) { diff --git a/ipn/ipnlocal/hwattest.go b/ipn/ipnlocal/hwattest.go new file mode 100644 index 000000000..2c93cad4c --- /dev/null +++ b/ipn/ipnlocal/hwattest.go @@ -0,0 +1,48 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tpm + +package ipnlocal + +import ( + "errors" + + "tailscale.com/feature" + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/types/persist" +) + +func init() { + feature.HookGenerateAttestationKeyIfEmpty.Set(generateAttestationKeyIfEmpty) +} + +// generateAttestationKeyIfEmpty generates a new hardware attestation key if +// none exists. It returns true if a new key was generated and stored in +// p.AttestationKey. +func generateAttestationKeyIfEmpty(p *persist.Persist, logf logger.Logf) (bool, error) { + // attempt to generate a new hardware attestation key if none exists + var ak key.HardwareAttestationKey + if p != nil { + ak = p.AttestationKey + } + + if ak == nil || ak.IsZero() { + var err error + ak, err = key.NewHardwareAttestationKey() + if err != nil { + if !errors.Is(err, key.ErrUnsupported) { + logf("failed to create hardware attestation key: %v", err) + } + } else if ak != nil { + logf("using new hardware attestation key: %v", ak.Public()) + if p == nil { + p = &persist.Persist{} + } + p.AttestationKey = ak + return true, nil + } + } + return false, nil +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index df278a325..ffab4b69d 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1190,6 +1190,7 @@ func stripKeysFromPrefs(p ipn.PrefsView) ipn.PrefsView { p2.Persist.PrivateNodeKey = key.NodePrivate{} p2.Persist.OldPrivateNodeKey = key.NodePrivate{} p2.Persist.NetworkLockKey = key.NLPrivate{} + p2.Persist.AttestationKey = nil return p2.View() } diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 3e80cdaa9..9c2176378 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -19,7 +19,9 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnext" "tailscale.com/tailcfg" + "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/persist" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" ) @@ -654,6 +656,14 @@ func (pm *profileManager) loadSavedPrefs(k ipn.StateKey) (ipn.PrefsView, error) return ipn.PrefsView{}, err } savedPrefs := ipn.NewPrefs() + + // if supported by the platform, create an empty hardware attestation key to use when deserializing + // to avoid type exceptions from json.Unmarshaling into an interface{}. 
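// Editorial aside, not part of the patch: why the code above seeds the saved
// prefs with an empty concrete attestation key before unmarshaling.
// encoding/json cannot choose a concrete type for a nil non-empty interface
// field and returns an error, but it will decode into a non-nil pointer that
// is already stored in that interface. The types below are illustrative, not
// from the repo.
package main

import (
	"encoding/json"
	"fmt"
)

type Key interface{ ID() string }

type softKey struct{ KeyID string }

func (k *softKey) ID() string { return k.KeyID }

type prefs struct {
	AttestationKey Key
}

func main() {
	data := []byte(`{"AttestationKey":{"KeyID":"abc"}}`)

	var bare prefs
	fmt.Println(json.Unmarshal(data, &bare)) // error: cannot unmarshal into interface field

	seeded := prefs{AttestationKey: &softKey{}} // pre-populate a concrete value
	if err := json.Unmarshal(data, &seeded); err != nil {
		panic(err)
	}
	fmt.Println(seeded.AttestationKey.ID()) // abc
}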
+ hw, _ := key.NewEmptyHardwareAttestationKey() + savedPrefs.Persist = &persist.Persist{ + AttestationKey: hw, + } + if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil { return ipn.PrefsView{}, fmt.Errorf("parsing saved prefs: %v", err) } diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 60c92ff8d..deeab2ade 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -151,6 +151,7 @@ func TestProfileDupe(t *testing.T) { ID: tailcfg.UserID(user), LoginName: fmt.Sprintf("user%d@example.com", user), }, + AttestationKey: nil, } } user1Node1 := newPersist(1, 1) diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 3339a631c..233616409 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -501,7 +501,7 @@ func TestPrefsPretty(t *testing.T) { }, }, "linux", - `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u=""}}`, + `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u="" ak=-}}`, }, { Prefs{ diff --git a/types/persist/persist.go b/types/persist/persist.go index d888a6afb..4b62c79dd 100644 --- a/types/persist/persist.go +++ b/types/persist/persist.go @@ -26,6 +26,7 @@ type Persist struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey `json:",omitempty"` // DisallowedTKAStateIDs stores the tka.State.StateID values which // this node will not operate network lock on. This is used to @@ -84,11 +85,20 @@ func (p *Persist) Equals(p2 *Persist) bool { return false } + var pub, p2Pub key.HardwareAttestationPublic + if p.AttestationKey != nil && !p.AttestationKey.IsZero() { + pub = key.HardwareAttestationPublicFromPlatformKey(p.AttestationKey) + } + if p2.AttestationKey != nil && !p2.AttestationKey.IsZero() { + p2Pub = key.HardwareAttestationPublicFromPlatformKey(p2.AttestationKey) + } + return p.PrivateNodeKey.Equal(p2.PrivateNodeKey) && p.OldPrivateNodeKey.Equal(p2.OldPrivateNodeKey) && p.UserProfile.Equal(&p2.UserProfile) && p.NetworkLockKey.Equal(p2.NetworkLockKey) && p.NodeID == p2.NodeID && + pub.Equal(p2Pub) && reflect.DeepEqual(nilIfEmpty(p.DisallowedTKAStateIDs), nilIfEmpty(p2.DisallowedTKAStateIDs)) } @@ -96,12 +106,16 @@ func (p *Persist) Pretty() string { var ( ok, nk key.NodePublic ) + akString := "-" if !p.OldPrivateNodeKey.IsZero() { ok = p.OldPrivateNodeKey.Public() } if !p.PrivateNodeKey.IsZero() { nk = p.PublicNodeKey() } - return fmt.Sprintf("Persist{o=%v, n=%v u=%#v}", - ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName) + if p.AttestationKey != nil && !p.AttestationKey.IsZero() { + akString = fmt.Sprintf("%v", p.AttestationKey.Public()) + } + return fmt.Sprintf("Persist{o=%v, n=%v u=%#v ak=%s}", + ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName, akString) } diff --git a/types/persist/persist_clone.go b/types/persist/persist_clone.go index 680419ff2..9dbe7e0f6 100644 --- a/types/persist/persist_clone.go +++ b/types/persist/persist_clone.go @@ -19,6 +19,9 @@ func (src *Persist) Clone() *Persist { } dst := new(Persist) *dst = *src + if src.AttestationKey != nil { + dst.AttestationKey = src.AttestationKey.Clone() + } dst.DisallowedTKAStateIDs = append(src.DisallowedTKAStateIDs[:0:0], src.DisallowedTKAStateIDs...) 
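// Editorial aside, not part of the patch: the append(src[:0:0], src...) idiom
// used on the line above. The three-index slice has length and capacity zero,
// so append must allocate fresh backing storage and the clone never aliases
// the original, while a nil source stays nil. slices.Clone behaves the same
// way in modern Go.
package main

import "fmt"

func main() {
	src := []string{"a", "b"}
	dst := append(src[:0:0], src...)
	dst[0] = "changed"
	fmt.Println(src[0], dst[0]) // a changed
}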
return dst } @@ -31,5 +34,6 @@ var _PersistCloneNeedsRegeneration = Persist(struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey DisallowedTKAStateIDs []string }{}) diff --git a/types/persist/persist_test.go b/types/persist/persist_test.go index dbf2a6d8c..713114b74 100644 --- a/types/persist/persist_test.go +++ b/types/persist/persist_test.go @@ -21,7 +21,7 @@ func fieldsOf(t reflect.Type) (fields []string) { } func TestPersistEqual(t *testing.T) { - persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "DisallowedTKAStateIDs"} + persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "AttestationKey", "DisallowedTKAStateIDs"} if have := fieldsOf(reflect.TypeFor[Persist]()); !reflect.DeepEqual(have, persistHandles) { t.Errorf("Persist.Equal check might be out of sync\nfields: %q\nhandled: %q\n", have, persistHandles) diff --git a/types/persist/persist_view.go b/types/persist/persist_view.go index 7d1507468..dbf8294ef 100644 --- a/types/persist/persist_view.go +++ b/types/persist/persist_view.go @@ -89,10 +89,11 @@ func (v *PersistView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { func (v PersistView) PrivateNodeKey() key.NodePrivate { return v.ж.PrivateNodeKey } // needed to request key rotation -func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } -func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } -func (v PersistView) NetworkLockKey() key.NLPrivate { return v.ж.NetworkLockKey } -func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } +func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } +func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } +func (v PersistView) NetworkLockKey() key.NLPrivate { return v.ж.NetworkLockKey } +func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } +func (v PersistView) AttestationKey() tailcfg.StableNodeID { panic("unsupported") } // DisallowedTKAStateIDs stores the tka.State.StateID values which // this node will not operate network lock on. This is used to @@ -110,5 +111,6 @@ var _PersistViewNeedsRegeneration = Persist(struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey DisallowedTKAStateIDs []string }{}) From 77123a569ba1055f091db06e2d1b59c09b02f108 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Tue, 4 Nov 2025 12:36:04 -0800 Subject: [PATCH 1614/1708] wgengine/netlog: include node OS in logged attributes (#17755) Include the node's OS with network flow log information. Refactor the JSON-length computation to be a bit more precise. 
Updates tailscale/corp#33352 Fixes tailscale/corp#34030 Signed-off-by: Joe Tsai --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 2 +- tsnet/depaware.txt | 2 +- types/netlogtype/netlogtype.go | 15 +++------------ wgengine/netlog/record.go | 26 ++++++++++++++++++++++---- wgengine/netlog/record_test.go | 2 ++ 7 files changed, 31 insertions(+), 19 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 8d1f7fa06..ebd22770e 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -825,7 +825,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ - tailscale.com/types/bools from tailscale.com/tsnet + tailscale.com/types/bools from tailscale.com/tsnet+ tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/ipproto from tailscale.com/net/flowtrack+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index c1708711a..bdc110e1a 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -392,6 +392,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ + tailscale.com/types/bools from tailscale.com/wgengine/netlog tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 80c8e04a8..ebf03b541 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -230,7 +230,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ - tailscale.com/types/bools from tailscale.com/tsnet + tailscale.com/types/bools from tailscale.com/tsnet+ tailscale.com/types/dnstype from tailscale.com/client/local+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/ipproto from tailscale.com/ipn+ diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index ef0fe0667..4817a511a 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -225,7 +225,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) LDW tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ - tailscale.com/types/bools from tailscale.com/tsnet + tailscale.com/types/bools from tailscale.com/tsnet+ tailscale.com/types/dnstype from tailscale.com/client/local+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/ipproto from tailscale.com/ipn+ diff --git a/types/netlogtype/netlogtype.go b/types/netlogtype/netlogtype.go index 86d645b35..cc38684a3 100644 --- a/types/netlogtype/netlogtype.go +++ b/types/netlogtype/netlogtype.go @@ -44,18 +44,6 @@ const ( // Each [ConnectionCounts] occupies at most [MaxConnectionCountsJSONSize]. 
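// Editorial aside, not part of the patch: the shape of the "more precise"
// length accounting the commit message above describes, reduced to a
// standalone helper. The idea is to sum literal JSON fragment lengths plus a
// worst-case quoted length per string, including the new "os" field, giving
// an upper bound without marshaling. jsonQuotedLen here is an illustrative
// stand-in, not the repo's implementation.
package main

import (
	"fmt"
	"unicode/utf8"
)

// jsonQuotedLen over-estimates the length of the JSON encoding of s by
// assuming every rune may escape to a \uXXXX sequence (6 bytes), plus quotes.
func jsonQuotedLen(s string) int {
	return len(`"`) + 6*utf8.RuneCountInString(s) + len(`"`)
}

func nodeJSONLenUpperBound(nodeID, name, os string) (n int) {
	n += len(`{}`)
	n += len(`"nodeId":`) + jsonQuotedLen(nodeID) + len(`,`)
	if name != "" {
		n += len(`"name":`) + jsonQuotedLen(name) + len(`,`)
	}
	if os != "" {
		n += len(`"os":`) + jsonQuotedLen(os) + len(`,`)
	}
	return n
}

func main() {
	fmt.Println(nodeJSONLenUpperBound("n123456CNTL", "host.example.ts.net", "linux"))
}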
MinMessageJSONSize = len(messageJSON) - nodeJSON = `{"nodeId":` + maxJSONStableID + `,"name":"","addresses":` + maxJSONAddrs + `,"user":"","tags":[]}` - maxJSONAddrV4 = `"255.255.255.255"` - maxJSONAddrV6 = `"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"` - maxJSONAddrs = `[` + maxJSONAddrV4 + `,` + maxJSONAddrV6 + `]` - - // MinNodeJSONSize is the overhead size of Node when it is - // serialized as JSON assuming that each field is minimally populated. - // It does not account for bytes occupied by - // [Node.Name], [Node.User], or [Node.Tags]. The [Node.Addresses] - // is assumed to contain a pair of IPv4 and IPv6 address. - MinNodeJSONSize = len(nodeJSON) - maxJSONConnCounts = `{` + maxJSONConn + `,` + maxJSONCounts + `}` maxJSONConn = `"proto":` + maxJSONProto + `,"src":` + maxJSONAddrPort + `,"dst":` + maxJSONAddrPort maxJSONProto = `255` @@ -82,6 +70,9 @@ type Node struct { // Addresses are the Tailscale IP addresses of the node. Addresses []netip.Addr `json:"addresses,omitempty"` + // OS is the operating system of the node. + OS string `json:"os,omitzero"` // e.g., "linux" + // User is the user that owns the node. // It is not populated if the node is tagged. User string `json:"user,omitzero"` // e.g., "johndoe@example.com" diff --git a/wgengine/netlog/record.go b/wgengine/netlog/record.go index b8db26fc5..45e30fabe 100644 --- a/wgengine/netlog/record.go +++ b/wgengine/netlog/record.go @@ -13,6 +13,7 @@ import ( "unicode/utf8" "tailscale.com/tailcfg" + "tailscale.com/types/bools" "tailscale.com/types/netlogtype" "tailscale.com/util/set" ) @@ -134,17 +135,31 @@ func compareConnCnts(x, y netlogtype.ConnectionCounts) int { } // jsonLen computes an upper-bound on the size of the JSON representation. -func (nu nodeUser) jsonLen() int { +func (nu nodeUser) jsonLen() (n int) { if !nu.Valid() { return len(`{"nodeId":""}`) } - n := netlogtype.MinNodeJSONSize + jsonQuotedLen(nu.Name()) + n += len(`{}`) + n += len(`"nodeId":`) + jsonQuotedLen(string(nu.StableID())) + len(`,`) + if len(nu.Name()) > 0 { + n += len(`"name":`) + jsonQuotedLen(nu.Name()) + len(`,`) + } + if nu.Addresses().Len() > 0 { + n += len(`"addresses":[]`) + for _, addr := range nu.Addresses().All() { + n += bools.IfElse(addr.Addr().Is4(), len(`"255.255.255.255"`), len(`"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"`)) + len(",") + } + } + if nu.Hostinfo().Valid() && len(nu.Hostinfo().OS()) > 0 { + n += len(`"os":`) + jsonQuotedLen(nu.Hostinfo().OS()) + len(`,`) + } if nu.Tags().Len() > 0 { + n += len(`"tags":[]`) for _, tag := range nu.Tags().All() { n += jsonQuotedLen(tag) + len(",") } - } else if nu.user.Valid() && nu.user.ID() == nu.User() { - n += jsonQuotedLen(nu.user.LoginName()) + } else if nu.user.Valid() && nu.user.ID() == nu.User() && len(nu.user.LoginName()) > 0 { + n += len(`"user":`) + jsonQuotedLen(nu.user.LoginName()) + len(",") } return n } @@ -166,6 +181,9 @@ func (nu nodeUser) toNode() netlogtype.Node { } n.Addresses = []netip.Addr{ipv4, ipv6} n.Addresses = slices.DeleteFunc(n.Addresses, func(a netip.Addr) bool { return !a.IsValid() }) + if nu.Hostinfo().Valid() { + n.OS = nu.Hostinfo().OS() + } if nu.Tags().Len() > 0 { n.Tags = nu.Tags().AsSlice() slices.Sort(n.Tags) diff --git a/wgengine/netlog/record_test.go b/wgengine/netlog/record_test.go index d3ab8b86c..7dd840d29 100644 --- a/wgengine/netlog/record_test.go +++ b/wgengine/netlog/record_test.go @@ -190,6 +190,7 @@ func TestToNode(t *testing.T) { node: &tailcfg.Node{ StableID: "n123456CNTL", Addresses: []netip.Prefix{prefix("100.1.2.3")}, + Hostinfo: 
(&tailcfg.Hostinfo{OS: "linux"}).View(), User: 12345, }, user: &tailcfg.UserProfile{ @@ -199,6 +200,7 @@ func TestToNode(t *testing.T) { want: netlogtype.Node{ NodeID: "n123456CNTL", Addresses: []netip.Addr{addr("100.1.2.3")}, + OS: "linux", User: "user@domain", }, }, From 446752687c7c5a22058d633a57ecf82578a86681 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 5 Nov 2025 14:56:31 -0800 Subject: [PATCH 1615/1708] cmd/vet: move jsontags into vet (#17777) The cmd/jsontags is non-idiomatic since it is not a main binary. Move it to a vet directory, which will eventually contain a vettool binary. Update tailscale/corp#791 Signed-off-by: Joe Tsai --- cmd/{ => vet}/jsontags/analyzer.go | 0 cmd/{ => vet}/jsontags/iszero.go | 0 cmd/{ => vet}/jsontags/report.go | 4 ++-- 3 files changed, 2 insertions(+), 2 deletions(-) rename cmd/{ => vet}/jsontags/analyzer.go (100%) rename cmd/{ => vet}/jsontags/iszero.go (100%) rename cmd/{ => vet}/jsontags/report.go (97%) diff --git a/cmd/jsontags/analyzer.go b/cmd/vet/jsontags/analyzer.go similarity index 100% rename from cmd/jsontags/analyzer.go rename to cmd/vet/jsontags/analyzer.go diff --git a/cmd/jsontags/iszero.go b/cmd/vet/jsontags/iszero.go similarity index 100% rename from cmd/jsontags/iszero.go rename to cmd/vet/jsontags/iszero.go diff --git a/cmd/jsontags/report.go b/cmd/vet/jsontags/report.go similarity index 97% rename from cmd/jsontags/report.go rename to cmd/vet/jsontags/report.go index f05788b61..19d40799b 100644 --- a/cmd/jsontags/report.go +++ b/cmd/vet/jsontags/report.go @@ -28,9 +28,9 @@ var jsontagsAllowlist map[ReportKind]set.Set[string] // // The struct type name may be "*" for anonymous struct types such // as those declared within a function or as a type literal in a variable. -func ParseAllowlist(b []byte) map[ReportKind]set.Set[string] { +func ParseAllowlist(s string) map[ReportKind]set.Set[string] { var allowlist map[ReportKind]set.Set[string] - for line := range strings.SplitSeq(string(b), "\n") { + for line := range strings.SplitSeq(s, "\n") { kind, field, _ := strings.Cut(strings.TrimSpace(line), "\t") if allowlist == nil { allowlist = make(map[ReportKind]set.Set[string]) From 5b40f0bc547701f461605a418d49a20a0edc9f8b Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 5 Nov 2025 16:17:49 -0800 Subject: [PATCH 1616/1708] cmd/vet: add static vet checker that runs jsontags (#17778) This starts running the jsontags vet checker on the module. All existing findings are adding to an allowlist. Updates tailscale/corp#791 Signed-off-by: Joe Tsai --- .github/workflows/vet.yml | 38 +++++ cmd/vet/jsontags_allowlist | 315 +++++++++++++++++++++++++++++++++++++ cmd/vet/vet.go | 24 +++ flake.nix | 2 +- go.mod.sri | 2 +- shell.nix | 2 +- 6 files changed, 380 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/vet.yml create mode 100644 cmd/vet/jsontags_allowlist create mode 100644 cmd/vet/vet.go diff --git a/.github/workflows/vet.yml b/.github/workflows/vet.yml new file mode 100644 index 000000000..7eff6b45f --- /dev/null +++ b/.github/workflows/vet.yml @@ -0,0 +1,38 @@ +name: tailscale.com/cmd/vet + +env: + HOME: ${{ github.workspace }} + # GOMODCACHE is the same definition on all OSes. Within the workspace, we use + # toplevel directories "src" (for the checked out source code), and "gomodcache" + # and other caches as siblings to follow. 
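// Editorial aside, not part of the patch: the kind of struct-tag problem the
// jsontags checker introduced above appears to flag. With encoding/json,
// `omitempty` never drops a zero struct value such as time.Time{}, whereas
// `omitzero` (added in Go 1.24) does; allowlist finding names like
// OmitEmptyShouldBeOmitZero point at that distinction. Minimal, illustrative
// demo:
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type record struct {
	A time.Time `json:"a,omitempty"` // zero time is still emitted
	B time.Time `json:"b,omitzero"`  // zero time is omitted (Go 1.24+)
}

func main() {
	out, err := json.Marshal(record{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"a":"0001-01-01T00:00:00Z"}
}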
+ GOMODCACHE: ${{ github.workspace }}/gomodcache + +on: + push: + branches: + - main + - "release-branch/*" + paths: + - "**.go" + pull_request: + paths: + - "**.go" + +jobs: + vet: + runs-on: [ self-hosted, linux ] + timeout-minutes: 5 + + steps: + - name: Check out code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src + + - name: Build 'go vet' tool + working-directory: src + run: ./tool/go build -o /tmp/vettool tailscale.com/cmd/vet + + - name: Run 'go vet' + working-directory: src + run: ./tool/go vet -vettool=/tmp/vettool tailscale.com/... diff --git a/cmd/vet/jsontags_allowlist b/cmd/vet/jsontags_allowlist new file mode 100644 index 000000000..060a81b05 --- /dev/null +++ b/cmd/vet/jsontags_allowlist @@ -0,0 +1,315 @@ +OmitEmptyShouldBeOmitZero tailscale.com/client/web.authResponse.ViewerIdentity +OmitEmptyShouldBeOmitZero tailscale.com/cmd/k8s-operator.OwnerRef.Resource +OmitEmptyShouldBeOmitZero tailscale.com/cmd/tailscale/cli.apiResponse.Error +OmitEmptyShouldBeOmitZero tailscale.com/health.UnhealthyState.PrimaryAction +OmitEmptyShouldBeOmitZero tailscale.com/internal/client/tailscale.VIPService.Name +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AcceptDNS +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AcceptRoutes +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AllowLANWhileUsingExitNode +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AppConnector +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AuthKey +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AutoUpdate +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.DisableSNAT +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.Enabled +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.ExitNode +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.Hostname +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.Locked +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.NetfilterMode +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.NoStatefulFiltering +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.OperatorUser +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.PostureChecking +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.RunSSHServer +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.RunWebClient +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.ServeConfigTemp +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.ServerURL +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.ShieldsUp +OmitEmptyShouldBeOmitZero tailscale.com/ipn.OutgoingFile.PeerID +OmitEmptyShouldBeOmitZero tailscale.com/ipn.Prefs.AutoExitNode +OmitEmptyShouldBeOmitZero tailscale.com/ipn.Prefs.NoStatefulFiltering +OmitEmptyShouldBeOmitZero tailscale.com/ipn.Prefs.RelayServerPort +OmitEmptyShouldBeOmitZero tailscale.com/ipn/auditlog.transaction.Action +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.PeerStatus.AllowedIPs +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.PeerStatus.Location +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.PeerStatus.PrimaryRoutes +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.PeerStatus.Tags +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.Status.ExitNodeStatus +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.UpdateProgress.Status +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ConnectorSpec.AppConnector +OmitEmptyShouldBeOmitZero 
tailscale.com/k8s-operator/apis/v1alpha1.ConnectorSpec.Hostname +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ConnectorSpec.HostnamePrefix +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ConnectorSpec.Replicas +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ConnectorSpec.SubnetRouter +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Container.Debug +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Container.ImagePullPolicy +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Container.SecurityContext +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.KubeAPIServerConfig.Mode +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Nameserver.Image +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Nameserver.Pod +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Nameserver.Replicas +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Nameserver.Service +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.Affinity +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.DNSConfig +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.DNSPolicy +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.SecurityContext +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.TailscaleContainer +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.TailscaleInitContainer +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyClassSpec.Metrics +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyClassSpec.StaticEndpoints +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyClassSpec.TailscaleConfig +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyGroupSpec.HostnamePrefix +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyGroupSpec.KubeAPIServer +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyGroupSpec.Replicas +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.RecorderContainer.ImagePullPolicy +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.RecorderContainer.SecurityContext +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.RecorderPod.Affinity +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.RecorderPod.SecurityContext +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.StatefulSet.Pod +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Storage.S3 +OmitEmptyShouldBeOmitZero tailscale.com/kube/ingressservices.Config.IPv4Mapping +OmitEmptyShouldBeOmitZero tailscale.com/kube/ingressservices.Config.IPv6Mapping +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.APIServerProxyConfig.Enabled +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.APIServerProxyConfig.IssueCerts +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.APIServerProxyConfig.Mode +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.APIServerProxyConfig.ServiceName +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.AcceptRoutes +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.APIServerProxy +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.App +OmitEmptyShouldBeOmitZero 
tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.AuthKey +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.HealthCheckEnabled +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.Hostname +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.LocalAddr +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.LocalPort +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.LogLevel +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.MetricsEnabled +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.ServerURL +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.State +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.VersionedConfig.V1Alpha1 +OmitEmptyShouldBeOmitZero tailscale.com/kube/kubeapi.ObjectMeta.DeletionGracePeriodSeconds +OmitEmptyShouldBeOmitZero tailscale.com/kube/kubeapi.Status.Details +OmitEmptyShouldBeOmitZero tailscale.com/kube/kubeclient.JSONPatch.Value +OmitEmptyShouldBeOmitZero tailscale.com/kube/kubetypes.*.Mode +OmitEmptyShouldBeOmitZero tailscale.com/kube/kubetypes.KubernetesCapRule.Impersonate +OmitEmptyShouldBeOmitZero tailscale.com/sessionrecording.CastHeader.Kubernetes +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.AuditLogRequest.Action +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Debug.Exit +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.DERPMap.HomeParams +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.DisplayMessage.PrimaryAction +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.AppConnector +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.Container +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.Desktop +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.Location +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.NetInfo +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.StateEncrypted +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.TPM +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.Userspace +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.UserspaceRouter +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.ClientVersion +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.CollectServices +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.ControlDialPlan +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.Debug +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.DefaultAutoUpdate +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.DERPMap +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.DNSConfig +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.Node +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.PingRequest +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.SSHPolicy +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.TKAInfo +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.NetPortRange.Bits +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Node.Online +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Node.SelfNodeV4MasqAddrForThisPeer +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Node.SelfNodeV6MasqAddrForThisPeer +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.PeerChange.Online +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.RegisterRequest.Auth +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.RegisterResponseAuth.Oauth2Token +OmitEmptyShouldBeOmitZero 
tailscale.com/tailcfg.SSHAction.OnRecordingFailure +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.SSHPrincipal.Node +OmitEmptyShouldBeOmitZero tailscale.com/tempfork/acme.*.ExternalAccountBinding +OmitEmptyShouldBeOmitZero tailscale.com/tsweb.AccessLogRecord.RequestID +OmitEmptyShouldBeOmitZero tailscale.com/types/opt.*.Unset +OmitEmptyShouldBeOmitZero tailscale.com/types/views.viewStruct.AddrsPtr +OmitEmptyShouldBeOmitZero tailscale.com/types/views.viewStruct.StringsPtr +OmitEmptyShouldBeOmitZero tailscale.com/wgengine/magicsock.EndpointChange.From +OmitEmptyShouldBeOmitZero tailscale.com/wgengine/magicsock.EndpointChange.To +OmitEmptyShouldBeOmitZeroButHasIsZero tailscale.com/types/persist.Persist.AttestationKey +OmitEmptyUnsupportedInV1 tailscale.com/client/tailscale.KeyCapabilities.Devices +OmitEmptyUnsupportedInV1 tailscale.com/client/tailscale/apitype.ExitNodeSuggestionResponse.Location +OmitEmptyUnsupportedInV1 tailscale.com/cmd/k8s-operator.ServiceMonitorSpec.NamespaceSelector +OmitEmptyUnsupportedInV1 tailscale.com/derp.ClientInfo.MeshKey +OmitEmptyUnsupportedInV1 tailscale.com/ipn.MaskedPrefs.AutoUpdateSet +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.Connector.ObjectMeta +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.Container.Resources +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.DNSConfig.ObjectMeta +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.ProxyClass.ObjectMeta +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.ProxyGroup.ObjectMeta +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.Recorder.ObjectMeta +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.RecorderContainer.Resources +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.RecorderPod.Container +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.RecorderPod.ServiceAccount +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.RecorderSpec.Storage +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.RecorderStatefulSet.Pod +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.S3.Credentials +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.S3Credentials.Secret +OmitEmptyUnsupportedInV1 tailscale.com/kube/kubeapi.Event.FirstTimestamp +OmitEmptyUnsupportedInV1 tailscale.com/kube/kubeapi.Event.LastTimestamp +OmitEmptyUnsupportedInV1 tailscale.com/kube/kubeapi.Event.Source +OmitEmptyUnsupportedInV1 tailscale.com/kube/kubeapi.ObjectMeta.CreationTimestamp +OmitEmptyUnsupportedInV1 tailscale.com/tailcfg_test.*.Groups +OmitEmptyUnsupportedInV1 tailscale.com/tailcfg.Oauth2Token.Expiry +OmitEmptyUnsupportedInV1 tailscale.com/tailcfg.QueryFeatureRequest.NodeKey +OmitEmptyUnsupportedInV2 tailscale.com/client/tailscale.*.ExpirySeconds +OmitEmptyUnsupportedInV2 tailscale.com/client/tailscale.DerpRegion.Preferred +OmitEmptyUnsupportedInV2 tailscale.com/client/tailscale.DevicePostureIdentity.Disabled +OmitEmptyUnsupportedInV2 tailscale.com/client/tailscale/apitype.DNSResolver.UseWithExitNode +OmitEmptyUnsupportedInV2 tailscale.com/client/web.authResponse.NeedsSynoAuth +OmitEmptyUnsupportedInV2 tailscale.com/cmd/tsidp.tailscaleClaims.UserID +OmitEmptyUnsupportedInV2 tailscale.com/derp.ClientInfo.IsProber +OmitEmptyUnsupportedInV2 tailscale.com/derp.ClientInfo.Version +OmitEmptyUnsupportedInV2 tailscale.com/derp.ServerInfo.TokenBucketBytesBurst +OmitEmptyUnsupportedInV2 
tailscale.com/derp.ServerInfo.TokenBucketBytesPerSecond +OmitEmptyUnsupportedInV2 tailscale.com/derp.ServerInfo.Version +OmitEmptyUnsupportedInV2 tailscale.com/health.UnhealthyState.ImpactsConnectivity +OmitEmptyUnsupportedInV2 tailscale.com/ipn.AutoUpdatePrefsMask.ApplySet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.AutoUpdatePrefsMask.CheckSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.AdvertiseRoutesSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.AdvertiseServicesSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.AdvertiseTagsSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.AppConnectorSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.AutoExitNodeSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ControlURLSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.CorpDNSSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.DriveSharesSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.EggSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ExitNodeAllowLANAccessSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ExitNodeIDSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ExitNodeIPSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ForceDaemonSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.HostnameSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.InternalExitNodePriorSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.LoggedOutSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.NetfilterKindSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.NetfilterModeSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.NoSNATSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.NoStatefulFilteringSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.NotepadURLsSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.OperatorUserSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.PostureCheckingSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ProfileNameSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.RelayServerPortSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.RouteAllSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.RunSSHSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.RunWebClientSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ShieldsUpSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.WantRunningSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.PartialFile.Done +OmitEmptyUnsupportedInV2 tailscale.com/ipn.Prefs.Egg +OmitEmptyUnsupportedInV2 tailscale.com/ipn.Prefs.ForceDaemon +OmitEmptyUnsupportedInV2 tailscale.com/ipn.ServiceConfig.Tun +OmitEmptyUnsupportedInV2 tailscale.com/ipn.TCPPortHandler.HTTP +OmitEmptyUnsupportedInV2 tailscale.com/ipn.TCPPortHandler.HTTPS +OmitEmptyUnsupportedInV2 tailscale.com/ipn/auditlog.transaction.Retries +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.PeerStatus.AltSharerUserID +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.PeerStatus.Expired +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.PeerStatus.ShareeNode +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.PingResult.IsLocalIP +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.PingResult.PeerAPIPort +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.Status.HaveNodeKey +OmitEmptyUnsupportedInV2 tailscale.com/k8s-operator/apis/v1alpha1.PortRange.EndPort +OmitEmptyUnsupportedInV2 
tailscale.com/k8s-operator/apis/v1alpha1.ProxyClassSpec.UseLetsEncryptStagingEnvironment +OmitEmptyUnsupportedInV2 tailscale.com/k8s-operator/apis/v1alpha1.RecorderSpec.EnableUI +OmitEmptyUnsupportedInV2 tailscale.com/k8s-operator/apis/v1alpha1.TailscaleConfig.AcceptRoutes +OmitEmptyUnsupportedInV2 tailscale.com/kube/kubeapi.Event.Count +OmitEmptyUnsupportedInV2 tailscale.com/kube/kubeapi.ObjectMeta.Generation +OmitEmptyUnsupportedInV2 tailscale.com/kube/kubeapi.Status.Code +OmitEmptyUnsupportedInV2 tailscale.com/kube/kubetypes.KubernetesCapRule.EnforceRecorder +OmitEmptyUnsupportedInV2 tailscale.com/log/sockstatlog.event.IsCellularInterface +OmitEmptyUnsupportedInV2 tailscale.com/sessionrecording.CastHeader.SrcNodeUserID +OmitEmptyUnsupportedInV2 tailscale.com/sessionrecording.Source.NodeUserID +OmitEmptyUnsupportedInV2 tailscale.com/sessionrecording.v2ResponseFrame.Ack +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg_test.*.ToggleOn +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.AuditLogRequest.Version +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.C2NPostureIdentityResponse.PostureDisabled +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.C2NSSHUsernamesRequest.Max +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.C2NTLSCertInfo.Expired +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.C2NTLSCertInfo.Missing +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.C2NTLSCertInfo.Valid +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ClientVersion.Notify +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ClientVersion.RunningLatest +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ClientVersion.UrgentSecurityUpdate +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ControlIPCandidate.DialStartDelaySec +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ControlIPCandidate.DialTimeoutSec +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ControlIPCandidate.Priority +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Debug.DisableLogTail +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Debug.SleepSeconds +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPMap.OmitDefaultRegions +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPNode.CanPort80 +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPNode.DERPPort +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPNode.InsecureForTests +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPNode.STUNOnly +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPNode.STUNPort +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPRegion.Avoid +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPRegion.Latitude +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPRegion.Longitude +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPRegion.NoMeasureNoHome +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DisplayMessage.ImpactsConnectivity +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DNSConfig.Proxied +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.AllowsUpdate +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.IngressEnabled +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.NoLogsNoSupport +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.ShareeNode +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.ShieldsUp +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.WireIngress +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Location.Latitude +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Location.Longitude +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Location.Priority +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.MapRequest.MapSessionSeq 
+OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.MapRequest.OmitPeers +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.MapRequest.ReadOnly +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.MapResponse.KeepAlive +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.MapResponse.Seq +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.NetInfo.HavePortMap +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.Cap +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.Expired +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.HomeDERP +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.IsJailed +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.IsWireGuardOnly +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.MachineAuthorized +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.Sharer +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.UnsignedPeerAPIOnly +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PeerChange.Cap +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PeerChange.DERPRegion +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingRequest.Log +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingRequest.URLIsNoise +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingResponse.DERPRegionID +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingResponse.IsLocalIP +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingResponse.LatencySeconds +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingResponse.PeerAPIPort +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.QueryFeatureResponse.Complete +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.QueryFeatureResponse.ShouldWait +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.RegisterRequest.Ephemeral +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.RegisterRequest.SignatureType +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.Accept +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.AllowAgentForwarding +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.AllowLocalPortForwarding +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.AllowRemotePortForwarding +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.Reject +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.SessionDuration +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHPrincipal.Any +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.TKAInfo.Disabled +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.TPMInfo.FirmwareVersion +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.TPMInfo.Model +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.TPMInfo.SpecRevision +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.WebClientAuthResponse.Complete +OmitEmptyUnsupportedInV2 tailscale.com/tempfork/acme.*.TermsAgreed +OmitEmptyUnsupportedInV2 tailscale.com/tstime/rate.jsonValue.Updated +OmitEmptyUnsupportedInV2 tailscale.com/tstime/rate.jsonValue.Value +OmitEmptyUnsupportedInV2 tailscale.com/tsweb.AccessLogRecord.Bytes +OmitEmptyUnsupportedInV2 tailscale.com/tsweb.AccessLogRecord.Code +OmitEmptyUnsupportedInV2 tailscale.com/tsweb.AccessLogRecord.Seconds +OmitEmptyUnsupportedInV2 tailscale.com/tsweb.AccessLogRecord.TLS +OmitEmptyUnsupportedInV2 tailscale.com/tsweb/varz.SomeStats.TotalY +OmitEmptyUnsupportedInV2 tailscale.com/types/appctype.AppConnectorConfig.AdvertiseRoutes +OmitEmptyUnsupportedInV2 tailscale.com/types/dnstype.Resolver.UseWithExitNode +OmitEmptyUnsupportedInV2 tailscale.com/types/opt.testStruct.Int +OmitEmptyUnsupportedInV2 tailscale.com/version.Meta.GitDirty +OmitEmptyUnsupportedInV2 tailscale.com/version.Meta.IsDev +OmitEmptyUnsupportedInV2 
tailscale.com/version.Meta.UnstableBranch diff --git a/cmd/vet/vet.go b/cmd/vet/vet.go new file mode 100644 index 000000000..45473af48 --- /dev/null +++ b/cmd/vet/vet.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package vet is a tool to statically check Go source code. +package main + +import ( + _ "embed" + + "golang.org/x/tools/go/analysis/unitchecker" + "tailscale.com/cmd/vet/jsontags" +) + +//go:embed jsontags_allowlist +var jsontagsAllowlistSource string + +func init() { + jsontags.RegisterAllowlist(jsontags.ParseAllowlist(jsontagsAllowlistSource)) + jsontags.RegisterPureIsZeroMethods(jsontags.PureIsZeroMethodsInTailscaleModule) +} + +func main() { + unitchecker.Main(jsontags.Analyzer) +} diff --git a/flake.nix b/flake.nix index e50f39638..d2f03d4d8 100644 --- a/flake.nix +++ b/flake.nix @@ -151,5 +151,5 @@ }); }; } -# nix-direnv cache busting line: sha256-D0znIEcy9d822snZbdNCNLoN47cOP1F2SKmfwSFRvXw= +# nix-direnv cache busting line: sha256-p8dVVZm2bLL4J/d4TtnUOp3bfMqUkBGE+53RUhamF+A= diff --git a/go.mod.sri b/go.mod.sri index 108423f4e..325a03b43 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-D0znIEcy9d822snZbdNCNLoN47cOP1F2SKmfwSFRvXw= +sha256-p8dVVZm2bLL4J/d4TtnUOp3bfMqUkBGE+53RUhamF+A= diff --git a/shell.nix b/shell.nix index 6b579b455..c11b4bbcf 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-D0znIEcy9d822snZbdNCNLoN47cOP1F2SKmfwSFRvXw= +# nix-direnv cache busting line: sha256-p8dVVZm2bLL4J/d4TtnUOp3bfMqUkBGE+53RUhamF+A= From 1ed117dbc08ac60a69ba46bdb7289b1d416bc5dc Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 6 Nov 2025 15:36:58 +0000 Subject: [PATCH 1617/1708] cmd/k8s-operator: remove Services feature flag detection Now that the feature is in beta, no one should encounter this error. Updates #cleanup Change-Id: I69ed3f460b7f28c44da43ce2f552042f980a0420 Signed-off-by: Tom Proctor --- cmd/k8s-operator/api-server-proxy-pg.go | 6 ------ cmd/k8s-operator/ingress-for-pg.go | 24 ------------------------ cmd/k8s-operator/svc-for-pg.go | 10 ---------- 3 files changed, 40 deletions(-) diff --git a/cmd/k8s-operator/api-server-proxy-pg.go b/cmd/k8s-operator/api-server-proxy-pg.go index 252859eb3..1a81e4967 100644 --- a/cmd/k8s-operator/api-server-proxy-pg.go +++ b/cmd/k8s-operator/api-server-proxy-pg.go @@ -157,12 +157,6 @@ func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, s // 1. Check there isn't a Tailscale Service with the same hostname // already created and not owned by this ProxyGroup. 
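// Editorial aside, not part of the patch: the error-classification style this
// cleanup keeps. Instead of matching substrings of an error message (as the
// removed feature-flag check did), the surviving helpers ask errors.As for a
// typed API error and inspect its status code. The types below are
// illustrative, not the real tailscale.ErrResponse.
package main

import (
	"errors"
	"fmt"
	"net/http"
)

type errResponse struct{ Status int }

func (e errResponse) Error() string { return fmt.Sprintf("API error: status %d", e.Status) }

func isNotFound(err error) bool {
	var er errResponse
	return errors.As(err, &er) && er.Status == http.StatusNotFound
}

func main() {
	err := fmt.Errorf("getting Tailscale Service: %w", errResponse{Status: http.StatusNotFound})
	fmt.Println(isNotFound(err)) // true
}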
existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) - if isErrorFeatureFlagNotEnabled(err) { - logger.Warn(msgFeatureFlagNotEnabled) - r.recorder.Event(pg, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled) - tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionFalse, reasonKubeAPIServerProxyInvalid, msgFeatureFlagNotEnabled, pg.Generation, r.clock, logger) - return nil - } if err != nil && !isErrorTailscaleServiceNotFound(err) { return fmt.Errorf("error getting Tailscale Service %q: %w", serviceName, err) } diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 3afeb528f..4d8311805 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -154,11 +154,6 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin // needs to be explicitly enabled for a tailnet to be able to use them. serviceName := tailcfg.ServiceName("svc:" + hostname) existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) - if isErrorFeatureFlagNotEnabled(err) { - logger.Warn(msgFeatureFlagNotEnabled) - r.recorder.Event(ing, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled) - return false, nil - } if err != nil && !isErrorTailscaleServiceNotFound(err) { return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) } @@ -453,11 +448,6 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG if !found { logger.Infof("Tailscale Service %q is not owned by any Ingress, cleaning up", tsSvcName) tsService, err := r.tsClient.GetVIPService(ctx, tsSvcName) - if isErrorFeatureFlagNotEnabled(err) { - msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled) - logger.Warn(msg) - return false, nil - } if isErrorTailscaleServiceNotFound(err) { return false, nil } @@ -515,12 +505,6 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, serviceName := tailcfg.ServiceName("svc:" + hostname) svc, err := r.tsClient.GetVIPService(ctx, serviceName) if err != nil { - if isErrorFeatureFlagNotEnabled(err) { - msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled) - logger.Warn(msg) - r.recorder.Event(ing, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msg) - return false, nil - } if isErrorTailscaleServiceNotFound(err) { return false, nil } @@ -1122,14 +1106,6 @@ func hasCerts(ctx context.Context, cl client.Client, lc localClient, ns string, return len(cert) > 0 && len(key) > 0, nil } -func isErrorFeatureFlagNotEnabled(err error) bool { - // messageFFNotEnabled is the error message returned by - // Tailscale control plane when a Tailscale Service API call is made for a - // tailnet that does not have the Tailscale Services feature flag enabled. - const messageFFNotEnabled = "feature unavailable for tailnet" - return err != nil && strings.Contains(err.Error(), messageFFNotEnabled) -} - func isErrorTailscaleServiceNotFound(err error) bool { var errResp tailscale.ErrResponse ok := errors.As(err, &errResp) diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go index 62cc36bd4..144d37558 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -207,11 +207,6 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // already created and not owned by this Service. 
serviceName := tailcfg.ServiceName("svc:" + hostname) existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) - if isErrorFeatureFlagNotEnabled(err) { - logger.Warn(msgFeatureFlagNotEnabled) - r.recorder.Event(svc, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled) - return false, nil - } if err != nil && !isErrorTailscaleServiceNotFound(err) { return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) } @@ -530,11 +525,6 @@ func (r *HAServiceReconciler) tailnetCertDomain(ctx context.Context) (string, er // It returns true if an existing Tailscale Service was updated to remove owner reference, as well as any error that occurred. func cleanupTailscaleService(ctx context.Context, tsClient tsClient, name tailcfg.ServiceName, operatorID string, logger *zap.SugaredLogger) (updated bool, err error) { svc, err := tsClient.GetVIPService(ctx, name) - if isErrorFeatureFlagNotEnabled(err) { - msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled) - logger.Warn(msg) - return false, nil - } if err != nil { errResp := &tailscale.ErrResponse{} ok := errors.As(err, errResp) From d4c5b278b3dd67e31498dfbfe321c5e00a801898 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Sun, 5 Oct 2025 02:10:50 +0100 Subject: [PATCH 1618/1708] cmd/k8s-operator: support workload identity federation The feature is currently in private alpha, so requires a tailnet feature flag. Initially focuses on supporting the operator's own auth, because the operator is the only device we maintain that uses static long-lived credentials. All other operator-created devices use single-use auth keys. Testing steps: * Create a cluster with an API server accessible over public internet * kubectl get --raw /.well-known/openid-configuration | jq '.issuer' * Create a federated OAuth client in the Tailscale admin console with: * The issuer from the previous step * Subject claim `system:serviceaccount:tailscale:operator` * Write scopes services, devices:core, auth_keys * Tag tag:k8s-operator * Allow the Tailscale control plane to get the public portion of the ServiceAccount token signing key without authentication: * kubectl create clusterrolebinding oidc-discovery \ --clusterrole=system:service-account-issuer-discovery \ --group=system:unauthenticated * helm install --set oauth.clientId=... --set oauth.audience=... Updates #17457 Change-Id: Ib29c85ba97b093c70b002f4f41793ffc02e6c6e9 Signed-off-by: Tom Proctor --- .../deploy/chart/templates/deployment.yaml | 26 ++++ .../deploy/chart/templates/oauth-secret.yaml | 2 +- cmd/k8s-operator/deploy/chart/values.yaml | 21 ++- cmd/k8s-operator/generate/main.go | 2 +- cmd/k8s-operator/operator.go | 18 +-- cmd/k8s-operator/tsclient.go | 102 ++++++++++--- cmd/k8s-operator/tsclient_test.go | 135 ++++++++++++++++++ 7 files changed, 272 insertions(+), 34 deletions(-) create mode 100644 cmd/k8s-operator/tsclient_test.go diff --git a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml index 51d0a88c3..0f2dc42fc 100644 --- a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml @@ -34,7 +34,9 @@ spec: securityContext: {{- toYaml . | nindent 8 }} {{- end }} + {{- if or .Values.oauth.clientSecret .Values.oauth.audience }} volumes: + {{- if .Values.oauth.clientSecret }} - name: oauth {{- with .Values.oauthSecretVolume }} {{- toYaml . 
| nindent 10 }} @@ -42,6 +44,17 @@ spec: secret: secretName: operator-oauth {{- end }} + {{- else }} + - name: oidc-jwt + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + audience: {{ .Values.oauth.audience }} + expirationSeconds: 3600 + path: token + {{- end }} + {{- end }} containers: - name: operator {{- with .Values.operatorConfig.securityContext }} @@ -72,10 +85,15 @@ spec: value: {{ .Values.loginServer }} - name: OPERATOR_INGRESS_CLASS_NAME value: {{ .Values.ingressClass.name }} + {{- if .Values.oauth.clientSecret }} - name: CLIENT_ID_FILE value: /oauth/client_id - name: CLIENT_SECRET_FILE value: /oauth/client_secret + {{- else if .Values.oauth.audience }} + - name: CLIENT_ID + value: {{ .Values.oauth.clientId }} + {{- end }} {{- $proxyTag := printf ":%s" ( .Values.proxyConfig.image.tag | default .Chart.AppVersion )}} - name: PROXY_IMAGE value: {{ coalesce .Values.proxyConfig.image.repo .Values.proxyConfig.image.repository }}{{- if .Values.proxyConfig.image.digest -}}{{ printf "@%s" .Values.proxyConfig.image.digest}}{{- else -}}{{ printf "%s" $proxyTag }}{{- end }} @@ -100,10 +118,18 @@ spec: {{- with .Values.operatorConfig.extraEnv }} {{- toYaml . | nindent 12 }} {{- end }} + {{- if or .Values.oauth.clientSecret .Values.oauth.audience }} volumeMounts: + {{- if .Values.oauth.clientSecret }} - name: oauth mountPath: /oauth readOnly: true + {{- else }} + - name: oidc-jwt + mountPath: /var/run/secrets/tailscale/serviceaccount + readOnly: true + {{- end }} + {{- end }} {{- with .Values.operatorConfig.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} diff --git a/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml b/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml index b44fde0a1..b85c78915 100644 --- a/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml @@ -1,7 +1,7 @@ # Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause -{{ if and .Values.oauth .Values.oauth.clientId -}} +{{ if and .Values.oauth .Values.oauth.clientId .Values.oauth.clientSecret -}} apiVersion: v1 kind: Secret metadata: diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index cdedb92e8..eb11fc7f2 100644 --- a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -1,13 +1,20 @@ # Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause -# Operator oauth credentials. If set a Kubernetes Secret with the provided -# values will be created in the operator namespace. If unset a Secret named -# operator-oauth must be precreated or oauthSecretVolume needs to be adjusted. -# This block will be overridden by oauthSecretVolume, if set. -oauth: {} - # clientId: "" - # clientSecret: "" +# Operator oauth credentials. If unset a Secret named operator-oauth must be +# precreated or oauthSecretVolume needs to be adjusted. This block will be +# overridden by oauthSecretVolume, if set. +oauth: + # The Client ID the operator will authenticate with. + clientId: "" + # If set a Kubernetes Secret with the provided value will be created in + # the operator namespace, and mounted into the operator Pod. Takes precedence + # over oauth.audience. + clientSecret: "" + # The audience for oauth.clientId if using a workload identity federation + # OAuth client. Mutually exclusive with oauth.clientSecret. + # See https://tailscale.com/kb/1581/workload-identity-federation. 
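+  # With audience set and clientSecret left empty, the operator mounts a
+  # projected ServiceAccount token and exchanges it for a Tailscale API
+  # access token instead of using a static secret.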
+ audience: "" # URL of the control plane to be used by all resources managed by the operator. loginServer: "" diff --git a/cmd/k8s-operator/generate/main.go b/cmd/k8s-operator/generate/main.go index 5fd5d551b..08bdc350d 100644 --- a/cmd/k8s-operator/generate/main.go +++ b/cmd/k8s-operator/generate/main.go @@ -69,7 +69,7 @@ func main() { }() log.Print("Templating Helm chart contents") helmTmplCmd := exec.Command("./tool/helm", "template", "operator", "./cmd/k8s-operator/deploy/chart", - "--namespace=tailscale") + "--namespace=tailscale", "--set=oauth.clientSecret=''") helmTmplCmd.Dir = repoRoot var out bytes.Buffer helmTmplCmd.Stdout = &out diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index cc97b1be2..d5ff07780 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -164,22 +164,24 @@ func main() { runReconcilers(rOpts) } -// initTSNet initializes the tsnet.Server and logs in to Tailscale. It uses the -// CLIENT_ID_FILE and CLIENT_SECRET_FILE environment variables to authenticate -// with Tailscale. +// initTSNet initializes the tsnet.Server and logs in to Tailscale. If CLIENT_ID +// is set, it authenticates to the Tailscale API using the federated OIDC workload +// identity flow. Otherwise, it uses the CLIENT_ID_FILE and CLIENT_SECRET_FILE +// environment variables to authenticate with static credentials. func initTSNet(zlog *zap.SugaredLogger, loginServer string) (*tsnet.Server, tsClient) { var ( - clientIDPath = defaultEnv("CLIENT_ID_FILE", "") - clientSecretPath = defaultEnv("CLIENT_SECRET_FILE", "") + clientID = defaultEnv("CLIENT_ID", "") // Used for workload identity federation. + clientIDPath = defaultEnv("CLIENT_ID_FILE", "") // Used for static client credentials. + clientSecretPath = defaultEnv("CLIENT_SECRET_FILE", "") // Used for static client credentials. hostname = defaultEnv("OPERATOR_HOSTNAME", "tailscale-operator") kubeSecret = defaultEnv("OPERATOR_SECRET", "") operatorTags = defaultEnv("OPERATOR_INITIAL_TAGS", "tag:k8s-operator") ) startlog := zlog.Named("startup") - if clientIDPath == "" || clientSecretPath == "" { - startlog.Fatalf("CLIENT_ID_FILE and CLIENT_SECRET_FILE must be set") + if clientID == "" && (clientIDPath == "" || clientSecretPath == "") { + startlog.Fatalf("CLIENT_ID_FILE and CLIENT_SECRET_FILE must be set") // TODO(tomhjp): error message can mention WIF once it's publicly available. } - tsc, err := newTSClient(context.Background(), clientIDPath, clientSecretPath, loginServer) + tsc, err := newTSClient(zlog.Named("ts-api-client"), clientID, clientIDPath, clientSecretPath, loginServer) if err != nil { startlog.Fatalf("error creating Tailscale client: %v", err) } diff --git a/cmd/k8s-operator/tsclient.go b/cmd/k8s-operator/tsclient.go index 50620c26d..d22fa1797 100644 --- a/cmd/k8s-operator/tsclient.go +++ b/cmd/k8s-operator/tsclient.go @@ -8,8 +8,13 @@ package main import ( "context" "fmt" + "net/http" "os" + "sync" + "time" + "go.uber.org/zap" + "golang.org/x/oauth2" "golang.org/x/oauth2/clientcredentials" "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" @@ -20,30 +25,53 @@ import ( // call should be performed on the default tailnet for the provided credentials. 
const ( defaultTailnet = "-" + oidcJWTPath = "/var/run/secrets/tailscale/serviceaccount/token" ) -func newTSClient(ctx context.Context, clientIDPath, clientSecretPath, loginServer string) (tsClient, error) { - clientID, err := os.ReadFile(clientIDPath) - if err != nil { - return nil, fmt.Errorf("error reading client ID %q: %w", clientIDPath, err) - } - clientSecret, err := os.ReadFile(clientSecretPath) - if err != nil { - return nil, fmt.Errorf("reading client secret %q: %w", clientSecretPath, err) - } - const tokenURLPath = "/api/v2/oauth/token" - tokenURL := fmt.Sprintf("%s%s", ipn.DefaultControlURL, tokenURLPath) +func newTSClient(logger *zap.SugaredLogger, clientID, clientIDPath, clientSecretPath, loginServer string) (*tailscale.Client, error) { + baseURL := ipn.DefaultControlURL if loginServer != "" { - tokenURL = fmt.Sprintf("%s%s", loginServer, tokenURLPath) + baseURL = loginServer } - credentials := clientcredentials.Config{ - ClientID: string(clientID), - ClientSecret: string(clientSecret), - TokenURL: tokenURL, + + var httpClient *http.Client + if clientID == "" { + // Use static client credentials mounted to disk. + id, err := os.ReadFile(clientIDPath) + if err != nil { + return nil, fmt.Errorf("error reading client ID %q: %w", clientIDPath, err) + } + secret, err := os.ReadFile(clientSecretPath) + if err != nil { + return nil, fmt.Errorf("reading client secret %q: %w", clientSecretPath, err) + } + credentials := clientcredentials.Config{ + ClientID: string(id), + ClientSecret: string(secret), + TokenURL: fmt.Sprintf("%s%s", baseURL, "/api/v2/oauth/token"), + } + tokenSrc := credentials.TokenSource(context.Background()) + httpClient = oauth2.NewClient(context.Background(), tokenSrc) + } else { + // Use workload identity federation. + tokenSrc := &jwtTokenSource{ + logger: logger, + jwtPath: oidcJWTPath, + baseCfg: clientcredentials.Config{ + ClientID: clientID, + TokenURL: fmt.Sprintf("%s%s", baseURL, "/api/v2/oauth/token-exchange"), + }, + } + httpClient = &http.Client{ + Transport: &oauth2.Transport{ + Source: tokenSrc, + }, + } } + c := tailscale.NewClient(defaultTailnet, nil) c.UserAgent = "tailscale-k8s-operator" - c.HTTPClient = credentials.Client(ctx) + c.HTTPClient = httpClient if loginServer != "" { c.BaseURL = loginServer } @@ -63,3 +91,43 @@ type tsClient interface { // DeleteVIPService is a method for deleting a Tailscale Service. DeleteVIPService(ctx context.Context, name tailcfg.ServiceName) error } + +// jwtTokenSource implements the [oauth2.TokenSource] interface, but with the +// ability to regenerate a fresh underlying token source each time a new value +// of the JWT parameter is needed due to expiration. +type jwtTokenSource struct { + logger *zap.SugaredLogger + jwtPath string // Path to the file containing an automatically refreshed JWT. + baseCfg clientcredentials.Config // Holds config that doesn't change for the lifetime of the process. + + mu sync.Mutex // Guards underlying. + underlying oauth2.TokenSource // The oauth2 client implementation. Does its own separate caching of the access token. +} + +func (s *jwtTokenSource) Token() (*oauth2.Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.underlying != nil { + t, err := s.underlying.Token() + if err == nil && t != nil && t.Valid() { + return t, nil + } + } + + s.logger.Debugf("Refreshing JWT from %s", s.jwtPath) + tk, err := os.ReadFile(s.jwtPath) + if err != nil { + return nil, fmt.Errorf("error reading JWT from %q: %w", s.jwtPath, err) + } + + // Shallow copy of the base config. 
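+ // Copying the struct lets each token request carry the freshly read JWT
+ // via EndpointParams without mutating baseCfg for later calls.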
+ credentials := s.baseCfg + credentials.EndpointParams = map[string][]string{ + "jwt": {string(tk)}, + } + + src := credentials.TokenSource(context.Background()) + s.underlying = oauth2.ReuseTokenSourceWithExpiry(nil, src, time.Minute) + return s.underlying.Token() +} diff --git a/cmd/k8s-operator/tsclient_test.go b/cmd/k8s-operator/tsclient_test.go new file mode 100644 index 000000000..16de512d5 --- /dev/null +++ b/cmd/k8s-operator/tsclient_test.go @@ -0,0 +1,135 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + + "go.uber.org/zap" + "golang.org/x/oauth2" +) + +func TestNewStaticClient(t *testing.T) { + const ( + clientIDFile = "client-id" + clientSecretFile = "client-secret" + ) + + tmp := t.TempDir() + clientIDPath := filepath.Join(tmp, clientIDFile) + if err := os.WriteFile(clientIDPath, []byte("test-client-id"), 0600); err != nil { + t.Fatalf("error writing test file %q: %v", clientIDPath, err) + } + clientSecretPath := filepath.Join(tmp, clientSecretFile) + if err := os.WriteFile(clientSecretPath, []byte("test-client-secret"), 0600); err != nil { + t.Fatalf("error writing test file %q: %v", clientSecretPath, err) + } + + srv := testAPI(t, 3600) + cl, err := newTSClient(zap.NewNop().Sugar(), "", clientIDPath, clientSecretPath, srv.URL) + if err != nil { + t.Fatalf("error creating Tailscale client: %v", err) + } + + resp, err := cl.HTTPClient.Get(srv.URL) + if err != nil { + t.Fatalf("error making test API call: %v", err) + } + defer resp.Body.Close() + + got, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading response body: %v", err) + } + want := "Bearer " + testToken("/api/v2/oauth/token", "test-client-id", "test-client-secret", "") + if string(got) != want { + t.Errorf("got %q; want %q", got, want) + } +} + +func TestNewWorkloadIdentityClient(t *testing.T) { + // 5 seconds is within expiryDelta leeway, so the access token will + // immediately be considered expired and get refreshed on each access. + srv := testAPI(t, 5) + cl, err := newTSClient(zap.NewNop().Sugar(), "test-client-id", "", "", srv.URL) + if err != nil { + t.Fatalf("error creating Tailscale client: %v", err) + } + + // Modify the path where the JWT will be read from. 
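+ // Reach through the client's oauth2 transport to find the jwtTokenSource
+ // so the test can point jwtPath at a temp file it controls.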
+ oauth2Transport, ok := cl.HTTPClient.Transport.(*oauth2.Transport) + if !ok { + t.Fatalf("expected oauth2.Transport, got %T", cl.HTTPClient.Transport) + } + jwtTokenSource, ok := oauth2Transport.Source.(*jwtTokenSource) + if !ok { + t.Fatalf("expected jwtTokenSource, got %T", oauth2Transport.Source) + } + tmp := t.TempDir() + jwtPath := filepath.Join(tmp, "token") + jwtTokenSource.jwtPath = jwtPath + + for _, jwt := range []string{"test-jwt", "updated-test-jwt"} { + if err := os.WriteFile(jwtPath, []byte(jwt), 0600); err != nil { + t.Fatalf("error writing test file %q: %v", jwtPath, err) + } + resp, err := cl.HTTPClient.Get(srv.URL) + if err != nil { + t.Fatalf("error making test API call: %v", err) + } + defer resp.Body.Close() + + got, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading response body: %v", err) + } + if want := "Bearer " + testToken("/api/v2/oauth/token-exchange", "test-client-id", "", jwt); string(got) != want { + t.Errorf("got %q; want %q", got, want) + } + } +} + +func testAPI(t *testing.T, expirationSeconds int) *httptest.Server { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Logf("test server got request: %s %s", r.Method, r.URL.Path) + switch r.URL.Path { + case "/api/v2/oauth/token", "/api/v2/oauth/token-exchange": + id, secret, ok := r.BasicAuth() + if !ok { + t.Fatal("missing or invalid basic auth") + } + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(map[string]any{ + "access_token": testToken(r.URL.Path, id, secret, r.FormValue("jwt")), + "token_type": "Bearer", + "expires_in": expirationSeconds, + }); err != nil { + t.Fatalf("error writing response: %v", err) + } + case "/": + // Echo back the authz header for test assertions. + _, err := w.Write([]byte(r.Header.Get("Authorization"))) + if err != nil { + t.Fatalf("error writing response: %v", err) + } + default: + w.WriteHeader(http.StatusNotFound) + } + })) + t.Cleanup(srv.Close) + return srv +} + +func testToken(path, id, secret, jwt string) string { + return fmt.Sprintf("%s|%s|%s|%s", path, id, secret, jwt) +} From bab5e68d0a67339de3c7f3b1fe6f0f8d84524a3a Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Fri, 7 Nov 2025 18:38:49 -0500 Subject: [PATCH 1619/1708] net/udprelay: use GetGlobalAddrs and add local port endpoint (#17797) Use GetGlobalAddrs() to discover all STUN endpoints, handling bad NATs that create multiple mappings. When MappingVariesByDestIP is true, also add the first STUN IPv4 address with the relay's local port for static port mapping scenarios. Updates #17796 Signed-off-by: Raj Singh --- net/udprelay/server.go | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 83831dd69..de1376b64 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -393,14 +393,29 @@ func (s *Server) addrDiscoveryLoop() { if err != nil { return nil, err } - if rep.GlobalV4.IsValid() { - addrPorts.Add(rep.GlobalV4) + // Add STUN-discovered endpoints with their observed ports. 
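+ // GetGlobalAddrs can return more than one mapping per address family when
+ // a badly behaved NAT creates multiple bindings; add every valid one.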
+ v4Addrs, v6Addrs := rep.GetGlobalAddrs() + for _, addr := range v4Addrs { + if addr.IsValid() { + addrPorts.Add(addr) + } } - if rep.GlobalV6.IsValid() { - addrPorts.Add(rep.GlobalV6) + for _, addr := range v6Addrs { + if addr.IsValid() { + addrPorts.Add(addr) + } + } + + if len(v4Addrs) >= 1 && v4Addrs[0].IsValid() { + // If they're behind a hard NAT and are using a fixed + // port locally, assume they might've added a static + // port mapping on their router to the same explicit + // port that the relay is running with. Worst case + // it's an invalid candidate mapping. + if rep.MappingVariesByDestIP.EqualBool(true) && s.uc4Port != 0 { + addrPorts.Add(netip.AddrPortFrom(v4Addrs[0].Addr(), s.uc4Port)) + } } - // TODO(jwhited): consider logging if rep.MappingVariesByDestIP as - // that's a hint we are not well-positioned to operate as a UDP relay. return addrPorts.Slice(), nil } From 875a9c526d1c2c6fc6d1c4f239f27571b92404e3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 8 Nov 2025 17:44:47 -0800 Subject: [PATCH 1620/1708] tsnet: skip a 30s long flaky-ish test on macOS Updates #17805 Change-Id: I540f50d067eee12e430dfd9de6871dc784fffb8a Signed-off-by: Brad Fitzpatrick --- tsnet/tsnet_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 1e22681fc..1b6ebf4e4 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -275,6 +275,9 @@ func TestDialBlocks(t *testing.T) { } func TestConn(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("slow on macOS: https://github.com/tailscale/tailscale/issues/17805") + } tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() From de733c5951c3ead36df8cc107996f1488337f37f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 9 Nov 2025 07:05:28 -0800 Subject: [PATCH 1621/1708] tailcfg: kill off rest of HairPinning symbols It was disabled in May 2024 in #12205 (9eb72bb51). This removes the unused symbols. Updates #188 Updates tailscale/corp#19106 Updates tailscale/corp#19116 Change-Id: I5208b7b750b18226ed703532ed58c4ea17195a8e Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/c2n_test.go | 1 - tailcfg/tailcfg.go | 9 ++------- tailcfg/tailcfg_clone.go | 1 - tailcfg/tailcfg_test.go | 1 - tailcfg/tailcfg_view.go | 5 ----- 5 files changed, 2 insertions(+), 15 deletions(-) diff --git a/ipn/ipnlocal/c2n_test.go b/ipn/ipnlocal/c2n_test.go index 95cd5fa69..877d102d0 100644 --- a/ipn/ipnlocal/c2n_test.go +++ b/ipn/ipnlocal/c2n_test.go @@ -324,7 +324,6 @@ func TestRedactNetmapPrivateKeys(t *testing.T) { f(tailcfg.Location{}, "Priority"): false, f(tailcfg.NetInfo{}, "DERPLatency"): false, f(tailcfg.NetInfo{}, "FirewallMode"): false, - f(tailcfg.NetInfo{}, "HairPinning"): false, f(tailcfg.NetInfo{}, "HavePortMap"): false, f(tailcfg.NetInfo{}, "LinkType"): false, f(tailcfg.NetInfo{}, "MappingVariesByDestIP"): false, diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index a95d0559c..43ed3188f 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -1018,10 +1018,6 @@ type NetInfo struct { // vary based on the destination IP. MappingVariesByDestIP opt.Bool - // HairPinning is their router does hairpinning. - // It reports true even if there's no NAT involved. - HairPinning opt.Bool - // WorkingIPv6 is whether the host has IPv6 internet connectivity. 
WorkingIPv6 opt.Bool @@ -1089,8 +1085,8 @@ func (ni *NetInfo) String() string { if ni == nil { return "NetInfo(nil)" } - return fmt.Sprintf("NetInfo{varies=%v hairpin=%v ipv6=%v ipv6os=%v udp=%v icmpv4=%v derp=#%v portmap=%v link=%q firewallmode=%q}", - ni.MappingVariesByDestIP, ni.HairPinning, ni.WorkingIPv6, + return fmt.Sprintf("NetInfo{varies=%v ipv6=%v ipv6os=%v udp=%v icmpv4=%v derp=#%v portmap=%v link=%q firewallmode=%q}", + ni.MappingVariesByDestIP, ni.WorkingIPv6, ni.OSHasIPv6, ni.WorkingUDP, ni.WorkingICMPv4, ni.PreferredDERP, ni.portMapSummary(), ni.LinkType, ni.FirewallMode) } @@ -1133,7 +1129,6 @@ func (ni *NetInfo) BasicallyEqual(ni2 *NetInfo) bool { return true } return ni.MappingVariesByDestIP == ni2.MappingVariesByDestIP && - ni.HairPinning == ni2.HairPinning && ni.WorkingIPv6 == ni2.WorkingIPv6 && ni.OSHasIPv6 == ni2.OSHasIPv6 && ni.WorkingUDP == ni2.WorkingUDP && diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 9aa767388..751b7c288 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -207,7 +207,6 @@ func (src *NetInfo) Clone() *NetInfo { // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _NetInfoCloneNeedsRegeneration = NetInfo(struct { MappingVariesByDestIP opt.Bool - HairPinning opt.Bool WorkingIPv6 opt.Bool OSHasIPv6 opt.Bool WorkingUDP opt.Bool diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index addd2330b..6691263eb 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -607,7 +607,6 @@ func TestNodeEqual(t *testing.T) { func TestNetInfoFields(t *testing.T) { handled := []string{ "MappingVariesByDestIP", - "HairPinning", "WorkingIPv6", "OSHasIPv6", "WorkingUDP", diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 88dd90096..dbd29a87a 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -741,10 +741,6 @@ func (v *NetInfoView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { // vary based on the destination IP. func (v NetInfoView) MappingVariesByDestIP() opt.Bool { return v.ж.MappingVariesByDestIP } -// HairPinning is their router does hairpinning. -// It reports true even if there's no NAT involved. -func (v NetInfoView) HairPinning() opt.Bool { return v.ж.HairPinning } - // WorkingIPv6 is whether the host has IPv6 internet connectivity. func (v NetInfoView) WorkingIPv6() opt.Bool { return v.ж.WorkingIPv6 } @@ -809,7 +805,6 @@ func (v NetInfoView) String() string { return v.ж.String() } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
var _NetInfoViewNeedsRegeneration = NetInfo(struct { MappingVariesByDestIP opt.Bool - HairPinning opt.Bool WorkingIPv6 opt.Bool OSHasIPv6 opt.Bool WorkingUDP opt.Bool From 2e265213fddada539452a59536c88dbbc535a27d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 8 Nov 2025 21:15:13 -0800 Subject: [PATCH 1622/1708] tsnet: fix TestConn to be fast, not flaky Fixes #17805 Change-Id: I36e37cb0cfb2ea7b2341fd4b9809fbf1dd46d991 Signed-off-by: Brad Fitzpatrick --- tsnet/tsnet_test.go | 118 +++++++++++++++++++++++++++++++++----------- 1 file changed, 89 insertions(+), 29 deletions(-) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 1b6ebf4e4..c19ae3c14 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -274,33 +274,56 @@ func TestDialBlocks(t *testing.T) { defer c.Close() } +// TestConn tests basic TCP connections between two tsnet Servers, s1 and s2: +// +// - s1, a subnet router, first listens on its TCP :8081. +// - s2 can connect to s1:8081 +// - s2 cannot connect to s1:8082 (no listener) +// - s2 can dial through the subnet router functionality (getting a synthetic RST +// that we verify we generated & saw) func TestConn(t *testing.T) { - if runtime.GOOS == "darwin" { - t.Skip("slow on macOS: https://github.com/tailscale/tailscale/issues/17805") - } tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() controlURL, c := startControl(t) s1, s1ip, s1PubKey := startServer(t, ctx, controlURL, "s1") - s2, _, _ := startServer(t, ctx, controlURL, "s2") - s1.lb.EditPrefs(&ipn.MaskedPrefs{ + // Track whether we saw an attempted dial to 192.0.2.1:8081. + var saw192DocNetDial atomic.Bool + s1.RegisterFallbackTCPHandler(func(src, dst netip.AddrPort) (handler func(net.Conn), intercept bool) { + t.Logf("s1: fallback TCP handler called for %v -> %v", src, dst) + if dst.String() == "192.0.2.1:8081" { + saw192DocNetDial.Store(true) + } + return nil, true // nil handler but intercept=true means to send RST + }) + + lc1 := must.Get(s1.LocalClient()) + + must.Get(lc1.EditPrefs(ctx, &ipn.MaskedPrefs{ Prefs: ipn.Prefs{ AdvertiseRoutes: []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24")}, }, AdvertiseRoutesSet: true, - }) + })) c.SetSubnetRoutes(s1PubKey, []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24")}) - lc2, err := s2.LocalClient() - if err != nil { - t.Fatal(err) - } + // Start s2 after s1 is fully set up, including advertising its routes, + // otherwise the test is flaky if the test starts dialing through s2 before + // our test control server has told s2 about s1's routes. + s2, _, _ := startServer(t, ctx, controlURL, "s2") + lc2 := must.Get(s2.LocalClient()) + + must.Get(lc2.EditPrefs(ctx, &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + RouteAll: true, + }, + RouteAllSet: true, + })) // ping to make sure the connection is up. 
- res, err := lc2.Ping(ctx, s1ip, tailcfg.PingICMP) + res, err := lc2.Ping(ctx, s1ip, tailcfg.PingTSMP) if err != nil { t.Fatal(err) } @@ -313,12 +336,26 @@ func TestConn(t *testing.T) { } defer ln.Close() - w, err := s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", s1ip)) - if err != nil { - t.Fatal(err) - } + s1Conns := make(chan net.Conn) + go func() { + for { + c, err := ln.Accept() + if err != nil { + if ctx.Err() != nil { + return + } + t.Errorf("s1.Accept: %v", err) + return + } + select { + case s1Conns <- c: + case <-ctx.Done(): + c.Close() + } + } + }() - r, err := ln.Accept() + w, err := s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", s1ip)) if err != nil { t.Fatal(err) } @@ -328,28 +365,51 @@ func TestConn(t *testing.T) { t.Fatal(err) } - got := make([]byte, len(want)) - if _, err := io.ReadAtLeast(r, got, len(got)); err != nil { - t.Fatal(err) - } - t.Logf("got: %q", got) - if string(got) != want { - t.Errorf("got %q, want %q", got, want) + select { + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for connection") + case r := <-s1Conns: + got := make([]byte, len(want)) + _, err := io.ReadAtLeast(r, got, len(got)) + r.Close() + if err != nil { + t.Fatal(err) + } + t.Logf("got: %q", got) + if string(got) != want { + t.Errorf("got %q, want %q", got, want) + } } + // Dial a non-existent port on s1 and expect it to fail. _, err = s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8082", s1ip)) // some random port if err == nil { t.Fatalf("unexpected success; should have seen a connection refused error") } - - // s1 is a subnet router for TEST-NET-1 (192.0.2.0/24). Lets dial to that - // subnet from s2 to ensure a listener without an IP address (i.e. ":8081") - // only matches destination IPs corresponding to the node's IP, and not - // to any random IP a subnet is routing. - _, err = s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", "192.0.2.1")) + t.Logf("got expected failure: %v", err) + + // s1 is a subnet router for TEST-NET-1 (192.0.2.0/24). Let's dial to that + // subnet from s2 to ensure a listener without an IP address (i.e. our + // ":8081" listen above) only matches destination IPs corresponding to the + // s1 node's IP addresses, and not to any random IP of a subnet it's routing. + // + // The RegisterFallbackTCPHandler on s1 above handles sending a RST when the + // TCP SYN arrives from s2. But we bound it to 5 seconds lest a regression + // like tailscale/tailscale#17805 recur. 
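+ // Override s2's system dialer so any unexpected fall-through to the host
+ // network returns an error immediately instead of touching the real network.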
+ s2dialer := s2.Sys().Dialer.Get() + s2dialer.SetSystemDialerForTest(func(ctx context.Context, netw, addr string) (net.Conn, error) { + t.Logf("s2: unexpected system dial called for %s %s", netw, addr) + return nil, fmt.Errorf("system dialer called unexpectedly for %s %s", netw, addr) + }) + docCtx, docCancel := context.WithTimeout(ctx, 5*time.Second) + defer docCancel() + _, err = s2.Dial(docCtx, "tcp", "192.0.2.1:8081") if err == nil { t.Fatalf("unexpected success; should have seen a connection refused error") } + if !saw192DocNetDial.Load() { + t.Errorf("expected s1's fallback TCP handler to have been called for 192.0.2.1:8081") + } } func TestLoopbackLocalAPI(t *testing.T) { From ae3dff15e40982d3aeaf0e457001da434cb4e6d8 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Sun, 9 Nov 2025 15:49:24 -0800 Subject: [PATCH 1623/1708] ipn/ipnlocal: clean up some of the weird locking (#17802) * lock released early just to call `b.send` when it can call `b.sendToLocked` instead * `UnlockEarly` called to release the lock before trivially fast operations, we can wait for a defer there Updates #11649 Signed-off-by: Andrew Lytvynov --- ipn/ipnlocal/local.go | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ffab4b69d..d7c16f982 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1533,8 +1533,6 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control return } if st.Err != nil { - // The following do not depend on any data for which we need b locked. - unlock.UnlockEarly() if errors.Is(st.Err, io.EOF) { b.logf("[v1] Received error: EOF") return @@ -1543,7 +1541,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control var uerr controlclient.UserVisibleError if errors.As(st.Err, &uerr) { s := uerr.UserVisibleError() - b.send(ipn.Notify{ErrMessage: &s}) + b.sendToLocked(ipn.Notify{ErrMessage: &s}, allClients) } return } @@ -1743,6 +1741,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.health.SetLocalLogConfigHealth(errors.New(msg)) // Connecting to this tailnet without logging is forbidden; boot us outta here. b.mu.Lock() + defer b.mu.Unlock() // Get the current prefs again, since we unlocked above. prefs := b.pm.CurrentPrefs().AsStruct() prefs.WantRunning = false @@ -1754,8 +1753,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control }); err != nil { b.logf("Failed to save new controlclient state: %v", err) } - b.mu.Unlock() - b.send(ipn.Notify{ErrMessage: &msg, Prefs: &p}) + b.sendToLocked(ipn.Notify{ErrMessage: &msg, Prefs: &p}, allClients) return } if oldNetMap != nil { @@ -4795,8 +4793,8 @@ func (b *LocalBackend) setPortlistServices(sl []tailcfg.Service) { // TODO(danderson): we shouldn't be mangling hostinfo here after // painstakingly constructing it in twelvety other places. func (b *LocalBackend) doSetHostinfoFilterServices() { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() cc := b.cc if cc == nil { @@ -4821,8 +4819,6 @@ func (b *LocalBackend) doSetHostinfoFilterServices() { hi.Services = []tailcfg.Service{} } - unlock.UnlockEarly() - // Don't mutate hi.Service's underlying array. Append to // the slice with no free capacity. 
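// (e.g. by appending through a full slice expression such as hi.Services[:c:c],
// which forces append to allocate a new backing array.)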
c := len(hi.Services) From c7dbd3987eda5b2ad656ad15b412ba5c6a3cce1a Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 10 Nov 2025 09:53:40 +0000 Subject: [PATCH 1624/1708] tka: remove an unused parameter from `computeActiveAncestor` Updates #cleanup Change-Id: I86ee7a0d048dafc8c0d030291261240050451721 Signed-off-by: Alex Chan --- tka/tka.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tka/tka.go b/tka/tka.go index c37c39754..a8144e96f 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -342,7 +342,7 @@ func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error) // hint to choose what to use. For that, we rely on the chainsThroughActive // bit, which signals to us that that ancestor was part of the // chain in a previous run. -func computeActiveAncestor(storage Chonk, chains []chain) (AUMHash, error) { +func computeActiveAncestor(chains []chain) (AUMHash, error) { // Dedupe possible ancestors, tracking if they were part of // the active chain on a previous run. ancestors := make(map[AUMHash]bool, len(chains)) @@ -392,7 +392,7 @@ func computeActiveChain(storage Chonk, lastKnownOldest *AUMHash, maxIter int) (c } // Find the right ancestor. - oldestHash, err := computeActiveAncestor(storage, chains) + oldestHash, err := computeActiveAncestor(chains) if err != nil { return chain{}, fmt.Errorf("computing ancestor: %v", err) } From 4c67df42f67190b6e4d65341562b17f6c502ce60 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 10 Nov 2025 12:03:41 +0000 Subject: [PATCH 1625/1708] tka: log a better error if there are no chain candidates Previously if `chains` was empty, it would be passed to `computeActiveAncestor()`, which would fail with the misleading error "multiple distinct chains". Updates tailscale/corp#33846 Signed-off-by: Alex Chan Change-Id: Ib93a755dbdf4127f81cbf69f3eece5a388db31c8 --- tka/tka.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tka/tka.go b/tka/tka.go index a8144e96f..c34e35e7b 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -391,6 +391,10 @@ func computeActiveChain(storage Chonk, lastKnownOldest *AUMHash, maxIter int) (c return chain{}, fmt.Errorf("computing candidates: %v", err) } + if len(chains) == 0 { + return chain{}, errors.New("no chain candidates in AUM storage") + } + // Find the right ancestor. oldestHash, err := computeActiveAncestor(chains) if err != nil { From fe5501a4e95424c4501b53db83d1293b6fa61ec6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 9 Nov 2025 16:47:42 -0800 Subject: [PATCH 1626/1708] wgengine: make getStatus a bit cheaper (less alloc-y) This removes one of the O(n=peers) allocs in getStatus, as Engine.getStatus happens more often than Reconfig. 
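For illustration only (not part of this change), a minimal sketch of the
pattern with made-up names, using the same tailscale.com/types/views package
as the diff: pay the copy once when the peer set changes, then read the
snapshot on every status call without cloning it.

    package main

    import (
    	"fmt"
    	"sync"

    	"tailscale.com/types/views"
    )

    type peerTracker struct {
    	mu    sync.Mutex
    	peers views.Slice[string] // immutable snapshot, rebuilt on reconfig
    }

    func (t *peerTracker) reconfig(peers []string) {
    	seq := make([]string, len(peers))
    	copy(seq, peers) // the one allocation, paid only when config changes
    	t.mu.Lock()
    	t.peers = views.SliceOf(seq)
    	t.mu.Unlock()
    }

    func (t *peerTracker) status() (n int) {
    	t.mu.Lock()
    	v := t.peers // cheap: copies the view header, not the backing array
    	t.mu.Unlock()
    	for _, p := range v.All() {
    		if p != "" {
    			n++
    		}
    	}
    	return n
    }

    func main() {
    	t := new(peerTracker)
    	t.reconfig([]string{"peer-a", "peer-b"})
    	fmt.Println(t.status()) // 2
    }
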
Updates #17814 Change-Id: I8a87fbebbecca3aedadba38e46cc418fd163c2b0 Signed-off-by: Brad Fitzpatrick --- wgengine/userspace.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 619df655c..1e70856ca 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -145,7 +145,7 @@ type userspaceEngine struct { netMap *netmap.NetworkMap // or nil closing bool // Close was called (even if we're still closing) statusCallback StatusCallback - peerSequence []key.NodePublic + peerSequence views.Slice[key.NodePublic] endpoints []tailcfg.Endpoint pendOpen map[flowtrackTuple]*pendingOpenFlow // see pendopen.go @@ -939,12 +939,15 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, e.tundev.SetWGConfig(cfg) peerSet := make(set.Set[key.NodePublic], len(cfg.Peers)) + e.mu.Lock() - e.peerSequence = e.peerSequence[:0] + seq := make([]key.NodePublic, 0, len(cfg.Peers)) for _, p := range cfg.Peers { - e.peerSequence = append(e.peerSequence, p.PublicKey) + seq = append(seq, p.PublicKey) peerSet.Add(p.PublicKey) } + e.peerSequence = views.SliceOf(seq) + nm := e.netMap e.mu.Unlock() @@ -1199,7 +1202,7 @@ func (e *userspaceEngine) getStatus() (*Status, error) { e.mu.Lock() closing := e.closing - peerKeys := slices.Clone(e.peerSequence) + peerKeys := e.peerSequence localAddrs := slices.Clone(e.endpoints) e.mu.Unlock() @@ -1207,8 +1210,8 @@ func (e *userspaceEngine) getStatus() (*Status, error) { return nil, ErrEngineClosing } - peers := make([]ipnstate.PeerStatusLite, 0, len(peerKeys)) - for _, key := range peerKeys { + peers := make([]ipnstate.PeerStatusLite, 0, peerKeys.Len()) + for _, key := range peerKeys.All() { if status, ok := e.getPeerStatusLite(key); ok { peers = append(peers, status) } From e059382174c43dff9f237f75dba0a6470e8acc47 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 10 Nov 2025 10:22:47 -0800 Subject: [PATCH 1627/1708] wgengine/magicsock: clean up determineEndpoints docs (#17822) Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 658478901..1f0a85f07 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1243,8 +1243,8 @@ func (c *Conn) DiscoPublicKey() key.DiscoPublic { } // determineEndpoints returns the machine's endpoint addresses. It does a STUN -// lookup (via netcheck) to determine its public address. Additionally any -// static enpoints provided by user are always added to the returned endpoints +// lookup (via netcheck) to determine its public address. Additionally, any +// static endpoints provided by user are always added to the returned endpoints // without validating if the node can be reached via those endpoints. // // c.mu must NOT be held. From e0e87311306ed6dde78a36110307afac86146768 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 8 Nov 2025 20:21:22 -0800 Subject: [PATCH 1628/1708] feature, ipn/ipnlocal: add, use feature.CanSystemdStatus for more DCE When systemd notification support was omitted from the build, or on non-Linux systems, we were unnecessarily emitting code and generating garbage stringifying addresses upon transition to the Running state. 
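A minimal standalone sketch of the idea (the constant and function names
below are invented, not the real feature/buildfeatures API): guarding the
work behind a boolean constant lets the compiler drop the branch body,
including the per-address stringification, whenever the constant is false.

    package main

    import (
    	"log"
    	"net/netip"
    	"runtime"
    	"strings"
    )

    // In the real code the second operand comes from a build tag; it is
    // hard-coded here only to show the shape of the guard.
    const canSystemdStatus = runtime.GOOS == "linux" && false

    func notifyRunning(addrs []netip.Prefix) {
    	if canSystemdStatus {
    		// With the constant false, this body is dead code the compiler
    		// can eliminate, so no strings are built on state transitions.
    		var s []string
    		for _, p := range addrs {
    			s = append(s, p.Addr().String())
    		}
    		log.Printf("Connected; %s", strings.Join(s, " "))
    	}
    }

    func main() {
    	notifyRunning([]netip.Prefix{netip.MustParsePrefix("100.101.102.103/32")})
    }
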
Updates #12614 Change-Id: If713f47351c7922bb70e9da85bf92725b25954b9 Signed-off-by: Brad Fitzpatrick --- feature/sdnotify.go | 9 ++++++++- ipn/ipnlocal/local.go | 12 +++++++----- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/feature/sdnotify.go b/feature/sdnotify.go index e785dc1ac..7a786dfab 100644 --- a/feature/sdnotify.go +++ b/feature/sdnotify.go @@ -23,10 +23,17 @@ var HookSystemdStatus Hook[func(format string, args ...any)] // It does nothing on non-Linux systems or if the binary was built without // the sdnotify feature. func SystemdStatus(format string, args ...any) { - if runtime.GOOS != "linux" || !buildfeatures.HasSDNotify { + if !CanSystemdStatus { // mid-stack inlining DCE return } if f, ok := HookSystemdStatus.GetOk(); ok { f(format, args...) } } + +// CanSystemdStatus reports whether the current build has systemd notifications +// linked in. +// +// It's effectively the same as HookSystemdStatus.IsSet(), but a constant for +// dead code elimination reasons. +const CanSystemdStatus = runtime.GOOS == "linux" && buildfeatures.HasSDNotify diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index d7c16f982..245e23db1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5629,12 +5629,14 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock // Needed so that UpdateEndpoints can run b.e.RequestStatus() case ipn.Running: - var addrStrs []string - addrs := netMap.GetAddresses() - for _, p := range addrs.All() { - addrStrs = append(addrStrs, p.Addr().String()) + if feature.CanSystemdStatus { + var addrStrs []string + addrs := netMap.GetAddresses() + for _, p := range addrs.All() { + addrStrs = append(addrStrs, p.Addr().String()) + } + feature.SystemdStatus("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) } - feature.SystemdStatus("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) default: b.logf("[unexpected] unknown newState %#v", newState) } From 8ed6bb3198246df240d32b3361738aac6102e254 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 9 Nov 2025 16:13:39 -0800 Subject: [PATCH 1629/1708] ipn/ipnlocal: move vipServiceHash etc to serve.go, out of local.go Updates #12614 Change-Id: I3c16b94fcb997088ff18d5a21355e0279845ed7e Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 67 ++++++++++++-------------------------- ipn/ipnlocal/local_test.go | 6 ++-- ipn/ipnlocal/serve.go | 53 +++++++++++++++++++++++++++++- 3 files changed, 75 insertions(+), 51 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 245e23db1..8bdc1a14a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -10,7 +10,6 @@ import ( "context" "crypto/sha256" "encoding/binary" - "encoding/hex" "encoding/json" "errors" "fmt" @@ -5487,20 +5486,9 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip } hi.SSH_HostKeys = sshHostKeys - hi.ServicesHash = b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) - - // The Hostinfo.IngressEnabled field is used to communicate to control whether - // the node has funnel enabled. - hi.IngressEnabled = b.hasIngressEnabledLocked() - // The Hostinfo.WantIngress field tells control whether the user intends - // to use funnel with this node even though it is not currently enabled. - // This is an optimization to control- Funnel requires creation of DNS - // records and because DNS propagation can take time, we want to ensure - // that the records exist for any node that intends to use funnel even - // if it's not enabled. 
If hi.IngressEnabled is true, control knows that - // DNS records are needed, so we can save bandwidth and not send - // WireIngress. - hi.WireIngress = b.shouldWireInactiveIngressLocked() + for _, f := range hookMaybeMutateHostinfoLocked { + f(b, hi, prefs) + } if buildfeatures.HasAppConnectors { hi.AppConnector.Set(prefs.AppConnector().Advertise) @@ -6284,36 +6272,34 @@ func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn. } // Update funnel and service hash info in hostinfo and kick off control update if needed. - b.updateIngressAndServiceHashLocked(prefs) + b.maybeSentHostinfoIfChangedLocked(prefs) b.setTCPPortsIntercepted(handlePorts) } -// updateIngressAndServiceHashLocked updates the hostinfo.ServicesHash, hostinfo.WireIngress and +// hookMaybeMutateHostinfoLocked is a hook that allows conditional features +// to mutate the provided hostinfo before it is sent to control. +// +// The hook function should return true if it mutated the hostinfo. +// +// The LocalBackend's mutex is held while calling. +var hookMaybeMutateHostinfoLocked feature.Hooks[func(*LocalBackend, *tailcfg.Hostinfo, ipn.PrefsView) bool] + +// maybeSentHostinfoIfChangedLocked updates the hostinfo.ServicesHash, hostinfo.WireIngress and // hostinfo.IngressEnabled fields and kicks off a Hostinfo update if the values have changed. // // b.mu must be held. -func (b *LocalBackend) updateIngressAndServiceHashLocked(prefs ipn.PrefsView) { +func (b *LocalBackend) maybeSentHostinfoIfChangedLocked(prefs ipn.PrefsView) { if b.hostinfo == nil { return } - hostInfoChanged := false - if ie := b.hasIngressEnabledLocked(); b.hostinfo.IngressEnabled != ie { - b.logf("Hostinfo.IngressEnabled changed to %v", ie) - b.hostinfo.IngressEnabled = ie - hostInfoChanged = true - } - if wire := b.shouldWireInactiveIngressLocked(); b.hostinfo.WireIngress != wire { - b.logf("Hostinfo.WireIngress changed to %v", wire) - b.hostinfo.WireIngress = wire - hostInfoChanged = true - } - latestHash := b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) - if b.hostinfo.ServicesHash != latestHash { - b.hostinfo.ServicesHash = latestHash - hostInfoChanged = true + changed := false + for _, f := range hookMaybeMutateHostinfoLocked { + if f(b, b.hostinfo, prefs) { + changed = true + } } // Kick off a Hostinfo update to control if ingress status has changed. 
- if hostInfoChanged { + if changed { b.goTracker.Go(b.doSetHostinfoFilterServices) } } @@ -7707,19 +7693,6 @@ func maybeUsernameOf(actor ipnauth.Actor) string { return username } -func (b *LocalBackend) vipServiceHash(services []*tailcfg.VIPService) string { - if len(services) == 0 { - return "" - } - buf, err := json.Marshal(services) - if err != nil { - b.logf("vipServiceHashLocked: %v", err) - return "" - } - hash := sha256.Sum256(buf) - return hex.EncodeToString(hash[:]) -} - var ( metricCurrentWatchIPNBus = clientmetric.NewGauge("localbackend_current_watch_ipn_bus") ) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 33ecb688c..bac74a33c 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -6745,7 +6745,7 @@ func TestUpdateIngressAndServiceHashLocked(t *testing.T) { if tt.hasPreviousSC { b.mu.Lock() b.serveConfig = previousSC.View() - b.hostinfo.ServicesHash = b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) + b.hostinfo.ServicesHash = vipServiceHash(b.logf, b.vipServicesFromPrefsLocked(prefs)) b.mu.Unlock() } b.serveConfig = tt.sc.View() @@ -6763,7 +6763,7 @@ func TestUpdateIngressAndServiceHashLocked(t *testing.T) { })() was := b.goTracker.StartedGoroutines() - b.updateIngressAndServiceHashLocked(prefs) + b.maybeSentHostinfoIfChangedLocked(prefs) if tt.hi != nil { if tt.hi.IngressEnabled != tt.wantIngress { @@ -6773,7 +6773,7 @@ func TestUpdateIngressAndServiceHashLocked(t *testing.T) { t.Errorf("WireIngress = %v, want %v", tt.hi.WireIngress, tt.wantWireIngress) } b.mu.Lock() - svcHash := b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) + svcHash := vipServiceHash(b.logf, b.vipServicesFromPrefsLocked(prefs)) b.mu.Unlock() if tt.hi.ServicesHash != svcHash { t.Errorf("ServicesHash = %v, want %v", tt.hi.ServicesHash, svcHash) diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 554761ed7..1c527e130 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -59,6 +59,9 @@ func init() { b.setVIPServicesTCPPortsInterceptedLocked(nil) }) + hookMaybeMutateHostinfoLocked.Add(maybeUpdateHostinfoServicesHashLocked) + hookMaybeMutateHostinfoLocked.Add(maybeUpdateHostinfoFunnelLocked) + RegisterC2N("GET /vip-services", handleC2NVIPServicesGet) } @@ -1227,7 +1230,7 @@ func handleC2NVIPServicesGet(b *LocalBackend, w http.ResponseWriter, r *http.Req b.logf("c2n: GET /vip-services received") var res tailcfg.C2NVIPServicesResponse res.VIPServices = b.VIPServices() - res.ServicesHash = b.vipServiceHash(res.VIPServices) + res.ServicesHash = vipServiceHash(b.logf, res.VIPServices) w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(res) @@ -1443,3 +1446,51 @@ func (b *LocalBackend) setVIPServicesTCPPortsInterceptedLocked(svcPorts map[tail b.shouldInterceptVIPServicesTCPPortAtomic.Store(generateInterceptVIPServicesTCPPortFunc(svcAddrPorts)) } + +func maybeUpdateHostinfoServicesHashLocked(b *LocalBackend, hi *tailcfg.Hostinfo, prefs ipn.PrefsView) bool { + latestHash := vipServiceHash(b.logf, b.vipServicesFromPrefsLocked(prefs)) + if hi.ServicesHash != latestHash { + hi.ServicesHash = latestHash + return true + } + return false +} + +func maybeUpdateHostinfoFunnelLocked(b *LocalBackend, hi *tailcfg.Hostinfo, prefs ipn.PrefsView) (changed bool) { + // The Hostinfo.IngressEnabled field is used to communicate to control whether + // the node has funnel enabled. 
+ if ie := b.hasIngressEnabledLocked(); hi.IngressEnabled != ie { + b.logf("Hostinfo.IngressEnabled changed to %v", ie) + hi.IngressEnabled = ie + changed = true + } + // The Hostinfo.WireIngress field tells control whether the user intends + // to use funnel with this node even though it is not currently enabled. + // This is an optimization to control- Funnel requires creation of DNS + // records and because DNS propagation can take time, we want to ensure + // that the records exist for any node that intends to use funnel even + // if it's not enabled. If hi.IngressEnabled is true, control knows that + // DNS records are needed, so we can save bandwidth and not send + // WireIngress. + if wire := b.shouldWireInactiveIngressLocked(); hi.WireIngress != wire { + b.logf("Hostinfo.WireIngress changed to %v", wire) + hi.WireIngress = wire + changed = true + } + return changed +} + +func vipServiceHash(logf logger.Logf, services []*tailcfg.VIPService) string { + if len(services) == 0 { + return "" + } + h := sha256.New() + jh := json.NewEncoder(h) + if err := jh.Encode(services); err != nil { + logf("vipServiceHashLocked: %v", err) + return "" + } + var buf [sha256.Size]byte + h.Sum(buf[:0]) + return hex.EncodeToString(buf[:]) +} From 6e24f509466794dd16dc25df917ecca0686efb33 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 10 Nov 2025 16:48:41 -0800 Subject: [PATCH 1630/1708] tsnet: add tstest.Shard on the slow tests So they're not all run N times on the sharded oss builders and are only run one time each. Updates tailscale/corp#28679 Change-Id: Ie21e84b06731fdc8ec3212eceb136c8fc26b0115 Signed-off-by: Brad Fitzpatrick --- tsnet/packet_filter_test.go | 2 ++ tsnet/tsnet_test.go | 15 +++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/tsnet/packet_filter_test.go b/tsnet/packet_filter_test.go index 462234222..455400eaa 100644 --- a/tsnet/packet_filter_test.go +++ b/tsnet/packet_filter_test.go @@ -12,6 +12,7 @@ import ( "tailscale.com/ipn" "tailscale.com/tailcfg" + "tailscale.com/tstest" "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/netmap" @@ -47,6 +48,7 @@ func waitFor(t testing.TB, ctx context.Context, s *Server, f func(*netmap.Networ // netmaps and turning them into packet filters together. Only the control-plane // side is mocked out. 
func TestPacketFilterFromNetmap(t *testing.T) { + tstest.Shard(t) t.Parallel() var key key.NodePublic diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index c19ae3c14..b0deb2079 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -235,6 +235,7 @@ func startServer(t *testing.T, ctx context.Context, controlURL, hostname string) } func TestDialBlocks(t *testing.T) { + tstest.Shard(t) tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -282,6 +283,7 @@ func TestDialBlocks(t *testing.T) { // - s2 can dial through the subnet router functionality (getting a synthetic RST // that we verify we generated & saw) func TestConn(t *testing.T) { + tstest.Shard(t) tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -414,6 +416,7 @@ func TestConn(t *testing.T) { func TestLoopbackLocalAPI(t *testing.T) { flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/8557") + tstest.Shard(t) tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -489,6 +492,7 @@ func TestLoopbackLocalAPI(t *testing.T) { func TestLoopbackSOCKS5(t *testing.T) { flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/8198") + tstest.Shard(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -539,6 +543,7 @@ func TestLoopbackSOCKS5(t *testing.T) { } func TestTailscaleIPs(t *testing.T) { + tstest.Shard(t) controlURL, _ := startControl(t) tmp := t.TempDir() @@ -581,6 +586,7 @@ func TestTailscaleIPs(t *testing.T) { // TestListenerCleanup is a regression test to verify that s.Close doesn't // deadlock if a listener is still open. func TestListenerCleanup(t *testing.T) { + tstest.Shard(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -623,6 +629,7 @@ func (wc *closeTrackConn) Close() error { // tests https://github.com/tailscale/tailscale/issues/6973 -- that we can start a tsnet server, // stop it, and restart it, even on Windows. 
func TestStartStopStartGetsSameIP(t *testing.T) { + tstest.Shard(t) controlURL, _ := startControl(t) tmp := t.TempDir() @@ -672,6 +679,7 @@ func TestStartStopStartGetsSameIP(t *testing.T) { } func TestFunnel(t *testing.T) { + tstest.Shard(t) ctx, dialCancel := context.WithTimeout(context.Background(), 30*time.Second) defer dialCancel() @@ -733,6 +741,7 @@ func TestFunnel(t *testing.T) { } func TestListenerClose(t *testing.T) { + tstest.Shard(t) ctx := context.Background() controlURL, _ := startControl(t) @@ -812,6 +821,7 @@ func (c *bufferedConn) Read(b []byte) (int, error) { } func TestFallbackTCPHandler(t *testing.T) { + tstest.Shard(t) tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -854,6 +864,7 @@ func TestFallbackTCPHandler(t *testing.T) { } func TestCapturePcap(t *testing.T) { + tstest.Shard(t) const timeLimit = 120 ctx, cancel := context.WithTimeout(context.Background(), timeLimit*time.Second) defer cancel() @@ -907,6 +918,7 @@ func TestCapturePcap(t *testing.T) { } func TestUDPConn(t *testing.T) { + tstest.Shard(t) tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -1098,6 +1110,7 @@ func sendData(logf func(format string, args ...any), ctx context.Context, bytesC } func TestUserMetricsByteCounters(t *testing.T) { + tstest.Shard(t) ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) defer cancel() @@ -1212,6 +1225,7 @@ func TestUserMetricsByteCounters(t *testing.T) { } func TestUserMetricsRouteGauges(t *testing.T) { + tstest.Shard(t) // Windows does not seem to support or report back routes when running in // userspace via tsnet. So, we skip this check on Windows. // TODO(kradalby): Figure out if this is correct. @@ -1368,6 +1382,7 @@ func mustDirect(t *testing.T, logf logger.Logf, lc1, lc2 *local.Client) { } func TestDeps(t *testing.T) { + tstest.Shard(t) deptest.DepChecker{ GOOS: "linux", GOARCH: "amd64", From 4650061326af386d370e5ebc5b2fae018752908b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 10 Nov 2025 17:23:22 -0800 Subject: [PATCH 1631/1708] ipn/ipnlocal: fix state_test data race seen in CI Unfortunately I closed the tab and lost it in my sea of CI failures I'm currently fighting. Updates #cleanup Change-Id: I4e3a652d57d52b75238f25d104fc1987add64191 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/state_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index fca01f105..53b8f78e4 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -652,7 +652,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { } // undo the state hack above. + b.mu.Lock() b.state = ipn.Starting + b.mu.Unlock() // User wants to logout. store.awaitWrite() From 18806de400a29b035a9985f22d1390a50e38fcab Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 10 Nov 2025 20:07:33 -0800 Subject: [PATCH 1632/1708] wgengine/magicsock: validate endpoint.derpAddr in Conn.onUDPRelayAllocResp (#17828) Otherwise a zero value will panic in Conn.sendUDPStd. 
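As an illustrative aside (not part of this patch), the guard relies on the
fact that the zero value of netip.AddrPort reports IsValid() == false, so a
minimal, self-contained sketch of the property is:

	package main

	import (
		"fmt"
		"net/netip"
	)

	func main() {
		var derpAddr netip.AddrPort // zero value, e.g. when the peer has no home DERP
		// Prints "false": callers should check IsValid before using the
		// address as a send destination.
		fmt.Println(derpAddr.IsValid())
	}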
Updates #17827

Signed-off-by: Jordan Whited
---
 wgengine/magicsock/magicsock.go | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go
index 1f0a85f07..3d7b16f30 100644
--- a/wgengine/magicsock/magicsock.go
+++ b/wgengine/magicsock/magicsock.go
@@ -651,7 +651,9 @@ func (c *Conn) onUDPRelayAllocResp(allocResp UDPRelayAllocResp) {
 	ep.mu.Lock()
 	defer ep.mu.Unlock()
 	derpAddr := ep.derpAddr
-	go c.sendDiscoMessage(epAddr{ap: derpAddr}, ep.publicKey, disco.key, allocResp.Message, discoVerboseLog)
+	if derpAddr.IsValid() {
+		go c.sendDiscoMessage(epAddr{ap: derpAddr}, ep.publicKey, disco.key, allocResp.Message, discoVerboseLog)
+	}
 }

 // Synchronize waits for all [eventbus] events published

From 2ad2d4d409e6b5eac5dbecb59ce307eb3297587c Mon Sep 17 00:00:00 2001
From: Jordan Whited
Date: Mon, 10 Nov 2025 21:08:13 -0800
Subject: [PATCH 1633/1708] wgengine/magicsock: fix UDPRelayAllocReq/Resp deadlock (#17831)

Updates #17830

Signed-off-by: Jordan Whited
---
 wgengine/magicsock/magicsock.go | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go
index 3d7b16f30..f1721e1d9 100644
--- a/wgengine/magicsock/magicsock.go
+++ b/wgengine/magicsock/magicsock.go
@@ -2444,7 +2444,10 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake
 		if !nodeHasCap(c.filt, c.peers.At(peerI), c.self, tailcfg.PeerCapabilityRelay) {
 			return
 		}
-		c.allocRelayEndpointPub.Publish(UDPRelayAllocReq{
+		// [Conn.mu] must not be held while publishing, or [Conn.onUDPRelayAllocResp]
+		// can deadlock as the req sub and resp pub are the same goroutine.
+		// See #17830.
+		go c.allocRelayEndpointPub.Publish(UDPRelayAllocReq{
 			RxFromDiscoKey: sender,
 			RxFromNodeKey:  nodeKey,
 			Message:        req,

From 42ce5c88bed24817def3049d75e5a6810f172c7a Mon Sep 17 00:00:00 2001
From: Brad Fitzpatrick
Date: Mon, 10 Nov 2025 16:51:46 -0800
Subject: [PATCH 1634/1708] wgengine/magicsock: unblock Conn.Synchronize on Conn.Close

I noticed a deadlock in a test in an in-development PR where during a
shutdown storm of things (from a tsnet.Server.Close), LocalBackend was
trying to call magicsock.Conn.Synchronize but the magicsock and/or
eventbus was already shut down and no longer processing events.

Updates #16369

Change-Id: I58b1f86c8959303c3fb46e2e3b7f38f6385036f1
Signed-off-by: Brad Fitzpatrick
---
 wgengine/magicsock/magicsock.go | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go
index f1721e1d9..d44cf1c11 100644
--- a/wgengine/magicsock/magicsock.go
+++ b/wgengine/magicsock/magicsock.go
@@ -665,7 +665,10 @@ func (c *Conn) Synchronize() {
 	}
 	sp := syncPoint(make(chan struct{}))
 	c.syncPub.Publish(sp)
-	sp.Wait()
+	select {
+	case <-sp:
+	case <-c.donec:
+	}
 }

 // NewConn creates a magic Conn listening on opts.Port.
From 1eba5b0cbdf044b5a3a45fc5372f240865fb8ca3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 10 Nov 2025 15:44:55 -0800 Subject: [PATCH 1635/1708] util/eventbus: log goroutine stacks when hung in CI Updates #17680 Change-Id: Ie48dc2d64b7583d68578a28af52f6926f903ca4f Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 2 +- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 2 +- cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- tsnet/depaware.txt | 2 +- util/eventbus/subscribe.go | 7 +++++++ 9 files changed, 15 insertions(+), 8 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 01c278fbd..0a75ac43e 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -139,7 +139,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/types/structs from tailscale.com/ipn+ tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/ipn+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/net/netmon tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/ctxkey from tailscale.com/tsweb+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index ebd22770e..b800b78c6 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -848,7 +848,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/cmd/k8s-operator+ tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/cmd/k8s-operator+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ LW tailscale.com/util/cmpver from tailscale.com/net/dns+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index b249639bc..53dc998bd 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -171,7 +171,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/types/structs from tailscale.com/ipn+ tailscale.com/types/tkatype from tailscale.com/types/key+ tailscale.com/types/views from tailscale.com/tailcfg+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/net/netcheck+ tailscale.com/util/cloudenv from tailscale.com/net/dnscache+ tailscale.com/util/cmpver from tailscale.com/net/tshttpproxy+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 224026f25..e750f86e6 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -144,7 +144,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/control/controlclient+ tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from 
tailscale.com/hostinfo+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 9633e7398..17f1a22b2 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -171,7 +171,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/control/controlclient+ tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/cmpver from tailscale.com/clientupdate diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index bdc110e1a..1b5bdab91 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -416,7 +416,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/views from tailscale.com/ipn/ipnlocal+ tailscale.com/util/backoff from tailscale.com/cmd/tailscaled+ tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/control/controlclient+ tailscale.com/util/cloudenv from tailscale.com/net/dns/resolver+ tailscale.com/util/cmpver from tailscale.com/net/dns+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index ebf03b541..21ca122c4 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -253,7 +253,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/control/controlclient+ tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ LW tailscale.com/util/cmpver from tailscale.com/net/dns+ diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 4817a511a..cf91aa483 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -248,7 +248,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/control/controlclient+ tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ LW tailscale.com/util/cmpver from tailscale.com/net/dns+ diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 03d577f27..53253d330 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -7,10 +7,12 @@ import ( "context" "fmt" "reflect" + "runtime" "sync" "time" "tailscale.com/types/logger" + "tailscale.com/util/cibuild" ) type DeliveredEvent struct { @@ -329,6 +331,11 @@ func (s *SubscriberFunc[T]) dispatch(ctx context.Context, vals *queue[DeliveredE select { case <-s.slow.C: s.logf("giving up on subscriber for %T 
after %v at close", t, time.Since(start)) + if cibuild.On() { + all := make([]byte, 2<<20) + n := runtime.Stack(all, true) + s.logf("goroutine stacks:\n%s", all[:n]) + } case <-callDone: } return false From 3280dac79787d464493b2d4e735bdc1e5de0a2ef Mon Sep 17 00:00:00 2001 From: Sachin Iyer Date: Mon, 10 Nov 2025 16:05:09 -0800 Subject: [PATCH 1636/1708] wgengine/router/osrouter: fix linux magicsock port changing Fixes #17837 Signed-off-by: Sachin Iyer --- wgengine/router/osrouter/router_linux.go | 2 +- wgengine/router/osrouter/router_linux_test.go | 40 +++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/wgengine/router/osrouter/router_linux.go b/wgengine/router/osrouter/router_linux.go index 58bd0513a..196e1d552 100644 --- a/wgengine/router/osrouter/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -581,7 +581,7 @@ func (r *linuxRouter) updateMagicsockPort(port uint16, network string) error { } if port != 0 { - if err := r.nfr.AddMagicsockPortRule(*magicsockPort, network); err != nil { + if err := r.nfr.AddMagicsockPortRule(port, network); err != nil { return fmt.Errorf("add magicsock port rule: %w", err) } } diff --git a/wgengine/router/osrouter/router_linux_test.go b/wgengine/router/osrouter/router_linux_test.go index 39210ddef..929fda1b4 100644 --- a/wgengine/router/osrouter/router_linux_test.go +++ b/wgengine/router/osrouter/router_linux_test.go @@ -1290,3 +1290,43 @@ func TestIPRulesForUBNT(t *testing.T) { } } } + +func TestUpdateMagicsockPortChange(t *testing.T) { + nfr := &fakeIPTablesRunner{ + t: t, + ipt4: make(map[string][]string), + ipt6: make(map[string][]string), + } + nfr.ipt4["filter/ts-input"] = []string{} + + r := &linuxRouter{ + logf: logger.Discard, + health: new(health.Tracker), + netfilterMode: netfilterOn, + nfr: nfr, + } + + if err := r.updateMagicsockPort(12345, "udp4"); err != nil { + t.Fatalf("failed to set initial port: %v", err) + } + + if err := r.updateMagicsockPort(54321, "udp4"); err != nil { + t.Fatalf("failed to update port: %v", err) + } + + newPortRule := buildMagicsockPortRule(54321) + hasNewRule := slices.Contains(nfr.ipt4["filter/ts-input"], newPortRule) + + if !hasNewRule { + t.Errorf("firewall rule for NEW port 54321 not found.\nExpected: %s\nActual rules: %v", + newPortRule, nfr.ipt4["filter/ts-input"]) + } + + oldPortRule := buildMagicsockPortRule(12345) + hasOldRule := slices.Contains(nfr.ipt4["filter/ts-input"], oldPortRule) + + if hasOldRule { + t.Errorf("firewall rule for OLD port 12345 still exists (should be deleted).\nFound: %s\nAll rules: %v", + oldPortRule, nfr.ipt4["filter/ts-input"]) + } +} From 85cb64c4ff0537b5722f2df84393ef4d8c4c83ad Mon Sep 17 00:00:00 2001 From: Sachin Iyer Date: Tue, 11 Nov 2025 10:07:02 -0800 Subject: [PATCH 1637/1708] wf: correct IPv6 link-local range from ff80::/10 to fe80::/10 (#17840) Fixes #17833 Signed-off-by: Sachin Iyer --- wf/firewall.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wf/firewall.go b/wf/firewall.go index 076944c8d..dc1045ff8 100644 --- a/wf/firewall.go +++ b/wf/firewall.go @@ -18,7 +18,7 @@ import ( // Known addresses. 
var ( - linkLocalRange = netip.MustParsePrefix("ff80::/10") + linkLocalRange = netip.MustParsePrefix("fe80::/10") linkLocalDHCPMulticast = netip.MustParseAddr("ff02::1:2") siteLocalDHCPMulticast = netip.MustParseAddr("ff05::1:3") linkLocalRouterMulticast = netip.MustParseAddr("ff02::2") From d37884c734762cdd96d184c877b3b6eac139e5a2 Mon Sep 17 00:00:00 2001 From: Sachin Iyer Date: Wed, 12 Nov 2025 02:46:40 -0800 Subject: [PATCH 1638/1708] cmd/k8s-operator: remove early return in ingress matching (#17841) Fixes #17834 Signed-off-by: Sachin Iyer --- cmd/k8s-operator/operator.go | 2 +- cmd/k8s-operator/operator_test.go | 36 +++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index d5ff07780..6b545a827 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -1122,7 +1122,7 @@ func serviceHandlerForIngress(cl client.Client, logger *zap.SugaredLogger, ingre reqs := make([]reconcile.Request, 0) for _, ing := range ingList.Items { if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != ingressClassName { - return nil + continue } if hasProxyGroupAnnotation(&ing) { // We don't want to reconcile backend Services for Ingresses for ProxyGroups. diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 5af237342..b15c93b1c 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1698,6 +1698,42 @@ func Test_serviceHandlerForIngress(t *testing.T) { } } +func Test_serviceHandlerForIngress_multipleIngressClasses(t *testing.T) { + fc := fake.NewFakeClient() + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "backend", Namespace: "default"}, + } + mustCreate(t, fc, svc) + + mustCreate(t, fc, &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{Name: "nginx-ing", Namespace: "default"}, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("nginx"), + DefaultBackend: &networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}}, + }, + }) + + mustCreate(t, fc, &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{Name: "ts-ing", Namespace: "default"}, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}}, + }, + }) + + got := serviceHandlerForIngress(fc, zl.Sugar(), "tailscale")(context.Background(), svc) + want := []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: "default", Name: "ts-ing"}}} + + if diff := cmp.Diff(got, want); diff != "" { + t.Fatalf("unexpected reconcile requests (-got +want):\n%s", diff) + } +} + func Test_clusterDomainFromResolverConf(t *testing.T) { zl, err := zap.NewDevelopment() if err != nil { From 16e90dcb27605f1bd03bc1eda0b1d256662c89bf Mon Sep 17 00:00:00 2001 From: Sachin Iyer Date: Wed, 12 Nov 2025 07:13:21 -0800 Subject: [PATCH 1639/1708] net/batching: fix gro size handling for misordered UDP_GRO messages (#17842) Fixes #17835 Signed-off-by: Sachin Iyer --- net/batching/conn_linux.go | 2 +- net/batching/conn_linux_test.go | 36 +++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/net/batching/conn_linux.go b/net/batching/conn_linux.go index 7f6c4ed42..bd7ac25be 100644 --- a/net/batching/conn_linux.go +++ b/net/batching/conn_linux.go @@ -353,7 +353,7 @@ func 
getGSOSizeFromControl(control []byte) (int, error) { ) for len(rem) > unix.SizeofCmsghdr { - hdr, data, rem, err = unix.ParseOneSocketControlMessage(control) + hdr, data, rem, err = unix.ParseOneSocketControlMessage(rem) if err != nil { return 0, fmt.Errorf("error parsing socket control message: %w", err) } diff --git a/net/batching/conn_linux_test.go b/net/batching/conn_linux_test.go index e518c3f9f..5e3c29e5c 100644 --- a/net/batching/conn_linux_test.go +++ b/net/batching/conn_linux_test.go @@ -8,8 +8,11 @@ import ( "net" "testing" + "unsafe" + "github.com/tailscale/wireguard-go/conn" "golang.org/x/net/ipv6" + "golang.org/x/sys/unix" "tailscale.com/net/packet" ) @@ -314,3 +317,36 @@ func TestMinReadBatchMsgsLen(t *testing.T) { t.Fatalf("IdealBatchSize: %d != conn.IdealBatchSize(): %d", IdealBatchSize, conn.IdealBatchSize) } } + +func Test_getGSOSizeFromControl_MultipleMessages(t *testing.T) { + // Test that getGSOSizeFromControl correctly parses UDP_GRO when it's not the first control message. + const expectedGSOSize = 1420 + + // First message: IP_TOS + firstMsgLen := unix.CmsgSpace(1) + firstMsg := make([]byte, firstMsgLen) + hdr1 := (*unix.Cmsghdr)(unsafe.Pointer(&firstMsg[0])) + hdr1.Level = unix.SOL_IP + hdr1.Type = unix.IP_TOS + hdr1.SetLen(unix.CmsgLen(1)) + firstMsg[unix.SizeofCmsghdr] = 0 + + // Second message: UDP_GRO + secondMsgLen := unix.CmsgSpace(2) + secondMsg := make([]byte, secondMsgLen) + hdr2 := (*unix.Cmsghdr)(unsafe.Pointer(&secondMsg[0])) + hdr2.Level = unix.SOL_UDP + hdr2.Type = unix.UDP_GRO + hdr2.SetLen(unix.CmsgLen(2)) + binary.NativeEndian.PutUint16(secondMsg[unix.SizeofCmsghdr:], expectedGSOSize) + + control := append(firstMsg, secondMsg...) + + gsoSize, err := getGSOSizeFromControl(control) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if gsoSize != expectedGSOSize { + t.Errorf("got GSO size %d, want %d", gsoSize, expectedGSOSize) + } +} From e8d2f964499989d1cd99db556b0a3e3f293dd86b Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 12 Nov 2025 10:25:27 -0500 Subject: [PATCH 1640/1708] ipn/ipnlocal, net/netns: add node cap to disable netns interface binding on netext Apple clients (#17691) updates tailscale/corp#31571 It appears that on the latest macOS, iOS and tVOS versions, the work that netns is doing to bind outgoing connections to the default interface (and all of the trimmings and workarounds in netmon et al that make that work) are not needed. The kernel is extension-aware and doing nothing, is the right thing. This is, however, not the case for tailscaled (which is not a special process). To allow us to test this assertion (and where it might break things), we add a new node cap that turns this behaviour off only for network-extension equipped clients, making it possible to turn this off tailnet-wide, without breaking any tailscaled macos nodes. Signed-off-by: Jonathan Nobels --- ipn/ipnlocal/local.go | 7 ++++--- net/netns/netns.go | 23 +++++++++++++++++++---- net/netns/netns_darwin.go | 7 +++---- tailcfg/tailcfg.go | 4 ++++ 4 files changed, 30 insertions(+), 11 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8bdc1a14a..62d8ea490 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6169,9 +6169,10 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.setDebugLogsByCapabilityLocked(nm) } - // See the netns package for documentation on what this capability does. 
-	netns.SetBindToInterfaceByRoute(nm.HasCap(tailcfg.CapabilityBindToInterfaceByRoute))
-	netns.SetDisableBindConnToInterface(nm.HasCap(tailcfg.CapabilityDebugDisableBindConnToInterface))
+	// See the netns package for documentation on what these capabilities do.
+	netns.SetBindToInterfaceByRoute(b.logf, nm.HasCap(tailcfg.CapabilityBindToInterfaceByRoute))
+	netns.SetDisableBindConnToInterface(b.logf, nm.HasCap(tailcfg.CapabilityDebugDisableBindConnToInterface))
+	netns.SetDisableBindConnToInterfaceAppleExt(b.logf, nm.HasCap(tailcfg.CapabilityDebugDisableBindConnToInterfaceAppleExt))

 	b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(b.pm.CurrentPrefs())
 	if buildfeatures.HasServe {
diff --git a/net/netns/netns.go b/net/netns/netns.go
index a473506fa..ccb20d27e 100644
--- a/net/netns/netns.go
+++ b/net/netns/netns.go
@@ -39,20 +39,35 @@ var bindToInterfaceByRoute atomic.Bool
 // setting the TS_BIND_TO_INTERFACE_BY_ROUTE.
 //
 // Currently, this only changes the behaviour on macOS and Windows.
-func SetBindToInterfaceByRoute(v bool) {
+func SetBindToInterfaceByRoute(logf logger.Logf, v bool) {
+	logf("netns: bindToInterfaceByRoute set to %v", v)
 	bindToInterfaceByRoute.Store(v)
 }

 var disableBindConnToInterface atomic.Bool

 // SetDisableBindConnToInterface disables the (normal) behavior of binding
-// connections to the default network interface.
+// connections to the default network interface on Darwin nodes.
 //
-// Currently, this only has an effect on Darwin.
-func SetDisableBindConnToInterface(v bool) {
+// Unless you intend to disable this for tailscaled on macOS (which is likely
+// to break things), you probably want to call
+// SetDisableBindConnToInterfaceAppleExt, which disables explicit interface
+// binding only when tailscaled is running inside a network extension process.
+func SetDisableBindConnToInterface(logf logger.Logf, v bool) {
+	logf("netns: disableBindConnToInterface set to %v", v)
 	disableBindConnToInterface.Store(v)
 }

+var disableBindConnToInterfaceAppleExt atomic.Bool
+
+// SetDisableBindConnToInterfaceAppleExt disables the (normal) behavior of binding
+// connections to the default network interface but only on Apple clients where
+// tailscaled is running inside a network extension.
+func SetDisableBindConnToInterfaceAppleExt(logf logger.Logf, v bool) {
+	logf("netns: disableBindConnToInterfaceAppleExt set to %v", v)
+	disableBindConnToInterfaceAppleExt.Store(v)
+}
+
 // Listener returns a new net.Listener with its Control hook func
 // initialized as necessary to run in logical network namespace that
 // doesn't route back into Tailscale.
diff --git a/net/netns/netns_darwin.go b/net/netns/netns_darwin.go
index 1f30f00d2..ff05a3f31 100644
--- a/net/netns/netns_darwin.go
+++ b/net/netns/netns_darwin.go
@@ -21,6 +21,7 @@ import (
 	"tailscale.com/net/netmon"
 	"tailscale.com/net/tsaddr"
 	"tailscale.com/types/logger"
+	"tailscale.com/version"
 )

 func control(logf logger.Logf, netMon *netmon.Monitor) func(network, address string, c syscall.RawConn) error {
@@ -36,13 +37,11 @@ var errInterfaceStateInvalid = errors.New("interface state invalid")
 // controlLogf binds c to a particular interface as necessary to dial the
 // provided (network, address).
 func controlLogf(logf logger.Logf, netMon *netmon.Monitor, network, address string, c syscall.RawConn) error {
-	if isLocalhost(address) {
-		// Don't bind to an interface for localhost connections.
+ if disableBindConnToInterface.Load() || (version.IsMacGUIVariant() && disableBindConnToInterfaceAppleExt.Load()) { return nil } - if disableBindConnToInterface.Load() { - logf("netns_darwin: binding connection to interfaces disabled") + if isLocalhost(address) { return nil } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 43ed3188f..346957803 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2460,6 +2460,10 @@ const ( // of connections to the default network interface on Darwin nodes. CapabilityDebugDisableBindConnToInterface NodeCapability = "https://tailscale.com/cap/debug-disable-bind-conn-to-interface" + // CapabilityDebugDisableBindConnToInterface disables the automatic binding + // of connections to the default network interface on Darwin nodes using network extensions + CapabilityDebugDisableBindConnToInterfaceAppleExt NodeCapability = "https://tailscale.com/cap/debug-disable-bind-conn-to-interface-apple-ext" + // CapabilityTailnetLock indicates the node may initialize tailnet lock. CapabilityTailnetLock NodeCapability = "https://tailscale.com/cap/tailnet-lock" From 27a0168cdc326830440e87fabd60bcdc00dd45c4 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Thu, 6 Nov 2025 14:53:22 -0800 Subject: [PATCH 1641/1708] util/dnsname: increase maxNameLength to account for trailing dot Fixes #17788 Signed-off-by: Fran Bull --- util/dnsname/dnsname.go | 2 +- util/dnsname/dnsname_test.go | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/util/dnsname/dnsname.go b/util/dnsname/dnsname.go index 6404a9af1..ef898ebbd 100644 --- a/util/dnsname/dnsname.go +++ b/util/dnsname/dnsname.go @@ -14,7 +14,7 @@ const ( // maxLabelLength is the maximum length of a label permitted by RFC 1035. maxLabelLength = 63 // maxNameLength is the maximum length of a DNS name. - maxNameLength = 253 + maxNameLength = 254 ) // A FQDN is a fully-qualified DNS name or name suffix. diff --git a/util/dnsname/dnsname_test.go b/util/dnsname/dnsname_test.go index 719e28be3..49eeaee48 100644 --- a/util/dnsname/dnsname_test.go +++ b/util/dnsname/dnsname_test.go @@ -59,6 +59,38 @@ func TestFQDN(t *testing.T) { } } +func TestFQDNTooLong(t *testing.T) { + // RFC 1035 says a dns name has a max size of 255 octets, and is represented as labels of len+ASCII chars so + // example.com + // is represented as + // 7example3com0 + // which is to say that if we have a trailing dot then the dots cancel out all the len bytes except the first and + // we can accept 254 chars. + + // This name is max length + name := "aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.example.com." + if len(name) != 254 { + t.Fatalf("name should be 254 chars including trailing . 
(len is %d)", len(name)) + } + got, err := ToFQDN(name) + if err != nil { + t.Fatalf("want: error to end with \"is too long to be a DNS name\", got: %v", err) + } + if string(got) != name { + t.Fatalf("want: %s, got: %s", name, got) + } + + // This name is too long + name = "x" + name + got, err = ToFQDN(name) + if got != "" { + t.Fatalf("want: \"\", got: %s", got) + } + if err == nil || !strings.HasSuffix(err.Error(), "is too long to be a DNS name") { + t.Fatalf("want: error to end with \"is too long to be a DNS name\", got: %v", err) + } +} + func TestFQDNContains(t *testing.T) { tests := []struct { a, b string From f387b1010e92fe34656e4106aee0111cb48ea9a1 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 12 Nov 2025 08:51:04 -0800 Subject: [PATCH 1642/1708] wgengine/wgcfg: remove two unused Config fields They distracted me in some refactoring. They're set but never used. Updates #17858 Change-Id: I6ec7d6841ab684a55bccca7b7cbf7da9c782694f Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/state_test.go | 14 -------------- util/deephash/tailscale_types_test.go | 1 - wgengine/bench/wg.go | 2 -- wgengine/magicsock/magicsock_test.go | 2 -- wgengine/wgcfg/config.go | 7 +------ wgengine/wgcfg/nmcfg/nmcfg.go | 2 -- wgengine/wgcfg/wgcfg_clone.go | 3 --- 7 files changed, 1 insertion(+), 30 deletions(-) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 53b8f78e4..ca281fbec 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1243,8 +1243,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { // After the auth is completed, the configs must be updated to reflect the node's netmap. wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node1.SelfNode.StableID(), Peers: []wgcfg.Peer{}, Addresses: node1.SelfNode.Addresses().AsSlice(), }, @@ -1301,8 +1299,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { // Once the auth is completed, the configs must be updated to reflect the node's netmap. wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node2.SelfNode.StableID(), Peers: []wgcfg.Peer{}, Addresses: node2.SelfNode.Addresses().AsSlice(), }, @@ -1351,8 +1347,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { // must be updated to reflect the node's netmap. 
wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node1.SelfNode.StableID(), Peers: []wgcfg.Peer{}, Addresses: node1.SelfNode.Addresses().AsSlice(), }, @@ -1376,8 +1370,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { }, wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node3.SelfNode.StableID(), Peers: []wgcfg.Peer{ { PublicKey: node1.SelfNode.Key(), @@ -1449,8 +1441,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { }, wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node1.SelfNode.StableID(), Peers: []wgcfg.Peer{}, Addresses: node1.SelfNode.Addresses().AsSlice(), }, @@ -1480,8 +1470,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { // With seamless renewal, starting a reauth should leave everything up: wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node1.SelfNode.StableID(), Peers: []wgcfg.Peer{}, Addresses: node1.SelfNode.Addresses().AsSlice(), }, @@ -1513,8 +1501,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { }, wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node1.SelfNode.StableID(), Peers: []wgcfg.Peer{}, Addresses: node1.SelfNode.Addresses().AsSlice(), }, diff --git a/util/deephash/tailscale_types_test.go b/util/deephash/tailscale_types_test.go index d76025399..eeb7fdf84 100644 --- a/util/deephash/tailscale_types_test.go +++ b/util/deephash/tailscale_types_test.go @@ -85,7 +85,6 @@ type tailscaleTypes struct { func getVal() *tailscaleTypes { return &tailscaleTypes{ &wgcfg.Config{ - Name: "foo", Addresses: []netip.Prefix{netip.PrefixFrom(netip.AddrFrom16([16]byte{3: 3}).Unmap(), 5)}, Peers: []wgcfg.Peer{ { diff --git a/wgengine/bench/wg.go b/wgengine/bench/wg.go index 4de7677f2..f0fa38bf9 100644 --- a/wgengine/bench/wg.go +++ b/wgengine/bench/wg.go @@ -38,7 +38,6 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. k1 := key.NewNode() c1 := wgcfg.Config{ - Name: "e1", PrivateKey: k1, Addresses: []netip.Prefix{a1}, } @@ -65,7 +64,6 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. l2 := logger.WithPrefix(logf, "e2: ") k2 := key.NewNode() c2 := wgcfg.Config{ - Name: "e2", PrivateKey: k2, Addresses: []netip.Prefix{a2}, } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 60620b141..e91dac2ec 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -1059,7 +1059,6 @@ func testTwoDevicePing(t *testing.T, d *devices) { }) m1cfg := &wgcfg.Config{ - Name: "peer1", PrivateKey: m1.privateKey, Addresses: []netip.Prefix{netip.MustParsePrefix("1.0.0.1/32")}, Peers: []wgcfg.Peer{ @@ -1071,7 +1070,6 @@ func testTwoDevicePing(t *testing.T, d *devices) { }, } m2cfg := &wgcfg.Config{ - Name: "peer2", PrivateKey: m2.privateKey, Addresses: []netip.Prefix{netip.MustParsePrefix("1.0.0.2/32")}, Peers: []wgcfg.Peer{ diff --git a/wgengine/wgcfg/config.go b/wgengine/wgcfg/config.go index 926964a4b..2734f6c6e 100644 --- a/wgengine/wgcfg/config.go +++ b/wgengine/wgcfg/config.go @@ -8,7 +8,6 @@ import ( "net/netip" "slices" - "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logid" ) @@ -18,8 +17,6 @@ import ( // Config is a WireGuard configuration. // It only supports the set of things Tailscale uses. 
type Config struct { - Name string - NodeID tailcfg.StableNodeID PrivateKey key.NodePrivate Addresses []netip.Prefix MTU uint16 @@ -40,9 +37,7 @@ func (c *Config) Equal(o *Config) bool { if c == nil || o == nil { return c == o } - return c.Name == o.Name && - c.NodeID == o.NodeID && - c.PrivateKey.Equal(o.PrivateKey) && + return c.PrivateKey.Equal(o.PrivateKey) && c.MTU == o.MTU && c.NetworkLogging == o.NetworkLogging && slices.Equal(c.Addresses, o.Addresses) && diff --git a/wgengine/wgcfg/nmcfg/nmcfg.go b/wgengine/wgcfg/nmcfg/nmcfg.go index 1add608e4..08b162730 100644 --- a/wgengine/wgcfg/nmcfg/nmcfg.go +++ b/wgengine/wgcfg/nmcfg/nmcfg.go @@ -51,7 +51,6 @@ func cidrIsSubnet(node tailcfg.NodeView, cidr netip.Prefix) bool { // WGCfg returns the NetworkMaps's WireGuard configuration. func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, exitNode tailcfg.StableNodeID) (*wgcfg.Config, error) { cfg := &wgcfg.Config{ - Name: "tailscale", PrivateKey: nm.PrivateKey, Addresses: nm.GetAddresses().AsSlice(), Peers: make([]wgcfg.Peer, 0, len(nm.Peers)), @@ -59,7 +58,6 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, // Setup log IDs for data plane audit logging. if nm.SelfNode.Valid() { - cfg.NodeID = nm.SelfNode.StableID() canNetworkLog := nm.SelfNode.HasCap(tailcfg.CapabilityDataPlaneAuditLogs) logExitFlowEnabled := nm.SelfNode.HasCap(tailcfg.NodeAttrLogExitFlows) if canNetworkLog && nm.SelfNode.DataPlaneAuditLogID() != "" && nm.DomainAuditLogID != "" { diff --git a/wgengine/wgcfg/wgcfg_clone.go b/wgengine/wgcfg/wgcfg_clone.go index 749d8d816..9f3cabde1 100644 --- a/wgengine/wgcfg/wgcfg_clone.go +++ b/wgengine/wgcfg/wgcfg_clone.go @@ -8,7 +8,6 @@ package wgcfg import ( "net/netip" - "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logid" "tailscale.com/types/ptr" @@ -35,8 +34,6 @@ func (src *Config) Clone() *Config { // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
var _ConfigCloneNeedsRegeneration = Config(struct { - Name string - NodeID tailcfg.StableNodeID PrivateKey key.NodePrivate Addresses []netip.Prefix MTU uint16 From 37aa7e6935d5808158e7c9755ba8402a36b87925 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Wed, 12 Nov 2025 08:16:51 -0800 Subject: [PATCH 1643/1708] util/dnsname: fix test error message Updates #17788 Signed-off-by: Fran Bull --- util/dnsname/dnsname_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/dnsname/dnsname_test.go b/util/dnsname/dnsname_test.go index 49eeaee48..b038bb1bd 100644 --- a/util/dnsname/dnsname_test.go +++ b/util/dnsname/dnsname_test.go @@ -74,7 +74,7 @@ func TestFQDNTooLong(t *testing.T) { } got, err := ToFQDN(name) if err != nil { - t.Fatalf("want: error to end with \"is too long to be a DNS name\", got: %v", err) + t.Fatalf("want: no error, got: %v", err) } if string(got) != name { t.Fatalf("want: %s, got: %s", name, got) From 31fe75ad9eb9d9a48ded976e07ba60f4a734f4a6 Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 10 Nov 2025 15:02:31 +0000 Subject: [PATCH 1644/1708] licenses: update license notices Signed-off-by: License Updater --- licenses/apple.md | 11 ++++++----- licenses/tailscale.md | 1 + licenses/windows.md | 16 +++++++++------- 3 files changed, 16 insertions(+), 12 deletions(-) diff --git a/licenses/apple.md b/licenses/apple.md index 4c50e9559..2a795ddbb 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -29,6 +29,7 @@ See also the dependencies in the [Tailscale CLI][]. - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) + - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.7.1/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) @@ -67,13 +68,13 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.42.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.43.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/df929982:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.44.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.46.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.17.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.36.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.35.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.29.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.37.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.36.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.30.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 0ef5bcf61..c04e55563 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -37,6 +37,7 @@ Some packages may only be included on certain architectures or operating systems - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) + - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.7.1/LICENSE)) - [github.com/creack/pty](https://pkg.go.dev/github.com/creack/pty) ([MIT](https://github.com/creack/pty/blob/v1.1.23/LICENSE)) - 
[github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/a09d6be7affa/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) diff --git a/licenses/windows.md b/licenses/windows.md index b284aa136..06a5712ce 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -15,6 +15,7 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/beorn7/perks/quantile](https://pkg.go.dev/github.com/beorn7/perks/quantile) ([MIT](https://github.com/beorn7/perks/blob/v1.0.1/LICENSE)) - [github.com/cespare/xxhash/v2](https://pkg.go.dev/github.com/cespare/xxhash/v2) ([MIT](https://github.com/cespare/xxhash/blob/v2.3.0/LICENSE.txt)) - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) + - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.7.1/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) @@ -36,9 +37,9 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/munnerz/goautoneg](https://pkg.go.dev/github.com/munnerz/goautoneg) ([BSD-3-Clause](https://github.com/munnerz/goautoneg/blob/a7dc8b61c822/LICENSE)) - [github.com/nfnt/resize](https://pkg.go.dev/github.com/nfnt/resize) ([ISC](https://github.com/nfnt/resize/blob/83c6a9932646/LICENSE)) - [github.com/peterbourgon/diskv](https://pkg.go.dev/github.com/peterbourgon/diskv) ([MIT](https://github.com/peterbourgon/diskv/blob/v2.0.1/LICENSE)) - - [github.com/prometheus/client_golang/prometheus](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) ([Apache-2.0](https://github.com/prometheus/client_golang/blob/v1.23.0/LICENSE)) + - [github.com/prometheus/client_golang/prometheus](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) ([Apache-2.0](https://github.com/prometheus/client_golang/blob/v1.23.2/LICENSE)) - [github.com/prometheus/client_model/go](https://pkg.go.dev/github.com/prometheus/client_model/go) ([Apache-2.0](https://github.com/prometheus/client_model/blob/v0.6.2/LICENSE)) - - [github.com/prometheus/common](https://pkg.go.dev/github.com/prometheus/common) ([Apache-2.0](https://github.com/prometheus/common/blob/v0.65.0/LICENSE)) + - [github.com/prometheus/common](https://pkg.go.dev/github.com/prometheus/common) ([Apache-2.0](https://github.com/prometheus/common/blob/v0.66.1/LICENSE)) - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/992244df8c5a/LICENSE)) @@ -47,19 +48,20 @@ 
Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) + - [go.yaml.in/yaml/v2](https://pkg.go.dev/go.yaml.in/yaml/v2) ([Apache-2.0](https://github.com/yaml/go-yaml/blob/v2.4.2/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.42.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.43.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/df929982:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.28.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.44.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.46.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.17.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.36.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.35.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.37.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.36.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.7/LICENSE)) + - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.8/LICENSE)) - [gopkg.in/Knetic/govaluate.v3](https://pkg.go.dev/gopkg.in/Knetic/govaluate.v3) ([MIT](https://github.com/Knetic/govaluate/blob/v3.0.0/LICENSE)) - [gopkg.in/yaml.v3](https://pkg.go.dev/gopkg.in/yaml.v3) ([MIT](https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) 
([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) From f4f9dd7f8c95bdcdc84de7de7c0de4fb591b73d0 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 12 Nov 2025 15:47:01 -0800 Subject: [PATCH 1645/1708] net/udprelay: replace VNI pool with selection algorithm (#17868) This reduces memory usage when tailscaled is acting as a peer relay. Updates #17801 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 45 +++++++++++++++++++++++++++---------- net/udprelay/server_test.go | 23 +++++++++++++++++++ 2 files changed, 56 insertions(+), 12 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index de1376b64..69e0de095 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -77,11 +77,17 @@ type Server struct { addrPorts []netip.AddrPort // the ip:port pairs returned as candidate endpoints closed bool lamportID uint64 - vniPool []uint32 // the pool of available VNIs + nextVNI uint32 byVNI map[uint32]*serverEndpoint byDisco map[key.SortedPairOfDiscoPublic]*serverEndpoint } +const ( + minVNI = uint32(1) + maxVNI = uint32(1<<24 - 1) + totalPossibleVNI = maxVNI - minVNI + 1 +) + // serverEndpoint contains Server-internal [endpoint.ServerEndpoint] state. // serverEndpoint methods are not thread-safe. type serverEndpoint struct { @@ -281,15 +287,10 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve steadyStateLifetime: defaultSteadyStateLifetime, closeCh: make(chan struct{}), byDisco: make(map[key.SortedPairOfDiscoPublic]*serverEndpoint), + nextVNI: minVNI, byVNI: make(map[uint32]*serverEndpoint), } s.discoPublic = s.disco.Public() - // TODO: instead of allocating 10s of MBs for the full pool, allocate - // smaller chunks and increase as needed - s.vniPool = make([]uint32, 0, 1<<24-1) - for i := 1; i < 1<<24; i++ { - s.vniPool = append(s.vniPool, uint32(i)) - } // TODO(creachadair): Find a way to plumb this in during initialization. // As-written, messages published here will not be seen by other components @@ -572,7 +573,6 @@ func (s *Server) Close() error { defer s.mu.Unlock() clear(s.byVNI) clear(s.byDisco) - s.vniPool = nil s.closed = true s.bus.Close() }) @@ -594,7 +594,6 @@ func (s *Server) endpointGCLoop() { if v.isExpired(now, s.bindLifetime, s.steadyStateLifetime) { delete(s.byDisco, k) delete(s.byVNI, v.vni) - s.vniPool = append(s.vniPool, v.vni) } } } @@ -729,6 +728,27 @@ func (e ErrServerNotReady) Error() string { return fmt.Sprintf("server not ready, retry after %v", e.RetryAfter) } +// getNextVNILocked returns the next available VNI. It implements the +// "Traditional BSD Port Selection Algorithm" from RFC6056. This algorithm does +// not attempt to obfuscate the selection, i.e. the selection is predictable. +// For now, we favor simplicity and reducing VNI re-use over more complex +// ephemeral port (VNI) selection algorithms. +func (s *Server) getNextVNILocked() (uint32, error) { + for i := uint32(0); i < totalPossibleVNI; i++ { + vni := s.nextVNI + if vni == maxVNI { + s.nextVNI = minVNI + } else { + s.nextVNI++ + } + _, ok := s.byVNI[vni] + if !ok { + return vni, nil + } + } + return 0, errors.New("VNI pool exhausted") +} + // AllocateEndpoint allocates an [endpoint.ServerEndpoint] for the provided pair // of [key.DiscoPublic]'s. If an allocation already exists for discoA and discoB // it is returned without modification/reallocation. 
AllocateEndpoint returns @@ -777,8 +797,9 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv }, nil } - if len(s.vniPool) == 0 { - return endpoint.ServerEndpoint{}, errors.New("VNI pool exhausted") + vni, err := s.getNextVNILocked() + if err != nil { + return endpoint.ServerEndpoint{}, err } s.lamportID++ @@ -786,10 +807,10 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv discoPubKeys: pair, lamportID: s.lamportID, allocatedAt: time.Now(), + vni: vni, } e.discoSharedSecrets[0] = s.disco.Shared(e.discoPubKeys.Get()[0]) e.discoSharedSecrets[1] = s.disco.Shared(e.discoPubKeys.Get()[1]) - e.vni, s.vniPool = s.vniPool[0], s.vniPool[1:] s.byDisco[pair] = e s.byVNI[e.vni] = e diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index 8fc4a4f78..bf7f0a9b5 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + qt "github.com/frankban/quicktest" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "go4.org/mem" @@ -319,3 +320,25 @@ func TestServer(t *testing.T) { }) } } + +func TestServer_getNextVNILocked(t *testing.T) { + t.Parallel() + c := qt.New(t) + s := &Server{ + nextVNI: minVNI, + byVNI: make(map[uint32]*serverEndpoint), + } + for i := uint64(0); i < uint64(totalPossibleVNI); i++ { + vni, err := s.getNextVNILocked() + if err != nil { // using quicktest here triples test time + t.Fatal(err) + } + s.byVNI[vni] = nil + } + c.Assert(s.nextVNI, qt.Equals, minVNI) + _, err := s.getNextVNILocked() + c.Assert(err, qt.IsNotNil) + delete(s.byVNI, minVNI) + _, err = s.getNextVNILocked() + c.Assert(err, qt.IsNil) +} From 6ac80b7334eb978390c75134a82462d43c78f029 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Wed, 12 Nov 2025 17:53:39 -0500 Subject: [PATCH 1646/1708] cmd/{cloner,viewer}: handle maps of views Instead of trying to call View() on something that's already a View type (or trying to Clone the view unnecessarily), we can re-use the existing View values in a map[T]ViewType. Fixes #17866 Signed-off-by: Andrew Dunham --- cmd/cloner/cloner.go | 14 ++++-- cmd/viewer/tests/tests.go | 6 ++- cmd/viewer/tests/tests_clone.go | 17 +++++++ cmd/viewer/tests/tests_view.go | 78 ++++++++++++++++++++++++++++++++- cmd/viewer/viewer.go | 15 +++++-- 5 files changed, 120 insertions(+), 10 deletions(-) diff --git a/cmd/cloner/cloner.go b/cmd/cloner/cloner.go index 544d00518..917f4856d 100644 --- a/cmd/cloner/cloner.go +++ b/cmd/cloner/cloner.go @@ -192,7 +192,16 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("\t\tdst.%s[k] = append([]%s{}, src.%s[k]...)", fname, n, fname) writef("\t}") writef("}") - } else if codegen.ContainsPointers(elem) { + } else if codegen.IsViewType(elem) || !codegen.ContainsPointers(elem) { + // If the map values are view types (which are + // immutable and don't need cloning) or don't + // themselves contain pointers, we can just + // clone the map itself. + it.Import("", "maps") + writef("\tdst.%s = maps.Clone(src.%s)", fname, fname) + } else { + // Otherwise we need to clone each element of + // the map. 
writef("if dst.%s != nil {", fname) writef("\tdst.%s = map[%s]%s{}", fname, it.QualifiedName(ft.Key()), it.QualifiedName(elem)) writef("\tfor k, v := range src.%s {", fname) @@ -228,9 +237,6 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("\t}") writef("}") - } else { - it.Import("", "maps") - writef("\tdst.%s = maps.Clone(src.%s)", fname, fname) } case *types.Interface: // If ft is an interface with a "Clone() ft" method, it can be used to clone the field. diff --git a/cmd/viewer/tests/tests.go b/cmd/viewer/tests/tests.go index 4020e5651..d1c753db7 100644 --- a/cmd/viewer/tests/tests.go +++ b/cmd/viewer/tests/tests.go @@ -13,7 +13,7 @@ import ( "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/viewer --type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers,StructWithTypeAliasFields,GenericTypeAliasStruct --clone-only-type=OnlyGetClone +//go:generate go run tailscale.com/cmd/viewer --type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers,StructWithTypeAliasFields,GenericTypeAliasStruct,StructWithMapOfViews --clone-only-type=OnlyGetClone type StructWithoutPtrs struct { Int int @@ -238,3 +238,7 @@ type GenericTypeAliasStruct[T integer, T2 views.ViewCloner[T2, V2], V2 views.Str NonCloneable T Cloneable T2 } + +type StructWithMapOfViews struct { + MapOfViews map[string]StructWithoutPtrsView +} diff --git a/cmd/viewer/tests/tests_clone.go b/cmd/viewer/tests/tests_clone.go index 106a9b684..4602b9d88 100644 --- a/cmd/viewer/tests/tests_clone.go +++ b/cmd/viewer/tests/tests_clone.go @@ -547,3 +547,20 @@ func _GenericTypeAliasStructCloneNeedsRegeneration[T integer, T2 views.ViewClone Cloneable T2 }{}) } + +// Clone makes a deep copy of StructWithMapOfViews. +// The result aliases no memory with the original. +func (src *StructWithMapOfViews) Clone() *StructWithMapOfViews { + if src == nil { + return nil + } + dst := new(StructWithMapOfViews) + *dst = *src + dst.MapOfViews = maps.Clone(src.MapOfViews) + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _StructWithMapOfViewsCloneNeedsRegeneration = StructWithMapOfViews(struct { + MapOfViews map[string]StructWithoutPtrsView +}{}) diff --git a/cmd/viewer/tests/tests_view.go b/cmd/viewer/tests/tests_view.go index e50a71c9e..495281c23 100644 --- a/cmd/viewer/tests/tests_view.go +++ b/cmd/viewer/tests/tests_view.go @@ -16,7 +16,7 @@ import ( "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers,StructWithTypeAliasFields,GenericTypeAliasStruct +//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers,StructWithTypeAliasFields,GenericTypeAliasStruct,StructWithMapOfViews // View returns a read-only view of StructWithPtrs. 
func (p *StructWithPtrs) View() StructWithPtrsView { @@ -1053,3 +1053,79 @@ func _GenericTypeAliasStructViewNeedsRegeneration[T integer, T2 views.ViewCloner Cloneable T2 }{}) } + +// View returns a read-only view of StructWithMapOfViews. +func (p *StructWithMapOfViews) View() StructWithMapOfViewsView { + return StructWithMapOfViewsView{ж: p} +} + +// StructWithMapOfViewsView provides a read-only view over StructWithMapOfViews. +// +// Its methods should only be called if `Valid()` returns true. +type StructWithMapOfViewsView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *StructWithMapOfViews +} + +// Valid reports whether v's underlying value is non-nil. +func (v StructWithMapOfViewsView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v StructWithMapOfViewsView) AsStruct() *StructWithMapOfViews { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithMapOfViewsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithMapOfViewsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +func (v *StructWithMapOfViewsView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x StructWithMapOfViews + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithMapOfViewsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x StructWithMapOfViews + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +func (v StructWithMapOfViewsView) MapOfViews() views.Map[string, StructWithoutPtrsView] { + return views.MapOf(v.ж.MapOfViews) +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _StructWithMapOfViewsViewNeedsRegeneration = StructWithMapOfViews(struct { + MapOfViews map[string]StructWithoutPtrsView +}{}) diff --git a/cmd/viewer/viewer.go b/cmd/viewer/viewer.go index 4fd81ea51..3fae737cd 100644 --- a/cmd/viewer/viewer.go +++ b/cmd/viewer/viewer.go @@ -367,14 +367,21 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, fie case *types.Struct, *types.Named, *types.Alias: strucT := u args.FieldType = it.QualifiedName(fieldType) - if codegen.ContainsPointers(strucT) { + + // We need to call View() unless the type is + // either a View itself or does not contain + // pointers (and can thus be shallow-copied). + // + // Otherwise, we need to create a View of the + // map value. 
+ if codegen.IsViewType(strucT) || !codegen.ContainsPointers(strucT) { + template = "mapField" + args.MapValueType = it.QualifiedName(mElem) + } else { args.MapFn = "t.View()" template = "mapFnField" args.MapValueType = it.QualifiedName(mElem) args.MapValueView = appendNameSuffix(args.MapValueType, "View") - } else { - template = "mapField" - args.MapValueType = it.QualifiedName(mElem) } case *types.Basic: template = "mapField" From ca9b68aafd16eaba9b6847cd4421f8ecafc160c5 Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Thu, 13 Nov 2025 07:19:17 -0500 Subject: [PATCH 1647/1708] cmd/tailscale/cli: remove service flag from funnel command (#17850) Fixes #17849. Signed-off-by: Naman Sood --- cmd/tailscale/cli/serve_v2.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 74458a950..e194b1e10 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -236,10 +236,10 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { if subcmd == serve { fs.UintVar(&e.http, "http", 0, "Expose an HTTP server at the specified port") fs.Var(&acceptAppCapsFlag{Value: &e.acceptAppCaps}, "accept-app-caps", "App capabilities to forward to the server (specify multiple capabilities with a comma-separated list)") + fs.Var(&serviceNameFlag{Value: &e.service}, "service", "Serve for a service with distinct virtual IP instead on node itself.") } fs.UintVar(&e.tcp, "tcp", 0, "Expose a TCP forwarder to forward raw TCP packets at the specified port") fs.UintVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", 0, "Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port") - fs.Var(&serviceNameFlag{Value: &e.service}, "service", "Serve for a service with distinct virtual IP instead on node itself.") fs.BoolVar(&e.yes, "yes", false, "Update without interactive prompts (default false)") fs.BoolVar(&e.tun, "tun", false, "Forward all traffic to the local machine (default false), only supported for services. Refer to docs for more information.") }), From 08e74effc0f0099b33ef266a098c52a406b76a5b Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Wed, 12 Nov 2025 23:22:55 -0500 Subject: [PATCH 1648/1708] cmd/cloner: support cloning arbitrarily-nested maps Fixes #17870 Signed-off-by: Andrew Dunham --- cmd/cloner/cloner.go | 139 ++++++++++++++++++++------ cmd/cloner/cloner_test.go | 106 ++++++++++++++++++++ cmd/cloner/clonerex/clonerex.go | 14 ++- cmd/cloner/clonerex/clonerex_clone.go | 127 ++++++++++++++++++++++- 4 files changed, 354 insertions(+), 32 deletions(-) diff --git a/cmd/cloner/cloner.go b/cmd/cloner/cloner.go index 917f4856d..a81bd10bd 100644 --- a/cmd/cloner/cloner.go +++ b/cmd/cloner/cloner.go @@ -201,40 +201,23 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("\tdst.%s = maps.Clone(src.%s)", fname, fname) } else { // Otherwise we need to clone each element of - // the map. + // the map using our recursive helper. 
writef("if dst.%s != nil {", fname) writef("\tdst.%s = map[%s]%s{}", fname, it.QualifiedName(ft.Key()), it.QualifiedName(elem)) writef("\tfor k, v := range src.%s {", fname) - switch elem := elem.Underlying().(type) { - case *types.Pointer: - writef("\t\tif v == nil { dst.%s[k] = nil } else {", fname) - if base := elem.Elem().Underlying(); codegen.ContainsPointers(base) { - if _, isIface := base.(*types.Interface); isIface { - it.Import("", "tailscale.com/types/ptr") - writef("\t\t\tdst.%s[k] = ptr.To((*v).Clone())", fname) - } else { - writef("\t\t\tdst.%s[k] = v.Clone()", fname) - } - } else { - it.Import("", "tailscale.com/types/ptr") - writef("\t\t\tdst.%s[k] = ptr.To(*v)", fname) - } - writef("}") - case *types.Interface: - if cloneResultType := methodResultType(elem, "Clone"); cloneResultType != nil { - if _, isPtr := cloneResultType.(*types.Pointer); isPtr { - writef("\t\tdst.%s[k] = *(v.Clone())", fname) - } else { - writef("\t\tdst.%s[k] = v.Clone()", fname) - } - } else { - writef(`panic("%s (%v) does not have a Clone method")`, fname, elem) - } - default: - writef("\t\tdst.%s[k] = *(v.Clone())", fname) - } - + // Use a recursive helper here; this handles + // arbitrarily nested maps in addition to + // simpler types. + writeMapValueClone(mapValueCloneParams{ + Buf: buf, + It: it, + Elem: elem, + SrcExpr: "v", + DstExpr: fmt.Sprintf("dst.%s[k]", fname), + BaseIndent: "\t", + Depth: 1, + }) writef("\t}") writef("}") } @@ -277,3 +260,99 @@ func methodResultType(typ types.Type, method string) types.Type { } return sig.Results().At(0).Type() } + +type mapValueCloneParams struct { + // Buf is the buffer to write generated code to + Buf *bytes.Buffer + // It is the import tracker for managing imports. + It *codegen.ImportTracker + // Elem is the type of the map value to clone + Elem types.Type + // SrcExpr is the expression for the source value (e.g., "v", "v2", "v3") + SrcExpr string + // DstExpr is the expression for the destination (e.g., "dst.Field[k]", "dst.Field[k][k2]") + DstExpr string + // BaseIndent is the "base" indentation string for the generated code + // (i.e. 1 or more tabs). Additional indentation will be added based on + // the Depth parameter. + BaseIndent string + // Depth is the current nesting depth (1 for first level, 2 for second, etc.) + Depth int +} + +// writeMapValueClone generates code to clone a map value recursively. +// It handles arbitrary nesting of maps, pointers, and interfaces. +func writeMapValueClone(params mapValueCloneParams) { + indent := params.BaseIndent + strings.Repeat("\t", params.Depth) + writef := func(format string, args ...any) { + fmt.Fprintf(params.Buf, indent+format+"\n", args...) 
+ } + + switch elem := params.Elem.Underlying().(type) { + case *types.Pointer: + writef("if %s == nil { %s = nil } else {", params.SrcExpr, params.DstExpr) + if base := elem.Elem().Underlying(); codegen.ContainsPointers(base) { + if _, isIface := base.(*types.Interface); isIface { + params.It.Import("", "tailscale.com/types/ptr") + writef("\t%s = ptr.To((*%s).Clone())", params.DstExpr, params.SrcExpr) + } else { + writef("\t%s = %s.Clone()", params.DstExpr, params.SrcExpr) + } + } else { + params.It.Import("", "tailscale.com/types/ptr") + writef("\t%s = ptr.To(*%s)", params.DstExpr, params.SrcExpr) + } + writef("}") + + case *types.Map: + // Recursively handle nested maps + innerElem := elem.Elem() + if codegen.IsViewType(innerElem) || !codegen.ContainsPointers(innerElem) { + // Inner map values don't need deep cloning + params.It.Import("", "maps") + writef("%s = maps.Clone(%s)", params.DstExpr, params.SrcExpr) + } else { + // Inner map values need cloning + keyType := params.It.QualifiedName(elem.Key()) + valueType := params.It.QualifiedName(innerElem) + // Generate unique variable names for nested loops based on depth + keyVar := fmt.Sprintf("k%d", params.Depth+1) + valVar := fmt.Sprintf("v%d", params.Depth+1) + + writef("if %s == nil {", params.SrcExpr) + writef("\t%s = nil", params.DstExpr) + writef("\tcontinue") + writef("}") + writef("%s = map[%s]%s{}", params.DstExpr, keyType, valueType) + writef("for %s, %s := range %s {", keyVar, valVar, params.SrcExpr) + + // Recursively generate cloning code for the nested map value + nestedDstExpr := fmt.Sprintf("%s[%s]", params.DstExpr, keyVar) + writeMapValueClone(mapValueCloneParams{ + Buf: params.Buf, + It: params.It, + Elem: innerElem, + SrcExpr: valVar, + DstExpr: nestedDstExpr, + BaseIndent: params.BaseIndent, + Depth: params.Depth + 1, + }) + + writef("}") + } + + case *types.Interface: + if cloneResultType := methodResultType(elem, "Clone"); cloneResultType != nil { + if _, isPtr := cloneResultType.(*types.Pointer); isPtr { + writef("%s = *(%s.Clone())", params.DstExpr, params.SrcExpr) + } else { + writef("%s = %s.Clone()", params.DstExpr, params.SrcExpr) + } + } else { + writef(`panic("map value (%%v) does not have a Clone method")`, elem) + } + + default: + writef("%s = *(%s.Clone())", params.DstExpr, params.SrcExpr) + } +} diff --git a/cmd/cloner/cloner_test.go b/cmd/cloner/cloner_test.go index 3556c14bc..754a4ac49 100644 --- a/cmd/cloner/cloner_test.go +++ b/cmd/cloner/cloner_test.go @@ -108,3 +108,109 @@ func TestInterfaceContainer(t *testing.T) { }) } } + +func TestMapWithPointers(t *testing.T) { + num1, num2 := 42, 100 + orig := &clonerex.MapWithPointers{ + Nested: map[string]*int{ + "foo": &num1, + "bar": &num2, + }, + WithCloneMethod: map[string]*clonerex.SliceContainer{ + "container1": {Slice: []*int{&num1, &num2}}, + "container2": {Slice: []*int{&num1}}, + }, + CloneInterface: map[string]clonerex.Cloneable{ + "impl1": &clonerex.CloneableImpl{Value: 123}, + "impl2": &clonerex.CloneableImpl{Value: 456}, + }, + } + + cloned := orig.Clone() + if !reflect.DeepEqual(orig, cloned) { + t.Errorf("Clone() = %v, want %v", cloned, orig) + } + + // Mutate cloned.Nested pointer values + *cloned.Nested["foo"] = 999 + if *orig.Nested["foo"] == 999 { + t.Errorf("Clone() aliased memory in Nested: original was modified") + } + + // Mutate cloned.WithCloneMethod slice values + *cloned.WithCloneMethod["container1"].Slice[0] = 888 + if *orig.WithCloneMethod["container1"].Slice[0] == 888 { + t.Errorf("Clone() aliased memory in WithCloneMethod: 
original was modified") + } + + // Mutate cloned.CloneInterface values + if impl, ok := cloned.CloneInterface["impl1"].(*clonerex.CloneableImpl); ok { + impl.Value = 777 + if origImpl, ok := orig.CloneInterface["impl1"].(*clonerex.CloneableImpl); ok { + if origImpl.Value == 777 { + t.Errorf("Clone() aliased memory in CloneInterface: original was modified") + } + } + } +} + +func TestDeeplyNestedMap(t *testing.T) { + num := 123 + orig := &clonerex.DeeplyNestedMap{ + ThreeLevels: map[string]map[string]map[string]int{ + "a": { + "b": {"c": 1, "d": 2}, + "e": {"f": 3}, + }, + "g": { + "h": {"i": 4}, + }, + }, + FourLevels: map[string]map[string]map[string]map[string]*clonerex.SliceContainer{ + "l1a": { + "l2a": { + "l3a": { + "l4a": {Slice: []*int{&num}}, + "l4b": {Slice: []*int{&num, &num}}, + }, + }, + }, + }, + } + + cloned := orig.Clone() + if !reflect.DeepEqual(orig, cloned) { + t.Errorf("Clone() = %v, want %v", cloned, orig) + } + + // Mutate the clone's ThreeLevels map + cloned.ThreeLevels["a"]["b"]["c"] = 777 + if orig.ThreeLevels["a"]["b"]["c"] == 777 { + t.Errorf("Clone() aliased memory in ThreeLevels: original was modified") + } + + // Mutate the clone's FourLevels map at the deepest pointer level + *cloned.FourLevels["l1a"]["l2a"]["l3a"]["l4a"].Slice[0] = 666 + if *orig.FourLevels["l1a"]["l2a"]["l3a"]["l4a"].Slice[0] == 666 { + t.Errorf("Clone() aliased memory in FourLevels: original was modified") + } + + // Add a new top-level key to the clone's FourLevels map + newNum := 999 + cloned.FourLevels["l1b"] = map[string]map[string]map[string]*clonerex.SliceContainer{ + "l2b": { + "l3b": { + "l4c": {Slice: []*int{&newNum}}, + }, + }, + } + if _, exists := orig.FourLevels["l1b"]; exists { + t.Errorf("Clone() aliased FourLevels map: new top-level key appeared in original") + } + + // Add a new nested key to the clone's FourLevels map + cloned.FourLevels["l1a"]["l2a"]["l3a"]["l4c"] = &clonerex.SliceContainer{Slice: []*int{&newNum}} + if _, exists := orig.FourLevels["l1a"]["l2a"]["l3a"]["l4c"]; exists { + t.Errorf("Clone() aliased FourLevels map: new nested key appeared in original") + } +} diff --git a/cmd/cloner/clonerex/clonerex.go b/cmd/cloner/clonerex/clonerex.go index 6463f9144..b9f6d60de 100644 --- a/cmd/cloner/clonerex/clonerex.go +++ b/cmd/cloner/clonerex/clonerex.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type SliceContainer,InterfaceContainer +//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type SliceContainer,InterfaceContainer,MapWithPointers,DeeplyNestedMap // Package clonerex is an example package for the cloner tool. 
package clonerex @@ -32,3 +32,15 @@ func (c *CloneableImpl) Clone() Cloneable { type InterfaceContainer struct { Interface Cloneable } + +type MapWithPointers struct { + Nested map[string]*int + WithCloneMethod map[string]*SliceContainer + CloneInterface map[string]Cloneable +} + +// DeeplyNestedMap tests arbitrary depth of map nesting (3+ levels) +type DeeplyNestedMap struct { + ThreeLevels map[string]map[string]map[string]int + FourLevels map[string]map[string]map[string]map[string]*SliceContainer +} diff --git a/cmd/cloner/clonerex/clonerex_clone.go b/cmd/cloner/clonerex/clonerex_clone.go index 533d7e723..13e1276c4 100644 --- a/cmd/cloner/clonerex/clonerex_clone.go +++ b/cmd/cloner/clonerex/clonerex_clone.go @@ -6,6 +6,8 @@ package clonerex import ( + "maps" + "tailscale.com/types/ptr" ) @@ -54,9 +56,114 @@ var _InterfaceContainerCloneNeedsRegeneration = InterfaceContainer(struct { Interface Cloneable }{}) +// Clone makes a deep copy of MapWithPointers. +// The result aliases no memory with the original. +func (src *MapWithPointers) Clone() *MapWithPointers { + if src == nil { + return nil + } + dst := new(MapWithPointers) + *dst = *src + if dst.Nested != nil { + dst.Nested = map[string]*int{} + for k, v := range src.Nested { + if v == nil { + dst.Nested[k] = nil + } else { + dst.Nested[k] = ptr.To(*v) + } + } + } + if dst.WithCloneMethod != nil { + dst.WithCloneMethod = map[string]*SliceContainer{} + for k, v := range src.WithCloneMethod { + if v == nil { + dst.WithCloneMethod[k] = nil + } else { + dst.WithCloneMethod[k] = v.Clone() + } + } + } + if dst.CloneInterface != nil { + dst.CloneInterface = map[string]Cloneable{} + for k, v := range src.CloneInterface { + dst.CloneInterface[k] = v.Clone() + } + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _MapWithPointersCloneNeedsRegeneration = MapWithPointers(struct { + Nested map[string]*int + WithCloneMethod map[string]*SliceContainer + CloneInterface map[string]Cloneable +}{}) + +// Clone makes a deep copy of DeeplyNestedMap. +// The result aliases no memory with the original. +func (src *DeeplyNestedMap) Clone() *DeeplyNestedMap { + if src == nil { + return nil + } + dst := new(DeeplyNestedMap) + *dst = *src + if dst.ThreeLevels != nil { + dst.ThreeLevels = map[string]map[string]map[string]int{} + for k, v := range src.ThreeLevels { + if v == nil { + dst.ThreeLevels[k] = nil + continue + } + dst.ThreeLevels[k] = map[string]map[string]int{} + for k2, v2 := range v { + dst.ThreeLevels[k][k2] = maps.Clone(v2) + } + } + } + if dst.FourLevels != nil { + dst.FourLevels = map[string]map[string]map[string]map[string]*SliceContainer{} + for k, v := range src.FourLevels { + if v == nil { + dst.FourLevels[k] = nil + continue + } + dst.FourLevels[k] = map[string]map[string]map[string]*SliceContainer{} + for k2, v2 := range v { + if v2 == nil { + dst.FourLevels[k][k2] = nil + continue + } + dst.FourLevels[k][k2] = map[string]map[string]*SliceContainer{} + for k3, v3 := range v2 { + if v3 == nil { + dst.FourLevels[k][k2][k3] = nil + continue + } + dst.FourLevels[k][k2][k3] = map[string]*SliceContainer{} + for k4, v4 := range v3 { + if v4 == nil { + dst.FourLevels[k][k2][k3][k4] = nil + } else { + dst.FourLevels[k][k2][k3][k4] = v4.Clone() + } + } + } + } + } + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. 
+var _DeeplyNestedMapCloneNeedsRegeneration = DeeplyNestedMap(struct { + ThreeLevels map[string]map[string]map[string]int + FourLevels map[string]map[string]map[string]map[string]*SliceContainer +}{}) + // Clone duplicates src into dst and reports whether it succeeded. // To succeed, must be of types <*T, *T> or <*T, **T>, -// where T is one of SliceContainer,InterfaceContainer. +// where T is one of SliceContainer,InterfaceContainer,MapWithPointers,DeeplyNestedMap. func Clone(dst, src any) bool { switch src := src.(type) { case *SliceContainer: @@ -77,6 +184,24 @@ func Clone(dst, src any) bool { *dst = src.Clone() return true } + case *MapWithPointers: + switch dst := dst.(type) { + case *MapWithPointers: + *dst = *src.Clone() + return true + case **MapWithPointers: + *dst = src.Clone() + return true + } + case *DeeplyNestedMap: + switch dst := dst.(type) { + case *DeeplyNestedMap: + *dst = *src.Clone() + return true + case **DeeplyNestedMap: + *dst = src.Clone() + return true + } } return false } From 146ea42822cce4d9743218c94aaf13d3d171e0a4 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 8 Nov 2025 17:23:33 -0800 Subject: [PATCH 1649/1708] ipn/ipnlocal: remove all the weird locking (LockedOnEntry, UnlockEarly, etc) Fixes #11649 Updates #16369 Co-authored-by: James Sanderson Change-Id: I63eaa18fe870ddf81d84b949efac4d1b44c3db86 Signed-off-by: Brad Fitzpatrick --- control/controlclient/auto.go | 7 + ipn/ipnext/ipnext.go | 3 +- ipn/ipnlocal/local.go | 574 ++++++++++++++++------------------ ipn/ipnlocal/local_test.go | 19 +- ipn/ipnlocal/state_test.go | 201 ++---------- util/execqueue/execqueue.go | 21 +- wgengine/userspace.go | 27 ++ wgengine/watchdog.go | 6 + wgengine/wgengine.go | 7 + 9 files changed, 360 insertions(+), 505 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 52255e89f..40b02a598 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -615,6 +615,13 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM // does its thing, which may result in a call back into the client. metricQueued.Add(1) c.observerQueue.Add(func() { + c.mu.Lock() + closed := c.closed + c.mu.Unlock() + if closed { + return + } + if canSkipStatus(newSt, c.lastStatus.Load()) { metricSkippable.Add(1) if !c.direct.controlKnobs.DisableSkipStatusQueue.Load() { diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index 4ff37dc8e..fc93cc876 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -323,7 +323,8 @@ type ProfileStateChangeCallback func(_ ipn.LoginProfileView, _ ipn.PrefsView, sa // [ProfileStateChangeCallback]s are called first. // // It returns a function to be called when the cc is being shut down, -// or nil if no cleanup is needed. +// or nil if no cleanup is needed. That cleanup function should not call +// back into LocalBackend, which may be locked during shutdown. type NewControlClientCallback func(controlclient.Client, ipn.LoginProfileView) (cleanup func()) // Hooks is a collection of hooks that extensions can add to (non-concurrently) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 62d8ea490..076752469 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -271,9 +271,14 @@ type LocalBackend struct { sshServer SSHServer // or nil, initialized lazily. appConnector *appc.AppConnector // or nil, initialized when configured. // notifyCancel cancels notifications to the current SetNotifyCallback. 
- notifyCancel context.CancelFunc - cc controlclient.Client // TODO(nickkhyl): move to nodeBackend - ccAuto *controlclient.Auto // if cc is of type *controlclient.Auto; TODO(nickkhyl): move to nodeBackend + notifyCancel context.CancelFunc + cc controlclient.Client // TODO(nickkhyl): move to nodeBackend + ccAuto *controlclient.Auto // if cc is of type *controlclient.Auto; TODO(nickkhyl): move to nodeBackend + + // ignoreControlClientUpdates indicates whether we want to ignore SetControlClientStatus updates + // before acquiring b.mu. This is used during shutdown to avoid deadlocks. + ignoreControlClientUpdates atomic.Bool + machinePrivKey key.MachinePrivate tka *tkaState // TODO(nickkhyl): move to nodeBackend state ipn.State // TODO(nickkhyl): move to nodeBackend @@ -314,10 +319,6 @@ type LocalBackend struct { serveListeners map[netip.AddrPort]*localListener // listeners for local serve traffic serveProxyHandlers sync.Map // string (HTTPHandler.Proxy) => *reverseProxy - // mu must be held before calling statusChanged.Wait() or - // statusChanged.Broadcast(). - statusChanged *sync.Cond - // dialPlan is any dial plan that we've received from the control // server during a previous connection; it is cleared on logout. dialPlan atomic.Pointer[tailcfg.ControlDialPlan] // TODO(nickkhyl): maybe move to nodeBackend? @@ -520,8 +521,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.currentNodeAtomic.Store(nb) nb.ready() - mConn.SetNetInfoCallback(b.setNetInfo) - if sys.InitialConfig != nil { if err := b.initPrefsFromConfig(sys.InitialConfig); err != nil { return nil, err @@ -559,7 +558,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.setTCPPortsIntercepted(nil) - b.statusChanged = sync.NewCond(&b.mu) b.e.SetStatusCallback(b.setWgengineStatus) b.prevIfState = netMon.InterfaceState() @@ -604,6 +602,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } eventbus.SubscribeFunc(ec, b.onAppConnectorRouteUpdate) eventbus.SubscribeFunc(ec, b.onAppConnectorStoreRoutes) + mConn.SetNetInfoCallback(b.setNetInfo) // TODO(tailscale/tailscale#17887): move to eventbus return b, nil } @@ -838,8 +837,8 @@ func (b *LocalBackend) Dialer() *tsdial.Dialer { // It returns (false, nil) if not running in declarative mode, (true, nil) on // success, or (false, error) on failure. func (b *LocalBackend) ReloadConfig() (ok bool, err error) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if b.conf == nil { return false, nil } @@ -847,7 +846,7 @@ func (b *LocalBackend) ReloadConfig() (ok bool, err error) { if err != nil { return false, err } - if err := b.setConfigLockedOnEntry(conf, unlock); err != nil { + if err := b.setConfigLocked(conf); err != nil { return false, fmt.Errorf("error setting config: %w", err) } @@ -904,10 +903,9 @@ func (b *LocalBackend) setStateLocked(state ipn.State) { } } -// setConfigLockedOnEntry uses the provided config to update the backend's prefs +// setConfigLocked uses the provided config to update the backend's prefs // and other state. 
-func (b *LocalBackend) setConfigLockedOnEntry(conf *conffile.Config, unlock unlockOnce) error { - defer unlock() +func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error { p := b.pm.CurrentPrefs().AsStruct() mp, err := conf.Parsed.ToPrefs() if err != nil { @@ -915,7 +913,7 @@ func (b *LocalBackend) setConfigLockedOnEntry(conf *conffile.Config, unlock unlo } p.ApplyEdits(&mp) b.setStaticEndpointsFromConfigLocked(conf) - b.setPrefsLockedOnEntry(p, unlock) + b.setPrefsLocked(p) b.conf = conf return nil @@ -1521,11 +1519,31 @@ func (b *LocalBackend) GetFilterForTest() *filter.Filter { return nb.filterAtomic.Load() } +func (b *LocalBackend) settleEventBus() { + // The move to eventbus made some things racy that + // weren't before so we have to wait for it to all be settled + // before we call certain things. + // See https://github.com/tailscale/tailscale/issues/16369 + // But we can't do this while holding b.mu without deadlocks, + // (https://github.com/tailscale/tailscale/pull/17804#issuecomment-3514426485) so + // now we just do it in lots of places before acquiring b.mu. + // Is this winning?? + if b.sys != nil { + if ms, ok := b.sys.MagicSock.GetOK(); ok { + ms.Synchronize() + } + } +} + // SetControlClientStatus is the callback invoked by the control client whenever it posts a new status. // Among other things, this is where we update the netmap, packet filters, DNS and DERP maps. func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st controlclient.Status) { - unlock := b.lockAndGetUnlock() - defer unlock() + if b.ignoreControlClientUpdates.Load() { + b.logf("ignoring SetControlClientStatus during controlclient shutdown") + return + } + b.mu.Lock() + defer b.mu.Unlock() if b.cc != c { b.logf("Ignoring SetControlClientStatus from old client") @@ -1540,7 +1558,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control var uerr controlclient.UserVisibleError if errors.As(st.Err, &uerr) { s := uerr.UserVisibleError() - b.sendToLocked(ipn.Notify{ErrMessage: &s}, allClients) + b.sendLocked(ipn.Notify{ErrMessage: &s}) } return } @@ -1600,25 +1618,20 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.keyExpired = isExpired } - unlock.UnlockEarly() - if keyExpiryExtended && wasBlocked { // Key extended, unblock the engine - b.blockEngineUpdates(false) + b.blockEngineUpdatesLocked(false) } if st.LoginFinished() && (wasBlocked || authWasInProgress) { if wasBlocked { // Auth completed, unblock the engine - b.blockEngineUpdates(false) + b.blockEngineUpdatesLocked(false) } - b.authReconfig() - b.send(ipn.Notify{LoginFinished: &empty.Message{}}) + b.authReconfigLocked() + b.sendLocked(ipn.Notify{LoginFinished: &empty.Message{}}) } - // Lock b again and do only the things that require locking. - b.mu.Lock() - prefsChanged := false cn := b.currentNode() prefs := b.pm.CurrentPrefs().AsStruct() @@ -1731,16 +1744,12 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.setNetMapLocked(st.NetMap) b.updateFilterLocked(prefs.View()) } - b.mu.Unlock() // Now complete the lock-free parts of what we started while locked. if st.NetMap != nil { if envknob.NoLogsNoSupport() && st.NetMap.HasCap(tailcfg.CapabilityDataPlaneAuditLogs) { msg := "tailnet requires logging to be enabled. Remove --no-logs-no-support from tailscaled command line." b.health.SetLocalLogConfigHealth(errors.New(msg)) - // Connecting to this tailnet without logging is forbidden; boot us outta here. 
- b.mu.Lock() - defer b.mu.Unlock() // Get the current prefs again, since we unlocked above. prefs := b.pm.CurrentPrefs().AsStruct() prefs.WantRunning = false @@ -1752,7 +1761,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control }); err != nil { b.logf("Failed to save new controlclient state: %v", err) } - b.sendToLocked(ipn.Notify{ErrMessage: &msg, Prefs: &p}, allClients) + b.sendLocked(ipn.Notify{ErrMessage: &msg, Prefs: &p}) return } if oldNetMap != nil { @@ -1774,11 +1783,11 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control // Update the DERP map in the health package, which uses it for health notifications b.health.SetDERPMap(st.NetMap.DERPMap) - b.send(ipn.Notify{NetMap: st.NetMap}) + b.sendLocked(ipn.Notify{NetMap: st.NetMap}) // The error here is unimportant as is the result. This will recalculate the suggested exit node // cache the value and push any changes to the IPN bus. - b.SuggestExitNode() + b.suggestExitNodeLocked() // Check and update the exit node if needed, now that we have a new netmap. // @@ -1788,16 +1797,16 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control // // Otherwise, it might briefly show the exit node as offline and display a warning, // if the node wasn't online or wasn't advertising default routes in the previous netmap. - b.RefreshExitNode() + b.refreshExitNodeLocked() } if st.URL != "" { b.logf("Received auth URL: %.20v...", st.URL) - b.setAuthURL(st.URL) + b.setAuthURLLocked(st.URL) } - b.stateMachine() + b.stateMachineLocked() // This is currently (2020-07-28) necessary; conditionally disabling it is fragile! // This is where netmap information gets propagated to router and magicsock. - b.authReconfig() + b.authReconfigLocked() } type preferencePolicyInfo struct { @@ -2003,13 +2012,14 @@ func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { // // b.mu must not be held. func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { - unlock := b.lockAndGetUnlock() + b.mu.Lock() + defer b.mu.Unlock() + prefs := b.pm.CurrentPrefs().AsStruct() if !b.reconcilePrefsLocked(prefs) { - unlock.UnlockEarly() return prefs.View(), false } - return b.setPrefsLockedOnEntry(prefs, unlock), true + return b.setPrefsLocked(prefs), true } // sysPolicyChanged is a callback triggered by syspolicy when it detects @@ -2057,6 +2067,11 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo b.send(*notify) } }() + + // Gross. 
See https://github.com/tailscale/tailscale/issues/16369 + b.settleEventBus() + defer b.settleEventBus() + b.mu.Lock() defer b.mu.Unlock() @@ -2077,7 +2092,7 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo if !ok || n.StableID() != exitNodeID { continue } - b.goTracker.Go(b.RefreshExitNode) + b.refreshExitNodeLocked() break } } @@ -2241,51 +2256,60 @@ func (b *LocalBackend) resolveExitNodeIPLocked(prefs *ipn.Prefs) (prefsChanged b func (b *LocalBackend) setWgengineStatus(s *wgengine.Status, err error) { if err != nil { b.logf("wgengine status error: %v", err) - b.broadcastStatusChanged() return } if s == nil { b.logf("[unexpected] non-error wgengine update with status=nil: %v", s) - b.broadcastStatusChanged() return } b.mu.Lock() + defer b.mu.Unlock() + + // For now, only check this in the callback, but don't check it in setWgengineStatusLocked if s.AsOf.Before(b.lastStatusTime) { // Don't process a status update that is older than the one we have // already processed. (corp#2579) - b.mu.Unlock() return } b.lastStatusTime = s.AsOf + + b.setWgengineStatusLocked(s) +} + +// setWgengineStatusLocked updates LocalBackend's view of the engine status and +// updates the endpoints both in the backend and in the control client. +// +// Unlike setWgengineStatus it does not discard out-of-order updates, so +// statuses sent here are always processed. This is useful for ensuring we don't +// miss a "we shut down" status during backend shutdown even if other statuses +// arrive out of order. +// +// TODO(zofrex): we should ensure updates actually do arrive in order and move +// the out-of-order check into this function. +// +// b.mu must be held. +func (b *LocalBackend) setWgengineStatusLocked(s *wgengine.Status) { es := b.parseWgStatusLocked(s) cc := b.cc + + // TODO(zofrex): the only reason we even write this is to transition from + // "Starting" to "Running" in the call to state machine a few lines below + // this. Maybe we don't even need to store it at all. b.engineStatus = es + needUpdateEndpoints := !slices.Equal(s.LocalAddrs, b.endpoints) if needUpdateEndpoints { b.endpoints = append([]tailcfg.Endpoint{}, s.LocalAddrs...) } - b.mu.Unlock() if cc != nil { if needUpdateEndpoints { cc.UpdateEndpoints(s.LocalAddrs) } - b.stateMachine() + b.stateMachineLocked() } - b.broadcastStatusChanged() - b.send(ipn.Notify{Engine: &es}) -} - -// broadcastStatusChanged must not be called with b.mu held. -func (b *LocalBackend) broadcastStatusChanged() { - // The sync.Cond docs say: "It is allowed but not required for the caller to hold c.L during the call." - // In this particular case, we must acquire b.mu. Otherwise we might broadcast before - // the waiter (in requestEngineStatusAndWait) starts to wait, in which case - // the waiter can get stuck indefinitely. See PR 2865. - b.mu.Lock() - b.statusChanged.Broadcast() - b.mu.Unlock() + b.sendLocked(ipn.Notify{Engine: &es}) } // SetNotifyCallback sets the function to call when the backend has something to @@ -2365,8 +2389,14 @@ func (b *LocalBackend) initOnce() { // actually a supported operation (it should be, but it's very unclear // from the following whether or not that is a safe transition). 
func (b *LocalBackend) Start(opts ipn.Options) error { - b.logf("Start") + defer b.settleEventBus() // with b.mu unlocked + b.mu.Lock() + defer b.mu.Unlock() + return b.startLocked(opts) +} +func (b *LocalBackend) startLocked(opts ipn.Options) error { + b.logf("Start") b.startOnce.Do(b.initOnce) var clientToShutdown controlclient.Client @@ -2375,8 +2405,6 @@ func (b *LocalBackend) Start(opts ipn.Options) error { clientToShutdown.Shutdown() } }() - unlock := b.lockAndGetUnlock() - defer unlock() if opts.UpdatePrefs != nil { if err := b.checkPrefsLocked(opts.UpdatePrefs); err != nil { @@ -2591,7 +2619,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { // regress tsnet.Server restarts. cc.Login(controlclient.LoginDefault) } - b.stateMachineLockedOnEntry(unlock) + b.stateMachineLocked() return nil } @@ -3255,6 +3283,10 @@ func (b *LocalBackend) send(n ipn.Notify) { b.sendTo(n, allClients) } +func (b *LocalBackend) sendLocked(n ipn.Notify) { + b.sendToLocked(n, allClients) +} + // SendNotify sends a notification to the IPN bus, // typically to the GUI client. func (b *LocalBackend) SendNotify(n ipn.Notify) { @@ -3345,21 +3377,22 @@ func (b *LocalBackend) sendToLocked(n ipn.Notify, recipient notificationTarget) } } -// setAuthURL sets the authURL and triggers [LocalBackend.popBrowserAuthNow] if the URL has changed. +// setAuthURLLocked sets the authURL and triggers [LocalBackend.popBrowserAuthNow] if the URL has changed. // This method is called when a new authURL is received from the control plane, meaning that either a user // has started a new interactive login (e.g., by running `tailscale login` or clicking Login in the GUI), // or the control plane was unable to authenticate this node non-interactively (e.g., due to key expiration). // A non-nil b.authActor indicates that an interactive login is in progress and was initiated by the specified actor. +// +// b.mu must be held. +// // If url is "", it is equivalent to calling [LocalBackend.resetAuthURLLocked] with b.mu held. -func (b *LocalBackend) setAuthURL(url string) { +func (b *LocalBackend) setAuthURLLocked(url string) { var popBrowser, keyExpired bool var recipient ipnauth.Actor - b.mu.Lock() switch { case url == "": b.resetAuthURLLocked() - b.mu.Unlock() return case b.authURL != url: b.authURL = url @@ -3376,33 +3409,33 @@ func (b *LocalBackend) setAuthURL(url string) { // Consume the StartLoginInteractive call, if any, that caused the control // plane to send us this URL. b.authActor = nil - b.mu.Unlock() if popBrowser { - b.popBrowserAuthNow(url, keyExpired, recipient) + b.popBrowserAuthNowLocked(url, keyExpired, recipient) } } -// popBrowserAuthNow shuts down the data plane and sends the URL to the recipient's +// popBrowserAuthNowLocked shuts down the data plane and sends the URL to the recipient's // [watchSession]s if the recipient is non-nil; otherwise, it sends the URL to all watchSessions. // keyExpired is the value of b.keyExpired upon entry and indicates // whether the node's key has expired. -// It must not be called with b.mu held. -func (b *LocalBackend) popBrowserAuthNow(url string, keyExpired bool, recipient ipnauth.Actor) { +// +// b.mu must be held. 
+func (b *LocalBackend) popBrowserAuthNowLocked(url string, keyExpired bool, recipient ipnauth.Actor) { b.logf("popBrowserAuthNow(%q): url=%v, key-expired=%v, seamless-key-renewal=%v", maybeUsernameOf(recipient), url != "", keyExpired, b.seamlessRenewalEnabled()) // Deconfigure the local network data plane if: // - seamless key renewal is not enabled; // - key is expired (in which case tailnet connectivity is down anyway). if !b.seamlessRenewalEnabled() || keyExpired { - b.blockEngineUpdates(true) - b.stopEngineAndWait() + b.blockEngineUpdatesLocked(true) + b.stopEngineAndWaitLocked() - if b.State() == ipn.Running { - b.enterState(ipn.Starting) + if b.state == ipn.Running { + b.enterStateLocked(ipn.Starting) } } - b.tellRecipientToBrowseToURL(url, toNotificationTarget(recipient)) + b.tellRecipientToBrowseToURLLocked(url, toNotificationTarget(recipient)) } // validPopBrowserURL reports whether urlStr is a valid value for a @@ -3450,13 +3483,16 @@ func (b *LocalBackend) validPopBrowserURLLocked(urlStr string) bool { } func (b *LocalBackend) tellClientToBrowseToURL(url string) { - b.tellRecipientToBrowseToURL(url, allClients) + b.mu.Lock() + defer b.mu.Unlock() + b.tellRecipientToBrowseToURLLocked(url, allClients) } -// tellRecipientToBrowseToURL is like tellClientToBrowseToURL but allows specifying a recipient. -func (b *LocalBackend) tellRecipientToBrowseToURL(url string, recipient notificationTarget) { - if b.validPopBrowserURL(url) { - b.sendTo(ipn.Notify{BrowseToURL: &url}, recipient) +// tellRecipientToBrowseToURLLocked is like tellClientToBrowseToURL but allows specifying a recipient +// and b.mu must be held. +func (b *LocalBackend) tellRecipientToBrowseToURLLocked(url string, recipient notificationTarget) { + if b.validPopBrowserURLLocked(url) { + b.sendToLocked(ipn.Notify{BrowseToURL: &url}, recipient) } } @@ -3471,8 +3507,8 @@ func (b *LocalBackend) onClientVersion(v *tailcfg.ClientVersion) { } func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() prefs := b.pm.CurrentPrefs() if !prefs.Valid() { @@ -3494,14 +3530,14 @@ func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { b.logf("using tailnet default auto-update setting: %v", au) prefsClone := prefs.AsStruct() prefsClone.AutoUpdate.Apply = opt.NewBool(au) - _, err := b.editPrefsLockedOnEntry( + _, err := b.editPrefsLocked( ipnauth.Self, &ipn.MaskedPrefs{ Prefs: *prefsClone, AutoUpdateSet: ipn.AutoUpdatePrefsMask{ ApplySet: true, }, - }, unlock) + }) if err != nil { b.logf("failed to apply tailnet-wide default for auto-updates (%v): %v", au, err) return @@ -3734,6 +3770,7 @@ func (b *LocalBackend) StartLoginInteractive(ctx context.Context) error { // active [watchSession]s. 
func (b *LocalBackend) StartLoginInteractiveAs(ctx context.Context, user ipnauth.Actor) error { b.mu.Lock() + defer b.mu.Unlock() if b.cc == nil { panic("LocalBackend.assertClient: b.cc == nil") } @@ -3751,12 +3788,11 @@ func (b *LocalBackend) StartLoginInteractiveAs(ctx context.Context, user ipnauth b.authActor = user } cc := b.cc - b.mu.Unlock() b.logf("StartLoginInteractiveAs(%q): url=%v", maybeUsernameOf(user), hasValidURL) if hasValidURL { - b.popBrowserAuthNow(url, keyExpired, user) + b.popBrowserAuthNowLocked(url, keyExpired, user) } else { cc.Login(b.loginFlags | controlclient.LoginInteractive) } @@ -3886,8 +3922,8 @@ func (b *LocalBackend) parseWgStatusLocked(s *wgengine.Status) (ret ipn.EngineSt // // On non-multi-user systems, the actor should be set to nil. func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() var userIdentifier string if user := cmp.Or(actor, b.currentUser); user != nil { @@ -3909,7 +3945,7 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { action = "connected" } reason := fmt.Sprintf("client %s (%s)", action, userIdentifier) - b.switchToBestProfileLockedOnEntry(reason, unlock) + b.switchToBestProfileLocked(reason) } // SwitchToBestProfile selects the best profile to use, @@ -3919,13 +3955,14 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { // or disconnecting, or a change in the desktop session state, and is used // for logging. func (b *LocalBackend) SwitchToBestProfile(reason string) { - b.switchToBestProfileLockedOnEntry(reason, b.lockAndGetUnlock()) + b.mu.Lock() + defer b.mu.Unlock() + b.switchToBestProfileLocked(reason) } -// switchToBestProfileLockedOnEntry is like [LocalBackend.SwitchToBestProfile], -// but b.mu must held on entry. It is released on exit. -func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock unlockOnce) { - defer unlock() +// switchToBestProfileLocked is like [LocalBackend.SwitchToBestProfile], +// but b.mu must held on entry. +func (b *LocalBackend) switchToBestProfileLocked(reason string) { oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc) profile, background := b.resolveBestProfileLocked() cp, switched, err := b.pm.SwitchToProfile(profile) @@ -3956,7 +3993,7 @@ func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock un if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc); oldControlURL != newControlURL { b.resetDialPlan() } - if err := b.resetForProfileChangeLockedOnEntry(unlock); err != nil { + if err := b.resetForProfileChangeLocked(); err != nil { // TODO(nickkhyl): The actual reset cannot fail. However, // the TKA initialization or [LocalBackend.Start] can fail. // These errors are not critical as far as we're concerned. @@ -4204,8 +4241,8 @@ func (b *LocalBackend) checkAutoUpdatePrefsLocked(p *ipn.Prefs) error { // Setting the value to false when use of an exit node is already false is not an error, // nor is true when the exit node is already in use. 
func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.PrefsView, error) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() p0 := b.pm.CurrentPrefs() if !buildfeatures.HasUseExitNode { @@ -4249,7 +4286,7 @@ func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.P mp.InternalExitNodePrior = p0.ExitNodeID() } } - return b.editPrefsLockedOnEntry(actor, mp, unlock) + return b.editPrefsLocked(actor, mp) } // MaybeClearAppConnector clears the routes from any AppConnector if @@ -4280,8 +4317,11 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip if mp.SetsInternal() { return ipn.PrefsView{}, errors.New("can't set Internal fields") } + defer b.settleEventBus() - return b.editPrefsLockedOnEntry(actor, mp, b.lockAndGetUnlock()) + b.mu.Lock() + defer b.mu.Unlock() + return b.editPrefsLocked(actor, mp) } // checkEditPrefsAccessLocked checks whether the current user has access @@ -4471,8 +4511,8 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { profileID := b.pm.CurrentProfile().ID() var reconnectTimer tstime.TimerController reconnectTimer = b.clock.AfterFunc(d, func() { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if b.reconnectTimer != reconnectTimer { // We're either not the most recent timer, or we lost the race when @@ -4490,7 +4530,7 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { } mp := &ipn.MaskedPrefs{WantRunningSet: true, Prefs: ipn.Prefs{WantRunning: true}} - if _, err := b.editPrefsLockedOnEntry(ipnauth.Self, mp, unlock); err != nil { + if _, err := b.editPrefsLocked(ipnauth.Self, mp); err != nil { b.logf("failed to automatically reconnect as %q after %v: %v", cp.Name(), d, err) } else { b.logf("automatically reconnected as %q after %v", cp.Name(), d) @@ -4519,11 +4559,8 @@ func (b *LocalBackend) stopReconnectTimerLocked() { } } -// Warning: b.mu must be held on entry, but it unlocks it on the way out. -// TODO(bradfitz): redo the locking on all these weird methods like this. -func (b *LocalBackend) editPrefsLockedOnEntry(actor ipnauth.Actor, mp *ipn.MaskedPrefs, unlock unlockOnce) (ipn.PrefsView, error) { - defer unlock() // for error paths - +// b.mu must be held. +func (b *LocalBackend) editPrefsLocked(actor ipnauth.Actor, mp *ipn.MaskedPrefs) (ipn.PrefsView, error) { p0 := b.pm.CurrentPrefs() // Check if the changes in mp are allowed. @@ -4560,11 +4597,11 @@ func (b *LocalBackend) editPrefsLockedOnEntry(actor ipnauth.Actor, mp *ipn.Maske // before the modified prefs are actually set for the current profile. b.onEditPrefsLocked(actor, mp, p0, p1.View()) - newPrefs := b.setPrefsLockedOnEntry(p1, unlock) + newPrefs := b.setPrefsLocked(p1) // Note: don't perform any actions for the new prefs here. Not // every prefs change goes through EditPrefs. Put your actions - // in setPrefsLocksOnEntry instead. + // in setPrefsLocked instead. // This should return the public prefs, not the private ones. return stripKeysFromPrefs(newPrefs), nil @@ -4587,12 +4624,10 @@ func (b *LocalBackend) checkProfileNameLocked(p *ipn.Prefs) error { return nil } -// setPrefsLockedOnEntry requires b.mu be held to call it, but it -// unlocks b.mu when done. newp ownership passes to this function. +// setPrefsLocked requires b.mu be held to call it. +// newp ownership passes to this function. // It returns a read-only copy of the new prefs. 
-func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) ipn.PrefsView { - defer unlock() - +func (b *LocalBackend) setPrefsLocked(newp *ipn.Prefs) ipn.PrefsView { cn := b.currentNode() netMap := cn.NetMap() b.setAtomicValuesFromPrefsLocked(newp.View()) @@ -4653,10 +4688,8 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) b.resetAlwaysOnOverrideLocked() } - unlock.UnlockEarly() - if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged { - b.doSetHostinfoFilterServices() + b.doSetHostinfoFilterServicesLocked() } if netMap != nil { @@ -4669,12 +4702,12 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) } if oldp.WantRunning() != newp.WantRunning { - b.stateMachine() + b.stateMachineLocked() } else { - b.authReconfig() + b.authReconfigLocked() } - b.send(ipn.Notify{Prefs: &prefs}) + b.sendLocked(ipn.Notify{Prefs: &prefs}) return prefs } @@ -4794,7 +4827,11 @@ func (b *LocalBackend) setPortlistServices(sl []tailcfg.Service) { func (b *LocalBackend) doSetHostinfoFilterServices() { b.mu.Lock() defer b.mu.Unlock() + b.doSetHostinfoFilterServicesLocked() +} +// b.mu must be held +func (b *LocalBackend) doSetHostinfoFilterServicesLocked() { cc := b.cc if cc == nil { // Control client isn't up yet. @@ -4863,15 +4900,15 @@ func (b *LocalBackend) isEngineBlocked() bool { return b.blocked } -// blockEngineUpdate sets b.blocked to block, while holding b.mu. Its -// indirect effect is to turn b.authReconfig() into a no-op if block -// is true. -func (b *LocalBackend) blockEngineUpdates(block bool) { +// blockEngineUpdatesLocked sets b.blocked to block. +// +// Its indirect effect is to turn b.authReconfig() into a no-op if block is +// true. +// +// b.mu must be held. +func (b *LocalBackend) blockEngineUpdatesLocked(block bool) { b.logf("blockEngineUpdates(%v)", block) - - b.mu.Lock() b.blocked = block - b.mu.Unlock() } // reconfigAppConnectorLocked updates the app connector state based on the @@ -4982,38 +5019,41 @@ func (b *LocalBackend) readvertiseAppConnectorRoutes() { // updates are not currently blocked, based on the cached netmap and // user prefs. func (b *LocalBackend) authReconfig() { - // Wait for magicsock to process pending [eventbus] events, - // such as netmap updates. This should be completed before - // wireguard-go is reconfigured. See tailscale/tailscale#16369. - b.MagicConn().Synchronize() - b.mu.Lock() - blocked := b.blocked - prefs := b.pm.CurrentPrefs() - cn := b.currentNode() - nm := cn.NetMap() - hasPAC := b.prevIfState.HasPAC() - disableSubnetsIfPAC := cn.SelfHasCap(tailcfg.NodeAttrDisableSubnetsIfPAC) - dohURL, dohURLOK := cn.exitNodeCanProxyDNS(prefs.ExitNodeID()) - dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, version.OS()) - // If the current node is an app connector, ensure the app connector machine is started - b.reconfigAppConnectorLocked(nm, prefs) - closing := b.shutdownCalled - b.mu.Unlock() + defer b.mu.Unlock() + b.authReconfigLocked() +} - if closing { +// authReconfigLocked is the locked version of [LocalBackend.authReconfig]. +// +// b.mu must be held. +func (b *LocalBackend) authReconfigLocked() { + + if b.shutdownCalled { b.logf("[v1] authReconfig: skipping because in shutdown") return } - - if blocked { + if b.blocked { b.logf("[v1] authReconfig: blocked, skipping.") return } + + cn := b.currentNode() + + nm := cn.NetMap() if nm == nil { b.logf("[v1] authReconfig: netmap not yet valid. 
Skipping.") return } + + prefs := b.pm.CurrentPrefs() + hasPAC := b.prevIfState.HasPAC() + disableSubnetsIfPAC := cn.SelfHasCap(tailcfg.NodeAttrDisableSubnetsIfPAC) + dohURL, dohURLOK := cn.exitNodeCanProxyDNS(prefs.ExitNodeID()) + dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, version.OS()) + // If the current node is an app connector, ensure the app connector machine is started + b.reconfigAppConnectorLocked(nm, prefs) + if !prefs.WantRunning() { b.logf("[v1] authReconfig: skipping because !WantRunning.") return @@ -5048,7 +5088,7 @@ func (b *LocalBackend) authReconfig() { } oneCGNATRoute := shouldUseOneCGNATRoute(b.logf, b.sys.NetMon.Get(), b.sys.ControlKnobs(), version.OS()) - rcfg := b.routerConfig(cfg, prefs, oneCGNATRoute) + rcfg := b.routerConfigLocked(cfg, prefs, oneCGNATRoute) err = b.e.Reconfig(cfg, rcfg, dcfg) if err == wgengine.ErrNoChanges { @@ -5056,9 +5096,9 @@ func (b *LocalBackend) authReconfig() { } b.logf("[v1] authReconfig: ra=%v dns=%v 0x%02x: %v", prefs.RouteAll(), prefs.CorpDNS(), flags, err) - b.initPeerAPIListener() + b.initPeerAPIListenerLocked() if buildfeatures.HasAppConnectors { - b.readvertiseAppConnectorRoutes() + go b.goTracker.Go(b.readvertiseAppConnectorRoutes) } } @@ -5181,12 +5221,18 @@ func (b *LocalBackend) closePeerAPIListenersLocked() { const peerAPIListenAsync = runtime.GOOS == "windows" || runtime.GOOS == "android" func (b *LocalBackend) initPeerAPIListener() { + b.mu.Lock() + defer b.mu.Unlock() + b.initPeerAPIListenerLocked() +} + +// b.mu must be held. +func (b *LocalBackend) initPeerAPIListenerLocked() { if !buildfeatures.HasPeerAPIServer { return } b.logf("[v1] initPeerAPIListener: entered") - b.mu.Lock() - defer b.mu.Unlock() + if b.shutdownCalled { b.logf("[v1] initPeerAPIListener: shutting down") return @@ -5349,15 +5395,15 @@ func peerRoutes(logf logger.Logf, peers []wgcfg.Peer, cgnatThreshold int) (route } // routerConfig produces a router.Config from a wireguard config and IPN prefs. -func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneCGNATRoute bool) *router.Config { +// +// b.mu must be held. +func (b *LocalBackend) routerConfigLocked(cfg *wgcfg.Config, prefs ipn.PrefsView, oneCGNATRoute bool) *router.Config { singleRouteThreshold := 10_000 if oneCGNATRoute { singleRouteThreshold = 1 } - b.mu.Lock() - netfilterKind := b.capForcedNetfilter // protected by b.mu - b.mu.Unlock() + netfilterKind := b.capForcedNetfilter // protected by b.mu (hence the Locked suffix) if prefs.NetfilterKind() != "" { if netfilterKind != "" { @@ -5515,21 +5561,16 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip } } -// enterState transitions the backend into newState, updating internal +// enterStateLocked transitions the backend into newState, updating internal // state and propagating events out as needed. // // TODO(danderson): while this isn't a lie, exactly, a ton of other // places twiddle IPN internal state without going through here, so // really this is more "one of several places in which random things // happen". -func (b *LocalBackend) enterState(newState ipn.State) { - unlock := b.lockAndGetUnlock() - b.enterStateLockedOnEntry(newState, unlock) -} - -// enterStateLockedOnEntry is like enterState but requires b.mu be held to call -// it, but it unlocks b.mu when done (via unlock, a once func). -func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlockOnce) { +// +// b.mu must be held. 
+func (b *LocalBackend) enterStateLocked(newState ipn.State) { cn := b.currentNode() oldState := b.state b.setStateLocked(newState) @@ -5581,17 +5622,16 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock } b.pauseOrResumeControlClientLocked() - unlock.UnlockEarly() - // prefs may change irrespective of state; WantRunning should be explicitly // set before potential early return even if the state is unchanged. b.health.SetIPNState(newState.String(), prefs.Valid() && prefs.WantRunning()) if oldState == newState { return } + b.logf("Switching ipn state %v -> %v (WantRunning=%v, nm=%v)", oldState, newState, prefs.WantRunning(), netMap != nil) - b.send(ipn.Notify{State: &newState}) + b.sendLocked(ipn.Notify{State: &newState}) switch newState { case ipn.NeedsLogin: @@ -5599,7 +5639,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock // always block updates on NeedsLogin even if seamless renewal is enabled, // to prevent calls to authReconfig from reconfiguring the engine when our // key has expired and we're waiting to authenticate to use the new key. - b.blockEngineUpdates(true) + b.blockEngineUpdatesLocked(true) fallthrough case ipn.Stopped, ipn.NoState: // Unconfigure the engine if it has stopped (WantRunning is set to false) @@ -5613,9 +5653,9 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock feature.SystemdStatus("Stopped; run 'tailscale up' to log in") } case ipn.Starting, ipn.NeedsMachineAuth: - b.authReconfig() + b.authReconfigLocked() // Needed so that UpdateEndpoints can run - b.e.RequestStatus() + b.goTracker.Go(b.e.RequestStatus) case ipn.Running: if feature.CanSystemdStatus { var addrStrs []string @@ -5724,109 +5764,23 @@ func (b *LocalBackend) nextStateLocked() ipn.State { // that have happened. It is invoked from the various callbacks that // feed events into LocalBackend. // -// TODO(apenwarr): use a channel or something to prevent reentrancy? -// Or maybe just call the state machine from fewer places. -func (b *LocalBackend) stateMachine() { - unlock := b.lockAndGetUnlock() - b.stateMachineLockedOnEntry(unlock) -} - -// stateMachineLockedOnEntry is like stateMachine but requires b.mu be held to -// call it, but it unlocks b.mu when done (via unlock, a once func). -func (b *LocalBackend) stateMachineLockedOnEntry(unlock unlockOnce) { - b.enterStateLockedOnEntry(b.nextStateLocked(), unlock) -} - -// lockAndGetUnlock locks b.mu and returns a sync.OnceFunc function that will -// unlock it at most once. -// -// This is all very unfortunate but exists as a guardrail against the -// unfortunate "lockedOnEntry" methods in this package (primarily -// enterStateLockedOnEntry) that require b.mu held to be locked on entry to the -// function but unlock the mutex on their way out. As a stepping stone to -// cleaning things up (as of 2024-04-06), we at least pass the unlock func -// around now and defer unlock in the caller to avoid missing unlocks and double -// unlocks. TODO(bradfitz,maisem): make the locking in this package more -// traditional (simple). See https://github.com/tailscale/tailscale/issues/11649 -func (b *LocalBackend) lockAndGetUnlock() (unlock unlockOnce) { - b.mu.Lock() - var unlocked atomic.Bool - return func() bool { - if unlocked.CompareAndSwap(false, true) { - b.mu.Unlock() - return true - } - return false - } -} - -// unlockOnce is a func that unlocks only b.mu the first time it's called. 
-// Therefore it can be safely deferred to catch error paths, without worrying -// about double unlocks if a different point in the code later needs to explicitly -// unlock it first as well. It reports whether it was unlocked. -type unlockOnce func() bool - -// UnlockEarly unlocks the LocalBackend.mu. It panics if u returns false, -// indicating that this unlocker was already used. -// -// We're using this method to help us document & find the places that have -// atypical locking patterns. See -// https://github.com/tailscale/tailscale/issues/11649 for background. -// -// A normal unlock is a deferred one or an explicit b.mu.Unlock a few lines -// after the lock, without lots of control flow in-between. An "early" unlock is -// one that happens in weird places, like in various "LockedOnEntry" methods in -// this package that require the mutex to be locked on entry but unlock it -// somewhere in the middle (maybe several calls away) and then sometimes proceed -// to lock it again. -// -// The reason UnlockeEarly panics if already called is because these are the -// points at which it's assumed that the mutex is already held and it now needs -// to be released. If somebody already released it, that invariant was violated. -// On the other hand, simply calling u only returns false instead of panicking -// so you can defer it without care, confident you got all the error return -// paths which were previously done by hand. -func (u unlockOnce) UnlockEarly() { - if !u() { - panic("Unlock on already-called unlockOnce") - } +// requires b.mu to be held. +func (b *LocalBackend) stateMachineLocked() { + b.enterStateLocked(b.nextStateLocked()) } // stopEngineAndWait deconfigures the local network data plane, and // waits for it to deliver a status update indicating it has stopped // before returning. -func (b *LocalBackend) stopEngineAndWait() { +// +// b.mu must be held. +func (b *LocalBackend) stopEngineAndWaitLocked() { b.logf("stopEngineAndWait...") - b.e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) - b.requestEngineStatusAndWaitForStopped() + st, _ := b.e.ResetAndStop() // TODO: what should we do if this returns an error? + b.setWgengineStatusLocked(st) b.logf("stopEngineAndWait: done.") } -// Requests the wgengine status, and does not return until a status was -// delivered (to the usual callback) that indicates the engine is stopped. -func (b *LocalBackend) requestEngineStatusAndWaitForStopped() { - b.logf("requestEngineStatusAndWaitForStopped") - - b.mu.Lock() - defer b.mu.Unlock() - - b.goTracker.Go(b.e.RequestStatus) - b.logf("requestEngineStatusAndWaitForStopped: waiting...") - for { - b.statusChanged.Wait() // temporarily releases lock while waiting - - if !b.blocked { - b.logf("requestEngineStatusAndWaitForStopped: engine is no longer blocked, must have stopped and started again, not safe to wait.") - break - } - if b.engineStatus.NumLive == 0 && b.engineStatus.LiveDERPs == 0 { - b.logf("requestEngineStatusAndWaitForStopped: engine is stopped.") - break - } - b.logf("requestEngineStatusAndWaitForStopped: engine is still running. Waiting...") - } -} - // setControlClientLocked sets the control client to cc, // which may be nil. // @@ -5834,6 +5788,7 @@ func (b *LocalBackend) requestEngineStatusAndWaitForStopped() { func (b *LocalBackend) setControlClientLocked(cc controlclient.Client) { b.cc = cc b.ccAuto, _ = cc.(*controlclient.Auto) + b.ignoreControlClientUpdates.Store(cc == nil) } // resetControlClientLocked sets b.cc to nil and returns the old value. 
If the @@ -5927,11 +5882,11 @@ func (b *LocalBackend) ShouldHandleViaIP(ip netip.Addr) bool { // Logout logs out the current profile, if any, and waits for the logout to // complete. func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() if !b.hasNodeKeyLocked() { // Already logged out. + b.mu.Unlock() return nil } cc := b.cc @@ -5940,17 +5895,17 @@ func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { // delete it later. profile := b.pm.CurrentProfile() - _, err := b.editPrefsLockedOnEntry( + _, err := b.editPrefsLocked( actor, &ipn.MaskedPrefs{ WantRunningSet: true, LoggedOutSet: true, Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true}, - }, unlock) + }) + b.mu.Unlock() if err != nil { return err } - // b.mu is now unlocked, after editPrefsLockedOnEntry. // Clear any previous dial plan(s), if set. b.resetDialPlan() @@ -5970,14 +5925,14 @@ func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { return err } - unlock = b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if err := b.pm.DeleteProfile(profile.ID()); err != nil { b.logf("error deleting profile: %v", err) return err } - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // setNetInfo sets b.hostinfo.NetInfo to ni, and passes ni along to the @@ -6028,12 +5983,19 @@ func (b *LocalBackend) RefreshExitNode() { if !buildfeatures.HasUseExitNode { return } - if b.resolveExitNode() { - b.authReconfig() + b.mu.Lock() + defer b.mu.Unlock() + b.refreshExitNodeLocked() +} + +// refreshExitNodeLocked is like RefreshExitNode but requires b.mu be held. +func (b *LocalBackend) refreshExitNodeLocked() { + if b.resolveExitNodeLocked() { + b.authReconfigLocked() } } -// resolveExitNode determines which exit node to use based on the current prefs +// resolveExitNodeLocked determines which exit node to use based on the current prefs // and netmap. It updates the exit node ID in the prefs if needed, updates the // exit node ID in the hostinfo if needed, sends a notification to clients, and // returns true if the exit node has changed. @@ -6041,13 +6003,11 @@ func (b *LocalBackend) RefreshExitNode() { // It is the caller's responsibility to reconfigure routes and actually // start using the selected exit node, if needed. // -// b.mu must not be held. -func (b *LocalBackend) resolveExitNode() (changed bool) { +// b.mu must be held. +func (b *LocalBackend) resolveExitNodeLocked() (changed bool) { if !buildfeatures.HasUseExitNode { return false } - b.mu.Lock() - defer b.mu.Unlock() nm := b.currentNode().NetMap() prefs := b.pm.CurrentPrefs().AsStruct() @@ -6854,8 +6814,8 @@ func (b *LocalBackend) ShouldInterceptVIPServiceTCPPort(ap netip.AddrPort) bool // It will restart the backend on success. // If the profile is not known, it returns an errProfileNotFound. func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc) if _, changed, err := b.pm.SwitchToProfileByID(profile); !changed || err != nil { @@ -6867,7 +6827,7 @@ func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { b.resetDialPlan() } - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // resetDialPlan resets the dialPlan for this LocalBackend. 
It will log if @@ -6881,12 +6841,10 @@ func (b *LocalBackend) resetDialPlan() { } } -// resetForProfileChangeLockedOnEntry resets the backend for a profile change. +// resetForProfileChangeLocked resets the backend for a profile change. // -// b.mu must held on entry. It is released on exit. -func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) error { - defer unlock() - +// b.mu must be held. +func (b *LocalBackend) resetForProfileChangeLocked() error { if b.shutdownCalled { // Prevent a call back to Start during Shutdown, which calls Logout for // ephemeral nodes, which can then call back here. But we're shutting @@ -6903,7 +6861,6 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err // Reset the NetworkMap in the engine b.e.SetNetworkMap(new(netmap.NetworkMap)) if prevCC := b.resetControlClientLocked(); prevCC != nil { - // Needs to happen without b.mu held. defer prevCC.Shutdown() } // TKA errors should not prevent resetting the backend state. @@ -6917,19 +6874,19 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err b.resetAlwaysOnOverrideLocked() b.extHost.NotifyProfileChange(b.pm.CurrentProfile(), b.pm.CurrentPrefs(), false) b.setAtomicValuesFromPrefsLocked(b.pm.CurrentPrefs()) - b.enterStateLockedOnEntry(ipn.NoState, unlock) // Reset state; releases b.mu + b.enterStateLocked(ipn.NoState) b.health.SetLocalLogConfigHealth(nil) if tkaErr != nil { return tkaErr } - return b.Start(ipn.Options{}) + return b.startLocked(ipn.Options{}) } // DeleteProfile deletes a profile with the given ID. // If the profile is not known, it is a no-op. func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() needToRestart := b.pm.CurrentProfile().ID() == p if err := b.pm.DeleteProfile(p); err != nil { @@ -6941,7 +6898,7 @@ func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { if !needToRestart { return nil } - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // CurrentProfile returns the current LoginProfile. @@ -6954,8 +6911,8 @@ func (b *LocalBackend) CurrentProfile() ipn.LoginProfileView { // NewProfile creates and switches to the new profile. func (b *LocalBackend) NewProfile() error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() b.pm.SwitchToNewProfile() @@ -6963,7 +6920,7 @@ func (b *LocalBackend) NewProfile() error { // set. Conservatively reset the dialPlan. b.resetDialPlan() - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // ListProfiles returns a list of all LoginProfiles. @@ -6978,12 +6935,11 @@ func (b *LocalBackend) ListProfiles() []ipn.LoginProfileView { // backend is left with a new profile, ready for StartLoginInterative to be // called to register it as new node. 
func (b *LocalBackend) ResetAuth() error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() - prevCC := b.resetControlClientLocked() - if prevCC != nil { - defer prevCC.Shutdown() // call must happen after release b.mu + if prevCC := b.resetControlClientLocked(); prevCC != nil { + defer prevCC.Shutdown() } if err := b.clearMachineKeyLocked(); err != nil { return err @@ -6992,7 +6948,7 @@ func (b *LocalBackend) ResetAuth() error { return err } b.resetDialPlan() // always reset if we're removing everything - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } func (b *LocalBackend) GetPeerEndpointChanges(ctx context.Context, ip netip.Addr) ([]magicsock.EndpointChange, error) { @@ -7223,7 +7179,7 @@ var ErrNoPreferredDERP = errors.New("no preferred DERP, try again later") // be selected at random, so the result is not stable. To be eligible for // consideration, the peer must have NodeAttrSuggestExitNode in its CapMap. // -// b.mu.lock() must be held. +// b.mu must be held. func (b *LocalBackend) suggestExitNodeLocked() (response apitype.ExitNodeSuggestionResponse, err error) { if !buildfeatures.HasUseExitNode { return response, feature.ErrUnavailable diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index bac74a33c..962335046 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1503,15 +1503,6 @@ func wantExitNodeIDNotify(want tailcfg.StableNodeID) wantedNotification { } } -func wantStateNotify(want ipn.State) wantedNotification { - return wantedNotification{ - name: "State=" + want.String(), - cond: func(_ testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool { - return n.State != nil && *n.State == want - }, - } -} - func TestInternalAndExternalInterfaces(t *testing.T) { type interfacePrefix struct { i netmon.Interface @@ -4318,9 +4309,9 @@ func (b *LocalBackend) SetPrefsForTest(newp *ipn.Prefs) { if newp == nil { panic("SetPrefsForTest got nil prefs") } - unlock := b.lockAndGetUnlock() - defer unlock() - b.setPrefsLockedOnEntry(newp, unlock) + b.mu.Lock() + defer b.mu.Unlock() + b.setPrefsLocked(newp) } type peerOptFunc func(*tailcfg.Node) @@ -5808,12 +5799,12 @@ func TestNotificationTargetMatch(t *testing.T) { type newTestControlFn func(tb testing.TB, opts controlclient.Options) controlclient.Client -func newLocalBackendWithTestControl(t *testing.T, enableLogging bool, newControl newTestControlFn) *LocalBackend { +func newLocalBackendWithTestControl(t testing.TB, enableLogging bool, newControl newTestControlFn) *LocalBackend { bus := eventbustest.NewBus(t) return newLocalBackendWithSysAndTestControl(t, enableLogging, tsd.NewSystemWithBus(bus), newControl) } -func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys *tsd.System, newControl newTestControlFn) *LocalBackend { +func newLocalBackendWithSysAndTestControl(t testing.TB, enableLogging bool, sys *tsd.System, newControl newTestControlFn) *LocalBackend { logf := logger.Discard if enableLogging { logf = tstest.WhileTestRunningLogger(t) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index ca281fbec..2197112b2 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1542,6 +1542,11 @@ func TestEngineReconfigOnStateChange(t *testing.T) { tt.steps(t, lb, cc) } + // TODO(bradfitz): this whole event bus settling thing + // should be unnecessary once the bogus uses of eventbus + // are removed. 
(https://github.com/tailscale/tailscale/issues/16369) + lb.settleEventBus() + if gotState := lb.State(); gotState != tt.wantState { t.Errorf("State: got %v; want %v", gotState, tt.wantState) } @@ -1572,35 +1577,30 @@ func TestEngineReconfigOnStateChange(t *testing.T) { } } -// TestStateMachineURLRace tests that wgengine updates arriving in the middle of +// TestSendPreservesAuthURL tests that wgengine updates arriving in the middle of // processing an auth URL doesn't result in the auth URL being cleared. -func TestStateMachineURLRace(t *testing.T) { - runTestStateMachineURLRace(t, false) +func TestSendPreservesAuthURL(t *testing.T) { + runTestSendPreservesAuthURL(t, false) } -func TestStateMachineURLRaceSeamless(t *testing.T) { - runTestStateMachineURLRace(t, true) +func TestSendPreservesAuthURLSeamless(t *testing.T) { + runTestSendPreservesAuthURL(t, true) } -func runTestStateMachineURLRace(t *testing.T, seamless bool) { +func runTestSendPreservesAuthURL(t *testing.T, seamless bool) { var cc *mockControl b := newLocalBackendWithTestControl(t, true, func(tb testing.TB, opts controlclient.Options) controlclient.Client { cc = newClient(t, opts) return cc }) - nw := newNotificationWatcher(t, b, &ipnauth.TestActor{}) - t.Logf("Start") - nw.watch(0, []wantedNotification{ - wantStateNotify(ipn.NeedsLogin)}) b.Start(ipn.Options{ UpdatePrefs: &ipn.Prefs{ WantRunning: true, ControlURL: "https://localhost:1/", }, }) - nw.check() t.Logf("LoginFinished") cc.persist.UserProfile.LoginName = "user1" @@ -1610,72 +1610,16 @@ func runTestStateMachineURLRace(t *testing.T, seamless bool) { b.sys.ControlKnobs().SeamlessKeyRenewal.Store(true) } - nw.watch(0, []wantedNotification{ - wantStateNotify(ipn.Starting)}) cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), }}) - nw.check() t.Logf("Running") - nw.watch(0, []wantedNotification{ - wantStateNotify(ipn.Running)}) b.setWgengineStatus(&wgengine.Status{AsOf: time.Now(), DERPs: 1}, nil) - nw.check() t.Logf("Re-auth (StartLoginInteractive)") b.StartLoginInteractive(t.Context()) - stop := make(chan struct{}) - stopSpamming := sync.OnceFunc(func() { - stop <- struct{}{} - }) - // if seamless renewal is enabled, the engine won't be disabled, and we won't - // ever call stopSpamming, so make sure it does get called - defer stopSpamming() - - // Intercept updates between the engine and localBackend, so that we can see - // when the "stopped" update comes in and ensure we stop sending our "we're - // up" updates after that point. - b.e.SetStatusCallback(func(s *wgengine.Status, err error) { - // This is not one of our fake status updates, this is generated from the - // engine in response to LocalBackend calling RequestStatus. Stop spamming - // our fake statuses. - // - // TODO(zofrex): This is fragile, it works right now but would break if the - // calling pattern of RequestStatus changes. We should ensure that we keep - // sending "we're up" statuses right until Reconfig is called with - // zero-valued configs, and after that point only send "stopped" statuses. - stopSpamming() - - // Once stopSpamming returns we are guaranteed to not send any more updates, - // so we can now send the real update (indicating shutdown) and be certain - // it will be received after any fake updates we sent. This is possibly a - // stronger guarantee than we get from the real engine? 
- b.setWgengineStatus(s, err) - }) - - // time needs to be >= last time for the status to be accepted, send all our - // spam with the same stale time so that when a real update comes in it will - // definitely be accepted. - time := b.lastStatusTime - - // Flood localBackend with a lot of wgengine status updates, so if there are - // any race conditions in the multiple locks/unlocks that happen as we process - // the received auth URL, we will hit them. - go func() { - t.Logf("sending lots of fake wgengine status updates") - for { - select { - case <-stop: - t.Logf("stopping fake wgengine status updates") - return - default: - b.setWgengineStatus(&wgengine.Status{AsOf: time, DERPs: 1}, nil) - } - } - }() - t.Logf("Re-auth (receive URL)") url1 := "https://localhost:1/1" cc.send(sendOpt{url: url1}) @@ -1685,122 +1629,11 @@ func runTestStateMachineURLRace(t *testing.T, seamless bool) { // status update to trample it have ended as well. if b.authURL == "" { t.Fatalf("expected authURL to be set") + } else { + t.Log("authURL was set") } } -func TestWGEngineDownThenUpRace(t *testing.T) { - var cc *mockControl - b := newLocalBackendWithTestControl(t, true, func(tb testing.TB, opts controlclient.Options) controlclient.Client { - cc = newClient(t, opts) - return cc - }) - - nw := newNotificationWatcher(t, b, &ipnauth.TestActor{}) - - t.Logf("Start") - nw.watch(0, []wantedNotification{ - wantStateNotify(ipn.NeedsLogin)}) - b.Start(ipn.Options{ - UpdatePrefs: &ipn.Prefs{ - WantRunning: true, - ControlURL: "https://localhost:1/", - }, - }) - nw.check() - - t.Logf("LoginFinished") - cc.persist.UserProfile.LoginName = "user1" - cc.persist.NodeID = "node1" - - nw.watch(0, []wantedNotification{ - wantStateNotify(ipn.Starting)}) - cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ - SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }}) - nw.check() - - nw.watch(0, []wantedNotification{ - wantStateNotify(ipn.Running)}) - b.setWgengineStatus(&wgengine.Status{AsOf: time.Now(), DERPs: 1}, nil) - nw.check() - - t.Logf("Re-auth (StartLoginInteractive)") - b.StartLoginInteractive(t.Context()) - - var timeLock sync.RWMutex - timestamp := b.lastStatusTime - - engineShutdown := make(chan struct{}) - gotShutdown := sync.OnceFunc(func() { - t.Logf("engineShutdown") - engineShutdown <- struct{}{} - }) - - b.e.SetStatusCallback(func(s *wgengine.Status, err error) { - timeLock.Lock() - if s.AsOf.After(timestamp) { - timestamp = s.AsOf - } - timeLock.Unlock() - - if err != nil || (s.DERPs == 0 && len(s.Peers) == 0) { - gotShutdown() - } else { - b.setWgengineStatus(s, err) - } - }) - - t.Logf("Re-auth (receive URL)") - url1 := "https://localhost:1/1" - - done := make(chan struct{}) - var wg sync.WaitGroup - - wg.Go(func() { - t.Log("cc.send starting") - cc.send(sendOpt{url: url1}) // will block until engine stops - t.Log("cc.send returned") - }) - - <-engineShutdown // will get called once cc.send is blocked - gotShutdown = sync.OnceFunc(func() { - t.Logf("engineShutdown") - engineShutdown <- struct{}{} - }) - - wg.Go(func() { - t.Log("StartLoginInteractive starting") - b.StartLoginInteractive(t.Context()) // will also block until engine stops - t.Log("StartLoginInteractive returned") - }) - - <-engineShutdown // will get called once StartLoginInteractive is blocked - - st := controlclient.Status{} - st.SetStateForTest(controlclient.StateAuthenticated) - b.SetControlClientStatus(cc, st) - - timeLock.RLock() - b.setWgengineStatus(&wgengine.Status{AsOf: timestamp}, nil) // engine is down event finally 
arrives - b.setWgengineStatus(&wgengine.Status{AsOf: timestamp, DERPs: 1}, nil) // engine is back up - timeLock.RUnlock() - - go func() { - wg.Wait() - done <- struct{}{} - }() - - t.Log("waiting for .send and .StartLoginInteractive to return") - - select { - case <-done: - case <-time.After(10 * time.Second): - t.Fatalf("timed out waiting") - } - - t.Log("both returned") -} - func buildNetmapWithPeers(self tailcfg.NodeView, peers ...tailcfg.NodeView) *netmap.NetworkMap { const ( firstAutoUserID = tailcfg.UserID(10000) @@ -2033,6 +1866,14 @@ func (e *mockEngine) RequestStatus() { } } +func (e *mockEngine) ResetAndStop() (*wgengine.Status, error) { + err := e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) + if err != nil { + return nil, err + } + return &wgengine.Status{AsOf: time.Now()}, nil +} + func (e *mockEngine) PeerByKey(key.NodePublic) (_ wgint.Peer, ok bool) { return wgint.Peer{}, false } diff --git a/util/execqueue/execqueue.go b/util/execqueue/execqueue.go index 889cea255..dce70c542 100644 --- a/util/execqueue/execqueue.go +++ b/util/execqueue/execqueue.go @@ -12,6 +12,8 @@ import ( type ExecQueue struct { mu sync.Mutex + ctx context.Context // context.Background + closed on Shutdown + cancel context.CancelFunc // closes ctx closed bool inFlight bool // whether a goroutine is running q.run doneWaiter chan struct{} // non-nil if waiter is waiting, then closed @@ -24,6 +26,7 @@ func (q *ExecQueue) Add(f func()) { if q.closed { return } + q.initCtxLocked() if q.inFlight { q.queue = append(q.queue, f) } else { @@ -79,18 +82,32 @@ func (q *ExecQueue) Shutdown() { q.mu.Lock() defer q.mu.Unlock() q.closed = true + if q.cancel != nil { + q.cancel() + } } -// Wait waits for the queue to be empty. +func (q *ExecQueue) initCtxLocked() { + if q.ctx == nil { + q.ctx, q.cancel = context.WithCancel(context.Background()) + } +} + +// Wait waits for the queue to be empty or shut down. func (q *ExecQueue) Wait(ctx context.Context) error { q.mu.Lock() + q.initCtxLocked() waitCh := q.doneWaiter if q.inFlight && waitCh == nil { waitCh = make(chan struct{}) q.doneWaiter = waitCh } + closed := q.closed q.mu.Unlock() + if closed { + return errors.New("execqueue shut down") + } if waitCh == nil { return nil } @@ -98,6 +115,8 @@ func (q *ExecQueue) Wait(ctx context.Context) error { select { case <-waitCh: return nil + case <-q.ctx.Done(): + return errors.New("execqueue shut down") case <-ctx.Done(): return ctx.Err() } diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 1e70856ca..8ad771fc5 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -47,6 +47,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/views" + "tailscale.com/util/backoff" "tailscale.com/util/checkchange" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" @@ -924,6 +925,32 @@ func hasOverlap(aips, rips views.Slice[netip.Prefix]) bool { return false } +// ResetAndStop resets the engine to a clean state (like calling Reconfig +// with all pointers to zero values) and waits for it to be fully stopped, +// with no live peers or DERPs. +// +// Unlike Reconfig, it does not return ErrNoChanges. +// +// If the engine stops, returns the status. NB that this status will not be sent +// to the registered status callback, it is on the caller to ensure this status +// is handled appropriately. 
+func (e *userspaceEngine) ResetAndStop() (*Status, error) { + if err := e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}); err != nil && !errors.Is(err, ErrNoChanges) { + return nil, err + } + bo := backoff.NewBackoff("UserspaceEngineResetAndStop", e.logf, 1*time.Second) + for { + st, err := e.getStatus() + if err != nil { + return nil, err + } + if len(st.Peers) == 0 && st.DERPs == 0 { + return st, nil + } + bo.BackOff(context.Background(), fmt.Errorf("waiting for engine to stop: peers=%d derps=%d", len(st.Peers), st.DERPs)) + } +} + func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, dnsCfg *dns.Config) error { if routerCfg == nil { panic("routerCfg must not be nil") diff --git a/wgengine/watchdog.go b/wgengine/watchdog.go index 0500e6f7f..9cc4ed3b5 100644 --- a/wgengine/watchdog.go +++ b/wgengine/watchdog.go @@ -124,6 +124,12 @@ func (e *watchdogEngine) watchdog(name string, fn func()) { func (e *watchdogEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, dnsCfg *dns.Config) error { return e.watchdogErr("Reconfig", func() error { return e.wrap.Reconfig(cfg, routerCfg, dnsCfg) }) } +func (e *watchdogEngine) ResetAndStop() (st *Status, err error) { + e.watchdog("ResetAndStop", func() { + st, err = e.wrap.ResetAndStop() + }) + return st, err +} func (e *watchdogEngine) GetFilter() *filter.Filter { return e.wrap.GetFilter() } diff --git a/wgengine/wgengine.go b/wgengine/wgengine.go index 6aaf567ad..be7873147 100644 --- a/wgengine/wgengine.go +++ b/wgengine/wgengine.go @@ -69,6 +69,13 @@ type Engine interface { // The returned error is ErrNoChanges if no changes were made. Reconfig(*wgcfg.Config, *router.Config, *dns.Config) error + // ResetAndStop resets the engine to a clean state (like calling Reconfig + // with all pointers to zero values) and waits for it to be fully stopped, + // with no live peers or DERPs. + // + // Unlike Reconfig, it does not return ErrNoChanges. + ResetAndStop() (*Status, error) + // PeerForIP returns the node to which the provided IP routes, // if any. If none is found, (nil, false) is returned. PeerForIP(netip.Addr) (_ PeerForIP, ok bool) From 9e4d1fd87fc3ab6cfa1b91c7a7c3ced53348fb02 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 13 Nov 2025 20:57:48 -0800 Subject: [PATCH 1650/1708] feature/relayserver,ipn/ipnlocal,net/udprelay: plumb DERPMap (#17881) This commit replaces usage of local.Client in net/udprelay with DERPMap plumbing over the eventbus. This has been a longstanding TODO. This work was also accelerated by a memory leak in net/http when using local.Client over long periods of time. So, this commit also addresses said leak. 
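For illustration only, a minimal sketch of the publish/subscribe shape this
change adopts, assembled from the eventbus helpers that appear in the diff
below (Bus.Client, eventbus.Publish, eventbus.SubscribeFunc,
Publisher.Publish). The client names, the bus setup, and the empty DERPMap
value are placeholders, not part of the actual change:

    package main

    import (
        "fmt"

        "tailscale.com/tailcfg"
        "tailscale.com/util/eventbus"
    )

    func main() {
        bus := eventbus.New()

        // Consumer side (the relayserver extension): receive DERP map views
        // and hand them to the relay server via SetDERPMapView.
        consumer := bus.Client("relayserver.extension")
        defer consumer.Close()
        eventbus.SubscribeFunc(consumer, func(view tailcfg.DERPMapView) {
            // Delivery is asynchronous; a real consumer runs for the life
            // of the process rather than exiting immediately like this demo.
            fmt.Println("DERP map update; valid:", view.Valid())
        })

        // Producer side (the ipnlocal node backend): publish a view whenever
        // a new netmap, and hence a new DERP map, arrives.
        producer := bus.Client("example.producer")
        defer producer.Close()
        pub := eventbus.Publish[tailcfg.DERPMapView](producer)
        pub.Publish((&tailcfg.DERPMap{}).View())
    }
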
Updates #17801 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 220 ++++++++++++----------- feature/relayserver/relayserver_test.go | 222 +++++++++++++++++++----- ipn/ipnlocal/node_backend.go | 13 +- net/udprelay/server.go | 38 ++-- 4 files changed, 324 insertions(+), 169 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index df2fb4cb7..2646a0cbf 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -21,8 +21,10 @@ import ( "tailscale.com/ipn/ipnext" "tailscale.com/ipn/localapi" "tailscale.com/net/udprelay" + "tailscale.com/net/udprelay/endpoint" "tailscale.com/net/udprelay/status" "tailscale.com/tailcfg" + "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/ptr" "tailscale.com/util/eventbus" @@ -68,25 +70,41 @@ func servePeerRelayDebugSessions(h *localapi.Handler, w http.ResponseWriter, r * // extension. It is registered with [ipnext.RegisterExtension] if the package is // imported. func newExtension(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { - return &extension{ + e := &extension{ + newServerFn: func(logf logger.Logf, port int, overrideAddrs []netip.Addr) (relayServer, error) { + return udprelay.NewServer(logf, port, overrideAddrs) + }, logf: logger.WithPrefix(logf, featureName+": "), - bus: sb.Sys().Bus.Get(), - }, nil + } + e.ec = sb.Sys().Bus.Get().Client("relayserver.extension") + e.respPub = eventbus.Publish[magicsock.UDPRelayAllocResp](e.ec) + eventbus.SubscribeFunc(e.ec, e.onDERPMapView) + eventbus.SubscribeFunc(e.ec, e.onAllocReq) + return e, nil +} + +// relayServer is an interface for [udprelay.Server]. +type relayServer interface { + Close() error + AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.ServerEndpoint, error) + GetSessions() []status.ServerSession + SetDERPMapView(tailcfg.DERPMapView) } // extension is an [ipnext.Extension] managing the relay server on platforms // that import this package. type extension struct { - logf logger.Logf - bus *eventbus.Bus + newServerFn func(logf logger.Logf, port int, overrideAddrs []netip.Addr) (relayServer, error) // swappable for tests + logf logger.Logf + ec *eventbus.Client + respPub *eventbus.Publisher[magicsock.UDPRelayAllocResp] - mu sync.Mutex // guards the following fields - shutdown bool - - port *int // ipn.Prefs.RelayServerPort, nil if disabled - eventSubs *eventbus.Monitor // nil if not connected to eventbus - debugSessionsCh chan chan []status.ServerSession // non-nil if consumeEventbusTopics is running - hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer + mu sync.Mutex // guards the following fields + shutdown bool // true if Shutdown() has been called + rs relayServer // nil when disabled + port *int // ipn.Prefs.RelayServerPort, nil if disabled + derpMapView tailcfg.DERPMapView // latest seen over the eventbus + hasNodeAttrDisableRelayServer bool // [tailcfg.NodeAttrDisableRelayServer] } // Name implements [ipnext.Extension]. @@ -104,26 +122,83 @@ func (e *extension) Init(host ipnext.Host) error { return nil } -// handleBusLifetimeLocked handles the lifetime of consumeEventbusTopics. 
-func (e *extension) handleBusLifetimeLocked() { - busShouldBeRunning := !e.shutdown && e.port != nil && !e.hasNodeAttrDisableRelayServer - if !busShouldBeRunning { - e.disconnectFromBusLocked() +func (e *extension) onDERPMapView(view tailcfg.DERPMapView) { + e.mu.Lock() + defer e.mu.Unlock() + e.derpMapView = view + if e.rs != nil { + e.rs.SetDERPMapView(view) + } +} + +func (e *extension) onAllocReq(req magicsock.UDPRelayAllocReq) { + e.mu.Lock() + defer e.mu.Unlock() + if e.shutdown { + return + } + if e.rs == nil { + if !e.relayServerShouldBeRunningLocked() { + return + } + e.tryStartRelayServerLocked() + if e.rs == nil { + return + } + } + se, err := e.rs.AllocateEndpoint(req.Message.ClientDisco[0], req.Message.ClientDisco[1]) + if err != nil { + e.logf("error allocating endpoint: %v", err) + return + } + e.respPub.Publish(magicsock.UDPRelayAllocResp{ + ReqRxFromNodeKey: req.RxFromNodeKey, + ReqRxFromDiscoKey: req.RxFromDiscoKey, + Message: &disco.AllocateUDPRelayEndpointResponse{ + Generation: req.Message.Generation, + UDPRelayEndpoint: disco.UDPRelayEndpoint{ + ServerDisco: se.ServerDisco, + ClientDisco: se.ClientDisco, + LamportID: se.LamportID, + VNI: se.VNI, + BindLifetime: se.BindLifetime.Duration, + SteadyStateLifetime: se.SteadyStateLifetime.Duration, + AddrPorts: se.AddrPorts, + }, + }, + }) +} + +func (e *extension) tryStartRelayServerLocked() { + rs, err := e.newServerFn(e.logf, *e.port, overrideAddrs()) + if err != nil { + e.logf("error initializing server: %v", err) return - } else if e.eventSubs != nil { - return // already running } + e.rs = rs + e.rs.SetDERPMapView(e.derpMapView) +} - ec := e.bus.Client("relayserver.extension") - e.debugSessionsCh = make(chan chan []status.ServerSession) - e.eventSubs = ptr.To(ec.Monitor(e.consumeEventbusTopics(ec, *e.port))) +func (e *extension) relayServerShouldBeRunningLocked() bool { + return !e.shutdown && e.port != nil && !e.hasNodeAttrDisableRelayServer +} + +// handleRelayServerLifetimeLocked handles the lifetime of [e.rs]. +func (e *extension) handleRelayServerLifetimeLocked() { + if !e.relayServerShouldBeRunningLocked() { + e.stopRelayServerLocked() + return + } else if e.rs != nil { + return // already running + } + e.tryStartRelayServerLocked() } func (e *extension) selfNodeViewChanged(nodeView tailcfg.NodeView) { e.mu.Lock() defer e.mu.Unlock() e.hasNodeAttrDisableRelayServer = nodeView.HasCap(tailcfg.NodeAttrDisableRelayServer) - e.handleBusLifetimeLocked() + e.handleRelayServerLifetimeLocked() } func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { @@ -133,13 +208,13 @@ func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsV enableOrDisableServer := ok != (e.port != nil) portChanged := ok && e.port != nil && newPort != *e.port if enableOrDisableServer || portChanged || !sameNode { - e.disconnectFromBusLocked() + e.stopRelayServerLocked() e.port = nil if ok { e.port = ptr.To(newPort) } } - e.handleBusLifetimeLocked() + e.handleRelayServerLifetimeLocked() } // overrideAddrs returns TS_DEBUG_RELAY_SERVER_ADDRS as []netip.Addr, if set. It @@ -162,88 +237,20 @@ var overrideAddrs = sync.OnceValue(func() (ret []netip.Addr) { return }) -// consumeEventbusTopics serves endpoint allocation requests over the eventbus. -// It also serves [relayServer] debug information on a channel. 
-// consumeEventbusTopics must never acquire [extension.mu], which can be held -// by other goroutines while waiting to receive on [extension.eventSubs] or the -// inner [extension.debugSessionsCh] channel. -func (e *extension) consumeEventbusTopics(ec *eventbus.Client, port int) func(*eventbus.Client) { - reqSub := eventbus.Subscribe[magicsock.UDPRelayAllocReq](ec) - respPub := eventbus.Publish[magicsock.UDPRelayAllocResp](ec) - debugSessionsCh := e.debugSessionsCh - - return func(ec *eventbus.Client) { - rs, err := udprelay.NewServer(e.logf, port, overrideAddrs()) - if err != nil { - e.logf("error initializing server: %v", err) - } - - defer func() { - if rs != nil { - rs.Close() - } - }() - for { - select { - case <-ec.Done(): - return - case respCh := <-debugSessionsCh: - if rs == nil { - respCh <- nil - continue - } - sessions := rs.GetSessions() - respCh <- sessions - case req := <-reqSub.Events(): - if rs == nil { - // The server may have previously failed to initialize if - // the configured port was in use, try again. - rs, err = udprelay.NewServer(e.logf, port, overrideAddrs()) - if err != nil { - e.logf("error initializing server: %v", err) - continue - } - } - se, err := rs.AllocateEndpoint(req.Message.ClientDisco[0], req.Message.ClientDisco[1]) - if err != nil { - e.logf("error allocating endpoint: %v", err) - continue - } - respPub.Publish(magicsock.UDPRelayAllocResp{ - ReqRxFromNodeKey: req.RxFromNodeKey, - ReqRxFromDiscoKey: req.RxFromDiscoKey, - Message: &disco.AllocateUDPRelayEndpointResponse{ - Generation: req.Message.Generation, - UDPRelayEndpoint: disco.UDPRelayEndpoint{ - ServerDisco: se.ServerDisco, - ClientDisco: se.ClientDisco, - LamportID: se.LamportID, - VNI: se.VNI, - BindLifetime: se.BindLifetime.Duration, - SteadyStateLifetime: se.SteadyStateLifetime.Duration, - AddrPorts: se.AddrPorts, - }, - }, - }) - } - } - } -} - -func (e *extension) disconnectFromBusLocked() { - if e.eventSubs != nil { - e.eventSubs.Close() - e.eventSubs = nil - e.debugSessionsCh = nil +func (e *extension) stopRelayServerLocked() { + if e.rs != nil { + e.rs.Close() } + e.rs = nil } // Shutdown implements [ipnlocal.Extension]. 
func (e *extension) Shutdown() error { e.mu.Lock() defer e.mu.Unlock() - e.disconnectFromBusLocked() e.shutdown = true + e.ec.Close() + e.stopRelayServerLocked() return nil } @@ -253,23 +260,14 @@ func (e *extension) Shutdown() error { func (e *extension) serverStatus() status.ServerStatus { e.mu.Lock() defer e.mu.Unlock() - st := status.ServerStatus{ UDPPort: nil, Sessions: nil, } - if e.port == nil || e.eventSubs == nil { + if e.rs == nil { return st } st.UDPPort = ptr.To(*e.port) - - ch := make(chan []status.ServerSession) - select { - case e.debugSessionsCh <- ch: - resp := <-ch - st.Sessions = resp - return st - case <-e.eventSubs.Done(): - return st - } + st.Sessions = e.rs.GetSessions() + return st } diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index 65c503524..2184b5175 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -4,13 +4,20 @@ package relayserver import ( + "errors" + "net/netip" + "reflect" "testing" "tailscale.com/ipn" + "tailscale.com/net/udprelay/endpoint" + "tailscale.com/net/udprelay/status" + "tailscale.com/tailcfg" "tailscale.com/tsd" + "tailscale.com/tstime" + "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/ptr" - "tailscale.com/util/eventbus" ) func Test_extension_profileStateChanged(t *testing.T) { @@ -19,29 +26,33 @@ func Test_extension_profileStateChanged(t *testing.T) { type fields struct { port *int + rs relayServer } type args struct { prefs ipn.PrefsView sameNode bool } tests := []struct { - name string - fields fields - args args - wantPort *int - wantBusRunning bool + name string + fields fields + args args + wantPort *int + wantRelayServerFieldNonNil bool + wantRelayServerFieldMutated bool }{ { - name: "no changes non-nil port", + name: "no changes non-nil port previously running", fields: fields{ port: ptr.To(1), + rs: mockRelayServerNotZeroVal(), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(1), - wantBusRunning: true, + wantPort: ptr.To(1), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: false, }, { name: "prefs port nil", @@ -52,8 +63,23 @@ func Test_extension_profileStateChanged(t *testing.T) { prefs: prefsWithNilPort.View(), sameNode: true, }, - wantPort: nil, - wantBusRunning: false, + wantPort: nil, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: false, + }, + { + name: "prefs port nil previously running", + fields: fields{ + port: ptr.To(1), + rs: mockRelayServerNotZeroVal(), + }, + args: args{ + prefs: prefsWithNilPort.View(), + sameNode: true, + }, + wantPort: nil, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: true, }, { name: "prefs port changed", @@ -64,8 +90,23 @@ func Test_extension_profileStateChanged(t *testing.T) { prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(1), - wantBusRunning: true, + wantPort: ptr.To(1), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, + }, + { + name: "prefs port changed previously running", + fields: fields{ + port: ptr.To(2), + rs: mockRelayServerNotZeroVal(), + }, + args: args{ + prefs: prefsWithPortOne.View(), + sameNode: true, + }, + wantPort: ptr.To(1), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, }, { name: "sameNode false", @@ -76,8 +117,23 @@ func Test_extension_profileStateChanged(t *testing.T) { prefs: prefsWithPortOne.View(), sameNode: false, }, - wantPort: ptr.To(1), - wantBusRunning: true, + 
wantPort: ptr.To(1), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, + }, + { + name: "sameNode false previously running", + fields: fields{ + port: ptr.To(1), + rs: mockRelayServerNotZeroVal(), + }, + args: args{ + prefs: prefsWithPortOne.View(), + sameNode: false, + }, + wantPort: ptr.To(1), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, }, { name: "prefs port non-nil extension port nil", @@ -88,85 +144,165 @@ func Test_extension_profileStateChanged(t *testing.T) { prefs: prefsWithPortOne.View(), sameNode: false, }, - wantPort: ptr.To(1), - wantBusRunning: true, + wantPort: ptr.To(1), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { sys := tsd.NewSystem() - bus := sys.Bus.Get() - e := &extension{ - logf: logger.Discard, - port: tt.fields.port, - bus: bus, + ipne, err := newExtension(logger.Discard, mockSafeBackend{sys}) + if err != nil { + t.Fatal(err) + } + e := ipne.(*extension) + e.newServerFn = func(logf logger.Logf, port int, overrideAddrs []netip.Addr) (relayServer, error) { + return &mockRelayServer{}, nil } - defer e.disconnectFromBusLocked() + e.port = tt.fields.port + e.rs = tt.fields.rs + defer e.Shutdown() e.profileStateChanged(ipn.LoginProfileView{}, tt.args.prefs, tt.args.sameNode) - if tt.wantBusRunning != (e.eventSubs != nil) { - t.Errorf("wantBusRunning: %v != (e.eventSubs != nil): %v", tt.wantBusRunning, e.eventSubs != nil) + if tt.wantRelayServerFieldNonNil != (e.rs != nil) { + t.Errorf("wantRelayServerFieldNonNil: %v != (e.rs != nil): %v", tt.wantRelayServerFieldNonNil, e.rs != nil) } if (tt.wantPort == nil) != (e.port == nil) { t.Errorf("(tt.wantPort == nil): %v != (e.port == nil): %v", tt.wantPort == nil, e.port == nil) } else if tt.wantPort != nil && *tt.wantPort != *e.port { t.Errorf("wantPort: %d != *e.port: %d", *tt.wantPort, *e.port) } + if tt.wantRelayServerFieldMutated != !reflect.DeepEqual(tt.fields.rs, e.rs) { + t.Errorf("wantRelayServerFieldMutated: %v != !reflect.DeepEqual(tt.fields.rs, e.rs): %v", tt.wantRelayServerFieldMutated, !reflect.DeepEqual(tt.fields.rs, e.rs)) + } }) } } -func Test_extension_handleBusLifetimeLocked(t *testing.T) { +func mockRelayServerNotZeroVal() *mockRelayServer { + return &mockRelayServer{true} +} + +type mockRelayServer struct { + set bool +} + +func (mockRelayServer) Close() error { return nil } +func (mockRelayServer) AllocateEndpoint(_, _ key.DiscoPublic) (endpoint.ServerEndpoint, error) { + return endpoint.ServerEndpoint{}, errors.New("not implemented") +} +func (mockRelayServer) GetSessions() []status.ServerSession { return nil } +func (mockRelayServer) SetDERPMapView(tailcfg.DERPMapView) { return } + +type mockSafeBackend struct { + sys *tsd.System +} + +func (m mockSafeBackend) Sys() *tsd.System { return m.sys } +func (mockSafeBackend) Clock() tstime.Clock { return nil } +func (mockSafeBackend) TailscaleVarRoot() string { return "" } + +func Test_extension_handleRelayServerLifetimeLocked(t *testing.T) { tests := []struct { name string shutdown bool port *int - eventSubs *eventbus.Monitor + rs relayServer hasNodeAttrDisableRelayServer bool - wantBusRunning bool + wantRelayServerFieldNonNil bool + wantRelayServerFieldMutated bool }{ { name: "want running", shutdown: false, port: ptr.To(1), hasNodeAttrDisableRelayServer: false, - wantBusRunning: true, + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, + }, + { + name: "want running previously running", + 
shutdown: false, + port: ptr.To(1), + rs: mockRelayServerNotZeroVal(), + hasNodeAttrDisableRelayServer: false, + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: false, }, { name: "shutdown true", shutdown: true, port: ptr.To(1), hasNodeAttrDisableRelayServer: false, - wantBusRunning: false, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: false, + }, + { + name: "shutdown true previously running", + shutdown: true, + port: ptr.To(1), + rs: mockRelayServerNotZeroVal(), + hasNodeAttrDisableRelayServer: false, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: true, }, { name: "port nil", shutdown: false, port: nil, hasNodeAttrDisableRelayServer: false, - wantBusRunning: false, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: false, + }, + { + name: "port nil previously running", + shutdown: false, + port: nil, + rs: mockRelayServerNotZeroVal(), + hasNodeAttrDisableRelayServer: false, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: true, }, { name: "hasNodeAttrDisableRelayServer true", shutdown: false, port: nil, hasNodeAttrDisableRelayServer: true, - wantBusRunning: false, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: false, + }, + { + name: "hasNodeAttrDisableRelayServer true previously running", + shutdown: false, + port: nil, + rs: mockRelayServerNotZeroVal(), + hasNodeAttrDisableRelayServer: true, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - e := &extension{ - logf: logger.Discard, - bus: eventbus.New(), - shutdown: tt.shutdown, - port: tt.port, - eventSubs: tt.eventSubs, - hasNodeAttrDisableRelayServer: tt.hasNodeAttrDisableRelayServer, + sys := tsd.NewSystem() + ipne, err := newExtension(logger.Discard, mockSafeBackend{sys}) + if err != nil { + t.Fatal(err) + } + e := ipne.(*extension) + e.newServerFn = func(logf logger.Logf, port int, overrideAddrs []netip.Addr) (relayServer, error) { + return &mockRelayServer{}, nil + } + e.shutdown = tt.shutdown + e.port = tt.port + e.rs = tt.rs + e.hasNodeAttrDisableRelayServer = tt.hasNodeAttrDisableRelayServer + e.handleRelayServerLifetimeLocked() + defer e.Shutdown() + if tt.wantRelayServerFieldNonNil != (e.rs != nil) { + t.Errorf("wantRelayServerFieldNonNil: %v != (e.rs != nil): %v", tt.wantRelayServerFieldNonNil, e.rs != nil) } - e.handleBusLifetimeLocked() - defer e.disconnectFromBusLocked() - if tt.wantBusRunning != (e.eventSubs != nil) { - t.Errorf("wantBusRunning: %v != (e.eventSubs != nil): %v", tt.wantBusRunning, e.eventSubs != nil) + if tt.wantRelayServerFieldMutated != !reflect.DeepEqual(tt.rs, e.rs) { + t.Errorf("wantRelayServerFieldMutated: %v != !reflect.DeepEqual(tt.rs, e.rs): %v", tt.wantRelayServerFieldMutated, !reflect.DeepEqual(tt.rs, e.rs)) } }) } diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 3408d4cbb..dbe23e4d5 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -75,10 +75,11 @@ type nodeBackend struct { filterAtomic atomic.Pointer[filter.Filter] // initialized once and immutable - eventClient *eventbus.Client - filterPub *eventbus.Publisher[magicsock.FilterUpdate] - nodeViewsPub *eventbus.Publisher[magicsock.NodeViewsUpdate] - nodeMutsPub *eventbus.Publisher[magicsock.NodeMutationsUpdate] + eventClient *eventbus.Client + filterPub *eventbus.Publisher[magicsock.FilterUpdate] + nodeViewsPub *eventbus.Publisher[magicsock.NodeViewsUpdate] + nodeMutsPub 
*eventbus.Publisher[magicsock.NodeMutationsUpdate] + derpMapViewPub *eventbus.Publisher[tailcfg.DERPMapView] // TODO(nickkhyl): maybe use sync.RWMutex? mu sync.Mutex // protects the following fields @@ -121,6 +122,7 @@ func newNodeBackend(ctx context.Context, logf logger.Logf, bus *eventbus.Bus) *n nb.filterPub = eventbus.Publish[magicsock.FilterUpdate](nb.eventClient) nb.nodeViewsPub = eventbus.Publish[magicsock.NodeViewsUpdate](nb.eventClient) nb.nodeMutsPub = eventbus.Publish[magicsock.NodeMutationsUpdate](nb.eventClient) + nb.derpMapViewPub = eventbus.Publish[tailcfg.DERPMapView](nb.eventClient) nb.filterPub.Publish(magicsock.FilterUpdate{Filter: nb.filterAtomic.Load()}) return nb } @@ -435,6 +437,9 @@ func (nb *nodeBackend) SetNetMap(nm *netmap.NetworkMap) { if nm != nil { nv.SelfNode = nm.SelfNode nv.Peers = nm.Peers + nb.derpMapViewPub.Publish(nm.DERPMap.View()) + } else { + nb.derpMapViewPub.Publish(tailcfg.DERPMapView{}) } nb.nodeViewsPub.Publish(nv) } diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 69e0de095..c050c9416 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -21,7 +21,6 @@ import ( "go4.org/mem" "golang.org/x/net/ipv6" - "tailscale.com/client/local" "tailscale.com/disco" "tailscale.com/net/batching" "tailscale.com/net/netaddr" @@ -32,6 +31,7 @@ import ( "tailscale.com/net/stun" "tailscale.com/net/udprelay/endpoint" "tailscale.com/net/udprelay/status" + "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" @@ -72,7 +72,8 @@ type Server struct { closeCh chan struct{} netChecker *netcheck.Client - mu sync.Mutex // guards the following fields + mu sync.Mutex // guards the following fields + derpMap *tailcfg.DERPMap addrDiscoveryOnce bool // addrDiscovery completed once (successfully or unsuccessfully) addrPorts []netip.AddrPort // the ip:port pairs returned as candidate endpoints closed bool @@ -374,15 +375,12 @@ func (s *Server) addrDiscoveryLoop() { } } - // fetch DERPMap to feed to netcheck - derpMapCtx, derpMapCancel := context.WithTimeout(context.Background(), time.Second) - defer derpMapCancel() - localClient := &local.Client{} - // TODO(jwhited): We are in-process so use eventbus or similar. - // local.Client gets us going. - dm, err := localClient.CurrentDERPMap(derpMapCtx) - if err != nil { - return nil, err + dm := s.getDERPMap() + if dm == nil { + // We don't have a DERPMap which is required to dynamically + // discover external addresses, but we can return the endpoints we + // do have. + return addrPorts.Slice(), nil } // get addrPorts as visible from DERP @@ -864,3 +862,21 @@ func (s *Server) GetSessions() []status.ServerSession { } return sessions } + +// SetDERPMapView sets the [tailcfg.DERPMapView] to use for future netcheck +// reports. 
+func (s *Server) SetDERPMapView(view tailcfg.DERPMapView) { + s.mu.Lock() + defer s.mu.Unlock() + if !view.Valid() { + s.derpMap = nil + return + } + s.derpMap = view.AsStruct() +} + +func (s *Server) getDERPMap() *tailcfg.DERPMap { + s.mu.Lock() + defer s.mu.Unlock() + return s.derpMap +} From b5cd29932ef1836ae40b6a6f2688212f1227922d Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 7 Nov 2025 12:04:18 -0500 Subject: [PATCH 1651/1708] tka: add a test for unmarshaling existing AUMs Updates https://github.com/tailscale/tailscale/issues/17613 Change-Id: I693a580949eef59263353af6e7e03a7af9bbaa0b Signed-off-by: Alex Chan --- tka/aum_test.go | 76 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/tka/aum_test.go b/tka/aum_test.go index 4297efabf..833a02654 100644 --- a/tka/aum_test.go +++ b/tka/aum_test.go @@ -5,6 +5,8 @@ package tka import ( "bytes" + "encoding/base64" + "fmt" "testing" "github.com/google/go-cmp/cmp" @@ -156,6 +158,80 @@ func TestSerialization(t *testing.T) { } } +func fromBase64(s string) []byte { + data, err := base64.StdEncoding.DecodeString(s) + if err != nil { + panic(fmt.Sprintf("base64 decode failed: %v", err)) + } + return data +} + +// This test verifies that we can read AUMs which were serialized with +// older versions of our code. +func TestDeserializeExistingAUMs(t *testing.T) { + for _, tt := range []struct { + Name string + Data []byte + Want AUM + }{ + { + // This is an AUM which was created in a test tailnet, and encoded + // on 2025-11-07 with commit d4c5b27. + Name: "genesis-aum-2025-11-07", + Data: fromBase64("pAEFAvYFpQH2AopYII0sLaLSEZU3W5DT1dG2WYnzjCBr4tXtVbCT2LvA9LS6WCAQhwVGDiUGRiu3P63gucZ/8otjt2DXyk+OBjbh5iWx1Fgg5VU4oRQiMoq5qK00McfpwtmjcheVammLCRwzdp2Zje9YIHDoOXe4ogPSy7lfA/veyPCKM6iZe3PTgzhQZ4W5Sh7wWCBYQtiQ6NcRlyVARJxgAj1BbbvdJQ0t4m+vHqU1J02oDlgg2sksJA+COfsBkrohwHBWlbKrpS8Mvigpl+enuHw9rIJYIB/+CUBBBLUz0KeHu7NKrg5ZEhjjPUWhNcf9QTNHjuNWWCCJuxqPZ6/IASPTmAERaoKnBNH/D+zY4p4TUGHR4fACjFggMtDAipPutgcxKnU9Tg2663gP3KlTQfztV3hBwiePZdRYIGYeD2erBkRouSL20lOnWHHlRq5kmNfN6xFb2CTaPjnXA4KjAQECAQNYIADftG3yaitV/YMoKSBP45zgyeodClumN9ZaeQg/DmCEowEBAgEDWCBRKbmWSzOyHXbHJuYn8s7dmMPDzxmIjgBoA80cBYgItAQbEWOrxfqJzIkFG/5uNUp0s/ScF4GiAVggAN+0bfJqK1X9gygpIE/jnODJ6h0KW6Y31lp5CD8OYIQCWEAENvzblKV2qx6PED5YdGy8kWa7nxEnaeuMmS5Wkx0n7CXs0XxD5f2NIE+pSv9cOsNkfYNndQkYD7ne33hQOsQM"), + Want: AUM{ + MessageKind: AUMCheckpoint, + State: &State{ + DisablementSecrets: [][]byte{ + fromBase64("jSwtotIRlTdbkNPV0bZZifOMIGvi1e1VsJPYu8D0tLo="), + fromBase64("EIcFRg4lBkYrtz+t4LnGf/KLY7dg18pPjgY24eYlsdQ="), + fromBase64("5VU4oRQiMoq5qK00McfpwtmjcheVammLCRwzdp2Zje8="), + fromBase64("cOg5d7iiA9LLuV8D+97I8IozqJl7c9ODOFBnhblKHvA="), + fromBase64("WELYkOjXEZclQEScYAI9QW273SUNLeJvrx6lNSdNqA4="), + fromBase64("2sksJA+COfsBkrohwHBWlbKrpS8Mvigpl+enuHw9rII="), + fromBase64("H/4JQEEEtTPQp4e7s0quDlkSGOM9RaE1x/1BM0eO41Y="), + fromBase64("ibsaj2evyAEj05gBEWqCpwTR/w/s2OKeE1Bh0eHwAow="), + fromBase64("MtDAipPutgcxKnU9Tg2663gP3KlTQfztV3hBwiePZdQ="), + fromBase64("Zh4PZ6sGRGi5IvbSU6dYceVGrmSY183rEVvYJNo+Odc="), + }, + Keys: []Key{ + { + Kind: Key25519, + Votes: 1, + Public: fromBase64("AN+0bfJqK1X9gygpIE/jnODJ6h0KW6Y31lp5CD8OYIQ="), + }, + { + Kind: Key25519, + Votes: 1, + Public: fromBase64("USm5lkszsh12xybmJ/LO3ZjDw88ZiI4AaAPNHAWICLQ="), + }, + }, + StateID1: 1253033988139371657, + StateID2: 18333649726973670556, + }, + Signatures: []tkatype.Signature{ + { + KeyID: fromBase64("AN+0bfJqK1X9gygpIE/jnODJ6h0KW6Y31lp5CD8OYIQ="), + Signature: 
fromBase64("BDb825SldqsejxA+WHRsvJFmu58RJ2nrjJkuVpMdJ+wl7NF8Q+X9jSBPqUr/XDrDZH2DZ3UJGA+53t94UDrEDA=="), + }, + }, + }, + }, + } { + t.Run(tt.Name, func(t *testing.T) { + var got AUM + + if err := got.Unserialize(tt.Data); err != nil { + t.Fatalf("Unserialize: %v", err) + } + + if diff := cmp.Diff(got, tt.Want); diff != "" { + t.Fatalf("wrong AUM (-got, +want):\n%s", diff) + } + }) + } +} + func TestAUMWeight(t *testing.T) { var fakeKeyID [blake2s.Size]byte testingRand(t, 1).Read(fakeKeyID[:]) From 124301fbb651382959f8bfe9b1f1765e42e8a3ef Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Fri, 14 Nov 2025 13:21:56 +0000 Subject: [PATCH 1652/1708] ipn/ipnlocal: log prefs changes and reason in Start (#17876) Updates tailscale/corp#34238 Signed-off-by: James Sanderson --- ipn/ipnlocal/local.go | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 076752469..f0a77531b 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2397,6 +2397,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { func (b *LocalBackend) startLocked(opts ipn.Options) error { b.logf("Start") + logf := logger.WithPrefix(b.logf, "Start: ") b.startOnce.Do(b.initOnce) var clientToShutdown controlclient.Client @@ -2426,7 +2427,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { if b.state != ipn.Running && b.conf == nil && opts.AuthKey == "" { sysak, _ := b.polc.GetString(pkey.AuthKey, "") if sysak != "" { - b.logf("Start: setting opts.AuthKey by syspolicy, len=%v", len(sysak)) + logf("setting opts.AuthKey by syspolicy, len=%v", len(sysak)) opts.AuthKey = strings.TrimSpace(sysak) } } @@ -2459,11 +2460,13 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { cn := b.currentNode() - prefsChanged := false + var prefsChanged bool + var prefsChangedWhy []string newPrefs := b.pm.CurrentPrefs().AsStruct() if opts.UpdatePrefs != nil { newPrefs = opts.UpdatePrefs.Clone() prefsChanged = true + prefsChangedWhy = append(prefsChangedWhy, "opts.UpdatePrefs") } // Apply any syspolicy overrides, resolve exit node ID, etc. 
// As of 2025-07-03, this is primarily needed in two cases: @@ -2471,6 +2474,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { // - when Always Mode is enabled and we need to set WantRunning to true if b.reconcilePrefsLocked(newPrefs) { prefsChanged = true + prefsChangedWhy = append(prefsChangedWhy, "reconcilePrefsLocked") } // neither UpdatePrefs or reconciliation should change Persist @@ -2478,19 +2482,21 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { if buildfeatures.HasTPM { if genKey, ok := feature.HookGenerateAttestationKeyIfEmpty.GetOk(); ok { - newKey, err := genKey(newPrefs.Persist, b.logf) + newKey, err := genKey(newPrefs.Persist, logf) if err != nil { - b.logf("failed to populate attestation key from TPM: %v", err) + logf("failed to populate attestation key from TPM: %v", err) } if newKey { prefsChanged = true + prefsChangedWhy = append(prefsChangedWhy, "newKey") } } } if prefsChanged { + logf("updated prefs: %v, reason: %v", newPrefs.Pretty(), prefsChangedWhy) if err := b.pm.SetPrefs(newPrefs.View(), cn.NetworkProfile()); err != nil { - b.logf("failed to save updated and reconciled prefs: %v", err) + logf("failed to save updated and reconciled prefs (but still using updated prefs in memory): %v", err) } } prefs := newPrefs.View() @@ -2510,7 +2516,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { serverURL := prefs.ControlURLOrDefault(b.polc) if inServerMode := prefs.ForceDaemon(); inServerMode || runtime.GOOS == "windows" { - b.logf("Start: serverMode=%v", inServerMode) + logf("serverMode=%v", inServerMode) } b.applyPrefsToHostinfoLocked(hostinfo, prefs) @@ -2578,7 +2584,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { endpoints := b.endpoints if err := b.initTKALocked(); err != nil { - b.logf("initTKALocked: %v", err) + logf("initTKALocked: %v", err) } var tkaHead string if b.tka != nil { From 0285e1d5fb2b06cd4003ab3a7c1037caa091a85e Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 14 Nov 2025 10:22:58 -0800 Subject: [PATCH 1653/1708] feature/relayserver: fix Shutdown() deadlock (#17898) Updates #17894 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 2646a0cbf..868d5f61a 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -246,10 +246,13 @@ func (e *extension) stopRelayServerLocked() { // Shutdown implements [ipnlocal.Extension]. func (e *extension) Shutdown() error { + // [extension.mu] must not be held when closing the [eventbus.Client]. Close + // blocks until all [eventbus.SubscribeFunc]'s have returned, and the ones + // used in this package also acquire [extension.mu]. See #17894. + e.ec.Close() e.mu.Lock() defer e.mu.Unlock() e.shutdown = true - e.ec.Close() e.stopRelayServerLocked() return nil } From 052602752f57dd2dc273f65811a0946a6c575bda Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 14 Nov 2025 08:39:32 -0800 Subject: [PATCH 1654/1708] control/controlclient: make Observer optional As a baby step towards eventbus-ifying controlclient, make the Observer optional. This also means callers that don't care (like this network lock test, and some tests in other repos) can omit it, rather than passing in a no-op one. 
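For illustration only (not part of this change): callers that still want status
callbacks can keep a tiny adapter of the same shape as the helper this commit
deletes from network-lock_test.go; everyone else can now simply leave
Options.Observer unset.

    package example

    import "tailscale.com/control/controlclient"

    // observerFunc adapts a plain function to the Observer interface.
    // With this change, callers that don't care about status updates can
    // leave Options.Observer nil instead of passing a no-op like this.
    type observerFunc func(controlclient.Status)

    func (f observerFunc) SetControlClientStatus(_ controlclient.Client, s controlclient.Status) {
        f(s)
    }
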
Updates #12639 Change-Id: Ibd776b45b4425c08db19405bc3172b238e87da4e Signed-off-by: Brad Fitzpatrick --- control/controlclient/auto.go | 10 ++++++---- control/controlclient/direct.go | 4 ++++ ipn/ipnlocal/network-lock_test.go | 7 ------- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 40b02a598..50248a647 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -117,7 +117,7 @@ type Auto struct { logf logger.Logf closed bool updateCh chan struct{} // readable when we should inform the server of a change - observer Observer // called to update Client status; always non-nil + observer Observer // if non-nil, called to update Client status observerQueue execqueue.ExecQueue shutdownFn func() // to be called prior to shutdown or nil @@ -170,9 +170,6 @@ func NewNoStart(opts Options) (_ *Auto, err error) { } }() - if opts.Observer == nil { - return nil, errors.New("missing required Options.Observer") - } if opts.Logf == nil { opts.Logf = func(fmt string, args ...any) {} } @@ -609,6 +606,11 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM Err: err, state: state, } + + if c.observer == nil { + return + } + c.lastStatus.Store(newSt) // Launch a new goroutine to avoid blocking the caller while the observer diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 63a12b249..d30db6191 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -115,6 +115,9 @@ type Direct struct { // Observer is implemented by users of the control client (such as LocalBackend) // to get notified of changes in the control client's status. +// +// If an implementation of Observer also implements [NetmapDeltaUpdater], they get +// delta updates as well as full netmap updates. type Observer interface { // SetControlClientStatus is called when the client has a new status to // report. The Client is provided to allow the Observer to track which @@ -145,6 +148,7 @@ type Options struct { // Observer is called when there's a change in status to report // from the control client. + // If nil, no status updates are reported. Observer Observer // SkipIPForwardingCheck declares that the host's IP diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index c7c4c905f..17040fef3 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -41,12 +41,6 @@ import ( "tailscale.com/util/set" ) -type observerFunc func(controlclient.Status) - -func (f observerFunc) SetControlClientStatus(_ controlclient.Client, s controlclient.Status) { - f(s) -} - func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *eventbus.Bus) { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} @@ -64,7 +58,6 @@ func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *even }, HTTPTestClient: c, NoiseTestClient: c, - Observer: observerFunc(func(controlclient.Status) {}), Dialer: dialer, Bus: bus, } From 208a32af5b6c306afb797edd793677f3a6db3306 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Fri, 14 Nov 2025 11:59:09 -0500 Subject: [PATCH 1655/1708] logpolicy: fix nil pointer dereference with invalid TS_LOG_TARGET When TS_LOG_TARGET is set to an invalid URL, url.Parse returns an error and nil pointer, which caused a panic when accessing u.Host. Now we check the error from url.Parse and log a helpful message while falling back to the default log host. 
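A standalone sketch of the guard (illustrative, not the code in this diff): it
reads the value straight from the environment for brevity, whereas the real
code goes through getLogTarget() and falls back to logtail.DefaultHost.

    package main

    import (
        "fmt"
        "net/url"
        "os"
    )

    func main() {
        val := os.Getenv("TS_LOG_TARGET")
        if val == "" {
            fmt.Println("using default log host")
            return
        }
        u, err := url.Parse(val)
        switch {
        case err != nil:
            // Dereferencing u here without checking err is the panic this fixes.
            fmt.Printf("invalid TS_LOG_TARGET %q: %v; using default log host\n", val, err)
        case u.Host == "":
            fmt.Printf("invalid TS_LOG_TARGET %q: missing host; using default log host\n", val)
        default:
            fmt.Printf("sending logs to %s\n", u.Host)
        }
    }
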
Fixes #17792 Signed-off-by: Andrew Dunham --- logpolicy/logpolicy.go | 14 ++++++++---- logpolicy/logpolicy_test.go | 44 +++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+), 4 deletions(-) diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 9c7e62ab0..26858b713 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -640,10 +640,16 @@ func (opts Options) init(disableLogging bool) (*logtail.Config, *Policy) { logHost := logtail.DefaultHost if val := getLogTarget(); val != "" { - opts.Logf("You have enabled a non-default log target. Doing without being told to by Tailscale staff or your network administrator will make getting support difficult.") - conf.BaseURL = val - u, _ := url.Parse(val) - logHost = u.Host + u, err := url.Parse(val) + if err != nil { + opts.Logf("logpolicy: invalid TS_LOG_TARGET %q: %v; using default log host", val, err) + } else if u.Host == "" { + opts.Logf("logpolicy: invalid TS_LOG_TARGET %q: missing host; using default log host", val) + } else { + opts.Logf("You have enabled a non-default log target. Doing without being told to by Tailscale staff or your network administrator will make getting support difficult.") + conf.BaseURL = val + logHost = u.Host + } } if conf.HTTPC == nil { diff --git a/logpolicy/logpolicy_test.go b/logpolicy/logpolicy_test.go index 28f03448a..c09e590bb 100644 --- a/logpolicy/logpolicy_test.go +++ b/logpolicy/logpolicy_test.go @@ -84,3 +84,47 @@ func TestOptions(t *testing.T) { }) } } + +// TestInvalidLogTarget is a test for #17792 +func TestInvalidLogTarget(t *testing.T) { + defer resetLogTarget() + + tests := []struct { + name string + logTarget string + }{ + { + name: "invalid_url_no_scheme", + logTarget: "not a url at all", + }, + { + name: "malformed_url", + logTarget: "ht!tp://invalid", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resetLogTarget() + os.Setenv("TS_LOG_TARGET", tt.logTarget) + + opts := Options{ + Collection: "test.log.tailscale.io", + Logf: t.Logf, + } + + // This should not panic even with invalid log target + config, policy := opts.init(false) + if policy == nil { + t.Fatal("expected non-nil policy") + } + defer policy.Close() + + // When log target is invalid, it should fall back to the invalid value + // but not crash. BaseURL should remain empty + if config.BaseURL != "" { + t.Errorf("got BaseURL=%q, want empty", config.BaseURL) + } + }) + } +} From ce10f7c14cdfc9bdc1c1b26efd7f79d669968a32 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 14 Nov 2025 10:58:53 -0800 Subject: [PATCH 1656/1708] wgengine/wgcfg/nmcfg: reduce wireguard reconfig log spam On the corp tailnet (using Mullvad exit nodes + bunch of expired devices + subnet routers), these were generating big ~35 KB blobs of logging regularly. This logging shouldn't even exist at this level, and should be rate limited at a higher level, but for now as a bandaid, make it less spammy. 
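Roughly, the replacement caps each list at a handful of entries and summarizes
the rest instead of growing a buffer per peer. A simplified, self-contained
sketch of that idea (not the actual logList helper, which writes through
logger.ArgWriter):

    package main

    import (
        "fmt"
        "strings"
    )

    // formatBounded renders at most max names and summarizes the remainder,
    // mirroring the bounded formatting added in this change.
    func formatBounded(names []string, max int) string {
        var b strings.Builder
        for i, n := range names {
            if i == max {
                fmt.Fprintf(&b, "... +%d", len(names)-max)
                break
            }
            if i > 0 {
                b.WriteString(", ")
            }
            b.WriteString(n)
        }
        return b.String()
    }

    func main() {
        peers := []string{"a", "b", "c", "d", "e", "f", "g"}
        fmt.Println(formatBounded(peers, 5)) // prints: a, b, c, d, e... +2
    }
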
Updates #cleanup Change-Id: I0b5e9e6e859f13df5f982cd71cd5af85b73f0c0a Signed-off-by: Brad Fitzpatrick --- wgengine/wgcfg/nmcfg/nmcfg.go | 75 +++++++++++++++-------------------- 1 file changed, 31 insertions(+), 44 deletions(-) diff --git a/wgengine/wgcfg/nmcfg/nmcfg.go b/wgengine/wgcfg/nmcfg/nmcfg.go index 08b162730..28d5345d6 100644 --- a/wgengine/wgcfg/nmcfg/nmcfg.go +++ b/wgengine/wgcfg/nmcfg/nmcfg.go @@ -5,7 +5,8 @@ package nmcfg import ( - "bytes" + "bufio" + "cmp" "fmt" "net/netip" "strings" @@ -18,16 +19,7 @@ import ( ) func nodeDebugName(n tailcfg.NodeView) string { - name := n.Name() - if name == "" { - name = n.Hostinfo().Hostname() - } - if i := strings.Index(name, "."); i != -1 { - name = name[:i] - } - if name == "" && n.Addresses().Len() != 0 { - return n.Addresses().At(0).String() - } + name, _, _ := strings.Cut(cmp.Or(n.Name(), n.Hostinfo().Hostname()), ".") return name } @@ -77,10 +69,7 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, } } - // Logging buffers - skippedUnselected := new(bytes.Buffer) - skippedSubnets := new(bytes.Buffer) - skippedExpired := new(bytes.Buffer) + var skippedExitNode, skippedSubnetRouter, skippedExpired []tailcfg.NodeView for _, peer := range nm.Peers { if peer.DiscoKey().IsZero() && peer.HomeDERP() == 0 && !peer.IsWireGuardOnly() { @@ -93,16 +82,7 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, // anyway, since control intentionally breaks node keys for // expired peers so that we can't discover endpoints via DERP. if peer.Expired() { - if skippedExpired.Len() >= 1<<10 { - if !bytes.HasSuffix(skippedExpired.Bytes(), []byte("...")) { - skippedExpired.WriteString("...") - } - } else { - if skippedExpired.Len() > 0 { - skippedExpired.WriteString(", ") - } - fmt.Fprintf(skippedExpired, "%s/%v", peer.StableID(), peer.Key().ShortString()) - } + skippedExpired = append(skippedExpired, peer) continue } @@ -112,28 +92,22 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, }) cpeer := &cfg.Peers[len(cfg.Peers)-1] - didExitNodeWarn := false + didExitNodeLog := false cpeer.V4MasqAddr = peer.SelfNodeV4MasqAddrForThisPeer().Clone() cpeer.V6MasqAddr = peer.SelfNodeV6MasqAddrForThisPeer().Clone() cpeer.IsJailed = peer.IsJailed() for _, allowedIP := range peer.AllowedIPs().All() { if allowedIP.Bits() == 0 && peer.StableID() != exitNode { - if didExitNodeWarn { + if didExitNodeLog { // Don't log about both the IPv4 /0 and IPv6 /0. 
continue } - didExitNodeWarn = true - if skippedUnselected.Len() > 0 { - skippedUnselected.WriteString(", ") - } - fmt.Fprintf(skippedUnselected, "%q (%v)", nodeDebugName(peer), peer.Key().ShortString()) + didExitNodeLog = true + skippedExitNode = append(skippedExitNode, peer) continue } else if cidrIsSubnet(peer, allowedIP) { if (flags & netmap.AllowSubnetRoutes) == 0 { - if skippedSubnets.Len() > 0 { - skippedSubnets.WriteString(", ") - } - fmt.Fprintf(skippedSubnets, "%v from %q (%v)", allowedIP, nodeDebugName(peer), peer.Key().ShortString()) + skippedSubnetRouter = append(skippedSubnetRouter, peer) continue } } @@ -141,14 +115,27 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, } } - if skippedUnselected.Len() > 0 { - logf("[v1] wgcfg: skipped unselected default routes from: %s", skippedUnselected.Bytes()) - } - if skippedSubnets.Len() > 0 { - logf("[v1] wgcfg: did not accept subnet routes: %s", skippedSubnets) - } - if skippedExpired.Len() > 0 { - logf("[v1] wgcfg: skipped expired peer: %s", skippedExpired) + logList := func(title string, nodes []tailcfg.NodeView) { + if len(nodes) == 0 { + return + } + logf("[v1] wgcfg: %s from %d nodes: %s", title, len(nodes), logger.ArgWriter(func(bw *bufio.Writer) { + const max = 5 + for i, n := range nodes { + if i == max { + fmt.Fprintf(bw, "... +%d", len(nodes)-max) + return + } + if i > 0 { + bw.WriteString(", ") + } + fmt.Fprintf(bw, "%s (%s)", nodeDebugName(n), n.StableID()) + } + })) } + logList("skipped unselected exit nodes", skippedExitNode) + logList("did not accept subnet routes", skippedSubnetRouter) + logList("skipped expired peers", skippedExpired) + return cfg, nil } From ab4b990d51c41aff8e1ae7a08435dedfe621ce0d Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 14 Nov 2025 11:57:41 -0800 Subject: [PATCH 1657/1708] net/netmon: do not abandon a subscriber when exiting early (#17899) LinkChangeLogLimiter keeps a subscription to track rate limits for log messages. But when its context ended, it would exit the subscription loop, leaving the subscriber still alive. Ensure the subscriber gets cleaned up when the context ends, so we don't stall event processing. Updates tailscale/corp#34311 Change-Id: I82749e482e9a00dfc47f04afbc69dd0237537cb2 Signed-off-by: M. J. Fromberger --- net/netmon/loghelper.go | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/net/netmon/loghelper.go b/net/netmon/loghelper.go index 2e28e8cda..675762cd1 100644 --- a/net/netmon/loghelper.go +++ b/net/netmon/loghelper.go @@ -18,13 +18,13 @@ import ( // done. func LinkChangeLogLimiter(ctx context.Context, logf logger.Logf, nm *Monitor) logger.Logf { var formatSeen sync.Map // map[string]bool - nm.b.Monitor(nm.changeDeltaWatcher(nm.b, ctx, func(cd ChangeDelta) { + sub := eventbus.SubscribeFunc(nm.b, func(cd ChangeDelta) { // If we're in a major change or a time jump, clear the seen map. if cd.Major || cd.TimeJumped { formatSeen.Clear() } - })) - + }) + context.AfterFunc(ctx, sub.Close) return func(format string, args ...any) { // We only store 'true' in the map, so if it's present then it // means we've already logged this format string. @@ -42,19 +42,3 @@ func LinkChangeLogLimiter(ctx context.Context, logf logger.Logf, nm *Monitor) lo logf(format, args...) 
} } - -func (nm *Monitor) changeDeltaWatcher(ec *eventbus.Client, ctx context.Context, fn func(ChangeDelta)) func(*eventbus.Client) { - sub := eventbus.Subscribe[ChangeDelta](ec) - return func(ec *eventbus.Client) { - for { - select { - case <-ctx.Done(): - return - case <-sub.Done(): - return - case change := <-sub.Events(): - fn(change) - } - } - } -} From bd36817e842d7d9651fc7d6b2c6781d0eb0b56e6 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Fri, 14 Nov 2025 12:05:48 -0800 Subject: [PATCH 1658/1708] scripts/installer.sh: compare major versions numerically (#17904) Most /etc/os-release files set the VERSION_ID to a `MAJOR.MINOR` string, but we were trying to compare this numerically against a major version number. I can only assume that Linux Mint used switched from a plain integer, since shells only do integer comparisons. This patch extracts a VERSION_MAJOR from the VERSION_ID using parameter expansion and unifies all the other ad-hoc comparisons to use it. Fixes #15841 Signed-off-by: Simon Law Co-authored-by: Xavier --- scripts/installer.sh | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/scripts/installer.sh b/scripts/installer.sh index b40177005..e5b6cd23b 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -42,6 +42,8 @@ main() { # - VERSION_CODENAME: the codename of the OS release, if any (e.g. "buster") # - UBUNTU_CODENAME: if it exists, use instead of VERSION_CODENAME . /etc/os-release + VERSION_MAJOR="${VERSION_ID:-}" + VERSION_MAJOR="${VERSION_MAJOR%%.*}" case "$ID" in ubuntu|pop|neon|zorin|tuxedo) OS="ubuntu" @@ -53,10 +55,10 @@ main() { PACKAGETYPE="apt" # Third-party keyrings became the preferred method of # installation in Ubuntu 20.04. - if expr "$VERSION_ID" : "2.*" >/dev/null; then - APT_KEY_TYPE="keyring" - else + if [ "$VERSION_MAJOR" -lt 20 ]; then APT_KEY_TYPE="legacy" + else + APT_KEY_TYPE="keyring" fi ;; debian) @@ -76,7 +78,7 @@ main() { # They don't specify the Debian version they're based off in os-release # but Parrot 6 is based on Debian 12 Bookworm. VERSION=bookworm - elif [ "$VERSION_ID" -lt 11 ]; then + elif [ "$VERSION_MAJOR" -lt 11 ]; then APT_KEY_TYPE="legacy" else APT_KEY_TYPE="keyring" @@ -94,7 +96,7 @@ main() { VERSION="$VERSION_CODENAME" fi PACKAGETYPE="apt" - if [ "$VERSION_ID" -lt 5 ]; then + if [ "$VERSION_MAJOR" -lt 5 ]; then APT_KEY_TYPE="legacy" else APT_KEY_TYPE="keyring" @@ -104,7 +106,7 @@ main() { OS="ubuntu" VERSION="$UBUNTU_CODENAME" PACKAGETYPE="apt" - if [ "$VERSION_ID" -lt 6 ]; then + if [ "$VERSION_MAJOR" -lt 6 ]; then APT_KEY_TYPE="legacy" else APT_KEY_TYPE="keyring" @@ -113,7 +115,7 @@ main() { industrial-os) OS="debian" PACKAGETYPE="apt" - if [ "$(printf %.1s "$VERSION_ID")" -lt 5 ]; then + if [ "$VERSION_MAJOR" -lt 5 ]; then VERSION="buster" APT_KEY_TYPE="legacy" else @@ -124,7 +126,7 @@ main() { parrot|mendel) OS="debian" PACKAGETYPE="apt" - if [ "$VERSION_ID" -lt 5 ]; then + if [ "$VERSION_MAJOR" -lt 5 ]; then VERSION="buster" APT_KEY_TYPE="legacy" else @@ -150,7 +152,7 @@ main() { PACKAGETYPE="apt" # Third-party keyrings became the preferred method of # installation in Raspbian 11 (Bullseye). 
- if [ "$VERSION_ID" -lt 11 ]; then + if [ "$VERSION_MAJOR" -lt 11 ]; then APT_KEY_TYPE="legacy" else APT_KEY_TYPE="keyring" @@ -159,12 +161,11 @@ main() { kali) OS="debian" PACKAGETYPE="apt" - YEAR="$(echo "$VERSION_ID" | cut -f1 -d.)" APT_SYSTEMCTL_START=true # Third-party keyrings became the preferred method of # installation in Debian 11 (Bullseye), which Kali switched # to in roughly 2021.x releases - if [ "$YEAR" -lt 2021 ]; then + if [ "$VERSION_MAJOR" -lt 2021 ]; then # Kali VERSION_ID is "kali-rolling", which isn't distinguishing VERSION="buster" APT_KEY_TYPE="legacy" @@ -176,7 +177,7 @@ main() { Deepin|deepin) # https://github.com/tailscale/tailscale/issues/7862 OS="debian" PACKAGETYPE="apt" - if [ "$VERSION_ID" -lt 20 ]; then + if [ "$VERSION_MAJOR" -lt 20 ]; then APT_KEY_TYPE="legacy" VERSION="buster" else @@ -189,7 +190,7 @@ main() { # All versions of PikaOS are new enough to prefer keyring APT_KEY_TYPE="keyring" # Older versions of PikaOS are based on Ubuntu rather than Debian - if [ "$VERSION_ID" -lt 4 ]; then + if [ "$VERSION_MAJOR" -lt 4 ]; then OS="ubuntu" VERSION="$UBUNTU_CODENAME" else @@ -205,7 +206,7 @@ main() { ;; centos) OS="$ID" - VERSION="$VERSION_ID" + VERSION="$VERSION_MAJOR" PACKAGETYPE="dnf" if [ "$VERSION" = "7" ]; then PACKAGETYPE="yum" @@ -213,7 +214,7 @@ main() { ;; ol) OS="oracle" - VERSION="$(echo "$VERSION_ID" | cut -f1 -d.)" + VERSION="$VERSION_MAJOR" PACKAGETYPE="dnf" if [ "$VERSION" = "7" ]; then PACKAGETYPE="yum" @@ -224,7 +225,7 @@ main() { if [ "$ID" = "miraclelinux" ]; then OS="rhel" fi - VERSION="$(echo "$VERSION_ID" | cut -f1 -d.)" + VERSION="$VERSION_MAJOR" PACKAGETYPE="dnf" if [ "$VERSION" = "7" ]; then PACKAGETYPE="yum" @@ -247,7 +248,7 @@ main() { ;; xenenterprise) OS="centos" - VERSION="$(echo "$VERSION_ID" | cut -f1 -d.)" + VERSION="$VERSION_MAJOR" PACKAGETYPE="yum" ;; opensuse-leap|sles) @@ -311,7 +312,7 @@ main() { ;; freebsd) OS="$ID" - VERSION="$(echo "$VERSION_ID" | cut -f1 -d.)" + VERSION="$VERSION_MAJOR" PACKAGETYPE="pkg" ;; osmc) @@ -322,7 +323,7 @@ main() { ;; photon) OS="photon" - VERSION="$(echo "$VERSION_ID" | cut -f1 -d.)" + VERSION="$VERSION_MAJOR" PACKAGETYPE="tdnf" ;; From 91344400082af271904e08e8f81654979d3d9c10 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 14 Nov 2025 17:33:55 +0000 Subject: [PATCH 1659/1708] various: adds missing apostrophes to comments Updates #cleanup Change-Id: I7bf29cc153c3c04e087f9bdb146c3437bed0129a Signed-off-by: Alex Chan --- cmd/sniproxy/sniproxy_test.go | 8 ++++---- ipn/ipnlocal/network-lock_test.go | 2 +- tka/aum.go | 2 +- tka/builder.go | 2 +- tka/key_test.go | 4 ++-- tka/scenario_test.go | 2 +- tka/sig.go | 2 +- tka/sig_test.go | 4 ++-- tka/state.go | 2 +- tka/sync.go | 6 +++--- tka/sync_test.go | 2 +- tka/tka.go | 16 ++++++++-------- tka/verify.go | 2 +- 13 files changed, 27 insertions(+), 27 deletions(-) diff --git a/cmd/sniproxy/sniproxy_test.go b/cmd/sniproxy/sniproxy_test.go index cd2e070bd..07fbd2ece 100644 --- a/cmd/sniproxy/sniproxy_test.go +++ b/cmd/sniproxy/sniproxy_test.go @@ -152,7 +152,7 @@ func TestSNIProxyWithNetmapConfig(t *testing.T) { configCapKey: []tailcfg.RawMessage{tailcfg.RawMessage(b)}, }) - // Lets spin up a second node (to represent the client). + // Let's spin up a second node (to represent the client). client, _, _ := startNode(t, ctx, controlURL, "client") // Make sure that the sni node has received its config. 
@@ -176,7 +176,7 @@ func TestSNIProxyWithNetmapConfig(t *testing.T) { t.Error("sni node never received its configuration from the coordination server!") } - // Lets make the client open a connection to the sniproxy node, and + // Let's make the client open a connection to the sniproxy node, and // make sure it results in a connection to our test listener. w, err := client.Dial(ctx, "tcp", fmt.Sprintf("%s:%d", ip, ln.Addr().(*net.TCPAddr).Port)) if err != nil { @@ -208,10 +208,10 @@ func TestSNIProxyWithFlagConfig(t *testing.T) { sni, _, ip := startNode(t, ctx, controlURL, "snitest") go run(ctx, sni, 0, sni.Hostname, false, 0, "", fmt.Sprintf("tcp/%d/localhost", ln.Addr().(*net.TCPAddr).Port)) - // Lets spin up a second node (to represent the client). + // Let's spin up a second node (to represent the client). client, _, _ := startNode(t, ctx, controlURL, "client") - // Lets make the client open a connection to the sniproxy node, and + // Let's make the client open a connection to the sniproxy node, and // make sure it results in a connection to our test listener. w, err := client.Dial(ctx, "tcp", fmt.Sprintf("%s:%d", ip, ln.Addr().(*net.TCPAddr).Port)) if err != nil { diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 17040fef3..00d4ff6d9 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -528,7 +528,7 @@ func TestTKASync(t *testing.T) { }, } - // Finally, lets trigger a sync. + // Finally, let's trigger a sync. err = b.tkaSyncIfNeeded(&netmap.NetworkMap{ TKAEnabled: true, TKAHead: controlAuthority.Head(), diff --git a/tka/aum.go b/tka/aum.go index 08d70897e..6d75830bd 100644 --- a/tka/aum.go +++ b/tka/aum.go @@ -226,7 +226,7 @@ func (a *AUM) Serialize() tkatype.MarshaledAUM { // Further, experience with other attempts (JWS/JWT,SAML,X509 etc) has // taught us that even subtle behaviors such as how you handle invalid // or unrecognized fields + any invariants in subsequent re-serialization - // can easily lead to security-relevant logic bugs. Its certainly possible + // can easily lead to security-relevant logic bugs. It's certainly possible // to invent a workable scheme by massaging a JSON parsing library, though // profoundly unwise. // diff --git a/tka/builder.go b/tka/builder.go index 199cec06d..ab2364d85 100644 --- a/tka/builder.go +++ b/tka/builder.go @@ -114,7 +114,7 @@ func (b *UpdateBuilder) generateCheckpoint() error { } } - // Checkpoints cant specify a parent AUM. + // Checkpoints can't specify a parent AUM. state.LastAUMHash = nil return b.mkUpdate(AUM{MessageKind: AUMCheckpoint, State: &state}) } diff --git a/tka/key_test.go b/tka/key_test.go index e912f89c4..fc379e246 100644 --- a/tka/key_test.go +++ b/tka/key_test.go @@ -42,7 +42,7 @@ func TestVerify25519(t *testing.T) { aum := AUM{ MessageKind: AUMRemoveKey, KeyID: []byte{1, 2, 3, 4}, - // Signatures is set to crap so we are sure its ignored in the sigHash computation. + // Signatures is set to crap so we are sure it's ignored in the sigHash computation. Signatures: []tkatype.Signature{{KeyID: []byte{45, 42}}}, } sigHash := aum.SigHash() @@ -89,7 +89,7 @@ func TestNLPrivate(t *testing.T) { t.Error("signature did not verify") } - // We manually compute the keyID, so make sure its consistent with + // We manually compute the keyID, so make sure it's consistent with // tka.Key.ID(). 
if !bytes.Equal(k.MustID(), p.KeyID()) { t.Errorf("private.KeyID() & tka KeyID differ: %x != %x", k.MustID(), p.KeyID()) diff --git a/tka/scenario_test.go b/tka/scenario_test.go index 89a8111e1..a0361a130 100644 --- a/tka/scenario_test.go +++ b/tka/scenario_test.go @@ -204,7 +204,7 @@ func TestNormalPropagation(t *testing.T) { `) control := s.mkNode("control") - // Lets say theres a node with some updates! + // Let's say there's a node with some updates! n1 := s.mkNodeWithForks("n1", true, map[string]*testChain{ "L2": newTestchain(t, `L3 -> L4`), }) diff --git a/tka/sig.go b/tka/sig.go index 7b1838d40..46d598ad9 100644 --- a/tka/sig.go +++ b/tka/sig.go @@ -277,7 +277,7 @@ func (s *NodeKeySignature) verifySignature(nodeKey key.NodePublic, verificationK // Recurse to verify the signature on the nested structure. var nestedPub key.NodePublic // SigCredential signatures certify an indirection key rather than a node - // key, so theres no need to check the node key. + // key, so there's no need to check the node key. if s.Nested.SigKind != SigCredential { if err := nestedPub.UnmarshalBinary(s.Nested.Pubkey); err != nil { return fmt.Errorf("nested pubkey: %v", err) diff --git a/tka/sig_test.go b/tka/sig_test.go index 99c25f8e5..2fafb0436 100644 --- a/tka/sig_test.go +++ b/tka/sig_test.go @@ -119,7 +119,7 @@ func TestSigNested(t *testing.T) { } // Test verification fails if the outer signature is signed with a - // different public key to whats specified in WrappingPubkey + // different public key to what's specified in WrappingPubkey sig.Signature = ed25519.Sign(priv, sigHash[:]) if err := sig.verifySignature(node.Public(), k); err == nil { t.Error("verifySignature(node) succeeded with different signature") @@ -275,7 +275,7 @@ func TestSigCredential(t *testing.T) { } // Test verification fails if the outer signature is signed with a - // different public key to whats specified in WrappingPubkey + // different public key to what's specified in WrappingPubkey sig.Signature = ed25519.Sign(priv, sigHash[:]) if err := sig.verifySignature(node.Public(), k); err == nil { t.Error("verifySignature(node) succeeded with different signature") diff --git a/tka/state.go b/tka/state.go index 0a30c56a0..95a319bd9 100644 --- a/tka/state.go +++ b/tka/state.go @@ -140,7 +140,7 @@ func (s State) checkDisablement(secret []byte) bool { // Specifically, the rules are: // - The last AUM hash must match (transitively, this implies that this // update follows the last update message applied to the state machine) -// - Or, the state machine knows no parent (its brand new). +// - Or, the state machine knows no parent (it's brand new). func (s State) parentMatches(update AUM) bool { if s.LastAUMHash == nil { return true diff --git a/tka/sync.go b/tka/sync.go index 6c2b7cbb8..e3a858c15 100644 --- a/tka/sync.go +++ b/tka/sync.go @@ -54,7 +54,7 @@ const ( // can then be applied locally with Inform(). // // This SyncOffer + AUM exchange should be performed by both ends, -// because its possible that either end has AUMs that the other needs +// because it's possible that either end has AUMs that the other needs // to find out about. 
func (a *Authority) SyncOffer(storage Chonk) (SyncOffer, error) { oldest := a.oldestAncestor.Hash() @@ -123,7 +123,7 @@ func computeSyncIntersection(storage Chonk, localOffer, remoteOffer SyncOffer) ( } // Case: 'head intersection' - // If we have the remote's head, its more likely than not that + // If we have the remote's head, it's more likely than not that // we have updates that build on that head. To confirm this, // we iterate backwards through our chain to see if the given // head is an ancestor of our current chain. @@ -165,7 +165,7 @@ func computeSyncIntersection(storage Chonk, localOffer, remoteOffer SyncOffer) ( // Case: 'tail intersection' // So we don't have a clue what the remote's head is, but // if one of the ancestors they gave us is part of our chain, - // then theres an intersection, which is a starting point for + // then there's an intersection, which is a starting point for // the remote to send us AUMs from. // // We iterate the list of ancestors in order because the remote diff --git a/tka/sync_test.go b/tka/sync_test.go index 7250eacf7..f9d86c16a 100644 --- a/tka/sync_test.go +++ b/tka/sync_test.go @@ -357,7 +357,7 @@ func TestSyncSimpleE2E(t *testing.T) { t.Fatalf("control Open() failed: %v", err) } - // Control knows the full chain, node only knows the genesis. Lets see + // Control knows the full chain, node only knows the genesis. Let's see // if they can sync. nodeOffer, err := node.SyncOffer(nodeStorage) if err != nil { diff --git a/tka/tka.go b/tka/tka.go index c34e35e7b..9dce74e9a 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -94,7 +94,7 @@ func computeChainCandidates(storage Chonk, lastKnownOldest *AUMHash, maxIter int // candidates.Oldest needs to be computed by working backwards from // head as far as we can. - iterAgain := true // if theres still work to be done. + iterAgain := true // if there's still work to be done. for i := 0; iterAgain; i++ { if i >= maxIter { return nil, fmt.Errorf("iteration limit exceeded (%d)", maxIter) @@ -295,7 +295,7 @@ func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error) } // If we got here, the current state is dependent on the previous. - // Keep iterating backwards till thats not the case. + // Keep iterating backwards till that's not the case. if curs, err = storage.AUM(parent); err != nil { return State{}, fmt.Errorf("reading parent (%v): %v", parent, err) } @@ -324,7 +324,7 @@ func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error) return curs.Hash() == wantHash }) // fastForward only terminates before the done condition if it - // doesnt have any later AUMs to process. This cant be the case + // doesn't have any later AUMs to process. This can't be the case // as we've already iterated through them above so they must exist, // but we check anyway to be super duper sure. if err == nil && *state.LastAUMHash != wantHash { @@ -336,7 +336,7 @@ func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error) // computeActiveAncestor determines which ancestor AUM to use as the // ancestor of the valid chain. // -// If all the chains end up having the same ancestor, then thats the +// If all the chains end up having the same ancestor, then that's the // only possible ancestor, ezpz. However if there are multiple distinct // ancestors, that means there are distinct chains, and we need some // hint to choose what to use. 
For that, we rely on the chainsThroughActive @@ -357,7 +357,7 @@ func computeActiveAncestor(chains []chain) (AUMHash, error) { } } - // Theres more than one, so we need to use the ancestor that was + // There's more than one, so we need to use the ancestor that was // part of the active chain in a previous iteration. // Note that there can only be one distinct ancestor that was // formerly part of the active chain, because AUMs can only have @@ -479,7 +479,7 @@ func (a *Authority) Head() AUMHash { // Open initializes an existing TKA from the given tailchonk. // // Only use this if the current node has initialized an Authority before. -// If a TKA exists on other nodes but theres nothing locally, use Bootstrap(). +// If a TKA exists on other nodes but there's nothing locally, use Bootstrap(). // If no TKA exists anywhere and you are creating it for the first // time, use New(). func Open(storage Chonk) (*Authority, error) { @@ -592,14 +592,14 @@ func (a *Authority) InformIdempotent(storage Chonk, updates []AUM) (Authority, e toCommit := make([]AUM, 0, len(updates)) prevHash := a.Head() - // The state at HEAD is the current state of the authority. Its likely + // The state at HEAD is the current state of the authority. It's likely // to be needed, so we prefill it rather than computing it. stateAt[prevHash] = a.state // Optimization: If the set of updates is a chain building from // the current head, EG: // ==> updates[0] ==> updates[1] ... - // Then theres no need to recompute the resulting state from the + // Then there's no need to recompute the resulting state from the // stored ancestor, because the last state computed during iteration // is the new state. This should be the common case. // isHeadChain keeps track of this. diff --git a/tka/verify.go b/tka/verify.go index e4e22e551..ed0ecea66 100644 --- a/tka/verify.go +++ b/tka/verify.go @@ -18,7 +18,7 @@ import ( // provided AUM BLAKE2s digest, using the given key. func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error { // NOTE(tom): Even if we can compute the public from the KeyID, - // its possible for the KeyID to be attacker-controlled + // it's possible for the KeyID to be attacker-controlled // so we should use the public contained in the state machine. 
switch key.Kind { case Key25519: From 888a5d4812c97a818c4cc041a3f97aae8bd81afc Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 14 Nov 2025 12:58:53 -0800 Subject: [PATCH 1660/1708] ipn/localapi: use constant-time comparison for RequiredPassword (#17906) Updates #cleanup Signed-off-by: Andrew Lytvynov --- ipn/localapi/localapi.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 9e7c16891..de5ff53ac 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -7,6 +7,7 @@ package localapi import ( "bytes" "cmp" + "crypto/subtle" "encoding/json" "errors" "fmt" @@ -257,7 +258,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, "auth required", http.StatusUnauthorized) return } - if pass != h.RequiredPassword { + if subtle.ConstantTimeCompare([]byte(pass), []byte(h.RequiredPassword)) == 0 { metricInvalidRequests.Add(1) http.Error(w, "bad password", http.StatusForbidden) return From c5919b4ed1f38374d51fe9e92ef57e322c31c875 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 14 Nov 2025 13:23:25 -0800 Subject: [PATCH 1661/1708] feature/tpm: check IsZero in clone instead of just nil (#17884) The key.NewEmptyHardwareAttestationKey hook returns a non-nil empty attestationKey, which means that the nil check in Clone doesn't trigger and proceeds to try and clone an empty key. Check IsZero instead to reduce log spam from Clone. As a drive-by, make tpmAvailable check a sync.Once because the result won't change. Updates #17882 Signed-off-by: Andrew Lytvynov --- feature/tpm/attestation.go | 2 +- feature/tpm/tpm.go | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/feature/tpm/attestation.go b/feature/tpm/attestation.go index 597d4a649..49b80ade1 100644 --- a/feature/tpm/attestation.go +++ b/feature/tpm/attestation.go @@ -274,7 +274,7 @@ func (ak *attestationKey) Close() error { } func (ak *attestationKey) Clone() key.HardwareAttestationKey { - if ak == nil { + if ak.IsZero() { return nil } diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 4b27a241f..7cbdec088 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -35,12 +35,15 @@ import ( "tailscale.com/util/testenv" ) -var infoOnce = sync.OnceValue(info) +var ( + infoOnce = sync.OnceValue(info) + tpmSupportedOnce = sync.OnceValue(tpmSupported) +) func init() { feature.Register("tpm") - feature.HookTPMAvailable.Set(tpmSupported) - feature.HookHardwareAttestationAvailable.Set(tpmSupported) + feature.HookTPMAvailable.Set(tpmSupportedOnce) + feature.HookHardwareAttestationAvailable.Set(tpmSupportedOnce) hostinfo.RegisterHostinfoNewHook(func(hi *tailcfg.Hostinfo) { hi.TPM = infoOnce() From a96ef432cfe36ef2d8c63fee7ab9c7cb95c39708 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Wed, 12 Nov 2025 16:40:23 -0800 Subject: [PATCH 1662/1708] control/controlclient,ipn/ipnlocal: replace State enum with boolean flags Remove the State enum (StateNew, StateNotAuthenticated, etc.) from controlclient and replace it with two explicit boolean fields: - LoginFinished: indicates successful authentication - Synced: indicates we've received at least one netmap This makes the state more composable and easier to reason about, as multiple conditions can be true independently rather than being encoded in a single enum value. The State enum was originally intended as the state machine for the whole client, but that abstraction moved to ipn.Backend long ago. 
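(The diff below lands these fields as LoggedIn and InMapPoll.) A simplified
sketch of the idea, using made-up types rather than the real Status struct:
what the old StateSynchronized value encoded is now just the conjunction of
two independent facts.

    package main

    import "fmt"

    // status mirrors, in simplified form, the two booleans that replace
    // the old State enum on controlclient.Status.
    type status struct {
        LoggedIn  bool // authentication has completed
        InMapPoll bool // at least one netmap has been received
    }

    // synchronized is the condition StateSynchronized used to represent:
    // logged in and actively receiving netmaps.
    func (s status) synchronized() bool { return s.LoggedIn && s.InMapPoll }

    func main() {
        fmt.Println(status{LoggedIn: true, InMapPoll: true}.synchronized()) // true
        fmt.Println(status{LoggedIn: true}.synchronized())                  // false
    }
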
This change continues moving away from the legacy state machine by representing state as a combination of independent facts. Also adds test helpers in ipnlocal that check independent, observable facts (hasValidNetMap, needsLogin, etc.) rather than relying on derived state enums, making tests more robust. Updates #12639 Signed-off-by: James Tucker --- control/controlclient/auto.go | 52 ++--- control/controlclient/controlclient_test.go | 28 ++- control/controlclient/status.go | 90 +------- ipn/ipnlocal/local.go | 6 +- ipn/ipnlocal/state_test.go | 241 ++++++++++++++------ 5 files changed, 224 insertions(+), 193 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 50248a647..9d648409b 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -138,7 +138,6 @@ type Auto struct { loggedIn bool // true if currently logged in loginGoal *LoginGoal // non-nil if some login activity is desired inMapPoll bool // true once we get the first MapResponse in a stream; false when HTTP response ends - state State // TODO(bradfitz): delete this, make it computed by method from other state authCtx context.Context // context used for auth requests mapCtx context.Context // context used for netmap and update requests @@ -296,10 +295,11 @@ func (c *Auto) authRoutine() { c.mu.Lock() goal := c.loginGoal ctx := c.authCtx + loggedIn := c.loggedIn if goal != nil { - c.logf("[v1] authRoutine: %s; wantLoggedIn=%v", c.state, true) + c.logf("[v1] authRoutine: loggedIn=%v; wantLoggedIn=%v", loggedIn, true) } else { - c.logf("[v1] authRoutine: %s; goal=nil paused=%v", c.state, c.paused) + c.logf("[v1] authRoutine: loggedIn=%v; goal=nil paused=%v", loggedIn, c.paused) } c.mu.Unlock() @@ -322,11 +322,6 @@ func (c *Auto) authRoutine() { c.mu.Lock() c.urlToVisit = goal.url - if goal.url != "" { - c.state = StateURLVisitRequired - } else { - c.state = StateAuthenticating - } c.mu.Unlock() var url string @@ -360,7 +355,6 @@ func (c *Auto) authRoutine() { flags: LoginDefault, url: url, } - c.state = StateURLVisitRequired c.mu.Unlock() c.sendStatus("authRoutine-url", err, url, nil) @@ -380,7 +374,6 @@ func (c *Auto) authRoutine() { c.urlToVisit = "" c.loggedIn = true c.loginGoal = nil - c.state = StateAuthenticated c.mu.Unlock() c.sendStatus("authRoutine-success", nil, "", nil) @@ -431,12 +424,9 @@ func (mrs mapRoutineState) UpdateFullNetmap(nm *netmap.NetworkMap) { c.mu.Lock() c.inMapPoll = true - if c.loggedIn { - c.state = StateSynchronized - } c.expiry = nm.Expiry stillAuthed := c.loggedIn - c.logf("[v1] mapRoutine: netmap received: %s", c.state) + c.logf("[v1] mapRoutine: netmap received: loggedIn=%v inMapPoll=true", stillAuthed) c.mu.Unlock() if stillAuthed { @@ -484,8 +474,8 @@ func (c *Auto) mapRoutine() { } c.mu.Lock() - c.logf("[v1] mapRoutine: %s", c.state) loggedIn := c.loggedIn + c.logf("[v1] mapRoutine: loggedIn=%v", loggedIn) ctx := c.mapCtx c.mu.Unlock() @@ -516,9 +506,6 @@ func (c *Auto) mapRoutine() { c.direct.health.SetOutOfPollNetMap() c.mu.Lock() c.inMapPoll = false - if c.state == StateSynchronized { - c.state = StateAuthenticated - } paused := c.paused c.mu.Unlock() @@ -584,12 +571,12 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM c.mu.Unlock() return } - state := c.state loggedIn := c.loggedIn inMapPoll := c.inMapPoll + loginGoal := c.loginGoal c.mu.Unlock() - c.logf("[v1] sendStatus: %s: %v", who, state) + c.logf("[v1] sendStatus: %s: loggedIn=%v inMapPoll=%v", who, loggedIn, inMapPoll) var p 
persist.PersistView if nm != nil && loggedIn && inMapPoll { @@ -600,11 +587,12 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM nm = nil } newSt := &Status{ - URL: url, - Persist: p, - NetMap: nm, - Err: err, - state: state, + URL: url, + Persist: p, + NetMap: nm, + Err: err, + LoggedIn: loggedIn && loginGoal == nil, + InMapPoll: inMapPoll, } if c.observer == nil { @@ -667,14 +655,15 @@ func canSkipStatus(s1, s2 *Status) bool { // we can't skip it. return false } - if s1.Err != nil || s1.URL != "" { - // If s1 has an error or a URL, we shouldn't skip it, lest the error go - // away in s2 or in-between. We want to make sure all the subsystems see - // it. Plus there aren't many of these, so not worth skipping. + if s1.Err != nil || s1.URL != "" || s1.LoggedIn { + // If s1 has an error, a URL, or LoginFinished set, we shouldn't skip it, + // lest the error go away in s2 or in-between. We want to make sure all + // the subsystems see it. Plus there aren't many of these, so not worth + // skipping. return false } - if !s1.Persist.Equals(s2.Persist) || s1.state != s2.state { - // If s1 has a different Persist or state than s2, + if !s1.Persist.Equals(s2.Persist) || s1.LoggedIn != s2.LoggedIn || s1.InMapPoll != s2.InMapPoll || s1.URL != s2.URL { + // If s1 has a different Persist, LoginFinished, Synced, or URL than s2, // don't skip it. We only care about skipping the typical // entries where the only difference is the NetMap. return false @@ -736,7 +725,6 @@ func (c *Auto) Logout(ctx context.Context) error { } c.mu.Lock() c.loggedIn = false - c.state = StateNotAuthenticated c.cancelAuthCtxLocked() c.cancelMapCtxLocked() c.mu.Unlock() diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index 3914d10ef..bc3011226 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -15,7 +15,6 @@ import ( "net/netip" "net/url" "reflect" - "slices" "sync/atomic" "testing" "time" @@ -49,7 +48,7 @@ func fieldsOf(t reflect.Type) (fields []string) { func TestStatusEqual(t *testing.T) { // Verify that the Equal method stays in sync with reality - equalHandles := []string{"Err", "URL", "NetMap", "Persist", "state"} + equalHandles := []string{"Err", "URL", "LoggedIn", "InMapPoll", "NetMap", "Persist"} if have := fieldsOf(reflect.TypeFor[Status]()); !reflect.DeepEqual(have, equalHandles) { t.Errorf("Status.Equal check might be out of sync\nfields: %q\nhandled: %q\n", have, equalHandles) @@ -81,7 +80,7 @@ func TestStatusEqual(t *testing.T) { }, { &Status{}, - &Status{state: StateAuthenticated}, + &Status{LoggedIn: true, Persist: new(persist.Persist).View()}, false, }, } @@ -135,8 +134,20 @@ func TestCanSkipStatus(t *testing.T) { want: false, }, { - name: "s1-state-diff", - s1: &Status{state: 123, NetMap: nm1}, + name: "s1-login-finished-diff", + s1: &Status{LoggedIn: true, Persist: new(persist.Persist).View(), NetMap: nm1}, + s2: &Status{NetMap: nm2}, + want: false, + }, + { + name: "s1-login-finished", + s1: &Status{LoggedIn: true, Persist: new(persist.Persist).View(), NetMap: nm1}, + s2: &Status{NetMap: nm2}, + want: false, + }, + { + name: "s1-synced-diff", + s1: &Status{InMapPoll: true, LoggedIn: true, Persist: new(persist.Persist).View(), NetMap: nm1}, s2: &Status{NetMap: nm2}, want: false, }, @@ -167,10 +178,11 @@ func TestCanSkipStatus(t *testing.T) { }) } - want := []string{"Err", "URL", "NetMap", "Persist", "state"} - if f := fieldsOf(reflect.TypeFor[Status]()); 
!slices.Equal(f, want) { - t.Errorf("Status fields = %q; this code was only written to handle fields %q", f, want) + coveredFields := []string{"Err", "URL", "LoggedIn", "InMapPoll", "NetMap", "Persist"} + if have := fieldsOf(reflect.TypeFor[Status]()); !reflect.DeepEqual(have, coveredFields) { + t.Errorf("Status fields = %q; this code was only written to handle fields %q", have, coveredFields) } + } func TestRetryableErrors(t *testing.T) { diff --git a/control/controlclient/status.go b/control/controlclient/status.go index d0fdf80d7..65afb7a50 100644 --- a/control/controlclient/status.go +++ b/control/controlclient/status.go @@ -4,8 +4,6 @@ package controlclient import ( - "encoding/json" - "fmt" "reflect" "tailscale.com/types/netmap" @@ -13,57 +11,6 @@ import ( "tailscale.com/types/structs" ) -// State is the high-level state of the client. It is used only in -// unit tests for proper sequencing, don't depend on it anywhere else. -// -// TODO(apenwarr): eliminate the state, as it's now obsolete. -// -// apenwarr: Historical note: controlclient.Auto was originally -// intended to be the state machine for the whole tailscale client, but that -// turned out to not be the right abstraction layer, and it moved to -// ipn.Backend. Since ipn.Backend now has a state machine, it would be -// much better if controlclient could be a simple stateless API. But the -// current server-side API (two interlocking polling https calls) makes that -// very hard to implement. A server side API change could untangle this and -// remove all the statefulness. -type State int - -const ( - StateNew = State(iota) - StateNotAuthenticated - StateAuthenticating - StateURLVisitRequired - StateAuthenticated - StateSynchronized // connected and received map update -) - -func (s State) AppendText(b []byte) ([]byte, error) { - return append(b, s.String()...), nil -} - -func (s State) MarshalText() ([]byte, error) { - return []byte(s.String()), nil -} - -func (s State) String() string { - switch s { - case StateNew: - return "state:new" - case StateNotAuthenticated: - return "state:not-authenticated" - case StateAuthenticating: - return "state:authenticating" - case StateURLVisitRequired: - return "state:url-visit-required" - case StateAuthenticated: - return "state:authenticated" - case StateSynchronized: - return "state:synchronized" - default: - return fmt.Sprintf("state:unknown:%d", int(s)) - } -} - type Status struct { _ structs.Incomparable @@ -76,6 +23,14 @@ type Status struct { // URL, if non-empty, is the interactive URL to visit to finish logging in. URL string + // LoggedIn, if true, indicates that serveRegister has completed and no + // other login change is in progress. + LoggedIn bool + + // InMapPoll, if true, indicates that we've received at least one netmap + // and are connected to receive updates. + InMapPoll bool + // NetMap is the latest server-pushed state of the tailnet network. NetMap *netmap.NetworkMap @@ -83,26 +38,8 @@ type Status struct { // // TODO(bradfitz,maisem): clarify this. Persist persist.PersistView - - // state is the internal state. It should not be exposed outside this - // package, but we have some automated tests elsewhere that need to - // use it via the StateForTest accessor. - // TODO(apenwarr): Unexport or remove these. - state State } -// LoginFinished reports whether the controlclient is in its "StateAuthenticated" -// state where it's in a happy register state but not yet in a map poll. -// -// TODO(bradfitz): delete this and everything around Status.state. 
-func (s *Status) LoginFinished() bool { return s.state == StateAuthenticated } - -// StateForTest returns the internal state of s for tests only. -func (s *Status) StateForTest() State { return s.state } - -// SetStateForTest sets the internal state of s for tests only. -func (s *Status) SetStateForTest(state State) { s.state = state } - // Equal reports whether s and s2 are equal. func (s *Status) Equal(s2 *Status) bool { if s == nil && s2 == nil { @@ -111,15 +48,8 @@ func (s *Status) Equal(s2 *Status) bool { return s != nil && s2 != nil && s.Err == s2.Err && s.URL == s2.URL && - s.state == s2.state && + s.LoggedIn == s2.LoggedIn && + s.InMapPoll == s2.InMapPoll && reflect.DeepEqual(s.Persist, s2.Persist) && reflect.DeepEqual(s.NetMap, s2.NetMap) } - -func (s Status) String() string { - b, err := json.MarshalIndent(s, "", "\t") - if err != nil { - panic(err) - } - return s.state.String() + " " + string(b) -} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index f0a77531b..41d110400 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1623,7 +1623,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.blockEngineUpdatesLocked(false) } - if st.LoginFinished() && (wasBlocked || authWasInProgress) { + if st.LoggedIn && (wasBlocked || authWasInProgress) { if wasBlocked { // Auth completed, unblock the engine b.blockEngineUpdatesLocked(false) @@ -1658,8 +1658,8 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control prefs.Persist = st.Persist.AsStruct() } } - if st.LoginFinished() { - if b.authURL != "" { + if st.LoggedIn { + if authWasInProgress { b.resetAuthURLLocked() // Interactive login finished successfully (URL visited). // After an interactive login, the user always wants diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 2197112b2..0c95ef4fc 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -206,9 +206,7 @@ func (cc *mockControl) send(opts sendOpt) { Err: err, } if loginFinished { - s.SetStateForTest(controlclient.StateAuthenticated) - } else if url == "" && err == nil && nm == nil { - s.SetStateForTest(controlclient.StateNotAuthenticated) + s.LoggedIn = true } cc.opts.Observer.SetControlClientStatus(cc, s) } @@ -228,7 +226,6 @@ func (cc *mockControl) sendAuthURL(nm *netmap.NetworkMap) { NetMap: nm, Persist: cc.persist.View(), } - s.SetStateForTest(controlclient.StateURLVisitRequired) cc.opts.Observer.SetControlClientStatus(cc, s) } @@ -434,8 +431,11 @@ func runTestStateMachine(t *testing.T, seamless bool) { // for it, so it doesn't count as Prefs.LoggedOut==true. c.Assert(prefs.LoggedOut(), qt.IsTrue) c.Assert(prefs.WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, *nn[1].State) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify notification indicates we need login (prefs show logged out) + c.Assert(nn[1].Prefs == nil || nn[1].Prefs.LoggedOut(), qt.IsTrue) + // Verify the actual facts about our state + c.Assert(needsLogin(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsFalse) } // Restart the state machine. 
@@ -455,8 +455,11 @@ func runTestStateMachine(t *testing.T, seamless bool) { c.Assert(nn[1].State, qt.IsNotNil) c.Assert(nn[0].Prefs.LoggedOut(), qt.IsTrue) c.Assert(nn[0].Prefs.WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, *nn[1].State) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify notification indicates we need login + c.Assert(nn[1].Prefs == nil || nn[1].Prefs.LoggedOut(), qt.IsTrue) + // Verify the actual facts about our state + c.Assert(needsLogin(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsFalse) } // Start non-interactive login with no token. @@ -473,7 +476,8 @@ func runTestStateMachine(t *testing.T, seamless bool) { // (This behaviour is needed so that b.Login() won't // start connecting to an old account right away, if one // exists when you launch another login.) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify we still need login + c.Assert(needsLogin(b), qt.IsTrue) } // Attempted non-interactive login with no key; indicate that @@ -500,10 +504,11 @@ func runTestStateMachine(t *testing.T, seamless bool) { c.Assert(nn[1].Prefs, qt.IsNotNil) c.Assert(nn[1].Prefs.LoggedOut(), qt.IsTrue) c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify we need URL visit + c.Assert(hasAuthURL(b), qt.IsTrue) c.Assert(nn[2].BrowseToURL, qt.IsNotNil) c.Assert(url1, qt.Equals, *nn[2].BrowseToURL) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + c.Assert(isFullyAuthenticated(b), qt.IsFalse) } // Now we'll try an interactive login. @@ -518,7 +523,8 @@ func runTestStateMachine(t *testing.T, seamless bool) { cc.assertCalls() c.Assert(nn[0].BrowseToURL, qt.IsNotNil) c.Assert(url1, qt.Equals, *nn[0].BrowseToURL) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify we still need to complete login + c.Assert(needsLogin(b), qt.IsTrue) } // Sometimes users press the Login button again, in the middle of @@ -534,7 +540,8 @@ func runTestStateMachine(t *testing.T, seamless bool) { notifies.drain(0) // backend asks control for another login sequence cc.assertCalls("Login") - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify we still need login + c.Assert(needsLogin(b), qt.IsTrue) } // Provide a new interactive login URL. @@ -550,7 +557,8 @@ func runTestStateMachine(t *testing.T, seamless bool) { nn := notifies.drain(1) c.Assert(nn[0].BrowseToURL, qt.IsNotNil) c.Assert(url2, qt.Equals, *nn[0].BrowseToURL) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify we still need to complete login + c.Assert(needsLogin(b), qt.IsTrue) } // Pretend that the interactive login actually happened. @@ -582,10 +590,18 @@ func runTestStateMachine(t *testing.T, seamless bool) { cc.assertCalls() c.Assert(nn[0].LoginFinished, qt.IsNotNil) c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(nn[2].State, qt.IsNotNil) c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName, qt.Equals, "user1") - c.Assert(ipn.NeedsMachineAuth, qt.Equals, *nn[2].State) - c.Assert(ipn.NeedsMachineAuth, qt.Equals, b.State()) + // nn[2] is a state notification after login + // Verify login finished but need machine auth using backend state + c.Assert(isFullyAuthenticated(b), qt.IsTrue) + c.Assert(needsMachineAuth(b), qt.IsTrue) + nm := b.NetMap() + c.Assert(nm, qt.IsNotNil) + // For an empty netmap (after initial login), SelfNode may not be valid yet. + // In this case, we can't check MachineAuthorized, but needsMachineAuth already verified the state. 
+ if nm.SelfNode.Valid() { + c.Assert(nm.SelfNode.MachineAuthorized(), qt.IsFalse) + } } // Pretend that the administrator has authorized our machine. @@ -603,8 +619,13 @@ func runTestStateMachine(t *testing.T, seamless bool) { { nn := notifies.drain(1) cc.assertCalls() - c.Assert(nn[0].State, qt.IsNotNil) - c.Assert(ipn.Starting, qt.Equals, *nn[0].State) + // nn[0] is a state notification after machine auth granted + c.Assert(len(nn), qt.Equals, 1) + // Verify machine authorized using backend state + nm := b.NetMap() + c.Assert(nm, qt.IsNotNil) + c.Assert(nm.SelfNode.Valid(), qt.IsTrue) + c.Assert(nm.SelfNode.MachineAuthorized(), qt.IsTrue) } // TODO: add a fake DERP server to our fake netmap, so we can @@ -627,9 +648,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { nn := notifies.drain(2) cc.assertCalls("pause") // BUG: I would expect Prefs to change first, and state after. - c.Assert(nn[0].State, qt.IsNotNil) + // nn[0] is state notification, nn[1] is prefs notification c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(ipn.Stopped, qt.Equals, *nn[0].State) + c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse) } // The user changes their preference to WantRunning after all. @@ -645,17 +666,12 @@ func runTestStateMachine(t *testing.T, seamless bool) { // BUG: Login isn't needed here. We never logged out. cc.assertCalls("Login", "unpause") // BUG: I would expect Prefs to change first, and state after. - c.Assert(nn[0].State, qt.IsNotNil) + // nn[0] is state notification, nn[1] is prefs notification c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(ipn.Starting, qt.Equals, *nn[0].State) + c.Assert(nn[1].Prefs.WantRunning(), qt.IsTrue) c.Assert(store.sawWrite(), qt.IsTrue) } - // undo the state hack above. - b.mu.Lock() - b.state = ipn.Starting - b.mu.Unlock() - // User wants to logout. store.awaitWrite() t.Logf("\n\nLogout") @@ -664,27 +680,26 @@ func runTestStateMachine(t *testing.T, seamless bool) { { nn := notifies.drain(5) previousCC.assertCalls("pause", "Logout", "unpause", "Shutdown") + // nn[0] is state notification (Stopped) c.Assert(nn[0].State, qt.IsNotNil) c.Assert(*nn[0].State, qt.Equals, ipn.Stopped) - + // nn[1] is prefs notification after logout c.Assert(nn[1].Prefs, qt.IsNotNil) c.Assert(nn[1].Prefs.LoggedOut(), qt.IsTrue) c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse) cc.assertCalls("New") - c.Assert(nn[2].State, qt.IsNotNil) - c.Assert(*nn[2].State, qt.Equals, ipn.NoState) - - c.Assert(nn[3].Prefs, qt.IsNotNil) // emptyPrefs + // nn[2] is the initial state notification after New (NoState) + // nn[3] is prefs notification with emptyPrefs + c.Assert(nn[3].Prefs, qt.IsNotNil) c.Assert(nn[3].Prefs.LoggedOut(), qt.IsTrue) c.Assert(nn[3].Prefs.WantRunning(), qt.IsFalse) - c.Assert(nn[4].State, qt.IsNotNil) - c.Assert(*nn[4].State, qt.Equals, ipn.NeedsLogin) - - c.Assert(b.State(), qt.Equals, ipn.NeedsLogin) - c.Assert(store.sawWrite(), qt.IsTrue) + // nn[4] is state notification (NeedsLogin) + // Verify logged out and needs new login using backend state + c.Assert(needsLogin(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsFalse) } // A second logout should be a no-op as we are in the NeedsLogin state. 
@@ -696,7 +711,8 @@ func runTestStateMachine(t *testing.T, seamless bool) { cc.assertCalls() c.Assert(b.Prefs().LoggedOut(), qt.IsTrue) c.Assert(b.Prefs().WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify still needs login + c.Assert(needsLogin(b), qt.IsTrue) } // A third logout should also be a no-op as the cc should be in @@ -709,7 +725,8 @@ func runTestStateMachine(t *testing.T, seamless bool) { cc.assertCalls() c.Assert(b.Prefs().LoggedOut(), qt.IsTrue) c.Assert(b.Prefs().WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify still needs login + c.Assert(needsLogin(b), qt.IsTrue) } // Oh, you thought we were done? Ha! Now we have to test what @@ -732,11 +749,13 @@ func runTestStateMachine(t *testing.T, seamless bool) { nn := notifies.drain(2) cc.assertCalls() c.Assert(nn[0].Prefs, qt.IsNotNil) - c.Assert(nn[1].State, qt.IsNotNil) c.Assert(nn[0].Prefs.LoggedOut(), qt.IsTrue) c.Assert(nn[0].Prefs.WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, *nn[1].State) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify notification indicates we need login + c.Assert(nn[1].Prefs == nil || nn[1].Prefs.LoggedOut(), qt.IsTrue) + // Verify we need login after restart + c.Assert(needsLogin(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsFalse) } // Explicitly set the ControlURL to avoid defaulting to [ipn.DefaultControlURL]. @@ -787,8 +806,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { c.Assert(nn[1].Prefs.LoggedOut(), qt.IsFalse) // If a user initiates an interactive login, they also expect WantRunning to become true. c.Assert(nn[1].Prefs.WantRunning(), qt.IsTrue) - c.Assert(nn[2].State, qt.IsNotNil) - c.Assert(ipn.Starting, qt.Equals, *nn[2].State) + // nn[2] is state notification (Starting) - verify using backend state + c.Assert(isWantRunning(b), qt.IsTrue) + c.Assert(isLoggedIn(b), qt.IsTrue) } // Now we've logged in successfully. Let's disconnect. @@ -802,9 +822,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { nn := notifies.drain(2) cc.assertCalls("pause") // BUG: I would expect Prefs to change first, and state after. - c.Assert(nn[0].State, qt.IsNotNil) + // nn[0] is state notification (Stopped), nn[1] is prefs notification c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(ipn.Stopped, qt.Equals, *nn[0].State) + c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse) c.Assert(nn[1].Prefs.LoggedOut(), qt.IsFalse) } @@ -822,10 +842,11 @@ func runTestStateMachine(t *testing.T, seamless bool) { // and WantRunning is false, so cc should be paused. cc.assertCalls("New", "Login", "pause") c.Assert(nn[0].Prefs, qt.IsNotNil) - c.Assert(nn[1].State, qt.IsNotNil) c.Assert(nn[0].Prefs.WantRunning(), qt.IsFalse) c.Assert(nn[0].Prefs.LoggedOut(), qt.IsFalse) - c.Assert(*nn[1].State, qt.Equals, ipn.Stopped) + // nn[1] is state notification (Stopped) + // Verify backend shows we're not wanting to run + c.Assert(isWantRunning(b), qt.IsFalse) } // When logged in but !WantRunning, ipn leaves us unpaused to retrieve @@ -863,9 +884,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { nn := notifies.drain(2) cc.assertCalls("Login", "unpause") // BUG: I would expect Prefs to change first, and state after. - c.Assert(nn[0].State, qt.IsNotNil) + // nn[0] is state notification (Starting), nn[1] is prefs notification c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(ipn.Starting, qt.Equals, *nn[0].State) + c.Assert(nn[1].Prefs.WantRunning(), qt.IsTrue) } // Disconnect. 
@@ -879,9 +900,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { nn := notifies.drain(2) cc.assertCalls("pause") // BUG: I would expect Prefs to change first, and state after. - c.Assert(nn[0].State, qt.IsNotNil) + // nn[0] is state notification (Stopped), nn[1] is prefs notification c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(ipn.Stopped, qt.Equals, *nn[0].State) + c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse) } // We want to try logging in as a different user, while Stopped. @@ -926,12 +947,13 @@ func runTestStateMachine(t *testing.T, seamless bool) { cc.assertCalls("unpause") c.Assert(nn[0].LoginFinished, qt.IsNotNil) c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(nn[2].State, qt.IsNotNil) // Prefs after finishing the login, so LoginName updated. c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName, qt.Equals, "user3") c.Assert(nn[1].Prefs.LoggedOut(), qt.IsFalse) c.Assert(nn[1].Prefs.WantRunning(), qt.IsTrue) - c.Assert(ipn.Starting, qt.Equals, *nn[2].State) + // nn[2] is state notification (Starting) - verify using backend state + c.Assert(isWantRunning(b), qt.IsTrue) + c.Assert(isLoggedIn(b), qt.IsTrue) } // The last test case is the most common one: restarting when both @@ -950,11 +972,10 @@ func runTestStateMachine(t *testing.T, seamless bool) { c.Assert(nn[0].Prefs, qt.IsNotNil) c.Assert(nn[0].Prefs.LoggedOut(), qt.IsFalse) c.Assert(nn[0].Prefs.WantRunning(), qt.IsTrue) - // We're logged in and have a valid netmap, so we should - // be in the Starting state. - c.Assert(nn[1].State, qt.IsNotNil) - c.Assert(*nn[1].State, qt.Equals, ipn.Starting) - c.Assert(b.State(), qt.Equals, ipn.Starting) + // nn[1] is state notification (Starting) + // Verify we're authenticated with valid netmap using backend state + c.Assert(isFullyAuthenticated(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsTrue) } // Control server accepts our valid key from before. @@ -971,7 +992,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { // NOTE: No prefs change this time. WantRunning stays true. // We were in Starting in the first place, so that doesn't // change either, so we don't expect any notifications. 
- c.Assert(ipn.Starting, qt.Equals, b.State()) + // Verify we're still authenticated with valid netmap + c.Assert(isFullyAuthenticated(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsTrue) } t.Logf("\n\nExpireKey") notifies.expect(1) @@ -982,9 +1005,10 @@ func runTestStateMachine(t *testing.T, seamless bool) { { nn := notifies.drain(1) cc.assertCalls() - c.Assert(nn[0].State, qt.IsNotNil) - c.Assert(ipn.NeedsLogin, qt.Equals, *nn[0].State) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // nn[0] is state notification (NeedsLogin) due to key expiry + c.Assert(len(nn), qt.Equals, 1) + // Verify key expired, need new login using backend state + c.Assert(needsLogin(b), qt.IsTrue) c.Assert(b.isEngineBlocked(), qt.IsTrue) } @@ -997,9 +1021,11 @@ func runTestStateMachine(t *testing.T, seamless bool) { { nn := notifies.drain(1) cc.assertCalls() - c.Assert(nn[0].State, qt.IsNotNil) - c.Assert(ipn.Starting, qt.Equals, *nn[0].State) - c.Assert(ipn.Starting, qt.Equals, b.State()) + // nn[0] is state notification (Starting) after key extension + c.Assert(len(nn), qt.Equals, 1) + // Verify key extended, authenticated again using backend state + c.Assert(isFullyAuthenticated(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsTrue) c.Assert(b.isEngineBlocked(), qt.IsFalse) } notifies.expect(1) @@ -1008,9 +1034,10 @@ func runTestStateMachine(t *testing.T, seamless bool) { { nn := notifies.drain(1) cc.assertCalls() - c.Assert(nn[0].State, qt.IsNotNil) - c.Assert(ipn.Running, qt.Equals, *nn[0].State) - c.Assert(ipn.Running, qt.Equals, b.State()) + // nn[0] is state notification (Running) after DERP connection + c.Assert(len(nn), qt.Equals, 1) + // Verify we can route traffic using backend state + c.Assert(canRouteTraffic(b), qt.IsTrue) } } @@ -1901,3 +1928,77 @@ func (e *mockEngine) Close() { func (e *mockEngine) Done() <-chan struct{} { return e.done } + +// hasValidNetMap returns true if the backend has a valid network map with a valid self node. +func hasValidNetMap(b *LocalBackend) bool { + nm := b.NetMap() + return nm != nil && nm.SelfNode.Valid() +} + +// needsLogin returns true if the backend needs user login action. +// This is true when logged out, when an auth URL is present (interactive login in progress), +// or when the node key has expired. +func needsLogin(b *LocalBackend) bool { + // Note: b.Prefs() handles its own locking, so we lock only for authURL and keyExpired access + b.mu.Lock() + authURL := b.authURL + keyExpired := b.keyExpired + b.mu.Unlock() + return b.Prefs().LoggedOut() || authURL != "" || keyExpired +} + +// needsMachineAuth returns true if the user has logged in but the machine is not yet authorized. +// This includes the case where we have a netmap but no valid SelfNode yet (empty netmap after initial login). +func needsMachineAuth(b *LocalBackend) bool { + // Note: b.NetMap() and b.Prefs() handle their own locking + nm := b.NetMap() + prefs := b.Prefs() + if prefs.LoggedOut() || nm == nil { + return false + } + // If we have a valid SelfNode, check its MachineAuthorized status + if nm.SelfNode.Valid() { + return !nm.SelfNode.MachineAuthorized() + } + // Empty netmap (no SelfNode yet) after login also means we need machine auth + return true +} + +// hasAuthURL returns true if an authentication URL is present (user needs to visit a URL). +func hasAuthURL(b *LocalBackend) bool { + b.mu.Lock() + authURL := b.authURL + b.mu.Unlock() + return authURL != "" +} + +// canRouteTraffic returns true if the backend is capable of routing traffic. 
+// This requires a valid netmap, machine authorization, and WantRunning preference. +func canRouteTraffic(b *LocalBackend) bool { + // Note: b.NetMap() and b.Prefs() handle their own locking + nm := b.NetMap() + prefs := b.Prefs() + return nm != nil && + nm.SelfNode.Valid() && + nm.SelfNode.MachineAuthorized() && + prefs.WantRunning() +} + +// isFullyAuthenticated returns true if the user has completed login and no auth URL is pending. +func isFullyAuthenticated(b *LocalBackend) bool { + // Note: b.Prefs() handles its own locking, so we lock only for authURL access + b.mu.Lock() + authURL := b.authURL + b.mu.Unlock() + return !b.Prefs().LoggedOut() && authURL == "" +} + +// isWantRunning returns true if the WantRunning preference is set. +func isWantRunning(b *LocalBackend) bool { + return b.Prefs().WantRunning() +} + +// isLoggedIn returns true if the user is logged in (not logged out). +func isLoggedIn(b *LocalBackend) bool { + return !b.Prefs().LoggedOut() +} From e1f0ad7a0516d056d27c383c51effa90f1c11d2e Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 14 Nov 2025 19:43:44 -0800 Subject: [PATCH 1663/1708] net/udprelay: implement Server.SetStaticAddrPorts (#17909) Only used in tests for now. Updates tailscale/corp#31489 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 32 ++--------- feature/relayserver/relayserver_test.go | 5 +- net/udprelay/server.go | 75 ++++++++++++++----------- net/udprelay/server_test.go | 24 +++++--- 4 files changed, 64 insertions(+), 72 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 868d5f61a..cfa372bd7 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -8,14 +8,10 @@ package relayserver import ( "encoding/json" "fmt" - "log" "net/http" - "net/netip" - "strings" "sync" "tailscale.com/disco" - "tailscale.com/envknob" "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" @@ -71,8 +67,8 @@ func servePeerRelayDebugSessions(h *localapi.Handler, w http.ResponseWriter, r * // imported. func newExtension(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { e := &extension{ - newServerFn: func(logf logger.Logf, port int, overrideAddrs []netip.Addr) (relayServer, error) { - return udprelay.NewServer(logf, port, overrideAddrs) + newServerFn: func(logf logger.Logf, port int, onlyStaticAddrPorts bool) (relayServer, error) { + return udprelay.NewServer(logf, port, onlyStaticAddrPorts) }, logf: logger.WithPrefix(logf, featureName+": "), } @@ -94,7 +90,7 @@ type relayServer interface { // extension is an [ipnext.Extension] managing the relay server on platforms // that import this package. 
type extension struct { - newServerFn func(logf logger.Logf, port int, overrideAddrs []netip.Addr) (relayServer, error) // swappable for tests + newServerFn func(logf logger.Logf, port int, onlyStaticAddrPorts bool) (relayServer, error) // swappable for tests logf logger.Logf ec *eventbus.Client respPub *eventbus.Publisher[magicsock.UDPRelayAllocResp] @@ -170,7 +166,7 @@ func (e *extension) onAllocReq(req magicsock.UDPRelayAllocReq) { } func (e *extension) tryStartRelayServerLocked() { - rs, err := e.newServerFn(e.logf, *e.port, overrideAddrs()) + rs, err := e.newServerFn(e.logf, *e.port, false) if err != nil { e.logf("error initializing server: %v", err) return @@ -217,26 +213,6 @@ func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsV e.handleRelayServerLifetimeLocked() } -// overrideAddrs returns TS_DEBUG_RELAY_SERVER_ADDRS as []netip.Addr, if set. It -// can be between 0 and 3 comma-separated Addrs. TS_DEBUG_RELAY_SERVER_ADDRS is -// not a stable interface, and is subject to change. -var overrideAddrs = sync.OnceValue(func() (ret []netip.Addr) { - all := envknob.String("TS_DEBUG_RELAY_SERVER_ADDRS") - const max = 3 - remain := all - for remain != "" && len(ret) < max { - var s string - s, remain, _ = strings.Cut(remain, ",") - addr, err := netip.ParseAddr(s) - if err != nil { - log.Printf("ignoring invalid Addr %q in TS_DEBUG_RELAY_SERVER_ADDRS %q: %v", s, all, err) - continue - } - ret = append(ret, addr) - } - return -}) - func (e *extension) stopRelayServerLocked() { if e.rs != nil { e.rs.Close() diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index 2184b5175..3d71c55d7 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -5,7 +5,6 @@ package relayserver import ( "errors" - "net/netip" "reflect" "testing" @@ -157,7 +156,7 @@ func Test_extension_profileStateChanged(t *testing.T) { t.Fatal(err) } e := ipne.(*extension) - e.newServerFn = func(logf logger.Logf, port int, overrideAddrs []netip.Addr) (relayServer, error) { + e.newServerFn = func(logf logger.Logf, port int, onlyStaticAddrPorts bool) (relayServer, error) { return &mockRelayServer{}, nil } e.port = tt.fields.port @@ -289,7 +288,7 @@ func Test_extension_handleRelayServerLifetimeLocked(t *testing.T) { t.Fatal(err) } e := ipne.(*extension) - e.newServerFn = func(logf logger.Logf, port int, overrideAddrs []netip.Addr) (relayServer, error) { + e.newServerFn = func(logf logger.Logf, port int, onlyStaticAddrPorts bool) (relayServer, error) { return &mockRelayServer{}, nil } e.shutdown = tt.shutdown diff --git a/net/udprelay/server.go b/net/udprelay/server.go index c050c9416..7138cec7a 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -36,6 +36,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/nettype" + "tailscale.com/types/views" "tailscale.com/util/eventbus" "tailscale.com/util/set" ) @@ -72,15 +73,16 @@ type Server struct { closeCh chan struct{} netChecker *netcheck.Client - mu sync.Mutex // guards the following fields - derpMap *tailcfg.DERPMap - addrDiscoveryOnce bool // addrDiscovery completed once (successfully or unsuccessfully) - addrPorts []netip.AddrPort // the ip:port pairs returned as candidate endpoints - closed bool - lamportID uint64 - nextVNI uint32 - byVNI map[uint32]*serverEndpoint - byDisco map[key.SortedPairOfDiscoPublic]*serverEndpoint + mu sync.Mutex // guards the following fields + derpMap *tailcfg.DERPMap + onlyStaticAddrPorts 
bool // no dynamic addr port discovery when set + staticAddrPorts views.Slice[netip.AddrPort] // static ip:port pairs set with [Server.SetStaticAddrPorts] + dynamicAddrPorts []netip.AddrPort // dynamically discovered ip:port pairs + closed bool + lamportID uint64 + nextVNI uint32 + byVNI map[uint32]*serverEndpoint + byDisco map[key.SortedPairOfDiscoPublic]*serverEndpoint } const ( @@ -278,15 +280,17 @@ func (e *serverEndpoint) isBound() bool { // NewServer constructs a [Server] listening on port. If port is zero, then // port selection is left up to the host networking stack. If -// len(overrideAddrs) > 0 these will be used in place of dynamic discovery, -// which is useful to override in tests. -func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Server, err error) { +// onlyStaticAddrPorts is true, then dynamic addr:port discovery will be +// disabled, and only addr:port's set via [Server.SetStaticAddrPorts] will be +// used. +func NewServer(logf logger.Logf, port int, onlyStaticAddrPorts bool) (s *Server, err error) { s = &Server{ logf: logf, disco: key.NewDisco(), bindLifetime: defaultBindLifetime, steadyStateLifetime: defaultSteadyStateLifetime, closeCh: make(chan struct{}), + onlyStaticAddrPorts: onlyStaticAddrPorts, byDisco: make(map[key.SortedPairOfDiscoPublic]*serverEndpoint), nextVNI: minVNI, byVNI: make(map[uint32]*serverEndpoint), @@ -321,19 +325,7 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve return nil, err } - if len(overrideAddrs) > 0 { - addrPorts := make(set.Set[netip.AddrPort], len(overrideAddrs)) - for _, addr := range overrideAddrs { - if addr.IsValid() { - if addr.Is4() { - addrPorts.Add(netip.AddrPortFrom(addr, s.uc4Port)) - } else if s.uc6 != nil { - addrPorts.Add(netip.AddrPortFrom(addr, s.uc6Port)) - } - } - } - s.addrPorts = addrPorts.Slice() - } else { + if !s.onlyStaticAddrPorts { s.wg.Add(1) go s.addrDiscoveryLoop() } @@ -429,8 +421,7 @@ func (s *Server) addrDiscoveryLoop() { s.logf("error discovering IP:port candidates: %v", err) } s.mu.Lock() - s.addrPorts = addrPorts - s.addrDiscoveryOnce = true + s.dynamicAddrPorts = addrPorts s.mu.Unlock() case <-s.closeCh: return @@ -747,6 +738,15 @@ func (s *Server) getNextVNILocked() (uint32, error) { return 0, errors.New("VNI pool exhausted") } +// getAllAddrPortsCopyLocked returns a copy of the combined +// [Server.staticAddrPorts] and [Server.dynamicAddrPorts] slices. +func (s *Server) getAllAddrPortsCopyLocked() []netip.AddrPort { + addrPorts := make([]netip.AddrPort, 0, len(s.dynamicAddrPorts)+s.staticAddrPorts.Len()) + addrPorts = append(addrPorts, s.staticAddrPorts.AsSlice()...) + addrPorts = append(addrPorts, slices.Clone(s.dynamicAddrPorts)...) + return addrPorts +} + // AllocateEndpoint allocates an [endpoint.ServerEndpoint] for the provided pair // of [key.DiscoPublic]'s. If an allocation already exists for discoA and discoB // it is returned without modification/reallocation. 
AllocateEndpoint returns @@ -760,11 +760,8 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv return endpoint.ServerEndpoint{}, ErrServerClosed } - if len(s.addrPorts) == 0 { - if !s.addrDiscoveryOnce { - return endpoint.ServerEndpoint{}, ErrServerNotReady{RetryAfter: endpoint.ServerRetryAfter} - } - return endpoint.ServerEndpoint{}, errors.New("server addrPorts are not yet known") + if s.staticAddrPorts.Len() == 0 && len(s.dynamicAddrPorts) == 0 { + return endpoint.ServerEndpoint{}, ErrServerNotReady{RetryAfter: endpoint.ServerRetryAfter} } if discoA.Compare(s.discoPublic) == 0 || discoB.Compare(s.discoPublic) == 0 { @@ -787,7 +784,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv // consider storing them (maybe interning) in the [*serverEndpoint] // at allocation time. ClientDisco: pair.Get(), - AddrPorts: slices.Clone(s.addrPorts), + AddrPorts: s.getAllAddrPortsCopyLocked(), VNI: e.vni, LamportID: e.lamportID, BindLifetime: tstime.GoDuration{Duration: s.bindLifetime}, @@ -817,7 +814,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv return endpoint.ServerEndpoint{ ServerDisco: s.discoPublic, ClientDisco: pair.Get(), - AddrPorts: slices.Clone(s.addrPorts), + AddrPorts: s.getAllAddrPortsCopyLocked(), VNI: e.vni, LamportID: e.lamportID, BindLifetime: tstime.GoDuration{Duration: s.bindLifetime}, @@ -880,3 +877,13 @@ func (s *Server) getDERPMap() *tailcfg.DERPMap { defer s.mu.Unlock() return s.derpMap } + +// SetStaticAddrPorts sets addr:port pairs the [Server] will advertise +// as candidates it is potentially reachable over, in combination with +// dynamically discovered pairs. This replaces any previously-provided static +// values. +func (s *Server) SetStaticAddrPorts(addrPorts views.Slice[netip.AddrPort]) { + s.mu.Lock() + defer s.mu.Unlock() + s.staticAddrPorts = addrPorts +} diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index bf7f0a9b5..6c3d61658 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -17,6 +17,7 @@ import ( "tailscale.com/disco" "tailscale.com/net/packet" "tailscale.com/types/key" + "tailscale.com/types/views" ) type testClient struct { @@ -185,31 +186,40 @@ func TestServer(t *testing.T) { cases := []struct { name string - overrideAddrs []netip.Addr + staticAddrs []netip.Addr forceClientsMixedAF bool }{ { - name: "over ipv4", - overrideAddrs: []netip.Addr{netip.MustParseAddr("127.0.0.1")}, + name: "over ipv4", + staticAddrs: []netip.Addr{netip.MustParseAddr("127.0.0.1")}, }, { - name: "over ipv6", - overrideAddrs: []netip.Addr{netip.MustParseAddr("::1")}, + name: "over ipv6", + staticAddrs: []netip.Addr{netip.MustParseAddr("::1")}, }, { name: "mixed address families", - overrideAddrs: []netip.Addr{netip.MustParseAddr("127.0.0.1"), netip.MustParseAddr("::1")}, + staticAddrs: []netip.Addr{netip.MustParseAddr("127.0.0.1"), netip.MustParseAddr("::1")}, forceClientsMixedAF: true, }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { - server, err := NewServer(t.Logf, 0, tt.overrideAddrs) + server, err := NewServer(t.Logf, 0, true) if err != nil { t.Fatal(err) } defer server.Close() + addrPorts := make([]netip.AddrPort, 0, len(tt.staticAddrs)) + for _, addr := range tt.staticAddrs { + if addr.Is4() { + addrPorts = append(addrPorts, netip.AddrPortFrom(addr, server.uc4Port)) + } else if server.uc6Port != 0 { + addrPorts = append(addrPorts, netip.AddrPortFrom(addr, server.uc6Port)) + } + } + 
server.SetStaticAddrPorts(views.SliceOf(addrPorts)) endpoint, err := server.AllocateEndpoint(discoA.Public(), discoB.Public()) if err != nil { From 8444659ed8eafe501485396696f78d3eddf72ef4 Mon Sep 17 00:00:00 2001 From: Xinyu Kuo Date: Fri, 17 Oct 2025 13:47:13 +0800 Subject: [PATCH 1664/1708] cmd/tailscale/cli: fix panic in netcheck with mismatched DERP region IDs Fixes #17564 Signed-off-by: Xinyu Kuo --- cmd/tailscale/cli/netcheck.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/netcheck.go b/cmd/tailscale/cli/netcheck.go index 5ae8db8fa..a8a8992f5 100644 --- a/cmd/tailscale/cli/netcheck.go +++ b/cmd/tailscale/cli/netcheck.go @@ -180,7 +180,11 @@ func printReport(dm *tailcfg.DERPMap, report *netcheck.Report) error { printf("\t* Nearest DERP: unknown (no response to latency probes)\n") } else { if report.PreferredDERP != 0 { - printf("\t* Nearest DERP: %v\n", dm.Regions[report.PreferredDERP].RegionName) + if region, ok := dm.Regions[report.PreferredDERP]; ok { + printf("\t* Nearest DERP: %v\n", region.RegionName) + } else { + printf("\t* Nearest DERP: %v (region not found in map)\n", report.PreferredDERP) + } } else { printf("\t* Nearest DERP: [none]\n") } From 8aa46a395631f49b98104eea2b13432f1a196375 Mon Sep 17 00:00:00 2001 From: Avery Palmer Date: Sat, 15 Nov 2025 17:15:14 +0000 Subject: [PATCH 1665/1708] util/clientmetric: fix regression causing Metric.v to be uninitialised m.v was uninitialised when Tailscale built with ts_omit_logtail Fixes #17918 Signed-off-by: Avery Palmer --- util/clientmetric/clientmetric.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/util/clientmetric/clientmetric.go b/util/clientmetric/clientmetric.go index 65223e6a9..9e6b03a15 100644 --- a/util/clientmetric/clientmetric.go +++ b/util/clientmetric/clientmetric.go @@ -133,15 +133,18 @@ func (m *Metric) Publish() { metrics[m.name] = m sortedDirty = true + if m.f == nil { + if len(valFreeList) == 0 { + valFreeList = make([]int64, 256) + } + m.v = &valFreeList[0] + valFreeList = valFreeList[1:] + } + if buildfeatures.HasLogTail { if m.f != nil { lastLogVal = append(lastLogVal, scanEntry{f: m.f}) } else { - if len(valFreeList) == 0 { - valFreeList = make([]int64, 256) - } - m.v = &valFreeList[0] - valFreeList = valFreeList[1:] lastLogVal = append(lastLogVal, scanEntry{v: m.v}) } } From 4e01e8a66ec6ae1d5ebecc60ad12b26ce300c860 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 15 Nov 2025 10:01:44 -0800 Subject: [PATCH 1666/1708] wgengine/netlog: fix send to closed channel in test Fixes #17922 Change-Id: I2cd600b0ecda389079f2004985ac9a25ffbbfdd1 Signed-off-by: Brad Fitzpatrick --- wgengine/netlog/netlog_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/wgengine/netlog/netlog_test.go b/wgengine/netlog/netlog_test.go index ed9f672bf..b4758c7ec 100644 --- a/wgengine/netlog/netlog_test.go +++ b/wgengine/netlog/netlog_test.go @@ -182,6 +182,7 @@ func TestUpdateRace(t *testing.T) { group.Wait() logger.mu.Lock() close(logger.recordsChan) + logger.recordsChan = nil logger.mu.Unlock() } From 98aadbaf548dfd55523c89c49f60ad1aed4ccb6b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 15 Nov 2025 09:49:08 -0800 Subject: [PATCH 1667/1708] util/cache: remove unused code Updates #cleanup Change-Id: I9be7029c5d2a7d6297125d0147e93205a7c68989 Signed-off-by: Brad Fitzpatrick --- util/cache/cache_test.go | 199 --------------------------------------- util/cache/interface.go | 40 -------- util/cache/locking.go | 43 --------- 
util/cache/none.go | 23 ----- util/cache/single.go | 81 ---------------- 5 files changed, 386 deletions(-) delete mode 100644 util/cache/cache_test.go delete mode 100644 util/cache/interface.go delete mode 100644 util/cache/locking.go delete mode 100644 util/cache/none.go delete mode 100644 util/cache/single.go diff --git a/util/cache/cache_test.go b/util/cache/cache_test.go deleted file mode 100644 index a6683e12d..000000000 --- a/util/cache/cache_test.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package cache - -import ( - "errors" - "testing" - "time" -) - -var startTime = time.Date(2023, time.March, 1, 0, 0, 0, 0, time.UTC) - -func TestSingleCache(t *testing.T) { - testTime := startTime - timeNow := func() time.Time { return testTime } - c := &Single[string, int]{ - timeNow: timeNow, - } - - t.Run("NoServeExpired", func(t *testing.T) { - testCacheImpl(t, c, &testTime, false) - }) - - t.Run("ServeExpired", func(t *testing.T) { - c.Empty() - c.ServeExpired = true - testTime = startTime - testCacheImpl(t, c, &testTime, true) - }) -} - -func TestLocking(t *testing.T) { - testTime := startTime - timeNow := func() time.Time { return testTime } - c := NewLocking(&Single[string, int]{ - timeNow: timeNow, - }) - - // Just verify that the inner cache's behaviour hasn't changed. - testCacheImpl(t, c, &testTime, false) -} - -func testCacheImpl(t *testing.T, c Cache[string, int], testTime *time.Time, serveExpired bool) { - var fillTime time.Time - t.Run("InitialFill", func(t *testing.T) { - fillTime = testTime.Add(time.Hour) - val, err := c.Get("key", func() (int, time.Time, error) { - return 123, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 123 { - t.Fatalf("got val=%d; want 123", val) - } - }) - - // Fetching again won't call our fill function - t.Run("SecondFetch", func(t *testing.T) { - *testTime = fillTime.Add(-1 * time.Second) - called := false - val, err := c.Get("key", func() (int, time.Time, error) { - called = true - return -1, fillTime, nil - }) - if called { - t.Fatal("wanted no call to fill function") - } - if err != nil { - t.Fatal(err) - } - if val != 123 { - t.Fatalf("got val=%d; want 123", val) - } - }) - - // Fetching after the expiry time will re-fill - t.Run("ReFill", func(t *testing.T) { - *testTime = fillTime.Add(1) - fillTime = fillTime.Add(time.Hour) - val, err := c.Get("key", func() (int, time.Time, error) { - return 999, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 999 { - t.Fatalf("got val=%d; want 999", val) - } - }) - - // An error on fetch will serve the expired value. 
- t.Run("FetchError", func(t *testing.T) { - if !serveExpired { - t.Skipf("not testing ServeExpired") - } - - *testTime = fillTime.Add(time.Hour + 1) - val, err := c.Get("key", func() (int, time.Time, error) { - return 0, time.Time{}, errors.New("some error") - }) - if err != nil { - t.Fatal(err) - } - if val != 999 { - t.Fatalf("got val=%d; want 999", val) - } - }) - - // Fetching a different key re-fills - t.Run("DifferentKey", func(t *testing.T) { - *testTime = fillTime.Add(time.Hour + 1) - - var calls int - val, err := c.Get("key1", func() (int, time.Time, error) { - calls++ - return 123, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 123 { - t.Fatalf("got val=%d; want 123", val) - } - if calls != 1 { - t.Errorf("got %d, want 1 call", calls) - } - - val, err = c.Get("key2", func() (int, time.Time, error) { - calls++ - return 456, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 456 { - t.Fatalf("got val=%d; want 456", val) - } - if calls != 2 { - t.Errorf("got %d, want 2 call", calls) - } - }) - - // Calling Forget with the wrong key does nothing, and with the correct - // key will drop the cache. - t.Run("Forget", func(t *testing.T) { - // Add some time so that previously-cached values don't matter. - fillTime = testTime.Add(2 * time.Hour) - *testTime = fillTime.Add(-1 * time.Second) - - const key = "key" - - var calls int - val, err := c.Get(key, func() (int, time.Time, error) { - calls++ - return 123, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 123 { - t.Fatalf("got val=%d; want 123", val) - } - if calls != 1 { - t.Errorf("got %d, want 1 call", calls) - } - - // Forgetting the wrong key does nothing - c.Forget("other") - val, err = c.Get(key, func() (int, time.Time, error) { - t.Fatal("should not be called") - panic("unreachable") - }) - if err != nil { - t.Fatal(err) - } - if val != 123 { - t.Fatalf("got val=%d; want 123", val) - } - - // Forgetting the correct key re-fills - c.Forget(key) - - val, err = c.Get("key2", func() (int, time.Time, error) { - calls++ - return 456, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 456 { - t.Fatalf("got val=%d; want 456", val) - } - if calls != 2 { - t.Errorf("got %d, want 2 call", calls) - } - }) -} diff --git a/util/cache/interface.go b/util/cache/interface.go deleted file mode 100644 index 0db87ba0e..000000000 --- a/util/cache/interface.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package cache contains an interface for a cache around a typed value, and -// various cache implementations that implement that interface. -package cache - -import "time" - -// Cache is the interface for the cache types in this package. -// -// Functions in this interface take a key parameter, but it is valid for a -// cache type to hold a single value associated with a key, and simply drop the -// cached value if provided with a different key. -// -// It is valid for Cache implementations to be concurrency-safe or not, and -// each implementation should document this. If you need a concurrency-safe -// cache, an existing cache can be wrapped with a lock using NewLocking(inner). -// -// K and V should be types that can be successfully passed to json.Marshal. -type Cache[K comparable, V any] interface { - // Get should return a previously-cached value or call the provided - // FillFunc to obtain a new one. 
The provided key can be used either to - // allow multiple cached values, or to drop the cache if the key - // changes; either is valid. - Get(K, FillFunc[V]) (V, error) - - // Forget should remove the given key from the cache, if it is present. - // If it is not present, nothing should be done. - Forget(K) - - // Empty should empty the cache such that the next call to Get should - // call the provided FillFunc for all possible keys. - Empty() -} - -// FillFunc is the signature of a function for filling a cache. It should -// return the value to be cached, the time that the cached value is valid -// until, or an error. -type FillFunc[T any] func() (T, time.Time, error) diff --git a/util/cache/locking.go b/util/cache/locking.go deleted file mode 100644 index 85e44b360..000000000 --- a/util/cache/locking.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package cache - -import "sync" - -// Locking wraps an inner Cache implementation with a mutex, making it -// safe for concurrent use. All methods are serialized on the same mutex. -type Locking[K comparable, V any, C Cache[K, V]] struct { - sync.Mutex - inner C -} - -// NewLocking creates a new Locking cache wrapping inner. -func NewLocking[K comparable, V any, C Cache[K, V]](inner C) *Locking[K, V, C] { - return &Locking[K, V, C]{inner: inner} -} - -// Get implements Cache. -// -// The cache's mutex is held for the entire duration of this function, -// including while the FillFunc is being called. This function is not -// reentrant; attempting to call Get from a FillFunc will deadlock. -func (c *Locking[K, V, C]) Get(key K, f FillFunc[V]) (V, error) { - c.Lock() - defer c.Unlock() - return c.inner.Get(key, f) -} - -// Forget implements Cache. -func (c *Locking[K, V, C]) Forget(key K) { - c.Lock() - defer c.Unlock() - c.inner.Forget(key) -} - -// Empty implements Cache. -func (c *Locking[K, V, C]) Empty() { - c.Lock() - defer c.Unlock() - c.inner.Empty() -} diff --git a/util/cache/none.go b/util/cache/none.go deleted file mode 100644 index c4073e0d9..000000000 --- a/util/cache/none.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package cache - -// None provides no caching and always calls the provided FillFunc. -// -// It is safe for concurrent use if the underlying FillFunc is. -type None[K comparable, V any] struct{} - -var _ Cache[int, int] = None[int, int]{} - -// Get always calls the provided FillFunc and returns what it does. -func (c None[K, V]) Get(_ K, f FillFunc[V]) (V, error) { - v, _, e := f() - return v, e -} - -// Forget implements Cache. -func (None[K, V]) Forget(K) {} - -// Empty implements Cache. -func (None[K, V]) Empty() {} diff --git a/util/cache/single.go b/util/cache/single.go deleted file mode 100644 index 6b9ac2c11..000000000 --- a/util/cache/single.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package cache - -import ( - "time" -) - -// Single is a simple in-memory cache that stores a single value until a -// defined time before it is re-fetched. It also supports returning a -// previously-expired value if refreshing the value in the cache fails. -// -// Single is not safe for concurrent use. 
-type Single[K comparable, V any] struct { - key K - val V - goodUntil time.Time - timeNow func() time.Time // for tests - - // ServeExpired indicates that if an error occurs when filling the - // cache, an expired value can be returned instead of an error. - // - // This value should only be set when this struct is created. - ServeExpired bool -} - -var _ Cache[int, int] = (*Single[int, int])(nil) - -// Get will return the cached value, if any, or fill the cache by calling f and -// return the corresponding value. If f returns an error and c.ServeExpired is -// true, then a previous expired value can be returned with no error. -func (c *Single[K, V]) Get(key K, f FillFunc[V]) (V, error) { - var now time.Time - if c.timeNow != nil { - now = c.timeNow() - } else { - now = time.Now() - } - - if c.key == key && now.Before(c.goodUntil) { - return c.val, nil - } - - // Re-fill cached entry - val, until, err := f() - if err == nil { - c.key = key - c.val = val - c.goodUntil = until - return val, nil - } - - // Never serve an expired entry for the wrong key. - if c.key == key && c.ServeExpired && !c.goodUntil.IsZero() { - return c.val, nil - } - - var zero V - return zero, err -} - -// Forget implements Cache. -func (c *Single[K, V]) Forget(key K) { - if c.key != key { - return - } - - c.Empty() -} - -// Empty implements Cache. -func (c *Single[K, V]) Empty() { - c.goodUntil = time.Time{} - - var zeroKey K - c.key = zeroKey - - var zeroVal V - c.val = zeroVal -} From 653d0738f9afd9ee4785eff06c4a1908a4e6eaaf Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 15 Nov 2025 13:28:29 -0800 Subject: [PATCH 1668/1708] types/netmap: remove PrivateKey from NetworkMap It's an unnecessary nuisance having it. We go out of our way to redact it in so many places when we don't even need it there anyway. 
Updates #12639 Change-Id: I5fc72e19e9cf36caeb42cf80ba430873f67167c3 Signed-off-by: Brad Fitzpatrick --- client/systray/systray.go | 2 +- cmd/sniproxy/sniproxy.go | 2 +- cmd/stund/depaware.txt | 5 +- cmd/tailscale/cli/debug.go | 21 +- cmd/tailscale/cli/serve_v2.go | 2 +- cmd/tsidp/tsidp.go | 2 +- control/controlclient/map.go | 1 - ipn/backend.go | 2 +- ipn/ipnlocal/c2n.go | 1 - ipn/ipnlocal/c2n_test.go | 345 +------------------------ ipn/ipnlocal/local.go | 43 +-- ipn/ipnlocal/local_test.go | 104 ++++++++ ipn/ipnlocal/profiles.go | 2 + ipn/localapi/localapi.go | 8 - ipn/localapi/localapi_test.go | 19 +- tsconsensus/monitor.go | 4 +- tsnet/tsnet.go | 2 +- tstest/integration/integration_test.go | 6 - tstest/typewalk/typewalk.go | 106 ++++++++ types/key/util.go | 18 ++ types/netmap/netmap.go | 9 +- types/netmap/netmap_test.go | 9 + wgengine/bench/wg.go | 10 +- wgengine/magicsock/magicsock_test.go | 26 +- wgengine/wgcfg/nmcfg/nmcfg.go | 5 +- 25 files changed, 292 insertions(+), 462 deletions(-) create mode 100644 tstest/typewalk/typewalk.go diff --git a/client/systray/systray.go b/client/systray/systray.go index 518b2e989..bc099a1ec 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -512,7 +512,7 @@ func (menu *Menu) watchIPNBus() { } func (menu *Menu) watchIPNBusInner() error { - watcher, err := menu.lc.WatchIPNBus(menu.bgCtx, ipn.NotifyNoPrivateKeys) + watcher, err := menu.lc.WatchIPNBus(menu.bgCtx, 0) if err != nil { return fmt.Errorf("watching ipn bus: %w", err) } diff --git a/cmd/sniproxy/sniproxy.go b/cmd/sniproxy/sniproxy.go index c020b4a1f..2115c8095 100644 --- a/cmd/sniproxy/sniproxy.go +++ b/cmd/sniproxy/sniproxy.go @@ -141,7 +141,7 @@ func run(ctx context.Context, ts *tsnet.Server, wgPort int, hostname string, pro // in the netmap. // We set the NotifyInitialNetMap flag so we will always get woken with the // current netmap, before only being woken on changes. 
- bus, err := lc.WatchIPNBus(ctx, ipn.NotifyWatchEngineUpdates|ipn.NotifyInitialNetMap|ipn.NotifyNoPrivateKeys) + bus, err := lc.WatchIPNBus(ctx, ipn.NotifyWatchEngineUpdates|ipn.NotifyInitialNetMap) if err != nil { log.Fatalf("watching IPN bus: %v", err) } diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index bd8eebb7b..7b3d05f94 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -82,8 +82,9 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/mak from tailscale.com/syncs+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/rands from tailscale.com/tsweb + tailscale.com/util/set from tailscale.com/types/key tailscale.com/util/slicesx from tailscale.com/tailcfg - tailscale.com/util/testenv from tailscale.com/types/logger + tailscale.com/util/testenv from tailscale.com/types/logger+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ tailscale.com/version from tailscale.com/envknob+ tailscale.com/version/distro from tailscale.com/envknob @@ -94,7 +95,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - golang.org/x/exp/constraints from tailscale.com/tsweb/varz + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/sys/cpu from golang.org/x/crypto/blake2b+ LD golang.org/x/sys/unix from github.com/prometheus/procfs+ W golang.org/x/sys/windows from github.com/prometheus/client_golang/prometheus diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 2836ae298..ffed51a63 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -258,7 +258,6 @@ func debugCmd() *ffcli.Command { fs.BoolVar(&watchIPNArgs.netmap, "netmap", true, "include netmap in messages") fs.BoolVar(&watchIPNArgs.initial, "initial", false, "include initial status") fs.BoolVar(&watchIPNArgs.rateLimit, "rate-limit", true, "rate limit messags") - fs.BoolVar(&watchIPNArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap") fs.IntVar(&watchIPNArgs.count, "count", 0, "exit after printing this many statuses, or 0 to keep going forever") return fs })(), @@ -270,7 +269,6 @@ func debugCmd() *ffcli.Command { ShortHelp: "Print the current network map", FlagSet: (func() *flag.FlagSet { fs := newFlagSet("netmap") - fs.BoolVar(&netmapArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap") return fs })(), }, @@ -614,11 +612,10 @@ func runPrefs(ctx context.Context, args []string) error { } var watchIPNArgs struct { - netmap bool - initial bool - showPrivateKey bool - rateLimit bool - count int + netmap bool + initial bool + rateLimit bool + count int } func runWatchIPN(ctx context.Context, args []string) error { @@ -626,9 +623,6 @@ func runWatchIPN(ctx context.Context, args []string) error { if watchIPNArgs.initial { mask = ipn.NotifyInitialState | ipn.NotifyInitialPrefs | ipn.NotifyInitialNetMap } - if !watchIPNArgs.showPrivateKey { - mask |= ipn.NotifyNoPrivateKeys - } if watchIPNArgs.rateLimit { mask |= ipn.NotifyRateLimit } @@ -652,18 +646,11 @@ func runWatchIPN(ctx context.Context, args []string) error { return nil } -var netmapArgs struct { - showPrivateKey bool -} - func runNetmap(ctx context.Context, args []string) error { ctx, cancel := context.WithTimeout(ctx, 
5*time.Second) defer cancel() var mask ipn.NotifyWatchOpt = ipn.NotifyInitialNetMap - if !netmapArgs.showPrivateKey { - mask |= ipn.NotifyNoPrivateKeys - } watcher, err := localClient.WatchIPNBus(ctx, mask) if err != nil { return err diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index e194b1e10..1ce14cf09 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -475,7 +475,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { // if foreground mode, create a WatchIPNBus session // and use the nested config for all following operations // TODO(marwan-at-work): nested-config validations should happen here or previous to this point. - watcher, err = e.lc.WatchIPNBus(ctx, ipn.NotifyInitialState|ipn.NotifyNoPrivateKeys) + watcher, err = e.lc.WatchIPNBus(ctx, ipn.NotifyInitialState) if err != nil { return err } diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index c02b09745..7093ab9ee 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -287,7 +287,7 @@ func serveOnLocalTailscaled(ctx context.Context, lc *local.Client, st *ipnstate. // We watch the IPN bus just to get a session ID. The session expires // when we stop watching the bus, and that auto-deletes the foreground // serve/funnel configs we are creating below. - watcher, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialState|ipn.NotifyNoPrivateKeys) + watcher, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialState) if err != nil { return nil, nil, fmt.Errorf("could not set up ipn bus watcher: %v", err) } diff --git a/control/controlclient/map.go b/control/controlclient/map.go index eafdb2d56..a9db25517 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -867,7 +867,6 @@ func (ms *mapSession) netmap() *netmap.NetworkMap { nm := &netmap.NetworkMap{ NodeKey: ms.publicNodeKey, - PrivateKey: ms.privateNodeKey, MachineKey: ms.machinePubKey, Peers: peerViews, UserProfiles: make(map[tailcfg.UserID]tailcfg.UserProfileView), diff --git a/ipn/backend.go b/ipn/backend.go index 91cf81ca5..b4ba958c5 100644 --- a/ipn/backend.go +++ b/ipn/backend.go @@ -74,7 +74,7 @@ const ( NotifyInitialPrefs NotifyWatchOpt = 1 << 2 // if set, the first Notify message (sent immediately) will contain the current Prefs NotifyInitialNetMap NotifyWatchOpt = 1 << 3 // if set, the first Notify message (sent immediately) will contain the current NetMap - NotifyNoPrivateKeys NotifyWatchOpt = 1 << 4 // if set, private keys that would normally be sent in updates are zeroed out + NotifyNoPrivateKeys NotifyWatchOpt = 1 << 4 // (no-op) it used to redact private keys; now they always are and this does nothing NotifyInitialDriveShares NotifyWatchOpt = 1 << 5 // if set, the first Notify message (sent immediately) will contain the current Taildrive Shares NotifyInitialOutgoingFiles NotifyWatchOpt = 1 << 6 // if set, the first Notify message (sent immediately) will contain the current Taildrop OutgoingFiles diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 0c228060f..b5e722b97 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -179,7 +179,6 @@ func handleC2NDebugNetMap(b *LocalBackend, w http.ResponseWriter, r *http.Reques } field.SetZero() } - nm, _ = redactNetmapPrivateKeys(nm) return json.Marshal(nm) } diff --git a/ipn/ipnlocal/c2n_test.go b/ipn/ipnlocal/c2n_test.go index 877d102d0..420633c87 100644 --- a/ipn/ipnlocal/c2n_test.go +++ b/ipn/ipnlocal/c2n_test.go @@ -13,21 +13,17 @@ import ( "os" "path/filepath" "reflect" - "strings" "testing" "time" 
"tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" "tailscale.com/tstest" - "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/views" "tailscale.com/util/must" - "tailscale.com/util/set" - "tailscale.com/wgengine/filter/filtertype" gcmp "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -144,338 +140,6 @@ func TestHandleC2NTLSCertStatus(t *testing.T) { } -// eachStructField calls cb for each struct field in struct type tp, recursively. -func eachStructField(tp reflect.Type, cb func(reflect.Type, reflect.StructField)) { - if !strings.HasPrefix(tp.PkgPath(), "tailscale.com/") { - // Stop traversing when we reach a non-tailscale type. - return - } - - for i := range tp.NumField() { - cb(tp, tp.Field(i)) - - switch tp.Field(i).Type.Kind() { - case reflect.Struct: - eachStructField(tp.Field(i).Type, cb) - case reflect.Slice, reflect.Array, reflect.Ptr, reflect.Map: - if tp.Field(i).Type.Elem().Kind() == reflect.Struct { - eachStructField(tp.Field(i).Type.Elem(), cb) - } - } - } -} - -// eachStructValue calls cb for each struct field in the struct value v, recursively. -func eachStructValue(v reflect.Value, cb func(reflect.Type, reflect.StructField, reflect.Value)) { - if v.IsZero() { - return - } - - for i := range v.NumField() { - cb(v.Type(), v.Type().Field(i), v.Field(i)) - - switch v.Type().Field(i).Type.Kind() { - case reflect.Struct: - eachStructValue(v.Field(i), cb) - case reflect.Slice, reflect.Array, reflect.Ptr, reflect.Map: - if v.Field(i).Type().Elem().Kind() == reflect.Struct { - eachStructValue(v.Field(i).Addr().Elem(), cb) - } - } - } -} - -// TestRedactNetmapPrivateKeys tests that redactNetmapPrivateKeys redacts all private keys -// and other private fields from a netmap.NetworkMap, and only those fields. -func TestRedactNetmapPrivateKeys(t *testing.T) { - type field struct { - t reflect.Type - f string - } - f := func(t any, f string) field { - return field{reflect.TypeOf(t), f} - } - // fields is a map of all struct fields in netmap.NetworkMap and its - // sub-structs, marking each field as private (true) or public (false). - // If you add a new field to netmap.NetworkMap or its sub-structs, - // you must add it to this list, marking it as private or public. - fields := map[field]bool{ - // Private fields to be redacted. - f(netmap.NetworkMap{}, "PrivateKey"): true, - - // All other fields are public. 
- f(netmap.NetworkMap{}, "AllCaps"): false, - f(netmap.NetworkMap{}, "CollectServices"): false, - f(netmap.NetworkMap{}, "DERPMap"): false, - f(netmap.NetworkMap{}, "DNS"): false, - f(netmap.NetworkMap{}, "DisplayMessages"): false, - f(netmap.NetworkMap{}, "Domain"): false, - f(netmap.NetworkMap{}, "DomainAuditLogID"): false, - f(netmap.NetworkMap{}, "Expiry"): false, - f(netmap.NetworkMap{}, "MachineKey"): false, - f(netmap.NetworkMap{}, "Name"): false, - f(netmap.NetworkMap{}, "NodeKey"): false, - f(netmap.NetworkMap{}, "PacketFilter"): false, - f(netmap.NetworkMap{}, "PacketFilterRules"): false, - f(netmap.NetworkMap{}, "Peers"): false, - f(netmap.NetworkMap{}, "SSHPolicy"): false, - f(netmap.NetworkMap{}, "SelfNode"): false, - f(netmap.NetworkMap{}, "TKAEnabled"): false, - f(netmap.NetworkMap{}, "TKAHead"): false, - f(netmap.NetworkMap{}, "UserProfiles"): false, - f(filtertype.CapMatch{}, "Cap"): false, - f(filtertype.CapMatch{}, "Dst"): false, - f(filtertype.CapMatch{}, "Values"): false, - f(filtertype.Match{}, "Caps"): false, - f(filtertype.Match{}, "Dsts"): false, - f(filtertype.Match{}, "IPProto"): false, - f(filtertype.Match{}, "SrcCaps"): false, - f(filtertype.Match{}, "Srcs"): false, - f(filtertype.Match{}, "SrcsContains"): false, - f(filtertype.NetPortRange{}, "Net"): false, - f(filtertype.NetPortRange{}, "Ports"): false, - f(filtertype.PortRange{}, "First"): false, - f(filtertype.PortRange{}, "Last"): false, - f(key.DiscoPublic{}, "k"): false, - f(key.MachinePublic{}, "k"): false, - f(key.NodePrivate{}, "_"): false, - f(key.NodePrivate{}, "k"): false, - f(key.NodePublic{}, "k"): false, - f(tailcfg.CapGrant{}, "CapMap"): false, - f(tailcfg.CapGrant{}, "Caps"): false, - f(tailcfg.CapGrant{}, "Dsts"): false, - f(tailcfg.DERPHomeParams{}, "RegionScore"): false, - f(tailcfg.DERPMap{}, "HomeParams"): false, - f(tailcfg.DERPMap{}, "OmitDefaultRegions"): false, - f(tailcfg.DERPMap{}, "Regions"): false, - f(tailcfg.DNSConfig{}, "CertDomains"): false, - f(tailcfg.DNSConfig{}, "Domains"): false, - f(tailcfg.DNSConfig{}, "ExitNodeFilteredSet"): false, - f(tailcfg.DNSConfig{}, "ExtraRecords"): false, - f(tailcfg.DNSConfig{}, "FallbackResolvers"): false, - f(tailcfg.DNSConfig{}, "Nameservers"): false, - f(tailcfg.DNSConfig{}, "Proxied"): false, - f(tailcfg.DNSConfig{}, "Resolvers"): false, - f(tailcfg.DNSConfig{}, "Routes"): false, - f(tailcfg.DNSConfig{}, "TempCorpIssue13969"): false, - f(tailcfg.DNSRecord{}, "Name"): false, - f(tailcfg.DNSRecord{}, "Type"): false, - f(tailcfg.DNSRecord{}, "Value"): false, - f(tailcfg.DisplayMessageAction{}, "Label"): false, - f(tailcfg.DisplayMessageAction{}, "URL"): false, - f(tailcfg.DisplayMessage{}, "ImpactsConnectivity"): false, - f(tailcfg.DisplayMessage{}, "PrimaryAction"): false, - f(tailcfg.DisplayMessage{}, "Severity"): false, - f(tailcfg.DisplayMessage{}, "Text"): false, - f(tailcfg.DisplayMessage{}, "Title"): false, - f(tailcfg.FilterRule{}, "CapGrant"): false, - f(tailcfg.FilterRule{}, "DstPorts"): false, - f(tailcfg.FilterRule{}, "IPProto"): false, - f(tailcfg.FilterRule{}, "SrcBits"): false, - f(tailcfg.FilterRule{}, "SrcIPs"): false, - f(tailcfg.HostinfoView{}, "ж"): false, - f(tailcfg.Hostinfo{}, "AllowsUpdate"): false, - f(tailcfg.Hostinfo{}, "App"): false, - f(tailcfg.Hostinfo{}, "AppConnector"): false, - f(tailcfg.Hostinfo{}, "BackendLogID"): false, - f(tailcfg.Hostinfo{}, "Cloud"): false, - f(tailcfg.Hostinfo{}, "Container"): false, - f(tailcfg.Hostinfo{}, "Desktop"): false, - f(tailcfg.Hostinfo{}, "DeviceModel"): false, - 
f(tailcfg.Hostinfo{}, "Distro"): false, - f(tailcfg.Hostinfo{}, "DistroCodeName"): false, - f(tailcfg.Hostinfo{}, "DistroVersion"): false, - f(tailcfg.Hostinfo{}, "Env"): false, - f(tailcfg.Hostinfo{}, "ExitNodeID"): false, - f(tailcfg.Hostinfo{}, "FrontendLogID"): false, - f(tailcfg.Hostinfo{}, "GoArch"): false, - f(tailcfg.Hostinfo{}, "GoArchVar"): false, - f(tailcfg.Hostinfo{}, "GoVersion"): false, - f(tailcfg.Hostinfo{}, "Hostname"): false, - f(tailcfg.Hostinfo{}, "IPNVersion"): false, - f(tailcfg.Hostinfo{}, "IngressEnabled"): false, - f(tailcfg.Hostinfo{}, "Location"): false, - f(tailcfg.Hostinfo{}, "Machine"): false, - f(tailcfg.Hostinfo{}, "NetInfo"): false, - f(tailcfg.Hostinfo{}, "NoLogsNoSupport"): false, - f(tailcfg.Hostinfo{}, "OS"): false, - f(tailcfg.Hostinfo{}, "OSVersion"): false, - f(tailcfg.Hostinfo{}, "Package"): false, - f(tailcfg.Hostinfo{}, "PushDeviceToken"): false, - f(tailcfg.Hostinfo{}, "RequestTags"): false, - f(tailcfg.Hostinfo{}, "RoutableIPs"): false, - f(tailcfg.Hostinfo{}, "SSH_HostKeys"): false, - f(tailcfg.Hostinfo{}, "Services"): false, - f(tailcfg.Hostinfo{}, "ServicesHash"): false, - f(tailcfg.Hostinfo{}, "ShareeNode"): false, - f(tailcfg.Hostinfo{}, "ShieldsUp"): false, - f(tailcfg.Hostinfo{}, "StateEncrypted"): false, - f(tailcfg.Hostinfo{}, "TPM"): false, - f(tailcfg.Hostinfo{}, "Userspace"): false, - f(tailcfg.Hostinfo{}, "UserspaceRouter"): false, - f(tailcfg.Hostinfo{}, "WireIngress"): false, - f(tailcfg.Hostinfo{}, "WoLMACs"): false, - f(tailcfg.Location{}, "City"): false, - f(tailcfg.Location{}, "CityCode"): false, - f(tailcfg.Location{}, "Country"): false, - f(tailcfg.Location{}, "CountryCode"): false, - f(tailcfg.Location{}, "Latitude"): false, - f(tailcfg.Location{}, "Longitude"): false, - f(tailcfg.Location{}, "Priority"): false, - f(tailcfg.NetInfo{}, "DERPLatency"): false, - f(tailcfg.NetInfo{}, "FirewallMode"): false, - f(tailcfg.NetInfo{}, "HavePortMap"): false, - f(tailcfg.NetInfo{}, "LinkType"): false, - f(tailcfg.NetInfo{}, "MappingVariesByDestIP"): false, - f(tailcfg.NetInfo{}, "OSHasIPv6"): false, - f(tailcfg.NetInfo{}, "PCP"): false, - f(tailcfg.NetInfo{}, "PMP"): false, - f(tailcfg.NetInfo{}, "PreferredDERP"): false, - f(tailcfg.NetInfo{}, "UPnP"): false, - f(tailcfg.NetInfo{}, "WorkingICMPv4"): false, - f(tailcfg.NetInfo{}, "WorkingIPv6"): false, - f(tailcfg.NetInfo{}, "WorkingUDP"): false, - f(tailcfg.NetPortRange{}, "Bits"): false, - f(tailcfg.NetPortRange{}, "IP"): false, - f(tailcfg.NetPortRange{}, "Ports"): false, - f(tailcfg.NetPortRange{}, "_"): false, - f(tailcfg.NodeView{}, "ж"): false, - f(tailcfg.Node{}, "Addresses"): false, - f(tailcfg.Node{}, "AllowedIPs"): false, - f(tailcfg.Node{}, "Cap"): false, - f(tailcfg.Node{}, "CapMap"): false, - f(tailcfg.Node{}, "Capabilities"): false, - f(tailcfg.Node{}, "ComputedName"): false, - f(tailcfg.Node{}, "ComputedNameWithHost"): false, - f(tailcfg.Node{}, "Created"): false, - f(tailcfg.Node{}, "DataPlaneAuditLogID"): false, - f(tailcfg.Node{}, "DiscoKey"): false, - f(tailcfg.Node{}, "Endpoints"): false, - f(tailcfg.Node{}, "ExitNodeDNSResolvers"): false, - f(tailcfg.Node{}, "Expired"): false, - f(tailcfg.Node{}, "HomeDERP"): false, - f(tailcfg.Node{}, "Hostinfo"): false, - f(tailcfg.Node{}, "ID"): false, - f(tailcfg.Node{}, "IsJailed"): false, - f(tailcfg.Node{}, "IsWireGuardOnly"): false, - f(tailcfg.Node{}, "Key"): false, - f(tailcfg.Node{}, "KeyExpiry"): false, - f(tailcfg.Node{}, "KeySignature"): false, - f(tailcfg.Node{}, "LastSeen"): false, - f(tailcfg.Node{}, 
"LegacyDERPString"): false, - f(tailcfg.Node{}, "Machine"): false, - f(tailcfg.Node{}, "MachineAuthorized"): false, - f(tailcfg.Node{}, "Name"): false, - f(tailcfg.Node{}, "Online"): false, - f(tailcfg.Node{}, "PrimaryRoutes"): false, - f(tailcfg.Node{}, "SelfNodeV4MasqAddrForThisPeer"): false, - f(tailcfg.Node{}, "SelfNodeV6MasqAddrForThisPeer"): false, - f(tailcfg.Node{}, "Sharer"): false, - f(tailcfg.Node{}, "StableID"): false, - f(tailcfg.Node{}, "Tags"): false, - f(tailcfg.Node{}, "UnsignedPeerAPIOnly"): false, - f(tailcfg.Node{}, "User"): false, - f(tailcfg.Node{}, "computedHostIfDifferent"): false, - f(tailcfg.PortRange{}, "First"): false, - f(tailcfg.PortRange{}, "Last"): false, - f(tailcfg.SSHPolicy{}, "Rules"): false, - f(tailcfg.Service{}, "Description"): false, - f(tailcfg.Service{}, "Port"): false, - f(tailcfg.Service{}, "Proto"): false, - f(tailcfg.Service{}, "_"): false, - f(tailcfg.TPMInfo{}, "FamilyIndicator"): false, - f(tailcfg.TPMInfo{}, "FirmwareVersion"): false, - f(tailcfg.TPMInfo{}, "Manufacturer"): false, - f(tailcfg.TPMInfo{}, "Model"): false, - f(tailcfg.TPMInfo{}, "SpecRevision"): false, - f(tailcfg.TPMInfo{}, "Vendor"): false, - f(tailcfg.UserProfileView{}, "ж"): false, - f(tailcfg.UserProfile{}, "DisplayName"): false, - f(tailcfg.UserProfile{}, "ID"): false, - f(tailcfg.UserProfile{}, "LoginName"): false, - f(tailcfg.UserProfile{}, "ProfilePicURL"): false, - f(views.Slice[ipproto.Proto]{}, "ж"): false, - f(views.Slice[tailcfg.FilterRule]{}, "ж"): false, - } - - t.Run("field_list_is_complete", func(t *testing.T) { - seen := set.Set[field]{} - eachStructField(reflect.TypeOf(netmap.NetworkMap{}), func(rt reflect.Type, sf reflect.StructField) { - f := field{rt, sf.Name} - seen.Add(f) - if _, ok := fields[f]; !ok { - // Fail the test if netmap has a field not in the list. If you see this test - // failure, please add the new field to the fields map above, marking it as private or public. - t.Errorf("netmap field has not been declared as private or public: %v.%v", rt, sf.Name) - } - }) - - for want := range fields { - if !seen.Contains(want) { - // Fail the test if the list has a field not in netmap. If you see this test - // failure, please remove the field from the fields map above. - t.Errorf("field declared that has not been found in netmap: %v.%v", want.t, want.f) - } - } - }) - - // tests is a list of test cases, each with a non-redacted netmap and the expected redacted netmap. - // If you add a new private field to netmap.NetworkMap or its sub-structs, please add a test case - // here that has that field set in nm, and the expected redacted value in wantRedacted. - tests := []struct { - name string - nm *netmap.NetworkMap - wantRedacted *netmap.NetworkMap - }{ - { - name: "redact_private_key", - nm: &netmap.NetworkMap{ - PrivateKey: key.NewNode(), - }, - wantRedacted: &netmap.NetworkMap{}, - }, - } - - // confirmedRedacted is a set of all private fields that have been covered by the tests above. - confirmedRedacted := set.Set[field]{} - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - // Record which of the private fields are set in the non-redacted netmap. 
- eachStructValue(reflect.ValueOf(tt.nm).Elem(), func(tt reflect.Type, sf reflect.StructField, v reflect.Value) { - f := field{tt, sf.Name} - if shouldRedact := fields[f]; shouldRedact && !v.IsZero() { - confirmedRedacted.Add(f) - } - }) - - got, _ := redactNetmapPrivateKeys(tt.nm) - if !reflect.DeepEqual(got, tt.wantRedacted) { - t.Errorf("unexpected redacted netmap: %+v", got) - } - - // Check that all private fields in the redacted netmap are zero. - eachStructValue(reflect.ValueOf(got).Elem(), func(tt reflect.Type, sf reflect.StructField, v reflect.Value) { - f := field{tt, sf.Name} - if shouldRedact := fields[f]; shouldRedact && !v.IsZero() { - t.Errorf("field not redacted: %v.%v", tt, sf.Name) - } - }) - }) - } - - // Check that all private fields in netmap.NetworkMap and its sub-structs - // are covered by the tests above. If you see a test failure here, - // please add a test case above that has that field set in nm. - for f, shouldRedact := range fields { - if shouldRedact { - if !confirmedRedacted.Contains(f) { - t.Errorf("field not covered by tests: %v.%v", f.t, f.f) - } - } - } -} - func TestHandleC2NDebugNetmap(t *testing.T) { nm := &netmap.NetworkMap{ Name: "myhost", @@ -495,10 +159,7 @@ func TestHandleC2NDebugNetmap(t *testing.T) { Hostinfo: (&tailcfg.Hostinfo{Hostname: "peer1"}).View(), }).View(), }, - PrivateKey: key.NewNode(), } - withoutPrivateKey := *nm - withoutPrivateKey.PrivateKey = key.NodePrivate{} for _, tt := range []struct { name string @@ -507,12 +168,12 @@ func TestHandleC2NDebugNetmap(t *testing.T) { }{ { name: "simple_get", - want: &withoutPrivateKey, + want: nm, }, { name: "post_no_omit", req: &tailcfg.C2NDebugNetmapRequest{}, - want: &withoutPrivateKey, + want: nm, }, { name: "post_omit_peers_and_name", @@ -524,7 +185,7 @@ func TestHandleC2NDebugNetmap(t *testing.T) { { name: "post_omit_nonexistent_field", req: &tailcfg.C2NDebugNetmapRequest{OmitFields: []string{"ThisFieldDoesNotExist"}}, - want: &withoutPrivateKey, + want: nm, }, } { t.Run(tt.name, func(t *testing.T) { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 41d110400..9de1f3d85 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3052,9 +3052,6 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.Actor, mask ipn.NotifyWatchOpt, onWatchAdded func(), fn func(roNotify *ipn.Notify) (keepGoing bool)) { ch := make(chan *ipn.Notify, 128) sessionID := rands.HexString(16) - if mask&ipn.NotifyNoPrivateKeys != 0 { - fn = filterPrivateKeys(fn) - } if mask&ipn.NotifyHealthActions == 0 { // if UI does not support PrimaryAction in health warnings, append // action URLs to the warning text instead. @@ -3154,39 +3151,6 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A sender.Run(ctx, ch) } -// filterPrivateKeys returns an IPN listener func that wraps the supplied IPN -// listener and zeroes out the PrivateKey in the NetMap passed to the wrapped -// listener. -func filterPrivateKeys(fn func(roNotify *ipn.Notify) (keepGoing bool)) func(*ipn.Notify) bool { - return func(n *ipn.Notify) bool { - redacted, changed := redactNetmapPrivateKeys(n.NetMap) - if !changed { - return fn(n) - } - - // The netmap in n is shared across all watchers, so to mutate it for a - // single watcher we have to clone the notify and the netmap. We can - // make shallow clones, at least. 
- n2 := *n - n2.NetMap = redacted - return fn(&n2) - } -} - -// redactNetmapPrivateKeys returns a copy of nm with private keys zeroed out. -// If no change was needed, it returns nm unmodified. -func redactNetmapPrivateKeys(nm *netmap.NetworkMap) (redacted *netmap.NetworkMap, changed bool) { - if nm == nil || nm.PrivateKey.IsZero() { - return nm, false - } - - // The netmap might be shared across watchers, so make at least a shallow - // clone before mutating it. - nm2 := *nm - nm2.PrivateKey = key.NodePrivate{} - return &nm2, true -} - // appendHealthActions returns an IPN listener func that wraps the supplied IPN // listener func and transforms health messages passed to the wrapped listener. // If health messages with PrimaryActions are present, it appends the label & @@ -5087,7 +5051,12 @@ func (b *LocalBackend) authReconfigLocked() { } } - cfg, err := nmcfg.WGCfg(nm, b.logf, flags, prefs.ExitNodeID()) + priv := b.pm.CurrentPrefs().Persist().PrivateNodeKey() + if !priv.IsZero() && priv.Public() != nm.NodeKey { + priv = key.NodePrivate{} + } + + cfg, err := nmcfg.WGCfg(priv, nm, b.logf, flags, prefs.ExitNodeID()) if err != nil { b.logf("wgcfg: %v", err) return diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 962335046..5df0ae5bb 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -20,6 +20,7 @@ import ( "slices" "strings" "sync" + "sync/atomic" "testing" "time" @@ -49,6 +50,7 @@ import ( "tailscale.com/tsd" "tailscale.com/tstest" "tailscale.com/tstest/deptest" + "tailscale.com/tstest/typewalk" "tailscale.com/types/appctype" "tailscale.com/types/dnstype" "tailscale.com/types/ipproto" @@ -57,6 +59,7 @@ import ( "tailscale.com/types/logid" "tailscale.com/types/netmap" "tailscale.com/types/opt" + "tailscale.com/types/persist" "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/dnsname" @@ -7112,3 +7115,104 @@ func eqUpdate(want appctype.RouteUpdate) func(appctype.RouteUpdate) error { return nil } } + +type fakeAttestationKey struct{ key.HardwareAttestationKey } + +func (f *fakeAttestationKey) Clone() key.HardwareAttestationKey { + return &fakeAttestationKey{} +} + +// TestStripKeysFromPrefs tests that LocalBackend's [stripKeysFromPrefs] (as used +// by sendNotify etc) correctly removes all private keys from an ipn.Notify. +// +// It does so by testing the two ways that Notifys are sent: via sendNotify, +// and via extension hooks. +func TestStripKeysFromPrefs(t *testing.T) { + // genNotify generates a sample ipn.Notify with various private keys set + // at a certain path through the Notify data structure.
+ genNotify := map[string]func() ipn.Notify{ + "Notify.Prefs.ж.Persist.PrivateNodeKey": func() ipn.Notify { + return ipn.Notify{ + Prefs: ptr.To((&ipn.Prefs{ + Persist: &persist.Persist{PrivateNodeKey: key.NewNode()}, + }).View()), + } + }, + "Notify.Prefs.ж.Persist.OldPrivateNodeKey": func() ipn.Notify { + return ipn.Notify{ + Prefs: ptr.To((&ipn.Prefs{ + Persist: &persist.Persist{OldPrivateNodeKey: key.NewNode()}, + }).View()), + } + }, + "Notify.Prefs.ж.Persist.NetworkLockKey": func() ipn.Notify { + return ipn.Notify{ + Prefs: ptr.To((&ipn.Prefs{ + Persist: &persist.Persist{NetworkLockKey: key.NewNLPrivate()}, + }).View()), + } + }, + "Notify.Prefs.ж.Persist.AttestationKey": func() ipn.Notify { + return ipn.Notify{ + Prefs: ptr.To((&ipn.Prefs{ + Persist: &persist.Persist{AttestationKey: new(fakeAttestationKey)}, + }).View()), + } + }, + } + + private := key.PrivateTypesForTest() + + for path := range typewalk.MatchingPaths(reflect.TypeFor[ipn.Notify](), private.Contains) { + t.Run(path.Name, func(t *testing.T) { + gen, ok := genNotify[path.Name] + if !ok { + t.Fatalf("no genNotify function for path %q", path.Name) + } + withKey := gen() + + if path.Walk(reflect.ValueOf(withKey)).IsZero() { + t.Fatalf("generated notify does not have non-zero value at path %q", path.Name) + } + + h := &ExtensionHost{} + ch := make(chan *ipn.Notify, 1) + b := &LocalBackend{ + extHost: h, + notifyWatchers: map[string]*watchSession{ + "test": {ch: ch}, + }, + } + + var okay atomic.Int32 + testNotify := func(via string) func(*ipn.Notify) { + return func(n *ipn.Notify) { + if n == nil { + t.Errorf("notify from %s is nil", via) + return + } + if !path.Walk(reflect.ValueOf(*n)).IsZero() { + t.Errorf("notify from %s has non-zero value at path %q; key not stripped", via, path.Name) + } else { + okay.Add(1) + } + } + } + + h.Hooks().MutateNotifyLocked.Add(testNotify("MutateNotifyLocked hook")) + + b.send(withKey) + + select { + case n := <-ch: + testNotify("watchSession")(n) + default: + t.Errorf("no notify sent to watcher channel") + } + + if got := okay.Load(); got != 2 { + t.Errorf("notify passed validation %d times; want 2", got) + } + }) + } +} diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 9c2176378..40a3c9887 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -24,6 +24,7 @@ import ( "tailscale.com/types/persist" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" + "tailscale.com/util/testenv" ) var debug = envknob.RegisterBool("TS_DEBUG_PROFILES") @@ -849,6 +850,7 @@ func (pm *profileManager) CurrentPrefs() ipn.PrefsView { // ReadStartupPrefsForTest reads the startup prefs from disk. It is only used for testing. func ReadStartupPrefsForTest(logf logger.Logf, store ipn.StateStore) (ipn.PrefsView, error) { + testenv.AssertInTest() bus := eventbus.New() defer bus.Close() ht := health.NewTracker(bus) // in tests, don't care about the health status diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index de5ff53ac..ddd55234a 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -877,14 +877,6 @@ func (h *Handler) serveWatchIPNBus(w http.ResponseWriter, r *http.Request) { } mask = ipn.NotifyWatchOpt(v) } - // Users with only read access must request private key filtering. If they - // don't filter out private keys, require write access. 
- if (mask & ipn.NotifyNoPrivateKeys) == 0 { - if !h.PermitWrite { - http.Error(w, "watch IPN bus access denied, must set ipn.NotifyNoPrivateKeys when not running as admin/root or operator", http.StatusForbidden) - return - } - } w.Header().Set("Content-Type", "application/json") ctx := r.Context() diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index fa24717f7..d00b4117b 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -263,13 +263,17 @@ func TestShouldDenyServeConfigForGOOSAndUserContext(t *testing.T) { }) } +// TestServeWatchIPNBus used to test that various WatchIPNBus mask flags +// changed the permissions required to access the endpoint. +// However, since the removal of the NotifyNoPrivateKeys flag requirement +// for read-only users, this test now only verifies that the endpoint +// behaves correctly based on the PermitRead and PermitWrite settings. func TestServeWatchIPNBus(t *testing.T) { tstest.Replace(t, &validLocalHostForTesting, true) tests := []struct { desc string permitRead, permitWrite bool - mask ipn.NotifyWatchOpt // extra bits in addition to ipn.NotifyInitialState wantStatus int }{ { @@ -279,20 +283,13 @@ func TestServeWatchIPNBus(t *testing.T) { wantStatus: http.StatusForbidden, }, { - desc: "read-initial-state", + desc: "read-only", permitRead: true, permitWrite: false, - wantStatus: http.StatusForbidden, - }, - { - desc: "read-initial-state-no-private-keys", - permitRead: true, - permitWrite: false, - mask: ipn.NotifyNoPrivateKeys, wantStatus: http.StatusOK, }, { - desc: "read-initial-state-with-private-keys", + desc: "read-and-write", permitRead: true, permitWrite: true, wantStatus: http.StatusOK, @@ -311,7 +308,7 @@ func TestServeWatchIPNBus(t *testing.T) { c := s.Client() ctx, cancel := context.WithCancel(context.Background()) - req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/localapi/v0/watch-ipn-bus?mask=%d", s.URL, ipn.NotifyInitialState|tt.mask), nil) + req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/localapi/v0/watch-ipn-bus?mask=%d", s.URL, ipn.NotifyInitialState), nil) if err != nil { t.Fatal(err) } diff --git a/tsconsensus/monitor.go b/tsconsensus/monitor.go index 61a5a74a0..2aa4c863b 100644 --- a/tsconsensus/monitor.go +++ b/tsconsensus/monitor.go @@ -102,15 +102,13 @@ func (m *monitor) handleSummaryStatus(w http.ResponseWriter, r *http.Request) { } func (m *monitor) handleNetmap(w http.ResponseWriter, r *http.Request) { - var mask ipn.NotifyWatchOpt = ipn.NotifyInitialNetMap - mask |= ipn.NotifyNoPrivateKeys lc, err := m.ts.LocalClient() if err != nil { log.Printf("monitor: error LocalClient: %v", err) http.Error(w, "", http.StatusInternalServerError) return } - watcher, err := lc.WatchIPNBus(r.Context(), mask) + watcher, err := lc.WatchIPNBus(r.Context(), ipn.NotifyInitialNetMap) if err != nil { log.Printf("monitor: error WatchIPNBus: %v", err) http.Error(w, "", http.StatusInternalServerError) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 2944f6359..14747650f 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -350,7 +350,7 @@ func (s *Server) Up(ctx context.Context) (*ipnstate.Status, error) { return nil, fmt.Errorf("tsnet.Up: %w", err) } - watcher, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialState|ipn.NotifyNoPrivateKeys) + watcher, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialState) if err != nil { return nil, fmt.Errorf("tsnet.Up: %w", err) } diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 
64f49c7b8..9d75cfc29 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -2128,16 +2128,10 @@ func TestC2NDebugNetmap(t *testing.T) { var current netmap.NetworkMap must.Do(json.Unmarshal(resp.Current, ¤t)) - if !current.PrivateKey.IsZero() { - t.Errorf("current netmap has non-zero private key: %v", current.PrivateKey) - } // Check candidate netmap if we sent a map response. if cand != nil { var candidate netmap.NetworkMap must.Do(json.Unmarshal(resp.Candidate, &candidate)) - if !candidate.PrivateKey.IsZero() { - t.Errorf("candidate netmap has non-zero private key: %v", candidate.PrivateKey) - } if diff := cmp.Diff(current.SelfNode, candidate.SelfNode); diff != "" { t.Errorf("SelfNode differs (-current +candidate):\n%s", diff) } diff --git a/tstest/typewalk/typewalk.go b/tstest/typewalk/typewalk.go new file mode 100644 index 000000000..b22505351 --- /dev/null +++ b/tstest/typewalk/typewalk.go @@ -0,0 +1,106 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package typewalk provides utilities to walk Go types using reflection. +package typewalk + +import ( + "iter" + "reflect" + "strings" +) + +// Path describes a path via a type where a private key may be found, +// along with a function to test whether a reflect.Value at that path is +// non-zero. +type Path struct { + // Name is the path from the root type, suitable for using as a t.Run name. + Name string + + // Walk returns the reflect.Value at the end of the path, given a root + // reflect.Value. + Walk func(root reflect.Value) (leaf reflect.Value) +} + +// MatchingPaths returns a sequence of [Path] for all paths +// within the given type that end in a type matching match. +func MatchingPaths(rt reflect.Type, match func(reflect.Type) bool) iter.Seq[Path] { + // valFromRoot is a function that, given a reflect.Value of the root struct, + // returns the reflect.Value at some path within it. 
+ type valFromRoot func(reflect.Value) reflect.Value + + return func(yield func(Path) bool) { + var walk func(reflect.Type, valFromRoot) + var path []string + var done bool + seen := map[reflect.Type]bool{} + + walk = func(t reflect.Type, getV valFromRoot) { + if seen[t] { + return + } + seen[t] = true + defer func() { seen[t] = false }() + if done { + return + } + if match(t) { + if !yield(Path{ + Name: strings.Join(path, "."), + Walk: getV, + }) { + done = true + } + return + } + switch t.Kind() { + case reflect.Ptr, reflect.Slice, reflect.Array: + walk(t.Elem(), func(root reflect.Value) reflect.Value { + v := getV(root) + return v.Elem() + }) + case reflect.Struct: + for i := range t.NumField() { + sf := t.Field(i) + fieldName := sf.Name + if fieldName == "_" { + continue + } + path = append(path, fieldName) + walk(sf.Type, func(root reflect.Value) reflect.Value { + return getV(root).FieldByName(fieldName) + }) + path = path[:len(path)-1] + if done { + return + } + } + case reflect.Map: + walk(t.Elem(), func(root reflect.Value) reflect.Value { + v := getV(root) + if v.Len() == 0 { + return reflect.Zero(t.Elem()) + } + iter := v.MapRange() + iter.Next() + return iter.Value() + }) + if done { + return + } + walk(t.Key(), func(root reflect.Value) reflect.Value { + v := getV(root) + if v.Len() == 0 { + return reflect.Zero(t.Key()) + } + iter := v.MapRange() + iter.Next() + return iter.Key() + }) + } + } + + path = append(path, rt.Name()) + walk(rt, func(v reflect.Value) reflect.Value { return v }) + } +} diff --git a/types/key/util.go b/types/key/util.go index bdb2a06f6..50fac8275 100644 --- a/types/key/util.go +++ b/types/key/util.go @@ -10,9 +10,12 @@ import ( "errors" "fmt" "io" + "reflect" "slices" "go4.org/mem" + "tailscale.com/util/set" + "tailscale.com/util/testenv" ) // rand fills b with cryptographically strong random bytes. Panics if @@ -115,3 +118,18 @@ func debug32(k [32]byte) string { dst[6] = ']' return string(dst[:7]) } + +// PrivateTypesForTest returns the set of private key types +// in this package, for testing purposes. +func PrivateTypesForTest() set.Set[reflect.Type] { + testenv.AssertInTest() + return set.Of( + reflect.TypeFor[ChallengePrivate](), + reflect.TypeFor[ControlPrivate](), + reflect.TypeFor[DiscoPrivate](), + reflect.TypeFor[MachinePrivate](), + reflect.TypeFor[NodePrivate](), + reflect.TypeFor[NLPrivate](), + reflect.TypeFor[HardwareAttestationKey](), + ) +} diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index cc6bec1db..0a2f3ea71 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -26,11 +26,10 @@ import ( // The fields should all be considered read-only. They might // alias parts of previous NetworkMap values. type NetworkMap struct { - SelfNode tailcfg.NodeView - AllCaps set.Set[tailcfg.NodeCapability] // set version of SelfNode.Capabilities + SelfNode.CapMap - NodeKey key.NodePublic - PrivateKey key.NodePrivate - Expiry time.Time + SelfNode tailcfg.NodeView + AllCaps set.Set[tailcfg.NodeCapability] // set version of SelfNode.Capabilities + SelfNode.CapMap + NodeKey key.NodePublic + Expiry time.Time // Name is the DNS name assigned to this node. // It is the MapResponse.Node.Name value and ends with a period. 
Name string diff --git a/types/netmap/netmap_test.go b/types/netmap/netmap_test.go index 40f504741..ee4fecdb4 100644 --- a/types/netmap/netmap_test.go +++ b/types/netmap/netmap_test.go @@ -6,11 +6,13 @@ package netmap import ( "encoding/hex" "net/netip" + "reflect" "testing" "go4.org/mem" "tailscale.com/net/netaddr" "tailscale.com/tailcfg" + "tailscale.com/tstest/typewalk" "tailscale.com/types/key" ) @@ -316,3 +318,10 @@ func TestPeerIndexByNodeID(t *testing.T) { } } } + +func TestNoPrivateKeyMaterial(t *testing.T) { + private := key.PrivateTypesForTest() + for path := range typewalk.MatchingPaths(reflect.TypeFor[NetworkMap](), private.Contains) { + t.Errorf("NetworkMap contains private key material at path: %q", path.Name) + } +} diff --git a/wgengine/bench/wg.go b/wgengine/bench/wg.go index f0fa38bf9..ce6add866 100644 --- a/wgengine/bench/wg.go +++ b/wgengine/bench/wg.go @@ -111,9 +111,8 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. Endpoints: epFromTyped(st.LocalAddrs), } e2.SetNetworkMap(&netmap.NetworkMap{ - NodeKey: k2.Public(), - PrivateKey: k2, - Peers: []tailcfg.NodeView{n.View()}, + NodeKey: k2.Public(), + Peers: []tailcfg.NodeView{n.View()}, }) p := wgcfg.Peer{ @@ -143,9 +142,8 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. Endpoints: epFromTyped(st.LocalAddrs), } e1.SetNetworkMap(&netmap.NetworkMap{ - NodeKey: k1.Public(), - PrivateKey: k1, - Peers: []tailcfg.NodeView{n.View()}, + NodeKey: k1.Public(), + Peers: []tailcfg.NodeView{n.View()}, }) p := wgcfg.Peer{ diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index e91dac2ec..09c54f504 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -308,8 +308,7 @@ func meshStacks(logf logger.Logf, mutateNetmap func(idx int, nm *netmap.NetworkM buildNetmapLocked := func(myIdx int) *netmap.NetworkMap { me := ms[myIdx] nm := &netmap.NetworkMap{ - PrivateKey: me.privateKey, - NodeKey: me.privateKey.Public(), + NodeKey: me.privateKey.Public(), SelfNode: (&tailcfg.Node{ Addresses: []netip.Prefix{netip.PrefixFrom(netaddr.IPv4(1, 0, 0, byte(myIdx+1)), 32)}, }).View(), @@ -356,7 +355,7 @@ func meshStacks(logf logger.Logf, mutateNetmap func(idx int, nm *netmap.NetworkM peerSet.Add(peer.Key()) } m.conn.UpdatePeers(peerSet) - wg, err := nmcfg.WGCfg(nm, logf, 0, "") + wg, err := nmcfg.WGCfg(ms[i].privateKey, nm, logf, 0, "") if err != nil { // We're too far from the *testing.T to be graceful, // blow up. Shouldn't happen anyway. 
@@ -2201,9 +2200,8 @@ func TestIsWireGuardOnlyPeer(t *testing.T) { defer m.Close() nm := &netmap.NetworkMap{ - Name: "ts", - PrivateKey: m.privateKey, - NodeKey: m.privateKey.Public(), + Name: "ts", + NodeKey: m.privateKey.Public(), SelfNode: (&tailcfg.Node{ Addresses: []netip.Prefix{tsaip}, }).View(), @@ -2224,7 +2222,7 @@ func TestIsWireGuardOnlyPeer(t *testing.T) { } m.conn.onNodeViewsUpdate(nv) - cfg, err := nmcfg.WGCfg(nm, t.Logf, netmap.AllowSubnetRoutes, "") + cfg, err := nmcfg.WGCfg(m.privateKey, nm, t.Logf, netmap.AllowSubnetRoutes, "") if err != nil { t.Fatal(err) } @@ -2266,9 +2264,8 @@ func TestIsWireGuardOnlyPeerWithMasquerade(t *testing.T) { defer m.Close() nm := &netmap.NetworkMap{ - Name: "ts", - PrivateKey: m.privateKey, - NodeKey: m.privateKey.Public(), + Name: "ts", + NodeKey: m.privateKey.Public(), SelfNode: (&tailcfg.Node{ Addresses: []netip.Prefix{tsaip}, }).View(), @@ -2290,7 +2287,7 @@ func TestIsWireGuardOnlyPeerWithMasquerade(t *testing.T) { } m.conn.onNodeViewsUpdate(nv) - cfg, err := nmcfg.WGCfg(nm, t.Logf, netmap.AllowSubnetRoutes, "") + cfg, err := nmcfg.WGCfg(m.privateKey, nm, t.Logf, netmap.AllowSubnetRoutes, "") if err != nil { t.Fatal(err) } @@ -2334,7 +2331,7 @@ func applyNetworkMap(t *testing.T, m *magicStack, nm *netmap.NetworkMap) { m.conn.noV6.Store(true) // Turn the network map into a wireguard config (for the tailscale internal wireguard device). - cfg, err := nmcfg.WGCfg(nm, t.Logf, netmap.AllowSubnetRoutes, "") + cfg, err := nmcfg.WGCfg(m.privateKey, nm, t.Logf, netmap.AllowSubnetRoutes, "") if err != nil { t.Fatal(err) } @@ -2403,9 +2400,8 @@ func TestIsWireGuardOnlyPickEndpointByPing(t *testing.T) { wgEpV6 := netip.MustParseAddrPort(v6.LocalAddr().String()) nm := &netmap.NetworkMap{ - Name: "ts", - PrivateKey: m.privateKey, - NodeKey: m.privateKey.Public(), + Name: "ts", + NodeKey: m.privateKey.Public(), SelfNode: (&tailcfg.Node{ Addresses: []netip.Prefix{tsaip}, }).View(), diff --git a/wgengine/wgcfg/nmcfg/nmcfg.go b/wgengine/wgcfg/nmcfg/nmcfg.go index 28d5345d6..487e78d81 100644 --- a/wgengine/wgcfg/nmcfg/nmcfg.go +++ b/wgengine/wgcfg/nmcfg/nmcfg.go @@ -12,6 +12,7 @@ import ( "strings" "tailscale.com/tailcfg" + "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/netmap" @@ -41,9 +42,9 @@ func cidrIsSubnet(node tailcfg.NodeView, cidr netip.Prefix) bool { } // WGCfg returns the NetworkMaps's WireGuard configuration. -func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, exitNode tailcfg.StableNodeID) (*wgcfg.Config, error) { +func WGCfg(pk key.NodePrivate, nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, exitNode tailcfg.StableNodeID) (*wgcfg.Config, error) { cfg := &wgcfg.Config{ - PrivateKey: nm.PrivateKey, + PrivateKey: pk, Addresses: nm.GetAddresses().AsSlice(), Peers: make([]wgcfg.Peer, 0, len(nm.Peers)), } From 3a41c0c585f4b008c07a02ae91ed43cdbb62c721 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Mon, 20 Oct 2025 11:40:30 -0400 Subject: [PATCH 1669/1708] ipn/ipnlocal: add PROXY protocol support to Funnel/Serve This adds the --proxy-protocol flag to 'tailscale serve' and 'tailscale funnel', which tells the Tailscale client to prepend a PROXY protocol[1] header when making connections to the proxied-to backend. I've verified that this works with our existing funnel servers without additional work, since they pass along source address information via PeerAPI already. 
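For illustration (this mirrors the new serve_v2 test cases below; the backend address is an arbitrary example), running

  tailscale serve --tcp=8000 --proxy-protocol=1 --bg tcp://localhost:5432

stores a TCPPortHandler with TCPForward "localhost:5432" and ProxyProtocol 1, and tailscaled then writes a PROXY v1 header line of the form "PROXY TCP4 <src-ip> <dst-ip> <src-port> <dst-port>" (TCP6 for IPv6 clients) to the backend before copying any payload bytes. --proxy-protocol=2 selects the binary v2 header instead.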
Updates #7747 [1]: https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt Change-Id: I647c24d319375c1b33e995555a541b7615d2d203 Signed-off-by: Andrew Dunham --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/cli/serve_legacy.go | 3 +- cmd/tailscale/cli/serve_v2.go | 27 ++++++--- cmd/tailscale/cli/serve_v2_test.go | 94 +++++++++++++++++++++++++----- cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + flake.nix | 2 +- go.mod | 1 + go.mod.sri | 2 +- go.sum | 2 + ipn/ipn_clone.go | 9 +-- ipn/ipn_view.go | 15 +++-- ipn/ipnlocal/serve.go | 72 +++++++++++++++++++++++ ipn/serve.go | 19 +++++- shell.nix | 2 +- tsnet/depaware.txt | 1 + 16 files changed, 216 insertions(+), 36 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index b800b78c6..4542fcad6 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -85,6 +85,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 github.com/modern-go/reflect2 from github.com/json-iterator/go github.com/munnerz/goautoneg from k8s.io/kube-openapi/pkg/handler3+ github.com/opencontainers/go-digest from github.com/distribution/reference + github.com/pires/go-proxyproto from tailscale.com/ipn/ipnlocal github.com/pkg/errors from github.com/evanphx/json-patch/v5+ D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil from github.com/prometheus/client_golang/prometheus/promhttp diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 5c2d8eefa..171ec335c 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -168,6 +168,7 @@ type serveEnv struct { http uint // HTTP port tcp uint // TCP port tlsTerminatedTCP uint // a TLS terminated TCP port + proxyProtocol uint // PROXY protocol version (1 or 2) subcmd serveMode // subcommand yes bool // update without prompt service tailcfg.ServiceName // service name @@ -570,7 +571,7 @@ func (e *serveEnv) handleTCPServe(ctx context.Context, srcType string, srcPort u return fmt.Errorf("cannot serve TCP; already serving web on %d", srcPort) } - sc.SetTCPForwarding(srcPort, fwdAddr, terminateTLS, dnsName) + sc.SetTCPForwarding(srcPort, fwdAddr, terminateTLS, 0 /* proxy proto */, dnsName) if !reflect.DeepEqual(cursc, sc) { if err := e.lc.SetServeConfig(ctx, sc); err != nil { diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 1ce14cf09..33b676bf8 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -240,6 +240,7 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { } fs.UintVar(&e.tcp, "tcp", 0, "Expose a TCP forwarder to forward raw TCP packets at the specified port") fs.UintVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", 0, "Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port") + fs.UintVar(&e.proxyProtocol, "proxy-protocol", 0, "PROXY protocol version (1 or 2) for TCP forwarding") fs.BoolVar(&e.yes, "yes", false, "Update without interactive prompts (default false)") fs.BoolVar(&e.tun, "tun", false, "Forward all traffic to the local machine (default false), only supported for services. 
Refer to docs for more information.") }), @@ -413,6 +414,14 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { return errHelpFunc(subcmd) } + if (srvType == serveTypeHTTP || srvType == serveTypeHTTPS) && e.proxyProtocol != 0 { + return fmt.Errorf("PROXY protocol is only supported for TCP forwarding, not HTTP/HTTPS") + } + // Validate PROXY protocol version + if e.proxyProtocol != 0 && e.proxyProtocol != 1 && e.proxyProtocol != 2 { + return fmt.Errorf("invalid PROXY protocol version %d; must be 1 or 2", e.proxyProtocol) + } + sc, err := e.lc.GetServeConfig(ctx) if err != nil { return fmt.Errorf("error getting serve config: %w", err) @@ -507,7 +516,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { if len(args) > 0 { target = args[0] } - err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix, e.acceptAppCaps) + err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix, e.acceptAppCaps, int(e.proxyProtocol)) msg = e.messageForPort(sc, st, dnsName, srvType, srvPort) } if err != nil { @@ -828,7 +837,7 @@ func (e *serveEnv) runServeSetConfig(ctx context.Context, args []string) (err er for name, details := range scf.Services { for ppr, ep := range details.Endpoints { if ep.Protocol == conffile.ProtoTUN { - err := e.setServe(sc, name.String(), serveTypeTUN, 0, "", "", false, magicDNSSuffix, nil) + err := e.setServe(sc, name.String(), serveTypeTUN, 0, "", "", false, magicDNSSuffix, nil, 0 /* proxy protocol */) if err != nil { return err } @@ -850,7 +859,7 @@ func (e *serveEnv) runServeSetConfig(ctx context.Context, args []string) (err er portStr := fmt.Sprint(destPort) target = fmt.Sprintf("%s://%s", ep.Protocol, net.JoinHostPort(ep.Destination, portStr)) } - err := e.setServe(sc, name.String(), serveType, port, "/", target, false, magicDNSSuffix, nil) + err := e.setServe(sc, name.String(), serveType, port, "/", target, false, magicDNSSuffix, nil, 0 /* proxy protocol */) if err != nil { return fmt.Errorf("service %q: %w", name, err) } @@ -953,7 +962,7 @@ func serveFromPortHandler(tcp *ipn.TCPPortHandler) serveType { } } -func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool, mds string, caps []tailcfg.PeerCapability) error { +func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool, mds string, caps []tailcfg.PeerCapability, proxyProtocol int) error { // update serve config based on the type switch srvType { case serveTypeHTTPS, serveTypeHTTP: @@ -966,7 +975,7 @@ func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveTy if e.setPath != "" { return fmt.Errorf("cannot mount a path for TCP serve") } - err := e.applyTCPServe(sc, dnsName, srvType, srvPort, target) + err := e.applyTCPServe(sc, dnsName, srvType, srvPort, target, proxyProtocol) if err != nil { return fmt.Errorf("failed to apply TCP serve: %w", err) } @@ -1092,6 +1101,9 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN if tcpHandler.TerminateTLS != "" { tlsStatus = "TLS terminated" } + if ver := tcpHandler.ProxyProtocol; ver != 0 { + tlsStatus = fmt.Sprintf("%s, PROXY protocol v%d", tlsStatus, ver) + } output.WriteString(fmt.Sprintf("|-- tcp://%s:%d (%s)\n", host, srvPort, tlsStatus)) for _, a := range ips { @@ -1170,7 +1182,7 @@ func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort ui 
return nil } -func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType serveType, srcPort uint16, target string) error { +func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType serveType, srcPort uint16, target string, proxyProtocol int) error { var terminateTLS bool switch srcType { case serveTypeTCP: @@ -1197,8 +1209,7 @@ func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType se return fmt.Errorf("cannot serve TCP; already serving web on %d for %s", srcPort, dnsName) } - sc.SetTCPForwarding(srcPort, dstURL.Host, terminateTLS, dnsName) - + sc.SetTCPForwarding(srcPort, dstURL.Host, terminateTLS, proxyProtocol, dnsName) return nil } diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 7f7f2c37c..5cdb39558 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -919,6 +919,73 @@ func TestServeDevConfigMutations(t *testing.T) { }, }, }, + { + name: "tcp_with_proxy_protocol_v1", + steps: []step{{ + command: cmd("serve --tcp=8000 --proxy-protocol=1 --bg tcp://localhost:5432"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 8000: { + TCPForward: "localhost:5432", + ProxyProtocol: 1, + }, + }, + }, + }}, + }, + { + name: "tls_terminated_tcp_with_proxy_protocol_v2", + steps: []step{{ + command: cmd("serve --tls-terminated-tcp=443 --proxy-protocol=2 --bg tcp://localhost:5432"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: { + TCPForward: "localhost:5432", + TerminateTLS: "foo.test.ts.net", + ProxyProtocol: 2, + }, + }, + }, + }}, + }, + { + name: "tcp_update_to_add_proxy_protocol", + steps: []step{ + { + command: cmd("serve --tcp=8000 --bg tcp://localhost:5432"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 8000: {TCPForward: "localhost:5432"}, + }, + }, + }, + { + command: cmd("serve --tcp=8000 --proxy-protocol=1 --bg tcp://localhost:5432"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 8000: { + TCPForward: "localhost:5432", + ProxyProtocol: 1, + }, + }, + }, + }, + }, + }, + { + name: "tcp_proxy_protocol_invalid_version", + steps: []step{{ + command: cmd("serve --tcp=8000 --proxy-protocol=3 --bg tcp://localhost:5432"), + wantErr: anyErr(), + }}, + }, + { + name: "proxy_protocol_without_tcp", + steps: []step{{ + command: cmd("serve --https=443 --proxy-protocol=1 --bg http://localhost:3000"), + wantErr: anyErr(), + }}, + }, } for _, group := range groups { @@ -1889,18 +1956,19 @@ func TestSetServe(t *testing.T) { e := &serveEnv{} magicDNSSuffix := "test.ts.net" tests := []struct { - name string - desc string - cfg *ipn.ServeConfig - st *ipnstate.Status - dnsName string - srvType serveType - srvPort uint16 - mountPath string - target string - allowFunnel bool - expected *ipn.ServeConfig - expectErr bool + name string + desc string + cfg *ipn.ServeConfig + st *ipnstate.Status + dnsName string + srvType serveType + srvPort uint16 + mountPath string + target string + allowFunnel bool + proxyProtocol int + expected *ipn.ServeConfig + expectErr bool }{ { name: "add new handler", @@ -2183,7 +2251,7 @@ func TestSetServe(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel, magicDNSSuffix, nil) + err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel, magicDNSSuffix, nil, tt.proxyProtocol) if err != nil 
&& !tt.expectErr { t.Fatalf("got error: %v; did not expect error.", err) } diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 1b5bdab91..be0fd799e 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -156,6 +156,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/pierrec/lz4/v4/internal/lz4errors from github.com/pierrec/lz4/v4+ L github.com/pierrec/lz4/v4/internal/lz4stream from github.com/pierrec/lz4/v4 L github.com/pierrec/lz4/v4/internal/xxh32 from github.com/pierrec/lz4/v4/internal/lz4stream + github.com/pires/go-proxyproto from tailscale.com/ipn/ipnlocal LD github.com/pkg/sftp from tailscale.com/ssh/tailssh LD github.com/pkg/sftp/internal/encoding/ssh/filexfer from github.com/pkg/sftp D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 21ca122c4..c68fab634 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -43,6 +43,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket + github.com/pires/go-proxyproto from tailscale.com/ipn/ipnlocal D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack L 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient diff --git a/flake.nix b/flake.nix index d2f03d4d8..217df38c3 100644 --- a/flake.nix +++ b/flake.nix @@ -151,5 +151,5 @@ }); }; } -# nix-direnv cache busting line: sha256-p8dVVZm2bLL4J/d4TtnUOp3bfMqUkBGE+53RUhamF+A= +# nix-direnv cache busting line: sha256-ADuIW4GAiskY0nRFWFk50rzihRIXIdm1Wk7IxfYKMPg= diff --git a/go.mod b/go.mod index 836810fc0..fc8870baf 100644 --- a/go.mod +++ b/go.mod @@ -70,6 +70,7 @@ require ( github.com/miekg/dns v1.1.58 github.com/mitchellh/go-ps v1.0.0 github.com/peterbourgon/ff/v3 v3.4.0 + github.com/pires/go-proxyproto v0.8.1 github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.6 github.com/prometheus-community/pro-bing v0.4.0 diff --git a/go.mod.sri b/go.mod.sri index 325a03b43..b7df2cc2c 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-p8dVVZm2bLL4J/d4TtnUOp3bfMqUkBGE+53RUhamF+A= +sha256-ADuIW4GAiskY0nRFWFk50rzihRIXIdm1Wk7IxfYKMPg= diff --git a/go.sum b/go.sum index a0d9461ec..177efd4f7 100644 --- a/go.sum +++ b/go.sum @@ -809,6 +809,8 @@ github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkM github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaASJgp0= +github.com/pires/go-proxyproto v0.8.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/diff v0.0.0-20200914180035-5b29258ca4f7/go.mod h1:zO8QMzTeZd5cpnIkz/Gn6iK0jDfGicM1nynOkkPIl28= diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 3d2670947..d5af906ee 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -218,10 
+218,11 @@ func (src *TCPPortHandler) Clone() *TCPPortHandler { // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _TCPPortHandlerCloneNeedsRegeneration = TCPPortHandler(struct { - HTTPS bool - HTTP bool - TCPForward string - TerminateTLS string + HTTPS bool + HTTP bool + TCPForward string + TerminateTLS string + ProxyProtocol int }{}) // Clone makes a deep copy of HTTPHandler. diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index ba5477a6d..3179e3bb5 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -807,12 +807,19 @@ func (v TCPPortHandlerView) TCPForward() string { return v.ж.TCPForward } // (the HTTPS mode uses ServeConfig.Web) func (v TCPPortHandlerView) TerminateTLS() string { return v.ж.TerminateTLS } +// ProxyProtocol indicates whether to send a PROXY protocol header +// before forwarding the connection to TCPForward. +// +// This is only valid if TCPForward is non-empty. +func (v TCPPortHandlerView) ProxyProtocol() int { return v.ж.ProxyProtocol } + // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _TCPPortHandlerViewNeedsRegeneration = TCPPortHandler(struct { - HTTPS bool - HTTP bool - TCPForward string - TerminateTLS string + HTTPS bool + HTTP bool + TCPForward string + TerminateTLS string + ProxyProtocol int }{}) // View returns a read-only view of HTTPHandler. diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 1c527e130..b5118873b 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -33,6 +33,7 @@ import ( "time" "unicode/utf8" + "github.com/pires/go-proxyproto" "go4.org/mem" "tailscale.com/ipn" "tailscale.com/net/netutil" @@ -671,10 +672,81 @@ func (b *LocalBackend) tcpHandlerForServe(dport uint16, srcAddr netip.AddrPort, }) } + var proxyHeader []byte + if ver := tcph.ProxyProtocol(); ver > 0 { + // backAddr is the final "destination" of the connection, + // which is the connection to the proxied-to backend. + backAddr := backConn.RemoteAddr().(*net.TCPAddr) + + // We always want to format the PROXY protocol + // header based on the IPv4 or IPv6-ness of + // the client. The SourceAddr and + // DestinationAddr need to match in type, so we + // need to be careful to not e.g. set a + // SourceAddr of type IPv6 and DestinationAddr + // of type IPv4. + // + // If this is an IPv6-mapped IPv4 address, + // though, unmap it. + proxySrcAddr := srcAddr + if proxySrcAddr.Addr().Is4In6() { + proxySrcAddr = netip.AddrPortFrom( + proxySrcAddr.Addr().Unmap(), + proxySrcAddr.Port(), + ) + } + + is4 := proxySrcAddr.Addr().Is4() + + var destAddr netip.Addr + if self := b.currentNode().Self(); self.Valid() { + if is4 { + destAddr = nodeIP(self, netip.Addr.Is4) + } else { + destAddr = nodeIP(self, netip.Addr.Is6) + } + } + if !destAddr.IsValid() { + // Pick a best-effort destination address of localhost. 
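+ // Loopback is only a best-effort placeholder so the header stays
+ // well-formed when the node's own Tailscale address is unknown; the
+ // client's real source address above is still carried either way.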
+ if is4 { + destAddr = netip.AddrFrom4([4]byte{127, 0, 0, 1}) + } else { + destAddr = netip.IPv6Loopback() + } + } + + header := &proxyproto.Header{ + Version: byte(ver), + Command: proxyproto.PROXY, + SourceAddr: net.TCPAddrFromAddrPort(proxySrcAddr), + DestinationAddr: &net.TCPAddr{ + IP: destAddr.AsSlice(), + Port: backAddr.Port, + }, + } + if is4 { + header.TransportProtocol = proxyproto.TCPv4 + } else { + header.TransportProtocol = proxyproto.TCPv6 + } + var err error + proxyHeader, err = header.Format() + if err != nil { + b.logf("localbackend: failed to format proxy protocol header for port %v (from %v) to %s: %v", dport, srcAddr, backDst, err) + } + } + // TODO(bradfitz): do the RegisterIPPortIdentity and // UnregisterIPPortIdentity stuff that netstack does errc := make(chan error, 1) go func() { + if len(proxyHeader) > 0 { + if _, err := backConn.Write(proxyHeader); err != nil { + errc <- err + backConn.Close() // to ensure that the other side gets EOF + return + } + } _, err := io.Copy(backConn, conn) errc <- err }() diff --git a/ipn/serve.go b/ipn/serve.go index 2ac37a141..1aab829fe 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -149,6 +149,12 @@ type TCPPortHandler struct { // SNI name with this value. It is only used if TCPForward is non-empty. // (the HTTPS mode uses ServeConfig.Web) TerminateTLS string `json:",omitempty"` + + // ProxyProtocol indicates whether to send a PROXY protocol header + // before forwarding the connection to TCPForward. + // + // This is only valid if TCPForward is non-empty. + ProxyProtocol int `json:",omitzero"` } // HTTPHandler is either a path or a proxy to serve. @@ -404,7 +410,10 @@ func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uin // connections from the given port. If terminateTLS is true, TLS connections // are terminated with only the given host name permitted before passing them // to the fwdAddr. -func (sc *ServeConfig) SetTCPForwarding(port uint16, fwdAddr string, terminateTLS bool, host string) { +// +// If proxyProtocol is non-zero, the corresponding PROXY protocol version +// header is sent before forwarding the connection. +func (sc *ServeConfig) SetTCPForwarding(port uint16, fwdAddr string, terminateTLS bool, proxyProtocol int, host string) { if sc == nil { sc = new(ServeConfig) } @@ -417,11 +426,15 @@ func (sc *ServeConfig) SetTCPForwarding(port uint16, fwdAddr string, terminateTL } tcpPortHandler = &svcConfig.TCP } - mak.Set(tcpPortHandler, port, &TCPPortHandler{TCPForward: fwdAddr}) + handler := &TCPPortHandler{ + TCPForward: fwdAddr, + ProxyProtocol: proxyProtocol, // can be 0 + } if terminateTLS { - (*tcpPortHandler)[port].TerminateTLS = host + handler.TerminateTLS = host } + mak.Set(tcpPortHandler, port, handler) } // SetFunnel sets the sc.AllowFunnel value for the given host and port. 
diff --git a/shell.nix b/shell.nix index c11b4bbcf..f43108753 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-p8dVVZm2bLL4J/d4TtnUOp3bfMqUkBGE+53RUhamF+A= +# nix-direnv cache busting line: sha256-ADuIW4GAiskY0nRFWFk50rzihRIXIdm1Wk7IxfYKMPg= diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index cf91aa483..6eb493ef8 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -43,6 +43,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ LA 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ LDW 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket + github.com/pires/go-proxyproto from tailscale.com/ipn/ipnlocal DI github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack L 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient From 99b06eac49ba1cdc1f72409b957f526b25d62622 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 15 Nov 2025 09:40:57 -0800 Subject: [PATCH 1670/1708] syncs: add Mutex/RWMutex alias/wrappers for future mutex debugging Updates #17852 Change-Id: I477340fb8e40686870e981ade11cd61597c34a20 Signed-off-by: Brad Fitzpatrick --- appc/appconnector.go | 4 ++-- client/local/local.go | 3 ++- control/controlbase/conn.go | 3 ++- control/controlclient/direct.go | 4 ++-- derp/derpserver/derpserver.go | 2 +- envknob/envknob.go | 4 ++-- feature/relayserver/relayserver.go | 4 ++-- health/health.go | 3 ++- ipn/auditlog/extension.go | 4 ++-- ipn/ipnlocal/cert.go | 6 +++--- ipn/ipnlocal/local.go | 6 +++--- ipn/ipnlocal/node_backend.go | 3 ++- logtail/buffer.go | 5 +++-- metrics/metrics.go | 3 +-- net/captivedetection/captivedetection.go | 3 ++- net/dns/manager_windows.go | 4 ++-- net/dns/resolver/debug.go | 4 ++-- net/dns/resolver/forwarder.go | 3 ++- net/dns/resolver/tsdns.go | 2 +- net/dnscache/dnscache.go | 5 +++-- net/memnet/memnet.go | 4 ++-- net/netcheck/netcheck.go | 4 ++-- net/netmon/interfaces_darwin.go | 4 ++-- net/netmon/netmon.go | 3 ++- net/netutil/netutil.go | 5 +++-- net/ping/ping.go | 3 ++- net/portmapper/portmapper.go | 3 +-- net/sockstats/sockstats_tsgo.go | 4 ++-- net/tsdial/tsdial.go | 3 ++- net/wsconn/wsconn.go | 4 ++-- proxymap/proxymap.go | 4 ++-- syncs/locked.go | 4 ++-- syncs/mutex.go | 18 ++++++++++++++++++ syncs/mutex_debug.go | 18 ++++++++++++++++++ syncs/shardedint_test.go | 3 ++- util/eventbus/bus.go | 8 ++++---- util/eventbus/client.go | 4 ++-- util/eventbus/debug.go | 4 ++-- util/eventbus/subscribe.go | 4 ++-- util/execqueue/execqueue.go | 5 +++-- util/expvarx/expvarx.go | 4 ++-- util/goroutines/tracker.go | 4 ++-- util/limiter/limiter.go | 4 ++-- util/ringlog/ringlog.go | 4 ++-- util/syspolicy/rsop/change_callbacks.go | 3 ++- util/syspolicy/rsop/resultant_policy.go | 4 ++-- util/syspolicy/rsop/rsop.go | 3 +-- util/syspolicy/setting/setting.go | 4 ++-- wgengine/magicsock/blockforever_conn.go | 4 +++- wgengine/magicsock/endpoint.go | 4 ++-- wgengine/magicsock/endpoint_tracker.go | 4 ++-- wgengine/magicsock/magicsock.go | 2 +- wgengine/magicsock/rebinding_conn.go | 4 ++-- wgengine/magicsock/relaymanager.go | 3 ++- wgengine/netlog/netlog.go | 4 ++-- 55 files changed, 145 insertions(+), 94 deletions(-) create mode 100644 syncs/mutex.go create mode 100644 syncs/mutex_debug.go diff --git a/appc/appconnector.go b/appc/appconnector.go index 
e7b5032f0..5625decbf 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -16,9 +16,9 @@ import ( "net/netip" "slices" "strings" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/types/appctype" "tailscale.com/types/logger" "tailscale.com/types/views" @@ -139,7 +139,7 @@ type AppConnector struct { hasStoredRoutes bool // mu guards the fields that follow - mu sync.Mutex + mu syncs.Mutex // domains is a map of lower case domain names with no trailing dot, to an // ordered list of resolved IP addresses. diff --git a/client/local/local.go b/client/local/local.go index 2382a1225..a5e04f122 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -38,6 +38,7 @@ import ( "tailscale.com/net/udprelay/status" "tailscale.com/paths" "tailscale.com/safesocket" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/appctype" "tailscale.com/types/dnstype" @@ -1363,7 +1364,7 @@ type IPNBusWatcher struct { httpRes *http.Response dec *json.Decoder - mu sync.Mutex + mu syncs.Mutex closed bool } diff --git a/control/controlbase/conn.go b/control/controlbase/conn.go index dc22212e8..78ef73f71 100644 --- a/control/controlbase/conn.go +++ b/control/controlbase/conn.go @@ -18,6 +18,7 @@ import ( "golang.org/x/crypto/blake2s" chp "golang.org/x/crypto/chacha20poly1305" + "tailscale.com/syncs" "tailscale.com/types/key" ) @@ -48,7 +49,7 @@ type Conn struct { // rxState is all the Conn state that Read uses. type rxState struct { - sync.Mutex + syncs.Mutex cipher cipher.AEAD nonce nonce buf *maxMsgBuffer // or nil when reads exhausted diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index d30db6191..9e7d10d8d 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -23,7 +23,6 @@ import ( "runtime" "slices" "strings" - "sync" "sync/atomic" "time" @@ -44,6 +43,7 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/tlsdial" "tailscale.com/net/tsdial" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tka" "tailscale.com/tstime" @@ -92,7 +92,7 @@ type Direct struct { dialPlan ControlDialPlanner // can be nil - mu sync.Mutex // mutex guards the following fields + mu syncs.Mutex // mutex guards the following fields serverLegacyKey key.MachinePublic // original ("legacy") nacl crypto_box-based public key; only used for signRegisterRequest on Windows now serverNoiseKey key.MachinePublic diff --git a/derp/derpserver/derpserver.go b/derp/derpserver/derpserver.go index 31cf9363a..0bbc66780 100644 --- a/derp/derpserver/derpserver.go +++ b/derp/derpserver/derpserver.go @@ -177,7 +177,7 @@ type Server struct { verifyClientsURL string verifyClientsURLFailOpen bool - mu sync.Mutex + mu syncs.Mutex closed bool netConns map[derp.Conn]chan struct{} // chan is closed when conn closes clients map[key.NodePublic]*clientSet diff --git a/envknob/envknob.go b/envknob/envknob.go index 9dea8f74d..17a21387e 100644 --- a/envknob/envknob.go +++ b/envknob/envknob.go @@ -28,19 +28,19 @@ import ( "slices" "strconv" "strings" - "sync" "sync/atomic" "time" "tailscale.com/feature/buildfeatures" "tailscale.com/kube/kubetypes" + "tailscale.com/syncs" "tailscale.com/types/opt" "tailscale.com/version" "tailscale.com/version/distro" ) var ( - mu sync.Mutex + mu syncs.Mutex // +checklocks:mu set = map[string]string{} // +checklocks:mu diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index cfa372bd7..7d12d62e5 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -9,7 +9,6 @@ 
import ( "encoding/json" "fmt" "net/http" - "sync" "tailscale.com/disco" "tailscale.com/feature" @@ -19,6 +18,7 @@ import ( "tailscale.com/net/udprelay" "tailscale.com/net/udprelay/endpoint" "tailscale.com/net/udprelay/status" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" @@ -95,7 +95,7 @@ type extension struct { ec *eventbus.Client respPub *eventbus.Publisher[magicsock.UDPRelayAllocResp] - mu sync.Mutex // guards the following fields + mu syncs.Mutex // guards the following fields shutdown bool // true if Shutdown() has been called rs relayServer // nil when disabled port *int // ipn.Prefs.RelayServerPort, nil if disabled diff --git a/health/health.go b/health/health.go index cbfa599c5..f0f6a6ffb 100644 --- a/health/health.go +++ b/health/health.go @@ -20,6 +20,7 @@ import ( "tailscale.com/envknob" "tailscale.com/feature/buildfeatures" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/opt" @@ -30,7 +31,7 @@ import ( ) var ( - mu sync.Mutex + mu syncs.Mutex debugHandler map[string]http.Handler ) diff --git a/ipn/auditlog/extension.go b/ipn/auditlog/extension.go index f73681db0..ae2a296b2 100644 --- a/ipn/auditlog/extension.go +++ b/ipn/auditlog/extension.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "sync" "time" "tailscale.com/control/controlclient" @@ -15,6 +14,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnext" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/lazy" "tailscale.com/types/logger" @@ -40,7 +40,7 @@ type extension struct { store lazy.SyncValue[LogStore] // mu protects all following fields. - mu sync.Mutex + mu syncs.Mutex // logger is the current audit logger, or nil if it is not set up, // such as before the first control client is created, or after // a profile change and before the new control client is created. diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index ab49976c8..d7133d25e 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -30,7 +30,6 @@ import ( "runtime" "slices" "strings" - "sync" "time" "tailscale.com/atomicfile" @@ -42,6 +41,7 @@ import ( "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" "tailscale.com/net/bakedroots" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tempfork/acme" "tailscale.com/types/logger" @@ -60,9 +60,9 @@ var ( // acmeMu guards all ACME operations, so concurrent requests // for certs don't slam ACME. The first will go through and // populate the on-disk cache and the rest should use that. - acmeMu sync.Mutex + acmeMu syncs.Mutex - renewMu sync.Mutex // lock order: acmeMu before renewMu + renewMu syncs.Mutex // lock order: acmeMu before renewMu renewCertAt = map[string]time.Time{} ) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9de1f3d85..ed183e508 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -247,7 +247,7 @@ type LocalBackend struct { extHost *ExtensionHost // The mutex protects the following elements. - mu sync.Mutex + mu syncs.Mutex // currentNodeAtomic is the current node context. It is always non-nil. // It must be re-created when [LocalBackend] switches to a different profile/node @@ -329,14 +329,14 @@ type LocalBackend struct { // // tkaSyncLock MUST be taken before mu (or inversely, mu must not be held // at the moment that tkaSyncLock is taken). - tkaSyncLock sync.Mutex + tkaSyncLock syncs.Mutex clock tstime.Clock // Last ClientVersion received in MapResponse, guarded by mu. 
lastClientVersion *tailcfg.ClientVersion // lastNotifiedDriveSharesMu guards lastNotifiedDriveShares - lastNotifiedDriveSharesMu sync.Mutex + lastNotifiedDriveSharesMu syncs.Mutex // lastNotifiedDriveShares keeps track of the last set of shares that we // notified about. diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index dbe23e4d5..6880440bd 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -16,6 +16,7 @@ import ( "tailscale.com/ipn" "tailscale.com/net/dns" "tailscale.com/net/tsaddr" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/dnstype" "tailscale.com/types/key" @@ -82,7 +83,7 @@ type nodeBackend struct { derpMapViewPub *eventbus.Publisher[tailcfg.DERPMapView] // TODO(nickkhyl): maybe use sync.RWMutex? - mu sync.Mutex // protects the following fields + mu syncs.Mutex // protects the following fields shutdownOnce sync.Once // guards calling [nodeBackend.shutdown] readyCh chan struct{} // closed by [nodeBackend.ready]; nil after shutdown diff --git a/logtail/buffer.go b/logtail/buffer.go index d14d8fbf6..82c9b4610 100644 --- a/logtail/buffer.go +++ b/logtail/buffer.go @@ -9,7 +9,8 @@ import ( "bytes" "errors" "fmt" - "sync" + + "tailscale.com/syncs" ) type Buffer interface { @@ -36,7 +37,7 @@ type memBuffer struct { next []byte pending chan qentry - dropMu sync.Mutex + dropMu syncs.Mutex dropCount int } diff --git a/metrics/metrics.go b/metrics/metrics.go index d1b1c06c9..19966d395 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -11,7 +11,6 @@ import ( "io" "slices" "strings" - "sync" "tailscale.com/syncs" ) @@ -41,7 +40,7 @@ type LabelMap struct { Label string expvar.Map // shardedIntMu orders the initialization of new shardedint keys - shardedIntMu sync.Mutex + shardedIntMu syncs.Mutex } // SetInt64 sets the *Int value stored under the given map key. diff --git a/net/captivedetection/captivedetection.go b/net/captivedetection/captivedetection.go index a06362a5b..3ec820b79 100644 --- a/net/captivedetection/captivedetection.go +++ b/net/captivedetection/captivedetection.go @@ -18,6 +18,7 @@ import ( "time" "tailscale.com/net/netmon" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/logger" ) @@ -32,7 +33,7 @@ type Detector struct { // currIfIndex is the index of the interface that is currently being used by the httpClient. currIfIndex int // mu guards currIfIndex. - mu sync.Mutex + mu syncs.Mutex // logf is the logger used for logging messages. If it is nil, log.Printf is used. 
logf logger.Logf } diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index 444c5d37d..5ccadbab2 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -16,7 +16,6 @@ import ( "slices" "sort" "strings" - "sync" "syscall" "time" @@ -27,6 +26,7 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/envknob" "tailscale.com/health" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/dnsname" "tailscale.com/util/syspolicy/pkey" @@ -51,7 +51,7 @@ type windowsManager struct { unregisterPolicyChangeCb func() // called when the manager is closing - mu sync.Mutex + mu syncs.Mutex closing bool } diff --git a/net/dns/resolver/debug.go b/net/dns/resolver/debug.go index 0f9b106bb..a41462e18 100644 --- a/net/dns/resolver/debug.go +++ b/net/dns/resolver/debug.go @@ -8,12 +8,12 @@ import ( "html" "net/http" "strconv" - "sync" "sync/atomic" "time" "tailscale.com/feature/buildfeatures" "tailscale.com/health" + "tailscale.com/syncs" ) func init() { @@ -39,7 +39,7 @@ func init() { var fwdLogAtomic atomic.Pointer[fwdLog] type fwdLog struct { - mu sync.Mutex + mu syncs.Mutex pos int // ent[pos] is next entry ent []fwdLogEntry } diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index 86f0f5b8c..5adc43efc 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -37,6 +37,7 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/sockstats" "tailscale.com/net/tsdial" + "tailscale.com/syncs" "tailscale.com/types/dnstype" "tailscale.com/types/logger" "tailscale.com/types/nettype" @@ -231,7 +232,7 @@ type forwarder struct { ctx context.Context // good until Close ctxCancel context.CancelFunc // closes ctx - mu sync.Mutex // guards following + mu syncs.Mutex // guards following dohClient map[string]*http.Client // urlBase -> client diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go index 93cbf3839..3185cbe2b 100644 --- a/net/dns/resolver/tsdns.go +++ b/net/dns/resolver/tsdns.go @@ -214,7 +214,7 @@ type Resolver struct { closed chan struct{} // mu guards the following fields from being updated while used. - mu sync.Mutex + mu syncs.Mutex localDomains []dnsname.FQDN hostToIP map[dnsname.FQDN][]netip.Addr ipToHost map[netip.Addr]dnsname.FQDN diff --git a/net/dnscache/dnscache.go b/net/dnscache/dnscache.go index 94d4bbee7..e222b983f 100644 --- a/net/dnscache/dnscache.go +++ b/net/dnscache/dnscache.go @@ -20,6 +20,7 @@ import ( "tailscale.com/envknob" "tailscale.com/net/netx" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/cloudenv" "tailscale.com/util/singleflight" @@ -97,7 +98,7 @@ type Resolver struct { sf singleflight.Group[string, ipRes] - mu sync.Mutex + mu syncs.Mutex ipCache map[string]ipCacheEntry } @@ -474,7 +475,7 @@ type dialCall struct { d *dialer network, address, host, port string - mu sync.Mutex // lock ordering: dialer.mu, then dialCall.mu + mu syncs.Mutex // lock ordering: dialer.mu, then dialCall.mu fails map[netip.Addr]error // set of IPs that failed to dial thus far } diff --git a/net/memnet/memnet.go b/net/memnet/memnet.go index 1e43df2da..db9e3872f 100644 --- a/net/memnet/memnet.go +++ b/net/memnet/memnet.go @@ -12,9 +12,9 @@ import ( "fmt" "net" "net/netip" - "sync" "tailscale.com/net/netx" + "tailscale.com/syncs" ) var _ netx.Network = (*Network)(nil) @@ -26,7 +26,7 @@ var _ netx.Network = (*Network)(nil) // // Its zero value is a valid [netx.Network] implementation. 
type Network struct { - mu sync.Mutex + mu syncs.Mutex lns map[string]*Listener // address -> listener } diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 726221675..95750b2d0 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -235,7 +235,7 @@ type Client struct { testEnoughRegions int testCaptivePortalDelay time.Duration - mu sync.Mutex // guards following + mu syncs.Mutex // guards following nextFull bool // do a full region scan, even if last != nil prev map[time.Time]*Report // some previous reports last *Report // most recent report @@ -597,7 +597,7 @@ type reportState struct { stopProbeCh chan struct{} waitPortMap sync.WaitGroup - mu sync.Mutex + mu syncs.Mutex report *Report // to be returned by GetReport inFlight map[stun.TxID]func(netip.AddrPort) // called without c.mu held gotEP4 netip.AddrPort diff --git a/net/netmon/interfaces_darwin.go b/net/netmon/interfaces_darwin.go index b175f980a..126040350 100644 --- a/net/netmon/interfaces_darwin.go +++ b/net/netmon/interfaces_darwin.go @@ -7,12 +7,12 @@ import ( "fmt" "net" "strings" - "sync" "syscall" "unsafe" "golang.org/x/net/route" "golang.org/x/sys/unix" + "tailscale.com/syncs" "tailscale.com/util/mak" ) @@ -26,7 +26,7 @@ func parseRoutingTable(rib []byte) ([]route.Message, error) { } var ifNames struct { - sync.Mutex + syncs.Mutex m map[int]string // ifindex => name } diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index f7d1b1107..657da04d5 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -15,6 +15,7 @@ import ( "time" "tailscale.com/feature/buildfeatures" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" @@ -65,7 +66,7 @@ type Monitor struct { // and not change at runtime. tsIfName string // tailscale interface name, if known/set ("tailscale0", "utun3", ...) 
- mu sync.Mutex // guards all following fields + mu syncs.Mutex // guards all following fields cbs set.HandleSet[ChangeFunc] ifState *State gwValid bool // whether gw and gwSelfIP are valid diff --git a/net/netutil/netutil.go b/net/netutil/netutil.go index bc64e8fdc..5c42f51c6 100644 --- a/net/netutil/netutil.go +++ b/net/netutil/netutil.go @@ -8,7 +8,8 @@ import ( "bufio" "io" "net" - "sync" + + "tailscale.com/syncs" ) // NewOneConnListener returns a net.Listener that returns c on its @@ -29,7 +30,7 @@ func NewOneConnListener(c net.Conn, addr net.Addr) net.Listener { type oneConnListener struct { addr net.Addr - mu sync.Mutex + mu syncs.Mutex conn net.Conn } diff --git a/net/ping/ping.go b/net/ping/ping.go index 1ff3862dc..8e16a692a 100644 --- a/net/ping/ping.go +++ b/net/ping/ping.go @@ -23,6 +23,7 @@ import ( "golang.org/x/net/icmp" "golang.org/x/net/ipv4" "golang.org/x/net/ipv6" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/mak" ) @@ -64,7 +65,7 @@ type Pinger struct { wg sync.WaitGroup // Following fields protected by mu - mu sync.Mutex + mu syncs.Mutex // conns is a map of "type" to net.PacketConn, type is either // "ip4:icmp" or "ip6:icmp" conns map[string]net.PacketConn diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index 9368d1c4e..16a981d1d 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -14,7 +14,6 @@ import ( "net/http" "net/netip" "slices" - "sync" "sync/atomic" "time" @@ -123,7 +122,7 @@ type Client struct { testPxPPort uint16 // if non-zero, pxpPort to use for tests testUPnPPort uint16 // if non-zero, uPnPPort to use for tests - mu sync.Mutex // guards following, and all fields thereof + mu syncs.Mutex // guards following, and all fields thereof // runningCreate is whether we're currently working on creating // a port mapping (whether GetCachedMappingOrStartCreatingOne kicked diff --git a/net/sockstats/sockstats_tsgo.go b/net/sockstats/sockstats_tsgo.go index fec9ec3b0..aa875df9a 100644 --- a/net/sockstats/sockstats_tsgo.go +++ b/net/sockstats/sockstats_tsgo.go @@ -10,12 +10,12 @@ import ( "fmt" "net" "strings" - "sync" "sync/atomic" "syscall" "time" "tailscale.com/net/netmon" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/version" @@ -40,7 +40,7 @@ var sockStats = struct { // mu protects fields in this group (but not the fields within // sockStatCounters). It should not be held in the per-read/write // callbacks. - mu sync.Mutex + mu syncs.Mutex countersByLabel map[Label]*sockStatCounters knownInterfaces map[int]string // interface index -> name usedInterfaces map[int]int // set of interface indexes diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index c7483a125..065c01384 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -27,6 +27,7 @@ import ( "tailscale.com/net/netns" "tailscale.com/net/netx" "tailscale.com/net/tsaddr" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/util/clientmetric" @@ -86,7 +87,7 @@ type Dialer struct { routes atomic.Pointer[bart.Table[bool]] // or nil if UserDial should not use routes. 
`true` indicates routes that point into the Tailscale interface - mu sync.Mutex + mu syncs.Mutex closed bool dns dnsMap tunName string // tun device name diff --git a/net/wsconn/wsconn.go b/net/wsconn/wsconn.go index 3c83ffd8c..9e44da59c 100644 --- a/net/wsconn/wsconn.go +++ b/net/wsconn/wsconn.go @@ -12,11 +12,11 @@ import ( "math" "net" "os" - "sync" "sync/atomic" "time" "github.com/coder/websocket" + "tailscale.com/syncs" ) // NetConn converts a *websocket.Conn into a net.Conn. @@ -102,7 +102,7 @@ type netConn struct { reading atomic.Bool afterReadDeadline atomic.Bool - readMu sync.Mutex + readMu syncs.Mutex // eofed is true if the reader should return io.EOF from the Read call. // // +checklocks:readMu diff --git a/proxymap/proxymap.go b/proxymap/proxymap.go index dfe6f2d58..20dc96c84 100644 --- a/proxymap/proxymap.go +++ b/proxymap/proxymap.go @@ -9,9 +9,9 @@ import ( "fmt" "net/netip" "strings" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/util/mak" ) @@ -22,7 +22,7 @@ import ( // ask tailscaled (via the LocalAPI WhoIs method) the Tailscale identity that a // given localhost:port corresponds to. type Mapper struct { - mu sync.Mutex + mu syncs.Mutex // m holds the mapping from localhost IP:ports to Tailscale IPs. It is // keyed first by the protocol ("tcp" or "udp"), then by the IP:port. diff --git a/syncs/locked.go b/syncs/locked.go index d2048665d..d2e9edef7 100644 --- a/syncs/locked.go +++ b/syncs/locked.go @@ -8,7 +8,7 @@ import ( ) // AssertLocked panics if m is not locked. -func AssertLocked(m *sync.Mutex) { +func AssertLocked(m *Mutex) { if m.TryLock() { m.Unlock() panic("mutex is not locked") @@ -16,7 +16,7 @@ func AssertLocked(m *sync.Mutex) { } // AssertRLocked panics if rw is not locked for reading or writing. -func AssertRLocked(rw *sync.RWMutex) { +func AssertRLocked(rw *RWMutex) { if rw.TryLock() { rw.Unlock() panic("mutex is not locked") diff --git a/syncs/mutex.go b/syncs/mutex.go new file mode 100644 index 000000000..e61d1d1ab --- /dev/null +++ b/syncs/mutex.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_mutex_debug + +package syncs + +import "sync" + +// Mutex is an alias for sync.Mutex. +// +// It's only not a sync.Mutex when built with the ts_mutex_debug build tag. +type Mutex = sync.Mutex + +// RWMutex is an alias for sync.RWMutex. +// +// It's only not a sync.RWMutex when built with the ts_mutex_debug build tag. +type RWMutex = sync.RWMutex diff --git a/syncs/mutex_debug.go b/syncs/mutex_debug.go new file mode 100644 index 000000000..14b52ffe3 --- /dev/null +++ b/syncs/mutex_debug.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_mutex_debug + +package syncs + +import "sync" + +type Mutex struct { + sync.Mutex +} + +type RWMutex struct { + sync.RWMutex +} + +// TODO(bradfitz): actually track stuff when in debug mode. diff --git a/syncs/shardedint_test.go b/syncs/shardedint_test.go index d355a1540..815a739d1 100644 --- a/syncs/shardedint_test.go +++ b/syncs/shardedint_test.go @@ -1,13 +1,14 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package syncs +package syncs_test import ( "expvar" "sync" "testing" + . 
"tailscale.com/syncs" "tailscale.com/tstest" ) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index b1639136a..46fa5b198 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -8,8 +8,8 @@ import ( "log" "reflect" "slices" - "sync" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/set" ) @@ -34,12 +34,12 @@ type Bus struct { routeDebug hook[RoutedEvent] logf logger.Logf - topicsMu sync.Mutex + topicsMu syncs.Mutex topics map[reflect.Type][]*subscribeState // Used for introspection/debugging only, not in the normal event // publishing path. - clientsMu sync.Mutex + clientsMu syncs.Mutex clients set.Set[*Client] } @@ -306,7 +306,7 @@ func (w *worker) StopAndWait() { type stopFlag struct { // guards the lazy construction of stopped, and the value of // alreadyStopped. - mu sync.Mutex + mu syncs.Mutex stopped chan struct{} alreadyStopped bool } diff --git a/util/eventbus/client.go b/util/eventbus/client.go index c119c67a9..a7a5ab673 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -5,8 +5,8 @@ package eventbus import ( "reflect" - "sync" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/set" ) @@ -22,7 +22,7 @@ type Client struct { bus *Bus publishDebug hook[PublishedEvent] - mu sync.Mutex + mu syncs.Mutex pub set.Set[publisher] sub *subscribeState // Lazily created on first subscribe stop stopFlag // signaled on Close diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index 2f2c9589a..0453defb1 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -11,10 +11,10 @@ import ( "runtime" "slices" "strings" - "sync" "sync/atomic" "time" + "tailscale.com/syncs" "tailscale.com/types/logger" ) @@ -147,7 +147,7 @@ func (d *Debugger) SubscribeTypes(client *Client) []reflect.Type { // A hook collects hook functions that can be run as a group. 
type hook[T any] struct { - sync.Mutex + syncs.Mutex fns []hookFn[T] } diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 53253d330..b0348e125 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -8,9 +8,9 @@ import ( "fmt" "reflect" "runtime" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/cibuild" ) @@ -51,7 +51,7 @@ type subscribeState struct { snapshot chan chan []DeliveredEvent debug hook[DeliveredEvent] - outputsMu sync.Mutex + outputsMu syncs.Mutex outputs map[reflect.Type]subscriber } diff --git a/util/execqueue/execqueue.go b/util/execqueue/execqueue.go index dce70c542..2ea0c1f2f 100644 --- a/util/execqueue/execqueue.go +++ b/util/execqueue/execqueue.go @@ -7,11 +7,12 @@ package execqueue import ( "context" "errors" - "sync" + + "tailscale.com/syncs" ) type ExecQueue struct { - mu sync.Mutex + mu syncs.Mutex ctx context.Context // context.Background + closed on Shutdown cancel context.CancelFunc // closes ctx closed bool diff --git a/util/expvarx/expvarx.go b/util/expvarx/expvarx.go index 762f65d06..bcdc4a91a 100644 --- a/util/expvarx/expvarx.go +++ b/util/expvarx/expvarx.go @@ -7,9 +7,9 @@ package expvarx import ( "encoding/json" "expvar" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/types/lazy" ) @@ -20,7 +20,7 @@ type SafeFunc struct { limit time.Duration onSlow func(time.Duration, any) - mu sync.Mutex + mu syncs.Mutex inflight *lazy.SyncValue[any] } diff --git a/util/goroutines/tracker.go b/util/goroutines/tracker.go index 044843d33..c2a0cb8c3 100644 --- a/util/goroutines/tracker.go +++ b/util/goroutines/tracker.go @@ -4,9 +4,9 @@ package goroutines import ( - "sync" "sync/atomic" + "tailscale.com/syncs" "tailscale.com/util/set" ) @@ -15,7 +15,7 @@ type Tracker struct { started atomic.Int64 // counter running atomic.Int64 // gauge - mu sync.Mutex + mu syncs.Mutex onDone set.HandleSet[func()] } diff --git a/util/limiter/limiter.go b/util/limiter/limiter.go index 5af5f7bd1..30e0b74ed 100644 --- a/util/limiter/limiter.go +++ b/util/limiter/limiter.go @@ -8,9 +8,9 @@ import ( "fmt" "html" "io" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/util/lru" ) @@ -75,7 +75,7 @@ type Limiter[K comparable] struct { // perpetually in debt and cannot proceed at all. Overdraft int64 - mu sync.Mutex + mu syncs.Mutex cache *lru.Cache[K, *bucket] } diff --git a/util/ringlog/ringlog.go b/util/ringlog/ringlog.go index 85e0c4861..62dfbae5b 100644 --- a/util/ringlog/ringlog.go +++ b/util/ringlog/ringlog.go @@ -4,7 +4,7 @@ // Package ringlog contains a limited-size concurrency-safe generic ring log. package ringlog -import "sync" +import "tailscale.com/syncs" // New creates a new [RingLog] containing at most max items. func New[T any](max int) *RingLog[T] { @@ -15,7 +15,7 @@ func New[T any](max int) *RingLog[T] { // RingLog is a concurrency-safe fixed size log window containing entries of [T]. 
type RingLog[T any] struct { - mu sync.Mutex + mu syncs.Mutex pos int buf []T max int diff --git a/util/syspolicy/rsop/change_callbacks.go b/util/syspolicy/rsop/change_callbacks.go index fdf51c253..71135bb2a 100644 --- a/util/syspolicy/rsop/change_callbacks.go +++ b/util/syspolicy/rsop/change_callbacks.go @@ -9,6 +9,7 @@ import ( "sync" "time" + "tailscale.com/syncs" "tailscale.com/util/set" "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/pkey" @@ -70,7 +71,7 @@ func (c PolicyChange) HasChangedAnyOf(keys ...pkey.Key) bool { // policyChangeCallbacks are the callbacks to invoke when the effective policy changes. // It is safe for concurrent use. type policyChangeCallbacks struct { - mu sync.Mutex + mu syncs.Mutex cbs set.HandleSet[PolicyChangeCallback] } diff --git a/util/syspolicy/rsop/resultant_policy.go b/util/syspolicy/rsop/resultant_policy.go index 297d26f9f..bdda90976 100644 --- a/util/syspolicy/rsop/resultant_policy.go +++ b/util/syspolicy/rsop/resultant_policy.go @@ -7,10 +7,10 @@ import ( "errors" "fmt" "slices" - "sync" "sync/atomic" "time" + "tailscale.com/syncs" "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/testenv" @@ -58,7 +58,7 @@ type Policy struct { changeCallbacks policyChangeCallbacks - mu sync.Mutex + mu syncs.Mutex watcherStarted bool // whether [Policy.watchReload] was started sources source.ReadableSources closing bool // whether [Policy.Close] was called (even if we're still closing) diff --git a/util/syspolicy/rsop/rsop.go b/util/syspolicy/rsop/rsop.go index 429b9b101..333dca643 100644 --- a/util/syspolicy/rsop/rsop.go +++ b/util/syspolicy/rsop/rsop.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "slices" - "sync" "tailscale.com/syncs" "tailscale.com/util/slicesx" @@ -20,7 +19,7 @@ import ( ) var ( - policyMu sync.Mutex // protects [policySources] and [effectivePolicies] + policyMu syncs.Mutex // protects [policySources] and [effectivePolicies] policySources []*source.Source // all registered policy sources effectivePolicies []*Policy // all active (non-closed) effective policies returned by [PolicyFor] diff --git a/util/syspolicy/setting/setting.go b/util/syspolicy/setting/setting.go index 091cf58d3..0ca36176e 100644 --- a/util/syspolicy/setting/setting.go +++ b/util/syspolicy/setting/setting.go @@ -11,9 +11,9 @@ import ( "fmt" "slices" "strings" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/types/lazy" "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/pkey" @@ -215,7 +215,7 @@ type DefinitionMap map[pkey.Key]*Definition var ( definitions lazy.SyncValue[DefinitionMap] - definitionsMu sync.Mutex + definitionsMu syncs.Mutex definitionsList []*Definition definitionsUsed bool ) diff --git a/wgengine/magicsock/blockforever_conn.go b/wgengine/magicsock/blockforever_conn.go index f2e85dcd5..272a12513 100644 --- a/wgengine/magicsock/blockforever_conn.go +++ b/wgengine/magicsock/blockforever_conn.go @@ -10,11 +10,13 @@ import ( "sync" "syscall" "time" + + "tailscale.com/syncs" ) // blockForeverConn is a net.PacketConn whose reads block until it is closed. 
type blockForeverConn struct { - mu sync.Mutex + mu syncs.Mutex cond *sync.Cond closed bool } diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 2010775a1..c2e5dcca3 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -17,7 +17,6 @@ import ( "reflect" "runtime" "slices" - "sync" "sync/atomic" "time" @@ -28,6 +27,7 @@ import ( "tailscale.com/net/packet" "tailscale.com/net/stun" "tailscale.com/net/tstun" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime/mono" "tailscale.com/types/key" @@ -73,7 +73,7 @@ type endpoint struct { disco atomic.Pointer[endpointDisco] // if the peer supports disco, the key and short string // mu protects all following fields. - mu sync.Mutex // Lock ordering: Conn.mu, then endpoint.mu + mu syncs.Mutex // Lock ordering: Conn.mu, then endpoint.mu heartBeatTimer *time.Timer // nil when idle lastSendExt mono.Time // last time there were outgoing packets sent to this peer from an external trigger (e.g. wireguard-go or disco pingCLI) diff --git a/wgengine/magicsock/endpoint_tracker.go b/wgengine/magicsock/endpoint_tracker.go index 5caddd1a0..e95852d24 100644 --- a/wgengine/magicsock/endpoint_tracker.go +++ b/wgengine/magicsock/endpoint_tracker.go @@ -6,9 +6,9 @@ package magicsock import ( "net/netip" "slices" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tempfork/heap" "tailscale.com/util/mak" @@ -107,7 +107,7 @@ func (eh endpointHeap) Min() *endpointTrackerEntry { // // See tailscale/tailscale#7877 for more information. type endpointTracker struct { - mu sync.Mutex + mu syncs.Mutex endpoints map[netip.Addr]*endpointHeap } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index d44cf1c11..f610d6adb 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -286,7 +286,7 @@ type Conn struct { // mu guards all following fields; see userspaceEngine lock // ordering rules against the engine. For derphttp, mu must // be held before derphttp.Client.mu. - mu sync.Mutex + mu syncs.Mutex muCond *sync.Cond onlyTCP443 atomic.Bool diff --git a/wgengine/magicsock/rebinding_conn.go b/wgengine/magicsock/rebinding_conn.go index 2798abbf2..c98e64570 100644 --- a/wgengine/magicsock/rebinding_conn.go +++ b/wgengine/magicsock/rebinding_conn.go @@ -8,7 +8,6 @@ import ( "fmt" "net" "net/netip" - "sync" "sync/atomic" "syscall" @@ -16,6 +15,7 @@ import ( "tailscale.com/net/batching" "tailscale.com/net/netaddr" "tailscale.com/net/packet" + "tailscale.com/syncs" "tailscale.com/types/nettype" ) @@ -31,7 +31,7 @@ type RebindingUDPConn struct { // Neither is expected to be nil, sockets are bound on creation. 
pconnAtomic atomic.Pointer[nettype.PacketConn] - mu sync.Mutex // held while changing pconn (and pconnAtomic) + mu syncs.Mutex // held while changing pconn (and pconnAtomic) pconn nettype.PacketConn port uint16 } diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index a9dca70ae..2f93f1085 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -15,6 +15,7 @@ import ( "tailscale.com/net/packet" "tailscale.com/net/stun" udprelay "tailscale.com/net/udprelay/endpoint" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/key" @@ -58,7 +59,7 @@ type relayManager struct { getServersCh chan chan set.Set[candidatePeerRelay] derpHomeChangeCh chan derpHomeChangeEvent - discoInfoMu sync.Mutex // guards the following field + discoInfoMu syncs.Mutex // guards the following field discoInfoByServerDisco map[key.DiscoPublic]*relayHandshakeDiscoInfo // runLoopStoppedCh is written to by runLoop() upon return, enabling event diff --git a/wgengine/netlog/netlog.go b/wgengine/netlog/netlog.go index 9809d1ce6..12fe9c797 100644 --- a/wgengine/netlog/netlog.go +++ b/wgengine/netlog/netlog.go @@ -15,7 +15,6 @@ import ( "log" "net/http" "net/netip" - "sync" "time" "tailscale.com/health" @@ -24,6 +23,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/net/sockstats" "tailscale.com/net/tsaddr" + "tailscale.com/syncs" "tailscale.com/types/ipproto" "tailscale.com/types/logger" "tailscale.com/types/logid" @@ -57,7 +57,7 @@ func (noopDevice) SetConnectionCounter(netlogfunc.ConnectionCounter) {} // unless the Tailnet administrator opts-into explicit logging. // The zero value is ready for use. type Logger struct { - mu sync.Mutex // protects all fields below + mu syncs.Mutex // protects all fields below logf logger.Logf // shutdownLocked shuts down the logger. From 139c395d7df2479657867e24f3a75a1608b6fa6f Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 22 Oct 2025 15:08:36 +0100 Subject: [PATCH 1671/1708] cmd/tailscale/cli: stabilise the output of `tailscale lock log --json` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch changes the behaviour of `tailscale lock log --json` to make it more useful for users. It also introduces versioning of our JSON output. ## Changes to `tailscale lock log --json` Previously this command would print the hash and base64-encoded bytes of each AUM, and users would need their own CBOR decoder to interpret it in a useful way: ```json [ { "Hash": [ 80, 136, 151, … ], "Change": "checkpoint", "Raw": "pAEFAvYFpQH2AopYIAkPN+8V3cJpkoC5ZY2+RI2Bcg2q5G7tRAQQd67W3YpnWCDPOo4KGeQBd8hdGsjoEQpSXyiPdlm+NXAlJ5dS1qEbFlggylNJDQM5ZQ2ULNsXxg2ZBFkPl/D93I1M56/rowU+UIlYIPZ/SxT9EA2Idy9kaCbsFzjX/s3Ms7584wWGbWd/f/QAWCBHYZzYiAPpQ+NXN+1Wn2fopQYk4yl7kNQcMXUKNAdt1lggcfjcuVACOH0J9pRNvYZQFOkbiBmLOW1hPKJsbC1D1GdYIKrJ38XMgpVMuTuBxM4YwoLmrK/RgXQw1uVEL3cywl3QWCA0FilVVv8uys8BNhS62cfNvCew1Pw5wIgSe3Prv8d8pFggQrwIt6ldYtyFPQcC5V18qrCnt7VpThACaz5RYzpx7RNYIKskOA7UoNiVtMkOrV2QoXv6EvDpbO26a01lVeh8UCeEA4KjAQECAQNYIORIdNHqSOzz1trIygnP5w3JWK2DtlY5NDIBbD7SKcjWowEBAgEDWCD27LpxiZNiA19k0QZhOWmJRvBdK2mz+dHu7rf0iGTPFwQb69Gt42fKNn0FGwRUiav/k6dDF4GiAVgg5Eh00epI7PPW2sjKCc/nDclYrYO2Vjk0MgFsPtIpyNYCWEDzIAooc+m45ay5PB/OB4AA9Fdki4KJq9Ll+PF6IJHYlOVhpTbc3E0KF7ODu1WURd0f7PXnW72dr89CSfGxIHAF" } ] ``` Now we print the AUM in an expanded form that can be easily read by scripts, although we include the raw bytes for verification and auditing. 
```json { "SchemaVersion": "1", "Messages": [ { "Hash": "KCEJPRKNSXJG2TPH3EHQRLJNLIIK2DV53FUNPADWA7BZJWBDRXZQ", "AUM": { "MessageKind": "checkpoint", "PrevAUMHash": null, "Key": null, "KeyID": null, "State": { … }, "Votes": null, "Meta": null, "Signatures": [ { "KeyID": "tlpub:e44874d1ea48ecf3d6dac8ca09cfe70dc958ad83b656393432016c3ed229c8d6", "Signature": "8yAKKHPpuOWsuTwfzgeAAPRXZIuCiavS5fjxeiCR2JTlYaU23NxNChezg7tVlEXdH+z151u9na/PQknxsSBwBQ==" } ] }, "Raw": "pAEFAvYFpQH2AopYIAkPN-8V3cJpkoC5ZY2-RI2Bcg2q5G7tRAQQd67W3YpnWCDPOo4KGeQBd8hdGsjoEQpSXyiPdlm-NXAlJ5dS1qEbFlggylNJDQM5ZQ2ULNsXxg2ZBFkPl_D93I1M56_rowU-UIlYIPZ_SxT9EA2Idy9kaCbsFzjX_s3Ms7584wWGbWd_f_QAWCBHYZzYiAPpQ-NXN-1Wn2fopQYk4yl7kNQcMXUKNAdt1lggcfjcuVACOH0J9pRNvYZQFOkbiBmLOW1hPKJsbC1D1GdYIKrJ38XMgpVMuTuBxM4YwoLmrK_RgXQw1uVEL3cywl3QWCA0FilVVv8uys8BNhS62cfNvCew1Pw5wIgSe3Prv8d8pFggQrwIt6ldYtyFPQcC5V18qrCnt7VpThACaz5RYzpx7RNYIKskOA7UoNiVtMkOrV2QoXv6EvDpbO26a01lVeh8UCeEA4KjAQECAQNYIORIdNHqSOzz1trIygnP5w3JWK2DtlY5NDIBbD7SKcjWowEBAgEDWCD27LpxiZNiA19k0QZhOWmJRvBdK2mz-dHu7rf0iGTPFwQb69Gt42fKNn0FGwRUiav_k6dDF4GiAVgg5Eh00epI7PPW2sjKCc_nDclYrYO2Vjk0MgFsPtIpyNYCWEDzIAooc-m45ay5PB_OB4AA9Fdki4KJq9Ll-PF6IJHYlOVhpTbc3E0KF7ODu1WURd0f7PXnW72dr89CSfGxIHAF" } ] } ``` This output was previously marked as unstable, and it wasn't very useful, so changing it should be fine. ## Versioning our JSON output This patch introduces a way to version our JSON output on the CLI, so we can make backwards-incompatible changes in future without breaking existing scripts or integrations. You can run this command in two ways: ``` tailscale lock log --json tailscale lock log --json=1 ``` Passing an explicit version number allows you to pick a specific JSON schema. If we ever want to change the schema, we increment the version number and users must opt-in to the new output. A bare `--json` flag will always return schema version 1, for compatibility with existing scripts. Updates https://github.com/tailscale/tailscale/issues/17613 Updates https://github.com/tailscale/corp/issues/23258 Signed-off-by: Alex Chan Change-Id: I897f78521cc1a81651f5476228c0882d7b723606 --- cmd/tailscale/cli/jsonoutput/jsonoutput.go | 84 ++++++++ .../cli/jsonoutput/network-lock-v1.go | 203 +++++++++++++++++ cmd/tailscale/cli/network-lock.go | 29 ++- cmd/tailscale/cli/network-lock_test.go | 204 ++++++++++++++++++ cmd/tailscale/depaware.txt | 1 + tka/aum.go | 15 +- 6 files changed, 524 insertions(+), 12 deletions(-) create mode 100644 cmd/tailscale/cli/jsonoutput/jsonoutput.go create mode 100644 cmd/tailscale/cli/jsonoutput/network-lock-v1.go create mode 100644 cmd/tailscale/cli/network-lock_test.go diff --git a/cmd/tailscale/cli/jsonoutput/jsonoutput.go b/cmd/tailscale/cli/jsonoutput/jsonoutput.go new file mode 100644 index 000000000..aa49acc28 --- /dev/null +++ b/cmd/tailscale/cli/jsonoutput/jsonoutput.go @@ -0,0 +1,84 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package jsonoutput provides stable and versioned JSON serialisation for CLI output. +// This allows us to provide stable output to scripts/clients, but also make +// breaking changes to the output when it's useful. +// +// Historically we only used `--json` as a boolean flag, so changing the output +// could break scripts that rely on the existing format. +// +// This package allows callers to pass a version number to `--json` and get +// a consistent output. We'll bump the version when we make a breaking change +// that's likely to break scripts that rely on the existing output, e.g. 
if +// we remove a field or change the type/format. +// +// Passing just the boolean flag `--json` will always return v1, to preserve +// compatibility with scripts written before we versioned our output. +package jsonoutput + +import ( + "errors" + "fmt" + "strconv" +) + +// JSONSchemaVersion implements flag.Value, and tracks whether the CLI has +// been called with `--json`, and if so, with what value. +type JSONSchemaVersion struct { + // IsSet tracks if the flag was provided at all. + IsSet bool + + // Value tracks the desired schema version, which defaults to 1 if + // the user passes `--json` without an argument. + Value int +} + +// String returns the default value which is printed in the CLI help text. +func (v *JSONSchemaVersion) String() string { + if v.IsSet { + return strconv.Itoa(v.Value) + } else { + return "(not set)" + } +} + +// Set is called when the user passes the flag as a command-line argument. +func (v *JSONSchemaVersion) Set(s string) error { + if v.IsSet { + return errors.New("received multiple instances of --json; only pass it once") + } + + v.IsSet = true + + // If the user doesn't supply a schema version, default to 1. + // This ensures that any existing scripts will continue to get their + // current output. + if s == "true" { + v.Value = 1 + return nil + } + + version, err := strconv.Atoi(s) + if err != nil { + return fmt.Errorf("invalid integer value passed to --json: %q", s) + } + v.Value = version + return nil +} + +// IsBoolFlag tells the flag package that JSONSchemaVersion can be set +// without an argument. +func (v *JSONSchemaVersion) IsBoolFlag() bool { + return true +} + +// ResponseEnvelope is a set of fields common to all versioned JSON output. +type ResponseEnvelope struct { + // SchemaVersion is the version of the JSON output, e.g. "1", "2", "3" + SchemaVersion string + + // ResponseWarning tells a user if a newer version of the JSON output + // is available. + ResponseWarning string `json:"_WARNING,omitzero"` +} diff --git a/cmd/tailscale/cli/jsonoutput/network-lock-v1.go b/cmd/tailscale/cli/jsonoutput/network-lock-v1.go new file mode 100644 index 000000000..8a2d2de33 --- /dev/null +++ b/cmd/tailscale/cli/jsonoutput/network-lock-v1.go @@ -0,0 +1,203 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package jsonoutput + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + + "tailscale.com/ipn/ipnstate" + "tailscale.com/tka" +) + +// PrintNetworkLockJSONV1 prints the stored TKA state as a JSON object to the CLI, +// in a stable "v1" format. 
+// +// This format includes: +// +// - the AUM hash as a base32-encoded string +// - the raw AUM as base64-encoded bytes +// - the expanded AUM, which prints named fields for consumption by other tools +func PrintNetworkLockJSONV1(out io.Writer, updates []ipnstate.NetworkLockUpdate) error { + messages := make([]logMessageV1, len(updates)) + + for i, update := range updates { + var aum tka.AUM + if err := aum.Unserialize(update.Raw); err != nil { + return fmt.Errorf("decoding: %w", err) + } + + h := aum.Hash() + + if !bytes.Equal(h[:], update.Hash[:]) { + return fmt.Errorf("incorrect AUM hash: got %v, want %v", h, update) + } + + messages[i] = toLogMessageV1(aum, update) + } + + result := struct { + ResponseEnvelope + Messages []logMessageV1 + }{ + ResponseEnvelope: ResponseEnvelope{ + SchemaVersion: "1", + }, + Messages: messages, + } + + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + return enc.Encode(result) +} + +// toLogMessageV1 converts a [tka.AUM] and [ipnstate.NetworkLockUpdate] to the +// JSON output returned by the CLI. +func toLogMessageV1(aum tka.AUM, update ipnstate.NetworkLockUpdate) logMessageV1 { + expandedAUM := expandedAUMV1{} + expandedAUM.MessageKind = aum.MessageKind.String() + if len(aum.PrevAUMHash) > 0 { + expandedAUM.PrevAUMHash = aum.PrevAUMHash.String() + } + if key := aum.Key; key != nil { + expandedAUM.Key = toExpandedKeyV1(key) + } + if keyID := aum.KeyID; keyID != nil { + expandedAUM.KeyID = fmt.Sprintf("tlpub:%x", keyID) + } + if state := aum.State; state != nil { + expandedState := expandedStateV1{} + if h := state.LastAUMHash; h != nil { + expandedState.LastAUMHash = h.String() + } + for _, secret := range state.DisablementSecrets { + expandedState.DisablementSecrets = append(expandedState.DisablementSecrets, fmt.Sprintf("%x", secret)) + } + for _, key := range state.Keys { + expandedState.Keys = append(expandedState.Keys, toExpandedKeyV1(&key)) + } + expandedState.StateID1 = state.StateID1 + expandedState.StateID2 = state.StateID2 + expandedAUM.State = expandedState + } + if votes := aum.Votes; votes != nil { + expandedAUM.Votes = *votes + } + expandedAUM.Meta = aum.Meta + for _, signature := range aum.Signatures { + expandedAUM.Signatures = append(expandedAUM.Signatures, expandedSignatureV1{ + KeyID: fmt.Sprintf("tlpub:%x", signature.KeyID), + Signature: base64.URLEncoding.EncodeToString(signature.Signature), + }) + } + + return logMessageV1{ + Hash: aum.Hash().String(), + AUM: expandedAUM, + Raw: base64.URLEncoding.EncodeToString(update.Raw), + } +} + +// toExpandedKeyV1 converts a [tka.Key] to the JSON output returned +// by the CLI. +func toExpandedKeyV1(key *tka.Key) expandedKeyV1 { + return expandedKeyV1{ + Kind: key.Kind.String(), + Votes: key.Votes, + Public: fmt.Sprintf("tlpub:%x", key.Public), + Meta: key.Meta, + } +} + +// logMessageV1 is the JSON representation of an AUM as both raw bytes and +// in its expanded form, and the CLI output is a list of these entries. +type logMessageV1 struct { + // The BLAKE2s digest of the CBOR-encoded AUM. This is printed as a + // base32-encoded string, e.g. KCE…XZQ + Hash string + + // The expanded form of the AUM, which presents the fields in a more + // accessible format than doing a CBOR decoding. + AUM expandedAUMV1 + + // The raw bytes of the CBOR-encoded AUM, encoded as base64. + // This is useful for verifying the AUM hash. + Raw string +} + +// expandedAUMV1 is the expanded version of a [tka.AUM], designed so external tools +// can read the AUM without knowing our CBOR definitions. 
+type expandedAUMV1 struct { + MessageKind string + PrevAUMHash string `json:"PrevAUMHash,omitzero"` + + // Key encodes a public key to be added to the key authority. + // This field is used for AddKey AUMs. + Key expandedKeyV1 `json:"Key,omitzero"` + + // KeyID references a public key which is part of the key authority. + // This field is used for RemoveKey and UpdateKey AUMs. + KeyID string `json:"KeyID,omitzero"` + + // State describes the full state of the key authority. + // This field is used for Checkpoint AUMs. + State expandedStateV1 `json:"State,omitzero"` + + // Votes and Meta describe properties of a key in the key authority. + // These fields are used for UpdateKey AUMs. + Votes uint `json:"Votes,omitzero"` + Meta map[string]string `json:"Meta,omitzero"` + + // Signatures lists the signatures over this AUM. + Signatures []expandedSignatureV1 `json:"Signatures,omitzero"` +} + +// expandedAUMV1 is the expanded version of a [tka.Key], which describes +// the public components of a key known to network-lock. +type expandedKeyV1 struct { + Kind string + + // Votes describes the weight applied to signatures using this key. + Votes uint + + // Public encodes the public key of the key as a hex string. + Public string + + // Meta describes arbitrary metadata about the key. This could be + // used to store the name of the key, for instance. + Meta map[string]string `json:"Meta,omitzero"` +} + +// expandedStateV1 is the expanded version of a [tka.State], which describes +// Tailnet Key Authority state at an instant in time. +type expandedStateV1 struct { + // LastAUMHash is the blake2s digest of the last-applied AUM. + LastAUMHash string `json:"LastAUMHash,omitzero"` + + // DisablementSecrets are KDF-derived values which can be used + // to turn off the TKA in the event of a consensus-breaking bug. + DisablementSecrets []string + + // Keys are the public keys of either: + // + // 1. The signing nodes currently trusted by the TKA. + // 2. Ephemeral keys that were used to generate pre-signed auth keys. + Keys []expandedKeyV1 + + // StateID's are nonce's, generated on enablement and fixed for + // the lifetime of the Tailnet Key Authority. + StateID1 uint64 + StateID2 uint64 +} + +// expandedSignatureV1 is the expanded form of a [tka.Signature], which +// describes a signature over an AUM. This signature can be verified +// using the key referenced by KeyID. 
+type expandedSignatureV1 struct { + KeyID string + Signature string +} diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index a15d9ab88..73b1d6201 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -10,10 +10,11 @@ import ( "context" "crypto/rand" "encoding/hex" - "encoding/json" + jsonv1 "encoding/json" "errors" "flag" "fmt" + "io" "os" "strconv" "strings" @@ -21,6 +22,7 @@ import ( "github.com/mattn/go-isatty" "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/cmd/tailscale/cli/jsonoutput" "tailscale.com/ipn/ipnstate" "tailscale.com/tka" "tailscale.com/tsconst" @@ -219,7 +221,7 @@ func runNetworkLockStatus(ctx context.Context, args []string) error { } if nlStatusArgs.json { - enc := json.NewEncoder(os.Stdout) + enc := jsonv1.NewEncoder(os.Stdout) enc.SetIndent("", " ") return enc.Encode(st) } @@ -600,7 +602,7 @@ func runNetworkLockDisablementKDF(ctx context.Context, args []string) error { var nlLogArgs struct { limit int - json bool + json jsonoutput.JSONSchemaVersion } var nlLogCmd = &ffcli.Command{ @@ -612,7 +614,7 @@ var nlLogCmd = &ffcli.Command{ FlagSet: (func() *flag.FlagSet { fs := newFlagSet("lock log") fs.IntVar(&nlLogArgs.limit, "limit", 50, "max number of updates to list") - fs.BoolVar(&nlLogArgs.json, "json", false, "output in JSON format (WARNING: format subject to change)") + fs.Var(&nlLogArgs.json, "json", "output in JSON format") return fs })(), } @@ -678,7 +680,7 @@ func nlDescribeUpdate(update ipnstate.NetworkLockUpdate, color bool) (string, er default: // Print a JSON encoding of the AUM as a fallback. - e := json.NewEncoder(&stanza) + e := jsonv1.NewEncoder(&stanza) e.SetIndent("", "\t") if err := e.Encode(aum); err != nil { return "", err @@ -702,14 +704,21 @@ func runNetworkLockLog(ctx context.Context, args []string) error { if err != nil { return fixTailscaledConnectError(err) } - if nlLogArgs.json { - enc := json.NewEncoder(Stdout) - enc.SetIndent("", " ") - return enc.Encode(updates) - } out, useColor := colorableOutput() + return printNetworkLockLog(updates, out, nlLogArgs.json, useColor) +} + +func printNetworkLockLog(updates []ipnstate.NetworkLockUpdate, out io.Writer, jsonSchema jsonoutput.JSONSchemaVersion, useColor bool) error { + if jsonSchema.IsSet { + if jsonSchema.Value == 1 { + return jsonoutput.PrintNetworkLockJSONV1(out, updates) + } else { + return fmt.Errorf("unrecognised version: %q", jsonSchema.Value) + } + } + for _, update := range updates { stanza, err := nlDescribeUpdate(update, useColor) if err != nil { diff --git a/cmd/tailscale/cli/network-lock_test.go b/cmd/tailscale/cli/network-lock_test.go new file mode 100644 index 000000000..ccd2957ab --- /dev/null +++ b/cmd/tailscale/cli/network-lock_test.go @@ -0,0 +1,204 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "bytes" + "testing" + + "github.com/google/go-cmp/cmp" + "tailscale.com/cmd/tailscale/cli/jsonoutput" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tka" + "tailscale.com/types/tkatype" +) + +func TestNetworkLockLogOutput(t *testing.T) { + votes := uint(1) + aum1 := tka.AUM{ + MessageKind: tka.AUMAddKey, + Key: &tka.Key{ + Kind: tka.Key25519, + Votes: 1, + Public: []byte{2, 2}, + }, + } + h1 := aum1.Hash() + aum2 := tka.AUM{ + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{3, 3}, + PrevAUMHash: h1[:], + Signatures: []tkatype.Signature{ + { + KeyID: []byte{3, 4}, + Signature: []byte{4, 5}, + }, + }, + Meta: map[string]string{"en": "three", 
"de": "drei", "es": "tres"}, + } + h2 := aum2.Hash() + aum3 := tka.AUM{ + MessageKind: tka.AUMCheckpoint, + PrevAUMHash: h2[:], + State: &tka.State{ + Keys: []tka.Key{ + { + Kind: tka.Key25519, + Votes: 1, + Public: []byte{1, 1}, + Meta: map[string]string{"en": "one", "de": "eins", "es": "uno"}, + }, + }, + DisablementSecrets: [][]byte{ + {1, 2, 3}, + {4, 5, 6}, + {7, 8, 9}, + }, + }, + Votes: &votes, + } + + updates := []ipnstate.NetworkLockUpdate{ + { + Hash: aum3.Hash(), + Change: aum3.MessageKind.String(), + Raw: aum3.Serialize(), + }, + { + Hash: aum2.Hash(), + Change: aum2.MessageKind.String(), + Raw: aum2.Serialize(), + }, + { + Hash: aum1.Hash(), + Change: aum1.MessageKind.String(), + Raw: aum1.Serialize(), + }, + } + + t.Run("human-readable", func(t *testing.T) { + t.Parallel() + + var outBuf bytes.Buffer + json := jsonoutput.JSONSchemaVersion{} + useColor := false + + printNetworkLockLog(updates, &outBuf, json, useColor) + + t.Logf("%s", outBuf.String()) + + want := `update 4M4Q3IXBARPQMFVXHJBDCYQMWU5H5FBKD7MFF75HE4O5JMIWR2UA (checkpoint) +Disablement values: + - 010203 + - 040506 + - 070809 +Keys: + Type: 25519 + KeyID: tlpub:0101 + Metadata: map[de:eins en:one es:uno] + +update BKVVXHOVBW7Y7YXYTLVVLMNSYG6DS5GVRVSYZLASNU3AQKA732XQ (remove-key) +KeyID: tlpub:0303 + +update UKJIKFHILQ62AEN7MQIFHXJ6SFVDGQCQA3OHVI3LWVPM736EMSAA (add-key) +Type: 25519 +KeyID: tlpub:0202 + +` + + if diff := cmp.Diff(outBuf.String(), want); diff != "" { + t.Fatalf("wrong output (-got, +want):\n%s", diff) + } + }) + + jsonV1 := `{ + "SchemaVersion": "1", + "Messages": [ + { + "Hash": "4M4Q3IXBARPQMFVXHJBDCYQMWU5H5FBKD7MFF75HE4O5JMIWR2UA", + "AUM": { + "MessageKind": "checkpoint", + "PrevAUMHash": "BKVVXHOVBW7Y7YXYTLVVLMNSYG6DS5GVRVSYZLASNU3AQKA732XQ", + "State": { + "DisablementSecrets": [ + "010203", + "040506", + "070809" + ], + "Keys": [ + { + "Kind": "25519", + "Votes": 1, + "Public": "tlpub:0101", + "Meta": { + "de": "eins", + "en": "one", + "es": "uno" + } + } + ], + "StateID1": 0, + "StateID2": 0 + }, + "Votes": 1 + }, + "Raw": "pAEFAlggCqtbndUNv4_i-JrrVbGywbw5dNWNZYysEm02CCgf3q8FowH2AoNDAQIDQwQFBkMHCAkDgaQBAQIBA0IBAQyjYmRlZGVpbnNiZW5jb25lYmVzY3VubwYB" + }, + { + "Hash": "BKVVXHOVBW7Y7YXYTLVVLMNSYG6DS5GVRVSYZLASNU3AQKA732XQ", + "AUM": { + "MessageKind": "remove-key", + "PrevAUMHash": "UKJIKFHILQ62AEN7MQIFHXJ6SFVDGQCQA3OHVI3LWVPM736EMSAA", + "KeyID": "tlpub:0303", + "Meta": { + "de": "drei", + "en": "three", + "es": "tres" + }, + "Signatures": [ + { + "KeyID": "tlpub:0304", + "Signature": "BAU=" + } + ] + }, + "Raw": "pQECAlggopKFFOhcPaARv2QQU90-kWozQFAG3Hqja7Vez-_EZIAEQgMDB6NiZGVkZHJlaWJlbmV0aHJlZWJlc2R0cmVzF4GiAUIDBAJCBAU=" + }, + { + "Hash": "UKJIKFHILQ62AEN7MQIFHXJ6SFVDGQCQA3OHVI3LWVPM736EMSAA", + "AUM": { + "MessageKind": "add-key", + "Key": { + "Kind": "25519", + "Votes": 1, + "Public": "tlpub:0202" + } + }, + "Raw": "owEBAvYDowEBAgEDQgIC" + } + ] +} +` + + t.Run("json-1", func(t *testing.T) { + t.Parallel() + t.Logf("BOOM") + + var outBuf bytes.Buffer + json := jsonoutput.JSONSchemaVersion{ + IsSet: true, + Value: 1, + } + useColor := false + + printNetworkLockLog(updates, &outBuf, json, useColor) + + want := jsonV1 + t.Logf("%s", outBuf.String()) + + if diff := cmp.Diff(outBuf.String(), want); diff != "" { + t.Fatalf("wrong output (-got, +want):\n%s", diff) + } + }) +} diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 53dc998bd..8b576ffc3 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -85,6 +85,7 @@ tailscale.com/cmd/tailscale 
dependencies: (generated by github.com/tailscale/dep tailscale.com/cmd/tailscale/cli from tailscale.com/cmd/tailscale tailscale.com/cmd/tailscale/cli/ffcomplete from tailscale.com/cmd/tailscale/cli tailscale.com/cmd/tailscale/cli/ffcomplete/internal from tailscale.com/cmd/tailscale/cli/ffcomplete + tailscale.com/cmd/tailscale/cli/jsonoutput from tailscale.com/cmd/tailscale/cli tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp diff --git a/tka/aum.go b/tka/aum.go index 6d75830bd..bd17b2098 100644 --- a/tka/aum.go +++ b/tka/aum.go @@ -55,6 +55,17 @@ func (h AUMHash) IsZero() bool { return h == (AUMHash{}) } +// PrevAUMHash represents the BLAKE2s digest of an Authority Update Message (AUM). +// Unlike an AUMHash, this can be empty if there is no previous AUM hash +// (which occurs in the genesis AUM). +type PrevAUMHash []byte + +// String returns the PrevAUMHash encoded as base32. +// This is suitable for use as a filename, and for storing in text-preferred media. +func (h PrevAUMHash) String() string { + return base32StdNoPad.EncodeToString(h[:]) +} + // AUMKind describes valid AUM types. type AUMKind uint8 @@ -119,8 +130,8 @@ func (k AUMKind) String() string { // behavior of old clients (which will ignore the field). // - No floats! type AUM struct { - MessageKind AUMKind `cbor:"1,keyasint"` - PrevAUMHash []byte `cbor:"2,keyasint"` + MessageKind AUMKind `cbor:"1,keyasint"` + PrevAUMHash PrevAUMHash `cbor:"2,keyasint"` // Key encodes a public key to be added to the key authority. // This field is used for AddKey AUMs. From a5b2f185679e1eb280f3056e224a4ed92268896d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 16 Nov 2025 18:36:27 -0800 Subject: [PATCH 1672/1708] control/controlclient: remove some public API, move to Options & test-only Includes adding StartPaused, which will be used in a future change to enable netmap caching testing. Updates #12639 Change-Id: Iec39915d33b8d75e9b8315b281b1af2f5d13a44a Signed-off-by: Brad Fitzpatrick --- control/controlclient/auto.go | 35 ++++++++++++++++++++++++------- control/controlclient/direct.go | 8 +++++++ ipn/ipnlocal/network-lock_test.go | 4 +++- 3 files changed, 38 insertions(+), 9 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 9d648409b..20795d5a7 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -23,6 +23,7 @@ import ( "tailscale.com/util/backoff" "tailscale.com/util/clientmetric" "tailscale.com/util/execqueue" + "tailscale.com/util/testenv" ) type LoginGoal struct { @@ -123,6 +124,7 @@ type Auto struct { mu sync.Mutex // mutex guards the following fields + started bool // whether [Auto.Start] has been called wantLoggedIn bool // whether the user wants to be logged in per last method call urlToVisit string // the last url we were told to visit expiry time.Time @@ -150,15 +152,21 @@ type Auto struct { // New creates and starts a new Auto. func New(opts Options) (*Auto, error) { - c, err := NewNoStart(opts) - if c != nil { - c.Start() + c, err := newNoStart(opts) + if err != nil { + return nil, err + } + if opts.StartPaused { + c.SetPaused(true) + } + if !opts.SkipStartForTests { + c.start() } return c, err } -// NewNoStart creates a new Auto, but without calling Start on it. 
-func NewNoStart(opts Options) (_ *Auto, err error) { +// newNoStart creates a new Auto, but without calling Start on it. +func newNoStart(opts Options) (_ *Auto, err error) { direct, err := NewDirect(opts) if err != nil { return nil, err @@ -218,10 +226,21 @@ func (c *Auto) SetPaused(paused bool) { c.unpauseWaiters = nil } -// Start starts the client's goroutines. +// StartForTest starts the client's goroutines. // -// It should only be called for clients created by NewNoStart. -func (c *Auto) Start() { +// It should only be called for clients created with [Options.SkipStartForTests]. +func (c *Auto) StartForTest() { + testenv.AssertInTest() + c.start() +} + +func (c *Auto) start() { + c.mu.Lock() + defer c.mu.Unlock() + if c.started { + return + } + c.started = true go c.authRoutine() go c.mapRoutine() go c.updateRoutine() diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 9e7d10d8d..1e1ce781f 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -146,6 +146,14 @@ type Options struct { ControlKnobs *controlknobs.Knobs // or nil to ignore Bus *eventbus.Bus // non-nil, for setting up publishers + SkipStartForTests bool // if true, don't call [Auto.Start] to avoid any background goroutines (for tests only) + + // StartPaused indicates whether the client should start in a paused state + // where it doesn't do network requests. This primarily exists for testing + // but not necessarily "go test" tests, so it isn't restricted to only + // being used in tests. + StartPaused bool + // Observer is called when there's a change in status to report // from the control client. // If nil, no status updates are reported. diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 00d4ff6d9..5fa072883 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -60,9 +60,11 @@ func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *even NoiseTestClient: c, Dialer: dialer, Bus: bus, + + SkipStartForTests: true, } - cc, err := controlclient.NewNoStart(opts) + cc, err := controlclient.New(opts) if err != nil { t.Fatal(err) } From 1e95bfa1848209b004cacb612dda6a899539653f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 17 Nov 2025 07:54:29 -0800 Subject: [PATCH 1673/1708] ipn: fix typo in comment Updates #cleanup Change-Id: Iec66518abd656c64943a58eb6d92f342e627a613 Signed-off-by: Brad Fitzpatrick --- ipn/ipn_view.go | 2 +- ipn/prefs.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 3179e3bb5..12fe93bab 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -444,7 +444,7 @@ func (v PrefsView) RelayServerPort() views.ValuePointer[int] { // AllowSingleHosts was a legacy field that was always true // for the past 4.5 years. It controlled whether Tailscale -// peers got /32 or /127 routes for each other. +// peers got /32 or /128 routes for each other. // As of 2024-05-17 we're starting to ignore it, but to let // people still downgrade Tailscale versions and not break // all peer-to-peer networking we still write it to disk (as JSON) diff --git a/ipn/prefs.go b/ipn/prefs.go index 81dd1c1c3..796098c8a 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -284,7 +284,7 @@ type Prefs struct { // AllowSingleHosts was a legacy field that was always true // for the past 4.5 years. It controlled whether Tailscale - // peers got /32 or /127 routes for each other. + // peers got /32 or /128 routes for each other. 
// As of 2024-05-17 we're starting to ignore it, but to let // people still downgrade Tailscale versions and not break // all peer-to-peer networking we still write it to disk (as JSON) From 200383dce5d93faedc77aa0c769d96468df41d6c Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 17 Nov 2025 16:35:36 +0000 Subject: [PATCH 1674/1708] various: add more missing apostrophes in comments Updates #cleanup Change-Id: I79a0fda9783064a226ee9bcee2c1148212f6df7b Signed-off-by: Alex Chan --- tka/tailchonk.go | 4 ++-- wgengine/magicsock/magicsock_test.go | 2 +- wgengine/router/osrouter/router_linux.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 90f99966c..3e8d1b6c8 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -678,7 +678,7 @@ func markAncestorIntersectionAUMs(storage Chonk, verdict map[AUMHash]retainState toScan := make([]AUMHash, 0, len(verdict)) for h, v := range verdict { if (v & retainAUMMask) == 0 { - continue // not marked for retention, so dont need to consider it + continue // not marked for retention, so don't need to consider it } if h == candidateAncestor { continue @@ -781,7 +781,7 @@ func markDescendantAUMs(storage Chonk, verdict map[AUMHash]retainState) error { toScan := make([]AUMHash, 0, len(verdict)) for h, v := range verdict { if v&retainAUMMask == 0 { - continue // not marked, so dont need to mark descendants + continue // not marked, so don't need to mark descendants } toScan = append(toScan, h) } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 09c54f504..a0142134a 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -2462,7 +2462,7 @@ func TestIsWireGuardOnlyPickEndpointByPing(t *testing.T) { if len(state.recentPongs) != 1 { t.Errorf("IPv4 address did not have a recentPong entry: got %v, want %v", len(state.recentPongs), 1) } - // Set the latency extremely high so we dont choose endpoint during the next + // Set the latency extremely high so we don't choose endpoint during the next // addrForSendLocked call. state.recentPongs[state.recentPong].latency = time.Second } diff --git a/wgengine/router/osrouter/router_linux.go b/wgengine/router/osrouter/router_linux.go index 196e1d552..7442c045e 100644 --- a/wgengine/router/osrouter/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -1617,7 +1617,7 @@ func checkOpenWRTUsingMWAN3() (bool, error) { // We want to match on a rule like this: // 2001: from all fwmark 0x100/0x3f00 lookup 1 // - // We dont match on the mask because it can vary, or the + // We don't match on the mask because it can vary, or the // table because I'm not sure if it can vary. if r.Priority >= 2001 && r.Priority <= 2004 && r.Mark != 0 { return true, nil From d01081683c44ef728c1273e3de2b285cd4c30ee1 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Mon, 17 Nov 2025 09:05:18 -0800 Subject: [PATCH 1675/1708] go.mod: bump golang.org/x/crypto (#17907) Pick up a fix for https://pkg.go.dev/vuln/GO-2025-4116 (even though we're not affected). 
Updates #cleanup Change-Id: I9f2571b17c1f14db58ece8a5a34785805217d9dd Signed-off-by: Andrew Lytvynov --- atomicfile/zsyscall_windows.go | 2 +- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- flake.nix | 2 +- go.mod | 21 ++++++---- go.mod.sri | 2 +- go.sum | 42 +++++++++++-------- ipn/desktop/zsyscall_windows.go | 24 +++++------ net/netns/zsyscall_windows.go | 2 +- net/portmapper/pmpresultcode_string.go | 5 ++- net/sockstats/label_string.go | 5 ++- net/tshttpproxy/zsyscall_windows.go | 8 ++-- shell.nix | 2 +- tsnet/depaware.txt | 2 +- util/osdiag/zsyscall_windows.go | 10 ++--- util/winutil/authenticode/zsyscall_windows.go | 20 ++++----- util/winutil/gp/zsyscall_windows.go | 12 +++--- util/winutil/s4u/zsyscall_windows.go | 16 +++---- util/winutil/winenv/zsyscall_windows.go | 6 +-- util/winutil/zsyscall_windows.go | 26 ++++++------ wgengine/magicsock/discopingpurpose_string.go | 5 ++- 22 files changed, 115 insertions(+), 103 deletions(-) diff --git a/atomicfile/zsyscall_windows.go b/atomicfile/zsyscall_windows.go index f2f0b6d08..bd1bf8113 100644 --- a/atomicfile/zsyscall_windows.go +++ b/atomicfile/zsyscall_windows.go @@ -44,7 +44,7 @@ var ( ) func replaceFileW(replaced *uint16, replacement *uint16, backup *uint16, flags uint32, exclude unsafe.Pointer, reserved unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall6(procReplaceFileW.Addr(), 6, uintptr(unsafe.Pointer(replaced)), uintptr(unsafe.Pointer(replacement)), uintptr(unsafe.Pointer(backup)), uintptr(flags), uintptr(exclude), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procReplaceFileW.Addr(), uintptr(unsafe.Pointer(replaced)), uintptr(unsafe.Pointer(replacement)), uintptr(unsafe.Pointer(backup)), uintptr(flags), uintptr(exclude), uintptr(reserved)) if int32(r1) == 0 { err = errnoErr(e1) } diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 4542fcad6..16ad089f3 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -997,7 +997,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ - crypto/fips140 from crypto/tls/internal/fips140tls + crypto/fips140 from crypto/tls/internal/fips140tls+ crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index be0fd799e..d15402092 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -570,7 +570,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ - crypto/fips140 from crypto/tls/internal/fips140tls + crypto/fips140 from crypto/tls/internal/fips140tls+ crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index c68fab634..14db7414a 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -399,7 +399,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ - crypto/fips140 from crypto/tls/internal/fips140tls + crypto/fips140 from crypto/tls/internal/fips140tls+ crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ 
crypto/internal/boring from crypto/aes+ diff --git a/flake.nix b/flake.nix index 217df38c3..fc3a466fc 100644 --- a/flake.nix +++ b/flake.nix @@ -151,5 +151,5 @@ }); }; } -# nix-direnv cache busting line: sha256-ADuIW4GAiskY0nRFWFk50rzihRIXIdm1Wk7IxfYKMPg= +# nix-direnv cache busting line: sha256-sGPgML2YM/XNWfsAdDZvzWHagcydwCmR6nKOHJj5COs= diff --git a/go.mod b/go.mod index fc8870baf..3b4f34b2d 100644 --- a/go.mod +++ b/go.mod @@ -102,21 +102,21 @@ require ( go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.38.0 + golang.org/x/crypto v0.44.0 golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac - golang.org/x/mod v0.24.0 - golang.org/x/net v0.40.0 + golang.org/x/mod v0.30.0 + golang.org/x/net v0.47.0 golang.org/x/oauth2 v0.30.0 - golang.org/x/sync v0.14.0 - golang.org/x/sys v0.33.0 - golang.org/x/term v0.32.0 + golang.org/x/sync v0.18.0 + golang.org/x/sys v0.38.0 + golang.org/x/term v0.37.0 golang.org/x/time v0.11.0 - golang.org/x/tools v0.33.0 + golang.org/x/tools v0.39.0 golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 golang.zx2c4.com/wireguard/windows v0.5.3 gopkg.in/square/go-jose.v2 v2.6.0 gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 - honnef.co/go/tools v0.6.1 + honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0 k8s.io/api v0.32.0 k8s.io/apimachinery v0.32.0 k8s.io/apiserver v0.32.0 @@ -187,6 +187,9 @@ require ( go.opentelemetry.io/otel/metric v1.33.0 // indirect go.opentelemetry.io/otel/trace v1.33.0 // indirect go.uber.org/automaxprocs v1.5.3 // indirect + golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect + golang.org/x/tools/go/expect v0.1.1-deprecated // indirect + golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect k8s.io/component-base v0.32.0 // indirect @@ -409,7 +412,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect golang.org/x/image v0.27.0 // indirect - golang.org/x/text v0.25.0 // indirect + golang.org/x/text v0.31.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/protobuf v1.36.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.mod.sri b/go.mod.sri index b7df2cc2c..76c72f0c9 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-ADuIW4GAiskY0nRFWFk50rzihRIXIdm1Wk7IxfYKMPg= +sha256-sGPgML2YM/XNWfsAdDZvzWHagcydwCmR6nKOHJj5COs= diff --git a/go.sum b/go.sum index 177efd4f7..f0758f2d4 100644 --- a/go.sum +++ b/go.sum @@ -1128,8 +1128,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= -golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= +golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU= +golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1177,8 +1177,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= -golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1218,8 +1218,8 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= -golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1241,8 +1241,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= -golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1305,16 +1305,18 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= 
-golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1325,8 +1327,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= -golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1396,8 +1398,12 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= -golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated 
h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1538,8 +1544,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= -honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= +honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0 h1:5SXjd4ET5dYijLaf0O3aOenC0Z4ZafIWSpjUzsQaNho= +honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0/go.mod h1:EPDDhEZqVHhWuPI5zPAsjU0U7v9xNIWjoOVyZ5ZcniQ= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= diff --git a/ipn/desktop/zsyscall_windows.go b/ipn/desktop/zsyscall_windows.go index 535274016..8d97c4d80 100644 --- a/ipn/desktop/zsyscall_windows.go +++ b/ipn/desktop/zsyscall_windows.go @@ -57,12 +57,12 @@ var ( ) func setLastError(dwErrorCode uint32) { - syscall.Syscall(procSetLastError.Addr(), 1, uintptr(dwErrorCode), 0, 0) + syscall.SyscallN(procSetLastError.Addr(), uintptr(dwErrorCode)) return } func createWindowEx(dwExStyle uint32, lpClassName *uint16, lpWindowName *uint16, dwStyle uint32, x int32, y int32, nWidth int32, nHeight int32, hWndParent windows.HWND, hMenu windows.Handle, hInstance windows.Handle, lpParam unsafe.Pointer) (hWnd windows.HWND, err error) { - r0, _, e1 := syscall.Syscall12(procCreateWindowExW.Addr(), 12, uintptr(dwExStyle), uintptr(unsafe.Pointer(lpClassName)), uintptr(unsafe.Pointer(lpWindowName)), uintptr(dwStyle), uintptr(x), uintptr(y), uintptr(nWidth), uintptr(nHeight), uintptr(hWndParent), uintptr(hMenu), uintptr(hInstance), uintptr(lpParam)) + r0, _, e1 := syscall.SyscallN(procCreateWindowExW.Addr(), uintptr(dwExStyle), uintptr(unsafe.Pointer(lpClassName)), uintptr(unsafe.Pointer(lpWindowName)), uintptr(dwStyle), uintptr(x), uintptr(y), uintptr(nWidth), uintptr(nHeight), uintptr(hWndParent), uintptr(hMenu), uintptr(hInstance), uintptr(lpParam)) hWnd = windows.HWND(r0) if hWnd == 0 { err = errnoErr(e1) @@ -71,13 +71,13 @@ func createWindowEx(dwExStyle uint32, lpClassName *uint16, lpWindowName *uint16, } func defWindowProc(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) (res uintptr) { - r0, _, _ := syscall.Syscall6(procDefWindowProcW.Addr(), 4, uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam), 0, 0) + r0, _, _ := syscall.SyscallN(procDefWindowProcW.Addr(), uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam)) res = uintptr(r0) return } func destroyWindow(hwnd windows.HWND) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyWindow.Addr(), 1, uintptr(hwnd), 0, 0) + r1, _, e1 := syscall.SyscallN(procDestroyWindow.Addr(), uintptr(hwnd)) if int32(r1) == 0 { err = 
errnoErr(e1) } @@ -85,24 +85,24 @@ func destroyWindow(hwnd windows.HWND) (err error) { } func dispatchMessage(lpMsg *_MSG) (res uintptr) { - r0, _, _ := syscall.Syscall(procDispatchMessageW.Addr(), 1, uintptr(unsafe.Pointer(lpMsg)), 0, 0) + r0, _, _ := syscall.SyscallN(procDispatchMessageW.Addr(), uintptr(unsafe.Pointer(lpMsg))) res = uintptr(r0) return } func getMessage(lpMsg *_MSG, hwnd windows.HWND, msgMin uint32, msgMax uint32) (ret int32) { - r0, _, _ := syscall.Syscall6(procGetMessageW.Addr(), 4, uintptr(unsafe.Pointer(lpMsg)), uintptr(hwnd), uintptr(msgMin), uintptr(msgMax), 0, 0) + r0, _, _ := syscall.SyscallN(procGetMessageW.Addr(), uintptr(unsafe.Pointer(lpMsg)), uintptr(hwnd), uintptr(msgMin), uintptr(msgMax)) ret = int32(r0) return } func postQuitMessage(exitCode int32) { - syscall.Syscall(procPostQuitMessage.Addr(), 1, uintptr(exitCode), 0, 0) + syscall.SyscallN(procPostQuitMessage.Addr(), uintptr(exitCode)) return } func registerClassEx(windowClass *_WNDCLASSEX) (atom uint16, err error) { - r0, _, e1 := syscall.Syscall(procRegisterClassExW.Addr(), 1, uintptr(unsafe.Pointer(windowClass)), 0, 0) + r0, _, e1 := syscall.SyscallN(procRegisterClassExW.Addr(), uintptr(unsafe.Pointer(windowClass))) atom = uint16(r0) if atom == 0 { err = errnoErr(e1) @@ -111,19 +111,19 @@ func registerClassEx(windowClass *_WNDCLASSEX) (atom uint16, err error) { } func sendMessage(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) (res uintptr) { - r0, _, _ := syscall.Syscall6(procSendMessageW.Addr(), 4, uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam), 0, 0) + r0, _, _ := syscall.SyscallN(procSendMessageW.Addr(), uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam)) res = uintptr(r0) return } func translateMessage(lpMsg *_MSG) (res bool) { - r0, _, _ := syscall.Syscall(procTranslateMessage.Addr(), 1, uintptr(unsafe.Pointer(lpMsg)), 0, 0) + r0, _, _ := syscall.SyscallN(procTranslateMessage.Addr(), uintptr(unsafe.Pointer(lpMsg))) res = r0 != 0 return } func registerSessionNotification(hServer windows.Handle, hwnd windows.HWND, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procWTSRegisterSessionNotificationEx.Addr(), 3, uintptr(hServer), uintptr(hwnd), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procWTSRegisterSessionNotificationEx.Addr(), uintptr(hServer), uintptr(hwnd), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -131,7 +131,7 @@ func registerSessionNotification(hServer windows.Handle, hwnd windows.HWND, flag } func unregisterSessionNotification(hServer windows.Handle, hwnd windows.HWND) (err error) { - r1, _, e1 := syscall.Syscall(procWTSUnRegisterSessionNotificationEx.Addr(), 2, uintptr(hServer), uintptr(hwnd), 0) + r1, _, e1 := syscall.SyscallN(procWTSUnRegisterSessionNotificationEx.Addr(), uintptr(hServer), uintptr(hwnd)) if int32(r1) == 0 { err = errnoErr(e1) } diff --git a/net/netns/zsyscall_windows.go b/net/netns/zsyscall_windows.go index 07e2181be..3d8f06e09 100644 --- a/net/netns/zsyscall_windows.go +++ b/net/netns/zsyscall_windows.go @@ -45,7 +45,7 @@ var ( ) func getBestInterfaceEx(sockaddr *winipcfg.RawSockaddrInet, bestIfaceIndex *uint32) (ret error) { - r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(unsafe.Pointer(sockaddr)), uintptr(unsafe.Pointer(bestIfaceIndex)), 0) + r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), uintptr(unsafe.Pointer(sockaddr)), uintptr(unsafe.Pointer(bestIfaceIndex))) if r0 != 0 { ret = syscall.Errno(r0) } diff --git a/net/portmapper/pmpresultcode_string.go 
b/net/portmapper/pmpresultcode_string.go index 603636ade..18d911d94 100644 --- a/net/portmapper/pmpresultcode_string.go +++ b/net/portmapper/pmpresultcode_string.go @@ -24,8 +24,9 @@ const _pmpResultCode_name = "OKUnsupportedVersionNotAuthorizedNetworkFailureOutO var _pmpResultCode_index = [...]uint8{0, 2, 20, 33, 47, 61, 78} func (i pmpResultCode) String() string { - if i >= pmpResultCode(len(_pmpResultCode_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_pmpResultCode_index)-1 { return "pmpResultCode(" + strconv.FormatInt(int64(i), 10) + ")" } - return _pmpResultCode_name[_pmpResultCode_index[i]:_pmpResultCode_index[i+1]] + return _pmpResultCode_name[_pmpResultCode_index[idx]:_pmpResultCode_index[idx+1]] } diff --git a/net/sockstats/label_string.go b/net/sockstats/label_string.go index f9a111ad7..cc503d943 100644 --- a/net/sockstats/label_string.go +++ b/net/sockstats/label_string.go @@ -28,8 +28,9 @@ const _Label_name = "ControlClientAutoControlClientDialerDERPHTTPClientLogtailLo var _Label_index = [...]uint8{0, 17, 36, 50, 63, 78, 93, 107, 123, 140, 157, 169, 186, 201} func (i Label) String() string { - if i >= Label(len(_Label_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Label_index)-1 { return "Label(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Label_name[_Label_index[i]:_Label_index[i+1]] + return _Label_name[_Label_index[idx]:_Label_index[idx+1]] } diff --git a/net/tshttpproxy/zsyscall_windows.go b/net/tshttpproxy/zsyscall_windows.go index c07e9ee03..5dcfae83e 100644 --- a/net/tshttpproxy/zsyscall_windows.go +++ b/net/tshttpproxy/zsyscall_windows.go @@ -48,7 +48,7 @@ var ( ) func globalFree(hglobal winHGlobal) (err error) { - r1, _, e1 := syscall.Syscall(procGlobalFree.Addr(), 1, uintptr(hglobal), 0, 0) + r1, _, e1 := syscall.SyscallN(procGlobalFree.Addr(), uintptr(hglobal)) if r1 == 0 { err = errnoErr(e1) } @@ -56,7 +56,7 @@ func globalFree(hglobal winHGlobal) (err error) { } func winHTTPCloseHandle(whi winHTTPInternet) (err error) { - r1, _, e1 := syscall.Syscall(procWinHttpCloseHandle.Addr(), 1, uintptr(whi), 0, 0) + r1, _, e1 := syscall.SyscallN(procWinHttpCloseHandle.Addr(), uintptr(whi)) if r1 == 0 { err = errnoErr(e1) } @@ -64,7 +64,7 @@ func winHTTPCloseHandle(whi winHTTPInternet) (err error) { } func winHTTPGetProxyForURL(whi winHTTPInternet, url *uint16, options *winHTTPAutoProxyOptions, proxyInfo *winHTTPProxyInfo) (err error) { - r1, _, e1 := syscall.Syscall6(procWinHttpGetProxyForUrl.Addr(), 4, uintptr(whi), uintptr(unsafe.Pointer(url)), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(proxyInfo)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWinHttpGetProxyForUrl.Addr(), uintptr(whi), uintptr(unsafe.Pointer(url)), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(proxyInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -72,7 +72,7 @@ func winHTTPGetProxyForURL(whi winHTTPInternet, url *uint16, options *winHTTPAut } func winHTTPOpen(agent *uint16, accessType uint32, proxy *uint16, proxyBypass *uint16, flags uint32) (whi winHTTPInternet, err error) { - r0, _, e1 := syscall.Syscall6(procWinHttpOpen.Addr(), 5, uintptr(unsafe.Pointer(agent)), uintptr(accessType), uintptr(unsafe.Pointer(proxy)), uintptr(unsafe.Pointer(proxyBypass)), uintptr(flags), 0) + r0, _, e1 := syscall.SyscallN(procWinHttpOpen.Addr(), uintptr(unsafe.Pointer(agent)), uintptr(accessType), uintptr(unsafe.Pointer(proxy)), uintptr(unsafe.Pointer(proxyBypass)), uintptr(flags)) whi = winHTTPInternet(r0) if whi == 0 { err = errnoErr(e1) diff --git a/shell.nix b/shell.nix 
index f43108753..ffb28a183 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-ADuIW4GAiskY0nRFWFk50rzihRIXIdm1Wk7IxfYKMPg= +# nix-direnv cache busting line: sha256-sGPgML2YM/XNWfsAdDZvzWHagcydwCmR6nKOHJj5COs= diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 6eb493ef8..7d5ec0a60 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -392,7 +392,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ - crypto/fips140 from crypto/tls/internal/fips140tls + crypto/fips140 from crypto/tls/internal/fips140tls+ crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ diff --git a/util/osdiag/zsyscall_windows.go b/util/osdiag/zsyscall_windows.go index ab0d18d3f..2a11b4644 100644 --- a/util/osdiag/zsyscall_windows.go +++ b/util/osdiag/zsyscall_windows.go @@ -51,7 +51,7 @@ var ( ) func regEnumValue(key registry.Key, index uint32, valueName *uint16, valueNameLen *uint32, reserved *uint32, valueType *uint32, pData *byte, cbData *uint32) (ret error) { - r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(valueName)), uintptr(unsafe.Pointer(valueNameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(pData)), uintptr(unsafe.Pointer(cbData)), 0) + r0, _, _ := syscall.SyscallN(procRegEnumValueW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(valueName)), uintptr(unsafe.Pointer(valueNameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(pData)), uintptr(unsafe.Pointer(cbData))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -59,7 +59,7 @@ func regEnumValue(key registry.Key, index uint32, valueName *uint16, valueNameLe } func globalMemoryStatusEx(memStatus *_MEMORYSTATUSEX) (err error) { - r1, _, e1 := syscall.Syscall(procGlobalMemoryStatusEx.Addr(), 1, uintptr(unsafe.Pointer(memStatus)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGlobalMemoryStatusEx.Addr(), uintptr(unsafe.Pointer(memStatus))) if int32(r1) == 0 { err = errnoErr(e1) } @@ -67,19 +67,19 @@ func globalMemoryStatusEx(memStatus *_MEMORYSTATUSEX) (err error) { } func wscEnumProtocols(iProtocols *int32, protocolBuffer *wsaProtocolInfo, bufLen *uint32, errno *int32) (ret int32) { - r0, _, _ := syscall.Syscall6(procWSCEnumProtocols.Addr(), 4, uintptr(unsafe.Pointer(iProtocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufLen)), uintptr(unsafe.Pointer(errno)), 0, 0) + r0, _, _ := syscall.SyscallN(procWSCEnumProtocols.Addr(), uintptr(unsafe.Pointer(iProtocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufLen)), uintptr(unsafe.Pointer(errno))) ret = int32(r0) return } func wscGetProviderInfo(providerId *windows.GUID, infoType _WSC_PROVIDER_INFO_TYPE, info unsafe.Pointer, infoSize *uintptr, flags uint32, errno *int32) (ret int32) { - r0, _, _ := syscall.Syscall6(procWSCGetProviderInfo.Addr(), 6, uintptr(unsafe.Pointer(providerId)), uintptr(infoType), uintptr(info), uintptr(unsafe.Pointer(infoSize)), uintptr(flags), uintptr(unsafe.Pointer(errno))) + r0, _, _ := syscall.SyscallN(procWSCGetProviderInfo.Addr(), uintptr(unsafe.Pointer(providerId)), uintptr(infoType), uintptr(info), uintptr(unsafe.Pointer(infoSize)), uintptr(flags), uintptr(unsafe.Pointer(errno))) ret = int32(r0) return } 
func wscGetProviderPath(providerId *windows.GUID, providerDllPath *uint16, providerDllPathLen *int32, errno *int32) (ret int32) { - r0, _, _ := syscall.Syscall6(procWSCGetProviderPath.Addr(), 4, uintptr(unsafe.Pointer(providerId)), uintptr(unsafe.Pointer(providerDllPath)), uintptr(unsafe.Pointer(providerDllPathLen)), uintptr(unsafe.Pointer(errno)), 0, 0) + r0, _, _ := syscall.SyscallN(procWSCGetProviderPath.Addr(), uintptr(unsafe.Pointer(providerId)), uintptr(unsafe.Pointer(providerDllPath)), uintptr(unsafe.Pointer(providerDllPathLen)), uintptr(unsafe.Pointer(errno))) ret = int32(r0) return } diff --git a/util/winutil/authenticode/zsyscall_windows.go b/util/winutil/authenticode/zsyscall_windows.go index 643721e06..f1fba2828 100644 --- a/util/winutil/authenticode/zsyscall_windows.go +++ b/util/winutil/authenticode/zsyscall_windows.go @@ -56,7 +56,7 @@ var ( ) func cryptMsgClose(cryptMsg windows.Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCryptMsgClose.Addr(), 1, uintptr(cryptMsg), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptMsgClose.Addr(), uintptr(cryptMsg)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -64,7 +64,7 @@ func cryptMsgClose(cryptMsg windows.Handle) (err error) { } func cryptMsgGetParam(cryptMsg windows.Handle, paramType uint32, index uint32, data unsafe.Pointer, dataLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptMsgGetParam.Addr(), 5, uintptr(cryptMsg), uintptr(paramType), uintptr(index), uintptr(data), uintptr(unsafe.Pointer(dataLen)), 0) + r1, _, e1 := syscall.SyscallN(procCryptMsgGetParam.Addr(), uintptr(cryptMsg), uintptr(paramType), uintptr(index), uintptr(data), uintptr(unsafe.Pointer(dataLen))) if int32(r1) == 0 { err = errnoErr(e1) } @@ -72,7 +72,7 @@ func cryptMsgGetParam(cryptMsg windows.Handle, paramType uint32, index uint32, d } func cryptVerifyMessageSignature(pVerifyPara *_CRYPT_VERIFY_MESSAGE_PARA, signerIndex uint32, pbSignedBlob *byte, cbSignedBlob uint32, pbDecoded *byte, pdbDecoded *uint32, ppSignerCert **windows.CertContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptVerifyMessageSignature.Addr(), 7, uintptr(unsafe.Pointer(pVerifyPara)), uintptr(signerIndex), uintptr(unsafe.Pointer(pbSignedBlob)), uintptr(cbSignedBlob), uintptr(unsafe.Pointer(pbDecoded)), uintptr(unsafe.Pointer(pdbDecoded)), uintptr(unsafe.Pointer(ppSignerCert)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptVerifyMessageSignature.Addr(), uintptr(unsafe.Pointer(pVerifyPara)), uintptr(signerIndex), uintptr(unsafe.Pointer(pbSignedBlob)), uintptr(cbSignedBlob), uintptr(unsafe.Pointer(pbDecoded)), uintptr(unsafe.Pointer(pdbDecoded)), uintptr(unsafe.Pointer(ppSignerCert))) if int32(r1) == 0 { err = errnoErr(e1) } @@ -80,13 +80,13 @@ func cryptVerifyMessageSignature(pVerifyPara *_CRYPT_VERIFY_MESSAGE_PARA, signer } func msiGetFileSignatureInformation(signedObjectPath *uint16, flags uint32, certCtx **windows.CertContext, pbHashData *byte, cbHashData *uint32) (ret wingoes.HRESULT) { - r0, _, _ := syscall.Syscall6(procMsiGetFileSignatureInformationW.Addr(), 5, uintptr(unsafe.Pointer(signedObjectPath)), uintptr(flags), uintptr(unsafe.Pointer(certCtx)), uintptr(unsafe.Pointer(pbHashData)), uintptr(unsafe.Pointer(cbHashData)), 0) + r0, _, _ := syscall.SyscallN(procMsiGetFileSignatureInformationW.Addr(), uintptr(unsafe.Pointer(signedObjectPath)), uintptr(flags), uintptr(unsafe.Pointer(certCtx)), uintptr(unsafe.Pointer(pbHashData)), uintptr(unsafe.Pointer(cbHashData))) ret = wingoes.HRESULT(r0) return } func cryptCATAdminAcquireContext2(hCatAdmin 
*_HCATADMIN, pgSubsystem *windows.GUID, hashAlgorithm *uint16, strongHashPolicy *windows.CertStrongSignPara, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptCATAdminAcquireContext2.Addr(), 5, uintptr(unsafe.Pointer(hCatAdmin)), uintptr(unsafe.Pointer(pgSubsystem)), uintptr(unsafe.Pointer(hashAlgorithm)), uintptr(unsafe.Pointer(strongHashPolicy)), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptCATAdminAcquireContext2.Addr(), uintptr(unsafe.Pointer(hCatAdmin)), uintptr(unsafe.Pointer(pgSubsystem)), uintptr(unsafe.Pointer(hashAlgorithm)), uintptr(unsafe.Pointer(strongHashPolicy)), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -94,7 +94,7 @@ func cryptCATAdminAcquireContext2(hCatAdmin *_HCATADMIN, pgSubsystem *windows.GU } func cryptCATAdminCalcHashFromFileHandle2(hCatAdmin _HCATADMIN, file windows.Handle, pcbHash *uint32, pbHash *byte, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptCATAdminCalcHashFromFileHandle2.Addr(), 5, uintptr(hCatAdmin), uintptr(file), uintptr(unsafe.Pointer(pcbHash)), uintptr(unsafe.Pointer(pbHash)), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptCATAdminCalcHashFromFileHandle2.Addr(), uintptr(hCatAdmin), uintptr(file), uintptr(unsafe.Pointer(pcbHash)), uintptr(unsafe.Pointer(pbHash)), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -102,7 +102,7 @@ func cryptCATAdminCalcHashFromFileHandle2(hCatAdmin _HCATADMIN, file windows.Han } func cryptCATAdminEnumCatalogFromHash(hCatAdmin _HCATADMIN, pbHash *byte, cbHash uint32, flags uint32, prevCatInfo *_HCATINFO) (ret _HCATINFO, err error) { - r0, _, e1 := syscall.Syscall6(procCryptCATAdminEnumCatalogFromHash.Addr(), 5, uintptr(hCatAdmin), uintptr(unsafe.Pointer(pbHash)), uintptr(cbHash), uintptr(flags), uintptr(unsafe.Pointer(prevCatInfo)), 0) + r0, _, e1 := syscall.SyscallN(procCryptCATAdminEnumCatalogFromHash.Addr(), uintptr(hCatAdmin), uintptr(unsafe.Pointer(pbHash)), uintptr(cbHash), uintptr(flags), uintptr(unsafe.Pointer(prevCatInfo))) ret = _HCATINFO(r0) if ret == 0 { err = errnoErr(e1) @@ -111,7 +111,7 @@ func cryptCATAdminEnumCatalogFromHash(hCatAdmin _HCATADMIN, pbHash *byte, cbHash } func cryptCATAdminReleaseCatalogContext(hCatAdmin _HCATADMIN, hCatInfo _HCATINFO, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptCATAdminReleaseCatalogContext.Addr(), 3, uintptr(hCatAdmin), uintptr(hCatInfo), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procCryptCATAdminReleaseCatalogContext.Addr(), uintptr(hCatAdmin), uintptr(hCatInfo), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -119,7 +119,7 @@ func cryptCATAdminReleaseCatalogContext(hCatAdmin _HCATADMIN, hCatInfo _HCATINFO } func cryptCATAdminReleaseContext(hCatAdmin _HCATADMIN, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptCATAdminReleaseContext.Addr(), 2, uintptr(hCatAdmin), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptCATAdminReleaseContext.Addr(), uintptr(hCatAdmin), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -127,7 +127,7 @@ func cryptCATAdminReleaseContext(hCatAdmin _HCATADMIN, flags uint32) (err error) } func cryptCATAdminCatalogInfoFromContext(hCatInfo _HCATINFO, catInfo *_CATALOG_INFO, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptCATCatalogInfoFromContext.Addr(), 3, uintptr(hCatInfo), uintptr(unsafe.Pointer(catInfo)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procCryptCATCatalogInfoFromContext.Addr(), uintptr(hCatInfo), uintptr(unsafe.Pointer(catInfo)), 
uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } diff --git a/util/winutil/gp/zsyscall_windows.go b/util/winutil/gp/zsyscall_windows.go index 5e40ec3d1..41c240c26 100644 --- a/util/winutil/gp/zsyscall_windows.go +++ b/util/winutil/gp/zsyscall_windows.go @@ -50,7 +50,7 @@ var ( ) func impersonateLoggedOnUser(token windows.Token) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateLoggedOnUser.Addr(), 1, uintptr(token), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateLoggedOnUser.Addr(), uintptr(token)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -62,7 +62,7 @@ func enterCriticalPolicySection(machine bool) (handle policyLockHandle, err erro if machine { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procEnterCriticalPolicySection.Addr(), 1, uintptr(_p0), 0, 0) + r0, _, e1 := syscall.SyscallN(procEnterCriticalPolicySection.Addr(), uintptr(_p0)) handle = policyLockHandle(r0) if int32(handle) == 0 { err = errnoErr(e1) @@ -71,7 +71,7 @@ func enterCriticalPolicySection(machine bool) (handle policyLockHandle, err erro } func leaveCriticalPolicySection(handle policyLockHandle) (err error) { - r1, _, e1 := syscall.Syscall(procLeaveCriticalPolicySection.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procLeaveCriticalPolicySection.Addr(), uintptr(handle)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -83,7 +83,7 @@ func refreshPolicyEx(machine bool, flags uint32) (err error) { if machine { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procRefreshPolicyEx.Addr(), 2, uintptr(_p0), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procRefreshPolicyEx.Addr(), uintptr(_p0), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -95,7 +95,7 @@ func registerGPNotification(event windows.Handle, machine bool) (err error) { if machine { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procRegisterGPNotification.Addr(), 2, uintptr(event), uintptr(_p0), 0) + r1, _, e1 := syscall.SyscallN(procRegisterGPNotification.Addr(), uintptr(event), uintptr(_p0)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -103,7 +103,7 @@ func registerGPNotification(event windows.Handle, machine bool) (err error) { } func unregisterGPNotification(event windows.Handle) (err error) { - r1, _, e1 := syscall.Syscall(procUnregisterGPNotification.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnregisterGPNotification.Addr(), uintptr(event)) if int32(r1) == 0 { err = errnoErr(e1) } diff --git a/util/winutil/s4u/zsyscall_windows.go b/util/winutil/s4u/zsyscall_windows.go index 6a8c78427..db647dee4 100644 --- a/util/winutil/s4u/zsyscall_windows.go +++ b/util/winutil/s4u/zsyscall_windows.go @@ -52,7 +52,7 @@ var ( ) func allocateLocallyUniqueId(luid *windows.LUID) (err error) { - r1, _, e1 := syscall.Syscall(procAllocateLocallyUniqueId.Addr(), 1, uintptr(unsafe.Pointer(luid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procAllocateLocallyUniqueId.Addr(), uintptr(unsafe.Pointer(luid))) if int32(r1) == 0 { err = errnoErr(e1) } @@ -60,7 +60,7 @@ func allocateLocallyUniqueId(luid *windows.LUID) (err error) { } func impersonateLoggedOnUser(token windows.Token) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateLoggedOnUser.Addr(), 1, uintptr(token), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateLoggedOnUser.Addr(), uintptr(token)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -68,37 +68,37 @@ func impersonateLoggedOnUser(token windows.Token) (err error) { } func lsaConnectUntrusted(lsaHandle *_LSAHANDLE) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall(procLsaConnectUntrusted.Addr(), 1, 
uintptr(unsafe.Pointer(lsaHandle)), 0, 0) + r0, _, _ := syscall.SyscallN(procLsaConnectUntrusted.Addr(), uintptr(unsafe.Pointer(lsaHandle))) ret = windows.NTStatus(r0) return } func lsaDeregisterLogonProcess(lsaHandle _LSAHANDLE) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall(procLsaDeregisterLogonProcess.Addr(), 1, uintptr(lsaHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procLsaDeregisterLogonProcess.Addr(), uintptr(lsaHandle)) ret = windows.NTStatus(r0) return } func lsaFreeReturnBuffer(buffer uintptr) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall(procLsaFreeReturnBuffer.Addr(), 1, uintptr(buffer), 0, 0) + r0, _, _ := syscall.SyscallN(procLsaFreeReturnBuffer.Addr(), uintptr(buffer)) ret = windows.NTStatus(r0) return } func lsaLogonUser(lsaHandle _LSAHANDLE, originName *windows.NTString, logonType _SECURITY_LOGON_TYPE, authenticationPackage uint32, authenticationInformation unsafe.Pointer, authenticationInformationLength uint32, localGroups *windows.Tokengroups, sourceContext *_TOKEN_SOURCE, profileBuffer *uintptr, profileBufferLength *uint32, logonID *windows.LUID, token *windows.Token, quotas *_QUOTA_LIMITS, subStatus *windows.NTStatus) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall15(procLsaLogonUser.Addr(), 14, uintptr(lsaHandle), uintptr(unsafe.Pointer(originName)), uintptr(logonType), uintptr(authenticationPackage), uintptr(authenticationInformation), uintptr(authenticationInformationLength), uintptr(unsafe.Pointer(localGroups)), uintptr(unsafe.Pointer(sourceContext)), uintptr(unsafe.Pointer(profileBuffer)), uintptr(unsafe.Pointer(profileBufferLength)), uintptr(unsafe.Pointer(logonID)), uintptr(unsafe.Pointer(token)), uintptr(unsafe.Pointer(quotas)), uintptr(unsafe.Pointer(subStatus)), 0) + r0, _, _ := syscall.SyscallN(procLsaLogonUser.Addr(), uintptr(lsaHandle), uintptr(unsafe.Pointer(originName)), uintptr(logonType), uintptr(authenticationPackage), uintptr(authenticationInformation), uintptr(authenticationInformationLength), uintptr(unsafe.Pointer(localGroups)), uintptr(unsafe.Pointer(sourceContext)), uintptr(unsafe.Pointer(profileBuffer)), uintptr(unsafe.Pointer(profileBufferLength)), uintptr(unsafe.Pointer(logonID)), uintptr(unsafe.Pointer(token)), uintptr(unsafe.Pointer(quotas)), uintptr(unsafe.Pointer(subStatus))) ret = windows.NTStatus(r0) return } func lsaLookupAuthenticationPackage(lsaHandle _LSAHANDLE, packageName *windows.NTString, authenticationPackage *uint32) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall(procLsaLookupAuthenticationPackage.Addr(), 3, uintptr(lsaHandle), uintptr(unsafe.Pointer(packageName)), uintptr(unsafe.Pointer(authenticationPackage))) + r0, _, _ := syscall.SyscallN(procLsaLookupAuthenticationPackage.Addr(), uintptr(lsaHandle), uintptr(unsafe.Pointer(packageName)), uintptr(unsafe.Pointer(authenticationPackage))) ret = windows.NTStatus(r0) return } func lsaRegisterLogonProcess(logonProcessName *windows.NTString, lsaHandle *_LSAHANDLE, securityMode *_LSA_OPERATIONAL_MODE) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall(procLsaRegisterLogonProcess.Addr(), 3, uintptr(unsafe.Pointer(logonProcessName)), uintptr(unsafe.Pointer(lsaHandle)), uintptr(unsafe.Pointer(securityMode))) + r0, _, _ := syscall.SyscallN(procLsaRegisterLogonProcess.Addr(), uintptr(unsafe.Pointer(logonProcessName)), uintptr(unsafe.Pointer(lsaHandle)), uintptr(unsafe.Pointer(securityMode))) ret = windows.NTStatus(r0) return } diff --git a/util/winutil/winenv/zsyscall_windows.go b/util/winutil/winenv/zsyscall_windows.go index 2bdfdd9b1..7e93c7952 
100644 --- a/util/winutil/winenv/zsyscall_windows.go +++ b/util/winutil/winenv/zsyscall_windows.go @@ -55,7 +55,7 @@ func isDeviceRegisteredWithManagement(isMDMRegistered *bool, upnBufLen uint32, u if *isMDMRegistered { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procIsDeviceRegisteredWithManagement.Addr(), 3, uintptr(unsafe.Pointer(&_p0)), uintptr(upnBufLen), uintptr(unsafe.Pointer(upnBuf))) + r0, _, e1 := syscall.SyscallN(procIsDeviceRegisteredWithManagement.Addr(), uintptr(unsafe.Pointer(&_p0)), uintptr(upnBufLen), uintptr(unsafe.Pointer(upnBuf))) *isMDMRegistered = _p0 != 0 hr = int32(r0) if hr == 0 { @@ -65,13 +65,13 @@ func isDeviceRegisteredWithManagement(isMDMRegistered *bool, upnBufLen uint32, u } func verSetConditionMask(condMask verCondMask, typ verTypeMask, cond verCond) (res verCondMask) { - r0, _, _ := syscall.Syscall(procVerSetConditionMask.Addr(), 3, uintptr(condMask), uintptr(typ), uintptr(cond)) + r0, _, _ := syscall.SyscallN(procVerSetConditionMask.Addr(), uintptr(condMask), uintptr(typ), uintptr(cond)) res = verCondMask(r0) return } func verifyVersionInfo(verInfo *osVersionInfoEx, typ verTypeMask, cond verCondMask) (res bool) { - r0, _, _ := syscall.Syscall(procVerifyVersionInfoW.Addr(), 3, uintptr(unsafe.Pointer(verInfo)), uintptr(typ), uintptr(cond)) + r0, _, _ := syscall.SyscallN(procVerifyVersionInfoW.Addr(), uintptr(unsafe.Pointer(verInfo)), uintptr(typ), uintptr(cond)) res = r0 != 0 return } diff --git a/util/winutil/zsyscall_windows.go b/util/winutil/zsyscall_windows.go index b4674dff3..56aedb4c7 100644 --- a/util/winutil/zsyscall_windows.go +++ b/util/winutil/zsyscall_windows.go @@ -62,7 +62,7 @@ var ( ) func queryServiceConfig2(hService windows.Handle, infoLevel uint32, buf *byte, bufLen uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(hService), uintptr(infoLevel), uintptr(unsafe.Pointer(buf)), uintptr(bufLen), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(hService), uintptr(infoLevel), uintptr(unsafe.Pointer(buf)), uintptr(bufLen), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -70,19 +70,19 @@ func queryServiceConfig2(hService windows.Handle, infoLevel uint32, buf *byte, b } func getApplicationRestartSettings(process windows.Handle, commandLine *uint16, commandLineLen *uint32, flags *uint32) (ret wingoes.HRESULT) { - r0, _, _ := syscall.Syscall6(procGetApplicationRestartSettings.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(commandLineLen)), uintptr(unsafe.Pointer(flags)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetApplicationRestartSettings.Addr(), uintptr(process), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(commandLineLen)), uintptr(unsafe.Pointer(flags))) ret = wingoes.HRESULT(r0) return } func registerApplicationRestart(cmdLineExclExeName *uint16, flags uint32) (ret wingoes.HRESULT) { - r0, _, _ := syscall.Syscall(procRegisterApplicationRestart.Addr(), 2, uintptr(unsafe.Pointer(cmdLineExclExeName)), uintptr(flags), 0) + r0, _, _ := syscall.SyscallN(procRegisterApplicationRestart.Addr(), uintptr(unsafe.Pointer(cmdLineExclExeName)), uintptr(flags)) ret = wingoes.HRESULT(r0) return } func dsGetDcName(computerName *uint16, domainName *uint16, domainGuid *windows.GUID, siteName *uint16, flags dsGetDcNameFlag, dcInfo **_DOMAIN_CONTROLLER_INFO) (ret error) { - r0, _, _ := syscall.Syscall6(procDsGetDcNameW.Addr(), 6, 
uintptr(unsafe.Pointer(computerName)), uintptr(unsafe.Pointer(domainName)), uintptr(unsafe.Pointer(domainGuid)), uintptr(unsafe.Pointer(siteName)), uintptr(flags), uintptr(unsafe.Pointer(dcInfo))) + r0, _, _ := syscall.SyscallN(procDsGetDcNameW.Addr(), uintptr(unsafe.Pointer(computerName)), uintptr(unsafe.Pointer(domainName)), uintptr(unsafe.Pointer(domainGuid)), uintptr(unsafe.Pointer(siteName)), uintptr(flags), uintptr(unsafe.Pointer(dcInfo))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -90,7 +90,7 @@ func dsGetDcName(computerName *uint16, domainName *uint16, domainGuid *windows.G } func netValidateName(server *uint16, name *uint16, account *uint16, password *uint16, nameType _NETSETUP_NAME_TYPE) (ret error) { - r0, _, _ := syscall.Syscall6(procNetValidateName.Addr(), 5, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(account)), uintptr(unsafe.Pointer(password)), uintptr(nameType), 0) + r0, _, _ := syscall.SyscallN(procNetValidateName.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(account)), uintptr(unsafe.Pointer(password)), uintptr(nameType)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -98,7 +98,7 @@ func netValidateName(server *uint16, name *uint16, account *uint16, password *ui } func rmEndSession(session _RMHANDLE) (ret error) { - r0, _, _ := syscall.Syscall(procRmEndSession.Addr(), 1, uintptr(session), 0, 0) + r0, _, _ := syscall.SyscallN(procRmEndSession.Addr(), uintptr(session)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -106,7 +106,7 @@ func rmEndSession(session _RMHANDLE) (ret error) { } func rmGetList(session _RMHANDLE, nProcInfoNeeded *uint32, nProcInfo *uint32, rgAffectedApps *_RM_PROCESS_INFO, pRebootReasons *uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procRmGetList.Addr(), 5, uintptr(session), uintptr(unsafe.Pointer(nProcInfoNeeded)), uintptr(unsafe.Pointer(nProcInfo)), uintptr(unsafe.Pointer(rgAffectedApps)), uintptr(unsafe.Pointer(pRebootReasons)), 0) + r0, _, _ := syscall.SyscallN(procRmGetList.Addr(), uintptr(session), uintptr(unsafe.Pointer(nProcInfoNeeded)), uintptr(unsafe.Pointer(nProcInfo)), uintptr(unsafe.Pointer(rgAffectedApps)), uintptr(unsafe.Pointer(pRebootReasons))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -114,7 +114,7 @@ func rmGetList(session _RMHANDLE, nProcInfoNeeded *uint32, nProcInfo *uint32, rg } func rmJoinSession(pSession *_RMHANDLE, sessionKey *uint16) (ret error) { - r0, _, _ := syscall.Syscall(procRmJoinSession.Addr(), 2, uintptr(unsafe.Pointer(pSession)), uintptr(unsafe.Pointer(sessionKey)), 0) + r0, _, _ := syscall.SyscallN(procRmJoinSession.Addr(), uintptr(unsafe.Pointer(pSession)), uintptr(unsafe.Pointer(sessionKey))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -122,7 +122,7 @@ func rmJoinSession(pSession *_RMHANDLE, sessionKey *uint16) (ret error) { } func rmRegisterResources(session _RMHANDLE, nFiles uint32, rgsFileNames **uint16, nApplications uint32, rgApplications *_RM_UNIQUE_PROCESS, nServices uint32, rgsServiceNames **uint16) (ret error) { - r0, _, _ := syscall.Syscall9(procRmRegisterResources.Addr(), 7, uintptr(session), uintptr(nFiles), uintptr(unsafe.Pointer(rgsFileNames)), uintptr(nApplications), uintptr(unsafe.Pointer(rgApplications)), uintptr(nServices), uintptr(unsafe.Pointer(rgsServiceNames)), 0, 0) + r0, _, _ := syscall.SyscallN(procRmRegisterResources.Addr(), uintptr(session), uintptr(nFiles), uintptr(unsafe.Pointer(rgsFileNames)), uintptr(nApplications), uintptr(unsafe.Pointer(rgApplications)), uintptr(nServices), 
uintptr(unsafe.Pointer(rgsServiceNames))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -130,7 +130,7 @@ func rmRegisterResources(session _RMHANDLE, nFiles uint32, rgsFileNames **uint16 } func rmStartSession(pSession *_RMHANDLE, flags uint32, sessionKey *uint16) (ret error) { - r0, _, _ := syscall.Syscall(procRmStartSession.Addr(), 3, uintptr(unsafe.Pointer(pSession)), uintptr(flags), uintptr(unsafe.Pointer(sessionKey))) + r0, _, _ := syscall.SyscallN(procRmStartSession.Addr(), uintptr(unsafe.Pointer(pSession)), uintptr(flags), uintptr(unsafe.Pointer(sessionKey))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -138,7 +138,7 @@ func rmStartSession(pSession *_RMHANDLE, flags uint32, sessionKey *uint16) (ret } func expandEnvironmentStringsForUser(token windows.Token, src *uint16, dst *uint16, dstLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procExpandEnvironmentStringsForUserW.Addr(), 4, uintptr(token), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(dstLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsForUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(dstLen)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -146,7 +146,7 @@ func expandEnvironmentStringsForUser(token windows.Token, src *uint16, dst *uint } func loadUserProfile(token windows.Token, profileInfo *_PROFILEINFO) (err error) { - r1, _, e1 := syscall.Syscall(procLoadUserProfileW.Addr(), 2, uintptr(token), uintptr(unsafe.Pointer(profileInfo)), 0) + r1, _, e1 := syscall.SyscallN(procLoadUserProfileW.Addr(), uintptr(token), uintptr(unsafe.Pointer(profileInfo))) if int32(r1) == 0 { err = errnoErr(e1) } @@ -154,7 +154,7 @@ func loadUserProfile(token windows.Token, profileInfo *_PROFILEINFO) (err error) } func unloadUserProfile(token windows.Token, profile registry.Key) (err error) { - r1, _, e1 := syscall.Syscall(procUnloadUserProfile.Addr(), 2, uintptr(token), uintptr(profile), 0) + r1, _, e1 := syscall.SyscallN(procUnloadUserProfile.Addr(), uintptr(token), uintptr(profile)) if int32(r1) == 0 { err = errnoErr(e1) } diff --git a/wgengine/magicsock/discopingpurpose_string.go b/wgengine/magicsock/discopingpurpose_string.go index 3dc327de1..8eebf97a2 100644 --- a/wgengine/magicsock/discopingpurpose_string.go +++ b/wgengine/magicsock/discopingpurpose_string.go @@ -22,8 +22,9 @@ const _discoPingPurpose_name = "DiscoveryHeartbeatCLIHeartbeatForUDPLifetime" var _discoPingPurpose_index = [...]uint8{0, 9, 18, 21, 44} func (i discoPingPurpose) String() string { - if i < 0 || i >= discoPingPurpose(len(_discoPingPurpose_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_discoPingPurpose_index)-1 { return "discoPingPurpose(" + strconv.FormatInt(int64(i), 10) + ")" } - return _discoPingPurpose_name[_discoPingPurpose_index[i]:_discoPingPurpose_index[i+1]] + return _discoPingPurpose_name[_discoPingPurpose_index[idx]:_discoPingPurpose_index[idx+1]] } From 1723cb83ed95db76fa933348e8d9df7d9fcb960d Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 29 Oct 2025 11:09:28 +0000 Subject: [PATCH 1676/1708] ipn/ipnlocal: use an in-memory TKA store if FS is unavailable This requires making the internals of LocalBackend a bit more generic, and implementing the `tka.CompactableChonk` interface for `tka.Mem`. 
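A minimal sketch of the storage fallback this enables, assuming only the tka.Mem and tka.ChonkDir APIs shown in the diff below; the pickTKAStorage helper name is hypothetical and is not part of the actual ipnlocal code:

package main

import (
	"fmt"

	"tailscale.com/tka"
)

// pickTKAStorage mirrors the idea in tkaBootstrapFromGenesisLocked below:
// prefer on-disk storage when a state directory is available, and fall back
// to the in-memory Mem chonk otherwise (TKA state is then lost on restart).
func pickTKAStorage(stateDir string) (tka.CompactableChonk, error) {
	if stateDir == "" {
		// Hypothetical fallback path: no state directory, keep TKA state in memory.
		return &tka.Mem{}, nil
	}
	// Persistent path: *tka.FS also satisfies tka.CompactableChonk.
	return tka.ChonkDir(stateDir)
}

func main() {
	storage, err := pickTKAStorage("") // no state directory configured
	if err != nil {
		panic(err)
	}
	fmt.Printf("using %T for TKA state\n", storage)
}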
Signed-off-by: Alex Chan Updates https://github.com/tailscale/corp/issues/33599 --- cmd/tailscale/cli/up.go | 1 + health/healthmsg/healthmsg.go | 11 ++-- ipn/ipnlocal/network-lock.go | 54 +++++++++++------- tka/tailchonk.go | 92 +++++++++++++++++++++++++++++- tka/tailchonk_test.go | 37 ++++++++++++ tstest/chonktest/tailchonk_test.go | 6 ++ 6 files changed, 174 insertions(+), 27 deletions(-) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 91a6b6087..61cade8de 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -818,6 +818,7 @@ func upWorthyWarning(s string) bool { strings.Contains(s, healthmsg.WarnAcceptRoutesOff) || strings.Contains(s, healthmsg.LockedOut) || strings.Contains(s, healthmsg.WarnExitNodeUsage) || + strings.Contains(s, healthmsg.InMemoryTailnetLockState) || strings.Contains(strings.ToLower(s), "update available: ") } diff --git a/health/healthmsg/healthmsg.go b/health/healthmsg/healthmsg.go index 238410373..5ea1c736d 100644 --- a/health/healthmsg/healthmsg.go +++ b/health/healthmsg/healthmsg.go @@ -8,9 +8,10 @@ package healthmsg const ( - WarnAcceptRoutesOff = "Some peers are advertising routes but --accept-routes is false" - TailscaleSSHOnBut = "Tailscale SSH enabled, but " // + ... something from caller - LockedOut = "this node is locked out; it will not have connectivity until it is signed. For more info, see https://tailscale.com/s/locked-out" - WarnExitNodeUsage = "The following issues on your machine will likely make usage of exit nodes impossible" - DisableRPFilter = "Please set rp_filter=2 instead of rp_filter=1; see https://github.com/tailscale/tailscale/issues/3310" + WarnAcceptRoutesOff = "Some peers are advertising routes but --accept-routes is false" + TailscaleSSHOnBut = "Tailscale SSH enabled, but " // + ... something from caller + LockedOut = "this node is locked out; it will not have connectivity until it is signed. For more info, see https://tailscale.com/s/locked-out" + WarnExitNodeUsage = "The following issues on your machine will likely make usage of exit nodes impossible" + DisableRPFilter = "Please set rp_filter=2 instead of rp_filter=1; see https://github.com/tailscale/tailscale/issues/3310" + InMemoryTailnetLockState = "Tailnet Lock state is only being stored in-memory. Set --statedir to store state on disk, which is more secure. 
See https://tailscale.com/kb/1226/tailnet-lock#tailnet-lock-state" ) diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index f26c81011..14a3b105b 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -23,6 +23,7 @@ import ( "slices" "time" + "tailscale.com/health" "tailscale.com/health/healthmsg" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" @@ -54,7 +55,7 @@ var ( type tkaState struct { profile ipn.ProfileID authority *tka.Authority - storage *tka.FS + storage tka.CompactableChonk filtered []ipnstate.TKAPeer } @@ -75,7 +76,7 @@ func (b *LocalBackend) initTKALocked() error { root := b.TailscaleVarRoot() if root == "" { b.tka = nil - b.logf("network-lock unavailable; no state directory") + b.logf("cannot fetch existing TKA state; no state directory for network-lock") return nil } @@ -90,6 +91,7 @@ func (b *LocalBackend) initTKALocked() error { if err != nil { return fmt.Errorf("initializing tka: %v", err) } + if err := authority.Compact(storage, tkaCompactionDefaults); err != nil { b.logf("tka compaction failed: %v", err) } @@ -105,6 +107,16 @@ func (b *LocalBackend) initTKALocked() error { return nil } +// noNetworkLockStateDirWarnable is a Warnable to warn the user that Tailnet Lock data +// (in particular, the list of AUMs in the TKA state) is being stored in memory and will +// be lost when tailscaled restarts. +var noNetworkLockStateDirWarnable = health.Register(&health.Warnable{ + Code: "no-tailnet-lock-state-dir", + Title: "No statedir for Tailnet Lock", + Severity: health.SeverityMedium, + Text: health.StaticMessage(healthmsg.InMemoryTailnetLockState), +}) + // tkaFilterNetmapLocked checks the signatures on each node key, dropping // nodes from the netmap whose signature does not verify. // @@ -447,7 +459,7 @@ func (b *LocalBackend) tkaSyncLocked(ourNodeKey key.NodePublic) error { // b.mu must be held & TKA must be initialized. 
func (b *LocalBackend) tkaApplyDisablementLocked(secret []byte) error { if b.tka.authority.ValidDisablement(secret) { - if err := os.RemoveAll(b.chonkPathLocked()); err != nil { + if err := b.tka.storage.RemoveAll(); err != nil { return err } b.tka = nil @@ -491,19 +503,21 @@ func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, per } } - chonkDir := b.chonkPathLocked() - if err := os.Mkdir(filepath.Dir(chonkDir), 0755); err != nil && !os.IsExist(err) { - return fmt.Errorf("creating chonk root dir: %v", err) - } - if err := os.Mkdir(chonkDir, 0755); err != nil && !os.IsExist(err) { - return fmt.Errorf("mkdir: %v", err) - } - - chonk, err := tka.ChonkDir(chonkDir) - if err != nil { - return fmt.Errorf("chonk: %v", err) + root := b.TailscaleVarRoot() + var storage tka.CompactableChonk + if root == "" { + b.health.SetUnhealthy(noNetworkLockStateDirWarnable, nil) + b.logf("network-lock using in-memory storage; no state directory") + storage = &tka.Mem{} + } else { + chonkDir := b.chonkPathLocked() + chonk, err := tka.ChonkDir(chonkDir) + if err != nil { + return fmt.Errorf("chonk: %v", err) + } + storage = chonk } - authority, err := tka.Bootstrap(chonk, genesis) + authority, err := tka.Bootstrap(storage, genesis) if err != nil { return fmt.Errorf("tka bootstrap: %v", err) } @@ -511,7 +525,7 @@ func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, per b.tka = &tkaState{ profile: b.pm.CurrentProfile().ID(), authority: authority, - storage: chonk, + storage: storage, } return nil } @@ -524,10 +538,6 @@ func (b *LocalBackend) CanSupportNetworkLock() error { return nil } - if b.TailscaleVarRoot() == "" { - return errors.New("network-lock is not supported in this configuration, try setting --statedir") - } - // There's a var root (aka --statedir), so if network lock gets // initialized we have somewhere to store our AUMs. That's all // we need. @@ -647,6 +657,7 @@ func tkaStateFromPeer(p tailcfg.NodeView) ipnstate.TKAPeer { // needing signatures is returned as a response. // The Finish RPC submits signatures for all these nodes, at which point // Control has everything it needs to atomically enable network lock. +// TODO(alexc): Only with persistent backend func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) error { if err := b.CanSupportNetworkLock(); err != nil { return err @@ -767,7 +778,7 @@ func (b *LocalBackend) NetworkLockForceLocalDisable() error { return fmt.Errorf("saving prefs: %w", err) } - if err := os.RemoveAll(b.chonkPathLocked()); err != nil { + if err := b.tka.storage.RemoveAll(); err != nil { return fmt.Errorf("deleting TKA state: %w", err) } b.tka = nil @@ -776,6 +787,7 @@ func (b *LocalBackend) NetworkLockForceLocalDisable() error { // NetworkLockSign signs the given node-key and submits it to the control plane. // rotationPublic, if specified, must be an ed25519 public key. 
+// TODO(alexc): in-memory only func (b *LocalBackend) NetworkLockSign(nodeKey key.NodePublic, rotationPublic []byte) error { ourNodeKey, sig, err := func(nodeKey key.NodePublic, rotationPublic []byte) (key.NodePublic, tka.NodeKeySignature, error) { b.mu.Lock() diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 3e8d1b6c8..2dc03a6f6 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "log" + "maps" "os" "path/filepath" "slices" @@ -57,6 +58,10 @@ type Chonk interface { // as a hint to pick the correct chain in the event that the Chonk stores // multiple distinct chains. LastActiveAncestor() (*AUMHash, error) + + // RemoveAll permanently and completely clears the TKA state. This should + // be called when the user disables Tailnet Lock. + RemoveAll() error } // CompactableChonk implementation are extensions of Chonk, which are @@ -78,12 +83,21 @@ type CompactableChonk interface { } // Mem implements in-memory storage of TKA state, suitable for -// tests. +// tests or cases where filesystem storage is unavailable. // // Mem implements the Chonk interface. +// +// Mem is thread-safe. type Mem struct { mu sync.RWMutex aums map[AUMHash]AUM + commitTimes map[AUMHash]time.Time + + // parentIndex is a map of AUMs to the AUMs for which they are + // the parent. + // + // For example, if parent index is {1 -> {2, 3, 4}}, that means + // that AUMs 2, 3, 4 all have aum.PrevAUMHash = 1. parentIndex map[AUMHash][]AUMHash lastActiveAncestor *AUMHash @@ -152,12 +166,14 @@ func (c *Mem) CommitVerifiedAUMs(updates []AUM) error { if c.aums == nil { c.parentIndex = make(map[AUMHash][]AUMHash, 64) c.aums = make(map[AUMHash]AUM, 64) + c.commitTimes = make(map[AUMHash]time.Time, 64) } updateLoop: for _, aum := range updates { aumHash := aum.Hash() c.aums[aumHash] = aum + c.commitTimes[aumHash] = time.Now() parent, ok := aum.Parent() if ok { @@ -173,6 +189,71 @@ updateLoop: return nil } +// RemoveAll permanently and completely clears the TKA state. +func (c *Mem) RemoveAll() error { + c.mu.Lock() + defer c.mu.Unlock() + c.aums = nil + c.commitTimes = nil + c.parentIndex = nil + c.lastActiveAncestor = nil + return nil +} + +// AllAUMs returns all AUMs stored in the chonk. +func (c *Mem) AllAUMs() ([]AUMHash, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + return slices.Collect(maps.Keys(c.aums)), nil +} + +// CommitTime returns the time at which the AUM was committed. +// +// If the AUM does not exist, then os.ErrNotExist is returned. +func (c *Mem) CommitTime(h AUMHash) (time.Time, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + t, ok := c.commitTimes[h] + if ok { + return t, nil + } else { + return time.Time{}, os.ErrNotExist + } +} + +// PurgeAUMs marks the specified AUMs for deletion from storage. +func (c *Mem) PurgeAUMs(hashes []AUMHash) error { + c.mu.Lock() + defer c.mu.Unlock() + + for _, h := range hashes { + // Remove the deleted AUM from the list of its parents' children. + // + // However, we leave the list of this AUM's children in parentIndex, + // so we can find them later in ChildAUMs(). + if aum, ok := c.aums[h]; ok { + parent, hasParent := aum.Parent() + if hasParent { + c.parentIndex[parent] = slices.DeleteFunc( + c.parentIndex[parent], + func(other AUMHash) bool { return bytes.Equal(h[:], other[:]) }, + ) + if len(c.parentIndex[parent]) == 0 { + delete(c.parentIndex, parent) + } + } + } + + // Delete this AUM from the list of AUMs and commit times. 
+ delete(c.aums, h) + delete(c.commitTimes, h) + } + + return nil +} + // FS implements filesystem storage of TKA state. // // FS implements the Chonk interface. @@ -184,6 +265,10 @@ type FS struct { // ChonkDir returns an implementation of Chonk which uses the // given directory to store TKA state. func ChonkDir(dir string) (*FS, error) { + if err := os.MkdirAll(dir, 0755); err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("creating chonk root dir: %v", err) + } + stat, err := os.Stat(dir) if err != nil { return nil, err @@ -376,6 +461,11 @@ func (c *FS) Heads() ([]AUM, error) { return out, nil } +// RemoveAll permanently and completely clears the TKA state. +func (c *FS) RemoveAll() error { + return os.RemoveAll(c.base) +} + // AllAUMs returns all AUMs stored in the chonk. func (c *FS) AllAUMs() ([]AUMHash, error) { c.mu.RLock() diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index 7816d2dc1..70b7dc9a7 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -127,6 +127,43 @@ func TestTailchonkFS_IgnoreTempFile(t *testing.T) { } } +// If we use a non-existent directory with filesystem Chonk storage, +// it's automatically created. +func TestTailchonkFS_CreateChonkDir(t *testing.T) { + base := filepath.Join(t.TempDir(), "a", "b", "c") + + chonk, err := ChonkDir(base) + if err != nil { + t.Fatalf("ChonkDir: %v", err) + } + + aum := AUM{MessageKind: AUMNoOp} + must.Do(chonk.CommitVerifiedAUMs([]AUM{aum})) + + got, err := chonk.AUM(aum.Hash()) + if err != nil { + t.Errorf("Chonk.AUM: %v", err) + } + if diff := cmp.Diff(got, aum); diff != "" { + t.Errorf("wrong AUM; (-got+want):%v", diff) + } + + if _, err := os.Stat(base); err != nil { + t.Errorf("os.Stat: %v", err) + } +} + +// You can't use a file as the root of your filesystem Chonk storage. +func TestTailchonkFS_CannotUseFile(t *testing.T) { + base := filepath.Join(t.TempDir(), "tka_storage.txt") + must.Do(os.WriteFile(base, []byte("this won't work"), 0644)) + + _, err := ChonkDir(base) + if err == nil { + t.Fatal("ChonkDir succeeded; expected an error") + } +} + func TestMarkActiveChain(t *testing.T) { type aumTemplate struct { AUM AUM diff --git a/tstest/chonktest/tailchonk_test.go b/tstest/chonktest/tailchonk_test.go index ce6b04324..6dfab798e 100644 --- a/tstest/chonktest/tailchonk_test.go +++ b/tstest/chonktest/tailchonk_test.go @@ -39,6 +39,12 @@ func TestImplementsCompactableChonk(t *testing.T) { name string newChonk func(t *testing.T) tka.CompactableChonk }{ + { + name: "Mem", + newChonk: func(t *testing.T) tka.CompactableChonk { + return &tka.Mem{} + }, + }, { name: "FS", newChonk: func(t *testing.T) tka.CompactableChonk { From 165a24744e7dfef778e9e4fb7ac65b9f8cc03b29 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 17 Nov 2025 09:18:48 -0800 Subject: [PATCH 1677/1708] tka: fix typo in comment Let's fix all the typos, which lets the code be more readable, lest we confuse our readers. Updates #cleanup Change-Id: I4954601b0592b1fda40269009647bb517a4457be Signed-off-by: Brad Fitzpatrick --- tka/tka.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tka/tka.go b/tka/tka.go index 9dce74e9a..ed029c82e 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -779,8 +779,8 @@ func (a *Authority) findParentForRewrite(storage Chonk, removeKeys []tkatype.Key } } if !keyTrusted { - // Success: the revoked keys are not trusted! - // Lets check that our key was trusted to ensure + // Success: the revoked keys are not trusted. 
+ // Check that our key was trusted to ensure // we can sign a fork from here. if _, err := state.GetKey(ourKey); err == nil { break From f1cddc6ecf4624b7608b1aeb06bd108c24687fef Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 17 Nov 2025 08:06:16 -0800 Subject: [PATCH 1678/1708] ipn{,/local},cmd/tailscale: add "sync" flag and pref to disable control map poll For manual (human) testing, this lets the user disable control plane map polls with "tailscale set --sync=false" (which survives restarts) and "tailscale set --sync" to restore. A high severity health warning is shown while this is active. Updates #12639 Updates #17945 Change-Id: I83668fa5de3b5e5e25444df0815ec2a859153a6d Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/set.go | 3 +++ cmd/tailscale/cli/up.go | 3 ++- ipn/ipn_clone.go | 1 + ipn/ipn_view.go | 7 ++++++ ipn/ipnlocal/local.go | 28 ++++++++++++++++++++++- ipn/ipnlocal/profiles_test.go | 6 +++-- ipn/prefs.go | 20 ++++++++++++++-- ipn/prefs_test.go | 43 +++++++++++++++++++++++++++++++++++ types/opt/bool.go | 11 +++++++++ types/opt/bool_test.go | 20 ++++++++++++++++ 10 files changed, 136 insertions(+), 6 deletions(-) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 43f8bbbc3..3b5e032db 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -63,6 +63,7 @@ type setArgsT struct { reportPosture bool snat bool statefulFiltering bool + sync bool netfilterMode string relayServerPort string } @@ -85,6 +86,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf.BoolVar(&setArgs.updateApply, "auto-update", false, "automatically update to the latest available version") setf.BoolVar(&setArgs.reportPosture, "report-posture", false, "allow management plane to gather device posture information") setf.BoolVar(&setArgs.runWebClient, "webclient", false, "expose the web interface for managing this node over Tailscale at port 5252") + setf.BoolVar(&setArgs.sync, "sync", false, hidden+"actively sync configuration from the control plane (set to false only for network failure testing)") setf.StringVar(&setArgs.relayServerPort, "relay-server-port", "", "UDP port number (0 will pick a random unused port) for the relay server to bind to, on all interfaces, or empty string to disable relay server functionality") ffcomplete.Flag(setf, "exit-node", func(args []string) ([]string, ffcomplete.ShellCompDirective, error) { @@ -149,6 +151,7 @@ func runSet(ctx context.Context, args []string) (retErr error) { OperatorUser: setArgs.opUser, NoSNAT: !setArgs.snat, ForceDaemon: setArgs.forceDaemon, + Sync: opt.NewBool(setArgs.sync), AutoUpdate: ipn.AutoUpdatePrefs{ Check: setArgs.updateCheck, Apply: opt.NewBool(setArgs.updateApply), diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 61cade8de..c34155955 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -890,6 +890,7 @@ func init() { addPrefFlagMapping("advertise-connector", "AppConnector") addPrefFlagMapping("report-posture", "PostureChecking") addPrefFlagMapping("relay-server-port", "RelayServerPort") + addPrefFlagMapping("sync", "Sync") } func addPrefFlagMapping(flagName string, prefNames ...string) { @@ -925,7 +926,7 @@ func updateMaskedPrefsFromUpOrSetFlag(mp *ipn.MaskedPrefs, flagName string) { if prefs, ok := prefsOfFlag[flagName]; ok { for _, pref := range prefs { f := reflect.ValueOf(mp).Elem() - for _, name := range strings.Split(pref, ".") { + for name := range strings.SplitSeq(pref, ".") { f = f.FieldByName(name + "Set") } f.SetBool(true) diff 
--git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index d5af906ee..1be716197 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -90,6 +90,7 @@ var _PrefsCloneNeedsRegeneration = Prefs(struct { Egg bool AdvertiseRoutes []netip.Prefix AdvertiseServices []string + Sync opt.Bool NoSNAT bool NoStatefulFiltering opt.Bool NetfilterMode preftype.NetfilterMode diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 12fe93bab..d3836416b 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -363,6 +363,12 @@ func (v PrefsView) AdvertiseServices() views.Slice[string] { return views.SliceOf(v.ж.AdvertiseServices) } +// Sync is whether this node should sync its configuration from +// the control plane. If unset, this defaults to true. +// This exists primarily for testing, to verify that netmap caching +// and offline operation work correctly. +func (v PrefsView) Sync() opt.Bool { return v.ж.Sync } + // NoSNAT specifies whether to source NAT traffic going to // destinations in AdvertiseRoutes. The default is to apply source // NAT, which makes the traffic appear to come from the router @@ -482,6 +488,7 @@ var _PrefsViewNeedsRegeneration = Prefs(struct { Egg bool AdvertiseRoutes []netip.Prefix AdvertiseServices []string + Sync opt.Bool NoSNAT bool NoStatefulFiltering opt.Bool NetfilterMode preftype.NetfilterMode diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ed183e508..24ab41735 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -870,6 +870,7 @@ func (b *LocalBackend) initPrefsFromConfig(conf *conffile.Config) error { if err := b.pm.SetPrefs(p.View(), ipn.NetworkProfile{}); err != nil { return err } + b.updateWarnSync(p.View()) b.setStaticEndpointsFromConfigLocked(conf) b.conf = conf return nil @@ -931,7 +932,12 @@ func (b *LocalBackend) pauseOrResumeControlClientLocked() { return } networkUp := b.prevIfState.AnyInterfaceUp() - b.cc.SetPaused((b.state == ipn.Stopped && b.NetMap() != nil) || (!networkUp && !testenv.InTest() && !assumeNetworkUpdateForTest())) + pauseForNetwork := (b.state == ipn.Stopped && b.NetMap() != nil) || (!networkUp && !testenv.InTest() && !assumeNetworkUpdateForTest()) + + prefs := b.pm.CurrentPrefs() + pauseForSyncPref := prefs.Valid() && prefs.Sync().EqualBool(false) + + b.cc.SetPaused(pauseForNetwork || pauseForSyncPref) } // DisconnectControl shuts down control client. This can be run before node shutdown to force control to consider this ndoe @@ -2519,6 +2525,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { logf("serverMode=%v", inServerMode) } b.applyPrefsToHostinfoLocked(hostinfo, prefs) + b.updateWarnSync(prefs) persistv := prefs.Persist().AsStruct() if persistv == nil { @@ -2570,6 +2577,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { ControlKnobs: b.sys.ControlKnobs(), Shutdown: ccShutdown, Bus: b.sys.Bus.Get(), + StartPaused: prefs.Sync().EqualBool(false), // Don't warn about broken Linux IP forwarding when // netstack is being used. 
@@ -4658,6 +4666,9 @@ func (b *LocalBackend) setPrefsLocked(newp *ipn.Prefs) ipn.PrefsView { b.resetAlwaysOnOverrideLocked() } + b.pauseOrResumeControlClientLocked() // for prefs.Sync changes + b.updateWarnSync(prefs) + if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged { b.doSetHostinfoFilterServicesLocked() } @@ -6665,6 +6676,13 @@ func (b *LocalBackend) sshServerOrInit() (_ SSHServer, err error) { return b.sshServer, nil } +var warnSyncDisabled = health.Register(&health.Warnable{ + Code: "sync-disabled", + Title: "Tailscale Sync is Disabled", + Severity: health.SeverityHigh, + Text: health.StaticMessage("Tailscale control plane syncing is disabled; run `tailscale set --sync` to restore"), +}) + var warnSSHSELinuxWarnable = health.Register(&health.Warnable{ Code: "ssh-unavailable-selinux-enabled", Title: "Tailscale SSH and SELinux", @@ -6680,6 +6698,14 @@ func (b *LocalBackend) updateSELinuxHealthWarning() { } } +func (b *LocalBackend) updateWarnSync(prefs ipn.PrefsView) { + if prefs.Sync().EqualBool(false) { + b.health.SetUnhealthy(warnSyncDisabled, nil) + } else { + b.health.SetHealthy(warnSyncDisabled) + } +} + func (b *LocalBackend) handleSSHConn(c net.Conn) (err error) { s, err := b.sshServerOrInit() if err != nil { diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index deeab2ade..95834284e 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -1129,10 +1129,12 @@ func TestProfileStateChangeCallback(t *testing.T) { } gotChanges := make([]stateChange, 0, len(tt.wantChanges)) - pm.StateChangeHook = func(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { + pm.StateChangeHook = func(profile ipn.LoginProfileView, prefView ipn.PrefsView, sameNode bool) { + prefs := prefView.AsStruct() + prefs.Sync = prefs.Sync.Normalized() gotChanges = append(gotChanges, stateChange{ Profile: profile.AsStruct(), - Prefs: prefs.AsStruct(), + Prefs: prefs, SameNode: sameNode, }) } diff --git a/ipn/prefs.go b/ipn/prefs.go index 796098c8a..7f8216c60 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -207,6 +207,12 @@ type Prefs struct { // control server. AdvertiseServices []string + // Sync is whether this node should sync its configuration from + // the control plane. If unset, this defaults to true. + // This exists primarily for testing, to verify that netmap caching + // and offline operation work correctly. + Sync opt.Bool + // NoSNAT specifies whether to source NAT traffic going to // destinations in AdvertiseRoutes. 
The default is to apply source // NAT, which makes the traffic appear to come from the router @@ -364,12 +370,13 @@ type MaskedPrefs struct { EggSet bool `json:",omitempty"` AdvertiseRoutesSet bool `json:",omitempty"` AdvertiseServicesSet bool `json:",omitempty"` + SyncSet bool `json:",omitzero"` NoSNATSet bool `json:",omitempty"` NoStatefulFilteringSet bool `json:",omitempty"` NetfilterModeSet bool `json:",omitempty"` OperatorUserSet bool `json:",omitempty"` ProfileNameSet bool `json:",omitempty"` - AutoUpdateSet AutoUpdatePrefsMask `json:",omitempty"` + AutoUpdateSet AutoUpdatePrefsMask `json:",omitzero"` AppConnectorSet bool `json:",omitempty"` PostureCheckingSet bool `json:",omitempty"` NetfilterKindSet bool `json:",omitempty"` @@ -547,6 +554,9 @@ func (p *Prefs) pretty(goos string) string { if p.LoggedOut { sb.WriteString("loggedout=true ") } + if p.Sync.EqualBool(false) { + sb.WriteString("sync=false ") + } if p.ForceDaemon { sb.WriteString("server=true ") } @@ -653,6 +663,7 @@ func (p *Prefs) Equals(p2 *Prefs) bool { p.ExitNodeAllowLANAccess == p2.ExitNodeAllowLANAccess && p.CorpDNS == p2.CorpDNS && p.RunSSH == p2.RunSSH && + p.Sync.Normalized() == p2.Sync.Normalized() && p.RunWebClient == p2.RunWebClient && p.WantRunning == p2.WantRunning && p.LoggedOut == p2.LoggedOut && @@ -956,10 +967,15 @@ func PrefsFromBytes(b []byte, base *Prefs) error { if len(b) == 0 { return nil } - return json.Unmarshal(b, base) } +func (p *Prefs) normalizeOptBools() { + if p.Sync == opt.ExplicitlyUnset { + p.Sync = "" + } +} + var jsonEscapedZero = []byte(`\u0000`) // LoadPrefsWindows loads a legacy relaynode config file into Prefs with diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 233616409..7c9c3ef43 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -57,6 +57,7 @@ func TestPrefsEqual(t *testing.T) { "Egg", "AdvertiseRoutes", "AdvertiseServices", + "Sync", "NoSNAT", "NoStatefulFiltering", "NetfilterMode", @@ -404,6 +405,7 @@ func checkPrefs(t *testing.T, p Prefs) { if err != nil { t.Fatalf("PrefsFromBytes(p2) failed: bytes=%q; err=%v\n", p2.ToBytes(), err) } + p2b.normalizeOptBools() p2p := p2.Pretty() p2bp := p2b.Pretty() t.Logf("\np2p: %#v\np2bp: %#v\n", p2p, p2bp) @@ -419,6 +421,42 @@ func checkPrefs(t *testing.T, p Prefs) { } } +// PrefsFromBytes documents that it preserves fields unset in the JSON. +// This verifies that stays true. 
+func TestPrefsFromBytesPreservesOldValues(t *testing.T) { + tests := []struct { + name string + old Prefs + json []byte + want Prefs + }{ + { + name: "preserve-control-url", + old: Prefs{ControlURL: "https://foo"}, + json: []byte(`{"RouteAll": true}`), + want: Prefs{ControlURL: "https://foo", RouteAll: true}, + }, + { + name: "opt.Bool", // test that we don't normalize it early + old: Prefs{Sync: "unset"}, + json: []byte(`{}`), + want: Prefs{Sync: "unset"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + old := tt.old // shallow + err := PrefsFromBytes(tt.json, &old) + if err != nil { + t.Fatalf("PrefsFromBytes failed: %v", err) + } + if !old.Equals(&tt.want) { + t.Fatalf("got %+v; want %+v", old, tt.want) + } + }) + } +} + func TestBasicPrefs(t *testing.T) { tstest.PanicOnLog() @@ -591,6 +629,11 @@ func TestPrefsPretty(t *testing.T) { "linux", `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist=nil}`, }, + { + Prefs{Sync: "false"}, + "linux", + "Prefs{ra=false dns=false want=false sync=false routes=[] nf=off update=off Persist=nil}", + }, } for i, tt := range tests { got := tt.p.pretty(tt.os) diff --git a/types/opt/bool.go b/types/opt/bool.go index e2fd6a054..fbc39e1dc 100644 --- a/types/opt/bool.go +++ b/types/opt/bool.go @@ -83,6 +83,17 @@ func (b *Bool) Scan(src any) error { } } +// Normalized returns the normalized form of b, mapping "unset" to "" +// and leaving other values unchanged. +func (b Bool) Normalized() Bool { + switch b { + case ExplicitlyUnset: + return Empty + default: + return b + } +} + // EqualBool reports whether b is equal to v. // If b is empty or not a valid bool, it reports false. func (b Bool) EqualBool(v bool) bool { diff --git a/types/opt/bool_test.go b/types/opt/bool_test.go index dddbcfc19..e61d66dbe 100644 --- a/types/opt/bool_test.go +++ b/types/opt/bool_test.go @@ -106,6 +106,8 @@ func TestBoolEqualBool(t *testing.T) { }{ {"", true, false}, {"", false, false}, + {"unset", true, false}, + {"unset", false, false}, {"sdflk;", true, false}, {"sldkf;", false, false}, {"true", true, true}, @@ -122,6 +124,24 @@ func TestBoolEqualBool(t *testing.T) { } } +func TestBoolNormalized(t *testing.T) { + tests := []struct { + in Bool + want Bool + }{ + {"", ""}, + {"true", "true"}, + {"false", "false"}, + {"unset", ""}, + {"foo", "foo"}, + } + for _, tt := range tests { + if got := tt.in.Normalized(); got != tt.want { + t.Errorf("(%q).Normalized() = %q; want %q", string(tt.in), string(got), string(tt.want)) + } + } +} + func TestUnmarshalAlloc(t *testing.T) { b := json.Unmarshaler(new(Bool)) n := testing.AllocsPerRun(10, func() { b.UnmarshalJSON(trueBytes) }) From 26f9b50247c9ba82ee33e4ae3acb5a107424c3a4 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Mon, 17 Nov 2025 14:42:15 -0800 Subject: [PATCH 1679/1708] feature/tpm: disable dictionary attack protection on sealing key (#17952) DA protection is not super helpful because we don't set an authorization password on the key. But if authorization fails for other reasons (like TPM being reset), we will eventually cause DA lockout with tailscaled trying to load the key. DA lockout then leads to (1) issues for other processes using the TPM and (2) the underlying authorization error being masked in logs. 
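For reference, a rough sketch of the attribute in question (illustrative only; the field names match the templates in the diff below, and the import path is assumed to be github.com/google/go-tpm/tpm2):

    // With NoDA set, authorization failures against this object are not
    // counted by the TPM's dictionary-attack logic, so repeated failed
    // attempts to use the key (e.g. after a TPM reset) cannot drive the
    // TPM into DA lockout and hide the underlying error.
    attrs := tpm2.TPMAObject{
        FixedTPM:     true,
        FixedParent:  true,
        UserWithAuth: true,
        NoDA:         true,
    }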
Updates #17654 Signed-off-by: Andrew Lytvynov --- feature/tpm/attestation.go | 10 ++++++---- feature/tpm/tpm.go | 3 +++ 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/feature/tpm/attestation.go b/feature/tpm/attestation.go index 49b80ade1..197a8d6b8 100644 --- a/feature/tpm/attestation.go +++ b/feature/tpm/attestation.go @@ -59,10 +59,12 @@ func newAttestationKey() (ak *attestationKey, retErr error) { SensitiveDataOrigin: true, UserWithAuth: true, AdminWithPolicy: true, - NoDA: true, - FixedTPM: true, - FixedParent: true, - SignEncrypt: true, + // We don't set an authorization policy on this key, so + // DA isn't helpful. + NoDA: true, + FixedTPM: true, + FixedParent: true, + SignEncrypt: true, }, Parameters: tpm2.NewTPMUPublicParms( tpm2.TPMAlgECC, diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 7cbdec088..8df269b95 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -414,6 +414,9 @@ func tpmSeal(logf logger.Logf, data []byte) (*tpmSealedData, error) { FixedTPM: true, FixedParent: true, UserWithAuth: true, + // We don't set an authorization policy on this key, so DA + // isn't helpful. + NoDA: true, }, }), } From 41662f51288465842091a357f7e9bc633da6bd4c Mon Sep 17 00:00:00 2001 From: James Tucker Date: Sat, 15 Nov 2025 18:35:39 -0800 Subject: [PATCH 1680/1708] ssh/tailssh: fix incubator tests on macOS arm64 Perform a path check first before attempting exec of `true`. Try /usr/bin/true first, as that is now and increasingly so, the more common and more portable path. Fixes tests on macOS arm64 where exec was returning a different kind of path error than previously checked. Updates #16569 Signed-off-by: James Tucker --- ssh/tailssh/incubator.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go index dd280143e..f75646771 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -74,6 +74,9 @@ var maybeStartLoginSession = func(dlogf logger.Logf, ia incubatorArgs) (close fu return nil } +// truePaths are the common locations to find the true binary, in likelihood order. +var truePaths = [...]string{"/usr/bin/true", "/bin/true"} + // tryExecInDir tries to run a command in dir and returns nil if it succeeds. // Otherwise, it returns a filesystem error or a timeout error if the command // took too long. @@ -93,10 +96,14 @@ func tryExecInDir(ctx context.Context, dir string) error { windir := os.Getenv("windir") return run(filepath.Join(windir, "system32", "doskey.exe")) } - if err := run("/bin/true"); !errors.Is(err, exec.ErrNotFound) { // including nil - return err + // Execute the first "true" we find in the list. + for _, path := range truePaths { + // Note: LookPath does not consult $PATH when passed multi-label paths. + if p, err := exec.LookPath(path); err == nil { + return run(p) + } } - return run("/usr/bin/true") + return exec.ErrNotFound } // newIncubatorCommand returns a new exec.Cmd configured with From 4860c460f5072cdb977417fc03405b3accc731d9 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Mon, 17 Nov 2025 19:17:02 -0800 Subject: [PATCH 1681/1708] wgengine/netlog: strip dot suffix from node name (#17954) The REST API does not return a node name with a trailing dot, while the internal node name reported in the netmap does have one. In order to be consistent with the API, strip the dot when recording node information. 
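As a small illustration of the normalization (hypothetical node name, not code from this change):

    // Internal netmap node names carry a trailing dot; the REST API form
    // does not, so the suffix is trimmed before recording.
    name := strings.TrimSuffix("src.tail123456.ts.net.", ".")
    // name == "src.tail123456.ts.net"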
Updates tailscale/corp#33352 Signed-off-by: Joe Tsai --- wgengine/netlog/record.go | 6 +++++- wgengine/netlog/record_test.go | 8 ++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/wgengine/netlog/record.go b/wgengine/netlog/record.go index 45e30fabe..25b6b1148 100644 --- a/wgengine/netlog/record.go +++ b/wgengine/netlog/record.go @@ -9,6 +9,7 @@ import ( "cmp" "net/netip" "slices" + "strings" "time" "unicode/utf8" @@ -169,7 +170,10 @@ func (nu nodeUser) toNode() netlogtype.Node { if !nu.Valid() { return netlogtype.Node{} } - n := netlogtype.Node{NodeID: nu.StableID(), Name: nu.Name()} + n := netlogtype.Node{ + NodeID: nu.StableID(), + Name: strings.TrimSuffix(nu.Name(), "."), + } var ipv4, ipv6 netip.Addr for _, addr := range nu.Addresses().All() { switch { diff --git a/wgengine/netlog/record_test.go b/wgengine/netlog/record_test.go index 7dd840d29..ec0229534 100644 --- a/wgengine/netlog/record_test.go +++ b/wgengine/netlog/record_test.go @@ -53,7 +53,7 @@ func TestToMessage(t *testing.T) { selfNode: nodeUser{NodeView: (&tailcfg.Node{ ID: 123456, StableID: "n123456CNTL", - Name: "src.tail123456.ts.net", + Name: "src.tail123456.ts.net.", Addresses: []netip.Prefix{prefix("100.1.2.3")}, Tags: []string{"tag:src"}, }).View()}, @@ -64,14 +64,14 @@ func TestToMessage(t *testing.T) { addr("100.1.2.4"): {NodeView: (&tailcfg.Node{ ID: 123457, StableID: "n123457CNTL", - Name: "dst1.tail123456.ts.net", + Name: "dst1.tail123456.ts.net.", Addresses: []netip.Prefix{prefix("100.1.2.4")}, Tags: []string{"tag:dst1"}, }).View()}, addr("100.1.2.5"): {NodeView: (&tailcfg.Node{ ID: 123458, StableID: "n123458CNTL", - Name: "dst2.tail123456.ts.net", + Name: "dst2.tail123456.ts.net.", Addresses: []netip.Prefix{prefix("100.1.2.5")}, Tags: []string{"tag:dst2"}, }).View()}, @@ -163,7 +163,7 @@ func TestToNode(t *testing.T) { { node: &tailcfg.Node{ StableID: "n123456CNTL", - Name: "test.tail123456.ts.net", + Name: "test.tail123456.ts.net.", Addresses: []netip.Prefix{prefix("100.1.2.3")}, Tags: []string{"tag:dupe", "tag:test", "tag:dupe"}, User: 12345, // should be ignored From a2e9dfacde52a083555074c1660ce237b12ed7e6 Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Tue, 18 Nov 2025 07:53:42 +0000 Subject: [PATCH 1682/1708] cmd/tailscale/cli: warn if a simple up would change prefs (#17877) Updates tailscale/corp#21570 Signed-off-by: James Sanderson --- cmd/tailscale/cli/cli_test.go | 73 +++++++++++++++++++++++++++++------ cmd/tailscale/cli/up.go | 23 ++++++----- 2 files changed, 73 insertions(+), 23 deletions(-) diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 2e1bec8c9..8762b7aae 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -174,6 +174,7 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) { curUser string // os.Getenv("USER") on the client side goos string // empty means "linux" distro distro.Distro + backendState string // empty means "Running" want string }{ @@ -188,6 +189,28 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) { }, want: "", }, + { + name: "bare_up_needs_login_default_prefs", + flags: []string{}, + curPrefs: ipn.NewPrefs(), + backendState: ipn.NeedsLogin.String(), + want: "", + }, + { + name: "bare_up_needs_login_losing_prefs", + flags: []string{}, + curPrefs: &ipn.Prefs{ + // defaults: + ControlURL: ipn.DefaultControlURL, + WantRunning: false, + NetfilterMode: preftype.NetfilterOn, + NoStatefulFiltering: opt.NewBool(true), + // non-default: + CorpDNS: false, + }, + backendState: 
ipn.NeedsLogin.String(), + want: accidentalUpPrefix + " --accept-dns=false", + }, { name: "losing_hostname", flags: []string{"--accept-dns"}, @@ -620,9 +643,13 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - goos := "linux" - if tt.goos != "" { - goos = tt.goos + goos := stdcmp.Or(tt.goos, "linux") + backendState := stdcmp.Or(tt.backendState, ipn.Running.String()) + // Needs to match the other conditions in checkForAccidentalSettingReverts + tt.curPrefs.Persist = &persist.Persist{ + UserProfile: tailcfg.UserProfile{ + LoginName: "janet", + }, } var upArgs upArgsT flagSet := newUpFlagSet(goos, &upArgs, "up") @@ -638,10 +665,11 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) { curExitNodeIP: tt.curExitNodeIP, distro: tt.distro, user: tt.curUser, + backendState: backendState, } applyImplicitPrefs(newPrefs, tt.curPrefs, upEnv) var got string - if err := checkForAccidentalSettingReverts(newPrefs, tt.curPrefs, upEnv); err != nil { + if _, err := checkForAccidentalSettingReverts(newPrefs, tt.curPrefs, upEnv); err != nil { got = err.Error() } if strings.TrimSpace(got) != tt.want { @@ -1011,13 +1039,10 @@ func TestUpdatePrefs(t *testing.T) { wantErrSubtr string }{ { - name: "bare_up_means_up", - flags: []string{}, - curPrefs: &ipn.Prefs{ - ControlURL: ipn.DefaultControlURL, - WantRunning: false, - Hostname: "foo", - }, + name: "bare_up_means_up", + flags: []string{}, + curPrefs: ipn.NewPrefs(), + wantSimpleUp: false, // user profile not set, so no simple up }, { name: "just_up", @@ -1031,6 +1056,32 @@ func TestUpdatePrefs(t *testing.T) { }, wantSimpleUp: true, }, + { + name: "just_up_needs_login_default_prefs", + flags: []string{}, + curPrefs: ipn.NewPrefs(), + env: upCheckEnv{ + backendState: "NeedsLogin", + }, + wantSimpleUp: false, + }, + { + name: "just_up_needs_login_losing_prefs", + flags: []string{}, + curPrefs: &ipn.Prefs{ + // defaults: + ControlURL: ipn.DefaultControlURL, + WantRunning: false, + NetfilterMode: preftype.NetfilterOn, + // non-default: + CorpDNS: false, + }, + env: upCheckEnv{ + backendState: "NeedsLogin", + }, + wantSimpleUp: false, + wantErrSubtr: "tailscale up --accept-dns=false", + }, { name: "just_edit", flags: []string{}, diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index c34155955..e8b0cd0d3 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -388,7 +388,8 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus if !env.upArgs.reset { applyImplicitPrefs(prefs, curPrefs, env) - if err := checkForAccidentalSettingReverts(prefs, curPrefs, env); err != nil { + simpleUp, err = checkForAccidentalSettingReverts(prefs, curPrefs, env) + if err != nil { return false, nil, err } } @@ -420,11 +421,6 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus tagsChanged := !reflect.DeepEqual(curPrefs.AdvertiseTags, prefs.AdvertiseTags) - simpleUp = env.flagSet.NFlag() == 0 && - curPrefs.Persist != nil && - curPrefs.Persist.UserProfile.LoginName != "" && - env.backendState != ipn.NeedsLogin.String() - justEdit := env.backendState == ipn.Running.String() && !env.upArgs.forceReauth && env.upArgs.authKeyOrFile == "" && @@ -968,10 +964,10 @@ type upCheckEnv struct { // // mp is the mask of settings actually set, where mp.Prefs is the new // preferences to set, including any values set from implicit flags. 
-func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheckEnv) error { +func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, err error) { if curPrefs.ControlURL == "" { // Don't validate things on initial "up" before a control URL has been set. - return nil + return false, nil } flagIsSet := map[string]bool{} @@ -979,10 +975,13 @@ func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheck flagIsSet[f.Name] = true }) - if len(flagIsSet) == 0 { + if len(flagIsSet) == 0 && + curPrefs.Persist != nil && + curPrefs.Persist.UserProfile.LoginName != "" && + env.backendState != ipn.NeedsLogin.String() { // A bare "tailscale up" is a special case to just // mean bringing the network up without any changes. - return nil + return true, nil } // flagsCur is what flags we'd need to use to keep the exact @@ -1024,7 +1023,7 @@ func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheck missing = append(missing, fmtFlagValueArg(flagName, valCur)) } if len(missing) == 0 { - return nil + return false, nil } // Some previously provided flags are missing. This run of 'tailscale @@ -1057,7 +1056,7 @@ func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheck fmt.Fprintf(&sb, " %s", a) } sb.WriteString("\n\n") - return errors.New(sb.String()) + return false, errors.New(sb.String()) } // applyImplicitPrefs mutates prefs to add implicit preferences for the user operator. From 9048ea25db8064b3833b7a2fcbe4b421e4a820dc Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Tue, 18 Nov 2025 08:04:03 +0000 Subject: [PATCH 1683/1708] ipn/localapi: log calls to localapi (#17880) Updates tailscale/corp#34238 Signed-off-by: James Sanderson --- ipn/localapi/localapi.go | 24 +++++++++++++++++------- ipn/localapi/localapi_test.go | 33 ++++++++++++++++++++++++--------- 2 files changed, 41 insertions(+), 16 deletions(-) diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index ddd55234a..c4ba2a40b 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -264,7 +264,8 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } } - if fn, ok := handlerForPath(r.URL.Path); ok { + if fn, route, ok := handlerForPath(r.URL.Path); ok { + h.logRequest(r.Method, route) fn(h, w, r) } else { http.NotFound(w, r) @@ -300,9 +301,9 @@ func (h *Handler) validHost(hostname string) bool { // handlerForPath returns the LocalAPI handler for the provided Request.URI.Path. // (the path doesn't include any query parameters) -func handlerForPath(urlPath string) (h LocalAPIHandler, ok bool) { +func handlerForPath(urlPath string) (h LocalAPIHandler, route string, ok bool) { if urlPath == "/" { - return (*Handler).serveLocalAPIRoot, true + return (*Handler).serveLocalAPIRoot, "/", true } suff, ok := strings.CutPrefix(urlPath, "/localapi/v0/") if !ok { @@ -310,22 +311,31 @@ func handlerForPath(urlPath string) (h LocalAPIHandler, ok bool) { // to people that they're not necessarily stable APIs. In practice we'll // probably need to keep them pretty stable anyway, but for now treat // them as an internal implementation detail. - return nil, false + return nil, "", false } if fn, ok := handler[suff]; ok { // Here we match exact handler suffixes like "status" or ones with a // slash already in their name, like "tka/status". 
- return fn, true + return fn, "/localapi/v0/" + suff, true } // Otherwise, it might be a prefix match like "files/*" which we look up // by the prefix including first trailing slash. if i := strings.IndexByte(suff, '/'); i != -1 { suff = suff[:i+1] if fn, ok := handler[suff]; ok { - return fn, true + return fn, "/localapi/v0/" + suff, true } } - return nil, false + return nil, "", false +} + +func (h *Handler) logRequest(method, route string) { + switch method { + case httpm.GET, httpm.HEAD, httpm.OPTIONS: + // don't log safe methods + default: + h.Logf("localapi: [%s] %s", method, route) + } } func (*Handler) serveLocalAPIRoot(w http.ResponseWriter, r *http.Request) { diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index d00b4117b..6bb9b5182 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -40,6 +40,19 @@ import ( "tailscale.com/wgengine" ) +func handlerForTest(t testing.TB, h *Handler) *Handler { + if h.Actor == nil { + h.Actor = &ipnauth.TestActor{} + } + if h.b == nil { + h.b = &ipnlocal.LocalBackend{} + } + if h.logf == nil { + h.logf = logger.TestLogger(t) + } + return h +} + func TestValidHost(t *testing.T) { tests := []struct { host string @@ -57,7 +70,7 @@ func TestValidHost(t *testing.T) { for _, test := range tests { t.Run(test.host, func(t *testing.T) { - h := &Handler{} + h := handlerForTest(t, &Handler{}) if got := h.validHost(test.host); got != test.valid { t.Errorf("validHost(%q)=%v, want %v", test.host, got, test.valid) } @@ -68,10 +81,9 @@ func TestValidHost(t *testing.T) { func TestSetPushDeviceToken(t *testing.T) { tstest.Replace(t, &validLocalHostForTesting, true) - h := &Handler{ + h := handlerForTest(t, &Handler{ PermitWrite: true, - b: &ipnlocal.LocalBackend{}, - } + }) s := httptest.NewServer(h) defer s.Close() c := s.Client() @@ -125,9 +137,9 @@ func (b whoIsBackend) PeerCaps(ip netip.Addr) tailcfg.PeerCapMap { // // And https://github.com/tailscale/tailscale/issues/12465 func TestWhoIsArgTypes(t *testing.T) { - h := &Handler{ + h := handlerForTest(t, &Handler{ PermitRead: true, - } + }) match := func() (n tailcfg.NodeView, u tailcfg.UserProfile, ok bool) { return (&tailcfg.Node{ @@ -190,7 +202,10 @@ func TestWhoIsArgTypes(t *testing.T) { func TestShouldDenyServeConfigForGOOSAndUserContext(t *testing.T) { newHandler := func(connIsLocalAdmin bool) *Handler { - return &Handler{Actor: &ipnauth.TestActor{LocalAdmin: connIsLocalAdmin}, b: newTestLocalBackend(t)} + return handlerForTest(t, &Handler{ + Actor: &ipnauth.TestActor{LocalAdmin: connIsLocalAdmin}, + b: newTestLocalBackend(t), + }) } tests := []struct { name string @@ -298,11 +313,11 @@ func TestServeWatchIPNBus(t *testing.T) { for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - h := &Handler{ + h := handlerForTest(t, &Handler{ PermitRead: tt.permitRead, PermitWrite: tt.permitWrite, b: newTestLocalBackend(t), - } + }) s := httptest.NewServer(h) defer s.Close() c := s.Client() From c2e474e729b4b665cf5acafa29f89c11af71ac35 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 17 Nov 2025 18:13:44 +0000 Subject: [PATCH 1684/1708] all: rename variables with lowercase-l/uppercase-I See http://go/no-ell Signed-off-by: Alex Chan Updates #cleanup Change-Id: I8c976b51ce7a60f06315048b1920516129cc1d5d --- appc/appconnector.go | 14 +- atomicfile/atomicfile_test.go | 4 +- chirp/chirp_test.go | 8 +- client/web/src/hooks/exit-nodes.ts | 8 +- clientupdate/clientupdate.go | 8 +- cmd/containerboot/main_test.go | 40 +-- cmd/derper/derper.go | 16 +- 
cmd/k8s-operator/egress-eps.go | 46 ++-- cmd/k8s-operator/egress-pod-readiness.go | 32 +-- cmd/k8s-operator/egress-services-readiness.go | 32 +-- .../egress-services-readiness_test.go | 26 +- cmd/k8s-operator/egress-services.go | 124 ++++----- cmd/k8s-operator/egress-services_test.go | 4 +- cmd/k8s-operator/operator_test.go | 4 +- cmd/k8s-operator/proxygroup_specs.go | 12 +- cmd/k8s-operator/tsrecorder_specs.go | 12 +- cmd/k8s-proxy/internal/config/config.go | 44 +-- cmd/k8s-proxy/internal/config/config_test.go | 14 +- cmd/natc/ippool/consensusippool.go | 4 +- cmd/sniproxy/sniproxy_test.go | 4 +- cmd/stunstamp/stunstamp.go | 28 +- cmd/sync-containers/main.go | 4 +- cmd/tl-longchain/tl-longchain.go | 4 +- drive/driveimpl/connlistener.go | 24 +- drive/driveimpl/connlistener_test.go | 6 +- drive/driveimpl/drive_test.go | 18 +- drive/driveimpl/fileserver.go | 14 +- feature/sdnotify/sdnotify_linux.go | 4 +- ipn/localapi/tailnetlock.go | 4 +- .../apis/v1alpha1/types_proxyclass.go | 8 +- k8s-operator/sessionrecording/ws/conn_test.go | 4 +- kube/egressservices/egressservices.go | 12 +- kube/localclient/local-client.go | 8 +- log/sockstatlog/logger.go | 60 ++-- logpolicy/logpolicy.go | 4 +- logtail/logtail.go | 258 +++++++++--------- logtail/logtail_test.go | 64 ++--- net/art/stride_table.go | 12 +- net/art/stride_table_test.go | 4 +- net/dns/manager_windows_test.go | 4 +- net/ktimeout/ktimeout_linux_test.go | 12 +- net/ktimeout/ktimeout_test.go | 4 +- net/memnet/listener.go | 28 +- net/memnet/listener_test.go | 10 +- net/netaddr/netaddr.go | 2 +- net/netcheck/netcheck.go | 8 +- net/socks5/socks5.go | 6 +- net/speedtest/speedtest_server.go | 4 +- net/speedtest/speedtest_test.go | 10 +- packages/deb/deb.go | 10 +- prober/derp.go | 28 +- prober/prober.go | 34 +-- tka/aum.go | 4 +- tka/sig_test.go | 8 +- tsconsensus/monitor.go | 4 +- tsconsensus/tsconsensus_test.go | 4 +- tsnet/tsnet_test.go | 10 +- tstest/integration/vms/vms_test.go | 6 +- tsweb/tsweb.go | 38 +-- types/geo/quantize_test.go | 16 +- types/key/disco.go | 10 +- types/prefs/list.go | 24 +- types/prefs/prefs_test.go | 20 +- types/prefs/struct_list.go | 24 +- types/prefs/struct_map.go | 8 +- util/limiter/limiter.go | 68 ++--- util/limiter/limiter_test.go | 144 +++++----- util/linuxfw/detector.go | 4 +- util/lru/lru_test.go | 8 +- util/syspolicy/setting/setting.go | 22 +- util/syspolicy/setting/setting_test.go | 4 +- util/winutil/gp/gp_windows_test.go | 10 +- util/winutil/gp/policylock_windows.go | 80 +++--- util/winutil/s4u/lsa_windows.go | 4 +- util/winutil/s4u/s4u_windows.go | 8 +- util/winutil/startupinfo_windows.go | 4 +- util/winutil/winutil_windows_test.go | 4 +- wf/firewall.go | 8 +- wgengine/magicsock/magicsock_test.go | 20 +- wgengine/netstack/link_endpoint.go | 110 ++++---- wgengine/router/osrouter/router_linux_test.go | 18 +- 81 files changed, 924 insertions(+), 924 deletions(-) diff --git a/appc/appconnector.go b/appc/appconnector.go index 5625decbf..d41f9e8ba 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -203,12 +203,12 @@ func NewAppConnector(c Config) *AppConnector { ac.wildcards = c.RouteInfo.Wildcards ac.controlRoutes = c.RouteInfo.Control } - ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, l int64) { - ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, l) - metricStoreRoutes(c, l) + ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, ln int64) { + ac.logf("routeInfo write rate: %d in minute starting at %v (%d 
routes)", c, s, ln) + metricStoreRoutes(c, ln) }) - ac.writeRateDay = newRateLogger(time.Now, 24*time.Hour, func(c int64, s time.Time, l int64) { - ac.logf("routeInfo write rate: %d in 24 hours starting at %v (%d routes)", c, s, l) + ac.writeRateDay = newRateLogger(time.Now, 24*time.Hour, func(c int64, s time.Time, ln int64) { + ac.logf("routeInfo write rate: %d in 24 hours starting at %v (%d routes)", c, s, ln) }) return ac } @@ -510,8 +510,8 @@ func (e *AppConnector) addDomainAddrLocked(domain string, addr netip.Addr) { slices.SortFunc(e.domains[domain], compareAddr) } -func compareAddr(l, r netip.Addr) int { - return l.Compare(r) +func compareAddr(a, b netip.Addr) int { + return a.Compare(b) } // routesWithout returns a without b where a and b diff --git a/atomicfile/atomicfile_test.go b/atomicfile/atomicfile_test.go index 78c93e664..a081c9040 100644 --- a/atomicfile/atomicfile_test.go +++ b/atomicfile/atomicfile_test.go @@ -31,11 +31,11 @@ func TestDoesNotOverwriteIrregularFiles(t *testing.T) { // The least troublesome thing to make that is not a file is a unix socket. // Making a null device sadly requires root. - l, err := net.ListenUnix("unix", &net.UnixAddr{Name: path, Net: "unix"}) + ln, err := net.ListenUnix("unix", &net.UnixAddr{Name: path, Net: "unix"}) if err != nil { t.Fatal(err) } - defer l.Close() + defer ln.Close() err = WriteFile(path, []byte("hello"), 0644) if err == nil { diff --git a/chirp/chirp_test.go b/chirp/chirp_test.go index a57ef224b..c545c277d 100644 --- a/chirp/chirp_test.go +++ b/chirp/chirp_test.go @@ -24,7 +24,7 @@ type fakeBIRD struct { func newFakeBIRD(t *testing.T, protocols ...string) *fakeBIRD { sock := filepath.Join(t.TempDir(), "sock") - l, err := net.Listen("unix", sock) + ln, err := net.Listen("unix", sock) if err != nil { t.Fatal(err) } @@ -33,7 +33,7 @@ func newFakeBIRD(t *testing.T, protocols ...string) *fakeBIRD { pe[p] = false } return &fakeBIRD{ - Listener: l, + Listener: ln, protocolsEnabled: pe, sock: sock, } @@ -123,12 +123,12 @@ type hangingListener struct { func newHangingListener(t *testing.T) *hangingListener { sock := filepath.Join(t.TempDir(), "sock") - l, err := net.Listen("unix", sock) + ln, err := net.Listen("unix", sock) if err != nil { t.Fatal(err) } return &hangingListener{ - Listener: l, + Listener: ln, t: t, done: make(chan struct{}), sock: sock, diff --git a/client/web/src/hooks/exit-nodes.ts b/client/web/src/hooks/exit-nodes.ts index b3ce0a9fa..5e47fbc22 100644 --- a/client/web/src/hooks/exit-nodes.ts +++ b/client/web/src/hooks/exit-nodes.ts @@ -66,7 +66,7 @@ export default function useExitNodes(node: NodeData, filter?: string) { // match from a list of exit node `options` to `nodes`. 
const addBestMatchNode = ( options: ExitNode[], - name: (l: ExitNodeLocation) => string + name: (loc: ExitNodeLocation) => string ) => { const bestNode = highestPriorityNode(options) if (!bestNode || !bestNode.Location) { @@ -86,7 +86,7 @@ export default function useExitNodes(node: NodeData, filter?: string) { locationNodesMap.forEach( // add one node per country (countryNodes) => - addBestMatchNode(flattenMap(countryNodes), (l) => l.Country) + addBestMatchNode(flattenMap(countryNodes), (loc) => loc.Country) ) } else { // Otherwise, show the best match on a city-level, @@ -97,12 +97,12 @@ export default function useExitNodes(node: NodeData, filter?: string) { countryNodes.forEach( // add one node per city (cityNodes) => - addBestMatchNode(cityNodes, (l) => `${l.Country}: ${l.City}`) + addBestMatchNode(cityNodes, (loc) => `${loc.Country}: ${loc.City}`) ) // add the "Country: Best Match" node addBestMatchNode( flattenMap(countryNodes), - (l) => `${l.Country}: Best Match` + (loc) => `${loc.Country}: Best Match` ) }) } diff --git a/clientupdate/clientupdate.go b/clientupdate/clientupdate.go index 84b289615..3a0a8d03e 100644 --- a/clientupdate/clientupdate.go +++ b/clientupdate/clientupdate.go @@ -418,13 +418,13 @@ func parseSynoinfo(path string) (string, error) { // Extract the CPU in the middle (88f6282 in the above example). s := bufio.NewScanner(f) for s.Scan() { - l := s.Text() - if !strings.HasPrefix(l, "unique=") { + line := s.Text() + if !strings.HasPrefix(line, "unique=") { continue } - parts := strings.SplitN(l, "_", 3) + parts := strings.SplitN(line, "_", 3) if len(parts) != 3 { - return "", fmt.Errorf(`malformed %q: found %q, expected format like 'unique="synology_$cpu_$model'`, path, l) + return "", fmt.Errorf(`malformed %q: found %q, expected format like 'unique="synology_$cpu_$model'`, path, line) } return parts[1], nil } diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index 96feef682..f92f35333 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -1287,8 +1287,8 @@ type localAPI struct { notify *ipn.Notify } -func (l *localAPI) Start() error { - path := filepath.Join(l.FSRoot, "tmp/tailscaled.sock.fake") +func (lc *localAPI) Start() error { + path := filepath.Join(lc.FSRoot, "tmp/tailscaled.sock.fake") if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { return err } @@ -1298,30 +1298,30 @@ func (l *localAPI) Start() error { return err } - l.srv = &http.Server{ - Handler: l, + lc.srv = &http.Server{ + Handler: lc, } - l.Path = path - l.cond = sync.NewCond(&l.Mutex) - go l.srv.Serve(ln) + lc.Path = path + lc.cond = sync.NewCond(&lc.Mutex) + go lc.srv.Serve(ln) return nil } -func (l *localAPI) Close() { - l.srv.Close() +func (lc *localAPI) Close() { + lc.srv.Close() } -func (l *localAPI) Notify(n *ipn.Notify) { +func (lc *localAPI) Notify(n *ipn.Notify) { if n == nil { return } - l.Lock() - defer l.Unlock() - l.notify = n - l.cond.Broadcast() + lc.Lock() + defer lc.Unlock() + lc.notify = n + lc.cond.Broadcast() } -func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (lc *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/localapi/v0/serve-config": if r.Method != "POST" { @@ -1348,11 +1348,11 @@ func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { f.Flush() } enc := json.NewEncoder(w) - l.Lock() - defer l.Unlock() + lc.Lock() + defer lc.Unlock() for { - if l.notify != nil { - if err := enc.Encode(l.notify); err != nil { + if lc.notify 
!= nil { + if err := enc.Encode(lc.notify); err != nil { // Usually broken pipe as the test client disconnects. return } @@ -1360,7 +1360,7 @@ func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { f.Flush() } } - l.cond.Wait() + lc.cond.Wait() } } diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 857d7def3..f177986a5 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -481,32 +481,32 @@ func newRateLimitedListener(ln net.Listener, limit rate.Limit, burst int) *rateL return &rateLimitedListener{Listener: ln, lim: rate.NewLimiter(limit, burst)} } -func (l *rateLimitedListener) ExpVar() expvar.Var { +func (ln *rateLimitedListener) ExpVar() expvar.Var { m := new(metrics.Set) - m.Set("counter_accepted_connections", &l.numAccepts) - m.Set("counter_rejected_connections", &l.numRejects) + m.Set("counter_accepted_connections", &ln.numAccepts) + m.Set("counter_rejected_connections", &ln.numRejects) return m } var errLimitedConn = errors.New("cannot accept connection; rate limited") -func (l *rateLimitedListener) Accept() (net.Conn, error) { +func (ln *rateLimitedListener) Accept() (net.Conn, error) { // Even under a rate limited situation, we accept the connection immediately // and close it, rather than being slow at accepting new connections. // This provides two benefits: 1) it signals to the client that something // is going on on the server, and 2) it prevents new connections from // piling up and occupying resources in the OS kernel. // The client will retry as needing (with backoffs in place). - cn, err := l.Listener.Accept() + cn, err := ln.Listener.Accept() if err != nil { return nil, err } - if !l.lim.Allow() { - l.numRejects.Add(1) + if !ln.lim.Allow() { + ln.numRejects.Add(1) cn.Close() return nil, errLimitedConn } - l.numAccepts.Add(1) + ln.numAccepts.Add(1) return cn, nil } diff --git a/cmd/k8s-operator/egress-eps.go b/cmd/k8s-operator/egress-eps.go index 3441e12ba..88da99353 100644 --- a/cmd/k8s-operator/egress-eps.go +++ b/cmd/k8s-operator/egress-eps.go @@ -36,21 +36,21 @@ type egressEpsReconciler struct { // It compares tailnet service state stored in egress proxy state Secrets by containerboot with the desired // configuration stored in proxy-cfg ConfigMap to determine if the endpoint is ready. 
func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { - l := er.logger.With("Service", req.NamespacedName) - l.Debugf("starting reconcile") - defer l.Debugf("reconcile finished") + lg := er.logger.With("Service", req.NamespacedName) + lg.Debugf("starting reconcile") + defer lg.Debugf("reconcile finished") eps := new(discoveryv1.EndpointSlice) err = er.Get(ctx, req.NamespacedName, eps) if apierrors.IsNotFound(err) { - l.Debugf("EndpointSlice not found") + lg.Debugf("EndpointSlice not found") return reconcile.Result{}, nil } if err != nil { return reconcile.Result{}, fmt.Errorf("failed to get EndpointSlice: %w", err) } if !eps.DeletionTimestamp.IsZero() { - l.Debugf("EnpointSlice is being deleted") + lg.Debugf("EnpointSlice is being deleted") return res, nil } @@ -64,7 +64,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ } err = er.Get(ctx, client.ObjectKeyFromObject(svc), svc) if apierrors.IsNotFound(err) { - l.Infof("ExternalName Service %s/%s not found, perhaps it was deleted", svc.Namespace, svc.Name) + lg.Infof("ExternalName Service %s/%s not found, perhaps it was deleted", svc.Namespace, svc.Name) return res, nil } if err != nil { @@ -77,7 +77,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ oldEps := eps.DeepCopy() tailnetSvc := tailnetSvcName(svc) - l = l.With("tailnet-service-name", tailnetSvc) + lg = lg.With("tailnet-service-name", tailnetSvc) // Retrieve the desired tailnet service configuration from the ConfigMap. proxyGroupName := eps.Labels[labelProxyGroup] @@ -88,12 +88,12 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ if cfgs == nil { // TODO(irbekrm): this path would be hit if egress service was once exposed on a ProxyGroup that later // got deleted. Probably the EndpointSlices then need to be deleted too- need to rethink this flow. - l.Debugf("No egress config found, likely because ProxyGroup has not been created") + lg.Debugf("No egress config found, likely because ProxyGroup has not been created") return res, nil } cfg, ok := (*cfgs)[tailnetSvc] if !ok { - l.Infof("[unexpected] configuration for tailnet service %s not found", tailnetSvc) + lg.Infof("[unexpected] configuration for tailnet service %s not found", tailnetSvc) return res, nil } @@ -105,7 +105,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ } newEndpoints := make([]discoveryv1.Endpoint, 0) for _, pod := range podList.Items { - ready, err := er.podIsReadyToRouteTraffic(ctx, pod, &cfg, tailnetSvc, l) + ready, err := er.podIsReadyToRouteTraffic(ctx, pod, &cfg, tailnetSvc, lg) if err != nil { return res, fmt.Errorf("error verifying if Pod is ready to route traffic: %w", err) } @@ -130,7 +130,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ // run a cleanup for deleted Pods etc. eps.Endpoints = newEndpoints if !reflect.DeepEqual(eps, oldEps) { - l.Infof("Updating EndpointSlice to ensure traffic is routed to ready proxy Pods") + lg.Infof("Updating EndpointSlice to ensure traffic is routed to ready proxy Pods") if err := er.Update(ctx, eps); err != nil { return res, fmt.Errorf("error updating EndpointSlice: %w", err) } @@ -154,11 +154,11 @@ func podIPv4(pod *corev1.Pod) (string, error) { // podIsReadyToRouteTraffic returns true if it appears that the proxy Pod has configured firewall rules to be able to // route traffic to the given tailnet service. 
It retrieves the proxy's state Secret and compares the tailnet service // status written there to the desired service configuration. -func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod corev1.Pod, cfg *egressservices.Config, tailnetSvcName string, l *zap.SugaredLogger) (bool, error) { - l = l.With("proxy_pod", pod.Name) - l.Debugf("checking whether proxy is ready to route to egress service") +func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod corev1.Pod, cfg *egressservices.Config, tailnetSvcName string, lg *zap.SugaredLogger) (bool, error) { + lg = lg.With("proxy_pod", pod.Name) + lg.Debugf("checking whether proxy is ready to route to egress service") if !pod.DeletionTimestamp.IsZero() { - l.Debugf("proxy Pod is being deleted, ignore") + lg.Debugf("proxy Pod is being deleted, ignore") return false, nil } podIP, err := podIPv4(&pod) @@ -166,7 +166,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod return false, fmt.Errorf("error determining Pod IP address: %v", err) } if podIP == "" { - l.Infof("[unexpected] Pod does not have an IPv4 address, and IPv6 is not currently supported") + lg.Infof("[unexpected] Pod does not have an IPv4 address, and IPv6 is not currently supported") return false, nil } stateS := &corev1.Secret{ @@ -177,7 +177,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod } err = er.Get(ctx, client.ObjectKeyFromObject(stateS), stateS) if apierrors.IsNotFound(err) { - l.Debugf("proxy does not have a state Secret, waiting...") + lg.Debugf("proxy does not have a state Secret, waiting...") return false, nil } if err != nil { @@ -185,7 +185,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod } svcStatusBS := stateS.Data[egressservices.KeyEgressServices] if len(svcStatusBS) == 0 { - l.Debugf("proxy's state Secret does not contain egress services status, waiting...") + lg.Debugf("proxy's state Secret does not contain egress services status, waiting...") return false, nil } svcStatus := &egressservices.Status{} @@ -193,22 +193,22 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod return false, fmt.Errorf("error unmarshalling egress service status: %w", err) } if !strings.EqualFold(podIP, svcStatus.PodIPv4) { - l.Infof("proxy's egress service status is for Pod IP %s, current proxy's Pod IP %s, waiting for the proxy to reconfigure...", svcStatus.PodIPv4, podIP) + lg.Infof("proxy's egress service status is for Pod IP %s, current proxy's Pod IP %s, waiting for the proxy to reconfigure...", svcStatus.PodIPv4, podIP) return false, nil } st, ok := (*svcStatus).Services[tailnetSvcName] if !ok { - l.Infof("proxy's state Secret does not have egress service status, waiting...") + lg.Infof("proxy's state Secret does not have egress service status, waiting...") return false, nil } if !reflect.DeepEqual(cfg.TailnetTarget, st.TailnetTarget) { - l.Infof("proxy has configured egress service for tailnet target %v, current target is %v, waiting for proxy to reconfigure...", st.TailnetTarget, cfg.TailnetTarget) + lg.Infof("proxy has configured egress service for tailnet target %v, current target is %v, waiting for proxy to reconfigure...", st.TailnetTarget, cfg.TailnetTarget) return false, nil } if !reflect.DeepEqual(cfg.Ports, st.Ports) { - l.Debugf("proxy has configured egress service for ports %#+v, wants ports %#+v, waiting for proxy to reconfigure", st.Ports, cfg.Ports) + lg.Debugf("proxy has configured 
egress service for ports %#+v, wants ports %#+v, waiting for proxy to reconfigure", st.Ports, cfg.Ports) return false, nil } - l.Debugf("proxy is ready to route traffic to egress service") + lg.Debugf("proxy is ready to route traffic to egress service") return true, nil } diff --git a/cmd/k8s-operator/egress-pod-readiness.go b/cmd/k8s-operator/egress-pod-readiness.go index f3a812ecb..a732e0861 100644 --- a/cmd/k8s-operator/egress-pod-readiness.go +++ b/cmd/k8s-operator/egress-pod-readiness.go @@ -71,9 +71,9 @@ type egressPodsReconciler struct { // If the Pod does not appear to be serving the health check endpoint (pre-v1.80 proxies), the reconciler just sets the // readiness condition for backwards compatibility reasons. func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { - l := er.logger.With("Pod", req.NamespacedName) - l.Debugf("starting reconcile") - defer l.Debugf("reconcile finished") + lg := er.logger.With("Pod", req.NamespacedName) + lg.Debugf("starting reconcile") + defer lg.Debugf("reconcile finished") pod := new(corev1.Pod) err = er.Get(ctx, req.NamespacedName, pod) @@ -84,11 +84,11 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req return reconcile.Result{}, fmt.Errorf("failed to get Pod: %w", err) } if !pod.DeletionTimestamp.IsZero() { - l.Debugf("Pod is being deleted, do nothing") + lg.Debugf("Pod is being deleted, do nothing") return res, nil } if pod.Labels[LabelParentType] != proxyTypeProxyGroup { - l.Infof("[unexpected] reconciler called for a Pod that is not a ProxyGroup Pod") + lg.Infof("[unexpected] reconciler called for a Pod that is not a ProxyGroup Pod") return res, nil } @@ -97,7 +97,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req if !slices.ContainsFunc(pod.Spec.ReadinessGates, func(r corev1.PodReadinessGate) bool { return r.ConditionType == tsEgressReadinessGate }) { - l.Debug("Pod does not have egress readiness gate set, skipping") + lg.Debug("Pod does not have egress readiness gate set, skipping") return res, nil } @@ -107,7 +107,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req return res, fmt.Errorf("error getting ProxyGroup %q: %w", proxyGroupName, err) } if pg.Spec.Type != typeEgress { - l.Infof("[unexpected] reconciler called for %q ProxyGroup Pod", pg.Spec.Type) + lg.Infof("[unexpected] reconciler called for %q ProxyGroup Pod", pg.Spec.Type) return res, nil } // Get all ClusterIP Services for all egress targets exposed to cluster via this ProxyGroup. 
@@ -125,7 +125,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req return c.Type == tsEgressReadinessGate }) if idx != -1 { - l.Debugf("Pod is already ready, do nothing") + lg.Debugf("Pod is already ready, do nothing") return res, nil } @@ -134,7 +134,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req for _, svc := range svcs.Items { s := svc go func() { - ll := l.With("service_name", s.Name) + ll := lg.With("service_name", s.Name) d := retrieveClusterDomain(er.tsNamespace, ll) healthCheckAddr := healthCheckForSvc(&s, d) if healthCheckAddr == "" { @@ -178,22 +178,22 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req return res, fmt.Errorf("error verifying conectivity: %w", err) } if rm := routesMissing.Load(); rm { - l.Info("Pod is not yet added as an endpoint for all egress targets, waiting...") + lg.Info("Pod is not yet added as an endpoint for all egress targets, waiting...") return reconcile.Result{RequeueAfter: shortRequeue}, nil } - if err := er.setPodReady(ctx, pod, l); err != nil { + if err := er.setPodReady(ctx, pod, lg); err != nil { return res, fmt.Errorf("error setting Pod as ready: %w", err) } return res, nil } -func (er *egressPodsReconciler) setPodReady(ctx context.Context, pod *corev1.Pod, l *zap.SugaredLogger) error { +func (er *egressPodsReconciler) setPodReady(ctx context.Context, pod *corev1.Pod, lg *zap.SugaredLogger) error { if slices.ContainsFunc(pod.Status.Conditions, func(c corev1.PodCondition) bool { return c.Type == tsEgressReadinessGate }) { return nil } - l.Infof("Pod is ready to route traffic to all egress targets") + lg.Infof("Pod is ready to route traffic to all egress targets") pod.Status.Conditions = append(pod.Status.Conditions, corev1.PodCondition{ Type: tsEgressReadinessGate, Status: corev1.ConditionTrue, @@ -216,11 +216,11 @@ const ( ) // lookupPodRouteViaSvc attempts to reach a Pod using a health check endpoint served by a Service and returns the state of the health check. 
-func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *corev1.Pod, healthCheckAddr string, l *zap.SugaredLogger) (healthCheckState, error) { +func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *corev1.Pod, healthCheckAddr string, lg *zap.SugaredLogger) (healthCheckState, error) { if !slices.ContainsFunc(pod.Spec.Containers[0].Env, func(e corev1.EnvVar) bool { return e.Name == "TS_ENABLE_HEALTH_CHECK" && e.Value == "true" }) { - l.Debugf("Pod does not have health check enabled, unable to verify if it is currently routable via Service") + lg.Debugf("Pod does not have health check enabled, unable to verify if it is currently routable via Service") return cannotVerify, nil } wantsIP, err := podIPv4(pod) @@ -248,7 +248,7 @@ func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *c defer resp.Body.Close() gotIP := resp.Header.Get(kubetypes.PodIPv4Header) if gotIP == "" { - l.Debugf("Health check does not return Pod's IP header, unable to verify if Pod is currently routable via Service") + lg.Debugf("Health check does not return Pod's IP header, unable to verify if Pod is currently routable via Service") return cannotVerify, nil } if !strings.EqualFold(wantsIP, gotIP) { diff --git a/cmd/k8s-operator/egress-services-readiness.go b/cmd/k8s-operator/egress-services-readiness.go index ecf99b63c..80f3c7d28 100644 --- a/cmd/k8s-operator/egress-services-readiness.go +++ b/cmd/k8s-operator/egress-services-readiness.go @@ -47,13 +47,13 @@ type egressSvcsReadinessReconciler struct { // route traffic to the target. It compares proxy Pod IPs with the endpoints set on the EndpointSlice for the egress // service to determine how many replicas are currently able to route traffic. func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { - l := esrr.logger.With("Service", req.NamespacedName) - l.Debugf("starting reconcile") - defer l.Debugf("reconcile finished") + lg := esrr.logger.With("Service", req.NamespacedName) + lg.Debugf("starting reconcile") + defer lg.Debugf("reconcile finished") svc := new(corev1.Service) if err = esrr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) { - l.Debugf("Service not found") + lg.Debugf("Service not found") return res, nil } else if err != nil { return res, fmt.Errorf("failed to get Service: %w", err) @@ -64,7 +64,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re ) oldStatus := svc.Status.DeepCopy() defer func() { - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, lg) if !apiequality.Semantic.DeepEqual(oldStatus, &svc.Status) { err = errors.Join(err, esrr.Status().Update(ctx, svc)) } @@ -79,7 +79,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re return res, err } if eps == nil { - l.Infof("EndpointSlice for Service does not yet exist, waiting...") + lg.Infof("EndpointSlice for Service does not yet exist, waiting...") reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady st = metav1.ConditionFalse return res, nil @@ -91,7 +91,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re } err = esrr.Get(ctx, client.ObjectKeyFromObject(pg), pg) if apierrors.IsNotFound(err) { - l.Infof("ProxyGroup for Service does not exist, waiting...") + lg.Infof("ProxyGroup for Service 
does not exist, waiting...") reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady st = metav1.ConditionFalse return res, nil @@ -103,7 +103,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re return res, err } if !tsoperator.ProxyGroupAvailable(pg) { - l.Infof("ProxyGroup for Service is not ready, waiting...") + lg.Infof("ProxyGroup for Service is not ready, waiting...") reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady st = metav1.ConditionFalse return res, nil @@ -111,7 +111,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re replicas := pgReplicas(pg) if replicas == 0 { - l.Infof("ProxyGroup replicas set to 0") + lg.Infof("ProxyGroup replicas set to 0") reason, msg = reasonNoProxies, reasonNoProxies st = metav1.ConditionFalse return res, nil @@ -128,16 +128,16 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re return res, err } if pod == nil { - l.Warnf("[unexpected] ProxyGroup is ready, but replica %d was not found", i) + lg.Warnf("[unexpected] ProxyGroup is ready, but replica %d was not found", i) reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady return res, nil } - l.Debugf("looking at Pod with IPs %v", pod.Status.PodIPs) + lg.Debugf("looking at Pod with IPs %v", pod.Status.PodIPs) ready := false for _, ep := range eps.Endpoints { - l.Debugf("looking at endpoint with addresses %v", ep.Addresses) - if endpointReadyForPod(&ep, pod, l) { - l.Debugf("endpoint is ready for Pod") + lg.Debugf("looking at endpoint with addresses %v", ep.Addresses) + if endpointReadyForPod(&ep, pod, lg) { + lg.Debugf("endpoint is ready for Pod") ready = true break } @@ -163,10 +163,10 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re // endpointReadyForPod returns true if the endpoint is for the Pod's IPv4 address and is ready to serve traffic. // Endpoint must not be nil. -func endpointReadyForPod(ep *discoveryv1.Endpoint, pod *corev1.Pod, l *zap.SugaredLogger) bool { +func endpointReadyForPod(ep *discoveryv1.Endpoint, pod *corev1.Pod, lg *zap.SugaredLogger) bool { podIP, err := podIPv4(pod) if err != nil { - l.Warnf("[unexpected] error retrieving Pod's IPv4 address: %v", err) + lg.Warnf("[unexpected] error retrieving Pod's IPv4 address: %v", err) return false } // Currently we only ever set a single address on and Endpoint and nothing else is meant to modify this. 
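For readers following the readiness logic in the file above: the reconciler compares the proxy Pod's IPv4 address against the addresses and ready condition recorded on the egress EndpointSlice to decide how many replicas can route traffic. Below is a minimal sketch of that comparison only. It is not the code from this patch: the package name, the endpointMatchesPod name, and the simplified podIPv4 helper are illustrative assumptions; it assumes only the standard k8s.io/api core/v1 and discovery/v1 types.

package sketch

import (
	"slices"

	corev1 "k8s.io/api/core/v1"
	discoveryv1 "k8s.io/api/discovery/v1"
)

// podIPv4 is a simplified stand-in for the operator's helper; it returns the
// Pod's first reported IP, or "" if none is set yet.
func podIPv4(pod *corev1.Pod) string {
	if len(pod.Status.PodIPs) > 0 {
		return pod.Status.PodIPs[0].IP
	}
	return ""
}

// endpointMatchesPod reports whether the endpoint carries the Pod's IP
// address and is marked ready to serve traffic.
func endpointMatchesPod(ep *discoveryv1.Endpoint, pod *corev1.Pod) bool {
	ip := podIPv4(pod)
	if ip == "" {
		return false
	}
	ready := ep.Conditions.Ready != nil && *ep.Conditions.Ready
	return ready && slices.Contains(ep.Addresses, ip)
}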
diff --git a/cmd/k8s-operator/egress-services-readiness_test.go b/cmd/k8s-operator/egress-services-readiness_test.go index f80759aef..fdff4fafa 100644 --- a/cmd/k8s-operator/egress-services-readiness_test.go +++ b/cmd/k8s-operator/egress-services-readiness_test.go @@ -49,12 +49,12 @@ func TestEgressServiceReadiness(t *testing.T) { }, } fakeClusterIPSvc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "my-app", Namespace: "operator-ns"}} - l := egressSvcEpsLabels(egressSvc, fakeClusterIPSvc) + labels := egressSvcEpsLabels(egressSvc, fakeClusterIPSvc) eps := &discoveryv1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: "my-app", Namespace: "operator-ns", - Labels: l, + Labels: labels, }, AddressType: discoveryv1.AddressTypeIPv4, } @@ -118,26 +118,26 @@ func TestEgressServiceReadiness(t *testing.T) { }) } -func setClusterNotReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger) { - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonClusterResourcesNotReady, reasonClusterResourcesNotReady, cl, l) +func setClusterNotReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger) { + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonClusterResourcesNotReady, reasonClusterResourcesNotReady, cl, lg) } -func setNotReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replicas int32) { +func setNotReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger, replicas int32) { msg := fmt.Sprintf(msgReadyToRouteTemplate, 0, replicas) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonNotReady, msg, cl, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonNotReady, msg, cl, lg) } -func setReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replicas, readyReplicas int32) { +func setReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger, replicas, readyReplicas int32) { reason := reasonPartiallyReady if readyReplicas == replicas { reason = reasonReady } msg := fmt.Sprintf(msgReadyToRouteTemplate, readyReplicas, replicas) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionTrue, reason, msg, cl, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionTrue, reason, msg, cl, lg) } -func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, l *zap.SugaredLogger) { - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, l) +func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, lg *zap.SugaredLogger) { + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, lg) } func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1.EndpointSlice) { @@ -153,14 +153,14 @@ func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1 } func pod(pg *tsapi.ProxyGroup, ordinal int32) *corev1.Pod { - l := pgLabels(pg.Name, nil) - l[appsv1.PodIndexLabel] = fmt.Sprintf("%d", ordinal) + labels := pgLabels(pg.Name, nil) + labels[appsv1.PodIndexLabel] = fmt.Sprintf("%d", ordinal) ip := fmt.Sprintf("10.0.0.%d", ordinal) return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%d", pg.Name, ordinal), Namespace: "operator-ns", - Labels: l, + Labels: labels, }, Status: corev1.PodStatus{ PodIPs: []corev1.PodIP{{IP: ip}}, diff --git a/cmd/k8s-operator/egress-services.go 
b/cmd/k8s-operator/egress-services.go index ca6562071..05be8efed 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -98,12 +98,12 @@ type egressSvcsReconciler struct { // - updates the egress service config in a ConfigMap mounted to the ProxyGroup proxies with the tailnet target and the // portmappings. func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { - l := esr.logger.With("Service", req.NamespacedName) - defer l.Info("reconcile finished") + lg := esr.logger.With("Service", req.NamespacedName) + defer lg.Info("reconcile finished") svc := new(corev1.Service) if err = esr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) { - l.Info("Service not found") + lg.Info("Service not found") return res, nil } else if err != nil { return res, fmt.Errorf("failed to get Service: %w", err) @@ -111,7 +111,7 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re // Name of the 'egress service', meaning the tailnet target. tailnetSvc := tailnetSvcName(svc) - l = l.With("tailnet-service", tailnetSvc) + lg = lg.With("tailnet-service", tailnetSvc) // Note that resources for egress Services are only cleaned up when the // Service is actually deleted (and not if, for example, user decides to @@ -119,8 +119,8 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re // assume that the egress ExternalName Services are always created for // Tailscale operator specifically. if !svc.DeletionTimestamp.IsZero() { - l.Info("Service is being deleted, ensuring resource cleanup") - return res, esr.maybeCleanup(ctx, svc, l) + lg.Info("Service is being deleted, ensuring resource cleanup") + return res, esr.maybeCleanup(ctx, svc, lg) } oldStatus := svc.Status.DeepCopy() @@ -131,7 +131,7 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re }() // Validate the user-created ExternalName Service and the associated ProxyGroup. 
- if ok, err := esr.validateClusterResources(ctx, svc, l); err != nil { + if ok, err := esr.validateClusterResources(ctx, svc, lg); err != nil { return res, fmt.Errorf("error validating cluster resources: %w", err) } else if !ok { return res, nil @@ -141,8 +141,8 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re svc.Finalizers = append(svc.Finalizers, FinalizerName) if err := esr.updateSvcSpec(ctx, svc); err != nil { err := fmt.Errorf("failed to add finalizer: %w", err) - r := svcConfiguredReason(svc, false, l) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, l) + r := svcConfiguredReason(svc, false, lg) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, lg) return res, err } esr.mu.Lock() @@ -151,16 +151,16 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re esr.mu.Unlock() } - if err := esr.maybeCleanupProxyGroupConfig(ctx, svc, l); err != nil { + if err := esr.maybeCleanupProxyGroupConfig(ctx, svc, lg); err != nil { err = fmt.Errorf("cleaning up resources for previous ProxyGroup failed: %w", err) - r := svcConfiguredReason(svc, false, l) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, l) + r := svcConfiguredReason(svc, false, lg) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, lg) return res, err } - if err := esr.maybeProvision(ctx, svc, l); err != nil { + if err := esr.maybeProvision(ctx, svc, lg); err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { - l.Infof("optimistic lock error, retrying: %s", err) + lg.Infof("optimistic lock error, retrying: %s", err) } else { return reconcile.Result{}, err } @@ -169,15 +169,15 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re return res, nil } -func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (err error) { - r := svcConfiguredReason(svc, false, l) +func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) (err error) { + r := svcConfiguredReason(svc, false, lg) st := metav1.ConditionFalse defer func() { msg := r if st != metav1.ConditionTrue && err != nil { msg = err.Error() } - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, st, r, msg, esr.clock, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, st, r, msg, esr.clock, lg) }() crl := egressSvcChildResourceLabels(svc) @@ -189,36 +189,36 @@ func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1 if clusterIPSvc == nil { clusterIPSvc = esr.clusterIPSvcForEgress(crl) } - upToDate := svcConfigurationUpToDate(svc, l) + upToDate := svcConfigurationUpToDate(svc, lg) provisioned := true if !upToDate { - if clusterIPSvc, provisioned, err = esr.provision(ctx, svc.Annotations[AnnotationProxyGroup], svc, clusterIPSvc, l); err != nil { + if clusterIPSvc, provisioned, err = esr.provision(ctx, svc.Annotations[AnnotationProxyGroup], svc, clusterIPSvc, lg); err != nil { return err } } if !provisioned { - l.Infof("unable to provision cluster resources") + lg.Infof("unable to provision cluster resources") return nil } // Update ExternalName Service to point at the ClusterIP Service. 
- clusterDomain := retrieveClusterDomain(esr.tsNamespace, l) + clusterDomain := retrieveClusterDomain(esr.tsNamespace, lg) clusterIPSvcFQDN := fmt.Sprintf("%s.%s.svc.%s", clusterIPSvc.Name, clusterIPSvc.Namespace, clusterDomain) if svc.Spec.ExternalName != clusterIPSvcFQDN { - l.Infof("Configuring ExternalName Service to point to ClusterIP Service %s", clusterIPSvcFQDN) + lg.Infof("Configuring ExternalName Service to point to ClusterIP Service %s", clusterIPSvcFQDN) svc.Spec.ExternalName = clusterIPSvcFQDN if err = esr.updateSvcSpec(ctx, svc); err != nil { err = fmt.Errorf("error updating ExternalName Service: %w", err) return err } } - r = svcConfiguredReason(svc, true, l) + r = svcConfiguredReason(svc, true, lg) st = metav1.ConditionTrue return nil } -func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName string, svc, clusterIPSvc *corev1.Service, l *zap.SugaredLogger) (*corev1.Service, bool, error) { - l.Infof("updating configuration...") +func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName string, svc, clusterIPSvc *corev1.Service, lg *zap.SugaredLogger) (*corev1.Service, bool, error) { + lg.Infof("updating configuration...") usedPorts, err := esr.usedPortsForPG(ctx, proxyGroupName) if err != nil { return nil, false, fmt.Errorf("error calculating used ports for ProxyGroup %s: %w", proxyGroupName, err) @@ -246,7 +246,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s } } if !found { - l.Debugf("portmapping %s:%d -> %s:%d is no longer required, removing", pm.Protocol, pm.TargetPort.IntVal, pm.Protocol, pm.Port) + lg.Debugf("portmapping %s:%d -> %s:%d is no longer required, removing", pm.Protocol, pm.TargetPort.IntVal, pm.Protocol, pm.Port) clusterIPSvc.Spec.Ports = slices.Delete(clusterIPSvc.Spec.Ports, i, i+1) } } @@ -277,7 +277,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s return nil, false, fmt.Errorf("unable to allocate additional ports on ProxyGroup %s, %d ports already used. 
Create another ProxyGroup or open an issue if you believe this is unexpected.", proxyGroupName, maxPorts) } p := unusedPort(usedPorts) - l.Debugf("mapping tailnet target port %d to container port %d", wantsPM.Port, p) + lg.Debugf("mapping tailnet target port %d to container port %d", wantsPM.Port, p) usedPorts.Insert(p) clusterIPSvc.Spec.Ports = append(clusterIPSvc.Spec.Ports, corev1.ServicePort{ Name: wantsPM.Name, @@ -343,14 +343,14 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s return nil, false, fmt.Errorf("error retrieving egress services configuration: %w", err) } if cm == nil { - l.Info("ConfigMap not yet created, waiting..") + lg.Info("ConfigMap not yet created, waiting..") return nil, false, nil } tailnetSvc := tailnetSvcName(svc) gotCfg := (*cfgs)[tailnetSvc] - wantsCfg := egressSvcCfg(svc, clusterIPSvc, esr.tsNamespace, l) + wantsCfg := egressSvcCfg(svc, clusterIPSvc, esr.tsNamespace, lg) if !reflect.DeepEqual(gotCfg, wantsCfg) { - l.Debugf("updating egress services ConfigMap %s", cm.Name) + lg.Debugf("updating egress services ConfigMap %s", cm.Name) mak.Set(cfgs, tailnetSvc, wantsCfg) bs, err := json.Marshal(cfgs) if err != nil { @@ -361,7 +361,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s return nil, false, fmt.Errorf("error updating egress services ConfigMap: %w", err) } } - l.Infof("egress service configuration has been updated") + lg.Infof("egress service configuration has been updated") return clusterIPSvc, true, nil } @@ -402,7 +402,7 @@ func (esr *egressSvcsReconciler) maybeCleanup(ctx context.Context, svc *corev1.S return nil } -func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) error { +func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) error { wantsProxyGroup := svc.Annotations[AnnotationProxyGroup] cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured) if cond == nil { @@ -416,7 +416,7 @@ func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Contex return nil } esr.logger.Infof("egress Service configured on ProxyGroup %s, wants ProxyGroup %s, cleaning up...", ss[2], wantsProxyGroup) - if err := esr.ensureEgressSvcCfgDeleted(ctx, svc, l); err != nil { + if err := esr.ensureEgressSvcCfgDeleted(ctx, svc, lg); err != nil { return fmt.Errorf("error deleting egress service config: %w", err) } return nil @@ -471,17 +471,17 @@ func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context, Namespace: esr.tsNamespace, }, } - l := logger.With("ConfigMap", client.ObjectKeyFromObject(cm)) - l.Debug("ensuring that egress service configuration is removed from proxy config") + lggr := logger.With("ConfigMap", client.ObjectKeyFromObject(cm)) + lggr.Debug("ensuring that egress service configuration is removed from proxy config") if err := esr.Get(ctx, client.ObjectKeyFromObject(cm), cm); apierrors.IsNotFound(err) { - l.Debugf("ConfigMap not found") + lggr.Debugf("ConfigMap not found") return nil } else if err != nil { return fmt.Errorf("error retrieving ConfigMap: %w", err) } bs := cm.BinaryData[egressservices.KeyEgressServices] if len(bs) == 0 { - l.Debugf("ConfigMap does not contain egress service configs") + lggr.Debugf("ConfigMap does not contain egress service configs") return nil } cfgs := &egressservices.Configs{} @@ -491,12 +491,12 @@ func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx 
context.Context, tailnetSvc := tailnetSvcName(svc) _, ok := (*cfgs)[tailnetSvc] if !ok { - l.Debugf("ConfigMap does not contain egress service config, likely because it was already deleted") + lggr.Debugf("ConfigMap does not contain egress service config, likely because it was already deleted") return nil } - l.Infof("before deleting config %+#v", *cfgs) + lggr.Infof("before deleting config %+#v", *cfgs) delete(*cfgs, tailnetSvc) - l.Infof("after deleting config %+#v", *cfgs) + lggr.Infof("after deleting config %+#v", *cfgs) bs, err := json.Marshal(cfgs) if err != nil { return fmt.Errorf("error marshalling egress services configs: %w", err) @@ -505,7 +505,7 @@ func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context, return esr.Update(ctx, cm) } -func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (bool, error) { +func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) (bool, error) { proxyGroupName := svc.Annotations[AnnotationProxyGroup] pg := &tsapi.ProxyGroup{ ObjectMeta: metav1.ObjectMeta{ @@ -513,36 +513,36 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s }, } if err := esr.Get(ctx, client.ObjectKeyFromObject(pg), pg); apierrors.IsNotFound(err) { - l.Infof("ProxyGroup %q not found, waiting...", proxyGroupName) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l) + lg.Infof("ProxyGroup %q not found, waiting...", proxyGroupName) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, lg) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, nil } else if err != nil { err := fmt.Errorf("unable to retrieve ProxyGroup %s: %w", proxyGroupName, err) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, err.Error(), esr.clock, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, err.Error(), esr.clock, lg) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, err } if violations := validateEgressService(svc, pg); len(violations) > 0 { msg := fmt.Sprintf("invalid egress Service: %s", strings.Join(violations, ", ")) esr.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVICE", msg) - l.Info(msg) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionFalse, reasonEgressSvcInvalid, msg, esr.clock, l) + lg.Info(msg) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionFalse, reasonEgressSvcInvalid, msg, esr.clock, lg) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, nil } if !tsoperator.ProxyGroupAvailable(pg) { - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, lg) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) } - l.Debugf("egress service is valid") - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionTrue, reasonEgressSvcValid, reasonEgressSvcValid, esr.clock, l) + lg.Debugf("egress 
service is valid") + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionTrue, reasonEgressSvcValid, reasonEgressSvcValid, esr.clock, lg) return true, nil } -func egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service, ns string, l *zap.SugaredLogger) egressservices.Config { - d := retrieveClusterDomain(ns, l) +func egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service, ns string, lg *zap.SugaredLogger) egressservices.Config { + d := retrieveClusterDomain(ns, lg) tt := tailnetTargetFromSvc(externalNameSvc) hep := healthCheckForSvc(clusterIPSvc, d) cfg := egressservices.Config{ @@ -691,18 +691,18 @@ func egressSvcChildResourceLabels(svc *corev1.Service) map[string]string { // egressEpsLabels returns labels to be added to an EndpointSlice created for an egress service. func egressSvcEpsLabels(extNSvc, clusterIPSvc *corev1.Service) map[string]string { - l := egressSvcChildResourceLabels(extNSvc) + lbels := egressSvcChildResourceLabels(extNSvc) // Adding this label is what makes kube proxy set up rules to route traffic sent to the clusterIP Service to the // endpoints defined on this EndpointSlice. // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership - l[discoveryv1.LabelServiceName] = clusterIPSvc.Name + lbels[discoveryv1.LabelServiceName] = clusterIPSvc.Name // Kubernetes recommends setting this label. // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#management - l[discoveryv1.LabelManagedBy] = "tailscale.com" - return l + lbels[discoveryv1.LabelManagedBy] = "tailscale.com" + return lbels } -func svcConfigurationUpToDate(svc *corev1.Service, l *zap.SugaredLogger) bool { +func svcConfigurationUpToDate(svc *corev1.Service, lg *zap.SugaredLogger) bool { cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured) if cond == nil { return false @@ -710,21 +710,21 @@ func svcConfigurationUpToDate(svc *corev1.Service, l *zap.SugaredLogger) bool { if cond.Status != metav1.ConditionTrue { return false } - wantsReadyReason := svcConfiguredReason(svc, true, l) + wantsReadyReason := svcConfiguredReason(svc, true, lg) return strings.EqualFold(wantsReadyReason, cond.Reason) } -func cfgHash(c cfg, l *zap.SugaredLogger) string { +func cfgHash(c cfg, lg *zap.SugaredLogger) string { bs, err := json.Marshal(c) if err != nil { // Don't use l.Error as that messes up component logs with, in this case, unnecessary stack trace. - l.Infof("error marhsalling Config: %v", err) + lg.Infof("error marhsalling Config: %v", err) return "" } h := sha256.New() if _, err := h.Write(bs); err != nil { // Don't use l.Error as that messes up component logs with, in this case, unnecessary stack trace. 
- l.Infof("error producing Config hash: %v", err) + lg.Infof("error producing Config hash: %v", err) return "" } return fmt.Sprintf("%x", h.Sum(nil)) @@ -736,7 +736,7 @@ type cfg struct { ProxyGroup string `json:"proxyGroup"` } -func svcConfiguredReason(svc *corev1.Service, configured bool, l *zap.SugaredLogger) string { +func svcConfiguredReason(svc *corev1.Service, configured bool, lg *zap.SugaredLogger) string { var r string if configured { r = "ConfiguredFor:" @@ -750,7 +750,7 @@ func svcConfiguredReason(svc *corev1.Service, configured bool, l *zap.SugaredLog TailnetTarget: tt, ProxyGroup: svc.Annotations[AnnotationProxyGroup], } - r += fmt.Sprintf(":Config:%s", cfgHash(s, l)) + r += fmt.Sprintf(":Config:%s", cfgHash(s, lg)) return r } diff --git a/cmd/k8s-operator/egress-services_test.go b/cmd/k8s-operator/egress-services_test.go index d8a5dfd32..202804d30 100644 --- a/cmd/k8s-operator/egress-services_test.go +++ b/cmd/k8s-operator/egress-services_test.go @@ -249,9 +249,9 @@ func portsForEndpointSlice(svc *corev1.Service) []discoveryv1.EndpointPort { return ports } -func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap, l *zap.Logger) { +func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap, lg *zap.Logger) { t.Helper() - wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc, clusterIPSvc.Namespace, l.Sugar()) + wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc, clusterIPSvc.Namespace, lg.Sugar()) if err := cl.Get(context.Background(), client.ObjectKeyFromObject(cm), cm); err != nil { t.Fatalf("Error retrieving ConfigMap: %v", err) } diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index b15c93b1c..e11235768 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1282,8 +1282,8 @@ func TestServiceProxyClassAnnotation(t *testing.T) { slist := &corev1.SecretList{} fc.List(context.Background(), slist, client.InNamespace("operator-ns")) for _, i := range slist.Items { - l, _ := json.Marshal(i.Labels) - t.Logf("found secret %q with labels %q ", i.Name, string(l)) + labels, _ := json.Marshal(i.Labels) + t.Logf("found secret %q with labels %q ", i.Name, string(labels)) } _, shortName := findGenName(t, fc, "default", "test", "svc") diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index e185499f0..34db86db2 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -524,16 +524,16 @@ func pgSecretLabels(pgName, secretType string) map[string]string { } func pgLabels(pgName string, customLabels map[string]string) map[string]string { - l := make(map[string]string, len(customLabels)+3) + labels := make(map[string]string, len(customLabels)+3) for k, v := range customLabels { - l[k] = v + labels[k] = v } - l[kubetypes.LabelManaged] = "true" - l[LabelParentType] = "proxygroup" - l[LabelParentName] = pgName + labels[kubetypes.LabelManaged] = "true" + labels[LabelParentType] = "proxygroup" + labels[LabelParentName] = pgName - return l + return labels } func pgOwnerReference(owner *tsapi.ProxyGroup) []metav1.OwnerReference { diff --git a/cmd/k8s-operator/tsrecorder_specs.go b/cmd/k8s-operator/tsrecorder_specs.go index f5eedc2a1..83d7439db 100644 --- a/cmd/k8s-operator/tsrecorder_specs.go +++ b/cmd/k8s-operator/tsrecorder_specs.go @@ -281,17 +281,17 @@ func env(tsr *tsapi.Recorder, loginServer string) []corev1.EnvVar { } func labels(app, 
instance string, customLabels map[string]string) map[string]string { - l := make(map[string]string, len(customLabels)+3) + labels := make(map[string]string, len(customLabels)+3) for k, v := range customLabels { - l[k] = v + labels[k] = v } // ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ - l["app.kubernetes.io/name"] = app - l["app.kubernetes.io/instance"] = instance - l["app.kubernetes.io/managed-by"] = "tailscale-operator" + labels["app.kubernetes.io/name"] = app + labels["app.kubernetes.io/instance"] = instance + labels["app.kubernetes.io/managed-by"] = "tailscale-operator" - return l + return labels } func tsrOwnerReference(owner metav1.Object) []metav1.OwnerReference { diff --git a/cmd/k8s-proxy/internal/config/config.go b/cmd/k8s-proxy/internal/config/config.go index 4013047e7..0f0bd1bfc 100644 --- a/cmd/k8s-proxy/internal/config/config.go +++ b/cmd/k8s-proxy/internal/config/config.go @@ -50,32 +50,32 @@ func NewConfigLoader(logger *zap.SugaredLogger, client clientcorev1.CoreV1Interf } } -func (l *configLoader) WatchConfig(ctx context.Context, path string) error { +func (ld *configLoader) WatchConfig(ctx context.Context, path string) error { secretNamespacedName, isKubeSecret := strings.CutPrefix(path, "kube:") if isKubeSecret { secretNamespace, secretName, ok := strings.Cut(secretNamespacedName, string(types.Separator)) if !ok { return fmt.Errorf("invalid Kubernetes Secret reference %q, expected format <namespace>/<name>", path) } - if err := l.watchConfigSecretChanges(ctx, secretNamespace, secretName); err != nil && !errors.Is(err, context.Canceled) { + if err := ld.watchConfigSecretChanges(ctx, secretNamespace, secretName); err != nil && !errors.Is(err, context.Canceled) { return fmt.Errorf("error watching config Secret %q: %w", secretNamespacedName, err) } return nil } - if err := l.watchConfigFileChanges(ctx, path); err != nil && !errors.Is(err, context.Canceled) { + if err := ld.watchConfigFileChanges(ctx, path); err != nil && !errors.Is(err, context.Canceled) { return fmt.Errorf("error watching config file %q: %w", path, err) } return nil } -func (l *configLoader) reloadConfig(ctx context.Context, raw []byte) error { - if bytes.Equal(raw, l.previous) { - if l.cfgIgnored != nil && testenv.InTest() { - l.once.Do(func() { - close(l.cfgIgnored) +func (ld *configLoader) reloadConfig(ctx context.Context, raw []byte) error { + if bytes.Equal(raw, ld.previous) { + if ld.cfgIgnored != nil && testenv.InTest() { + ld.once.Do(func() { + close(ld.cfgIgnored) }) } return nil @@ -89,14 +89,14 @@ func (l *configLoader) reloadConfig(ctx context.Context, raw []byte) error { select { case <-ctx.Done(): return ctx.Err() - case l.cfgChan <- &cfg: + case ld.cfgChan <- &cfg: } - l.previous = raw + ld.previous = raw return nil } -func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string) error { +func (ld *configLoader) watchConfigFileChanges(ctx context.Context, path string) error { var ( tickChan <-chan time.Time eventChan <-chan fsnotify.Event @@ -106,14 +106,14 @@ func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string) if w, err := fsnotify.NewWatcher(); err != nil { // Creating a new fsnotify watcher would fail for example if inotify was not able to create a new file descriptor. 
// See https://github.com/tailscale/tailscale/issues/15081 - l.logger.Infof("Failed to create fsnotify watcher on config file %q; watching for changes on 5s timer: %v", path, err) + ld.logger.Infof("Failed to create fsnotify watcher on config file %q; watching for changes on 5s timer: %v", path, err) ticker := time.NewTicker(5 * time.Second) defer ticker.Stop() tickChan = ticker.C } else { dir := filepath.Dir(path) file := filepath.Base(path) - l.logger.Infof("Watching directory %q for changes to config file %q", dir, file) + ld.logger.Infof("Watching directory %q for changes to config file %q", dir, file) defer w.Close() if err := w.Add(dir); err != nil { return fmt.Errorf("failed to add fsnotify watch: %w", err) @@ -128,7 +128,7 @@ func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string) if err != nil { return fmt.Errorf("error reading config file %q: %w", path, err) } - if err := l.reloadConfig(ctx, b); err != nil { + if err := ld.reloadConfig(ctx, b); err != nil { return fmt.Errorf("error loading initial config file %q: %w", path, err) } @@ -163,14 +163,14 @@ func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string) if len(b) == 0 { continue } - if err := l.reloadConfig(ctx, b); err != nil { + if err := ld.reloadConfig(ctx, b); err != nil { return fmt.Errorf("error reloading config file %q: %v", path, err) } } } -func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNamespace, secretName string) error { - secrets := l.client.Secrets(secretNamespace) +func (ld *configLoader) watchConfigSecretChanges(ctx context.Context, secretNamespace, secretName string) error { + secrets := ld.client.Secrets(secretNamespace) w, err := secrets.Watch(ctx, metav1.ListOptions{ TypeMeta: metav1.TypeMeta{ Kind: "Secret", @@ -198,11 +198,11 @@ func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNames return fmt.Errorf("failed to get config Secret %q: %w", secretName, err) } - if err := l.configFromSecret(ctx, secret); err != nil { + if err := ld.configFromSecret(ctx, secret); err != nil { return fmt.Errorf("error loading initial config: %w", err) } - l.logger.Infof("Watching config Secret %q for changes", secretName) + ld.logger.Infof("Watching config Secret %q for changes", secretName) for { var secret *corev1.Secret select { @@ -237,7 +237,7 @@ func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNames if secret == nil || secret.Data == nil { continue } - if err := l.configFromSecret(ctx, secret); err != nil { + if err := ld.configFromSecret(ctx, secret); err != nil { return fmt.Errorf("error reloading config Secret %q: %v", secret.Name, err) } case watch.Error: @@ -250,13 +250,13 @@ func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNames } } -func (l *configLoader) configFromSecret(ctx context.Context, s *corev1.Secret) error { +func (ld *configLoader) configFromSecret(ctx context.Context, s *corev1.Secret) error { b := s.Data[kubetypes.KubeAPIServerConfigFile] if len(b) == 0 { return fmt.Errorf("config Secret %q does not contain expected config in key %q", s.Name, kubetypes.KubeAPIServerConfigFile) } - if err := l.reloadConfig(ctx, b); err != nil { + if err := ld.reloadConfig(ctx, b); err != nil { return err } diff --git a/cmd/k8s-proxy/internal/config/config_test.go b/cmd/k8s-proxy/internal/config/config_test.go index 1603dbe1f..bcb1b9ebd 100644 --- a/cmd/k8s-proxy/internal/config/config_test.go +++ b/cmd/k8s-proxy/internal/config/config_test.go @@ -125,15 +125,15 @@ 
func TestWatchConfig(t *testing.T) { } } configChan := make(chan *conf.Config) - l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) - l.cfgIgnored = make(chan struct{}) + loader := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) + loader.cfgIgnored = make(chan struct{}) errs := make(chan error) ctx, cancel := context.WithCancel(t.Context()) defer cancel() writeFile(t, tc.initialConfig) go func() { - errs <- l.WatchConfig(ctx, cfgPath) + errs <- loader.WatchConfig(ctx, cfgPath) }() for i, p := range tc.phases { @@ -159,7 +159,7 @@ func TestWatchConfig(t *testing.T) { } else if !strings.Contains(err.Error(), p.expectedErr) { t.Fatalf("expected error to contain %q, got %q", p.expectedErr, err.Error()) } - case <-l.cfgIgnored: + case <-loader.cfgIgnored: if p.expectedConf != nil { t.Fatalf("expected config to be reloaded, but got ignored signal") } @@ -192,13 +192,13 @@ func TestWatchConfigSecret_Rewatches(t *testing.T) { }) configChan := make(chan *conf.Config) - l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) + loader := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) mustCreateOrUpdate(t, cl, secretFrom(expected[0])) errs := make(chan error) go func() { - errs <- l.watchConfigSecretChanges(t.Context(), "default", "config-secret") + errs <- loader.watchConfigSecretChanges(t.Context(), "default", "config-secret") }() for i := range 2 { @@ -212,7 +212,7 @@ func TestWatchConfigSecret_Rewatches(t *testing.T) { } case err := <-errs: t.Fatalf("unexpected error: %v", err) - case <-l.cfgIgnored: + case <-loader.cfgIgnored: t.Fatalf("expected config to be reloaded, but got ignored signal") case <-time.After(5 * time.Second): t.Fatalf("timed out waiting for expected event") diff --git a/cmd/natc/ippool/consensusippool.go b/cmd/natc/ippool/consensusippool.go index 64807b6c2..bfa909b69 100644 --- a/cmd/natc/ippool/consensusippool.go +++ b/cmd/natc/ippool/consensusippool.go @@ -422,9 +422,9 @@ func (ipp *ConsensusIPPool) applyCheckoutAddr(nid tailcfg.NodeID, domain string, } // Apply is part of the raft.FSM interface. It takes an incoming log entry and applies it to the state. -func (ipp *ConsensusIPPool) Apply(l *raft.Log) any { +func (ipp *ConsensusIPPool) Apply(lg *raft.Log) any { var c tsconsensus.Command - if err := json.Unmarshal(l.Data, &c); err != nil { + if err := json.Unmarshal(lg.Data, &c); err != nil { panic(fmt.Sprintf("failed to unmarshal command: %s", err.Error())) } switch c.Name { diff --git a/cmd/sniproxy/sniproxy_test.go b/cmd/sniproxy/sniproxy_test.go index 07fbd2ece..65e059efa 100644 --- a/cmd/sniproxy/sniproxy_test.go +++ b/cmd/sniproxy/sniproxy_test.go @@ -156,13 +156,13 @@ func TestSNIProxyWithNetmapConfig(t *testing.T) { client, _, _ := startNode(t, ctx, controlURL, "client") // Make sure that the sni node has received its config. - l, err := sni.LocalClient() + lc, err := sni.LocalClient() if err != nil { t.Fatal(err) } gotConfigured := false for range 100 { - s, err := l.StatusWithoutPeers(ctx) + s, err := lc.StatusWithoutPeers(ctx) if err != nil { t.Fatal(err) } diff --git a/cmd/stunstamp/stunstamp.go b/cmd/stunstamp/stunstamp.go index 71ed50569..153dc9303 100644 --- a/cmd/stunstamp/stunstamp.go +++ b/cmd/stunstamp/stunstamp.go @@ -135,18 +135,18 @@ type lportsPool struct { ports []int } -func (l *lportsPool) get() int { - l.Lock() - defer l.Unlock() - ret := l.ports[0] - l.ports = append(l.ports[:0], l.ports[1:]...) 
+func (pl *lportsPool) get() int { + pl.Lock() + defer pl.Unlock() + ret := pl.ports[0] + pl.ports = append(pl.ports[:0], pl.ports[1:]...) return ret } -func (l *lportsPool) put(i int) { - l.Lock() - defer l.Unlock() - l.ports = append(l.ports, int(i)) +func (pl *lportsPool) put(i int) { + pl.Lock() + defer pl.Unlock() + pl.ports = append(pl.ports, int(i)) } var ( @@ -173,19 +173,19 @@ func init() { // measure dial time. type lportForTCPConn int -func (l *lportForTCPConn) Close() error { - if *l == 0 { +func (lp *lportForTCPConn) Close() error { + if *lp == 0 { return nil } - lports.put(int(*l)) + lports.put(int(*lp)) return nil } -func (l *lportForTCPConn) Write([]byte) (int, error) { +func (lp *lportForTCPConn) Write([]byte) (int, error) { return 0, errors.New("unimplemented") } -func (l *lportForTCPConn) Read([]byte) (int, error) { +func (lp *lportForTCPConn) Read([]byte) (int, error) { return 0, errors.New("unimplemented") } diff --git a/cmd/sync-containers/main.go b/cmd/sync-containers/main.go index 6317b4943..63efa5453 100644 --- a/cmd/sync-containers/main.go +++ b/cmd/sync-containers/main.go @@ -65,9 +65,9 @@ func main() { } add, remove := diffTags(stags, dtags) - if l := len(add); l > 0 { + if ln := len(add); ln > 0 { log.Printf("%d tags to push: %s", len(add), strings.Join(add, ", ")) - if *max > 0 && l > *max { + if *max > 0 && ln > *max { log.Printf("Limiting sync to %d tags", *max) add = add[:*max] } diff --git a/cmd/tl-longchain/tl-longchain.go b/cmd/tl-longchain/tl-longchain.go index 2a4dc10ba..384d24222 100644 --- a/cmd/tl-longchain/tl-longchain.go +++ b/cmd/tl-longchain/tl-longchain.go @@ -75,8 +75,8 @@ func peerInfo(peer *ipnstate.TKAPeer) string { // print prints a message about a node key signature and a re-signing command if needed. func print(info string, nodeKey key.NodePublic, sig tka.NodeKeySignature) { - if l := chainLength(sig); l > *maxRotations { - log.Printf("%s: chain length %d, printing command to re-sign", info, l) + if ln := chainLength(sig); ln > *maxRotations { + log.Printf("%s: chain length %d, printing command to re-sign", info, ln) wrapping, _ := sig.UnverifiedWrappingPublic() fmt.Printf("tailscale lock sign %s %s\n", nodeKey, key.NLPublicFromEd25519Unsafe(wrapping).CLIString()) } else { diff --git a/drive/driveimpl/connlistener.go b/drive/driveimpl/connlistener.go index e1fcb3b67..ff60f7340 100644 --- a/drive/driveimpl/connlistener.go +++ b/drive/driveimpl/connlistener.go @@ -25,12 +25,12 @@ func newConnListener() *connListener { } } -func (l *connListener) Accept() (net.Conn, error) { +func (ln *connListener) Accept() (net.Conn, error) { select { - case <-l.closedCh: + case <-ln.closedCh: // TODO(oxtoacart): make this error match what a regular net.Listener does return nil, syscall.EINVAL - case conn := <-l.ch: + case conn := <-ln.ch: return conn, nil } } @@ -38,32 +38,32 @@ func (l *connListener) Accept() (net.Conn, error) { // Addr implements net.Listener. This always returns nil. It is assumed that // this method is currently unused, so it logs a warning if it ever does get // called. -func (l *connListener) Addr() net.Addr { +func (ln *connListener) Addr() net.Addr { log.Println("warning: unexpected call to connListener.Addr()") return nil } -func (l *connListener) Close() error { - l.closeMu.Lock() - defer l.closeMu.Unlock() +func (ln *connListener) Close() error { + ln.closeMu.Lock() + defer ln.closeMu.Unlock() select { - case <-l.closedCh: + case <-ln.closedCh: // Already closed. 
return syscall.EINVAL default: // We don't close l.ch because someone maybe trying to send to that, // which would cause a panic. - close(l.closedCh) + close(ln.closedCh) return nil } } -func (l *connListener) HandleConn(c net.Conn, remoteAddr net.Addr) error { +func (ln *connListener) HandleConn(c net.Conn, remoteAddr net.Addr) error { select { - case <-l.closedCh: + case <-ln.closedCh: return syscall.EINVAL - case l.ch <- &connWithRemoteAddr{Conn: c, remoteAddr: remoteAddr}: + case ln.ch <- &connWithRemoteAddr{Conn: c, remoteAddr: remoteAddr}: // Connection has been accepted. } return nil diff --git a/drive/driveimpl/connlistener_test.go b/drive/driveimpl/connlistener_test.go index d8666448a..6adf15acb 100644 --- a/drive/driveimpl/connlistener_test.go +++ b/drive/driveimpl/connlistener_test.go @@ -10,20 +10,20 @@ import ( ) func TestConnListener(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:") + ln, err := net.Listen("tcp", "127.0.0.1:") if err != nil { t.Fatalf("failed to Listen: %s", err) } cl := newConnListener() // Test that we can accept a connection - cc, err := net.Dial("tcp", l.Addr().String()) + cc, err := net.Dial("tcp", ln.Addr().String()) if err != nil { t.Fatalf("failed to Dial: %s", err) } defer cc.Close() - sc, err := l.Accept() + sc, err := ln.Accept() if err != nil { t.Fatalf("failed to Accept: %s", err) } diff --git a/drive/driveimpl/drive_test.go b/drive/driveimpl/drive_test.go index cff55fbb2..818e84990 100644 --- a/drive/driveimpl/drive_test.go +++ b/drive/driveimpl/drive_test.go @@ -467,14 +467,14 @@ func newSystem(t *testing.T) *system { tstest.ResourceCheck(t) fs := newFileSystemForLocal(log.Printf, nil) - l, err := net.Listen("tcp", "127.0.0.1:0") + ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("failed to Listen: %s", err) } - t.Logf("FileSystemForLocal listening at %s", l.Addr()) + t.Logf("FileSystemForLocal listening at %s", ln.Addr()) go func() { for { - conn, err := l.Accept() + conn, err := ln.Accept() if err != nil { t.Logf("Accept: %v", err) return @@ -483,11 +483,11 @@ func newSystem(t *testing.T) *system { } }() - client := gowebdav.NewAuthClient(fmt.Sprintf("http://%s", l.Addr()), &noopAuthorizer{}) + client := gowebdav.NewAuthClient(fmt.Sprintf("http://%s", ln.Addr()), &noopAuthorizer{}) client.SetTransport(&http.Transport{DisableKeepAlives: true}) s := &system{ t: t, - local: &local{l: l, fs: fs}, + local: &local{l: ln, fs: fs}, client: client, remotes: make(map[string]*remote), } @@ -496,11 +496,11 @@ func newSystem(t *testing.T) *system { } func (s *system) addRemote(name string) string { - l, err := net.Listen("tcp", "127.0.0.1:0") + ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { s.t.Fatalf("failed to Listen: %s", err) } - s.t.Logf("Remote for %v listening at %s", name, l.Addr()) + s.t.Logf("Remote for %v listening at %s", name, ln.Addr()) fileServer, err := NewFileServer() if err != nil { @@ -510,14 +510,14 @@ func (s *system) addRemote(name string) string { s.t.Logf("FileServer for %v listening at %s", name, fileServer.Addr()) r := &remote{ - l: l, + l: ln, fileServer: fileServer, fs: NewFileSystemForRemote(log.Printf), shares: make(map[string]string), permissions: make(map[string]drive.Permission), } r.fs.SetFileServerAddr(fileServer.Addr()) - go http.Serve(l, r) + go http.Serve(ln, r) s.remotes[name] = r remotes := make([]*drive.Remote, 0, len(s.remotes)) diff --git a/drive/driveimpl/fileserver.go b/drive/driveimpl/fileserver.go index 113cb3b44..d448d83af 100644 --- a/drive/driveimpl/fileserver.go 
+++ b/drive/driveimpl/fileserver.go @@ -20,7 +20,7 @@ import ( // It's typically used in a separate process from the actual Taildrive server to // serve up files as an unprivileged user. type FileServer struct { - l net.Listener + ln net.Listener secretToken string shareHandlers map[string]http.Handler sharesMu sync.RWMutex @@ -41,10 +41,10 @@ type FileServer struct { // called. func NewFileServer() (*FileServer, error) { // path := filepath.Join(os.TempDir(), fmt.Sprintf("%v.socket", uuid.New().String())) - // l, err := safesocket.Listen(path) + // ln, err := safesocket.Listen(path) // if err != nil { // TODO(oxtoacart): actually get safesocket working in more environments (MacOS Sandboxed, Windows, ???) - l, err := net.Listen("tcp", "127.0.0.1:0") + ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return nil, fmt.Errorf("listen: %w", err) } @@ -55,7 +55,7 @@ func NewFileServer() (*FileServer, error) { } return &FileServer{ - l: l, + ln: ln, secretToken: secretToken, shareHandlers: make(map[string]http.Handler), }, nil @@ -74,12 +74,12 @@ func generateSecretToken() (string, error) { // Addr returns the address at which this FileServer is listening. This // includes the secret token in front of the address, delimited by a pipe |. func (s *FileServer) Addr() string { - return fmt.Sprintf("%s|%s", s.secretToken, s.l.Addr().String()) + return fmt.Sprintf("%s|%s", s.secretToken, s.ln.Addr().String()) } // Serve() starts serving files and blocks until it encounters a fatal error. func (s *FileServer) Serve() error { - return http.Serve(s.l, s) + return http.Serve(s.ln, s) } // LockShares locks the map of shares in preparation for manipulating it. @@ -162,5 +162,5 @@ func (s *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (s *FileServer) Close() error { - return s.l.Close() + return s.ln.Close() } diff --git a/feature/sdnotify/sdnotify_linux.go b/feature/sdnotify/sdnotify_linux.go index b005f1bdb..2b13e24bb 100644 --- a/feature/sdnotify/sdnotify_linux.go +++ b/feature/sdnotify/sdnotify_linux.go @@ -29,8 +29,8 @@ type logOnce struct { sync.Once } -func (l *logOnce) logf(format string, args ...any) { - l.Once.Do(func() { +func (lg *logOnce) logf(format string, args ...any) { + lg.Once.Do(func() { log.Printf(format, args...) 
}) } diff --git a/ipn/localapi/tailnetlock.go b/ipn/localapi/tailnetlock.go index 4baadb733..e5f999bb8 100644 --- a/ipn/localapi/tailnetlock.go +++ b/ipn/localapi/tailnetlock.go @@ -266,12 +266,12 @@ func (h *Handler) serveTKALog(w http.ResponseWriter, r *http.Request) { limit := 50 if limitStr := r.FormValue("limit"); limitStr != "" { - l, err := strconv.Atoi(limitStr) + lm, err := strconv.Atoi(limitStr) if err != nil { http.Error(w, "parsing 'limit' parameter: "+err.Error(), http.StatusBadRequest) return } - limit = int(l) + limit = int(lm) } updates, err := h.b.NetworkLockLog(limit) diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index 4026f9084..670df3b95 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -352,12 +352,12 @@ type ServiceMonitor struct { type Labels map[string]LabelValue -func (l Labels) Parse() map[string]string { - if l == nil { +func (lb Labels) Parse() map[string]string { + if lb == nil { return nil } - m := make(map[string]string, len(l)) - for k, v := range l { + m := make(map[string]string, len(lb)) + for k, v := range lb { m[k] = string(v) } return m diff --git a/k8s-operator/sessionrecording/ws/conn_test.go b/k8s-operator/sessionrecording/ws/conn_test.go index f2fd4ea55..87205c4e6 100644 --- a/k8s-operator/sessionrecording/ws/conn_test.go +++ b/k8s-operator/sessionrecording/ws/conn_test.go @@ -99,7 +99,7 @@ func Test_conn_Read(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - l := zl.Sugar() + log := zl.Sugar() tc := &fakes.TestConn{} sr := &fakes.TestSessionRecorder{} rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) @@ -110,7 +110,7 @@ func Test_conn_Read(t *testing.T) { c := &conn{ ctx: ctx, Conn: tc, - log: l, + log: log, hasTerm: true, initialCastHeaderSent: make(chan struct{}), rec: rec, diff --git a/kube/egressservices/egressservices.go b/kube/egressservices/egressservices.go index 2515f1bf3..56c874f31 100644 --- a/kube/egressservices/egressservices.go +++ b/kube/egressservices/egressservices.go @@ -69,12 +69,12 @@ var _ json.Unmarshaler = &PortMaps{} func (p *PortMaps) UnmarshalJSON(data []byte) error { *p = make(map[PortMap]struct{}) - var l []PortMap - if err := json.Unmarshal(data, &l); err != nil { + var v []PortMap + if err := json.Unmarshal(data, &v); err != nil { return err } - for _, pm := range l { + for _, pm := range v { (*p)[pm] = struct{}{} } @@ -82,12 +82,12 @@ func (p *PortMaps) UnmarshalJSON(data []byte) error { } func (p PortMaps) MarshalJSON() ([]byte, error) { - l := make([]PortMap, 0, len(p)) + v := make([]PortMap, 0, len(p)) for pm := range p { - l = append(l, pm) + v = append(v, pm) } - return json.Marshal(l) + return json.Marshal(v) } // Status represents the currently configured firewall rules for all egress diff --git a/kube/localclient/local-client.go b/kube/localclient/local-client.go index 5d541e365..550b3ae74 100644 --- a/kube/localclient/local-client.go +++ b/kube/localclient/local-client.go @@ -40,10 +40,10 @@ type localClient struct { lc *local.Client } -func (l *localClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) { - return l.lc.WatchIPNBus(ctx, mask) +func (lc *localClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) { + return lc.lc.WatchIPNBus(ctx, mask) } -func (l *localClient) CertPair(ctx context.Context, domain string) ([]byte, []byte, error) { - return l.lc.CertPair(ctx, 
domain) +func (lc *localClient) CertPair(ctx context.Context, domain string) ([]byte, []byte, error) { + return lc.lc.CertPair(ctx, domain) } diff --git a/log/sockstatlog/logger.go b/log/sockstatlog/logger.go index e0744de0f..8ddfabb86 100644 --- a/log/sockstatlog/logger.go +++ b/log/sockstatlog/logger.go @@ -146,33 +146,33 @@ func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *ne // SetLoggingEnabled enables or disables logging. // When disabled, socket stats are not polled and no new logs are written to disk. // Existing logs can still be fetched via the C2N API. -func (l *Logger) SetLoggingEnabled(v bool) { - old := l.enabled.Load() - if old != v && l.enabled.CompareAndSwap(old, v) { +func (lg *Logger) SetLoggingEnabled(v bool) { + old := lg.enabled.Load() + if old != v && lg.enabled.CompareAndSwap(old, v) { if v { - if l.eventCh == nil { + if lg.eventCh == nil { // eventCh should be large enough for the number of events that will occur within logInterval. // Add an extra second's worth of events to ensure we don't drop any. - l.eventCh = make(chan event, (logInterval+time.Second)/pollInterval) + lg.eventCh = make(chan event, (logInterval+time.Second)/pollInterval) } - l.ctx, l.cancelFn = context.WithCancel(context.Background()) - go l.poll() - go l.logEvents() + lg.ctx, lg.cancelFn = context.WithCancel(context.Background()) + go lg.poll() + go lg.logEvents() } else { - l.cancelFn() + lg.cancelFn() } } } -func (l *Logger) Write(p []byte) (int, error) { - return l.logger.Write(p) +func (lg *Logger) Write(p []byte) (int, error) { + return lg.logger.Write(p) } // poll fetches the current socket stats at the configured time interval, // calculates the delta since the last poll, // and writes any non-zero values to the logger event channel. // This method does not return. -func (l *Logger) poll() { +func (lg *Logger) poll() { // last is the last set of socket stats we saw. var lastStats *sockstats.SockStats var lastTime time.Time @@ -180,7 +180,7 @@ func (l *Logger) poll() { ticker := time.NewTicker(pollInterval) for { select { - case <-l.ctx.Done(): + case <-lg.ctx.Done(): ticker.Stop() return case t := <-ticker.C: @@ -196,7 +196,7 @@ func (l *Logger) poll() { if stats.CurrentInterfaceCellular { e.IsCellularInterface = 1 } - l.eventCh <- e + lg.eventCh <- e } } lastTime = t @@ -207,14 +207,14 @@ func (l *Logger) poll() { // logEvents reads events from the event channel at logInterval and logs them to disk. // This method does not return. -func (l *Logger) logEvents() { - enc := json.NewEncoder(l) +func (lg *Logger) logEvents() { + enc := json.NewEncoder(lg) flush := func() { for { select { - case e := <-l.eventCh: + case e := <-lg.eventCh: if err := enc.Encode(e); err != nil { - l.logf("sockstatlog: error encoding log: %v", err) + lg.logf("sockstatlog: error encoding log: %v", err) } default: return @@ -224,7 +224,7 @@ func (l *Logger) logEvents() { ticker := time.NewTicker(logInterval) for { select { - case <-l.ctx.Done(): + case <-lg.ctx.Done(): ticker.Stop() return case <-ticker.C: @@ -233,29 +233,29 @@ func (l *Logger) logEvents() { } } -func (l *Logger) LogID() string { - if l.logger == nil { +func (lg *Logger) LogID() string { + if lg.logger == nil { return "" } - return l.logger.PrivateID().Public().String() + return lg.logger.PrivateID().Public().String() } // Flush sends pending logs to the log server and flushes them from the local buffer. 
-func (l *Logger) Flush() { - l.logger.StartFlush() +func (lg *Logger) Flush() { + lg.logger.StartFlush() } -func (l *Logger) Shutdown(ctx context.Context) { - if l.cancelFn != nil { - l.cancelFn() +func (lg *Logger) Shutdown(ctx context.Context) { + if lg.cancelFn != nil { + lg.cancelFn() } - l.filch.Close() - l.logger.Shutdown(ctx) + lg.filch.Close() + lg.logger.Shutdown(ctx) type closeIdler interface { CloseIdleConnections() } - if tr, ok := l.tr.(closeIdler); ok { + if tr, ok := lg.tr.(closeIdler); ok { tr.CloseIdleConnections() } } diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 26858b713..f7491783a 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -193,8 +193,8 @@ type logWriter struct { logger *log.Logger } -func (l logWriter) Write(buf []byte) (int, error) { - l.logger.Printf("%s", buf) +func (lg logWriter) Write(buf []byte) (int, error) { + lg.logger.Printf("%s", buf) return len(buf), nil } diff --git a/logtail/logtail.go b/logtail/logtail.go index 6ff4dd04f..2879c6b0d 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -100,7 +100,7 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger { if !cfg.CopyPrivateID.IsZero() { urlSuffix = "?copyId=" + cfg.CopyPrivateID.String() } - l := &Logger{ + logger := &Logger{ privateID: cfg.PrivateID, stderr: cfg.Stderr, stderrLevel: int64(cfg.StderrLevel), @@ -124,19 +124,19 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger { } if cfg.Bus != nil { - l.eventClient = cfg.Bus.Client("logtail.Logger") + logger.eventClient = cfg.Bus.Client("logtail.Logger") // Subscribe to change deltas from NetMon to detect when the network comes up. - eventbus.SubscribeFunc(l.eventClient, l.onChangeDelta) + eventbus.SubscribeFunc(logger.eventClient, logger.onChangeDelta) } - l.SetSockstatsLabel(sockstats.LabelLogtailLogger) - l.compressLogs = cfg.CompressLogs + logger.SetSockstatsLabel(sockstats.LabelLogtailLogger) + logger.compressLogs = cfg.CompressLogs ctx, cancel := context.WithCancel(context.Background()) - l.uploadCancel = cancel + logger.uploadCancel = cancel - go l.uploading(ctx) - l.Write([]byte("logtail started")) - return l + go logger.uploading(ctx) + logger.Write([]byte("logtail started")) + return logger } // Logger writes logs, splitting them as configured between local @@ -190,27 +190,27 @@ func (p *atomicSocktatsLabel) Store(label sockstats.Label) { p.p.Store(uint32(la // SetVerbosityLevel controls the verbosity level that should be // written to stderr. 0 is the default (not verbose). Levels 1 or higher // are increasingly verbose. -func (l *Logger) SetVerbosityLevel(level int) { - atomic.StoreInt64(&l.stderrLevel, int64(level)) +func (lg *Logger) SetVerbosityLevel(level int) { + atomic.StoreInt64(&lg.stderrLevel, int64(level)) } // SetNetMon sets the network monitor. // // It should not be changed concurrently with log writes and should // only be set once. -func (l *Logger) SetNetMon(lm *netmon.Monitor) { - l.netMonitor = lm +func (lg *Logger) SetNetMon(lm *netmon.Monitor) { + lg.netMonitor = lm } // SetSockstatsLabel sets the label used in sockstat logs to identify network traffic from this logger. -func (l *Logger) SetSockstatsLabel(label sockstats.Label) { - l.sockstatsLabel.Store(label) +func (lg *Logger) SetSockstatsLabel(label sockstats.Label) { + lg.sockstatsLabel.Store(label) } // PrivateID returns the logger's private log ID. // // It exists for internal use only. 
-func (l *Logger) PrivateID() logid.PrivateID { return l.privateID } +func (lg *Logger) PrivateID() logid.PrivateID { return lg.privateID } // Shutdown gracefully shuts down the logger while completing any // remaining uploads. @@ -218,33 +218,33 @@ func (l *Logger) PrivateID() logid.PrivateID { return l.privateID } // It will block, continuing to try and upload unless the passed // context object interrupts it by being done. // If the shutdown is interrupted, an error is returned. -func (l *Logger) Shutdown(ctx context.Context) error { +func (lg *Logger) Shutdown(ctx context.Context) error { done := make(chan struct{}) go func() { select { case <-ctx.Done(): - l.uploadCancel() - <-l.shutdownDone - case <-l.shutdownDone: + lg.uploadCancel() + <-lg.shutdownDone + case <-lg.shutdownDone: } close(done) - l.httpc.CloseIdleConnections() + lg.httpc.CloseIdleConnections() }() - if l.eventClient != nil { - l.eventClient.Close() + if lg.eventClient != nil { + lg.eventClient.Close() } - l.shutdownStartMu.Lock() + lg.shutdownStartMu.Lock() select { - case <-l.shutdownStart: - l.shutdownStartMu.Unlock() + case <-lg.shutdownStart: + lg.shutdownStartMu.Unlock() return nil default: } - close(l.shutdownStart) - l.shutdownStartMu.Unlock() + close(lg.shutdownStart) + lg.shutdownStartMu.Unlock() - io.WriteString(l, "logger closing down\n") + io.WriteString(lg, "logger closing down\n") <-done return nil @@ -254,8 +254,8 @@ func (l *Logger) Shutdown(ctx context.Context) error { // process, and any associated goroutines. // // Deprecated: use Shutdown -func (l *Logger) Close() { - l.Shutdown(context.Background()) +func (lg *Logger) Close() { + lg.Shutdown(context.Background()) } // drainBlock is called by drainPending when there are no logs to drain. @@ -265,11 +265,11 @@ func (l *Logger) Close() { // // If the caller specified FlushInterface, drainWake is only sent to // periodically. -func (l *Logger) drainBlock() (shuttingDown bool) { +func (lg *Logger) drainBlock() (shuttingDown bool) { select { - case <-l.shutdownStart: + case <-lg.shutdownStart: return true - case <-l.drainWake: + case <-lg.drainWake: } return false } @@ -277,20 +277,20 @@ func (l *Logger) drainBlock() (shuttingDown bool) { // drainPending drains and encodes a batch of logs from the buffer for upload. // If no logs are available, drainPending blocks until logs are available. // The returned buffer is only valid until the next call to drainPending. -func (l *Logger) drainPending() (b []byte) { - b = l.drainBuf[:0] +func (lg *Logger) drainPending() (b []byte) { + b = lg.drainBuf[:0] b = append(b, '[') defer func() { b = bytes.TrimRight(b, ",") b = append(b, ']') - l.drainBuf = b + lg.drainBuf = b if len(b) <= len("[]") { b = nil } }() - maxLen := cmp.Or(l.maxUploadSize, maxSize) - if l.lowMem { + maxLen := cmp.Or(lg.maxUploadSize, maxSize) + if lg.lowMem { // When operating in a low memory environment, it is better to upload // in multiple operations than it is to allocate a large body and OOM. 
// Even if maxLen is less than maxSize, we can still upload an entry @@ -298,13 +298,13 @@ func (l *Logger) drainPending() (b []byte) { maxLen /= lowMemRatio } for len(b) < maxLen { - line, err := l.buffer.TryReadLine() + line, err := lg.buffer.TryReadLine() switch { case err == io.EOF: return b case err != nil: b = append(b, '{') - b = l.appendMetadata(b, false, true, 0, 0, "reading ringbuffer: "+err.Error(), nil, 0) + b = lg.appendMetadata(b, false, true, 0, 0, "reading ringbuffer: "+err.Error(), nil, 0) b = bytes.TrimRight(b, ",") b = append(b, '}') return b @@ -318,10 +318,10 @@ func (l *Logger) drainPending() (b []byte) { // in our buffer from a previous large write, let it go. if cap(b) > bufferSize { b = bytes.Clone(b) - l.drainBuf = b + lg.drainBuf = b } - if shuttingDown := l.drainBlock(); shuttingDown { + if shuttingDown := lg.drainBlock(); shuttingDown { return b } continue @@ -338,18 +338,18 @@ func (l *Logger) drainPending() (b []byte) { default: // This is probably a log added to stderr by filch // outside of the logtail logger. Encode it. - if !l.explainedRaw { - fmt.Fprintf(l.stderr, "RAW-STDERR: ***\n") - fmt.Fprintf(l.stderr, "RAW-STDERR: *** Lines prefixed with RAW-STDERR below bypassed logtail and probably come from a previous run of the program\n") - fmt.Fprintf(l.stderr, "RAW-STDERR: ***\n") - fmt.Fprintf(l.stderr, "RAW-STDERR:\n") - l.explainedRaw = true + if !lg.explainedRaw { + fmt.Fprintf(lg.stderr, "RAW-STDERR: ***\n") + fmt.Fprintf(lg.stderr, "RAW-STDERR: *** Lines prefixed with RAW-STDERR below bypassed logtail and probably come from a previous run of the program\n") + fmt.Fprintf(lg.stderr, "RAW-STDERR: ***\n") + fmt.Fprintf(lg.stderr, "RAW-STDERR:\n") + lg.explainedRaw = true } - fmt.Fprintf(l.stderr, "RAW-STDERR: %s", b) + fmt.Fprintf(lg.stderr, "RAW-STDERR: %s", b) // Do not add a client time, as it could be really old. // Do not include instance key or ID either, // since this came from a different instance. - b = l.appendText(b, line, true, 0, 0, 0) + b = lg.appendText(b, line, true, 0, 0, 0) } b = append(b, ',') } @@ -357,14 +357,14 @@ func (l *Logger) drainPending() (b []byte) { } // This is the goroutine that repeatedly uploads logs in the background. -func (l *Logger) uploading(ctx context.Context) { - defer close(l.shutdownDone) +func (lg *Logger) uploading(ctx context.Context) { + defer close(lg.shutdownDone) for { - body := l.drainPending() + body := lg.drainPending() origlen := -1 // sentinel value: uncompressed // Don't attempt to compress tiny bodies; not worth the CPU cycles. - if l.compressLogs && len(body) > 256 { + if lg.compressLogs && len(body) > 256 { zbody := zstdframe.AppendEncode(nil, body, zstdframe.FastestCompression, zstdframe.LowMemory(true)) @@ -381,20 +381,20 @@ func (l *Logger) uploading(ctx context.Context) { var numFailures int var firstFailure time.Time for len(body) > 0 && ctx.Err() == nil { - retryAfter, err := l.upload(ctx, body, origlen) + retryAfter, err := lg.upload(ctx, body, origlen) if err != nil { numFailures++ - firstFailure = l.clock.Now() + firstFailure = lg.clock.Now() - if !l.internetUp() { - fmt.Fprintf(l.stderr, "logtail: internet down; waiting\n") - l.awaitInternetUp(ctx) + if !lg.internetUp() { + fmt.Fprintf(lg.stderr, "logtail: internet down; waiting\n") + lg.awaitInternetUp(ctx) continue } // Only print the same message once. 
if currError := err.Error(); lastError != currError { - fmt.Fprintf(l.stderr, "logtail: upload: %v\n", err) + fmt.Fprintf(lg.stderr, "logtail: upload: %v\n", err) lastError = currError } @@ -407,55 +407,55 @@ func (l *Logger) uploading(ctx context.Context) { } else { // Only print a success message after recovery. if numFailures > 0 { - fmt.Fprintf(l.stderr, "logtail: upload succeeded after %d failures and %s\n", numFailures, l.clock.Since(firstFailure).Round(time.Second)) + fmt.Fprintf(lg.stderr, "logtail: upload succeeded after %d failures and %s\n", numFailures, lg.clock.Since(firstFailure).Round(time.Second)) } break } } select { - case <-l.shutdownStart: + case <-lg.shutdownStart: return default: } } } -func (l *Logger) internetUp() bool { +func (lg *Logger) internetUp() bool { select { - case <-l.networkIsUp.Ready(): + case <-lg.networkIsUp.Ready(): return true default: - if l.netMonitor == nil { + if lg.netMonitor == nil { return true // No way to tell, so assume it is. } - return l.netMonitor.InterfaceState().AnyInterfaceUp() + return lg.netMonitor.InterfaceState().AnyInterfaceUp() } } // onChangeDelta is an eventbus subscriber function that handles // [netmon.ChangeDelta] events to detect whether the Internet is expected to be // reachable. -func (l *Logger) onChangeDelta(delta *netmon.ChangeDelta) { +func (lg *Logger) onChangeDelta(delta *netmon.ChangeDelta) { if delta.New.AnyInterfaceUp() { - fmt.Fprintf(l.stderr, "logtail: internet back up\n") - l.networkIsUp.Set() + fmt.Fprintf(lg.stderr, "logtail: internet back up\n") + lg.networkIsUp.Set() } else { - fmt.Fprintf(l.stderr, "logtail: network changed, but is not up\n") - l.networkIsUp.Reset() + fmt.Fprintf(lg.stderr, "logtail: network changed, but is not up\n") + lg.networkIsUp.Reset() } } -func (l *Logger) awaitInternetUp(ctx context.Context) { - if l.eventClient != nil { +func (lg *Logger) awaitInternetUp(ctx context.Context) { + if lg.eventClient != nil { select { - case <-l.networkIsUp.Ready(): + case <-lg.networkIsUp.Ready(): case <-ctx.Done(): } return } upc := make(chan bool, 1) - defer l.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { + defer lg.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { if delta.New.AnyInterfaceUp() { select { case upc <- true: @@ -463,12 +463,12 @@ func (l *Logger) awaitInternetUp(ctx context.Context) { } } })() - if l.internetUp() { + if lg.internetUp() { return } select { case <-upc: - fmt.Fprintf(l.stderr, "logtail: internet back up\n") + fmt.Fprintf(lg.stderr, "logtail: internet back up\n") case <-ctx.Done(): } } @@ -476,13 +476,13 @@ func (l *Logger) awaitInternetUp(ctx context.Context) { // upload uploads body to the log server. // origlen indicates the pre-compression body length. // origlen of -1 indicates that the body is not compressed. -func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAfter time.Duration, err error) { +func (lg *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAfter time.Duration, err error) { const maxUploadTime = 45 * time.Second - ctx = sockstats.WithSockStats(ctx, l.sockstatsLabel.Load(), l.Logf) + ctx = sockstats.WithSockStats(ctx, lg.sockstatsLabel.Load(), lg.Logf) ctx, cancel := context.WithTimeout(ctx, maxUploadTime) defer cancel() - req, err := http.NewRequestWithContext(ctx, "POST", l.url, bytes.NewReader(body)) + req, err := http.NewRequestWithContext(ctx, "POST", lg.url, bytes.NewReader(body)) if err != nil { // I know of no conditions under which this could fail. 
// Report it very loudly. @@ -513,8 +513,8 @@ func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAft compressedNote = "compressed" } - l.httpDoCalls.Add(1) - resp, err := l.httpc.Do(req) + lg.httpDoCalls.Add(1) + resp, err := lg.httpc.Do(req) if err != nil { return 0, fmt.Errorf("log upload of %d bytes %s failed: %v", len(body), compressedNote, err) } @@ -533,16 +533,16 @@ func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAft // // TODO(bradfitz): this apparently just returns nil, as of tailscale/corp@9c2ec35. // Finish cleaning this up. -func (l *Logger) Flush() error { +func (lg *Logger) Flush() error { return nil } // StartFlush starts a log upload, if anything is pending. // // If l is nil, StartFlush is a no-op. -func (l *Logger) StartFlush() { - if l != nil { - l.tryDrainWake() +func (lg *Logger) StartFlush() { + if lg != nil { + lg.tryDrainWake() } } @@ -558,41 +558,41 @@ var debugWakesAndUploads = envknob.RegisterBool("TS_DEBUG_LOGTAIL_WAKES") // tryDrainWake tries to send to lg.drainWake, to cause an uploading wakeup. // It does not block. -func (l *Logger) tryDrainWake() { - l.flushPending.Store(false) +func (lg *Logger) tryDrainWake() { + lg.flushPending.Store(false) if debugWakesAndUploads() { // Using println instead of log.Printf here to avoid recursing back into // ourselves. - println("logtail: try drain wake, numHTTP:", l.httpDoCalls.Load()) + println("logtail: try drain wake, numHTTP:", lg.httpDoCalls.Load()) } select { - case l.drainWake <- struct{}{}: + case lg.drainWake <- struct{}{}: default: } } -func (l *Logger) sendLocked(jsonBlob []byte) (int, error) { +func (lg *Logger) sendLocked(jsonBlob []byte) (int, error) { tapSend(jsonBlob) if logtailDisabled.Load() { return len(jsonBlob), nil } - n, err := l.buffer.Write(jsonBlob) + n, err := lg.buffer.Write(jsonBlob) flushDelay := defaultFlushDelay - if l.flushDelayFn != nil { - flushDelay = l.flushDelayFn() + if lg.flushDelayFn != nil { + flushDelay = lg.flushDelayFn() } if flushDelay > 0 { - if l.flushPending.CompareAndSwap(false, true) { - if l.flushTimer == nil { - l.flushTimer = l.clock.AfterFunc(flushDelay, l.tryDrainWake) + if lg.flushPending.CompareAndSwap(false, true) { + if lg.flushTimer == nil { + lg.flushTimer = lg.clock.AfterFunc(flushDelay, lg.tryDrainWake) } else { - l.flushTimer.Reset(flushDelay) + lg.flushTimer.Reset(flushDelay) } } } else { - l.tryDrainWake() + lg.tryDrainWake() } return n, err } @@ -600,13 +600,13 @@ func (l *Logger) sendLocked(jsonBlob []byte) (int, error) { // appendMetadata appends optional "logtail", "metrics", and "v" JSON members. // This assumes dst is already within a JSON object. // Each member is comma-terminated. -func (l *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, procID uint32, procSequence uint64, errDetail string, errData jsontext.Value, level int) []byte { +func (lg *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, procID uint32, procSequence uint64, errDetail string, errData jsontext.Value, level int) []byte { // Append optional logtail metadata. if !skipClientTime || procID != 0 || procSequence != 0 || errDetail != "" || errData != nil { dst = append(dst, `"logtail":{`...) if !skipClientTime { dst = append(dst, `"client_time":"`...) 
- dst = l.clock.Now().UTC().AppendFormat(dst, time.RFC3339Nano) + dst = lg.clock.Now().UTC().AppendFormat(dst, time.RFC3339Nano) dst = append(dst, '"', ',') } if procID != 0 { @@ -639,8 +639,8 @@ func (l *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, pr } // Append optional metrics metadata. - if !skipMetrics && l.metricsDelta != nil { - if d := l.metricsDelta(); d != "" { + if !skipMetrics && lg.metricsDelta != nil { + if d := lg.metricsDelta(); d != "" { dst = append(dst, `"metrics":"`...) dst = append(dst, d...) dst = append(dst, '"', ',') @@ -660,10 +660,10 @@ func (l *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, pr } // appendText appends a raw text message in the Tailscale JSON log entry format. -func (l *Logger) appendText(dst, src []byte, skipClientTime bool, procID uint32, procSequence uint64, level int) []byte { +func (lg *Logger) appendText(dst, src []byte, skipClientTime bool, procID uint32, procSequence uint64, level int) []byte { dst = slices.Grow(dst, len(src)) dst = append(dst, '{') - dst = l.appendMetadata(dst, skipClientTime, false, procID, procSequence, "", nil, level) + dst = lg.appendMetadata(dst, skipClientTime, false, procID, procSequence, "", nil, level) if len(src) == 0 { dst = bytes.TrimRight(dst, ",") return append(dst, "}\n"...) @@ -672,7 +672,7 @@ func (l *Logger) appendText(dst, src []byte, skipClientTime bool, procID uint32, // Append the text string, which may be truncated. // Invalid UTF-8 will be mangled with the Unicode replacement character. max := maxTextSize - if l.lowMem { + if lg.lowMem { max /= lowMemRatio } dst = append(dst, `"text":`...) @@ -697,12 +697,12 @@ func appendTruncatedString(dst, src []byte, n int) []byte { // appendTextOrJSONLocked appends a raw text message or a raw JSON object // in the Tailscale JSON log format. -func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { - if l.includeProcSequence { - l.procSequence++ +func (lg *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { + if lg.includeProcSequence { + lg.procSequence++ } if len(src) == 0 || src[0] != '{' { - return l.appendText(dst, src, l.skipClientTime, l.procID, l.procSequence, level) + return lg.appendText(dst, src, lg.skipClientTime, lg.procID, lg.procSequence, level) } // Check whether the input is a valid JSON object and @@ -714,11 +714,11 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { // However, bytes.NewBuffer normally allocates unless // we immediately shallow copy it into a pre-allocated Buffer struct. // See https://go.dev/issue/67004. - l.bytesBuf = *bytes.NewBuffer(src) - defer func() { l.bytesBuf = bytes.Buffer{} }() // avoid pinning src + lg.bytesBuf = *bytes.NewBuffer(src) + defer func() { lg.bytesBuf = bytes.Buffer{} }() // avoid pinning src - dec := &l.jsonDec - dec.Reset(&l.bytesBuf) + dec := &lg.jsonDec + dec.Reset(&lg.bytesBuf) if tok, err := dec.ReadToken(); tok.Kind() != '{' || err != nil { return false } @@ -750,7 +750,7 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { // Treat invalid JSON as a raw text message. if !validJSON { - return l.appendText(dst, src, l.skipClientTime, l.procID, l.procSequence, level) + return lg.appendText(dst, src, lg.skipClientTime, lg.procID, lg.procSequence, level) } // Check whether the JSON payload is too large. 
@@ -758,13 +758,13 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { // That's okay as the Tailscale log service limit is actually 2*maxSize. // However, so long as logging applications aim to target the maxSize limit, // there should be no trouble eventually uploading logs. - maxLen := cmp.Or(l.maxUploadSize, maxSize) + maxLen := cmp.Or(lg.maxUploadSize, maxSize) if len(src) > maxLen { errDetail := fmt.Sprintf("entry too large: %d bytes", len(src)) errData := appendTruncatedString(nil, src, maxLen/len(`\uffff`)) // escaping could increase size dst = append(dst, '{') - dst = l.appendMetadata(dst, l.skipClientTime, true, l.procID, l.procSequence, errDetail, errData, level) + dst = lg.appendMetadata(dst, lg.skipClientTime, true, lg.procID, lg.procSequence, errDetail, errData, level) dst = bytes.TrimRight(dst, ",") return append(dst, "}\n"...) } @@ -781,7 +781,7 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { } dst = slices.Grow(dst, len(src)) dst = append(dst, '{') - dst = l.appendMetadata(dst, l.skipClientTime, true, l.procID, l.procSequence, errDetail, errData, level) + dst = lg.appendMetadata(dst, lg.skipClientTime, true, lg.procID, lg.procSequence, errDetail, errData, level) if logtailValLength > 0 { // Exclude original logtail member from the message. dst = appendWithoutNewline(dst, src[len("{"):logtailKeyOffset]) @@ -808,8 +808,8 @@ func appendWithoutNewline(dst, src []byte) []byte { } // Logf logs to l using the provided fmt-style format and optional arguments. -func (l *Logger) Logf(format string, args ...any) { - fmt.Fprintf(l, format, args...) +func (lg *Logger) Logf(format string, args ...any) { + fmt.Fprintf(lg, format, args...) } // Write logs an encoded JSON blob. @@ -818,29 +818,29 @@ func (l *Logger) Logf(format string, args ...any) { // then contents is fit into a JSON blob and written. // // This is intended as an interface for the stdlib "log" package. -func (l *Logger) Write(buf []byte) (int, error) { +func (lg *Logger) Write(buf []byte) (int, error) { if len(buf) == 0 { return 0, nil } inLen := len(buf) // length as provided to us, before modifications to downstream writers level, buf := parseAndRemoveLogLevel(buf) - if l.stderr != nil && l.stderr != io.Discard && int64(level) <= atomic.LoadInt64(&l.stderrLevel) { + if lg.stderr != nil && lg.stderr != io.Discard && int64(level) <= atomic.LoadInt64(&lg.stderrLevel) { if buf[len(buf)-1] == '\n' { - l.stderr.Write(buf) + lg.stderr.Write(buf) } else { // The log package always line-terminates logs, // so this is an uncommon path. 
withNL := append(buf[:len(buf):len(buf)], '\n') - l.stderr.Write(withNL) + lg.stderr.Write(withNL) } } - l.writeLock.Lock() - defer l.writeLock.Unlock() + lg.writeLock.Lock() + defer lg.writeLock.Unlock() - b := l.appendTextOrJSONLocked(l.writeBuf[:0], buf, level) - _, err := l.sendLocked(b) + b := lg.appendTextOrJSONLocked(lg.writeBuf[:0], buf, level) + _, err := lg.sendLocked(b) return inLen, err } diff --git a/logtail/logtail_test.go b/logtail/logtail_test.go index a92f88b4b..b618fc0d7 100644 --- a/logtail/logtail_test.go +++ b/logtail/logtail_test.go @@ -29,11 +29,11 @@ func TestFastShutdown(t *testing.T) { func(w http.ResponseWriter, r *http.Request) {})) defer testServ.Close() - l := NewLogger(Config{ + logger := NewLogger(Config{ BaseURL: testServ.URL, Bus: eventbustest.NewBus(t), }, t.Logf) - err := l.Shutdown(ctx) + err := logger.Shutdown(ctx) if err != nil { t.Error(err) } @@ -64,7 +64,7 @@ func NewLogtailTestHarness(t *testing.T) (*LogtailTestServer, *Logger) { t.Cleanup(ts.srv.Close) - l := NewLogger(Config{ + logger := NewLogger(Config{ BaseURL: ts.srv.URL, Bus: eventbustest.NewBus(t), }, t.Logf) @@ -75,14 +75,14 @@ func NewLogtailTestHarness(t *testing.T) (*LogtailTestServer, *Logger) { t.Errorf("unknown start logging statement: %q", string(body)) } - return &ts, l + return &ts, logger } func TestDrainPendingMessages(t *testing.T) { - ts, l := NewLogtailTestHarness(t) + ts, logger := NewLogtailTestHarness(t) for range logLines { - l.Write([]byte("log line")) + logger.Write([]byte("log line")) } // all of the "log line" messages usually arrive at once, but poll if needed. @@ -96,14 +96,14 @@ func TestDrainPendingMessages(t *testing.T) { // if we never find count == logLines, the test will eventually time out. } - err := l.Shutdown(context.Background()) + err := logger.Shutdown(context.Background()) if err != nil { t.Error(err) } } func TestEncodeAndUploadMessages(t *testing.T) { - ts, l := NewLogtailTestHarness(t) + ts, logger := NewLogtailTestHarness(t) tests := []struct { name string @@ -123,7 +123,7 @@ func TestEncodeAndUploadMessages(t *testing.T) { } for _, tt := range tests { - io.WriteString(l, tt.log) + io.WriteString(logger, tt.log) body := <-ts.uploaded data := unmarshalOne(t, body) @@ -144,7 +144,7 @@ func TestEncodeAndUploadMessages(t *testing.T) { } } - err := l.Shutdown(context.Background()) + err := logger.Shutdown(context.Background()) if err != nil { t.Error(err) } @@ -322,9 +322,9 @@ func TestLoggerWriteResult(t *testing.T) { } func TestAppendMetadata(t *testing.T) { - var l Logger - l.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) - l.metricsDelta = func() string { return "metrics" } + var lg Logger + lg.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) + lg.metricsDelta = func() string { return "metrics" } for _, tt := range []struct { skipClientTime bool @@ -350,7 +350,7 @@ func TestAppendMetadata(t *testing.T) { {procID: 1, procSeq: 2, errDetail: "error", errData: jsontext.Value(`["something","bad","happened"]`), level: 2, want: `"logtail":{"client_time":"2000-01-01T00:00:00Z","proc_id":1,"proc_seq":2,"error":{"detail":"error","bad_data":["something","bad","happened"]}},"metrics":"metrics","v":2,`}, } { - got := string(l.appendMetadata(nil, tt.skipClientTime, tt.skipMetrics, tt.procID, tt.procSeq, tt.errDetail, tt.errData, tt.level)) + got := string(lg.appendMetadata(nil, tt.skipClientTime, tt.skipMetrics, tt.procID, tt.procSeq, tt.errDetail, tt.errData, 
tt.level)) if got != tt.want { t.Errorf("appendMetadata(%v, %v, %v, %v, %v, %v, %v):\n\tgot %s\n\twant %s", tt.skipClientTime, tt.skipMetrics, tt.procID, tt.procSeq, tt.errDetail, tt.errData, tt.level, got, tt.want) } @@ -362,10 +362,10 @@ func TestAppendMetadata(t *testing.T) { } func TestAppendText(t *testing.T) { - var l Logger - l.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) - l.metricsDelta = func() string { return "metrics" } - l.lowMem = true + var lg Logger + lg.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) + lg.metricsDelta = func() string { return "metrics" } + lg.lowMem = true for _, tt := range []struct { text string @@ -382,7 +382,7 @@ func TestAppendText(t *testing.T) { {text: "\b\f\n\r\t\"\\", want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z"},"metrics":"metrics","text":"\b\f\n\r\t\"\\"}`}, {text: "x" + strings.Repeat("😐", maxSize), want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z"},"metrics":"metrics","text":"x` + strings.Repeat("😐", 1023) + `…+1044484"}`}, } { - got := string(l.appendText(nil, []byte(tt.text), tt.skipClientTime, tt.procID, tt.procSeq, tt.level)) + got := string(lg.appendText(nil, []byte(tt.text), tt.skipClientTime, tt.procID, tt.procSeq, tt.level)) if !strings.HasSuffix(got, "\n") { t.Errorf("`%s` does not end with a newline", got) } @@ -397,10 +397,10 @@ func TestAppendText(t *testing.T) { } func TestAppendTextOrJSON(t *testing.T) { - var l Logger - l.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) - l.metricsDelta = func() string { return "metrics" } - l.lowMem = true + var lg Logger + lg.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) + lg.metricsDelta = func() string { return "metrics" } + lg.lowMem = true for _, tt := range []struct { in string @@ -419,7 +419,7 @@ func TestAppendTextOrJSON(t *testing.T) { {in: `{ "fizz" : "buzz" , "logtail" : "duplicate" , "wizz" : "wuzz" }`, want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z","error":{"detail":"duplicate logtail member","bad_data":"duplicate"}}, "fizz" : "buzz" , "wizz" : "wuzz"}`}, {in: `{"long":"` + strings.Repeat("a", maxSize) + `"}`, want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z","error":{"detail":"entry too large: 262155 bytes","bad_data":"{\"long\":\"` + strings.Repeat("a", 43681) + `…+218465"}}}`}, } { - got := string(l.appendTextOrJSONLocked(nil, []byte(tt.in), tt.level)) + got := string(lg.appendTextOrJSONLocked(nil, []byte(tt.in), tt.level)) if !strings.HasSuffix(got, "\n") { t.Errorf("`%s` does not end with a newline", got) } @@ -461,21 +461,21 @@ var testdataTextLog = []byte(`netcheck: report: udp=true v6=false v6os=true mapv var testdataJSONLog = 
[]byte(`{"end":"2024-04-08T21:39:15.715291586Z","nodeId":"nQRJBE7CNTRL","physicalTraffic":[{"dst":"127.x.x.x:2","src":"100.x.x.x:0","txBytes":148,"txPkts":1},{"dst":"127.x.x.x:2","src":"100.x.x.x:0","txBytes":148,"txPkts":1},{"dst":"98.x.x.x:1025","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"24.x.x.x:49973","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"73.x.x.x:41641","rxBytes":732,"rxPkts":6,"src":"100.x.x.x:0","txBytes":820,"txPkts":7},{"dst":"75.x.x.x:1025","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"75.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"174.x.x.x:35497","rxBytes":13008,"rxPkts":98,"src":"100.x.x.x:0","txBytes":26688,"txPkts":150},{"dst":"47.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"64.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5}],"start":"2024-04-08T21:39:11.099495616Z","virtualTraffic":[{"dst":"100.x.x.x:33008","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32984","proto":6,"src":"100.x.x.x:22","txBytes":1340,"txPkts":10},{"dst":"100.x.x.x:32998","proto":6,"src":"100.x.x.x:22","txBytes":1020,"txPkts":10},{"dst":"100.x.x.x:32994","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:32980","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32950","proto":6,"src":"100.x.x.x:22","txBytes":1340,"txPkts":10},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:53332","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:0","proto":1,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32966","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:57882","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:53326","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:57892","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:32934","proto":6,"src":"100.x.x.x:22","txBytes":8712,"txPkts":55},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32942","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32964","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:37238","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:37252","txBytes":60,"txPkts":1}]}`) func BenchmarkWriteText(b *testing.B) { - var l Logger - l.clock = tstime.StdClock{} - l.buffer = discardBuffer{} + var lg Logger + lg.clock = tstime.StdClock{} + lg.buffer = discardBuffer{} b.ReportAllocs() for range b.N { - must.Get(l.Write(testdataTextLog)) + 
must.Get(lg.Write(testdataTextLog)) } } func BenchmarkWriteJSON(b *testing.B) { - var l Logger - l.clock = tstime.StdClock{} - l.buffer = discardBuffer{} + var lg Logger + lg.clock = tstime.StdClock{} + lg.buffer = discardBuffer{} b.ReportAllocs() for range b.N { - must.Get(l.Write(testdataJSONLog)) + must.Get(lg.Write(testdataJSONLog)) } } diff --git a/net/art/stride_table.go b/net/art/stride_table.go index 5ff0455fe..5050df245 100644 --- a/net/art/stride_table.go +++ b/net/art/stride_table.go @@ -303,21 +303,21 @@ func formatPrefixTable(addr uint8, len int) string { // // For example, childPrefixOf("192.168.0.0/16", 8) == "192.168.8.0/24". func childPrefixOf(parent netip.Prefix, stride uint8) netip.Prefix { - l := parent.Bits() - if l%8 != 0 { + ln := parent.Bits() + if ln%8 != 0 { panic("parent prefix is not 8-bit aligned") } - if l >= parent.Addr().BitLen() { + if ln >= parent.Addr().BitLen() { panic("parent prefix cannot be extended further") } - off := l / 8 + off := ln / 8 if parent.Addr().Is4() { bs := parent.Addr().As4() bs[off] = stride - return netip.PrefixFrom(netip.AddrFrom4(bs), l+8) + return netip.PrefixFrom(netip.AddrFrom4(bs), ln+8) } else { bs := parent.Addr().As16() bs[off] = stride - return netip.PrefixFrom(netip.AddrFrom16(bs), l+8) + return netip.PrefixFrom(netip.AddrFrom16(bs), ln+8) } } diff --git a/net/art/stride_table_test.go b/net/art/stride_table_test.go index bff2bb7c5..4ccef1fe0 100644 --- a/net/art/stride_table_test.go +++ b/net/art/stride_table_test.go @@ -377,8 +377,8 @@ func pfxMask(pfxLen int) uint8 { func allPrefixes() []slowEntry[int] { ret := make([]slowEntry[int], 0, lastHostIndex) for i := 1; i < lastHostIndex+1; i++ { - a, l := inversePrefixIndex(i) - ret = append(ret, slowEntry[int]{a, l, i}) + a, ln := inversePrefixIndex(i) + ret = append(ret, slowEntry[int]{a, ln, i}) } return ret } diff --git a/net/dns/manager_windows_test.go b/net/dns/manager_windows_test.go index 7c0139f45..aa538a0f6 100644 --- a/net/dns/manager_windows_test.go +++ b/net/dns/manager_windows_test.go @@ -550,8 +550,8 @@ func genRandomSubdomains(t *testing.T, n int) []dnsname.FQDN { const charset = "abcdefghijklmnopqrstuvwxyz" for len(domains) < cap(domains) { - l := r.Intn(19) + 1 - b := make([]byte, l) + ln := r.Intn(19) + 1 + b := make([]byte, ln) for i := range b { b[i] = charset[r.Intn(len(charset))] } diff --git a/net/ktimeout/ktimeout_linux_test.go b/net/ktimeout/ktimeout_linux_test.go index df4156745..0330923a9 100644 --- a/net/ktimeout/ktimeout_linux_test.go +++ b/net/ktimeout/ktimeout_linux_test.go @@ -19,11 +19,11 @@ func TestSetUserTimeout(t *testing.T) { // set in ktimeout.UserTimeout above. 
lc.SetMultipathTCP(false) - l := must.Get(lc.Listen(context.Background(), "tcp", "localhost:0")) - defer l.Close() + ln := must.Get(lc.Listen(context.Background(), "tcp", "localhost:0")) + defer ln.Close() var err error - if e := must.Get(l.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) { + if e := must.Get(ln.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) { err = SetUserTimeout(fd, 0) }); e != nil { t.Fatal(e) @@ -31,12 +31,12 @@ func TestSetUserTimeout(t *testing.T) { if err != nil { t.Fatal(err) } - v := must.Get(unix.GetsockoptInt(int(must.Get(l.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT)) + v := must.Get(unix.GetsockoptInt(int(must.Get(ln.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT)) if v != 0 { t.Errorf("TCP_USER_TIMEOUT: got %v; want 0", v) } - if e := must.Get(l.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) { + if e := must.Get(ln.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) { err = SetUserTimeout(fd, 30*time.Second) }); e != nil { t.Fatal(e) @@ -44,7 +44,7 @@ func TestSetUserTimeout(t *testing.T) { if err != nil { t.Fatal(err) } - v = must.Get(unix.GetsockoptInt(int(must.Get(l.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT)) + v = must.Get(unix.GetsockoptInt(int(must.Get(ln.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT)) if v != 30000 { t.Errorf("TCP_USER_TIMEOUT: got %v; want 30000", v) } diff --git a/net/ktimeout/ktimeout_test.go b/net/ktimeout/ktimeout_test.go index 7befa3b1a..b534f046c 100644 --- a/net/ktimeout/ktimeout_test.go +++ b/net/ktimeout/ktimeout_test.go @@ -14,11 +14,11 @@ func ExampleUserTimeout() { lc := net.ListenConfig{ Control: UserTimeout(30 * time.Second), } - l, err := lc.Listen(context.TODO(), "tcp", "127.0.0.1:0") + ln, err := lc.Listen(context.TODO(), "tcp", "127.0.0.1:0") if err != nil { fmt.Printf("error: %v", err) return } - l.Close() + ln.Close() // Output: } diff --git a/net/memnet/listener.go b/net/memnet/listener.go index 202026e16..dded97995 100644 --- a/net/memnet/listener.go +++ b/net/memnet/listener.go @@ -39,16 +39,16 @@ func Listen(addr string) *Listener { } // Addr implements net.Listener.Addr. -func (l *Listener) Addr() net.Addr { - return l.addr +func (ln *Listener) Addr() net.Addr { + return ln.addr } // Close closes the pipe listener. -func (l *Listener) Close() error { +func (ln *Listener) Close() error { var cleanup func() - l.closeOnce.Do(func() { - cleanup = l.onClose - close(l.closed) + ln.closeOnce.Do(func() { + cleanup = ln.onClose + close(ln.closed) }) if cleanup != nil { cleanup() @@ -57,11 +57,11 @@ func (l *Listener) Close() error { } // Accept blocks until a new connection is available or the listener is closed. -func (l *Listener) Accept() (net.Conn, error) { +func (ln *Listener) Accept() (net.Conn, error) { select { - case c := <-l.ch: + case c := <-ln.ch: return c, nil - case <-l.closed: + case <-ln.closed: return nil, net.ErrClosed } } @@ -70,18 +70,18 @@ func (l *Listener) Accept() (net.Conn, error) { // The provided Context must be non-nil. If the context expires before the // connection is complete, an error is returned. Once successfully connected // any expiration of the context will not affect the connection. 
-func (l *Listener) Dial(ctx context.Context, network, addr string) (_ net.Conn, err error) { +func (ln *Listener) Dial(ctx context.Context, network, addr string) (_ net.Conn, err error) { if !strings.HasSuffix(network, "tcp") { return nil, net.UnknownNetworkError(network) } - if connAddr(addr) != l.addr { + if connAddr(addr) != ln.addr { return nil, &net.AddrError{ Err: "invalid address", Addr: addr, } } - newConn := l.NewConn + newConn := ln.NewConn if newConn == nil { newConn = func(network, addr string, maxBuf int) (Conn, Conn) { return NewConn(addr, maxBuf) @@ -98,9 +98,9 @@ func (l *Listener) Dial(ctx context.Context, network, addr string) (_ net.Conn, select { case <-ctx.Done(): return nil, ctx.Err() - case <-l.closed: + case <-ln.closed: return nil, net.ErrClosed - case l.ch <- s: + case ln.ch <- s: return c, nil } } diff --git a/net/memnet/listener_test.go b/net/memnet/listener_test.go index 73b67841a..b6ceb3dfa 100644 --- a/net/memnet/listener_test.go +++ b/net/memnet/listener_test.go @@ -9,10 +9,10 @@ import ( ) func TestListener(t *testing.T) { - l := Listen("srv.local") - defer l.Close() + ln := Listen("srv.local") + defer ln.Close() go func() { - c, err := l.Accept() + c, err := ln.Accept() if err != nil { t.Error(err) return @@ -20,11 +20,11 @@ func TestListener(t *testing.T) { defer c.Close() }() - if c, err := l.Dial(context.Background(), "tcp", "invalid"); err == nil { + if c, err := ln.Dial(context.Background(), "tcp", "invalid"); err == nil { c.Close() t.Fatalf("dial to invalid address succeeded") } - c, err := l.Dial(context.Background(), "tcp", "srv.local") + c, err := ln.Dial(context.Background(), "tcp", "srv.local") if err != nil { t.Fatalf("dial failed: %v", err) return diff --git a/net/netaddr/netaddr.go b/net/netaddr/netaddr.go index 1ab6c053a..a04acd57a 100644 --- a/net/netaddr/netaddr.go +++ b/net/netaddr/netaddr.go @@ -34,7 +34,7 @@ func FromStdIPNet(std *net.IPNet) (prefix netip.Prefix, ok bool) { } ip = ip.Unmap() - if l := len(std.Mask); l != net.IPv4len && l != net.IPv6len { + if ln := len(std.Mask); ln != net.IPv4len && ln != net.IPv6len { // Invalid mask. 
return netip.Prefix{}, false } diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 95750b2d0..c5a3d2392 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -993,9 +993,9 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe c.logf("[v1] netcheck: measuring HTTPS latency of %v (%d): %v", reg.RegionCode, reg.RegionID, err) } else { rs.mu.Lock() - if l, ok := rs.report.RegionLatency[reg.RegionID]; !ok { + if latency, ok := rs.report.RegionLatency[reg.RegionID]; !ok { mak.Set(&rs.report.RegionLatency, reg.RegionID, d) - } else if l >= d { + } else if latency >= d { rs.report.RegionLatency[reg.RegionID] = d } // We set these IPv4 and IPv6 but they're not really used @@ -1214,9 +1214,9 @@ func (c *Client) measureAllICMPLatency(ctx context.Context, rs *reportState, nee } else if ok { c.logf("[v1] ICMP latency of %v (%d): %v", reg.RegionCode, reg.RegionID, d) rs.mu.Lock() - if l, ok := rs.report.RegionLatency[reg.RegionID]; !ok { + if latency, ok := rs.report.RegionLatency[reg.RegionID]; !ok { mak.Set(&rs.report.RegionLatency, reg.RegionID, d) - } else if l >= d { + } else if latency >= d { rs.report.RegionLatency[reg.RegionID] = d } diff --git a/net/socks5/socks5.go b/net/socks5/socks5.go index 4a5befa1d..2e277147b 100644 --- a/net/socks5/socks5.go +++ b/net/socks5/socks5.go @@ -120,10 +120,10 @@ func (s *Server) logf(format string, args ...any) { } // Serve accepts and handles incoming connections on the given listener. -func (s *Server) Serve(l net.Listener) error { - defer l.Close() +func (s *Server) Serve(ln net.Listener) error { + defer ln.Close() for { - c, err := l.Accept() + c, err := ln.Accept() if err != nil { return err } diff --git a/net/speedtest/speedtest_server.go b/net/speedtest/speedtest_server.go index 9dd78b195..72f85fa15 100644 --- a/net/speedtest/speedtest_server.go +++ b/net/speedtest/speedtest_server.go @@ -17,9 +17,9 @@ import ( // connections and handles each one in a goroutine. Because it runs in an infinite loop, // this function only returns if any of the speedtests return with errors, or if the // listener is closed. -func Serve(l net.Listener) error { +func Serve(ln net.Listener) error { for { - conn, err := l.Accept() + conn, err := ln.Accept() if errors.Is(err, net.ErrClosed) { return nil } diff --git a/net/speedtest/speedtest_test.go b/net/speedtest/speedtest_test.go index 69fdb6b56..bb8f2676a 100644 --- a/net/speedtest/speedtest_test.go +++ b/net/speedtest/speedtest_test.go @@ -21,13 +21,13 @@ func TestDownload(t *testing.T) { flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17338") // start a listener and find the port where the server will be listening. 
- l, err := net.Listen("tcp", ":0") + ln, err := net.Listen("tcp", ":0") if err != nil { t.Fatal(err) } - t.Cleanup(func() { l.Close() }) + t.Cleanup(func() { ln.Close() }) - serverIP := l.Addr().String() + serverIP := ln.Addr().String() t.Log("server IP found:", serverIP) type state struct { @@ -40,7 +40,7 @@ func TestDownload(t *testing.T) { stateChan := make(chan state, 1) go func() { - err := Serve(l) + err := Serve(ln) stateChan <- state{err: err} }() @@ -84,7 +84,7 @@ func TestDownload(t *testing.T) { }) // causes the server goroutine to finish - l.Close() + ln.Close() testState := <-stateChan if testState.err != nil { diff --git a/packages/deb/deb.go b/packages/deb/deb.go index 30e3f2b4d..cab0fea07 100644 --- a/packages/deb/deb.go +++ b/packages/deb/deb.go @@ -166,14 +166,14 @@ var ( func findArchAndVersion(control []byte) (arch string, version string, err error) { b := bytes.NewBuffer(control) for { - l, err := b.ReadBytes('\n') + ln, err := b.ReadBytes('\n') if err != nil { return "", "", err } - if bytes.HasPrefix(l, archKey) { - arch = string(bytes.TrimSpace(l[len(archKey):])) - } else if bytes.HasPrefix(l, versionKey) { - version = string(bytes.TrimSpace(l[len(versionKey):])) + if bytes.HasPrefix(ln, archKey) { + arch = string(bytes.TrimSpace(ln[len(archKey):])) + } else if bytes.HasPrefix(ln, versionKey) { + version = string(bytes.TrimSpace(ln[len(versionKey):])) } if arch != "" && version != "" { return arch, version, nil diff --git a/prober/derp.go b/prober/derp.go index 52e56fd4e..22843b53a 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -323,14 +323,14 @@ func (d *derpProber) probeBandwidth(from, to string, size int64) ProbeClass { "derp_path": derpPath, "tcp_in_tcp": strconv.FormatBool(d.bwTUNIPv4Prefix != nil), }, - Metrics: func(l prometheus.Labels) []prometheus.Metric { + Metrics: func(lb prometheus.Labels) []prometheus.Metric { metrics := []prometheus.Metric{ - prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_probe_size_bytes", "Payload size of the bandwidth prober", nil, l), prometheus.GaugeValue, float64(size)), - prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_transfer_time_seconds_total", "Time it took to transfer data", nil, l), prometheus.CounterValue, transferTimeSeconds.Value()), + prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_probe_size_bytes", "Payload size of the bandwidth prober", nil, lb), prometheus.GaugeValue, float64(size)), + prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_transfer_time_seconds_total", "Time it took to transfer data", nil, lb), prometheus.CounterValue, transferTimeSeconds.Value()), } if d.bwTUNIPv4Prefix != nil { // For TCP-in-TCP probes, also record cumulative bytes transferred. 
- metrics = append(metrics, prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_bytes_total", "Amount of data transferred", nil, l), prometheus.CounterValue, totalBytesTransferred.Value())) + metrics = append(metrics, prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_bytes_total", "Amount of data transferred", nil, lb), prometheus.CounterValue, totalBytesTransferred.Value())) } return metrics }, @@ -361,11 +361,11 @@ func (d *derpProber) probeQueuingDelay(from, to string, packetsPerSecond int, pa }, Class: "derp_qd", Labels: Labels{"derp_path": derpPath}, - Metrics: func(l prometheus.Labels) []prometheus.Metric { + Metrics: func(lb prometheus.Labels) []prometheus.Metric { qdh.mx.Lock() result := []prometheus.Metric{ - prometheus.MustNewConstMetric(prometheus.NewDesc("derp_qd_probe_dropped_packets", "Total packets dropped", nil, l), prometheus.CounterValue, float64(packetsDropped.Value())), - prometheus.MustNewConstHistogram(prometheus.NewDesc("derp_qd_probe_delays_seconds", "Distribution of queuing delays", nil, l), qdh.count, qdh.sum, maps.Clone(qdh.bucketedCounts)), + prometheus.MustNewConstMetric(prometheus.NewDesc("derp_qd_probe_dropped_packets", "Total packets dropped", nil, lb), prometheus.CounterValue, float64(packetsDropped.Value())), + prometheus.MustNewConstHistogram(prometheus.NewDesc("derp_qd_probe_delays_seconds", "Distribution of queuing delays", nil, lb), qdh.count, qdh.sum, maps.Clone(qdh.bucketedCounts)), } qdh.mx.Unlock() return result @@ -1046,11 +1046,11 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT }() // Start a listener to receive the data - l, err := net.Listen("tcp", net.JoinHostPort(ifAddr.String(), "0")) + ln, err := net.Listen("tcp", net.JoinHostPort(ifAddr.String(), "0")) if err != nil { return fmt.Errorf("failed to listen: %s", err) } - defer l.Close() + defer ln.Close() // 128KB by default const writeChunkSize = 128 << 10 @@ -1062,9 +1062,9 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT } // Dial ourselves - _, port, err := net.SplitHostPort(l.Addr().String()) + _, port, err := net.SplitHostPort(ln.Addr().String()) if err != nil { - return fmt.Errorf("failed to split address %q: %w", l.Addr().String(), err) + return fmt.Errorf("failed to split address %q: %w", ln.Addr().String(), err) } connAddr := net.JoinHostPort(destinationAddr.String(), port) @@ -1085,7 +1085,7 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT go func() { defer wg.Done() - readConn, err := l.Accept() + readConn, err := ln.Accept() if err != nil { readFinishedC <- err return @@ -1146,11 +1146,11 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT func newConn(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode, isProber bool, meshKey key.DERPMesh) (*derphttp.Client, error) { // To avoid spamming the log with regular connection messages. 
- l := logger.Filtered(log.Printf, func(s string) bool { + logf := logger.Filtered(log.Printf, func(s string) bool { return !strings.Contains(s, "derphttp.Client.Connect: connecting to") }) priv := key.NewNode() - dc := derphttp.NewRegionClient(priv, l, netmon.NewStatic(), func() *tailcfg.DERPRegion { + dc := derphttp.NewRegionClient(priv, logf, netmon.NewStatic(), func() *tailcfg.DERPRegion { rid := n.RegionID return &tailcfg.DERPRegion{ RegionID: rid, diff --git a/prober/prober.go b/prober/prober.go index 9073a9502..6b904dd97 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -118,25 +118,25 @@ func (p *Prober) Run(name string, interval time.Duration, labels Labels, pc Prob panic(fmt.Sprintf("probe named %q already registered", name)) } - l := prometheus.Labels{ + lb := prometheus.Labels{ "name": name, "class": pc.Class, } for k, v := range pc.Labels { - l[k] = v + lb[k] = v } for k, v := range labels { - l[k] = v + lb[k] = v } - probe := newProbe(p, name, interval, l, pc) + probe := newProbe(p, name, interval, lb, pc) p.probes[name] = probe go probe.loop() return probe } // newProbe creates a new Probe with the given parameters, but does not start it. -func newProbe(p *Prober, name string, interval time.Duration, l prometheus.Labels, pc ProbeClass) *Probe { +func newProbe(p *Prober, name string, interval time.Duration, lg prometheus.Labels, pc ProbeClass) *Probe { ctx, cancel := context.WithCancel(context.Background()) probe := &Probe{ prober: p, @@ -155,17 +155,17 @@ func newProbe(p *Prober, name string, interval time.Duration, l prometheus.Label latencyHist: ring.New(recentHistSize), metrics: prometheus.NewRegistry(), - metricLabels: l, - mInterval: prometheus.NewDesc("interval_secs", "Probe interval in seconds", nil, l), - mStartTime: prometheus.NewDesc("start_secs", "Latest probe start time (seconds since epoch)", nil, l), - mEndTime: prometheus.NewDesc("end_secs", "Latest probe end time (seconds since epoch)", nil, l), - mLatency: prometheus.NewDesc("latency_millis", "Latest probe latency (ms)", nil, l), - mResult: prometheus.NewDesc("result", "Latest probe result (1 = success, 0 = failure)", nil, l), + metricLabels: lg, + mInterval: prometheus.NewDesc("interval_secs", "Probe interval in seconds", nil, lg), + mStartTime: prometheus.NewDesc("start_secs", "Latest probe start time (seconds since epoch)", nil, lg), + mEndTime: prometheus.NewDesc("end_secs", "Latest probe end time (seconds since epoch)", nil, lg), + mLatency: prometheus.NewDesc("latency_millis", "Latest probe latency (ms)", nil, lg), + mResult: prometheus.NewDesc("result", "Latest probe result (1 = success, 0 = failure)", nil, lg), mAttempts: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "attempts_total", Help: "Total number of probing attempts", ConstLabels: l, + Name: "attempts_total", Help: "Total number of probing attempts", ConstLabels: lg, }, []string{"status"}), mSeconds: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "seconds_total", Help: "Total amount of time spent executing the probe", ConstLabels: l, + Name: "seconds_total", Help: "Total amount of time spent executing the probe", ConstLabels: lg, }, []string{"status"}), } if p.metrics != nil { @@ -512,8 +512,8 @@ func (probe *Probe) probeInfoLocked() ProbeInfo { inf.Latency = probe.latency } probe.latencyHist.Do(func(v any) { - if l, ok := v.(time.Duration); ok { - inf.RecentLatencies = append(inf.RecentLatencies, l) + if latency, ok := v.(time.Duration); ok { + inf.RecentLatencies = append(inf.RecentLatencies, latency) } }) 
probe.successHist.Do(func(v any) { @@ -719,8 +719,8 @@ func initialDelay(seed string, interval time.Duration) time.Duration { // Labels is a set of metric labels used by a prober. type Labels map[string]string -func (l Labels) With(k, v string) Labels { - new := maps.Clone(l) +func (lb Labels) With(k, v string) Labels { + new := maps.Clone(lb) new[k] = v return new } diff --git a/tka/aum.go b/tka/aum.go index bd17b2098..b8c4b6c9e 100644 --- a/tka/aum.go +++ b/tka/aum.go @@ -31,8 +31,8 @@ func (h AUMHash) String() string { // UnmarshalText implements encoding.TextUnmarshaler. func (h *AUMHash) UnmarshalText(text []byte) error { - if l := base32StdNoPad.DecodedLen(len(text)); l != len(h) { - return fmt.Errorf("tka.AUMHash.UnmarshalText: text wrong length: %d, want %d", l, len(text)) + if ln := base32StdNoPad.DecodedLen(len(text)); ln != len(h) { + return fmt.Errorf("tka.AUMHash.UnmarshalText: text wrong length: %d, want %d", ln, len(text)) } if _, err := base32StdNoPad.Decode(h[:], text); err != nil { return fmt.Errorf("tka.AUMHash.UnmarshalText: %w", err) diff --git a/tka/sig_test.go b/tka/sig_test.go index 2fafb0436..c5c03ef2e 100644 --- a/tka/sig_test.go +++ b/tka/sig_test.go @@ -76,8 +76,8 @@ func TestSigNested(t *testing.T) { if err := nestedSig.verifySignature(oldNode.Public(), k); err != nil { t.Fatalf("verifySignature(oldNode) failed: %v", err) } - if l := sigChainLength(nestedSig); l != 1 { - t.Errorf("nestedSig chain length = %v, want 1", l) + if ln := sigChainLength(nestedSig); ln != 1 { + t.Errorf("nestedSig chain length = %v, want 1", ln) } // The signature authorizing the rotation, signed by the @@ -93,8 +93,8 @@ func TestSigNested(t *testing.T) { if err := sig.verifySignature(node.Public(), k); err != nil { t.Fatalf("verifySignature(node) failed: %v", err) } - if l := sigChainLength(sig); l != 2 { - t.Errorf("sig chain length = %v, want 2", l) + if ln := sigChainLength(sig); ln != 2 { + t.Errorf("sig chain length = %v, want 2", ln) } // Test verification fails if the wrong verification key is provided diff --git a/tsconsensus/monitor.go b/tsconsensus/monitor.go index 2aa4c863b..c84e83454 100644 --- a/tsconsensus/monitor.go +++ b/tsconsensus/monitor.go @@ -92,8 +92,8 @@ func (m *monitor) handleSummaryStatus(w http.ResponseWriter, r *http.Request) { } slices.Sort(lines) - for _, l := range lines { - _, err = w.Write([]byte(fmt.Sprintf("%s\n", l))) + for _, ln := range lines { + _, err = w.Write([]byte(fmt.Sprintf("%s\n", ln))) if err != nil { log.Printf("monitor: error writing status: %v", err) return diff --git a/tsconsensus/tsconsensus_test.go b/tsconsensus/tsconsensus_test.go index 17f3d881f..7f89eb48a 100644 --- a/tsconsensus/tsconsensus_test.go +++ b/tsconsensus/tsconsensus_test.go @@ -75,10 +75,10 @@ func fromCommand(bs []byte) (string, error) { return args, nil } -func (f *fsm) Apply(l *raft.Log) any { +func (f *fsm) Apply(lg *raft.Log) any { f.mu.Lock() defer f.mu.Unlock() - s, err := fromCommand(l.Data) + s, err := fromCommand(lg.Data) if err != nil { return CommandResult{ Err: err, diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index b0deb2079..f1531d013 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -1021,11 +1021,11 @@ func promMetricLabelsStr(labels []*dto.LabelPair) string { } var b strings.Builder b.WriteString("{") - for i, l := range labels { + for i, lb := range labels { if i > 0 { b.WriteString(",") } - b.WriteString(fmt.Sprintf("%s=%q", l.GetName(), l.GetValue())) + b.WriteString(fmt.Sprintf("%s=%q", lb.GetName(), lb.GetValue())) } 
b.WriteString("}") return b.String() @@ -1033,8 +1033,8 @@ func promMetricLabelsStr(labels []*dto.LabelPair) string { // sendData sends a given amount of bytes from s1 to s2. func sendData(logf func(format string, args ...any), ctx context.Context, bytesCount int, s1, s2 *Server, s1ip, s2ip netip.Addr) error { - l := must.Get(s1.Listen("tcp", fmt.Sprintf("%s:8081", s1ip))) - defer l.Close() + lb := must.Get(s1.Listen("tcp", fmt.Sprintf("%s:8081", s1ip))) + defer lb.Close() // Dial to s1 from s2 w, err := s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", s1ip)) @@ -1049,7 +1049,7 @@ func sendData(logf func(format string, args ...any), ctx context.Context, bytesC defer close(allReceived) go func() { - conn, err := l.Accept() + conn, err := lb.Accept() if err != nil { allReceived <- err return diff --git a/tstest/integration/vms/vms_test.go b/tstest/integration/vms/vms_test.go index 0bab3ba5d..c3a3775de 100644 --- a/tstest/integration/vms/vms_test.go +++ b/tstest/integration/vms/vms_test.go @@ -184,14 +184,14 @@ type ipMapping struct { // it is difficult to be 100% sure. This function should be used with care. It // will probably do what you want, but it is very easy to hold this wrong. func getProbablyFreePortNumber() (int, error) { - l, err := net.Listen("tcp", ":0") + ln, err := net.Listen("tcp", ":0") if err != nil { return 0, err } - defer l.Close() + defer ln.Close() - _, port, err := net.SplitHostPort(l.Addr().String()) + _, port, err := net.SplitHostPort(ln.Addr().String()) if err != nil { return 0, err } diff --git a/tsweb/tsweb.go b/tsweb/tsweb.go index 119fed2e6..869b4cc8e 100644 --- a/tsweb/tsweb.go +++ b/tsweb/tsweb.go @@ -628,8 +628,8 @@ type loggingResponseWriter struct { // from r, or falls back to logf. If a nil logger is given, the logs are // discarded. func newLogResponseWriter(logf logger.Logf, w http.ResponseWriter, r *http.Request) *loggingResponseWriter { - if l, ok := logger.LogfKey.ValueOk(r.Context()); ok && l != nil { - logf = l + if lg, ok := logger.LogfKey.ValueOk(r.Context()); ok && lg != nil { + logf = lg } if logf == nil { logf = logger.Discard @@ -642,46 +642,46 @@ func newLogResponseWriter(logf logger.Logf, w http.ResponseWriter, r *http.Reque } // WriteHeader implements [http.ResponseWriter]. -func (l *loggingResponseWriter) WriteHeader(statusCode int) { - if l.code != 0 { - l.logf("[unexpected] HTTP handler set statusCode twice (%d and %d)", l.code, statusCode) +func (lg *loggingResponseWriter) WriteHeader(statusCode int) { + if lg.code != 0 { + lg.logf("[unexpected] HTTP handler set statusCode twice (%d and %d)", lg.code, statusCode) return } - if l.ctx.Err() == nil { - l.code = statusCode + if lg.ctx.Err() == nil { + lg.code = statusCode } - l.ResponseWriter.WriteHeader(statusCode) + lg.ResponseWriter.WriteHeader(statusCode) } // Write implements [http.ResponseWriter]. -func (l *loggingResponseWriter) Write(bs []byte) (int, error) { - if l.code == 0 { - l.code = 200 +func (lg *loggingResponseWriter) Write(bs []byte) (int, error) { + if lg.code == 0 { + lg.code = 200 } - n, err := l.ResponseWriter.Write(bs) - l.bytes += n + n, err := lg.ResponseWriter.Write(bs) + lg.bytes += n return n, err } // Hijack implements http.Hijacker. Note that hijacking can still fail // because the wrapped ResponseWriter is not required to implement // Hijacker, as this breaks HTTP/2. 
-func (l *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - h, ok := l.ResponseWriter.(http.Hijacker) +func (lg *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + h, ok := lg.ResponseWriter.(http.Hijacker) if !ok { return nil, nil, errors.New("ResponseWriter is not a Hijacker") } conn, buf, err := h.Hijack() if err == nil { - l.hijacked = true + lg.hijacked = true } return conn, buf, err } -func (l loggingResponseWriter) Flush() { - f, _ := l.ResponseWriter.(http.Flusher) +func (lg loggingResponseWriter) Flush() { + f, _ := lg.ResponseWriter.(http.Flusher) if f == nil { - l.logf("[unexpected] tried to Flush a ResponseWriter that can't flush") + lg.logf("[unexpected] tried to Flush a ResponseWriter that can't flush") return } f.Flush() diff --git a/types/geo/quantize_test.go b/types/geo/quantize_test.go index 3c707e303..bc1f62c9b 100644 --- a/types/geo/quantize_test.go +++ b/types/geo/quantize_test.go @@ -32,20 +32,20 @@ func TestPointAnonymize(t *testing.T) { last := geo.MakePoint(llat, 0) cur := geo.MakePoint(lat, 0) anon := cur.Quantize() - switch l, g, err := anon.LatLng(); { + switch latlng, g, err := anon.LatLng(); { case err != nil: t.Fatal(err) case lat == southPole: // initialize llng, to the first snapped longitude - llat = l + llat = latlng goto Lng case g != 0: t.Fatalf("%v is west or east of %v", anon, last) - case l < llat: + case latlng < llat: t.Fatalf("%v is south of %v", anon, last) - case l == llat: + case latlng == llat: continue - case l > llat: + case latlng > llat: switch dist, err := last.DistanceTo(anon); { case err != nil: t.Fatal(err) @@ -55,7 +55,7 @@ func TestPointAnonymize(t *testing.T) { t.Logf("lat=%v last=%v cur=%v anon=%v", lat, last, cur, anon) t.Fatalf("%v is too close to %v", anon, last) default: - llat = l + llat = latlng } } @@ -65,14 +65,14 @@ func TestPointAnonymize(t *testing.T) { last := geo.MakePoint(llat, llng) cur := geo.MakePoint(lat, lng) anon := cur.Quantize() - switch l, g, err := anon.LatLng(); { + switch latlng, g, err := anon.LatLng(); { case err != nil: t.Fatal(err) case lng == dateLine: // initialize llng, to the first snapped longitude llng = g continue - case l != llat: + case latlng != llat: t.Fatalf("%v is north or south of %v", anon, last) case g != llng: const tolerance = geo.MinSeparation * 0x1p-9 diff --git a/types/key/disco.go b/types/key/disco.go index ce5f9b36f..52b40c766 100644 --- a/types/key/disco.go +++ b/types/key/disco.go @@ -167,11 +167,11 @@ func (k DiscoPublic) String() string { } // Compare returns an integer comparing DiscoPublic k and l lexicographically. -// The result will be 0 if k == l, -1 if k < l, and +1 if k > l. This is useful -// for situations requiring only one node in a pair to perform some operation, -// e.g. probing UDP path lifetime. -func (k DiscoPublic) Compare(l DiscoPublic) int { - return bytes.Compare(k.k[:], l.k[:]) +// The result will be 0 if k == other, -1 if k < other, and +1 if k > other. +// This is useful for situations requiring only one node in a pair to perform +// some operation, e.g. probing UDP path lifetime. +func (k DiscoPublic) Compare(other DiscoPublic) int { + return bytes.Compare(k.k[:], other.k[:]) } // AppendText implements encoding.TextAppender. diff --git a/types/prefs/list.go b/types/prefs/list.go index 7db473887..ae6b2fae3 100644 --- a/types/prefs/list.go +++ b/types/prefs/list.go @@ -45,36 +45,36 @@ func ListWithOpts[T ImmutableType](opts ...Options) List[T] { // SetValue configures the preference with the specified value. 
// It fails and returns [ErrManaged] if p is a managed preference, // and [ErrReadOnly] if p is a read-only preference. -func (l *List[T]) SetValue(val []T) error { - return l.preference.SetValue(cloneSlice(val)) +func (ls *List[T]) SetValue(val []T) error { + return ls.preference.SetValue(cloneSlice(val)) } // SetManagedValue configures the preference with the specified value // and marks the preference as managed. -func (l *List[T]) SetManagedValue(val []T) { - l.preference.SetManagedValue(cloneSlice(val)) +func (ls *List[T]) SetManagedValue(val []T) { + ls.preference.SetManagedValue(cloneSlice(val)) } // View returns a read-only view of l. -func (l *List[T]) View() ListView[T] { - return ListView[T]{l} +func (ls *List[T]) View() ListView[T] { + return ListView[T]{ls} } // Clone returns a copy of l that aliases no memory with l. -func (l List[T]) Clone() *List[T] { - res := ptr.To(l) - if v, ok := l.s.Value.GetOk(); ok { +func (ls List[T]) Clone() *List[T] { + res := ptr.To(ls) + if v, ok := ls.s.Value.GetOk(); ok { res.s.Value.Set(append(v[:0:0], v...)) } return res } // Equal reports whether l and l2 are equal. -func (l List[T]) Equal(l2 List[T]) bool { - if l.s.Metadata != l2.s.Metadata { +func (ls List[T]) Equal(l2 List[T]) bool { + if ls.s.Metadata != l2.s.Metadata { return false } - v1, ok1 := l.s.Value.GetOk() + v1, ok1 := ls.s.Value.GetOk() v2, ok2 := l2.s.Value.GetOk() if ok1 != ok2 { return false diff --git a/types/prefs/prefs_test.go b/types/prefs/prefs_test.go index d6af745bf..dc1213adb 100644 --- a/types/prefs/prefs_test.go +++ b/types/prefs/prefs_test.go @@ -487,31 +487,31 @@ func TestItemView(t *testing.T) { } func TestListView(t *testing.T) { - l := ListOf([]int{4, 8, 15, 16, 23, 42}, ReadOnly) + ls := ListOf([]int{4, 8, 15, 16, 23, 42}, ReadOnly) - lv := l.View() + lv := ls.View() checkIsSet(t, lv, true) checkIsManaged(t, lv, false) checkIsReadOnly(t, lv, true) - checkValue(t, lv, views.SliceOf(l.Value())) - checkValueOk(t, lv, views.SliceOf(l.Value()), true) + checkValue(t, lv, views.SliceOf(ls.Value())) + checkValueOk(t, lv, views.SliceOf(ls.Value()), true) l2 := *lv.AsStruct() - checkEqual(t, l, l2, true) + checkEqual(t, ls, l2, true) } func TestStructListView(t *testing.T) { - l := StructListOf([]*TestBundle{{Name: "E1"}, {Name: "E2"}}, ReadOnly) + ls := StructListOf([]*TestBundle{{Name: "E1"}, {Name: "E2"}}, ReadOnly) - lv := StructListViewOf(&l) + lv := StructListViewOf(&ls) checkIsSet(t, lv, true) checkIsManaged(t, lv, false) checkIsReadOnly(t, lv, true) - checkValue(t, lv, views.SliceOfViews(l.Value())) - checkValueOk(t, lv, views.SliceOfViews(l.Value()), true) + checkValue(t, lv, views.SliceOfViews(ls.Value())) + checkValueOk(t, lv, views.SliceOfViews(ls.Value()), true) l2 := *lv.AsStruct() - checkEqual(t, l, l2, true) + checkEqual(t, ls, l2, true) } func TestStructMapView(t *testing.T) { diff --git a/types/prefs/struct_list.go b/types/prefs/struct_list.go index 65f11011a..ba145e2cf 100644 --- a/types/prefs/struct_list.go +++ b/types/prefs/struct_list.go @@ -33,20 +33,20 @@ func StructListWithOpts[T views.Cloner[T]](opts ...Options) StructList[T] { // SetValue configures the preference with the specified value. // It fails and returns [ErrManaged] if p is a managed preference, // and [ErrReadOnly] if p is a read-only preference. 
-func (l *StructList[T]) SetValue(val []T) error { - return l.preference.SetValue(deepCloneSlice(val)) +func (ls *StructList[T]) SetValue(val []T) error { + return ls.preference.SetValue(deepCloneSlice(val)) } // SetManagedValue configures the preference with the specified value // and marks the preference as managed. -func (l *StructList[T]) SetManagedValue(val []T) { - l.preference.SetManagedValue(deepCloneSlice(val)) +func (ls *StructList[T]) SetManagedValue(val []T) { + ls.preference.SetManagedValue(deepCloneSlice(val)) } // Clone returns a copy of l that aliases no memory with l. -func (l StructList[T]) Clone() *StructList[T] { - res := ptr.To(l) - if v, ok := l.s.Value.GetOk(); ok { +func (ls StructList[T]) Clone() *StructList[T] { + res := ptr.To(ls) + if v, ok := ls.s.Value.GetOk(); ok { res.s.Value.Set(deepCloneSlice(v)) } return res @@ -56,11 +56,11 @@ func (l StructList[T]) Clone() *StructList[T] { // If the template type T implements an Equal(T) bool method, it will be used // instead of the == operator for value comparison. // It panics if T is not comparable. -func (l StructList[T]) Equal(l2 StructList[T]) bool { - if l.s.Metadata != l2.s.Metadata { +func (ls StructList[T]) Equal(l2 StructList[T]) bool { + if ls.s.Metadata != l2.s.Metadata { return false } - v1, ok1 := l.s.Value.GetOk() + v1, ok1 := ls.s.Value.GetOk() v2, ok2 := l2.s.Value.GetOk() if ok1 != ok2 { return false @@ -105,8 +105,8 @@ type StructListView[T views.ViewCloner[T, V], V views.StructView[T]] struct { // StructListViewOf returns a read-only view of l. // It is used by [tailscale.com/cmd/viewer]. -func StructListViewOf[T views.ViewCloner[T, V], V views.StructView[T]](l *StructList[T]) StructListView[T, V] { - return StructListView[T, V]{l} +func StructListViewOf[T views.ViewCloner[T, V], V views.StructView[T]](ls *StructList[T]) StructListView[T, V] { + return StructListView[T, V]{ls} } // Valid reports whether the underlying [StructList] is non-nil. diff --git a/types/prefs/struct_map.go b/types/prefs/struct_map.go index a081f7c74..83cc7447b 100644 --- a/types/prefs/struct_map.go +++ b/types/prefs/struct_map.go @@ -31,14 +31,14 @@ func StructMapWithOpts[K MapKeyType, V views.Cloner[V]](opts ...Options) StructM // SetValue configures the preference with the specified value. // It fails and returns [ErrManaged] if p is a managed preference, // and [ErrReadOnly] if p is a read-only preference. -func (l *StructMap[K, V]) SetValue(val map[K]V) error { - return l.preference.SetValue(deepCloneMap(val)) +func (m *StructMap[K, V]) SetValue(val map[K]V) error { + return m.preference.SetValue(deepCloneMap(val)) } // SetManagedValue configures the preference with the specified value // and marks the preference as managed. -func (l *StructMap[K, V]) SetManagedValue(val map[K]V) { - l.preference.SetManagedValue(deepCloneMap(val)) +func (m *StructMap[K, V]) SetManagedValue(val map[K]V) { + m.preference.SetManagedValue(deepCloneMap(val)) } // Clone returns a copy of m that aliases no memory with m. diff --git a/util/limiter/limiter.go b/util/limiter/limiter.go index 30e0b74ed..b86efdf29 100644 --- a/util/limiter/limiter.go +++ b/util/limiter/limiter.go @@ -94,59 +94,59 @@ type bucket struct { // Allow charges the key one token (up to the overdraft limit), and // reports whether the key can perform an action. 
-func (l *Limiter[K]) Allow(key K) bool { - return l.allow(key, time.Now()) +func (lm *Limiter[K]) Allow(key K) bool { + return lm.allow(key, time.Now()) } -func (l *Limiter[K]) allow(key K, now time.Time) bool { - l.mu.Lock() - defer l.mu.Unlock() - return l.allowBucketLocked(l.getBucketLocked(key, now), now) +func (lm *Limiter[K]) allow(key K, now time.Time) bool { + lm.mu.Lock() + defer lm.mu.Unlock() + return lm.allowBucketLocked(lm.getBucketLocked(key, now), now) } -func (l *Limiter[K]) getBucketLocked(key K, now time.Time) *bucket { - if l.cache == nil { - l.cache = &lru.Cache[K, *bucket]{MaxEntries: l.Size} - } else if b := l.cache.Get(key); b != nil { +func (lm *Limiter[K]) getBucketLocked(key K, now time.Time) *bucket { + if lm.cache == nil { + lm.cache = &lru.Cache[K, *bucket]{MaxEntries: lm.Size} + } else if b := lm.cache.Get(key); b != nil { return b } b := &bucket{ - cur: l.Max, - lastUpdate: now.Truncate(l.RefillInterval), + cur: lm.Max, + lastUpdate: now.Truncate(lm.RefillInterval), } - l.cache.Set(key, b) + lm.cache.Set(key, b) return b } -func (l *Limiter[K]) allowBucketLocked(b *bucket, now time.Time) bool { +func (lm *Limiter[K]) allowBucketLocked(b *bucket, now time.Time) bool { // Only update the bucket quota if needed to process request. if b.cur <= 0 { - l.updateBucketLocked(b, now) + lm.updateBucketLocked(b, now) } ret := b.cur > 0 - if b.cur > -l.Overdraft { + if b.cur > -lm.Overdraft { b.cur-- } return ret } -func (l *Limiter[K]) updateBucketLocked(b *bucket, now time.Time) { - now = now.Truncate(l.RefillInterval) +func (lm *Limiter[K]) updateBucketLocked(b *bucket, now time.Time) { + now = now.Truncate(lm.RefillInterval) if now.Before(b.lastUpdate) { return } timeDelta := max(now.Sub(b.lastUpdate), 0) - tokenDelta := int64(timeDelta / l.RefillInterval) - b.cur = min(b.cur+tokenDelta, l.Max) + tokenDelta := int64(timeDelta / lm.RefillInterval) + b.cur = min(b.cur+tokenDelta, lm.Max) b.lastUpdate = now } // peekForTest returns the number of tokens for key, also reporting // whether key was present. -func (l *Limiter[K]) tokensForTest(key K) (int64, bool) { - l.mu.Lock() - defer l.mu.Unlock() - if b, ok := l.cache.PeekOk(key); ok { +func (lm *Limiter[K]) tokensForTest(key K) (int64, bool) { + lm.mu.Lock() + defer lm.mu.Unlock() + if b, ok := lm.cache.PeekOk(key); ok { return b.cur, true } return 0, false @@ -159,12 +159,12 @@ func (l *Limiter[K]) tokensForTest(key K) (int64, bool) { // DumpHTML blocks other callers of the limiter while it collects the // state for dumping. It should not be called on large limiters // involved in hot codepaths. -func (l *Limiter[K]) DumpHTML(w io.Writer, onlyLimited bool) { - l.dumpHTML(w, onlyLimited, time.Now()) +func (lm *Limiter[K]) DumpHTML(w io.Writer, onlyLimited bool) { + lm.dumpHTML(w, onlyLimited, time.Now()) } -func (l *Limiter[K]) dumpHTML(w io.Writer, onlyLimited bool, now time.Time) { - dump := l.collectDump(now) +func (lm *Limiter[K]) dumpHTML(w io.Writer, onlyLimited bool, now time.Time) { + dump := lm.collectDump(now) io.WriteString(w, "") for _, line := range dump { if onlyLimited && line.Tokens > 0 { @@ -183,13 +183,13 @@ func (l *Limiter[K]) dumpHTML(w io.Writer, onlyLimited bool, now time.Time) { } // collectDump grabs a copy of the limiter state needed by DumpHTML. 
-func (l *Limiter[K]) collectDump(now time.Time) []dumpEntry[K] { - l.mu.Lock() - defer l.mu.Unlock() +func (lm *Limiter[K]) collectDump(now time.Time) []dumpEntry[K] { + lm.mu.Lock() + defer lm.mu.Unlock() - ret := make([]dumpEntry[K], 0, l.cache.Len()) - l.cache.ForEach(func(k K, v *bucket) { - l.updateBucketLocked(v, now) // so stats are accurate + ret := make([]dumpEntry[K], 0, lm.cache.Len()) + lm.cache.ForEach(func(k K, v *bucket) { + lm.updateBucketLocked(v, now) // so stats are accurate ret = append(ret, dumpEntry[K]{k, v.cur}) }) return ret diff --git a/util/limiter/limiter_test.go b/util/limiter/limiter_test.go index 1f466d882..77b1d562b 100644 --- a/util/limiter/limiter_test.go +++ b/util/limiter/limiter_test.go @@ -16,7 +16,7 @@ const testRefillInterval = time.Second func TestLimiter(t *testing.T) { // 1qps, burst of 10, 2 keys tracked - l := &Limiter[string]{ + limiter := &Limiter[string]{ Size: 2, Max: 10, RefillInterval: testRefillInterval, @@ -24,48 +24,48 @@ func TestLimiter(t *testing.T) { // Consume entire burst now := time.Now().Truncate(testRefillInterval) - allowed(t, l, "foo", 10, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", 0) + allowed(t, limiter, "foo", 10, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", 0) - allowed(t, l, "bar", 10, now) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", 0) + allowed(t, limiter, "bar", 10, now) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", 0) // Refill 1 token for both foo and bar now = now.Add(time.Second + time.Millisecond) - allowed(t, l, "foo", 1, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", 0) + allowed(t, limiter, "foo", 1, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", 0) - allowed(t, l, "bar", 1, now) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", 0) + allowed(t, limiter, "bar", 1, now) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", 0) // Refill 2 tokens for foo and bar now = now.Add(2*time.Second + time.Millisecond) - allowed(t, l, "foo", 2, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", 0) + allowed(t, limiter, "foo", 2, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", 0) - allowed(t, l, "bar", 2, now) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", 0) + allowed(t, limiter, "bar", 2, now) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", 0) // qux can burst 10, evicts foo so it can immediately burst 10 again too - allowed(t, l, "qux", 10, now) - denied(t, l, "qux", 1, now) - notInLimiter(t, l, "foo") - denied(t, l, "bar", 1, now) // refresh bar so foo lookup doesn't evict it - still throttled - - allowed(t, l, "foo", 10, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", 0) + allowed(t, limiter, "qux", 10, now) + denied(t, limiter, "qux", 1, now) + notInLimiter(t, limiter, "foo") + denied(t, limiter, "bar", 1, now) // refresh bar so foo lookup doesn't evict it - still throttled + + allowed(t, limiter, "foo", 10, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", 0) } func TestLimiterOverdraft(t *testing.T) { // 1qps, burst of 10, overdraft of 2, 2 keys tracked - l := &Limiter[string]{ + limiter := &Limiter[string]{ Size: 2, Max: 10, Overdraft: 2, @@ -74,51 +74,51 @@ func TestLimiterOverdraft(t *testing.T) { // Consume entire burst, go 1 into debt now := time.Now().Truncate(testRefillInterval).Add(time.Millisecond) - allowed(t, l, "foo", 10, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", 
-1) + allowed(t, limiter, "foo", 10, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", -1) - allowed(t, l, "bar", 10, now) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", -1) + allowed(t, limiter, "bar", 10, now) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", -1) // Refill 1 token for both foo and bar. // Still denied, still in debt. now = now.Add(time.Second) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", -1) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", -1) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", -1) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", -1) // Refill 2 tokens for foo and bar (1 available after debt), try // to consume 4. Overdraft is capped to 2. now = now.Add(2 * time.Second) - allowed(t, l, "foo", 1, now) - denied(t, l, "foo", 3, now) - hasTokens(t, l, "foo", -2) + allowed(t, limiter, "foo", 1, now) + denied(t, limiter, "foo", 3, now) + hasTokens(t, limiter, "foo", -2) - allowed(t, l, "bar", 1, now) - denied(t, l, "bar", 3, now) - hasTokens(t, l, "bar", -2) + allowed(t, limiter, "bar", 1, now) + denied(t, limiter, "bar", 3, now) + hasTokens(t, limiter, "bar", -2) // Refill 1, not enough to allow. now = now.Add(time.Second) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", -2) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", -2) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", -2) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", -2) // qux evicts foo, foo can immediately burst 10 again. - allowed(t, l, "qux", 1, now) - hasTokens(t, l, "qux", 9) - notInLimiter(t, l, "foo") - allowed(t, l, "foo", 10, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", -1) + allowed(t, limiter, "qux", 1, now) + hasTokens(t, limiter, "qux", 9) + notInLimiter(t, limiter, "foo") + allowed(t, limiter, "foo", 10, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", -1) } func TestDumpHTML(t *testing.T) { - l := &Limiter[string]{ + limiter := &Limiter[string]{ Size: 3, Max: 10, Overdraft: 10, @@ -126,13 +126,13 @@ func TestDumpHTML(t *testing.T) { } now := time.Now().Truncate(testRefillInterval).Add(time.Millisecond) - allowed(t, l, "foo", 10, now) - denied(t, l, "foo", 2, now) - allowed(t, l, "bar", 4, now) - allowed(t, l, "qux", 1, now) + allowed(t, limiter, "foo", 10, now) + denied(t, limiter, "foo", 2, now) + allowed(t, limiter, "bar", 4, now) + allowed(t, limiter, "qux", 1, now) var out bytes.Buffer - l.DumpHTML(&out, false) + limiter.DumpHTML(&out, false) want := strings.Join([]string{ "
                    KeyTokens
                    ", "", @@ -146,7 +146,7 @@ func TestDumpHTML(t *testing.T) { } out.Reset() - l.DumpHTML(&out, true) + limiter.DumpHTML(&out, true) want = strings.Join([]string{ "
                    KeyTokens
                    ", "", @@ -161,7 +161,7 @@ func TestDumpHTML(t *testing.T) { // organically. now = now.Add(3 * time.Second) out.Reset() - l.dumpHTML(&out, false, now) + limiter.dumpHTML(&out, false, now) want = strings.Join([]string{ "
                    KeyTokens
                    ", "", @@ -175,29 +175,29 @@ func TestDumpHTML(t *testing.T) { } } -func allowed(t *testing.T, l *Limiter[string], key string, count int, now time.Time) { +func allowed(t *testing.T, limiter *Limiter[string], key string, count int, now time.Time) { t.Helper() for i := range count { - if !l.allow(key, now) { - toks, ok := l.tokensForTest(key) + if !limiter.allow(key, now) { + toks, ok := limiter.tokensForTest(key) t.Errorf("after %d times: allow(%q, %q) = false, want true (%d tokens available, in cache = %v)", i, key, now, toks, ok) } } } -func denied(t *testing.T, l *Limiter[string], key string, count int, now time.Time) { +func denied(t *testing.T, limiter *Limiter[string], key string, count int, now time.Time) { t.Helper() for i := range count { - if l.allow(key, now) { - toks, ok := l.tokensForTest(key) + if limiter.allow(key, now) { + toks, ok := limiter.tokensForTest(key) t.Errorf("after %d times: allow(%q, %q) = true, want false (%d tokens available, in cache = %v)", i, key, now, toks, ok) } } } -func hasTokens(t *testing.T, l *Limiter[string], key string, want int64) { +func hasTokens(t *testing.T, limiter *Limiter[string], key string, want int64) { t.Helper() - got, ok := l.tokensForTest(key) + got, ok := limiter.tokensForTest(key) if !ok { t.Errorf("key %q missing from limiter", key) } else if got != want { @@ -205,9 +205,9 @@ func hasTokens(t *testing.T, l *Limiter[string], key string, want int64) { } } -func notInLimiter(t *testing.T, l *Limiter[string], key string) { +func notInLimiter(t *testing.T, limiter *Limiter[string], key string) { t.Helper() - if tokens, ok := l.tokensForTest(key); ok { + if tokens, ok := limiter.tokensForTest(key); ok { t.Errorf("key %q unexpectedly tracked by limiter, with %d tokens", key, tokens) } } diff --git a/util/linuxfw/detector.go b/util/linuxfw/detector.go index 644126131..149e0c960 100644 --- a/util/linuxfw/detector.go +++ b/util/linuxfw/detector.go @@ -85,7 +85,7 @@ type tableDetector interface { type linuxFWDetector struct{} // iptDetect returns the number of iptables rules in the current namespace. -func (l linuxFWDetector) iptDetect() (int, error) { +func (ld linuxFWDetector) iptDetect() (int, error) { return detectIptables() } @@ -96,7 +96,7 @@ var hookDetectNetfilter feature.Hook[func() (int, error)] var ErrUnsupported = errors.New("linuxfw:unsupported") // nftDetect returns the number of nftables rules in the current namespace. 
-func (l linuxFWDetector) nftDetect() (int, error) { +func (ld linuxFWDetector) nftDetect() (int, error) { if f, ok := hookDetectNetfilter.GetOk(); ok { return f() } diff --git a/util/lru/lru_test.go b/util/lru/lru_test.go index 5500e5e0f..04de2e507 100644 --- a/util/lru/lru_test.go +++ b/util/lru/lru_test.go @@ -84,8 +84,8 @@ func TestStressEvictions(t *testing.T) { for range numProbes { v := vals[rand.Intn(len(vals))] c.Set(v, true) - if l := c.Len(); l > cacheSize { - t.Fatalf("Cache size now %d, want max %d", l, cacheSize) + if ln := c.Len(); ln > cacheSize { + t.Fatalf("Cache size now %d, want max %d", ln, cacheSize) } } } @@ -119,8 +119,8 @@ func TestStressBatchedEvictions(t *testing.T) { c.DeleteOldest() } } - if l := c.Len(); l > cacheSizeMax { - t.Fatalf("Cache size now %d, want max %d", l, cacheSizeMax) + if ln := c.Len(); ln > cacheSizeMax { + t.Fatalf("Cache size now %d, want max %d", ln, cacheSizeMax) } } } diff --git a/util/syspolicy/setting/setting.go b/util/syspolicy/setting/setting.go index 0ca36176e..97362b1dc 100644 --- a/util/syspolicy/setting/setting.go +++ b/util/syspolicy/setting/setting.go @@ -322,33 +322,33 @@ func Definitions() ([]*Definition, error) { type PlatformList []string // Has reports whether l contains the target platform. -func (l PlatformList) Has(target string) bool { - if len(l) == 0 { +func (ls PlatformList) Has(target string) bool { + if len(ls) == 0 { return true } - return slices.ContainsFunc(l, func(os string) bool { + return slices.ContainsFunc(ls, func(os string) bool { return strings.EqualFold(os, target) }) } // HasCurrent is like Has, but for the current platform. -func (l PlatformList) HasCurrent() bool { - return l.Has(internal.OS()) +func (ls PlatformList) HasCurrent() bool { + return ls.Has(internal.OS()) } // mergeFrom merges l2 into l. Since an empty list indicates no platform restrictions, // if either l or l2 is empty, the merged result in l will also be empty. -func (l *PlatformList) mergeFrom(l2 PlatformList) { +func (ls *PlatformList) mergeFrom(l2 PlatformList) { switch { - case len(*l) == 0: + case len(*ls) == 0: // No-op. An empty list indicates no platform restrictions. case len(l2) == 0: // Merging with an empty list results in an empty list. - *l = l2 + *ls = l2 default: // Append, sort and dedup. - *l = append(*l, l2...) - slices.Sort(*l) - *l = slices.Compact(*l) + *ls = append(*ls, l2...) + slices.Sort(*ls) + *ls = slices.Compact(*ls) } } diff --git a/util/syspolicy/setting/setting_test.go b/util/syspolicy/setting/setting_test.go index e43495a16..9d99884f6 100644 --- a/util/syspolicy/setting/setting_test.go +++ b/util/syspolicy/setting/setting_test.go @@ -311,8 +311,8 @@ func TestListSettingDefinitions(t *testing.T) { t.Fatalf("SetDefinitionsForTest failed: %v", err) } - cmp := func(l, r *Definition) int { - return strings.Compare(string(l.Key()), string(r.Key())) + cmp := func(a, b *Definition) int { + return strings.Compare(string(a.Key()), string(b.Key())) } want := append([]*Definition{}, definitions...) 
slices.SortFunc(want, cmp) diff --git a/util/winutil/gp/gp_windows_test.go b/util/winutil/gp/gp_windows_test.go index e2520b46d..f89206883 100644 --- a/util/winutil/gp/gp_windows_test.go +++ b/util/winutil/gp/gp_windows_test.go @@ -182,16 +182,16 @@ func doWithMachinePolicyLocked(t *testing.T, f func()) { f() } -func doWithCustomEnterLeaveFuncs(t *testing.T, f func(l *PolicyLock), enter func(bool) (policyLockHandle, error), leave func(policyLockHandle) error) { +func doWithCustomEnterLeaveFuncs(t *testing.T, f func(*PolicyLock), enter func(bool) (policyLockHandle, error), leave func(policyLockHandle) error) { t.Helper() - l := NewMachinePolicyLock() - l.enterFn, l.leaveFn = enter, leave + lock := NewMachinePolicyLock() + lock.enterFn, lock.leaveFn = enter, leave t.Cleanup(func() { - if err := l.Close(); err != nil { + if err := lock.Close(); err != nil { t.Fatalf("(*PolicyLock).Close failed: %v", err) } }) - f(l) + f(lock) } diff --git a/util/winutil/gp/policylock_windows.go b/util/winutil/gp/policylock_windows.go index 69c5ff016..6c3ca0baf 100644 --- a/util/winutil/gp/policylock_windows.go +++ b/util/winutil/gp/policylock_windows.go @@ -127,32 +127,32 @@ func NewUserPolicyLock(token windows.Token) (*PolicyLock, error) { return lock, nil } -// Lock locks l. -// It returns [ErrInvalidLockState] if l has a zero value or has already been closed, +// Lock locks lk. +// It returns [ErrInvalidLockState] if lk has a zero value or has already been closed, // [ErrLockRestricted] if the lock cannot be acquired due to a restriction in place, // or a [syscall.Errno] if the underlying Group Policy lock cannot be acquired. // // As a special case, it fails with [windows.ERROR_ACCESS_DENIED] -// if l is a user policy lock, and the corresponding user is not logged in +// if lk is a user policy lock, and the corresponding user is not logged in // interactively at the time of the call. -func (l *PolicyLock) Lock() error { +func (lk *PolicyLock) Lock() error { if policyLockRestricted.Load() > 0 { return ErrLockRestricted } - l.mu.Lock() - defer l.mu.Unlock() - if l.lockCnt.Add(2)&1 == 0 { + lk.mu.Lock() + defer lk.mu.Unlock() + if lk.lockCnt.Add(2)&1 == 0 { // The lock cannot be acquired because it has either never been properly // created or its Close method has already been called. However, we need // to call Unlock to both decrement lockCnt and leave the underlying // CriticalPolicySection if we won the race with another goroutine and // now own the lock. - l.Unlock() + lk.Unlock() return ErrInvalidLockState } - if l.handle != 0 { + if lk.handle != 0 { // The underlying CriticalPolicySection is already acquired. // It is an R-Lock (with the W-counterpart owned by the Group Policy service), // meaning that it can be acquired by multiple readers simultaneously. @@ -160,20 +160,20 @@ func (l *PolicyLock) Lock() error { return nil } - return l.lockSlow() + return lk.lockSlow() } // lockSlow calls enterCriticalPolicySection to acquire the underlying GP read lock. // It waits for either the lock to be acquired, or for the Close method to be called. // // l.mu must be held. -func (l *PolicyLock) lockSlow() (err error) { +func (lk *PolicyLock) lockSlow() (err error) { defer func() { if err != nil { // Decrement the counter if the lock cannot be acquired, // and complete the pending close request if we're the last owner. 
- if l.lockCnt.Add(-2) == 0 { - l.closeInternal() + if lk.lockCnt.Add(-2) == 0 { + lk.closeInternal() } } }() @@ -190,12 +190,12 @@ func (l *PolicyLock) lockSlow() (err error) { resultCh := make(chan policyLockResult) go func() { - closing := l.closing - if l.scope == UserPolicy && l.token != 0 { + closing := lk.closing + if lk.scope == UserPolicy && lk.token != 0 { // Impersonate the user whose critical policy section we want to acquire. runtime.LockOSThread() defer runtime.UnlockOSThread() - if err := impersonateLoggedOnUser(l.token); err != nil { + if err := impersonateLoggedOnUser(lk.token); err != nil { initCh <- err return } @@ -209,10 +209,10 @@ func (l *PolicyLock) lockSlow() (err error) { close(initCh) var machine bool - if l.scope == MachinePolicy { + if lk.scope == MachinePolicy { machine = true } - handle, err := l.enterFn(machine) + handle, err := lk.enterFn(machine) send_result: for { @@ -226,7 +226,7 @@ func (l *PolicyLock) lockSlow() (err error) { // The lock is being closed, and we lost the race to l.closing // it the calling goroutine. if err == nil { - l.leaveFn(handle) + lk.leaveFn(handle) } break send_result default: @@ -247,21 +247,21 @@ func (l *PolicyLock) lockSlow() (err error) { select { case result := <-resultCh: if result.err == nil { - l.handle = result.handle + lk.handle = result.handle } return result.err - case <-l.closing: + case <-lk.closing: return ErrInvalidLockState } } // Unlock unlocks l. // It panics if l is not locked on entry to Unlock. -func (l *PolicyLock) Unlock() { - l.mu.Lock() - defer l.mu.Unlock() +func (lk *PolicyLock) Unlock() { + lk.mu.Lock() + defer lk.mu.Unlock() - lockCnt := l.lockCnt.Add(-2) + lockCnt := lk.lockCnt.Add(-2) if lockCnt < 0 { panic("negative lockCnt") } @@ -273,33 +273,33 @@ func (l *PolicyLock) Unlock() { return } - if l.handle != 0 { + if lk.handle != 0 { // Impersonation is not required to unlock a critical policy section. // The handle we pass determines which mutex will be unlocked. - leaveCriticalPolicySection(l.handle) - l.handle = 0 + leaveCriticalPolicySection(lk.handle) + lk.handle = 0 } if lockCnt == 0 { // Complete the pending close request if there's no more readers. - l.closeInternal() + lk.closeInternal() } } // Close releases resources associated with l. // It is a no-op for the machine policy lock. -func (l *PolicyLock) Close() error { - lockCnt := l.lockCnt.Load() +func (lk *PolicyLock) Close() error { + lockCnt := lk.lockCnt.Load() if lockCnt&1 == 0 { // The lock has never been initialized, or close has already been called. return nil } - close(l.closing) + close(lk.closing) // Unset the LSB to indicate a pending close request. 
- for !l.lockCnt.CompareAndSwap(lockCnt, lockCnt&^int32(1)) { - lockCnt = l.lockCnt.Load() + for !lk.lockCnt.CompareAndSwap(lockCnt, lockCnt&^int32(1)) { + lockCnt = lk.lockCnt.Load() } if lockCnt != 0 { @@ -307,16 +307,16 @@ func (l *PolicyLock) Close() error { return nil } - return l.closeInternal() + return lk.closeInternal() } -func (l *PolicyLock) closeInternal() error { - if l.token != 0 { - if err := l.token.Close(); err != nil { +func (lk *PolicyLock) closeInternal() error { + if lk.token != 0 { + if err := lk.token.Close(); err != nil { return err } - l.token = 0 + lk.token = 0 } - l.closing = nil + lk.closing = nil return nil } diff --git a/util/winutil/s4u/lsa_windows.go b/util/winutil/s4u/lsa_windows.go index 3ff2171f9..3276b2676 100644 --- a/util/winutil/s4u/lsa_windows.go +++ b/util/winutil/s4u/lsa_windows.go @@ -256,8 +256,8 @@ func checkDomainAccount(username string) (sanitizedUserName string, isDomainAcco // errors.Is to check for it. When capLevel == CapCreateProcess, the logon // enforces the user's logon hours policy (when present). func (ls *lsaSession) logonAs(srcName string, u *user.User, capLevel CapabilityLevel) (token windows.Token, err error) { - if l := len(srcName); l == 0 || l > _TOKEN_SOURCE_LENGTH { - return 0, fmt.Errorf("%w, actual length is %d", ErrBadSrcName, l) + if ln := len(srcName); ln == 0 || ln > _TOKEN_SOURCE_LENGTH { + return 0, fmt.Errorf("%w, actual length is %d", ErrBadSrcName, ln) } if err := checkASCII(srcName); err != nil { return 0, fmt.Errorf("%w: %v", ErrBadSrcName, err) diff --git a/util/winutil/s4u/s4u_windows.go b/util/winutil/s4u/s4u_windows.go index 8926aaedc..8c8e02dbe 100644 --- a/util/winutil/s4u/s4u_windows.go +++ b/util/winutil/s4u/s4u_windows.go @@ -938,10 +938,10 @@ func mergeEnv(existingEnv []string, extraEnv map[string]string) []string { result = append(result, strings.Join([]string{k, v}, "=")) } - slices.SortFunc(result, func(l, r string) int { - kl, _, _ := strings.Cut(l, "=") - kr, _, _ := strings.Cut(r, "=") - return strings.Compare(kl, kr) + slices.SortFunc(result, func(a, b string) int { + ka, _, _ := strings.Cut(a, "=") + kb, _, _ := strings.Cut(b, "=") + return strings.Compare(ka, kb) }) return result } diff --git a/util/winutil/startupinfo_windows.go b/util/winutil/startupinfo_windows.go index e04e9ea9b..edf48fa65 100644 --- a/util/winutil/startupinfo_windows.go +++ b/util/winutil/startupinfo_windows.go @@ -83,8 +83,8 @@ func (sib *StartupInfoBuilder) Resolve() (startupInfo *windows.StartupInfo, inhe // Always create a Unicode environment. 
createProcessFlags = windows.CREATE_UNICODE_ENVIRONMENT - if l := uint32(len(sib.attrs)); l > 0 { - attrCont, err := windows.NewProcThreadAttributeList(l) + if ln := uint32(len(sib.attrs)); ln > 0 { + attrCont, err := windows.NewProcThreadAttributeList(ln) if err != nil { return nil, false, 0, err } diff --git a/util/winutil/winutil_windows_test.go b/util/winutil/winutil_windows_test.go index d437ffa38..ead10a45d 100644 --- a/util/winutil/winutil_windows_test.go +++ b/util/winutil/winutil_windows_test.go @@ -68,8 +68,8 @@ func checkContiguousBuffer[T any, BU BufUnit](t *testing.T, extra []BU, pt *T, p if gotLen := int(ptLen); gotLen != expectedLen { t.Errorf("allocation length got %d, want %d", gotLen, expectedLen) } - if l := len(slcs); l != 1 { - t.Errorf("len(slcs) got %d, want 1", l) + if ln := len(slcs); ln != 1 { + t.Errorf("len(slcs) got %d, want 1", ln) } if len(extra) == 0 && slcs[0] != nil { t.Error("slcs[0] got non-nil, want nil") diff --git a/wf/firewall.go b/wf/firewall.go index dc1045ff8..07e160eb3 100644 --- a/wf/firewall.go +++ b/wf/firewall.go @@ -66,8 +66,8 @@ func (p protocol) getLayers(d direction) []wf.LayerID { return layers } -func ruleName(action wf.Action, l wf.LayerID, name string) string { - switch l { +func ruleName(action wf.Action, layerID wf.LayerID, name string) string { + switch layerID { case wf.LayerALEAuthConnectV4: return fmt.Sprintf("%s outbound %s (IPv4)", action, name) case wf.LayerALEAuthConnectV6: @@ -307,8 +307,8 @@ func (f *Firewall) newRule(name string, w weight, layer wf.LayerID, conditions [ func (f *Firewall) addRules(name string, w weight, conditions []*wf.Match, action wf.Action, p protocol, d direction) ([]*wf.Rule, error) { var rules []*wf.Rule - for _, l := range p.getLayers(d) { - r, err := f.newRule(name, w, l, conditions, action) + for _, layer := range p.getLayers(d) { + r, err := f.newRule(name, w, layer, conditions, action) if err != nil { return nil, err } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index a0142134a..f9d761052 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -111,7 +111,7 @@ func (c *Conn) WaitReady(t testing.TB) { } } -func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) { +func runDERPAndStun(t *testing.T, logf logger.Logf, ln nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) { d := derpserver.New(key.NewNode(), logf) httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d)) @@ -119,7 +119,7 @@ func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, st httpsrv.Config.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) httpsrv.StartTLS() - stunAddr, stunCleanup := stuntest.ServeWithPacketListener(t, l) + stunAddr, stunCleanup := stuntest.ServeWithPacketListener(t, ln) m := &tailcfg.DERPMap{ Regions: map[int]*tailcfg.DERPRegion{ @@ -172,12 +172,12 @@ type magicStack struct { // newMagicStack builds and initializes an idle magicsock and // friends. You need to call conn.onNodeViewsUpdate and dev.Reconfig // before anything interesting happens. 
-func newMagicStack(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap) *magicStack { +func newMagicStack(t testing.TB, logf logger.Logf, ln nettype.PacketListener, derpMap *tailcfg.DERPMap) *magicStack { privateKey := key.NewNode() - return newMagicStackWithKey(t, logf, l, derpMap, privateKey) + return newMagicStackWithKey(t, logf, ln, derpMap, privateKey) } -func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap, privateKey key.NodePrivate) *magicStack { +func newMagicStackWithKey(t testing.TB, logf logger.Logf, ln nettype.PacketListener, derpMap *tailcfg.DERPMap, privateKey key.NodePrivate) *magicStack { t.Helper() bus := eventbustest.NewBus(t) @@ -197,7 +197,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen Logf: logf, HealthTracker: ht, DisablePortMapper: true, - TestOnlyPacketListener: l, + TestOnlyPacketListener: ln, EndpointsFunc: func(eps []tailcfg.Endpoint) { epCh <- eps }, @@ -687,13 +687,13 @@ func (localhostListener) ListenPacket(ctx context.Context, network, address stri func TestTwoDevicePing(t *testing.T) { flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/11762") - l, ip := localhostListener{}, netaddr.IPv4(127, 0, 0, 1) + ln, ip := localhostListener{}, netaddr.IPv4(127, 0, 0, 1) n := &devices{ - m1: l, + m1: ln, m1IP: ip, - m2: l, + m2: ln, m2IP: ip, - stun: l, + stun: ln, stunIP: ip, } testTwoDevicePing(t, n) diff --git a/wgengine/netstack/link_endpoint.go b/wgengine/netstack/link_endpoint.go index 260b3196a..c5a9dbcbc 100644 --- a/wgengine/netstack/link_endpoint.go +++ b/wgengine/netstack/link_endpoint.go @@ -126,24 +126,24 @@ func newLinkEndpoint(size int, mtu uint32, linkAddr tcpip.LinkAddress, supported return le } -// gro attempts to enqueue p on g if l supports a GRO kind matching the +// gro attempts to enqueue p on g if ep supports a GRO kind matching the // transport protocol carried in p. gro may allocate g if it is nil. gro can // either return the existing g, a newly allocated one, or nil. Callers are // responsible for calling Flush() on the returned value if it is non-nil once // they have finished iterating through all GRO candidates for a given vector. -// If gro allocates a *gro.GRO it will have l's stack.NetworkDispatcher set via +// If gro allocates a *gro.GRO it will have ep's stack.NetworkDispatcher set via // SetDispatcher(). -func (l *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO { - if !buildfeatures.HasGRO || l.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP { +func (ep *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO { + if !buildfeatures.HasGRO || ep.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP { // IPv6 may have extension headers preceding a TCP header, but we trade // for a fast path and assume p cannot be coalesced in such a case. - l.injectInbound(p) + ep.injectInbound(p) return g } if g == nil { - l.mu.RLock() - d := l.dispatcher - l.mu.RUnlock() + ep.mu.RLock() + d := ep.dispatcher + ep.mu.RUnlock() g = gro.NewGRO() g.SetDispatcher(d) } @@ -154,39 +154,39 @@ func (l *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO { // Close closes l. Further packet injections will return an error, and all // pending packets are discarded. Close may be called concurrently with // WritePackets. 
-func (l *linkEndpoint) Close() { - l.mu.Lock() - l.dispatcher = nil - l.mu.Unlock() - l.q.Close() - l.Drain() +func (ep *linkEndpoint) Close() { + ep.mu.Lock() + ep.dispatcher = nil + ep.mu.Unlock() + ep.q.Close() + ep.Drain() } // Read does non-blocking read one packet from the outbound packet queue. -func (l *linkEndpoint) Read() *stack.PacketBuffer { - return l.q.Read() +func (ep *linkEndpoint) Read() *stack.PacketBuffer { + return ep.q.Read() } // ReadContext does blocking read for one packet from the outbound packet queue. // It can be cancelled by ctx, and in this case, it returns nil. -func (l *linkEndpoint) ReadContext(ctx context.Context) *stack.PacketBuffer { - return l.q.ReadContext(ctx) +func (ep *linkEndpoint) ReadContext(ctx context.Context) *stack.PacketBuffer { + return ep.q.ReadContext(ctx) } // Drain removes all outbound packets from the channel and counts them. -func (l *linkEndpoint) Drain() int { - return l.q.Drain() +func (ep *linkEndpoint) Drain() int { + return ep.q.Drain() } // NumQueued returns the number of packets queued for outbound. -func (l *linkEndpoint) NumQueued() int { - return l.q.Num() +func (ep *linkEndpoint) NumQueued() int { + return ep.q.Num() } -func (l *linkEndpoint) injectInbound(p *packet.Parsed) { - l.mu.RLock() - d := l.dispatcher - l.mu.RUnlock() +func (ep *linkEndpoint) injectInbound(p *packet.Parsed) { + ep.mu.RLock() + d := ep.dispatcher + ep.mu.RUnlock() if d == nil || !buildfeatures.HasNetstack { return } @@ -200,35 +200,35 @@ func (l *linkEndpoint) injectInbound(p *packet.Parsed) { // Attach saves the stack network-layer dispatcher for use later when packets // are injected. -func (l *linkEndpoint) Attach(dispatcher stack.NetworkDispatcher) { - l.mu.Lock() - defer l.mu.Unlock() - l.dispatcher = dispatcher +func (ep *linkEndpoint) Attach(dispatcher stack.NetworkDispatcher) { + ep.mu.Lock() + defer ep.mu.Unlock() + ep.dispatcher = dispatcher } // IsAttached implements stack.LinkEndpoint.IsAttached. -func (l *linkEndpoint) IsAttached() bool { - l.mu.RLock() - defer l.mu.RUnlock() - return l.dispatcher != nil +func (ep *linkEndpoint) IsAttached() bool { + ep.mu.RLock() + defer ep.mu.RUnlock() + return ep.dispatcher != nil } // MTU implements stack.LinkEndpoint.MTU. -func (l *linkEndpoint) MTU() uint32 { - l.mu.RLock() - defer l.mu.RUnlock() - return l.mtu +func (ep *linkEndpoint) MTU() uint32 { + ep.mu.RLock() + defer ep.mu.RUnlock() + return ep.mtu } // SetMTU implements stack.LinkEndpoint.SetMTU. -func (l *linkEndpoint) SetMTU(mtu uint32) { - l.mu.Lock() - defer l.mu.Unlock() - l.mtu = mtu +func (ep *linkEndpoint) SetMTU(mtu uint32) { + ep.mu.Lock() + defer ep.mu.Unlock() + ep.mtu = mtu } // Capabilities implements stack.LinkEndpoint.Capabilities. -func (l *linkEndpoint) Capabilities() stack.LinkEndpointCapabilities { +func (ep *linkEndpoint) Capabilities() stack.LinkEndpointCapabilities { // We are required to offload RX checksum validation for the purposes of // GRO. return stack.CapabilityRXChecksumOffload @@ -242,8 +242,8 @@ func (*linkEndpoint) GSOMaxSize() uint32 { } // SupportedGSO implements stack.GSOEndpoint. -func (l *linkEndpoint) SupportedGSO() stack.SupportedGSO { - return l.SupportedGSOKind +func (ep *linkEndpoint) SupportedGSO() stack.SupportedGSO { + return ep.SupportedGSOKind } // MaxHeaderLength returns the maximum size of the link layer header. Given it @@ -253,22 +253,22 @@ func (*linkEndpoint) MaxHeaderLength() uint16 { } // LinkAddress returns the link address of this endpoint. 
-func (l *linkEndpoint) LinkAddress() tcpip.LinkAddress { - l.mu.RLock() - defer l.mu.RUnlock() - return l.linkAddr +func (ep *linkEndpoint) LinkAddress() tcpip.LinkAddress { + ep.mu.RLock() + defer ep.mu.RUnlock() + return ep.linkAddr } // SetLinkAddress implements stack.LinkEndpoint.SetLinkAddress. -func (l *linkEndpoint) SetLinkAddress(addr tcpip.LinkAddress) { - l.mu.Lock() - defer l.mu.Unlock() - l.linkAddr = addr +func (ep *linkEndpoint) SetLinkAddress(addr tcpip.LinkAddress) { + ep.mu.Lock() + defer ep.mu.Unlock() + ep.linkAddr = addr } // WritePackets stores outbound packets into the channel. // Multiple concurrent calls are permitted. -func (l *linkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) { +func (ep *linkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) { n := 0 // TODO(jwhited): evaluate writing a stack.PacketBufferList instead of a // single packet. We can split 2 x 64K GSO across @@ -278,7 +278,7 @@ func (l *linkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Err // control MTU (and by effect TCP MSS in gVisor) we *shouldn't* expect to // ever overflow 128 slots (see wireguard-go/tun.ErrTooManySegments usage). for _, pkt := range pkts.AsSlice() { - if err := l.q.Write(pkt); err != nil { + if err := ep.q.Write(pkt); err != nil { if _, ok := err.(*tcpip.ErrNoBufferSpace); !ok && n == 0 { return 0, err } diff --git a/wgengine/router/osrouter/router_linux_test.go b/wgengine/router/osrouter/router_linux_test.go index 929fda1b4..68ed8dbb2 100644 --- a/wgengine/router/osrouter/router_linux_test.go +++ b/wgengine/router/osrouter/router_linux_test.go @@ -870,7 +870,7 @@ func (o *fakeOS) run(args ...string) error { rest = family + " " + strings.Join(args[3:], " ") } - var l *[]string + var ls *[]string switch args[1] { case "link": got := strings.Join(args[2:], " ") @@ -884,31 +884,31 @@ func (o *fakeOS) run(args ...string) error { } return nil case "addr": - l = &o.ips + ls = &o.ips case "route": - l = &o.routes + ls = &o.routes case "rule": - l = &o.rules + ls = &o.rules default: return unexpected() } switch args[2] { case "add": - for _, el := range *l { + for _, el := range *ls { if el == rest { o.t.Errorf("can't add %q, already present", rest) return errors.New("already exists") } } - *l = append(*l, rest) - sort.Strings(*l) + *ls = append(*ls, rest) + sort.Strings(*ls) case "del": found := false - for i, el := range *l { + for i, el := range *ls { if el == rest { found = true - *l = append((*l)[:i], (*l)[i+1:]...) + *ls = append((*ls)[:i], (*ls)[i+1:]...) break } } From 85373ef822645f66242c9b44dbd754247d0d9c63 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 18 Nov 2025 09:44:12 +0000 Subject: [PATCH 1685/1708] tka: move RemoveAll() to CompactableChonk I added a RemoveAll() method on tka.Chonk in #17946, but it's only used in the node to purge local AUMs. We don't need it in the SQLite storage, which currently implements tka.Chonk, so move it to CompactableChonk instead. Also add some automated tests, as a safety net. 
Updates tailscale/corp#33599 Change-Id: I54de9ccf1d6a3d29b36a94eccb0ebd235acd4ebc Signed-off-by: Alex Chan --- tka/tailchonk.go | 8 +++--- tstest/chonktest/chonktest.go | 47 +++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 4 deletions(-) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 2dc03a6f6..0b7191747 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -58,10 +58,6 @@ type Chonk interface { // as a hint to pick the correct chain in the event that the Chonk stores // multiple distinct chains. LastActiveAncestor() (*AUMHash, error) - - // RemoveAll permanently and completely clears the TKA state. This should - // be called when the user disables Tailnet Lock. - RemoveAll() error } // CompactableChonk implementation are extensions of Chonk, which are @@ -80,6 +76,10 @@ type CompactableChonk interface { // PurgeAUMs permanently and irrevocably deletes the specified // AUMs from storage. PurgeAUMs(hashes []AUMHash) error + + // RemoveAll permanently and completely clears the TKA state. This should + // be called when the user disables Tailnet Lock. + RemoveAll() error } // Mem implements in-memory storage of TKA state, suitable for diff --git a/tstest/chonktest/chonktest.go b/tstest/chonktest/chonktest.go index bfe394b28..404f1ec47 100644 --- a/tstest/chonktest/chonktest.go +++ b/tstest/chonktest/chonktest.go @@ -9,6 +9,7 @@ package chonktest import ( "bytes" "encoding/binary" + "errors" "math/rand" "os" "testing" @@ -253,4 +254,50 @@ func RunCompactableChonkTests(t *testing.T, newChonk func(t *testing.T) tka.Comp t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) } }) + + t.Run("RemoveAll", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + parentHash := randHash(t, 1) + data := []tka.AUM{ + { + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{1, 2}, + PrevAUMHash: parentHash[:], + }, + { + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{3, 4}, + PrevAUMHash: parentHash[:], + }, + } + + if err := chonk.CommitVerifiedAUMs(data); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + + // Check we can retrieve the AUMs we just stored + for _, want := range data { + got, err := chonk.AUM(want.Hash()) + if err != nil { + t.Fatalf("could not get %s: %v", want.Hash(), err) + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("stored AUM %s differs (-want, +got):\n%s", want.Hash(), diff) + } + } + + // Call RemoveAll() to drop all the AUM state + if err := chonk.RemoveAll(); err != nil { + t.Fatalf("RemoveAll failed: %v", err) + } + + // Check we can no longer retrieve the previously-stored AUMs + for _, want := range data { + aum, err := chonk.AUM(want.Hash()) + if !errors.Is(err, os.ErrNotExist) { + t.Fatalf("expected os.ErrNotExist for %s, instead got aum=%v, err=%v", want.Hash(), aum, err) + } + } + }) } From af7c26aa054e7778383bade11a38c62907e92200 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 18 Nov 2025 10:36:14 +0000 Subject: [PATCH 1686/1708] cmd/vet/jsontags: fix a typo in an error message Updates #17945 Change-Id: I8987271420feb190f5e4d85caff305c8d4e84aae Signed-off-by: Alex Chan --- cmd/vet/jsontags/report.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/vet/jsontags/report.go b/cmd/vet/jsontags/report.go index 19d40799b..8e5869060 100644 --- a/cmd/vet/jsontags/report.go +++ b/cmd/vet/jsontags/report.go @@ -80,9 +80,9 @@ const ( func (k ReportKind) message() string { switch k { case OmitEmptyUnsupportedInV1: - return "uses `omitempty` on an unspported type in json/v1; should 
probably use `omitzero` instead" + return "uses `omitempty` on an unsupported type in json/v1; should probably use `omitzero` instead" case OmitEmptyUnsupportedInV2: - return "uses `omitempty` on an unspported type in json/v2; should probably use `omitzero` instead" + return "uses `omitempty` on an unsupported type in json/v2; should probably use `omitzero` instead" case OmitEmptyShouldBeOmitZero: return "should use `omitzero` instead of `omitempty`" case OmitEmptyShouldBeOmitZeroButHasIsZero: From 4e2f2d10889e79d338e1039e3b1263de0043235e Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 17 Nov 2025 20:53:14 -0800 Subject: [PATCH 1687/1708] feature/buildfeatures: re-run go generate 6a73c0bdf55 added a feature tag but didn't re-run go generate on ./feature/buildfeatures. Updates #9192 Change-Id: I7819450453e6b34c60cad29d2273e3e118291643 Signed-off-by: Brad Fitzpatrick --- ...n_disabled.go => feature_identityfederation_disabled.go} | 6 +++--- ...ion_enabled.go => feature_identityfederation_enabled.go} | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) rename feature/buildfeatures/{feature_identity_federation_disabled.go => feature_identityfederation_disabled.go} (70%) rename feature/buildfeatures/{feature_identity_federation_enabled.go => feature_identityfederation_enabled.go} (70%) diff --git a/feature/buildfeatures/feature_identity_federation_disabled.go b/feature/buildfeatures/feature_identityfederation_disabled.go similarity index 70% rename from feature/buildfeatures/feature_identity_federation_disabled.go rename to feature/buildfeatures/feature_identityfederation_disabled.go index c7b16f729..94488adc8 100644 --- a/feature/buildfeatures/feature_identity_federation_disabled.go +++ b/feature/buildfeatures/feature_identityfederation_disabled.go @@ -3,11 +3,11 @@ // Code generated by gen.go; DO NOT EDIT. -//go:build ts_omit_identity_federation +//go:build ts_omit_identityfederation package buildfeatures -// HasIdentityFederation is whether the binary was built with support for modular feature "Identity token exchange for auth key support". -// Specifically, it's whether the binary was NOT built with the "ts_omit_identity_federation" build tag. +// HasIdentityFederation is whether the binary was built with support for modular feature "Auth key generation via identity federation support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_identityfederation" build tag. // It's a const so it can be used for dead code elimination. const HasIdentityFederation = false diff --git a/feature/buildfeatures/feature_identity_federation_enabled.go b/feature/buildfeatures/feature_identityfederation_enabled.go similarity index 70% rename from feature/buildfeatures/feature_identity_federation_enabled.go rename to feature/buildfeatures/feature_identityfederation_enabled.go index 1f7cf1742..892d62d66 100644 --- a/feature/buildfeatures/feature_identity_federation_enabled.go +++ b/feature/buildfeatures/feature_identityfederation_enabled.go @@ -3,11 +3,11 @@ // Code generated by gen.go; DO NOT EDIT. -//go:build !ts_omit_identity_federation +//go:build !ts_omit_identityfederation package buildfeatures -// HasIdentityFederation is whether the binary was built with support for modular feature "Identity token exchange for auth key support". -// Specifically, it's whether the binary was NOT built with the "ts_omit_identity_federation" build tag. 
+// HasIdentityFederation is whether the binary was built with support for modular feature "Auth key generation via identity federation support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_identityfederation" build tag. // It's a const so it can be used for dead code elimination. const HasIdentityFederation = true From 2a6cbb70d9cca049dd079fbc25285fd13649a700 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 17 Nov 2025 20:57:51 -0800 Subject: [PATCH 1688/1708] .github/workflows: make go_generate check detect new files Updates #17957 Change-Id: I904fd5b544ac3090b58c678c4726e7ace41a52dd Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b6d41e937..35b4ea3ef 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -703,6 +703,7 @@ jobs: run: | pkgs=$(./tool/go list ./... | grep -Ev 'dnsfallback|k8s-operator|xdp') ./tool/go generate $pkgs + git add -N . # ensure untracked files are noticed echo echo git diff --name-only --exit-code || (echo "The files above need updating. Please run 'go generate'."; exit 1) From bd29b189fe8b15783b59c63ec5ebbb2584a9d5f7 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 18 Nov 2025 07:25:10 -0800 Subject: [PATCH 1689/1708] types/netmap,*: remove some redundant fields from NetMap Updates #12639 Change-Id: Ia50b15529bd1c002cdd2c937cdfbe69c06fa2dc8 Signed-off-by: Brad Fitzpatrick --- cmd/tsconnect/wasm/wasm_js.go | 2 +- control/controlclient/auto.go | 2 +- control/controlclient/direct.go | 2 +- control/controlclient/map.go | 2 -- ipn/ipnlocal/c2n_test.go | 1 - ipn/ipnlocal/dnsconfig_test.go | 14 ++++++++------ ipn/ipnlocal/local.go | 4 ++-- ipn/ipnlocal/local_test.go | 8 ++++---- ipn/ipnlocal/node_backend.go | 2 +- ipn/ipnlocal/state_test.go | 21 ++++++++++++++------- net/tsdial/dnsmap.go | 8 ++++---- net/tsdial/dnsmap_test.go | 6 +++--- types/netmap/netmap.go | 23 +++++++++++++++++------ wgengine/magicsock/magicsock_test.go | 6 +++--- 14 files changed, 59 insertions(+), 42 deletions(-) diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index 2e81fa4a8..c7aa00d1d 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -261,7 +261,7 @@ func (i *jsIPN) run(jsCallbacks js.Value) { jsNetMap := jsNetMap{ Self: jsNetMapSelfNode{ jsNetMapNode: jsNetMapNode{ - Name: nm.Name, + Name: nm.SelfName(), Addresses: mapSliceView(nm.GetAddresses(), func(a netip.Prefix) string { return a.Addr().String() }), NodeKey: nm.NodeKey.String(), MachineKey: nm.MachineKey.String(), diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 20795d5a7..3cbfe8581 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -443,7 +443,7 @@ func (mrs mapRoutineState) UpdateFullNetmap(nm *netmap.NetworkMap) { c.mu.Lock() c.inMapPoll = true - c.expiry = nm.Expiry + c.expiry = nm.SelfKeyExpiry() stillAuthed := c.loggedIn c.logf("[v1] mapRoutine: netmap received: loggedIn=%v inMapPoll=true", stillAuthed) c.mu.Unlock() diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 1e1ce781f..62bbb3586 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1093,7 +1093,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap c.persist = newPersist.View() persist = c.persist } - c.expiry = nm.Expiry + c.expiry = nm.SelfKeyExpiry() } // 
gotNonKeepAliveMessage is whether we've yet received a MapResponse message without diff --git a/control/controlclient/map.go b/control/controlclient/map.go index a9db25517..9aa8e3710 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -891,8 +891,6 @@ func (ms *mapSession) netmap() *netmap.NetworkMap { if node := ms.lastNode; node.Valid() { nm.SelfNode = node - nm.Expiry = node.KeyExpiry() - nm.Name = node.Name() nm.AllCaps = ms.lastCapSet } diff --git a/ipn/ipnlocal/c2n_test.go b/ipn/ipnlocal/c2n_test.go index 420633c87..86cc6a549 100644 --- a/ipn/ipnlocal/c2n_test.go +++ b/ipn/ipnlocal/c2n_test.go @@ -142,7 +142,6 @@ func TestHandleC2NTLSCertStatus(t *testing.T) { func TestHandleC2NDebugNetmap(t *testing.T) { nm := &netmap.NetworkMap{ - Name: "myhost", SelfNode: (&tailcfg.Node{ ID: 100, Name: "myhost", diff --git a/ipn/ipnlocal/dnsconfig_test.go b/ipn/ipnlocal/dnsconfig_test.go index 71f175148..e23d8a057 100644 --- a/ipn/ipnlocal/dnsconfig_test.go +++ b/ipn/ipnlocal/dnsconfig_test.go @@ -70,8 +70,8 @@ func TestDNSConfigForNetmap(t *testing.T) { { name: "self_name_and_peers", nm: &netmap.NetworkMap{ - Name: "myname.net", SelfNode: (&tailcfg.Node{ + Name: "myname.net.", Addresses: ipps("100.101.101.101"), }).View(), }, @@ -109,15 +109,15 @@ func TestDNSConfigForNetmap(t *testing.T) { // even if they have IPv4. name: "v6_only_self", nm: &netmap.NetworkMap{ - Name: "myname.net", SelfNode: (&tailcfg.Node{ + Name: "myname.net.", Addresses: ipps("fe75::1"), }).View(), }, peers: nodeViews([]*tailcfg.Node{ { ID: 1, - Name: "peera.net", + Name: "peera.net.", Addresses: ipps("100.102.0.1", "100.102.0.2", "fe75::1001"), }, { @@ -146,8 +146,8 @@ func TestDNSConfigForNetmap(t *testing.T) { { name: "extra_records", nm: &netmap.NetworkMap{ - Name: "myname.net", SelfNode: (&tailcfg.Node{ + Name: "myname.net.", Addresses: ipps("100.101.101.101"), }).View(), DNS: tailcfg.DNSConfig{ @@ -171,7 +171,9 @@ func TestDNSConfigForNetmap(t *testing.T) { { name: "corp_dns_misc", nm: &netmap.NetworkMap{ - Name: "host.some.domain.net.", + SelfNode: (&tailcfg.Node{ + Name: "host.some.domain.net.", + }).View(), DNS: tailcfg.DNSConfig{ Proxied: true, Domains: []string{"foo.com", "bar.com"}, @@ -331,8 +333,8 @@ func TestDNSConfigForNetmap(t *testing.T) { { name: "self_expired", nm: &netmap.NetworkMap{ - Name: "myname.net", SelfNode: (&tailcfg.Node{ + Name: "myname.net.", Addresses: ipps("100.101.101.101"), }).View(), }, diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 24ab41735..7eb673e6d 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1301,7 +1301,7 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { if hi := nm.SelfNode.Hostinfo(); hi.Valid() { ss.HostName = hi.Hostname() } - ss.DNSName = nm.Name + ss.DNSName = nm.SelfName() ss.UserID = nm.User() if sn := nm.SelfNode; sn.Valid() { peerStatusFromNode(ss, sn) @@ -1617,7 +1617,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control keyExpiryExtended := false if st.NetMap != nil { wasExpired := b.keyExpired - isExpired := !st.NetMap.Expiry.IsZero() && st.NetMap.Expiry.Before(b.clock.Now()) + isExpired := !st.NetMap.SelfKeyExpiry().IsZero() && st.NetMap.SelfKeyExpiry().Before(b.clock.Now()) if wasExpired && !isExpired { keyExpiryExtended = true } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 5df0ae5bb..f17fabb60 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2712,8 +2712,8 @@ func TestSetExitNodeIDPolicy(t 
*testing.T) { exitNodeIPWant: "127.0.0.1", prefsChanged: false, nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), @@ -2749,8 +2749,8 @@ func TestSetExitNodeIDPolicy(t *testing.T) { exitNodeIDWant: "123", prefsChanged: true, nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), @@ -2787,8 +2787,8 @@ func TestSetExitNodeIDPolicy(t *testing.T) { exitNodeIDWant: "123", prefsChanged: true, nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), @@ -2827,8 +2827,8 @@ func TestSetExitNodeIDPolicy(t *testing.T) { exitNodeIDWant: "123", prefsChanged: true, nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 6880440bd..efef57ea4 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -748,7 +748,7 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. } dcfg.Hosts[fqdn] = ips } - set(nm.Name, nm.GetAddresses()) + set(nm.SelfName(), nm.GetAddresses()) for _, peer := range peers { set(peer.Name(), peer.Addresses()) } diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 0c95ef4fc..b7325e957 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -999,8 +999,10 @@ func runTestStateMachine(t *testing.T, seamless bool) { t.Logf("\n\nExpireKey") notifies.expect(1) cc.send(sendOpt{nm: &netmap.NetworkMap{ - Expiry: time.Now().Add(-time.Minute), - SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), + SelfNode: (&tailcfg.Node{ + KeyExpiry: time.Now().Add(-time.Minute), + MachineAuthorized: true, + }).View(), }}) { nn := notifies.drain(1) @@ -1015,8 +1017,10 @@ func runTestStateMachine(t *testing.T, seamless bool) { t.Logf("\n\nExtendKey") notifies.expect(1) cc.send(sendOpt{nm: &netmap.NetworkMap{ - Expiry: time.Now().Add(time.Minute), - SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), + SelfNode: (&tailcfg.Node{ + MachineAuthorized: true, + KeyExpiry: time.Now().Add(time.Minute), + }).View(), }}) { nn := notifies.drain(1) @@ -1427,7 +1431,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { mustDo2(t)(lb.EditPrefs(connect)) cc().authenticated(node1) cc().send(sendOpt{nm: &netmap.NetworkMap{ - Expiry: time.Now().Add(-time.Minute), + SelfNode: (&tailcfg.Node{ + KeyExpiry: time.Now().Add(-time.Minute), + }).View(), }}) }, wantState: ipn.NeedsLogin, @@ -1550,7 +1556,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { mustDo2(t)(lb.EditPrefs(connect)) cc().authenticated(node1) cc().send(sendOpt{nm: &netmap.NetworkMap{ - Expiry: time.Now().Add(-time.Minute), + SelfNode: (&tailcfg.Node{ + KeyExpiry: time.Now().Add(-time.Minute), + }).View(), }}) }, // Even with seamless, if the key we are using expires, we want to disconnect: @@ -1725,7 +1733,6 @@ func buildNetmapWithPeers(self tailcfg.NodeView, peers ...tailcfg.NodeView) *net return &netmap.NetworkMap{ SelfNode: self, - Name: self.Name(), Domain: domain, Peers: peers, UserProfiles: users, diff --git a/net/tsdial/dnsmap.go b/net/tsdial/dnsmap.go index 2ef1cb1f1..37fedd14c 100644 --- 
a/net/tsdial/dnsmap.go +++ b/net/tsdial/dnsmap.go @@ -36,11 +36,11 @@ func dnsMapFromNetworkMap(nm *netmap.NetworkMap) dnsMap { suffix := nm.MagicDNSSuffix() have4 := false addrs := nm.GetAddresses() - if nm.Name != "" && addrs.Len() > 0 { + if name := nm.SelfName(); name != "" && addrs.Len() > 0 { ip := addrs.At(0).Addr() - ret[canonMapKey(nm.Name)] = ip - if dnsname.HasSuffix(nm.Name, suffix) { - ret[canonMapKey(dnsname.TrimSuffix(nm.Name, suffix))] = ip + ret[canonMapKey(name)] = ip + if dnsname.HasSuffix(name, suffix) { + ret[canonMapKey(dnsname.TrimSuffix(name, suffix))] = ip } for _, p := range addrs.All() { if p.Addr().Is4() { diff --git a/net/tsdial/dnsmap_test.go b/net/tsdial/dnsmap_test.go index 43461a135..41a957f18 100644 --- a/net/tsdial/dnsmap_test.go +++ b/net/tsdial/dnsmap_test.go @@ -31,8 +31,8 @@ func TestDNSMapFromNetworkMap(t *testing.T) { { name: "self", nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), @@ -47,8 +47,8 @@ func TestDNSMapFromNetworkMap(t *testing.T) { { name: "self_and_peers", nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), @@ -82,8 +82,8 @@ func TestDNSMapFromNetworkMap(t *testing.T) { { name: "self_has_v6_only", nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100::123/128"), }, diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index 0a2f3ea71..c54562f4d 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -29,10 +29,6 @@ type NetworkMap struct { SelfNode tailcfg.NodeView AllCaps set.Set[tailcfg.NodeCapability] // set version of SelfNode.Capabilities + SelfNode.CapMap NodeKey key.NodePublic - Expiry time.Time - // Name is the DNS name assigned to this node. - // It is the MapResponse.Node.Name value and ends with a period. - Name string MachineKey key.MachinePublic @@ -235,10 +231,25 @@ func MagicDNSSuffixOfNodeName(nodeName string) string { // // It will neither start nor end with a period. func (nm *NetworkMap) MagicDNSSuffix() string { - if nm == nil { + return MagicDNSSuffixOfNodeName(nm.SelfName()) +} + +// SelfName returns nm.SelfNode.Name, or the empty string +// if nm is nil or nm.SelfNode is invalid. +func (nm *NetworkMap) SelfName() string { + if nm == nil || !nm.SelfNode.Valid() { return "" } - return MagicDNSSuffixOfNodeName(nm.Name) + return nm.SelfNode.Name() +} + +// SelfKeyExpiry returns nm.SelfNode.KeyExpiry, or the zero +// value if nil or nm.SelfNode is invalid. 
+func (nm *NetworkMap) SelfKeyExpiry() time.Time { + if nm == nil || !nm.SelfNode.Valid() { + return time.Time{} + } + return nm.SelfNode.KeyExpiry() } // DomainName returns the name of the NetworkMap's diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index f9d761052..2a20b3cf6 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -2200,9 +2200,9 @@ func TestIsWireGuardOnlyPeer(t *testing.T) { defer m.Close() nm := &netmap.NetworkMap{ - Name: "ts", NodeKey: m.privateKey.Public(), SelfNode: (&tailcfg.Node{ + Name: "ts.", Addresses: []netip.Prefix{tsaip}, }).View(), Peers: nodeViews([]*tailcfg.Node{ @@ -2264,9 +2264,9 @@ func TestIsWireGuardOnlyPeerWithMasquerade(t *testing.T) { defer m.Close() nm := &netmap.NetworkMap{ - Name: "ts", NodeKey: m.privateKey.Public(), SelfNode: (&tailcfg.Node{ + Name: "ts.", Addresses: []netip.Prefix{tsaip}, }).View(), Peers: nodeViews([]*tailcfg.Node{ @@ -2400,9 +2400,9 @@ func TestIsWireGuardOnlyPickEndpointByPing(t *testing.T) { wgEpV6 := netip.MustParseAddrPort(v6.LocalAddr().String()) nm := &netmap.NetworkMap{ - Name: "ts", NodeKey: m.privateKey.Public(), SelfNode: (&tailcfg.Node{ + Name: "ts.", Addresses: []netip.Prefix{tsaip}, }).View(), Peers: nodeViews([]*tailcfg.Node{ From 04a9d25a545824d499af9bcff967a235566c8389 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Tue, 18 Nov 2025 17:04:08 +0000 Subject: [PATCH 1690/1708] tka: mark young AUMs as active even if the chain is long Existing compaction logic seems to have had an assumption that markActiveChain would cover a longer part of the chain than markYoungAUMs. This prevented long, but fresh, chains, from being compacted correctly. Updates tailscale/corp#33537 Signed-off-by: Anton Tolchanov --- tka/tailchonk.go | 23 +++++++++++++---------- tka/tailchonk_test.go | 31 +++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 10 deletions(-) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 0b7191747..d92016c45 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -668,7 +668,7 @@ const ( ) // markActiveChain marks AUMs in the active chain. -// All AUMs that are within minChain ancestors of head are +// All AUMs that are within minChain ancestors of head, or are marked as young, are // marked retainStateActive, and all remaining ancestors are // marked retainStateCandidate. // @@ -700,19 +700,22 @@ func markActiveChain(storage Chonk, verdict map[AUMHash]retainState, minChain in // If we got this far, we have at least minChain AUMs stored, and minChain number // of ancestors have been marked for retention. We now continue to iterate backwards - // till we find an AUM which we can compact to (a Checkpoint AUM). + // till we find an AUM which we can compact to: either a Checkpoint AUM which is old + // enough, or the genesis AUM. 
for { h := next.Hash() verdict[h] |= retainStateActive + + parent, hasParent := next.Parent() + isYoung := verdict[h]&retainStateYoung != 0 + if next.MessageKind == AUMCheckpoint { lastActiveAncestor = h - break + if !isYoung || !hasParent { + break + } } - parent, hasParent := next.Parent() - if !hasParent { - return AUMHash{}, errors.New("reached genesis AUM without finding an appropriate lastActiveAncestor") - } if next, err = storage.AUM(parent); err != nil { return AUMHash{}, fmt.Errorf("searching for compaction target (%v): %w", parent, err) } @@ -917,12 +920,12 @@ func Compact(storage CompactableChonk, head AUMHash, opts CompactionOptions) (la verdict[h] = 0 } - if lastActiveAncestor, err = markActiveChain(storage, verdict, opts.MinChain, head); err != nil { - return AUMHash{}, fmt.Errorf("marking active chain: %w", err) - } if err := markYoungAUMs(storage, verdict, opts.MinAge); err != nil { return AUMHash{}, fmt.Errorf("marking young AUMs: %w", err) } + if lastActiveAncestor, err = markActiveChain(storage, verdict, opts.MinChain, head); err != nil { + return AUMHash{}, fmt.Errorf("marking active chain: %w", err) + } if err := markDescendantAUMs(storage, verdict); err != nil { return AUMHash{}, fmt.Errorf("marking descendant AUMs: %w", err) } diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index 70b7dc9a7..7125c99fe 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -15,6 +15,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "golang.org/x/crypto/blake2s" + "tailscale.com/types/key" "tailscale.com/util/must" ) @@ -601,3 +602,33 @@ func TestCompact(t *testing.T) { } } } + +func TestCompactLongButYoung(t *testing.T) { + ourPriv := key.NewNLPrivate() + ourKey := Key{Kind: Key25519, Public: ourPriv.Public().Verifier(), Votes: 1} + someOtherKey := Key{Kind: Key25519, Public: key.NewNLPrivate().Public().Verifier(), Votes: 1} + + storage := &Mem{} + auth, _, err := Create(storage, State{ + Keys: []Key{ourKey, someOtherKey}, + DisablementSecrets: [][]byte{DisablementKDF(bytes.Repeat([]byte{0xa5}, 32))}, + }, ourPriv) + if err != nil { + t.Fatalf("tka.Create() failed: %v", err) + } + + genesis := auth.Head() + + for range 100 { + upd := auth.NewUpdater(ourPriv) + must.Do(upd.RemoveKey(someOtherKey.MustID())) + must.Do(upd.AddKey(someOtherKey)) + aums := must.Get(upd.Finalize(storage)) + must.Do(auth.Inform(storage, aums)) + } + + lastActiveAncestor := must.Get(Compact(storage, auth.Head(), CompactionOptions{MinChain: 5, MinAge: time.Hour})) + if lastActiveAncestor != genesis { + t.Errorf("last active ancestor = %v, want %v", lastActiveAncestor, genesis) + } +} From d0daa5a398ec4a17499938c3c25ce1cf5058d1b9 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 17 Nov 2025 17:12:05 +0000 Subject: [PATCH 1691/1708] tka: marshal AUMHash totext even if Tailnet Lock is omitted We use `tka.AUMHash` in `netmap.NetworkMap`, and we serialise it as JSON in the `/debug/netmap` C2N endpoint. If the binary omits Tailnet Lock support, the debug endpoint returns an error because it's unable to marshal the AUMHash. This patch adds a sentinel value so this marshalling works, and we can use the debug endpoint. 
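As a rough illustration of why a non-failing MarshalText is enough, here is a
minimal sketch outside the tree (`hash` and `netmapLike` are stand-in names,
not the real types): encoding/json consults encoding.TextMarshaler for any
field that implements it, so one field whose MarshalText returns an error makes
json.Marshal fail for the whole enclosing struct, which is what broke the debug
endpoint; returning any fixed placeholder instead lets the rest of the
structure encode normally.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // hash stands in for a fixed-size hash type such as tka.AUMHash.
    type hash [32]byte

    // MarshalText returns a fixed placeholder rather than an error, so
    // structs containing hash can still be encoded as JSON even when the
    // real implementation is compiled out. The placeholder text here is
    // illustrative only.
    func (hash) MarshalText() ([]byte, error) {
        return []byte("unavailable"), nil
    }

    // netmapLike stands in for a larger structure (like netmap.NetworkMap)
    // that embeds the hash alongside other fields.
    type netmapLike struct {
        TKAHead hash
        Name    string
    }

    func main() {
        b, err := json.Marshal(netmapLike{Name: "example"})
        fmt.Println(string(b), err)
        // Prints {"TKAHead":"unavailable","Name":"example"} <nil>.
        // If MarshalText returned an error instead, json.Marshal would
        // return that error and produce no output for the whole struct.
    }
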
Updates https://github.com/tailscale/tailscale/issues/17115 Signed-off-by: Alex Chan Change-Id: I51ec1491a74e9b9f49d1766abd89681049e09ce4 --- tka/disabled_stub.go | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/tka/disabled_stub.go b/tka/disabled_stub.go index 15bf12c33..4c4afa370 100644 --- a/tka/disabled_stub.go +++ b/tka/disabled_stub.go @@ -22,7 +22,24 @@ type Authority struct { func (*Authority) Head() AUMHash { return AUMHash{} } -func (AUMHash) MarshalText() ([]byte, error) { return nil, errNoTailnetLock } +// MarshalText returns a dummy value explaining that Tailnet Lock +// is not compiled in to this binary. +// +// We need to be able to marshal AUMHash to text because it's included +// in [netmap.NetworkMap], which gets serialised as JSON in the +// c2n /debug/netmap endpoint. +// +// We provide a basic marshaller so that endpoint works correctly +// with nodes that omit Tailnet Lock support, but we don't want the +// base32 dependency used for the regular marshaller, and we don't +// need unmarshalling support at time of writing (2025-11-18). +func (h AUMHash) MarshalText() ([]byte, error) { + return []byte(""), nil +} + +func (h *AUMHash) UnmarshalText(text []byte) error { + return errors.New("tailnet lock is not supported by this binary") +} type State struct{} @@ -128,12 +145,6 @@ type NodeKeySignature struct { type DeeplinkValidationResult struct { } -func (h *AUMHash) UnmarshalText(text []byte) error { - return errNoTailnetLock -} - -var errNoTailnetLock = errors.New("tailnet lock is not enabled") - func DecodeWrappedAuthkey(wrappedAuthKey string, logf logger.Logf) (authKey string, isWrapped bool, sig *NodeKeySignature, priv ed25519.PrivateKey) { return wrappedAuthKey, false, nil, nil } From da508c504de626e1dcd9a218bed6cfb758298ba6 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Mon, 17 Nov 2025 13:58:59 -0800 Subject: [PATCH 1692/1708] appc: add ippool type As part of the conn25 work we will want to be able to keep track of a pool of IP Addresses and know which have been used and which have not. Fixes tailscale/corp#34247 Signed-off-by: Fran Bull --- appc/ippool.go | 61 +++++++++++++++++++++++++++++++++++++++++++++ appc/ippool_test.go | 60 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 121 insertions(+) create mode 100644 appc/ippool.go create mode 100644 appc/ippool_test.go diff --git a/appc/ippool.go b/appc/ippool.go new file mode 100644 index 000000000..a2e86a7c2 --- /dev/null +++ b/appc/ippool.go @@ -0,0 +1,61 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package appc + +import ( + "errors" + "net/netip" + + "go4.org/netipx" +) + +// errPoolExhausted is returned when there are no more addresses to iterate over. +var errPoolExhausted = errors.New("ip pool exhausted") + +// ippool allows for iteration over all the addresses within a netipx.IPSet. +// netipx.IPSet has a Ranges call that returns the "minimum and sorted set of IP ranges that covers [the set]". +// netipx.IPRange is "an inclusive range of IP addresses from the same address family.". So we can iterate over +// all the addresses in the set by keeping a track of the last address we returned, calling Next on the last address +// to get the new one, and if we run off the edge of the current range, starting on the next one. +type ippool struct { + // ranges defines the addresses in the pool + ranges []netipx.IPRange + // last is internal tracking of which the last address provided was. 
+ last netip.Addr + // rangeIdx is internal tracking of which netipx.IPRange from the IPSet we are currently on. + rangeIdx int +} + +func newIPPool(ipset *netipx.IPSet) *ippool { + if ipset == nil { + return &ippool{} + } + return &ippool{ranges: ipset.Ranges()} +} + +// next returns the next address from the set, or errPoolExhausted if we have +// iterated over the whole set. +func (ipp *ippool) next() (netip.Addr, error) { + if ipp.rangeIdx >= len(ipp.ranges) { + // ipset is empty or we have iterated off the end + return netip.Addr{}, errPoolExhausted + } + if !ipp.last.IsValid() { + // not initialized yet + ipp.last = ipp.ranges[0].From() + return ipp.last, nil + } + currRange := ipp.ranges[ipp.rangeIdx] + if ipp.last == currRange.To() { + // then we need to move to the next range + ipp.rangeIdx++ + if ipp.rangeIdx >= len(ipp.ranges) { + return netip.Addr{}, errPoolExhausted + } + ipp.last = ipp.ranges[ipp.rangeIdx].From() + return ipp.last, nil + } + ipp.last = ipp.last.Next() + return ipp.last, nil +} diff --git a/appc/ippool_test.go b/appc/ippool_test.go new file mode 100644 index 000000000..64b76738f --- /dev/null +++ b/appc/ippool_test.go @@ -0,0 +1,60 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package appc + +import ( + "errors" + "net/netip" + "testing" + + "go4.org/netipx" + "tailscale.com/util/must" +) + +func TestNext(t *testing.T) { + a := ippool{} + _, err := a.next() + if !errors.Is(err, errPoolExhausted) { + t.Fatalf("expected errPoolExhausted, got %v", err) + } + + var isb netipx.IPSetBuilder + ipset := must.Get(isb.IPSet()) + b := newIPPool(ipset) + _, err = b.next() + if !errors.Is(err, errPoolExhausted) { + t.Fatalf("expected errPoolExhausted, got %v", err) + } + + isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("192.168.0.0"), netip.MustParseAddr("192.168.0.2"))) + isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("200.0.0.0"), netip.MustParseAddr("200.0.0.0"))) + isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("201.0.0.0"), netip.MustParseAddr("201.0.0.1"))) + ipset = must.Get(isb.IPSet()) + c := newIPPool(ipset) + expected := []string{ + "192.168.0.0", + "192.168.0.1", + "192.168.0.2", + "200.0.0.0", + "201.0.0.0", + "201.0.0.1", + } + for i, want := range expected { + addr, err := c.next() + if err != nil { + t.Fatal(err) + } + if addr != netip.MustParseAddr(want) { + t.Fatalf("next call %d want: %s, got: %v", i, want, addr) + } + } + _, err = c.next() + if !errors.Is(err, errPoolExhausted) { + t.Fatalf("expected errPoolExhausted, got %v", err) + } + _, err = c.next() + if !errors.Is(err, errPoolExhausted) { + t.Fatalf("expected errPoolExhausted, got %v", err) + } +} From c09c95ef67d5fe9ff127cf2102f189e47e41b119 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Mon, 3 Nov 2025 16:41:37 -0800 Subject: [PATCH 1693/1708] types/key,wgengine/magicsock,control/controlclient,ipn: add debug disco key rotation Adds the ability to rotate discovery keys on running clients, needed for testing upcoming disco key distribution changes. Introduces key.DiscoKey, an atomic container for a disco private key, public key, and the public key's ShortString, replacing the prior separate atomic fields. magicsock.Conn has a new RotateDiscoKey method, and access to this is provided via localapi and a CLI debug command. 
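The reason for bundling the private key, public key, and short string into a
single atomically-swapped value (rather than keeping the prior separate atomic
fields) is that readers always observe a mutually consistent trio. A minimal
sketch of the pattern, using stand-in string types rather than the real
key.DiscoPrivate/key.DiscoPublic:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // pair holds the values that must stay in sync with each other.
    type pair struct {
        private, public, short string
    }

    // snapshotKey swaps the whole pair in one pointer store, so a reader
    // can never see, say, a new private key alongside a stale public key,
    // which three independent atomic fields could not guarantee.
    type snapshotKey struct {
        p atomic.Pointer[pair]
    }

    func (s *snapshotKey) Set(private, public, short string) {
        s.p.Store(&pair{private: private, public: public, short: short})
    }

    func (s *snapshotKey) Pair() (string, string) {
        v := s.p.Load()
        return v.private, v.public
    }

    func main() {
        var k snapshotKey
        k.Set("priv-A", "pub-A", "A")
        k.Set("priv-B", "pub-B", "B") // rotation: one consistent swap
        fmt.Println(k.Pair())         // always a matched private/public pair
    }

On a running node the rotation itself is reachable via the new debug command,
e.g. `tailscale debug rotate-disco-key`, and the daemon logs the old and new
short key strings when it happens.
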
Note that this implementation is primarily for testing as it stands, and regular use should likely introduce an additional mechanism that allows the old key to be used for some time, to provide a seamless key rotation rather than one that invalidates all sessions. Updates tailscale/corp#34037 Signed-off-by: James Tucker --- cmd/tailscale/cli/debug.go | 6 +++ control/controlclient/auto.go | 7 +++ control/controlclient/client.go | 6 +++ control/controlclient/direct.go | 16 ++++-- control/controlclient/direct_test.go | 26 +++++++++ ipn/ipnlocal/local.go | 24 +++++++++ ipn/ipnlocal/state_test.go | 5 ++ ipn/localapi/debug.go | 20 +++++++ wgengine/magicsock/disco_atomic.go | 58 ++++++++++++++++++++ wgengine/magicsock/disco_atomic_test.go | 70 +++++++++++++++++++++++++ wgengine/magicsock/endpoint.go | 4 +- wgengine/magicsock/endpoint_test.go | 25 +++++++-- wgengine/magicsock/magicsock.go | 63 ++++++++++++++-------- wgengine/magicsock/magicsock_test.go | 70 +++++++++++++++++++++++++ wgengine/magicsock/relaymanager.go | 4 +- wgengine/magicsock/relaymanager_test.go | 8 ++- 16 files changed, 375 insertions(+), 37 deletions(-) create mode 100644 wgengine/magicsock/disco_atomic.go create mode 100644 wgengine/magicsock/disco_atomic_test.go diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index ffed51a63..2facd66ae 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -182,6 +182,12 @@ func debugCmd() *ffcli.Command { Exec: localAPIAction("rebind"), ShortHelp: "Force a magicsock rebind", }, + { + Name: "rotate-disco-key", + ShortUsage: "tailscale debug rotate-disco-key", + Exec: localAPIAction("rotate-disco-key"), + ShortHelp: "Rotate the discovery key", + }, { Name: "derp-set-on-demand", ShortUsage: "tailscale debug derp-set-on-demand", diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 3cbfe8581..336a8d491 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -767,6 +767,13 @@ func (c *Auto) UpdateEndpoints(endpoints []tailcfg.Endpoint) { } } +// SetDiscoPublicKey sets the client's Disco public to key and sends the change +// to the control server. +func (c *Auto) SetDiscoPublicKey(key key.DiscoPublic) { + c.direct.SetDiscoPublicKey(key) + c.updateControl() +} + func (c *Auto) Shutdown() { c.mu.Lock() if c.closed { diff --git a/control/controlclient/client.go b/control/controlclient/client.go index d0aa129ae..41b39622b 100644 --- a/control/controlclient/client.go +++ b/control/controlclient/client.go @@ -12,6 +12,7 @@ import ( "context" "tailscale.com/tailcfg" + "tailscale.com/types/key" ) // LoginFlags is a bitmask of options to change the behavior of Client.Login @@ -80,7 +81,12 @@ type Client interface { // TODO: a server-side change would let us simply upload this // in a separate http request. It has nothing to do with the rest of // the state machine. + // Note: the auto client uploads the new endpoints to control immediately. UpdateEndpoints(endpoints []tailcfg.Endpoint) + // SetDiscoPublicKey updates the disco public key that will be sent in + // future map requests. This should be called after rotating the discovery key. + // Note: the auto client uploads the new key to control immediately. + SetDiscoPublicKey(key.DiscoPublic) // ClientID returns the ClientID of a client. This ID is meant to // distinguish one client from another. 
ClientID() int64 diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 62bbb3586..006a801ef 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -74,7 +74,6 @@ type Direct struct { logf logger.Logf netMon *netmon.Monitor // non-nil health *health.Tracker - discoPubKey key.DiscoPublic busClient *eventbus.Client clientVersionPub *eventbus.Publisher[tailcfg.ClientVersion] autoUpdatePub *eventbus.Publisher[AutoUpdate] @@ -95,6 +94,7 @@ type Direct struct { mu syncs.Mutex // mutex guards the following fields serverLegacyKey key.MachinePublic // original ("legacy") nacl crypto_box-based public key; only used for signRegisterRequest on Windows now serverNoiseKey key.MachinePublic + discoPubKey key.DiscoPublic // protected by mu; can be updated via [SetDiscoPublicKey] sfGroup singleflight.Group[struct{}, *ts2021.Client] // protects noiseClient creation. noiseClient *ts2021.Client // also protected by mu @@ -316,7 +316,6 @@ func NewDirect(opts Options) (*Direct, error) { logf: opts.Logf, persist: opts.Persist.View(), authKey: opts.AuthKey, - discoPubKey: opts.DiscoPublicKey, debugFlags: opts.DebugFlags, netMon: netMon, health: opts.HealthTracker, @@ -329,6 +328,7 @@ func NewDirect(opts Options) (*Direct, error) { dnsCache: dnsCache, dialPlan: opts.DialPlan, } + c.discoPubKey = opts.DiscoPublicKey c.closedCtx, c.closeCtx = context.WithCancel(context.Background()) c.controlClientID = nextControlClientID.Add(1) @@ -853,6 +853,14 @@ func (c *Direct) SendUpdate(ctx context.Context) error { return c.sendMapRequest(ctx, false, nil) } +// SetDiscoPublicKey updates the disco public key in local state. +// It does not implicitly trigger [SendUpdate]; callers should arrange for that. +func (c *Direct) SetDiscoPublicKey(key key.DiscoPublic) { + c.mu.Lock() + defer c.mu.Unlock() + c.discoPubKey = key +} + // ClientID returns the controlClientID of the controlClient. 
func (c *Direct) ClientID() int64 { return c.controlClientID @@ -902,6 +910,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap persist := c.persist serverURL := c.serverURL serverNoiseKey := c.serverNoiseKey + discoKey := c.discoPubKey hi := c.hostInfoLocked() backendLogID := hi.BackendLogID connectionHandleForTest := c.connectionHandleForTest @@ -945,11 +954,12 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap } nodeKey := persist.PublicNodeKey() + request := &tailcfg.MapRequest{ Version: tailcfg.CurrentCapabilityVersion, KeepAlive: true, NodeKey: nodeKey, - DiscoKey: c.discoPubKey, + DiscoKey: discoKey, Endpoints: eps, EndpointTypes: epTypes, Stream: isStreaming, diff --git a/control/controlclient/direct_test.go b/control/controlclient/direct_test.go index dd93dc7b3..4329fc878 100644 --- a/control/controlclient/direct_test.go +++ b/control/controlclient/direct_test.go @@ -20,6 +20,32 @@ import ( "tailscale.com/util/eventbus/eventbustest" ) +func TestSetDiscoPublicKey(t *testing.T) { + initialKey := key.NewDisco().Public() + + c := &Direct{ + discoPubKey: initialKey, + } + + c.mu.Lock() + if c.discoPubKey != initialKey { + t.Fatalf("initial disco key mismatch: got %v, want %v", c.discoPubKey, initialKey) + } + c.mu.Unlock() + + newKey := key.NewDisco().Public() + c.SetDiscoPublicKey(newKey) + + c.mu.Lock() + if c.discoPubKey != newKey { + t.Fatalf("disco key not updated: got %v, want %v", c.discoPubKey, newKey) + } + if c.discoPubKey == initialKey { + t.Fatal("disco key should have changed") + } + c.mu.Unlock() +} + func TestNewDirect(t *testing.T) { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 7eb673e6d..0ff299399 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6620,6 +6620,30 @@ func (b *LocalBackend) DebugReSTUN() error { return nil } +func (b *LocalBackend) DebugRotateDiscoKey() error { + if !buildfeatures.HasDebug { + return nil + } + + mc := b.MagicConn() + mc.RotateDiscoKey() + + newDiscoKey := mc.DiscoPublicKey() + + if tunWrap, ok := b.sys.Tun.GetOK(); ok { + tunWrap.SetDiscoKey(newDiscoKey) + } + + b.mu.Lock() + cc := b.cc + b.mu.Unlock() + if cc != nil { + cc.SetDiscoPublicKey(newDiscoKey) + } + + return nil +} + func (b *LocalBackend) DebugPeerRelayServers() set.Set[netip.Addr] { return b.MagicConn().PeerRelays() } diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index b7325e957..152b375b0 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -316,6 +316,11 @@ func (cc *mockControl) UpdateEndpoints(endpoints []tailcfg.Endpoint) { cc.called("UpdateEndpoints") } +func (cc *mockControl) SetDiscoPublicKey(key key.DiscoPublic) { + cc.logf("SetDiscoPublicKey: %v", key) + cc.called("SetDiscoPublicKey") +} + func (cc *mockControl) ClientID() int64 { return cc.controlClientID } diff --git a/ipn/localapi/debug.go b/ipn/localapi/debug.go index 8aca7f009..ae9cb01e0 100644 --- a/ipn/localapi/debug.go +++ b/ipn/localapi/debug.go @@ -31,6 +31,7 @@ import ( func init() { Register("component-debug-logging", (*Handler).serveComponentDebugLogging) Register("debug", (*Handler).serveDebug) + Register("debug-rotate-disco-key", (*Handler).serveDebugRotateDiscoKey) Register("dev-set-state-store", (*Handler).serveDevSetStateStore) Register("debug-bus-events", (*Handler).serveDebugBusEvents) Register("debug-bus-graph", (*Handler).serveEventBusGraph) @@ -232,6 +233,8 @@ func (h *Handler) 
serveDebug(w http.ResponseWriter, r *http.Request) { if err == nil { return } + case "rotate-disco-key": + err = h.b.DebugRotateDiscoKey() case "": err = fmt.Errorf("missing parameter 'action'") default: @@ -473,3 +476,20 @@ func (h *Handler) serveDebugOptionalFeatures(w http.ResponseWriter, r *http.Requ w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(of) } + +func (h *Handler) serveDebugRotateDiscoKey(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "POST required", http.StatusMethodNotAllowed) + return + } + if err := h.b.DebugRotateDiscoKey(); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "text/plain") + io.WriteString(w, "done\n") +} diff --git a/wgengine/magicsock/disco_atomic.go b/wgengine/magicsock/disco_atomic.go new file mode 100644 index 000000000..5b765fbc2 --- /dev/null +++ b/wgengine/magicsock/disco_atomic.go @@ -0,0 +1,58 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package magicsock + +import ( + "sync/atomic" + + "tailscale.com/types/key" +) + +type discoKeyPair struct { + private key.DiscoPrivate + public key.DiscoPublic + short string // public.ShortString() +} + +// discoAtomic is an atomic container for a disco private key, public key, and +// the public key's ShortString. The private and public keys are always kept +// synchronized. +// +// The zero value is not ready for use. Use [Set] to provide a usable value. +type discoAtomic struct { + pair atomic.Pointer[discoKeyPair] +} + +// Pair returns the private and public keys together atomically. +// Code that needs both the private and public keys synchronized should +// use Pair instead of calling Private and Public separately. +func (dk *discoAtomic) Pair() (key.DiscoPrivate, key.DiscoPublic) { + p := dk.pair.Load() + return p.private, p.public +} + +// Private returns the private key. +func (dk *discoAtomic) Private() key.DiscoPrivate { + return dk.pair.Load().private +} + +// Public returns the public key. +func (dk *discoAtomic) Public() key.DiscoPublic { + return dk.pair.Load().public +} + +// Short returns the short string of the public key (see [DiscoPublic.ShortString]). +func (dk *discoAtomic) Short() string { + return dk.pair.Load().short +} + +// Set updates the private key (and the cached public key and short string). 
+func (dk *discoAtomic) Set(private key.DiscoPrivate) { + public := private.Public() + dk.pair.Store(&discoKeyPair{ + private: private, + public: public, + short: public.ShortString(), + }) +} diff --git a/wgengine/magicsock/disco_atomic_test.go b/wgengine/magicsock/disco_atomic_test.go new file mode 100644 index 000000000..a1de9b843 --- /dev/null +++ b/wgengine/magicsock/disco_atomic_test.go @@ -0,0 +1,70 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package magicsock + +import ( + "testing" + + "tailscale.com/types/key" +) + +func TestDiscoAtomic(t *testing.T) { + var dk discoAtomic + dk.Set(key.NewDisco()) + + private := dk.Private() + public := dk.Public() + short := dk.Short() + + if private.IsZero() { + t.Fatal("DiscoKey private key should not be zero") + } + if public.IsZero() { + t.Fatal("DiscoKey public key should not be zero") + } + if short == "" { + t.Fatal("DiscoKey short string should not be empty") + } + + if public != private.Public() { + t.Fatal("DiscoKey public key doesn't match private key") + } + if short != public.ShortString() { + t.Fatal("DiscoKey short string doesn't match public key") + } + + gotPrivate, gotPublic := dk.Pair() + if !gotPrivate.Equal(private) { + t.Fatal("Pair() returned different private key") + } + if gotPublic != public { + t.Fatal("Pair() returned different public key") + } +} + +func TestDiscoAtomicSet(t *testing.T) { + var dk discoAtomic + dk.Set(key.NewDisco()) + oldPrivate := dk.Private() + oldPublic := dk.Public() + + newPrivate := key.NewDisco() + dk.Set(newPrivate) + + currentPrivate := dk.Private() + currentPublic := dk.Public() + + if currentPrivate.Equal(oldPrivate) { + t.Fatal("DiscoKey private key should have changed after Set") + } + if currentPublic == oldPublic { + t.Fatal("DiscoKey public key should have changed after Set") + } + if !currentPrivate.Equal(newPrivate) { + t.Fatal("DiscoKey private key doesn't match the set key") + } + if currentPublic != newPrivate.Public() { + t.Fatal("DiscoKey public key doesn't match derived from set private key") + } +} diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index c2e5dcca3..eda589e14 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -697,7 +697,7 @@ func (de *endpoint) maybeProbeUDPLifetimeLocked() (afterInactivityFor time.Durat // shuffling probing probability where the local node ends up with a large // key value lexicographically relative to the other nodes it tends to // communicate with. If de's disco key changes, the cycle will reset. 
- if de.c.discoPublic.Compare(epDisco.key) >= 0 { + if de.c.discoAtomic.Public().Compare(epDisco.key) >= 0 { // lower disco pub key node probes higher return afterInactivityFor, false } @@ -1739,7 +1739,7 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd } if sp.purpose != pingHeartbeat && sp.purpose != pingHeartbeatForUDPLifetime { - de.c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got pong tx=%x latency=%v pktlen=%v pong.src=%v%v", de.c.discoShort, de.discoShort(), de.publicKey.ShortString(), src, m.TxID[:6], latency.Round(time.Millisecond), pktLen, m.Src, logger.ArgWriter(func(bw *bufio.Writer) { + de.c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got pong tx=%x latency=%v pktlen=%v pong.src=%v%v", de.c.discoAtomic.Short(), de.discoShort(), de.publicKey.ShortString(), src, m.TxID[:6], latency.Round(time.Millisecond), pktLen, m.Src, logger.ArgWriter(func(bw *bufio.Writer) { if sp.to != src { fmt.Fprintf(bw, " ping.to=%v", sp.to) } diff --git a/wgengine/magicsock/endpoint_test.go b/wgengine/magicsock/endpoint_test.go index df1c93406..f1dab924f 100644 --- a/wgengine/magicsock/endpoint_test.go +++ b/wgengine/magicsock/endpoint_test.go @@ -146,15 +146,22 @@ func TestProbeUDPLifetimeConfig_Valid(t *testing.T) { } func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { + var lowerPriv, higherPriv key.DiscoPrivate var lower, higher key.DiscoPublic - a := key.NewDisco().Public() - b := key.NewDisco().Public() + privA := key.NewDisco() + privB := key.NewDisco() + a := privA.Public() + b := privB.Public() if a.String() < b.String() { lower = a higher = b + lowerPriv = privA + higherPriv = privB } else { lower = b higher = a + lowerPriv = privB + higherPriv = privA } addr := addrQuality{epAddr: epAddr{ap: netip.MustParseAddrPort("1.1.1.1:1")}} newProbeUDPLifetime := func() *probeUDPLifetime { @@ -281,10 +288,18 @@ func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + c := &Conn{} + if tt.localDisco.IsZero() { + c.discoAtomic.Set(key.NewDisco()) + } else if tt.localDisco.Compare(lower) == 0 { + c.discoAtomic.Set(lowerPriv) + } else if tt.localDisco.Compare(higher) == 0 { + c.discoAtomic.Set(higherPriv) + } else { + t.Fatalf("unexpected localDisco value") + } de := &endpoint{ - c: &Conn{ - discoPublic: tt.localDisco, - }, + c: c, bestAddr: tt.bestAddr, } if tt.remoteDisco != nil { diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index f610d6adb..064838a2d 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -273,14 +273,8 @@ type Conn struct { // channel operations and goroutine creation. hasPeerRelayServers atomic.Bool - // discoPrivate is the private naclbox key used for active - // discovery traffic. It is always present, and immutable. - discoPrivate key.DiscoPrivate - // public of discoPrivate. It is always present and immutable. - discoPublic key.DiscoPublic - // ShortString of discoPublic (to save logging work later). It is always - // present and immutable. - discoShort string + // discoAtomic is the current disco private and public keypair for this conn. 
+ discoAtomic discoAtomic // ============================================================ // mu guards all following fields; see userspaceEngine lock @@ -603,11 +597,9 @@ func newConn(logf logger.Logf) *Conn { peerLastDerp: make(map[key.NodePublic]int), peerMap: newPeerMap(), discoInfo: make(map[key.DiscoPublic]*discoInfo), - discoPrivate: discoPrivate, - discoPublic: discoPrivate.Public(), cloudInfo: newCloudInfo(logf), } - c.discoShort = c.discoPublic.ShortString() + c.discoAtomic.Set(discoPrivate) c.bind = &connBind{Conn: c, closed: true} c.receiveBatchPool = sync.Pool{New: func() any { msgs := make([]ipv6.Message, c.bind.BatchSize()) @@ -635,7 +627,7 @@ func (c *Conn) onUDPRelayAllocResp(allocResp UDPRelayAllocResp) { // now versus taking a network round-trip through DERP. selfNodeKey := c.publicKeyAtomic.Load() if selfNodeKey.Compare(allocResp.ReqRxFromNodeKey) == 0 && - allocResp.ReqRxFromDiscoKey.Compare(c.discoPublic) == 0 { + allocResp.ReqRxFromDiscoKey.Compare(c.discoAtomic.Public()) == 0 { c.relayManager.handleRxDiscoMsg(c, allocResp.Message, selfNodeKey, allocResp.ReqRxFromDiscoKey, epAddr{}) metricLocalDiscoAllocUDPRelayEndpointResponse.Add(1) } @@ -765,7 +757,7 @@ func NewConn(opts Options) (*Conn, error) { c.logf("[v1] couldn't create raw v6 disco listener, using regular listener instead: %v", err) } - c.logf("magicsock: disco key = %v", c.discoShort) + c.logf("magicsock: disco key = %v", c.discoAtomic.Short()) return c, nil } @@ -1244,7 +1236,32 @@ func (c *Conn) GetEndpointChanges(peer tailcfg.NodeView) ([]EndpointChange, erro // DiscoPublicKey returns the discovery public key. func (c *Conn) DiscoPublicKey() key.DiscoPublic { - return c.discoPublic + return c.discoAtomic.Public() +} + +// RotateDiscoKey generates a new discovery key pair and updates the connection +// to use it. This invalidates all existing disco sessions and will cause peers +// to re-establish discovery sessions with the new key. +// +// This is primarily for debugging and testing purposes, a future enhancement +// should provide a mechanism for seamless rotation by supporting short term use +// of the old key. +func (c *Conn) RotateDiscoKey() { + oldShort := c.discoAtomic.Short() + newPrivate := key.NewDisco() + + c.mu.Lock() + c.discoAtomic.Set(newPrivate) + newShort := c.discoAtomic.Short() + c.discoInfo = make(map[key.DiscoPublic]*discoInfo) + connCtx := c.connCtx + c.mu.Unlock() + + c.logf("magicsock: rotated disco key from %v to %v", oldShort, newShort) + + if connCtx != nil { + c.ReSTUN("disco-key-rotation") + } } // determineEndpoints returns the machine's endpoint addresses. It does a STUN @@ -1914,7 +1931,7 @@ func (c *Conn) sendDiscoAllocateUDPRelayEndpointRequest(dst epAddr, dstKey key.N if isDERP && dstKey.Compare(selfNodeKey) == 0 { c.allocRelayEndpointPub.Publish(UDPRelayAllocReq{ RxFromNodeKey: selfNodeKey, - RxFromDiscoKey: c.discoPublic, + RxFromDiscoKey: c.discoAtomic.Public(), Message: allocReq, }) metricLocalDiscoAllocUDPRelayEndpointRequest.Add(1) @@ -1985,7 +2002,7 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. } } pkt = append(pkt, disco.Magic...) - pkt = c.discoPublic.AppendTo(pkt) + pkt = c.discoAtomic.Public().AppendTo(pkt) if isDERP { metricSendDiscoDERP.Add(1) @@ -2003,7 +2020,7 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. 
if !dstKey.IsZero() { node = dstKey.ShortString() } - c.dlogf("[v1] magicsock: disco: %v->%v (%v, %v) sent %v len %v\n", c.discoShort, dstDisco.ShortString(), node, derpStr(dst.String()), disco.MessageSummary(m), len(pkt)) + c.dlogf("[v1] magicsock: disco: %v->%v (%v, %v) sent %v len %v\n", c.discoAtomic.Short(), dstDisco.ShortString(), node, derpStr(dst.String()), disco.MessageSummary(m), len(pkt)) } if isDERP { metricSentDiscoDERP.Add(1) @@ -2352,13 +2369,13 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake } if isVia { c.dlogf("[v1] magicsock: disco: %v<-%v via %v (%v, %v) got call-me-maybe-via, %d endpoints", - c.discoShort, epDisco.short, via.ServerDisco.ShortString(), + c.discoAtomic.Short(), epDisco.short, via.ServerDisco.ShortString(), ep.publicKey.ShortString(), derpStr(src.String()), len(via.AddrPorts)) c.relayManager.handleCallMeMaybeVia(ep, lastBest, lastBestIsTrusted, via) } else { c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got call-me-maybe, %d endpoints", - c.discoShort, epDisco.short, + c.discoAtomic.Short(), epDisco.short, ep.publicKey.ShortString(), derpStr(src.String()), len(cmm.MyNumber)) go ep.handleCallMeMaybe(cmm) @@ -2404,7 +2421,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake if isResp { c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s, %d endpoints", - c.discoShort, epDisco.short, + c.discoAtomic.Short(), epDisco.short, ep.publicKey.ShortString(), derpStr(src.String()), msgType, len(resp.AddrPorts)) @@ -2418,7 +2435,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake return } else { c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s disco[0]=%v disco[1]=%v", - c.discoShort, epDisco.short, + c.discoAtomic.Short(), epDisco.short, ep.publicKey.ShortString(), derpStr(src.String()), msgType, req.ClientDisco[0].ShortString(), req.ClientDisco[1].ShortString()) @@ -2583,7 +2600,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN if numNodes > 1 { pingNodeSrcStr = "[one-of-multi]" } - c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got ping tx=%x padding=%v", c.discoShort, di.discoShort, pingNodeSrcStr, src, dm.TxID[:6], dm.Padding) + c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got ping tx=%x padding=%v", c.discoAtomic.Short(), di.discoShort, pingNodeSrcStr, src, dm.TxID[:6], dm.Padding) } ipDst := src @@ -2656,7 +2673,7 @@ func (c *Conn) discoInfoForKnownPeerLocked(k key.DiscoPublic) *discoInfo { di = &discoInfo{ discoKey: k, discoShort: k.ShortString(), - sharedKey: c.discoPrivate.Shared(k), + sharedKey: c.discoAtomic.Private().Shared(k), } c.discoInfo[k] = di } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 2a20b3cf6..7ae422906 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -4235,3 +4235,73 @@ func Test_lazyEndpoint_FromPeer(t *testing.T) { }) } } + +func TestRotateDiscoKey(t *testing.T) { + c := newConn(t.Logf) + + oldPrivate, oldPublic := c.discoAtomic.Pair() + oldShort := c.discoAtomic.Short() + + if oldPublic != oldPrivate.Public() { + t.Fatalf("old public key doesn't match old private key") + } + if oldShort != oldPublic.ShortString() { + t.Fatalf("old short string doesn't match old public key") + } + + testDiscoKey := key.NewDisco().Public() + c.mu.Lock() + c.discoInfo[testDiscoKey] = &discoInfo{ + discoKey: testDiscoKey, + discoShort: testDiscoKey.ShortString(), + } + if len(c.discoInfo) != 1 { + 
t.Fatalf("expected 1 discoInfo entry, got %d", len(c.discoInfo)) + } + c.mu.Unlock() + + c.RotateDiscoKey() + + newPrivate, newPublic := c.discoAtomic.Pair() + newShort := c.discoAtomic.Short() + + if newPublic.Compare(oldPublic) == 0 { + t.Fatalf("disco key didn't change after rotation") + } + if newShort == oldShort { + t.Fatalf("short string didn't change after rotation") + } + + if newPublic != newPrivate.Public() { + t.Fatalf("new public key doesn't match new private key") + } + if newShort != newPublic.ShortString() { + t.Fatalf("new short string doesn't match new public key") + } + + c.mu.Lock() + if len(c.discoInfo) != 0 { + t.Fatalf("expected discoInfo to be cleared, got %d entries", len(c.discoInfo)) + } + c.mu.Unlock() +} + +func TestRotateDiscoKeyMultipleTimes(t *testing.T) { + c := newConn(t.Logf) + + keys := make([]key.DiscoPublic, 0, 5) + keys = append(keys, c.discoAtomic.Public()) + + for i := 0; i < 4; i++ { + c.RotateDiscoKey() + newKey := c.discoAtomic.Public() + + for j, oldKey := range keys { + if newKey.Compare(oldKey) == 0 { + t.Fatalf("rotation %d produced same key as rotation %d", i+1, j) + } + } + + keys = append(keys, newKey) + } +} diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 2f93f1085..69831a4df 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -361,7 +361,7 @@ func (r *relayManager) ensureDiscoInfoFor(work *relayHandshakeWork) { di.di = &discoInfo{ discoKey: work.se.ServerDisco, discoShort: work.se.ServerDisco.ShortString(), - sharedKey: work.wlb.ep.c.discoPrivate.Shared(work.se.ServerDisco), + sharedKey: work.wlb.ep.c.discoAtomic.Private().Shared(work.se.ServerDisco), } } } @@ -1031,7 +1031,7 @@ func (r *relayManager) allocateAllServersRunLoop(wlb endpointWithLastBest) { if remoteDisco == nil { return } - discoKeys := key.NewSortedPairOfDiscoPublic(wlb.ep.c.discoPublic, remoteDisco.key) + discoKeys := key.NewSortedPairOfDiscoPublic(wlb.ep.c.discoAtomic.Public(), remoteDisco.key) for _, v := range r.serversByNodeKey { byDiscoKeys, ok := r.allocWorkByDiscoKeysByServerNodeKey[v.nodeKey] if !ok { diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index d40081839..e8fddfd91 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -22,11 +22,15 @@ func TestRelayManagerInitAndIdle(t *testing.T) { <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleCallMeMaybeVia(&endpoint{c: &Conn{discoPrivate: key.NewDisco()}}, addrQuality{}, false, &disco.CallMeMaybeVia{UDPRelayEndpoint: disco.UDPRelayEndpoint{ServerDisco: key.NewDisco().Public()}}) + c1 := &Conn{} + c1.discoAtomic.Set(key.NewDisco()) + rm.handleCallMeMaybeVia(&endpoint{c: c1}, addrQuality{}, false, &disco.CallMeMaybeVia{UDPRelayEndpoint: disco.UDPRelayEndpoint{ServerDisco: key.NewDisco().Public()}}) <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleRxDiscoMsg(&Conn{discoPrivate: key.NewDisco()}, &disco.BindUDPRelayEndpointChallenge{}, key.NodePublic{}, key.DiscoPublic{}, epAddr{}) + c2 := &Conn{} + c2.discoAtomic.Set(key.NewDisco()) + rm.handleRxDiscoMsg(c2, &disco.BindUDPRelayEndpointChallenge{}, key.NodePublic{}, key.DiscoPublic{}, epAddr{}) <-rm.runLoopStoppedCh rm = relayManager{} From 3b865d7c33b1e945e9122dbe6f4eeff696a84e0a Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Tue, 18 Nov 2025 14:16:27 -0800 Subject: [PATCH 1694/1708] cmd/netlogfmt: support resolving IP addresses to synonymous labels (#17955) We now embed node 
information into network flow logs. By default, netlogfmt still prints out using Tailscale IP addresses. Support a "--resolve-addrs=TYPE" flag that can be used to specify resolving IP addresses as node IDs, hostnames, users, or tags. Updates tailscale/corp#33352 Signed-off-by: Joe Tsai --- cmd/netlogfmt/main.go | 149 ++++++++++++++++++++++++------------------ 1 file changed, 87 insertions(+), 62 deletions(-) diff --git a/cmd/netlogfmt/main.go b/cmd/netlogfmt/main.go index 65e87098f..b8aba4aaa 100644 --- a/cmd/netlogfmt/main.go +++ b/cmd/netlogfmt/main.go @@ -44,25 +44,51 @@ import ( "github.com/dsnet/try" jsonv2 "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" + "tailscale.com/tailcfg" + "tailscale.com/types/bools" "tailscale.com/types/logid" "tailscale.com/types/netlogtype" "tailscale.com/util/must" ) var ( - resolveNames = flag.Bool("resolve-names", false, "convert tailscale IP addresses to hostnames; must also specify --api-key and --tailnet-id") - apiKey = flag.String("api-key", "", "API key to query the Tailscale API with; see https://login.tailscale.com/admin/settings/keys") - tailnetName = flag.String("tailnet-name", "", "tailnet domain name to lookup devices in; see https://login.tailscale.com/admin/settings/general") + resolveNames = flag.Bool("resolve-names", false, "This is equivalent to specifying \"--resolve-addrs=name\".") + resolveAddrs = flag.String("resolve-addrs", "", "Resolve each tailscale IP address as a node ID, name, or user.\n"+ + "If network flow logs do not support embedded node information,\n"+ + "then --api-key and --tailnet-name must also be provided.\n"+ + "Valid values include \"nodeId\", \"name\", or \"user\".") + apiKey = flag.String("api-key", "", "The API key to query the Tailscale API with.\nSee https://login.tailscale.com/admin/settings/keys") + tailnetName = flag.String("tailnet-name", "", "The Tailnet name to lookup nodes within.\nSee https://login.tailscale.com/admin/settings/general") ) -var namesByAddr map[netip.Addr]string +var ( + tailnetNodesByAddr map[netip.Addr]netlogtype.Node + tailnetNodesByID map[tailcfg.StableNodeID]netlogtype.Node +) func main() { flag.Parse() if *resolveNames { - namesByAddr = mustMakeNamesByAddr() + *resolveAddrs = "name" + } + *resolveAddrs = strings.ToLower(*resolveAddrs) // make case-insensitive + *resolveAddrs = strings.TrimSuffix(*resolveAddrs, "s") // allow plural form + *resolveAddrs = strings.ReplaceAll(*resolveAddrs, " ", "") // ignore spaces + *resolveAddrs = strings.ReplaceAll(*resolveAddrs, "-", "") // ignore dashes + *resolveAddrs = strings.ReplaceAll(*resolveAddrs, "_", "") // ignore underscores + switch *resolveAddrs { + case "id", "nodeid": + *resolveAddrs = "nodeid" + case "name", "hostname": + *resolveAddrs = "name" + case "user", "tag", "usertag", "taguser": + *resolveAddrs = "user" // tag resolution is implied + default: + log.Fatalf("--resolve-addrs must be \"nodeId\", \"name\", or \"user\"") } + mustLoadTailnetNodes() + // The logic handles a stream of arbitrary JSON. // So long as a JSON object seems like a network log message, // then this will unmarshal and print it. @@ -103,7 +129,7 @@ func processArray(dec *jsontext.Decoder) { func processObject(dec *jsontext.Decoder) { var hasTraffic bool - var rawMsg []byte + var rawMsg jsontext.Value try.E1(dec.ReadToken()) // parse '{' for dec.PeekKind() != '}' { // Capture any members that could belong to a network log message. 
@@ -111,13 +137,13 @@ func processObject(dec *jsontext.Decoder) { case "virtualTraffic", "subnetTraffic", "exitTraffic", "physicalTraffic": hasTraffic = true fallthrough - case "logtail", "nodeId", "logged", "start", "end": + case "logtail", "nodeId", "logged", "srcNode", "dstNodes", "start", "end": if len(rawMsg) == 0 { rawMsg = append(rawMsg, '{') } else { rawMsg = append(rawMsg[:len(rawMsg)-1], ',') } - rawMsg = append(append(append(rawMsg, '"'), name.String()...), '"') + rawMsg, _ = jsontext.AppendQuote(rawMsg, name.String()) rawMsg = append(rawMsg, ':') rawMsg = append(rawMsg, try.E1(dec.ReadValue())...) rawMsg = append(rawMsg, '}') @@ -145,6 +171,32 @@ type message struct { } func printMessage(msg message) { + var nodesByAddr map[netip.Addr]netlogtype.Node + var tailnetDNS string // e.g., ".acme-corp.ts.net" + if *resolveAddrs != "" { + nodesByAddr = make(map[netip.Addr]netlogtype.Node) + insertNode := func(node netlogtype.Node) { + for _, addr := range node.Addresses { + nodesByAddr[addr] = node + } + } + for _, node := range msg.DstNodes { + insertNode(node) + } + insertNode(msg.SrcNode) + + // Derive the Tailnet DNS of the self node. + detectTailnetDNS := func(nodeName string) { + if prefix, ok := strings.CutSuffix(nodeName, ".ts.net"); ok { + if i := strings.LastIndexByte(prefix, '.'); i > 0 { + tailnetDNS = nodeName[i:] + } + } + } + detectTailnetDNS(msg.SrcNode.Name) + detectTailnetDNS(tailnetNodesByID[msg.NodeID].Name) + } + // Construct a table of network traffic per connection. rows := [][7]string{{3: "Tx[P/s]", 4: "Tx[B/s]", 5: "Rx[P/s]", 6: "Rx[B/s]"}} duration := msg.End.Sub(msg.Start) @@ -175,16 +227,25 @@ func printMessage(msg message) { if !a.IsValid() { return "" } - if name, ok := namesByAddr[a.Addr()]; ok { - if a.Port() == 0 { - return name + name := a.Addr().String() + node, ok := tailnetNodesByAddr[a.Addr()] + if !ok { + node, ok = nodesByAddr[a.Addr()] + } + if ok { + switch *resolveAddrs { + case "nodeid": + name = cmp.Or(string(node.NodeID), name) + case "name": + name = cmp.Or(strings.TrimSuffix(string(node.Name), tailnetDNS), name) + case "user": + name = cmp.Or(bools.IfElse(len(node.Tags) > 0, fmt.Sprint(node.Tags), node.User), name) } - return name + ":" + strconv.Itoa(int(a.Port())) } - if a.Port() == 0 { - return a.Addr().String() + if a.Port() != 0 { + return name + ":" + strconv.Itoa(int(a.Port())) } - return a.String() + return name } for _, cc := range traffic { row := [7]string{ @@ -279,8 +340,10 @@ func printMessage(msg message) { } } -func mustMakeNamesByAddr() map[netip.Addr]string { +func mustLoadTailnetNodes() { switch { + case *apiKey == "" && *tailnetName == "": + return // rely on embedded node information in the logs themselves case *apiKey == "": log.Fatalf("--api-key must be specified with --resolve-names") case *tailnetName == "": @@ -300,57 +363,19 @@ func mustMakeNamesByAddr() map[netip.Addr]string { // Unmarshal the API response. var m struct { - Devices []struct { - Name string `json:"name"` - Addrs []netip.Addr `json:"addresses"` - } `json:"devices"` + Devices []netlogtype.Node `json:"devices"` } must.Do(json.Unmarshal(b, &m)) - // Construct a unique mapping of Tailscale IP addresses to hostnames. - // For brevity, we start with the first segment of the name and - // use more segments until we find the shortest prefix that is unique - // for all names in the tailnet. 
- seen := make(map[string]bool) - namesByAddr := make(map[netip.Addr]string) -retry: - for i := range 10 { - clear(seen) - clear(namesByAddr) - for _, d := range m.Devices { - name := fieldPrefix(d.Name, i) - if seen[name] { - continue retry - } - seen[name] = true - for _, a := range d.Addrs { - namesByAddr[a] = name - } - } - return namesByAddr - } - panic("unable to produce unique mapping of address to names") -} - -// fieldPrefix returns the first n number of dot-separated segments. -// -// Example: -// -// fieldPrefix("foo.bar.baz", 0) returns "" -// fieldPrefix("foo.bar.baz", 1) returns "foo" -// fieldPrefix("foo.bar.baz", 2) returns "foo.bar" -// fieldPrefix("foo.bar.baz", 3) returns "foo.bar.baz" -// fieldPrefix("foo.bar.baz", 4) returns "foo.bar.baz" -func fieldPrefix(s string, n int) string { - s0 := s - for i := 0; i < n && len(s) > 0; i++ { - if j := strings.IndexByte(s, '.'); j >= 0 { - s = s[j+1:] - } else { - s = "" + // Construct a mapping of Tailscale IP addresses to node information. + tailnetNodesByAddr = make(map[netip.Addr]netlogtype.Node) + tailnetNodesByID = make(map[tailcfg.StableNodeID]netlogtype.Node) + for _, node := range m.Devices { + for _, addr := range node.Addresses { + tailnetNodesByAddr[addr] = node } + tailnetNodesByID[node.NodeID] = node } - return strings.TrimSuffix(s0[:len(s0)-len(s)], ".") } func appendRepeatByte(b []byte, c byte, n int) []byte { From 5b0c57f497ffe1c83dc2e4c7026541264ecd0f8a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 18 Nov 2025 14:35:02 -0800 Subject: [PATCH 1695/1708] tailcfg: add some omitzero, adjust some omitempty to omitzero Updates tailscale/corp#25406 Change-Id: I7832dbe3dce3774bcc831e3111feb75bcc9e021d Signed-off-by: Brad Fitzpatrick --- tailcfg/tailcfg.go | 228 ++++++++++++++++++++++----------------------- 1 file changed, 114 insertions(+), 114 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 346957803..41e0a0b28 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -255,9 +255,9 @@ func (u StableNodeID) IsZero() bool { // have a general gmail address login associated with the user. type User struct { ID UserID - DisplayName string // if non-empty overrides Login field - ProfilePicURL string // if non-empty overrides Login field - Created time.Time + DisplayName string // if non-empty overrides Login field + ProfilePicURL string `json:",omitzero"` // if non-empty overrides Login field + Created time.Time `json:",omitzero"` } // Login is a user from a specific identity provider, not associated with any @@ -268,7 +268,7 @@ type Login struct { Provider string // "google", "github", "okta_foo", etc. LoginName string // an email address or "email-ish" string (like alice@github) DisplayName string // from the IdP - ProfilePicURL string // from the IdP + ProfilePicURL string `json:",omitzero"` // from the IdP } // A UserProfile is display-friendly data for a [User]. @@ -278,7 +278,7 @@ type UserProfile struct { ID UserID LoginName string // "alice@smith.com"; for display purposes only (provider is not listed) DisplayName string // "Alice Smith" - ProfilePicURL string `json:",omitempty"` + ProfilePicURL string `json:",omitzero"` } func (p *UserProfile) Equal(p2 *UserProfile) bool { @@ -345,13 +345,13 @@ type Node struct { User UserID // Sharer, if non-zero, is the user who shared this node, if different than User. 
- Sharer UserID `json:",omitempty"` + Sharer UserID `json:",omitzero"` Key key.NodePublic - KeyExpiry time.Time // the zero value if this node does not expire + KeyExpiry time.Time `json:",omitzero"` // the zero value if this node does not expire KeySignature tkatype.MarshaledSignature `json:",omitempty"` - Machine key.MachinePublic - DiscoKey key.DiscoPublic + Machine key.MachinePublic `json:",omitzero"` + DiscoKey key.DiscoPublic `json:",omitzero"` // Addresses are the IP addresses of this Node directly. Addresses []netip.Prefix @@ -361,7 +361,7 @@ type Node struct { // As of CapabilityVersion 112, this may be nil (null or undefined) on the wire // to mean the same as Addresses. Internally, it is always filled in with // its possibly-implicit value. - AllowedIPs []netip.Prefix + AllowedIPs []netip.Prefix `json:",omitzero"` // _not_ omitempty; only nil is special Endpoints []netip.AddrPort `json:",omitempty"` // IP+port (public via STUN, and local LANs) @@ -375,18 +375,18 @@ type Node struct { // this field. See tailscale/tailscale#14636. Do not use this field in code // other than in the upgradeNode func, which canonicalizes it to HomeDERP // if it arrives as a LegacyDERPString string on the wire. - LegacyDERPString string `json:"DERP,omitempty"` // DERP-in-IP:port ("127.3.3.40:N") endpoint + LegacyDERPString string `json:"DERP,omitzero"` // DERP-in-IP:port ("127.3.3.40:N") endpoint // HomeDERP is the modern version of the DERP string field, with just an // integer. The client advertises support for this as of capver 111. // // HomeDERP may be zero if not (yet) known, but ideally always be non-zero // for magicsock connectivity to function normally. - HomeDERP int `json:",omitempty"` // DERP region ID of the node's home DERP + HomeDERP int `json:",omitzero"` // DERP region ID of the node's home DERP - Hostinfo HostinfoView - Created time.Time - Cap CapabilityVersion `json:",omitempty"` // if non-zero, the node's capability version; old servers might not send + Hostinfo HostinfoView `json:",omitzero"` + Created time.Time `json:",omitzero"` + Cap CapabilityVersion `json:",omitzero"` // if non-zero, the node's capability version; old servers might not send // Tags are the list of ACL tags applied to this node. // Tags take the form of `tag:` where value starts @@ -453,25 +453,25 @@ type Node struct { // it do anything. It is the tailscaled client's job to double-check the // MapResponse's PacketFilter to verify that its AllowedIPs will not be // accepted by the packet filter. - UnsignedPeerAPIOnly bool `json:",omitempty"` + UnsignedPeerAPIOnly bool `json:",omitzero"` // The following three computed fields hold the various names that can // be used for this node in UIs. They are populated from controlclient // (not from control) by calling node.InitDisplayNames. These can be // used directly or accessed via node.DisplayName or node.DisplayNames. 
- ComputedName string `json:",omitempty"` // MagicDNS base name (for normal non-shared-in nodes), FQDN (without trailing dot, for shared-in nodes), or Hostname (if no MagicDNS) + ComputedName string `json:",omitzero"` // MagicDNS base name (for normal non-shared-in nodes), FQDN (without trailing dot, for shared-in nodes), or Hostname (if no MagicDNS) computedHostIfDifferent string // hostname, if different than ComputedName, otherwise empty - ComputedNameWithHost string `json:",omitempty"` // either "ComputedName" or "ComputedName (computedHostIfDifferent)", if computedHostIfDifferent is set + ComputedNameWithHost string `json:",omitzero"` // either "ComputedName" or "ComputedName (computedHostIfDifferent)", if computedHostIfDifferent is set // DataPlaneAuditLogID is the per-node logtail ID used for data plane audit logging. - DataPlaneAuditLogID string `json:",omitempty"` + DataPlaneAuditLogID string `json:",omitzero"` // Expired is whether this node's key has expired. Control may send // this; clients are only allowed to set this from false to true. On // the client, this is calculated client-side based on a timestamp sent // from control, to avoid clock skew issues. - Expired bool `json:",omitempty"` + Expired bool `json:",omitzero"` // SelfNodeV4MasqAddrForThisPeer is the IPv4 that this peer knows the current node as. // It may be empty if the peer knows the current node by its native @@ -486,7 +486,7 @@ type Node struct { // This only applies to traffic originating from the current node to the // peer or any of its subnets. Traffic originating from subnet routes will // not be masqueraded (e.g. in case of --snat-subnet-routes). - SelfNodeV4MasqAddrForThisPeer *netip.Addr `json:",omitempty"` + SelfNodeV4MasqAddrForThisPeer *netip.Addr `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 // SelfNodeV6MasqAddrForThisPeer is the IPv6 that this peer knows the current node as. // It may be empty if the peer knows the current node by its native @@ -501,17 +501,17 @@ type Node struct { // This only applies to traffic originating from the current node to the // peer or any of its subnets. Traffic originating from subnet routes will // not be masqueraded (e.g. in case of --snat-subnet-routes). - SelfNodeV6MasqAddrForThisPeer *netip.Addr `json:",omitempty"` + SelfNodeV6MasqAddrForThisPeer *netip.Addr `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 // IsWireGuardOnly indicates that this is a non-Tailscale WireGuard peer, it // is not expected to speak Disco or DERP, and it must have Endpoints in // order to be reachable. - IsWireGuardOnly bool `json:",omitempty"` + IsWireGuardOnly bool `json:",omitzero"` // IsJailed indicates that this node is jailed and should not be allowed // initiate connections, however outbound connections to it should still be // allowed. - IsJailed bool `json:",omitempty"` + IsJailed bool `json:",omitzero"` // ExitNodeDNSResolvers is the list of DNS servers that should be used when this // node is marked IsWireGuardOnly and being used as an exit node. @@ -827,10 +827,10 @@ type Location struct { // Because it contains pointers (slices), this type should not be used // as a value type. 
type Hostinfo struct { - IPNVersion string `json:",omitempty"` // version of this code (in version.Long format) - FrontendLogID string `json:",omitempty"` // logtail ID of frontend instance - BackendLogID string `json:",omitempty"` // logtail ID of backend instance - OS string `json:",omitempty"` // operating system the client runs on (a version.OS value) + IPNVersion string `json:",omitzero"` // version of this code (in version.Long format) + FrontendLogID string `json:",omitzero"` // logtail ID of frontend instance + BackendLogID string `json:",omitzero"` // logtail ID of backend instance + OS string `json:",omitzero"` // operating system the client runs on (a version.OS value) // OSVersion is the version of the OS, if available. // @@ -842,25 +842,25 @@ type Hostinfo struct { // string on Linux, like "Debian 10.4; kernel=xxx; container; env=kn" and so // on. As of Tailscale 1.32, this is simply the kernel version on Linux, like // "5.10.0-17-amd64". - OSVersion string `json:",omitempty"` + OSVersion string `json:",omitzero"` - Container opt.Bool `json:",omitempty"` // best-effort whether the client is running in a container - Env string `json:",omitempty"` // a hostinfo.EnvType in string form - Distro string `json:",omitempty"` // "debian", "ubuntu", "nixos", ... - DistroVersion string `json:",omitempty"` // "20.04", ... - DistroCodeName string `json:",omitempty"` // "jammy", "bullseye", ... + Container opt.Bool `json:",omitzero"` // best-effort whether the client is running in a container + Env string `json:",omitzero"` // a hostinfo.EnvType in string form + Distro string `json:",omitzero"` // "debian", "ubuntu", "nixos", ... + DistroVersion string `json:",omitzero"` // "20.04", ... + DistroCodeName string `json:",omitzero"` // "jammy", "bullseye", ... // App is used to disambiguate Tailscale clients that run using tsnet. - App string `json:",omitempty"` // "k8s-operator", "golinks", ... - - Desktop opt.Bool `json:",omitempty"` // if a desktop was detected on Linux - Package string `json:",omitempty"` // Tailscale package to disambiguate ("choco", "appstore", etc; "" for unknown) - DeviceModel string `json:",omitempty"` // mobile phone model ("Pixel 3a", "iPhone12,3") - PushDeviceToken string `json:",omitempty"` // macOS/iOS APNs device token for notifications (and Android in the future) - Hostname string `json:",omitempty"` // name of the host the client runs on - ShieldsUp bool `json:",omitempty"` // indicates whether the host is blocking incoming connections - ShareeNode bool `json:",omitempty"` // indicates this node exists in netmap because it's owned by a shared-to user - NoLogsNoSupport bool `json:",omitempty"` // indicates that the user has opted out of sending logs and support + App string `json:",omitzero"` // "k8s-operator", "golinks", ... 
+ + Desktop opt.Bool `json:",omitzero"` // if a desktop was detected on Linux + Package string `json:",omitzero"` // Tailscale package to disambiguate ("choco", "appstore", etc; "" for unknown) + DeviceModel string `json:",omitzero"` // mobile phone model ("Pixel 3a", "iPhone12,3") + PushDeviceToken string `json:",omitzero"` // macOS/iOS APNs device token for notifications (and Android in the future) + Hostname string `json:",omitzero"` // name of the host the client runs on + ShieldsUp bool `json:",omitzero"` // indicates whether the host is blocking incoming connections + ShareeNode bool `json:",omitzero"` // indicates this node exists in netmap because it's owned by a shared-to user + NoLogsNoSupport bool `json:",omitzero"` // indicates that the user has opted out of sending logs and support // WireIngress indicates that the node would like to be wired up server-side // (DNS, etc) to be able to use Tailscale Funnel, even if it's not currently // enabled. For example, the user might only use it for intermittent @@ -868,38 +868,38 @@ type Hostinfo struct { // away, even if it's disabled most of the time. As an optimization, this is // only sent if IngressEnabled is false, as IngressEnabled implies that this // option is true. - WireIngress bool `json:",omitempty"` - IngressEnabled bool `json:",omitempty"` // if the node has any funnel endpoint enabled - AllowsUpdate bool `json:",omitempty"` // indicates that the node has opted-in to admin-console-drive remote updates - Machine string `json:",omitempty"` // the current host's machine type (uname -m) - GoArch string `json:",omitempty"` // GOARCH value (of the built binary) - GoArchVar string `json:",omitempty"` // GOARM, GOAMD64, etc (of the built binary) - GoVersion string `json:",omitempty"` // Go version binary was built with + WireIngress bool `json:",omitzero"` + IngressEnabled bool `json:",omitzero"` // if the node has any funnel endpoint enabled + AllowsUpdate bool `json:",omitzero"` // indicates that the node has opted-in to admin-console-drive remote updates + Machine string `json:",omitzero"` // the current host's machine type (uname -m) + GoArch string `json:",omitzero"` // GOARCH value (of the built binary) + GoArchVar string `json:",omitzero"` // GOARM, GOAMD64, etc (of the built binary) + GoVersion string `json:",omitzero"` // Go version binary was built with RoutableIPs []netip.Prefix `json:",omitempty"` // set of IP ranges this client can route RequestTags []string `json:",omitempty"` // set of ACL tags this node wants to claim WoLMACs []string `json:",omitempty"` // MAC address(es) to send Wake-on-LAN packets to wake this node (lowercase hex w/ colons) Services []Service `json:",omitempty"` // services advertised by this machine - NetInfo *NetInfo `json:",omitempty"` + NetInfo *NetInfo `json:",omitzero"` SSH_HostKeys []string `json:"sshHostKeys,omitempty"` // if advertised - Cloud string `json:",omitempty"` - Userspace opt.Bool `json:",omitempty"` // if the client is running in userspace (netstack) mode - UserspaceRouter opt.Bool `json:",omitempty"` // if the client's subnet router is running in userspace (netstack) mode - AppConnector opt.Bool `json:",omitempty"` // if the client is running the app-connector service - ServicesHash string `json:",omitempty"` // opaque hash of the most recent list of tailnet services, change in hash indicates config should be fetched via c2n - ExitNodeID StableNodeID `json:",omitzero"` // the client’s selected exit node, empty when unselected. 
+ Cloud string `json:",omitzero"` + Userspace opt.Bool `json:",omitzero"` // if the client is running in userspace (netstack) mode + UserspaceRouter opt.Bool `json:",omitzero"` // if the client's subnet router is running in userspace (netstack) mode + AppConnector opt.Bool `json:",omitzero"` // if the client is running the app-connector service + ServicesHash string `json:",omitzero"` // opaque hash of the most recent list of tailnet services, change in hash indicates config should be fetched via c2n + ExitNodeID StableNodeID `json:",omitzero"` // the client’s selected exit node, empty when unselected. // Location represents geographical location data about a // Tailscale host. Location is optional and only set if // explicitly declared by a node. - Location *Location `json:",omitempty"` + Location *Location `json:",omitzero"` - TPM *TPMInfo `json:",omitempty"` // TPM device metadata, if available + TPM *TPMInfo `json:",omitzero"` // TPM device metadata, if available // StateEncrypted reports whether the node state is stored encrypted on // disk. The actual mechanism is platform-specific: // * Apple nodes use the Keychain // * Linux and Windows nodes use the TPM // * Android apps use EncryptedSharedPreferences - StateEncrypted opt.Bool `json:",omitempty"` + StateEncrypted opt.Bool `json:",omitzero"` // NOTE: any new fields containing pointers in this type // require changes to Hostinfo.Equal. @@ -913,25 +913,25 @@ type TPMInfo struct { // https://trustedcomputinggroup.org/resource/vendor-id-registry/, // for example "MSFT" for Microsoft. // Read from TPM_PT_MANUFACTURER. - Manufacturer string `json:",omitempty"` + Manufacturer string `json:",omitzero"` // Vendor is a vendor ID string, up to 16 characters. // Read from TPM_PT_VENDOR_STRING_*. - Vendor string `json:",omitempty"` + Vendor string `json:",omitzero"` // Model is a vendor-defined TPM model. // Read from TPM_PT_VENDOR_TPM_TYPE. - Model int `json:",omitempty"` + Model int `json:",omitzero"` // FirmwareVersion is the version number of the firmware. // Read from TPM_PT_FIRMWARE_VERSION_*. - FirmwareVersion uint64 `json:",omitempty"` + FirmwareVersion uint64 `json:",omitzero"` // SpecRevision is the TPM 2.0 spec revision encoded as a single number. All // revisions can be found at // https://trustedcomputinggroup.org/resource/tpm-library-specification/. // Before revision 184, TCG used the "01.83" format for revision 183. - SpecRevision int `json:",omitempty"` + SpecRevision int `json:",omitzero"` // FamilyIndicator is the TPM spec family, like "2.0". // Read from TPM_PT_FAMILY_INDICATOR. - FamilyIndicator string `json:",omitempty"` + FamilyIndicator string `json:",omitzero"` } // Present reports whether a TPM device is present on this machine. @@ -1016,37 +1016,37 @@ func (v HostinfoView) TailscaleSSHEnabled() bool { return v.ж.TailscaleSSHEnabl type NetInfo struct { // MappingVariesByDestIP says whether the host's NAT mappings // vary based on the destination IP. - MappingVariesByDestIP opt.Bool + MappingVariesByDestIP opt.Bool `json:",omitzero"` // WorkingIPv6 is whether the host has IPv6 internet connectivity. - WorkingIPv6 opt.Bool + WorkingIPv6 opt.Bool `json:",omitzero"` // OSHasIPv6 is whether the OS supports IPv6 at all, regardless of // whether IPv6 internet connectivity is available. - OSHasIPv6 opt.Bool + OSHasIPv6 opt.Bool `json:",omitzero"` // WorkingUDP is whether the host has UDP internet connectivity. - WorkingUDP opt.Bool + WorkingUDP opt.Bool `json:",omitzero"` // WorkingICMPv4 is whether ICMPv4 works. 
// Empty means not checked. - WorkingICMPv4 opt.Bool + WorkingICMPv4 opt.Bool `json:",omitzero"` // HavePortMap is whether we have an existing portmap open // (UPnP, PMP, or PCP). - HavePortMap bool `json:",omitempty"` + HavePortMap bool `json:",omitzero"` // UPnP is whether UPnP appears present on the LAN. // Empty means not checked. - UPnP opt.Bool + UPnP opt.Bool `json:",omitzero"` // PMP is whether NAT-PMP appears present on the LAN. // Empty means not checked. - PMP opt.Bool + PMP opt.Bool `json:",omitzero"` // PCP is whether PCP appears present on the LAN. // Empty means not checked. - PCP opt.Bool + PCP opt.Bool `json:",omitzero"` // PreferredDERP is this node's preferred (home) DERP region ID. // This is where the node expects to be contacted to begin a @@ -1055,10 +1055,10 @@ type NetInfo struct { // that are located elsewhere) but PreferredDERP is the region ID // that the node subscribes to traffic at. // Zero means disconnected or unknown. - PreferredDERP int + PreferredDERP int `json:",omitzero"` // LinkType is the current link type, if known. - LinkType string `json:",omitempty"` // "wired", "wifi", "mobile" (LTE, 4G, 3G, etc) + LinkType string `json:",omitzero"` // "wired", "wifi", "mobile" (LTE, 4G, 3G, etc) // DERPLatency is the fastest recent time to reach various // DERP STUN servers, in seconds. The map key is the @@ -1076,7 +1076,7 @@ type NetInfo struct { // "{nft,ift}-REASON", like "nft-forced" or "ipt-default". Empty means // either not Linux or a configuration in which the host firewall rules // are not managed by tailscaled. - FirewallMode string `json:",omitempty"` + FirewallMode string `json:",omitzero"` // Update BasicallyEqual when adding fields. } @@ -1364,8 +1364,8 @@ type MapRequest struct { // For current values and history, see the CapabilityVersion type's docs. Version CapabilityVersion - Compress string // "zstd" or "" (no compression) - KeepAlive bool // whether server should send keep-alives back to us + Compress string `json:",omitzero"` // "zstd" or "" (no compression) + KeepAlive bool `json:",omitzero"` // whether server should send keep-alives back to us NodeKey key.NodePublic DiscoKey key.DiscoPublic @@ -1388,7 +1388,7 @@ type MapRequest struct { // // If true and Version >= 68, the server should treat this as a read-only // request and ignore any Hostinfo or other fields that might be set. - Stream bool + Stream bool `json:",omitzero"` // Hostinfo is the client's current Hostinfo. Although it is always included // in the request, the server may choose to ignore it when Stream is true @@ -1405,14 +1405,14 @@ type MapRequest struct { // // The server may choose to ignore the request for any reason and start a // new map session. This is only applicable when Stream is true. - MapSessionHandle string `json:",omitempty"` + MapSessionHandle string `json:",omitzero"` // MapSessionSeq is the sequence number in the map session identified by // MapSesssionHandle that was most recently processed by the client. // It is only applicable when MapSessionHandle is specified. // If the server chooses to honor the MapSessionHandle request, only sequence // numbers greater than this value will be returned. - MapSessionSeq int64 `json:",omitempty"` + MapSessionSeq int64 `json:",omitzero"` // Endpoints are the client's magicsock UDP ip:port endpoints (IPv4 or IPv6). // These can be ignored if Stream is true and Version >= 68. 
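For context on the tag changes in this file: with encoding/json, "omitempty" only drops the classic empty values (false, 0, nil, and empty strings, slices, and maps), so a zero struct such as time.Time is still encoded; the "omitzero" option added in Go 1.24 drops any field holding its type's zero value, or whose IsZero method reports true. A standalone sketch (not part of this change) showing the difference:

	package main

	import (
		"encoding/json"
		"fmt"
		"time"
	)

	type record struct {
		A time.Time `json:",omitempty"` // zero time is not "empty", so it is still encoded
		B time.Time `json:",omitzero"`  // zero value, so it is dropped (Go 1.24+)
	}

	func main() {
		out, _ := json.Marshal(record{})
		fmt.Println(string(out)) // {"A":"0001-01-01T00:00:00Z"}
	}
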
@@ -1423,7 +1423,7 @@ type MapRequest struct { // TKAHead describes the hash of the latest AUM applied to the local // tailnet key authority, if one is operating. // It is encoded as tka.AUMHash.MarshalText. - TKAHead string `json:",omitempty"` + TKAHead string `json:",omitzero"` // ReadOnly was set when client just wanted to fetch the MapResponse, // without updating their Endpoints. The intended use was for clients to @@ -1431,7 +1431,7 @@ type MapRequest struct { // update. // // Deprecated: always false as of Version 68. - ReadOnly bool `json:",omitempty"` + ReadOnly bool `json:",omitzero"` // OmitPeers is whether the client is okay with the Peers list being omitted // in the response. @@ -1447,7 +1447,7 @@ type MapRequest struct { // If OmitPeers is true, Stream is false, but ReadOnly is true, // then all the response fields are included. (This is what the client does // when initially fetching the DERP map.) - OmitPeers bool `json:",omitempty"` + OmitPeers bool `json:",omitzero"` // DebugFlags is a list of strings specifying debugging and // development features to enable in handling this map @@ -1467,7 +1467,7 @@ type MapRequest struct { // identifies this specific connection to the server. The server may choose to // use this handle to identify the connection for debugging or testing // purposes. It has no semantic meaning. - ConnectionHandleForTest string `json:",omitempty"` + ConnectionHandleForTest string `json:",omitzero"` } // PortRange represents a range of UDP or TCP port numbers. @@ -1758,7 +1758,7 @@ type DNSConfig struct { // in the network map, aka MagicDNS. // Despite the (legacy) name, does not necessarily cause request // proxying to be enabled. - Proxied bool `json:",omitempty"` + Proxied bool `json:",omitzero"` // Nameservers are the IP addresses of the global nameservers to use. // @@ -1795,7 +1795,7 @@ type DNSConfig struct { // TempCorpIssue13969 is a temporary (2023-08-16) field for an internal hack day prototype. // It contains a user inputed URL that should have a list of domains to be blocked. // See https://github.com/tailscale/corp/issues/13969. - TempCorpIssue13969 string `json:",omitempty"` + TempCorpIssue13969 string `json:",omitzero"` } // DNSRecord is an extra DNS record to add to MagicDNS. @@ -1807,7 +1807,7 @@ type DNSRecord struct { // Type is the DNS record type. // Empty means A or AAAA, depending on value. // Other values are currently ignored. - Type string `json:",omitempty"` + Type string `json:",omitzero"` // Value is the IP address in string form. // TODO(bradfitz): if we ever add support for record types @@ -1855,11 +1855,11 @@ type PingRequest struct { // URLIsNoise, if true, means that the client should hit URL over the Noise // transport instead of TLS. - URLIsNoise bool `json:",omitempty"` + URLIsNoise bool `json:",omitzero"` // Log is whether to log about this ping in the success case. // For failure cases, the client will log regardless. - Log bool `json:",omitempty"` + Log bool `json:",omitzero"` // Types is the types of ping that are initiated. Can be any PingType, comma // separated, e.g. "disco,TSMP" @@ -1869,10 +1869,10 @@ type PingRequest struct { // node's c2n handler and the HTTP response sent in a POST to URL. For c2n, // the value of URLIsNoise is ignored and only the Noise transport (back to // the control plane) will be used, as if URLIsNoise were true. - Types string `json:",omitempty"` + Types string `json:",omitzero"` // IP is the ping target, when needed by the PingType(s) given in Types. 
- IP netip.Addr + IP netip.Addr `json:",omitzero"` // Payload is the ping payload. // @@ -3043,29 +3043,29 @@ type SSHRecordingAttempt struct { // See QueryFeatureResponse for response structure. type QueryFeatureRequest struct { // Feature is the string identifier for a feature. - Feature string `json:",omitempty"` + Feature string `json:",omitzero"` // NodeKey is the client's current node key. - NodeKey key.NodePublic `json:",omitempty"` + NodeKey key.NodePublic `json:",omitzero"` } // QueryFeatureResponse is the response to an QueryFeatureRequest. // See cli.enableFeatureInteractive for usage. type QueryFeatureResponse struct { // Complete is true when the feature is already enabled. - Complete bool `json:",omitempty"` + Complete bool `json:",omitzero"` // Text holds lines to display in the CLI with information // about the feature and how to enable it. // // Lines are separated by newline characters. The final // newline may be omitted. - Text string `json:",omitempty"` + Text string `json:",omitzero"` // URL is the link for the user to visit to take action on // enabling the feature. // // When empty, there is no action for this user to take. - URL string `json:",omitempty"` + URL string `json:",omitzero"` // ShouldWait specifies whether the CLI should block and // wait for the user to enable the feature. @@ -3078,7 +3078,7 @@ type QueryFeatureResponse struct { // // The CLI can watch the IPN notification bus for changes in // required node capabilities to know when to continue. - ShouldWait bool `json:",omitempty"` + ShouldWait bool `json:",omitzero"` } // WebClientAuthResponse is the response to a web client authentication request @@ -3088,15 +3088,15 @@ type WebClientAuthResponse struct { // ID is a unique identifier for the session auth request. // It can be supplied to "/machine/webclient/wait" to pause until // the session authentication has been completed. - ID string `json:",omitempty"` + ID string `json:",omitzero"` // URL is the link for the user to visit to authenticate the session. // // When empty, there is no action for the user to take. - URL string `json:",omitempty"` + URL string `json:",omitzero"` // Complete is true when the session authentication has been completed. - Complete bool `json:",omitempty"` + Complete bool `json:",omitzero"` } // OverTLSPublicKeyResponse is the JSON response to /key?v= @@ -3172,10 +3172,10 @@ type PeerChange struct { // DERPRegion, if non-zero, means that NodeID's home DERP // region ID is now this number. - DERPRegion int `json:",omitempty"` + DERPRegion int `json:",omitzero"` // Cap, if non-zero, means that NodeID's capability version has changed. - Cap CapabilityVersion `json:",omitempty"` + Cap CapabilityVersion `json:",omitzero"` // CapMap, if non-nil, means that NodeID's capability map has changed. CapMap NodeCapMap `json:",omitempty"` @@ -3185,23 +3185,23 @@ type PeerChange struct { Endpoints []netip.AddrPort `json:",omitempty"` // Key, if non-nil, means that the NodeID's wireguard public key changed. - Key *key.NodePublic `json:",omitempty"` + Key *key.NodePublic `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 // KeySignature, if non-nil, means that the signature of the wireguard // public key has changed. KeySignature tkatype.MarshaledSignature `json:",omitempty"` // DiscoKey, if non-nil, means that the NodeID's discokey changed. 
- DiscoKey *key.DiscoPublic `json:",omitempty"` + DiscoKey *key.DiscoPublic `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 // Online, if non-nil, means that the NodeID's online status changed. - Online *bool `json:",omitempty"` + Online *bool `json:",omitzero"` // LastSeen, if non-nil, means that the NodeID's online status changed. - LastSeen *time.Time `json:",omitempty"` + LastSeen *time.Time `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 // KeyExpiry, if non-nil, changes the NodeID's key expiry. - KeyExpiry *time.Time `json:",omitempty"` + KeyExpiry *time.Time `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 } // DerpMagicIP is a fake WireGuard endpoint IP address that means to @@ -3279,14 +3279,14 @@ const ( // POST https:///machine/audit-log type AuditLogRequest struct { // Version is the client's current CapabilityVersion. - Version CapabilityVersion `json:",omitempty"` + Version CapabilityVersion `json:",omitzero"` // NodeKey is the client's current node key. NodeKey key.NodePublic `json:",omitzero"` // Action is the action to be logged. It must correspond to a known action in the control plane. - Action ClientAuditAction `json:",omitempty"` + Action ClientAuditAction `json:",omitzero"` // Details is an opaque string, specific to the action being logged. Empty strings may not // be valid depending on the action being logged. - Details string `json:",omitempty"` + Details string `json:",omitzero"` // Timestamp is the time at which the audit log was generated on the node. Timestamp time.Time `json:",omitzero"` } From 408336a0891288ab3bb7466734d9646fe17fbee1 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 18 Nov 2025 17:39:37 -0800 Subject: [PATCH 1696/1708] feature/featuretags: add CacheNetMap feature tag for upcoming work (trying to get in smaller obvious chunks ahead of later PRs to make them smaller) Updates #17925 Change-Id: I184002001055790484e4792af8ffe2a9a2465b2e Signed-off-by: Brad Fitzpatrick --- .../buildfeatures/feature_cachenetmap_disabled.go | 13 +++++++++++++ .../buildfeatures/feature_cachenetmap_enabled.go | 13 +++++++++++++ feature/featuretags/featuretags.go | 4 ++++ 3 files changed, 30 insertions(+) create mode 100644 feature/buildfeatures/feature_cachenetmap_disabled.go create mode 100644 feature/buildfeatures/feature_cachenetmap_enabled.go diff --git a/feature/buildfeatures/feature_cachenetmap_disabled.go b/feature/buildfeatures/feature_cachenetmap_disabled.go new file mode 100644 index 000000000..22407fe38 --- /dev/null +++ b/feature/buildfeatures/feature_cachenetmap_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_cachenetmap + +package buildfeatures + +// HasCacheNetMap is whether the binary was built with support for modular feature "Cache the netmap on disk between runs". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cachenetmap" build tag. +// It's a const so it can be used for dead code elimination. +const HasCacheNetMap = false diff --git a/feature/buildfeatures/feature_cachenetmap_enabled.go b/feature/buildfeatures/feature_cachenetmap_enabled.go new file mode 100644 index 000000000..02663c416 --- /dev/null +++ b/feature/buildfeatures/feature_cachenetmap_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_cachenetmap + +package buildfeatures + +// HasCacheNetMap is whether the binary was built with support for modular feature "Cache the netmap on disk between runs". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cachenetmap" build tag. +// It's a const so it can be used for dead code elimination. +const HasCacheNetMap = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index c93e8b15b..44b129576 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -123,6 +123,10 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Control-to-node (C2N) support", ImplementationDetail: true, }, + "cachenetmap": { + Sym: "CacheNetMap", + Desc: "Cache the netmap on disk between runs", + }, "captiveportal": {Sym: "CaptivePortal", Desc: "Captive portal detection"}, "capture": {Sym: "Capture", Desc: "Packet capture"}, "cli": {Sym: "CLI", Desc: "embed the CLI into the tailscaled binary"}, From 38ccdbe35c88c08311d79db651ef7d4161a2ffc2 Mon Sep 17 00:00:00 2001 From: David Bond Date: Wed, 19 Nov 2025 11:57:27 +0000 Subject: [PATCH 1697/1708] cmd/k8s-operator: default to stable image (#17848) This commit modifies the helm/static manifest configuration for the k8s-operator to prefer the stable image tag. This avoids making those using static manifests seeing unstable behaviour by default if they do not manually make the change. This is managed for us when using helm but not when generating the static manifests. Updates https://github.com/tailscale/tailscale/issues/10655 Signed-off-by: David Bond --- cmd/k8s-operator/deploy/chart/Chart.yaml | 2 +- cmd/k8s-operator/deploy/manifests/operator.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/k8s-operator/deploy/chart/Chart.yaml b/cmd/k8s-operator/deploy/chart/Chart.yaml index 363d87d15..9db6389d1 100644 --- a/cmd/k8s-operator/deploy/chart/Chart.yaml +++ b/cmd/k8s-operator/deploy/chart/Chart.yaml @@ -26,4 +26,4 @@ maintainers: version: 0.1.0 # appVersion will be set to Tailscale repo tag at release time. -appVersion: "unstable" +appVersion: "stable" diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index c7c5ef0a7..c5da367e0 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -5366,7 +5366,7 @@ spec: - name: CLIENT_SECRET_FILE value: /oauth/client_secret - name: PROXY_IMAGE - value: tailscale/tailscale:unstable + value: tailscale/tailscale:stable - name: PROXY_TAGS value: tag:k8s - name: APISERVER_PROXY @@ -5381,7 +5381,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid - image: tailscale/k8s-operator:unstable + image: tailscale/k8s-operator:stable imagePullPolicy: Always name: operator volumeMounts: From e1dd9222d4a8e8147f6067b7f3b3956995b5bc6c Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 17 Nov 2025 16:38:57 +0000 Subject: [PATCH 1698/1708] ipn/ipnlocal, tka: compact TKA state after every sync Previously a TKA compaction would only run when a node starts, which means a long-running node could use unbounded storage as it accumulates ever-increasing amounts of TKA state. This patch changes TKA so it runs a compaction after every sync. 
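In sketch form, the sync path now ends with a compaction pass using the existing defaults (names as in the ipn/ipnlocal/network-lock.go hunk below):

	if err := b.tkaSyncLocked(ourNodeKey); err != nil {
		return fmt.Errorf("tka sync: %w", err)
	}
	// Compacting on every sync keeps on-disk TKA state bounded; in the
	// common case there is nothing to trim and this is a no-op.
	if err := b.tka.authority.Compact(b.tka.storage, tkaCompactionDefaults); err != nil {
		return fmt.Errorf("tka compact: %w", err)
	}
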
Updates https://github.com/tailscale/corp/issues/33537 Change-Id: I91df887ea0c5a5b00cb6caced85aeffa2a4b24ee Signed-off-by: Alex Chan --- ipn/ipnlocal/network-lock.go | 11 +- ipn/ipnlocal/network-lock_test.go | 224 ++++++++++++++++++++++++++++- tka/builder_test.go | 14 +- tka/chaintest_test.go | 8 +- tka/key_test.go | 2 +- tka/sync_test.go | 2 +- tka/tailchonk.go | 22 ++- tka/tailchonk_test.go | 6 +- tka/tka_test.go | 14 +- tstest/chonktest/tailchonk_test.go | 4 +- 10 files changed, 276 insertions(+), 31 deletions(-) diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 14a3b105b..8c77cd92d 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -360,6 +360,13 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie if err := b.tkaSyncLocked(ourNodeKey); err != nil { return fmt.Errorf("tka sync: %w", err) } + // Try to compact the TKA state, to avoid unbounded storage on nodes. + // + // We run this on every sync so that clients compact consistently. In many + // cases this will be a no-op. + if err := b.tka.authority.Compact(b.tka.storage, tkaCompactionDefaults); err != nil { + return fmt.Errorf("tka compact: %w", err) + } } return nil @@ -508,7 +515,7 @@ func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, per if root == "" { b.health.SetUnhealthy(noNetworkLockStateDirWarnable, nil) b.logf("network-lock using in-memory storage; no state directory") - storage = &tka.Mem{} + storage = tka.ChonkMem() } else { chonkDir := b.chonkPathLocked() chonk, err := tka.ChonkDir(chonkDir) @@ -686,7 +693,7 @@ func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byt // We use an in-memory tailchonk because we don't want to commit to // the filesystem until we've finished the initialization sequence, // just in case something goes wrong. - _, genesisAUM, err := tka.Create(&tka.Mem{}, tka.State{ + _, genesisAUM, err := tka.Create(tka.ChonkMem(), tka.State{ Keys: keys, // TODO(tom): s/tka.State.DisablementSecrets/tka.State.DisablementValues // This will center on consistent nomenclature: diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 5fa072883..c07e65ee3 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -17,6 +17,7 @@ import ( "path/filepath" "reflect" "testing" + "time" go4mem "go4.org/mem" @@ -31,6 +32,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tka" "tailscale.com/tsd" + "tailscale.com/tstest" "tailscale.com/types/key" "tailscale.com/types/netmap" "tailscale.com/types/persist" @@ -89,7 +91,7 @@ func TestTKAEnablementFlow(t *testing.T) { // our mock server can communicate. nlPriv := key.NewNLPrivate() key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - a1, genesisAUM, err := tka.Create(&tka.Mem{}, tka.State{ + a1, genesisAUM, err := tka.Create(tka.ChonkMem(), tka.State{ Keys: []tka.Key{key}, DisablementSecrets: [][]byte{bytes.Repeat([]byte{0xa5}, 32)}, }, nlPriv) @@ -399,7 +401,7 @@ func TestTKASync(t *testing.T) { // Setup the tka authority on the control plane. 
key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - controlStorage := &tka.Mem{} + controlStorage := tka.ChonkMem() controlAuthority, bootstrap, err := tka.Create(controlStorage, tka.State{ Keys: []tka.Key{key, someKey}, DisablementSecrets: [][]byte{tka.DisablementKDF(disablementSecret)}, @@ -548,10 +550,226 @@ func TestTKASync(t *testing.T) { } } +// Whenever we run a TKA sync and get new state from control, we compact the +// local state. +func TestTKASyncTriggersCompact(t *testing.T) { + someKeyPriv := key.NewNLPrivate() + someKey := tka.Key{Kind: tka.Key25519, Public: someKeyPriv.Public().Verifier(), Votes: 1} + + disablementSecret := bytes.Repeat([]byte{0xa5}, 32) + + nodePriv := key.NewNode() + nlPriv := key.NewNLPrivate() + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) + must.Do(pm.SetPrefs((&ipn.Prefs{ + Persist: &persist.Persist{ + PrivateNodeKey: nodePriv, + NetworkLockKey: nlPriv, + }, + }).View(), ipn.NetworkProfile{})) + + // Create a clock, and roll it back by 30 days. + // + // Our compaction algorithm preserves AUMs received in the last 14 days, so + // we need to backdate the commit times to make the AUMs eligible for compaction. + clock := tstest.NewClock(tstest.ClockOpts{}) + clock.Advance(-30 * 24 * time.Hour) + + // Set up the TKA authority on the control plane. + key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} + controlStorage := tka.ChonkMem() + controlStorage.SetClock(clock) + controlAuthority, bootstrap, err := tka.Create(controlStorage, tka.State{ + Keys: []tka.Key{key, someKey}, + DisablementSecrets: [][]byte{tka.DisablementKDF(disablementSecret)}, + }, nlPriv) + if err != nil { + t.Fatalf("tka.Create() failed: %v", err) + } + + // Fill the control plane TKA authority with a lot of AUMs, enough so that: + // + // 1. the chain of AUMs includes some checkpoints + // 2. the chain is long enough it would be trimmed if we ran the compaction + // algorithm with the defaults + for range 100 { + upd := controlAuthority.NewUpdater(nlPriv) + if err := upd.RemoveKey(someKey.MustID()); err != nil { + t.Fatalf("RemoveKey: %v", err) + } + if err := upd.AddKey(someKey); err != nil { + t.Fatalf("AddKey: %v", err) + } + aums, err := upd.Finalize(controlStorage) + if err != nil { + t.Fatalf("Finalize: %v", err) + } + if err := controlAuthority.Inform(controlStorage, aums); err != nil { + t.Fatalf("controlAuthority.Inform() failed: %v", err) + } + } + + // Set up the TKA authority on the node. + nodeStorage := tka.ChonkMem() + nodeStorage.SetClock(clock) + nodeAuthority, err := tka.Bootstrap(nodeStorage, bootstrap) + if err != nil { + t.Fatalf("tka.Bootstrap() failed: %v", err) + } + + // Make a mock control server. 
+ ts, client := fakeNoiseServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + switch r.URL.Path { + case "/machine/tka/sync/offer": + body := new(tailcfg.TKASyncOfferRequest) + if err := json.NewDecoder(r.Body).Decode(body); err != nil { + t.Fatal(err) + } + t.Logf("got sync offer:\n%+v", body) + nodeOffer, err := toSyncOffer(body.Head, body.Ancestors) + if err != nil { + t.Fatal(err) + } + controlOffer, err := controlAuthority.SyncOffer(controlStorage) + if err != nil { + t.Fatal(err) + } + sendAUMs, err := controlAuthority.MissingAUMs(controlStorage, nodeOffer) + if err != nil { + t.Fatal(err) + } + + head, ancestors, err := fromSyncOffer(controlOffer) + if err != nil { + t.Fatal(err) + } + resp := tailcfg.TKASyncOfferResponse{ + Head: head, + Ancestors: ancestors, + MissingAUMs: make([]tkatype.MarshaledAUM, len(sendAUMs)), + } + for i, a := range sendAUMs { + resp.MissingAUMs[i] = a.Serialize() + } + + t.Logf("responding to sync offer with:\n%+v", resp) + w.WriteHeader(200) + if err := json.NewEncoder(w).Encode(resp); err != nil { + t.Fatal(err) + } + + case "/machine/tka/sync/send": + body := new(tailcfg.TKASyncSendRequest) + if err := json.NewDecoder(r.Body).Decode(body); err != nil { + t.Fatal(err) + } + t.Logf("got sync send:\n%+v", body) + + var remoteHead tka.AUMHash + if err := remoteHead.UnmarshalText([]byte(body.Head)); err != nil { + t.Fatalf("head unmarshal: %v", err) + } + toApply := make([]tka.AUM, len(body.MissingAUMs)) + for i, a := range body.MissingAUMs { + if err := toApply[i].Unserialize(a); err != nil { + t.Fatalf("decoding missingAUM[%d]: %v", i, err) + } + } + + if len(toApply) > 0 { + if err := controlAuthority.Inform(controlStorage, toApply); err != nil { + t.Fatalf("control.Inform(%+v) failed: %v", toApply, err) + } + } + head, err := controlAuthority.Head().MarshalText() + if err != nil { + t.Fatal(err) + } + + w.WriteHeader(200) + if err := json.NewEncoder(w).Encode(tailcfg.TKASyncSendResponse{ + Head: string(head), + }); err != nil { + t.Fatal(err) + } + + default: + t.Errorf("unhandled endpoint path: %v", r.URL.Path) + w.WriteHeader(404) + } + })) + defer ts.Close() + + // Setup the client. + cc, _ := fakeControlClient(t, client) + b := LocalBackend{ + cc: cc, + ccAuto: cc, + logf: t.Logf, + pm: pm, + store: pm.Store(), + tka: &tkaState{ + authority: nodeAuthority, + storage: nodeStorage, + }, + } + + // Trigger a sync. + err = b.tkaSyncIfNeeded(&netmap.NetworkMap{ + TKAEnabled: true, + TKAHead: controlAuthority.Head(), + }, pm.CurrentPrefs()) + if err != nil { + t.Errorf("tkaSyncIfNeeded() failed: %v", err) + } + + // Add a new AUM in control. + upd := controlAuthority.NewUpdater(nlPriv) + if err := upd.RemoveKey(someKey.MustID()); err != nil { + t.Fatalf("RemoveKey: %v", err) + } + aums, err := upd.Finalize(controlStorage) + if err != nil { + t.Fatalf("Finalize: %v", err) + } + if err := controlAuthority.Inform(controlStorage, aums); err != nil { + t.Fatalf("controlAuthority.Inform() failed: %v", err) + } + + // Run a second sync, which should trigger a compaction. + err = b.tkaSyncIfNeeded(&netmap.NetworkMap{ + TKAEnabled: true, + TKAHead: controlAuthority.Head(), + }, pm.CurrentPrefs()) + if err != nil { + t.Errorf("tkaSyncIfNeeded() failed: %v", err) + } + + // Check that the node and control plane are in sync. 
+ if nodeHead, controlHead := b.tka.authority.Head(), controlAuthority.Head(); nodeHead != controlHead { + t.Errorf("node head = %v, want %v", nodeHead, controlHead) + } + + // Check the node has compacted away some of its AUMs; that it has purged some AUMs which + // are still kept in the control plane. + nodeAUMs, err := b.tka.storage.AllAUMs() + if err != nil { + t.Errorf("AllAUMs() for node failed: %v", err) + } + controlAUMS, err := controlStorage.AllAUMs() + if err != nil { + t.Errorf("AllAUMs() for control failed: %v", err) + } + if len(nodeAUMs) == len(controlAUMS) { + t.Errorf("node has not compacted; it has the same number of AUMs as control (node = control = %d)", len(nodeAUMs)) + } +} + func TestTKAFilterNetmap(t *testing.T) { nlPriv := key.NewNLPrivate() nlKey := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - storage := &tka.Mem{} + storage := tka.ChonkMem() authority, _, err := tka.Create(storage, tka.State{ Keys: []tka.Key{nlKey}, DisablementSecrets: [][]byte{bytes.Repeat([]byte{0xa5}, 32)}, diff --git a/tka/builder_test.go b/tka/builder_test.go index 52907186b..3fd32f64e 100644 --- a/tka/builder_test.go +++ b/tka/builder_test.go @@ -28,7 +28,7 @@ func TestAuthorityBuilderAddKey(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -62,7 +62,7 @@ func TestAuthorityBuilderMaxKey(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -109,7 +109,7 @@ func TestAuthorityBuilderRemoveKey(t *testing.T) { pub2, _ := testingKey25519(t, 2) key2 := Key{Kind: Key25519, Public: pub2, Votes: 1} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key, key2}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -155,7 +155,7 @@ func TestAuthorityBuilderSetKeyVote(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -191,7 +191,7 @@ func TestAuthorityBuilderSetKeyMeta(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2, Meta: map[string]string{"a": "b"}} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -227,7 +227,7 @@ func TestAuthorityBuilderMultiple(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -275,7 +275,7 @@ func TestAuthorityBuilderCheckpointsAfterXUpdates(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, diff --git a/tka/chaintest_test.go b/tka/chaintest_test.go index 5811f9c83..a3122b5d1 100644 --- 
a/tka/chaintest_test.go +++ b/tka/chaintest_test.go @@ -285,25 +285,25 @@ func (c *testChain) makeAUM(v *testchainNode) AUM { // Chonk returns a tailchonk containing all AUMs. func (c *testChain) Chonk() Chonk { - var out Mem + out := ChonkMem() for _, update := range c.AUMs { if err := out.CommitVerifiedAUMs([]AUM{update}); err != nil { panic(err) } } - return &out + return out } // ChonkWith returns a tailchonk containing the named AUMs. func (c *testChain) ChonkWith(names ...string) Chonk { - var out Mem + out := ChonkMem() for _, name := range names { update := c.AUMs[name] if err := out.CommitVerifiedAUMs([]AUM{update}); err != nil { panic(err) } } - return &out + return out } type testchainOpt struct { diff --git a/tka/key_test.go b/tka/key_test.go index fc379e246..327de1a0e 100644 --- a/tka/key_test.go +++ b/tka/key_test.go @@ -72,7 +72,7 @@ func TestNLPrivate(t *testing.T) { // Test that key.NLPrivate implements Signer by making a new // authority. k := Key{Kind: Key25519, Public: pub.Verifier(), Votes: 1} - _, aum, err := Create(&Mem{}, State{ + _, aum, err := Create(ChonkMem(), State{ Keys: []Key{k}, DisablementSecrets: [][]byte{bytes.Repeat([]byte{1}, 32)}, }, p) diff --git a/tka/sync_test.go b/tka/sync_test.go index f9d86c16a..ea14a37e5 100644 --- a/tka/sync_test.go +++ b/tka/sync_test.go @@ -346,7 +346,7 @@ func TestSyncSimpleE2E(t *testing.T) { optKey("key", key, priv), optSignAllUsing("key")) - nodeStorage := &Mem{} + nodeStorage := ChonkMem() node, err := Bootstrap(nodeStorage, c.AUMs["G1"]) if err != nil { t.Fatalf("node Bootstrap() failed: %v", err) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index d92016c45..a55033bcd 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -19,6 +19,8 @@ import ( "github.com/fxamacker/cbor/v2" "tailscale.com/atomicfile" + "tailscale.com/tstime" + "tailscale.com/util/testenv" ) // Chonk implementations provide durable storage for AUMs and other @@ -92,6 +94,7 @@ type Mem struct { mu sync.RWMutex aums map[AUMHash]AUM commitTimes map[AUMHash]time.Time + clock tstime.Clock // parentIndex is a map of AUMs to the AUMs for which they are // the parent. @@ -103,6 +106,23 @@ type Mem struct { lastActiveAncestor *AUMHash } +// ChonkMem returns an implementation of Chonk which stores TKA state +// in-memory. +func ChonkMem() *Mem { + return &Mem{ + clock: tstime.DefaultClock{}, + } +} + +// SetClock sets the clock used by [Mem]. This is only for use in tests, +// and will panic if called from non-test code. +func (c *Mem) SetClock(clock tstime.Clock) { + if !testenv.InTest() { + panic("used SetClock in non-test code") + } + c.clock = clock +} + func (c *Mem) SetLastActiveAncestor(hash AUMHash) error { c.mu.Lock() defer c.mu.Unlock() @@ -173,7 +193,7 @@ updateLoop: for _, aum := range updates { aumHash := aum.Hash() c.aums[aumHash] = aum - c.commitTimes[aumHash] = time.Now() + c.commitTimes[aumHash] = c.clock.Now() parent, ok := aum.Parent() if ok { diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index 7125c99fe..eeb6edfff 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -35,7 +35,7 @@ func randHash(t *testing.T, seed int64) [blake2s.Size]byte { } func TestImplementsChonk(t *testing.T) { - impls := []Chonk{&Mem{}, &FS{}} + impls := []Chonk{ChonkMem(), &FS{}} t.Logf("chonks: %v", impls) } @@ -229,7 +229,7 @@ func TestMarkActiveChain(t *testing.T) { verdict := make(map[AUMHash]retainState, len(tc.chain)) // Build the state of the tailchonk for tests. 
- storage := &Mem{} + storage := ChonkMem() var prev AUMHash for i := range tc.chain { if !prev.IsZero() { @@ -608,7 +608,7 @@ func TestCompactLongButYoung(t *testing.T) { ourKey := Key{Kind: Key25519, Public: ourPriv.Public().Verifier(), Votes: 1} someOtherKey := Key{Kind: Key25519, Public: key.NewNLPrivate().Public().Verifier(), Votes: 1} - storage := &Mem{} + storage := ChonkMem() auth, _, err := Create(storage, State{ Keys: []Key{ourKey, someOtherKey}, DisablementSecrets: [][]byte{DisablementKDF(bytes.Repeat([]byte{0xa5}, 32))}, diff --git a/tka/tka_test.go b/tka/tka_test.go index 9e3c4e79d..78af7400d 100644 --- a/tka/tka_test.go +++ b/tka/tka_test.go @@ -253,7 +253,7 @@ func TestOpenAuthority(t *testing.T) { } // Construct the state of durable storage. - chonk := &Mem{} + chonk := ChonkMem() err := chonk.CommitVerifiedAUMs([]AUM{g1, i1, l1, i2, i3, l2, l3, g2, l4}) if err != nil { t.Fatal(err) @@ -275,7 +275,7 @@ func TestOpenAuthority(t *testing.T) { } func TestOpenAuthority_EmptyErrors(t *testing.T) { - _, err := Open(&Mem{}) + _, err := Open(ChonkMem()) if err == nil { t.Error("Expected an error initializing an empty authority, got nil") } @@ -319,7 +319,7 @@ func TestCreateBootstrapAuthority(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - a1, genesisAUM, err := Create(&Mem{}, State{ + a1, genesisAUM, err := Create(ChonkMem(), State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, }, signer25519(priv)) @@ -327,7 +327,7 @@ func TestCreateBootstrapAuthority(t *testing.T) { t.Fatalf("Create() failed: %v", err) } - a2, err := Bootstrap(&Mem{}, genesisAUM) + a2, err := Bootstrap(ChonkMem(), genesisAUM) if err != nil { t.Fatalf("Bootstrap() failed: %v", err) } @@ -366,7 +366,7 @@ func TestAuthorityInformNonLinear(t *testing.T) { optKey("key", key, priv), optSignAllUsing("key")) - storage := &Mem{} + storage := ChonkMem() a, err := Bootstrap(storage, c.AUMs["G1"]) if err != nil { t.Fatalf("Bootstrap() failed: %v", err) @@ -411,7 +411,7 @@ func TestAuthorityInformLinear(t *testing.T) { optKey("key", key, priv), optSignAllUsing("key")) - storage := &Mem{} + storage := ChonkMem() a, err := Bootstrap(storage, c.AUMs["G1"]) if err != nil { t.Fatalf("Bootstrap() failed: %v", err) @@ -444,7 +444,7 @@ func TestInteropWithNLKey(t *testing.T) { pub2 := key.NewNLPrivate().Public() pub3 := key.NewNLPrivate().Public() - a, _, err := Create(&Mem{}, State{ + a, _, err := Create(ChonkMem(), State{ Keys: []Key{ { Kind: Key25519, diff --git a/tstest/chonktest/tailchonk_test.go b/tstest/chonktest/tailchonk_test.go index 6dfab798e..d9343e916 100644 --- a/tstest/chonktest/tailchonk_test.go +++ b/tstest/chonktest/tailchonk_test.go @@ -18,7 +18,7 @@ func TestImplementsChonk(t *testing.T) { { name: "Mem", newChonk: func(t *testing.T) tka.Chonk { - return &tka.Mem{} + return tka.ChonkMem() }, }, { @@ -42,7 +42,7 @@ func TestImplementsCompactableChonk(t *testing.T) { { name: "Mem", newChonk: func(t *testing.T) tka.CompactableChonk { - return &tka.Mem{} + return tka.ChonkMem() }, }, { From 62d64c05e1e8f6335627de9eca17aebc2c9910c1 Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Wed, 19 Nov 2025 07:08:40 -0600 Subject: [PATCH 1699/1708] cmd/k8s-operator: fix type comparison in apiserver proxy template (#17981) ArgoCD sends boolean values but the template expects strings, causing "incompatible types for comparison" errors. Wrap values with toString so both work. 
Fixes #17158 Signed-off-by: Raj Singh --- .../deploy/chart/templates/apiserverproxy-rbac.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml index ad0a6fb66..d6e9d1bf4 100644 --- a/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml @@ -3,8 +3,8 @@ # If old setting used, enable both old (operator) and new (ProxyGroup) workflows. # If new setting used, enable only new workflow. -{{ if or (eq .Values.apiServerProxyConfig.mode "true") - (eq .Values.apiServerProxyConfig.allowImpersonation "true") }} +{{ if or (eq (toString .Values.apiServerProxyConfig.mode) "true") + (eq (toString .Values.apiServerProxyConfig.allowImpersonation) "true") }} apiVersion: v1 kind: ServiceAccount metadata: @@ -25,7 +25,7 @@ kind: ClusterRoleBinding metadata: name: tailscale-auth-proxy subjects: -{{- if eq .Values.apiServerProxyConfig.mode "true" }} +{{- if eq (toString .Values.apiServerProxyConfig.mode) "true" }} - kind: ServiceAccount name: operator namespace: {{ .Release.Namespace }} From aeda3e81832158fb70715bb99a7d249a55b21694 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 19 Nov 2025 09:41:43 +0000 Subject: [PATCH 1700/1708] ipn/ipnlocal: reduce profileManager boilerplate in network-lock tests Updates tailscale/corp#33537 Signed-off-by: Alex Chan --- ipn/ipnlocal/network-lock_test.go | 116 +++++++++--------------------- 1 file changed, 33 insertions(+), 83 deletions(-) diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index c07e65ee3..5d22425a1 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -37,13 +37,12 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/persist" "tailscale.com/types/tkatype" - "tailscale.com/util/eventbus" "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/set" ) -func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *eventbus.Bus) { +func fakeControlClient(t *testing.T, c *http.Client) *controlclient.Auto { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni @@ -51,7 +50,6 @@ func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *even k := key.NewMachine() dialer := tsdial.NewDialer(netmon.NewStatic()) - dialer.SetBus(bus) opts := controlclient.Options{ ServerURL: "https://example.com", Hostinfo: hi, @@ -70,10 +68,11 @@ func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *even if err != nil { t.Fatal(err) } - return cc, bus + return cc } func fakeNoiseServer(t *testing.T, handler http.HandlerFunc) (*httptest.Server, *http.Client) { + t.Helper() ts := httptest.NewUnstartedServer(handler) ts.StartTLS() client := ts.Client() @@ -84,6 +83,17 @@ func fakeNoiseServer(t *testing.T, handler http.HandlerFunc) (*httptest.Server, return ts, client } +func setupProfileManager(t *testing.T, nodePriv key.NodePrivate, nlPriv key.NLPrivate) *profileManager { + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) + must.Do(pm.SetPrefs((&ipn.Prefs{ + Persist: &persist.Persist{ + PrivateNodeKey: nodePriv, + NetworkLockKey: nlPriv, + }, + }).View(), ipn.NetworkProfile{})) + return pm +} + func TestTKAEnablementFlow(t *testing.T) { nodePriv := key.NewNode() @@ -158,14 +168,8 @@ func TestTKAEnablementFlow(t 
*testing.T) { defer ts.Close() temp := t.TempDir() - cc, bus := fakeControlClient(t, client) - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(bus))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + cc := fakeControlClient(t, client) + pm := setupProfileManager(t, nodePriv, nlPriv) b := LocalBackend{ capTailnetLock: true, varRoot: temp, @@ -199,13 +203,7 @@ func TestTKADisablementFlow(t *testing.T) { nlPriv := key.NewNLPrivate() key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) temp := t.TempDir() tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID())) @@ -267,7 +265,7 @@ func TestTKADisablementFlow(t *testing.T) { })) defer ts.Close() - cc, _ := fakeControlClient(t, client) + cc := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -391,13 +389,7 @@ func TestTKASync(t *testing.T) { t.Run(tc.name, func(t *testing.T) { nodePriv := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) // Setup the tka authority on the control plane. key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} @@ -518,7 +510,7 @@ func TestTKASync(t *testing.T) { defer ts.Close() // Setup the client. - cc, _ := fakeControlClient(t, client) + cc := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -560,13 +552,7 @@ func TestTKASyncTriggersCompact(t *testing.T) { nodePriv := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) // Create a clock, and roll it back by 30 days. // @@ -702,7 +688,7 @@ func TestTKASyncTriggersCompact(t *testing.T) { defer ts.Close() // Setup the client. 
- cc, _ := fakeControlClient(t, client) + cc := fakeControlClient(t, client) b := LocalBackend{ cc: cc, ccAuto: cc, @@ -923,13 +909,7 @@ func TestTKADisable(t *testing.T) { disablementSecret := bytes.Repeat([]byte{0xa5}, 32) nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) temp := t.TempDir() tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID())) @@ -985,7 +965,7 @@ func TestTKADisable(t *testing.T) { })) defer ts.Close() - cc, _ := fakeControlClient(t, client) + cc := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -1014,13 +994,7 @@ func TestTKASign(t *testing.T) { toSign := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) // Make a fake TKA authority, to seed local state. disablementSecret := bytes.Repeat([]byte{0xa5}, 32) @@ -1076,7 +1050,7 @@ func TestTKASign(t *testing.T) { } })) defer ts.Close() - cc, _ := fakeControlClient(t, client) + cc := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -1103,13 +1077,7 @@ func TestTKAForceDisable(t *testing.T) { nlPriv := key.NewNLPrivate() key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) temp := t.TempDir() tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID())) @@ -1156,7 +1124,7 @@ func TestTKAForceDisable(t *testing.T) { })) defer ts.Close() - cc, _ := fakeControlClient(t, client) + cc := fakeControlClient(t, client) sys := tsd.NewSystem() sys.Set(pm.Store()) @@ -1201,13 +1169,7 @@ func TestTKAAffectedSigs(t *testing.T) { // toSign := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) // Make a fake TKA authority, to seed local state. 
disablementSecret := bytes.Repeat([]byte{0xa5}, 32) @@ -1292,7 +1254,7 @@ func TestTKAAffectedSigs(t *testing.T) { } })) defer ts.Close() - cc, _ := fakeControlClient(t, client) + cc := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -1334,13 +1296,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { cosignPriv := key.NewNLPrivate() compromisedPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) // Make a fake TKA authority, to seed local state. disablementSecret := bytes.Repeat([]byte{0xa5}, 32) @@ -1404,7 +1360,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { } })) defer ts.Close() - cc, _ := fakeControlClient(t, client) + cc := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -1425,13 +1381,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { // Cosign using the cosigning key. { - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: cosignPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, cosignPriv) b := LocalBackend{ varRoot: temp, logf: t.Logf, From 336df56f85459be76c4117baf76ce7851df4ba68 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 18 Nov 2025 17:04:01 +0000 Subject: [PATCH 1701/1708] cmd/tailscale/cli: remove Latin abbreviations from CLI help text Our style guide recommends avoiding Latin abbreviations in technical documentation, which includes the CLI help text. This is causing linter issues for the docs site, because this help text is copied into the docs. 
See http://go/style-guide/kb/language-and-grammar/abbreviations#latin-abbreviations Updates #cleanup Change-Id: I980c28d996466f0503aaaa65127685f4af608039 Signed-off-by: Alex Chan --- cmd/tailscale/cli/set.go | 2 +- cmd/tailscale/cli/up.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 3b5e032db..cb3a07a6f 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -110,7 +110,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { switch goos { case "linux": setf.BoolVar(&setArgs.snat, "snat-subnet-routes", true, "source NAT traffic to local routes advertised with --advertise-routes") - setf.BoolVar(&setArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, etc.)") + setf.BoolVar(&setArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, and so on)") setf.StringVar(&setArgs.netfilterMode, "netfilter-mode", defaultNetfilterMode(), "netfilter mode (one of on, nodivert, off)") case "windows": setf.BoolVar(&setArgs.forceDaemon, "unattended", false, "run in \"Unattended Mode\" where Tailscale keeps running even after the current GUI user logs out (Windows-only)") diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index e8b0cd0d3..7f5b2e6b4 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -122,7 +122,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { switch goos { case "linux": upf.BoolVar(&upArgs.snat, "snat-subnet-routes", true, "source NAT traffic to local routes advertised with --advertise-routes") - upf.BoolVar(&upArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, etc.)") + upf.BoolVar(&upArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, and so on)") upf.StringVar(&upArgs.netfilterMode, "netfilter-mode", defaultNetfilterMode(), "netfilter mode (one of on, nodivert, off)") case "windows": upf.BoolVar(&upArgs.forceDaemon, "unattended", false, "run in \"Unattended Mode\" where Tailscale keeps running even after the current GUI user logs out (Windows-only)") From 6ac4356bce25daf4f9e7da9612a95607666d3c5f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 18 Nov 2025 18:15:08 -0800 Subject: [PATCH 1702/1708] util/eventbus: simplify some reflect in Bus.pump Updates #cleanup Change-Id: Ib7b497e22c6cdd80578c69cf728d45754e6f909e Signed-off-by: Brad Fitzpatrick --- util/eventbus/bus.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 46fa5b198..aa6880d01 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -134,7 +134,7 @@ func (b *Bus) pump(ctx context.Context) { // queue space for it. 
for !vals.Empty() { val := vals.Peek() - dests := b.dest(reflect.ValueOf(val.Event).Type()) + dests := b.dest(reflect.TypeOf(val.Event)) if b.routeDebug.active() { clients := make([]*Client, len(dests)) From 976bf24f5e9e2e3a1ea93598f0a20e7820a94f11 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 19 Nov 2025 13:57:14 +0000 Subject: [PATCH 1703/1708] ipn/ipnlocal: remove the always-true CanSupportNetworkLock() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we support using an in-memory backend for TKA state (#17946), this function always returns `nil` – we can always support Network Lock. We don't need it any more. Plus, clean up a couple of errant TODOs from that PR. Updates tailscale/corp#33599 Change-Id: Ief93bb9adebb82b9ad1b3e406d1ae9d2fa234877 Signed-off-by: Alex Chan --- ipn/ipnlocal/network-lock.go | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 8c77cd92d..78d4d236d 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -300,10 +300,6 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie return nil } - if err := b.CanSupportNetworkLock(); err != nil { - return err - } - isEnabled := b.tka != nil wantEnabled := nm.TKAEnabled @@ -488,10 +484,6 @@ func (b *LocalBackend) chonkPathLocked() string { // // b.mu must be held. func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, persist persist.PersistView) error { - if err := b.CanSupportNetworkLock(); err != nil { - return err - } - var genesis tka.AUM if err := genesis.Unserialize(g); err != nil { return fmt.Errorf("reading genesis: %v", err) @@ -537,20 +529,6 @@ func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, per return nil } -// CanSupportNetworkLock returns nil if tailscaled is able to operate -// a local tailnet key authority (and hence enforce network lock). -func (b *LocalBackend) CanSupportNetworkLock() error { - if b.tka != nil { - // If the TKA is being used, it is supported. - return nil - } - - // There's a var root (aka --statedir), so if network lock gets - // initialized we have somewhere to store our AUMs. That's all - // we need. - return nil -} - // NetworkLockStatus returns a structure describing the state of the // tailnet key authority, if any. func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus { @@ -664,12 +642,7 @@ func tkaStateFromPeer(p tailcfg.NodeView) ipnstate.TKAPeer { // needing signatures is returned as a response. // The Finish RPC submits signatures for all these nodes, at which point // Control has everything it needs to atomically enable network lock. -// TODO(alexc): Only with persistent backend func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) error { - if err := b.CanSupportNetworkLock(); err != nil { - return err - } - var ourNodeKey key.NodePublic var nlPriv key.NLPrivate @@ -794,7 +767,6 @@ func (b *LocalBackend) NetworkLockForceLocalDisable() error { // NetworkLockSign signs the given node-key and submits it to the control plane. // rotationPublic, if specified, must be an ed25519 public key. 
-// TODO(alexc): in-memory only func (b *LocalBackend) NetworkLockSign(nodeKey key.NodePublic, rotationPublic []byte) error { ourNodeKey, sig, err := func(nodeKey key.NodePublic, rotationPublic []byte) (key.NodePublic, tka.NodeKeySignature, error) { b.mu.Lock() From 12c598de285f9fc719061892a59960c7ec5cf820 Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 17 Nov 2025 15:02:51 +0000 Subject: [PATCH 1704/1708] licenses: update license notices Signed-off-by: License Updater --- licenses/tailscale.md | 1 + 1 file changed, 1 insertion(+) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index c04e55563..163a76d40 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -69,6 +69,7 @@ Some packages may only be included on certain architectures or operating systems - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/peterbourgon/ff/v3](https://pkg.go.dev/github.com/peterbourgon/ff/v3) ([Apache-2.0](https://github.com/peterbourgon/ff/blob/v3.4.0/LICENSE)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) + - [github.com/pires/go-proxyproto](https://pkg.go.dev/github.com/pires/go-proxyproto) ([Apache-2.0](https://github.com/pires/go-proxyproto/blob/v0.8.1/LICENSE)) - [github.com/pkg/sftp](https://pkg.go.dev/github.com/pkg/sftp) ([BSD-2-Clause](https://github.com/pkg/sftp/blob/v1.13.6/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) From a0d059d74c3d3e7274d224a4cb91f7348b3faa53 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Wed, 19 Nov 2025 12:29:08 -0500 Subject: [PATCH 1705/1708] cmd/tailscale/cli: allow remote target as service destination (#17607) This commit enables user to set service backend to remote destinations, that can be a partial URL or a full URL. The commit also prevents user to set remote destinations on linux system when socket mark is not working. For user on any version of mac extension they can't serve a service either. The socket mark usability is determined by a new local api. Fixes tailscale/corp#24783 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- client/local/local.go | 17 +++++ cmd/tailscale/cli/serve_legacy.go | 1 + cmd/tailscale/cli/serve_legacy_test.go | 5 ++ cmd/tailscale/cli/serve_v2.go | 100 ++++++++++++++++++++++--- cmd/tailscale/cli/serve_v2_test.go | 43 ++++++++--- ipn/localapi/localapi.go | 47 ++++++++---- ipn/serve.go | 38 ++++++++-- ipn/serve_test.go | 6 +- net/netns/netns_default.go | 4 + net/netns/netns_dw.go | 4 + 10 files changed, 221 insertions(+), 44 deletions(-) diff --git a/client/local/local.go b/client/local/local.go index a5e04f122..72ddbb55f 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -1401,6 +1401,23 @@ func (lc *Client) SuggestExitNode(ctx context.Context) (apitype.ExitNodeSuggesti return decodeJSON[apitype.ExitNodeSuggestionResponse](body) } +// CheckSOMarkInUse reports whether the socket mark option is in use. This will only +// be true if tailscale is running on Linux and tailscaled uses SO_MARK. 
+func (lc *Client) CheckSOMarkInUse(ctx context.Context) (bool, error) { + body, err := lc.get200(ctx, "/localapi/v0/check-so-mark-in-use") + if err != nil { + return false, err + } + var res struct { + UseSOMark bool `json:"useSoMark"` + } + + if err := json.Unmarshal(body, &res); err != nil { + return false, fmt.Errorf("invalid JSON from check-so-mark-in-use: %w", err) + } + return res.UseSOMark, nil +} + // ShutdownTailscaled requests a graceful shutdown of tailscaled. func (lc *Client) ShutdownTailscaled(ctx context.Context) error { _, err := lc.send(ctx, "POST", "/localapi/v0/shutdown", 200, nil) diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 171ec335c..580393ce4 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -149,6 +149,7 @@ type localServeClient interface { IncrementCounter(ctx context.Context, name string, delta int) error GetPrefs(ctx context.Context) (*ipn.Prefs, error) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) + CheckSOMarkInUse(ctx context.Context) (bool, error) } // serveEnv is the environment the serve command runs within. All I/O should be diff --git a/cmd/tailscale/cli/serve_legacy_test.go b/cmd/tailscale/cli/serve_legacy_test.go index 1d3854b0b..819017ad8 100644 --- a/cmd/tailscale/cli/serve_legacy_test.go +++ b/cmd/tailscale/cli/serve_legacy_test.go @@ -860,6 +860,7 @@ type fakeLocalServeClient struct { setCount int // counts calls to SetServeConfig queryFeatureResponse *mockQueryFeatureResponse // mock response to QueryFeature calls prefs *ipn.Prefs // fake preferences, used to test GetPrefs and SetPrefs + SOMarkInUse bool // fake SO mark in use status statusWithoutPeers *ipnstate.Status // nil for fakeStatus } @@ -937,6 +938,10 @@ func (lc *fakeLocalServeClient) IncrementCounter(ctx context.Context, name strin return nil // unused in tests } +func (lc *fakeLocalServeClient) CheckSOMarkInUse(ctx context.Context) (bool, error) { + return lc.SOMarkInUse, nil +} + // exactError returns an error checker that wants exactly the provided want error. // If optName is non-empty, it's used in the error message. func exactErr(want error, optName ...string) func(error) string { diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 33b676bf8..b60e645f3 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -21,6 +21,7 @@ import ( "path" "path/filepath" "regexp" + "runtime" "slices" "sort" "strconv" @@ -33,6 +34,7 @@ import ( "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" "tailscale.com/types/ipproto" + "tailscale.com/util/dnsname" "tailscale.com/util/mak" "tailscale.com/util/prompt" "tailscale.com/util/set" @@ -516,6 +518,9 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { if len(args) > 0 { target = args[0] } + if err := e.shouldWarnRemoteDestCompatibility(ctx, target); err != nil { + return err + } err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix, e.acceptAppCaps, int(e.proxyProtocol)) msg = e.messageForPort(sc, st, dnsName, srvType, srvPort) } @@ -999,16 +1004,17 @@ func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveTy } var ( - msgFunnelAvailable = "Available on the internet:" - msgServeAvailable = "Available within your tailnet:" - msgServiceWaitingApproval = "This machine is configured as a service proxy for %s, but approval from an admin is required. 
Once approved, it will be available in your Tailnet as:" - msgRunningInBackground = "%s started and running in the background." - msgRunningTunService = "IPv4 and IPv6 traffic to %s is being routed to your operating system." - msgDisableProxy = "To disable the proxy, run: tailscale %s --%s=%d off" - msgDisableServiceProxy = "To disable the proxy, run: tailscale serve --service=%s --%s=%d off" - msgDisableServiceTun = "To disable the service in TUN mode, run: tailscale serve --service=%s --tun off" - msgDisableService = "To remove config for the service, run: tailscale serve clear %s" - msgToExit = "Press Ctrl+C to exit." + msgFunnelAvailable = "Available on the internet:" + msgServeAvailable = "Available within your tailnet:" + msgServiceWaitingApproval = "This machine is configured as a service proxy for %s, but approval from an admin is required. Once approved, it will be available in your Tailnet as:" + msgRunningInBackground = "%s started and running in the background." + msgRunningTunService = "IPv4 and IPv6 traffic to %s is being routed to your operating system." + msgDisableProxy = "To disable the proxy, run: tailscale %s --%s=%d off" + msgDisableServiceProxy = "To disable the proxy, run: tailscale serve --service=%s --%s=%d off" + msgDisableServiceTun = "To disable the service in TUN mode, run: tailscale serve --service=%s --tun off" + msgDisableService = "To remove config for the service, run: tailscale serve clear %s" + msgWarnRemoteDestCompatibility = "Warning: %s doesn't support connecting to remote destinations from non-default route, see tailscale.com/kb/1552/tailscale-services for detail." + msgToExit = "Press Ctrl+C to exit." ) // messageForPort returns a message for the given port based on the @@ -1134,6 +1140,77 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN return output.String() } +// isRemote reports whether the given destination from serve config +// is a remote destination. +func isRemote(target string) bool { + // target being a port number means it's localhost + if _, err := strconv.ParseUint(target, 10, 16); err == nil { + return false + } + + // prepend tmp:// if no scheme is present just to help parsing + if !strings.Contains(target, "://") { + target = "tmp://" + target + } + + // make sure we can parse the target, wether it's a full URL or just a host:port + u, err := url.ParseRequestURI(target) + if err != nil { + // If we can't parse the target, it doesn't matter if it's remote or not + return false + } + validHN := dnsname.ValidHostname(u.Hostname()) == nil + validIP := net.ParseIP(u.Hostname()) != nil + if !validHN && !validIP { + return false + } + if u.Hostname() == "localhost" || u.Hostname() == "127.0.0.1" || u.Hostname() == "::1" { + return false + } + return true +} + +// shouldWarnRemoteDestCompatibility reports whether we should warn the user +// that their current OS/environment may not be compatible with +// service's proxy destination. 
+func (e *serveEnv) shouldWarnRemoteDestCompatibility(ctx context.Context, target string) error { + // no target means nothing to check + if target == "" { + return nil + } + + if filepath.IsAbs(target) || strings.HasPrefix(target, "text:") { + // local path or text target, nothing to check + return nil + } + + // only check for remote destinations + if !isRemote(target) { + return nil + } + + // Check if running as Mac extension and warn + if version.IsMacAppStore() || version.IsMacSysExt() { + return fmt.Errorf(msgWarnRemoteDestCompatibility, "the MacOS extension") + } + + // Check for linux, if it's running with TS_FORCE_LINUX_BIND_TO_DEVICE=true + // and tailscale bypass mark is not working. If any of these conditions are true, and the dest is + // a remote destination, return true. + if runtime.GOOS == "linux" { + SOMarkInUse, err := e.lc.CheckSOMarkInUse(ctx) + if err != nil { + log.Printf("error checking SO mark in use: %v", err) + return nil + } + if !SOMarkInUse { + return fmt.Errorf(msgWarnRemoteDestCompatibility, "the Linux tailscaled without SO_MARK") + } + } + + return nil +} + func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target, mds string, caps []tailcfg.PeerCapability) error { h := new(ipn.HTTPHandler) switch { @@ -1193,6 +1270,8 @@ func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType se return fmt.Errorf("invalid TCP target %q", target) } + svcName := tailcfg.AsServiceName(dnsName) + targetURL, err := ipn.ExpandProxyTargetValue(target, []string{"tcp"}, "tcp") if err != nil { return fmt.Errorf("unable to expand target: %v", err) @@ -1204,7 +1283,6 @@ func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType se } // TODO: needs to account for multiple configs from foreground mode - svcName := tailcfg.AsServiceName(dnsName) if sc.IsServingWeb(srcPort, svcName) { return fmt.Errorf("cannot serve TCP; already serving web on %d for %s", srcPort, dnsName) } diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 5cdb39558..491baf9dd 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -220,10 +220,20 @@ func TestServeDevConfigMutations(t *testing.T) { }}, }, { - name: "invalid_host", + name: "ip_host", + initialState: fakeLocalServeClient{ + SOMarkInUse: true, + }, steps: []step{{ - command: cmd("serve --https=443 --bg http://somehost:3000"), // invalid host - wantErr: anyErr(), + command: cmd("serve --https=443 --bg http://192.168.1.1:3000"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://192.168.1.1:3000"}, + }}, + }, + }, }}, }, { @@ -233,6 +243,16 @@ func TestServeDevConfigMutations(t *testing.T) { wantErr: anyErr(), }}, }, + { + name: "no_scheme_remote_host_tcp", + initialState: fakeLocalServeClient{ + SOMarkInUse: true, + }, + steps: []step{{ + command: cmd("serve --https=443 --bg 192.168.1.1:3000"), + wantErr: exactErrMsg(errHelp), + }}, + }, { name: "turn_off_https", steps: []step{ @@ -402,15 +422,11 @@ func TestServeDevConfigMutations(t *testing.T) { }, }}, }, - { - name: "unknown_host_tcp", - steps: []step{{ - command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:5432"), - wantErr: exactErrMsg(errHelp), - }}, - }, { name: "tcp_port_too_low", + initialState: fakeLocalServeClient{ + SOMarkInUse: true, + }, steps: 
[]step{{ command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:0"), wantErr: exactErrMsg(errHelp), @@ -418,6 +434,9 @@ func TestServeDevConfigMutations(t *testing.T) { }, { name: "tcp_port_too_high", + initialState: fakeLocalServeClient{ + SOMarkInUse: true, + }, steps: []step{{ command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:65536"), wantErr: exactErrMsg(errHelp), @@ -532,6 +551,9 @@ func TestServeDevConfigMutations(t *testing.T) { }, { name: "bad_path", + initialState: fakeLocalServeClient{ + SOMarkInUse: true, + }, steps: []step{{ command: cmd("serve --bg --https=443 bad/path"), wantErr: exactErrMsg(errHelp), @@ -832,6 +854,7 @@ func TestServeDevConfigMutations(t *testing.T) { }, CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, }, + SOMarkInUse: true, }, steps: []step{{ command: cmd("serve --service=svc:foo --http=80 text:foo"), diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index c4ba2a40b..d3503d302 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -35,6 +35,7 @@ import ( "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnstate" "tailscale.com/logtail" + "tailscale.com/net/netns" "tailscale.com/net/netutil" "tailscale.com/tailcfg" "tailscale.com/tstime" @@ -72,20 +73,21 @@ var handler = map[string]LocalAPIHandler{ // The other /localapi/v0/NAME handlers are exact matches and contain only NAME // without a trailing slash: - "check-prefs": (*Handler).serveCheckPrefs, - "derpmap": (*Handler).serveDERPMap, - "goroutines": (*Handler).serveGoroutines, - "login-interactive": (*Handler).serveLoginInteractive, - "logout": (*Handler).serveLogout, - "ping": (*Handler).servePing, - "prefs": (*Handler).servePrefs, - "reload-config": (*Handler).reloadConfig, - "reset-auth": (*Handler).serveResetAuth, - "set-expiry-sooner": (*Handler).serveSetExpirySooner, - "shutdown": (*Handler).serveShutdown, - "start": (*Handler).serveStart, - "status": (*Handler).serveStatus, - "whois": (*Handler).serveWhoIs, + "check-prefs": (*Handler).serveCheckPrefs, + "check-so-mark-in-use": (*Handler).serveCheckSOMarkInUse, + "derpmap": (*Handler).serveDERPMap, + "goroutines": (*Handler).serveGoroutines, + "login-interactive": (*Handler).serveLoginInteractive, + "logout": (*Handler).serveLogout, + "ping": (*Handler).servePing, + "prefs": (*Handler).servePrefs, + "reload-config": (*Handler).reloadConfig, + "reset-auth": (*Handler).serveResetAuth, + "set-expiry-sooner": (*Handler).serveSetExpirySooner, + "shutdown": (*Handler).serveShutdown, + "start": (*Handler).serveStart, + "status": (*Handler).serveStatus, + "whois": (*Handler).serveWhoIs, } func init() { @@ -760,6 +762,23 @@ func (h *Handler) serveCheckIPForwarding(w http.ResponseWriter, r *http.Request) }) } +// serveCheckSOMarkInUse reports whether SO_MARK is in use on the linux while +// running without TUN. For any other OS, it reports false. 
+func (h *Handler) serveCheckSOMarkInUse(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "SO_MARK check access denied", http.StatusForbidden) + return + } + usingSOMark := netns.UseSocketMark() + usingUserspaceNetworking := h.b.Sys().IsNetstack() + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(struct { + UseSOMark bool + }{ + UseSOMark: usingSOMark || usingUserspaceNetworking, + }) +} + func (h *Handler) serveCheckReversePathFiltering(w http.ResponseWriter, r *http.Request) { if !h.PermitRead { http.Error(w, "reverse path filtering check access denied", http.StatusForbidden) diff --git a/ipn/serve.go b/ipn/serve.go index 1aab829fe..74195191c 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -17,6 +17,7 @@ import ( "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" "tailscale.com/types/ipproto" + "tailscale.com/util/dnsname" "tailscale.com/util/mak" "tailscale.com/util/set" ) @@ -673,7 +674,8 @@ func CheckFunnelPort(wantedPort uint16, node *ipnstate.PeerStatus) error { // ExpandProxyTargetValue expands the supported target values to be proxied // allowing for input values to be a port number, a partial URL, or a full URL -// including a path. +// including a path. If it's for a service, remote addresses are allowed and +// there doesn't have to be a port specified. // // examples: // - 3000 @@ -683,17 +685,25 @@ func CheckFunnelPort(wantedPort uint16, node *ipnstate.PeerStatus) error { // - https://localhost:3000 // - https-insecure://localhost:3000 // - https-insecure://localhost:3000/foo +// - https://tailscale.com func ExpandProxyTargetValue(target string, supportedSchemes []string, defaultScheme string) (string, error) { const host = "127.0.0.1" + // empty target is invalid + if target == "" { + return "", fmt.Errorf("empty target") + } + // support target being a port number if port, err := strconv.ParseUint(target, 10, 16); err == nil { return fmt.Sprintf("%s://%s:%d", defaultScheme, host, port), nil } + hasScheme := true // prepend scheme if not present if !strings.Contains(target, "://") { target = defaultScheme + "://" + target + hasScheme = false } // make sure we can parse the target @@ -707,16 +717,28 @@ func ExpandProxyTargetValue(target string, supportedSchemes []string, defaultSch return "", fmt.Errorf("must be a URL starting with one of the supported schemes: %v", supportedSchemes) } - // validate the host. - switch u.Hostname() { - case "localhost", "127.0.0.1": - default: - return "", errors.New("only localhost or 127.0.0.1 proxies are currently supported") + // validate port according to host. 
+ if u.Hostname() == "localhost" || u.Hostname() == "127.0.0.1" || u.Hostname() == "::1" { + // require port for localhost targets + if u.Port() == "" { + return "", fmt.Errorf("port required for localhost target %q", target) + } + } else { + validHN := dnsname.ValidHostname(u.Hostname()) == nil + validIP := net.ParseIP(u.Hostname()) != nil + if !validHN && !validIP { + return "", fmt.Errorf("invalid hostname or IP address %q", u.Hostname()) + } + // require scheme for non-localhost targets + if !hasScheme { + return "", fmt.Errorf("non-localhost target %q must include a scheme", target) + } } - - // validate the port port, err := strconv.ParseUint(u.Port(), 10, 16) if err != nil || port == 0 { + if u.Port() == "" { + return u.String(), nil // allow no port for remote destinations + } return "", fmt.Errorf("invalid port %q", u.Port()) } diff --git a/ipn/serve_test.go b/ipn/serve_test.go index 7028c1e17..063ff3a87 100644 --- a/ipn/serve_test.go +++ b/ipn/serve_test.go @@ -260,12 +260,16 @@ func TestExpandProxyTargetDev(t *testing.T) { {name: "https+insecure-scheme", input: "https+insecure://localhost:8080", expected: "https+insecure://localhost:8080"}, {name: "change-default-scheme", input: "localhost:8080", defaultScheme: "https", expected: "https://localhost:8080"}, {name: "change-supported-schemes", input: "localhost:8080", defaultScheme: "tcp", supportedSchemes: []string{"tcp"}, expected: "tcp://localhost:8080"}, + {name: "remote-target", input: "https://example.com:8080", expected: "https://example.com:8080"}, + {name: "remote-IP-target", input: "http://120.133.20.2:8080", expected: "http://120.133.20.2:8080"}, + {name: "remote-target-no-port", input: "https://example.com", expected: "https://example.com"}, // errors {name: "invalid-port", input: "localhost:9999999", wantErr: true}, + {name: "invalid-hostname", input: "192.168.1:8080", wantErr: true}, {name: "unsupported-scheme", input: "ftp://localhost:8080", expected: "", wantErr: true}, - {name: "not-localhost", input: "https://tailscale.com:8080", expected: "", wantErr: true}, {name: "empty-input", input: "", expected: "", wantErr: true}, + {name: "localhost-no-port", input: "localhost", expected: "", wantErr: true}, } for _, tt := range tests { diff --git a/net/netns/netns_default.go b/net/netns/netns_default.go index 94f24d8fa..58c593664 100644 --- a/net/netns/netns_default.go +++ b/net/netns/netns_default.go @@ -20,3 +20,7 @@ func control(logger.Logf, *netmon.Monitor) func(network, address string, c sysca func controlC(network, address string, c syscall.RawConn) error { return nil } + +func UseSocketMark() bool { + return false +} diff --git a/net/netns/netns_dw.go b/net/netns/netns_dw.go index f92ba9462..b9f750e8a 100644 --- a/net/netns/netns_dw.go +++ b/net/netns/netns_dw.go @@ -25,3 +25,7 @@ func parseAddress(address string) (addr netip.Addr, err error) { return netip.ParseAddr(host) } + +func UseSocketMark() bool { + return false +} From 86a849860e7a407977226359ab5e211bb0b52b34 Mon Sep 17 00:00:00 2001 From: David Bond Date: Thu, 20 Nov 2025 00:00:27 +0000 Subject: [PATCH 1706/1708] cmd/k8s-operator: use stable image for k8s-nameserver (#17985) This commit modifies the kubernetes operator to use the "stable" version of `k8s-nameserver` by default. 
Updates: https://github.com/tailscale/corp/issues/19028 Signed-off-by: David Bond --- cmd/k8s-operator/nameserver.go | 6 ++---- cmd/k8s-operator/nameserver_test.go | 3 ++- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index 5de1c47ba..39db5f0f9 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -26,6 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/yaml" + tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -45,10 +46,7 @@ const ( messageMultipleDNSConfigsPresent = "Multiple DNSConfig resources found in cluster. Please ensure no more than one is present." defaultNameserverImageRepo = "tailscale/k8s-nameserver" - // TODO (irbekrm): once we start publishing nameserver images for stable - // track, replace 'unstable' here with the version of this operator - // instance. - defaultNameserverImageTag = "unstable" + defaultNameserverImageTag = "stable" ) // NameserverReconciler knows how to create nameserver resources in cluster in diff --git a/cmd/k8s-operator/nameserver_test.go b/cmd/k8s-operator/nameserver_test.go index 6da52d8a2..858cd973d 100644 --- a/cmd/k8s-operator/nameserver_test.go +++ b/cmd/k8s-operator/nameserver_test.go @@ -19,6 +19,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/yaml" + operatorutils "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/tstest" @@ -182,7 +183,7 @@ func TestNameserverReconciler(t *testing.T) { dnsCfg.Spec.Nameserver.Image = nil }) expectReconciled(t, reconciler, "", "test") - wantsDeploy.Spec.Template.Spec.Containers[0].Image = "tailscale/k8s-nameserver:unstable" + wantsDeploy.Spec.Template.Spec.Containers[0].Image = "tailscale/k8s-nameserver:stable" expectEqual(t, fc, wantsDeploy) }) } From 7d19813618e862d0a00cc66b600b470275c5b0bc Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 19 Nov 2025 14:53:43 -0800 Subject: [PATCH 1707/1708] net/batching: fix import formatting From #17842 Updates #cleanup Change-Id: Ie041b50659361b50558d5ec1f557688d09935f7c Signed-off-by: Brad Fitzpatrick --- net/batching/conn_linux_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/net/batching/conn_linux_test.go b/net/batching/conn_linux_test.go index 5e3c29e5c..c2cc463eb 100644 --- a/net/batching/conn_linux_test.go +++ b/net/batching/conn_linux_test.go @@ -7,7 +7,6 @@ import ( "encoding/binary" "net" "testing" - "unsafe" "github.com/tailscale/wireguard-go/conn" From 682172ca2d39163b2bbfbc4b1422b1dfa8d453f0 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 19 Nov 2025 11:59:51 -0500 Subject: [PATCH 1708/1708] net/netns: remove spammy logs for interface binding caps fixes tailscale/tailscale#17990 The logging for the netns caps is spammy. Log only on changes to the values and don't log Darwin specific stuff on non Darwin clients. 
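For reference, a stripped-down sketch of the log-on-change pattern applied here (standard library only, with invented names, so not the actual netns code): atomic.Bool.Swap stores the new value and returns the previous one in a single atomic step, so repeated calls with the same value stay silent and two concurrent callers cannot both log the same transition.

package main

import (
	"log"
	"sync/atomic"
)

var knob atomic.Bool // stands in for one of the netns settings

// setKnob is a hypothetical setter: it logs only when the value actually
// flips, because Swap reports the old value while storing the new one.
func setKnob(v bool) {
	if knob.Swap(v) != v {
		log.Printf("netns: knob changed to %v", v)
	}
}

func main() {
	setKnob(true)  // logs: knob changed to true
	setKnob(true)  // silent: value unchanged
	setKnob(false) // logs: knob changed to false
}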
Signed-off-by: Jonathan Nobels --- net/netns/netns.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/net/netns/netns.go b/net/netns/netns.go index ccb20d27e..81ab5e2a2 100644 --- a/net/netns/netns.go +++ b/net/netns/netns.go @@ -17,6 +17,7 @@ import ( "context" "net" "net/netip" + "runtime" "sync/atomic" "tailscale.com/net/netknob" @@ -40,8 +41,9 @@ var bindToInterfaceByRoute atomic.Bool // // Currently, this only changes the behaviour on macOS and Windows. func SetBindToInterfaceByRoute(logf logger.Logf, v bool) { - logf("netns: bindToInterfaceByRoute to %v", v) - bindToInterfaceByRoute.Store(v) + if bindToInterfaceByRoute.Swap(v) != v { + logf("netns: bindToInterfaceByRoute changed to %v", v) + } } var disableBindConnToInterface atomic.Bool @@ -54,8 +56,9 @@ var disableBindConnToInterface atomic.Bool // SetDisableBindConnToInterfaceAppleExt which will disable explicit interface // binding only when tailscaled is running inside a network extension process. func SetDisableBindConnToInterface(logf logger.Logf, v bool) { - logf("netns: disableBindConnToInterface set to %v", v) - disableBindConnToInterface.Store(v) + if disableBindConnToInterface.Swap(v) != v { + logf("netns: disableBindConnToInterface changed to %v", v) + } } var disableBindConnToInterfaceAppleExt atomic.Bool @@ -64,8 +67,9 @@ var disableBindConnToInterfaceAppleExt atomic.Bool // connections to the default network interface but only on Apple clients where // tailscaled is running inside a network extension. func SetDisableBindConnToInterfaceAppleExt(logf logger.Logf, v bool) { - logf("netns: disableBindConnToInterfaceAppleExt set to %v", v) - disableBindConnToInterfaceAppleExt.Store(v) + if runtime.GOOS == "darwin" && disableBindConnToInterfaceAppleExt.Swap(v) != v { + logf("netns: disableBindConnToInterfaceAppleExt changed to %v", v) + } } // Listener returns a new net.Listener with its Control hook func
                    KeyTokens